
Blueprints hide-hypervisor-id-flavor-extra-spec [1] and add-kvm-hidden-feature [2] allow hiding KVM's signature from guests, which is necessary for Nvidia drivers to work in VMs with passthrough GPUs. While this works well for Linux guests on KVM, it does not work for Windows guests, because KVM emulates some HyperV features for them. With the current implementation, KVM's signature is hidden but HyperV's is not, so Nvidia drivers do not work in Windows VMs. This change generates an extra element in the libvirt XML for Windows guests on KVM which obfuscates HyperV's signature too, controlled by the existing image and flavor parameters (img_hide_hypervisor_id and hide_hypervisor_id, respectively). The extra XML element is <vendor_id state='on' value='1234567890ab'/> in features/hyperv. [1] https://blueprints.launchpad.net/nova/+spec/hide-hypervisor-id-flavor-extra-spec [2] https://blueprints.launchpad.net/nova/+spec/add-kvm-hidden-feature Change-Id: Iaaeae9281301f14f4ae9b43f4a06de58b699fd68 Closes-Bug: 1779845
23172 lines
1.0 MiB
23172 lines
1.0 MiB
# Copyright 2010 OpenStack Foundation
|
|
# Copyright 2012 University Of Minho
|
|
#
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
# not use this file except in compliance with the License. You may obtain
|
|
# a copy of the License at
|
|
#
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
#
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
# License for the specific language governing permissions and limitations
|
|
# under the License.
|
|
|
|
import binascii
|
|
from collections import deque
|
|
from collections import OrderedDict
|
|
import contextlib
|
|
import copy
|
|
import datetime
|
|
import errno
|
|
import glob
|
|
import io
|
|
import os
|
|
import random
|
|
import re
|
|
import shutil
|
|
import signal
|
|
import threading
|
|
import time
|
|
import unittest
|
|
|
|
from castellan import key_manager
|
|
import ddt
|
|
import eventlet
|
|
from eventlet import greenthread
|
|
import fixtures
|
|
from lxml import etree
|
|
import mock
|
|
from os_brick import encryptors
|
|
from os_brick import exception as brick_exception
|
|
from os_brick.initiator import connector
|
|
import os_resource_classes as orc
|
|
import os_vif
|
|
from oslo_concurrency import lockutils
|
|
from oslo_concurrency import processutils
|
|
from oslo_config import cfg
|
|
from oslo_serialization import jsonutils
|
|
from oslo_service import loopingcall
|
|
from oslo_utils import fileutils
|
|
from oslo_utils import fixture as utils_fixture
|
|
from oslo_utils.fixture import uuidsentinel as uuids
|
|
from oslo_utils import units
|
|
from oslo_utils import uuidutils
|
|
from oslo_utils import versionutils
|
|
import six
|
|
from six.moves import range
|
|
|
|
from nova.api.metadata import base as instance_metadata
|
|
from nova.compute import manager
|
|
from nova.compute import power_state
|
|
from nova.compute import provider_tree
|
|
from nova.compute import task_states
|
|
from nova.compute import utils as compute_utils
|
|
from nova.compute import vm_states
|
|
import nova.conf
|
|
from nova import context
|
|
from nova.db import api as db
|
|
from nova import exception
|
|
from nova.network import model as network_model
|
|
from nova import objects
|
|
from nova.objects import block_device as block_device_obj
|
|
from nova.objects import fields
|
|
from nova.objects import migrate_data as migrate_data_obj
|
|
from nova.objects import virtual_interface as obj_vif
|
|
from nova.pci import manager as pci_manager
|
|
from nova.pci import utils as pci_utils
|
|
import nova.privsep.fs
|
|
import nova.privsep.libvirt
|
|
from nova import test
|
|
from nova.tests.unit import fake_block_device
|
|
from nova.tests.unit import fake_diagnostics
|
|
from nova.tests.unit import fake_flavor
|
|
from nova.tests.unit import fake_instance
|
|
from nova.tests.unit import fake_network
|
|
import nova.tests.unit.image.fake
|
|
from nova.tests.unit import matchers
|
|
from nova.tests.unit.objects import test_diagnostics
|
|
from nova.tests.unit.objects import test_pci_device
|
|
from nova.tests.unit.objects import test_vcpu_model
|
|
from nova.tests.unit.virt.libvirt import fake_imagebackend
|
|
from nova.tests.unit.virt.libvirt import fake_libvirt_utils
|
|
from nova.tests.unit.virt.libvirt import fakelibvirt
|
|
from nova import utils
|
|
from nova import version
|
|
from nova.virt import block_device as driver_block_device
|
|
from nova.virt import driver
|
|
from nova.virt import fake
|
|
from nova.virt import firewall as base_firewall
|
|
from nova.virt import hardware
|
|
from nova.virt.image import model as imgmodel
|
|
from nova.virt import images
|
|
from nova.virt.libvirt import blockinfo
|
|
from nova.virt.libvirt import config as vconfig
|
|
from nova.virt.libvirt import driver as libvirt_driver
|
|
from nova.virt.libvirt import firewall
|
|
from nova.virt.libvirt import guest as libvirt_guest
|
|
from nova.virt.libvirt import host
|
|
from nova.virt.libvirt import imagebackend
|
|
from nova.virt.libvirt import imagecache
|
|
from nova.virt.libvirt import migration as libvirt_migrate
|
|
from nova.virt.libvirt.storage import dmcrypt
|
|
from nova.virt.libvirt.storage import lvm
|
|
from nova.virt.libvirt.storage import rbd_utils
|
|
from nova.virt.libvirt import utils as libvirt_utils
|
|
from nova.virt.libvirt.volume import volume as volume_drivers
|
|
|
|
|
|
CONF = nova.conf.CONF
|
|
|
|
_fake_network_info = fake_network.fake_get_instance_nw_info
|
|
|
|
_fake_NodeDevXml = \
|
|
{"pci_0000_04_00_3": """
|
|
<device>
|
|
<name>pci_0000_04_00_3</name>
|
|
<parent>pci_0000_00_01_1</parent>
|
|
<driver>
|
|
<name>igb</name>
|
|
</driver>
|
|
<capability type='pci'>
|
|
<domain>0</domain>
|
|
<bus>4</bus>
|
|
<slot>0</slot>
|
|
<function>3</function>
|
|
<product id='0x1521'>I350 Gigabit Network Connection</product>
|
|
<vendor id='0x8086'>Intel Corporation</vendor>
|
|
<capability type='virt_functions'>
|
|
<address domain='0x0000' bus='0x04' slot='0x10' function='0x3'/>
|
|
<address domain='0x0000' bus='0x04' slot='0x10' function='0x7'/>
|
|
<address domain='0x0000' bus='0x04' slot='0x11' function='0x3'/>
|
|
<address domain='0x0000' bus='0x04' slot='0x11' function='0x7'/>
|
|
</capability>
|
|
</capability>
|
|
</device>""",
|
|
"pci_0000_04_10_7": """
|
|
<device>
|
|
<name>pci_0000_04_10_7</name>
|
|
<parent>pci_0000_00_01_1</parent>
|
|
<driver>
|
|
<name>igbvf</name>
|
|
</driver>
|
|
<capability type='pci'>
|
|
<domain>0</domain>
|
|
<bus>4</bus>
|
|
<slot>16</slot>
|
|
<function>7</function>
|
|
<product id='0x1520'>I350 Ethernet Controller Virtual Function
|
|
</product>
|
|
<vendor id='0x8086'>Intel Corporation</vendor>
|
|
<capability type='phys_function'>
|
|
<address domain='0x0000' bus='0x04' slot='0x00' function='0x3'/>
|
|
</capability>
|
|
<capability type='virt_functions'>
|
|
</capability>
|
|
</capability>
|
|
</device>""",
|
|
"pci_0000_04_11_7": """
|
|
<device>
|
|
<name>pci_0000_04_11_7</name>
|
|
<parent>pci_0000_00_01_1</parent>
|
|
<driver>
|
|
<name>igbvf</name>
|
|
</driver>
|
|
<capability type='pci'>
|
|
<domain>0</domain>
|
|
<bus>4</bus>
|
|
<slot>17</slot>
|
|
<function>7</function>
|
|
<product id='0x1520'>I350 Ethernet Controller Virtual Function
|
|
</product>
|
|
<vendor id='0x8086'>Intel Corporation</vendor>
|
|
<numa node='0'/>
|
|
<capability type='phys_function'>
|
|
<address domain='0x0000' bus='0x04' slot='0x00' function='0x3'/>
|
|
</capability>
|
|
<capability type='virt_functions'>
|
|
</capability>
|
|
</capability>
|
|
</device>""",
|
|
"pci_0000_04_00_1": """
|
|
<device>
|
|
<name>pci_0000_04_00_1</name>
|
|
<path>/sys/devices/pci0000:00/0000:00:02.0/0000:04:00.1</path>
|
|
<parent>pci_0000_00_02_0</parent>
|
|
<driver>
|
|
<name>mlx5_core</name>
|
|
</driver>
|
|
<capability type='pci'>
|
|
<domain>0</domain>
|
|
<bus>4</bus>
|
|
<slot>0</slot>
|
|
<function>1</function>
|
|
<product id='0x1013'>MT27700 Family [ConnectX-4]</product>
|
|
<vendor id='0x15b3'>Mellanox Technologies</vendor>
|
|
<iommuGroup number='15'>
|
|
<address domain='0x0000' bus='0x03' slot='0x00' function='0x0'/>
|
|
<address domain='0x0000' bus='0x03' slot='0x00' function='0x1'/>
|
|
</iommuGroup>
|
|
<numa node='0'/>
|
|
<pci-express>
|
|
<link validity='cap' port='0' speed='8' width='16'/>
|
|
<link validity='sta' speed='8' width='16'/>
|
|
</pci-express>
|
|
</capability>
|
|
</device>""",
|
|
# libvirt >= 1.3.0 nodedev-dumpxml
|
|
"pci_0000_03_00_0": """
|
|
<device>
|
|
<name>pci_0000_03_00_0</name>
|
|
<path>/sys/devices/pci0000:00/0000:00:02.0/0000:03:00.0</path>
|
|
<parent>pci_0000_00_02_0</parent>
|
|
<driver>
|
|
<name>mlx5_core</name>
|
|
</driver>
|
|
<capability type='pci'>
|
|
<domain>0</domain>
|
|
<bus>3</bus>
|
|
<slot>0</slot>
|
|
<function>0</function>
|
|
<product id='0x1013'>MT27700 Family [ConnectX-4]</product>
|
|
<vendor id='0x15b3'>Mellanox Technologies</vendor>
|
|
<capability type='virt_functions' maxCount='16'>
|
|
<address domain='0x0000' bus='0x03' slot='0x00' function='0x2'/>
|
|
<address domain='0x0000' bus='0x03' slot='0x00' function='0x3'/>
|
|
<address domain='0x0000' bus='0x03' slot='0x00' function='0x4'/>
|
|
<address domain='0x0000' bus='0x03' slot='0x00' function='0x5'/>
|
|
</capability>
|
|
<iommuGroup number='15'>
|
|
<address domain='0x0000' bus='0x03' slot='0x00' function='0x0'/>
|
|
<address domain='0x0000' bus='0x03' slot='0x00' function='0x1'/>
|
|
</iommuGroup>
|
|
<numa node='0'/>
|
|
<pci-express>
|
|
<link validity='cap' port='0' speed='8' width='16'/>
|
|
<link validity='sta' speed='8' width='16'/>
|
|
</pci-express>
|
|
</capability>
|
|
</device>""",
|
|
"pci_0000_03_00_1": """
|
|
<device>
|
|
<name>pci_0000_03_00_1</name>
|
|
<path>/sys/devices/pci0000:00/0000:00:02.0/0000:03:00.1</path>
|
|
<parent>pci_0000_00_02_0</parent>
|
|
<driver>
|
|
<name>mlx5_core</name>
|
|
</driver>
|
|
<capability type='pci'>
|
|
<domain>0</domain>
|
|
<bus>3</bus>
|
|
<slot>0</slot>
|
|
<function>1</function>
|
|
<product id='0x1013'>MT27700 Family [ConnectX-4]</product>
|
|
<vendor id='0x15b3'>Mellanox Technologies</vendor>
|
|
<capability type='virt_functions' maxCount='16'/>
|
|
<iommuGroup number='15'>
|
|
<address domain='0x0000' bus='0x03' slot='0x00' function='0x0'/>
|
|
<address domain='0x0000' bus='0x03' slot='0x00' function='0x1'/>
|
|
</iommuGroup>
|
|
<numa node='0'/>
|
|
<pci-express>
|
|
<link validity='cap' port='0' speed='8' width='16'/>
|
|
<link validity='sta' speed='8' width='16'/>
|
|
</pci-express>
|
|
</capability>
|
|
</device>""",
|
|
"net_enp2s2_02_9a_a1_37_be_54": """
|
|
<device>
|
|
<name>net_enp2s2_02_9a_a1_37_be_54</name>
|
|
<path>/sys/devices/pci0000:00/0000:00:02.0/0000:02:02.0/net/enp2s2</path>
|
|
<parent>pci_0000_04_11_7</parent>
|
|
<capability type='net'>
|
|
<interface>enp2s2</interface>
|
|
<address>02:9a:a1:37:be:54</address>
|
|
<link state='down'/>
|
|
<feature name='rx'/>
|
|
<feature name='tx'/>
|
|
<feature name='sg'/>
|
|
<feature name='tso'/>
|
|
<feature name='gso'/>
|
|
<feature name='gro'/>
|
|
<feature name='rxvlan'/>
|
|
<feature name='txvlan'/>
|
|
<capability type='80203'/>
|
|
</capability>
|
|
</device>""",
|
|
"pci_0000_06_00_0": """
|
|
<device>
|
|
<name>pci_0000_06_00_0</name>
|
|
<path>/sys/devices/pci0000:00/0000:00:06.0</path>
|
|
<parent></parent>
|
|
<driver>
|
|
<name>nvidia</name>
|
|
</driver>
|
|
<capability type="pci">
|
|
<domain>0</domain>
|
|
<bus>10</bus>
|
|
<slot>1</slot>
|
|
<function>5</function>
|
|
<product id="0x0FFE">GRID M60-0B</product>
|
|
<vendor id="0x10DE">Nvidia</vendor>
|
|
<numa node="8"/>
|
|
<capability type='mdev_types'>
|
|
<type id='nvidia-11'>
|
|
<name>GRID M60-0B</name>
|
|
<deviceAPI>vfio-pci</deviceAPI>
|
|
<availableInstances>16</availableInstances>
|
|
</type>
|
|
</capability>
|
|
</capability>
|
|
</device>""",
|
|
"mdev_4b20d080_1b54_4048_85b3_a6a62d165c01": """
|
|
<device>
|
|
<name>mdev_4b20d080_1b54_4048_85b3_a6a62d165c01</name>
|
|
<path>/sys/devices/pci0000:00/0000:00:02.0/4b20d080-1b54-4048-85b3-a6a62d165c01</path>
|
|
<parent>pci_0000_00_02_0</parent>
|
|
<driver>
|
|
<name>vfio_mdev</name>
|
|
</driver>
|
|
<capability type='mdev'>
|
|
<type id='nvidia-11'/>
|
|
<iommuGroup number='12'/>
|
|
</capability>
|
|
</device>
|
|
""",
|
|
}
|
|
|
|
_fake_cpu_info = {
|
|
"arch": "test_arch",
|
|
"model": "test_model",
|
|
"vendor": "test_vendor",
|
|
"topology": {
|
|
"sockets": 1,
|
|
"cores": 8,
|
|
"threads": 16
|
|
},
|
|
"features": ["feature1", "feature2"]
|
|
}
|
|
|
|
eph_default_ext = utils.get_hash_str(nova.privsep.fs._DEFAULT_FILE_SYSTEM)[:7]
|
|
|
|
_fake_qemu64_cpu_feature = """
|
|
<cpu mode='custom' match='exact'>
|
|
<model fallback='forbid'>qemu64</model>
|
|
<feature policy='require' name='svm'/>
|
|
<feature policy='require' name='lm'/>
|
|
<feature policy='require' name='nx'/>
|
|
<feature policy='require' name='syscall'/>
|
|
<feature policy='require' name='cx16'/>
|
|
<feature policy='require' name='pni'/>
|
|
<feature policy='require' name='sse2'/>
|
|
<feature policy='require' name='sse'/>
|
|
<feature policy='require' name='fxsr'/>
|
|
<feature policy='require' name='mmx'/>
|
|
<feature policy='require' name='clflush'/>
|
|
<feature policy='require' name='pse36'/>
|
|
<feature policy='require' name='pat'/>
|
|
<feature policy='require' name='cmov'/>
|
|
<feature policy='require' name='mca'/>
|
|
<feature policy='require' name='pge'/>
|
|
<feature policy='require' name='mtrr'/>
|
|
<feature policy='require' name='sep'/>
|
|
<feature policy='require' name='apic'/>
|
|
<feature policy='require' name='cx8'/>
|
|
<feature policy='require' name='mce'/>
|
|
<feature policy='require' name='pae'/>
|
|
<feature policy='require' name='msr'/>
|
|
<feature policy='require' name='tsc'/>
|
|
<feature policy='require' name='pse'/>
|
|
<feature policy='require' name='de'/>
|
|
<feature policy='require' name='fpu'/>
|
|
</cpu>
|
|
"""
|
|
|
|
_fake_broadwell_cpu_feature = """
|
|
<cpu mode='custom' match='exact'>
|
|
<model fallback='forbid'>Broadwell-noTSX</model>
|
|
<vendor>Intel</vendor>
|
|
<feature policy='require' name='smap'/>
|
|
<feature policy='require' name='adx'/>
|
|
<feature policy='require' name='rdseed'/>
|
|
<feature policy='require' name='invpcid'/>
|
|
<feature policy='require' name='erms'/>
|
|
<feature policy='require' name='bmi2'/>
|
|
<feature policy='require' name='smep'/>
|
|
<feature policy='require' name='avx2'/>
|
|
<feature policy='require' name='bmi1'/>
|
|
<feature policy='require' name='fsgsbase'/>
|
|
<feature policy='require' name='3dnowprefetch'/>
|
|
<feature policy='require' name='lahf_lm'/>
|
|
<feature policy='require' name='lm'/>
|
|
<feature policy='require' name='rdtscp'/>
|
|
<feature policy='require' name='nx'/>
|
|
<feature policy='require' name='syscall'/>
|
|
<feature policy='require' name='avx'/>
|
|
<feature policy='require' name='xsave'/>
|
|
<feature policy='require' name='aes'/>
|
|
<feature policy='require' name='tsc-deadline'/>
|
|
<feature policy='require' name='popcnt'/>
|
|
<feature policy='require' name='movbe'/>
|
|
<feature policy='require' name='x2apic'/>
|
|
<feature policy='require' name='sse4.2'/>
|
|
<feature policy='require' name='sse4.1'/>
|
|
<feature policy='require' name='pcid'/>
|
|
<feature policy='require' name='cx16'/>
|
|
<feature policy='require' name='fma'/>
|
|
<feature policy='require' name='ssse3'/>
|
|
<feature policy='require' name='pclmuldq'/>
|
|
<feature policy='require' name='pni'/>
|
|
<feature policy='require' name='sse2'/>
|
|
<feature policy='require' name='sse'/>
|
|
<feature policy='require' name='fxsr'/>
|
|
<feature policy='require' name='mmx'/>
|
|
<feature policy='require' name='clflush'/>
|
|
<feature policy='require' name='pse36'/>
|
|
<feature policy='require' name='pat'/>
|
|
<feature policy='require' name='cmov'/>
|
|
<feature policy='require' name='mca'/>
|
|
<feature policy='require' name='pge'/>
|
|
<feature policy='require' name='mtrr'/>
|
|
<feature policy='require' name='sep'/>
|
|
<feature policy='require' name='apic'/>
|
|
<feature policy='require' name='cx8'/>
|
|
<feature policy='require' name='mce'/>
|
|
<feature policy='require' name='pae'/>
|
|
<feature policy='require' name='msr'/>
|
|
<feature policy='require' name='tsc'/>
|
|
<feature policy='require' name='pse'/>
|
|
<feature policy='require' name='de'/>
|
|
<feature policy='require' name='fpu'/>
|
|
</cpu>
|
|
"""
|
|
|
|
|
|
def eph_name(size):
    """Return the canonical backing-file name for a *size* GB ephemeral disk."""
    return 'ephemeral_%(size)s_%(ext)s' % {
        'size': size,
        'ext': eph_default_ext,
    }
|
|
|
|
|
|
def fake_disk_info_byname(instance, type='qcow2'):
    """Return instance_disk_info corresponding accurately to the properties of
    the given Instance object. The info is returned as an OrderedDict of
    name->disk_info for each disk.

    :param instance: The instance we're generating fake disk_info for.
    :param type: libvirt's disk type.
    :return: disk_info
    :rtype: OrderedDict
    """
    instance_dir = os.path.join(CONF.instances_path, instance.uuid)

    def _entry(name, virt_size, backing_file, disk_type=None):
        # Every disk entry shares the same shape; virtual and actual size
        # are reported as equal with no overcommit.
        return {'type': disk_type or type,
                'path': os.path.join(instance_dir, name),
                'virt_disk_size': virt_size,
                'backing_file': backing_file,
                'disk_size': virt_size,
                'over_committed_disk_size': 0}

    disk_info = OrderedDict()

    # root disk (omitted for volume-backed instances)
    if (instance.image_ref is not None and
            instance.image_ref != uuids.fake_volume_backed_image_ref):
        cache_name = imagecache.get_cache_fname(instance.image_ref)
        disk_info['disk'] = _entry(
            'disk', instance.flavor.root_gb * units.Gi, cache_name)

    swap_mb = instance.flavor.swap
    if swap_mb > 0:
        disk_info['disk.swap'] = _entry(
            'disk.swap', swap_mb * units.Mi, 'swap_%s' % swap_mb)

    eph_gb = instance.flavor.ephemeral_gb
    if eph_gb > 0:
        disk_info['disk.local'] = _entry(
            'disk.local', eph_gb * units.Gi, eph_name(eph_gb))

    if instance.config_drive:
        # Config drives are always raw and have no backing file.
        disk_info['disk.config'] = _entry(
            'disk.config', 1024, '', disk_type='raw')

    return disk_info
|
|
|
|
|
|
def fake_diagnostics_object(with_cpus=False, with_disks=False, with_nic=False):
    """Build a fake Diagnostics object with optional cpu/disk/nic sections.

    :param with_cpus: include four fake per-vCPU time entries
    :param with_disks: include two identical fake disk I/O entries
    :param with_nic: include a single fake NIC stats entry
    """
    diag_dict = {'config_drive': False,
                 'driver': 'libvirt',
                 'hypervisor': 'kvm',
                 'hypervisor_os': 'linux',
                 'memory_details': {'maximum': 2048, 'used': 1234},
                 'state': 'running',
                 'uptime': 10}

    if with_cpus:
        cpu_times = [15340000000, 1640000000, 3040000000, 1420000000]
        diag_dict['cpu_details'] = [
            {'id': idx, 'time': cpu_time}
            for idx, cpu_time in enumerate(cpu_times)]

    if with_disks:
        diag_dict['disk_details'] = [
            {'read_bytes': 688640,
             'read_requests': 169,
             'write_bytes': 0,
             'write_requests': 0,
             'errors_count': 1}
            for _ in range(2)]

    if with_nic:
        diag_dict['nic_details'] = [
            {'mac_address': '52:54:00:a4:38:38',
             'rx_drop': 0,
             'rx_errors': 0,
             'rx_octets': 4408,
             'rx_packets': 82,
             'tx_drop': 0,
             'tx_errors': 0,
             'tx_octets': 0,
             'tx_packets': 0}]

    return fake_diagnostics.fake_diagnostics_obj(**diag_dict)
|
|
|
|
|
|
def fake_disk_info_json(instance, type='qcow2'):
    """Return fake instance_disk_info corresponding accurately to the
    properties of the given Instance object.

    :param instance: The instance we're generating fake disk_info for.
    :param type: libvirt's disk type.
    :return: JSON representation of instance_disk_info for all disks.
    :rtype: str
    """
    by_name = fake_disk_info_byname(instance, type)
    # Serialize only the per-disk dicts; the names are implicit in 'path'.
    return jsonutils.dumps(by_name.values())
|
|
|
|
|
|
def get_injection_info(network_info=None, admin_pass=None, files=None):
    """Build an InjectionInfo tuple for file/password injection tests."""
    return libvirt_driver.InjectionInfo(network_info=network_info,
                                        admin_pass=admin_pass,
                                        files=files)
|
|
|
|
|
|
def _concurrency(signal, wait, done, target, is_block_dev=False):
|
|
signal.send()
|
|
wait.wait()
|
|
done.send()
|
|
|
|
|
|
class FakeVirtDomain(object):
    """Test double for a libvirt virDomain.

    Implements just enough of the virDomain API surface for the driver
    tests: identity accessors return the values supplied to the
    constructor, XMLDesc() returns canned XML, and all mutating calls
    (attach/detach, snapshot, block jobs, lifecycle) are no-ops.
    """

    def __init__(self, fake_xml=None, uuidstr=None, id=None, name=None,
                 info=None):
        if uuidstr is None:
            uuidstr = uuids.fake
        self.uuidstr = uuidstr
        self.id = id
        self.domname = name
        # Mirrors libvirt's dom.info() tuple:
        # (state, maxMem KiB, memory KiB, nrVirtCpu, cpuTime)
        self._info = info or (
            [power_state.RUNNING, 2048 * units.Mi,
             1234 * units.Mi, None, None])
        if fake_xml:
            self._fake_dom_xml = fake_xml
        else:
            self._fake_dom_xml = """
                <domain type='kvm'>
                    <name>testinstance1</name>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                        </disk>
                    </devices>
                </domain>
            """

    def name(self):
        # Fall back to a unique-per-object name when none was supplied.
        if self.domname is None:
            return "fake-domain %s" % self
        else:
            return self.domname

    def ID(self):
        return self.id

    def info(self):
        return self._info

    def create(self):
        pass

    def managedSave(self, *args):
        pass

    def createWithFlags(self, launch_flags):
        pass

    def XMLDesc(self, flags):
        # flags is accepted for API compatibility but ignored.
        return self._fake_dom_xml

    def UUIDString(self):
        return self.uuidstr

    def attachDeviceFlags(self, xml, flags):
        pass

    def attachDevice(self, xml):
        pass

    def detachDeviceFlags(self, xml, flags):
        pass

    def snapshotCreateXML(self, xml, flags):
        pass

    def blockCommit(self, disk, base, top, bandwidth=0, flags=0):
        pass

    def blockRebase(self, disk, base, bandwidth=0, flags=0):
        pass

    def blockJobInfo(self, path, flags):
        pass

    def blockJobAbort(self, path, flags):
        pass

    def resume(self):
        pass

    def destroy(self):
        pass

    def fsFreeze(self, disks=None, flags=0):
        pass

    def fsThaw(self, disks=None, flags=0):
        pass

    def isActive(self):
        # Always report the domain as running.
        return True

    def isPersistent(self):
        return True

    def undefine(self):
        return True
|
|
|
|
|
|
class CacheConcurrencyTestCase(test.NoDBTestCase):
    """Verify locking behaviour of the image cache.

    Two greenthreads caching the *same* backing file must be serialized
    by the cache lock; two caching *different* backing files must be
    able to run concurrently.
    """

    def setUp(self):
        super(CacheConcurrencyTestCase, self).setUp()

        self.flags(instances_path=self.useFixture(fixtures.TempDir()).path)

        # utils.synchronized() will create the lock_path for us if it
        # doesn't already exist. It will also delete it when it's done,
        # which can cause race conditions with the multiple threads we
        # use for tests. So, create the path here so utils.synchronized()
        # won't delete it out from under one of the threads.
        self.lock_path = os.path.join(CONF.instances_path, 'locks')
        fileutils.ensure_tree(self.lock_path)

        def fake_exists(fname):
            # Only the cache base dir and the lock dir "exist", so the
            # backend never finds a pre-populated cache file and always
            # invokes the creation callback.
            basedir = os.path.join(CONF.instances_path,
                                   CONF.image_cache_subdirectory_name)
            if fname == basedir or fname == self.lock_path:
                return True
            return False

        self.stub_out('os.path.exists', fake_exists)
        # Neutralize process execution and disk resizing; these tests only
        # exercise lock ordering, not real image manipulation.
        self.stub_out('oslo_concurrency.processutils.execute',
                      lambda *a, **kw: None)
        self.stub_out('nova.virt.disk.api.extend',
                      lambda image, size, use_cow=False: None)

    def _fake_instance(self, uuid):
        # Minimal Instance: only id/uuid are consulted by the cache path.
        return objects.Instance(id=1, uuid=uuid)

    def test_same_fname_concurrency(self):
        # Ensures that caching the same fname runs sequentially.
        uuid = uuids.fake

        backend = imagebackend.Backend(False)
        wait1 = eventlet.event.Event()
        done1 = eventlet.event.Event()
        sig1 = eventlet.event.Event()
        thr1 = eventlet.spawn(backend.by_name(self._fake_instance(uuid),
                                              'name').cache,
                              _concurrency, 'fname', None,
                              signal=sig1, wait=wait1, done=done1)
        eventlet.sleep(0)
        # Thread 1 should run before thread 2.
        sig1.wait()

        wait2 = eventlet.event.Event()
        done2 = eventlet.event.Event()
        sig2 = eventlet.event.Event()
        thr2 = eventlet.spawn(backend.by_name(self._fake_instance(uuid),
                                              'name').cache,
                              _concurrency, 'fname', None,
                              signal=sig2, wait=wait2, done=done2)

        wait2.send()
        eventlet.sleep(0)
        try:
            # Thread 2 must still be blocked on the lock held by thread 1.
            self.assertFalse(done2.ready())
        finally:
            wait1.send()
            done1.wait()
            eventlet.sleep(0)
        self.assertTrue(done2.ready())
        # Wait on greenthreads to assert they didn't raise exceptions
        # during execution
        thr1.wait()
        thr2.wait()

    def test_different_fname_concurrency(self):
        # Ensures that two different fname caches are concurrent.
        uuid = uuids.fake

        backend = imagebackend.Backend(False)
        wait1 = eventlet.event.Event()
        done1 = eventlet.event.Event()
        sig1 = eventlet.event.Event()
        thr1 = eventlet.spawn(backend.by_name(self._fake_instance(uuid),
                                              'name').cache,
                              _concurrency, 'fname2', None,
                              signal=sig1, wait=wait1, done=done1)
        eventlet.sleep(0)
        # Thread 1 should run before thread 2.
        sig1.wait()

        wait2 = eventlet.event.Event()
        done2 = eventlet.event.Event()
        sig2 = eventlet.event.Event()
        thr2 = eventlet.spawn(backend.by_name(self._fake_instance(uuid),
                                              'name').cache,
                              _concurrency, 'fname1', None,
                              signal=sig2, wait=wait2, done=done2)
        eventlet.sleep(0)
        # Wait for thread 2 to start.
        sig2.wait()

        wait2.send()
        # Thread 2 caches a different fname, so it should complete even
        # though thread 1 still holds its own lock; poll briefly for it.
        tries = 0
        while not done2.ready() and tries < 10:
            eventlet.sleep(0)
            tries += 1
        try:
            self.assertTrue(done2.ready())
        finally:
            wait1.send()
            eventlet.sleep(0)
        # Wait on greenthreads to assert they didn't raise exceptions
        # during execution
        thr1.wait()
        thr2.wait()
|
|
|
|
|
|
class FakeInvalidVolumeDriver(object):
    """Volume-driver double whose construction always fails."""

    def __init__(self, *args, **kwargs):
        raise brick_exception.InvalidConnectorProtocol('oops!')
|
|
|
|
|
|
class FakeConfigGuestDisk(object):
    """Minimal stand-in for a guest disk config object.

    Only the two attributes the tests inspect are provided, both
    initialized to None; constructor arguments are ignored.
    """

    def __init__(self, *args, **kwargs):
        self.source_type = None
        self.driver_cache = None
|
|
|
|
|
|
class FakeConfigGuest(object):
    """Minimal stand-in for a guest config object; args are ignored."""

    def __init__(self, *args, **kwargs):
        self.driver_cache = None
|
|
|
|
|
|
class FakeNodeDevice(object):
    """Stand-in for a libvirt virNodeDevice that returns canned XML."""

    def __init__(self, fakexml):
        self.xml = fakexml

    def XMLDesc(self, flags):
        # flags is accepted for API compatibility but has no effect.
        return self.xml
|
|
|
|
|
|
def _create_test_instance():
    """Return a dict of attributes for building a fake Instance object.

    The embedded flavor mirrors m1.small: 2 vCPUs, 2048 MB RAM, 10 GB
    root disk, 20 GB ephemeral, no swap.  Callers typically pass this
    dict to fake_instance helpers and tweak individual keys per test.
    """
    flavor = objects.Flavor(memory_mb=2048,
                            swap=0,
                            vcpu_weight=None,
                            root_gb=10,
                            id=2,
                            name=u'm1.small',
                            ephemeral_gb=20,
                            rxtx_factor=1.0,
                            flavorid=u'1',
                            vcpus=2,
                            extra_specs={})
    return {
        'id': 1,
        'uuid': uuids.instance,
        'memory_kb': '1024000',
        'basepath': '/some/path',
        'bridge_name': 'br100',
        'display_name': "Acme webserver",
        'vcpus': 2,
        'project_id': 'fake',
        'bridge': 'br101',
        'image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
        'root_gb': 10,
        'ephemeral_gb': 20,
        'instance_type_id': '5',  # m1.small
        'extra_specs': {},
        'system_metadata': {
            'image_disk_format': 'raw'
        },
        'flavor': flavor,
        'new_flavor': None,
        'old_flavor': None,
        'pci_devices': objects.PciDeviceList(),
        'numa_topology': None,
        'config_drive': None,
        'vm_mode': None,
        'kernel_id': None,
        'ramdisk_id': None,
        'os_type': 'linux',
        'user_id': '838a72b0-0d54-4827-8fd6-fb1227633ceb',
        'ephemeral_key_uuid': None,
        'vcpu_model': None,
        'host': 'fake-host',
        'task_state': None,
        'trusted_certs': None
    }
|
|
|
|
|
|
@ddt.ddt
|
|
class LibvirtConnTestCase(test.NoDBTestCase,
|
|
test_diagnostics.DiagnosticsComparisonMixin):
|
|
|
|
REQUIRES_LOCKING = True
|
|
|
|
_EPHEMERAL_20_DEFAULT = eph_name(20)
|
|
|
|
    def setUp(self):
        super(LibvirtConnTestCase, self).setUp()
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.get_admin_context()
        # Keep instance files and snapshots inside a per-test temp dir.
        temp_dir = self.useFixture(fixtures.TempDir()).path
        self.flags(instances_path=temp_dir,
                   firewall_driver=None)
        self.flags(snapshots_directory=temp_dir, group='libvirt')

        self.flags(sysinfo_serial="hardware", group="libvirt")

        # normally loaded during nova-compute startup
        os_vif.initialize()

        # Avoid real image resizing during spawn-style tests.
        self.stub_out('nova.virt.disk.api.extend',
                      lambda image, size, use_cow=False: None)

        # Skip the on-disk format probing resolve_driver_format would do
        # and trust the configured format instead.
        self.stub_out('nova.virt.libvirt.imagebackend.Image.'
                      'resolve_driver_format',
                      imagebackend.Image._get_driver_format)

        self.useFixture(fakelibvirt.FakeLibvirtFixture())
        self.test_instance = _create_test_instance()
        self.test_image_meta = {
            "disk_format": "raw",
        }
        self.image_service = nova.tests.unit.image.fake.stub_out_image_service(
            self)
        # Domain XML template for a block-device disk; tests fill in
        # {device_path}.
        self.device_xml_tmpl = """
        <domain type='kvm'>
          <devices>
            <disk type='block' device='disk'>
              <driver name='qemu' type='raw' cache='none'/>
              <source dev='{device_path}'/>
              <target bus='virtio' dev='vdb'/>
              <serial>58a84f6d-3f0c-4e19-a0af-eb657b790657</serial>
              <address type='pci' domain='0x0' bus='0x0' slot='0x04' \
function='0x0'/>
            </disk>
          </devices>
        </domain>
        """
|
|
|
|
    def relpath(self, path):
        """Return *path* relative to the configured instances_path."""
        return os.path.relpath(path, CONF.instances_path)
|
|
|
|
    def tearDown(self):
        # Reset the fake image service's module-level state between tests.
        nova.tests.unit.image.fake.FakeImageService_reset()
        super(LibvirtConnTestCase, self).tearDown()
|
|
|
|
def test_driver_capabilities(self):
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
self.assertTrue(drvr.capabilities['has_imagecache'],
|
|
'Driver capabilities for \'has_imagecache\' '
|
|
'is invalid')
|
|
self.assertTrue(drvr.capabilities['supports_evacuate'],
|
|
'Driver capabilities for \'supports_evacuate\' '
|
|
'is invalid')
|
|
self.assertFalse(drvr.capabilities['supports_migrate_to_same_host'],
|
|
'Driver capabilities for '
|
|
'\'supports_migrate_to_same_host\' is invalid')
|
|
self.assertTrue(drvr.capabilities['supports_attach_interface'],
|
|
'Driver capabilities for '
|
|
'\'supports_attach_interface\' '
|
|
'is invalid')
|
|
self.assertTrue(drvr.capabilities['supports_extend_volume'],
|
|
'Driver capabilities for '
|
|
'\'supports_extend_volume\' '
|
|
'is invalid')
|
|
self.assertTrue(drvr.capabilities['supports_trusted_certs'],
|
|
'Driver capabilities for '
|
|
'\'supports_trusted_certs\' '
|
|
'is invalid')
|
|
self.assertTrue(drvr.capabilities['supports_image_type_qcow2'],
|
|
'Driver capabilities for '
|
|
'\'supports_image_type_qcow2\' '
|
|
'is invalid')
|
|
|
|
    def test_driver_capabilities_qcow2_with_rbd(self):
        """supports_image_type_qcow2 tracks force_raw_images under rbd."""
        # With rbd backing and no raw conversion, qcow2 is not supported.
        self.flags(images_type='rbd', group='libvirt')
        self.flags(force_raw_images=False)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertFalse(drvr.capabilities['supports_image_type_qcow2'],
                         'Driver capabilities for '
                         '\'supports_image_type_qcow2\' '
                         'is invalid when \'images_type=rbd\'')

        # With raw conversion enabled, qcow2 sources are accepted again.
        self.flags(images_type='rbd', group='libvirt')
        self.flags(force_raw_images=True)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertTrue(drvr.capabilities['supports_image_type_qcow2'])
|
|
|
|
    def test_driver_capabilities_qcow2_with_lvm(self):
        """supports_image_type_qcow2 tracks force_raw_images under lvm."""
        # With lvm backing and no raw conversion, qcow2 is not supported.
        self.flags(images_type='lvm', group='libvirt')
        self.flags(force_raw_images=False)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertFalse(drvr.capabilities['supports_image_type_qcow2'],
                         'Driver capabilities for '
                         '\'supports_image_type_qcow2\' '
                         'is invalid when \'images_type=lvm\'')

        # With raw conversion enabled, qcow2 sources are accepted again.
        self.flags(images_type='lvm', group='libvirt')
        self.flags(force_raw_images=True)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertTrue(drvr.capabilities['supports_image_type_qcow2'])
|
|
|
|
    def create_fake_libvirt_mock(self, **kwargs):
        """Defining mocks for LibvirtDriver(libvirt is not used).

        Any keyword argument is attached to the fake connection object as
        an attribute, letting individual tests graft extra virConnect
        methods onto it.
        """

        # A fake libvirt.virConnect
        class FakeLibvirtDriver(object):
            def defineXML(self, xml):
                return FakeVirtDomain()

        # Creating mocks
        fake = FakeLibvirtDriver()
        # Customizing above fake if necessary
        for key, val in kwargs.items():
            fake.__setattr__(key, val)

        # Route both the driver's cached connection and Host.get_connection
        # through the fake so no real libvirt is ever contacted.
        self.stub_out('nova.virt.libvirt.driver.LibvirtDriver._conn', fake)
        self.stub_out('nova.virt.libvirt.host.Host.get_connection',
                      lambda x: fake)
|
|
|
|
def fake_lookup(self, instance_name):
|
|
return FakeVirtDomain()
|
|
|
|
def fake_execute(self, *args, **kwargs):
|
|
open(args[-1], "a").close()
|
|
|
|
def _create_service(self, **kwargs):
|
|
service_ref = {'host': kwargs.get('host', 'dummy'),
|
|
'disabled': kwargs.get('disabled', False),
|
|
'binary': 'nova-compute',
|
|
'topic': 'compute',
|
|
'report_count': 0}
|
|
|
|
return objects.Service(**service_ref)
|
|
|
|
def _get_pause_flag(self, drvr, network_info, power_on=True,
|
|
vifs_already_plugged=False):
|
|
timeout = CONF.vif_plugging_timeout
|
|
|
|
events = []
|
|
if (drvr._conn_supports_start_paused and
|
|
utils.is_neutron() and
|
|
not vifs_already_plugged and
|
|
power_on and timeout):
|
|
events = drvr._get_neutron_events(network_info)
|
|
|
|
return bool(events)
|
|
|
|
def test_public_api_signatures(self):
|
|
baseinst = driver.ComputeDriver(None)
|
|
inst = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
self.assertPublicAPISignatures(baseinst, inst)
|
|
|
|
def test_legacy_block_device_info(self):
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
self.assertFalse(drvr.need_legacy_block_device_info)
|
|
|
|
@mock.patch.object(host.Host, "has_min_version")
|
|
def test_min_version_start_ok(self, mock_version):
|
|
mock_version.return_value = True
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
drvr.init_host("dummyhost")
|
|
|
|
@mock.patch.object(host.Host, "has_min_version")
|
|
def test_min_version_start_abort(self, mock_version):
|
|
mock_version.return_value = False
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
self.assertRaises(exception.NovaException,
|
|
drvr.init_host,
|
|
"dummyhost")
|
|
|
|
@mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
|
|
return_value=versionutils.convert_version_to_int(
|
|
libvirt_driver.NEXT_MIN_LIBVIRT_VERSION) - 1)
|
|
@mock.patch.object(libvirt_driver.LOG, 'warning')
|
|
def test_next_min_version_deprecation_warning(self, mock_warning,
|
|
mock_get_libversion):
|
|
# Skip test if there's no currently planned new min version
|
|
if (versionutils.convert_version_to_int(
|
|
libvirt_driver.NEXT_MIN_LIBVIRT_VERSION) ==
|
|
versionutils.convert_version_to_int(
|
|
libvirt_driver.MIN_LIBVIRT_VERSION)):
|
|
self.skipTest("NEXT_MIN_LIBVIRT_VERSION == MIN_LIBVIRT_VERSION")
|
|
|
|
# Test that a warning is logged if the libvirt version is less than
|
|
# the next required minimum version.
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
drvr.init_host("dummyhost")
|
|
# assert that the next min version is in a warning message
|
|
expected_arg = {'version': versionutils.convert_version_to_str(
|
|
versionutils.convert_version_to_int(
|
|
libvirt_driver.NEXT_MIN_LIBVIRT_VERSION))}
|
|
version_arg_found = False
|
|
for call in mock_warning.call_args_list:
|
|
if call[0][1] == expected_arg:
|
|
version_arg_found = True
|
|
break
|
|
self.assertTrue(version_arg_found)
|
|
|
|
@mock.patch.object(fakelibvirt.Connection, 'getVersion',
|
|
return_value=versionutils.convert_version_to_int(
|
|
libvirt_driver.NEXT_MIN_QEMU_VERSION) - 1)
|
|
@mock.patch.object(libvirt_driver.LOG, 'warning')
|
|
def test_next_min_qemu_version_deprecation_warning(self, mock_warning,
|
|
mock_get_libversion):
|
|
# Skip test if there's no currently planned new min version
|
|
if (versionutils.convert_version_to_int(
|
|
libvirt_driver.NEXT_MIN_QEMU_VERSION) ==
|
|
versionutils.convert_version_to_int(
|
|
libvirt_driver.MIN_QEMU_VERSION)):
|
|
self.skipTest("NEXT_MIN_QEMU_VERSION == MIN_QEMU_VERSION")
|
|
|
|
# Test that a warning is logged if the libvirt version is less than
|
|
# the next required minimum version.
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
drvr.init_host("dummyhost")
|
|
# assert that the next min version is in a warning message
|
|
expected_arg = {'version': versionutils.convert_version_to_str(
|
|
versionutils.convert_version_to_int(
|
|
libvirt_driver.NEXT_MIN_QEMU_VERSION))}
|
|
version_arg_found = False
|
|
for call in mock_warning.call_args_list:
|
|
if call[0][1] == expected_arg:
|
|
version_arg_found = True
|
|
break
|
|
self.assertTrue(version_arg_found)
|
|
|
|
@mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
|
|
return_value=versionutils.convert_version_to_int(
|
|
libvirt_driver.NEXT_MIN_LIBVIRT_VERSION))
|
|
@mock.patch.object(libvirt_driver.LOG, 'warning')
|
|
def test_next_min_version_ok(self, mock_warning, mock_get_libversion):
|
|
# Skip test if there's no currently planned new min version
|
|
|
|
if (versionutils.convert_version_to_int(
|
|
libvirt_driver.NEXT_MIN_LIBVIRT_VERSION) ==
|
|
versionutils.convert_version_to_int(
|
|
libvirt_driver.MIN_LIBVIRT_VERSION)):
|
|
self.skipTest("NEXT_MIN_LIBVIRT_VERSION == MIN_LIBVIRT_VERSION")
|
|
|
|
# Test that a warning is not logged if the libvirt version is greater
|
|
# than or equal to NEXT_MIN_LIBVIRT_VERSION.
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
drvr.init_host("dummyhost")
|
|
# assert that the next min version is in a warning message
|
|
expected_arg = {'version': versionutils.convert_version_to_str(
|
|
versionutils.convert_version_to_int(
|
|
libvirt_driver.NEXT_MIN_LIBVIRT_VERSION))}
|
|
version_arg_found = False
|
|
for call in mock_warning.call_args_list:
|
|
if call[0][1] == expected_arg:
|
|
version_arg_found = True
|
|
break
|
|
self.assertFalse(version_arg_found)
|
|
|
|
@mock.patch.object(fakelibvirt.Connection, 'getVersion',
|
|
return_value=versionutils.convert_version_to_int(
|
|
libvirt_driver.NEXT_MIN_QEMU_VERSION))
|
|
@mock.patch.object(libvirt_driver.LOG, 'warning')
|
|
def test_next_min_qemu_version_ok(self, mock_warning, mock_get_libversion):
|
|
# Skip test if there's no currently planned new min version
|
|
|
|
if (versionutils.convert_version_to_int(
|
|
libvirt_driver.NEXT_MIN_QEMU_VERSION) ==
|
|
versionutils.convert_version_to_int(
|
|
libvirt_driver.MIN_QEMU_VERSION)):
|
|
self.skipTest("NEXT_MIN_QEMU_VERSION == MIN_QEMU_VERSION")
|
|
|
|
# Test that a warning is not logged if the libvirt version is greater
|
|
# than or equal to NEXT_MIN_QEMU_VERSION.
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
drvr.init_host("dummyhost")
|
|
# assert that the next min version is in a warning message
|
|
expected_arg = {'version': versionutils.convert_version_to_str(
|
|
versionutils.convert_version_to_int(
|
|
libvirt_driver.NEXT_MIN_QEMU_VERSION))}
|
|
version_arg_found = False
|
|
for call in mock_warning.call_args_list:
|
|
if call[0][1] == expected_arg:
|
|
version_arg_found = True
|
|
break
|
|
self.assertFalse(version_arg_found)
|
|
|
|
    # NOTE(sdague): python2.7 and python3.5 have different behaviors
    # when it comes to comparing against the sentinel, so
    # has_min_version is needed to pass python3.5.
    @mock.patch.object(nova.virt.libvirt.host.Host, "has_min_version",
                       return_value=True)
    @mock.patch.object(fakelibvirt.Connection, 'getVersion',
                       return_value=mock.sentinel.qemu_version)
    def test_qemu_image_version(self, mock_get_libversion, min_ver):
        """Test that init_host sets qemu image version

        A sentinel is used here so that we aren't chasing this value
        against minimums that get raised over time.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.init_host("dummyhost")
        # init_host() is expected to have copied the connection's QEMU
        # version into the module-level images.QEMU_VERSION.
        self.assertEqual(images.QEMU_VERSION, mock.sentinel.qemu_version)
|
|
|
|
@mock.patch.object(fields.Architecture, "from_host",
|
|
return_value=fields.Architecture.PPC64)
|
|
def test_min_version_ppc_ok(self, mock_arch):
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
drvr.init_host("dummyhost")
|
|
|
|
@mock.patch.object(fields.Architecture, "from_host",
|
|
return_value=fields.Architecture.S390X)
|
|
def test_min_version_s390_ok(self, mock_arch):
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
drvr.init_host("dummyhost")
|
|
|
|
def test_file_backed_memory_support_called(self):
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
with mock.patch.object(drvr,
|
|
'_check_file_backed_memory_support') as mock_check_fb_support:
|
|
drvr.init_host("dummyhost")
|
|
self.assertTrue(mock_check_fb_support.called)
|
|
|
|
    @mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.MIN_LIBVIRT_FILE_BACKED_VERSION))
    @mock.patch.object(fakelibvirt.Connection, 'getVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.MIN_QEMU_FILE_BACKED_VERSION))
    def test_min_version_file_backed_ok(self, mock_libv, mock_qemu):
        # With libvirt and QEMU exactly at their file-backed-memory
        # minimums and ram_allocation_ratio at 1.0, the support check
        # must pass without raising.
        self.flags(file_backed_memory=1024, group='libvirt')
        self.flags(ram_allocation_ratio=1.0)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr._check_file_backed_memory_support()
|
|
|
|
    @mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.MIN_LIBVIRT_FILE_BACKED_VERSION) - 1)
    @mock.patch.object(fakelibvirt.Connection, 'getVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.MIN_QEMU_FILE_BACKED_VERSION))
    def test_min_version_file_backed_old_libvirt(self, mock_libv, mock_qemu):
        # A libvirt one version below the file-backed-memory minimum must
        # cause the support check to raise InternalError.
        self.flags(file_backed_memory=1024, group="libvirt")
        self.flags(ram_allocation_ratio=1.0)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(exception.InternalError,
                          drvr._check_file_backed_memory_support)
|
|
|
|
    @mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.MIN_LIBVIRT_FILE_BACKED_VERSION))
    @mock.patch.object(fakelibvirt.Connection, 'getVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.MIN_QEMU_FILE_BACKED_VERSION) - 1)
    def test_min_version_file_backed_old_qemu(self, mock_libv, mock_qemu):
        # A QEMU one version below the file-backed-memory minimum must
        # cause the support check to raise InternalError.
        self.flags(file_backed_memory=1024, group="libvirt")
        self.flags(ram_allocation_ratio=1.0)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(exception.InternalError,
                          drvr._check_file_backed_memory_support)
|
|
|
|
    @mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.MIN_LIBVIRT_FILE_BACKED_VERSION))
    @mock.patch.object(fakelibvirt.Connection, 'getVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.MIN_QEMU_FILE_BACKED_VERSION))
    def test_min_version_file_backed_bad_ram_allocation_ratio(self, mock_libv,
                                                              mock_qemu):
        # Even with new-enough libvirt/QEMU, file-backed memory combined
        # with a ram_allocation_ratio other than 1.0 must be rejected.
        self.flags(file_backed_memory=1024, group="libvirt")
        self.flags(ram_allocation_ratio=1.5)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(exception.InternalError,
                          drvr._check_file_backed_memory_support)
|
|
|
|
def _do_test_parse_migration_flags(self, lm_expected=None,
|
|
bm_expected=None):
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
drvr._parse_migration_flags()
|
|
|
|
if lm_expected is not None:
|
|
self.assertEqual(lm_expected, drvr._live_migration_flags)
|
|
if bm_expected is not None:
|
|
self.assertEqual(bm_expected, drvr._block_migration_flags)
|
|
|
|
    def test_parse_live_migration_flags_default(self):
        # Default config: live migration flags are the baseline set with
        # no tunnelling, post-copy or auto-converge extras.
        self._do_test_parse_migration_flags(
            lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE))
|
|
|
|
    def test_parse_live_migration_flags(self):
        # NOTE(review): this test is currently identical to
        # test_parse_live_migration_flags_default; presumably it once set
        # non-default config -- consider consolidating or extending it.
        self._do_test_parse_migration_flags(
            lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE))
|
|
|
|
    def test_parse_block_migration_flags_default(self):
        # Default config: block migration adds NON_SHARED_INC on top of
        # the baseline live migration flags.
        self._do_test_parse_migration_flags(
            bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC))
|
|
|
|
    def test_parse_block_migration_flags(self):
        # NOTE(review): currently identical to
        # test_parse_block_migration_flags_default -- consider
        # consolidating or extending it.
        self._do_test_parse_migration_flags(
            bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC))
|
|
|
|
    def test_parse_migration_flags_p2p_xen(self):
        # On xen the PEER2PEER flag must be dropped from both live and
        # block migration flag sets.
        self.flags(virt_type='xen', group='libvirt')
        self._do_test_parse_migration_flags(
            lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE),
            bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC))
|
|
|
|
    def test_live_migration_tunnelled_true(self):
        # live_migration_tunnelled=True must add the TUNNELLED flag to
        # both live and block migration flag sets.
        self.flags(live_migration_tunnelled=True, group='libvirt')
        self._do_test_parse_migration_flags(
            lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST |
                         libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED),
            bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST |
                         libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC |
                         libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED))
|
|
|
|
    @mock.patch.object(host.Host, 'has_min_version', return_value=True)
    def test_live_migration_with_native_tls(self, host):
        # live_migration_with_native_tls=True must add the TLS flag to
        # both live and block migration flag sets (requires a new enough
        # libvirt, hence the has_min_version mock).
        self.flags(live_migration_with_native_tls=True, group='libvirt')
        self._do_test_parse_migration_flags(
            lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST |
                         libvirt_driver.libvirt.VIR_MIGRATE_TLS),
            bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST |
                         libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC |
                         libvirt_driver.libvirt.VIR_MIGRATE_TLS))
|
|
|
|
    def test_live_migration_permit_postcopy_true(self):
        # live_migration_permit_post_copy=True must add the POSTCOPY flag
        # to both live and block migration flag sets.
        self.flags(live_migration_permit_post_copy=True, group='libvirt')
        self._do_test_parse_migration_flags(
            lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_POSTCOPY),
            bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC |
                         libvirt_driver.libvirt.VIR_MIGRATE_POSTCOPY))
|
|
|
|
    def test_live_migration_permit_auto_converge_true(self):
        # live_migration_permit_auto_converge=True must add the
        # AUTO_CONVERGE flag to both live and block migration flag sets.
        self.flags(live_migration_permit_auto_converge=True, group='libvirt')
        self._do_test_parse_migration_flags(
            lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_AUTO_CONVERGE),
            bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC |
                         libvirt_driver.libvirt.VIR_MIGRATE_AUTO_CONVERGE))
|
|
|
|
    def test_live_migration_permit_auto_converge_and_post_copy_true(self):
        # When both options are enabled, post-copy wins: only the
        # POSTCOPY flag appears, AUTO_CONVERGE is dropped.
        self.flags(live_migration_permit_auto_converge=True, group='libvirt')
        self.flags(live_migration_permit_post_copy=True, group='libvirt')
        self._do_test_parse_migration_flags(
            lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_POSTCOPY),
            bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC |
                         libvirt_driver.libvirt.VIR_MIGRATE_POSTCOPY))
|
|
|
|
    def test_live_migration_permit_postcopy_false(self):
        # With post-copy left disabled (default), no POSTCOPY flag may
        # appear in either flag set.
        self._do_test_parse_migration_flags(
            lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE),
            bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC))
|
|
|
|
    def test_live_migration_permit_autoconverge_false(self):
        # With auto-converge left disabled (default), no AUTO_CONVERGE
        # flag may appear in either flag set.
        self._do_test_parse_migration_flags(
            lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE),
            bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC))
|
|
|
|
    @mock.patch('nova.utils.get_image_from_system_metadata')
    @mock.patch.object(host.Host,
                       'has_min_version', return_value=True)
    @mock.patch('nova.virt.libvirt.host.Host.get_guest')
    def test_set_admin_password(self, mock_get_guest, ver, mock_image):
        # Happy path: with the qemu guest agent enabled, the password is
        # set for the default user "root".
        self.flags(virt_type='kvm', group='libvirt')
        instance = objects.Instance(**self.test_instance)
        mock_image.return_value = {"properties": {
            "hw_qemu_guest_agent": "yes"}}
        mock_guest = mock.Mock(spec=libvirt_guest.Guest)
        mock_get_guest.return_value = mock_guest

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.set_admin_password(instance, "123")

        mock_guest.set_user_password.assert_called_once_with("root", "123")
|
|
|
|
    @mock.patch('nova.objects.Instance.save')
    @mock.patch('oslo_serialization.base64.encode_as_text')
    @mock.patch('nova.api.metadata.password.convert_password')
    @mock.patch('nova.crypto.ssh_encrypt_text')
    @mock.patch('nova.utils.get_image_from_system_metadata')
    @mock.patch.object(host.Host,
                       'has_min_version', return_value=True)
    @mock.patch('nova.virt.libvirt.host.Host.get_guest')
    def test_set_admin_password_saves_sysmeta(self, mock_get_guest,
                                              ver, mock_image, mock_encrypt,
                                              mock_convert, mock_encode,
                                              mock_save):
        # When the instance has an SSH key, the new password must also be
        # encrypted with that key, base64 encoded, and persisted in the
        # instance system metadata.
        self.flags(virt_type='kvm', group='libvirt')
        instance = objects.Instance(**self.test_instance)
        # Password will only be saved in sysmeta if the key_data is present
        instance.key_data = 'ssh-rsa ABCFEFG'
        mock_image.return_value = {"properties": {
            "hw_qemu_guest_agent": "yes"}}
        mock_guest = mock.Mock(spec=libvirt_guest.Guest)
        mock_get_guest.return_value = mock_guest
        mock_convert.return_value = {'password_0': 'converted-password'}

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.set_admin_password(instance, "123")

        # The guest agent still sets the plain password...
        mock_guest.set_user_password.assert_called_once_with("root", "123")
        # ...and the encrypt -> encode -> convert -> save pipeline runs.
        mock_encrypt.assert_called_once_with(instance.key_data, '123')
        mock_encode.assert_called_once_with(mock_encrypt.return_value)
        mock_convert.assert_called_once_with(None, mock_encode.return_value)
        self.assertEqual('converted-password',
                         instance.system_metadata['password_0'])
        mock_save.assert_called_once_with()
|
|
|
|
    @mock.patch('nova.virt.libvirt.host.Host.get_guest')
    def test_set_admin_password_parallels(self, mock_get_guest):
        # parallels supports setting the password without a guest agent,
        # so no image-property mocking is needed here.
        self.flags(virt_type='parallels', group='libvirt')
        instance = objects.Instance(**self.test_instance)
        mock_guest = mock.Mock(spec=libvirt_guest.Guest)
        mock_get_guest.return_value = mock_guest

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.set_admin_password(instance, "123")

        mock_guest.set_user_password.assert_called_once_with("root", "123")
|
|
|
|
    @mock.patch('nova.utils.get_image_from_system_metadata')
    @mock.patch.object(host.Host,
                       'has_min_version', return_value=True)
    @mock.patch('nova.virt.libvirt.host.Host.get_guest')
    def test_set_admin_password_windows(self, mock_get_guest, ver, mock_image):
        # For Windows guests the default admin account is "Administrator"
        # rather than "root".
        self.flags(virt_type='kvm', group='libvirt')
        instance = objects.Instance(**self.test_instance)
        instance.os_type = "windows"
        mock_image.return_value = {"properties": {
            "hw_qemu_guest_agent": "yes"}}
        mock_guest = mock.Mock(spec=libvirt_guest.Guest)
        mock_get_guest.return_value = mock_guest

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.set_admin_password(instance, "123")

        mock_guest.set_user_password.assert_called_once_with(
            "Administrator", "123")
|
|
|
|
    @mock.patch('nova.utils.get_image_from_system_metadata')
    @mock.patch.object(host.Host,
                       'has_min_version', return_value=True)
    @mock.patch('nova.virt.libvirt.host.Host.get_guest')
    def test_set_admin_password_image(self, mock_get_guest, ver, mock_image):
        # The image property os_admin_user overrides the default account
        # the password is set for.
        self.flags(virt_type='kvm', group='libvirt')
        instance = objects.Instance(**self.test_instance)
        mock_image.return_value = {"properties": {
            "hw_qemu_guest_agent": "yes",
            "os_admin_user": "foo"
        }}
        mock_guest = mock.Mock(spec=libvirt_guest.Guest)
        mock_get_guest.return_value = mock_guest

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.set_admin_password(instance, "123")

        mock_guest.set_user_password.assert_called_once_with("foo", "123")
|
|
|
|
    @mock.patch('nova.utils.get_image_from_system_metadata')
    @mock.patch.object(host.Host,
                       'has_min_version', return_value=True)
    def test_set_admin_password_bad_hyp(self, mock_svc, mock_image):
        # lxc does not support setting an admin password, so the call
        # must raise SetAdminPasswdNotSupported.
        self.flags(virt_type='lxc', group='libvirt')
        instance = objects.Instance(**self.test_instance)
        mock_image.return_value = {"properties": {
            "hw_qemu_guest_agent": "yes"}}
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(exception.SetAdminPasswdNotSupported,
                          drvr.set_admin_password, instance, "123")
|
|
|
|
    @mock.patch.object(host.Host,
                       'has_min_version', return_value=True)
    def test_set_admin_password_guest_agent_not_running(self, mock_svc):
        # Without the hw_qemu_guest_agent image property (no mock_image
        # here), setting the password must raise QemuGuestAgentNotEnabled.
        self.flags(virt_type='kvm', group='libvirt')
        instance = objects.Instance(**self.test_instance)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(exception.QemuGuestAgentNotEnabled,
                          drvr.set_admin_password, instance, "123")
|
|
|
|
    @mock.patch('nova.utils.get_image_from_system_metadata')
    @mock.patch.object(host.Host,
                       'has_min_version', return_value=True)
    @mock.patch('nova.virt.libvirt.host.Host.get_guest')
    def test_set_admin_password_error(self, mock_get_guest, ver, mock_image):
        # A libvirt failure while setting the password must surface as
        # NovaException and must NOT persist anything to sysmeta.
        self.flags(virt_type='kvm', group='libvirt')
        instance = objects.Instance(**self.test_instance)
        mock_image.return_value = {"properties": {
            "hw_qemu_guest_agent": "yes"}}
        mock_guest = mock.Mock(spec=libvirt_guest.Guest)
        mock_guest.set_user_password.side_effect = (
            fakelibvirt.libvirtError("error"))
        mock_get_guest.return_value = mock_guest

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        with mock.patch.object(
                drvr, '_save_instance_password_if_sshkey_present') as save_p:
            self.assertRaises(exception.NovaException,
                              drvr.set_admin_password, instance, "123")
            save_p.assert_not_called()
|
|
|
|
    @mock.patch('nova.utils.get_image_from_system_metadata')
    @mock.patch.object(host.Host,
                       'has_min_version', return_value=True)
    @mock.patch('nova.virt.libvirt.host.Host.get_guest')
    def test_set_admin_password_error_with_unicode(
            self, mock_get_guest, ver, mock_image):
        # A libvirt error carrying non-ASCII bytes must still be wrapped
        # in NovaException without a decode crash.
        self.flags(virt_type='kvm', group='libvirt')
        instance = objects.Instance(**self.test_instance)
        mock_image.return_value = {"properties": {
            "hw_qemu_guest_agent": "yes"}}
        mock_guest = mock.Mock(spec=libvirt_guest.Guest)
        mock_guest.set_user_password.side_effect = (
            fakelibvirt.libvirtError(
                b"failed: \xe9\x94\x99\xe8\xaf\xaf\xe3\x80\x82"))
        mock_get_guest.return_value = mock_guest

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(exception.NovaException,
                          drvr.set_admin_password, instance, "123")
|
|
|
|
    @mock.patch('nova.utils.get_image_from_system_metadata')
    @mock.patch.object(host.Host,
                       'has_min_version', return_value=True)
    @mock.patch('nova.virt.libvirt.host.Host.get_guest')
    def test_set_admin_password_not_implemented(
            self, mock_get_guest, ver, mock_image):
        # An unresponsive guest agent (VIR_ERR_AGENT_UNRESPONSIVE) must be
        # translated into NotImplementedError.
        self.flags(virt_type='kvm', group='libvirt')
        instance = objects.Instance(**self.test_instance)
        mock_image.return_value = {"properties": {
            "hw_qemu_guest_agent": "yes"}}
        mock_guest = mock.Mock(spec=libvirt_guest.Guest)
        not_implemented = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError,
            "Guest agent disappeared while executing command",
            error_code=fakelibvirt.VIR_ERR_AGENT_UNRESPONSIVE)
        mock_guest.set_user_password.side_effect = not_implemented
        mock_get_guest.return_value = mock_guest

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(NotImplementedError,
                          drvr.set_admin_password, instance, "123")
|
|
|
|
@mock.patch.object(objects.Service, 'save')
|
|
@mock.patch.object(objects.Service, 'get_by_compute_host')
|
|
def test_set_host_enabled_with_disable(self, mock_svc, mock_save):
|
|
# Tests disabling an enabled host.
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
svc = self._create_service(host='fake-mini')
|
|
mock_svc.return_value = svc
|
|
drvr._set_host_enabled(False)
|
|
self.assertTrue(svc.disabled)
|
|
mock_save.assert_called_once_with()
|
|
|
|
    @mock.patch.object(objects.Service, 'save')
    @mock.patch.object(objects.Service, 'get_by_compute_host')
    def test_set_host_enabled_with_enable(self, mock_svc, mock_save):
        # Tests enabling a disabled host.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        svc = self._create_service(disabled=True, host='fake-mini')
        mock_svc.return_value = svc
        drvr._set_host_enabled(True)
        # since disabled_reason is not set and not prefixed with "AUTO:",
        # service should not be enabled.
        mock_save.assert_not_called()
        self.assertTrue(svc.disabled)
|
|
|
|
    @mock.patch.object(objects.Service, 'save')
    @mock.patch.object(objects.Service, 'get_by_compute_host')
    def test_set_host_enabled_with_enable_state_enabled(self, mock_svc,
                                                        mock_save):
        # Tests enabling an enabled host: a no-op, so nothing is saved.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        svc = self._create_service(disabled=False, host='fake-mini')
        mock_svc.return_value = svc
        drvr._set_host_enabled(True)
        self.assertFalse(svc.disabled)
        mock_save.assert_not_called()
|
|
|
|
    @mock.patch.object(objects.Service, 'save')
    @mock.patch.object(objects.Service, 'get_by_compute_host')
    def test_set_host_enabled_with_disable_state_disabled(self, mock_svc,
                                                          mock_save):
        # Tests disabling a disabled host: a no-op, so nothing is saved.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        svc = self._create_service(disabled=True, host='fake-mini')
        mock_svc.return_value = svc
        drvr._set_host_enabled(False)
        mock_save.assert_not_called()
        self.assertTrue(svc.disabled)
|
|
|
|
    def test_set_host_enabled_swallows_exceptions(self):
        # Tests that set_host_enabled will swallow exceptions coming from the
        # db_api code so they don't break anything calling it, e.g. the
        # _get_new_connection method.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        with mock.patch.object(db, 'service_get_by_compute_host') as db_mock:
            # Make db.service_get_by_compute_host raise NovaException; this
            # is more robust than just raising ComputeHostNotFound.
            db_mock.side_effect = exception.NovaException
            # Must not propagate the exception.
            drvr._set_host_enabled(False)
|
|
|
|
    @mock.patch.object(fakelibvirt.virConnect, "nodeDeviceLookupByName")
    def test_prepare_pci_device(self, mock_lookup):
        # Preparing a PCI device for use must succeed when the node
        # device lookup resolves.

        pci_devices = [dict(hypervisor_name='xxx')]

        self.flags(virt_type='xen', group='libvirt')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        conn = drvr._host.get_connection()

        mock_lookup.side_effect = lambda x: fakelibvirt.NodeDevice(conn)
        drvr._prepare_pci_devices_for_use(pci_devices)
|
|
|
|
    @mock.patch('nova.context.get_admin_context')
    @mock.patch('nova.compute.utils.notify_about_libvirt_connect_error')
    def test_versioned_notification(self, mock_notify, mock_get):
        # A libvirt connection failure must emit a versioned notification
        # carrying the host IP, the exception and a traceback.
        mock_get.return_value = self.context

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        fake_error = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError, "Failed to connect to host",
            error_code=fakelibvirt.VIR_ERR_INTERNAL_ERROR)

        with mock.patch('nova.virt.libvirt.host.Host._get_connection',
                        side_effect=fake_error):
            self.assertRaises(exception.HypervisorUnavailable,
                              drvr._host.get_connection)
        mock_get.assert_called_once_with()
        mock_notify.assert_called_once_with(self.context, ip=CONF.my_ip,
                                            exception=fake_error, tb=mock.ANY)
        # The tb kwarg should be a formatted traceback string.
        _, kwargs = mock_notify.call_args
        self.assertIn('Traceback (most recent call last):', kwargs['tb'])
|
|
|
|
    @mock.patch.object(fakelibvirt.virConnect, "nodeDeviceLookupByName")
    @mock.patch.object(fakelibvirt.virNodeDevice, "dettach")
    def test_prepare_pci_device_exception(self, mock_detach, mock_lookup):
        # A libvirt failure while detaching the node device must be
        # wrapped in PciDevicePrepareFailed.

        pci_devices = [dict(hypervisor_name='xxx',
                            id='id1',
                            instance_uuid='uuid')]

        self.flags(virt_type='xen', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        conn = drvr._host.get_connection()

        mock_lookup.side_effect = lambda x: fakelibvirt.NodeDevice(conn)
        mock_detach.side_effect = fakelibvirt.libvirtError("xxxx")

        self.assertRaises(exception.PciDevicePrepareFailed,
                          drvr._prepare_pci_devices_for_use, pci_devices)
|
|
|
|
@mock.patch.object(host.Host, "has_min_version", return_value=False)
def test_device_metadata(self, mock_version):
    """Tagged disks and network interfaces in the domain XML show up in
    the instance device metadata with the correct bus types, addresses,
    serials and tags; untagged devices are omitted.
    """
    xml = """
    <domain>
      <name>dummy</name>
      <uuid>32dfcb37-5af1-552b-357c-be8c3aa38310</uuid>
      <memory>1048576</memory>
      <vcpu>1</vcpu>
    <os>
      <type arch='x86_64' machine='pc-i440fx-2.4'>hvm</type>
    </os>
    <devices>
      <disk type='block' device='disk'>
        <driver name='qemu' type='qcow2'/>
        <source dev='/dev/mapper/generic'/>
        <target dev='sda' bus='scsi'/>
        <address type='drive' controller='0' bus='0' target='0' unit='0'/>
      </disk>
      <disk type='block' device='disk'>
        <driver name='qemu' type='qcow2'/>
        <source dev='/dev/mapper/generic-1'/>
        <target dev='hda' bus='ide'/>
        <address type='drive' controller='0' bus='1' target='0' unit='0'/>
      </disk>
      <disk type='block' device='disk'>
        <driver name='qemu' type='qcow2'/>
        <source dev='/dev/mapper/generic-2'/>
        <target dev='hdb' bus='ide'/>
        <address type='drive' controller='0' bus='1' target='1' unit='1'/>
      </disk>
      <disk type='block' device='disk'>
        <driver name='qemu' type='qcow2'/>
        <source dev='/dev/mapper/aa1'/>
        <target dev='sdb' bus='usb'/>
      </disk>
      <disk type='block' device='disk'>
        <driver name='qemu' type='qcow2'/>
        <source dev='/var/lib/libvirt/images/centos'/>
        <backingStore/>
        <target dev='vda' bus='virtio'/>
        <boot order='1'/>
        <alias name='virtio-disk0'/>
        <address type='pci' domain='0x0000' bus='0x00' slot='0x09'
        function='0x0'/>
      </disk>
      <disk type='file' device='disk'>
        <driver name='qemu' type='qcow2' cache='none'/>
        <source file='/var/lib/libvirt/images/generic.qcow2'/>
        <target dev='vdb' bus='virtio'/>
        <address type='virtio-mmio'/>
      </disk>
      <disk type='file' device='disk'>
        <driver name='qemu' type='qcow2'/>
        <source file='/var/lib/libvirt/images/test.qcow2'/>
        <backingStore/>
        <target dev='vdc' bus='virtio'/>
        <alias name='virtio-disk1'/>
        <address type='ccw' cssid='0xfe' ssid='0x0' devno='0x0000'/>
      </disk>
      <interface type='network'>
        <mac address='52:54:00:f6:35:8f'/>
        <source network='default'/>
        <model type='virtio'/>
        <address type='pci' domain='0x0000' bus='0x00' slot='0x03'
        function='0x0'/>
      </interface>
      <interface type='network'>
        <mac address='51:5a:2c:a4:5e:1b'/>
        <source network='default'/>
        <model type='virtio'/>
        <address type='pci' domain='0x0000' bus='0x00' slot='0x04'
        function='0x1'/>
      </interface>
      <interface type='network'>
        <mac address='fa:16:3e:d1:28:e4'/>
        <source network='default'/>
        <model type='virtio'/>
        <address type='virtio-mmio'/>
      </interface>
      <interface type='network'>
        <mac address='52:54:00:14:6f:50'/>
        <source network='default' bridge='virbr0'/>
        <target dev='vnet0'/>
        <model type='virtio'/>
        <alias name='net0'/>
        <address type='ccw' cssid='0xfe' ssid='0x0' devno='0x0001'/>
      </interface>
      <hostdev mode="subsystem" type="pci" managed="yes">
        <source>
          <address bus="0x06" domain="0x0000" function="0x1"
          slot="0x00"/>
        </source>
      </hostdev>
    </devices>
    </domain>"""

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    dom = fakelibvirt.Domain(drvr._get_connection(), xml, False)
    guest = libvirt_guest.Guest(dom)

    instance_ref = objects.Instance(**self.test_instance)
    # BDMs matching the disks in the XML above; volume 4 deliberately
    # has no tag so it should NOT appear in the metadata.
    bdms = block_device_obj.block_device_make_list_from_dicts(
        self.context, [
            fake_block_device.FakeDbBlockDeviceDict(
                {'id': 1,
                 'source_type': 'volume', 'destination_type': 'volume',
                 'device_name': '/dev/sda', 'tag': "db",
                 'volume_id': uuids.volume_1}),
            fake_block_device.FakeDbBlockDeviceDict(
                {'id': 2,
                 'source_type': 'volume', 'destination_type': 'volume',
                 'device_name': '/dev/hda', 'tag': "nfvfunc1",
                 'volume_id': uuids.volume_2}),
            fake_block_device.FakeDbBlockDeviceDict(
                {'id': 3,
                 'source_type': 'volume', 'destination_type': 'volume',
                 'device_name': '/dev/sdb', 'tag': "nfvfunc2",
                 'volume_id': uuids.volume_3}),
            fake_block_device.FakeDbBlockDeviceDict(
                {'id': 4,
                 'source_type': 'volume', 'destination_type': 'volume',
                 'device_name': '/dev/hdb',
                 'volume_id': uuids.volume_4}),
            fake_block_device.FakeDbBlockDeviceDict(
                {'id': 5,
                 'source_type': 'volume', 'destination_type': 'volume',
                 'device_name': '/dev/vda', 'tag': "nfvfunc3",
                 'volume_id': uuids.volume_5}),
            fake_block_device.FakeDbBlockDeviceDict(
                {'id': 6,
                 'source_type': 'volume', 'destination_type': 'volume',
                 'device_name': '/dev/vdb', 'tag': "nfvfunc4",
                 'volume_id': uuids.volume_6}),
            fake_block_device.FakeDbBlockDeviceDict(
                {'id': 7,
                 'source_type': 'volume', 'destination_type': 'volume',
                 'device_name': '/dev/vdc', 'tag': "nfvfunc5",
                 'volume_id': uuids.volume_7}),
        ]
    )
    # VIFs matching the interfaces in the XML; vif1 has no tag, vif4 is
    # the direct-physical (hostdev) port whose MAC is looked up by PCI
    # address.
    vif = obj_vif.VirtualInterface(context=self.context)
    vif.address = '52:54:00:f6:35:8f'
    vif.network_id = 123
    vif.instance_uuid = '32dfcb37-5af1-552b-357c-be8c3aa38310'
    vif.uuid = '12ec4b21-ef22-6c21-534b-ba3e3ab3a311'
    vif.tag = 'mytag1'

    vif1 = obj_vif.VirtualInterface(context=self.context)
    vif1.address = '51:5a:2c:a4:5e:1b'
    vif1.network_id = 123
    vif1.instance_uuid = '32dfcb37-5af1-552b-357c-be8c3aa38310'
    vif1.uuid = 'abec4b21-ef22-6c21-534b-ba3e3ab3a312'

    vif2 = obj_vif.VirtualInterface(context=self.context)
    vif2.address = 'fa:16:3e:d1:28:e4'
    vif2.network_id = 123
    vif2.instance_uuid = '32dfcb37-5af1-552b-357c-be8c3aa38310'
    vif2.uuid = '645686e4-7086-4eab-8c2f-c41f017a1b16'
    vif2.tag = 'mytag2'

    vif3 = obj_vif.VirtualInterface(context=self.context)
    vif3.address = '52:54:00:14:6f:50'
    vif3.network_id = 123
    vif3.instance_uuid = '32dfcb37-5af1-552b-357c-be8c3aa38310'
    vif3.uuid = '99cc3604-782d-4a32-a27c-bc33ac56ce86'
    vif3.tag = 'mytag3'

    vif4 = obj_vif.VirtualInterface(context=self.context)
    vif4.address = 'da:d1:f2:91:95:c1'
    vif4.tag = 'pf_tag'

    vifs = [vif, vif1, vif2, vif3, vif4]

    network_info = _fake_network_info(self, 4)
    network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT_PHYSICAL
    network_info[0]['address'] = "51:5a:2c:a4:5e:1b"
    network_info[0]['details'] = dict(vlan='2145')
    network_info[0]['profile'] = dict(trusted='true')
    instance_ref.info_cache = objects.InstanceInfoCache(
        network_info=network_info)

    with test.nested(
        mock.patch('nova.objects.VirtualInterfaceList'
                   '.get_by_instance_uuid', return_value=vifs),
        mock.patch('nova.objects.BlockDeviceMappingList'
                   '.get_by_instance_uuid', return_value=bdms),
        mock.patch('nova.virt.libvirt.host.Host.get_guest',
                   return_value=guest),
        mock.patch.object(nova.virt.libvirt.guest.Guest, 'get_xml_desc',
                          return_value=xml),
        mock.patch.object(pci_utils, 'get_mac_by_pci_address',
                          return_value='da:d1:f2:91:95:c1')):
        metadata_obj = drvr._build_device_metadata(self.context,
                                                   instance_ref)
        metadata = metadata_obj.devices
        self.assertEqual(11, len(metadata))
        self.assertIsInstance(metadata[0],
                              objects.DiskMetadata)
        self.assertIsInstance(metadata[0].bus,
                              objects.SCSIDeviceBus)
        self.assertEqual(['db'], metadata[0].tags)
        self.assertEqual(uuids.volume_1, metadata[0].serial)
        self.assertFalse(metadata[0].bus.obj_attr_is_set('address'))
        self.assertEqual(['nfvfunc1'], metadata[1].tags)
        self.assertEqual(uuids.volume_2, metadata[1].serial)
        self.assertIsInstance(metadata[1],
                              objects.DiskMetadata)
        self.assertIsInstance(metadata[1].bus,
                              objects.IDEDeviceBus)
        self.assertEqual(['nfvfunc1'], metadata[1].tags)
        self.assertFalse(metadata[1].bus.obj_attr_is_set('address'))
        self.assertIsInstance(metadata[2],
                              objects.DiskMetadata)
        self.assertIsInstance(metadata[2].bus,
                              objects.USBDeviceBus)
        self.assertEqual(['nfvfunc2'], metadata[2].tags)
        self.assertEqual(uuids.volume_3, metadata[2].serial)
        self.assertFalse(metadata[2].bus.obj_attr_is_set('address'))
        self.assertIsInstance(metadata[3],
                              objects.DiskMetadata)
        self.assertIsInstance(metadata[3].bus,
                              objects.PCIDeviceBus)
        self.assertEqual(['nfvfunc3'], metadata[3].tags)
        # NOTE(artom) We're not checking volume 4 because it's not tagged
        # and only tagged devices appear in the metadata
        self.assertEqual(uuids.volume_5, metadata[3].serial)
        self.assertEqual('0000:00:09.0', metadata[3].bus.address)
        self.assertIsInstance(metadata[4],
                              objects.DiskMetadata)
        self.assertEqual(['nfvfunc4'], metadata[4].tags)
        self.assertEqual(uuids.volume_6, metadata[4].serial)
        self.assertIsInstance(metadata[5],
                              objects.DiskMetadata)
        self.assertEqual(['nfvfunc5'], metadata[5].tags)
        self.assertEqual(uuids.volume_7, metadata[5].serial)
        self.assertIsInstance(metadata[6],
                              objects.NetworkInterfaceMetadata)
        self.assertIsInstance(metadata[6].bus,
                              objects.PCIDeviceBus)
        self.assertEqual(['mytag1'], metadata[6].tags)
        self.assertEqual('0000:00:03.0', metadata[6].bus.address)
        self.assertFalse(metadata[6].vf_trusted)

        # Make sure that interface with vlan is exposed to the metadata
        self.assertIsInstance(metadata[7],
                              objects.NetworkInterfaceMetadata)
        self.assertEqual('51:5a:2c:a4:5e:1b', metadata[7].mac)
        self.assertEqual(2145, metadata[7].vlan)
        self.assertTrue(metadata[7].vf_trusted)
        self.assertIsInstance(metadata[8],
                              objects.NetworkInterfaceMetadata)
        self.assertEqual(['mytag2'], metadata[8].tags)
        self.assertFalse(metadata[8].vf_trusted)
        self.assertIsInstance(metadata[9],
                              objects.NetworkInterfaceMetadata)
        self.assertEqual(['mytag3'], metadata[9].tags)
        self.assertFalse(metadata[9].vf_trusted)
        self.assertIsInstance(metadata[10],
                              objects.NetworkInterfaceMetadata)
        self.assertEqual(['pf_tag'], metadata[10].tags)
        self.assertEqual('da:d1:f2:91:95:c1', metadata[10].mac)
        self.assertEqual('0000:06:00.1', metadata[10].bus.address)
|
|
|
|
@mock.patch.object(host.Host, 'get_connection')
@mock.patch.object(nova.virt.libvirt.guest.Guest, 'get_xml_desc')
def test_detach_pci_devices(self, mocked_get_xml_desc, mock_conn):
    """Detaching a PCI device succeeds when the refreshed domain XML no
    longer contains the device (i.e. detach completed).
    """
    # Domain XML as it looks before the detach: hostdev present.
    fake_domXML1_with_pci = (
        """<domain> <devices>
        <disk type='file' device='disk'>
        <driver name='qemu' type='qcow2' cache='none'/>
        <source file='xxx'/>
        <target dev='vda' bus='virtio'/>
        <alias name='virtio-disk0'/>
        <address type='pci' domain='0x0000' bus='0x00'
        slot='0x04' function='0x0'/>
        </disk>
        <hostdev mode="subsystem" type="pci" managed="yes">
        <source>
        <address function="0x1" slot="0x10" domain="0x0001"
         bus="0x04"/>
        </source>
        </hostdev></devices></domain>""")

    # Domain XML as re-read after the detach: hostdev gone.
    fake_domXML1_without_pci = (
        """<domain> <devices>
        <disk type='file' device='disk'>
        <driver name='qemu' type='qcow2' cache='none'/>
        <source file='xxx'/>
        <target dev='vda' bus='virtio'/>
        <alias name='virtio-disk0'/>
        <address type='pci' domain='0x0001' bus='0x00'
        slot='0x04' function='0x0'/>
        </disk></devices></domain>""")

    pci_device_info = {'compute_node_id': 1,
                       'instance_uuid': 'uuid',
                       'address': '0001:04:10.1'}
    pci_device = objects.PciDevice(**pci_device_info)
    pci_devices = [pci_device]
    mocked_get_xml_desc.return_value = fake_domXML1_without_pci

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    dom = fakelibvirt.Domain(
        drvr._get_connection(), fake_domXML1_with_pci, False)
    guest = libvirt_guest.Guest(dom)
    # Must complete without raising: the device disappears from the XML.
    drvr._detach_pci_devices(guest, pci_devices)
|
|
|
|
@mock.patch.object(host.Host, 'get_connection')
@mock.patch.object(nova.virt.libvirt.guest.Guest, 'get_xml_desc')
def test_detach_pci_devices_timeout(self, mocked_get_xml_desc, mock_conn):
    """If the PCI hostdev never disappears from the refreshed domain
    XML, the detach times out with PciDeviceDetachFailed.
    """
    fake_domXML1_with_pci = (
        """<domain> <devices>
        <disk type='file' device='disk'>
        <driver name='qemu' type='qcow2' cache='none'/>
        <source file='xxx'/>
        <target dev='vda' bus='virtio'/>
        <alias name='virtio-disk0'/>
        <address type='pci' domain='0x0000' bus='0x00'
        slot='0x04' function='0x0'/>
        </disk>
        <hostdev mode="subsystem" type="pci" managed="yes">
        <source>
        <address function="0x1" slot="0x10" domain="0x0001"
         bus="0x04"/>
        </source>
        </hostdev></devices></domain>""")

    pci_device_info = {'compute_node_id': 1,
                       'instance_uuid': 'uuid',
                       'address': '0001:04:10.1'}
    pci_device = objects.PciDevice(**pci_device_info)
    pci_devices = [pci_device]
    # The re-read XML still contains the hostdev -> detach never
    # observed as complete.
    mocked_get_xml_desc.return_value = fake_domXML1_with_pci

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    dom = fakelibvirt.Domain(
        drvr._get_connection(), fake_domXML1_with_pci, False)
    guest = libvirt_guest.Guest(dom)
    self.assertRaises(exception.PciDeviceDetachFailed,
                      drvr._detach_pci_devices, guest, pci_devices)
|
|
|
|
@mock.patch.object(connector, 'get_connector_properties')
def test_get_connector(self, fake_get_connector):
    """get_volume_connector returns the os-brick connector properties
    unchanged.
    """
    self.flags(my_ip='fakeip')
    self.flags(host='fakehost')

    # TODO(walter-boring) add the fake in os-brick
    expected = {
        'ip': 'fakeip',
        'initiator': 'fake.initiator.iqn',
        'host': 'fakehost',
        'wwpns': ['100010604b019419'],
        'wwnns': ['200010604b019419'],
    }
    fake_get_connector.return_value = expected

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    result = drvr.get_volume_connector({'id': 'fake'})
    self.assertThat(expected, matchers.DictMatches(result))
|
|
|
|
@mock.patch.object(connector, 'get_connector_properties')
def test_get_connector_storage_ip(self, fake_get_connector):
    """my_block_storage_ip takes precedence over my_ip in the volume
    connector properties.
    """
    self.flags(my_block_storage_ip='101.101.101.101',
               my_ip='100.100.100.100')
    # TODO(walter-boring) add the fake in os-brick
    fake_get_connector.return_value = {'ip': '101.101.101.101'}

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    result = drvr.get_volume_connector({'id': 'fake'})
    self.assertEqual('101.101.101.101', result['ip'])
|
|
|
|
def test_lifecycle_event_registration(self):
    """init_host must register the libvirt error handler BEFORE querying
    host capabilities; the call order is recorded and asserted.
    """
    calls = []

    def fake_registerErrorHandler(*args, **kwargs):
        calls.append('fake_registerErrorHandler')

    def fake_get_host_capabilities(**args):
        cpu = vconfig.LibvirtConfigGuestCPU()
        cpu.arch = fields.Architecture.ARMV7

        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = cpu
        calls.append('fake_get_host_capabilities')
        return caps

    @mock.patch.object(fakelibvirt, 'registerErrorHandler',
                       side_effect=fake_registerErrorHandler)
    @mock.patch.object(host.Host, "get_capabilities",
                       side_effect=fake_get_host_capabilities)
    def test_init_host(get_host_capabilities, register_error_handler):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.init_host("test_host")

    test_init_host()
    # NOTE(dkliban): Will fail if get_host_capabilities is called before
    # registerErrorHandler
    self.assertEqual(['fake_registerErrorHandler',
                      'fake_get_host_capabilities'], calls)
|
|
|
|
def test_sanitize_log_to_xml(self):
    """Passwords in block device connection info must be scrubbed from
    the debug log emitted by _get_guest_xml.
    """
    # setup fake data
    data = {'auth_password': 'scrubme'}
    bdm = [{'connection_info': {'data': data}}]
    bdi = {'block_device_mapping': bdm}

    # Tests that the parameters to the _get_guest_xml method
    # are sanitized for passwords when logged.
    def fake_debug(*args, **kwargs):
        if 'auth_password' in args[0]:
            self.assertNotIn('scrubme', args[0])

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    conf = mock.Mock()
    with test.nested(
        mock.patch.object(libvirt_driver.LOG, 'debug',
                          side_effect=fake_debug),
        mock.patch.object(drvr, '_get_guest_config', return_value=conf)
    ) as (
        debug_mock, conf_mock
    ):
        drvr._get_guest_xml(self.context, self.test_instance,
                            network_info={}, disk_info={},
                            image_meta={}, block_device_info=bdi)
        # we don't care what the log message is, we just want to make sure
        # our stub method is called which asserts the password is scrubbed
        self.assertTrue(debug_mock.called)
|
|
|
|
@mock.patch.object(time, "time")
def test_get_guest_config(self, time_mock):
    """Baseline guest config: features, devices and the full Nova
    instance metadata (owner, flavor, creation time) are populated.
    """
    time_mock.return_value = 1234567.89

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

    test_instance = copy.deepcopy(self.test_instance)
    test_instance["display_name"] = "purple tomatoes"
    test_instance['system_metadata']['owner_project_name'] = 'sweetshop'
    test_instance['system_metadata']['owner_user_name'] = 'cupcake'

    ctxt = context.RequestContext(project_id=123,
                                  project_name="aubergine",
                                  user_id=456,
                                  user_name="pie")

    flavor = objects.Flavor(name='m1.small',
                            memory_mb=6,
                            vcpus=28,
                            root_gb=496,
                            ephemeral_gb=8128,
                            swap=33550336,
                            extra_specs={})
    instance_ref = objects.Instance(**test_instance)
    instance_ref.flavor = flavor
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)

    cfg = drvr._get_guest_config(instance_ref,
                                 _fake_network_info(self, 1),
                                 image_meta, disk_info,
                                 context=ctxt)

    self.assertEqual(cfg.uuid, instance_ref["uuid"])
    # ACPI + APIC are the default guest features.
    self.assertEqual(2, len(cfg.features))
    self.assertIsInstance(cfg.features[0],
                          vconfig.LibvirtConfigGuestFeatureACPI)
    self.assertIsInstance(cfg.features[1],
                          vconfig.LibvirtConfigGuestFeatureAPIC)
    self.assertEqual(cfg.memory, 6 * units.Ki)
    self.assertEqual(cfg.vcpus, 28)
    self.assertEqual(cfg.os_type, fields.VMMode.HVM)
    self.assertEqual(cfg.os_boot_dev, ["hd"])
    self.assertIsNone(cfg.os_root)
    # Exact device layout: 3 disks, 1 interface, serial, input,
    # graphics, video, memballoon.
    self.assertEqual(len(cfg.devices), 9)
    self.assertIsInstance(cfg.devices[0],
                          vconfig.LibvirtConfigGuestDisk)
    self.assertIsInstance(cfg.devices[1],
                          vconfig.LibvirtConfigGuestDisk)
    self.assertIsInstance(cfg.devices[2],
                          vconfig.LibvirtConfigGuestDisk)
    self.assertIsInstance(cfg.devices[3],
                          vconfig.LibvirtConfigGuestInterface)
    self.assertIsInstance(cfg.devices[4],
                          vconfig.LibvirtConfigGuestSerial)
    self.assertIsInstance(cfg.devices[5],
                          vconfig.LibvirtConfigGuestInput)
    self.assertIsInstance(cfg.devices[6],
                          vconfig.LibvirtConfigGuestGraphics)
    self.assertIsInstance(cfg.devices[7],
                          vconfig.LibvirtConfigGuestVideo)
    self.assertIsInstance(cfg.devices[8],
                          vconfig.LibvirtConfigMemoryBalloon)
    self.assertEqual(len(cfg.metadata), 1)
    self.assertIsInstance(cfg.metadata[0],
                          vconfig.LibvirtConfigGuestMetaNovaInstance)
    self.assertEqual(version.version_string_with_package(),
                     cfg.metadata[0].package)
    self.assertEqual("purple tomatoes",
                     cfg.metadata[0].name)
    self.assertEqual(1234567.89,
                     cfg.metadata[0].creationTime)
    self.assertEqual("image",
                     cfg.metadata[0].roottype)
    self.assertEqual(str(instance_ref["image_ref"]),
                     cfg.metadata[0].rootid)

    # Owner metadata comes from the instance system_metadata, not the
    # request context.
    self.assertIsInstance(cfg.metadata[0].owner,
                          vconfig.LibvirtConfigGuestMetaNovaOwner)
    self.assertEqual("838a72b0-0d54-4827-8fd6-fb1227633ceb",
                     cfg.metadata[0].owner.userid)
    self.assertEqual("cupcake",
                     cfg.metadata[0].owner.username)
    self.assertEqual("fake",
                     cfg.metadata[0].owner.projectid)
    self.assertEqual("sweetshop",
                     cfg.metadata[0].owner.projectname)

    self.assertIsInstance(cfg.metadata[0].flavor,
                          vconfig.LibvirtConfigGuestMetaNovaFlavor)
    self.assertEqual("m1.small",
                     cfg.metadata[0].flavor.name)
    self.assertEqual(6,
                     cfg.metadata[0].flavor.memory)
    self.assertEqual(28,
                     cfg.metadata[0].flavor.vcpus)
    self.assertEqual(496,
                     cfg.metadata[0].flavor.disk)
    self.assertEqual(8128,
                     cfg.metadata[0].flavor.ephemeral)
    self.assertEqual(33550336,
                     cfg.metadata[0].flavor.swap)
|
|
|
|
def test_get_guest_config_q35(self):
    """A q35 machine type produces exactly CONF.libvirt.num_pcie_ports
    pcie-root-port controllers in the guest config.
    """
    self.flags(virt_type="kvm",
               group='libvirt')

    TEST_AMOUNT_OF_PCIE_SLOTS = 8
    CONF.set_override("num_pcie_ports", TEST_AMOUNT_OF_PCIE_SLOTS,
                      group='libvirt')

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    image_meta = objects.ImageMeta.from_dict({
        "disk_format": "raw",
        "properties": {"hw_machine_type":
                       "pc-q35-test"}})

    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)

    cfg = drvr._get_guest_config(instance_ref,
                                 _fake_network_info(self, 1),
                                 image_meta, disk_info)

    # Not every device exposes root_name/model, hence the getattr
    # defaults instead of attribute-error handling.
    num_ports = sum(
        1 for dev in cfg.devices
        if getattr(dev, 'root_name', None) == 'controller' and
        getattr(dev, 'model', None) == 'pcie-root-port')

    self.assertEqual(TEST_AMOUNT_OF_PCIE_SLOTS, num_ports)
|
|
|
|
def test_get_guest_config_pcie_i440fx(self):
    """i440fx is not a PCIe machine type, so no pcie-root-port
    controllers are added even when num_pcie_ports is set.
    """
    self.flags(virt_type="kvm",
               group='libvirt')

    TEST_AMOUNT_OF_PCIE_SLOTS = 8
    CONF.set_override("num_pcie_ports", TEST_AMOUNT_OF_PCIE_SLOTS,
                      group='libvirt')

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    image_meta = objects.ImageMeta.from_dict({
        "disk_format": "raw",
        "properties": {"hw_machine_type":
                       "pc-i440fx-test"}})

    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)

    cfg = drvr._get_guest_config(instance_ref,
                                 _fake_network_info(self, 1),
                                 image_meta, disk_info)

    # Not every device exposes root_name/model, hence the getattr
    # defaults instead of attribute-error handling.
    num_ports = sum(
        1 for dev in cfg.devices
        if getattr(dev, 'root_name', None) == 'controller' and
        getattr(dev, 'model', None) == 'pcie-root-port')

    # i440fx is not pcie machine so there should be no pcie ports
    self.assertEqual(0, num_ports)
|
|
|
|
def test_get_guest_config_missing_ownership_info(self):
    """When owner_user_name/owner_project_name are absent from the
    instance system_metadata, the metadata falls back to "N/A".
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

    test_instance = copy.deepcopy(self.test_instance)

    ctxt = context.RequestContext(project_id=123,
                                  project_name="aubergine",
                                  user_id=456,
                                  user_name="pie")

    flavor = objects.Flavor(name='m1.small',
                            memory_mb=6,
                            vcpus=28,
                            root_gb=496,
                            ephemeral_gb=8128,
                            swap=33550336,
                            extra_specs={})
    instance_ref = objects.Instance(**test_instance)
    instance_ref.flavor = flavor
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)

    cfg = drvr._get_guest_config(instance_ref,
                                 _fake_network_info(self, 1),
                                 image_meta, disk_info,
                                 context=ctxt)
    self.assertEqual("N/A",
                     cfg.metadata[0].owner.username)
    self.assertEqual("N/A",
                     cfg.metadata[0].owner.projectname)
|
|
|
|
def test_get_guest_config_lxc(self):
    """LXC guests use exe os_type with a filesystem, interface and
    console instead of disks/graphics.
    """
    self.flags(virt_type='lxc', group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    cfg = drvr._get_guest_config(instance_ref,
                                 _fake_network_info(self, 1),
                                 image_meta, {'mapping': {}})
    self.assertEqual(instance_ref["uuid"], cfg.uuid)
    self.assertEqual(instance_ref.flavor.memory_mb * units.Ki, cfg.memory)
    self.assertEqual(instance_ref.flavor.vcpus, cfg.vcpus)
    self.assertEqual(fields.VMMode.EXE, cfg.os_type)
    self.assertEqual("/sbin/init", cfg.os_init_path)
    self.assertEqual("console=tty0 console=ttyS0 console=hvc0",
                     cfg.os_cmdline)
    self.assertIsNone(cfg.os_root)
    self.assertEqual(3, len(cfg.devices))
    self.assertIsInstance(cfg.devices[0],
                          vconfig.LibvirtConfigGuestFilesys)
    self.assertIsInstance(cfg.devices[1],
                          vconfig.LibvirtConfigGuestInterface)
    self.assertIsInstance(cfg.devices[2],
                          vconfig.LibvirtConfigGuestConsole)
|
|
|
|
def test_get_guest_config_lxc_with_id_maps(self):
    """Configured uid/gid maps are reflected as UID/GID idmap entries in
    the LXC guest config.
    """
    self.flags(virt_type='lxc', group='libvirt')
    self.flags(uid_maps=['0:1000:100'], group='libvirt')
    self.flags(gid_maps=['0:1000:100'], group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    cfg = drvr._get_guest_config(instance_ref,
                                 _fake_network_info(self, 1),
                                 image_meta, {'mapping': {}})
    self.assertEqual(instance_ref["uuid"], cfg.uuid)
    self.assertEqual(instance_ref.flavor.memory_mb * units.Ki, cfg.memory)
    # NOTE(review): the sibling LXC test reads instance_ref.flavor.vcpus;
    # presumably both resolve to the same flavor value — confirm.
    self.assertEqual(instance_ref.vcpus, cfg.vcpus)
    self.assertEqual(fields.VMMode.EXE, cfg.os_type)
    self.assertEqual("/sbin/init", cfg.os_init_path)
    self.assertEqual("console=tty0 console=ttyS0 console=hvc0",
                     cfg.os_cmdline)
    self.assertIsNone(cfg.os_root)
    self.assertEqual(3, len(cfg.devices))
    self.assertIsInstance(cfg.devices[0],
                          vconfig.LibvirtConfigGuestFilesys)
    self.assertIsInstance(cfg.devices[1],
                          vconfig.LibvirtConfigGuestInterface)
    self.assertIsInstance(cfg.devices[2],
                          vconfig.LibvirtConfigGuestConsole)
    self.assertEqual(len(cfg.idmaps), 2)
    self.assertIsInstance(cfg.idmaps[0],
                          vconfig.LibvirtConfigGuestUIDMap)
    self.assertIsInstance(cfg.idmaps[1],
                          vconfig.LibvirtConfigGuestGIDMap)
|
|
|
|
@mock.patch.object(
    host.Host, "is_cpu_control_policy_capable", return_value=True)
def test_get_guest_config_numa_host_instance_fits(self, is_able):
    """An instance that fits in the host NUMA topology gets no cpuset,
    no vcpu pinning and no guest NUMA config.
    """
    instance_ref = objects.Instance(**self.test_instance)
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    flavor = objects.Flavor(memory_mb=1, vcpus=2, root_gb=496,
                            ephemeral_gb=8128, swap=33550336, name='fake',
                            extra_specs={})
    instance_ref.flavor = flavor

    caps = vconfig.LibvirtConfigCaps()
    caps.host = vconfig.LibvirtConfigCapsHost()
    caps.host.cpu = vconfig.LibvirtConfigCPU()
    caps.host.cpu.arch = fields.Architecture.X86_64
    caps.host.topology = fakelibvirt.NUMATopology()

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)

    with test.nested(
            mock.patch.object(host.Host, 'has_min_version',
                              return_value=True),
            mock.patch.object(host.Host, "get_capabilities",
                              return_value=caps)):
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        self.assertIsNone(cfg.cpuset)
        self.assertEqual(0, len(cfg.cputune.vcpupin))
        self.assertIsNone(cfg.cpu.numa)
|
|
|
|
@mock.patch('nova.privsep.utils.supports_direct_io',
            new=mock.Mock(return_value=True))
@mock.patch.object(
    host.Host, "is_cpu_control_policy_capable", return_value=True)
def test_get_guest_config_numa_host_instance_no_fit(self, is_able):
    """Without NUMA support, the guest falls back to the vcpu_pin_set
    cpuset and no per-node pinning or guest NUMA config is generated.
    """
    instance_ref = objects.Instance(**self.test_instance)
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    flavor = objects.Flavor(memory_mb=4096, vcpus=4, root_gb=496,
                            ephemeral_gb=8128, swap=33550336, name='fake',
                            extra_specs={})
    instance_ref.flavor = flavor

    caps = vconfig.LibvirtConfigCaps()
    caps.host = vconfig.LibvirtConfigCapsHost()
    caps.host.cpu = vconfig.LibvirtConfigCPU()
    caps.host.cpu.arch = fields.Architecture.X86_64
    caps.host.topology = fakelibvirt.NUMATopology()

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)

    with test.nested(
            mock.patch.object(host.Host, "get_capabilities",
                              return_value=caps),
            mock.patch.object(
                hardware, 'get_vcpu_pin_set', return_value=set([3])),
            mock.patch.object(random, 'choice'),
            mock.patch.object(drvr, '_has_numa_support',
                              return_value=False)
            ) as (get_host_cap_mock,
                  get_vcpu_pin_set_mock, choice_mock,
                  _has_numa_support_mock):
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        # No random NUMA node selection should have happened.
        self.assertFalse(choice_mock.called)
        self.assertEqual(set([3]), cfg.cpuset)
        self.assertEqual(0, len(cfg.cputune.vcpupin))
        self.assertIsNone(cfg.cpu.numa)
|
|
|
|
def _test_get_guest_memory_backing_config(
        self, host_topology, inst_topology, numatune):
    """Helper: invoke _get_guest_memory_backing_config with a stubbed
    host NUMA topology and return its result.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    topology_patch = mock.patch.object(
        drvr, "_get_host_numa_topology", return_value=host_topology)
    with topology_patch:
        return drvr._get_guest_memory_backing_config(
            inst_topology, numatune, {})
|
|
|
|
@mock.patch.object(host.Host,
                   'has_min_version', return_value=True)
def test_get_guest_memory_backing_config_large_success(self, mock_version):
    """A 2M pagesize request yields a hugepages backing entry pinned to
    the guest NUMA node mapped in the numatune.
    """
    host_topology = objects.NUMATopology(
        cells=[
            objects.NUMACell(
                id=3, cpuset=set([1]), siblings=[set([1])], memory=1024,
                mempages=[
                    objects.NUMAPagesTopology(size_kb=4, total=2000,
                                              used=0),
                    objects.NUMAPagesTopology(size_kb=2048, total=512,
                                              used=0),
                    objects.NUMAPagesTopology(size_kb=1048576, total=0,
                                              used=0),
                ])])
    inst_topology = objects.InstanceNUMATopology(cells=[
        objects.InstanceNUMACell(
            id=3, cpuset=set([0, 1]), memory=1024, pagesize=2048)])

    numa_tune = vconfig.LibvirtConfigGuestNUMATune()
    numa_tune.memnodes = [vconfig.LibvirtConfigGuestNUMATuneMemNode()]
    numa_tune.memnodes[0].cellid = 0
    numa_tune.memnodes[0].nodeset = [3]

    result = self._test_get_guest_memory_backing_config(
        host_topology, inst_topology, numa_tune)
    self.assertEqual(1, len(result.hugepages))
    self.assertEqual(2048, result.hugepages[0].size_kb)
    # Nodeset refers to the GUEST cell id (0), not the host cell (3).
    self.assertEqual([0], result.hugepages[0].nodeset)
|
|
|
|
@mock.patch.object(host.Host,
                   'has_min_version', return_value=True)
def test_get_guest_memory_backing_config_smallest(self, mock_version):
    """Requesting the smallest (default 4K) page size produces no
    explicit memory backing config at all.
    """
    host_topology = objects.NUMATopology(
        cells=[
            objects.NUMACell(
                id=3, cpuset=set([1]), siblings=[set([1])], memory=1024,
                mempages=[
                    objects.NUMAPagesTopology(size_kb=4, total=2000,
                                              used=0),
                    objects.NUMAPagesTopology(size_kb=2048, total=512,
                                              used=0),
                    objects.NUMAPagesTopology(size_kb=1048576, total=0,
                                              used=0),
                ])])
    inst_topology = objects.InstanceNUMATopology(cells=[
        objects.InstanceNUMACell(
            id=3, cpuset=set([0, 1]), memory=1024, pagesize=4)])

    numa_tune = vconfig.LibvirtConfigGuestNUMATune()
    numa_tune.memnodes = [vconfig.LibvirtConfigGuestNUMATuneMemNode()]
    numa_tune.memnodes[0].cellid = 0
    numa_tune.memnodes[0].nodeset = [3]

    result = self._test_get_guest_memory_backing_config(
        host_topology, inst_topology, numa_tune)
    self.assertIsNone(result)
|
|
|
|
def test_get_guest_memory_backing_config_realtime(self):
    """Realtime CPU flavors get locked memory with shared pages
    disabled.
    """
    flavor = {"extra_specs": {"hw:cpu_realtime": "yes",
                              "hw:cpu_policy": "dedicated"}}
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    backing = drvr._get_guest_memory_backing_config(None, None, flavor)
    self.assertTrue(backing.locked)
    self.assertFalse(backing.sharedpages)
|
|
|
|
def test_get_guest_memory_backing_config_file_backed(self):
    """file_backed_memory enables shared, file-sourced, immediately
    allocated memory backing.
    """
    self.flags(file_backed_memory=1024, group="libvirt")
    backing = self._test_get_guest_memory_backing_config(None, None, None)
    self.assertTrue(backing.sharedaccess)
    self.assertTrue(backing.filesource)
    self.assertTrue(backing.allocateimmediate)
|
|
|
|
@mock.patch.object(fakelibvirt.Connection, 'getVersion')
@mock.patch.object(fakelibvirt.Connection, 'getLibVersion')
def test_get_guest_memory_backing_config_file_backed_discard(self,
        mock_lib_version, mock_version):
    """discard is enabled when both libvirt and QEMU meet the minimum
    versions for file-backed memory discard.
    """
    self.flags(file_backed_memory=1024, group='libvirt')

    mock_lib_version.return_value = versionutils.convert_version_to_int(
        libvirt_driver.MIN_LIBVIRT_FILE_BACKED_DISCARD_VERSION)
    mock_version.return_value = versionutils.convert_version_to_int(
        libvirt_driver.MIN_QEMU_FILE_BACKED_DISCARD_VERSION)

    result = self._test_get_guest_memory_backing_config(
        None, None, None
    )
    self.assertTrue(result.discard)
|
|
|
|
@mock.patch.object(fakelibvirt.Connection, 'getVersion')
|
|
@mock.patch.object(fakelibvirt.Connection, 'getLibVersion')
|
|
def test_get_guest_memory_backing_config_file_backed_discard_libvirt(self,
|
|
mock_lib_version, mock_version):
|
|
self.flags(file_backed_memory=1024, group='libvirt')
|
|
|
|
mock_lib_version.return_value = versionutils.convert_version_to_int(
|
|
libvirt_driver.MIN_LIBVIRT_FILE_BACKED_DISCARD_VERSION) - 1
|
|
mock_version.return_value = versionutils.convert_version_to_int(
|
|
libvirt_driver.MIN_QEMU_FILE_BACKED_DISCARD_VERSION)
|
|
|
|
result = self._test_get_guest_memory_backing_config(
|
|
None, None, None
|
|
)
|
|
self.assertFalse(result.discard)
|
|
|
|
@mock.patch.object(fakelibvirt.Connection, 'getVersion')
|
|
@mock.patch.object(fakelibvirt.Connection, 'getLibVersion')
|
|
def test_get_guest_memory_backing_config_file_backed_discard_qemu(self,
|
|
mock_lib_version, mock_version):
|
|
self.flags(file_backed_memory=1024, group='libvirt')
|
|
|
|
mock_lib_version.return_value = versionutils.convert_version_to_int(
|
|
libvirt_driver.MIN_LIBVIRT_FILE_BACKED_DISCARD_VERSION)
|
|
mock_version.return_value = versionutils.convert_version_to_int(
|
|
libvirt_driver.MIN_QEMU_FILE_BACKED_DISCARD_VERSION) - 1
|
|
|
|
result = self._test_get_guest_memory_backing_config(
|
|
None, None, None
|
|
)
|
|
self.assertFalse(result.discard)
|
|
|
|
def test_get_guest_memory_backing_config_file_backed_hugepages(self):
|
|
self.flags(file_backed_memory=1024, group="libvirt")
|
|
host_topology = objects.NUMATopology(
|
|
cells=[
|
|
objects.NUMACell(
|
|
id=3, cpuset=set([1]), siblings=[set([1])], memory=1024,
|
|
mempages=[
|
|
objects.NUMAPagesTopology(size_kb=4, total=2000,
|
|
used=0),
|
|
objects.NUMAPagesTopology(size_kb=2048, total=512,
|
|
used=0),
|
|
objects.NUMAPagesTopology(size_kb=1048576, total=0,
|
|
used=0),
|
|
])])
|
|
inst_topology = objects.InstanceNUMATopology(cells=[
|
|
objects.InstanceNUMACell(
|
|
id=3, cpuset=set([0, 1]), memory=1024, pagesize=2048)])
|
|
|
|
numa_tune = vconfig.LibvirtConfigGuestNUMATune()
|
|
numa_tune.memnodes = [vconfig.LibvirtConfigGuestNUMATuneMemNode()]
|
|
numa_tune.memnodes[0].cellid = 0
|
|
numa_tune.memnodes[0].nodeset = [3]
|
|
|
|
self.assertRaises(exception.MemoryPagesUnsupported,
|
|
self._test_get_guest_memory_backing_config,
|
|
host_topology, inst_topology, numa_tune)
|
|
|
|
    @mock.patch.object(
        host.Host, "is_cpu_control_policy_capable", return_value=True)
    def test_get_guest_config_numa_host_instance_pci_no_numa_info(
            self, is_able):
        """A PCI device reporting no NUMA affinity (numa_node=None) must
        not force NUMA fitting: the guest is confined to the vCPU pin set
        with no per-vCPU pinning and no guest NUMA topology.
        """
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        flavor = objects.Flavor(memory_mb=1, vcpus=2, root_gb=496,
                                ephemeral_gb=8128, swap=33550336, name='fake',
                                extra_specs={})
        instance_ref.flavor = flavor

        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = fields.Architecture.X86_64
        caps.host.topology = fakelibvirt.NUMATopology()

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        # The device's numa_node=None is the interesting part: it carries
        # no placement constraint for the guest.
        pci_device_info = dict(test_pci_device.fake_db_dev)
        pci_device_info.update(compute_node_id=1,
                               label='fake',
                               status=fields.PciDeviceStatus.AVAILABLE,
                               address='0000:00:00.1',
                               instance_uuid=None,
                               request_id=None,
                               extra_info={},
                               numa_node=None)
        pci_device = objects.PciDevice(**pci_device_info)

        with test.nested(
                mock.patch.object(host.Host, 'has_min_version',
                                  return_value=True),
                mock.patch.object(
                    host.Host, "get_capabilities", return_value=caps),
                mock.patch.object(
                    hardware, 'get_vcpu_pin_set', return_value=set([3])),
                mock.patch.object(host.Host, 'get_online_cpus',
                                  return_value=set(range(8))),
                mock.patch.object(pci_manager, "get_instance_pci_devs",
                                  return_value=[pci_device])):
            cfg = conn._get_guest_config(instance_ref, [],
                                         image_meta, disk_info)
            # Only the pin set confines the guest; no pinning/NUMA config.
            self.assertEqual(set([3]), cfg.cpuset)
            self.assertEqual(0, len(cfg.cputune.vcpupin))
            self.assertIsNone(cfg.cpu.numa)
|
|
|
|
    @mock.patch('nova.privsep.utils.supports_direct_io',
                new=mock.Mock(return_value=True))
    @mock.patch.object(
        host.Host, "is_cpu_control_policy_capable", return_value=True)
    def test_get_guest_config_numa_host_instance_2pci_no_fit(self, is_able):
        """When two PCI devices sit on different host NUMA nodes and NUMA
        support is reported as unavailable, no random cell is chosen and
        the guest falls back to plain pin-set confinement.
        """
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        flavor = objects.Flavor(memory_mb=4096, vcpus=4, root_gb=496,
                                ephemeral_gb=8128, swap=33550336, name='fake',
                                extra_specs={})
        instance_ref.flavor = flavor

        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = fields.Architecture.X86_64
        caps.host.topology = fakelibvirt.NUMATopology()

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        # Two devices on different host NUMA nodes (1 and 0) so that no
        # single cell can satisfy both affinities.
        pci_device_info = dict(test_pci_device.fake_db_dev)
        pci_device_info.update(compute_node_id=1,
                               label='fake',
                               status=fields.PciDeviceStatus.AVAILABLE,
                               address='0000:00:00.1',
                               instance_uuid=None,
                               request_id=None,
                               extra_info={},
                               numa_node=1)
        pci_device = objects.PciDevice(**pci_device_info)
        pci_device_info.update(numa_node=0, address='0000:00:00.2')
        pci_device2 = objects.PciDevice(**pci_device_info)
        with test.nested(
                mock.patch.object(
                    host.Host, "get_capabilities", return_value=caps),
                mock.patch.object(
                    hardware, 'get_vcpu_pin_set', return_value=set([3])),
                mock.patch.object(random, 'choice'),
                mock.patch.object(pci_manager, "get_instance_pci_devs",
                                  return_value=[pci_device, pci_device2]),
                mock.patch.object(conn, '_has_numa_support',
                                  return_value=False)
                ) as (get_host_cap_mock,
                      get_vcpu_pin_set_mock, choice_mock, pci_mock,
                      _has_numa_support_mock):
            cfg = conn._get_guest_config(instance_ref, [],
                                         image_meta, disk_info)
            # random.choice must not be consulted when NUMA is unsupported.
            self.assertFalse(choice_mock.called)
            self.assertEqual(set([3]), cfg.cpuset)
            self.assertEqual(0, len(cfg.cputune.vcpupin))
            self.assertIsNone(cfg.cpu.numa)
|
|
|
|
    @mock.patch.object(fakelibvirt.Connection, 'getType')
    @mock.patch.object(fakelibvirt.Connection, 'getVersion')
    @mock.patch.object(fakelibvirt.Connection, 'getLibVersion')
    @mock.patch.object(host.Host, 'get_capabilities')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_set_host_enabled')
    def _test_get_guest_config_numa_unsupported(self, fake_lib_version,
                                                fake_version, fake_type,
                                                fake_arch, exception_class,
                                                pagesize, mock_host,
                                                mock_caps, mock_lib_version,
                                                mock_version, mock_type):
        """Helper: assert that _get_guest_config raises ``exception_class``
        for a NUMA instance on a host described by the given hypervisor
        type, versions and architecture.

        The first six positional parameters are supplied by callers; the
        trailing five mocks are injected bottom-up by the decorators, so
        their order must not be changed.
        """
        instance_topology = objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell(
                id=0, cpuset=set([0]),
                memory=1024, pagesize=pagesize)])
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.numa_topology = instance_topology
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        flavor = objects.Flavor(memory_mb=1, vcpus=2, root_gb=496,
                                ephemeral_gb=8128, swap=33550336, name='fake',
                                extra_specs={})
        instance_ref.flavor = flavor

        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = fake_arch
        caps.host.topology = fakelibvirt.NUMATopology()

        # Make the fake connection report the caller-chosen host facts.
        mock_type.return_value = fake_type
        mock_version.return_value = fake_version
        mock_lib_version.return_value = fake_lib_version
        mock_caps.return_value = caps

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)

        self.assertRaises(exception_class,
                          drvr._get_guest_config,
                          instance_ref, [],
                          image_meta, disk_info)
|
|
|
|
def test_get_guest_config_numa_other_arch_qemu(self):
|
|
self.flags(virt_type='kvm', group='libvirt')
|
|
|
|
self._test_get_guest_config_numa_unsupported(
|
|
versionutils.convert_version_to_int(
|
|
libvirt_driver.MIN_LIBVIRT_VERSION),
|
|
versionutils.convert_version_to_int(
|
|
libvirt_driver.MIN_QEMU_VERSION),
|
|
host.HV_DRIVER_QEMU,
|
|
fields.Architecture.S390,
|
|
exception.NUMATopologyUnsupported,
|
|
None)
|
|
|
|
def test_get_guest_config_numa_xen(self):
|
|
self.flags(virt_type='xen', group='libvirt')
|
|
self._test_get_guest_config_numa_unsupported(
|
|
versionutils.convert_version_to_int(
|
|
libvirt_driver.MIN_LIBVIRT_VERSION),
|
|
versionutils.convert_version_to_int((4, 5, 0)),
|
|
'XEN',
|
|
fields.Architecture.X86_64,
|
|
exception.NUMATopologyUnsupported,
|
|
None)
|
|
|
|
    @mock.patch.object(
        host.Host, "is_cpu_control_policy_capable", return_value=True)
    def test_get_guest_config_numa_host_instance_fit_w_cpu_pinset(
            self, is_able):
        """A non-NUMA instance that fits into one host cell is confined
        to the configured vCPU pin set without per-vCPU pinning or a
        guest NUMA topology.
        """
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        flavor = objects.Flavor(memory_mb=1024, vcpus=2, root_gb=496,
                                ephemeral_gb=8128, swap=33550336, name='fake',
                                extra_specs={})
        instance_ref.flavor = flavor

        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = fields.Architecture.X86_64
        # Enough host memory for the flavor to fit a single cell.
        caps.host.topology = fakelibvirt.NUMATopology(kb_mem=4194304)

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)

        with test.nested(
                mock.patch.object(host.Host, 'has_min_version',
                                  return_value=True),
                mock.patch.object(host.Host, "get_capabilities",
                                  return_value=caps),
                mock.patch.object(
                    hardware, 'get_vcpu_pin_set', return_value=set([2, 3])),
                mock.patch.object(host.Host, 'get_online_cpus',
                                  return_value=set(range(8)))
                ) as (has_min_version_mock, get_host_cap_mock,
                      get_vcpu_pin_set_mock, get_online_cpus_mock):
            cfg = drvr._get_guest_config(instance_ref, [],
                                         image_meta, disk_info)
            # NOTE(ndipanov): we make sure that pin_set was taken into account
            # when choosing viable cells
            self.assertEqual(set([2, 3]), cfg.cpuset)
            self.assertEqual(0, len(cfg.cputune.vcpupin))
            self.assertIsNone(cfg.cpu.numa)
|
|
|
|
    @mock.patch.object(
        host.Host, "is_cpu_control_policy_capable", return_value=True)
    def test_get_guest_config_non_numa_host_instance_topo(self, is_able):
        """With a NUMA instance on a host that reports no topology, a
        guest NUMA config is still emitted (from the instance topology)
        but no numatune or pinning is generated.
        """
        instance_topology = objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell(
                id=0, cpuset=set([0]), memory=1024),
                objects.InstanceNUMACell(
                    id=1, cpuset=set([2]), memory=1024)])
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.numa_topology = instance_topology
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        flavor = objects.Flavor(memory_mb=2048, vcpus=2, root_gb=496,
                                ephemeral_gb=8128, swap=33550336, name='fake',
                                extra_specs={})
        instance_ref.flavor = flavor

        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = fields.Architecture.X86_64
        # The host reports no NUMA topology at all.
        caps.host.topology = None

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)

        with test.nested(
                mock.patch.object(
                    objects.InstanceNUMATopology, "get_by_instance_uuid",
                    return_value=instance_topology),
                mock.patch.object(host.Host, 'has_min_version',
                                  return_value=True),
                mock.patch.object(host.Host, "get_capabilities",
                                  return_value=caps)):
            cfg = drvr._get_guest_config(instance_ref, [],
                                         image_meta, disk_info)
            self.assertIsNone(cfg.cpuset)
            self.assertEqual(0, len(cfg.cputune.vcpupin))
            self.assertIsNone(cfg.numatune)
            self.assertIsNotNone(cfg.cpu.numa)
            # Guest cells mirror the instance topology cell for cell.
            for instance_cell, numa_cfg_cell in zip(
                    instance_topology.cells, cfg.cpu.numa.cells):
                self.assertEqual(instance_cell.id, numa_cfg_cell.id)
                self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus)
                self.assertEqual(instance_cell.memory * units.Ki,
                                 numa_cfg_cell.memory)
|
|
|
|
@mock.patch.object(
|
|
host.Host, "is_cpu_control_policy_capable", return_value=True)
|
|
def test_get_guest_config_numa_host_instance_topo(self, is_able):
|
|
instance_topology = objects.InstanceNUMATopology(
|
|
cells=[objects.InstanceNUMACell(
|
|
id=1, cpuset=set([0, 1]), memory=1024, pagesize=None),
|
|
objects.InstanceNUMACell(
|
|
id=2, cpuset=set([2, 3]), memory=1024,
|
|
pagesize=None)])
|
|
instance_ref = objects.Instance(**self.test_instance)
|
|
instance_ref.numa_topology = instance_topology
|
|
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
|
|
flavor = objects.Flavor(memory_mb=2048, vcpus=4, root_gb=496,
|
|
ephemeral_gb=8128, swap=33550336, name='fake',
|
|
extra_specs={})
|
|
instance_ref.flavor = flavor
|
|
|
|
caps = vconfig.LibvirtConfigCaps()
|
|
caps.host = vconfig.LibvirtConfigCapsHost()
|
|
caps.host.cpu = vconfig.LibvirtConfigCPU()
|
|
caps.host.cpu.arch = fields.Architecture.X86_64
|
|
caps.host.topology = fakelibvirt.NUMATopology()
|
|
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
|
|
instance_ref,
|
|
image_meta)
|
|
|
|
with test.nested(
|
|
mock.patch.object(
|
|
objects.InstanceNUMATopology, "get_by_instance_uuid",
|
|
return_value=instance_topology),
|
|
mock.patch.object(host.Host, 'has_min_version',
|
|
return_value=True),
|
|
mock.patch.object(host.Host, "get_capabilities",
|
|
return_value=caps),
|
|
mock.patch.object(
|
|
hardware, 'get_vcpu_pin_set',
|
|
return_value=set([2, 3, 4, 5])),
|
|
mock.patch.object(host.Host, 'get_online_cpus',
|
|
return_value=set(range(8))),
|
|
):
|
|
cfg = drvr._get_guest_config(instance_ref, [],
|
|
image_meta, disk_info)
|
|
self.assertIsNone(cfg.cpuset)
|
|
# Test that the pinning is correct and limited to allowed only
|
|
self.assertEqual(0, cfg.cputune.vcpupin[0].id)
|
|
self.assertEqual(set([2, 3]), cfg.cputune.vcpupin[0].cpuset)
|
|
self.assertEqual(1, cfg.cputune.vcpupin[1].id)
|
|
self.assertEqual(set([2, 3]), cfg.cputune.vcpupin[1].cpuset)
|
|
self.assertEqual(2, cfg.cputune.vcpupin[2].id)
|
|
self.assertEqual(set([4, 5]), cfg.cputune.vcpupin[2].cpuset)
|
|
self.assertEqual(3, cfg.cputune.vcpupin[3].id)
|
|
self.assertEqual(set([4, 5]), cfg.cputune.vcpupin[3].cpuset)
|
|
self.assertIsNotNone(cfg.cpu.numa)
|
|
|
|
self.assertIsInstance(cfg.cputune.emulatorpin,
|
|
vconfig.LibvirtConfigGuestCPUTuneEmulatorPin)
|
|
self.assertEqual(set([2, 3, 4, 5]), cfg.cputune.emulatorpin.cpuset)
|
|
|
|
for instance_cell, numa_cfg_cell, index in zip(
|
|
instance_topology.cells,
|
|
cfg.cpu.numa.cells,
|
|
range(len(instance_topology.cells))):
|
|
self.assertEqual(index, numa_cfg_cell.id)
|
|
self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus)
|
|
self.assertEqual(instance_cell.memory * units.Ki,
|
|
numa_cfg_cell.memory)
|
|
|
|
allnodes = [cell.id for cell in instance_topology.cells]
|
|
self.assertEqual(allnodes, cfg.numatune.memory.nodeset)
|
|
self.assertEqual("strict", cfg.numatune.memory.mode)
|
|
|
|
for instance_cell, memnode, index in zip(
|
|
instance_topology.cells,
|
|
cfg.numatune.memnodes,
|
|
range(len(instance_topology.cells))):
|
|
self.assertEqual(index, memnode.cellid)
|
|
self.assertEqual([instance_cell.id], memnode.nodeset)
|
|
self.assertEqual("strict", memnode.mode)
|
|
|
|
    def test_get_guest_config_numa_host_instance_topo_reordered(self):
        """Guest NUMA cells keep instance-cell order even when the
        instance cell ids (3, 0) do not match host node order: the libvirt
        cell id is the positional index, while each memnode nodeset still
        points at the real host cell id.
        """
        instance_topology = objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell(
                id=3, cpuset=set([0, 1]), memory=1024),
                objects.InstanceNUMACell(
                    id=0, cpuset=set([2, 3]), memory=1024)])
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.numa_topology = instance_topology
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        flavor = objects.Flavor(memory_mb=2048, vcpus=4, root_gb=496,
                                ephemeral_gb=8128, swap=33550336, name='fake',
                                extra_specs={})
        instance_ref.flavor = flavor

        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = fields.Architecture.X86_64
        caps.host.topology = fakelibvirt.NUMATopology()

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)

        with test.nested(
                mock.patch.object(
                    objects.InstanceNUMATopology, "get_by_instance_uuid",
                    return_value=instance_topology),
                mock.patch.object(host.Host, 'has_min_version',
                                  return_value=True),
                mock.patch.object(host.Host, "get_capabilities",
                                  return_value=caps),
                mock.patch.object(host.Host, 'get_online_cpus',
                                  return_value=set(range(8))),
                ):
            cfg = drvr._get_guest_config(instance_ref, [],
                                         image_meta, disk_info)
            self.assertIsNone(cfg.cpuset)
            # Test that the pinning is correct and limited to allowed only
            self.assertEqual(0, cfg.cputune.vcpupin[0].id)
            self.assertEqual(set([6, 7]), cfg.cputune.vcpupin[0].cpuset)
            self.assertEqual(1, cfg.cputune.vcpupin[1].id)
            self.assertEqual(set([6, 7]), cfg.cputune.vcpupin[1].cpuset)
            self.assertEqual(2, cfg.cputune.vcpupin[2].id)
            self.assertEqual(set([0, 1]), cfg.cputune.vcpupin[2].cpuset)
            self.assertEqual(3, cfg.cputune.vcpupin[3].id)
            self.assertEqual(set([0, 1]), cfg.cputune.vcpupin[3].cpuset)
            self.assertIsNotNone(cfg.cpu.numa)

            self.assertIsInstance(cfg.cputune.emulatorpin,
                                  vconfig.LibvirtConfigGuestCPUTuneEmulatorPin)
            self.assertEqual(set([0, 1, 6, 7]), cfg.cputune.emulatorpin.cpuset)

            # Guest cell ids are positional, not the instance cell ids.
            for index, (instance_cell, numa_cfg_cell) in enumerate(zip(
                    instance_topology.cells,
                    cfg.cpu.numa.cells)):
                self.assertEqual(index, numa_cfg_cell.id)
                self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus)
                self.assertEqual(instance_cell.memory * units.Ki,
                                 numa_cfg_cell.memory)
                self.assertIsNone(numa_cfg_cell.memAccess)

            allnodes = set([cell.id for cell in instance_topology.cells])
            self.assertEqual(allnodes, set(cfg.numatune.memory.nodeset))
            self.assertEqual("strict", cfg.numatune.memory.mode)

            for index, (instance_cell, memnode) in enumerate(zip(
                    instance_topology.cells,
                    cfg.numatune.memnodes)):
                self.assertEqual(index, memnode.cellid)
                self.assertEqual([instance_cell.id], memnode.nodeset)
                self.assertEqual("strict", memnode.mode)
|
|
|
|
    def test_get_guest_config_numa_host_instance_topo_cpu_pinning(self):
        """Explicit per-vCPU cpu_pinning maps each vCPU to its single
        host pCPU, and the emulator is pinned to the union of all pinned
        pCPUs.
        """
        instance_topology = objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell(
                id=1, cpuset=set([0, 1]), memory=1024,
                cpu_pinning={0: 24, 1: 25}),
                objects.InstanceNUMACell(
                    id=0, cpuset=set([2, 3]), memory=1024,
                    cpu_pinning={2: 0, 3: 1})])
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.numa_topology = instance_topology
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        flavor = objects.Flavor(memory_mb=2048, vcpus=2, root_gb=496,
                                ephemeral_gb=8128, swap=33550336, name='fake',
                                extra_specs={})
        instance_ref.flavor = flavor

        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = fields.Architecture.X86_64
        # A big host topology so pCPU 24/25 exist (4*3*2 pCPUs per cell).
        caps.host.topology = fakelibvirt.NUMATopology(
            sockets_per_cell=4, cores_per_socket=3, threads_per_core=2)

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)

        with test.nested(
                mock.patch.object(
                    objects.InstanceNUMATopology, "get_by_instance_uuid",
                    return_value=instance_topology),
                mock.patch.object(host.Host, 'has_min_version',
                                  return_value=True),
                mock.patch.object(host.Host, "get_capabilities",
                                  return_value=caps),
                mock.patch.object(host.Host, 'get_online_cpus',
                                  return_value=set(range(8))),
                ):
            cfg = conn._get_guest_config(instance_ref, [],
                                         image_meta, disk_info)
            self.assertIsNone(cfg.cpuset)
            # Test that the pinning is correct and limited to allowed only
            self.assertEqual(0, cfg.cputune.vcpupin[0].id)
            self.assertEqual(set([24]), cfg.cputune.vcpupin[0].cpuset)
            self.assertEqual(1, cfg.cputune.vcpupin[1].id)
            self.assertEqual(set([25]), cfg.cputune.vcpupin[1].cpuset)
            self.assertEqual(2, cfg.cputune.vcpupin[2].id)
            self.assertEqual(set([0]), cfg.cputune.vcpupin[2].cpuset)
            self.assertEqual(3, cfg.cputune.vcpupin[3].id)
            self.assertEqual(set([1]), cfg.cputune.vcpupin[3].cpuset)
            self.assertIsNotNone(cfg.cpu.numa)

            # Emulator must be pinned to union of cfg.cputune.vcpupin[*].cpuset
            self.assertIsInstance(cfg.cputune.emulatorpin,
                                  vconfig.LibvirtConfigGuestCPUTuneEmulatorPin)
            self.assertEqual(set([0, 1, 24, 25]),
                             cfg.cputune.emulatorpin.cpuset)

            for i, (instance_cell, numa_cfg_cell) in enumerate(zip(
                    instance_topology.cells, cfg.cpu.numa.cells)):
                self.assertEqual(i, numa_cfg_cell.id)
                self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus)
                self.assertEqual(instance_cell.memory * units.Ki,
                                 numa_cfg_cell.memory)
                self.assertIsNone(numa_cfg_cell.memAccess)

            allnodes = set([cell.id for cell in instance_topology.cells])
            self.assertEqual(allnodes, set(cfg.numatune.memory.nodeset))
            self.assertEqual("strict", cfg.numatune.memory.mode)

            for i, (instance_cell, memnode) in enumerate(zip(
                    instance_topology.cells, cfg.numatune.memnodes)):
                self.assertEqual(i, memnode.cellid)
                self.assertEqual([instance_cell.id], memnode.nodeset)
                self.assertEqual("strict", memnode.mode)
|
|
|
|
def test_get_guest_config_numa_host_mempages_shared(self):
|
|
instance_topology = objects.InstanceNUMATopology(
|
|
cells=[
|
|
objects.InstanceNUMACell(
|
|
id=1, cpuset=set([0, 1]),
|
|
memory=1024, pagesize=2048),
|
|
objects.InstanceNUMACell(
|
|
id=2, cpuset=set([2, 3]),
|
|
memory=1024, pagesize=2048)])
|
|
instance_ref = objects.Instance(**self.test_instance)
|
|
instance_ref.numa_topology = instance_topology
|
|
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
|
|
flavor = objects.Flavor(memory_mb=2048, vcpus=4, root_gb=496,
|
|
ephemeral_gb=8128, swap=33550336, name='fake',
|
|
extra_specs={})
|
|
instance_ref.flavor = flavor
|
|
|
|
caps = vconfig.LibvirtConfigCaps()
|
|
caps.host = vconfig.LibvirtConfigCapsHost()
|
|
caps.host.cpu = vconfig.LibvirtConfigCPU()
|
|
caps.host.cpu.arch = fields.Architecture.X86_64
|
|
caps.host.topology = fakelibvirt.NUMATopology()
|
|
for i, cell in enumerate(caps.host.topology.cells):
|
|
cell.mempages = fakelibvirt.create_mempages(
|
|
[(4, 1024 * i), (2048, i)])
|
|
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
|
|
instance_ref,
|
|
image_meta)
|
|
|
|
with test.nested(
|
|
mock.patch.object(
|
|
objects.InstanceNUMATopology, "get_by_instance_uuid",
|
|
return_value=instance_topology),
|
|
mock.patch.object(host.Host, 'has_min_version',
|
|
return_value=True),
|
|
mock.patch.object(host.Host, "get_capabilities",
|
|
return_value=caps),
|
|
mock.patch.object(
|
|
hardware, 'get_vcpu_pin_set',
|
|
return_value=set([2, 3, 4, 5])),
|
|
mock.patch.object(host.Host, 'get_online_cpus',
|
|
return_value=set(range(8))),
|
|
):
|
|
cfg = drvr._get_guest_config(instance_ref, [],
|
|
image_meta, disk_info)
|
|
|
|
for instance_cell, numa_cfg_cell, index in zip(
|
|
instance_topology.cells,
|
|
cfg.cpu.numa.cells,
|
|
range(len(instance_topology.cells))):
|
|
self.assertEqual(index, numa_cfg_cell.id)
|
|
self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus)
|
|
self.assertEqual(instance_cell.memory * units.Ki,
|
|
numa_cfg_cell.memory)
|
|
self.assertEqual("shared", numa_cfg_cell.memAccess)
|
|
|
|
allnodes = [cell.id for cell in instance_topology.cells]
|
|
self.assertEqual(allnodes, cfg.numatune.memory.nodeset)
|
|
self.assertEqual("strict", cfg.numatune.memory.mode)
|
|
|
|
for instance_cell, memnode, index in zip(
|
|
instance_topology.cells,
|
|
cfg.numatune.memnodes,
|
|
range(len(instance_topology.cells))):
|
|
self.assertEqual(index, memnode.cellid)
|
|
self.assertEqual([instance_cell.id], memnode.nodeset)
|
|
self.assertEqual("strict", memnode.mode)
|
|
|
|
self.assertEqual(0, len(cfg.cputune.vcpusched))
|
|
self.assertEqual(set([2, 3, 4, 5]), cfg.cputune.emulatorpin.cpuset)
|
|
|
|
def test_get_guest_config_numa_host_instance_cpu_pinning_realtime(self):
|
|
instance_topology = objects.InstanceNUMATopology(
|
|
cells=[
|
|
objects.InstanceNUMACell(
|
|
id=2, cpuset=set([0, 1]),
|
|
memory=1024, pagesize=2048),
|
|
objects.InstanceNUMACell(
|
|
id=3, cpuset=set([2, 3]),
|
|
memory=1024, pagesize=2048)])
|
|
instance_ref = objects.Instance(**self.test_instance)
|
|
instance_ref.numa_topology = instance_topology
|
|
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
|
|
flavor = objects.Flavor(memory_mb=2048, vcpus=4, root_gb=496,
|
|
ephemeral_gb=8128, swap=33550336, name='fake',
|
|
extra_specs={
|
|
"hw:cpu_realtime": "yes",
|
|
"hw:cpu_policy": "dedicated",
|
|
"hw:cpu_realtime_mask": "^0-1"
|
|
})
|
|
instance_ref.flavor = flavor
|
|
|
|
caps = vconfig.LibvirtConfigCaps()
|
|
caps.host = vconfig.LibvirtConfigCapsHost()
|
|
caps.host.cpu = vconfig.LibvirtConfigCPU()
|
|
caps.host.cpu.arch = fields.Architecture.X86_64
|
|
caps.host.topology = fakelibvirt.NUMATopology()
|
|
for i, cell in enumerate(caps.host.topology.cells):
|
|
cell.mempages = fakelibvirt.create_mempages(
|
|
[(4, 1024 * i), (2048, i)])
|
|
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
|
|
instance_ref,
|
|
image_meta)
|
|
|
|
with test.nested(
|
|
mock.patch.object(
|
|
objects.InstanceNUMATopology, "get_by_instance_uuid",
|
|
return_value=instance_topology),
|
|
mock.patch.object(host.Host, 'has_min_version',
|
|
return_value=True),
|
|
mock.patch.object(host.Host, "get_capabilities",
|
|
return_value=caps),
|
|
mock.patch.object(
|
|
hardware, 'get_vcpu_pin_set',
|
|
return_value=set([4, 5, 6, 7])),
|
|
mock.patch.object(host.Host, 'get_online_cpus',
|
|
return_value=set(range(8))),
|
|
):
|
|
cfg = drvr._get_guest_config(instance_ref, [],
|
|
image_meta, disk_info)
|
|
|
|
for instance_cell, numa_cfg_cell, index in zip(
|
|
instance_topology.cells,
|
|
cfg.cpu.numa.cells,
|
|
range(len(instance_topology.cells))):
|
|
self.assertEqual(index, numa_cfg_cell.id)
|
|
self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus)
|
|
self.assertEqual(instance_cell.memory * units.Ki,
|
|
numa_cfg_cell.memory)
|
|
self.assertEqual("shared", numa_cfg_cell.memAccess)
|
|
|
|
allnodes = [cell.id for cell in instance_topology.cells]
|
|
self.assertEqual(allnodes, cfg.numatune.memory.nodeset)
|
|
self.assertEqual("strict", cfg.numatune.memory.mode)
|
|
|
|
for instance_cell, memnode, index in zip(
|
|
instance_topology.cells,
|
|
cfg.numatune.memnodes,
|
|
range(len(instance_topology.cells))):
|
|
self.assertEqual(index, memnode.cellid)
|
|
self.assertEqual([instance_cell.id], memnode.nodeset)
|
|
self.assertEqual("strict", memnode.mode)
|
|
|
|
self.assertEqual(1, len(cfg.cputune.vcpusched))
|
|
self.assertEqual("fifo", cfg.cputune.vcpusched[0].scheduler)
|
|
|
|
# Ensure vCPUs 0-1 are pinned on host CPUs 4-5 and 2-3 are
|
|
# set on host CPUs 6-7 according the realtime mask ^0-1
|
|
self.assertEqual(set([4, 5]), cfg.cputune.vcpupin[0].cpuset)
|
|
self.assertEqual(set([4, 5]), cfg.cputune.vcpupin[1].cpuset)
|
|
self.assertEqual(set([6, 7]), cfg.cputune.vcpupin[2].cpuset)
|
|
self.assertEqual(set([6, 7]), cfg.cputune.vcpupin[3].cpuset)
|
|
|
|
# We ensure that emulator threads are pinned on host CPUs
|
|
# 4-5 which are "normal" vCPUs
|
|
self.assertEqual(set([4, 5]), cfg.cputune.emulatorpin.cpuset)
|
|
|
|
# We ensure that the vCPUs RT are 2-3 set to the host CPUs
|
|
# which are 6, 7
|
|
self.assertEqual(set([2, 3]), cfg.cputune.vcpusched[0].vcpus)
|
|
|
|
    def test_get_guest_config_numa_host_instance_isolated_emulthreads(self):
        """With the ISOLATE emulator-threads policy, the emulator is
        pinned to the cell's reserved pCPU (6) while the dedicated vCPUs
        keep their 1:1 pinning.
        """
        instance_topology = objects.InstanceNUMATopology(
            emulator_threads_policy=(
                fields.CPUEmulatorThreadsPolicy.ISOLATE),
            cells=[
                objects.InstanceNUMACell(
                    id=0, cpuset=set([0, 1]),
                    memory=1024, pagesize=2048,
                    cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
                    cpu_pinning={0: 4, 1: 5},
                    # pCPU 6 is reserved for the emulator threads.
                    cpuset_reserved=set([6])),
                objects.InstanceNUMACell(
                    id=1, cpuset=set([2, 3]),
                    memory=1024, pagesize=2048,
                    cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
                    cpu_pinning={2: 7, 3: 8})])

        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.numa_topology = instance_topology
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = "x86_64"
        caps.host.topology = fakelibvirt.NUMATopology()

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref, image_meta)

        with test.nested(
                mock.patch.object(
                    objects.InstanceNUMATopology, "get_by_instance_uuid",
                    return_value=instance_topology),
                mock.patch.object(host.Host, 'has_min_version',
                                  return_value=True),
                mock.patch.object(host.Host, "get_capabilities",
                                  return_value=caps),
                mock.patch.object(
                    hardware, 'get_vcpu_pin_set',
                    return_value=set([4, 5, 6, 7, 8])),
                mock.patch.object(host.Host, 'get_online_cpus',
                                  return_value=set(range(10))),
                ):
            cfg = drvr._get_guest_config(instance_ref, [],
                                         image_meta, disk_info)

            self.assertEqual(set([6]), cfg.cputune.emulatorpin.cpuset)
            self.assertEqual(set([4]), cfg.cputune.vcpupin[0].cpuset)
            self.assertEqual(set([5]), cfg.cputune.vcpupin[1].cpuset)
            self.assertEqual(set([7]), cfg.cputune.vcpupin[2].cpuset)
            self.assertEqual(set([8]), cfg.cputune.vcpupin[3].cpuset)
|
|
|
|
def test_get_guest_config_numa_host_instance_shared_emulthreads_err(
        self):
    """Emulator threads policy SHARE with a cpu_shared_set whose pCPUs
    are all offline must fail with exception.Invalid.
    """
    # None of 48-50 appears in the mocked online CPU set below.
    self.flags(cpu_shared_set="48-50", group="compute")
    instance_topology = objects.InstanceNUMATopology(
        emulator_threads_policy=(
            fields.CPUEmulatorThreadsPolicy.SHARE),
        cells=[
            objects.InstanceNUMACell(
                id=0, cpuset=set([0, 1]),
                memory=1024, pagesize=2048,
                cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
                cpu_pinning={0: 4, 1: 5},
                cpuset_reserved=set([6])),
            objects.InstanceNUMACell(
                id=1, cpuset=set([2, 3]),
                memory=1024, pagesize=2048,
                cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
                cpu_pinning={2: 7, 3: 8})])

    instance_ref = objects.Instance(**self.test_instance)
    instance_ref.numa_topology = instance_topology
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

    caps = vconfig.LibvirtConfigCaps()
    caps.host = vconfig.LibvirtConfigCapsHost()
    caps.host.cpu = vconfig.LibvirtConfigCPU()
    caps.host.cpu.arch = "x86_64"
    caps.host.topology = fakelibvirt.NUMATopology()

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref, image_meta)

    with test.nested(
            mock.patch.object(
                objects.InstanceNUMATopology, "get_by_instance_uuid",
                return_value=instance_topology),
            mock.patch.object(host.Host, 'has_min_version',
                              return_value=True),
            mock.patch.object(host.Host, "get_capabilities",
                              return_value=caps),
            mock.patch.object(
                hardware, 'get_vcpu_pin_set',
                return_value=set([4, 5, 6, 7, 8])),
            mock.patch.object(host.Host, 'get_online_cpus',
                              return_value=set(range(10))),
            ):
        # pCPUs [48-50] are not online
        self.assertRaises(exception.Invalid, drvr._get_guest_config,
                          instance_ref, [], image_meta, disk_info)
|
def test_get_guest_config_numa_host_instance_shared_emulator_threads(
        self):
    """Emulator threads policy SHARE pins the emulator to the online
    subset of the configured cpu_shared_set.
    """
    self.flags(cpu_shared_set="48-50", group="compute")
    instance_topology = objects.InstanceNUMATopology(
        emulator_threads_policy=(
            fields.CPUEmulatorThreadsPolicy.SHARE),
        cells=[
            objects.InstanceNUMACell(
                id=0, cpuset=set([0, 1]),
                memory=1024, pagesize=2048,
                cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
                cpu_pinning={0: 4, 1: 5},
                cpuset_reserved=set([6])),
            objects.InstanceNUMACell(
                id=1, cpuset=set([2, 3]),
                memory=1024, pagesize=2048,
                cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
                cpu_pinning={2: 7, 3: 8})])

    instance_ref = objects.Instance(**self.test_instance)
    instance_ref.numa_topology = instance_topology
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

    caps = vconfig.LibvirtConfigCaps()
    caps.host = vconfig.LibvirtConfigCapsHost()
    caps.host.cpu = vconfig.LibvirtConfigCPU()
    caps.host.cpu.arch = "x86_64"
    caps.host.topology = fakelibvirt.NUMATopology()

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref, image_meta)

    with test.nested(
            mock.patch.object(
                objects.InstanceNUMATopology, "get_by_instance_uuid",
                return_value=instance_topology),
            mock.patch.object(host.Host, 'has_min_version',
                              return_value=True),
            mock.patch.object(host.Host, "get_capabilities",
                              return_value=caps),
            mock.patch.object(
                hardware, 'get_vcpu_pin_set',
                return_value=set([4, 5, 6, 7, 8])),
            # 49 is deliberately left out of the online set.
            mock.patch.object(host.Host, 'get_online_cpus',
                              return_value=set(list(range(10)) +
                                               [48, 50])),
            ):
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)

        # cpu_shared_set is configured with [48, 49, 50] but only
        # [48, 50] are online.
        self.assertEqual(set([48, 50]), cfg.cputune.emulatorpin.cpuset)
        self.assertEqual(set([4]), cfg.cputune.vcpupin[0].cpuset)
        self.assertEqual(set([5]), cfg.cputune.vcpupin[1].cpuset)
        self.assertEqual(set([7]), cfg.cputune.vcpupin[2].cpuset)
        self.assertEqual(set([8]), cfg.cputune.vcpupin[3].cpuset)
|
def test_get_cpu_numa_config_from_instance(self):
    """A two-cell instance topology maps to a guest CPU NUMA config
    with matching cell ids/cpusets, memory converted MiB -> KiB, and
    'shared' memAccess when wants_hugepages is True.
    """
    topology = objects.InstanceNUMATopology(cells=[
        objects.InstanceNUMACell(id=0, cpuset=set([1, 2]), memory=128),
        objects.InstanceNUMACell(id=1, cpuset=set([3, 4]), memory=128),
    ])
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    # Second argument (wants_hugepages) is True.
    conf = drvr._get_cpu_numa_config_from_instance(topology, True)

    self.assertIsInstance(conf, vconfig.LibvirtConfigGuestCPUNUMA)
    self.assertEqual(0, conf.cells[0].id)
    self.assertEqual(set([1, 2]), conf.cells[0].cpus)
    # 128 MiB == 131072 KiB
    self.assertEqual(131072, conf.cells[0].memory)
    self.assertEqual("shared", conf.cells[0].memAccess)
    self.assertEqual(1, conf.cells[1].id)
    self.assertEqual(set([3, 4]), conf.cells[1].cpus)
    self.assertEqual(131072, conf.cells[1].memory)
    self.assertEqual("shared", conf.cells[1].memAccess)
|
def test_get_cpu_numa_config_from_instance_none(self):
    """A None instance NUMA topology yields no guest CPU NUMA config."""
    driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    self.assertIsNone(
        driver._get_cpu_numa_config_from_instance(None, False))
|
@mock.patch.object(libvirt_driver.LibvirtDriver, "_has_numa_support",
                   return_value=True)
def test_get_memnode_numa_config_from_instance(self, mock_numa):
    """Guest cells map to host NUMA nodes by position: the third
    instance cell (host node id 16) becomes memnode cellid=2 with
    nodeset [16], keeping its cpuset.
    """
    instance_topology = objects.InstanceNUMATopology(cells=[
        objects.InstanceNUMACell(id=0, cpuset=set([1, 2]), memory=128),
        objects.InstanceNUMACell(id=1, cpuset=set([3, 4]), memory=128),
        # Non-contiguous host node id on purpose.
        objects.InstanceNUMACell(id=16, cpuset=set([5, 6]), memory=128)
    ])

    host_topology = objects.NUMATopology(
        cells=[
            objects.NUMACell(
                id=0, cpuset=set([1, 2]), siblings=[set([1]), set([2])],
                memory=1024, mempages=[]),
            objects.NUMACell(
                id=1, cpuset=set([3, 4]), siblings=[set([3]), set([4])],
                memory=1024, mempages=[]),
            objects.NUMACell(
                id=16, cpuset=set([5, 6]), siblings=[set([5]), set([6])],
                memory=1024, mempages=[])])

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    with test.nested(
            mock.patch.object(drvr, "_get_host_numa_topology",
                              return_value=host_topology)):
        guest_numa_config = drvr._get_guest_numa_config(instance_topology,
            flavor={}, allowed_cpus=[1, 2, 3, 4, 5, 6], image_meta={})
        self.assertEqual(2, guest_numa_config.numatune.memnodes[2].cellid)
        self.assertEqual([16],
                         guest_numa_config.numatune.memnodes[2].nodeset)
        self.assertEqual(set([5, 6]),
                         guest_numa_config.numaconfig.cells[2].cpus)
|
@mock.patch.object(libvirt_driver.LibvirtDriver, "_has_numa_support",
                   return_value=True)
@mock.patch.object(host.Host, "get_capabilities")
def test_does_not_want_hugepages(self, mock_caps, mock_numa):
    """_wants_hugepages is False when either topology is missing or the
    instance cells request only the base 4K page size.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    # pagesize=4 is the normal (non-huge) page size.
    instance_topology = objects.InstanceNUMATopology(
        cells=[
            objects.InstanceNUMACell(
                id=1, cpuset=set([0, 1]),
                memory=1024, pagesize=4),
            objects.InstanceNUMACell(
                id=2, cpuset=set([2, 3]),
                memory=1024, pagesize=4)])

    caps = vconfig.LibvirtConfigCaps()
    caps.host = vconfig.LibvirtConfigCapsHost()
    caps.host.cpu = vconfig.LibvirtConfigCPU()
    caps.host.cpu.arch = fields.Architecture.X86_64
    caps.host.topology = fakelibvirt.NUMATopology()

    mock_caps.return_value = caps

    host_topology = drvr._get_host_numa_topology()

    self.assertFalse(drvr._wants_hugepages(None, None))
    self.assertFalse(drvr._wants_hugepages(host_topology, None))
    self.assertFalse(drvr._wants_hugepages(None, instance_topology))
    self.assertFalse(drvr._wants_hugepages(host_topology,
                                           instance_topology))
|
@mock.patch.object(libvirt_driver.LibvirtDriver, "_has_numa_support",
                   return_value=True)
@mock.patch.object(host.Host, "get_capabilities")
def test_does_want_hugepages(self, mock_caps, mock_numa):
    """Hugepage detection works on every architecture that supports it."""
    architectures = (
        fields.Architecture.I686,
        fields.Architecture.X86_64,
        fields.Architecture.AARCH64,
        fields.Architecture.PPC64LE,
        fields.Architecture.PPC64,
    )
    for arch in architectures:
        self._test_does_want_hugepages(mock_caps, mock_numa, arch)
|
def _test_does_want_hugepages(self, mock_caps, mock_numa, architecture):
    """Common body for the hugepage-wanting tests: for the given host
    architecture, reserved_huge_pages are accounted per NUMA node and
    _wants_hugepages is True for a 2M-page instance topology.
    """
    # Reserve 2M pages on nodes 0, 1 and 3 (node 2 gets none).
    self.flags(reserved_huge_pages=[
        {'node': 0, 'size': 2048, 'count': 128},
        {'node': 1, 'size': 2048, 'count': 1},
        {'node': 3, 'size': 2048, 'count': 64}])
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_topology = objects.InstanceNUMATopology(
        cells=[
            objects.InstanceNUMACell(
                id=1, cpuset=set([0, 1]),
                memory=1024, pagesize=2048),
            objects.InstanceNUMACell(
                id=2, cpuset=set([2, 3]),
                memory=1024, pagesize=2048)])

    caps = vconfig.LibvirtConfigCaps()
    caps.host = vconfig.LibvirtConfigCapsHost()
    caps.host.cpu = vconfig.LibvirtConfigCPU()
    caps.host.cpu.arch = architecture
    caps.host.topology = fakelibvirt.NUMATopology()
    # Give each fake node some 4K pages and `i` 2M pages.
    for i, cell in enumerate(caps.host.topology.cells):
        cell.mempages = fakelibvirt.create_mempages(
            [(4, 1024 * i), (2048, i)])

    mock_caps.return_value = caps

    host_topology = drvr._get_host_numa_topology()
    # mempages[1] is the 2M page entry created above.
    self.assertEqual(128, host_topology.cells[0].mempages[1].reserved)
    self.assertEqual(1, host_topology.cells[1].mempages[1].reserved)
    self.assertEqual(0, host_topology.cells[2].mempages[1].reserved)
    self.assertEqual(64, host_topology.cells[3].mempages[1].reserved)

    self.assertTrue(drvr._wants_hugepages(host_topology,
                                          instance_topology))
|
def _test_get_guest_config_clock(self, image_meta_dict, hpet_present):
    """Common body for the guest clock/hpet tests.

    Builds a KVM guest config for every architecture in the map below
    and checks the clock element: UTC offset, 'pit' and 'rtc' timers
    always present, and on x86 a third 'hpet' timer whose 'present'
    flag must equal ``hpet_present``.

    :param image_meta_dict: dict the ImageMeta is built from; its
        ``hw_time_hpet`` property (if any) drives the hpet state.
    :param hpet_present: expected value of the hpet timer's ``present``
        attribute on architectures that emit the timer at all.
    """
    self.flags(virt_type='kvm', group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    image_meta = objects.ImageMeta.from_dict(image_meta_dict)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    # The hpet timer is only emitted for x86 guests.
    hpet_map = {
        fields.Architecture.X86_64: True,
        fields.Architecture.I686: True,
        fields.Architecture.PPC: False,
        fields.Architecture.PPC64: False,
        fields.Architecture.ARMV7: False,
        fields.Architecture.AARCH64: False,
    }

    for guestarch, expect_hpet in hpet_map.items():
        with mock.patch.object(libvirt_driver.libvirt_utils,
                               'get_arch',
                               return_value=guestarch):
            cfg = drvr._get_guest_config(instance_ref, [],
                                         image_meta,
                                         disk_info)
            self.assertIsInstance(cfg.clock,
                                  vconfig.LibvirtConfigGuestClock)
            self.assertEqual(cfg.clock.offset, "utc")
            self.assertIsInstance(cfg.clock.timers[0],
                                  vconfig.LibvirtConfigGuestTimer)
            self.assertIsInstance(cfg.clock.timers[1],
                                  vconfig.LibvirtConfigGuestTimer)
            self.assertEqual(cfg.clock.timers[0].name, "pit")
            self.assertEqual(cfg.clock.timers[0].tickpolicy,
                             "delay")
            self.assertEqual(cfg.clock.timers[1].name, "rtc")
            self.assertEqual(cfg.clock.timers[1].tickpolicy,
                             "catchup")
            if expect_hpet:
                self.assertEqual(3, len(cfg.clock.timers))
                self.assertIsInstance(cfg.clock.timers[2],
                                      vconfig.LibvirtConfigGuestTimer)
                self.assertEqual('hpet', cfg.clock.timers[2].name)
                self.assertEqual(hpet_present,
                                 cfg.clock.timers[2].present)
            else:
                self.assertEqual(2, len(cfg.clock.timers))

def test_get_guest_config_clock(self):
    """No hw_time_hpet property: the hpet timer defaults to absent."""
    self._test_get_guest_config_clock(self.test_image_meta, False)

def test_get_guest_config_clock_hpet_false(self):
    """hw_time_hpet=false keeps the hpet timer disabled."""
    self._test_get_guest_config_clock(
        {"disk_format": "raw",
         "properties": {"hw_time_hpet": "false"}}, False)

def test_get_guest_config_clock_hpet_true(self):
    """hw_time_hpet=true enables the hpet timer on x86 guests."""
    self._test_get_guest_config_clock(
        {"id": uuids.image_id,
         "disk_format": "raw",
         "properties": {"hw_time_hpet": "true"}}, True)

def test_get_guest_config_clock_hpet_invalid(self):
    """A non-boolean value of hw_time_hpet is treated as False."""
    self._test_get_guest_config_clock(
        {"disk_format": "raw",
         "properties": {"hw_time_hpet": "blah"}}, False)
|
@mock.patch.object(libvirt_utils, 'get_arch')
def test_get_guest_config_windows_timer(self, mock_get_arch):
    """Windows guests get a localtime clock with pit/rtc, a disabled
    hpet and an enabled hypervclock timer, plus the HyperV feature
    element alongside ACPI and APIC.
    """
    mock_get_arch.return_value = fields.Architecture.I686
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    instance_ref['os_type'] = 'windows'
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    cfg = drvr._get_guest_config(instance_ref,
                                 _fake_network_info(self, 1),
                                 image_meta, disk_info)

    self.assertIsInstance(cfg.clock,
                          vconfig.LibvirtConfigGuestClock)
    # Windows expects the RTC in local time, not UTC.
    self.assertEqual(cfg.clock.offset, "localtime")

    self.assertEqual(4, len(cfg.clock.timers), cfg.clock.timers)
    self.assertEqual("pit", cfg.clock.timers[0].name)
    self.assertEqual("rtc", cfg.clock.timers[1].name)
    self.assertEqual("hpet", cfg.clock.timers[2].name)
    self.assertFalse(cfg.clock.timers[2].present)
    self.assertEqual("hypervclock", cfg.clock.timers[3].name)
    self.assertTrue(cfg.clock.timers[3].present)

    self.assertEqual(3, len(cfg.features))
    self.assertIsInstance(cfg.features[0],
                          vconfig.LibvirtConfigGuestFeatureACPI)
    self.assertIsInstance(cfg.features[1],
                          vconfig.LibvirtConfigGuestFeatureAPIC)
    self.assertIsInstance(cfg.features[2],
                          vconfig.LibvirtConfigGuestFeatureHyperV)
|
@mock.patch.object(host.Host, 'has_min_version',
                   new=mock.Mock(return_value=True))
def _test_get_guest_config_windows_hyperv(
        self, flavor=None, image_meta=None, hvid_hidden=False):
    """Common body for the Windows/HyperV feature tests.

    Builds the guest config for a Windows instance and checks the
    HyperV enlightenments.  When ``hvid_hidden`` is True (requested
    via flavor extra_spec or image property by the caller) the config
    must additionally contain the KvmHidden feature and spoof the
    HyperV vendor id, so Nvidia drivers work with passthrough GPUs.

    :param flavor: optional flavor to attach to the instance (used to
        carry the hide_hypervisor_id extra_spec).
    :param image_meta: optional ImageMeta; defaults to the standard
        test image (used to carry img_hide_hypervisor_id).
    :param hvid_hidden: whether the hypervisor id is expected hidden.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    instance_ref['os_type'] = 'windows'
    if flavor is not None:
        instance_ref.flavor = flavor
    if image_meta is None:
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    cfg = drvr._get_guest_config(instance_ref,
                                 _fake_network_info(self, 1),
                                 image_meta, disk_info)

    self.assertIsInstance(cfg.clock,
                          vconfig.LibvirtConfigGuestClock)
    self.assertEqual(cfg.clock.offset, "localtime")

    # Hiding the hypervisor id adds the KvmHidden feature element.
    num_features = 4 if hvid_hidden else 3
    self.assertEqual(num_features, len(cfg.features))
    self.assertIsInstance(cfg.features[0],
                          vconfig.LibvirtConfigGuestFeatureACPI)
    self.assertIsInstance(cfg.features[1],
                          vconfig.LibvirtConfigGuestFeatureAPIC)
    self.assertIsInstance(cfg.features[2],
                          vconfig.LibvirtConfigGuestFeatureHyperV)
    if hvid_hidden:
        self.assertIsInstance(cfg.features[3],
                              vconfig.LibvirtConfigGuestFeatureKvmHidden)

    self.assertTrue(cfg.features[2].relaxed)
    self.assertTrue(cfg.features[2].spinlocks)
    self.assertEqual(8191, cfg.features[2].spinlock_retries)
    self.assertTrue(cfg.features[2].vapic)
    # The HyperV vendor id must be spoofed too, otherwise Windows
    # guests still expose the HyperV signature even with KVM hidden.
    self.assertEqual(hvid_hidden, cfg.features[2].vendorid_spoof)
|
def test_get_guest_config_windows_hyperv_feature2(self):
    # Default case: HyperV enlightenments enabled, hypervisor id
    # not hidden.
    self._test_get_guest_config_windows_hyperv()
|
def test_get_guest_config_windows_hyperv_all_hide_flv(self):
    """Like test_get_guest_config_windows_hyperv_feature2, but the
    HyperV signature is hidden via the flavor extra_spec
    "hide_hypervisor_id".
    """
    # Hiding works for kvm (the default, covered elsewhere) and qemu.
    self.flags(virt_type='qemu', group='libvirt')
    hide_flavor = fake_flavor.fake_flavor_obj(
        self.context,
        extra_specs={"hide_hypervisor_id": "true"},
        expected_attrs={"extra_specs"})

    self._test_get_guest_config_windows_hyperv(
        flavor=hide_flavor, hvid_hidden=True)
|
def test_get_guest_config_windows_hyperv_all_hide_img(self):
    """Like test_get_guest_config_windows_hyperv_feature2, but the
    HyperV signature is hidden via the image property
    "img_hide_hypervisor_id".
    """
    hide_image = objects.ImageMeta.from_dict({
        "disk_format": "raw",
        "properties": {"img_hide_hypervisor_id": "true"},
    })

    self._test_get_guest_config_windows_hyperv(
        image_meta=hide_image, hvid_hidden=True)
|
def test_get_guest_config_windows_hyperv_all_hide_flv_img(self):
    """Like test_get_guest_config_windows_hyperv_feature2, but the
    HyperV signature is hidden via both the flavor extra_spec
    "hide_hypervisor_id" and the image property
    "img_hide_hypervisor_id" at the same time.
    """
    self.flags(virt_type='qemu', group='libvirt')

    hide_flavor = fake_flavor.fake_flavor_obj(
        self.context,
        extra_specs={"hide_hypervisor_id": "true"},
        expected_attrs={"extra_specs"})
    hide_image = objects.ImageMeta.from_dict({
        "disk_format": "raw",
        "properties": {"img_hide_hypervisor_id": "true"},
    })

    self._test_get_guest_config_windows_hyperv(
        flavor=hide_flavor, image_meta=hide_image, hvid_hidden=True)
|
def test_get_guest_config_with_two_nics(self):
    """Two network interfaces produce two GuestInterface devices in the
    expected position within the device list.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    cfg = drvr._get_guest_config(instance_ref,
                                 _fake_network_info(self, 2),
                                 image_meta, disk_info)
    self.assertEqual(2, len(cfg.features))
    self.assertIsInstance(cfg.features[0],
                          vconfig.LibvirtConfigGuestFeatureACPI)
    self.assertIsInstance(cfg.features[1],
                          vconfig.LibvirtConfigGuestFeatureAPIC)
    self.assertEqual(cfg.memory, instance_ref.flavor.memory_mb * units.Ki)
    self.assertEqual(cfg.vcpus, instance_ref.flavor.vcpus)
    self.assertEqual(cfg.os_type, fields.VMMode.HVM)
    self.assertEqual(cfg.os_boot_dev, ["hd"])
    self.assertIsNone(cfg.os_root)
    # Devices: 2 disks, 2 NICs, serial, input, graphics, video, balloon.
    self.assertEqual(len(cfg.devices), 9)
    self.assertIsInstance(cfg.devices[0],
                          vconfig.LibvirtConfigGuestDisk)
    self.assertIsInstance(cfg.devices[1],
                          vconfig.LibvirtConfigGuestDisk)
    self.assertIsInstance(cfg.devices[2],
                          vconfig.LibvirtConfigGuestInterface)
    self.assertIsInstance(cfg.devices[3],
                          vconfig.LibvirtConfigGuestInterface)
    self.assertIsInstance(cfg.devices[4],
                          vconfig.LibvirtConfigGuestSerial)
    self.assertIsInstance(cfg.devices[5],
                          vconfig.LibvirtConfigGuestInput)
    self.assertIsInstance(cfg.devices[6],
                          vconfig.LibvirtConfigGuestGraphics)
    self.assertIsInstance(cfg.devices[7],
                          vconfig.LibvirtConfigGuestVideo)
    self.assertIsInstance(cfg.devices[8],
                          vconfig.LibvirtConfigMemoryBalloon)
|
def test_get_guest_config_bug_1118829(self):
    """Regression test for bug 1118829: building a uml guest config
    with an empty block_device_info must still set the instance's
    root_device_name.
    """
    self.flags(virt_type='uml', group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)

    # Hand-built disk_info mapping with a virtio root disk on vda.
    disk_info = {'disk_bus': 'virtio',
                 'cdrom_bus': 'ide',
                 'mapping': {u'vda': {'bus': 'virtio',
                                      'type': 'disk',
                                      'dev': u'vda'},
                             'root': {'bus': 'virtio',
                                      'type': 'disk',
                                      'dev': 'vda'}}}

    # NOTE(jdg): For this specific test leave this blank
    # This will exercise the failed code path still,
    # and won't require fakes and stubs of the iscsi discovery
    block_device_info = {}
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    drvr._get_guest_config(instance_ref, [], image_meta, disk_info,
                           None, block_device_info)
    self.assertEqual(instance_ref['root_device_name'], '/dev/vda')
|
def test_get_guest_config_with_root_device_name(self):
    """An explicit root_device_name in block_device_info is propagated
    to the uml guest's os_root.
    """
    self.flags(virt_type='uml', group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

    block_device_info = {'root_device_name': '/dev/vdb'}
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta,
                                        block_device_info)
    cfg = drvr._get_guest_config(instance_ref, [],
                                 image_meta, disk_info,
                                 None, block_device_info)
    # uml guests get no ACPI/APIC features and boot from os_root.
    self.assertEqual(0, len(cfg.features))
    self.assertEqual(cfg.memory, instance_ref.flavor.memory_mb * units.Ki)
    self.assertEqual(cfg.vcpus, instance_ref.flavor.vcpus)
    self.assertEqual(cfg.os_type, "uml")
    self.assertEqual(cfg.os_boot_dev, [])
    self.assertEqual(cfg.os_root, '/dev/vdb')
    self.assertEqual(len(cfg.devices), 3)
    self.assertIsInstance(cfg.devices[0],
                          vconfig.LibvirtConfigGuestDisk)
    self.assertIsInstance(cfg.devices[1],
                          vconfig.LibvirtConfigGuestDisk)
    self.assertIsInstance(cfg.devices[2],
                          vconfig.LibvirtConfigGuestConsole)
|
def test_has_uefi_support_not_supported_arch(self):
    """UEFI support is reported absent on an unsupported host arch."""
    self._stub_host_capabilities_cpu_arch(fields.Architecture.ALPHA)
    driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    self.assertFalse(driver._has_uefi_support())
|
@mock.patch('os.path.exists', return_value=False)
def test_has_uefi_support_with_no_loader_existed(self, mock_exist):
    """UEFI support is reported absent when no loader file exists."""
    driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    self.assertFalse(driver._has_uefi_support())
|
@mock.patch('os.path.exists', return_value=True)
def test_has_uefi_support(self, mock_exists):
    """UEFI is supported when the loader exists, the arch supports it
    and libvirt is new enough.

    The mock parameter was previously misnamed ``mock_has_version``
    even though the decorator patches ``os.path.exists``; it is
    injected positionally by mock.patch, so renaming it is safe.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

    self._stub_host_capabilities_cpu_arch(fields.Architecture.X86_64)

    with mock.patch.object(drvr._host,
                           'has_min_version', return_value=True):
        self.assertTrue(drvr._has_uefi_support())
|
def test_get_guest_config_with_uefi(self):
    """The hw_firmware_type=uefi image property selects the pflash
    OS loader when the host reports UEFI support.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

    image_meta = objects.ImageMeta.from_dict({
        "disk_format": "raw",
        "properties": {"hw_firmware_type": "uefi"}})
    instance_ref = objects.Instance(**self.test_instance)

    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    with mock.patch.object(drvr, "_has_uefi_support",
                           return_value=True) as mock_support:
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        mock_support.assert_called_once_with()
        self.assertEqual(cfg.os_loader_type, "pflash")
|
def test_get_guest_config_with_block_device(self):
    """Attached volumes from block_device_mapping appear as guest disks
    with the requested target device names, and the BDMs are saved.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

    instance_ref = objects.Instance(**self.test_instance)
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    conn_info = {'driver_volume_type': 'fake', 'data': {}}
    bdms = block_device_obj.block_device_make_list_from_dicts(
        self.context, [
            fake_block_device.FakeDbBlockDeviceDict(
                {'id': 1,
                 'source_type': 'volume', 'destination_type': 'volume',
                 'device_name': '/dev/vdc'}),
            fake_block_device.FakeDbBlockDeviceDict(
                {'id': 2,
                 'source_type': 'volume', 'destination_type': 'volume',
                 'device_name': '/dev/vdd'}),
        ]
    )
    info = {'block_device_mapping': driver_block_device.convert_volumes(
        bdms
    )}
    info['block_device_mapping'][0]['connection_info'] = conn_info
    info['block_device_mapping'][1]['connection_info'] = conn_info

    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta,
                                        info)
    # Patch save to avoid hitting the database from the BDM objects.
    with mock.patch.object(
            driver_block_device.DriverVolumeBlockDevice, 'save'
    ) as mock_save:
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info,
                                     None, info)
        # Devices 0/1 are the image disks; 2/3 are the volumes.
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertEqual(cfg.devices[2].target_dev, 'vdc')
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertEqual(cfg.devices[3].target_dev, 'vdd')
        mock_save.assert_called_with()
|
def test_get_guest_config_lxc_with_attached_volume(self):
    """For an lxc guest booted from volume, the non-boot volumes show
    up as guest disks with their requested target devices.
    """
    self.flags(virt_type='lxc', group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

    instance_ref = objects.Instance(**self.test_instance)
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    conn_info = {'driver_volume_type': 'fake', 'data': {}}
    bdms = block_device_obj.block_device_make_list_from_dicts(
        self.context, [
            # boot_index 0 makes the first volume the boot device.
            fake_block_device.FakeDbBlockDeviceDict(
                {'id': 1,
                 'source_type': 'volume', 'destination_type': 'volume',
                 'boot_index': 0}),
            fake_block_device.FakeDbBlockDeviceDict(
                {'id': 2,
                 'source_type': 'volume', 'destination_type': 'volume',
                 }),
            fake_block_device.FakeDbBlockDeviceDict(
                {'id': 3,
                 'source_type': 'volume', 'destination_type': 'volume',
                 }),
        ]
    )
    info = {'block_device_mapping': driver_block_device.convert_volumes(
        bdms
    )}

    info['block_device_mapping'][0]['connection_info'] = conn_info
    info['block_device_mapping'][1]['connection_info'] = conn_info
    info['block_device_mapping'][2]['connection_info'] = conn_info
    info['block_device_mapping'][0]['mount_device'] = '/dev/vda'
    info['block_device_mapping'][1]['mount_device'] = '/dev/vdc'
    info['block_device_mapping'][2]['mount_device'] = '/dev/vdd'
    # Patch save to avoid hitting the database from the BDM objects.
    with mock.patch.object(
            driver_block_device.DriverVolumeBlockDevice, 'save'
    ) as mock_save:
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta,
                                            info)
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info,
                                     None, info)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertEqual(cfg.devices[1].target_dev, 'vdc')
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertEqual(cfg.devices[2].target_dev, 'vdd')
        mock_save.assert_called_with()
|
def test_get_guest_config_with_configdrive(self):
    """The config drive is attached as the first free drive letter on
    the appropriate bus for the host architecture.
    """
    # It's necessary to check if the architecture is power, because
    # power doesn't have support to ide, and so libvirt translate
    # all ide calls to scsi

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

    # make configdrive.required_by() return True
    instance_ref['config_drive'] = True

    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    cfg = drvr._get_guest_config(instance_ref, [],
                                 image_meta, disk_info)

    # Pick the first drive letter on the bus that is available
    # as the config drive. Delete the last device hardcode as
    # the config drive here.

    # Power/ARM use scsi (sda); everything else falls back to ide (hda).
    expect = {"ppc": "sda", "ppc64": "sda",
              "ppc64le": "sda", "aarch64": "sda"}
    disk = expect.get(blockinfo.libvirt_utils.get_arch({}), "hda")
    self.assertIsInstance(cfg.devices[2],
                          vconfig.LibvirtConfigGuestDisk)
    self.assertEqual(cfg.devices[2].target_dev, disk)
|
def test_get_guest_config_default_with_virtio_scsi_bus(self):
    # Default image backend; the shared helper builds and checks
    # the virtio-scsi guest config.
    self._test_get_guest_config_with_virtio_scsi_bus()
|
@mock.patch.object(rbd_utils.RBDDriver, 'get_mon_addrs')
@mock.patch.object(rbd_utils, 'rbd')
def test_get_guest_config_rbd_with_virtio_scsi_bus(
        self, mock_rdb, mock_get_mon_addrs):
    """Same virtio-scsi checks with the rbd image backend; the rbd
    library and monitor lookup are mocked out.
    """
    self.flags(images_type='rbd', group='libvirt')
    mock_get_mon_addrs.return_value = ("host", 9876)
    self._test_get_guest_config_with_virtio_scsi_bus()
|
def _test_get_guest_config_with_virtio_scsi_bus(self):
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
|
|
image_meta = objects.ImageMeta.from_dict({
|
|
"disk_format": "raw",
|
|
"properties": {"hw_scsi_model": "virtio-scsi",
|
|
"hw_disk_bus": "scsi"}})
|
|
instance_ref = objects.Instance(**self.test_instance)
|
|
|
|
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
|
|
instance_ref,
|
|
image_meta,
|
|
[])
|
|
cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info)
|
|
self.assertIsInstance(cfg.devices[0],
|
|
vconfig.LibvirtConfigGuestDisk)
|
|
self.assertEqual(0, cfg.devices[0].device_addr.unit)
|
|
self.assertIsInstance(cfg.devices[1],
|
|
vconfig.LibvirtConfigGuestDisk)
|
|
self.assertEqual(1, cfg.devices[1].device_addr.unit)
|
|
self.assertIsInstance(cfg.devices[2],
|
|
vconfig.LibvirtConfigGuestController)
|
|
self.assertEqual(cfg.devices[2].model, 'virtio-scsi')
|
|
|
|
    def test_get_guest_config_with_virtio_scsi_bus_bdm(self):
        """Two attached volumes on a virtio-scsi bus continue the SCSI unit
        numbering after the local (0) and ephemeral (1) disks, and the
        virtio-scsi controller is appended last.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "raw",
            "properties": {"hw_scsi_model": "virtio-scsi",
                           "hw_disk_bus": "scsi"}})
        instance_ref = objects.Instance(**self.test_instance)
        conn_info = {'driver_volume_type': 'fake', 'data': {}}
        bdms = block_device_obj.block_device_make_list_from_dicts(
            self.context, [
                fake_block_device.FakeDbBlockDeviceDict(
                    {'id': 1,
                     'source_type': 'volume', 'destination_type': 'volume',
                     'device_name': '/dev/sdc', 'disk_bus': 'scsi'}),
                fake_block_device.FakeDbBlockDeviceDict(
                    {'id': 2,
                     'source_type': 'volume', 'destination_type': 'volume',
                     'device_name': '/dev/sdd', 'disk_bus': 'scsi'}),
            ]
        )
        bd_info = {
            'block_device_mapping': driver_block_device.convert_volumes(bdms)}
        bd_info['block_device_mapping'][0]['connection_info'] = conn_info
        bd_info['block_device_mapping'][1]['connection_info'] = conn_info

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta,
                                            bd_info)
        # Patch save() so attaching the volumes does not hit the DB.
        with mock.patch.object(
                driver_block_device.DriverVolumeBlockDevice, 'save'
        ) as mock_save:
            cfg = drvr._get_guest_config(instance_ref, [], image_meta,
                                         disk_info, [], bd_info)
            # Devices 0/1 are the local and ephemeral disks; the two
            # volumes follow with the next free unit numbers (2 and 3).
            self.assertIsInstance(cfg.devices[2],
                                  vconfig.LibvirtConfigGuestDisk)
            self.assertEqual(cfg.devices[2].target_dev, 'sdc')
            self.assertEqual(cfg.devices[2].target_bus, 'scsi')
            self.assertEqual(2, cfg.devices[2].device_addr.unit)
            self.assertIsInstance(cfg.devices[3],
                                  vconfig.LibvirtConfigGuestDisk)
            self.assertEqual(cfg.devices[3].target_dev, 'sdd')
            self.assertEqual(cfg.devices[3].target_bus, 'scsi')
            self.assertEqual(3, cfg.devices[3].device_addr.unit)
            self.assertIsInstance(cfg.devices[4],
                                  vconfig.LibvirtConfigGuestController)
            self.assertEqual(cfg.devices[4].model, 'virtio-scsi')
            mock_save.assert_called_with()
|
|
|
|
    def test_get_guest_config_one_scsi_volume_with_configdrive(self):
        """Tests that the unit attribute is only incremented for block devices
        that have a scsi bus. Unit numbering should begin at 0 since we are not
        booting from volume.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "raw",
            "properties": {"hw_scsi_model": "virtio-scsi",
                           "hw_disk_bus": "scsi"}})
        instance_ref = objects.Instance(**self.test_instance)
        # config_drive is a string flag on the instance record.
        instance_ref.config_drive = 'True'
        conn_info = {'driver_volume_type': 'fake'}
        bdms = block_device_obj.block_device_make_list_from_dicts(
            self.context, [
                fake_block_device.FakeDbBlockDeviceDict(
                    {'id': 1,
                     'source_type': 'volume', 'destination_type': 'volume',
                     'device_name': '/dev/sdc', 'disk_bus': 'scsi'}),
            ]
        )
        bd_info = {
            'block_device_mapping': driver_block_device.convert_volumes(bdms)}
        bd_info['block_device_mapping'][0]['connection_info'] = conn_info

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta,
                                            bd_info)
        # Patch save() so attaching the volume does not hit the DB.
        with mock.patch.object(
                driver_block_device.DriverVolumeBlockDevice, 'save'):
            cfg = drvr._get_guest_config(instance_ref, [], image_meta,
                                         disk_info, [], bd_info)

            # The device order is determined by the order that devices are
            # appended in _get_guest_storage_config in the driver.

            # The first device will be the instance's local disk (since we're
            # not booting from volume). It should begin unit numbering at 0.
            self.assertIsInstance(cfg.devices[0],
                                  vconfig.LibvirtConfigGuestDisk)
            self.assertIn('disk', cfg.devices[0].source_path)
            self.assertEqual('sda', cfg.devices[0].target_dev)
            self.assertEqual('scsi', cfg.devices[0].target_bus)
            self.assertEqual(0, cfg.devices[0].device_addr.unit)

            # The second device will be the ephemeral disk
            # (the flavor in self.test_instance has ephemeral_gb > 0).
            # It should have the next unit number of 1.
            self.assertIsInstance(cfg.devices[1],
                                  vconfig.LibvirtConfigGuestDisk)
            self.assertIn('disk.local', cfg.devices[1].source_path)
            self.assertEqual('sdb', cfg.devices[1].target_dev)
            self.assertEqual('scsi', cfg.devices[1].target_bus)
            self.assertEqual(1, cfg.devices[1].device_addr.unit)

            # This is the config drive. It should not have unit number set
            # because it is on the IDE bus, not SCSI.
            self.assertIsInstance(cfg.devices[2],
                                  vconfig.LibvirtConfigGuestDisk)
            self.assertIn('disk.config', cfg.devices[2].source_path)
            self.assertEqual('hda', cfg.devices[2].target_dev)
            self.assertEqual('ide', cfg.devices[2].target_bus)
            self.assertIsNone(cfg.devices[2].device_addr)

            # And this is the attached volume.
            self.assertIsInstance(cfg.devices[3],
                                  vconfig.LibvirtConfigGuestDisk)
            self.assertEqual('sdc', cfg.devices[3].target_dev)
            self.assertEqual('scsi', cfg.devices[3].target_bus)
            self.assertEqual(2, cfg.devices[3].device_addr.unit)
|
|
|
|
    def test_get_guest_config_boot_from_volume_with_configdrive(self):
        """Tests that the unit attribute is only incremented for block devices
        that have a scsi bus and that the bootable volume in a boot-from-volume
        scenario always has the unit set to 0.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "raw",
            "properties": {"hw_scsi_model": "virtio-scsi",
                           "hw_disk_bus": "scsi"}})
        instance_ref = objects.Instance(**self.test_instance)
        # config_drive is a string flag on the instance record.
        instance_ref.config_drive = 'True'
        conn_info = {'driver_volume_type': 'fake'}
        bdms = block_device_obj.block_device_make_list_from_dicts(
            self.context, [
                # This is the boot volume (boot_index = 0).
                fake_block_device.FakeDbBlockDeviceDict(
                    {'id': 1,
                     'source_type': 'volume', 'destination_type': 'volume',
                     'device_name': '/dev/sda', 'boot_index': 0}),
                # This is just another attached volume.
                fake_block_device.FakeDbBlockDeviceDict(
                    {'id': 2,
                     'source_type': 'volume', 'destination_type': 'volume',
                     'device_name': '/dev/sdc', 'disk_bus': 'scsi'}),
            ]
        )
        bd_info = {
            'block_device_mapping': driver_block_device.convert_volumes(bdms)}
        bd_info['block_device_mapping'][0]['connection_info'] = conn_info
        bd_info['block_device_mapping'][1]['connection_info'] = conn_info

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta,
                                            bd_info)
        # Patch save() so attaching the volumes does not hit the DB.
        with mock.patch.object(
                driver_block_device.DriverVolumeBlockDevice, 'save'):
            cfg = drvr._get_guest_config(instance_ref, [], image_meta,
                                         disk_info, [], bd_info)

            # The device order is determined by the order that devices are
            # appended in _get_guest_storage_config in the driver.

            # The first device will be the ephemeral disk
            # (the flavor in self.test_instance has ephemeral_gb > 0).
            # It should begin unit numbering at 1 because 0 is reserved for the
            # boot volume for boot-from-volume.
            self.assertIsInstance(cfg.devices[0],
                                  vconfig.LibvirtConfigGuestDisk)
            self.assertIn('disk.local', cfg.devices[0].source_path)
            self.assertEqual('sdb', cfg.devices[0].target_dev)
            self.assertEqual('scsi', cfg.devices[0].target_bus)
            self.assertEqual(1, cfg.devices[0].device_addr.unit)

            # The second device will be the config drive. It should not have a
            # unit number set because it is on the IDE bus, not SCSI.
            self.assertIsInstance(cfg.devices[1],
                                  vconfig.LibvirtConfigGuestDisk)
            self.assertIn('disk.config', cfg.devices[1].source_path)
            self.assertEqual('hda', cfg.devices[1].target_dev)
            self.assertEqual('ide', cfg.devices[1].target_bus)
            self.assertIsNone(cfg.devices[1].device_addr)

            # The third device will be the boot volume. It should have a
            # unit number of 0.
            self.assertIsInstance(cfg.devices[2],
                                  vconfig.LibvirtConfigGuestDisk)
            self.assertEqual('sda', cfg.devices[2].target_dev)
            self.assertEqual('scsi', cfg.devices[2].target_bus)
            self.assertEqual(0, cfg.devices[2].device_addr.unit)

            # The fourth device will be the other attached volume.
            self.assertIsInstance(cfg.devices[3],
                                  vconfig.LibvirtConfigGuestDisk)
            self.assertEqual('sdc', cfg.devices[3].target_dev)
            self.assertEqual('scsi', cfg.devices[3].target_bus)
            self.assertEqual(2, cfg.devices[3].device_addr.unit)
|
|
|
|
def _get_guest_config_with_graphics(self):
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
instance_ref = objects.Instance(**self.test_instance)
|
|
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
|
|
|
|
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
|
|
instance_ref,
|
|
image_meta)
|
|
cfg = drvr._get_guest_config(instance_ref, [],
|
|
image_meta, disk_info)
|
|
return cfg
|
|
|
|
def test_get_guest_config_with_vnc(self):
|
|
self.flags(enabled=True,
|
|
server_listen='10.0.0.1',
|
|
keymap='en-ie',
|
|
group='vnc')
|
|
self.flags(virt_type='kvm', group='libvirt')
|
|
self.flags(pointer_model='ps2mouse')
|
|
self.flags(enabled=False, group='spice')
|
|
|
|
cfg = self._get_guest_config_with_graphics()
|
|
|
|
self.assertEqual(len(cfg.devices), 6)
|
|
self.assertIsInstance(cfg.devices[0],
|
|
vconfig.LibvirtConfigGuestDisk)
|
|
self.assertIsInstance(cfg.devices[1],
|
|
vconfig.LibvirtConfigGuestDisk)
|
|
self.assertIsInstance(cfg.devices[2],
|
|
vconfig.LibvirtConfigGuestSerial)
|
|
self.assertIsInstance(cfg.devices[3],
|
|
vconfig.LibvirtConfigGuestGraphics)
|
|
self.assertIsInstance(cfg.devices[4],
|
|
vconfig.LibvirtConfigGuestVideo)
|
|
self.assertIsInstance(cfg.devices[5],
|
|
vconfig.LibvirtConfigMemoryBalloon)
|
|
|
|
self.assertEqual(cfg.devices[3].type, 'vnc')
|
|
self.assertEqual(cfg.devices[3].keymap, 'en-ie')
|
|
self.assertEqual(cfg.devices[3].listen, '10.0.0.1')
|
|
|
|
def test_get_guest_config_with_vnc_and_tablet(self):
|
|
self.flags(enabled=True, group='vnc')
|
|
self.flags(virt_type='kvm',
|
|
use_usb_tablet=True,
|
|
group='libvirt')
|
|
self.flags(enabled=False, group='spice')
|
|
|
|
cfg = self._get_guest_config_with_graphics()
|
|
|
|
self.assertEqual(len(cfg.devices), 7)
|
|
self.assertIsInstance(cfg.devices[0],
|
|
vconfig.LibvirtConfigGuestDisk)
|
|
self.assertIsInstance(cfg.devices[1],
|
|
vconfig.LibvirtConfigGuestDisk)
|
|
self.assertIsInstance(cfg.devices[2],
|
|
vconfig.LibvirtConfigGuestSerial)
|
|
self.assertIsInstance(cfg.devices[3],
|
|
vconfig.LibvirtConfigGuestInput)
|
|
self.assertIsInstance(cfg.devices[4],
|
|
vconfig.LibvirtConfigGuestGraphics)
|
|
self.assertIsInstance(cfg.devices[5],
|
|
vconfig.LibvirtConfigGuestVideo)
|
|
self.assertIsInstance(cfg.devices[6],
|
|
vconfig.LibvirtConfigMemoryBalloon)
|
|
|
|
self.assertEqual(cfg.devices[3].type, "tablet")
|
|
self.assertEqual(cfg.devices[4].type, "vnc")
|
|
|
|
def test_get_guest_config_with_spice_and_tablet(self):
|
|
self.flags(enabled=False, group='vnc')
|
|
self.flags(virt_type='kvm',
|
|
use_usb_tablet=True,
|
|
group='libvirt')
|
|
self.flags(enabled=True,
|
|
agent_enabled=False,
|
|
server_listen='10.0.0.1',
|
|
keymap='en-ie',
|
|
group='spice')
|
|
|
|
cfg = self._get_guest_config_with_graphics()
|
|
|
|
self.assertEqual(len(cfg.devices), 7)
|
|
self.assertIsInstance(cfg.devices[0],
|
|
vconfig.LibvirtConfigGuestDisk)
|
|
self.assertIsInstance(cfg.devices[1],
|
|
vconfig.LibvirtConfigGuestDisk)
|
|
self.assertIsInstance(cfg.devices[2],
|
|
vconfig.LibvirtConfigGuestSerial)
|
|
self.assertIsInstance(cfg.devices[3],
|
|
vconfig.LibvirtConfigGuestInput)
|
|
self.assertIsInstance(cfg.devices[4],
|
|
vconfig.LibvirtConfigGuestGraphics)
|
|
self.assertIsInstance(cfg.devices[5],
|
|
vconfig.LibvirtConfigGuestVideo)
|
|
self.assertIsInstance(cfg.devices[6],
|
|
vconfig.LibvirtConfigMemoryBalloon)
|
|
|
|
self.assertEqual(cfg.devices[3].type, 'tablet')
|
|
self.assertEqual(cfg.devices[4].type, 'spice')
|
|
self.assertEqual(cfg.devices[4].keymap, 'en-ie')
|
|
self.assertEqual(cfg.devices[4].listen, '10.0.0.1')
|
|
|
|
def test_get_guest_config_with_spice_and_agent(self):
|
|
self.flags(enabled=False, group='vnc')
|
|
self.flags(virt_type='kvm',
|
|
use_usb_tablet=True,
|
|
group='libvirt')
|
|
self.flags(enabled=True,
|
|
agent_enabled=True,
|
|
group='spice')
|
|
|
|
cfg = self._get_guest_config_with_graphics()
|
|
|
|
expect = {"ppc": "vga", "ppc64": "vga",
|
|
"ppc64le": "vga", "aarch64": "virtio"}
|
|
video_type = expect.get(blockinfo.libvirt_utils.get_arch({}), "qxl")
|
|
self.assertEqual(len(cfg.devices), 7)
|
|
self.assertIsInstance(cfg.devices[0],
|
|
vconfig.LibvirtConfigGuestDisk)
|
|
self.assertIsInstance(cfg.devices[1],
|
|
vconfig.LibvirtConfigGuestDisk)
|
|
self.assertIsInstance(cfg.devices[2],
|
|
vconfig.LibvirtConfigGuestSerial)
|
|
self.assertIsInstance(cfg.devices[3],
|
|
vconfig.LibvirtConfigGuestChannel)
|
|
self.assertIsInstance(cfg.devices[4],
|
|
vconfig.LibvirtConfigGuestGraphics)
|
|
self.assertIsInstance(cfg.devices[5],
|
|
vconfig.LibvirtConfigGuestVideo)
|
|
self.assertIsInstance(cfg.devices[6],
|
|
vconfig.LibvirtConfigMemoryBalloon)
|
|
|
|
self.assertEqual(cfg.devices[3].target_name, "com.redhat.spice.0")
|
|
self.assertEqual(cfg.devices[3].type, 'spicevmc')
|
|
self.assertEqual(cfg.devices[4].type, "spice")
|
|
self.assertEqual(cfg.devices[5].type, video_type)
|
|
|
|
def test_get_guest_config_with_vnc_no_keymap(self):
|
|
self.flags(virt_type='kvm', group='libvirt')
|
|
self.flags(enabled=True, keymap=None, group='vnc')
|
|
self.flags(enabled=False, group='spice')
|
|
|
|
cfg = self._get_guest_config_with_graphics()
|
|
|
|
for device in cfg.devices:
|
|
if device.root_name == 'graphics':
|
|
self.assertIsInstance(device,
|
|
vconfig.LibvirtConfigGuestGraphics)
|
|
self.assertEqual('vnc', device.type)
|
|
self.assertIsNone(device.keymap)
|
|
|
|
def test_get_guest_config_with_spice_no_keymap(self):
|
|
self.flags(virt_type='kvm', group='libvirt')
|
|
self.flags(enabled=True, keymap=None, group='spice')
|
|
self.flags(enabled=False, group='vnc')
|
|
|
|
cfg = self._get_guest_config_with_graphics()
|
|
|
|
for device in cfg.devices:
|
|
if device.root_name == 'graphics':
|
|
self.assertIsInstance(device,
|
|
vconfig.LibvirtConfigGuestGraphics)
|
|
self.assertEqual('spice', device.type)
|
|
self.assertIsNone(device.keymap)
|
|
|
|
    @mock.patch.object(host.Host, 'get_guest')
    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       '_get_serial_ports_from_guest')
    @mock.patch('nova.console.serial.acquire_port')
    @mock.patch('nova.virt.hardware.get_number_of_serial_ports',
                return_value=1)
    @mock.patch.object(libvirt_driver.libvirt_utils, 'get_arch',)
    def test_create_serial_console_devices_based_on_arch(self, mock_get_arch,
                                                         mock_get_port_number,
                                                         mock_acquire_port,
                                                         mock_ports,
                                                         mock_guest):
        # _create_consoles emits a <serial> device on x86_64 but a
        # <console> device on s390/s390x; either way the device is TCP
        # backed when the serial console feature is enabled.
        self.flags(enabled=True, group='serial_console')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance = objects.Instance(**self.test_instance)

        expected = {
            fields.Architecture.X86_64: vconfig.LibvirtConfigGuestSerial,
            fields.Architecture.S390: vconfig.LibvirtConfigGuestConsole,
            fields.Architecture.S390X: vconfig.LibvirtConfigGuestConsole}

        for guest_arch, device_type in expected.items():
            # Re-point the mocked arch and start from a fresh guest so
            # each iteration checks exactly one generated device.
            mock_get_arch.return_value = guest_arch
            guest = vconfig.LibvirtConfigGuest()

            drvr._create_consoles(virt_type="kvm", guest_cfg=guest,
                                  instance=instance, flavor={},
                                  image_meta={})
            self.assertEqual(1, len(guest.devices))
            console_device = guest.devices[0]
            self.assertIsInstance(console_device, device_type)
            self.assertEqual("tcp", console_device.type)
|
|
|
|
    @mock.patch.object(host.Host, 'get_guest')
    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       '_get_serial_ports_from_guest')
    @mock.patch('nova.virt.hardware.get_number_of_serial_ports',
                return_value=4)
    @mock.patch.object(libvirt_driver.libvirt_utils, 'get_arch',
                       side_effect=[fields.Architecture.X86_64,
                                    fields.Architecture.S390,
                                    fields.Architecture.S390X])
    def test_create_serial_console_devices_with_limit_exceeded_based_on_arch(
            self, mock_get_arch, mock_get_port_number, mock_ports, mock_guest):
        # With 4 serial ports requested, x86_64 exceeds the per-guest
        # limit and raises, while s390/s390x accept the request.  The
        # get_arch side_effect list supplies one architecture per call,
        # so the three _create_consoles calls below each exercise a
        # different architecture in order.
        self.flags(enabled=True, group='serial_console')
        self.flags(virt_type="qemu", group='libvirt')
        flavor = 'fake_flavor'
        image_meta = objects.ImageMeta()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        guest = vconfig.LibvirtConfigGuest()
        instance = objects.Instance(**self.test_instance)
        # First call: X86_64 -> limit exceeded.
        self.assertRaises(exception.SerialPortNumberLimitExceeded,
                          drvr._create_consoles,
                          "kvm", guest, instance, flavor, image_meta)
        mock_get_arch.assert_called_with(image_meta)
        mock_get_port_number.assert_called_with(flavor,
                                                image_meta)

        # Second call: S390 -> accepted.
        drvr._create_consoles("kvm", guest, instance, flavor, image_meta)
        mock_get_arch.assert_called_with(image_meta)
        mock_get_port_number.assert_called_with(flavor,
                                                image_meta)

        # Third call: S390X -> accepted.
        drvr._create_consoles("kvm", guest, instance, flavor, image_meta)
        mock_get_arch.assert_called_with(image_meta)
        mock_get_port_number.assert_called_with(flavor,
                                                image_meta)
|
|
|
|
@mock.patch('nova.console.serial.acquire_port')
|
|
def test_get_guest_config_serial_console(self, acquire_port):
|
|
self.flags(enabled=True, group='serial_console')
|
|
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
instance_ref = objects.Instance(**self.test_instance)
|
|
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
|
|
|
|
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
|
|
instance_ref,
|
|
image_meta)
|
|
|
|
acquire_port.return_value = 11111
|
|
|
|
cfg = drvr._get_guest_config(instance_ref, [],
|
|
image_meta, disk_info)
|
|
self.assertEqual(7, len(cfg.devices))
|
|
self.assertIsInstance(cfg.devices[0],
|
|
vconfig.LibvirtConfigGuestDisk)
|
|
self.assertIsInstance(cfg.devices[1],
|
|
vconfig.LibvirtConfigGuestDisk)
|
|
self.assertIsInstance(cfg.devices[2],
|
|
vconfig.LibvirtConfigGuestSerial)
|
|
self.assertIsInstance(cfg.devices[3],
|
|
vconfig.LibvirtConfigGuestInput)
|
|
self.assertIsInstance(cfg.devices[4],
|
|
vconfig.LibvirtConfigGuestGraphics)
|
|
self.assertIsInstance(cfg.devices[5],
|
|
vconfig.LibvirtConfigGuestVideo)
|
|
self.assertIsInstance(cfg.devices[6],
|
|
vconfig.LibvirtConfigMemoryBalloon)
|
|
|
|
self.assertEqual("tcp", cfg.devices[2].type)
|
|
self.assertEqual(11111, cfg.devices[2].listen_port)
|
|
|
|
def test_get_guest_config_serial_console_through_flavor(self):
|
|
self.flags(enabled=True, group='serial_console')
|
|
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
|
|
instance_ref = objects.Instance(**self.test_instance)
|
|
instance_ref.flavor.extra_specs = {'hw:serial_port_count': 3}
|
|
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
|
|
|
|
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
|
|
instance_ref,
|
|
image_meta)
|
|
|
|
cfg = drvr._get_guest_config(instance_ref, [],
|
|
image_meta, disk_info)
|
|
self.assertEqual(9, len(cfg.devices))
|
|
self.assertIsInstance(cfg.devices[0],
|
|
vconfig.LibvirtConfigGuestDisk)
|
|
self.assertIsInstance(cfg.devices[1],
|
|
vconfig.LibvirtConfigGuestDisk)
|
|
self.assertIsInstance(cfg.devices[2],
|
|
vconfig.LibvirtConfigGuestSerial)
|
|
self.assertIsInstance(cfg.devices[3],
|
|
vconfig.LibvirtConfigGuestSerial)
|
|
self.assertIsInstance(cfg.devices[4],
|
|
vconfig.LibvirtConfigGuestSerial)
|
|
self.assertIsInstance(cfg.devices[5],
|
|
vconfig.LibvirtConfigGuestInput)
|
|
self.assertIsInstance(cfg.devices[6],
|
|
vconfig.LibvirtConfigGuestGraphics)
|
|
self.assertIsInstance(cfg.devices[7],
|
|
vconfig.LibvirtConfigGuestVideo)
|
|
self.assertIsInstance(cfg.devices[8],
|
|
vconfig.LibvirtConfigMemoryBalloon)
|
|
|
|
self.assertEqual("tcp", cfg.devices[2].type)
|
|
self.assertEqual("tcp", cfg.devices[3].type)
|
|
self.assertEqual("tcp", cfg.devices[4].type)
|
|
|
|
def test_get_guest_config_serial_console_invalid_flavor(self):
|
|
self.flags(enabled=True, group='serial_console')
|
|
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
|
|
instance_ref = objects.Instance(**self.test_instance)
|
|
instance_ref.flavor.extra_specs = {'hw:serial_port_count': "a"}
|
|
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
|
|
|
|
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
|
|
instance_ref,
|
|
image_meta)
|
|
|
|
self.assertRaises(
|
|
exception.ImageSerialPortNumberInvalid,
|
|
drvr._get_guest_config, instance_ref, [],
|
|
image_meta, disk_info)
|
|
|
|
def test_get_guest_config_serial_console_image_and_flavor(self):
|
|
self.flags(enabled=True, group='serial_console')
|
|
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
|
|
image_meta = objects.ImageMeta.from_dict({
|
|
"disk_format": "raw",
|
|
"properties": {"hw_serial_port_count": "3"}})
|
|
instance_ref = objects.Instance(**self.test_instance)
|
|
instance_ref.flavor.extra_specs = {'hw:serial_port_count': 4}
|
|
|
|
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
|
|
instance_ref,
|
|
image_meta)
|
|
cfg = drvr._get_guest_config(instance_ref, [], image_meta,
|
|
disk_info)
|
|
self.assertEqual(9, len(cfg.devices), cfg.devices)
|
|
self.assertIsInstance(cfg.devices[0],
|
|
vconfig.LibvirtConfigGuestDisk)
|
|
self.assertIsInstance(cfg.devices[1],
|
|
vconfig.LibvirtConfigGuestDisk)
|
|
self.assertIsInstance(cfg.devices[2],
|
|
vconfig.LibvirtConfigGuestSerial)
|
|
self.assertIsInstance(cfg.devices[3],
|
|
vconfig.LibvirtConfigGuestSerial)
|
|
self.assertIsInstance(cfg.devices[4],
|
|
vconfig.LibvirtConfigGuestSerial)
|
|
self.assertIsInstance(cfg.devices[5],
|
|
vconfig.LibvirtConfigGuestInput)
|
|
self.assertIsInstance(cfg.devices[6],
|
|
vconfig.LibvirtConfigGuestGraphics)
|
|
self.assertIsInstance(cfg.devices[7],
|
|
vconfig.LibvirtConfigGuestVideo)
|
|
self.assertIsInstance(cfg.devices[8],
|
|
vconfig.LibvirtConfigMemoryBalloon)
|
|
|
|
self.assertEqual("tcp", cfg.devices[2].type)
|
|
self.assertEqual("tcp", cfg.devices[3].type)
|
|
self.assertEqual("tcp", cfg.devices[4].type)
|
|
|
|
    @mock.patch.object(host.Host, 'has_min_version', return_value=True)
    @mock.patch('nova.console.serial.acquire_port')
    @mock.patch('nova.virt.hardware.get_number_of_serial_ports',
                return_value=1)
    @mock.patch.object(libvirt_driver.libvirt_utils, 'get_arch',)
    def test_guest_config_char_device_logd(self, mock_get_arch,
                                           mock_get_number_serial_ports,
                                           mock_acquire_port,
                                           mock_host_has_min_version):
        # Every console char device generated by _create_consoles must
        # carry a <log> sub-element pointing at the instance's
        # console.log with append disabled, regardless of arch,
        # virt_type or whether the serial console feature is enabled.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        def _test_consoles(arch_to_mock, serial_enabled,
                           expected_device_type, expected_device_cls,
                           virt_type='qemu'):
            # One sub-case: configure arch/serial flags, generate the
            # console device and verify its type, class and log config.
            guest_cfg = vconfig.LibvirtConfigGuest()
            mock_get_arch.return_value = arch_to_mock
            self.flags(enabled=serial_enabled, group='serial_console')
            instance = objects.Instance(**self.test_instance)

            drvr._create_consoles(virt_type, guest_cfg, instance=instance,
                                  flavor=None, image_meta=None)

            self.assertEqual(1, len(guest_cfg.devices))
            device = guest_cfg.devices[0]
            self.assertEqual(expected_device_type, device.type)
            self.assertIsInstance(device, expected_device_cls)
            self.assertIsInstance(device.log,
                                  vconfig.LibvirtConfigGuestCharDeviceLog)
            self.assertEqual("off", device.log.append)
            self.assertIsNotNone(device.log.file)
            self.assertTrue(device.log.file.endswith("console.log"))

        _test_consoles(fields.Architecture.X86_64, False,
                       "pty", vconfig.LibvirtConfigGuestSerial)
        _test_consoles(fields.Architecture.S390, True,
                       "tcp", vconfig.LibvirtConfigGuestConsole)
        _test_consoles(fields.Architecture.X86_64, False,
                       "pty", vconfig.LibvirtConfigGuestConsole, 'xen')
|
|
|
|
@mock.patch('nova.console.serial.acquire_port')
|
|
def test_get_guest_config_serial_console_through_port_rng_exhausted(
|
|
self, acquire_port):
|
|
self.flags(enabled=True, group='serial_console')
|
|
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
instance_ref = objects.Instance(**self.test_instance)
|
|
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
|
|
|
|
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
|
|
instance_ref,
|
|
image_meta)
|
|
|
|
acquire_port.side_effect = exception.SocketPortRangeExhaustedException(
|
|
'127.0.0.1')
|
|
self.assertRaises(
|
|
exception.SocketPortRangeExhaustedException,
|
|
drvr._get_guest_config, instance_ref, [],
|
|
image_meta, disk_info)
|
|
|
|
@mock.patch('nova.console.serial.release_port')
|
|
@mock.patch.object(libvirt_driver.LibvirtDriver, 'get_info')
|
|
@mock.patch.object(host.Host, 'get_guest')
|
|
@mock.patch.object(libvirt_driver.LibvirtDriver,
|
|
'_get_serial_ports_from_guest')
|
|
def test_serial_console_release_port(
|
|
self, mock_get_serial_ports_from_guest, mock_get_guest,
|
|
mock_get_info, mock_release_port):
|
|
self.flags(enabled="True", group='serial_console')
|
|
|
|
guest = libvirt_guest.Guest(FakeVirtDomain())
|
|
guest.power_off = mock.Mock()
|
|
mock_get_info.return_value = hardware.InstanceInfo(
|
|
state=power_state.SHUTDOWN)
|
|
mock_get_guest.return_value = guest
|
|
mock_get_serial_ports_from_guest.return_value = iter([
|
|
('127.0.0.1', 10000), ('127.0.0.1', 10001)])
|
|
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
drvr._destroy(objects.Instance(**self.test_instance))
|
|
mock_release_port.assert_has_calls(
|
|
[mock.call(host='127.0.0.1', port=10000),
|
|
mock.call(host='127.0.0.1', port=10001)])
|
|
|
|
    @mock.patch('os.stat', return_value=mock.Mock(st_blocks=0))
    @mock.patch('os.path.getsize', return_value=0)
    @mock.patch('nova.virt.libvirt.storage.lvm.get_volume_size',
                return_value='fake-size')
    def test_detach_encrypted_volumes(self, mock_get_volume_size,
                                      mock_getsize, mock_stat):
        """Test that unencrypted volumes are not disconnected with dmcrypt."""
        instance = objects.Instance(**self.test_instance)
        # Domain with one file-backed disk and two /dev/mapper block
        # disks; none of them are dmcrypt-managed ephemeral devices, so
        # dmcrypt.delete_volume must never be called.
        xml = """
              <domain type='kvm'>
                  <devices>
                      <disk type='file'>
                          <driver name='fake-driver' type='fake-type' />
                          <source file='filename'/>
                          <target dev='vdc' bus='virtio'/>
                      </disk>
                      <disk type='block' device='disk'>
                          <driver name='fake-driver' type='fake-type' />
                          <source dev='/dev/mapper/disk'/>
                          <target dev='vda'/>
                      </disk>
                      <disk type='block' device='disk'>
                          <driver name='fake-driver' type='fake-type' />
                          <source dev='/dev/mapper/swap'/>
                          <target dev='vdb'/>
                      </disk>
                  </devices>
              </domain>
              """
        dom = FakeVirtDomain(fake_xml=xml)
        instance.ephemeral_key_uuid = uuids.ephemeral_key_uuid  # encrypted

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        # conn._host is only available after the driver is constructed,
        # so the patches are applied to an inner function instead of the
        # test method itself.
        @mock.patch.object(dmcrypt, 'delete_volume')
        @mock.patch.object(conn._host, '_get_domain', return_value=dom)
        @mock.patch.object(libvirt_driver.disk_api, 'get_allocated_disk_size')
        def detach_encrypted_volumes(block_device_info, mock_get_alloc_size,
                                     mock_get_domain, mock_delete_volume):
            conn._detach_encrypted_volumes(instance, block_device_info)

            mock_get_domain.assert_called_once_with(instance)
            self.assertFalse(mock_delete_volume.called)

        block_device_info = {'root_device_name': '/dev/vda',
                             'ephemerals': [],
                             'block_device_mapping': []}

        detach_encrypted_volumes(block_device_info)
|
|
|
|
@mock.patch.object(libvirt_guest.Guest, "get_xml_desc")
|
|
def test_get_serial_ports_from_guest(self, mock_get_xml_desc):
|
|
i = self._test_get_serial_ports_from_guest(None,
|
|
mock_get_xml_desc)
|
|
self.assertEqual([
|
|
('127.0.0.1', 100),
|
|
('127.0.0.1', 101),
|
|
('127.0.0.2', 100),
|
|
('127.0.0.2', 101)], list(i))
|
|
|
|
@mock.patch.object(libvirt_guest.Guest, "get_xml_desc")
|
|
def test_get_serial_ports_from_guest_bind_only(self, mock_get_xml_desc):
|
|
i = self._test_get_serial_ports_from_guest('bind',
|
|
mock_get_xml_desc)
|
|
self.assertEqual([
|
|
('127.0.0.1', 101),
|
|
('127.0.0.2', 100)], list(i))
|
|
|
|
@mock.patch.object(libvirt_guest.Guest, "get_xml_desc")
|
|
def test_get_serial_ports_from_guest_connect_only(self,
|
|
mock_get_xml_desc):
|
|
i = self._test_get_serial_ports_from_guest('connect',
|
|
mock_get_xml_desc)
|
|
self.assertEqual([
|
|
('127.0.0.1', 100),
|
|
('127.0.0.2', 101)], list(i))
|
|
|
|
@mock.patch.object(libvirt_guest.Guest, "get_xml_desc")
|
|
def test_get_serial_ports_from_guest_on_s390(self, mock_get_xml_desc):
|
|
i = self._test_get_serial_ports_from_guest(None,
|
|
mock_get_xml_desc,
|
|
'console')
|
|
self.assertEqual([
|
|
('127.0.0.1', 100),
|
|
('127.0.0.1', 101),
|
|
('127.0.0.2', 100),
|
|
('127.0.0.2', 101)], list(i))
|
|
|
|
    def _test_get_serial_ports_from_guest(self, mode, mock_get_xml_desc,
                                          dev_name='serial'):
        """Feed a canned domain XML with four TCP char devices (two in
        'bind' mode, two in 'connect' mode) to
        _get_serial_ports_from_guest and return the resulting iterator
        of (host, port) tuples.

        :param mode: source-mode filter passed through to the driver
                     ('bind', 'connect' or None for no filtering)
        :param dev_name: element name to emit, 'serial' or 'console'
        """
        xml = """
        <domain type='kvm'>
          <devices>
            <%(dev_name)s type="tcp">
              <source host="127.0.0.1" service="100" mode="connect"/>
            </%(dev_name)s>
            <%(dev_name)s type="tcp">
              <source host="127.0.0.1" service="101" mode="bind"/>
            </%(dev_name)s>
            <%(dev_name)s type="tcp">
              <source host="127.0.0.2" service="100" mode="bind"/>
            </%(dev_name)s>
            <%(dev_name)s type="tcp">
              <source host="127.0.0.2" service="101" mode="connect"/>
            </%(dev_name)s>
          </devices>
        </domain>""" % {'dev_name': dev_name}

        mock_get_xml_desc.return_value = xml

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        guest = libvirt_guest.Guest(FakeVirtDomain())
        return drvr._get_serial_ports_from_guest(guest, mode=mode)
|
|
|
|
def test_get_guest_config_with_type_xen(self):
|
|
self.flags(enabled=True, group='vnc')
|
|
self.flags(virt_type='xen',
|
|
use_usb_tablet=False,
|
|
group='libvirt')
|
|
self.flags(enabled=False,
|
|
group='spice')
|
|
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
instance_ref = objects.Instance(**self.test_instance)
|
|
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
|
|
|
|
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
|
|
instance_ref,
|
|
image_meta)
|
|
cfg = drvr._get_guest_config(instance_ref, [],
|
|
image_meta, disk_info)
|
|
self.assertEqual(len(cfg.devices), 6)
|
|
self.assertIsInstance(cfg.devices[0],
|
|
vconfig.LibvirtConfigGuestDisk)
|
|
self.assertIsInstance(cfg.devices[1],
|
|
vconfig.LibvirtConfigGuestDisk)
|
|
self.assertIsInstance(cfg.devices[2],
|
|
vconfig.LibvirtConfigGuestConsole)
|
|
self.assertIsInstance(cfg.devices[3],
|
|
vconfig.LibvirtConfigGuestGraphics)
|
|
self.assertIsInstance(cfg.devices[4],
|
|
vconfig.LibvirtConfigGuestVideo)
|
|
self.assertIsInstance(cfg.devices[5],
|
|
vconfig.LibvirtConfigMemoryBalloon)
|
|
|
|
self.assertEqual(cfg.devices[3].type, "vnc")
|
|
self.assertEqual(cfg.devices[4].type, "xen")
|
|
|
|
    @mock.patch.object(libvirt_driver.libvirt_utils, 'get_arch',
                       return_value=fields.Architecture.S390X)
    def test_get_guest_config_with_type_kvm_on_s390(self, mock_get_arch):
        """KVM on s390x uses the s390-ccw-virtio machine type and an sclp
        pty console instead of the usual serial device.
        """
        self.flags(enabled=False, group='vnc')
        self.flags(virt_type='kvm',
                   use_usb_tablet=False,
                   group='libvirt')

        # Make the (stubbed) host report s390x so arch-specific config paths
        # are exercised.
        self._stub_host_capabilities_cpu_arch(fields.Architecture.S390X)

        instance_ref = objects.Instance(**self.test_instance)

        cfg = self._get_guest_config_via_fake_api(instance_ref)

        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        terminal_device = cfg.devices[2]
        self.assertIsInstance(terminal_device,
                              vconfig.LibvirtConfigGuestConsole)
        self.assertEqual("sclp", terminal_device.target_type)
        self.assertEqual("pty", terminal_device.type)
        self.assertEqual("s390-ccw-virtio", cfg.os_mach_type)
def _stub_host_capabilities_cpu_arch(self, cpu_arch):
|
|
def get_host_capabilities_stub(self):
|
|
cpu = vconfig.LibvirtConfigGuestCPU()
|
|
cpu.arch = cpu_arch
|
|
|
|
caps = vconfig.LibvirtConfigCaps()
|
|
caps.host = vconfig.LibvirtConfigCapsHost()
|
|
caps.host.cpu = cpu
|
|
return caps
|
|
|
|
self.stub_out('nova.virt.libvirt.host.Host.get_capabilities',
|
|
get_host_capabilities_stub)
|
|
|
|
def _get_guest_config_via_fake_api(self, instance):
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
|
|
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
|
|
instance,
|
|
image_meta)
|
|
return drvr._get_guest_config(instance, [],
|
|
image_meta, disk_info)
|
|
|
|
    def test_get_guest_config_with_type_xen_pae_hvm(self):
        """Fully virtualised (HVM) Xen guests get the hvmloader path plus
        the PAE, ACPI and APIC guest features.
        """
        self.flags(enabled=True, group='vnc')
        self.flags(virt_type='xen',
                   use_usb_tablet=False,
                   group='libvirt')
        self.flags(enabled=False,
                   group='spice')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        # Request HVM mode explicitly; the default for xen is paravirt.
        instance_ref['vm_mode'] = fields.VMMode.HVM
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)

        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)

        self.assertEqual(cfg.os_type, fields.VMMode.HVM)
        self.assertEqual(cfg.os_loader, CONF.libvirt.xen_hvmloader_path)
        self.assertEqual(3, len(cfg.features))
        self.assertIsInstance(cfg.features[0],
                              vconfig.LibvirtConfigGuestFeaturePAE)
        self.assertIsInstance(cfg.features[1],
                              vconfig.LibvirtConfigGuestFeatureACPI)
        self.assertIsInstance(cfg.features[2],
                              vconfig.LibvirtConfigGuestFeatureAPIC)
    def test_get_guest_config_with_type_xen_pae_pvm(self):
        """Paravirtualised Xen guests report os_type 'xen' and get only
        the PAE feature (no ACPI/APIC).
        """
        self.flags(enabled=True, group='vnc')
        self.flags(virt_type='xen',
                   use_usb_tablet=False,
                   group='libvirt')
        self.flags(enabled=False,
                   group='spice')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)

        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)

        self.assertEqual(cfg.os_type, fields.VMMode.XEN)
        self.assertEqual(1, len(cfg.features))
        self.assertIsInstance(cfg.features[0],
                              vconfig.LibvirtConfigGuestFeaturePAE)
    def test_get_guest_config_with_vnc_and_spice(self):
        """With both VNC and SPICE (incl. agent) enabled, the guest gets a
        usb tablet, the spicevmc agent channel and two graphics devices.
        """
        self.flags(enabled=True, group='vnc')
        self.flags(virt_type='kvm',
                   use_usb_tablet=True,
                   group='libvirt')
        self.flags(enabled=True,
                   agent_enabled=True,
                   group='spice')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        # Device order is significant for the positional checks below.
        self.assertEqual(len(cfg.devices), 9)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestChannel)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[8],
                              vconfig.LibvirtConfigMemoryBalloon)

        self.assertEqual(cfg.devices[3].type, "tablet")
        self.assertEqual(cfg.devices[4].target_name, "com.redhat.spice.0")
        self.assertEqual(cfg.devices[4].type, 'spicevmc')
        self.assertEqual(cfg.devices[5].type, "vnc")
        self.assertEqual(cfg.devices[6].type, "spice")
    def test_get_guest_config_with_watchdog_action_image_meta(self):
        """The hw_watchdog_action image property adds a watchdog device
        configured with the requested action.
        """
        self.flags(virt_type='kvm', group='libvirt')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "raw",
            "properties": {"hw_watchdog_action": "none"}})

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)

        cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info)
        self.assertEqual(len(cfg.devices), 8)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestWatchdog)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigMemoryBalloon)

        self.assertEqual("none", cfg.devices[6].action)
def _test_get_guest_usb_tablet(self, vnc_enabled, spice_enabled, os_type,
|
|
agent_enabled=False, image_meta=None):
|
|
self.flags(enabled=vnc_enabled, group='vnc')
|
|
self.flags(enabled=spice_enabled,
|
|
agent_enabled=agent_enabled, group='spice')
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
image_meta = objects.ImageMeta.from_dict(image_meta)
|
|
return drvr._get_guest_pointer_model(os_type, image_meta)
|
|
|
|
def test_use_ps2_mouse(self):
|
|
self.flags(pointer_model='ps2mouse')
|
|
|
|
tablet = self._test_get_guest_usb_tablet(
|
|
True, True, fields.VMMode.HVM)
|
|
self.assertIsNone(tablet)
|
|
|
|
def test_get_guest_usb_tablet_wipe(self):
|
|
self.flags(use_usb_tablet=True, group='libvirt')
|
|
|
|
tablet = self._test_get_guest_usb_tablet(
|
|
True, True, fields.VMMode.HVM)
|
|
self.assertIsNotNone(tablet)
|
|
|
|
tablet = self._test_get_guest_usb_tablet(
|
|
True, False, fields.VMMode.HVM)
|
|
self.assertIsNotNone(tablet)
|
|
|
|
tablet = self._test_get_guest_usb_tablet(
|
|
False, True, fields.VMMode.HVM)
|
|
self.assertIsNotNone(tablet)
|
|
|
|
tablet = self._test_get_guest_usb_tablet(
|
|
False, False, fields.VMMode.HVM)
|
|
self.assertIsNone(tablet)
|
|
|
|
tablet = self._test_get_guest_usb_tablet(
|
|
True, True, "foo")
|
|
self.assertIsNone(tablet)
|
|
|
|
tablet = self._test_get_guest_usb_tablet(
|
|
False, True, fields.VMMode.HVM, True)
|
|
self.assertIsNone(tablet)
|
|
|
|
def test_get_guest_usb_tablet_image_meta(self):
|
|
self.flags(use_usb_tablet=True, group='libvirt')
|
|
image_meta = {"properties": {"hw_pointer_model": "usbtablet"}}
|
|
|
|
tablet = self._test_get_guest_usb_tablet(
|
|
True, True, fields.VMMode.HVM, image_meta=image_meta)
|
|
self.assertIsNotNone(tablet)
|
|
|
|
tablet = self._test_get_guest_usb_tablet(
|
|
True, False, fields.VMMode.HVM, image_meta=image_meta)
|
|
self.assertIsNotNone(tablet)
|
|
|
|
tablet = self._test_get_guest_usb_tablet(
|
|
False, True, fields.VMMode.HVM, image_meta=image_meta)
|
|
self.assertIsNotNone(tablet)
|
|
|
|
tablet = self._test_get_guest_usb_tablet(
|
|
False, False, fields.VMMode.HVM, image_meta=image_meta)
|
|
self.assertIsNone(tablet)
|
|
|
|
tablet = self._test_get_guest_usb_tablet(
|
|
True, True, "foo", image_meta=image_meta)
|
|
self.assertIsNone(tablet)
|
|
|
|
tablet = self._test_get_guest_usb_tablet(
|
|
False, True, fields.VMMode.HVM, True, image_meta=image_meta)
|
|
self.assertIsNone(tablet)
|
|
|
|
def test_get_guest_usb_tablet_image_meta_no_vnc(self):
|
|
self.flags(use_usb_tablet=False, group='libvirt')
|
|
self.flags(pointer_model=None)
|
|
|
|
image_meta = {"properties": {"hw_pointer_model": "usbtablet"}}
|
|
self.assertRaises(
|
|
exception.UnsupportedPointerModelRequested,
|
|
self._test_get_guest_usb_tablet,
|
|
False, False, fields.VMMode.HVM, True, image_meta=image_meta)
|
|
|
|
def test_get_guest_no_pointer_model_usb_tablet_set(self):
|
|
self.flags(use_usb_tablet=True, group='libvirt')
|
|
self.flags(pointer_model=None)
|
|
|
|
tablet = self._test_get_guest_usb_tablet(True, True, fields.VMMode.HVM)
|
|
self.assertIsNotNone(tablet)
|
|
|
|
def test_get_guest_no_pointer_model_usb_tablet_not_set(self):
|
|
self.flags(use_usb_tablet=False, group='libvirt')
|
|
self.flags(pointer_model=None)
|
|
|
|
tablet = self._test_get_guest_usb_tablet(True, True, fields.VMMode.HVM)
|
|
self.assertIsNone(tablet)
|
|
|
|
def test_get_guest_pointer_model_usb_tablet(self):
|
|
self.flags(use_usb_tablet=False, group='libvirt')
|
|
self.flags(pointer_model='usbtablet')
|
|
tablet = self._test_get_guest_usb_tablet(True, True, fields.VMMode.HVM)
|
|
self.assertIsNotNone(tablet)
|
|
|
|
def test_get_guest_pointer_model_usb_tablet_image(self):
|
|
image_meta = {"properties": {"hw_pointer_model": "usbtablet"}}
|
|
tablet = self._test_get_guest_usb_tablet(
|
|
True, True, fields.VMMode.HVM, image_meta=image_meta)
|
|
self.assertIsNotNone(tablet)
|
|
|
|
def test_get_guest_pointer_model_usb_tablet_image_no_HVM(self):
|
|
self.flags(pointer_model=None)
|
|
self.flags(use_usb_tablet=False, group='libvirt')
|
|
image_meta = {"properties": {"hw_pointer_model": "usbtablet"}}
|
|
self.assertRaises(
|
|
exception.UnsupportedPointerModelRequested,
|
|
self._test_get_guest_usb_tablet,
|
|
True, True, fields.VMMode.XEN, image_meta=image_meta)
|
|
|
|
    def test_get_guest_config_with_watchdog_action_flavor(self):
        """The hw:watchdog_action flavor extra spec adds a watchdog device
        configured with the requested action.
        """
        self.flags(virt_type='kvm', group='libvirt')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.flavor.extra_specs = {"hw:watchdog_action": 'none'}
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)

        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)

        self.assertEqual(8, len(cfg.devices))
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestWatchdog)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigMemoryBalloon)

        self.assertEqual("none", cfg.devices[6].action)
    def test_get_guest_config_with_watchdog_overrides_flavor(self):
        """The image hw_watchdog_action property overrides the flavor's
        hw:watchdog_action extra spec.
        """
        self.flags(virt_type='kvm', group='libvirt')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.flavor.extra_specs = {'hw:watchdog_action': 'none'}
        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "raw",
            "properties": {"hw_watchdog_action": "pause"}})

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)

        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)

        self.assertEqual(8, len(cfg.devices))
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestWatchdog)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigMemoryBalloon)

        # Image value wins over the flavor's 'none'.
        self.assertEqual("pause", cfg.devices[6].action)
    def test_get_guest_config_with_video_driver_image_meta(self):
        """The hw_video_model image property selects the guest video
        device model (here vmvga).
        """
        self.flags(virt_type='kvm', group='libvirt')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "raw",
            "properties": {"hw_video_model": "vmvga"}})

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info)
        self.assertEqual(len(cfg.devices), 7)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigMemoryBalloon)

        self.assertEqual(cfg.devices[4].type, "vnc")
        self.assertEqual(cfg.devices[5].type, "vmvga")
    def test_get_guest_config_with_qga_through_image_meta(self):
        """hw_qemu_guest_agent=yes in the image adds a unix channel for
        the QEMU guest agent.
        """
        self.flags(virt_type='kvm', group='libvirt')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "raw",
            "properties": {"hw_qemu_guest_agent": "yes"}})

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info)
        self.assertEqual(len(cfg.devices), 8)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestChannel)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigMemoryBalloon)

        self.assertEqual(cfg.devices[3].type, "tablet")
        self.assertEqual(cfg.devices[4].type, "vnc")
        self.assertEqual(cfg.devices[6].type, "unix")
        self.assertEqual(cfg.devices[6].target_name, "org.qemu.guest_agent.0")
    def test_get_guest_config_with_video_driver_vram(self):
        """hw_video_ram (MiB) from the image, capped by the flavor's
        hw_video:ram_max_mb, is applied to the qxl video device in KiB.
        """
        self.flags(enabled=False, group='vnc')
        self.flags(virt_type='kvm', group='libvirt')
        self.flags(enabled=True,
                   agent_enabled=True,
                   group='spice')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        # 64 MiB requested by the image is within the 100 MiB flavor cap.
        instance_ref.flavor.extra_specs = {'hw_video:ram_max_mb': "100"}
        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "raw",
            "properties": {"hw_video_model": "qxl",
                           "hw_video_ram": "64"}})

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)

        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        self.assertEqual(len(cfg.devices), 7)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestChannel)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigMemoryBalloon)

        self.assertEqual(cfg.devices[4].type, "spice")
        self.assertEqual(cfg.devices[5].type, "qxl")
        # vram is expressed in KiB in the libvirt config.
        self.assertEqual(cfg.devices[5].vram, 64 * units.Mi / units.Ki)
    @mock.patch('nova.virt.disk.api.teardown_container')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info')
    @mock.patch('nova.virt.disk.api.setup_container')
    @mock.patch('oslo_utils.fileutils.ensure_tree')
    @mock.patch('nova.virt.libvirt.utils.get_instance_path')
    def test_unmount_fs_if_error_during_lxc_create_domain(self,
            mock_get_inst_path, mock_ensure_tree, mock_setup_container,
            mock_get_info, mock_teardown):
        """If we hit an error during a `_create_domain` call to `libvirt+lxc`
        we need to ensure the guest FS is unmounted from the host so that any
        future `lvremove` calls will work.
        """
        self.flags(virt_type='lxc', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        mock_instance = mock.MagicMock()
        mock_get_inst_path.return_value = '/tmp/'
        mock_image_backend = mock.MagicMock()
        drvr.image_backend = mock_image_backend
        mock_image = mock.MagicMock()
        mock_image.path = '/tmp/test.img'
        drvr.image_backend.by_name.return_value = mock_image
        mock_setup_container.return_value = '/dev/nbd0'
        mock_get_info.side_effect = exception.InstanceNotFound(
            instance_id='foo')
        # Make domain definition blow up *after* the container FS has been
        # mounted, to prove the error path still tears it down.
        drvr._conn.defineXML = mock.Mock()
        drvr._conn.defineXML.side_effect = ValueError('somethingbad')
        with test.nested(
                mock.patch.object(drvr, '_is_booted_from_volume',
                                  return_value=False),
                mock.patch.object(drvr, 'plug_vifs'),
                mock.patch.object(drvr, 'firewall_driver'),
                mock.patch.object(drvr, 'cleanup')):
            self.assertRaises(ValueError,
                              drvr._create_domain_and_network,
                              self.context,
                              'xml',
                              mock_instance, None)

            # The rootfs must have been unmounted despite the failure.
            mock_teardown.assert_called_with(container_dir='/tmp/rootfs')
    def test_video_driver_flavor_limit_not_set(self):
        """Requesting hw_video_ram in the image without a flavor
        hw_video:ram_max_mb cap raises RequestedVRamTooHigh.
        """
        self.flags(virt_type='kvm', group='libvirt')
        self.flags(enabled=True,
                   agent_enabled=True,
                   group='spice')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "raw",
            "properties": {"hw_video_model": "qxl",
                           "hw_video_ram": "64"}})

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)

        with mock.patch.object(objects.Instance, 'save'):
            self.assertRaises(exception.RequestedVRamTooHigh,
                              drvr._get_guest_config,
                              instance_ref,
                              [],
                              image_meta,
                              disk_info)
    def test_video_driver_ram_above_flavor_limit(self):
        """An image hw_video_ram larger than the flavor's
        hw_video:ram_max_mb cap raises RequestedVRamTooHigh.
        """
        self.flags(virt_type='kvm', group='libvirt')
        self.flags(enabled=True,
                   agent_enabled=True,
                   group='spice')

        instance_ref = objects.Instance(**self.test_instance)
        instance_type = instance_ref.get_flavor()
        # Flavor caps video RAM at 50 MiB; the image asks for 64 MiB.
        instance_type.extra_specs = {'hw_video:ram_max_mb': "50"}
        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "raw",
            "properties": {"hw_video_model": "qxl",
                           "hw_video_ram": "64"}})
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        with mock.patch.object(objects.Instance, 'save'):
            self.assertRaises(exception.RequestedVRamTooHigh,
                              drvr._get_guest_config,
                              instance_ref,
                              [],
                              image_meta,
                              disk_info)
    def test_get_guest_config_without_qga_through_image_meta(self):
        """hw_qemu_guest_agent=no results in no guest agent channel."""
        self.flags(virt_type='kvm', group='libvirt')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "raw",
            "properties": {"hw_qemu_guest_agent": "no"}})

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info)
        self.assertEqual(len(cfg.devices), 7)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigMemoryBalloon)

        self.assertEqual(cfg.devices[3].type, "tablet")
        self.assertEqual(cfg.devices[4].type, "vnc")
    def test_get_guest_config_with_rng_device(self):
        """hw_rng_model=virtio plus flavor hw_rng:allowed adds a virtio-rng
        device backed by /dev/urandom with no rate limiting.
        """
        self.flags(virt_type='kvm', group='libvirt')
        self.flags(pointer_model='ps2mouse')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.flavor.extra_specs = {'hw_rng:allowed': 'True'}
        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "raw",
            "properties": {"hw_rng_model": "virtio"}})

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)

        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        self.assertEqual(len(cfg.devices), 7)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestRng)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigMemoryBalloon)

        self.assertEqual(cfg.devices[5].model, 'random')
        self.assertEqual(cfg.devices[5].backend, '/dev/urandom')
        self.assertIsNone(cfg.devices[5].rate_bytes)
        self.assertIsNone(cfg.devices[5].rate_period)
    def test_get_guest_config_with_rng_not_allowed(self):
        """Without the hw_rng:allowed flavor extra spec, the image's
        hw_rng_model request is ignored and no RNG device is added.
        """
        self.flags(virt_type='kvm', group='libvirt')
        self.flags(pointer_model='ps2mouse')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "raw",
            "properties": {"hw_rng_model": "virtio"}})

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)

        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        self.assertEqual(len(cfg.devices), 6)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigMemoryBalloon)
    def test_get_guest_config_with_rng_limits(self):
        """The hw_rng:rate_bytes and hw_rng:rate_period flavor extra specs
        are applied as rate limits on the RNG device.
        """
        self.flags(virt_type='kvm', group='libvirt')
        self.flags(pointer_model='ps2mouse')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.flavor.extra_specs = {'hw_rng:allowed': 'True',
                                           'hw_rng:rate_bytes': '1024',
                                           'hw_rng:rate_period': '2'}
        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "raw",
            "properties": {"hw_rng_model": "virtio"}})

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)

        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        self.assertEqual(len(cfg.devices), 7)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestRng)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigMemoryBalloon)

        self.assertEqual(cfg.devices[5].model, 'random')
        self.assertEqual(cfg.devices[5].backend, '/dev/urandom')
        self.assertEqual(cfg.devices[5].rate_bytes, 1024)
        self.assertEqual(cfg.devices[5].rate_period, 2)
    @mock.patch('nova.virt.libvirt.driver.os.path.exists')
    def test_get_guest_config_with_rng_backend(self, mock_path):
        """CONF.libvirt.rng_dev_path is used as the RNG backend when the
        device exists on the host.
        """
        self.flags(virt_type='kvm',
                   rng_dev_path='/dev/hw_rng',
                   group='libvirt')
        self.flags(pointer_model='ps2mouse')
        # Pretend the configured RNG device exists on the host.
        mock_path.return_value = True

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.flavor.extra_specs = {'hw_rng:allowed': 'True'}
        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "raw",
            "properties": {"hw_rng_model": "virtio"}})

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)

        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        self.assertEqual(len(cfg.devices), 7)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestRng)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigMemoryBalloon)

        self.assertEqual(cfg.devices[5].model, 'random')
        self.assertEqual(cfg.devices[5].backend, '/dev/hw_rng')
        self.assertIsNone(cfg.devices[5].rate_bytes)
        self.assertIsNone(cfg.devices[5].rate_period)
    @mock.patch('nova.virt.libvirt.driver.os.path.exists')
    def test_get_guest_config_with_rng_dev_not_present(self, mock_path):
        """An RNG request fails with RngDeviceNotExist when the configured
        rng_dev_path is missing on the host.
        """
        self.flags(virt_type='kvm',
                   use_usb_tablet=False,
                   rng_dev_path='/dev/hw_rng',
                   group='libvirt')
        # The configured RNG device does not exist on the host.
        mock_path.return_value = False

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.flavor.extra_specs = {'hw_rng:allowed': 'True'}
        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "raw",
            "properties": {"hw_rng_model": "virtio"}})

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)

        self.assertRaises(exception.RngDeviceNotExist,
                          drvr._get_guest_config,
                          instance_ref,
                          [],
                          image_meta, disk_info)
    @mock.patch.object(
        host.Host, "is_cpu_control_policy_capable", return_value=True)
    def test_guest_cpu_shares_with_multi_vcpu(self, is_able):
        """Default cputune shares scale with vCPU count (1024 per vCPU)."""
        self.flags(virt_type='kvm', group='libvirt')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.flavor.vcpus = 4
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)

        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)

        # 4 vCPUs * 1024 shares each.
        self.assertEqual(4096, cfg.cputune.shares)
    @mock.patch.object(
        host.Host, "is_cpu_control_policy_capable", return_value=True)
    def test_get_guest_config_with_cpu_quota(self, is_able):
        """quota:cpu_shares and quota:cpu_period flavor extra specs are
        propagated to the guest's cputune settings.
        """
        self.flags(virt_type='kvm', group='libvirt')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.flavor.extra_specs = {'quota:cpu_shares': '10000',
                                           'quota:cpu_period': '20000'}
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)

        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)

        self.assertEqual(10000, cfg.cputune.shares)
        self.assertEqual(20000, cfg.cputune.period)
    def test_get_guest_config_with_hiding_hypervisor_id(self):
        """The img_hide_hypervisor_id image property enables the KVM
        'hidden' feature that masks the hypervisor signature (needed e.g.
        for Nvidia drivers in guests with passthrough GPUs).
        """
        self.flags(virt_type='kvm', group='libvirt')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        instance_ref = objects.Instance(**self.test_instance)

        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "raw",
            "properties": {"img_hide_hypervisor_id": "true"}})

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)

        cfg = drvr._get_guest_config(instance_ref,
                                     [],
                                     image_meta,
                                     disk_info)

        self.assertTrue(
            any(isinstance(feature, vconfig.LibvirtConfigGuestFeatureKvmHidden)
                for feature in cfg.features))
    def test_get_guest_config_with_hiding_hypervisor_id_flavor_extra_specs(
            self):
        """The hide_hypervisor_id flavor extra spec alone (no image
        property) also enables the KVM 'hidden' feature.
        """
        # Input to the test: flavor extra_specs
        flavor_hide_id = fake_flavor.fake_flavor_obj(self.context,
            extra_specs={"hide_hypervisor_id": "true"},
            expected_attrs={"extra_specs"})

        self.flags(virt_type='kvm', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.flavor = flavor_hide_id
        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "raw"})
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref,
                                     [],
                                     image_meta,
                                     disk_info)

        self.assertTrue(
            any(isinstance(feature, vconfig.LibvirtConfigGuestFeatureKvmHidden)
                for feature in cfg.features))
def test_get_guest_config_with_hiding_hypervisor_id_img_and_flavor(
|
|
self):
|
|
# Input to the test: image metadata (true) and flavor
|
|
# extra_specs (true)
|
|
flavor_hide_id = fake_flavor.fake_flavor_obj(self.context,
|
|
extra_specs={"hide_hypervisor_id": "true"},
|
|
expected_attrs={"extra_specs"})
|
|
image_meta = objects.ImageMeta.from_dict({
|
|
"disk_format": "raw",
|
|
"properties": {"img_hide_hypervisor_id": "true"}})
|
|
|
|
self.flags(virt_type='kvm', group='libvirt')
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
instance_ref = objects.Instance(**self.test_instance)
|
|
instance_ref.flavor = flavor_hide_id
|
|
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
|
|
instance_ref,
|
|
image_meta)
|
|
cfg = drvr._get_guest_config(instance_ref,
|
|
[],
|
|
image_meta,
|
|
disk_info)
|
|
|
|
self.assertTrue(
|
|
any(isinstance(feature, vconfig.LibvirtConfigGuestFeatureKvmHidden)
|
|
for feature in cfg.features))
|
|
|
|
def test_get_guest_config_with_hiding_hypervisor_id_img_or_flavor(
|
|
self):
|
|
# Input to the test: image metadata (false) and flavor
|
|
# extra_specs (true)
|
|
flavor_hide_id = fake_flavor.fake_flavor_obj(self.context,
|
|
extra_specs={"hide_hypervisor_id": "true"},
|
|
expected_attrs={"extra_specs"})
|
|
image_meta = objects.ImageMeta.from_dict({
|
|
"disk_format": "raw",
|
|
"properties": {"img_hide_hypervisor_id": "false"}})
|
|
|
|
self.flags(virt_type='kvm', group='libvirt')
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
instance_ref = objects.Instance(**self.test_instance)
|
|
instance_ref.flavor = flavor_hide_id
|
|
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
|
|
instance_ref,
|
|
image_meta)
|
|
cfg = drvr._get_guest_config(instance_ref,
|
|
[],
|
|
image_meta,
|
|
disk_info)
|
|
|
|
self.assertTrue(
|
|
any(isinstance(feature, vconfig.LibvirtConfigGuestFeatureKvmHidden)
|
|
for feature in cfg.features))
|
|
|
|
# Input to the test: image metadata (true) and flavor
|
|
# extra_specs (false)
|
|
flavor_hide_id = fake_flavor.fake_flavor_obj(self.context,
|
|
extra_specs={"hide_hypervisor_id": "false"},
|
|
expected_attrs={"extra_specs"})
|
|
image_meta = objects.ImageMeta.from_dict({
|
|
"disk_format": "raw",
|
|
"properties": {"img_hide_hypervisor_id": "true"}})
|
|
|
|
instance_ref.flavor = flavor_hide_id
|
|
|
|
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
|
|
instance_ref,
|
|
image_meta)
|
|
cfg = drvr._get_guest_config(instance_ref,
|
|
[],
|
|
image_meta,
|
|
disk_info)
|
|
|
|
self.assertTrue(
|
|
any(isinstance(feature, vconfig.LibvirtConfigGuestFeatureKvmHidden)
|
|
for feature in cfg.features))
|
|
|
|
def test_get_guest_config_without_hiding_hypervisor_id(self):
|
|
self.flags(virt_type='kvm', group='libvirt')
|
|
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
|
|
instance_ref = objects.Instance(**self.test_instance)
|
|
|
|
image_meta = objects.ImageMeta.from_dict({
|
|
"disk_format": "raw",
|
|
"properties": {"img_hide_hypervisor_id": "false"}})
|
|
|
|
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
|
|
instance_ref,
|
|
image_meta)
|
|
|
|
cfg = drvr._get_guest_config(instance_ref,
|
|
[],
|
|
image_meta,
|
|
disk_info)
|
|
|
|
self.assertFalse(
|
|
any(isinstance(feature, vconfig.LibvirtConfigGuestFeatureKvmHidden)
|
|
for feature in cfg.features))
|
|
|
|
def test_get_guest_config_without_hiding_hypervisor_id_flavor_extra_specs(
|
|
self):
|
|
flavor_hide_id = fake_flavor.fake_flavor_obj(self.context,
|
|
extra_specs={"hide_hypervisor_id": "false"},
|
|
expected_attrs={"extra_specs"})
|
|
|
|
self.flags(virt_type='qemu', group='libvirt')
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
instance_ref = objects.Instance(**self.test_instance)
|
|
instance_ref.flavor = flavor_hide_id
|
|
image_meta = objects.ImageMeta.from_dict({
|
|
"disk_format": "raw"})
|
|
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
|
|
instance_ref,
|
|
image_meta)
|
|
cfg = drvr._get_guest_config(instance_ref,
|
|
[],
|
|
image_meta,
|
|
disk_info)
|
|
|
|
self.assertFalse(
|
|
any(isinstance(feature, vconfig.LibvirtConfigGuestFeatureKvmHidden)
|
|
for feature in cfg.features))
|
|
|
|
def _test_get_guest_config_disk_cachemodes(self, images_type):
|
|
# Verify that the configured cachemodes are propagated to the device
|
|
# configurations.
|
|
if images_type == 'flat':
|
|
cachemode = 'file=directsync'
|
|
elif images_type == 'lvm':
|
|
cachemode = 'block=writethrough'
|
|
elif images_type == 'rbd':
|
|
cachemode = 'network=writeback'
|
|
self.flags(disk_cachemodes=[cachemode], group='libvirt')
|
|
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
instance_ref = objects.Instance(**self.test_instance)
|
|
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
|
|
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
|
|
instance_ref,
|
|
image_meta)
|
|
cfg = drvr._get_guest_config(instance_ref, [],
|
|
image_meta, disk_info)
|
|
for d in cfg.devices:
|
|
if isinstance(d, vconfig.LibvirtConfigGuestDisk):
|
|
expected = cachemode.split('=')
|
|
self.assertEqual(expected[0], d.source_type)
|
|
self.assertEqual(expected[1], d.driver_cache)
|
|
|
|
def test_get_guest_config_disk_cachemodes_file(self):
|
|
self.flags(images_type='flat', group='libvirt')
|
|
self._test_get_guest_config_disk_cachemodes('flat')
|
|
|
|
def test_get_guest_config_disk_cachemodes_block(self):
|
|
self.flags(images_type='lvm', group='libvirt')
|
|
self.flags(images_volume_group='vols', group='libvirt')
|
|
self._test_get_guest_config_disk_cachemodes('lvm')
|
|
|
|
@mock.patch.object(rbd_utils, 'rbd')
|
|
@mock.patch.object(rbd_utils, 'rados')
|
|
@mock.patch.object(rbd_utils.RBDDriver, 'get_mon_addrs',
|
|
return_value=(mock.Mock(), mock.Mock()))
|
|
def test_get_guest_config_disk_cachemodes_network(
|
|
self, mock_get_mon_addrs, mock_rados, mock_rbd):
|
|
self.flags(images_type='rbd', group='libvirt')
|
|
self._test_get_guest_config_disk_cachemodes('rbd')
|
|
|
|
@mock.patch.object(
|
|
host.Host, "is_cpu_control_policy_capable", return_value=True)
|
|
def test_get_guest_config_with_bogus_cpu_quota(self, is_able):
|
|
self.flags(virt_type='kvm', group='libvirt')
|
|
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
|
|
instance_ref = objects.Instance(**self.test_instance)
|
|
instance_ref.flavor.extra_specs = {'quota:cpu_shares': 'fishfood',
|
|
'quota:cpu_period': '20000'}
|
|
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
|
|
|
|
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
|
|
instance_ref,
|
|
image_meta)
|
|
|
|
self.assertRaises(ValueError,
|
|
drvr._get_guest_config,
|
|
instance_ref, [], image_meta, disk_info)
|
|
|
|
@mock.patch.object(
|
|
host.Host, "is_cpu_control_policy_capable", return_value=False)
|
|
def test_get_update_guest_cputune(self, is_able):
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
instance_ref = objects.Instance(**self.test_instance)
|
|
instance_ref.flavor.extra_specs = {'quota:cpu_shares': '10000',
|
|
'quota:cpu_period': '20000'}
|
|
self.assertRaises(
|
|
exception.UnsupportedHostCPUControlPolicy,
|
|
drvr._update_guest_cputune, {}, instance_ref.flavor, "kvm")
|
|
|
|
def _create_fake_service_compute(self):
|
|
service_info = {
|
|
'id': 1729,
|
|
'host': 'fake',
|
|
'report_count': 0
|
|
}
|
|
service_ref = objects.Service(**service_info)
|
|
|
|
compute_info = {
|
|
'id': 1729,
|
|
'vcpus': 2,
|
|
'memory_mb': 1024,
|
|
'local_gb': 2048,
|
|
'vcpus_used': 0,
|
|
'memory_mb_used': 0,
|
|
'local_gb_used': 0,
|
|
'free_ram_mb': 1024,
|
|
'free_disk_gb': 2048,
|
|
'hypervisor_type': 'xen',
|
|
'hypervisor_version': 1,
|
|
'running_vms': 0,
|
|
'cpu_info': '',
|
|
'current_workload': 0,
|
|
'service_id': service_ref['id'],
|
|
'host': service_ref['host']
|
|
}
|
|
compute_ref = objects.ComputeNode(**compute_info)
|
|
return (service_ref, compute_ref)
|
|
|
|
def test_get_guest_config_with_pci_passthrough_kvm(self):
|
|
self.flags(virt_type='kvm', group='libvirt')
|
|
service_ref, compute_ref = self._create_fake_service_compute()
|
|
|
|
instance = objects.Instance(**self.test_instance)
|
|
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
|
|
|
|
pci_device_info = dict(test_pci_device.fake_db_dev)
|
|
pci_device_info.update(compute_node_id=1,
|
|
label='fake',
|
|
status=fields.PciDeviceStatus.ALLOCATED,
|
|
address='0000:00:00.1',
|
|
compute_id=compute_ref.id,
|
|
instance_uuid=instance.uuid,
|
|
request_id=None,
|
|
extra_info={})
|
|
pci_device = objects.PciDevice(**pci_device_info)
|
|
pci_list = objects.PciDeviceList()
|
|
pci_list.objects.append(pci_device)
|
|
instance.pci_devices = pci_list
|
|
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
|
|
instance,
|
|
image_meta)
|
|
cfg = drvr._get_guest_config(instance, [],
|
|
image_meta, disk_info)
|
|
|
|
had_pci = 0
|
|
# care only about the PCI devices
|
|
for dev in cfg.devices:
|
|
if type(dev) == vconfig.LibvirtConfigGuestHostdevPCI:
|
|
had_pci += 1
|
|
self.assertEqual(dev.type, 'pci')
|
|
self.assertEqual(dev.managed, 'yes')
|
|
self.assertEqual(dev.mode, 'subsystem')
|
|
|
|
self.assertEqual(dev.domain, "0000")
|
|
self.assertEqual(dev.bus, "00")
|
|
self.assertEqual(dev.slot, "00")
|
|
self.assertEqual(dev.function, "1")
|
|
self.assertEqual(had_pci, 1)
|
|
|
|
def test_get_guest_config_with_pci_passthrough_xen(self):
|
|
self.flags(virt_type='xen', group='libvirt')
|
|
service_ref, compute_ref = self._create_fake_service_compute()
|
|
|
|
instance = objects.Instance(**self.test_instance)
|
|
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
|
|
|
|
pci_device_info = dict(test_pci_device.fake_db_dev)
|
|
pci_device_info.update(compute_node_id=1,
|
|
label='fake',
|
|
status=fields.PciDeviceStatus.ALLOCATED,
|
|
address='0000:00:00.2',
|
|
compute_id=compute_ref.id,
|
|
instance_uuid=instance.uuid,
|
|
request_id=None,
|
|
extra_info={})
|
|
pci_device = objects.PciDevice(**pci_device_info)
|
|
pci_list = objects.PciDeviceList()
|
|
pci_list.objects.append(pci_device)
|
|
instance.pci_devices = pci_list
|
|
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
|
|
instance,
|
|
image_meta)
|
|
cfg = drvr._get_guest_config(instance, [],
|
|
image_meta, disk_info)
|
|
had_pci = 0
|
|
# care only about the PCI devices
|
|
for dev in cfg.devices:
|
|
if type(dev) == vconfig.LibvirtConfigGuestHostdevPCI:
|
|
had_pci += 1
|
|
self.assertEqual(dev.type, 'pci')
|
|
self.assertEqual(dev.managed, 'no')
|
|
self.assertEqual(dev.mode, 'subsystem')
|
|
|
|
self.assertEqual(dev.domain, "0000")
|
|
self.assertEqual(dev.bus, "00")
|
|
self.assertEqual(dev.slot, "00")
|
|
self.assertEqual(dev.function, "2")
|
|
self.assertEqual(had_pci, 1)
|
|
|
|
def test_get_guest_config_os_command_line_through_image_meta(self):
|
|
self.flags(virt_type="kvm",
|
|
cpu_mode='none',
|
|
group='libvirt')
|
|
|
|
self.test_instance['kernel_id'] = "fake_kernel_id"
|
|
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
instance_ref = objects.Instance(**self.test_instance)
|
|
image_meta = objects.ImageMeta.from_dict({
|
|
"disk_format": "raw",
|
|
"properties": {"os_command_line":
|
|
"fake_os_command_line"}})
|
|
|
|
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
|
|
instance_ref,
|
|
image_meta)
|
|
|
|
cfg = drvr._get_guest_config(instance_ref,
|
|
_fake_network_info(self, 1),
|
|
image_meta, disk_info)
|
|
self.assertEqual(cfg.os_cmdline, "fake_os_command_line")
|
|
|
|
def test_get_guest_config_os_command_line_without_kernel_id(self):
|
|
self.flags(virt_type="kvm",
|
|
cpu_mode='none',
|
|
group='libvirt')
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
instance_ref = objects.Instance(**self.test_instance)
|
|
image_meta = objects.ImageMeta.from_dict({
|
|
"disk_format": "raw",
|
|
"properties": {"os_command_line":
|
|
"fake_os_command_line"}})
|
|
|
|
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
|
|
instance_ref,
|
|
image_meta)
|
|
|
|
cfg = drvr._get_guest_config(instance_ref,
|
|
_fake_network_info(self, 1),
|
|
image_meta, disk_info)
|
|
self.assertIsNone(cfg.os_cmdline)
|
|
|
|
def test_get_guest_config_os_command_empty(self):
|
|
self.flags(virt_type="kvm",
|
|
cpu_mode='none',
|
|
group='libvirt')
|
|
|
|
self.test_instance['kernel_id'] = "fake_kernel_id"
|
|
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
instance_ref = objects.Instance(**self.test_instance)
|
|
image_meta = objects.ImageMeta.from_dict({
|
|
"disk_format": "raw",
|
|
"properties": {"os_command_line": ""}})
|
|
|
|
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
|
|
instance_ref,
|
|
image_meta)
|
|
|
|
# the instance has 'root=/dev/vda console=tty0 console=ttyS0
|
|
# console=hvc0' set by default, so testing an empty string and None
|
|
# value in the os_command_line image property must pass
|
|
cfg = drvr._get_guest_config(instance_ref,
|
|
_fake_network_info(self, 1),
|
|
image_meta, disk_info)
|
|
self.assertNotEqual(cfg.os_cmdline, "")
|
|
|
|
@mock.patch.object(libvirt_driver.LibvirtDriver,
|
|
"_get_guest_storage_config")
|
|
@mock.patch.object(libvirt_driver.LibvirtDriver, "_has_numa_support")
|
|
def test_get_guest_config_armv7(self, mock_numa, mock_storage):
|
|
def get_host_capabilities_stub(self):
|
|
cpu = vconfig.LibvirtConfigGuestCPU()
|
|
cpu.arch = fields.Architecture.ARMV7
|
|
|
|
caps = vconfig.LibvirtConfigCaps()
|
|
caps.host = vconfig.LibvirtConfigCapsHost()
|
|
caps.host.cpu = cpu
|
|
return caps
|
|
|
|
self.flags(virt_type="kvm",
|
|
group="libvirt")
|
|
|
|
instance_ref = objects.Instance(**self.test_instance)
|
|
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
|
|
|
|
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
|
|
instance_ref,
|
|
image_meta)
|
|
|
|
self.stub_out('nova.virt.libvirt.host.Host.get_capabilities',
|
|
get_host_capabilities_stub)
|
|
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
cfg = drvr._get_guest_config(instance_ref,
|
|
_fake_network_info(self, 1),
|
|
image_meta, disk_info)
|
|
self.assertEqual(cfg.os_mach_type, "virt")
|
|
|
|
@mock.patch.object(libvirt_driver.LibvirtDriver,
|
|
"_get_guest_storage_config")
|
|
@mock.patch.object(libvirt_driver.LibvirtDriver, "_has_numa_support")
|
|
@mock.patch('os.path.exists', return_value=True)
|
|
def test_get_guest_config_aarch64(self, mock_path_exists,
|
|
mock_numa, mock_storage):
|
|
def get_host_capabilities_stub(self):
|
|
cpu = vconfig.LibvirtConfigGuestCPU()
|
|
cpu.arch = fields.Architecture.AARCH64
|
|
|
|
caps = vconfig.LibvirtConfigCaps()
|
|
caps.host = vconfig.LibvirtConfigCapsHost()
|
|
caps.host.cpu = cpu
|
|
return caps
|
|
|
|
TEST_AMOUNT_OF_PCIE_SLOTS = 8
|
|
CONF.set_override("num_pcie_ports", TEST_AMOUNT_OF_PCIE_SLOTS,
|
|
group='libvirt')
|
|
|
|
self.flags(virt_type="kvm",
|
|
group="libvirt")
|
|
|
|
instance_ref = objects.Instance(**self.test_instance)
|
|
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
|
|
|
|
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
|
|
instance_ref,
|
|
image_meta)
|
|
|
|
self.stub_out('nova.virt.libvirt.host.Host.get_capabilities',
|
|
get_host_capabilities_stub)
|
|
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
cfg = drvr._get_guest_config(instance_ref,
|
|
_fake_network_info(self, 1),
|
|
image_meta, disk_info)
|
|
self.assertTrue(mock_path_exists.called)
|
|
mock_path_exists.assert_called_with(
|
|
libvirt_driver.DEFAULT_UEFI_LOADER_PATH['aarch64'])
|
|
self.assertEqual(cfg.os_mach_type, "virt")
|
|
|
|
num_ports = 0
|
|
for device in cfg.devices:
|
|
try:
|
|
if (device.root_name == 'controller' and
|
|
device.model == 'pcie-root-port'):
|
|
num_ports += 1
|
|
except AttributeError:
|
|
pass
|
|
|
|
self.assertEqual(TEST_AMOUNT_OF_PCIE_SLOTS, num_ports)
|
|
|
|
@mock.patch.object(libvirt_driver.LibvirtDriver,
|
|
"_get_guest_storage_config")
|
|
@mock.patch.object(libvirt_driver.LibvirtDriver, "_has_numa_support")
|
|
@mock.patch('os.path.exists', return_value=True)
|
|
def test_get_guest_config_aarch64_with_graphics(self, mock_path_exists,
|
|
mock_numa, mock_storage):
|
|
def get_host_capabilities_stub(self):
|
|
cpu = vconfig.LibvirtConfigGuestCPU()
|
|
cpu.arch = fields.Architecture.AARCH64
|
|
|
|
caps = vconfig.LibvirtConfigCaps()
|
|
caps.host = vconfig.LibvirtConfigCapsHost()
|
|
caps.host.cpu = cpu
|
|
return caps
|
|
|
|
self.stub_out('nova.virt.libvirt.host.Host.get_capabilities',
|
|
get_host_capabilities_stub)
|
|
self.flags(enabled=True,
|
|
server_listen='10.0.0.1',
|
|
keymap='en-ie',
|
|
group='vnc')
|
|
self.flags(virt_type='kvm', group='libvirt')
|
|
self.flags(enabled=False, group='spice')
|
|
|
|
cfg = self._get_guest_config_with_graphics()
|
|
|
|
self.assertTrue(mock_path_exists.called)
|
|
mock_path_exists.assert_called_with(
|
|
libvirt_driver.DEFAULT_UEFI_LOADER_PATH['aarch64'])
|
|
self.assertEqual(cfg.os_mach_type, "virt")
|
|
|
|
usbhost_exists = False
|
|
keyboard_exists = False
|
|
for device in cfg.devices:
|
|
if device.root_name == 'controller' and device.type == 'usb':
|
|
usbhost_exists = True
|
|
if device.root_name == 'input' and device.type == 'keyboard':
|
|
keyboard_exists = True
|
|
self.assertTrue(usbhost_exists)
|
|
self.assertTrue(keyboard_exists)
|
|
|
|
def test_get_guest_config_machine_type_s390(self):
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
|
|
caps = vconfig.LibvirtConfigCaps()
|
|
caps.host = vconfig.LibvirtConfigCapsHost()
|
|
caps.host.cpu = vconfig.LibvirtConfigGuestCPU()
|
|
|
|
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
|
|
host_cpu_archs = (fields.Architecture.S390, fields.Architecture.S390X)
|
|
for host_cpu_arch in host_cpu_archs:
|
|
caps.host.cpu.arch = host_cpu_arch
|
|
os_mach_type = drvr._get_machine_type(image_meta, caps)
|
|
self.assertEqual('s390-ccw-virtio', os_mach_type)
|
|
|
|
def test_get_guest_config_machine_type_through_image_meta(self):
|
|
self.flags(virt_type="kvm",
|
|
group='libvirt')
|
|
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
instance_ref = objects.Instance(**self.test_instance)
|
|
image_meta = objects.ImageMeta.from_dict({
|
|
"disk_format": "raw",
|
|
"properties": {"hw_machine_type":
|
|
"fake_machine_type"}})
|
|
|
|
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
|
|
instance_ref,
|
|
image_meta)
|
|
|
|
cfg = drvr._get_guest_config(instance_ref,
|
|
_fake_network_info(self, 1),
|
|
image_meta, disk_info)
|
|
self.assertEqual(cfg.os_mach_type, "fake_machine_type")
|
|
|
|
def test_get_guest_config_machine_type_from_config(self):
|
|
self.flags(virt_type='kvm', group='libvirt')
|
|
self.flags(hw_machine_type=['x86_64=fake_machine_type'],
|
|
group='libvirt')
|
|
|
|
def fake_getCapabilities():
|
|
return """
|
|
<capabilities>
|
|
<host>
|
|
<uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
|
|
<cpu>
|
|
<arch>x86_64</arch>
|
|
<model>Penryn</model>
|
|
<vendor>Intel</vendor>
|
|
<topology sockets='1' cores='2' threads='1'/>
|
|
<feature name='xtpr'/>
|
|
</cpu>
|
|
</host>
|
|
</capabilities>
|
|
"""
|
|
|
|
def fake_baselineCPU(cpu, flag):
|
|
return """<cpu mode='custom' match='exact'>
|
|
<model fallback='allow'>Penryn</model>
|
|
<vendor>Intel</vendor>
|
|
<feature policy='require' name='xtpr'/>
|
|
</cpu>
|
|
"""
|
|
|
|
# Make sure the host arch is mocked as x86_64
|
|
self.create_fake_libvirt_mock(getCapabilities=fake_getCapabilities,
|
|
baselineCPU=fake_baselineCPU,
|
|
getVersion=lambda: 1005001)
|
|
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
instance_ref = objects.Instance(**self.test_instance)
|
|
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
|
|
|
|
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
|
|
instance_ref,
|
|
image_meta)
|
|
|
|
cfg = drvr._get_guest_config(instance_ref,
|
|
_fake_network_info(self, 1),
|
|
image_meta, disk_info)
|
|
self.assertEqual(cfg.os_mach_type, "fake_machine_type")
|
|
|
|
def _test_get_guest_config_ppc64(self, device_index):
|
|
"""Test for nova.virt.libvirt.driver.LibvirtDriver._get_guest_config.
|
|
"""
|
|
self.flags(virt_type='kvm', group='libvirt')
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
instance_ref = objects.Instance(**self.test_instance)
|
|
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
|
|
|
|
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
|
|
instance_ref,
|
|
image_meta)
|
|
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
|
|
expected = (fields.Architecture.PPC64, fields.Architecture.PPC)
|
|
for guestarch in expected:
|
|
with mock.patch.object(libvirt_driver.libvirt_utils,
|
|
'get_arch',
|
|
return_value=guestarch):
|
|
cfg = drvr._get_guest_config(instance_ref, [],
|
|
image_meta,
|
|
disk_info)
|
|
self.assertIsInstance(cfg.devices[device_index],
|
|
vconfig.LibvirtConfigGuestVideo)
|
|
self.assertEqual(cfg.devices[device_index].type, 'vga')
|
|
|
|
def test_get_guest_config_ppc64_through_image_meta_vnc_enabled(self):
|
|
self.flags(enabled=True, group='vnc')
|
|
self._test_get_guest_config_ppc64(5)
|
|
|
|
def test_get_guest_config_ppc64_through_image_meta_spice_enabled(self):
|
|
self.flags(enabled=True,
|
|
agent_enabled=True,
|
|
group='spice')
|
|
self._test_get_guest_config_ppc64(7)
|
|
|
|
def _test_get_guest_config_bootmenu(self, image_meta, extra_specs):
|
|
self.flags(virt_type='kvm', group='libvirt')
|
|
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
instance_ref = objects.Instance(**self.test_instance)
|
|
instance_ref.flavor.extra_specs = extra_specs
|
|
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
|
|
instance_ref, image_meta)
|
|
conf = conn._get_guest_config(instance_ref, [], image_meta, disk_info)
|
|
self.assertTrue(conf.os_bootmenu)
|
|
|
|
def test_get_guest_config_bootmenu_via_image_meta(self):
|
|
image_meta = objects.ImageMeta.from_dict(
|
|
{"disk_format": "raw",
|
|
"properties": {"hw_boot_menu": "True"}})
|
|
self._test_get_guest_config_bootmenu(image_meta, {})
|
|
|
|
def test_get_guest_config_bootmenu_via_extra_specs(self):
|
|
image_meta = objects.ImageMeta.from_dict(
|
|
self.test_image_meta)
|
|
self._test_get_guest_config_bootmenu(image_meta,
|
|
{'hw:boot_menu': 'True'})
|
|
|
|
def test_get_guest_cpu_config_none(self):
|
|
self.flags(cpu_mode="none", group='libvirt')
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
instance_ref = objects.Instance(**self.test_instance)
|
|
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
|
|
|
|
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
|
|
instance_ref,
|
|
image_meta)
|
|
conf = drvr._get_guest_config(instance_ref,
|
|
_fake_network_info(self, 1),
|
|
image_meta, disk_info)
|
|
self.assertIsInstance(conf.cpu,
|
|
vconfig.LibvirtConfigGuestCPU)
|
|
self.assertIsNone(conf.cpu.mode)
|
|
self.assertIsNone(conf.cpu.model)
|
|
self.assertEqual(conf.cpu.sockets, instance_ref.flavor.vcpus)
|
|
self.assertEqual(conf.cpu.cores, 1)
|
|
self.assertEqual(conf.cpu.threads, 1)
|
|
|
|
def test_get_guest_cpu_config_default_kvm(self):
|
|
self.flags(virt_type="kvm",
|
|
cpu_mode='none',
|
|
group='libvirt')
|
|
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
instance_ref = objects.Instance(**self.test_instance)
|
|
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
|
|
|
|
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
|
|
instance_ref,
|
|
image_meta)
|
|
conf = drvr._get_guest_config(instance_ref,
|
|
_fake_network_info(self, 1),
|
|
image_meta, disk_info)
|
|
self.assertIsInstance(conf.cpu,
|
|
vconfig.LibvirtConfigGuestCPU)
|
|
self.assertIsNone(conf.cpu.mode)
|
|
self.assertIsNone(conf.cpu.model)
|
|
self.assertEqual(conf.cpu.sockets, instance_ref.flavor.vcpus)
|
|
self.assertEqual(conf.cpu.cores, 1)
|
|
self.assertEqual(conf.cpu.threads, 1)
|
|
|
|
def test_get_guest_cpu_config_default_uml(self):
|
|
self.flags(virt_type="uml",
|
|
cpu_mode='none',
|
|
group='libvirt')
|
|
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
instance_ref = objects.Instance(**self.test_instance)
|
|
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
|
|
|
|
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
|
|
instance_ref,
|
|
image_meta)
|
|
conf = drvr._get_guest_config(instance_ref,
|
|
_fake_network_info(self, 1),
|
|
image_meta, disk_info)
|
|
self.assertIsNone(conf.cpu)
|
|
|
|
def test_get_guest_cpu_config_default_lxc(self):
|
|
self.flags(virt_type="lxc",
|
|
cpu_mode='none',
|
|
group='libvirt')
|
|
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
instance_ref = objects.Instance(**self.test_instance)
|
|
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
|
|
|
|
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
|
|
instance_ref,
|
|
image_meta)
|
|
conf = drvr._get_guest_config(instance_ref,
|
|
_fake_network_info(self, 1),
|
|
image_meta, disk_info)
|
|
self.assertIsNone(conf.cpu)
|
|
|
|
def test_get_guest_cpu_config_host_passthrough(self):
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
instance_ref = objects.Instance(**self.test_instance)
|
|
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
|
|
|
|
self.flags(cpu_mode="host-passthrough", group='libvirt')
|
|
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
|
|
instance_ref,
|
|
image_meta)
|
|
conf = drvr._get_guest_config(instance_ref,
|
|
_fake_network_info(self, 1),
|
|
image_meta, disk_info)
|
|
self.assertIsInstance(conf.cpu,
|
|
vconfig.LibvirtConfigGuestCPU)
|
|
self.assertEqual(conf.cpu.mode, "host-passthrough")
|
|
self.assertIsNone(conf.cpu.model)
|
|
self.assertEqual(conf.cpu.sockets, instance_ref.flavor.vcpus)
|
|
self.assertEqual(conf.cpu.cores, 1)
|
|
self.assertEqual(conf.cpu.threads, 1)
|
|
|
|
def test_get_guest_cpu_config_host_passthrough_aarch64(self):
|
|
expected = {
|
|
fields.Architecture.X86_64: "host-model",
|
|
fields.Architecture.I686: "host-model",
|
|
fields.Architecture.PPC: "host-model",
|
|
fields.Architecture.PPC64: "host-model",
|
|
fields.Architecture.ARMV7: "host-model",
|
|
fields.Architecture.AARCH64: "host-passthrough",
|
|
}
|
|
for guestarch, expect_mode in expected.items():
|
|
caps = vconfig.LibvirtConfigCaps()
|
|
caps.host = vconfig.LibvirtConfigCapsHost()
|
|
caps.host.cpu = vconfig.LibvirtConfigCPU()
|
|
caps.host.cpu.arch = guestarch
|
|
with mock.patch.object(host.Host, "get_capabilities",
|
|
return_value=caps):
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
if caps.host.cpu.arch == fields.Architecture.AARCH64:
|
|
drvr._has_uefi_support = mock.Mock(return_value=True)
|
|
instance_ref = objects.Instance(**self.test_instance)
|
|
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
|
|
|
|
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
|
|
instance_ref,
|
|
image_meta)
|
|
conf = drvr._get_guest_config(instance_ref,
|
|
_fake_network_info(self, 1),
|
|
image_meta, disk_info)
|
|
self.assertIsInstance(conf.cpu,
|
|
vconfig.LibvirtConfigGuestCPU)
|
|
self.assertEqual(conf.cpu.mode, expect_mode)
|
|
|
|
def test_get_guest_cpu_config_host_model(self):
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
instance_ref = objects.Instance(**self.test_instance)
|
|
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
|
|
|
|
self.flags(cpu_mode="host-model", group='libvirt')
|
|
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
|
|
instance_ref,
|
|
image_meta)
|
|
conf = drvr._get_guest_config(instance_ref,
|
|
_fake_network_info(self, 1),
|
|
image_meta, disk_info)
|
|
self.assertIsInstance(conf.cpu,
|
|
vconfig.LibvirtConfigGuestCPU)
|
|
self.assertEqual(conf.cpu.mode, "host-model")
|
|
self.assertIsNone(conf.cpu.model)
|
|
self.assertEqual(conf.cpu.sockets, instance_ref.flavor.vcpus)
|
|
self.assertEqual(conf.cpu.cores, 1)
|
|
self.assertEqual(conf.cpu.threads, 1)
|
|
|
|
def test_get_guest_cpu_config_custom(self):
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
instance_ref = objects.Instance(**self.test_instance)
|
|
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
|
|
|
|
self.flags(cpu_mode="custom",
|
|
cpu_model="Penryn",
|
|
group='libvirt')
|
|
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
|
|
instance_ref,
|
|
image_meta)
|
|
conf = drvr._get_guest_config(instance_ref,
|
|
_fake_network_info(self, 1),
|
|
image_meta, disk_info)
|
|
self.assertIsInstance(conf.cpu,
|
|
vconfig.LibvirtConfigGuestCPU)
|
|
self.assertEqual(conf.cpu.mode, "custom")
|
|
self.assertEqual(conf.cpu.model, "Penryn")
|
|
self.assertEqual(conf.cpu.sockets, instance_ref.flavor.vcpus)
|
|
self.assertEqual(conf.cpu.cores, 1)
|
|
self.assertEqual(conf.cpu.threads, 1)
|
|
|
|
@mock.patch.object(libvirt_driver.LOG, 'warning')
|
|
def test_get_guest_cpu_config_custom_with_extra_flags(self,
|
|
mock_warn):
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
instance_ref = objects.Instance(**self.test_instance)
|
|
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
|
|
|
|
self.flags(cpu_mode="custom",
|
|
cpu_model="IvyBridge",
|
|
cpu_model_extra_flags="pcid",
|
|
group='libvirt')
|
|
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
|
|
instance_ref,
|
|
image_meta)
|
|
conf = drvr._get_guest_config(instance_ref,
|
|
_fake_network_info(self, 1),
|
|
image_meta, disk_info)
|
|
self.assertIsInstance(conf.cpu,
|
|
vconfig.LibvirtConfigGuestCPU)
|
|
self.assertEqual(conf.cpu.mode, "custom")
|
|
self.assertEqual(conf.cpu.model, "IvyBridge")
|
|
self.assertIn(conf.cpu.features.pop().name, "pcid")
|
|
self.assertEqual(conf.cpu.sockets, instance_ref.flavor.vcpus)
|
|
self.assertEqual(conf.cpu.cores, 1)
|
|
self.assertEqual(conf.cpu.threads, 1)
|
|
self.assertFalse(mock_warn.called)
|
|
|
|
@mock.patch.object(libvirt_driver.LOG, 'warning')
|
|
def test_get_guest_cpu_config_custom_with_extra_flags_upper_case(self,
|
|
mock_warn):
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
instance_ref = objects.Instance(**self.test_instance)
|
|
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
|
|
|
|
self.flags(cpu_mode="custom",
|
|
cpu_model="IvyBridge",
|
|
cpu_model_extra_flags="PCID",
|
|
group='libvirt')
|
|
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
|
|
instance_ref,
|
|
image_meta)
|
|
conf = drvr._get_guest_config(instance_ref,
|
|
_fake_network_info(self, 1),
|
|
image_meta, disk_info)
|
|
self.assertIsInstance(conf.cpu,
|
|
vconfig.LibvirtConfigGuestCPU)
|
|
self.assertEqual("custom", conf.cpu.mode)
|
|
self.assertEqual("IvyBridge", conf.cpu.model)
|
|
# At this point the upper case CPU flag is normalized to lower
|
|
# case, so assert for that
|
|
self.assertEqual("pcid", conf.cpu.features.pop().name)
|
|
self.assertEqual(instance_ref.flavor.vcpus, conf.cpu.sockets)
|
|
self.assertEqual(1, conf.cpu.cores)
|
|
self.assertEqual(1, conf.cpu.threads)
|
|
mock_warn.assert_not_called()
|
|
|
|
@mock.patch.object(libvirt_driver.LOG, 'warning')
|
|
def test_get_guest_cpu_config_custom_with_multiple_extra_flags(self,
|
|
mock_warn):
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
instance_ref = objects.Instance(**self.test_instance)
|
|
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
|
|
|
|
self.flags(cpu_mode="custom",
|
|
cpu_model="IvyBridge",
|
|
cpu_model_extra_flags=['pcid', 'vmx'],
|
|
group='libvirt')
|
|
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
|
|
instance_ref,
|
|
image_meta)
|
|
conf = drvr._get_guest_config(instance_ref,
|
|
_fake_network_info(self, 1),
|
|
image_meta, disk_info)
|
|
features = [feature.name for feature in conf.cpu.features]
|
|
self.assertIsInstance(conf.cpu,
|
|
vconfig.LibvirtConfigGuestCPU)
|
|
self.assertEqual(conf.cpu.mode, "custom")
|
|
self.assertEqual(conf.cpu.model, "IvyBridge")
|
|
self.assertIn("pcid", features)
|
|
self.assertIn("vmx", features)
|
|
self.assertEqual(conf.cpu.sockets, instance_ref.flavor.vcpus)
|
|
self.assertEqual(conf.cpu.cores, 1)
|
|
self.assertEqual(conf.cpu.threads, 1)
|
|
mock_warn.assert_not_called()
|
|
|
|
    @mock.patch.object(libvirt_driver.LOG, 'warning')
    def test_get_guest_cpu_config_host_model_with_extra_flags(self,
                                                              mock_warn):
        """'host-model' CPU mode accepts extra flags without warning."""
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        self.flags(cpu_mode="host-model",
                   cpu_model_extra_flags="pdpe1gb",
                   group='libvirt')
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        conf = drvr._get_guest_config(instance_ref,
                                      _fake_network_info(self, 1),
                                      image_meta, disk_info)
        features = [feature.name for feature in conf.cpu.features]
        self.assertIsInstance(conf.cpu,
                              vconfig.LibvirtConfigGuestCPU)
        self.assertEqual(conf.cpu.mode, "host-model")
        self.assertIn("pdpe1gb", features)
        self.assertEqual(conf.cpu.sockets, instance_ref.flavor.vcpus)
        self.assertEqual(conf.cpu.cores, 1)
        self.assertEqual(conf.cpu.threads, 1)
        # For 'host-model', it is now valid to use 'extra_flags';
        # assert that no warning is thrown
        mock_warn.assert_not_called()
|
|
|
|
    @mock.patch.object(libvirt_driver.LOG, 'warning')
    def test_get_guest_cpu_config_host_passthrough_with_extra_flags(self,
                                                                    mock_warn):
        """'host-passthrough' CPU mode accepts extra flags without warning."""
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        self.flags(cpu_mode="host-passthrough",
                   cpu_model_extra_flags="invtsc",
                   group='libvirt')
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        conf = drvr._get_guest_config(instance_ref,
                                      _fake_network_info(self, 1),
                                      image_meta, disk_info)
        features = [feature.name for feature in conf.cpu.features]
        self.assertIsInstance(conf.cpu,
                              vconfig.LibvirtConfigGuestCPU)
        self.assertEqual(conf.cpu.mode, "host-passthrough")
        self.assertIn("invtsc", features)
        self.assertEqual(conf.cpu.sockets, instance_ref.flavor.vcpus)
        self.assertEqual(conf.cpu.cores, 1)
        self.assertEqual(conf.cpu.threads, 1)
        # We have lifted the restriction for 'host-passthrough' as well;
        # so here too, assert that no warning is thrown
        mock_warn.assert_not_called()
|
|
|
|
    def test_get_guest_cpu_topology(self):
        """hw:cpu_max_sockets caps the socket count; remaining vCPUs are
        distributed over cores (8 vCPUs / 4 sockets -> 2 cores).
        """
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.flavor.vcpus = 8
        instance_ref.flavor.extra_specs = {'hw:cpu_max_sockets': '4'}
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)

        conf = drvr._get_guest_config(instance_ref,
                                      _fake_network_info(self, 1),
                                      image_meta, disk_info)
        self.assertIsInstance(conf.cpu,
                              vconfig.LibvirtConfigGuestCPU)
        self.assertEqual(conf.cpu.mode, "host-model")
        self.assertEqual(conf.cpu.sockets, 4)
        self.assertEqual(conf.cpu.cores, 2)
        self.assertEqual(conf.cpu.threads, 1)
|
|
|
|
    def test_get_guest_memory_balloon_config_by_default(self):
        """By default the guest gets a virtio memballoon with a 10s period."""
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        for device in cfg.devices:
            if device.root_name == 'memballoon':
                self.assertIsInstance(device,
                                      vconfig.LibvirtConfigMemoryBalloon)
                self.assertEqual('virtio', device.model)
                self.assertEqual(10, device.period)
|
|
|
|
    def test_get_guest_memory_balloon_config_disable(self):
        """mem_stats_period_seconds=0 suppresses the memballoon device."""
        self.flags(mem_stats_period_seconds=0, group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        no_exist = True
        for device in cfg.devices:
            if device.root_name == 'memballoon':
                no_exist = False
                break
        self.assertTrue(no_exist)
|
|
|
|
    def test_get_guest_memory_balloon_config_period_value(self):
        """A custom mem_stats_period_seconds value flows into the device."""
        self.flags(mem_stats_period_seconds=21, group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        for device in cfg.devices:
            if device.root_name == 'memballoon':
                self.assertIsInstance(device,
                                      vconfig.LibvirtConfigMemoryBalloon)
                self.assertEqual('virtio', device.model)
                self.assertEqual(21, device.period)
|
|
|
|
    def test_get_guest_memory_balloon_config_qemu(self):
        """qemu guests get a virtio memballoon with the default period."""
        self.flags(virt_type='qemu', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        for device in cfg.devices:
            if device.root_name == 'memballoon':
                self.assertIsInstance(device,
                                      vconfig.LibvirtConfigMemoryBalloon)
                self.assertEqual('virtio', device.model)
                self.assertEqual(10, device.period)
|
|
|
|
    def test_get_guest_memory_balloon_config_xen(self):
        """xen guests get a 'xen' model memballoon, not virtio."""
        self.flags(virt_type='xen', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        for device in cfg.devices:
            if device.root_name == 'memballoon':
                self.assertIsInstance(device,
                                      vconfig.LibvirtConfigMemoryBalloon)
                self.assertEqual('xen', device.model)
                self.assertEqual(10, device.period)
|
|
|
|
    def test_get_guest_memory_balloon_config_lxc(self):
        """lxc containers never get a memballoon device."""
        self.flags(virt_type='lxc', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        no_exist = True
        for device in cfg.devices:
            if device.root_name == 'memballoon':
                no_exist = False
                break
        self.assertTrue(no_exist)
|
|
|
|
    @mock.patch('nova.virt.libvirt.driver.LOG.warning')
    @mock.patch.object(host.Host, 'has_min_version', return_value=True)
    @mock.patch.object(host.Host, "get_capabilities")
    def test_get_supported_perf_events_foo(self, mock_get_caps,
                                           mock_min_version,
                                           mock_warn):
        """An unknown perf event name is dropped and a warning is logged."""
        self.flags(enabled_perf_events=['foo'], group='libvirt')

        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = fields.Architecture.X86_64
        caps.host.topology = fakelibvirt.NUMATopology()

        mock_get_caps.return_value = caps
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        events = drvr._get_supported_perf_events()

        self.assertTrue(mock_warn.called)
        self.assertEqual([], events)
|
|
|
|
    @mock.patch.object(host.Host, "get_capabilities")
    def _test_get_guest_with_perf(self, caps, events, mock_get_caps):
        """Helper: build a guest config against host capabilities *caps*
        and assert that the configured perf events equal *events*.
        """
        mock_get_caps.return_value = caps

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.init_host('test_perf')
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)

        self.assertEqual(events, cfg.perf_events)
|
|
|
|
    @mock.patch.object(host.Host, 'has_min_version')
    def test_get_guest_with_perf_libvirt_unsupported(self, mock_min_version):
        """No perf events are configured when libvirt is too old."""

        def fake_has_min_version(lv_ver=None, hv_ver=None, hv_type=None):
            # Only the perf-version check fails; everything else passes.
            if lv_ver == libvirt_driver.MIN_LIBVIRT_PERF_VERSION:
                return False
            return True

        mock_min_version.side_effect = fake_has_min_version
        self.flags(enabled_perf_events=['cmt'], group='libvirt')

        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = fields.Architecture.X86_64

        self._test_get_guest_with_perf(caps, [])
|
|
|
|
    @mock.patch.object(fakelibvirt, 'VIR_PERF_PARAM_CMT', True,
                       create=True)
    @mock.patch.object(host.Host, 'has_min_version', return_value=True)
    def test_get_guest_with_perf_host_unsupported(self,
                                                  mock_min_version):
        """No perf events when the host CPU lacks the required features."""
        self.flags(enabled_perf_events=['cmt'], group='libvirt')
        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = fields.Architecture.X86_64
        caps.host.topology = fakelibvirt.NUMATopology()

        self._test_get_guest_with_perf(caps, [])
|
|
|
|
@mock.patch.object(fakelibvirt, 'VIR_PERF_PARAM_CMT', True,
|
|
create=True)
|
|
@mock.patch.object(fakelibvirt, 'VIR_PERF_PARAM_MBMT', True,
|
|
create=True)
|
|
@mock.patch.object(fakelibvirt, 'VIR_PERF_PARAM_MBML', True,
|
|
create=True)
|
|
@mock.patch.object(libvirt_driver.LOG, 'warning')
|
|
@mock.patch.object(host.Host, 'has_min_version', return_value=True)
|
|
def test_intel_cmt_perf_deprecation_warning(self,
|
|
mock_min_version,
|
|
mock_warn):
|
|
perf_events = ['cmt', 'mbml', 'mbmt']
|
|
self.flags(enabled_perf_events=['cmt', 'mbml', 'mbmt'],
|
|
group='libvirt')
|
|
caps = vconfig.LibvirtConfigCaps()
|
|
caps.host = vconfig.LibvirtConfigCapsHost()
|
|
caps.host.cpu = vconfig.LibvirtConfigCPU()
|
|
caps.host.cpu.arch = fields.Architecture.X86_64
|
|
caps.host.topology = fakelibvirt.NUMATopology()
|
|
|
|
features = []
|
|
for f in ('cmt', 'mbm_local', 'mbm_total'):
|
|
feature = vconfig.LibvirtConfigGuestCPUFeature()
|
|
feature.name = f
|
|
feature.policy = fields.CPUFeaturePolicy.REQUIRE
|
|
features.append(feature)
|
|
|
|
caps.host.cpu.features = set(features)
|
|
self._test_get_guest_with_perf(caps, ['cmt', 'mbml', 'mbmt'])
|
|
warning_count = 0
|
|
call_args_list = mock_warn.call_args_list
|
|
for call in call_args_list:
|
|
# Call can be unpackaged as a tuple of args and kwargs
|
|
# so we want to check the first arg in the args list
|
|
if (len(call) == 2 and len(call[0]) == 2 and
|
|
call[0][1] in perf_events and
|
|
'Monitoring Intel CMT' in call[0][0]):
|
|
warning_count += 1
|
|
self.assertEqual(3, warning_count)
|
|
|
|
    def test_xml_and_uri_no_ramdisk_no_kernel(self):
        """Neither kernel nor ramdisk in the XML when the image has none."""
        instance_data = dict(self.test_instance)
        self._check_xml_and_uri(instance_data,
                                expect_kernel=False, expect_ramdisk=False)
|
|
|
|
    def test_xml_and_uri_no_ramdisk_no_kernel_xen_hvm(self):
        """Xen HVM mode: no kernel/ramdisk expected in the XML."""
        instance_data = dict(self.test_instance)
        instance_data.update({'vm_mode': fields.VMMode.HVM})
        self._check_xml_and_uri(instance_data, expect_kernel=False,
                                expect_ramdisk=False, expect_xen_hvm=True)
|
|
|
|
    def test_xml_and_uri_no_ramdisk_no_kernel_xen_pv(self):
        """Xen paravirt mode: no kernel/ramdisk, and no HVM markers."""
        instance_data = dict(self.test_instance)
        instance_data.update({'vm_mode': fields.VMMode.XEN})
        self._check_xml_and_uri(instance_data, expect_kernel=False,
                                expect_ramdisk=False, expect_xen_hvm=False,
                                xen_only=True)
|
|
|
|
    def test_xml_and_uri_no_ramdisk(self):
        """A kernel without a ramdisk: only the kernel appears in the XML."""
        instance_data = dict(self.test_instance)
        instance_data['kernel_id'] = 'aki-deadbeef'
        self._check_xml_and_uri(instance_data,
                                expect_kernel=True, expect_ramdisk=False)
|
|
|
|
    def test_xml_and_uri_no_kernel(self):
        """A ramdisk without a kernel is ignored in the generated XML."""
        instance_data = dict(self.test_instance)
        instance_data['ramdisk_id'] = 'ari-deadbeef'
        self._check_xml_and_uri(instance_data,
                                expect_kernel=False, expect_ramdisk=False)
|
|
|
|
    def test_xml_and_uri(self):
        """Both kernel and ramdisk present: both appear in the XML."""
        instance_data = dict(self.test_instance)
        instance_data['ramdisk_id'] = 'ari-deadbeef'
        instance_data['kernel_id'] = 'aki-deadbeef'
        self._check_xml_and_uri(instance_data,
                                expect_kernel=True, expect_ramdisk=True)
|
|
|
|
    def test_xml_and_uri_rescue(self):
        """Rescue with kernel and ramdisk: both appear in the rescue XML."""
        instance_data = dict(self.test_instance)
        instance_data['ramdisk_id'] = 'ari-deadbeef'
        instance_data['kernel_id'] = 'aki-deadbeef'
        self._check_xml_and_uri(instance_data, expect_kernel=True,
                                expect_ramdisk=True, rescue=instance_data)
|
|
|
|
    def test_xml_and_uri_rescue_no_kernel_no_ramdisk(self):
        """Rescue without kernel or ramdisk: neither appears in the XML."""
        instance_data = dict(self.test_instance)
        self._check_xml_and_uri(instance_data, expect_kernel=False,
                                expect_ramdisk=False, rescue=instance_data)
|
|
|
|
def test_xml_and_uri_rescue_no_kernel(self):
|
|
instance_data = dict(self.test_instance)
|
|
instance_data['ramdisk_id'] = 'aki-deadbeef'
|
|
self._check_xml_and_uri(instance_data, expect_kernel=False,
|
|
expect_ramdisk=True, rescue=instance_data)
|
|
|
|
    def test_xml_and_uri_rescue_no_ramdisk(self):
        """Rescue with only a kernel: the kernel appears, no ramdisk."""
        instance_data = dict(self.test_instance)
        instance_data['kernel_id'] = 'aki-deadbeef'
        self._check_xml_and_uri(instance_data, expect_kernel=True,
                                expect_ramdisk=False, rescue=instance_data)
|
|
|
|
    def test_xml_uuid(self):
        """The instance UUID is carried through into the domain XML."""
        self._check_xml_and_uuid(self.test_image_meta)
|
|
|
|
    def test_lxc_container_and_uri(self):
        """LXC guests produce container-style XML and connection URI."""
        instance_data = dict(self.test_instance)
        self._check_xml_and_container(instance_data)
|
|
|
|
    def test_xml_disk_prefix(self):
        """Default (unset) disk prefix is derived per hypervisor type."""
        instance_data = dict(self.test_instance)
        self._check_xml_and_disk_prefix(instance_data, None)
|
|
|
|
    def test_xml_user_specified_disk_prefix(self):
        """An explicit disk device prefix ('sd') is honoured in the XML."""
        instance_data = dict(self.test_instance)
        self._check_xml_and_disk_prefix(instance_data, 'sd')
|
|
|
|
    def test_xml_disk_driver(self):
        """The disk driver element is generated correctly."""
        instance_data = dict(self.test_instance)
        self._check_xml_and_disk_driver(instance_data)
|
|
|
|
    def test_xml_disk_bus_virtio(self):
        """Default root disk is a virtio 'vda' disk."""
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        self._check_xml_and_disk_bus(image_meta,
                                     None,
                                     (("disk", "virtio", "vda"),))
|
|
|
|
    def test_xml_disk_bus_ide(self):
        """ISO images attach as a cdrom; the bus depends on architecture."""
        # It's necessary to check if the architecture is power, because
        # power doesn't have support to ide, and so libvirt translate
        # all ide calls to scsi

        expected = {fields.Architecture.PPC: ("cdrom", "scsi", "sda"),
                    fields.Architecture.PPC64: ("cdrom", "scsi", "sda"),
                    fields.Architecture.PPC64LE: ("cdrom", "scsi", "sda"),
                    fields.Architecture.AARCH64: ("cdrom", "scsi", "sda")}

        expec_val = expected.get(blockinfo.libvirt_utils.get_arch({}),
                                 ("cdrom", "ide", "hda"))
        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "iso"})
        self._check_xml_and_disk_bus(image_meta,
                                     None,
                                     (expec_val,))
|
|
|
|
    def test_xml_disk_bus_sata(self):
        """hw_disk_bus image property wins for the root device bus."""
        # NOTE(sean-k-mooney): here we assert that when
        # root_device_name is set in the block_device_info
        # and hw_disk_bus is set in the image properties,
        # we use the property value
        expected = ("disk", "sata", "vda")

        image_meta = objects.ImageMeta.from_dict({"properties": {
            "hw_disk_bus": "sata"}})
        block_device_info = {'root_device_name': "vda"}
        self._check_xml_and_disk_bus(image_meta,
                                     block_device_info,
                                     (expected,))
|
|
|
|
    def test_xml_disk_bus_ide_and_virtio(self):
        """ISO root plus virtio swap/ephemeral disks get the right buses."""
        # It's necessary to check if the architecture is power, because
        # power doesn't have support to ide, and so libvirt translate
        # all ide calls to scsi

        expected = {fields.Architecture.PPC: ("cdrom", "scsi", "sda"),
                    fields.Architecture.PPC64: ("cdrom", "scsi", "sda"),
                    fields.Architecture.PPC64LE: ("cdrom", "scsi", "sda"),
                    fields.Architecture.AARCH64: ("cdrom", "scsi", "sda")}

        swap = {'device_name': '/dev/vdc',
                'swap_size': 1}
        ephemerals = [{'device_type': 'disk',
                       'disk_bus': 'virtio',
                       'device_name': '/dev/vdb',
                       'size': 1}]
        block_device_info = {
            'swap': swap,
            'ephemerals': ephemerals}
        expec_val = expected.get(blockinfo.libvirt_utils.get_arch({}),
                                 ("cdrom", "ide", "hda"))
        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "iso"})
        self._check_xml_and_disk_bus(image_meta,
                                     block_device_info,
                                     (expec_val,
                                      ("disk", "virtio", "vdb"),
                                      ("disk", "virtio", "vdc")))
|
|
|
|
    @mock.patch.object(host.Host, 'get_guest')
    def test_instance_exists(self, mock_get_guest):
        """instance_exists is True unless the guest lookup raises."""
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertTrue(drvr.instance_exists(None))

        mock_get_guest.side_effect = exception.InstanceNotFound(
            instance_id='something')
        self.assertFalse(drvr.instance_exists(None))

        mock_get_guest.side_effect = exception.InternalError(err='something')
        self.assertFalse(drvr.instance_exists(None))
|
|
|
|
def test_estimate_instance_overhead_spawn(self):
|
|
# test that method when called with instance ref
|
|
instance_topology = objects.InstanceNUMATopology(
|
|
emulator_threads_policy=(
|
|
fields.CPUEmulatorThreadsPolicy.ISOLATE),
|
|
cells=[objects.InstanceNUMACell(
|
|
id=0, cpuset=set([0]), memory=1024)])
|
|
instance_info = objects.Instance(**self.test_instance)
|
|
instance_info.numa_topology = instance_topology
|
|
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
overhead = drvr.estimate_instance_overhead(instance_info)
|
|
self.assertEqual(1, overhead['vcpus'])
|
|
|
|
    def test_estimate_instance_overhead_spawn_no_overhead(self):
        """SHARE emulator-thread policy adds no vCPU overhead at spawn."""
        instance_topology = objects.InstanceNUMATopology(
            emulator_threads_policy=(
                fields.CPUEmulatorThreadsPolicy.SHARE),
            cells=[objects.InstanceNUMACell(
                id=0, cpuset=set([0]), memory=1024)])
        instance_info = objects.Instance(**self.test_instance)
        instance_info.numa_topology = instance_topology

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        overhead = drvr.estimate_instance_overhead(instance_info)
        self.assertEqual(0, overhead['vcpus'])
|
|
|
|
    def test_estimate_instance_overhead_migrate(self):
        """ISOLATE policy in flavor extra specs adds one vCPU overhead."""
        instance_info = objects.Flavor(extra_specs={
            'hw:emulator_threads_policy': (
                fields.CPUEmulatorThreadsPolicy.ISOLATE),
            'hw:cpu_policy': fields.CPUAllocationPolicy.DEDICATED,
        })
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        overhead = drvr.estimate_instance_overhead(instance_info)
        self.assertEqual(1, overhead['vcpus'])
|
|
|
|
    def test_estimate_instance_overhead_migrate_no_overhead(self):
        """SHARE policy in flavor extra specs adds no vCPU overhead."""
        instance_info = objects.Flavor(extra_specs={
            'hw:emulator_threads_policy': (
                fields.CPUEmulatorThreadsPolicy.SHARE),
            'hw:cpu_policy': fields.CPUAllocationPolicy.DEDICATED,
        })
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        overhead = drvr.estimate_instance_overhead(instance_info)
        self.assertEqual(0, overhead['vcpus'])
|
|
|
|
    def test_estimate_instance_overhead_usage(self):
        """ISOLATE policy via the usage path adds one vCPU overhead."""
        instance_info = objects.Flavor(extra_specs={
            'hw:emulator_threads_policy': (
                fields.CPUEmulatorThreadsPolicy.ISOLATE),
            'hw:cpu_policy': fields.CPUAllocationPolicy.DEDICATED,
        })
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        overhead = drvr.estimate_instance_overhead(instance_info)
        self.assertEqual(1, overhead['vcpus'])
|
|
|
|
    def test_estimate_instance_overhead_usage_no_overhead(self):
        """SHARE policy via the usage path adds no vCPU overhead."""
        instance_info = objects.Flavor(extra_specs={
            'hw:emulator_threads_policy': (
                fields.CPUEmulatorThreadsPolicy.SHARE),
            'hw:cpu_policy': fields.CPUAllocationPolicy.DEDICATED,
        })
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        overhead = drvr.estimate_instance_overhead(instance_info)
        self.assertEqual(0, overhead['vcpus'])
|
|
|
|
    @mock.patch.object(host.Host, "list_instance_domains")
    def test_list_instances(self, mock_list):
        """list_instances returns names of all guests, running or not."""
        vm1 = FakeVirtDomain(id=3, name="instance00000001")
        vm2 = FakeVirtDomain(id=17, name="instance00000002")
        vm3 = FakeVirtDomain(name="instance00000003")
        vm4 = FakeVirtDomain(name="instance00000004")

        mock_list.return_value = [vm1, vm2, vm3, vm4]

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        names = drvr.list_instances()
        self.assertEqual(names[0], vm1.name())
        self.assertEqual(names[1], vm2.name())
        self.assertEqual(names[2], vm3.name())
        self.assertEqual(names[3], vm4.name())
        mock_list.assert_called_with(only_guests=True, only_running=False)
|
|
|
|
    @mock.patch.object(host.Host, "list_instance_domains")
    def test_list_instance_uuids(self, mock_list):
        """list_instance_uuids returns UUIDs for every guest domain."""
        vm1 = FakeVirtDomain(id=3, name="instance00000001")
        vm2 = FakeVirtDomain(id=17, name="instance00000002")
        vm3 = FakeVirtDomain(name="instance00000003")
        vm4 = FakeVirtDomain(name="instance00000004")

        mock_list.return_value = [vm1, vm2, vm3, vm4]
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        uuids = drvr.list_instance_uuids()
        self.assertEqual(len(uuids), 4)
        self.assertEqual(uuids[0], vm1.UUIDString())
        self.assertEqual(uuids[1], vm2.UUIDString())
        self.assertEqual(uuids[2], vm3.UUIDString())
        self.assertEqual(uuids[3], vm4.UUIDString())
        mock_list.assert_called_with(only_guests=True, only_running=False)
|
|
|
|
    @mock.patch('nova.virt.libvirt.host.Host.get_online_cpus',
                return_value=None)
    @mock.patch('nova.virt.libvirt.host.Host.get_cpu_count',
                return_value=4)
    def test_get_host_vcpus_is_empty(self, get_cpu_count, get_online_cpus):
        """Empty vcpu_pin_set falls back to the host CPU count."""
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.flags(vcpu_pin_set="")
        vcpus = drvr._get_vcpu_total()
        self.assertEqual(4, vcpus)
|
|
|
|
    @mock.patch('nova.virt.libvirt.host.Host.get_online_cpus')
    def test_get_host_vcpus(self, get_online_cpus):
        """vcpu_pin_set is intersected with the online host CPUs."""
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.flags(vcpu_pin_set="4-5")
        get_online_cpus.return_value = set([4, 5, 6])
        expected_vcpus = 2
        vcpus = drvr._get_vcpu_total()
        self.assertEqual(expected_vcpus, vcpus)
|
|
|
|
    @mock.patch('nova.virt.libvirt.host.Host.get_online_cpus')
    def test_get_host_vcpus_out_of_range(self, get_online_cpus):
        """A pin set referencing offline CPUs raises Invalid."""
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.flags(vcpu_pin_set="4-6")
        get_online_cpus.return_value = set([4, 5])
        self.assertRaises(exception.Invalid, drvr._get_vcpu_total)
|
|
|
|
    @mock.patch('nova.virt.libvirt.host.Host.get_online_cpus')
    def test_get_host_vcpus_libvirt_error(self, get_online_cpus):
        """A libvirt 'not supported' error with a multi-CPU pin set
        raises Invalid, since the set cannot be validated.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        not_supported_exc = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError,
            'this function is not supported by the connection driver:'
            ' virNodeNumOfDevices',
            error_code=fakelibvirt.VIR_ERR_NO_SUPPORT)
        self.flags(vcpu_pin_set="4-6")
        get_online_cpus.side_effect = not_supported_exc
        self.assertRaises(exception.Invalid, drvr._get_vcpu_total)
|
|
|
|
    @mock.patch('nova.virt.libvirt.host.Host.get_online_cpus')
    def test_get_host_vcpus_libvirt_error_success(self, get_online_cpus):
        """A libvirt 'not supported' error with a single-CPU pin set
        still succeeds and returns that one CPU.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        not_supported_exc = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError,
            'this function is not supported by the connection driver:'
            ' virNodeNumOfDevices',
            error_code=fakelibvirt.VIR_ERR_NO_SUPPORT)
        self.flags(vcpu_pin_set="1")
        get_online_cpus.side_effect = not_supported_exc
        expected_vcpus = 1
        vcpus = drvr._get_vcpu_total()
        self.assertEqual(expected_vcpus, vcpus)
|
|
|
|
@mock.patch('nova.virt.libvirt.host.Host.get_cpu_count')
|
|
def test_get_host_vcpus_after_hotplug(self, get_cpu_count):
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
get_cpu_count.return_value = 2
|
|
expected_vcpus = 2
|
|
vcpus = drvr._get_vcpu_total()
|
|
self.assertEqual(expected_vcpus, vcpus)
|
|
get_cpu_count.return_value = 3
|
|
expected_vcpus = 3
|
|
vcpus = drvr._get_vcpu_total()
|
|
self.assertEqual(expected_vcpus, vcpus)
|
|
|
|
    @mock.patch.object(host.Host, "has_min_version", return_value=True)
    def test_quiesce(self, mock_has_min_version):
        """quiesce freezes guest filesystems via the qemu guest agent."""
        self.create_fake_libvirt_mock(lookupByUUIDString=self.fake_lookup)
        with mock.patch.object(FakeVirtDomain, "fsFreeze") as mock_fsfreeze:
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
            instance = objects.Instance(**self.test_instance)
            image_meta = objects.ImageMeta.from_dict(
                {"properties": {"hw_qemu_guest_agent": "yes"}})
            self.assertIsNone(drvr.quiesce(self.context, instance, image_meta))
            mock_fsfreeze.assert_called_once_with()
|
|
|
|
    @mock.patch.object(host.Host, "has_min_version", return_value=True)
    def test_unquiesce(self, mock_has_min_version):
        """unquiesce thaws guest filesystems via the qemu guest agent."""
        self.create_fake_libvirt_mock(getLibVersion=lambda: 1002005,
                                      lookupByUUIDString=self.fake_lookup)
        with mock.patch.object(FakeVirtDomain, "fsThaw") as mock_fsthaw:
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
            instance = objects.Instance(**self.test_instance)
            image_meta = objects.ImageMeta.from_dict(
                {"properties": {"hw_qemu_guest_agent": "yes"}})
            self.assertIsNone(drvr.unquiesce(self.context, instance,
                                             image_meta))
            mock_fsthaw.assert_called_once_with()
|
|
|
|
    def test_create_snapshot_metadata(self):
        """Snapshot metadata mirrors the instance fields and the base
        image's disk/container format, falling back to raw/bare.
        """
        base = objects.ImageMeta.from_dict(
            {'disk_format': 'raw'})
        instance_data = {'kernel_id': 'kernel',
                         'project_id': 'prj_id',
                         'ramdisk_id': 'ram_id',
                         'os_type': None}
        instance = objects.Instance(**instance_data)
        img_fmt = 'raw'
        snp_name = 'snapshot_name'
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        ret = drvr._create_snapshot_metadata(base, instance, img_fmt, snp_name)
        expected = {'status': 'active',
                    'name': snp_name,
                    'properties': {
                        'kernel_id': instance['kernel_id'],
                        'image_location': 'snapshot',
                        'image_state': 'available',
                        'owner_id': instance['project_id'],
                        'ramdisk_id': instance['ramdisk_id'],
                        },
                    'disk_format': img_fmt,
                    'container_format': 'bare',
                    }
        self.assertEqual(ret, expected)

        # simulate an instance with os_type field defined
        # disk format equals to ami
        # container format not equals to bare
        instance['os_type'] = 'linux'
        base = objects.ImageMeta.from_dict(
            {'disk_format': 'ami',
             'container_format': 'test_container'})
        expected['properties']['os_type'] = instance['os_type']
        expected['disk_format'] = base.disk_format
        expected['container_format'] = base.container_format
        ret = drvr._create_snapshot_metadata(base, instance, img_fmt, snp_name)
        self.assertEqual(ret, expected)
|
|
|
|
def test_get_volume_driver(self):
|
|
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
connection_info = {'driver_volume_type': 'fake',
|
|
'data': {'device_path': '/fake',
|
|
'access_mode': 'rw'}}
|
|
driver = conn._get_volume_driver(connection_info)
|
|
result = isinstance(driver, volume_drivers.LibvirtFakeVolumeDriver)
|
|
self.assertTrue(result)
|
|
|
|
    def test_get_volume_driver_unknown(self):
        """An unknown driver_volume_type raises VolumeDriverNotFound."""
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        connection_info = {'driver_volume_type': 'unknown',
                           'data': {'device_path': '/fake',
                                    'access_mode': 'rw'}}
        self.assertRaises(
            exception.VolumeDriverNotFound,
            conn._get_volume_driver,
            connection_info
        )
|
|
|
|
    def _fake_libvirt_config_guest_disk(self):
        """Build a canned network-backed guest disk config used as the
        expected value in the volume-config tests.
        """
        fake_config = vconfig.LibvirtConfigGuestDisk()
        fake_config.source_type = "network"
        fake_config.source_device = "fake-type"
        fake_config.driver_name = "qemu"
        fake_config.driver_format = "raw"
        fake_config.driver_cache = "none"
        fake_config.source_protocol = "fake"
        fake_config.source_name = "fake"
        fake_config.target_bus = "fake-bus"
        fake_config.target_dev = "vdb"

        return fake_config
|
|
|
|
    @mock.patch.object(volume_drivers.LibvirtFakeVolumeDriver, 'get_config')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_set_cache_mode')
    def test_get_volume_config(self, _set_cache_mode, get_config):
        """_get_volume_config delegates to the volume driver's get_config
        and applies the cache mode to the returned config.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        connection_info = {'driver_volume_type': 'fake',
                           'data': {'device_path': '/fake',
                                    'access_mode': 'rw'}}
        disk_info = {'bus': 'fake-bus', 'type': 'fake-type',
                     'dev': 'vdb'}
        config_guest_disk = self._fake_libvirt_config_guest_disk()

        # Deep-copy so the original stays pristine for the XML comparison.
        get_config.return_value = copy.deepcopy(config_guest_disk)
        config = drvr._get_volume_config(connection_info, disk_info)
        get_config.assert_called_once_with(connection_info, disk_info)
        _set_cache_mode.assert_called_once_with(config)
        self.assertEqual(config_guest_disk.to_xml(), config.to_xml())
|
|
|
|
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_get_volume_driver')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_attach_encryptor')
    def test_connect_volume_encryption_success(
            self, mock_attach_encryptor, mock_get_volume_driver):
        """A successful encrypted attach connects the volume, attaches the
        encryptor and never rolls back with disconnect_volume.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        mock_volume_driver = mock.MagicMock(
            spec=volume_drivers.LibvirtBaseVolumeDriver)
        mock_get_volume_driver.return_value = mock_volume_driver

        connection_info = {'driver_volume_type': 'fake',
                           'data': {'device_path': '/fake',
                                    'access_mode': 'rw',
                                    'volume_id': uuids.volume_id}}
        encryption = {'provider': encryptors.LUKS,
                      'encryption_key_id': uuids.encryption_key_id}
        instance = mock.sentinel.instance

        drvr._connect_volume(self.context, connection_info, instance,
                             encryption=encryption)

        mock_get_volume_driver.assert_called_once_with(connection_info)
        mock_volume_driver.connect_volume.assert_called_once_with(
            connection_info, instance)
        mock_attach_encryptor.assert_called_once_with(
            self.context, connection_info, encryption, True)
        mock_volume_driver.disconnect_volume.assert_not_called()
|
|
|
|
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_get_volume_driver')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_attach_encryptor')
    def test_connect_volume_encryption_fail(
            self, mock_attach_encryptor, mock_get_volume_driver):
        """If attaching the encryptor fails, the volume is disconnected
        again and the original exception is re-raised.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        mock_volume_driver = mock.MagicMock(
            spec=volume_drivers.LibvirtBaseVolumeDriver)
        mock_get_volume_driver.return_value = mock_volume_driver

        connection_info = {'driver_volume_type': 'fake',
                           'data': {'device_path': '/fake',
                                    'access_mode': 'rw',
                                    'volume_id': uuids.volume_id}}
        encryption = {'provider': encryptors.LUKS,
                      'encryption_key_id': uuids.encryption_key_id}
        instance = mock.sentinel.instance
        mock_attach_encryptor.side_effect = processutils.ProcessExecutionError

        self.assertRaises(processutils.ProcessExecutionError,
                          drvr._connect_volume,
                          self.context, connection_info, instance,
                          encryption=encryption)

        mock_get_volume_driver.assert_called_once_with(connection_info)
        mock_volume_driver.connect_volume.assert_called_once_with(
            connection_info, instance)
        mock_attach_encryptor.assert_called_once_with(
            self.context, connection_info, encryption, True)
        mock_volume_driver.disconnect_volume.assert_called_once_with(
            connection_info, instance)
|
|
|
|
@mock.patch.object(key_manager, 'API')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_get_volume_encryption')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_use_native_luks')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_get_volume_encryptor')
@mock.patch('nova.virt.libvirt.host.Host')
@mock.patch('os_brick.encryptors.luks.is_luks')
def test_connect_volume_native_luks(self, mock_is_luks, mock_host,
        mock_get_volume_encryptor, mock_use_native_luks,
        mock_get_volume_encryption, mock_get_key_mgr):
    """Exercise the four native-LUKS decision paths of _connect_volume:

    * native LUKS enabled + volume already LUKS: a libvirt secret holding
      the passphrase is created and no os-brick encryptor is used;
    * native LUKS disabled: the os-brick encryptor attaches the volume;
    * native LUKS enabled but the volume is not yet LUKS formatted: the
      encryptor's _format_volume is invoked with the passphrase;
    * allow_native_luks=False forces the os-brick path regardless.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    connection_info = {'driver_volume_type': 'fake',
                       'data': {'device_path': '/fake',
                                'access_mode': 'rw',
                                'volume_id': uuids.volume_id}}
    encryption = {'provider': encryptors.LUKS,
                  'encryption_key_id': uuids.encryption_key_id}
    instance = mock.sentinel.instance

    # Mock out the encryptors
    mock_encryptor = mock.Mock()
    mock_get_volume_encryptor.return_value = mock_encryptor
    mock_is_luks.return_value = True

    # Mock out the key manager; the hex string is the passphrase the
    # driver is expected to hand to libvirt as the secret value.
    key = u'3734363537333734'
    key_encoded = binascii.unhexlify(key)
    mock_key = mock.Mock()
    mock_key_mgr = mock.Mock()
    mock_get_key_mgr.return_value = mock_key_mgr
    mock_key_mgr.get.return_value = mock_key
    mock_key.get_encoded.return_value = key_encoded

    # assert that the secret is created for the encrypted volume during
    # _connect_volume when use_native_luks is True
    mock_get_volume_encryption.return_value = encryption
    mock_use_native_luks.return_value = True

    drvr._connect_volume(self.context, connection_info, instance,
                         encryption=encryption)
    drvr._host.create_secret.assert_called_once_with('volume',
                                                     uuids.volume_id,
                                                     password=key)
    mock_encryptor.attach_volume.assert_not_called()

    # assert that the encryptor is used if use_native_luks is False
    drvr._host.create_secret.reset_mock()
    mock_get_volume_encryption.reset_mock()
    mock_use_native_luks.return_value = False

    drvr._connect_volume(self.context, connection_info, instance,
                         encryption=encryption)
    drvr._host.create_secret.assert_not_called()
    mock_encryptor.attach_volume.assert_called_once_with(self.context,
                                                         **encryption)

    # assert that we format the volume if is_luks is False
    mock_use_native_luks.return_value = True
    mock_is_luks.return_value = False

    drvr._connect_volume(self.context, connection_info, instance,
                         encryption=encryption)
    mock_encryptor._format_volume.assert_called_once_with(key,
                                                          **encryption)

    # assert that os-brick is used when allow_native_luks is False
    mock_encryptor.attach_volume.reset_mock()
    mock_is_luks.return_value = True

    drvr._connect_volume(self.context, connection_info, instance,
                         encryption=encryption, allow_native_luks=False)
    mock_encryptor.attach_volume.assert_called_once_with(self.context,
                                                         **encryption)
|
|
|
|
@mock.patch.object(libvirt_driver.LibvirtDriver, '_get_volume_encryptor')
def test_disconnect_volume_native_luks(self, mock_get_volume_encryptor):
    """Assert that native-LUKS disconnect deletes the libvirt secret when
    one exists and only falls back to the os-brick encryptor when no
    secret is found on the host.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    drvr._host = mock.Mock()
    drvr._host.find_secret.return_value = mock.Mock()
    connection_info = {'driver_volume_type': 'fake',
                       'data': {'device_path': '/fake',
                                'access_mode': 'rw',
                                'volume_id': uuids.volume_id}}
    encryption = {'provider': encryptors.LUKS,
                  'encryption_key_id': uuids.encryption_key_id}
    instance = mock.sentinel.instance

    # Mock out the encryptors
    mock_encryptor = mock.Mock()
    mock_get_volume_encryptor.return_value = mock_encryptor

    # assert that a secret is deleted if found
    drvr._disconnect_volume(self.context, connection_info, instance)
    drvr._host.delete_secret.assert_called_once_with('volume',
                                                     uuids.volume_id)
    mock_encryptor.detach_volume.assert_not_called()

    # assert that the encryptor is used if no secret is found
    drvr._host.find_secret.reset_mock()
    drvr._host.delete_secret.reset_mock()
    drvr._host.find_secret.return_value = None

    drvr._disconnect_volume(self.context, connection_info, instance,
                            encryption=encryption)
    drvr._host.delete_secret.assert_not_called()
    # Bug fix: the original invoked ``detach_volume.called_once_with``,
    # which on a Mock is just an auto-created child attribute and never
    # asserts anything; use the real assertion so a regression in the
    # fallback path actually fails this test.
    mock_encryptor.detach_volume.assert_called_once_with(self.context,
                                                         **encryption)
|
|
|
|
@mock.patch.object(libvirt_driver.LibvirtDriver, '_detach_encryptor')
@mock.patch('nova.objects.InstanceList.get_uuids_by_host')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_driver')
@mock.patch('nova.volume.cinder.API.get')
def test_disconnect_multiattach_single_connection(
        self, mock_volume_get, mock_get_volume_driver,
        mock_get_instances, mock_detach_encryptor):
    """Assert that a multiattach volume IS physically disconnected when
    only one of its attachments belongs to an instance on this host.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    mock_volume_driver = mock.MagicMock(
        spec=volume_drivers.LibvirtBaseVolumeDriver)
    mock_get_volume_driver.return_value = mock_volume_driver

    # Two attachments exist in Cinder for the volume...
    attachments = (
        [('70ab645f-6ffc-406a-b3d2-5007a0c01b82',
          {'mountpoint': u'/dev/vdb',
           'attachment_id': u'9402c249-99df-4f72-89e7-fd611493ee5d'}),
         ('00803490-f768-4049-aa7d-151f54e6311e',
          {'mountpoint': u'/dev/vdb',
           'attachment_id': u'd6128a7b-19c8-4a3e-8036-011396df95ac'})])

    mock_volume_get.return_value = (
        {'attachments': OrderedDict(attachments), 'multiattach': True,
         'id': 'd30559cf-f092-4693-8589-0d0a1e7d9b1f'})

    fake_connection_info = {
        'multiattach': True,
        'volume_id': 'd30559cf-f092-4693-8589-0d0a1e7d9b1f'}
    fake_instance_1 = fake_instance.fake_instance_obj(
        self.context,
        host='fake-host-1')

    # ...but only one of the attached instances lives on this host, so
    # no other local instance still needs the connection.
    mock_get_instances.return_value = (
        ['00803490-f768-4049-aa7d-151f54e6311e'])
    drvr._disconnect_volume(
        self.context, fake_connection_info, fake_instance_1)
    mock_volume_driver.disconnect_volume.assert_called_once_with(
        fake_connection_info, fake_instance_1)
|
|
|
|
@mock.patch.object(libvirt_driver.LibvirtDriver, '_detach_encryptor')
@mock.patch('nova.objects.InstanceList.get_uuids_by_host')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_driver')
@mock.patch('nova.volume.cinder.API.get')
def test_disconnect_multiattach_multi_connection(
        self, mock_volume_get, mock_get_volume_driver,
        mock_get_instances, mock_detach_encryptor):
    """Assert that a multiattach volume is NOT physically disconnected
    while another instance on this host still has an attachment to it.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    mock_volume_driver = mock.MagicMock(
        spec=volume_drivers.LibvirtBaseVolumeDriver)
    mock_get_volume_driver.return_value = mock_volume_driver

    attachments = (
        [('70ab645f-6ffc-406a-b3d2-5007a0c01b82',
          {'mountpoint': u'/dev/vdb',
           'attachment_id': u'9402c249-99df-4f72-89e7-fd611493ee5d'}),
         ('00803490-f768-4049-aa7d-151f54e6311e',
          {'mountpoint': u'/dev/vdb',
           'attachment_id': u'd6128a7b-19c8-4a3e-8036-011396df95ac'})])

    mock_volume_get.return_value = (
        {'attachments': OrderedDict(attachments), 'multiattach': True,
         'id': 'd30559cf-f092-4693-8589-0d0a1e7d9b1f'})

    fake_connection_info = {
        'multiattach': True,
        'volume_id': 'd30559cf-f092-4693-8589-0d0a1e7d9b1f'}
    fake_instance_1 = fake_instance.fake_instance_obj(
        self.context,
        host='fake-host-1')

    # Both attached instances live on this host, so the shared host
    # connection must be kept alive for the remaining instance.
    mock_get_instances.return_value = (
        ['00803490-f768-4049-aa7d-151f54e6311e',
         '70ab645f-6ffc-406a-b3d2-5007a0c01b82'])
    drvr._disconnect_volume(
        self.context, fake_connection_info, fake_instance_1)
    mock_volume_driver.disconnect_volume.assert_not_called()
|
|
|
|
def test_attach_invalid_volume_type(self):
    """Attaching a volume with an unknown driver_volume_type raises
    VolumeDriverNotFound.
    """
    self.create_fake_libvirt_mock()
    libvirt_driver.LibvirtDriver._conn.lookupByUUIDString = (
        self.fake_lookup)
    test_inst = objects.Instance(**self.test_instance)
    driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    bad_connection_info = {"driver_volume_type": "badtype"}
    self.assertRaises(
        exception.VolumeDriverNotFound,
        driver.attach_volume,
        None, bad_connection_info, test_inst, "/dev/sda")
|
|
|
|
def test_attach_blockio_invalid_hypervisor(self):
    """Block-IO tuning (logical/physical block size) is rejected with
    InvalidHypervisorType on virt types that do not support it (lxc).
    """
    self.flags(virt_type='lxc', group='libvirt')
    self.create_fake_libvirt_mock()
    libvirt_driver.LibvirtDriver._conn.lookupByUUIDString = (
        self.fake_lookup)
    test_inst = objects.Instance(**self.test_instance)
    driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    blockio_connection_info = {
        "driver_volume_type": "fake",
        "data": {"logical_block_size": "4096",
                 "physical_block_size": "4096"},
    }
    self.assertRaises(
        exception.InvalidHypervisorType,
        driver.attach_volume,
        None, blockio_connection_info, test_inst, "/dev/sda")
|
|
|
|
def _test_check_discard(self, mock_log, driver_discard=None,
                        bus=None, should_log=False):
    """Helper for the check-discard tests below.

    Builds a fake disk config with the given ``driver_discard`` setting
    and ``bus``, runs _check_discard_for_attach_volume and asserts
    whether the driver logged (``should_log``) about an unsupported
    discard/bus combination.

    :param mock_log: mocked LOG.debug passed in by the callers
    :param driver_discard: value for the disk config's driver_discard
    :param bus: value for the disk config's target_bus
    :param should_log: whether LOG.debug is expected to be called
    """
    mock_config = mock.Mock()
    mock_config.driver_discard = driver_discard
    mock_config.target_bus = bus
    mock_instance = mock.Mock()
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    drvr._check_discard_for_attach_volume(mock_config, mock_instance)
    self.assertEqual(should_log, mock_log.called)
|
|
|
|
@mock.patch('nova.virt.libvirt.driver.LOG.debug')
def test_check_discard_for_attach_volume_no_unmap(self, mock_log):
    # No discard requested on a scsi bus: nothing should be logged.
    self._test_check_discard(
        mock_log, driver_discard=None, bus='scsi', should_log=False)
|
|
|
|
@mock.patch('nova.virt.libvirt.driver.LOG.debug')
def test_check_discard_for_attach_volume_blk_controller(self, mock_log):
    # discard=unmap on a virtio (blk) bus: the driver should log it.
    self._test_check_discard(
        mock_log, driver_discard='unmap', bus='virtio', should_log=True)
|
|
|
|
@mock.patch('nova.virt.libvirt.driver.LOG.debug')
def test_check_discard_for_attach_volume_valid_controller(self, mock_log):
    # discard=unmap on a scsi bus is a supported combination: no log.
    self._test_check_discard(
        mock_log, driver_discard='unmap', bus='scsi', should_log=False)
|
|
|
|
@mock.patch('nova.virt.libvirt.driver.LOG.debug')
def test_check_discard_for_attach_volume_blk_controller_no_unmap(
        self, mock_log):
    # virtio bus but no discard requested: nothing should be logged.
    self._test_check_discard(
        mock_log, driver_discard=None, bus='virtio', should_log=False)
|
|
|
|
@mock.patch('nova.virt.libvirt.blockinfo.get_info_from_bdm')
def test_attach_volume_with_libvirt_bug_breadcrumb(self, mock_get_info):
    """Assert that when attach_device fails with the known libvirt/QEMU
    'Incorrect number of padding bytes' error, the driver logs a warning
    breadcrumb, disconnects the volume and re-raises the libvirt error.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    connection_info = {"driver_volume_type": "fake",
                       "data": {"device_path": "/fake",
                                "access_mode": "rw"}}
    bdm = {'device_name': 'vdb',
           'disk_bus': 'fake-bus',
           'device_type': 'fake-type'}
    disk_info = {'bus': bdm['disk_bus'], 'type': bdm['device_type'],
                 'dev': 'vdb'}
    # The specific error message the driver pattern-matches on.
    libvirt_exc = fakelibvirt.make_libvirtError(fakelibvirt.libvirtError,
        "unable to execute QEMU command 'object-add': Incorrect number"
        " of padding bytes (56) found on decrypted data",
        error_code=fakelibvirt.VIR_ERR_INTERNAL_ERROR)

    with test.nested(
        mock.patch.object(drvr._host, 'get_guest'),
        mock.patch('nova.virt.libvirt.driver.LOG'),
        mock.patch.object(drvr, '_connect_volume'),
        mock.patch.object(drvr, '_disconnect_volume'),
        mock.patch.object(drvr, '_get_volume_config'),
        mock.patch.object(drvr, '_check_discard_for_attach_volume'),
        mock.patch.object(drvr, '_build_device_metadata'),
    ) as (mock_get_guest, mock_log, mock_connect_volume,
          mock_disconnect_volume, mock_get_volume_config,
          mock_check_discard, mock_build_metadata):

        mock_conf = mock.MagicMock()
        mock_guest = mock.MagicMock()
        mock_guest.attach_device.side_effect = libvirt_exc
        mock_get_volume_config.return_value = mock_conf
        mock_get_guest.return_value = mock_guest
        mock_get_info.return_value = disk_info
        mock_build_metadata.return_value = objects.InstanceDeviceMetadata()

        self.assertRaises(fakelibvirt.libvirtError, drvr.attach_volume,
            self.context, connection_info, instance, "/dev/vdb",
            disk_bus=bdm['disk_bus'], device_type=bdm['device_type'])
        # breadcrumb warning + cleanup of the half-done attach
        mock_log.warning.assert_called_once()
        mock_disconnect_volume.assert_called_once()
|
|
|
|
@mock.patch('nova.virt.libvirt.blockinfo.get_info_from_bdm')
def test_attach_volume_with_libvirt_exception(self, mock_get_info):
    """Assert that a generic libvirt failure from attach_device is logged
    via LOG.exception, the volume is disconnected and the libvirt error
    is re-raised to the caller.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    connection_info = {"driver_volume_type": "fake",
                       "data": {"device_path": "/fake",
                                "access_mode": "rw"}}
    bdm = {'device_name': 'vdb',
           'disk_bus': 'fake-bus',
           'device_type': 'fake-type'}
    disk_info = {'bus': bdm['disk_bus'], 'type': bdm['device_type'],
                 'dev': 'vdb'}
    libvirt_exc = fakelibvirt.make_libvirtError(fakelibvirt.libvirtError,
        "Target vdb already exists', device is busy",
        error_code=fakelibvirt.VIR_ERR_INTERNAL_ERROR)

    with test.nested(
        mock.patch.object(drvr._host, 'get_guest'),
        mock.patch('nova.virt.libvirt.driver.LOG'),
        mock.patch.object(drvr, '_connect_volume'),
        mock.patch.object(drvr, '_disconnect_volume'),
        mock.patch.object(drvr, '_get_volume_config'),
        mock.patch.object(drvr, '_check_discard_for_attach_volume'),
        mock.patch.object(drvr, '_build_device_metadata'),
    ) as (mock_get_guest, mock_log, mock_connect_volume,
          mock_disconnect_volume, mock_get_volume_config,
          mock_check_discard, mock_build_metadata):

        mock_conf = mock.MagicMock()
        mock_guest = mock.MagicMock()
        mock_guest.attach_device.side_effect = libvirt_exc
        mock_get_volume_config.return_value = mock_conf
        mock_get_guest.return_value = mock_guest
        mock_get_info.return_value = disk_info
        mock_build_metadata.return_value = objects.InstanceDeviceMetadata()

        self.assertRaises(fakelibvirt.libvirtError, drvr.attach_volume,
            self.context, connection_info, instance, "/dev/vdb",
            disk_bus=bdm['disk_bus'], device_type=bdm['device_type'])
        mock_log.exception.assert_called_once_with(u'Failed to attach '
            'volume at mountpoint: %s', '/dev/vdb', instance=instance)
        mock_disconnect_volume.assert_called_once()
|
|
|
|
@mock.patch('nova.utils.get_image_from_system_metadata')
@mock.patch('nova.virt.libvirt.blockinfo.get_info_from_bdm')
@mock.patch('nova.virt.libvirt.host.Host._get_domain')
def test_attach_volume_with_vir_domain_affect_live_flag(self,
        mock_get_domain, mock_get_info, get_image):
    """Assert that attach_volume attaches the device with both
    VIR_DOMAIN_AFFECT_CONFIG and VIR_DOMAIN_AFFECT_LIVE for running and
    paused guests, and drives the connect/config/metadata helpers with
    the expected arguments.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    image_meta = {}
    get_image.return_value = image_meta
    mock_dom = mock.MagicMock()
    mock_get_domain.return_value = mock_dom

    connection_info = {"driver_volume_type": "fake",
                       "data": {"device_path": "/fake",
                                "access_mode": "rw"}}
    bdm = {'device_name': 'vdb',
           'disk_bus': 'fake-bus',
           'device_type': 'fake-type'}
    disk_info = {'bus': bdm['disk_bus'], 'type': bdm['device_type'],
                 'dev': 'vdb'}
    mock_get_info.return_value = disk_info
    mock_conf = mock.MagicMock()
    # Live guests must get the device in both the persistent config and
    # the live domain.
    flags = (fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
             fakelibvirt.VIR_DOMAIN_AFFECT_LIVE)

    with test.nested(
        mock.patch.object(drvr, '_connect_volume'),
        mock.patch.object(drvr, '_get_volume_config',
                          return_value=mock_conf),
        mock.patch.object(drvr, '_check_discard_for_attach_volume'),
        mock.patch.object(drvr, '_build_device_metadata'),
        mock.patch.object(objects.Instance, 'save')
    ) as (mock_connect_volume, mock_get_volume_config, mock_check_discard,
          mock_build_metadata, mock_save):
        for state in (power_state.RUNNING, power_state.PAUSED):
            mock_dom.info.return_value = [state, 512, 512, 2, 1234, 5678]
            mock_build_metadata.return_value = \
                objects.InstanceDeviceMetadata()

            drvr.attach_volume(self.context, connection_info, instance,
                               "/dev/vdb", disk_bus=bdm['disk_bus'],
                               device_type=bdm['device_type'])

            mock_get_domain.assert_called_with(instance)
            mock_get_info.assert_called_with(
                instance,
                CONF.libvirt.virt_type,
                test.MatchType(objects.ImageMeta),
                bdm)
            mock_connect_volume.assert_called_with(
                self.context, connection_info, instance, encryption=None)
            mock_get_volume_config.assert_called_with(
                connection_info, disk_info)
            mock_dom.attachDeviceFlags.assert_called_with(
                mock_conf.to_xml(), flags=flags)
            mock_check_discard.assert_called_with(mock_conf, instance)
            mock_build_metadata.assert_called_with(self.context, instance)
            mock_save.assert_called_with()
|
|
|
|
@mock.patch('nova.virt.libvirt.host.Host._get_domain')
def test_detach_volume_with_vir_domain_affect_live_flag(self,
        mock_get_domain):
    """Assert that detach_volume detaches the disk with both CONFIG and
    LIVE flags and then disconnects the volume, for running and paused
    guests.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    mock_xml_with_disk = """<domain>
  <devices>
    <disk type='file'>
      <source file='/path/to/fake-volume'/>
      <target dev='vdc' bus='virtio'/>
    </disk>
  </devices>
</domain>"""
    mock_xml_without_disk = """<domain>
  <devices>
  </devices>
</domain>"""
    mock_dom = mock.MagicMock()

    # Second time don't return anything about disk vdc so it looks removed
    return_list = [mock_xml_with_disk, mock_xml_without_disk,
                   mock_xml_without_disk]
    # Doubling the size of return list because we test with two guest power
    # states
    mock_dom.XMLDesc.side_effect = return_list + return_list

    connection_info = {"driver_volume_type": "fake",
                       "data": {"device_path": "/fake",
                                "access_mode": "rw"}}
    flags = (fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
             fakelibvirt.VIR_DOMAIN_AFFECT_LIVE)

    with mock.patch.object(drvr, '_disconnect_volume') as \
            mock_disconnect_volume:
        for state in (power_state.RUNNING, power_state.PAUSED):
            mock_dom.info.return_value = [state, 512, 512, 2, 1234, 5678]
            mock_get_domain.return_value = mock_dom
            drvr.detach_volume(
                self.context, connection_info, instance, '/dev/vdc')

            mock_get_domain.assert_called_with(instance)
            # The XML handed to libvirt is re-serialized from the parsed
            # disk element, hence the attribute-order difference.
            mock_dom.detachDeviceFlags.assert_called_with(
                """<disk type="file" device="disk">
  <source file="/path/to/fake-volume"/>
  <target bus="virtio" dev="vdc"/>
</disk>
""", flags=flags)
            mock_disconnect_volume.assert_called_with(
                self.context, connection_info, instance, encryption=None)
|
|
|
|
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._disconnect_volume')
@mock.patch('nova.virt.libvirt.host.Host._get_domain')
def test_detach_volume_disk_not_found(self, mock_get_domain,
                                      mock_disconnect_volume):
    """Assert that detach_volume still disconnects the volume even when
    the disk is no longer present in the guest's domain XML.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    mock_xml_without_disk = """<domain>
  <devices>
  </devices>
</domain>"""
    mock_dom = mock.MagicMock(return_value=mock_xml_without_disk)

    connection_info = {"driver_volume_type": "fake",
                       "data": {"device_path": "/fake",
                                "access_mode": "rw"}}

    mock_dom.info.return_value = [power_state.RUNNING, 512, 512, 2, 1234,
                                  5678]
    mock_get_domain.return_value = mock_dom

    drvr.detach_volume(
        self.context, connection_info, instance, '/dev/vdc')

    mock_get_domain.assert_called_once_with(instance)
    # Host-side cleanup must happen regardless of the missing disk.
    mock_disconnect_volume.assert_called_once_with(
        self.context, connection_info, instance, encryption=None)
|
|
|
|
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_encryptor')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._disconnect_volume')
@mock.patch('nova.virt.libvirt.host.Host._get_domain')
def test_detach_volume_disk_not_found_encryption(self, mock_get_domain,
                                                 mock_disconnect_volume,
                                                 mock_get_encryptor):
    """Assert that when the disk is missing from the domain XML, the
    encryption metadata is still forwarded to _disconnect_volume so the
    encryptor can be cleaned up.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    mock_xml_without_disk = """<domain>
  <devices>
  </devices>
</domain>"""
    mock_dom = mock.MagicMock(return_value=mock_xml_without_disk)
    encryption = mock.MagicMock()

    connection_info = {"driver_volume_type": "fake",
                       "data": {"device_path": "/fake",
                                "access_mode": "rw"}}

    mock_dom.info.return_value = [power_state.RUNNING, 512, 512, 2, 1234,
                                  5678]
    mock_get_domain.return_value = mock_dom

    drvr.detach_volume(self.context, connection_info, instance,
                       '/dev/vdc', encryption)

    mock_disconnect_volume.assert_called_once_with(
        self.context, connection_info, instance, encryption=encryption)
|
|
|
|
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_driver')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_encryptor')
@mock.patch('nova.virt.libvirt.host.Host.get_guest')
def test_detach_volume_order_with_encryptors(self, mock_get_guest,
        mock_get_encryptor, mock_get_volume_driver):
    """Assert the ordering of detach operations for encrypted volumes:
    guest device detach first, then encryptor detach, then volume
    disconnect.
    """
    mock_volume_driver = mock.MagicMock(
        spec=volume_drivers.LibvirtBaseVolumeDriver)
    mock_get_volume_driver.return_value = mock_volume_driver
    mock_guest = mock.MagicMock(spec=libvirt_guest.Guest)
    mock_guest.get_power_state.return_value = power_state.RUNNING
    mock_get_guest.return_value = mock_guest
    mock_encryptor = mock.MagicMock(
        spec=encryptors.nop.NoOpEncryptor)
    mock_get_encryptor.return_value = mock_encryptor

    # A single parent mock records the call order across the three
    # collaborators.  NOTE: detach_device_with_retry() is *called* here
    # because the driver invokes the callable it returns; attaching that
    # child mock is what lets mock.call.detach_volume() match below.
    mock_order = mock.Mock()
    mock_order.attach_mock(mock_volume_driver.disconnect_volume,
                           'disconnect_volume')
    mock_order.attach_mock(mock_guest.detach_device_with_retry(),
                           'detach_volume')
    mock_order.attach_mock(mock_encryptor.detach_volume,
                           'detach_encryptor')

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    connection_info = {"driver_volume_type": "fake",
                       "data": {"device_path": "/fake",
                                "access_mode": "rw"}}
    encryption = {"provider": "NoOpEncryptor"}
    drvr.detach_volume(
        self.context, connection_info, instance, '/dev/vdc',
        encryption=encryption)

    mock_order.assert_has_calls([
        mock.call.detach_volume(),
        mock.call.detach_encryptor(**encryption),
        mock.call.disconnect_volume(connection_info, instance)])
|
|
|
|
def test_extend_volume(self):
    """Assert that extend_volume resizes the host block device and then
    the guest block device (in KiB) for running and paused guests.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    connection_info = {
        'driver_volume_type': 'fake',
        'data': {'device_path': '/fake',
                 'access_mode': 'rw'}
    }

    new_size_in_kb = 20 * 1024 * 1024

    guest = mock.Mock(spec=libvirt_guest.Guest)
    # block_device
    block_device = mock.Mock(spec=libvirt_guest.BlockDevice)
    block_device.resize = mock.Mock()
    guest.get_block_device = mock.Mock(return_value=block_device)
    drvr._host.get_guest = mock.Mock(return_value=guest)
    drvr._extend_volume = mock.Mock(return_value=new_size_in_kb)

    for state in (power_state.RUNNING, power_state.PAUSED):
        guest.get_power_state = mock.Mock(return_value=state)
        # extend_volume takes bytes; _extend_volume returns KiB.
        drvr.extend_volume(connection_info,
                           instance, new_size_in_kb * 1024)
        drvr._extend_volume.assert_called_with(connection_info,
                                               instance,
                                               new_size_in_kb * 1024)
        guest.get_block_device.assert_called_with('/fake')
        block_device.resize.assert_called_with(20480)
|
|
|
|
def test_extend_volume_with_volume_driver_without_support(self):
    """A volume driver raising NotImplementedError from _extend_volume
    surfaces as ExtendVolumeNotSupported.
    """
    driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    test_inst = objects.Instance(**self.test_instance)
    fake_connection_info = {'driver_volume_type': 'fake'}

    extend_patcher = mock.patch.object(
        driver, '_extend_volume', side_effect=NotImplementedError())
    with extend_patcher:
        self.assertRaises(
            exception.ExtendVolumeNotSupported, driver.extend_volume,
            fake_connection_info, test_inst, 0)
|
|
|
|
def test_extend_volume_disk_not_found(self):
    """Assert that extend_volume completes without error when the disk
    is not present in the guest's domain XML (no guest-side resize to
    perform).
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    connection_info = {
        'driver_volume_type': 'fake',
        'data': {'device_path': '/fake',
                 'access_mode': 'rw'}
    }
    new_size_in_kb = 20 * 1024 * 1024

    # Guest with no disks at all.
    xml_no_disk = "<domain><devices></devices></domain>"
    dom = fakelibvirt.Domain(drvr._get_connection(), xml_no_disk, False)
    guest = libvirt_guest.Guest(dom)
    guest.get_power_state = mock.Mock(return_value=power_state.RUNNING)
    drvr._host.get_guest = mock.Mock(return_value=guest)
    drvr._extend_volume = mock.Mock(return_value=new_size_in_kb)

    # Must not raise even though the device cannot be found.
    drvr.extend_volume(connection_info, instance, new_size_in_kb * 1024)
|
|
|
|
def test_extend_volume_with_instance_not_found(self):
    """Assert that InstanceNotFound from the domain lookup propagates out
    of extend_volume.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)

    with test.nested(
        mock.patch.object(host.Host, '_get_domain',
                          side_effect=exception.InstanceNotFound(
                              instance_id=instance.uuid)),
        mock.patch.object(drvr, '_extend_volume')
    ) as (_get_domain, _extend_volume):
        connection_info = {'driver_volume_type': 'fake'}
        self.assertRaises(exception.InstanceNotFound,
                          drvr.extend_volume,
                          connection_info, instance, 0)
|
|
|
|
def test_extend_volume_with_libvirt_error(self):
    """Assert that a libvirtError raised by the guest block-device resize
    propagates out of extend_volume.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    connection_info = {
        'driver_volume_type': 'fake',
        'data': {'device_path': '/fake',
                 'access_mode': 'rw'}
    }
    new_size_in_kb = 20 * 1024 * 1024

    guest = mock.Mock(spec=libvirt_guest.Guest)
    guest.get_power_state = mock.Mock(return_value=power_state.RUNNING)
    # block_device
    block_device = mock.Mock(spec=libvirt_guest.BlockDevice)
    block_device.resize = mock.Mock(
        side_effect=fakelibvirt.libvirtError('ERR'))
    guest.get_block_device = mock.Mock(return_value=block_device)
    drvr._host.get_guest = mock.Mock(return_value=guest)
    drvr._extend_volume = mock.Mock(return_value=new_size_in_kb)

    self.assertRaises(fakelibvirt.libvirtError,
                      drvr.extend_volume,
                      connection_info, instance, new_size_in_kb * 1024)
|
|
|
|
def test_extend_volume_with_no_device_path_attribute(self):
    """Assert that when connection_info has no device_path, the guest
    disk is located by matching the connection 'serial' against the
    disks' serials and resized via its target_dev.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    connection_info = {
        'serial': '58a84f6d-3f0c-4e19-a0af-eb657b790657',
        'driver_volume_type': 'fake',
        'data': {'cluster_name': 'fake',
                 'auth_enabled': False,
                 'volume_id': '58a84f6d-3f0c-4e19-a0af-eb657b790657',
                 'access_mode': 'rw'}
    }
    new_size_in_kb = 20 * 1024 * 1024

    guest = mock.Mock(spec=libvirt_guest.Guest)
    # block_device
    block_device = mock.Mock(spec=libvirt_guest.BlockDevice)
    block_device.resize = mock.Mock()
    # Disk whose serial matches the connection_info serial above.
    disk = mock.Mock(
        spec=vconfig.LibvirtConfigGuestDisk,
        serial='58a84f6d-3f0c-4e19-a0af-eb657b790657',
        target_dev='vdb')
    guest.get_block_device = mock.Mock(return_value=block_device)
    guest.get_all_disks = mock.Mock(return_value=[disk])
    drvr._host.get_guest = mock.Mock(return_value=guest)
    drvr._extend_volume = mock.Mock(return_value=new_size_in_kb)

    for state in (power_state.RUNNING, power_state.PAUSED):
        guest.get_power_state = mock.Mock(return_value=state)
        drvr.extend_volume(connection_info, instance,
                           new_size_in_kb * 1024)
        drvr._extend_volume.assert_called_with(connection_info,
                                               instance,
                                               new_size_in_kb * 1024)
        # Looked up by target_dev, not by a device path.
        guest.get_block_device.assert_called_with('vdb')
        block_device.resize.assert_called_with(20480)
|
|
|
|
def test_extend_volume_no_disk_found_by_serial(self):
    """Assert that VolumeNotFound is raised when no guest disk's serial
    matches the connection_info serial and no device_path is available.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    connection_info = {
        'serial': '58a84f6d-3f0c-4e19-a0af-eb657b790657',
        'driver_volume_type': 'fake',
        'data': {'cluster_name': 'fake',
                 'auth_enabled': False,
                 'volume_id': '58a84f6d-3f0c-4e19-a0af-eb657b790657',
                 'access_mode': 'rw'}
    }
    new_size_in_kb = 20 * 1024 * 1024

    guest = mock.Mock(spec=libvirt_guest.Guest)
    # block_device
    block_device = mock.Mock(spec=libvirt_guest.BlockDevice)
    block_device.resize = mock.Mock()
    # The only disk present has a different serial than the connection.
    disk = mock.Mock(
        spec=vconfig.LibvirtConfigGuestDisk,
        serial='12345678-abcd-abcd-abcd-0123456789012',
        target_dev='vdb')
    guest.get_block_device = mock.Mock(return_value=block_device)
    guest.get_all_disks = mock.Mock(return_value=[disk])
    drvr._host.get_guest = mock.Mock(return_value=guest)
    drvr._extend_volume = mock.Mock(return_value=new_size_in_kb)
    guest.get_power_state = mock.Mock(return_value=power_state.RUNNING)

    self.assertRaises(
        exception.VolumeNotFound,
        drvr.extend_volume,
        connection_info,
        instance,
        new_size_in_kb * 1024
    )
|
|
|
|
@mock.patch('os_brick.encryptors.get_encryption_metadata')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_encryptor')
def test_use_encryptor_connection_info_incomplete(self,
        mock_get_encryptor, mock_get_metadata):
    """Assert no attach attempt is made given incomplete connection_info.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    # No volume_id in 'data': the driver cannot look anything up.
    connection_info = {'data': {}}

    drvr._attach_encryptor(self.context, connection_info, None, False)

    mock_get_metadata.assert_not_called()
    mock_get_encryptor.assert_not_called()
|
|
|
|
@mock.patch('os_brick.encryptors.get_encryption_metadata')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_encryptor')
def test_attach_encryptor_unencrypted_volume_meta_missing(self,
        mock_get_encryptor, mock_get_metadata):
    """Assert that if not provided encryption metadata is fetched even
    if the volume is ultimately unencrypted and no attempt to attach
    is made.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    # Empty dict from the lookup == the volume is not encrypted.
    encryption = {}
    connection_info = {'data': {'volume_id': uuids.volume_id}}
    mock_get_metadata.return_value = encryption

    drvr._attach_encryptor(self.context, connection_info, None, False)

    mock_get_metadata.assert_called_once_with(self.context,
        drvr._volume_api, uuids.volume_id, connection_info)
    mock_get_encryptor.assert_not_called()
|
|
|
|
@mock.patch('os_brick.encryptors.get_encryption_metadata')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_encryptor')
def test_attach_encryptor_unencrypted_volume_meta_provided(self,
        mock_get_encryptor, mock_get_metadata):
    """Assert that if an empty encryption metadata dict is provided that
    there is no additional attempt to lookup the metadata or attach the
    encryptor.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    encryption = {}
    connection_info = {'data': {'volume_id': uuids.volume_id}}

    drvr._attach_encryptor(self.context, connection_info, encryption,
                           False)

    # Caller-supplied (empty) metadata short-circuits both paths.
    mock_get_metadata.assert_not_called()
    mock_get_encryptor.assert_not_called()
|
|
|
|
@mock.patch('os_brick.encryptors.get_encryption_metadata')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_encryptor')
def test_attach_encryptor_encrypted_volume_meta_missing(self,
        mock_get_encryptor, mock_get_metadata):
    """Assert that if missing the encryption metadata of an encrypted
    volume is fetched and then used to attach the encryptor for the volume.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    mock_encryptor = mock.MagicMock()
    mock_get_encryptor.return_value = mock_encryptor
    encryption = {'provider': 'luks', 'control_location': 'front-end'}
    mock_get_metadata.return_value = encryption
    connection_info = {'data': {'volume_id': uuids.volume_id}}

    # encryption=None forces the metadata lookup.
    drvr._attach_encryptor(self.context, connection_info, None, False)

    mock_get_metadata.assert_called_once_with(self.context,
        drvr._volume_api, uuids.volume_id, connection_info)
    mock_get_encryptor.assert_called_once_with(connection_info,
                                               encryption)
    mock_encryptor.attach_volume.assert_called_once_with(self.context,
                                                         **encryption)
|
|
|
|
@mock.patch('os_brick.encryptors.get_encryption_metadata')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_encryptor')
def test_attach_encryptor_encrypted_volume_meta_provided(self,
        mock_get_encryptor, mock_get_metadata):
    """Assert that when provided there are no further attempts to fetch the
    encryption metadata for the volume and that the provided metadata is
    then used to attach the volume.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    mock_encryptor = mock.MagicMock()
    mock_get_encryptor.return_value = mock_encryptor
    encryption = {'provider': 'luks', 'control_location': 'front-end'}
    connection_info = {'data': {'volume_id': uuids.volume_id}}

    drvr._attach_encryptor(self.context, connection_info,
                           encryption, False)

    # Provided metadata is trusted: no lookup, straight to attach.
    mock_get_metadata.assert_not_called()
    mock_get_encryptor.assert_called_once_with(connection_info,
                                               encryption)
    mock_encryptor.attach_volume.assert_called_once_with(self.context,
                                                         **encryption)
|
|
|
|
    @mock.patch.object(key_manager, 'API')
    @mock.patch('os_brick.encryptors.get_encryption_metadata')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_encryptor')
    def test_attach_encryptor_encrypted_native_luks_serial(self,
            mock_get_encryptor, mock_get_metadata, mock_get_key_mgr):
        """Uses native luks encryption with a provider encryptor and the
        connection_info has a serial but not volume_id in the 'data'
        sub-dict.

        In this case a libvirt secret keyed on the serial must be created
        from the key manager's key instead of using an os-brick encryptor.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        mock_encryptor = mock.MagicMock()
        mock_get_encryptor.return_value = mock_encryptor
        encryption = {'provider': 'luks', 'control_location': 'front-end',
                      'encryption_key_id': uuids.encryption_key_id}
        # Note: no 'volume_id' under 'data' — only the top-level serial.
        connection_info = {'serial': uuids.serial, 'data': {}}
        # Mock out the key manager
        key = u'3734363537333734'
        key_encoded = binascii.unhexlify(key)
        mock_key = mock.Mock()
        mock_key_mgr = mock.Mock()
        mock_get_key_mgr.return_value = mock_key_mgr
        mock_key_mgr.get.return_value = mock_key
        mock_key.get_encoded.return_value = key_encoded

        # Force the native LUKS path and capture the secret creation.
        with mock.patch.object(drvr, '_use_native_luks', return_value=True):
            with mock.patch.object(drvr._host, 'create_secret') as crt_scrt:
                drvr._attach_encryptor(self.context, connection_info,
                                       encryption, allow_native_luks=True)

        # Native LUKS bypasses the os-brick encryptor entirely; the libvirt
        # secret is registered against the connection serial.
        mock_get_metadata.assert_not_called()
        mock_get_encryptor.assert_not_called()
        crt_scrt.assert_called_once_with(
            'volume', uuids.serial, password=key)
|
|
|
|
@mock.patch('os_brick.encryptors.get_encryption_metadata')
|
|
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_encryptor')
|
|
def test_detach_encryptor_connection_info_incomplete(self,
|
|
mock_get_encryptor, mock_get_metadata):
|
|
"""Assert no detach attempt is made given incomplete connection_info.
|
|
"""
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
connection_info = {'data': {}}
|
|
|
|
drvr._detach_encryptor(self.context, connection_info, None)
|
|
|
|
mock_get_metadata.assert_not_called()
|
|
mock_get_encryptor.assert_not_called()
|
|
|
|
@mock.patch('os_brick.encryptors.get_encryption_metadata')
|
|
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_encryptor')
|
|
def test_detach_encryptor_unencrypted_volume_meta_missing(self,
|
|
mock_get_encryptor, mock_get_metadata):
|
|
"""Assert that if not provided encryption metadata is fetched even
|
|
if the volume is ultimately unencrypted and no attempt to detach
|
|
is made.
|
|
"""
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
encryption = {}
|
|
connection_info = {'data': {'volume_id': uuids.volume_id}}
|
|
mock_get_metadata.return_value = encryption
|
|
|
|
drvr._detach_encryptor(self.context, connection_info, None)
|
|
|
|
mock_get_metadata.assert_called_once_with(self.context,
|
|
drvr._volume_api, uuids.volume_id, connection_info)
|
|
mock_get_encryptor.assert_not_called()
|
|
|
|
@mock.patch('os_brick.encryptors.get_encryption_metadata')
|
|
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_encryptor')
|
|
def test_detach_encryptor_unencrypted_volume_meta_provided(self,
|
|
mock_get_encryptor, mock_get_metadata):
|
|
"""Assert that if an empty encryption metadata dict is provided that
|
|
there is no additional attempt to lookup the metadata or detach the
|
|
encryptor.
|
|
"""
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
encryption = {}
|
|
connection_info = {'data': {'volume_id': uuids.volume_id}}
|
|
|
|
drvr._detach_encryptor(self.context, connection_info, encryption)
|
|
|
|
mock_get_metadata.assert_not_called()
|
|
mock_get_encryptor.assert_not_called()
|
|
|
|
@mock.patch('os_brick.encryptors.get_encryption_metadata')
|
|
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_encryptor')
|
|
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._use_native_luks')
|
|
def test_detach_encryptor_encrypted_volume_meta_missing(self,
|
|
mock_use_native_luks, mock_get_encryptor, mock_get_metadata):
|
|
"""Assert that if missing the encryption metadata of an encrypted
|
|
volume is fetched and then used to detach the encryptor for the volume.
|
|
"""
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
mock_encryptor = mock.MagicMock()
|
|
mock_get_encryptor.return_value = mock_encryptor
|
|
encryption = {'provider': 'luks', 'control_location': 'front-end'}
|
|
mock_get_metadata.return_value = encryption
|
|
connection_info = {'data': {'volume_id': uuids.volume_id}}
|
|
mock_use_native_luks.return_value = False
|
|
|
|
drvr._detach_encryptor(self.context, connection_info, None)
|
|
|
|
mock_get_metadata.assert_called_once_with(self.context,
|
|
drvr._volume_api, uuids.volume_id, connection_info)
|
|
mock_get_encryptor.assert_called_once_with(connection_info,
|
|
encryption)
|
|
mock_encryptor.detach_volume.assert_called_once_with(**encryption)
|
|
|
|
@mock.patch('os_brick.encryptors.get_encryption_metadata')
|
|
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_encryptor')
|
|
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._use_native_luks')
|
|
def test_detach_encryptor_encrypted_volume_meta_provided(self,
|
|
mock_use_native_luks, mock_get_encryptor, mock_get_metadata):
|
|
"""Assert that when provided there are no further attempts to fetch the
|
|
encryption metadata for the volume and that the provided metadata is
|
|
then used to detach the volume.
|
|
"""
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
mock_encryptor = mock.MagicMock()
|
|
mock_get_encryptor.return_value = mock_encryptor
|
|
encryption = {'provider': 'luks', 'control_location': 'front-end'}
|
|
connection_info = {'data': {'volume_id': uuids.volume_id}}
|
|
mock_use_native_luks.return_value = False
|
|
|
|
drvr._detach_encryptor(self.context, connection_info, encryption)
|
|
|
|
mock_get_metadata.assert_not_called()
|
|
mock_get_encryptor.assert_called_once_with(connection_info,
|
|
encryption)
|
|
mock_encryptor.detach_volume.assert_called_once_with(**encryption)
|
|
|
|
@mock.patch('nova.virt.libvirt.host.Host.find_secret')
|
|
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._use_native_luks')
|
|
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_encryptor')
|
|
def test_detach_encryptor_native_luks_device_path_secret_missing(self,
|
|
mock_get_encryptor, mock_use_native_luks, mock_find_secret):
|
|
"""Assert that the encryptor is not built when native LUKS is
|
|
available, the associated volume secret is missing and device_path is
|
|
also missing from the connection_info.
|
|
"""
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
encryption = {'provider': 'luks', 'control_location': 'front-end',
|
|
'encryption_key_id': uuids.encryption_key_id}
|
|
connection_info = {'data': {'volume_id': uuids.volume_id}}
|
|
mock_find_secret.return_value = False
|
|
mock_use_native_luks.return_value = True
|
|
|
|
drvr._detach_encryptor(self.context, connection_info, encryption)
|
|
|
|
mock_find_secret.assert_called_once_with('volume', uuids.volume_id)
|
|
mock_get_encryptor.assert_not_called()
|
|
|
|
@mock.patch.object(host.Host, "has_min_version")
|
|
def test_use_native_luks(self, mock_has_min_version):
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
# True only when the required QEMU and Libvirt versions are available
|
|
# on the host and a valid LUKS provider is present within the
|
|
# encryption metadata dict.
|
|
mock_has_min_version.return_value = True
|
|
self.assertFalse(drvr._use_native_luks({}))
|
|
self.assertFalse(drvr._use_native_luks({
|
|
'provider': 'nova.volume.encryptors.cryptsetup.CryptSetupEncryptor'
|
|
}))
|
|
self.assertFalse(drvr._use_native_luks({
|
|
'provider': 'CryptSetupEncryptor'}))
|
|
self.assertFalse(drvr._use_native_luks({
|
|
'provider': encryptors.PLAIN}))
|
|
self.assertTrue(drvr._use_native_luks({
|
|
'provider': 'nova.volume.encryptors.luks.LuksEncryptor'}))
|
|
self.assertTrue(drvr._use_native_luks({
|
|
'provider': 'LuksEncryptor'}))
|
|
self.assertTrue(drvr._use_native_luks({
|
|
'provider': encryptors.LUKS}))
|
|
|
|
# Always False when the required QEMU and Libvirt versions are not
|
|
# available on the host.
|
|
mock_has_min_version.return_value = False
|
|
self.assertFalse(drvr._use_native_luks({}))
|
|
self.assertFalse(drvr._use_native_luks({
|
|
'provider': 'nova.volume.encryptors.cryptsetup.CryptSetupEncryptor'
|
|
}))
|
|
self.assertFalse(drvr._use_native_luks({
|
|
'provider': 'CryptSetupEncryptor'}))
|
|
self.assertFalse(drvr._use_native_luks({
|
|
'provider': encryptors.PLAIN}))
|
|
self.assertFalse(drvr._use_native_luks({
|
|
'provider': 'nova.volume.encryptors.luks.LuksEncryptor'}))
|
|
self.assertFalse(drvr._use_native_luks({
|
|
'provider': 'LuksEncryptor'}))
|
|
self.assertFalse(drvr._use_native_luks({
|
|
'provider': encryptors.LUKS}))
|
|
|
|
def test_multi_nic(self):
|
|
network_info = _fake_network_info(self, 2)
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
instance_ref = objects.Instance(**self.test_instance)
|
|
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
|
|
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
|
|
instance_ref,
|
|
image_meta)
|
|
xml = drvr._get_guest_xml(self.context, instance_ref,
|
|
network_info, disk_info,
|
|
image_meta)
|
|
tree = etree.fromstring(xml)
|
|
interfaces = tree.findall("./devices/interface")
|
|
self.assertEqual(len(interfaces), 2)
|
|
self.assertEqual(interfaces[0].get('type'), 'bridge')
|
|
|
|
def _check_xml_and_container(self, instance):
|
|
instance_ref = objects.Instance(**instance)
|
|
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
|
|
|
|
self.flags(virt_type='lxc', group='libvirt')
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
|
|
self.assertEqual(drvr._uri(), 'lxc:///')
|
|
|
|
network_info = _fake_network_info(self, 1)
|
|
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
|
|
instance_ref,
|
|
image_meta)
|
|
|
|
xml = drvr._get_guest_xml(self.context, instance_ref,
|
|
network_info, disk_info,
|
|
image_meta)
|
|
tree = etree.fromstring(xml)
|
|
|
|
check = [
|
|
(lambda t: t.find('.').get('type'), 'lxc'),
|
|
(lambda t: t.find('./os/type').text, 'exe'),
|
|
(lambda t: t.find('./devices/filesystem/target').get('dir'), '/')]
|
|
|
|
for i, (check, expected_result) in enumerate(check):
|
|
self.assertEqual(check(tree),
|
|
expected_result,
|
|
'%s failed common check %d' % (xml, i))
|
|
|
|
target = tree.find('./devices/filesystem/source').get('dir')
|
|
self.assertGreater(len(target), 0)
|
|
|
|
    def _check_xml_and_disk_prefix(self, instance, prefix):
        """Verify the first disk's target dev name per virt_type.

        For each supported virt_type the generated guest XML must expose the
        first disk with the hypervisor's default device prefix ('vd', 'xvd',
        'ubd') unless ``prefix`` overrides it via the disk_prefix option.
        """
        instance_ref = objects.Instance(**instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        def _get_prefix(p, default):
            # An explicit prefix yields "<prefix>a" for the first disk;
            # otherwise fall back to the hypervisor default device name.
            if p:
                return p + 'a'
            return default

        # virt_type -> list of (extractor, expected value) checks.
        type_disk_map = {
            'qemu': [
                (lambda t: t.find('.').get('type'), 'qemu'),
                (lambda t: t.find('./devices/disk/target').get('dev'),
                 _get_prefix(prefix, 'vda'))],
            'xen': [
                (lambda t: t.find('.').get('type'), 'xen'),
                (lambda t: t.find('./devices/disk/target').get('dev'),
                 _get_prefix(prefix, 'xvda'))],
            'kvm': [
                (lambda t: t.find('.').get('type'), 'kvm'),
                (lambda t: t.find('./devices/disk/target').get('dev'),
                 _get_prefix(prefix, 'vda'))],
            'uml': [
                (lambda t: t.find('.').get('type'), 'uml'),
                (lambda t: t.find('./devices/disk/target').get('dev'),
                 _get_prefix(prefix, 'ubda'))]
            }

        for (virt_type, checks) in type_disk_map.items():
            self.flags(virt_type=virt_type, group='libvirt')
            if prefix:
                self.flags(disk_prefix=prefix, group='libvirt')
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

            network_info = _fake_network_info(self, 1)
            disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                                instance_ref,
                                                image_meta)

            xml = drvr._get_guest_xml(self.context, instance_ref,
                                      network_info, disk_info,
                                      image_meta)
            tree = etree.fromstring(xml)

            for i, (check, expected_result) in enumerate(checks):
                self.assertEqual(check(tree),
                                 expected_result,
                                 '%s != %s failed check %d' %
                                 (check(tree), expected_result, i))
|
|
|
|
    def _check_xml_and_disk_driver(self, image_meta):
        """Verify disk cache mode tracks O_DIRECT availability.

        With direct I/O supported every disk driver element must use
        cache="none"; when O_DIRECT is unavailable the driver must fall back
        to cache="writeback".

        NOTE(review): the image_meta parameter is re-assigned from
        self.test_image_meta below, so the argument is effectively unused —
        confirm whether callers rely on passing it.
        """
        os_open = os.open
        # Toggled below to simulate a host without O_DIRECT support.
        directio_supported = True

        def os_open_stub(path, flags, *args, **kwargs):
            # Reject O_DIRECT opens when direct I/O is "unsupported",
            # otherwise strip the flag and delegate to the real os.open.
            if flags & os.O_DIRECT:
                if not directio_supported:
                    raise OSError(errno.EINVAL,
                                  '%s: %s' % (os.strerror(errno.EINVAL), path))
                flags &= ~os.O_DIRECT
            return os_open(path, flags, *args, **kwargs)

        self.stub_out('os.open', os_open_stub)

        def connection_supports_direct_io_stub(dirpath):
            return directio_supported

        self.stub_out('nova.privsep.utils.supports_direct_io',
                      connection_supports_direct_io_stub)

        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        network_info = _fake_network_info(self, 1)

        drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)

        xml = drv._get_guest_xml(self.context, instance_ref,
                                 network_info, disk_info, image_meta)
        tree = etree.fromstring(xml)
        disks = tree.findall('./devices/disk/driver')
        for guest_disk in disks:
            self.assertEqual(guest_disk.get("cache"), "none")

        directio_supported = False

        # The O_DIRECT availability is cached on first use in
        # LibvirtDriver, hence we re-create it here
        drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        xml = drv._get_guest_xml(self.context, instance_ref,
                                 network_info, disk_info, image_meta)
        tree = etree.fromstring(xml)
        disks = tree.findall('./devices/disk/driver')
        for guest_disk in disks:
            self.assertEqual(guest_disk.get("cache"), "writeback")
|
|
|
|
def _check_xml_and_disk_bus(self, image_meta,
|
|
block_device_info, wantConfig):
|
|
instance_ref = objects.Instance(**self.test_instance)
|
|
network_info = _fake_network_info(self, 1)
|
|
|
|
drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
|
|
instance_ref,
|
|
image_meta,
|
|
block_device_info)
|
|
|
|
xml = drv._get_guest_xml(self.context, instance_ref,
|
|
network_info, disk_info, image_meta,
|
|
block_device_info=block_device_info)
|
|
tree = etree.fromstring(xml)
|
|
|
|
got_disks = tree.findall('./devices/disk')
|
|
got_disk_targets = tree.findall('./devices/disk/target')
|
|
for i in range(len(wantConfig)):
|
|
want_device_type = wantConfig[i][0]
|
|
want_device_bus = wantConfig[i][1]
|
|
want_device_dev = wantConfig[i][2]
|
|
|
|
got_device_type = got_disks[i].get('device')
|
|
got_device_bus = got_disk_targets[i].get('bus')
|
|
got_device_dev = got_disk_targets[i].get('dev')
|
|
|
|
self.assertEqual(got_device_type, want_device_type)
|
|
self.assertEqual(got_device_bus, want_device_bus)
|
|
self.assertEqual(got_device_dev, want_device_dev)
|
|
|
|
    def _check_xml_and_uuid(self, image_meta):
        """Generate guest XML and assert its <uuid> matches the instance.

        NOTE(review): the image_meta parameter is immediately shadowed by
        self.test_image_meta below, so the argument is effectively unused —
        confirm whether callers rely on passing it.
        """
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        network_info = _fake_network_info(self, 1)

        drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        xml = drv._get_guest_xml(self.context, instance_ref,
                                 network_info, disk_info, image_meta)
        tree = etree.fromstring(xml)
        self.assertEqual(tree.find('./uuid').text,
                         instance_ref['uuid'])
|
|
|
|
    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       "_get_host_sysinfo_serial_hardware",)
    def _check_xml_and_uri(self, instance, mock_serial,
                           expect_ramdisk=False, expect_kernel=False,
                           rescue=None, expect_xen_hvm=False, xen_only=False):
        """Exercise guest XML generation across hypervisor types.

        For each applicable virt_type (qemu/kvm/xen, or xen only when
        ``expect_xen_hvm``/``xen_only`` is set) this builds a per-type check
        list covering domain type, os type, kernel/ramdisk presence, sysinfo
        entries and console/serial devices, generates the XML and asserts
        every check. It finally verifies that an explicitly configured
        connection_uri is never overridden.

        :param instance: dict used to build the objects.Instance under test
        :param mock_serial: mock injected by the decorator for the host
            hardware serial
        :param expect_ramdisk: expect an <initrd> element in <os>
        :param expect_kernel: expect a <kernel> element in <os>
        :param rescue: rescue image metadata dict, or None; switches disk
            sources to their '.rescue' variants
        :param expect_xen_hvm: check only xen, in HVM mode
        :param xen_only: check only xen (in XEN mode)
        """
        mock_serial.return_value = "cef19ce0-0ca2-11df-855d-b19fbce37686"
        instance_ref = objects.Instance(**instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        xen_vm_mode = fields.VMMode.XEN
        if expect_xen_hvm:
            xen_vm_mode = fields.VMMode.HVM

        # virt_type -> (expected URI, base list of (extractor, expected)).
        type_uri_map = {'qemu': ('qemu:///system',
                             [(lambda t: t.find('.').get('type'), 'qemu'),
                              (lambda t: t.find('./os/type').text,
                               fields.VMMode.HVM),
                              (lambda t: t.find('./devices/emulator'), None)]),
                        'kvm': ('qemu:///system',
                             [(lambda t: t.find('.').get('type'), 'kvm'),
                              (lambda t: t.find('./os/type').text,
                               fields.VMMode.HVM),
                              (lambda t: t.find('./devices/emulator'), None)]),
                        'uml': ('uml:///system',
                             [(lambda t: t.find('.').get('type'), 'uml'),
                              (lambda t: t.find('./os/type').text,
                               fields.VMMode.UML)]),
                        'xen': ('xen:///',
                             [(lambda t: t.find('.').get('type'), 'xen'),
                              (lambda t: t.find('./os/type').text,
                               xen_vm_mode)])}

        if expect_xen_hvm or xen_only:
            hypervisors_to_check = ['xen']
        else:
            hypervisors_to_check = ['qemu', 'kvm', 'xen']

        # Extend each hypervisor's check list with the kernel/ramdisk,
        # sysinfo and console expectations for this scenario.
        for hypervisor_type in hypervisors_to_check:
            check_list = type_uri_map[hypervisor_type][1]

            if rescue:
                suffix = '.rescue'
            else:
                suffix = ''
            if expect_kernel:
                check = (lambda t: self.relpath(t.find('./os/kernel').text).
                         split('/')[1], 'kernel' + suffix)
            else:
                check = (lambda t: t.find('./os/kernel'), None)
            check_list.append(check)

            if expect_kernel:
                check = (lambda t: "no_timer_check" in t.find('./os/cmdline').
                         text, hypervisor_type == "qemu")
                check_list.append(check)
            # Hypervisors that only support vm_mode.HVM should not produce
            # configuration that results in kernel arguments
            if not expect_kernel and (hypervisor_type in
                                      ['qemu', 'kvm']):
                check = (lambda t: t.find('./os/root'), None)
                check_list.append(check)
                check = (lambda t: t.find('./os/cmdline'), None)
                check_list.append(check)

            if expect_ramdisk:
                check = (lambda t: self.relpath(t.find('./os/initrd').text).
                         split('/')[1], 'ramdisk' + suffix)
            else:
                check = (lambda t: t.find('./os/initrd'), None)
            check_list.append(check)

            if hypervisor_type in ['qemu', 'kvm']:
                # The sysinfo entries appear in a fixed order:
                # manufacturer, product, version, serial, uuid.
                xpath = "./sysinfo/system/entry"
                check = (lambda t: t.findall(xpath)[0].get("name"),
                         "manufacturer")
                check_list.append(check)
                check = (lambda t: t.findall(xpath)[0].text,
                         version.vendor_string())
                check_list.append(check)

                check = (lambda t: t.findall(xpath)[1].get("name"),
                         "product")
                check_list.append(check)
                check = (lambda t: t.findall(xpath)[1].text,
                         version.product_string())
                check_list.append(check)

                check = (lambda t: t.findall(xpath)[2].get("name"),
                         "version")
                check_list.append(check)
                # NOTE(sirp): empty strings don't roundtrip in lxml (they are
                # converted to None), so we need an `or ''` to correct for that
                check = (lambda t: t.findall(xpath)[2].text or '',
                         version.version_string_with_package())
                check_list.append(check)

                check = (lambda t: t.findall(xpath)[3].get("name"),
                         "serial")
                check_list.append(check)
                check = (lambda t: t.findall(xpath)[3].text,
                         "cef19ce0-0ca2-11df-855d-b19fbce37686")
                check_list.append(check)

                check = (lambda t: t.findall(xpath)[4].get("name"),
                         "uuid")
                check_list.append(check)
                check = (lambda t: t.findall(xpath)[4].text,
                         instance['uuid'])
                check_list.append(check)

            if hypervisor_type in ['qemu', 'kvm']:
                check = (lambda t: t.findall('./devices/serial')[0].get(
                    'type'), 'pty')
                check_list.append(check)
            else:
                check = (lambda t: t.find('./devices/console').get(
                    'type'), 'pty')
                check_list.append(check)

        # Checks shared by every hypervisor type; disk sources differ for
        # rescue vs. normal boot.
        common_checks = [
            (lambda t: t.find('.').tag, 'domain'),
            (lambda t: t.find('./memory').text, '2097152')]
        if rescue:
            common_checks += [
                (lambda t: self.relpath(t.findall('./devices/disk/source')[0].
                                        get('file')).split('/')[1],
                 'disk.rescue'),
                (lambda t: self.relpath(t.findall('./devices/disk/source')[1].
                                        get('file')).split('/')[1], 'disk')]
        else:
            common_checks += [(lambda t: self.relpath(t.findall(
                './devices/disk/source')[0].get('file')).split('/')[1],
                               'disk')]
            common_checks += [(lambda t: self.relpath(t.findall(
                './devices/disk/source')[1].get('file')).split('/')[1],
                               'disk.local')]

        for virt_type in hypervisors_to_check:
            expected_uri = type_uri_map[virt_type][0]
            checks = type_uri_map[virt_type][1]
            self.flags(virt_type=virt_type, group='libvirt')

            with mock.patch('nova.virt.libvirt.driver.libvirt') as old_virt:
                # Simulate an older libvirt without the baseline CPU
                # feature-expansion flag.
                del old_virt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES

                drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

                self.assertEqual(drvr._uri(), expected_uri)

                network_info = _fake_network_info(self, 1)
                disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                                    instance_ref,
                                                    image_meta,
                                                    rescue=rescue)

                xml = drvr._get_guest_xml(self.context, instance_ref,
                                          network_info, disk_info,
                                          image_meta,
                                          rescue=rescue)
                tree = etree.fromstring(xml)
                for i, (check, expected_result) in enumerate(checks):
                    self.assertEqual(check(tree),
                                     expected_result,
                                     '%s != %s failed check %d' %
                                     (check(tree), expected_result, i))

                for i, (check, expected_result) in enumerate(common_checks):
                    self.assertEqual(check(tree),
                                     expected_result,
                                     '%s != %s failed common check %d' %
                                     (check(tree), expected_result, i))

                # The interface must reference this instance's nwfilter.
                filterref = './devices/interface/filterref'
                vif = network_info[0]
                nic_id = vif['address'].lower().replace(':', '')
                fw = firewall.NWFilterFirewall(drvr)
                instance_filter_name = fw._instance_filter_name(instance_ref,
                                                                nic_id)
                self.assertEqual(tree.find(filterref).get('filter'),
                                 instance_filter_name)

        # This test is supposed to make sure we don't
        # override a specifically set uri
        #
        # Deliberately not just assigning this string to CONF.connection_uri
        # and checking against that later on. This way we make sure the
        # implementation doesn't fiddle around with the CONF.
        testuri = 'something completely different'
        self.flags(connection_uri=testuri, group='libvirt')
        for virt_type in type_uri_map:
            self.flags(virt_type=virt_type, group='libvirt')
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
            self.assertEqual(drvr._uri(), testuri)
|
|
|
|
    def test_ensure_filtering_rules_for_instance_timeout(self):
        """ensure_filtering_rules_for_instance() gives up after its retry
        budget when the instance filter never appears, raising NovaException
        after sleeping a total of 29 seconds.

        NOTE(review): if no NovaException is raised, the message assertion in
        the except block is silently skipped and only the sleep-counter
        assertion runs — confirm whether that is intentional.
        """
        # ensure_filtering_fules_for_instance() finishes with timeout.
        # Preparing mocks
        class FakeTime(object):
            # Records total simulated sleep time instead of really sleeping.
            def __init__(self):
                self.counter = 0

            def sleep(self, t):
                self.counter += t

        fake_timer = FakeTime()

        # _fake_network_info must be called before create_fake_libvirt_mock(),
        # as _fake_network_info calls importutils.import_class() and
        # create_fake_libvirt_mock() mocks importutils.import_class().
        network_info = _fake_network_info(self, 1)
        self.create_fake_libvirt_mock()
        instance_ref = objects.Instance(**self.test_instance)

        # Start test
        try:
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
            # instance_filter_exists returning None makes every retry fail.
            self.stub_out('nova.virt.libvirt.firewall.IptablesFirewallDriver.'
                          'setup_basic_filtering', lambda *a: None)
            self.stub_out('nova.virt.libvirt.firewall.IptablesFirewallDriver.'
                          'prepare_instance_filter', lambda *a: None)
            self.stub_out('nova.virt.libvirt.firewall.IptablesFirewallDriver.'
                          'instance_filter_exists', lambda *a: None)
            self.stub_out('eventlet.greenthread.sleep',
                          lambda t: fake_timer.sleep(t))
            drvr.ensure_filtering_rules_for_instance(instance_ref,
                                                     network_info)
        except exception.NovaException as e:
            msg = ('The firewall filter for %s does not exist' %
                   instance_ref['name'])
            c1 = (0 <= six.text_type(e).find(msg))
            self.assertTrue(c1)

        # 29 == sum of the back-off sleeps performed before giving up.
        self.assertEqual(29, fake_timer.counter, "Didn't wait the expected "
                                                 "amount of time")
|
|
|
|
    @mock.patch.object(objects.Service, 'get_by_compute_host')
    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       '_create_shared_storage_test_file')
    @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
    def test_check_can_live_migrate_dest_all_pass_with_block_migration(
            self, mock_cpu, mock_test_file, mock_svc):
        """With block migration requested and all checks passing, the
        returned migrate data reports block_migration=True and the disk
        space from disk_available_least converted to MB.
        """
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.vcpu_model = test_vcpu_model.fake_vcpumodel
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        compute_info = {'disk_available_least': 400,
                        'cpu_info': 'asdf',
                        }
        filename = "file"

        # _check_cpu_match
        mock_cpu.return_value = 1

        # mounted_on_same_shared_storage
        mock_test_file.return_value = filename

        # No need for the src_compute_info
        return_value = drvr.check_can_live_migrate_destination(self.context,
            instance_ref, None, compute_info, True)
        # is_volume_backed is stubbed before comparing the whole payload.
        return_value.is_volume_backed = False
        self.assertEqual({'filename': 'file',
                          'image_type': 'default',
                          'disk_available_mb': 409600,
                          'disk_over_commit': False,
                          'block_migration': True,
                          'is_volume_backed': False,
                          'dst_wants_file_backed_memory': False,
                          'file_backed_memory_discard': False,
                          'graphics_listen_addr_spice': '127.0.0.1',
                          'graphics_listen_addr_vnc': '127.0.0.1',
                          'serial_listen_addr': None},
                         return_value.obj_to_primitive()['nova_object.data'])
|
|
|
|
    @mock.patch.object(objects.Service, 'get_by_compute_host')
    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       '_create_shared_storage_test_file')
    @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
    def test_check_can_live_migrate_dest_all_pass_with_over_commit(
            self, mock_cpu, mock_test_file, mock_svc):
        """With disk over-commit allowed, free_disk_gb is used for the
        available-disk calculation even though disk_available_least is
        negative.
        """
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.vcpu_model = test_vcpu_model.fake_vcpumodel
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        compute_info = {'disk_available_least': -1000,
                        'free_disk_gb': 50,
                        'cpu_info': 'asdf',
                        }
        filename = "file"

        # _check_cpu_match
        mock_cpu.return_value = 1

        # mounted_on_same_shared_storage
        mock_test_file.return_value = filename

        # No need for the src_compute_info
        return_value = drvr.check_can_live_migrate_destination(self.context,
            instance_ref, None, compute_info, True, True)
        # is_volume_backed is stubbed before comparing the whole payload.
        return_value.is_volume_backed = False
        self.assertEqual({'filename': 'file',
                          'image_type': 'default',
                          'disk_available_mb': 51200,
                          'disk_over_commit': True,
                          'block_migration': True,
                          'is_volume_backed': False,
                          'dst_wants_file_backed_memory': False,
                          'file_backed_memory_discard': False,
                          'graphics_listen_addr_spice': '127.0.0.1',
                          'graphics_listen_addr_vnc': '127.0.0.1',
                          'serial_listen_addr': None},
                         return_value.obj_to_primitive()['nova_object.data'])
|
|
|
|
    @mock.patch.object(objects.Service, 'get_by_compute_host')
    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       '_create_shared_storage_test_file')
    @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
    def test_check_can_live_migrate_dest_all_pass_no_block_migration(
            self, mock_cpu, mock_test_file, mock_svc):
        """Without block migration the returned migrate data reports
        block_migration=False while the remaining checks still pass.
        """
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.vcpu_model = test_vcpu_model.fake_vcpumodel
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        compute_info = {'disk_available_least': 400,
                        'cpu_info': 'asdf',
                        }
        filename = "file"

        # _check_cpu_match
        mock_cpu.return_value = 1
        # mounted_on_same_shared_storage
        mock_test_file.return_value = filename
        # No need for the src_compute_info
        return_value = drvr.check_can_live_migrate_destination(self.context,
            instance_ref, None, compute_info, False)
        # is_volume_backed is stubbed before comparing the whole payload.
        return_value.is_volume_backed = False
        self.assertEqual({'filename': 'file',
                          'image_type': 'default',
                          'block_migration': False,
                          'disk_over_commit': False,
                          'disk_available_mb': 409600,
                          'is_volume_backed': False,
                          'dst_wants_file_backed_memory': False,
                          'file_backed_memory_discard': False,
                          'graphics_listen_addr_spice': '127.0.0.1',
                          'graphics_listen_addr_vnc': '127.0.0.1',
                          'serial_listen_addr': None},
                         return_value.obj_to_primitive()['nova_object.data'])
|
|
|
|
    @mock.patch.object(objects.Service, 'get_by_compute_host')
    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       '_create_shared_storage_test_file',
                       return_value='fake')
    @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
    def test_check_can_live_migrate_dest_fills_listen_addrs(
            self, mock_cpu, mock_test_file, mock_svc):
        # Tests that check_can_live_migrate_destination returns the listen
        # addresses required by check_can_live_migrate_source.
        self.flags(server_listen='192.0.2.12', group='vnc')
        self.flags(server_listen='198.51.100.34', group='spice')
        self.flags(proxyclient_address='203.0.113.56', group='serial_console')
        self.flags(enabled=True, group='serial_console')
        mock_cpu.return_value = 1

        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.vcpu_model = test_vcpu_model.fake_vcpumodel
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        compute_info = {'cpu_info': 'asdf', 'disk_available_least': 1}
        result = drvr.check_can_live_migrate_destination(
            self.context, instance_ref, compute_info, compute_info)

        # Each configured listen address must be reflected in the result.
        self.assertEqual('192.0.2.12',
                         str(result.graphics_listen_addr_vnc))
        self.assertEqual('198.51.100.34',
                         str(result.graphics_listen_addr_spice))
        self.assertEqual('203.0.113.56',
                         str(result.serial_listen_addr))
|
|
|
|
@mock.patch.object(objects.Service, 'get_by_compute_host')
|
|
@mock.patch.object(libvirt_driver.LibvirtDriver,
|
|
'_create_shared_storage_test_file',
|
|
return_value='fake')
|
|
@mock.patch.object(fakelibvirt.Connection, 'compareCPU',
|
|
return_value=1)
|
|
def test_check_can_live_migrate_dest_ensure_serial_adds_not_set(
|
|
self, mock_cpu, mock_test_file, mock_svc):
|
|
self.flags(proxyclient_address='127.0.0.1', group='serial_console')
|
|
self.flags(enabled=False, group='serial_console')
|
|
instance_ref = objects.Instance(**self.test_instance)
|
|
instance_ref.vcpu_model = test_vcpu_model.fake_vcpumodel
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
compute_info = {'cpu_info': 'asdf', 'disk_available_least': 1}
|
|
result = drvr.check_can_live_migrate_destination(
|
|
self.context, instance_ref, compute_info, compute_info)
|
|
self.assertIsNone(result.serial_listen_addr)
|
|
|
|
    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       '_create_shared_storage_test_file',
                       return_value='fake')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_compare_cpu')
    def test_check_can_live_migrate_guest_cpu_none_model(
            self, mock_cpu, mock_test_file):
        # Tests that when instance.vcpu_model.model is None, the host cpu
        # model is used for live migration.
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.vcpu_model = test_vcpu_model.fake_vcpumodel
        instance_ref.vcpu_model.model = None
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        compute_info = {'cpu_info': 'asdf', 'disk_available_least': 1}
        result = drvr.check_can_live_migrate_destination(
            self.context, instance_ref, compute_info, compute_info)
        # is_volume_backed is stubbed before comparing the whole payload.
        result.is_volume_backed = False
        # A None guest model means _compare_cpu receives None and falls back
        # to the host cpu_info string.
        mock_cpu.assert_called_once_with(None, 'asdf', instance_ref)
        self.assertEqual({'filename': 'fake',
                          'image_type': CONF.libvirt.images_type,
                          'block_migration': False,
                          'disk_over_commit': False,
                          'disk_available_mb': 1024,
                          'is_volume_backed': False,
                          'dst_wants_file_backed_memory': False,
                          'file_backed_memory_discard': False,
                          'graphics_listen_addr_spice': '127.0.0.1',
                          'graphics_listen_addr_vnc': '127.0.0.1',
                          'serial_listen_addr': None},
                         result.obj_to_primitive()['nova_object.data'])
|
|
|
|
    @mock.patch.object(objects.Service, 'get_by_compute_host')
    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       '_create_shared_storage_test_file')
    @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
    def test_check_can_live_migrate_dest_no_instance_cpu_info(
            self, mock_cpu, mock_test_file, mock_svc):
        """Destination check succeeds using the source host's cpu_info when
        the instance itself carries no vcpu_model.
        """
        instance_ref = objects.Instance(**self.test_instance)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        compute_info = {'cpu_info': jsonutils.dumps({
            "vendor": "AMD",
            "arch": fields.Architecture.I686,
            "features": ["sse3"],
            "model": "Opteron_G3",
            "topology": {"cores": 2, "threads": 1, "sockets": 4}
        }), 'disk_available_least': 1}
        filename = "file"

        # _check_cpu_match
        mock_cpu.return_value = 1
        # mounted_on_same_shared_storage
        mock_test_file.return_value = filename

        return_value = drvr.check_can_live_migrate_destination(self.context,
            instance_ref, compute_info, compute_info, False)
        # NOTE(danms): Compute manager would have set this, so set it here
        return_value.is_volume_backed = False
        self.assertEqual({'filename': 'file',
                          'image_type': 'default',
                          'block_migration': False,
                          'disk_over_commit': False,
                          'disk_available_mb': 1024,
                          'is_volume_backed': False,
                          'dst_wants_file_backed_memory': False,
                          'file_backed_memory_discard': False,
                          'graphics_listen_addr_spice': '127.0.0.1',
                          'graphics_listen_addr_vnc': '127.0.0.1',
                          'serial_listen_addr': None},
                         return_value.obj_to_primitive()['nova_object.data'])
|
|
|
|
    @mock.patch.object(objects.Service, 'get_by_compute_host')
    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       '_create_shared_storage_test_file')
    @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
    def test_check_can_live_migrate_dest_file_backed(
            self, mock_cpu, mock_test_file, mock_svc):
        """The destination advertises dst_wants_file_backed_memory when
        [libvirt]/file_backed_memory is enabled and the source service is
        new enough (version 32).
        """

        self.flags(file_backed_memory=1024, group='libvirt')

        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.vcpu_model = test_vcpu_model.fake_vcpumodel

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        compute_info = {'disk_available_least': 400,
                        'cpu_info': 'asdf',
                        }

        filename = "file"

        svc = objects.Service()
        svc.version = 32
        mock_svc.return_value = svc

        # _check_cpu_match
        mock_cpu.return_value = 1
        # mounted_on_same_shared_storage
        mock_test_file.return_value = filename
        # No need for the src_compute_info
        return_value = drvr.check_can_live_migrate_destination(self.context,
            instance_ref, None, compute_info, False)

        self.assertTrue(return_value.dst_wants_file_backed_memory)
|
|
|
|
    @mock.patch.object(fakelibvirt.Connection, 'getVersion')
    @mock.patch.object(fakelibvirt.Connection, 'getLibVersion')
    @mock.patch.object(objects.Service, 'get_by_compute_host')
    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       '_create_shared_storage_test_file')
    @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
    def _test_check_can_live_migrate_dest_file_backed_discard(
            self, libvirt_version, qemu_version, mock_cpu, mock_test_file,
            mock_svc, mock_lib_version, mock_version):
        """Helper: run the destination check with file-backed memory enabled
        and the given (integer-encoded) libvirt/qemu versions, returning the
        resulting LibvirtLiveMigrateData so callers can assert on the
        file_backed_memory_discard flag.
        """

        self.flags(file_backed_memory=1024, group='libvirt')

        mock_lib_version.return_value = libvirt_version
        mock_version.return_value = qemu_version

        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.vcpu_model = test_vcpu_model.fake_vcpumodel

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        compute_info = {'disk_available_least': 400,
                        'cpu_info': 'asdf',
                        }

        filename = "file"

        svc = objects.Service()
        svc.version = 32
        mock_svc.return_value = svc

        # _check_cpu_match
        mock_cpu.return_value = 1
        # mounted_on_same_shared_storage
        mock_test_file.return_value = filename
        # No need for the src_compute_info
        return_value = drvr.check_can_live_migrate_destination(self.context,
            instance_ref, None, compute_info, False)

        return return_value
|
|
|
|
def test_check_can_live_migrate_dest_file_backed_discard(self):
|
|
libvirt_version = versionutils.convert_version_to_int(
|
|
libvirt_driver.MIN_LIBVIRT_FILE_BACKED_DISCARD_VERSION)
|
|
qemu_version = versionutils.convert_version_to_int(
|
|
libvirt_driver.MIN_QEMU_FILE_BACKED_DISCARD_VERSION)
|
|
|
|
data = self._test_check_can_live_migrate_dest_file_backed_discard(
|
|
libvirt_version, qemu_version)
|
|
|
|
self.assertTrue(data.dst_wants_file_backed_memory)
|
|
self.assertTrue(data.file_backed_memory_discard)
|
|
|
|
def test_check_can_live_migrate_dest_file_backed_discard_bad_libvirt(self):
|
|
libvirt_version = versionutils.convert_version_to_int(
|
|
libvirt_driver.MIN_LIBVIRT_FILE_BACKED_DISCARD_VERSION) - 1
|
|
qemu_version = versionutils.convert_version_to_int(
|
|
libvirt_driver.MIN_QEMU_FILE_BACKED_DISCARD_VERSION)
|
|
|
|
data = self._test_check_can_live_migrate_dest_file_backed_discard(
|
|
libvirt_version, qemu_version)
|
|
|
|
self.assertTrue(data.dst_wants_file_backed_memory)
|
|
self.assertFalse(data.file_backed_memory_discard)
|
|
|
|
    def test_check_can_live_migrate_dest_file_backed_discard_bad_qemu(self):
        """Discard is refused when qemu is below the minimum version, even
        though file-backed memory itself is still offered.
        """
        libvirt_version = versionutils.convert_version_to_int(
            libvirt_driver.MIN_LIBVIRT_FILE_BACKED_DISCARD_VERSION)
        qemu_version = versionutils.convert_version_to_int(
            libvirt_driver.MIN_QEMU_FILE_BACKED_DISCARD_VERSION) - 1

        data = self._test_check_can_live_migrate_dest_file_backed_discard(
            libvirt_version, qemu_version)

        self.assertTrue(data.dst_wants_file_backed_memory)
        self.assertFalse(data.file_backed_memory_discard)
|
|
|
|
    @mock.patch.object(objects.Service, 'get_by_compute_host')
    @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
    def test_check_can_live_migrate_dest_incompatible_cpu_raises(
            self, mock_cpu, mock_svc):
        """An InvalidCPUInfo raised by the CPU comparison propagates out of
        check_can_live_migrate_destination.
        """
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.vcpu_model = test_vcpu_model.fake_vcpumodel
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        compute_info = {'cpu_info': 'asdf', 'disk_available_least': 1}

        svc = objects.Service(host="old")
        svc.version = 32
        mock_svc.return_value = svc

        mock_cpu.side_effect = exception.InvalidCPUInfo(reason='foo')
        self.assertRaises(exception.InvalidCPUInfo,
                          drvr.check_can_live_migrate_destination,
                          self.context, instance_ref,
                          compute_info, compute_info, False)
|
|
|
|
    @mock.patch.object(objects.Service, 'get_by_compute_host')
    @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
    @mock.patch('nova.objects.Service.version', 30)
    def test_check_can_live_migrate_dest_incompatible_file_backed(
            self, mock_cpu, mock_svc):
        """A MigrationPreCheckError is raised when file-backed memory is
        enabled but the source service (version 31) is too old to support it.
        """

        self.flags(file_backed_memory=1024, group='libvirt')

        instance_ref = objects.Instance(**self.test_instance)

        # _check_cpu_match
        mock_cpu.return_value = 1

        svc = objects.Service(host="old")
        svc.version = 31
        mock_svc.return_value = svc
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        compute_info = {'cpu_info': 'asdf', 'disk_available_least': 1}

        self.assertRaises(exception.MigrationPreCheckError,
                          drvr.check_can_live_migrate_destination,
                          self.context, instance_ref,
                          compute_info, compute_info, False)
|
|
|
|
    @mock.patch.object(host.Host, 'compare_cpu')
    @mock.patch.object(nova.virt.libvirt, 'config')
    def test_compare_cpu_compatible_host_cpu(self, mock_vconfig, mock_compare):
        """_compare_cpu returns None (success) when libvirt reports the host
        CPU as compatible (any positive comparison result).
        """
        instance = objects.Instance(**self.test_instance)
        mock_compare.return_value = 5
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        ret = conn._compare_cpu(None, jsonutils.dumps(_fake_cpu_info),
                                instance)
        self.assertIsNone(ret)
|
|
|
|
    @mock.patch.object(host.Host, 'compare_cpu')
    @mock.patch.object(nova.virt.libvirt, 'config')
    def test_compare_cpu_handles_not_supported_error_gracefully(self,
                                                                mock_vconfig,
                                                                mock_compare):
        """A VIR_ERR_NO_SUPPORT error from virCompareCPU is swallowed and
        treated as a successful (None) comparison.
        """
        instance = objects.Instance(**self.test_instance)
        not_supported_exc = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError,
            'this function is not supported by the connection driver:'
            ' virCompareCPU',
            error_code=fakelibvirt.VIR_ERR_NO_SUPPORT)
        mock_compare.side_effect = not_supported_exc
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        ret = conn._compare_cpu(None, jsonutils.dumps(_fake_cpu_info),
                                instance)
        self.assertIsNone(ret)
|
|
|
|
    @mock.patch.object(host.Host, 'compare_cpu')
    @mock.patch.object(nova.virt.libvirt.LibvirtDriver,
                       '_vcpu_model_to_cpu_config')
    def test_compare_cpu_compatible_guest_cpu(self, mock_vcpu_to_cpu,
                                              mock_compare):
        """_compare_cpu returns None when a guest vcpu_model (first argument)
        compares as compatible; the host cpu_info path is not used.
        """
        instance = objects.Instance(**self.test_instance)
        mock_compare.return_value = 6
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        ret = conn._compare_cpu(jsonutils.dumps(_fake_cpu_info), None,
                                instance)
        self.assertIsNone(ret)
|
|
|
|
def test_compare_cpu_virt_type_xen(self):
|
|
instance = objects.Instance(**self.test_instance)
|
|
self.flags(virt_type='xen', group='libvirt')
|
|
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
ret = conn._compare_cpu(None, None, instance)
|
|
self.assertIsNone(ret)
|
|
|
|
def test_compare_cpu_virt_type_qemu(self):
|
|
instance = objects.Instance(**self.test_instance)
|
|
self.flags(virt_type='qemu', group='libvirt')
|
|
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
ret = conn._compare_cpu(None, None, instance)
|
|
self.assertIsNone(ret)
|
|
|
|
    @mock.patch.object(host.Host, 'compare_cpu')
    @mock.patch.object(nova.virt.libvirt, 'config')
    def test_compare_cpu_invalid_cpuinfo_raises(self, mock_vconfig,
                                                mock_compare):
        """A zero comparison result (incompatible CPU) from libvirt raises
        InvalidCPUInfo.
        """
        instance = objects.Instance(**self.test_instance)
        mock_compare.return_value = 0
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(exception.InvalidCPUInfo,
                          conn._compare_cpu, None,
                          jsonutils.dumps(_fake_cpu_info),
                          instance)
|
|
|
|
    @mock.patch.object(host.Host, 'compare_cpu')
    @mock.patch.object(nova.virt.libvirt, 'config')
    def test_compare_cpu_incompatible_cpu_raises(self, mock_vconfig,
                                                 mock_compare):
        """A generic libvirtError during CPU comparison is wrapped in
        MigrationPreCheckError.
        """
        instance = objects.Instance(**self.test_instance)
        mock_compare.side_effect = fakelibvirt.libvirtError('cpu')
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(exception.MigrationPreCheckError,
                          conn._compare_cpu, None,
                          jsonutils.dumps(_fake_cpu_info),
                          instance)
|
|
|
|
    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       '_cleanup_shared_storage_test_file')
    def test_check_can_live_migrate_dest_cleanup_works_correctly(
            self, mock_clean):
        """cleanup_live_migration_destination_check removes the shared
        storage test file named in the migrate data.
        """
        objects.Instance(**self.test_instance)
        dest_check_data = objects.LibvirtLiveMigrateData(
            filename="file",
            block_migration=True,
            disk_over_commit=False,
            disk_available_mb=1024)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        drvr.cleanup_live_migration_destination_check(self.context,
                                                      dest_check_data)
        mock_clean.assert_called_once_with('file')
|
|
|
|
    @mock.patch('os.path.exists', return_value=True)
    @mock.patch('os.utime')
    def test_check_shared_storage_test_file_exists(self, mock_utime,
                                                   mock_path_exists):
        """_check_shared_storage_test_file returns True when the test file
        exists under instances_path (after touching the directory mtime).
        """
        tmpfile_path = os.path.join(CONF.instances_path, 'tmp123')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertTrue(drvr._check_shared_storage_test_file(
            'tmp123', mock.sentinel.instance))
        mock_utime.assert_called_once_with(CONF.instances_path, None)
        mock_path_exists.assert_called_once_with(tmpfile_path)
|
|
|
|
    @mock.patch('os.path.exists', return_value=False)
    @mock.patch('os.utime')
    def test_check_shared_storage_test_file_does_not_exist(self, mock_utime,
                                                           mock_path_exists):
        """_check_shared_storage_test_file returns False when the test file
        is absent from instances_path.
        """
        tmpfile_path = os.path.join(CONF.instances_path, 'tmp123')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertFalse(drvr._check_shared_storage_test_file(
            'tmp123', mock.sentinel.instance))
        mock_utime.assert_called_once_with(CONF.instances_path, None)
        mock_path_exists.assert_called_once_with(tmpfile_path)
|
|
|
|
    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       '_check_shared_storage_test_file')
    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       '_is_shared_block_storage')
    def _test_can_live_migrate_source(self, mock_is_shared, mock_check_shared,
                                      block_migration=False,
                                      is_shared_block_storage=False,
                                      is_shared_instance_path=False,
                                      disk_available_mb=1024,
                                      exception=None):
        """Helper driving check_can_live_migrate_source with the given
        storage-sharing combination.

        If `exception` is set, asserts that class is raised; otherwise
        asserts the returned migrate data reflects the inputs. Returns
        (instance, dest_check_data) for further assertions by callers.
        """
        instance = objects.Instance(**self.test_instance)
        dest_check_data = objects.LibvirtLiveMigrateData(
            filename='file',
            image_type='default',
            block_migration=block_migration,
            disk_over_commit=False,
            disk_available_mb=disk_available_mb)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        mock_is_shared.return_value = is_shared_block_storage
        mock_check_shared.return_value = is_shared_instance_path

        if exception:
            self.assertRaises(exception, drvr.check_can_live_migrate_source,
                              self.context, instance, dest_check_data)
        else:
            ret = drvr.check_can_live_migrate_source(self.context, instance,
                                                     dest_check_data)

        # Both storage probes are consulted exactly once regardless of
        # whether the check raised.
        mock_is_shared.assert_called_once_with(instance, dest_check_data, None)
        mock_check_shared.assert_called_once_with('file', instance)

        if exception:
            return (instance, dest_check_data)

        if block_migration:
            self.assertIsInstance(ret, objects.LibvirtLiveMigrateData)
            self.assertIn('is_shared_block_storage', ret)
            self.assertFalse(ret.is_shared_block_storage)
            self.assertIn('is_shared_instance_path', ret)
            self.assertFalse(ret.is_shared_instance_path)

        if is_shared_block_storage:
            self.assertTrue(ret.is_shared_block_storage)

        if is_shared_instance_path:
            self.assertTrue(ret.is_shared_instance_path)

        return (instance, dest_check_data)
|
|
|
|
    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       '_assert_dest_node_has_enough_disk')
    def test_check_can_live_migrate_source_block_migration(
            self, mock_assert_dest):
        """Block migration triggers the destination disk-space assertion."""
        instance, dest_check_data = self._test_can_live_migrate_source(
            block_migration=True)
        mock_assert_dest.assert_called_once_with(
            self.context, instance, dest_check_data.disk_available_mb,
            False, None)
|
|
|
|
    def test_check_can_live_migrate_source_shared_block_storage(self):
        """Non-block migration succeeds over shared block storage."""
        self._test_can_live_migrate_source(is_shared_block_storage=True)
|
|
|
|
    def test_check_can_live_migrate_source_shared_instance_path(self):
        """Non-block migration succeeds over a shared instance path."""
        self._test_can_live_migrate_source(is_shared_instance_path=True)
|
|
|
|
    def test_check_can_live_migrate_source_non_shared_fails(self):
        """Non-block migration without any shared storage raises
        InvalidSharedStorage.
        """
        self._test_can_live_migrate_source(
            exception=exception.InvalidSharedStorage)
|
|
|
|
    def test_check_can_live_migrate_source_shared_block_migration_fails(self):
        """Requesting block migration on shared block storage raises
        InvalidLocalStorage.
        """
        self._test_can_live_migrate_source(
            block_migration=True, is_shared_block_storage=True,
            exception=exception.InvalidLocalStorage)
|
|
|
|
    def test_check_can_live_migrate_shared_path_block_migration_fails(self):
        """Requesting block migration on a shared instance path raises
        InvalidLocalStorage.
        """
        self._test_can_live_migrate_source(
            block_migration=True, is_shared_instance_path=True,
            exception=exception.InvalidLocalStorage)
|
|
|
|
    def test_check_can_live_migrate_non_shared_non_block_migration_fails(self):
        """No shared storage and no block migration raises
        InvalidSharedStorage.
        """
        self._test_can_live_migrate_source(
            exception=exception.InvalidSharedStorage)
|
|
|
|
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                '_get_instance_disk_info')
    def test_check_can_live_migrate_source_with_dest_not_enough_disk(
            self, mock_get_bdi):
        """Block migration fails with MigrationError when the destination
        reports less free disk than the instance needs.
        """
        mock_get_bdi.return_value = [{"virt_disk_size": 2}]
        instance, _ = self._test_can_live_migrate_source(
            block_migration=True, disk_available_mb=0,
            exception=exception.MigrationError)
        mock_get_bdi.assert_called_once_with(instance, None)
|
|
|
|
    @mock.patch.object(host.Host, 'has_min_version', return_value=True)
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                '_assert_dest_node_has_enough_disk')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                '_is_shared_block_storage', return_value=False)
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                '_check_shared_storage_test_file', return_value=False)
    def test_check_can_live_migrate_source_bm_with_bdm_tunnelled_error(
            self, mock_check, mock_shared_block, mock_enough,
            mock_min_version):
        """Tunnelled block migration with attached volumes is rejected with
        MigrationPreCheckError.
        """

        self.flags(live_migration_tunnelled=True,
                   group='libvirt')
        bdi = {'block_device_mapping': ['bdm']}
        instance = objects.Instance(**self.test_instance)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        dest_check_data = objects.LibvirtLiveMigrateData(
            filename='file',
            image_type='default',
            block_migration=True,
            disk_over_commit=False,
            disk_available_mb=100)
        # Recompute migration flags so the tunnelled setting takes effect.
        drvr._parse_migration_flags()
        self.assertRaises(exception.MigrationPreCheckError,
                          drvr.check_can_live_migrate_source,
                          self.context, instance, dest_check_data,
                          block_device_info=bdi)
|
|
|
|
    @mock.patch.object(host.Host, 'has_min_version', return_value=True)
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                '_assert_dest_node_has_enough_disk')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                '_is_shared_block_storage')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                '_check_shared_storage_test_file')
    def _test_check_can_live_migrate_source_block_migration_none(
            self, block_migrate, is_shared_instance_path, is_share_block,
            mock_check, mock_shared_block, mock_enough, mock_verson):
        """Helper: with block_migration unset in the migrate data, verify the
        driver auto-decides it (True only when no storage is shared).
        """

        mock_check.return_value = is_shared_instance_path
        mock_shared_block.return_value = is_share_block
        instance = objects.Instance(**self.test_instance)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        dest_check_data = objects.LibvirtLiveMigrateData(
            filename='file',
            image_type='default',
            disk_over_commit=False,
            disk_available_mb=100)
        dest_check_data_ret = drvr.check_can_live_migrate_source(
            self.context, instance, dest_check_data)
        self.assertEqual(block_migrate, dest_check_data_ret.block_migration)
|
|
|
|
    def test_check_can_live_migrate_source_block_migration_none_shared1(self):
        """Shared instance path => block migration auto-resolves to False."""
        self._test_check_can_live_migrate_source_block_migration_none(
            False,
            True,
            False)
|
|
|
|
    def test_check_can_live_migrate_source_block_migration_none_shared2(self):
        """Shared block storage => block migration auto-resolves to False."""
        self._test_check_can_live_migrate_source_block_migration_none(
            False,
            False,
            True)
|
|
|
|
    def test_check_can_live_migrate_source_block_migration_none_no_share(self):
        """No shared storage at all => block migration auto-resolves to
        True.
        """
        self._test_check_can_live_migrate_source_block_migration_none(
            True,
            False,
            False)
|
|
|
|
    # NOTE(review): the same method is patched twice below; the second
    # decorator shadows the first so mock_enough and mock_disk_check both
    # wrap '_assert_dest_node_has_enough_disk'. Presumably intentional to
    # satisfy the 4-mock signature -- TODO confirm against git history.
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                '_assert_dest_node_has_enough_disk')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                '_assert_dest_node_has_enough_disk')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                '_is_shared_block_storage')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                '_check_shared_storage_test_file')
    def test_check_can_live_migration_source_disk_over_commit_none(self,
            mock_check, mock_shared_block, mock_enough, mock_disk_check):
        """When disk_over_commit is absent from the migrate data, the
        destination disk-space check is skipped entirely.
        """

        mock_check.return_value = False
        mock_shared_block.return_value = False
        instance = objects.Instance(**self.test_instance)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        dest_check_data = objects.LibvirtLiveMigrateData(
            filename='file',
            image_type='default',
            disk_available_mb=100)

        drvr.check_can_live_migrate_source(
            self.context, instance, dest_check_data)

        self.assertFalse(mock_disk_check.called)
|
|
|
|
def _is_shared_block_storage_test_create_mocks(self, disks):
|
|
# Test data
|
|
instance_xml = ("<domain type='kvm'><name>instance-0000000a</name>"
|
|
"<devices>{}</devices></domain>")
|
|
disks_xml = ''
|
|
for dsk in disks:
|
|
if dsk['type'] is not 'network':
|
|
disks_xml = ''.join([disks_xml,
|
|
"<disk type='{type}'>"
|
|
"<driver name='qemu' type='{driver}'/>"
|
|
"<source {source}='{source_path}'/>"
|
|
"<target dev='{target_dev}' bus='virtio'/>"
|
|
"</disk>".format(**dsk)])
|
|
else:
|
|
disks_xml = ''.join([disks_xml,
|
|
"<disk type='{type}'>"
|
|
"<driver name='qemu' type='{driver}'/>"
|
|
"<source protocol='{source_proto}'"
|
|
"name='{source_image}' >"
|
|
"<host name='hostname' port='7000'/>"
|
|
"<config file='/path/to/file'/>"
|
|
"</source>"
|
|
"<target dev='{target_dev}'"
|
|
"bus='ide'/>".format(**dsk)])
|
|
|
|
# Preparing mocks
|
|
mock_virDomain = mock.Mock(fakelibvirt.virDomain)
|
|
mock_virDomain.XMLDesc = mock.Mock()
|
|
mock_virDomain.XMLDesc.return_value = (instance_xml.format(disks_xml))
|
|
|
|
mock_lookup = mock.Mock()
|
|
|
|
def mock_lookup_side_effect(name):
|
|
return mock_virDomain
|
|
mock_lookup.side_effect = mock_lookup_side_effect
|
|
|
|
mock_qemu_img_info = mock.Mock()
|
|
mock_qemu_img_info.return_value = mock.Mock(disk_size=10737418240,
|
|
virtual_size=10737418240)
|
|
mock_stat = mock.Mock()
|
|
mock_stat.return_value = mock.Mock(st_blocks=20971520)
|
|
mock_get_size = mock.Mock()
|
|
mock_get_size.return_value = 10737418240
|
|
|
|
return (mock_stat, mock_get_size, mock_qemu_img_info, mock_lookup)
|
|
|
|
    def test_is_shared_block_storage_rbd(self):
        """rbd on both ends counts as shared block storage without probing
        the instance's disks.
        """
        self.flags(images_type='rbd', group='libvirt')
        bdi = {'block_device_mapping': []}
        instance = objects.Instance(**self.test_instance)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        mock_get_instance_disk_info = mock.Mock()
        data = objects.LibvirtLiveMigrateData(image_type='rbd')
        with mock.patch.object(drvr, '_get_instance_disk_info',
                               mock_get_instance_disk_info):
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
            self.assertTrue(drvr._is_shared_block_storage(instance, data,
                                  block_device_info=bdi))
        self.assertEqual(0, mock_get_instance_disk_info.call_count)
        self.assertTrue(drvr._is_storage_shared_with('foo', 'bar'))
|
|
|
|
    def test_is_shared_block_storage_lvm(self):
        """lvm images are never shared block storage."""
        self.flags(images_type='lvm', group='libvirt')
        bdi = {'block_device_mapping': []}
        instance = objects.Instance(**self.test_instance)
        mock_get_instance_disk_info = mock.Mock()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        data = objects.LibvirtLiveMigrateData(image_type='lvm',
                                              is_volume_backed=False,
                                              is_shared_instance_path=False)
        with mock.patch.object(drvr, '_get_instance_disk_info',
                               mock_get_instance_disk_info):
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
            self.assertFalse(drvr._is_shared_block_storage(
                                instance, data,
                                block_device_info=bdi))
        self.assertEqual(0, mock_get_instance_disk_info.call_count)
|
|
|
|
    def test_is_shared_block_storage_qcow2(self):
        """qcow2 images are never shared block storage."""
        self.flags(images_type='qcow2', group='libvirt')
        bdi = {'block_device_mapping': []}
        instance = objects.Instance(**self.test_instance)
        mock_get_instance_disk_info = mock.Mock()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        data = objects.LibvirtLiveMigrateData(image_type='qcow2',
                                              is_volume_backed=False,
                                              is_shared_instance_path=False)
        with mock.patch.object(drvr, '_get_instance_disk_info',
                               mock_get_instance_disk_info):
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
            self.assertFalse(drvr._is_shared_block_storage(
                                instance, data,
                                block_device_info=bdi))
        self.assertEqual(0, mock_get_instance_disk_info.call_count)
|
|
|
|
    def test_is_shared_block_storage_rbd_only_source(self):
        """rbd only on the source (no image_type in the migrate data) is not
        shared block storage.
        """
        self.flags(images_type='rbd', group='libvirt')
        bdi = {'block_device_mapping': []}
        instance = objects.Instance(**self.test_instance)
        mock_get_instance_disk_info = mock.Mock()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        data = objects.LibvirtLiveMigrateData(is_shared_instance_path=False,
                                              is_volume_backed=False)
        with mock.patch.object(drvr, '_get_instance_disk_info',
                               mock_get_instance_disk_info):
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
            self.assertFalse(drvr._is_shared_block_storage(
                                instance, data,
                                block_device_info=bdi))
        self.assertEqual(0, mock_get_instance_disk_info.call_count)
|
|
|
|
    def test_is_shared_block_storage_rbd_only_dest(self):
        """rbd only on the destination (local images_type unset) is not
        shared block storage.
        """
        bdi = {'block_device_mapping': []}
        instance = objects.Instance(**self.test_instance)
        mock_get_instance_disk_info = mock.Mock()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        data = objects.LibvirtLiveMigrateData(image_type='rbd',
                                              is_volume_backed=False,
                                              is_shared_instance_path=False)
        with mock.patch.object(drvr, '_get_instance_disk_info',
                               mock_get_instance_disk_info):
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
            self.assertFalse(drvr._is_shared_block_storage(
                                instance, data,
                                block_device_info=bdi))
        self.assertEqual(0, mock_get_instance_disk_info.call_count)
|
|
|
|
    def test_is_shared_block_storage_volume_backed(self):
        """An instance whose only disk is an attached volume counts as
        shared block storage.
        """
        disks = [{'type': 'block',
                  'driver': 'raw',
                  'source': 'dev',
                  'source_path': '/dev/disk',
                  'target_dev': 'vda'}]
        bdi = {'block_device_mapping': [
                  {'connection_info': 'info', 'mount_device': '/dev/vda'}]}
        instance = objects.Instance(**self.test_instance)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        (mock_stat, mock_get_size, mock_qemu_img_info, mock_lookup) =\
            self._is_shared_block_storage_test_create_mocks(disks)
        data = objects.LibvirtLiveMigrateData(is_volume_backed=True,
                                              is_shared_instance_path=False)
        with mock.patch.object(host.Host, '_get_domain', mock_lookup):
            self.assertTrue(drvr._is_shared_block_storage(instance, data,
                                  block_device_info = bdi))
        mock_lookup.assert_called_once_with(instance)
|
|
|
|
    def test_is_shared_block_storage_volume_backed_with_disk(self):
        """A volume-backed instance that also has a local file-based disk
        is NOT shared block storage; the local disk is size-probed.
        """
        disks = [{'type': 'block',
                  'driver': 'raw',
                  'source': 'dev',
                  'source_path': '/dev/disk',
                  'target_dev': 'vda'},
                 {'type': 'file',
                  'driver': 'raw',
                  'source': 'file',
                  'source_path': '/instance/disk.local',
                  'target_dev': 'vdb'}]
        bdi = {'block_device_mapping': [
                  {'connection_info': 'info', 'mount_device': '/dev/vda'}]}
        instance = objects.Instance(**self.test_instance)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        (mock_stat, mock_get_size, mock_qemu_img_info, mock_lookup) =\
            self._is_shared_block_storage_test_create_mocks(disks)
        data = objects.LibvirtLiveMigrateData(is_volume_backed=True,
                                              is_shared_instance_path=False)
        with test.nested(
                mock.patch('os.stat', mock_stat),
                mock.patch('os.path.getsize', mock_get_size),
                mock.patch.object(libvirt_driver.disk_api,
                                  'get_disk_info', mock_qemu_img_info),
                mock.patch.object(host.Host, '_get_domain', mock_lookup)):
            self.assertFalse(drvr._is_shared_block_storage(
                                instance, data,
                                block_device_info = bdi))
        # Only the local (non-volume) disk should have been probed.
        mock_stat.assert_called_once_with('/instance/disk.local')
        mock_get_size.assert_called_once_with('/instance/disk.local')
        mock_lookup.assert_called_once_with(instance)
|
|
|
|
    def test_is_shared_block_storage_nfs(self):
        """A shared instance path with a file-in-instance-path image backend
        (e.g. NFS) counts as shared block storage without disk probing.
        """
        bdi = {'block_device_mapping': []}
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        mock_image_backend = mock.MagicMock()
        drvr.image_backend = mock_image_backend
        mock_backend = mock.MagicMock()
        mock_image_backend.backend.return_value = mock_backend
        mock_backend.is_file_in_instance_path.return_value = True
        mock_get_instance_disk_info = mock.Mock()
        data = objects.LibvirtLiveMigrateData(
            is_shared_instance_path=True,
            image_type='foo')
        with mock.patch.object(drvr, '_get_instance_disk_info',
                               mock_get_instance_disk_info):
            self.assertTrue(drvr._is_shared_block_storage(
                                'instance', data, block_device_info=bdi))
        self.assertEqual(0, mock_get_instance_disk_info.call_count)
|
|
|
|
    @mock.patch.object(host.Host, 'has_min_version', return_value=True)
    @mock.patch.object(fakelibvirt.virDomain, "migrateToURI3")
    @mock.patch.object(fakelibvirt.virDomain, "XMLDesc")
    def test_live_migration_update_graphics_xml(self, mock_xml,
                                                mock_migrateToURI3,
                                                mock_min_version):
        """_live_migration_operation rewrites the VNC/SPICE listen addresses
        in the destination XML before calling migrateToURI3, and propagates
        a libvirtError raised by the migration call.
        """
        self.compute = manager.ComputeManager()
        instance_ref = self.test_instance
        target_connection = '127.0.0.2'

        xml_tmpl = ("<domain type='kvm'>"
                    "<devices>"
                    "<graphics type='vnc' listen='{vnc}'>"
                    "<listen address='{vnc}'/>"
                    "</graphics>"
                    "<graphics type='spice' listen='{spice}'>"
                    "<listen address='{spice}'/>"
                    "</graphics>"
                    "</devices>"
                    "</domain>")

        initial_xml = xml_tmpl.format(vnc='1.2.3.4',
                                      spice='5.6.7.8')

        target_xml = xml_tmpl.format(vnc='10.0.0.1',
                                     spice='10.0.0.2')
        # Round-trip through etree so the expected XML is serialized the
        # same way the driver serializes its rewritten XML.
        target_xml = etree.tostring(etree.fromstring(target_xml),
                                    encoding='unicode')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        # Preparing mocks
        mock_xml.return_value = initial_xml
        mock_migrateToURI3.side_effect = fakelibvirt.libvirtError("ERR")

        disk_paths = ['vda', 'vdb']
        _bandwidth = CONF.libvirt.live_migration_bandwidth
        params = {
            'migrate_uri': 'tcp://127.0.0.2',
            'migrate_disks': disk_paths,
            'bandwidth': _bandwidth,
            'destination_xml': target_xml,
        }

        # start test
        migrate_data = objects.LibvirtLiveMigrateData(
            graphics_listen_addr_vnc='10.0.0.1',
            graphics_listen_addr_spice='10.0.0.2',
            serial_listen_addr='127.0.0.1',
            target_connect_addr=target_connection,
            bdms=[],
            block_migration=False)
        dom = fakelibvirt.virDomain
        guest = libvirt_guest.Guest(dom)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(fakelibvirt.libvirtError,
                          drvr._live_migration_operation,
                          self.context, instance_ref, target_connection,
                          False, migrate_data, guest, disk_paths)
        mock_xml.assert_called_once_with(
            flags=fakelibvirt.VIR_DOMAIN_XML_MIGRATABLE)
        mock_migrateToURI3.assert_called_once_with(
            drvr._live_migration_uri(target_connection),
            params=params, flags=0)
|
|
|
|
def test_live_migration_parallels_no_new_xml(self):
    """Parallels live migration must not pass any destination XML.

    Only the bandwidth parameter may appear in the migrateToURI3
    params dict for the parallels virt_type.
    """
    self.flags(virt_type='parallels', group='libvirt')
    self.flags(enabled=False, group='vnc')
    dest = None
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
    inst_values = dict(self.test_instance)
    inst_values.update({'host': 'fake',
                        'power_state': power_state.RUNNING,
                        'vm_state': vm_states.ACTIVE})
    instance = objects.Instance(**inst_values)

    migrate_data = objects.LibvirtLiveMigrateData(
        target_connect_addr=dest,
        block_migration=False)
    fake_dom = mock.MagicMock()
    guest = libvirt_guest.Guest(fake_dom)

    drvr._live_migration_operation(self.context, instance, dest, False,
                                   migrate_data, guest, None)

    # No 'destination_xml' key: parallels does not rewrite the guest XML.
    expected_params = {
        'bandwidth': CONF.libvirt.live_migration_bandwidth,
    }
    fake_dom.migrateToURI3.assert_called_once_with(
        drvr._live_migration_uri(dest),
        params=expected_params, flags=0)
|
@mock.patch.object(utils, 'spawn')
@mock.patch.object(host.Host, 'get_guest')
@mock.patch.object(fakelibvirt.Connection, '_mark_running')
@mock.patch.object(libvirt_driver.LibvirtDriver,
                   '_live_migration_monitor')
@mock.patch.object(libvirt_driver.LibvirtDriver,
                   '_live_migration_copy_disk_paths')
def test_live_migration_parallels_no_migrate_disks(self,
                                                   mock_copy_disk_paths,
                                                   mock_monitor,
                                                   mock_running,
                                                   mock_guest,
                                                   mock_thread):
    """Block migration with parallels skips the disk-path copy step.

    Even with block_migration=True the driver must not call
    _live_migration_copy_disk_paths for the parallels virt_type, and
    the migration operation is spawned with an empty disk list.
    """
    self.flags(virt_type='parallels', group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
    instance_dict = dict(self.test_instance)
    instance_dict.update({'host': 'fake',
                          'power_state': power_state.RUNNING,
                          'vm_state': vm_states.ACTIVE})
    instance = objects.Instance(**instance_dict)
    migrate_data = objects.LibvirtLiveMigrateData(
        block_migration=True)
    dom = fakelibvirt.Domain(drvr._get_connection(), '<domain/>', True)
    guest = libvirt_guest.Guest(dom)
    mock_guest.return_value = guest
    drvr._live_migration(self.context, instance, 'dest',
                         lambda: None, lambda: None, True,
                         migrate_data)
    # Disk paths are never collected for parallels block migration.
    self.assertFalse(mock_copy_disk_paths.called)
    # The operation is run in a background thread with no disks ([]).
    mock_thread.assert_called_once_with(
        drvr._live_migration_operation,
        self.context, instance, 'dest', True,
        migrate_data, guest, [])
|
@mock.patch.object(fakelibvirt.virDomain, "migrateToURI3")
@mock.patch.object(nova.virt.libvirt.migration,
                   'get_updated_guest_xml', return_value='')
@mock.patch.object(fakelibvirt.virDomain, "XMLDesc")
def test_live_migration_update_volume_xml(self, mock_xml,
                                          mock_updated_guest_xml,
                                          mock_migrateToURI3):
    """A volume-backed disk's XML is rewritten for the destination.

    The updated guest XML (with the destination volume path) must be
    passed as 'destination_xml' in the migrateToURI3 params, and
    get_updated_guest_xml must be called once with the live migrate
    data and a volume-config getter.
    """
    self.compute = manager.ComputeManager()
    instance_ref = self.test_instance
    target_connection = '127.0.0.2'

    target_xml = self.device_xml_tmpl.format(
        device_path='/dev/disk/by-path/'
                    'ip-1.2.3.4:3260-iqn.'
                    'cde.67890.opst-lun-Z')

    # Prepare mocks
    mock_xml.return_value = target_xml

    disk_paths = ['vda', 'vdb']
    params = {
        'migrate_disks': disk_paths,
        'migrate_uri': 'tcp://127.0.0.2',
        'bandwidth': CONF.libvirt.live_migration_bandwidth,
        'destination_xml': target_xml
    }

    # Start test
    connection_info = {
        u'driver_volume_type': u'iscsi',
        u'serial': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
        u'data': {
            u'access_mode': u'rw', u'target_discovered': False,
            u'target_iqn': u'ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z',
            u'volume_id': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
            'device_path':
                u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z',
        },
    }
    bdm = objects.LibvirtLiveMigrateBDMInfo(
        serial='58a84f6d-3f0c-4e19-a0af-eb657b790657',
        bus='virtio', type='disk', dev='vdb',
        connection_info=connection_info)
    migrate_data = objects.LibvirtLiveMigrateData(
        serial_listen_addr='',
        target_connect_addr=target_connection,
        bdms=[bdm],
        block_migration=False)

    dom = fakelibvirt.virDomain
    guest = libvirt_guest.Guest(dom)
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    mock_updated_guest_xml.return_value = target_xml
    drvr._live_migration_operation(
        self.context, instance_ref, target_connection,
        False, migrate_data, guest, disk_paths)
    mock_migrateToURI3.assert_called_once_with(
        drvr._live_migration_uri(target_connection),
        params=params, flags=0)
    # No vifs in migrate_data, so no VIF-config getter is supplied.
    mock_updated_guest_xml.assert_called_once_with(
        guest, migrate_data, mock.ANY, get_vif_config=None)
|
def test_live_migration_update_vifs_xml(self):
    """Tests that when migrate_data.vifs is populated, the destination
    guest xml is updated with the migrate_data.vifs configuration, and
    that direct (SR-IOV) VIFs are detached before the migration runs.
    """
    instance = objects.Instance(**self.test_instance)

    source_vif_normal = network_model.VIF(
        id=uuids.port_id, type=network_model.VIF_TYPE_OVS,
        vnic_type=network_model.VNIC_TYPE_NORMAL, details={'foo': 'bar'},
        profile={'binding:host_id': 'fake-source-host'})

    vif = objects.VIFMigrateData(port_id=uuids.port_id,
                                 source_vif=source_vif_normal)
    migrate_data = objects.LibvirtLiveMigrateData(
        serial_listen_addr='',
        target_connect_addr=None,
        bdms=[],
        block_migration=False,
        vifs=[vif])

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    guest = libvirt_guest.Guest(mock.MagicMock())
    fake_xml = '<domain type="qemu"/>'

    def fake_get_updated_guest_xml(guest, migrate_data, get_volume_config,
                                   get_vif_config=None):
        # When migrate_data carries vifs a VIF-config getter must be
        # provided so the destination XML can be rewritten.
        self.assertIsNotNone(get_vif_config)
        return fake_xml

    @mock.patch.object(drvr, "detach_interface")
    @mock.patch('nova.virt.libvirt.migration.get_updated_guest_xml',
                side_effect=fake_get_updated_guest_xml)
    @mock.patch.object(drvr._host, 'has_min_version', return_value=True)
    @mock.patch.object(guest, 'migrate')
    def _test_normal(migrate, has_min_version,
                     get_updated_guest_xml, detach):
        drvr._live_migration_operation(
            self.context, instance, 'dest.host', False,
            migrate_data, guest, [])
        get_updated_guest_xml.assert_called_once()
        migrate.assert_called()
        # Normal VIFs stay attached across the migration.
        detach.assert_not_called()

    _test_normal()

    source_vif_direct = network_model.VIF(
        id=uuids.port_id, type=network_model.VIF_TYPE_OVS,
        vnic_type=network_model.VNIC_TYPE_DIRECT, details={'foo': 'bar'},
        profile={'binding:host_id': 'fake-source-host'})

    vif_direct = objects.VIFMigrateData(port_id=uuids.port_id,
                                        source_vif=source_vif_direct)
    migrate_data = objects.LibvirtLiveMigrateData(
        serial_listen_addr='', target_connect_addr=None,
        bdms=[], block_migration=False, vifs=[vif_direct])

    @mock.patch.object(drvr, "detach_interface")
    @mock.patch('nova.virt.libvirt.migration.get_updated_guest_xml',
                side_effect=fake_get_updated_guest_xml)
    @mock.patch.object(drvr._host, 'has_min_version', return_value=True)
    @mock.patch.object(guest, 'migrate')
    def _test_direct(migrate, has_min_version,
                     get_updated_guest_xml, detach):
        drvr._live_migration_operation(
            self.context, instance, 'dest.host', False,
            migrate_data, guest, [])
        get_updated_guest_xml.assert_called_once()
        migrate.assert_called()
        # Direct VIFs must be detached before migrating.
        # NOTE: this was previously "detach.asset_called()" - a typo
        # for assert_called() that Mock silently accepted as an
        # auto-created attribute, so the assertion checked nothing.
        detach.assert_called()

    _test_direct()

    migrate_data = objects.LibvirtLiveMigrateData(
        serial_listen_addr='', target_connect_addr=None,
        bdms=[], block_migration=False, vifs=[vif, vif_direct])

    @mock.patch.object(drvr, "detach_interface")
    @mock.patch('nova.virt.libvirt.migration.get_updated_guest_xml',
                side_effect=fake_get_updated_guest_xml)
    @mock.patch.object(drvr._host, 'has_min_version', return_value=True)
    @mock.patch.object(guest, 'migrate')
    def _test_mix(migrate, has_min_version, get_updated_guest_xml, detach):
        drvr._live_migration_operation(
            self.context, instance, 'dest.host', False,
            migrate_data, guest, [])
        get_updated_guest_xml.assert_called_once()
        migrate.assert_called()
        # Only the single direct VIF should be detached (was the
        # "asset_called_once" typo - see note above).
        detach.assert_called_once()

    _test_mix()
|
@mock.patch.object(host.Host, 'has_min_version', return_value=True)
@mock.patch.object(fakelibvirt.virDomain, "migrateToURI3")
@mock.patch.object(nova.virt.libvirt.migration,
                   'get_updated_guest_xml', return_value='')
@mock.patch.object(fakelibvirt.virDomain, "XMLDesc")
def test_live_migration_with_valid_target_connect_addr(self, mock_xml,
                                                       mock_updated_guest_xml,
                                                       mock_migrateToURI3,
                                                       mock_min_version):
    """A target_connect_addr yields a 'migrate_uri' in the params.

    With target_connect_addr set on the migrate data, the operation
    must pass migrate_uri='tcp://<addr>' alongside the disks,
    bandwidth and destination XML to migrateToURI3.
    """
    self.compute = manager.ComputeManager()
    instance_ref = self.test_instance
    target_connection = '127.0.0.2'

    target_xml = self.device_xml_tmpl.format(
        device_path='/dev/disk/by-path/'
                    'ip-1.2.3.4:3260-iqn.'
                    'cde.67890.opst-lun-Z')

    # Prepare mocks
    mock_xml.return_value = target_xml

    disk_paths = ['vda', 'vdb']
    params = {
        'migrate_disks': disk_paths,
        'migrate_uri': 'tcp://127.0.0.2',
        'bandwidth': CONF.libvirt.live_migration_bandwidth,
        'destination_xml': target_xml,
    }

    # start test
    connection_info = {
        u'driver_volume_type': u'iscsi',
        u'serial': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
        u'data': {
            u'access_mode': u'rw', u'target_discovered': False,
            u'target_iqn': u'ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z',
            u'volume_id': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
            'device_path':
                u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z',
        },
    }
    bdm = objects.LibvirtLiveMigrateBDMInfo(
        serial='58a84f6d-3f0c-4e19-a0af-eb657b790657',
        bus='virtio', type='disk', dev='vdb',
        connection_info=connection_info)
    migrate_data = objects.LibvirtLiveMigrateData(
        serial_listen_addr='',
        target_connect_addr=target_connection,
        bdms=[bdm],
        block_migration=False)
    dom = fakelibvirt.virDomain
    guest = libvirt_guest.Guest(dom)
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    mock_updated_guest_xml.return_value = target_xml
    drvr._live_migration_operation(self.context, instance_ref,
                                   target_connection, False, migrate_data,
                                   guest, disk_paths)
    mock_migrateToURI3.assert_called_once_with(
        drvr._live_migration_uri(target_connection),
        params=params, flags=0)
|
def test_update_volume_xml(self):
    """get_updated_guest_xml rewrites a disk's source path from the BDM.

    The initial guest XML points at the source volume path; after the
    update it must match XML built from the destination connection
    info. Both sides are parsed/re-serialized so the comparison is
    insensitive to formatting.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    initial_xml = self.device_xml_tmpl.format(
        device_path='/dev/disk/by-path/'
                    'ip-1.2.3.4:3260-iqn.'
                    'abc.12345.opst-lun-X')
    target_xml = self.device_xml_tmpl.format(
        device_path='/dev/disk/by-path/'
                    'ip-1.2.3.4:3260-iqn.'
                    'cde.67890.opst-lun-Z')
    target_xml = etree.tostring(etree.fromstring(target_xml),
                                encoding='unicode')
    serial = "58a84f6d-3f0c-4e19-a0af-eb657b790657"

    bdmi = objects.LibvirtLiveMigrateBDMInfo(serial=serial,
                                             bus='virtio',
                                             type='disk',
                                             dev='vdb')
    bdmi.connection_info = {u'driver_volume_type': u'iscsi',
        'serial': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
        u'data': {u'access_mode': u'rw', u'target_discovered': False,
        u'target_iqn': u'ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z',
        u'volume_id': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
        'device_path':
        u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'}}

    # Build the disk config the mocked _get_volume_config will return,
    # mirroring what the driver would generate for the destination.
    conf = vconfig.LibvirtConfigGuestDisk()
    conf.source_device = bdmi.type
    conf.driver_name = "qemu"
    conf.driver_format = "raw"
    conf.driver_cache = "none"
    conf.target_dev = bdmi.dev
    conf.target_bus = bdmi.bus
    conf.serial = bdmi.connection_info.get('serial')
    conf.source_type = "block"
    conf.source_path = bdmi.connection_info['data'].get('device_path')

    guest = libvirt_guest.Guest(mock.MagicMock())
    with test.nested(
            mock.patch.object(drvr, '_get_volume_config',
                              return_value=conf),
            mock.patch.object(guest, 'get_xml_desc',
                              return_value=initial_xml)):
        config = libvirt_migrate.get_updated_guest_xml(guest,
            objects.LibvirtLiveMigrateData(bdms=[bdmi]),
            drvr._get_volume_config)
        # Normalize whitespace on both sides before comparing.
        parser = etree.XMLParser(remove_blank_text=True)
        config = etree.fromstring(config, parser)
        target_xml = etree.fromstring(target_xml, parser)
        self.assertEqual(etree.tostring(target_xml, encoding='unicode'),
                         etree.tostring(config, encoding='unicode'))
|
def test_live_migration_uri(self):
    """Each virt_type maps to its live migration URI template.

    virt_types without a known migration URI must raise
    LiveMigrationURINotAvailable.
    """
    destinations = ('127.0.0.1', '127.0.0.1:4444', '[::1]:4444',
                    '[0:0:0:0:0:0:0:1]:4444', u'127.0.0.1',
                    u'destination',
                    )

    virt_type_to_uri = (
        ('xen', 'xenmigr://%s/system'),
        ('kvm', 'qemu+tcp://%s/system'),
        ('qemu', 'qemu+tcp://%s/system'),
        ('parallels', 'parallels+tcp://%s/system'),
        # anything else will return None
        ('lxc', None),
    )

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    for destination in destinations:
        for virt_type, uri_template in virt_type_to_uri:
            self.flags(virt_type=virt_type, group='libvirt')
            if uri_template is None:
                self.assertRaises(
                    exception.LiveMigrationURINotAvailable,
                    drvr._live_migration_uri,
                    destination)
            else:
                self.assertEqual(uri_template % destination,
                                 drvr._live_migration_uri(destination))
|
def test_live_migration_uri_ipv6(self):
    """Bare IPv6 destinations get bracketed in the migration URI."""
    destinations = ('::1', '0:0:0:0:0:0:0:1', u'::1')

    virt_type_to_uri = (
        ('xen', 'xenmigr://[%s]/system'),
        ('kvm', 'qemu+tcp://[%s]/system'),
        ('qemu', 'qemu+tcp://[%s]/system'),
        ('parallels', 'parallels+tcp://[%s]/system'),
        # anything else will return None
        ('lxc', None),
    )

    for destination in destinations:
        for virt_type, uri_template in virt_type_to_uri:
            self.flags(virt_type=virt_type, group='libvirt')
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
            if uri_template is None:
                self.assertRaises(
                    exception.LiveMigrationURINotAvailable,
                    drvr._live_migration_uri,
                    destination)
            else:
                self.assertEqual(uri_template % destination,
                                 drvr._live_migration_uri(destination))
|
def test_live_migration_uri_forced(self):
    """An explicit live_migration_uri overrides the virt_type URI."""
    destination = 'destination'
    override_uri = 'foo://%s/bar'
    for virt_type in ('kvm', 'xen'):
        self.flags(virt_type=virt_type, group='libvirt')
        self.flags(live_migration_uri=override_uri, group='libvirt')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertEqual(override_uri % destination,
                         drvr._live_migration_uri(destination))
|
def test_live_migration_scheme(self):
    """live_migration_scheme swaps the transport in the default URI."""
    self.flags(live_migration_scheme='ssh', group='libvirt')
    destination = 'destination'
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    expected = 'qemu+ssh://%s/system' % destination
    self.assertEqual(expected, drvr._live_migration_uri(destination))
|
def test_live_migration_scheme_does_not_override_uri(self):
    """An explicit live_migration_uri wins over live_migration_scheme."""
    explicit_uri = 'qemu+ssh://%s/system'
    self.flags(live_migration_uri=explicit_uri, group='libvirt')
    self.flags(live_migration_scheme='tcp', group='libvirt')
    destination = 'destination'
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.assertEqual(explicit_uri % destination,
                     drvr._live_migration_uri(destination))
|
def test_migrate_uri(self):
    """_migrate_uri yields a plain tcp:// URI for qemu/kvm only."""
    cases = (
        ('xen', None),
        ('kvm', 'tcp://%s'),
        ('qemu', 'tcp://%s'),
    )
    destination = 'destination'
    for virt_type, uri_template in cases:
        self.flags(virt_type=virt_type, group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        # Unsupported virt_types (xen) yield None.
        expected = uri_template % destination if uri_template else None
        self.assertEqual(expected, drvr._migrate_uri(destination))
|
def test_migrate_uri_forced_live_migration_uri(self):
    """_migrate_uri ignores a forced live_migration_uri setting."""
    destination = 'destination'
    self.flags(virt_type='kvm', group='libvirt')
    self.flags(live_migration_uri='qemu+tcp://user:pass@%s/system',
               group='libvirt')

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    # Still plain tcp://, not the forced control-connection URI.
    self.assertEqual('tcp://%s' % destination,
                     drvr._migrate_uri(destination))
|
def test_migrate_uri_forced_live_migration_inboud_addr(self):
    """IPv4/hostname destinations produce tcp://<dest> str URIs."""
    self.flags(virt_type='kvm', group='libvirt')

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    for destination in ('127.0.0.1', '127.0.0.1:4444', '[::1]:4444',
                        '[0:0:0:0:0:0:0:1]:4444', u'127.0.0.1',
                        u'destination'):
        migrate_uri = drvr._migrate_uri(destination)
        self.assertEqual('tcp://%s' % destination, migrate_uri)
        self.assertIsInstance(migrate_uri, str)
|
def test_migrate_uri_forced_live_migration_inboud_addr_ipv6(self):
    """Bare IPv6 destinations get bracketed: tcp://[<dest>]."""
    self.flags(virt_type='kvm', group='libvirt')

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    for destination in ('::1', '0:0:0:0:0:0:0:1', u'::1'):
        migrate_uri = drvr._migrate_uri(destination)
        self.assertEqual('tcp://[%s]' % destination, migrate_uri)
        self.assertIsInstance(migrate_uri, str)
|
def test_update_volume_xml_no_serial(self):
    """A disk with an empty <serial> element is left untouched.

    get_updated_guest_xml matches volumes by serial; with no serial in
    the guest XML the disk must not be rewritten, so the output equals
    the (re-serialized) input.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    xml_tmpl = """
    <domain type='kvm'>
      <devices>
        <disk type='block' device='disk'>
          <driver name='qemu' type='raw' cache='none'/>
          <source dev='{device_path}'/>
          <target bus='virtio' dev='vdb'/>
          <serial></serial>
          <address type='pci' domain='0x0' bus='0x0' slot='0x04' \
function='0x0'/>
        </disk>
      </devices>
    </domain>
    """

    initial_xml = xml_tmpl.format(device_path='/dev/disk/by-path/'
                                  'ip-1.2.3.4:3260-iqn.'
                                  'abc.12345.opst-lun-X')
    # Target is intentionally identical: no update expected.
    target_xml = xml_tmpl.format(device_path='/dev/disk/by-path/'
                                 'ip-1.2.3.4:3260-iqn.'
                                 'abc.12345.opst-lun-X')
    target_xml = etree.tostring(etree.fromstring(target_xml),
                                encoding='unicode')
    serial = "58a84f6d-3f0c-4e19-a0af-eb657b790657"
    connection_info = {
        u'driver_volume_type': u'iscsi',
        'serial': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
        u'data': {
            u'access_mode': u'rw', u'target_discovered': False,
            u'target_iqn': u'ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z',
            u'volume_id': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
            u'device_path':
                u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z',
        },
    }
    bdmi = objects.LibvirtLiveMigrateBDMInfo(serial=serial,
                                             bus='virtio',
                                             dev='vdb',
                                             type='disk')
    bdmi.connection_info = connection_info

    conf = vconfig.LibvirtConfigGuestDisk()
    conf.source_device = bdmi.type
    conf.driver_name = "qemu"
    conf.driver_format = "raw"
    conf.driver_cache = "none"
    conf.target_dev = bdmi.dev
    conf.target_bus = bdmi.bus
    conf.serial = bdmi.connection_info.get('serial')
    conf.source_type = "block"
    conf.source_path = bdmi.connection_info['data'].get('device_path')

    guest = libvirt_guest.Guest(mock.MagicMock())
    with test.nested(
            mock.patch.object(drvr, '_get_volume_config',
                              return_value=conf),
            mock.patch.object(guest, 'get_xml_desc',
                              return_value=initial_xml)):
        config = libvirt_migrate.get_updated_guest_xml(guest,
            objects.LibvirtLiveMigrateData(bdms=[bdmi]),
            drvr._get_volume_config)
        self.assertEqual(target_xml, config)
|
def test_update_volume_xml_no_connection_info(self):
    """A BDM with empty connection_info leaves the guest XML unchanged."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    initial_xml = self.device_xml_tmpl.format(
        device_path='/dev/disk/by-path/'
                    'ip-1.2.3.4:3260-iqn.'
                    'abc.12345.opst-lun-X')
    # Target is the same path: no rewrite is expected.
    target_xml = self.device_xml_tmpl.format(
        device_path='/dev/disk/by-path/'
                    'ip-1.2.3.4:3260-iqn.'
                    'abc.12345.opst-lun-X')
    target_xml = etree.tostring(etree.fromstring(target_xml),
                                encoding='unicode')
    serial = "58a84f6d-3f0c-4e19-a0af-eb657b790657"
    bdmi = objects.LibvirtLiveMigrateBDMInfo(serial=serial,
                                             dev='vdb',
                                             type='disk',
                                             bus='scsi',
                                             format='qcow')
    bdmi.connection_info = {}
    conf = vconfig.LibvirtConfigGuestDisk()
    guest = libvirt_guest.Guest(mock.MagicMock())
    with test.nested(
            mock.patch.object(drvr, '_get_volume_config',
                              return_value=conf),
            mock.patch.object(guest, 'get_xml_desc',
                              return_value=initial_xml)):
        config = libvirt_migrate.get_updated_guest_xml(
            guest,
            objects.LibvirtLiveMigrateData(bdms=[bdmi]),
            drvr._get_volume_config)
        self.assertEqual(target_xml, config)
|
@mock.patch.object(host.Host, 'has_min_version', return_value=True)
@mock.patch.object(libvirt_driver.LibvirtDriver,
                   '_get_serial_ports_from_guest')
@mock.patch.object(fakelibvirt.virDomain, "migrateToURI3")
@mock.patch.object(fakelibvirt.virDomain, "XMLDesc")
def test_live_migration_update_serial_console_xml(self, mock_xml,
                                                  mock_migrateToURI3,
                                                  mock_get,
                                                  mock_min_version):
    """The serial console address/port are rewritten for the target.

    The destination XML passed to migrateToURI3 must carry the target
    serial_listen_addr and serial_listen_ports from migrate_data
    instead of the source values.
    """
    self.compute = manager.ComputeManager()
    instance_ref = self.test_instance
    target_connection = '127.0.0.2'

    xml_tmpl = ("<domain type='kvm'>"
                "<devices>"
                "<console type='tcp'>"
                "<source mode='bind' host='{addr}' service='{port}'/>"
                "<target type='serial' port='0'/>"
                "</console>"
                "</devices>"
                "</domain>")

    initial_xml = xml_tmpl.format(addr='9.0.0.1', port='10100')

    target_xml = xml_tmpl.format(addr='9.0.0.12', port='10200')
    target_xml = etree.tostring(etree.fromstring(target_xml),
                                encoding='unicode')

    # Preparing mocks
    mock_xml.return_value = initial_xml
    # Fail the migration so the call stops right after migrateToURI3.
    mock_migrateToURI3.side_effect = fakelibvirt.libvirtError("ERR")

    disk_paths = ['vda', 'vdb']
    params = {
        'migrate_uri': 'tcp://127.0.0.2',
        'migrate_disks': ['vda', 'vdb'],
        'bandwidth': CONF.libvirt.live_migration_bandwidth,
        'destination_xml': target_xml,
    }

    # start test
    migrate_data = objects.LibvirtLiveMigrateData(
        graphics_listen_addr_vnc='10.0.0.1',
        graphics_listen_addr_spice='10.0.0.2',
        serial_listen_addr='9.0.0.12',
        target_connect_addr=target_connection,
        bdms=[],
        block_migration=False,
        serial_listen_ports=[10200])
    dom = fakelibvirt.virDomain
    guest = libvirt_guest.Guest(dom)
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.assertRaises(fakelibvirt.libvirtError,
                      drvr._live_migration_operation,
                      self.context, instance_ref, target_connection,
                      False, migrate_data, guest, disk_paths)
    mock_xml.assert_called_once_with(
        flags=fakelibvirt.VIR_DOMAIN_XML_MIGRATABLE)
    mock_migrateToURI3.assert_called_once_with(
        drvr._live_migration_uri(target_connection),
        params=params, flags=0)
|
def test_live_migration_fails_without_serial_console_address(self):
    """Missing proxyclient_address aborts with MigrationError.

    With serial consoles enabled but no proxy client address
    configured, _live_migration_operation must refuse to migrate.
    """
    self.compute = manager.ComputeManager()
    self.flags(enabled=True, group='serial_console')
    self.flags(proxyclient_address='', group='serial_console')
    inst_values = dict(self.test_instance)
    inst_values.update({'host': 'fake',
                        'power_state': power_state.RUNNING,
                        'vm_state': vm_states.ACTIVE})
    instance = objects.Instance(**inst_values)

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    guest = libvirt_guest.Guest(fakelibvirt.virDomain)

    migrate_data = objects.LibvirtLiveMigrateData(
        serial_listen_addr='',
        target_connect_addr=None,
        bdms=[],
        block_migration=False)
    self.assertRaises(exception.MigrationError,
                      drvr._live_migration_operation,
                      self.context, instance, 'dest',
                      False, migrate_data, guest, [])
|
@mock.patch.object(host.Host, 'has_min_version', return_value=True)
@mock.patch.object(fakelibvirt.virDomain, "migrateToURI3")
@mock.patch('nova.virt.libvirt.migration.get_updated_guest_xml',
            return_value='')
@mock.patch('nova.virt.libvirt.guest.Guest.get_xml_desc',
            return_value='<xml></xml>')
def test_live_migration_uses_migrateToURI3(
        self, mock_old_xml, mock_new_xml, mock_migrateToURI3,
        mock_min_version):
    """The operation drives libvirt through migrateToURI3.

    With an empty updated XML, no 'destination_xml' key appears in
    the params dict; the libvirt error is propagated to the caller.
    """
    target_connection = '127.0.0.2'
    # Preparing mocks
    disk_paths = ['vda', 'vdb']
    params = {
        'migrate_uri': 'tcp://127.0.0.2',
        'migrate_disks': ['vda', 'vdb'],
        'bandwidth': CONF.libvirt.live_migration_bandwidth,
    }
    mock_migrateToURI3.side_effect = fakelibvirt.libvirtError("ERR")

    # Start test
    migrate_data = objects.LibvirtLiveMigrateData(
        graphics_listen_addr_vnc='0.0.0.0',
        graphics_listen_addr_spice='0.0.0.0',
        serial_listen_addr='127.0.0.1',
        target_connect_addr=target_connection,
        bdms=[],
        block_migration=False)

    dom = fakelibvirt.virDomain
    guest = libvirt_guest.Guest(dom)
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    instance = objects.Instance(**self.test_instance)
    self.assertRaises(fakelibvirt.libvirtError,
                      drvr._live_migration_operation,
                      self.context, instance, target_connection,
                      False, migrate_data, guest, disk_paths)
    mock_migrateToURI3.assert_called_once_with(
        drvr._live_migration_uri(target_connection),
        params=params, flags=0)
|
@mock.patch.object(fakelibvirt.virDomain, "migrateToURI3")
@mock.patch.object(host.Host, 'has_min_version', return_value=True)
@mock.patch('nova.virt.libvirt.guest.Guest.get_xml_desc',
            return_value='<xml/>')
def _test_live_migration_block_migration_flags(self,
        device_names, expected_flags,
        mock_old_xml, mock_min_version, mock_migrateToURI3):
    """Helper: run a block migration and verify the libvirt flags.

    :param device_names: disk device names to block-migrate; an empty
        list means every disk was filtered out.
    :param expected_flags: exact flags value migrateToURI3 must get.
    """
    target_connection = '127.0.0.2'
    migrate_data = objects.LibvirtLiveMigrateData(
        graphics_listen_addr_vnc='0.0.0.0',
        graphics_listen_addr_spice='0.0.0.0',
        serial_listen_addr='127.0.0.1',
        target_connect_addr=target_connection,
        bdms=[],
        block_migration=True)

    dom = fakelibvirt.virDomain
    guest = libvirt_guest.Guest(dom)
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    # Recompute the migration flags from the current config flags.
    drvr._parse_migration_flags()

    instance = objects.Instance(**self.test_instance)
    drvr._live_migration_operation(self.context, instance,
                                   target_connection,
                                   True, migrate_data, guest,
                                   device_names)

    params = {
        'migrate_uri': 'tcp://127.0.0.2',
        'migrate_disks': device_names,
        'bandwidth': CONF.libvirt.live_migration_bandwidth,
        'destination_xml': '<xml/>',
    }
    # With no disks to migrate the key is omitted entirely.
    if not params['migrate_disks']:
        del params['migrate_disks']

    mock_migrateToURI3.assert_called_once_with(
        drvr._live_migration_uri(target_connection), params=params,
        flags=expected_flags)
|
def test_live_migration_block_migration_with_devices(self):
    """Disks to copy add VIR_MIGRATE_NON_SHARED_INC to the flags."""
    flags = (fakelibvirt.VIR_MIGRATE_NON_SHARED_INC |
             fakelibvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
             fakelibvirt.VIR_MIGRATE_PERSIST_DEST |
             fakelibvirt.VIR_MIGRATE_PEER2PEER |
             fakelibvirt.VIR_MIGRATE_LIVE)
    self._test_live_migration_block_migration_flags(['vda'], flags)
|
def test_live_migration_block_migration_all_filtered(self):
    """With every disk filtered out NON_SHARED_INC must be dropped."""
    flags = (fakelibvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
             fakelibvirt.VIR_MIGRATE_PERSIST_DEST |
             fakelibvirt.VIR_MIGRATE_PEER2PEER |
             fakelibvirt.VIR_MIGRATE_LIVE)
    self._test_live_migration_block_migration_flags([], flags)
|
@mock.patch.object(host.Host, 'has_min_version', return_value=True)
@mock.patch.object(fakelibvirt.virDomain, "migrateToURI3")
@mock.patch('nova.virt.libvirt.migration.get_updated_guest_xml',
            return_value='')
@mock.patch('nova.virt.libvirt.guest.Guest.get_xml_desc', return_value='')
def test_block_live_migration_tunnelled_migrateToURI3(
        self, mock_old_xml, mock_new_xml,
        mock_migrateToURI3, mock_min_version):
    """Tunnelled block migration drops migrate_disks from the params.

    With live_migration_tunnelled=True, VIR_MIGRATE_TUNNELLED is set
    and only bandwidth is passed to migrateToURI3.
    """
    self.flags(live_migration_tunnelled=True, group='libvirt')

    target_connection = None
    device_names = ['disk1', 'disk2']

    # Preparing mocks
    # Since we are passing the VIR_MIGRATE_TUNNELLED flag, the
    # 'parms' dict will not (as expected) contain 'migrate_disks'
    params = {
        'bandwidth': CONF.libvirt.live_migration_bandwidth
    }
    # Start test
    migrate_data = objects.LibvirtLiveMigrateData(
        graphics_listen_addr_vnc='0.0.0.0',
        graphics_listen_addr_spice='0.0.0.0',
        serial_listen_addr='127.0.0.1',
        target_connect_addr=target_connection,
        bdms=[],
        block_migration=True)

    dom = fakelibvirt.virDomain
    guest = libvirt_guest.Guest(dom)
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    # Recompute migration flags now that tunnelling is enabled.
    drvr._parse_migration_flags()
    instance = objects.Instance(**self.test_instance)
    drvr._live_migration_operation(self.context, instance,
                                   target_connection, True, migrate_data,
                                   guest, device_names)

    expected_flags = (fakelibvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                      fakelibvirt.VIR_MIGRATE_PERSIST_DEST |
                      fakelibvirt.VIR_MIGRATE_TUNNELLED |
                      fakelibvirt.VIR_MIGRATE_PEER2PEER |
                      fakelibvirt.VIR_MIGRATE_LIVE)
    mock_migrateToURI3.assert_called_once_with(
        drvr._live_migration_uri(target_connection),
        params=params, flags=expected_flags)
|
@mock.patch.object(host.Host, 'has_min_version', return_value=True)
@mock.patch.object(fakelibvirt.virDomain, "migrateToURI3")
@mock.patch('nova.virt.libvirt.migration.get_updated_guest_xml',
            return_value='')
@mock.patch('nova.virt.libvirt.guest.Guest.get_xml_desc', return_value='')
def test_block_live_migration_native_tls(
        self, mock_old_xml, mock_new_xml,
        mock_migrateToURI3, mock_min_version):
    """live_migration_with_native_tls adds VIR_MIGRATE_TLS.

    Unlike tunnelling, native TLS keeps 'migrate_disks' in the
    params for block migration.
    """
    self.flags(live_migration_with_native_tls=True, group='libvirt')

    target_connection = None
    disk_paths = ['vda', 'vdb']

    params = {
        'bandwidth': CONF.libvirt.live_migration_bandwidth,
        'migrate_disks': disk_paths
    }

    # Start test
    migrate_data = objects.LibvirtLiveMigrateData(
        graphics_listen_addr_vnc='0.0.0.0',
        graphics_listen_addr_spice='0.0.0.0',
        serial_listen_addr='127.0.0.1',
        target_connect_addr=target_connection,
        bdms=[],
        block_migration=True)

    dom = fakelibvirt.virDomain
    guest = libvirt_guest.Guest(dom)
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    # Recompute migration flags now that native TLS is enabled.
    drvr._parse_migration_flags()
    instance = objects.Instance(**self.test_instance)
    drvr._live_migration_operation(self.context, instance,
                                   target_connection, True, migrate_data,
                                   guest, disk_paths)

    expected_flags = (fakelibvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                      fakelibvirt.VIR_MIGRATE_PERSIST_DEST |
                      fakelibvirt.VIR_MIGRATE_PEER2PEER |
                      fakelibvirt.VIR_MIGRATE_NON_SHARED_INC |
                      fakelibvirt.VIR_MIGRATE_TLS |
                      fakelibvirt.VIR_MIGRATE_LIVE)
    mock_migrateToURI3.assert_called_once_with(
        drvr._live_migration_uri(target_connection),
        params=params, flags=expected_flags)
|
@mock.patch.object(host.Host, 'has_min_version', return_value=True)
@mock.patch.object(fakelibvirt.virDomain, "migrateToURI3")
@mock.patch('nova.virt.libvirt.guest.Guest.get_xml_desc',
            return_value='<xml/>')
def test_live_migration_raises_exception(self, mock_xml,
                                         mock_migrateToURI3,
                                         mock_min_version):
    """A libvirtError from migrateToURI3 propagates to the caller."""
    # Prepare data
    self.compute = manager.ComputeManager()
    instance_ref = self.test_instance
    target_connection = '127.0.0.2'

    disk_paths = ['vda', 'vdb']
    params = {
        'migrate_uri': 'tcp://127.0.0.2',
        'migrate_disks': disk_paths,
        'bandwidth': CONF.libvirt.live_migration_bandwidth,
        'destination_xml': '<xml/>',
    }

    # Prepare mocks
    mock_migrateToURI3.side_effect = fakelibvirt.libvirtError("ERR")

    # Start test
    migrate_data = objects.LibvirtLiveMigrateData(
        graphics_listen_addr_vnc='10.0.0.1',
        graphics_listen_addr_spice='10.0.0.2',
        serial_listen_addr='127.0.0.1',
        target_connect_addr=target_connection,
        bdms=[],
        block_migration=False)
    dom = fakelibvirt.virDomain
    guest = libvirt_guest.Guest(dom)
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.assertRaises(fakelibvirt.libvirtError,
                      drvr._live_migration_operation,
                      self.context, instance_ref, target_connection,
                      False, migrate_data, guest, disk_paths)
    # Even though it failed, the call itself must have been well-formed.
    mock_migrateToURI3.assert_called_once_with(
        drvr._live_migration_uri(target_connection),
        params=params, flags=0)
|
@mock.patch('shutil.rmtree')
@mock.patch('os.path.exists', return_value=True)
@mock.patch('nova.virt.libvirt.utils.get_instance_path_at_destination')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.destroy')
def test_rollback_live_migration_at_dest_not_shared(self, mock_destroy,
                                                    mock_get_instance_path,
                                                    mock_exist,
                                                    mock_shutil
                                                    ):
    """Rollback on a non-shared instance path must remove the local
    instance directory even when destroy() fails, and the destroy
    exception must be re-raised to the caller.
    """
    # destroy method may raise InstanceTerminationFailure or
    # InstancePowerOffFailure, here use their base class Invalid.
    mock_destroy.side_effect = exception.Invalid(reason='just test')
    # NOTE: no leading '/' on the second component; os.path.join
    # discards every preceding component when it meets an absolute
    # path, which would silently drop instances_path from the result.
    fake_instance_path = os.path.join(cfg.CONF.instances_path,
                                      'fake_instance_uuid')
    mock_get_instance_path.return_value = fake_instance_path
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    migrate_data = objects.LibvirtLiveMigrateData(
        is_shared_instance_path=False,
        instance_relative_path=False)
    # The destroy failure propagates ...
    self.assertRaises(exception.Invalid,
                      drvr.rollback_live_migration_at_destination,
                      "context", "instance", [], None, True, migrate_data)
    # ... but the local instance directory was still cleaned up.
    mock_exist.assert_called_once_with(fake_instance_path)
    mock_shutil.assert_called_once_with(fake_instance_path)
|
|
|
|
@mock.patch('shutil.rmtree')
@mock.patch('os.path.exists')
@mock.patch('nova.virt.libvirt.utils.get_instance_path_at_destination')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.destroy')
def test_rollback_live_migration_at_dest_shared(self, mock_destroy,
                                                mock_get_instance_path,
                                                mock_exist,
                                                mock_shutil
                                                ):
    """With a shared instance path, rollback destroys the guest but
    must leave the (shared) instance directory untouched.
    """

    def fake_destroy(ctxt, instance, network_info,
                     block_device_info=None, destroy_disks=True):
        # This is just here to test the signature. Seems there should
        # be a better way to do this with mock and autospec.
        pass

    mock_destroy.side_effect = fake_destroy
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    migrate_data = objects.LibvirtLiveMigrateData(
        is_shared_instance_path=True,
        instance_relative_path=False)
    drvr.rollback_live_migration_at_destination("context", "instance", [],
                                                None, True, migrate_data)
    mock_destroy.assert_called_once_with("context", "instance", [],
                                         None, True)
    # No filesystem lookup or cleanup may happen on a shared path.
    for untouched in (mock_get_instance_path, mock_exist, mock_shutil):
        self.assertFalse(untouched.called)
|
|
|
|
@mock.patch.object(fakelibvirt.Domain, "XMLDesc")
def test_live_migration_copy_disk_paths_tunnelled(self, mock_xml):
    """With tunnelled migration, _live_migration_copy_disk_paths must
    select only writable, locally-backed disks: the shareable disk
    (vdb), the readonly disk (vdc) and the network-backed disk are
    all excluded from the copy list.
    """
    self.flags(live_migration_tunnelled=True, group='libvirt')
    # One copyable file disk (vda), one shareable disk (vdb), one
    # readonly config disk (vdc), one copyable block disk (vdd) and
    # one network disk with no target device.
    xml = """
    <domain>
      <name>dummy</name>
      <uuid>d4e13113-918e-42fe-9fc9-861693ffd432</uuid>
      <devices>
        <disk type="file">
           <source file="/var/lib/nova/instance/123/disk.root"/>
           <target dev="vda"/>
        </disk>
        <disk type="file">
           <source file="/var/lib/nova/instance/123/disk.shared"/>
           <target dev="vdb"/>
           <shareable/>
        </disk>
        <disk type="file">
           <source file="/var/lib/nova/instance/123/disk.config"/>
           <target dev="vdc"/>
           <readonly/>
        </disk>
        <disk type="block">
           <source dev="/dev/mapper/somevol"/>
           <target dev="vdd"/>
        </disk>
        <disk type="network">
           <source protocol="https" name="url_path">
             <host name="hostname" port="443"/>
           </source>
        </disk>
      </devices>
    </domain>"""
    mock_xml.return_value = xml

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    drvr._parse_migration_flags()
    dom = fakelibvirt.Domain(drvr._get_connection(), xml, False)
    guest = libvirt_guest.Guest(dom)

    paths = drvr._live_migration_copy_disk_paths(None, None, guest)
    # Result is (source paths, target device names), in XML order.
    self.assertEqual((["/var/lib/nova/instance/123/disk.root",
                      "/dev/mapper/somevol"], ['vda', 'vdd']), paths)
|
|
|
|
@mock.patch.object(host.Host, "get_connection")
@mock.patch.object(host.Host, "has_min_version", return_value=True)
@mock.patch('nova.virt.driver.get_block_device_info')
@mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid')
@mock.patch.object(fakelibvirt.Domain, "XMLDesc")
def test_live_migration_copy_disk_paths_selective_block_migration(
        self, mock_xml, mock_get_instance,
        mock_block_device_info, mock_version, mock_conn):
    """Selective block migration: local file disks (root, ephemeral,
    swap - vda/vdb/vdc) are copied, while the volume attached via a
    block-device mapping (vdd) and the network disk are skipped.
    """
    xml = """
    <domain>
      <name>dummy</name>
      <uuid>d4e13113-918e-42fe-9fc9-861693ffd432</uuid>
      <devices>
        <disk type="file">
           <source file="/var/lib/nova/instance/123/disk.root"/>
           <target dev="vda"/>
        </disk>
        <disk type="file">
           <source file="/var/lib/nova/instance/123/disk.shared"/>
           <target dev="vdb"/>
        </disk>
        <disk type="file">
           <source file="/var/lib/nova/instance/123/disk.config"/>
           <target dev="vdc"/>
        </disk>
        <disk type="block">
           <source dev="/dev/mapper/somevol"/>
           <target dev="vdd"/>
        </disk>
        <disk type="network">
           <source protocol="https" name="url_path">
             <host name="hostname" port="443"/>
           </source>
        </disk>
      </devices>
    </domain>"""
    mock_xml.return_value = xml
    instance = objects.Instance(**self.test_instance)
    instance.root_device_name = '/dev/vda'
    # vdd is an attached iSCSI volume: its mapping below is what must
    # exclude it from the copy list.
    block_device_info = {
        'swap': {
            'disk_bus': u'virtio',
            'swap_size': 10,
            'device_name': u'/dev/vdc'
        },
        'root_device_name': u'/dev/vda',
        'ephemerals': [{
            'guest_format': u'ext3',
            'device_name': u'/dev/vdb',
            'disk_bus': u'virtio',
            'device_type': u'disk',
            'size': 1
        }],
        'block_device_mapping': [{
            'guest_format': None,
            'boot_index': None,
            'mount_device': u'/dev/vdd',
            'connection_info': {
                u'driver_volume_type': u'iscsi',
                'serial': u'147df29f-aec2-4851-b3fe-f68dad151834',
                u'data': {
                    u'access_mode': u'rw',
                    u'target_discovered': False,
                    u'encrypted': False,
                    u'qos_specs': None,
                    u'target_iqn': u'iqn.2010-10.org.openstack:'
                                   u'volume-147df29f-aec2-4851-b3fe-'
                                   u'f68dad151834',
                    u'target_portal': u'10.102.44.141:3260', u'volume_id':
                        u'147df29f-aec2-4851-b3fe-f68dad151834',
                    u'target_lun': 1,
                    u'auth_password': u'cXELT66FngwzTwpf',
                    u'auth_username': u'QbQQjj445uWgeQkFKcVw',
                    u'auth_method': u'CHAP'
                }
            },
            'disk_bus': None,
            'device_type': None,
            'delete_on_termination': False
        }]
    }
    mock_block_device_info.return_value = block_device_info
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    dom = fakelibvirt.Domain(drvr._get_connection(), xml, False)
    guest = libvirt_guest.Guest(dom)
    return_value = drvr._live_migration_copy_disk_paths(self.context,
                                                        instance,
                                                        guest)
    # (source paths, target device names) for the local disks only.
    expected = (['/var/lib/nova/instance/123/disk.root',
                 '/var/lib/nova/instance/123/disk.shared',
                 '/var/lib/nova/instance/123/disk.config'],
                ['vda', 'vdb', 'vdc'])
    self.assertEqual(expected, return_value)
|
|
|
|
@mock.patch.object(libvirt_driver.LibvirtDriver,
                   "_live_migration_copy_disk_paths")
def test_live_migration_data_gb_plain(self, mock_paths):
    """With no disks to copy, the migration data size is just the
    guest RAM and the disk-path scan is never performed.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)

    # 2GB of RAM from the fake instance, nothing else.
    self.assertEqual(2, drvr._live_migration_data_gb(instance, []))
    mock_paths.assert_not_called()
|
|
|
|
def test_live_migration_data_gb_block(self):
    """RAM plus each copied disk's size, rounded up to whole GBs, is
    summed into the migration data estimate.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)

    # Byte sizes os.stat() should report per disk path.
    sizes = {
        "/var/lib/nova/instance/123/disk.root": 10 * units.Gi,
        "/dev/mapper/somevol": 1.5 * units.Gi,
    }

    class StatResult(object):
        def __init__(self, size):
            self._size = size

        @property
        def st_size(self):
            return self._size

    def fake_stat(path):
        try:
            return StatResult(sizes[path])
        except KeyError:
            raise Exception("Should not be reached")

    disk_paths = list(sizes)
    with mock.patch.object(os, "stat") as mock_stat:
        mock_stat.side_effect = fake_stat
        data_gb = drvr._live_migration_data_gb(instance, disk_paths)
        # Expecting 2 GB for RAM, plus 10 GB for disk.root
        # and 1.5 GB rounded to 2 GB for somevol, so 14 GB
        self.assertEqual(14, data_gb)
|
|
|
|
# Symbolic outcomes used by _test_live_migration_monitoring to express
# what the monitored migration is expected to do.
EXPECT_SUCCESS = 1   # migration completes; post method invoked
EXPECT_FAILURE = 2   # migration fails; recover method invoked
EXPECT_ABORT = 3     # migration is aborted via abortJob
|
|
|
|
@mock.patch.object(libvirt_guest.Guest, "migrate_start_postcopy")
@mock.patch.object(time, "time")
@mock.patch.object(time, "sleep",
                   side_effect=lambda x: eventlet.sleep(0))
@mock.patch.object(host.Host, "get_connection")
@mock.patch.object(libvirt_guest.Guest, "get_job_info")
@mock.patch.object(objects.Instance, "save")
@mock.patch.object(objects.Migration, "save")
@mock.patch.object(fakelibvirt.Connection, "_mark_running")
@mock.patch.object(fakelibvirt.virDomain, "abortJob")
@mock.patch.object(libvirt_guest.Guest, "pause")
def _test_live_migration_monitoring(self,
                                    job_info_records,
                                    time_records,
                                    expect_result,
                                    mock_pause,
                                    mock_abort,
                                    mock_running,
                                    mock_save,
                                    mock_mig_save,
                                    mock_job_info,
                                    mock_conn,
                                    mock_sleep,
                                    mock_time,
                                    mock_postcopy_switch,
                                    current_mig_status=None,
                                    expected_mig_status=None,
                                    scheduled_action=None,
                                    scheduled_action_executed=False,
                                    block_migration=False,
                                    expected_switch=False):
    """Drive _live_migration_monitor through a scripted job sequence.

    :param job_info_records: list consumed in order by the faked
        get_job_info; each entry is either a JobInfo object returned
        to the monitor, or a control string: "thread-finish" (signal
        the operation thread finished), "domain-stop" (destroy the
        fake domain), "force_complete" (queue a force-complete
        request on active_migrations).
    :param time_records: fake time.time() values, popped in lockstep
        with the JobInfo records.
    :param expect_result: one of EXPECT_SUCCESS/FAILURE/ABORT.
    :param scheduled_action: 'pause' or 'postcopy_switch' expected to
        be triggered by a queued "force_complete".
    :param scheduled_action_executed: whether that action must have
        actually run.
    :param expected_switch: on success, also require a post-copy
        switch to have happened (timeout-driven).
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    drvr.active_migrations[instance.uuid] = deque()
    dom = fakelibvirt.Domain(drvr._get_connection(), "<domain/>", True)
    guest = libvirt_guest.Guest(dom)
    finish_event = eventlet.event.Event()

    def fake_job_info():
        # Process control strings until a real JobInfo is reached,
        # which is returned to the monitor loop.
        while True:
            self.assertGreater(len(job_info_records), 0)
            rec = job_info_records.pop(0)

            if type(rec) == str:
                if rec == "thread-finish":
                    finish_event.send()
                elif rec == "domain-stop":
                    dom.destroy()
                elif rec == "force_complete":
                    drvr.active_migrations[instance.uuid].append(
                        "force-complete")
            else:
                # Advance the fake clock once per JobInfo consumed.
                if len(time_records) > 0:
                    time_records.pop(0)
                    return rec
                return rec

    def fake_time():
        # Replay scripted times, then settle on a fixed late time.
        if len(time_records) > 0:
            return time_records[0]
        else:
            return int(
                datetime.datetime(2001, 1, 20, 20, 1, 0)
                .strftime('%s'))

    mock_job_info.side_effect = fake_job_info
    mock_time.side_effect = fake_time

    dest = mock.sentinel.migrate_dest
    migration = objects.Migration(context=self.context, id=1)
    migrate_data = objects.LibvirtLiveMigrateData(
        migration=migration, block_migration=block_migration)

    if current_mig_status:
        migrate_data.migration.status = current_mig_status
    else:
        migrate_data.migration.status = "unset"
    migrate_data.migration.save()

    fake_post_method = mock.MagicMock()
    fake_recover_method = mock.MagicMock()
    drvr._live_migration_monitor(self.context, instance,
                                 guest, dest,
                                 fake_post_method,
                                 fake_recover_method,
                                 False,
                                 migrate_data,
                                 finish_event,
                                 [])
    # Verify whether the queued force-complete action actually ran.
    if scheduled_action_executed:
        if scheduled_action == 'pause':
            self.assertTrue(mock_pause.called)
        if scheduled_action == 'postcopy_switch':
            self.assertTrue(mock_postcopy_switch.called)
    else:
        if scheduled_action == 'pause':
            self.assertFalse(mock_pause.called)
        if scheduled_action == 'postcopy_switch':
            self.assertFalse(mock_postcopy_switch.called)
    mock_mig_save.assert_called_with()

    if expect_result == self.EXPECT_SUCCESS:
        self.assertFalse(fake_recover_method.called,
                         'Recover method called when success expected')
        self.assertFalse(mock_abort.called,
                         'abortJob not called when success expected')
        if expected_switch:
            self.assertTrue(mock_postcopy_switch.called)
        fake_post_method.assert_called_once_with(
            self.context, instance, dest, False, migrate_data)
    else:
        if expect_result == self.EXPECT_ABORT:
            self.assertTrue(mock_abort.called,
                            'abortJob called when abort expected')
        else:
            self.assertFalse(mock_abort.called,
                             'abortJob not called when failure expected')
        self.assertFalse(fake_post_method.called,
                         'Post method called when success not expected')
        if expected_mig_status:
            fake_recover_method.assert_called_once_with(
                self.context, instance, dest, migrate_data,
                migration_status=expected_mig_status)
        else:
            fake_recover_method.assert_called_once_with(
                self.context, instance, dest, migrate_data)
    # The monitor must always clean up its active_migrations entry.
    self.assertNotIn(instance.uuid, drvr.active_migrations)
|
|
|
|
def test_live_migration_monitor_success(self):
    """Happy path: NONE, three UNBOUNDED samples, then COMPLETED."""
    job = libvirt_guest.JobInfo
    busy = fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED
    domain_info_records = [job(type=fakelibvirt.VIR_DOMAIN_JOB_NONE)]
    domain_info_records.extend(job(type=busy) for _ in range(3))
    domain_info_records.extend([
        "thread-finish",
        "domain-stop",
        job(type=fakelibvirt.VIR_DOMAIN_JOB_COMPLETED),
    ])

    self._test_live_migration_monitoring(domain_info_records, [],
                                         self.EXPECT_SUCCESS)
|
|
|
|
def test_live_migration_handle_pause_normal(self):
    """A pause requested mid-stream (between UNBOUNDED samples) must
    be executed while the migration is running.
    """
    job = libvirt_guest.JobInfo
    busy = fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED
    domain_info_records = [
        job(type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
        job(type=busy),
        job(type=busy),
        "force_complete",
        job(type=busy),
        "thread-finish",
        "domain-stop",
        job(type=fakelibvirt.VIR_DOMAIN_JOB_COMPLETED),
    ]

    self._test_live_migration_monitoring(
        domain_info_records, [], self.EXPECT_SUCCESS,
        current_mig_status="running",
        scheduled_action="pause",
        scheduled_action_executed=True)
|
|
|
|
def test_live_migration_handle_pause_on_start(self):
    """A pause queued while the job is still JOB_NONE and the
    operation thread has not finished must be remembered and executed
    once the migration runs.
    """
    job = libvirt_guest.JobInfo
    busy = fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED
    domain_info_records = ["force_complete",
                           job(type=fakelibvirt.VIR_DOMAIN_JOB_NONE)]
    domain_info_records.extend(job(type=busy) for _ in range(3))
    domain_info_records.extend([
        "thread-finish",
        "domain-stop",
        job(type=fakelibvirt.VIR_DOMAIN_JOB_COMPLETED),
    ])

    self._test_live_migration_monitoring(
        domain_info_records, [], self.EXPECT_SUCCESS,
        current_mig_status="preparing",
        scheduled_action="pause",
        scheduled_action_executed=True)
|
|
|
|
def test_live_migration_handle_pause_on_finish(self):
    """A pause queued after the migration already finished (JOB_NONE
    with finish_event set) is ignored - there is nothing to pause.
    """
    job = libvirt_guest.JobInfo
    busy = fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED
    domain_info_records = [job(type=fakelibvirt.VIR_DOMAIN_JOB_NONE)]
    domain_info_records.extend(job(type=busy) for _ in range(3))
    domain_info_records.extend([
        "thread-finish",
        "domain-stop",
        "force_complete",
        job(type=fakelibvirt.VIR_DOMAIN_JOB_COMPLETED),
    ])

    self._test_live_migration_monitoring(
        domain_info_records, [], self.EXPECT_SUCCESS,
        current_mig_status="completed",
        scheduled_action="pause",
        scheduled_action_executed=False)
|
|
|
|
def test_live_migration_handle_pause_on_cancel(self):
    """A pause queued when the job ends up CANCELLED is not executed
    and the migration is reported as cancelled.
    """
    job = libvirt_guest.JobInfo
    busy = fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED
    domain_info_records = [job(type=fakelibvirt.VIR_DOMAIN_JOB_NONE)]
    domain_info_records.extend(job(type=busy) for _ in range(3))
    domain_info_records.extend([
        "thread-finish",
        "domain-stop",
        "force_complete",
        job(type=fakelibvirt.VIR_DOMAIN_JOB_CANCELLED),
    ])

    self._test_live_migration_monitoring(
        domain_info_records, [], self.EXPECT_FAILURE,
        current_mig_status="cancelled",
        expected_mig_status='cancelled',
        scheduled_action="pause",
        scheduled_action_executed=False)
|
|
|
|
def test_live_migration_handle_pause_on_failure(self):
    """A pause queued when the job ends up FAILED is not executed and
    the migration is reported as failed.
    """
    job = libvirt_guest.JobInfo
    busy = fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED
    domain_info_records = [job(type=fakelibvirt.VIR_DOMAIN_JOB_NONE)]
    domain_info_records.extend(job(type=busy) for _ in range(3))
    domain_info_records.extend([
        "thread-finish",
        "domain-stop",
        "force_complete",
        job(type=fakelibvirt.VIR_DOMAIN_JOB_FAILED),
    ])

    self._test_live_migration_monitoring(
        domain_info_records, [], self.EXPECT_FAILURE,
        scheduled_action="pause",
        scheduled_action_executed=False)
|
|
|
|
@mock.patch.object(libvirt_driver.LibvirtDriver,
                   "_is_post_copy_enabled")
def test_live_migration_handle_postcopy_normal(self,
                                               mock_postcopy_enabled):
    """A post-copy switch requested mid-stream must be executed while
    the migration is in the UNBOUNDED phase.
    """
    mock_postcopy_enabled.return_value = True
    job = libvirt_guest.JobInfo
    busy = fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED
    domain_info_records = [
        job(type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
        job(type=busy),
        job(type=busy),
        "force_complete",
        job(type=busy),
        "thread-finish",
        "domain-stop",
        job(type=fakelibvirt.VIR_DOMAIN_JOB_COMPLETED),
    ]

    self._test_live_migration_monitoring(
        domain_info_records, [], self.EXPECT_SUCCESS,
        current_mig_status="running",
        scheduled_action="postcopy_switch",
        scheduled_action_executed=True)
|
|
|
|
@mock.patch.object(libvirt_driver.LibvirtDriver,
                   "_is_post_copy_enabled")
def test_live_migration_handle_postcopy_on_start(self,
                                                 mock_postcopy_enabled):
    """A post-copy switch queued while the job is still JOB_NONE and
    the operation thread has not finished is remembered and executed
    once the migration runs.
    """
    mock_postcopy_enabled.return_value = True
    job = libvirt_guest.JobInfo
    busy = fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED
    domain_info_records = ["force_complete",
                           job(type=fakelibvirt.VIR_DOMAIN_JOB_NONE)]
    domain_info_records.extend(job(type=busy) for _ in range(3))
    domain_info_records.extend([
        "thread-finish",
        "domain-stop",
        job(type=fakelibvirt.VIR_DOMAIN_JOB_COMPLETED),
    ])

    self._test_live_migration_monitoring(
        domain_info_records, [], self.EXPECT_SUCCESS,
        current_mig_status="preparing",
        scheduled_action="postcopy_switch",
        scheduled_action_executed=True)
|
|
|
|
@mock.patch.object(libvirt_driver.LibvirtDriver,
                   "_is_post_copy_enabled")
def test_live_migration_handle_postcopy_on_finish(self,
                                                  mock_postcopy_enabled):
    """A post-copy switch queued after the migration has already
    finished is ignored - the job is complete.
    """
    mock_postcopy_enabled.return_value = True
    job = libvirt_guest.JobInfo
    busy = fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED
    domain_info_records = [job(type=fakelibvirt.VIR_DOMAIN_JOB_NONE)]
    domain_info_records.extend(job(type=busy) for _ in range(3))
    domain_info_records.extend([
        "thread-finish",
        "domain-stop",
        "force_complete",
        job(type=fakelibvirt.VIR_DOMAIN_JOB_COMPLETED),
    ])

    self._test_live_migration_monitoring(
        domain_info_records, [], self.EXPECT_SUCCESS,
        current_mig_status="completed",
        scheduled_action="postcopy_switch",
        scheduled_action_executed=False)
|
|
|
|
@mock.patch.object(libvirt_driver.LibvirtDriver,
                   "_is_post_copy_enabled")
def test_live_migration_handle_postcopy_on_cancel(self,
                                                  mock_postcopy_enabled):
    """A post-copy switch queued when the job ends up CANCELLED is
    not executed and the migration is reported as cancelled.
    """
    mock_postcopy_enabled.return_value = True
    job = libvirt_guest.JobInfo
    busy = fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED
    domain_info_records = [job(type=fakelibvirt.VIR_DOMAIN_JOB_NONE)]
    domain_info_records.extend(job(type=busy) for _ in range(3))
    domain_info_records.extend([
        "thread-finish",
        "domain-stop",
        "force_complete",
        job(type=fakelibvirt.VIR_DOMAIN_JOB_CANCELLED),
    ])

    self._test_live_migration_monitoring(
        domain_info_records, [], self.EXPECT_FAILURE,
        current_mig_status="cancelled",
        expected_mig_status='cancelled',
        scheduled_action="postcopy_switch",
        scheduled_action_executed=False)
|
|
|
|
@mock.patch.object(libvirt_driver.LibvirtDriver,
                   "_is_post_copy_enabled")
def test_live_migration_handle_pause_on_postcopy(self,
                                                 mock_postcopy_enabled):
    """A pause requested after the migration already switched to
    post-copy mode must not be executed.
    """
    mock_postcopy_enabled.return_value = True
    job = libvirt_guest.JobInfo
    busy = fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED
    domain_info_records = [
        job(type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
        job(type=busy),
        job(type=busy),
        "force_complete",
        job(type=busy),
        "thread-finish",
        "domain-stop",
        job(type=fakelibvirt.VIR_DOMAIN_JOB_COMPLETED),
    ]

    self._test_live_migration_monitoring(
        domain_info_records, [], self.EXPECT_SUCCESS,
        current_mig_status="running (post-copy)",
        scheduled_action="pause",
        scheduled_action_executed=False)
|
|
|
|
@mock.patch.object(libvirt_driver.LibvirtDriver,
                   "_is_post_copy_enabled")
def test_live_migration_handle_postcopy_on_postcopy(self,
                                                    mock_postcopy_enabled):
    """A post-copy switch requested when the migration is already in
    post-copy mode must not be executed again.
    """
    mock_postcopy_enabled.return_value = True
    job = libvirt_guest.JobInfo
    busy = fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED
    domain_info_records = [
        job(type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
        job(type=busy),
        job(type=busy),
        "force_complete",
        job(type=busy),
        "thread-finish",
        "domain-stop",
        job(type=fakelibvirt.VIR_DOMAIN_JOB_COMPLETED),
    ]

    self._test_live_migration_monitoring(
        domain_info_records, [], self.EXPECT_SUCCESS,
        current_mig_status="running (post-copy)",
        scheduled_action="postcopy_switch",
        scheduled_action_executed=False)
|
|
|
|
@mock.patch.object(libvirt_driver.LibvirtDriver,
                   "_is_post_copy_enabled")
def test_live_migration_handle_postcopy_on_failure(self,
                                                   mock_postcopy_enabled):
    """A post-copy switch queued when the job ends up FAILED is not
    executed and the migration is reported as failed.
    """
    mock_postcopy_enabled.return_value = True
    job = libvirt_guest.JobInfo
    busy = fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED
    domain_info_records = [job(type=fakelibvirt.VIR_DOMAIN_JOB_NONE)]
    domain_info_records.extend(job(type=busy) for _ in range(3))
    domain_info_records.extend([
        "thread-finish",
        "domain-stop",
        "force_complete",
        job(type=fakelibvirt.VIR_DOMAIN_JOB_FAILED),
    ])

    self._test_live_migration_monitoring(
        domain_info_records, [], self.EXPECT_FAILURE,
        scheduled_action="postcopy_switch",
        scheduled_action_executed=False)
|
|
|
|
def test_live_migration_monitor_success_race(self):
    """Migration succeeds but we poll too late to observe COMPLETED;
    a final JOB_NONE after the domain stopped still counts as success.
    """
    job = libvirt_guest.JobInfo
    busy = fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED
    domain_info_records = [job(type=fakelibvirt.VIR_DOMAIN_JOB_NONE)]
    domain_info_records.extend(job(type=busy) for _ in range(3))
    domain_info_records.extend([
        "thread-finish",
        "domain-stop",
        job(type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
    ])

    self._test_live_migration_monitoring(domain_info_records, [],
                                         self.EXPECT_SUCCESS)
|
|
|
|
def test_live_migration_monitor_failed(self):
    """The job reports FAILED after the operation thread ends; the
    monitor must report failure without aborting anything.
    """
    job = libvirt_guest.JobInfo
    busy = fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED
    domain_info_records = [job(type=fakelibvirt.VIR_DOMAIN_JOB_NONE)]
    domain_info_records.extend(job(type=busy) for _ in range(3))
    domain_info_records.extend([
        "thread-finish",
        job(type=fakelibvirt.VIR_DOMAIN_JOB_FAILED),
    ])

    self._test_live_migration_monitoring(domain_info_records, [],
                                         self.EXPECT_FAILURE)
|
|
|
|
def test_live_migration_monitor_failed_race(self):
    """Migration fails but we are too slow to see the FAILED state;
    JOB_NONE with the guest still running is treated as failure.
    """
    job = libvirt_guest.JobInfo
    busy = fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED
    domain_info_records = [job(type=fakelibvirt.VIR_DOMAIN_JOB_NONE)]
    domain_info_records.extend(job(type=busy) for _ in range(3))
    domain_info_records.extend([
        "thread-finish",
        job(type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
    ])

    self._test_live_migration_monitoring(domain_info_records, [],
                                         self.EXPECT_FAILURE)
|
|
|
|
def test_live_migration_monitor_cancelled(self):
    """A cancelled job must be reported as a failure with migration
    status 'cancelled'.
    """
    job = libvirt_guest.JobInfo
    busy = fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED
    domain_info_records = [job(type=fakelibvirt.VIR_DOMAIN_JOB_NONE)]
    domain_info_records.extend(job(type=busy) for _ in range(3))
    domain_info_records.extend([
        "thread-finish",
        "domain-stop",
        job(type=fakelibvirt.VIR_DOMAIN_JOB_CANCELLED),
    ])

    self._test_live_migration_monitoring(domain_info_records, [],
                                         self.EXPECT_FAILURE,
                                         expected_mig_status='cancelled')
|
|
|
|
@mock.patch.object(fakelibvirt.virDomain, "migrateSetMaxDowntime")
@mock.patch("nova.virt.libvirt.migration.downtime_steps")
def test_live_migration_monitor_downtime(self, mock_downtime_steps,
                                         mock_set_downtime):
    """Downtime is stepped up per the configured schedule; only the
    steps whose deadline has passed before completion are applied.
    """
    self.flags(live_migration_completion_timeout=1000000,
               group='libvirt')
    # We've setup 4 fake downtime steps - first value is the
    # time delay, second is the downtime value
    mock_downtime_steps.return_value = [
        (90, 10), (180, 50), (270, 200), (500, 300)]

    # Each one of these fake times is used for time.time()
    # when a new domain_info_records entry is consumed.
    # Times are chosen so that only the first 3 downtime
    # steps are needed.
    fake_times = [0, 1, 30, 95, 150, 200, 300]

    job = libvirt_guest.JobInfo
    busy = fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED
    domain_info_records = [job(type=fakelibvirt.VIR_DOMAIN_JOB_NONE)]
    domain_info_records.extend(job(type=busy) for _ in range(5))
    domain_info_records.extend([
        "thread-finish",
        "domain-stop",
        job(type=fakelibvirt.VIR_DOMAIN_JOB_COMPLETED),
    ])

    self._test_live_migration_monitoring(domain_info_records,
                                         fake_times, self.EXPECT_SUCCESS)

    # Only the first three steps fire before completion.
    mock_set_downtime.assert_has_calls([mock.call(10),
                                        mock.call(50),
                                        mock.call(200)])
|
|
|
|
def test_live_migration_monitor_completion(self):
    """When the completion timeout expires, the default timeout
    action aborts the job and the migration ends up 'cancelled'.
    """
    self.flags(live_migration_completion_timeout=100,
               group='libvirt')
    # Each one of these fake times is used for time.time()
    # when a new domain_info_records entry is consumed.
    fake_times = [i * 40 for i in range(9)]

    job = libvirt_guest.JobInfo
    busy = fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED
    domain_info_records = [job(type=fakelibvirt.VIR_DOMAIN_JOB_NONE)]
    domain_info_records.extend(job(type=busy) for _ in range(5))
    domain_info_records.extend([
        "thread-finish",
        "domain-stop",
        job(type=fakelibvirt.VIR_DOMAIN_JOB_CANCELLED),
    ])

    self._test_live_migration_monitoring(domain_info_records,
                                         fake_times, self.EXPECT_ABORT,
                                         expected_mig_status='cancelled')
|
|
|
|
@mock.patch.object(libvirt_driver.LibvirtDriver,
                   "_is_post_copy_enabled")
def test_live_migration_monitor_force_complete_postcopy(self,
                                                mock_postcopy_enabled):
    """With timeout action 'force_complete' and post-copy available,
    hitting the completion timeout switches to post-copy instead of
    aborting, and the migration still succeeds.
    """
    self.flags(live_migration_completion_timeout=40,
               live_migration_timeout_action='force_complete',
               group='libvirt')
    mock_postcopy_enabled.return_value = True

    # Each one of these fake times is used for time.time()
    # when a new domain_info_records entry is consumed.
    fake_times = [i * 40 for i in range(9)]

    job = libvirt_guest.JobInfo
    busy = fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED
    domain_info_records = [job(type=fakelibvirt.VIR_DOMAIN_JOB_NONE)]
    domain_info_records.extend(job(type=busy) for _ in range(3))
    domain_info_records.extend([
        "thread-finish",
        "domain-stop",
        job(type=fakelibvirt.VIR_DOMAIN_JOB_COMPLETED),
    ])

    self._test_live_migration_monitoring(domain_info_records, fake_times,
                                         self.EXPECT_SUCCESS,
                                         expected_switch=True)
|
|
|
|
@mock.patch.object(host.Host, "get_connection")
@mock.patch.object(utils, "spawn")
@mock.patch.object(libvirt_driver.LibvirtDriver, "_live_migration_monitor")
@mock.patch.object(host.Host, "get_guest")
@mock.patch.object(fakelibvirt.Connection, "_mark_running")
@mock.patch.object(libvirt_driver.LibvirtDriver,
                   "_live_migration_copy_disk_paths")
def test_live_migration_main(self, mock_copy_disk_path, mock_running,
                             mock_guest, mock_monitor, mock_thread,
                             mock_conn):
    """_live_migration with block_migration must compute the disks to
    copy, spawn the migration operation thread with the disk device
    names, and run the monitor with the disk paths.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)

    dom = fakelibvirt.Domain(drvr._get_connection(),
                             "<domain><name>demo</name></domain>", True)
    guest = libvirt_guest.Guest(dom)
    migrate_data = objects.LibvirtLiveMigrateData(block_migration=True)
    # (paths, device names) as returned by the copy-path scan.
    disks_to_copy = (['/some/path/one', '/test/path/two'],
                     ['vda', 'vdb'])
    mock_copy_disk_path.return_value = disks_to_copy

    mock_guest.return_value = guest

    def fake_post():
        pass

    def fake_recover():
        pass

    drvr._live_migration(self.context, instance, "fakehost",
                         fake_post, fake_recover, True,
                         migrate_data)
    mock_copy_disk_path.assert_called_once_with(self.context, instance,
                                                guest)

    # Matches any eventlet Event instance, since the actual finish
    # event object is created inside _live_migration.
    class AnyEventletEvent(object):
        def __eq__(self, other):
            return type(other) == eventlet.event.Event

    # The operation thread gets the device names ...
    mock_thread.assert_called_once_with(
        drvr._live_migration_operation,
        self.context, instance, "fakehost", True,
        migrate_data, guest, disks_to_copy[1])
    # ... while the monitor gets the disk paths.
    mock_monitor.assert_called_once_with(
        self.context, instance, guest, "fakehost",
        fake_post, fake_recover, True,
        migrate_data, AnyEventletEvent(), disks_to_copy[0])
|
|
|
|
    @mock.patch('os.path.exists', return_value=False)
    @mock.patch('nova.virt.libvirt.utils.create_image')
    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       '_fetch_instance_kernel_ramdisk')
    def _do_test_create_images_and_backing(self, disk_type, mock_fetch,
                                           mock_create, mock_exists):
        """Helper: a disk with no backing file and a missing path is
        (re)created via libvirt_utils.create_image for the given disk_type.
        """
        instance = objects.Instance(**self.test_instance)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        disk_info = {'path': 'foo', 'type': disk_type,
                     'disk_size': 1 * 1024 ** 3,
                     'virt_disk_size': 20 * 1024 ** 3,
                     'backing_file': None}

        drvr._create_images_and_backing(self.context, instance,
                                        "/fake/instance/dir", [disk_info])

        mock_fetch.assert_called_once_with(self.context, instance,
                                           fallback_from_host=None)
        # The disk is created at its full virtual size.
        mock_create.assert_called_once_with(
            disk_info['type'], mock.ANY, disk_info['virt_disk_size'])
        mock_exists.assert_called_once_with('/fake/instance/dir/foo')
|
|
|
|
    def test_create_images_and_backing_qcow2(self):
        """Recreate a missing qcow2 disk with no backing file."""
        self._do_test_create_images_and_backing('qcow2')
|
|
|
|
    def test_create_images_and_backing_raw(self):
        """Recreate a missing raw disk with no backing file."""
        self._do_test_create_images_and_backing('raw')
|
|
|
|
    def test_create_images_and_backing_images_not_exist_no_fallback(self):
        """ImageNotFound from fetch_image propagates when no fallback host
        is supplied to _create_images_and_backing.
        """
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.test_instance.update({'user_id': 'fake-user',
                                   'os_type': None,
                                   'project_id': 'fake-project'})
        instance = objects.Instance(**self.test_instance)

        backing_file = imagecache.get_cache_fname(instance.image_ref)
        disk_info = [
            {u'backing_file': backing_file,
             u'disk_size': 10747904,
             u'path': u'disk_path',
             u'type': u'qcow2',
             u'virt_disk_size': 25165824}]

        with mock.patch.object(libvirt_driver.libvirt_utils, 'fetch_image',
                               side_effect=exception.ImageNotFound(
                                   image_id="fake_id")):
            self.assertRaises(exception.ImageNotFound,
                              conn._create_images_and_backing,
                              self.context, instance,
                              "/fake/instance/dir", disk_info)
|
|
|
|
    @mock.patch('nova.virt.libvirt.utils.create_cow_image')
    @mock.patch('nova.privsep.path.utime')
    def test_create_images_and_backing_images_not_exist_fallback(
            self, mock_utime, mock_create_cow_image):
        """When the image is gone from glance (ImageNotFound) but a
        fallback_from_host is given, the backing file, kernel and ramdisk
        are copied from that host and the root disk is resized to its
        virtual size.
        """
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        base_dir = os.path.join(CONF.instances_path,
                                CONF.image_cache_subdirectory_name)
        trusted_certs = objects.TrustedCerts(
            ids=['0b5d2c72-12cc-4ba6-a8d7-3ff5cc1d8cb8',
                 '674736e3-f25c-405c-8362-bbf991e0ce0a'])
        self.test_instance.update({'user_id': 'fake-user',
                                   'os_type': None,
                                   'trusted_certs': trusted_certs,
                                   'kernel_id': uuids.kernel_id,
                                   'ramdisk_id': uuids.ramdisk_id,
                                   'project_id': 'fake-project'})
        instance = objects.Instance(**self.test_instance)

        backing_file = imagecache.get_cache_fname(instance.image_ref)
        backfile_path = os.path.join(base_dir, backing_file)
        disk_size = 10747904
        virt_disk_size = 25165824
        disk_info = [
            {u'backing_file': backing_file,
             u'disk_size': disk_size,
             u'path': u'disk_path',
             u'type': u'qcow2',
             u'virt_disk_size': virt_disk_size}]

        def fake_copy_image(src, dest, **kwargs):
            # backing file should be present and have a smaller size
            # than instance root disk in order to assert resize_image()
            if dest == backfile_path:
                # dest is created under TempDir() fixture,
                # it will go away after test cleanup
                with open(dest, 'a'):
                    pass
        with test.nested(
            mock.patch.object(libvirt_driver.libvirt_utils, 'copy_image',
                              side_effect=fake_copy_image),
            mock.patch.object(libvirt_driver.libvirt_utils, 'fetch_image',
                              side_effect=exception.ImageNotFound(
                                  image_id=uuids.fake_id)),
            mock.patch.object(imagebackend.Qcow2, 'resize_image'),
            mock.patch.object(imagebackend.Image, 'get_disk_size',
                              return_value=disk_size),
        ) as (copy_image_mock, fetch_image_mock, resize_image_mock,
              get_disk_size_mock):
            conn._create_images_and_backing(self.context, instance,
                                            "/fake/instance/dir", disk_info,
                                            fallback_from_host="fake_host")
            kernel_path = os.path.join(CONF.instances_path,
                                       self.test_instance['uuid'],
                                       'kernel')
            ramdisk_path = os.path.join(CONF.instances_path,
                                        self.test_instance['uuid'],
                                        'ramdisk')
            # Backing file, kernel and ramdisk are all pulled from the
            # fallback host.
            copy_image_mock.assert_has_calls([
                mock.call(dest=backfile_path, src=backfile_path,
                          host='fake_host', receive=True),
                mock.call(dest=kernel_path, src=kernel_path,
                          host='fake_host', receive=True),
                mock.call(dest=ramdisk_path, src=ramdisk_path,
                          host='fake_host', receive=True)
            ])
            # fetch_image was attempted (and failed) for all three before
            # falling back; trusted_certs are threaded through each call.
            fetch_image_mock.assert_has_calls([
                mock.call(context=self.context,
                          target=backfile_path,
                          image_id=self.test_instance['image_ref'],
                          trusted_certs=trusted_certs),
                mock.call(self.context, kernel_path, instance.kernel_id,
                          trusted_certs),
                mock.call(self.context, ramdisk_path, instance.ramdisk_id,
                          trusted_certs)
            ])
            resize_image_mock.assert_called_once_with(virt_disk_size)

        mock_utime.assert_called()
        mock_create_cow_image.assert_called_once_with(
            backfile_path, '/fake/instance/dir/disk_path')
|
|
|
|
    @mock.patch('nova.virt.libvirt.utils.create_image',
                new=mock.NonCallableMock())
    @mock.patch.object(libvirt_driver.libvirt_utils, 'fetch_image')
    def test_create_images_and_backing_images_exist(
            self, mock_fetch_image):
        """Nothing is fetched or created when the disk files already exist
        on disk (os.path.exists patched to True; create_image is a
        NonCallableMock so any call would fail the test).
        """
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.test_instance.update({'user_id': 'fake-user',
                                   'os_type': None,
                                   'kernel_id': 'fake_kernel_id',
                                   'ramdisk_id': 'fake_ramdisk_id',
                                   'project_id': 'fake-project'})
        instance = objects.Instance(**self.test_instance)

        disk_info = [
            {u'backing_file': imagecache.get_cache_fname(instance.image_ref),
             u'disk_size': 10747904,
             u'path': u'disk_path',
             u'type': u'qcow2',
             u'virt_disk_size': 25165824}]

        with test.nested(
            mock.patch.object(imagebackend.Image, 'get_disk_size',
                              return_value=0),
            mock.patch.object(os.path, 'exists', return_value=True)
        ):
            conn._create_images_and_backing(self.context, instance,
                                            '/fake/instance/dir', disk_info)
        self.assertFalse(mock_fetch_image.called)
|
|
|
|
    @mock.patch('nova.privsep.path.utime')
    @mock.patch('nova.virt.libvirt.utils.create_cow_image')
    def test_create_images_and_backing_ephemeral_gets_created(
            self, mock_create_cow_image, mock_utime):
        """An ephemeral disk in disk_info gets its backing file created via
        _create_ephemeral, while the root disk backing file is fetched from
        the image service; both are size-verified and touched (utime).
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        base_dir = os.path.join(CONF.instances_path,
                                CONF.image_cache_subdirectory_name)
        instance = objects.Instance(**self.test_instance)
        disk_info_byname = fake_disk_info_byname(instance)

        # Give the ephemeral disk a 1GiB backing file of its own.
        disk_info_byname['disk.local']['backing_file'] = 'ephemeral_foo'
        disk_info_byname['disk.local']['virt_disk_size'] = 1 * units.Gi

        disk_info = disk_info_byname.values()

        with test.nested(
            mock.patch.object(libvirt_driver.libvirt_utils, 'fetch_image'),
            mock.patch.object(drvr, '_create_ephemeral'),
            mock.patch.object(imagebackend.Image, 'verify_base_size'),
            mock.patch.object(imagebackend.Image, 'get_disk_size')
        ) as (fetch_image_mock, create_ephemeral_mock, verify_base_size_mock,
              disk_size_mock):
            disk_size_mock.return_value = 0
            drvr._create_images_and_backing(self.context, instance,
                                            CONF.instances_path, disk_info)
            self.assertEqual(len(create_ephemeral_mock.call_args_list), 1)

            root_backing, ephemeral_backing = [
                os.path.join(base_dir, name)
                for name in (disk_info_byname['disk']['backing_file'],
                             'ephemeral_foo')
            ]

            create_ephemeral_mock.assert_called_once_with(
                ephemeral_size=1, fs_label='ephemeral_foo',
                os_type='linux', target=ephemeral_backing)

            fetch_image_mock.assert_called_once_with(
                context=self.context, image_id=instance.image_ref,
                target=root_backing, trusted_certs=instance.trusted_certs)

            verify_base_size_mock.assert_has_calls([
                mock.call(root_backing, instance.flavor.root_gb * units.Gi),
                mock.call(ephemeral_backing, 1 * units.Gi)
            ])

        mock_utime.assert_has_calls([
            mock.call(root_backing),
            mock.call(ephemeral_backing)])

        # TODO(efried): Should these be disk_info[path]??
        mock_create_cow_image.assert_has_calls([
            mock.call(root_backing, CONF.instances_path + '/disk'),
            mock.call(ephemeral_backing,
                      CONF.instances_path + '/disk.local')])
|
|
|
|
    def test_create_images_and_backing_disk_info_none(self):
        """Passing disk_info=None is a no-op: no disks are created."""
        instance = objects.Instance(**self.test_instance)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        fake_backend = self.useFixture(fake_imagebackend.ImageBackendFixture())

        drvr._create_images_and_backing(self.context, instance,
                                        "/fake/instance/dir", None)

        # Assert that we did nothing
        self.assertEqual({}, fake_backend.created_disks)
|
|
|
|
    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       '_fetch_instance_kernel_ramdisk')
    def test_create_images_and_backing_parallels(self, mock_fetch):
        """With virt_type=parallels and an EXE-mode instance, no kernel or
        ramdisk is fetched by _create_images_and_backing.
        """
        self.flags(virt_type='parallels', group='libvirt')
        instance = objects.Instance(**self.test_instance)
        instance.vm_mode = fields.VMMode.EXE
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
        drvr._create_images_and_backing(self.context, instance,
                                        '/fake/instance/dir', None)
        self.assertFalse(mock_fetch.called)
|
|
|
|
def _migrate_data_to_dict(self, migrate_data):
|
|
primitive = migrate_data.obj_to_primitive()['nova_object.data']
|
|
primitive['bdms'] = [bdm['nova_object.data'] for bdm in
|
|
primitive['bdms']]
|
|
return primitive
|
|
|
|
    def _generate_target_ret(self, target_connect_addr=None):
        """Build the dict form of the LibvirtLiveMigrateData expected back
        from pre_live_migration for the two-volume test instance.

        :param target_connect_addr: value for target_connect_addr in the
            expected result (defaults to None).
        :returns: flattened dict (see _migrate_data_to_dict).
        """
        return self._migrate_data_to_dict(objects.LibvirtLiveMigrateData(
            block_migration=False,
            instance_relative_path='foo',
            is_shared_block_storage=False,
            is_shared_instance_path=False,
            serial_listen_ports=[],
            src_supports_native_luks=True,
            supported_perf_events=[],
            graphics_listen_addr_spice='127.0.0.1',
            graphics_listen_addr_vnc='127.0.0.1',
            serial_listen_addr='127.0.0.1',
            target_connect_addr=target_connect_addr,
            bdms=[
                objects.LibvirtLiveMigrateBDMInfo(
                    serial='12345', bus='scsi', dev='sda', type='disk',
                    boot_index=None, format=None,
                    connection_info_json=jsonutils.dumps({
                        'serial': '12345',
                        'data': {
                            'device_path': '/dev/disk/by-path/ip-1.2.3.4:3260'
                                           '-iqn.abc.12345.opst-lun-X'}})),
                objects.LibvirtLiveMigrateBDMInfo(
                    serial='67890', bus='scsi', dev='sdb', type='disk',
                    boot_index=None, format=None,
                    connection_info_json=jsonutils.dumps({
                        'serial': '67890',
                        'data': {
                            'device_path': '/dev/disk/by-path/ip-1.2.3.4:3260'
                                           '-iqn.cde.67890.opst-lun-Z'}}))]))
|
|
|
|
    def test_pre_live_migration_works_correctly_mocked(self):
        """Default happy path: both sides support native LUKS."""
        self._test_pre_live_migration_works_correctly_mocked()
|
|
|
|
    def test_pre_live_migration_with_transport_ip(self):
        """live_migration_inbound_addr is returned as target_connect_addr."""
        self.flags(live_migration_inbound_addr='127.0.0.2',
                   group='libvirt')
        target_ret = self._generate_target_ret('127.0.0.2')
        self._test_pre_live_migration_works_correctly_mocked(
            target_ret=target_ret)
|
|
|
|
    def test_pre_live_migration_only_dest_supports_native_luks(self):
        # Assert that allow_native_luks is False when src_supports_native_luks
        # is missing from migrate data during a P to Q LM.
        self._test_pre_live_migration_works_correctly_mocked(
            src_supports_native_luks=None, dest_supports_native_luks=True,
            allow_native_luks=False)
|
|
|
|
    def test_pre_live_migration_only_src_supports_native_luks(self):
        # Assert that allow_native_luks is False when dest_supports_native_luks
        # is False due to unmet QEMU and Libvirt deps on the dest compute.
        self._test_pre_live_migration_works_correctly_mocked(
            src_supports_native_luks=True, dest_supports_native_luks=False,
            allow_native_luks=False)
|
|
|
|
    @mock.patch.object(libvirt_driver.LibvirtDriver, 'plug_vifs')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_connect_volume')
    @mock.patch('nova.virt.libvirt.utils.file_open',
                side_effect=[io.BytesIO(b''), io.BytesIO(b'')])
    def _test_pre_live_migration_works_correctly_mocked(
            self, mock_file_open, mock_connect, mock_plug,
            target_ret=None, src_supports_native_luks=True,
            dest_supports_native_luks=True, allow_native_luks=True):
        """Common driver for the pre_live_migration happy-path tests.

        Runs pre_live_migration with two attached volumes and asserts the
        returned migrate data matches target_ret, that volumes were
        connected with the expected allow_native_luks flag, and that VIFs
        were plugged once.
        """
        # Creating testdata
        c = context.get_admin_context()
        instance = objects.Instance(root_device_name='/dev/vda',
                                    **self.test_instance)
        bdms = objects.BlockDeviceMappingList(objects=[
            fake_block_device.fake_bdm_object(c, {
                'connection_info': jsonutils.dumps({
                    'serial': '12345',
                    'data': {
                        'device_path': '/dev/disk/by-path/ip-1.2.3.4:3260'
                                       '-iqn.abc.12345.opst-lun-X'
                    }
                }),
                'device_name': '/dev/sda',
                'volume_id': uuids.volume1,
                'source_type': 'volume',
                'destination_type': 'volume'
            }),
            fake_block_device.fake_bdm_object(c, {
                'connection_info': jsonutils.dumps({
                    'serial': '67890',
                    'data': {
                        'device_path': '/dev/disk/by-path/ip-1.2.3.4:3260'
                                       '-iqn.cde.67890.opst-lun-Z'
                    }
                }),
                'device_name': '/dev/sdb',
                'volume_id': uuids.volume2,
                'source_type': 'volume',
                'destination_type': 'volume'
            })
        ])
        # We go through get_block_device_info to simulate what the
        # ComputeManager sends to the driver (make sure we're using the
        # correct type of BDM objects since there are many of them and
        # they are super confusing).
        block_device_info = driver.get_block_device_info(instance, bdms)

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        class FakeNetworkInfo(object):
            def fixed_ips(self):
                return ["test_ip_addr"]

        self.stub_out('nova.virt.libvirt.driver.LibvirtDriver.'
                      '_create_images_and_backing',
                      lambda *args, **kwargs: None)
        self.stub_out('nova.virt.libvirt.driver.LibvirtDriver.'
                      '_is_native_luks_available',
                      lambda self: dest_supports_native_luks)

        nw_info = FakeNetworkInfo()

        expected_connect_calls = []
        for v in block_device_info['block_device_mapping']:
            expected_connect_calls.append(
                mock.call(c, v['connection_info'], instance,
                          allow_native_luks=allow_native_luks))

        migrate_data = migrate_data_obj.LibvirtLiveMigrateData(
            block_migration=False,
            instance_relative_path='foo',
            is_shared_block_storage=False,
            is_shared_instance_path=False,
            graphics_listen_addr_vnc='127.0.0.1',
            graphics_listen_addr_spice='127.0.0.1',
            serial_listen_addr='127.0.0.1',
        )

        if not target_ret:
            target_ret = self._generate_target_ret()
        if src_supports_native_luks:
            migrate_data.src_supports_native_luks = True
        else:
            # Simulates an older (pre-Queens) source that never set the
            # field, so it must not appear in the expected result either.
            target_ret.pop('src_supports_native_luks')
        result = drvr.pre_live_migration(
            c, instance, block_device_info, nw_info, None,
            migrate_data=migrate_data)
        result_dict = self._migrate_data_to_dict(result)
        # connection_info_json is a string, needs special handling
        # NOTE(review): the loop variable 'bdms' shadows the outer 'bdms'
        # BlockDeviceMappingList; it is a (expected, actual) pair here.
        for bdms in zip(target_ret.pop('bdms'), result_dict.pop('bdms')):
            conninfo0 = jsonutils.loads(bdms[0].pop('connection_info_json'))
            conninfo1 = jsonutils.loads(bdms[1].pop('connection_info_json'))
            self.assertEqual(conninfo0, conninfo1)
            self.assertThat(bdms[0], matchers.DictMatches(bdms[1]))
        self.assertThat(target_ret, matchers.DictMatches(result_dict))
        mock_connect.assert_has_calls(expected_connect_calls)
        self.assertEqual(len(expected_connect_calls), mock_connect.call_count)
        mock_plug.assert_called_once_with(test.MatchType(objects.Instance),
                                          nw_info)
|
|
|
|
    @mock.patch.object(os, 'mkdir')
    @mock.patch('nova.virt.libvirt.utils.get_instance_path_at_destination')
    @mock.patch('nova.virt.libvirt.driver.remotefs.'
                'RemoteFilesystem.copy_file')
    @mock.patch('nova.virt.driver.block_device_info_get_mapping')
    @mock.patch('nova.virt.configdrive.required_by', return_value=True)
    def test_pre_live_migration_block_with_config_drive_success(
            self, mock_required_by, block_device_info_get_mapping,
            mock_copy_file, mock_get_instance_path, mock_mkdir):
        """During block migration with an iso9660 config drive, the
        disk.config file is copied from the source host into the
        destination instance directory.
        """
        self.flags(config_drive_format='iso9660')
        vol = {'block_device_mapping': [
            {'connection_info': 'dummy', 'mount_device': '/dev/sda'},
            {'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]}
        fake_instance_path = os.path.join(cfg.CONF.instances_path,
                                          '/fake_instance_uuid')
        mock_get_instance_path.return_value = fake_instance_path

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        instance = objects.Instance(**self.test_instance)
        migrate_data = objects.LibvirtLiveMigrateData()
        migrate_data.is_shared_instance_path = False
        migrate_data.is_shared_block_storage = False
        migrate_data.block_migration = True
        migrate_data.instance_relative_path = 'foo'
        # Remote source spec: "<source-host>:<instance-path>/disk.config"
        src = "%s:%s/disk.config" % (instance.host, fake_instance_path)

        result = drvr.pre_live_migration(
            self.context, instance, vol, [], None, migrate_data)

        block_device_info_get_mapping.assert_called_once_with(
            {'block_device_mapping': [
                {'connection_info': 'dummy', 'mount_device': '/dev/sda'},
                {'connection_info': 'dummy', 'mount_device': '/dev/sdb'}
            ]}
        )
        mock_copy_file.assert_called_once_with(src, fake_instance_path)

        # NOTE(review): these attribute names use 'addrs' while the rest of
        # this file uses graphics_listen_addr_* — confirm this is intended.
        migrate_data.graphics_listen_addrs_vnc = '127.0.0.1'
        migrate_data.graphics_listen_addrs_spice = '127.0.0.1'
        migrate_data.serial_listen_addr = '127.0.0.1'
        self.assertEqual(migrate_data, result)
|
|
|
|
    @mock.patch('nova.virt.driver.block_device_info_get_mapping',
                return_value=())
    @mock.patch('nova.virt.libvirt.utils.file_open',
                side_effect=[io.BytesIO(b''), io.BytesIO(b'')])
    def test_pre_live_migration_block_with_config_drive_mocked_with_vfat(
            self, mock_open, mock_block_device_info_get_mapping):
        """A vfat config drive does not block pre_live_migration; the
        returned migrate data carries the expected defaulted fields.
        """
        self.flags(config_drive_format='vfat')
        # Creating testdata
        vol = {'block_device_mapping': [
            {'connection_info': 'dummy', 'mount_device': '/dev/sda'},
            {'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]}
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        instance = objects.Instance(**self.test_instance)
        instance.config_drive = 'True'

        migrate_data = migrate_data_obj.LibvirtLiveMigrateData(
            is_shared_instance_path=False,
            is_shared_block_storage=False,
            block_migration=False,
            instance_relative_path='foo',
        )
        res_data = drvr.pre_live_migration(
            self.context, instance, vol, [], None, migrate_data)
        mock_block_device_info_get_mapping.assert_called_once_with(
            {'block_device_mapping': [
                {'connection_info': 'dummy', 'mount_device': '/dev/sda'},
                {'connection_info': 'dummy', 'mount_device': '/dev/sdb'}
            ]}
        )
        self.assertEqual({'block_migration': False,
                          'instance_relative_path': 'foo',
                          'is_shared_block_storage': False,
                          'is_shared_instance_path': False,
                          'serial_listen_ports': [],
                          'supported_perf_events': [],
                          'target_connect_addr': None,
                          'bdms': []},
                         res_data.obj_to_primitive()['nova_object.data'])
|
|
|
|
    def _test_pre_live_migration_volume_backed(self, encrypted_volumes=False):
        """Common driver for the volume-backed pre_live_migration tests.

        Runs pre_live_migration for an instance with two attached volumes
        and compares the full returned LibvirtLiveMigrateData (including
        the per-volume BDM info and, optionally, encryption secret UUIDs)
        against a hand-built expected object.

        :param encrypted_volumes: when True, find_secret returns a libvirt
            secret per volume and the expected BDM info carries its UUID.
        """
        inst_ref = objects.Instance(root_device_name='/dev/vda',
                                    **self.test_instance)
        bdms = objects.BlockDeviceMappingList(objects=[
            fake_block_device.fake_bdm_object(self.context, {
                'connection_info': jsonutils.dumps({
                    'serial': uuids.vol1,
                    'data': {
                        'device_path': '/dev/disk/path/lun-X'
                    }
                }),
                'device_name': '/dev/sda',
                'volume_id': uuids.vol1,
                'source_type': 'volume',
                'destination_type': 'volume'
            }),
            fake_block_device.fake_bdm_object(self.context, {
                'connection_info': jsonutils.dumps({
                    'serial': uuids.vol2,
                    'data': {
                        'device_path': '/dev/disk/path/lun-Z'
                    }
                }),
                'device_name': '/dev/sdb',
                'volume_id': uuids.vol2,
                'source_type': 'volume',
                'destination_type': 'volume'
            })
        ])
        # We go through get_block_device_info to simulate what the
        # ComputeManager sends to the driver (make sure we're using the
        # correct type of BDM objects since there are many of them and
        # they are super confusing).
        block_device_info = driver.get_block_device_info(inst_ref, bdms)

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        with test.nested(
            mock.patch.object(drvr, '_is_native_luks_available'),
            mock.patch.object(drvr._host, 'find_secret'),
            mock.patch.object(drvr, '_connect_volume'),
            mock.patch.object(drvr, 'plug_vifs'),
        ) as (mock_is_luks_available, mock_find_secret,
              mock_connect_volume, mock_plug_vifs):

            mock_is_luks_available.return_value = True
            mock_find_secret.return_value = None
            if encrypted_volumes:
                secret_vol1 = mock.Mock()
                secret_vol1.UUIDString.return_value = uuids.secret_vol1
                secret_vol2 = mock.Mock()
                secret_vol2.UUIDString.return_value = uuids.secret_vol2
                mock_find_secret.side_effect = [secret_vol1, secret_vol2]

            migrate_data = migrate_data_obj.LibvirtLiveMigrateData(
                is_shared_instance_path=True,
                is_shared_block_storage=False,
                is_volume_backed=True,
                block_migration=False,
                instance_relative_path=inst_ref['name'],
                disk_over_commit=False,
                disk_available_mb=123,
                image_type='qcow2',
                filename='foo',
                src_supports_native_luks=True,
            )

            expected_migrate_data = migrate_data_obj.LibvirtLiveMigrateData(
                is_shared_instance_path=True,
                is_shared_block_storage=False,
                is_volume_backed=True,
                block_migration=False,
                instance_relative_path=inst_ref['name'],
                disk_over_commit=False,
                disk_available_mb=123,
                image_type='qcow2',
                filename='foo',
                serial_listen_ports=[],
                supported_perf_events=[],
                target_connect_addr=None,
                src_supports_native_luks=True
            )

            # Expected per-volume BDM info built up field by field.
            bdmi_vol1 = migrate_data_obj.LibvirtLiveMigrateBDMInfo()
            bdmi_vol1.boot_index = None
            bdmi_vol1.format = None
            bdmi_vol1.serial = uuids.vol1
            bdmi_vol1.connection_info = {
                u'data': {'device_path': u'/dev/disk/path/lun-X'},
                u'serial': uuids.vol1}
            bdmi_vol1.bus = 'scsi'
            bdmi_vol1.dev = 'sda'
            bdmi_vol1.type = 'disk'

            bdmi_vol2 = migrate_data_obj.LibvirtLiveMigrateBDMInfo()
            bdmi_vol2.boot_index = None
            bdmi_vol2.format = None
            bdmi_vol2.serial = uuids.vol2
            bdmi_vol2.connection_info = {
                u'data': {'device_path': u'/dev/disk/path/lun-Z'},
                u'serial': uuids.vol2}
            bdmi_vol2.bus = 'scsi'
            bdmi_vol2.dev = 'sdb'
            bdmi_vol2.type = 'disk'

            if encrypted_volumes:
                bdmi_vol1.encryption_secret_uuid = uuids.secret_vol1
                bdmi_vol2.encryption_secret_uuid = uuids.secret_vol2

            expected_migrate_data.bdms = [bdmi_vol1, bdmi_vol2]

            returned_migrate_data = drvr.pre_live_migration(
                self.context, inst_ref, block_device_info, [], None,
                migrate_data)

            expected_connect_volume_calls = []
            for bdm in block_device_info['block_device_mapping']:
                expected_call = mock.call(self.context, bdm['connection_info'],
                                          inst_ref, allow_native_luks=True)
                expected_connect_volume_calls.append(expected_call)
            mock_connect_volume.assert_has_calls(expected_connect_volume_calls)

            if encrypted_volumes:
                mock_find_secret.assert_has_calls(
                    [mock.call('volume', uuids.vol1),
                     mock.call('volume', uuids.vol2)])

            # FIXME(lyarwood): This is taken from test_os_vif_util.py and as
            # noted there should be removed if the ComparableVersionedObject
            # mix-in is ever used for these objects.
            expected_migrate_data.obj_reset_changes(recursive=True)
            returned_migrate_data.obj_reset_changes(recursive=True)
            expected = expected_migrate_data.obj_to_primitive()
            returned = returned_migrate_data.obj_to_primitive()
            # We have to manually deserialize the connection_info_json so
            # that the equality comparison uses a dict rather than a string
            # with a random hashseed sort order on the keys.
            for migrate_data in (expected, returned):
                for bdm_data in migrate_data['nova_object.data']['bdms']:
                    bdm = bdm_data['nova_object.data']
                    bdm['connection_info_json'] = (
                        jsonutils.loads(bdm['connection_info_json']))
            self.assertEqual(expected, returned)
|
|
|
|
    def test_pre_live_migration_volume_backed(self):
        """Volume-backed pre_live_migration without encrypted volumes."""
        self._test_pre_live_migration_volume_backed()
|
|
|
|
    def test_pre_live_migration_volume_backed_encrypted(self):
        """Volume-backed pre_live_migration with encrypted volumes."""
        self._test_pre_live_migration_volume_backed(encrypted_volumes=True)
|
|
|
|
    # NOTE(review): side_effect=eventlet.sleep(0) calls sleep at decoration
    # time and sets side_effect to its return value (None) — presumably the
    # intent was just to make the patched sleep a no-op; confirm.
    @mock.patch.object(eventlet.greenthread, 'sleep',
                       side_effect=eventlet.sleep(0))
    @mock.patch.object(libvirt_driver.LibvirtDriver, 'plug_vifs',
                       side_effect=processutils.ProcessExecutionError)
    def test_pre_live_migration_plug_vifs_retry_fails(self, mock_plug,
                                                      mock_sleep):
        """plug_vifs failing on every attempt exhausts the retry count and
        the last error propagates out of pre_live_migration.
        """
        self.flags(live_migration_retry_count=3)
        instance = objects.Instance(**self.test_instance)

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        disk_info_json = jsonutils.dumps({})
        migrate_data = migrate_data_obj.LibvirtLiveMigrateData(
            is_shared_block_storage=True,
            is_shared_instance_path=True,
            block_migration=False,
        )
        self.assertRaises(processutils.ProcessExecutionError,
                          drvr.pre_live_migration,
                          self.context, instance, block_device_info=None,
                          network_info=[], disk_info=disk_info_json,
                          migrate_data=migrate_data)
        # Called 3 times because of live_migration_retry_count is 3
        mock_plug.assert_has_calls([mock.call(instance, [])] * 3)
        self.assertEqual(3, mock_plug.call_count)
        # Called 'live_migration_retry_count - 1' times
        mock_sleep.assert_has_calls([mock.call(1)] * 2)
        self.assertEqual(2, mock_sleep.call_count)
|
|
|
|
    # NOTE(review): side_effect=eventlet.sleep(0) evaluates at decoration
    # time (side_effect becomes None) — presumably intended as a no-op sleep.
    @mock.patch.object(eventlet.greenthread, 'sleep',
                       side_effect=eventlet.sleep(0))
    @mock.patch.object(libvirt_driver.LibvirtDriver, 'plug_vifs')
    def test_pre_live_migration_plug_vifs_retry_works(self, mock_plug,
                                                      mock_sleep):
        """plug_vifs succeeding on the third attempt (within the retry
        budget of 3) lets pre_live_migration complete without raising.
        """
        self.flags(live_migration_retry_count=3)
        instance = objects.Instance(**self.test_instance)

        # Fail twice, then succeed.
        mock_plug.side_effect = [processutils.ProcessExecutionError(),
                                 processutils.ProcessExecutionError(), None]

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        disk_info_json = jsonutils.dumps({})
        migrate_data = migrate_data_obj.LibvirtLiveMigrateData(
            is_shared_block_storage=True,
            is_shared_instance_path=True,
            block_migration=False,
        )
        drvr.pre_live_migration(self.context, instance, block_device_info=None,
                                network_info=[], disk_info=disk_info_json,
                                migrate_data=migrate_data)
        # Called 3 times
        mock_plug.assert_has_calls([mock.call(instance, [])] * 3)
        self.assertEqual(3, mock_plug.call_count)
        # Called 2 times because the third 'plug_vifs' call is successful.
        mock_sleep.assert_has_calls([mock.call(1)] * 2)
        self.assertEqual(2, mock_sleep.call_count)
|
|
|
|
    def test_pre_live_migration_plug_vifs_with_dest_port_bindings(self):
        """Tests that we use the LibvirtLiveMigrateData.vifs destination host
        port binding details when plugging VIFs during pre_live_migration.
        """
        # The source VIF is bound to the source host ...
        source_vif = network_model.VIF(
            id=uuids.port_id, type=network_model.VIF_TYPE_OVS,
            vnic_type=network_model.VNIC_TYPE_NORMAL, details={'foo': 'bar'},
            profile={'binding:host_id': 'fake-source-host'})
        # ... while the migrate data carries the destination-host binding
        # that must actually be used for plugging.
        migrate_vifs = [objects.VIFMigrateData(
            port_id=uuids.port_id, vnic_type=network_model.VNIC_TYPE_NORMAL,
            vif_type=network_model.VIF_TYPE_OVS, vif_details={'bar': 'baz'},
            profile={'binding:host_id': 'fake-dest-host'},
            host='fake-dest-host', source_vif=source_vif)]
        migrate_data = migrate_data_obj.LibvirtLiveMigrateData(
            vifs=migrate_vifs)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance()
        # NonCallableMock: the passed-in network_info must NOT be used.
        network_info = mock.NonCallableMock()
        with mock.patch.object(drvr, 'plug_vifs') as plug_vifs:
            drvr._pre_live_migration_plug_vifs(
                instance, network_info, migrate_data)
        expected_network_info = network_model.NetworkInfo([
            migrate_vifs[0].get_dest_vif()])
        plug_vifs.assert_called_once_with(instance, expected_network_info)
|
|
|
|
    def test_pre_live_migration_image_not_created_with_shared_storage(self):
        """With a shared instance path, _create_images_and_backing is never
        invoked regardless of shared-block-storage or block-migration flags.
        """
        # Three shared-instance-path variants: non-shared block storage,
        # shared block storage, and block migration.
        migrate_data_set = [{'is_shared_block_storage': False,
                             'is_shared_instance_path': True,
                             'is_volume_backed': False,
                             'filename': 'foo',
                             'instance_relative_path': 'bar',
                             'disk_over_commit': False,
                             'disk_available_mb': 123,
                             'image_type': 'qcow2',
                             'block_migration': False},
                            {'is_shared_block_storage': True,
                             'is_shared_instance_path': True,
                             'is_volume_backed': False,
                             'filename': 'foo',
                             'instance_relative_path': 'bar',
                             'disk_over_commit': False,
                             'disk_available_mb': 123,
                             'image_type': 'qcow2',
                             'block_migration': False},
                            {'is_shared_block_storage': False,
                             'is_shared_instance_path': True,
                             'is_volume_backed': False,
                             'filename': 'foo',
                             'instance_relative_path': 'bar',
                             'disk_over_commit': False,
                             'disk_available_mb': 123,
                             'image_type': 'qcow2',
                             'block_migration': True}]

        def _to_obj(d):
            return migrate_data_obj.LibvirtLiveMigrateData(**d)
        migrate_data_set = map(_to_obj, migrate_data_set)

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        # creating mocks
        with test.nested(
            mock.patch.object(drvr,
                              '_create_images_and_backing'),
            mock.patch.object(drvr,
                              'ensure_filtering_rules_for_instance'),
            mock.patch.object(drvr, 'plug_vifs'),
        ) as (
            create_image_mock,
            rules_mock,
            plug_mock,
        ):
            disk_info_json = jsonutils.dumps({})
            for migrate_data in migrate_data_set:
                res = drvr.pre_live_migration(self.context, instance,
                                              block_device_info=None,
                                              network_info=[],
                                              disk_info=disk_info_json,
                                              migrate_data=migrate_data)
                self.assertFalse(create_image_mock.called)
                self.assertIsInstance(res,
                                      objects.LibvirtLiveMigrateData)
|
|
    @mock.patch('nova.virt.libvirt.utils.file_open',
                side_effect=[io.BytesIO(b''), io.BytesIO(b'')])
    def test_pre_live_migration_with_not_shared_instance_path(
            self, mock_file_open):
        """Without a shared instance path, _create_images_and_backing is
        called with a non-empty instance dir and the source host as the
        fallback for fetching images.
        """
        migrate_data = migrate_data_obj.LibvirtLiveMigrateData(
            is_shared_block_storage=False,
            is_shared_instance_path=False,
            block_migration=False,
            instance_relative_path='foo',
        )

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)

        def check_instance_dir(context, instance,
                               instance_dir, disk_info,
                               fallback_from_host=False):
            self.assertTrue(instance_dir)
        # creating mocks
        with test.nested(
            mock.patch.object(drvr,
                              '_create_images_and_backing',
                              side_effect=check_instance_dir),
            mock.patch.object(drvr,
                              'ensure_filtering_rules_for_instance'),
            mock.patch.object(drvr, 'plug_vifs'),
        ) as (
            create_image_mock,
            rules_mock,
            plug_mock,
        ):
            disk_info_json = jsonutils.dumps({})
            res = drvr.pre_live_migration(self.context, instance,
                                          block_device_info=None,
                                          network_info=[],
                                          disk_info=disk_info_json,
                                          migrate_data=migrate_data)
            create_image_mock.assert_has_calls(
                [mock.call(self.context, instance, mock.ANY, {},
                           fallback_from_host=instance.host)])
            self.assertIsInstance(res, objects.LibvirtLiveMigrateData)
|
|
|
    def test_pre_live_migration_recreate_disk_info(self):
        """For a block migration, the destination must rebuild the
        disk.info file from the incoming disk_info so that each disk's
        image format is preserved on the new host.
        """
        migrate_data = migrate_data_obj.LibvirtLiveMigrateData(
            is_shared_block_storage=False,
            is_shared_instance_path=False,
            block_migration=True,
            instance_relative_path='/some/path/',
        )
        disk_info = [{'disk_size': 5368709120, 'type': 'raw',
                      'virt_disk_size': 5368709120,
                      'path': '/some/path/disk',
                      'backing_file': '', 'over_committed_disk_size': 0},
                     {'disk_size': 1073741824, 'type': 'raw',
                      'virt_disk_size': 1073741824,
                      'path': '/some/path/disk.eph0',
                      'backing_file': '', 'over_committed_disk_size': 0}]
        # Expected disk.info content: disk path -> image format.
        image_disk_info = {'/some/path/disk': 'raw',
                           '/some/path/disk.eph0': 'raw'}

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        instance_path = os.path.dirname(disk_info[0]['path'])
        disk_info_path = os.path.join(instance_path, 'disk.info')

        with test.nested(
            mock.patch.object(os, 'mkdir'),
            mock.patch('nova.virt.libvirt.utils.write_to_file'),
            mock.patch.object(drvr, '_create_images_and_backing')
        ) as (
            mkdir, write_to_file, create_images_and_backing
        ):
            drvr.pre_live_migration(self.context, instance,
                                    block_device_info=None,
                                    network_info=[],
                                    disk_info=jsonutils.dumps(disk_info),
                                    migrate_data=migrate_data)
            write_to_file.assert_called_with(disk_info_path,
                                             jsonutils.dumps(image_disk_info))
|
|
|
|
    @mock.patch('nova.virt.libvirt.utils.file_open',
                side_effect=[io.BytesIO(b''), io.BytesIO(b'')])
    def test_pre_live_migration_with_perf_events(self, mock_file_open):
        """pre_live_migration reports the destination host's supported
        perf events in the returned migrate_data.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        # Pretend the destination host supports the 'cmt' perf event.
        drvr._supported_perf_events = ['cmt']

        migrate_data = migrate_data_obj.LibvirtLiveMigrateData(
            is_shared_block_storage=False,
            is_shared_instance_path=False,
            block_migration=False,
            instance_relative_path='foo',
        )

        instance = objects.Instance(**self.test_instance)

        res = drvr.pre_live_migration(self.context, instance,
                                      block_device_info=None,
                                      network_info=[],
                                      disk_info=None,
                                      migrate_data=migrate_data)
        self.assertEqual(['cmt'], res.supported_perf_events)
|
|
|
|
@mock.patch('os.stat')
|
|
@mock.patch('os.path.getsize')
|
|
@mock.patch('nova.virt.disk.api.get_disk_info')
|
|
@mock.patch('nova.virt.libvirt.utils.get_disk_backing_file',
|
|
return_value='file')
|
|
def test_get_instance_disk_info_works_correctly(
|
|
self, mock_get_disk_backing_file, mock_qemu_img_info,
|
|
mock_get_size, mock_stat):
|
|
# Test data
|
|
instance = objects.Instance(**self.test_instance)
|
|
dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
|
|
"<devices>"
|
|
"<disk type='file'><driver name='qemu' type='raw'/>"
|
|
"<source file='/test/disk'/>"
|
|
"<target dev='vda' bus='virtio'/></disk>"
|
|
"<disk type='file'><driver name='qemu' type='qcow2'/>"
|
|
"<source file='/test/disk.local'/>"
|
|
"<target dev='vdb' bus='virtio'/></disk>"
|
|
"</devices></domain>")
|
|
|
|
# Preparing mocks
|
|
vdmock = mock.Mock(autospec=fakelibvirt.virDomain)
|
|
vdmock.XMLDesc.return_value = dummyxml
|
|
|
|
mock_qemu_img_info.return_value = mock.Mock(disk_size=3328599655,
|
|
virtual_size=21474836480)
|
|
mock_stat.return_value = mock.Mock(st_blocks=20971520)
|
|
mock_get_size.return_value = 10737418240
|
|
|
|
def fake_lookup(_uuid):
|
|
if _uuid == instance.uuid:
|
|
return vdmock
|
|
self.create_fake_libvirt_mock(lookupByUUIDString=fake_lookup)
|
|
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
info = drvr.get_instance_disk_info(instance)
|
|
info = jsonutils.loads(info)
|
|
self.assertEqual(info[0]['type'], 'raw')
|
|
self.assertEqual(info[0]['path'], '/test/disk')
|
|
self.assertEqual(info[0]['disk_size'], 10737418240)
|
|
self.assertEqual(info[0]['virt_disk_size'], 10737418240)
|
|
self.assertEqual(info[0]['backing_file'], "")
|
|
self.assertEqual(info[0]['over_committed_disk_size'], 0)
|
|
self.assertEqual(info[1]['type'], 'qcow2')
|
|
self.assertEqual(info[1]['path'], '/test/disk.local')
|
|
self.assertEqual(info[1]['disk_size'], 3328599655)
|
|
self.assertEqual(info[1]['virt_disk_size'], 21474836480)
|
|
self.assertEqual(info[1]['backing_file'], "file")
|
|
self.assertEqual(info[1]['over_committed_disk_size'], 18146236825)
|
|
|
|
vdmock.XMLDesc.assert_called_once_with(0)
|
|
mock_qemu_img_info.called_once_with('/test/disk.local')
|
|
mock_stat.called_once_with('/test/disk')
|
|
mock_get_size.called_once_with('/test/disk')
|
|
mock_get_disk_backing_file.assert_called()
|
|
|
|
    def test_post_live_migration(self):
        """After a live migration finishes, the source host disconnects
        every volume that was attached to the instance.
        """
        vol1_conn_info = {'data': {'test_data': mock.sentinel.vol1},
                          'serial': 'fake_serial1'}
        vol2_conn_info = {'data': {'test_data': mock.sentinel.vol2},
                          'serial': 'fake_serial2'}

        bdi = {'block_device_mapping': [
            {'attachment_id': None,
             'connection_info': vol1_conn_info,
             'mount_device': '/dev/sda',
             },
            {'attachment_id': None,
             'connection_info': vol2_conn_info,
             'mount_device': '/dev/sdb', }]}

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        inst_ref = {'id': 'foo'}
        cntx = context.get_admin_context()

        # Set up the mock expectations
        @mock.patch.object(driver, 'block_device_info_get_mapping',
                           return_value=bdi['block_device_mapping'])
        @mock.patch.object(drvr, '_disconnect_volume')
        def _test(_disconnect_volume, block_device_info_get_mapping):
            drvr.post_live_migration(cntx, inst_ref, bdi)

            block_device_info_get_mapping.assert_called_once_with(bdi)
            # Both volumes must be disconnected on the source.
            _disconnect_volume.assert_has_calls([
                mock.call(cntx, vol1_conn_info, inst_ref),
                mock.call(cntx, vol2_conn_info, inst_ref)])

        _test()
|
|
|
|
    @mock.patch('os.stat')
    @mock.patch('os.path.getsize')
    @mock.patch('nova.virt.disk.api.get_disk_info')
    @mock.patch('nova.virt.libvirt.utils.get_disk_backing_file',
                return_value='file')
    def test_get_instance_disk_info_excludes_volumes(
            self, mock_get_disk_backing_file, mock_qemu_img_info,
            mock_get_size, mock_stat):
        """Disks backed by Cinder volumes (identified via the block
        device mapping) must be excluded from get_instance_disk_info.
        """
        # Test data: vdc/vdd are volume-backed and must not be reported.
        instance = objects.Instance(**self.test_instance)
        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices>"
                    "<disk type='file'><driver name='qemu' type='raw'/>"
                    "<source file='/test/disk'/>"
                    "<target dev='vda' bus='virtio'/></disk>"
                    "<disk type='file'><driver name='qemu' type='qcow2'/>"
                    "<source file='/test/disk.local'/>"
                    "<target dev='vdb' bus='virtio'/></disk>"
                    "<disk type='file'><driver name='qemu' type='qcow2'/>"
                    "<source file='/fake/path/to/volume1'/>"
                    "<target dev='vdc' bus='virtio'/></disk>"
                    "<disk type='file'><driver name='qemu' type='qcow2'/>"
                    "<source file='/fake/path/to/volume2'/>"
                    "<target dev='vdd' bus='virtio'/></disk>"
                    "</devices></domain>")

        # Preparing mocks
        vdmock = mock.Mock(autospec=fakelibvirt.virDomain)
        vdmock.XMLDesc.return_value = dummyxml

        mock_qemu_img_info.return_value = mock.Mock(disk_size=3328599655,
                                                    virtual_size=21474836480)
        mock_stat.return_value = mock.Mock(st_blocks=20971520)
        mock_get_size.return_value = 10737418240

        def fake_lookup(_uuid):
            if _uuid == instance.uuid:
                return vdmock
        self.create_fake_libvirt_mock(lookupByUUIDString=fake_lookup)

        conn_info = {'driver_volume_type': 'fake'}
        info = {'block_device_mapping': [
                  {'connection_info': conn_info, 'mount_device': '/dev/vdc'},
                  {'connection_info': conn_info, 'mount_device': '/dev/vdd'}]}
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        info = drvr.get_instance_disk_info(instance,
                                           block_device_info=info)
        info = jsonutils.loads(info)
        # Only the two local disks are reported.
        self.assertEqual(info[0]['type'], 'raw')
        self.assertEqual(info[0]['path'], '/test/disk')
        self.assertEqual(info[0]['disk_size'], 10737418240)
        self.assertEqual(info[0]['backing_file'], "")
        self.assertEqual(info[0]['over_committed_disk_size'], 0)
        self.assertEqual(info[1]['type'], 'qcow2')
        self.assertEqual(info[1]['path'], '/test/disk.local')
        self.assertEqual(info[1]['virt_disk_size'], 21474836480)
        self.assertEqual(info[1]['backing_file'], "file")
        self.assertEqual(info[1]['over_committed_disk_size'], 18146236825)

        vdmock.XMLDesc.assert_called_once_with(0)
        mock_qemu_img_info.assert_called_once_with('/test/disk.local')
        mock_stat.assert_called_once_with('/test/disk')
        mock_get_size.assert_called_once_with('/test/disk')
        mock_get_disk_backing_file.assert_called()
|
|
|
|
    @mock.patch('os.stat')
    @mock.patch('os.path.getsize')
    def test_get_instance_disk_info_no_bdinfo_passed(self, mock_get_size,
                                                     mock_stat):
        # NOTE(ndipanov): _get_disk_overcomitted_size_total calls this method
        # without access to Nova's block device information. We want to make
        # sure that we guess volumes mostly correctly in that case as well
        instance = objects.Instance(**self.test_instance)
        # The second <disk> is type='block' and should be guessed to be a
        # volume, so only the first file-backed disk is reported.
        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices>"
                    "<disk type='file'><driver name='qemu' type='raw'/>"
                    "<source file='/test/disk'/>"
                    "<target dev='vda' bus='virtio'/></disk>"
                    "<disk type='block'><driver name='qemu' type='raw'/>"
                    "<source file='/fake/path/to/volume1'/>"
                    "<target dev='vdb' bus='virtio'/></disk>"
                    "</devices></domain>")
        path = '/test/disk'
        size = 10737418240

        # Preparing mocks
        vdmock = mock.Mock(autospec=fakelibvirt.virDomain)
        vdmock.XMLDesc.return_value = dummyxml

        mock_stat.return_value = mock.Mock(st_blocks=20971520)
        mock_get_size.return_value = 10737418240

        def fake_lookup(_uuid):
            if _uuid == instance.uuid:
                return vdmock
        self.create_fake_libvirt_mock(lookupByUUIDString=fake_lookup)

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        info = drvr.get_instance_disk_info(instance)

        info = jsonutils.loads(info)
        self.assertEqual(1, len(info))
        self.assertEqual(info[0]['type'], 'raw')
        self.assertEqual(info[0]['path'], path)
        self.assertEqual(info[0]['disk_size'], size)
        self.assertEqual(info[0]['backing_file'], "")
        self.assertEqual(info[0]['over_committed_disk_size'], 0)

        vdmock.XMLDesc.assert_called_once_with(0)
        mock_stat.assert_called_once_with(path)
        mock_get_size.assert_called_once_with(path)
|
|
|
|
    def test_spawn_with_network_info(self):
        """spawn() succeeds end-to-end with network info, exercising
        guest XML generation, device metadata collection and the
        firewall driver hooks.
        """
        def fake_getLibVersion():
            return fakelibvirt.FAKE_LIBVIRT_VERSION

        def fake_getCapabilities():
            return """
            <capabilities>
                <host>
                    <uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
                    <cpu>
                      <arch>x86_64</arch>
                      <model>Penryn</model>
                      <vendor>Intel</vendor>
                      <topology sockets='1' cores='2' threads='1'/>
                      <feature name='xtpr'/>
                    </cpu>
                </host>
            </capabilities>
            """

        def fake_baselineCPU(cpu, flag):
            return """<cpu mode='custom' match='exact'>
                        <model fallback='allow'>Penryn</model>
                        <vendor>Intel</vendor>
                        <feature policy='require' name='xtpr'/>
                      </cpu>
                   """

        # _fake_network_info must be called before create_fake_libvirt_mock(),
        # as _fake_network_info calls importutils.import_class() and
        # create_fake_libvirt_mock() mocks importutils.import_class().
        network_info = _fake_network_info(self, 1)
        self.create_fake_libvirt_mock(getLibVersion=fake_getLibVersion,
                                      getCapabilities=fake_getCapabilities,
                                      getVersion=lambda: 1005001,
                                      baselineCPU=fake_baselineCPU)

        instance = objects.Instance(**self.test_instance)
        instance.image_ref = uuids.image_ref
        instance.config_drive = ''
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        self.useFixture(fake_imagebackend.ImageBackendFixture())
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        with test.nested(
            utils.tempdir(),
            mock.patch('nova.virt.libvirt.driver.libvirt'),
            mock.patch.object(drvr, '_build_device_metadata'),
            mock.patch.object(drvr, 'get_info'),
            mock.patch.object(drvr.firewall_driver, 'setup_basic_filtering'),
            mock.patch.object(drvr.firewall_driver, 'prepare_instance_filter')
        ) as (
            tmpdir,
            mock_orig_libvirt,
            mock_build_device_metadata,
            mock_get_info,
            mock_ignored, mock_ignored
        ):
            self.flags(instances_path=tmpdir)

            hw_running = hardware.InstanceInfo(state=power_state.RUNNING)
            mock_get_info.return_value = hw_running
            mock_build_device_metadata.return_value = None

            # Simulate an older libvirt without the baseline CPU
            # expand-features flag.
            del mock_orig_libvirt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES

            drvr.spawn(self.context, instance, image_meta, [], 'herp', {},
                       network_info=network_info)

            mock_get_info.assert_called_once_with(instance)
            mock_build_device_metadata.assert_called_once_with(self.context,
                                                               instance)
|
|
|
|
    # Methods called directly by spawn()
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_get_guest_xml')
    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       '_create_domain_and_network')
    @mock.patch.object(libvirt_driver.LibvirtDriver, 'get_info')
    # Methods called by _create_configdrive via post_xml_callback
    @mock.patch('nova.virt.configdrive.ConfigDriveBuilder._make_iso9660')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_build_device_metadata')
    @mock.patch.object(instance_metadata, 'InstanceMetadata')
    def test_spawn_with_config_drive(self, mock_instance_metadata,
                                     mock_build_device_metadata,
                                     mock_mkisofs, mock_get_info,
                                     mock_create_domain_and_network,
                                     mock_get_guest_xml):
        """When config_drive is requested, spawn() builds and imports the
        'disk.config' image via the post_xml_callback.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        instance.config_drive = 'True'
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        instance_info = hardware.InstanceInfo(state=power_state.RUNNING)

        mock_build_device_metadata.return_value = None

        def fake_create_domain_and_network(
                context, xml, instance, network_info,
                block_device_info=None, power_on=True,
                vifs_already_plugged=False, post_xml_callback=None,
                destroy_disks_on_failure=False):
            # The config disk should be created by this callback, so we need
            # to execute it.
            post_xml_callback()

        fake_backend = self.useFixture(
            fake_imagebackend.ImageBackendFixture(exists=lambda _: False))

        mock_get_info.return_value = instance_info
        mock_create_domain_and_network.side_effect = \
            fake_create_domain_and_network

        drvr.spawn(self.context, instance, image_meta, [], None, {})

        # We should have imported 'disk.config'
        config_disk = fake_backend.disks['disk.config']
        config_disk.import_file.assert_called_once_with(instance, mock.ANY,
                                                        'disk.config')
|
|
|
|
    def test_spawn_without_image_meta(self):
        """An image-backed spawn creates both a root disk and an
        ephemeral disk.
        """
        instance_ref = self.test_instance
        instance_ref['image_ref'] = 1
        instance = objects.Instance(**instance_ref)

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        fake_backend = self.useFixture(fake_imagebackend.ImageBackendFixture())

        # Stub out XML generation and domain creation; we only care about
        # which disks the image backend was asked to create.
        self.stub_out('nova.virt.libvirt.driver.LibvirtDriver._get_guest_xml',
                      lambda *a, **kw: None)
        self.stub_out('nova.virt.libvirt.driver.LibvirtDriver.'
                      '_create_domain_and_network',
                      lambda *a, **kw: None)
        self.stub_out('nova.virt.libvirt.driver.LibvirtDriver.get_info',
                      lambda self, instance: hardware.InstanceInfo(
                          state=power_state.RUNNING))

        drvr.spawn(self.context, instance, image_meta, [], None, {})

        # We should have created a root disk and an ephemeral disk
        self.assertEqual(['disk', 'disk.local'],
                         sorted(fake_backend.created_disks.keys()))
|
|
|
|
    def _test_spawn_disks(self, image_ref, block_device_info):
        """Spawn an instance with the given image ref and block device
        info, with domain creation stubbed out, and return the sorted
        list of disk names the image backend created.

        Helper for the test_spawn_from_* tests below.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        # Volume-backed instance created without image
        instance = objects.Instance(**self.test_instance)
        instance.image_ref = image_ref
        instance.root_device_name = '/dev/vda'
        instance.uuid = uuids.instance_uuid

        backend = self.useFixture(fake_imagebackend.ImageBackendFixture())

        with test.nested(
            mock.patch.object(drvr, '_get_guest_xml'),
            mock.patch.object(drvr, '_create_domain_and_network'),
            mock.patch.object(drvr, 'get_info')
        ) as (
            mock_get_guest_xml, mock_create_domain_and_network,
            mock_get_info
        ):
            hw_running = hardware.InstanceInfo(state=power_state.RUNNING)
            mock_get_info.return_value = hw_running

            drvr.spawn(self.context, instance,
                       image_meta, [], None, {},
                       block_device_info=block_device_info)

        # Return a sorted list of created disks
        return sorted(backend.created_disks.keys())
|
|
|
|
def test_spawn_from_volume_no_image_ref(self):
|
|
block_device_info = {'root_device_name': '/dev/vda',
|
|
'block_device_mapping': [
|
|
{'mount_device': 'vda',
|
|
'boot_index': 0}]}
|
|
|
|
disks_created = self._test_spawn_disks(None, block_device_info)
|
|
|
|
# We should have created the ephemeral disk, and nothing else
|
|
self.assertEqual(['disk.local'], disks_created)
|
|
|
|
def test_spawn_from_volume_with_image_ref(self):
|
|
block_device_info = {'root_device_name': '/dev/vda',
|
|
'block_device_mapping': [
|
|
{'mount_device': 'vda',
|
|
'boot_index': 0}]}
|
|
|
|
disks_created = self._test_spawn_disks(uuids.image_ref,
|
|
block_device_info)
|
|
|
|
# We should have created the ephemeral disk, and nothing else
|
|
self.assertEqual(['disk.local'], disks_created)
|
|
|
|
def test_spawn_from_image(self):
|
|
disks_created = self._test_spawn_disks(uuids.image_ref, None)
|
|
|
|
# We should have created the root and ephemeral disks
|
|
self.assertEqual(['disk', 'disk.local'], disks_created)
|
|
|
|
def test_start_lxc_from_volume(self):
|
|
self.flags(virt_type="lxc",
|
|
group='libvirt')
|
|
|
|
def check_setup_container(image, container_dir=None):
|
|
self.assertIsInstance(image, imgmodel.LocalBlockImage)
|
|
self.assertEqual(image.path, '/dev/path/to/dev')
|
|
return '/dev/nbd1'
|
|
|
|
bdm = {
|
|
'guest_format': None,
|
|
'boot_index': 0,
|
|
'mount_device': '/dev/sda',
|
|
'connection_info': {
|
|
'driver_volume_type': 'iscsi',
|
|
'serial': 'afc1',
|
|
'data': {
|
|
'access_mode': 'rw',
|
|
'target_discovered': False,
|
|
'encrypted': False,
|
|
'qos_specs': None,
|
|
'target_iqn': 'iqn: volume-afc1',
|
|
'target_portal': 'ip: 3260',
|
|
'volume_id': 'afc1',
|
|
'target_lun': 1,
|
|
'auth_password': 'uj',
|
|
'auth_username': '47',
|
|
'auth_method': 'CHAP'
|
|
}
|
|
},
|
|
'disk_bus': 'scsi',
|
|
'device_type': 'disk',
|
|
'delete_on_termination': False
|
|
}
|
|
|
|
def _connect_volume_side_effect(ctxt, connection_info, instance):
|
|
bdm['connection_info']['data']['device_path'] = '/dev/path/to/dev'
|
|
|
|
def _get(key, opt=None):
|
|
return bdm.get(key, opt)
|
|
|
|
def getitem(key):
|
|
return bdm[key]
|
|
|
|
def setitem(key, val):
|
|
bdm[key] = val
|
|
|
|
bdm_mock = mock.MagicMock()
|
|
bdm_mock.__getitem__.side_effect = getitem
|
|
bdm_mock.__setitem__.side_effect = setitem
|
|
bdm_mock.get = _get
|
|
|
|
disk_mock = mock.MagicMock()
|
|
disk_mock.source_path = '/dev/path/to/dev'
|
|
|
|
block_device_info = {'block_device_mapping': [bdm_mock],
|
|
'root_device_name': '/dev/sda'}
|
|
|
|
# Volume-backed instance created without image
|
|
instance_ref = self.test_instance
|
|
instance_ref['image_ref'] = ''
|
|
instance_ref['root_device_name'] = '/dev/sda'
|
|
instance_ref['ephemeral_gb'] = 0
|
|
instance_ref['uuid'] = uuids.fake
|
|
inst_obj = objects.Instance(**instance_ref)
|
|
image_meta = objects.ImageMeta.from_dict({})
|
|
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
with test.nested(
|
|
mock.patch.object(drvr, 'plug_vifs'),
|
|
mock.patch.object(drvr.firewall_driver, 'setup_basic_filtering'),
|
|
mock.patch.object(drvr.firewall_driver, 'prepare_instance_filter'),
|
|
mock.patch.object(drvr.firewall_driver, 'apply_instance_filter'),
|
|
mock.patch.object(drvr, '_create_domain'),
|
|
mock.patch.object(drvr, '_connect_volume',
|
|
side_effect=_connect_volume_side_effect),
|
|
mock.patch.object(drvr, '_get_volume_config',
|
|
return_value=disk_mock),
|
|
mock.patch.object(drvr, 'get_info',
|
|
return_value=hardware.InstanceInfo(
|
|
state=power_state.RUNNING)),
|
|
mock.patch('nova.virt.disk.api.setup_container',
|
|
side_effect=check_setup_container),
|
|
mock.patch('nova.virt.disk.api.teardown_container'),
|
|
mock.patch.object(objects.Instance, 'save')):
|
|
|
|
drvr.spawn(self.context, inst_obj, image_meta, [], None, {},
|
|
network_info=[],
|
|
block_device_info=block_device_info)
|
|
self.assertEqual('/dev/nbd1',
|
|
inst_obj.system_metadata.get(
|
|
'rootfs_device_name'))
|
|
|
|
def test_spawn_with_pci_devices(self):
|
|
class FakeLibvirtPciDevice(object):
|
|
def dettach(self):
|
|
return None
|
|
|
|
def reset(self):
|
|
return None
|
|
|
|
def fake_node_device_lookup_by_name(address):
|
|
pattern = ("pci_%(hex)s{4}_%(hex)s{2}_%(hex)s{2}_%(oct)s{1}"
|
|
% dict(hex='[\da-f]', oct='[0-8]'))
|
|
pattern = re.compile(pattern)
|
|
if pattern.match(address) is None:
|
|
raise fakelibvirt.libvirtError()
|
|
return FakeLibvirtPciDevice()
|
|
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
|
|
self.stub_out('nova.virt.libvirt.driver.LibvirtDriver._get_guest_xml',
|
|
lambda *args, **kwargs: None)
|
|
self.stub_out('nova.virt.libvirt.driver.LibvirtDriver.'
|
|
'_create_domain_and_network',
|
|
lambda *args, **kwargs: None)
|
|
self.stub_out('nova.virt.libvirt.driver.LibvirtDriver.get_info',
|
|
lambda self, instance: hardware.InstanceInfo(
|
|
state=power_state.RUNNING))
|
|
|
|
mock_connection = mock.MagicMock(
|
|
nodeDeviceLookupByName=fake_node_device_lookup_by_name)
|
|
|
|
instance_ref = self.test_instance
|
|
instance_ref['image_ref'] = 'my_fake_image'
|
|
instance = objects.Instance(**instance_ref)
|
|
instance['pci_devices'] = objects.PciDeviceList(
|
|
objects=[objects.PciDevice(address='0000:00:00.0')])
|
|
|
|
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
|
|
|
|
self.useFixture(fake_imagebackend.ImageBackendFixture())
|
|
|
|
with mock.patch.object(drvr, '_get_connection',
|
|
return_value=mock_connection):
|
|
drvr.spawn(self.context, instance, image_meta, [], None, {})
|
|
|
|
    def _test_create_image_plain(self, os_type='', filename='', mkfs=False):
        """Exercise _create_image for a plain image-backed instance and
        assert that the root and ephemeral disks are cached with the
        expected filenames and sizes.

        :param os_type: instance os_type to set.
        :param filename: expected cache filename of the ephemeral disk.
        :param mkfs: if True, register a fake mkfs command for os_type so
            the ephemeral filename carries the mkfs-command hash.
        """
        gotFiles = []

        instance_ref = self.test_instance
        instance_ref['image_ref'] = 1
        instance = objects.Instance(**instance_ref)
        instance['os_type'] = os_type

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stub_out('nova.virt.libvirt.driver.LibvirtDriver._get_guest_xml',
                      lambda *args, **kwargs: None)
        self.stub_out('nova.virt.libvirt.driver.LibvirtDriver.'
                      '_create_domain_and_network',
                      lambda *args, **kwargs: None)
        self.stub_out('nova.virt.libvirt.driver.LibvirtDriver.get_info',
                      lambda self, instance: hardware.InstanceInfo(
                          state=power_state.RUNNING))
        if mkfs:
            self.stub_out(
                'nova.privsep.fs._MKFS_COMMAND',
                {os_type: 'mkfs.ext4 --label %(fs_label)s %(target)s'})

        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance,
                                            image_meta)

        self.useFixture(
            fake_imagebackend.ImageBackendFixture(got_files=gotFiles))

        drvr._create_image(self.context, instance, disk_info['mapping'])
        drvr._get_guest_xml(self.context, instance, None,
                            disk_info, image_meta)

        # Root disk cached under the sha1 of the image ref ('1'), plus
        # the ephemeral disk under the expected filename.
        wantFiles = [
            {'filename': '356a192b7913b04c54574d18c28d46e6395428ab',
             'size': 10 * units.Gi},
            {'filename': filename,
             'size': 20 * units.Gi},
            ]
        self.assertEqual(gotFiles, wantFiles)
|
|
|
|
def test_create_image_plain_os_type_blank(self):
|
|
self._test_create_image_plain(os_type='',
|
|
filename=self._EPHEMERAL_20_DEFAULT,
|
|
mkfs=False)
|
|
|
|
def test_create_image_plain_os_type_none(self):
|
|
self._test_create_image_plain(os_type=None,
|
|
filename=self._EPHEMERAL_20_DEFAULT,
|
|
mkfs=False)
|
|
|
|
def test_create_image_plain_os_type_set_no_fs(self):
|
|
self._test_create_image_plain(os_type='test',
|
|
filename=self._EPHEMERAL_20_DEFAULT,
|
|
mkfs=False)
|
|
|
|
def test_create_image_plain_os_type_set_with_fs(self):
|
|
ephemeral_file_name = ('ephemeral_20_%s' % utils.get_hash_str(
|
|
'mkfs.ext4 --label %(fs_label)s %(target)s')[:7])
|
|
|
|
self._test_create_image_plain(os_type='test',
|
|
filename=ephemeral_file_name,
|
|
mkfs=True)
|
|
|
|
    def test_create_image_initrd(self):
        """When the instance has kernel and ramdisk images, _create_image
        fetches them with fetch_raw_image (and no size), alongside the
        root and ephemeral disks.
        """
        kernel_id = uuids.kernel_id
        ramdisk_id = uuids.ramdisk_id

        kernel_fname = imagecache.get_cache_fname(kernel_id)
        ramdisk_fname = imagecache.get_cache_fname(ramdisk_id)

        filename = self._EPHEMERAL_20_DEFAULT

        gotFiles = []

        instance_ref = self.test_instance
        instance_ref['image_ref'] = uuids.instance_id
        instance_ref['kernel_id'] = uuids.kernel_id
        instance_ref['ramdisk_id'] = uuids.ramdisk_id
        instance_ref['os_type'] = 'test'
        instance = objects.Instance(**instance_ref)

        driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        fake_backend = self.useFixture(
            fake_imagebackend.ImageBackendFixture(got_files=gotFiles))

        with test.nested(
            mock.patch.object(driver, '_get_guest_xml'),
            mock.patch.object(driver, '_create_domain_and_network'),
            mock.patch.object(driver, 'get_info',
              return_value=[hardware.InstanceInfo(state=power_state.RUNNING)])
        ):
            image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
            disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                                instance,
                                                image_meta)
            driver._create_image(self.context, instance, disk_info['mapping'])

        # Assert that kernel and ramdisk were fetched with fetch_raw_image
        # and no size
        for name, disk in fake_backend.disks.items():
            cache = disk.cache
            if name in ('kernel', 'ramdisk'):
                cache.assert_called_once_with(
                    context=self.context, filename=mock.ANY, image_id=mock.ANY,
                    fetch_func=fake_backend.mock_fetch_raw_image)

        wantFiles = [
            {'filename': kernel_fname,
             'size': None},
            {'filename': ramdisk_fname,
             'size': None},
            {'filename': imagecache.get_cache_fname(uuids.instance_id),
             'size': 10 * units.Gi},
            {'filename': filename,
             'size': 20 * units.Gi},
            ]
        self.assertEqual(wantFiles, gotFiles)
|
|
|
|
def test_injection_info_is_sanitized(self):
|
|
info = get_injection_info(
|
|
network_info=mock.sentinel.network_info,
|
|
files=mock.sentinel.files,
|
|
admin_pass='verybadpass')
|
|
self.assertNotIn('verybadpass', str(info))
|
|
self.assertNotIn('verybadpass', repr(info))
|
|
|
|
    @mock.patch(
        'nova.virt.libvirt.driver.LibvirtDriver._build_device_metadata')
    @mock.patch('nova.api.metadata.base.InstanceMetadata')
    @mock.patch('nova.virt.configdrive.ConfigDriveBuilder.make_drive')
    def test_create_configdrive(self, mock_make_drive,
                                mock_instance_metadata,
                                mock_build_device_metadata):
        """_create_configdrive builds the ISO in the instance directory,
        feeds the injection info into the instance metadata, and imports
        the result as 'disk.config' via the image backend.
        """
        instance = objects.Instance(**self.test_instance)
        instance.config_drive = 'True'

        backend = self.useFixture(
            fake_imagebackend.ImageBackendFixture(exists=lambda path: False))

        mock_build_device_metadata.return_value = None
        injection_info = get_injection_info(
            network_info=mock.sentinel.network_info,
            admin_pass=mock.sentinel.admin_pass,
            files=mock.sentinel.files
        )
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        drvr._create_configdrive(self.context, instance, injection_info)

        expected_config_drive_path = os.path.join(
            CONF.instances_path, instance.uuid, 'disk.config')
        mock_make_drive.assert_called_once_with(expected_config_drive_path)
        # The injection info must flow into the instance metadata.
        mock_instance_metadata.assert_called_once_with(instance,
            request_context=self.context,
            network_info=mock.sentinel.network_info,
            content=mock.sentinel.files,
            extra_md={'admin_pass': mock.sentinel.admin_pass})

        backend.disks['disk.config'].import_file.assert_called_once_with(
            instance, mock.ANY, 'disk.config')
|
|
|
|
    @ddt.unpack
    @ddt.data({'expected': 200, 'flavor_size': 200},
              {'expected': 100, 'flavor_size': 200, 'bdi_size': 100},
              {'expected': 200, 'flavor_size': 200, 'bdi_size': 100,
               'legacy': True})
    def test_create_image_with_swap(self, expected,
                                    flavor_size=None, bdi_size=None,
                                    legacy=False):
        # Test the precedence of swap disk size specified in both the bdm and
        # the flavor.
        # Cases: flavor only; bdm overrides flavor; legacy mode
        # (ignore_bdi_for_swap=True) forces the flavor size.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance_ref = self.test_instance
        instance_ref['image_ref'] = ''
        instance = objects.Instance(**instance_ref)

        if flavor_size is not None:
            instance.flavor.swap = flavor_size

        bdi = {'block_device_mapping': [{'boot_index': 0}]}
        if bdi_size is not None:
            bdi['swap'] = {'swap_size': bdi_size, 'device_name': '/dev/vdb'}

        create_image_kwargs = {}
        if legacy:
            create_image_kwargs['ignore_bdi_for_swap'] = True

        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance, image_meta,
                                            block_device_info=bdi)

        backend = self.useFixture(fake_imagebackend.ImageBackendFixture())
        drvr._create_image(self.context, instance, disk_info['mapping'],
                           block_device_info=bdi, **create_image_kwargs)

        # The swap disk must be created and cached at the winning size.
        backend.mock_create_swap.assert_called_once_with(
            target='swap_%i' % expected, swap_mb=expected,
            context=self.context)
        backend.disks['disk.swap'].cache.assert_called_once_with(
            fetch_func=mock.ANY, filename='swap_%i' % expected,
            size=expected * units.Mi, context=self.context, swap_mb=expected)
|
|
|
|
    @mock.patch.object(nova.virt.libvirt.imagebackend.Image, 'cache')
    def test_create_vz_container_with_swap(self, mock_cache):
        """Virtuozzo containers (vm_mode=exe) do not support swap disks;
        requesting one via the flavor must raise Invalid.
        """
        self.flags(virt_type='parallels', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
        instance_ref = copy.deepcopy(self.test_instance)
        instance_ref['vm_mode'] = fields.VMMode.EXE
        instance_ref['flavor'].swap = 1024
        instance = objects.Instance(**instance_ref)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance, image_meta)
        self.assertRaises(exception.Invalid,
                          drvr._create_image,
                          self.context, instance, disk_info['mapping'])
|
|
|
|
@mock.patch.object(nova.virt.libvirt.imagebackend.Image, 'cache',
|
|
side_effect=exception.ImageNotFound(image_id='fake-id'))
|
|
def test_create_image_not_exist_no_fallback(self, mock_cache):
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
instance = objects.Instance(**self.test_instance)
|
|
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
|
|
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
|
|
instance,
|
|
image_meta)
|
|
self.assertRaises(exception.ImageNotFound,
|
|
drvr._create_image,
|
|
self.context, instance, disk_info['mapping'])
|
|
|
|
    @mock.patch.object(nova.virt.libvirt.imagebackend.Image, 'cache')
    def test_create_image_not_exist_fallback(self, mock_cache):
        """If the image is missing from glance but a fallback host is
        given, _create_image copies the image from that host instead.
        """
        def side_effect(fetch_func, filename, size=None, *args, **kwargs):
            def second_call(fetch_func, filename, size=None, *args, **kwargs):
                # call copy_from_host ourselves because we mocked image.cache()
                fetch_func('fake-target')
                # further calls have no side effect
                mock_cache.side_effect = None
            # Rewire the mock so only the FIRST cache() call raises; the
            # retry then invokes the copy-from-host fetch function.
            mock_cache.side_effect = second_call
            # raise an error only the first call
            raise exception.ImageNotFound(image_id='fake-id')

        mock_cache.side_effect = side_effect
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance,
                                            image_meta)

        with mock.patch('nova.virt.libvirt.utils.copy_image') as mock_copy:
            drvr._create_image(self.context, instance, disk_info['mapping'],
                               fallback_from_host='fake-source-host')
            # The image must be pulled (receive=True) from the source host.
            mock_copy.assert_called_once_with(src='fake-target',
                                              dest='fake-target',
                                              host='fake-source-host',
                                              receive=True)
|
|
|
|
    @mock.patch('nova.privsep.fs.get_file_extension_for_os_type')
    def test_create_image_with_ephemerals(self, mock_get_ext):
        """Ephemeral disks from block_device_info are created and cached
        with names derived from their size and the guest filesystem
        extension.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance_ref = self.test_instance
        instance_ref['image_ref'] = ''
        instance = objects.Instance(**instance_ref)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        bdi = {'ephemerals': [{'size': 100}],
               'block_device_mapping': [{'boot_index': 0}]}
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance, image_meta,
                                            block_device_info=bdi)
        mock_get_ext.return_value = mock.sentinel.file_ext
        backend = self.useFixture(fake_imagebackend.ImageBackendFixture())

        drvr._create_image(self.context, instance, disk_info['mapping'],
                           block_device_info=bdi)

        # Cache filename encodes size (GB) and filesystem extension.
        filename = 'ephemeral_100_%s' % mock.sentinel.file_ext
        backend.mock_create_ephemeral.assert_called_once_with(
            target=filename, ephemeral_size=100, fs_label='ephemeral0',
            is_block_dev=mock.sentinel.is_block_dev, os_type='linux',
            specified_fs=None, context=self.context, vm_mode=None)
        backend.disks['disk.eph0'].cache.assert_called_once_with(
            fetch_func=mock.ANY, context=self.context,
            filename=filename, size=100 * units.Gi, ephemeral_size=mock.ANY,
            specified_fs=None)
|
    @mock.patch.object(nova.virt.libvirt.imagebackend.Image, 'cache')
    def test_create_image_resize_snap_backend(self, mock_cache):
        """During RESIZE_FINISH, _create_image snapshots the root disk so
        the resize can be reverted.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        instance.task_state = task_states.RESIZE_FINISH
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance,
                                            image_meta)

        fake_backend = self.useFixture(fake_imagebackend.ImageBackendFixture())

        drvr._create_image(self.context, instance, disk_info['mapping'])

        # Assert we called create_snap on the root disk
        fake_backend.disks['disk'].create_snap.assert_called_once_with(
            libvirt_utils.RESIZE_SNAPSHOT_NAME)
|
@mock.patch('nova.privsep.fs.mkfs')
|
|
def test_create_ephemeral_specified_fs(self, fake_mkfs):
|
|
self.flags(default_ephemeral_format='ext3')
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
drvr._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
|
|
is_block_dev=True, specified_fs='ext4')
|
|
fake_mkfs.assert_has_calls([mock.call('ext4', '/dev/something',
|
|
'myVol')])
|
|
|
|
@mock.patch('nova.privsep.path.utime')
|
|
@mock.patch('nova.virt.libvirt.utils.fetch_image')
|
|
@mock.patch('nova.virt.libvirt.utils.create_cow_image')
|
|
def test_create_ephemeral_specified_fs_not_valid(
|
|
self, mock_create_cow_image, mock_fetch_image, mock_utime):
|
|
CONF.set_override('default_ephemeral_format', 'ext4')
|
|
ephemerals = [{'device_type': 'disk',
|
|
'disk_bus': 'virtio',
|
|
'device_name': '/dev/vdb',
|
|
'guest_format': 'dummy',
|
|
'size': 1}]
|
|
block_device_info = {
|
|
'ephemerals': ephemerals}
|
|
instance_ref = self.test_instance
|
|
instance_ref['image_ref'] = 1
|
|
instance = objects.Instance(**instance_ref)
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
image_meta = objects.ImageMeta.from_dict({'disk_format': 'raw'})
|
|
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
|
|
instance,
|
|
image_meta)
|
|
disk_info['mapping'].pop('disk.local')
|
|
|
|
with test.nested(
|
|
mock.patch.object(utils, 'execute'),
|
|
mock.patch.object(drvr, 'get_info'),
|
|
mock.patch.object(drvr, '_create_domain_and_network'),
|
|
mock.patch.object(imagebackend.Image, 'verify_base_size'),
|
|
mock.patch.object(imagebackend.Image, 'get_disk_size')
|
|
) as (execute_mock, get_info_mock,
|
|
create_mock, verify_base_size_mock, disk_size_mock):
|
|
disk_size_mock.return_value = 0
|
|
self.assertRaises(exception.InvalidBDMFormat, drvr._create_image,
|
|
context, instance, disk_info['mapping'],
|
|
block_device_info=block_device_info)
|
|
|
|
@mock.patch('nova.privsep.fs.mkfs')
|
|
def test_create_ephemeral_default(self, fake_mkfs):
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
drvr._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
|
|
is_block_dev=True)
|
|
fake_mkfs.assert_has_calls([mock.call('ext4', '/dev/something',
|
|
'myVol')])
|
|
|
|
@mock.patch('nova.privsep.fs.mkfs')
|
|
def test_create_ephemeral_with_conf(self, fake_mkfs):
|
|
CONF.set_override('default_ephemeral_format', 'ext4')
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
drvr._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
|
|
is_block_dev=True)
|
|
fake_mkfs.assert_has_calls([mock.call('ext4', '/dev/something',
|
|
'myVol')])
|
|
|
|
@mock.patch('nova.privsep.fs.configurable_mkfs')
|
|
def test_create_ephemeral_with_arbitrary(self, fake_mkfs):
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
self.stub_out('nova.privsep.fs._MKFS_COMMAND',
|
|
{'linux': 'mkfs.ext4 --label %(fs_label)s %(target)s'})
|
|
drvr._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
|
|
is_block_dev=True)
|
|
fake_mkfs.assert_has_calls([mock.call('linux', 'myVol',
|
|
'/dev/something', True, None,
|
|
None)])
|
|
|
|
@mock.patch('nova.privsep.fs.configurable_mkfs')
|
|
def test_create_ephemeral_with_ext3(self, fake_mkfs):
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
self.stub_out('nova.privsep.fs._MKFS_COMMAND',
|
|
{'linux': 'mkfs.ext3 --label %(fs_label)s %(target)s'})
|
|
drvr._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
|
|
is_block_dev=True)
|
|
fake_mkfs.assert_has_calls([mock.call('linux', 'myVol',
|
|
'/dev/something', True, None,
|
|
None)])
|
|
|
|
@mock.patch('nova.virt.libvirt.utils.create_ploop_image')
|
|
def test_create_ephemeral_parallels(self, mock_create_ploop):
|
|
self.flags(virt_type='parallels', group='libvirt')
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
drvr._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
|
|
is_block_dev=False,
|
|
specified_fs='fs_format',
|
|
vm_mode=fields.VMMode.EXE)
|
|
mock_create_ploop.assert_called_once_with('expanded',
|
|
'/dev/something',
|
|
'20G', 'fs_format')
|
|
|
|
@mock.patch('nova.privsep.fs.unprivileged_mkfs')
|
|
@mock.patch('nova.virt.libvirt.utils.create_image')
|
|
def test_create_swap_default(self, mock_create_image, mock_mkfs):
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
drvr._create_swap('/dev/something', 1)
|
|
mock_mkfs.assert_has_calls([mock.call('swap', '/dev/something')])
|
|
|
|
    def test_ensure_console_log_for_instance_pass(self):
        """The console log file is touched (path resolved + opened) when
        it can be created without error.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        with test.nested(
                mock.patch.object(drvr, '_get_console_log_path'),
                mock.patch('nova.virt.libvirt.utils.file_open')
        ) as (mock_path, mock_open):
            drvr._ensure_console_log_for_instance(mock.ANY)
            mock_path.assert_called_once()
            mock_open.assert_called_once()
|
    def test_ensure_console_log_for_instance_pass_w_permissions(self):
        """EACCES while opening the console log is tolerated: the file may
        already exist owned by another user (e.g. qemu).
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        with test.nested(
                mock.patch.object(drvr, '_get_console_log_path'),
                mock.patch('nova.virt.libvirt.utils.file_open',
                           side_effect=IOError(errno.EACCES, 'exc'))
        ) as (mock_path, mock_open):
            drvr._ensure_console_log_for_instance(mock.ANY)
            mock_path.assert_called_once()
            mock_open.assert_called_once()
|
    def test_ensure_console_log_for_instance_fail(self):
        """IOErrors other than EACCES are not swallowed and propagate to
        the caller.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        with test.nested(
                mock.patch.object(drvr, '_get_console_log_path'),
                mock.patch('nova.virt.libvirt.utils.file_open',
                           side_effect=IOError(errno.EREMOTE, 'exc'))
        ) as (mock_path, mock_open):
            self.assertRaises(
                IOError,
                drvr._ensure_console_log_for_instance,
                mock.ANY)
|
    @mock.patch('nova.privsep.path.last_bytes',
                return_value=(b'67890', 0))
    def test_get_console_output_file(self, mock_last_bytes):
        """get_console_output reads the tail of a file-backed console log,
        truncated to MAX_CONSOLE_BYTES.
        """
        with utils.tempdir() as tmpdir:
            self.flags(instances_path=tmpdir)

            instance_ref = self.test_instance
            instance_ref['image_ref'] = 123456
            instance = objects.Instance(**instance_ref)

            console_dir = (os.path.join(tmpdir, instance['name']))
            console_log = '%s/console.log' % (console_dir)
            fake_dom_xml = """
                <domain type='kvm'>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                        </disk>
                        <console type='file'>
                            <source path='%s'/>
                            <target port='0'/>
                        </console>
                    </devices>
                </domain>
            """ % console_log

            def fake_lookup(id):
                return FakeVirtDomain(fake_dom_xml)

            self.create_fake_libvirt_mock()
            libvirt_driver.LibvirtDriver._conn.lookupByUUIDString = fake_lookup

            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

            # Temporarily shrink the module-level cap so the mocked
            # last_bytes result represents a truncated read; restore it in
            # finally so other tests are unaffected.
            try:
                prev_max = libvirt_driver.MAX_CONSOLE_BYTES
                libvirt_driver.MAX_CONSOLE_BYTES = 5
                with mock.patch('os.path.exists', return_value=True):
                    output = drvr.get_console_output(self.context, instance)
            finally:
                libvirt_driver.MAX_CONSOLE_BYTES = prev_max

            self.assertEqual(b'67890', output)
|
    # this test resembles test_get_console_output_file() except
    # that the instance was created with a tcp-based serial console
    # which results in a different XML
    @mock.patch('nova.privsep.path.last_bytes',
                return_value=(b'67891', 0))
    def test_get_console_output_tcp(self, mock_last_bytes):
        """get_console_output finds the log via the <log file=...> element
        when the console is tcp-based.
        """
        with utils.tempdir() as tmpdir:
            self.flags(instances_path=tmpdir)
            # flags to enable the serial console are not necessary
            # as we use a fake dom

            instance_ref = self.test_instance
            instance_ref['image_ref'] = 123456
            instance = objects.Instance(**instance_ref)

            console_dir = (os.path.join(tmpdir, instance['name']))
            console_log = '%s/console.log' % (console_dir)
            fake_dom_xml = """
                <domain type='kvm'>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                        </disk>
                        <console type='tcp'>
                            <log file='%s'/>
                        </console>
                    </devices>
                </domain>
            """ % console_log

            def fake_lookup(id):
                return FakeVirtDomain(fake_dom_xml)

            self.create_fake_libvirt_mock()
            libvirt_driver.LibvirtDriver._conn.lookupByUUIDString = fake_lookup

            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

            # Shrink the byte cap for the duration of the call only.
            try:
                prev_max = libvirt_driver.MAX_CONSOLE_BYTES
                libvirt_driver.MAX_CONSOLE_BYTES = 5
                with mock.patch('os.path.exists', return_value=True):
                    output = drvr.get_console_output(self.context, instance)
            finally:
                libvirt_driver.MAX_CONSOLE_BYTES = prev_max

            self.assertEqual(b'67891', output)
|
|
    def test_get_console_output_file_missing(self):
        """A missing console log file yields an empty string rather than
        an error.
        """
        with utils.tempdir() as tmpdir:
            self.flags(instances_path=tmpdir)

            instance_ref = self.test_instance
            instance_ref['image_ref'] = 123456
            instance = objects.Instance(**instance_ref)

            console_log = os.path.join(tmpdir, instance['name'],
                                       'non-existent.log')
            fake_dom_xml = """
                <domain type='kvm'>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                        </disk>
                        <console type='file'>
                            <source path='%s'/>
                            <target port='0'/>
                        </console>
                    </devices>
                </domain>
            """ % console_log

            def fake_lookup(id):
                return FakeVirtDomain(fake_dom_xml)

            self.create_fake_libvirt_mock()
            libvirt_driver.LibvirtDriver._conn.lookupByUUIDString = fake_lookup

            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

            with mock.patch('os.path.exists', return_value=False):
                output = drvr.get_console_output(self.context, instance)

            self.assertEqual('', output)
|
    @mock.patch('os.path.exists', return_value=True)
    @mock.patch('nova.privsep.path.last_bytes',
                return_value=(b'67890', 0))
    @mock.patch('nova.privsep.path.writefile')
    @mock.patch('nova.privsep.libvirt.readpty')
    def test_get_console_output_pty(self, mocked_readfile, mocked_writefile,
                                    mocked_last_bytes, mocked_path_exists):
        """For a pty console, the pty is drained to the log file and the
        tail of the log (capped at MAX_CONSOLE_BYTES) is returned.
        """
        with utils.tempdir() as tmpdir:
            self.flags(instances_path=tmpdir)

            instance_ref = self.test_instance
            instance_ref['image_ref'] = 123456
            instance = objects.Instance(**instance_ref)

            console_dir = (os.path.join(tmpdir, instance['name']))
            pty_file = '%s/fake_pty' % (console_dir)
            fake_dom_xml = """
                <domain type='kvm'>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                        </disk>
                        <console type='pty'>
                            <source path='%s'/>
                            <target port='0'/>
                        </console>
                    </devices>
                </domain>
            """ % pty_file

            def fake_lookup(id):
                return FakeVirtDomain(fake_dom_xml)

            mocked_readfile.return_value = 'foo'

            self.create_fake_libvirt_mock()
            libvirt_driver.LibvirtDriver._conn.lookupByUUIDString = fake_lookup

            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

            # Shrink the byte cap for the duration of the call only.
            try:
                prev_max = libvirt_driver.MAX_CONSOLE_BYTES
                libvirt_driver.MAX_CONSOLE_BYTES = 5
                output = drvr.get_console_output(self.context, instance)
            finally:
                libvirt_driver.MAX_CONSOLE_BYTES = prev_max

            self.assertEqual(b'67890', output)
|
    def test_get_console_output_pty_not_available(self):
        """A pty console element without a source path means there is
        nothing to read, so ConsoleNotAvailable is raised.
        """
        instance = objects.Instance(**self.test_instance)
        fake_dom_xml = """
            <domain type='kvm'>
                <devices>
                    <disk type='file'>
                        <source file='filename'/>
                    </disk>
                    <console type='pty'>
                        <target port='0'/>
                    </console>
                </devices>
            </domain>
        """

        def fake_lookup(id):
            return FakeVirtDomain(fake_dom_xml)

        self.create_fake_libvirt_mock()
        libvirt_driver.LibvirtDriver._conn.lookupByUUIDString = fake_lookup
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(exception.ConsoleNotAvailable,
                          drvr.get_console_output, self.context, instance)
|
    @mock.patch('nova.virt.libvirt.host.Host._get_domain')
    @mock.patch.object(libvirt_guest.Guest, "get_xml_desc")
    def test_get_console_output_not_available(self, mock_get_xml, get_domain):
        """An unsupported console type ('foo') results in
        ConsoleNotAvailable.
        """
        xml = """
            <domain type='kvm'>
                <devices>
                    <disk type='file'>
                        <source file='filename'/>
                    </disk>
                    <console type='foo'>
                        <source path='srcpath'/>
                        <target port='0'/>
                    </console>
                </devices>
            </domain>
        """
        mock_get_xml.return_value = xml
        get_domain.return_value = mock.MagicMock()
        instance = objects.Instance(**self.test_instance)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(exception.ConsoleNotAvailable,
                          drvr.get_console_output, self.context, instance)
|
    @mock.patch('nova.virt.libvirt.host.Host._get_domain')
    @mock.patch.object(libvirt_guest.Guest, 'get_xml_desc')
    def test_get_console_output_logrotate(self, mock_get_xml, get_domain):
        """Console output spans rotated log files (console.log,
        console.log.0, console.log.1) and is assembled oldest-first up to
        MAX_CONSOLE_BYTES.
        """
        # Three rotations, newest content in 'console.log'.
        fake_libvirt_utils.files['console.log'] = b'uvwxyz'
        fake_libvirt_utils.files['console.log.0'] = b'klmnopqrst'
        fake_libvirt_utils.files['console.log.1'] = b'abcdefghij'

        def mock_path_exists(path):
            return os.path.basename(path) in fake_libvirt_utils.files

        def mock_last_bytes(path, count):
            with fake_libvirt_utils.file_open(path) as flo:
                return nova.privsep.path._last_bytes_inner(flo, count)

        xml = """
            <domain type='kvm'>
                <devices>
                    <disk type='file'>
                        <source file='filename'/>
                    </disk>
                    <console type='file'>
                        <source path='console.log'/>
                        <target port='0'/>
                    </console>
                </devices>
            </domain>
        """
        mock_get_xml.return_value = xml
        get_domain.return_value = mock.MagicMock()

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance = objects.Instance(**self.test_instance)

        def _get_logd_output(bytes_to_read):
            # Run get_console_output with MAX_CONSOLE_BYTES temporarily
            # set to bytes_to_read, restoring the cap afterwards.
            with utils.tempdir() as tmp_dir:
                self.flags(instances_path=tmp_dir)
                log_data = ""
                try:
                    prev_max = libvirt_driver.MAX_CONSOLE_BYTES
                    libvirt_driver.MAX_CONSOLE_BYTES = bytes_to_read
                    with mock.patch('os.path.exists',
                                    side_effect=mock_path_exists):
                        with mock.patch('nova.privsep.path.last_bytes',
                                        side_effect=mock_last_bytes):
                            log_data = drvr.get_console_output(self.context,
                                                               instance)
                finally:
                    libvirt_driver.MAX_CONSOLE_BYTES = prev_max
                return log_data

        # span across only 1 file (with remaining bytes)
        self.assertEqual(b'wxyz', _get_logd_output(4))
        # span across only 1 file (exact bytes)
        self.assertEqual(b'uvwxyz', _get_logd_output(6))
        # span across 2 files (with remaining bytes)
        self.assertEqual(b'opqrstuvwxyz', _get_logd_output(12))
        # span across all files (exact bytes)
        self.assertEqual(b'abcdefghijklmnopqrstuvwxyz', _get_logd_output(26))
        # span across all files with more bytes than available
        self.assertEqual(b'abcdefghijklmnopqrstuvwxyz', _get_logd_output(30))
        # files are not available
        fake_libvirt_utils.files = {}
        self.assertEqual('', _get_logd_output(30))
        # reset the file for other tests
        fake_libvirt_utils.files['console.log'] = b'01234567890'
|
def test_get_host_ip_addr(self):
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
ip = drvr.get_host_ip_addr()
|
|
self.assertEqual(ip, CONF.my_ip)
|
|
|
|
    @mock.patch.object(libvirt_driver.LOG, 'warning')
    @mock.patch('nova.compute.utils.get_machine_ips')
    def test_get_host_ip_addr_failure(self, mock_ips, mock_log):
        """A warning is logged when my_ip is not bound to any local
        interface.
        """
        mock_ips.return_value = ['8.8.8.8', '75.75.75.75']
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        drvr.get_host_ip_addr()
        mock_log.assert_called_once_with(u'my_ip address (%(my_ip)s) was '
                                         u'not found on any of the '
                                         u'interfaces: %(ifaces)s',
                                         {'ifaces': '8.8.8.8, 75.75.75.75',
                                          'my_ip': mock.ANY})
|
    def test_conn_event_handler(self):
        """init_host raises HypervisorUnavailable and disables the service
        when the libvirt connection cannot be established.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        service_mock = mock.MagicMock()
        service_mock.disabled.return_value = False
        with test.nested(
            mock.patch.object(drvr._host, "_connect",
                              side_effect=fakelibvirt.make_libvirtError(
                                  fakelibvirt.libvirtError,
                                  "Failed to connect to host",
                                  error_code=
                                  fakelibvirt.VIR_ERR_INTERNAL_ERROR)),
            mock.patch.object(drvr._host, "_init_events",
                              return_value=None),
            mock.patch.object(objects.Service, "get_by_compute_host",
                              return_value=service_mock)):

            # verify that the driver registers for the close callback
            # and re-connects after receiving the callback
            self.assertRaises(exception.HypervisorUnavailable,
                              drvr.init_host,
                              "wibble")
            self.assertTrue(service_mock.disabled)
|
def test_command_with_broken_connection(self):
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
service_mock = mock.MagicMock()
|
|
service_mock.disabled.return_value = False
|
|
with test.nested(
|
|
mock.patch.object(drvr._host, "_connect",
|
|
side_effect=fakelibvirt.make_libvirtError(
|
|
fakelibvirt.libvirtError,
|
|
"Failed to connect to host",
|
|
error_code=
|
|
fakelibvirt.VIR_ERR_INTERNAL_ERROR)),
|
|
mock.patch.object(drvr._host, "_init_events",
|
|
return_value=None),
|
|
mock.patch.object(host.Host, "has_min_version",
|
|
return_value=True),
|
|
mock.patch.object(drvr, "_do_quality_warnings",
|
|
return_value=None),
|
|
mock.patch.object(objects.Service, "get_by_compute_host",
|
|
return_value=service_mock),
|
|
mock.patch.object(host.Host, "get_capabilities")):
|
|
|
|
self.assertRaises(exception.HypervisorUnavailable,
|
|
drvr.init_host, ("wibble",))
|
|
self.assertTrue(service_mock.disabled)
|
|
|
|
    def test_service_resume_after_broken_connection(self):
        """A previously disabled service is re-enabled once the libvirt
        connection is re-established and the conn event is dispatched.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        service_mock = mock.MagicMock()
        # Simulate a service that was disabled by an earlier failure.
        service_mock.disabled.return_value = True
        with test.nested(
            mock.patch.object(drvr._host, "_connect",
                              return_value=mock.MagicMock()),
            mock.patch.object(drvr._host, "_init_events",
                              return_value=None),
            mock.patch.object(host.Host, "has_min_version",
                              return_value=True),
            mock.patch.object(drvr, "_do_quality_warnings",
                              return_value=None),
            mock.patch.object(objects.Service, "get_by_compute_host",
                              return_value=service_mock),
            mock.patch.object(host.Host, "get_capabilities")):

            drvr.init_host("wibble")
            drvr.get_num_instances()
            drvr._host._dispatch_conn_event()
            self.assertFalse(service_mock.disabled)
            self.assertIsNone(service_mock.disabled_reason)
|
    @mock.patch.object(libvirt_driver.LibvirtDriver, 'delete_instance_files')
    @mock.patch.object(host.Host, '_get_domain',
                       side_effect=exception.InstanceNotFound(
                           instance_id=uuids.instance))
    @mock.patch.object(objects.Instance, 'save')
    def test_immediate_delete(self, mock_save, mock_get, mock_delete):
        """destroy() of an instance with no libvirt domain still cleans up
        instance files and saves the instance.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        instance = objects.Instance(self.context, **self.test_instance)
        drvr.destroy(self.context, instance, {})
        mock_save.assert_called_once_with()
        # _get_domain is consulted three times during destroy/cleanup.
        mock_get.assert_has_calls([mock.call(instance)] * 3)
        self.assertEqual(3, mock_get.call_count)
        mock_delete.assert_called_once_with(instance)
|
@mock.patch.object(objects.Instance, 'get_by_uuid')
|
|
@mock.patch.object(objects.Instance, 'obj_load_attr', autospec=True)
|
|
@mock.patch.object(objects.Instance, 'save', autospec=True)
|
|
@mock.patch.object(libvirt_driver.LibvirtDriver, '_destroy')
|
|
@mock.patch.object(libvirt_driver.LibvirtDriver, 'delete_instance_files')
|
|
@mock.patch.object(libvirt_driver.LibvirtDriver, '_disconnect_volume')
|
|
@mock.patch.object(driver, 'block_device_info_get_mapping')
|
|
@mock.patch.object(libvirt_driver.LibvirtDriver, '_undefine_domain')
|
|
def _test_destroy_removes_disk(self, mock_undefine_domain, mock_mapping,
|
|
mock_disconnect_volume,
|
|
mock_delete_instance_files, mock_destroy,
|
|
mock_inst_save, mock_inst_obj_load_attr,
|
|
mock_get_by_uuid, volume_fail=False):
|
|
instance = objects.Instance(self.context, **self.test_instance)
|
|
vol = {'block_device_mapping': [
|
|
{'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]}
|
|
|
|
mock_mapping.return_value = vol['block_device_mapping']
|
|
mock_delete_instance_files.return_value = True
|
|
mock_get_by_uuid.return_value = instance
|
|
if volume_fail:
|
|
mock_disconnect_volume.return_value = (
|
|
exception.VolumeNotFound('vol'))
|
|
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
drvr.destroy(self.context, instance, [], vol)
|
|
|
|
    def test_destroy_removes_disk(self):
        """Happy path: volume disconnect succeeds during destroy."""
        self._test_destroy_removes_disk(volume_fail=False)
|
|
    def test_destroy_removes_disk_volume_fails(self):
        """Destroy still completes when the volume disconnect fails."""
        self._test_destroy_removes_disk(volume_fail=True)
|
|
    @mock.patch.object(libvirt_driver.LibvirtDriver, 'unplug_vifs')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_destroy')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_undefine_domain')
    def test_destroy_not_removes_disk(self, mock_undefine_domain, mock_destroy,
                                      mock_unplug_vifs):
        """destroy() with destroy_disks=False completes without touching
        instance disks.
        """
        instance = fake_instance.fake_instance_obj(
            None, name='instancename', id=1,
            uuid='875a8070-d0b9-4949-8b31-104d125c9a64')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        # Final False positional argument is destroy_disks.
        drvr.destroy(self.context, instance, [], None, False)
|
    @mock.patch.object(libvirt_driver.LibvirtDriver, 'cleanup')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_teardown_container')
    @mock.patch.object(host.Host, '_get_domain')
    def test_destroy_lxc_calls_teardown_container(self, mock_get_domain,
                                                  mock_teardown_container,
                                                  mock_cleanup):
        """For LXC, destroy() tears down the container filesystem after
        destroying the domain.
        """
        self.flags(virt_type='lxc', group='libvirt')
        fake_domain = FakeVirtDomain()

        def destroy_side_effect(*args, **kwargs):
            # Flip the fake domain to SHUTDOWN so the destroy loop exits.
            fake_domain._info[0] = power_state.SHUTDOWN

        with mock.patch.object(fake_domain, 'destroy',
               side_effect=destroy_side_effect) as mock_domain_destroy:
            mock_get_domain.return_value = fake_domain
            instance = objects.Instance(**self.test_instance)

            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
            network_info = []
            drvr.destroy(self.context, instance, network_info, None, False)

            mock_get_domain.assert_has_calls([mock.call(instance),
                                              mock.call(instance)])
            mock_domain_destroy.assert_called_once_with()
            mock_teardown_container.assert_called_once_with(instance)
            mock_cleanup.assert_called_once_with(self.context, instance,
                                                 network_info, None, False)
|
|
    @mock.patch.object(libvirt_driver.LibvirtDriver, 'cleanup')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_teardown_container')
    @mock.patch.object(host.Host, '_get_domain')
    def test_destroy_lxc_calls_teardown_container_when_no_domain(self,
            mock_get_domain, mock_teardown_container, mock_cleanup):
        """The LXC container is torn down even when the libvirt domain no
        longer exists.
        """
        self.flags(virt_type='lxc', group='libvirt')
        instance = objects.Instance(**self.test_instance)
        inf_exception = exception.InstanceNotFound(instance_id=instance.uuid)
        mock_get_domain.side_effect = inf_exception

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        network_info = []
        drvr.destroy(self.context, instance, network_info, None, False)

        mock_get_domain.assert_has_calls([mock.call(instance),
                                          mock.call(instance)])
        mock_teardown_container.assert_called_once_with(instance)
        mock_cleanup.assert_called_once_with(self.context, instance,
                                             network_info, None, False)
|
|
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_create_domain')
    @mock.patch.object(host.Host, '_get_domain')
    def test_reboot_different_ids(self, mock_get, mock_create):
        """A soft reboot succeeds when the domain ID changes, indicating
        the guest restarted by itself; no hard reboot is needed.
        """
        class FakeLoopingCall(object):
            # Minimal stand-in for FixedIntervalLoopingCall: runs nothing.
            def start(self, *a, **k):
                return self

            def wait(self):
                return None

        self.flags(wait_soft_reboot_seconds=1, group='libvirt')
        info_tuple = ('fake', 'fake', 'fake', 'also_fake')

        # Mock domain
        mock_domain = mock.create_autospec(fakelibvirt.virDomain)
        # First RUNNING, then CRASHED after shutdown was requested.
        mock_domain.info.side_effect = [
            (libvirt_guest.VIR_DOMAIN_RUNNING,) + info_tuple,
            (libvirt_guest.VIR_DOMAIN_CRASHED,) + info_tuple]
        # ID changes between polls -> the guest rebooted on its own.
        mock_domain.ID.side_effect = ['some_fake_id', 'some_fake_id',
                                      'some_other_fake_id',
                                      'some_other_fake_id']

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)

        mock_get.return_value = mock_domain
        self.stub_out('oslo_service.loopingcall.FixedIntervalLoopingCall',
                      lambda *a, **k: FakeLoopingCall())
        self.stub_out('nova.pci.manager.get_instance_pci_devs', lambda *a: [])

        drvr.reboot(None, instance, [], 'SOFT')

        mock_domain.info.assert_has_calls([mock.call()] * 2)
        self.assertEqual(2, mock_domain.info.call_count)
        mock_domain.ID.assert_has_calls([mock.call()] * 4)
        self.assertEqual(4, mock_domain.ID.call_count)
        mock_domain.shutdown.assert_called_once_with()
        mock_get.assert_has_calls([mock.call(instance)] * 2, any_order=True)
        self.assertEqual(2, mock_get.call_count)
        mock_create.assert_called_once_with(domain=mock_domain)
|
|
    @mock.patch.object(pci_manager, 'get_instance_pci_devs')
    @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
    @mock.patch.object(greenthread, 'sleep')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_hard_reboot')
    @mock.patch.object(host.Host, '_get_domain')
    def test_reboot_same_ids(self, mock_get_domain, mock_hard_reboot,
                             mock_sleep, mock_loopingcall,
                             mock_get_instance_pci_devs):
        """When the domain ID does not change within the soft-reboot
        window, the driver falls back to a hard reboot.
        """
        class FakeLoopingCall(object):
            # Minimal stand-in for FixedIntervalLoopingCall: runs nothing.
            def start(self, *a, **k):
                return self

            def wait(self):
                return None

        self.flags(wait_soft_reboot_seconds=1, group='libvirt')
        info_tuple = ('fake', 'fake', 'fake', 'also_fake')
        self.reboot_hard_reboot_called = False

        # Mock domain
        mock_domain = mock.Mock(fakelibvirt.virDomain)
        return_values = [(libvirt_guest.VIR_DOMAIN_RUNNING,) + info_tuple,
                         (libvirt_guest.VIR_DOMAIN_CRASHED,) + info_tuple]
        mock_domain.info.side_effect = return_values
        # Same ID on every poll -> guest never restarted by itself.
        mock_domain.ID.return_value = 'some_fake_id'
        mock_domain.shutdown.side_effect = mock.Mock()

        def fake_hard_reboot(*args, **kwargs):
            self.reboot_hard_reboot_called = True

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        mock_get_domain.return_value = mock_domain
        mock_hard_reboot.side_effect = fake_hard_reboot
        mock_loopingcall.return_value = FakeLoopingCall()
        mock_get_instance_pci_devs.return_value = []
        drvr.reboot(None, instance, [], 'SOFT')
        self.assertTrue(self.reboot_hard_reboot_called)
|
|
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_hard_reboot')
    @mock.patch.object(host.Host, '_get_domain')
    def test_soft_reboot_libvirt_exception(self, mock_get_domain,
                                           mock_hard_reboot):
        # Tests that a hard reboot is performed when a soft reboot results
        # in raising a libvirtError.
        info_tuple = ('fake', 'fake', 'fake', 'also_fake')
        # setup mocks
        mock_virDomain = mock.Mock(fakelibvirt.virDomain)
        mock_virDomain.info.return_value = (
            (libvirt_guest.VIR_DOMAIN_RUNNING,) + info_tuple)
        mock_virDomain.ID.return_value = 'some_fake_id'
        # shutdown() failing triggers the hard-reboot fallback.
        mock_virDomain.shutdown.side_effect = fakelibvirt.libvirtError('Err')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        context = None
        instance = objects.Instance(**self.test_instance)
        network_info = []
        mock_get_domain.return_value = mock_virDomain

        drvr.reboot(context, instance, network_info, 'SOFT')
|
|
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_hard_reboot')
    @mock.patch.object(host.Host, '_get_domain')
    def _test_resume_state_on_host_boot_with_state(self, state,
                                                   mock_get_domain,
                                                   mock_hard_reboot):
        """Resume an instance on host boot with the domain reporting the
        given power state, and verify a hard reboot happens only for
        states outside the ignored set.
        """
        mock_virDomain = mock.Mock(fakelibvirt.virDomain)
        mock_virDomain.info.return_value = ([state, None, None, None, None])

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        mock_get_domain.return_value = mock_virDomain
        instance = objects.Instance(**self.test_instance)
        network_info = _fake_network_info(self, 1)

        drvr.resume_state_on_host_boot(self.context, instance, network_info,
                                       block_device_info=None)

        # These states need no intervention; everything else is rebooted.
        ignored_states = (power_state.RUNNING,
                          power_state.SUSPENDED,
                          power_state.NOSTATE,
                          power_state.PAUSED)
        self.assertEqual(mock_hard_reboot.called, state not in ignored_states)
|
|
    def test_resume_state_on_host_boot_with_running_state(self):
        """A RUNNING instance is left alone on host boot."""
        self._test_resume_state_on_host_boot_with_state(power_state.RUNNING)
|
|
|
    def test_resume_state_on_host_boot_with_suspended_state(self):
        """A SUSPENDED instance is left alone on host boot."""
        self._test_resume_state_on_host_boot_with_state(power_state.SUSPENDED)
|
|
|
    def test_resume_state_on_host_boot_with_paused_state(self):
        """A PAUSED instance is left alone on host boot."""
        self._test_resume_state_on_host_boot_with_state(power_state.PAUSED)
|
|
|
    def test_resume_state_on_host_boot_with_nostate(self):
        # NOSTATE is an ignored state: no hard reboot expected.
        self._test_resume_state_on_host_boot_with_state(power_state.NOSTATE)
def test_resume_state_on_host_boot_with_shutdown_state(self):
|
|
self._test_resume_state_on_host_boot_with_state(power_state.RUNNING)
|
|
|
|
    def test_resume_state_on_host_boot_with_crashed_state(self):
        # CRASHED is not ignored: a hard reboot is expected.
        self._test_resume_state_on_host_boot_with_state(power_state.CRASHED)
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_hard_reboot')
    @mock.patch.object(host.Host, '_get_domain')
    def test_resume_state_on_host_boot_with_instance_not_found_on_driver(
            self, mock_get_domain, mock_hard_reboot):
        """If libvirt has no domain for the instance, resume on host boot
        falls back to a hard reboot to recreate it.
        """
        instance = objects.Instance(**self.test_instance)

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        mock_get_domain.side_effect = exception.InstanceNotFound(
            instance_id='fake')
        drvr.resume_state_on_host_boot(self.context, instance, network_info=[],
                                       block_device_info=None)

        mock_hard_reboot.assert_called_once_with(self.context,
                                                 instance, [], None)
    @mock.patch('nova.virt.libvirt.LibvirtDriver.get_info')
    @mock.patch('nova.virt.libvirt.LibvirtDriver._create_domain_and_network')
    @mock.patch('nova.virt.libvirt.LibvirtDriver._get_guest_xml')
    @mock.patch('nova.virt.libvirt.LibvirtDriver.'
                '_get_instance_disk_info_from_config')
    @mock.patch('nova.virt.libvirt.LibvirtDriver.destroy')
    @mock.patch('nova.virt.libvirt.LibvirtDriver.'
                '_get_all_assigned_mediated_devices')
    def test_hard_reboot(self, mock_get_mdev, mock_destroy, mock_get_disk_info,
                         mock_get_guest_xml, mock_create_domain_and_network,
                         mock_get_info):
        """A hard reboot destroys the domain, recreates the disk backing
        files and recreates the domain with the previously assigned
        mediated devices.
        """
        self.context.auth_token = True  # any non-None value will suffice
        instance = objects.Instance(**self.test_instance)
        network_info = _fake_network_info(self, 1)
        block_device_info = None

        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices>"
                    "<disk type='file'><driver name='qemu' type='raw'/>"
                    "<source file='/test/disk'/>"
                    "<target dev='vda' bus='virtio'/></disk>"
                    "<disk type='file'><driver name='qemu' type='qcow2'/>"
                    "<source file='/test/disk.local'/>"
                    "<target dev='vdb' bus='virtio'/></disk>"
                    "</devices></domain>")

        mock_get_mdev.return_value = {uuids.mdev1: uuids.inst1}
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        # First get_info() shows the guest down, second shows it running
        # again after the reboot.
        return_values = [hardware.InstanceInfo(state=power_state.SHUTDOWN),
                         hardware.InstanceInfo(state=power_state.RUNNING)]
        mock_get_info.side_effect = return_values

        mock_get_guest_xml.return_value = dummyxml
        mock_get_disk_info.return_value = \
            fake_disk_info_byname(instance).values()

        backend = self.useFixture(fake_imagebackend.ImageBackendFixture())

        with mock.patch('os.path.exists', return_value=True):
            drvr._hard_reboot(self.context, instance, network_info,
                              block_device_info)

        disks = backend.disks

        # NOTE(mdbooth): _create_images_and_backing() passes a full path in
        # 'disk_name' when creating a disk. This is wrong, but happens to
        # work due to handling by each individual backend. This will be
        # fixed in a subsequent commit.
        #
        # We translate all the full paths into disk names here to make the
        # test readable
        disks = {os.path.basename(name): value
                 for name, value in disks.items()}

        # We should have called cache() on the root and ephemeral disks
        for name in ('disk', 'disk.local'):
            self.assertTrue(disks[name].cache.called)

        mock_get_mdev.assert_called_once_with(instance)
        mock_destroy.assert_called_once_with(self.context, instance,
                network_info, destroy_disks=False,
                block_device_info=block_device_info)

        mock_get_guest_xml.assert_called_once_with(self.context, instance,
                network_info, mock.ANY, mock.ANY,
                block_device_info=block_device_info, mdevs=[uuids.mdev1])
        mock_create_domain_and_network.assert_called_once_with(self.context,
                dummyxml, instance, network_info,
                block_device_info=block_device_info, vifs_already_plugged=True)
    @mock.patch('oslo_utils.fileutils.ensure_tree')
    @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall')
    @mock.patch('nova.pci.manager.get_instance_pci_devs')
    @mock.patch('nova.virt.libvirt.LibvirtDriver._prepare_pci_devices_for_use')
    @mock.patch('nova.virt.libvirt.LibvirtDriver._create_domain_and_network')
    @mock.patch('nova.virt.libvirt.LibvirtDriver._create_images_and_backing')
    @mock.patch('nova.virt.libvirt.LibvirtDriver.'
                '_get_instance_disk_info_from_config')
    @mock.patch('nova.virt.libvirt.utils.write_to_file')
    @mock.patch('nova.virt.libvirt.utils.get_instance_path')
    @mock.patch('nova.virt.libvirt.LibvirtDriver._get_guest_config')
    @mock.patch('nova.virt.libvirt.blockinfo.get_disk_info')
    @mock.patch('nova.virt.libvirt.LibvirtDriver._destroy')
    @mock.patch('nova.virt.libvirt.LibvirtDriver.'
                '_get_all_assigned_mediated_devices')
    def test_hard_reboot_does_not_call_glance_show(self,
            mock_get_mdev, mock_destroy, mock_get_disk_info,
            mock_get_guest_config, mock_get_instance_path, mock_write_to_file,
            mock_get_instance_disk_info, mock_create_images_and_backing,
            mock_create_domand_and_network, mock_prepare_pci_devices_for_use,
            mock_get_instance_pci_devs, mock_looping_call, mock_ensure_tree):
        """For a hard reboot, we shouldn't need an additional call to glance
        to get the image metadata.

        This is important for automatically spinning up instances on a
        host-reboot, since we won't have a user request context that'll allow
        the Glance request to go through. We have to rely on the cached image
        metadata, instead.

        https://bugs.launchpad.net/nova/+bug/1339386
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        instance = objects.Instance(**self.test_instance)

        mock_get_mdev.return_value = {}

        network_info = mock.MagicMock()
        block_device_info = mock.MagicMock()
        mock_get_disk_info.return_value = {}
        mock_get_guest_config.return_value = mock.MagicMock()
        mock_get_instance_path.return_value = '/foo'
        mock_looping_call.return_value = mock.MagicMock()
        # A mock image API lets us observe whether Glance was consulted.
        drvr._image_api = mock.MagicMock()

        drvr._hard_reboot(self.context, instance, network_info,
                          block_device_info)

        self.assertFalse(drvr._image_api.get.called)
        mock_ensure_tree.assert_called_once_with('/foo')
    def test_suspend(self):
        """suspend() managed-saves the domain and, with no LVM-encrypted
        ephemeral disks (ephemeral_key_uuid is None), neither inspects the
        disk config nor deletes any dmcrypt volumes.
        """
        guest = libvirt_guest.Guest(FakeVirtDomain(id=1))
        dom = guest._domain

        instance = objects.Instance(**self.test_instance)
        instance.ephemeral_key_uuid = None

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        # The patches target instance attributes of ``conn``/``conn._host``,
        # so they are applied to a local closure rather than to the test
        # method itself.
        @mock.patch.object(dmcrypt, 'delete_volume')
        @mock.patch.object(conn, '_get_instance_disk_info_from_config',
                           return_value=[])
        @mock.patch.object(conn, '_detach_mediated_devices')
        @mock.patch.object(conn, '_detach_direct_passthrough_ports')
        @mock.patch.object(conn, '_detach_pci_devices')
        @mock.patch.object(pci_manager, 'get_instance_pci_devs',
                           return_value='pci devs')
        @mock.patch.object(conn._host, 'get_guest', return_value=guest)
        def suspend(mock_get_guest, mock_get_instance_pci_devs,
                    mock_detach_pci_devices,
                    mock_detach_direct_passthrough_ports,
                    mock_detach_mediated_devices,
                    mock_get_instance_disk_info,
                    mock_delete_volume):
            mock_managedSave = mock.Mock()
            dom.managedSave = mock_managedSave

            conn.suspend(self.context, instance)

            mock_managedSave.assert_called_once_with(0)
            self.assertFalse(mock_get_instance_disk_info.called)
            mock_delete_volume.assert_has_calls([mock.call(disk['path'])
                for disk in mock_get_instance_disk_info.return_value], False)

        suspend()
    @mock.patch.object(time, 'sleep')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_create_domain')
    @mock.patch.object(host.Host, '_get_domain')
    def _test_clean_shutdown(self, mock_get_domain, mock_create_domain,
                             mock_sleep, seconds_to_shutdown,
                             timeout, retry_interval,
                             shutdown_attempts, succeeds):
        """Helper driving _clean_shutdown() against a scripted domain.

        The domain reports RUNNING for ``seconds_to_shutdown`` polls (or
        until ``timeout``), then SHUTDOWN if it made it in time. Asserts
        the number of shutdown() attempts issued and the overall result.
        """
        info_tuple = ('fake', 'fake', 'fake', 'also_fake')
        shutdown_count = []

        # Mock domain
        mock_domain = mock.Mock(fakelibvirt.virDomain)
        return_infos = [(libvirt_guest.VIR_DOMAIN_RUNNING,) + info_tuple]
        return_shutdowns = [shutdown_count.append("shutdown")]
        retry_countdown = retry_interval
        # Build the per-poll info() results and record a "shutdown" every
        # retry_interval polls while the guest is still running.
        for x in range(min(seconds_to_shutdown, timeout)):
            return_infos.append(
                (libvirt_guest.VIR_DOMAIN_RUNNING,) + info_tuple)
            if retry_countdown == 0:
                return_shutdowns.append(shutdown_count.append("shutdown"))
                retry_countdown = retry_interval
            else:
                retry_countdown -= 1

        if seconds_to_shutdown < timeout:
            return_infos.append(
                (libvirt_guest.VIR_DOMAIN_SHUTDOWN,) + info_tuple)

        mock_domain.info.side_effect = return_infos
        mock_domain.shutdown.side_effect = return_shutdowns

        def fake_create_domain(**kwargs):
            self.reboot_create_called = True

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        mock_get_domain.return_value = mock_domain
        mock_create_domain.side_effect = fake_create_domain
        result = drvr._clean_shutdown(instance, timeout, retry_interval)

        self.assertEqual(succeeds, result)
        self.assertEqual(shutdown_attempts, len(shutdown_count))
    def test_clean_shutdown_first_time(self):
        # Guest stops (2s) well within the timeout (5s): a single
        # shutdown attempt succeeds.
        self._test_clean_shutdown(seconds_to_shutdown=2,
                                  timeout=5,
                                  retry_interval=3,
                                  shutdown_attempts=1,
                                  succeeds=True)
    def test_clean_shutdown_with_retry(self):
        # Guest takes 4s, so a second shutdown attempt is issued after the
        # 3s retry interval; still succeeds within the 5s timeout.
        self._test_clean_shutdown(seconds_to_shutdown=4,
                                  timeout=5,
                                  retry_interval=3,
                                  shutdown_attempts=2,
                                  succeeds=True)
    def test_clean_shutdown_failure(self):
        # Guest would need 6s but the timeout is 5s: two attempts are
        # made and the clean shutdown ultimately fails.
        self._test_clean_shutdown(seconds_to_shutdown=6,
                                  timeout=5,
                                  retry_interval=3,
                                  shutdown_attempts=2,
                                  succeeds=False)
    def test_clean_shutdown_no_wait(self):
        # Zero timeout: one attempt, no waiting, immediate failure.
        self._test_clean_shutdown(seconds_to_shutdown=6,
                                  timeout=0,
                                  retry_interval=3,
                                  shutdown_attempts=1,
                                  succeeds=False)
@mock.patch.object(FakeVirtDomain, 'attachDeviceFlags')
|
|
@mock.patch.object(FakeVirtDomain, 'ID', return_value=1)
|
|
@mock.patch.object(utils, 'get_image_from_system_metadata',
|
|
return_value=None)
|
|
def test_attach_direct_passthrough_ports(self,
|
|
mock_get_image_metadata, mock_ID, mock_attachDevice):
|
|
instance = objects.Instance(**self.test_instance)
|
|
|
|
network_info = _fake_network_info(self, 1)
|
|
network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT
|
|
guest = libvirt_guest.Guest(FakeVirtDomain())
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
|
|
drvr._attach_direct_passthrough_ports(
|
|
self.context, instance, guest, network_info)
|
|
mock_get_image_metadata.assert_called_once_with(
|
|
instance.system_metadata)
|
|
self.assertTrue(mock_attachDevice.called)
|
|
|
|
@mock.patch.object(FakeVirtDomain, 'attachDeviceFlags')
|
|
@mock.patch.object(FakeVirtDomain, 'ID', return_value=1)
|
|
@mock.patch.object(utils, 'get_image_from_system_metadata',
|
|
return_value=None)
|
|
def test_attach_direct_physical_passthrough_ports(self,
|
|
mock_get_image_metadata, mock_ID, mock_attachDevice):
|
|
instance = objects.Instance(**self.test_instance)
|
|
|
|
network_info = _fake_network_info(self, 1)
|
|
network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT_PHYSICAL
|
|
guest = libvirt_guest.Guest(FakeVirtDomain())
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
|
|
drvr._attach_direct_passthrough_ports(
|
|
self.context, instance, guest, network_info)
|
|
mock_get_image_metadata.assert_called_once_with(
|
|
instance.system_metadata)
|
|
self.assertTrue(mock_attachDevice.called)
|
|
|
|
    @mock.patch.object(FakeVirtDomain, 'attachDeviceFlags')
    @mock.patch.object(FakeVirtDomain, 'ID', return_value=1)
    @mock.patch.object(utils, 'get_image_from_system_metadata',
                       return_value=None)
    def test_attach_direct_passthrough_ports_with_info_cache(self,
        mock_get_image_metadata, mock_ID, mock_attachDevice):
        """When no network_info argument is given, the ports to attach are
        taken from the instance's info cache.
        """
        instance = objects.Instance(**self.test_instance)

        network_info = _fake_network_info(self, 1)
        network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT
        instance.info_cache = objects.InstanceInfoCache(
            network_info=network_info)
        guest = libvirt_guest.Guest(FakeVirtDomain())
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        # network_info=None: the driver must fall back to the info cache.
        drvr._attach_direct_passthrough_ports(
            self.context, instance, guest, None)
        mock_get_image_metadata.assert_called_once_with(
            instance.system_metadata)
        self.assertTrue(mock_attachDevice.called)
@mock.patch.object(host.Host,
|
|
'has_min_version', return_value=True)
|
|
def _test_detach_direct_passthrough_ports(self,
|
|
mock_has_min_version, vif_type):
|
|
instance = objects.Instance(**self.test_instance)
|
|
|
|
expeted_pci_slot = "0000:00:00.0"
|
|
network_info = _fake_network_info(self, 1)
|
|
network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT
|
|
# some more adjustments for the fake network_info so that
|
|
# the correct get_config function will be executed (vif's
|
|
# get_config_hw_veb - which is according to the real SRIOV vif)
|
|
# and most importantly the pci_slot which is translated to
|
|
# cfg.source_dev, then to PciDevice.address and sent to
|
|
# _detach_pci_devices
|
|
network_info[0]['profile'] = dict(pci_slot=expeted_pci_slot)
|
|
network_info[0]['type'] = vif_type
|
|
network_info[0]['details'] = dict(vlan="2145")
|
|
instance.info_cache = objects.InstanceInfoCache(
|
|
network_info=network_info)
|
|
# fill the pci_devices of the instance so that
|
|
# pci_manager.get_instance_pci_devs will not return an empty list
|
|
# which will eventually fail the assertion for detachDeviceFlags
|
|
expected_pci_device_obj = (
|
|
objects.PciDevice(address=expeted_pci_slot, request_id=None))
|
|
instance.pci_devices = objects.PciDeviceList()
|
|
instance.pci_devices.objects = [expected_pci_device_obj]
|
|
|
|
domain = FakeVirtDomain()
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
guest = libvirt_guest.Guest(domain)
|
|
|
|
with mock.patch.object(drvr, '_detach_pci_devices') as mock_detach_pci:
|
|
drvr._detach_direct_passthrough_ports(
|
|
self.context, instance, guest)
|
|
mock_detach_pci.assert_called_once_with(
|
|
guest, [expected_pci_device_obj])
|
|
|
|
    def test_detach_direct_passthrough_ports_interface_interface_hostdev(self):
        # Note: test detach_direct_passthrough_ports method for vif with config
        # LibvirtConfigGuestInterface
        self._test_detach_direct_passthrough_ports(vif_type="hw_veb")
    def test_detach_direct_passthrough_ports_interface_pci_hostdev(self):
        # Note: test detach_direct_passthrough_ports method for vif with config
        # LibvirtConfigGuestHostdevPCI
        self._test_detach_direct_passthrough_ports(vif_type="ib_hostdev")
    @mock.patch.object(host.Host, 'has_min_version', return_value=True)
    @mock.patch.object(FakeVirtDomain, 'detachDeviceFlags')
    def test_detach_duplicate_mac_direct_passthrough_ports(
            self, mock_detachDeviceFlags, mock_has_min_version):
        """Two direct ports sharing the same MAC address must both be
        detached, distinguished by their PCI slots.
        """
        instance = objects.Instance(**self.test_instance)

        network_info = _fake_network_info(self, 2)

        # Both vifs share one MAC; only the pci_slot tells them apart.
        for network_info_inst in network_info:
            network_info_inst['vnic_type'] = network_model.VNIC_TYPE_DIRECT
            network_info_inst['type'] = "hw_veb"
            network_info_inst['details'] = dict(vlan="2145")
            network_info_inst['address'] = "fa:16:3e:96:2a:48"

        network_info[0]['profile'] = dict(pci_slot="0000:00:00.0")
        network_info[1]['profile'] = dict(pci_slot="0000:00:00.1")

        instance.info_cache = objects.InstanceInfoCache(
            network_info=network_info)
        # fill the pci_devices of the instance so that
        # pci_manager.get_instance_pci_devs will not return an empty list
        # which will eventually fail the assertion for detachDeviceFlags
        instance.pci_devices = objects.PciDeviceList()
        instance.pci_devices.objects = [
            objects.PciDevice(address='0000:00:00.0', request_id=None),
            objects.PciDevice(address='0000:00:00.1', request_id=None)
        ]

        domain = FakeVirtDomain()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        guest = libvirt_guest.Guest(domain)

        drvr._detach_direct_passthrough_ports(self.context, instance, guest)

        expected_xml = [
            ('<hostdev mode="subsystem" type="pci" managed="yes">\n'
             ' <source>\n'
             ' <address bus="0x00" domain="0x0000" \
function="0x0" slot="0x00"/>\n'
             ' </source>\n'
             '</hostdev>\n'),
            ('<hostdev mode="subsystem" type="pci" managed="yes">\n'
             ' <source>\n'
             ' <address bus="0x00" domain="0x0000" \
function="0x1" slot="0x00"/>\n'
             ' </source>\n'
             '</hostdev>\n')
        ]

        # NOTE(review): Mock.has_calls() is NOT an assertion method -- it
        # just creates a child mock, so this check is a no-op and verifies
        # nothing. It presumably should be assert_has_calls(); confirm the
        # expected XML exactly matches the generated detach XML before
        # switching, or the test will start failing for the wrong reason.
        mock_detachDeviceFlags.has_calls([
            mock.call(expected_xml[0], flags=1),
            mock.call(expected_xml[1], flags=1)
        ])
    def test_resume(self):
        """resume() recreates the domain from the saved XML, re-attaches
        PCI devices and synchronises the guest clock.
        """
        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices>"
                    "<disk type='file'><driver name='qemu' type='raw'/>"
                    "<source file='/test/disk'/>"
                    "<target dev='vda' bus='virtio'/></disk>"
                    "<disk type='file'><driver name='qemu' type='qcow2'/>"
                    "<source file='/test/disk.local'/>"
                    "<target dev='vdb' bus='virtio'/></disk>"
                    "</devices></domain>")
        instance = objects.Instance(**self.test_instance)
        network_info = _fake_network_info(self, 1)
        block_device_info = None
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        guest = libvirt_guest.Guest('fake_dom')
        with test.nested(
            mock.patch.object(drvr, '_get_existing_domain_xml',
                              return_value=dummyxml),
            mock.patch.object(drvr, '_create_domain_and_network',
                              return_value=guest),
            mock.patch.object(drvr, '_attach_pci_devices'),
            mock.patch.object(pci_manager, 'get_instance_pci_devs',
                              return_value='fake_pci_devs'),
            mock.patch.object(utils, 'get_image_from_system_metadata'),
            mock.patch.object(guest, 'sync_guest_time'),
            mock.patch.object(drvr, '_wait_for_running',
                              side_effect=loopingcall.LoopingCallDone()),
        ) as (_get_existing_domain_xml, _create_domain_and_network,
              _attach_pci_devices, get_instance_pci_devs, get_image_metadata,
              mock_sync_time, mock_wait):
            get_image_metadata.return_value = {'bar': 234}

            drvr.resume(self.context, instance, network_info,
                        block_device_info)
            _get_existing_domain_xml.assert_has_calls([mock.call(instance,
                                        network_info, block_device_info)])
            _create_domain_and_network.assert_has_calls([mock.call(
                                        self.context, dummyxml,
                                        instance, network_info,
                                        block_device_info=block_device_info,
                                        vifs_already_plugged=True)])
            self.assertTrue(mock_sync_time.called)
            _attach_pci_devices.assert_has_calls([mock.call(guest,
                                                 'fake_pci_devs')])
    @mock.patch.object(host.Host, '_get_domain')
    @mock.patch.object(libvirt_driver.LibvirtDriver, 'get_info')
    @mock.patch.object(libvirt_driver.LibvirtDriver, 'delete_instance_files')
    @mock.patch.object(objects.Instance, 'save')
    def test_destroy_undefines(self, mock_save, mock_delete_instance_files,
                               mock_get_info, mock_get_domain):
        """destroy() on a shut-down guest undefines the domain and saves
        the instance.
        """
        dom_mock = mock.MagicMock()
        dom_mock.undefineFlags.return_value = 1

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        mock_get_domain.return_value = dom_mock
        mock_get_info.return_value = hardware.InstanceInfo(
            state=power_state.SHUTDOWN, internal_id=-1)
        mock_delete_instance_files.return_value = None

        instance = objects.Instance(self.context, **self.test_instance)
        drvr.destroy(self.context, instance, [])
        mock_save.assert_called_once_with()
    @mock.patch.object(rbd_utils.RBDDriver, '_destroy_volume')
    @mock.patch.object(rbd_utils.RBDDriver, '_disconnect_from_rados')
    @mock.patch.object(rbd_utils.RBDDriver, '_connect_to_rados')
    @mock.patch.object(rbd_utils, 'rbd')
    @mock.patch.object(rbd_utils, 'rados')
    def test_cleanup_rbd(self, mock_rados, mock_rbd, mock_connect,
                         mock_disconnect, mock_destroy_volume):
        """_cleanup_rbd() destroys only this instance's volumes, leaving
        other instances' volumes in the pool untouched.
        """
        mock_connect.return_value = mock.MagicMock(), mock.MagicMock()
        instance = objects.Instance(**self.test_instance)
        # Pool contains volumes of two instances; only ours may be removed.
        all_volumes = [uuids.other_instance + '_disk',
                       uuids.other_instance + '_disk.swap',
                       instance.uuid + '_disk',
                       instance.uuid + '_disk.swap']

        mock_rbd.RBD.return_value.list.return_value = all_volumes
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr._cleanup_rbd(instance)
        calls = [mock.call(mock.ANY, instance.uuid + '_disk'),
                 mock.call(mock.ANY, instance.uuid + '_disk.swap')]
        mock_destroy_volume.assert_has_calls(calls)
        self.assertEqual(2, mock_destroy_volume.call_count)
    @mock.patch.object(rbd_utils.RBDDriver, '_destroy_volume')
    @mock.patch.object(rbd_utils.RBDDriver, '_disconnect_from_rados')
    @mock.patch.object(rbd_utils.RBDDriver, '_connect_to_rados')
    @mock.patch.object(rbd_utils, 'rbd')
    @mock.patch.object(rbd_utils, 'rados')
    def test_cleanup_rbd_resize_reverting(self, mock_rados, mock_rbd,
                                          mock_connect, mock_disconnect,
                                          mock_destroy_volume):
        """While reverting a resize, only the '_disk.local' volume is
        destroyed; the root disk must be preserved.
        """
        mock_connect.return_value = mock.MagicMock(), mock.MagicMock()
        instance = objects.Instance(**self.test_instance)
        instance.task_state = task_states.RESIZE_REVERTING
        all_volumes = [uuids.other_instance + '_disk',
                       uuids.other_instance + '_disk.local',
                       instance.uuid + '_disk',
                       instance.uuid + '_disk.local']
        mock_rbd.RBD.return_value.list.return_value = all_volumes
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr._cleanup_rbd(instance)
        mock_destroy_volume.assert_called_once_with(
            mock.ANY, instance.uuid + '_disk.local')
    @mock.patch.object(objects.Instance, 'save')
    def test_destroy_undefines_no_undefine_flags(self, mock_save):
        """If undefineFlags() raises a libvirtError, destroy() falls back
        to a plain undefine().
        """
        mock_domain = mock.Mock(fakelibvirt.virDomain)
        mock_domain.undefineFlags.side_effect = fakelibvirt.libvirtError('Err')
        mock_domain.ID.return_value = 123

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        drvr._host._get_domain = mock.Mock(return_value=mock_domain)
        drvr._has_uefi_support = mock.Mock(return_value=False)
        drvr.delete_instance_files = mock.Mock(return_value=None)
        drvr.get_info = mock.Mock(return_value=
            hardware.InstanceInfo(state=power_state.SHUTDOWN, internal_id=-1)
        )

        instance = objects.Instance(self.context, **self.test_instance)
        drvr.destroy(self.context, instance, [])

        self.assertEqual(2, mock_domain.ID.call_count)
        mock_domain.destroy.assert_called_once_with()
        mock_domain.undefineFlags.assert_called_once_with(1)
        # The libvirtError triggers the plain-undefine fallback.
        mock_domain.undefine.assert_called_once_with()
        mock_save.assert_called_once_with()
    @mock.patch.object(objects.Instance, 'save')
    def test_destroy_undefines_no_attribute_with_managed_save(self, mock_save):
        """On old libvirt without undefineFlags (AttributeError), a managed
        save image is removed explicitly before the plain undefine().
        """
        mock_domain = mock.Mock(fakelibvirt.virDomain)
        mock_domain.undefineFlags.side_effect = AttributeError()
        mock_domain.ID.return_value = 123

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        drvr._host._get_domain = mock.Mock(return_value=mock_domain)
        drvr._has_uefi_support = mock.Mock(return_value=False)
        drvr.delete_instance_files = mock.Mock(return_value=None)
        drvr.get_info = mock.Mock(return_value=
            hardware.InstanceInfo(state=power_state.SHUTDOWN, internal_id=-1)
        )

        instance = objects.Instance(self.context, **self.test_instance)
        drvr.destroy(self.context, instance, [])

        self.assertEqual(1, mock_domain.ID.call_count)
        mock_domain.destroy.assert_called_once_with()
        mock_domain.undefineFlags.assert_called_once_with(1)
        mock_domain.hasManagedSaveImage.assert_has_calls([mock.call(0)])
        mock_domain.managedSaveRemove.assert_called_once_with(0)
        mock_domain.undefine.assert_called_once_with()
        mock_save.assert_called_once_with()
    @mock.patch.object(objects.Instance, 'save')
    def test_destroy_undefines_no_attribute_no_managed_save(self, mock_save):
        """On old libvirt lacking both undefineFlags and
        hasManagedSaveImage (AttributeError), destroy() still falls back
        to a plain undefine() without removing a managed save.
        """
        mock_domain = mock.Mock(fakelibvirt.virDomain)
        mock_domain.undefineFlags.side_effect = AttributeError()
        mock_domain.hasManagedSaveImage.side_effect = AttributeError()
        mock_domain.ID.return_value = 123

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        drvr._host._get_domain = mock.Mock(return_value=mock_domain)
        drvr._has_uefi_support = mock.Mock(return_value=False)
        drvr.delete_instance_files = mock.Mock(return_value=None)
        drvr.get_info = mock.Mock(return_value=
            hardware.InstanceInfo(state=power_state.SHUTDOWN, internal_id=-1)
        )

        instance = objects.Instance(self.context, **self.test_instance)
        drvr.destroy(self.context, instance, [])

        self.assertEqual(1, mock_domain.ID.call_count)
        mock_domain.destroy.assert_called_once_with()
        mock_domain.undefineFlags.assert_called_once_with(1)
        mock_domain.hasManagedSaveImage.assert_has_calls([mock.call(0)])
        mock_domain.undefine.assert_called_once_with()
        mock_save.assert_called_once_with()
    @mock.patch.object(objects.Instance, 'save')
    def test_destroy_removes_nvram(self, mock_save):
        """With UEFI support, destroy() undefines with the NVRAM flag so
        the guest's NVRAM file is removed as well.
        """
        mock_domain = mock.Mock(fakelibvirt.virDomain)
        mock_domain.ID.return_value = 123

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        drvr._host._get_domain = mock.Mock(return_value=mock_domain)
        drvr._has_uefi_support = mock.Mock(return_value=True)
        drvr.delete_instance_files = mock.Mock(return_value=None)
        drvr.get_info = mock.Mock(return_value=hardware.InstanceInfo(
            state=power_state.SHUTDOWN, internal_id=-1))

        instance = objects.Instance(self.context, **self.test_instance)
        drvr.destroy(self.context, instance, [])

        self.assertEqual(1, mock_domain.ID.call_count)
        mock_domain.destroy.assert_called_once_with()
        # undefineFlags should now be called with 5 as uefi is supported
        mock_domain.undefineFlags.assert_called_once_with(
            fakelibvirt.VIR_DOMAIN_UNDEFINE_MANAGED_SAVE |
            fakelibvirt.VIR_DOMAIN_UNDEFINE_NVRAM
        )
        mock_domain.undefine.assert_not_called()
        mock_save.assert_called_once_with()
    def test_destroy_timed_out(self):
        """A libvirt VIR_ERR_OPERATION_TIMEOUT on destroy is translated
        into InstancePowerOffFailure.
        """
        mock_virdomain = mock.Mock(autospec=fakelibvirt.virDomain)
        mock_virdomain.destroy.side_effect = fakelibvirt.libvirtError(
            'timed out')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stub_out('nova.virt.libvirt.host.Host._get_domain',
                      lambda self, instance: mock_virdomain)
        self.stub_out('nova.tests.unit.virt.libvirt.fakelibvirt.libvirtError.'
                      'get_error_code',
                      lambda self: fakelibvirt.VIR_ERR_OPERATION_TIMEOUT)
        instance = objects.Instance(**self.test_instance)
        self.assertRaises(exception.InstancePowerOffFailure,
                          drvr.destroy, self.context, instance, [])

        mock_virdomain.ID.assert_called_once_with()
        mock_virdomain.destroy.assert_called_once_with()
    def test_private_destroy_not_found(self):
        """_destroy() must not raise when the domain has already vanished
        (VIR_ERR_NO_DOMAIN).
        """
        ex = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError,
            "No such domain",
            error_code=fakelibvirt.VIR_ERR_NO_DOMAIN)
        mock_virdomain = mock.Mock(autospec=fakelibvirt.virDomain)
        mock_virdomain.destroy.side_effect = ex
        mock_virdomain.info.side_effect = ex

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stub_out('nova.virt.libvirt.host.Host._get_domain',
                      lambda self, instance: mock_virdomain)
        instance = objects.Instance(**self.test_instance)
        # NOTE(vish): verifies destroy doesn't raise if the instance disappears
        drvr._destroy(instance)

        mock_virdomain.ID.assert_called_once_with()
        mock_virdomain.destroy.assert_called_once_with()
        mock_virdomain.info.assert_called_once_with()
        mock_virdomain.UUIDString.assert_called_once_with()
    def test_private_destroy_lxc_processes_refused_to_die(self):
        """For LXC guests, the 'Some processes refused to die' internal
        error is tolerated when the guest ends up shut down anyway.
        """
        self.flags(virt_type='lxc', group='libvirt')
        ex = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError, "",
            error_message="internal error: Some processes refused to die",
            error_code=fakelibvirt.VIR_ERR_INTERNAL_ERROR)

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        with mock.patch.object(conn._host, '_get_domain') as mock_get_domain, \
             mock.patch.object(conn, 'get_info') as mock_get_info:
            mock_domain = mock.MagicMock()
            mock_domain.ID.return_value = 1
            mock_get_domain.return_value = mock_domain
            mock_domain.destroy.side_effect = ex

            # Guest is reported shut down, so the error is ignored.
            mock_info = mock.MagicMock()
            mock_info.internal_id = 1
            mock_info.state = power_state.SHUTDOWN
            mock_get_info.return_value = mock_info

            instance = objects.Instance(**self.test_instance)
            conn._destroy(instance)
    def test_private_destroy_processes_refused_to_die_still_raises(self):
        """For non-LXC guests, the 'Some processes refused to die'
        internal error propagates to the caller.
        """
        ex = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError, "",
            error_message="internal error: Some processes refused to die",
            error_code=fakelibvirt.VIR_ERR_INTERNAL_ERROR)

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        with mock.patch.object(conn._host, '_get_domain') as mock_get_domain:
            mock_domain = mock.MagicMock()
            mock_domain.ID.return_value = 1
            mock_get_domain.return_value = mock_domain
            mock_domain.destroy.side_effect = ex

            instance = objects.Instance(**self.test_instance)
            self.assertRaises(fakelibvirt.libvirtError, conn._destroy,
                              instance)
    def test_private_destroy_ebusy_timeout(self):
        # Tests that _destroy will retry 3 times to destroy the guest when an
        # EBUSY is raised, but eventually times out and raises the libvirtError
        ex = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError,
            ("Failed to terminate process 26425 with SIGKILL: "
             "Device or resource busy"),
            error_code=fakelibvirt.VIR_ERR_SYSTEM_ERROR,
            int1=errno.EBUSY)

        mock_guest = mock.Mock(libvirt_guest.Guest, id=1)
        mock_guest.poweroff = mock.Mock(side_effect=ex)

        instance = objects.Instance(**self.test_instance)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        with mock.patch.object(drvr._host, 'get_guest',
                               return_value=mock_guest):
            self.assertRaises(fakelibvirt.libvirtError, drvr._destroy,
                              instance)

        # Exactly three attempts before giving up.
        self.assertEqual(3, mock_guest.poweroff.call_count)
    def test_private_destroy_ebusy_multiple_attempt_ok(self):
        # Tests that the _destroy attempt loop is broken when EBUSY is no
        # longer raised.
        ex = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError,
            ("Failed to terminate process 26425 with SIGKILL: "
             "Device or resource busy"),
            error_code=fakelibvirt.VIR_ERR_SYSTEM_ERROR,
            int1=errno.EBUSY)

        # First poweroff raises EBUSY, second succeeds.
        mock_guest = mock.Mock(libvirt_guest.Guest, id=1)
        mock_guest.poweroff = mock.Mock(side_effect=[ex, None])

        inst_info = hardware.InstanceInfo(power_state.SHUTDOWN, internal_id=1)
        instance = objects.Instance(**self.test_instance)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        with mock.patch.object(drvr._host, 'get_guest',
                               return_value=mock_guest):
            with mock.patch.object(drvr, 'get_info', return_value=inst_info):
                drvr._destroy(instance)

        self.assertEqual(2, mock_guest.poweroff.call_count)
    @mock.patch.object(fakelibvirt.libvirtError, 'get_error_code')
    @mock.patch.object(host.Host, '_get_domain',
                       side_effect=exception.InstanceNotFound(
                           instance_id=uuids.instance))
    def test_undefine_domain_with_not_found_instance(self, mock_get_domain,
                                                     mock_get_error):
        """_undefine_domain() is a no-op, without error inspection, when
        the instance's domain no longer exists.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)

        # NOTE(wenjianhn): verifies undefine doesn't raise if the
        # instance disappears
        drvr._undefine_domain(instance)

        mock_get_domain.assert_called_once_with(instance)
        mock_get_error.assert_not_called()
@mock.patch.object(libvirt_driver.LibvirtDriver, "_has_uefi_support")
@mock.patch.object(host.Host, "get_guest")
def test_undefine_domain_handles_libvirt_errors(self, mock_get,
                                                mock_has_uefi):
    """Only VIR_ERR_NO_DOMAIN is swallowed by _undefine_domain."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    fake_guest = mock.Mock()
    mock_get.return_value = fake_guest

    unexpected = fakelibvirt.make_libvirtError(
        fakelibvirt.libvirtError, "Random", error_code=1)
    fake_guest.delete_configuration.side_effect = unexpected

    # ensure raise unexpected error code
    self.assertRaises(type(unexpected), drvr._undefine_domain, instance)

    ignored = fakelibvirt.make_libvirtError(
        fakelibvirt.libvirtError, "No such domain",
        error_code=fakelibvirt.VIR_ERR_NO_DOMAIN)
    fake_guest.delete_configuration.side_effect = ignored

    # ensure no raise for no such domain
    drvr._undefine_domain(instance)
@mock.patch.object(host.Host, "list_instance_domains")
@mock.patch.object(objects.BlockDeviceMappingList, "bdms_by_instance_uuid")
@mock.patch.object(objects.InstanceList, "get_by_filters")
def test_disk_over_committed_size_total(self, mock_get, mock_bdms,
                                        mock_list):
    """Disk overcommit total is summed across all instance disks."""
    # (The previous comment here, about managedSaveRemove, was
    # copy-pasted from an unrelated destroy test.)

    # Minimal fake libvirt domain: only the identity accessors and
    # XMLDesc used by the driver are implemented.
    class DiagFakeDomain(object):
        def __init__(self, name):
            self._name = name
            self._uuid = uuids.fake

        def ID(self):
            return 1

        def name(self):
            return self._name

        def UUIDString(self):
            return self._uuid

        def XMLDesc(self, flags):
            return "<domain><name>%s</name></domain>" % self._name

    instance_domains = [
        DiagFakeDomain("instance0000001"),
        DiagFakeDomain("instance0000002")]
    mock_list.return_value = instance_domains

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    fake_disks = {'instance0000001':
                  [{'type': 'qcow2', 'path': '/somepath/disk1',
                    'virt_disk_size': '10737418240',
                    'backing_file': '/somepath/disk1',
                    'disk_size': '83886080',
                    'over_committed_disk_size': '10653532160'}],
                  'instance0000002':
                  [{'type': 'raw', 'path': '/somepath/disk2',
                    'virt_disk_size': '0',
                    'backing_file': '/somepath/disk2',
                    'disk_size': '10737418240',
                    'over_committed_disk_size': '0'}]}

    def get_info(cfg, block_device_info):
        return fake_disks.get(cfg.name)

    instance_uuids = [dom.UUIDString() for dom in instance_domains]
    instances = [objects.Instance(
        uuid=instance_uuids[0],
        root_device_name='/dev/vda'),
        objects.Instance(
        uuid=instance_uuids[1],
        root_device_name='/dev/vdb')
    ]
    mock_get.return_value = instances

    with mock.patch.object(
            drvr, "_get_instance_disk_info_from_config") as mock_info:
        mock_info.side_effect = get_info

        result = drvr._get_disk_over_committed_size_total()
        # Only the first instance over-commits; the total equals its
        # over_committed_disk_size.
        self.assertEqual(result, 10653532160)
        mock_list.assert_called_once_with(only_running=False)
        self.assertEqual(2, mock_info.call_count)

    filters = {'uuid': instance_uuids}
    mock_get.assert_called_once_with(mock.ANY, filters, use_slave=True)
    mock_bdms.assert_called_with(mock.ANY, instance_uuids)
@mock.patch.object(host.Host, "list_instance_domains")
@mock.patch.object(objects.BlockDeviceMappingList, "bdms_by_instance_uuid")
@mock.patch.object(objects.InstanceList, "get_by_filters")
def test_disk_over_committed_size_total_eperm(self, mock_get, mock_bdms,
                                              mock_list):
    """Per-instance OSErrors are tolerated, not fatal, when summing."""
    # (The previous comment here, about managedSaveRemove, was
    # copy-pasted from an unrelated destroy test.)

    # Minimal fake libvirt domain: only identity accessors and XMLDesc
    # are needed by the driver.
    class DiagFakeDomain(object):
        def __init__(self, name):
            self._name = name
            self._uuid = uuidutils.generate_uuid()

        def ID(self):
            return 1

        def name(self):
            return self._name

        def UUIDString(self):
            return self._uuid

        def XMLDesc(self, flags):
            return "<domain><name>%s</name></domain>" % self._name

    instance_domains = [
        DiagFakeDomain("instance0000001"),
        DiagFakeDomain("instance0000002"),
        DiagFakeDomain("instance0000003"),
        DiagFakeDomain("instance0000004"),
        DiagFakeDomain("instance0000005")]
    mock_list.return_value = instance_domains

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    fake_disks = {'instance0000001':
                  [{'type': 'qcow2', 'path': '/somepath/disk1',
                    'virt_disk_size': '10737418240',
                    'backing_file': '/somepath/disk1',
                    'disk_size': '83886080',
                    'over_committed_disk_size': '10653532160'}],
                  'instance0000002':
                  [{'type': 'raw', 'path': '/somepath/disk2',
                    'virt_disk_size': '0',
                    'backing_file': '/somepath/disk2',
                    'disk_size': '10737418240',
                    'over_committed_disk_size': '21474836480'}],
                  'instance0000003':
                  [{'type': 'raw', 'path': '/somepath/disk3',
                    'virt_disk_size': '0',
                    'backing_file': '/somepath/disk3',
                    'disk_size': '21474836480',
                    'over_committed_disk_size': '32212254720'}],
                  'instance0000004':
                  [{'type': 'raw', 'path': '/somepath/disk4',
                    'virt_disk_size': '0',
                    'backing_file': '/somepath/disk4',
                    'disk_size': '32212254720',
                    'over_committed_disk_size': '42949672960'}]}

    # Instances 1-3 raise ENOENT/ESTALE/EACCES respectively, all of
    # which must be skipped; only instance 4 yields real disk data.
    def side_effect(cfg, block_device_info):
        if cfg.name == 'instance0000001':
            self.assertEqual('/dev/vda',
                             block_device_info['root_device_name'])
            raise OSError(errno.ENOENT, 'No such file or directory')
        if cfg.name == 'instance0000002':
            self.assertEqual('/dev/vdb',
                             block_device_info['root_device_name'])
            raise OSError(errno.ESTALE, 'Stale NFS file handle')
        if cfg.name == 'instance0000003':
            self.assertEqual('/dev/vdc',
                             block_device_info['root_device_name'])
            raise OSError(errno.EACCES, 'Permission denied')
        if cfg.name == 'instance0000004':
            self.assertEqual('/dev/vdd',
                             block_device_info['root_device_name'])
        return fake_disks.get(cfg.name)
    get_disk_info = mock.Mock()
    get_disk_info.side_effect = side_effect
    drvr._get_instance_disk_info_from_config = get_disk_info

    instance_uuids = [dom.UUIDString() for dom in instance_domains]
    instances = [objects.Instance(
        uuid=instance_uuids[0],
        root_device_name='/dev/vda'),
        objects.Instance(
        uuid=instance_uuids[1],
        root_device_name='/dev/vdb'),
        objects.Instance(
        uuid=instance_uuids[2],
        root_device_name='/dev/vdc'),
        objects.Instance(
        uuid=instance_uuids[3],
        root_device_name='/dev/vdd'),
    ]
    mock_get.return_value = instances

    # NOTE(danms): We need to have found bdms for our instances,
    # but we don't really need them to be complete as we just need
    # to make it to our side_effect above. Exclude the last domain
    # to simulate the case where we have an instance with no BDMs.
    mock_bdms.return_value = {uuid: [] for uuid in instance_uuids
                              if uuid != instance_domains[-1].UUIDString()}

    result = drvr._get_disk_over_committed_size_total()
    # Only instance 4's over_committed_disk_size survives the errors.
    self.assertEqual(42949672960, result)
    mock_list.assert_called_once_with(only_running=False)
    self.assertEqual(5, get_disk_info.call_count)
    filters = {'uuid': instance_uuids}
    mock_get.assert_called_once_with(mock.ANY, filters, use_slave=True)
    mock_bdms.assert_called_with(mock.ANY, instance_uuids)
@mock.patch.object(host.Host, "list_instance_domains")
@mock.patch.object(libvirt_driver.LibvirtDriver,
                   "_get_instance_disk_info_from_config",
                   side_effect=exception.VolumeBDMPathNotFound(path='bar'))
@mock.patch.object(objects.BlockDeviceMappingList, "bdms_by_instance_uuid")
@mock.patch.object(objects.InstanceList, "get_by_filters")
def test_disk_over_committed_size_total_bdm_not_found(self,
                                                      mock_get,
                                                      mock_bdms,
                                                      mock_get_disk_info,
                                                      mock_list_domains):
    """Tests that we handle VolumeBDMPathNotFound gracefully."""
    fake_domain = mock.Mock(**{'XMLDesc.return_value': "<domain/>"})
    mock_list_domains.return_value = [fake_domain]

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    total = drvr._get_disk_over_committed_size_total()
    self.assertEqual(0, total)
@mock.patch('nova.virt.libvirt.host.Host.list_instance_domains')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
            '_get_instance_disk_info_from_config',
            side_effect=exception.DiskNotFound(location='/opt/stack/foo'))
@mock.patch('nova.objects.BlockDeviceMappingList.bdms_by_instance_uuid',
            return_value=objects.BlockDeviceMappingList())
@mock.patch('nova.objects.InstanceList.get_by_filters',
            return_value=objects.InstanceList(objects=[
                objects.Instance(uuid=uuids.instance,
                                 vm_state=vm_states.ACTIVE,
                                 task_state=task_states.DELETING)]))
def test_disk_over_committed_size_total_disk_not_found_ignore_task_state(
        self, mock_get, mock_bdms, mock_get_disk_info, mock_list_domains):
    """Tests that we handle DiskNotFound gracefully for an instance that
    is undergoing a task_state transition.
    """
    mock_dom = mock.Mock()
    mock_dom.XMLDesc.return_value = "<domain/>"
    mock_dom.UUIDString.return_value = uuids.instance
    mock_list_domains.return_value = [mock_dom]
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    # The instance is DELETING, so the missing disk contributes 0
    # instead of raising.
    self.assertEqual(0, drvr._get_disk_over_committed_size_total())
@mock.patch('nova.virt.libvirt.host.Host.list_instance_domains')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
            '_get_instance_disk_info_from_config',
            side_effect=exception.DiskNotFound(location='/opt/stack/foo'))
@mock.patch('nova.objects.BlockDeviceMappingList.bdms_by_instance_uuid',
            return_value=objects.BlockDeviceMappingList())
@mock.patch('nova.objects.InstanceList.get_by_filters',
            return_value=objects.InstanceList(objects=[
                objects.Instance(uuid=uuids.instance,
                                 task_state=None,
                                 vm_state=vm_states.RESIZED)]))
def test_disk_over_committed_size_total_disk_not_found_ignore_vmstate(
        self, mock_get, mock_bdms, mock_get_disk_info, mock_list_domains):
    """Tests that we handle DiskNotFound gracefully for an instance that
    is resized but resize is not confirmed yet.
    """
    mock_dom = mock.Mock()
    mock_dom.XMLDesc.return_value = "<domain/>"
    mock_dom.UUIDString.return_value = uuids.instance
    mock_list_domains.return_value = [mock_dom]
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    # RESIZED vm_state also suppresses the DiskNotFound error.
    self.assertEqual(0, drvr._get_disk_over_committed_size_total())
@mock.patch('nova.virt.libvirt.host.Host.list_instance_domains')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
            '_get_instance_disk_info_from_config',
            side_effect=exception.DiskNotFound(location='/opt/stack/foo'))
@mock.patch('nova.objects.BlockDeviceMappingList.bdms_by_instance_uuid',
            return_value=objects.BlockDeviceMappingList())
@mock.patch('nova.objects.InstanceList.get_by_filters',
            return_value=objects.InstanceList(objects=[
                objects.Instance(uuid=uuids.instance,
                                 vm_state=vm_states.ACTIVE,
                                 task_state=None)]))
def test_disk_over_committed_size_total_disk_not_found_reraise(
        self, mock_get, mock_bdms, mock_get_disk_info, mock_list_domains):
    """Tests that we handle DiskNotFound gracefully for an instance that
    is NOT undergoing a task_state transition and the error is re-raised.
    """
    mock_dom = mock.Mock()
    mock_dom.XMLDesc.return_value = "<domain/>"
    mock_dom.UUIDString.return_value = uuids.instance
    mock_list_domains.return_value = [mock_dom]
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    # ACTIVE with no task_state: a missing disk is a real error here.
    self.assertRaises(exception.DiskNotFound,
                      drvr._get_disk_over_committed_size_total)
@mock.patch('nova.virt.libvirt.storage.lvm.get_volume_size')
@mock.patch('nova.virt.disk.api.get_disk_size',
            new_callable=mock.NonCallableMock)
def test_get_instance_disk_info_from_config_block_devices(self,
        mock_disk_api, mock_get_volume_size):
    """Test that for block devices the actual and virtual sizes are
    reported as the same and that the disk_api is not used.
    """
    c = context.get_admin_context()
    instance = objects.Instance(root_device_name='/dev/vda',
                                **self.test_instance)
    bdms = objects.BlockDeviceMappingList(objects=[
        fake_block_device.fake_bdm_object(c, {
            'device_name': '/dev/mapper/vg-lv',
            'source_type': 'image',
            'destination_type': 'local'
        }),

    ])
    block_device_info = driver.get_block_device_info(instance, bdms)

    config = vconfig.LibvirtConfigGuest()
    disk_config = vconfig.LibvirtConfigGuestDisk()
    # A 'block' source type routes the size lookup through LVM; the
    # NonCallableMock above fails the test if disk_api is touched.
    disk_config.source_type = "block"
    disk_config.source_path = mock.sentinel.volume_path
    config.devices.append(disk_config)

    mock_get_volume_size.return_value = mock.sentinel.volume_size

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    disk_info = drvr._get_instance_disk_info_from_config(config,
                                                         block_device_info)

    mock_get_volume_size.assert_called_once_with(mock.sentinel.volume_path)
    self.assertEqual(disk_info[0]['disk_size'],
                     disk_info[0]['virt_disk_size'])
def test_cpu_info(self):
    """_get_cpu_info reflects the host CPU reported by libvirt caps."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

    # Stub replacing Host.get_capabilities with a fixed capabilities
    # document: AMD Opteron_G4, 1 cell / 4 sockets / 2 cores / 1 thread.
    def get_host_capabilities_stub(self):
        cpu = vconfig.LibvirtConfigCPU()
        cpu.model = "Opteron_G4"
        cpu.vendor = "AMD"
        cpu.arch = fields.Architecture.X86_64

        cpu.cells = 1
        cpu.cores = 2
        cpu.threads = 1
        cpu.sockets = 4

        cpu.add_feature(vconfig.LibvirtConfigCPUFeature("extapic"))
        cpu.add_feature(vconfig.LibvirtConfigCPUFeature("3dnow"))

        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = cpu

        guest = vconfig.LibvirtConfigGuest()
        guest.ostype = fields.VMMode.HVM
        guest.arch = fields.Architecture.X86_64
        guest.domtype = ["kvm"]
        caps.guests.append(guest)

        guest = vconfig.LibvirtConfigGuest()
        guest.ostype = fields.VMMode.HVM
        guest.arch = fields.Architecture.I686
        guest.domtype = ["kvm"]
        caps.guests.append(guest)

        return caps

    self.stub_out('nova.virt.libvirt.host.Host.get_capabilities',
                  get_host_capabilities_stub)

    want = {"vendor": "AMD",
            "features": set(["extapic", "3dnow"]),
            "model": "Opteron_G4",
            "arch": fields.Architecture.X86_64,
            "topology": {"cells": 1, "cores": 2, "threads": 1,
                         "sockets": 4}}
    got = drvr._get_cpu_info()
    self.assertEqual(want, got)
def test_get_pcinet_info(self):
    """_get_pcinet_info returns the net device name and capabilities."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    dev_name = "net_enp2s2_02_9a_a1_37_be_54"
    parent_address = "pci_0000_04_11_7"
    node_dev = FakeNodeDevice(_fake_NodeDevXml[dev_name])

    with mock.patch.object(pci_utils, 'get_net_name_by_vf_pci_address',
                           return_value=dev_name) as mock_get_net_name, \
        mock.patch.object(drvr._host, 'device_lookup_by_name',
                          return_value=node_dev) as mock_dev_lookup:
        actualvf = drvr._get_pcinet_info(parent_address)
        expect_vf = {
            "name": dev_name,
            "capabilities": ["rx", "tx", "sg", "tso", "gso", "gro",
                             "rxvlan", "txvlan"]
        }
        self.assertEqual(expect_vf, actualvf)
        # NOTE: 'called_once_with' is not a real Mock assertion method;
        # accessing it just creates a child mock and always "passes".
        # Use the actual assert_* methods so the checks are enforced.
        mock_get_net_name.assert_called_once_with(parent_address)
        mock_dev_lookup.assert_called_once_with(dev_name)
@mock.patch.object(pci_utils, 'get_ifname_by_pci_address')
def test_get_pcidev_info_non_nic(self, mock_get_ifname):
    """A VF without a network interface is still reported as SRIOV_VF,
    just without the 'parent_ifname' key.
    """
    self.stub_out('nova.virt.libvirt.host.Host.device_lookup_by_name',
                  lambda self, name: FakeNodeDevice(
                      _fake_NodeDevXml[name]))

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    # Renamed from 'id' to avoid shadowing the builtin.
    dev_id = "pci_0000_04_10_7"
    mock_get_ifname.side_effect = exception.PciDeviceNotFoundById(id=dev_id)
    actualvf = drvr._get_pcidev_info(dev_id)
    expect_vf = {
        "dev_id": dev_id,
        "address": "0000:04:10.7",
        "product_id": '1520',
        "numa_node": None,
        "vendor_id": '8086',
        "label": 'label_8086_1520',
        "dev_type": fields.PciDeviceType.SRIOV_VF,
        'parent_addr': '0000:04:00.3',
    }
    self.assertEqual(expect_vf, actualvf)
@mock.patch.object(pci_utils, 'get_ifname_by_pci_address',
                   return_value='ens1')
def test_get_pcidev_info(self, mock_get_ifname):
    """_get_pcidev_info classifies PFs, VFs and standard PCI devices."""
    self.stub_out('nova.virt.libvirt.host.Host.device_lookup_by_name',
                  lambda self, name: FakeNodeDevice(
                      _fake_NodeDevXml[name]))

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    # A physical function.
    actualvf = drvr._get_pcidev_info("pci_0000_04_00_3")
    expect_vf = {
        "dev_id": "pci_0000_04_00_3",
        "address": "0000:04:00.3",
        "product_id": '1521',
        "numa_node": None,
        "vendor_id": '8086',
        "label": 'label_8086_1521',
        "dev_type": fields.PciDeviceType.SRIOV_PF,
    }
    self.assertEqual(expect_vf, actualvf)

    # A virtual function with a resolvable parent interface name.
    actualvf = drvr._get_pcidev_info("pci_0000_04_10_7")
    expect_vf = {
        "dev_id": "pci_0000_04_10_7",
        "address": "0000:04:10.7",
        "product_id": '1520',
        "numa_node": None,
        "vendor_id": '8086',
        "label": 'label_8086_1520',
        "dev_type": fields.PciDeviceType.SRIOV_VF,
        "parent_addr": '0000:04:00.3',
        "parent_ifname": "ens1",
    }
    self.assertEqual(expect_vf, actualvf)

    # A network-capable VF additionally reports its net capabilities.
    with mock.patch.object(pci_utils, 'get_net_name_by_vf_pci_address',
                           return_value="net_enp2s2_02_9a_a1_37_be_54"):
        actualvf = drvr._get_pcidev_info("pci_0000_04_11_7")
        expect_vf = {
            "dev_id": "pci_0000_04_11_7",
            "address": "0000:04:11.7",
            "product_id": '1520',
            "vendor_id": '8086',
            "numa_node": 0,
            "label": 'label_8086_1520',
            "dev_type": fields.PciDeviceType.SRIOV_VF,
            "parent_addr": '0000:04:00.3',
            "capabilities": {
                "network": ["rx", "tx", "sg", "tso", "gso", "gro",
                            "rxvlan", "txvlan"]},
            "parent_ifname": "ens1",
        }
        self.assertEqual(expect_vf, actualvf)

    # A plain (non-SRIOV) PCI device.
    actualvf = drvr._get_pcidev_info("pci_0000_04_00_1")
    expect_vf = {
        "dev_id": "pci_0000_04_00_1",
        "address": "0000:04:00.1",
        "product_id": '1013',
        "numa_node": 0,
        "vendor_id": '15b3',
        "label": 'label_15b3_1013',
        "dev_type": fields.PciDeviceType.STANDARD,
    }
    self.assertEqual(expect_vf, actualvf)

    actualvf = drvr._get_pcidev_info("pci_0000_03_00_0")
    expect_vf = {
        "dev_id": "pci_0000_03_00_0",
        "address": "0000:03:00.0",
        "product_id": '1013',
        "numa_node": 0,
        "vendor_id": '15b3',
        "label": 'label_15b3_1013',
        "dev_type": fields.PciDeviceType.SRIOV_PF,
    }
    self.assertEqual(expect_vf, actualvf)

    actualvf = drvr._get_pcidev_info("pci_0000_03_00_1")
    expect_vf = {
        "dev_id": "pci_0000_03_00_1",
        "address": "0000:03:00.1",
        "product_id": '1013',
        "numa_node": 0,
        "vendor_id": '15b3',
        "label": 'label_15b3_1013',
        "dev_type": fields.PciDeviceType.SRIOV_PF,
    }
    self.assertEqual(expect_vf, actualvf)
def test_list_devices_not_supported(self):
    """VIR_ERR_NO_SUPPORT from listDevices yields an empty device list;
    any other libvirt error propagates.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    # Handle just the NO_SUPPORT error
    not_supported_exc = fakelibvirt.make_libvirtError(
        fakelibvirt.libvirtError,
        'this function is not supported by the connection driver:'
        ' virNodeNumOfDevices',
        error_code=fakelibvirt.VIR_ERR_NO_SUPPORT)

    with mock.patch.object(drvr._conn, 'listDevices',
                           side_effect=not_supported_exc):
        self.assertEqual('[]', drvr._get_pci_passthrough_devices())

    # We cache not supported status to avoid emitting too many logging
    # messages. Clear this value to test the other exception case.
    del drvr._list_devices_supported

    # Other errors should not be caught
    other_exc = fakelibvirt.make_libvirtError(
        fakelibvirt.libvirtError,
        'other exc',
        error_code=fakelibvirt.VIR_ERR_NO_DOMAIN)

    with mock.patch.object(drvr._conn, 'listDevices',
                           side_effect=other_exc):
        self.assertRaises(fakelibvirt.libvirtError,
                          drvr._get_pci_passthrough_devices)
@mock.patch.object(pci_utils, 'get_ifname_by_pci_address',
                   return_value='ens1')
@mock.patch.object(host.Host, 'list_pci_devices',
                   return_value=['pci_0000_04_00_3', 'pci_0000_04_10_7',
                                 'pci_0000_04_11_7'])
def test_get_pci_passthrough_devices(self, mock_list, mock_get_ifname):
    """The passthrough device JSON covers every listed PCI device."""
    self.stub_out('nova.virt.libvirt.host.Host.device_lookup_by_name',
                  lambda self, name: FakeNodeDevice(
                      _fake_NodeDevXml[name]))

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    actjson = drvr._get_pci_passthrough_devices()

    expectvfs = [
        {
            "dev_id": "pci_0000_04_00_3",
            "address": "0000:04:00.3",
            "product_id": '1521',
            "vendor_id": '8086',
            "dev_type": fields.PciDeviceType.SRIOV_PF,
            "phys_function": None,
            "numa_node": None},
        {
            "dev_id": "pci_0000_04_10_7",
            "domain": 0,
            "address": "0000:04:10.7",
            "product_id": '1520',
            "vendor_id": '8086',
            "numa_node": None,
            "dev_type": fields.PciDeviceType.SRIOV_VF,
            "phys_function": [('0x0000', '0x04', '0x00', '0x3')],
            "parent_addr": "0000:04:00.3",
            "parent_ifname": "ens1",
        },
        {
            "dev_id": "pci_0000_04_11_7",
            "domain": 0,
            "address": "0000:04:11.7",
            "product_id": '1520',
            "vendor_id": '8086',
            "numa_node": 0,
            "dev_type": fields.PciDeviceType.SRIOV_VF,
            "phys_function": [('0x0000', '0x04', '0x00', '0x3')],
            "parent_addr": "0000:04:00.3",
            "parent_ifname": "ens1",
        }
    ]

    actualvfs = jsonutils.loads(actjson)
    # Compare field-by-field; phys_function/virt_functions/label are
    # representation details not checked here.
    for dev in range(len(actualvfs)):
        for key in actualvfs[dev].keys():
            if key not in ['phys_function', 'virt_functions', 'label']:
                self.assertEqual(expectvfs[dev][key], actualvfs[dev][key])
    mock_list.assert_called_once_with()

    # The first call for every VF is to determine parent_ifname and
    # the second call to determine the MAC address.
    mock_get_ifname.assert_has_calls([
        mock.call('0000:04:10.7', pf_interface=True),
        mock.call('0000:04:10.7', False),
        mock.call('0000:04:11.7', pf_interface=True),
        mock.call('0000:04:11.7', False)
    ])
# TODO(stephenfin): This only has one caller. Flatten it and remove the
# 'mempages=False' branches or add the missing test
def _test_get_host_numa_topology(self, mempages):
    """Helper verifying the host NUMA topology built from libvirt caps,
    including per-cell mempages, sibling sets and network metadata.
    """
    self.flags(physnets=['foo', 'bar', 'baz'], group='neutron')
    # we need to call the below again to ensure the updated 'physnets'
    # value is read and the new groups created
    nova.conf.neutron.register_dynamic_opts(CONF)
    self.flags(numa_nodes=[0, 2], group='neutron_tunnel')
    self.flags(numa_nodes=[1], group='neutron_physnet_foo')
    self.flags(numa_nodes=[3], group='neutron_physnet_bar')
    self.flags(numa_nodes=[1, 2, 3], group='neutron_physnet_baz')

    caps = vconfig.LibvirtConfigCaps()
    caps.host = vconfig.LibvirtConfigCapsHost()
    caps.host.cpu = vconfig.LibvirtConfigCPU()
    caps.host.cpu.arch = fields.Architecture.X86_64
    caps.host.topology = fakelibvirt.NUMATopology()
    if mempages:
        # Cell i advertises 1024*i small (4K) pages and i huge (2M)
        # pages, so each cell's totals are distinguishable below.
        for i, cell in enumerate(caps.host.topology.cells):
            cell.mempages = fakelibvirt.create_mempages(
                [(4, 1024 * i), (2048, i)])

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    with test.nested(
        mock.patch.object(host.Host, "get_capabilities",
                          return_value=caps),
        mock.patch.object(
            hardware, 'get_vcpu_pin_set',
            return_value=set([0, 1, 3, 4, 5])),
        mock.patch.object(host.Host, 'get_online_cpus',
                          return_value=set([0, 1, 2, 3, 6])),
    ):
        got_topo = drvr._get_host_numa_topology()

    if mempages:
        # cells 0
        self.assertEqual(4, got_topo.cells[0].mempages[0].size_kb)
        self.assertEqual(0, got_topo.cells[0].mempages[0].total)
        self.assertEqual(2048, got_topo.cells[0].mempages[1].size_kb)
        self.assertEqual(0, got_topo.cells[0].mempages[1].total)
        # cells 1
        self.assertEqual(4, got_topo.cells[1].mempages[0].size_kb)
        self.assertEqual(1024, got_topo.cells[1].mempages[0].total)
        self.assertEqual(2048, got_topo.cells[1].mempages[1].size_kb)
        self.assertEqual(1, got_topo.cells[1].mempages[1].total)
    else:
        self.assertEqual([], got_topo.cells[0].mempages)
        self.assertEqual([], got_topo.cells[1].mempages)

    self.assertEqual(set([]), got_topo.cells[0].pinned_cpus)
    self.assertEqual(set([]), got_topo.cells[1].pinned_cpus)
    self.assertEqual(set([]), got_topo.cells[2].pinned_cpus)
    self.assertEqual(set([]), got_topo.cells[3].pinned_cpus)
    # Siblings are restricted to CPUs both pinnable and online.
    self.assertEqual([set([0, 1])], got_topo.cells[0].siblings)
    self.assertEqual([set([3])], got_topo.cells[1].siblings)

    self.assertEqual(set(),
                     got_topo.cells[0].network_metadata.physnets)
    self.assertEqual(set(['foo', 'baz']),
                     got_topo.cells[1].network_metadata.physnets)
    self.assertEqual(set(['baz']),
                     got_topo.cells[2].network_metadata.physnets)
    self.assertEqual(set(['bar', 'baz']),
                     got_topo.cells[3].network_metadata.physnets)

    # Tunneled traffic was affined to nodes 0 and 2 above.
    self.assertTrue(got_topo.cells[0].network_metadata.tunneled)
    self.assertFalse(got_topo.cells[1].network_metadata.tunneled)
    self.assertTrue(got_topo.cells[2].network_metadata.tunneled)
    self.assertFalse(got_topo.cells[3].network_metadata.tunneled)
@mock.patch.object(host.Host, 'has_min_version', return_value=True)
def test_get_host_numa_topology(self, mock_version):
    # Exercise the full topology path, including per-cell mempages.
    self._test_get_host_numa_topology(mempages=True)
def test_get_host_numa_topology_empty(self):
    """_get_host_numa_topology returns None when the host capabilities
    carry no topology element.
    """
    caps = vconfig.LibvirtConfigCaps()
    caps.host = vconfig.LibvirtConfigCapsHost()
    caps.host.cpu = vconfig.LibvirtConfigCPU()
    caps.host.cpu.arch = fields.Architecture.X86_64
    caps.host.topology = None

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    with test.nested(
        mock.patch.object(host.Host, 'has_min_version', return_value=True),
        mock.patch.object(host.Host, "get_capabilities",
                          return_value=caps)
    ) as (has_min_version, get_caps):
        self.assertIsNone(drvr._get_host_numa_topology())
        self.assertEqual(2, get_caps.call_count)
@mock.patch.object(fakelibvirt.Connection, 'getType')
@mock.patch.object(fakelibvirt.Connection, 'getVersion')
@mock.patch.object(fakelibvirt.Connection, 'getLibVersion')
def test_get_host_numa_topology_xen(self, mock_lib_version,
                                    mock_version, mock_type):
    """NUMA topology reporting is not supported for Xen hosts."""
    self.flags(virt_type='xen', group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    min_libvirt = versionutils.convert_version_to_int(
        libvirt_driver.MIN_LIBVIRT_VERSION)
    min_qemu = versionutils.convert_version_to_int(
        libvirt_driver.MIN_QEMU_VERSION)
    mock_lib_version.return_value = min_libvirt
    mock_version.return_value = min_qemu
    mock_type.return_value = host.HV_DRIVER_XEN

    self.assertIsNone(drvr._get_host_numa_topology())
@mock.patch.object(host.Host, 'has_min_version', return_value=True)
def test_get_host_numa_topology_missing_network_metadata(self,
        mock_version):
    """A physnet listed in '[neutron] physnets' without a matching
    '[neutron_physnet_X] numa_nodes' option must raise.
    """
    self.flags(physnets=['bar'], group='neutron')
    # we need to call the below again to ensure the updated 'physnets'
    # value is read and the new groups created
    nova.conf.neutron.register_dynamic_opts(CONF)

    # we explicitly avoid registering a '[neutron_physnets_bar] numa_nodes'
    # option here

    caps = vconfig.LibvirtConfigCaps()
    caps.host = vconfig.LibvirtConfigCapsHost()
    caps.host.cpu = vconfig.LibvirtConfigCPU()
    caps.host.cpu.arch = fields.Architecture.X86_64
    caps.host.topology = fakelibvirt.NUMATopology()

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    with test.nested(
        mock.patch.object(host.Host, "get_capabilities",
                          return_value=caps),
        mock.patch.object(hardware, 'get_vcpu_pin_set',
                          return_value=set([0, 1, 3, 4, 5])),
        mock.patch.object(host.Host, 'get_online_cpus',
                          return_value=set([0, 1, 2, 3, 6])),
    ):
        # NOTE: raw strings here -- '\[' in a normal string literal is
        # an invalid escape sequence (DeprecationWarning since Python
        # 3.6, slated to become an error).
        self.assertRaisesRegex(
            exception.InvalidNetworkNUMAAffinity,
            r"Invalid NUMA network affinity configured: the physnet 'bar' "
            r"was listed in '\[neutron\] physnets' but no corresponding "
            r"'\[neutron_physnet_bar\] numa_nodes' option was defined.",
            drvr._get_host_numa_topology)
@mock.patch.object(host.Host, 'has_min_version', return_value=True)
def _test_get_host_numa_topology_invalid_network_affinity(self,
        group_name, mock_version):
    """Helper: affine the given config group to a non-existent NUMA
    node and assert _get_host_numa_topology rejects the configuration.
    """
    self.flags(physnets=['foo', 'bar'], group='neutron')
    # we need to call the below again to ensure the updated 'physnets'
    # value is read and the new groups created
    nova.conf.neutron.register_dynamic_opts(CONF)

    # set defaults...
    for group_ in ['neutron_physnet_foo', 'neutron_physnet_bar',
                   'neutron_tunnel']:
        self.flags(numa_nodes=[0], group=group_)

    # but override them for the error case
    self.flags(numa_nodes=[4], group=group_name)

    caps = vconfig.LibvirtConfigCaps()
    caps.host = vconfig.LibvirtConfigCapsHost()
    caps.host.cpu = vconfig.LibvirtConfigCPU()
    caps.host.cpu.arch = fields.Architecture.X86_64
    # The fake topology only has nodes 0-3, so node 4 is invalid.
    caps.host.topology = fakelibvirt.NUMATopology()

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    with test.nested(
        mock.patch.object(host.Host, "get_capabilities",
                          return_value=caps),
        mock.patch.object(hardware, 'get_vcpu_pin_set',
                          return_value=set([0, 1, 3, 4, 5])),
        mock.patch.object(host.Host, 'get_online_cpus',
                          return_value=set([0, 1, 2, 3, 6])),
    ):
        self.assertRaisesRegex(
            exception.InvalidNetworkNUMAAffinity,
            r'node 4 for \w+ \w+ is not present',
            drvr._get_host_numa_topology)
def test_get_host_numa_topology_invalid_physical_network_affinity(self):
    """A physnet affined to a missing NUMA node must be rejected.

    Affining a physical network to a host NUMA node that does not
    exist is a configuration error; verify it raises.
    """
    group = 'neutron_physnet_bar'
    self._test_get_host_numa_topology_invalid_network_affinity(group)
def test_get_host_numa_topology_invalid_tunnel_network_affinity(self):
    """A tunneled network affined to a missing NUMA node must be rejected.

    Affining tunneled traffic to a host NUMA node that does not exist
    is a configuration error; verify it raises.
    """
    group = 'neutron_tunnel'
    self._test_get_host_numa_topology_invalid_network_affinity(group)
def test_diagnostic_vcpus_exception(self):
    """Diagnostics still include disk/net/memory stats when the domain's
    vcpus() call raises a libvirt error.
    """
    xml = """
            <domain type='kvm'>
                <devices>
                    <disk type='file'>
                        <source file='filename'/>
                        <target dev='vda' bus='virtio'/>
                    </disk>
                    <disk type='block'>
                        <source dev='/path/to/dev/1'/>
                        <target dev='vdb' bus='virtio'/>
                    </disk>
                    <interface type='network'>
                        <mac address='52:54:00:a4:38:38'/>
                        <source network='default'/>
                        <target dev='vnet0'/>
                    </interface>
                </devices>
            </domain>
        """

    class DiagFakeDomain(FakeVirtDomain):

        def __init__(self):
            super(DiagFakeDomain, self).__init__(fake_xml=xml)

        def vcpus(self):
            # Simulate libvirt failing to report vCPU statistics.
            raise fakelibvirt.libvirtError('vcpus missing')

        def blockStats(self, path):
            return (169, 688640, 0, 0, 1)

        def interfaceStats(self, path):
            return (4408, 82, 0, 0, 0, 0, 0, 0)

        def memoryStats(self):
            return {'actual': 220160, 'rss': 200164}

        def maxMemory(self):
            return 280160

    self.stub_out('nova.virt.libvirt.host.Host._get_domain',
                  lambda self, instance: DiagFakeDomain())

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    actual = drvr.get_diagnostics(instance)
    # No cpuN_time keys are present: vcpus() failed, but the other
    # stats were still collected.
    expect = {'vda_read': 688640,
              'vda_read_req': 169,
              'vda_write': 0,
              'vda_write_req': 0,
              'vda_errors': 1,
              'vdb_read': 688640,
              'vdb_read_req': 169,
              'vdb_write': 0,
              'vdb_write_req': 0,
              'vdb_errors': 1,
              'memory': 280160,
              'memory-actual': 220160,
              'memory-rss': 200164,
              'vnet0_rx': 4408,
              'vnet0_rx_drop': 0,
              'vnet0_rx_errors': 0,
              'vnet0_rx_packets': 82,
              'vnet0_tx': 0,
              'vnet0_tx_drop': 0,
              'vnet0_tx_errors': 0,
              'vnet0_tx_packets': 0,
              }
    self.assertEqual(actual, expect)

    # Pin "now" 10 seconds after launch so the uptime in the
    # structured diagnostics is deterministic.
    lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
    diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
    self.useFixture(utils_fixture.TimeFixture(diags_time))

    instance.launched_at = lt
    actual = drvr.get_instance_diagnostics(instance)
    expected = fake_diagnostics_object(with_disks=True, with_nic=True)
    self.assertDiagnosticsEqual(expected, actual)
def test_diagnostic_blockstats_exception(self):
|
|
xml = """
|
|
<domain type='kvm'>
|
|
<devices>
|
|
<disk type='file'>
|
|
<source file='filename'/>
|
|
<target dev='vda' bus='virtio'/>
|
|
</disk>
|
|
<disk type='block'>
|
|
<source dev='/path/to/dev/1'/>
|
|
<target dev='vdb' bus='virtio'/>
|
|
</disk>
|
|
<interface type='network'>
|
|
<mac address='52:54:00:a4:38:38'/>
|
|
<source network='default'/>
|
|
<target dev='vnet0'/>
|
|
</interface>
|
|
</devices>
|
|
</domain>
|
|
"""
|
|
|
|
class DiagFakeDomain(FakeVirtDomain):
|
|
|
|
def __init__(self):
|
|
super(DiagFakeDomain, self).__init__(fake_xml=xml)
|
|
|
|
def vcpus(self):
|
|
return ([(0, 1, 15340000000, 0),
|
|
(1, 1, 1640000000, 0),
|
|
(2, 1, 3040000000, 0),
|
|
(3, 1, 1420000000, 0)],
|
|
[(True, False),
|
|
(True, False),
|
|
(True, False),
|
|
(True, False)])
|
|
|
|
def blockStats(self, path):
|
|
raise fakelibvirt.libvirtError('blockStats missing')
|
|
|
|
def interfaceStats(self, path):
|
|
return (4408, 82, 0, 0, 0, 0, 0, 0)
|
|
|
|
def memoryStats(self):
|
|
return {'actual': 220160, 'rss': 200164}
|
|
|
|
def maxMemory(self):
|
|
return 280160
|
|
|
|
self.stub_out('nova.virt.libvirt.host.Host._get_domain',
|
|
lambda self, instance: DiagFakeDomain())
|
|
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
instance = objects.Instance(**self.test_instance)
|
|
actual = drvr.get_diagnostics(instance)
|
|
expect = {'cpu0_time': 15340000000,
|
|
'cpu1_time': 1640000000,
|
|
'cpu2_time': 3040000000,
|
|
'cpu3_time': 1420000000,
|
|
'memory': 280160,
|
|
'memory-actual': 220160,
|
|
'memory-rss': 200164,
|
|
'vnet0_rx': 4408,
|
|
'vnet0_rx_drop': 0,
|
|
'vnet0_rx_errors': 0,
|
|
'vnet0_rx_packets': 82,
|
|
'vnet0_tx': 0,
|
|
'vnet0_tx_drop': 0,
|
|
'vnet0_tx_errors': 0,
|
|
'vnet0_tx_packets': 0,
|
|
}
|
|
self.assertEqual(actual, expect)
|
|
|
|
lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
|
|
diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
|
|
self.useFixture(utils_fixture.TimeFixture(diags_time))
|
|
|
|
instance.launched_at = lt
|
|
actual = drvr.get_instance_diagnostics(instance)
|
|
expected = fake_diagnostics_object(with_cpus=True, with_nic=True)
|
|
self.assertDiagnosticsEqual(expected, actual)
|
|
|
|
def test_diagnostic_interfacestats_exception(self):
|
|
xml = """
|
|
<domain type='kvm'>
|
|
<devices>
|
|
<disk type='file'>
|
|
<source file='filename'/>
|
|
<target dev='vda' bus='virtio'/>
|
|
</disk>
|
|
<disk type='block'>
|
|
<source dev='/path/to/dev/1'/>
|
|
<target dev='vdb' bus='virtio'/>
|
|
</disk>
|
|
<interface type='network'>
|
|
<mac address='52:54:00:a4:38:38'/>
|
|
<source network='default'/>
|
|
<target dev='vnet0'/>
|
|
</interface>
|
|
</devices>
|
|
</domain>
|
|
"""
|
|
|
|
class DiagFakeDomain(FakeVirtDomain):
|
|
|
|
def __init__(self):
|
|
super(DiagFakeDomain, self).__init__(fake_xml=xml)
|
|
|
|
def vcpus(self):
|
|
return ([(0, 1, 15340000000, 0),
|
|
(1, 1, 1640000000, 0),
|
|
(2, 1, 3040000000, 0),
|
|
(3, 1, 1420000000, 0)],
|
|
[(True, False),
|
|
(True, False),
|
|
(True, False),
|
|
(True, False)])
|
|
|
|
def blockStats(self, path):
|
|
return (169, 688640, 0, 0, 1)
|
|
|
|
def interfaceStats(self, path):
|
|
raise fakelibvirt.libvirtError('interfaceStat missing')
|
|
|
|
def memoryStats(self):
|
|
return {'actual': 220160, 'rss': 200164}
|
|
|
|
def maxMemory(self):
|
|
return 280160
|
|
|
|
self.stub_out('nova.virt.libvirt.host.Host._get_domain',
|
|
lambda self, instance: DiagFakeDomain())
|
|
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
instance = objects.Instance(**self.test_instance)
|
|
actual = drvr.get_diagnostics(instance)
|
|
expect = {'cpu0_time': 15340000000,
|
|
'cpu1_time': 1640000000,
|
|
'cpu2_time': 3040000000,
|
|
'cpu3_time': 1420000000,
|
|
'vda_read': 688640,
|
|
'vda_read_req': 169,
|
|
'vda_write': 0,
|
|
'vda_write_req': 0,
|
|
'vda_errors': 1,
|
|
'vdb_read': 688640,
|
|
'vdb_read_req': 169,
|
|
'vdb_write': 0,
|
|
'vdb_write_req': 0,
|
|
'vdb_errors': 1,
|
|
'memory': 280160,
|
|
'memory-actual': 220160,
|
|
'memory-rss': 200164,
|
|
}
|
|
self.assertEqual(actual, expect)
|
|
|
|
lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
|
|
diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
|
|
self.useFixture(utils_fixture.TimeFixture(diags_time))
|
|
|
|
instance.launched_at = lt
|
|
actual = drvr.get_instance_diagnostics(instance)
|
|
expected = fake_diagnostics_object(with_cpus=True, with_disks=True)
|
|
self.assertDiagnosticsEqual(expected, actual)
|
|
|
|
def test_diagnostic_memorystats_exception(self):
|
|
xml = """
|
|
<domain type='kvm'>
|
|
<devices>
|
|
<disk type='file'>
|
|
<source file='filename'/>
|
|
<target dev='vda' bus='virtio'/>
|
|
</disk>
|
|
<disk type='block'>
|
|
<source dev='/path/to/dev/1'/>
|
|
<target dev='vdb' bus='virtio'/>
|
|
</disk>
|
|
<interface type='network'>
|
|
<mac address='52:54:00:a4:38:38'/>
|
|
<source network='default'/>
|
|
<target dev='vnet0'/>
|
|
</interface>
|
|
</devices>
|
|
</domain>
|
|
"""
|
|
|
|
class DiagFakeDomain(FakeVirtDomain):
|
|
|
|
def __init__(self):
|
|
super(DiagFakeDomain, self).__init__(fake_xml=xml)
|
|
|
|
def vcpus(self):
|
|
return ([(0, 1, 15340000000, 0),
|
|
(1, 1, 1640000000, 0),
|
|
(2, 1, 3040000000, 0),
|
|
(3, 1, 1420000000, 0)],
|
|
[(True, False),
|
|
(True, False),
|
|
(True, False),
|
|
(True, False)])
|
|
|
|
def blockStats(self, path):
|
|
return (169, 688640, 0, 0, 1)
|
|
|
|
def interfaceStats(self, path):
|
|
return (4408, 82, 0, 0, 0, 0, 0, 0)
|
|
|
|
def memoryStats(self):
|
|
raise fakelibvirt.libvirtError('memoryStats missing')
|
|
|
|
def maxMemory(self):
|
|
return 280160
|
|
|
|
self.stub_out('nova.virt.libvirt.host.Host._get_domain',
|
|
lambda self, instance: DiagFakeDomain())
|
|
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
instance = objects.Instance(**self.test_instance)
|
|
actual = drvr.get_diagnostics(instance)
|
|
expect = {'cpu0_time': 15340000000,
|
|
'cpu1_time': 1640000000,
|
|
'cpu2_time': 3040000000,
|
|
'cpu3_time': 1420000000,
|
|
'vda_read': 688640,
|
|
'vda_read_req': 169,
|
|
'vda_write': 0,
|
|
'vda_write_req': 0,
|
|
'vda_errors': 1,
|
|
'vdb_read': 688640,
|
|
'vdb_read_req': 169,
|
|
'vdb_write': 0,
|
|
'vdb_write_req': 0,
|
|
'vdb_errors': 1,
|
|
'memory': 280160,
|
|
'vnet0_rx': 4408,
|
|
'vnet0_rx_drop': 0,
|
|
'vnet0_rx_errors': 0,
|
|
'vnet0_rx_packets': 82,
|
|
'vnet0_tx': 0,
|
|
'vnet0_tx_drop': 0,
|
|
'vnet0_tx_errors': 0,
|
|
'vnet0_tx_packets': 0,
|
|
}
|
|
self.assertEqual(actual, expect)
|
|
|
|
lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
|
|
diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
|
|
self.useFixture(utils_fixture.TimeFixture(diags_time))
|
|
|
|
instance.launched_at = lt
|
|
actual = drvr.get_instance_diagnostics(instance)
|
|
expected = fake_diagnostics_object(with_cpus=True, with_disks=True,
|
|
with_nic=True)
|
|
self.assertDiagnosticsEqual(expected, actual)
|
|
|
|
def test_diagnostic_full(self):
|
|
xml = """
|
|
<domain type='kvm'>
|
|
<devices>
|
|
<disk type='file'>
|
|
<source file='filename'/>
|
|
<target dev='vda' bus='virtio'/>
|
|
</disk>
|
|
<disk type='block'>
|
|
<source dev='/path/to/dev/1'/>
|
|
<target dev='vdb' bus='virtio'/>
|
|
</disk>
|
|
<interface type='network'>
|
|
<mac address='52:54:00:a4:38:38'/>
|
|
<source network='default'/>
|
|
<target dev='vnet0'/>
|
|
</interface>
|
|
</devices>
|
|
</domain>
|
|
"""
|
|
|
|
class DiagFakeDomain(FakeVirtDomain):
|
|
|
|
def __init__(self):
|
|
super(DiagFakeDomain, self).__init__(fake_xml=xml)
|
|
|
|
def vcpus(self):
|
|
return ([(0, 1, 15340000000, 0),
|
|
(1, 1, 1640000000, 0),
|
|
(2, 1, 3040000000, 0),
|
|
(3, 1, 1420000000, 0)],
|
|
[(True, False),
|
|
(True, False),
|
|
(True, False),
|
|
(True, False)])
|
|
|
|
def blockStats(self, path):
|
|
return (169, 688640, 0, 0, 1)
|
|
|
|
def interfaceStats(self, path):
|
|
return (4408, 82, 0, 0, 0, 0, 0, 0)
|
|
|
|
def memoryStats(self):
|
|
return {'actual': 220160, 'rss': 200164}
|
|
|
|
def maxMemory(self):
|
|
return 280160
|
|
|
|
self.stub_out('nova.virt.libvirt.host.Host._get_domain',
|
|
lambda self, instance: DiagFakeDomain())
|
|
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
instance = objects.Instance(**self.test_instance)
|
|
actual = drvr.get_diagnostics(instance)
|
|
expect = {'cpu0_time': 15340000000,
|
|
'cpu1_time': 1640000000,
|
|
'cpu2_time': 3040000000,
|
|
'cpu3_time': 1420000000,
|
|
'vda_read': 688640,
|
|
'vda_read_req': 169,
|
|
'vda_write': 0,
|
|
'vda_write_req': 0,
|
|
'vda_errors': 1,
|
|
'vdb_read': 688640,
|
|
'vdb_read_req': 169,
|
|
'vdb_write': 0,
|
|
'vdb_write_req': 0,
|
|
'vdb_errors': 1,
|
|
'memory': 280160,
|
|
'memory-actual': 220160,
|
|
'memory-rss': 200164,
|
|
'vnet0_rx': 4408,
|
|
'vnet0_rx_drop': 0,
|
|
'vnet0_rx_errors': 0,
|
|
'vnet0_rx_packets': 82,
|
|
'vnet0_tx': 0,
|
|
'vnet0_tx_drop': 0,
|
|
'vnet0_tx_errors': 0,
|
|
'vnet0_tx_packets': 0,
|
|
}
|
|
self.assertEqual(actual, expect)
|
|
|
|
lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
|
|
diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
|
|
self.useFixture(utils_fixture.TimeFixture(diags_time))
|
|
|
|
instance.launched_at = lt
|
|
actual = drvr.get_instance_diagnostics(instance)
|
|
expected = fake_diagnostics_object(with_cpus=True, with_disks=True,
|
|
with_nic=True)
|
|
self.assertDiagnosticsEqual(expected, actual)
|
|
|
|
@mock.patch.object(host.Host, '_get_domain')
|
|
def test_diagnostic_full_with_multiple_interfaces(self, mock_get_domain):
|
|
xml = """
|
|
<domain type='kvm'>
|
|
<devices>
|
|
<disk type='file'>
|
|
<source file='filename'/>
|
|
<target dev='vda' bus='virtio'/>
|
|
</disk>
|
|
<disk type='block'>
|
|
<source dev='/path/to/dev/1'/>
|
|
<target dev='vdb' bus='virtio'/>
|
|
</disk>
|
|
<interface type='network'>
|
|
<mac address='52:54:00:a4:38:38'/>
|
|
<source network='default'/>
|
|
<target dev='vnet0'/>
|
|
</interface>
|
|
<interface type="bridge">
|
|
<mac address="53:55:00:a5:39:39"/>
|
|
<model type="virtio"/>
|
|
<target dev="br0"/>
|
|
</interface>
|
|
<interface type='hostdev' managed='yes'>
|
|
<mac address="54:56:00:a6:40:40"/>
|
|
<driver name='vfio'/>
|
|
</interface>
|
|
</devices>
|
|
</domain>
|
|
"""
|
|
|
|
class DiagFakeDomain(FakeVirtDomain):
|
|
|
|
def __init__(self):
|
|
super(DiagFakeDomain, self).__init__(fake_xml=xml)
|
|
|
|
def vcpus(self):
|
|
return ([(0, 1, 15340000000, 0),
|
|
(1, 1, 1640000000, 0),
|
|
(2, 1, 3040000000, 0),
|
|
(3, 1, 1420000000, 0)],
|
|
[(True, False),
|
|
(True, False),
|
|
(True, False),
|
|
(True, False)])
|
|
|
|
def blockStats(self, path):
|
|
return (169, 688640, 0, 0, 1)
|
|
|
|
def interfaceStats(self, path):
|
|
return (4408, 82, 0, 0, 0, 0, 0, 0)
|
|
|
|
def memoryStats(self):
|
|
return {'actual': 220160, 'rss': 200164}
|
|
|
|
def maxMemory(self):
|
|
return 280160
|
|
|
|
def fake_get_domain(self):
|
|
return DiagFakeDomain()
|
|
|
|
mock_get_domain.side_effect = fake_get_domain
|
|
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
instance = objects.Instance(**self.test_instance)
|
|
actual = drvr.get_diagnostics(instance)
|
|
expect = {'cpu0_time': 15340000000,
|
|
'cpu1_time': 1640000000,
|
|
'cpu2_time': 3040000000,
|
|
'cpu3_time': 1420000000,
|
|
'vda_read': 688640,
|
|
'vda_read_req': 169,
|
|
'vda_write': 0,
|
|
'vda_write_req': 0,
|
|
'vda_errors': 1,
|
|
'vdb_read': 688640,
|
|
'vdb_read_req': 169,
|
|
'vdb_write': 0,
|
|
'vdb_write_req': 0,
|
|
'vdb_errors': 1,
|
|
'memory': 280160,
|
|
'memory-actual': 220160,
|
|
'memory-rss': 200164,
|
|
'vnet0_rx': 4408,
|
|
'vnet0_rx_drop': 0,
|
|
'vnet0_rx_errors': 0,
|
|
'vnet0_rx_packets': 82,
|
|
'vnet0_tx': 0,
|
|
'vnet0_tx_drop': 0,
|
|
'vnet0_tx_errors': 0,
|
|
'vnet0_tx_packets': 0,
|
|
'br0_rx': 4408,
|
|
'br0_rx_drop': 0,
|
|
'br0_rx_errors': 0,
|
|
'br0_rx_packets': 82,
|
|
'br0_tx': 0,
|
|
'br0_tx_drop': 0,
|
|
'br0_tx_errors': 0,
|
|
'br0_tx_packets': 0,
|
|
}
|
|
self.assertEqual(actual, expect)
|
|
|
|
lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
|
|
diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
|
|
self.useFixture(utils_fixture.TimeFixture(diags_time))
|
|
|
|
instance.launched_at = lt
|
|
actual = drvr.get_instance_diagnostics(instance)
|
|
expected = fake_diagnostics_object(with_cpus=True, with_disks=True,
|
|
with_nic=True)
|
|
expected.add_nic(mac_address='53:55:00:a5:39:39',
|
|
rx_drop=0,
|
|
rx_errors=0,
|
|
rx_octets=4408,
|
|
rx_packets=82,
|
|
tx_drop=0,
|
|
tx_errors=0,
|
|
tx_octets=0,
|
|
tx_packets=0)
|
|
|
|
expected.add_nic(mac_address='54:56:00:a6:40:40')
|
|
|
|
self.assertDiagnosticsEqual(expected, actual)
|
|
|
|
    @mock.patch.object(host.Host, "list_instance_domains")
    def test_failing_vcpu_count(self, mock_list):
        """Domain can fail to return the vcpu description in case it's
        just starting up or shutting down. Make sure None is handled
        gracefully.
        """

        class DiagFakeDomain(object):
            def __init__(self, vcpus):
                # vcpus=None makes the fake raise, mimicking a domain in
                # a transitional state.
                self._vcpus = vcpus

            def vcpus(self):
                if self._vcpus is None:
                    raise fakelibvirt.libvirtError("fake-error")
                else:
                    return ([[1, 2, 3, 4]] * self._vcpus, [True] * self._vcpus)

            def ID(self):
                return 1

            def name(self):
                return "instance000001"

            def UUIDString(self):
                return "19479fee-07a5-49bb-9138-d3738280d63c"

        mock_list.return_value = [
            DiagFakeDomain(None), DiagFakeDomain(5)]

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        # 5 vCPUs from the healthy domain plus 1 more; presumably the
        # failing domain is counted via a fallback path in
        # _get_vcpu_used — confirm against the driver implementation.
        self.assertEqual(6, drvr._get_vcpu_used())
        mock_list.assert_called_with(only_guests=True, only_running=True)
    def _test_get_instance_capabilities(self, want):
        """Base test for '_get_instance_capabilities'.

        Stubs host capabilities with x86_64 (kvm+qemu), i686 (kvm) and an
        unknown 'itanic' arch, then checks the driver reports ``want``.

        :param want: the (arch, domtype, ostype) tuples expected back.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        def get_host_capabilities_stub(self):
            caps = vconfig.LibvirtConfigCaps()

            guest = vconfig.LibvirtConfigGuest()
            guest.ostype = 'hvm'
            guest.arch = fields.Architecture.X86_64
            guest.domtype = ['kvm', 'qemu']
            caps.guests.append(guest)

            guest = vconfig.LibvirtConfigGuest()
            guest.ostype = 'hvm'
            guest.arch = fields.Architecture.I686
            guest.domtype = ['kvm']
            caps.guests.append(guest)

            # Include one that is not known to nova to make sure it
            # does not trip us up.
            guest = vconfig.LibvirtConfigGuest()
            guest.ostype = 'hvm'
            guest.arch = 'itanic'
            guest.domtype = ['kvm']
            caps.guests.append(guest)

            return caps

        self.stub_out('nova.virt.libvirt.host.Host.get_capabilities',
                      get_host_capabilities_stub)

        got = drvr._get_instance_capabilities()
        self.assertEqual(want, got)
def test_get_instance_capabilities_kvm(self):
|
|
self.flags(virt_type='kvm', group='libvirt')
|
|
|
|
# Because virt_type is set to kvm, we get only
|
|
# capabilities where the hypervisor_type is kvm
|
|
want = [(fields.Architecture.X86_64, 'kvm', 'hvm'),
|
|
(fields.Architecture.I686, 'kvm', 'hvm')]
|
|
|
|
self._test_get_instance_capabilities(want)
|
|
|
|
def test_get_instance_capabilities_qemu(self):
|
|
self.flags(virt_type='qemu', group='libvirt')
|
|
|
|
# Because virt_type is set to qemu, we get only
|
|
# capabilities where the hypervisor_type is qemu
|
|
want = [(fields.Architecture.X86_64, 'qemu', 'hvm')]
|
|
|
|
self._test_get_instance_capabilities(want)
|
|
|
|
def test_set_cache_mode(self):
|
|
self.flags(disk_cachemodes=['file=directsync'], group='libvirt')
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
fake_conf = FakeConfigGuestDisk()
|
|
|
|
fake_conf.source_type = 'file'
|
|
drvr._set_cache_mode(fake_conf)
|
|
self.assertEqual(fake_conf.driver_cache, 'directsync')
|
|
|
|
def test_set_cache_mode_shareable(self):
|
|
"""Tests that when conf.shareable is True, the configuration is
|
|
ignored and the driver_cache is forced to 'none'.
|
|
"""
|
|
self.flags(disk_cachemodes=['block=writeback'], group='libvirt')
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
fake_conf = FakeConfigGuestDisk()
|
|
fake_conf.shareable = True
|
|
fake_conf.source_type = 'block'
|
|
drvr._set_cache_mode(fake_conf)
|
|
self.assertEqual('none', fake_conf.driver_cache)
|
|
|
|
def test_set_cache_mode_invalid_mode(self):
|
|
self.flags(disk_cachemodes=['file=FAKE'], group='libvirt')
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
fake_conf = FakeConfigGuestDisk()
|
|
|
|
fake_conf.source_type = 'file'
|
|
drvr._set_cache_mode(fake_conf)
|
|
self.assertIsNone(fake_conf.driver_cache)
|
|
|
|
def test_set_cache_mode_invalid_object(self):
|
|
self.flags(disk_cachemodes=['file=directsync'], group='libvirt')
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
fake_conf = FakeConfigGuest()
|
|
|
|
fake_conf.driver_cache = 'fake'
|
|
drvr._set_cache_mode(fake_conf)
|
|
self.assertEqual(fake_conf.driver_cache, 'fake')
|
|
|
|
    @mock.patch('os.unlink')
    @mock.patch.object(os.path, 'exists')
    def _test_shared_storage_detection(self, is_same,
                                       mock_exists, mock_unlink):
        """Base test for _is_storage_shared_with.

        A probe file is created on the remote host; if os.path.exists
        then sees it locally (is_same=True) the storage is shared and the
        file is unlinked locally, otherwise it is removed remotely.

        :param is_same: whether the probe file should appear locally.
        :returns: the value _is_storage_shared_with returned.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        # Different host IP than 'host' below, so the easy same-host
        # shortcut is not taken.
        drvr.get_host_ip_addr = mock.MagicMock(return_value='bar')
        mock_exists.return_value = is_same
        with test.nested(
                mock.patch.object(drvr._remotefs, 'create_file'),
                mock.patch.object(drvr._remotefs, 'remove_file')
        ) as (mock_rem_fs_create, mock_rem_fs_remove):
            result = drvr._is_storage_shared_with('host', '/path')
        mock_rem_fs_create.assert_any_call('host', mock.ANY)
        create_args, create_kwargs = mock_rem_fs_create.call_args
        # The probe file must live under the shared path being tested.
        self.assertTrue(create_args[1].startswith('/path'))
        if is_same:
            mock_unlink.assert_called_once_with(mock.ANY)
        else:
            mock_rem_fs_remove.assert_called_with('host', mock.ANY)
            remove_args, remove_kwargs = mock_rem_fs_remove.call_args
            self.assertTrue(remove_args[1].startswith('/path'))
        return result
    def test_shared_storage_detection_same_host(self):
        # Probe file visible locally -> storage is shared.
        self.assertTrue(self._test_shared_storage_detection(True))
    def test_shared_storage_detection_different_host(self):
        # Probe file not visible locally -> storage is not shared.
        self.assertFalse(self._test_shared_storage_detection(False))
    @mock.patch.object(os, 'unlink')
    @mock.patch.object(os.path, 'exists')
    @mock.patch.object(utils, 'execute')
    @mock.patch.object(libvirt_driver.LibvirtDriver, 'get_host_ip_addr',
                       return_value='foo')
    def test_shared_storage_detection_easy(self, mock_get, mock_exec,
                                           mock_exists, mock_unlink):
        """When the target host is this host, storage is trivially shared
        and no probe-file round trip is performed.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertTrue(drvr._is_storage_shared_with('foo', '/path'))
        mock_get.assert_called_once_with()
        # No remote probing should have happened on the shortcut path.
        mock_exec.assert_not_called()
        mock_exists.assert_not_called()
        mock_unlink.assert_not_called()
def test_store_pid_remove_pid(self):
|
|
instance = objects.Instance(**self.test_instance)
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
popen = mock.Mock(pid=3)
|
|
drvr.job_tracker.add_job(instance, popen.pid)
|
|
self.assertIn(3, drvr.job_tracker.jobs[instance.uuid])
|
|
drvr.job_tracker.remove_job(instance, popen.pid)
|
|
self.assertNotIn(instance.uuid, drvr.job_tracker.jobs)
|
|
|
|
    @mock.patch('nova.virt.libvirt.host.Host._get_domain')
    def test_get_domain_info_with_more_return(self, mock_get_domain):
        """get_info() tolerates dom.info() returning extra trailing
        fields beyond the ones it consumes.
        """
        instance = objects.Instance(**self.test_instance)
        dom_mock = mock.MagicMock()
        # Six fields: state, maxMem, mem, nrVirtCpu, cpuTime + one extra.
        dom_mock.info.return_value = [
            1, 2048, 737, 8, 12345, 888888
        ]
        dom_mock.ID.return_value = mock.sentinel.instance_id
        mock_get_domain.return_value = dom_mock
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        info = drvr.get_info(instance)
        self.assertEqual(1, info.state)
        self.assertEqual(mock.sentinel.instance_id, info.internal_id)
        dom_mock.info.assert_called_once_with()
        dom_mock.ID.assert_called_once_with()
        mock_get_domain.assert_called_once_with(instance)
def test_create_domain(self):
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
mock_domain = mock.MagicMock()
|
|
|
|
guest = drvr._create_domain(domain=mock_domain)
|
|
|
|
self.assertEqual(mock_domain, guest._domain)
|
|
mock_domain.createWithFlags.assert_has_calls([mock.call(0)])
|
|
|
|
    @mock.patch('nova.virt.disk.api.clean_lxc_namespace')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info')
    @mock.patch('nova.virt.disk.api.setup_container')
    @mock.patch('oslo_utils.fileutils.ensure_tree')
    @mock.patch('nova.virt.libvirt.utils.get_instance_path')
    def test_create_domain_lxc(self, mock_get_inst_path, mock_ensure_tree,
                           mock_setup_container, mock_get_info, mock_clean):
        """For LXC, _create_domain_and_network sets up the container
        rootfs, records the rootfs device in system_metadata and cleans
        the namespace once the container is RUNNING.
        """
        self.flags(virt_type='lxc', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        mock_instance = mock.MagicMock()
        inst_sys_meta = dict()
        mock_instance.system_metadata = inst_sys_meta
        mock_get_inst_path.return_value = '/tmp/'
        mock_image_backend = mock.MagicMock()
        drvr.image_backend = mock_image_backend
        mock_image = mock.MagicMock()
        mock_image.path = '/tmp/test.img'
        drvr.image_backend.by_name.return_value = mock_image
        mock_setup_container.return_value = '/dev/nbd0'
        mock_get_info.return_value = hardware.InstanceInfo(
            state=power_state.RUNNING)

        with test.nested(
            mock.patch.object(drvr, '_is_booted_from_volume',
                              return_value=False),
            mock.patch.object(drvr, '_create_domain'),
            mock.patch.object(drvr, 'plug_vifs'),
            mock.patch.object(drvr.firewall_driver, 'setup_basic_filtering'),
            mock.patch.object(drvr.firewall_driver, 'prepare_instance_filter'),
            mock.patch.object(drvr.firewall_driver, 'apply_instance_filter')):
            drvr._create_domain_and_network(self.context, 'xml',
                                            mock_instance, [])

        self.assertEqual('/dev/nbd0', inst_sys_meta['rootfs_device_name'])
        self.assertFalse(mock_instance.called)
        mock_get_inst_path.assert_has_calls([mock.call(mock_instance)])
        mock_ensure_tree.assert_has_calls([mock.call('/tmp/rootfs')])
        drvr.image_backend.by_name.assert_has_calls([mock.call(mock_instance,
                                                               'disk')])

        setup_container_call = mock.call(
            mock_image.get_model(),
            container_dir='/tmp/rootfs')
        mock_setup_container.assert_has_calls([setup_container_call])
        mock_get_info.assert_has_calls([mock.call(mock_instance)])
        # Namespace is cleaned because the container reached RUNNING.
        mock_clean.assert_has_calls([mock.call(container_dir='/tmp/rootfs')])
    @mock.patch('nova.virt.disk.api.clean_lxc_namespace')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info')
    @mock.patch('nova.virt.libvirt.utils.chown_for_id_maps')
    @mock.patch('nova.virt.disk.api.setup_container')
    @mock.patch('oslo_utils.fileutils.ensure_tree')
    @mock.patch('nova.virt.libvirt.utils.get_instance_path')
    def test_create_domain_lxc_id_maps(self, mock_get_inst_path,
                                       mock_ensure_tree, mock_setup_container,
                                       mock_chown, mock_get_info, mock_clean):
        """Same as test_create_domain_lxc but with uid/gid maps set: the
        rootfs must additionally be chowned according to those maps.
        """
        self.flags(virt_type='lxc', uid_maps=["0:1000:100"],
                   gid_maps=["0:1000:100"], group='libvirt')

        def chown_side_effect(path, id_maps):
            # Assert the parsed uid/gid map objects match the flags
            # configured above (start:target:count == 0:1000:100).
            self.assertEqual('/tmp/rootfs', path)
            self.assertIsInstance(id_maps[0], vconfig.LibvirtConfigGuestUIDMap)
            self.assertEqual(0, id_maps[0].start)
            self.assertEqual(1000, id_maps[0].target)
            self.assertEqual(100, id_maps[0].count)
            self.assertIsInstance(id_maps[1], vconfig.LibvirtConfigGuestGIDMap)
            self.assertEqual(0, id_maps[1].start)
            self.assertEqual(1000, id_maps[1].target)
            self.assertEqual(100, id_maps[1].count)

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        mock_instance = mock.MagicMock()
        inst_sys_meta = dict()
        mock_instance.system_metadata = inst_sys_meta
        mock_get_inst_path.return_value = '/tmp/'
        mock_image_backend = mock.MagicMock()
        drvr.image_backend = mock_image_backend
        mock_image = mock.MagicMock()
        mock_image.path = '/tmp/test.img'
        drvr.image_backend.by_name.return_value = mock_image
        mock_setup_container.return_value = '/dev/nbd0'
        mock_chown.side_effect = chown_side_effect
        mock_get_info.return_value = hardware.InstanceInfo(
            state=power_state.RUNNING)

        with test.nested(
            mock.patch.object(drvr, '_is_booted_from_volume',
                              return_value=False),
            mock.patch.object(drvr, '_create_domain'),
            mock.patch.object(drvr, 'plug_vifs'),
            mock.patch.object(drvr.firewall_driver, 'setup_basic_filtering'),
            mock.patch.object(drvr.firewall_driver, 'prepare_instance_filter'),
            mock.patch.object(drvr.firewall_driver, 'apply_instance_filter')
        ) as (
            mock_is_booted_from_volume, mock_create_domain, mock_plug_vifs,
            mock_setup_basic_filtering, mock_prepare_instance_filter,
            mock_apply_instance_filter
        ):
            drvr._create_domain_and_network(self.context, 'xml',
                                            mock_instance, [])

        self.assertEqual('/dev/nbd0', inst_sys_meta['rootfs_device_name'])
        self.assertFalse(mock_instance.called)
        mock_get_inst_path.assert_has_calls([mock.call(mock_instance)])
        mock_ensure_tree.assert_has_calls([mock.call('/tmp/rootfs')])
        drvr.image_backend.by_name.assert_has_calls([mock.call(mock_instance,
                                                               'disk')])

        setup_container_call = mock.call(
            mock_image.get_model(),
            container_dir='/tmp/rootfs')
        mock_setup_container.assert_has_calls([setup_container_call])
        mock_get_info.assert_has_calls([mock.call(mock_instance)])
        mock_clean.assert_has_calls([mock.call(container_dir='/tmp/rootfs')])
    @mock.patch('nova.virt.disk.api.teardown_container')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info')
    @mock.patch('nova.virt.disk.api.setup_container')
    @mock.patch('oslo_utils.fileutils.ensure_tree')
    @mock.patch('nova.virt.libvirt.utils.get_instance_path')
    def test_create_domain_lxc_not_running(self, mock_get_inst_path,
                                           mock_ensure_tree,
                                           mock_setup_container,
                                           mock_get_info, mock_teardown):
        """If the LXC container is not RUNNING after creation, the
        container must be torn down instead of the namespace cleaned.
        """
        self.flags(virt_type='lxc', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        mock_instance = mock.MagicMock()
        inst_sys_meta = dict()
        mock_instance.system_metadata = inst_sys_meta
        mock_get_inst_path.return_value = '/tmp/'
        mock_image_backend = mock.MagicMock()
        drvr.image_backend = mock_image_backend
        mock_image = mock.MagicMock()
        mock_image.path = '/tmp/test.img'
        drvr.image_backend.by_name.return_value = mock_image
        mock_setup_container.return_value = '/dev/nbd0'
        # SHUTDOWN rather than RUNNING triggers the teardown path.
        mock_get_info.return_value = hardware.InstanceInfo(
            state=power_state.SHUTDOWN)

        with test.nested(
            mock.patch.object(drvr, '_is_booted_from_volume',
                              return_value=False),
            mock.patch.object(drvr, '_create_domain'),
            mock.patch.object(drvr, 'plug_vifs'),
            mock.patch.object(drvr.firewall_driver, 'setup_basic_filtering'),
            mock.patch.object(drvr.firewall_driver, 'prepare_instance_filter'),
            mock.patch.object(drvr.firewall_driver, 'apply_instance_filter')):
            drvr._create_domain_and_network(self.context, 'xml',
                                            mock_instance, [])

        self.assertEqual('/dev/nbd0', inst_sys_meta['rootfs_device_name'])
        self.assertFalse(mock_instance.called)
        mock_get_inst_path.assert_has_calls([mock.call(mock_instance)])
        mock_ensure_tree.assert_has_calls([mock.call('/tmp/rootfs')])
        drvr.image_backend.by_name.assert_has_calls([mock.call(mock_instance,
                                                               'disk')])

        setup_container_call = mock.call(
            mock_image.get_model(),
            container_dir='/tmp/rootfs')
        mock_setup_container.assert_has_calls([setup_container_call])
        mock_get_info.assert_has_calls([mock.call(mock_instance)])
        teardown_call = mock.call(container_dir='/tmp/rootfs')
        mock_teardown.assert_has_calls([teardown_call])
    def test_create_domain_define_xml_fails(self):
        """Tests that the xml is logged when defining the domain fails."""
        fake_xml = "<test>this is a test</test>"

        def fake_defineXML(xml):
            # In py2 env, xml is encoded in write_instance_config use
            # encodeutils.safe_encode, it will be decode text before encoding
            if six.PY2:
                self.assertEqual(fake_safe_decode(fake_xml), xml)
            else:
                self.assertEqual(fake_xml, xml)
            raise fakelibvirt.libvirtError('virDomainDefineXML() failed')

        def fake_safe_decode(text, *args, **kwargs):
            # Tag the text so the error log can be checked for it below.
            return text + 'safe decoded'

        self.log_error_called = False

        def fake_error(msg, *args, **kwargs):
            self.log_error_called = True
            # The failing XML (after safe_decode) must appear in the log.
            self.assertIn(fake_xml, msg % args)
            self.assertIn('safe decoded', msg % args)

        self.stub_out('oslo_utils.encodeutils.safe_decode', fake_safe_decode)
        self.stub_out('nova.virt.libvirt.guest.LOG.error', fake_error)

        self.create_fake_libvirt_mock(defineXML=fake_defineXML)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        self.assertRaises(fakelibvirt.libvirtError, drvr._create_domain,
                          fake_xml)
        self.assertTrue(self.log_error_called)
    def test_create_domain_with_flags_fails(self):
        """Tests that the xml is logged when creating the domain with flags
        fails
        """
        fake_xml = "<test>this is a test</test>"
        fake_domain = FakeVirtDomain(fake_xml)

        def fake_createWithFlags(self, launch_flags):
            raise fakelibvirt.libvirtError('virDomainCreateWithFlags() failed')

        self.log_error_called = False

        def fake_error(msg, *args, **kwargs):
            self.log_error_called = True
            # The domain XML must be included in the error log message.
            self.assertIn(fake_xml, msg % args)

        self.stub_out('nova.tests.unit.virt.libvirt.test_driver.'
                      'FakeVirtDomain.createWithFlags', fake_createWithFlags)
        self.stub_out('nova.virt.libvirt.guest.LOG.error', fake_error)

        self.create_fake_libvirt_mock()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        self.assertRaises(fakelibvirt.libvirtError, drvr._create_domain,
                          domain=fake_domain)
        self.assertTrue(self.log_error_called)
    @mock.patch('nova.privsep.libvirt.enable_hairpin')
    def test_create_domain_enable_hairpin_fails(self, mock_writefile):
        """Tests that the xml is logged when enabling hairpin mode for the
        domain fails.
        """
        # Guest.enable_hairpin is only called for nova-network.
        # TODO(mikal): remove this test when nova-net goes away
        self.flags(use_neutron=False)
        fake_xml = "<test>this is a test</test>"
        fake_domain = FakeVirtDomain(fake_xml)

        mock_writefile.side_effect = IOError

        def fake_get_interfaces(*args):
            return ["dev"]

        self.log_error_called = False

        def fake_error(msg, *args, **kwargs):
            self.log_error_called = True
            # The domain XML must be included in the error log message.
            self.assertIn(fake_xml, msg % args)

        self.stub_out('nova.virt.libvirt.guest.LOG.error', fake_error)

        self.create_fake_libvirt_mock()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.stub_out(
            'nova.virt.libvirt.guest.Guest.get_interfaces',
            fake_get_interfaces)

        # power_on=False skips domain start so only hairpin setup runs.
        self.assertRaises(IOError, drvr._create_domain, domain=fake_domain,
                          power_on=False)
        self.assertTrue(self.log_error_called)
def test_get_vnc_console(self):
|
|
instance = objects.Instance(**self.test_instance)
|
|
dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
|
|
"<devices>"
|
|
"<graphics type='vnc' port='5900'/>"
|
|
"</devices></domain>")
|
|
|
|
vdmock = mock.create_autospec(fakelibvirt.virDomain)
|
|
vdmock.XMLDesc.return_value = dummyxml
|
|
|
|
def fake_lookup(_uuid):
|
|
if _uuid == instance['uuid']:
|
|
return vdmock
|
|
self.create_fake_libvirt_mock(lookupByUUIDString=fake_lookup)
|
|
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
vnc_dict = drvr.get_vnc_console(self.context, instance)
|
|
self.assertEqual(vnc_dict.port, '5900')
|
|
vdmock.XMLDesc.assert_called_once_with(flags=0)
|
|
|
|
def test_get_vnc_console_unavailable(self):
|
|
instance = objects.Instance(**self.test_instance)
|
|
dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
|
|
"<devices></devices></domain>")
|
|
|
|
vdmock = mock.create_autospec(fakelibvirt.virDomain)
|
|
vdmock.XMLDesc.return_value = dummyxml
|
|
|
|
def fake_lookup(_uuid):
|
|
if _uuid == instance['uuid']:
|
|
return vdmock
|
|
self.create_fake_libvirt_mock(lookupByUUIDString=fake_lookup)
|
|
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
self.assertRaises(exception.ConsoleTypeUnavailable,
|
|
drvr.get_vnc_console, self.context, instance)
|
|
vdmock.XMLDesc.assert_called_once_with(flags=0)
|
|
|
|
def test_get_spice_console(self):
|
|
instance = objects.Instance(**self.test_instance)
|
|
dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
|
|
"<devices>"
|
|
"<graphics type='spice' port='5950'/>"
|
|
"</devices></domain>")
|
|
|
|
vdmock = mock.create_autospec(fakelibvirt.virDomain)
|
|
vdmock.XMLDesc.return_value = dummyxml
|
|
|
|
def fake_lookup(_uuid):
|
|
if _uuid == instance['uuid']:
|
|
return vdmock
|
|
self.create_fake_libvirt_mock(lookupByUUIDString=fake_lookup)
|
|
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
spice_dict = drvr.get_spice_console(self.context, instance)
|
|
self.assertEqual(spice_dict.port, '5950')
|
|
vdmock.XMLDesc.assert_called_once_with(flags=0)
|
|
|
|
def test_get_spice_console_unavailable(self):
|
|
instance = objects.Instance(**self.test_instance)
|
|
dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
|
|
"<devices></devices></domain>")
|
|
|
|
vdmock = mock.create_autospec(fakelibvirt.virDomain)
|
|
vdmock.XMLDesc.return_value = dummyxml
|
|
|
|
def fake_lookup(_uuid):
|
|
if _uuid == instance['uuid']:
|
|
return vdmock
|
|
self.create_fake_libvirt_mock(lookupByUUIDString=fake_lookup)
|
|
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
self.assertRaises(exception.ConsoleTypeUnavailable,
|
|
drvr.get_spice_console, self.context, instance)
|
|
vdmock.XMLDesc.assert_called_once_with(flags=0)
|
|
|
|
    def test_detach_volume_with_instance_not_found(self):
        # Test that detach_volume() method does not raise exception,
        # if the instance does not exist.

        instance = objects.Instance(**self.test_instance)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        with test.nested(
            mock.patch.object(host.Host, '_get_domain',
                              side_effect=exception.InstanceNotFound(
                                  instance_id=instance.uuid)),
            mock.patch.object(drvr, '_disconnect_volume')
        ) as (_get_domain, _disconnect_volume):
            connection_info = {'driver_volume_type': 'fake'}
            drvr.detach_volume(
                self.context, connection_info, instance, '/dev/sda')
            _get_domain.assert_called_once_with(instance)
            # Even though the domain lookup failed, the volume must still
            # be disconnected from the host.
            _disconnect_volume.assert_called_once_with(
                self.context, connection_info, instance, encryption=None)
|
|
|
|
    def _test_attach_detach_interface_get_config(self, method_name):
        """Tests that the get_config() method is properly called in
        attach_interface() and detach_interface().

        method_name: either \"attach_interface\" or \"detach_interface\"
        depending on the method to test.
        """
        # The domain lookup always succeeds with a fake domain so the
        # attach/detach path proceeds to the vif driver.
        self.stub_out('nova.virt.libvirt.host.Host._get_domain',
                      lambda self, instance: FakeVirtDomain())

        instance = objects.Instance(**self.test_instance)
        network_info = _fake_network_info(self, 1)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        fake_image_meta = objects.ImageMeta.from_dict(
            {'id': instance['image_ref']})

        if method_name == "attach_interface":
            # attach_interface() additionally sets up firewall filtering and
            # persists device metadata, so those collaborators are mocked.
            mock_setup = self.useFixture(fixtures.MockPatchObject(
                drvr.firewall_driver, 'setup_basic_filtering')).mock
            mock_build = self.useFixture(fixtures.MockPatchObject(
                drvr, '_build_device_metadata')).mock
            mock_build.return_value = objects.InstanceDeviceMetadata()
            mock_save = self.useFixture(fixtures.MockPatchObject(
                objects.Instance, 'save')).mock

        # Compute the real config first, then patch get_config to return it
        # so the call arguments can be asserted below.
        expected = drvr.vif_driver.get_config(instance, network_info[0],
                                              fake_image_meta,
                                              instance.get_flavor(),
                                              CONF.libvirt.virt_type,
                                              drvr._host)
        mock_get_config = self.useFixture(fixtures.MockPatchObject(
            drvr.vif_driver, 'get_config')).mock
        mock_get_config.return_value = expected

        if method_name == "attach_interface":
            drvr.attach_interface(self.context, instance, fake_image_meta,
                                  network_info[0])
            mock_setup.assert_called_once_with(instance, network_info)
            mock_build.assert_called_once_with(self.context, instance)
            mock_save.assert_called_once_with()
        elif method_name == "detach_interface":
            drvr.detach_interface(self.context, instance, network_info[0])
        else:
            raise ValueError("Unhandled method %s" % method_name)

        # Both code paths must ask the vif driver for the device config
        # exactly once, with the expected argument types.
        mock_get_config.assert_called_once_with(
            instance, network_info[0], test.MatchType(objects.ImageMeta),
            test.MatchType(objects.Flavor), CONF.libvirt.virt_type, drvr._host)
|
|
|
|
    @mock.patch.object(lockutils, "external_lock")
    def test_attach_interface_get_config(self, mock_lock):
        """Tests that the get_config() method is properly called in
        attach_interface().
        """
        # attach_interface() takes an external lock; replace it with a
        # plain semaphore so no lock files are touched.
        mock_lock.return_value = threading.Semaphore()

        self._test_attach_detach_interface_get_config("attach_interface")
|
|
|
|
    def test_detach_interface_get_config(self):
        """Tests that the get_config() method is properly called in
        detach_interface().
        """
        # Unlike attach, detach needs no firewall/device-metadata mocks.
        self._test_attach_detach_interface_get_config("detach_interface")
|
|
|
|
    @mock.patch.object(blockinfo, 'get_root_info')
    @mock.patch.object(blockinfo, 'get_disk_bus_for_device_type')
    def test_default_root_device_name(self, mock_get_disk, mock_get_root):
        """default_root_device_name() derives the device path from the
        root disk info returned by blockinfo.
        """
        instance = {'uuid': 'fake_instance'}
        image_meta = objects.ImageMeta.from_dict({'id': uuids.image_id})
        root_bdm = {'source_type': 'image',
                    'destination_type': 'volume',
                    'image_id': 'fake_id'}
        self.flags(virt_type='qemu', group='libvirt')

        # First call resolves the disk bus, second the cdrom bus.
        mock_get_disk.side_effect = ['virtio', 'ide']
        mock_get_root.return_value = {'dev': 'vda'}

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertEqual(drvr.default_root_device_name(instance, image_meta,
                                                       root_bdm), '/dev/vda')
        self.assertEqual(2, mock_get_disk.call_count)
        mock_get_disk.assert_has_calls([
            mock.call(instance, 'qemu', image_meta, 'disk'),
            mock.call(instance, 'qemu', image_meta, 'cdrom')])
        mock_get_root.assert_called_once_with(instance, 'qemu', image_meta,
                                              root_bdm, 'virtio', 'ide')
|
|
|
|
    @mock.patch.object(objects.BlockDeviceMapping, "save")
    def test_default_device_names_for_instance(self, save_mock):
        """default_device_names_for_instance() assigns consistent device
        names to ephemeral, swap and volume BDMs, honoring the disk bus.
        """
        instance = objects.Instance(**self.test_instance)
        instance.root_device_name = '/dev/vda'
        ephemerals = [objects.BlockDeviceMapping(
            **fake_block_device.AnonFakeDbBlockDeviceDict(
                {'device_name': 'vdb',
                 'source_type': 'blank',
                 'volume_size': 2,
                 'destination_type': 'local'}))]
        swap = [objects.BlockDeviceMapping(
            **fake_block_device.AnonFakeDbBlockDeviceDict(
                {'device_name': 'vdg',
                 'source_type': 'blank',
                 'volume_size': 512,
                 'guest_format': 'swap',
                 'destination_type': 'local'}))]
        block_device_mapping = [
            objects.BlockDeviceMapping(
                **fake_block_device.AnonFakeDbBlockDeviceDict(
                    {'source_type': 'volume',
                     'destination_type': 'volume',
                     'volume_id': 'fake-image-id',
                     'device_name': '/dev/vdxx',
                     'disk_bus': 'scsi'}))]

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        drvr.default_device_names_for_instance(instance,
                                               instance.root_device_name,
                                               ephemerals, swap,
                                               block_device_mapping)

        # Ephemeral device name was correct so no changes
        self.assertEqual('/dev/vdb', ephemerals[0].device_name)
        # Swap device name was incorrect so it was changed
        self.assertEqual('/dev/vdc', swap[0].device_name)
        # Volume device name was changed too, taking the bus into account
        self.assertEqual('/dev/sda', block_device_mapping[0].device_name)

        # Every BDM is persisted exactly once.
        self.assertEqual(3, save_mock.call_count)
|
|
|
|
    def _test_get_device_name_for_instance(self, new_bdm, expected_dev):
        """Helper: assert get_device_name_for_instance() picks
        ``expected_dev`` for ``new_bdm`` on a bare instance.
        """
        instance = objects.Instance(**self.test_instance)
        instance.root_device_name = '/dev/vda'
        instance.ephemeral_gb = 0
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        got_dev = drvr.get_device_name_for_instance(
            instance, [], new_bdm)
        self.assertEqual(expected_dev, got_dev)
|
|
|
|
def test_get_device_name_for_instance_simple(self):
|
|
new_bdm = objects.BlockDeviceMapping(
|
|
context=context,
|
|
source_type='volume', destination_type='volume',
|
|
boot_index=-1, volume_id='fake-id',
|
|
device_name=None, guest_format=None,
|
|
disk_bus=None, device_type=None)
|
|
self._test_get_device_name_for_instance(new_bdm, '/dev/vdb')
|
|
|
|
def test_get_device_name_for_instance_suggested(self):
|
|
new_bdm = objects.BlockDeviceMapping(
|
|
context=context,
|
|
source_type='volume', destination_type='volume',
|
|
boot_index=-1, volume_id='fake-id',
|
|
device_name='/dev/vdg', guest_format=None,
|
|
disk_bus=None, device_type=None)
|
|
self._test_get_device_name_for_instance(new_bdm, '/dev/vdb')
|
|
|
|
def test_get_device_name_for_instance_bus(self):
|
|
new_bdm = objects.BlockDeviceMapping(
|
|
context=context,
|
|
source_type='volume', destination_type='volume',
|
|
boot_index=-1, volume_id='fake-id',
|
|
device_name=None, guest_format=None,
|
|
disk_bus='scsi', device_type=None)
|
|
self._test_get_device_name_for_instance(new_bdm, '/dev/sda')
|
|
|
|
def test_get_device_name_for_instance_device_type(self):
|
|
new_bdm = objects.BlockDeviceMapping(
|
|
context=context,
|
|
source_type='volume', destination_type='volume',
|
|
boot_index=-1, volume_id='fake-id',
|
|
device_name=None, guest_format=None,
|
|
disk_bus=None, device_type='floppy')
|
|
self._test_get_device_name_for_instance(new_bdm, '/dev/fda')
|
|
|
|
def test_is_supported_fs_format(self):
|
|
supported_fs = [nova.privsep.fs.FS_FORMAT_EXT2,
|
|
nova.privsep.fs.FS_FORMAT_EXT3,
|
|
nova.privsep.fs.FS_FORMAT_EXT4,
|
|
nova.privsep.fs.FS_FORMAT_XFS]
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
for fs in supported_fs:
|
|
self.assertTrue(drvr.is_supported_fs_format(fs))
|
|
|
|
supported_fs = ['', 'dummy']
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
for fs in supported_fs:
|
|
self.assertFalse(drvr.is_supported_fs_format(fs))
|
|
|
|
@mock.patch("nova.objects.instance.Instance.image_meta",
|
|
new_callable=mock.PropertyMock())
|
|
@mock.patch("nova.virt.libvirt.driver.LibvirtDriver.attach_interface")
|
|
@mock.patch('nova.virt.libvirt.guest.Guest.get_interfaces')
|
|
@mock.patch('nova.virt.libvirt.host.Host.write_instance_config')
|
|
@mock.patch('nova.virt.libvirt.host.Host.get_guest')
|
|
def test_post_live_migration_at_destination(
|
|
self, mock_get_guest, mock_write_instance_config,
|
|
mock_get_interfaces, mock_attach, mock_image_meta):
|
|
instance = objects.Instance(id=1, uuid=uuids.instance)
|
|
dom = mock.MagicMock()
|
|
guest = libvirt_guest.Guest(dom)
|
|
|
|
mock_get_guest.return_value = guest
|
|
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
net_info = network_model.NetworkInfo()
|
|
mock_get_interfaces.return_type = []
|
|
drvr.post_live_migration_at_destination(mock.ANY, instance, net_info)
|
|
# Assert that we don't try to write anything to the destination node
|
|
# since the source live migrated with the VIR_MIGRATE_PERSIST_DEST flag
|
|
mock_write_instance_config.assert_not_called()
|
|
mock_attach.assert_not_called()
|
|
|
|
vif = network_model.VIF(id=uuids.port_id,
|
|
vnic_type=network_model.VNIC_TYPE_NORMAL)
|
|
vif_direct = network_model.VIF(id=uuids.port_id,
|
|
vnic_type=network_model.VNIC_TYPE_DIRECT)
|
|
|
|
net_info = network_model.NetworkInfo([vif, vif_direct])
|
|
mock_get_interfaces.return_type = [vif]
|
|
drvr.post_live_migration_at_destination(mock.ANY, instance, net_info)
|
|
mock_attach.assert_called_once_with(mock.ANY, instance,
|
|
mock_image_meta, vif_direct)
|
|
|
|
    def test_create_propagates_exceptions(self):
        """Errors raised by _create_domain bubble out of
        _create_domain_and_network (lxc path).
        """
        self.flags(virt_type='lxc', group='libvirt')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(id=1, uuid=uuids.instance,
                                    image_ref='my_fake_image')

        with test.nested(
            mock.patch.object(drvr, '_create_domain_setup_lxc'),
            mock.patch.object(drvr, '_create_domain_cleanup_lxc'),
            mock.patch.object(drvr, '_is_booted_from_volume',
                              return_value=False),
            mock.patch.object(drvr, 'plug_vifs'),
            mock.patch.object(drvr, 'firewall_driver'),
            # The domain creation itself blows up.
            mock.patch.object(drvr, '_create_domain',
                              side_effect=exception.NovaException),
            mock.patch.object(drvr, 'cleanup')):
            self.assertRaises(exception.NovaException,
                              drvr._create_domain_and_network,
                              self.context,
                              'xml',
                              instance, None)
|
|
|
|
def test_create_without_pause(self):
|
|
self.flags(virt_type='lxc', group='libvirt')
|
|
|
|
@contextlib.contextmanager
|
|
def fake_lxc_disk_handler(*args, **kwargs):
|
|
yield
|
|
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
instance = objects.Instance(**self.test_instance)
|
|
|
|
with test.nested(
|
|
mock.patch.object(drvr, '_lxc_disk_handler',
|
|
side_effect=fake_lxc_disk_handler),
|
|
mock.patch.object(drvr, 'plug_vifs'),
|
|
mock.patch.object(drvr, 'firewall_driver'),
|
|
mock.patch.object(drvr, '_create_domain'),
|
|
mock.patch.object(drvr, 'cleanup')) as (
|
|
_handler, cleanup, firewall_driver, create, plug_vifs):
|
|
domain = drvr._create_domain_and_network(self.context, 'xml',
|
|
instance, None)
|
|
self.assertEqual(0, create.call_args_list[0][1]['pause'])
|
|
self.assertEqual(0, domain.resume.call_count)
|
|
|
|
    def _test_create_with_network_events(self, neutron_failure=None,
                                         power_on=True):
        """Drive _create_domain_and_network through the network-vif-plugged
        event machinery.

        :param neutron_failure: None for success, 'timeout' to make the
            event wait raise, or 'error' to make the event report failure.
        :param power_on: whether the guest is started (events are only
            awaited when powering on).
        """
        generated_events = []

        def wait_timeout():
            # Emulates the outcome of waiting on a vif-plugged event.
            event = mock.MagicMock()
            if neutron_failure == 'timeout':
                raise eventlet.timeout.Timeout()
            elif neutron_failure == 'error':
                event.status = 'failed'
            else:
                event.status = 'completed'
            return event

        def fake_prepare(instance, name, tag):
            # Records each prepared event so it can be inspected later.
            m = mock.MagicMock()
            m.instance = instance
            m.event_name = '%s-%s' % (name, tag)
            m.wait.side_effect = wait_timeout
            generated_events.append(m)
            return m

        virtapi = manager.ComputeVirtAPI(mock.MagicMock())
        prepare = virtapi._compute.instance_events.prepare_for_instance_event
        prepare.side_effect = fake_prepare
        drvr = libvirt_driver.LibvirtDriver(virtapi, False)

        instance = objects.Instance(vm_state=vm_states.BUILDING,
                                    **self.test_instance)
        vifs = [{'id': uuids.vif_1, 'active': False},
                {'id': uuids.vif_2, 'active': False}]

        @mock.patch.object(drvr, 'plug_vifs')
        @mock.patch.object(drvr, 'firewall_driver')
        @mock.patch.object(drvr, '_create_domain')
        @mock.patch.object(drvr, 'cleanup')
        def test_create(cleanup, create, fw_driver, plug_vifs):
            domain = drvr._create_domain_and_network(self.context, 'xml',
                                                     instance, vifs,
                                                     power_on=power_on)
            plug_vifs.assert_called_with(instance, vifs)

            pause = self._get_pause_flag(drvr, vifs, power_on=power_on)
            self.assertEqual(pause,
                             create.call_args_list[0][1]['pause'])
            if pause:
                domain.resume.assert_called_once_with()
            # A fatal neutron failure must trigger a full cleanup.
            if neutron_failure and CONF.vif_plugging_is_fatal:
                cleanup.assert_called_once_with(self.context,
                                                instance, network_info=vifs,
                                                block_device_info=None)

        test_create()

        # Events are only prepared/awaited on the neutron + power-on path
        # with a non-zero plugging timeout.
        if utils.is_neutron() and CONF.vif_plugging_timeout and power_on:
            prepare.assert_has_calls([
                mock.call(instance, 'network-vif-plugged', uuids.vif_1),
                mock.call(instance, 'network-vif-plugged', uuids.vif_2)])
            for event in generated_events:
                if neutron_failure and generated_events.index(event) != 0:
                    # After the first failure no further events are waited on.
                    self.assertEqual(0, event.call_count)
                elif (neutron_failure == 'error' and
                        not CONF.vif_plugging_is_fatal):
                    event.wait.assert_called_once_with()
        else:
            self.assertEqual(0, prepare.call_count)
|
|
|
|
    @mock.patch('nova.utils.is_neutron', return_value=True)
    def test_create_with_network_events_neutron(self, is_neutron):
        # Happy path: all vif-plugged events complete successfully.
        self._test_create_with_network_events()
|
|
|
|
    @mock.patch('nova.utils.is_neutron', return_value=True)
    def test_create_with_network_events_neutron_power_off(self,
                                                          is_neutron):
        # Tests that we don't wait for events if we don't start the instance.
        self._test_create_with_network_events(power_on=False)
|
|
|
|
    @mock.patch('nova.utils.is_neutron', return_value=True)
    def test_create_with_network_events_neutron_nowait(self, is_neutron):
        # A zero plugging timeout disables event waiting entirely.
        self.flags(vif_plugging_timeout=0)
        self._test_create_with_network_events()
|
|
|
|
    @mock.patch('nova.utils.is_neutron', return_value=True)
    def test_create_with_network_events_neutron_failed_nonfatal_timeout(
            self, is_neutron):
        # With vif_plugging_is_fatal=False a timeout is tolerated.
        self.flags(vif_plugging_is_fatal=False)
        self._test_create_with_network_events(neutron_failure='timeout')
|
|
|
|
    @mock.patch('nova.utils.is_neutron', return_value=True)
    def test_create_with_network_events_neutron_failed_fatal_timeout(
            self, is_neutron):
        # vif_plugging_is_fatal defaults to True, so a timeout aborts boot.
        self.assertRaises(exception.VirtualInterfaceCreateException,
                          self._test_create_with_network_events,
                          neutron_failure='timeout')
|
|
|
|
    @mock.patch('nova.utils.is_neutron', return_value=True)
    def test_create_with_network_events_neutron_failed_nonfatal_error(
            self, is_neutron):
        # With vif_plugging_is_fatal=False a failed event is tolerated.
        self.flags(vif_plugging_is_fatal=False)
        self._test_create_with_network_events(neutron_failure='error')
|
|
|
|
    @mock.patch('nova.utils.is_neutron', return_value=True)
    def test_create_with_network_events_neutron_failed_fatal_error(
            self, is_neutron):
        # A failed event with fatal plugging aborts the boot.
        self.assertRaises(exception.VirtualInterfaceCreateException,
                          self._test_create_with_network_events,
                          neutron_failure='error')
|
|
|
|
    @mock.patch('nova.utils.is_neutron', return_value=False)
    def test_create_with_network_events_non_neutron(self, is_neutron):
        # nova-network never waits for vif-plugged events.
        self._test_create_with_network_events()
|
|
|
|
    def test_create_with_other_error(self):
        """Any exception from _create_domain triggers _cleanup_failed_start,
        forwarding the destroy_disks_on_failure flag.
        """
        drvr = libvirt_driver.LibvirtDriver(mock.MagicMock(), False)

        @mock.patch.object(drvr, 'plug_vifs')
        @mock.patch.object(drvr, 'firewall_driver')
        @mock.patch.object(drvr, '_create_domain')
        @mock.patch.object(drvr, '_cleanup_failed_start')
        def the_test(mock_cleanup, mock_create, mock_fw, mock_plug):
            instance = objects.Instance(**self.test_instance)
            mock_create.side_effect = test.TestingException
            self.assertRaises(test.TestingException,
                              drvr._create_domain_and_network,
                              self.context, 'xml', instance, [], None)
            mock_cleanup.assert_called_once_with(self.context, instance,
                                                 [], None, None, False)
            # destroy_disks_on_failure=True, used only by spawn()
            mock_cleanup.reset_mock()
            self.assertRaises(test.TestingException,
                              drvr._create_domain_and_network,
                              self.context, 'xml', instance, [], None,
                              destroy_disks_on_failure=True)
            mock_cleanup.assert_called_once_with(self.context, instance,
                                                 [], None, None, True)

        the_test()
|
|
|
|
def test_cleanup_failed_start_no_guest(self):
|
|
drvr = libvirt_driver.LibvirtDriver(mock.MagicMock(), False)
|
|
with mock.patch.object(drvr, 'cleanup') as mock_cleanup:
|
|
drvr._cleanup_failed_start(None, None, None, None, None, False)
|
|
self.assertTrue(mock_cleanup.called)
|
|
|
|
def test_cleanup_failed_start_inactive_guest(self):
|
|
drvr = libvirt_driver.LibvirtDriver(mock.MagicMock(), False)
|
|
guest = mock.MagicMock()
|
|
guest.is_active.return_value = False
|
|
with mock.patch.object(drvr, 'cleanup') as mock_cleanup:
|
|
drvr._cleanup_failed_start(None, None, None, None, guest, False)
|
|
self.assertTrue(mock_cleanup.called)
|
|
self.assertFalse(guest.poweroff.called)
|
|
|
|
def test_cleanup_failed_start_active_guest(self):
|
|
drvr = libvirt_driver.LibvirtDriver(mock.MagicMock(), False)
|
|
guest = mock.MagicMock()
|
|
guest.is_active.return_value = True
|
|
with mock.patch.object(drvr, 'cleanup') as mock_cleanup:
|
|
drvr._cleanup_failed_start(None, None, None, None, guest, False)
|
|
self.assertTrue(mock_cleanup.called)
|
|
self.assertTrue(guest.poweroff.called)
|
|
|
|
    def test_cleanup_failed_start_failed_poweroff(self):
        """Even when poweroff itself fails, cleanup() is still invoked and
        the poweroff error propagates.
        """
        drvr = libvirt_driver.LibvirtDriver(mock.MagicMock(), False)
        guest = mock.MagicMock()
        guest.is_active.return_value = True
        guest.poweroff.side_effect = test.TestingException
        with mock.patch.object(drvr, 'cleanup') as mock_cleanup:
            self.assertRaises(test.TestingException,
                              drvr._cleanup_failed_start,
                              None, None, None, None, guest, False)
            self.assertTrue(mock_cleanup.called)
            self.assertTrue(guest.poweroff.called)
|
|
|
|
def test_cleanup_failed_start_failed_poweroff_destroy_disks(self):
|
|
drvr = libvirt_driver.LibvirtDriver(mock.MagicMock(), False)
|
|
guest = mock.MagicMock()
|
|
guest.is_active.return_value = True
|
|
guest.poweroff.side_effect = test.TestingException
|
|
with mock.patch.object(drvr, 'cleanup') as mock_cleanup:
|
|
self.assertRaises(test.TestingException,
|
|
drvr._cleanup_failed_start,
|
|
None, None, None, None, guest, True)
|
|
mock_cleanup.called_once_with(None, None, network_info=None,
|
|
block_device_info=None, destroy_disks=True)
|
|
self.assertTrue(guest.poweroff.called)
|
|
|
|
    @mock.patch('os_brick.encryptors.get_encryption_metadata')
    @mock.patch('nova.virt.libvirt.blockinfo.get_info_from_bdm')
    def test_create_with_bdm(self, get_info_from_bdm, get_encryption_metadata):
        """_create_domain_and_network() plugs vifs, sets up the firewall and
        creates the domain when a block device mapping is present.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        mock_dom = mock.MagicMock()
        mock_encryption_meta = mock.MagicMock()
        get_encryption_metadata.return_value = mock_encryption_meta

        fake_xml = """
            <domain>
                <name>instance-00000001</name>
                <memory>1048576</memory>
                <vcpu>1</vcpu>
                <devices>
                    <disk type='file' device='disk'>
                        <driver name='qemu' type='raw' cache='none'/>
                        <source file='/path/fake-volume1'/>
                        <target dev='vda' bus='virtio'/>
                    </disk>
                </devices>
            </domain>
        """
        fake_volume_id = "fake-volume-id"
        connection_info = {"driver_volume_type": "fake",
                           "data": {"access_mode": "rw",
                                    "volume_id": fake_volume_id}}

        def fake_getitem(*args, **kwargs):
            # Emulates dict-style access on the mocked BDM.
            fake_bdm = {'connection_info': connection_info,
                        'mount_device': '/dev/vda'}
            return fake_bdm.get(args[0])

        mock_volume = mock.MagicMock()
        mock_volume.__getitem__.side_effect = fake_getitem
        block_device_info = {'block_device_mapping': [mock_volume]}
        network_info = [network_model.VIF(id='1'),
                        network_model.VIF(id='2', active=True)]

        with test.nested(
            mock.patch.object(drvr, 'plug_vifs'),
            mock.patch.object(drvr.firewall_driver, 'setup_basic_filtering'),
            mock.patch.object(drvr.firewall_driver,
                              'prepare_instance_filter'),
            mock.patch.object(drvr, '_create_domain'),
            mock.patch.object(drvr.firewall_driver, 'apply_instance_filter'),
        ) as (plug_vifs, setup_basic_filtering, prepare_instance_filter,
              create_domain, apply_instance_filter):
            create_domain.return_value = libvirt_guest.Guest(mock_dom)

            guest = drvr._create_domain_and_network(
                self.context, fake_xml, instance, network_info,
                block_device_info=block_device_info)

            plug_vifs.assert_called_once_with(instance, network_info)
            setup_basic_filtering.assert_called_once_with(instance,
                                                          network_info)
            prepare_instance_filter.assert_called_once_with(instance,
                                                            network_info)
            # pause depends on whether any vif is inactive (vif '1' is).
            pause = self._get_pause_flag(drvr, network_info)
            create_domain.assert_called_once_with(
                fake_xml, pause=pause, power_on=True, post_xml_callback=None)
            self.assertEqual(mock_dom, guest._domain)
|
|
|
|
    def test_get_guest_storage_config(self):
        """_get_guest_storage_config() builds devices for root, ephemeral
        and the attached volume, connecting the volume on the way.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        test_instance = copy.deepcopy(self.test_instance)
        test_instance["default_swap_device"] = None
        instance = objects.Instance(**test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        flavor = instance.get_flavor()
        conn_info = {'driver_volume_type': 'fake', 'data': {}}
        bdm = objects.BlockDeviceMapping(
            self.context,
            **fake_block_device.FakeDbBlockDeviceDict({
                'id': 1,
                'source_type': 'volume',
                'destination_type': 'volume',
                'device_name': '/dev/vdc'}))
        bdi = {'block_device_mapping':
               driver_block_device.convert_volumes([bdm])}
        # Re-bind to the driver BDM wrapper created by convert_volumes().
        bdm = bdi['block_device_mapping'][0]
        bdm['connection_info'] = conn_info
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance,
                                            image_meta,
                                            bdi)
        mock_conf = mock.MagicMock(source_path='fake')

        with test.nested(
            mock.patch.object(driver_block_device.DriverVolumeBlockDevice,
                              'save'),
            mock.patch.object(drvr, '_connect_volume'),
            mock.patch.object(drvr, '_get_volume_config',
                              return_value=mock_conf)
        ) as (volume_save, connect_volume, get_volume_config):
            devices = drvr._get_guest_storage_config(self.context, instance,
                image_meta, disk_info, False, bdi, flavor, "hvm")

            # root disk + ephemeral + the volume.
            self.assertEqual(3, len(devices))
            self.assertEqual('/dev/vdb', instance.default_ephemeral_device)
            self.assertIsNone(instance.default_swap_device)
            connect_volume.assert_called_with(self.context,
                bdm['connection_info'], instance)
            get_volume_config.assert_called_with(bdm['connection_info'],
                {'bus': 'virtio', 'type': 'disk', 'dev': 'vdc'})
            volume_save.assert_called_once_with()
|
|
|
|
def test_get_neutron_events(self):
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
network_info = [network_model.VIF(id='1'),
|
|
network_model.VIF(id='2', active=True)]
|
|
events = drvr._get_neutron_events(network_info)
|
|
self.assertEqual([('network-vif-plugged', '1')], events)
|
|
|
|
def test_unplug_vifs_ignores_errors(self):
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
|
|
with mock.patch.object(drvr, 'vif_driver') as vif_driver:
|
|
vif_driver.unplug.side_effect = exception.AgentError(
|
|
method='unplug')
|
|
drvr._unplug_vifs('inst', [1], ignore_errors=True)
|
|
vif_driver.unplug.assert_called_once_with('inst', 1)
|
|
|
|
def test_unplug_vifs_reports_errors(self):
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
|
|
with mock.patch.object(drvr, 'vif_driver') as vif_driver:
|
|
vif_driver.unplug.side_effect = exception.AgentError(
|
|
method='unplug')
|
|
self.assertRaises(exception.AgentError,
|
|
drvr.unplug_vifs, 'inst', [1])
|
|
vif_driver.unplug.assert_called_once_with('inst', 1)
|
|
|
|
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._unplug_vifs')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._undefine_domain')
    def test_cleanup_pass_with_no_mount_device(self, undefine, unplug):
        """cleanup() disconnects volumes even when a BDM has no
        mount_device recorded.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
        drvr.firewall_driver = mock.Mock()
        drvr._disconnect_volume = mock.Mock()
        fake_inst = {'name': 'foo'}
        fake_bdms = [{'connection_info': 'foo',
                      'mount_device': None}]
        with mock.patch('nova.virt.driver'
                        '.block_device_info_get_mapping',
                        return_value=fake_bdms):
            drvr.cleanup('ctxt', fake_inst, 'netinfo', destroy_disks=False)
        self.assertTrue(drvr._disconnect_volume.called)
|
|
|
|
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._unplug_vifs')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._undefine_domain')
    def test_cleanup_wants_vif_errors_ignored(self, undefine, unplug):
        """cleanup() unplugs vifs with ignore_errors=True."""
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
        fake_inst = {'name': 'foo'}
        with mock.patch.object(drvr._conn, 'lookupByUUIDString') as lookup:
            lookup.return_value = fake_inst
            # NOTE(danms): Make unplug cause us to bail early, since
            # we only care about how it was called
            unplug.side_effect = test.TestingException
            self.assertRaises(test.TestingException,
                              drvr.cleanup, 'ctxt', fake_inst, 'netinfo')
            # The third positional argument is ignore_errors=True.
            unplug.assert_called_once_with(fake_inst, 'netinfo', True)
|
|
|
|
    @mock.patch.object(libvirt_driver.LibvirtDriver, 'unfilter_instance')
    @mock.patch.object(libvirt_driver.LibvirtDriver, 'delete_instance_files',
                       return_value=True)
    @mock.patch.object(objects.Instance, 'save')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_undefine_domain')
    def test_cleanup_migrate_data_shared_block_storage(self,
                                                       _undefine_domain,
                                                       save,
                                                       delete_instance_files,
                                                       unfilter_instance):
        # Tests the cleanup method when migrate_data has
        # is_shared_block_storage=True and destroy_disks=False.
        instance = objects.Instance(self.context, **self.test_instance)
        migrate_data = objects.LibvirtLiveMigrateData(
            is_shared_block_storage=True)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
        drvr.cleanup(
            self.context, instance, network_info={}, destroy_disks=False,
            migrate_data=migrate_data, destroy_vifs=False)
        # Instance files are removed and the instance is flagged cleaned.
        delete_instance_files.assert_called_once_with(instance)
        self.assertEqual(1, int(instance.system_metadata['clean_attempts']))
        self.assertTrue(instance.cleaned)
        save.assert_called_once_with()
|
|
|
|
    @mock.patch.object(libvirt_driver.LibvirtDriver, 'unfilter_instance')
    @mock.patch.object(libvirt_driver.LibvirtDriver, 'delete_instance_files',
                       return_value=True)
    @mock.patch.object(objects.Instance, 'save')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_undefine_domain')
    def test_cleanup_instance_dir_with_rbd_workaround(self,
            _undefine_domain, save, delete_instance_files, unfilter_instance):
        """With the rbd instance-dir cleanup workaround enabled, cleanup()
        removes instance files even though disks are not destroyed.
        """
        self.flags(images_type='rbd', group='libvirt')
        self.flags(ensure_libvirt_rbd_instance_dir_cleanup=True,
                   group='workarounds')
        instance = objects.Instance(self.context, **self.test_instance)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
        # destroy_disks=False here as check_instance_shared_storage_local call
        # would return None when using the rbd imagebackend
        drvr.cleanup(self.context, instance, network_info={},
                     destroy_disks=False)
        delete_instance_files.assert_called_once_with(instance)
        self.assertEqual(1, int(instance.system_metadata['clean_attempts']))
        self.assertTrue(instance.cleaned)
        save.assert_called_once_with()
|
|
|
|
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_get_volume_encryption')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_use_native_luks')
    def test_swap_volume_native_luks_blocked(self, mock_use_native_luks,
                                             mock_get_encryption):
        """swap_volume() refuses when either side uses native LUKS
        encryption -- swapping encrypted volumes is unsupported.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
        mock_use_native_luks.return_value = True

        # dest volume is encrypted
        mock_get_encryption.side_effect = [{}, {'provider': 'luks'}]
        self.assertRaises(NotImplementedError, drvr.swap_volume, self.context,
                          {}, {}, None, None, None)

        # src volume is encrypted
        mock_get_encryption.side_effect = [{'provider': 'luks'}, {}]
        self.assertRaises(NotImplementedError, drvr.swap_volume, self.context,
                          {}, {}, None, None, None)

        # both volumes are encrypted
        mock_get_encryption.side_effect = [{'provider': 'luks'},
                                           {'provider': 'luks'}]
        self.assertRaises(NotImplementedError, drvr.swap_volume, self.context,
                          {}, {}, None, None, None)
|
|
|
|
    @mock.patch('nova.virt.libvirt.guest.BlockDevice.is_job_complete',
                return_value=True)
    def _test_swap_volume(self, mock_is_job_complete, source_type,
                          resize=False, fail=False):
        """Common driver for the _swap_volume tests.

        :param source_type: 'file' or 'block'; 'block' must add the
            VIR_DOMAIN_BLOCK_REBASE_COPY_DEV flag to the rebase call.
        :param resize: make blockResize raise to check error propagation.
        :param fail: force the swap to fail and assert that the original
            (pre-swap) domain XML is used to redefine the guest.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())

        mock_dom = mock.MagicMock()
        guest = libvirt_guest.Guest(mock_dom)

        with mock.patch.object(drvr._conn, 'defineXML',
                               create=True) as mock_define:
            srcfile = "/first/path"
            dstfile = "/second/path"
            orig_xml = six.text_type(mock.sentinel.orig_xml)
            new_xml = six.text_type(mock.sentinel.new_xml)

            mock_dom.XMLDesc.return_value = orig_xml
            mock_dom.isPersistent.return_value = True

            def fake_rebase_success(*args, **kwargs):
                # Make sure the XML is set after the rebase so we know
                # get_xml_desc was called after the update.
                mock_dom.XMLDesc.return_value = new_xml

            if not fail:
                mock_dom.blockRebase.side_effect = fake_rebase_success
                # If the swap succeeds, make sure we use the new XML to
                # redefine the domain.
                expected_xml = new_xml
            else:
                if resize:
                    mock_dom.blockResize.side_effect = test.TestingException()
                    expected_exception = test.TestingException
                else:
                    mock_dom.blockRebase.side_effect = test.TestingException()
                    expected_exception = exception.VolumeRebaseFailed
                # If the swap fails, make sure we use the original domain XML
                # to redefine the domain.
                expected_xml = orig_xml

            # Run the swap volume code.
            mock_conf = mock.MagicMock(source_type=source_type,
                                       source_path=dstfile)
            if not fail:
                drvr._swap_volume(guest, srcfile, mock_conf, 1)
            else:
                self.assertRaises(expected_exception, drvr._swap_volume, guest,
                                  srcfile, mock_conf, 1)

            # Verify we read the original persistent config.
            expected_call_count = 1
            expected_calls = [mock.call(
                flags=(fakelibvirt.VIR_DOMAIN_XML_INACTIVE |
                       fakelibvirt.VIR_DOMAIN_XML_SECURE))]
            if not fail:
                # Verify we read the updated live config.
                expected_call_count = 2
                expected_calls.append(
                    mock.call(flags=fakelibvirt.VIR_DOMAIN_XML_SECURE))
            self.assertEqual(expected_call_count, mock_dom.XMLDesc.call_count)
            mock_dom.XMLDesc.assert_has_calls(expected_calls)

            # Verify we called with the correct flags.
            expected_flags = (fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_COPY |
                              fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT)
            if source_type == 'block':
                expected_flags = (expected_flags |
                                  fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_COPY_DEV)
            mock_dom.blockRebase.assert_called_once_with(srcfile, dstfile, 0,
                                                         flags=expected_flags)

            # Verify we defined the expected XML.
            mock_define.assert_called_once_with(expected_xml)

            # Verify we called resize with the correct args.
            if resize:
                mock_dom.blockResize.assert_called_once_with(
                    srcfile, 1 * units.Gi / units.Ki)
|
|
|
|
def test_swap_volume_file(self):
|
|
self._test_swap_volume('file')
|
|
|
|
def test_swap_volume_block(self):
|
|
"""If the swapped volume is type="block", make sure that we give
|
|
libvirt the correct VIR_DOMAIN_BLOCK_REBASE_COPY_DEV flag to ensure the
|
|
correct type="block" XML is generated (bug 1691195)
|
|
"""
|
|
self._test_swap_volume('block')
|
|
|
|
def test_swap_volume_rebase_fail(self):
|
|
self._test_swap_volume('block', fail=True)
|
|
|
|
def test_swap_volume_resize_fail(self):
|
|
self._test_swap_volume('file', resize=True, fail=True)
|
|
|
|
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._disconnect_volume')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._swap_volume')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_config')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._connect_volume')
    @mock.patch('nova.virt.libvirt.host.Host.get_guest')
    def test_swap_volume(self, get_guest, connect_volume, get_volume_config,
                         swap_volume, disconnect_volume):
        """Happy path: swap_volume connects the new volume, swaps the disk
        found in the domain XML (matched by its /fake-old-volume source)
        and finally disconnects the old volume.
        """
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
        instance = objects.Instance(**self.test_instance)
        old_connection_info = {'driver_volume_type': 'fake',
                               'serial': 'old-volume-id',
                               'data': {'device_path': '/fake-old-volume',
                                        'access_mode': 'rw'}}
        new_connection_info = {'driver_volume_type': 'fake',
                               'serial': 'new-volume-id',
                               'data': {'device_path': '/fake-new-volume',
                                        'access_mode': 'rw'}}
        mock_dom = mock.MagicMock()
        guest = libvirt_guest.Guest(mock_dom)
        mock_dom.XMLDesc.return_value = """<domain>
          <devices>
            <disk type='file'>
                <source file='/fake-old-volume'/>
                <target dev='vdb' bus='virtio'/>
            </disk>
          </devices>
        </domain>
        """
        mock_dom.name.return_value = 'inst'
        mock_dom.UUIDString.return_value = 'uuid'
        get_guest.return_value = guest
        conf = mock.MagicMock(source_path='/fake-new-volume')
        get_volume_config.return_value = conf

        conn.swap_volume(self.context, old_connection_info,
                         new_connection_info, instance, '/dev/vdb', 1)

        get_guest.assert_called_once_with(instance)
        connect_volume.assert_called_once_with(self.context,
                                               new_connection_info, instance)

        swap_volume.assert_called_once_with(guest, 'vdb', conf, 1)
        disconnect_volume.assert_called_once_with(self.context,
                                                  old_connection_info,
                                                  instance)
|
|
|
|
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_get_volume_encryption')
    @mock.patch('nova.virt.libvirt.guest.BlockDevice.rebase')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._disconnect_volume')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._connect_volume')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_config')
    @mock.patch('nova.virt.libvirt.guest.Guest.get_disk')
    @mock.patch('nova.virt.libvirt.host.Host.get_guest')
    @mock.patch('nova.virt.libvirt.host.Host.write_instance_config')
    def test_swap_volume_disconnect_new_volume_on_rebase_error(self,
            write_config, get_guest, get_disk, get_volume_config,
            connect_volume, disconnect_volume, rebase, get_volume_encryption):
        """Assert that disconnect_volume is called for the new volume if an
        error is encountered while rebasing, so the new volume is not left
        connected to the host after the failure.
        """
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
        instance = objects.Instance(**self.test_instance)
        guest = libvirt_guest.Guest(mock.MagicMock())
        get_guest.return_value = guest
        get_volume_encryption.return_value = {}
        # The libvirt internal error raised from the rebase call.
        exc = fakelibvirt.make_libvirtError(fakelibvirt.libvirtError,
            'internal error', error_code=fakelibvirt.VIR_ERR_INTERNAL_ERROR)
        rebase.side_effect = exc

        self.assertRaises(exception.VolumeRebaseFailed, conn.swap_volume,
                          self.context, mock.sentinel.old_connection_info,
                          mock.sentinel.new_connection_info,
                          instance, '/dev/vdb', 0)
        connect_volume.assert_called_once_with(self.context,
            mock.sentinel.new_connection_info, instance)
        disconnect_volume.assert_called_once_with(self.context,
            mock.sentinel.new_connection_info, instance)
|
|
|
|
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_get_volume_encryption')
    @mock.patch('nova.virt.libvirt.guest.BlockDevice.is_job_complete')
    @mock.patch('nova.virt.libvirt.guest.BlockDevice.abort_job')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._disconnect_volume')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._connect_volume')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_config')
    @mock.patch('nova.virt.libvirt.guest.Guest.get_disk')
    @mock.patch('nova.virt.libvirt.host.Host.get_guest')
    @mock.patch('nova.virt.libvirt.host.Host.write_instance_config')
    def test_swap_volume_disconnect_new_volume_on_pivot_error(self,
            write_config, get_guest, get_disk, get_volume_config,
            connect_volume, disconnect_volume, abort_job, is_job_complete,
            get_volume_encryption):
        """Assert that disconnect_volume is called for the new volume if an
        error is encountered while pivoting to the new volume.
        """
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
        instance = objects.Instance(**self.test_instance)
        guest = libvirt_guest.Guest(mock.MagicMock())
        get_guest.return_value = guest
        get_volume_encryption.return_value = {}
        exc = fakelibvirt.make_libvirtError(fakelibvirt.libvirtError,
            'internal error', error_code=fakelibvirt.VIR_ERR_INTERNAL_ERROR)
        is_job_complete.return_value = True
        # First abort_job call (the pivot) succeeds, the second (rollback
        # after the failure) raises the libvirt error.
        abort_job.side_effect = [None, exc]

        self.assertRaises(exception.VolumeRebaseFailed, conn.swap_volume,
                          self.context, mock.sentinel.old_connection_info,
                          mock.sentinel.new_connection_info,
                          instance, '/dev/vdb', 0)
        connect_volume.assert_called_once_with(self.context,
            mock.sentinel.new_connection_info, instance)
        disconnect_volume.assert_called_once_with(self.context,
            mock.sentinel.new_connection_info, instance)
|
|
|
|
    @mock.patch('nova.virt.libvirt.guest.BlockDevice.is_job_complete')
    @mock.patch('nova.privsep.path.chown')
    def _test_live_snapshot(
            self, mock_chown, mock_is_job_complete,
            can_quiesce=False, require_quiesce=False):
        """Common driver for the _live_snapshot tests.

        :param can_quiesce: whether _set_quiesced succeeds; when False it
            raises InstanceQuiesceNotSupported.
        :param require_quiesce: set os_require_quiesce in the image meta so
            a quiesce failure must propagate to the caller.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
        mock_dom = mock.MagicMock()
        test_image_meta = self.test_image_meta.copy()
        if require_quiesce:
            test_image_meta = {'properties': {'os_require_quiesce': 'yes'}}

        with test.nested(
                mock.patch.object(drvr._conn, 'defineXML', create=True),
                mock.patch('nova.virt.libvirt.utils.get_disk_size'),
                mock.patch('nova.virt.libvirt.utils.get_disk_backing_file'),
                mock.patch('nova.virt.libvirt.utils.create_cow_image'),
                mock.patch('nova.virt.libvirt.utils.extract_snapshot'),
                mock.patch.object(drvr, '_set_quiesced')
        ) as (mock_define, mock_size, mock_backing, mock_create_cow,
              mock_snapshot, mock_quiesce):

            xmldoc = "<domain/>"
            srcfile = "/first/path"
            dstfile = "/second/path"
            bckfile = "/other/path"
            dltfile = dstfile + ".delta"

            mock_dom.XMLDesc.return_value = xmldoc
            mock_dom.isPersistent.return_value = True
            mock_size.return_value = 1004009
            mock_backing.return_value = bckfile
            guest = libvirt_guest.Guest(mock_dom)

            if not can_quiesce:
                mock_quiesce.side_effect = (
                    exception.InstanceQuiesceNotSupported(
                        instance_id=self.test_instance['id'], reason='test'))

            image_meta = objects.ImageMeta.from_dict(test_image_meta)

            mock_is_job_complete.return_value = True

            drvr._live_snapshot(self.context, self.test_instance, guest,
                                srcfile, dstfile, "qcow2", "qcow2", image_meta)

            # The persistent, secure view of the domain XML is read once.
            mock_dom.XMLDesc.assert_called_once_with(flags=(
                fakelibvirt.VIR_DOMAIN_XML_INACTIVE |
                fakelibvirt.VIR_DOMAIN_XML_SECURE))
            # The live snapshot is taken via a shallow rebase copy into the
            # pre-created delta file.
            mock_dom.blockRebase.assert_called_once_with(
                srcfile, dltfile, 0, flags=(
                    fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_COPY |
                    fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT |
                    fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_SHALLOW))

            mock_size.assert_called_once_with(srcfile, format="qcow2")
            mock_backing.assert_called_once_with(srcfile, basename=False,
                                                 format="qcow2")
            mock_create_cow.assert_called_once_with(bckfile, dltfile, 1004009)
            mock_chown.assert_called_once_with(dltfile, uid=os.getuid())
            mock_snapshot.assert_called_once_with(dltfile, "qcow2",
                                                  dstfile, "qcow2")
            mock_define.assert_called_once_with(xmldoc)
            # Quiesce is always attempted (True), and undone (False) only
            # when the guest actually supports it.
            mock_quiesce.assert_any_call(mock.ANY, self.test_instance,
                                         mock.ANY, True)
            if can_quiesce:
                mock_quiesce.assert_any_call(mock.ANY, self.test_instance,
                                             mock.ANY, False)
|
|
|
|
def test_live_snapshot(self):
|
|
self._test_live_snapshot()
|
|
|
|
def test_live_snapshot_with_quiesce(self):
|
|
self._test_live_snapshot(can_quiesce=True)
|
|
|
|
def test_live_snapshot_with_require_quiesce(self):
|
|
self._test_live_snapshot(can_quiesce=True, require_quiesce=True)
|
|
|
|
def test_live_snapshot_with_require_quiesce_fails(self):
|
|
self.assertRaises(exception.InstanceQuiesceNotSupported,
|
|
self._test_live_snapshot,
|
|
can_quiesce=False, require_quiesce=True)
|
|
|
|
    @mock.patch.object(libvirt_driver.LibvirtDriver, "_live_migration")
    def test_live_migration_hostname_valid(self, mock_lm):
        """A well-formed destination hostname reaches _live_migration."""
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        drvr.live_migration(self.context, self.test_instance,
                            "host1.example.com",
                            lambda x: x,
                            lambda x: x)
        self.assertEqual(1, mock_lm.call_count)
|
|
|
|
    @mock.patch.object(libvirt_driver.LibvirtDriver, "_live_migration")
    @mock.patch.object(fake_libvirt_utils, "is_valid_hostname")
    def test_live_migration_hostname_invalid(self, mock_hostname, mock_lm):
        """A hostname rejected by is_valid_hostname raises InvalidHostname
        before any migration work starts.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        mock_hostname.return_value = False
        self.assertRaises(exception.InvalidHostname,
                          drvr.live_migration,
                          self.context, self.test_instance,
                          "foo/?com=/bin/sh",
                          lambda x: x,
                          lambda x: x)
|
|
|
|
def test_live_migration_force_complete(self):
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
instance = fake_instance.fake_instance_obj(
|
|
None, name='instancename', id=1,
|
|
uuid='c83a75d4-4d53-4be5-9a40-04d9c0389ff8')
|
|
drvr.active_migrations[instance.uuid] = deque()
|
|
drvr.live_migration_force_complete(instance)
|
|
self.assertEqual(
|
|
1, drvr.active_migrations[instance.uuid].count("force-complete"))
|
|
|
|
    @mock.patch.object(host.Host, "get_connection")
    @mock.patch.object(fakelibvirt.virDomain, "abortJob")
    def test_live_migration_abort(self, mock_abort, mock_conn):
        """live_migration_abort invokes the domain's abortJob."""
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        dom = fakelibvirt.Domain(drvr._get_connection(), "<domain/>", False)
        guest = libvirt_guest.Guest(dom)
        with mock.patch.object(nova.virt.libvirt.host.Host, 'get_guest',
                               return_value=guest):
            drvr.live_migration_abort(self.test_instance)
            self.assertTrue(mock_abort.called)
|
|
|
|
    @mock.patch('os.path.exists', return_value=True)
    @mock.patch('tempfile.mkstemp')
    @mock.patch('os.close', return_value=None)
    def test_check_instance_shared_storage_local_raw(self,
                                                     mock_close,
                                                     mock_mkstemp,
                                                     mock_exists):
        """With raw local images a probe file is created in the instance
        directory and its path is returned for the shared-storage check.
        """
        instance_uuid = uuids.fake
        self.flags(images_type='raw', group='libvirt')
        self.flags(instances_path='/tmp')
        mock_mkstemp.return_value = (-1,
                                     '/tmp/{0}/file'.format(instance_uuid))
        driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        temp_file = driver.check_instance_shared_storage_local(self.context,
                                                               instance)
        self.assertEqual('/tmp/{0}/file'.format(instance_uuid),
                         temp_file['filename'])
|
|
|
|
def test_check_instance_shared_storage_local_rbd(self):
|
|
self.flags(images_type='rbd', group='libvirt')
|
|
driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
instance = objects.Instance(**self.test_instance)
|
|
self.assertIsNone(driver.
|
|
check_instance_shared_storage_local(self.context,
|
|
instance))
|
|
|
|
def test_version_to_string(self):
|
|
string_ver = libvirt_utils.version_to_string((4, 33, 173))
|
|
self.assertEqual("4.33.173", string_ver)
|
|
|
|
    def test_virtuozzo_min_version_fail(self):
        """init_host refuses to start when the parallels/Virtuozzo version
        reported by libvirt is below MIN_VIRTUOZZO_VERSION.
        """
        self.flags(virt_type='parallels', group='libvirt')
        driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        with mock.patch.object(driver._conn, 'getVersion') as mock_getver:
            # Report a version one below the supported minimum.
            mock_getver.return_value = \
                versionutils.convert_version_to_int(
                    libvirt_driver.MIN_VIRTUOZZO_VERSION) - 1

            self.assertRaises(exception.NovaException,
                              driver.init_host, 'wibble')
|
|
|
|
    @mock.patch.object(fakelibvirt.Connection, 'getVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.MIN_VIRTUOZZO_VERSION))
    def test_virtuozzo_min_version_ok(self, mock_get_virtuozzo_version):
        """init_host succeeds when the reported Virtuozzo version is
        exactly the supported minimum.
        """
        self.flags(virt_type='parallels', group='libvirt')
        driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        driver.init_host('wibble')
|
|
|
|
    def test_get_guest_config_parallels_vm(self):
        """A parallels HVM guest with ploop images gets the expected config:
        ploop root disk, ephemeral disk, interface, input, graphics, video.
        """
        self.flags(virt_type='parallels', group='libvirt')
        self.flags(images_type='ploop', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)

        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self, 1),
                                     image_meta, disk_info)
        self.assertEqual("parallels", cfg.virt_type)
        self.assertEqual(instance_ref["uuid"], cfg.uuid)
        self.assertEqual(instance_ref.flavor.memory_mb * units.Ki, cfg.memory)
        self.assertEqual(instance_ref.flavor.vcpus, cfg.vcpus)
        self.assertEqual(fields.VMMode.HVM, cfg.os_type)
        self.assertIsNone(cfg.os_root)
        self.assertEqual(6, len(cfg.devices))
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertEqual(cfg.devices[0].driver_format, "ploop")
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestInterface)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestVideo)
|
|
|
|
def test_get_guest_config_parallels_ct_rescue(self):
|
|
self._test_get_guest_config_parallels_ct(rescue=True)
|
|
|
|
def test_get_guest_config_parallels_ct(self):
|
|
self._test_get_guest_config_parallels_ct(rescue=False)
|
|
|
|
    def _test_get_guest_config_parallels_ct(self, rescue=False):
        """Common driver for the parallels container (EXE mode) config tests.

        :param rescue: when True a second ploop filesystem mounted at
            /mnt/rescue is expected before the interface/graphics/video
            devices.
        """
        self.flags(virt_type='parallels', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        ct_instance = self.test_instance.copy()
        ct_instance["vm_mode"] = fields.VMMode.EXE
        instance_ref = objects.Instance(**ct_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        if rescue:
            rescue_data = ct_instance
        else:
            rescue_data = None

        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self, 1),
                                     image_meta, {'mapping': {'disk': {}}},
                                     rescue_data)
        self.assertEqual("parallels", cfg.virt_type)
        self.assertEqual(instance_ref["uuid"], cfg.uuid)
        self.assertEqual(instance_ref.flavor.memory_mb * units.Ki, cfg.memory)
        self.assertEqual(instance_ref.flavor.vcpus, cfg.vcpus)
        self.assertEqual(fields.VMMode.EXE, cfg.os_type)
        self.assertEqual("/sbin/init", cfg.os_init_path)
        self.assertIsNone(cfg.os_root)
        if rescue:
            self.assertEqual(5, len(cfg.devices))
        else:
            self.assertEqual(4, len(cfg.devices))
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestFilesys)

        # The container root is always the first device: a ploop-backed
        # filesystem mounted at /.
        device_index = 0
        fs = cfg.devices[device_index]
        self.assertEqual(fs.source_type, "file")
        self.assertEqual(fs.driver_type, "ploop")
        self.assertEqual(fs.target_dir, "/")

        if rescue:
            device_index = 1
            fs = cfg.devices[device_index]
            self.assertEqual(fs.source_type, "file")
            self.assertEqual(fs.driver_type, "ploop")
            self.assertEqual(fs.target_dir, "/mnt/rescue")

        self.assertIsInstance(cfg.devices[device_index + 1],
                              vconfig.LibvirtConfigGuestInterface)
        self.assertIsInstance(cfg.devices[device_index + 2],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[device_index + 3],
                              vconfig.LibvirtConfigGuestVideo)
|
|
|
|
    def _test_get_guest_config_parallels_volume(self, vmmode, devices):
        """Common driver for parallels volume-backed guest config tests.

        :param vmmode: EXE (container) or HVM (VM) mode for the guest.
        :param devices: total number of devices expected in the config.
        """
        self.flags(virt_type='parallels', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        ct_instance = self.test_instance.copy()
        ct_instance["vm_mode"] = vmmode
        instance_ref = objects.Instance(**ct_instance)

        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        conn_info = {'driver_volume_type': 'fake', 'data': {}}
        bdm = objects.BlockDeviceMapping(
            self.context,
            **fake_block_device.FakeDbBlockDeviceDict(
                {'id': 0,
                 'source_type': 'volume', 'destination_type': 'volume',
                 'device_name': '/dev/sda'}))
        info = {'block_device_mapping': driver_block_device.convert_volumes(
            [bdm])}
        info['block_device_mapping'][0]['connection_info'] = conn_info

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta,
                                            info)

        with mock.patch.object(
                driver_block_device.DriverVolumeBlockDevice, 'save'
        ) as mock_save:
            cfg = drvr._get_guest_config(instance_ref,
                                         _fake_network_info(self, 1),
                                         image_meta, disk_info, None, info)
            mock_save.assert_called_once_with()

        self.assertEqual("parallels", cfg.virt_type)
        self.assertEqual(instance_ref["uuid"], cfg.uuid)
        self.assertEqual(instance_ref.flavor.memory_mb * units.Ki, cfg.memory)
        self.assertEqual(instance_ref.flavor.vcpus, cfg.vcpus)
        self.assertEqual(vmmode, cfg.os_type)
        self.assertIsNone(cfg.os_root)
        self.assertEqual(devices, len(cfg.devices))

        disk_found = False

        # The volume must appear as a disk (never a filesystem), targeting
        # sda; other disks (e.g. disk.local) are ignored.
        for dev in cfg.devices:
            result = isinstance(dev, vconfig.LibvirtConfigGuestFilesys)
            self.assertFalse(result)
            if (isinstance(dev, vconfig.LibvirtConfigGuestDisk) and
                    (dev.source_path is None or
                     'disk.local' not in dev.source_path)):
                self.assertEqual("disk", dev.source_device)
                self.assertEqual("sda", dev.target_dev)
                disk_found = True

        self.assertTrue(disk_found)
|
|
|
|
def test_get_guest_config_parallels_volume(self):
|
|
self._test_get_guest_config_parallels_volume(fields.VMMode.EXE, 4)
|
|
self._test_get_guest_config_parallels_volume(fields.VMMode.HVM, 6)
|
|
|
|
    def test_get_guest_disk_config_rbd_older_config_drive_fall_back(self):
        """Config drives fall back to 'flat' when absent from rbd.

        New config drives are stored in rbd but existing instances have
        config drives in the old location under the instances path.
        Test that the driver falls back to 'flat' for config drive if it
        doesn't exist in rbd.
        """
        self.flags(images_type='rbd', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        drvr.image_backend = mock.Mock()
        mock_rbd_image = mock.Mock()
        mock_flat_image = mock.Mock()
        mock_flat_image.libvirt_info.return_value = mock.sentinel.diskconfig
        # First by_name lookup returns the (missing) rbd image, the second
        # returns the flat fallback.
        drvr.image_backend.by_name.side_effect = [mock_rbd_image,
                                                  mock_flat_image]
        mock_rbd_image.exists.return_value = False
        instance = objects.Instance()
        disk_mapping = {'disk.config': {'bus': 'ide',
                                        'dev': 'hda',
                                        'type': 'file'}}
        flavor = objects.Flavor(extra_specs={})

        diskconfig = drvr._get_guest_disk_config(
            instance, 'disk.config', disk_mapping, flavor,
            drvr._get_disk_config_image_type())

        self.assertEqual(2, drvr.image_backend.by_name.call_count)
        call1 = mock.call(instance, 'disk.config', 'rbd')
        call2 = mock.call(instance, 'disk.config', 'flat')
        drvr.image_backend.by_name.assert_has_calls([call1, call2])
        self.assertEqual(mock.sentinel.diskconfig, diskconfig)
|
|
|
|
def _test_prepare_domain_for_snapshot(self, live_snapshot, state):
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
instance_ref = objects.Instance(**self.test_instance)
|
|
with mock.patch.object(drvr, "suspend") as mock_suspend:
|
|
drvr._prepare_domain_for_snapshot(
|
|
self.context, live_snapshot, state, instance_ref)
|
|
return mock_suspend.called
|
|
|
|
def test_prepare_domain_for_snapshot(self):
|
|
# Ensure that suspend() is only called on RUNNING or PAUSED instances
|
|
for test_power_state in power_state.STATE_MAP.keys():
|
|
if test_power_state in (power_state.RUNNING, power_state.PAUSED):
|
|
self.assertTrue(self._test_prepare_domain_for_snapshot(
|
|
False, test_power_state))
|
|
else:
|
|
self.assertFalse(self._test_prepare_domain_for_snapshot(
|
|
False, test_power_state))
|
|
|
|
def test_prepare_domain_for_snapshot_lxc(self):
|
|
self.flags(virt_type='lxc', group='libvirt')
|
|
# Ensure that suspend() is never called with LXC
|
|
for test_power_state in power_state.STATE_MAP.keys():
|
|
self.assertFalse(self._test_prepare_domain_for_snapshot(
|
|
False, test_power_state))
|
|
|
|
def test_prepare_domain_for_snapshot_live_snapshots(self):
|
|
# Ensure that suspend() is never called for live snapshots
|
|
for test_power_state in power_state.STATE_MAP.keys():
|
|
self.assertFalse(self._test_prepare_domain_for_snapshot(
|
|
True, test_power_state))
|
|
|
|
    @mock.patch('os.walk')
    @mock.patch('os.path.exists')
    @mock.patch('os.path.getsize')
    @mock.patch('os.path.isdir')
    @mock.patch('oslo_concurrency.processutils.execute')
    @mock.patch.object(host.Host, '_get_domain')
    def test_get_instance_disk_info_parallels_ct(self, mock_get_domain,
                                                 mock_execute,
                                                 mock_isdir,
                                                 mock_getsize,
                                                 mock_exists,
                                                 mock_walk):
        """Disk info for a parallels container: the ploop directory's files
        are summed for disk_size and the qemu-img output provides the
        virtual size, from which over-commit is derived.
        """
        dummyxml = ("<domain type='parallels'><name>instance-0000000a</name>"
                    "<os><type>exe</type></os>"
                    "<devices>"
                    "<filesystem type='file'>"
                    "<driver format='ploop' type='ploop'/>"
                    "<source file='/test/disk'/>"
                    "<target dir='/'/></filesystem>"
                    "</devices></domain>")

        # Fake `qemu-img info` output for the ploop root image.
        ret = ("image: /test/disk/root.hds\n"
               "file format: parallels\n"
               "virtual size: 20G (21474836480 bytes)\n"
               "disk size: 789M\n")

        self.flags(virt_type='parallels', group='libvirt')
        instance = objects.Instance(**self.test_instance)
        instance.vm_mode = fields.VMMode.EXE
        fake_dom = FakeVirtDomain(fake_xml=dummyxml)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        mock_get_domain.return_value = fake_dom
        mock_walk.return_value = [('/test/disk', [],
                                   ['DiskDescriptor.xml', 'root.hds'])]

        def getsize_sideeffect(*args, **kwargs):
            if args[0] == '/test/disk/DiskDescriptor.xml':
                return 790
            if args[0] == '/test/disk/root.hds':
                return 827326464

        mock_getsize.side_effect = getsize_sideeffect
        mock_exists.return_value = True
        mock_isdir.return_value = True
        mock_execute.return_value = (ret, '')

        info = drvr.get_instance_disk_info(instance)
        info = jsonutils.loads(info)
        self.assertEqual(info[0]['type'], 'ploop')
        self.assertEqual(info[0]['path'], '/test/disk')
        # 790 + 827326464: sum of the two files under /test/disk.
        self.assertEqual(info[0]['disk_size'], 827327254)
        # virt_disk_size - disk_size.
        self.assertEqual(info[0]['over_committed_disk_size'], 20647509226)
        self.assertEqual(info[0]['virt_disk_size'], 21474836480)
|
|
|
|
def test_get_guest_config_with_mdevs(self):
|
|
mdevs = [uuids.mdev1]
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
instance_ref = objects.Instance(**self.test_instance)
|
|
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
|
|
cfg = drvr._get_guest_config(instance_ref,
|
|
_fake_network_info(self, 1),
|
|
image_meta, {'mapping': {}},
|
|
mdevs=mdevs)
|
|
# Loop over all devices to make sure we have at least one mediated one.
|
|
for device in cfg.devices:
|
|
if isinstance(device, vconfig.LibvirtConfigGuestHostdevMDEV):
|
|
# Make sure we use the provided UUID
|
|
self.assertEqual(uuids.mdev1, device.uuid)
|
|
break
|
|
else:
|
|
assert False, "Unable to find any mediated device for the guest."
|
|
|
|
|
|
class TestGuestConfigSysinfoSerialOS(test.NoDBTestCase):
    """Tests for how the [libvirt]/sysinfo_serial option is reflected in
    the guest's SMBIOS sysinfo serial field.
    """

    def setUp(self):
        super(TestGuestConfigSysinfoSerialOS, self).setUp()

        # Don't import libvirt
        self.useFixture(fixtures.MockPatch('nova.virt.libvirt.driver.libvirt'))

        # Don't initialise the Host
        self.useFixture(fixtures.MockPatch('nova.virt.libvirt.driver.host'))

    def _test_get_guest_config_sysinfo_serial(self, expected_serial):
        """Build the sysinfo config and check every field; the serial is
        the only value that varies between the tests below.

        :param expected_serial: the serial expected in the config, or the
            sentinel string 'instance_uuid' meaning the instance's uuid.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        test_instance = _create_test_instance()
        instance_ref = objects.Instance(**test_instance)

        cfg = drvr._get_guest_config_sysinfo(instance_ref)

        self.assertIsInstance(cfg, vconfig.LibvirtConfigGuestSysinfo)
        self.assertEqual(version.vendor_string(),
                         cfg.system_manufacturer)
        self.assertEqual(version.product_string(),
                         cfg.system_product)
        self.assertEqual(version.version_string_with_package(),
                         cfg.system_version)
        if expected_serial == 'instance_uuid':
            expected_serial = instance_ref.uuid
        self.assertEqual(expected_serial,
                         cfg.system_serial)
        self.assertEqual(instance_ref['uuid'],
                         cfg.system_uuid)
        self.assertEqual("Virtual Machine",
                         cfg.system_family)

    def test_get_guest_config_sysinfo_serial_none(self):
        # sysinfo_serial=none: no serial at all.
        self.flags(sysinfo_serial="none", group="libvirt")
        self._test_get_guest_config_sysinfo_serial(None)

    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       "_get_host_sysinfo_serial_hardware")
    def test_get_guest_config_sysinfo_serial_hardware(self, mock_uuid):
        # sysinfo_serial=hardware: the host hardware UUID is used.
        self.flags(sysinfo_serial="hardware", group="libvirt")

        theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc"
        mock_uuid.return_value = theuuid

        self._test_get_guest_config_sysinfo_serial(theuuid)

    def test_get_guest_config_sysinfo_serial_os(self):
        # sysinfo_serial=os: the serial comes from /etc/machine-id.
        self.flags(sysinfo_serial="os", group="libvirt")
        theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc"
        with test.nested(
                mock.patch.object(six.moves.builtins, "open",
                                  mock.mock_open(read_data=theuuid)),
                self.patch_exists("/etc/machine-id", True)):
            self._test_get_guest_config_sysinfo_serial(theuuid)

    def test_get_guest_config_sysinfo_serial_os_empty_machine_id(self):
        # An empty /etc/machine-id is an error, not a silent fallback.
        self.flags(sysinfo_serial="os", group="libvirt")
        with test.nested(
                mock.patch.object(six.moves.builtins, "open",
                                  mock.mock_open(read_data="")),
                self.patch_exists("/etc/machine-id", True)):
            self.assertRaises(exception.NovaException,
                              self._test_get_guest_config_sysinfo_serial,
                              None)

    def test_get_guest_config_sysinfo_serial_os_no_machine_id_file(self):
        # A missing /etc/machine-id is also an error in 'os' mode.
        self.flags(sysinfo_serial="os", group="libvirt")
        with self.patch_exists("/etc/machine-id", False):
            self.assertRaises(exception.NovaException,
                              self._test_get_guest_config_sysinfo_serial,
                              None)

    # Shared canned UUID for the 'auto' mode tests below.
    theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc"

    @test.patch_exists("/etc/machine-id", False)
    def test_get_guest_config_sysinfo_serial_auto_hardware(self):
        # auto falls back to the hardware UUID when /etc/machine-id is
        # missing.
        self.flags(sysinfo_serial="auto", group="libvirt")

        with mock.patch.object(libvirt_driver.LibvirtDriver,
                               "_get_host_sysinfo_serial_hardware") \
                as mock_uuid:
            mock_uuid.return_value = self.theuuid

            self._test_get_guest_config_sysinfo_serial(self.theuuid)

    @test.patch_exists("/etc/machine-id", True)
    @test.patch_open("/etc/machine-id", theuuid)
    def test_get_guest_config_sysinfo_serial_auto_os(self):
        # auto prefers /etc/machine-id when it exists.
        self.flags(sysinfo_serial="auto", group="libvirt")
        self._test_get_guest_config_sysinfo_serial(self.theuuid)

    def test_get_guest_config_sysinfo_serial_unique(self):
        # sysinfo_serial=unique: the instance uuid itself is the serial.
        self.flags(sysinfo_serial="unique", group="libvirt")
        self._test_get_guest_config_sysinfo_serial('instance_uuid')
|
|
|
|
|
|
class HostStateTestCase(test.NoDBTestCase):
    """Verify get_available_resource() reports the (faked) host state."""

    # Canned host facts returned by the FakeConnection overrides below.
    cpu_info = {"vendor": "Intel", "model": "pentium", "arch": "i686",
                "features": ["ssse3", "monitor", "pni", "sse2", "sse",
                             "fxsr", "clflush", "pse36", "pat", "cmov",
                             "mca", "pge", "mtrr", "sep", "apic"],
                "topology": {"cores": "1", "threads": "1", "sockets": "1"}}
    instance_caps = [(fields.Architecture.X86_64, "kvm", "hvm"),
                     (fields.Architecture.I686, "kvm", "hvm")]
    pci_devices = [{
        "dev_id": "pci_0000_04_00_3",
        "address": "0000:04:10.3",
        "product_id": '1521',
        "vendor_id": '8086',
        "dev_type": fields.PciDeviceType.SRIOV_PF,
        "phys_function": None}]
    numa_topology = objects.NUMATopology(cells=[
        objects.NUMACell(
            id=1, cpuset=set([1, 2]), memory=1024, cpu_usage=0,
            memory_usage=0,
            mempages=[], siblings=[set([1]), set([2])], pinned_cpus=set([])),
        objects.NUMACell(
            id=2, cpuset=set([3, 4]), memory=1024, cpu_usage=0,
            memory_usage=0,
            mempages=[], siblings=[set([3]), set([4])], pinned_cpus=set([]))])

    class FakeConnection(libvirt_driver.LibvirtDriver):
        """Fake connection object.

        Overrides every host-introspection helper the driver uses so
        get_available_resource() returns deterministic values.
        """
        def __init__(self):
            super(HostStateTestCase.FakeConnection,
                  self).__init__(fake.FakeVirtAPI(), True)

            self._host = host.Host("qemu:///system")

            def _get_memory_mb_total():
                return 497

            def _get_memory_mb_used():
                return 88

            # Replace the Host methods directly since memory accounting
            # lives on the Host object, not the driver.
            self._host.get_memory_mb_total = _get_memory_mb_total
            self._host.get_memory_mb_used = _get_memory_mb_used

        def _get_vcpu_total(self):
            return 1

        def _get_vcpu_used(self):
            return 0

        def _get_cpu_info(self):
            return HostStateTestCase.cpu_info

        def _get_disk_over_committed_size_total(self):
            return 0

        def _get_local_gb_info(self):
            return {'total': 100, 'used': 20, 'free': 80}

        def get_host_uptime(self):
            return ('10:01:16 up 1:36, 6 users, '
                    'load average: 0.21, 0.16, 0.19')

        def _get_disk_available_least(self):
            return 13091

        def _get_instance_capabilities(self):
            return HostStateTestCase.instance_caps

        def _get_pci_passthrough_devices(self):
            return jsonutils.dumps(HostStateTestCase.pci_devices)

        def _get_mdev_capable_devices(self, types=None):
            return []

        def _get_mediated_devices(self, types=None):
            return []

        def _get_host_numa_topology(self):
            return HostStateTestCase.numa_topology

    def setUp(self):
        super(HostStateTestCase, self).setUp()
        self.useFixture(fakelibvirt.FakeLibvirtFixture())

    @mock.patch.object(fakelibvirt, "openAuth")
    def test_update_status(self, mock_open):
        mock_open.return_value = fakelibvirt.Connection("qemu:///system")

        drvr = HostStateTestCase.FakeConnection()

        stats = drvr.get_available_resource("compute1")
        # Each stat must match the canned value from FakeConnection above.
        self.assertEqual(stats["vcpus"], 1)
        self.assertEqual(stats["memory_mb"], 497)
        self.assertEqual(stats["local_gb"], 100)
        self.assertEqual(stats["vcpus_used"], 0)
        self.assertEqual(stats["memory_mb_used"], 88)
        self.assertEqual(stats["local_gb_used"], 20)
        self.assertEqual(stats["hypervisor_type"], 'QEMU')
        self.assertEqual(stats["hypervisor_version"],
                         fakelibvirt.FAKE_QEMU_VERSION)
        self.assertEqual(stats["hypervisor_hostname"], 'compute1')
        # cpu_info is round-tripped through JSON; the arch is normalized
        # to the fields.Architecture constant.
        cpu_info = jsonutils.loads(stats["cpu_info"])
        self.assertEqual(cpu_info,
                {"vendor": "Intel", "model": "pentium",
                 "arch": fields.Architecture.I686,
                 "features": ["ssse3", "monitor", "pni", "sse2", "sse",
                              "fxsr", "clflush", "pse36", "pat", "cmov",
                              "mca", "pge", "mtrr", "sep", "apic"],
                 "topology": {"cores": "1", "threads": "1", "sockets": "1"}
                 })
        self.assertEqual(stats["disk_available_least"], 80)
        self.assertEqual(jsonutils.loads(stats["pci_passthrough_devices"]),
                         HostStateTestCase.pci_devices)
        self.assertEqual(objects.NUMATopology.obj_from_db_obj(
                            stats['numa_topology']),
                         HostStateTestCase.numa_topology)
|
|
|
|
|
|
class TestUpdateProviderTree(test.NoDBTestCase):
    # Canned compute-node resource totals returned by the mocked driver
    # helpers in the tests below.
    vcpus = 24
    memory_mb = 1024
    disk_gb = 200
    # All known CPU traits start disabled; setUp enables two of them.
    cpu_traits = {t: False for t in libvirt_utils.CPU_TRAITS_MAPPING.values()}

    def setUp(self):
        super(TestUpdateProviderTree, self).setUp()
        self.useFixture(fakelibvirt.FakeLibvirtFixture())
        self.driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        # create compute node resource provider
        self.cn_rp = dict(
            uuid=uuids.cn,
            name='compute-node',
        )
        # create shared storage resource provider
        self.shared_rp = dict(
            uuid=uuids.shared_storage,
            name='shared_storage_rp',
        )

        self.pt = provider_tree.ProviderTree()
        self.pt.new_root(self.cn_rp['name'], self.cn_rp['uuid'], generation=0)
        self.pt.new_root(self.shared_rp['name'], self.shared_rp['uuid'],
                         generation=0)

        # NOTE(review): this mutates the *class-level* cpu_traits dict that
        # is shared between tests (and referenced by mock decorators).
        # Benign today because the same two keys are always set True, but a
        # per-test copy would be safer — confirm before relying on it.
        self.cpu_traits['HW_CPU_X86_AVX512F'] = True
        self.cpu_traits['HW_CPU_X86_BMI'] = True
|
    def _get_inventory(self):
        # Expected inventory for the compute node root provider, matching
        # the mocked vcpu/memory/disk totals and nova's default allocation
        # ratios (cpu 16.0, ram 1.5, disk 1.0) and reserved amounts.
        return {
            orc.VCPU: {
                'total': self.vcpus,
                'min_unit': 1,
                'max_unit': self.vcpus,
                'step_size': 1,
                'allocation_ratio': 16.0,
                'reserved': 0,
            },
            orc.MEMORY_MB: {
                'total': self.memory_mb,
                'min_unit': 1,
                'max_unit': self.memory_mb,
                'step_size': 1,
                'allocation_ratio': 1.5,
                'reserved': 512,
            },
            orc.DISK_GB: {
                'total': self.disk_gb,
                'min_unit': 1,
                'max_unit': self.disk_gb,
                'step_size': 1,
                'allocation_ratio': 1.0,
                'reserved': 0,
            },
        }
|
|
|
|
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_cpu_traits',
                new=mock.Mock(return_value=cpu_traits))
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_gpu_inventories')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_local_gb_info',
                return_value={'total': disk_gb})
    @mock.patch('nova.virt.libvirt.host.Host.get_memory_mb_total',
                return_value=memory_mb)
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_vcpu_total',
                return_value=vcpus)
    def _test_update_provider_tree(self, mock_vcpu, mock_mem, mock_disk,
                                   mock_gpu_invs, gpu_invs=None):
        # Common driver invocation used by the tests below; pass gpu_invs
        # to simulate enabled vGPU types with per-device inventories.
        if gpu_invs:
            self.flags(enabled_vgpu_types=['nvidia-11'], group='devices')
            mock_gpu_invs.return_value = gpu_invs
        self.driver.update_provider_tree(self.pt,
                                         self.cn_rp['name'])
|
|
|
|
    def test_update_provider_tree(self):
        # Baseline: the root provider gets the standard inventory and only
        # the two CPU traits enabled in setUp.
        self._test_update_provider_tree()
        self.assertEqual(self._get_inventory(),
                         (self.pt.data(self.cn_rp['uuid'])).inventory)
        self.assertEqual(set(['HW_CPU_X86_AVX512F', 'HW_CPU_X86_BMI']),
                         self.pt.data(self.cn_rp['uuid']).traits)
|
|
|
|
    def test_update_provider_tree_with_vgpus(self):
        # Two pGPU devices with different vGPU capacities should each get
        # their own child provider under the compute node root.
        pci_devices = ['pci_0000_06_00_0', 'pci_0000_07_00_0']
        gpu_inventory_dicts = {
            pci_devices[0]: {'total': 16,
                             'max_unit': 16,
                             'min_unit': 1,
                             'step_size': 1,
                             'reserved': 0,
                             'allocation_ratio': 1.0,
                             },
            pci_devices[1]: {'total': 8,
                             'max_unit': 8,
                             'min_unit': 1,
                             'step_size': 1,
                             'reserved': 0,
                             'allocation_ratio': 1.0,
                             },
        }
        self._test_update_provider_tree(gpu_invs=gpu_inventory_dicts)
        inventory = self._get_inventory()
        # root compute node provider inventory is unchanged
        self.assertEqual(inventory,
                         (self.pt.data(self.cn_rp['uuid'])).inventory)
        # We should have two new pGPU child providers in the tree under the
        # compute node root provider.
        compute_node_tree_uuids = self.pt.get_provider_uuids(
            self.cn_rp['name'])
        self.assertEqual(3, len(compute_node_tree_uuids))
        # Create a default GPU inventory with no total and max_unit amounts yet
        default_gpu_inventory = {
            orc.VGPU: {
                'step_size': 1, 'min_unit': 1, 'reserved': 0,
                'allocation_ratio': 1.0
            }
        }
        # The pGPU child providers should be any item in the list but the first
        # which is the root provider UUID
        for rp_uuid in compute_node_tree_uuids[1:]:
            pgpu_provider_data = self.pt.data(rp_uuid)
            # Identify which PCI device is related to this Resource Provider
            pci_device = (pci_devices[0]
                          if pci_devices[0] in pgpu_provider_data.name
                          else pci_devices[1])
            self.assertEqual('%s_%s' % (self.cn_rp['name'], pci_device),
                             pgpu_provider_data.name)
            # NOTE(review): .copy() is shallow, so both iterations mutate the
            # same nested orc.VGPU dict. Works because total/max_unit are
            # overwritten before each comparison, but copy.deepcopy would be
            # less surprising — confirm.
            pgpu_inventory = default_gpu_inventory.copy()
            inventory_dict = gpu_inventory_dicts[pci_device]
            pgpu_inventory[orc.VGPU][
                'total'] = inventory_dict['total']
            pgpu_inventory[orc.VGPU][
                'max_unit'] = inventory_dict['max_unit']
            self.assertEqual(pgpu_inventory, pgpu_provider_data.inventory)
|
|
|
|
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_local_gb_info',
                return_value={'total': disk_gb})
    @mock.patch('nova.virt.libvirt.host.Host.get_memory_mb_total',
                return_value=memory_mb)
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_vcpu_total',
                return_value=vcpus)
    # TODO(efried): Bug #1784020
    @unittest.expectedFailure
    def test_update_provider_tree_for_shared_disk_gb_resource(
            self, mock_vcpu, mock_mem, mock_disk):
        """Test to check DISK_GB is reported from shared resource
        provider.
        """

        shared_rp_inv = {
            orc.DISK_GB: {
                'total': self.disk_gb,
                'min_unit': 1,
                'max_unit': self.disk_gb,
                'step_size': 1,
            }
        }
        # report inventory for shared storage resource provider
        self.pt.update_inventory(self.shared_rp['uuid'], shared_rp_inv)

        # add trait to shared storage resource provider
        self.pt.update_traits(self.shared_rp['uuid'],
                              ['MISC_SHARES_VIA_AGGREGATE'])

        self.driver.update_provider_tree(self.pt,
                                         self.cn_rp['name'])

        inventory = self._get_inventory()
        # Remove DISK_GB resource from inventory as you don't expect it to be
        # reported by the compute node resource provider.
        del inventory[orc.DISK_GB]

        self.assertEqual(inventory,
                         (self.pt.data(self.cn_rp['uuid'])).inventory)
        self.assertEqual(shared_rp_inv,
                         (self.pt.data(self.shared_rp['uuid'])).inventory)
|
|
|
|
    def test_update_provider_tree_with_file_backed_memory(self):
        # file_backed_memory should not change the reported inventory.
        self.flags(file_backed_memory=1024,
                   group="libvirt")
        self._test_update_provider_tree()
        self.assertEqual(self._get_inventory(),
                         (self.pt.data(self.cn_rp['uuid'])).inventory)
|
|
|
|
    def test_update_provider_tree_with_cpu_traits(self):
        # These two traits should be unset when update_provider_tree is called
        self.pt.add_traits(self.cn_rp['uuid'],
                           'HW_CPU_X86_VMX', 'HW_CPU_X86_XOP')
        self._test_update_provider_tree()
        # Only the traits enabled in setUp survive the update.
        self.assertEqual(set(['HW_CPU_X86_AVX512F', 'HW_CPU_X86_BMI']),
                         self.pt.data(self.cn_rp['uuid']).traits)
|
|
|
|
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_cpu_traits',
                new=mock.Mock(return_value=cpu_traits))
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                '_get_mediated_device_information')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                '_get_all_assigned_mediated_devices')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_gpu_inventories')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_local_gb_info',
                return_value={'total': disk_gb})
    @mock.patch('nova.virt.libvirt.host.Host.get_memory_mb_total',
                return_value=memory_mb)
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_vcpu_total',
                return_value=vcpus)
    def test_update_provider_tree_for_vgpu_reshape(
            self, mock_vcpu, mock_mem, mock_disk, mock_gpus, mock_get_devs,
            mock_get_mdev_info):
        """Tests the VGPU reshape scenario."""
        self.flags(enabled_vgpu_types=['nvidia-11'], group='devices')
        # Let's assume we have two PCI devices each having 4 pGPUs for this
        # type
        pci_devices = ['pci_0000_06_00_0', 'pci_0000_07_00_0']
        gpu_inventory_dicts = {
            pci_devices[0]: {'total': 4,
                             'max_unit': 4,
                             'min_unit': 1,
                             'step_size': 1,
                             'reserved': 0,
                             'allocation_ratio': 1.0,
                             },
            pci_devices[1]: {'total': 4,
                             'max_unit': 4,
                             'min_unit': 1,
                             'step_size': 1,
                             'reserved': 0,
                             'allocation_ratio': 1.0,
                             },
        }
        mock_gpus.return_value = gpu_inventory_dicts
        # Fake the fact that we have one vGPU allocated to one instance and
        # this vGPU is on the first PCI device
        mock_get_devs.return_value = {uuids.mdev1: uuids.consumer1}
        mock_get_mdev_info.side_effect = [
            {"dev_id": "mdev_fake",
             "uuid": uuids.mdev1,
             "parent": pci_devices[0],
             "type": "nvidia-11",
             "iommu_group": 12
             }]
        # First create a provider tree with VGPU inventory on the root node
        # provider. Since we have 2 devices with 4 pGPUs each, the total is 8
        # as we were flattening all resources in one single inventory before
        inventory = self._get_inventory()
        vgpu_inventory = {
            orc.VGPU: {
                'step_size': 1, 'min_unit': 1, 'max_unit': 8, 'total': 8
            }
        }
        inventory.update(vgpu_inventory)
        self.pt.update_inventory(self.cn_rp['uuid'], inventory)
        # Call update_provider_tree which will raise ReshapeNeeded because
        # there is VGPU inventory on the root node provider.
        self.assertRaises(exception.ReshapeNeeded,
                          self.driver.update_provider_tree,
                          self.pt, self.cn_rp['name'])
        # Now make up some fake allocations to pass back to the upt method
        # for the reshape.
        allocations = {
            uuids.consumer1: {
                'allocations': {
                    # This consumer has ram and vgpu allocations on the root
                    # node provider and should be changed.
                    self.cn_rp['uuid']: {
                        'resources': {
                            orc.MEMORY_MB: 512,
                            orc.VGPU: 1
                        }
                    }
                }
            },
            uuids.consumer2: {
                'allocations': {
                    # This consumer has ram and vcpu allocations on the root
                    # node provider and should not be changed.
                    self.cn_rp['uuid']: {
                        'resources': {
                            orc.MEMORY_MB: 256,
                            orc.VCPU: 2
                        }
                    }
                }
            }
        }
        # Keep an untouched copy so we can verify consumer2 is unchanged.
        original_allocations = copy.deepcopy(allocations)
        # Initiate the reshape.
        self.driver.update_provider_tree(
            self.pt, self.cn_rp['name'], allocations=allocations)
        # We should have two new VGPU child providers in the tree under the
        # compute node root provider.
        compute_node_tree_uuids = self.pt.get_provider_uuids(
            self.cn_rp['name'])
        self.assertEqual(3, len(compute_node_tree_uuids))
        rp_per_pci_device = {}
        # The VGPU child providers should be the 2nd and 3rd UUIDs in that list
        for rp_uuid in compute_node_tree_uuids[1:]:
            # The VGPU inventory should be on the VGPU child provider
            pgpu_provider_data = self.pt.data(rp_uuid)
            # We want to map the PCI device with the RP UUID
            if pci_devices[0] in pgpu_provider_data.name:
                rp_per_pci_device[pci_devices[0]] = rp_uuid
            elif pci_devices[1] in pgpu_provider_data.name:
                rp_per_pci_device[pci_devices[1]] = rp_uuid
        # Make sure we have two child resource providers
        self.assertEqual(2, len(rp_per_pci_device))

        # The compute node root provider should not have VGPU inventory.
        del inventory[orc.VGPU]
        self.assertEqual(inventory, self.pt.data(self.cn_rp['uuid']).inventory)
        # consumer1 should now have allocations against two providers,
        # MEMORY_MB on the root compute node provider and VGPU on the child
        # provider.
        consumer1_allocs = allocations[uuids.consumer1]['allocations']
        self.assertEqual(2, len(consumer1_allocs))
        self.assertEqual({orc.MEMORY_MB: 512},
                         consumer1_allocs[self.cn_rp['uuid']]['resources'])
        # Make sure the VGPU allocation moved to the corresponding child RP
        self.assertEqual(
            {orc.VGPU: 1},
            consumer1_allocs[rp_per_pci_device[pci_devices[0]]]['resources'])
        # The allocations on consumer2 should be unchanged.
        self.assertEqual(original_allocations[uuids.consumer2],
                         allocations[uuids.consumer2])
|
|
|
|
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_cpu_traits',
                new=mock.Mock(return_value=cpu_traits))
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_gpu_inventories')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_local_gb_info',
                return_value={'total': disk_gb})
    @mock.patch('nova.virt.libvirt.host.Host.get_memory_mb_total',
                return_value=memory_mb)
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_vcpu_total',
                return_value=vcpus)
    def test_update_provider_tree_for_vgpu_reshape_fails(
            self, mock_vcpu, mock_mem, mock_disk, mock_gpus):
        """Tests the VGPU reshape failure scenario where VGPU allocations
        are not on the root compute node provider as expected.
        """
        self.flags(enabled_vgpu_types=['nvidia-11'], group='devices')
        # Let's assume we have two PCI devices each having 4 pGPUs for this
        # type
        pci_devices = ['pci_0000_06_00_0', 'pci_0000_07_00_0']
        gpu_inventory_dicts = {
            pci_devices[0]: {'total': 4,
                             'max_unit': 4,
                             'min_unit': 1,
                             'step_size': 1,
                             'reserved': 0,
                             'allocation_ratio': 1.0,
                             },
            pci_devices[1]: {'total': 4,
                             'max_unit': 4,
                             'min_unit': 1,
                             'step_size': 1,
                             'reserved': 0,
                             'allocation_ratio': 1.0,
                             },
        }
        mock_gpus.return_value = gpu_inventory_dicts
        # First create a provider tree with VGPU inventory on the root node
        # provider.
        inventory = self._get_inventory()
        vgpu_inventory = {
            orc.VGPU: {
                'step_size': 1, 'min_unit': 1, 'max_unit': 8, 'total': 8
            }
        }
        inventory.update(vgpu_inventory)
        self.pt.update_inventory(self.cn_rp['uuid'], inventory)
        # Now make up some fake allocations to pass back to the upt method
        # for the reshape.
        allocations = {
            uuids.consumer1: {
                'allocations': {
                    # This consumer has invalid VGPU allocations on a non-root
                    # compute node provider.
                    uuids.other_rp: {
                        'resources': {
                            orc.MEMORY_MB: 512,
                            orc.VGPU: 1
                        }
                    }
                }
            }
        }
        # Initiate the reshape; it must fail because the VGPU allocation is
        # not on the root compute node provider.
        ex = self.assertRaises(exception.ReshapeFailed,
                               self.driver.update_provider_tree,
                               self.pt, self.cn_rp['name'],
                               allocations=allocations)
        self.assertIn('Unexpected VGPU resource allocation on provider %s'
                      % uuids.other_rp, six.text_type(ex))
|
|
|
|
|
|
class TraitsComparisonMixin(object):
    """Helper mixin for asserting on CPU trait dictionaries."""

    def assertTraitsEqual(self, expected, actual):
        """Assert that exactly the traits in *expected* are enabled.

        Builds the full trait->bool mapping over every known CPU trait,
        marking only those present in *expected* as True, then compares
        it against *actual*.
        """
        exp = {}
        for trait in libvirt_utils.CPU_TRAITS_MAPPING.values():
            exp[trait] = trait in expected
        self.assertEqual(exp, actual)
|
|
|
|
|
|
class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
    """Test for nova.virt.libvirt.libvirt_driver.LibvirtDriver."""
    def setUp(self):
        super(LibvirtDriverTestCase, self).setUp()
        self.flags(sysinfo_serial="none", group="libvirt")
        # Use a throwaway temp dir so tests never touch the real
        # instances path.
        self.flags(instances_path=self.useFixture(fixtures.TempDir()).path)
        self.useFixture(fakelibvirt.FakeLibvirtFixture())
        os_vif.initialize()

        self.drvr = libvirt_driver.LibvirtDriver(
            fake.FakeVirtAPI(), read_only=True)
        self.context = context.get_admin_context()
        self.test_image_meta = {
            "disk_format": "raw",
        }
|
|
|
|
    def _create_instance(self, params=None):
        """Create a test instance.

        :param params: optional dict of instance field overrides; a
            'flavor' sub-dict (popped before use) overrides flavor fields.
        :returns: a fake Instance object with metadata/system_metadata/
            pci_devices preloaded and db-touching fields nulled out.
        """
        if not params:
            params = {}

        flavor = objects.Flavor(memory_mb=512,
                                swap=0,
                                vcpu_weight=None,
                                root_gb=10,
                                id=2,
                                name=u'm1.tiny',
                                ephemeral_gb=20,
                                rxtx_factor=1.0,
                                flavorid=u'1',
                                vcpus=1,
                                extra_specs={})
        flavor.update(params.pop('flavor', {}))

        inst = {}
        inst['id'] = 1
        inst['uuid'] = uuids.fake_instance_id
        inst['os_type'] = 'linux'
        inst['image_ref'] = uuids.fake_image_ref
        inst['reservation_id'] = 'r-fakeres'
        inst['user_id'] = 'fake'
        inst['project_id'] = 'fake'
        inst['instance_type_id'] = 2
        inst['ami_launch_index'] = 0
        inst['host'] = 'host1'
        # Keep the disk sizes consistent with the flavor (including any
        # caller-supplied flavor overrides applied above).
        inst['root_gb'] = flavor.root_gb
        inst['ephemeral_gb'] = flavor.ephemeral_gb
        inst['config_drive'] = True
        inst['kernel_id'] = 2
        inst['ramdisk_id'] = 3
        inst['key_data'] = 'ABCDEFG'
        inst['system_metadata'] = {}
        inst['metadata'] = {}
        inst['task_state'] = None

        inst.update(params)

        instance = fake_instance.fake_instance_obj(
            self.context, expected_attrs=['metadata', 'system_metadata',
                                          'pci_devices'],
            flavor=flavor, **inst)

        # Attributes which we need to be set so they don't touch the db,
        # but it's not worth the effort to fake properly
        for field in ['numa_topology', 'vcpu_model', 'trusted_certs']:
            setattr(instance, field, None)

        return instance
|
|
|
|
    @mock.patch(('nova.virt.libvirt.driver.LibvirtDriver.'
                 '_get_instance_disk_info'), return_value=[])
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._destroy')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_host_ip_addr',
                return_value='10.0.0.1')
    @mock.patch(('nova.virt.libvirt.driver.LibvirtDriver.'
                 '_is_storage_shared_with'), return_value=False)
    @mock.patch('os.rename')
    @mock.patch('os.path.exists', return_value=True)
    @mock.patch('oslo_concurrency.processutils.execute',
                side_effect=test.TestingException)
    def test_migrate_disk_and_power_off_exception(
            self, mock_execute, mock_exists, mock_rename, mock_is_shared,
            mock_get_host_ip, mock_destroy, mock_get_disk_info):
        """Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
        .migrate_disk_and_power_off.

        A failing execute() must propagate out of the migration.
        """

        ins_ref = self._create_instance()
        flavor = {'root_gb': 10, 'ephemeral_gb': 20}
        flavor_obj = objects.Flavor(**flavor)

        self.assertRaises(test.TestingException,
                          self.drvr.migrate_disk_and_power_off,
                          context.get_admin_context(), ins_ref, '10.0.0.2',
                          flavor_obj, None)
|
|
|
|
    @mock.patch(('nova.virt.libvirt.driver.LibvirtDriver.'
                 '_get_instance_disk_info'))
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._destroy')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_host_ip_addr',
                return_value='10.0.0.1')
    @mock.patch(('nova.virt.libvirt.driver.LibvirtDriver.'
                 '_is_storage_shared_with'), return_value=False)
    @mock.patch('os.rename')
    @mock.patch('os.path.exists', return_value=True)
    @mock.patch('oslo_concurrency.processutils.execute')
    def _test_migrate_disk_and_power_off(
            self, ctxt, flavor_obj, mock_execute, mock_exists, mock_rename,
            mock_is_shared, mock_get_host_ip, mock_destroy,
            mock_get_disk_info, block_device_info=None,
            params_for_instance=None):
        """Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
        .migrate_disk_and_power_off.

        Shared helper: runs the migration twice, once to a different host
        and once to the same host, asserting the returned disk-info JSON
        both times.
        """

        instance = self._create_instance(params=params_for_instance)
        disk_info = list(fake_disk_info_byname(instance).values())
        disk_info_text = jsonutils.dumps(disk_info)
        mock_get_disk_info.return_value = disk_info

        # dest is different host case
        out = self.drvr.migrate_disk_and_power_off(
            ctxt, instance, '10.0.0.2', flavor_obj, None,
            block_device_info=block_device_info)
        self.assertEqual(out, disk_info_text)

        # dest is same host case
        out = self.drvr.migrate_disk_and_power_off(
            ctxt, instance, '10.0.0.1', flavor_obj, None,
            block_device_info=block_device_info)
        self.assertEqual(out, disk_info_text)
|
|
|
|
    def test_migrate_disk_and_power_off(self):
        # Plain resize with a flavor matching the instance's disks.
        flavor = {'root_gb': 10, 'ephemeral_gb': 20}
        flavor_obj = objects.Flavor(**flavor)
        self._test_migrate_disk_and_power_off(self.context, flavor_obj)
|
|
|
|
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._disconnect_volume')
    def test_migrate_disk_and_power_off_boot_from_volume(self,
                                                         disconnect_volume):
        # A boot-from-volume instance (image_ref None, boot_index 0 volume)
        # must disconnect its boot volume during the migration.
        info = {
            'block_device_mapping': [
                {'boot_index': None,
                 'mount_device': '/dev/vdd',
                 'connection_info': mock.sentinel.conn_info_vdd},
                {'boot_index': 0,
                 'mount_device': '/dev/vda',
                 'connection_info': mock.sentinel.conn_info_vda}]}
        flavor = {'root_gb': 1, 'ephemeral_gb': 0}
        flavor_obj = objects.Flavor(**flavor)
        # Note(Mike_D): The size of instance's ephemeral_gb is 0 gb.
        self._test_migrate_disk_and_power_off(self.context,
            flavor_obj, block_device_info=info,
            params_for_instance={'image_ref': None,
                                 'root_gb': 10,
                                 'ephemeral_gb': 0,
                                 'flavor': {'root_gb': 10,
                                            'ephemeral_gb': 0}})
        disconnect_volume.assert_called_with(self.context,
            mock.sentinel.conn_info_vda, mock.ANY)
|
|
|
|
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._disconnect_volume')
    def test_migrate_disk_and_power_off_boot_from_volume_backed_snapshot(
            self, disconnect_volume):
        # Such instance has not empty image_ref, but must be considered as
        # booted from volume.
        info = {
            'block_device_mapping': [
                {'boot_index': None,
                 'mount_device': '/dev/vdd',
                 'connection_info': mock.sentinel.conn_info_vdd},
                {'boot_index': 0,
                 'mount_device': '/dev/vda',
                 'connection_info': mock.sentinel.conn_info_vda}]}
        flavor = {'root_gb': 1, 'ephemeral_gb': 0}
        flavor_obj = objects.Flavor(**flavor)
        self._test_migrate_disk_and_power_off(self.context,
            flavor_obj, block_device_info=info,
            params_for_instance={
                'image_ref': uuids.fake_volume_backed_image_ref,
                'root_gb': 10,
                'ephemeral_gb': 0,
                'flavor': {'root_gb': 10,
                           'ephemeral_gb': 0}})
        # The boot volume must still be disconnected even though image_ref
        # is set.
        disconnect_volume.assert_called_with(self.context,
            mock.sentinel.conn_info_vda, mock.ANY)
|
|
|
|
    @mock.patch('os.rename')
    @mock.patch('nova.virt.libvirt.utils.copy_image')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._destroy')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_host_ip_addr')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
                '._get_instance_disk_info')
    def test_migrate_disk_and_power_off_swap(self, mock_get_disk_info,
                                             get_host_ip_addr,
                                             mock_destroy,
                                             mock_copy_image,
                                             mock_rename):
        """Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
        .migrate_disk_and_power_off.

        Resizing to a flavor with a swap disk must not move or copy
        disk.swap — it is recreated on the destination.
        """

        # Original instance config
        instance = self._create_instance({'flavor': {'root_gb': 10,
                                                     'ephemeral_gb': 0}})

        disk_info = list(fake_disk_info_byname(instance).values())
        mock_get_disk_info.return_value = disk_info
        get_host_ip_addr.return_value = '10.0.0.1'

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        # Re-size fake instance to 20G root and 1024M swap disk
        flavor = {'root_gb': 20, 'ephemeral_gb': 0, 'swap': 1024}
        flavor_obj = objects.Flavor(**flavor)

        # Destination is same host
        out = drvr.migrate_disk_and_power_off(context.get_admin_context(),
                                              instance, '10.0.0.1',
                                              flavor_obj, None)

        mock_get_disk_info.assert_called_once_with(instance, None)
        self.assertTrue(get_host_ip_addr.called)
        mock_destroy.assert_called_once_with(instance)
        disk_info_text = jsonutils.dumps(disk_info)
        self.assertEqual(disk_info_text, out)

        # NOTE(review): for a mock call tuple, call[0] is the call *name*
        # (a string such as ''), not the path argument, so these
        # endswith('.swap') checks appear to be vacuous — they can never
        # fail regardless of what paths were renamed/copied. Checking
        # call[1] (the positional args) would test the stated intent;
        # confirm before changing.

        # disk.swap isn't moved
        for call in mock_rename.mock_calls:
            self.assertFalse(call[0].endswith('.swap'))

        # disk.swap isn't copied
        for call in mock_copy_image.mock_calls:
            self.assertFalse(call[0].endswith('.swap'))
|
|
|
|
    def _test_migrate_disk_and_power_off_resize_check(self, expected_exc):
        """Test for nova.virt.libvirt.libvirt_driver.LibvirtConnection
        .migrate_disk_and_power_off.

        :param expected_exc: exception class the migration is expected to
            raise during its pre-resize checks.
        """
        instance = self._create_instance()
        flavor = {'root_gb': 10, 'ephemeral_gb': 20}
        flavor_obj = objects.Flavor(**flavor)

        # Migration is not implemented for LVM backed instances
        self.assertRaises(expected_exc,
                          self.drvr.migrate_disk_and_power_off,
                          None, instance, '10.0.0.1', flavor_obj, None)
|
|
|
|
    @mock.patch('oslo_concurrency.processutils.execute')
    @mock.patch('os.rename')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._destroy')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
                '._get_instance_disk_info')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
                '._is_storage_shared_with')
    def _test_migrate_disk_and_power_off_backing_file(self,
                                                      shared_storage,
                                                      mock_is_shared_storage,
                                                      mock_get_disk_info,
                                                      mock_destroy,
                                                      mock_rename,
                                                      mock_execute):
        # Migrate a qcow2 disk that has a backing file, with storage either
        # shared or not shared with the destination.
        # NOTE(review): self.convert_file_called is assigned here but never
        # read anywhere visible — looks like dead state; confirm.
        self.convert_file_called = False
        flavor = {'root_gb': 20, 'ephemeral_gb': 30, 'swap': 0}
        flavor_obj = objects.Flavor(**flavor)
        disk_info = [{'type': 'qcow2', 'path': '/test/disk',
                      'virt_disk_size': '10737418240',
                      'backing_file': '/base/disk',
                      'disk_size': '83886080'}]
        mock_get_disk_info.return_value = disk_info
        mock_is_shared_storage.return_value = shared_storage

        def fake_execute(*args, **kwargs):
            # NOTE(review): args is a tuple, so args[0:2] is a tuple and
            # can never equal this *list* — this assertion is vacuous as
            # written. Comparing against the tuple ('qemu-img', 'convert')
            # would make it meaningful; confirm intent before changing.
            self.assertNotEqual(args[0:2], ['qemu-img', 'convert'])

        mock_execute.side_effect = fake_execute

        instance = self._create_instance()

        out = self.drvr.migrate_disk_and_power_off(
            context.get_admin_context(), instance, '10.0.0.2',
            flavor_obj, None)

        self.assertTrue(mock_is_shared_storage.called)
        mock_destroy.assert_called_once_with(instance)
        disk_info_text = jsonutils.dumps(disk_info)
        self.assertEqual(out, disk_info_text)
|
|
|
|
    def test_migrate_disk_and_power_off_shared_storage(self):
        # Backing-file migration with storage shared with the destination.
        self._test_migrate_disk_and_power_off_backing_file(True)

    def test_migrate_disk_and_power_off_non_shared_storage(self):
        # Backing-file migration without shared storage.
        self._test_migrate_disk_and_power_off_backing_file(False)
|
|
|
|
    def test_migrate_disk_and_power_off_lvm(self):
        # Resize of LVM-backed instances is unsupported and must roll back.
        self.flags(images_type='lvm', group='libvirt')

        expected_exc = exception.InstanceFaultRollback
        self._test_migrate_disk_and_power_off_resize_check(expected_exc)
|
|
|
|
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_is_storage_shared_with',
                       return_value=False)
    def test_migrate_disk_and_power_off_resize_cannot_ssh(self,
                                                          mock_is_shared):
        # If ssh to the destination fails (execute raises), the resize
        # must roll back rather than leave the instance half-migrated.
        def fake_execute(*args, **kwargs):
            raise processutils.ProcessExecutionError()
        self.stub_out('oslo_concurrency.processutils.execute', fake_execute)

        expected_exc = exception.InstanceFaultRollback
        self._test_migrate_disk_and_power_off_resize_check(expected_exc)
        mock_is_shared.assert_called_once_with('10.0.0.1', test.MatchType(str))
|
|
|
|
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
                '._get_instance_disk_info')
    def test_migrate_disk_and_power_off_resize_error(self, mock_get_disk_info):
        # Shrinking the root disk (10 -> 5 GB) must roll back.
        instance = self._create_instance()
        flavor = {'root_gb': 5, 'ephemeral_gb': 10}
        flavor_obj = objects.Flavor(**flavor)
        mock_get_disk_info.return_value = fake_disk_info_json(instance)

        self.assertRaises(
            exception.InstanceFaultRollback,
            self.drvr.migrate_disk_and_power_off,
            'ctx', instance, '10.0.0.1', flavor_obj, None)
|
|
|
|
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
                '._get_instance_disk_info')
    def test_migrate_disk_and_power_off_resize_error_rbd(self,
                                                         mock_get_disk_info):
        # Check error on resize root disk down for rbd.
        # The difference is that get_instance_disk_info always returns
        # an empty list for rbd.
        # Ephemeral size is not changed in this case (otherwise other check
        # will raise the same error).
        self.flags(images_type='rbd', group='libvirt')
        instance = self._create_instance()
        flavor = {'root_gb': 5, 'ephemeral_gb': 20}
        flavor_obj = objects.Flavor(**flavor)
        mock_get_disk_info.return_value = []

        self.assertRaises(
            exception.InstanceFaultRollback,
            self.drvr.migrate_disk_and_power_off,
            'ctx', instance, '10.0.0.1', flavor_obj, None)
|
|
|
|
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
                '._get_instance_disk_info')
    def test_migrate_disk_and_power_off_resize_error_default_ephemeral(
            self, mock_get_disk_info):
        # Note(Mike_D): The size of this instance's ephemeral_gb is 20 gb.
        # Shrinking ephemeral (20 -> 0 GB) must roll back.
        instance = self._create_instance()
        flavor = {'root_gb': 10, 'ephemeral_gb': 0}
        flavor_obj = objects.Flavor(**flavor)
        mock_get_disk_info.return_value = fake_disk_info_json(instance)

        self.assertRaises(exception.InstanceFaultRollback,
                          self.drvr.migrate_disk_and_power_off,
                          'ctx', instance, '10.0.0.1', flavor_obj, None)
|
|
|
|
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
                '._get_instance_disk_info')
    @mock.patch('nova.virt.driver.block_device_info_get_ephemerals')
    def test_migrate_disk_and_power_off_resize_error_eph(self, mock_get,
                                                         mock_get_disk_info):
        # Ephemeral BDM sizing: the largest blank/local ephemeral mapping
        # (3 GB below) determines the minimum target ephemeral size.
        mappings = [
            {
                'device_name': '/dev/sdb4',
                'source_type': 'blank',
                'destination_type': 'local',
                'device_type': 'disk',
                'guest_format': 'swap',
                'boot_index': -1,
                'volume_size': 1
            },
            {
                'device_name': '/dev/sda1',
                'source_type': 'volume',
                'destination_type': 'volume',
                'device_type': 'disk',
                'volume_id': 1,
                'guest_format': None,
                'boot_index': 1,
                'volume_size': 6
            },
            {
                'device_name': '/dev/sda2',
                'source_type': 'snapshot',
                'destination_type': 'volume',
                'snapshot_id': 1,
                'device_type': 'disk',
                'guest_format': None,
                'boot_index': 0,
                'volume_size': 4
            },
            {
                'device_name': '/dev/sda3',
                'source_type': 'blank',
                'destination_type': 'local',
                'device_type': 'disk',
                'guest_format': None,
                'boot_index': -1,
                'volume_size': 3
            }
        ]
        mock_get.return_value = mappings
        instance = self._create_instance()

        # Old flavor, eph is 20, real disk is 3, target is 2, fail
        flavor = {'root_gb': 10, 'ephemeral_gb': 2}
        flavor_obj = objects.Flavor(**flavor)
        mock_get_disk_info.return_value = fake_disk_info_json(instance)

        self.assertRaises(
            exception.InstanceFaultRollback,
            self.drvr.migrate_disk_and_power_off,
            'ctx', instance, '10.0.0.1', flavor_obj, None)

        # Old flavor, eph is 20, real disk is 3, target is 4
        flavor = {'root_gb': 10, 'ephemeral_gb': 4}
        flavor_obj = objects.Flavor(**flavor)
        self._test_migrate_disk_and_power_off(self.context, flavor_obj)
|
|
|
|
@mock.patch('os.rename')
@mock.patch('oslo_concurrency.processutils.execute')
@mock.patch('nova.virt.libvirt.utils.copy_image')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._destroy')
@mock.patch('nova.virt.libvirt.utils.get_instance_path')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
            '._is_storage_shared_with')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
            '._get_instance_disk_info')
def test_migrate_disk_and_power_off_resize_copy_disk_info(
        self, mock_disk_info, mock_shared, mock_path, mock_destroy,
        mock_copy, mock_execute, mock_rename):
    # When resizing to a non-shared destination, an existing disk.info
    # in the source's '<dir>_resize' backup directory must be copied to
    # the destination instance directory.
    instance = self._create_instance()
    disk_info = list(fake_disk_info_byname(instance).values())
    instance_base = os.path.dirname(disk_info[0]['path'])
    flavor = {'root_gb': 10, 'ephemeral_gb': 25}
    flavor_obj = objects.Flavor(**flavor)

    mock_disk_info.return_value = disk_info
    mock_path.return_value = instance_base
    mock_shared.return_value = False

    src_disk_info_path = os.path.join(instance_base + '_resize',
                                      'disk.info')

    with mock.patch.object(os.path, 'exists', autospec=True) \
            as mock_exists:
        # disk.info exists on the source
        mock_exists.side_effect = \
            lambda path: path == src_disk_info_path
        self.drvr.migrate_disk_and_power_off(context.get_admin_context(),
                                             instance, mock.sentinel,
                                             flavor_obj, None)
        self.assertTrue(mock_exists.called)

    dst_disk_info_path = os.path.join(instance_base, 'disk.info')
    mock_copy.assert_any_call(src_disk_info_path, dst_disk_info_path,
                              host=mock.sentinel, on_execute=mock.ANY,
                              on_completion=mock.ANY)
|
|
|
|
def test_wait_for_running(self):
    # _wait_for_running polls get_info: InstanceNotFound propagates,
    # a RUNNING state signals completion via LoopingCallDone, and any
    # other state simply returns so polling continues.
    def fake_get_info(self, instance):
        if instance['name'] == "not_found":
            raise exception.InstanceNotFound(instance_id=instance['uuid'])
        elif instance['name'] == "running":
            return hardware.InstanceInfo(state=power_state.RUNNING)
        else:
            return hardware.InstanceInfo(state=power_state.SHUTDOWN)

    self.stub_out('nova.virt.libvirt.driver.LibvirtDriver.get_info',
                  fake_get_info)

    # instance not found case
    self.assertRaises(exception.InstanceNotFound,
                      self.drvr._wait_for_running,
                      {'name': 'not_found',
                       'uuid': 'not_found_uuid'})

    # instance is running case
    self.assertRaises(loopingcall.LoopingCallDone,
                      self.drvr._wait_for_running,
                      {'name': 'running',
                       'uuid': 'running_uuid'})

    # else case
    self.drvr._wait_for_running({'name': 'else',
                                 'uuid': 'other_uuid'})
|
|
|
|
@mock.patch.object(compute_utils, 'disk_ops_semaphore')
@mock.patch('nova.privsep.utils.supports_direct_io', return_value=True)
@mock.patch('oslo_concurrency.processutils.execute')
@mock.patch('os.rename')
def test_disk_raw_to_qcow2(self, mock_rename, mock_execute, mock_direct_io,
                           mock_disk_op_sema):
    # With direct-IO support, qemu-img convert runs with cache mode
    # '-t none' and the converted file then replaces the original.
    path = '/test/disk'
    _path_qcow = path + '_qcow'

    self.drvr._disk_raw_to_qcow2(path)
    mock_disk_op_sema.__enter__.assert_called_once()
    mock_direct_io.assert_called_once_with(CONF.instances_path)
    mock_execute.assert_has_calls([
        mock.call('qemu-img', 'convert', '-t', 'none',
                  '-O', 'qcow2', '-f', 'raw', path, _path_qcow)])
    mock_rename.assert_has_calls([
        mock.call(_path_qcow, path)])
|
|
|
|
@mock.patch.object(compute_utils, 'disk_ops_semaphore')
@mock.patch('nova.privsep.utils.supports_direct_io', return_value=False)
@mock.patch('oslo_concurrency.processutils.execute')
@mock.patch('os.rename')
def test_disk_raw_to_qcow2_no_directio(self, mock_rename, mock_execute,
                                       mock_direct_io, mock_disk_op_sema):
    # Test the scenario where we have no support for direct IO.
    # This could be removed when we add unit tests for convert_image().
    # Without direct IO the convert must fall back to '-t writeback'.
    path = '/test/disk'
    _path_qcow = path + '_qcow'

    self.drvr._disk_raw_to_qcow2(path)
    mock_disk_op_sema.__enter__.assert_called_once()
    mock_direct_io.assert_called_once_with(CONF.instances_path)
    mock_execute.assert_has_calls([
        mock.call('qemu-img', 'convert', '-t', 'writeback',
                  '-O', 'qcow2', '-f', 'raw', path, _path_qcow)])
    mock_rename.assert_has_calls([
        mock.call(_path_qcow, path)])
|
|
|
|
@mock.patch.object(libvirt_driver.LibvirtDriver, '_inject_data')
@mock.patch.object(libvirt_driver.LibvirtDriver, 'get_info')
@mock.patch.object(libvirt_driver.LibvirtDriver,
                   '_create_domain_and_network')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_disk_raw_to_qcow2')
# Don't write libvirt xml to disk
@mock.patch.object(libvirt_utils, 'write_to_file')
# NOTE(mdbooth): The following 4 mocks are required to execute
# get_guest_xml().
@mock.patch.object(libvirt_driver.LibvirtDriver, '_set_host_enabled')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_build_device_metadata')
@mock.patch('nova.privsep.utils.supports_direct_io')
@mock.patch('nova.api.metadata.base.InstanceMetadata')
def _test_finish_migration(self, mock_instance_metadata,
                           mock_supports_direct_io,
                           mock_build_device_metadata,
                           mock_set_host_enabled, mock_write_to_file,
                           mock_raw_to_qcow2,
                           mock_create_domain_and_network,
                           mock_get_info, mock_inject_data,
                           power_on=True, resize_instance=False):
    """Test for nova.virt.libvirt.libvirt_driver.LibvirtConnection
    .finish_migration.

    Drives finish_migration() end to end with a fake image backend,
    checking raw->qcow2 disk conversion, image cache usage, resize
    snapshot creation, and that no data injection happens.
    """
    self.flags(use_cow_images=True)
    if power_on:
        state = power_state.RUNNING
    else:
        state = power_state.SHUTDOWN
    mock_get_info.return_value = hardware.InstanceInfo(state=state)

    instance = self._create_instance(
        {'config_drive': str(True),
         'task_state': task_states.RESIZE_FINISH,
         'flavor': {'swap': 500}})
    bdi = {'block_device_mapping': []}

    migration = objects.Migration()
    migration.source_compute = 'fake-source-compute'
    migration.dest_compute = 'fake-dest-compute'
    migration.source_node = 'fake-source-node'
    migration.dest_node = 'fake-dest-node'
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

    # Source disks are raw to test conversion
    disk_info = list(fake_disk_info_byname(instance, type='raw').values())
    disk_info_text = jsonutils.dumps(disk_info)

    backend = self.useFixture(fake_imagebackend.ImageBackendFixture())
    mock_create_domain_and_network.return_value = \
        libvirt_guest.Guest('fake_dom')

    self.drvr.finish_migration(
        context.get_admin_context(), migration, instance,
        disk_info_text, [], image_meta,
        resize_instance, bdi, power_on)

    # Assert that we converted the root, ephemeral, and swap disks
    instance_path = libvirt_utils.get_instance_path(instance)
    convert_calls = [mock.call(os.path.join(instance_path, name))
                     for name in ('disk', 'disk.local', 'disk.swap')]
    mock_raw_to_qcow2.assert_has_calls(convert_calls, any_order=True)

    # Implicitly assert that we did not convert the config disk
    self.assertEqual(len(convert_calls), mock_raw_to_qcow2.call_count)

    disks = backend.disks

    # Assert that we called cache() on kernel, ramdisk, disk,
    # and disk.local.
    # This results in creation of kernel, ramdisk, and disk.swap.
    # This results in backing file check and resize of disk and disk.local.
    for name in ('kernel', 'ramdisk', 'disk', 'disk.local', 'disk.swap'):
        self.assertTrue(disks[name].cache.called,
                        'cache() not called for %s' % name)

    # Assert that we created a snapshot for the root disk
    root_disk = disks['disk']
    self.assertTrue(root_disk.create_snap.called)

    # Assert that we didn't import a config disk
    # Note that some path currently creates a config disk object,
    # but only uses it for an exists() check. Therefore the object may
    # exist, but shouldn't have been imported.
    if 'disk.config' in disks:
        self.assertFalse(disks['disk.config'].import_file.called)

    # We shouldn't be injecting data during migration
    self.assertFalse(mock_inject_data.called)

    # NOTE(mdbooth): If we wanted to check the generated xml, we could
    # insert a hook here
    mock_create_domain_and_network.assert_called_once_with(
        mock.ANY, mock.ANY, instance, [],
        block_device_info=bdi, power_on=power_on,
        vifs_already_plugged=True, post_xml_callback=mock.ANY)
|
|
|
|
def test_finish_migration_resize(self):
    # A resize must also resync the guest clock after the domain is
    # (re)created.
    with mock.patch('nova.virt.libvirt.guest.Guest.sync_guest_time'
                    ) as mock_guest_time:
        self._test_finish_migration(resize_instance=True)
        self.assertTrue(mock_guest_time.called)
|
|
|
|
def test_finish_migration_power_on(self):
    # Powering the guest on after migration also syncs its clock.
    with mock.patch('nova.virt.libvirt.guest.Guest.sync_guest_time'
                    ) as mock_guest_time:
        self._test_finish_migration()
        self.assertTrue(mock_guest_time.called)
|
|
|
|
def test_finish_migration_power_off(self):
    # Finish migration with the guest left powered off.
    self._test_finish_migration(power_on=False)
|
|
|
|
def _test_finish_revert_migration(self, power_on):
    """Test for nova.virt.libvirt.libvirt_driver.LibvirtConnection
    .finish_revert_migration.

    Stubs out the whole domain-creation path and verifies that
    _create_domain_and_network is invoked with the requested power_on
    flag and with vifs_already_plugged=False.
    """
    powered_on = power_on

    # Flag flipped by fake_create_domain so we can assert it ran.
    self.fake_create_domain_called = False

    def fake_execute(*args, **kwargs):
        pass

    def fake_plug_vifs(self, instance, network_info):
        pass

    def fake_create_domain(_self, context, xml, instance, network_info,
                           block_device_info=None, power_on=None,
                           vifs_already_plugged=None):
        self.fake_create_domain_called = True
        self.assertEqual(powered_on, power_on)
        self.assertFalse(vifs_already_plugged)
        return mock.MagicMock()

    def fake_enable_hairpin(self):
        pass

    def fake_get_info(self, instance):
        if powered_on:
            return hardware.InstanceInfo(state=power_state.RUNNING)
        else:
            return hardware.InstanceInfo(state=power_state.SHUTDOWN)

    def fake_to_xml(self, context, instance, network_info, disk_info,
                    image_meta=None, rescue=None,
                    block_device_info=None):
        return ""

    self.stub_out('nova.virt.libvirt.driver.LibvirtDriver._get_guest_xml',
                  fake_to_xml)
    self.stub_out('nova.virt.libvirt.driver.LibvirtDriver.plug_vifs',
                  fake_plug_vifs)
    self.stub_out('oslo_concurrency.processutils.execute', fake_execute)
    fw = base_firewall.NoopFirewallDriver()
    self.stub_out('nova.virt.libvirt.driver.LibvirtDriver.firewall_driver',
                  fw)
    self.stub_out('nova.virt.libvirt.driver.LibvirtDriver.'
                  '_create_domain_and_network', fake_create_domain)
    self.stub_out('nova.virt.libvirt.guest.Guest.enable_hairpin',
                  fake_enable_hairpin)
    self.stub_out('nova.virt.libvirt.driver.LibvirtDriver.get_info',
                  fake_get_info)
    self.stub_out('nova.utils.get_image_from_system_metadata',
                  lambda *a: self.test_image_meta)

    with utils.tempdir() as tmpdir:
        self.flags(instances_path=tmpdir)
        ins_ref = self._create_instance()
        os.mkdir(os.path.join(tmpdir, ins_ref['name']))
        # finish_revert_migration expects an (empty) libvirt.xml to be
        # present in the instance directory.
        libvirt_xml_path = os.path.join(tmpdir,
                                        ins_ref['name'],
                                        'libvirt.xml')
        f = open(libvirt_xml_path, 'w')
        f.close()

        self.drvr.finish_revert_migration(
            context.get_admin_context(), ins_ref,
            [], None, power_on)
        self.assertTrue(self.fake_create_domain_called)
|
|
|
|
def test_finish_revert_migration_power_on(self):
    # Revert and power the guest back on.
    self._test_finish_revert_migration(True)
|
|
|
|
def test_finish_revert_migration_power_off(self):
    # Revert but leave the guest powered off.
    self._test_finish_revert_migration(False)
|
|
|
|
def _test_finish_revert_migration_after_crash(self, backup_made=True,
                                              del_inst_failed=False):
    # Simulates finish_revert_migration after a compute crash: if the
    # '<dir>_resize' backup directory exists it is renamed back into
    # place; an ENOENT from rmtree must not propagate.
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    drvr.image_backend = mock.Mock()
    drvr.image_backend.by_name.return_value = drvr.image_backend
    # NOTE: this local deliberately shadows the imported 'context'
    # module within the method.
    context = 'fake_context'
    ins_ref = self._create_instance()

    with test.nested(
            mock.patch.object(os.path, 'exists', return_value=backup_made),
            mock.patch.object(libvirt_utils, 'get_instance_path'),
            mock.patch.object(os, 'rename'),
            mock.patch.object(drvr, '_create_domain_and_network'),
            mock.patch.object(drvr, '_get_guest_xml'),
            mock.patch.object(shutil, 'rmtree'),
            mock.patch.object(loopingcall, 'FixedIntervalLoopingCall'),
    ) as (mock_stat, mock_path, mock_rename, mock_cdn, mock_ggx,
          mock_rmtree, mock_looping_call):
        mock_path.return_value = '/fake/foo'
        if del_inst_failed:
            mock_rmtree.side_effect = OSError(errno.ENOENT,
                                              'test exception')
        drvr.finish_revert_migration(context, ins_ref, [])
        if backup_made:
            mock_rename.assert_called_once_with('/fake/foo_resize',
                                                '/fake/foo')
        else:
            self.assertFalse(mock_rename.called)
|
|
|
|
def test_finish_revert_migration_after_crash(self):
    # Crash after the backup directory was made: it must be restored.
    self._test_finish_revert_migration_after_crash(backup_made=True)
|
|
|
|
def test_finish_revert_migration_after_crash_before_new(self):
    # NOTE(review): identical arguments to ..._after_crash above;
    # presumably meant to cover the crash-before-new-dir-created path —
    # confirm whether backup_made should differ here.
    self._test_finish_revert_migration_after_crash(backup_made=True)
|
|
|
|
def test_finish_revert_migration_after_crash_before_backup(self):
    # Crash before the backup directory was made: no rename happens.
    self._test_finish_revert_migration_after_crash(backup_made=False)
|
|
|
|
def test_finish_revert_migration_after_crash_delete_failed(self):
    # rmtree failing with ENOENT during revert must be tolerated.
    self._test_finish_revert_migration_after_crash(backup_made=True,
                                                   del_inst_failed=True)
|
|
|
|
def test_finish_revert_migration_preserves_disk_bus(self):
    # The hw_disk_bus image property ('ide') must survive the revert:
    # the disk_info handed to _get_guest_xml carries the same bus.
    def fake_get_guest_xml(context, instance, network_info, disk_info,
                           image_meta, block_device_info=None):
        self.assertEqual('ide', disk_info['disk_bus'])

    image_meta = {"disk_format": "raw",
                  "properties": {"hw_disk_bus": "ide"}}
    instance = self._create_instance()

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    with test.nested(
            mock.patch.object(drvr, 'image_backend'),
            mock.patch.object(drvr, '_create_domain_and_network'),
            mock.patch.object(utils, 'get_image_from_system_metadata',
                              return_value=image_meta),
            mock.patch.object(drvr, '_get_guest_xml',
                              side_effect=fake_get_guest_xml)):
        drvr.finish_revert_migration('', instance, None, power_on=False)
|
|
|
|
def test_finish_revert_migration_snap_backend(self):
    # On snapshot-capable backends the pre-resize snapshot is rolled
    # back and then removed during the revert.
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    drvr.image_backend = mock.Mock()
    drvr.image_backend.by_name.return_value = drvr.image_backend
    ins_ref = self._create_instance()

    with test.nested(
            mock.patch.object(utils, 'get_image_from_system_metadata'),
            mock.patch.object(drvr, '_create_domain_and_network'),
            mock.patch.object(drvr, '_get_guest_xml')) as (
            mock_image, mock_cdn, mock_ggx):
        mock_image.return_value = {'disk_format': 'raw'}
        drvr.finish_revert_migration('', ins_ref, None, power_on=False)

        drvr.image_backend.rollback_to_snap.assert_called_once_with(
            libvirt_utils.RESIZE_SNAPSHOT_NAME)
        drvr.image_backend.remove_snap.assert_called_once_with(
            libvirt_utils.RESIZE_SNAPSHOT_NAME)
|
|
|
|
def test_finish_revert_migration_snap_backend_snapshot_not_found(self):
    # If rolling back to the resize snapshot fails with
    # SnapshotNotFound, the error propagates and the snapshot is NOT
    # removed (nothing to clean up).
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    drvr.image_backend = mock.Mock()
    drvr.image_backend.by_name.return_value = drvr.image_backend
    ins_ref = self._create_instance()

    with test.nested(
            mock.patch.object(utils, 'get_image_from_system_metadata'),
            mock.patch.object(drvr, '_create_domain_and_network'),
            mock.patch.object(drvr, '_get_guest_xml')) as (
            mock_image, mock_cdn, mock_ggx):
        mock_image.return_value = {'disk_format': 'raw'}
        drvr.image_backend.rollback_to_snap.side_effect = (
            exception.SnapshotNotFound(snapshot_id='testing'))
        self.assertRaises(exception.SnapshotNotFound,
                          drvr.finish_revert_migration,
                          '', ins_ref, None, power_on=False)
        drvr.image_backend.remove_snap.assert_not_called()
|
|
|
|
def test_finish_revert_migration_snap_backend_image_does_not_exist(self):
    # If the backend image no longer exists, no snapshot rollback or
    # removal is attempted during the revert.
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    drvr.image_backend = mock.Mock()
    drvr.image_backend.by_name.return_value = drvr.image_backend
    drvr.image_backend.exists.return_value = False
    ins_ref = self._create_instance()

    with test.nested(
            mock.patch.object(rbd_utils, 'RBDDriver'),
            mock.patch.object(utils, 'get_image_from_system_metadata'),
            mock.patch.object(drvr, '_create_domain_and_network'),
            mock.patch.object(drvr, '_get_guest_xml')) as (
            mock_rbd, mock_image, mock_cdn, mock_ggx):
        mock_image.return_value = {'disk_format': 'raw'}
        drvr.finish_revert_migration('', ins_ref, None, power_on=False)
        self.assertFalse(drvr.image_backend.rollback_to_snap.called)
        self.assertFalse(drvr.image_backend.remove_snap.called)
|
|
|
|
@mock.patch.object(shutil, 'rmtree')
def test_cleanup_failed_migration(self, mock_rmtree):
    # _cleanup_failed_migration removes the given instance directory.
    self.drvr._cleanup_failed_migration('/fake/inst')
    mock_rmtree.assert_called_once_with('/fake/inst')
|
|
|
|
@mock.patch.object(libvirt_driver.LibvirtDriver, '_cleanup_resize')
def test_confirm_migration(self, mock_cleanup):
    # confirm_migration delegates directly to _cleanup_resize.
    ins_ref = self._create_instance()

    self.drvr.confirm_migration(self.context, "migration_ref", ins_ref,
                                _fake_network_info(self, 1))
    mock_cleanup.assert_called_once_with(
        self.context, ins_ref, _fake_network_info(self, 1))
|
|
|
|
def test_cleanup_resize_same_host(self):
    # When the instance still lives on this host, _cleanup_resize only
    # removes directories (rmtree); domain/vif teardown is not needed.
    CONF.set_override('policy_dirs', [], group='oslo_policy')
    ins_ref = self._create_instance({'host': CONF.host})

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    drvr.image_backend = mock.Mock()
    drvr.image_backend.by_name.return_value = drvr.image_backend

    with test.nested(
            mock.patch.object(os.path, 'exists'),
            mock.patch.object(libvirt_utils, 'get_instance_path'),
            mock.patch.object(shutil, 'rmtree')) as (
            mock_exists, mock_get_path, mock_rmtree):
        mock_exists.return_value = True
        mock_get_path.return_value = '/fake/inst'

        drvr._cleanup_resize(
            self.context, ins_ref, _fake_network_info(self, 1))
        mock_get_path.assert_called_once_with(ins_ref)
        self.assertEqual(5, mock_rmtree.call_count)
|
|
|
|
def test_cleanup_resize_not_same_host(self):
    # After a resize confirmed on another host, the old host removes
    # the instance files, undefines the domain and unplugs/unfilters
    # vifs; the image backend must never be consulted (NonCallableMock
    # raises if by_name is touched).
    CONF.set_override('policy_dirs', [], group='oslo_policy')
    # NOTE: this local 'host' deliberately shadows the imported
    # nova.virt.libvirt 'host' module within the method.
    host = 'not' + CONF.host
    ins_ref = self._create_instance({'host': host})
    fake_net = _fake_network_info(self, 1)

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    with test.nested(
            mock.patch('nova.compute.utils.is_volume_backed_instance',
                       return_value=False),
            mock.patch.object(os.path, 'exists'),
            mock.patch.object(libvirt_utils, 'get_instance_path'),
            mock.patch.object(shutil, 'rmtree'),
            mock.patch.object(drvr.image_backend, 'by_name',
                              new_callable=mock.NonCallableMock),
            mock.patch.object(drvr, '_undefine_domain'),
            mock.patch.object(drvr, 'unplug_vifs'),
            mock.patch.object(drvr, 'unfilter_instance')
    ) as (mock_volume_backed, mock_exists, mock_get_path,
          mock_rmtree, mock_image_by_name, mock_undef, mock_unplug,
          mock_unfilter):
        mock_exists.return_value = True
        mock_get_path.return_value = '/fake/inst'

        drvr._cleanup_resize(self.context, ins_ref, fake_net)
        mock_get_path.assert_called_once_with(ins_ref)
        self.assertEqual(5, mock_rmtree.call_count)
        mock_undef.assert_called_once_with(ins_ref)
        mock_unplug.assert_called_once_with(ins_ref, fake_net)
        mock_unfilter.assert_called_once_with(ins_ref, fake_net)
|
|
|
|
def test_cleanup_resize_not_same_host_volume_backed(self):
    """Tests cleaning up after a resize is confirmed with a volume-backed
    instance. The key point is that the instance base directory should not
    be removed for volume-backed instances.
    """
    CONF.set_override('policy_dirs', [], group='oslo_policy')
    host = 'not' + CONF.host
    ins_ref = self._create_instance({'host': host})
    fake_net = _fake_network_info(self, 1)

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    drvr.image_backend = mock.Mock()
    drvr.image_backend.by_name.return_value = drvr.image_backend
    drvr.image_backend.exists.return_value = False

    with test.nested(
            mock.patch('nova.compute.utils.is_volume_backed_instance',
                       return_value=True),
            mock.patch.object(os.path, 'exists'),
            mock.patch.object(libvirt_utils, 'get_instance_path'),
            mock.patch.object(shutil, 'rmtree'),
            mock.patch.object(drvr, '_undefine_domain'),
            mock.patch.object(drvr, 'unplug_vifs'),
            mock.patch.object(drvr, 'unfilter_instance')
    ) as (mock_volume_backed, mock_exists, mock_get_path,
          mock_rmtree, mock_undef, mock_unplug, mock_unfilter):
        mock_exists.return_value = True
        mock_get_path.return_value = '/fake/inst'

        drvr._cleanup_resize(self.context, ins_ref, fake_net)
        mock_get_path.assert_called_once_with(ins_ref)
        self.assertEqual(5, mock_rmtree.call_count)
        mock_undef.assert_called_once_with(ins_ref)
        mock_unplug.assert_called_once_with(ins_ref, fake_net)
        mock_unfilter.assert_called_once_with(ins_ref, fake_net)
|
|
|
|
def test_cleanup_resize_snap_backend(self):
    # For snapshot-capable backends (rbd here), confirming the resize
    # must also delete the pre-resize snapshot.
    CONF.set_override('policy_dirs', [], group='oslo_policy')
    self.flags(images_type='rbd', group='libvirt')
    ins_ref = self._create_instance({'host': CONF.host})
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    drvr.image_backend = mock.Mock()
    drvr.image_backend.by_name.return_value = drvr.image_backend

    with test.nested(
            mock.patch.object(os.path, 'exists'),
            mock.patch.object(libvirt_utils, 'get_instance_path'),
            mock.patch.object(shutil, 'rmtree'),
            mock.patch.object(drvr.image_backend, 'remove_snap')) as (
            mock_exists, mock_get_path, mock_rmtree, mock_remove):
        mock_exists.return_value = True
        mock_get_path.return_value = '/fake/inst'

        drvr._cleanup_resize(
            self.context, ins_ref, _fake_network_info(self, 1))
        mock_get_path.assert_called_once_with(ins_ref)
        mock_remove.assert_called_once_with(
            libvirt_utils.RESIZE_SNAPSHOT_NAME)
        self.assertEqual(5, mock_rmtree.call_count)
|
|
|
|
def test_cleanup_resize_snap_backend_image_does_not_exist(self):
    # When the backend image no longer exists, _cleanup_resize must not
    # try to remove a resize snapshot, but it still removes the
    # instance directory tree.
    CONF.set_override('policy_dirs', [], group='oslo_policy')
    ins_ref = self._create_instance({'host': CONF.host})
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    drvr.image_backend = mock.Mock()
    drvr.image_backend.by_name.return_value = drvr.image_backend
    # Simulate the root disk image having already been deleted.
    drvr.image_backend.exists.return_value = False

    with test.nested(
            mock.patch('nova.compute.utils.is_volume_backed_instance',
                       return_value=False),
            mock.patch.object(os.path, 'exists'),
            mock.patch.object(libvirt_utils, 'get_instance_path'),
            mock.patch.object(shutil, 'rmtree'),
            mock.patch.object(drvr.image_backend, 'remove_snap')) as (
            mock_volume_backed, mock_exists, mock_get_path,
            mock_rmtree, mock_remove):
        mock_exists.return_value = True
        mock_get_path.return_value = '/fake/inst'

        drvr._cleanup_resize(
            self.context, ins_ref, _fake_network_info(self, 1))
        mock_get_path.assert_called_once_with(ins_ref)
        self.assertFalse(mock_remove.called)
        # NOTE: the original code called mock_rmtree.called_once_with(...),
        # which is NOT a Mock assertion method -- it silently creates a
        # child mock and verifies nothing. Assert the directory cleanup
        # actually happened.
        self.assertTrue(mock_rmtree.called)
|
|
|
|
def test_get_instance_disk_info_exception(self):
    # A libvirtError raised from XMLDesc while collecting disk info is
    # surfaced to the caller as InstanceNotFound.
    instance = self._create_instance()

    class FakeExceptionDomain(FakeVirtDomain):
        def __init__(self):
            super(FakeExceptionDomain, self).__init__()

        def XMLDesc(self, flags):
            raise fakelibvirt.libvirtError("Libvirt error")

    def fake_get_domain(self, instance):
        return FakeExceptionDomain()

    self.stub_out('nova.virt.libvirt.host.Host._get_domain',
                  fake_get_domain)
    self.assertRaises(exception.InstanceNotFound,
                      self.drvr.get_instance_disk_info,
                      instance)
|
|
|
|
@mock.patch('os.path.exists')
@mock.patch.object(lvm, 'list_volumes')
def test_lvm_disks(self, listlvs, exists):
    # Only logical volumes whose name is prefixed with this instance's
    # uuid belong to it; other LVs in the same VG are filtered out.
    instance = objects.Instance(uuid=uuids.instance, id=1)
    self.flags(images_volume_group='vols', group='libvirt')
    exists.return_value = True
    listlvs.return_value = ['%s_foo' % uuids.instance,
                            'other-uuid_foo']
    disks = self.drvr._lvm_disks(instance)
    self.assertEqual(['/dev/vols/%s_foo' % uuids.instance], disks)
|
|
|
|
def test_is_booted_from_volume(self):
    # _is_booted_from_volume reports True only when some block device
    # mapping carries boot_index 0 (the root device).
    func = libvirt_driver.LibvirtDriver._is_booted_from_volume

    block_device_mapping = []
    block_device_info = {'block_device_mapping': block_device_mapping}

    # No mappings at all -> not volume-booted.
    self.assertFalse(func(block_device_info))

    # Mappings with non-root boot indexes never flip the result.
    for boot_index in (-1, None, 1):
        block_device_mapping.append({'boot_index': boot_index})
        self.assertFalse(func(block_device_info))

    # A mapping with boot_index 0 marks the instance as volume-booted.
    block_device_mapping.append({'boot_index': 0})
    self.assertTrue(func(block_device_info))
|
|
|
|
@mock.patch('nova.virt.libvirt.driver.imagebackend')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._inject_data')
@mock.patch('nova.virt.libvirt.driver.imagecache')
def test_data_not_injects_with_configdrive(self, mock_image, mock_inject,
                                           mock_backend):
    # When a config drive is used, file/key injection into the root
    # disk must be skipped even with inject_partition enabled (-1).
    self.flags(inject_partition=-1, group='libvirt')

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    # config_drive is True by default, configdrive.required_by()
    # returns True
    instance_ref = self._create_instance()
    disk_images = {'image_id': None}

    drvr._create_and_inject_local_root(self.context, instance_ref, False,
                                       '', disk_images, get_injection_info(),
                                       None)
    self.assertFalse(mock_inject.called)
|
|
|
|
@mock.patch('nova.virt.netutils.get_injected_network_template')
@mock.patch('nova.virt.disk.api.inject_data')
@mock.patch.object(libvirt_driver.LibvirtDriver, "_conn")
def _test_inject_data(self, instance, injection_info, path, disk_params,
                      mock_conn, disk_inject_data, inj_network,
                      called=True):
    # Helper: run _inject_data against a fake local-file image backend
    # and verify disk.api.inject_data is invoked (or not, per 'called')
    # with the per-test disk_params.
    class ImageBackend(object):
        path = '/path'

        def get_model(self, connection):
            return imgmodel.LocalFileImage(self.path,
                                           imgmodel.FORMAT_RAW)

    def fake_inj_network(*args, **kwds):
        return args[0] or None
    inj_network.side_effect = fake_inj_network

    image_backend = ImageBackend()
    image_backend.path = path

    with mock.patch.object(self.drvr.image_backend, 'by_name',
                           return_value=image_backend):
        self.flags(inject_partition=0, group='libvirt')

        self.drvr._inject_data(image_backend, instance, injection_info)

    if called:
        disk_inject_data.assert_called_once_with(
            mock.ANY,
            *disk_params,
            partition=None, mandatory=('files',))

    self.assertEqual(disk_inject_data.called, called)
|
|
|
|
def test_inject_data_adminpass(self):
    # The admin password is injected only while inject_password is on.
    self.flags(inject_password=True, group='libvirt')
    instance = self._create_instance()
    injection_info = get_injection_info(admin_pass='foobar')
    disk_params = [
        None,  # key
        None,  # net
        {},  # metadata
        'foobar',  # admin_pass
        None,  # files
    ]
    self._test_inject_data(instance, injection_info, "/path", disk_params)

    # Test with the configuration set to false.
    self.flags(inject_password=False, group='libvirt')
    self._test_inject_data(instance, injection_info, "/path", disk_params,
                           called=False)
|
|
|
|
def test_inject_data_key(self):
    # The SSH key is injected only while inject_key is on.
    instance = self._create_instance(params={'key_data': 'key-content'})
    injection_info = get_injection_info()

    self.flags(inject_key=True, group='libvirt')
    disk_params = [
        'key-content',  # key
        None,  # net
        {},  # metadata
        None,  # admin_pass
        None,  # files
    ]
    self._test_inject_data(instance, injection_info, "/path",
                           disk_params)

    # Test with the configuration set to false.
    self.flags(inject_key=False, group='libvirt')
    self._test_inject_data(instance, injection_info, "/path", disk_params,
                           called=False)
|
|
|
|
def test_inject_data_metadata(self):
    # Instance metadata is passed through to disk.api.inject_data.
    instance = self._create_instance(params={'metadata': {'data': 'foo'}})
    injection_info = get_injection_info()
    disk_params = [
        None,  # key
        None,  # net
        {'data': 'foo'},  # metadata
        None,  # admin_pass
        None,  # files
    ]
    self._test_inject_data(instance, injection_info, "/path", disk_params)
|
|
|
|
def test_inject_data_files(self):
    # Personality files from the injection info are passed through.
    instance = self._create_instance()
    injection_info = get_injection_info(files=['file1', 'file2'])
    disk_params = [
        None,  # key
        None,  # net
        {},  # metadata
        None,  # admin_pass
        ['file1', 'file2'],  # files
    ]
    self._test_inject_data(instance, injection_info, "/path", disk_params)
|
|
|
|
def test_inject_data_net(self):
    # Network info from the injection info is passed through as 'net'.
    instance = self._create_instance()
    injection_info = get_injection_info(network_info={'net': 'eno1'})
    disk_params = [
        None,  # key
        {'net': 'eno1'},  # net
        {},  # metadata
        None,  # admin_pass
        None,  # files
    ]
    self._test_inject_data(instance, injection_info, "/path", disk_params)
|
|
|
|
def test_inject_not_exist_image(self):
    # A backend image path that does not exist results in no injection
    # (called=False); presumably the driver checks existence before
    # injecting -- confirm against _inject_data.
    instance = self._create_instance()
    injection_info = get_injection_info()
    disk_params = [
        'key-content',  # key
        None,  # net
        None,  # metadata
        None,  # admin_pass
        None,  # files
    ]
    self._test_inject_data(instance, injection_info, "/fail/path",
                           disk_params, called=False)
|
|
|
|
def test_attach_interface_build_metadata_fails(self):
    # If building device metadata fails after the vif was attached,
    # attach_interface must detach it again and raise
    # InterfaceAttachFailed.
    instance = self._create_instance()
    network_info = _fake_network_info(self, 1)
    domain = FakeVirtDomain(fake_xml="""
            <domain type='kvm'>
                <devices>
                    <interface type='bridge'>
                        <mac address='52:54:00:f6:35:8f'/>
                        <model type='virtio'/>
                        <source bridge='br0'/>
                        <target dev='tap12345678'/>
                        <address type='pci' domain='0x0000' bus='0x00'
                         slot='0x03' function='0x0'/>
                    </interface>
                </devices>
            </domain>""")
    fake_image_meta = objects.ImageMeta.from_dict(
        {'id': instance.image_ref})
    expected = self.drvr.vif_driver.get_config(
        instance, network_info[0], fake_image_meta, instance.flavor,
        CONF.libvirt.virt_type, self.drvr._host)
    with test.nested(
            mock.patch.object(host.Host, '_get_domain', return_value=domain),
            mock.patch.object(self.drvr.firewall_driver,
                              'setup_basic_filtering'),
            mock.patch.object(domain, 'attachDeviceFlags'),
            mock.patch.object(domain, 'info',
                              return_value=[power_state.RUNNING, 1, 2, 3, 4]),
            mock.patch.object(self.drvr.vif_driver, 'get_config',
                              return_value=expected),
            mock.patch.object(self.drvr, '_build_device_metadata',
                              side_effect=exception.NovaException),
            mock.patch.object(self.drvr, 'detach_interface'),
    ) as (
        mock_get_domain, mock_setup_basic_filtering,
        mock_attach_device_flags, mock_info, mock_get_config,
        mock_build_device_metadata, mock_detach_interface
    ):
        self.assertRaises(exception.InterfaceAttachFailed,
                          self.drvr.attach_interface, self.context,
                          instance, fake_image_meta, network_info[0])
        mock_get_domain.assert_called_with(instance)
        mock_info.assert_called_with()
        mock_setup_basic_filtering.assert_called_with(
            instance, [network_info[0]])
        mock_get_config.assert_called_with(
            instance, network_info[0], fake_image_meta, instance.flavor,
            CONF.libvirt.virt_type, self.drvr._host)
        mock_build_device_metadata.assert_called_with(self.context,
                                                      instance)
        # Attach happened with both CONFIG and LIVE flags before the
        # metadata failure, hence the compensating detach below.
        mock_attach_device_flags.assert_called_with(
            expected.to_xml(),
            flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
                   fakelibvirt.VIR_DOMAIN_AFFECT_LIVE))
        mock_detach_interface.assert_called_with(self.context, instance,
                                                 network_info[0])
|
|
|
|
    @mock.patch.object(objects.Instance, 'save')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_build_device_metadata')
    @mock.patch.object(FakeVirtDomain, 'info')
    @mock.patch.object(FakeVirtDomain, 'attachDeviceFlags')
    @mock.patch.object(host.Host, '_get_domain')
    def _test_attach_interface(self, power_state, expected_flags,
                               mock_get_domain, mock_attach,
                               mock_info, mock_build, mock_save):
        """Helper asserting a successful attach_interface call.

        Drives LibvirtDriver.attach_interface() against a fake domain in the
        given power state and verifies the vif config was attached with
        exactly ``expected_flags`` (live and/or persistent config).
        """
        instance = self._create_instance()
        network_info = _fake_network_info(self, 1)
        # Fake guest with one pre-existing bridge interface.
        domain = FakeVirtDomain(fake_xml="""
                <domain type='kvm'>
                    <devices>
                        <interface type='bridge'>
                            <mac address='52:54:00:f6:35:8f'/>
                            <model type='virtio'/>
                            <source bridge='br0'/>
                            <target dev='tap12345678'/>
                            <address type='pci' domain='0x0000' bus='0x00'
                             slot='0x03' function='0x0'/>
                        </interface>
                    </devices>
                </domain>""")
        mock_get_domain.return_value = domain
        # domain.info() -> [state, maxMem, mem, nrVirtCpu, cpuTime]
        mock_info.return_value = [power_state, 1, 2, 3, 4]

        fake_image_meta = objects.ImageMeta.from_dict(
            {'id': instance.image_ref})
        # Capture the real vif config before patching get_config so we can
        # assert the exact XML passed to attachDeviceFlags.
        expected = self.drvr.vif_driver.get_config(
            instance, network_info[0], fake_image_meta, instance.flavor,
            CONF.libvirt.virt_type, self.drvr._host)
        mock_build.return_value = objects.InstanceDeviceMetadata()

        with test.nested(
            mock.patch.object(self.drvr.vif_driver, 'get_config',
                              return_value=expected),
            mock.patch.object(self.drvr.firewall_driver,
                              'setup_basic_filtering')
        ) as (mock_get_config, mock_setup):
            self.drvr.attach_interface(
                self.context, instance, fake_image_meta, network_info[0])

            mock_get_config.assert_called_once_with(
                instance, network_info[0], test.MatchType(objects.ImageMeta),
                test.MatchType(objects.Flavor), CONF.libvirt.virt_type,
                self.drvr._host)
            mock_get_domain.assert_called_once_with(instance)
            mock_info.assert_called_once_with()
            mock_setup.assert_called_once_with(instance, [network_info[0]])
            mock_build.assert_called_once_with(self.context, instance)
            # Device metadata is persisted on the instance record.
            mock_save.assert_called_once_with()
            mock_attach.assert_called_once_with(expected.to_xml(),
                                                flags=expected_flags)
|
|
|
|
def test_attach_interface_with_running_instance(self):
|
|
self._test_attach_interface(
|
|
power_state.RUNNING,
|
|
(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
|
|
fakelibvirt.VIR_DOMAIN_AFFECT_LIVE))
|
|
|
|
def test_attach_interface_with_pause_instance(self):
|
|
self._test_attach_interface(
|
|
power_state.PAUSED,
|
|
(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
|
|
fakelibvirt.VIR_DOMAIN_AFFECT_LIVE))
|
|
|
|
def test_attach_interface_with_shutdown_instance(self):
|
|
self._test_attach_interface(
|
|
power_state.SHUTDOWN, fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG)
|
|
|
|
    def _test_detach_interface(self, power_state, expected_flags,
                               device_not_found=False):
        """Helper asserting detach_interface behaviour.

        Runs LibvirtDriver.detach_interface() against a fake guest in the
        given power state. With ``device_not_found=True`` the interface
        disappears before detach is attempted, so no detachDeviceFlags call
        should be made and a warning should be logged instead.
        """
        # setup some mocks
        instance = self._create_instance()
        network_info = _fake_network_info(self, 1)
        domain = FakeVirtDomain(fake_xml="""
                <domain type='kvm'>
                    <devices>
                        <interface type='bridge'>
                            <mac address='52:54:00:f6:35:8f'/>
                            <model type='virtio'/>
                            <source bridge='br0'/>
                            <target dev='tap12345678'/>
                            <address type='pci' domain='0x0000' bus='0x00'
                             slot='0x03' function='0x0'/>
                        </interface>
                    </devices>
                </domain>""",
                info=[power_state, 1, 2, 3, 4])
        guest = libvirt_guest.Guest(domain)

        expected_cfg = vconfig.LibvirtConfigGuestInterface()
        expected_cfg.parse_str("""
            <interface type='bridge'>
                <mac address='52:54:00:f6:35:8f'/>
                <model type='virtio'/>
                <source bridge='br0'/>
                <target dev='tap12345678'/>
            </interface>""")

        if device_not_found:
            # This will trigger detach_device_with_retry to raise
            # DeviceNotFound
            get_interface_calls = [expected_cfg, None]
        else:
            get_interface_calls = [expected_cfg, expected_cfg, None, None]

        with test.nested(
            mock.patch.object(host.Host, 'get_guest', return_value=guest),
            mock.patch.object(self.drvr.vif_driver, 'get_config',
                              return_value=expected_cfg),
            # This is called multiple times in a retry loop so we use a
            # side_effect to simulate the calls to stop the loop.
            mock.patch.object(guest, 'get_interface_by_cfg',
                              side_effect=get_interface_calls),
            mock.patch.object(domain, 'detachDeviceFlags'),
            mock.patch('nova.virt.libvirt.driver.LOG.warning')
        ) as (
            mock_get_guest, mock_get_config,
            mock_get_interface, mock_detach_device_flags,
            mock_warning
        ):
            # run the detach method
            self.drvr.detach_interface(self.context, instance, network_info[0])

        # make our assertions
        mock_get_guest.assert_called_once_with(instance)
        mock_get_config.assert_called_once_with(
            instance, network_info[0], test.MatchType(objects.ImageMeta),
            test.MatchType(objects.Flavor), CONF.libvirt.virt_type,
            self.drvr._host)
        mock_get_interface.assert_has_calls(
            [mock.call(expected_cfg) for x in range(len(get_interface_calls))])

        if device_not_found:
            mock_detach_device_flags.assert_not_called()
            self.assertTrue(mock_warning.called)
        else:
            mock_detach_device_flags.assert_called_once_with(
                expected_cfg.to_xml(), flags=expected_flags)
            mock_warning.assert_not_called()
|
|
|
|
def test_detach_interface_with_running_instance(self):
|
|
self._test_detach_interface(
|
|
power_state.RUNNING,
|
|
expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
|
|
fakelibvirt.VIR_DOMAIN_AFFECT_LIVE))
|
|
|
|
def test_detach_interface_with_running_instance_device_not_found(self):
|
|
"""Tests that the interface is detached before we try to detach it.
|
|
"""
|
|
self._test_detach_interface(
|
|
power_state.RUNNING,
|
|
expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
|
|
fakelibvirt.VIR_DOMAIN_AFFECT_LIVE),
|
|
device_not_found=True)
|
|
|
|
def test_detach_interface_with_pause_instance(self):
|
|
self._test_detach_interface(
|
|
power_state.PAUSED,
|
|
expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
|
|
fakelibvirt.VIR_DOMAIN_AFFECT_LIVE))
|
|
|
|
def test_detach_interface_with_shutdown_instance(self):
|
|
self._test_detach_interface(
|
|
power_state.SHUTDOWN,
|
|
expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG))
|
|
|
|
    @mock.patch('nova.virt.libvirt.driver.LOG')
    def test_detach_interface_device_not_found(self, mock_log):
        """A libvirt 'not found' failure during detach logs a warning only.

        Asserts that we don't log an error when the interface device is not
        found on the guest after a libvirt error during detach.
        """
        instance = self._create_instance()
        vif = _fake_network_info(self, 1)[0]
        guest = mock.Mock(spec=libvirt_guest.Guest)
        guest.get_power_state = mock.Mock()
        self.drvr._host.get_guest = mock.Mock(return_value=guest)
        # Simulate libvirt failing the detach because the device is gone.
        error = fakelibvirt.libvirtError(
            'no matching network device was found')
        error.err = (fakelibvirt.VIR_ERR_OPERATION_FAILED,)
        guest.detach_device = mock.Mock(side_effect=error)
        # mock out that get_interface_by_cfg doesn't find the interface
        guest.get_interface_by_cfg = mock.Mock(return_value=None)
        self.drvr.detach_interface(self.context, instance, vif)
        # an error shouldn't be logged, but a warning should be logged
        self.assertFalse(mock_log.error.called)
        self.assertEqual(1, mock_log.warning.call_count)
        self.assertIn('the device is no longer found on the guest',
                      six.text_type(mock_log.warning.call_args[0]))
|
|
|
|
    @mock.patch.object(FakeVirtDomain, 'info')
    @mock.patch.object(FakeVirtDomain, 'detachDeviceFlags')
    @mock.patch.object(host.Host, '_get_domain')
    def test_detach_interface_device_with_same_mac_address(
            self, mock_get_domain, mock_detach, mock_info):
        """Only the matching interface is detached when MACs collide.

        The fake guest has two bridge interfaces sharing one MAC address;
        detach must be called exactly once, with the config of the vif that
        actually matches (br0/tap12345678).
        """
        instance = self._create_instance()
        network_info = _fake_network_info(self, 1)
        domain = FakeVirtDomain(fake_xml="""
                <domain type='kvm'>
                    <devices>
                        <interface type='bridge'>
                            <mac address='52:54:00:f6:35:8f'/>
                            <model type='virtio'/>
                            <source bridge='br0'/>
                            <target dev='tap12345678'/>
                            <address type='pci' domain='0x0000' bus='0x00'
                             slot='0x03' function='0x0'/>
                        </interface>
                        <interface type='bridge'>
                            <mac address='52:54:00:f6:35:8f'/>
                            <model type='virtio'/>
                            <source bridge='br1'/>
                            <target dev='tap87654321'/>
                            <address type='pci' domain='0x0000' bus='0x00'
                             slot='0x03' function='0x1'/>
                        </interface>
                    </devices>
                </domain>""")
        mock_get_domain.return_value = domain
        mock_info.return_value = [power_state.RUNNING, 1, 2, 3, 4]
        expected = vconfig.LibvirtConfigGuestInterface()
        expected.parse_str("""
            <interface type='bridge'>
                <mac address='52:54:00:f6:35:8f'/>
                <model type='virtio'/>
                <source bridge='br0'/>
                <target dev='tap12345678'/>
            </interface>""")
        expected_flags = (fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
                          fakelibvirt.VIR_DOMAIN_AFFECT_LIVE)
        with test.nested(
            # get_interface_by_cfg is polled in a retry loop; the trailing
            # Nones stop the loop once the device appears detached.
            mock.patch.object(libvirt_guest.Guest, 'get_interface_by_cfg',
                              side_effect=[expected, expected, None, None]),
            mock.patch.object(self.drvr.vif_driver, 'get_config',
                              return_value=expected),
            mock.patch.object(self.drvr.firewall_driver,
                              'setup_basic_filtering')
        ) as (mock_get_interface, mock_get_config, mock_setup):
            self.drvr.detach_interface(self.context, instance, network_info[0])

        mock_get_interface.assert_has_calls([mock.call(expected)] * 3)
        self.assertEqual(4, mock_get_interface.call_count)
        mock_get_config.assert_called_once_with(
            instance, network_info[0], test.MatchType(objects.ImageMeta),
            test.MatchType(objects.Flavor), CONF.libvirt.virt_type,
            self.drvr._host)
        mock_setup.assert_not_called()
        mock_get_domain.assert_called_once_with(instance)
        mock_info.assert_called_once_with()
        mock_detach.assert_called_once_with(expected.to_xml(),
                                            flags=expected_flags)
|
|
|
|
    @mock.patch('nova.virt.libvirt.LibvirtDriver.'
                '_get_all_assigned_mediated_devices')
    @mock.patch('nova.virt.libvirt.utils.write_to_file')
    # NOTE(mdbooth): The following 4 mocks are required to execute
    # get_guest_xml().
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_set_host_enabled')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_build_device_metadata')
    @mock.patch('nova.privsep.utils.supports_direct_io')
    @mock.patch('nova.api.metadata.base.InstanceMetadata')
    def _test_rescue(self, instance,
                     mock_instance_metadata, mock_supports_direct_io,
                     mock_build_device_metadata, mock_set_host_enabled,
                     mock_write_to_file,
                     mock_get_mdev,
                     exists=None):
        """Run rescue() and return the image backend plus the domain XML.

        :param instance: instance to rescue
        :param exists: optional callable passed to ImageBackendFixture to
            control which backing disks already exist
        :returns: (ImageBackendFixture, parsed etree of the created domain)
        """
        self.flags(instances_path=self.useFixture(fixtures.TempDir()).path)
        mock_build_device_metadata.return_value = None
        mock_supports_direct_io.return_value = True

        # One mediated device is already assigned to the instance so we can
        # assert it survives into the rescue domain XML.
        mock_get_mdev.return_value = {uuids.mdev1: uuids.inst1}

        backend = self.useFixture(
            fake_imagebackend.ImageBackendFixture(exists=exists))

        image_meta = objects.ImageMeta.from_dict(
            {'id': uuids.image_id, 'name': 'fake'})
        network_info = _fake_network_info(self, 1)
        rescue_password = 'fake_password'

        # Capture the XML given to _create_domain instead of starting a guest.
        domain_xml = [None]

        def fake_create_domain(xml=None, domain=None, power_on=True,
                               pause=False, post_xml_callback=None):
            domain_xml[0] = xml
            if post_xml_callback is not None:
                post_xml_callback()

        with mock.patch.object(
                self.drvr, '_create_domain',
                side_effect=fake_create_domain) as mock_create_domain:
            self.drvr.rescue(self.context, instance,
                             network_info, image_meta, rescue_password)

            self.assertTrue(mock_create_domain.called)

        return backend, etree.fromstring(domain_xml[0])
|
|
|
|
    def test_rescue(self):
        """Rescue without a config drive creates the expected disks and XML."""
        instance = self._create_instance({'config_drive': None})
        backend, doc = self._test_rescue(instance)

        # Assert that we created the expected set of disks, and no others
        self.assertEqual(['disk.rescue', 'kernel.rescue', 'ramdisk.rescue'],
                         sorted(backend.created_disks.keys()))

        disks = backend.disks

        kernel_ramdisk = [disks[name + '.rescue']
                          for name in ('kernel', 'ramdisk')]

        # Assert that kernel and ramdisk were both created as raw
        for disk in kernel_ramdisk:
            self.assertEqual('raw', disk.image_type)

        # Assert that the root rescue disk was created as the default type
        self.assertIsNone(disks['disk.rescue'].image_type)

        # We expect the generated domain to contain disk.rescue and
        # disk, in that order
        expected_domain_disk_paths = [disks[name].path for name in
                                      ('disk.rescue', 'disk')]
        domain_disk_paths = doc.xpath('devices/disk/source/@file')
        self.assertEqual(expected_domain_disk_paths, domain_disk_paths)

        # The generated domain xml should contain the rescue kernel
        # and ramdisk
        expected_kernel_ramdisk_paths = [os.path.join(CONF.instances_path,
                                                      disk.path) for disk
                                         in kernel_ramdisk]
        kernel_ramdisk_paths = \
            doc.xpath('os/*[self::initrd|self::kernel]/text()')
        self.assertEqual(expected_kernel_ramdisk_paths,
                         kernel_ramdisk_paths)

        # The generated domain XML should also contain any existing mdev
        self.assertEqual(
            [uuids.mdev1],
            doc.xpath("devices/*[@type='mdev']/source/address/@uuid"))
|
|
|
|
    @mock.patch('nova.virt.configdrive.ConfigDriveBuilder._make_iso9660')
    def test_rescue_config_drive(self, mock_mkisofs):
        """Rescue with a config drive also rebuilds disk.config.rescue."""
        instance = self._create_instance({'config_drive': str(True)})
        # Pretend every disk except the rescue config drive already exists,
        # forcing it to be (re)created and imported.
        backend, doc = self._test_rescue(
            instance, exists=lambda name: name != 'disk.config.rescue')

        # Assert that we created the expected set of disks, and no others
        self.assertEqual(['disk.config.rescue', 'disk.rescue', 'kernel.rescue',
                          'ramdisk.rescue'],
                         sorted(backend.created_disks.keys()))

        disks = backend.disks

        config_disk = disks['disk.config.rescue']
        kernel_ramdisk = [disks[name + '.rescue']
                          for name in ('kernel', 'ramdisk')]

        # Assert that we imported the config disk
        self.assertTrue(config_disk.import_file.called)

        # Assert that the config disk, kernel and ramdisk were created as raw
        for disk in [config_disk] + kernel_ramdisk:
            self.assertEqual('raw', disk.image_type)

        # Assert that the root rescue disk was created as the default type
        self.assertIsNone(disks['disk.rescue'].image_type)

        # We expect the generated domain to contain disk.rescue, disk, and
        # disk.config.rescue in that order
        expected_domain_disk_paths = [disks[name].path for name
                                      in ('disk.rescue', 'disk',
                                          'disk.config.rescue')]
        domain_disk_paths = doc.xpath('devices/disk/source/@file')
        self.assertEqual(expected_domain_disk_paths, domain_disk_paths)

        # The generated domain xml should contain the rescue kernel
        # and ramdisk
        expected_kernel_ramdisk_paths = [os.path.join(CONF.instances_path,
                                                      disk.path)
                                         for disk in kernel_ramdisk]

        kernel_ramdisk_paths = \
            doc.xpath('os/*[self::initrd|self::kernel]/text()')
        self.assertEqual(expected_kernel_ramdisk_paths,
                         kernel_ramdisk_paths)
|
|
|
|
    @mock.patch.object(libvirt_utils, 'get_instance_path')
    @mock.patch.object(libvirt_utils, 'load_file')
    @mock.patch.object(host.Host, '_get_domain')
    def _test_unrescue(self, instance, mock_get_domain, mock_load_file,
                       mock_get_instance_path):
        """Helper asserting unrescue() restores the guest and cleans up.

        Verifies the original domain is recreated from the saved
        unrescue.xml and that every ``rescue.*`` artifact (file, directory,
        LVM volume) under the instance path is removed.
        """
        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices>"
                    "<disk type='block' device='disk'>"
                    "<source dev='/dev/some-vg/some-lv'/>"
                    "<target dev='vda' bus='virtio'/></disk>"
                    "</devices></domain>")

        mock_get_instance_path.return_value = '/path'
        fake_dom = FakeVirtDomain(fake_xml=dummyxml)
        mock_get_domain.return_value = fake_dom
        mock_load_file.return_value = "fake_unrescue_xml"
        unrescue_xml_path = os.path.join('/path', 'unrescue.xml')
        rescue_file = os.path.join('/path', 'rescue.file')
        rescue_dir = os.path.join('/path', 'rescue.dir')

        # Make one glob hit look like a plain file and the other like a
        # directory so both cleanup paths (unlink vs rmtree) are exercised.
        def isdir_sideeffect(*args, **kwargs):
            if args[0] == '/path/rescue.file':
                return False
            if args[0] == '/path/rescue.dir':
                return True

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        with test.nested(
                mock.patch.object(libvirt_utils, 'write_to_file'),
                mock.patch.object(drvr, '_destroy'),
                mock.patch.object(drvr, '_create_domain'),
                mock.patch.object(os, 'unlink'),
                mock.patch.object(shutil, 'rmtree'),
                mock.patch.object(os.path, "isdir",
                                  side_effect=isdir_sideeffect),
                mock.patch.object(drvr, '_lvm_disks',
                                  return_value=['lvm.rescue']),
                mock.patch.object(lvm, 'remove_volumes'),
                mock.patch.object(glob, 'iglob',
                                  return_value=[rescue_file, rescue_dir])
                ) as (mock_write, mock_destroy, mock_create, mock_del,
                      mock_rmtree, mock_isdir, mock_lvm_disks,
                      mock_remove_volumes, mock_glob):
            drvr.unrescue(instance, None)
            mock_destroy.assert_called_once_with(instance)
            mock_create.assert_called_once_with("fake_unrescue_xml",
                                                fake_dom)
            # unlink is called for unrescue.xml first, then the rescue file.
            self.assertEqual(2, mock_del.call_count)
            self.assertEqual(unrescue_xml_path,
                             mock_del.call_args_list[0][0][0])
            self.assertEqual(1, mock_rmtree.call_count)
            self.assertEqual(rescue_dir, mock_rmtree.call_args_list[0][0][0])
            self.assertEqual(rescue_file, mock_del.call_args_list[1][0][0])
            mock_remove_volumes.assert_called_once_with(['lvm.rescue'])
|
|
|
|
def test_unrescue(self):
|
|
instance = objects.Instance(uuid=uuids.instance, id=1)
|
|
self._test_unrescue(instance)
|
|
|
|
    @mock.patch.object(rbd_utils.RBDDriver, '_destroy_volume')
    @mock.patch.object(rbd_utils.RBDDriver, '_disconnect_from_rados')
    @mock.patch.object(rbd_utils.RBDDriver, '_connect_to_rados')
    @mock.patch.object(rbd_utils, 'rbd')
    @mock.patch.object(rbd_utils, 'rados')
    def test_unrescue_rbd(self, mock_rados, mock_rbd, mock_connect,
                          mock_disconnect, mock_destroy_volume):
        """With rbd images, only this instance's rescue volume is destroyed."""
        self.flags(images_type='rbd', group='libvirt')
        mock_connect.return_value = mock.MagicMock(), mock.MagicMock()
        instance = objects.Instance(uuid=uuids.instance, id=1)
        # The pool also holds another instance's volumes, which must be
        # left untouched.
        all_volumes = [uuids.other_instance + '_disk',
                       uuids.other_instance + '_disk.rescue',
                       instance.uuid + '_disk',
                       instance.uuid + '_disk.rescue']
        mock_rbd.RBD.return_value.list.return_value = all_volumes
        self._test_unrescue(instance)
        mock_destroy_volume.assert_called_once_with(
            mock.ANY, instance.uuid + '_disk.rescue')
|
|
|
|
    @mock.patch('shutil.rmtree')
    @mock.patch('os.rename')
    @mock.patch('os.path.exists')
    @mock.patch('nova.virt.libvirt.utils.get_instance_path')
    def test_delete_instance_files(self, mock_get_instance_path,
                                   mock_exists, mock_rename,
                                   mock_shutil):
        """Happy path: rename to *_del, rmtree, and report success."""
        mock_get_instance_path.return_value = '/path'
        instance = objects.Instance(uuid=uuids.instance, id=1)

        # exists() call sequence driving delete_instance_files; the final
        # False means '/path_del' is gone after rmtree, so deletion
        # succeeded.  NOTE(review): exact mapping of the earlier calls to
        # paths is defined in delete_instance_files — confirm there.
        mock_exists.side_effect = [False, False, True, False]

        result = self.drvr.delete_instance_files(instance)
        mock_get_instance_path.assert_called_with(instance)
        mock_rename.assert_called_with('/path', '/path_del')
        mock_shutil.assert_called_with('/path_del')
        self.assertTrue(result)
|
|
|
|
    @mock.patch('shutil.rmtree')
    @mock.patch('os.rename')
    @mock.patch('os.path.exists')
    @mock.patch('os.kill')
    @mock.patch('nova.virt.libvirt.utils.get_instance_path')
    def test_delete_instance_files_kill_running(
            self, mock_get_instance_path, mock_kill, mock_exists,
            mock_rename, mock_shutil):
        """Outstanding tracked jobs (pids 3 and 4) are killed before delete."""
        mock_get_instance_path.return_value = '/path'
        instance = objects.Instance(uuid=uuids.instance, id=1)
        # Pretend two background jobs are still running for this instance.
        self.drvr.job_tracker.jobs[instance.uuid] = [3, 4]

        mock_exists.side_effect = [False, False, True, False]

        result = self.drvr.delete_instance_files(instance)
        mock_get_instance_path.assert_called_with(instance)
        mock_rename.assert_called_with('/path', '/path_del')
        # Each pid gets SIGKILL followed by a kill(pid, 0) liveness probe.
        mock_kill.assert_has_calls(
            [mock.call(3, signal.SIGKILL), mock.call(3, 0),
             mock.call(4, signal.SIGKILL), mock.call(4, 0)])
        mock_shutil.assert_called_with('/path_del')
        self.assertTrue(result)
        # The job entry is removed from the tracker once handled.
        self.assertNotIn(instance.uuid, self.drvr.job_tracker.jobs)
|
|
|
|
    @mock.patch('shutil.rmtree')
    @mock.patch('os.rename')
    @mock.patch('os.path.exists')
    @mock.patch('nova.virt.libvirt.utils.get_instance_path')
    def test_delete_instance_files_resize(self, mock_get_instance_path,
                                          mock_exists, mock_rename,
                                          mock_shutil):
        """If renaming '/path' fails, fall back to the '_resize' directory."""
        mock_get_instance_path.return_value = '/path'
        instance = objects.Instance(uuid=uuids.instance, id=1)

        # First rename ('/path') raises, second ('/path_resize') succeeds.
        mock_rename.side_effect = [Exception(), None]
        mock_exists.side_effect = [False, False, True, False]

        result = self.drvr.delete_instance_files(instance)
        mock_get_instance_path.assert_called_with(instance)
        expected = [mock.call('/path', '/path_del'),
                    mock.call('/path_resize', '/path_del')]
        self.assertEqual(expected, mock_rename.mock_calls)
        mock_shutil.assert_called_with('/path_del')
        self.assertTrue(result)
|
|
|
|
    @mock.patch('shutil.rmtree')
    @mock.patch('os.rename')
    @mock.patch('os.path.exists')
    @mock.patch('nova.virt.libvirt.utils.get_instance_path')
    def test_delete_instance_files_failed(self, mock_get_instance_path,
                                          mock_exists, mock_rename,
                                          mock_shutil):
        """If '/path_del' still exists after rmtree, deletion reports False."""
        mock_get_instance_path.return_value = '/path'
        instance = objects.Instance(uuid=uuids.instance, id=1)

        # Final True: '/path_del' survived the rmtree attempt.
        mock_exists.side_effect = [False, False, True, True]

        result = self.drvr.delete_instance_files(instance)
        mock_get_instance_path.assert_called_with(instance)
        mock_rename.assert_called_with('/path', '/path_del')
        mock_shutil.assert_called_with('/path_del')
        self.assertFalse(result)
|
|
|
|
    @mock.patch('shutil.rmtree')
    @mock.patch('os.rename')
    @mock.patch('os.path.exists')
    @mock.patch('nova.virt.libvirt.utils.get_instance_path')
    def test_delete_instance_files_mv_failed(self, mock_get_instance_path,
                                             mock_exists, mock_rename,
                                             mock_shutil):
        """Every rename fails and the paths still exist: report False."""
        mock_get_instance_path.return_value = '/path'
        instance = objects.Instance(uuid=uuids.instance, id=1)

        # All rename attempts fail; exists() keeps returning True.
        mock_rename.side_effect = Exception()
        mock_exists.side_effect = [True, True]

        result = self.drvr.delete_instance_files(instance)
        mock_get_instance_path.assert_called_with(instance)
        # Both candidates are retried once more after the first round fails.
        expected = [mock.call('/path', '/path_del'),
                    mock.call('/path_resize', '/path_del')] * 2
        self.assertEqual(expected, mock_rename.mock_calls)
        self.assertFalse(result)
|
|
|
|
    @mock.patch('shutil.rmtree')
    @mock.patch('os.rename')
    @mock.patch('os.path.exists')
    @mock.patch('nova.virt.libvirt.utils.get_instance_path')
    def test_delete_instance_files_resume(self, mock_get_instance_path,
                                          mock_exists, mock_rename,
                                          mock_shutil):
        """Renames fail but '/path_del' already exists: cleanup can resume."""
        mock_get_instance_path.return_value = '/path'
        instance = objects.Instance(uuid=uuids.instance, id=1)

        mock_rename.side_effect = Exception()
        # Third call True: a previous '/path_del' is found and removed;
        # final False confirms it is gone, so the result is success.
        mock_exists.side_effect = [False, False, True, False]

        result = self.drvr.delete_instance_files(instance)
        mock_get_instance_path.assert_called_with(instance)
        expected = [mock.call('/path', '/path_del'),
                    mock.call('/path_resize', '/path_del')] * 2
        self.assertEqual(expected, mock_rename.mock_calls)
        self.assertTrue(result)
|
|
|
|
    @mock.patch('shutil.rmtree')
    @mock.patch('os.rename')
    @mock.patch('os.path.exists')
    @mock.patch('nova.virt.libvirt.utils.get_instance_path')
    def test_delete_instance_files_none(self, mock_get_instance_path,
                                        mock_exists, mock_rename,
                                        mock_shutil):
        """Nothing on disk at all: no rmtree needed, still a success."""
        mock_get_instance_path.return_value = '/path'
        instance = objects.Instance(uuid=uuids.instance, id=1)

        mock_rename.side_effect = Exception()
        # No path variant exists, so there is nothing to remove.
        mock_exists.side_effect = [False, False, False, False]

        result = self.drvr.delete_instance_files(instance)
        mock_get_instance_path.assert_called_with(instance)
        expected = [mock.call('/path', '/path_del'),
                    mock.call('/path_resize', '/path_del')] * 2
        self.assertEqual(expected, mock_rename.mock_calls)
        # rmtree must never be called when there is no '/path_del'.
        self.assertEqual(0, len(mock_shutil.mock_calls))
        self.assertTrue(result)
|
|
|
|
    @mock.patch('shutil.rmtree')
    @mock.patch('os.rename')
    @mock.patch('os.path.exists')
    @mock.patch('nova.virt.libvirt.utils.get_instance_path')
    def test_delete_instance_files_concurrent(self, mock_get_instance_path,
                                              mock_exists, mock_rename,
                                              mock_shutil):
        """A concurrent delete makes early renames fail; the retry succeeds."""
        mock_get_instance_path.return_value = '/path'
        instance = objects.Instance(uuid=uuids.instance, id=1)

        # First round of renames fails (both paths), the retried '/path'
        # rename then succeeds.
        mock_rename.side_effect = [Exception(), Exception(), None]
        mock_exists.side_effect = [False, False, True, False]

        result = self.drvr.delete_instance_files(instance)
        mock_get_instance_path.assert_called_with(instance)
        expected = [mock.call('/path', '/path_del'),
                    mock.call('/path_resize', '/path_del')]
        # Third call retries the original '/path' rename.
        expected.append(expected[0])
        self.assertEqual(expected, mock_rename.mock_calls)
        mock_shutil.assert_called_with('/path_del')
        self.assertTrue(result)
|
|
|
|
def _assert_on_id_map(self, idmap, klass, start, target, count):
|
|
self.assertIsInstance(idmap, klass)
|
|
self.assertEqual(start, idmap.start)
|
|
self.assertEqual(target, idmap.target)
|
|
self.assertEqual(count, idmap.count)
|
|
|
|
def test_get_id_maps(self):
|
|
self.flags(virt_type="lxc", group="libvirt")
|
|
CONF.libvirt.virt_type = "lxc"
|
|
CONF.libvirt.uid_maps = ["0:10000:1", "1:20000:10"]
|
|
CONF.libvirt.gid_maps = ["0:10000:1", "1:20000:10"]
|
|
|
|
idmaps = self.drvr._get_guest_idmaps()
|
|
|
|
self.assertEqual(len(idmaps), 4)
|
|
self._assert_on_id_map(idmaps[0],
|
|
vconfig.LibvirtConfigGuestUIDMap,
|
|
0, 10000, 1)
|
|
self._assert_on_id_map(idmaps[1],
|
|
vconfig.LibvirtConfigGuestUIDMap,
|
|
1, 20000, 10)
|
|
self._assert_on_id_map(idmaps[2],
|
|
vconfig.LibvirtConfigGuestGIDMap,
|
|
0, 10000, 1)
|
|
self._assert_on_id_map(idmaps[3],
|
|
vconfig.LibvirtConfigGuestGIDMap,
|
|
1, 20000, 10)
|
|
|
|
def test_get_id_maps_not_lxc(self):
|
|
CONF.libvirt.uid_maps = ["0:10000:1", "1:20000:10"]
|
|
CONF.libvirt.gid_maps = ["0:10000:1", "1:20000:10"]
|
|
|
|
idmaps = self.drvr._get_guest_idmaps()
|
|
|
|
self.assertEqual(0, len(idmaps))
|
|
|
|
def test_get_id_maps_only_uid(self):
|
|
self.flags(virt_type="lxc", group="libvirt")
|
|
CONF.libvirt.uid_maps = ["0:10000:1", "1:20000:10"]
|
|
CONF.libvirt.gid_maps = []
|
|
|
|
idmaps = self.drvr._get_guest_idmaps()
|
|
|
|
self.assertEqual(2, len(idmaps))
|
|
self._assert_on_id_map(idmaps[0],
|
|
vconfig.LibvirtConfigGuestUIDMap,
|
|
0, 10000, 1)
|
|
self._assert_on_id_map(idmaps[1],
|
|
vconfig.LibvirtConfigGuestUIDMap,
|
|
1, 20000, 10)
|
|
|
|
def test_get_id_maps_only_gid(self):
|
|
self.flags(virt_type="lxc", group="libvirt")
|
|
CONF.libvirt.uid_maps = []
|
|
CONF.libvirt.gid_maps = ["0:10000:1", "1:20000:10"]
|
|
|
|
idmaps = self.drvr._get_guest_idmaps()
|
|
|
|
self.assertEqual(2, len(idmaps))
|
|
self._assert_on_id_map(idmaps[0],
|
|
vconfig.LibvirtConfigGuestGIDMap,
|
|
0, 10000, 1)
|
|
self._assert_on_id_map(idmaps[1],
|
|
vconfig.LibvirtConfigGuestGIDMap,
|
|
1, 20000, 10)
|
|
|
|
def test_instance_on_disk(self):
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
instance = objects.Instance(uuid=uuids.instance, id=1)
|
|
self.assertFalse(drvr.instance_on_disk(instance))
|
|
|
|
def test_instance_on_disk_rbd(self):
|
|
self.flags(images_type='rbd', group='libvirt')
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
instance = objects.Instance(uuid=uuids.instance, id=1)
|
|
self.assertTrue(drvr.instance_on_disk(instance))
|
|
|
|
    def test_get_disk_xml(self):
        """Guest.get_disk() returns the config of the disk matching a dev.

        A lookup by 'vda'/'vdb' yields the corresponding <disk> element's
        XML; an unknown device ('vdc') yields None.
        """
        dom_xml = """
              <domain type="kvm">
                <devices>
                  <disk type="file">
                     <source file="disk1_file"/>
                     <target dev="vda" bus="virtio"/>
                     <serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
                  </disk>
                  <disk type="block">
                    <source dev="/path/to/dev/1"/>
                    <target dev="vdb" bus="virtio" serial="1234"/>
                  </disk>
                </devices>
              </domain>
              """

        diska_xml = """<disk type="file" device="disk">
  <source file="disk1_file"/>
  <target bus="virtio" dev="vda"/>
  <serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
</disk>"""

        diskb_xml = """<disk type="block" device="disk">
  <source dev="/path/to/dev/1"/>
  <target bus="virtio" dev="vdb"/>
</disk>"""

        dom = mock.MagicMock()
        dom.XMLDesc.return_value = dom_xml
        guest = libvirt_guest.Guest(dom)

        # NOTE(gcb): etree.tostring(node) returns an extra line with
        # some white spaces, need to strip it.
        actual_diska_xml = guest.get_disk('vda').to_xml()
        self.assertEqual(diska_xml.strip(), actual_diska_xml.strip())

        actual_diskb_xml = guest.get_disk('vdb').to_xml()
        self.assertEqual(diskb_xml.strip(), actual_diskb_xml.strip())

        self.assertIsNone(guest.get_disk('vdc'))
|
|
|
|
    def test_vcpu_model_from_config(self):
        """_cpu_config_to_vcpu_model maps libvirt CPU config to VirtCPUModel.

        Covers: None input -> None; a CUSTOM-mode config with features and
        topology; and updating an existing vcpu_model in place when the
        config mode changes.
        """
        drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        vcpu_model = drv._cpu_config_to_vcpu_model(None, None)
        self.assertIsNone(vcpu_model)

        cpu = vconfig.LibvirtConfigGuestCPU()
        feature1 = vconfig.LibvirtConfigGuestCPUFeature()
        feature2 = vconfig.LibvirtConfigGuestCPUFeature()
        feature1.name = 'sse'
        feature1.policy = fields.CPUFeaturePolicy.REQUIRE
        feature2.name = 'aes'
        feature2.policy = fields.CPUFeaturePolicy.REQUIRE

        cpu.features = set([feature1, feature2])
        cpu.mode = fields.CPUMode.CUSTOM
        cpu.sockets = 1
        cpu.cores = 2
        cpu.threads = 4
        vcpu_model = drv._cpu_config_to_vcpu_model(cpu, None)
        self.assertEqual(fields.CPUMatch.EXACT, vcpu_model.match)
        self.assertEqual(fields.CPUMode.CUSTOM, vcpu_model.mode)
        self.assertEqual(4, vcpu_model.topology.threads)
        self.assertEqual(set(['sse', 'aes']),
                         set([f.name for f in vcpu_model.features]))

        # Passing an existing model updates and returns that same object.
        cpu.mode = fields.CPUMode.HOST_MODEL
        vcpu_model_1 = drv._cpu_config_to_vcpu_model(cpu, vcpu_model)
        self.assertEqual(fields.CPUMode.HOST_MODEL, vcpu_model.mode)
        self.assertEqual(vcpu_model, vcpu_model_1)
|
|
|
|
    @mock.patch('nova.virt.disk.api.get_disk_size', return_value=10)
    @mock.patch.object(lvm, 'get_volume_size', return_value=10)
    @mock.patch.object(host.Host, "get_guest")
    @mock.patch.object(dmcrypt, 'delete_volume')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.unfilter_instance')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._undefine_domain')
    @mock.patch.object(objects.Instance, 'save')
    def test_cleanup_lvm_encrypted(self, mock_save, mock_undefine_domain,
                                   mock_unfilter, mock_delete_volume,
                                   mock_get_guest, mock_get_lvm_size,
                                   mock_get_size):
        """Full cleanup() tears down the dmcrypt device of an encrypted disk."""
        drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance = objects.Instance(
            uuid=uuids.instance, id=1,
            ephemeral_key_uuid=uuids.ephemeral_key_uuid)
        instance.system_metadata = {}
        block_device_info = {'root_device_name': '/dev/vda',
                             'ephemerals': [],
                             'block_device_mapping': []}
        self.flags(images_type="lvm",
                   group='libvirt')
        # The guest's only disk is backed by a dmcrypt mapper device.
        dom_xml = """
              <domain type="kvm">
                <devices>
                    <disk type="block">
                        <driver name='qemu' type='raw' cache='none'/>
                        <source dev="/dev/mapper/fake-dmcrypt"/>
                        <target dev="vda" bus="virtio" serial="1234"/>
                    </disk>
                </devices>
              </domain>
              """
        dom = mock.MagicMock()
        dom.XMLDesc.return_value = dom_xml
        guest = libvirt_guest.Guest(dom)
        mock_get_guest.return_value = guest
        drv.cleanup(self.context, instance, 'fake_network', destroy_vifs=False,
                    block_device_info=block_device_info)
        mock_delete_volume.assert_called_once_with('/dev/mapper/fake-dmcrypt')
|
|
|
|
    @mock.patch('nova.virt.disk.api.get_disk_size', return_value=10)
    @mock.patch.object(lvm, 'get_volume_size', return_value=10)
    @mock.patch.object(host.Host, "get_guest")
    @mock.patch.object(dmcrypt, 'delete_volume')
    def _test_cleanup_lvm(self, mock_delete_volume, mock_get_guest,
                          mock_lvm_size, mock_get_size, encrypted=False):
        """Helper: _cleanup_lvm only deletes dmcrypt-backed volumes.

        :param encrypted: when True the guest disk source is a
            '-dmcrypt' mapper device and dmcrypt.delete_volume must be
            called; otherwise it must not be.
        """
        drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance = objects.Instance(
            uuid=uuids.instance, id=1,
            ephemeral_key_uuid=uuids.ephemeral_key_uuid)
        block_device_info = {'root_device_name': '/dev/vda',
                             'ephemerals': [],
                             'block_device_mapping': []}
        # Encrypted devices are recognised by the '-dmcrypt' name suffix.
        dev_name = 'fake-dmcrypt' if encrypted else 'fake'
        dom_xml = """
              <domain type="kvm">
                <devices>
                    <disk type="block">
                        <driver name='qemu' type='raw' cache='none'/>
                        <source dev="/dev/mapper/%s"/>
                        <target dev="vda" bus="virtio" serial="1234"/>
                    </disk>
                </devices>
              </domain>
              """ % dev_name
        dom = mock.MagicMock()
        dom.XMLDesc.return_value = dom_xml
        guest = libvirt_guest.Guest(dom)
        mock_get_guest.return_value = guest
        drv._cleanup_lvm(instance, block_device_info)

        if encrypted:
            mock_delete_volume.assert_called_once_with(
                '/dev/mapper/fake-dmcrypt')
        else:
            self.assertFalse(mock_delete_volume.called)
|
|
|
|
def test_cleanup_lvm(self):
|
|
self._test_cleanup_lvm()
|
|
|
|
def test_cleanup_encrypted_lvm(self):
|
|
self._test_cleanup_lvm(encrypted=True)
|
|
|
|
def test_vcpu_model_to_config(self):
|
|
drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
|
|
feature = objects.VirtCPUFeature(
|
|
policy=fields.CPUFeaturePolicy.REQUIRE, name='sse')
|
|
feature_1 = objects.VirtCPUFeature(
|
|
policy=fields.CPUFeaturePolicy.FORBID, name='aes')
|
|
topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=4)
|
|
vcpu_model = objects.VirtCPUModel(mode=fields.CPUMode.HOST_MODEL,
|
|
features=[feature, feature_1],
|
|
topology=topo)
|
|
|
|
cpu = drv._vcpu_model_to_cpu_config(vcpu_model)
|
|
self.assertEqual(fields.CPUMode.HOST_MODEL, cpu.mode)
|
|
self.assertEqual(1, cpu.sockets)
|
|
self.assertEqual(4, cpu.threads)
|
|
self.assertEqual(2, len(cpu.features))
|
|
self.assertEqual(set(['sse', 'aes']),
|
|
set([f.name for f in cpu.features]))
|
|
self.assertEqual(set([fields.CPUFeaturePolicy.REQUIRE,
|
|
fields.CPUFeaturePolicy.FORBID]),
|
|
set([f.policy for f in cpu.features]))
|
|
|
|
def test_trigger_crash_dump(self):
|
|
mock_guest = mock.Mock(libvirt_guest.Guest, id=1)
|
|
instance = objects.Instance(uuid=uuids.instance, id=1)
|
|
|
|
with mock.patch.object(self.drvr._host, 'get_guest',
|
|
return_value=mock_guest):
|
|
self.drvr.trigger_crash_dump(instance)
|
|
|
|
def test_trigger_crash_dump_not_running(self):
|
|
ex = fakelibvirt.make_libvirtError(
|
|
fakelibvirt.libvirtError,
|
|
'Requested operation is not valid: domain is not running',
|
|
error_code=fakelibvirt.VIR_ERR_OPERATION_INVALID)
|
|
|
|
mock_guest = mock.Mock(libvirt_guest.Guest, id=1)
|
|
mock_guest.inject_nmi = mock.Mock(side_effect=ex)
|
|
instance = objects.Instance(uuid=uuids.instance, id=1)
|
|
|
|
with mock.patch.object(self.drvr._host, 'get_guest',
|
|
return_value=mock_guest):
|
|
self.assertRaises(exception.InstanceNotRunning,
|
|
self.drvr.trigger_crash_dump, instance)
|
|
|
|
def test_trigger_crash_dump_not_supported(self):
|
|
ex = fakelibvirt.make_libvirtError(
|
|
fakelibvirt.libvirtError,
|
|
'',
|
|
error_code=fakelibvirt.VIR_ERR_NO_SUPPORT)
|
|
|
|
mock_guest = mock.Mock(libvirt_guest.Guest, id=1)
|
|
mock_guest.inject_nmi = mock.Mock(side_effect=ex)
|
|
instance = objects.Instance(uuid=uuids.instance, id=1)
|
|
|
|
with mock.patch.object(self.drvr._host, 'get_guest',
|
|
return_value=mock_guest):
|
|
self.assertRaises(exception.TriggerCrashDumpNotSupported,
|
|
self.drvr.trigger_crash_dump, instance)
|
|
|
|
def test_trigger_crash_dump_unexpected_error(self):
|
|
ex = fakelibvirt.make_libvirtError(
|
|
fakelibvirt.libvirtError,
|
|
'UnexpectedError',
|
|
error_code=fakelibvirt.VIR_ERR_SYSTEM_ERROR)
|
|
|
|
mock_guest = mock.Mock(libvirt_guest.Guest, id=1)
|
|
mock_guest.inject_nmi = mock.Mock(side_effect=ex)
|
|
instance = objects.Instance(uuid=uuids.instance, id=1)
|
|
|
|
with mock.patch.object(self.drvr._host, 'get_guest',
|
|
return_value=mock_guest):
|
|
self.assertRaises(fakelibvirt.libvirtError,
|
|
self.drvr.trigger_crash_dump, instance)
|
|
|
|
@mock.patch.object(libvirt_driver.LOG, 'debug')
|
|
def test_get_volume_driver_invalid_connector_exception(self, mock_debug):
|
|
"""Tests that the driver doesn't fail to initialize if one of the
|
|
imported volume drivers raises InvalidConnectorProtocol from os-brick.
|
|
"""
|
|
# make a copy of the normal list and add a volume driver that raises
|
|
# the handled os-brick exception when imported.
|
|
libvirt_volume_drivers_copy = copy.copy(
|
|
libvirt_driver.libvirt_volume_drivers)
|
|
libvirt_volume_drivers_copy.append(
|
|
'invalid=nova.tests.unit.virt.libvirt.test_driver.'
|
|
'FakeInvalidVolumeDriver'
|
|
)
|
|
with mock.patch.object(libvirt_driver, 'libvirt_volume_drivers',
|
|
libvirt_volume_drivers_copy):
|
|
drvr = libvirt_driver.LibvirtDriver(
|
|
fake.FakeVirtAPI(), read_only=True
|
|
)
|
|
# make sure we didn't register the invalid volume driver
|
|
self.assertNotIn('invalid', drvr.volume_drivers)
|
|
# make sure we logged something
|
|
mock_debug.assert_called_with(
|
|
('Unable to load volume driver %s. '
|
|
'It is not supported on this host.'),
|
|
'nova.tests.unit.virt.libvirt.test_driver.FakeInvalidVolumeDriver'
|
|
)
|
|
|
|
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
|
|
'._get_mediated_devices')
|
|
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
|
|
'._get_mdev_capable_devices')
|
|
def test_get_gpu_inventories(self, get_mdev_capable_devs,
|
|
get_mediated_devices):
|
|
get_mdev_capable_devs.return_value = [
|
|
{"dev_id": "pci_0000_06_00_0",
|
|
"vendor_id": 0x10de,
|
|
"types": {'nvidia-11': {'availableInstances': 15,
|
|
'name': 'GRID M60-0B',
|
|
'deviceAPI': 'vfio-pci'},
|
|
}
|
|
},
|
|
{"dev_id": "pci_0000_07_00_0",
|
|
"vendor_id": 0x0000,
|
|
"types": {'nvidia-11': {'availableInstances': 7,
|
|
'name': 'GRID M60-0B',
|
|
'deviceAPI': 'vfio-pci'},
|
|
}
|
|
},
|
|
]
|
|
get_mediated_devices.return_value = [{'dev_id': 'mdev_some_uuid1',
|
|
'uuid': uuids.mdev1,
|
|
'parent': "pci_0000_06_00_0",
|
|
'type': 'nvidia-11',
|
|
'iommu_group': 1},
|
|
{'dev_id': 'mdev_some_uuid2',
|
|
'uuid': uuids.mdev2,
|
|
'parent': "pci_0000_07_00_0",
|
|
'type': 'nvidia-11',
|
|
'iommu_group': 1}]
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
|
|
# If the operator doesn't provide GPU types
|
|
self.assertEqual({}, drvr._get_gpu_inventories())
|
|
|
|
# Now, set a specific GPU type
|
|
self.flags(enabled_vgpu_types=['nvidia-11'], group='devices')
|
|
expected = {
|
|
# the first GPU also has one mdev allocated against it
|
|
'pci_0000_06_00_0': {'total': 15 + 1,
|
|
'max_unit': 15 + 1,
|
|
'min_unit': 1,
|
|
'step_size': 1,
|
|
'reserved': 0,
|
|
'allocation_ratio': 1.0,
|
|
},
|
|
# the second GPU also has another mdev
|
|
'pci_0000_07_00_0': {'total': 7 + 1,
|
|
'max_unit': 7 + 1,
|
|
'min_unit': 1,
|
|
'step_size': 1,
|
|
'reserved': 0,
|
|
'allocation_ratio': 1.0,
|
|
},
|
|
}
|
|
self.assertEqual(expected, drvr._get_gpu_inventories())
|
|
get_mdev_capable_devs.assert_called_once_with(types=['nvidia-11'])
|
|
get_mediated_devices.assert_called_once_with(types=['nvidia-11'])
|
|
|
|
@mock.patch.object(host.Host, 'device_lookup_by_name')
|
|
@mock.patch.object(host.Host, 'list_mdev_capable_devices')
|
|
@mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
|
|
return_value=versionutils.convert_version_to_int(
|
|
libvirt_driver.MIN_LIBVIRT_MDEV_SUPPORT))
|
|
def test_get_mdev_capable_devices(self, _get_libvirt_version,
|
|
list_mdev_capable_devs,
|
|
device_lookup_by_name):
|
|
list_mdev_capable_devs.return_value = ['pci_0000_06_00_0']
|
|
|
|
def fake_nodeDeviceLookupByName(name):
|
|
return FakeNodeDevice(_fake_NodeDevXml[name])
|
|
device_lookup_by_name.side_effect = fake_nodeDeviceLookupByName
|
|
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
|
|
expected = [{"dev_id": "pci_0000_06_00_0",
|
|
"vendor_id": 0x10de,
|
|
"types": {'nvidia-11': {'availableInstances': 16,
|
|
'name': 'GRID M60-0B',
|
|
'deviceAPI': 'vfio-pci'},
|
|
}
|
|
}]
|
|
self.assertEqual(expected, drvr._get_mdev_capable_devices())
|
|
|
|
@mock.patch.object(host.Host, 'device_lookup_by_name')
|
|
@mock.patch.object(host.Host, 'list_mdev_capable_devices')
|
|
@mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
|
|
return_value=versionutils.convert_version_to_int(
|
|
libvirt_driver.MIN_LIBVIRT_MDEV_SUPPORT))
|
|
def test_get_mdev_capable_devices_filtering(self, _get_libvirt_version,
|
|
list_mdev_capable_devs,
|
|
device_lookup_by_name):
|
|
list_mdev_capable_devs.return_value = ['pci_0000_06_00_0']
|
|
|
|
def fake_nodeDeviceLookupByName(name):
|
|
return FakeNodeDevice(_fake_NodeDevXml[name])
|
|
device_lookup_by_name.side_effect = fake_nodeDeviceLookupByName
|
|
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
|
|
# Since we filter by a type not supported by the physical device,
|
|
# we don't get results.
|
|
self.assertEqual([],
|
|
drvr._get_mdev_capable_devices(types=['nvidia-12']))
|
|
|
|
@mock.patch.object(host.Host, 'device_lookup_by_name')
|
|
@mock.patch.object(host.Host, 'list_mediated_devices')
|
|
@mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
|
|
return_value=versionutils.convert_version_to_int(
|
|
libvirt_driver.MIN_LIBVIRT_MDEV_SUPPORT))
|
|
def test_get_mediated_devices(self, _get_libvirt_version,
|
|
list_mediated_devices,
|
|
device_lookup_by_name):
|
|
list_mediated_devices.return_value = [
|
|
'mdev_4b20d080_1b54_4048_85b3_a6a62d165c01']
|
|
|
|
def fake_nodeDeviceLookupByName(name):
|
|
return FakeNodeDevice(_fake_NodeDevXml[name])
|
|
device_lookup_by_name.side_effect = fake_nodeDeviceLookupByName
|
|
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
expected = [{"dev_id": "mdev_4b20d080_1b54_4048_85b3_a6a62d165c01",
|
|
"uuid": "4b20d080-1b54-4048-85b3-a6a62d165c01",
|
|
"parent": "pci_0000_00_02_0",
|
|
"type": "nvidia-11",
|
|
"iommu_group": 12
|
|
}]
|
|
self.assertEqual(expected, drvr._get_mediated_devices())
|
|
|
|
@mock.patch.object(host.Host, 'device_lookup_by_name')
|
|
@mock.patch.object(host.Host, 'list_mediated_devices')
|
|
@mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
|
|
return_value=versionutils.convert_version_to_int(
|
|
libvirt_driver.MIN_LIBVIRT_MDEV_SUPPORT))
|
|
def test_get_mediated_devices_filtering(self, _get_libvirt_version,
|
|
list_mediated_devices,
|
|
device_lookup_by_name):
|
|
list_mediated_devices.return_value = [
|
|
'mdev_4b20d080_1b54_4048_85b3_a6a62d165c01']
|
|
|
|
def fake_nodeDeviceLookupByName(name):
|
|
return FakeNodeDevice(_fake_NodeDevXml[name])
|
|
device_lookup_by_name.side_effect = fake_nodeDeviceLookupByName
|
|
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
# Since we filter by a type not supported by the physical device,
|
|
# we don't get results.
|
|
self.assertEqual([], drvr._get_mediated_devices(types=['nvidia-12']))
|
|
|
|
@mock.patch.object(host.Host, 'list_guests')
|
|
def test_get_all_assigned_mediated_devices(self, list_guests):
|
|
dom_with_vgpu = """
|
|
<domain type="kvm">
|
|
<devices>
|
|
<hostdev mode='subsystem' type='mdev' model='vfio-pci'>
|
|
<source>
|
|
<address uuid='%s'/>
|
|
</source>
|
|
</hostdev>
|
|
</devices>
|
|
</domain>
|
|
""" % uuids.mdev
|
|
guest1 = libvirt_guest.Guest(FakeVirtDomain())
|
|
guest2 = libvirt_guest.Guest(FakeVirtDomain(fake_xml=dom_with_vgpu))
|
|
list_guests.return_value = [guest1, guest2]
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
self.assertEqual({uuids.mdev: guest2.uuid},
|
|
drvr._get_all_assigned_mediated_devices())
|
|
|
|
@mock.patch.object(host.Host, 'get_guest')
|
|
def test_get_all_assigned_mediated_devices_for_an_instance(self,
|
|
get_guest):
|
|
dom_with_vgpu = """
|
|
<domain type="kvm">
|
|
<devices>
|
|
<hostdev mode='subsystem' type='mdev' model='vfio-pci'>
|
|
<source>
|
|
<address uuid='%s'/>
|
|
</source>
|
|
</hostdev>
|
|
</devices>
|
|
</domain>
|
|
""" % uuids.mdev
|
|
guest = libvirt_guest.Guest(FakeVirtDomain(fake_xml=dom_with_vgpu))
|
|
get_guest.return_value = guest
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
fake_inst = objects.Instance()
|
|
self.assertEqual({uuids.mdev: guest.uuid},
|
|
drvr._get_all_assigned_mediated_devices(fake_inst))
|
|
get_guest.assert_called_once_with(fake_inst)
|
|
|
|
@mock.patch.object(host.Host, 'get_guest')
|
|
def test_get_all_assigned_mediated_devices_for_a_non_existing_instance(
|
|
self, get_guest):
|
|
get_guest.side_effect = exception.InstanceNotFound(instance_id='fake')
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
fake_inst = objects.Instance()
|
|
self.assertEqual({},
|
|
drvr._get_all_assigned_mediated_devices(fake_inst))
|
|
|
|
def test_allocate_mdevs_with_no_vgpu_allocations(self):
|
|
allocations = {
|
|
'rp1': {
|
|
'resources': {
|
|
# Just any resource class but VGPU
|
|
orc.VCPU: 1,
|
|
}
|
|
}
|
|
}
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
self.assertIsNone(drvr._allocate_mdevs(allocations=allocations))
|
|
|
|
def _get_fake_provider_tree_with_vgpu(self):
|
|
"""Returns a fake ProviderTree with VGPU inventory on two children RPs
|
|
with one with a correct name and the other one wrong.
|
|
|
|
The child provider is named rp1 and its UUID is uuids.rp1.
|
|
"""
|
|
cn_rp = dict(
|
|
uuid=uuids.cn,
|
|
name='cn',
|
|
)
|
|
vgpu_rp_inv = {
|
|
orc.VGPU: {
|
|
'total': 1,
|
|
'min_unit': 1,
|
|
'max_unit': 1,
|
|
'step_size': 1,
|
|
}
|
|
}
|
|
pt = provider_tree.ProviderTree()
|
|
pt.new_root(cn_rp['name'], cn_rp['uuid'], generation=0)
|
|
# Create a first child with a correct naming attribute
|
|
pt.new_child(cn_rp['name'] + '_' + 'pci_0000_06_00_0', cn_rp['uuid'],
|
|
uuid=uuids.rp1, generation=0)
|
|
pt.update_inventory(uuids.rp1, vgpu_rp_inv)
|
|
# Create a second child with a bad naming convention
|
|
pt.new_child('oops_I_did_it_again', cn_rp['uuid'],
|
|
uuid=uuids.rp2, generation=0)
|
|
pt.update_inventory(uuids.rp2, vgpu_rp_inv)
|
|
return pt
|
|
|
|
@mock.patch.object(libvirt_driver.LibvirtDriver,
|
|
'_get_existing_mdevs_not_assigned')
|
|
def test_allocate_mdevs_with_available_mdevs(self, get_unassigned_mdevs):
|
|
self.flags(enabled_vgpu_types=['nvidia-11'], group='devices')
|
|
allocations = {
|
|
uuids.rp1: {
|
|
'resources': {
|
|
orc.VGPU: 1,
|
|
}
|
|
}
|
|
}
|
|
get_unassigned_mdevs.return_value = set([uuids.mdev1])
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
# Mock the fact update_provider_tree() should have run
|
|
drvr.provider_tree = self._get_fake_provider_tree_with_vgpu()
|
|
self.assertEqual([uuids.mdev1],
|
|
drvr._allocate_mdevs(allocations=allocations))
|
|
get_unassigned_mdevs.assert_called_once_with(['nvidia-11'],
|
|
'pci_0000_06_00_0')
|
|
|
|
@mock.patch.object(nova.privsep.libvirt, 'create_mdev')
|
|
@mock.patch.object(libvirt_driver.LibvirtDriver,
|
|
'_get_mdev_capable_devices')
|
|
@mock.patch.object(libvirt_driver.LibvirtDriver,
|
|
'_get_existing_mdevs_not_assigned')
|
|
def test_allocate_mdevs_with_no_mdevs_but_capacity(self,
|
|
unallocated_mdevs,
|
|
get_mdev_capable_devs,
|
|
privsep_create_mdev):
|
|
self.flags(enabled_vgpu_types=['nvidia-11'], group='devices')
|
|
allocations = {
|
|
uuids.rp1: {
|
|
'resources': {
|
|
orc.VGPU: 1,
|
|
}
|
|
}
|
|
}
|
|
unallocated_mdevs.return_value = set()
|
|
get_mdev_capable_devs.return_value = [
|
|
{"dev_id": "pci_0000_06_00_0",
|
|
"vendor_id": 0x10de,
|
|
"types": {'nvidia-11': {'availableInstances': 16,
|
|
'name': 'GRID M60-0B',
|
|
'deviceAPI': 'vfio-pci'},
|
|
}
|
|
}]
|
|
privsep_create_mdev.return_value = uuids.mdev1
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
# Mock the fact update_provider_tree() should have run
|
|
drvr.provider_tree = self._get_fake_provider_tree_with_vgpu()
|
|
self.assertEqual([uuids.mdev1],
|
|
drvr._allocate_mdevs(allocations=allocations))
|
|
privsep_create_mdev.assert_called_once_with("0000:06:00.0",
|
|
'nvidia-11',
|
|
uuid=None)
|
|
|
|
@mock.patch.object(nova.privsep.libvirt, 'create_mdev')
|
|
@mock.patch.object(libvirt_driver.LibvirtDriver,
|
|
'_get_mdev_capable_devices')
|
|
@mock.patch.object(libvirt_driver.LibvirtDriver,
|
|
'_get_existing_mdevs_not_assigned')
|
|
def test_allocate_mdevs_with_no_gpu_capacity(self,
|
|
unallocated_mdevs,
|
|
get_mdev_capable_devs,
|
|
privsep_create_mdev):
|
|
self.flags(enabled_vgpu_types=['nvidia-11'], group='devices')
|
|
allocations = {
|
|
uuids.rp1: {
|
|
'resources': {
|
|
orc.VGPU: 1,
|
|
}
|
|
}
|
|
}
|
|
unallocated_mdevs.return_value = set()
|
|
# Mock the fact all possible mediated devices are created and all of
|
|
# them being assigned
|
|
get_mdev_capable_devs.return_value = [
|
|
{"dev_id": "pci_0000_06_00_0",
|
|
"vendor_id": 0x10de,
|
|
"types": {'nvidia-11': {'availableInstances': 0,
|
|
'name': 'GRID M60-0B',
|
|
'deviceAPI': 'vfio-pci'},
|
|
}
|
|
}]
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
# Mock the fact update_provider_tree() should have run
|
|
drvr.provider_tree = self._get_fake_provider_tree_with_vgpu()
|
|
self.assertRaises(exception.ComputeResourcesUnavailable,
|
|
drvr._allocate_mdevs, allocations=allocations)
|
|
|
|
def test_allocate_mdevs_with_no_idea_of_the_provider(self):
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
# Mock the fact update_provider_tree() should have run
|
|
drvr.provider_tree = self._get_fake_provider_tree_with_vgpu()
|
|
|
|
# Test that the allocated RP doesn't exist in the tree
|
|
allocations = {
|
|
uuids.wrong_rp: {
|
|
'resources': {
|
|
orc.VGPU: 1,
|
|
}
|
|
}
|
|
}
|
|
self.assertRaises(exception.ComputeResourcesUnavailable,
|
|
drvr._allocate_mdevs, allocations=allocations)
|
|
|
|
# Test that we were unable to guess the RP name
|
|
allocations = {
|
|
uuids.rp2: {
|
|
'resources': {
|
|
orc.VGPU: 1,
|
|
}
|
|
}
|
|
}
|
|
# Remember, rp2 has a wrong naming convention
|
|
self.assertRaises(exception.ComputeResourcesUnavailable,
|
|
drvr._allocate_mdevs, allocations=allocations)
|
|
|
|
@mock.patch.object(libvirt_driver.LibvirtDriver, '_get_mediated_devices')
|
|
@mock.patch.object(libvirt_driver.LibvirtDriver,
|
|
'_get_all_assigned_mediated_devices')
|
|
def test_get_existing_mdevs_not_assigned(self, get_all_assigned_mdevs,
|
|
get_mediated_devices):
|
|
# mdev2 is assigned to instance1
|
|
get_all_assigned_mdevs.return_value = {uuids.mdev2: uuids.inst1}
|
|
# there is a total of 2 mdevs, mdev1 and mdev2
|
|
get_mediated_devices.return_value = [{'dev_id': 'mdev_some_uuid1',
|
|
'uuid': uuids.mdev1,
|
|
'parent': "pci_some",
|
|
'type': 'nvidia-11',
|
|
'iommu_group': 1},
|
|
{'dev_id': 'mdev_some_uuid2',
|
|
'uuid': uuids.mdev2,
|
|
'parent': "pci_some",
|
|
'type': 'nvidia-11',
|
|
'iommu_group': 1}]
|
|
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
# Since mdev2 is assigned to inst1, only mdev1 is available
|
|
self.assertEqual(set([uuids.mdev1]),
|
|
drvr._get_existing_mdevs_not_assigned())
|
|
|
|
@mock.patch.object(nova.privsep.libvirt, 'create_mdev')
|
|
@mock.patch.object(libvirt_driver.LibvirtDriver,
|
|
'_get_mdev_capable_devices')
|
|
@mock.patch.object(os.path, 'exists')
|
|
@mock.patch.object(libvirt_driver.LibvirtDriver,
|
|
'_get_all_assigned_mediated_devices')
|
|
@mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
|
|
return_value=versionutils.convert_version_to_int(
|
|
libvirt_driver.MIN_LIBVIRT_MDEV_SUPPORT))
|
|
def test_recreate_mediated_device_on_init_host(
|
|
self, _get_libvirt_version,
|
|
get_all_assigned_mdevs, exists, get_mdev_capable_devs,
|
|
privsep_create_mdev):
|
|
self.flags(enabled_vgpu_types=['nvidia-11'], group='devices')
|
|
get_all_assigned_mdevs.return_value = {uuids.mdev1: uuids.inst1,
|
|
uuids.mdev2: uuids.inst2}
|
|
|
|
# Fake the fact that mdev1 is existing but mdev2 not
|
|
def _exists(path):
|
|
# Just verify what we ask
|
|
self.assertIn('/sys/bus/mdev/devices/', path)
|
|
return True if uuids.mdev1 in path else False
|
|
|
|
exists.side_effect = _exists
|
|
get_mdev_capable_devs.return_value = [
|
|
{"dev_id": "pci_0000_06_00_0",
|
|
"vendor_id": 0x10de,
|
|
"types": {'nvidia-11': {'availableInstances': 16,
|
|
'name': 'GRID M60-0B',
|
|
'deviceAPI': 'vfio-pci'},
|
|
}
|
|
}]
|
|
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
drvr.init_host(host='foo')
|
|
privsep_create_mdev.assert_called_once_with(
|
|
"0000:06:00.0", 'nvidia-11', uuid=uuids.mdev2)
|
|
|
|
@mock.patch.object(libvirt_guest.Guest, 'detach_device')
|
|
def _test_detach_mediated_devices(self, side_effect, detach_device):
|
|
|
|
dom_with_vgpu = (
|
|
"""<domain> <devices>
|
|
<disk type='file' device='disk'>
|
|
<driver name='qemu' type='qcow2' cache='none'/>
|
|
<source file='xxx'/>
|
|
<target dev='vda' bus='virtio'/>
|
|
<alias name='virtio-disk0'/>
|
|
<address type='pci' domain='0x0000' bus='0x00'
|
|
slot='0x04' function='0x0'/>
|
|
</disk>
|
|
<hostdev mode='subsystem' type='mdev' managed='no'
|
|
model='vfio-pci'>
|
|
<source>
|
|
<address uuid='81db53c6-6659-42a0-a34c-1507fdc72983'/>
|
|
</source>
|
|
<alias name='hostdev0'/>
|
|
<address type='pci' domain='0x0000' bus='0x00' slot='0x05'
|
|
function='0x0'/>
|
|
</hostdev>
|
|
</devices></domain>""")
|
|
|
|
detach_device.side_effect = side_effect
|
|
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
guest = libvirt_guest.Guest(FakeVirtDomain(fake_xml=dom_with_vgpu))
|
|
drvr._detach_mediated_devices(guest)
|
|
return detach_device
|
|
|
|
def test_detach_mediated_devices(self):
|
|
def fake_detach_device(cfg_obj, **kwargs):
|
|
self.assertIsInstance(cfg_obj,
|
|
vconfig.LibvirtConfigGuestHostdevMDEV)
|
|
|
|
detach_mock = self._test_detach_mediated_devices(fake_detach_device)
|
|
detach_mock.assert_called_once_with(mock.ANY, live=True)
|
|
|
|
def test_detach_mediated_devices_raises_exc_unsupported(self):
|
|
exc = fakelibvirt.make_libvirtError(
|
|
fakelibvirt.libvirtError, 'virDomainDetachDeviceFlags() failed',
|
|
error_code=fakelibvirt.VIR_ERR_CONFIG_UNSUPPORTED)
|
|
|
|
self.assertRaises(exception.InstanceFaultRollback,
|
|
self._test_detach_mediated_devices, exc)
|
|
|
|
def test_detach_mediated_devices_raises_exc(self):
|
|
exc = test.TestingException()
|
|
|
|
self.assertRaises(test.TestingException,
|
|
self._test_detach_mediated_devices, exc)
|
|
|
|
    def test_cpu_traits_with_passthrough_mode(self):
        """Test getting CPU traits when cpu_mode is 'host-passthrough';
        traits are calculated from fakelibvirt's baseline CPU features.
        """
        self.flags(cpu_mode='host-passthrough', group='libvirt')
        self.assertTraitsEqual(['HW_CPU_X86_AESNI', 'HW_CPU_X86_VMX'],
                               self.drvr._get_cpu_traits())
|
|
|
|
@mock.patch('nova.virt.libvirt.host.libvirt.Connection.baselineCPU')
|
|
def test_cpu_traits_with_mode_none(self, mock_baseline):
|
|
"""Test getting CPU traits when cpu_mode is 'none', traits are
|
|
calculated from _fake_qemu64_cpu_features.
|
|
"""
|
|
self.flags(cpu_mode='none', group='libvirt')
|
|
mock_baseline.return_value = _fake_qemu64_cpu_feature
|
|
self.assertTraitsEqual(['HW_CPU_X86_SSE', 'HW_CPU_X86_SVM',
|
|
'HW_CPU_X86_MMX', 'HW_CPU_X86_SSE2'],
|
|
self.drvr._get_cpu_traits())
|
|
|
|
mock_baseline.assert_called_with([u'''<cpu>
|
|
<arch>x86_64</arch>
|
|
<model>qemu64</model>
|
|
<vendor>Intel</vendor>
|
|
<topology sockets="1" cores="2" threads="1"/>
|
|
</cpu>
|
|
'''], 1)
|
|
|
|
@mock.patch('nova.virt.libvirt.host.libvirt.Connection.baselineCPU')
|
|
def test_cpu_traits_with_mode_custom(self, mock_baseline):
|
|
"""Test getting CPU traits when cpu_mode is 'custom' and cpu_model is
|
|
'Broadwell-noTSX', traits are calculated from
|
|
_fake_broadwell_cpu_features.
|
|
"""
|
|
self.flags(cpu_mode='custom',
|
|
cpu_model='Broadwell-noTSX',
|
|
group='libvirt')
|
|
mock_baseline.return_value = _fake_broadwell_cpu_feature
|
|
|
|
self.assertTraitsEqual(
|
|
[
|
|
'HW_CPU_X86_BMI2',
|
|
'HW_CPU_X86_AVX2',
|
|
'HW_CPU_X86_BMI',
|
|
'HW_CPU_X86_AVX',
|
|
'HW_CPU_X86_AESNI',
|
|
'HW_CPU_X86_SSE42',
|
|
'HW_CPU_X86_SSE41',
|
|
'HW_CPU_X86_FMA3',
|
|
'HW_CPU_X86_SSSE3',
|
|
'HW_CPU_X86_CLMUL',
|
|
'HW_CPU_X86_SSE2',
|
|
'HW_CPU_X86_SSE',
|
|
'HW_CPU_X86_MMX'
|
|
], self.drvr._get_cpu_traits()
|
|
)
|
|
mock_baseline.assert_called_with([u'''<cpu>
|
|
<arch>x86_64</arch>
|
|
<model>Broadwell-noTSX</model>
|
|
<vendor>Intel</vendor>
|
|
<topology sockets="1" cores="2" threads="1"/>
|
|
</cpu>
|
|
'''], 1)
|
|
|
|
@mock.patch('nova.virt.libvirt.host.libvirt.Connection.baselineCPU')
|
|
def test_cpu_traits_with_no_baseline_support(self, mock_baseline):
|
|
"""Test getting CPU traits when baseline call is not supported."""
|
|
self.flags(cpu_mode='none', group='libvirt')
|
|
not_supported_exc = fakelibvirt.make_libvirtError(
|
|
fakelibvirt.libvirtError,
|
|
'this function is not supported by the connection driver',
|
|
error_code=fakelibvirt.VIR_ERR_NO_SUPPORT)
|
|
mock_baseline.side_effect = not_supported_exc
|
|
self.assertTraitsEqual([], self.drvr._get_cpu_traits())
|
|
|
|
@mock.patch('nova.virt.libvirt.host.libvirt.Connection.getCapabilities')
|
|
@mock.patch('nova.virt.libvirt.host.libvirt.Connection.baselineCPU')
|
|
def test_cpu_traits_on_s390x(self, mock_baseline, mock_cap):
|
|
"""Test getting CPU traits on s390x, baseline call is not supported on
|
|
the platform.
|
|
"""
|
|
self.flags(cpu_mode='none', group='libvirt')
|
|
|
|
mock_cap.return_value = """
|
|
<capabilities>
|
|
<host>
|
|
<uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
|
|
<cpu>
|
|
<arch>s390x</arch>
|
|
<topology sockets='1' cores='6' threads='1'/>
|
|
<pages unit='KiB' size='4' />
|
|
<pages unit='KiB' size='1024' />
|
|
</cpu>
|
|
</host>
|
|
</capabilities>
|
|
"""
|
|
|
|
not_supported_exc = fakelibvirt.make_libvirtError(
|
|
fakelibvirt.libvirtError,
|
|
'this function is not supported by the connection driver: cannot'
|
|
' compute baseline CPU',
|
|
error_code=fakelibvirt.VIR_ERR_NO_SUPPORT)
|
|
missing_model_exc = fakelibvirt.make_libvirtError(
|
|
fakelibvirt.libvirtError,
|
|
'XML error: Missing CPU model name',
|
|
error_code=fakelibvirt.VIR_ERR_XML_ERROR)
|
|
|
|
# model the libvirt behavior on s390x
|
|
def mocked_baseline(cpu_xml, *args):
|
|
xml = cpu_xml[0]
|
|
if "<model>" in xml:
|
|
raise not_supported_exc
|
|
else:
|
|
raise missing_model_exc
|
|
mock_baseline.side_effect = mocked_baseline
|
|
|
|
self.assertTraitsEqual([], self.drvr._get_cpu_traits())
|
|
|
|
def test_cpu_traits_with_invalid_virt_type(self):
|
|
"""Test getting CPU traits when using a virt_type that doesn't support
|
|
the feature, only kvm and qemu supports reporting CPU traits.
|
|
"""
|
|
self.flags(cpu_mode='custom',
|
|
cpu_model='IvyBridge',
|
|
virt_type='lxc',
|
|
group='libvirt'
|
|
)
|
|
self.assertRaises(exception.Invalid, self.drvr._get_cpu_traits)
|
|
|
|
@mock.patch('nova.virt.libvirt.host.libvirt.Connection.getCapabilities')
|
|
@mock.patch('nova.virt.libvirt.utils.cpu_features_to_traits')
|
|
def test_cpu_traits_with_mode_passthrough_and_extra_flags(
|
|
self, mock_to_traits, mock_cap):
|
|
"""Test if extra flags are accounted when cpu_mode is set to
|
|
host-passthrough.
|
|
"""
|
|
self.flags(cpu_mode='host-passthrough',
|
|
cpu_model_extra_flags='PCID',
|
|
group='libvirt')
|
|
mock_cap.return_value = """
|
|
<capabilities>
|
|
<host>
|
|
<uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
|
|
<cpu>
|
|
<arch>IvyBridge</arch>
|
|
<topology sockets='1' cores='2' threads='2'/>
|
|
<feature policy='require' name='erms'/>
|
|
<pages unit='KiB' size='4' />
|
|
<pages unit='KiB' size='1024' />
|
|
</cpu>
|
|
</host>
|
|
</capabilities>
|
|
"""
|
|
self.drvr._get_cpu_traits()
|
|
self.assertItemsEqual(['pcid', 'erms'], mock_to_traits.call_args[0][0])
|
|
|
|
@mock.patch('nova.virt.libvirt.host.libvirt.Connection.baselineCPU')
|
|
@mock.patch('nova.virt.libvirt.utils.cpu_features_to_traits')
|
|
def test_cpu_traits_with_mode_custom_and_extra_flags(self, mock_to_traits,
|
|
mock_baseline):
|
|
"""Test if extra flags are accounted when cpu_mode is set to custom.
|
|
"""
|
|
self.flags(cpu_mode='custom',
|
|
cpu_model='IvyBridge',
|
|
cpu_model_extra_flags='PCID',
|
|
group='libvirt')
|
|
|
|
mock_baseline.return_value = """
|
|
<cpu mode='custom' match='exact'>
|
|
<model fallback='forbid'>IvyBridge</model>
|
|
<vendor>Intel</vendor>
|
|
<feature policy='require' name='erms'/>
|
|
<feature policy='require' name='pcid'/>
|
|
</cpu>
|
|
"""
|
|
self.drvr._get_cpu_traits()
|
|
mock_baseline.assert_called_with([u'''<cpu>
|
|
<arch>x86_64</arch>
|
|
<model>IvyBridge</model>
|
|
<vendor>Intel</vendor>
|
|
<topology sockets="1" cores="2" threads="1"/>
|
|
<feature name="pcid"/>
|
|
</cpu>
|
|
'''], 1)
|
|
self.assertItemsEqual(['pcid', 'erms'], mock_to_traits.call_args[0][0])
|
|
|
|
@mock.patch('nova.virt.libvirt.host.libvirt.Connection.baselineCPU')
|
|
@mock.patch('nova.virt.libvirt.utils.cpu_features_to_traits')
|
|
def test_cpu_traits_with_mode_not_set_and_extra_flags(self, mock_to_traits,
|
|
mock_baseline):
|
|
"""Test if extra flags are accounted when cpu_mode is not set."""
|
|
self.flags(cpu_mode=None,
|
|
cpu_model_extra_flags='PCID',
|
|
virt_type='kvm',
|
|
group='libvirt'
|
|
)
|
|
mock_baseline.return_value = """
|
|
<cpu mode='custom' match='exact'>
|
|
<model fallback='forbid'>IvyBridge</model>
|
|
<vendor>Intel</vendor>
|
|
<feature policy='require' name='erms'/>
|
|
</cpu>
|
|
"""
|
|
self.drvr._get_cpu_traits()
|
|
self.assertItemsEqual(['pcid', 'erms'], mock_to_traits.call_args[0][0])
|
|
|
|
def test_cpu_traits_with_mode_none_and_invalid_virt_type(self):
|
|
"""Test case that cpu mode is none and virt_type is neither kvm nor
|
|
qemu.
|
|
"""
|
|
self.flags(cpu_mode='none',
|
|
virt_type='lxc',
|
|
group='libvirt')
|
|
self.assertIsNone(self.drvr._get_cpu_traits())
|
|
|
|
@mock.patch('nova.virt.libvirt.host.libvirt.Connection.getCapabilities')
|
|
@mock.patch('nova.virt.libvirt.host.libvirt.Connection.baselineCPU')
|
|
def test_cpu_traits_with_mode_none_on_power(self, mock_baseline, mock_cap):
|
|
"""Test case that cpu mode is none on Power machines."""
|
|
self.flags(cpu_mode='none', virt_type='kvm', group='libvirt')
|
|
mock_cap.return_value = '''
|
|
<capabilities>
|
|
<host>
|
|
<uuid>1f71d34a-7c89-45cf-95ce-3df20fc6b936</uuid>
|
|
<cpu>
|
|
<model>POWER8</model>
|
|
<vendor>IBM</vendor>
|
|
<arch>ppc64le</arch>
|
|
<topology sockets='1' cores='5' threads='1'/>
|
|
<pages unit='KiB' size='64'/>
|
|
</cpu>
|
|
</host>
|
|
</capabilities>
|
|
'''
|
|
mock_baseline.return_value = '''
|
|
<cpu>
|
|
<model>POWER8</model>
|
|
<vendor>IBM</vendor>
|
|
</cpu>
|
|
'''
|
|
self.drvr._get_cpu_traits()
|
|
mock_baseline.assert_called_with([u'''<cpu>
|
|
<arch>ppc64le</arch>
|
|
<model>POWER8</model>
|
|
<vendor>IBM</vendor>
|
|
<topology sockets="1" cores="5" threads="1"/>
|
|
</cpu>
|
|
'''], 1)
|
|
|
|
|
|
class LibvirtVolumeUsageTestCase(test.NoDBTestCase):
    """Test for LibvirtDriver.get_all_volume_usage."""

    def setUp(self):
        super(LibvirtVolumeUsageTestCase, self).setUp()
        self.useFixture(fakelibvirt.FakeLibvirtFixture())
        self.drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.c = context.get_admin_context()

        self.ins_ref = objects.Instance(
            id=1729,
            uuid='875a8070-d0b9-4949-8b31-104d125c9a64'
        )

        # verify bootable volume device path also
        self.bdms = [{'volume_id': 1,
                      'device_name': '/dev/vde'},
                     {'volume_id': 2,
                      'device_name': 'vda'}]

    def test_get_all_volume_usage(self):
        """block_stats() is queried once per BDM and its counters are
        reported per volume, with any '/dev/' prefix stripped from the
        device name before the lookup.
        """
        with mock.patch.object(
            self.drvr, 'block_stats',
            return_value=(169, 688640, 0, 0, -1)) as mock_block_stats:
            vol_usage = self.drvr.get_all_volume_usage(
                self.c, [dict(instance=self.ins_ref, instance_bdms=self.bdms)])

        expected_usage = [{'volume': 1,
                           'instance': self.ins_ref,
                           'rd_bytes': 688640, 'wr_req': 0,
                           'rd_req': 169, 'wr_bytes': 0},
                          {'volume': 2,
                           'instance': self.ins_ref,
                           'rd_bytes': 688640, 'wr_req': 0,
                           'rd_req': 169, 'wr_bytes': 0}]
        # assertEqual(expected, actual) for consistency with the rest of
        # this module.
        self.assertEqual(expected_usage, vol_usage)
        self.assertEqual(2, mock_block_stats.call_count)
        mock_block_stats.assert_has_calls([
            mock.call(self.ins_ref, 'vde'), mock.call(self.ins_ref, 'vda')])

    @mock.patch.object(host.Host, '_get_domain',
                       side_effect=exception.InstanceNotFound(
                           instance_id='fakedom'))
    def test_get_all_volume_usage_device_not_found(self, mock_get_domain):
        """When the domain cannot be found, usage collection degrades to an
        empty result instead of failing; the lookup is still attempted once
        per BDM.
        """
        vol_usage = self.drvr.get_all_volume_usage(self.c,
              [dict(instance=self.ins_ref, instance_bdms=self.bdms)])
        self.assertEqual([], vol_usage)
        self.assertEqual(2, mock_get_domain.call_count)
        mock_get_domain.assert_has_calls([mock.call(self.ins_ref)] * 2)
|
|
|
|
|
|
class LibvirtNonblockingTestCase(test.NoDBTestCase):
    """Test libvirtd calls are nonblocking.

    Blocking libvirt calls must be proxied through eventlet's tpool so
    they do not stall the main green thread.
    """

    def setUp(self):
        super(LibvirtNonblockingTestCase, self).setUp()
        self.useFixture(fakelibvirt.FakeLibvirtFixture())
        self.flags(connection_uri="test:///default",
                   group='libvirt')

    def test_connection_to_primitive(self):
        # Test bug 962840: serializing the driver's tpool-proxied libvirt
        # connection must not raise.  The redundant in-method import of
        # nova.virt.libvirt.driver was dropped; the module-level alias
        # `libvirt_driver` is used instead (as in the rest of this file).
        drvr = libvirt_driver.LibvirtDriver('')
        drvr.set_host_enabled = mock.Mock()
        jsonutils.to_primitive(drvr._conn, convert_instances=True)

    @mock.patch.object(eventlet.tpool, 'execute')
    @mock.patch.object(objects.Service, 'get_by_compute_host')
    def test_tpool_execute_calls_libvirt(self, mock_svc, mock_execute):
        # Every raw libvirt call made while connecting must be routed
        # through tpool.execute.
        conn = fakelibvirt.virConnect()
        conn.is_expected = True

        side_effect = [conn, None]
        expected_calls = [
            mock.call(fakelibvirt.openAuth, 'test:///default',
                      mock.ANY, mock.ANY),
            mock.call(conn.domainEventRegisterAny, None,
                      fakelibvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
                      mock.ANY, mock.ANY)]
        # Older libvirt python bindings lack registerCloseCallback.
        if hasattr(fakelibvirt.virConnect, 'registerCloseCallback'):
            side_effect.append(None)
            expected_calls.append(mock.call(
                conn.registerCloseCallback, mock.ANY, mock.ANY))
        mock_execute.side_effect = side_effect

        driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        c = driver._get_connection()
        self.assertTrue(c.is_expected)
        self.assertEqual(len(expected_calls), mock_execute.call_count)
        mock_execute.assert_has_calls(expected_calls)
|
|
|
|
|
|
class LibvirtVolumeSnapshotTestCase(test.NoDBTestCase):
|
|
"""Tests for libvirtDriver.volume_snapshot_create/delete."""
|
|
|
|
    def setUp(self):
        """Build the fake domain XML and snapshot fixtures shared by the
        volume_snapshot_create/delete tests below.
        """
        super(LibvirtVolumeSnapshotTestCase, self).setUp()

        self.useFixture(fakelibvirt.FakeLibvirtFixture())
        self.drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.c = context.get_admin_context()

        self.flags(instance_name_template='instance-%s')

        # creating instance
        self.inst = {}
        self.inst['uuid'] = uuids.fake
        self.inst['id'] = '1'
        # system_metadata is needed for objects.Instance.image_meta conversion
        self.inst['system_metadata'] = {}

        # create domain info: one file-based disk (vda) and one block
        # device (vdb) whose serial matches self.volume_uuid below.
        self.dom_xml = """
              <domain type='kvm'>
                <devices>
                  <disk type='file'>
                     <source file='disk1_file'/>
                     <target dev='vda' bus='virtio'/>
                     <serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
                  </disk>
                  <disk type='block'>
                    <source dev='/path/to/dev/1'/>
                    <target dev='vdb' bus='virtio' serial='1234'/>
                  </disk>
                </devices>
              </domain>"""

        # alternate domain info with network-backed snapshot chain
        self.dom_netdisk_xml = """
              <domain type='kvm'>
                <devices>
                  <disk type='file'>
                    <source file='disk1_file'/>
                    <target dev='vda' bus='virtio'/>
                    <serial>0e38683e-f0af-418f-a3f1-6b67eaffffff</serial>
                  </disk>
                  <disk type='network' device='disk'>
                    <driver name='qemu' type='qcow2'/>
                    <source protocol='netfs' name='vol1/root.img'>
                      <host name='server1' port='24007'/>
                    </source>
                    <backingStore type='network' index='1'>
                      <driver name='qemu' type='qcow2'/>
                      <source protocol='netfs' name='vol1/snap.img'>
                        <host name='server1' port='24007'/>
                      </source>
                      <backingStore type='network' index='2'>
                        <driver name='qemu' type='qcow2'/>
                        <source protocol='netfs' name='vol1/snap-b.img'>
                          <host name='server1' port='24007'/>
                        </source>
                        <backingStore/>
                      </backingStore>
                    </backingStore>
                    <target dev='vdb' bus='virtio'/>
                    <serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
                  </disk>
                </devices>
              </domain>
        """

        # XML with netdisk attached, and 1 snapshot taken
        self.dom_netdisk_xml_2 = """
              <domain type='kvm'>
                <devices>
                  <disk type='file'>
                    <source file='disk1_file'/>
                    <target dev='vda' bus='virtio'/>
                    <serial>0e38683e-f0af-418f-a3f1-6b67eaffffff</serial>
                  </disk>
                  <disk type='network' device='disk'>
                    <driver name='qemu' type='qcow2'/>
                    <source protocol='netfs' name='vol1/snap.img'>
                      <host name='server1' port='24007'/>
                    </source>
                    <backingStore type='network' index='1'>
                      <driver name='qemu' type='qcow2'/>
                      <source protocol='netfs' name='vol1/root.img'>
                        <host name='server1' port='24007'/>
                      </source>
                      <backingStore/>
                    </backingStore>
                    <target dev='vdb' bus='virtio'/>
                    <serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
                  </disk>
                </devices>
              </domain>
        """

        self.create_info = {'type': 'qcow2',
                            'snapshot_id': '1234-5678',
                            'new_file': 'new-file'}

        self.volume_uuid = '0e38683e-f0af-418f-a3f1-6b67ea0f919d'
        self.snapshot_id = '9c3ca9f4-9f4e-4dba-bedd-5c5e4b52b162'

        # delete_info variants exercised by the delete tests:
        # 1: merge a file into the active layer (blockRebase)
        self.delete_info_1 = {'type': 'qcow2',
                              'file_to_merge': 'snap.img',
                              'merge_target_file': None}

        # 2: commit a file into a deeper target (blockCommit)
        self.delete_info_2 = {'type': 'qcow2',
                              'file_to_merge': 'snap.img',
                              'merge_target_file': 'other-snap.img'}

        # 3: flatten onto no backing file at all
        self.delete_info_3 = {'type': 'qcow2',
                              'file_to_merge': None,
                              'merge_target_file': None}

        self.delete_info_netdisk = {'type': 'qcow2',
                                    'file_to_merge': 'snap.img',
                                    'merge_target_file': 'root.img'}

        # unknown type must be rejected by the driver
        self.delete_info_invalid_type = {'type': 'made_up_type',
                                         'file_to_merge': 'some_file',
                                         'merge_target_file':
                                             'some_other_file'}
|
|
|
|
@mock.patch('nova.virt.block_device.DriverVolumeBlockDevice.'
|
|
'refresh_connection_info')
|
|
@mock.patch('nova.objects.block_device.BlockDeviceMapping.'
|
|
'get_by_volume_and_instance')
|
|
def test_volume_refresh_connection_info(self,
|
|
mock_get_by_volume_and_instance,
|
|
mock_refresh_connection_info):
|
|
instance = objects.Instance(**self.inst)
|
|
fake_bdm = fake_block_device.FakeDbBlockDeviceDict({
|
|
'id': 123,
|
|
'instance_uuid': uuids.instance,
|
|
'device_name': '/dev/sdb',
|
|
'source_type': 'volume',
|
|
'destination_type': 'volume',
|
|
'volume_id': 'fake-volume-id-1',
|
|
'connection_info': '{"fake": "connection_info"}'})
|
|
fake_bdm = objects.BlockDeviceMapping(self.c, **fake_bdm)
|
|
mock_get_by_volume_and_instance.return_value = fake_bdm
|
|
|
|
self.drvr._volume_refresh_connection_info(self.c, instance,
|
|
self.volume_uuid)
|
|
|
|
mock_get_by_volume_and_instance.assert_called_once_with(
|
|
self.c, self.volume_uuid, instance.uuid)
|
|
mock_refresh_connection_info.assert_called_once_with(self.c, instance,
|
|
self.drvr._volume_api, self.drvr)
|
|
|
|
    @mock.patch.object(FakeVirtDomain, 'snapshotCreateXML')
    @mock.patch.object(FakeVirtDomain, 'XMLDesc')
    @mock.patch.object(host.Host, '_get_domain')
    def _test_volume_snapshot_create(self, mock_get, mock_xml, mock_snapshot,
                                     quiesce=True, can_quiesce=True,
                                     quiesce_required=False):
        """Test snapshot creation with file-based disk.

        :param quiesce: snapshotCreateXML succeeds with the QUIESCE flag
        :param can_quiesce: _can_quiesce passes (guest agent usable)
        :param quiesce_required: the image demands quiescing; failure to
            quiesce must then abort the snapshot
        """
        self.flags(instance_name_template='instance-%s')
        if quiesce_required:
            self.inst['system_metadata']['image_os_require_quiesce'] = True
        instance = objects.Instance(**self.inst)

        new_file = 'new-file'

        domain = FakeVirtDomain(fake_xml=self.dom_xml)
        mock_xml.return_value = self.dom_xml

        # Expected <domainsnapshot>: external snapshot for the file disk,
        # the block disk (vdb) excluded.
        snap_xml_src = (
           '<domainsnapshot>\n'
           '  <disks>\n'
           '    <disk name="disk1_file" snapshot="external" type="file">\n'
           '      <source file="new-file"/>\n'
           '    </disk>\n'
           '    <disk name="vdb" snapshot="no"/>\n'
           '  </disks>\n'
           '</domainsnapshot>\n')

        # Older versions of libvirt may be missing these.
        fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT = 32
        fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE = 64

        snap_flags = (fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY |
                      fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA |
                      fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT)

        snap_flags_q = (snap_flags |
                        fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE)

        can_quiesce_mock = mock.Mock()
        if can_quiesce:
            can_quiesce_mock.return_value = None
            if not quiesce:
                # we can quiesce but snapshot with quiesce fails
                mock_snapshot.side_effect = [fakelibvirt.libvirtError(
                    'quiescing failed, no qemu-ga'), None]
        else:
            can_quiesce_mock.side_effect = exception.QemuGuestAgentNotEnabled

        self.drvr._can_quiesce = can_quiesce_mock

        guest = libvirt_guest.Guest(domain)
        if quiesce_required and (not quiesce or not can_quiesce):
            # If we can't quiesce but it's required by the image then we should
            # fail.
            if not quiesce:
                # snapshot + quiesce failed which is a libvirtError
                expected_error = fakelibvirt.libvirtError
            else:
                # quiesce is required but we can't do it
                expected_error = exception.QemuGuestAgentNotEnabled
            self.assertRaises(expected_error,
                              self.drvr._volume_snapshot_create,
                              self.c, instance, guest, self.volume_uuid,
                              new_file)
        else:
            self.drvr._volume_snapshot_create(self.c, instance, guest,
                                              self.volume_uuid, new_file)

        # instance.image_meta generates a new objects.ImageMeta object each
        # time it's called so just use a mock.ANY for the image_meta arg.
        can_quiesce_mock.assert_called_once_with(instance, mock.ANY)

        mock_xml.assert_called_once_with(flags=0)
        mock_get.assert_not_called()
        if can_quiesce:
            if quiesce or quiesce_required:
                mock_snapshot.assert_called_once_with(snap_xml_src,
                                                      flags=snap_flags_q)
            else:
                # quiesce is not required so try snapshot again without it
                self.assertEqual(2, mock_snapshot.call_count)
                mock_snapshot.assert_has_calls([
                    mock.call(snap_xml_src, flags=snap_flags_q),
                    mock.call(snap_xml_src, flags=snap_flags)])
        elif not quiesce_required:
            # quiesce is not required so try snapshot again without it
            mock_snapshot.assert_called_once_with(snap_xml_src,
                                                  flags=snap_flags)
|
|
|
|
@mock.patch.object(FakeVirtDomain, 'snapshotCreateXML')
|
|
@mock.patch.object(FakeVirtDomain, 'XMLDesc')
|
|
@mock.patch.object(host.Host, '_get_domain')
|
|
def test_volume_snapshot_create_libgfapi(self, mock_get, mock_xml,
|
|
mock_snapshot):
|
|
"""Test snapshot creation with libgfapi network disk."""
|
|
self.flags(instance_name_template = 'instance-%s')
|
|
|
|
self.dom_xml = """
|
|
<domain type='kvm'>
|
|
<devices>
|
|
<disk type='file'>
|
|
<source file='disk1_file'/>
|
|
<target dev='vda' bus='virtio'/>
|
|
<serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
|
|
</disk>
|
|
<disk type='block'>
|
|
<source protocol='netfs' name='netfs1/volume-1234'>
|
|
<host name='127.3.4.5' port='24007'/>
|
|
</source>
|
|
<target dev='vdb' bus='virtio' serial='1234'/>
|
|
</disk>
|
|
</devices>
|
|
</domain>"""
|
|
|
|
instance = objects.Instance(**self.inst)
|
|
|
|
new_file = 'new-file'
|
|
|
|
domain = FakeVirtDomain(fake_xml=self.dom_xml)
|
|
mock_xml.return_value = self.dom_xml
|
|
|
|
snap_xml_src = (
|
|
'<domainsnapshot>\n'
|
|
' <disks>\n'
|
|
' <disk name="disk1_file" snapshot="external" type="file">\n'
|
|
' <source file="new-file"/>\n'
|
|
' </disk>\n'
|
|
' <disk name="vdb" snapshot="no"/>\n'
|
|
' </disks>\n'
|
|
'</domainsnapshot>\n')
|
|
|
|
# Older versions of libvirt may be missing these.
|
|
fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT = 32
|
|
fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE = 64
|
|
|
|
snap_flags = (fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY |
|
|
fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA |
|
|
fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT)
|
|
|
|
snap_flags_q = (snap_flags |
|
|
fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE)
|
|
|
|
guest = libvirt_guest.Guest(domain)
|
|
with mock.patch.object(self.drvr, '_can_quiesce', return_value=None):
|
|
self.drvr._volume_snapshot_create(self.c, instance, guest,
|
|
self.volume_uuid, new_file)
|
|
mock_xml.assert_called_once_with(flags=0)
|
|
mock_snapshot.assert_called_once_with(snap_xml_src, flags=snap_flags_q)
|
|
mock_get.assert_not_called()
|
|
|
|
def test_volume_snapshot_create_cannot_quiesce(self):
|
|
# We can't quiesce so we don't try.
|
|
self._test_volume_snapshot_create(can_quiesce=False)
|
|
|
|
def test_volume_snapshot_create_cannot_quiesce_quiesce_required(self):
|
|
# We can't quiesce but it's required so we fail.
|
|
self._test_volume_snapshot_create(can_quiesce=False,
|
|
quiesce_required=True)
|
|
|
|
def test_volume_snapshot_create_can_quiesce_quiesce_required_fails(self):
|
|
# We can quiesce but it fails and it's required so we fail.
|
|
self._test_volume_snapshot_create(
|
|
quiesce=False, can_quiesce=True, quiesce_required=True)
|
|
|
|
def test_volume_snapshot_create_noquiesce(self):
|
|
# We can quiesce but it fails but it's not required so we don't fail.
|
|
self._test_volume_snapshot_create(quiesce=False)
|
|
|
|
def test_volume_snapshot_create_noquiesce_cannot_quiesce(self):
|
|
# We can't quiesce so we don't try, and if we did we'd fail.
|
|
self._test_volume_snapshot_create(quiesce=False, can_quiesce=False)
|
|
|
|
@mock.patch.object(host.Host,
|
|
'has_min_version', return_value=True)
|
|
def test_can_quiesce(self, ver):
|
|
self.flags(virt_type='kvm', group='libvirt')
|
|
instance = objects.Instance(**self.inst)
|
|
image_meta = objects.ImageMeta.from_dict(
|
|
{"properties": {
|
|
"hw_qemu_guest_agent": "yes"}})
|
|
self.assertIsNone(self.drvr._can_quiesce(instance, image_meta))
|
|
|
|
@mock.patch.object(host.Host,
|
|
'has_min_version', return_value=True)
|
|
def test_can_quiesce_bad_hyp(self, ver):
|
|
self.flags(virt_type='lxc', group='libvirt')
|
|
instance = objects.Instance(**self.inst)
|
|
image_meta = objects.ImageMeta.from_dict(
|
|
{"properties": {
|
|
"hw_qemu_guest_agent": "yes"}})
|
|
self.assertRaises(exception.InstanceQuiesceNotSupported,
|
|
self.drvr._can_quiesce, instance, image_meta)
|
|
|
|
@mock.patch.object(host.Host,
|
|
'has_min_version', return_value=True)
|
|
def test_can_quiesce_agent_not_enable(self, ver):
|
|
self.flags(virt_type='kvm', group='libvirt')
|
|
instance = objects.Instance(**self.inst)
|
|
image_meta = objects.ImageMeta.from_dict({})
|
|
self.assertRaises(exception.QemuGuestAgentNotEnabled,
|
|
self.drvr._can_quiesce, instance, image_meta)
|
|
|
|
    @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                '_volume_snapshot_create')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                '_volume_refresh_connection_info')
    def test_volume_snapshot_create_outer_success(self, mock_refresh,
                                                  mock_snap_create, mock_loop):
        """Happy path for volume_snapshot_create: snapshot is created,
        cinder status set to 'creating', and connection info refreshed
        once cinder reports the snapshot 'available'.
        """
        # Synchronous stand-in for FixedIntervalLoopingCall: runs the
        # polled function once, swallowing LoopingCallDone.
        class FakeLoopingCall(object):
            def __init__(self, func):
                self.func = func

            def start(self, *a, **k):
                try:
                    self.func()
                except loopingcall.LoopingCallDone:
                    pass
                return self

            def wait(self):
                return None

        mock_loop.side_effect = FakeLoopingCall

        instance = objects.Instance(**self.inst)

        domain = FakeVirtDomain(fake_xml=self.dom_xml, id=1)
        guest = libvirt_guest.Guest(domain)

        # Patch instance attributes (not classes), so use a local closure.
        @mock.patch.object(self.drvr, '_volume_api')
        @mock.patch.object(self.drvr._host, 'get_guest')
        def _test(mock_get_guest, mock_vol_api):
            mock_get_guest.return_value = guest
            mock_vol_api.get_snapshot.return_value = {'status': 'available'}
            self.drvr.volume_snapshot_create(self.c, instance,
                                             self.volume_uuid,
                                             self.create_info)
            mock_get_guest.assert_called_once_with(instance)
            mock_snap_create.assert_called_once_with(
                self.c, instance, guest, self.volume_uuid,
                self.create_info['new_file'])
            mock_vol_api.update_snapshot_status.assert_called_once_with(
                self.c, self.create_info['snapshot_id'], 'creating')
            mock_vol_api.get_snapshot.assert_called_once_with(
                self.c, self.create_info['snapshot_id'])
            mock_refresh.assert_called_once_with(
                self.c, instance, self.volume_uuid)

        _test()
|
|
|
|
@mock.patch.object(libvirt_driver.LibvirtDriver, '_volume_snapshot_create')
|
|
@mock.patch('nova.volume.cinder.API.update_snapshot_status')
|
|
@mock.patch.object(host.Host, 'get_guest')
|
|
def test_volume_snapshot_create_outer_failure(self, mock_get, mock_update,
|
|
mock_snapshot):
|
|
instance = objects.Instance(**self.inst)
|
|
|
|
domain = FakeVirtDomain(fake_xml=self.dom_xml, id=1)
|
|
guest = libvirt_guest.Guest(domain)
|
|
|
|
mock_get.return_value = guest
|
|
mock_snapshot.side_effect = exception.NovaException('oops')
|
|
|
|
self.assertRaises(exception.NovaException,
|
|
self.drvr.volume_snapshot_create,
|
|
self.c,
|
|
instance,
|
|
self.volume_uuid,
|
|
self.create_info)
|
|
mock_get.assert_called_once_with(instance)
|
|
mock_update.assert_called_once_with(
|
|
self.c, self.create_info['snapshot_id'], 'error')
|
|
mock_snapshot.assert_called_once_with(
|
|
self.c, instance, guest, self.volume_uuid,
|
|
self.create_info['new_file'])
|
|
|
|
    @mock.patch.object(FakeVirtDomain, 'blockCommit')
    @mock.patch.object(FakeVirtDomain, 'blockRebase')
    @mock.patch.object(FakeVirtDomain, 'XMLDesc')
    @mock.patch.object(host.Host, '_get_domain')
    @mock.patch('nova.virt.libvirt.guest.BlockDevice.is_job_complete')
    def test_volume_snapshot_delete_1(self, mock_is_job_complete, mock_get,
                                      mock_xml, mock_rebase, mock_commit):
        """Deleting newest snapshot -- blockRebase."""

        # libvirt lib doesn't have VIR_DOMAIN_BLOCK_REBASE_RELATIVE flag
        # (removed here, restored at the end of the test).
        fakelibvirt.__dict__.pop('VIR_DOMAIN_BLOCK_REBASE_RELATIVE')
        self.stub_out('nova.virt.libvirt.driver.libvirt', fakelibvirt)

        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'

        domain = FakeVirtDomain(fake_xml=self.dom_xml)
        mock_xml.return_value = self.dom_xml
        mock_get.return_value = domain

        # is_job_complete returns False when initially called, then True
        mock_is_job_complete.side_effect = (False, True)

        self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid,
                                          snapshot_id, self.delete_info_1)

        self.assertEqual(2, mock_is_job_complete.call_count)
        mock_xml.assert_called_once_with(flags=0)
        mock_get.assert_called_once_with(instance)
        # Without the RELATIVE flag the rebase runs with flags=0.
        mock_rebase.assert_called_once_with('vda', 'snap.img', 0, flags=0)
        mock_commit.assert_not_called()
        fakelibvirt.__dict__.update({'VIR_DOMAIN_BLOCK_REBASE_RELATIVE': 8})
|
|
|
|
    @mock.patch.object(FakeVirtDomain, 'blockCommit')
    @mock.patch.object(FakeVirtDomain, 'blockRebase')
    @mock.patch.object(FakeVirtDomain, 'XMLDesc')
    @mock.patch.object(host.Host, 'get_guest')
    @mock.patch('nova.virt.libvirt.guest.BlockDevice.is_job_complete')
    def test_volume_snapshot_delete_relative_1(self, mock_is_job_complete,
                                               mock_get, mock_xml, mock_rebase,
                                               mock_commit):
        """Deleting newest snapshot -- blockRebase using relative flag"""

        self.stub_out('nova.virt.libvirt.driver.libvirt', fakelibvirt)

        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'

        domain = FakeVirtDomain(fake_xml=self.dom_xml)
        guest = libvirt_guest.Guest(domain)
        mock_xml.return_value = self.dom_xml
        mock_get.return_value = guest

        # is_job_complete returns False when initially called, then True
        mock_is_job_complete.side_effect = (False, True)

        self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid,
                                          snapshot_id, self.delete_info_1)

        self.assertEqual(2, mock_is_job_complete.call_count)
        mock_xml.assert_called_once_with(flags=0)
        mock_get.assert_called_once_with(instance)
        # With the flag present the rebase is performed relatively.
        mock_rebase.assert_called_once_with(
            'vda', 'snap.img', 0,
            flags=fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_RELATIVE)
        mock_commit.assert_not_called()
|
|
|
|
def _setup_block_rebase_domain_and_guest_mocks(self, dom_xml):
|
|
mock_domain = mock.Mock(spec=fakelibvirt.virDomain)
|
|
mock_domain.XMLDesc.return_value = dom_xml
|
|
guest = libvirt_guest.Guest(mock_domain)
|
|
|
|
exc = fakelibvirt.make_libvirtError(
|
|
fakelibvirt.libvirtError, 'virDomainBlockRebase() failed',
|
|
error_code=fakelibvirt.VIR_ERR_OPERATION_INVALID)
|
|
mock_domain.blockRebase.side_effect = exc
|
|
|
|
return mock_domain, guest
|
|
|
|
    @mock.patch.object(compute_utils, 'disk_ops_semaphore')
    @mock.patch.object(host.Host, "has_min_version",
                       mock.Mock(return_value=True))
    @mock.patch("nova.virt.libvirt.guest.Guest.is_active",
                mock.Mock(return_value=False))
    @mock.patch('nova.virt.images.qemu_img_info',
                return_value=mock.Mock(file_format="fake_fmt"))
    @mock.patch('oslo_concurrency.processutils.execute')
    def test_volume_snapshot_delete_when_dom_not_running(self, mock_execute,
                                                         mock_qemu_img_info,
                                                         mock_disk_op_sema):
        """Deleting newest snapshot of a file-based image when the domain is
        not running should trigger a blockRebase using qemu-img not libvirt.
        In this test, we rebase the image with another image as backing file.
        """
        mock_domain, guest = self._setup_block_rebase_domain_and_guest_mocks(
            self.dom_xml)

        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'
        with mock.patch.object(self.drvr._host, 'get_guest',
                               return_value=guest):
            self.drvr._volume_snapshot_delete(self.c, instance,
                                              self.volume_uuid, snapshot_id,
                                              self.delete_info_1)

        # Disk ops are serialized through the compute-wide semaphore.
        mock_disk_op_sema.__enter__.assert_called_once()
        # The backing file's format is probed before the rebase.
        mock_qemu_img_info.assert_called_once_with("snap.img")
        mock_execute.assert_called_once_with('qemu-img', 'rebase',
                                             '-b', 'snap.img', '-F',
                                             'fake_fmt', 'disk1_file')
|
|
|
|
    @mock.patch.object(compute_utils, 'disk_ops_semaphore')
    @mock.patch.object(host.Host, "has_min_version",
                       mock.Mock(return_value=True))
    @mock.patch("nova.virt.libvirt.guest.Guest.is_active",
                mock.Mock(return_value=False))
    @mock.patch('nova.virt.images.qemu_img_info',
                return_value=mock.Mock(file_format="fake_fmt"))
    @mock.patch('oslo_concurrency.processutils.execute')
    def test_volume_snapshot_delete_when_dom_not_running_and_no_rebase_base(
            self, mock_execute, mock_qemu_img_info, mock_disk_op_sema):
        """Deleting newest snapshot of a file-based image when the domain is
        not running should trigger a blockRebase using qemu-img not libvirt.
        In this test, the image is rebased onto no backing file (i.e.
        it will exist independently of any backing file)
        """
        mock_domain, mock_guest = (
            self._setup_block_rebase_domain_and_guest_mocks(self.dom_xml))

        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'
        with mock.patch.object(self.drvr._host, 'get_guest',
                               return_value=mock_guest):
            self.drvr._volume_snapshot_delete(self.c, instance,
                                              self.volume_uuid, snapshot_id,
                                              self.delete_info_3)

        mock_disk_op_sema.__enter__.assert_called_once()
        # No backing file to merge, so its format is never probed.
        self.assertEqual(0, mock_qemu_img_info.call_count)
        mock_execute.assert_called_once_with('qemu-img', 'rebase',
                                             '-b', '', 'disk1_file')
|
|
|
|
    @mock.patch.object(host.Host, "has_min_version",
                       mock.Mock(return_value=True))
    @mock.patch("nova.virt.libvirt.guest.Guest.is_active",
                mock.Mock(return_value=False))
    def test_volume_snapshot_delete_when_dom_with_nw_disk_not_running(self):
        """Deleting newest snapshot of a network disk when the domain is not
        running should raise a NovaException.

        qemu-img cannot operate on network-backed disks, so the offline
        fallback refuses them.
        """
        mock_domain, mock_guest = (
            self._setup_block_rebase_domain_and_guest_mocks(
                self.dom_netdisk_xml))
        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'
        with mock.patch.object(self.drvr._host, 'get_guest',
                               return_value=mock_guest):
            ex = self.assertRaises(exception.NovaException,
                                   self.drvr._volume_snapshot_delete,
                                   self.c, instance, self.volume_uuid,
                                   snapshot_id, self.delete_info_1)
            self.assertIn('has not been fully tested', six.text_type(ex))
|
|
|
|
    @mock.patch.object(host.Host, '_get_domain')
    @mock.patch.object(FakeVirtDomain, 'blockCommit')
    @mock.patch.object(FakeVirtDomain, 'blockRebase')
    @mock.patch.object(FakeVirtDomain, 'XMLDesc')
    @mock.patch('nova.virt.libvirt.guest.BlockDevice.is_job_complete')
    def test_volume_snapshot_delete_relative_2(self, mock_is_job_complete,
                                               mock_xml, mock_rebase,
                                               mock_commit, mock_get):
        """Deleting older snapshot -- blockCommit using relative flag"""

        self.stub_out('nova.virt.libvirt.driver.libvirt', fakelibvirt)

        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'

        domain = FakeVirtDomain(fake_xml=self.dom_xml)
        mock_xml.return_value = self.dom_xml
        mock_get.return_value = domain

        # is_job_complete returns False when initially called, then True
        mock_is_job_complete.side_effect = (False, True)

        # delete_info_2 names a merge target, so commit (not rebase) runs.
        self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid,
                                          snapshot_id, self.delete_info_2)

        self.assertEqual(2, mock_is_job_complete.call_count)
        mock_xml.assert_called_once_with(flags=0)
        mock_get.assert_called_once_with(instance)
        mock_commit.assert_called_once_with(
            'vda', 'other-snap.img', 'snap.img', 0,
            flags=fakelibvirt.VIR_DOMAIN_BLOCK_COMMIT_RELATIVE)
        mock_rebase.assert_not_called()
|
|
|
|
    @mock.patch('nova.virt.libvirt.guest.BlockDevice.is_job_complete')
    def test_volume_snapshot_delete_nonrelative_null_base(
            self, mock_is_job_complete):
        # Deleting newest and last snapshot of a volume
        # with blockRebase. So base of the new image will be null.

        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'

        domain = FakeVirtDomain(fake_xml=self.dom_xml)
        guest = libvirt_guest.Guest(domain)

        # Block job finishes on the first poll.
        mock_is_job_complete.return_value = True

        with test.nested(
            mock.patch.object(domain, 'XMLDesc', return_value=self.dom_xml),
            mock.patch.object(self.drvr._host, 'get_guest',
                              return_value=guest),
            mock.patch.object(domain, 'blockRebase'),
        ) as (mock_xmldesc, mock_get_guest, mock_rebase):

            self.drvr._volume_snapshot_delete(self.c, instance,
                                              self.volume_uuid, snapshot_id,
                                              self.delete_info_3)

            mock_xmldesc.assert_called_once_with(flags=0)
            mock_get_guest.assert_called_once_with(instance)
            # base=None flattens the image onto no backing file.
            mock_rebase.assert_called_once_with('vda', None, 0, flags=0)
            mock_is_job_complete.assert_called()
|
|
|
|
    @mock.patch('nova.virt.libvirt.guest.BlockDevice.is_job_complete')
    def test_volume_snapshot_delete_netdisk_nonrelative_null_base(
            self, mock_is_job_complete):
        # Deleting newest and last snapshot of a network attached volume
        # with blockRebase. So base of the new image will be null.

        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'

        domain = FakeVirtDomain(fake_xml=self.dom_netdisk_xml_2)
        guest = libvirt_guest.Guest(domain)

        # Block job finishes on the first poll.
        mock_is_job_complete.return_value = True

        with test.nested(
            mock.patch.object(domain, 'XMLDesc',
                              return_value=self.dom_netdisk_xml_2),
            mock.patch.object(self.drvr._host, 'get_guest',
                              return_value=guest),
            mock.patch.object(domain, 'blockRebase'),
        ) as (mock_xmldesc, mock_get_guest, mock_rebase):
            self.drvr._volume_snapshot_delete(self.c, instance,
                                              self.volume_uuid, snapshot_id,
                                              self.delete_info_3)

            mock_xmldesc.assert_called_once_with(flags=0)
            mock_get_guest.assert_called_once_with(instance)
            # base=None flattens the network disk (vdb) completely.
            mock_rebase.assert_called_once_with('vdb', None, 0, flags=0)
            mock_is_job_complete.assert_called()
|
|
|
|
@mock.patch('nova.volume.cinder.API.update_snapshot_status')
|
|
@mock.patch.object(libvirt_driver.LibvirtDriver,
|
|
'_volume_refresh_connection_info')
|
|
@mock.patch.object(libvirt_driver.LibvirtDriver,
|
|
'_volume_snapshot_delete')
|
|
@mock.patch.object(host.Host, '_get_domain')
|
|
def test_volume_snapshot_delete_outer_success(
|
|
self, mock_get, mock_snapshot, mock_refresh, mock_update):
|
|
instance = objects.Instance(**self.inst)
|
|
snapshot_id = 'snapshot-1234'
|
|
|
|
self.drvr.volume_snapshot_delete(self.c, instance, self.volume_uuid,
|
|
snapshot_id,
|
|
self.delete_info_1)
|
|
|
|
mock_snapshot.assert_called_once_with(
|
|
self.c, instance, self.volume_uuid, snapshot_id,
|
|
delete_info=self.delete_info_1)
|
|
mock_update.assert_called_once_with(self.c, snapshot_id, 'deleting')
|
|
mock_refresh.assert_called_once_with(self.c, instance,
|
|
self.volume_uuid)
|
|
mock_get.assert_not_called()
|
|
|
|
@mock.patch('nova.volume.cinder.API.update_snapshot_status')
|
|
@mock.patch.object(libvirt_driver.LibvirtDriver,
|
|
'_volume_snapshot_delete')
|
|
@mock.patch.object(host.Host, '_get_domain')
|
|
def test_volume_snapshot_delete_outer_failure(
|
|
self, mock_get, mock_snapshot, mock_update):
|
|
instance = objects.Instance(**self.inst)
|
|
snapshot_id = '1234-9876'
|
|
mock_snapshot.side_effect = exception.NovaException('oops')
|
|
|
|
self.assertRaises(exception.NovaException,
|
|
self.drvr.volume_snapshot_delete,
|
|
self.c,
|
|
instance,
|
|
self.volume_uuid,
|
|
snapshot_id,
|
|
self.delete_info_1)
|
|
mock_snapshot.assert_called_once_with(
|
|
self.c, instance, self.volume_uuid, snapshot_id,
|
|
delete_info=self.delete_info_1)
|
|
mock_update.assert_called_once_with(self.c, snapshot_id,
|
|
'error_deleting')
|
|
mock_get.assert_not_called()
|
|
|
|
@mock.patch('nova.volume.cinder.API.update_snapshot_status')
|
|
@mock.patch.object(host.Host, '_get_domain')
|
|
def test_volume_snapshot_delete_invalid_type(self, mock_get,
|
|
mock_update):
|
|
instance = objects.Instance(**self.inst)
|
|
self.assertRaises(exception.NovaException,
|
|
self.drvr.volume_snapshot_delete,
|
|
self.c,
|
|
instance,
|
|
self.volume_uuid,
|
|
self.snapshot_id,
|
|
self.delete_info_invalid_type)
|
|
mock_update.assert_called_once_with(self.c, self.snapshot_id,
|
|
'error_deleting')
|
|
mock_get.assert_not_called()
|
|
|
|
    @mock.patch.object(host.Host, '_get_domain')
    @mock.patch('nova.virt.libvirt.guest.BlockDevice.is_job_complete')
    def test_volume_snapshot_delete_netdisk_1(
            self, mock_is_job_complete, mock_get):
        """Delete newest snapshot -- blockRebase for libgfapi/network disk."""

        # Domain whose XMLDesc always reports the network-disk topology.
        class FakeNetdiskDomain(FakeVirtDomain):
            def __init__(self, *args, **kwargs):
                super(FakeNetdiskDomain, self).__init__(*args, **kwargs)

            def XMLDesc(self, flags):
                return self.dom_netdisk_xml

        # libvirt lib doesn't have VIR_DOMAIN_BLOCK_REBASE_RELATIVE
        # (removed here, restored at the end of the test).
        fakelibvirt.__dict__.pop('VIR_DOMAIN_BLOCK_REBASE_RELATIVE')
        self.stub_out('nova.virt.libvirt.driver.libvirt', fakelibvirt)

        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'

        domain = FakeNetdiskDomain(fake_xml=self.dom_netdisk_xml)
        mock_get.return_value = domain

        # is_job_complete returns False when initially called, then True
        mock_is_job_complete.side_effect = (False, True)

        with test.nested(
            mock.patch.object(FakeNetdiskDomain, 'XMLDesc',
                              return_value=self.dom_netdisk_xml),
            mock.patch.object(FakeNetdiskDomain, 'blockRebase'),
            mock.patch.object(FakeNetdiskDomain, 'blockCommit')) as (mock_xml,
                mock_rebase, mock_commit):
            self.drvr._volume_snapshot_delete(self.c, instance,
                                              self.volume_uuid,
                                              snapshot_id, self.delete_info_1)

            self.assertEqual(2, mock_is_job_complete.call_count)
            mock_xml.assert_called_once_with(flags=0)
            mock_get.assert_called_once_with(instance)
            # Network disks rebase onto the backing-chain index notation.
            mock_rebase.assert_called_once_with('vdb', 'vdb[1]', 0, flags=0)
            mock_commit.assert_not_called()

        fakelibvirt.__dict__.update({'VIR_DOMAIN_BLOCK_REBASE_RELATIVE': 8})
|
|
|
|
@mock.patch.object(host.Host, '_get_domain')
|
|
@mock.patch('nova.virt.libvirt.guest.BlockDevice.is_job_complete')
|
|
def test_volume_snapshot_delete_netdisk_relative_1(
|
|
self, mock_is_job_complete, mock_get):
|
|
"""Delete newest snapshot -- blockRebase for libgfapi/network disk."""
|
|
|
|
class FakeNetdiskDomain(FakeVirtDomain):
|
|
def __init__(self, *args, **kwargs):
|
|
super(FakeNetdiskDomain, self).__init__(*args, **kwargs)
|
|
|
|
def XMLDesc(self, flags):
|
|
return self.dom_netdisk_xml
|
|
|
|
self.stub_out('nova.virt.libvirt.driver.libvirt', fakelibvirt)
|
|
|
|
instance = objects.Instance(**self.inst)
|
|
snapshot_id = 'snapshot-1234'
|
|
|
|
domain = FakeNetdiskDomain(fake_xml=self.dom_netdisk_xml)
|
|
mock_get.return_value = domain
|
|
|
|
# is_job_complete returns False when initially called, then True
|
|
mock_is_job_complete.side_effect = (False, True)
|
|
|
|
with test.nested(
|
|
mock.patch.object(FakeNetdiskDomain, 'XMLDesc',
|
|
return_value=self.dom_netdisk_xml),
|
|
mock.patch.object(FakeNetdiskDomain, 'blockRebase'),
|
|
mock.patch.object(FakeNetdiskDomain, 'blockCommit')) as (mock_xml,
|
|
mock_rebase, mock_commit):
|
|
self.drvr._volume_snapshot_delete(self.c, instance,
|
|
self.volume_uuid,
|
|
snapshot_id, self.delete_info_1)
|
|
|
|
self.assertEqual(2, mock_is_job_complete.call_count)
|
|
mock_xml.assert_called_once_with(flags=0)
|
|
mock_get.assert_called_once_with(instance)
|
|
mock_rebase.assert_called_once_with(
|
|
'vdb', 'vdb[1]', 0,
|
|
flags=fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_RELATIVE)
|
|
mock_commit.assert_not_called()
|
|
|
|
    @mock.patch.object(host.Host, '_get_domain')
    @mock.patch('nova.virt.libvirt.guest.BlockDevice.is_job_complete')
    def test_volume_snapshot_delete_netdisk_relative_2(
            self, mock_is_job_complete, mock_get):
        """Delete older snapshot -- blockCommit for libgfapi/network disk."""

        # Fake domain whose XML always describes the network-backed disk.
        class FakeNetdiskDomain(FakeVirtDomain):
            def __init__(self, *args, **kwargs):
                super(FakeNetdiskDomain, self).__init__(*args, **kwargs)

            def XMLDesc(self, flags):
                return self.dom_netdisk_xml

        self.stub_out('nova.virt.libvirt.driver.libvirt', fakelibvirt)

        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'

        domain = FakeNetdiskDomain(fake_xml=self.dom_netdisk_xml)
        mock_get.return_value = domain

        # is_job_complete returns False when initially called, then True
        mock_is_job_complete.side_effect = (False, True)

        with test.nested(
                mock.patch.object(FakeNetdiskDomain, 'XMLDesc',
                                  return_value=self.dom_netdisk_xml),
                mock.patch.object(FakeNetdiskDomain, 'blockRebase'),
                mock.patch.object(FakeNetdiskDomain, 'blockCommit')) as (
                    mock_xml, mock_rebase, mock_commit):
            self.drvr._volume_snapshot_delete(self.c, instance,
                                              self.volume_uuid,
                                              snapshot_id,
                                              self.delete_info_netdisk)

            self.assertEqual(2, mock_is_job_complete.call_count)
            mock_xml.assert_called_once_with(flags=0)
            mock_get.assert_called_once_with(instance)
            # Deleting an older (non-newest) snapshot is performed with a
            # relative blockCommit; blockRebase must not be used here.
            mock_commit.assert_called_once_with(
                'vdb', 'vdb[0]', 'vdb[1]', 0,
                flags=fakelibvirt.VIR_DOMAIN_BLOCK_COMMIT_RELATIVE)
            mock_rebase.assert_not_called()
|
|
|
|
|
|
def _fake_convert_image(source, dest, in_format, out_format,
                        run_as_root=True):
    """Test stub for images.convert_image.

    Instead of converting anything, record an empty file at *dest* in the
    fake libvirt_utils in-memory file store so later code sees the
    destination as existing.
    """
    libvirt_driver.libvirt_utils.files[dest] = b''
|
|
|
|
|
|
class _BaseSnapshotTests(test.NoDBTestCase):
    """Shared fixture and helpers for the libvirt snapshot test suites.

    Subclasses configure the image backend (qcow2/raw/rbd/lvm, LXC, ...)
    and drive the whole snapshot path through _test_snapshot() against a
    stubbed-out image service and fake libvirt.
    """

    def setUp(self):
        super(_BaseSnapshotTests, self).setUp()
        self.flags(snapshots_directory='./', group='libvirt')
        self.context = context.get_admin_context()

        # Replace the real libvirt utils with in-memory fakes in both the
        # driver and the imagebackend modules so no host commands run.
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.driver.libvirt_utils',
            fake_libvirt_utils))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.imagebackend.libvirt_utils',
            fake_libvirt_utils))
        self.useFixture(fakelibvirt.FakeLibvirtFixture())

        self.image_service = nova.tests.unit.image.fake.stub_out_image_service(
            self)

        # Stand-in for the compute manager's update_task_state callback;
        # _assert_snapshot() checks the expected task-state transitions.
        self.mock_update_task_state = mock.Mock()

        test_instance = _create_test_instance()
        self.instance_ref = objects.Instance(**test_instance)
        self.instance_ref.info_cache = objects.InstanceInfoCache(
            network_info=None)

    def _assert_snapshot(self, snapshot, disk_format,
                         expected_properties=None):
        """Verify task-state transitions and the uploaded snapshot metadata.

        :param snapshot: image dict returned by the fake image service
        :param disk_format: expected disk_format of the uploaded snapshot
        :param expected_properties: optional dict of image properties that
            must appear with these exact values on the snapshot
        """
        self.mock_update_task_state.assert_has_calls([
            mock.call(task_state=task_states.IMAGE_PENDING_UPLOAD),
            mock.call(task_state=task_states.IMAGE_UPLOADING,
                      expected_state=task_states.IMAGE_PENDING_UPLOAD)])

        props = snapshot['properties']
        self.assertEqual(props['image_state'], 'available')
        self.assertEqual(snapshot['status'], 'active')
        self.assertEqual(snapshot['disk_format'], disk_format)
        self.assertEqual(snapshot['name'], 'test-snap')

        if expected_properties:
            for expected_key, expected_value in \
                    expected_properties.items():
                self.assertEqual(expected_value, props[expected_key])

    def _create_image(self, extra_properties=None):
        """Register a new image in 'creating' state with the image service.

        :param extra_properties: optional extra image properties merged on
            top of the standard instance_id/user_id ones
        :returns: the image metadata dict created by the image service
        """
        properties = {'instance_id': self.instance_ref['id'],
                      'user_id': str(self.context.user_id)}
        if extra_properties:
            properties.update(extra_properties)

        sent_meta = {'name': 'test-snap',
                     'is_public': False,
                     'status': 'creating',
                     'properties': properties}

        # Create new image. It will be updated in snapshot method
        # To work with it from snapshot, the single image_service is needed
        recv_meta = self.image_service.create(self.context, sent_meta)
        return recv_meta

    @mock.patch.object(compute_utils, 'disk_ops_semaphore',
                       new_callable=compute_utils.UnlimitedSemaphore)
    @mock.patch.object(host.Host, 'has_min_version')
    @mock.patch.object(imagebackend.Image, 'resolve_driver_format')
    @mock.patch.object(host.Host, '_get_domain')
    def _snapshot(self, image_id, mock_get_domain, mock_resolve, mock_version,
                  mock_disk_op_sema):
        """Run LibvirtDriver.snapshot() on *image_id* and return the
        resulting image record from the fake image service.
        """
        mock_get_domain.return_value = FakeVirtDomain()
        driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        driver.snapshot(self.context, self.instance_ref, image_id,
                        self.mock_update_task_state)
        snapshot = self.image_service.show(self.context, image_id)
        return snapshot

    def _test_snapshot(self, disk_format, extra_properties=None):
        # End-to-end helper: create an image, snapshot into it, then verify
        # the uploaded result and the task-state transitions.
        recv_meta = self._create_image(extra_properties=extra_properties)
        snapshot = self._snapshot(recv_meta['id'])
        self._assert_snapshot(snapshot, disk_format=disk_format,
                              expected_properties=extra_properties)
|
|
|
|
|
|
class LibvirtSnapshotTests(_BaseSnapshotTests):
    """Snapshot tests for the default libvirt (QEMU/KVM) configuration."""

    def setUp(self):
        super(LibvirtSnapshotTests, self).setUp()
        # All paths through livesnapshot trigger a chown behind privsep
        self.privsep_chown = mock.patch.object(nova.privsep.path, 'chown')
        self.addCleanup(self.privsep_chown.stop)
        self.privsep_chown.start()

    def test_ami(self):
        # Assign different image_ref from nova/images/fakes for testing ami
        self.instance_ref.image_ref = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
        self.instance_ref.system_metadata = \
            utils.get_system_metadata_from_image(
                {'disk_format': 'ami'})

        self._test_snapshot(disk_format='ami')

    @mock.patch.object(fake_libvirt_utils, 'disk_type', new='raw')
    @mock.patch.object(libvirt_driver.imagebackend.images,
                       'convert_image',
                       side_effect=_fake_convert_image)
    def test_raw(self, mock_convert_image):
        self._test_snapshot(disk_format='raw')

    def test_qcow2(self):
        self._test_snapshot(disk_format='qcow2')

    @mock.patch.object(fake_libvirt_utils, 'disk_type', new='ploop')
    @mock.patch.object(libvirt_driver.imagebackend.images,
                       'convert_image',
                       side_effect=_fake_convert_image)
    def test_ploop(self, mock_convert_image):
        self._test_snapshot(disk_format='ploop')

    def test_no_image_architecture(self):
        # Image without an architecture property in nova/images/fakes.
        self.instance_ref.image_ref = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
        self._test_snapshot(disk_format='qcow2')

    def test_no_original_image(self):
        # image_ref that does not exist in the fake image service at all.
        self.instance_ref.image_ref = '661122aa-1234-dede-fefe-babababababa'
        self._test_snapshot(disk_format='qcow2')

    def test_snapshot_metadata_image(self):
        # Assign an image with an architecture defined (x86_64)
        self.instance_ref.image_ref = 'a440c04b-79fa-479c-bed1-0b816eaec379'

        extra_properties = {'architecture': 'fake_arch',
                            'key_a': 'value_a',
                            'key_b': 'value_b',
                            'os_type': 'linux'}

        self._test_snapshot(disk_format='qcow2',
                            extra_properties=extra_properties)

    @mock.patch.object(libvirt_driver.LOG, 'exception')
    def test_snapshot_update_task_state_failed(self, mock_exception):
        # InstanceNotFound raised from the task-state callback must
        # propagate without being logged as an unexpected exception.
        res = [None, exception.InstanceNotFound(instance_id='foo')]
        self.mock_update_task_state.side_effect = res
        self.assertRaises(exception.InstanceNotFound, self._test_snapshot,
                          disk_format='qcow2')
        self.assertFalse(mock_exception.called)

    @mock.patch.object(host.Host, 'get_guest')
    @mock.patch.object(host.Host, 'write_instance_config')
    def test_failing_domain_not_found(self, mock_write_config, mock_get_guest):
        # A libvirt VIR_ERR_NO_DOMAIN error during rebase must be
        # translated into InstanceNotFound.
        mock_dev = mock.Mock(spec=libvirt_guest.BlockDevice)
        mock_guest = mock.Mock(spec=libvirt_guest.Guest)
        mock_guest.get_power_state.return_value = power_state.RUNNING
        mock_guest.get_block_device.return_value = mock_dev
        mock_guest._domain = mock.Mock()
        mock_get_guest.return_value = mock_guest
        ex = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError,
            "No such domain",
            error_code=fakelibvirt.VIR_ERR_NO_DOMAIN)
        mock_dev.rebase.side_effect = ex
        self.assertRaises(exception.InstanceNotFound, self._test_snapshot,
                          disk_format='qcow2')

    @mock.patch.object(rbd_utils, 'RBDDriver')
    @mock.patch.object(rbd_utils, 'rbd')
    def test_raw_with_rbd_clone(self, mock_rbd, mock_driver):
        # Happy path: RBD-backed disk is snapshotted via clone + flatten.
        self.flags(images_type='rbd', group='libvirt')
        rbd = mock_driver.return_value
        rbd.parent_info = mock.Mock(return_value=['test-pool', '', ''])
        rbd.parse_url = mock.Mock(return_value=['a', 'b', 'c', 'd'])
        with mock.patch.object(fake_libvirt_utils, 'find_disk',
                               return_value=('rbd://some/fake/rbd/image',
                                             'raw')):
            with mock.patch.object(fake_libvirt_utils, 'disk_type', new='rbd'):
                self._test_snapshot(disk_format='raw')
        rbd.clone.assert_called_with(mock.ANY, mock.ANY, dest_pool='test-pool')
        rbd.flatten.assert_called_with(mock.ANY, pool='test-pool')

    @mock.patch.object(rbd_utils, 'RBDDriver')
    @mock.patch.object(rbd_utils, 'rbd')
    def test_raw_with_rbd_clone_graceful_fallback(self, mock_rbd, mock_driver):
        # If parent_info lookup fails, the driver falls back to a regular
        # image conversion instead of an RBD clone.
        self.flags(images_type='rbd', group='libvirt')
        rbd = mock_driver.return_value
        rbd.parent_info = mock.Mock(side_effect=exception.ImageUnacceptable(
            image_id='fake_id', reason='rbd testing'))
        with test.nested(
                mock.patch.object(libvirt_driver.imagebackend.images,
                                  'convert_image',
                                  side_effect=_fake_convert_image),
                mock.patch.object(fake_libvirt_utils, 'find_disk',
                                  return_value=('rbd://some/fake/rbd/image',
                                                'raw')),
                mock.patch.object(fake_libvirt_utils, 'disk_type', new='rbd')):
            self._test_snapshot(disk_format='raw')
            self.assertFalse(rbd.clone.called)

    @mock.patch.object(rbd_utils, 'RBDDriver')
    @mock.patch.object(rbd_utils, 'rbd')
    def test_raw_with_rbd_clone_eperm(self, mock_rbd, mock_driver):
        # A Forbidden error from clone also falls back to conversion and
        # must clean up the partially-created direct snapshot.
        self.flags(images_type='rbd', group='libvirt')
        rbd = mock_driver.return_value
        rbd.parent_info = mock.Mock(return_value=['test-pool', '', ''])
        rbd.parse_url = mock.Mock(return_value=['a', 'b', 'c', 'd'])
        rbd.clone = mock.Mock(side_effect=exception.Forbidden(
            image_id='fake_id', reason='rbd testing'))
        with test.nested(
                mock.patch.object(libvirt_driver.imagebackend.images,
                                  'convert_image',
                                  side_effect=_fake_convert_image),
                mock.patch.object(fake_libvirt_utils, 'find_disk',
                                  return_value=('rbd://some/fake/rbd/image',
                                                'raw')),
                mock.patch.object(fake_libvirt_utils, 'disk_type', new='rbd')):
            self._test_snapshot(disk_format='raw')
            # Ensure that the direct_snapshot attempt was cleaned up
            rbd.remove_snap.assert_called_with('c', 'd', ignore_errors=False,
                                               pool='b', force=True)

    @mock.patch.object(rbd_utils, 'RBDDriver')
    @mock.patch.object(rbd_utils, 'rbd')
    def test_raw_with_rbd_clone_post_process_fails(self, mock_rbd,
                                                   mock_driver):
        # A failure after the clone (image-service update) must still clean
        # up the direct snapshot, this time ignoring cleanup errors.
        self.flags(images_type='rbd', group='libvirt')
        rbd = mock_driver.return_value
        rbd.parent_info = mock.Mock(return_value=['test-pool', '', ''])
        rbd.parse_url = mock.Mock(return_value=['a', 'b', 'c', 'd'])
        with test.nested(
                mock.patch.object(fake_libvirt_utils, 'find_disk',
                                  return_value=('rbd://some/fake/rbd/image',
                                                'raw')),
                mock.patch.object(fake_libvirt_utils, 'disk_type', new='rbd'),
                mock.patch.object(self.image_service, 'update',
                                  side_effect=test.TestingException)):
            self.assertRaises(test.TestingException, self._test_snapshot,
                              disk_format='raw')
        rbd.clone.assert_called_with(mock.ANY, mock.ANY, dest_pool='test-pool')
        rbd.flatten.assert_called_with(mock.ANY, pool='test-pool')
        # Ensure that the direct_snapshot attempt was cleaned up
        rbd.remove_snap.assert_called_with('c', 'd', ignore_errors=True,
                                           pool='b', force=True)

    @mock.patch.object(imagebackend.Image, 'direct_snapshot')
    @mock.patch.object(imagebackend.Image, 'resolve_driver_format')
    @mock.patch.object(host.Host, 'has_min_version', return_value=True)
    @mock.patch.object(host.Host, 'get_guest')
    def test_raw_with_rbd_clone_is_live_snapshot(self,
                                                 mock_get_guest,
                                                 mock_version,
                                                 mock_resolve,
                                                 mock_snapshot):
        # A direct RBD snapshot must be taken live: the guest is never
        # suspended.
        self.flags(images_type='rbd', group='libvirt')
        mock_guest = mock.Mock(spec=libvirt_guest.Guest)
        mock_guest._domain = mock.Mock()
        mock_get_guest.return_value = mock_guest
        driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        recv_meta = self._create_image()
        with mock.patch.object(driver, "suspend") as mock_suspend:
            driver.snapshot(self.context, self.instance_ref, recv_meta['id'],
                            self.mock_update_task_state)
            self.assertFalse(mock_suspend.called)

    @mock.patch.object(libvirt_driver.imagebackend.images, 'convert_image',
                       side_effect=_fake_convert_image)
    @mock.patch.object(fake_libvirt_utils, 'find_disk')
    @mock.patch.object(imagebackend.Image, 'resolve_driver_format')
    @mock.patch.object(host.Host, 'has_min_version', return_value=True)
    @mock.patch.object(host.Host, 'get_guest')
    @mock.patch.object(rbd_utils, 'RBDDriver')
    @mock.patch.object(rbd_utils, 'rbd')
    def test_raw_with_rbd_clone_failure_does_cold_snapshot(self,
                                                           mock_rbd,
                                                           mock_driver,
                                                           mock_get_guest,
                                                           mock_version,
                                                           mock_resolve,
                                                           mock_find_disk,
                                                           mock_convert):
        # When the direct RBD snapshot path fails, the fallback is a cold
        # snapshot, which suspends the running guest.
        self.flags(images_type='rbd', group='libvirt')
        rbd = mock_driver.return_value
        rbd.parent_info = mock.Mock(side_effect=exception.ImageUnacceptable(
            image_id='fake_id', reason='rbd testing'))
        mock_find_disk.return_value = ('rbd://some/fake/rbd/image', 'raw')
        mock_guest = mock.Mock(spec=libvirt_guest.Guest)
        mock_guest.get_power_state.return_value = power_state.RUNNING
        mock_guest._domain = mock.Mock()
        mock_get_guest.return_value = mock_guest
        driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        recv_meta = self._create_image()

        with mock.patch.object(fake_libvirt_utils, 'disk_type', new='rbd'):
            with mock.patch.object(driver, "suspend") as mock_suspend:
                driver.snapshot(self.context, self.instance_ref,
                                recv_meta['id'], self.mock_update_task_state)
                self.assertTrue(mock_suspend.called)

    @mock.patch.object(host.Host, 'get_guest')
    @mock.patch.object(host.Host, 'has_min_version', return_value=True)
    def test_cold_snapshot_based_on_power_state(
            self, mock_version, mock_get_guest):
        """Tests that a cold snapshot is attempted because the guest power
        state is SHUTDOWN or PAUSED.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
        image = self._create_image()
        for p_state in (power_state.SHUTDOWN, power_state.PAUSED):
            mock_guest = mock.Mock(spec=libvirt_guest.Guest)
            mock_guest.get_power_state.return_value = p_state
            mock_guest._domain = mock.Mock()
            mock_get_guest.return_value = mock_guest
            # Make _prepare_domain_for_snapshot short-circuit and fail, we just
            # want to know that it was called with the correct live_snapshot
            # argument based on the power_state.
            with mock.patch.object(
                    drvr, '_prepare_domain_for_snapshot',
                    side_effect=test.TestingException) as mock_prep:
                self.assertRaises(test.TestingException,
                                  drvr.snapshot, self.context,
                                  self.instance_ref, image['id'],
                                  self.mock_update_task_state)
                mock_prep.assert_called_once_with(
                    self.context, False, p_state, self.instance_ref)
|
|
|
|
|
|
class LXCSnapshotTests(LibvirtSnapshotTests):
    """Re-run the entire libvirt snapshot suite with the LXC virt type."""

    def setUp(self):
        super(LXCSnapshotTests, self).setUp()
        # All tests are inherited; only the hypervisor type is flipped so
        # the inherited cases exercise the LXC code paths.
        self.flags(virt_type='lxc', group='libvirt')

    def test_raw_with_rbd_clone_failure_does_cold_snapshot(self):
        # The inherited cold-snapshot fallback relies on managedSave,
        # which the LXC driver does not provide.
        self.skipTest("managedSave is not supported with LXC")
|
|
|
|
|
|
class LVMSnapshotTests(_BaseSnapshotTests):
    """Snapshot tests for the LVM image backend."""

    @mock.patch.object(fake_libvirt_utils, 'disk_type', new='lvm')
    @mock.patch.object(libvirt_driver.imagebackend.images,
                       'convert_image',
                       side_effect=_fake_convert_image)
    @mock.patch.object(libvirt_driver.imagebackend.lvm, 'volume_info')
    def _test_lvm_snapshot(self, disk_format, mock_volume_info,
                           mock_convert_image):
        # Point the image backend at an LVM volume group and run the
        # common snapshot flow.
        self.flags(images_type='lvm',
                   images_volume_group='nova-vg', group='libvirt')

        self._test_snapshot(disk_format=disk_format)

        # The LVM backend must inspect the logical volume and convert it
        # from raw into the requested snapshot format.
        mock_volume_info.assert_has_calls([mock.call('/dev/nova-vg/lv')])
        mock_convert_image.assert_called_once_with(
            '/dev/nova-vg/lv', mock.ANY, 'raw', disk_format,
            run_as_root=True)

    def test_raw(self):
        self._test_lvm_snapshot('raw')

    def test_qcow2(self):
        self.flags(snapshot_image_format='qcow2', group='libvirt')
        self._test_lvm_snapshot('qcow2')
|
|
|
|
|
|
class TestLibvirtMultiattach(test.NoDBTestCase):
|
|
"""Libvirt driver tests for volume multiattach support."""
|
|
|
|
    def setUp(self):
        super(TestLibvirtMultiattach, self).setUp()
        # Use the fake libvirt so no real hypervisor connection is made.
        self.useFixture(fakelibvirt.FakeLibvirtFixture())
|
|
    @mock.patch('nova.virt.libvirt.host.Host.has_min_version',
                return_value=True)
    def test_init_host_supports_multiattach_new_enough_libvirt(self, min_ver):
        """Tests that the driver supports multiattach because libvirt>=3.10.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr._set_multiattach_support()
        self.assertTrue(drvr.capabilities['supports_multiattach'])
        # Only the libvirt version is checked; once it passes, the qemu
        # version check is not consulted.
        min_ver.assert_called_once_with(
            lv_ver=libvirt_driver.MIN_LIBVIRT_MULTIATTACH)
|
|
|
|
    @mock.patch('nova.virt.libvirt.host.Host.has_min_version',
                side_effect=[False, False])
    def test_init_host_supports_multiattach_old_enough_qemu(self, min_ver):
        """Tests that the driver supports multiattach because qemu<2.10.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr._set_multiattach_support()
        self.assertTrue(drvr.capabilities['supports_multiattach'])
        # has_min_version is consulted twice: first for libvirt (too old),
        # then for qemu 2.10 (also too old => supported).
        calls = [mock.call(lv_ver=libvirt_driver.MIN_LIBVIRT_MULTIATTACH),
                 mock.call(hv_ver=(2, 10, 0))]
        min_ver.assert_has_calls(calls)
|
|
|
|
# FIXME(mriedem): This test intermittently fails when run at the same time
|
|
# as LibvirtConnTestCase, presumably because of shared global state on the
|
|
# version check.
|
|
# @mock.patch('nova.virt.libvirt.host.Host.has_min_version',
|
|
# side_effect=[False, True])
|
|
# def test_init_host_supports_multiattach_no_support(self,
|
|
# has_min_version):
|
|
# """Tests that the driver does not support multiattach because
|
|
# qemu>=2.10 and libvirt<3.10.
|
|
# """
|
|
# drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
|
# drvr._set_multiattach_support()
|
|
# self.assertFalse(drvr.capabilities['supports_multiattach'])
|
|
# calls = [mock.call(lv_ver=libvirt_driver.MIN_LIBVIRT_MULTIATTACH),
|
|
# mock.call(hv_ver=(2, 10, 0))]
|
|
# has_min_version.assert_has_calls(calls)
|