Skip to content

Commit 15c32e8

Browse files
author
Gustavo Santos
committed
Reattach mdevs to guest on resume
When suspending a VM in OpenStack, Nova detaches all the mediated devices from the guest machine, but does not reattach them on the resume operation. This patch makes Nova reattach the mdevs that were detached when the guest was suspended. This behavior is due to libvirt not supporting the hot-unplug of mediated devices at the time the feature was being developed. The limitation has been lifted since then, and now we have to amend the resume function so it will reattach the mediated devices that were detached on suspension. Closes-bug: #1948705 Signed-off-by: Gustavo Santos <[email protected]> Change-Id: I083929f36d9e78bf7713a87cae6d581e0d946867 (cherry picked from commit 16f7c60)
1 parent 7df9379 commit 15c32e8

File tree

4 files changed

+108
-7
lines changed

4 files changed

+108
-7
lines changed

doc/source/admin/virtual-gpu.rst

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -301,6 +301,11 @@ Caveats
301301
that will cause the instance to be set back to ACTIVE. The ``suspend`` action
302302
in the ``os-instance-actions`` API will have an *Error* state.
303303

304+
.. versionchanged:: 25.0.0
305+
306+
This has been resolved in the Yoga release and backported to Xena. See
307+
`bug 1948705`_.
308+
304309
* Resizing an instance with a new flavor that has vGPU resources doesn't
305310
allocate those vGPUs to the instance (the instance is created without
306311
vGPU resources). The proposed workaround is to rebuild the instance after
@@ -350,6 +355,7 @@ For nested vGPUs:
350355

351356
.. _bug 1778563: https://bugs.launchpad.net/nova/+bug/1778563
352357
.. _bug 1762688: https://bugs.launchpad.net/nova/+bug/1762688
358+
.. _bug 1948705: https://bugs.launchpad.net/nova/+bug/1948705
353359

354360
.. Links
355361
.. _Intel GVT-g: https://01.org/igvt-g

nova/tests/unit/virt/libvirt/test_driver.py

Lines changed: 59 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16604,9 +16604,15 @@ def test_resume(self):
1660416604
mock.patch.object(guest, 'sync_guest_time'),
1660516605
mock.patch.object(drvr, '_wait_for_running',
1660616606
side_effect=loopingcall.LoopingCallDone()),
16607+
mock.patch.object(drvr,
16608+
'_get_mdevs_from_guest_config',
16609+
return_value='fake_mdevs'),
16610+
mock.patch.object(drvr, '_attach_mediated_devices'),
1660716611
) as (_get_existing_domain_xml, _create_guest_with_network,
1660816612
_attach_pci_devices, get_instance_pci_devs, get_image_metadata,
16609-
mock_sync_time, mock_wait):
16613+
mock_sync_time, mock_wait,
16614+
_get_mdevs_from_guest_config,
16615+
_attach_mediated_devices):
1661016616
get_image_metadata.return_value = {'bar': 234}
1661116617

1661216618
drvr.resume(self.context, instance, network_info,
@@ -16621,6 +16627,9 @@ def test_resume(self):
1662116627
self.assertTrue(mock_sync_time.called)
1662216628
_attach_pci_devices.assert_has_calls([mock.call(guest,
1662316629
'fake_pci_devs')])
16630+
_attach_mediated_devices.assert_has_calls(
16631+
[mock.call(guest, 'fake_mdevs')]
16632+
)
1662416633

1662516634
@mock.patch.object(host.Host, '_get_domain')
1662616635
@mock.patch.object(libvirt_driver.LibvirtDriver, 'get_info')
@@ -26114,6 +26123,55 @@ def test_detach_mediated_devices_raises_exc(self):
2611426123
self.assertRaises(test.TestingException,
2611526124
self._test_detach_mediated_devices, exc)
2611626125

26126+
@mock.patch.object(libvirt_guest.Guest, 'attach_device')
def _test_attach_mediated_devices(self, side_effect, attach_device):
    # Helper for the reattach-on-resume tests: the guest domain itself
    # carries no vGPU, while the mdev to reattach is parsed from a
    # separate config (mirroring resume(), which reads the mdevs from
    # the suspended guest's saved XML).
    dom_without_vgpu = (
        """<domain> <devices>
        <disk type='file' device='disk'>
        <driver name='qemu' type='qcow2' cache='none'/>
        <source file='xxx'/>
        <target dev='vda' bus='virtio'/>
        <alias name='virtio-disk0'/>
        <address type='pci' domain='0x0000' bus='0x00'
        slot='0x04' function='0x0'/>
        </disk>
        </devices></domain>""")

    # One mediated device (<hostdev type='mdev'>) to be hot-plugged.
    vgpu_xml = (
        """<domain> <devices>
        <hostdev mode='subsystem' type='mdev' managed='no'
        model='vfio-pci'>
        <source>
        <address uuid='81db53c6-6659-42a0-a34c-1507fdc72983'/>
        </source>
        <alias name='hostdev0'/>
        <address type='pci' domain='0x0000' bus='0x00' slot='0x05'
        function='0x0'/>
        </hostdev>
        </devices></domain>""")

    # side_effect lets callers either inspect the attached config or
    # force attach_device to raise.
    attach_device.side_effect = side_effect

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    guest = libvirt_guest.Guest(FakeVirtDomain(fake_xml=dom_without_vgpu))
    mdevs = drvr._get_mdevs_from_guest_config(vgpu_xml)
    drvr._attach_mediated_devices(guest, mdevs)
    # Return the mock so callers can assert on the attach calls.
    return attach_device
26160+
26161+
def test_attach_mediated_devices(self):
    """The parsed mdev config is hot-plugged exactly once, live."""
    def _check_device(dev_cfg, **kwargs):
        self.assertIsInstance(
            dev_cfg, vconfig.LibvirtConfigGuestHostdevMDEV)

    mock_attach = self._test_attach_mediated_devices(_check_device)
    mock_attach.assert_called_once_with(mock.ANY, live=True)
26168+
26169+
def test_attach_mediated_devices_raises_exc(self):
    """Errors other than DEVICE_MISSING propagate out of the attach."""
    self.assertRaises(
        test.TestingException,
        self._test_attach_mediated_devices, test.TestingException())
26174+
2611726175
def test_storage_bus_traits__qemu_kvm(self):
2611826176
"""Test getting storage bus traits per virt type.
2611926177
"""

nova/virt/libvirt/driver.py

Lines changed: 37 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -3987,6 +3987,10 @@ def resume(self, context, instance, network_info, block_device_info=None):
39873987
"""resume the specified instance."""
39883988
xml = self._get_existing_domain_xml(instance, network_info,
39893989
block_device_info)
3990+
# NOTE(gsantos): The mediated devices that were removed on suspension
3991+
# are still present in the xml. Let's take their references from it
3992+
# and re-attach them.
3993+
mdevs = self._get_mdevs_from_guest_config(xml)
39903994
# NOTE(efried): The instance should already have a vtpm_secret_uuid
39913995
# registered if appropriate.
39923996
guest = self._create_guest_with_network(
@@ -3996,6 +4000,7 @@ def resume(self, context, instance, network_info, block_device_info=None):
39964000
pci_manager.get_instance_pci_devs(instance))
39974001
self._attach_direct_passthrough_ports(
39984002
context, instance, guest, network_info)
4003+
self._attach_mediated_devices(guest, mdevs)
39994004
timer = loopingcall.FixedIntervalLoopingCall(self._wait_for_running,
40004005
instance)
40014006
timer.start(interval=0.5).wait()
@@ -8021,12 +8026,6 @@ def _detach_mediated_devices(self, guest):
80218026
guest.detach_device(mdev_cfg, live=True)
80228027
except libvirt.libvirtError as ex:
80238028
error_code = ex.get_error_code()
8024-
# NOTE(sbauza): There is a pending issue with libvirt that
8025-
# doesn't allow to hot-unplug mediated devices. Let's
8026-
# short-circuit the suspend action and set the instance back
8027-
# to ACTIVE.
8028-
# TODO(sbauza): Once libvirt supports this, amend the resume()
8029-
# operation to support reallocating mediated devices.
80308029
if error_code == libvirt.VIR_ERR_CONFIG_UNSUPPORTED:
80318030
reason = _("Suspend is not supported for instances having "
80328031
"attached mediated devices.")
@@ -8035,6 +8034,38 @@ def _detach_mediated_devices(self, guest):
80358034
else:
80368035
raise
80378036

8037+
def _attach_mediated_devices(self, guest, devs):
    """Hot-plug mediated device configs back into a guest.

    :param guest: the nova.virt.libvirt.guest.Guest to attach to
    :param devs: LibvirtConfigGuestHostdevMDEV objects to attach
    """
    for dev_cfg in devs:
        try:
            guest.attach_device(dev_cfg, live=True)
        except libvirt.libvirtError as exc:
            if exc.get_error_code() != libvirt.VIR_ERR_DEVICE_MISSING:
                raise
            # The mdev disappeared from the host while the guest was
            # suspended; log and carry on with the remaining devices.
            LOG.warning("The mediated device %s was not found and "
                        "won't be reattached to %s.", dev_cfg, guest)
8048+
8049+
def _get_mdevs_from_guest_config(self, xml):
    """Get all libvirt's mediated devices from a guest's config (XML) file.
    We don't have to worry about those devices being used by another guest,
    since they remain allocated for the current guest as long as they are
    present in the XML.

    :param xml: The XML from the guest we want to get a list of mdevs from.

    :returns: A list containing the objects that represent the mediated
              devices attached to the guest's config passed as argument.
    """
    guest_config = vconfig.LibvirtConfigGuest()
    guest_config.parse_str(xml)

    # Keep only the <hostdev type='mdev'> entries from the device list.
    return [dev for dev in guest_config.devices
            if isinstance(dev, vconfig.LibvirtConfigGuestHostdevMDEV)]
8068+
80388069
def _has_numa_support(self):
80398070
# This means that the host can support LibvirtConfigGuestNUMATune
80408071
# and the nodeset field in LibvirtConfigGuestMemoryBackingPage
Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,6 @@
1+
---
2+
fixes:
3+
- |
4+
Amended the guest resume operation to also reattach mediated devices:
libvirt has supported hot-plug/unplug of mediated devices since v4.3.0,
which is below Nova's current minimum required libvirt version (v6.0.0).

0 commit comments

Comments
 (0)