Skip to content

Commit 8348484

Browse files
Zuul (openstack-gerrit)
authored and committed
Merge "Reattach mdevs to guest on resume" into stable/xena
2 parents 8e87ff5 + 15c32e8 commit 8348484

File tree

4 files changed

+108
-7
lines changed

4 files changed

+108
-7
lines changed

doc/source/admin/virtual-gpu.rst

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -301,6 +301,11 @@ Caveats
301301
that will cause the instance to be set back to ACTIVE. The ``suspend`` action
302302
in the ``os-instance-actions`` API will have an *Error* state.
303303

304+
.. versionchanged:: 25.0.0
305+
306+
This has been resolved in the Yoga release and backported to Xena. See
307+
`bug 1948705`_.
308+
304309
* Resizing an instance with a new flavor that has vGPU resources doesn't
305310
allocate those vGPUs to the instance (the instance is created without
306311
vGPU resources). The proposed workaround is to rebuild the instance after
@@ -350,6 +355,7 @@ For nested vGPUs:
350355

351356
.. _bug 1778563: https://bugs.launchpad.net/nova/+bug/1778563
352357
.. _bug 1762688: https://bugs.launchpad.net/nova/+bug/1762688
358+
.. _bug 1948705: https://bugs.launchpad.net/nova/+bug/1948705
353359

354360
.. Links
355361
.. _Intel GVT-g: https://01.org/igvt-g

nova/tests/unit/virt/libvirt/test_driver.py

Lines changed: 59 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16604,9 +16604,15 @@ def test_resume(self):
1660416604
mock.patch.object(guest, 'sync_guest_time'),
1660516605
mock.patch.object(drvr, '_wait_for_running',
1660616606
side_effect=loopingcall.LoopingCallDone()),
16607+
mock.patch.object(drvr,
16608+
'_get_mdevs_from_guest_config',
16609+
return_value='fake_mdevs'),
16610+
mock.patch.object(drvr, '_attach_mediated_devices'),
1660716611
) as (_get_existing_domain_xml, _create_guest_with_network,
1660816612
_attach_pci_devices, get_instance_pci_devs, get_image_metadata,
16609-
mock_sync_time, mock_wait):
16613+
mock_sync_time, mock_wait,
16614+
_get_mdevs_from_guest_config,
16615+
_attach_mediated_devices):
1661016616
get_image_metadata.return_value = {'bar': 234}
1661116617

1661216618
drvr.resume(self.context, instance, network_info,
@@ -16621,6 +16627,9 @@ def test_resume(self):
1662116627
self.assertTrue(mock_sync_time.called)
1662216628
_attach_pci_devices.assert_has_calls([mock.call(guest,
1662316629
'fake_pci_devs')])
16630+
_attach_mediated_devices.assert_has_calls(
16631+
[mock.call(guest, 'fake_mdevs')]
16632+
)
1662416633

1662516634
@mock.patch.object(host.Host, '_get_domain')
1662616635
@mock.patch.object(libvirt_driver.LibvirtDriver, 'get_info')
@@ -26073,6 +26082,55 @@ def test_detach_mediated_devices_raises_exc(self):
2607326082
self.assertRaises(test.TestingException,
2607426083
self._test_detach_mediated_devices, exc)
2607526084

26085+
@mock.patch.object(libvirt_guest.Guest, 'attach_device')
26086+
def _test_attach_mediated_devices(self, side_effect, attach_device):
26087+
dom_without_vgpu = (
26088+
"""<domain> <devices>
26089+
<disk type='file' device='disk'>
26090+
<driver name='qemu' type='qcow2' cache='none'/>
26091+
<source file='xxx'/>
26092+
<target dev='vda' bus='virtio'/>
26093+
<alias name='virtio-disk0'/>
26094+
<address type='pci' domain='0x0000' bus='0x00'
26095+
slot='0x04' function='0x0'/>
26096+
</disk>
26097+
</devices></domain>""")
26098+
26099+
vgpu_xml = (
26100+
"""<domain> <devices>
26101+
<hostdev mode='subsystem' type='mdev' managed='no'
26102+
model='vfio-pci'>
26103+
<source>
26104+
<address uuid='81db53c6-6659-42a0-a34c-1507fdc72983'/>
26105+
</source>
26106+
<alias name='hostdev0'/>
26107+
<address type='pci' domain='0x0000' bus='0x00' slot='0x05'
26108+
function='0x0'/>
26109+
</hostdev>
26110+
</devices></domain>""")
26111+
26112+
attach_device.side_effect = side_effect
26113+
26114+
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
26115+
guest = libvirt_guest.Guest(FakeVirtDomain(fake_xml=dom_without_vgpu))
26116+
mdevs = drvr._get_mdevs_from_guest_config(vgpu_xml)
26117+
drvr._attach_mediated_devices(guest, mdevs)
26118+
return attach_device
26119+
26120+
def test_attach_mediated_devices(self):
26121+
def fake_attach_device(cfg_obj, **kwargs):
26122+
self.assertIsInstance(cfg_obj,
26123+
vconfig.LibvirtConfigGuestHostdevMDEV)
26124+
26125+
attach_mock = self._test_attach_mediated_devices(fake_attach_device)
26126+
attach_mock.assert_called_once_with(mock.ANY, live=True)
26127+
26128+
def test_attach_mediated_devices_raises_exc(self):
26129+
exc = test.TestingException()
26130+
26131+
self.assertRaises(test.TestingException,
26132+
self._test_attach_mediated_devices, exc)
26133+
2607626134
def test_storage_bus_traits__qemu_kvm(self):
2607726135
"""Test getting storage bus traits per virt type.
2607826136
"""

nova/virt/libvirt/driver.py

Lines changed: 37 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -3987,6 +3987,10 @@ def resume(self, context, instance, network_info, block_device_info=None):
39873987
"""resume the specified instance."""
39883988
xml = self._get_existing_domain_xml(instance, network_info,
39893989
block_device_info)
3990+
# NOTE(gsantos): The mediated devices that were removed on suspension
3991+
# are still present in the xml. Let's take their references from it
3992+
# and re-attach them.
3993+
mdevs = self._get_mdevs_from_guest_config(xml)
39903994
# NOTE(efried): The instance should already have a vtpm_secret_uuid
39913995
# registered if appropriate.
39923996
guest = self._create_guest_with_network(
@@ -3996,6 +4000,7 @@ def resume(self, context, instance, network_info, block_device_info=None):
39964000
pci_manager.get_instance_pci_devs(instance))
39974001
self._attach_direct_passthrough_ports(
39984002
context, instance, guest, network_info)
4003+
self._attach_mediated_devices(guest, mdevs)
39994004
timer = loopingcall.FixedIntervalLoopingCall(self._wait_for_running,
40004005
instance)
40014006
timer.start(interval=0.5).wait()
@@ -8021,12 +8026,6 @@ def _detach_mediated_devices(self, guest):
80218026
guest.detach_device(mdev_cfg, live=True)
80228027
except libvirt.libvirtError as ex:
80238028
error_code = ex.get_error_code()
8024-
# NOTE(sbauza): There is a pending issue with libvirt that
8025-
# doesn't allow to hot-unplug mediated devices. Let's
8026-
# short-circuit the suspend action and set the instance back
8027-
# to ACTIVE.
8028-
# TODO(sbauza): Once libvirt supports this, amend the resume()
8029-
# operation to support reallocating mediated devices.
80308029
if error_code == libvirt.VIR_ERR_CONFIG_UNSUPPORTED:
80318030
reason = _("Suspend is not supported for instances having "
80328031
"attached mediated devices.")
@@ -8035,6 +8034,38 @@ def _detach_mediated_devices(self, guest):
80358034
else:
80368035
raise
80378036

8037+
def _attach_mediated_devices(self, guest, devs):
8038+
for mdev_cfg in devs:
8039+
try:
8040+
guest.attach_device(mdev_cfg, live=True)
8041+
except libvirt.libvirtError as ex:
8042+
error_code = ex.get_error_code()
8043+
if error_code == libvirt.VIR_ERR_DEVICE_MISSING:
8044+
LOG.warning("The mediated device %s was not found and "
8045+
"won't be reattached to %s.", mdev_cfg, guest)
8046+
else:
8047+
raise
8048+
8049+
def _get_mdevs_from_guest_config(self, xml):
8050+
"""Get all libvirt's mediated devices from a guest's config (XML) file.
8051+
We don't have to worry about those devices being used by another guest,
8052+
since they remain allocated for the current guest as long as they are
8053+
present in the XML.
8054+
8055+
:param xml: The XML from the guest we want to get a list of mdevs from.
8056+
8057+
:returns: A list containing the objects that represent the mediated
8058+
devices attached to the guest's config passed as argument.
8059+
"""
8060+
config = vconfig.LibvirtConfigGuest()
8061+
config.parse_str(xml)
8062+
8063+
devs = []
8064+
for dev in config.devices:
8065+
if isinstance(dev, vconfig.LibvirtConfigGuestHostdevMDEV):
8066+
devs.append(dev)
8067+
return devs
8068+
80388069
def _has_numa_support(self):
80398070
# This means that the host can support LibvirtConfigGuestNUMATune
80408071
# and the nodeset field in LibvirtConfigGuestMemoryBackingPage
Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,6 @@
1+
---
2+
fixes:
3+
- |
4+
Amended the guest resume operation to support mediated devices, as
5+
libvirt's minimum required version (v6.0.0) supports the hot-plug/unplug of
6+
mediated devices, which was addressed in v4.3.0.

0 commit comments

Comments (0)