Skip to content

Commit 04ca0b0

Browse files
fengidri authored and mstsirkin committed
virtio_pci: support VIRTIO_F_RING_RESET
This patch implements virtio pci support for QUEUE RESET. Performing reset on a queue is divided into these steps: 1. notify the device to reset the queue 2. recycle the buffer submitted 3. reset the vring (may re-alloc) 4. mmap vring to device, and enable the queue This patch implements virtio_reset_vq(), virtio_enable_resetq() in the pci scenario. Signed-off-by: Xuan Zhuo <[email protected]> Acked-by: Jason Wang <[email protected]> Message-Id: <[email protected]> Signed-off-by: Michael S. Tsirkin <[email protected]>
1 parent 56bdc06 commit 04ca0b0

File tree

2 files changed

+97
-3
lines changed

2 files changed

+97
-3
lines changed

drivers/virtio/virtio_pci_common.c

Lines changed: 9 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -214,9 +214,15 @@ static void vp_del_vq(struct virtqueue *vq)
214214
struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
215215
unsigned long flags;
216216

217-
spin_lock_irqsave(&vp_dev->lock, flags);
218-
list_del(&info->node);
219-
spin_unlock_irqrestore(&vp_dev->lock, flags);
217+
/*
218+
* If it fails during re-enable reset vq. This way we won't rejoin
219+
* info->node to the queue. Prevent unexpected irqs.
220+
*/
221+
if (!vq->reset) {
222+
spin_lock_irqsave(&vp_dev->lock, flags);
223+
list_del(&info->node);
224+
spin_unlock_irqrestore(&vp_dev->lock, flags);
225+
}
220226

221227
vp_dev->del_vq(info);
222228
kfree(info);

drivers/virtio/virtio_pci_modern.c

Lines changed: 88 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -34,6 +34,9 @@ static void vp_transport_features(struct virtio_device *vdev, u64 features)
3434
if ((features & BIT_ULL(VIRTIO_F_SR_IOV)) &&
3535
pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV))
3636
__virtio_set_bit(vdev, VIRTIO_F_SR_IOV);
37+
38+
if (features & BIT_ULL(VIRTIO_F_RING_RESET))
39+
__virtio_set_bit(vdev, VIRTIO_F_RING_RESET);
3740
}
3841

3942
/* virtio config->finalize_features() implementation */
@@ -199,6 +202,87 @@ static int vp_active_vq(struct virtqueue *vq, u16 msix_vec)
199202
return 0;
200203
}
201204

205+
/*
 * Disable a virtqueue and request the device to reset it
 * (VIRTIO_F_RING_RESET, virtio 1.2).
 *
 * Step 1 of the queue-reset sequence: tell the device to reset the
 * queue, detach the vq from the shared interrupt handling, and wait for
 * any in-flight irq handler to finish before the caller recycles the
 * ring buffers.
 *
 * Returns 0 on success, -ENOENT if the device did not negotiate
 * VIRTIO_F_RING_RESET.
 */
static int vp_modern_disable_vq_and_reset(struct virtqueue *vq)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
	struct virtio_pci_vq_info *info;
	unsigned long flags;

	if (!virtio_has_feature(vq->vdev, VIRTIO_F_RING_RESET))
		return -ENOENT;

	/* Notify the device to reset this queue before tearing down the
	 * driver-side state.
	 */
	vp_modern_set_queue_reset(mdev, vq->index);

	info = vp_dev->vqs[vq->index];

	/* delete vq from irq handler */
	spin_lock_irqsave(&vp_dev->lock, flags);
	list_del(&info->node);
	spin_unlock_irqrestore(&vp_dev->lock, flags);

	/* Re-init the node so a later list_del() (e.g. in vp_del_vq() when
	 * re-enable fails) is harmless.
	 */
	INIT_LIST_HEAD(&info->node);

#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
	__virtqueue_break(vq);
#endif

	/* For the case where vq has an exclusive irq, call synchronize_irq() to
	 * wait for completion.
	 *
	 * note: We can't use disable_irq() since it conflicts with the affinity
	 * managed IRQ that is used by some drivers.
	 */
	if (vp_dev->per_vq_vectors && info->msix_vector != VIRTIO_MSI_NO_VECTOR)
		synchronize_irq(pci_irq_vector(vp_dev->pci_dev, info->msix_vector));

	/* Mark the vq as being in reset; checked by vp_del_vq() and by
	 * vp_modern_enable_vq_after_reset().
	 */
	vq->reset = true;

	return 0;
}
243+
244+
/*
 * Re-enable a virtqueue that was previously reset via
 * vp_modern_disable_vq_and_reset().
 *
 * Verifies that the device has completed the reset and that the queue is
 * not already enabled, re-activates the vring, rejoins the vq to the irq
 * handling list (only if it has a callback), and finally enables the
 * queue on the device.
 *
 * Returns 0 on success, -EBUSY if the vq is not in reset state or the
 * device-side state is inconsistent, or a negative error from
 * vp_active_vq().
 */
static int vp_modern_enable_vq_after_reset(struct virtqueue *vq)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
	struct virtio_pci_vq_info *info;
	unsigned long flags, index;
	int err;

	/* Only a vq previously put into reset may be re-enabled. */
	if (!vq->reset)
		return -EBUSY;

	index = vq->index;
	info = vp_dev->vqs[index];

	/* Device has not finished resetting the queue yet. */
	if (vp_modern_get_queue_reset(mdev, index))
		return -EBUSY;

	/* Queue is unexpectedly already enabled on the device. */
	if (vp_modern_get_queue_enable(mdev, index))
		return -EBUSY;

	/* Re-map the vring to the device and restore the msix vector. */
	err = vp_active_vq(vq, info->msix_vector);
	if (err)
		return err;

	if (vq->callback) {
		spin_lock_irqsave(&vp_dev->lock, flags);
		list_add(&info->node, &vp_dev->virtqueues);
		spin_unlock_irqrestore(&vp_dev->lock, flags);
	} else {
		INIT_LIST_HEAD(&info->node);
	}

#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
	__virtqueue_unbreak(vq);
#endif

	vp_modern_set_queue_enable(&vp_dev->mdev, index, true);
	/* Clear reset state only after the queue is fully re-enabled. */
	vq->reset = false;

	return 0;
}
285+
202286
static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
203287
{
204288
return vp_modern_config_vector(&vp_dev->mdev, vector);
@@ -413,6 +497,8 @@ static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
413497
.set_vq_affinity = vp_set_vq_affinity,
414498
.get_vq_affinity = vp_get_vq_affinity,
415499
.get_shm_region = vp_get_shm_region,
500+
.disable_vq_and_reset = vp_modern_disable_vq_and_reset,
501+
.enable_vq_after_reset = vp_modern_enable_vq_after_reset,
416502
};
417503

418504
static const struct virtio_config_ops virtio_pci_config_ops = {
@@ -431,6 +517,8 @@ static const struct virtio_config_ops virtio_pci_config_ops = {
431517
.set_vq_affinity = vp_set_vq_affinity,
432518
.get_vq_affinity = vp_get_vq_affinity,
433519
.get_shm_region = vp_get_shm_region,
520+
.disable_vq_and_reset = vp_modern_disable_vq_and_reset,
521+
.enable_vq_after_reset = vp_modern_enable_vq_after_reset,
434522
};
435523

436524
/* the PCI probing function */

0 commit comments

Comments (0)