qemu/virtio-pci-decouple-the-single-vector-from-the-inter.patch
Commit 946c69b887 (Jiabo Feng): QEMU update to version 6.2.0-84 (master)
- hw/arm/fsl-imx: Do not ignore Error argument
- hw/net/cadence_gem.c: spelling fixes: Octects
- tests/qtest: check the return value
- libvhost-user: Fix VHOST_USER_GET_MAX_MEM_SLOTS reply (mainline commit 69a5daec06f423843ce1bb9be5fb049314996f78, bugfix)
- io_uring: fix short read slow path (mainline commit c06fc7ce147e57ab493bad9263f1601b8298484b, bugfix)
- libvhost-user: Fix VHOST_USER_ADD_MEM_REG reply (mainline commit 7f27d20ded2f480f3e66d03f90ea71507b834276, bugfix)
- qsd: Unlink absolute PID file path (mainline commit 9d8f8233b9fa525a7e37350fbc18877051128c5d, bugfix)
- net: Fix a misleading error message
- vdpa: stop all svq on device deletion
- vhost: release virtqueue objects in error path
- vhost: fix the fd leak
- virtio: i2c: Check notifier helpers for VIRTIO_CONFIG_IRQ_IDX
- hw/virtio: fix typo in VIRTIO_CONFIG_IRQ_IDX comments
- virtio-net: clear guest_announce feature if no cvq backend
- vdpa: fix VHOST_BACKEND_F_IOTLB_ASID flag check
- vdpa: do not block migration if device has cvq and x-svq=on
- vdpa net: block migration if the device has CVQ
- vdpa: Return -EIO if device ack is VIRTIO_NET_ERR in _load_mq()
- vdpa: Return -EIO if device ack is VIRTIO_NET_ERR in _load_mac()
- vdpa: fix not using CVQ buffer in case of error
- vdpa: Fix possible use-after-free for VirtQueueElement
- hw/virtio: fix vhost_user_read tracepoint
- vhost: Fix false positive out-of-bounds
- vhost: fix possible wrap in SVQ descriptor ring
- vhost: move iova_tree set to vhost_svq_start
- vhost: Always store new kick fd on vhost_svq_set_svq_kick_fd
- virtio-crypto: verify src&dst buffer length for sym request
- vdpa: commit all host notifier MRs in a single MR transaction
- vdpa: harden the error path if get_iova_range failed
- vdpa-dev: get iova range explicitly
- virtio-pci: add support for configure interrupt
- virtio-mmio: add support for configure interrupt
- virtio-net: add support for configure interrupt
- vhost: add support for configure interrupt
- virtio: add support for configure interrupt
- vhost-vdpa: add support for config interrupt
- vhost: introduce new VhostOps vhost_set_config_call
- virtio-pci: decouple the single vector from the interrupt process
- virtio-pci: decouple notifier from interrupt process
- virtio: introduce macro VIRTIO_CONFIG_IRQ_IDX
- vdpa: do not handle VIRTIO_NET_F_GUEST_ANNOUNCE in vhost-vdpa
- vdpa: handle VIRTIO_NET_CTRL_ANNOUNCE in vhost_vdpa_net_handle_ctrl_avail
- vhost: fix vq dirty bitmap syncing when vIOMMU is enabled
- hw/virtio: gracefully handle unset vhost_dev vdev
- hw/virtio/vhost: Fix typo in comment.
- vdpa: always start CVQ in SVQ mode if possible
- vdpa: add shadow_data to vhost_vdpa
- vdpa: store x-svq parameter in VhostVDPAState
- vdpa: add asid parameter to vhost_vdpa_dma_map/unmap
- vdpa: allocate SVQ array unconditionally
- vdpa: move SVQ vring features check to net/
- vdpa: request iova_range only once
- vdpa: add vhost_vdpa_net_valid_svq_features
- vhost: allocate SVQ device file descriptors at device start
- vhost: set SVQ device call handler at SVQ start
- vdpa: use v->shadow_vqs_enabled in vhost_vdpa_svqs_start & stop
- vhost: enable vrings in vhost_dev_start() for vhost-user devices
- vhost-vdpa: fix assert !virtio_net_get_subqueue(nc)->async_tx.elem in virtio_net_reset
- net/vhost-vdpa.c: Fix clang compilation failure
- vhost-vdpa: allow passing opened vhostfd to vhost-vdpa
- vdpa: Remove shadow CVQ command check
- vdpa: Delete duplicated vdpa_feature_bits entry
- hw/virtio: add some vhost-user trace events
- vdpa: Allow MQ feature in SVQ
- virtio-net: Update virtio-net curr_queue_pairs in vdpa backends
- vdpa: validate MQ CVQ commands
- vdpa: Add vhost_vdpa_net_load_mq
- vdpa: extract vhost_vdpa_net_load_mac from vhost_vdpa_net_load
- vdpa: Make VhostVDPAState cvq_cmd_in_buffer control ack type
- vdpa: Delete CVQ migration blocker
- vdpa: Add virtio-net mac address via CVQ at start
- vhost_net: add NetClientState->load() callback
- vdpa: extract vhost_vdpa_net_cvq_add from vhost_vdpa_net_handle_ctrl_avail
- vdpa: Move command buffers map to start of net device
- vdpa: add net_vhost_vdpa_cvq_info NetClientInfo
- vhost_net: Add NetClientInfo stop callback
- vhost_net: Add NetClientInfo start callback
- vdpa: Use ring hwaddr at vhost_vdpa_svq_unmap_ring
- vdpa: Make SVQ vring unmapping return void
- vdpa: Remove SVQ vring from iova_tree at shutdown
- util: accept iova_tree_remove_parameter by value
- vdpa: do not save failed dma maps in SVQ iova tree
- vdpa: Skip the maps not in the iova tree
- vdpa: Fix file descriptor leak on get features error
- vdpa: Fix memory listener deletions of iova tree
- vhost: Get vring base from vq, not svq
- vdpa: Add x-svq to NetdevVhostVDPAOptions
- vdpa: Add device migration blocker
- vdpa: Extract get features part from vhost_vdpa_get_max_queue_pairs
- vdpa: Buffer CVQ support on shadow virtqueue
- vdpa: manual forward CVQ buffers
- vdpa: Export vhost_vdpa_dma_map and unmap calls
- vhost: Add svq avail_handler callback
- vhost: add vhost_svq_poll
- vhost: Expose vhost_svq_add
- vhost: add vhost_svq_push_elem
- vhost: Track number of descs in SVQDescState
- vhost: Add SVQDescState
- vhost: Decouple vhost_svq_add from VirtQueueElement
- vhost: Check for queue full at vhost_svq_add
- vhost: Move vhost_svq_kick call to vhost_svq_add
- vhost: Reorder vhost_svq_kick
- vdpa: Avoid compiler to squash reads to used idx
- virtio-net: Expose ctrl virtqueue logic
- virtio-net: Expose MAC_TABLE_ENTRIES
- vhost: move descriptor translation to vhost_svq_vring_write_descs
- util: Return void on iova_tree_remove
- virtio-net: don't handle mq request in userspace handler for vhost-vdpa
- vhost-vdpa: change name and polarity for vhost_vdpa_one_time_request()
- vhost-vdpa: backend feature should set only once
- vhost-vdpa: fix improper cleanup in net_init_vhost_vdpa
- virtio-net: align ctrl_vq index for non-mq guest for vhost_vdpa
- virtio: add vhost support for virtio devices
- include/hw: start documenting the vhost API
- hw/virtio: add vhost_user_[read|write] trace points
- vhost: Fix element in vhost_svq_add failure
- vdpa: Fix index calculus at vhost_vdpa_svqs_start
- vdpa: Fix bad index calculus at vhost_vdpa_get_vring_base
- vhost: Fix device's used descriptor dequeue
- vhost: Track descriptor chain in private at SVQ
- vdpa: Add missing tracing to batch mapping functions
- vhost-vdpa: fix typo in a comment
- virtio: fix --enable-vhost-user build on non-Linux
- vdpa: Expose VHOST_F_LOG_ALL on SVQ
- vdpa: Never set log_base addr if SVQ is enabled
- vdpa: Adapt vhost_vdpa_get_vring_base to SVQ
- vdpa: Add custom IOTLB translations to SVQ
- vhost: Add VhostIOVATree
- util: add iova_tree_find_iova
- util: Add iova_tree_alloc_map
- vhost: Shadow virtqueue buffers forwarding
- vdpa: adapt vhost_ops callbacks to svq
- virtio: Add vhost_svq_get_vring_addr
- vhost: Add vhost_svq_valid_features to shadow vq
- vhost: Add Shadow VirtQueue call forwarding capabilities
- vhost: Add Shadow VirtQueue kick forwarding capabilities
- vhost: Add VhostShadowVirtqueue
- vdpa: Make ncs autofree
- Revert "virtio: introduce macro IRTIO_CONFIG_IRQ_IDX"
- Revert "virtio-pci: decouple notifier from interrupt process"
- Revert "virtio-pci: decouple the single vector from the interrupt process"
- Revert "vhost-vdpa: add support for config interrupt"
- Revert "virtio: add support for configure interrupt"
- Revert "vhost: add support for configure interrupt"
- Revert "virtio-net: add support for configure interrupt"
- Revert "virtio-mmio: add support for configure interrupt"
- Revert "virtio-pci: add support for configure interrupt"
- Revert "vhost: introduce new VhostOps vhost_set_config_call"
- virtio: signal after wrapping packed used_idx
- target/i386: Adjust feature level according to FEAT_7_1_EDX
- target/i386: Add new CPU model GraniteRapids
- target/i386: Add support for PREFETCHIT0/1 in CPUID enumeration
- target/i386: Add support for AVX-NE-CONVERT in CPUID enumeration
- target/i386: Add support for AVX-VNNI-INT8 in CPUID enumeration
- target/i386: Add support for AVX-IFMA in CPUID enumeration
- target/i386: Add support for AMX-FP16 in CPUID enumeration
- target/i386: Add support for CMPCCXADD in CPUID enumeration
- tracetool: avoid invalid escape in Python string
- hw/pvrdma: Protect against buggy or malicious guest driver
- vga: avoid crash if no default vga card (mainline commit 6985d8ede92494f3b791de01e8ee9306eb6d5e4a, bugfix)
- qom/object: Remove circular include dependency (mainline commit 5bba9bcfbb42e7c016626420e148a1bf1b080835, bugfix)
- artist: set memory region owners for buffers to the artist device (mainline commit 39fbaeca096a9bf6cbe2af88572c1cb2aa62aa8c, bugfix)
- virtio-iommu: Fix the partial copy of probe request (mainline commit 45461aace83d961e933b27519b81d17b4c690514, bugfix)
- e1000: set RX descriptor status in a separate operation (mainline commit 034d00d4858161e1d4cff82d8d230bce874a04d3, bugfix)
- vhost: introduce new VhostOps vhost_set_config_call
- vhost: stick to -errno error return convention
- vhost-user: stick to -errno error return convention
- vhost-vdpa: stick to -errno error return convention
- virtio-pci: add support for configure interrupt
- virtio-mmio: add support for configure interrupt
- virtio-net: add support for configure interrupt
- vhost: add support for configure interrupt
- virtio: add support for configure interrupt
- vhost-vdpa: add support for config interrupt
- virtio-pci: decouple the single vector from the interrupt process
- virtio-pci: decouple notifier from interrupt process
- virtio: introduce macro IRTIO_CONFIG_IRQ_IDX
- pci: Fix the update of interrupt disable bit in PCI_COMMAND register
- hw/timer/npcm7xx_timer: Prevent timer from counting down past zero
- tpm_crb: mark command buffer as dirty on request completion (mainline commit e37a0ef4605e5d2041785ff3fc89ca6021faf7a0, bugfix)
- pci: fix overflow in snprintf string formatting (mainline commit 36f18c6989a3d1ff1d7a0e50b0868ef3958299b4, bugfix)
- hw/usb/hcd-ehci: fix writeback order (mainline commit f471e8b060798f26a7fc339c6152f82f22a7b33d, bugfix)
- qemu-timer: Skip empty timer lists before locking in qemu_clock_deadline_ns_all (mainline commit 3f42906c9ab2c777a895b48b87b8107167e4a275, bugfix)
- semihosting/config: Merge --semihosting-config option groups (mainline commit 90c072e063737e9e8f431489bbd334452f89056e, bugfix)
- semihosting: fix memleak at semihosting_arg_fallback
- target/i386: Export GDS_NO bit to guests

Signed-off-by: Jiabo Feng <fengjiabo1@huawei.com>
2023-11-28 16:41:25 +08:00

From b3223ddde84840ccc6bb2282dfc146616b85a362 Mon Sep 17 00:00:00 2001
From: fangyi <eric.fangyi@huawei.com>
Date: Thu, 16 Nov 2023 09:54:51 +0800
Subject: [PATCH] virtio-pci: decouple the single vector from the interrupt
process

To reuse the interrupt-handling code for the configure interrupt, the
single-vector handling needs to be decoupled from the per-device interrupt
process. Add the new functions kvm_virtio_pci_vector_use_one() and
kvm_virtio_pci_vector_release_one(); each handles a single vector, and the
whole process is completed by looping over the vq numbers.
Signed-off-by: Cindy Lu <lulu@redhat.com>
Message-Id: <20211104164827.21911-4-lulu@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: fangyi <eric.fangyi@huawei.com>
---
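As a rough illustration of the structure this patch introduces, here is a
minimal, self-contained C model of the pattern (hypothetical names, not QEMU
code; the MSI-X, irqfd and notifier details are omitted): setup and teardown
are split into per-queue helpers that each handle exactly one vector and undo
only their own partial work on failure, and the nvqs entry points reduce to
plain loops over the queues, mirroring the hunks further down.

#include <stdio.h>

#define NVQS 4

/* Per-queue setup: configures one vector; on failure it undoes only its
 * own partial work and returns a negative error code. */
static int vector_use_one(int queue_no)
{
    printf("use vector for queue %d\n", queue_no);
    return 0;
}

/* Per-queue teardown, mirroring vector_use_one(). */
static void vector_release_one(int queue_no)
{
    printf("release vector for queue %d\n", queue_no);
}

/* The old "use all vectors" entry point becomes a loop over the queues
 * (the real patch's loop handles errors slightly differently). */
static int vector_use(int nvqs)
{
    int queue_no;
    int ret = 0;

    for (queue_no = 0; queue_no < nvqs; queue_no++) {
        ret = vector_use_one(queue_no);
        if (ret < 0) {
            break;
        }
    }
    return ret;
}

/* Likewise, releasing all vectors is just a loop over the per-queue helper. */
static void vector_release(int nvqs)
{
    int queue_no;

    for (queue_no = 0; queue_no < nvqs; queue_no++) {
        vector_release_one(queue_no);
    }
}

int main(void)
{
    if (vector_use(NVQS) == 0) {
        vector_release(NVQS);
    }
    return 0;
}

With this split, the configure-interrupt support added later in the series
can reuse the same per-vector helpers instead of duplicating the per-queue
loop.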
hw/virtio/virtio-pci.c | 131 +++++++++++++++++++++++------------------
1 file changed, 73 insertions(+), 58 deletions(-)
diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
index 85d7357f66..75be770971 100644
--- a/hw/virtio/virtio-pci.c
+++ b/hw/virtio/virtio-pci.c
@@ -762,7 +762,6 @@ static uint32_t virtio_read_config(PCIDevice *pci_dev,
}
static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy,
- unsigned int queue_no,
unsigned int vector)
{
VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
@@ -825,87 +824,103 @@ static int virtio_pci_get_notifier(VirtIOPCIProxy *proxy, int queue_no,
return 0;
}
-static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
+static int kvm_virtio_pci_vector_use_one(VirtIOPCIProxy *proxy, int queue_no)
{
+ unsigned int vector;
+ int ret;
+ EventNotifier *n;
PCIDevice *dev = &proxy->pci_dev;
VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
- unsigned int vector;
- int ret, queue_no;
- EventNotifier *n;
- for (queue_no = 0; queue_no < nvqs; queue_no++) {
- if (!virtio_queue_get_num(vdev, queue_no)) {
- break;
- }
- ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector);
- if (ret < 0) {
- break;
- }
- if (vector >= msix_nr_vectors_allocated(dev)) {
- continue;
- }
- ret = kvm_virtio_pci_vq_vector_use(proxy, queue_no, vector);
+
+ ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector);
+ if (ret < 0) {
+ return ret;
+ }
+ if (vector >= msix_nr_vectors_allocated(dev)) {
+ return 0;
+ }
+ ret = kvm_virtio_pci_vq_vector_use(proxy, vector);
+ if (ret < 0) {
+ goto undo;
+ }
+ /*
+ * If guest supports masking, set up irqfd now.
+ * Otherwise, delay until unmasked in the frontend.
+ */
+ if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
+ ret = kvm_virtio_pci_irqfd_use(proxy, n, vector);
if (ret < 0) {
+ kvm_virtio_pci_vq_vector_release(proxy, vector);
goto undo;
}
- /* If guest supports masking, set up irqfd now.
- * Otherwise, delay until unmasked in the frontend.
- */
- if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
- ret = kvm_virtio_pci_irqfd_use(proxy, n, vector);
- if (ret < 0) {
- kvm_virtio_pci_vq_vector_release(proxy, vector);
- goto undo;
- }
- }
}
- return 0;
+ return 0;
undo:
- while (--queue_no >= 0) {
- vector = virtio_queue_vector(vdev, queue_no);
- if (vector >= msix_nr_vectors_allocated(dev)) {
- continue;
+
+ vector = virtio_queue_vector(vdev, queue_no);
+ if (vector >= msix_nr_vectors_allocated(dev)) {
+ return ret;
+ }
+ if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
+ ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector);
+ if (ret < 0) {
+ return ret;
}
- if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
- ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector);
- if (ret < 0) {
- break;
- }
- kvm_virtio_pci_irqfd_release(proxy, n, vector);
+ kvm_virtio_pci_irqfd_release(proxy, n, vector);
+ }
+ return ret;
+}
+static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
+{
+ int queue_no;
+ int ret = 0;
+ VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+
+ for (queue_no = 0; queue_no < nvqs; queue_no++) {
+ if (!virtio_queue_get_num(vdev, queue_no)) {
+ return -1;
}
- kvm_virtio_pci_vq_vector_release(proxy, vector);
+ ret = kvm_virtio_pci_vector_use_one(proxy, queue_no);
}
return ret;
}
-static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
+
+static void kvm_virtio_pci_vector_release_one(VirtIOPCIProxy *proxy,
+ int queue_no)
{
- PCIDevice *dev = &proxy->pci_dev;
VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
unsigned int vector;
- int queue_no;
- VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
EventNotifier *n;
- int ret ;
+ int ret;
+ VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
+ PCIDevice *dev = &proxy->pci_dev;
+
+ ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector);
+ if (ret < 0) {
+ return;
+ }
+ if (vector >= msix_nr_vectors_allocated(dev)) {
+ return;
+ }
+ if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
+ kvm_virtio_pci_irqfd_release(proxy, n, vector);
+ }
+ kvm_virtio_pci_vq_vector_release(proxy, vector);
+}
+
+static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
+{
+ int queue_no;
+ VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+
for (queue_no = 0; queue_no < nvqs; queue_no++) {
if (!virtio_queue_get_num(vdev, queue_no)) {
break;
}
- ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector);
- if (ret < 0) {
- break;
- }
- if (vector >= msix_nr_vectors_allocated(dev)) {
- continue;
- }
- /* If guest supports masking, clean up irqfd now.
- * Otherwise, it was cleaned when masked in the frontend.
- */
- if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
- kvm_virtio_pci_irqfd_release(proxy, n, vector);
- }
- kvm_virtio_pci_vq_vector_release(proxy, vector);
+ kvm_virtio_pci_vector_release_one(proxy, queue_no);
}
}
--
2.27.0