qemu/virtio-pci-decouple-notifier-from-interrupt-process.patch
Jiabo Feng 946c69b887 QEMU update to version 6.2.0-84 (master)
- hw/arm/fsl-imx: Do not ignore Error argument
- hw/net/cadence_gem.c: spelling fixes: Octects
- tests/qtest: check the return value
- libvhost-user: Fix VHOST_USER_GET_MAX_MEM_SLOTS reply (mainline commit 69a5daec06f423843ce1bb9be5fb049314996f78, category: bugfix)
- io_uring: fix short read slow path (mainline commit c06fc7ce147e57ab493bad9263f1601b8298484b, category: bugfix)
- libvhost-user: Fix VHOST_USER_ADD_MEM_REG reply (mainline commit 7f27d20ded2f480f3e66d03f90ea71507b834276, category: bugfix)
- qsd: Unlink absolute PID file path (mainline commit 9d8f8233b9fa525a7e37350fbc18877051128c5d, category: bugfix)
- net: Fix a misleading error message
- vdpa: stop all svq on device deletion
- vhost: release virtqueue objects in error path
- vhost: fix the fd leak
- virtio: i2c: Check notifier helpers for VIRTIO_CONFIG_IRQ_IDX
- hw/virtio: fix typo in VIRTIO_CONFIG_IRQ_IDX comments
- virtio-net: clear guest_announce feature if no cvq backend
- vdpa: fix VHOST_BACKEND_F_IOTLB_ASID flag check
- vdpa: do not block migration if device has cvq and x-svq=on
- vdpa net: block migration if the device has CVQ
- vdpa: Return -EIO if device ack is VIRTIO_NET_ERR in _load_mq()
- vdpa: Return -EIO if device ack is VIRTIO_NET_ERR in _load_mac()
- vdpa: fix not using CVQ buffer in case of error
- vdpa: Fix possible use-after-free for VirtQueueElement
- hw/virtio: fix vhost_user_read tracepoint
- vhost: Fix false positive out-of-bounds
- vhost: fix possible wrap in SVQ descriptor ring
- vhost: move iova_tree set to vhost_svq_start
- vhost: Always store new kick fd on vhost_svq_set_svq_kick_fd
- virtio-crypto: verify src&dst buffer length for sym request
- vdpa: commit all host notifier MRs in a single MR transaction
- vdpa: harden the error path if get_iova_range failed
- vdpa-dev: get iova range explicitly
- virtio-pci: add support for configure interrupt
- virtio-mmio: add support for configure interrupt
- virtio-net: add support for configure interrupt
- vhost: add support for configure interrupt
- virtio: add support for configure interrupt
- vhost-vdpa: add support for config interrupt
- vhost: introduce new VhostOps vhost_set_config_call
- virtio-pci: decouple the single vector from the interrupt process
- virtio-pci: decouple notifier from interrupt process
- virtio: introduce macro VIRTIO_CONFIG_IRQ_IDX
- vdpa: do not handle VIRTIO_NET_F_GUEST_ANNOUNCE in vhost-vdpa
- vdpa: handle VIRTIO_NET_CTRL_ANNOUNCE in vhost_vdpa_net_handle_ctrl_avail
- vhost: fix vq dirty bitmap syncing when vIOMMU is enabled
- hw/virtio: gracefully handle unset vhost_dev vdev
- hw/virtio/vhost: Fix typo in comment.
- vdpa: always start CVQ in SVQ mode if possible
- vdpa: add shadow_data to vhost_vdpa
- vdpa: store x-svq parameter in VhostVDPAState
- vdpa: add asid parameter to vhost_vdpa_dma_map/unmap
- vdpa: allocate SVQ array unconditionally
- vdpa: move SVQ vring features check to net/
- vdpa: request iova_range only once
- vdpa: add vhost_vdpa_net_valid_svq_features
- vhost: allocate SVQ device file descriptors at device start
- vhost: set SVQ device call handler at SVQ start
- vdpa: use v->shadow_vqs_enabled in vhost_vdpa_svqs_start & stop
- vhost: enable vrings in vhost_dev_start() for vhost-user devices
- vhost-vdpa: fix assert !virtio_net_get_subqueue(nc)->async_tx.elem in virtio_net_reset
- net/vhost-vdpa.c: Fix clang compilation failure
- vhost-vdpa: allow passing opened vhostfd to vhost-vdpa
- vdpa: Remove shadow CVQ command check
- vdpa: Delete duplicated vdpa_feature_bits entry
- hw/virtio: add some vhost-user trace events
- vdpa: Allow MQ feature in SVQ
- virtio-net: Update virtio-net curr_queue_pairs in vdpa backends
- vdpa: validate MQ CVQ commands
- vdpa: Add vhost_vdpa_net_load_mq
- vdpa: extract vhost_vdpa_net_load_mac from vhost_vdpa_net_load
- vdpa: Make VhostVDPAState cvq_cmd_in_buffer control ack type
- vdpa: Delete CVQ migration blocker
- vdpa: Add virtio-net mac address via CVQ at start
- vhost_net: add NetClientState->load() callback
- vdpa: extract vhost_vdpa_net_cvq_add from vhost_vdpa_net_handle_ctrl_avail
- vdpa: Move command buffers map to start of net device
- vdpa: add net_vhost_vdpa_cvq_info NetClientInfo
- vhost_net: Add NetClientInfo stop callback
- vhost_net: Add NetClientInfo start callback
- vdpa: Use ring hwaddr at vhost_vdpa_svq_unmap_ring
- vdpa: Make SVQ vring unmapping return void
- vdpa: Remove SVQ vring from iova_tree at shutdown
- util: accept iova_tree_remove_parameter by value
- vdpa: do not save failed dma maps in SVQ iova tree
- vdpa: Skip the maps not in the iova tree
- vdpa: Fix file descriptor leak on get features error
- vdpa: Fix memory listener deletions of iova tree
- vhost: Get vring base from vq, not svq
- vdpa: Add x-svq to NetdevVhostVDPAOptions
- vdpa: Add device migration blocker
- vdpa: Extract get features part from vhost_vdpa_get_max_queue_pairs
- vdpa: Buffer CVQ support on shadow virtqueue
- vdpa: manual forward CVQ buffers
- vdpa: Export vhost_vdpa_dma_map and unmap calls
- vhost: Add svq avail_handler callback
- vhost: add vhost_svq_poll
- vhost: Expose vhost_svq_add
- vhost: add vhost_svq_push_elem
- vhost: Track number of descs in SVQDescState
- vhost: Add SVQDescState
- vhost: Decouple vhost_svq_add from VirtQueueElement
- vhost: Check for queue full at vhost_svq_add
- vhost: Move vhost_svq_kick call to vhost_svq_add
- vhost: Reorder vhost_svq_kick
- vdpa: Avoid compiler to squash reads to used idx
- virtio-net: Expose ctrl virtqueue logic
- virtio-net: Expose MAC_TABLE_ENTRIES
- vhost: move descriptor translation to vhost_svq_vring_write_descs
- util: Return void on iova_tree_remove
- virtio-net: don't handle mq request in userspace handler for vhost-vdpa
- vhost-vdpa: change name and polarity for vhost_vdpa_one_time_request()
- vhost-vdpa: backend feature should set only once
- vhost-vdpa: fix improper cleanup in net_init_vhost_vdpa
- virtio-net: align ctrl_vq index for non-mq guest for vhost_vdpa
- virtio: add vhost support for virtio devices
- include/hw: start documenting the vhost API
- hw/virtio: add vhost_user_[read|write] trace points
- vhost: Fix element in vhost_svq_add failure
- vdpa: Fix index calculus at vhost_vdpa_svqs_start
- vdpa: Fix bad index calculus at vhost_vdpa_get_vring_base
- vhost: Fix device's used descriptor dequeue
- vhost: Track descriptor chain in private at SVQ
- vdpa: Add missing tracing to batch mapping functions
- vhost-vdpa: fix typo in a comment
- virtio: fix --enable-vhost-user build on non-Linux
- vdpa: Expose VHOST_F_LOG_ALL on SVQ
- vdpa: Never set log_base addr if SVQ is enabled
- vdpa: Adapt vhost_vdpa_get_vring_base to SVQ
- vdpa: Add custom IOTLB translations to SVQ
- vhost: Add VhostIOVATree
- util: add iova_tree_find_iova
- util: Add iova_tree_alloc_map
- vhost: Shadow virtqueue buffers forwarding
- vdpa: adapt vhost_ops callbacks to svq
- virtio: Add vhost_svq_get_vring_addr
- vhost: Add vhost_svq_valid_features to shadow vq
- vhost: Add Shadow VirtQueue call forwarding capabilities
- vhost: Add Shadow VirtQueue kick forwarding capabilities
- vhost: Add VhostShadowVirtqueue
- vdpa: Make ncs autofree
- Revert "virtio: introduce macro IRTIO_CONFIG_IRQ_IDX"
- Revert "virtio-pci: decouple notifier from interrupt process"
- Revert "virtio-pci: decouple the single vector from the interrupt process"
- Revert "vhost-vdpa: add support for config interrupt"
- Revert "virtio: add support for configure interrupt"
- Revert "vhost: add support for configure interrupt"
- Revert "virtio-net: add support for configure interrupt"
- Revert "virtio-mmio: add support for configure interrupt"
- Revert "virtio-pci: add support for configure interrupt"
- Revert "vhost: introduce new VhostOps vhost_set_config_call"
- virtio: signal after wrapping packed used_idx
- target/i386: Adjust feature level according to FEAT_7_1_EDX
- target/i386: Add new CPU model GraniteRapids
- target/i386: Add support for PREFETCHIT0/1 in CPUID enumeration
- target/i386: Add support for AVX-NE-CONVERT in CPUID enumeration
- target/i386: Add support for AVX-VNNI-INT8 in CPUID enumeration
- target/i386: Add support for AVX-IFMA in CPUID enumeration
- target/i386: Add support for AMX-FP16 in CPUID enumeration
- target/i386: Add support for CMPCCXADD in CPUID enumeration
- tracetool: avoid invalid escape in Python string
- hw/pvrdma: Protect against buggy or malicious guest driver
- vga: avoid crash if no default vga card (mainline commit 6985d8ede92494f3b791de01e8ee9306eb6d5e4a, category: bugfix)
- qom/object: Remove circular include dependency (mainline commit 5bba9bcfbb42e7c016626420e148a1bf1b080835, category: bugfix)
- artist: set memory region owners for buffers to the artist device (mainline commit 39fbaeca096a9bf6cbe2af88572c1cb2aa62aa8c, category: bugfix)
- virtio-iommu: Fix the partial copy of probe request (mainline commit 45461aace83d961e933b27519b81d17b4c690514, category: bugfix)
- e1000: set RX descriptor status in a separate operation (mainline commit 034d00d4858161e1d4cff82d8d230bce874a04d3, category: bugfix)
- vhost: introduce new VhostOps vhost_set_config_call
- vhost: stick to -errno error return convention
- vhost-user: stick to -errno error return convention
- vhost-vdpa: stick to -errno error return convention
- virtio-pci: add support for configure interrupt
- virtio-mmio: add support for configure interrupt
- virtio-net: add support for configure interrupt
- vhost: add support for configure interrupt
- virtio: add support for configure interrupt
- vhost-vdpa: add support for config interrupt
- virtio-pci: decouple the single vector from the interrupt process
- virtio-pci: decouple notifier from interrupt process
- virtio: introduce macro IRTIO_CONFIG_IRQ_IDX
- pci: Fix the update of interrupt disable bit in PCI_COMMAND register
- hw/timer/npcm7xx_timer: Prevent timer from counting down past zero
- tpm_crb: mark command buffer as dirty on request completion (mainline commit e37a0ef4605e5d2041785ff3fc89ca6021faf7a0, category: bugfix)
- pci: fix overflow in snprintf string formatting (mainline commit 36f18c6989a3d1ff1d7a0e50b0868ef3958299b4, category: bugfix)
- hw/usb/hcd-ehci: fix writeback order (mainline commit f471e8b060798f26a7fc339c6152f82f22a7b33d, category: bugfix)
- qemu-timer: Skip empty timer lists before locking in qemu_clock_deadline_ns_all (mainline commit 3f42906c9ab2c777a895b48b87b8107167e4a275, category: bugfix)
- semihosting/config: Merge --semihosting-config option groups (mainline commit 90c072e063737e9e8f431489bbd334452f89056e, category: bugfix)
- semihosting: fix memleak at semihosting_arg_fallback
- target/i386: Export GDS_NO bit to guests

Signed-off-by: Jiabo Feng <fengjiabo1@huawei.com>
2023-11-28 16:41:25 +08:00


From 22998eab50bc17b9af19e377df04d1583a7ddbda Mon Sep 17 00:00:00 2001
From: fangyi <eric.fangyi@huawei.com>
Date: Thu, 16 Nov 2023 09:54:50 +0800
Subject: [PATCH] virtio-pci: decouple notifier from interrupt process

To reuse the notifier logic for the configure interrupt, introduce the
virtio_pci_get_notifier() helper. Its input is the queue index (IDX); its
outputs are the corresponding guest notifier and the MSI-X vector.
Signed-off-by: Cindy Lu <lulu@redhat.com>
Message-Id: <20211104164827.21911-3-lulu@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: fangyi <eric.fangyi@huawei.com>
---
hw/virtio/virtio-pci.c | 88 +++++++++++++++++++++++++++---------------
1 file changed, 57 insertions(+), 31 deletions(-)
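
A minimal caller sketch of the new helper's contract (illustrative only,
based on the signatures introduced in the diff below): the helper returns
-1 for VIRTIO_CONFIG_IRQ_IDX or an unconfigured queue, and otherwise fills
in the guest notifier and the MSI-X vector for the given queue index.

    EventNotifier *n;
    unsigned int vector;
    int ret;

    /* Resolve the guest notifier and MSI-X vector for this queue index. */
    ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector);
    if (ret < 0) {
        return; /* VIRTIO_CONFIG_IRQ_IDX or queue not in use */
    }
    /* The irqfd helpers now take the notifier directly, not a queue index. */
    ret = kvm_virtio_pci_irqfd_use(proxy, n, vector);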
diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
index 21c0ec3b1b..85d7357f66 100644
--- a/hw/virtio/virtio-pci.c
+++ b/hw/virtio/virtio-pci.c
@@ -789,29 +789,41 @@ static void kvm_virtio_pci_vq_vector_release(VirtIOPCIProxy *proxy,
}
static int kvm_virtio_pci_irqfd_use(VirtIOPCIProxy *proxy,
- unsigned int queue_no,
+ EventNotifier *n,
unsigned int vector)
{
VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
- VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
- VirtQueue *vq = virtio_get_queue(vdev, queue_no);
- EventNotifier *n = virtio_queue_get_guest_notifier(vq);
return kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL, irqfd->virq);
}
static void kvm_virtio_pci_irqfd_release(VirtIOPCIProxy *proxy,
- unsigned int queue_no,
+ EventNotifier *n,
unsigned int vector)
{
- VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
- VirtQueue *vq = virtio_get_queue(vdev, queue_no);
- EventNotifier *n = virtio_queue_get_guest_notifier(vq);
VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
int ret;
ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, n, irqfd->virq);
assert(ret == 0);
}
+static int virtio_pci_get_notifier(VirtIOPCIProxy *proxy, int queue_no,
+ EventNotifier **n, unsigned int *vector)
+{
+ VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+ VirtQueue *vq;
+
+ if (queue_no == VIRTIO_CONFIG_IRQ_IDX) {
+ return -1;
+ } else {
+ if (!virtio_queue_get_num(vdev, queue_no)) {
+ return -1;
+ }
+ *vector = virtio_queue_vector(vdev, queue_no);
+ vq = virtio_get_queue(vdev, queue_no);
+ *n = virtio_queue_get_guest_notifier(vq);
+ }
+ return 0;
+}
static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
{
@@ -820,12 +832,15 @@ static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
unsigned int vector;
int ret, queue_no;
-
+ EventNotifier *n;
for (queue_no = 0; queue_no < nvqs; queue_no++) {
if (!virtio_queue_get_num(vdev, queue_no)) {
break;
}
- vector = virtio_queue_vector(vdev, queue_no);
+ ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector);
+ if (ret < 0) {
+ break;
+ }
if (vector >= msix_nr_vectors_allocated(dev)) {
continue;
}
@@ -837,7 +852,7 @@ static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
* Otherwise, delay until unmasked in the frontend.
*/
if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
- ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
+ ret = kvm_virtio_pci_irqfd_use(proxy, n, vector);
if (ret < 0) {
kvm_virtio_pci_vq_vector_release(proxy, vector);
goto undo;
@@ -853,7 +868,11 @@ undo:
continue;
}
if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
- kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
+ ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector);
+ if (ret < 0) {
+ break;
+ }
+ kvm_virtio_pci_irqfd_release(proxy, n, vector);
}
kvm_virtio_pci_vq_vector_release(proxy, vector);
}
@@ -867,12 +886,16 @@ static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
unsigned int vector;
int queue_no;
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
-
+ EventNotifier *n;
+ int ret;
for (queue_no = 0; queue_no < nvqs; queue_no++) {
if (!virtio_queue_get_num(vdev, queue_no)) {
break;
}
- vector = virtio_queue_vector(vdev, queue_no);
+ ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector);
+ if (ret < 0) {
+ break;
+ }
if (vector >= msix_nr_vectors_allocated(dev)) {
continue;
}
@@ -880,21 +903,20 @@ static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
* Otherwise, it was cleaned when masked in the frontend.
*/
if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
- kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
+ kvm_virtio_pci_irqfd_release(proxy, n, vector);
}
kvm_virtio_pci_vq_vector_release(proxy, vector);
}
}
-static int virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy,
+static int virtio_pci_one_vector_unmask(VirtIOPCIProxy *proxy,
unsigned int queue_no,
unsigned int vector,
- MSIMessage msg)
+ MSIMessage msg,
+ EventNotifier *n)
{
VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
- VirtQueue *vq = virtio_get_queue(vdev, queue_no);
- EventNotifier *n = virtio_queue_get_guest_notifier(vq);
VirtIOIRQFD *irqfd;
int ret = 0;
@@ -921,14 +943,15 @@ static int virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy,
event_notifier_set(n);
}
} else {
- ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
+ ret = kvm_virtio_pci_irqfd_use(proxy, n, vector);
}
return ret;
}
-static void virtio_pci_vq_vector_mask(VirtIOPCIProxy *proxy,
+static void virtio_pci_one_vector_mask(VirtIOPCIProxy *proxy,
unsigned int queue_no,
- unsigned int vector)
+ unsigned int vector,
+ EventNotifier *n)
{
VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
@@ -939,7 +962,7 @@ static void virtio_pci_vq_vector_mask(VirtIOPCIProxy *proxy,
if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
k->guest_notifier_mask(vdev, queue_no, true);
} else {
- kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
+ kvm_virtio_pci_irqfd_release(proxy, n, vector);
}
}
@@ -949,6 +972,7 @@ static int virtio_pci_vector_unmask(PCIDevice *dev, unsigned vector,
VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
+ EventNotifier *n;
int ret, index, unmasked = 0;
while (vq) {
@@ -957,7 +981,8 @@ static int virtio_pci_vector_unmask(PCIDevice *dev, unsigned vector,
break;
}
if (index < proxy->nvqs_with_notifiers) {
- ret = virtio_pci_vq_vector_unmask(proxy, index, vector, msg);
+ n = virtio_queue_get_guest_notifier(vq);
+ ret = virtio_pci_one_vector_unmask(proxy, index, vector, msg, n);
if (ret < 0) {
goto undo;
}
@@ -973,7 +998,8 @@ undo:
while (vq && unmasked >= 0) {
index = virtio_get_queue_index(vq);
if (index < proxy->nvqs_with_notifiers) {
- virtio_pci_vq_vector_mask(proxy, index, vector);
+ n = virtio_queue_get_guest_notifier(vq);
+ virtio_pci_one_vector_mask(proxy, index, vector, n);
--unmasked;
}
vq = virtio_vector_next_queue(vq);
@@ -986,15 +1012,17 @@ static void virtio_pci_vector_mask(PCIDevice *dev, unsigned vector)
VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
+ EventNotifier *n;
int index;
while (vq) {
index = virtio_get_queue_index(vq);
+ n = virtio_queue_get_guest_notifier(vq);
if (!virtio_queue_get_num(vdev, index)) {
break;
}
if (index < proxy->nvqs_with_notifiers) {
- virtio_pci_vq_vector_mask(proxy, index, vector);
+ virtio_pci_one_vector_mask(proxy, index, vector, n);
}
vq = virtio_vector_next_queue(vq);
}
@@ -1010,19 +1038,17 @@ static void virtio_pci_vector_poll(PCIDevice *dev,
int queue_no;
unsigned int vector;
EventNotifier *notifier;
- VirtQueue *vq;
+ int ret;
for (queue_no = 0; queue_no < proxy->nvqs_with_notifiers; queue_no++) {
- if (!virtio_queue_get_num(vdev, queue_no)) {
+ ret = virtio_pci_get_notifier(proxy, queue_no, &notifier, &vector);
+ if (ret < 0) {
break;
}
- vector = virtio_queue_vector(vdev, queue_no);
if (vector < vector_start || vector >= vector_end ||
!msix_is_masked(dev, vector)) {
continue;
}
- vq = virtio_get_queue(vdev, queue_no);
- notifier = virtio_queue_get_guest_notifier(vq);
if (k->guest_notifier_pending) {
if (k->guest_notifier_pending(vdev, queue_no)) {
msix_set_pending(dev, vector);
--
2.27.0