- vfio/migration: Add support for manual clear vfio dirty log - vfio: Maintain DMA mapping range for the container - linux-headers: update against 5.10 and manual clear vfio dirty log series - arm/acpi: Fix bios_tables_test failure when building qemu-system-aarch64 on an x86_64 host. Reason: the __aarch64__ macro let build_pptt build different functions on x86_64 and aarch64 hosts, which made bios_tables_test fail. - pl031: support rtc-timer property for pl031 - feature: Add logs for vm start and destroy - feature: Add log for each module - log: Add log at boot & cpu init for aarch64 - bugfix: irq: Avoid covering object refcount of qemu_irq - i386: cache passthrough: Update AMD 8000_001D.EAX[25:14] based on vCPU topo - freeclock: set rtc_date_diff for X86 - freeclock: set rtc_date_diff for arm - freeclock: add qmp command to get time offset of vm in seconds - tests: Disable filemonitor testcase - shadow_dev: introduce shadow dev for virtio-net device - pl011: reset read FIFO when UARTIMSC=0 & UARTICR=0xffff - tests: virt: Update expected ACPI tables for virt test (update BinDir) - arm64: Add the cpufreq device to show cpufreq info to guest - hw/arm64: add vcpu cache info support - tests: virt: Allow changes to PPTT test table - cpu: add Cortex-A72 processor kvm target support - cpu: add Kunpeng-920 cpu support - net: eepro100: validate various address values (CVE-2021-20255) - ide: ahci: add check to avoid null dereference (CVE-2019-12067) - vdpa: set vring enable only if the vring address has already been set - docs: Add generic vhost-vdpa device documentation - vdpa: don't suspend/resume device when vdpa device not started - vdpa: correct param passed in when unregister save - vdpa: suspend function return 0 when the vdpa device is stopped - vdpa: support vdpa device suspend/resume - vdpa: move memory listener to the realize stage - vdpa: implement vdpa device migration - vhost: implement migration state notifier for vdpa device - vhost: implement post resume bh - vhost: implement 
savevm_handler for vdpa device - vhost: implement vhost_vdpa_device_suspend/resume - vhost: implement vhost-vdpa suspend/resume - vhost: add vhost_dev_suspend/resume_op - vhost: introduce bytemap for vhost backend logging - vhost-vdpa: add migration log ops for VhostOps - vhost-vdpa: add VHOST_BACKEND_F_BYTEMAPLOG - hw/usb: reduce the vcpu cost of UHCI when VNC disconnect - virtio-net: update the default and max of rx/tx_queue_size - virtio-net: set the max of queue size to 4096 - virtio-net: fix max vring buf size when set ring num - virtio-net: bugfix: do not delete netdev before virtio net - monitor: Discard BLOCK_IO_ERROR event when VM rebooted - vhost-user: add unregister_savevm when vhost-user cleanup - vhost-user: add vhost_set_mem_table when vm load_setup at destination - vhost-user: quit infinite loop while used memslots is more than the backend limit - fix qemu-core when vhost-user-net config with server mode - vhost-user: Add support reconnect vhost-user socket - vhost-user: Set the acked_features to vm's feature - i6300esb watchdog: bugfix: Add a runstate transition - hw/net/rocker_of_dpa: fix double free bug of rocker device - net/dump.c: Suppress spurious compiler warning - pcie: Add pcie-root-port fast plug/unplug feature - pcie: Compat with devices which do not support Link Width, such as ioh3420 - qdev/monitors: Fix redundant error_setg of qdev_add_device - qemu-nbd: set timeout to qemu-nbd socket - qemu-nbd: make native the default aio mode - nbd/server.c: fix invalid read after client was already freed - virtio-scsi: bugfix: fix qemu crash for hotplug scsi disk with dataplane - virtio: bugfix: check the value of caches before accessing it - virtio: print the guest virtio_net features that host does not support - virtio: bugfix: add rcu_read_lock when vring_avail_idx is called - virtio: check descriptor numbers - migration: report multiFd related thread pid to libvirt - migration: report migration related thread pid to libvirt - cpu/features: fix 
bug for memory leakage - doc: Update multi-thread compression doc - migration: Add compress_level sanity check - migration: Add zstd support in multi-thread compression - migration: Add multi-thread compress ops - migration: Refactoring multi-thread compress migration - migration: Add multi-thread compress method - migration: skip cache_drop for bios bootloader and nvram template - oslib-posix: optimise vm startup time for 1G hugepage - monitor/qmp: drop inflight rsp if qmp client broken - ps2: fix oob in ps2 kbd - Currently, when kvm and qemu cannot handle some kvm exit, qemu will do vm_stop, which will put the vm in the pause state. This action makes the vm unrecoverable, so send guest panic to libvirt instead. - vhost: cancel migration when vhost-user restarted during migration Signed-off-by: Jiabo Feng <fengjiabo1@huawei.com>
197 lines
5.9 KiB
Diff
197 lines
5.9 KiB
Diff
From c4829aa6fce007c995b21cfbd86de0473263c19a Mon Sep 17 00:00:00 2001
|
|
From: Dongxu Sun <sundongxu3@huawei.com>
|
|
Date: Sat, 30 Mar 2024 12:49:05 +0800
|
|
Subject: [PATCH] shadow_dev: introduce shadow dev for virtio-net device
|
|
|
|
for virtio-net devices, create the shadow device so that vLPI
|
|
bypass injection is supported.
|
|
|
|
Signed-off-by: Wang Haibin <wanghaibin.wang@huawei.com>
|
|
Signed-off-by: Yu Zenghui <yuzenghui@huawei.com>
|
|
Signed-off-by: Chen Qun <kuhn.chenqun@huawei.com>
|
|
Signed-off-by: KunKun Jiang <jiangkunkun@huawei.com>
|
|
Signed-off-by: Dongxu Sun <sundongxu3@huawei.com>
|
|
Signed-off-by: Yuan Zhang <zhangyuan162@huawei.com>
|
|
---
|
|
hw/virtio/virtio-pci.c | 32 ++++++++++++++++++++++++++
|
|
include/sysemu/kvm.h | 5 +++++
|
|
linux-headers/linux/kvm.h | 13 +++++++++++
|
|
target/arm/kvm.c | 47 +++++++++++++++++++++++++++++++++++++++
|
|
4 files changed, 97 insertions(+)
|
|
|
|
diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
|
|
index 134a8eaef6..f8adb0520a 100644
|
|
--- a/hw/virtio/virtio-pci.c
|
|
+++ b/hw/virtio/virtio-pci.c
|
|
@@ -922,18 +922,44 @@ undo:
|
|
}
|
|
return ret;
|
|
}
|
|
+
|
|
+#ifdef __aarch64__
|
|
+int __attribute__((weak)) kvm_create_shadow_device(PCIDevice *dev)
|
|
+{
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int __attribute__((weak)) kvm_delete_shadow_device(PCIDevice *dev)
|
|
+{
|
|
+ return 0;
|
|
+}
|
|
+#endif
|
|
+
|
|
static int kvm_virtio_pci_vector_vq_use(VirtIOPCIProxy *proxy, int nvqs)
|
|
{
|
|
int queue_no;
|
|
int ret = 0;
|
|
VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
|
|
|
|
+#ifdef __aarch64__
|
|
+ if (!strcmp(vdev->name, "virtio-net")) {
|
|
+ kvm_create_shadow_device(&proxy->pci_dev);
|
|
+ }
|
|
+#endif
|
|
+
|
|
for (queue_no = 0; queue_no < nvqs; queue_no++) {
|
|
if (!virtio_queue_get_num(vdev, queue_no)) {
|
|
return -1;
|
|
}
|
|
ret = kvm_virtio_pci_vector_use_one(proxy, queue_no);
|
|
}
|
|
+
|
|
+#ifdef __aarch64__
|
|
+ if (!strcmp(vdev->name, "virtio-net") && ret != 0) {
|
|
+ kvm_delete_shadow_device(&proxy->pci_dev);
|
|
+ }
|
|
+#endif
|
|
+
|
|
return ret;
|
|
}
|
|
|
|
@@ -976,6 +1002,12 @@ static void kvm_virtio_pci_vector_vq_release(VirtIOPCIProxy *proxy, int nvqs)
|
|
}
|
|
kvm_virtio_pci_vector_release_one(proxy, queue_no);
|
|
}
|
|
+
|
|
+#ifdef __aarch64__
|
|
+ if (!strcmp(vdev->name, "virtio-net")) {
|
|
+ kvm_delete_shadow_device(&proxy->pci_dev);
|
|
+ }
|
|
+#endif
|
|
}
|
|
|
|
static void kvm_virtio_pci_vector_config_release(VirtIOPCIProxy *proxy)
|
|
diff --git a/include/sysemu/kvm.h b/include/sysemu/kvm.h
|
|
index d614878164..b46d6203b4 100644
|
|
--- a/include/sysemu/kvm.h
|
|
+++ b/include/sysemu/kvm.h
|
|
@@ -538,4 +538,9 @@ bool kvm_arch_cpu_check_are_resettable(void);
|
|
bool kvm_dirty_ring_enabled(void);
|
|
|
|
uint32_t kvm_dirty_ring_size(void);
|
|
+
|
|
+#ifdef __aarch64__
|
|
+int kvm_create_shadow_device(PCIDevice *dev);
|
|
+int kvm_delete_shadow_device(PCIDevice *dev);
|
|
+#endif
|
|
#endif
|
|
diff --git a/linux-headers/linux/kvm.h b/linux-headers/linux/kvm.h
|
|
index 549fea3a97..56f6b2583f 100644
|
|
--- a/linux-headers/linux/kvm.h
|
|
+++ b/linux-headers/linux/kvm.h
|
|
@@ -1198,6 +1198,8 @@ struct kvm_ppc_resize_hpt {
|
|
#define KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES 229
|
|
#define KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES 230
|
|
|
|
+#define KVM_CAP_ARM_VIRT_MSI_BYPASS 799
|
|
+
|
|
#ifdef KVM_CAP_IRQ_ROUTING
|
|
|
|
struct kvm_irq_routing_irqchip {
|
|
@@ -1524,6 +1526,17 @@ struct kvm_s390_ucas_mapping {
|
|
#define KVM_XEN_HVM_CONFIG _IOW(KVMIO, 0x7a, struct kvm_xen_hvm_config)
|
|
#define KVM_SET_CLOCK _IOW(KVMIO, 0x7b, struct kvm_clock_data)
|
|
#define KVM_GET_CLOCK _IOR(KVMIO, 0x7c, struct kvm_clock_data)
|
|
+
|
|
+#ifdef __aarch64__
|
|
+struct kvm_master_dev_info
|
|
+{
|
|
+ __u32 nvectors; /* number of msi vectors */
|
|
+ struct kvm_msi msi[0];
|
|
+};
|
|
+#define KVM_CREATE_SHADOW_DEV _IOW(KVMIO, 0xf0, struct kvm_master_dev_info)
|
|
+#define KVM_DEL_SHADOW_DEV _IOW(KVMIO, 0xf1, __u32)
|
|
+#endif
|
|
+
|
|
/* Available with KVM_CAP_PIT_STATE2 */
|
|
#define KVM_GET_PIT2 _IOR(KVMIO, 0x9f, struct kvm_pit_state2)
|
|
#define KVM_SET_PIT2 _IOW(KVMIO, 0xa0, struct kvm_pit_state2)
|
|
diff --git a/target/arm/kvm.c b/target/arm/kvm.c
|
|
index 7903e2ddde..f59f4f81b2 100644
|
|
--- a/target/arm/kvm.c
|
|
+++ b/target/arm/kvm.c
|
|
@@ -26,6 +26,8 @@
|
|
#include "trace.h"
|
|
#include "internals.h"
|
|
#include "hw/pci/pci.h"
|
|
+#include "hw/pci/msi.h"
|
|
+#include "hw/pci/msix.h"
|
|
#include "exec/memattrs.h"
|
|
#include "exec/address-spaces.h"
|
|
#include "hw/boards.h"
|
|
@@ -1053,6 +1055,51 @@ int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
|
|
return 0;
|
|
}
|
|
|
|
+int kvm_create_shadow_device(PCIDevice *dev)
|
|
+{
|
|
+ KVMState *s = kvm_state;
|
|
+ struct kvm_master_dev_info *mdi;
|
|
+ MSIMessage msg;
|
|
+ uint32_t vector, nvectors = msix_nr_vectors_allocated(dev);
|
|
+ uint32_t request_id;
|
|
+ int ret;
|
|
+
|
|
+ if (!kvm_vm_check_extension(s, KVM_CAP_ARM_VIRT_MSI_BYPASS) || !nvectors) {
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ mdi = g_malloc0(sizeof(uint32_t) + sizeof(struct kvm_msi) * nvectors);
|
|
+ mdi->nvectors = nvectors;
|
|
+ request_id = pci_requester_id(dev);
|
|
+
|
|
+ for (vector = 0; vector < nvectors; vector++) {
|
|
+ msg = msix_get_message(dev, vector);
|
|
+ mdi->msi[vector].address_lo = extract64(msg.address, 0, 32);
|
|
+ mdi->msi[vector].address_hi = extract64(msg.address, 32, 32);
|
|
+ mdi->msi[vector].data = le32_to_cpu(msg.data);
|
|
+ mdi->msi[vector].flags = KVM_MSI_VALID_DEVID;
|
|
+ mdi->msi[vector].devid = request_id;
|
|
+ memset(mdi->msi[vector].pad, 0, sizeof(mdi->msi[vector].pad));
|
|
+ }
|
|
+
|
|
+ ret = kvm_vm_ioctl(s, KVM_CREATE_SHADOW_DEV, mdi);
|
|
+ g_free(mdi);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int kvm_delete_shadow_device(PCIDevice *dev)
|
|
+{
|
|
+ KVMState *s = kvm_state;
|
|
+ uint32_t request_id, nvectors = msix_nr_vectors_allocated(dev);
|
|
+
|
|
+ if (!kvm_vm_check_extension(s, KVM_CAP_ARM_VIRT_MSI_BYPASS) || !nvectors) {
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ request_id = pci_requester_id(dev);
|
|
+ return kvm_vm_ioctl(s, KVM_DEL_SHADOW_DEV, &request_id);
|
|
+}
|
|
+
|
|
int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
|
|
int vector, PCIDevice *dev)
|
|
{
|
|
--
|
|
2.27.0
|
|
|