!344 Automatically generate code patches with openeuler !174

From: @kuhnchen18
Reviewed-by: @imxcc
Signed-off-by: @imxcc
openeuler-ci-bot 2021-07-29 08:57:23 +00:00 committed by Gitee
commit 08474a4948
4 changed files with 199 additions and 1 deletion

qemu.spec

@@ -1,6 +1,6 @@
Name: qemu
Version: 4.1.0
-Release: 73
+Release: 74
Epoch: 2
Summary: QEMU is a generic and open source machine emulator and virtualizer
License: GPLv2 and BSD and MIT and CC-BY-SA-4.0
@@ -498,6 +498,9 @@ Patch0485: vfio-Dirty-page-tracking-when-vIOMMU-is-enabled.patch
Patch0486: vfio-Add-ioctl-to-get-dirty-pages-bitmap-during-dma-.patch
Patch0487: vfio-Make-vfio-pci-device-migration-capable.patch
Patch0488: qapi-Add-VFIO-devices-migration-stats-in-Migration-s.patch
+Patch0489: vfio-Move-the-saving-of-the-config-space-to-the-righ.patch
+Patch0490: vfio-Set-the-priority-of-the-VFIO-VM-state-change-ha.patch
+Patch0491: vfio-Avoid-disabling-and-enabling-vectors-repeatedly.patch
BuildRequires: flex
BuildRequires: gcc
@@ -892,6 +895,11 @@ getent passwd qemu >/dev/null || \
%endif
%changelog
+* Thu Jul 29 2021 Chen Qun <kuhn.chenqun@huawei.com>
+- vfio: Move the saving of the config space to the right place in VFIO migration
+- vfio: Set the priority of the VFIO VM state change handler explicitly
+- vfio: Avoid disabling and enabling vectors repeatedly in VFIO migration
* Thu Jul 29 2021 imxcc <xingchaochao@huawei.com>
- hw/net: fix vmxnet3 live migration
- include: Make headers more self-contained

vfio-Avoid-disabling-and-enabling-vectors-repeatedly.patch

@@ -0,0 +1,63 @@
From 8113fdcf0c1383ae5b9542563656bea3753d834e Mon Sep 17 00:00:00 2001
From: Shenming Lu <lushenming@huawei.com>
Date: Wed, 10 Mar 2021 11:02:33 +0800
Subject: [PATCH] vfio: Avoid disabling and enabling vectors repeatedly in VFIO
migration
In the VFIO migration resume phase and during some guest startups, there
are already unmasked vectors in the vector table when vfio_msix_enable()
is called. So, to avoid inefficiently disabling and re-enabling vectors
repeatedly, let's allocate all needed vectors first and then enable the
unmasked vectors one by one without disabling them in between.
Signed-off-by: Shenming Lu <lushenming@huawei.com>
Message-Id: <20210310030233.1133-4-lushenming@huawei.com>
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
---
hw/vfio/pci.c | 20 +++++++++++++++++---
1 file changed, 17 insertions(+), 3 deletions(-)
diff --git a/hw/vfio/pci.c b/hw/vfio/pci.c
index a637c35e7a..da7c740bce 100644
--- a/hw/vfio/pci.c
+++ b/hw/vfio/pci.c
@@ -563,6 +563,9 @@ static void vfio_msix_vector_release(PCIDevice *pdev, unsigned int nr)
static void vfio_msix_enable(VFIOPCIDevice *vdev)
{
+ PCIDevice *pdev = &vdev->pdev;
+ unsigned int nr, max_vec = 0;
+
vfio_disable_interrupts(vdev);
vdev->msi_vectors = g_new0(VFIOMSIVector, vdev->msix->entries);
@@ -581,11 +584,22 @@ static void vfio_msix_enable(VFIOPCIDevice *vdev)
* triggering to userspace, then immediately release the vector, leaving
* the physical device with no vectors enabled, but MSI-X enabled, just
* like the guest view.
+ * If there are already unmasked vectors (in migration resume phase and
+ * some guest startups) which will be enabled soon, we can allocate all
+ * of them here to avoid inefficiently disabling and enabling vectors
+ * repeatedly later.
*/
- vfio_msix_vector_do_use(&vdev->pdev, 0, NULL, NULL);
- vfio_msix_vector_release(&vdev->pdev, 0);
+ if (!pdev->msix_function_masked) {
+ for (nr = 0; nr < msix_nr_vectors_allocated(pdev); nr++) {
+ if (!msix_is_masked(pdev, nr)) {
+ max_vec = nr;
+ }
+ }
+ }
+ vfio_msix_vector_do_use(pdev, max_vec, NULL, NULL);
+ vfio_msix_vector_release(pdev, max_vec);
- if (msix_set_vector_notifiers(&vdev->pdev, vfio_msix_vector_use,
+ if (msix_set_vector_notifiers(pdev, vfio_msix_vector_use,
vfio_msix_vector_release, NULL)) {
error_report("vfio: msix_set_vector_notifiers failed");
}
--
2.27.0
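
The heart of the hunk above is a linear scan for the highest guest-unmasked MSI-X entry, so that vectors 0..max_vec can be set up in one pass instead of toggling MSI-X for every vector the guest unmasks later. Below is a minimal, self-contained C sketch of that scan; the function name and the bool array are illustrative stand-ins for QEMU's msix_nr_vectors_allocated()/msix_is_masked() helpers, not real QEMU code.

#include <stdbool.h>

/* Return the index of the highest unmasked MSI-X entry, or 0 when every
 * entry is masked -- in which case the caller falls back to the old
 * "use and immediately release vector 0" behaviour. */
static unsigned int find_max_unmasked_vector(const bool *masked,
                                             unsigned int nr_vectors)
{
    unsigned int nr, max_vec = 0;

    for (nr = 0; nr < nr_vectors; nr++) {
        if (!masked[nr]) {
            max_vec = nr;   /* remember the highest unmasked slot seen so far */
        }
    }
    return max_vec;
}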

vfio-Move-the-saving-of-the-config-space-to-the-righ.patch

@@ -0,0 +1,86 @@
From 483baf4c668fbd2da76e6948576e13eded1c54ec Mon Sep 17 00:00:00 2001
From: Shenming Lu <lushenming@huawei.com>
Date: Wed, 10 Mar 2021 11:02:31 +0800
Subject: [PATCH] vfio: Move the saving of the config space to the right place
in VFIO migration
On ARM64 the VFIO SET_IRQS ioctl depends on the VM interrupt
setup; if the VFIO PCI device config space is restored before
the VGIC, an error might occur in the kernel.
So we move the saving of the config space to the non-iterable
process, so that it is called after the VGIC according to
their priorities.
As for the possible dependence of the device-specific migration
data on its config space, we can let the vendor driver include
any config info it needs in its own data stream.
Signed-off-by: Shenming Lu <lushenming@huawei.com>
Reviewed-by: Kirti Wankhede <kwankhede@nvidia.com>
Message-Id: <20210310030233.1133-2-lushenming@huawei.com>
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
---
hw/vfio/migration.c | 25 +++++++++++++++----------
1 file changed, 15 insertions(+), 10 deletions(-)
diff --git a/hw/vfio/migration.c b/hw/vfio/migration.c
index b77c66557e..ea36ae5225 100644
--- a/hw/vfio/migration.c
+++ b/hw/vfio/migration.c
@@ -575,11 +575,6 @@ static int vfio_save_complete_precopy(QEMUFile *f, void *opaque)
return ret;
}
- ret = vfio_save_device_config_state(f, opaque);
- if (ret) {
- return ret;
- }
-
ret = vfio_update_pending(vbasedev);
if (ret) {
return ret;
@@ -620,6 +615,19 @@ static int vfio_save_complete_precopy(QEMUFile *f, void *opaque)
return ret;
}
+static void vfio_save_state(QEMUFile *f, void *opaque)
+{
+ VFIODevice *vbasedev = opaque;
+ int ret;
+
+ ret = vfio_save_device_config_state(f, opaque);
+ if (ret) {
+ error_report("%s: Failed to save device config space",
+ vbasedev->name);
+ qemu_file_set_error(f, ret);
+ }
+}
+
static int vfio_load_setup(QEMUFile *f, void *opaque)
{
VFIODevice *vbasedev = opaque;
@@ -670,11 +678,7 @@ static int vfio_load_state(QEMUFile *f, void *opaque, int version_id)
switch (data) {
case VFIO_MIG_FLAG_DEV_CONFIG_STATE:
{
- ret = vfio_load_device_config_state(f, opaque);
- if (ret) {
- return ret;
- }
- break;
+ return vfio_load_device_config_state(f, opaque);
}
case VFIO_MIG_FLAG_DEV_SETUP_STATE:
{
@@ -720,6 +724,7 @@ static SaveVMHandlers savevm_vfio_handlers = {
.save_live_pending = vfio_save_pending,
.save_live_iterate = vfio_save_iterate,
.save_live_complete_precopy = vfio_save_complete_precopy,
+ .save_state = vfio_save_state,
.load_setup = vfio_load_setup,
.load_cleanup = vfio_load_cleanup,
.load_state = vfio_load_state,
--
2.27.0
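
The change above is about when the config space travels in the migration stream: it moves out of the iterable save_live_complete_precopy path and into the non-iterable .save_state phase, which the migration core runs afterwards and which the destination replays in the same order, so the VGIC state is restored before the VFIO config space. The toy C program below only models that two-phase ordering; the struct, the handler names and their relative priorities are invented for illustration and are not QEMU APIs.

#include <stdio.h>

/* Hypothetical two-phase walk: iterable handlers first, then non-iterable
 * state in priority order (assumed here: GIC before a plain PCI device). */
struct handler {
    const char *name;
    int iterable;   /* 1 = saved in the precopy/stop-and-copy phase */
};

int main(void)
{
    struct handler handlers[] = {
        { "vfio-pci migration region (iterable)",   1 },
        { "GICv3 interrupt state (non-iterable)",   0 },
        { "vfio-pci config space via .save_state",  0 },
    };
    unsigned int i, n = sizeof(handlers) / sizeof(handlers[0]);

    for (i = 0; i < n; i++) {            /* phase 1: iterable device data */
        if (handlers[i].iterable) {
            printf("save: %s\n", handlers[i].name);
        }
    }
    for (i = 0; i < n; i++) {            /* phase 2: non-iterable state */
        if (!handlers[i].iterable) {
            printf("save: %s\n", handlers[i].name);
        }
    }
    /* The destination loads sections in stream order, so the VGIC is set up
     * before the VFIO config space (and its SET_IRQS ioctl) is restored. */
    return 0;
}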

vfio-Set-the-priority-of-the-VFIO-VM-state-change-ha.patch

@@ -0,0 +1,41 @@
From b9d74bcf6aefe8ab607439ad1c518a453053ccee Mon Sep 17 00:00:00 2001
From: Shenming Lu <lushenming@huawei.com>
Date: Wed, 10 Mar 2021 11:02:32 +0800
Subject: [PATCH] vfio: Set the priority of the VFIO VM state change handler
explicitly
In the VFIO VM state change handler, when the VM is stopped, the _RUNNING
bit in device_state is cleared, which makes the VFIO device stop, including
no longer generating interrupts. Then we can save the pending states of
all interrupts in the GIC VM state change handler (on ARM).
So we have to set the priority of the VFIO VM state change handler
explicitly (like virtio devices do) to ensure it is called before the
GIC's handler when saving.
Signed-off-by: Shenming Lu <lushenming@huawei.com>
Reviewed-by: Kirti Wankhede <kwankhede@nvidia.com>
Reviewed-by: Cornelia Huck <cohuck@redhat.com>
Message-Id: <20210310030233.1133-3-lushenming@huawei.com>
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
---
hw/vfio/migration.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/hw/vfio/migration.c b/hw/vfio/migration.c
index ea36ae5225..1a97784486 100644
--- a/hw/vfio/migration.c
+++ b/hw/vfio/migration.c
@@ -862,7 +862,8 @@ static int vfio_migration_init(VFIODevice *vbasedev,
register_savevm_live(id, VMSTATE_INSTANCE_ID_ANY, 1, &savevm_vfio_handlers,
vbasedev);
- migration->vm_state = qemu_add_vm_change_state_handler(vfio_vmstate_change,
+ migration->vm_state = qdev_add_vm_change_state_handler(vbasedev->dev,
+ vfio_vmstate_change,
vbasedev);
migration->migration_state.notify = vfio_migration_state_notifier;
add_migration_state_change_notifier(&migration->migration_state);
--
2.27.0
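
The one-line change above matters because VM change state handlers run in a priority-dependent order when the VM stops: the VFIO handler must clear _RUNNING (so the device stops injecting interrupts) before the GIC handler captures pending interrupt state. The sketch below only illustrates that ordering, under the assumption that a larger priority value runs earlier on stop; the struct, comparator and priorities are made up and are not the QEMU API.

#include <stdio.h>
#include <stdlib.h>

struct vm_change_state_entry {
    int priority;       /* assumption: larger value == invoked earlier on stop */
    const char *name;
};

static int stop_order(const void *a, const void *b)
{
    const struct vm_change_state_entry *ea = a, *eb = b;
    return eb->priority - ea->priority;   /* sort by descending priority */
}

int main(void)
{
    struct vm_change_state_entry entries[] = {
        { 0, "GICv3: save pending interrupt state" },
        { 1, "vfio-pci: clear _RUNNING in device_state" },
    };
    unsigned int i, n = sizeof(entries) / sizeof(entries[0]);

    qsort(entries, n, sizeof(entries[0]), stop_order);
    for (i = 0; i < n; i++) {
        printf("on VM stop: %s\n", entries[i].name);   /* vfio-pci runs first */
    }
    return 0;
}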