From 594cba5943b3e8bf1bd5720b1fa20d4662920ae0 Mon Sep 17 00:00:00 2001
From: Kunkun Jiang <jiangkunkun@huawei.com>
Date: Thu, 4 Mar 2021 21:34:46 +0800
Subject: [PATCH] vfio: Support host translation granule size

cpu_physical_memory_set_dirty_lebitmap() can quickly handle the dirty
pages of memory by bitmap-traveling, regardless of whether the bitmap
is correctly aligned.

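[ed: As an illustration only, not QEMU's actual implementation, the
bitmap-traveling amounts to scanning 64-bit words and skipping clean
ones; walk_dirty_bitmap() and mark_dirty() are hypothetical names:

    /* Walk a little-endian dirty bitmap one 64-bit word at a time,
     * skipping words in which no page is dirty. */
    static void walk_dirty_bitmap(const uint64_t *bitmap, uint64_t npages,
                                  void (*mark_dirty)(uint64_t page))
    {
        uint64_t nwords = (npages + 63) / 64;

        for (uint64_t w = 0; w < nwords; w++) {
            uint64_t bits = bitmap[w];          /* 0 => 64 clean pages */

            while (bits) {
                int bit = __builtin_ctzll(bits);  /* lowest set bit */
                mark_dirty(w * 64 + bit);
                bits &= bits - 1;                 /* clear that bit */
            }
        }
    }
]
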
cpu_physical_memory_set_dirty_lebitmap() supports bitmaps whose bits
each cover one page of host page size, so it is better to set
bitmap_pgsize to the host page size in order to support more
translation granule sizes.

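[ed: The sizing arithmetic this implies, lifted from the hunks below;
only the input name dma_size is made up for the example:

    /* One bit per host page, rounded up to whole 64-bit words, which
     * is the layout the VFIO dirty-bitmap ioctls expect. */
    uint64_t pgsize = qemu_real_host_page_size;
    uint64_t pages = REAL_HOST_PAGE_ALIGN(dma_size) / pgsize;
    uint64_t bitmap_bytes = ROUND_UP(pages, sizeof(__u64) * BITS_PER_BYTE) /
                            BITS_PER_BYTE;
]
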
[aw: The Fixes commit below introduced code to restrict migration
support to configurations where the target page size intersects the
host dirty page support. For example, a 4K guest on a 4K host.
Due to the above flexibility in bitmap handling, this restriction
unnecessarily prevents mixed target/host page sizes that could
otherwise be supported. Use the host page size for the dirty bitmap.]

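[ed: Concretely, with the final hunk below, a 64K-page host running a
4K target passes the capability check whenever the IOMMU reports 64K
dirty-tracking support; the pgsize_bitmap value here is illustrative:

    /* IOMMU dirty-tracking granules as a bitmap of page sizes,
     * e.g. an IOMMU reporting only 64K support. */
    uint64_t pgsize_bitmap = 1ULL << 16;

    /* Old: pgsize_bitmap & TARGET_PAGE_SIZE (4K) fails here.
     * New: pgsize_bitmap & qemu_real_host_page_size (64K) passes. */
    bool dirty_pages_supported = pgsize_bitmap & qemu_real_host_page_size;
]
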
Fixes: fc49c9cbf2 ("vfio: Get migration capability flags for container")
Signed-off-by: Kunkun Jiang <jiangkunkun@huawei.com>
Message-Id: <20210304133446.1521-1-jiangkunkun@huawei.com>
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
---
 hw/vfio/common.c | 48 +++++++++++++++++++++++++-----------------------
 1 file changed, 25 insertions(+), 23 deletions(-)

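[ed: For readers outside QEMU, the host-page helpers the diff switches
to behave roughly as below; this is paraphrased semantics, not the
exact QEMU definitions:

    size_t   qemu_real_host_page_size = sysconf(_SC_PAGESIZE);
    intptr_t qemu_real_host_page_mask = -(intptr_t)qemu_real_host_page_size;
    #define REAL_HOST_PAGE_ALIGN(x) \
        (((x) + qemu_real_host_page_size - 1) & qemu_real_host_page_mask)
]
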
diff --git a/hw/vfio/common.c b/hw/vfio/common.c
index ebd701faa0..a7817c90cc 100644
--- a/hw/vfio/common.c
+++ b/hw/vfio/common.c
@@ -377,7 +377,7 @@ static int vfio_dma_unmap_bitmap(VFIOContainer *container,
 {
     struct vfio_iommu_type1_dma_unmap *unmap;
     struct vfio_bitmap *bitmap;
-    uint64_t pages = TARGET_PAGE_ALIGN(size) >> TARGET_PAGE_BITS;
+    uint64_t pages = REAL_HOST_PAGE_ALIGN(size) / qemu_real_host_page_size;
     int ret;
 
     unmap = g_malloc0(sizeof(*unmap) + sizeof(*bitmap));
@@ -389,12 +389,12 @@ static int vfio_dma_unmap_bitmap(VFIOContainer *container,
     bitmap = (struct vfio_bitmap *)&unmap->data;
 
     /*
-     * cpu_physical_memory_set_dirty_lebitmap() expects pages in bitmap of
-     * TARGET_PAGE_SIZE to mark those dirty. Hence set bitmap_pgsize to
-     * TARGET_PAGE_SIZE.
+     * cpu_physical_memory_set_dirty_lebitmap() supports pages in bitmap of
+     * qemu_real_host_page_size to mark those dirty. Hence set bitmap_pgsize
+     * to qemu_real_host_page_size.
      */
-    bitmap->pgsize = TARGET_PAGE_SIZE;
+    bitmap->pgsize = qemu_real_host_page_size;
     bitmap->size = ROUND_UP(pages, sizeof(__u64) * BITS_PER_BYTE) /
                    BITS_PER_BYTE;
@@ -672,16 +672,17 @@ static void vfio_listener_region_add(MemoryListener *listener,
         return;
     }
 
-    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
-                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
+    if (unlikely((section->offset_within_address_space &
+                  ~qemu_real_host_page_mask) !=
+                 (section->offset_within_region & ~qemu_real_host_page_mask))) {
         error_report("%s received unaligned region", __func__);
         return;
     }
 
-    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
+    iova = REAL_HOST_PAGE_ALIGN(section->offset_within_address_space);
     llend = int128_make64(section->offset_within_address_space);
     llend = int128_add(llend, section->size);
-    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));
+    llend = int128_and(llend, int128_exts64(qemu_real_host_page_mask));
 
     if (int128_ge(int128_make64(iova), llend)) {
         return;
@@ -866,8 +867,9 @@ static void vfio_listener_region_del(MemoryListener *listener,
         return;
     }
 
-    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
-                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
+    if (unlikely((section->offset_within_address_space &
+                  ~qemu_real_host_page_mask) !=
+                 (section->offset_within_region & ~qemu_real_host_page_mask))) {
         error_report("%s received unaligned region", __func__);
         return;
     }
@@ -895,10 +897,10 @@ static void vfio_listener_region_del(MemoryListener *listener,
          */
     }
 
-    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
+    iova = REAL_HOST_PAGE_ALIGN(section->offset_within_address_space);
     llend = int128_make64(section->offset_within_address_space);
     llend = int128_add(llend, section->size);
-    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));
+    llend = int128_and(llend, int128_exts64(qemu_real_host_page_mask));
 
     if (int128_ge(int128_make64(iova), llend)) {
         return;
@@ -967,13 +969,13 @@ static int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova,
     range->size = size;
 
     /*
-     * cpu_physical_memory_set_dirty_lebitmap() expects pages in bitmap of
-     * TARGET_PAGE_SIZE to mark those dirty. Hence set bitmap's pgsize to
-     * TARGET_PAGE_SIZE.
+     * cpu_physical_memory_set_dirty_lebitmap() supports pages in bitmap of
+     * qemu_real_host_page_size to mark those dirty. Hence set bitmap's pgsize
+     * to qemu_real_host_page_size.
      */
-    range->bitmap.pgsize = TARGET_PAGE_SIZE;
+    range->bitmap.pgsize = qemu_real_host_page_size;
 
-    pages = TARGET_PAGE_ALIGN(range->size) >> TARGET_PAGE_BITS;
+    pages = REAL_HOST_PAGE_ALIGN(range->size) / qemu_real_host_page_size;
     range->bitmap.size = ROUND_UP(pages, sizeof(__u64) * BITS_PER_BYTE) /
                          BITS_PER_BYTE;
     range->bitmap.data = g_try_malloc0(range->bitmap.size);
@@ -1077,8 +1079,8 @@ static int vfio_sync_dirty_bitmap(VFIOContainer *container,
                section->offset_within_region;
 
     return vfio_get_dirty_bitmap(container,
-                                 TARGET_PAGE_ALIGN(section->offset_within_address_space),
-                                 int128_get64(section->size), ram_addr);
+                   REAL_HOST_PAGE_ALIGN(section->offset_within_address_space),
+                   int128_get64(section->size), ram_addr);
 }
 
 static void vfio_listener_log_sync(MemoryListener *listener,
@@ -1572,10 +1574,10 @@ static void vfio_get_iommu_info_migration(VFIOContainer *container,
                            header);
 
     /*
-     * cpu_physical_memory_set_dirty_lebitmap() expects pages in bitmap of
-     * TARGET_PAGE_SIZE to mark those dirty.
+     * cpu_physical_memory_set_dirty_lebitmap() supports pages in bitmap of
+     * qemu_real_host_page_size to mark those dirty.
      */
-    if (cap_mig->pgsize_bitmap & TARGET_PAGE_SIZE) {
+    if (cap_mig->pgsize_bitmap & qemu_real_host_page_size) {
         container->dirty_pages_supported = true;
         container->max_dirty_bitmap_size = cap_mig->max_dirty_bitmap_size;
         container->dirty_pgsizes = cap_mig->pgsize_bitmap;
--
2.27.0