!783 QEMU update to version 6.2.0-75 (master)

From: @JiaboFeng 
Reviewed-by: @yezengruan 
Signed-off-by: @yezengruan
openeuler-ci-bot 2023-07-05 02:27:03 +00:00 committed by Gitee
commit 804831c36f
6 changed files with 469 additions and 1 deletion

Add-lbt-support-for-kvm.patch
@@ -0,0 +1,160 @@
From c174f8c60cd372301200cdecaaae345b079cf589 Mon Sep 17 00:00:00 2001
From: lixianglai <lixianglai@loongson.cn>
Date: Wed, 24 May 2023 23:28:41 -0400
Subject: [PATCH] Add lbt support for kvm.
Add lbt register get and put functions.
Signed-off-by: lixianglai <lixianglai@loongson.cn>
---
hw/loongarch/larch_3a.c | 3 ++-
linux-headers/asm-loongarch64/kvm.h | 15 +++++++++++++
target/loongarch64/cpu.h | 10 +++++++++
target/loongarch64/kvm.c | 35 +++++++++++++++++++++++++++++
4 files changed, 62 insertions(+), 1 deletion(-)
diff --git a/hw/loongarch/larch_3a.c b/hw/loongarch/larch_3a.c
index cef1a6f3d2..95bb224664 100644
--- a/hw/loongarch/larch_3a.c
+++ b/hw/loongarch/larch_3a.c
@@ -356,7 +356,8 @@ struct kvm_cpucfg ls3a5k_cpucfgs = {
.cpucfg[LOONGARCH_CPUCFG2] =
CPUCFG2_FP | CPUCFG2_FPSP | CPUCFG2_FPDP | CPUCFG2_FPVERS |
CPUCFG2_LSX | CPUCFG2_LASX | CPUCFG2_COMPLEX | CPUCFG2_CRYPTO |
- CPUCFG2_LLFTP | CPUCFG2_LLFTPREV | CPUCFG2_LSPW | CPUCFG2_LAM,
+ CPUCFG2_LLFTP | CPUCFG2_LLFTPREV | CPUCFG2_X86BT | CPUCFG2_ARMBT |
+ CPUCFG2_MIPSBT | CPUCFG2_LSPW | CPUCFG2_LAM,
.cpucfg[LOONGARCH_CPUCFG3] =
CPUCFG3_CCDMA | CPUCFG3_SFB | CPUCFG3_UCACC | CPUCFG3_LLEXC |
CPUCFG3_SCDLY | CPUCFG3_LLDBAR | CPUCFG3_ITLBT | CPUCFG3_ICACHET |
diff --git a/linux-headers/asm-loongarch64/kvm.h b/linux-headers/asm-loongarch64/kvm.h
index a473916d50..a036ea57cd 100644
--- a/linux-headers/asm-loongarch64/kvm.h
+++ b/linux-headers/asm-loongarch64/kvm.h
@@ -82,6 +82,7 @@ struct kvm_fpu {
* Register set = 2: KVM specific registers (see definitions below).
*
* Register set = 3: FPU / MSA registers (see definitions below).
+ * Register set = 4: LBT registers (see definitions below).
*
* Other sets registers may be added in the future. Each set would
* have its own identifier in bits[31..16].
@@ -91,6 +92,7 @@ struct kvm_fpu {
#define KVM_REG_LOONGARCH_CSR (KVM_REG_LOONGARCH | 0x0000000000010000ULL)
#define KVM_REG_LOONGARCH_KVM (KVM_REG_LOONGARCH | 0x0000000000020000ULL)
#define KVM_REG_LOONGARCH_FPU (KVM_REG_LOONGARCH | 0x0000000000030000ULL)
+#define KVM_REG_LOONGARCH_LBT (KVM_REG_LOONGARCH | 0x0000000000040000ULL)
/*
* KVM_REG_LOONGARCH_GP - General purpose registers from kvm_regs.
@@ -174,6 +176,19 @@ struct kvm_fpu {
#define KVM_REG_LOONGARCH_VCPU_RESET \
(KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 4)
+#define KVM_REG_LBT_SCR0 \
+ (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 1)
+#define KVM_REG_LBT_SCR1 \
+ (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 2)
+#define KVM_REG_LBT_SCR2 \
+ (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 3)
+#define KVM_REG_LBT_SCR3 \
+ (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 4)
+#define KVM_REG_LBT_FLAGS \
+ (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 5)
+#define KVM_REG_LBT_FTOP \
+ (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 6)
+
struct kvm_iocsr_entry {
__u32 addr;
__u32 pad;
diff --git a/target/loongarch64/cpu.h b/target/loongarch64/cpu.h
index bf5b36d404..8a29a507b1 100644
--- a/target/loongarch64/cpu.h
+++ b/target/loongarch64/cpu.h
@@ -75,6 +75,7 @@ typedef struct CPULOONGARCHFPUContext {
uint32_t fcsr0;
uint32_t fcsr0_rw_bitmask;
uint32_t vcsr16;
+ uint64_t ftop;
} CPULOONGARCHFPUContext;
/* fp control and status register definition */
@@ -196,6 +197,15 @@ struct CPULOONGARCHState {
struct {
uint64_t guest_addr;
} st;
+ struct {
+ /* scratch registers */
+ unsigned long scr0;
+ unsigned long scr1;
+ unsigned long scr2;
+ unsigned long scr3;
+ /* loongarch eflag */
+ unsigned long eflag;
+ } lbt;
};
/*
diff --git a/target/loongarch64/kvm.c b/target/loongarch64/kvm.c
index 21f6d5695f..0a4dc86421 100644
--- a/target/loongarch64/kvm.c
+++ b/target/loongarch64/kvm.c
@@ -1277,6 +1277,39 @@ int kvm_loongarch_get_pvtime(LOONGARCHCPU *cpu)
return 0;
}
+
+static int kvm_loongarch_put_lbt_registers(CPUState *cs)
+{
+ int ret = 0;
+ LOONGARCHCPU *cpu = LOONGARCH_CPU(cs);
+ CPULOONGARCHState *env = &cpu->env;
+
+ ret |= kvm_larch_putq(cs, KVM_REG_LBT_SCR0, &env->lbt.scr0);
+ ret |= kvm_larch_putq(cs, KVM_REG_LBT_SCR1, &env->lbt.scr1);
+ ret |= kvm_larch_putq(cs, KVM_REG_LBT_SCR2, &env->lbt.scr2);
+ ret |= kvm_larch_putq(cs, KVM_REG_LBT_SCR3, &env->lbt.scr3);
+ ret |= kvm_larch_putq(cs, KVM_REG_LBT_FLAGS, &env->lbt.eflag);
+ ret |= kvm_larch_putq(cs, KVM_REG_LBT_FTOP, &env->active_fpu.ftop);
+
+ return ret;
+}
+
+static int kvm_loongarch_get_lbt_registers(CPUState *cs)
+{
+ int ret = 0;
+ LOONGARCHCPU *cpu = LOONGARCH_CPU(cs);
+ CPULOONGARCHState *env = &cpu->env;
+
+ ret |= kvm_larch_getq(cs, KVM_REG_LBT_SCR0, &env->lbt.scr0);
+ ret |= kvm_larch_getq(cs, KVM_REG_LBT_SCR1, &env->lbt.scr1);
+ ret |= kvm_larch_getq(cs, KVM_REG_LBT_SCR2, &env->lbt.scr2);
+ ret |= kvm_larch_getq(cs, KVM_REG_LBT_SCR3, &env->lbt.scr3);
+ ret |= kvm_larch_getq(cs, KVM_REG_LBT_FLAGS, &env->lbt.eflag);
+ ret |= kvm_larch_getq(cs, KVM_REG_LBT_FTOP, &env->active_fpu.ftop);
+
+ return ret;
+}
+
int kvm_arch_put_registers(CPUState *cs, int level)
{
LOONGARCHCPU *cpu = LOONGARCH_CPU(cs);
@@ -1308,6 +1341,7 @@ int kvm_arch_put_registers(CPUState *cs, int level)
return ret;
}
+ kvm_loongarch_put_lbt_registers(cs);
return ret;
}
@@ -1334,6 +1368,7 @@ int kvm_arch_get_registers(CPUState *cs)
kvm_loongarch_get_csr_registers(cs);
kvm_loongarch_get_fpu_registers(cs);
+ kvm_loongarch_get_lbt_registers(cs);
return ret;
}
--
2.41.0.windows.1
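
The patch above calls kvm_larch_putq()/kvm_larch_getq(), which are not part of this diff. A minimal sketch of how such one-reg accessors are typically written in target/loongarch64/kvm.c follows; the helper names and exact signatures are assumptions, only the KVM one-reg ioctl interface itself is standard:

/* Sketch only (not in this diff): a typical shape for the kvm_larch_putq()/kvm_larch_getq()
 * helpers used above, built on the standard KVM one-reg ioctls. Helper names and exact
 * signatures are assumptions.
 */
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include <linux/kvm.h>

static int kvm_larch_putq(CPUState *cs, uint64_t reg_id, uint64_t *addr)
{
    struct kvm_one_reg reg = {
        .id   = reg_id,           /* e.g. KVM_REG_LBT_SCR0: LBT set (0x40000) | U64 size | index 1 */
        .addr = (uintptr_t)addr,  /* buffer holding the 64-bit value to write */
    };
    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
}

static int kvm_larch_getq(CPUState *cs, uint64_t reg_id, uint64_t *addr)
{
    struct kvm_one_reg reg = {
        .id   = reg_id,
        .addr = (uintptr_t)addr,  /* buffer the kernel fills with the register value */
    };
    return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
}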

Fix-smp.cores-value-and-Fix-divide-0-error.patch
@@ -0,0 +1,43 @@
From 08d0374d80ef321a421d4d9716c05006b469c78f Mon Sep 17 00:00:00 2001
From: lixianglai <lixianglai@loongson.cn>
Date: Wed, 24 May 2023 23:06:51 -0400
Subject: [PATCH] Fix smp.cores value and Fix divide 0 error
smp.cores should keep the default value passed on the QEMU command
line (the cores_per_socket argument) instead of being overwritten to 4.
The variable nb_numa_nodes may be 0, which would later cause a
division-by-zero error, so handle that case specially here.
Signed-off-by: lixianglai <lixianglai@loongson.cn>
---
hw/loongarch/larch_3a.c | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/hw/loongarch/larch_3a.c b/hw/loongarch/larch_3a.c
index fe786008ac..cef1a6f3d2 100644
--- a/hw/loongarch/larch_3a.c
+++ b/hw/loongarch/larch_3a.c
@@ -1221,7 +1221,6 @@ static void loongarch_build_smbios(LoongarchMachineState *lsms)
uint8_t *smbios_tables, *smbios_anchor;
size_t smbios_tables_len, smbios_anchor_len;
const char *product = "QEMU Virtual Machine";
- ms->smp.cores = 4;
if (!lsms->fw_cfg) {
return;
@@ -2005,6 +2004,10 @@ static int64_t ls3a_get_default_cpu_node_id(const MachineState *ms, int idx)
{
int nb_numa_nodes = ms->numa_state->num_nodes;
int smp_cores = ms->smp.cores;
+
+ if (nb_numa_nodes == 0) {
+ nb_numa_nodes = 1;
+ }
return idx / smp_cores % nb_numa_nodes;
}
--
2.41.0.windows.1
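
To illustrate the mapping that ls3a_get_default_cpu_node_id() computes and why the guard is needed, here is a small standalone sketch with hypothetical -smp values (not part of the patch):

/* Illustration only (not part of the patch): the node mapping ls3a_get_default_cpu_node_id() returns. */
#include <stdio.h>

int main(void)
{
    int smp_cores = 4;      /* hypothetical cores-per-socket from -smp */
    int nb_numa_nodes = 0;  /* no NUMA nodes given on the command line */

    if (nb_numa_nodes == 0) {
        nb_numa_nodes = 1;  /* the fix: treat "no NUMA config" as one node */
    }

    for (int idx = 0; idx < 8; idx++) {
        /* without the guard, "% 0" here is a division by zero (SIGFPE) */
        printf("cpu %d -> node %d\n", idx, idx / smp_cores % nb_numa_nodes);
    }
    return 0;
}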

hw-nvme-Change-alignment-in-dma-functions-for-nvme_b.patch
@@ -0,0 +1,91 @@
From f0ac211aab73b5b78795cd7bc94e0159c8e3cc1a Mon Sep 17 00:00:00 2001
From: wangmeiyang <wangmeiyang@xfusion.com>
Date: Fri, 26 May 2023 11:03:29 +0800
Subject: [PATCH] hw/nvme: Change alignment in dma functions for nvme_blk_*
Since nvme_blk_read/write are used for both the data and metadata
portions of the I/O, they cannot impose a 512B alignment requirement.
Without this change, any metadata transfer whose length is larger than
512B but not a multiple of 512B results in only a partial transfer.
origin commit: https://gitlab.com/qemu-project/qemu/-/commit/9b4f01812f69ad6066725338c89945bb61f41823
Signed-off-by: Meiyang Wang <wangmeiyang@xfusion.com>
Signed-off-by: Mateusz Kozlowski <kozlowski.mateuszpl@gmail.com>
Reviewed-by: Klaus Jensen <k.jensen@samsung.com>
Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
---
hw/nvme/ctrl.c | 18 ++++++++++--------
1 file changed, 10 insertions(+), 8 deletions(-)
diff --git a/hw/nvme/ctrl.c b/hw/nvme/ctrl.c
index 40fbda3b03..282abdda91 100644
--- a/hw/nvme/ctrl.c
+++ b/hw/nvme/ctrl.c
@@ -1263,26 +1263,28 @@ uint16_t nvme_bounce_mdata(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
}
static inline void nvme_blk_read(BlockBackend *blk, int64_t offset,
- BlockCompletionFunc *cb, NvmeRequest *req)
+ uint32_t align, BlockCompletionFunc *cb,
+ NvmeRequest *req)
{
assert(req->sg.flags & NVME_SG_ALLOC);
if (req->sg.flags & NVME_SG_DMA) {
- req->aiocb = dma_blk_read(blk, &req->sg.qsg, offset, BDRV_SECTOR_SIZE,
- cb, req);
+ req->aiocb = dma_blk_read(blk, &req->sg.qsg, offset, align, cb, req);
} else {
req->aiocb = blk_aio_preadv(blk, offset, &req->sg.iov, 0, cb, req);
}
}
static inline void nvme_blk_write(BlockBackend *blk, int64_t offset,
- BlockCompletionFunc *cb, NvmeRequest *req)
+ uint32_t align, BlockCompletionFunc *cb,
+ NvmeRequest *req)
{
assert(req->sg.flags & NVME_SG_ALLOC);
if (req->sg.flags & NVME_SG_DMA) {
- req->aiocb = dma_blk_write(blk, &req->sg.qsg, offset, BDRV_SECTOR_SIZE,
- cb, req);
+ req->aiocb = dma_blk_write(blk, &req->sg.qsg, offset, align, cb, req);
} else {
req->aiocb = blk_aio_pwritev(blk, offset, &req->sg.iov, 0, cb, req);
}
@@ -1958,10 +1960,10 @@ static void nvme_rw_cb(void *opaque, int ret)
}
if (req->cmd.opcode == NVME_CMD_READ) {
- return nvme_blk_read(blk, offset, nvme_rw_complete_cb, req);
+ return nvme_blk_read(blk, offset, 1, nvme_rw_complete_cb, req);
}
- return nvme_blk_write(blk, offset, nvme_rw_complete_cb, req);
+ return nvme_blk_write(blk, offset, 1, nvme_rw_complete_cb, req);
}
}
@@ -3145,7 +3147,7 @@ static uint16_t nvme_read(NvmeCtrl *n, NvmeRequest *req)
block_acct_start(blk_get_stats(blk), &req->acct, data_size,
BLOCK_ACCT_READ);
- nvme_blk_read(blk, data_offset, nvme_rw_cb, req);
+ nvme_blk_read(blk, data_offset, BDRV_SECTOR_SIZE, nvme_rw_cb, req);
return NVME_NO_COMPLETE;
invalid:
@@ -3272,7 +3274,7 @@ static uint16_t nvme_do_write(NvmeCtrl *n, NvmeRequest *req, bool append,
block_acct_start(blk_get_stats(blk), &req->acct, data_size,
BLOCK_ACCT_WRITE);
- nvme_blk_write(blk, data_offset, nvme_rw_cb, req);
+ nvme_blk_write(blk, data_offset, BDRV_SECTOR_SIZE, nvme_rw_cb, req);
} else {
req->aiocb = blk_aio_pwrite_zeroes(blk, data_offset, data_size,
BDRV_REQ_MAY_UNMAP, nvme_rw_cb,
--
2.41.0.windows.1
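
A rough numeric illustration of the bug the patch above fixes, using a simplified model of how an alignment constraint truncates an unaligned transfer (the real dma_blk_* bounce-buffer logic is more involved; values are hypothetical):

/* Illustration only: how a 512B alignment truncates an unaligned metadata transfer. */
#include <stdint.h>
#include <stdio.h>

#define BDRV_SECTOR_SIZE 512u

/* Simplified model: the DMA helper only moves whole 'align'-sized chunks. */
static uint32_t bytes_moved(uint32_t len, uint32_t align)
{
    return len / align * align;
}

int main(void)
{
    uint32_t mlen = 10 * 64;  /* e.g. 10 LBAs with 64B of metadata each = 640B */

    printf("align=512: %u of %u bytes\n", bytes_moved(mlen, BDRV_SECTOR_SIZE), mlen); /* 512 */
    printf("align=1:   %u of %u bytes\n", bytes_moved(mlen, 1), mlen);                /* 640 */
    return 0;
}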

hw-nvme-fix-missing-DNR-on-compare-failure.patch
@@ -0,0 +1,52 @@
From 5d4fdfc6639103d0ea5754537886921e59abb2fc Mon Sep 17 00:00:00 2001
From: wangmeiyang <wangmeiyang@xfusion.com>
Date: Fri, 26 May 2023 10:52:48 +0800
Subject: [PATCH] hw/nvme: fix missing DNR on compare failure
Even if the host is somehow using compare to do compare-and-write, the
host should be notified immediately about the compare failure and not
have to wait for the driver to potentially retry the command.
origin commit: https://gitlab.com/qemu-project/qemu/-/commit/ca2a091802872b265bc6007a2d36276d51d8e4b3
Signed-off-by: Meiyang Wang <wangmeiyang@xfusion.com>
Fixes: 0a384f923f51 ("hw/block/nvme: add compare command")
Reported-by: Jim Harris <james.r.harris@intel.com>
Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
---
hw/nvme/ctrl.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/hw/nvme/ctrl.c b/hw/nvme/ctrl.c
index 40fbda3b03..0b4df77e3c 100644
--- a/hw/nvme/ctrl.c
+++ b/hw/nvme/ctrl.c
@@ -2123,7 +2123,7 @@ static void nvme_compare_mdata_cb(void *opaque, int ret)
for (bufp = buf; mbufp < end; bufp += ns->lbaf.ms, mbufp += ns->lbaf.ms) {
if (memcmp(bufp + pil, mbufp + pil, ns->lbaf.ms - pil)) {
- req->status = NVME_CMP_FAILURE;
+ req->status = NVME_CMP_FAILURE | NVME_DNR;
goto out;
}
}
@@ -2132,7 +2132,7 @@ static void nvme_compare_mdata_cb(void *opaque, int ret)
}
if (memcmp(buf, ctx->mdata.bounce, ctx->mdata.iov.size)) {
- req->status = NVME_CMP_FAILURE;
+ req->status = NVME_CMP_FAILURE | NVME_DNR;
goto out;
}
@@ -2181,7 +2181,7 @@ static void nvme_compare_data_cb(void *opaque, int ret)
}
if (memcmp(buf, ctx->data.bounce, ctx->data.iov.size)) {
- req->status = NVME_CMP_FAILURE;
+ req->status = NVME_CMP_FAILURE | NVME_DNR;
goto out;
}
--
2.41.0.windows.1
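
For context on what OR-ing in NVME_DNR does: DNR (Do Not Retry) is a bit in the NVMe completion status field, so setting it tells the host not to re-issue the failed Compare. A tiny standalone sketch follows; the constant values are assumed to mirror QEMU's include/block/nvme.h:

/* Sketch only: constants are assumed to match QEMU's include/block/nvme.h. */
#include <stdint.h>
#include <stdio.h>

#define NVME_CMP_FAILURE 0x0285  /* media error: Compare Failure */
#define NVME_DNR         0x4000  /* Do Not Retry bit of the CQE status field */

int main(void)
{
    uint16_t status = NVME_CMP_FAILURE | NVME_DNR;
    /* before the fix the host saw 0x0285 (retryable); now it sees 0x4285 */
    printf("status=0x%04x dnr=%d\n", (unsigned)status, (status & NVME_DNR) ? 1 : 0);
    return 0;
}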

qemu.spec
@@ -3,7 +3,7 @@
Name: qemu
Version: 6.2.0
-Release: 74
+Release: 75
Epoch: 10
Summary: QEMU is a generic and open source machine emulator and virtualizer
License: GPLv2 and BSD and MIT and CC-BY-SA-4.0
@@ -502,6 +502,11 @@ Patch0487: AVX512-support-for-xbzrle_encode_buffer.patch
Patch0488: Update-bench-code-for-addressing-CI-problem.patch
Patch0489: migration-xbzrle-use-ctz64-to-avoid-undefined-result.patch
Patch0490: migration-xbzrle-fix-out-of-bounds-write-with-axv512.patch
+Patch0491: hw-nvme-fix-missing-DNR-on-compare-failure.patch
+Patch0492: virtio-fix-reachable-assertion-due-to-stale-value-of.patch
+Patch0493: hw-nvme-Change-alignment-in-dma-functions-for-nvme_b.patch
+Patch0494: Fix-smp.cores-value-and-Fix-divide-0-error.patch
+Patch0495: Add-lbt-support-for-kvm.patch

BuildRequires: flex
BuildRequires: gcc
@@ -1075,6 +1080,13 @@ getent passwd qemu >/dev/null || \
%endif

%changelog
+* Thu Jun 29 2023 <fengjiabo1@huawei.com> - 10:6.2.0-75
+- Add lbt support for kvm.
+- Fix smp.cores value and Fix divide 0 error
+- hw/nvme: Change alignment in dma functions for nvme_blk_*
+- virtio: fix reachable assertion due to stale value of cached region size
+- hw/nvme: fix missing DNR on compare failure
+
* Thu May 25 2023 <liuxiangdong5@huawei.com> - 10:6.2.0-74
- spec: delete repetitive man8/qemu-ga.8* from qemu-guest-agent package

virtio-fix-reachable-assertion-due-to-stale-value-of.patch
@@ -0,0 +1,110 @@
From fc3c5fc2f3ccc236a6bcb670043912ab31e99772 Mon Sep 17 00:00:00 2001
From: wangmeiyang <wangmeiyang@xfusion.com>
Date: Fri, 26 May 2023 11:09:19 +0800
Subject: [PATCH] virtio: fix reachable assertion due to stale value of cached
region size
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
In virtqueue_{split,packed}_get_avail_bytes() descriptors are read
in a loop via MemoryRegionCache regions and calls to
vring_{split,packed}_desc_read() - these take a region cache and the
index of the descriptor to be read.
For direct descriptors we use a cache provided by the caller, whose
size matches that of the virtqueue vring. We limit the number of
descriptors we can read by the size of that vring:
max = vq->vring.num;
...
MemoryRegionCache *desc_cache = &caches->desc;
For indirect descriptors, we initialize a new cache and limit the
number of descriptors by the size of the intermediate descriptor:
len = address_space_cache_init(&indirect_desc_cache,
vdev->dma_as,
desc.addr, desc.len, false);
desc_cache = &indirect_desc_cache;
...
max = desc.len / sizeof(VRingDesc);
However, the first initialization of `max` is done outside the loop
where we process guest descriptors, while the second one is done
inside. This means that a sequence of an indirect descriptor followed
by a direct one will leave a stale value in `max`. If the second
descriptor's `next` field is smaller than the stale value, but
greater than the size of the virtqueue ring (and thus the cached
region), a failed assertion will be triggered in
address_space_read_cached() down the call chain.
Fix this by initializing `max` inside the loop in both functions.
origin commit: https://gitlab.com/qemu-project/qemu/-/commit/bbc1c327d7974261c61566cdb950cc5fa0196b41
Signed-off-by: Meiyang Wang <wangmeiyang@xfusion.com>
Fixes: 9796d0ac8fb0 ("virtio: use address_space_map/unmap to access descriptors")
Signed-off-by: Carlos López <clopez@suse.de>
Message-Id: <20230302100358.3613-1-clopez@suse.de>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
---
hw/virtio/virtio.c | 11 +++++------
1 file changed, 5 insertions(+), 6 deletions(-)
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index f8ab48e6bd..071668e3e0 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -983,7 +983,7 @@ static void virtqueue_split_get_avail_bytes(VirtQueue *vq,
VRingMemoryRegionCaches *caches)
{
VirtIODevice *vdev = vq->vdev;
- unsigned int max, idx;
+ unsigned int idx;
unsigned int total_bufs, in_total, out_total;
MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
int64_t len = 0;
@@ -992,13 +992,12 @@ static void virtqueue_split_get_avail_bytes(VirtQueue *vq,
idx = vq->last_avail_idx;
total_bufs = in_total = out_total = 0;
- max = vq->vring.num;
-
while ((rc = virtqueue_num_heads(vq, idx)) > 0) {
MemoryRegionCache *desc_cache = &caches->desc;
unsigned int num_bufs;
VRingDesc desc;
unsigned int i;
+ unsigned int max = vq->vring.num;
num_bufs = total_bufs;
@@ -1120,7 +1119,7 @@ static void virtqueue_packed_get_avail_bytes(VirtQueue *vq,
VRingMemoryRegionCaches *caches)
{
VirtIODevice *vdev = vq->vdev;
- unsigned int max, idx;
+ unsigned int idx;
unsigned int total_bufs, in_total, out_total;
MemoryRegionCache *desc_cache;
MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
@@ -1132,14 +1131,14 @@ static void virtqueue_packed_get_avail_bytes(VirtQueue *vq,
wrap_counter = vq->last_avail_wrap_counter;
total_bufs = in_total = out_total = 0;
- max = vq->vring.num;
-
for (;;) {
unsigned int num_bufs = total_bufs;
unsigned int i = idx;
int rc;
+ unsigned int max = vq->vring.num;
desc_cache = &caches->desc;
+
vring_packed_desc_read(vdev, &desc, desc_cache, idx, true);
if (!is_desc_avail(desc.flags, wrap_counter)) {
break;
--
2.41.0.windows.1
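
A worked example of the stale-max scenario described in the commit message above, with hypothetical numbers: vring.num = 16; an indirect head whose table has 32 entries sets max = 32 inside the loop, and a following direct chain with next = 20 then passes the stale bound even though the cached region only covers 16 descriptors, tripping the assertion in address_space_read_cached(). The standalone sketch below only models the bounds check, not the real virtio code:

/* Illustration only: models the descriptor-index bounds check, not the real virtio code. */
#include <stdbool.h>
#include <stdio.h>

static bool index_in_bounds(unsigned next, unsigned max)
{
    /* virtqueue_*_get_avail_bytes() must reject indices >= the cached region size */
    return next < max;
}

int main(void)
{
    unsigned vring_num = 16;  /* hypothetical vq->vring.num */

    /* head 1: indirect table with 32 entries -> max legitimately becomes 32 */
    unsigned max_after_indirect = 32;

    /* head 2: direct chain; guest supplies next = 20, beyond the 16-entry vring */
    unsigned next = 20;

    unsigned stale_max = max_after_indirect;  /* old code: leftover from head 1 */
    unsigned fresh_max = vring_num;           /* fixed code: re-read each iteration */

    printf("stale bound (32) accepts next=20: %d\n", index_in_bounds(next, stale_max));
    printf("fresh bound (16) accepts next=20: %d\n", index_in_bounds(next, fresh_max));
    return 0;
}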