libvirt update to version 9.10.0-4:

- docs: Document CPU clusters
- docs: Improve documentation for CPU topology
- tests: Verify handling of CPU clusters in QMP data
- qemu: Make monitor aware of CPU clusters
- qemu: Use CPU clusters for guests
- qemu: Introduce QEMU_CAPS_SMP_CLUSTERS
- conf: Allow specifying CPU clusters
- conf: Report CPU clusters in capabilities XML
- tests: Add hostcpudata for machine with CPU clusters
- cpu_map: add kunpeng-920 features to arm features
- cpu/aarch64: enable host-model cpu for AArch64 architecture
- conf/domain_conf: pin the retry_interval and retry_timeout parameters to xml
- nodedev: fix potential heap use after free
- libvirt/conf: Set default values of retry fields
- qemu: Support 'retry' BLOCK_IO_ERROR event.
- libvirt: Add 'retry' support for error policy
- vdpa: support vdpa device migration
- vdpa: support vdpa device hot plug/unplug
- hostdev: Introduce vDPA device to hostdev subsystem as a new subtype
- node_device: fix leak of DIR*
- migration/multifd-pin: support migration multifd thread pin
- migration/multifd-pin: add qemu monitor callback functions
- migration/migration-pin: add domainMigrationPid for qemuMonitorCallbacks
- migration/migration-pin: add migrationpin for migration parameters
- migration/migration-pin: add qemu monitor callback functions
- migration/migration-pin: add some migration/multiFd params
- qemu: add pointer check in qemuMonitorLastError
- qemu: fix a concurrent operation situation
- test/commandtest: skip test4 when the test case is run in a container environment

Signed-off-by: Jiabo Feng <fengjiabo1@huawei.com>
From 3b90700f9e72cf98c5f6b943dfb82e2c06e6f15c Mon Sep 17 00:00:00 2001
From: Andrea Bolognani <abologna@redhat.com>
Date: Fri, 5 Jan 2024 18:20:14 +0100
Subject: [PATCH] conf: Allow specifying CPU clusters
The default number of CPU clusters is 1, and values other than
that one are currently rejected by all hypervisor drivers.
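
A guest requests clusters through the existing <topology> element of the <cpu> definition; a minimal illustrative fragment, modelled on the test data updated below (not a complete domain XML):

  <cpu mode='custom' match='exact'>
    <model fallback='allow'>Penryn</model>
    <topology sockets='2' dies='1' clusters='1' cores='4' threads='1'/>
  </cpu>

The clusters attribute is optional and defaults to 1 when omitted, which preserves the previous behaviour.
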
Signed-off-by: Andrea Bolognani <abologna@redhat.com>
Reviewed-by: Peter Krempa <pkrempa@redhat.com>
---
src/bhyve/bhyve_command.c | 5 +++++
src/conf/cpu_conf.c | 16 +++++++++++++++-
src/conf/cpu_conf.h | 1 +
src/conf/domain_conf.c | 1 +
src/conf/schemas/cputypes.rng | 5 +++++
src/cpu/cpu.c | 1 +
src/libxl/libxl_capabilities.c | 1 +
src/qemu/qemu_command.c | 5 +++++
src/vmx/vmx.c | 7 +++++++
.../x86_64-host+guest,model486-result.xml | 2 +-
.../x86_64-host+guest,models-result.xml | 2 +-
tests/cputestdata/x86_64-host+guest-result.xml | 2 +-
tests/cputestdata/x86_64-host+guest.xml | 2 +-
.../x86_64-host+host-model-nofallback.xml | 2 +-
...host-Haswell-noTSX+Haswell,haswell-result.xml | 2 +-
...aswell-noTSX+Haswell-noTSX,haswell-result.xml | 2 +-
...4-host-Haswell-noTSX+Haswell-noTSX-result.xml | 2 +-
.../x86_64-host-worse+guest-result.xml | 2 +-
.../ppc64-modern-bulk-result-conf.xml | 2 +-
.../ppc64-modern-bulk-result-live.xml | 2 +-
.../ppc64-modern-individual-result-conf.xml | 2 +-
.../ppc64-modern-individual-result-live.xml | 2 +-
.../x86-modern-bulk-result-conf.xml | 2 +-
.../x86-modern-bulk-result-live.xml | 2 +-
.../x86-modern-individual-add-result-conf.xml | 2 +-
.../x86-modern-individual-add-result-live.xml | 2 +-
...e-timeout+graphics-spice-timeout-password.xml | 2 +-
.../qemuhotplug-graphics-spice-timeout.xml | 2 +-
.../fd-memory-no-numa-topology.xml | 2 +-
.../qemuxml2argvdata/fd-memory-numa-topology.xml | 2 +-
.../fd-memory-numa-topology2.xml | 2 +-
.../fd-memory-numa-topology3.xml | 2 +-
tests/qemuxml2argvdata/hugepages-nvdimm.xml | 2 +-
.../memfd-memory-default-hugepage.xml | 2 +-
tests/qemuxml2argvdata/memfd-memory-numa.xml | 2 +-
.../memory-hotplug-nvdimm-access.xml | 2 +-
.../memory-hotplug-nvdimm-align.xml | 2 +-
.../memory-hotplug-nvdimm-label.xml | 2 +-
.../memory-hotplug-nvdimm-pmem.xml | 2 +-
.../memory-hotplug-nvdimm-readonly.xml | 2 +-
tests/qemuxml2argvdata/memory-hotplug-nvdimm.xml | 2 +-
.../memory-hotplug-virtio-mem.xml | 2 +-
.../memory-hotplug-virtio-pmem.xml | 2 +-
.../cpu-numa-disjoint.x86_64-latest.xml | 2 +-
.../cpu-numa-disordered.x86_64-latest.xml | 2 +-
.../cpu-numa-memshared.x86_64-latest.xml | 2 +-
.../cpu-numa-no-memory-element.x86_64-latest.xml | 2 +-
.../cpu-numa1.x86_64-latest.xml | 2 +-
.../cpu-numa2.x86_64-latest.xml | 2 +-
.../memory-hotplug-dimm-addr.x86_64-latest.xml | 2 +-
.../memory-hotplug-dimm.x86_64-latest.xml | 2 +-
.../memory-hotplug-multiple.x86_64-latest.xml | 2 +-
...plug-nvdimm-ppc64-abi-update.ppc64-latest.xml | 2 +-
.../memory-hotplug-nvdimm-ppc64.ppc64-latest.xml | 2 +-
.../memory-hotplug.x86_64-latest.xml | 2 +-
...mad-auto-memory-vcpu-cpuset.x86_64-latest.xml | 2 +-
...cpu-no-cpuset-and-placement.x86_64-latest.xml | 2 +-
...numad-auto-vcpu-no-numatune.x86_64-latest.xml | 2 +-
...mad-static-vcpu-no-numatune.x86_64-latest.xml | 2 +-
.../pci-expander-bus.x86_64-latest.xml | 2 +-
.../pcie-expander-bus.x86_64-latest.xml | 2 +-
.../pseries-phb-numa-node.ppc64-latest.xml | 2 +-
tests/vmx2xmldata/esx-in-the-wild-10.xml | 2 +-
tests/vmx2xmldata/esx-in-the-wild-8.xml | 2 +-
tests/vmx2xmldata/esx-in-the-wild-9.xml | 2 +-
65 files changed, 97 insertions(+), 57 deletions(-)
diff --git a/src/bhyve/bhyve_command.c b/src/bhyve/bhyve_command.c
index 5b388c7a8f..d05b01ae5d 100644
--- a/src/bhyve/bhyve_command.c
+++ b/src/bhyve/bhyve_command.c
@@ -672,6 +672,11 @@ virBhyveProcessBuildBhyveCmd(struct _bhyveConn *driver, virDomainDef *def,
_("Only 1 die per socket is supported"));
return NULL;
}
+ if (def->cpu->clusters != 1) {
+ virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
+ _("Only 1 cluster per die is supported"));
+ return NULL;
+ }
if (nvcpus != def->cpu->sockets * def->cpu->cores * def->cpu->threads) {
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
_("Invalid CPU topology: total number of vCPUs must equal the product of sockets, cores, and threads"));
diff --git a/src/conf/cpu_conf.c b/src/conf/cpu_conf.c
index 7abe489733..6e6e1b9a89 100644
--- a/src/conf/cpu_conf.c
+++ b/src/conf/cpu_conf.c
@@ -241,6 +241,7 @@ virCPUDefCopyWithoutModel(const virCPUDef *cpu)
copy->fallback = cpu->fallback;
copy->sockets = cpu->sockets;
copy->dies = cpu->dies;
+ copy->clusters = cpu->clusters;
copy->cores = cpu->cores;
copy->threads = cpu->threads;
copy->arch = cpu->arch;
@@ -572,6 +573,12 @@ virCPUDefParseXML(xmlXPathContextPtr ctxt,
return -1;
}
+ if (virXMLPropUIntDefault(topology, "clusters", 10,
+ VIR_XML_PROP_NONZERO,
+ &def->clusters, 1) < 0) {
+ return -1;
+ }
+
if (virXMLPropUInt(topology, "cores", 10,
VIR_XML_PROP_REQUIRED | VIR_XML_PROP_NONZERO,
&def->cores) < 0) {
@@ -827,10 +834,11 @@ virCPUDefFormatBuf(virBuffer *buf,
virBufferAddLit(buf, "/>\n");
}
- if (def->sockets && def->dies && def->cores && def->threads) {
+ if (def->sockets && def->dies && def->clusters && def->cores && def->threads) {
virBufferAddLit(buf, "<topology");
virBufferAsprintf(buf, " sockets='%u'", def->sockets);
virBufferAsprintf(buf, " dies='%u'", def->dies);
+ virBufferAsprintf(buf, " clusters='%u'", def->clusters);
virBufferAsprintf(buf, " cores='%u'", def->cores);
virBufferAsprintf(buf, " threads='%u'", def->threads);
virBufferAddLit(buf, "/>\n");
@@ -1106,6 +1114,12 @@ virCPUDefIsEqual(virCPUDef *src,
return false;
}
+ if (src->clusters != dst->clusters) {
+ MISMATCH(_("Target CPU clusters %1$d does not match source %2$d"),
+ dst->clusters, src->clusters);
+ return false;
+ }
+
if (src->cores != dst->cores) {
MISMATCH(_("Target CPU cores %1$d does not match source %2$d"),
dst->cores, src->cores);
diff --git a/src/conf/cpu_conf.h b/src/conf/cpu_conf.h
index 3e4c53675c..2694022fed 100644
--- a/src/conf/cpu_conf.h
+++ b/src/conf/cpu_conf.h
@@ -148,6 +148,7 @@ struct _virCPUDef {
unsigned int microcodeVersion;
unsigned int sockets;
unsigned int dies;
+ unsigned int clusters;
unsigned int cores;
unsigned int threads;
unsigned int sigFamily;
diff --git a/src/conf/domain_conf.c b/src/conf/domain_conf.c
index e56eb02f61..4798d01d12 100644
--- a/src/conf/domain_conf.c
+++ b/src/conf/domain_conf.c
@@ -2325,6 +2325,7 @@ virDomainDefGetVcpusTopology(const virDomainDef *def,
/* multiplication of 32bit numbers fits into a 64bit variable */
if ((tmp *= def->cpu->dies) > UINT_MAX ||
+ (tmp *= def->cpu->clusters) > UINT_MAX ||
(tmp *= def->cpu->cores) > UINT_MAX ||
(tmp *= def->cpu->threads) > UINT_MAX) {
virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
diff --git a/src/conf/schemas/cputypes.rng b/src/conf/schemas/cputypes.rng
index db1aa57158..3a8910e09f 100644
--- a/src/conf/schemas/cputypes.rng
+++ b/src/conf/schemas/cputypes.rng
@@ -92,6 +92,11 @@
<ref name="positiveInteger"/>
</attribute>
</optional>
+ <optional>
+ <attribute name="clusters">
+ <ref name="positiveInteger"/>
+ </attribute>
+ </optional>
<attribute name="cores">
<ref name="positiveInteger"/>
</attribute>
diff --git a/src/cpu/cpu.c b/src/cpu/cpu.c
index bc43aa4e93..4f048d0dad 100644
--- a/src/cpu/cpu.c
+++ b/src/cpu/cpu.c
@@ -435,6 +435,7 @@ virCPUGetHost(virArch arch,
if (nodeInfo) {
cpu->sockets = nodeInfo->sockets;
cpu->dies = 1;
+ cpu->clusters = 1;
cpu->cores = nodeInfo->cores;
cpu->threads = nodeInfo->threads;
}
diff --git a/src/libxl/libxl_capabilities.c b/src/libxl/libxl_capabilities.c
index 177e8b988e..c3736bd0a9 100644
--- a/src/libxl/libxl_capabilities.c
+++ b/src/libxl/libxl_capabilities.c
@@ -159,6 +159,7 @@ libxlCapsInitCPU(virCaps *caps, libxl_physinfo *phy_info)
cpu->cores = phy_info->cores_per_socket;
cpu->threads = phy_info->threads_per_core;
cpu->dies = 1;
+ cpu->clusters = 1;
cpu->sockets = phy_info->nr_cpus / (cpu->cores * cpu->threads);
if (!(data = libxlCapsNodeData(cpu, phy_info->hw_cap)) ||
diff --git a/src/qemu/qemu_command.c b/src/qemu/qemu_command.c
index 9cff03632c..461584566f 100644
--- a/src/qemu/qemu_command.c
+++ b/src/qemu/qemu_command.c
@@ -7217,6 +7217,11 @@ qemuBuildSmpCommandLine(virCommand *cmd,
_("Only 1 die per socket is supported"));
return -1;
}
+ if (def->cpu->clusters != 1) {
+ virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
+ _("Only 1 cluster per die is supported"));
+ return -1;
+ }
virBufferAsprintf(&buf, ",sockets=%u", def->cpu->sockets);
if (virQEMUCapsGet(qemuCaps, QEMU_CAPS_SMP_DIES))
virBufferAsprintf(&buf, ",dies=%u", def->cpu->dies);
diff --git a/src/vmx/vmx.c b/src/vmx/vmx.c
index 26b89776e1..4ac2320251 100644
--- a/src/vmx/vmx.c
+++ b/src/vmx/vmx.c
@@ -1583,6 +1583,7 @@ virVMXParseConfig(virVMXContext *ctx,
goto cleanup;
}
cpu->dies = 1;
+ cpu->clusters = 1;
cpu->cores = coresPerSocket;
cpu->threads = 1;
@@ -3377,6 +3378,12 @@ virVMXFormatConfig(virVMXContext *ctx, virDomainXMLOption *xmlopt, virDomainDef
goto cleanup;
}
+ if (def->cpu->clusters != 1) {
+ virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
+ _("Only 1 cluster per die is supported"));
+ goto cleanup;
+ }
+
calculated_vcpus = def->cpu->sockets * def->cpu->cores;
if (calculated_vcpus != maxvcpus) {
virReportError(VIR_ERR_INTERNAL_ERROR,
diff --git a/tests/cputestdata/x86_64-host+guest,model486-result.xml b/tests/cputestdata/x86_64-host+guest,model486-result.xml
index ea8e2d3a48..b533f22b88 100644
--- a/tests/cputestdata/x86_64-host+guest,model486-result.xml
+++ b/tests/cputestdata/x86_64-host+guest,model486-result.xml
@@ -1,6 +1,6 @@
<cpu mode='custom' match='exact'>
<model fallback='allow'>486</model>
- <topology sockets='2' dies='1' cores='4' threads='1'/>
+ <topology sockets='2' dies='1' clusters='1' cores='4' threads='1'/>
<feature policy='require' name='de'/>
<feature policy='require' name='tsc'/>
<feature policy='require' name='msr'/>
diff --git a/tests/cputestdata/x86_64-host+guest,models-result.xml b/tests/cputestdata/x86_64-host+guest,models-result.xml
index 42664a48b4..e975d9bc18 100644
--- a/tests/cputestdata/x86_64-host+guest,models-result.xml
+++ b/tests/cputestdata/x86_64-host+guest,models-result.xml
@@ -1,6 +1,6 @@
<cpu mode='custom' match='exact'>
<model fallback='allow'>qemu64</model>
- <topology sockets='2' dies='1' cores='4' threads='1'/>
+ <topology sockets='2' dies='1' clusters='1' cores='4' threads='1'/>
<feature policy='force' name='pbe'/>
<feature policy='force' name='monitor'/>
<feature policy='require' name='ssse3'/>
diff --git a/tests/cputestdata/x86_64-host+guest-result.xml b/tests/cputestdata/x86_64-host+guest-result.xml
index 28e3152cbf..cf41b3f872 100644
--- a/tests/cputestdata/x86_64-host+guest-result.xml
+++ b/tests/cputestdata/x86_64-host+guest-result.xml
@@ -1,6 +1,6 @@
<cpu mode='custom' match='exact'>
<model fallback='allow'>Penryn</model>
- <topology sockets='2' dies='1' cores='4' threads='1'/>
+ <topology sockets='2' dies='1' clusters='1' cores='4' threads='1'/>
<feature policy='require' name='dca'/>
<feature policy='require' name='xtpr'/>
<feature policy='disable' name='sse4.2'/>
diff --git a/tests/cputestdata/x86_64-host+guest.xml b/tests/cputestdata/x86_64-host+guest.xml
index 28e3152cbf..cf41b3f872 100644
--- a/tests/cputestdata/x86_64-host+guest.xml
+++ b/tests/cputestdata/x86_64-host+guest.xml
@@ -1,6 +1,6 @@
<cpu mode='custom' match='exact'>
<model fallback='allow'>Penryn</model>
- <topology sockets='2' dies='1' cores='4' threads='1'/>
+ <topology sockets='2' dies='1' clusters='1' cores='4' threads='1'/>
<feature policy='require' name='dca'/>
<feature policy='require' name='xtpr'/>
<feature policy='disable' name='sse4.2'/>
diff --git a/tests/cputestdata/x86_64-host+host-model-nofallback.xml b/tests/cputestdata/x86_64-host+host-model-nofallback.xml
index 16d6e1daf2..881eea7bd0 100644
--- a/tests/cputestdata/x86_64-host+host-model-nofallback.xml
+++ b/tests/cputestdata/x86_64-host+host-model-nofallback.xml
@@ -1,7 +1,7 @@
<cpu mode='custom' match='exact'>
<model fallback='forbid'>Penryn</model>
<vendor>Intel</vendor>
- <topology sockets='1' dies='1' cores='2' threads='1'/>
+ <topology sockets='1' dies='1' clusters='1' cores='2' threads='1'/>
<feature policy='require' name='dca'/>
<feature policy='require' name='xtpr'/>
<feature policy='require' name='tm2'/>
diff --git a/tests/cputestdata/x86_64-host-Haswell-noTSX+Haswell,haswell-result.xml b/tests/cputestdata/x86_64-host-Haswell-noTSX+Haswell,haswell-result.xml
index 8eda6684a0..67994c62cc 100644
--- a/tests/cputestdata/x86_64-host-Haswell-noTSX+Haswell,haswell-result.xml
+++ b/tests/cputestdata/x86_64-host-Haswell-noTSX+Haswell,haswell-result.xml
@@ -1,6 +1,6 @@
<cpu mode='custom' match='exact'>
<model fallback='allow'>Haswell</model>
- <topology sockets='1' dies='1' cores='2' threads='2'/>
+ <topology sockets='1' dies='1' clusters='1' cores='2' threads='2'/>
<feature policy='disable' name='rtm'/>
<feature policy='disable' name='hle'/>
</cpu>
diff --git a/tests/cputestdata/x86_64-host-Haswell-noTSX+Haswell-noTSX,haswell-result.xml b/tests/cputestdata/x86_64-host-Haswell-noTSX+Haswell-noTSX,haswell-result.xml
index cb02449d60..4804c0b818 100644
--- a/tests/cputestdata/x86_64-host-Haswell-noTSX+Haswell-noTSX,haswell-result.xml
+++ b/tests/cputestdata/x86_64-host-Haswell-noTSX+Haswell-noTSX,haswell-result.xml
@@ -1,6 +1,6 @@
<cpu mode='custom' match='exact'>
<model fallback='allow'>Haswell</model>
- <topology sockets='1' dies='1' cores='2' threads='2'/>
+ <topology sockets='1' dies='1' clusters='1' cores='2' threads='2'/>
<feature policy='disable' name='hle'/>
<feature policy='disable' name='rtm'/>
</cpu>
diff --git a/tests/cputestdata/x86_64-host-Haswell-noTSX+Haswell-noTSX-result.xml b/tests/cputestdata/x86_64-host-Haswell-noTSX+Haswell-noTSX-result.xml
index 7ee926aba8..c21b331248 100644
--- a/tests/cputestdata/x86_64-host-Haswell-noTSX+Haswell-noTSX-result.xml
+++ b/tests/cputestdata/x86_64-host-Haswell-noTSX+Haswell-noTSX-result.xml
@@ -1,4 +1,4 @@
<cpu mode='custom' match='exact'>
<model fallback='allow'>Haswell-noTSX</model>
- <topology sockets='1' dies='1' cores='2' threads='2'/>
+ <topology sockets='1' dies='1' clusters='1' cores='2' threads='2'/>
</cpu>
diff --git a/tests/cputestdata/x86_64-host-worse+guest-result.xml b/tests/cputestdata/x86_64-host-worse+guest-result.xml
index 9d54c66a8f..712c3ad341 100644
--- a/tests/cputestdata/x86_64-host-worse+guest-result.xml
+++ b/tests/cputestdata/x86_64-host-worse+guest-result.xml
@@ -1,6 +1,6 @@
<cpu mode='custom' match='exact'>
<model fallback='allow'>Penryn</model>
- <topology sockets='2' dies='1' cores='4' threads='1'/>
+ <topology sockets='2' dies='1' clusters='1' cores='4' threads='1'/>
<feature policy='disable' name='dca'/>
<feature policy='disable' name='xtpr'/>
<feature policy='disable' name='sse4.2'/>
diff --git a/tests/qemuhotplugtestcpus/ppc64-modern-bulk-result-conf.xml b/tests/qemuhotplugtestcpus/ppc64-modern-bulk-result-conf.xml
index ad11b2f8a6..1a0d28257e 100644
--- a/tests/qemuhotplugtestcpus/ppc64-modern-bulk-result-conf.xml
+++ b/tests/qemuhotplugtestcpus/ppc64-modern-bulk-result-conf.xml
@@ -44,7 +44,7 @@
</os>
<cpu mode='custom' match='exact' check='none'>
<model fallback='forbid'>POWER9</model>
- <topology sockets='1' dies='1' cores='4' threads='8'/>
+ <topology sockets='1' dies='1' clusters='1' cores='4' threads='8'/>
</cpu>
<clock offset='utc'/>
<on_poweroff>destroy</on_poweroff>
diff --git a/tests/qemuhotplugtestcpus/ppc64-modern-bulk-result-live.xml b/tests/qemuhotplugtestcpus/ppc64-modern-bulk-result-live.xml
index 2a3b4a495f..b127883b36 100644
--- a/tests/qemuhotplugtestcpus/ppc64-modern-bulk-result-live.xml
+++ b/tests/qemuhotplugtestcpus/ppc64-modern-bulk-result-live.xml
@@ -44,7 +44,7 @@
</os>
<cpu mode='custom' match='exact' check='none'>
<model fallback='forbid'>POWER9</model>
- <topology sockets='1' dies='1' cores='4' threads='8'/>
+ <topology sockets='1' dies='1' clusters='1' cores='4' threads='8'/>
</cpu>
<clock offset='utc'/>
<on_poweroff>destroy</on_poweroff>
diff --git a/tests/qemuhotplugtestcpus/ppc64-modern-individual-result-conf.xml b/tests/qemuhotplugtestcpus/ppc64-modern-individual-result-conf.xml
index 34aec9b965..29f1a5ac45 100644
--- a/tests/qemuhotplugtestcpus/ppc64-modern-individual-result-conf.xml
+++ b/tests/qemuhotplugtestcpus/ppc64-modern-individual-result-conf.xml
@@ -44,7 +44,7 @@
</os>
<cpu mode='custom' match='exact' check='none'>
<model fallback='forbid'>POWER9</model>
- <topology sockets='1' dies='1' cores='4' threads='8'/>
+ <topology sockets='1' dies='1' clusters='1' cores='4' threads='8'/>
</cpu>
<clock offset='utc'/>
<on_poweroff>destroy</on_poweroff>
diff --git a/tests/qemuhotplugtestcpus/ppc64-modern-individual-result-live.xml b/tests/qemuhotplugtestcpus/ppc64-modern-individual-result-live.xml
index 5ce2cfd0b0..76a85ac9f0 100644
--- a/tests/qemuhotplugtestcpus/ppc64-modern-individual-result-live.xml
+++ b/tests/qemuhotplugtestcpus/ppc64-modern-individual-result-live.xml
@@ -44,7 +44,7 @@
</os>
<cpu mode='custom' match='exact' check='none'>
<model fallback='forbid'>POWER9</model>
- <topology sockets='1' dies='1' cores='4' threads='8'/>
+ <topology sockets='1' dies='1' clusters='1' cores='4' threads='8'/>
</cpu>
<clock offset='utc'/>
<on_poweroff>destroy</on_poweroff>
diff --git a/tests/qemuhotplugtestcpus/x86-modern-bulk-result-conf.xml b/tests/qemuhotplugtestcpus/x86-modern-bulk-result-conf.xml
index 8d52ffedb4..bec46987ff 100644
--- a/tests/qemuhotplugtestcpus/x86-modern-bulk-result-conf.xml
+++ b/tests/qemuhotplugtestcpus/x86-modern-bulk-result-conf.xml
@@ -20,7 +20,7 @@
</os>
<cpu mode='custom' match='exact' check='none'>
<model fallback='forbid'>qemu64</model>
- <topology sockets='4' dies='1' cores='2' threads='1'/>
+ <topology sockets='4' dies='1' clusters='1' cores='2' threads='1'/>
</cpu>
<clock offset='utc'/>
<on_poweroff>destroy</on_poweroff>
diff --git a/tests/qemuhotplugtestcpus/x86-modern-bulk-result-live.xml b/tests/qemuhotplugtestcpus/x86-modern-bulk-result-live.xml
index f416397e33..be9769c686 100644
--- a/tests/qemuhotplugtestcpus/x86-modern-bulk-result-live.xml
+++ b/tests/qemuhotplugtestcpus/x86-modern-bulk-result-live.xml
@@ -20,7 +20,7 @@
</os>
<cpu mode='custom' match='exact' check='none'>
<model fallback='forbid'>qemu64</model>
- <topology sockets='4' dies='1' cores='2' threads='1'/>
+ <topology sockets='4' dies='1' clusters='1' cores='2' threads='1'/>
</cpu>
<clock offset='utc'/>
<on_poweroff>destroy</on_poweroff>
diff --git a/tests/qemuhotplugtestcpus/x86-modern-individual-add-result-conf.xml b/tests/qemuhotplugtestcpus/x86-modern-individual-add-result-conf.xml
index 0bd2af8e43..539f607818 100644
--- a/tests/qemuhotplugtestcpus/x86-modern-individual-add-result-conf.xml
+++ b/tests/qemuhotplugtestcpus/x86-modern-individual-add-result-conf.xml
@@ -20,7 +20,7 @@
</os>
<cpu mode='custom' match='exact' check='none'>
<model fallback='forbid'>qemu64</model>
- <topology sockets='4' dies='1' cores='2' threads='1'/>
+ <topology sockets='4' dies='1' clusters='1' cores='2' threads='1'/>
</cpu>
<clock offset='utc'/>
<on_poweroff>destroy</on_poweroff>
diff --git a/tests/qemuhotplugtestcpus/x86-modern-individual-add-result-live.xml b/tests/qemuhotplugtestcpus/x86-modern-individual-add-result-live.xml
index b31e6ebe55..acbdd3cfd5 100644
--- a/tests/qemuhotplugtestcpus/x86-modern-individual-add-result-live.xml
+++ b/tests/qemuhotplugtestcpus/x86-modern-individual-add-result-live.xml
@@ -20,7 +20,7 @@
</os>
<cpu mode='custom' match='exact' check='none'>
<model fallback='forbid'>qemu64</model>
- <topology sockets='4' dies='1' cores='2' threads='1'/>
+ <topology sockets='4' dies='1' clusters='1' cores='2' threads='1'/>
</cpu>
<clock offset='utc'/>
<on_poweroff>destroy</on_poweroff>
diff --git a/tests/qemuhotplugtestdomains/qemuhotplug-graphics-spice-timeout+graphics-spice-timeout-password.xml b/tests/qemuhotplugtestdomains/qemuhotplug-graphics-spice-timeout+graphics-spice-timeout-password.xml
index 03964ad01c..ee53339338 100644
--- a/tests/qemuhotplugtestdomains/qemuhotplug-graphics-spice-timeout+graphics-spice-timeout-password.xml
+++ b/tests/qemuhotplugtestdomains/qemuhotplug-graphics-spice-timeout+graphics-spice-timeout-password.xml
@@ -18,7 +18,7 @@
<cpu mode='custom' match='exact' check='partial'>
<model fallback='allow'>core2duo</model>
<vendor>Intel</vendor>
- <topology sockets='1' dies='1' cores='2' threads='1'/>
+ <topology sockets='1' dies='1' clusters='1' cores='2' threads='1'/>
<feature policy='require' name='lahf_lm'/>
<feature policy='require' name='xtpr'/>
<feature policy='require' name='cx16'/>
diff --git a/tests/qemuhotplugtestdomains/qemuhotplug-graphics-spice-timeout.xml b/tests/qemuhotplugtestdomains/qemuhotplug-graphics-spice-timeout.xml
index e6b0cc833a..eb9b902fc5 100644
--- a/tests/qemuhotplugtestdomains/qemuhotplug-graphics-spice-timeout.xml
+++ b/tests/qemuhotplugtestdomains/qemuhotplug-graphics-spice-timeout.xml
@@ -18,7 +18,7 @@
<cpu mode='custom' match='exact' check='partial'>
<model fallback='allow'>core2duo</model>
<vendor>Intel</vendor>
- <topology sockets='1' dies='1' cores='2' threads='1'/>
+ <topology sockets='1' dies='1' clusters='1' cores='2' threads='1'/>
<feature policy='require' name='lahf_lm'/>
<feature policy='require' name='xtpr'/>
<feature policy='require' name='cx16'/>
diff --git a/tests/qemuxml2argvdata/fd-memory-no-numa-topology.xml b/tests/qemuxml2argvdata/fd-memory-no-numa-topology.xml
index 2090bb8288..92f418fb88 100644
--- a/tests/qemuxml2argvdata/fd-memory-no-numa-topology.xml
+++ b/tests/qemuxml2argvdata/fd-memory-no-numa-topology.xml
@@ -15,7 +15,7 @@
</os>
<cpu mode='custom' match='exact' check='none'>
<model fallback='forbid'>qemu64</model>
- <topology sockets='8' dies='1' cores='1' threads='1'/>
+ <topology sockets='8' dies='1' clusters='1' cores='1' threads='1'/>
</cpu>
<clock offset='utc'/>
<on_poweroff>destroy</on_poweroff>
diff --git a/tests/qemuxml2argvdata/fd-memory-numa-topology.xml b/tests/qemuxml2argvdata/fd-memory-numa-topology.xml
index 2f94690656..543509d832 100644
--- a/tests/qemuxml2argvdata/fd-memory-numa-topology.xml
+++ b/tests/qemuxml2argvdata/fd-memory-numa-topology.xml
@@ -15,7 +15,7 @@
</os>
<cpu mode='custom' match='exact' check='none'>
<model fallback='forbid'>qemu64</model>
- <topology sockets='1' dies='1' cores='8' threads='1'/>
+ <topology sockets='1' dies='1' clusters='1' cores='8' threads='1'/>
<numa>
<cell id='0' cpus='0-7' memory='14680064' unit='KiB'/>
</numa>
diff --git a/tests/qemuxml2argvdata/fd-memory-numa-topology2.xml b/tests/qemuxml2argvdata/fd-memory-numa-topology2.xml
index 3a4e9b478e..d3b98da3c6 100644
--- a/tests/qemuxml2argvdata/fd-memory-numa-topology2.xml
+++ b/tests/qemuxml2argvdata/fd-memory-numa-topology2.xml
@@ -15,7 +15,7 @@
</os>
<cpu mode='custom' match='exact' check='none'>
<model fallback='forbid'>qemu64</model>
- <topology sockets='1' dies='1' cores='20' threads='1'/>
+ <topology sockets='1' dies='1' clusters='1' cores='20' threads='1'/>
<numa>
<cell id='0' cpus='0-7,16-19' memory='14680064' unit='KiB'/>
<cell id='1' cpus='8-15' memory='14680064' unit='KiB' memAccess='shared'/>
diff --git a/tests/qemuxml2argvdata/fd-memory-numa-topology3.xml b/tests/qemuxml2argvdata/fd-memory-numa-topology3.xml
index 0f7f74283b..459d1b9d1d 100644
--- a/tests/qemuxml2argvdata/fd-memory-numa-topology3.xml
+++ b/tests/qemuxml2argvdata/fd-memory-numa-topology3.xml
@@ -15,7 +15,7 @@
</os>
<cpu mode='custom' match='exact' check='none'>
<model fallback='forbid'>qemu64</model>
- <topology sockets='1' dies='1' cores='32' threads='1'/>
+ <topology sockets='1' dies='1' clusters='1' cores='32' threads='1'/>
<numa>
<cell id='0' cpus='0-1,6-31' memory='14680064' unit='KiB'/>
<cell id='1' cpus='2-3' memory='14680064' unit='KiB' memAccess='shared'/>
diff --git a/tests/qemuxml2argvdata/hugepages-nvdimm.xml b/tests/qemuxml2argvdata/hugepages-nvdimm.xml
index 1a1500895b..b786b0d3dd 100644
--- a/tests/qemuxml2argvdata/hugepages-nvdimm.xml
+++ b/tests/qemuxml2argvdata/hugepages-nvdimm.xml
@@ -17,7 +17,7 @@
</os>
<cpu mode='custom' match='exact' check='none'>
<model fallback='forbid'>qemu64</model>
- <topology sockets='2' dies='1' cores='1' threads='1'/>
+ <topology sockets='2' dies='1' clusters='1' cores='1' threads='1'/>
<numa>
<cell id='0' cpus='0-1' memory='1048576' unit='KiB'/>
</numa>
diff --git a/tests/qemuxml2argvdata/memfd-memory-default-hugepage.xml b/tests/qemuxml2argvdata/memfd-memory-default-hugepage.xml
index 238d4c6b52..a70bd53134 100644
--- a/tests/qemuxml2argvdata/memfd-memory-default-hugepage.xml
+++ b/tests/qemuxml2argvdata/memfd-memory-default-hugepage.xml
@@ -19,7 +19,7 @@
</os>
<cpu mode='custom' match='exact' check='none'>
<model fallback='forbid'>qemu64</model>
- <topology sockets='1' dies='1' cores='8' threads='1'/>
+ <topology sockets='1' dies='1' clusters='1' cores='8' threads='1'/>
<numa>
<cell id='0' cpus='0-7' memory='14680064' unit='KiB'/>
</numa>
diff --git a/tests/qemuxml2argvdata/memfd-memory-numa.xml b/tests/qemuxml2argvdata/memfd-memory-numa.xml
index 1ac87e3aef..0c5d7ba4ef 100644
--- a/tests/qemuxml2argvdata/memfd-memory-numa.xml
+++ b/tests/qemuxml2argvdata/memfd-memory-numa.xml
@@ -22,7 +22,7 @@
</os>
<cpu mode='custom' match='exact' check='none'>
<model fallback='forbid'>qemu64</model>
- <topology sockets='1' dies='1' cores='8' threads='1'/>
+ <topology sockets='1' dies='1' clusters='1' cores='8' threads='1'/>
<numa>
<cell id='0' cpus='0-7' memory='14680064' unit='KiB'/>
</numa>
diff --git a/tests/qemuxml2argvdata/memory-hotplug-nvdimm-access.xml b/tests/qemuxml2argvdata/memory-hotplug-nvdimm-access.xml
index bee0346aca..84baf82bf5 100644
--- a/tests/qemuxml2argvdata/memory-hotplug-nvdimm-access.xml
+++ b/tests/qemuxml2argvdata/memory-hotplug-nvdimm-access.xml
@@ -15,7 +15,7 @@
</idmap>
<cpu mode='custom' match='exact' check='none'>
<model fallback='forbid'>qemu64</model>
- <topology sockets='2' dies='1' cores='1' threads='1'/>
+ <topology sockets='2' dies='1' clusters='1' cores='1' threads='1'/>
<numa>
<cell id='0' cpus='0-1' memory='219136' unit='KiB'/>
</numa>
diff --git a/tests/qemuxml2argvdata/memory-hotplug-nvdimm-align.xml b/tests/qemuxml2argvdata/memory-hotplug-nvdimm-align.xml
index decf87db63..664418e805 100644
--- a/tests/qemuxml2argvdata/memory-hotplug-nvdimm-align.xml
+++ b/tests/qemuxml2argvdata/memory-hotplug-nvdimm-align.xml
@@ -15,7 +15,7 @@
</idmap>
<cpu mode='custom' match='exact' check='none'>
<model fallback='forbid'>qemu64</model>
- <topology sockets='2' dies='1' cores='1' threads='1'/>
+ <topology sockets='2' dies='1' clusters='1' cores='1' threads='1'/>
<numa>
<cell id='0' cpus='0-1' memory='219136' unit='KiB'/>
</numa>
diff --git a/tests/qemuxml2argvdata/memory-hotplug-nvdimm-label.xml b/tests/qemuxml2argvdata/memory-hotplug-nvdimm-label.xml
index 8a0dab3908..f998f7f276 100644
--- a/tests/qemuxml2argvdata/memory-hotplug-nvdimm-label.xml
+++ b/tests/qemuxml2argvdata/memory-hotplug-nvdimm-label.xml
@@ -15,7 +15,7 @@
</idmap>
<cpu mode='custom' match='exact' check='none'>
<model fallback='forbid'>qemu64</model>
- <topology sockets='2' dies='1' cores='1' threads='1'/>
+ <topology sockets='2' dies='1' clusters='1' cores='1' threads='1'/>
<numa>
<cell id='0' cpus='0-1' memory='219136' unit='KiB'/>
</numa>
diff --git a/tests/qemuxml2argvdata/memory-hotplug-nvdimm-pmem.xml b/tests/qemuxml2argvdata/memory-hotplug-nvdimm-pmem.xml
index a712adfe1e..d66481fd35 100644
--- a/tests/qemuxml2argvdata/memory-hotplug-nvdimm-pmem.xml
+++ b/tests/qemuxml2argvdata/memory-hotplug-nvdimm-pmem.xml
@@ -15,7 +15,7 @@
</idmap>
<cpu mode='custom' match='exact' check='none'>
<model fallback='forbid'>qemu64</model>
- <topology sockets='2' dies='1' cores='1' threads='1'/>
+ <topology sockets='2' dies='1' clusters='1' cores='1' threads='1'/>
<numa>
<cell id='0' cpus='0-1' memory='219136' unit='KiB'/>
</numa>
diff --git a/tests/qemuxml2argvdata/memory-hotplug-nvdimm-readonly.xml b/tests/qemuxml2argvdata/memory-hotplug-nvdimm-readonly.xml
index 57629ccb8c..56d6b7b712 100644
--- a/tests/qemuxml2argvdata/memory-hotplug-nvdimm-readonly.xml
+++ b/tests/qemuxml2argvdata/memory-hotplug-nvdimm-readonly.xml
@@ -15,7 +15,7 @@
</idmap>
<cpu mode='custom' match='exact' check='none'>
<model fallback='forbid'>qemu64</model>
- <topology sockets='2' dies='1' cores='1' threads='1'/>
+ <topology sockets='2' dies='1' clusters='1' cores='1' threads='1'/>
<numa>
<cell id='0' cpus='0-1' memory='219136' unit='KiB'/>
</numa>
diff --git a/tests/qemuxml2argvdata/memory-hotplug-nvdimm.xml b/tests/qemuxml2argvdata/memory-hotplug-nvdimm.xml
index 865ddcf0ea..ff6e3b7b0f 100644
--- a/tests/qemuxml2argvdata/memory-hotplug-nvdimm.xml
+++ b/tests/qemuxml2argvdata/memory-hotplug-nvdimm.xml
@@ -15,7 +15,7 @@
</idmap>
<cpu mode='custom' match='exact' check='none'>
<model fallback='forbid'>qemu64</model>
- <topology sockets='2' dies='1' cores='1' threads='1'/>
+ <topology sockets='2' dies='1' clusters='1' cores='1' threads='1'/>
<numa>
<cell id='0' cpus='0-1' memory='1048576' unit='KiB'/>
</numa>
diff --git a/tests/qemuxml2argvdata/memory-hotplug-virtio-mem.xml b/tests/qemuxml2argvdata/memory-hotplug-virtio-mem.xml
index c578209d8a..52fa6b14e9 100644
--- a/tests/qemuxml2argvdata/memory-hotplug-virtio-mem.xml
+++ b/tests/qemuxml2argvdata/memory-hotplug-virtio-mem.xml
@@ -11,7 +11,7 @@
</os>
<cpu mode='custom' match='exact' check='none'>
<model fallback='forbid'>qemu64</model>
- <topology sockets='2' dies='1' cores='1' threads='1'/>
+ <topology sockets='2' dies='1' clusters='1' cores='1' threads='1'/>
<numa>
<cell id='0' cpus='0-1' memory='2095104' unit='KiB'/>
</numa>
diff --git a/tests/qemuxml2argvdata/memory-hotplug-virtio-pmem.xml b/tests/qemuxml2argvdata/memory-hotplug-virtio-pmem.xml
index a8b22dd3c5..2786a739ad 100644
--- a/tests/qemuxml2argvdata/memory-hotplug-virtio-pmem.xml
+++ b/tests/qemuxml2argvdata/memory-hotplug-virtio-pmem.xml
@@ -11,7 +11,7 @@
</os>
<cpu mode='custom' match='exact' check='none'>
<model fallback='forbid'>qemu64</model>
- <topology sockets='2' dies='1' cores='1' threads='1'/>
+ <topology sockets='2' dies='1' clusters='1' cores='1' threads='1'/>
<numa>
<cell id='0' cpus='0-1' memory='2095104' unit='KiB'/>
</numa>
diff --git a/tests/qemuxml2xmloutdata/cpu-numa-disjoint.x86_64-latest.xml b/tests/qemuxml2xmloutdata/cpu-numa-disjoint.x86_64-latest.xml
index fa2ec31463..4f33094949 100644
--- a/tests/qemuxml2xmloutdata/cpu-numa-disjoint.x86_64-latest.xml
+++ b/tests/qemuxml2xmloutdata/cpu-numa-disjoint.x86_64-latest.xml
@@ -10,7 +10,7 @@
</os>
<cpu mode='custom' match='exact' check='none'>
<model fallback='forbid'>qemu64</model>
- <topology sockets='2' dies='1' cores='4' threads='2'/>
+ <topology sockets='2' dies='1' clusters='1' cores='4' threads='2'/>
<numa>
<cell id='0' cpus='0-3,8-11' memory='109550' unit='KiB'/>
<cell id='1' cpus='4-7,12-15' memory='109550' unit='KiB'/>
diff --git a/tests/qemuxml2xmloutdata/cpu-numa-disordered.x86_64-latest.xml b/tests/qemuxml2xmloutdata/cpu-numa-disordered.x86_64-latest.xml
index 1b4d0bfa67..75dcb8c9e2 100644
--- a/tests/qemuxml2xmloutdata/cpu-numa-disordered.x86_64-latest.xml
+++ b/tests/qemuxml2xmloutdata/cpu-numa-disordered.x86_64-latest.xml
@@ -10,7 +10,7 @@
</os>
<cpu mode='custom' match='exact' check='none'>
<model fallback='forbid'>qemu64</model>
- <topology sockets='2' dies='1' cores='4' threads='2'/>
+ <topology sockets='2' dies='1' clusters='1' cores='4' threads='2'/>
<numa>
<cell id='0' cpus='0-5' memory='109550' unit='KiB'/>
<cell id='1' cpus='11-15' memory='109550' unit='KiB'/>
diff --git a/tests/qemuxml2xmloutdata/cpu-numa-memshared.x86_64-latest.xml b/tests/qemuxml2xmloutdata/cpu-numa-memshared.x86_64-latest.xml
index 47ed9efd69..c45e295921 100644
--- a/tests/qemuxml2xmloutdata/cpu-numa-memshared.x86_64-latest.xml
+++ b/tests/qemuxml2xmloutdata/cpu-numa-memshared.x86_64-latest.xml
@@ -10,7 +10,7 @@
</os>
<cpu mode='custom' match='exact' check='none'>
<model fallback='forbid'>qemu64</model>
- <topology sockets='2' dies='1' cores='4' threads='2'/>
+ <topology sockets='2' dies='1' clusters='1' cores='4' threads='2'/>
<numa>
<cell id='0' cpus='0-7' memory='109550' unit='KiB' memAccess='shared'/>
<cell id='1' cpus='8-15' memory='109550' unit='KiB' memAccess='private'/>
diff --git a/tests/qemuxml2xmloutdata/cpu-numa-no-memory-element.x86_64-latest.xml b/tests/qemuxml2xmloutdata/cpu-numa-no-memory-element.x86_64-latest.xml
index 57bbacdff0..663d137ff5 100644
--- a/tests/qemuxml2xmloutdata/cpu-numa-no-memory-element.x86_64-latest.xml
+++ b/tests/qemuxml2xmloutdata/cpu-numa-no-memory-element.x86_64-latest.xml
@@ -10,7 +10,7 @@
</os>
<cpu mode='custom' match='exact' check='none'>
<model fallback='forbid'>qemu64</model>
- <topology sockets='2' dies='1' cores='4' threads='2'/>
+ <topology sockets='2' dies='1' clusters='1' cores='4' threads='2'/>
<numa>
<cell id='0' cpus='0-7' memory='109550' unit='KiB'/>
<cell id='1' cpus='8-15' memory='109550' unit='KiB'/>
diff --git a/tests/qemuxml2xmloutdata/cpu-numa1.x86_64-latest.xml b/tests/qemuxml2xmloutdata/cpu-numa1.x86_64-latest.xml
index 57bbacdff0..663d137ff5 100644
--- a/tests/qemuxml2xmloutdata/cpu-numa1.x86_64-latest.xml
+++ b/tests/qemuxml2xmloutdata/cpu-numa1.x86_64-latest.xml
@@ -10,7 +10,7 @@
</os>
<cpu mode='custom' match='exact' check='none'>
<model fallback='forbid'>qemu64</model>
- <topology sockets='2' dies='1' cores='4' threads='2'/>
+ <topology sockets='2' dies='1' clusters='1' cores='4' threads='2'/>
<numa>
<cell id='0' cpus='0-7' memory='109550' unit='KiB'/>
<cell id='1' cpus='8-15' memory='109550' unit='KiB'/>
diff --git a/tests/qemuxml2xmloutdata/cpu-numa2.x86_64-latest.xml b/tests/qemuxml2xmloutdata/cpu-numa2.x86_64-latest.xml
index 57bbacdff0..663d137ff5 100644
--- a/tests/qemuxml2xmloutdata/cpu-numa2.x86_64-latest.xml
+++ b/tests/qemuxml2xmloutdata/cpu-numa2.x86_64-latest.xml
@@ -10,7 +10,7 @@
</os>
<cpu mode='custom' match='exact' check='none'>
<model fallback='forbid'>qemu64</model>
- <topology sockets='2' dies='1' cores='4' threads='2'/>
+ <topology sockets='2' dies='1' clusters='1' cores='4' threads='2'/>
<numa>
<cell id='0' cpus='0-7' memory='109550' unit='KiB'/>
<cell id='1' cpus='8-15' memory='109550' unit='KiB'/>
diff --git a/tests/qemuxml2xmloutdata/memory-hotplug-dimm-addr.x86_64-latest.xml b/tests/qemuxml2xmloutdata/memory-hotplug-dimm-addr.x86_64-latest.xml
index 0a32d5491a..38b41e6719 100644
--- a/tests/qemuxml2xmloutdata/memory-hotplug-dimm-addr.x86_64-latest.xml
+++ b/tests/qemuxml2xmloutdata/memory-hotplug-dimm-addr.x86_64-latest.xml
@@ -11,7 +11,7 @@
</os>
<cpu mode='custom' match='exact' check='none'>
<model fallback='forbid'>qemu64</model>
- <topology sockets='2' dies='1' cores='1' threads='1'/>
+ <topology sockets='2' dies='1' clusters='1' cores='1' threads='1'/>
<numa>
<cell id='0' cpus='0-1' memory='219136' unit='KiB'/>
</numa>
diff --git a/tests/qemuxml2xmloutdata/memory-hotplug-dimm.x86_64-latest.xml b/tests/qemuxml2xmloutdata/memory-hotplug-dimm.x86_64-latest.xml
index 7c1b7b2c5d..7f0dc85c0e 100644
--- a/tests/qemuxml2xmloutdata/memory-hotplug-dimm.x86_64-latest.xml
+++ b/tests/qemuxml2xmloutdata/memory-hotplug-dimm.x86_64-latest.xml
@@ -15,7 +15,7 @@
</idmap>
<cpu mode='custom' match='exact' check='none'>
<model fallback='forbid'>qemu64</model>
- <topology sockets='2' dies='1' cores='1' threads='1'/>
+ <topology sockets='2' dies='1' clusters='1' cores='1' threads='1'/>
<numa>
<cell id='0' cpus='0-1' memory='219136' unit='KiB'/>
</numa>
diff --git a/tests/qemuxml2xmloutdata/memory-hotplug-multiple.x86_64-latest.xml b/tests/qemuxml2xmloutdata/memory-hotplug-multiple.x86_64-latest.xml
index 42b0f7b880..b3306fb569 100644
--- a/tests/qemuxml2xmloutdata/memory-hotplug-multiple.x86_64-latest.xml
+++ b/tests/qemuxml2xmloutdata/memory-hotplug-multiple.x86_64-latest.xml
@@ -11,7 +11,7 @@
</os>
<cpu mode='custom' match='exact' check='none'>
<model fallback='forbid'>qemu64</model>
- <topology sockets='2' dies='1' cores='1' threads='1'/>
+ <topology sockets='2' dies='1' clusters='1' cores='1' threads='1'/>
<numa>
<cell id='0' cpus='0-1' memory='2095104' unit='KiB'/>
</numa>
diff --git a/tests/qemuxml2xmloutdata/memory-hotplug-nvdimm-ppc64-abi-update.ppc64-latest.xml b/tests/qemuxml2xmloutdata/memory-hotplug-nvdimm-ppc64-abi-update.ppc64-latest.xml
index ae157c4849..4cc0c674df 100644
--- a/tests/qemuxml2xmloutdata/memory-hotplug-nvdimm-ppc64-abi-update.ppc64-latest.xml
+++ b/tests/qemuxml2xmloutdata/memory-hotplug-nvdimm-ppc64-abi-update.ppc64-latest.xml
@@ -11,7 +11,7 @@
</os>
<cpu mode='custom' match='exact' check='none'>
<model fallback='forbid'>POWER9</model>
- <topology sockets='2' dies='1' cores='1' threads='1'/>
+ <topology sockets='2' dies='1' clusters='1' cores='1' threads='1'/>
<numa>
<cell id='0' cpus='0-1' memory='1048576' unit='KiB'/>
</numa>
diff --git a/tests/qemuxml2xmloutdata/memory-hotplug-nvdimm-ppc64.ppc64-latest.xml b/tests/qemuxml2xmloutdata/memory-hotplug-nvdimm-ppc64.ppc64-latest.xml
index 3c1cbc731d..a5c26e3c5b 100644
--- a/tests/qemuxml2xmloutdata/memory-hotplug-nvdimm-ppc64.ppc64-latest.xml
+++ b/tests/qemuxml2xmloutdata/memory-hotplug-nvdimm-ppc64.ppc64-latest.xml
@@ -11,7 +11,7 @@
</os>
<cpu mode='custom' match='exact' check='none'>
<model fallback='forbid'>POWER9</model>
- <topology sockets='2' dies='1' cores='1' threads='1'/>
+ <topology sockets='2' dies='1' clusters='1' cores='1' threads='1'/>
<numa>
<cell id='0' cpus='0-1' memory='1048576' unit='KiB'/>
</numa>
diff --git a/tests/qemuxml2xmloutdata/memory-hotplug.x86_64-latest.xml b/tests/qemuxml2xmloutdata/memory-hotplug.x86_64-latest.xml
index 083102e8d6..697819387f 100644
--- a/tests/qemuxml2xmloutdata/memory-hotplug.x86_64-latest.xml
+++ b/tests/qemuxml2xmloutdata/memory-hotplug.x86_64-latest.xml
@@ -11,7 +11,7 @@
</os>
<cpu mode='custom' match='exact' check='none'>
<model fallback='forbid'>qemu64</model>
- <topology sockets='2' dies='1' cores='1' threads='1'/>
+ <topology sockets='2' dies='1' clusters='1' cores='1' threads='1'/>
<numa>
<cell id='0' cpus='0-1' memory='219136' unit='KiB'/>
</numa>
diff --git a/tests/qemuxml2xmloutdata/numad-auto-memory-vcpu-cpuset.x86_64-latest.xml b/tests/qemuxml2xmloutdata/numad-auto-memory-vcpu-cpuset.x86_64-latest.xml
index 2d04bc23c2..6068a76464 100644
--- a/tests/qemuxml2xmloutdata/numad-auto-memory-vcpu-cpuset.x86_64-latest.xml
+++ b/tests/qemuxml2xmloutdata/numad-auto-memory-vcpu-cpuset.x86_64-latest.xml
@@ -13,7 +13,7 @@
</os>
<cpu mode='custom' match='exact' check='none'>
<model fallback='forbid'>qemu64</model>
- <topology sockets='2' dies='1' cores='1' threads='1'/>
+ <topology sockets='2' dies='1' clusters='1' cores='1' threads='1'/>
</cpu>
<clock offset='utc'/>
<on_poweroff>destroy</on_poweroff>
diff --git a/tests/qemuxml2xmloutdata/numad-auto-memory-vcpu-no-cpuset-and-placement.x86_64-latest.xml b/tests/qemuxml2xmloutdata/numad-auto-memory-vcpu-no-cpuset-and-placement.x86_64-latest.xml
index 80f7284126..6c558526e9 100644
--- a/tests/qemuxml2xmloutdata/numad-auto-memory-vcpu-no-cpuset-and-placement.x86_64-latest.xml
+++ b/tests/qemuxml2xmloutdata/numad-auto-memory-vcpu-no-cpuset-and-placement.x86_64-latest.xml
@@ -13,7 +13,7 @@
</os>
<cpu mode='custom' match='exact' check='none'>
<model fallback='forbid'>qemu64</model>
- <topology sockets='2' dies='1' cores='1' threads='1'/>
+ <topology sockets='2' dies='1' clusters='1' cores='1' threads='1'/>
</cpu>
<clock offset='utc'/>
<on_poweroff>destroy</on_poweroff>
diff --git a/tests/qemuxml2xmloutdata/numad-auto-vcpu-no-numatune.x86_64-latest.xml b/tests/qemuxml2xmloutdata/numad-auto-vcpu-no-numatune.x86_64-latest.xml
index 724209f6e3..6e1fecb488 100644
--- a/tests/qemuxml2xmloutdata/numad-auto-vcpu-no-numatune.x86_64-latest.xml
+++ b/tests/qemuxml2xmloutdata/numad-auto-vcpu-no-numatune.x86_64-latest.xml
@@ -13,7 +13,7 @@
</os>
<cpu mode='custom' match='exact' check='none'>
<model fallback='forbid'>qemu64</model>
- <topology sockets='2' dies='1' cores='1' threads='1'/>
+ <topology sockets='2' dies='1' clusters='1' cores='1' threads='1'/>
</cpu>
<clock offset='utc'/>
<on_poweroff>destroy</on_poweroff>
diff --git a/tests/qemuxml2xmloutdata/numad-static-vcpu-no-numatune.x86_64-latest.xml b/tests/qemuxml2xmloutdata/numad-static-vcpu-no-numatune.x86_64-latest.xml
index 2a4ee0d496..c42d7066f9 100644
--- a/tests/qemuxml2xmloutdata/numad-static-vcpu-no-numatune.x86_64-latest.xml
+++ b/tests/qemuxml2xmloutdata/numad-static-vcpu-no-numatune.x86_64-latest.xml
@@ -10,7 +10,7 @@
</os>
<cpu mode='custom' match='exact' check='none'>
<model fallback='forbid'>qemu64</model>
- <topology sockets='2' dies='1' cores='1' threads='1'/>
+ <topology sockets='2' dies='1' clusters='1' cores='1' threads='1'/>
</cpu>
<clock offset='utc'/>
<on_poweroff>destroy</on_poweroff>
diff --git a/tests/qemuxml2xmloutdata/pci-expander-bus.x86_64-latest.xml b/tests/qemuxml2xmloutdata/pci-expander-bus.x86_64-latest.xml
index b63c8c145a..2a6c329a40 100644
--- a/tests/qemuxml2xmloutdata/pci-expander-bus.x86_64-latest.xml
+++ b/tests/qemuxml2xmloutdata/pci-expander-bus.x86_64-latest.xml
@@ -10,7 +10,7 @@
</os>
<cpu mode='custom' match='exact' check='none'>
<model fallback='forbid'>qemu64</model>
- <topology sockets='2' dies='1' cores='4' threads='2'/>
+ <topology sockets='2' dies='1' clusters='1' cores='4' threads='2'/>
<numa>
<cell id='0' cpus='0-7' memory='109550' unit='KiB'/>
<cell id='1' cpus='8-15' memory='109550' unit='KiB'/>
diff --git a/tests/qemuxml2xmloutdata/pcie-expander-bus.x86_64-latest.xml b/tests/qemuxml2xmloutdata/pcie-expander-bus.x86_64-latest.xml
index a441be8ebe..99612740b2 100644
--- a/tests/qemuxml2xmloutdata/pcie-expander-bus.x86_64-latest.xml
+++ b/tests/qemuxml2xmloutdata/pcie-expander-bus.x86_64-latest.xml
@@ -10,7 +10,7 @@
</os>
<cpu mode='custom' match='exact' check='none'>
<model fallback='forbid'>qemu64</model>
- <topology sockets='2' dies='1' cores='4' threads='2'/>
+ <topology sockets='2' dies='1' clusters='1' cores='4' threads='2'/>
<numa>
<cell id='0' cpus='0-7' memory='109550' unit='KiB'/>
<cell id='1' cpus='8-15' memory='109550' unit='KiB'/>
diff --git a/tests/qemuxml2xmloutdata/pseries-phb-numa-node.ppc64-latest.xml b/tests/qemuxml2xmloutdata/pseries-phb-numa-node.ppc64-latest.xml
index 59015846fb..0a044f50b0 100644
--- a/tests/qemuxml2xmloutdata/pseries-phb-numa-node.ppc64-latest.xml
+++ b/tests/qemuxml2xmloutdata/pseries-phb-numa-node.ppc64-latest.xml
@@ -14,7 +14,7 @@
</os>
<cpu mode='custom' match='exact' check='none'>
<model fallback='forbid'>POWER9</model>
- <topology sockets='2' dies='1' cores='1' threads='4'/>
+ <topology sockets='2' dies='1' clusters='1' cores='1' threads='4'/>
<numa>
<cell id='0' cpus='0-3' memory='1048576' unit='KiB'/>
<cell id='1' cpus='4-7' memory='1048576' unit='KiB'/>
diff --git a/tests/vmx2xmldata/esx-in-the-wild-10.xml b/tests/vmx2xmldata/esx-in-the-wild-10.xml
index 47ed637920..78129682bd 100644
--- a/tests/vmx2xmldata/esx-in-the-wild-10.xml
+++ b/tests/vmx2xmldata/esx-in-the-wild-10.xml
@@ -12,7 +12,7 @@
<type arch='x86_64'>hvm</type>
</os>
<cpu>
- <topology sockets='1' dies='1' cores='2' threads='1'/>
+ <topology sockets='1' dies='1' clusters='1' cores='2' threads='1'/>
</cpu>
<clock offset='utc'/>
<on_poweroff>destroy</on_poweroff>
diff --git a/tests/vmx2xmldata/esx-in-the-wild-8.xml b/tests/vmx2xmldata/esx-in-the-wild-8.xml
index 0eea610709..47d22ced2a 100644
--- a/tests/vmx2xmldata/esx-in-the-wild-8.xml
+++ b/tests/vmx2xmldata/esx-in-the-wild-8.xml
@@ -11,7 +11,7 @@
<type arch='x86_64'>hvm</type>
</os>
<cpu>
- <topology sockets='4' dies='1' cores='2' threads='1'/>
+ <topology sockets='4' dies='1' clusters='1' cores='2' threads='1'/>
</cpu>
<clock offset='utc'/>
<on_poweroff>destroy</on_poweroff>
diff --git a/tests/vmx2xmldata/esx-in-the-wild-9.xml b/tests/vmx2xmldata/esx-in-the-wild-9.xml
index 66eca400dd..ee6be2527f 100644
--- a/tests/vmx2xmldata/esx-in-the-wild-9.xml
+++ b/tests/vmx2xmldata/esx-in-the-wild-9.xml
@@ -12,7 +12,7 @@
<type arch='x86_64'>hvm</type>
</os>
<cpu>
- <topology sockets='4' dies='1' cores='4' threads='1'/>
+ <topology sockets='4' dies='1' clusters='1' cores='4' threads='1'/>
</cpu>
<clock offset='utc'/>
<on_poweroff>destroy</on_poweroff>
--
2.27.0

From 6a7873a8facab024a6192a7d1199877ccb22168d Mon Sep 17 00:00:00 2001
From: Andrea Bolognani <abologna@redhat.com>
Date: Fri, 5 Jan 2024 16:03:54 +0100
Subject: [PATCH] conf: Report CPU clusters in capabilities XML
For machines that don't expose useful information through sysfs,
the dummy ID 0 is used.
https://issues.redhat.com/browse/RHEL-7043
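
After this change every <cpu> entry under a NUMA <cell> in the capabilities XML carries a cluster_id attribute next to socket_id, die_id and core_id; an abbreviated fragment (two of the four CPUs shown), taken from the aarch64 test data updated below:

  <cpus num='4'>
    <cpu id='0' socket_id='36' die_id='0' cluster_id='0' core_id='0' siblings='0-1'/>
    <cpu id='2' socket_id='36' die_id='0' cluster_id='1' core_id='1' siblings='2-3'/>
  </cpus>

On hosts whose kernel does not expose topology/cluster_id in sysfs, or where the value is -1, the dummy ID 0 is reported.
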
Signed-off-by: Andrea Bolognani <abologna@redhat.com>
Reviewed-by: Peter Krempa <pkrempa@redhat.com>
---
src/conf/capabilities.c | 5 +-
src/conf/capabilities.h | 1 +
src/conf/schemas/capability.rng | 3 ++
src/libvirt_linux.syms | 1 +
src/util/virhostcpu.c | 22 +++++++++
src/util/virhostcpu.h | 1 +
tests/capabilityschemadata/caps-qemu-kvm.xml | 32 ++++++-------
.../vircaps-aarch64-basic-clusters.xml | 16 +++----
.../vircaps2xmldata/vircaps-aarch64-basic.xml | 32 ++++++-------
.../vircaps-x86_64-basic-dies.xml | 24 +++++-----
.../vircaps2xmldata/vircaps-x86_64-basic.xml | 32 ++++++-------
.../vircaps2xmldata/vircaps-x86_64-caches.xml | 16 +++----
tests/vircaps2xmldata/vircaps-x86_64-hmat.xml | 48 +++++++++----------
.../vircaps-x86_64-resctrl-cdp.xml | 24 +++++-----
.../vircaps-x86_64-resctrl-cmt.xml | 24 +++++-----
.../vircaps-x86_64-resctrl-fake-feature.xml | 24 +++++-----
.../vircaps-x86_64-resctrl-skx-twocaches.xml | 2 +-
.../vircaps-x86_64-resctrl-skx.xml | 2 +-
.../vircaps-x86_64-resctrl.xml | 24 +++++-----
19 files changed, 182 insertions(+), 151 deletions(-)
diff --git a/src/conf/capabilities.c b/src/conf/capabilities.c
index 32badee7b3..02298e40a3 100644
--- a/src/conf/capabilities.c
+++ b/src/conf/capabilities.c
@@ -811,9 +811,10 @@ virCapsHostNUMACellCPUFormat(virBuffer *buf,
return -1;
virBufferAsprintf(&childBuf,
- " socket_id='%d' die_id='%d' core_id='%d' siblings='%s'",
+ " socket_id='%d' die_id='%d' cluster_id='%d' core_id='%d' siblings='%s'",
cpus[j].socket_id,
cpus[j].die_id,
+ cpus[j].cluster_id,
cpus[j].core_id,
siblings);
}
@@ -1453,6 +1454,7 @@ virCapabilitiesFillCPUInfo(int cpu_id G_GNUC_UNUSED,
if (virHostCPUGetSocket(cpu_id, &cpu->socket_id) < 0 ||
virHostCPUGetDie(cpu_id, &cpu->die_id) < 0 ||
+ virHostCPUGetCluster(cpu_id, &cpu->cluster_id) < 0 ||
virHostCPUGetCore(cpu_id, &cpu->core_id) < 0)
return -1;
@@ -1712,6 +1714,7 @@ virCapabilitiesHostNUMAInitFake(virCapsHostNUMA *caps)
if (tmp) {
cpus[cid].id = id;
cpus[cid].die_id = 0;
+ cpus[cid].cluster_id = 0;
cpus[cid].socket_id = s;
cpus[cid].core_id = c;
cpus[cid].siblings = virBitmapNewCopy(siblings);
diff --git a/src/conf/capabilities.h b/src/conf/capabilities.h
index 9eaf6e2807..52e395de14 100644
--- a/src/conf/capabilities.h
+++ b/src/conf/capabilities.h
@@ -89,6 +89,7 @@ struct _virCapsHostNUMACellCPU {
unsigned int id;
unsigned int socket_id;
unsigned int die_id;
+ unsigned int cluster_id;
unsigned int core_id;
virBitmap *siblings;
};
diff --git a/src/conf/schemas/capability.rng b/src/conf/schemas/capability.rng
index b1968df258..a1606941e7 100644
--- a/src/conf/schemas/capability.rng
+++ b/src/conf/schemas/capability.rng
@@ -201,6 +201,9 @@
<attribute name="die_id">
<ref name="unsignedInt"/>
</attribute>
+ <attribute name="cluster_id">
+ <ref name="unsignedInt"/>
+ </attribute>
<attribute name="core_id">
<ref name="unsignedInt"/>
</attribute>
diff --git a/src/libvirt_linux.syms b/src/libvirt_linux.syms
index 55649ae39c..004cbfee97 100644
--- a/src/libvirt_linux.syms
+++ b/src/libvirt_linux.syms
@@ -3,6 +3,7 @@
#
# util/virhostcpu.h
+virHostCPUGetCluster;
virHostCPUGetCore;
virHostCPUGetDie;
virHostCPUGetInfoPopulateLinux;
diff --git a/src/util/virhostcpu.c b/src/util/virhostcpu.c
index 4027547e1e..a3781ca870 100644
--- a/src/util/virhostcpu.c
+++ b/src/util/virhostcpu.c
@@ -232,6 +232,28 @@ virHostCPUGetDie(unsigned int cpu, unsigned int *die)
return 0;
}
+int
+virHostCPUGetCluster(unsigned int cpu, unsigned int *cluster)
+{
+ int cluster_id;
+ int ret = virFileReadValueInt(&cluster_id,
+ "%s/cpu/cpu%u/topology/cluster_id",
+ SYSFS_SYSTEM_PATH, cpu);
+
+ if (ret == -1)
+ return -1;
+
+ /* If the file doesn't exist (old kernel) or the value contained
+ * in it is -1 (architecture without CPU clusters), report 0 to
+ * indicate the lack of information */
+ if (ret == -2 || cluster_id < 0)
+ cluster_id = 0;
+
+ *cluster = cluster_id;
+
+ return 0;
+}
+
int
virHostCPUGetCore(unsigned int cpu, unsigned int *core)
{
diff --git a/src/util/virhostcpu.h b/src/util/virhostcpu.h
index 5f0d43e069..d7e09bff22 100644
--- a/src/util/virhostcpu.h
+++ b/src/util/virhostcpu.h
@@ -68,6 +68,7 @@ int virHostCPUStatsAssign(virNodeCPUStatsPtr param,
#ifdef __linux__
int virHostCPUGetSocket(unsigned int cpu, unsigned int *socket);
int virHostCPUGetDie(unsigned int cpu, unsigned int *die);
+int virHostCPUGetCluster(unsigned int cpu, unsigned int *cluster);
int virHostCPUGetCore(unsigned int cpu, unsigned int *core);
virBitmap *virHostCPUGetSiblingsList(unsigned int cpu);
diff --git a/tests/capabilityschemadata/caps-qemu-kvm.xml b/tests/capabilityschemadata/caps-qemu-kvm.xml
index acdbb362cc..317fa0885f 100644
--- a/tests/capabilityschemadata/caps-qemu-kvm.xml
+++ b/tests/capabilityschemadata/caps-qemu-kvm.xml
@@ -64,14 +64,14 @@
<sibling id='1' value='21'/>
</distances>
<cpus num='8'>
- <cpu id='0' socket_id='0' die_id='0' core_id='0' siblings='0'/>
- <cpu id='2' socket_id='0' die_id='0' core_id='1' siblings='2'/>
- <cpu id='4' socket_id='0' die_id='0' core_id='2' siblings='4'/>
- <cpu id='6' socket_id='0' die_id='0' core_id='3' siblings='6'/>
- <cpu id='8' socket_id='0' die_id='0' core_id='4' siblings='8'/>
- <cpu id='10' socket_id='0' die_id='0' core_id='5' siblings='10'/>
- <cpu id='12' socket_id='0' die_id='0' core_id='6' siblings='12'/>
- <cpu id='14' socket_id='0' die_id='0' core_id='7' siblings='14'/>
+ <cpu id='0' socket_id='0' die_id='0' cluster_id='0' core_id='0' siblings='0'/>
+ <cpu id='2' socket_id='0' die_id='0' cluster_id='0' core_id='1' siblings='2'/>
+ <cpu id='4' socket_id='0' die_id='0' cluster_id='0' core_id='2' siblings='4'/>
+ <cpu id='6' socket_id='0' die_id='0' cluster_id='0' core_id='3' siblings='6'/>
+ <cpu id='8' socket_id='0' die_id='0' cluster_id='0' core_id='4' siblings='8'/>
+ <cpu id='10' socket_id='0' die_id='0' cluster_id='0' core_id='5' siblings='10'/>
+ <cpu id='12' socket_id='0' die_id='0' cluster_id='0' core_id='6' siblings='12'/>
+ <cpu id='14' socket_id='0' die_id='0' cluster_id='0' core_id='7' siblings='14'/>
</cpus>
</cell>
<cell id='1'>
@@ -84,14 +84,14 @@
<sibling id='1' value='10'/>
</distances>
<cpus num='8'>
- <cpu id='1' socket_id='1' die_id='0' core_id='0' siblings='1'/>
- <cpu id='3' socket_id='1' die_id='0' core_id='1' siblings='3'/>
- <cpu id='5' socket_id='1' die_id='0' core_id='2' siblings='5'/>
- <cpu id='7' socket_id='1' die_id='0' core_id='3' siblings='7'/>
- <cpu id='9' socket_id='1' die_id='0' core_id='4' siblings='9'/>
- <cpu id='11' socket_id='1' die_id='0' core_id='5' siblings='11'/>
- <cpu id='13' socket_id='1' die_id='0' core_id='6' siblings='13'/>
- <cpu id='15' socket_id='1' die_id='0' core_id='7' siblings='15'/>
+ <cpu id='1' socket_id='1' die_id='0' cluster_id='0' core_id='0' siblings='1'/>
+ <cpu id='3' socket_id='1' die_id='0' cluster_id='0' core_id='1' siblings='3'/>
+ <cpu id='5' socket_id='1' die_id='0' cluster_id='0' core_id='2' siblings='5'/>
+ <cpu id='7' socket_id='1' die_id='0' cluster_id='0' core_id='3' siblings='7'/>
+ <cpu id='9' socket_id='1' die_id='0' cluster_id='0' core_id='4' siblings='9'/>
+ <cpu id='11' socket_id='1' die_id='0' cluster_id='0' core_id='5' siblings='11'/>
+ <cpu id='13' socket_id='1' die_id='0' cluster_id='0' core_id='6' siblings='13'/>
+ <cpu id='15' socket_id='1' die_id='0' cluster_id='0' core_id='7' siblings='15'/>
</cpus>
</cell>
</cells>
diff --git a/tests/vircaps2xmldata/vircaps-aarch64-basic-clusters.xml b/tests/vircaps2xmldata/vircaps-aarch64-basic-clusters.xml
index fe61fc42cc..b37c8e7a20 100644
--- a/tests/vircaps2xmldata/vircaps-aarch64-basic-clusters.xml
+++ b/tests/vircaps2xmldata/vircaps-aarch64-basic-clusters.xml
@@ -14,10 +14,10 @@
<pages unit='KiB' size='2048'>4096</pages>
<pages unit='KiB' size='1048576'>6144</pages>
<cpus num='4'>
- <cpu id='0' socket_id='36' die_id='0' core_id='0' siblings='0-1'/>
- <cpu id='1' socket_id='36' die_id='0' core_id='0' siblings='0-1'/>
- <cpu id='2' socket_id='36' die_id='0' core_id='1' siblings='2-3'/>
- <cpu id='3' socket_id='36' die_id='0' core_id='1' siblings='2-3'/>
+ <cpu id='0' socket_id='36' die_id='0' cluster_id='0' core_id='0' siblings='0-1'/>
+ <cpu id='1' socket_id='36' die_id='0' cluster_id='0' core_id='0' siblings='0-1'/>
+ <cpu id='2' socket_id='36' die_id='0' cluster_id='1' core_id='1' siblings='2-3'/>
+ <cpu id='3' socket_id='36' die_id='0' cluster_id='1' core_id='1' siblings='2-3'/>
</cpus>
</cell>
<cell id='1'>
@@ -26,10 +26,10 @@
<pages unit='KiB' size='2048'>6144</pages>
<pages unit='KiB' size='1048576'>8192</pages>
<cpus num='4'>
- <cpu id='4' socket_id='3180' die_id='0' core_id='256' siblings='4-5'/>
- <cpu id='5' socket_id='3180' die_id='0' core_id='256' siblings='4-5'/>
- <cpu id='6' socket_id='3180' die_id='0' core_id='257' siblings='6-7'/>
- <cpu id='7' socket_id='3180' die_id='0' core_id='257' siblings='6-7'/>
+ <cpu id='4' socket_id='3180' die_id='0' cluster_id='256' core_id='256' siblings='4-5'/>
+ <cpu id='5' socket_id='3180' die_id='0' cluster_id='256' core_id='256' siblings='4-5'/>
+ <cpu id='6' socket_id='3180' die_id='0' cluster_id='257' core_id='257' siblings='6-7'/>
+ <cpu id='7' socket_id='3180' die_id='0' cluster_id='257' core_id='257' siblings='6-7'/>
</cpus>
</cell>
</cells>
diff --git a/tests/vircaps2xmldata/vircaps-aarch64-basic.xml b/tests/vircaps2xmldata/vircaps-aarch64-basic.xml
index 0a04052c40..5533ae0586 100644
--- a/tests/vircaps2xmldata/vircaps-aarch64-basic.xml
+++ b/tests/vircaps2xmldata/vircaps-aarch64-basic.xml
@@ -16,10 +16,10 @@
<pages unit='KiB' size='2048'>4096</pages>
<pages unit='KiB' size='1048576'>6144</pages>
<cpus num='4'>
- <cpu id='0' socket_id='0' die_id='0' core_id='0' siblings='0'/>
- <cpu id='1' socket_id='0' die_id='0' core_id='1' siblings='1'/>
- <cpu id='2' socket_id='0' die_id='0' core_id='2' siblings='2'/>
- <cpu id='3' socket_id='0' die_id='0' core_id='3' siblings='3'/>
+ <cpu id='0' socket_id='0' die_id='0' cluster_id='0' core_id='0' siblings='0'/>
+ <cpu id='1' socket_id='0' die_id='0' cluster_id='0' core_id='1' siblings='1'/>
+ <cpu id='2' socket_id='0' die_id='0' cluster_id='0' core_id='2' siblings='2'/>
+ <cpu id='3' socket_id='0' die_id='0' cluster_id='0' core_id='3' siblings='3'/>
</cpus>
</cell>
<cell id='1'>
@@ -28,10 +28,10 @@
<pages unit='KiB' size='2048'>6144</pages>
<pages unit='KiB' size='1048576'>8192</pages>
<cpus num='4'>
- <cpu id='4' socket_id='1' die_id='0' core_id='4' siblings='4'/>
- <cpu id='5' socket_id='1' die_id='0' core_id='5' siblings='5'/>
- <cpu id='6' socket_id='1' die_id='0' core_id='6' siblings='6'/>
- <cpu id='7' socket_id='1' die_id='0' core_id='7' siblings='7'/>
+ <cpu id='4' socket_id='1' die_id='0' cluster_id='0' core_id='4' siblings='4'/>
+ <cpu id='5' socket_id='1' die_id='0' cluster_id='0' core_id='5' siblings='5'/>
+ <cpu id='6' socket_id='1' die_id='0' cluster_id='0' core_id='6' siblings='6'/>
+ <cpu id='7' socket_id='1' die_id='0' cluster_id='0' core_id='7' siblings='7'/>
</cpus>
</cell>
<cell id='2'>
@@ -40,10 +40,10 @@
<pages unit='KiB' size='2048'>8192</pages>
<pages unit='KiB' size='1048576'>10240</pages>
<cpus num='4'>
- <cpu id='8' socket_id='2' die_id='0' core_id='8' siblings='8'/>
- <cpu id='9' socket_id='2' die_id='0' core_id='9' siblings='9'/>
- <cpu id='10' socket_id='2' die_id='0' core_id='10' siblings='10'/>
- <cpu id='11' socket_id='2' die_id='0' core_id='11' siblings='11'/>
+ <cpu id='8' socket_id='2' die_id='0' cluster_id='0' core_id='8' siblings='8'/>
+ <cpu id='9' socket_id='2' die_id='0' cluster_id='0' core_id='9' siblings='9'/>
+ <cpu id='10' socket_id='2' die_id='0' cluster_id='0' core_id='10' siblings='10'/>
+ <cpu id='11' socket_id='2' die_id='0' cluster_id='0' core_id='11' siblings='11'/>
</cpus>
</cell>
<cell id='3'>
@@ -52,10 +52,10 @@
<pages unit='KiB' size='2048'>10240</pages>
<pages unit='KiB' size='1048576'>12288</pages>
<cpus num='4'>
- <cpu id='12' socket_id='3' die_id='0' core_id='12' siblings='12'/>
- <cpu id='13' socket_id='3' die_id='0' core_id='13' siblings='13'/>
- <cpu id='14' socket_id='3' die_id='0' core_id='14' siblings='14'/>
- <cpu id='15' socket_id='3' die_id='0' core_id='15' siblings='15'/>
+ <cpu id='12' socket_id='3' die_id='0' cluster_id='0' core_id='12' siblings='12'/>
+ <cpu id='13' socket_id='3' die_id='0' cluster_id='0' core_id='13' siblings='13'/>
+ <cpu id='14' socket_id='3' die_id='0' cluster_id='0' core_id='14' siblings='14'/>
+ <cpu id='15' socket_id='3' die_id='0' cluster_id='0' core_id='15' siblings='15'/>
</cpus>
</cell>
</cells>
diff --git a/tests/vircaps2xmldata/vircaps-x86_64-basic-dies.xml b/tests/vircaps2xmldata/vircaps-x86_64-basic-dies.xml
index 8a3ca2d13c..c86dc4defc 100644
--- a/tests/vircaps2xmldata/vircaps-x86_64-basic-dies.xml
+++ b/tests/vircaps2xmldata/vircaps-x86_64-basic-dies.xml
@@ -14,18 +14,18 @@
<pages unit='KiB' size='2048'>4096</pages>
<pages unit='KiB' size='1048576'>6144</pages>
<cpus num='12'>
- <cpu id='0' socket_id='0' die_id='0' core_id='0' siblings='0'/>
- <cpu id='1' socket_id='0' die_id='0' core_id='1' siblings='1'/>
- <cpu id='2' socket_id='0' die_id='1' core_id='0' siblings='2'/>
- <cpu id='3' socket_id='0' die_id='1' core_id='1' siblings='3'/>
- <cpu id='4' socket_id='0' die_id='2' core_id='0' siblings='4'/>
- <cpu id='5' socket_id='0' die_id='2' core_id='1' siblings='5'/>
- <cpu id='6' socket_id='1' die_id='0' core_id='0' siblings='6'/>
- <cpu id='7' socket_id='1' die_id='0' core_id='1' siblings='7'/>
- <cpu id='8' socket_id='1' die_id='1' core_id='0' siblings='8'/>
- <cpu id='9' socket_id='1' die_id='1' core_id='1' siblings='9'/>
- <cpu id='10' socket_id='1' die_id='2' core_id='0' siblings='10'/>
- <cpu id='11' socket_id='1' die_id='2' core_id='1' siblings='11'/>
+ <cpu id='0' socket_id='0' die_id='0' cluster_id='0' core_id='0' siblings='0'/>
+ <cpu id='1' socket_id='0' die_id='0' cluster_id='0' core_id='1' siblings='1'/>
+ <cpu id='2' socket_id='0' die_id='1' cluster_id='0' core_id='0' siblings='2'/>
+ <cpu id='3' socket_id='0' die_id='1' cluster_id='0' core_id='1' siblings='3'/>
+ <cpu id='4' socket_id='0' die_id='2' cluster_id='0' core_id='0' siblings='4'/>
+ <cpu id='5' socket_id='0' die_id='2' cluster_id='0' core_id='1' siblings='5'/>
+ <cpu id='6' socket_id='1' die_id='0' cluster_id='0' core_id='0' siblings='6'/>
+ <cpu id='7' socket_id='1' die_id='0' cluster_id='0' core_id='1' siblings='7'/>
+ <cpu id='8' socket_id='1' die_id='1' cluster_id='0' core_id='0' siblings='8'/>
+ <cpu id='9' socket_id='1' die_id='1' cluster_id='0' core_id='1' siblings='9'/>
+ <cpu id='10' socket_id='1' die_id='2' cluster_id='0' core_id='0' siblings='10'/>
+ <cpu id='11' socket_id='1' die_id='2' cluster_id='0' core_id='1' siblings='11'/>
</cpus>
</cell>
</cells>
diff --git a/tests/vircaps2xmldata/vircaps-x86_64-basic.xml b/tests/vircaps2xmldata/vircaps-x86_64-basic.xml
index 4da09f889c..9ae155d571 100644
--- a/tests/vircaps2xmldata/vircaps-x86_64-basic.xml
+++ b/tests/vircaps2xmldata/vircaps-x86_64-basic.xml
@@ -14,10 +14,10 @@
<pages unit='KiB' size='2048'>4096</pages>
<pages unit='KiB' size='1048576'>6144</pages>
<cpus num='4'>
- <cpu id='0' socket_id='0' die_id='0' core_id='0' siblings='0'/>
- <cpu id='1' socket_id='0' die_id='0' core_id='1' siblings='1'/>
- <cpu id='2' socket_id='0' die_id='0' core_id='2' siblings='2'/>
- <cpu id='3' socket_id='0' die_id='0' core_id='3' siblings='3'/>
+ <cpu id='0' socket_id='0' die_id='0' cluster_id='0' core_id='0' siblings='0'/>
+ <cpu id='1' socket_id='0' die_id='0' cluster_id='0' core_id='1' siblings='1'/>
+ <cpu id='2' socket_id='0' die_id='0' cluster_id='0' core_id='2' siblings='2'/>
+ <cpu id='3' socket_id='0' die_id='0' cluster_id='0' core_id='3' siblings='3'/>
</cpus>
</cell>
<cell id='1'>
@@ -26,10 +26,10 @@
<pages unit='KiB' size='2048'>6144</pages>
<pages unit='KiB' size='1048576'>8192</pages>
<cpus num='4'>
- <cpu id='4' socket_id='1' die_id='0' core_id='4' siblings='4'/>
- <cpu id='5' socket_id='1' die_id='0' core_id='5' siblings='5'/>
- <cpu id='6' socket_id='1' die_id='0' core_id='6' siblings='6'/>
- <cpu id='7' socket_id='1' die_id='0' core_id='7' siblings='7'/>
+ <cpu id='4' socket_id='1' die_id='0' cluster_id='0' core_id='4' siblings='4'/>
+ <cpu id='5' socket_id='1' die_id='0' cluster_id='0' core_id='5' siblings='5'/>
+ <cpu id='6' socket_id='1' die_id='0' cluster_id='0' core_id='6' siblings='6'/>
+ <cpu id='7' socket_id='1' die_id='0' cluster_id='0' core_id='7' siblings='7'/>
</cpus>
</cell>
<cell id='2'>
@@ -38,10 +38,10 @@
<pages unit='KiB' size='2048'>8192</pages>
<pages unit='KiB' size='1048576'>10240</pages>
<cpus num='4'>
- <cpu id='8' socket_id='2' die_id='0' core_id='8' siblings='8'/>
- <cpu id='9' socket_id='2' die_id='0' core_id='9' siblings='9'/>
- <cpu id='10' socket_id='2' die_id='0' core_id='10' siblings='10'/>
- <cpu id='11' socket_id='2' die_id='0' core_id='11' siblings='11'/>
+ <cpu id='8' socket_id='2' die_id='0' cluster_id='0' core_id='8' siblings='8'/>
+ <cpu id='9' socket_id='2' die_id='0' cluster_id='0' core_id='9' siblings='9'/>
+ <cpu id='10' socket_id='2' die_id='0' cluster_id='0' core_id='10' siblings='10'/>
+ <cpu id='11' socket_id='2' die_id='0' cluster_id='0' core_id='11' siblings='11'/>
</cpus>
</cell>
<cell id='3'>
@@ -50,10 +50,10 @@
<pages unit='KiB' size='2048'>10240</pages>
<pages unit='KiB' size='1048576'>12288</pages>
<cpus num='4'>
- <cpu id='12' socket_id='3' die_id='0' core_id='12' siblings='12'/>
- <cpu id='13' socket_id='3' die_id='0' core_id='13' siblings='13'/>
- <cpu id='14' socket_id='3' die_id='0' core_id='14' siblings='14'/>
- <cpu id='15' socket_id='3' die_id='0' core_id='15' siblings='15'/>
+ <cpu id='12' socket_id='3' die_id='0' cluster_id='0' core_id='12' siblings='12'/>
+ <cpu id='13' socket_id='3' die_id='0' cluster_id='0' core_id='13' siblings='13'/>
+ <cpu id='14' socket_id='3' die_id='0' cluster_id='0' core_id='14' siblings='14'/>
+ <cpu id='15' socket_id='3' die_id='0' cluster_id='0' core_id='15' siblings='15'/>
</cpus>
</cell>
</cells>
diff --git a/tests/vircaps2xmldata/vircaps-x86_64-caches.xml b/tests/vircaps2xmldata/vircaps-x86_64-caches.xml
index 28f00c0a90..05b33147b7 100644
--- a/tests/vircaps2xmldata/vircaps-x86_64-caches.xml
+++ b/tests/vircaps2xmldata/vircaps-x86_64-caches.xml
@@ -17,14 +17,14 @@
<pages unit='KiB' size='2048'>4096</pages>
<pages unit='KiB' size='1048576'>6144</pages>
<cpus num='8'>
- <cpu id='0' socket_id='0' die_id='0' core_id='0' siblings='0,4'/>
- <cpu id='1' socket_id='0' die_id='0' core_id='1' siblings='1,5'/>
- <cpu id='2' socket_id='0' die_id='0' core_id='2' siblings='2,6'/>
- <cpu id='3' socket_id='0' die_id='0' core_id='3' siblings='3,7'/>
- <cpu id='4' socket_id='0' die_id='0' core_id='0' siblings='0,4'/>
- <cpu id='5' socket_id='0' die_id='0' core_id='1' siblings='1,5'/>
- <cpu id='6' socket_id='0' die_id='0' core_id='2' siblings='2,6'/>
- <cpu id='7' socket_id='0' die_id='0' core_id='3' siblings='3,7'/>
+ <cpu id='0' socket_id='0' die_id='0' cluster_id='0' core_id='0' siblings='0,4'/>
+ <cpu id='1' socket_id='0' die_id='0' cluster_id='0' core_id='1' siblings='1,5'/>
+ <cpu id='2' socket_id='0' die_id='0' cluster_id='0' core_id='2' siblings='2,6'/>
+ <cpu id='3' socket_id='0' die_id='0' cluster_id='0' core_id='3' siblings='3,7'/>
+ <cpu id='4' socket_id='0' die_id='0' cluster_id='0' core_id='0' siblings='0,4'/>
+ <cpu id='5' socket_id='0' die_id='0' cluster_id='0' core_id='1' siblings='1,5'/>
+ <cpu id='6' socket_id='0' die_id='0' cluster_id='0' core_id='2' siblings='2,6'/>
+ <cpu id='7' socket_id='0' die_id='0' cluster_id='0' core_id='3' siblings='3,7'/>
</cpus>
</cell>
</cells>
diff --git a/tests/vircaps2xmldata/vircaps-x86_64-hmat.xml b/tests/vircaps2xmldata/vircaps-x86_64-hmat.xml
index 6fe5751666..2b97354bf3 100644
--- a/tests/vircaps2xmldata/vircaps-x86_64-hmat.xml
+++ b/tests/vircaps2xmldata/vircaps-x86_64-hmat.xml
@@ -25,30 +25,30 @@
<line value='16' unit='B'/>
</cache>
<cpus num='24'>
- <cpu id='0' socket_id='0' die_id='0' core_id='0' siblings='0'/>
- <cpu id='1' socket_id='1' die_id='0' core_id='0' siblings='1'/>
- <cpu id='2' socket_id='2' die_id='0' core_id='0' siblings='2'/>
- <cpu id='3' socket_id='3' die_id='0' core_id='0' siblings='3'/>
- <cpu id='4' socket_id='4' die_id='0' core_id='0' siblings='4'/>
- <cpu id='5' socket_id='5' die_id='0' core_id='0' siblings='5'/>
- <cpu id='6' socket_id='6' die_id='0' core_id='0' siblings='6'/>
- <cpu id='7' socket_id='7' die_id='0' core_id='0' siblings='7'/>
- <cpu id='8' socket_id='8' die_id='0' core_id='0' siblings='8'/>
- <cpu id='9' socket_id='9' die_id='0' core_id='0' siblings='9'/>
- <cpu id='10' socket_id='10' die_id='0' core_id='0' siblings='10'/>
- <cpu id='11' socket_id='11' die_id='0' core_id='0' siblings='11'/>
- <cpu id='12' socket_id='12' die_id='0' core_id='0' siblings='12'/>
- <cpu id='13' socket_id='13' die_id='0' core_id='0' siblings='13'/>
- <cpu id='14' socket_id='14' die_id='0' core_id='0' siblings='14'/>
- <cpu id='15' socket_id='15' die_id='0' core_id='0' siblings='15'/>
- <cpu id='16' socket_id='16' die_id='0' core_id='0' siblings='16'/>
- <cpu id='17' socket_id='17' die_id='0' core_id='0' siblings='17'/>
- <cpu id='18' socket_id='18' die_id='0' core_id='0' siblings='18'/>
- <cpu id='19' socket_id='19' die_id='0' core_id='0' siblings='19'/>
- <cpu id='20' socket_id='20' die_id='0' core_id='0' siblings='20'/>
- <cpu id='21' socket_id='21' die_id='0' core_id='0' siblings='21'/>
- <cpu id='22' socket_id='22' die_id='0' core_id='0' siblings='22'/>
- <cpu id='23' socket_id='23' die_id='0' core_id='0' siblings='23'/>
+ <cpu id='0' socket_id='0' die_id='0' cluster_id='0' core_id='0' siblings='0'/>
+ <cpu id='1' socket_id='1' die_id='0' cluster_id='0' core_id='0' siblings='1'/>
+ <cpu id='2' socket_id='2' die_id='0' cluster_id='0' core_id='0' siblings='2'/>
+ <cpu id='3' socket_id='3' die_id='0' cluster_id='0' core_id='0' siblings='3'/>
+ <cpu id='4' socket_id='4' die_id='0' cluster_id='0' core_id='0' siblings='4'/>
+ <cpu id='5' socket_id='5' die_id='0' cluster_id='0' core_id='0' siblings='5'/>
+ <cpu id='6' socket_id='6' die_id='0' cluster_id='0' core_id='0' siblings='6'/>
+ <cpu id='7' socket_id='7' die_id='0' cluster_id='0' core_id='0' siblings='7'/>
+ <cpu id='8' socket_id='8' die_id='0' cluster_id='0' core_id='0' siblings='8'/>
+ <cpu id='9' socket_id='9' die_id='0' cluster_id='0' core_id='0' siblings='9'/>
+ <cpu id='10' socket_id='10' die_id='0' cluster_id='0' core_id='0' siblings='10'/>
+ <cpu id='11' socket_id='11' die_id='0' cluster_id='0' core_id='0' siblings='11'/>
+ <cpu id='12' socket_id='12' die_id='0' cluster_id='0' core_id='0' siblings='12'/>
+ <cpu id='13' socket_id='13' die_id='0' cluster_id='0' core_id='0' siblings='13'/>
+ <cpu id='14' socket_id='14' die_id='0' cluster_id='0' core_id='0' siblings='14'/>
+ <cpu id='15' socket_id='15' die_id='0' cluster_id='0' core_id='0' siblings='15'/>
+ <cpu id='16' socket_id='16' die_id='0' cluster_id='0' core_id='0' siblings='16'/>
+ <cpu id='17' socket_id='17' die_id='0' cluster_id='0' core_id='0' siblings='17'/>
+ <cpu id='18' socket_id='18' die_id='0' cluster_id='0' core_id='0' siblings='18'/>
+ <cpu id='19' socket_id='19' die_id='0' cluster_id='0' core_id='0' siblings='19'/>
+ <cpu id='20' socket_id='20' die_id='0' cluster_id='0' core_id='0' siblings='20'/>
+ <cpu id='21' socket_id='21' die_id='0' cluster_id='0' core_id='0' siblings='21'/>
+ <cpu id='22' socket_id='22' die_id='0' cluster_id='0' core_id='0' siblings='22'/>
+ <cpu id='23' socket_id='23' die_id='0' cluster_id='0' core_id='0' siblings='23'/>
</cpus>
</cell>
<cell id='1'>
diff --git a/tests/vircaps2xmldata/vircaps-x86_64-resctrl-cdp.xml b/tests/vircaps2xmldata/vircaps-x86_64-resctrl-cdp.xml
index ee26fe9464..167b217d8e 100644
--- a/tests/vircaps2xmldata/vircaps-x86_64-resctrl-cdp.xml
+++ b/tests/vircaps2xmldata/vircaps-x86_64-resctrl-cdp.xml
@@ -17,12 +17,12 @@
<pages unit='KiB' size='2048'>4096</pages>
<pages unit='KiB' size='1048576'>6144</pages>
<cpus num='6'>
- <cpu id='0' socket_id='0' die_id='0' core_id='0' siblings='0'/>
- <cpu id='1' socket_id='0' die_id='0' core_id='1' siblings='1'/>
- <cpu id='2' socket_id='0' die_id='0' core_id='2' siblings='2'/>
- <cpu id='3' socket_id='0' die_id='0' core_id='3' siblings='3'/>
- <cpu id='4' socket_id='0' die_id='0' core_id='4' siblings='4'/>
- <cpu id='5' socket_id='0' die_id='0' core_id='5' siblings='5'/>
+ <cpu id='0' socket_id='0' die_id='0' cluster_id='0' core_id='0' siblings='0'/>
+ <cpu id='1' socket_id='0' die_id='0' cluster_id='0' core_id='1' siblings='1'/>
+ <cpu id='2' socket_id='0' die_id='0' cluster_id='0' core_id='2' siblings='2'/>
+ <cpu id='3' socket_id='0' die_id='0' cluster_id='0' core_id='3' siblings='3'/>
+ <cpu id='4' socket_id='0' die_id='0' cluster_id='0' core_id='4' siblings='4'/>
+ <cpu id='5' socket_id='0' die_id='0' cluster_id='0' core_id='5' siblings='5'/>
</cpus>
</cell>
<cell id='1'>
@@ -31,12 +31,12 @@
<pages unit='KiB' size='2048'>6144</pages>
<pages unit='KiB' size='1048576'>8192</pages>
<cpus num='6'>
- <cpu id='6' socket_id='1' die_id='0' core_id='0' siblings='6'/>
- <cpu id='7' socket_id='1' die_id='0' core_id='1' siblings='7'/>
- <cpu id='8' socket_id='1' die_id='0' core_id='2' siblings='8'/>
- <cpu id='9' socket_id='1' die_id='0' core_id='3' siblings='9'/>
- <cpu id='10' socket_id='1' die_id='0' core_id='4' siblings='10'/>
- <cpu id='11' socket_id='1' die_id='0' core_id='5' siblings='11'/>
+ <cpu id='6' socket_id='1' die_id='0' cluster_id='0' core_id='0' siblings='6'/>
+ <cpu id='7' socket_id='1' die_id='0' cluster_id='0' core_id='1' siblings='7'/>
+ <cpu id='8' socket_id='1' die_id='0' cluster_id='0' core_id='2' siblings='8'/>
+ <cpu id='9' socket_id='1' die_id='0' cluster_id='0' core_id='3' siblings='9'/>
+ <cpu id='10' socket_id='1' die_id='0' cluster_id='0' core_id='4' siblings='10'/>
+ <cpu id='11' socket_id='1' die_id='0' cluster_id='0' core_id='5' siblings='11'/>
</cpus>
</cell>
</cells>
diff --git a/tests/vircaps2xmldata/vircaps-x86_64-resctrl-cmt.xml b/tests/vircaps2xmldata/vircaps-x86_64-resctrl-cmt.xml
index acdd97ec58..311bb58e6a 100644
--- a/tests/vircaps2xmldata/vircaps-x86_64-resctrl-cmt.xml
+++ b/tests/vircaps2xmldata/vircaps-x86_64-resctrl-cmt.xml
@@ -17,12 +17,12 @@
<pages unit='KiB' size='2048'>4096</pages>
<pages unit='KiB' size='1048576'>6144</pages>
<cpus num='6'>
- <cpu id='0' socket_id='0' die_id='0' core_id='0' siblings='0'/>
- <cpu id='1' socket_id='0' die_id='0' core_id='1' siblings='1'/>
- <cpu id='2' socket_id='0' die_id='0' core_id='2' siblings='2'/>
- <cpu id='3' socket_id='0' die_id='0' core_id='3' siblings='3'/>
- <cpu id='4' socket_id='0' die_id='0' core_id='4' siblings='4'/>
- <cpu id='5' socket_id='0' die_id='0' core_id='5' siblings='5'/>
+ <cpu id='0' socket_id='0' die_id='0' cluster_id='0' core_id='0' siblings='0'/>
+ <cpu id='1' socket_id='0' die_id='0' cluster_id='0' core_id='1' siblings='1'/>
+ <cpu id='2' socket_id='0' die_id='0' cluster_id='0' core_id='2' siblings='2'/>
+ <cpu id='3' socket_id='0' die_id='0' cluster_id='0' core_id='3' siblings='3'/>
+ <cpu id='4' socket_id='0' die_id='0' cluster_id='0' core_id='4' siblings='4'/>
+ <cpu id='5' socket_id='0' die_id='0' cluster_id='0' core_id='5' siblings='5'/>
</cpus>
</cell>
<cell id='1'>
@@ -31,12 +31,12 @@
<pages unit='KiB' size='2048'>6144</pages>
<pages unit='KiB' size='1048576'>8192</pages>
<cpus num='6'>
- <cpu id='6' socket_id='1' die_id='0' core_id='0' siblings='6'/>
- <cpu id='7' socket_id='1' die_id='0' core_id='1' siblings='7'/>
- <cpu id='8' socket_id='1' die_id='0' core_id='2' siblings='8'/>
- <cpu id='9' socket_id='1' die_id='0' core_id='3' siblings='9'/>
- <cpu id='10' socket_id='1' die_id='0' core_id='4' siblings='10'/>
- <cpu id='11' socket_id='1' die_id='0' core_id='5' siblings='11'/>
+ <cpu id='6' socket_id='1' die_id='0' cluster_id='0' core_id='0' siblings='6'/>
+ <cpu id='7' socket_id='1' die_id='0' cluster_id='0' core_id='1' siblings='7'/>
+ <cpu id='8' socket_id='1' die_id='0' cluster_id='0' core_id='2' siblings='8'/>
+ <cpu id='9' socket_id='1' die_id='0' cluster_id='0' core_id='3' siblings='9'/>
+ <cpu id='10' socket_id='1' die_id='0' cluster_id='0' core_id='4' siblings='10'/>
+ <cpu id='11' socket_id='1' die_id='0' cluster_id='0' core_id='5' siblings='11'/>
</cpus>
</cell>
</cells>
diff --git a/tests/vircaps2xmldata/vircaps-x86_64-resctrl-fake-feature.xml b/tests/vircaps2xmldata/vircaps-x86_64-resctrl-fake-feature.xml
index 1327d85c98..d85407f0b1 100644
--- a/tests/vircaps2xmldata/vircaps-x86_64-resctrl-fake-feature.xml
+++ b/tests/vircaps2xmldata/vircaps-x86_64-resctrl-fake-feature.xml
@@ -17,12 +17,12 @@
<pages unit='KiB' size='2048'>4096</pages>
<pages unit='KiB' size='1048576'>6144</pages>
<cpus num='6'>
- <cpu id='0' socket_id='0' die_id='0' core_id='0' siblings='0'/>
- <cpu id='1' socket_id='0' die_id='0' core_id='1' siblings='1'/>
- <cpu id='2' socket_id='0' die_id='0' core_id='2' siblings='2'/>
- <cpu id='3' socket_id='0' die_id='0' core_id='3' siblings='3'/>
- <cpu id='4' socket_id='0' die_id='0' core_id='4' siblings='4'/>
- <cpu id='5' socket_id='0' die_id='0' core_id='5' siblings='5'/>
+ <cpu id='0' socket_id='0' die_id='0' cluster_id='0' core_id='0' siblings='0'/>
+ <cpu id='1' socket_id='0' die_id='0' cluster_id='0' core_id='1' siblings='1'/>
+ <cpu id='2' socket_id='0' die_id='0' cluster_id='0' core_id='2' siblings='2'/>
+ <cpu id='3' socket_id='0' die_id='0' cluster_id='0' core_id='3' siblings='3'/>
+ <cpu id='4' socket_id='0' die_id='0' cluster_id='0' core_id='4' siblings='4'/>
+ <cpu id='5' socket_id='0' die_id='0' cluster_id='0' core_id='5' siblings='5'/>
</cpus>
</cell>
<cell id='1'>
@@ -31,12 +31,12 @@
<pages unit='KiB' size='2048'>6144</pages>
<pages unit='KiB' size='1048576'>8192</pages>
<cpus num='6'>
- <cpu id='6' socket_id='1' die_id='0' core_id='0' siblings='6'/>
- <cpu id='7' socket_id='1' die_id='0' core_id='1' siblings='7'/>
- <cpu id='8' socket_id='1' die_id='0' core_id='2' siblings='8'/>
- <cpu id='9' socket_id='1' die_id='0' core_id='3' siblings='9'/>
- <cpu id='10' socket_id='1' die_id='0' core_id='4' siblings='10'/>
- <cpu id='11' socket_id='1' die_id='0' core_id='5' siblings='11'/>
+ <cpu id='6' socket_id='1' die_id='0' cluster_id='0' core_id='0' siblings='6'/>
+ <cpu id='7' socket_id='1' die_id='0' cluster_id='0' core_id='1' siblings='7'/>
+ <cpu id='8' socket_id='1' die_id='0' cluster_id='0' core_id='2' siblings='8'/>
+ <cpu id='9' socket_id='1' die_id='0' cluster_id='0' core_id='3' siblings='9'/>
+ <cpu id='10' socket_id='1' die_id='0' cluster_id='0' core_id='4' siblings='10'/>
+ <cpu id='11' socket_id='1' die_id='0' cluster_id='0' core_id='5' siblings='11'/>
</cpus>
</cell>
</cells>
diff --git a/tests/vircaps2xmldata/vircaps-x86_64-resctrl-skx-twocaches.xml b/tests/vircaps2xmldata/vircaps-x86_64-resctrl-skx-twocaches.xml
index 6769bd0591..eb53eb2142 100644
--- a/tests/vircaps2xmldata/vircaps-x86_64-resctrl-skx-twocaches.xml
+++ b/tests/vircaps2xmldata/vircaps-x86_64-resctrl-skx-twocaches.xml
@@ -17,7 +17,7 @@
<pages unit='KiB' size='2048'>4096</pages>
<pages unit='KiB' size='1048576'>6144</pages>
<cpus num='1'>
- <cpu id='0' socket_id='0' die_id='0' core_id='0' siblings='0'/>
+ <cpu id='0' socket_id='0' die_id='0' cluster_id='0' core_id='0' siblings='0'/>
</cpus>
</cell>
</cells>
diff --git a/tests/vircaps2xmldata/vircaps-x86_64-resctrl-skx.xml b/tests/vircaps2xmldata/vircaps-x86_64-resctrl-skx.xml
index bc52480905..38ea0bdc27 100644
--- a/tests/vircaps2xmldata/vircaps-x86_64-resctrl-skx.xml
+++ b/tests/vircaps2xmldata/vircaps-x86_64-resctrl-skx.xml
@@ -17,7 +17,7 @@
<pages unit='KiB' size='2048'>4096</pages>
<pages unit='KiB' size='1048576'>6144</pages>
<cpus num='1'>
- <cpu id='0' socket_id='0' die_id='0' core_id='0' siblings='0'/>
+ <cpu id='0' socket_id='0' die_id='0' cluster_id='0' core_id='0' siblings='0'/>
</cpus>
</cell>
</cells>
diff --git a/tests/vircaps2xmldata/vircaps-x86_64-resctrl.xml b/tests/vircaps2xmldata/vircaps-x86_64-resctrl.xml
index b638bbd1c9..fd854ee91e 100644
--- a/tests/vircaps2xmldata/vircaps-x86_64-resctrl.xml
+++ b/tests/vircaps2xmldata/vircaps-x86_64-resctrl.xml
@@ -17,12 +17,12 @@
<pages unit='KiB' size='2048'>4096</pages>
<pages unit='KiB' size='1048576'>6144</pages>
<cpus num='6'>
- <cpu id='0' socket_id='0' die_id='0' core_id='0' siblings='0'/>
- <cpu id='1' socket_id='0' die_id='0' core_id='1' siblings='1'/>
- <cpu id='2' socket_id='0' die_id='0' core_id='2' siblings='2'/>
- <cpu id='3' socket_id='0' die_id='0' core_id='3' siblings='3'/>
- <cpu id='4' socket_id='0' die_id='0' core_id='4' siblings='4'/>
- <cpu id='5' socket_id='0' die_id='0' core_id='5' siblings='5'/>
+ <cpu id='0' socket_id='0' die_id='0' cluster_id='0' core_id='0' siblings='0'/>
+ <cpu id='1' socket_id='0' die_id='0' cluster_id='0' core_id='1' siblings='1'/>
+ <cpu id='2' socket_id='0' die_id='0' cluster_id='0' core_id='2' siblings='2'/>
+ <cpu id='3' socket_id='0' die_id='0' cluster_id='0' core_id='3' siblings='3'/>
+ <cpu id='4' socket_id='0' die_id='0' cluster_id='0' core_id='4' siblings='4'/>
+ <cpu id='5' socket_id='0' die_id='0' cluster_id='0' core_id='5' siblings='5'/>
</cpus>
</cell>
<cell id='1'>
@@ -31,12 +31,12 @@
<pages unit='KiB' size='2048'>6144</pages>
<pages unit='KiB' size='1048576'>8192</pages>
<cpus num='6'>
- <cpu id='6' socket_id='1' die_id='0' core_id='0' siblings='6'/>
- <cpu id='7' socket_id='1' die_id='0' core_id='1' siblings='7'/>
- <cpu id='8' socket_id='1' die_id='0' core_id='2' siblings='8'/>
- <cpu id='9' socket_id='1' die_id='0' core_id='3' siblings='9'/>
- <cpu id='10' socket_id='1' die_id='0' core_id='4' siblings='10'/>
- <cpu id='11' socket_id='1' die_id='0' core_id='5' siblings='11'/>
+ <cpu id='6' socket_id='1' die_id='0' cluster_id='0' core_id='0' siblings='6'/>
+ <cpu id='7' socket_id='1' die_id='0' cluster_id='0' core_id='1' siblings='7'/>
+ <cpu id='8' socket_id='1' die_id='0' cluster_id='0' core_id='2' siblings='8'/>
+ <cpu id='9' socket_id='1' die_id='0' cluster_id='0' core_id='3' siblings='9'/>
+ <cpu id='10' socket_id='1' die_id='0' cluster_id='0' core_id='4' siblings='10'/>
+ <cpu id='11' socket_id='1' die_id='0' cluster_id='0' core_id='5' siblings='11'/>
</cpus>
</cell>
</cells>
--
2.27.0

View File

@ -0,0 +1,31 @@
From ca03e9837c3f79217b9375bd1f5f4fda0034ff6b Mon Sep 17 00:00:00 2001
From: Mao Zhongyi <maozhongyi@cmss.chinamobile.com>
Date: Sat, 18 Sep 2021 14:20:24 +0800
Subject: [PATCH] conf/domain_conf: pin the retry_interval and retry_timeout
parameters to xml
Signed-off-by: Mao Zhongyi <maozhongyi@cmss.chinamobile.com>
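A minimal sketch of the output this adds (the attribute values are illustrative assumptions), as the attributes would appear on the formatted <driver> element in the dumped domain XML:

    <driver name='qemu' type='qcow2' error_policy='retry' retry_interval='1000' retry_timeout='10000'/>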
---
src/conf/domain_conf.c | 6 ++++++
1 file changed, 6 insertions(+)
diff --git a/src/conf/domain_conf.c b/src/conf/domain_conf.c
index 0aa514bda3..779130671f 100644
--- a/src/conf/domain_conf.c
+++ b/src/conf/domain_conf.c
@@ -22669,6 +22669,12 @@ virDomainDiskDefFormatDriver(virBuffer *buf,
virBufferAsprintf(&attrBuf, " rerror_policy='%s'",
virDomainDiskErrorPolicyTypeToString(disk->rerror_policy));
+ if (disk->retry_interval)
+ virBufferAsprintf(&attrBuf, " retry_interval='%lld'", disk->retry_interval);
+
+ if (disk->retry_timeout)
+ virBufferAsprintf(&attrBuf, " retry_timeout='%lld'", disk->retry_timeout);
+
if (disk->iomode)
virBufferAsprintf(&attrBuf, " io='%s'",
virDomainDiskIoTypeToString(disk->iomode));
--
2.27.0

View File

@ -0,0 +1,31 @@
From 3650de04c4dc99416b7a2d4edd283217657e77c5 Mon Sep 17 00:00:00 2001
From: Xu Yandong <xuyandong2@huawei.com>
Date: Wed, 20 May 2020 02:33:27 -0400
Subject: [PATCH] cpu/aarch64: enable host-model cpu for AArch64 architecture
The 'host-model' CPU is supported by the kunpeng-v virtualization
suite; skip the hypervisor host-model extension check on the AArch64
architecture.
Signed-off-by: Xu Yandong <xuyandong2@huawei.com>
Signed-off-by: lishusen <lishusen2@huawei.com>
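As an illustrative sketch of what this relaxation allows (the feature name is an assumption, not taken from this patch), a host-model CPU definition carrying explicit feature elements can now be used on AArch64 even when QEMU does not expose query-cpu-model-expansion:

    <cpu mode='host-model'>
      <feature policy='require' name='aes'/>
    </cpu>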
---
src/qemu/qemu_command.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/qemu/qemu_command.c b/src/qemu/qemu_command.c
index d54149ed2d..e3cf092828 100644
--- a/src/qemu/qemu_command.c
+++ b/src/qemu/qemu_command.c
@@ -6278,7 +6278,7 @@ qemuBuildCpuModelArgStr(virQEMUDriver *driver,
break;
}
- if ((ARCH_IS_S390(def->os.arch) || ARCH_IS_ARM(def->os.arch)) &&
+ if (ARCH_IS_S390(def->os.arch) &&
cpu->features &&
!virQEMUCapsGet(qemuCaps, QEMU_CAPS_QUERY_CPU_MODEL_EXPANSION)) {
virReportError(VIR_ERR_INTERNAL_ERROR,
--
2.27.0

View File

@ -0,0 +1,42 @@
From 1fafc7b5a9392fce83e8f1701ccec289d11d4344 Mon Sep 17 00:00:00 2001
From: Xu Yandong <xuyandong2@huawei.com>
Date: Tue, 26 May 2020 20:08:07 +0800
Subject: [PATCH] cpu_map: add kunpeng-920 features to arm features
Signed-off-by: Xu Yandong <xuyandong2@huawei.com>
Signed-off-by: lishusen <lishusen2@huawei.com>
---
src/cpu_map/arm_features.xml | 19 +++++++++++++++++++
1 file changed, 19 insertions(+)
diff --git a/src/cpu_map/arm_features.xml b/src/cpu_map/arm_features.xml
index 8a53384463..5bed298071 100644
--- a/src/cpu_map/arm_features.xml
+++ b/src/cpu_map/arm_features.xml
@@ -19,4 +19,23 @@
<feature name='sve1920'/>
<feature name='sve2048'/>
+ <!-- Kunpeng-920 cpu features -->
+ <feature name='fp'/>
+ <feature name='asimd'/>
+ <feature name='evtstrm'/>
+ <feature name='aes'/>
+ <feature name='pmull'/>
+ <feature name='sha1'/>
+ <feature name='sha2'/>
+ <feature name='crc32'/>
+ <feature name='atomics'/>
+ <feature name='fphp'/>
+ <feature name='asimdhp'/>
+ <feature name='cpuid'/>
+ <feature name='asimdrdm'/>
+ <feature name='jscvt'/>
+ <feature name='fcma'/>
+ <feature name='dcpop'/>
+ <feature name='asimddp'/>
+ <feature name='asimdfhm'/>
</cpus>
--
2.27.0

View File

@ -0,0 +1,124 @@
From 0041b76d5b82a15cbb0b1d6f70fe12e5017b940a Mon Sep 17 00:00:00 2001
From: Andrea Bolognani <abologna@redhat.com>
Date: Mon, 8 Jan 2024 16:11:51 +0100
Subject: [PATCH] docs: Document CPU clusters
Signed-off-by: Andrea Bolognani <abologna@redhat.com>
Reviewed-by: Peter Krempa <pkrempa@redhat.com>
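A worked sketch of the arithmetic the documentation below describes (all values are illustrative):

    <cpu>
      <topology sockets='2' dies='1' clusters='2' cores='4' threads='1'/>
    </cpu>

This topology yields 2 * 1 * 2 * 4 * 1 = 16 vCPUs, so the domain's <vcpu> element would normally be set to 16.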
---
docs/formatcaps.rst | 26 ++++++++++++++++----------
docs/formatdomain.rst | 18 ++++++++++--------
2 files changed, 26 insertions(+), 18 deletions(-)
diff --git a/docs/formatcaps.rst b/docs/formatcaps.rst
index 60f8b7caca..f37532296f 100644
--- a/docs/formatcaps.rst
+++ b/docs/formatcaps.rst
@@ -77,6 +77,12 @@ The ``<host/>`` element consists of the following child elements:
Note that not all architectures support CPU dies: if the current
architecture doesn't, the value will be 0 for all CPUs.
+ ``cluster_id``
+ Identifier for the cluster the CPU is in.
+
+ Note that not all architectures support CPU clusters: if the current
+ architecture doesn't, the value will be 0 for all CPUs.
+
``core_id``
Identifier for the core the CPU is in.
@@ -84,7 +90,7 @@ The ``<host/>`` element consists of the following child elements:
List of CPUs that are in the same core.
The list will include the current CPU, plus all other CPUs that have the
- same values for ``socket_id``, ``die_id`` and ``core_id``.
+ same values for ``socket_id``, ``die_id``, ``cluster_id`` and ``core_id``.
``secmodel``
To find out default security labels for different security models you need to
@@ -196,7 +202,7 @@ capabilities enabled in the chip and BIOS you will see:
<microcode version='236'/>
<signature family='6' model='142' stepping='12'/>
<counter name='tsc' frequency='2303997000' scaling='no'/>
- <topology sockets='1' dies='1' cores='4' threads='2'/>
+ <topology sockets='1' dies='1' clusters='1' cores='4' threads='2'/>
<maxphysaddr mode='emulate' bits='39'/>
<feature name='ds'/>
<feature name='acpi'/>
@@ -261,14 +267,14 @@ capabilities enabled in the chip and BIOS you will see:
<sibling id='0' value='10'/>
</distances>
<cpus num='8'>
- <cpu id='0' socket_id='0' die_id='0' core_id='0' siblings='0,4'/>
- <cpu id='1' socket_id='0' die_id='0' core_id='1' siblings='1,5'/>
- <cpu id='2' socket_id='0' die_id='0' core_id='2' siblings='2,6'/>
- <cpu id='3' socket_id='0' die_id='0' core_id='3' siblings='3,7'/>
- <cpu id='4' socket_id='0' die_id='0' core_id='0' siblings='0,4'/>
- <cpu id='5' socket_id='0' die_id='0' core_id='1' siblings='1,5'/>
- <cpu id='6' socket_id='0' die_id='0' core_id='2' siblings='2,6'/>
- <cpu id='7' socket_id='0' die_id='0' core_id='3' siblings='3,7'/>
+ <cpu id='0' socket_id='0' die_id='0' cluster_id='0' core_id='0' siblings='0,4'/>
+ <cpu id='1' socket_id='0' die_id='0' cluster_id='0' core_id='1' siblings='1,5'/>
+ <cpu id='2' socket_id='0' die_id='0' cluster_id='0' core_id='2' siblings='2,6'/>
+ <cpu id='3' socket_id='0' die_id='0' cluster_id='0' core_id='3' siblings='3,7'/>
+ <cpu id='4' socket_id='0' die_id='0' cluster_id='0' core_id='0' siblings='0,4'/>
+ <cpu id='5' socket_id='0' die_id='0' cluster_id='0' core_id='1' siblings='1,5'/>
+ <cpu id='6' socket_id='0' die_id='0' cluster_id='0' core_id='2' siblings='2,6'/>
+ <cpu id='7' socket_id='0' die_id='0' cluster_id='0' core_id='3' siblings='3,7'/>
</cpus>
</cell>
</cells>
diff --git a/docs/formatdomain.rst b/docs/formatdomain.rst
index 366918b32c..2d794cc8a2 100644
--- a/docs/formatdomain.rst
+++ b/docs/formatdomain.rst
@@ -1377,7 +1377,7 @@ following collection of elements. :since:`Since 0.7.5`
<cpu match='exact'>
<model fallback='allow'>core2duo</model>
<vendor>Intel</vendor>
- <topology sockets='1' dies='1' cores='2' threads='1'/>
+ <topology sockets='1' dies='1' clusters='1' cores='2' threads='1'/>
<cache level='3' mode='emulate'/>
<maxphysaddr mode='emulate' bits='42'/>
<feature policy='disable' name='lahf_lm'/>
@@ -1388,7 +1388,7 @@ following collection of elements. :since:`Since 0.7.5`
<cpu mode='host-model'>
<model fallback='forbid'/>
- <topology sockets='1' dies='1' cores='2' threads='1'/>
+ <topology sockets='1' dies='1' clusters='1' cores='2' threads='1'/>
</cpu>
...
@@ -1414,7 +1414,7 @@ In case no restrictions need to be put on CPU model and its features, a simpler
...
<cpu>
- <topology sockets='1' dies='1' cores='2' threads='1'/>
+ <topology sockets='1' dies='1' clusters='1' cores='2' threads='1'/>
</cpu>
...
@@ -1579,12 +1579,14 @@ In case no restrictions need to be put on CPU model and its features, a simpler
``topology``
The ``topology`` element specifies requested topology of virtual CPU provided
to the guest.
- Its attributes ``sockets``, ``dies`` (:since:`Since 6.1.0`), ``cores``,
- and ``threads`` accept non-zero positive integer values.
+ Its attributes ``sockets``, ``dies`` (:since:`Since 6.1.0`), ``clusters``
+ (:since:`Since 10.1.0`), ``cores``, and ``threads`` accept non-zero positive
+ integer values.
They refer to the total number of CPU sockets, number of dies per socket,
- number of cores per die, and number of threads per core, respectively.
- The ``dies`` attribute is optional and will default to 1 if omitted, while
- the other attributes are all mandatory.
+ number of clusters per die, number of cores per cluster, and number of
+ threads per core, respectively.
+ The ``dies`` and ``clusters`` attributes are optional and will default to 1
+ if omitted, while the other attributes are all mandatory.
Hypervisors may require that the maximum number of vCPUs specified
by the ``cpus`` element equals to the number of vcpus resulting from the
topology.
--
2.27.0

View File

@ -0,0 +1,119 @@
From 35d7f3c8b34744d8430eb9487a106e3a98f372e4 Mon Sep 17 00:00:00 2001
From: Andrea Bolognani <abologna@redhat.com>
Date: Mon, 8 Jan 2024 16:13:25 +0100
Subject: [PATCH] docs: Improve documentation for CPU topology
On the guest configuration side, mention that support for the
"dies" attribute was introduced in libvirt 6.1.0 and clarify
that the ability to use non-default values is subject to
architecture and machine limitations.
On the host capabilities side, the documentation was pretty
much entirely missing. It's still far from perfect, but anything
is better than having no information at all.
Signed-off-by: Andrea Bolognani <abologna@redhat.com>
Reviewed-by: Peter Krempa <pkrempa@redhat.com>
---
docs/formatcaps.rst | 48 +++++++++++++++++++++++++++++++++++++------
docs/formatdomain.rst | 16 +++++++++------
2 files changed, 52 insertions(+), 12 deletions(-)
diff --git a/docs/formatcaps.rst b/docs/formatcaps.rst
index 3cccf70882..60f8b7caca 100644
--- a/docs/formatcaps.rst
+++ b/docs/formatcaps.rst
@@ -37,6 +37,12 @@ The ``<host/>`` element consists of the following child elements:
The host UUID.
``cpu``
The host CPU architecture and features.
+
+ Note that, while this element contains a ``topology`` sub-element,
+ the information contained therein is fairly high-level and likely
+ not very useful when it comes to optimizing guest vCPU placement.
+ Look into the ``topology`` *element*, described below, for more
+ detailed information.
``power_management``
whether host is capable of memory suspend, disk hibernation, or hybrid
suspend.
@@ -44,12 +50,42 @@ The ``<host/>`` element consists of the following child elements:
This element exposes information on the hypervisor's migration capabilities,
like live migration, supported URI transports, and so on.
``topology``
- This element embodies the host internal topology. Management applications may
- want to learn this information when orchestrating new guests - e.g. due to
- reduce inter-NUMA node transfers. Note that the ``sockets`` value reported
- here is per-NUMA-node; this is in contrast to the value given in domain
- definitions, which is interpreted as a total number of sockets for the
- domain.
+ This element describes the host CPU topology in detail.
+
+ Management applications may want to use this information when defining new
+ guests: for example, in order to ensure that all vCPUs are scheduled on
+ CPUs that are in the same NUMA node or even CPU core.
+
+ The ``cells`` sub-element contains a list of NUMA nodes, each one
+ represented by a single ``cell`` element. Within each ``cell``, a ``cpus``
+ sub-element contains a list of logical CPUs, each one represented by a
+ single ``cpu`` element. In both cases, the ``num`` attribute of the
+ top-level element contains the number of children.
+
+ Each ``cpu`` element contains the following attributes:
+
+ ``id``
+ CPU identifier. Can be used to refer to it in the context of
+ `CPU tuning <formatdomain.html#cpu-tuning>`__.
+
+ ``socket_id``
+ Identifier for the physical package the CPU is in.
+
+ ``die_id``
+ Identifier for the die the CPU is in.
+
+ Note that not all architectures support CPU dies: if the current
+ architecture doesn't, the value will be 0 for all CPUs.
+
+ ``core_id``
+ Identifier for the core the CPU is in.
+
+ ``siblings``
+ List of CPUs that are in the same core.
+
+ The list will include the current CPU, plus all other CPUs that have the
+ same values for ``socket_id``, ``die_id`` and ``core_id``.
+
``secmodel``
To find out default security labels for different security models you need to
parse this element. In contrast with the former elements, this is repeated
diff --git a/docs/formatdomain.rst b/docs/formatdomain.rst
index 310d2bc427..366918b32c 100644
--- a/docs/formatdomain.rst
+++ b/docs/formatdomain.rst
@@ -1578,14 +1578,18 @@ In case no restrictions need to be put on CPU model and its features, a simpler
supported vendors can be found in ``cpu_map/*_vendors.xml``.
``topology``
The ``topology`` element specifies requested topology of virtual CPU provided
- to the guest. Four attributes, ``sockets``, ``dies``, ``cores``, and
- ``threads``, accept non-zero positive integer values. They refer to the
- total number of CPU sockets, number of dies per socket, number of cores per
- die, and number of threads per core, respectively. The ``dies`` attribute is
- optional and will default to 1 if omitted, while the other attributes are all
- mandatory. Hypervisors may require that the maximum number of vCPUs specified
+ to the guest.
+ Its attributes ``sockets``, ``dies`` (:since:`Since 6.1.0`), ``cores``,
+ and ``threads`` accept non-zero positive integer values.
+ They refer to the total number of CPU sockets, number of dies per socket,
+ number of cores per die, and number of threads per core, respectively.
+ The ``dies`` attribute is optional and will default to 1 if omitted, while
+ the other attributes are all mandatory.
+ Hypervisors may require that the maximum number of vCPUs specified
by the ``cpus`` element equals to the number of vcpus resulting from the
topology.
+ Moreover, not all architectures and machine types support specifying a value
+ other than 1 for all attributes.
``feature``
The ``cpu`` element can contain zero or more ``feature`` elements used to
fine-tune features provided by the selected CPU model. The list of known
--
2.27.0

View File

@ -0,0 +1,411 @@
From 5d9ad485395b057739736ea31efa82d275c57324 Mon Sep 17 00:00:00 2001
From: AlexChen <alex.chen@huawei.com>
Date: Fri, 29 Mar 2024 15:27:46 +0800
Subject: [PATCH] hostdev:Introduce vDPA device to hostdev subsystem as a new
subtype
The following is the XML for a vDPA device:
<devices>
<hostdev mode='subsystem' type='vdpa'>
<source dev='/dev/vhost-vdpa-0'/>
</hostdev>
</devices>
And the command line passed to QEMU is as follows:
-device {"driver":"vhost-vdpa-device-pci","vhostdev":"/dev/vhost-vdpa-0"}
This approach was selected based on the earlier discussion of how to
support vDPA devices. For details, see the following:
https://listman.redhat.com/archives/libvir-list/2023-March/239018.html
Signed-off-by: AlexChen <alex.chen@huawei.com>
---
src/conf/domain_audit.c | 4 +++
src/conf/domain_conf.c | 43 +++++++++++++++++++++++++++++++++
src/conf/domain_conf.h | 6 +++++
src/conf/domain_postparse.c | 1 +
src/conf/domain_validate.c | 1 +
src/conf/virconftypes.h | 2 ++
src/qemu/qemu_command.c | 19 +++++++++++++++
src/qemu/qemu_command.h | 3 +++
src/qemu/qemu_domain.c | 7 ++++++
src/qemu/qemu_hotplug.c | 3 +++
src/qemu/qemu_migration.c | 2 ++
src/qemu/qemu_validate.c | 2 ++
src/security/security_dac.c | 2 ++
src/security/security_selinux.c | 2 ++
14 files changed, 97 insertions(+)
diff --git a/src/conf/domain_audit.c b/src/conf/domain_audit.c
index 7a6bb02203..e4d4d6d912 100644
--- a/src/conf/domain_audit.c
+++ b/src/conf/domain_audit.c
@@ -344,6 +344,7 @@ virDomainAuditHostdev(virDomainObj *vm, virDomainHostdevDef *hostdev,
virDomainHostdevSubsysSCSI *scsisrc = &hostdev->source.subsys.u.scsi;
virDomainHostdevSubsysSCSIVHost *hostsrc = &hostdev->source.subsys.u.scsi_host;
virDomainHostdevSubsysMediatedDev *mdevsrc = &hostdev->source.subsys.u.mdev;
+ virDomainHostdevSubsysVDPA *vdpasrc = &hostdev->source.subsys.u.vdpa;
virUUIDFormat(vm->def->uuid, uuidstr);
if (!(vmname = virAuditEncode("vm", vm->def->name))) {
@@ -383,6 +384,9 @@ virDomainAuditHostdev(virDomainObj *vm, virDomainHostdevDef *hostdev,
case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_MDEV:
address = g_strdup(mdevsrc->uuidstr);
break;
+ case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_VDPA:
+ address = g_strdup(vdpasrc->devpath);
+ break;
case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_LAST:
default:
VIR_WARN("Unexpected hostdev type while encoding audit message: %d",
diff --git a/src/conf/domain_conf.c b/src/conf/domain_conf.c
index f54f99f379..0f4c9012c1 100644
--- a/src/conf/domain_conf.c
+++ b/src/conf/domain_conf.c
@@ -1052,6 +1052,7 @@ VIR_ENUM_IMPL(virDomainHostdevSubsys,
"scsi",
"scsi_host",
"mdev",
+ "vdpa",
);
VIR_ENUM_IMPL(virDomainHostdevSubsysPCIBackend,
@@ -2645,6 +2646,9 @@ virDomainHostdevDefClear(virDomainHostdevDef *def)
case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_PCI:
g_clear_pointer(&def->source.subsys.u.pci.origstates, virBitmapFree);
break;
+ case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_VDPA:
+ VIR_FREE(def->source.subsys.u.vdpa.devpath);
+ break;
case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_USB:
case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_MDEV:
case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_LAST:
@@ -6170,6 +6174,22 @@ virDomainHostdevSubsysMediatedDevDefParseXML(virDomainHostdevDef *def,
return 0;
}
+static int
+virDomainHostdevSubsysVDPADefParseXML(xmlNodePtr sourcenode,
+ virDomainHostdevDef *def)
+{
+ g_autofree char *devpath = NULL;
+ virDomainHostdevSubsysVDPA *vdpa = &def->source.subsys.u.vdpa;
+
+ if(!(devpath = virXMLPropString(sourcenode, "dev"))) {
+ virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
+ _("Missing 'dev' attribute for element <source>"));
+ return -1;
+ }
+ vdpa->devpath = g_steal_pointer(&devpath);
+ return 0;
+}
+
static int
virDomainHostdevDefParseXMLSubsys(xmlNodePtr node,
xmlXPathContextPtr ctxt,
@@ -6315,6 +6335,11 @@ virDomainHostdevDefParseXMLSubsys(xmlNodePtr node,
if (virDomainHostdevSubsysMediatedDevDefParseXML(def, ctxt) < 0)
return -1;
break;
+ case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_VDPA:
+ if (virDomainHostdevSubsysVDPADefParseXML(sourcenode, def) < 0) {
+ return -1;
+ }
+ break;
case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_LAST:
default:
@@ -13005,6 +13030,7 @@ virDomainHostdevDefParseXML(virDomainXMLOption *xmlopt,
case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_PCI:
case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_SCSI_HOST:
case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_MDEV:
+ case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_VDPA:
case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_LAST:
break;
}
@@ -14152,6 +14178,9 @@ virDomainHostdevMatchSubsys(virDomainHostdevDef *a,
return 0;
case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_MDEV:
return virDomainHostdevMatchSubsysMediatedDev(a, b);
+ case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_VDPA:
+ return STREQ(a->source.subsys.u.vdpa.devpath,
+ b->source.subsys.u.vdpa.devpath);
case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_LAST:
return 0;
}
@@ -23501,6 +23530,16 @@ virDomainHostdevDefFormatSubsysMdev(virBuffer *buf,
virXMLFormatElement(buf, "source", NULL, &sourceChildBuf);
}
+static void
+virDomainHostdevDefFormatSubsysVDPA(virBuffer *buf,
+ virDomainHostdevDef *def)
+{
+ g_auto(virBuffer) sourceAttrBuf = VIR_BUFFER_INITIALIZER;
+ virDomainHostdevSubsysVDPA *vdpasrc = &def->source.subsys.u.vdpa;
+ virBufferAsprintf(&sourceAttrBuf, " dev='%s'", vdpasrc->devpath);
+ virXMLFormatElement(buf, "source", &sourceAttrBuf, NULL);
+}
+
static int
virDomainHostdevDefFormatSubsys(virBuffer *buf,
@@ -23528,6 +23567,10 @@ virDomainHostdevDefFormatSubsys(virBuffer *buf,
virDomainHostdevDefFormatSubsysMdev(buf, def);
return 0;
+ case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_VDPA:
+ virDomainHostdevDefFormatSubsysVDPA(buf, def);
+ return 0;
+
case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_LAST:
default:
virReportEnumRangeError(virDomainHostdevSubsysType, def->source.subsys.type);
diff --git a/src/conf/domain_conf.h b/src/conf/domain_conf.h
index 196053d950..2a85f7bcf0 100644
--- a/src/conf/domain_conf.h
+++ b/src/conf/domain_conf.h
@@ -198,6 +198,7 @@ typedef enum {
VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_SCSI,
VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_SCSI_HOST,
VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_MDEV,
+ VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_VDPA,
VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_LAST
} virDomainHostdevSubsysType;
@@ -290,6 +291,10 @@ struct _virDomainHostdevSubsysMediatedDev {
virTristateSwitch ramfb;
};
+struct _virDomainHostdevSubsysVDPA {
+ char *devpath; /* vDPA device path */
+};
+
typedef enum {
VIR_DOMAIN_HOSTDEV_SUBSYS_SCSI_HOST_PROTOCOL_TYPE_NONE = 0,
VIR_DOMAIN_HOSTDEV_SUBSYS_SCSI_HOST_PROTOCOL_TYPE_VHOST,
@@ -324,6 +329,7 @@ struct _virDomainHostdevSubsys {
virDomainHostdevSubsysSCSI scsi;
virDomainHostdevSubsysSCSIVHost scsi_host;
virDomainHostdevSubsysMediatedDev mdev;
+ virDomainHostdevSubsysVDPA vdpa;
} u;
};
diff --git a/src/conf/domain_postparse.c b/src/conf/domain_postparse.c
index e79913b73f..b948bf0653 100644
--- a/src/conf/domain_postparse.c
+++ b/src/conf/domain_postparse.c
@@ -351,6 +351,7 @@ virDomainHostdevDefPostParse(virDomainHostdevDef *dev,
case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_USB:
case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_PCI:
case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_SCSI_HOST:
+ case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_VDPA:
case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_LAST:
break;
}
diff --git a/src/conf/domain_validate.c b/src/conf/domain_validate.c
index c72108886e..d88ef6b915 100644
--- a/src/conf/domain_validate.c
+++ b/src/conf/domain_validate.c
@@ -2190,6 +2190,7 @@ virDomainHostdevDefValidate(const virDomainHostdevDef *hostdev)
}
break;
case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_MDEV:
+ case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_VDPA:
case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_LAST:
break;
}
diff --git a/src/conf/virconftypes.h b/src/conf/virconftypes.h
index 26cb966194..bcdcb8b825 100644
--- a/src/conf/virconftypes.h
+++ b/src/conf/virconftypes.h
@@ -120,6 +120,8 @@ typedef struct _virDomainHostdevSubsys virDomainHostdevSubsys;
typedef struct _virDomainHostdevSubsysMediatedDev virDomainHostdevSubsysMediatedDev;
+typedef struct _virDomainHostdevSubsysVDPA virDomainHostdevSubsysVDPA;
+
typedef struct _virDomainHostdevSubsysPCI virDomainHostdevSubsysPCI;
typedef struct _virDomainHostdevSubsysSCSI virDomainHostdevSubsysSCSI;
diff --git a/src/qemu/qemu_command.c b/src/qemu/qemu_command.c
index d54149ed2d..de3be7fc1c 100644
--- a/src/qemu/qemu_command.c
+++ b/src/qemu/qemu_command.c
@@ -5004,6 +5004,18 @@ qemuBuildHostdevMediatedDevProps(const virDomainDef *def,
return g_steal_pointer(&props);
}
+virJSONValue *
+qemuBuildHostdevVDPADevProps(virDomainHostdevDef *dev)
+{
+ g_autoptr(virJSONValue) props = NULL;
+ virDomainHostdevSubsysVDPA *vdpasrc = &dev->source.subsys.u.vdpa;
+ if (virJSONValueObjectAdd(&props,
+ "s:driver", "vhost-vdpa-device-pci",
+ "s:vhostdev", vdpasrc->devpath,
+ NULL) < 0)
+ return NULL;
+ return g_steal_pointer(&props);
+}
qemuBlockStorageSourceAttachData *
qemuBuildHostdevSCSIDetachPrepare(virDomainHostdevDef *hostdev,
@@ -5199,6 +5211,13 @@ qemuBuildHostdevCommandLine(virCommand *cmd,
return -1;
break;
+ case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_VDPA:
+ if (!(devprops = qemuBuildHostdevVDPADevProps(hostdev)))
+ return -1;
+ if (qemuBuildDeviceCommandlineFromJSON(cmd, devprops, def, qemuCaps) < 0)
+ return -1;
+ break;
+
case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_LAST:
break;
}
diff --git a/src/qemu/qemu_command.h b/src/qemu/qemu_command.h
index 341ec43f9a..432ea59310 100644
--- a/src/qemu/qemu_command.h
+++ b/src/qemu/qemu_command.h
@@ -198,6 +198,9 @@ virJSONValue *
qemuBuildHostdevMediatedDevProps(const virDomainDef *def,
virDomainHostdevDef *dev);
+virJSONValue *
+qemuBuildHostdevVDPADevProps(virDomainHostdevDef *dev);
+
virJSONValue *
qemuBuildRedirdevDevProps(const virDomainDef *def,
virDomainRedirdevDef *dev);
diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
index f8a6c43797..5a7e8d2607 100644
--- a/src/qemu/qemu_domain.c
+++ b/src/qemu/qemu_domain.c
@@ -10528,6 +10528,8 @@ qemuDomainGetHostdevPath(virDomainHostdevDef *dev,
virDomainHostdevSubsysSCSI *scsisrc = &dev->source.subsys.u.scsi;
virDomainHostdevSubsysSCSIVHost *hostsrc = &dev->source.subsys.u.scsi_host;
virDomainHostdevSubsysMediatedDev *mdevsrc = &dev->source.subsys.u.mdev;
+ virDomainHostdevSubsysVDPA *vdpasrc = &dev->source.subsys.u.vdpa;
+
g_autoptr(virUSBDevice) usb = NULL;
g_autoptr(virSCSIDevice) scsi = NULL;
g_autoptr(virSCSIVHostDevice) host = NULL;
@@ -10598,6 +10600,10 @@ qemuDomainGetHostdevPath(virDomainHostdevDef *dev,
if (!(tmpPath = virMediatedDeviceGetIOMMUGroupDev(mdevsrc->uuidstr)))
return -1;
+ perm = VIR_CGROUP_DEVICE_RW;
+ break;
+ case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_VDPA:
+ tmpPath = g_strdup(vdpasrc->devpath);
perm = VIR_CGROUP_DEVICE_RW;
break;
case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_LAST:
@@ -11373,6 +11379,7 @@ qemuDomainPrepareHostdev(virDomainHostdevDef *hostdev,
case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_USB:
case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_SCSI_HOST:
case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_MDEV:
+ case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_VDPA:
case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_LAST:
break;
}
diff --git a/src/qemu/qemu_hotplug.c b/src/qemu/qemu_hotplug.c
index 4e2fc724c0..29c0856f62 100644
--- a/src/qemu/qemu_hotplug.c
+++ b/src/qemu/qemu_hotplug.c
@@ -2805,6 +2805,7 @@ qemuDomainAttachHostDevice(virQEMUDriver *driver,
return -1;
break;
+ case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_VDPA:
case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_LAST:
default:
virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
@@ -4717,6 +4718,7 @@ qemuDomainRemoveHostDevice(virQEMUDriver *driver,
case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_MDEV:
qemuDomainRemoveMediatedDevice(driver, vm, hostdev);
break;
+ case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_VDPA:
case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_LAST:
break;
}
@@ -5743,6 +5745,7 @@ qemuDomainDetachPrepHostdev(virDomainObj *vm,
break;
case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_SCSI_HOST:
break;
+ case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_VDPA:
case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_LAST:
default:
virReportError(VIR_ERR_INTERNAL_ERROR,
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index 41ce565ede..0a89d102df 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -1310,6 +1310,8 @@ qemuMigrationSrcIsAllowedHostdev(const virDomainDef *def)
case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_SCSI:
case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_SCSI_HOST:
case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_MDEV:
+ /* The vDPA devices don't support migration for now */
+ case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_VDPA:
virReportError(VIR_ERR_OPERATION_UNSUPPORTED,
_("cannot migrate a domain with <hostdev mode='subsystem' type='%1$s'>"),
virDomainHostdevSubsysTypeToString(hostdev->source.subsys.type));
diff --git a/src/qemu/qemu_validate.c b/src/qemu/qemu_validate.c
index e475ad035e..18c5b60a35 100644
--- a/src/qemu/qemu_validate.c
+++ b/src/qemu/qemu_validate.c
@@ -2496,6 +2496,8 @@ qemuValidateDomainDeviceDefHostdev(const virDomainHostdevDef *hostdev,
break;
case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_MDEV:
return qemuValidateDomainMdevDef(hostdev, def, qemuCaps);
+ case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_VDPA:
+ break;
case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_LAST:
default:
virReportEnumRangeError(virDomainHostdevSubsysType,
diff --git a/src/security/security_dac.c b/src/security/security_dac.c
index c07e488db7..96aebfce5b 100644
--- a/src/security/security_dac.c
+++ b/src/security/security_dac.c
@@ -1313,6 +1313,7 @@ virSecurityDACSetHostdevLabel(virSecurityManager *mgr,
break;
}
+ case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_VDPA:
case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_LAST:
ret = 0;
break;
@@ -1469,6 +1470,7 @@ virSecurityDACRestoreHostdevLabel(virSecurityManager *mgr,
break;
}
+ case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_VDPA:
case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_LAST:
ret = 0;
break;
diff --git a/src/security/security_selinux.c b/src/security/security_selinux.c
index 84c5ce75ed..652c086cb0 100644
--- a/src/security/security_selinux.c
+++ b/src/security/security_selinux.c
@@ -2256,6 +2256,7 @@ virSecuritySELinuxSetHostdevSubsysLabel(virSecurityManager *mgr,
break;
}
+ case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_VDPA:
case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_LAST:
ret = 0;
break;
@@ -2487,6 +2488,7 @@ virSecuritySELinuxRestoreHostdevSubsysLabel(virSecurityManager *mgr,
break;
}
+ case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_VDPA:
case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_LAST:
ret = 0;
break;
--
2.27.0

View File

@ -0,0 +1,146 @@
From c45717fa823682a04814ca49669116b2f5917671 Mon Sep 17 00:00:00 2001
From: Jiahui Cen <cenjiahui@huawei.com>
Date: Thu, 25 Feb 2021 18:55:30 +0800
Subject: [PATCH] libvirt: Add 'retry' support for error policy
Introduce error_policy=/rerror_policy='retry' to support the
werror=/rerror=retry mechanism in QEMU.
Add the retry_interval parameter to control the interval between retries.
Add the retry_timeout parameter to limit the total retry time.
Signed-off-by: Jiahui Cen <cenjiahui@huawei.com>
Signed-off-by: Ying Fang <fangying1@huawei.com>
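A minimal sketch of a disk definition using the new policy (the image path and the numeric values are illustrative assumptions):

    <disk type='file' device='disk'>
      <driver name='qemu' type='qcow2' error_policy='retry' rerror_policy='retry' retry_interval='1000' retry_timeout='10000'/>
      <source file='/var/lib/libvirt/images/example.qcow2'/>
      <target dev='vda' bus='virtio'/>
    </disk>

With such a configuration, failed requests are retried at the configured interval, with the timeout bounding the retries, per the werror=/rerror=retry semantics described above.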
---
src/conf/domain_conf.c | 26 ++++++++++++++++++++++++++
src/conf/domain_conf.h | 3 +++
src/qemu/qemu_command.c | 13 +++++++++++++
src/qemu/qemu_domain.c | 2 ++
4 files changed, 44 insertions(+)
diff --git a/src/conf/domain_conf.c b/src/conf/domain_conf.c
index 22ad43e1d7..f31f06b428 100644
--- a/src/conf/domain_conf.c
+++ b/src/conf/domain_conf.c
@@ -385,6 +385,7 @@ VIR_ENUM_IMPL(virDomainDiskErrorPolicy,
"report",
"ignore",
"enospace",
+ "retry",
);
VIR_ENUM_IMPL(virDomainDiskIo,
@@ -7773,6 +7774,8 @@ static int
virDomainDiskDefDriverParseXML(virDomainDiskDef *def,
xmlNodePtr cur)
{
+ g_autofree char *tmp = NULL;
+
def->driverName = virXMLPropString(cur, "name");
if (virXMLPropEnum(cur, "cache", virDomainDiskCacheTypeFromString,
@@ -7796,6 +7799,29 @@ virDomainDiskDefDriverParseXML(virDomainDiskDef *def,
return -1;
}
+ def->retry_interval = -1;
+ if ((tmp = virXMLPropString(cur, "retry_interval")) &&
+ ((def->error_policy != VIR_DOMAIN_DISK_ERROR_POLICY_RETRY &&
+ def->rerror_policy != VIR_DOMAIN_DISK_ERROR_POLICY_RETRY) ||
+ (virStrToLong_ll(tmp, NULL, 10, &def->retry_interval) < 0) ||
+ (def->retry_interval < 0))) {
+ virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
+ _("unknown disk retry interval '%s'"), tmp);
+ return -1;
+ }
+ VIR_FREE(tmp);
+
+ def->retry_timeout = -1;
+ if ((tmp = virXMLPropString(cur, "retry_timeout")) &&
+ ((def->error_policy != VIR_DOMAIN_DISK_ERROR_POLICY_RETRY &&
+ def->rerror_policy != VIR_DOMAIN_DISK_ERROR_POLICY_RETRY) ||
+ (virStrToLong_ll(tmp, NULL, 10, &def->retry_timeout) < 0) ||
+ (def->retry_timeout < 0))) {
+ virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
+ _("unknown disk retry interval '%s'"), tmp);
+ return -1;
+ }
+
if (virXMLPropEnum(cur, "io", virDomainDiskIoTypeFromString,
VIR_XML_PROP_NONZERO, &def->iomode) < 0)
return -1;
diff --git a/src/conf/domain_conf.h b/src/conf/domain_conf.h
index ed07859bc5..021623cce7 100644
--- a/src/conf/domain_conf.h
+++ b/src/conf/domain_conf.h
@@ -419,6 +419,7 @@ typedef enum {
VIR_DOMAIN_DISK_ERROR_POLICY_REPORT,
VIR_DOMAIN_DISK_ERROR_POLICY_IGNORE,
VIR_DOMAIN_DISK_ERROR_POLICY_ENOSPACE,
+ VIR_DOMAIN_DISK_ERROR_POLICY_RETRY,
VIR_DOMAIN_DISK_ERROR_POLICY_LAST
} virDomainDiskErrorPolicy;
@@ -556,6 +557,8 @@ struct _virDomainDiskDef {
virDomainDiskCache cachemode;
virDomainDiskErrorPolicy error_policy;
virDomainDiskErrorPolicy rerror_policy;
+ long long retry_interval;
+ long long retry_timeout;
virDomainDiskIo iomode;
virTristateSwitch ioeventfd;
virTristateSwitch event_idx;
diff --git a/src/qemu/qemu_command.c b/src/qemu/qemu_command.c
index d54149ed2d..678b84b332 100644
--- a/src/qemu/qemu_command.c
+++ b/src/qemu/qemu_command.c
@@ -1753,6 +1753,8 @@ qemuBuildDiskDeviceProps(const virDomainDef *def,
const char *biosCHSTrans = NULL;
const char *wpolicy = NULL;
const char *rpolicy = NULL;
+ g_autoptr(virJSONValue) retry_interval = NULL;
+ g_autoptr(virJSONValue) retry_timeout = NULL;
switch (disk->bus) {
case VIR_DOMAIN_DISK_BUS_IDE:
@@ -1912,6 +1914,15 @@ qemuBuildDiskDeviceProps(const virDomainDef *def,
}
qemuBuildDiskGetErrorPolicy(disk, &wpolicy, &rpolicy);
+ if ((disk->error_policy == VIR_DOMAIN_DISK_ERROR_POLICY_RETRY ||
+ disk->rerror_policy == VIR_DOMAIN_DISK_ERROR_POLICY_RETRY) &&
+ disk->retry_interval >= 0)
+ retry_interval = virJSONValueNewNumberUlong(disk->retry_interval);
+ if ((disk->error_policy == VIR_DOMAIN_DISK_ERROR_POLICY_RETRY ||
+ disk->rerror_policy == VIR_DOMAIN_DISK_ERROR_POLICY_RETRY) &&
+ disk->retry_timeout >= 0)
+ retry_timeout = virJSONValueNewNumberUlong(disk->retry_timeout);
+
if (virJSONValueObjectAdd(&props,
"S:device_id", scsiVPDDeviceId,
@@ -1936,6 +1947,8 @@ qemuBuildDiskDeviceProps(const virDomainDef *def,
"S:serial", serial,
"S:werror", wpolicy,
"S:rerror", rpolicy,
+ "A:retry_interval", &retry_interval,
+ "A:retry_timeout", &retry_timeout,
NULL) < 0)
return NULL;
diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
index 953808fcfe..fa6a7fd1c9 100644
--- a/src/qemu/qemu_domain.c
+++ b/src/qemu/qemu_domain.c
@@ -8325,6 +8325,8 @@ qemuDomainDiskChangeSupported(virDomainDiskDef *disk,
CHECK_EQ(cachemode, "cache", true);
CHECK_EQ(error_policy, "error_policy", true);
CHECK_EQ(rerror_policy, "rerror_policy", true);
+ CHECK_EQ(retry_interval, "retry_interval", true);
+ CHECK_EQ(retry_timeout, "retry_timeout", true);
CHECK_EQ(iomode, "io", true);
CHECK_EQ(ioeventfd, "ioeventfd", true);
CHECK_EQ(event_idx, "event_idx", true);
--
2.27.0

View File

@ -0,0 +1,86 @@
From 2b79389fe60dc8fc0bf8788a346a35d63ae6883d Mon Sep 17 00:00:00 2001
From: Jiahui Cen <cenjiahui@huawei.com>
Date: Thu, 18 Mar 2021 15:14:20 +0800
Subject: [PATCH] libvirt/conf: Set default values of retry fileds
Currently the default values of retry_interval and retry_timeout are set
to -1 when the 'driver' option is present without the retry fields, which
conflicts with the defaults used when the 'driver' option is absent.
So set the default values of retry_interval and retry_timeout to 0 when
the retry policy is not enabled.
Signed-off-by: Jiahui Cen <cenjiahui@huawei.com>
---
src/conf/domain_conf.c | 18 ++++++++++++------
src/conf/domain_conf.h | 2 ++
2 files changed, 14 insertions(+), 6 deletions(-)
diff --git a/src/conf/domain_conf.c b/src/conf/domain_conf.c
index f31f06b428..0aa514bda3 100644
--- a/src/conf/domain_conf.c
+++ b/src/conf/domain_conf.c
@@ -7775,6 +7775,7 @@ virDomainDiskDefDriverParseXML(virDomainDiskDef *def,
xmlNodePtr cur)
{
g_autofree char *tmp = NULL;
+ bool retry_enabled = false;
def->driverName = virXMLPropString(cur, "name");
@@ -7799,28 +7800,33 @@ virDomainDiskDefDriverParseXML(virDomainDiskDef *def,
return -1;
}
- def->retry_interval = -1;
+ retry_enabled = (def->error_policy == VIR_DOMAIN_DISK_ERROR_POLICY_RETRY) ||
+ (def->rerror_policy == VIR_DOMAIN_DISK_ERROR_POLICY_RETRY);
+
if ((tmp = virXMLPropString(cur, "retry_interval")) &&
- ((def->error_policy != VIR_DOMAIN_DISK_ERROR_POLICY_RETRY &&
- def->rerror_policy != VIR_DOMAIN_DISK_ERROR_POLICY_RETRY) ||
+ (!retry_enabled ||
(virStrToLong_ll(tmp, NULL, 10, &def->retry_interval) < 0) ||
(def->retry_interval < 0))) {
virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
_("unknown disk retry interval '%s'"), tmp);
return -1;
}
+ if (retry_enabled && !tmp) {
+ def->retry_interval = VIR_DOMAIN_DISK_DEFAULT_RETRY_INTERVAL;
+ }
VIR_FREE(tmp);
- def->retry_timeout = -1;
if ((tmp = virXMLPropString(cur, "retry_timeout")) &&
- ((def->error_policy != VIR_DOMAIN_DISK_ERROR_POLICY_RETRY &&
- def->rerror_policy != VIR_DOMAIN_DISK_ERROR_POLICY_RETRY) ||
+ (!retry_enabled ||
(virStrToLong_ll(tmp, NULL, 10, &def->retry_timeout) < 0) ||
(def->retry_timeout < 0))) {
virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
_("unknown disk retry interval '%s'"), tmp);
return -1;
}
+ if (retry_enabled && !tmp) {
+ def->retry_timeout = VIR_DOMAIN_DISK_DEFAULT_RETRY_TIMEOUT;
+ }
if (virXMLPropEnum(cur, "io", virDomainDiskIoTypeFromString,
VIR_XML_PROP_NONZERO, &def->iomode) < 0)
diff --git a/src/conf/domain_conf.h b/src/conf/domain_conf.h
index 021623cce7..c810721d5b 100644
--- a/src/conf/domain_conf.h
+++ b/src/conf/domain_conf.h
@@ -515,6 +515,8 @@ typedef enum {
VIR_ENUM_DECL(virDomainSnapshotLocation);
+#define VIR_DOMAIN_DISK_DEFAULT_RETRY_INTERVAL 1000
+#define VIR_DOMAIN_DISK_DEFAULT_RETRY_TIMEOUT 0
/* Stores the virtual disk configuration */
struct _virDomainDiskDef {
--
2.27.0

View File

@ -262,7 +262,7 @@
Summary: Library providing a simple virtualization API
Name: libvirt
Version: 9.10.0
Release: 3
Release: 4
License: LGPLv2+
URL: https://libvirt.org/
@ -270,8 +270,38 @@ URL: https://libvirt.org/
%define mainturl stable_updates/
%endif
Source: https://download.libvirt.org/%{?mainturl}libvirt-%{version}.tar.xz
Patch0000: nodedev-delete-mdev.patch
Patch0001: meson.build-isolate-the-client-qemu-using-macros.patch
Patch0001: nodedev-delete-mdev.patch
Patch0002: meson.build-isolate-the-client-qemu-using-macros.patch
Patch0003: test-commandtest-skip-the-test4-if-the-testcase-is-r.patch
Patch0004: qemu-fix-a-concurrent-operation-situation.patch
Patch0005: qemu-add-pointer-check-in-qemuMonitorLastError.patch
Patch0006: migration-migration-pin-add-some-migration-multiFd-p.patch
Patch0007: migration-migration-pin-add-qemu-monitor-callback-fu.patch
Patch0008: migration-migration-pin-add-migrationpin-for-migrati.patch
Patch0009: migration-migration-pin-add-domainMigrationPid-for-q.patch
Patch0010: migration-multifd-pin-add-qemu-monitor-callback-func.patch
Patch0011: migration-multifd-pin-support-migration-multifd-thre.patch
Patch0012: node_device-fix-leak-of-DIR.patch
Patch0013: hostdev-Introduce-vDPA-device-to-hostdev-subsystem-a.patch
Patch0014: vdpa-support-vdpa-device-hot-plug-unplug.patch
Patch0015: vdpa-support-vdpa-device-migrate.patch
Patch0016: libvirt-Add-retry-support-for-error-policy.patch
Patch0017: qemu-Support-retry-BLOCK_IO_ERROR-event.patch
Patch0018: libvirt-conf-Set-default-values-of-retry-fileds.patch
Patch0019: nodedev-fix-potential-heap-use-after-free.patch
Patch0020: conf-domain_conf-pin-the-retry_interval-and-retry_ti.patch
Patch0021: cpu-aarch64-enable-host-model-cpu-for-AArch64-archit.patch
Patch0022: cpu_map-add-kunpeng-920-features-to-arm-features.patch
Patch0023: tests-Add-hostcpudata-for-machine-with-CPU-clusters.patch
Patch0024: conf-Report-CPU-clusters-in-capabilities-XML.patch
Patch0025: conf-Allow-specifying-CPU-clusters.patch
Patch0026: qemu-Introduce-QEMU_CAPS_SMP_CLUSTERS.patch
Patch0027: qemu-Use-CPU-clusters-for-guests.patch
Patch0028: qemu-Make-monitor-aware-of-CPU-clusters.patch
Patch0029: tests-Verify-handling-of-CPU-clusters-in-QMP-data.patch
Patch0030: docs-Improve-documentation-for-CPU-topology.patch
Patch0031: docs-Document-CPU-clusters.patch
Requires: libvirt-daemon = %{version}-%{release}
Requires: libvirt-daemon-config-network = %{version}-%{release}
@ -2565,6 +2595,37 @@ exit 0
%endif
%changelog
* Tue Apr 2 2024 JiaboFeng <fengjiabo1@huawei.com> - 9.10.0-4
- docs: Document CPU clusters
- docs: Improve documentation for CPU topology
- tests: Verify handling of CPU clusters in QMP data
- qemu: Make monitor aware of CPU clusters
- qemu: Use CPU clusters for guests
- qemu: Introduce QEMU_CAPS_SMP_CLUSTERS
- conf: Allow specifying CPU clusters
- conf: Report CPU clusters in capabilities XML
- tests: Add hostcpudata for machine with CPU clusters
- cpu_map: add kunpeng-920 features to arm features
- cpu/aarch64: enable host-model cpu for AArch64 architecture
- conf/domain_conf: pin the retry_interval and retry_timeout parameters to xml
- nodedev: fix potential heap use after free
- libvirt/conf: Set default values of retry fileds
- qemu: Support 'retry' BLOCK_IO_ERROR event.
- libvirt: Add 'retry' support for error policy
- vdpa: support vdpa device migrate
- vdpa: support vdpa device hot plug/unplug
- hostdev:Introduce vDPA device to hostdev subsystem as a new subtype
- node_device: fix leak of DIR*
- migration/multifd-pin: support migration multifd thread pin
- migration/multifd-pin: add qemu monitor callback functions
- migration/migration-pin: add domainMigrationPid for qemuMonitorCallbacks
- migration/migration-pin: add migrationpin for migration parameters
- migration/migration-pin: add qemu monitor callback functions
- migration/migration-pin:add some migration/multiFd params
- qemu: add pointer check in qemuMonitorLastError
- qemu: fix a concurrent operation situation
- test/commandtest: skip the test4 if the testcase is run in the container env
* Tue Apr 2 2024 mayunlong <mayunlong6@huawei.com> - 9.10.0-3
- Incorrect tar package, update it

View File

@ -0,0 +1,249 @@
From c98bd215cdd2ac3e4296d5ff6264eae18a8cda90 Mon Sep 17 00:00:00 2001
From: zhengchuan <zhengchuan@huawei.com>
Date: Wed, 30 Nov 2022 15:01:12 +0800
Subject: [PATCH] migration/migration-pin: add domainMigrationPid for
qemuMonitorCallbacks
add domainMigrationPid for qemuMonitorCallbacks
Signed-off-by: zhengchuan <zhengchuan@huawei.com>
---
src/qemu/qemu_process.c | 184 ++++++++++++++++++++++++++++++++++++++++
src/qemu/qemu_process.h | 12 +++
2 files changed, 196 insertions(+)
diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
index fc05b4b24f..5be6710ea7 100644
--- a/src/qemu/qemu_process.c
+++ b/src/qemu/qemu_process.c
@@ -1819,6 +1819,7 @@ static qemuMonitorCallbacks monitorCallbacks = {
.domainMemoryFailure = qemuProcessHandleMemoryFailure,
.domainMemoryDeviceSizeChange = qemuProcessHandleMemoryDeviceSizeChange,
.domainDeviceUnplugError = qemuProcessHandleDeviceUnplugErr,
+ .domainMigrationPid = qemuProcessHandleMigrationPid,
.domainNetdevStreamDisconnected = qemuProcessHandleNetdevStreamDisconnected,
};
@@ -2676,6 +2677,189 @@ qemuProcessResctrlCreate(virQEMUDriver *driver,
}
+int
+qemuProcessSetupMigration(virDomainObj *vm,
+ virDomainMigrationIDDefPtr migration)
+{
+ return qemuProcessSetupPid(vm, migration->thread_id,
+ VIR_CGROUP_THREAD_MIGRATION_THREAD,
+ 0,
+ vm->def->cputune.emulatorpin,
+ vm->def->cputune.emulator_period,
+ vm->def->cputune.emulator_quota,
+ &migration->sched);
+}
+
+
+unsigned char *
+virParseCPUList(int *cpumaplen, const char *cpulist, int maxcpu)
+{
+ int lastcpu;
+ unsigned char *cpumap = NULL;
+ virBitmap *map = NULL;
+
+ if (cpulist[0] == 'r') {
+ map = virBitmapNew(maxcpu);
+ if (!map)
+ return NULL;
+ virBitmapSetAll(map);
+ } else {
+ if (virBitmapParse(cpulist, &map, 1024) < 0 ||
+ virBitmapIsAllClear(map)) {
+ goto cleanup;
+ }
+
+ lastcpu = virBitmapLastSetBit(map);
+ if (lastcpu >= maxcpu)
+ goto cleanup;
+ }
+
+ if (virBitmapToData(map, &cpumap, cpumaplen) < 0)
+ VIR_ERROR(_("Bitmap to data failure"));
+
+ cleanup:
+ virBitmapFree(map);
+ return cpumap;
+}
+
+
+/*
+ * If priv->pcpumap is NULL, the migrationpin command has not been called;
+ * otherwise the affinity of the migration thread is set from migrationpin.
+ */
+static virBitmap *
+qemuProcessGetPcpumap(qemuDomainObjPrivate *priv)
+{
+ int cpumaplen = 0;
+ int maxcpu = 0;
+ g_autofree unsigned char *cpumap = NULL;
+ virBitmap *pcpumap = NULL;
+
+ if (priv->pcpumap)
+ return priv->pcpumap;
+
+ if (!(priv->migrationThreadPinList) || STREQ(priv->migrationThreadPinList, "")) {
+ VIR_ERROR(_("didn't set the migratin thread pin"));
+ return NULL;
+ }
+
+ /* check whether migration.pin is still the default value */
+ if (STREQ(priv->migrationThreadPinList, "none"))
+ return NULL;
+
+ maxcpu = virHostCPUGetCount();
+ if (maxcpu < 0) {
+ VIR_ERROR(_("get the cpu count of host failure"));
+ return NULL;
+ }
+
+ cpumap = virParseCPUList(&cpumaplen, priv->migrationThreadPinList, maxcpu);
+ if (!cpumap) {
+ VIR_ERROR(_("parse migration.pin params failure : migration.pin = %s"),
+ priv->migrationThreadPinList);
+ return NULL;
+ }
+
+ if (!(pcpumap = virBitmapNewData(cpumap, cpumaplen))) {
+ VIR_ERROR(_("Bitmap data failure"));
+ return pcpumap;
+ }
+
+ return pcpumap;
+}
+
+
+/*
+ * In order to set the migration thread affinity while the VM is migrating,
+ * we need to create a cgroup for the migration thread.
+ */
+static void
+qemuProcessSetMigthreadAffinity(qemuDomainObjPrivate *priv,
+ virBitmap *pcpumap,
+ int mpid)
+{
+ int migration_id = 0;
+ virCgroup *cgroup_migthread = NULL;
+
+ if (!pcpumap)
+ return;
+
+ if (virCgroupHasController(priv->cgroup,
+ VIR_CGROUP_CONTROLLER_CPUSET)) {
+ if (virCgroupNewThread(priv->cgroup, VIR_CGROUP_THREAD_MIGRATION_THREAD,
+ migration_id, false, &cgroup_migthread) < 0)
+ goto cleanup;
+
+ if (virCgroupSetupCpusetCpus(cgroup_migthread, pcpumap) < 0) {
+ virReportError(VIR_ERR_OPERATION_INVALID,
+ _("failed to set cpuset.cpus in cgroup"
+ " for migration%d thread"), migration_id);
+ goto cleanup;
+ }
+ }
+
+ if (virProcessSetAffinity(mpid, pcpumap, false) < 0)
+ VIR_WARN("failed to set affinity in migration");
+
+ cleanup:
+ if (cgroup_migthread)
+ virCgroupFree(cgroup_migthread);
+ return;
+}
+
+
+void
+qemuProcessHandleMigrationPid(qemuMonitor *mon ATTRIBUTE_UNUSED,
+ virDomainObj *vm,
+ int mpid)
+{
+ qemuDomainObjPrivate *priv;
+ char *mpidStr = NULL;
+ virDomainMigrationIDDefPtr migration = NULL;
+ virBitmap *pcpumap = NULL;
+ virObjectLock(vm);
+
+ VIR_INFO("Migrating domain %p %s, migration pid %d",
+ vm, vm->def->name, mpid);
+
+ priv = vm->privateData;
+ if (vm->job->asyncJob == VIR_ASYNC_JOB_NONE) {
+ VIR_DEBUG("got MIGRATION_PID event without a migration job");
+ goto cleanup;
+ }
+
+ migration = g_malloc0(sizeof(*migration));
+ migration->thread_id = mpid;
+
+ if (qemuProcessSetupMigration(vm, migration) < 0) {
+ VIR_ERROR(_("fail to setup migration cgroup"));
+ goto cleanup;
+ }
+
+ mpidStr = g_strdup_printf("%d", mpid);
+
+ VIR_FREE(priv->migrationPids);
+ priv->migrationPids = mpidStr;
+
+ pcpumap = qemuProcessGetPcpumap(priv);
+
+ if (!pcpumap)
+ goto cleanup;
+
+ qemuProcessSetMigthreadAffinity(priv, pcpumap, mpid);
+
+ cleanup:
+ /*
+ * If pcpumap was built from priv->migrationThreadPinList rather than
+ * taken from priv->pcpumap, we need to free it here.
+ */
+ if (pcpumap != priv->pcpumap)
+ virBitmapFree(pcpumap);
+ virDomainMigrationIDDefFree(migration);
+ virObjectUnlock(vm);
+}
+
+
static char *
qemuProcessBuildPRHelperPidfilePathOld(virDomainObj *vm)
{
diff --git a/src/qemu/qemu_process.h b/src/qemu/qemu_process.h
index c1ea949215..fff976f6f7 100644
--- a/src/qemu/qemu_process.h
+++ b/src/qemu/qemu_process.h
@@ -25,6 +25,7 @@
#include "qemu_domain.h"
#include "qemu_saveimage.h"
#include "vireventthread.h"
+#include "domain_conf.h"
int qemuProcessPrepareMonitorChr(virDomainChrSourceDef *monConfig,
const char *domainDir);
@@ -250,6 +251,17 @@ int qemuProcessQMPStart(qemuProcessQMP *proc);
bool qemuProcessRebootAllowed(const virDomainDef *def);
+int qemuProcessSetupMigration(virDomainObj *vm,
+ virDomainMigrationIDDefPtr migration);
+
+unsigned char * virParseCPUList(int *cpumaplen,
+ const char *cpulist,
+ int maxcpu);
+
+void qemuProcessHandleMigrationPid(qemuMonitor *mon ATTRIBUTE_UNUSED,
+ virDomainObj *vm,
+ int mpid);
+
void qemuProcessCleanupMigrationJob(virQEMUDriver *driver,
virDomainObj *vm);
--
2.27.0

View File

@ -0,0 +1,252 @@
From 4b1281075aacc2e968e2e35bd8a81f3199faef7d Mon Sep 17 00:00:00 2001
From: zhengchuan <zhengchuan@huawei.com>
Date: Wed, 30 Nov 2022 15:01:12 +0800
Subject: [PATCH] migration/migration-pin: add migrationpin for migration
parameters
Add a migrationpin option to the live migration parameters so that the
migration thread can be bound to specific host CPUs during VM migration.
Signed-off-by: zhengchuan <zhengchuan@huawei.com>
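For illustration only (not part of the patch): the new parameter can be supplied
through the regular typed-parameter migration API. The connection URIs, guest name
and CPU list below are placeholders; the literal "migration.pin" matches the
VIR_MIGRATE_PARAM_MIGRATIONPIN macro added above, and the value is a CPU list in
the format accepted by virParseCPUList() ("0-3", "1,5", or "r" for all host CPUs).

/* Sketch: live-migrate a guest while pinning the source migration
 * thread to host CPUs 0-3 via the new "migration.pin" parameter.
 * Build with: gcc example.c $(pkg-config --cflags --libs libvirt)
 */
#include <stdlib.h>
#include <libvirt/libvirt.h>

int main(void)
{
    virConnectPtr src = NULL;
    virConnectPtr dst = NULL;
    virDomainPtr dom = NULL;
    virDomainPtr migrated = NULL;
    virTypedParameterPtr params = NULL;
    int nparams = 0;
    int maxparams = 0;
    int ret = EXIT_FAILURE;

    if (!(src = virConnectOpen("qemu:///system")) ||
        !(dst = virConnectOpen("qemu+ssh://dst.example.com/system")) ||
        !(dom = virDomainLookupByName(src, "demo-guest")))
        goto cleanup;

    /* "migration.pin" is the value of VIR_MIGRATE_PARAM_MIGRATIONPIN;
     * the string form avoids requiring the patched headers. */
    if (virTypedParamsAddString(&params, &nparams, &maxparams,
                                "migration.pin", "0-3") < 0)
        goto cleanup;

    if (!(migrated = virDomainMigrate3(dom, dst, params, nparams,
                                       VIR_MIGRATE_LIVE)))
        goto cleanup;

    ret = EXIT_SUCCESS;

 cleanup:
    virTypedParamsFree(params, nparams);
    if (migrated)
        virDomainFree(migrated);
    if (dom)
        virDomainFree(dom);
    if (dst)
        virConnectClose(dst);
    if (src)
        virConnectClose(src);
    return ret;
}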
---
include/libvirt/libvirt-domain.h | 9 +++++++++
src/qemu/qemu_domain.c | 1 +
src/qemu/qemu_domain.h | 1 +
src/qemu/qemu_migration.c | 2 ++
src/qemu/qemu_migration.h | 1 +
src/qemu/qemu_migration_params.c | 22 ++++++++++++++++++++++
src/qemu/qemu_migration_params.h | 4 ++++
src/qemu/qemu_monitor.c | 9 +++------
src/qemu/qemu_monitor.h | 9 ++++-----
src/qemu/qemu_monitor_json.c | 6 +++---
10 files changed, 50 insertions(+), 14 deletions(-)
diff --git a/include/libvirt/libvirt-domain.h b/include/libvirt/libvirt-domain.h
index a1902546bb..26bf60ce34 100644
--- a/include/libvirt/libvirt-domain.h
+++ b/include/libvirt/libvirt-domain.h
@@ -1312,6 +1312,15 @@ typedef enum {
*/
# define VIR_MIGRATE_PARAM_COMPRESSION_MT_DTHREADS "compression.mt.dthreads"
+/**
+ * VIR_MIGRATE_PARAM_MIGRATIONPIN:
+ *
+ * virDomainMigrate* params field: the pin of migration threads for
+ * migration as VIR_TYPED_PARAM_STRING.
+ * Since: 6.2.0
+ */
+# define VIR_MIGRATE_PARAM_MIGRATIONPIN "migration.pin"
+
/**
* VIR_MIGRATE_PARAM_COMPRESSION_XBZRLE_CACHE:
*
diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
index 6b09c15f7a..f8a6c43797 100644
--- a/src/qemu/qemu_domain.c
+++ b/src/qemu/qemu_domain.c
@@ -1880,6 +1880,7 @@ qemuDomainObjPrivateFree(void *data)
g_free(priv->lockState);
g_free(priv->origname);
VIR_FREE(priv->migrationPids);
+ VIR_FREE(priv->migrationThreadPinList);
virBitmapFree(priv->pcpumap);
VIR_FREE(priv->migrationMultiFdPids);
diff --git a/src/qemu/qemu_domain.h b/src/qemu/qemu_domain.h
index 0eff9eab72..7074023229 100644
--- a/src/qemu/qemu_domain.h
+++ b/src/qemu/qemu_domain.h
@@ -147,6 +147,7 @@ struct _qemuDomainObjPrivate {
it was changed for the current
migration job. */
char *migrationPids;
+ char *migrationThreadPinList;
char *migrationMultiFdPids;
unsigned int migrationMultiFdCount;
virChrdevs *devs;
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index b13f2e0c24..00dfd46ae7 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -4838,6 +4838,8 @@ qemuMigrationSrcRun(virQEMUDriver *driver,
priv->migMaxBandwidth * 1024 * 1024) < 0)
goto error;
+ qemuMigrationMigrationParamsToVM(migParams, vm);
+
if (qemuMigrationParamsApply(vm, VIR_ASYNC_JOB_MIGRATION_OUT,
migParams, flags) < 0)
goto error;
diff --git a/src/qemu/qemu_migration.h b/src/qemu/qemu_migration.h
index ed62fd4a91..1f309bbdc2 100644
--- a/src/qemu/qemu_migration.h
+++ b/src/qemu/qemu_migration.h
@@ -89,6 +89,7 @@
VIR_MIGRATE_PARAM_COMPRESSION_ZLIB_LEVEL, VIR_TYPED_PARAM_INT, \
VIR_MIGRATE_PARAM_COMPRESSION_ZSTD_LEVEL, VIR_TYPED_PARAM_INT, \
VIR_MIGRATE_PARAM_TLS_DESTINATION, VIR_TYPED_PARAM_STRING, \
+ VIR_MIGRATE_PARAM_MIGRATIONPIN, VIR_TYPED_PARAM_STRING, \
VIR_MIGRATE_PARAM_DISKS_URI, VIR_TYPED_PARAM_STRING, \
NULL
diff --git a/src/qemu/qemu_migration_params.c b/src/qemu/qemu_migration_params.c
index 79fe6e97c8..5c75fa4d8e 100644
--- a/src/qemu/qemu_migration_params.c
+++ b/src/qemu/qemu_migration_params.c
@@ -121,6 +121,7 @@ VIR_ENUM_IMPL(qemuMigrationParam,
"multifd-compression",
"multifd-zlib-level",
"multifd-zstd-level",
+ "migrationpin",
);
typedef struct _qemuMigrationParamsAlwaysOnItem qemuMigrationParamsAlwaysOnItem;
@@ -243,6 +244,10 @@ static const qemuMigrationParamsTPMapItem qemuMigrationParamsTPMap[] = {
{.typedParam = VIR_MIGRATE_PARAM_TLS_DESTINATION,
.param = QEMU_MIGRATION_PARAM_TLS_HOSTNAME,
.party = QEMU_MIGRATION_SOURCE},
+
+ {.typedParam = VIR_MIGRATE_PARAM_MIGRATIONPIN,
+ .param = QEMU_MIGRATION_PARAM_MIGRATIONPIN,
+ .party = QEMU_MIGRATION_SOURCE},
};
static const qemuMigrationParamInfoItem qemuMigrationParamInfo[] = {
@@ -295,6 +300,9 @@ static const qemuMigrationParamInfoItem qemuMigrationParamInfo[] = {
[QEMU_MIGRATION_PARAM_MULTIFD_ZSTD_LEVEL] = {
.type = QEMU_MIGRATION_PARAM_TYPE_INT,
},
+ [QEMU_MIGRATION_PARAM_MIGRATIONPIN] = {
+ .type = QEMU_MIGRATION_PARAM_TYPE_STRING,
+ }
};
G_STATIC_ASSERT(G_N_ELEMENTS(qemuMigrationParamInfo) == QEMU_MIGRATION_PARAM_LAST);
@@ -651,6 +659,16 @@ qemuMigrationParamsSetCompression(virTypedParameterPtr params,
}
+void
+qemuMigrationMigrationParamsToVM(const qemuMigrationParams *migParams, const virDomainObj *vm)
+{
+ if (migParams && migParams->params[QEMU_MIGRATION_PARAM_MIGRATIONPIN].set) {
+ qemuDomainObjPrivate *priv = vm->privateData;
+ priv->migrationThreadPinList = g_strdup(migParams->params[QEMU_MIGRATION_PARAM_MIGRATIONPIN].value.s);
+ }
+}
+
+
void
qemuMigrationParamsSetBlockDirtyBitmapMapping(qemuMigrationParams *migParams,
virJSONValue **params)
@@ -870,6 +888,10 @@ qemuMigrationParamsToJSON(qemuMigrationParams *migParams,
if (!pv->set)
continue;
+ if (i == QEMU_MIGRATION_PARAM_MIGRATIONPIN) {
+ continue;
+ }
+
if (postcopyResume && !qemuMigrationParamInfo[i].applyOnPostcopyResume)
continue;
diff --git a/src/qemu/qemu_migration_params.h b/src/qemu/qemu_migration_params.h
index 5857673227..5df4d3738e 100644
--- a/src/qemu/qemu_migration_params.h
+++ b/src/qemu/qemu_migration_params.h
@@ -62,6 +62,7 @@ typedef enum {
QEMU_MIGRATION_PARAM_MULTIFD_COMPRESSION,
QEMU_MIGRATION_PARAM_MULTIFD_ZLIB_LEVEL,
QEMU_MIGRATION_PARAM_MULTIFD_ZSTD_LEVEL,
+ QEMU_MIGRATION_PARAM_MIGRATIONPIN,
QEMU_MIGRATION_PARAM_LAST
} qemuMigrationParam;
@@ -77,6 +78,9 @@ typedef enum {
virBitmap *
qemuMigrationParamsGetAlwaysOnCaps(qemuMigrationParty party);
+void
+qemuMigrationMigrationParamsToVM(const qemuMigrationParams *migParams, const virDomainObj *vm);
+
qemuMigrationParams *
qemuMigrationParamsFromFlags(virTypedParameterPtr params,
int nparams,
diff --git a/src/qemu/qemu_monitor.c b/src/qemu/qemu_monitor.c
index c2ab9b0070..1d5d47ea22 100644
--- a/src/qemu/qemu_monitor.c
+++ b/src/qemu/qemu_monitor.c
@@ -1401,15 +1401,12 @@ qemuMonitorEmitPRManagerStatusChanged(qemuMonitor *mon,
}
-int
-qemuMonitorEmitMigrationPid(qemuMonitorPtr mon,
+void
+qemuMonitorEmitMigrationPid(qemuMonitor *mon,
int mpid)
{
- int ret = -1;
VIR_DEBUG("mon=%p, pass=%d", mon, mpid);
- QEMU_MONITOR_CALLBACK(mon, ret, domainMigrationPid, mon->vm, mpid);
-
- return ret;
+ QEMU_MONITOR_CALLBACK(mon, domainMigrationPid, mon->vm, mpid);
}
diff --git a/src/qemu/qemu_monitor.h b/src/qemu/qemu_monitor.h
index 44eaa00cd8..646e97a8e6 100644
--- a/src/qemu/qemu_monitor.h
+++ b/src/qemu/qemu_monitor.h
@@ -328,10 +328,9 @@ typedef void (*qemuMonitorDomainRdmaGidStatusChangedCallback)(qemuMonitor *mon,
typedef void (*qemuMonitorDomainGuestCrashloadedCallback)(qemuMonitor *mon,
virDomainObj *vm);
-typedef int (*qemuMonitorDomainMigrationPidCallback)(qemuMonitorPtr mon,
- virDomainObjPtr vm,
- int mcpid,
- void *opaque);
+typedef void (*qemuMonitorDomainMigrationPidCallback)(qemuMonitor *mon,
+ virDomainObj *vm,
+ int mcpid);
typedef enum {
QEMU_MONITOR_MEMORY_FAILURE_RECIPIENT_HYPERVISOR,
@@ -511,7 +510,7 @@ void qemuMonitorEmitMigrationStatus(qemuMonitor *mon,
void qemuMonitorEmitMigrationPass(qemuMonitor *mon,
int pass);
-int qemuMonitorEmitMigrationPid(qemuMonitorPtr mon, int mpid);
+void qemuMonitorEmitMigrationPid(qemuMonitor *mon, int mpid);
void qemuMonitorEmitAcpiOstInfo(qemuMonitor *mon,
const char *alias,
diff --git a/src/qemu/qemu_monitor_json.c b/src/qemu/qemu_monitor_json.c
index 167fb7356a..4c1b8c9eb7 100644
--- a/src/qemu/qemu_monitor_json.c
+++ b/src/qemu/qemu_monitor_json.c
@@ -84,7 +84,7 @@ static void qemuMonitorJSONHandleRdmaGidStatusChanged(qemuMonitor *mon, virJSONV
static void qemuMonitorJSONHandleMemoryFailure(qemuMonitor *mon, virJSONValue *data);
static void qemuMonitorJSONHandleMemoryDeviceSizeChange(qemuMonitor *mon, virJSONValue *data);
static void qemuMonitorJSONHandleDeviceUnplugErr(qemuMonitor *mon, virJSONValue *data);
-static void qemuMonitorJSONHandleMigrationPid(qemuMonitorPtr mon, virJSONValuePtr data);
+static void qemuMonitorJSONHandleMigrationPid(qemuMonitor *mon, virJSONValue *data);
static void qemuMonitorJSONHandleNetdevStreamDisconnected(qemuMonitor *mon, virJSONValue *data);
typedef struct {
@@ -133,8 +133,8 @@ static qemuEventHandler eventHandlers[] = {
/* We use bsearch, so keep this list sorted. */
};
-static void qemuMonitorJSONHandleMigrationPid(qemuMonitorPtr mon,
- virJSONValuePtr data)
+static void qemuMonitorJSONHandleMigrationPid(qemuMonitor *mon,
+ virJSONValue *data)
{
int mpid;
--
2.27.0

View File

@ -0,0 +1,113 @@
From 6656a916c6db824c9bd914d29709407013627b3d Mon Sep 17 00:00:00 2001
From: zhengchuan <zhengchuan@huawei.com>
Date: Wed, 30 Nov 2022 11:30:12 +0800
Subject: [PATCH] migration/migration-pin: add qemu monitor callback functions
add qemu monitor callback functions
Signed-off-by: zhengchuan <zhengchuan@huawei.com>
---
src/qemu/qemu_monitor.c | 12 ++++++++++++
src/qemu/qemu_monitor.h | 8 ++++++++
src/qemu/qemu_monitor_json.c | 15 +++++++++++++++
3 files changed, 35 insertions(+)
diff --git a/src/qemu/qemu_monitor.c b/src/qemu/qemu_monitor.c
index a69ead6109..c2ab9b0070 100644
--- a/src/qemu/qemu_monitor.c
+++ b/src/qemu/qemu_monitor.c
@@ -1401,6 +1401,18 @@ qemuMonitorEmitPRManagerStatusChanged(qemuMonitor *mon,
}
+int
+qemuMonitorEmitMigrationPid(qemuMonitorPtr mon,
+ int mpid)
+{
+ int ret = -1;
+ VIR_DEBUG("mon=%p, pass=%d", mon, mpid);
+ QEMU_MONITOR_CALLBACK(mon, ret, domainMigrationPid, mon->vm, mpid);
+
+ return ret;
+}
+
+
void
qemuMonitorEmitRdmaGidStatusChanged(qemuMonitor *mon,
const char *netdev,
diff --git a/src/qemu/qemu_monitor.h b/src/qemu/qemu_monitor.h
index c4af9b407d..44eaa00cd8 100644
--- a/src/qemu/qemu_monitor.h
+++ b/src/qemu/qemu_monitor.h
@@ -328,6 +328,11 @@ typedef void (*qemuMonitorDomainRdmaGidStatusChangedCallback)(qemuMonitor *mon,
typedef void (*qemuMonitorDomainGuestCrashloadedCallback)(qemuMonitor *mon,
virDomainObj *vm);
+typedef int (*qemuMonitorDomainMigrationPidCallback)(qemuMonitorPtr mon,
+ virDomainObjPtr vm,
+ int mcpid,
+ void *opaque);
+
typedef enum {
QEMU_MONITOR_MEMORY_FAILURE_RECIPIENT_HYPERVISOR,
QEMU_MONITOR_MEMORY_FAILURE_RECIPIENT_GUEST,
@@ -400,6 +405,7 @@ struct _qemuMonitorCallbacks {
qemuMonitorDomainMemoryFailureCallback domainMemoryFailure;
qemuMonitorDomainMemoryDeviceSizeChange domainMemoryDeviceSizeChange;
qemuMonitorDomainDeviceUnplugErrCallback domainDeviceUnplugError;
+ qemuMonitorDomainMigrationPidCallback domainMigrationPid;
qemuMonitorDomainNetdevStreamDisconnectedCallback domainNetdevStreamDisconnected;
};
@@ -505,6 +511,8 @@ void qemuMonitorEmitMigrationStatus(qemuMonitor *mon,
void qemuMonitorEmitMigrationPass(qemuMonitor *mon,
int pass);
+int qemuMonitorEmitMigrationPid(qemuMonitorPtr mon, int mpid);
+
void qemuMonitorEmitAcpiOstInfo(qemuMonitor *mon,
const char *alias,
const char *slotType,
diff --git a/src/qemu/qemu_monitor_json.c b/src/qemu/qemu_monitor_json.c
index a9133793f6..167fb7356a 100644
--- a/src/qemu/qemu_monitor_json.c
+++ b/src/qemu/qemu_monitor_json.c
@@ -84,6 +84,7 @@ static void qemuMonitorJSONHandleRdmaGidStatusChanged(qemuMonitor *mon, virJSONV
static void qemuMonitorJSONHandleMemoryFailure(qemuMonitor *mon, virJSONValue *data);
static void qemuMonitorJSONHandleMemoryDeviceSizeChange(qemuMonitor *mon, virJSONValue *data);
static void qemuMonitorJSONHandleDeviceUnplugErr(qemuMonitor *mon, virJSONValue *data);
+static void qemuMonitorJSONHandleMigrationPid(qemuMonitorPtr mon, virJSONValuePtr data);
static void qemuMonitorJSONHandleNetdevStreamDisconnected(qemuMonitor *mon, virJSONValue *data);
typedef struct {
@@ -107,6 +108,7 @@ static qemuEventHandler eventHandlers[] = {
{ "MEMORY_FAILURE", qemuMonitorJSONHandleMemoryFailure, },
{ "MIGRATION", qemuMonitorJSONHandleMigrationStatus, },
{ "MIGRATION_PASS", qemuMonitorJSONHandleMigrationPass, },
+ { "MIGRATION_PID", qemuMonitorJSONHandleMigrationPid, },
{ "NETDEV_STREAM_DISCONNECTED", qemuMonitorJSONHandleNetdevStreamDisconnected, },
{ "NIC_RX_FILTER_CHANGED", qemuMonitorJSONHandleNicRxFilterChanged, },
{ "PR_MANAGER_STATUS_CHANGED", qemuMonitorJSONHandlePRManagerStatusChanged, },
@@ -131,6 +133,19 @@ static qemuEventHandler eventHandlers[] = {
/* We use bsearch, so keep this list sorted. */
};
+static void qemuMonitorJSONHandleMigrationPid(qemuMonitorPtr mon,
+ virJSONValuePtr data)
+{
+ int mpid;
+
+ if (virJSONValueObjectGetNumberInt(data, "pid", &mpid) < 0) {
+ VIR_WARN("missing migration pid in migration-pid event");
+ return;
+ }
+
+ qemuMonitorEmitMigrationPid(mon, mpid);
+}
+
static int
qemuMonitorEventCompare(const void *key, const void *elt)
{
--
2.27.0

View File

@ -0,0 +1,158 @@
From c934558ff8d535a91146be93d45c730b8023a8cd Mon Sep 17 00:00:00 2001
From: zhengchuan <zhengchuan@huawei.com>
Date: Wed, 14 Dec 2022 11:07:58 +0800
Subject: [PATCH] migration/migration-pin:add some migration/multiFd params
add migration/multiFd params.
Signed-off-by: zhengchuan <zhengchuan@huawei.com>
---
src/conf/domain_conf.c | 8 ++++++++
src/conf/domain_conf.h | 12 ++++++++++++
src/libvirt_private.syms | 1 +
src/qemu/qemu_domain.c | 6 ++++++
src/qemu/qemu_domain.h | 5 ++++-
src/qemu/qemu_migration.c | 1 +
src/util/vircgroup.c | 3 +++
src/util/vircgroup.h | 1 +
8 files changed, 36 insertions(+), 1 deletion(-)
diff --git a/src/conf/domain_conf.c b/src/conf/domain_conf.c
index 22ad43e1d7..f54f99f379 100644
--- a/src/conf/domain_conf.c
+++ b/src/conf/domain_conf.c
@@ -31075,6 +31075,14 @@ virDomainDefHasSpiceGraphics(const virDomainDef *def)
return false;
}
+void
+virDomainMigrationIDDefFree(virDomainMigrationIDDefPtr def)
+{
+ if (!def)
+ return;
+ virBitmapFree(def->cpumask);
+ VIR_FREE(def);
+}
ssize_t
virDomainWatchdogDefFind(const virDomainDef *def,
diff --git a/src/conf/domain_conf.h b/src/conf/domain_conf.h
index ed07859bc5..196053d950 100644
--- a/src/conf/domain_conf.h
+++ b/src/conf/domain_conf.h
@@ -27,6 +27,7 @@
#include <libxml/xpath.h>
#include "internal.h"
+#include "viralloc.h"
#include "virconftypes.h"
#include "capabilities.h"
#include "cpu_conf.h"
@@ -4498,3 +4499,14 @@ virDomainObjGetMessages(virDomainObj *vm,
bool
virDomainDefHasSpiceGraphics(const virDomainDef *def);
+
+typedef struct _virDomainMigrationIDDef virDomainMigrationIDDef;
+typedef virDomainMigrationIDDef *virDomainMigrationIDDefPtr;
+struct _virDomainMigrationIDDef {
+ bool autofill;
+ int thread_id;
+ virBitmap *cpumask;
+ virDomainThreadSchedParam sched;
+};
+
+void virDomainMigrationIDDefFree(virDomainMigrationIDDefPtr def);
diff --git a/src/libvirt_private.syms b/src/libvirt_private.syms
index 553b01b8c0..4d712f0d79 100644
--- a/src/libvirt_private.syms
+++ b/src/libvirt_private.syms
@@ -523,6 +523,7 @@ virDomainMemoryModelTypeToString;
virDomainMemoryRemove;
virDomainMemorySourceTypeFromString;
virDomainMemorySourceTypeToString;
+virDomainMigrationIDDefFree;
virDomainMouseModeTypeFromString;
virDomainMouseModeTypeToString;
virDomainNetAllocateActualDevice;
diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
index 953808fcfe..6b09c15f7a 100644
--- a/src/qemu/qemu_domain.c
+++ b/src/qemu/qemu_domain.c
@@ -1879,6 +1879,12 @@ qemuDomainObjPrivateFree(void *data)
virObjectUnref(priv->monConfig);
g_free(priv->lockState);
g_free(priv->origname);
+ VIR_FREE(priv->migrationPids);
+ virBitmapFree(priv->pcpumap);
+
+ VIR_FREE(priv->migrationMultiFdPids);
+ priv->migrationMultiFdPids = NULL;
+ priv->migrationMultiFdCount = 0;
virChrdevFree(priv->devs);
diff --git a/src/qemu/qemu_domain.h b/src/qemu/qemu_domain.h
index 1e56e50672..0eff9eab72 100644
--- a/src/qemu/qemu_domain.h
+++ b/src/qemu/qemu_domain.h
@@ -146,7 +146,9 @@ struct _qemuDomainObjPrivate {
unsigned long long preMigrationMemlock; /* Original RLIMIT_MEMLOCK in case
it was changed for the current
migration job. */
-
+ char *migrationPids;
+ char *migrationMultiFdPids;
+ unsigned int migrationMultiFdCount;
virChrdevs *devs;
qemuDomainCleanupCallback *cleanupCallbacks;
@@ -166,6 +168,7 @@ struct _qemuDomainObjPrivate {
/* Bitmaps below hold data from the auto NUMA feature */
virBitmap *autoNodeset;
virBitmap *autoCpuset;
+ virBitmap *pcpumap;
bool signalIOError; /* true if the domain condition should be signalled on
I/O error */
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index 7ae1cd7051..b13f2e0c24 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -3362,6 +3362,7 @@ qemuMigrationDstPrepareFresh(virQEMUDriver *driver,
priv = vm->privateData;
priv->origname = g_strdup(origname);
+ VIR_FREE(priv->migrationPids);
if (taint_hook) {
/* Domain XML has been altered by a hook script. */
diff --git a/src/util/vircgroup.c b/src/util/vircgroup.c
index b424e1b5d4..62c6df2206 100644
--- a/src/util/vircgroup.c
+++ b/src/util/vircgroup.c
@@ -1123,6 +1123,9 @@ virCgroupNewThread(virCgroup *domain,
case VIR_CGROUP_THREAD_IOTHREAD:
name = g_strdup_printf("iothread%d", id);
break;
+ case VIR_CGROUP_THREAD_MIGRATION_THREAD:
+ name = g_strdup_printf("migthread%d", id);
+ break;
case VIR_CGROUP_THREAD_LAST:
virReportError(VIR_ERR_INTERNAL_ERROR,
_("unexpected name value %1$d"), nameval);
diff --git a/src/util/vircgroup.h b/src/util/vircgroup.h
index adf3850b22..c5f6ddd7a6 100644
--- a/src/util/vircgroup.h
+++ b/src/util/vircgroup.h
@@ -53,6 +53,7 @@ typedef enum {
VIR_CGROUP_THREAD_VCPU = 0,
VIR_CGROUP_THREAD_EMULATOR,
VIR_CGROUP_THREAD_IOTHREAD,
+ VIR_CGROUP_THREAD_MIGRATION_THREAD,
VIR_CGROUP_THREAD_LAST
} virCgroupThreadName;
--
2.27.0

View File

@ -0,0 +1,110 @@
From c9074196761ed6c180a53ccdc0bb692f31491158 Mon Sep 17 00:00:00 2001
From: zhengchuan <zhengchuan@huawei.com>
Date: Wed, 30 Nov 2022 15:59:27 +0800
Subject: [PATCH] migration/multifd-pin: add qemu monitor callback functions
add qemu monitor callback functions
Signed-off-by: zhengchuan <zhengchuan@huawei.com>
---
src/qemu/qemu_monitor.c | 9 +++++++++
src/qemu/qemu_monitor.h | 8 ++++++++
src/qemu/qemu_monitor_json.c | 15 +++++++++++++++
3 files changed, 32 insertions(+)
diff --git a/src/qemu/qemu_monitor.c b/src/qemu/qemu_monitor.c
index 1d5d47ea22..e4a1852d05 100644
--- a/src/qemu/qemu_monitor.c
+++ b/src/qemu/qemu_monitor.c
@@ -1410,6 +1410,15 @@ qemuMonitorEmitMigrationPid(qemuMonitor *mon,
}
+void
+qemuMonitorEmitMigrationMultiFdPids(qemuMonitor *mon,
+ int mpid)
+{
+ VIR_DEBUG("mon=%p, pass=%d", mon, mpid);
+ QEMU_MONITOR_CALLBACK(mon, domainMigrationMultiFdPids, mon->vm, mpid);
+}
+
+
void
qemuMonitorEmitRdmaGidStatusChanged(qemuMonitor *mon,
const char *netdev,
diff --git a/src/qemu/qemu_monitor.h b/src/qemu/qemu_monitor.h
index 646e97a8e6..e893542971 100644
--- a/src/qemu/qemu_monitor.h
+++ b/src/qemu/qemu_monitor.h
@@ -332,6 +332,10 @@ typedef void (*qemuMonitorDomainMigrationPidCallback)(qemuMonitor *mon,
virDomainObj *vm,
int mcpid);
+typedef void (*qemuMonitorDomainMigrationMultiFdPidsCallback)(qemuMonitor *mon,
+ virDomainObj *vm,
+ int mcpid);
+
typedef enum {
QEMU_MONITOR_MEMORY_FAILURE_RECIPIENT_HYPERVISOR,
QEMU_MONITOR_MEMORY_FAILURE_RECIPIENT_GUEST,
@@ -405,6 +409,7 @@ struct _qemuMonitorCallbacks {
qemuMonitorDomainMemoryDeviceSizeChange domainMemoryDeviceSizeChange;
qemuMonitorDomainDeviceUnplugErrCallback domainDeviceUnplugError;
qemuMonitorDomainMigrationPidCallback domainMigrationPid;
+ qemuMonitorDomainMigrationMultiFdPidsCallback domainMigrationMultiFdPids;
qemuMonitorDomainNetdevStreamDisconnectedCallback domainNetdevStreamDisconnected;
};
@@ -512,6 +517,9 @@ void qemuMonitorEmitMigrationPass(qemuMonitor *mon,
void qemuMonitorEmitMigrationPid(qemuMonitor *mon, int mpid);
+void qemuMonitorEmitMigrationMultiFdPids(qemuMonitor *mon,
+ int mpid);
+
void qemuMonitorEmitAcpiOstInfo(qemuMonitor *mon,
const char *alias,
const char *slotType,
diff --git a/src/qemu/qemu_monitor_json.c b/src/qemu/qemu_monitor_json.c
index 4c1b8c9eb7..acd5ce5297 100644
--- a/src/qemu/qemu_monitor_json.c
+++ b/src/qemu/qemu_monitor_json.c
@@ -85,6 +85,7 @@ static void qemuMonitorJSONHandleMemoryFailure(qemuMonitor *mon, virJSONValue *d
static void qemuMonitorJSONHandleMemoryDeviceSizeChange(qemuMonitor *mon, virJSONValue *data);
static void qemuMonitorJSONHandleDeviceUnplugErr(qemuMonitor *mon, virJSONValue *data);
static void qemuMonitorJSONHandleMigrationPid(qemuMonitor *mon, virJSONValue *data);
+static void qemuMonitorJSONHandleMigrationMultiFdPids(qemuMonitor *mon, virJSONValue *data);
static void qemuMonitorJSONHandleNetdevStreamDisconnected(qemuMonitor *mon, virJSONValue *data);
typedef struct {
@@ -107,6 +108,7 @@ static qemuEventHandler eventHandlers[] = {
{ "MEMORY_DEVICE_SIZE_CHANGE", qemuMonitorJSONHandleMemoryDeviceSizeChange, },
{ "MEMORY_FAILURE", qemuMonitorJSONHandleMemoryFailure, },
{ "MIGRATION", qemuMonitorJSONHandleMigrationStatus, },
+ { "MIGRATION_MULTIFD_PID", qemuMonitorJSONHandleMigrationMultiFdPids, },
{ "MIGRATION_PASS", qemuMonitorJSONHandleMigrationPass, },
{ "MIGRATION_PID", qemuMonitorJSONHandleMigrationPid, },
{ "NETDEV_STREAM_DISCONNECTED", qemuMonitorJSONHandleNetdevStreamDisconnected, },
@@ -146,6 +148,19 @@ static void qemuMonitorJSONHandleMigrationPid(qemuMonitor *mon,
qemuMonitorEmitMigrationPid(mon, mpid);
}
+static void qemuMonitorJSONHandleMigrationMultiFdPids(qemuMonitor *mon,
+ virJSONValue *data)
+{
+ int mpid;
+
+ if (virJSONValueObjectGetNumberInt(data, "pid", &mpid) < 0) {
+ VIR_WARN("missing multifd pid in migration-multifd-pid event");
+ return;
+ }
+
+ qemuMonitorEmitMigrationMultiFdPids(mon, mpid);
+}
+
static int
qemuMonitorEventCompare(const void *key, const void *elt)
{
--
2.27.0

View File

@ -0,0 +1,191 @@
From 5ddedd900bf5556f0c01715148b387fa451d9399 Mon Sep 17 00:00:00 2001
From: zhengchuan <zhengchuan@huawei.com>
Date: Wed, 30 Nov 2022 16:47:30 +0800
Subject: [PATCH] migration/multifd-pin: support migration multifd thread pin
Support pinning the migration multifd threads to host CPUs via configuration.
Signed-off-by: zhengchuan <zhengchuan@huawei.com>
---
src/qemu/qemu_migration.c | 2 +
src/qemu/qemu_process.c | 114 ++++++++++++++++++++++++++++++++++++++
src/qemu/qemu_process.h | 4 ++
3 files changed, 120 insertions(+)
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index 00dfd46ae7..41ce565ede 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -3363,6 +3363,8 @@ qemuMigrationDstPrepareFresh(virQEMUDriver *driver,
priv = vm->privateData;
priv->origname = g_strdup(origname);
VIR_FREE(priv->migrationPids);
+ VIR_FREE(priv->migrationMultiFdPids);
+ priv->migrationMultiFdCount = 0;
if (taint_hook) {
/* Domain XML has been altered by a hook script. */
diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
index 5be6710ea7..e85862333c 100644
--- a/src/qemu/qemu_process.c
+++ b/src/qemu/qemu_process.c
@@ -1438,6 +1438,58 @@ qemuProcessHandleSpiceMigrated(qemuMonitor *mon G_GNUC_UNUSED,
}
+static void
+qemuProcessHandleMigrationPinStatus(virDomainObj *vm, int status)
+{
+ qemuDomainObjPrivate *priv = vm->privateData;
+ if (vm->job->asyncJob != VIR_ASYNC_JOB_MIGRATION_OUT)
+ return;
+
+ switch (status) {
+ case QEMU_MONITOR_MIGRATION_STATUS_INACTIVE:
+ case QEMU_MONITOR_MIGRATION_STATUS_SETUP:
+ case QEMU_MONITOR_MIGRATION_STATUS_ACTIVE:
+ case QEMU_MONITOR_MIGRATION_STATUS_PRE_SWITCHOVER:
+ case QEMU_MONITOR_MIGRATION_STATUS_DEVICE:
+ case QEMU_MONITOR_MIGRATION_STATUS_POSTCOPY:
+ case QEMU_MONITOR_MIGRATION_STATUS_CANCELLING:
+ case QEMU_MONITOR_MIGRATION_STATUS_COMPLETED:
+ case QEMU_MONITOR_MIGRATION_STATUS_WAIT_UNPLUG:
+ break;
+ case QEMU_MONITOR_MIGRATION_STATUS_ERROR:
+ /*
+ * migration thread is still running,
+ * so we can't delete migration Cgroup.
+ */
+ VIR_FREE(priv->migrationPids);
+ VIR_FREE(priv->migrationMultiFdPids);
+ VIR_FREE(priv->migrationThreadPinList);
+ priv->migrationMultiFdCount = 0;
+ virBitmapFree(priv->pcpumap);
+ priv->pcpumap = NULL;
+ break;
+ case QEMU_MONITOR_MIGRATION_STATUS_CANCELLED:
+ VIR_FREE(priv->migrationPids);
+ VIR_FREE(priv->migrationMultiFdPids);
+ VIR_FREE(priv->migrationThreadPinList);
+ priv->migrationMultiFdCount = 0;
+ virBitmapFree(priv->pcpumap);
+ priv->pcpumap = NULL;
+ if (virCgroupDelThread(priv->cgroup,
+ VIR_CGROUP_THREAD_MIGRATION_THREAD, 0) < 0)
+ VIR_WARN("Failed to delete migration thread Cgroup!");
+ VIR_INFO("success to free pcpumap and migrationPids");
+ break;
+ default:
+ VIR_WARN("got unknown migration status'%s'",
+ qemuMonitorMigrationStatusTypeToString(status));
+ break;
+ }
+
+ return;
+}
+
+
static void
qemuProcessHandleMigrationStatus(qemuMonitor *mon G_GNUC_UNUSED,
virDomainObj *vm,
@@ -1553,6 +1605,8 @@ qemuProcessHandleMigrationStatus(qemuMonitor *mon G_GNUC_UNUSED,
break;
}
+ qemuProcessHandleMigrationPinStatus(vm, status);
+
cleanup:
virObjectUnlock(vm);
virObjectEventStateQueue(driver->domainEventState, event);
@@ -1820,6 +1874,7 @@ static qemuMonitorCallbacks monitorCallbacks = {
.domainMemoryDeviceSizeChange = qemuProcessHandleMemoryDeviceSizeChange,
.domainDeviceUnplugError = qemuProcessHandleDeviceUnplugErr,
.domainMigrationPid = qemuProcessHandleMigrationPid,
+ .domainMigrationMultiFdPids = qemuProcessHandleMigrationMultiFdPids,
.domainNetdevStreamDisconnected = qemuProcessHandleNetdevStreamDisconnected,
};
@@ -2860,6 +2915,65 @@ qemuProcessHandleMigrationPid(qemuMonitor *mon ATTRIBUTE_UNUSED,
}
+void
+qemuProcessHandleMigrationMultiFdPids(qemuMonitor *mon ATTRIBUTE_UNUSED,
+ virDomainObj *vm,
+ int mpid)
+{
+ qemuDomainObjPrivate *priv;
+ char *mpidOldStr = NULL;
+ char *mpidStr = NULL;
+ virDomainMigrationIDDefPtr migration = NULL;
+ virBitmap *pcpumap = NULL;
+ virObjectLock(vm);
+
+ VIR_INFO("Migrating domain %p %s, migration-multifd pid %d",
+ vm, vm->def->name, mpid);
+
+ priv = vm->privateData;
+ if (vm->job->asyncJob == VIR_ASYNC_JOB_NONE) {
+ VIR_DEBUG("got MIGRATION_MULTIFD_PID event without a migration job");
+ goto cleanup;
+ }
+
+ migration = g_malloc0(sizeof(*migration));
+ migration->thread_id = mpid;
+
+ if (qemuProcessSetupMigration(vm, migration) < 0) {
+ VIR_ERROR(_("fail to setup migration multiFd cgroup"));
+ goto cleanup;
+ }
+
+ mpidOldStr = priv->migrationMultiFdPids;
+ if (!mpidOldStr) {
+ mpidStr = g_strdup_printf("%d", mpid);
+ } else {
+ mpidStr = g_strdup_printf("%s/%d", mpidOldStr, mpid);
+ }
+
+ VIR_FREE(priv->migrationMultiFdPids);
+ priv->migrationMultiFdPids = mpidStr;
+ priv->migrationMultiFdCount++;
+
+ pcpumap = qemuProcessGetPcpumap(priv);
+
+ if (!pcpumap)
+ goto cleanup;
+
+ qemuProcessSetMigthreadAffinity(priv, pcpumap, mpid);
+
+ cleanup:
+ /*
+ * If pcpumap was built from priv->migrationThreadPinList rather than
+ * taken from priv->pcpumap, we need to free it here.
+ */
+ if (pcpumap != priv->pcpumap)
+ virBitmapFree(pcpumap);
+ virDomainMigrationIDDefFree(migration);
+ virObjectUnlock(vm);
+}
+
+
static char *
qemuProcessBuildPRHelperPidfilePathOld(virDomainObj *vm)
{
diff --git a/src/qemu/qemu_process.h b/src/qemu/qemu_process.h
index fff976f6f7..69a240e1e8 100644
--- a/src/qemu/qemu_process.h
+++ b/src/qemu/qemu_process.h
@@ -262,6 +262,10 @@ void qemuProcessHandleMigrationPid(qemuMonitor *mon ATTRIBUTE_UNUSED,
virDomainObj *vm,
int mpid);
+void qemuProcessHandleMigrationMultiFdPids(qemuMonitor *mon ATTRIBUTE_UNUSED,
+ virDomainObj *vm,
+ int mpid);
+
void qemuProcessCleanupMigrationJob(virQEMUDriver *driver,
virDomainObj *vm);
--
2.27.0

View File

@ -0,0 +1,47 @@
From 3cd86072fd6725178b669174eb99ab24a0bcee6c Mon Sep 17 00:00:00 2001
From: AlexChen <alex.chen@huawei.com>
Date: Tue, 27 Oct 2020 21:43:47 -0400
Subject: [PATCH] node_device: fix leak of DIR*
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Commit 53aec799fa31 introduced the function udevGetVDPACharDev(),
which scans a directory using virDirOpenIfExists() and
virDirRead(). It unfortunately forgets to close the DIR* when it is
finished with it. This patch fixes that omission.
Fixes: 53aec799fa31711ffaeacc7ec17ec6d3c2e3cadf
Signed-off-by: Laine Stump <laine@redhat.com>
Reviewed-by: Daniel Henrique Barboza <danielhb413@gmail.com>
Reviewed-by: Ján Tomko <jtomko@redhat.com>
Signed-off-by: AlexChen <alex.chen@huawei.com>
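As a design note (not part of the patch): newer directory-scanning code in libvirt
usually avoids this class of leak by declaring the DIR* with automatic cleanup, so
every return path closes it. The sketch below is illustrative only; the helper name
is hypothetical, and it assumes the g_autoptr() cleanup function for DIR* that
virfile.h registers via virDirClose().

#include <dirent.h>
#include "virfile.h"   /* virDirOpenIfExists, virDirRead, DIR* autoptr cleanup */

/* Hypothetical helper: count the entries of a sysfs directory. Because
 * the DIR* is declared with g_autoptr(), virDirClose() runs automatically
 * on every return path, so an explicit close cannot be forgotten. */
static int
exampleCountEntries(const char *sysfs_path)
{
    g_autoptr(DIR) dir = NULL;
    struct dirent *entry;
    int direrr;
    int count = 0;

    /* a missing directory is treated as an error for brevity */
    if (virDirOpenIfExists(&dir, sysfs_path) <= 0)
        return -1;

    while ((direrr = virDirRead(dir, &entry, sysfs_path)) > 0)
        count++;

    if (direrr < 0)
        return -1;

    return count;
}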
---
src/node_device/node_device_udev.c | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/src/node_device/node_device_udev.c b/src/node_device/node_device_udev.c
index 911325600e..c82d3e7dea 100644
--- a/src/node_device/node_device_udev.c
+++ b/src/node_device/node_device_udev.c
@@ -1198,6 +1198,7 @@ udevGetVDPACharDev(const char *sysfs_path,
virReportError(VIR_ERR_INTERNAL_ERROR,
_("vDPA chardev path '%1$s' does not exist"),
chardev);
+ virDirClose(dir);
return -1;
}
VIR_DEBUG("vDPA chardev is at '%s'", chardev);
@@ -1206,6 +1207,9 @@ udevGetVDPACharDev(const char *sysfs_path,
break;
}
}
+
+ virDirClose(dir);
+
if (direrr < 0)
return -1;
--
2.27.0

View File

@ -0,0 +1,49 @@
From 43acc011c55208570c27cef333fdda46102b1d21 Mon Sep 17 00:00:00 2001
From: Xu Yandong <xuyandong2@huawei.com>
Date: Wed, 15 Apr 2020 14:03:07 +0800
Subject: [PATCH] nodedev: fix potential heap use after free
After moving device enumeration into a thread (commit 9f0ae0b18e3), the
driversInitialized flag no longer guarantees that state initialization has
completed, so take a reference on driver->devs before using it to prevent
the devs list from being freed by virStateCleanup.
Signed-off-by: Xu Yandong <xuyandong2@huawei.com>
Signed-off-by: Adttil <yangtao286@huawei.com>
---
src/node_device/node_device_udev.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/src/node_device/node_device_udev.c b/src/node_device/node_device_udev.c
index 911325600e..6c5b788279 100644
--- a/src/node_device/node_device_udev.c
+++ b/src/node_device/node_device_udev.c
@@ -1512,8 +1512,8 @@ udevSetParent(struct udev_device *device,
virNodeDeviceDef *objdef;
parent_device = device;
+ virObjectRef(driver->devs);
do {
-
parent_device = udev_device_get_parent(parent_device);
if (parent_device == NULL)
break;
@@ -1523,6 +1523,7 @@ udevSetParent(struct udev_device *device,
virReportError(VIR_ERR_INTERNAL_ERROR,
_("Could not get syspath for parent of '%1$s'"),
udev_device_get_syspath(parent_device));
+ virObjectUnref(driver->devs);
return -1;
}
@@ -1540,6 +1541,7 @@ udevSetParent(struct udev_device *device,
if (!def->parent)
def->parent = g_strdup("computer");
+ virObjectUnref(driver->devs);
return 0;
}
--
2.27.0

View File

@ -0,0 +1,177 @@
From 534c989f39c7bc9953f05f8e606bc45bb2f73a63 Mon Sep 17 00:00:00 2001
From: Andrea Bolognani <abologna@redhat.com>
Date: Fri, 5 Jan 2024 18:42:13 +0100
Subject: [PATCH] qemu: Introduce QEMU_CAPS_SMP_CLUSTERS
Signed-off-by: Andrea Bolognani <abologna@redhat.com>
Reviewed-by: Peter Krempa <pkrempa@redhat.com>
---
src/qemu/qemu_capabilities.c | 2 ++
src/qemu/qemu_capabilities.h | 1 +
tests/qemucapabilitiesdata/caps_7.1.0_ppc64.xml | 1 +
tests/qemucapabilitiesdata/caps_7.1.0_x86_64.xml | 1 +
tests/qemucapabilitiesdata/caps_7.2.0_ppc.xml | 1 +
tests/qemucapabilitiesdata/caps_7.2.0_x86_64+hvf.xml | 1 +
tests/qemucapabilitiesdata/caps_7.2.0_x86_64.xml | 1 +
tests/qemucapabilitiesdata/caps_8.0.0_riscv64.xml | 1 +
tests/qemucapabilitiesdata/caps_8.0.0_x86_64.xml | 1 +
tests/qemucapabilitiesdata/caps_8.1.0_s390x.xml | 1 +
tests/qemucapabilitiesdata/caps_8.1.0_x86_64.xml | 1 +
tests/qemucapabilitiesdata/caps_8.2.0_x86_64.xml | 1 +
12 files changed, 13 insertions(+)
diff --git a/src/qemu/qemu_capabilities.c b/src/qemu/qemu_capabilities.c
index 83119e871a..d25062b225 100644
--- a/src/qemu/qemu_capabilities.c
+++ b/src/qemu/qemu_capabilities.c
@@ -698,6 +698,7 @@ VIR_ENUM_IMPL(virQEMUCaps,
/* 450 */
"run-with.async-teardown", /* QEMU_CAPS_RUN_WITH_ASYNC_TEARDOWN */
"virtio-blk-vhost-vdpa", /* QEMU_CAPS_DEVICE_VIRTIO_BLK_VHOST_VDPA */
+ "smp-clusters", /* QEMU_CAPS_SMP_CLUSTERS */
);
@@ -1550,6 +1551,7 @@ static struct virQEMUCapsStringFlags virQEMUCapsQMPSchemaQueries[] = {
{ "query-display-options/ret-type/+sdl", QEMU_CAPS_SDL },
{ "query-display-options/ret-type/+egl-headless", QEMU_CAPS_EGL_HEADLESS },
{ "query-hotpluggable-cpus/ret-type/props/die-id", QEMU_CAPS_SMP_DIES },
+ { "query-hotpluggable-cpus/ret-type/props/cluster-id", QEMU_CAPS_SMP_CLUSTERS },
{ "query-named-block-nodes/arg-type/flat", QEMU_CAPS_QMP_QUERY_NAMED_BLOCK_NODES_FLAT },
{ "screendump/arg-type/device", QEMU_CAPS_SCREENDUMP_DEVICE },
{ "set-numa-node/arg-type/+hmat-lb", QEMU_CAPS_NUMA_HMAT },
diff --git a/src/qemu/qemu_capabilities.h b/src/qemu/qemu_capabilities.h
index 3c4f7f625b..959fbe6378 100644
--- a/src/qemu/qemu_capabilities.h
+++ b/src/qemu/qemu_capabilities.h
@@ -677,6 +677,7 @@ typedef enum { /* virQEMUCapsFlags grouping marker for syntax-check */
/* 450 */
QEMU_CAPS_RUN_WITH_ASYNC_TEARDOWN, /* asynchronous teardown -run-with async-teardown=on|off */
QEMU_CAPS_DEVICE_VIRTIO_BLK_VHOST_VDPA, /* virtio-blk-vhost-vdpa block driver */
+ QEMU_CAPS_SMP_CLUSTERS, /* -smp clusters= */
QEMU_CAPS_LAST /* this must always be the last item */
} virQEMUCapsFlags;
diff --git a/tests/qemucapabilitiesdata/caps_7.1.0_ppc64.xml b/tests/qemucapabilitiesdata/caps_7.1.0_ppc64.xml
index 4315241d1d..536524cf18 100644
--- a/tests/qemucapabilitiesdata/caps_7.1.0_ppc64.xml
+++ b/tests/qemucapabilitiesdata/caps_7.1.0_ppc64.xml
@@ -154,6 +154,7 @@
<flag name='virtio-crypto'/>
<flag name='pvpanic-pci'/>
<flag name='virtio-gpu.blob'/>
+ <flag name='smp-clusters'/>
<version>7001000</version>
<microcodeVersion>42900244</microcodeVersion>
<package>v7.1.0</package>
diff --git a/tests/qemucapabilitiesdata/caps_7.1.0_x86_64.xml b/tests/qemucapabilitiesdata/caps_7.1.0_x86_64.xml
index bd84750dc5..58e1111982 100644
--- a/tests/qemucapabilitiesdata/caps_7.1.0_x86_64.xml
+++ b/tests/qemucapabilitiesdata/caps_7.1.0_x86_64.xml
@@ -188,6 +188,7 @@
<flag name='virtio-crypto'/>
<flag name='pvpanic-pci'/>
<flag name='virtio-gpu.blob'/>
+ <flag name='smp-clusters'/>
<version>7001000</version>
<microcodeVersion>43100244</microcodeVersion>
<package>v7.1.0</package>
diff --git a/tests/qemucapabilitiesdata/caps_7.2.0_ppc.xml b/tests/qemucapabilitiesdata/caps_7.2.0_ppc.xml
index a1fc441412..127b8ee4c2 100644
--- a/tests/qemucapabilitiesdata/caps_7.2.0_ppc.xml
+++ b/tests/qemucapabilitiesdata/caps_7.2.0_ppc.xml
@@ -149,6 +149,7 @@
<flag name='virtio-crypto'/>
<flag name='pvpanic-pci'/>
<flag name='virtio-gpu.blob'/>
+ <flag name='smp-clusters'/>
<version>7002000</version>
<microcodeVersion>0</microcodeVersion>
<package>qemu-7.2.0-6.fc37</package>
diff --git a/tests/qemucapabilitiesdata/caps_7.2.0_x86_64+hvf.xml b/tests/qemucapabilitiesdata/caps_7.2.0_x86_64+hvf.xml
index 06a01a2c4c..a30ec3c164 100644
--- a/tests/qemucapabilitiesdata/caps_7.2.0_x86_64+hvf.xml
+++ b/tests/qemucapabilitiesdata/caps_7.2.0_x86_64+hvf.xml
@@ -192,6 +192,7 @@
<flag name='cryptodev-backend-lkcf'/>
<flag name='pvpanic-pci'/>
<flag name='virtio-gpu.blob'/>
+ <flag name='smp-clusters'/>
<version>7002000</version>
<microcodeVersion>43100245</microcodeVersion>
<package>v7.2.0</package>
diff --git a/tests/qemucapabilitiesdata/caps_7.2.0_x86_64.xml b/tests/qemucapabilitiesdata/caps_7.2.0_x86_64.xml
index 8ac1529c30..24ac7b8f6e 100644
--- a/tests/qemucapabilitiesdata/caps_7.2.0_x86_64.xml
+++ b/tests/qemucapabilitiesdata/caps_7.2.0_x86_64.xml
@@ -192,6 +192,7 @@
<flag name='cryptodev-backend-lkcf'/>
<flag name='pvpanic-pci'/>
<flag name='virtio-gpu.blob'/>
+ <flag name='smp-clusters'/>
<version>7002000</version>
<microcodeVersion>43100245</microcodeVersion>
<package>v7.2.0</package>
diff --git a/tests/qemucapabilitiesdata/caps_8.0.0_riscv64.xml b/tests/qemucapabilitiesdata/caps_8.0.0_riscv64.xml
index 31300d3d31..3f2acb5018 100644
--- a/tests/qemucapabilitiesdata/caps_8.0.0_riscv64.xml
+++ b/tests/qemucapabilitiesdata/caps_8.0.0_riscv64.xml
@@ -138,6 +138,7 @@
<flag name='virtio-crypto'/>
<flag name='pvpanic-pci'/>
<flag name='virtio-gpu.blob'/>
+ <flag name='smp-clusters'/>
<version>7002050</version>
<microcodeVersion>0</microcodeVersion>
<package>v7.2.0-333-g222059a0fc</package>
diff --git a/tests/qemucapabilitiesdata/caps_8.0.0_x86_64.xml b/tests/qemucapabilitiesdata/caps_8.0.0_x86_64.xml
index c2fa8eb028..85869f6b5d 100644
--- a/tests/qemucapabilitiesdata/caps_8.0.0_x86_64.xml
+++ b/tests/qemucapabilitiesdata/caps_8.0.0_x86_64.xml
@@ -196,6 +196,7 @@
<flag name='virtio-gpu.blob'/>
<flag name='rbd-encryption-layering'/>
<flag name='rbd-encryption-luks-any'/>
+ <flag name='smp-clusters'/>
<version>8000000</version>
<microcodeVersion>43100244</microcodeVersion>
<package>v8.0.0</package>
diff --git a/tests/qemucapabilitiesdata/caps_8.1.0_s390x.xml b/tests/qemucapabilitiesdata/caps_8.1.0_s390x.xml
index 427ee9d5c7..19422f08fa 100644
--- a/tests/qemucapabilitiesdata/caps_8.1.0_s390x.xml
+++ b/tests/qemucapabilitiesdata/caps_8.1.0_s390x.xml
@@ -112,6 +112,7 @@
<flag name='rbd-encryption-layering'/>
<flag name='rbd-encryption-luks-any'/>
<flag name='run-with.async-teardown'/>
+ <flag name='smp-clusters'/>
<version>8000050</version>
<microcodeVersion>39100245</microcodeVersion>
<package>v8.0.0-1270-g1c12355b</package>
diff --git a/tests/qemucapabilitiesdata/caps_8.1.0_x86_64.xml b/tests/qemucapabilitiesdata/caps_8.1.0_x86_64.xml
index d266dd0f31..0caee53550 100644
--- a/tests/qemucapabilitiesdata/caps_8.1.0_x86_64.xml
+++ b/tests/qemucapabilitiesdata/caps_8.1.0_x86_64.xml
@@ -198,6 +198,7 @@
<flag name='qcow2-discard-no-unref'/>
<flag name='run-with.async-teardown'/>
<flag name='virtio-blk-vhost-vdpa'/>
+ <flag name='smp-clusters'/>
<version>8001000</version>
<microcodeVersion>43100245</microcodeVersion>
<package>v8.1.0</package>
diff --git a/tests/qemucapabilitiesdata/caps_8.2.0_x86_64.xml b/tests/qemucapabilitiesdata/caps_8.2.0_x86_64.xml
index ef3bd14597..08967e9a8f 100644
--- a/tests/qemucapabilitiesdata/caps_8.2.0_x86_64.xml
+++ b/tests/qemucapabilitiesdata/caps_8.2.0_x86_64.xml
@@ -199,6 +199,7 @@
<flag name='qcow2-discard-no-unref'/>
<flag name='run-with.async-teardown'/>
<flag name='virtio-blk-vhost-vdpa'/>
+ <flag name='smp-clusters'/>
<version>8001050</version>
<microcodeVersion>43100246</microcodeVersion>
<package>v8.1.0-3111-gad6ef0a42e</package>
--
2.27.0

View File

@ -0,0 +1,112 @@
From 7ae220e6640315b2b10f79a98eee81c1c6f3ea1d Mon Sep 17 00:00:00 2001
From: Andrea Bolognani <abologna@redhat.com>
Date: Fri, 5 Jan 2024 18:51:29 +0100
Subject: [PATCH] qemu: Make monitor aware of CPU clusters
This makes it so libvirt can obtain accurate information about
guest CPUs from QEMU, and should make it possible to correctly
perform operations such as CPU hotplug.
Of course this is mostly moot at the moment: only aarch64 can use
CPU clusters, and CPU hotplug is not yet implemented on that
architecture.
Signed-off-by: Andrea Bolognani <abologna@redhat.com>
Reviewed-by: Peter Krempa <pkrempa@redhat.com>
---
src/qemu/qemu_domain.c | 3 ++-
src/qemu/qemu_monitor.c | 2 ++
src/qemu/qemu_monitor.h | 2 ++
src/qemu/qemu_monitor_json.c | 5 +++++
4 files changed, 11 insertions(+), 1 deletion(-)
diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
index 39fd3270fc..d54846e734 100644
--- a/src/qemu/qemu_domain.c
+++ b/src/qemu/qemu_domain.c
@@ -9909,11 +9909,12 @@ qemuDomainRefreshVcpuInfo(virDomainObj *vm,
if (validTIDs)
VIR_DEBUG("vCPU[%zu] PID %llu is valid "
- "(node=%d socket=%d die=%d core=%d thread=%d)",
+ "(node=%d socket=%d die=%d cluster=%d core=%d thread=%d)",
i, (unsigned long long)info[i].tid,
info[i].node_id,
info[i].socket_id,
info[i].die_id,
+ info[i].cluster_id,
info[i].core_id,
info[i].thread_id);
}
diff --git a/src/qemu/qemu_monitor.c b/src/qemu/qemu_monitor.c
index e4a1852d05..e270bee2cd 100644
--- a/src/qemu/qemu_monitor.c
+++ b/src/qemu/qemu_monitor.c
@@ -1519,6 +1519,7 @@ qemuMonitorCPUInfoClear(qemuMonitorCPUInfo *cpus,
cpus[i].qemu_id = -1;
cpus[i].socket_id = -1;
cpus[i].die_id = -1;
+ cpus[i].cluster_id = -1;
cpus[i].core_id = -1;
cpus[i].thread_id = -1;
cpus[i].node_id = -1;
@@ -1676,6 +1677,7 @@ qemuMonitorGetCPUInfoHotplug(struct qemuMonitorQueryHotpluggableCpusEntry *hotpl
!vcpus[mainvcpu].online;
vcpus[mainvcpu].socket_id = hotplugvcpus[i].socket_id;
vcpus[mainvcpu].die_id = hotplugvcpus[i].die_id;
+ vcpus[mainvcpu].cluster_id = hotplugvcpus[i].cluster_id;
vcpus[mainvcpu].core_id = hotplugvcpus[i].core_id;
vcpus[mainvcpu].thread_id = hotplugvcpus[i].thread_id;
vcpus[mainvcpu].node_id = hotplugvcpus[i].node_id;
diff --git a/src/qemu/qemu_monitor.h b/src/qemu/qemu_monitor.h
index e893542971..4cbd225e8f 100644
--- a/src/qemu/qemu_monitor.h
+++ b/src/qemu/qemu_monitor.h
@@ -605,6 +605,7 @@ struct qemuMonitorQueryHotpluggableCpusEntry {
int node_id;
int socket_id;
int die_id;
+ int cluster_id;
int core_id;
int thread_id;
@@ -628,6 +629,7 @@ struct _qemuMonitorCPUInfo {
* all entries are -1 */
int socket_id;
int die_id;
+ int cluster_id;
int core_id;
int thread_id;
int node_id;
diff --git a/src/qemu/qemu_monitor_json.c b/src/qemu/qemu_monitor_json.c
index 8bf56e99f5..3748034bb9 100644
--- a/src/qemu/qemu_monitor_json.c
+++ b/src/qemu/qemu_monitor_json.c
@@ -7606,12 +7606,14 @@ qemuMonitorJSONProcessHotpluggableCpusReply(virJSONValue *vcpu,
entry->node_id = -1;
entry->socket_id = -1;
entry->die_id = -1;
+ entry->cluster_id = -1;
entry->core_id = -1;
entry->thread_id = -1;
ignore_value(virJSONValueObjectGetNumberInt(props, "node-id", &entry->node_id));
ignore_value(virJSONValueObjectGetNumberInt(props, "socket-id", &entry->socket_id));
ignore_value(virJSONValueObjectGetNumberInt(props, "die-id", &entry->die_id));
+ ignore_value(virJSONValueObjectGetNumberInt(props, "cluster-id", &entry->cluster_id));
ignore_value(virJSONValueObjectGetNumberInt(props, "core-id", &entry->core_id));
ignore_value(virJSONValueObjectGetNumberInt(props, "thread-id", &entry->thread_id));
@@ -7649,6 +7651,9 @@ qemuMonitorQueryHotpluggableCpusEntrySort(const void *p1,
if (a->die_id != b->die_id)
return a->die_id - b->die_id;
+ if (a->cluster_id != b->cluster_id)
+ return a->cluster_id - b->cluster_id;
+
if (a->core_id != b->core_id)
return a->core_id - b->core_id;
--
2.27.0

View File

@ -0,0 +1,69 @@
From e1a0ba5580ffb8d9de09885bef23b26d34b691c4 Mon Sep 17 00:00:00 2001
From: Jiahui Cen <cenjiahui@huawei.com>
Date: Thu, 25 Feb 2021 18:55:31 +0800
Subject: [PATCH] qemu: Support 'retry' BLOCK_IO_ERROR event.
Accept BLOCK_IO_ERROR event with action='retry' from qemu.
Signed-off-by: Jiahui Cen <cenjiahui@huawei.com>
Signed-off-by: Ying Fang <fangying1@huawei.com>
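For illustration only (not part of the patch): a management application can observe
the new action through the existing IO-error event API. The connection URI below is
a placeholder, and the code only compiles against headers that already contain the
VIR_DOMAIN_EVENT_IO_ERROR_RETRY value added above.

/* Sketch: print a message whenever a guest IO error is being retried.
 * Build with: gcc example.c $(pkg-config --cflags --libs libvirt)
 */
#include <stdio.h>
#include <libvirt/libvirt.h>

static void
ioErrorCb(virConnectPtr conn, virDomainPtr dom,
          const char *srcPath, const char *devAlias,
          int action, void *opaque)
{
    (void)conn;
    (void)opaque;

    if (action == VIR_DOMAIN_EVENT_IO_ERROR_RETRY)
        printf("domain %s: retrying failed IO on %s (%s)\n",
               virDomainGetName(dom), srcPath, devAlias);
}

int main(void)
{
    virConnectPtr conn;

    if (virEventRegisterDefaultImpl() < 0)
        return 1;

    if (!(conn = virConnectOpen("qemu:///system")))
        return 1;

    if (virConnectDomainEventRegisterAny(conn, NULL,
                                         VIR_DOMAIN_EVENT_ID_IO_ERROR,
                                         VIR_DOMAIN_EVENT_CALLBACK(ioErrorCb),
                                         NULL, NULL) < 0) {
        virConnectClose(conn);
        return 1;
    }

    while (virEventRunDefaultImpl() == 0)
        ; /* keep dispatching libvirt events */

    virConnectClose(conn);
    return 0;
}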
---
include/libvirt/libvirt-domain.h | 1 +
src/qemu/qemu_monitor_json.c | 2 +-
tools/virsh-domain-event.c | 8 ++++++++
3 files changed, 10 insertions(+), 1 deletion(-)
diff --git a/include/libvirt/libvirt-domain.h b/include/libvirt/libvirt-domain.h
index a1902546bb..7d6117208b 100644
--- a/include/libvirt/libvirt-domain.h
+++ b/include/libvirt/libvirt-domain.h
@@ -4695,6 +4695,7 @@ typedef enum {
VIR_DOMAIN_EVENT_IO_ERROR_NONE = 0, /* No action, IO error ignored (Since: 0.8.0) */
VIR_DOMAIN_EVENT_IO_ERROR_PAUSE, /* Guest CPUs are paused (Since: 0.8.0) */
VIR_DOMAIN_EVENT_IO_ERROR_REPORT, /* IO error reported to guest OS (Since: 0.8.0) */
+ VIR_DOMAIN_EVENT_IO_ERROR_RETRY, /* Failed IO retried (Since: 6.2.0) */
# ifdef VIR_ENUM_SENTINELS
VIR_DOMAIN_EVENT_IO_ERROR_LAST /* (Since: 0.9.10) */
diff --git a/src/qemu/qemu_monitor_json.c b/src/qemu/qemu_monitor_json.c
index a9133793f6..231a9d8a2b 100644
--- a/src/qemu/qemu_monitor_json.c
+++ b/src/qemu/qemu_monitor_json.c
@@ -685,7 +685,7 @@ static void qemuMonitorJSONHandleWatchdog(qemuMonitor *mon, virJSONValue *data)
VIR_ENUM_DECL(qemuMonitorIOErrorAction);
VIR_ENUM_IMPL(qemuMonitorIOErrorAction,
VIR_DOMAIN_EVENT_IO_ERROR_LAST,
- "ignore", "stop", "report",
+ "ignore", "stop", "report", "retry",
);
diff --git a/tools/virsh-domain-event.c b/tools/virsh-domain-event.c
index 6887c195a0..822d9e1e83 100644
--- a/tools/virsh-domain-event.c
+++ b/tools/virsh-domain-event.c
@@ -397,6 +397,10 @@ virshEventIOErrorPrint(virConnectPtr conn G_GNUC_UNUSED,
virshEventPrintf(opaque, _("event 'io-error' for domain '%1$s': %2$s (%3$s) report\n"),
virDomainGetName(dom), srcPath, devAlias);
break;
+ case VIR_DOMAIN_EVENT_IO_ERROR_RETRY:
+ virshEventPrintf(opaque, _("event 'io-error' for domain '%1$s': %2$s (%3$s) retry\n"),
+ virDomainGetName(dom), srcPath, devAlias);
+ break;
case VIR_DOMAIN_EVENT_IO_ERROR_LAST:
default:
virshEventPrintf(opaque, _("event 'io-error' for domain '%1$s': %2$s (%3$s) unknown\n"),
@@ -458,6 +462,10 @@ virshEventIOErrorReasonPrint(virConnectPtr conn G_GNUC_UNUSED,
virshEventPrintf(opaque, _("event 'io-error' for domain '%1$s': %2$s (%3$s) report due to %4$s\n"),
virDomainGetName(dom), srcPath, devAlias, reason);
break;
+ case VIR_DOMAIN_EVENT_IO_ERROR_RETRY:
+ virshEventPrintf(opaque, _("event 'io-error' for domain '%1$s': %2$s (%3$s) retry due to %4$s\n"),
+ virDomainGetName(dom), srcPath, devAlias, reason);
+ break;
case VIR_DOMAIN_EVENT_IO_ERROR_LAST:
default:
virshEventPrintf(opaque, _("event 'io-error' for domain '%1$s': %2$s (%3$s) unknown due to %4$s\n"),
--
2.27.0
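
As a usage illustration (not part of the patch above), here is a minimal client-side sketch of how the new VIR_DOMAIN_EVENT_IO_ERROR_RETRY action could be consumed through the existing libvirt domain event API. The callback name ioErrorCb and the qemu:///system URI are illustrative assumptions, not code from this series.

#include <stdio.h>
#include <stdlib.h>
#include <libvirt/libvirt.h>

/* Signature follows virConnectDomainEventIOErrorCallback. */
static void
ioErrorCb(virConnectPtr conn, virDomainPtr dom,
          const char *srcPath, const char *devAlias,
          int action, void *opaque)
{
    (void)conn;
    (void)opaque;

    if (action == VIR_DOMAIN_EVENT_IO_ERROR_RETRY) {
        /* New action added by this series: qemu keeps the guest
         * running and retries the failed I/O request. */
        printf("%s: I/O error on %s (%s), qemu will retry\n",
               virDomainGetName(dom), srcPath, devAlias);
    } else {
        printf("%s: I/O error on %s (%s), action %d\n",
               virDomainGetName(dom), srcPath, devAlias, action);
    }
}

int main(void)
{
    virConnectPtr conn;

    if (virEventRegisterDefaultImpl() < 0)
        return EXIT_FAILURE;
    if (!(conn = virConnectOpen("qemu:///system")))
        return EXIT_FAILURE;

    virConnectDomainEventRegisterAny(conn, NULL,
                                     VIR_DOMAIN_EVENT_ID_IO_ERROR,
                                     VIR_DOMAIN_EVENT_CALLBACK(ioErrorCb),
                                     NULL, NULL);

    while (virEventRunDefaultImpl() >= 0)
        ;

    virConnectClose(conn);
    return EXIT_SUCCESS;
}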

View File

@ -0,0 +1,733 @@
From 00a3773961b74702c1fe12ed086f6f7feedf8efa Mon Sep 17 00:00:00 2001
From: Andrea Bolognani <abologna@redhat.com>
Date: Fri, 5 Jan 2024 18:44:56 +0100
Subject: [PATCH] qemu: Use CPU clusters for guests
https://issues.redhat.com/browse/RHEL-7043
Signed-off-by: Andrea Bolognani <abologna@redhat.com>
Reviewed-by: Peter Krempa <pkrempa@redhat.com>
---
src/qemu/qemu_command.c | 5 ++-
.../cpu-hotplug-startup.x86_64-latest.args | 2 +-
.../cpu-numa-disjoint.x86_64-latest.args | 2 +-
.../cpu-numa-disordered.x86_64-latest.args | 38 +++++++++++++++++++
.../cpu-numa-memshared.x86_64-latest.args | 2 +-
...-numa-no-memory-element.x86_64-latest.args | 2 +-
.../cpu-numa1.x86_64-latest.args | 2 +-
.../cpu-numa2.x86_64-latest.args | 2 +-
.../cpu-topology1.x86_64-latest.args | 2 +-
.../cpu-topology2.x86_64-latest.args | 2 +-
.../cpu-topology3.x86_64-latest.args | 2 +-
.../cpu-topology4.x86_64-latest.args | 2 +-
...memory-no-numa-topology.x86_64-latest.args | 2 +-
...fd-memory-numa-topology.x86_64-latest.args | 2 +-
...d-memory-numa-topology2.x86_64-latest.args | 2 +-
...d-memory-numa-topology3.x86_64-latest.args | 2 +-
.../hugepages-nvdimm.x86_64-latest.args | 2 +-
...memory-default-hugepage.x86_64-latest.args | 2 +-
.../memfd-memory-numa.x86_64-latest.args | 2 +-
...emory-hotplug-dimm-addr.x86_64-latest.args | 2 +-
.../memory-hotplug-dimm.x86_64-latest.args | 2 +-
...memory-hotplug-multiple.x86_64-latest.args | 2 +-
...y-hotplug-nvdimm-access.x86_64-latest.args | 2 +-
...ry-hotplug-nvdimm-align.x86_64-latest.args | 2 +-
...ry-hotplug-nvdimm-label.x86_64-latest.args | 2 +-
...ory-hotplug-nvdimm-pmem.x86_64-latest.args | 2 +-
...-nvdimm-ppc64-abi-update.ppc64-latest.args | 2 +-
...ory-hotplug-nvdimm-ppc64.ppc64-latest.args | 2 +-
...hotplug-nvdimm-readonly.x86_64-latest.args | 2 +-
.../memory-hotplug-nvdimm.x86_64-latest.args | 2 +-
...mory-hotplug-virtio-mem.x86_64-latest.args | 2 +-
...ory-hotplug-virtio-pmem.x86_64-latest.args | 2 +-
.../memory-hotplug.x86_64-latest.args | 2 +-
...auto-memory-vcpu-cpuset.x86_64-latest.args | 2 +-
...no-cpuset-and-placement.x86_64-latest.args | 2 +-
...d-auto-vcpu-no-numatune.x86_64-latest.args | 36 ++++++++++++++++++
...to-vcpu-static-numatune.x86_64-latest.args | 2 +-
...static-memory-auto-vcpu.x86_64-latest.args | 2 +-
...static-vcpu-no-numatune.x86_64-latest.args | 36 ++++++++++++++++++
.../qemuxml2argvdata/numad.x86_64-latest.args | 2 +-
...ne-auto-nodeset-invalid.x86_64-latest.args | 2 +-
.../pci-expander-bus.x86_64-latest.args | 2 +-
.../pcie-expander-bus.x86_64-latest.args | 2 +-
.../pseries-phb-numa-node.ppc64-latest.args | 2 +-
44 files changed, 154 insertions(+), 41 deletions(-)
create mode 100644 tests/qemuxml2argvdata/cpu-numa-disordered.x86_64-latest.args
create mode 100644 tests/qemuxml2argvdata/numad-auto-vcpu-no-numatune.x86_64-latest.args
create mode 100644 tests/qemuxml2argvdata/numad-static-vcpu-no-numatune.x86_64-latest.args
diff --git a/src/qemu/qemu_command.c b/src/qemu/qemu_command.c
index 461584566f..7410a4280d 100644
--- a/src/qemu/qemu_command.c
+++ b/src/qemu/qemu_command.c
@@ -7217,7 +7217,8 @@ qemuBuildSmpCommandLine(virCommand *cmd,
_("Only 1 die per socket is supported"));
return -1;
}
- if (def->cpu->clusters != 1) {
+ if (def->cpu->clusters != 1 &&
+ !virQEMUCapsGet(qemuCaps, QEMU_CAPS_SMP_CLUSTERS)) {
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
_("Only 1 cluster per die is supported"));
return -1;
@@ -7225,6 +7226,8 @@ qemuBuildSmpCommandLine(virCommand *cmd,
virBufferAsprintf(&buf, ",sockets=%u", def->cpu->sockets);
if (virQEMUCapsGet(qemuCaps, QEMU_CAPS_SMP_DIES))
virBufferAsprintf(&buf, ",dies=%u", def->cpu->dies);
+ if (virQEMUCapsGet(qemuCaps, QEMU_CAPS_SMP_CLUSTERS))
+ virBufferAsprintf(&buf, ",clusters=%u", def->cpu->clusters);
virBufferAsprintf(&buf, ",cores=%u", def->cpu->cores);
virBufferAsprintf(&buf, ",threads=%u", def->cpu->threads);
} else {
diff --git a/tests/qemuxml2argvdata/cpu-hotplug-startup.x86_64-latest.args b/tests/qemuxml2argvdata/cpu-hotplug-startup.x86_64-latest.args
index 009c08d71a..af1b464104 100644
--- a/tests/qemuxml2argvdata/cpu-hotplug-startup.x86_64-latest.args
+++ b/tests/qemuxml2argvdata/cpu-hotplug-startup.x86_64-latest.args
@@ -16,7 +16,7 @@ XDG_CONFIG_HOME=/var/lib/libvirt/qemu/domain--1-QEMUGuest1/.config \
-m size=219136k \
-object '{"qom-type":"memory-backend-ram","id":"pc.ram","size":224395264}' \
-overcommit mem-lock=off \
--smp 1,maxcpus=6,sockets=3,dies=1,cores=2,threads=1 \
+-smp 1,maxcpus=6,sockets=3,dies=1,clusters=1,cores=2,threads=1 \
-uuid c7a5fdbd-edaf-9455-926a-d65c16db1809 \
-display none \
-no-user-config \
diff --git a/tests/qemuxml2argvdata/cpu-numa-disjoint.x86_64-latest.args b/tests/qemuxml2argvdata/cpu-numa-disjoint.x86_64-latest.args
index 3b12934425..22fca082a8 100644
--- a/tests/qemuxml2argvdata/cpu-numa-disjoint.x86_64-latest.args
+++ b/tests/qemuxml2argvdata/cpu-numa-disjoint.x86_64-latest.args
@@ -15,7 +15,7 @@ XDG_CONFIG_HOME=/var/lib/libvirt/qemu/domain--1-QEMUGuest1/.config \
-cpu qemu64 \
-m size=219136k \
-overcommit mem-lock=off \
--smp 16,sockets=2,dies=1,cores=4,threads=2 \
+-smp 16,sockets=2,dies=1,clusters=1,cores=4,threads=2 \
-object '{"qom-type":"memory-backend-ram","id":"ram-node0","size":112197632}' \
-numa node,nodeid=0,cpus=0-3,cpus=8-11,memdev=ram-node0 \
-object '{"qom-type":"memory-backend-ram","id":"ram-node1","size":112197632}' \
diff --git a/tests/qemuxml2argvdata/cpu-numa-disordered.x86_64-latest.args b/tests/qemuxml2argvdata/cpu-numa-disordered.x86_64-latest.args
new file mode 100644
index 0000000000..bc4a6ad5f3
--- /dev/null
+++ b/tests/qemuxml2argvdata/cpu-numa-disordered.x86_64-latest.args
@@ -0,0 +1,38 @@
+LC_ALL=C \
+PATH=/bin \
+HOME=/var/lib/libvirt/qemu/domain--1-QEMUGuest1 \
+USER=test \
+LOGNAME=test \
+XDG_DATA_HOME=/var/lib/libvirt/qemu/domain--1-QEMUGuest1/.local/share \
+XDG_CACHE_HOME=/var/lib/libvirt/qemu/domain--1-QEMUGuest1/.cache \
+XDG_CONFIG_HOME=/var/lib/libvirt/qemu/domain--1-QEMUGuest1/.config \
+/usr/bin/qemu-system-x86_64 \
+-name guest=QEMUGuest1,debug-threads=on \
+-S \
+-object '{"qom-type":"secret","id":"masterKey0","format":"raw","file":"/var/lib/libvirt/qemu/domain--1-QEMUGuest1/master-key.aes"}' \
+-machine pc,usb=off,dump-guest-core=off,acpi=off \
+-accel tcg \
+-cpu qemu64 \
+-m size=328704k \
+-overcommit mem-lock=off \
+-smp 16,sockets=2,dies=1,clusters=1,cores=4,threads=2 \
+-object '{"qom-type":"memory-backend-ram","id":"ram-node0","size":112197632}' \
+-numa node,nodeid=0,cpus=0-5,memdev=ram-node0 \
+-object '{"qom-type":"memory-backend-ram","id":"ram-node1","size":112197632}' \
+-numa node,nodeid=1,cpus=11-15,memdev=ram-node1 \
+-object '{"qom-type":"memory-backend-ram","id":"ram-node2","size":112197632}' \
+-numa node,nodeid=2,cpus=6-10,memdev=ram-node2 \
+-uuid c7a5fdbd-edaf-9455-926a-d65c16db1809 \
+-display none \
+-no-user-config \
+-nodefaults \
+-chardev socket,id=charmonitor,fd=1729,server=on,wait=off \
+-mon chardev=charmonitor,id=monitor,mode=control \
+-rtc base=utc \
+-no-shutdown \
+-boot strict=on \
+-device '{"driver":"piix3-usb-uhci","id":"usb","bus":"pci.0","addr":"0x1.0x2"}' \
+-audiodev '{"id":"audio1","driver":"none"}' \
+-device '{"driver":"virtio-balloon-pci","id":"balloon0","bus":"pci.0","addr":"0x2"}' \
+-sandbox on,obsolete=deny,elevateprivileges=deny,spawn=deny,resourcecontrol=deny \
+-msg timestamp=on
diff --git a/tests/qemuxml2argvdata/cpu-numa-memshared.x86_64-latest.args b/tests/qemuxml2argvdata/cpu-numa-memshared.x86_64-latest.args
index 0c9ec88b8b..1e486b1bbc 100644
--- a/tests/qemuxml2argvdata/cpu-numa-memshared.x86_64-latest.args
+++ b/tests/qemuxml2argvdata/cpu-numa-memshared.x86_64-latest.args
@@ -15,7 +15,7 @@ XDG_CONFIG_HOME=/var/lib/libvirt/qemu/domain--1-QEMUGuest1/.config \
-cpu qemu64 \
-m size=219136k \
-overcommit mem-lock=off \
--smp 16,sockets=2,dies=1,cores=4,threads=2 \
+-smp 16,sockets=2,dies=1,clusters=1,cores=4,threads=2 \
-object '{"qom-type":"memory-backend-file","id":"ram-node0","mem-path":"/var/lib/libvirt/qemu/ram/-1-QEMUGuest1/ram-node0","share":true,"size":112197632}' \
-numa node,nodeid=0,cpus=0-7,memdev=ram-node0 \
-object '{"qom-type":"memory-backend-file","id":"ram-node1","mem-path":"/var/lib/libvirt/qemu/ram/-1-QEMUGuest1/ram-node1","share":false,"size":112197632}' \
diff --git a/tests/qemuxml2argvdata/cpu-numa-no-memory-element.x86_64-latest.args b/tests/qemuxml2argvdata/cpu-numa-no-memory-element.x86_64-latest.args
index 31a61f023e..59372c4ab9 100644
--- a/tests/qemuxml2argvdata/cpu-numa-no-memory-element.x86_64-latest.args
+++ b/tests/qemuxml2argvdata/cpu-numa-no-memory-element.x86_64-latest.args
@@ -15,7 +15,7 @@ XDG_CONFIG_HOME=/var/lib/libvirt/qemu/domain--1-QEMUGuest1/.config \
-cpu qemu64 \
-m size=219136k \
-overcommit mem-lock=off \
--smp 16,sockets=2,dies=1,cores=4,threads=2 \
+-smp 16,sockets=2,dies=1,clusters=1,cores=4,threads=2 \
-object '{"qom-type":"memory-backend-ram","id":"ram-node0","size":112197632}' \
-numa node,nodeid=0,cpus=0-7,memdev=ram-node0 \
-object '{"qom-type":"memory-backend-ram","id":"ram-node1","size":112197632}' \
diff --git a/tests/qemuxml2argvdata/cpu-numa1.x86_64-latest.args b/tests/qemuxml2argvdata/cpu-numa1.x86_64-latest.args
index 31a61f023e..59372c4ab9 100644
--- a/tests/qemuxml2argvdata/cpu-numa1.x86_64-latest.args
+++ b/tests/qemuxml2argvdata/cpu-numa1.x86_64-latest.args
@@ -15,7 +15,7 @@ XDG_CONFIG_HOME=/var/lib/libvirt/qemu/domain--1-QEMUGuest1/.config \
-cpu qemu64 \
-m size=219136k \
-overcommit mem-lock=off \
--smp 16,sockets=2,dies=1,cores=4,threads=2 \
+-smp 16,sockets=2,dies=1,clusters=1,cores=4,threads=2 \
-object '{"qom-type":"memory-backend-ram","id":"ram-node0","size":112197632}' \
-numa node,nodeid=0,cpus=0-7,memdev=ram-node0 \
-object '{"qom-type":"memory-backend-ram","id":"ram-node1","size":112197632}' \
diff --git a/tests/qemuxml2argvdata/cpu-numa2.x86_64-latest.args b/tests/qemuxml2argvdata/cpu-numa2.x86_64-latest.args
index 31a61f023e..59372c4ab9 100644
--- a/tests/qemuxml2argvdata/cpu-numa2.x86_64-latest.args
+++ b/tests/qemuxml2argvdata/cpu-numa2.x86_64-latest.args
@@ -15,7 +15,7 @@ XDG_CONFIG_HOME=/var/lib/libvirt/qemu/domain--1-QEMUGuest1/.config \
-cpu qemu64 \
-m size=219136k \
-overcommit mem-lock=off \
--smp 16,sockets=2,dies=1,cores=4,threads=2 \
+-smp 16,sockets=2,dies=1,clusters=1,cores=4,threads=2 \
-object '{"qom-type":"memory-backend-ram","id":"ram-node0","size":112197632}' \
-numa node,nodeid=0,cpus=0-7,memdev=ram-node0 \
-object '{"qom-type":"memory-backend-ram","id":"ram-node1","size":112197632}' \
diff --git a/tests/qemuxml2argvdata/cpu-topology1.x86_64-latest.args b/tests/qemuxml2argvdata/cpu-topology1.x86_64-latest.args
index 009c08d71a..af1b464104 100644
--- a/tests/qemuxml2argvdata/cpu-topology1.x86_64-latest.args
+++ b/tests/qemuxml2argvdata/cpu-topology1.x86_64-latest.args
@@ -16,7 +16,7 @@ XDG_CONFIG_HOME=/var/lib/libvirt/qemu/domain--1-QEMUGuest1/.config \
-m size=219136k \
-object '{"qom-type":"memory-backend-ram","id":"pc.ram","size":224395264}' \
-overcommit mem-lock=off \
--smp 1,maxcpus=6,sockets=3,dies=1,cores=2,threads=1 \
+-smp 1,maxcpus=6,sockets=3,dies=1,clusters=1,cores=2,threads=1 \
-uuid c7a5fdbd-edaf-9455-926a-d65c16db1809 \
-display none \
-no-user-config \
diff --git a/tests/qemuxml2argvdata/cpu-topology2.x86_64-latest.args b/tests/qemuxml2argvdata/cpu-topology2.x86_64-latest.args
index 7ba175fa80..8560eb6126 100644
--- a/tests/qemuxml2argvdata/cpu-topology2.x86_64-latest.args
+++ b/tests/qemuxml2argvdata/cpu-topology2.x86_64-latest.args
@@ -16,7 +16,7 @@ XDG_CONFIG_HOME=/var/lib/libvirt/qemu/domain--1-QEMUGuest1/.config \
-m size=219136k \
-object '{"qom-type":"memory-backend-ram","id":"pc.ram","size":224395264}' \
-overcommit mem-lock=off \
--smp 6,sockets=1,dies=1,cores=2,threads=3 \
+-smp 6,sockets=1,dies=1,clusters=1,cores=2,threads=3 \
-uuid c7a5fdbd-edaf-9455-926a-d65c16db1809 \
-display none \
-no-user-config \
diff --git a/tests/qemuxml2argvdata/cpu-topology3.x86_64-latest.args b/tests/qemuxml2argvdata/cpu-topology3.x86_64-latest.args
index c11b4cd307..3878c558b8 100644
--- a/tests/qemuxml2argvdata/cpu-topology3.x86_64-latest.args
+++ b/tests/qemuxml2argvdata/cpu-topology3.x86_64-latest.args
@@ -16,7 +16,7 @@ XDG_CONFIG_HOME=/var/lib/libvirt/qemu/domain--1-QEMUGuest1/.config \
-m size=219136k \
-object '{"qom-type":"memory-backend-ram","id":"pc.ram","size":224395264}' \
-overcommit mem-lock=off \
--smp 6,sockets=3,dies=1,cores=2,threads=1 \
+-smp 6,sockets=3,dies=1,clusters=1,cores=2,threads=1 \
-uuid c7a5fdbd-edaf-9455-926a-d65c16db1809 \
-display none \
-no-user-config \
diff --git a/tests/qemuxml2argvdata/cpu-topology4.x86_64-latest.args b/tests/qemuxml2argvdata/cpu-topology4.x86_64-latest.args
index d0e31ba2b5..8720038c0d 100644
--- a/tests/qemuxml2argvdata/cpu-topology4.x86_64-latest.args
+++ b/tests/qemuxml2argvdata/cpu-topology4.x86_64-latest.args
@@ -16,7 +16,7 @@ XDG_CONFIG_HOME=/var/lib/libvirt/qemu/domain--1-QEMUGuest1/.config \
-m size=219136k \
-object '{"qom-type":"memory-backend-ram","id":"pc.ram","size":224395264}' \
-overcommit mem-lock=off \
--smp 1,maxcpus=6,sockets=1,dies=3,cores=2,threads=1 \
+-smp 1,maxcpus=6,sockets=1,dies=3,clusters=1,cores=2,threads=1 \
-uuid c7a5fdbd-edaf-9455-926a-d65c16db1809 \
-display none \
-no-user-config \
diff --git a/tests/qemuxml2argvdata/fd-memory-no-numa-topology.x86_64-latest.args b/tests/qemuxml2argvdata/fd-memory-no-numa-topology.x86_64-latest.args
index 58b3c7b544..1bd75a85a6 100644
--- a/tests/qemuxml2argvdata/fd-memory-no-numa-topology.x86_64-latest.args
+++ b/tests/qemuxml2argvdata/fd-memory-no-numa-topology.x86_64-latest.args
@@ -16,7 +16,7 @@ XDG_CONFIG_HOME=/var/lib/libvirt/qemu/domain--1-instance-00000092/.config \
-m size=14680064k \
-object '{"qom-type":"memory-backend-file","id":"pc.ram","mem-path":"/var/lib/libvirt/qemu/ram/-1-instance-00000092/pc.ram","share":true,"x-use-canonical-path-for-ramblock-id":false,"prealloc":true,"size":15032385536}' \
-overcommit mem-lock=off \
--smp 8,sockets=8,dies=1,cores=1,threads=1 \
+-smp 8,sockets=8,dies=1,clusters=1,cores=1,threads=1 \
-uuid 126f2720-6f8e-45ab-a886-ec9277079a67 \
-display none \
-no-user-config \
diff --git a/tests/qemuxml2argvdata/fd-memory-numa-topology.x86_64-latest.args b/tests/qemuxml2argvdata/fd-memory-numa-topology.x86_64-latest.args
index 21f9a16540..17ef506431 100644
--- a/tests/qemuxml2argvdata/fd-memory-numa-topology.x86_64-latest.args
+++ b/tests/qemuxml2argvdata/fd-memory-numa-topology.x86_64-latest.args
@@ -15,7 +15,7 @@ XDG_CONFIG_HOME=/var/lib/libvirt/qemu/domain--1-instance-00000092/.config \
-cpu qemu64 \
-m size=14680064k \
-overcommit mem-lock=off \
--smp 8,sockets=1,dies=1,cores=8,threads=1 \
+-smp 8,sockets=1,dies=1,clusters=1,cores=8,threads=1 \
-object '{"qom-type":"memory-backend-file","id":"ram-node0","mem-path":"/var/lib/libvirt/qemu/ram/-1-instance-00000092/ram-node0","share":true,"prealloc":true,"size":15032385536}' \
-numa node,nodeid=0,cpus=0-7,memdev=ram-node0 \
-uuid 126f2720-6f8e-45ab-a886-ec9277079a67 \
diff --git a/tests/qemuxml2argvdata/fd-memory-numa-topology2.x86_64-latest.args b/tests/qemuxml2argvdata/fd-memory-numa-topology2.x86_64-latest.args
index 3bf16f9caf..b247231b85 100644
--- a/tests/qemuxml2argvdata/fd-memory-numa-topology2.x86_64-latest.args
+++ b/tests/qemuxml2argvdata/fd-memory-numa-topology2.x86_64-latest.args
@@ -15,7 +15,7 @@ XDG_CONFIG_HOME=/var/lib/libvirt/qemu/domain--1-instance-00000092/.config \
-cpu qemu64 \
-m size=29360128k \
-overcommit mem-lock=off \
--smp 20,sockets=1,dies=1,cores=20,threads=1 \
+-smp 20,sockets=1,dies=1,clusters=1,cores=20,threads=1 \
-object '{"qom-type":"memory-backend-file","id":"ram-node0","mem-path":"/var/lib/libvirt/qemu/ram/-1-instance-00000092/ram-node0","share":false,"prealloc":true,"size":15032385536}' \
-numa node,nodeid=0,cpus=0-7,cpus=16-19,memdev=ram-node0 \
-object '{"qom-type":"memory-backend-file","id":"ram-node1","mem-path":"/var/lib/libvirt/qemu/ram/-1-instance-00000092/ram-node1","share":true,"prealloc":true,"size":15032385536}' \
diff --git a/tests/qemuxml2argvdata/fd-memory-numa-topology3.x86_64-latest.args b/tests/qemuxml2argvdata/fd-memory-numa-topology3.x86_64-latest.args
index 3153e22d56..9e94209499 100644
--- a/tests/qemuxml2argvdata/fd-memory-numa-topology3.x86_64-latest.args
+++ b/tests/qemuxml2argvdata/fd-memory-numa-topology3.x86_64-latest.args
@@ -15,7 +15,7 @@ XDG_CONFIG_HOME=/var/lib/libvirt/qemu/domain--1-instance-00000092/.config \
-cpu qemu64 \
-m size=44040192k \
-overcommit mem-lock=off \
--smp 32,sockets=1,dies=1,cores=32,threads=1 \
+-smp 32,sockets=1,dies=1,clusters=1,cores=32,threads=1 \
-object '{"qom-type":"memory-backend-file","id":"ram-node0","mem-path":"/var/lib/libvirt/qemu/ram/-1-instance-00000092/ram-node0","share":true,"prealloc":true,"size":15032385536}' \
-numa node,nodeid=0,cpus=0-1,cpus=6-31,memdev=ram-node0 \
-object '{"qom-type":"memory-backend-file","id":"ram-node1","mem-path":"/var/lib/libvirt/qemu/ram/-1-instance-00000092/ram-node1","share":true,"prealloc":true,"size":15032385536}' \
diff --git a/tests/qemuxml2argvdata/hugepages-nvdimm.x86_64-latest.args b/tests/qemuxml2argvdata/hugepages-nvdimm.x86_64-latest.args
index fa376accb5..f30db0ad09 100644
--- a/tests/qemuxml2argvdata/hugepages-nvdimm.x86_64-latest.args
+++ b/tests/qemuxml2argvdata/hugepages-nvdimm.x86_64-latest.args
@@ -15,7 +15,7 @@ XDG_CONFIG_HOME=/var/lib/libvirt/qemu/domain--1-QEMUGuest1/.config \
-cpu qemu64 \
-m size=1048576k,slots=16,maxmem=1099511627776k \
-overcommit mem-lock=off \
--smp 2,sockets=2,dies=1,cores=1,threads=1 \
+-smp 2,sockets=2,dies=1,clusters=1,cores=1,threads=1 \
-object '{"qom-type":"memory-backend-file","id":"ram-node0","mem-path":"/dev/hugepages2M/libvirt/qemu/-1-QEMUGuest1","share":true,"prealloc":true,"size":1073741824}' \
-numa node,nodeid=0,cpus=0-1,memdev=ram-node0 \
-uuid c7a5fdbd-edaf-9455-926a-d65c16db1809 \
diff --git a/tests/qemuxml2argvdata/memfd-memory-default-hugepage.x86_64-latest.args b/tests/qemuxml2argvdata/memfd-memory-default-hugepage.x86_64-latest.args
index 55969eb2fd..f850d7be60 100644
--- a/tests/qemuxml2argvdata/memfd-memory-default-hugepage.x86_64-latest.args
+++ b/tests/qemuxml2argvdata/memfd-memory-default-hugepage.x86_64-latest.args
@@ -15,7 +15,7 @@ XDG_CONFIG_HOME=/var/lib/libvirt/qemu/domain--1-instance-00000092/.config \
-cpu qemu64 \
-m size=14680064k \
-overcommit mem-lock=off \
--smp 8,sockets=1,dies=1,cores=8,threads=1 \
+-smp 8,sockets=1,dies=1,clusters=1,cores=8,threads=1 \
-object '{"qom-type":"thread-context","id":"tc-ram-node0","node-affinity":[3]}' \
-object '{"qom-type":"memory-backend-memfd","id":"ram-node0","hugetlb":true,"hugetlbsize":2097152,"share":true,"prealloc":true,"size":15032385536,"host-nodes":[3],"policy":"preferred","prealloc-context":"tc-ram-node0"}' \
-numa node,nodeid=0,cpus=0-7,memdev=ram-node0 \
diff --git a/tests/qemuxml2argvdata/memfd-memory-numa.x86_64-latest.args b/tests/qemuxml2argvdata/memfd-memory-numa.x86_64-latest.args
index 1ef2d69fcb..dbe2b82a56 100644
--- a/tests/qemuxml2argvdata/memfd-memory-numa.x86_64-latest.args
+++ b/tests/qemuxml2argvdata/memfd-memory-numa.x86_64-latest.args
@@ -15,7 +15,7 @@ XDG_CONFIG_HOME=/var/lib/libvirt/qemu/domain--1-instance-00000092/.config \
-cpu qemu64 \
-m size=14680064k,slots=16,maxmem=1099511627776k \
-overcommit mem-lock=off \
--smp 8,sockets=1,dies=1,cores=8,threads=1 \
+-smp 8,sockets=1,dies=1,clusters=1,cores=8,threads=1 \
-object '{"qom-type":"thread-context","id":"tc-ram-node0","node-affinity":[3]}' \
-object '{"qom-type":"memory-backend-memfd","id":"ram-node0","hugetlb":true,"hugetlbsize":2097152,"share":true,"prealloc":true,"prealloc-threads":8,"size":15032385536,"host-nodes":[3],"policy":"preferred","prealloc-context":"tc-ram-node0"}' \
-numa node,nodeid=0,cpus=0-7,memdev=ram-node0 \
diff --git a/tests/qemuxml2argvdata/memory-hotplug-dimm-addr.x86_64-latest.args b/tests/qemuxml2argvdata/memory-hotplug-dimm-addr.x86_64-latest.args
index 6ae1fd1b98..c15fe191de 100644
--- a/tests/qemuxml2argvdata/memory-hotplug-dimm-addr.x86_64-latest.args
+++ b/tests/qemuxml2argvdata/memory-hotplug-dimm-addr.x86_64-latest.args
@@ -15,7 +15,7 @@ XDG_CONFIG_HOME=/var/lib/libvirt/qemu/domain--1-QEMUGuest1/.config \
-cpu qemu64 \
-m size=219136k,slots=16,maxmem=1099511627776k \
-overcommit mem-lock=off \
--smp 2,sockets=2,dies=1,cores=1,threads=1 \
+-smp 2,sockets=2,dies=1,clusters=1,cores=1,threads=1 \
-object '{"qom-type":"memory-backend-ram","id":"ram-node0","size":224395264}' \
-numa node,nodeid=0,cpus=0-1,memdev=ram-node0 \
-uuid c7a5fdbd-edaf-9455-926a-d65c16db1809 \
diff --git a/tests/qemuxml2argvdata/memory-hotplug-dimm.x86_64-latest.args b/tests/qemuxml2argvdata/memory-hotplug-dimm.x86_64-latest.args
index 71817da309..a729930db2 100644
--- a/tests/qemuxml2argvdata/memory-hotplug-dimm.x86_64-latest.args
+++ b/tests/qemuxml2argvdata/memory-hotplug-dimm.x86_64-latest.args
@@ -15,7 +15,7 @@ XDG_CONFIG_HOME=/var/lib/libvirt/qemu/domain--1-QEMUGuest1/.config \
-cpu qemu64 \
-m size=219136k,slots=16,maxmem=1099511627776k \
-overcommit mem-lock=off \
--smp 2,sockets=2,dies=1,cores=1,threads=1 \
+-smp 2,sockets=2,dies=1,clusters=1,cores=1,threads=1 \
-object '{"qom-type":"memory-backend-ram","id":"ram-node0","size":224395264}' \
-numa node,nodeid=0,cpus=0-1,memdev=ram-node0 \
-uuid c7a5fdbd-edaf-9455-926a-d65c16db1809 \
diff --git a/tests/qemuxml2argvdata/memory-hotplug-multiple.x86_64-latest.args b/tests/qemuxml2argvdata/memory-hotplug-multiple.x86_64-latest.args
index ad1dad01ac..f1f2f93a11 100644
--- a/tests/qemuxml2argvdata/memory-hotplug-multiple.x86_64-latest.args
+++ b/tests/qemuxml2argvdata/memory-hotplug-multiple.x86_64-latest.args
@@ -15,7 +15,7 @@ XDG_CONFIG_HOME=/var/lib/libvirt/qemu/domain--1-QEMUGuest1/.config \
-cpu qemu64 \
-m size=2095104k,slots=2,maxmem=1099511627776k \
-overcommit mem-lock=off \
--smp 2,sockets=2,dies=1,cores=1,threads=1 \
+-smp 2,sockets=2,dies=1,clusters=1,cores=1,threads=1 \
-object '{"qom-type":"memory-backend-ram","id":"ram-node0","size":2145386496}' \
-numa node,nodeid=0,cpus=0-1,memdev=ram-node0 \
-uuid c7a5fdbd-edaf-9455-926a-d65c16db1809 \
diff --git a/tests/qemuxml2argvdata/memory-hotplug-nvdimm-access.x86_64-latest.args b/tests/qemuxml2argvdata/memory-hotplug-nvdimm-access.x86_64-latest.args
index f09ae22927..d53732b39e 100644
--- a/tests/qemuxml2argvdata/memory-hotplug-nvdimm-access.x86_64-latest.args
+++ b/tests/qemuxml2argvdata/memory-hotplug-nvdimm-access.x86_64-latest.args
@@ -15,7 +15,7 @@ XDG_CONFIG_HOME=/var/lib/libvirt/qemu/domain--1-QEMUGuest1/.config \
-cpu qemu64 \
-m size=219136k,slots=16,maxmem=1099511627776k \
-overcommit mem-lock=off \
--smp 2,sockets=2,dies=1,cores=1,threads=1 \
+-smp 2,sockets=2,dies=1,clusters=1,cores=1,threads=1 \
-object '{"qom-type":"memory-backend-ram","id":"ram-node0","size":224395264}' \
-numa node,nodeid=0,cpus=0-1,memdev=ram-node0 \
-uuid c7a5fdbd-edaf-9455-926a-d65c16db1809 \
diff --git a/tests/qemuxml2argvdata/memory-hotplug-nvdimm-align.x86_64-latest.args b/tests/qemuxml2argvdata/memory-hotplug-nvdimm-align.x86_64-latest.args
index 6cfe4b8263..cba467d9d3 100644
--- a/tests/qemuxml2argvdata/memory-hotplug-nvdimm-align.x86_64-latest.args
+++ b/tests/qemuxml2argvdata/memory-hotplug-nvdimm-align.x86_64-latest.args
@@ -15,7 +15,7 @@ XDG_CONFIG_HOME=/var/lib/libvirt/qemu/domain--1-QEMUGuest1/.config \
-cpu qemu64 \
-m size=219136k,slots=16,maxmem=1099511627776k \
-overcommit mem-lock=off \
--smp 2,sockets=2,dies=1,cores=1,threads=1 \
+-smp 2,sockets=2,dies=1,clusters=1,cores=1,threads=1 \
-object '{"qom-type":"memory-backend-ram","id":"ram-node0","size":224395264}' \
-numa node,nodeid=0,cpus=0-1,memdev=ram-node0 \
-uuid c7a5fdbd-edaf-9455-926a-d65c16db1809 \
diff --git a/tests/qemuxml2argvdata/memory-hotplug-nvdimm-label.x86_64-latest.args b/tests/qemuxml2argvdata/memory-hotplug-nvdimm-label.x86_64-latest.args
index 4041c15b2b..2ad23a0224 100644
--- a/tests/qemuxml2argvdata/memory-hotplug-nvdimm-label.x86_64-latest.args
+++ b/tests/qemuxml2argvdata/memory-hotplug-nvdimm-label.x86_64-latest.args
@@ -15,7 +15,7 @@ XDG_CONFIG_HOME=/var/lib/libvirt/qemu/domain--1-QEMUGuest1/.config \
-cpu qemu64 \
-m size=219136k,slots=16,maxmem=1099511627776k \
-overcommit mem-lock=off \
--smp 2,sockets=2,dies=1,cores=1,threads=1 \
+-smp 2,sockets=2,dies=1,clusters=1,cores=1,threads=1 \
-object '{"qom-type":"memory-backend-ram","id":"ram-node0","size":224395264}' \
-numa node,nodeid=0,cpus=0-1,memdev=ram-node0 \
-uuid c7a5fdbd-edaf-9455-926a-d65c16db1809 \
diff --git a/tests/qemuxml2argvdata/memory-hotplug-nvdimm-pmem.x86_64-latest.args b/tests/qemuxml2argvdata/memory-hotplug-nvdimm-pmem.x86_64-latest.args
index 3547e96c00..ac5ca187b1 100644
--- a/tests/qemuxml2argvdata/memory-hotplug-nvdimm-pmem.x86_64-latest.args
+++ b/tests/qemuxml2argvdata/memory-hotplug-nvdimm-pmem.x86_64-latest.args
@@ -15,7 +15,7 @@ XDG_CONFIG_HOME=/var/lib/libvirt/qemu/domain--1-QEMUGuest1/.config \
-cpu qemu64 \
-m size=219136k,slots=16,maxmem=1099511627776k \
-overcommit mem-lock=off \
--smp 2,sockets=2,dies=1,cores=1,threads=1 \
+-smp 2,sockets=2,dies=1,clusters=1,cores=1,threads=1 \
-object '{"qom-type":"memory-backend-ram","id":"ram-node0","size":224395264}' \
-numa node,nodeid=0,cpus=0-1,memdev=ram-node0 \
-uuid c7a5fdbd-edaf-9455-926a-d65c16db1809 \
diff --git a/tests/qemuxml2argvdata/memory-hotplug-nvdimm-ppc64-abi-update.ppc64-latest.args b/tests/qemuxml2argvdata/memory-hotplug-nvdimm-ppc64-abi-update.ppc64-latest.args
index 9b57518fca..c2c1623d9f 100644
--- a/tests/qemuxml2argvdata/memory-hotplug-nvdimm-ppc64-abi-update.ppc64-latest.args
+++ b/tests/qemuxml2argvdata/memory-hotplug-nvdimm-ppc64-abi-update.ppc64-latest.args
@@ -15,7 +15,7 @@ XDG_CONFIG_HOME=/var/lib/libvirt/qemu/domain--1-QEMUGuest1/.config \
-cpu POWER9 \
-m size=1048576k,slots=16,maxmem=1099511627776k \
-overcommit mem-lock=off \
--smp 2,sockets=2,dies=1,cores=1,threads=1 \
+-smp 2,sockets=2,dies=1,clusters=1,cores=1,threads=1 \
-object '{"qom-type":"memory-backend-ram","id":"ram-node0","size":1073741824}' \
-numa node,nodeid=0,cpus=0-1,memdev=ram-node0 \
-uuid c7a5fdbd-edaf-9455-926a-d65c16db1809 \
diff --git a/tests/qemuxml2argvdata/memory-hotplug-nvdimm-ppc64.ppc64-latest.args b/tests/qemuxml2argvdata/memory-hotplug-nvdimm-ppc64.ppc64-latest.args
index 9b57518fca..c2c1623d9f 100644
--- a/tests/qemuxml2argvdata/memory-hotplug-nvdimm-ppc64.ppc64-latest.args
+++ b/tests/qemuxml2argvdata/memory-hotplug-nvdimm-ppc64.ppc64-latest.args
@@ -15,7 +15,7 @@ XDG_CONFIG_HOME=/var/lib/libvirt/qemu/domain--1-QEMUGuest1/.config \
-cpu POWER9 \
-m size=1048576k,slots=16,maxmem=1099511627776k \
-overcommit mem-lock=off \
--smp 2,sockets=2,dies=1,cores=1,threads=1 \
+-smp 2,sockets=2,dies=1,clusters=1,cores=1,threads=1 \
-object '{"qom-type":"memory-backend-ram","id":"ram-node0","size":1073741824}' \
-numa node,nodeid=0,cpus=0-1,memdev=ram-node0 \
-uuid c7a5fdbd-edaf-9455-926a-d65c16db1809 \
diff --git a/tests/qemuxml2argvdata/memory-hotplug-nvdimm-readonly.x86_64-latest.args b/tests/qemuxml2argvdata/memory-hotplug-nvdimm-readonly.x86_64-latest.args
index 17bacfb2f6..8af4673841 100644
--- a/tests/qemuxml2argvdata/memory-hotplug-nvdimm-readonly.x86_64-latest.args
+++ b/tests/qemuxml2argvdata/memory-hotplug-nvdimm-readonly.x86_64-latest.args
@@ -15,7 +15,7 @@ XDG_CONFIG_HOME=/var/lib/libvirt/qemu/domain--1-QEMUGuest1/.config \
-cpu qemu64 \
-m size=219136k,slots=16,maxmem=1099511627776k \
-overcommit mem-lock=off \
--smp 2,sockets=2,dies=1,cores=1,threads=1 \
+-smp 2,sockets=2,dies=1,clusters=1,cores=1,threads=1 \
-object '{"qom-type":"memory-backend-ram","id":"ram-node0","size":224395264}' \
-numa node,nodeid=0,cpus=0-1,memdev=ram-node0 \
-uuid c7a5fdbd-edaf-9455-926a-d65c16db1809 \
diff --git a/tests/qemuxml2argvdata/memory-hotplug-nvdimm.x86_64-latest.args b/tests/qemuxml2argvdata/memory-hotplug-nvdimm.x86_64-latest.args
index 1321e5556e..6531caa908 100644
--- a/tests/qemuxml2argvdata/memory-hotplug-nvdimm.x86_64-latest.args
+++ b/tests/qemuxml2argvdata/memory-hotplug-nvdimm.x86_64-latest.args
@@ -15,7 +15,7 @@ XDG_CONFIG_HOME=/var/lib/libvirt/qemu/domain--1-QEMUGuest1/.config \
-cpu qemu64 \
-m size=1048576k,slots=16,maxmem=1099511627776k \
-overcommit mem-lock=off \
--smp 2,sockets=2,dies=1,cores=1,threads=1 \
+-smp 2,sockets=2,dies=1,clusters=1,cores=1,threads=1 \
-object '{"qom-type":"memory-backend-ram","id":"ram-node0","size":1073741824}' \
-numa node,nodeid=0,cpus=0-1,memdev=ram-node0 \
-uuid c7a5fdbd-edaf-9455-926a-d65c16db1809 \
diff --git a/tests/qemuxml2argvdata/memory-hotplug-virtio-mem.x86_64-latest.args b/tests/qemuxml2argvdata/memory-hotplug-virtio-mem.x86_64-latest.args
index 607ce9b0e8..dbe96ae21d 100644
--- a/tests/qemuxml2argvdata/memory-hotplug-virtio-mem.x86_64-latest.args
+++ b/tests/qemuxml2argvdata/memory-hotplug-virtio-mem.x86_64-latest.args
@@ -15,7 +15,7 @@ XDG_CONFIG_HOME=/var/lib/libvirt/qemu/domain--1-QEMUGuest1/.config \
-cpu qemu64 \
-m size=2095104k,maxmem=1099511627776k \
-overcommit mem-lock=off \
--smp 2,sockets=2,dies=1,cores=1,threads=1 \
+-smp 2,sockets=2,dies=1,clusters=1,cores=1,threads=1 \
-object '{"qom-type":"memory-backend-ram","id":"ram-node0","size":2145386496}' \
-numa node,nodeid=0,cpus=0-1,memdev=ram-node0 \
-uuid c7a5fdbd-edaf-9455-926a-d65c16db1809 \
diff --git a/tests/qemuxml2argvdata/memory-hotplug-virtio-pmem.x86_64-latest.args b/tests/qemuxml2argvdata/memory-hotplug-virtio-pmem.x86_64-latest.args
index 9bbde420a9..df7b7f80a9 100644
--- a/tests/qemuxml2argvdata/memory-hotplug-virtio-pmem.x86_64-latest.args
+++ b/tests/qemuxml2argvdata/memory-hotplug-virtio-pmem.x86_64-latest.args
@@ -15,7 +15,7 @@ XDG_CONFIG_HOME=/var/lib/libvirt/qemu/domain--1-QEMUGuest1/.config \
-cpu qemu64 \
-m size=2095104k,maxmem=1099511627776k \
-overcommit mem-lock=off \
--smp 2,sockets=2,dies=1,cores=1,threads=1 \
+-smp 2,sockets=2,dies=1,clusters=1,cores=1,threads=1 \
-object '{"qom-type":"memory-backend-ram","id":"ram-node0","size":2145386496}' \
-numa node,nodeid=0,cpus=0-1,memdev=ram-node0 \
-uuid c7a5fdbd-edaf-9455-926a-d65c16db1809 \
diff --git a/tests/qemuxml2argvdata/memory-hotplug.x86_64-latest.args b/tests/qemuxml2argvdata/memory-hotplug.x86_64-latest.args
index 53f0fbc68f..d04d9d73e9 100644
--- a/tests/qemuxml2argvdata/memory-hotplug.x86_64-latest.args
+++ b/tests/qemuxml2argvdata/memory-hotplug.x86_64-latest.args
@@ -15,7 +15,7 @@ XDG_CONFIG_HOME=/var/lib/libvirt/qemu/domain--1-QEMUGuest1/.config \
-cpu qemu64 \
-m size=219136k,slots=16,maxmem=1099511627776k \
-overcommit mem-lock=off \
--smp 2,sockets=2,dies=1,cores=1,threads=1 \
+-smp 2,sockets=2,dies=1,clusters=1,cores=1,threads=1 \
-object '{"qom-type":"memory-backend-ram","id":"ram-node0","size":224395264}' \
-numa node,nodeid=0,cpus=0-1,memdev=ram-node0 \
-uuid c7a5fdbd-edaf-9455-926a-d65c16db1809 \
diff --git a/tests/qemuxml2argvdata/numad-auto-memory-vcpu-cpuset.x86_64-latest.args b/tests/qemuxml2argvdata/numad-auto-memory-vcpu-cpuset.x86_64-latest.args
index d4238f3d9e..138c8255f7 100644
--- a/tests/qemuxml2argvdata/numad-auto-memory-vcpu-cpuset.x86_64-latest.args
+++ b/tests/qemuxml2argvdata/numad-auto-memory-vcpu-cpuset.x86_64-latest.args
@@ -16,7 +16,7 @@ XDG_CONFIG_HOME=/var/lib/libvirt/qemu/domain--1-QEMUGuest1/.config \
-m size=219136k \
-object '{"qom-type":"memory-backend-ram","id":"pc.ram","size":224395264,"host-nodes":[0,1,2,3],"policy":"interleave"}' \
-overcommit mem-lock=off \
--smp 2,sockets=2,dies=1,cores=1,threads=1 \
+-smp 2,sockets=2,dies=1,clusters=1,cores=1,threads=1 \
-uuid c7a5fdbd-edaf-9455-926a-d65c16db1809 \
-display none \
-no-user-config \
diff --git a/tests/qemuxml2argvdata/numad-auto-memory-vcpu-no-cpuset-and-placement.x86_64-latest.args b/tests/qemuxml2argvdata/numad-auto-memory-vcpu-no-cpuset-and-placement.x86_64-latest.args
index d4238f3d9e..138c8255f7 100644
--- a/tests/qemuxml2argvdata/numad-auto-memory-vcpu-no-cpuset-and-placement.x86_64-latest.args
+++ b/tests/qemuxml2argvdata/numad-auto-memory-vcpu-no-cpuset-and-placement.x86_64-latest.args
@@ -16,7 +16,7 @@ XDG_CONFIG_HOME=/var/lib/libvirt/qemu/domain--1-QEMUGuest1/.config \
-m size=219136k \
-object '{"qom-type":"memory-backend-ram","id":"pc.ram","size":224395264,"host-nodes":[0,1,2,3],"policy":"interleave"}' \
-overcommit mem-lock=off \
--smp 2,sockets=2,dies=1,cores=1,threads=1 \
+-smp 2,sockets=2,dies=1,clusters=1,cores=1,threads=1 \
-uuid c7a5fdbd-edaf-9455-926a-d65c16db1809 \
-display none \
-no-user-config \
diff --git a/tests/qemuxml2argvdata/numad-auto-vcpu-no-numatune.x86_64-latest.args b/tests/qemuxml2argvdata/numad-auto-vcpu-no-numatune.x86_64-latest.args
new file mode 100644
index 0000000000..f13f04c9d4
--- /dev/null
+++ b/tests/qemuxml2argvdata/numad-auto-vcpu-no-numatune.x86_64-latest.args
@@ -0,0 +1,36 @@
+LC_ALL=C \
+PATH=/bin \
+HOME=/var/lib/libvirt/qemu/domain--1-QEMUGuest1 \
+USER=test \
+LOGNAME=test \
+XDG_DATA_HOME=/var/lib/libvirt/qemu/domain--1-QEMUGuest1/.local/share \
+XDG_CACHE_HOME=/var/lib/libvirt/qemu/domain--1-QEMUGuest1/.cache \
+XDG_CONFIG_HOME=/var/lib/libvirt/qemu/domain--1-QEMUGuest1/.config \
+/usr/bin/qemu-system-x86_64 \
+-name guest=QEMUGuest1,debug-threads=on \
+-S \
+-object '{"qom-type":"secret","id":"masterKey0","format":"raw","file":"/var/lib/libvirt/qemu/domain--1-QEMUGuest1/master-key.aes"}' \
+-machine pc,usb=off,dump-guest-core=off,memory-backend=pc.ram,acpi=off \
+-accel tcg \
+-cpu qemu64 \
+-m size=219136k \
+-object '{"qom-type":"memory-backend-ram","id":"pc.ram","size":224395264,"host-nodes":[0,1,2,3],"policy":"bind"}' \
+-overcommit mem-lock=off \
+-smp 2,sockets=2,dies=1,clusters=1,cores=1,threads=1 \
+-uuid c7a5fdbd-edaf-9455-926a-d65c16db1809 \
+-display none \
+-no-user-config \
+-nodefaults \
+-chardev socket,id=charmonitor,fd=1729,server=on,wait=off \
+-mon chardev=charmonitor,id=monitor,mode=control \
+-rtc base=utc \
+-no-shutdown \
+-boot strict=on \
+-device '{"driver":"piix3-usb-uhci","id":"usb","bus":"pci.0","addr":"0x1.0x2"}' \
+-blockdev '{"driver":"host_device","filename":"/dev/HostVG/QEMUGuest1","node-name":"libvirt-1-storage","auto-read-only":true,"discard":"unmap"}' \
+-blockdev '{"node-name":"libvirt-1-format","read-only":false,"driver":"raw","file":"libvirt-1-storage"}' \
+-device '{"driver":"ide-hd","bus":"ide.0","unit":0,"drive":"libvirt-1-format","id":"ide0-0-0","bootindex":1}' \
+-audiodev '{"id":"audio1","driver":"none"}' \
+-device '{"driver":"virtio-balloon-pci","id":"balloon0","bus":"pci.0","addr":"0x2"}' \
+-sandbox on,obsolete=deny,elevateprivileges=deny,spawn=deny,resourcecontrol=deny \
+-msg timestamp=on
diff --git a/tests/qemuxml2argvdata/numad-auto-vcpu-static-numatune.x86_64-latest.args b/tests/qemuxml2argvdata/numad-auto-vcpu-static-numatune.x86_64-latest.args
index 9ddfb286b5..f1c49619db 100644
--- a/tests/qemuxml2argvdata/numad-auto-vcpu-static-numatune.x86_64-latest.args
+++ b/tests/qemuxml2argvdata/numad-auto-vcpu-static-numatune.x86_64-latest.args
@@ -16,7 +16,7 @@ XDG_CONFIG_HOME=/var/lib/libvirt/qemu/domain--1-QEMUGuest1/.config \
-m size=219136k \
-object '{"qom-type":"memory-backend-ram","id":"pc.ram","size":224395264,"host-nodes":[0],"policy":"interleave"}' \
-overcommit mem-lock=off \
--smp 2,sockets=2,dies=1,cores=1,threads=1 \
+-smp 2,sockets=2,dies=1,clusters=1,cores=1,threads=1 \
-uuid c7a5fdbd-edaf-9455-926a-d65c16db1809 \
-display none \
-no-user-config \
diff --git a/tests/qemuxml2argvdata/numad-static-memory-auto-vcpu.x86_64-latest.args b/tests/qemuxml2argvdata/numad-static-memory-auto-vcpu.x86_64-latest.args
index d4238f3d9e..138c8255f7 100644
--- a/tests/qemuxml2argvdata/numad-static-memory-auto-vcpu.x86_64-latest.args
+++ b/tests/qemuxml2argvdata/numad-static-memory-auto-vcpu.x86_64-latest.args
@@ -16,7 +16,7 @@ XDG_CONFIG_HOME=/var/lib/libvirt/qemu/domain--1-QEMUGuest1/.config \
-m size=219136k \
-object '{"qom-type":"memory-backend-ram","id":"pc.ram","size":224395264,"host-nodes":[0,1,2,3],"policy":"interleave"}' \
-overcommit mem-lock=off \
--smp 2,sockets=2,dies=1,cores=1,threads=1 \
+-smp 2,sockets=2,dies=1,clusters=1,cores=1,threads=1 \
-uuid c7a5fdbd-edaf-9455-926a-d65c16db1809 \
-display none \
-no-user-config \
diff --git a/tests/qemuxml2argvdata/numad-static-vcpu-no-numatune.x86_64-latest.args b/tests/qemuxml2argvdata/numad-static-vcpu-no-numatune.x86_64-latest.args
new file mode 100644
index 0000000000..76ca5c4bea
--- /dev/null
+++ b/tests/qemuxml2argvdata/numad-static-vcpu-no-numatune.x86_64-latest.args
@@ -0,0 +1,36 @@
+LC_ALL=C \
+PATH=/bin \
+HOME=/var/lib/libvirt/qemu/domain--1-QEMUGuest1 \
+USER=test \
+LOGNAME=test \
+XDG_DATA_HOME=/var/lib/libvirt/qemu/domain--1-QEMUGuest1/.local/share \
+XDG_CACHE_HOME=/var/lib/libvirt/qemu/domain--1-QEMUGuest1/.cache \
+XDG_CONFIG_HOME=/var/lib/libvirt/qemu/domain--1-QEMUGuest1/.config \
+/usr/bin/qemu-system-x86_64 \
+-name guest=QEMUGuest1,debug-threads=on \
+-S \
+-object '{"qom-type":"secret","id":"masterKey0","format":"raw","file":"/var/lib/libvirt/qemu/domain--1-QEMUGuest1/master-key.aes"}' \
+-machine pc,usb=off,dump-guest-core=off,memory-backend=pc.ram,acpi=off \
+-accel tcg \
+-cpu qemu64 \
+-m size=219136k \
+-object '{"qom-type":"memory-backend-ram","id":"pc.ram","size":224395264}' \
+-overcommit mem-lock=off \
+-smp 2,sockets=2,dies=1,clusters=1,cores=1,threads=1 \
+-uuid c7a5fdbd-edaf-9455-926a-d65c16db1809 \
+-display none \
+-no-user-config \
+-nodefaults \
+-chardev socket,id=charmonitor,fd=1729,server=on,wait=off \
+-mon chardev=charmonitor,id=monitor,mode=control \
+-rtc base=utc \
+-no-shutdown \
+-boot strict=on \
+-device '{"driver":"piix3-usb-uhci","id":"usb","bus":"pci.0","addr":"0x1.0x2"}' \
+-blockdev '{"driver":"host_device","filename":"/dev/HostVG/QEMUGuest1","node-name":"libvirt-1-storage","auto-read-only":true,"discard":"unmap"}' \
+-blockdev '{"node-name":"libvirt-1-format","read-only":false,"driver":"raw","file":"libvirt-1-storage"}' \
+-device '{"driver":"ide-hd","bus":"ide.0","unit":0,"drive":"libvirt-1-format","id":"ide0-0-0","bootindex":1}' \
+-audiodev '{"id":"audio1","driver":"none"}' \
+-device '{"driver":"virtio-balloon-pci","id":"balloon0","bus":"pci.0","addr":"0x2"}' \
+-sandbox on,obsolete=deny,elevateprivileges=deny,spawn=deny,resourcecontrol=deny \
+-msg timestamp=on
diff --git a/tests/qemuxml2argvdata/numad.x86_64-latest.args b/tests/qemuxml2argvdata/numad.x86_64-latest.args
index d4238f3d9e..138c8255f7 100644
--- a/tests/qemuxml2argvdata/numad.x86_64-latest.args
+++ b/tests/qemuxml2argvdata/numad.x86_64-latest.args
@@ -16,7 +16,7 @@ XDG_CONFIG_HOME=/var/lib/libvirt/qemu/domain--1-QEMUGuest1/.config \
-m size=219136k \
-object '{"qom-type":"memory-backend-ram","id":"pc.ram","size":224395264,"host-nodes":[0,1,2,3],"policy":"interleave"}' \
-overcommit mem-lock=off \
--smp 2,sockets=2,dies=1,cores=1,threads=1 \
+-smp 2,sockets=2,dies=1,clusters=1,cores=1,threads=1 \
-uuid c7a5fdbd-edaf-9455-926a-d65c16db1809 \
-display none \
-no-user-config \
diff --git a/tests/qemuxml2argvdata/numatune-auto-nodeset-invalid.x86_64-latest.args b/tests/qemuxml2argvdata/numatune-auto-nodeset-invalid.x86_64-latest.args
index 57a2b893f1..e35471d91b 100644
--- a/tests/qemuxml2argvdata/numatune-auto-nodeset-invalid.x86_64-latest.args
+++ b/tests/qemuxml2argvdata/numatune-auto-nodeset-invalid.x86_64-latest.args
@@ -16,7 +16,7 @@ XDG_CONFIG_HOME=/var/lib/libvirt/qemu/domain--1-QEMUGuest1/.config \
-m size=219136k \
-object '{"qom-type":"memory-backend-ram","id":"pc.ram","size":224395264,"host-nodes":[0,1,2,3],"policy":"preferred"}' \
-overcommit mem-lock=off \
--smp 2,sockets=2,dies=1,cores=1,threads=1 \
+-smp 2,sockets=2,dies=1,clusters=1,cores=1,threads=1 \
-uuid c7a5fdbd-edaf-9455-926a-d65c16db1809 \
-display none \
-no-user-config \
diff --git a/tests/qemuxml2argvdata/pci-expander-bus.x86_64-latest.args b/tests/qemuxml2argvdata/pci-expander-bus.x86_64-latest.args
index bf553a8e32..d3960731be 100644
--- a/tests/qemuxml2argvdata/pci-expander-bus.x86_64-latest.args
+++ b/tests/qemuxml2argvdata/pci-expander-bus.x86_64-latest.args
@@ -15,7 +15,7 @@ XDG_CONFIG_HOME=/var/lib/libvirt/qemu/domain--1-expander-test/.config \
-cpu qemu64 \
-m size=219136k \
-overcommit mem-lock=off \
--smp 16,sockets=2,dies=1,cores=4,threads=2 \
+-smp 16,sockets=2,dies=1,clusters=1,cores=4,threads=2 \
-object '{"qom-type":"memory-backend-ram","id":"ram-node0","size":112197632}' \
-numa node,nodeid=0,cpus=0-7,memdev=ram-node0 \
-object '{"qom-type":"memory-backend-ram","id":"ram-node1","size":112197632}' \
diff --git a/tests/qemuxml2argvdata/pcie-expander-bus.x86_64-latest.args b/tests/qemuxml2argvdata/pcie-expander-bus.x86_64-latest.args
index 3fb86c29c2..b179fadc27 100644
--- a/tests/qemuxml2argvdata/pcie-expander-bus.x86_64-latest.args
+++ b/tests/qemuxml2argvdata/pcie-expander-bus.x86_64-latest.args
@@ -15,7 +15,7 @@ XDG_CONFIG_HOME=/var/lib/libvirt/qemu/domain--1-pcie-expander-bus-te/.config \
-cpu qemu64 \
-m size=219136k \
-overcommit mem-lock=off \
--smp 16,sockets=2,dies=1,cores=4,threads=2 \
+-smp 16,sockets=2,dies=1,clusters=1,cores=4,threads=2 \
-object '{"qom-type":"memory-backend-ram","id":"ram-node0","size":112197632}' \
-numa node,nodeid=0,cpus=0-7,memdev=ram-node0 \
-object '{"qom-type":"memory-backend-ram","id":"ram-node1","size":112197632}' \
diff --git a/tests/qemuxml2argvdata/pseries-phb-numa-node.ppc64-latest.args b/tests/qemuxml2argvdata/pseries-phb-numa-node.ppc64-latest.args
index 7ffcb1d8c5..942540a296 100644
--- a/tests/qemuxml2argvdata/pseries-phb-numa-node.ppc64-latest.args
+++ b/tests/qemuxml2argvdata/pseries-phb-numa-node.ppc64-latest.args
@@ -15,7 +15,7 @@ XDG_CONFIG_HOME=/var/lib/libvirt/qemu/domain--1-QEMUGuest1/.config \
-cpu POWER9 \
-m size=2097152k \
-overcommit mem-lock=off \
--smp 8,sockets=2,dies=1,cores=1,threads=4 \
+-smp 8,sockets=2,dies=1,clusters=1,cores=1,threads=4 \
-object '{"qom-type":"memory-backend-ram","id":"ram-node0","size":1073741824,"host-nodes":[1],"policy":"bind"}' \
-numa node,nodeid=0,cpus=0-3,memdev=ram-node0 \
-object '{"qom-type":"memory-backend-ram","id":"ram-node1","size":1073741824,"host-nodes":[2],"policy":"bind"}' \
--
2.27.0
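
The changes above only affect the guest -smp command line. The host-side cluster data consumed elsewhere in this series ends up in the capabilities XML, the same format as the aarch64 test data added near the end of this file. As a hedged illustration (not part of the patch), a client could dump that XML with the existing virConnectGetCapabilities() API; the qemu:///system URI is an assumption.

#include <stdio.h>
#include <stdlib.h>
#include <libvirt/libvirt.h>

int main(void)
{
    virConnectPtr conn;
    char *caps;

    /* A read-only connection is sufficient for querying capabilities. */
    if (!(conn = virConnectOpenReadOnly("qemu:///system")))
        return EXIT_FAILURE;

    /* Host capabilities XML; with the full series applied, the per-CPU
     * topology reported here gains CPU cluster information as well. */
    if (!(caps = virConnectGetCapabilities(conn))) {
        virConnectClose(conn);
        return EXIT_FAILURE;
    }

    puts(caps);
    free(caps);
    virConnectClose(conn);
    return EXIT_SUCCESS;
}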

View File

@ -0,0 +1,32 @@
From 7461beaf732475406e39375ea8e1c6311140046f Mon Sep 17 00:00:00 2001
From: Feng Ni <fengni@huawei.com>
Date: Wed, 15 Apr 2020 11:14:35 +0800
Subject: [PATCH] qemu: add pointer check in qemuMonitorLastError
We hit a case where libvirtd crashes with a segmentation fault:
thread 1 is waiting for the object lock in qemuConnectMonitor while
the qemu process exits and sends an EOF event, so thread 2 invokes
qemuMonitorLastError with a NULL mon pointer.
Signed-off-by: Feng Ni <fengni@huawei.com>
Signed-off-by: Xu Yandong <xuyandong2@huawei.com>
---
src/qemu/qemu_monitor.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/qemu/qemu_monitor.c b/src/qemu/qemu_monitor.c
index ec586b9036..a69ead6109 100644
--- a/src/qemu/qemu_monitor.c
+++ b/src/qemu/qemu_monitor.c
@@ -893,7 +893,7 @@ qemuMonitorSend(qemuMonitor *mon,
virErrorPtr
qemuMonitorLastError(qemuMonitor *mon)
{
- if (mon->lastError.code == VIR_ERR_OK)
+ if (!mon || mon->lastError.code == VIR_ERR_OK)
return NULL;
return virErrorCopyNew(&mon->lastError);
--
2.27.0

View File

@ -0,0 +1,34 @@
From 9874c09393d9bff08b98e21e7931f8dcf15e89d6 Mon Sep 17 00:00:00 2001
From: Feng Ni <fengni@huawei.com>
Date: Wed, 15 Apr 2020 11:28:41 +0800
Subject: [PATCH] qemu: fix a concurrent operation situation
Migrating a VM while the guest OS is shutting down occasionally makes
the migration API never return: qemuMigrationSrcNBDStorageCopy can stay
in its while loop forever once qemu has exited.
Signed-off-by: Feng Ni <fengni@huawei.com>
Signed-off-by: Xu Yandong <xuyandong2@huawei.com>
---
src/qemu/qemu_migration.c | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index f9c34b72e8..7ae1cd7051 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -1250,6 +1250,11 @@ qemuMigrationSrcNBDStorageCopy(virQEMUDriver *driver,
if (rv < 0)
return -1;
+ if (!virDomainObjIsActive(vm)) {
+ VIR_ERROR(_("domain is no longer running, aborting migration"));
+ return -1;
+ }
+
if (vm->job->abortJob) {
vm->job->current->status = VIR_DOMAIN_JOB_STATUS_CANCELED;
virReportError(VIR_ERR_OPERATION_ABORTED, _("%1$s: %2$s"),
--
2.27.0

View File

@ -0,0 +1,46 @@
From de724c7b94cfd8049efed6349acf9e0fb643f289 Mon Sep 17 00:00:00 2001
From: AlexChen <alex.chen@huawei.com>
Date: Wed, 27 Mar 2024 05:20:21 +0800
Subject: [PATCH] test/commandtest: skip the test4 if the testcase is run in
the container env
In a container environment without an init process, the daemonized
child is never reaped, so test4 loops forever and the build hangs.
Skip the wait in that case.
Signed-off-by: AlexChen <alex.chen@huawei.com>
---
tests/commandtest.c | 10 ++++++++--
1 file changed, 8 insertions(+), 2 deletions(-)
diff --git a/tests/commandtest.c b/tests/commandtest.c
index aa108ce583..1adfc9de7f 100644
--- a/tests/commandtest.c
+++ b/tests/commandtest.c
@@ -211,6 +211,7 @@ static int test4(const void *unused G_GNUC_UNUSED)
g_autofree char *pidfile = virPidFileBuildPath(abs_builddir, "commandhelper");
pid_t pid;
int ret = -1;
+ int count = 0;
if (!pidfile)
goto cleanup;
@@ -227,8 +228,13 @@ static int test4(const void *unused G_GNUC_UNUSED)
printf("cannot read pidfile\n");
goto cleanup;
}
- while (kill(pid, 0) != -1)
- g_usleep(100*1000);
+ while (kill(pid, 0) != -1) {
+ if (count++ >= 600) {
+ printf("check time exceeded 60s, probably running in a container env, skipping this testcase\n");
+ break;
+ }
+ g_usleep(100*1000); /* 100ms */
+ }
ret = checkoutput("test4");
--
2.27.0

View File

@ -0,0 +1,914 @@
From 0ed886925444ec4d3c8fa6f41e721efab36a9db2 Mon Sep 17 00:00:00 2001
From: Andrea Bolognani <abologna@redhat.com>
Date: Fri, 5 Jan 2024 15:07:06 +0100
Subject: [PATCH] tests: Add hostcpudata for machine with CPU clusters
The data is taken from an HPE Apollo 70 machine, which uses
aarch64 CPUs. It is interesting for us because non-dummy
information about CPU clusters is exposed through sysfs.
In order to keep things reasonable, the data was manually
modified so that only 8 of the original 224 CPUs are included.
Care has been taken to ensure that the topology is otherwise
unaltered.
Signed-off-by: Andrea Bolognani <abologna@redhat.com>
Reviewed-by: Peter Krempa <pkrempa@redhat.com>
---
.../linux-basic-clusters/system/cpu | 1 +
.../linux-basic-clusters/system/node | 1 +
.../vircaps-aarch64-basic-clusters.xml | 39 ++++++++++
tests/vircaps2xmltest.c | 1 +
.../linux-aarch64-with-clusters.cpuinfo | 72 +++++++++++++++++++
.../linux-aarch64-with-clusters.expected | 1 +
.../cpu/cpu0/topology/cluster_cpus_list | 1 +
.../cpu/cpu0/topology/cluster_id | 1 +
.../cpu/cpu0/topology/core_cpus_list | 1 +
.../cpu/cpu0/topology/core_id | 1 +
.../cpu/cpu0/topology/core_siblings_list | 1 +
.../cpu/cpu0/topology/package_cpus_list | 1 +
.../cpu/cpu0/topology/physical_package_id | 1 +
.../cpu/cpu0/topology/thread_siblings_list | 1 +
.../cpu/cpu1/topology/cluster_cpus_list | 1 +
.../cpu/cpu1/topology/cluster_id | 1 +
.../cpu/cpu1/topology/core_cpus_list | 1 +
.../cpu/cpu1/topology/core_id | 1 +
.../cpu/cpu1/topology/core_siblings_list | 1 +
.../cpu/cpu1/topology/package_cpus_list | 1 +
.../cpu/cpu1/topology/physical_package_id | 1 +
.../cpu/cpu1/topology/thread_siblings_list | 1 +
.../cpu/cpu2/topology/cluster_cpus_list | 1 +
.../cpu/cpu2/topology/cluster_id | 1 +
.../cpu/cpu2/topology/core_cpus_list | 1 +
.../cpu/cpu2/topology/core_id | 1 +
.../cpu/cpu2/topology/core_siblings_list | 1 +
.../cpu/cpu2/topology/package_cpus_list | 1 +
.../cpu/cpu2/topology/physical_package_id | 1 +
.../cpu/cpu2/topology/thread_siblings_list | 1 +
.../cpu/cpu3/topology/cluster_cpus_list | 1 +
.../cpu/cpu3/topology/cluster_id | 1 +
.../cpu/cpu3/topology/core_cpus_list | 1 +
.../cpu/cpu3/topology/core_id | 1 +
.../cpu/cpu3/topology/core_siblings_list | 1 +
.../cpu/cpu3/topology/package_cpus_list | 1 +
.../cpu/cpu3/topology/physical_package_id | 1 +
.../cpu/cpu3/topology/thread_siblings_list | 1 +
.../cpu/cpu4/topology/cluster_cpus_list | 1 +
.../cpu/cpu4/topology/cluster_id | 1 +
.../cpu/cpu4/topology/core_cpus_list | 1 +
.../cpu/cpu4/topology/core_id | 1 +
.../cpu/cpu4/topology/core_siblings_list | 1 +
.../cpu/cpu4/topology/package_cpus_list | 1 +
.../cpu/cpu4/topology/physical_package_id | 1 +
.../cpu/cpu4/topology/thread_siblings_list | 1 +
.../cpu/cpu5/topology/cluster_cpus_list | 1 +
.../cpu/cpu5/topology/cluster_id | 1 +
.../cpu/cpu5/topology/core_cpus_list | 1 +
.../cpu/cpu5/topology/core_id | 1 +
.../cpu/cpu5/topology/core_siblings_list | 1 +
.../cpu/cpu5/topology/package_cpus_list | 1 +
.../cpu/cpu5/topology/physical_package_id | 1 +
.../cpu/cpu5/topology/thread_siblings_list | 1 +
.../cpu/cpu6/topology/cluster_cpus_list | 1 +
.../cpu/cpu6/topology/cluster_id | 1 +
.../cpu/cpu6/topology/core_cpus_list | 1 +
.../cpu/cpu6/topology/core_id | 1 +
.../cpu/cpu6/topology/core_siblings_list | 1 +
.../cpu/cpu6/topology/package_cpus_list | 1 +
.../cpu/cpu6/topology/physical_package_id | 1 +
.../cpu/cpu6/topology/thread_siblings_list | 1 +
.../cpu/cpu7/topology/cluster_cpus_list | 1 +
.../cpu/cpu7/topology/cluster_id | 1 +
.../cpu/cpu7/topology/core_cpus_list | 1 +
.../cpu/cpu7/topology/core_id | 1 +
.../cpu/cpu7/topology/core_siblings_list | 1 +
.../cpu/cpu7/topology/package_cpus_list | 1 +
.../cpu/cpu7/topology/physical_package_id | 1 +
.../cpu/cpu7/topology/thread_siblings_list | 1 +
.../linux-with-clusters/cpu/online | 1 +
.../linux-with-clusters/cpu/present | 1 +
.../linux-with-clusters/node/node0/cpu0 | 1 +
.../linux-with-clusters/node/node0/cpu1 | 1 +
.../linux-with-clusters/node/node0/cpu2 | 1 +
.../linux-with-clusters/node/node0/cpu3 | 1 +
.../linux-with-clusters/node/node0/cpulist | 1 +
.../linux-with-clusters/node/node1/cpu4 | 1 +
.../linux-with-clusters/node/node1/cpu5 | 1 +
.../linux-with-clusters/node/node1/cpu6 | 1 +
.../linux-with-clusters/node/node1/cpu7 | 1 +
.../linux-with-clusters/node/node1/cpulist | 1 +
.../linux-with-clusters/node/online | 1 +
.../linux-with-clusters/node/possible | 1 +
tests/virhostcputest.c | 1 +
85 files changed, 194 insertions(+)
create mode 120000 tests/vircaps2xmldata/linux-basic-clusters/system/cpu
create mode 120000 tests/vircaps2xmldata/linux-basic-clusters/system/node
create mode 100644 tests/vircaps2xmldata/vircaps-aarch64-basic-clusters.xml
create mode 100644 tests/virhostcpudata/linux-aarch64-with-clusters.cpuinfo
create mode 100644 tests/virhostcpudata/linux-aarch64-with-clusters.expected
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/cpu0/topology/cluster_cpus_list
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/cpu0/topology/cluster_id
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/cpu0/topology/core_cpus_list
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/cpu0/topology/core_id
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/cpu0/topology/core_siblings_list
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/cpu0/topology/package_cpus_list
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/cpu0/topology/physical_package_id
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/cpu0/topology/thread_siblings_list
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/cpu1/topology/cluster_cpus_list
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/cpu1/topology/cluster_id
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/cpu1/topology/core_cpus_list
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/cpu1/topology/core_id
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/cpu1/topology/core_siblings_list
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/cpu1/topology/package_cpus_list
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/cpu1/topology/physical_package_id
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/cpu1/topology/thread_siblings_list
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/cpu2/topology/cluster_cpus_list
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/cpu2/topology/cluster_id
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/cpu2/topology/core_cpus_list
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/cpu2/topology/core_id
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/cpu2/topology/core_siblings_list
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/cpu2/topology/package_cpus_list
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/cpu2/topology/physical_package_id
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/cpu2/topology/thread_siblings_list
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/cpu3/topology/cluster_cpus_list
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/cpu3/topology/cluster_id
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/cpu3/topology/core_cpus_list
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/cpu3/topology/core_id
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/cpu3/topology/core_siblings_list
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/cpu3/topology/package_cpus_list
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/cpu3/topology/physical_package_id
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/cpu3/topology/thread_siblings_list
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/cpu4/topology/cluster_cpus_list
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/cpu4/topology/cluster_id
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/cpu4/topology/core_cpus_list
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/cpu4/topology/core_id
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/cpu4/topology/core_siblings_list
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/cpu4/topology/package_cpus_list
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/cpu4/topology/physical_package_id
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/cpu4/topology/thread_siblings_list
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/cpu5/topology/cluster_cpus_list
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/cpu5/topology/cluster_id
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/cpu5/topology/core_cpus_list
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/cpu5/topology/core_id
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/cpu5/topology/core_siblings_list
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/cpu5/topology/package_cpus_list
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/cpu5/topology/physical_package_id
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/cpu5/topology/thread_siblings_list
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/cpu6/topology/cluster_cpus_list
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/cpu6/topology/cluster_id
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/cpu6/topology/core_cpus_list
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/cpu6/topology/core_id
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/cpu6/topology/core_siblings_list
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/cpu6/topology/package_cpus_list
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/cpu6/topology/physical_package_id
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/cpu6/topology/thread_siblings_list
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/cpu7/topology/cluster_cpus_list
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/cpu7/topology/cluster_id
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/cpu7/topology/core_cpus_list
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/cpu7/topology/core_id
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/cpu7/topology/core_siblings_list
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/cpu7/topology/package_cpus_list
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/cpu7/topology/physical_package_id
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/cpu7/topology/thread_siblings_list
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/online
create mode 100644 tests/virhostcpudata/linux-with-clusters/cpu/present
create mode 120000 tests/virhostcpudata/linux-with-clusters/node/node0/cpu0
create mode 120000 tests/virhostcpudata/linux-with-clusters/node/node0/cpu1
create mode 120000 tests/virhostcpudata/linux-with-clusters/node/node0/cpu2
create mode 120000 tests/virhostcpudata/linux-with-clusters/node/node0/cpu3
create mode 100644 tests/virhostcpudata/linux-with-clusters/node/node0/cpulist
create mode 120000 tests/virhostcpudata/linux-with-clusters/node/node1/cpu4
create mode 120000 tests/virhostcpudata/linux-with-clusters/node/node1/cpu5
create mode 120000 tests/virhostcpudata/linux-with-clusters/node/node1/cpu6
create mode 120000 tests/virhostcpudata/linux-with-clusters/node/node1/cpu7
create mode 100644 tests/virhostcpudata/linux-with-clusters/node/node1/cpulist
create mode 100644 tests/virhostcpudata/linux-with-clusters/node/online
create mode 100644 tests/virhostcpudata/linux-with-clusters/node/possible
diff --git a/tests/vircaps2xmldata/linux-basic-clusters/system/cpu b/tests/vircaps2xmldata/linux-basic-clusters/system/cpu
new file mode 120000
index 0000000000..f7354e3525
--- /dev/null
+++ b/tests/vircaps2xmldata/linux-basic-clusters/system/cpu
@@ -0,0 +1 @@
+../../../virhostcpudata/linux-with-clusters/cpu
\ No newline at end of file
diff --git a/tests/vircaps2xmldata/linux-basic-clusters/system/node b/tests/vircaps2xmldata/linux-basic-clusters/system/node
new file mode 120000
index 0000000000..57b972ce90
--- /dev/null
+++ b/tests/vircaps2xmldata/linux-basic-clusters/system/node
@@ -0,0 +1 @@
+../../../virhostcpudata/linux-with-clusters/node
\ No newline at end of file
diff --git a/tests/vircaps2xmldata/vircaps-aarch64-basic-clusters.xml b/tests/vircaps2xmldata/vircaps-aarch64-basic-clusters.xml
new file mode 100644
index 0000000000..fe61fc42cc
--- /dev/null
+++ b/tests/vircaps2xmldata/vircaps-aarch64-basic-clusters.xml
@@ -0,0 +1,39 @@
+<capabilities>
+
+ <host>
+ <cpu>
+ <arch>aarch64</arch>
+ </cpu>
+ <power_management/>
+ <iommu support='no'/>
+ <topology>
+ <cells num='2'>
+ <cell id='0'>
+ <memory unit='KiB'>1048576</memory>
+ <pages unit='KiB' size='4'>2048</pages>
+ <pages unit='KiB' size='2048'>4096</pages>
+ <pages unit='KiB' size='1048576'>6144</pages>
+ <cpus num='4'>
+ <cpu id='0' socket_id='36' die_id='0' core_id='0' siblings='0-1'/>
+ <cpu id='1' socket_id='36' die_id='0' core_id='0' siblings='0-1'/>
+ <cpu id='2' socket_id='36' die_id='0' core_id='1' siblings='2-3'/>
+ <cpu id='3' socket_id='36' die_id='0' core_id='1' siblings='2-3'/>
+ </cpus>
+ </cell>
+ <cell id='1'>
+ <memory unit='KiB'>2097152</memory>
+ <pages unit='KiB' size='4'>4096</pages>
+ <pages unit='KiB' size='2048'>6144</pages>
+ <pages unit='KiB' size='1048576'>8192</pages>
+ <cpus num='4'>
+ <cpu id='4' socket_id='3180' die_id='0' core_id='256' siblings='4-5'/>
+ <cpu id='5' socket_id='3180' die_id='0' core_id='256' siblings='4-5'/>
+ <cpu id='6' socket_id='3180' die_id='0' core_id='257' siblings='6-7'/>
+ <cpu id='7' socket_id='3180' die_id='0' core_id='257' siblings='6-7'/>
+ </cpus>
+ </cell>
+ </cells>
+ </topology>
+ </host>
+
+</capabilities>
diff --git a/tests/vircaps2xmltest.c b/tests/vircaps2xmltest.c
index 26a512e87f..2fdf694640 100644
--- a/tests/vircaps2xmltest.c
+++ b/tests/vircaps2xmltest.c
@@ -93,6 +93,7 @@ mymain(void)
DO_TEST_FULL("basic", VIR_ARCH_X86_64, false, false);
DO_TEST_FULL("basic", VIR_ARCH_AARCH64, true, false);
DO_TEST_FULL("basic-dies", VIR_ARCH_X86_64, false, false);
+ DO_TEST_FULL("basic-clusters", VIR_ARCH_AARCH64, false, false);
DO_TEST_FULL("caches", VIR_ARCH_X86_64, true, true);
diff --git a/tests/virhostcpudata/linux-aarch64-with-clusters.cpuinfo b/tests/virhostcpudata/linux-aarch64-with-clusters.cpuinfo
new file mode 100644
index 0000000000..94030201d2
--- /dev/null
+++ b/tests/virhostcpudata/linux-aarch64-with-clusters.cpuinfo
@@ -0,0 +1,72 @@
+processor : 0
+BogoMIPS : 400.00
+Features : fp asimd evtstrm aes pmull sha1 sha2 crc32 atomics cpuid asimdrdm
+CPU implementer : 0x43
+CPU architecture: 8
+CPU variant : 0x1
+CPU part : 0x0af
+CPU revision : 1
+
+processor : 1
+BogoMIPS : 400.00
+Features : fp asimd evtstrm aes pmull sha1 sha2 crc32 atomics cpuid asimdrdm
+CPU implementer : 0x43
+CPU architecture: 8
+CPU variant : 0x1
+CPU part : 0x0af
+CPU revision : 1
+
+processor : 2
+BogoMIPS : 400.00
+Features : fp asimd evtstrm aes pmull sha1 sha2 crc32 atomics cpuid asimdrdm
+CPU implementer : 0x43
+CPU architecture: 8
+CPU variant : 0x1
+CPU part : 0x0af
+CPU revision : 1
+
+processor : 3
+BogoMIPS : 400.00
+Features : fp asimd evtstrm aes pmull sha1 sha2 crc32 atomics cpuid asimdrdm
+CPU implementer : 0x43
+CPU architecture: 8
+CPU variant : 0x1
+CPU part : 0x0af
+CPU revision : 1
+
+processor : 4
+BogoMIPS : 400.00
+Features : fp asimd evtstrm aes pmull sha1 sha2 crc32 atomics cpuid asimdrdm
+CPU implementer : 0x43
+CPU architecture: 8
+CPU variant : 0x1
+CPU part : 0x0af
+CPU revision : 1
+
+processor : 5
+BogoMIPS : 400.00
+Features : fp asimd evtstrm aes pmull sha1 sha2 crc32 atomics cpuid asimdrdm
+CPU implementer : 0x43
+CPU architecture: 8
+CPU variant : 0x1
+CPU part : 0x0af
+CPU revision : 1
+
+processor : 6
+BogoMIPS : 400.00
+Features : fp asimd evtstrm aes pmull sha1 sha2 crc32 atomics cpuid asimdrdm
+CPU implementer : 0x43
+CPU architecture: 8
+CPU variant : 0x1
+CPU part : 0x0af
+CPU revision : 1
+
+processor : 7
+BogoMIPS : 400.00
+Features : fp asimd evtstrm aes pmull sha1 sha2 crc32 atomics cpuid asimdrdm
+CPU implementer : 0x43
+CPU architecture: 8
+CPU variant : 0x1
+CPU part : 0x0af
+CPU revision : 1
+
diff --git a/tests/virhostcpudata/linux-aarch64-with-clusters.expected b/tests/virhostcpudata/linux-aarch64-with-clusters.expected
new file mode 100644
index 0000000000..bf350bd40b
--- /dev/null
+++ b/tests/virhostcpudata/linux-aarch64-with-clusters.expected
@@ -0,0 +1 @@
+CPUs: 8/8, MHz: 0, Nodes: 2, Sockets: 1, Cores: 2, Threads: 2
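
For reference, the expected summary above follows directly from the sysfs
fixture: two NUMA nodes, each holding one package (physical_package_id 36
and 3180) split into two clusters of one dual-threaded core each, i.e.

  2 nodes x 1 socket x 2 cores x 2 threads = 8 CPUs

with non-contiguous cluster_id values (0/1 on one node, 256/257 on the
other).
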
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/cpu0/topology/cluster_cpus_list b/tests/virhostcpudata/linux-with-clusters/cpu/cpu0/topology/cluster_cpus_list
new file mode 100644
index 0000000000..8b0fab869c
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/cpu0/topology/cluster_cpus_list
@@ -0,0 +1 @@
+0-1
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/cpu0/topology/cluster_id b/tests/virhostcpudata/linux-with-clusters/cpu/cpu0/topology/cluster_id
new file mode 100644
index 0000000000..573541ac97
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/cpu0/topology/cluster_id
@@ -0,0 +1 @@
+0
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/cpu0/topology/core_cpus_list b/tests/virhostcpudata/linux-with-clusters/cpu/cpu0/topology/core_cpus_list
new file mode 100644
index 0000000000..8b0fab869c
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/cpu0/topology/core_cpus_list
@@ -0,0 +1 @@
+0-1
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/cpu0/topology/core_id b/tests/virhostcpudata/linux-with-clusters/cpu/cpu0/topology/core_id
new file mode 100644
index 0000000000..573541ac97
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/cpu0/topology/core_id
@@ -0,0 +1 @@
+0
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/cpu0/topology/core_siblings_list b/tests/virhostcpudata/linux-with-clusters/cpu/cpu0/topology/core_siblings_list
new file mode 100644
index 0000000000..40c7bb2f1a
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/cpu0/topology/core_siblings_list
@@ -0,0 +1 @@
+0-3
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/cpu0/topology/package_cpus_list b/tests/virhostcpudata/linux-with-clusters/cpu/cpu0/topology/package_cpus_list
new file mode 100644
index 0000000000..40c7bb2f1a
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/cpu0/topology/package_cpus_list
@@ -0,0 +1 @@
+0-3
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/cpu0/topology/physical_package_id b/tests/virhostcpudata/linux-with-clusters/cpu/cpu0/topology/physical_package_id
new file mode 100644
index 0000000000..7facc89938
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/cpu0/topology/physical_package_id
@@ -0,0 +1 @@
+36
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/cpu0/topology/thread_siblings_list b/tests/virhostcpudata/linux-with-clusters/cpu/cpu0/topology/thread_siblings_list
new file mode 100644
index 0000000000..8b0fab869c
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/cpu0/topology/thread_siblings_list
@@ -0,0 +1 @@
+0-1
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/cpu1/topology/cluster_cpus_list b/tests/virhostcpudata/linux-with-clusters/cpu/cpu1/topology/cluster_cpus_list
new file mode 100644
index 0000000000..8b0fab869c
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/cpu1/topology/cluster_cpus_list
@@ -0,0 +1 @@
+0-1
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/cpu1/topology/cluster_id b/tests/virhostcpudata/linux-with-clusters/cpu/cpu1/topology/cluster_id
new file mode 100644
index 0000000000..573541ac97
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/cpu1/topology/cluster_id
@@ -0,0 +1 @@
+0
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/cpu1/topology/core_cpus_list b/tests/virhostcpudata/linux-with-clusters/cpu/cpu1/topology/core_cpus_list
new file mode 100644
index 0000000000..8b0fab869c
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/cpu1/topology/core_cpus_list
@@ -0,0 +1 @@
+0-1
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/cpu1/topology/core_id b/tests/virhostcpudata/linux-with-clusters/cpu/cpu1/topology/core_id
new file mode 100644
index 0000000000..573541ac97
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/cpu1/topology/core_id
@@ -0,0 +1 @@
+0
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/cpu1/topology/core_siblings_list b/tests/virhostcpudata/linux-with-clusters/cpu/cpu1/topology/core_siblings_list
new file mode 100644
index 0000000000..40c7bb2f1a
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/cpu1/topology/core_siblings_list
@@ -0,0 +1 @@
+0-3
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/cpu1/topology/package_cpus_list b/tests/virhostcpudata/linux-with-clusters/cpu/cpu1/topology/package_cpus_list
new file mode 100644
index 0000000000..40c7bb2f1a
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/cpu1/topology/package_cpus_list
@@ -0,0 +1 @@
+0-3
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/cpu1/topology/physical_package_id b/tests/virhostcpudata/linux-with-clusters/cpu/cpu1/topology/physical_package_id
new file mode 100644
index 0000000000..7facc89938
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/cpu1/topology/physical_package_id
@@ -0,0 +1 @@
+36
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/cpu1/topology/thread_siblings_list b/tests/virhostcpudata/linux-with-clusters/cpu/cpu1/topology/thread_siblings_list
new file mode 100644
index 0000000000..8b0fab869c
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/cpu1/topology/thread_siblings_list
@@ -0,0 +1 @@
+0-1
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/cpu2/topology/cluster_cpus_list b/tests/virhostcpudata/linux-with-clusters/cpu/cpu2/topology/cluster_cpus_list
new file mode 100644
index 0000000000..7a9857542a
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/cpu2/topology/cluster_cpus_list
@@ -0,0 +1 @@
+2-3
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/cpu2/topology/cluster_id b/tests/virhostcpudata/linux-with-clusters/cpu/cpu2/topology/cluster_id
new file mode 100644
index 0000000000..d00491fd7e
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/cpu2/topology/cluster_id
@@ -0,0 +1 @@
+1
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/cpu2/topology/core_cpus_list b/tests/virhostcpudata/linux-with-clusters/cpu/cpu2/topology/core_cpus_list
new file mode 100644
index 0000000000..7a9857542a
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/cpu2/topology/core_cpus_list
@@ -0,0 +1 @@
+2-3
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/cpu2/topology/core_id b/tests/virhostcpudata/linux-with-clusters/cpu/cpu2/topology/core_id
new file mode 100644
index 0000000000..d00491fd7e
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/cpu2/topology/core_id
@@ -0,0 +1 @@
+1
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/cpu2/topology/core_siblings_list b/tests/virhostcpudata/linux-with-clusters/cpu/cpu2/topology/core_siblings_list
new file mode 100644
index 0000000000..40c7bb2f1a
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/cpu2/topology/core_siblings_list
@@ -0,0 +1 @@
+0-3
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/cpu2/topology/package_cpus_list b/tests/virhostcpudata/linux-with-clusters/cpu/cpu2/topology/package_cpus_list
new file mode 100644
index 0000000000..40c7bb2f1a
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/cpu2/topology/package_cpus_list
@@ -0,0 +1 @@
+0-3
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/cpu2/topology/physical_package_id b/tests/virhostcpudata/linux-with-clusters/cpu/cpu2/topology/physical_package_id
new file mode 100644
index 0000000000..7facc89938
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/cpu2/topology/physical_package_id
@@ -0,0 +1 @@
+36
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/cpu2/topology/thread_siblings_list b/tests/virhostcpudata/linux-with-clusters/cpu/cpu2/topology/thread_siblings_list
new file mode 100644
index 0000000000..7a9857542a
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/cpu2/topology/thread_siblings_list
@@ -0,0 +1 @@
+2-3
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/cpu3/topology/cluster_cpus_list b/tests/virhostcpudata/linux-with-clusters/cpu/cpu3/topology/cluster_cpus_list
new file mode 100644
index 0000000000..7a9857542a
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/cpu3/topology/cluster_cpus_list
@@ -0,0 +1 @@
+2-3
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/cpu3/topology/cluster_id b/tests/virhostcpudata/linux-with-clusters/cpu/cpu3/topology/cluster_id
new file mode 100644
index 0000000000..d00491fd7e
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/cpu3/topology/cluster_id
@@ -0,0 +1 @@
+1
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/cpu3/topology/core_cpus_list b/tests/virhostcpudata/linux-with-clusters/cpu/cpu3/topology/core_cpus_list
new file mode 100644
index 0000000000..7a9857542a
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/cpu3/topology/core_cpus_list
@@ -0,0 +1 @@
+2-3
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/cpu3/topology/core_id b/tests/virhostcpudata/linux-with-clusters/cpu/cpu3/topology/core_id
new file mode 100644
index 0000000000..d00491fd7e
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/cpu3/topology/core_id
@@ -0,0 +1 @@
+1
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/cpu3/topology/core_siblings_list b/tests/virhostcpudata/linux-with-clusters/cpu/cpu3/topology/core_siblings_list
new file mode 100644
index 0000000000..40c7bb2f1a
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/cpu3/topology/core_siblings_list
@@ -0,0 +1 @@
+0-3
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/cpu3/topology/package_cpus_list b/tests/virhostcpudata/linux-with-clusters/cpu/cpu3/topology/package_cpus_list
new file mode 100644
index 0000000000..40c7bb2f1a
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/cpu3/topology/package_cpus_list
@@ -0,0 +1 @@
+0-3
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/cpu3/topology/physical_package_id b/tests/virhostcpudata/linux-with-clusters/cpu/cpu3/topology/physical_package_id
new file mode 100644
index 0000000000..7facc89938
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/cpu3/topology/physical_package_id
@@ -0,0 +1 @@
+36
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/cpu3/topology/thread_siblings_list b/tests/virhostcpudata/linux-with-clusters/cpu/cpu3/topology/thread_siblings_list
new file mode 100644
index 0000000000..7a9857542a
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/cpu3/topology/thread_siblings_list
@@ -0,0 +1 @@
+2-3
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/cpu4/topology/cluster_cpus_list b/tests/virhostcpudata/linux-with-clusters/cpu/cpu4/topology/cluster_cpus_list
new file mode 100644
index 0000000000..e66d883ade
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/cpu4/topology/cluster_cpus_list
@@ -0,0 +1 @@
+4-5
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/cpu4/topology/cluster_id b/tests/virhostcpudata/linux-with-clusters/cpu/cpu4/topology/cluster_id
new file mode 100644
index 0000000000..9183bf03fc
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/cpu4/topology/cluster_id
@@ -0,0 +1 @@
+256
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/cpu4/topology/core_cpus_list b/tests/virhostcpudata/linux-with-clusters/cpu/cpu4/topology/core_cpus_list
new file mode 100644
index 0000000000..e66d883ade
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/cpu4/topology/core_cpus_list
@@ -0,0 +1 @@
+4-5
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/cpu4/topology/core_id b/tests/virhostcpudata/linux-with-clusters/cpu/cpu4/topology/core_id
new file mode 100644
index 0000000000..9183bf03fc
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/cpu4/topology/core_id
@@ -0,0 +1 @@
+256
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/cpu4/topology/core_siblings_list b/tests/virhostcpudata/linux-with-clusters/cpu/cpu4/topology/core_siblings_list
new file mode 100644
index 0000000000..93fccd3cc6
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/cpu4/topology/core_siblings_list
@@ -0,0 +1 @@
+4-7
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/cpu4/topology/package_cpus_list b/tests/virhostcpudata/linux-with-clusters/cpu/cpu4/topology/package_cpus_list
new file mode 100644
index 0000000000..93fccd3cc6
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/cpu4/topology/package_cpus_list
@@ -0,0 +1 @@
+4-7
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/cpu4/topology/physical_package_id b/tests/virhostcpudata/linux-with-clusters/cpu/cpu4/topology/physical_package_id
new file mode 100644
index 0000000000..58cecca290
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/cpu4/topology/physical_package_id
@@ -0,0 +1 @@
+3180
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/cpu4/topology/thread_siblings_list b/tests/virhostcpudata/linux-with-clusters/cpu/cpu4/topology/thread_siblings_list
new file mode 100644
index 0000000000..e66d883ade
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/cpu4/topology/thread_siblings_list
@@ -0,0 +1 @@
+4-5
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/cpu5/topology/cluster_cpus_list b/tests/virhostcpudata/linux-with-clusters/cpu/cpu5/topology/cluster_cpus_list
new file mode 100644
index 0000000000..e66d883ade
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/cpu5/topology/cluster_cpus_list
@@ -0,0 +1 @@
+4-5
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/cpu5/topology/cluster_id b/tests/virhostcpudata/linux-with-clusters/cpu/cpu5/topology/cluster_id
new file mode 100644
index 0000000000..9183bf03fc
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/cpu5/topology/cluster_id
@@ -0,0 +1 @@
+256
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/cpu5/topology/core_cpus_list b/tests/virhostcpudata/linux-with-clusters/cpu/cpu5/topology/core_cpus_list
new file mode 100644
index 0000000000..e66d883ade
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/cpu5/topology/core_cpus_list
@@ -0,0 +1 @@
+4-5
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/cpu5/topology/core_id b/tests/virhostcpudata/linux-with-clusters/cpu/cpu5/topology/core_id
new file mode 100644
index 0000000000..9183bf03fc
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/cpu5/topology/core_id
@@ -0,0 +1 @@
+256
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/cpu5/topology/core_siblings_list b/tests/virhostcpudata/linux-with-clusters/cpu/cpu5/topology/core_siblings_list
new file mode 100644
index 0000000000..93fccd3cc6
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/cpu5/topology/core_siblings_list
@@ -0,0 +1 @@
+4-7
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/cpu5/topology/package_cpus_list b/tests/virhostcpudata/linux-with-clusters/cpu/cpu5/topology/package_cpus_list
new file mode 100644
index 0000000000..93fccd3cc6
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/cpu5/topology/package_cpus_list
@@ -0,0 +1 @@
+4-7
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/cpu5/topology/physical_package_id b/tests/virhostcpudata/linux-with-clusters/cpu/cpu5/topology/physical_package_id
new file mode 100644
index 0000000000..58cecca290
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/cpu5/topology/physical_package_id
@@ -0,0 +1 @@
+3180
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/cpu5/topology/thread_siblings_list b/tests/virhostcpudata/linux-with-clusters/cpu/cpu5/topology/thread_siblings_list
new file mode 100644
index 0000000000..e66d883ade
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/cpu5/topology/thread_siblings_list
@@ -0,0 +1 @@
+4-5
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/cpu6/topology/cluster_cpus_list b/tests/virhostcpudata/linux-with-clusters/cpu/cpu6/topology/cluster_cpus_list
new file mode 100644
index 0000000000..fdd9f37517
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/cpu6/topology/cluster_cpus_list
@@ -0,0 +1 @@
+6-7
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/cpu6/topology/cluster_id b/tests/virhostcpudata/linux-with-clusters/cpu/cpu6/topology/cluster_id
new file mode 100644
index 0000000000..a700e79997
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/cpu6/topology/cluster_id
@@ -0,0 +1 @@
+257
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/cpu6/topology/core_cpus_list b/tests/virhostcpudata/linux-with-clusters/cpu/cpu6/topology/core_cpus_list
new file mode 100644
index 0000000000..fdd9f37517
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/cpu6/topology/core_cpus_list
@@ -0,0 +1 @@
+6-7
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/cpu6/topology/core_id b/tests/virhostcpudata/linux-with-clusters/cpu/cpu6/topology/core_id
new file mode 100644
index 0000000000..a700e79997
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/cpu6/topology/core_id
@@ -0,0 +1 @@
+257
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/cpu6/topology/core_siblings_list b/tests/virhostcpudata/linux-with-clusters/cpu/cpu6/topology/core_siblings_list
new file mode 100644
index 0000000000..93fccd3cc6
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/cpu6/topology/core_siblings_list
@@ -0,0 +1 @@
+4-7
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/cpu6/topology/package_cpus_list b/tests/virhostcpudata/linux-with-clusters/cpu/cpu6/topology/package_cpus_list
new file mode 100644
index 0000000000..93fccd3cc6
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/cpu6/topology/package_cpus_list
@@ -0,0 +1 @@
+4-7
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/cpu6/topology/physical_package_id b/tests/virhostcpudata/linux-with-clusters/cpu/cpu6/topology/physical_package_id
new file mode 100644
index 0000000000..58cecca290
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/cpu6/topology/physical_package_id
@@ -0,0 +1 @@
+3180
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/cpu6/topology/thread_siblings_list b/tests/virhostcpudata/linux-with-clusters/cpu/cpu6/topology/thread_siblings_list
new file mode 100644
index 0000000000..fdd9f37517
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/cpu6/topology/thread_siblings_list
@@ -0,0 +1 @@
+6-7
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/cpu7/topology/cluster_cpus_list b/tests/virhostcpudata/linux-with-clusters/cpu/cpu7/topology/cluster_cpus_list
new file mode 100644
index 0000000000..fdd9f37517
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/cpu7/topology/cluster_cpus_list
@@ -0,0 +1 @@
+6-7
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/cpu7/topology/cluster_id b/tests/virhostcpudata/linux-with-clusters/cpu/cpu7/topology/cluster_id
new file mode 100644
index 0000000000..a700e79997
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/cpu7/topology/cluster_id
@@ -0,0 +1 @@
+257
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/cpu7/topology/core_cpus_list b/tests/virhostcpudata/linux-with-clusters/cpu/cpu7/topology/core_cpus_list
new file mode 100644
index 0000000000..fdd9f37517
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/cpu7/topology/core_cpus_list
@@ -0,0 +1 @@
+6-7
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/cpu7/topology/core_id b/tests/virhostcpudata/linux-with-clusters/cpu/cpu7/topology/core_id
new file mode 100644
index 0000000000..a700e79997
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/cpu7/topology/core_id
@@ -0,0 +1 @@
+257
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/cpu7/topology/core_siblings_list b/tests/virhostcpudata/linux-with-clusters/cpu/cpu7/topology/core_siblings_list
new file mode 100644
index 0000000000..93fccd3cc6
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/cpu7/topology/core_siblings_list
@@ -0,0 +1 @@
+4-7
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/cpu7/topology/package_cpus_list b/tests/virhostcpudata/linux-with-clusters/cpu/cpu7/topology/package_cpus_list
new file mode 100644
index 0000000000..93fccd3cc6
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/cpu7/topology/package_cpus_list
@@ -0,0 +1 @@
+4-7
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/cpu7/topology/physical_package_id b/tests/virhostcpudata/linux-with-clusters/cpu/cpu7/topology/physical_package_id
new file mode 100644
index 0000000000..58cecca290
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/cpu7/topology/physical_package_id
@@ -0,0 +1 @@
+3180
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/cpu7/topology/thread_siblings_list b/tests/virhostcpudata/linux-with-clusters/cpu/cpu7/topology/thread_siblings_list
new file mode 100644
index 0000000000..fdd9f37517
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/cpu7/topology/thread_siblings_list
@@ -0,0 +1 @@
+6-7
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/online b/tests/virhostcpudata/linux-with-clusters/cpu/online
new file mode 100644
index 0000000000..5f4593c34a
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/online
@@ -0,0 +1 @@
+0-223
diff --git a/tests/virhostcpudata/linux-with-clusters/cpu/present b/tests/virhostcpudata/linux-with-clusters/cpu/present
new file mode 100644
index 0000000000..5f4593c34a
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/cpu/present
@@ -0,0 +1 @@
+0-223
diff --git a/tests/virhostcpudata/linux-with-clusters/node/node0/cpu0 b/tests/virhostcpudata/linux-with-clusters/node/node0/cpu0
new file mode 120000
index 0000000000..c841bea28b
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/node/node0/cpu0
@@ -0,0 +1 @@
+../../cpu/cpu0
\ No newline at end of file
diff --git a/tests/virhostcpudata/linux-with-clusters/node/node0/cpu1 b/tests/virhostcpudata/linux-with-clusters/node/node0/cpu1
new file mode 120000
index 0000000000..5f4536279e
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/node/node0/cpu1
@@ -0,0 +1 @@
+../../cpu/cpu1
\ No newline at end of file
diff --git a/tests/virhostcpudata/linux-with-clusters/node/node0/cpu2 b/tests/virhostcpudata/linux-with-clusters/node/node0/cpu2
new file mode 120000
index 0000000000..2dcca332ce
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/node/node0/cpu2
@@ -0,0 +1 @@
+../../cpu/cpu2
\ No newline at end of file
diff --git a/tests/virhostcpudata/linux-with-clusters/node/node0/cpu3 b/tests/virhostcpudata/linux-with-clusters/node/node0/cpu3
new file mode 120000
index 0000000000..c7690e5aa6
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/node/node0/cpu3
@@ -0,0 +1 @@
+../../cpu/cpu3
\ No newline at end of file
diff --git a/tests/virhostcpudata/linux-with-clusters/node/node0/cpulist b/tests/virhostcpudata/linux-with-clusters/node/node0/cpulist
new file mode 100644
index 0000000000..40c7bb2f1a
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/node/node0/cpulist
@@ -0,0 +1 @@
+0-3
diff --git a/tests/virhostcpudata/linux-with-clusters/node/node1/cpu4 b/tests/virhostcpudata/linux-with-clusters/node/node1/cpu4
new file mode 120000
index 0000000000..9e77a64eb4
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/node/node1/cpu4
@@ -0,0 +1 @@
+../../cpu/cpu4
\ No newline at end of file
diff --git a/tests/virhostcpudata/linux-with-clusters/node/node1/cpu5 b/tests/virhostcpudata/linux-with-clusters/node/node1/cpu5
new file mode 120000
index 0000000000..cc07c3b97b
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/node/node1/cpu5
@@ -0,0 +1 @@
+../../cpu/cpu5
\ No newline at end of file
diff --git a/tests/virhostcpudata/linux-with-clusters/node/node1/cpu6 b/tests/virhostcpudata/linux-with-clusters/node/node1/cpu6
new file mode 120000
index 0000000000..2e7576354f
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/node/node1/cpu6
@@ -0,0 +1 @@
+../../cpu/cpu6
\ No newline at end of file
diff --git a/tests/virhostcpudata/linux-with-clusters/node/node1/cpu7 b/tests/virhostcpudata/linux-with-clusters/node/node1/cpu7
new file mode 120000
index 0000000000..09e3f79b43
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/node/node1/cpu7
@@ -0,0 +1 @@
+../../cpu/cpu7
\ No newline at end of file
diff --git a/tests/virhostcpudata/linux-with-clusters/node/node1/cpulist b/tests/virhostcpudata/linux-with-clusters/node/node1/cpulist
new file mode 100644
index 0000000000..93fccd3cc6
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/node/node1/cpulist
@@ -0,0 +1 @@
+4-7
diff --git a/tests/virhostcpudata/linux-with-clusters/node/online b/tests/virhostcpudata/linux-with-clusters/node/online
new file mode 100644
index 0000000000..8b0fab869c
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/node/online
@@ -0,0 +1 @@
+0-1
diff --git a/tests/virhostcpudata/linux-with-clusters/node/possible b/tests/virhostcpudata/linux-with-clusters/node/possible
new file mode 100644
index 0000000000..8b0fab869c
--- /dev/null
+++ b/tests/virhostcpudata/linux-with-clusters/node/possible
@@ -0,0 +1 @@
+0-1
diff --git a/tests/virhostcputest.c b/tests/virhostcputest.c
index 0990013878..cf310cb4ce 100644
--- a/tests/virhostcputest.c
+++ b/tests/virhostcputest.c
@@ -273,6 +273,7 @@ mymain(void)
{"subcores3", VIR_ARCH_PPC64},
{"with-frequency", VIR_ARCH_S390X},
{"with-die", VIR_ARCH_X86_64},
+ {"with-clusters", VIR_ARCH_AARCH64},
};
if (virInitialize() < 0)
--
2.27.0


@@ -0,0 +1,453 @@
From 2a4546f32943b429ba74d3290143909afcc10a62 Mon Sep 17 00:00:00 2001
From: Andrea Bolognani <abologna@redhat.com>
Date: Mon, 8 Jan 2024 18:44:25 +0100
Subject: [PATCH] tests: Verify handling of CPU clusters in QMP data

Since aarch64 doesn't support CPU hotplug at the moment, we have
to get a bit creative.

While the 'query-cpus-fast' output is taken directly from a VM
configured as

  <vcpu current='7'>16</vcpu>
  <cpu mode='host-passthrough'>
    <topology sockets='2' dies='1' clusters='2' cores='2' threads='2'/>
  </cpu>

the 'query-hotpluggable-cpus' output is constructed by hand
starting from the former and using the 'x86-dies' test data as
a model.
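
As a quick cross-check of the data below, that topology multiplies out to

  2 sockets x 1 die x 2 clusters x 2 cores x 2 threads = 16 vCPUs

and with current='7' only the first seven vCPUs are online, which is why
'query-cpus-fast' lists /machine/unattached/device[0] through device[6]
while the remaining nine appear in 'query-hotpluggable-cpus' without a
qom-path.
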
Signed-off-by: Andrea Bolognani <abologna@redhat.com>
Reviewed-by: Peter Krempa <pkrempa@redhat.com>
---
...torjson-cpuinfo-aarch64-clusters-cpus.json | 88 +++++++++
...json-cpuinfo-aarch64-clusters-hotplug.json | 171 ++++++++++++++++++
...umonitorjson-cpuinfo-aarch64-clusters.data | 108 +++++++++++
tests/qemumonitorjsontest.c | 9 +-
4 files changed, 375 insertions(+), 1 deletion(-)
create mode 100644 tests/qemumonitorjsondata/qemumonitorjson-cpuinfo-aarch64-clusters-cpus.json
create mode 100644 tests/qemumonitorjsondata/qemumonitorjson-cpuinfo-aarch64-clusters-hotplug.json
create mode 100644 tests/qemumonitorjsondata/qemumonitorjson-cpuinfo-aarch64-clusters.data
diff --git a/tests/qemumonitorjsondata/qemumonitorjson-cpuinfo-aarch64-clusters-cpus.json b/tests/qemumonitorjsondata/qemumonitorjson-cpuinfo-aarch64-clusters-cpus.json
new file mode 100644
index 0000000000..817f65d109
--- /dev/null
+++ b/tests/qemumonitorjsondata/qemumonitorjson-cpuinfo-aarch64-clusters-cpus.json
@@ -0,0 +1,88 @@
+{
+ "return": [
+ {
+ "thread-id": 284700,
+ "props": {
+ "core-id": 0,
+ "thread-id": 0,
+ "socket-id": 0,
+ "cluster-id": 0
+ },
+ "qom-path": "/machine/unattached/device[0]",
+ "cpu-index": 0,
+ "target": "aarch64"
+ },
+ {
+ "thread-id": 284701,
+ "props": {
+ "core-id": 0,
+ "thread-id": 1,
+ "socket-id": 0,
+ "cluster-id": 0
+ },
+ "qom-path": "/machine/unattached/device[1]",
+ "cpu-index": 1,
+ "target": "aarch64"
+ },
+ {
+ "thread-id": 284702,
+ "props": {
+ "core-id": 1,
+ "thread-id": 0,
+ "socket-id": 0,
+ "cluster-id": 0
+ },
+ "qom-path": "/machine/unattached/device[2]",
+ "cpu-index": 2,
+ "target": "aarch64"
+ },
+ {
+ "thread-id": 284703,
+ "props": {
+ "core-id": 1,
+ "thread-id": 1,
+ "socket-id": 0,
+ "cluster-id": 0
+ },
+ "qom-path": "/machine/unattached/device[3]",
+ "cpu-index": 3,
+ "target": "aarch64"
+ },
+ {
+ "thread-id": 284704,
+ "props": {
+ "core-id": 0,
+ "thread-id": 0,
+ "socket-id": 0,
+ "cluster-id": 1
+ },
+ "qom-path": "/machine/unattached/device[4]",
+ "cpu-index": 4,
+ "target": "aarch64"
+ },
+ {
+ "thread-id": 284705,
+ "props": {
+ "core-id": 0,
+ "thread-id": 1,
+ "socket-id": 0,
+ "cluster-id": 1
+ },
+ "qom-path": "/machine/unattached/device[5]",
+ "cpu-index": 5,
+ "target": "aarch64"
+ },
+ {
+ "thread-id": 284706,
+ "props": {
+ "core-id": 1,
+ "thread-id": 0,
+ "socket-id": 0,
+ "cluster-id": 1
+ },
+ "qom-path": "/machine/unattached/device[6]",
+ "cpu-index": 6,
+ "target": "aarch64"
+ }
+ ]
+}
diff --git a/tests/qemumonitorjsondata/qemumonitorjson-cpuinfo-aarch64-clusters-hotplug.json b/tests/qemumonitorjsondata/qemumonitorjson-cpuinfo-aarch64-clusters-hotplug.json
new file mode 100644
index 0000000000..7ae30bf111
--- /dev/null
+++ b/tests/qemumonitorjsondata/qemumonitorjson-cpuinfo-aarch64-clusters-hotplug.json
@@ -0,0 +1,171 @@
+{
+ "return": [
+ {
+ "props": {
+ "core-id": 1,
+ "thread-id": 1,
+ "socket-id": 1,
+ "cluster-id": 1
+ },
+ "vcpus-count": 1,
+ "type": "host-arm-cpu"
+ },
+ {
+ "props": {
+ "core-id": 1,
+ "thread-id": 0,
+ "socket-id": 1,
+ "cluster-id": 1
+ },
+ "vcpus-count": 1,
+ "type": "host-arm-cpu"
+ },
+ {
+ "props": {
+ "core-id": 0,
+ "thread-id": 1,
+ "socket-id": 1,
+ "cluster-id": 1
+ },
+ "vcpus-count": 1,
+ "type": "host-arm-cpu"
+ },
+ {
+ "props": {
+ "core-id": 0,
+ "thread-id": 0,
+ "socket-id": 1,
+ "cluster-id": 1
+ },
+ "vcpus-count": 1,
+ "type": "host-arm-cpu"
+ },
+ {
+ "props": {
+ "core-id": 1,
+ "thread-id": 1,
+ "socket-id": 1,
+ "cluster-id": 0
+ },
+ "vcpus-count": 1,
+ "type": "host-arm-cpu"
+ },
+ {
+ "props": {
+ "core-id": 1,
+ "thread-id": 0,
+ "socket-id": 1,
+ "cluster-id": 0
+ },
+ "vcpus-count": 1,
+ "type": "host-arm-cpu"
+ },
+ {
+ "props": {
+ "core-id": 0,
+ "thread-id": 1,
+ "socket-id": 1,
+ "cluster-id": 0
+ },
+ "vcpus-count": 1,
+ "type": "host-arm-cpu"
+ },
+ {
+ "props": {
+ "core-id": 0,
+ "thread-id": 0,
+ "socket-id": 1,
+ "cluster-id": 0
+ },
+ "vcpus-count": 1,
+ "type": "host-arm-cpu"
+ },
+ {
+ "props": {
+ "core-id": 1,
+ "thread-id": 1,
+ "socket-id": 0,
+ "cluster-id": 1
+ },
+ "vcpus-count": 1,
+ "type": "host-arm-cpu"
+ },
+ {
+ "props": {
+ "core-id": 1,
+ "thread-id": 0,
+ "socket-id": 0,
+ "cluster-id": 1
+ },
+ "vcpus-count": 1,
+ "qom-path": "/machine/unattached/device[6]",
+ "type": "host-arm-cpu"
+ },
+ {
+ "props": {
+ "core-id": 0,
+ "thread-id": 1,
+ "socket-id": 0,
+ "cluster-id": 1
+ },
+ "vcpus-count": 1,
+ "qom-path": "/machine/unattached/device[5]",
+ "type": "host-arm-cpu"
+ },
+ {
+ "props": {
+ "core-id": 0,
+ "thread-id": 0,
+ "socket-id": 0,
+ "cluster-id": 1
+ },
+ "vcpus-count": 1,
+ "qom-path": "/machine/unattached/device[4]",
+ "type": "host-arm-cpu"
+ },
+ {
+ "props": {
+ "core-id": 1,
+ "thread-id": 1,
+ "socket-id": 0,
+ "cluster-id": 0
+ },
+ "vcpus-count": 1,
+ "qom-path": "/machine/unattached/device[3]",
+ "type": "host-arm-cpu"
+ },
+ {
+ "props": {
+ "core-id": 1,
+ "thread-id": 0,
+ "socket-id": 0,
+ "cluster-id": 0
+ },
+ "vcpus-count": 1,
+ "qom-path": "/machine/unattached/device[2]",
+ "type": "host-arm-cpu"
+ },
+ {
+ "props": {
+ "core-id": 0,
+ "thread-id": 1,
+ "socket-id": 0,
+ "cluster-id": 0
+ },
+ "vcpus-count": 1,
+ "qom-path": "/machine/unattached/device[1]",
+ "type": "host-arm-cpu"
+ },
+ {
+ "props": {
+ "core-id": 0,
+ "thread-id": 0,
+ "socket-id": 0,
+ "cluster-id": 0
+ },
+ "vcpus-count": 1,
+ "qom-path": "/machine/unattached/device[0]",
+ "type": "host-arm-cpu"
+ }
+ ]
+}
diff --git a/tests/qemumonitorjsondata/qemumonitorjson-cpuinfo-aarch64-clusters.data b/tests/qemumonitorjsondata/qemumonitorjson-cpuinfo-aarch64-clusters.data
new file mode 100644
index 0000000000..87e927e7a8
--- /dev/null
+++ b/tests/qemumonitorjsondata/qemumonitorjson-cpuinfo-aarch64-clusters.data
@@ -0,0 +1,108 @@
+[vcpu libvirt-id='0']
+ online=yes
+ hotpluggable=no
+ thread-id='284700'
+ enable-id='1'
+ query-cpus-id='0'
+ type='host-arm-cpu'
+ qom_path='/machine/unattached/device[0]'
+ topology: socket='0' cluster_id='0' core='0' thread='0' vcpus='1'
+[vcpu libvirt-id='1']
+ online=yes
+ hotpluggable=no
+ thread-id='284701'
+ enable-id='2'
+ query-cpus-id='1'
+ type='host-arm-cpu'
+ qom_path='/machine/unattached/device[1]'
+ topology: socket='0' cluster_id='0' core='0' thread='1' vcpus='1'
+[vcpu libvirt-id='2']
+ online=yes
+ hotpluggable=no
+ thread-id='284702'
+ enable-id='3'
+ query-cpus-id='2'
+ type='host-arm-cpu'
+ qom_path='/machine/unattached/device[2]'
+ topology: socket='0' cluster_id='0' core='1' thread='0' vcpus='1'
+[vcpu libvirt-id='3']
+ online=yes
+ hotpluggable=no
+ thread-id='284703'
+ enable-id='4'
+ query-cpus-id='3'
+ type='host-arm-cpu'
+ qom_path='/machine/unattached/device[3]'
+ topology: socket='0' cluster_id='0' core='1' thread='1' vcpus='1'
+[vcpu libvirt-id='4']
+ online=yes
+ hotpluggable=no
+ thread-id='284704'
+ enable-id='5'
+ query-cpus-id='4'
+ type='host-arm-cpu'
+ qom_path='/machine/unattached/device[4]'
+ topology: socket='0' cluster_id='1' core='0' thread='0' vcpus='1'
+[vcpu libvirt-id='5']
+ online=yes
+ hotpluggable=no
+ thread-id='284705'
+ enable-id='6'
+ query-cpus-id='5'
+ type='host-arm-cpu'
+ qom_path='/machine/unattached/device[5]'
+ topology: socket='0' cluster_id='1' core='0' thread='1' vcpus='1'
+[vcpu libvirt-id='6']
+ online=yes
+ hotpluggable=no
+ thread-id='284706'
+ enable-id='7'
+ query-cpus-id='6'
+ type='host-arm-cpu'
+ qom_path='/machine/unattached/device[6]'
+ topology: socket='0' cluster_id='1' core='1' thread='0' vcpus='1'
+[vcpu libvirt-id='7']
+ online=no
+ hotpluggable=yes
+ type='host-arm-cpu'
+ topology: socket='0' cluster_id='1' core='1' thread='1' vcpus='1'
+[vcpu libvirt-id='8']
+ online=no
+ hotpluggable=yes
+ type='host-arm-cpu'
+ topology: socket='1' cluster_id='0' core='0' thread='0' vcpus='1'
+[vcpu libvirt-id='9']
+ online=no
+ hotpluggable=yes
+ type='host-arm-cpu'
+ topology: socket='1' cluster_id='0' core='0' thread='1' vcpus='1'
+[vcpu libvirt-id='10']
+ online=no
+ hotpluggable=yes
+ type='host-arm-cpu'
+ topology: socket='1' cluster_id='0' core='1' thread='0' vcpus='1'
+[vcpu libvirt-id='11']
+ online=no
+ hotpluggable=yes
+ type='host-arm-cpu'
+ topology: socket='1' cluster_id='0' core='1' thread='1' vcpus='1'
+[vcpu libvirt-id='12']
+ online=no
+ hotpluggable=yes
+ type='host-arm-cpu'
+ topology: socket='1' cluster_id='1' core='0' thread='0' vcpus='1'
+[vcpu libvirt-id='13']
+ online=no
+ hotpluggable=yes
+ type='host-arm-cpu'
+ topology: socket='1' cluster_id='1' core='0' thread='1' vcpus='1'
+[vcpu libvirt-id='14']
+ online=no
+ hotpluggable=yes
+ type='host-arm-cpu'
+ topology: socket='1' cluster_id='1' core='1' thread='0' vcpus='1'
+[vcpu libvirt-id='15']
+ online=no
+ hotpluggable=yes
+ type='host-arm-cpu'
+ topology: socket='1' cluster_id='1' core='1' thread='1' vcpus='1'
diff --git a/tests/qemumonitorjsontest.c b/tests/qemumonitorjsontest.c
index d9ebb429e7..45cee23798 100644
--- a/tests/qemumonitorjsontest.c
+++ b/tests/qemumonitorjsontest.c
@@ -2262,13 +2262,16 @@ testQemuMonitorCPUInfoFormat(qemuMonitorCPUInfo *vcpus,
if (vcpu->qom_path)
virBufferAsprintf(&buf, "qom_path='%s'\n", vcpu->qom_path);
- if (vcpu->socket_id != -1 || vcpu->core_id != -1 ||
+ if (vcpu->socket_id != -1 || vcpu->die_id != -1 ||
+ vcpu->cluster_id != -1 || vcpu->core_id != -1 ||
vcpu->thread_id != -1 || vcpu->vcpus != 0) {
virBufferAddLit(&buf, "topology:");
if (vcpu->socket_id != -1)
virBufferAsprintf(&buf, " socket='%d'", vcpu->socket_id);
if (vcpu->die_id != -1)
virBufferAsprintf(&buf, " die='%d'", vcpu->die_id);
+ if (vcpu->cluster_id != -1)
+ virBufferAsprintf(&buf, " cluster_id='%d'", vcpu->cluster_id);
if (vcpu->core_id != -1)
virBufferAsprintf(&buf, " core='%d'", vcpu->core_id);
if (vcpu->thread_id != -1)
@@ -2919,6 +2922,10 @@ mymain(void)
DO_TEST_CPU_INFO("ppc64-hotplug-4", 24);
DO_TEST_CPU_INFO("ppc64-no-threads", 16);
+ /* aarch64 doesn't support CPU hotplug yet, so the data used in
+ * this test is partially synthetic */
+ DO_TEST_CPU_INFO("aarch64-clusters", 16);
+
DO_TEST_CPU_INFO("s390", 2);
--
2.27.0


@@ -0,0 +1,131 @@
From 399548b72ba5630981db3ac434c9d2c7a46fa5e7 Mon Sep 17 00:00:00 2001
From: AlexChen <alex.chen@huawei.com>
Date: Fri, 29 Mar 2024 16:16:09 +0800
Subject: [PATCH] vdpa: support vdpa device hot plug/unplug

support vdpa device hot plug/unplug

Signed-off-by: AlexChen <alex.chen@huawei.com>
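
For context, a vDPA host device handled by this hotplug path is described
with the vDPA hostdev subtype added by this patch series; a minimal sketch
(the exact element layout and the device path are illustrative, modelled on
the existing vDPA <interface> source):

  <hostdev mode='subsystem' type='vdpa'>
    <source dev='/dev/vhost-vdpa-0'/>
  </hostdev>

Such a definition would then be attached to or detached from a running
domain with virsh attach-device/detach-device --live.
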
---
src/qemu/qemu_hotplug.c | 84 +++++++++++++++++++++++++++++++++++++++++
1 file changed, 84 insertions(+)
diff --git a/src/qemu/qemu_hotplug.c b/src/qemu/qemu_hotplug.c
index 29c0856f62..87353d2149 100644
--- a/src/qemu/qemu_hotplug.c
+++ b/src/qemu/qemu_hotplug.c
@@ -2761,6 +2761,82 @@ qemuDomainAttachMediatedDevice(virQEMUDriver *driver,
return ret;
}
+static int
+qemuDomainAttachVDPADevice(virQEMUDriver *driver,
+ virDomainObj *vm,
+ virDomainHostdevDef *hostdev)
+{
+ int ret = -1;
+ g_autoptr(virJSONValue) devprops = NULL;
+ bool teardowncgroup = false;
+ bool teardownlabel = false;
+ bool teardowndevice = false;
+ bool teardownmemlock = false;
+ bool releaseaddr = false;
+ qemuDomainObjPrivate *priv = vm->privateData;
+ virDomainDeviceDef dev = { VIR_DOMAIN_DEVICE_HOSTDEV,
+ { .hostdev = hostdev } };
+
+ if (qemuDomainNamespaceSetupHostdev(vm, hostdev, &teardowndevice) < 0)
+ goto cleanup;
+
+ if (qemuSetupHostdevCgroup(vm, hostdev) < 0)
+ goto cleanup;
+ teardowncgroup = true;
+
+ if (qemuSecuritySetHostdevLabel(driver, vm, hostdev) < 0)
+ goto cleanup;
+ teardownlabel = true;
+
+ if (qemuDomainEnsurePCIAddress(vm, &dev) < 0)
+ goto cleanup;
+ releaseaddr = true;
+
+ qemuAssignDeviceHostdevAlias(vm->def, &hostdev->info->alias, -1);
+
+ if (!virDomainObjIsActive(vm)) {
+ virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+ _("guest unexpectedly quit during hotplug"));
+ goto cleanup;
+ }
+
+ if (!(devprops = qemuBuildHostdevVDPADevProps(hostdev)))
+ goto cleanup;
+
+ VIR_REALLOC_N(vm->def->hostdevs, vm->def->nhostdevs + 1);
+
+ if (qemuDomainAdjustMaxMemLockHostdev(vm, hostdev) < 0)
+ goto cleanup;
+ teardownmemlock = true;
+
+ qemuDomainObjEnterMonitor(vm);
+ ret = qemuMonitorAddDeviceProps(priv->mon, &devprops);
+ qemuDomainObjExitMonitor(vm);
+
+ virDomainAuditHostdev(vm, hostdev, "attach", ret == 0);
+ if (ret < 0)
+ goto cleanup;
+
+ vm->def->hostdevs[vm->def->nhostdevs++] = hostdev;
+
+ return 0;
+
+ cleanup:
+ if (teardowncgroup && qemuTeardownHostdevCgroup(vm, hostdev) < 0)
+ VIR_WARN("Unable to remove host device cgroup ACL on hotplug fail");
+ if (teardownlabel &&
+ qemuSecurityRestoreHostdevLabel(driver, vm, hostdev) < 0)
+ VIR_WARN("Unable to restore host device labelling on hotplug fail");
+ if (teardowndevice &&
+ qemuDomainNamespaceTeardownHostdev(vm, hostdev) < 0)
+ VIR_WARN("Unable to remove host device from /dev");
+ if (teardownmemlock && qemuDomainAdjustMaxMemLock(vm) < 0)
+ VIR_WARN("Unable to reset maximum locked memory on hotplug fail");
+ if (releaseaddr)
+ qemuDomainReleaseDeviceAddress(vm, hostdev->info);
+
+ return -1;
+}
static int
qemuDomainAttachHostDevice(virQEMUDriver *driver,
@@ -2806,6 +2882,9 @@ qemuDomainAttachHostDevice(virQEMUDriver *driver,
break;
case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_VDPA:
+ if (qemuDomainAttachVDPADevice(driver, vm, hostdev) < 0)
+ return -1;
+ break;
case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_LAST:
default:
virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
@@ -5689,6 +5768,7 @@ qemuDomainDetachPrepHostdev(virDomainObj *vm,
virDomainHostdevSubsysPCI *pcisrc = &subsys->u.pci;
virDomainHostdevSubsysSCSI *scsisrc = &subsys->u.scsi;
virDomainHostdevSubsysMediatedDev *mdevsrc = &subsys->u.mdev;
+ virDomainHostdevSubsysVDPA *vdpasrc = &subsys->u.vdpa;
virDomainHostdevDef *hostdev = NULL;
int idx;
@@ -5746,6 +5826,10 @@ qemuDomainDetachPrepHostdev(virDomainObj *vm,
case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_SCSI_HOST:
break;
case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_VDPA:
+ virReportError(VIR_ERR_DEVICE_MISSING,
+ _("vdpa device '%s' not found"),
+ vdpasrc->devpath);
+ break;
case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_LAST:
default:
virReportError(VIR_ERR_INTERNAL_ERROR,
--
2.27.0


@@ -0,0 +1,37 @@
From a74ff9ceea076a92a4dd3b9b26a31f14c513da0a Mon Sep 17 00:00:00 2001
From: AlexChen <alex.chen@huawei.com>
Date: Sat, 25 Nov 2023 09:55:08 +0800
Subject: [PATCH] vdpa: support vdpa device migrate

support vdpa device migrate

Signed-off-by: jiangdongxu <jiangdongxu1@huawei.com>
Signed-off-by: AlexChen <alex.chen@huawei.com>
---
src/qemu/qemu_migration.c | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index 0a89d102df..9c7eea0d6d 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -1304,14 +1304,13 @@ qemuMigrationSrcIsAllowedHostdev(const virDomainDef *def)
case VIR_DOMAIN_HOSTDEV_MODE_SUBSYS:
switch (hostdev->source.subsys.type) {
case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_USB:
- /* USB devices can be "migrated" */
+ case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_VDPA:
+ /* USB and VDPA devices can be "migrated" */
continue;
case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_SCSI:
case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_SCSI_HOST:
case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_MDEV:
- /* The vDPA devices don't support migration for now */
- case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_VDPA:
virReportError(VIR_ERR_OPERATION_UNSUPPORTED,
_("cannot migrate a domain with <hostdev mode='subsystem' type='%1$s'>"),
virDomainHostdevSubsysTypeToString(hostdev->source.subsys.type));
--
2.27.0