libvirt/migration-multifd-pin-support-migration-multifd-thre.patch


libvirt update to version 9.10.0-4:
- docs: Document CPU clusters
- docs: Improve documentation for CPU topology
- tests: Verify handling of CPU clusters in QMP data
- qemu: Make monitor aware of CPU clusters
- qemu: Use CPU clusters for guests
- qemu: Introduce QEMU_CAPS_SMP_CLUSTERS
- conf: Allow specifying CPU clusters
- conf: Report CPU clusters in capabilities XML
- tests: Add hostcpudata for machine with CPU clusters
- cpu_map: add kunpeng-920 features to arm features
- cpu/aarch64: enable host-model cpu for AArch64 architecture
- conf/domain_conf: pin the retry_interval and retry_timeout parameters to xml
- nodedev: fix potential heap use after free
- libvirt/conf: Set default values of retry fields
- qemu: Support 'retry' BLOCK_IO_ERROR event
- libvirt: Add 'retry' support for error policy
- vdpa: support vdpa device migrate
- vdpa: support vdpa device hot plug/unplug
- hostdev: Introduce vDPA device to hostdev subsystem as a new subtype
- node_device: fix leak of DIR*
- migration/multifd-pin: support migration multifd thread pin
- migration/multifd-pin: add qemu monitor callback functions
- migration/migration-pin: add domainMigrationPid for qemuMonitorCallbacks
- migration/migration-pin: add migrationpin for migration parameters
- migration/migration-pin: add qemu monitor callback functions
- migration/migration-pin: add some migration/multiFd params
- qemu: add pointer check in qemuMonitorLastError
- qemu: fix a concurrent operation situation
- test/commandtest: skip test4 if the testcase is run in a container env

Signed-off-by: Jiabo Feng <fengjiabo1@huawei.com>
2024-04-02 20:56:45 +08:00
From 5ddedd900bf5556f0c01715148b387fa451d9399 Mon Sep 17 00:00:00 2001
From: zhengchuan <zhengchuan@huawei.com>
Date: Wed, 30 Nov 2022 16:47:30 +0800
Subject: [PATCH] migration/multifd-pin: support migration multifd thread pin
Support pinning migration multifd threads via configuration.

Signed-off-by: zhengchuan <zhengchuan@huawei.com>
---
src/qemu/qemu_migration.c | 2 +
src/qemu/qemu_process.c | 114 ++++++++++++++++++++++++++++++++++++++
src/qemu/qemu_process.h | 4 ++
3 files changed, 120 insertions(+)
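
The pinning itself is performed by qemuProcessSetMigthreadAffinity() and qemuProcessGetPcpumap(), which come from the companion migration-pin patches and are not part of this diff. As a rough, self-contained illustration of the primitive they build on (a minimal sketch assuming Linux sched_setaffinity(2); the helper below is hypothetical, not libvirt code):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

/* Pin the thread identified by 'tid' to the given physical CPUs.
 * tid == 0 means the calling thread. */
static int
pin_thread(pid_t tid, const int *cpus, size_t ncpus)
{
    cpu_set_t mask;
    size_t i;

    CPU_ZERO(&mask);
    for (i = 0; i < ncpus; i++)
        CPU_SET(cpus[i], &mask);

    if (sched_setaffinity(tid, sizeof(mask), &mask) < 0) {
        perror("sched_setaffinity");
        return -1;
    }
    return 0;
}

int main(void)
{
    int cpus[] = { 0, 1 };

    return pin_thread(0, cpus, 2) < 0 ? 1 : 0;
}

In the patched code the CPU set is a virBitmap taken from priv->pcpumap or built from priv->migrationThreadPinList (see the cleanup comment in qemuProcessHandleMigrationMultiFdPids below).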
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index 00dfd46ae7..41ce565ede 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -3363,6 +3363,8 @@ qemuMigrationDstPrepareFresh(virQEMUDriver *driver,
priv = vm->privateData;
priv->origname = g_strdup(origname);
VIR_FREE(priv->migrationPids);
+ VIR_FREE(priv->migrationMultiFdPids);
+ priv->migrationMultiFdCount = 0;
if (taint_hook) {
/* Domain XML has been altered by a hook script. */
diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
index 5be6710ea7..e85862333c 100644
--- a/src/qemu/qemu_process.c
+++ b/src/qemu/qemu_process.c
@@ -1438,6 +1438,58 @@ qemuProcessHandleSpiceMigrated(qemuMonitor *mon G_GNUC_UNUSED,
}
+static void
+qemuProcessHandleMigrationPinStatus(virDomainObj *vm, int status)
+{
+ qemuDomainObjPrivate *priv = vm->privateData;
+ if (vm->job->asyncJob != VIR_ASYNC_JOB_MIGRATION_OUT)
+ return;
+
+ switch (status) {
+ case QEMU_MONITOR_MIGRATION_STATUS_INACTIVE:
+ case QEMU_MONITOR_MIGRATION_STATUS_SETUP:
+ case QEMU_MONITOR_MIGRATION_STATUS_ACTIVE:
+ case QEMU_MONITOR_MIGRATION_STATUS_PRE_SWITCHOVER:
+ case QEMU_MONITOR_MIGRATION_STATUS_DEVICE:
+ case QEMU_MONITOR_MIGRATION_STATUS_POSTCOPY:
+ case QEMU_MONITOR_MIGRATION_STATUS_CANCELLING:
+ case QEMU_MONITOR_MIGRATION_STATUS_COMPLETED:
+ case QEMU_MONITOR_MIGRATION_STATUS_WAIT_UNPLUG:
+ break;
+ case QEMU_MONITOR_MIGRATION_STATUS_ERROR:
+ /*
+ * The migration thread is still running,
+ * so we cannot delete the migration cgroup yet.
+ */
+ VIR_FREE(priv->migrationPids);
+ VIR_FREE(priv->migrationMultiFdPids);
+ VIR_FREE(priv->migrationThreadPinList);
+ priv->migrationMultiFdCount = 0;
+ virBitmapFree(priv->pcpumap);
+ priv->pcpumap = NULL;
+ break;
+ case QEMU_MONITOR_MIGRATION_STATUS_CANCELLED:
+ VIR_FREE(priv->migrationPids);
+ VIR_FREE(priv->migrationMultiFdPids);
+ VIR_FREE(priv->migrationThreadPinList);
+ priv->migrationMultiFdCount = 0;
+ virBitmapFree(priv->pcpumap);
+ priv->pcpumap = NULL;
+ if (virCgroupDelThread(priv->cgroup,
+ VIR_CGROUP_THREAD_MIGRATION_THREAD, 0) < 0)
+ VIR_WARN("Failed to delete migration thread Cgroup!");
+ VIR_INFO("success to free pcpumap and migrationPids");
+ break;
+ default:
+ VIR_WARN("got unknown migration status'%s'",
+ qemuMonitorMigrationStatusTypeToString(status));
+ break;
+ }
+
+ return;
+}
+
+
static void
qemuProcessHandleMigrationStatus(qemuMonitor *mon G_GNUC_UNUSED,
virDomainObj *vm,
@@ -1553,6 +1605,8 @@ qemuProcessHandleMigrationStatus(qemuMonitor *mon G_GNUC_UNUSED,
break;
}
+ qemuProcessHandleMigrationPinStatus(vm, status);
+
cleanup:
virObjectUnlock(vm);
virObjectEventStateQueue(driver->domainEventState, event);
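
With this hunk, every MIGRATION status event also reaches qemuProcessHandleMigrationPinStatus(), whose ERROR and CANCELLED branches reset all pin-related state. A minimal GLib sketch of that free-and-reset pattern, with illustrative struct and field names (not libvirt's), assuming glib-2.0:

#include <glib.h>

typedef struct {
    char *migrationPids;        /* "/"-separated migration thread pids */
    char *migrationMultiFdPids; /* "/"-separated multifd thread pids */
    guint migrationMultiFdCount;
} MigrationPinState;

static void
migration_pin_state_reset(MigrationPinState *s)
{
    /* g_clear_pointer() frees and NULLs in one step, so a second call
     * on an already-reset state is a harmless no-op. */
    g_clear_pointer(&s->migrationPids, g_free);
    g_clear_pointer(&s->migrationMultiFdPids, g_free);
    s->migrationMultiFdCount = 0;
}

int main(void)
{
    MigrationPinState s = { g_strdup("1201"), g_strdup("1202/1203"), 2 };

    migration_pin_state_reset(&s); /* e.g. on MIGRATION_STATUS_CANCELLED */
    migration_pin_state_reset(&s); /* idempotent: safe to call again */
    return s.migrationMultiFdPids == NULL ? 0 : 1;
}

The real handler additionally keeps the migration cgroup alive on ERROR, since the migration thread may still be running inside it, and only deletes it on CANCELLED.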
@@ -1820,6 +1874,7 @@ static qemuMonitorCallbacks monitorCallbacks = {
.domainMemoryDeviceSizeChange = qemuProcessHandleMemoryDeviceSizeChange,
.domainDeviceUnplugError = qemuProcessHandleDeviceUnplugErr,
.domainMigrationPid = qemuProcessHandleMigrationPid,
+ .domainMigrationMultiFdPids = qemuProcessHandleMigrationMultiFdPids,
.domainNetdevStreamDisconnected = qemuProcessHandleNetdevStreamDisconnected,
};
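
The new domainMigrationMultiFdPids entry follows the usual qemuMonitorCallbacks pattern: the monitor decodes a QMP event and dispatches through the matching function pointer. A toy model of that dispatch, with hypothetical names:

#include <stdio.h>

/* Toy callback table, standing in for qemuMonitorCallbacks. */
typedef struct {
    void (*domainMigrationMultiFdPid)(int tid);
} DemoMonitorCallbacks;

static void
demo_handle_multifd_pid(int tid)
{
    printf("multifd thread %d reported by QEMU\n", tid);
}

static DemoMonitorCallbacks demoCallbacks = {
    .domainMigrationMultiFdPid = demo_handle_multifd_pid,
};

int main(void)
{
    /* Simulate the monitor receiving one event per multifd channel. */
    int tids[] = { 4242, 4243 };
    size_t i;

    for (i = 0; i < 2; i++) {
        if (demoCallbacks.domainMigrationMultiFdPid)
            demoCallbacks.domainMigrationMultiFdPid(tids[i]);
    }
    return 0;
}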
@@ -2860,6 +2915,65 @@ qemuProcessHandleMigrationPid(qemuMonitor *mon ATTRIBUTE_UNUSED,
}
+void
+qemuProcessHandleMigrationMultiFdPids(qemuMonitor *mon ATTRIBUTE_UNUSED,
+ virDomainObj *vm,
+ int mpid)
+{
+ qemuDomainObjPrivate *priv;
+ char *mpidOldStr = NULL;
+ char *mpidStr = NULL;
+ virDomainMigrationIDDefPtr migration = NULL;
+ virBitmap *pcpumap = NULL;
+ virObjectLock(vm);
+
+ VIR_INFO("Migrating domain %p %s, migration-multifd pid %d",
+ vm, vm->def->name, mpid);
+
+ priv = vm->privateData;
+ if (vm->job->asyncJob == VIR_ASYNC_JOB_NONE) {
+ VIR_DEBUG("got MIGRATION_MULTIFD_PID event without a migration job");
+ goto cleanup;
+ }
+
+ migration = g_malloc0(sizeof(*migration));
+ migration->thread_id = mpid;
+
+ if (qemuProcessSetupMigration(vm, migration) < 0) {
+ VIR_ERROR(_("fail to setup migration multiFd cgroup"));
+ goto cleanup;
+ }
+
+ mpidOldStr = priv->migrationMultiFdPids;
+ if (!mpidOldStr) {
+ mpidStr = g_strdup_printf("%d", mpid);
+ } else {
+ mpidStr = g_strdup_printf("%s/%d", mpidOldStr, mpid);
+ }
+
+ VIR_FREE(priv->migrationMultiFdPids);
+ priv->migrationMultiFdPids = mpidStr;
+ priv->migrationMultiFdCount++;
+
+ pcpumap = qemuProcessGetPcpumap(priv);
+
+ if (!pcpumap)
+ goto cleanup;
+
+ qemuProcessSetMigthreadAffinity(priv, pcpumap, mpid);
+
+ cleanup:
+ /*
+ * If pcpumap was built from priv->migrationThreadPinList rather
+ * than borrowed from priv->pcpumap, it must be freed here.
+ */
+ if (pcpumap != priv->pcpumap)
+ virBitmapFree(pcpumap);
+ virDomainMigrationIDDefFree(migration);
+ virObjectUnlock(vm);
+}
+
+
static char *
qemuProcessBuildPRHelperPidfilePathOld(virDomainObj *vm)
{
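
qemuProcessHandleMigrationMultiFdPids() accumulates each reported thread id into the "/"-separated string priv->migrationMultiFdPids. A standalone sketch of just that accumulation step, assuming GLib (the helper name is illustrative):

#include <glib.h>

/* Append one pid to a "/"-separated accumulator string, mirroring how
 * priv->migrationMultiFdPids grows by one entry per monitor event. */
static void
append_pid(char **pids, int pid)
{
    char *updated;

    if (*pids == NULL)
        updated = g_strdup_printf("%d", pid);
    else
        updated = g_strdup_printf("%s/%d", *pids, pid);

    g_free(*pids);
    *pids = updated;
}

int main(void)
{
    char *pids = NULL;

    append_pid(&pids, 1201);
    append_pid(&pids, 1202);
    append_pid(&pids, 1203);
    g_print("%s\n", pids); /* prints: 1201/1202/1203 */
    g_free(pids);
    return 0;
}

Keeping the pids in a single string keeps the reset paths simple: one VIR_FREE() in the ERROR/CANCELLED branches clears the whole list.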
diff --git a/src/qemu/qemu_process.h b/src/qemu/qemu_process.h
index fff976f6f7..69a240e1e8 100644
--- a/src/qemu/qemu_process.h
+++ b/src/qemu/qemu_process.h
@@ -262,6 +262,10 @@ void qemuProcessHandleMigrationPid(qemuMonitor *mon ATTRIBUTE_UNUSED,
virDomainObj *vm,
int mpid);
+void qemuProcessHandleMigrationMultiFdPids(qemuMonitor *mon ATTRIBUTE_UNUSED,
+ virDomainObj *vm,
+ int mpid);
+
void qemuProcessCleanupMigrationJob(virQEMUDriver *driver,
virDomainObj *vm);
--
2.27.0