bind/backport-0009-Remove-taskmgr-excl_lock-fix-the-locking-for-taskmgr.patch

From 5be356760dd6855944234980f2fc0f13130267fb Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ond=C5=99ej=20Sur=C3=BD?= <ondrej@isc.org>
Date: Wed, 5 Jan 2022 13:06:37 +0100
Subject: [PATCH] Remove taskmgr->excl_lock, fix the locking for
taskmgr->exiting

While doing code review, it was found that taskmgr->exiting is set
under taskmgr->lock, but accessed under taskmgr->excl_lock in
isc_task_beginexclusive().
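
Distilled into a standalone sketch (hypothetical names, plain pthreads
instead of isc_mutex_t; an illustration, not BIND code): a field that
is written while holding one mutex but read while holding a different
one gets no protection from either lock. In BIND the field was an
atomic_bool, so this was inconsistent locking rather than an outright
data race, but with a plain bool, as below, the two threads never
synchronize at all:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct mgr {
    pthread_mutex_t lock;      /* the writer takes this lock...        */
    pthread_mutex_t excl_lock; /* ...but the reader takes this instead */
    bool exiting;
};

static void *
shutdown_thread(void *arg) {
    struct mgr *m = arg;

    pthread_mutex_lock(&m->lock);
    m->exiting = true;                 /* written under m->lock */
    pthread_mutex_unlock(&m->lock);
    return (NULL);
}

static void *
beginexclusive_thread(void *arg) {
    struct mgr *m = arg;
    bool exiting;

    pthread_mutex_lock(&m->excl_lock);
    exiting = m->exiting;              /* read under m->excl_lock, so the
                                          two threads never hold the same
                                          mutex: the lock protects nothing */
    pthread_mutex_unlock(&m->excl_lock);
    printf("exiting=%d\n", exiting);
    return (NULL);
}

int
main(void) {
    struct mgr m = { .exiting = false };
    pthread_t t1, t2;

    pthread_mutex_init(&m.lock, NULL);
    pthread_mutex_init(&m.excl_lock, NULL);
    pthread_create(&t1, NULL, shutdown_thread, &m);
    pthread_create(&t2, NULL, beginexclusive_thread, &m);
    pthread_join(t1, NULL);
    pthread_join(t2, NULL);
    pthread_mutex_destroy(&m.excl_lock);
    pthread_mutex_destroy(&m.lock);
    return (0);
}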

Additionally, before the change that moved running the tasks to the
netmgr, the task_ready() subroutine of isc_task_detach() would lock
mgr->lock, requiring mgr->excl to be protected by mgr->excl_lock to
prevent a deadlock in the code. After !4918 has been merged, this is
no longer true, and we can remove taskmgr->excl_lock and use
taskmgr->lock in its stead.
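
The deadlock being avoided there is the classic self-deadlock on a
non-recursive mutex: detaching the exclusive task while already
holding mgr->lock would have re-entered mgr->lock from task_ready().
A minimal sketch of that situation (hypothetical names; an
error-checking pthread mutex stands in for isc_mutex_t so the re-lock
is reported instead of hanging):

#define _XOPEN_SOURCE 700 /* for PTHREAD_MUTEX_ERRORCHECK */

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mgr_lock;

/* Stand-in for the old task_ready()/isc_task_detach() path, which
 * needed mgr->lock itself. */
static void
task_detach(void) {
    int r = pthread_mutex_lock(&mgr_lock); /* re-lock by the same thread */

    printf("inner lock: %s\n", r == EDEADLK ? "EDEADLK" : "ok");
    if (r == 0) {
        pthread_mutex_unlock(&mgr_lock);
    }
}

int
main(void) {
    pthread_mutexattr_t attr;

    /* An error-checking mutex reports the self-deadlock; a normal
     * (non-recursive) mutex would simply hang here. */
    pthread_mutexattr_init(&attr);
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
    pthread_mutex_init(&mgr_lock, &attr);
    pthread_mutexattr_destroy(&attr);

    pthread_mutex_lock(&mgr_lock); /* e.g. shutdown already holds mgr->lock */
    task_detach();                 /* would deadlock without excl_lock      */
    pthread_mutex_unlock(&mgr_lock);

    pthread_mutex_destroy(&mgr_lock);
    return (0);
}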

Solve both issues by removing the taskmgr->excl_lock and exclusively
using taskmgr->lock to protect both taskmgr->excl and
taskmgr->exiting, which now doesn't need to be atomic_bool because it
is always accessed from within the locked section.
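
The resulting pattern, reduced to a standalone sketch (hypothetical
names and plain pthreads again, not the actual isc_task code): a
single mutex guards both the exclusive-task pointer and the exiting
flag, so every access happens inside the same critical section and a
plain bool is sufficient.

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

typedef struct task task_t;    /* opaque stand-in for isc_task_t */

struct mgr {
    pthread_mutex_t lock;      /* protects both 'excl' and 'exiting' */
    task_t *excl;
    bool exiting;              /* plain bool: only touched under 'lock' */
};

/* Roughly the shape of isc_taskmgr_excltask() after this patch. */
static int
mgr_excltask(struct mgr *m, task_t **taskp) {
    int result;

    pthread_mutex_lock(&m->lock);
    if (m->excl != NULL) {
        *taskp = m->excl;              /* found the exclusive task */
        result = 0;
    } else if (m->exiting) {
        result = -2;                   /* shutting down */
    } else {
        result = -1;                   /* no exclusive task set */
    }
    pthread_mutex_unlock(&m->lock);

    return (result);
}

/* Roughly the shape of the shutdown path: drop excl and mark exiting,
 * both inside the same critical section. */
static void
mgr_shutdown(struct mgr *m) {
    pthread_mutex_lock(&m->lock);
    m->excl = NULL;
    m->exiting = true;
    pthread_mutex_unlock(&m->lock);
}

int
main(void) {
    struct mgr m = { .excl = NULL, .exiting = false };
    task_t *task = NULL;
    int result;

    pthread_mutex_init(&m.lock, NULL);
    mgr_shutdown(&m);
    result = mgr_excltask(&m, &task);  /* reports "shutting down" (-2) */
    pthread_mutex_destroy(&m.lock);
    return (result == -2 ? 0 : 1);
}
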
(cherry picked from commit e705f213cac8a79e1fa8c20ce20f2e7a28daf3f9)
Conflict: NA
Reference: https://gitlab.isc.org/isc-projects/bind9/-/commit/5be356760dd6855944234980f2fc0f13130267fb
---
lib/isc/task.c | 54 ++++++++++++++++----------------------------------
1 file changed, 17 insertions(+), 37 deletions(-)
diff --git a/lib/isc/task.c b/lib/isc/task.c
index 3e894f8dab..caf2c06c47 100644
--- a/lib/isc/task.c
+++ b/lib/isc/task.c
@@ -140,14 +140,7 @@ struct isc_taskmgr {
LIST(isc_task_t) tasks;
atomic_uint_fast32_t mode;
atomic_bool exclusive_req;
- atomic_bool exiting;
-
- /*
- * Multiple threads can read/write 'excl' at the same time, so we need
- * to protect the access. We can't use 'lock' since isc_task_detach()
- * will try to acquire it.
- */
- isc_mutex_t excl_lock;
+ bool exiting;
isc_task_t *excl;
};
@@ -254,13 +247,11 @@ isc_task_create_bound(isc_taskmgr_t *manager, unsigned int quantum,
INIT_LINK(task, link);
task->magic = TASK_MAGIC;
- exiting = false;
LOCK(&manager->lock);
- if (!atomic_load_relaxed(&manager->exiting)) {
+ exiting = manager->exiting;
+ if (!exiting) {
APPEND(manager->tasks, task, link);
atomic_fetch_add(&manager->tasks_count, 1);
- } else {
- exiting = true;
}
UNLOCK(&manager->lock);
@@ -956,7 +947,6 @@ manager_free(isc_taskmgr_t *manager) {
isc_nm_detach(&manager->netmgr);
isc_mutex_destroy(&manager->lock);
- isc_mutex_destroy(&manager->excl_lock);
manager->magic = 0;
isc_mem_putanddetach(&manager->mctx, manager, sizeof(*manager));
}
@@ -1000,7 +990,6 @@ isc__taskmgr_create(isc_mem_t *mctx, unsigned int default_quantum, isc_nm_t *nm,
*manager = (isc_taskmgr_t){ .magic = TASK_MANAGER_MAGIC };
isc_mutex_init(&manager->lock);
- isc_mutex_init(&manager->excl_lock);
if (default_quantum == 0) {
default_quantum = DEFAULT_DEFAULT_QUANTUM;
@@ -1012,7 +1001,6 @@ isc__taskmgr_create(isc_mem_t *mctx, unsigned int default_quantum, isc_nm_t *nm,
}
INIT_LIST(manager->tasks);
- atomic_init(&manager->exiting, false);
atomic_init(&manager->mode, isc_taskmgrmode_normal);
atomic_init(&manager->exclusive_req, false);
atomic_init(&manager->tasks_count, 0);
@@ -1041,15 +1029,6 @@ isc__taskmgr_shutdown(isc_taskmgr_t *manager) {
* that the startup thread is sleeping on.
*/
- /*
- * Detach the exclusive task before acquiring the manager lock
- */
- LOCK(&manager->excl_lock);
- if (manager->excl != NULL) {
- isc_task_detach((isc_task_t **)&manager->excl);
- }
- UNLOCK(&manager->excl_lock);
-
/*
* Unlike elsewhere, we're going to hold this lock a long time.
* We need to do so, because otherwise the list of tasks could
@@ -1058,14 +1037,16 @@ isc__taskmgr_shutdown(isc_taskmgr_t *manager) {
* This is also the only function where we will hold both the
* task manager lock and a task lock at the same time.
*/
-
LOCK(&manager->lock);
+ if (manager->excl != NULL) {
+ isc_task_detach((isc_task_t **)&manager->excl);
+ }
/*
* Make sure we only get called once.
*/
- INSIST(atomic_compare_exchange_strong(&manager->exiting,
- &(bool){ false }, true));
+ INSIST(manager->exiting == false);
+ manager->exiting = true;
/*
* Post shutdown event(s) to every task (if they haven't already been
@@ -1120,12 +1101,12 @@ isc_taskmgr_setexcltask(isc_taskmgr_t *mgr, isc_task_t *task) {
REQUIRE(task->threadid == 0);
UNLOCK(&task->lock);
- LOCK(&mgr->excl_lock);
+ LOCK(&mgr->lock);
if (mgr->excl != NULL) {
isc_task_detach(&mgr->excl);
}
isc_task_attach(task, &mgr->excl);
- UNLOCK(&mgr->excl_lock);
+ UNLOCK(&mgr->lock);
}
isc_result_t
@@ -1135,16 +1116,16 @@ isc_taskmgr_excltask(isc_taskmgr_t *mgr, isc_task_t **taskp) {
REQUIRE(VALID_MANAGER(mgr));
REQUIRE(taskp != NULL && *taskp == NULL);
- LOCK(&mgr->excl_lock);
+ LOCK(&mgr->lock);
if (mgr->excl != NULL) {
isc_task_attach(mgr->excl, taskp);
result = ISC_R_SUCCESS;
- } else if (atomic_load_relaxed(&mgr->exiting)) {
+ } else if (mgr->exiting) {
result = ISC_R_SHUTTINGDOWN;
} else {
result = ISC_R_NOTFOUND;
}
- UNLOCK(&mgr->excl_lock);
+ UNLOCK(&mgr->lock);
return (result);
}
@@ -1159,11 +1140,10 @@ isc_task_beginexclusive(isc_task_t *task) {
REQUIRE(task->state == task_state_running);
- LOCK(&manager->excl_lock);
- REQUIRE(task == task->manager->excl ||
- (atomic_load_relaxed(&task->manager->exiting) &&
- task->manager->excl == NULL));
- UNLOCK(&manager->excl_lock);
+ LOCK(&manager->lock);
+ REQUIRE(task == manager->excl ||
+ (manager->exiting && manager->excl == NULL));
+ UNLOCK(&manager->lock);
if (!atomic_compare_exchange_strong(&manager->exclusive_req,
&(bool){ false }, true))
--
2.23.0