From 765d4c48aeaad779008f82ff6643d9cdbe917bd1 Mon Sep 17 00:00:00 2001 From: yangjiaqi Date: Wed, 22 Mar 2023 09:31:04 +0800 Subject: [PATCH] cpuview: fix possible use-after-free in find_proc_stat_node Signed-off-by: yangjiaqi --- src/proc_cpuview.c | 36 ++++++++++++++++++++++++++++++++---- 1 file changed, 32 insertions(+), 4 deletions(-) diff --git a/src/proc_cpuview.c b/src/proc_cpuview.c index 75006a6..207a6df 100644 --- a/src/proc_cpuview.c +++ b/src/proc_cpuview.c @@ -171,6 +171,7 @@ static struct cg_proc_stat *add_proc_stat_node(struct cg_proc_stat *new_node) } out_rwlock_unlock: + pthread_mutex_lock(&rv->lock); pthread_rwlock_unlock(&head->lock); return move_ptr(rv); } @@ -224,6 +225,7 @@ static bool cgroup_supports(const char *controller, const char *cgroup, return faccessat(cfd, path, F_OK, 0) == 0; } +/* should be called with wr-locked list */ static struct cg_proc_stat *prune_proc_stat_list(struct cg_proc_stat *node) { struct cg_proc_stat *first = NULL; @@ -232,6 +234,31 @@ static struct cg_proc_stat *prune_proc_stat_list(struct cg_proc_stat *node) if (!cgroup_supports("cpu", node->cg, "cpu.shares")) { struct cg_proc_stat *cur = node; + /* + * We need to ensure that no one references this node, + * because we are going to remove it from the list and free memory. + * + * If we can't grab the lock then just keep this node for now. + */ + if (pthread_mutex_trylock(&cur->lock)) + goto next; + + /* + * Yes, we can put the lock back just after taking it, as we ensured + * that we are the only user of it right now. + * + * It follows from three facts: + * - we are under pthread_rwlock_wrlock(hash_table_bucket) + * - pthread_mutex_lock is taken by find_proc_stat_node() + * with pthread_rwlock_rdlock(hash_table_bucket) held. + * - pthread_mutex_lock is taken by add_proc_stat_node() + * with pthread_rwlock_wrlock(hash_table_bucket) held.
+ * + * It means that nobody can get a pointer to (cur) node in a parallel + * thread and all old users of (cur) node have released pthread_mutex_lock(cur). + */ + pthread_mutex_unlock(&cur->lock); + if (prev) prev->next = node->next; else @@ -242,6 +269,7 @@ static struct cg_proc_stat *prune_proc_stat_list(struct cg_proc_stat *node) free_proc_stat_node(cur); } else { +next: if (!first) first = node; prev = node; @@ -279,6 +307,7 @@ static struct cg_proc_stat *find_proc_stat_node(struct cg_proc_stat_head *head, { struct cg_proc_stat *node; + prune_proc_stat_history(); pthread_rwlock_rdlock(&head->lock); if (!head->next) { @@ -289,15 +318,16 @@ static struct cg_proc_stat *find_proc_stat_node(struct cg_proc_stat_head *head, node = head->next; do { - if (strcmp(cg, node->cg) == 0) + if (strcmp(cg, node->cg) == 0) { + pthread_mutex_lock(&node->lock); goto out; + } } while ((node = node->next)); node = NULL; out: pthread_rwlock_unlock(&head->lock); - prune_proc_stat_history(); return node; } @@ -318,8 +348,6 @@ static struct cg_proc_stat *find_or_create_proc_stat_node(struct cpuacct_usage * lxcfs_debug("New stat node (%d) for %s\n", cpu_count, cg); } - pthread_mutex_lock(&node->lock); - /* * If additional CPUs on the host have been enabled, CPU usage counter * arrays have to be expanded. -- 2.30.0