From 808cf7c45e187c1889867ac83d047abfdf81c7a3 Mon Sep 17 00:00:00 2001
From: xuhuijie <xuhuijie2@huawei.com>
Date: Fri, 14 Aug 2020 17:41:59 +0800
Subject: [PATCH] build extra libpthreadcond.so
Because the new condition variable implementation (referenced below)
shows performance degradation in multi-core scenarios, here is an
extra libpthreadcond.so that uses the old version of the functions.
You can use it by adding LD_PRELOAD=./libpthreadcond.so in front of
your program (e.g. LD_PRELOAD=./libpthreadcond.so ./test). Use
with-libpthreadcond to compile it. Warning: the 2.17 version does not
meet the POSIX standard; pay attention when using it.
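
As a concrete sketch (the x86_64 target and the sibling build
directory "build" below are only examples; adjust both arguments to
your tree):

    # build_libpthreadcondso.sh records arch/build_dir in
    # libpthreadcond_config and then runs make (see the script below).
    cd nptl_2_17
    sh build_libpthreadcondso.sh x86_64 build

    # The Makefile links the library into <build_dir>/nptl/, so an
    # existing binary can be switched to the old implementation with:
    LD_PRELOAD=../build/nptl/libpthreadcond.so ./test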
https://sourceware.org/git/?p=glibc.git;a=commit;h=ed19993b5b0d05d62cc883571519a67dae481a14
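
To check that the preloaded library really interposes the condition
variable symbols, the dynamic linker's binding trace can help (a
sketch; the exact output format differs between glibc versions):

    LD_DEBUG=bindings LD_PRELOAD=./libpthreadcond.so ./test 2>&1 \
      | grep 'pthread_cond'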
---
 nptl_2_17/Makefile | 52 +
 nptl_2_17/bits/pthreadtypes_2_17.h | 121 ++
 nptl_2_17/bits/thread-shared-types_2_17.h | 104 ++
 nptl_2_17/build_libpthreadcondso.sh | 9 +
 nptl_2_17/cancellation_2_17.c | 104 ++
 nptl_2_17/cleanup_compat_2_17.c | 50 +
 nptl_2_17/libpthreadcond-aarch64.map | 8 +
 nptl_2_17/libpthreadcond-x86_64.map | 8 +
 nptl_2_17/pthreadP_2_17.h | 620 +++++++++
 nptl_2_17/pthread_2_17.h | 1173 ++++++++++++++++++
 nptl_2_17/pthread_cond_broadcast_2_17.c | 94 ++
 nptl_2_17/pthread_cond_destroy_2_17.c | 85 ++
 nptl_2_17/pthread_cond_init_2_17.c | 50 +
 nptl_2_17/pthread_cond_signal_2_17.c | 82 ++
 nptl_2_17/pthread_cond_timedwait_2_17.c | 268 ++++
 nptl_2_17/pthread_cond_wait_2_17.c | 231 ++++
 nptl_2_17/pthread_condattr_getclock_2_17.c | 28 +
 nptl_2_17/pthread_condattr_getpshared_2_17.c | 28 +
 nptl_2_17/pthread_condattr_init_2_17.c | 34 +
 nptl_2_17/pthread_condattr_setclock_2_17.c | 45 +
 nptl_2_17/pthread_mutex_cond_lock_2_17.c | 21 +
 nptl_2_17/pthread_mutex_lock_2_17.c | 628 ++++++++++
 nptl_2_17/pthread_mutex_unlock_2_17.c | 360 ++++++
 nptl_2_17/pthreadtypes_2_17.h | 179 +++
 nptl_2_17/tpp_2_17.c | 195 +++
 nptl_2_17/unwind_2_17.c | 138 +++
 nptl_2_17/vars_2_17.c | 43 +
 27 files changed, 4758 insertions(+)
 create mode 100644 nptl_2_17/Makefile
 create mode 100644 nptl_2_17/bits/pthreadtypes_2_17.h
 create mode 100644 nptl_2_17/bits/thread-shared-types_2_17.h
 create mode 100644 nptl_2_17/build_libpthreadcondso.sh
 create mode 100644 nptl_2_17/cancellation_2_17.c
 create mode 100644 nptl_2_17/cleanup_compat_2_17.c
 create mode 100644 nptl_2_17/libpthreadcond-aarch64.map
 create mode 100644 nptl_2_17/libpthreadcond-x86_64.map
 create mode 100644 nptl_2_17/pthreadP_2_17.h
 create mode 100644 nptl_2_17/pthread_2_17.h
 create mode 100644 nptl_2_17/pthread_cond_broadcast_2_17.c
 create mode 100644 nptl_2_17/pthread_cond_destroy_2_17.c
 create mode 100644 nptl_2_17/pthread_cond_init_2_17.c
 create mode 100644 nptl_2_17/pthread_cond_signal_2_17.c
 create mode 100644 nptl_2_17/pthread_cond_timedwait_2_17.c
 create mode 100644 nptl_2_17/pthread_cond_wait_2_17.c
 create mode 100644 nptl_2_17/pthread_condattr_getclock_2_17.c
 create mode 100644 nptl_2_17/pthread_condattr_getpshared_2_17.c
 create mode 100644 nptl_2_17/pthread_condattr_init_2_17.c
 create mode 100644 nptl_2_17/pthread_condattr_setclock_2_17.c
 create mode 100644 nptl_2_17/pthread_mutex_cond_lock_2_17.c
 create mode 100644 nptl_2_17/pthread_mutex_lock_2_17.c
 create mode 100644 nptl_2_17/pthread_mutex_unlock_2_17.c
 create mode 100644 nptl_2_17/pthreadtypes_2_17.h
 create mode 100644 nptl_2_17/tpp_2_17.c
 create mode 100644 nptl_2_17/unwind_2_17.c
 create mode 100644 nptl_2_17/vars_2_17.c
diff --git a/nptl_2_17/Makefile b/nptl_2_17/Makefile
new file mode 100644
index 00000000..4c30c5f1
--- /dev/null
+++ b/nptl_2_17/Makefile
@@ -0,0 +1,52 @@
+include libpthreadcond_config
+subdir=libpthreadcond
+objdir=../$(build_dir)/
+
+
+ifdef subdir
+.. := ../
+endif
+
+objpfx := $(patsubst %//,%/,$(objdir)/$(subdir)/)
+common-objpfx = $(objdir)/
+common-objdir = $(objdir)
+
+sysdep_dir := $(..)sysdeps
+export sysdep_dir := $(sysdep_dir)
+
+include $(common-objpfx)soversions.mk
+include $(common-objpfx)config.make
+
+uses-callbacks = -fexceptions
+
+sysdirs := $(foreach D,$(config-sysdirs),$(firstword $(filter /%,$D) $(..)$D))
+
++sysdep_dirs = $(sysdirs)
++sysdep_dirs := $(objdir) $(+sysdep_dirs)
+
++sysdep-includes := $(foreach dir,$(+sysdep_dirs), $(addprefix -I,$(wildcard $(dir)/include) $(dir)))
+
+compile_obj = vars_2_17.os pthread_cond_wait_2_17.os pthread_cond_timedwait_2_17.os pthread_cond_signal_2_17.os pthread_cond_broadcast_2_17.os pthread_cond_init_2_17.os pthread_cond_destroy_2_17.os cleanup_compat_2_17.os unwind_2_17.os cancellation_2_17.os pthread_mutex_cond_lock_2_17.os pthread_mutex_lock_2_17.os pthread_mutex_unlock_2_17.os pthread_condattr_getclock_2_17.os pthread_condattr_getpshared_2_17.os pthread_condattr_init_2_17.os pthread_condattr_setclock_2_17.os tpp_2_17.os
+compile_obj_dir = $(foreach n,$(compile_obj),../$(build_dir)/nptl/$(n))
+
+exist_obj = lowlevellock.os lll_timedlock_wait.os pthread_mutex_conf.os
+ifeq (x86_64, $(arch))
+exist_obj += elision-lock.os elision-unlock.os elision-timed.os elision-trylock.os
+endif
+
+exist_obj_dir = $(foreach n,$(exist_obj),../$(build_dir)/nptl/$(n))
+
+CFLAGS = -c -std=gnu11 -fgnu89-inline -g -O2 -Wall -Wwrite-strings -Wundef -Werror -fmerge-all-constants -frounding-math -fno-stack-protector -Wstrict-prototypes -Wold-style-definition -fmath-errno -fPIC -ftls-model=initial-exec -DPIC -DSHARED -DTOP_NAMESPACE=glibc
+
+Headers = -I../include -I../$(build_dir)/nptl $(+sysdep-includes) -I../nptl_2_17 -I../nptl -I../libio -I../. -D_LIBC_REENTRANT -include ../$(build_dir)/libc-modules.h -include include/libc-symbols.h
+
+all: libpthreadcond.so
+
+libpthreadcond.so : $(compile_obj) libpthreadcond_pic.a
+ gcc -shared -static-libgcc -Wl,-O1 -Wl,-z,defs -Wl,-dynamic-linker=/usr/local/lib/$(ld.so-version) -B../$(build_dir)/csu/ -Wl,--version-script=libpthreadcond-$(arch).map -Wl,-soname=libpthreadcond.so.0 -Wl,-z,combreloc -Wl,-z,relro -Wl,--hash-style=both -L../$(build_dir) -L../$(build_dir)/math -L../$(build_dir)/elf -L../$(build_dir)/dlfcn -L../$(build_dir)/nss -L../$(build_dir)/nis -L../$(build_dir)/rt -L../$(build_dir)/resolv -L../$(build_dir)/mathvec -L../$(build_dir)/support -L../$(build_dir)/crypt -L../$(build_dir)/nptl -Wl,-rpath-link=../$(build_dir):../$(build_dir)/math:../$(build_dir)/elf:../$(build_dir)/dlfcn:../$(build_dir)/nss:../$(build_dir)/nis:../$(build_dir)/rt:../$(build_dir)/resolv:../$(build_dir)/mathvec:../$(build_dir)/support:../$(build_dir)/crypt:../$(build_dir)/nptl -o ../$(build_dir)/nptl/libpthreadcond.so ../$(build_dir)/csu/abi-note.o -Wl,--whole-archive ../$(build_dir)/nptl/libpthreadcond_pic.a -Wl,--no-whole-archive -Wl,--start-group ../$(build_dir)/libc.so ../$(build_dir)/libc_nonshared.a -Wl,--as-needed ../$(build_dir)/elf/ld.so -Wl,--no-as-needed -Wl,--end-group
+
+libpthreadcond_pic.a : $(compile_obj_dir) $(exist_obj_dir)
+ ar cruv ../$(build_dir)/nptl/$@ $^
+
+$(compile_obj) : %.os : %.c
+ gcc $< $(CFLAGS) $(Headers) -o ../$(build_dir)/nptl/$@ -MD -MP -MF ../$(build_dir)/nptl/$@.dt -MT ../$(build_dir)/nptl/$@
diff --git a/nptl_2_17/bits/pthreadtypes_2_17.h b/nptl_2_17/bits/pthreadtypes_2_17.h
new file mode 100644
index 00000000..f501ea4c
--- /dev/null
+++ b/nptl_2_17/bits/pthreadtypes_2_17.h
@@ -0,0 +1,121 @@
+/* Declaration of common pthread types for all architectures.
+ Copyright (C) 2017-2020 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+#ifndef _BITS_PTHREADTYPES_COMMON_H
+# define _BITS_PTHREADTYPES_COMMON_H 1
+
+/* For internal mutex and condition variable definitions. */
+#include <bits/thread-shared-types_2_17.h>
+
+/* Thread identifiers. The structure of the attribute type is not
+ exposed on purpose. */
+typedef unsigned long int pthread_t;
+
+
+/* Data structures for mutex handling. The structure of the attribute
+ type is not exposed on purpose. */
+typedef union
+{
+ char __size[__SIZEOF_PTHREAD_MUTEXATTR_T];
+ int __align;
+} pthread_mutexattr_t;
+
+
+/* Data structure for condition variable handling. The structure of
+ the attribute type is not exposed on purpose. */
+typedef union
+{
+ char __size[__SIZEOF_PTHREAD_CONDATTR_T];
+ int __align;
+} pthread_condattr_t;
+
+
+/* Keys for thread-specific data */
+typedef unsigned int pthread_key_t;
+
+
+/* Once-only execution */
+typedef int __ONCE_ALIGNMENT pthread_once_t;
+
+
+union pthread_attr_t
+{
+ char __size[__SIZEOF_PTHREAD_ATTR_T];
+ long int __align;
+};
+#ifndef __have_pthread_attr_t
+typedef union pthread_attr_t pthread_attr_t;
+# define __have_pthread_attr_t 1
+#endif
+
+
+typedef union
+{
+ struct __pthread_mutex_s __data;
+ char __size[__SIZEOF_PTHREAD_MUTEX_T];
+ long int __align;
+} pthread_mutex_t;
+
+
+typedef union
+{
+ struct __pthread_cond_s __data;
+ char __size[__SIZEOF_PTHREAD_COND_T];
+ __extension__ long long int __align;
+} pthread_cond_t;
+
+
+#if defined __USE_UNIX98 || defined __USE_XOPEN2K
+/* Data structure for reader-writer lock variable handling. The
+ structure of the attribute type is deliberately not exposed. */
+typedef union
+{
+ struct __pthread_rwlock_arch_t __data;
+ char __size[__SIZEOF_PTHREAD_RWLOCK_T];
+ long int __align;
+} pthread_rwlock_t;
+
+typedef union
+{
+ char __size[__SIZEOF_PTHREAD_RWLOCKATTR_T];
+ long int __align;
+} pthread_rwlockattr_t;
+#endif
+
+
+#ifdef __USE_XOPEN2K
+/* POSIX spinlock data type. */
+typedef volatile int pthread_spinlock_t;
+
+
+/* POSIX barriers data type. The structure of the type is
+ deliberately not exposed. */
+typedef union
+{
+ char __size[__SIZEOF_PTHREAD_BARRIER_T];
+ long int __align;
+} pthread_barrier_t;
+
+typedef union
+{
+ char __size[__SIZEOF_PTHREAD_BARRIERATTR_T];
+ int __align;
+} pthread_barrierattr_t;
+#endif
+
+#endif
diff --git a/nptl_2_17/bits/thread-shared-types_2_17.h b/nptl_2_17/bits/thread-shared-types_2_17.h
new file mode 100644
index 00000000..50e86261
--- /dev/null
+++ b/nptl_2_17/bits/thread-shared-types_2_17.h
@@ -0,0 +1,104 @@
+/* Common threading primitives definitions for both POSIX and C11.
+ Copyright (C) 2017-2020 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+#ifndef _THREAD_SHARED_TYPES_H
+#define _THREAD_SHARED_TYPES_H 1
+
+/* Arch-specific definitions. Each architecture must define the following
+ macros to define the expected sizes of pthread data types:
+
+ __SIZEOF_PTHREAD_ATTR_T - size of pthread_attr_t.
+ __SIZEOF_PTHREAD_MUTEX_T - size of pthread_mutex_t.
+ __SIZEOF_PTHREAD_MUTEXATTR_T - size of pthread_mutexattr_t.
+ __SIZEOF_PTHREAD_COND_T - size of pthread_cond_t.
+ __SIZEOF_PTHREAD_CONDATTR_T - size of pthread_condattr_t.
+ __SIZEOF_PTHREAD_RWLOCK_T - size of pthread_rwlock_t.
+ __SIZEOF_PTHREAD_RWLOCKATTR_T - size of pthread_rwlockattr_t.
+ __SIZEOF_PTHREAD_BARRIER_T - size of pthread_barrier_t.
+ __SIZEOF_PTHREAD_BARRIERATTR_T - size of pthread_barrierattr_t.
+
+ The additional macro defines any constraint for the lock alignment
+ inside the thread structures:
+
+ __LOCK_ALIGNMENT - for internal lock/futex usage.
+
+ Same idea but for the once locking primitive:
+
+ __ONCE_ALIGNMENT - for pthread_once_t/once_flag definition. */
+
+#include <bits/pthreadtypes-arch.h>
+
+
+/* Common definition of pthread_mutex_t. */
+
+typedef struct __pthread_internal_list
+{
+ struct __pthread_internal_list *__prev;
+ struct __pthread_internal_list *__next;
+} __pthread_list_t;
+
+typedef struct __pthread_internal_slist
+{
+ struct __pthread_internal_slist *__next;
+} __pthread_slist_t;
+
+/* Arch-specific mutex definitions. A generic implementation is provided
+ by sysdeps/nptl/bits/struct_mutex.h. If required, an architecture
+ can override it by defining:
+
+ 1. struct __pthread_mutex_s (used on both pthread_mutex_t and mtx_t
+ definition). It should contain at least the internal members
+ defined in the generic version.
+
+ 2. __LOCK_ALIGNMENT for any extra attribute for internal lock used with
+ atomic operations.
+
+ 3. The macro __PTHREAD_MUTEX_INITIALIZER used for static initialization.
+ It should initialize the mutex internal flag. */
+
+#include <bits/struct_mutex.h>
+
+/* Arch-specific read-write lock definitions. A generic implementation is
+ provided by struct_rwlock.h. If required, an architecture can override it
+ by defining:
+
+ 1. struct __pthread_rwlock_arch_t (used on pthread_rwlock_t definition).
+ It should contain at least the internal members defined in the
+ generic version.
+
+ 2. The macro __PTHREAD_RWLOCK_INITIALIZER used for static initialization.
+ It should initialize the rwlock internal type. */
+
+#include <bits/struct_rwlock.h>
+
+
+/* Common definition of pthread_cond_t. */
+
+struct __pthread_cond_s
+{
+ int __lock;
+ unsigned int __futex;
+ __extension__ unsigned long long int __total_seq;
+ __extension__ unsigned long long int __wakeup_seq;
+ __extension__ unsigned long long int __woken_seq;
+ void *__mutex;
+ unsigned int __nwaiters;
+ unsigned int __broadcast_seq;
+};
+
+#endif /* _THREAD_SHARED_TYPES_H */
diff --git a/nptl_2_17/build_libpthreadcondso.sh b/nptl_2_17/build_libpthreadcondso.sh
new file mode 100644
index 00000000..6997277b
--- /dev/null
+++ b/nptl_2_17/build_libpthreadcondso.sh
@@ -0,0 +1,9 @@
+#!/bin/sh
+build_arch=$1
+build_dir=$2
+config_dir=libpthreadcond_config
+
+echo arch=${build_arch} > ${config_dir}
+echo build_dir=${build_dir} >> ${config_dir}
+make
+rm -rf ${config_dir}
diff --git a/nptl_2_17/cancellation_2_17.c b/nptl_2_17/cancellation_2_17.c
new file mode 100644
index 00000000..644d83bf
--- /dev/null
+++ b/nptl_2_17/cancellation_2_17.c
@@ -0,0 +1,104 @@
+/* Copyright (C) 2002-2020 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+#include "pthreadP_2_17.h"
+#include <setjmp.h>
+#include <stdlib.h>
+#include <futex-internal.h>
+
+
+/* The next two functions are similar to pthread_setcanceltype() but
+ more specialized for the use in the cancelable functions like write().
+ They do not need to check parameters etc. These functions must be
+ AS-safe, with the exception of the actual cancellation, because they
+ are called by wrappers around AS-safe functions like write().*/
+int
+attribute_hidden
+__pthread_enable_asynccancel (void)
+{
+ struct pthread *self = THREAD_SELF;
+ int oldval = THREAD_GETMEM (self, cancelhandling);
+
+ while (1)
+ {
+ int newval = oldval | CANCELTYPE_BITMASK;
+
+ if (newval == oldval)
+ break;
+
+ int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
+ oldval);
+ if (__glibc_likely (curval == oldval))
+ {
+ if (CANCEL_ENABLED_AND_CANCELED_AND_ASYNCHRONOUS (newval))
+ {
+ THREAD_SETMEM (self, result, PTHREAD_CANCELED);
+ __do_cancel ();
+ }
+
+ break;
+ }
+
+ /* Prepare the next round. */
+ oldval = curval;
+ }
+
+ return oldval;
+}
+
+/* See the comment for __pthread_enable_asynccancel regarding
+ the AS-safety of this function. */
+void
+attribute_hidden
+__pthread_disable_asynccancel (int oldtype)
+{
+ /* If asynchronous cancellation was enabled before we do not have
+ anything to do. */
+ if (oldtype & CANCELTYPE_BITMASK)
+ return;
+
+ struct pthread *self = THREAD_SELF;
+ int newval;
+
+ int oldval = THREAD_GETMEM (self, cancelhandling);
+
+ while (1)
+ {
+ newval = oldval & ~CANCELTYPE_BITMASK;
+
+ int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
+ oldval);
+ if (__glibc_likely (curval == oldval))
+ break;
+
+ /* Prepare the next round. */
+ oldval = curval;
+ }
+
+ /* We cannot return when we are being canceled. Upon return the
+ thread might do things which would have to be undone. The
+ following loop should loop until the cancellation signal is
+ delivered. */
+ while (__builtin_expect ((newval & (CANCELING_BITMASK | CANCELED_BITMASK))
+ == CANCELING_BITMASK, 0))
+ {
+ futex_wait_simple ((unsigned int *) &self->cancelhandling, newval,
+ FUTEX_PRIVATE);
+ newval = THREAD_GETMEM (self, cancelhandling);
+ }
+}
diff --git a/nptl_2_17/cleanup_compat_2_17.c b/nptl_2_17/cleanup_compat_2_17.c
new file mode 100644
index 00000000..ccc55836
--- /dev/null
+++ b/nptl_2_17/cleanup_compat_2_17.c
@@ -0,0 +1,50 @@
+/* Copyright (C) 2002-2020 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+#include <stdlib.h>
+#include "pthreadP_2_17.h"
+
+
+void
+_pthread_cleanup_push (struct _pthread_cleanup_buffer *buffer,
+ void (*routine) (void *), void *arg)
+{
+ struct pthread *self = THREAD_SELF;
+
+ buffer->__routine = routine;
+ buffer->__arg = arg;
+ buffer->__prev = THREAD_GETMEM (self, cleanup);
+
+ THREAD_SETMEM (self, cleanup, buffer);
+}
+strong_alias (_pthread_cleanup_push, __pthread_cleanup_push)
+
+
+void
+_pthread_cleanup_pop (struct _pthread_cleanup_buffer *buffer, int execute)
+{
+ struct pthread *self __attribute ((unused)) = THREAD_SELF;
+
+ THREAD_SETMEM (self, cleanup, buffer->__prev);
+
+ /* If necessary call the cleanup routine after we removed the
+ current cleanup block from the list. */
+ if (execute)
+ buffer->__routine (buffer->__arg);
+}
+strong_alias (_pthread_cleanup_pop, __pthread_cleanup_pop)
diff --git a/nptl_2_17/libpthreadcond-aarch64.map b/nptl_2_17/libpthreadcond-aarch64.map
new file mode 100644
index 00000000..d970af06
--- /dev/null
+++ b/nptl_2_17/libpthreadcond-aarch64.map
@@ -0,0 +1,8 @@
+GLIBC_2.17 {
+ global:
+ pthread_cond_init; pthread_cond_destroy;
+ pthread_cond_signal; pthread_cond_broadcast;
+ pthread_cond_wait; pthread_cond_timedwait;
+ local:
+ *;
+};
diff --git a/nptl_2_17/libpthreadcond-x86_64.map b/nptl_2_17/libpthreadcond-x86_64.map
new file mode 100644
index 00000000..d7f23322
--- /dev/null
+++ b/nptl_2_17/libpthreadcond-x86_64.map
@@ -0,0 +1,8 @@
+GLIBC_2.3.2 {
+ global:
+ pthread_cond_init; pthread_cond_destroy;
+ pthread_cond_signal; pthread_cond_broadcast;
+ pthread_cond_wait; pthread_cond_timedwait;
+ local:
+ *;
+};
diff --git a/nptl_2_17/pthreadP_2_17.h b/nptl_2_17/pthreadP_2_17.h
new file mode 100644
index 00000000..e195221a
--- /dev/null
+++ b/nptl_2_17/pthreadP_2_17.h
@@ -0,0 +1,620 @@
+/* Copyright (C) 2002-2020 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+#ifndef _PTHREADP_H
+#define _PTHREADP_H 1
+
+#include <pthread_2_17.h>
+#include <setjmp.h>
+#include <stdbool.h>
+#include <sys/syscall.h>
+#include "descr.h"
+#include <tls.h>
+#include <lowlevellock.h>
+#include <stackinfo.h>
+#include <internaltypes.h>
+#include <pthread-functions.h>
+#include <atomic.h>
+#include <kernel-features.h>
+#include <errno.h>
+#include <internal-signals.h>
+#include "pthread_mutex_conf.h"
+
+
+/* Atomic operations on TLS memory. */
+#ifndef THREAD_ATOMIC_CMPXCHG_VAL
+# define THREAD_ATOMIC_CMPXCHG_VAL(descr, member, new, old) \
+ atomic_compare_and_exchange_val_acq (&(descr)->member, new, old)
+#endif
+
+#ifndef THREAD_ATOMIC_BIT_SET
+# define THREAD_ATOMIC_BIT_SET(descr, member, bit) \
+ atomic_bit_set (&(descr)->member, bit)
+#endif
+
+
+static inline short max_adaptive_count (void)
+{
+#if HAVE_TUNABLES
+ return __mutex_aconf.spin_count;
+#else
+ return DEFAULT_ADAPTIVE_COUNT;
+#endif
+}
+
+
+/* Magic cookie representing robust mutex with dead owner. */
+#define PTHREAD_MUTEX_INCONSISTENT INT_MAX
+/* Magic cookie representing not recoverable robust mutex. */
+#define PTHREAD_MUTEX_NOTRECOVERABLE (INT_MAX - 1)
+
+#define COND_NWAITERS_SHIFT 1
+
+/* Internal mutex type value. */
+enum
+{
+ PTHREAD_MUTEX_KIND_MASK_NP = 3,
+
+ PTHREAD_MUTEX_ELISION_NP = 256,
+ PTHREAD_MUTEX_NO_ELISION_NP = 512,
+
+ PTHREAD_MUTEX_ROBUST_NORMAL_NP = 16,
+ PTHREAD_MUTEX_ROBUST_RECURSIVE_NP
+ = PTHREAD_MUTEX_ROBUST_NORMAL_NP | PTHREAD_MUTEX_RECURSIVE_NP,
+ PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP
+ = PTHREAD_MUTEX_ROBUST_NORMAL_NP | PTHREAD_MUTEX_ERRORCHECK_NP,
+ PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP
+ = PTHREAD_MUTEX_ROBUST_NORMAL_NP | PTHREAD_MUTEX_ADAPTIVE_NP,
+ PTHREAD_MUTEX_PRIO_INHERIT_NP = 32,
+ PTHREAD_MUTEX_PI_NORMAL_NP
+ = PTHREAD_MUTEX_PRIO_INHERIT_NP | PTHREAD_MUTEX_NORMAL,
+ PTHREAD_MUTEX_PI_RECURSIVE_NP
+ = PTHREAD_MUTEX_PRIO_INHERIT_NP | PTHREAD_MUTEX_RECURSIVE_NP,
+ PTHREAD_MUTEX_PI_ERRORCHECK_NP
+ = PTHREAD_MUTEX_PRIO_INHERIT_NP | PTHREAD_MUTEX_ERRORCHECK_NP,
+ PTHREAD_MUTEX_PI_ADAPTIVE_NP
+ = PTHREAD_MUTEX_PRIO_INHERIT_NP | PTHREAD_MUTEX_ADAPTIVE_NP,
+ PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP
+ = PTHREAD_MUTEX_PRIO_INHERIT_NP | PTHREAD_MUTEX_ROBUST_NORMAL_NP,
+ PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP
+ = PTHREAD_MUTEX_PRIO_INHERIT_NP | PTHREAD_MUTEX_ROBUST_RECURSIVE_NP,
+ PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP
+ = PTHREAD_MUTEX_PRIO_INHERIT_NP | PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP,
+ PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP
+ = PTHREAD_MUTEX_PRIO_INHERIT_NP | PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP,
+ PTHREAD_MUTEX_PRIO_PROTECT_NP = 64,
+ PTHREAD_MUTEX_PP_NORMAL_NP
+ = PTHREAD_MUTEX_PRIO_PROTECT_NP | PTHREAD_MUTEX_NORMAL,
+ PTHREAD_MUTEX_PP_RECURSIVE_NP
+ = PTHREAD_MUTEX_PRIO_PROTECT_NP | PTHREAD_MUTEX_RECURSIVE_NP,
+ PTHREAD_MUTEX_PP_ERRORCHECK_NP
+ = PTHREAD_MUTEX_PRIO_PROTECT_NP | PTHREAD_MUTEX_ERRORCHECK_NP,
+ PTHREAD_MUTEX_PP_ADAPTIVE_NP
+ = PTHREAD_MUTEX_PRIO_PROTECT_NP | PTHREAD_MUTEX_ADAPTIVE_NP,
+ PTHREAD_MUTEX_ELISION_FLAGS_NP
+ = PTHREAD_MUTEX_ELISION_NP | PTHREAD_MUTEX_NO_ELISION_NP,
+
+ PTHREAD_MUTEX_TIMED_ELISION_NP =
+ PTHREAD_MUTEX_TIMED_NP | PTHREAD_MUTEX_ELISION_NP,
+ PTHREAD_MUTEX_TIMED_NO_ELISION_NP =
+ PTHREAD_MUTEX_TIMED_NP | PTHREAD_MUTEX_NO_ELISION_NP,
+};
+#define PTHREAD_MUTEX_PSHARED_BIT 128
+
+/* See concurrency notes regarding __kind in struct __pthread_mutex_s
+ in sysdeps/nptl/bits/thread-shared-types.h. */
+#define PTHREAD_MUTEX_TYPE(m) \
+ (atomic_load_relaxed (&((m)->__data.__kind)) & 127)
+/* Don't include NO_ELISION, as that type is always the same
+ as the underlying lock type. */
+#define PTHREAD_MUTEX_TYPE_ELISION(m) \
+ (atomic_load_relaxed (&((m)->__data.__kind)) \
+ & (127 | PTHREAD_MUTEX_ELISION_NP))
+
+#if LLL_PRIVATE == 0 && LLL_SHARED == 128
+# define PTHREAD_MUTEX_PSHARED(m) \
+ (atomic_load_relaxed (&((m)->__data.__kind)) & 128)
+#else
+# define PTHREAD_MUTEX_PSHARED(m) \
+ ((atomic_load_relaxed (&((m)->__data.__kind)) & 128) \
+ ? LLL_SHARED : LLL_PRIVATE)
+#endif
+
+/* The kernel when waking robust mutexes on exit never uses
+ FUTEX_PRIVATE_FLAG FUTEX_WAKE. */
+#define PTHREAD_ROBUST_MUTEX_PSHARED(m) LLL_SHARED
+
+/* Ceiling in __data.__lock. __data.__lock is signed, so don't
+ use the MSB bit in there, but in the mask also include that bit,
+ so that the compiler can optimize & PTHREAD_MUTEX_PRIO_CEILING_MASK
+ masking if the value is then shifted down by
+ PTHREAD_MUTEX_PRIO_CEILING_SHIFT. */
+#define PTHREAD_MUTEX_PRIO_CEILING_SHIFT 19
+#define PTHREAD_MUTEX_PRIO_CEILING_MASK 0xfff80000
+
+
+/* Flags in mutex attr. */
+#define PTHREAD_MUTEXATTR_PROTOCOL_SHIFT 28
+#define PTHREAD_MUTEXATTR_PROTOCOL_MASK 0x30000000
+#define PTHREAD_MUTEXATTR_PRIO_CEILING_SHIFT 12
+#define PTHREAD_MUTEXATTR_PRIO_CEILING_MASK 0x00fff000
+#define PTHREAD_MUTEXATTR_FLAG_ROBUST 0x40000000
+#define PTHREAD_MUTEXATTR_FLAG_PSHARED 0x80000000
+#define PTHREAD_MUTEXATTR_FLAG_BITS \
+ (PTHREAD_MUTEXATTR_FLAG_ROBUST | PTHREAD_MUTEXATTR_FLAG_PSHARED \
+ | PTHREAD_MUTEXATTR_PROTOCOL_MASK | PTHREAD_MUTEXATTR_PRIO_CEILING_MASK)
+
+
+/* For the following, see pthread_rwlock_common.c. */
+#define PTHREAD_RWLOCK_WRPHASE 1
+#define PTHREAD_RWLOCK_WRLOCKED 2
+#define PTHREAD_RWLOCK_RWAITING 4
+#define PTHREAD_RWLOCK_READER_SHIFT 3
+#define PTHREAD_RWLOCK_READER_OVERFLOW ((unsigned int) 1 \
+ << (sizeof (unsigned int) * 8 - 1))
+#define PTHREAD_RWLOCK_WRHANDOVER ((unsigned int) 1 \
+ << (sizeof (unsigned int) * 8 - 1))
+#define PTHREAD_RWLOCK_FUTEX_USED 2
+
+
+/* Bits used in robust mutex implementation. */
+#define FUTEX_WAITERS 0x80000000
+#define FUTEX_OWNER_DIED 0x40000000
+#define FUTEX_TID_MASK 0x3fffffff
+
+
+/* pthread_once definitions. See __pthread_once for how these are used. */
+#define __PTHREAD_ONCE_INPROGRESS 1
+#define __PTHREAD_ONCE_DONE 2
+#define __PTHREAD_ONCE_FORK_GEN_INCR 4
+
+/* Attribute to indicate thread creation was issued from C11 thrd_create. */
+#define ATTR_C11_THREAD ((void*)(uintptr_t)-1)
+
+#if 0
+/* Condition variable definitions. See __pthread_cond_wait_common.
+ Need to be defined here so there is one place from which
+ nptl_lock_constants can grab them. */
+#define __PTHREAD_COND_CLOCK_MONOTONIC_MASK 2
+#define __PTHREAD_COND_SHARED_MASK 1
+#endif
+
+/* Internal variables. */
+
+
+/* Default pthread attributes. */
+extern struct pthread_attr __default_pthread_attr attribute_hidden;
+extern int __default_pthread_attr_lock attribute_hidden;
+
+/* Size and alignment of static TLS block. */
+extern size_t __static_tls_size attribute_hidden;
+extern size_t __static_tls_align_m1 attribute_hidden;
+
+/* Flag whether the machine is SMP or not. */
+extern int __is_smp attribute_hidden;
+
+/* Thread descriptor handling. */
+extern list_t __stack_user;
+hidden_proto (__stack_user)
+
+/* Attribute handling. */
+extern struct pthread_attr *__attr_list attribute_hidden;
+extern int __attr_list_lock attribute_hidden;
+
+/* Concurrency handling. */
+extern int __concurrency_level attribute_hidden;
+
+/* Thread-local data key handling. */
+extern struct pthread_key_struct __pthread_keys[PTHREAD_KEYS_MAX];
+hidden_proto (__pthread_keys)
+
+/* Number of threads running. */
+extern unsigned int __nptl_nthreads attribute_hidden;
+
+#ifndef __ASSUME_SET_ROBUST_LIST
+/* Negative if we do not have the system call and we can use it. */
+extern int __set_robust_list_avail attribute_hidden;
+#endif
+
+/* Thread Priority Protection. */
+extern int __sched_fifo_min_prio attribute_hidden;
+extern int __sched_fifo_max_prio attribute_hidden;
+extern void __init_sched_fifo_prio (void) attribute_hidden;
+extern int __pthread_tpp_change_priority (int prev_prio, int new_prio)
+ attribute_hidden;
+extern int __pthread_current_priority (void) attribute_hidden;
+
+/* The library can run in debugging mode where it performs a lot more
+ tests. */
+extern int __pthread_debug attribute_hidden;
+/** For now disable debugging support. */
+#if 0
+# define DEBUGGING_P __builtin_expect (__pthread_debug, 0)
+# define INVALID_TD_P(pd) (DEBUGGING_P && __find_in_stack_list (pd) == NULL)
+# define INVALID_NOT_TERMINATED_TD_P(pd) INVALID_TD_P (pd)
+#else
+# define DEBUGGING_P 0
+/* Simplified test. This will not catch all invalid descriptors but
+ is better than nothing. And if the test triggers the thread
+ descriptor is guaranteed to be invalid. */
+# define INVALID_TD_P(pd) __builtin_expect ((pd)->tid <= 0, 0)
+# define INVALID_NOT_TERMINATED_TD_P(pd) __builtin_expect ((pd)->tid < 0, 0)
+#endif
+
+
+/* Cancellation test. */
+#define CANCELLATION_P(self) \
+ do { \
+ int cancelhandling = THREAD_GETMEM (self, cancelhandling); \
+ if (CANCEL_ENABLED_AND_CANCELED (cancelhandling)) \
+ { \
+ THREAD_SETMEM (self, result, PTHREAD_CANCELED); \
+ __do_cancel (); \
+ } \
+ } while (0)
+
+
+extern void __pthread_unwind (__pthread_unwind_buf_t *__buf)
+ __cleanup_fct_attribute __attribute ((__noreturn__))
+ ;
+extern void __pthread_unwind_next (__pthread_unwind_buf_t *__buf)
+ __cleanup_fct_attribute __attribute ((__noreturn__))
+#ifndef SHARED
+ weak_function
+#endif
+ ;
+extern void __pthread_register_cancel (__pthread_unwind_buf_t *__buf)
+ __cleanup_fct_attribute;
+extern void __pthread_unregister_cancel (__pthread_unwind_buf_t *__buf)
+ __cleanup_fct_attribute;
+hidden_proto (__pthread_unwind)
+hidden_proto (__pthread_unwind_next)
+hidden_proto (__pthread_register_cancel)
+hidden_proto (__pthread_unregister_cancel)
+# ifdef SHARED
+extern void attribute_hidden pthread_cancel_init (void);
+# endif
+extern void __nptl_unwind_freeres (void) attribute_hidden;
+
+/* Called when a thread reacts on a cancellation request. */
+static inline void
+__attribute ((noreturn, always_inline))
+__do_cancel (void)
+{
+ struct pthread *self = THREAD_SELF;
+
+ /* Make sure we get no more cancellations. */
+ THREAD_ATOMIC_BIT_SET (self, cancelhandling, EXITING_BIT);
+
+ __pthread_unwind ((__pthread_unwind_buf_t *)
+ THREAD_GETMEM (self, cleanup_jmp_buf));
+}
+
+
+/* Internal prototypes. */
+
+/* Thread list handling. */
+extern struct pthread *__find_in_stack_list (struct pthread *pd)
+ attribute_hidden;
+
+/* Deallocate a thread's stack after optionally making sure the thread
+ descriptor is still valid. */
+extern void __free_tcb (struct pthread *pd) attribute_hidden;
+
+/* Free allocated stack. */
+extern void __deallocate_stack (struct pthread *pd) attribute_hidden;
+
+/* Mark all the stacks except for the current one as available. This
+ function also re-initializes the lock for the stack cache. */
+extern void __reclaim_stacks (void) attribute_hidden;
+
+/* Make all threads' stacks executable. */
+extern int __make_stacks_executable (void **stack_endp) attribute_hidden;
+
+/* longjmp handling. */
+extern void __pthread_cleanup_upto (__jmp_buf target, char *targetframe);
+hidden_proto (__pthread_cleanup_upto)
+
+
+/* Functions with versioned interfaces. */
+extern int __pthread_create_2_1 (pthread_t *newthread,
+ const pthread_attr_t *attr,
+ void *(*start_routine) (void *), void *arg);
+extern int __pthread_create_2_0 (pthread_t *newthread,
+ const pthread_attr_t *attr,
+ void *(*start_routine) (void *), void *arg);
+extern int __pthread_attr_init_2_1 (pthread_attr_t *attr);
+extern int __pthread_attr_init_2_0 (pthread_attr_t *attr);
+
+
+/* Event handlers for libthread_db interface. */
+extern void __nptl_create_event (void);
+extern void __nptl_death_event (void);
+hidden_proto (__nptl_create_event)
+hidden_proto (__nptl_death_event)
+
+/* Register the generation counter in the libpthread with the libc. */
+#ifdef TLS_MULTIPLE_THREADS_IN_TCB
+extern void __libc_pthread_init (unsigned long int *ptr,
+ void (*reclaim) (void),
+ const struct pthread_functions *functions);
+#else
+extern int *__libc_pthread_init (unsigned long int *ptr,
+ void (*reclaim) (void),
+ const struct pthread_functions *functions);
+
+/* Variable set to a nonzero value either if more than one thread runs or ran,
+ or if a single-threaded process is trying to cancel itself. See
+ nptl/descr.h for more context on the single-threaded process case. */
+extern int __pthread_multiple_threads attribute_hidden;
+/* Pointer to the corresponding variable in libc. */
+extern int *__libc_multiple_threads_ptr attribute_hidden;
+#endif
+
+extern void __pthread_init_static_tls (struct link_map *) attribute_hidden;
+
+extern size_t __pthread_get_minstack (const pthread_attr_t *attr);
+
+/* Namespace save aliases. */
+extern int __pthread_getschedparam (pthread_t thread_id, int *policy,
+ struct sched_param *param);
+extern int __pthread_setschedparam (pthread_t thread_id, int policy,
+ const struct sched_param *param);
+extern int __pthread_setcancelstate (int state, int *oldstate);
+extern int __pthread_mutex_init (pthread_mutex_t *__mutex,
+ const pthread_mutexattr_t *__mutexattr);
+extern int __pthread_mutex_destroy (pthread_mutex_t *__mutex);
+extern int __pthread_mutex_trylock (pthread_mutex_t *_mutex);
+extern int __pthread_mutex_lock (pthread_mutex_t *__mutex);
+extern int __pthread_mutex_timedlock (pthread_mutex_t *__mutex,
+ const struct timespec *__abstime);
+extern int __pthread_mutex_cond_lock (pthread_mutex_t *__mutex)
+ attribute_hidden;
+extern void __pthread_mutex_cond_lock_adjust (pthread_mutex_t *__mutex)
+ attribute_hidden;
+extern int __pthread_mutex_unlock (pthread_mutex_t *__mutex);
+extern int __pthread_mutex_unlock_usercnt (pthread_mutex_t *__mutex,
+ int __decr) attribute_hidden;
+extern int __pthread_mutexattr_init (pthread_mutexattr_t *attr);
+extern int __pthread_mutexattr_destroy (pthread_mutexattr_t *attr);
+extern int __pthread_mutexattr_settype (pthread_mutexattr_t *attr, int kind);
+extern int __pthread_attr_destroy (pthread_attr_t *attr);
+extern int __pthread_attr_getdetachstate (const pthread_attr_t *attr,
+ int *detachstate);
+extern int __pthread_attr_setdetachstate (pthread_attr_t *attr,
+ int detachstate);
+extern int __pthread_attr_getinheritsched (const pthread_attr_t *attr,
+ int *inherit);
+extern int __pthread_attr_setinheritsched (pthread_attr_t *attr, int inherit);
+extern int __pthread_attr_getschedparam (const pthread_attr_t *attr,
+ struct sched_param *param);
+extern int __pthread_attr_setschedparam (pthread_attr_t *attr,
+ const struct sched_param *param);
+extern int __pthread_attr_getschedpolicy (const pthread_attr_t *attr,
+ int *policy);
+extern int __pthread_attr_setschedpolicy (pthread_attr_t *attr, int policy);
+extern int __pthread_attr_getscope (const pthread_attr_t *attr, int *scope);
+extern int __pthread_attr_setscope (pthread_attr_t *attr, int scope);
+extern int __pthread_attr_getstackaddr (const pthread_attr_t *__restrict
+ __attr, void **__restrict __stackaddr);
+extern int __pthread_attr_setstackaddr (pthread_attr_t *__attr,
+ void *__stackaddr);
+extern int __pthread_attr_getstacksize (const pthread_attr_t *__restrict
+ __attr,
+ size_t *__restrict __stacksize);
+extern int __pthread_attr_setstacksize (pthread_attr_t *__attr,
+ size_t __stacksize);
+extern int __pthread_attr_getstack (const pthread_attr_t *__restrict __attr,
+ void **__restrict __stackaddr,
+ size_t *__restrict __stacksize);
+extern int __pthread_attr_setstack (pthread_attr_t *__attr, void *__stackaddr,
+ size_t __stacksize);
+extern int __pthread_rwlock_init (pthread_rwlock_t *__restrict __rwlock,
+ const pthread_rwlockattr_t *__restrict
+ __attr);
+extern int __pthread_rwlock_destroy (pthread_rwlock_t *__rwlock);
+extern int __pthread_rwlock_rdlock (pthread_rwlock_t *__rwlock);
+extern int __pthread_rwlock_tryrdlock (pthread_rwlock_t *__rwlock);
+extern int __pthread_rwlock_wrlock (pthread_rwlock_t *__rwlock);
+extern int __pthread_rwlock_trywrlock (pthread_rwlock_t *__rwlock);
+extern int __pthread_rwlock_unlock (pthread_rwlock_t *__rwlock);
+extern int __pthread_cond_broadcast (pthread_cond_t *cond);
+extern int __pthread_cond_destroy (pthread_cond_t *cond);
+extern int __pthread_cond_init (pthread_cond_t *cond,
+ const pthread_condattr_t *cond_attr);
+extern int __pthread_cond_signal (pthread_cond_t *cond);
+extern int __pthread_cond_wait (pthread_cond_t *cond, pthread_mutex_t *mutex);
+extern int __pthread_cond_timedwait (pthread_cond_t *cond,
+ pthread_mutex_t *mutex,
+ const struct timespec *abstime);
+extern int __pthread_condattr_destroy (pthread_condattr_t *attr);
+extern int __pthread_condattr_init (pthread_condattr_t *attr);
+extern int __pthread_key_create (pthread_key_t *key, void (*destr) (void *));
+extern int __pthread_key_delete (pthread_key_t key);
+extern void *__pthread_getspecific (pthread_key_t key);
+extern int __pthread_setspecific (pthread_key_t key, const void *value);
+extern int __pthread_once (pthread_once_t *once_control,
+ void (*init_routine) (void));
+extern int __pthread_atfork (void (*prepare) (void), void (*parent) (void),
+ void (*child) (void));
+extern pthread_t __pthread_self (void);
+extern int __pthread_equal (pthread_t thread1, pthread_t thread2);
+extern int __pthread_detach (pthread_t th);
+extern int __pthread_cancel (pthread_t th);
+extern int __pthread_kill (pthread_t threadid, int signo);
+extern void __pthread_exit (void *value) __attribute__ ((__noreturn__));
+extern int __pthread_join (pthread_t threadid, void **thread_return);
+extern int __pthread_setcanceltype (int type, int *oldtype);
+extern int __pthread_enable_asynccancel (void) attribute_hidden;
+extern void __pthread_disable_asynccancel (int oldtype) attribute_hidden;
+extern void __pthread_testcancel (void);
+extern int __pthread_clockjoin_ex (pthread_t, void **, clockid_t,
+ const struct timespec *, bool)
+ attribute_hidden;
+
+
+hidden_proto (__pthread_mutex_init)
+hidden_proto (__pthread_mutex_destroy)
+hidden_proto (__pthread_mutex_lock)
+hidden_proto (__pthread_mutex_trylock)
+hidden_proto (__pthread_mutex_unlock)
+hidden_proto (__pthread_rwlock_rdlock)
+hidden_proto (__pthread_rwlock_wrlock)
+hidden_proto (__pthread_rwlock_unlock)
+hidden_proto (__pthread_key_create)
+hidden_proto (__pthread_getspecific)
+hidden_proto (__pthread_setspecific)
+hidden_proto (__pthread_once)
+hidden_proto (__pthread_setcancelstate)
+hidden_proto (__pthread_testcancel)
+hidden_proto (__pthread_mutexattr_init)
+hidden_proto (__pthread_mutexattr_settype)
+
+extern int __pthread_cond_broadcast_2_0 (pthread_cond_2_0_t *cond);
+extern int __pthread_cond_destroy_2_0 (pthread_cond_2_0_t *cond);
+extern int __pthread_cond_init_2_0 (pthread_cond_2_0_t *cond,
+ const pthread_condattr_t *cond_attr);
+extern int __pthread_cond_signal_2_0 (pthread_cond_2_0_t *cond);
+extern int __pthread_cond_timedwait_2_0 (pthread_cond_2_0_t *cond,
+ pthread_mutex_t *mutex,
+ const struct timespec *abstime);
+extern int __pthread_cond_wait_2_0 (pthread_cond_2_0_t *cond,
+ pthread_mutex_t *mutex);
+
+extern int __pthread_getaffinity_np (pthread_t th, size_t cpusetsize,
+ cpu_set_t *cpuset);
+
+/* Special versions which use non-exported functions. */
+extern void __pthread_cleanup_push (struct _pthread_cleanup_buffer *buffer,
+ void (*routine) (void *), void *arg)
+ attribute_hidden;
+
+/* Replace cleanup macros defined in <pthread.h> with internal
+ versions that don't depend on unwind info and better support
+ cancellation. */
+# undef pthread_cleanup_push
+# define pthread_cleanup_push(routine,arg) \
+ { struct _pthread_cleanup_buffer _buffer; \
+ __pthread_cleanup_push (&_buffer, (routine), (arg));
+
+extern void __pthread_cleanup_pop (struct _pthread_cleanup_buffer *buffer,
+ int execute) attribute_hidden;
+# undef pthread_cleanup_pop
+# define pthread_cleanup_pop(execute) \
+ __pthread_cleanup_pop (&_buffer, (execute)); }
+
+extern void __pthread_cleanup_push_defer (struct _pthread_cleanup_buffer *buffer,
+ void (*routine) (void *), void *arg);
+extern void __pthread_cleanup_pop_restore (struct _pthread_cleanup_buffer *buffer,
+ int execute);
+
+/* Old cleanup interfaces, still used in libc.so. */
+extern void _pthread_cleanup_push (struct _pthread_cleanup_buffer *buffer,
+ void (*routine) (void *), void *arg);
+extern void _pthread_cleanup_pop (struct _pthread_cleanup_buffer *buffer,
+ int execute);
+extern void _pthread_cleanup_push_defer (struct _pthread_cleanup_buffer *buffer,
+ void (*routine) (void *), void *arg);
+extern void _pthread_cleanup_pop_restore (struct _pthread_cleanup_buffer *buffer,
+ int execute);
+
+extern void __nptl_deallocate_tsd (void) attribute_hidden;
+
+extern void __nptl_setxid_error (struct xid_command *cmdp, int error)
+ attribute_hidden;
+extern int __nptl_setxid (struct xid_command *cmdp) attribute_hidden;
+#ifndef SHARED
+extern void __nptl_set_robust (struct pthread *self);
+#endif
+
+extern void __nptl_stacks_freeres (void) attribute_hidden;
+extern void __shm_directory_freeres (void) attribute_hidden;
+
+extern void __wait_lookup_done (void) attribute_hidden;
+
+#ifdef SHARED
+# define PTHREAD_STATIC_FN_REQUIRE(name)
+#else
+# define PTHREAD_STATIC_FN_REQUIRE(name) __asm (".globl " #name);
+#endif
+
+/* Test if the mutex is suitable for the FUTEX_WAIT_REQUEUE_PI operation. */
+#if (defined lll_futex_wait_requeue_pi \
+ && defined __ASSUME_REQUEUE_PI)
+# define USE_REQUEUE_PI(mut) \
+ ((mut) && (mut) != (void *) ~0l \
+ && (((mut)->__data.__kind \
+ & (PTHREAD_MUTEX_PRIO_INHERIT_NP | PTHREAD_MUTEX_ROBUST_NORMAL_NP)) \
+ == PTHREAD_MUTEX_PRIO_INHERIT_NP))
+#else
+# define USE_REQUEUE_PI(mut) 0
+#endif
+
+/* Returns 0 if POL is a valid scheduling policy. */
+static inline int
+check_sched_policy_attr (int pol)
+{
+ if (pol == SCHED_OTHER || pol == SCHED_FIFO || pol == SCHED_RR)
+ return 0;
+
+ return EINVAL;
+}
+
+/* Returns 0 if PR is within the accepted range of priority values for
+ the scheduling policy POL or EINVAL otherwise. */
+static inline int
+check_sched_priority_attr (int pr, int pol)
+{
+ int min = __sched_get_priority_min (pol);
+ int max = __sched_get_priority_max (pol);
+
+ if (min >= 0 && max >= 0 && pr >= min && pr <= max)
+ return 0;
+
+ return EINVAL;
+}
+
+/* Returns 0 if ST is a valid stack size for a thread stack and EINVAL
+ otherwise. */
+static inline int
+check_stacksize_attr (size_t st)
+{
+ if (st >= PTHREAD_STACK_MIN)
+ return 0;
+
+ return EINVAL;
+}
+
+#define ASSERT_TYPE_SIZE(type, size) \
+ _Static_assert (sizeof (type) == size, \
+ "sizeof (" #type ") != " #size)
+
+#define ASSERT_PTHREAD_INTERNAL_SIZE(type, internal) \
+ _Static_assert (sizeof ((type) { { 0 } }).__size >= sizeof (internal),\
+ "sizeof (" #type ".__size) < sizeof (" #internal ")")
+
+#define ASSERT_PTHREAD_STRING(x) __STRING (x)
+#define ASSERT_PTHREAD_INTERNAL_OFFSET(type, member, offset) \
+ _Static_assert (offsetof (type, member) == offset, \
+ "offset of " #member " field of " #type " != " \
+ ASSERT_PTHREAD_STRING (offset))
+#define ASSERT_PTHREAD_INTERNAL_MEMBER_SIZE(type, member, mtype) \
+ _Static_assert (sizeof (((type) { 0 }).member) == sizeof (mtype), \
+ "sizeof (" #type "." #member ") != sizeof (" #mtype "))")
|
|
|
|
|
+
|
|
|
|
|
+#endif /* pthreadP.h */
|
|
|
|
|
diff --git a/nptl_2_17/pthread_2_17.h b/nptl_2_17/pthread_2_17.h
|
|
|
|
|
new file mode 100644
|
|
|
|
|
index 00000000..3954770a
|
|
|
|
|
--- /dev/null
|
|
|
|
|
+++ b/nptl_2_17/pthread_2_17.h
|
|
|
|
|
@@ -0,0 +1,1173 @@
|
|
|
|
|
+/* Copyright (C) 2002-2020 Free Software Foundation, Inc.
|
|
|
|
|
+ This file is part of the GNU C Library.
|
|
|
|
|
+
|
|
|
|
|
+ The GNU C Library is free software; you can redistribute it and/or
|
|
|
|
|
+ modify it under the terms of the GNU Lesser General Public
|
|
|
|
|
+ License as published by the Free Software Foundation; either
|
|
|
|
|
+ version 2.1 of the License, or (at your option) any later version.
|
|
|
|
|
+
|
|
|
|
|
+ The GNU C Library is distributed in the hope that it will be useful,
|
|
|
|
|
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
|
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
|
|
|
+ Lesser General Public License for more details.
|
|
|
|
|
+
|
|
|
|
|
+ You should have received a copy of the GNU Lesser General Public
|
|
|
|
|
+ License along with the GNU C Library; if not, see
|
|
|
|
|
+ <https://www.gnu.org/licenses/>. */
|
|
|
|
|
+
|
|
|
|
|
+#ifndef _PTHREAD_H
|
|
|
|
|
+#define _PTHREAD_H 1
|
|
|
|
|
+
|
|
|
|
|
+#include <features.h>
|
|
|
|
|
+#include <sched.h>
|
|
|
|
|
+#include <time.h>
|
|
|
|
|
+
|
|
|
|
|
+#include <bits/endian.h>
|
|
|
|
|
+#include <bits/pthreadtypes_2_17.h>
|
|
|
|
|
+#include <bits/setjmp.h>
|
|
|
|
|
+#include <bits/wordsize.h>
|
|
|
|
|
+#include <bits/types/struct_timespec.h>
|
|
|
|
|
+
|
|
|
|
|
+
|
|
|
|
|
+/* Detach state. */
|
|
|
|
|
+enum
|
|
|
|
|
+{
|
|
|
|
|
+ PTHREAD_CREATE_JOINABLE,
|
|
|
|
|
+#define PTHREAD_CREATE_JOINABLE PTHREAD_CREATE_JOINABLE
|
|
|
|
|
+ PTHREAD_CREATE_DETACHED
|
|
|
|
|
+#define PTHREAD_CREATE_DETACHED PTHREAD_CREATE_DETACHED
|
|
|
|
|
+};
|
|
|
|
|
+
|
|
|
|
|
+
|
|
|
|
|
+/* Mutex types. */
|
|
|
|
|
+enum
|
|
|
|
|
+{
|
|
|
|
|
+ PTHREAD_MUTEX_TIMED_NP,
|
|
|
|
|
+ PTHREAD_MUTEX_RECURSIVE_NP,
|
|
|
|
|
+ PTHREAD_MUTEX_ERRORCHECK_NP,
|
|
|
|
|
+ PTHREAD_MUTEX_ADAPTIVE_NP
|
|
|
|
|
+#if defined __USE_UNIX98 || defined __USE_XOPEN2K8
|
|
|
|
|
+ ,
|
|
|
|
|
+ PTHREAD_MUTEX_NORMAL = PTHREAD_MUTEX_TIMED_NP,
|
|
|
|
|
+ PTHREAD_MUTEX_RECURSIVE = PTHREAD_MUTEX_RECURSIVE_NP,
|
|
|
|
|
+ PTHREAD_MUTEX_ERRORCHECK = PTHREAD_MUTEX_ERRORCHECK_NP,
|
|
|
|
|
+ PTHREAD_MUTEX_DEFAULT = PTHREAD_MUTEX_NORMAL
|
|
|
|
|
+#endif
|
|
|
|
|
+#ifdef __USE_GNU
|
|
|
|
|
+ /* For compatibility. */
|
|
|
|
|
+ , PTHREAD_MUTEX_FAST_NP = PTHREAD_MUTEX_TIMED_NP
|
|
|
|
|
+#endif
|
|
|
|
|
+};
|
|
|
|
|
+
|
|
|
|
|
+
|
|
|
|
|
+#ifdef __USE_XOPEN2K
|
|
|
|
|
+/* Robust mutex or not flags. */
|
|
|
|
|
+enum
|
|
|
|
|
+{
|
|
|
|
|
+ PTHREAD_MUTEX_STALLED,
|
|
|
|
|
+ PTHREAD_MUTEX_STALLED_NP = PTHREAD_MUTEX_STALLED,
|
|
|
|
|
+ PTHREAD_MUTEX_ROBUST,
|
|
|
|
|
+ PTHREAD_MUTEX_ROBUST_NP = PTHREAD_MUTEX_ROBUST
|
|
|
|
|
+};
|
|
|
|
|
+#endif
|
|
|
|
|
+
|
|
|
|
|
+
|
|
|
|
|
+#if defined __USE_POSIX199506 || defined __USE_UNIX98
|
|
|
|
|
+/* Mutex protocols. */
|
|
|
|
|
+enum
|
|
|
|
|
+{
|
|
|
|
|
+ PTHREAD_PRIO_NONE,
|
|
|
|
|
+ PTHREAD_PRIO_INHERIT,
|
|
|
|
|
+ PTHREAD_PRIO_PROTECT
|
|
|
|
|
+};
|
|
|
|
|
+#endif
|
|
|
|
|
+
|
|
|
|
|
+
|
|
|
|
|
+#define PTHREAD_MUTEX_INITIALIZER \
|
|
|
|
|
+ { { __PTHREAD_MUTEX_INITIALIZER (PTHREAD_MUTEX_TIMED_NP) } }
|
|
|
|
|
+#ifdef __USE_GNU
|
|
|
|
|
+# define PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP \
|
|
|
|
|
+ { { __PTHREAD_MUTEX_INITIALIZER (PTHREAD_MUTEX_RECURSIVE_NP) } }
|
|
|
|
|
+# define PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP \
|
|
|
|
|
+ { { __PTHREAD_MUTEX_INITIALIZER (PTHREAD_MUTEX_ERRORCHECK_NP) } }
|
|
|
|
|
+# define PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP \
|
|
|
|
|
+ { { __PTHREAD_MUTEX_INITIALIZER (PTHREAD_MUTEX_ADAPTIVE_NP) } }
|
|
|
|
|
+#endif
|
|
|
|
|
+
|
|
|
|
|
+
|
|
|
|
|
+/* Read-write lock types. */
|
|
|
|
|
+#if defined __USE_UNIX98 || defined __USE_XOPEN2K
|
|
|
|
|
+enum
|
|
|
|
|
+{
|
|
|
|
|
+ PTHREAD_RWLOCK_PREFER_READER_NP,
|
|
|
|
|
+ PTHREAD_RWLOCK_PREFER_WRITER_NP,
|
|
|
|
|
+ PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP,
|
|
|
|
|
+ PTHREAD_RWLOCK_DEFAULT_NP = PTHREAD_RWLOCK_PREFER_READER_NP
|
|
|
|
|
+};
|
|
|
|
|
+
|
|
|
|
|
+
|
|
|
|
|
+/* Read-write lock initializers. */
|
|
|
|
|
+# define PTHREAD_RWLOCK_INITIALIZER \
|
|
|
|
|
+ { { __PTHREAD_RWLOCK_INITIALIZER (PTHREAD_RWLOCK_DEFAULT_NP) } }
|
|
|
|
|
+# ifdef __USE_GNU
|
|
|
|
|
+# define PTHREAD_RWLOCK_WRITER_NONRECURSIVE_INITIALIZER_NP \
|
|
|
|
|
+ { { __PTHREAD_RWLOCK_INITIALIZER (PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP) } }
|
|
|
|
|
+# endif
|
|
|
|
|
+#endif /* Unix98 or XOpen2K */
|
|
|
|
|
+
|
|
|
|
|
+
|
|
|
|
|
+/* Scheduler inheritance. */
|
|
|
|
|
+enum
|
|
|
|
|
+{
|
|
|
|
|
+ PTHREAD_INHERIT_SCHED,
|
|
|
|
|
+#define PTHREAD_INHERIT_SCHED PTHREAD_INHERIT_SCHED
|
|
|
|
|
+ PTHREAD_EXPLICIT_SCHED
|
|
|
|
|
+#define PTHREAD_EXPLICIT_SCHED PTHREAD_EXPLICIT_SCHED
|
|
|
|
|
+};
|
|
|
|
|
+
|
|
|
|
|
+
|
|
|
|
|
+/* Scope handling. */
|
|
|
|
|
+enum
|
|
|
|
|
+{
|
|
|
|
|
+ PTHREAD_SCOPE_SYSTEM,
|
|
|
|
|
+#define PTHREAD_SCOPE_SYSTEM PTHREAD_SCOPE_SYSTEM
|
|
|
|
|
+ PTHREAD_SCOPE_PROCESS
|
|
|
|
|
+#define PTHREAD_SCOPE_PROCESS PTHREAD_SCOPE_PROCESS
|
|
|
|
|
+};
|
|
|
|
|
+
|
|
|
|
|
+
|
|
|
|
|
+/* Process shared or private flag. */
|
|
|
|
|
+enum
|
|
|
|
|
+{
|
|
|
|
|
+ PTHREAD_PROCESS_PRIVATE,
|
|
|
|
|
+#define PTHREAD_PROCESS_PRIVATE PTHREAD_PROCESS_PRIVATE
|
|
|
|
|
+ PTHREAD_PROCESS_SHARED
|
|
|
|
|
+#define PTHREAD_PROCESS_SHARED PTHREAD_PROCESS_SHARED
|
|
|
|
|
+};
|
|
|
|
|
+
|
|
|
|
|
+
|
|
|
|
|
+
|
|
|
|
|
+/* Conditional variable handling. */
|
|
|
|
|
+#define PTHREAD_COND_INITIALIZER { { 0, 0, 0, 0, 0, (void *) 0, 0, 0 } }
|
|
|
|
|
+
|
|
|
|
|
+
|
|
|
|
|
+/* Cleanup buffers */
|
|
|
|
|
+struct _pthread_cleanup_buffer
|
|
|
|
|
+{
|
|
|
|
|
+ void (*__routine) (void *); /* Function to call. */
|
|
|
|
|
+ void *__arg; /* Its argument. */
|
|
|
|
|
+ int __canceltype; /* Saved cancellation type. */
|
|
|
|
|
+ struct _pthread_cleanup_buffer *__prev; /* Chaining of cleanup functions. */
|
|
|
|
|
+};
|
|
|
|
|
+
|
|
|
|
|
+/* Cancellation */
|
|
|
|
|
+enum
|
|
|
|
|
+{
|
|
|
|
|
+ PTHREAD_CANCEL_ENABLE,
|
|
|
|
|
+#define PTHREAD_CANCEL_ENABLE PTHREAD_CANCEL_ENABLE
|
|
|
|
|
+ PTHREAD_CANCEL_DISABLE
|
|
|
|
|
+#define PTHREAD_CANCEL_DISABLE PTHREAD_CANCEL_DISABLE
|
|
|
|
|
+};
|
|
|
|
|
+enum
|
|
|
|
|
+{
|
|
|
|
|
+ PTHREAD_CANCEL_DEFERRED,
|
|
|
|
|
+#define PTHREAD_CANCEL_DEFERRED PTHREAD_CANCEL_DEFERRED
|
|
|
|
|
+ PTHREAD_CANCEL_ASYNCHRONOUS
|
|
|
|
|
+#define PTHREAD_CANCEL_ASYNCHRONOUS PTHREAD_CANCEL_ASYNCHRONOUS
|
|
|
|
|
+};
|
|
|
|
|
+#define PTHREAD_CANCELED ((void *) -1)
|
|
|
|
|
+
|
|
|
|
|
+
|
|
|
|
|
+/* Single execution handling. */
|
|
|
|
|
+#define PTHREAD_ONCE_INIT 0
|
|
|
|
|
+
|
|
|
|
|
+
|
|
|
|
|
+#ifdef __USE_XOPEN2K
|
|
|
|
|
+/* Value returned by 'pthread_barrier_wait' for one of the threads after
|
|
|
|
|
+ the required number of threads have called this function.
|
|
|
|
|
+ -1 is distinct from 0 and all errno constants */
|
|
|
|
|
+# define PTHREAD_BARRIER_SERIAL_THREAD -1
|
|
|
|
|
+#endif
|
|
|
|
|
+
|
|
|
|
|
+
|
|
|
|
|
+__BEGIN_DECLS
|
|
|
|
|
+
|
|
|
|
|
+/* Create a new thread, starting with execution of START-ROUTINE
+ getting passed ARG. Creation attributes come from ATTR. The new
+ handle is stored in *NEWTHREAD. */
+extern int pthread_create (pthread_t *__restrict __newthread,
+ const pthread_attr_t *__restrict __attr,
+ void *(*__start_routine) (void *),
+ void *__restrict __arg) __THROWNL __nonnull ((1, 3));
+
+/* Terminate calling thread.
+
+ The registered cleanup handlers are called via exception handling
+ so we cannot mark this function with __THROW.*/
+extern void pthread_exit (void *__retval) __attribute__ ((__noreturn__));
+
+/* Make calling thread wait for termination of the thread TH. The
+ exit status of the thread is stored in *THREAD_RETURN, if THREAD_RETURN
+ is not NULL.
+
+ This function is a cancellation point and therefore not marked with
+ __THROW. */
+extern int pthread_join (pthread_t __th, void **__thread_return);
+
+#ifdef __USE_GNU
+/* Check whether thread TH has terminated. If yes return the status of
+ the thread in *THREAD_RETURN, if THREAD_RETURN is not NULL. */
+extern int pthread_tryjoin_np (pthread_t __th, void **__thread_return) __THROW;
+
+/* Make calling thread wait for termination of the thread TH, but only
+ until TIMEOUT. The exit status of the thread is stored in
+ *THREAD_RETURN, if THREAD_RETURN is not NULL.
+
+ This function is a cancellation point and therefore not marked with
+ __THROW. */
+extern int pthread_timedjoin_np (pthread_t __th, void **__thread_return,
+ const struct timespec *__abstime);
+
+/* Make calling thread wait for termination of the thread TH, but only
+ until TIMEOUT measured against the clock specified by CLOCKID. The
+ exit status of the thread is stored in *THREAD_RETURN, if
+ THREAD_RETURN is not NULL.
+
+ This function is a cancellation point and therefore not marked with
+ __THROW. */
+extern int pthread_clockjoin_np (pthread_t __th, void **__thread_return,
+ clockid_t __clockid,
+ const struct timespec *__abstime);
+#endif
+
+/* Indicate that the thread TH is never to be joined with PTHREAD_JOIN.
+ The resources of TH will therefore be freed immediately when it
+ terminates, instead of waiting for another thread to perform PTHREAD_JOIN
+ on it. */
+extern int pthread_detach (pthread_t __th) __THROW;
+
+
+/* Obtain the identifier of the current thread. */
+extern pthread_t pthread_self (void) __THROW __attribute__ ((__const__));
+
+/* Compare two thread identifiers. */
+extern int pthread_equal (pthread_t __thread1, pthread_t __thread2)
+ __THROW __attribute__ ((__const__));
+
+
+/* Thread attribute handling. */
+
+/* Initialize thread attribute *ATTR with default attributes
+ (detachstate is PTHREAD_JOINABLE, scheduling policy is SCHED_OTHER,
+ no user-provided stack). */
+extern int pthread_attr_init (pthread_attr_t *__attr) __THROW __nonnull ((1));
+
+/* Destroy thread attribute *ATTR. */
+extern int pthread_attr_destroy (pthread_attr_t *__attr)
+ __THROW __nonnull ((1));
+
+/* Get detach state attribute. */
+extern int pthread_attr_getdetachstate (const pthread_attr_t *__attr,
+ int *__detachstate)
+ __THROW __nonnull ((1, 2));
+
+/* Set detach state attribute. */
+extern int pthread_attr_setdetachstate (pthread_attr_t *__attr,
+ int __detachstate)
+ __THROW __nonnull ((1));
+
+
+/* Get the size of the guard area created for stack overflow protection. */
+extern int pthread_attr_getguardsize (const pthread_attr_t *__attr,
+ size_t *__guardsize)
+ __THROW __nonnull ((1, 2));
+
+/* Set the size of the guard area created for stack overflow protection. */
+extern int pthread_attr_setguardsize (pthread_attr_t *__attr,
+ size_t __guardsize)
+ __THROW __nonnull ((1));
+
+
+/* Return in *PARAM the scheduling parameters of *ATTR. */
+extern int pthread_attr_getschedparam (const pthread_attr_t *__restrict __attr,
+ struct sched_param *__restrict __param)
+ __THROW __nonnull ((1, 2));
+
+/* Set scheduling parameters (priority, etc) in *ATTR according to PARAM. */
+extern int pthread_attr_setschedparam (pthread_attr_t *__restrict __attr,
+ const struct sched_param *__restrict
+ __param) __THROW __nonnull ((1, 2));
+
+/* Return in *POLICY the scheduling policy of *ATTR. */
+extern int pthread_attr_getschedpolicy (const pthread_attr_t *__restrict
+ __attr, int *__restrict __policy)
+ __THROW __nonnull ((1, 2));
+
+/* Set scheduling policy in *ATTR according to POLICY. */
+extern int pthread_attr_setschedpolicy (pthread_attr_t *__attr, int __policy)
+ __THROW __nonnull ((1));
+
+/* Return in *INHERIT the scheduling inheritance mode of *ATTR. */
+extern int pthread_attr_getinheritsched (const pthread_attr_t *__restrict
+ __attr, int *__restrict __inherit)
+ __THROW __nonnull ((1, 2));
+
+/* Set scheduling inheritance mode in *ATTR according to INHERIT. */
+extern int pthread_attr_setinheritsched (pthread_attr_t *__attr,
+ int __inherit)
+ __THROW __nonnull ((1));
+
+
+/* Return in *SCOPE the scheduling contention scope of *ATTR. */
+extern int pthread_attr_getscope (const pthread_attr_t *__restrict __attr,
+ int *__restrict __scope)
+ __THROW __nonnull ((1, 2));
+
+/* Set scheduling contention scope in *ATTR according to SCOPE. */
+extern int pthread_attr_setscope (pthread_attr_t *__attr, int __scope)
+ __THROW __nonnull ((1));
+
+/* Return the previously set address for the stack. */
+extern int pthread_attr_getstackaddr (const pthread_attr_t *__restrict
+ __attr, void **__restrict __stackaddr)
+ __THROW __nonnull ((1, 2)) __attribute_deprecated__;
+
+/* Set the starting address of the stack of the thread to be created.
+ Depending on whether the stack grows up or down the value must either
+ be higher or lower than all the addresses in the memory block. The
+ minimal size of the block must be PTHREAD_STACK_MIN. */
+extern int pthread_attr_setstackaddr (pthread_attr_t *__attr,
+ void *__stackaddr)
+ __THROW __nonnull ((1)) __attribute_deprecated__;
+
+/* Return the currently used minimal stack size. */
+extern int pthread_attr_getstacksize (const pthread_attr_t *__restrict
+ __attr, size_t *__restrict __stacksize)
+ __THROW __nonnull ((1, 2));
+
+/* Add information about the minimum stack size needed for the thread
+ to be started. This size must never be less than PTHREAD_STACK_MIN
+ and must also not exceed the system limits. */
+extern int pthread_attr_setstacksize (pthread_attr_t *__attr,
+ size_t __stacksize)
+ __THROW __nonnull ((1));
+
+#ifdef __USE_XOPEN2K
+/* Return the previously set address for the stack. */
+extern int pthread_attr_getstack (const pthread_attr_t *__restrict __attr,
+ void **__restrict __stackaddr,
+ size_t *__restrict __stacksize)
+ __THROW __nonnull ((1, 2, 3));
+
+/* The following two interfaces are intended to replace the last two. They
+ require setting the address as well as the size since only setting the
+ address will make the implementation on some architectures impossible. */
+extern int pthread_attr_setstack (pthread_attr_t *__attr, void *__stackaddr,
+ size_t __stacksize) __THROW __nonnull ((1));
+#endif
+
+#ifdef __USE_GNU
+/* Thread created with attribute ATTR will be limited to run only on
+ the processors represented in CPUSET. */
+extern int pthread_attr_setaffinity_np (pthread_attr_t *__attr,
+ size_t __cpusetsize,
+ const cpu_set_t *__cpuset)
+ __THROW __nonnull ((1, 3));
+
+/* Get bit set in CPUSET representing the processors threads created with
+ ATTR can run on. */
+extern int pthread_attr_getaffinity_np (const pthread_attr_t *__attr,
+ size_t __cpusetsize,
+ cpu_set_t *__cpuset)
+ __THROW __nonnull ((1, 3));
+
+/* Get the default attributes used by pthread_create in this process. */
+extern int pthread_getattr_default_np (pthread_attr_t *__attr)
+ __THROW __nonnull ((1));
+
+/* Set the default attributes to be used by pthread_create in this
+ process. */
+extern int pthread_setattr_default_np (const pthread_attr_t *__attr)
+ __THROW __nonnull ((1));
+
+/* Initialize thread attribute *ATTR with attributes corresponding to the
+ already running thread TH. It shall be called on uninitialized ATTR
+ and destroyed with pthread_attr_destroy when no longer needed. */
+extern int pthread_getattr_np (pthread_t __th, pthread_attr_t *__attr)
+ __THROW __nonnull ((2));
+#endif
+
+
+/* Functions for scheduling control. */
+
+/* Set the scheduling parameters for TARGET_THREAD according to POLICY
+ and *PARAM. */
+extern int pthread_setschedparam (pthread_t __target_thread, int __policy,
+ const struct sched_param *__param)
+ __THROW __nonnull ((3));
+
+/* Return in *POLICY and *PARAM the scheduling parameters for TARGET_THREAD. */
+extern int pthread_getschedparam (pthread_t __target_thread,
+ int *__restrict __policy,
+ struct sched_param *__restrict __param)
+ __THROW __nonnull ((2, 3));
+
+/* Set the scheduling priority for TARGET_THREAD. */
+extern int pthread_setschedprio (pthread_t __target_thread, int __prio)
+ __THROW;
+
+
+#ifdef __USE_GNU
+/* Get thread name visible in the kernel and its interfaces. */
+extern int pthread_getname_np (pthread_t __target_thread, char *__buf,
+ size_t __buflen)
+ __THROW __nonnull ((2));
+
+/* Set thread name visible in the kernel and its interfaces. */
+extern int pthread_setname_np (pthread_t __target_thread, const char *__name)
+ __THROW __nonnull ((2));
+#endif
+
+
+#ifdef __USE_UNIX98
+/* Determine level of concurrency. */
+extern int pthread_getconcurrency (void) __THROW;
+
+/* Set new concurrency level to LEVEL. */
+extern int pthread_setconcurrency (int __level) __THROW;
+#endif
+
+#ifdef __USE_GNU
+/* Yield the processor to another thread or process.
+ This function is similar to the POSIX `sched_yield' function but
+ might be differently implemented in the case of a m-on-n thread
+ implementation. */
+extern int pthread_yield (void) __THROW;
+
+
+/* Limit specified thread TH to run only on the processors represented
+ in CPUSET. */
+extern int pthread_setaffinity_np (pthread_t __th, size_t __cpusetsize,
+ const cpu_set_t *__cpuset)
+ __THROW __nonnull ((3));
+
+/* Get bit set in CPUSET representing the processors TH can run on. */
+extern int pthread_getaffinity_np (pthread_t __th, size_t __cpusetsize,
+ cpu_set_t *__cpuset)
+ __THROW __nonnull ((3));
+#endif
+
+
+/* Functions for handling initialization. */
+
+/* Guarantee that the initialization function INIT_ROUTINE will be called
+ only once, even if pthread_once is executed several times with the
+ same ONCE_CONTROL argument. ONCE_CONTROL must point to a static or
+ extern variable initialized to PTHREAD_ONCE_INIT.
+
+ The initialization functions might throw an exception, which is why
+ this function is not marked with __THROW. */
+extern int pthread_once (pthread_once_t *__once_control,
+ void (*__init_routine) (void)) __nonnull ((1, 2));
+
+
+/* Functions for handling cancellation.
+
+ Note that these functions are explicitly not marked to not throw an
+ exception in C++ code. If cancellation is implemented by unwinding
+ this is necessary to have the compiler generate the unwind information. */
+
+/* Set cancelability state of current thread to STATE, returning old
+ state in *OLDSTATE if OLDSTATE is not NULL. */
+extern int pthread_setcancelstate (int __state, int *__oldstate);
+
+/* Set cancellation state of current thread to TYPE, returning the old
+ type in *OLDTYPE if OLDTYPE is not NULL. */
+extern int pthread_setcanceltype (int __type, int *__oldtype);
+
+/* Cancel THREAD immediately or at the next possibility. */
+extern int pthread_cancel (pthread_t __th);
+
+/* Test for pending cancellation for the current thread and terminate
+ the thread as per pthread_exit(PTHREAD_CANCELED) if it has been
+ cancelled. */
+extern void pthread_testcancel (void);
+
+
+/* Cancellation handling with integration into exception handling. */
+
+typedef struct
+{
+ struct
+ {
+ __jmp_buf __cancel_jmp_buf;
+ int __mask_was_saved;
+ } __cancel_jmp_buf[1];
+ void *__pad[4];
+} __pthread_unwind_buf_t __attribute__ ((__aligned__));
+
+/* No special attributes by default. */
+#ifndef __cleanup_fct_attribute
+# define __cleanup_fct_attribute
+#endif
+
+
+/* Structure to hold the cleanup handler information. */
+struct __pthread_cleanup_frame
+{
+ void (*__cancel_routine) (void *);
+ void *__cancel_arg;
+ int __do_it;
+ int __cancel_type;
+};
+
+#if defined __GNUC__ && defined __EXCEPTIONS
+# ifdef __cplusplus
+/* Class to handle cancellation handler invocation. */
+class __pthread_cleanup_class
+{
+ void (*__cancel_routine) (void *);
+ void *__cancel_arg;
+ int __do_it;
+ int __cancel_type;
+
+ public:
+ __pthread_cleanup_class (void (*__fct) (void *), void *__arg)
+ : __cancel_routine (__fct), __cancel_arg (__arg), __do_it (1) { }
+ ~__pthread_cleanup_class () { if (__do_it) __cancel_routine (__cancel_arg); }
+ void __setdoit (int __newval) { __do_it = __newval; }
+ void __defer () { pthread_setcanceltype (PTHREAD_CANCEL_DEFERRED,
+ &__cancel_type); }
+ void __restore () const { pthread_setcanceltype (__cancel_type, 0); }
+};
+
+/* Install a cleanup handler: ROUTINE will be called with arguments ARG
+ when the thread is canceled or calls pthread_exit. ROUTINE will also
+ be called with arguments ARG when the matching pthread_cleanup_pop
+ is executed with non-zero EXECUTE argument.
+
+ pthread_cleanup_push and pthread_cleanup_pop are macros and must always
+ be used in matching pairs at the same nesting level of braces. */
+# define pthread_cleanup_push(routine, arg) \
+ do { \
+ __pthread_cleanup_class __clframe (routine, arg)
+
+/* Remove a cleanup handler installed by the matching pthread_cleanup_push.
+ If EXECUTE is non-zero, the handler function is called. */
+# define pthread_cleanup_pop(execute) \
+ __clframe.__setdoit (execute); \
+ } while (0)
+
+# ifdef __USE_GNU
+/* Install a cleanup handler as pthread_cleanup_push does, but also
+ saves the current cancellation type and sets it to deferred
+ cancellation. */
+# define pthread_cleanup_push_defer_np(routine, arg) \
+ do { \
+ __pthread_cleanup_class __clframe (routine, arg); \
+ __clframe.__defer ()
+
+/* Remove a cleanup handler as pthread_cleanup_pop does, but also
+ restores the cancellation type that was in effect when the matching
+ pthread_cleanup_push_defer was called. */
+# define pthread_cleanup_pop_restore_np(execute) \
+ __clframe.__restore (); \
+ __clframe.__setdoit (execute); \
+ } while (0)
+# endif
+# else
+/* Function called to call the cleanup handler. As an extern inline
+ function the compiler is free to decide whether to inline it where
+ needed or fall back on the copy which must exist somewhere
+ else. */
+__extern_inline void
+__pthread_cleanup_routine (struct __pthread_cleanup_frame *__frame)
+{
+ if (__frame->__do_it)
+ __frame->__cancel_routine (__frame->__cancel_arg);
+}
+
+/* Install a cleanup handler: ROUTINE will be called with arguments ARG
+ when the thread is canceled or calls pthread_exit. ROUTINE will also
+ be called with arguments ARG when the matching pthread_cleanup_pop
+ is executed with non-zero EXECUTE argument.
+
+ pthread_cleanup_push and pthread_cleanup_pop are macros and must always
+ be used in matching pairs at the same nesting level of braces. */
+# define pthread_cleanup_push(routine, arg) \
+ do { \
+ struct __pthread_cleanup_frame __clframe \
+ __attribute__ ((__cleanup__ (__pthread_cleanup_routine))) \
+ = { .__cancel_routine = (routine), .__cancel_arg = (arg), \
+ .__do_it = 1 };
+
+/* Remove a cleanup handler installed by the matching pthread_cleanup_push.
+ If EXECUTE is non-zero, the handler function is called. */
+# define pthread_cleanup_pop(execute) \
+ __clframe.__do_it = (execute); \
+ } while (0)
+
+# ifdef __USE_GNU
+/* Install a cleanup handler as pthread_cleanup_push does, but also
+ saves the current cancellation type and sets it to deferred
+ cancellation. */
+# define pthread_cleanup_push_defer_np(routine, arg) \
+ do { \
+ struct __pthread_cleanup_frame __clframe \
+ __attribute__ ((__cleanup__ (__pthread_cleanup_routine))) \
+ = { .__cancel_routine = (routine), .__cancel_arg = (arg), \
+ .__do_it = 1 }; \
+ (void) pthread_setcanceltype (PTHREAD_CANCEL_DEFERRED, \
+ &__clframe.__cancel_type)
+
+/* Remove a cleanup handler as pthread_cleanup_pop does, but also
+ restores the cancellation type that was in effect when the matching
+ pthread_cleanup_push_defer was called. */
+# define pthread_cleanup_pop_restore_np(execute) \
+ (void) pthread_setcanceltype (__clframe.__cancel_type, NULL); \
+ __clframe.__do_it = (execute); \
+ } while (0)
+# endif
+# endif
+#else
+/* Install a cleanup handler: ROUTINE will be called with arguments ARG
+ when the thread is canceled or calls pthread_exit. ROUTINE will also
+ be called with arguments ARG when the matching pthread_cleanup_pop
+ is executed with non-zero EXECUTE argument.
+
+ pthread_cleanup_push and pthread_cleanup_pop are macros and must always
+ be used in matching pairs at the same nesting level of braces. */
+# define pthread_cleanup_push(routine, arg) \
+ do { \
+ __pthread_unwind_buf_t __cancel_buf; \
+ void (*__cancel_routine) (void *) = (routine); \
+ void *__cancel_arg = (arg); \
+ int __not_first_call = __sigsetjmp ((struct __jmp_buf_tag *) (void *) \
+ __cancel_buf.__cancel_jmp_buf, 0); \
+ if (__glibc_unlikely (__not_first_call)) \
+ { \
+ __cancel_routine (__cancel_arg); \
+ __pthread_unwind_next (&__cancel_buf); \
+ /* NOTREACHED */ \
+ } \
+ \
+ __pthread_register_cancel (&__cancel_buf); \
+ do {
+extern void __pthread_register_cancel (__pthread_unwind_buf_t *__buf)
+ __cleanup_fct_attribute;
+
+/* Remove a cleanup handler installed by the matching pthread_cleanup_push.
+ If EXECUTE is non-zero, the handler function is called. */
+# define pthread_cleanup_pop(execute) \
+ do { } while (0);/* Empty to allow label before pthread_cleanup_pop. */\
+ } while (0); \
+ __pthread_unregister_cancel (&__cancel_buf); \
+ if (execute) \
+ __cancel_routine (__cancel_arg); \
+ } while (0)
+extern void __pthread_unregister_cancel (__pthread_unwind_buf_t *__buf)
+ __cleanup_fct_attribute;
+
+# ifdef __USE_GNU
+/* Install a cleanup handler as pthread_cleanup_push does, but also
+ saves the current cancellation type and sets it to deferred
+ cancellation. */
+# define pthread_cleanup_push_defer_np(routine, arg) \
+ do { \
+ __pthread_unwind_buf_t __cancel_buf; \
+ void (*__cancel_routine) (void *) = (routine); \
+ void *__cancel_arg = (arg); \
+ int __not_first_call = __sigsetjmp ((struct __jmp_buf_tag *) (void *) \
+ __cancel_buf.__cancel_jmp_buf, 0); \
+ if (__glibc_unlikely (__not_first_call)) \
+ { \
+ __cancel_routine (__cancel_arg); \
+ __pthread_unwind_next (&__cancel_buf); \
+ /* NOTREACHED */ \
+ } \
+ \
+ __pthread_register_cancel_defer (&__cancel_buf); \
+ do {
+extern void __pthread_register_cancel_defer (__pthread_unwind_buf_t *__buf)
+ __cleanup_fct_attribute;
+
+/* Remove a cleanup handler as pthread_cleanup_pop does, but also
+ restores the cancellation type that was in effect when the matching
+ pthread_cleanup_push_defer was called. */
+# define pthread_cleanup_pop_restore_np(execute) \
+ do { } while (0);/* Empty to allow label before pthread_cleanup_pop. */\
+ } while (0); \
+ __pthread_unregister_cancel_restore (&__cancel_buf); \
+ if (execute) \
+ __cancel_routine (__cancel_arg); \
+ } while (0)
+extern void __pthread_unregister_cancel_restore (__pthread_unwind_buf_t *__buf)
+ __cleanup_fct_attribute;
+# endif
+
+/* Internal interface to initiate cleanup. */
+extern void __pthread_unwind_next (__pthread_unwind_buf_t *__buf)
+ __cleanup_fct_attribute __attribute__ ((__noreturn__))
+# ifndef SHARED
+ __attribute__ ((__weak__))
+# endif
+ ;
+#endif
+
+/* Function used in the macros. */
+struct __jmp_buf_tag;
+extern int __sigsetjmp (struct __jmp_buf_tag *__env, int __savemask) __THROWNL;
+
+
+/* Mutex handling. */
+
+/* Initialize a mutex. */
+extern int pthread_mutex_init (pthread_mutex_t *__mutex,
+ const pthread_mutexattr_t *__mutexattr)
+ __THROW __nonnull ((1));
+
+/* Destroy a mutex. */
+extern int pthread_mutex_destroy (pthread_mutex_t *__mutex)
+ __THROW __nonnull ((1));
+
+/* Try locking a mutex. */
+extern int pthread_mutex_trylock (pthread_mutex_t *__mutex)
+ __THROWNL __nonnull ((1));
+
+/* Lock a mutex. */
+extern int pthread_mutex_lock (pthread_mutex_t *__mutex)
+ __THROWNL __nonnull ((1));
+
+#ifdef __USE_XOPEN2K
+/* Wait until lock becomes available, or specified time passes. */
+extern int pthread_mutex_timedlock (pthread_mutex_t *__restrict __mutex,
+ const struct timespec *__restrict
+ __abstime) __THROWNL __nonnull ((1, 2));
+#endif
+
+#ifdef __USE_GNU
+extern int pthread_mutex_clocklock (pthread_mutex_t *__restrict __mutex,
+ clockid_t __clockid,
+ const struct timespec *__restrict
+ __abstime) __THROWNL __nonnull ((1, 3));
+#endif
+
+/* Unlock a mutex. */
+extern int pthread_mutex_unlock (pthread_mutex_t *__mutex)
+ __THROWNL __nonnull ((1));
+
+
+/* Get the priority ceiling of MUTEX. */
+extern int pthread_mutex_getprioceiling (const pthread_mutex_t *
+ __restrict __mutex,
+ int *__restrict __prioceiling)
+ __THROW __nonnull ((1, 2));
+
+/* Set the priority ceiling of MUTEX to PRIOCEILING, return old
+ priority ceiling value in *OLD_CEILING. */
+extern int pthread_mutex_setprioceiling (pthread_mutex_t *__restrict __mutex,
+ int __prioceiling,
+ int *__restrict __old_ceiling)
+ __THROW __nonnull ((1, 3));
+
+
+#ifdef __USE_XOPEN2K8
+/* Declare the state protected by MUTEX as consistent. */
+extern int pthread_mutex_consistent (pthread_mutex_t *__mutex)
+ __THROW __nonnull ((1));
+# ifdef __USE_GNU
+extern int pthread_mutex_consistent_np (pthread_mutex_t *__mutex)
+ __THROW __nonnull ((1));
+# endif
+#endif
+
+
+/* Functions for handling mutex attributes. */
+
+/* Initialize mutex attribute object ATTR with default attributes
+ (kind is PTHREAD_MUTEX_TIMED_NP). */
+extern int pthread_mutexattr_init (pthread_mutexattr_t *__attr)
+ __THROW __nonnull ((1));
+
+/* Destroy mutex attribute object ATTR. */
+extern int pthread_mutexattr_destroy (pthread_mutexattr_t *__attr)
+ __THROW __nonnull ((1));
+
+/* Get the process-shared flag of the mutex attribute ATTR. */
+extern int pthread_mutexattr_getpshared (const pthread_mutexattr_t *
+ __restrict __attr,
+ int *__restrict __pshared)
+ __THROW __nonnull ((1, 2));
+
+/* Set the process-shared flag of the mutex attribute ATTR. */
+extern int pthread_mutexattr_setpshared (pthread_mutexattr_t *__attr,
+ int __pshared)
+ __THROW __nonnull ((1));
+
+#if defined __USE_UNIX98 || defined __USE_XOPEN2K8
+/* Return in *KIND the mutex kind attribute in *ATTR. */
+extern int pthread_mutexattr_gettype (const pthread_mutexattr_t *__restrict
+ __attr, int *__restrict __kind)
+ __THROW __nonnull ((1, 2));
+
+/* Set the mutex kind attribute in *ATTR to KIND (either PTHREAD_MUTEX_NORMAL,
+ PTHREAD_MUTEX_RECURSIVE, PTHREAD_MUTEX_ERRORCHECK, or
+ PTHREAD_MUTEX_DEFAULT). */
+extern int pthread_mutexattr_settype (pthread_mutexattr_t *__attr, int __kind)
+ __THROW __nonnull ((1));
+#endif
+
+/* Return in *PROTOCOL the mutex protocol attribute in *ATTR. */
+extern int pthread_mutexattr_getprotocol (const pthread_mutexattr_t *
+ __restrict __attr,
+ int *__restrict __protocol)
+ __THROW __nonnull ((1, 2));
+
+/* Set the mutex protocol attribute in *ATTR to PROTOCOL (either
+ PTHREAD_PRIO_NONE, PTHREAD_PRIO_INHERIT, or PTHREAD_PRIO_PROTECT). */
+extern int pthread_mutexattr_setprotocol (pthread_mutexattr_t *__attr,
+ int __protocol)
+ __THROW __nonnull ((1));
+
+/* Return in *PRIOCEILING the mutex prioceiling attribute in *ATTR. */
+extern int pthread_mutexattr_getprioceiling (const pthread_mutexattr_t *
+ __restrict __attr,
+ int *__restrict __prioceiling)
+ __THROW __nonnull ((1, 2));
+
+/* Set the mutex prioceiling attribute in *ATTR to PRIOCEILING. */
+extern int pthread_mutexattr_setprioceiling (pthread_mutexattr_t *__attr,
+ int __prioceiling)
+ __THROW __nonnull ((1));
+
+#ifdef __USE_XOPEN2K
+/* Get the robustness flag of the mutex attribute ATTR. */
+extern int pthread_mutexattr_getrobust (const pthread_mutexattr_t *__attr,
+ int *__robustness)
+ __THROW __nonnull ((1, 2));
+# ifdef __USE_GNU
+extern int pthread_mutexattr_getrobust_np (const pthread_mutexattr_t *__attr,
+ int *__robustness)
+ __THROW __nonnull ((1, 2));
+# endif
+
+/* Set the robustness flag of the mutex attribute ATTR. */
+extern int pthread_mutexattr_setrobust (pthread_mutexattr_t *__attr,
+ int __robustness)
+ __THROW __nonnull ((1));
+# ifdef __USE_GNU
+extern int pthread_mutexattr_setrobust_np (pthread_mutexattr_t *__attr,
+ int __robustness)
+ __THROW __nonnull ((1));
+# endif
+#endif
+
+
+#if defined __USE_UNIX98 || defined __USE_XOPEN2K
+/* Functions for handling read-write locks. */
+
+/* Initialize read-write lock RWLOCK using attributes ATTR, or use
+ the default values if the latter is NULL. */
+extern int pthread_rwlock_init (pthread_rwlock_t *__restrict __rwlock,
+ const pthread_rwlockattr_t *__restrict
+ __attr) __THROW __nonnull ((1));
+
+/* Destroy read-write lock RWLOCK. */
+extern int pthread_rwlock_destroy (pthread_rwlock_t *__rwlock)
+ __THROW __nonnull ((1));
+
+/* Acquire read lock for RWLOCK. */
+extern int pthread_rwlock_rdlock (pthread_rwlock_t *__rwlock)
+ __THROWNL __nonnull ((1));
+
+/* Try to acquire read lock for RWLOCK. */
+extern int pthread_rwlock_tryrdlock (pthread_rwlock_t *__rwlock)
+ __THROWNL __nonnull ((1));
+
+# ifdef __USE_XOPEN2K
+/* Try to acquire read lock for RWLOCK or return after specified time. */
+extern int pthread_rwlock_timedrdlock (pthread_rwlock_t *__restrict __rwlock,
+ const struct timespec *__restrict
+ __abstime) __THROWNL __nonnull ((1, 2));
+# endif
+
+# ifdef __USE_GNU
+extern int pthread_rwlock_clockrdlock (pthread_rwlock_t *__restrict __rwlock,
+ clockid_t __clockid,
+ const struct timespec *__restrict
+ __abstime) __THROWNL __nonnull ((1, 3));
+# endif
+
+/* Acquire write lock for RWLOCK. */
+extern int pthread_rwlock_wrlock (pthread_rwlock_t *__rwlock)
+ __THROWNL __nonnull ((1));
+
+/* Try to acquire write lock for RWLOCK. */
+extern int pthread_rwlock_trywrlock (pthread_rwlock_t *__rwlock)
+ __THROWNL __nonnull ((1));
+
+# ifdef __USE_XOPEN2K
+/* Try to acquire write lock for RWLOCK or return after specified time. */
+extern int pthread_rwlock_timedwrlock (pthread_rwlock_t *__restrict __rwlock,
+ const struct timespec *__restrict
+ __abstime) __THROWNL __nonnull ((1, 2));
+# endif
+
+# ifdef __USE_GNU
+extern int pthread_rwlock_clockwrlock (pthread_rwlock_t *__restrict __rwlock,
+ clockid_t __clockid,
+ const struct timespec *__restrict
+ __abstime) __THROWNL __nonnull ((1, 3));
+# endif
+
+/* Unlock RWLOCK. */
+extern int pthread_rwlock_unlock (pthread_rwlock_t *__rwlock)
+ __THROWNL __nonnull ((1));
+
+
+/* Functions for handling read-write lock attributes. */
+
+/* Initialize attribute object ATTR with default values. */
+extern int pthread_rwlockattr_init (pthread_rwlockattr_t *__attr)
+ __THROW __nonnull ((1));
+
+/* Destroy attribute object ATTR. */
+extern int pthread_rwlockattr_destroy (pthread_rwlockattr_t *__attr)
+ __THROW __nonnull ((1));
+
+/* Return current setting of process-shared attribute of ATTR in PSHARED. */
+extern int pthread_rwlockattr_getpshared (const pthread_rwlockattr_t *
+ __restrict __attr,
+ int *__restrict __pshared)
+ __THROW __nonnull ((1, 2));
+
+/* Set process-shared attribute of ATTR to PSHARED. */
+extern int pthread_rwlockattr_setpshared (pthread_rwlockattr_t *__attr,
+ int __pshared)
+ __THROW __nonnull ((1));
+
+/* Return current setting of reader/writer preference. */
+extern int pthread_rwlockattr_getkind_np (const pthread_rwlockattr_t *
+ __restrict __attr,
+ int *__restrict __pref)
+ __THROW __nonnull ((1, 2));
+
+/* Set reader/writer preference. */
+extern int pthread_rwlockattr_setkind_np (pthread_rwlockattr_t *__attr,
+ int __pref) __THROW __nonnull ((1));
+#endif
+
+
+/* Functions for handling conditional variables. */
+
+/* Initialize condition variable COND using attributes ATTR, or use
+ the default values if the latter is NULL. */
+extern int pthread_cond_init (pthread_cond_t *__restrict __cond,
+ const pthread_condattr_t *__restrict __cond_attr)
+ __THROW __nonnull ((1));
+
+/* Destroy condition variable COND. */
+extern int pthread_cond_destroy (pthread_cond_t *__cond)
+ __THROW __nonnull ((1));
+
+/* Wake up one thread waiting for condition variable COND. */
+extern int pthread_cond_signal (pthread_cond_t *__cond)
+ __THROWNL __nonnull ((1));
+
+/* Wake up all threads waiting for condition variable COND. */
+extern int pthread_cond_broadcast (pthread_cond_t *__cond)
+ __THROWNL __nonnull ((1));
+
+/* Wait for condition variable COND to be signaled or broadcast.
+ MUTEX is assumed to be locked before.
+
+ This function is a cancellation point and therefore not marked with
+ __THROW. */
+extern int pthread_cond_wait (pthread_cond_t *__restrict __cond,
+ pthread_mutex_t *__restrict __mutex)
+ __nonnull ((1, 2));
+
+/* Wait for condition variable COND to be signaled or broadcast until
+ ABSTIME. MUTEX is assumed to be locked before. ABSTIME is an
+ absolute time specification; zero is the beginning of the epoch
+ (00:00:00 GMT, January 1, 1970).
+
+ This function is a cancellation point and therefore not marked with
+ __THROW. */
+extern int pthread_cond_timedwait (pthread_cond_t *__restrict __cond,
+ pthread_mutex_t *__restrict __mutex,
+ const struct timespec *__restrict __abstime)
+ __nonnull ((1, 2, 3));
+
+# ifdef __USE_GNU
+/* Wait for condition variable COND to be signaled or broadcast until
+ ABSTIME measured by the specified clock. MUTEX is assumed to be
+ locked before. CLOCK is the clock to use. ABSTIME is an absolute
+ time specification against CLOCK's epoch.
+
+ This function is a cancellation point and therefore not marked with
+ __THROW. */
+extern int pthread_cond_clockwait (pthread_cond_t *__restrict __cond,
+ pthread_mutex_t *__restrict __mutex,
+ __clockid_t __clock_id,
+ const struct timespec *__restrict __abstime)
+ __nonnull ((1, 2, 4));
+# endif
+
+/* Functions for handling condition variable attributes. */
+
+/* Initialize condition variable attribute ATTR. */
+extern int pthread_condattr_init (pthread_condattr_t *__attr)
+ __THROW __nonnull ((1));
+
+/* Destroy condition variable attribute ATTR. */
+extern int pthread_condattr_destroy (pthread_condattr_t *__attr)
+ __THROW __nonnull ((1));
+
+/* Get the process-shared flag of the condition variable attribute ATTR. */
+extern int pthread_condattr_getpshared (const pthread_condattr_t *
+ __restrict __attr,
+ int *__restrict __pshared)
+ __THROW __nonnull ((1, 2));
+
+/* Set the process-shared flag of the condition variable attribute ATTR. */
+extern int pthread_condattr_setpshared (pthread_condattr_t *__attr,
+ int __pshared) __THROW __nonnull ((1));
+
+#ifdef __USE_XOPEN2K
+/* Get the clock selected for the condition variable attribute ATTR. */
+extern int pthread_condattr_getclock (const pthread_condattr_t *
+ __restrict __attr,
+ __clockid_t *__restrict __clock_id)
+ __THROW __nonnull ((1, 2));
+
+/* Set the clock selected for the condition variable attribute ATTR. */
+extern int pthread_condattr_setclock (pthread_condattr_t *__attr,
+ __clockid_t __clock_id)
+ __THROW __nonnull ((1));
+#endif
+
+
+#ifdef __USE_XOPEN2K
+/* Functions to handle spinlocks. */
+
+/* Initialize the spinlock LOCK. If PSHARED is nonzero the spinlock can
+ be shared between different processes. */
+extern int pthread_spin_init (pthread_spinlock_t *__lock, int __pshared)
+ __THROW __nonnull ((1));
+
+/* Destroy the spinlock LOCK. */
+extern int pthread_spin_destroy (pthread_spinlock_t *__lock)
+ __THROW __nonnull ((1));
+
+/* Wait until spinlock LOCK is retrieved. */
+extern int pthread_spin_lock (pthread_spinlock_t *__lock)
+ __THROWNL __nonnull ((1));
+
+/* Try to lock spinlock LOCK. */
+extern int pthread_spin_trylock (pthread_spinlock_t *__lock)
+ __THROWNL __nonnull ((1));
+
+/* Release spinlock LOCK. */
+extern int pthread_spin_unlock (pthread_spinlock_t *__lock)
+ __THROWNL __nonnull ((1));
+
+
+/* Functions to handle barriers. */
+
+/* Initialize BARRIER with the attributes in ATTR. The barrier is
+ opened when COUNT waiters arrived. */
+extern int pthread_barrier_init (pthread_barrier_t *__restrict __barrier,
+ const pthread_barrierattr_t *__restrict
+ __attr, unsigned int __count)
+ __THROW __nonnull ((1));
+
+/* Destroy a previously dynamically initialized barrier BARRIER. */
+extern int pthread_barrier_destroy (pthread_barrier_t *__barrier)
+ __THROW __nonnull ((1));
+
+/* Wait on barrier BARRIER. */
+extern int pthread_barrier_wait (pthread_barrier_t *__barrier)
+ __THROWNL __nonnull ((1));
+
+
+/* Initialize barrier attribute ATTR. */
+extern int pthread_barrierattr_init (pthread_barrierattr_t *__attr)
+ __THROW __nonnull ((1));
+
+/* Destroy previously dynamically initialized barrier attribute ATTR. */
+extern int pthread_barrierattr_destroy (pthread_barrierattr_t *__attr)
+ __THROW __nonnull ((1));
+
+/* Get the process-shared flag of the barrier attribute ATTR. */
+extern int pthread_barrierattr_getpshared (const pthread_barrierattr_t *
+ __restrict __attr,
+ int *__restrict __pshared)
+ __THROW __nonnull ((1, 2));
+
+/* Set the process-shared flag of the barrier attribute ATTR. */
+extern int pthread_barrierattr_setpshared (pthread_barrierattr_t *__attr,
+ int __pshared)
+ __THROW __nonnull ((1));
+#endif
+
+
+/* Functions for handling thread-specific data. */
+
+/* Create a key value identifying a location in the thread-specific
+ data area. Each thread maintains a distinct thread-specific data
+ area. DESTR_FUNCTION, if non-NULL, is called with the value
+ associated to that key when the key is destroyed.
+ DESTR_FUNCTION is not called if the value associated is NULL when
+ the key is destroyed. */
+extern int pthread_key_create (pthread_key_t *__key,
+ void (*__destr_function) (void *))
+ __THROW __nonnull ((1));
+
+/* Destroy KEY. */
+extern int pthread_key_delete (pthread_key_t __key) __THROW;
+
+/* Return current value of the thread-specific data slot identified by KEY. */
+extern void *pthread_getspecific (pthread_key_t __key) __THROW;
+
+/* Store POINTER in the thread-specific data slot identified by KEY. */
+extern int pthread_setspecific (pthread_key_t __key,
+ const void *__pointer) __THROW ;
+
+
+#ifdef __USE_XOPEN2K
+/* Get ID of CPU-time clock for thread THREAD_ID. */
+extern int pthread_getcpuclockid (pthread_t __thread_id,
+ __clockid_t *__clock_id)
+ __THROW __nonnull ((2));
+#endif
+
+
+/* Install handlers to be called when a new process is created with FORK.
+ The PREPARE handler is called in the parent process just before performing
+ FORK. The PARENT handler is called in the parent process just after FORK.
+ The CHILD handler is called in the child process. Each of the three
+ handlers can be NULL, meaning that no handler needs to be called at that
+ point.
+ PTHREAD_ATFORK can be called several times, in which case the PREPARE
+ handlers are called in LIFO order (last added with PTHREAD_ATFORK,
+ first called before FORK), and the PARENT and CHILD handlers are called
+ in FIFO (first added, first called). */
+
+extern int pthread_atfork (void (*__prepare) (void),
+ void (*__parent) (void),
+ void (*__child) (void)) __THROW;
+
+
+#ifdef __USE_EXTERN_INLINES
+/* Optimizations. */
+__extern_inline int
+__NTH (pthread_equal (pthread_t __thread1, pthread_t __thread2))
+{
+ return __thread1 == __thread2;
+}
+#endif
+
+__END_DECLS
+
+#endif /* pthread.h */
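
[Editor's note -- illustrative only, not part of the patch: the header above carries three implementations of pthread_cleanup_push/pthread_cleanup_pop (a C++ class, a __cleanup__-attribute frame, and a __sigsetjmp-based fallback). All three expand to an unbalanced do { ... } while (0) pair, which is why the macros must be used as matched pairs at the same brace nesting level. A minimal usage sketch (compile with -pthread):

  #include <pthread.h>
  #include <stddef.h>

  static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

  static void
  unlock_cleanup (void *arg)
  {
    /* Runs if the thread is cancelled inside the protected region, or
       when pthread_cleanup_pop is called with a non-zero argument.  */
    pthread_mutex_unlock ((pthread_mutex_t *) arg);
  }

  static void *
  worker (void *arg)
  {
    (void) arg;
    pthread_mutex_lock (&lock);
    pthread_cleanup_push (unlock_cleanup, &lock);
    /* ... cancellable work under the lock ... */
    pthread_cleanup_pop (1);   /* 1 = run the handler on the normal path too.  */
    return NULL;
  }

  int
  main (void)
  {
    pthread_t th;
    pthread_create (&th, NULL, worker, NULL);
    pthread_join (th, NULL);
    return 0;
  }
]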
diff --git a/nptl_2_17/pthread_cond_broadcast_2_17.c b/nptl_2_17/pthread_cond_broadcast_2_17.c
new file mode 100644
index 00000000..e1d5f332
--- /dev/null
+++ b/nptl_2_17/pthread_cond_broadcast_2_17.c
@@ -0,0 +1,94 @@
+/* Copyright (C) 2003-2020 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Martin Schwidefsky <schwidefsky@de.ibm.com>, 2003.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+#include <pthread_2_17.h>
+#include <pthreadP_2_17.h>
+#include <endian.h>
+#include <errno.h>
+#include <sysdep.h>
+#include <lowlevellock.h>
+#include <stap-probe.h>
+#include <atomic.h>
+
+#include <shlib-compat.h>
+
+#include <kernel-features.h>
+
+int
+__pthread_cond_broadcast (pthread_cond_t *cond)
+{
+ LIBC_PROBE (cond_broadcast, 1, cond);
+
+ int pshared = (cond->__data.__mutex == (void *) ~0l)
+ ? LLL_SHARED : LLL_PRIVATE;
+ /* Make sure we are alone. */
+ lll_lock (cond->__data.__lock, pshared);
+
+ /* Are there any waiters to be woken? */
+ if (cond->__data.__total_seq > cond->__data.__wakeup_seq)
+
+ {
+ /* Yes. Mark them all as woken. */
+ cond->__data.__wakeup_seq = cond->__data.__total_seq;
+ cond->__data.__woken_seq = cond->__data.__total_seq;
+ cond->__data.__futex = (unsigned int) cond->__data.__total_seq * 2;
+ int futex_val = cond->__data.__futex;
+ /* Signal that a broadcast happened. */
+ ++cond->__data.__broadcast_seq;
+
+ /* We are done. */
+ lll_unlock (cond->__data.__lock, pshared);
+
+ /* Wake everybody. */
+ pthread_mutex_t *mut = (pthread_mutex_t *) cond->__data.__mutex;
+
+ /* Do not use requeue for pshared condvars. */
+ if (mut == (void *) ~0l
+ || PTHREAD_MUTEX_PSHARED (mut) & PTHREAD_MUTEX_PSHARED_BIT)
+ goto wake_all;
+
+#if (defined lll_futex_cmp_requeue_pi \
+ && defined __ASSUME_REQUEUE_PI)
+ if (USE_REQUEUE_PI (mut))
+ {
+ if (lll_futex_cmp_requeue_pi (&cond->__data.__futex, 1, INT_MAX,
+ &mut->__data.__lock, futex_val,
+ LLL_PRIVATE) == 0)
+ return 0;
+ }
+ else
+#endif
+ /* lll_futex_requeue returns 0 for success and non-zero
+ for errors. */
+ if (!__builtin_expect (lll_futex_requeue (&cond->__data.__futex, 1,
+ INT_MAX, &mut->__data.__lock,
+ futex_val, LLL_PRIVATE), 0))
+ return 0;
+
+wake_all:
+ lll_futex_wake (&cond->__data.__futex, INT_MAX, pshared);
+ return 0;
+ }
+ /* We are done. */
+ lll_unlock (cond->__data.__lock, pshared);
+
+ return 0;
+}
+
+versioned_symbol (libpthread, __pthread_cond_broadcast, pthread_cond_broadcast,
+ GLIBC_2_3_2);
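
[Editor's note -- illustrative only, not part of the patch: in this 2.17-era algorithm the broadcaster bumps __wakeup_seq/__woken_seq under the internal condvar lock and then requeues all but one waiter directly onto the associated mutex, so most waiters only run once the mutex is available. Application-level usage is the same whichever implementation is preloaded; waiters must still re-check their predicate in a loop:

  #include <pthread.h>
  #include <stdbool.h>

  static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
  static pthread_cond_t c = PTHREAD_COND_INITIALIZER;
  static bool ready;                  /* protected by m */

  void
  wait_for_ready (void)
  {
    pthread_mutex_lock (&m);
    while (!ready)                    /* guard against spurious wakeups */
      pthread_cond_wait (&c, &m);
    pthread_mutex_unlock (&m);
  }

  void
  announce_ready (void)
  {
    pthread_mutex_lock (&m);
    ready = true;
    pthread_mutex_unlock (&m);
    pthread_cond_broadcast (&c);      /* wake every waiter */
  }
]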
|
|
|
|
|
diff --git a/nptl_2_17/pthread_cond_destroy_2_17.c b/nptl_2_17/pthread_cond_destroy_2_17.c
new file mode 100644
index 00000000..62c8ae72
--- /dev/null
+++ b/nptl_2_17/pthread_cond_destroy_2_17.c
@@ -0,0 +1,85 @@
+/* Copyright (C) 2002-2020 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include "pthreadP_2_17.h"
+#include <errno.h>
+#include <shlib-compat.h>
+#include <stap-probe.h>
+int
+__pthread_cond_destroy (pthread_cond_t *cond)
+{
+  int pshared = (cond->__data.__mutex == (void *) ~0l)
+		? LLL_SHARED : LLL_PRIVATE;
+
+  LIBC_PROBE (cond_destroy, 1, cond);
+
+  /* Make sure we are alone.  */
+  lll_lock (cond->__data.__lock, pshared);
+
+  if (cond->__data.__total_seq > cond->__data.__wakeup_seq)
+    {
+      /* If there are still some waiters which have not been
+	 woken up, this is an application bug.  */
+      lll_unlock (cond->__data.__lock, pshared);
+      return EBUSY;
+    }
+
+  /* Tell pthread_cond_*wait that this condvar is being destroyed.  */
+  cond->__data.__total_seq = -1ULL;
+
+  /* If there are waiters which have been already signalled or
+     broadcasted, but still are using the pthread_cond_t structure,
+     pthread_cond_destroy needs to wait for them.  */
+  unsigned int nwaiters = cond->__data.__nwaiters;
+
+  if (nwaiters >= (1 << COND_NWAITERS_SHIFT))
+
+    {
+      /* Wake everybody on the associated mutex in case there are
+	 threads that have been requeued to it.
+	 Without this, pthread_cond_destroy could block potentially
+	 for a long time or forever, as it would depend on other
+	 threads using the mutex.
+	 When all threads waiting on the mutex are woken up, pthread_cond_wait
+	 only waits for threads to acquire and release the internal
+	 condvar lock.  */
+      if (cond->__data.__mutex != NULL
+	  && cond->__data.__mutex != (void *) ~0l)
+	{
+	  pthread_mutex_t *mut = (pthread_mutex_t *) cond->__data.__mutex;
+	  lll_futex_wake (&mut->__data.__lock, INT_MAX,
+			  PTHREAD_MUTEX_PSHARED (mut));
+	}
+
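+      /* Sleep on __nwaiters until the waiter count drops to zero; the low
+	 COND_NWAITERS_SHIFT bits hold the clock ID, so the count lives in
+	 the bits above them.  */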
+      do
+	{
+	  lll_unlock (cond->__data.__lock, pshared);
+
+	  lll_futex_wait (&cond->__data.__nwaiters, nwaiters, pshared);
+
+	  lll_lock (cond->__data.__lock, pshared);
+
+	  nwaiters = cond->__data.__nwaiters;
+	}
+      while (nwaiters >= (1 << COND_NWAITERS_SHIFT));
+    }
+
+  return 0;
+}
+versioned_symbol (libpthread, __pthread_cond_destroy,
+		  pthread_cond_destroy, GLIBC_2_3_2);
diff --git a/nptl_2_17/pthread_cond_init_2_17.c b/nptl_2_17/pthread_cond_init_2_17.c
new file mode 100644
index 00000000..7acaa86b
--- /dev/null
+++ b/nptl_2_17/pthread_cond_init_2_17.c
@@ -0,0 +1,50 @@
+/* Copyright (C) 2002-2020 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include "pthreadP_2_17.h"
+#include <shlib-compat.h>
+#include <stap-probe.h>
+
+
+int
+__pthread_cond_init (pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
+{
+  ASSERT_TYPE_SIZE (pthread_cond_t, __SIZEOF_PTHREAD_COND_T);
+
+  struct pthread_condattr *icond_attr = (struct pthread_condattr *) cond_attr;
+
+  cond->__data.__lock = LLL_LOCK_INITIALIZER;
+  cond->__data.__futex = 0;
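+  /* The attribute packs the pshared flag in bit 0 and the clock ID in the
+     bits above it; the clock ID is parked in the low bits of __nwaiters.  */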
+  cond->__data.__nwaiters = (icond_attr != NULL
+			     ? ((icond_attr->value >> 1)
+				& ((1 << COND_NWAITERS_SHIFT) - 1))
+			     : CLOCK_REALTIME);
+  cond->__data.__total_seq = 0;
+  cond->__data.__wakeup_seq = 0;
+  cond->__data.__woken_seq = 0;
+  cond->__data.__mutex = (icond_attr == NULL || (icond_attr->value & 1) == 0
+			  ? NULL : (void *) ~0l);
+  cond->__data.__broadcast_seq = 0;
+
+
+  LIBC_PROBE (cond_init, 2, cond, cond_attr);
+
+  return 0;
+}
+versioned_symbol (libpthread, __pthread_cond_init,
+		  pthread_cond_init, GLIBC_2_3_2);
diff --git a/nptl_2_17/pthread_cond_signal_2_17.c b/nptl_2_17/pthread_cond_signal_2_17.c
new file mode 100644
index 00000000..a8053d33
--- /dev/null
+++ b/nptl_2_17/pthread_cond_signal_2_17.c
@@ -0,0 +1,82 @@
+/* Copyright (C) 2003-2020 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Martin Schwidefsky <schwidefsky@de.ibm.com>, 2003.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include <pthread_2_17.h>
+#include <pthreadP_2_17.h>
+#include <endian.h>
+#include <errno.h>
+#include <sysdep.h>
+#include <lowlevellock.h>
+
+#include <shlib-compat.h>
+#include <kernel-features.h>
+#include <stap-probe.h>
+
+
+int
+__pthread_cond_signal (pthread_cond_t *cond)
+{
+  int pshared = (cond->__data.__mutex == (void *) ~0l)
+		? LLL_SHARED : LLL_PRIVATE;
+
+  LIBC_PROBE (cond_signal, 1, cond);
+
+  /* Make sure we are alone.  */
+  lll_lock (cond->__data.__lock, pshared);
+
+  /* Are there any waiters to be woken?  */
+  if (cond->__data.__total_seq > cond->__data.__wakeup_seq)
+    {
+      /* Yes.  Mark one of them as woken.  */
+      ++cond->__data.__wakeup_seq;
+      ++cond->__data.__futex;
+
+#if (defined lll_futex_cmp_requeue_pi \
+     && defined __ASSUME_REQUEUE_PI)
+      pthread_mutex_t *mut = cond->__data.__mutex;
+
+      if (USE_REQUEUE_PI (mut)
+	  /* This can only really fail with an ENOSYS, since nobody can modify
+	     futex while we have the cond_lock.  */
+	  && lll_futex_cmp_requeue_pi (&cond->__data.__futex, 1, 0,
+				       &mut->__data.__lock,
+				       cond->__data.__futex, pshared) == 0)
+	{
+	  lll_unlock (cond->__data.__lock, pshared);
+	  return 0;
+	}
+      else
+#endif
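+	/* Where supported, lll_futex_wake_unlock wakes one waiter and
+	   releases the internal lock in a single futex operation.  */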
+	/* Wake one.  */
+	if (! __builtin_expect (lll_futex_wake_unlock (&cond->__data.__futex,
+						       1, 1,
+						       &cond->__data.__lock,
+						       pshared), 0))
+	  return 0;
+
+      /* Fallback if neither of them works.  */
+      lll_futex_wake (&cond->__data.__futex, 1, pshared);
+    }
+  /* We are done.  */
+  lll_unlock (cond->__data.__lock, pshared);
+
+  return 0;
+}
+
+versioned_symbol (libpthread, __pthread_cond_signal, pthread_cond_signal,
+		  GLIBC_2_3_2);
diff --git a/nptl_2_17/pthread_cond_timedwait_2_17.c b/nptl_2_17/pthread_cond_timedwait_2_17.c
new file mode 100644
index 00000000..965d51a1
--- /dev/null
+++ b/nptl_2_17/pthread_cond_timedwait_2_17.c
@@ -0,0 +1,268 @@
+/* Copyright (C) 2003-2016 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Martin Schwidefsky <schwidefsky@de.ibm.com>, 2003.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <pthread_2_17.h>
+#include <pthreadP_2_17.h>
+#include <endian.h>
+#include <errno.h>
+#include <sysdep.h>
+#include <lowlevellock.h>
+#include <sys/time.h>
+#include <kernel-features.h>
+
+#include <shlib-compat.h>
+
+#ifndef HAVE_CLOCK_GETTIME_VSYSCALL
+# undef INTERNAL_VSYSCALL
+# define INTERNAL_VSYSCALL INTERNAL_SYSCALL
+# undef INLINE_VSYSCALL
+# define INLINE_VSYSCALL INLINE_SYSCALL
+#else
+# include <libc-vdso.h>
+#endif
+
+/* Cleanup handler, defined in pthread_cond_wait.c.  */
+extern void __condvar_cleanup (void *arg)
+     __attribute__ ((visibility ("hidden")));
+
+struct _condvar_cleanup_buffer
+{
+  int oldtype;
+  pthread_cond_t *cond;
+  pthread_mutex_t *mutex;
+  unsigned int bc_seq;
+};
+
+int
+__pthread_cond_timedwait (pthread_cond_t *cond, pthread_mutex_t *mutex,
+			  const struct timespec *abstime)
+{
+  struct _pthread_cleanup_buffer buffer;
+  struct _condvar_cleanup_buffer cbuffer;
+  int result = 0;
+
+  /* Catch invalid parameters.  */
+  if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
+    return EINVAL;
+
+  int pshared = (cond->__data.__mutex == (void *) ~0l)
+		? LLL_SHARED : LLL_PRIVATE;
+
+#if (defined lll_futex_timed_wait_requeue_pi \
+     && defined __ASSUME_REQUEUE_PI)
+  int pi_flag = 0;
+#endif
+
+  /* Make sure we are alone.  */
+  lll_lock (cond->__data.__lock, pshared);
+
+  /* Now we can release the mutex.  */
+  int err = __pthread_mutex_unlock_usercnt (mutex, 0);
+  if (err)
+    {
+      lll_unlock (cond->__data.__lock, pshared);
+      return err;
+    }
+
+  /* We have one new user of the condvar.  */
+  ++cond->__data.__total_seq;
+  ++cond->__data.__futex;
+  cond->__data.__nwaiters += 1 << COND_NWAITERS_SHIFT;
+
+  /* Work around the fact that the kernel rejects negative timeout values
+     despite them being valid.  */
+  if (__glibc_unlikely (abstime->tv_sec < 0))
+    goto timeout;
+
+  /* Remember the mutex we are using here.  If there is already a
+     different address store this is a bad user bug.  Do not store
+     anything for pshared condvars.  */
+  if (cond->__data.__mutex != (void *) ~0l)
+    cond->__data.__mutex = mutex;
+
+  /* Prepare structure passed to cancellation handler.  */
+  cbuffer.cond = cond;
+  cbuffer.mutex = mutex;
+
+  /* Before we block we enable cancellation.  Therefore we have to
+     install a cancellation handler.  */
+  __pthread_cleanup_push (&buffer, __condvar_cleanup, &cbuffer);
+
+  /* The current values of the wakeup counter.  The "woken" counter
+     must exceed this value.  */
+  unsigned long long int val;
+  unsigned long long int seq;
+  val = seq = cond->__data.__wakeup_seq;
+  /* Remember the broadcast counter.  */
+  cbuffer.bc_seq = cond->__data.__broadcast_seq;
+
+  while (1)
+    {
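+      /* Without FUTEX_CLOCK_REALTIME and the bitset operation we must
+	 convert the absolute deadline into a relative timeout on every
+	 iteration.  */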
+#if (!defined __ASSUME_FUTEX_CLOCK_REALTIME \
+     || !defined lll_futex_timed_wait_bitset)
+      struct timespec rt;
+      {
+# ifdef __NR_clock_gettime
+	INTERNAL_SYSCALL_DECL (err);
+	(void) INTERNAL_VSYSCALL (clock_gettime, err, 2,
+				  (cond->__data.__nwaiters
+				   & ((1 << COND_NWAITERS_SHIFT) - 1)),
+				  &rt);
+	/* Convert the absolute timeout value to a relative timeout.  */
+	rt.tv_sec = abstime->tv_sec - rt.tv_sec;
+	rt.tv_nsec = abstime->tv_nsec - rt.tv_nsec;
+# else
+	/* Get the current time.  So far we support only one clock.  */
+	struct timeval tv;
+	(void) __gettimeofday (&tv, NULL);
+
+	/* Convert the absolute timeout value to a relative timeout.  */
+	rt.tv_sec = abstime->tv_sec - tv.tv_sec;
+	rt.tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000;
+# endif
+      }
+      if (rt.tv_nsec < 0)
+	{
+	  rt.tv_nsec += 1000000000;
+	  --rt.tv_sec;
+	}
+      /* Did we already time out?  */
+      if (__glibc_unlikely (rt.tv_sec < 0))
+	{
+	  if (cbuffer.bc_seq != cond->__data.__broadcast_seq)
+	    goto bc_out;
+
+	  goto timeout;
+	}
+#endif
+
+      unsigned int futex_val = cond->__data.__futex;
+
+      /* Prepare to wait.  Release the condvar futex.  */
+      lll_unlock (cond->__data.__lock, pshared);
+
+      /* Enable asynchronous cancellation.  Required by the standard.  */
+      cbuffer.oldtype = __pthread_enable_asynccancel ();
+
+/* REQUEUE_PI was implemented after FUTEX_CLOCK_REALTIME, so it is sufficient
+   to check just the former.  */
+#if (defined lll_futex_timed_wait_requeue_pi \
+     && defined __ASSUME_REQUEUE_PI)
+      /* If pi_flag remained 1 then it means that we had the lock and the mutex
+	 but a spurious waker raced ahead of us.  Give back the mutex before
+	 going into wait again.  */
+      if (pi_flag)
+	{
+	  __pthread_mutex_cond_lock_adjust (mutex);
+	  __pthread_mutex_unlock_usercnt (mutex, 0);
+	}
+      pi_flag = USE_REQUEUE_PI (mutex);
+
+      if (pi_flag)
+	{
+	  unsigned int clockbit = (cond->__data.__nwaiters & 1
+				   ? 0 : FUTEX_CLOCK_REALTIME);
+	  err = lll_futex_timed_wait_requeue_pi (&cond->__data.__futex,
+						 futex_val, abstime, clockbit,
+						 &mutex->__data.__lock,
+						 pshared);
+	  pi_flag = (err == 0);
+	}
+      else
+#endif
+
+	{
+#if (!defined __ASSUME_FUTEX_CLOCK_REALTIME \
+     || !defined lll_futex_timed_wait_bitset)
+	  /* Wait until woken by signal or broadcast.  */
+	  err = lll_futex_timed_wait (&cond->__data.__futex,
+				      futex_val, &rt, pshared);
+#else
+	  unsigned int clockbit = (cond->__data.__nwaiters & 1
+				   ? 0 : FUTEX_CLOCK_REALTIME);
+	  err = lll_futex_timed_wait_bitset (&cond->__data.__futex, futex_val,
+					     abstime, clockbit, pshared);
+#endif
+	}
+
+      /* Disable asynchronous cancellation.  */
+      __pthread_disable_asynccancel (cbuffer.oldtype);
+
+      /* We are going to look at shared data again, so get the lock.  */
+      lll_lock (cond->__data.__lock, pshared);
+
+      /* If a broadcast happened, we are done.  */
+      if (cbuffer.bc_seq != cond->__data.__broadcast_seq)
+	goto bc_out;
+
+      /* Check whether we are eligible for wakeup.  */
+      val = cond->__data.__wakeup_seq;
+      if (val != seq && cond->__data.__woken_seq != val)
+	break;
+
+      /* Not woken yet.  Maybe the time expired?  */
+      if (__glibc_unlikely (err == -ETIMEDOUT))
+	{
+	timeout:
+	  /* Yep.  Adjust the counters.  */
+	  ++cond->__data.__wakeup_seq;
+	  ++cond->__data.__futex;
+
+	  /* The error value.  */
+	  result = ETIMEDOUT;
+	  break;
+	}
+    }
+
+  /* Another thread woken up.  */
+  ++cond->__data.__woken_seq;
+
+ bc_out:
+
+  cond->__data.__nwaiters -= 1 << COND_NWAITERS_SHIFT;
+
+  /* If pthread_cond_destroy was called on this variable already,
+     notify the pthread_cond_destroy caller all waiters have left
+     and it can be successfully destroyed.  */
+  if (cond->__data.__total_seq == -1ULL
+      && cond->__data.__nwaiters < (1 << COND_NWAITERS_SHIFT))
+    lll_futex_wake (&cond->__data.__nwaiters, 1, pshared);
+
+  /* We are done with the condvar.  */
+  lll_unlock (cond->__data.__lock, pshared);
+
+  /* The cancellation handling is back to normal, remove the handler.  */
+  __pthread_cleanup_pop (&buffer, 0);
+
+  /* Get the mutex before returning.  */
+#if (defined lll_futex_timed_wait_requeue_pi \
+     && defined __ASSUME_REQUEUE_PI)
+  if (pi_flag)
+    {
+      __pthread_mutex_cond_lock_adjust (mutex);
+      err = 0;
+    }
+  else
+#endif
+    err = __pthread_mutex_cond_lock (mutex);
+
+  return err ?: result;
+}
+
+versioned_symbol (libpthread, __pthread_cond_timedwait, pthread_cond_timedwait,
+		  GLIBC_2_3_2);
diff --git a/nptl_2_17/pthread_cond_wait_2_17.c b/nptl_2_17/pthread_cond_wait_2_17.c
new file mode 100644
index 00000000..ecd404ad
--- /dev/null
+++ b/nptl_2_17/pthread_cond_wait_2_17.c
@@ -0,0 +1,231 @@
+/* Copyright (C) 2003-2018 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Martin Schwidefsky <schwidefsky@de.ibm.com>, 2003.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <pthread_2_17.h>
+#include <pthreadP_2_17.h>
+#include <endian.h>
+#include <errno.h>
+#include <sysdep.h>
+#include <lowlevellock.h>
+#include <kernel-features.h>
+#include <shlib-compat.h>
+#include <stap-probe.h>
+
+struct _condvar_cleanup_buffer
+{
+  int oldtype;
+  pthread_cond_t *cond;
+  pthread_mutex_t *mutex;
+  unsigned int bc_seq;
+};
+
+void
+__attribute__ ((visibility ("hidden")))
+__condvar_cleanup (void *arg)
+{
+  struct _condvar_cleanup_buffer *cbuffer =
+    (struct _condvar_cleanup_buffer *) arg;
+  unsigned int destroying;
+  int pshared = (cbuffer->cond->__data.__mutex == (void *) ~0l)
+		? LLL_SHARED : LLL_PRIVATE;
+
+  /* We are going to modify shared data.  */
+  lll_lock (cbuffer->cond->__data.__lock, pshared);
+
+  if (cbuffer->bc_seq == cbuffer->cond->__data.__broadcast_seq)
+    {
+      /* This thread is not waiting anymore.  Adjust the sequence counters
+	 appropriately.  We do not increment WAKEUP_SEQ if this would
+	 bump it over the value of TOTAL_SEQ.  This can happen if a thread
+	 was woken and then canceled.  */
+      if (cbuffer->cond->__data.__wakeup_seq
+	  < cbuffer->cond->__data.__total_seq)
+	{
+	  ++cbuffer->cond->__data.__wakeup_seq;
+	  ++cbuffer->cond->__data.__futex;
+	}
+      ++cbuffer->cond->__data.__woken_seq;
+    }
+
+  cbuffer->cond->__data.__nwaiters -= 1 << COND_NWAITERS_SHIFT;
+
+  /* If pthread_cond_destroy was called on this variable already,
+     notify the pthread_cond_destroy caller all waiters have left
+     and it can be successfully destroyed.  */
+  destroying = 0;
+  if (cbuffer->cond->__data.__total_seq == -1ULL
+      && cbuffer->cond->__data.__nwaiters < (1 << COND_NWAITERS_SHIFT))
+    {
+      lll_futex_wake (&cbuffer->cond->__data.__nwaiters, 1, pshared);
+      destroying = 1;
+    }
+
+  /* We are done.  */
+  lll_unlock (cbuffer->cond->__data.__lock, pshared);
+
+  /* Wake everybody to make sure no condvar signal gets lost.  */
+  if (! destroying)
+    lll_futex_wake (&cbuffer->cond->__data.__futex, INT_MAX, pshared);
+
+  /* Get the mutex before returning unless asynchronous cancellation
+     is in effect.  We don't try to get the mutex if we already own it.  */
+  if (!(USE_REQUEUE_PI (cbuffer->mutex))
+      || ((cbuffer->mutex->__data.__lock & FUTEX_TID_MASK)
+	  != THREAD_GETMEM (THREAD_SELF, tid)))
+    {
+      __pthread_mutex_cond_lock (cbuffer->mutex);
+    }
+  else
+    __pthread_mutex_cond_lock_adjust (cbuffer->mutex);
+}
+
+int
+__pthread_cond_wait (pthread_cond_t *cond, pthread_mutex_t *mutex)
+{
+  struct _pthread_cleanup_buffer buffer;
+  struct _condvar_cleanup_buffer cbuffer;
+  int err;
+  int pshared = (cond->__data.__mutex == (void *) ~0l)
+		? LLL_SHARED : LLL_PRIVATE;
+
+#if (defined lll_futex_wait_requeue_pi \
+     && defined __ASSUME_REQUEUE_PI)
+  int pi_flag = 0;
+#endif
+
+  LIBC_PROBE (cond_wait, 2, cond, mutex);
+  /* Make sure we are alone.  */
+  lll_lock (cond->__data.__lock, pshared);
+
+  /* Now we can release the mutex.  */
+  err = __pthread_mutex_unlock_usercnt (mutex, 0);
+  if (__glibc_unlikely (err))
+    {
+      lll_unlock (cond->__data.__lock, pshared);
+      return err;
+    }
+  /* We have one new user of the condvar.  */
+  ++cond->__data.__total_seq;
+  ++cond->__data.__futex;
+  cond->__data.__nwaiters += 1 << COND_NWAITERS_SHIFT;
+
+  /* Remember the mutex we are using here.  If there is already a
+     different address store this is a bad user bug.  Do not store
+     anything for pshared condvars.  */
+  if (cond->__data.__mutex != (void *) ~0l)
+    cond->__data.__mutex = mutex;
+
+  /* Prepare structure passed to cancellation handler.  */
+  cbuffer.cond = cond;
+  cbuffer.mutex = mutex;
+
+  /* Before we block we enable cancellation.  Therefore we have to
+     install a cancellation handler.  */
+  __pthread_cleanup_push (&buffer, __condvar_cleanup, &cbuffer);
+
+  /* The current values of the wakeup counter.  The "woken" counter
+     must exceed this value.  */
+  unsigned long long int val;
+  unsigned long long int seq;
+  val = seq = cond->__data.__wakeup_seq;
+  /* Remember the broadcast counter.  */
+  cbuffer.bc_seq = cond->__data.__broadcast_seq;
+
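+  /* The sequence counters are re-checked after every wake-up so that
+     spurious or stolen wake-ups send us back to sleep.  */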
+  do
+    {
+      unsigned int futex_val = cond->__data.__futex;
+      /* Prepare to wait.  Release the condvar futex.  */
+      lll_unlock (cond->__data.__lock, pshared);
+
+      /* Enable asynchronous cancellation.  Required by the standard.  */
+      cbuffer.oldtype = __pthread_enable_asynccancel ();
+
+#if (defined lll_futex_wait_requeue_pi \
+     && defined __ASSUME_REQUEUE_PI)
+      /* If pi_flag remained 1 then it means that we had the lock and the mutex
+	 but a spurious waker raced ahead of us.  Give back the mutex before
+	 going into wait again.  */
+      if (pi_flag)
+	{
+	  __pthread_mutex_cond_lock_adjust (mutex);
+	  __pthread_mutex_unlock_usercnt (mutex, 0);
+	}
+      pi_flag = USE_REQUEUE_PI (mutex);
+
+      if (pi_flag)
+	{
+	  err = lll_futex_wait_requeue_pi (&cond->__data.__futex,
+					   futex_val, &mutex->__data.__lock,
+					   pshared);
+
+	  pi_flag = (err == 0);
+	}
+      else
+#endif
+	/* Wait until woken by signal or broadcast.  */
+	lll_futex_wait (&cond->__data.__futex, futex_val, pshared);
+
+      /* Disable asynchronous cancellation.  */
+      __pthread_disable_asynccancel (cbuffer.oldtype);
+
+      /* We are going to look at shared data again, so get the lock.  */
+      lll_lock (cond->__data.__lock, pshared);
+
+      /* If a broadcast happened, we are done.  */
+      if (cbuffer.bc_seq != cond->__data.__broadcast_seq)
+	goto bc_out;
+
+      /* Check whether we are eligible for wakeup.  */
+      val = cond->__data.__wakeup_seq;
+    }
+  while (val == seq || cond->__data.__woken_seq == val);
+
+  /* Another thread woken up.  */
+  ++cond->__data.__woken_seq;
+
+bc_out:
+  cond->__data.__nwaiters -= 1 << COND_NWAITERS_SHIFT;
+
+  /* If pthread_cond_destroy was called on this variable already,
+     notify the pthread_cond_destroy caller all waiters have left
+     and it can be successfully destroyed.  */
+  if (cond->__data.__total_seq == -1ULL
+      && cond->__data.__nwaiters < (1 << COND_NWAITERS_SHIFT))
+    lll_futex_wake (&cond->__data.__nwaiters, 1, pshared);
+
+  /* We are done with the condvar.  */
+  lll_unlock (cond->__data.__lock, pshared);
+
+  /* The cancellation handling is back to normal, remove the handler.  */
+  __pthread_cleanup_pop (&buffer, 0);
+
+  /* Get the mutex before returning.  Not needed for PI.  */
+#if (defined lll_futex_wait_requeue_pi \
+     && defined __ASSUME_REQUEUE_PI)
+  if (pi_flag)
+    {
+      __pthread_mutex_cond_lock_adjust (mutex);
+      return 0;
+    }
+  else
+#endif
+    return __pthread_mutex_cond_lock (mutex);
+}
+
+versioned_symbol (libpthread, __pthread_cond_wait, pthread_cond_wait,
+		  GLIBC_2_3_2);
diff --git a/nptl_2_17/pthread_condattr_getclock_2_17.c b/nptl_2_17/pthread_condattr_getclock_2_17.c
new file mode 100644
index 00000000..e07b349e
--- /dev/null
+++ b/nptl_2_17/pthread_condattr_getclock_2_17.c
@@ -0,0 +1,28 @@
+/* Copyright (C) 2003-2020 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2003.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include "pthreadP_2_17.h"
+
+
+int
+pthread_condattr_getclock (const pthread_condattr_t *attr, clockid_t *clock_id)
+{
+  *clock_id = (((((const struct pthread_condattr *) attr)->value) >> 1)
+	       & ((1 << COND_NWAITERS_SHIFT) - 1));
+  return 0;
+}
diff --git a/nptl_2_17/pthread_condattr_getpshared_2_17.c b/nptl_2_17/pthread_condattr_getpshared_2_17.c
new file mode 100644
index 00000000..8f4fe2bf
--- /dev/null
+++ b/nptl_2_17/pthread_condattr_getpshared_2_17.c
@@ -0,0 +1,28 @@
+/* Copyright (C) 2002-2020 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include "pthreadP_2_17.h"
+
+
+int
+pthread_condattr_getpshared (const pthread_condattr_t *attr, int *pshared)
+{
+  *pshared = ((const struct pthread_condattr *) attr)->value & 1;
+
+  return 0;
+}
diff --git a/nptl_2_17/pthread_condattr_init_2_17.c b/nptl_2_17/pthread_condattr_init_2_17.c
new file mode 100644
index 00000000..d90ba1e8
--- /dev/null
+++ b/nptl_2_17/pthread_condattr_init_2_17.c
@@ -0,0 +1,34 @@
+/* Copyright (C) 2002-2020 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include <string.h>
+#include "pthreadP_2_17.h"
+
+
+int
+__pthread_condattr_init (pthread_condattr_t *attr)
+{
+  ASSERT_TYPE_SIZE (pthread_condattr_t, __SIZEOF_PTHREAD_CONDATTR_T);
+  ASSERT_PTHREAD_INTERNAL_SIZE (pthread_condattr_t,
+				struct pthread_condattr);
+
+  memset (attr, '\0', sizeof (*attr));
+
+  return 0;
+}
+strong_alias (__pthread_condattr_init, pthread_condattr_init)
diff --git a/nptl_2_17/pthread_condattr_setclock_2_17.c b/nptl_2_17/pthread_condattr_setclock_2_17.c
new file mode 100644
index 00000000..5d91f17b
--- /dev/null
+++ b/nptl_2_17/pthread_condattr_setclock_2_17.c
@@ -0,0 +1,45 @@
+/* Copyright (C) 2003-2020 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2003.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include <assert.h>
+#include <errno.h>
+#include <stdbool.h>
+#include <time.h>
+#include <sysdep.h>
+#include "pthreadP_2_17.h"
+
+
+int
+pthread_condattr_setclock (pthread_condattr_t *attr, clockid_t clock_id)
+{
+  /* Only a few clocks are allowed.  */
+  if (clock_id != CLOCK_MONOTONIC && clock_id != CLOCK_REALTIME)
+    /* If more clocks are allowed some day the storing of the clock ID
+       in the pthread_cond_t structure needs to be adjusted.  */
+    return EINVAL;
+
+  /* Make sure the value fits in the bits we reserved.  */
+  assert (clock_id < (1 << COND_NWAITERS_SHIFT));
+
+  int *valuep = &((struct pthread_condattr *) attr)->value;
+
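+  /* Bit 0 of the attribute value holds the pshared flag; the clock ID
+     occupies the COND_NWAITERS_SHIFT bits above it.  */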
+  *valuep = ((*valuep & ~(((1 << COND_NWAITERS_SHIFT) - 1) << 1))
+	     | (clock_id << 1));
+
+  return 0;
+}
diff --git a/nptl_2_17/pthread_mutex_cond_lock_2_17.c b/nptl_2_17/pthread_mutex_cond_lock_2_17.c
new file mode 100644
index 00000000..2f077130
--- /dev/null
+++ b/nptl_2_17/pthread_mutex_cond_lock_2_17.c
@@ -0,0 +1,21 @@
+#include <pthreadP.h>
+
+#define LLL_MUTEX_LOCK(mutex) \
+  lll_cond_lock ((mutex)->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex))
+
+/* Not actually elided so far.  Needed?  */
+#define LLL_MUTEX_LOCK_ELISION(mutex) \
+  ({ lll_cond_lock ((mutex)->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex)); 0; })
+
+#define LLL_MUTEX_TRYLOCK(mutex) \
+  lll_cond_trylock ((mutex)->__data.__lock)
+#define LLL_MUTEX_TRYLOCK_ELISION(mutex) LLL_MUTEX_TRYLOCK(mutex)
+
+/* We need to assume that there are other threads blocked on the futex.
+   See __pthread_mutex_lock_full for further details.  */
+#define LLL_ROBUST_MUTEX_LOCK_MODIFIER FUTEX_WAITERS
+#define __pthread_mutex_lock __pthread_mutex_cond_lock
+#define __pthread_mutex_lock_full __pthread_mutex_cond_lock_full
+#define NO_INCR
+
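+/* With the overrides above, the generic implementation below compiles into
+   __pthread_mutex_cond_lock and __pthread_mutex_cond_lock_full.  */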
+#include <nptl/pthread_mutex_lock.c>
diff --git a/nptl_2_17/pthread_mutex_lock_2_17.c b/nptl_2_17/pthread_mutex_lock_2_17.c
new file mode 100644
index 00000000..73ee0842
--- /dev/null
+++ b/nptl_2_17/pthread_mutex_lock_2_17.c
@@ -0,0 +1,628 @@
+/* Copyright (C) 2002-2020 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include "pthreadP_2_17.h"
+#include <assert.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/param.h>
+#include <not-cancel.h>
+#include <atomic.h>
+#include <futex-internal.h>
+#include <stap-probe.h>
+
+#ifndef lll_lock_elision
+#define lll_lock_elision(lock, try_lock, private) ({ \
+      lll_lock (lock, private); 0; })
+#endif
+
+#ifndef lll_trylock_elision
+#define lll_trylock_elision(a,t) lll_trylock(a)
+#endif
+
+/* Some of the following definitions differ when pthread_mutex_cond_lock.c
+   includes this file.  */
+#ifndef LLL_MUTEX_LOCK
+# define LLL_MUTEX_LOCK(mutex) \
+  lll_lock ((mutex)->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex))
+# define LLL_MUTEX_TRYLOCK(mutex) \
+  lll_trylock ((mutex)->__data.__lock)
+# define LLL_ROBUST_MUTEX_LOCK_MODIFIER 0
+# define LLL_MUTEX_LOCK_ELISION(mutex) \
+  lll_lock_elision ((mutex)->__data.__lock, (mutex)->__data.__elision, \
+		   PTHREAD_MUTEX_PSHARED (mutex))
+# define LLL_MUTEX_TRYLOCK_ELISION(mutex) \
+  lll_trylock_elision((mutex)->__data.__lock, (mutex)->__data.__elision, \
+		   PTHREAD_MUTEX_PSHARED (mutex))
+#endif
+
+#ifndef FORCE_ELISION
+#define FORCE_ELISION(m, s)
+#endif
+
+static int __pthread_mutex_lock_full (pthread_mutex_t *mutex)
+     __attribute_noinline__;
+
+int
+__pthread_mutex_lock (pthread_mutex_t *mutex)
+{
+  /* See concurrency notes regarding mutex type which is loaded from __kind
+     in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h.  */
+  unsigned int type = PTHREAD_MUTEX_TYPE_ELISION (mutex);
+
+  LIBC_PROBE (mutex_entry, 1, mutex);
+
+  if (__builtin_expect (type & ~(PTHREAD_MUTEX_KIND_MASK_NP
+				 | PTHREAD_MUTEX_ELISION_FLAGS_NP), 0))
+    return __pthread_mutex_lock_full (mutex);
+
+  if (__glibc_likely (type == PTHREAD_MUTEX_TIMED_NP))
+    {
+      FORCE_ELISION (mutex, goto elision);
+    simple:
+      /* Normal mutex.  */
+      LLL_MUTEX_LOCK (mutex);
+      assert (mutex->__data.__owner == 0);
+    }
+#ifdef HAVE_ELISION
+  else if (__glibc_likely (type == PTHREAD_MUTEX_TIMED_ELISION_NP))
+    {
+  elision: __attribute__((unused))
+      /* This case can never happen on a system without elision,
+	 as the mutex type initialization functions will not
+	 allow to set the elision flags.  */
+      /* Don't record owner or users for elision case.  This is a
+	 tail call.  */
+      return LLL_MUTEX_LOCK_ELISION (mutex);
+    }
+#endif
+  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
+			     == PTHREAD_MUTEX_RECURSIVE_NP, 1))
+    {
+      /* Recursive mutex.  */
+      pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
+
+      /* Check whether we already hold the mutex.  */
+      if (mutex->__data.__owner == id)
+	{
+	  /* Just bump the counter.  */
+	  if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
+	    /* Overflow of the counter.  */
+	    return EAGAIN;
+
+	  ++mutex->__data.__count;
+
+	  return 0;
+	}
+
+      /* We have to get the mutex.  */
+      LLL_MUTEX_LOCK (mutex);
+
+      assert (mutex->__data.__owner == 0);
+      mutex->__data.__count = 1;
+    }
+  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
+			     == PTHREAD_MUTEX_ADAPTIVE_NP, 1))
+    {
+      if (! __is_smp)
+	goto simple;
+
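+      /* Spin a bounded number of times before blocking; the bound adapts
+	 to past contention recorded in __spins.  */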
+ if (LLL_MUTEX_TRYLOCK (mutex) != 0)
|
|
|
|
|
+ {
|
|
|
|
|
+ int cnt = 0;
|
|
|
|
|
+ int max_cnt = MIN (DEFAULT_ADAPTIVE_COUNT,
|
|
|
|
|
+ mutex->__data.__spins * 2 + 10);
|
|
|
|
|
+ do
|
|
|
|
|
+ {
|
|
|
|
|
+ if (cnt++ >= max_cnt)
|
|
|
|
|
+ {
|
|
|
|
|
+ LLL_MUTEX_LOCK (mutex);
|
|
|
|
|
+ break;
|
|
|
|
|
+ }
|
|
|
|
|
+ atomic_spin_nop ();
|
|
|
|
|
+ }
|
|
|
|
|
+ while (LLL_MUTEX_TRYLOCK (mutex) != 0);
|
|
|
|
|
+
|
|
|
|
|
+ mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
|
|
|
|
|
+ }
|
|
|
|
|
+ assert (mutex->__data.__owner == 0);
|
|
|
|
|
+ }
|
|
|
|
|
+ else
|
|
|
|
|
+ {
|
|
|
|
|
+ pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
|
|
|
|
|
+ assert (PTHREAD_MUTEX_TYPE (mutex) == PTHREAD_MUTEX_ERRORCHECK_NP);
|
|
|
|
|
+ /* Check whether we already hold the mutex. */
|
|
|
|
|
+ if (__glibc_unlikely (mutex->__data.__owner == id))
|
|
|
|
|
+ return EDEADLK;
|
|
|
|
|
+ goto simple;
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
|
|
|
|
|
+
|
|
|
|
|
+ /* Record the ownership. */
|
|
|
|
|
+ mutex->__data.__owner = id;
|
|
|
|
|
+#ifndef NO_INCR
|
|
|
|
|
+ ++mutex->__data.__nusers;
|
|
|
|
|
+#endif
|
|
|
|
|
+
|
|
|
|
|
+ LIBC_PROBE (mutex_acquired, 1, mutex);
|
|
|
|
|
+
|
|
|
|
|
+ return 0;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+static int
|
|
|
|
|
+__pthread_mutex_lock_full (pthread_mutex_t *mutex)
|
|
|
|
|
+{
|
|
|
|
|
+ int oldval;
|
|
|
|
|
+ pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
|
|
|
|
|
+
|
|
|
|
|
+ switch (PTHREAD_MUTEX_TYPE (mutex))
|
|
|
|
|
+ {
|
|
|
|
|
+ case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
|
|
|
|
|
+ case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
|
|
|
|
|
+ case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
|
|
|
|
|
+ case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
|
|
|
|
|
+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
|
|
|
|
|
+ &mutex->__data.__list.__next);
|
|
|
|
|
+ /* We need to set op_pending before starting the operation. Also
|
|
|
|
|
+ see comments at ENQUEUE_MUTEX. */
|
|
|
|
|
+ __asm ("" ::: "memory");
|
|
|
|
|
+
|
|
|
|
|
+ oldval = mutex->__data.__lock;
|
|
|
|
|
+ /* This is set to FUTEX_WAITERS iff we might have shared the
|
|
|
|
|
+ FUTEX_WAITERS flag with other threads, and therefore need to keep it
|
|
|
|
|
+ set to avoid lost wake-ups. We have the same requirement in the
|
|
|
|
|
+ simple mutex algorithm.
|
|
|
|
|
+ We start with value zero for a normal mutex, and FUTEX_WAITERS if we
|
|
|
|
|
+ are building the special case mutexes for use from within condition
|
|
|
|
|
+ variables. */
|
|
|
|
|
+ unsigned int assume_other_futex_waiters = LLL_ROBUST_MUTEX_LOCK_MODIFIER;
|
|
|
|
|
+ while (1)
|
|
|
|
|
+ {
|
|
|
|
|
+ /* Try to acquire the lock through a CAS from 0 (not acquired) to
|
|
|
|
|
+ our TID | assume_other_futex_waiters. */
|
|
|
|
|
+ if (__glibc_likely (oldval == 0))
|
|
|
|
|
+ {
|
|
|
|
|
+ oldval
|
|
|
|
|
+ = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
|
|
|
|
|
+ id | assume_other_futex_waiters, 0);
|
|
|
|
|
+ if (__glibc_likely (oldval == 0))
|
|
|
|
|
+ break;
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ if ((oldval & FUTEX_OWNER_DIED) != 0)
|
|
|
|
|
+ {
|
|
|
|
|
+ /* The previous owner died. Try locking the mutex. */
|
|
|
|
|
+ int newval = id;
|
|
|
|
|
+#ifdef NO_INCR
|
|
|
|
|
+ /* We are not taking assume_other_futex_waiters into accoount
|
|
|
|
|
+ here simply because we'll set FUTEX_WAITERS anyway. */
|
|
|
|
|
+ newval |= FUTEX_WAITERS;
|
|
|
|
|
+#else
|
|
|
|
|
+ newval |= (oldval & FUTEX_WAITERS) | assume_other_futex_waiters;
|
|
|
|
|
+#endif
|
|
|
|
|
+
|
|
|
|
|
+ newval
|
|
|
|
|
+ = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
|
|
|
|
|
+ newval, oldval);
|
|
|
|
|
+
|
|
|
|
|
+ if (newval != oldval)
|
|
|
|
|
+ {
|
|
|
|
|
+ oldval = newval;
|
|
|
|
|
+ continue;
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ /* We got the mutex. */
|
|
|
|
|
+ mutex->__data.__count = 1;
|
|
|
|
|
+ /* But it is inconsistent unless marked otherwise. */
|
|
|
|
|
+ mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;
|
|
|
|
|
+
|
|
|
|
|
+ /* We must not enqueue the mutex before we have acquired it.
|
|
|
|
|
+ Also see comments at ENQUEUE_MUTEX. */
|
|
|
|
|
+ __asm ("" ::: "memory");
|
|
|
|
|
+ ENQUEUE_MUTEX (mutex);
|
|
|
|
|
+ /* We need to clear op_pending after we enqueue the mutex. */
|
|
|
|
|
+ __asm ("" ::: "memory");
|
|
|
|
|
+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
|
|
|
|
|
+
|
|
|
|
|
+ /* Note that we deliberately exit here. If we fall
|
|
|
|
|
+ through to the end of the function __nusers would be
|
|
|
|
|
+ incremented which is not correct because the old
|
|
|
|
|
+ owner has to be discounted. If we are not supposed
|
|
|
|
|
+ to increment __nusers we actually have to decrement
|
|
|
|
|
+ it here. */
|
|
|
|
|
+#ifdef NO_INCR
|
|
|
|
|
+ --mutex->__data.__nusers;
|
|
|
|
|
+#endif
|
|
|
|
|
+
|
|
|
|
|
+ return EOWNERDEAD;
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ /* Check whether we already hold the mutex. */
|
|
|
|
|
+ if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
|
|
|
|
|
+ {
|
|
|
|
|
+ int kind = PTHREAD_MUTEX_TYPE (mutex);
|
|
|
|
|
+ if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
|
|
|
|
|
+ {
|
|
|
|
|
+ /* We do not need to ensure ordering wrt another memory
|
|
|
|
|
+ access. Also see comments at ENQUEUE_MUTEX. */
|
|
|
|
|
+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
|
|
|
|
|
+ NULL);
|
|
|
|
|
+ return EDEADLK;
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
|
|
|
|
|
+ {
|
|
|
|
|
+ /* We do not need to ensure ordering wrt another memory
|
|
|
|
|
+ access. */
|
|
|
|
|
+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
|
|
|
|
|
+ NULL);
|
|
|
|
|
+
|
|
|
|
|
+ /* Just bump the counter. */
|
|
|
|
|
+ if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
|
|
|
|
|
+ /* Overflow of the counter. */
|
|
|
|
|
+ return EAGAIN;
|
|
|
|
|
+
|
|
|
|
|
+ ++mutex->__data.__count;
|
|
|
|
|
+
|
|
|
|
|
+ return 0;
|
|
|
|
|
+ }
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ /* We cannot acquire the mutex nor has its owner died. Thus, try
|
|
|
|
|
+ to block using futexes. Set FUTEX_WAITERS if necessary so that
|
|
|
|
|
+ other threads are aware that there are potentially threads
|
|
|
|
|
+ blocked on the futex. Restart if oldval changed in the
|
|
|
|
|
+ meantime. */
|
|
|
|
|
+ if ((oldval & FUTEX_WAITERS) == 0)
|
|
|
|
|
+ {
|
|
|
|
|
+ if (atomic_compare_and_exchange_bool_acq (&mutex->__data.__lock,
|
|
|
|
|
+ oldval | FUTEX_WAITERS,
|
|
|
|
|
+ oldval)
|
|
|
|
|
+ != 0)
|
|
|
|
|
+ {
|
|
|
|
|
+ oldval = mutex->__data.__lock;
|
|
|
|
|
+ continue;
|
|
|
|
|
+ }
|
|
|
|
|
+ oldval |= FUTEX_WAITERS;
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ /* It is now possible that we share the FUTEX_WAITERS flag with
|
|
|
|
|
+ another thread; therefore, update assume_other_futex_waiters so
|
|
|
|
|
+ that we do not forget about this when handling other cases
|
|
|
|
|
+ above and thus do not cause lost wake-ups. */
|
|
|
|
|
+ assume_other_futex_waiters |= FUTEX_WAITERS;
|
|
|
|
|
+
|
|
|
|
|
+ /* Block using the futex and reload current lock value. */
|
|
|
|
|
+ lll_futex_wait (&mutex->__data.__lock, oldval,
|
|
|
|
|
+ PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
|
|
|
|
|
+ oldval = mutex->__data.__lock;
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ /* We have acquired the mutex; check if it is still consistent. */
|
|
|
|
|
+ if (__builtin_expect (mutex->__data.__owner
|
|
|
|
|
+ == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
|
|
|
|
|
+ {
|
|
|
|
|
+ /* This mutex is now not recoverable. */
|
|
|
|
|
+ mutex->__data.__count = 0;
|
|
|
|
|
+ int private = PTHREAD_ROBUST_MUTEX_PSHARED (mutex);
|
|
|
|
|
+ lll_unlock (mutex->__data.__lock, private);
|
|
|
|
|
+ /* FIXME This violates the mutex destruction requirements. See
|
|
|
|
|
+ __pthread_mutex_unlock_full. */
|
|
|
|
|
+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
|
|
|
|
|
+ return ENOTRECOVERABLE;
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ mutex->__data.__count = 1;
|
|
|
|
|
+ /* We must not enqueue the mutex before we have acquired it.
|
|
|
|
|
+ Also see comments at ENQUEUE_MUTEX. */
|
|
|
|
|
+ __asm ("" ::: "memory");
|
|
|
|
|
+ ENQUEUE_MUTEX (mutex);
|
|
|
|
|
+ /* We need to clear op_pending after we enqueue the mutex. */
|
|
|
|
|
+ __asm ("" ::: "memory");
|
|
|
|
|
+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
|
|
|
|
|
+ break;
|
|
|
|
|
+
|
|
|
|
|
+ /* The PI support requires the Linux futex system call. If that's not
|
|
|
|
|
+ available, pthread_mutex_init should never have allowed the type to
|
|
|
|
|
+ be set. So it will get the default case for an invalid type. */
|
|
|
|
|
+#ifdef __NR_futex
|
|
|
|
|
+ case PTHREAD_MUTEX_PI_RECURSIVE_NP:
|
|
|
|
|
+ case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
|
|
|
|
|
+ case PTHREAD_MUTEX_PI_NORMAL_NP:
|
|
|
|
|
+ case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
|
|
|
|
|
+ case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
|
|
|
|
|
+ case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
|
|
|
|
|
+ case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
|
|
|
|
|
+ case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
|
|
|
|
|
+ {
|
|
|
|
|
+ int kind, robust;
|
|
|
|
|
+ {
|
|
|
|
|
+ /* See concurrency notes regarding __kind in struct __pthread_mutex_s
|
|
|
|
|
+ in sysdeps/nptl/bits/thread-shared-types.h. */
|
|
|
|
|
+ int mutex_kind = atomic_load_relaxed (&(mutex->__data.__kind));
|
|
|
|
|
+ kind = mutex_kind & PTHREAD_MUTEX_KIND_MASK_NP;
|
|
|
|
|
+ robust = mutex_kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
|
|
|
|
|
+ }
+
+ if (robust)
+ {
+ /* Note: robust PI futexes are signaled by setting bit 0. */
+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
+ (void *) (((uintptr_t) &mutex->__data.__list.__next)
+ | 1));
+ /* We need to set op_pending before starting the operation. Also
+ see comments at ENQUEUE_MUTEX. */
+ __asm ("" ::: "memory");
+ }
+
+ oldval = mutex->__data.__lock;
+
+ /* Check whether we already hold the mutex. */
+ if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
+ {
+ if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
+ {
+ /* We do not need to ensure ordering wrt another memory
+ access. */
+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+ return EDEADLK;
+ }
+
+ if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
+ {
+ /* We do not need to ensure ordering wrt another memory
+ access. */
+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+
+ /* Just bump the counter. */
+ if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
+ /* Overflow of the counter. */
+ return EAGAIN;
+
+ ++mutex->__data.__count;
+
+ return 0;
+ }
+ }
+
+ int newval = id;
+# ifdef NO_INCR
+ newval |= FUTEX_WAITERS;
+# endif
+ oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
+ newval, 0);
+
+ if (oldval != 0)
+ {
+ /* The mutex is locked. The kernel will now take care of
+ everything. */
+ int private = (robust
+ ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
+ : PTHREAD_MUTEX_PSHARED (mutex));
+ int e = futex_lock_pi ((unsigned int *) &mutex->__data.__lock,
+ NULL, private);
+ if (e == ESRCH || e == EDEADLK)
+ {
+ assert (e != EDEADLK
+ || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
+ && kind != PTHREAD_MUTEX_RECURSIVE_NP));
+ /* ESRCH can happen only for non-robust PI mutexes where
+ the owner of the lock died. */
+ assert (e != ESRCH || !robust);
+
+ /* Delay the thread indefinitely. */
+ while (1)
+ lll_timedwait (&(int){0}, 0, 0 /* ignored */, NULL,
+ private);
+ }
+
+ oldval = mutex->__data.__lock;
+
+ assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
+ }
+
+ if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
+ {
+ atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);
+
+ /* We got the mutex. */
+ mutex->__data.__count = 1;
+ /* But it is inconsistent unless marked otherwise. */
+ mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;
+
+ /* We must not enqueue the mutex before we have acquired it.
+ Also see comments at ENQUEUE_MUTEX. */
+ __asm ("" ::: "memory");
+ ENQUEUE_MUTEX_PI (mutex);
+ /* We need to clear op_pending after we enqueue the mutex. */
+ __asm ("" ::: "memory");
+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+
+ /* Note that we deliberately exit here. If we fall
+ through to the end of the function __nusers would be
+ incremented which is not correct because the old owner
+ has to be discounted. If we are not supposed to
+ increment __nusers we actually have to decrement it here. */
+# ifdef NO_INCR
+ --mutex->__data.__nusers;
+# endif
+
+ return EOWNERDEAD;
+ }
+
+ if (robust
+ && __builtin_expect (mutex->__data.__owner
+ == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
+ {
+ /* This mutex is now not recoverable. */
+ mutex->__data.__count = 0;
+
+ futex_unlock_pi ((unsigned int *) &mutex->__data.__lock,
+ PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
+
+ /* To the kernel, this will be visible after the kernel has
+ acquired the mutex in the syscall. */
+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+ return ENOTRECOVERABLE;
+ }
+
+ mutex->__data.__count = 1;
+ if (robust)
+ {
+ /* We must not enqueue the mutex before we have acquired it.
+ Also see comments at ENQUEUE_MUTEX. */
+ __asm ("" ::: "memory");
+ ENQUEUE_MUTEX_PI (mutex);
+ /* We need to clear op_pending after we enqueue the mutex. */
+ __asm ("" ::: "memory");
+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+ }
+ }
+ break;
+#endif /* __NR_futex. */
+
+ case PTHREAD_MUTEX_PP_RECURSIVE_NP:
+ case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
+ case PTHREAD_MUTEX_PP_NORMAL_NP:
+ case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
+ {
+ /* See concurrency notes regarding __kind in struct __pthread_mutex_s
+ in sysdeps/nptl/bits/thread-shared-types.h. */
+ int kind = atomic_load_relaxed (&(mutex->__data.__kind))
+ & PTHREAD_MUTEX_KIND_MASK_NP;
+
+ oldval = mutex->__data.__lock;
+
+ /* Check whether we already hold the mutex. */
+ if (mutex->__data.__owner == id)
+ {
+ if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
+ return EDEADLK;
+
+ if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
+ {
+ /* Just bump the counter. */
+ if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
+ /* Overflow of the counter. */
+ return EAGAIN;
+
+ ++mutex->__data.__count;
+
+ return 0;
+ }
+ }
+
+ int oldprio = -1, ceilval;
+ do
+ {
+ int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
+ >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
+
+ if (__pthread_current_priority () > ceiling)
+ {
+ if (oldprio != -1)
+ __pthread_tpp_change_priority (oldprio, -1);
+ return EINVAL;
+ }
+
+ int retval = __pthread_tpp_change_priority (oldprio, ceiling);
+ if (retval)
+ return retval;
+
+ ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
+ oldprio = ceiling;
+
+ oldval
+ = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
+#ifdef NO_INCR
+ ceilval | 2,
+#else
+ ceilval | 1,
+#endif
+ ceilval);
+
+ if (oldval == ceilval)
+ break;
+
+ do
+ {
+ oldval
+ = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
+ ceilval | 2,
+ ceilval | 1);
+
+ if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
+ break;
+
+ if (oldval != ceilval)
+ lll_futex_wait (&mutex->__data.__lock, ceilval | 2,
+ PTHREAD_MUTEX_PSHARED (mutex));
+ }
+ while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
+ ceilval | 2, ceilval)
+ != ceilval);
+ }
+ while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);
+
+ assert (mutex->__data.__owner == 0);
+ mutex->__data.__count = 1;
+ }
+ break;
+
+ default:
+ /* Correct code cannot set any other type. */
+ return EINVAL;
+ }
+
+ /* Record the ownership. */
+ mutex->__data.__owner = id;
+#ifndef NO_INCR
+ ++mutex->__data.__nusers;
+#endif
+
+ LIBC_PROBE (mutex_acquired, 1, mutex);
+
+ return 0;
+}
+#ifndef __pthread_mutex_lock
+weak_alias (__pthread_mutex_lock, pthread_mutex_lock)
+hidden_def (__pthread_mutex_lock)
+#endif
+
+
+#ifdef NO_INCR
+void
+__pthread_mutex_cond_lock_adjust (pthread_mutex_t *mutex)
+{
+ /* See concurrency notes regarding __kind in struct __pthread_mutex_s
+ in sysdeps/nptl/bits/thread-shared-types.h. */
+ int mutex_kind = atomic_load_relaxed (&(mutex->__data.__kind));
+ assert ((mutex_kind & PTHREAD_MUTEX_PRIO_INHERIT_NP) != 0);
+ assert ((mutex_kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0);
+ assert ((mutex_kind & PTHREAD_MUTEX_PSHARED_BIT) == 0);
+
+ /* Record the ownership. */
+ pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
+ mutex->__data.__owner = id;
+
+ if (mutex_kind == PTHREAD_MUTEX_PI_RECURSIVE_NP)
+ ++mutex->__data.__count;
+}
+#endif
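
For reference, a minimal usage sketch of the priority-protect (PP) path handled above; it is not part of the patch, the ceiling of 10 is an illustrative assumption, and raising SCHED_FIFO priorities normally requires privileges.

/* Sketch: create and lock a PTHREAD_PRIO_PROTECT mutex.  Locking boosts
   the owner to the ceiling; a thread whose priority already exceeds the
   ceiling gets EINVAL, matching the check against
   __pthread_current_priority () above.  */
#include <pthread.h>
#include <stdio.h>

int
main (void)
{
  pthread_mutexattr_t attr;
  pthread_mutex_t m;

  pthread_mutexattr_init (&attr);
  pthread_mutexattr_setprotocol (&attr, PTHREAD_PRIO_PROTECT);
  pthread_mutexattr_setprioceiling (&attr, 10);   /* assumed ceiling */
  pthread_mutex_init (&m, &attr);
  pthread_mutexattr_destroy (&attr);

  if (pthread_mutex_lock (&m) == 0)
    {
      puts ("locked at ceiling");
      pthread_mutex_unlock (&m);
    }
  pthread_mutex_destroy (&m);
  return 0;
}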
diff --git a/nptl_2_17/pthread_mutex_unlock_2_17.c b/nptl_2_17/pthread_mutex_unlock_2_17.c
new file mode 100644
index 00000000..18ba158e
--- /dev/null
+++ b/nptl_2_17/pthread_mutex_unlock_2_17.c
@@ -0,0 +1,360 @@
+/* Copyright (C) 2002-2020 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+#include "pthreadP_2_17.h"
+#include <assert.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <lowlevellock.h>
+#include <stap-probe.h>
+#include <futex-internal.h>
+
+#ifndef lll_unlock_elision
+#define lll_unlock_elision(a,b,c) ({ lll_unlock (a,c); 0; })
+#endif
+
+static int
+__pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
+ __attribute_noinline__;
+
+int
+attribute_hidden
+__pthread_mutex_unlock_usercnt (pthread_mutex_t *mutex, int decr)
+{
+ /* See concurrency notes regarding mutex type which is loaded from __kind
+ in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h. */
+ int type = PTHREAD_MUTEX_TYPE_ELISION (mutex);
+ if (__builtin_expect (type
+ & ~(PTHREAD_MUTEX_KIND_MASK_NP
+ |PTHREAD_MUTEX_ELISION_FLAGS_NP), 0))
+ return __pthread_mutex_unlock_full (mutex, decr);
+
+ if (__builtin_expect (type, PTHREAD_MUTEX_TIMED_NP)
+ == PTHREAD_MUTEX_TIMED_NP)
+ {
+ /* Always reset the owner field. */
+ normal:
+ mutex->__data.__owner = 0;
+ if (decr)
+ /* One less user. */
+ --mutex->__data.__nusers;
+
+ /* Unlock. */
+ lll_unlock (mutex->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex));
+
+ LIBC_PROBE (mutex_release, 1, mutex);
+
+ return 0;
+ }
+ else if (__glibc_likely (type == PTHREAD_MUTEX_TIMED_ELISION_NP))
+ {
+ /* Don't reset the owner/users fields for elision. */
+ return lll_unlock_elision (mutex->__data.__lock, mutex->__data.__elision,
+ PTHREAD_MUTEX_PSHARED (mutex));
+ }
+ else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
+ == PTHREAD_MUTEX_RECURSIVE_NP, 1))
+ {
+ /* Recursive mutex. */
+ if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
+ return EPERM;
+
+ if (--mutex->__data.__count != 0)
+ /* We still hold the mutex. */
+ return 0;
+ goto normal;
+ }
+ else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
+ == PTHREAD_MUTEX_ADAPTIVE_NP, 1))
+ goto normal;
+ else
+ {
+ /* Error checking mutex. */
+ assert (type == PTHREAD_MUTEX_ERRORCHECK_NP);
+ if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid)
+ || ! lll_islocked (mutex->__data.__lock))
+ return EPERM;
+ goto normal;
+ }
+}
+
+
+static int
+__pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
+{
+ int newowner = 0;
+ int private;
+
+ switch (PTHREAD_MUTEX_TYPE (mutex))
+ {
+ case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
+ /* Recursive mutex. */
+ if ((mutex->__data.__lock & FUTEX_TID_MASK)
+ == THREAD_GETMEM (THREAD_SELF, tid)
+ && __builtin_expect (mutex->__data.__owner
+ == PTHREAD_MUTEX_INCONSISTENT, 0))
+ {
+ if (--mutex->__data.__count != 0)
+ /* We still hold the mutex. */
+ return ENOTRECOVERABLE;
+
+ goto notrecoverable;
+ }
+
+ if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
+ return EPERM;
+
+ if (--mutex->__data.__count != 0)
+ /* We still hold the mutex. */
+ return 0;
+
+ goto robust;
+
+ case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
+ case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
+ case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
+ if ((mutex->__data.__lock & FUTEX_TID_MASK)
+ != THREAD_GETMEM (THREAD_SELF, tid)
+ || ! lll_islocked (mutex->__data.__lock))
+ return EPERM;
+
+ /* If the previous owner died and the caller did not succeed in
+ making the state consistent, mark the mutex as unrecoverable
+ and wake all waiters. */
+ if (__builtin_expect (mutex->__data.__owner
+ == PTHREAD_MUTEX_INCONSISTENT, 0))
+ notrecoverable:
+ newowner = PTHREAD_MUTEX_NOTRECOVERABLE;
+
+ robust:
+ /* Remove mutex from the list. */
+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
+ &mutex->__data.__list.__next);
+ /* We must set op_pending before we dequeue the mutex. Also see
+ comments at ENQUEUE_MUTEX. */
+ __asm ("" ::: "memory");
+ DEQUEUE_MUTEX (mutex);
+
+ mutex->__data.__owner = newowner;
+ if (decr)
+ /* One less user. */
+ --mutex->__data.__nusers;
+
+ /* Unlock by setting the lock to 0 (not acquired); if the lock had
+ FUTEX_WAITERS set previously, then wake any waiters.
+ The unlock operation must be the last access to the mutex to not
+ violate the mutex destruction requirements (see __lll_unlock). */
+ private = PTHREAD_ROBUST_MUTEX_PSHARED (mutex);
+ if (__glibc_unlikely ((atomic_exchange_rel (&mutex->__data.__lock, 0)
+ & FUTEX_WAITERS) != 0))
+ lll_futex_wake (&mutex->__data.__lock, 1, private);
+
+ /* We must clear op_pending after we release the mutex.
+ FIXME However, this violates the mutex destruction requirements
+ because another thread could acquire the mutex, destroy it, and
+ reuse the memory for something else; then, if this thread crashes,
+ and the memory happens to have a value equal to the TID, the kernel
+ will believe it is still related to the mutex (which has been
+ destroyed already) and will modify some other random object. */
+ __asm ("" ::: "memory");
+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+ break;
+
+ /* The PI support requires the Linux futex system call. If that's not
+ available, pthread_mutex_init should never have allowed the type to
+ be set. So it will get the default case for an invalid type. */
+#ifdef __NR_futex
+ case PTHREAD_MUTEX_PI_RECURSIVE_NP:
+ /* Recursive mutex. */
+ if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
+ return EPERM;
+
+ if (--mutex->__data.__count != 0)
+ /* We still hold the mutex. */
+ return 0;
+ goto continue_pi_non_robust;
+
+ case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
+ /* Recursive mutex. */
+ if ((mutex->__data.__lock & FUTEX_TID_MASK)
+ == THREAD_GETMEM (THREAD_SELF, tid)
+ && __builtin_expect (mutex->__data.__owner
+ == PTHREAD_MUTEX_INCONSISTENT, 0))
+ {
+ if (--mutex->__data.__count != 0)
+ /* We still hold the mutex. */
+ return ENOTRECOVERABLE;
+
+ goto pi_notrecoverable;
+ }
+
+ if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
+ return EPERM;
+
+ if (--mutex->__data.__count != 0)
+ /* We still hold the mutex. */
+ return 0;
+
+ goto continue_pi_robust;
+
+ case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
+ case PTHREAD_MUTEX_PI_NORMAL_NP:
+ case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
+ case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
+ case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
+ case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
+ if ((mutex->__data.__lock & FUTEX_TID_MASK)
+ != THREAD_GETMEM (THREAD_SELF, tid)
+ || ! lll_islocked (mutex->__data.__lock))
+ return EPERM;
+
+ /* If the previous owner died and the caller did not succeed in
+ making the state consistent, mark the mutex as unrecoverable
+ and wake all waiters. */
+ /* See concurrency notes regarding __kind in struct __pthread_mutex_s
+ in sysdeps/nptl/bits/thread-shared-types.h. */
+ if ((atomic_load_relaxed (&(mutex->__data.__kind))
+ & PTHREAD_MUTEX_ROBUST_NORMAL_NP) != 0
+ && __builtin_expect (mutex->__data.__owner
+ == PTHREAD_MUTEX_INCONSISTENT, 0))
+ pi_notrecoverable:
+ newowner = PTHREAD_MUTEX_NOTRECOVERABLE;
+
+ /* See concurrency notes regarding __kind in struct __pthread_mutex_s
+ in sysdeps/nptl/bits/thread-shared-types.h. */
+ if ((atomic_load_relaxed (&(mutex->__data.__kind))
+ & PTHREAD_MUTEX_ROBUST_NORMAL_NP) != 0)
+ {
+ continue_pi_robust:
+ /* Remove mutex from the list.
+ Note: robust PI futexes are signaled by setting bit 0. */
+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
+ (void *) (((uintptr_t) &mutex->__data.__list.__next)
+ | 1));
+ /* We must set op_pending before we dequeue the mutex. Also see
+ comments at ENQUEUE_MUTEX. */
+ __asm ("" ::: "memory");
+ DEQUEUE_MUTEX (mutex);
+ }
+
+ continue_pi_non_robust:
+ mutex->__data.__owner = newowner;
+ if (decr)
+ /* One less user. */
+ --mutex->__data.__nusers;
+
+ /* Unlock. Load all necessary mutex data before releasing the mutex
+ to not violate the mutex destruction requirements (see
+ lll_unlock). */
+ /* See concurrency notes regarding __kind in struct __pthread_mutex_s
+ in sysdeps/nptl/bits/thread-shared-types.h. */
+ int robust = atomic_load_relaxed (&(mutex->__data.__kind))
+ & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
+ private = (robust
+ ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
+ : PTHREAD_MUTEX_PSHARED (mutex));
+ /* Unlock the mutex using a CAS unless there are futex waiters or our
+ TID is not the value of __lock anymore, in which case we let the
+ kernel take care of the situation. Use release MO in the CAS to
+ synchronize with acquire MO in lock acquisitions. */
+ int l = atomic_load_relaxed (&mutex->__data.__lock);
+ do
+ {
+ if (((l & FUTEX_WAITERS) != 0)
+ || (l != THREAD_GETMEM (THREAD_SELF, tid)))
+ {
+ futex_unlock_pi ((unsigned int *) &mutex->__data.__lock,
+ private);
+ break;
+ }
+ }
+ while (!atomic_compare_exchange_weak_release (&mutex->__data.__lock,
+ &l, 0));
+
+ /* This happens after the kernel releases the mutex but violates the
+ mutex destruction requirements; see comments in the code handling
+ PTHREAD_MUTEX_ROBUST_NORMAL_NP. */
+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+ break;
+#endif /* __NR_futex. */
+
+ case PTHREAD_MUTEX_PP_RECURSIVE_NP:
+ /* Recursive mutex. */
+ if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
+ return EPERM;
+
+ if (--mutex->__data.__count != 0)
+ /* We still hold the mutex. */
+ return 0;
+ goto pp;
+
+ case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
+ /* Error checking mutex. */
+ if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid)
+ || (mutex->__data.__lock & ~ PTHREAD_MUTEX_PRIO_CEILING_MASK) == 0)
+ return EPERM;
+ /* FALLTHROUGH */
+
+ case PTHREAD_MUTEX_PP_NORMAL_NP:
+ case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
+ /* Always reset the owner field. */
+ pp:
+ mutex->__data.__owner = 0;
+
+ if (decr)
+ /* One less user. */
+ --mutex->__data.__nusers;
+
+ /* Unlock. Use release MO in the CAS to synchronize with acquire MO in
+ lock acquisitions. */
+ int newval;
+ int oldval = atomic_load_relaxed (&mutex->__data.__lock);
+ do
+ {
+ newval = oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK;
+ }
+ while (!atomic_compare_exchange_weak_release (&mutex->__data.__lock,
+ &oldval, newval));
+
+ if ((oldval & ~PTHREAD_MUTEX_PRIO_CEILING_MASK) > 1)
+ lll_futex_wake (&mutex->__data.__lock, 1,
+ PTHREAD_MUTEX_PSHARED (mutex));
+
+ int oldprio = newval >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
+
+ LIBC_PROBE (mutex_release, 1, mutex);
+
+ return __pthread_tpp_change_priority (oldprio, -1);
+
+ default:
+ /* Correct code cannot set any other type. */
+ return EINVAL;
+ }
+
+ LIBC_PROBE (mutex_release, 1, mutex);
+ return 0;
+}
+
+
+int
+__pthread_mutex_unlock (pthread_mutex_t *mutex)
+{
+ return __pthread_mutex_unlock_usercnt (mutex, 1);
+}
+weak_alias (__pthread_mutex_unlock, pthread_mutex_unlock)
+hidden_def (__pthread_mutex_unlock)
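
As a usage note (not part of the patch): a minimal sketch of the robust-mutex recovery protocol whose owner-died branches appear in the lock and unlock paths above; the helper name lock_robust is hypothetical.

/* Sketch: a locker that repairs state after the previous owner died.
   Failing to call pthread_mutex_consistent before unlocking drives the
   mutex into the PTHREAD_MUTEX_NOTRECOVERABLE state handled above.  */
#include <errno.h>
#include <pthread.h>

static int
lock_robust (pthread_mutex_t *m)
{
  int e = pthread_mutex_lock (m);
  if (e == EOWNERDEAD)
    {
      /* Repair the data protected by M here, then mark it consistent.  */
      pthread_mutex_consistent (m);
      e = 0;
    }
  return e;   /* ENOTRECOVERABLE: some owner died and nobody repaired.  */
}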
diff --git a/nptl_2_17/pthreadtypes_2_17.h b/nptl_2_17/pthreadtypes_2_17.h
new file mode 100644
index 00000000..0483e44a
--- /dev/null
+++ b/nptl_2_17/pthreadtypes_2_17.h
@@ -0,0 +1,179 @@
+/* Copyright (C) 2002-2020 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+#ifndef _INTERNALTYPES_H
+#define _INTERNALTYPES_H 1
+
+#include <stdint.h>
+#include <atomic.h>
+#include <endian.h>
+
+
+struct pthread_attr
+{
+ /* Scheduler parameters and priority. */
+ struct sched_param schedparam;
+ int schedpolicy;
+ /* Various flags like detachstate, scope, etc. */
+ int flags;
+ /* Size of guard area. */
+ size_t guardsize;
+ /* Stack handling. */
+ void *stackaddr;
+ size_t stacksize;
+ /* Affinity map. */
+ cpu_set_t *cpuset;
+ size_t cpusetsize;
+};
+
+#define ATTR_FLAG_DETACHSTATE 0x0001
+#define ATTR_FLAG_NOTINHERITSCHED 0x0002
+#define ATTR_FLAG_SCOPEPROCESS 0x0004
+#define ATTR_FLAG_STACKADDR 0x0008
+#define ATTR_FLAG_OLDATTR 0x0010
+#define ATTR_FLAG_SCHED_SET 0x0020
+#define ATTR_FLAG_POLICY_SET 0x0040
+
+
+/* Mutex attribute data structure. */
+struct pthread_mutexattr
+{
+ /* Identifier for the kind of mutex.
+
+ Bit 31 is set if the mutex is to be shared between processes.
+
+ Bit 0 to 30 contain one of the PTHREAD_MUTEX_ values to identify
+ the type of the mutex. */
+ int mutexkind;
+};
+
+
+/* Conditional variable attribute data structure. */
+struct pthread_condattr
+{
+ /* Combination of values:
+
+ Bit 0 : flag whether conditional variable will be
+ sharable between processes.
+ Bit 1-COND_CLOCK_BITS: Clock ID. COND_CLOCK_BITS is the number of bits
+ needed to represent the ID of the clock. */
+ int value;
+};
+#define COND_CLOCK_BITS 1
+#define COND_NWAITERS_SHIFT 1
+
+/* Read-write lock variable attribute data structure. */
+struct pthread_rwlockattr
+{
+ int lockkind;
+ int pshared;
+};
+
+
+/* Barrier data structure. See pthread_barrier_wait for a description
+ of how these fields are used. */
+struct pthread_barrier
+{
+ unsigned int in;
+ unsigned int current_round;
+ unsigned int count;
+ int shared;
+ unsigned int out;
+};
+/* See pthread_barrier_wait for a description. */
+#define BARRIER_IN_THRESHOLD (UINT_MAX/2)
+
+
+/* Barrier variable attribute data structure. */
+struct pthread_barrierattr
+{
+ int pshared;
+};
+
+
+/* Thread-local data handling. */
+struct pthread_key_struct
+{
+ /* Sequence numbers. Even numbers indicate vacant entries. Note
+ that zero is even. We use uintptr_t to not require padding on
+ 32- and 64-bit machines. On 64-bit machines it helps to avoid
+ wrapping, too. */
+ uintptr_t seq;
+
+ /* Destructor for the data. */
+ void (*destr) (void *);
+};
+
+/* Check whether an entry is unused. */
+#define KEY_UNUSED(p) (((p) & 1) == 0)
+/* Check whether a key is usable. We cannot reuse an allocated key if
+ the sequence counter would overflow after the next destroy call.
+ This would mean that we potentially free memory for a key with the
+ same sequence. This is *very* unlikely to happen. A program would
+ have to create and destroy a key 2^31 times (on 32-bit platforms,
+ on 64-bit platforms that would be 2^63). If it should happen we
+ simply don't use this specific key anymore. */
+#define KEY_USABLE(p) (((uintptr_t) (p)) < ((uintptr_t) ((p) + 2)))
+
+
+/* Handling of read-write lock data. */
+// XXX For now there is only one flag. Maybe more in future.
+#define RWLOCK_RECURSIVE(rwlock) ((rwlock)->__data.__flags != 0)
+
+
+/* Semaphore variable structure. */
+struct new_sem
+{
+#if __HAVE_64B_ATOMICS
+ /* The data field holds both value (in the least-significant 32 bits) and
+ nwaiters. */
+# if __BYTE_ORDER == __LITTLE_ENDIAN
+# define SEM_VALUE_OFFSET 0
+# elif __BYTE_ORDER == __BIG_ENDIAN
+# define SEM_VALUE_OFFSET 1
+# else
+# error Unsupported byte order.
+# endif
+# define SEM_NWAITERS_SHIFT 32
+# define SEM_VALUE_MASK (~(unsigned int)0)
+ uint64_t data;
+ int private;
+ int pad;
+#else
+# define SEM_VALUE_SHIFT 1
+# define SEM_NWAITERS_MASK ((unsigned int)1)
+ unsigned int value;
+ int private;
+ int pad;
+ unsigned int nwaiters;
+#endif
+};
+
+struct old_sem
+{
+ unsigned int value;
+};
+
+
+/* Compatibility type for old conditional variable interfaces. */
+typedef struct
+{
+ pthread_cond_t *cond;
+} pthread_cond_2_0_t;
+
+#endif /* internaltypes.h */
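
A small sketch (not part of the patch) of how the 64-bit data word of struct new_sem packs the two counters on targets with 64-bit atomics, mirroring the SEM_NWAITERS_SHIFT and SEM_VALUE_MASK definitions above; the helper names are illustrative.

/* Sketch: decode the packed semaphore word.  The low 32 bits hold the
   semaphore value, the high 32 bits hold the waiter count.  */
#include <stdint.h>

#define SEM_NWAITERS_SHIFT 32
#define SEM_VALUE_MASK (~(unsigned int) 0)

static unsigned int
sem_value_of (uint64_t data)
{
  return data & SEM_VALUE_MASK;          /* current semaphore value */
}

static uint64_t
sem_nwaiters_of (uint64_t data)
{
  return data >> SEM_NWAITERS_SHIFT;     /* number of blocked waiters */
}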
diff --git a/nptl_2_17/tpp_2_17.c b/nptl_2_17/tpp_2_17.c
new file mode 100644
index 00000000..56357ea3
--- /dev/null
+++ b/nptl_2_17/tpp_2_17.c
@@ -0,0 +1,195 @@
+/* Thread Priority Protect helpers.
+ Copyright (C) 2006-2020 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Jakub Jelinek <jakub@redhat.com>, 2006.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+#include <assert.h>
+#include <atomic.h>
+#include <errno.h>
+#include <pthreadP.h>
+#include <sched.h>
+#include <stdlib.h>
+#include <atomic.h>
+
+
+int __sched_fifo_min_prio = -1;
+int __sched_fifo_max_prio = -1;
+
+/* We only want to initialize __sched_fifo_min_prio and __sched_fifo_max_prio
+ once. The standard solution would be similar to pthread_once, but then
+ readers would need to use an acquire fence. In this specific case,
+ initialization is comprised of just idempotent writes to two variables
+ that have an initial value of -1. Therefore, we can treat each variable as
+ a separate, at-least-once initialized value. This enables using just
+ relaxed MO loads and stores, but requires that consumers check for
+ initialization of each value that is to be used; see
+ __pthread_tpp_change_priority for an example.
+ */
+void
+__init_sched_fifo_prio (void)
+{
+ atomic_store_relaxed (&__sched_fifo_max_prio,
+ __sched_get_priority_max (SCHED_FIFO));
+ atomic_store_relaxed (&__sched_fifo_min_prio,
+ __sched_get_priority_min (SCHED_FIFO));
+}
+
+int
+__pthread_tpp_change_priority (int previous_prio, int new_prio)
+{
+ struct pthread *self = THREAD_SELF;
+ struct priority_protection_data *tpp = THREAD_GETMEM (self, tpp);
+ int fifo_min_prio = atomic_load_relaxed (&__sched_fifo_min_prio);
+ int fifo_max_prio = atomic_load_relaxed (&__sched_fifo_max_prio);
+
+ if (tpp == NULL)
+ {
+ /* See __init_sched_fifo_prio. We need both the min and max prio,
+ so need to check both, and run initialization if either one is
+ not initialized. The memory model's write-read coherence rule
+ makes this work. */
+ if (fifo_min_prio == -1 || fifo_max_prio == -1)
+ {
+ __init_sched_fifo_prio ();
+ fifo_min_prio = atomic_load_relaxed (&__sched_fifo_min_prio);
+ fifo_max_prio = atomic_load_relaxed (&__sched_fifo_max_prio);
+ }
+
+ size_t size = sizeof *tpp;
+ size += (fifo_max_prio - fifo_min_prio + 1)
+ * sizeof (tpp->priomap[0]);
+ tpp = calloc (size, 1);
+ if (tpp == NULL)
+ return ENOMEM;
+ tpp->priomax = fifo_min_prio - 1;
+ THREAD_SETMEM (self, tpp, tpp);
+ }
+
+ assert (new_prio == -1
+ || (new_prio >= fifo_min_prio
+ && new_prio <= fifo_max_prio));
+ assert (previous_prio == -1
+ || (previous_prio >= fifo_min_prio
+ && previous_prio <= fifo_max_prio));
+
+ int priomax = tpp->priomax;
+ int newpriomax = priomax;
+ if (new_prio != -1)
+ {
+ if (tpp->priomap[new_prio - fifo_min_prio] + 1 == 0)
+ return EAGAIN;
+ ++tpp->priomap[new_prio - fifo_min_prio];
+ if (new_prio > priomax)
+ newpriomax = new_prio;
+ }
+
+ if (previous_prio != -1)
+ {
+ if (--tpp->priomap[previous_prio - fifo_min_prio] == 0
+ && priomax == previous_prio
+ && previous_prio > new_prio)
+ {
+ int i;
+ for (i = previous_prio - 1; i >= fifo_min_prio; --i)
+ if (tpp->priomap[i - fifo_min_prio])
+ break;
+ newpriomax = i;
+ }
+ }
+
+ if (priomax == newpriomax)
+ return 0;
+
+ /* See CREATE THREAD NOTES in nptl/pthread_create.c. */
+ lll_lock (self->lock, LLL_PRIVATE);
+
+ tpp->priomax = newpriomax;
+
+ int result = 0;
+
+ if ((self->flags & ATTR_FLAG_SCHED_SET) == 0)
+ {
+ if (__sched_getparam (self->tid, &self->schedparam) != 0)
+ result = errno;
+ else
+ self->flags |= ATTR_FLAG_SCHED_SET;
+ }
+
+ if ((self->flags & ATTR_FLAG_POLICY_SET) == 0)
+ {
+ self->schedpolicy = __sched_getscheduler (self->tid);
+ if (self->schedpolicy == -1)
+ result = errno;
+ else
+ self->flags |= ATTR_FLAG_POLICY_SET;
+ }
+
+ if (result == 0)
+ {
+ struct sched_param sp = self->schedparam;
+ if (sp.sched_priority < newpriomax || sp.sched_priority < priomax)
+ {
+ if (sp.sched_priority < newpriomax)
+ sp.sched_priority = newpriomax;
+
+ if (__sched_setscheduler (self->tid, self->schedpolicy, &sp) < 0)
+ result = errno;
+ }
+ }
+
+ lll_unlock (self->lock, LLL_PRIVATE);
+
+ return result;
+}
+
+int
+__pthread_current_priority (void)
+{
+ struct pthread *self = THREAD_SELF;
+ if ((self->flags & (ATTR_FLAG_POLICY_SET | ATTR_FLAG_SCHED_SET))
+ == (ATTR_FLAG_POLICY_SET | ATTR_FLAG_SCHED_SET))
+ return self->schedparam.sched_priority;
+
+ int result = 0;
+
+ /* See CREATE THREAD NOTES in nptl/pthread_create.c. */
+ lll_lock (self->lock, LLL_PRIVATE);
+
+ if ((self->flags & ATTR_FLAG_SCHED_SET) == 0)
+ {
+ if (__sched_getparam (self->tid, &self->schedparam) != 0)
+ result = -1;
+ else
+ self->flags |= ATTR_FLAG_SCHED_SET;
+ }
+
+ if ((self->flags & ATTR_FLAG_POLICY_SET) == 0)
+ {
+ self->schedpolicy = __sched_getscheduler (self->tid);
+ if (self->schedpolicy == -1)
+ result = -1;
+ else
+ self->flags |= ATTR_FLAG_POLICY_SET;
+ }
+
+ if (result != -1)
+ result = self->schedparam.sched_priority;
+
+ lll_unlock (self->lock, LLL_PRIVATE);
+
+ return result;
+}
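
A standalone sketch (not part of the patch) of the priomap bookkeeping idea behind __pthread_tpp_change_priority above: one counter per ceiling priority, with priomax tracking the highest non-empty slot. The array bound and helper names are assumptions.

/* Sketch: per-ceiling reference counts, as in struct
   priority_protection_data.  */
#include <assert.h>

#define NPRIO 100                 /* assumed SCHED_FIFO priority range */
static unsigned int priomap[NPRIO];
static int priomax = -1;

static void
ceiling_acquire (int prio)        /* lock a PP mutex with this ceiling */
{
  assert (prio >= 0 && prio < NPRIO);
  ++priomap[prio];
  if (prio > priomax)
    priomax = prio;
}

static void
ceiling_release (int prio)        /* unlock it again */
{
  if (--priomap[prio] == 0 && prio == priomax)
    while (priomax >= 0 && priomap[priomax] == 0)
      --priomax;                  /* fall back to next non-empty slot */
}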
diff --git a/nptl_2_17/unwind_2_17.c b/nptl_2_17/unwind_2_17.c
new file mode 100644
index 00000000..1534540c
--- /dev/null
+++ b/nptl_2_17/unwind_2_17.c
@@ -0,0 +1,138 @@
+/* Copyright (C) 2003-2020 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>
+ and Richard Henderson <rth@redhat.com>, 2003.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+#include "pthreadP_2_17.h"
+#include <setjmp.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <jmpbuf-unwind.h>
+
+#ifdef _STACK_GROWS_DOWN
+# define FRAME_LEFT(frame, other, adj) \
+ ((uintptr_t) frame - adj >= (uintptr_t) other - adj)
+#elif _STACK_GROWS_UP
+# define FRAME_LEFT(frame, other, adj) \
+ ((uintptr_t) frame - adj <= (uintptr_t) other - adj)
+#else
+# error "Define either _STACK_GROWS_DOWN or _STACK_GROWS_UP"
+#endif
+
+static _Unwind_Reason_Code
+unwind_stop (int version, _Unwind_Action actions,
+ _Unwind_Exception_Class exc_class,
+ struct _Unwind_Exception *exc_obj,
+ struct _Unwind_Context *context, void *stop_parameter)
+{
+ struct pthread_unwind_buf *buf = stop_parameter;
+ struct pthread *self = THREAD_SELF;
+ struct _pthread_cleanup_buffer *curp = THREAD_GETMEM (self, cleanup);
+ int do_longjump = 0;
+
+ /* Adjust all pointers used in comparisons, so that top of thread's
+ stack is at the top of address space. Without that, things break
+ if stack is allocated above the main stack. */
+ uintptr_t adj = (uintptr_t) self->stackblock + self->stackblock_size;
+
+ /* Do longjmp if we're at "end of stack", aka "end of unwind data".
+ We assume there are only C frames without unwind data in between
+ here and the jmp_buf target. Otherwise simply note that the CFA
+ of a function is NOT within its stack frame; it's the SP of the
+ previous frame. */
+ if ((actions & _UA_END_OF_STACK)
+ || ! _JMPBUF_CFA_UNWINDS_ADJ (buf->cancel_jmp_buf[0].jmp_buf, context,
+ adj))
+ do_longjump = 1;
+
+ if (__glibc_unlikely (curp != NULL))
+ {
+ /* Handle the compatibility stuff. Execute all handlers
+ registered with the old method which would be unwound by this
+ step. */
+ struct _pthread_cleanup_buffer *oldp = buf->priv.data.cleanup;
+ void *cfa = (void *) (_Unwind_Ptr) _Unwind_GetCFA (context);
+
+ if (curp != oldp && (do_longjump || FRAME_LEFT (cfa, curp, adj)))
+ {
+ do
+ {
+ /* Pointer to the next element. */
+ struct _pthread_cleanup_buffer *nextp = curp->__prev;
+
+ /* Call the handler. */
+ curp->__routine (curp->__arg);
+
+ /* To the next. */
+ curp = nextp;
+ }
+ while (curp != oldp
+ && (do_longjump || FRAME_LEFT (cfa, curp, adj)));
+
+ /* Mark the current element as handled. */
+ THREAD_SETMEM (self, cleanup, curp);
+ }
+ }
+
+ if (do_longjump)
+ __libc_unwind_longjmp ((struct __jmp_buf_tag *) buf->cancel_jmp_buf, 1);
+
+ return _URC_NO_REASON;
+}
+
+
+static void
+unwind_cleanup (_Unwind_Reason_Code reason, struct _Unwind_Exception *exc)
+{
+ /* When we get here a C++ catch block didn't rethrow the object. We
+ cannot handle this case and therefore abort. */
+ __libc_fatal ("FATAL: exception not rethrown\n");
+}
+
+
+void
+__cleanup_fct_attribute __attribute ((noreturn))
+__pthread_unwind (__pthread_unwind_buf_t *buf)
+{
+ struct pthread_unwind_buf *ibuf = (struct pthread_unwind_buf *) buf;
+ struct pthread *self = THREAD_SELF;
+
+ /* This is not a catchable exception, so don't provide any details about
+ the exception type. We do need to initialize the field though. */
+ THREAD_SETMEM (self, exc.exception_class, 0);
+ THREAD_SETMEM (self, exc.exception_cleanup, &unwind_cleanup);
+
+ _Unwind_ForcedUnwind (&self->exc, unwind_stop, ibuf);
+ /* NOTREACHED */
+
+ /* We had better not get here. */
+ abort ();
+}
+hidden_def (__pthread_unwind)
+
+
+void
+__cleanup_fct_attribute __attribute ((noreturn))
+__pthread_unwind_next (__pthread_unwind_buf_t *buf)
+{
+ struct pthread_unwind_buf *ibuf = (struct pthread_unwind_buf *) buf;
+
+ __pthread_unwind ((__pthread_unwind_buf_t *) ibuf->priv.data.prev);
+}
+hidden_def (__pthread_unwind_next)
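
A minimal sketch (not part of the patch) of the old-style cleanup handlers that unwind_stop above runs during forced unwinding; the handler and thread function names are illustrative.

/* Sketch: a cleanup handler fires when the thread is cancelled while
   blocked inside the push/pop region.  */
#include <pthread.h>
#include <unistd.h>

static void
unlock_on_cancel (void *arg)
{
  pthread_mutex_unlock ((pthread_mutex_t *) arg);
}

static void *
worker (void *arg)
{
  pthread_mutex_t *m = arg;
  pthread_mutex_lock (m);
  pthread_cleanup_push (unlock_on_cancel, m);
  pause ();                       /* cancellation point */
  pthread_cleanup_pop (1);
  return NULL;
}

int
main (void)
{
  static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
  pthread_t t;
  pthread_create (&t, NULL, worker, &m);
  sleep (1);                      /* let the worker block in pause () */
  pthread_cancel (t);             /* the handler unlocks the mutex */
  pthread_join (t, NULL);
  return 0;
}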
diff --git a/nptl_2_17/vars_2_17.c b/nptl_2_17/vars_2_17.c
new file mode 100644
index 00000000..295d7e33
--- /dev/null
+++ b/nptl_2_17/vars_2_17.c
@@ -0,0 +1,43 @@
+/* Copyright (C) 2004-2020 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+#include <pthreadP_2_17.h>
+#include <stdlib.h>
+#include <tls.h>
+#include <unistd.h>
+
+/* Default thread attributes for the case when the user does not
+ provide any. */
+struct pthread_attr __default_pthread_attr attribute_hidden;
+
+/* Mutex protecting __default_pthread_attr. */
+int __default_pthread_attr_lock = LLL_LOCK_INITIALIZER;
+
+/* Flag whether the machine is SMP or not. */
+int __is_smp attribute_hidden;
+
+#ifndef TLS_MULTIPLE_THREADS_IN_TCB
+/* Variable set to a nonzero value either if more than one thread runs or ran,
+ or if a single-threaded process is trying to cancel itself. See
+ nptl/descr.h for more context on the single-threaded process case. */
+int __pthread_multiple_threads attribute_hidden;
+#endif
+
+/* Table of the key information. */
+struct pthread_key_struct __pthread_keys[PTHREAD_KEYS_MAX]
+ __attribute__ ((nocommon));
+hidden_data_def (__pthread_keys)
--
2.23.0