#include "kmp_affinity.h"
#include "kmp_wait_release.h"
#include "kmp_wrapper_getpid.h"

#if !KMP_OS_DRAGONFLY && !KMP_OS_FREEBSD && !KMP_OS_NETBSD && !KMP_OS_OPENBSD
#include <sys/resource.h>
#include <sys/syscall.h>
#include <sys/sysinfo.h>
#include <sys/sysctl.h>
#elif KMP_OS_DRAGONFLY || KMP_OS_FREEBSD
#include <sys/sysctl.h>
#include <pthread_np.h>
#elif KMP_OS_NETBSD || KMP_OS_OPENBSD
#include <sys/sysctl.h>
#include <sys/loadavg.h>
#endif
struct kmp_sys_timer {
  struct timespec start;
};

#define TIMEVAL_TO_TIMESPEC(tv, ts)                                            \
  do {                                                                         \
    (ts)->tv_sec = (tv)->tv_sec;                                               \
    (ts)->tv_nsec = (tv)->tv_usec * 1000;                                      \
  } while (0)

#define TS2NS(timespec)                                                        \
  (((timespec).tv_sec * (long int)1e9) + (timespec).tv_nsec)

static struct kmp_sys_timer __kmp_sys_timer_data;
typedef void (*sig_func_t)(int);
STATIC_EFI2_WORKAROUND struct sigaction __kmp_sighldrs[NSIG];
static sigset_t __kmp_sigset;
static int __kmp_init_runtime = FALSE;

static int __kmp_fork_count = 0;

static pthread_condattr_t __kmp_suspend_cond_attr;
static pthread_mutexattr_t __kmp_suspend_mutex_attr;

static kmp_cond_align_t __kmp_wait_cv;
static kmp_mutex_align_t __kmp_wait_mx;

kmp_uint64 __kmp_ticks_per_msec = 1000000;
kmp_uint64 __kmp_ticks_per_usec = 1000;
static void __kmp_print_cond(char *buffer, kmp_cond_align_t *cond) {
  KMP_SNPRINTF(buffer, 128, "(cond (lock (%ld, %d)), (descr (%p)))",
               cond->c_cond.__c_lock.__status, cond->c_cond.__c_lock.__spinlock,
               cond->c_cond.__c_waiting);
}
#if ((KMP_OS_LINUX || KMP_OS_FREEBSD) && KMP_AFFINITY_SUPPORTED)

void __kmp_affinity_bind_thread(int which) {
  KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
              "Illegal set affinity operation when not capable");

  kmp_affin_mask_t *mask;
  KMP_CPU_ALLOC_ON_STACK(mask);
  KMP_CPU_ZERO(mask);
  KMP_CPU_SET(which, mask);
  __kmp_set_system_affinity(mask, TRUE);
  KMP_CPU_FREE_FROM_STACK(mask);
}
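/* Determine if we can access affinity functionality on this version of
 * Linux* OS by checking __NR_sched_{get,set}affinity system calls, and set
 * __kmp_affin_mask_size to the appropriate value (0 means not capable). */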
void __kmp_affinity_determine_capable(const char *env_var) {
  // Check and see if the OS supports thread affinity.

#if KMP_OS_LINUX
#define KMP_CPU_SET_SIZE_LIMIT (1024 * 1024)
#define KMP_CPU_SET_TRY_SIZE CACHE_LINE
#elif KMP_OS_FREEBSD
#define KMP_CPU_SET_SIZE_LIMIT (sizeof(cpuset_t))
#endif

  int verbose = __kmp_affinity.flags.verbose;
  int warnings = __kmp_affinity.flags.warnings;
  enum affinity_type type = __kmp_affinity.type;

#if KMP_OS_LINUX
  long gCode;
  unsigned char *buf;
  buf = (unsigned char *)KMP_INTERNAL_MALLOC(KMP_CPU_SET_SIZE_LIMIT);

  // If the syscall returns a suggestion for the size,
  // then we don't have to search for an appropriate size.
  gCode = syscall(__NR_sched_getaffinity, 0, KMP_CPU_SET_TRY_SIZE, buf);
  KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                "initial getaffinity call returned %ld errno = %d\n",
                gCode, errno));

  if (gCode < 0 && errno != EINVAL) {
    // System call not supported
    if (verbose ||
        (warnings && (type != affinity_none) && (type != affinity_default) &&
         (type != affinity_disabled))) {
      int error = errno;
      kmp_msg_t err_code = KMP_ERR(error);
      __kmp_msg(kmp_ms_warning, KMP_MSG(GetAffSysCallNotSupported, env_var),
                err_code, __kmp_msg_null);
      if (__kmp_generate_warnings == kmp_warnings_off) {
        __kmp_str_free(&err_code.str);
      }
    }
    KMP_AFFINITY_DISABLE();
    KMP_INTERNAL_FREE(buf);
    return;
  } else if (gCode > 0) {
    // The optimal situation: the OS returns the size of the buffer it expects.
    KMP_AFFINITY_ENABLE(gCode);
    KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                  "affinity supported (mask size %d)\n",
                  (int)__kmp_affin_mask_size));
    KMP_INTERNAL_FREE(buf);
    return;
  }

  // Call the getaffinity system call repeatedly with increasing set sizes
  // until we succeed, or reach an upper bound on the search.
  KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                "searching for proper set size\n"));
  int size;
  for (size = 1; size <= KMP_CPU_SET_SIZE_LIMIT; size *= 2) {
    gCode = syscall(__NR_sched_getaffinity, 0, size, buf);
    KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                  "getaffinity for mask size %ld returned %ld errno = %d\n",
                  size, gCode, errno));

    if (gCode < 0) {
      if (errno == ENOSYS) {
        // We shouldn't get here
        KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                      "inconsistent OS call behavior: errno == ENOSYS for mask "
                      "size %d\n",
                      size));
        if (verbose ||
            (warnings && (type != affinity_none) &&
             (type != affinity_default) && (type != affinity_disabled))) {
          int error = errno;
          kmp_msg_t err_code = KMP_ERR(error);
          __kmp_msg(kmp_ms_warning, KMP_MSG(GetAffSysCallNotSupported, env_var),
                    err_code, __kmp_msg_null);
          if (__kmp_generate_warnings == kmp_warnings_off) {
            __kmp_str_free(&err_code.str);
          }
        }
        KMP_AFFINITY_DISABLE();
        KMP_INTERNAL_FREE(buf);
        return;
      }
      continue;
    }

    KMP_AFFINITY_ENABLE(gCode);
    KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                  "affinity supported (mask size %d)\n",
                  (int)__kmp_affin_mask_size));
    KMP_INTERNAL_FREE(buf);
    return;
  }
#elif KMP_OS_FREEBSD
  long gCode;
  unsigned char *buf;
  buf = (unsigned char *)KMP_INTERNAL_MALLOC(KMP_CPU_SET_SIZE_LIMIT);
  gCode = pthread_getaffinity_np(pthread_self(), KMP_CPU_SET_SIZE_LIMIT,
                                 reinterpret_cast<cpuset_t *>(buf));
  KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                "initial getaffinity call returned %d errno = %d\n",
                gCode, errno));
  if (gCode == 0) {
    KMP_AFFINITY_ENABLE(KMP_CPU_SET_SIZE_LIMIT);
    KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                  "affinity supported (mask size %d)\n",
                  (int)__kmp_affin_mask_size));
    KMP_INTERNAL_FREE(buf);
    return;
  }
#endif
  KMP_INTERNAL_FREE(buf);
  // Affinity is not supported
  KMP_AFFINITY_DISABLE();
  KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                "cannot determine mask size - affinity not supported\n"));
  if (verbose || (warnings && (type != affinity_none) &&
                  (type != affinity_default) && (type != affinity_disabled))) {
    KMP_WARNING(AffCantGetMaskSize, env_var);
  }
}

#endif // (KMP_OS_LINUX || KMP_OS_FREEBSD) && KMP_AFFINITY_SUPPORTED
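#if KMP_USE_FUTEX

// Probe once whether the futex system call is available by issuing a
// harmless FUTEX_WAKE on a local variable; the result gates the
// futex-based lock and wait implementations.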
int __kmp_futex_determine_capable() {
  int loc = 0;
  long rc = syscall(__NR_futex, &loc, FUTEX_WAKE, 1, NULL, NULL, 0);
  int retval = (rc == 0) || (errno != ENOSYS);

  KA_TRACE(10,
           ("__kmp_futex_determine_capable: rc = %d errno = %d\n", rc, errno));
  KA_TRACE(10, ("__kmp_futex_determine_capable: futex syscall%s supported\n",
                retval ? "" : " not"));

  return retval;
}

#endif // KMP_USE_FUTEX

#if (KMP_ARCH_X86 || KMP_ARCH_X86_64) && (!KMP_ASM_INTRINS)
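/* These emulate the fetch-and-<op> primitives with a compare-and-store
   retry loop, for toolchains without the assembly intrinsics. */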
kmp_int8 __kmp_test_then_or8(volatile kmp_int8 *p, kmp_int8 d) {
  kmp_int8 old_value, new_value;

  old_value = TCR_1(*p);
  new_value = old_value | d;

  while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_1(*p);
    new_value = old_value | d;
  }
  return old_value;
}

kmp_int8 __kmp_test_then_and8(volatile kmp_int8 *p, kmp_int8 d) {
  kmp_int8 old_value, new_value;

  old_value = TCR_1(*p);
  new_value = old_value & d;

  while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_1(*p);
    new_value = old_value & d;
  }
  return old_value;
}

kmp_uint32 __kmp_test_then_or32(volatile kmp_uint32 *p, kmp_uint32 d) {
  kmp_uint32 old_value, new_value;

  old_value = TCR_4(*p);
  new_value = old_value | d;

  while (!KMP_COMPARE_AND_STORE_REL32(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_4(*p);
    new_value = old_value | d;
  }
  return old_value;
}

kmp_uint32 __kmp_test_then_and32(volatile kmp_uint32 *p, kmp_uint32 d) {
  kmp_uint32 old_value, new_value;

  old_value = TCR_4(*p);
  new_value = old_value & d;

  while (!KMP_COMPARE_AND_STORE_REL32(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_4(*p);
    new_value = old_value & d;
  }
  return old_value;
}

kmp_int8 __kmp_test_then_add8(volatile kmp_int8 *p, kmp_int8 d) {
  kmp_int8 old_value, new_value;

  old_value = TCR_1(*p);
  new_value = old_value + d;

  while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_1(*p);
    new_value = old_value + d;
  }
  return old_value;
}

kmp_int64 __kmp_test_then_add64(volatile kmp_int64 *p, kmp_int64 d) {
  kmp_int64 old_value, new_value;

  old_value = TCR_8(*p);
  new_value = old_value + d;

  while (!KMP_COMPARE_AND_STORE_REL64(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_8(*p);
    new_value = old_value + d;
  }
  return old_value;
}

kmp_uint64 __kmp_test_then_or64(volatile kmp_uint64 *p, kmp_uint64 d) {
  kmp_uint64 old_value, new_value;

  old_value = TCR_8(*p);
  new_value = old_value | d;
  while (!KMP_COMPARE_AND_STORE_REL64(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_8(*p);
    new_value = old_value | d;
  }
  return old_value;
}

kmp_uint64 __kmp_test_then_and64(volatile kmp_uint64 *p, kmp_uint64 d) {
  kmp_uint64 old_value, new_value;

  old_value = TCR_8(*p);
  new_value = old_value & d;
  while (!KMP_COMPARE_AND_STORE_REL64(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_8(*p);
    new_value = old_value & d;
  }
  return old_value;
}

#endif /* (KMP_ARCH_X86 || KMP_ARCH_X86_64) && (!KMP_ASM_INTRINS) */
void __kmp_terminate_thread(int gtid) {
  int status;
  kmp_info_t *th = __kmp_threads[gtid];

  if (!th)
    return;

#ifdef KMP_CANCEL_THREADS
  KA_TRACE(10, ("__kmp_terminate_thread: kill (%d)\n", gtid));
  status = pthread_cancel(th->th.th_info.ds.ds_thread);
  if (status != 0 && status != ESRCH) {
    __kmp_fatal(KMP_MSG(CantTerminateWorkerThread), KMP_ERR(status),
                __kmp_msg_null);
  }
#endif
  KMP_YIELD(TRUE);
}
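/* Set thread stack info according to values returned by
   pthread_getattr_np(). If values are unreasonable, assume the call failed
   and use incremental stack refinement instead. Returns TRUE if the stack
   parameters could be determined exactly, FALSE otherwise. */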
static kmp_int32 __kmp_set_stack_info(int gtid, kmp_info_t *th) {
  int stack_data;
#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||    \
    KMP_OS_HURD || KMP_OS_SOLARIS
  pthread_attr_t attr;
  int status;
  size_t size = 0;
  void *addr = 0;

  if (!KMP_UBER_GTID(gtid)) {
    /* Fetch the real thread attributes */
    status = pthread_attr_init(&attr);
    KMP_CHECK_SYSFAIL("pthread_attr_init", status);
#if KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD
    status = pthread_attr_get_np(pthread_self(), &attr);
    KMP_CHECK_SYSFAIL("pthread_attr_get_np", status);
#else
    status = pthread_getattr_np(pthread_self(), &attr);
    KMP_CHECK_SYSFAIL("pthread_getattr_np", status);
#endif
    status = pthread_attr_getstack(&attr, &addr, &size);
    KMP_CHECK_SYSFAIL("pthread_attr_getstack", status);
    KA_TRACE(60,
             ("__kmp_set_stack_info: T#%d pthread_attr_getstack returned size:"
              " %lu, low addr: %p\n",
              gtid, size, addr));
    status = pthread_attr_destroy(&attr);
    KMP_CHECK_SYSFAIL("pthread_attr_destroy", status);
  }

  if (size != 0 && addr != 0) { // was stack parameter determination successful?
    /* Store the correct base and size */
    TCW_PTR(th->th.th_info.ds.ds_stackbase, (((char *)addr) + size));
    TCW_PTR(th->th.th_info.ds.ds_stacksize, size);
    TCW_4(th->th.th_info.ds.ds_stackgrow, FALSE);
    return TRUE;
  }
#endif
  /* Use incremental refinement starting from an initial conservative
     estimate */
  TCW_PTR(th->th.th_info.ds.ds_stacksize, 0);
  TCW_PTR(th->th.th_info.ds.ds_stackbase, &stack_data);
  TCW_4(th->th.th_info.ds.ds_stackgrow, TRUE);
  return FALSE;
}
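/* Thread entry point for workers: establish the gtid, stats, affinity,
   cancellation state, and stack bookkeeping, then enter the dispatch loop
   in __kmp_launch_thread(). */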
static void *__kmp_launch_worker(void *thr) {
  int status, old_type, old_state;
#ifdef KMP_BLOCK_SIGNALS
  sigset_t new_set, old_set;
#endif /* KMP_BLOCK_SIGNALS */
  void *exit_val;
#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||    \
    KMP_OS_OPENBSD || KMP_OS_HURD || KMP_OS_SOLARIS
  void *volatile padding = 0;
#endif
  int gtid;

  gtid = ((kmp_info_t *)thr)->th.th_info.ds.ds_gtid;
  __kmp_gtid_set_specific(gtid);
#ifdef KMP_TDATA_GTID
  __kmp_gtid = gtid;
#endif
#if KMP_STATS_ENABLED
  // set thread local index to point to thread-specific stats
  __kmp_stats_thread_ptr = ((kmp_info_t *)thr)->th.th_stats;
  __kmp_stats_thread_ptr->startLife();
  KMP_SET_THREAD_STATE(IDLE);
  KMP_INIT_PARTITIONED_TIMERS(OMP_idle);
#endif

#if USE_ITT_BUILD
  __kmp_itt_thread_name(gtid);
#endif /* USE_ITT_BUILD */

#if KMP_AFFINITY_SUPPORTED
  __kmp_affinity_bind_init_mask(gtid);
#endif

#ifdef KMP_CANCEL_THREADS
  status = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old_type);
  KMP_CHECK_SYSFAIL("pthread_setcanceltype", status);
  status = pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &old_state);
  KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
#endif

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
  // Set FP control regs to be a copy of the parallel initialization thread's.
  __kmp_clear_x87_fpu_status_word();
  __kmp_load_x87_fpu_control_word(&__kmp_init_x87_fpu_control_word);
  __kmp_load_mxcsr(&__kmp_init_mxcsr);
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

#ifdef KMP_BLOCK_SIGNALS
  status = sigfillset(&new_set);
  KMP_CHECK_SYSFAIL_ERRNO("sigfillset", status);
  status = pthread_sigmask(SIG_BLOCK, &new_set, &old_set);
  KMP_CHECK_SYSFAIL("pthread_sigmask", status);
#endif /* KMP_BLOCK_SIGNALS */

#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||    \
    KMP_OS_OPENBSD || KMP_OS_HURD || KMP_OS_SOLARIS
  if (__kmp_stkoffset > 0 && gtid > 0) {
    padding = KMP_ALLOCA(gtid * __kmp_stkoffset);
    (void)padding;
  }
#endif

  KMP_MB();
  __kmp_set_stack_info(gtid, (kmp_info_t *)thr);

  __kmp_check_stack_overlap((kmp_info_t *)thr);

  exit_val = __kmp_launch_thread((kmp_info_t *)thr);

#ifdef KMP_BLOCK_SIGNALS
  status = pthread_sigmask(SIG_SETMASK, &old_set, NULL);
  KMP_CHECK_SYSFAIL("pthread_sigmask", status);
#endif /* KMP_BLOCK_SIGNALS */

  return exit_val;
}
static void *__kmp_launch_monitor(void *thr) {
  int status, old_type, old_state;
#ifdef KMP_BLOCK_SIGNALS
  sigset_t new_set;
#endif /* KMP_BLOCK_SIGNALS */
  struct timespec interval;

  KMP_MB(); /* Flush all pending memory write invalidates.  */

  KA_TRACE(10, ("__kmp_launch_monitor: #1 launched\n"));

  /* register us as the monitor thread */
  __kmp_gtid_set_specific(KMP_GTID_MONITOR);
#ifdef KMP_TDATA_GTID
  __kmp_gtid = KMP_GTID_MONITOR;
#endif

  KMP_MB();

#if USE_ITT_BUILD
  // Instruct Intel(R) Threading Tools to ignore monitor thread.
  __kmp_itt_thread_ignore();
#endif /* USE_ITT_BUILD */

  __kmp_set_stack_info(((kmp_info_t *)thr)->th.th_info.ds.ds_gtid,
                       (kmp_info_t *)thr);

  __kmp_check_stack_overlap((kmp_info_t *)thr);

#ifdef KMP_CANCEL_THREADS
  status = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old_type);
  KMP_CHECK_SYSFAIL("pthread_setcanceltype", status);
  status = pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &old_state);
  KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
#endif

#if KMP_REAL_TIME_FIX
  { // Was the program started with a real-time scheduling policy?
    int sched = sched_getscheduler(0);
    if (sched == SCHED_FIFO || sched == SCHED_RR) {
      // Yes. Try to increase the priority of the monitor.
      struct sched_param param;
      int max_priority = sched_get_priority_max(sched);
      int rc;
      KMP_WARNING(RealTimeSchedNotSupported);
      sched_getparam(0, &param);
      if (param.sched_priority < max_priority) {
        param.sched_priority += 1;
        rc = sched_setscheduler(0, sched, &param);
        if (rc != 0) {
          int error = errno;
          kmp_msg_t err_code = KMP_ERR(error);
          __kmp_msg(kmp_ms_warning, KMP_MSG(CantChangeMonitorPriority),
                    err_code, KMP_MSG(MonitorWillStarve), __kmp_msg_null);
          if (__kmp_generate_warnings == kmp_warnings_off) {
            __kmp_str_free(&err_code.str);
          }
        }
      } else {
        // We cannot abort here; the number of CPUs may be enough for all the
        // threads, including the monitor, so the application could still work.
        __kmp_msg(kmp_ms_warning, KMP_MSG(RunningAtMaxPriority),
                  KMP_MSG(MonitorWillStarve), KMP_HNT(RunningAtMaxPriority),
                  __kmp_msg_null);
      }
    }
    // AC: free thread that waits for monitor started
    TCW_4(__kmp_global.g.g_time.dt.t_value, 0);
  }
#endif // KMP_REAL_TIME_FIX

  KMP_MB(); /* Flush all pending memory write invalidates.  */

  if (__kmp_monitor_wakeups == 1) {
    interval.tv_sec = 1;
    interval.tv_nsec = 0;
  } else {
    interval.tv_sec = 0;
    interval.tv_nsec = (KMP_NSEC_PER_SEC / __kmp_monitor_wakeups);
  }

  KA_TRACE(10, ("__kmp_launch_monitor: #2 monitor\n"));

  while (!TCR_4(__kmp_global.g.g_done)) {
    struct timespec now;
    struct timeval tval;

    /* This thread monitors the state of the system. */

    KA_TRACE(15, ("__kmp_launch_monitor: update\n"));

    status = gettimeofday(&tval, NULL);
    KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
    TIMEVAL_TO_TIMESPEC(&tval, &now);

    now.tv_sec += interval.tv_sec;
    now.tv_nsec += interval.tv_nsec;

    if (now.tv_nsec >= KMP_NSEC_PER_SEC) {
      now.tv_sec += 1;
      now.tv_nsec -= KMP_NSEC_PER_SEC;
    }

    status = pthread_mutex_lock(&__kmp_wait_mx.m_mutex);
    KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
    // AC: the monitor should not fall asleep if g_done has been set
    if (!TCR_4(__kmp_global.g.g_done)) { // check once more under mutex
      status = pthread_cond_timedwait(&__kmp_wait_cv.c_cond,
                                      &__kmp_wait_mx.m_mutex, &now);
      if (status != 0) {
        if (status != ETIMEDOUT && status != EINTR) {
          KMP_SYSFAIL("pthread_cond_timedwait", status);
        }
      }
    }
    status = pthread_mutex_unlock(&__kmp_wait_mx.m_mutex);
    KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);

    TCW_4(__kmp_global.g.g_time.dt.t_value,
          TCR_4(__kmp_global.g.g_time.dt.t_value) + 1);

    KMP_MB(); /* Flush all pending memory write invalidates.  */
  }

  KA_TRACE(10, ("__kmp_launch_monitor: #3 cleanup\n"));

#ifdef KMP_BLOCK_SIGNALS
  status = sigfillset(&new_set);
  KMP_CHECK_SYSFAIL_ERRNO("sigfillset", status);
  status = pthread_sigmask(SIG_UNBLOCK, &new_set, NULL);
  KMP_CHECK_SYSFAIL("pthread_sigmask", status);
#endif /* KMP_BLOCK_SIGNALS */

  KA_TRACE(10, ("__kmp_launch_monitor: #4 finished\n"));

  if (__kmp_global.g.g_abort != 0) {
    /* now we need to terminate the worker threads;
       the value of t_abort is the signal we caught */
    int gtid;

    KA_TRACE(10, ("__kmp_launch_monitor: #5 terminate sig=%d\n",
                  __kmp_global.g.g_abort));

    /* terminate the OpenMP worker threads */
    for (gtid = 1; gtid < __kmp_threads_capacity; ++gtid)
      __kmp_terminate_thread(gtid);

    __kmp_cleanup();

    KA_TRACE(10, ("__kmp_launch_monitor: #6 raise sig=%d\n",
                  __kmp_global.g.g_abort));

    if (__kmp_global.g.g_abort > 0)
      raise(__kmp_global.g.g_abort);
  }

  KA_TRACE(10, ("__kmp_launch_monitor: #7 exit\n"));

  return thr;
}
#endif // KMP_USE_MONITOR
void __kmp_create_worker(int gtid, kmp_info_t *th, size_t stack_size) {
  pthread_t handle;
  pthread_attr_t thread_attr;
  int status;

  th->th.th_info.ds.ds_gtid = gtid;

#if KMP_STATS_ENABLED
  // sets up worker thread stats
  __kmp_acquire_tas_lock(&__kmp_stats_lock, gtid);

  // th->th.th_stats is used to transfer thread-specific stats-pointer to
  // __kmp_launch_worker.
  if (!KMP_UBER_GTID(gtid)) {
    th->th.th_stats = __kmp_stats_list->push_back(gtid);
  } else {
    // uber threads are created via __kmp_register_root()
    th->th.th_stats = __kmp_stats_thread_ptr;
  }
  __kmp_release_tas_lock(&__kmp_stats_lock, gtid);

#endif // KMP_STATS_ENABLED

  if (KMP_UBER_GTID(gtid)) {
    KA_TRACE(10, ("__kmp_create_worker: uber thread (%d)\n", gtid));
    th->th.th_info.ds.ds_thread = pthread_self();
    __kmp_set_stack_info(gtid, th);
    __kmp_check_stack_overlap(th);
    return;
  }

  KA_TRACE(10, ("__kmp_create_worker: try to create thread (%d)\n", gtid));

  KMP_MB(); /* Flush all pending memory write invalidates.  */

#ifdef KMP_THREAD_ATTR
  status = pthread_attr_init(&thread_attr);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantInitThreadAttrs), KMP_ERR(status), __kmp_msg_null);
  }
  status = pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_JOINABLE);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantSetWorkerState), KMP_ERR(status), __kmp_msg_null);
  }

  /* Double the stack offset so the upcoming alloca() does not eliminate any
     premade offset, and the user still gets the requested stack space. */
  stack_size += gtid * __kmp_stkoffset * 2;

  KA_TRACE(10, ("__kmp_create_worker: T#%d, default stacksize = %lu bytes, "
                "__kmp_stksize = %lu bytes, final stacksize = %lu bytes\n",
                gtid, KMP_DEFAULT_STKSIZE, __kmp_stksize, stack_size));

#ifdef _POSIX_THREAD_ATTR_STACKSIZE
  status = pthread_attr_setstacksize(&thread_attr, stack_size);
#ifdef KMP_BACKUP_STKSIZE
  if (status != 0) {
    if (!__kmp_env_stksize) {
      stack_size = KMP_BACKUP_STKSIZE + gtid * __kmp_stkoffset;
      __kmp_stksize = KMP_BACKUP_STKSIZE;
      KA_TRACE(10, ("__kmp_create_worker: T#%d, default stacksize = %lu bytes, "
                    "__kmp_stksize = %lu bytes, (backup) final stacksize = %lu "
                    "bytes\n",
                    gtid, KMP_DEFAULT_STKSIZE, __kmp_stksize, stack_size));
      status = pthread_attr_setstacksize(&thread_attr, stack_size);
    }
  }
#endif /* KMP_BACKUP_STKSIZE */
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantSetWorkerStackSize, stack_size), KMP_ERR(status),
                KMP_HNT(ChangeWorkerStackSize), __kmp_msg_null);
  }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */

#endif /* KMP_THREAD_ATTR */

  status =
      pthread_create(&handle, &thread_attr, __kmp_launch_worker, (void *)th);
  if (status != 0 || !handle) {
#ifdef _POSIX_THREAD_ATTR_STACKSIZE
    if (status == EINVAL) {
      __kmp_fatal(KMP_MSG(CantSetWorkerStackSize, stack_size), KMP_ERR(status),
                  KMP_HNT(IncreaseWorkerStackSize), __kmp_msg_null);
    }
    if (status == ENOMEM) {
      __kmp_fatal(KMP_MSG(CantSetWorkerStackSize, stack_size), KMP_ERR(status),
                  KMP_HNT(DecreaseWorkerStackSize), __kmp_msg_null);
    }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
    if (status == EAGAIN) {
      __kmp_fatal(KMP_MSG(NoResourcesForWorkerThread), KMP_ERR(status),
                  KMP_HNT(Decrease_NUM_THREADS), __kmp_msg_null);
    }
    KMP_SYSFAIL("pthread_create", status);
  }

  th->th.th_info.ds.ds_thread = handle;

#ifdef KMP_THREAD_ATTR
  status = pthread_attr_destroy(&thread_attr);
  if (status) {
    kmp_msg_t err_code = KMP_ERR(status);
    __kmp_msg(kmp_ms_warning, KMP_MSG(CantDestroyThreadAttrs), err_code,
              __kmp_msg_null);
    if (__kmp_generate_warnings == kmp_warnings_off) {
      __kmp_str_free(&err_code.str);
    }
  }
#endif /* KMP_THREAD_ATTR */

  KMP_MB(); /* Flush all pending memory write invalidates.  */

  KA_TRACE(10, ("__kmp_create_worker: done creating thread (%d)\n", gtid));

} // __kmp_create_worker
#if KMP_USE_MONITOR
void __kmp_create_monitor(kmp_info_t *th) {
  pthread_t handle;
  pthread_attr_t thread_attr;
  size_t size;
  int status;
  int auto_adj_size = FALSE;

  if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME) {
    // We don't need the monitor thread in case of MAX_BLOCKTIME
    KA_TRACE(10, ("__kmp_create_monitor: skipping monitor thread because of "
                  "MAX blocktime\n"));
    th->th.th_info.ds.ds_tid = 0; // this makes reap_monitor no-op
    th->th.th_info.ds.ds_gtid = 0;
    return;
  }
  KA_TRACE(10, ("__kmp_create_monitor: try to create monitor\n"));

  KMP_MB(); /* Flush all pending memory write invalidates.  */

  th->th.th_info.ds.ds_tid = KMP_GTID_MONITOR;
  th->th.th_info.ds.ds_gtid = KMP_GTID_MONITOR;
#if KMP_REAL_TIME_FIX
  TCW_4(__kmp_global.g.g_time.dt.t_value,
        -1); // Will use it for synchronization a bit later.
#else
  TCW_4(__kmp_global.g.g_time.dt.t_value, 0);
#endif // KMP_REAL_TIME_FIX

#ifdef KMP_THREAD_ATTR
  if (__kmp_monitor_stksize == 0) {
    __kmp_monitor_stksize = KMP_DEFAULT_MONITOR_STKSIZE;
    auto_adj_size = TRUE;
  }
  status = pthread_attr_init(&thread_attr);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantInitThreadAttrs), KMP_ERR(status), __kmp_msg_null);
  }
  status = pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_JOINABLE);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantSetMonitorState), KMP_ERR(status), __kmp_msg_null);
  }

#ifdef _POSIX_THREAD_ATTR_STACKSIZE
  status = pthread_attr_getstacksize(&thread_attr, &size);
  KMP_CHECK_SYSFAIL("pthread_attr_getstacksize", status);
#else
  size = __kmp_sys_min_stksize;
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
#endif /* KMP_THREAD_ATTR */

  if (__kmp_monitor_stksize == 0) {
    __kmp_monitor_stksize = KMP_DEFAULT_MONITOR_STKSIZE;
  }
  if (__kmp_monitor_stksize < __kmp_sys_min_stksize) {
    __kmp_monitor_stksize = __kmp_sys_min_stksize;
  }

  KA_TRACE(10, ("__kmp_create_monitor: default stacksize = %lu bytes,"
                "requested stacksize = %lu bytes\n",
                size, __kmp_monitor_stksize));

retry:

#ifdef _POSIX_THREAD_ATTR_STACKSIZE
  KA_TRACE(10, ("__kmp_create_monitor: setting stacksize = %lu bytes,",
                __kmp_monitor_stksize));
  status = pthread_attr_setstacksize(&thread_attr, __kmp_monitor_stksize);
  if (status != 0) {
    if (auto_adj_size) {
      __kmp_monitor_stksize *= 2;
      goto retry;
    }
    kmp_msg_t err_code = KMP_ERR(status);
    __kmp_msg(kmp_ms_warning,
              KMP_MSG(CantSetMonitorStackSize, (long int)__kmp_monitor_stksize),
              err_code, KMP_HNT(ChangeMonitorStackSize), __kmp_msg_null);
    if (__kmp_generate_warnings == kmp_warnings_off) {
      __kmp_str_free(&err_code.str);
    }
  }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */

  status =
      pthread_create(&handle, &thread_attr, __kmp_launch_monitor, (void *)th);

  if (status != 0) {
#ifdef _POSIX_THREAD_ATTR_STACKSIZE
    if (status == EINVAL) {
      if (auto_adj_size && (__kmp_monitor_stksize < (size_t)0x40000000)) {
        __kmp_monitor_stksize *= 2;
        goto retry;
      }
      __kmp_fatal(KMP_MSG(CantSetMonitorStackSize, __kmp_monitor_stksize),
                  KMP_ERR(status), KMP_HNT(IncreaseMonitorStackSize),
                  __kmp_msg_null);
    }
    if (status == ENOMEM) {
      __kmp_fatal(KMP_MSG(CantSetMonitorStackSize, __kmp_monitor_stksize),
                  KMP_ERR(status), KMP_HNT(DecreaseMonitorStackSize),
                  __kmp_msg_null);
    }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
    if (status == EAGAIN) {
      __kmp_fatal(KMP_MSG(NoResourcesForMonitorThread), KMP_ERR(status),
                  KMP_HNT(DecreaseNumberOfThreadsInUse), __kmp_msg_null);
    }
    KMP_SYSFAIL("pthread_create", status);
  }

  th->th.th_info.ds.ds_thread = handle;

#if KMP_REAL_TIME_FIX
  // Wait for the monitor thread to really start and set its *priority*.
  KMP_DEBUG_ASSERT(sizeof(kmp_uint32) ==
                   sizeof(__kmp_global.g.g_time.dt.t_value));
  __kmp_wait_4((kmp_uint32 volatile *)&__kmp_global.g.g_time.dt.t_value, -1,
               &__kmp_neq_4, NULL);
#endif // KMP_REAL_TIME_FIX

#ifdef KMP_THREAD_ATTR
  status = pthread_attr_destroy(&thread_attr);
  if (status != 0) {
    kmp_msg_t err_code = KMP_ERR(status);
    __kmp_msg(kmp_ms_warning, KMP_MSG(CantDestroyThreadAttrs), err_code,
              __kmp_msg_null);
    if (__kmp_generate_warnings == kmp_warnings_off) {
      __kmp_str_free(&err_code.str);
    }
  }
#endif

  KMP_MB(); /* Flush all pending memory write invalidates.  */

  KA_TRACE(10, ("__kmp_create_monitor: monitor created %#.8lx\n",
                th->th.th_info.ds.ds_thread));

} // __kmp_create_monitor
#endif // KMP_USE_MONITOR
void __kmp_exit_thread(int exit_status) {
  pthread_exit((void *)(intptr_t)exit_status);
} // __kmp_exit_thread
#if KMP_USE_MONITOR
void __kmp_resume_monitor();

extern "C" void __kmp_reap_monitor(kmp_info_t *th) {
  int status;
  void *exit_val;

  KA_TRACE(10, ("__kmp_reap_monitor: try to reap monitor thread with handle"
                " %#.8lx\n",
                th->th.th_info.ds.ds_thread));

  // If monitor thread was not created, tid = gtid = KMP_GTID_MONITOR.
  // If both tid and gtid are 0, it means the monitor did not ever start.
  // If both tid and gtid are KMP_GTID_DNE, the monitor has been shut down.
  KMP_DEBUG_ASSERT(th->th.th_info.ds.ds_tid == th->th.th_info.ds.ds_gtid);
  if (th->th.th_info.ds.ds_gtid != KMP_GTID_MONITOR) {
    KA_TRACE(10, ("__kmp_reap_monitor: monitor did not start, returning\n"));
    return;
  }

  KMP_MB(); /* Flush all pending memory write invalidates.  */

  /* First, check to see whether the monitor thread exists to wake it up. This
     is to avoid performance problem when the monitor sleeps during
     blocktime-size interval */
  status = pthread_kill(th->th.th_info.ds.ds_thread, 0);
  if (status != ESRCH) {
    __kmp_resume_monitor(); // Wake up the monitor thread
  }
  KA_TRACE(10, ("__kmp_reap_monitor: try to join with monitor\n"));
  status = pthread_join(th->th.th_info.ds.ds_thread, &exit_val);
  if (exit_val != th) {
    __kmp_fatal(KMP_MSG(ReapMonitorError), KMP_ERR(status), __kmp_msg_null);
  }

  th->th.th_info.ds.ds_tid = KMP_GTID_DNE;
  th->th.th_info.ds.ds_gtid = KMP_GTID_DNE;

  KA_TRACE(10, ("__kmp_reap_monitor: done reaping monitor thread with handle"
                " %#.8lx\n",
                th->th.th_info.ds.ds_thread));

  KMP_MB(); /* Flush all pending memory write invalidates.  */
}
#else
// Empty symbol to export (see exports_so.txt) when monitor thread is disabled
extern "C" void __kmp_reap_monitor(kmp_info_t *th) { (void)th; }
#endif // KMP_USE_MONITOR
void __kmp_reap_worker(kmp_info_t *th) {
  int status;
  void *exit_val;

  KMP_MB(); /* Flush all pending memory write invalidates.  */

  KA_TRACE(
      10, ("__kmp_reap_worker: try to reap T#%d\n", th->th.th_info.ds.ds_gtid));

  status = pthread_join(th->th.th_info.ds.ds_thread, &exit_val);
#ifdef KMP_DEBUG
  /* Don't expose these to the user until we understand when they trigger */
  if (status != 0) {
    __kmp_fatal(KMP_MSG(ReapWorkerError), KMP_ERR(status), __kmp_msg_null);
  }
  if (exit_val != th) {
    KA_TRACE(10, ("__kmp_reap_worker: worker T#%d did not reap properly, "
                  "exit_val = %p\n",
                  th->th.th_info.ds.ds_gtid, exit_val));
  }
#else
  (void)status; // unused variable
#endif /* KMP_DEBUG */

  KA_TRACE(10, ("__kmp_reap_worker: done reaping T#%d\n",
                th->th.th_info.ds.ds_gtid));

  KMP_MB(); /* Flush all pending memory write invalidates.  */
}
#if KMP_HANDLE_SIGNALS

static void __kmp_null_handler(int signo) {
  //  Do nothing, for doing SIG_IGN-type actions.
} // __kmp_null_handler

static void __kmp_team_handler(int signo) {
  if (__kmp_global.g.g_abort == 0) {
/* Stage 1 signal handler, let's shut down all of the threads */
#ifdef KMP_DEBUG
    __kmp_debug_printf("__kmp_team_handler: caught signal = %d\n", signo);
#endif
    switch (signo) {
    case SIGHUP: case SIGINT: case SIGQUIT: case SIGILL:
    case SIGABRT: case SIGFPE: case SIGBUS: case SIGSEGV:
#ifdef SIGSYS
    case SIGSYS:
#endif
    case SIGTERM:
      if (__kmp_debug_buf) {
        __kmp_dump_debug_buffer();
      }
      __kmp_unregister_library(); // cleanup shared memory
      KMP_MB(); // Flush all pending memory write invalidates.
      TCW_4(__kmp_global.g.g_abort, signo);
      KMP_MB(); // Flush all pending memory write invalidates.
      TCW_4(__kmp_global.g.g_done, TRUE);
      KMP_MB(); // Flush all pending memory write invalidates.
      break;
    default:
#ifdef KMP_DEBUG
      __kmp_debug_printf("__kmp_team_handler: unknown signal type");
#endif
      break;
    }
  }
} // __kmp_team_handler
static void __kmp_sigaction(int signum, const struct sigaction *act,
                            struct sigaction *oldact) {
  int rc = sigaction(signum, act, oldact);
  KMP_CHECK_SYSFAIL_ERRNO("sigaction", rc);
}

static void __kmp_install_one_handler(int sig, sig_func_t handler_func,
                                      int parallel_init) {
  KMP_MB(); // Flush all pending memory write invalidates.
  KB_TRACE(60,
           ("__kmp_install_one_handler( %d, ..., %d )\n", sig, parallel_init));
  if (parallel_init) {
    struct sigaction new_action;
    struct sigaction old_action;
    new_action.sa_handler = handler_func;
    new_action.sa_flags = 0;
    sigfillset(&new_action.sa_mask);
    __kmp_sigaction(sig, &new_action, &old_action);
    if (old_action.sa_handler == __kmp_sighldrs[sig].sa_handler) {
      sigaddset(&__kmp_sigset, sig);
    } else {
      // Restore/keep user's handler if one was previously installed.
      __kmp_sigaction(sig, &old_action, NULL);
    }
  } else {
    // Save initial/system signal handlers to see if user handlers installed.
    __kmp_sigaction(sig, NULL, &__kmp_sighldrs[sig]);
  }
  KMP_MB(); // Flush all pending memory write invalidates.
} // __kmp_install_one_handler
static void __kmp_remove_one_handler(int sig) {
  KB_TRACE(60, ("__kmp_remove_one_handler( %d )\n", sig));
  if (sigismember(&__kmp_sigset, sig)) {
    struct sigaction old;
    KMP_MB(); // Flush all pending memory write invalidates.
    __kmp_sigaction(sig, &__kmp_sighldrs[sig], &old);
    if ((old.sa_handler != __kmp_team_handler) &&
        (old.sa_handler != __kmp_null_handler)) {
      // Restore the user's signal handler.
      KB_TRACE(10, ("__kmp_remove_one_handler: oops, not our handler, "
                    "restoring: sig=%d\n",
                    sig));
      __kmp_sigaction(sig, &old, NULL);
    }
    sigdelset(&__kmp_sigset, sig);
    KMP_MB(); // Flush all pending memory write invalidates.
  }
} // __kmp_remove_one_handler
void __kmp_install_signals(int parallel_init) {
  KB_TRACE(10, ("__kmp_install_signals( %d )\n", parallel_init));
  if (__kmp_handle_signals || !parallel_init) {
    /* Set up handlers at least on the thread in which the library is being
       initialized */
    sigemptyset(&__kmp_sigset);
    __kmp_install_one_handler(SIGHUP, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGINT, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGQUIT, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGILL, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGABRT, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGFPE, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGBUS, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGSEGV, __kmp_team_handler, parallel_init);
#ifdef SIGSYS
    __kmp_install_one_handler(SIGSYS, __kmp_team_handler, parallel_init);
#endif // SIGSYS
    __kmp_install_one_handler(SIGTERM, __kmp_team_handler, parallel_init);
#ifdef SIGPIPE
    __kmp_install_one_handler(SIGPIPE, __kmp_team_handler, parallel_init);
#endif // SIGPIPE
  }
} // __kmp_install_signals
void __kmp_remove_signals(void) {
  int sig;
  KB_TRACE(10, ("__kmp_remove_signals()\n"));
  for (sig = 1; sig < NSIG; ++sig) {
    __kmp_remove_one_handler(sig);
  }
} // __kmp_remove_signals

#endif // KMP_HANDLE_SIGNALS
void __kmp_enable(int new_state) {
#ifdef KMP_CANCEL_THREADS
  int status, old_state;
  status = pthread_setcancelstate(new_state, &old_state);
  KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
  KMP_DEBUG_ASSERT(old_state == PTHREAD_CANCEL_DISABLE);
#endif
}

void __kmp_disable(int *old_state) {
#ifdef KMP_CANCEL_THREADS
  int status;
  status = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, old_state);
  KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
#endif
}
static void __kmp_atfork_prepare(void) {
  __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);
  __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
}

static void __kmp_atfork_parent(void) {
  __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
  __kmp_release_bootstrap_lock(&__kmp_initz_lock);
}
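/* Reset the library to an uninitialized, serial state in the child process
   after fork(): the child inherits only the forking thread, so cached
   threads, teams, locks, and affinity data must be discarded. */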
static void __kmp_atfork_child(void) {
  __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
  __kmp_release_bootstrap_lock(&__kmp_initz_lock);
  /* reset the library state in the child, so it doesn't get confused */
  ++__kmp_fork_count;

#if KMP_AFFINITY_SUPPORTED
#if KMP_OS_LINUX || KMP_OS_FREEBSD
  // reset the affinity in the child to the initial thread affinity in the
  // parent
  kmp_set_thread_affinity_mask_initial();
#endif
  // Set default not to bind threads tightly in the child (we are expecting
  // over-subscription after the fork and this can improve things for
  // scripting languages that use OpenMP inside process-parallel code).
  if (__kmp_nested_proc_bind.bind_types != NULL) {
    __kmp_nested_proc_bind.bind_types[0] = proc_bind_false;
  }
  for (kmp_affinity_t *affinity : __kmp_affinities)
    *affinity = KMP_AFFINITY_INIT(affinity->env_var);
  __kmp_affin_fullMask = nullptr;
  __kmp_affin_origMask = nullptr;
  __kmp_topology = nullptr;
#endif // KMP_AFFINITY_SUPPORTED

#if KMP_USE_MONITOR
  __kmp_init_monitor = 0;
#endif
  __kmp_init_parallel = FALSE;
  __kmp_init_middle = FALSE;
  __kmp_init_serial = FALSE;
  TCW_4(__kmp_init_gtid, FALSE);
  __kmp_init_common = FALSE;

  TCW_4(__kmp_init_user_locks, FALSE);
#if !KMP_USE_DYNAMIC_LOCK
  __kmp_user_lock_table.used = 1;
  __kmp_user_lock_table.allocated = 0;
  __kmp_user_lock_table.table = NULL;
  __kmp_lock_blocks = NULL;
#endif

  __kmp_all_nth = 0;
  TCW_4(__kmp_nth, 0);

  __kmp_thread_pool = NULL;
  __kmp_thread_pool_insert_pt = NULL;
  __kmp_team_pool = NULL;

  /* Must actually zero all the *cache arguments passed to __kmpc_threadprivate
     here so threadprivate doesn't use stale data */
  KA_TRACE(10, ("__kmp_atfork_child: checking cache address list %p\n",
                __kmp_threadpriv_cache_list));

  while (__kmp_threadpriv_cache_list != NULL) {
    if (*__kmp_threadpriv_cache_list->addr != NULL) {
      KC_TRACE(50, ("__kmp_atfork_child: zeroing cache at address %p\n",
                    &(*__kmp_threadpriv_cache_list->addr)));
      *__kmp_threadpriv_cache_list->addr = NULL;
    }
    __kmp_threadpriv_cache_list = __kmp_threadpriv_cache_list->next;
  }

  __kmp_init_runtime = FALSE;

  /* reset statically initialized locks */
  __kmp_init_bootstrap_lock(&__kmp_initz_lock);
  __kmp_init_bootstrap_lock(&__kmp_stdio_lock);
  __kmp_init_bootstrap_lock(&__kmp_console_lock);
  __kmp_init_bootstrap_lock(&__kmp_task_team_lock);

  /* This is necessary to make sure no stale data is left around */
  __kmp_need_register_serial = FALSE;
  __kmp_serial_initialize();
}
void __kmp_register_atfork(void) {
  if (__kmp_need_register_atfork) {
    int status = pthread_atfork(__kmp_atfork_prepare, __kmp_atfork_parent,
                                __kmp_atfork_child);
    KMP_CHECK_SYSFAIL("pthread_atfork", status);
    __kmp_need_register_atfork = FALSE;
  }
}
void __kmp_suspend_initialize(void) {
  int status;
  status = pthread_mutexattr_init(&__kmp_suspend_mutex_attr);
  KMP_CHECK_SYSFAIL("pthread_mutexattr_init", status);
  status = pthread_condattr_init(&__kmp_suspend_cond_attr);
  KMP_CHECK_SYSFAIL("pthread_condattr_init", status);
}
void __kmp_suspend_initialize_thread(kmp_info_t *th) {
  int old_value = KMP_ATOMIC_LD_RLX(&th->th.th_suspend_init_count);
  int new_value = __kmp_fork_count + 1;
  // Return if already initialized
  if (old_value == new_value)
    return;
  // Wait, then return if being initialized
  if (old_value == -1 || !__kmp_atomic_compare_store(
                             &th->th.th_suspend_init_count, old_value, -1)) {
    while (KMP_ATOMIC_LD_ACQ(&th->th.th_suspend_init_count) != new_value) {
      KMP_CPU_PAUSE();
    }
  } else {
    // Claim to be the initializer and do initializations
    int status;
    status = pthread_cond_init(&th->th.th_suspend_cv.c_cond,
                               &__kmp_suspend_cond_attr);
    KMP_CHECK_SYSFAIL("pthread_cond_init", status);
    status = pthread_mutex_init(&th->th.th_suspend_mx.m_mutex,
                                &__kmp_suspend_mutex_attr);
    KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
    KMP_ATOMIC_ST_REL(&th->th.th_suspend_init_count, new_value);
  }
}
void __kmp_suspend_uninitialize_thread(kmp_info_t *th) {
  if (KMP_ATOMIC_LD_ACQ(&th->th.th_suspend_init_count) > __kmp_fork_count) {
    /* this means we have initialized the suspension pthread objects for this
       thread in this instance of the process */
    int status;

    status = pthread_cond_destroy(&th->th.th_suspend_cv.c_cond);
    if (status != 0 && status != EBUSY) {
      KMP_SYSFAIL("pthread_cond_destroy", status);
    }
    status = pthread_mutex_destroy(&th->th.th_suspend_mx.m_mutex);
    if (status != 0 && status != EBUSY) {
      KMP_SYSFAIL("pthread_mutex_destroy", status);
    }
    --th->th.th_suspend_init_count;
    KMP_DEBUG_ASSERT(KMP_ATOMIC_LD_RLX(&th->th.th_suspend_init_count) ==
                     __kmp_fork_count);
  }
}
// return true if lock obtained, false otherwise
int __kmp_try_suspend_mx(kmp_info_t *th) {
  return (pthread_mutex_trylock(&th->th.th_suspend_mx.m_mutex) == 0);
}

void __kmp_lock_suspend_mx(kmp_info_t *th) {
  int status = pthread_mutex_lock(&th->th.th_suspend_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
}

void __kmp_unlock_suspend_mx(kmp_info_t *th) {
  int status = pthread_mutex_unlock(&th->th.th_suspend_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
}
template <class C>
static inline void __kmp_suspend_template(int th_gtid, C *flag) {
  KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_suspend);
  kmp_info_t *th = __kmp_threads[th_gtid];
  int status;
  typename C::flag_t old_spin;

  KF_TRACE(30, ("__kmp_suspend_template: T#%d enter for flag = %p\n", th_gtid,
                flag->get()));

  __kmp_suspend_initialize_thread(th);

  __kmp_lock_suspend_mx(th);

  KF_TRACE(10, ("__kmp_suspend_template: T#%d setting sleep bit for spin(%p)\n",
                th_gtid, flag->get()));

  /* TODO: shouldn't this use release semantics to ensure that
     __kmp_suspend_initialize_thread gets called first? */
  old_spin = flag->set_sleeping();
  TCW_PTR(th->th.th_sleep_loc, (void *)flag);
  th->th.th_sleep_loc_type = flag->get_type();
  if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME &&
      __kmp_pause_status != kmp_soft_paused) {
    flag->unset_sleeping();
    TCW_PTR(th->th.th_sleep_loc, NULL);
    th->th.th_sleep_loc_type = flag_unset;
    __kmp_unlock_suspend_mx(th);
    return;
  }
  KF_TRACE(5, ("__kmp_suspend_template: T#%d set sleep bit for spin(%p)==%x,"
               " was %x\n",
               th_gtid, flag->get(), flag->load(), old_spin));

  if (flag->done_check_val(old_spin) || flag->done_check()) {
    flag->unset_sleeping();
    TCW_PTR(th->th.th_sleep_loc, NULL);
    th->th.th_sleep_loc_type = flag_unset;
    KF_TRACE(5, ("__kmp_suspend_template: T#%d false alarm, reset sleep bit "
                 "for spin(%p)\n",
                 th_gtid, flag->get()));
  } else {
    /* Encapsulate in a loop as the documentation states that this may "with
       low probability" return when the condition variable has not been
       signaled or broadcast */
    int deactivated = FALSE;

    while (flag->is_sleeping()) {
#ifdef DEBUG_SUSPEND
      char buffer[128];
      __kmp_suspend_count++;
      __kmp_print_cond(buffer, &th->th.th_suspend_cv);
      __kmp_printf("__kmp_suspend_template: suspending T#%d: %s\n", th_gtid,
                   buffer);
#endif
      // Mark the thread as no longer active (only in the first iteration of
      // the loop).
      if (!deactivated) {
        th->th.th_active = FALSE;
        if (th->th.th_active_in_pool) {
          th->th.th_active_in_pool = FALSE;
          KMP_ATOMIC_DEC(&__kmp_thread_pool_active_nth);
          KMP_DEBUG_ASSERT(TCR_4(__kmp_thread_pool_active_nth) >= 0);
        }
        deactivated = TRUE;
      }

      KMP_DEBUG_ASSERT(th->th.th_sleep_loc);
      KMP_DEBUG_ASSERT(flag->get_type() == th->th.th_sleep_loc_type);

#if USE_SUSPEND_TIMEOUT
      struct timespec now;
      struct timeval tval;
      int msecs;

      status = gettimeofday(&tval, NULL);
      KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
      TIMEVAL_TO_TIMESPEC(&tval, &now);

      msecs = (4 * __kmp_dflt_blocktime) + 200;
      now.tv_sec += msecs / 1000;
      now.tv_nsec += (msecs % 1000) * 1000;

      KF_TRACE(15, ("__kmp_suspend_template: T#%d about to perform "
                    "pthread_cond_timedwait\n",
                    th_gtid));
      status = pthread_cond_timedwait(&th->th.th_suspend_cv.c_cond,
                                      &th->th.th_suspend_mx.m_mutex, &now);
#else
      KF_TRACE(15, ("__kmp_suspend_template: T#%d about to perform"
                    " pthread_cond_wait\n",
                    th_gtid));
      status = pthread_cond_wait(&th->th.th_suspend_cv.c_cond,
                                 &th->th.th_suspend_mx.m_mutex);
#endif // USE_SUSPEND_TIMEOUT

      if ((status != 0) && (status != EINTR) && (status != ETIMEDOUT)) {
        KMP_SYSFAIL("pthread_cond_wait", status);
      }

      KMP_DEBUG_ASSERT(flag->get_type() == flag->get_ptr_type());

      if (!flag->is_sleeping() &&
          ((status == EINTR) || (status == ETIMEDOUT))) {
        // if interrupt or timeout, and thread is no longer sleeping, we need
        // to make sure sleep_loc gets reset; however, this shouldn't be
        // needed if we woke up with resume
        flag->unset_sleeping();
        TCW_PTR(th->th.th_sleep_loc, NULL);
        th->th.th_sleep_loc_type = flag_unset;
      }
#ifdef KMP_DEBUG
      if (status == ETIMEDOUT) {
        if (flag->is_sleeping()) {
          KF_TRACE(100,
                   ("__kmp_suspend_template: T#%d timeout wakeup\n", th_gtid));
        } else {
          KF_TRACE(2, ("__kmp_suspend_template: T#%d timeout wakeup, sleep bit "
                       "not set!\n",
                       th_gtid));
          TCW_PTR(th->th.th_sleep_loc, NULL);
          th->th.th_sleep_loc_type = flag_unset;
        }
      } else if (flag->is_sleeping()) {
        KF_TRACE(100,
                 ("__kmp_suspend_template: T#%d spurious wakeup\n", th_gtid));
      }
#endif
    } // while

    // Mark the thread as active again (if it was previously marked inactive)
    if (deactivated) {
      th->th.th_active = TRUE;
      if (TCR_4(th->th.th_in_pool)) {
        KMP_ATOMIC_INC(&__kmp_thread_pool_active_nth);
        th->th.th_active_in_pool = TRUE;
      }
    }
  }
  // We may have had the loop variable set before entering the loop body;
  // so we need to reset sleep_loc.
  TCW_PTR(th->th.th_sleep_loc, NULL);
  th->th.th_sleep_loc_type = flag_unset;

  KMP_DEBUG_ASSERT(!flag->is_sleeping());
  KMP_DEBUG_ASSERT(!th->th.th_sleep_loc);
#ifdef DEBUG_SUSPEND
  {
    char buffer[128];
    __kmp_print_cond(buffer, &th->th.th_suspend_cv);
    __kmp_printf("__kmp_suspend_template: T#%d has awakened: %s\n", th_gtid,
                 buffer);
  }
#endif

  __kmp_unlock_suspend_mx(th);
  KF_TRACE(30, ("__kmp_suspend_template: T#%d exit\n", th_gtid));
}
template <bool C, bool S>
void __kmp_suspend_32(int th_gtid, kmp_flag_32<C, S> *flag) {
  __kmp_suspend_template(th_gtid, flag);
}
template <bool C, bool S>
void __kmp_suspend_64(int th_gtid, kmp_flag_64<C, S> *flag) {
  __kmp_suspend_template(th_gtid, flag);
}
template <bool C, bool S>
void __kmp_atomic_suspend_64(int th_gtid, kmp_atomic_flag_64<C, S> *flag) {
  __kmp_suspend_template(th_gtid, flag);
}
void __kmp_suspend_oncore(int th_gtid, kmp_flag_oncore *flag) {
  __kmp_suspend_template(th_gtid, flag);
}

template void __kmp_suspend_32<false, false>(int, kmp_flag_32<false, false> *);
template void __kmp_suspend_64<false, true>(int, kmp_flag_64<false, true> *);
template void __kmp_suspend_64<true, false>(int, kmp_flag_64<true, false> *);
template void
__kmp_atomic_suspend_64<false, true>(int, kmp_atomic_flag_64<false, true> *);
template void
__kmp_atomic_suspend_64<true, false>(int, kmp_atomic_flag_64<true, false> *);
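/* This routine signals the thread specified by target_gtid to wake up
   after setting the sleep bit indicated by the flag argument to FALSE.
   The target thread must already have called __kmp_suspend_template(). */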
template <class C>
static inline void __kmp_resume_template(int target_gtid, C *flag) {
  KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_resume);
  kmp_info_t *th = __kmp_threads[target_gtid];
  int status;

#ifdef KMP_DEBUG
  int gtid = TCR_4(__kmp_init_gtid) ? __kmp_get_gtid() : -1;
#endif

  KF_TRACE(30, ("__kmp_resume_template: T#%d wants to wakeup T#%d enter\n",
                gtid, target_gtid));
  KMP_DEBUG_ASSERT(gtid != target_gtid);

  __kmp_suspend_initialize_thread(th);

  __kmp_lock_suspend_mx(th);

  if (!flag || flag != th->th.th_sleep_loc) {
    // coming from __kmp_null_resume_wrapper, or thread is now sleeping on a
    // different location; wake up at new location
    flag = (C *)CCAST(void *, th->th.th_sleep_loc);
  }

  // First, check if the flag is null or its type has changed. If so, someone
  // else woke it up.
  if (!flag) { // Thread doesn't appear to be sleeping on anything
    KF_TRACE(5, ("__kmp_resume_template: T#%d exiting, thread T#%d already "
                 "awake: flag(%p)\n",
                 gtid, target_gtid, (void *)NULL));
    __kmp_unlock_suspend_mx(th);
    return;
  } else if (flag->get_type() != th->th.th_sleep_loc_type) {
    // Flag type does not appear to match this function template; possibly the
    // thread is sleeping on something else. Try null resume again.
    KF_TRACE(
        5,
        ("__kmp_resume_template: T#%d retrying, thread T#%d Mismatch flag(%p), "
         "spin(%p) type=%d ptr_type=%d\n",
         gtid, target_gtid, flag, flag->get(), flag->get_type(),
         th->th.th_sleep_loc_type));
    __kmp_unlock_suspend_mx(th);
    __kmp_null_resume_wrapper(th);
    return;
  } else { // if multiple threads are sleeping, flag should be internally
    // referring to a specific thread here
    if (!flag->is_sleeping()) {
      KF_TRACE(5, ("__kmp_resume_template: T#%d exiting, thread T#%d already "
                   "awake: flag(%p): %u\n",
                   gtid, target_gtid, flag->get(), (unsigned int)flag->load()));
      __kmp_unlock_suspend_mx(th);
      return;
    }
  }
  KMP_DEBUG_ASSERT(flag);
  flag->unset_sleeping();
  TCW_PTR(th->th.th_sleep_loc, NULL);
  th->th.th_sleep_loc_type = flag_unset;

  KF_TRACE(5, ("__kmp_resume_template: T#%d about to wakeup T#%d, reset "
               "sleep bit for flag's loc(%p): %u\n",
               gtid, target_gtid, flag->get(), (unsigned int)flag->load()));

#ifdef DEBUG_SUSPEND
  {
    char buffer[128];
    __kmp_print_cond(buffer, &th->th.th_suspend_cv);
    __kmp_printf("__kmp_resume_template: T#%d resuming T#%d: %s\n", gtid,
                 target_gtid, buffer);
  }
#endif

  status = pthread_cond_signal(&th->th.th_suspend_cv.c_cond);
  KMP_CHECK_SYSFAIL("pthread_cond_signal", status);
  __kmp_unlock_suspend_mx(th);
  KF_TRACE(30, ("__kmp_resume_template: T#%d exiting after signaling wake up"
                " for T#%d\n",
                gtid, target_gtid));
}
template <bool C, bool S>
void __kmp_resume_32(int target_gtid, kmp_flag_32<C, S> *flag) {
  __kmp_resume_template(target_gtid, flag);
}
template <bool C, bool S>
void __kmp_resume_64(int target_gtid, kmp_flag_64<C, S> *flag) {
  __kmp_resume_template(target_gtid, flag);
}
template <bool C, bool S>
void __kmp_atomic_resume_64(int target_gtid, kmp_atomic_flag_64<C, S> *flag) {
  __kmp_resume_template(target_gtid, flag);
}
void __kmp_resume_oncore(int target_gtid, kmp_flag_oncore *flag) {
  __kmp_resume_template(target_gtid, flag);
}

template void __kmp_resume_32<false, true>(int, kmp_flag_32<false, true> *);
template void __kmp_resume_32<false, false>(int, kmp_flag_32<false, false> *);
template void __kmp_resume_64<false, true>(int, kmp_flag_64<false, true> *);
template void
__kmp_atomic_resume_64<false, true>(int, kmp_atomic_flag_64<false, true> *);
#if KMP_USE_MONITOR
void __kmp_resume_monitor() {
  KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_resume);
  int status;
#ifdef KMP_DEBUG
  int gtid = TCR_4(__kmp_init_gtid) ? __kmp_get_gtid() : -1;
  KF_TRACE(30, ("__kmp_resume_monitor: T#%d wants to wakeup T#%d enter\n", gtid,
                KMP_GTID_MONITOR));
  KMP_DEBUG_ASSERT(gtid != KMP_GTID_MONITOR);
#endif
  status = pthread_mutex_lock(&__kmp_wait_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
#ifdef DEBUG_SUSPEND
  {
    char buffer[128];
    __kmp_print_cond(buffer, &__kmp_wait_cv.c_cond);
    __kmp_printf("__kmp_resume_monitor: T#%d resuming T#%d: %s\n", gtid,
                 KMP_GTID_MONITOR, buffer);
  }
#endif
  status = pthread_cond_signal(&__kmp_wait_cv.c_cond);
  KMP_CHECK_SYSFAIL("pthread_cond_signal", status);
  status = pthread_mutex_unlock(&__kmp_wait_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
  KF_TRACE(30, ("__kmp_resume_monitor: T#%d exiting after signaling wake up"
                " for T#%d\n",
                gtid, KMP_GTID_MONITOR));
}
#endif // KMP_USE_MONITOR
void __kmp_yield() { sched_yield(); }

void __kmp_gtid_set_specific(int gtid) {
  if (__kmp_init_gtid) {
    int status;
    status = pthread_setspecific(__kmp_gtid_threadprivate_key,
                                 (void *)(intptr_t)(gtid + 1));
    KMP_CHECK_SYSFAIL("pthread_setspecific", status);
  } else {
    KA_TRACE(50, ("__kmp_gtid_set_specific: runtime shutdown, returning\n"));
  }
}
int __kmp_gtid_get_specific() {
  int gtid;
  if (!__kmp_init_gtid) {
    KA_TRACE(50, ("__kmp_gtid_get_specific: runtime shutdown, returning "
                  "KMP_GTID_SHUTDOWN\n"));
    return KMP_GTID_SHUTDOWN;
  }
  gtid = (int)(size_t)pthread_getspecific(__kmp_gtid_threadprivate_key);
  if (gtid == 0) {
    gtid = KMP_GTID_DNE;
  } else {
    gtid--;
  }
  KA_TRACE(50, ("__kmp_gtid_get_specific: key:%d gtid:%d\n",
                __kmp_gtid_threadprivate_key, gtid));
  return gtid;
}
double __kmp_read_cpu_time(void) {
  struct tms buffer;

  times(&buffer);

  return (double)(buffer.tms_utime + buffer.tms_cutime) /
         (double)CLOCKS_PER_SEC;
}
int __kmp_read_system_info(struct kmp_sys_info *info) {
  int status;
  struct rusage r_usage;

  memset(info, 0, sizeof(*info));

  status = getrusage(RUSAGE_SELF, &r_usage);
  KMP_CHECK_SYSFAIL_ERRNO("getrusage", status);

  // The maximum resident set size utilized (in kilobytes)
  info->maxrss = r_usage.ru_maxrss;
  // The number of page faults serviced without any I/O
  info->minflt = r_usage.ru_minflt;
  // The number of page faults serviced that required I/O
  info->majflt = r_usage.ru_majflt;
  // The number of times a process was "swapped" out of memory
  info->nswap = r_usage.ru_nswap;
  // The number of times the file system had to perform input
  info->inblock = r_usage.ru_inblock;
  // The number of times the file system had to perform output
  info->oublock = r_usage.ru_oublock;
  // The number of times a context switch was voluntarily made
  info->nvcsw = r_usage.ru_nvcsw;
  // The number of times a context switch was forced
  info->nivcsw = r_usage.ru_nivcsw;

  return (status != 0);
}
void __kmp_read_system_time(double *delta) {
  double t_ns;
  struct timeval tval;
  struct timespec stop;
  int status;

  status = gettimeofday(&tval, NULL);
  KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
  TIMEVAL_TO_TIMESPEC(&tval, &stop);
  t_ns = (double)(TS2NS(stop) - TS2NS(__kmp_sys_timer_data.start));
  *delta = (t_ns * 1e-9);
}
void __kmp_clear_system_time(void) {
  struct timeval tval;
  int status;
  status = gettimeofday(&tval, NULL);
  KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
  TIMEVAL_TO_TIMESPEC(&tval, &__kmp_sys_timer_data.start);
}
static int __kmp_get_xproc(void) {
  int r = 0;

#if KMP_OS_LINUX
  __kmp_type_convert(sysconf(_SC_NPROCESSORS_CONF), &(r));
#elif KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_OPENBSD || \
    KMP_OS_HURD || KMP_OS_SOLARIS
  __kmp_type_convert(sysconf(_SC_NPROCESSORS_ONLN), &(r));
#elif KMP_OS_DARWIN
  host_basic_info_data_t info;
  mach_msg_type_number_t num = HOST_BASIC_INFO_COUNT;
  kern_return_t rc =
      host_info(mach_host_self(), HOST_BASIC_INFO, (host_info_t)&info, &num);
  if (rc == 0 && num == HOST_BASIC_INFO_COUNT) {
    // Cannot use KA_TRACE() here because this code works before trace support
    // is initialized.
    r = info.avail_cpus;
  } else {
    KMP_WARNING(CantGetNumAvailCPU);
    KMP_INFORM(AssumedNumCPU);
  }
#else
#error "Unknown or unsupported OS."
#endif

  return r > 0 ? r : 2; /* guess value of 2 if OS told us 0 */
} // __kmp_get_xproc
int __kmp_read_from_file(char const *path, char const *format, ...) {
  int result;
  va_list args;

  va_start(args, format);
  FILE *f = fopen(path, "rb");
  if (f == NULL) {
    va_end(args);
    return 0;
  }
  result = vfscanf(f, format, args);
  fclose(f);
  va_end(args);

  return result;
}
void __kmp_runtime_initialize(void) {
  int status;
  pthread_mutexattr_t mutex_attr;
  pthread_condattr_t cond_attr;

  if (__kmp_init_runtime) {
    return;
  }

#if (KMP_ARCH_X86 || KMP_ARCH_X86_64)
  if (!__kmp_cpuinfo.initialized) {
    __kmp_query_cpuid(&__kmp_cpuinfo);
  }
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

  __kmp_xproc = __kmp_get_xproc();

  // Read the stack size of the calling thread and save it as the default for
  // worker threads; this should be done before reading environment variables.
  struct rlimit rlim;
  status = getrlimit(RLIMIT_STACK, &rlim);
  if (status == 0) { // success?
    __kmp_stksize = rlim.rlim_cur;
    __kmp_check_stksize(&__kmp_stksize); // check value and adjust if needed
  }

  if (sysconf(_SC_THREADS)) {

    /* Query the maximum number of threads */
    __kmp_type_convert(sysconf(_SC_THREAD_THREADS_MAX), &(__kmp_sys_max_nth));
#ifdef __ve__
    if (__kmp_sys_max_nth == -1) {
      // VE's pthread supports only up to 64 threads per VE process, so use
      // KMP_MAX_NTH (predefined as 64) here.
      __kmp_sys_max_nth = KMP_MAX_NTH;
    }
#else
    if (__kmp_sys_max_nth == -1) {
      /* Unlimited threads for NPTL */
      __kmp_sys_max_nth = INT_MAX;
    } else if (__kmp_sys_max_nth <= 1) {
      /* Can't tell, just use PTHREAD_THREADS_MAX */
      __kmp_sys_max_nth = KMP_MAX_NTH;
    }
#endif

    /* Query the minimum stack size */
    __kmp_sys_min_stksize = sysconf(_SC_THREAD_STACK_MIN);
    if (__kmp_sys_min_stksize <= 1) {
      __kmp_sys_min_stksize = KMP_MIN_STKSIZE;
    }
  }

  /* Set up minimum number of threads to switch to TLS gtid */
  __kmp_tls_gtid_min = KMP_TLS_GTID_MIN;

  status = pthread_key_create(&__kmp_gtid_threadprivate_key,
                              __kmp_internal_end_dest);
  KMP_CHECK_SYSFAIL("pthread_key_create", status);
  status = pthread_mutexattr_init(&mutex_attr);
  KMP_CHECK_SYSFAIL("pthread_mutexattr_init", status);
  status = pthread_mutex_init(&__kmp_wait_mx.m_mutex, &mutex_attr);
  KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
  status = pthread_mutexattr_destroy(&mutex_attr);
  KMP_CHECK_SYSFAIL("pthread_mutexattr_destroy", status);
  status = pthread_condattr_init(&cond_attr);
  KMP_CHECK_SYSFAIL("pthread_condattr_init", status);
  status = pthread_cond_init(&__kmp_wait_cv.c_cond, &cond_attr);
  KMP_CHECK_SYSFAIL("pthread_cond_init", status);
  status = pthread_condattr_destroy(&cond_attr);
  KMP_CHECK_SYSFAIL("pthread_condattr_destroy", status);
#if USE_ITT_BUILD
  __kmp_itt_initialize();
#endif /* USE_ITT_BUILD */

  __kmp_init_runtime = TRUE;
}
void __kmp_runtime_destroy(void) {
  int status;

  if (!__kmp_init_runtime) {
    return; // Nothing to do.
  }

#if USE_ITT_BUILD
  __kmp_itt_destroy();
#endif /* USE_ITT_BUILD */

  status = pthread_key_delete(__kmp_gtid_threadprivate_key);
  KMP_CHECK_SYSFAIL("pthread_key_delete", status);

  status = pthread_mutex_destroy(&__kmp_wait_mx.m_mutex);
  if (status != 0 && status != EBUSY) {
    KMP_SYSFAIL("pthread_mutex_destroy", status);
  }
  status = pthread_cond_destroy(&__kmp_wait_cv.c_cond);
  if (status != 0 && status != EBUSY) {
    KMP_SYSFAIL("pthread_cond_destroy", status);
  }
#if KMP_AFFINITY_SUPPORTED
  __kmp_affinity_uninitialize();
#endif

  __kmp_init_runtime = FALSE;
}
/* Put the thread to sleep for a time period */
void __kmp_thread_sleep(int millis) { sleep((millis + 500) / 1000); }

/* Calculate the elapsed wall clock time for the user */
void __kmp_elapsed(double *t) {
  int status;
#ifdef FIX_SGI_CLOCK
  struct timespec ts;

  status = clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts);
  KMP_CHECK_SYSFAIL_ERRNO("clock_gettime", status);
  *t =
      (double)ts.tv_nsec * (1.0 / (double)KMP_NSEC_PER_SEC) + (double)ts.tv_sec;
#else
  struct timeval tv;

  status = gettimeofday(&tv, NULL);
  KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
  *t =
      (double)tv.tv_usec * (1.0 / (double)KMP_USEC_PER_SEC) + (double)tv.tv_sec;
#endif
}

/* Calculate the elapsed wall clock tick for the user */
void __kmp_elapsed_tick(double *t) { *t = 1 / (double)CLOCKS_PER_SEC; }

/* Return the current time stamp in nsec */
kmp_uint64 __kmp_now_nsec() {
  struct timeval t;
  gettimeofday(&t, NULL);
  kmp_uint64 nsec = (kmp_uint64)KMP_NSEC_PER_SEC * (kmp_uint64)t.tv_sec +
                    (kmp_uint64)1000 * (kmp_uint64)t.tv_usec;
  return nsec;
}
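/* Measure clock ticks per millisecond by spinning on the hardware timestamp
   for roughly a millisecond and comparing against gettimeofday()-based
   nanoseconds. */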
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
void __kmp_initialize_system_tick() {
  kmp_uint64 now, nsec2, diff;
  kmp_uint64 delay = 1000000; // ~1 msec of wall-clock time
  kmp_uint64 nsec = __kmp_now_nsec();
  kmp_uint64 goal = __kmp_hardware_timestamp() + delay;
  while ((now = __kmp_hardware_timestamp()) < goal)
    ;
  nsec2 = __kmp_now_nsec();
  diff = nsec2 - nsec;
  if (diff > 0) {
    double tpus = 1000.0 * (double)(delay + (now - goal)) / (double)diff;
    if (tpus > 0.0) {
      __kmp_ticks_per_msec = (kmp_uint64)(tpus * 1000.0);
      __kmp_ticks_per_usec = (kmp_uint64)tpus;
    }
  }
}
#endif
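/* Determine whether the given address is mapped into the current address
   space. */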
int __kmp_is_address_mapped(void *addr) {

  int found = 0;
  int rc;

#if KMP_OS_LINUX || KMP_OS_HURD

  /* On GNUish OSes, read the /proc/<pid>/maps pseudo-file to get all the
     address ranges mapped into the address space. */

  char *name = __kmp_str_format("/proc/%d/maps", getpid());
  FILE *file = NULL;

  file = fopen(name, "r");
  KMP_ASSERT(file != NULL);

  for (;;) {

    void *beginning = NULL;
    void *ending = NULL;
    char perms[5];

    rc = fscanf(file, "%p-%p %4s %*[^\n]\n", &beginning, &ending, perms);
    if (rc == EOF) {
      break;
    }
    KMP_ASSERT(rc == 3 &&
               KMP_STRLEN(perms) == 4); // Make sure all fields are read.

    // Ending address is not included in the region, but beginning is.
    if ((addr >= beginning) && (addr < ending)) {
      perms[2] = 0; // 3rd and 4th character does not matter.
      if (strcmp(perms, "rw") == 0) {
        // Memory we are looking for should be readable and writable.
        found = 1;
      }
      break;
    }
  }

  // Free resources.
  fclose(file);
  KMP_INTERNAL_FREE(name);
#elif KMP_OS_FREEBSD
  char *buf;
  size_t lstsz;
  int mib[] = {CTL_KERN, KERN_PROC, KERN_PROC_VMMAP, getpid()};
  rc = sysctl(mib, 4, NULL, &lstsz, NULL, 0);
  if (rc < 0)
    return 0;
  // We pass the size
  lstsz = lstsz * 4 / 3;
  buf = reinterpret_cast<char *>(kmpc_malloc(lstsz));
  rc = sysctl(mib, 4, buf, &lstsz, NULL, 0);
  if (rc < 0) {
    kmpc_free(buf);
    return 0;
  }

  char *lw = buf;
  char *up = buf + lstsz;

  while (lw < up) {
    struct kinfo_vmentry *cur = reinterpret_cast<struct kinfo_vmentry *>(lw);
    size_t cursz = cur->kve_structsize;
    if (cursz == 0)
      break;
    void *start = reinterpret_cast<void *>(cur->kve_start);
    void *end = reinterpret_cast<void *>(cur->kve_end);
    // Readable/Writable addresses within current map entry
    if ((addr >= start) && (addr < end)) {
      if ((cur->kve_protection & KVME_PROT_READ) != 0 &&
          (cur->kve_protection & KVME_PROT_WRITE) != 0) {
        found = 1;
        break;
      }
    }
    lw += cursz;
  }
  kmpc_free(buf);

#elif KMP_OS_DARWIN

  /* On OS X*, /proc pseudo filesystem is not available. Try to read memory
     using the vm interface. */

  int buffer;
  vm_size_t count;
  rc = vm_read_overwrite(
      mach_task_self(), // Task to read memory of.
      (vm_address_t)(addr), // Address to read from.
      1, // Number of bytes to be read.
      (vm_address_t)(&buffer), // Address of buffer to save read bytes in.
      &count // Address of var to save number of read bytes in.
  );
  if (rc == 0) {
    // Memory successfully read.
    found = 1;
  }

#elif KMP_OS_NETBSD

  int mib[5];
  mib[0] = CTL_VM;
  mib[1] = VM_PROC;
  mib[2] = VM_PROC_MAP;
  mib[3] = getpid();
  mib[4] = sizeof(struct kinfo_vmentry);

  size_t size;
  rc = sysctl(mib, __arraycount(mib), NULL, &size, NULL, 0);
  KMP_ASSERT(!rc);
  KMP_ASSERT(size);

  size = size * 4 / 3;
  struct kinfo_vmentry *kiv =
      (struct kinfo_vmentry *)KMP_INTERNAL_MALLOC(size);
  KMP_ASSERT(kiv);

  rc = sysctl(mib, __arraycount(mib), kiv, &size, NULL, 0);
  KMP_ASSERT(!rc);
  KMP_ASSERT(size);

  for (size_t i = 0; i < size; i++) {
    if (kiv[i].kve_start >= (uint64_t)addr &&
        kiv[i].kve_end <= (uint64_t)addr) {
      found = 1;
      break;
    }
  }
  KMP_INTERNAL_FREE(kiv);
#elif KMP_OS_OPENBSD

  int mib[3];
  mib[0] = CTL_KERN;
  mib[1] = KERN_PROC_VMMAP;
  mib[2] = getpid();

  size_t size;
  uint64_t end;
  rc = sysctl(mib, 3, NULL, &size, NULL, 0);
  KMP_ASSERT(!rc);
  KMP_ASSERT(size);
  end = size;

  struct kinfo_vmentry kiv = {.kve_start = 0};

  while ((rc = sysctl(mib, 3, &kiv, &size, NULL, 0)) == 0) {
    KMP_ASSERT(size);
    if (kiv.kve_end == end)
      break;

    if (kiv.kve_start >= (uint64_t)addr && kiv.kve_end <= (uint64_t)addr) {
      found = 1;
      break;
    }
    kiv.kve_start += 1;
  }
#elif KMP_OS_DRAGONFLY || KMP_OS_SOLARIS

  // FIXME(DragonFly, Solaris): Implement this
  found = 1;

#else

#error "Unknown or unsupported OS"

#endif

  return found;

} // __kmp_is_address_mapped
#ifdef USE_LOAD_BALANCE

#if KMP_OS_DARWIN || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||   \
    KMP_OS_OPENBSD || KMP_OS_SOLARIS

// The function returns the rounded value of the system load average
// during given time interval which depends on the value of
// __kmp_load_balance_interval variable (default is 60 sec, other values
// may be 300 sec or 900 sec). It returns -1 in case of error.
int __kmp_get_load_balance(int max) {
  double averages[3];
  int ret_avg = 0;

  int res = getloadavg(averages, 3);

  // Check __kmp_load_balance_interval to determine which of averages to use.
  // getloadavg() may return the number of samples less than requested that is
  // dependent on the system load.
  if (__kmp_load_balance_interval < 180 && (res >= 1)) {
    ret_avg = (int)averages[0]; // 1 min
  } else if ((__kmp_load_balance_interval >= 180 &&
              __kmp_load_balance_interval < 600) &&
             (res >= 2)) {
    ret_avg = (int)averages[1]; // 5 min
  } else if ((__kmp_load_balance_interval >= 600) && (res == 3)) {
    ret_avg = (int)averages[2]; // 15 min
  } else { // Error occurred
    return -1;
  }

  return ret_avg;
}

#else // Linux* OS
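// The function returns number of running (not sleeping) threads, or -1 in
// case of error. Error could be reported if Linux* OS kernel too old (without
// "/proc" support). Counting running threads stops if max running threads
// encountered.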
int __kmp_get_load_balance(int max) {
  static int permanent_error = 0;
  static int glb_running_threads = 0; // Saved count of the running threads
  static double glb_call_time = 0; // Thread balance algorithm call time

  int running_threads = 0; // Number of running threads in the system.

  DIR *proc_dir = NULL; // Handle of "/proc/" directory.
  struct dirent *proc_entry = NULL;

  kmp_str_buf_t task_path; // "/proc/<pid>/task/<tid>/" path.
  DIR *task_dir = NULL; // Handle of "/proc/<pid>/task/<tid>/" directory.
  struct dirent *task_entry = NULL;
  int task_path_fixed_len;

  kmp_str_buf_t stat_path; // "/proc/<pid>/task/<tid>/stat" path.
  int stat_file = -1;
  int stat_path_fixed_len;

#ifdef KMP_DEBUG
  int total_processes = 0; // Total number of processes in system.
#endif

  double call_time = 0.0;

  __kmp_str_buf_init(&task_path);
  __kmp_str_buf_init(&stat_path);

  __kmp_elapsed(&call_time);

  if (glb_call_time &&
      (call_time - glb_call_time < __kmp_load_balance_interval)) {
    running_threads = glb_running_threads;
    goto finish;
  }

  glb_call_time = call_time;

  // Do not spend time on scanning "/proc/" if we have a permanent error.
  if (permanent_error) {
    running_threads = -1;
    goto finish;
  }

  if (max <= 0) {
    max = INT_MAX;
  }

  // Open "/proc/" directory.
  proc_dir = opendir("/proc");
  if (proc_dir == NULL) {
    // Cannot open "/proc/". Probably the kernel does not support it. Return
    // an error now and in subsequent calls.
    running_threads = -1;
    permanent_error = 1;
    goto finish;
  }

  // Initialize fixed part of task_path. This part will not change.
  __kmp_str_buf_cat(&task_path, "/proc/", 6);
  task_path_fixed_len = task_path.used; // Remember number of used characters.

  proc_entry = readdir(proc_dir);
  while (proc_entry != NULL) {
    // Proc entry is a directory and name starts with a digit. Assume it is a
    // process' directory.
    if (proc_entry->d_type == DT_DIR && isdigit(proc_entry->d_name[0])) {

#ifdef KMP_DEBUG
      ++total_processes;
#endif
      // Make sure init process is the very first in "/proc", so we can
      // replace strcmp( proc_entry->d_name, "1" ) == 0 with simpler
      // total_processes == 1. (a => b is equivalent to !a || b.)
      KMP_DEBUG_ASSERT(total_processes != 1 ||
                       strcmp(proc_entry->d_name, "1") == 0);

      // Construct task_path.
      task_path.used = task_path_fixed_len; // Reset task_path to "/proc/".
      __kmp_str_buf_cat(&task_path, proc_entry->d_name,
                        KMP_STRLEN(proc_entry->d_name));
      __kmp_str_buf_cat(&task_path, "/task", 5);

      task_dir = opendir(task_path.str);
      if (task_dir == NULL) {
        // The process can finish between reading the "/proc/" entry and
        // opening its "task/" directory, so in general we should just skip
        // it. But "init" (pid 1) should always exist, so if we cannot open
        // "/proc/1/task/", the kernel does not support "task/" at all:
        // report a permanent error.
        if (strcmp(proc_entry->d_name, "1") == 0) {
          running_threads = -1;
          permanent_error = 1;
          goto finish;
        }
      } else {
        // Construct fixed part of stat file path.
        __kmp_str_buf_clear(&stat_path);
        __kmp_str_buf_cat(&stat_path, task_path.str, task_path.used);
        __kmp_str_buf_cat(&stat_path, "/", 1);
        stat_path_fixed_len = stat_path.used;

        task_entry = readdir(task_dir);
        while (task_entry != NULL) {
          // It is a directory and name starts with a digit.
          if (proc_entry->d_type == DT_DIR && isdigit(task_entry->d_name[0])) {

            // Construct complete stat file path with a series of
            // __kmp_str_buf_cat (a bit faster than __kmp_str_buf_print).
            stat_path.used =
                stat_path_fixed_len; // Reset stat path to its fixed part.
            __kmp_str_buf_cat(&stat_path, task_entry->d_name,
                              KMP_STRLEN(task_entry->d_name));
            __kmp_str_buf_cat(&stat_path, "/stat", 5);

            // Note: Low-level API (open/read/close) is used. High-level API
            // (fopen/fclose) works ~30% slower.
            stat_file = open(stat_path.str, O_RDONLY);
            if (stat_file == -1) {
              // We cannot report an error because the task (thread) can
              // terminate just before reading this file.
            } else {
              /* Content of "stat" file looks like: 24285 (program) S ...
                 First number is a thread id, then the executable name in
                 parentheses, then the state of the thread. We need just the
                 thread state. */
              char buffer[65];
              ssize_t len;
              len = read(stat_file, buffer, sizeof(buffer) - 1);
              if (len >= 0) {
                buffer[len] = 0;
                // Searching for a closing parenthesis works a bit faster
                // than sscanf( buffer, "%*d (%*s) %c ", &state ).
                char *close_parent = strstr(buffer, ") ");
                if (close_parent != NULL) {
                  char state = *(close_parent + 2);
                  if (state == 'R') {
                    ++running_threads;
                    if (running_threads >= max) {
                      goto finish;
                    }
                  }
                }
              }
              close(stat_file);
              stat_file = -1;
            }
          }
          task_entry = readdir(task_dir);
        }
        closedir(task_dir);
        task_dir = NULL;
      }
    }
    proc_entry = readdir(proc_dir);
  }

  // There _might_ be a timing hole where the thread executing this
  // code gets skipped in the load balance, and running_threads is 0.
  // Assert in the debug builds only!!!
  KMP_DEBUG_ASSERT(running_threads > 0);
  if (running_threads <= 0) {
    running_threads = 1;
  }

finish: // Clean up and exit.
  if (proc_dir != NULL) {
    closedir(proc_dir);
  }
  __kmp_str_buf_free(&task_path);
  if (task_dir != NULL) {
    closedir(task_dir);
  }
  __kmp_str_buf_free(&stat_path);
  if (stat_file != -1) {
    close(stat_file);
  }

  glb_running_threads = running_threads;

  return running_threads;

} // __kmp_get_load_balance

#endif // KMP_OS_DARWIN etc.

#endif // USE_LOAD_BALANCE
#if !(KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_MIC ||                            \
      ((KMP_OS_LINUX || KMP_OS_DARWIN) && KMP_ARCH_AARCH64) ||                 \
      KMP_ARCH_PPC64 || KMP_ARCH_RISCV64 || KMP_ARCH_LOONGARCH64 ||            \
      KMP_ARCH_ARM || KMP_ARCH_VE || KMP_ARCH_S390X)

// Generic C fallback: spread the flattened argument array back out into an
// explicit call for each supported arity.
int __kmp_invoke_microtask(microtask_t pkfn, int gtid, int tid, int argc,
                           void *p_argv[]
#if OMPT_SUPPORT
                           ,
                           void **exit_frame_ptr
#endif
) {
#if OMPT_SUPPORT
  *exit_frame_ptr = OMPT_GET_FRAME_ADDRESS(0);
#endif

  switch (argc) {
  default:
    fprintf(stderr, "Too many args to microtask: %d!\n", argc);
    fflush(stderr);
    exit(-1);
  case 0:
    (*pkfn)(&gtid, &tid);
    break;
  case 1:
    (*pkfn)(&gtid, &tid, p_argv[0]);
    break;
  case 2:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1]);
    break;
  case 3:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2]);
    break;
  case 4:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3]);
    break;
  case 5:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4]);
    break;
  case 6:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5]);
    break;
  case 7:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6]);
    break;
  case 8:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7]);
    break;
  case 9:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8]);
    break;
  case 10:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9]);
    break;
  case 11:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10]);
    break;
  case 12:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
            p_argv[11]);
    break;
  case 13:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
            p_argv[11], p_argv[12]);
    break;
  case 14:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
            p_argv[11], p_argv[12], p_argv[13]);
    break;
  case 15:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
            p_argv[11], p_argv[12], p_argv[13], p_argv[14]);
    break;
  }

  return 1;
}

#endif
#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD
// Functions for hidden helper task
namespace {
// Condition variable for initializing hidden helper team
pthread_cond_t hidden_helper_threads_initz_cond_var;
pthread_mutex_t hidden_helper_threads_initz_lock;
volatile int hidden_helper_initz_signaled = FALSE;

// Condition variable for deinitializing hidden helper team
pthread_cond_t hidden_helper_threads_deinitz_cond_var;
pthread_mutex_t hidden_helper_threads_deinitz_lock;
volatile int hidden_helper_deinitz_signaled = FALSE;

// Condition variable for the wrapper function of main thread
pthread_cond_t hidden_helper_main_thread_cond_var;
pthread_mutex_t hidden_helper_main_thread_lock;
volatile int hidden_helper_main_thread_signaled = FALSE;

// Semaphore for worker threads. We don't use condition variable here in case
// that when multiple signals are sent at the same time, only one thread might
// be waken up.
sem_t hidden_helper_task_sem;
} // namespace
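// The wait/release helpers below synchronize startup and shutdown of the
// hidden helper team with the initial thread.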
void __kmp_hidden_helper_worker_thread_wait() {
  int status = sem_wait(&hidden_helper_task_sem);
  KMP_CHECK_SYSFAIL("sem_wait", status);
}

void __kmp_do_initialize_hidden_helper_threads() {
  // Initialize condition variables
  int status =
      pthread_cond_init(&hidden_helper_threads_initz_cond_var, nullptr);
  KMP_CHECK_SYSFAIL("pthread_cond_init", status);

  status = pthread_cond_init(&hidden_helper_threads_deinitz_cond_var, nullptr);
  KMP_CHECK_SYSFAIL("pthread_cond_init", status);

  status = pthread_cond_init(&hidden_helper_main_thread_cond_var, nullptr);
  KMP_CHECK_SYSFAIL("pthread_cond_init", status);

  status = pthread_mutex_init(&hidden_helper_threads_initz_lock, nullptr);
  KMP_CHECK_SYSFAIL("pthread_mutex_init", status);

  status = pthread_mutex_init(&hidden_helper_threads_deinitz_lock, nullptr);
  KMP_CHECK_SYSFAIL("pthread_mutex_init", status);

  status = pthread_mutex_init(&hidden_helper_main_thread_lock, nullptr);
  KMP_CHECK_SYSFAIL("pthread_mutex_init", status);

  // Initialize the semaphore
  status = sem_init(&hidden_helper_task_sem, 0, 0);
  KMP_CHECK_SYSFAIL("sem_init", status);

  // Create a new thread to finish initialization
  pthread_t handle;
  status = pthread_create(
      &handle, nullptr,
      [](void *) -> void * {
        __kmp_hidden_helper_threads_initz_routine();
        return nullptr;
      },
      nullptr);
  KMP_CHECK_SYSFAIL("pthread_create", status);
}
void __kmp_hidden_helper_threads_initz_wait() {
  // Initial thread waits here for the completion of the initialization. The
  // condition variable will be notified by the main thread of hidden helper
  // teams.
  int status = pthread_mutex_lock(&hidden_helper_threads_initz_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);

  if (!TCR_4(hidden_helper_initz_signaled)) {
    status = pthread_cond_wait(&hidden_helper_threads_initz_cond_var,
                               &hidden_helper_threads_initz_lock);
    KMP_CHECK_SYSFAIL("pthread_cond_wait", status);
  }

  status = pthread_mutex_unlock(&hidden_helper_threads_initz_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
}
void __kmp_hidden_helper_initz_release() {
  // Signal the initial thread that initialization has completed.
  int status = pthread_mutex_lock(&hidden_helper_threads_initz_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);

  status = pthread_cond_signal(&hidden_helper_threads_initz_cond_var);
  KMP_CHECK_SYSFAIL("pthread_cond_signal", status);

  TCW_SYNC_4(hidden_helper_initz_signaled, TRUE);

  status = pthread_mutex_unlock(&hidden_helper_threads_initz_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
}
void __kmp_hidden_helper_main_thread_wait() {
  // The main thread of hidden helper team waits here for the signal to stop
  // the team.
  int status = pthread_mutex_lock(&hidden_helper_main_thread_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);

  if (!TCR_4(hidden_helper_main_thread_signaled)) {
    status = pthread_cond_wait(&hidden_helper_main_thread_cond_var,
                               &hidden_helper_main_thread_lock);
    KMP_CHECK_SYSFAIL("pthread_cond_wait", status);
  }

  status = pthread_mutex_unlock(&hidden_helper_main_thread_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
}
void __kmp_hidden_helper_main_thread_release() {
  // The initial thread of OpenMP RTL should be notified of the completion of
  // the initialization of hidden helper team.
  int status = pthread_mutex_lock(&hidden_helper_main_thread_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);

  status = pthread_cond_signal(&hidden_helper_main_thread_cond_var);
  KMP_CHECK_SYSFAIL("pthread_cond_signal", status);

  // Maybe we should have a memory fence here
  TCW_SYNC_4(hidden_helper_main_thread_signaled, TRUE);

  status = pthread_mutex_unlock(&hidden_helper_main_thread_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
}

void __kmp_hidden_helper_worker_thread_signal() {
  int status = sem_post(&hidden_helper_task_sem);
  KMP_CHECK_SYSFAIL("sem_post", status);
}
void __kmp_hidden_helper_threads_deinitz_wait() {
  // Initial thread waits here for the completion of the deinitialization. The
  // condition variable will be notified by the main thread of hidden helper
  // teams.
  int status = pthread_mutex_lock(&hidden_helper_threads_deinitz_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);

  if (!TCR_4(hidden_helper_deinitz_signaled)) {
    status = pthread_cond_wait(&hidden_helper_threads_deinitz_cond_var,
                               &hidden_helper_threads_deinitz_lock);
    KMP_CHECK_SYSFAIL("pthread_cond_wait", status);
  }

  status = pthread_mutex_unlock(&hidden_helper_threads_deinitz_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
}

void __kmp_hidden_helper_threads_deinitz_release() {
  int status = pthread_mutex_lock(&hidden_helper_threads_deinitz_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);

  status = pthread_cond_signal(&hidden_helper_threads_deinitz_cond_var);
  KMP_CHECK_SYSFAIL("pthread_cond_signal", status);

  TCW_SYNC_4(hidden_helper_deinitz_signaled, TRUE);

  status = pthread_mutex_unlock(&hidden_helper_threads_deinitz_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
}
#else // KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD
void __kmp_hidden_helper_worker_thread_wait() {
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
}

void __kmp_do_initialize_hidden_helper_threads() {
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
}

void __kmp_hidden_helper_threads_initz_wait() {
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
}

void __kmp_hidden_helper_initz_release() {
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
}

void __kmp_hidden_helper_main_thread_wait() {
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
}

void __kmp_hidden_helper_main_thread_release() {
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
}

void __kmp_hidden_helper_worker_thread_signal() {
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
}

void __kmp_hidden_helper_threads_deinitz_wait() {
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
}

void __kmp_hidden_helper_threads_deinitz_release() {
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
}
#endif // KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD
bool __kmp_detect_shm() {
  DIR *dir = opendir("/dev/shm");
  if (dir) { // /dev/shm exists
    closedir(dir);
    return true;
  } else if (ENOENT == errno) { // /dev/shm does not exist
    return false;
  } else { // opendir() failed
    return false;
  }
}

bool __kmp_detect_tmp() {
  DIR *dir = opendir("/tmp");
  if (dir) { // /tmp exists
    closedir(dir);
    return true;
  } else if (ENOENT == errno) { // /tmp does not exist
    return false;
  } else { // opendir() failed
    return false;
  }
}