14#include "kmp_affinity.h"
21#include "kmp_wait_release.h"
22#include "kmp_wrapper_getpid.h"
24#if !KMP_OS_DRAGONFLY && !KMP_OS_FREEBSD && !KMP_OS_NETBSD && !KMP_OS_OPENBSD
31#include <sys/resource.h>
33#include <sys/syscall.h>
40#include <sys/sysinfo.h>
56#include <sys/sysctl.h>
57#elif KMP_OS_DRAGONFLY || KMP_OS_FREEBSD
59#include <sys/sysctl.h>
61#include <pthread_np.h>
62#elif KMP_OS_NETBSD || KMP_OS_OPENBSD
64#include <sys/sysctl.h>
66#include <sys/loadavg.h>
struct kmp_sys_timer {
  struct timespec start;
};

// Convert timeval to timespec.
#ifndef TIMEVAL_TO_TIMESPEC
#define TIMEVAL_TO_TIMESPEC(tv, ts)                                           \
  do {                                                                        \
    (ts)->tv_sec = (tv)->tv_sec;                                              \
    (ts)->tv_nsec = (tv)->tv_usec * 1000;                                     \
  } while (0)
#endif

#define TS2NS(timespec)                                                       \
  (((timespec).tv_sec * (long int)1e9) + (timespec).tv_nsec)

static struct kmp_sys_timer __kmp_sys_timer_data;
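// Illustrative sketch only (not part of the runtime): how the two macros
// above combine to turn a gettimeofday() reading into the nanosecond scale
// used by the delta computations in __kmp_read_system_time():
//   struct timeval tv;
//   struct timespec ts;
//   gettimeofday(&tv, NULL);
//   TIMEVAL_TO_TIMESPEC(&tv, &ts);
//   kmp_uint64 ns = TS2NS(ts);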
typedef void (*sig_func_t)(int);
STATIC_EFI2_WORKAROUND struct sigaction __kmp_sighldrs[NSIG];
static sigset_t __kmp_sigset;

static int __kmp_init_runtime = FALSE;

static int __kmp_fork_count = 0;

static pthread_condattr_t __kmp_suspend_cond_attr;
static pthread_mutexattr_t __kmp_suspend_mutex_attr;

static kmp_cond_align_t __kmp_wait_cv;
static kmp_mutex_align_t __kmp_wait_mx;

kmp_uint64 __kmp_ticks_per_msec = 1000000;
kmp_uint64 __kmp_ticks_per_usec = 1000;
static void __kmp_print_cond(char *buffer, kmp_cond_align_t *cond) {
  KMP_SNPRINTF(buffer, 128, "(cond (lock (%ld, %d)), (descr (%p)))",
               cond->c_cond.__c_lock.__status, cond->c_cond.__c_lock.__spinlock,
               cond->c_cond.__c_waiting);
}
#if ((KMP_OS_LINUX || KMP_OS_FREEBSD) && KMP_AFFINITY_SUPPORTED)

// Bind the calling thread to the single processor "which".
void __kmp_affinity_bind_thread(int which) {
  KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
              "Illegal set affinity operation when not capable");

  kmp_affin_mask_t *mask;
  KMP_CPU_ALLOC_ON_STACK(mask);
  KMP_CPU_ZERO(mask); // clear the stack-allocated mask before setting a bit
  KMP_CPU_SET(which, mask);
  __kmp_set_system_affinity(mask, TRUE);
  KMP_CPU_FREE_FROM_STACK(mask);
}
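// Capability probing strategy used below: on Linux* OS the kernel does not
// directly export the affinity mask size it expects, so the code calls the
// sched_getaffinity system call first with a small trial size and then, if
// necessary, with doubling sizes until a call succeeds; on FreeBSD a single
// pthread_getaffinity_np() call with sizeof(cpuset_t) suffices.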
// Determine whether we can access affinity functionality on this version of
// the OS, and set __kmp_affin_mask_size to the appropriate value (0 means
// "cannot determine mask size"; affinity stays disabled in that case).
void __kmp_affinity_determine_capable(const char *env_var) {
#if KMP_OS_LINUX
#define KMP_CPU_SET_SIZE_LIMIT (1024 * 1024)
#define KMP_CPU_SET_TRY_SIZE CACHE_LINE
#elif KMP_OS_FREEBSD
#define KMP_CPU_SET_SIZE_LIMIT (sizeof(cpuset_t))
#endif

  int verbose = __kmp_affinity.flags.verbose;
  int warnings = __kmp_affinity.flags.warnings;
  enum affinity_type type = __kmp_affinity.type;
#if KMP_OS_LINUX
  long gCode;
  unsigned char *buf;
  buf = (unsigned char *)KMP_INTERNAL_MALLOC(KMP_CPU_SET_SIZE_LIMIT);

  // If the syscall returns a suggestion for the size, we do not have to
  // search for an appropriate size.
  gCode = syscall(__NR_sched_getaffinity, 0, KMP_CPU_SET_TRY_SIZE, buf);
  KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                "initial getaffinity call returned %ld errno = %d\n",
                gCode, errno));

  if (gCode < 0 && errno != EINVAL) {
    // System call not supported.
    if (verbose ||
        (warnings && (type != affinity_none) && (type != affinity_default) &&
         (type != affinity_disabled))) {
      int error = errno;
      kmp_msg_t err_code = KMP_ERR(error);
      __kmp_msg(kmp_ms_warning, KMP_MSG(GetAffSysCallNotSupported, env_var),
                err_code, __kmp_msg_null);
      if (__kmp_generate_warnings == kmp_warnings_off) {
        __kmp_str_free(&err_code.str);
      }
    }
    KMP_AFFINITY_DISABLE();
    KMP_INTERNAL_FREE(buf);
    return;
  } else if (gCode > 0) {
    // The optimal situation: the OS returned the size of the buffer it
    // expects.
    KMP_AFFINITY_ENABLE(gCode);
    KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                  "affinity supported (mask size %d)\n",
                  (int)__kmp_affin_mask_size));
    KMP_INTERNAL_FREE(buf);
    return;
  }
  // Call the getaffinity system call repeatedly with increasing set sizes
  // until a call succeeds.
  KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                "searching for proper set size\n"));
  int size;
  for (size = 1; size <= KMP_CPU_SET_SIZE_LIMIT; size *= 2) {
    gCode = syscall(__NR_sched_getaffinity, 0, size, buf);
    KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                  "getaffinity for mask size %ld returned %ld errno = %d\n",
                  size, gCode, errno));

    if (gCode < 0) {
      if (errno == ENOSYS) {
        // We should never get here.
        KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                      "inconsistent OS call behavior: errno == ENOSYS for mask "
                      "size %d\n",
                      size));
        if (verbose ||
            (warnings && (type != affinity_none) &&
             (type != affinity_default) && (type != affinity_disabled))) {
          int error = errno;
          kmp_msg_t err_code = KMP_ERR(error);
          __kmp_msg(kmp_ms_warning, KMP_MSG(GetAffSysCallNotSupported, env_var),
                    err_code, __kmp_msg_null);
          if (__kmp_generate_warnings == kmp_warnings_off) {
            __kmp_str_free(&err_code.str);
          }
        }
        KMP_AFFINITY_DISABLE();
        KMP_INTERNAL_FREE(buf);
        return;
      }
      continue;
    }

    KMP_AFFINITY_ENABLE(gCode);
    KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                  "affinity supported (mask size %d)\n",
                  (int)__kmp_affin_mask_size));
    KMP_INTERNAL_FREE(buf);
    return;
  }
#elif KMP_OS_FREEBSD
  long gCode;
  unsigned char *buf;
  buf = (unsigned char *)KMP_INTERNAL_MALLOC(KMP_CPU_SET_SIZE_LIMIT);
  gCode = pthread_getaffinity_np(pthread_self(), KMP_CPU_SET_SIZE_LIMIT,
                                 reinterpret_cast<cpuset_t *>(buf));
  KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                "initial getaffinity call returned %d errno = %d\n",
                gCode, errno));
  if (gCode == 0) {
    KMP_AFFINITY_ENABLE(KMP_CPU_SET_SIZE_LIMIT);
    KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                  "affinity supported (mask size %d)\n",
                  (int)__kmp_affin_mask_size));
    KMP_INTERNAL_FREE(buf);
    return;
  }
#endif
  // Affinity support is not available.
  KMP_INTERNAL_FREE(buf);
  KMP_AFFINITY_DISABLE();
  KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                "cannot determine mask size - affinity not supported\n"));
  if (verbose || (warnings && (type != affinity_none) &&
                  (type != affinity_default) && (type != affinity_disabled))) {
    KMP_WARNING(AffCantGetMaskSize, env_var);
  }
}
int __kmp_futex_determine_capable() {
  int loc = 0;
  long rc = syscall(__NR_futex, &loc, FUTEX_WAKE, 1, NULL, NULL, 0);
  int retval = (rc == 0) || (errno != ENOSYS);

  KA_TRACE(10,
           ("__kmp_futex_determine_capable: rc = %d errno = %d\n", rc, errno));
  KA_TRACE(10, ("__kmp_futex_determine_capable: futex syscall%s supported\n",
                retval ? "" : " not"));

  return retval;
}
#if (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_WASM) && (!KMP_ASM_INTRINS)

kmp_int8 __kmp_test_then_or8(volatile kmp_int8 *p, kmp_int8 d) {
  kmp_int8 old_value, new_value;

  old_value = TCR_1(*p);
  new_value = old_value | d;

  while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_1(*p);
    new_value = old_value | d;
  }
  return old_value;
}

kmp_int8 __kmp_test_then_and8(volatile kmp_int8 *p, kmp_int8 d) {
  kmp_int8 old_value, new_value;

  old_value = TCR_1(*p);
  new_value = old_value & d;

  while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_1(*p);
    new_value = old_value & d;
  }
  return old_value;
}

kmp_uint32 __kmp_test_then_or32(volatile kmp_uint32 *p, kmp_uint32 d) {
  kmp_uint32 old_value, new_value;

  old_value = TCR_4(*p);
  new_value = old_value | d;

  while (!KMP_COMPARE_AND_STORE_REL32(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_4(*p);
    new_value = old_value | d;
  }
  return old_value;
}

kmp_uint32 __kmp_test_then_and32(volatile kmp_uint32 *p, kmp_uint32 d) {
  kmp_uint32 old_value, new_value;

  old_value = TCR_4(*p);
  new_value = old_value & d;

  while (!KMP_COMPARE_AND_STORE_REL32(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_4(*p);
    new_value = old_value & d;
  }
  return old_value;
}

#if KMP_ARCH_X86 || KMP_ARCH_WASM
kmp_int8 __kmp_test_then_add8(volatile kmp_int8 *p, kmp_int8 d) {
  kmp_int8 old_value, new_value;

  old_value = TCR_1(*p);
  new_value = old_value + d;

  while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_1(*p);
    new_value = old_value + d;
  }
  return old_value;
}

kmp_int64 __kmp_test_then_add64(volatile kmp_int64 *p, kmp_int64 d) {
  kmp_int64 old_value, new_value;

  old_value = TCR_8(*p);
  new_value = old_value + d;

  while (!KMP_COMPARE_AND_STORE_REL64(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_8(*p);
    new_value = old_value + d;
  }
  return old_value;
}
#endif /* KMP_ARCH_X86 || KMP_ARCH_WASM */

kmp_uint64 __kmp_test_then_or64(volatile kmp_uint64 *p, kmp_uint64 d) {
  kmp_uint64 old_value, new_value;

  old_value = TCR_8(*p);
  new_value = old_value | d;
  while (!KMP_COMPARE_AND_STORE_REL64(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_8(*p);
    new_value = old_value | d;
  }
  return old_value;
}

kmp_uint64 __kmp_test_then_and64(volatile kmp_uint64 *p, kmp_uint64 d) {
  kmp_uint64 old_value, new_value;

  old_value = TCR_8(*p);
  new_value = old_value & d;
  while (!KMP_COMPARE_AND_STORE_REL64(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_8(*p);
    new_value = old_value & d;
  }
  return old_value;
}

#endif /* (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_WASM) && !KMP_ASM_INTRINS */
void __kmp_terminate_thread(int gtid) {
  int status;
  kmp_info_t *th = __kmp_threads[gtid];

  if (!th)
    return;

#ifdef KMP_CANCEL_THREADS
  KA_TRACE(10, ("__kmp_terminate_thread: kill (%d)\n", gtid));
  status = pthread_cancel(th->th.th_info.ds.ds_thread);
  if (status != 0 && status != ESRCH) {
    __kmp_fatal(KMP_MSG(CantTerminateWorkerThread), KMP_ERR(status),
                __kmp_msg_null);
  }
#endif
  KMP_YIELD(TRUE);
}
/* Set thread stack info according to the values returned by
   pthread_getattr_np(). If the values are unreasonable, assume the call
   failed and use incremental stack refinement instead. Returns TRUE if the
   stack parameters could be determined exactly, FALSE if incremental
   refinement is necessary. */
static kmp_int32 __kmp_set_stack_info(int gtid, kmp_info_t *th) {
  int stack_data;
#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||    \
    KMP_OS_HURD || KMP_OS_SOLARIS || KMP_OS_AIX
  pthread_attr_t attr;
  int status;
  size_t size = 0;
  void *addr = 0;

  /* Always do incremental stack refinement for ubermaster threads since the
     initial thread stack range can be reduced by sibling thread creation so
     pthread_attr_getstack may cause thread gtid aliasing */
  if (!KMP_UBER_GTID(gtid)) {
    /* Fetch the real thread attributes */
    status = pthread_attr_init(&attr);
    KMP_CHECK_SYSFAIL("pthread_attr_init", status);
#if KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD
    status = pthread_attr_get_np(pthread_self(), &attr);
    KMP_CHECK_SYSFAIL("pthread_attr_get_np", status);
#else
    status = pthread_getattr_np(pthread_self(), &attr);
    KMP_CHECK_SYSFAIL("pthread_getattr_np", status);
#endif
    status = pthread_attr_getstack(&attr, &addr, &size);
    KMP_CHECK_SYSFAIL("pthread_attr_getstack", status);
    KA_TRACE(60,
             ("__kmp_set_stack_info: T#%d pthread_attr_getstack returned size:"
              " %lu, low addr: %p\n",
              gtid, size, addr));
    status = pthread_attr_destroy(&attr);
    KMP_CHECK_SYSFAIL("pthread_attr_destroy", status);
  }

  if (size != 0 && addr != 0) { // was stack parameter determination successful?
    /* Store the correct base and size */
    TCW_PTR(th->th.th_info.ds.ds_stackbase, (((char *)addr) + size));
    TCW_PTR(th->th.th_info.ds.ds_stacksize, size);
    TCW_4(th->th.th_info.ds.ds_stackgrow, FALSE);
    return TRUE;
  }
#endif
  /* Use incremental refinement starting from the initial conservative
     estimate */
  TCW_PTR(th->th.th_info.ds.ds_stacksize, 0);
  TCW_PTR(th->th.th_info.ds.ds_stackbase, &stack_data);
  TCW_4(th->th.th_info.ds.ds_stackgrow, TRUE);
  return FALSE;
}
static void *__kmp_launch_worker(void *thr) {
  int status, old_type, old_state;
#ifdef KMP_BLOCK_SIGNALS
  sigset_t new_set, old_set;
#endif /* KMP_BLOCK_SIGNALS */
  void *exit_val;
#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||    \
    KMP_OS_OPENBSD || KMP_OS_HURD || KMP_OS_SOLARIS
  void *volatile padding = 0;
#endif
  int gtid;

  gtid = ((kmp_info_t *)thr)->th.th_info.ds.ds_gtid;
  __kmp_gtid_set_specific(gtid);

#if KMP_STATS_ENABLED
  // set thread-local index to point to thread-specific stats
  __kmp_stats_thread_ptr = ((kmp_info_t *)thr)->th.th_stats;
  __kmp_stats_thread_ptr->startLife();
  KMP_SET_THREAD_STATE(IDLE);
#endif

#if USE_ITT_BUILD
  __kmp_itt_thread_name(gtid);
#endif /* USE_ITT_BUILD */

#if KMP_AFFINITY_SUPPORTED
  __kmp_affinity_bind_init_mask(gtid);
#endif

#ifdef KMP_CANCEL_THREADS
  status = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old_type);
  KMP_CHECK_SYSFAIL("pthread_setcanceltype", status);
  status = pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &old_state);
  KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
#endif

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
  // Set FP control regs to be a copy of the parallel initialization thread's.
  __kmp_clear_x87_fpu_status_word();
  __kmp_load_x87_fpu_control_word(&__kmp_init_x87_fpu_control_word);
  __kmp_load_mxcsr(&__kmp_init_mxcsr);
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

#ifdef KMP_BLOCK_SIGNALS
  status = sigfillset(&new_set);
  KMP_CHECK_SYSFAIL_ERRNO("sigfillset", status);
  status = pthread_sigmask(SIG_BLOCK, &new_set, &old_set);
  KMP_CHECK_SYSFAIL("pthread_sigmask", status);
#endif /* KMP_BLOCK_SIGNALS */

#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||    \
    KMP_OS_OPENBSD || KMP_OS_HURD || KMP_OS_SOLARIS
  if (__kmp_stkoffset > 0 && gtid > 0) {
    padding = KMP_ALLOCA(gtid * __kmp_stkoffset);
  }
#endif

  KMP_MB();
  __kmp_set_stack_info(gtid, (kmp_info_t *)thr);

  __kmp_check_stack_overlap((kmp_info_t *)thr);

  exit_val = __kmp_launch_thread((kmp_info_t *)thr);

#ifdef KMP_BLOCK_SIGNALS
  status = pthread_sigmask(SIG_SETMASK, &old_set, NULL);
  KMP_CHECK_SYSFAIL("pthread_sigmask", status);
#endif /* KMP_BLOCK_SIGNALS */

  return exit_val;
}
#if KMP_USE_MONITOR
/* The monitor thread controls all of the threads in the complex. */
static void *__kmp_launch_monitor(void *thr) {
  int status, old_type, old_state;
#ifdef KMP_BLOCK_SIGNALS
  sigset_t new_set;
#endif /* KMP_BLOCK_SIGNALS */
  struct timespec interval;

  KMP_MB(); /* Flush all pending memory write invalidates. */

  KA_TRACE(10, ("__kmp_launch_monitor: #1 launched\n"));

  /* register us as the monitor thread */
  __kmp_gtid_set_specific(KMP_GTID_MONITOR);
#ifdef KMP_TDATA_GTID
  __kmp_gtid = KMP_GTID_MONITOR;
#endif

  KMP_MB();

#if USE_ITT_BUILD
  __kmp_itt_thread_ignore();
#endif /* USE_ITT_BUILD */

  __kmp_set_stack_info(((kmp_info_t *)thr)->th.th_info.ds.ds_gtid,
                       (kmp_info_t *)thr);

  __kmp_check_stack_overlap((kmp_info_t *)thr);

#ifdef KMP_CANCEL_THREADS
  status = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old_type);
  KMP_CHECK_SYSFAIL("pthread_setcanceltype", status);
  status = pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &old_state);
  KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
#endif

#if KMP_REAL_TIME_FIX
  // Check whether the application was started with a real-time scheduling
  // policy; if so, try to keep the monitor from starving the workers.
  int sched = sched_getscheduler(0);
  if (sched == SCHED_FIFO || sched == SCHED_RR) {
    // We are part of a real-time application. Check whether the monitor can
    // dismiss SCHED_FIFO/SCHED_RR and run with normal priority.
    struct sched_param param;
    int max_priority = sched_get_priority_max(sched);
    int rc;
    KMP_WARNING(RealTimeSchedNotSupported);
    sched_getparam(0, &param);
    if (param.sched_priority < max_priority) {
      param.sched_priority += 1;
      rc = sched_setscheduler(0, sched, &param);
      if (rc != 0) {
        int error = errno;
        kmp_msg_t err_code = KMP_ERR(error);
        __kmp_msg(kmp_ms_warning, KMP_MSG(CantChangeMonitorPriority),
                  err_code, KMP_MSG(MonitorWillStarve), __kmp_msg_null);
        if (__kmp_generate_warnings == kmp_warnings_off) {
          __kmp_str_free(&err_code.str);
        }
      }
    } else {
      // We cannot abort here, because the number of CPUs may be enough for
      // all the threads, including the monitor, so the application could
      // potentially still work.
      __kmp_msg(kmp_ms_warning, KMP_MSG(RunningAtMaxPriority),
                KMP_MSG(MonitorWillStarve), KMP_HNT(RunningAtMaxPriority),
                __kmp_msg_null);
    }
  }
  // Let the thread that is waiting for the monitor to start proceed.
  TCW_4(__kmp_global.g.g_time.dt.t_value, 0);
#endif // KMP_REAL_TIME_FIX

  KMP_MB(); /* Flush all pending memory write invalidates. */

  if (__kmp_monitor_wakeups == 1) {
    interval.tv_sec = 1;
    interval.tv_nsec = 0;
  } else {
    interval.tv_sec = 0;
    interval.tv_nsec = (KMP_NSEC_PER_SEC / __kmp_monitor_wakeups);
  }

  KA_TRACE(10, ("__kmp_launch_monitor: #2 monitor\n"));

  while (!TCR_4(__kmp_global.g.g_done)) {
    struct timespec now;
    struct timeval tval;

    /* This thread monitors the state of the system. */

    KA_TRACE(15, ("__kmp_launch_monitor: update\n"));

    status = gettimeofday(&tval, NULL);
    KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
    TIMEVAL_TO_TIMESPEC(&tval, &now);

    now.tv_sec += interval.tv_sec;
    now.tv_nsec += interval.tv_nsec;

    if (now.tv_nsec >= KMP_NSEC_PER_SEC) {
      now.tv_sec += 1;
      now.tv_nsec -= KMP_NSEC_PER_SEC;
    }

    status = pthread_mutex_lock(&__kmp_wait_mx.m_mutex);
    KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
    // The monitor should not fall asleep if g_done has been set.
    if (!TCR_4(__kmp_global.g.g_done)) { // check once more under mutex
      status = pthread_cond_timedwait(&__kmp_wait_cv.c_cond,
                                      &__kmp_wait_mx.m_mutex, &now);
      if (status != 0) {
        if (status != ETIMEDOUT && status != EINTR) {
          KMP_SYSFAIL("pthread_cond_timedwait", status);
        }
      }
    }
    status = pthread_mutex_unlock(&__kmp_wait_mx.m_mutex);
    KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);

    TCW_4(__kmp_global.g.g_time.dt.t_value,
          TCR_4(__kmp_global.g.g_time.dt.t_value) + 1);

    KMP_MB(); /* Flush all pending memory write invalidates. */
  } // while

  KA_TRACE(10, ("__kmp_launch_monitor: #3 cleanup\n"));

#ifdef KMP_BLOCK_SIGNALS
  status = sigfillset(&new_set);
  KMP_CHECK_SYSFAIL_ERRNO("sigfillset", status);
  status = pthread_sigmask(SIG_UNBLOCK, &new_set, NULL);
  KMP_CHECK_SYSFAIL("pthread_sigmask", status);
#endif /* KMP_BLOCK_SIGNALS */

  KA_TRACE(10, ("__kmp_launch_monitor: #4 finished\n"));

  if (__kmp_global.g.g_abort != 0) {
    /* now we need to terminate the worker threads */
    int gtid;

    KA_TRACE(10, ("__kmp_launch_monitor: #5 terminate sig=%d\n",
                  __kmp_global.g.g_abort));

    /* terminate the OpenMP worker threads */
    for (gtid = 1; gtid < __kmp_threads_capacity; ++gtid)
      __kmp_terminate_thread(gtid);

    __kmp_cleanup();

    KA_TRACE(10, ("__kmp_launch_monitor: #6 raise sig=%d\n",
                  __kmp_global.g.g_abort));

    if (__kmp_global.g.g_abort > 0)
      raise(__kmp_global.g.g_abort);
  }

  KA_TRACE(10, ("__kmp_launch_monitor: #7 exit\n"));

  return thr;
}
#endif // KMP_USE_MONITOR
void __kmp_create_worker(int gtid, kmp_info_t *th, size_t stack_size) {
  pthread_t handle;
  pthread_attr_t thread_attr;
  int status;

  th->th.th_info.ds.ds_gtid = gtid;

#if KMP_STATS_ENABLED
  // __kmp_stats_thread_ptr is thread-local, so the worker cannot set up its
  // own statistics object before it runs; the creating thread does it here.
  __kmp_acquire_tas_lock(&__kmp_stats_lock, gtid);
  if (!KMP_UBER_GTID(gtid)) {
    th->th.th_stats = __kmp_stats_list->push_back(gtid);
  } else {
    // For root threads, the current thread's stats object is reused.
    th->th.th_stats = __kmp_stats_thread_ptr;
  }
  __kmp_release_tas_lock(&__kmp_stats_lock, gtid);
#endif // KMP_STATS_ENABLED

  if (KMP_UBER_GTID(gtid)) {
    KA_TRACE(10, ("__kmp_create_worker: uber thread (%d)\n", gtid));
    th->th.th_info.ds.ds_thread = pthread_self();
    __kmp_set_stack_info(gtid, th);
    __kmp_check_stack_overlap(th);
    return;
  }

  KA_TRACE(10, ("__kmp_create_worker: try to create thread (%d)\n", gtid));

  KMP_MB(); /* Flush all pending memory write invalidates. */

#ifdef KMP_THREAD_ATTR
  status = pthread_attr_init(&thread_attr);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantInitThreadAttrs), KMP_ERR(status), __kmp_msg_null);
  }
  status = pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_JOINABLE);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantSetWorkerState), KMP_ERR(status), __kmp_msg_null);
  }

  /* Set stack size for this thread now. The multiple of 2 is there because on
     some machines, requesting an unusual stacksize causes the thread to have
     an offset before the dummy alloca() takes place to create the offset.
     Since we want the user to have a sufficient stacksize AND support a stack
     offset, we alloca() twice the offset so that the upcoming alloca() does
     not eliminate any premade offset, and also gives the user the stack space
     they requested for all threads. */
  stack_size += gtid * __kmp_stkoffset * 2;

  KA_TRACE(10, ("__kmp_create_worker: T#%d, default stacksize = %lu bytes, "
                "__kmp_stksize = %lu bytes, final stacksize = %lu bytes\n",
                gtid, KMP_DEFAULT_STKSIZE, __kmp_stksize, stack_size));

#ifdef _POSIX_THREAD_ATTR_STACKSIZE
  status = pthread_attr_setstacksize(&thread_attr, stack_size);
#ifdef KMP_BACKUP_STKSIZE
  if (status != 0) {
    if (!__kmp_env_stksize) {
      stack_size = KMP_BACKUP_STKSIZE + gtid * __kmp_stkoffset;
      __kmp_stksize = KMP_BACKUP_STKSIZE;
      KA_TRACE(10, ("__kmp_create_worker: T#%d, default stacksize = %lu bytes, "
                    "__kmp_stksize = %lu bytes, (backup) final stacksize = %lu "
                    "bytes\n",
                    gtid, KMP_DEFAULT_STKSIZE, __kmp_stksize, stack_size));
      status = pthread_attr_setstacksize(&thread_attr, stack_size);
    }
  }
#endif /* KMP_BACKUP_STKSIZE */
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantSetWorkerStackSize, stack_size), KMP_ERR(status),
                KMP_HNT(ChangeWorkerStackSize), __kmp_msg_null);
  }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
#endif /* KMP_THREAD_ATTR */

  status =
      pthread_create(&handle, &thread_attr, __kmp_launch_worker, (void *)th);
  if (status != 0 || !handle) {
#ifdef _POSIX_THREAD_ATTR_STACKSIZE
    if (status == EINVAL) {
      __kmp_fatal(KMP_MSG(CantSetWorkerStackSize, stack_size), KMP_ERR(status),
                  KMP_HNT(IncreaseWorkerStackSize), __kmp_msg_null);
    }
    if (status == ENOMEM) {
      __kmp_fatal(KMP_MSG(CantSetWorkerStackSize, stack_size), KMP_ERR(status),
                  KMP_HNT(DecreaseWorkerStackSize), __kmp_msg_null);
    }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
    if (status == EAGAIN) {
      __kmp_fatal(KMP_MSG(NoResourcesForWorkerThread), KMP_ERR(status),
                  KMP_HNT(Decrease_NUM_THREADS), __kmp_msg_null);
    }
    KMP_SYSFAIL("pthread_create", status);
  }

  th->th.th_info.ds.ds_thread = handle;

#ifdef KMP_THREAD_ATTR
  status = pthread_attr_destroy(&thread_attr);
  if (status) {
    kmp_msg_t err_code = KMP_ERR(status);
    __kmp_msg(kmp_ms_warning, KMP_MSG(CantDestroyThreadAttrs), err_code,
              __kmp_msg_null);
    if (__kmp_generate_warnings == kmp_warnings_off) {
      __kmp_str_free(&err_code.str);
    }
  }
#endif /* KMP_THREAD_ATTR */

  KMP_MB(); /* Flush all pending memory write invalidates. */

  KA_TRACE(10, ("__kmp_create_worker: done creating thread (%d)\n", gtid));
} // __kmp_create_worker
#if KMP_USE_MONITOR
void __kmp_create_monitor(kmp_info_t *th) {
  pthread_t handle;
  pthread_attr_t thread_attr;
  size_t size;
  int status;
  int auto_adj_size = FALSE;

  if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME) {
    // We don't need the monitor thread in case of MAX_BLOCKTIME.
    KA_TRACE(10, ("__kmp_create_monitor: skipping monitor thread because of "
                  "MAX blocktime\n"));
    th->th.th_info.ds.ds_tid = 0; // this makes reap_monitor a no-op
    th->th.th_info.ds.ds_gtid = 0;
    return;
  }
  KA_TRACE(10, ("__kmp_create_monitor: try to create monitor\n"));

  KMP_MB(); /* Flush all pending memory write invalidates. */

  th->th.th_info.ds.ds_tid = KMP_GTID_MONITOR;
  th->th.th_info.ds.ds_gtid = KMP_GTID_MONITOR;
#if KMP_REAL_TIME_FIX
  TCW_4(__kmp_global.g.g_time.dt.t_value,
        -1); // Will use it for synchronization a bit later.
#else
  TCW_4(__kmp_global.g.g_time.dt.t_value, 0);
#endif // KMP_REAL_TIME_FIX

#ifdef KMP_THREAD_ATTR
  if (__kmp_monitor_stksize == 0) {
    __kmp_monitor_stksize = KMP_DEFAULT_MONITOR_STKSIZE;
    auto_adj_size = TRUE;
  }
  status = pthread_attr_init(&thread_attr);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantInitThreadAttrs), KMP_ERR(status), __kmp_msg_null);
  }
  status = pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_JOINABLE);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantSetMonitorState), KMP_ERR(status), __kmp_msg_null);
  }

#ifdef _POSIX_THREAD_ATTR_STACKSIZE
  status = pthread_attr_getstacksize(&thread_attr, &size);
  KMP_CHECK_SYSFAIL("pthread_attr_getstacksize", status);
#else
  size = __kmp_sys_min_stksize;
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
#endif /* KMP_THREAD_ATTR */

  if (__kmp_monitor_stksize == 0) {
    __kmp_monitor_stksize = KMP_DEFAULT_MONITOR_STKSIZE;
  }
  if (__kmp_monitor_stksize < __kmp_sys_min_stksize) {
    __kmp_monitor_stksize = __kmp_sys_min_stksize;
  }

  KA_TRACE(10, ("__kmp_create_monitor: default stacksize = %lu bytes,"
                "requested stacksize = %lu bytes\n",
                size, __kmp_monitor_stksize));

retry:

/* Set stack size for this thread now. */
#ifdef _POSIX_THREAD_ATTR_STACKSIZE
  KA_TRACE(10, ("__kmp_create_monitor: setting stacksize = %lu bytes,",
                __kmp_monitor_stksize));
  status = pthread_attr_setstacksize(&thread_attr, __kmp_monitor_stksize);
  if (status != 0) {
    if (auto_adj_size) {
      __kmp_monitor_stksize *= 2;
      goto retry;
    }
    kmp_msg_t err_code = KMP_ERR(status);
    __kmp_msg(kmp_ms_warning,
              KMP_MSG(CantSetMonitorStackSize, (long int)__kmp_monitor_stksize),
              err_code, KMP_HNT(ChangeMonitorStackSize), __kmp_msg_null);
    if (__kmp_generate_warnings == kmp_warnings_off) {
      __kmp_str_free(&err_code.str);
    }
  }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */

  status =
      pthread_create(&handle, &thread_attr, __kmp_launch_monitor, (void *)th);

  if (status != 0) {
#ifdef _POSIX_THREAD_ATTR_STACKSIZE
    if (status == EINVAL) {
      if (auto_adj_size && (__kmp_monitor_stksize < (size_t)0x40000000)) {
        __kmp_monitor_stksize *= 2;
        goto retry;
      }
      __kmp_fatal(KMP_MSG(CantSetMonitorStackSize, __kmp_monitor_stksize),
                  KMP_ERR(status), KMP_HNT(IncreaseMonitorStackSize),
                  __kmp_msg_null);
    }
    if (status == ENOMEM) {
      __kmp_fatal(KMP_MSG(CantSetMonitorStackSize, __kmp_monitor_stksize),
                  KMP_ERR(status), KMP_HNT(DecreaseMonitorStackSize),
                  __kmp_msg_null);
    }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
    if (status == EAGAIN) {
      __kmp_fatal(KMP_MSG(NoResourcesForMonitorThread), KMP_ERR(status),
                  KMP_HNT(DecreaseNumberOfThreadsInUse), __kmp_msg_null);
    }
    KMP_SYSFAIL("pthread_create", status);
  }

  th->th.th_info.ds.ds_thread = handle;

#if KMP_REAL_TIME_FIX
  // Wait for the monitor thread to really start and set its priority.
  KMP_DEBUG_ASSERT(sizeof(kmp_uint32) ==
                   sizeof(__kmp_global.g.g_time.dt.t_value));
  __kmp_wait_4((kmp_uint32 volatile *)&__kmp_global.g.g_time.dt.t_value, -1,
               &__kmp_neq_4, NULL);
#endif // KMP_REAL_TIME_FIX

#ifdef KMP_THREAD_ATTR
  status = pthread_attr_destroy(&thread_attr);
  if (status != 0) {
    kmp_msg_t err_code = KMP_ERR(status);
    __kmp_msg(kmp_ms_warning, KMP_MSG(CantDestroyThreadAttrs), err_code,
              __kmp_msg_null);
    if (__kmp_generate_warnings == kmp_warnings_off) {
      __kmp_str_free(&err_code.str);
    }
  }
#endif

  KMP_MB(); /* Flush all pending memory write invalidates. */

  KA_TRACE(10, ("__kmp_create_monitor: monitor created %#.8lx\n",
                th->th.th_info.ds.ds_thread));
} // __kmp_create_monitor
#endif // KMP_USE_MONITOR
void __kmp_exit_thread(int exit_status) {
  pthread_exit((void *)(intptr_t)exit_status);
} // __kmp_exit_thread
#if KMP_USE_MONITOR
void __kmp_resume_monitor();

extern "C" void __kmp_reap_monitor(kmp_info_t *th) {
  int status;
  void *exit_val;

  KA_TRACE(10, ("__kmp_reap_monitor: try to reap monitor thread with handle"
                " %#.8lx\n",
                th->th.th_info.ds.ds_thread));

  // If monitor has been created, its tid and gtid should be KMP_GTID_MONITOR.
  // If both tid and gtid are 0, it means the monitor did not ever start.
  // If both tid and gtid are KMP_GTID_DNE, the monitor has been shut down.
  KMP_DEBUG_ASSERT(th->th.th_info.ds.ds_tid == th->th.th_info.ds.ds_gtid);
  if (th->th.th_info.ds.ds_gtid != KMP_GTID_MONITOR) {
    KA_TRACE(10, ("__kmp_reap_monitor: monitor did not start, returning\n"));
    return;
  }

  KMP_MB(); /* Flush all pending memory write invalidates. */

  /* First, check whether the monitor thread exists and wake it up. This
     avoids a performance problem when the monitor sleeps during a
     blocktime-size interval. */
  status = pthread_kill(th->th.th_info.ds.ds_thread, 0);
  if (status != ESRCH) {
    __kmp_resume_monitor(); // Wake up the monitor thread.
  }
  KA_TRACE(10, ("__kmp_reap_monitor: try to join with monitor\n"));
  status = pthread_join(th->th.th_info.ds.ds_thread, &exit_val);
  if (exit_val != th) {
    __kmp_fatal(KMP_MSG(ReapMonitorError), KMP_ERR(status), __kmp_msg_null);
  }

  th->th.th_info.ds.ds_tid = KMP_GTID_DNE;
  th->th.th_info.ds.ds_gtid = KMP_GTID_DNE;

  KA_TRACE(10, ("__kmp_reap_monitor: done reaping monitor thread with handle"
                " %#.8lx\n",
                th->th.th_info.ds.ds_thread));

  KMP_MB(); /* Flush all pending memory write invalidates. */
}
1045extern "C" void __kmp_reap_monitor(kmp_info_t *th) {
void __kmp_reap_worker(kmp_info_t *th) {
  int status;
  void *exit_val;

  KMP_MB(); /* Flush all pending memory write invalidates. */

  KA_TRACE(
      10, ("__kmp_reap_worker: try to reap T#%d\n", th->th.th_info.ds.ds_gtid));

  status = pthread_join(th->th.th_info.ds.ds_thread, &exit_val);
#ifdef KMP_DEBUG
  /* Don't expose these to the user until we understand when they trigger. */
  if (status != 0) {
    __kmp_fatal(KMP_MSG(ReapWorkerError), KMP_ERR(status), __kmp_msg_null);
  }
  if (exit_val != th) {
    KA_TRACE(10, ("__kmp_reap_worker: worker T#%d did not reap properly, "
                  "exit_val = %p\n",
                  th->th.th_info.ds.ds_gtid, exit_val));
  }
#endif /* KMP_DEBUG */

  KA_TRACE(10, ("__kmp_reap_worker: done reaping T#%d\n",
                th->th.th_info.ds.ds_gtid));

  KMP_MB(); /* Flush all pending memory write invalidates. */
}
#if KMP_HANDLE_SIGNALS

static void __kmp_null_handler(int signo) {
  // Do nothing, for doing SIG_IGN-type actions.
} // __kmp_null_handler

static void __kmp_team_handler(int signo) { // Team treatment of signals.
  if (__kmp_global.g.g_abort == 0) {
/* Stage 1 signal handler, let's shut down all of the threads. */
#ifdef KMP_DEBUG
    __kmp_debug_printf("__kmp_team_handler: caught signal = %d\n", signo);
#endif
    switch (signo) {
    case SIGHUP:
    case SIGINT:
    case SIGQUIT:
    case SIGILL:
    case SIGABRT:
    case SIGFPE:
    case SIGBUS:
    case SIGSEGV:
#ifdef SIGSYS
    case SIGSYS:
#endif
    case SIGTERM:
      if (__kmp_debug_buf) {
        __kmp_dump_debug_buffer();
      }
      __kmp_unregister_library(); // cleanup shared memory
      KMP_MB(); // Flush all pending memory write invalidates.
      TCW_4(__kmp_global.g.g_abort, signo);
      KMP_MB(); // Flush all pending memory write invalidates.
      TCW_4(__kmp_global.g.g_done, TRUE);
      KMP_MB(); // Flush all pending memory write invalidates.
      break;
    default:
#ifdef KMP_DEBUG
      __kmp_debug_printf("__kmp_team_handler: unknown signal type");
#endif
      break;
    }
  }
} // __kmp_team_handler
static void __kmp_sigaction(int signum, const struct sigaction *act,
                            struct sigaction *oldact) {
  int rc = sigaction(signum, act, oldact);
  KMP_CHECK_SYSFAIL_ERRNO("sigaction", rc);
}

static void __kmp_install_one_handler(int sig, sig_func_t handler_func,
                                      int parallel_init) {
  KMP_MB(); // Flush all pending memory write invalidates.
  KB_TRACE(60,
           ("__kmp_install_one_handler( %d, ..., %d )\n", sig, parallel_init));
  if (parallel_init) {
    struct sigaction new_action;
    struct sigaction old_action;
    new_action.sa_handler = handler_func;
    new_action.sa_flags = 0;
    sigfillset(&new_action.sa_mask);
    __kmp_sigaction(sig, &new_action, &old_action);
    if (old_action.sa_handler == __kmp_sighldrs[sig].sa_handler) {
      sigaddset(&__kmp_sigset, sig);
    } else {
      // Restore/keep user's handler if one was previously installed.
      __kmp_sigaction(sig, &old_action, NULL);
    }
  } else {
    // Save the initial/system signal handlers to see whether user handlers
    // get installed later.
    __kmp_sigaction(sig, NULL, &__kmp_sighldrs[sig]);
  }
  KMP_MB(); // Flush all pending memory write invalidates.
} // __kmp_install_one_handler

static void __kmp_remove_one_handler(int sig) {
  KB_TRACE(60, ("__kmp_remove_one_handler( %d )\n", sig));
  if (sigismember(&__kmp_sigset, sig)) {
    struct sigaction old;
    KMP_MB(); // Flush all pending memory write invalidates.
    __kmp_sigaction(sig, &__kmp_sighldrs[sig], &old);
    if ((old.sa_handler != __kmp_team_handler) &&
        (old.sa_handler != __kmp_null_handler)) {
      // Restore the user's signal handler.
      KB_TRACE(10, ("__kmp_remove_one_handler: oops, not our handler, "
                    "restoring: sig=%d\n",
                    sig));
      __kmp_sigaction(sig, &old, NULL);
    }
    sigdelset(&__kmp_sigset, sig);
    KMP_MB(); // Flush all pending memory write invalidates.
  }
} // __kmp_remove_one_handler
void __kmp_install_signals(int parallel_init) {
  KB_TRACE(10, ("__kmp_install_signals( %d )\n", parallel_init));
  if (__kmp_handle_signals || !parallel_init) {
    /* If !parallel_init, we do not install handlers, just save the original
       handlers. Do it even if __kmp_handle_signals is 0. */
    sigemptyset(&__kmp_sigset);
    __kmp_install_one_handler(SIGHUP, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGINT, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGQUIT, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGILL, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGABRT, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGFPE, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGBUS, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGSEGV, __kmp_team_handler, parallel_init);
#ifdef SIGSYS
    __kmp_install_one_handler(SIGSYS, __kmp_team_handler, parallel_init);
#endif // SIGSYS
    __kmp_install_one_handler(SIGTERM, __kmp_team_handler, parallel_init);
#ifdef SIGPIPE
    __kmp_install_one_handler(SIGPIPE, __kmp_team_handler, parallel_init);
#endif // SIGPIPE
  }
} // __kmp_install_signals

void __kmp_remove_signals(void) {
  int sig;
  KB_TRACE(10, ("__kmp_remove_signals()\n"));
  for (sig = 1; sig < NSIG; ++sig) {
    __kmp_remove_one_handler(sig);
  }
} // __kmp_remove_signals
void __kmp_enable(int new_state) {
#ifdef KMP_CANCEL_THREADS
  int status, old_state;
  status = pthread_setcancelstate(new_state, &old_state);
  KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
  KMP_DEBUG_ASSERT(old_state == PTHREAD_CANCEL_DISABLE);
#endif
}

void __kmp_disable(int *old_state) {
#ifdef KMP_CANCEL_THREADS
  int status;
  status = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, old_state);
  KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
#endif
}
static void __kmp_atfork_prepare(void) {
  __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);
  __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
}

static void __kmp_atfork_parent(void) {
  __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
  __kmp_release_bootstrap_lock(&__kmp_initz_lock);
}

/* Reset the library so execution in the child starts "all over again" with
   clean data structures in initial states. Don't worry about freeing memory
   allocated by the parent, just abandon it to be safe. */
static void __kmp_atfork_child(void) {
  __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
  __kmp_release_bootstrap_lock(&__kmp_initz_lock);

  ++__kmp_fork_count;

#if KMP_AFFINITY_SUPPORTED
#if KMP_OS_LINUX || KMP_OS_FREEBSD
  // reset the affinity in the child to the initial thread affinity
  kmp_set_thread_affinity_mask_initial();
#endif
  // Set default not to bind threads tightly in the child (we're expecting
  // over-subscription after the fork and this can improve things for
  // scripting languages that use OpenMP inside process-parallel code).
  if (__kmp_nested_proc_bind.bind_types != NULL) {
    __kmp_nested_proc_bind.bind_types[0] = proc_bind_false;
  }
  for (kmp_affinity_t *affinity : __kmp_affinities)
    *affinity = KMP_AFFINITY_INIT(affinity->env_var);
  __kmp_affin_fullMask = nullptr;
  __kmp_affin_origMask = nullptr;
  __kmp_topology = nullptr;
#endif // KMP_AFFINITY_SUPPORTED

#if KMP_USE_MONITOR
  __kmp_init_monitor = 0;
#endif
  __kmp_init_parallel = FALSE;
  __kmp_init_middle = FALSE;
  __kmp_init_serial = FALSE;
  TCW_4(__kmp_init_gtid, FALSE);
  __kmp_init_common = FALSE;

  TCW_4(__kmp_init_user_locks, FALSE);
#if !KMP_USE_DYNAMIC_LOCK
  __kmp_user_lock_table.used = 1;
  __kmp_user_lock_table.allocated = 0;
  __kmp_user_lock_table.table = NULL;
  __kmp_lock_blocks = NULL;
#endif

  __kmp_all_nth = 0;
  TCW_4(__kmp_nth, 0);

  __kmp_thread_pool = NULL;
  __kmp_thread_pool_insert_pt = NULL;
  __kmp_team_pool = NULL;

  /* Must actually zero all the *cache arguments passed to __kmpc_threadprivate
     here so threadprivate doesn't use stale data. */
  KA_TRACE(10, ("__kmp_atfork_child: checking cache address list %p\n",
                __kmp_threadpriv_cache_list));

  while (__kmp_threadpriv_cache_list != NULL) {
    if (*__kmp_threadpriv_cache_list->addr != NULL) {
      KC_TRACE(50, ("__kmp_atfork_child: zeroing cache at address %p\n",
                    &(*__kmp_threadpriv_cache_list->addr)));
      *__kmp_threadpriv_cache_list->addr = NULL;
    }
    __kmp_threadpriv_cache_list = __kmp_threadpriv_cache_list->next;
  }

  __kmp_init_runtime = FALSE;

  /* reset statically initialized locks */
  __kmp_init_bootstrap_lock(&__kmp_initz_lock);
  __kmp_init_bootstrap_lock(&__kmp_stdio_lock);
  __kmp_init_bootstrap_lock(&__kmp_console_lock);
  __kmp_init_bootstrap_lock(&__kmp_task_team_lock);

  // The child process often gets terminated without any use of OpenMP, which
  // could leave a mapped shared memory file unattended. Thus we postpone
  // library registration till middle initialization in the child process.
  __kmp_need_register_serial = FALSE;
  __kmp_serial_initialize();
}
void __kmp_register_atfork(void) {
  if (__kmp_need_register_atfork) {
    int status = pthread_atfork(__kmp_atfork_prepare, __kmp_atfork_parent,
                                __kmp_atfork_child);
    KMP_CHECK_SYSFAIL("pthread_atfork", status);
    __kmp_need_register_atfork = FALSE;
  }
}
void __kmp_suspend_initialize(void) {
  int status;
  status = pthread_mutexattr_init(&__kmp_suspend_mutex_attr);
  KMP_CHECK_SYSFAIL("pthread_mutexattr_init", status);
  status = pthread_condattr_init(&__kmp_suspend_cond_attr);
  KMP_CHECK_SYSFAIL("pthread_condattr_init", status);
}
void __kmp_suspend_initialize_thread(kmp_info_t *th) {
  int old_value = KMP_ATOMIC_LD_RLX(&th->th.th_suspend_init_count);
  int new_value = __kmp_fork_count + 1;
  // Return if already initialized.
  if (old_value == new_value)
    return;
  // Wait, then return if another thread is doing the initialization.
  if (old_value == -1 || !__kmp_atomic_compare_store(
                             &th->th.th_suspend_init_count, old_value, -1)) {
    while (KMP_ATOMIC_LD_ACQ(&th->th.th_suspend_init_count) != new_value) {
      KMP_CPU_PAUSE();
    }
  } else {
    // Claim to be the initializer and do the initializations.
    int status;
    status = pthread_cond_init(&th->th.th_suspend_cv.c_cond,
                               &__kmp_suspend_cond_attr);
    KMP_CHECK_SYSFAIL("pthread_cond_init", status);
    status = pthread_mutex_init(&th->th.th_suspend_mx.m_mutex,
                                &__kmp_suspend_mutex_attr);
    KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
    KMP_ATOMIC_ST_REL(&th->th.th_suspend_init_count, new_value);
  }
}

void __kmp_suspend_uninitialize_thread(kmp_info_t *th) {
  if (KMP_ATOMIC_LD_ACQ(&th->th.th_suspend_init_count) > __kmp_fork_count) {
    /* This means we have initialized the suspension pthread objects for this
       thread in this instance of the process. */
    int status;

    status = pthread_cond_destroy(&th->th.th_suspend_cv.c_cond);
    if (status != 0 && status != EBUSY) {
      KMP_SYSFAIL("pthread_cond_destroy", status);
    }
    status = pthread_mutex_destroy(&th->th.th_suspend_mx.m_mutex);
    if (status != 0 && status != EBUSY) {
      KMP_SYSFAIL("pthread_mutex_destroy", status);
    }
    --th->th.th_suspend_init_count;
    KMP_DEBUG_ASSERT(KMP_ATOMIC_LD_RLX(&th->th.th_suspend_init_count) ==
                     __kmp_fork_count);
  }
}
int __kmp_try_suspend_mx(kmp_info_t *th) {
  return (pthread_mutex_trylock(&th->th.th_suspend_mx.m_mutex) == 0);
}

void __kmp_lock_suspend_mx(kmp_info_t *th) {
  int status = pthread_mutex_lock(&th->th.th_suspend_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
}

void __kmp_unlock_suspend_mx(kmp_info_t *th) {
  int status = pthread_mutex_unlock(&th->th.th_suspend_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
}
/* This routine puts the calling thread to sleep after setting the sleep bit
   for the indicated flag variable to true. */
template <class C>
static inline void __kmp_suspend_template(int th_gtid, C *flag) {
  KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_suspend);
  kmp_info_t *th = __kmp_threads[th_gtid];
  int status;
  typename C::flag_t old_spin;

  KF_TRACE(30, ("__kmp_suspend_template: T#%d enter for flag = %p\n", th_gtid,
                flag->get()));

  __kmp_suspend_initialize_thread(th);

  __kmp_lock_suspend_mx(th);

  KF_TRACE(10, ("__kmp_suspend_template: T#%d setting sleep bit for spin(%p)\n",
                th_gtid, flag->get()));

  old_spin = flag->set_sleeping();
  TCW_PTR(th->th.th_sleep_loc, (void *)flag);
  th->th.th_sleep_loc_type = flag->get_type();
  if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME &&
      __kmp_pause_status != kmp_soft_paused) {
    flag->unset_sleeping();
    TCW_PTR(th->th.th_sleep_loc, NULL);
    th->th.th_sleep_loc_type = flag_unset;
    __kmp_unlock_suspend_mx(th);
    return;
  }
  KF_TRACE(5, ("__kmp_suspend_template: T#%d set sleep bit for spin(%p)==%x,"
               " was %x\n",
               th_gtid, flag->get(), flag->load(), old_spin));

  if (flag->done_check_val(old_spin) || flag->done_check()) {
    flag->unset_sleeping();
    TCW_PTR(th->th.th_sleep_loc, NULL);
    th->th.th_sleep_loc_type = flag_unset;
    KF_TRACE(5, ("__kmp_suspend_template: T#%d false alarm, reset sleep bit "
                 "for spin(%p)\n",
                 th_gtid, flag->get()));
  } else {
    /* Encapsulate in a loop as the documentation states that this may "with
       low probability" return when the condition variable has not been
       signaled or broadcast. */
    int deactivated = FALSE;

    while (flag->is_sleeping()) {
#ifdef DEBUG_SUSPEND
      char buffer[128];
      __kmp_suspend_count++;
      __kmp_print_cond(buffer, &th->th.th_suspend_cv);
      __kmp_printf("__kmp_suspend_template: suspending T#%d: %s\n", th_gtid,
                   buffer);
#endif
      // Mark the thread as no longer active (only in the first iteration of
      // the loop).
      if (!deactivated) {
        th->th.th_active = FALSE;
        if (th->th.th_active_in_pool) {
          th->th.th_active_in_pool = FALSE;
          KMP_ATOMIC_DEC(&__kmp_thread_pool_active_nth);
          KMP_DEBUG_ASSERT(TCR_4(__kmp_thread_pool_active_nth) >= 0);
        }
        deactivated = TRUE;
      }

      KMP_DEBUG_ASSERT(th->th.th_sleep_loc);
      KMP_DEBUG_ASSERT(flag->get_type() == th->th.th_sleep_loc_type);

#if USE_SUSPEND_TIMEOUT
      struct timespec now;
      struct timeval tval;
      int msecs;

      status = gettimeofday(&tval, NULL);
      KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
      TIMEVAL_TO_TIMESPEC(&tval, &now);

      msecs = (4 * __kmp_dflt_blocktime) + 200;
      now.tv_sec += msecs / 1000;
      now.tv_nsec += (msecs % 1000) * 1000;

      KF_TRACE(15, ("__kmp_suspend_template: T#%d about to perform "
                    "pthread_cond_timedwait\n",
                    th_gtid));
      status = pthread_cond_timedwait(&th->th.th_suspend_cv.c_cond,
                                      &th->th.th_suspend_mx.m_mutex, &now);
#else
      KF_TRACE(15, ("__kmp_suspend_template: T#%d about to perform"
                    " pthread_cond_wait\n",
                    th_gtid));
      status = pthread_cond_wait(&th->th.th_suspend_cv.c_cond,
                                 &th->th.th_suspend_mx.m_mutex);
#endif // USE_SUSPEND_TIMEOUT

      if ((status != 0) && (status != EINTR) && (status != ETIMEDOUT)) {
        KMP_SYSFAIL("pthread_cond_wait", status);
      }

      KMP_DEBUG_ASSERT(flag->get_type() == flag->get_ptr_type());

      if (!flag->is_sleeping() &&
          ((status == EINTR) || (status == ETIMEDOUT))) {
        // On interrupt or timeout, when the thread is no longer sleeping,
        // make sure sleep_loc gets reset; this shouldn't be needed if we
        // woke up via resume.
        flag->unset_sleeping();
        TCW_PTR(th->th.th_sleep_loc, NULL);
        th->th.th_sleep_loc_type = flag_unset;
      }
#ifdef KMP_DEBUG
      if (status == ETIMEDOUT) {
        if (flag->is_sleeping()) {
          KF_TRACE(100,
                   ("__kmp_suspend_template: T#%d timeout wakeup\n", th_gtid));
        } else {
          KF_TRACE(2, ("__kmp_suspend_template: T#%d timeout wakeup, sleep bit "
                       "not set!\n",
                       th_gtid));
          TCW_PTR(th->th.th_sleep_loc, NULL);
          th->th.th_sleep_loc_type = flag_unset;
        }
      } else if (flag->is_sleeping()) {
        KF_TRACE(100,
                 ("__kmp_suspend_template: T#%d spurious wakeup\n", th_gtid));
      }
#endif
    } // while

    // Mark the thread as active again (if it was previously marked as
    // inactive).
    if (deactivated) {
      th->th.th_active = TRUE;
      if (TCR_4(th->th.th_in_pool)) {
        KMP_ATOMIC_INC(&__kmp_thread_pool_active_nth);
        th->th.th_active_in_pool = TRUE;
      }
    }
  }

  // We may have had sleep_loc set before entering the loop body, so reset it.
  TCW_PTR(th->th.th_sleep_loc, NULL);
  th->th.th_sleep_loc_type = flag_unset;

  KMP_DEBUG_ASSERT(!flag->is_sleeping());
  KMP_DEBUG_ASSERT(!th->th.th_sleep_loc);
#ifdef DEBUG_SUSPEND
  {
    char buffer[128];
    __kmp_print_cond(buffer, &th->th.th_suspend_cv);
    __kmp_printf("__kmp_suspend_template: T#%d has awakened: %s\n", th_gtid,
                 buffer);
  }
#endif

  __kmp_unlock_suspend_mx(th);
  KF_TRACE(30, ("__kmp_suspend_template: T#%d exit\n", th_gtid));
}
1577template <
bool C,
bool S>
1578void __kmp_suspend_32(
int th_gtid, kmp_flag_32<C, S> *flag) {
1579 __kmp_suspend_template(th_gtid, flag);
1581template <
bool C,
bool S>
1582void __kmp_suspend_64(
int th_gtid, kmp_flag_64<C, S> *flag) {
1583 __kmp_suspend_template(th_gtid, flag);
1585template <
bool C,
bool S>
1586void __kmp_atomic_suspend_64(
int th_gtid, kmp_atomic_flag_64<C, S> *flag) {
1587 __kmp_suspend_template(th_gtid, flag);
1589void __kmp_suspend_oncore(
int th_gtid, kmp_flag_oncore *flag) {
1590 __kmp_suspend_template(th_gtid, flag);
1593template void __kmp_suspend_32<false, false>(
int, kmp_flag_32<false, false> *);
1594template void __kmp_suspend_64<false, true>(
int, kmp_flag_64<false, true> *);
1595template void __kmp_suspend_64<true, false>(
int, kmp_flag_64<true, false> *);
1597__kmp_atomic_suspend_64<false, true>(
int, kmp_atomic_flag_64<false, true> *);
1599__kmp_atomic_suspend_64<true, false>(
int, kmp_atomic_flag_64<true, false> *);
/* This routine signals the thread specified by target_gtid to wake up after
   setting the sleep bit indicated by the flag argument to FALSE. The target
   thread must already have called __kmp_suspend_template(). */
template <class C>
static inline void __kmp_resume_template(int target_gtid, C *flag) {
  KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_resume);
  kmp_info_t *th = __kmp_threads[target_gtid];
  int status;

#ifdef KMP_DEBUG
  int gtid = TCR_4(__kmp_init_gtid) ? __kmp_get_gtid() : -1;
#endif

  KF_TRACE(30, ("__kmp_resume_template: T#%d wants to wakeup T#%d enter\n",
                gtid, target_gtid));
  KMP_DEBUG_ASSERT(gtid != target_gtid);

  __kmp_suspend_initialize_thread(th);

  __kmp_lock_suspend_mx(th);

  if (!flag || flag != th->th.th_sleep_loc) {
    // coming from __kmp_null_resume_wrapper, or the thread is now sleeping on
    // a different location; wake up at the new location
    flag = (C *)CCAST(void *, th->th.th_sleep_loc);
  }

  // First, check whether the flag is null or its type has changed. If so,
  // someone else woke the thread up.
  if (!flag) { // Thread doesn't appear to be sleeping on anything.
    KF_TRACE(5, ("__kmp_resume_template: T#%d exiting, thread T#%d already "
                 "awake: flag(%p)\n",
                 gtid, target_gtid, (void *)NULL));
    __kmp_unlock_suspend_mx(th);
    return;
  } else if (flag->get_type() != th->th.th_sleep_loc_type) {
    // The flag type does not match this function template; possibly the
    // thread is sleeping on something else. Try the null resume again.
    KF_TRACE(
        5,
        ("__kmp_resume_template: T#%d retrying, thread T#%d Mismatch flag(%p), "
         "spin(%p) type=%d ptr_type=%d\n",
         gtid, target_gtid, flag, flag->get(), flag->get_type(),
         th->th.th_sleep_loc_type));
    __kmp_unlock_suspend_mx(th);
    __kmp_null_resume_wrapper(th);
    return;
  } else { // if multiple threads are sleeping, the flag should be internally
    // referring to a specific thread here
    if (!flag->is_sleeping()) {
      KF_TRACE(5, ("__kmp_resume_template: T#%d exiting, thread T#%d already "
                   "awake: flag(%p): %u\n",
                   gtid, target_gtid, flag->get(), (unsigned int)flag->load()));
      __kmp_unlock_suspend_mx(th);
      return;
    }
  }
  KMP_DEBUG_ASSERT(flag);
  flag->unset_sleeping();
  TCW_PTR(th->th.th_sleep_loc, NULL);
  th->th.th_sleep_loc_type = flag_unset;

  KF_TRACE(5, ("__kmp_resume_template: T#%d about to wakeup T#%d, reset "
               "sleep bit for flag's loc(%p): %u\n",
               gtid, target_gtid, flag->get(), (unsigned int)flag->load()));

#ifdef DEBUG_SUSPEND
  {
    char buffer[128];
    __kmp_print_cond(buffer, &th->th.th_suspend_cv);
    __kmp_printf("__kmp_resume_template: T#%d resuming T#%d: %s\n", gtid,
                 target_gtid, buffer);
  }
#endif

  status = pthread_cond_signal(&th->th.th_suspend_cv.c_cond);
  KMP_CHECK_SYSFAIL("pthread_cond_signal", status);
  __kmp_unlock_suspend_mx(th);
  KF_TRACE(30, ("__kmp_resume_template: T#%d exiting after signaling wake up"
                " for T#%d\n",
                gtid, target_gtid));
}
1683template <
bool C,
bool S>
1684void __kmp_resume_32(
int target_gtid, kmp_flag_32<C, S> *flag) {
1685 __kmp_resume_template(target_gtid, flag);
1687template <
bool C,
bool S>
1688void __kmp_resume_64(
int target_gtid, kmp_flag_64<C, S> *flag) {
1689 __kmp_resume_template(target_gtid, flag);
1691template <
bool C,
bool S>
1692void __kmp_atomic_resume_64(
int target_gtid, kmp_atomic_flag_64<C, S> *flag) {
1693 __kmp_resume_template(target_gtid, flag);
1695void __kmp_resume_oncore(
int target_gtid, kmp_flag_oncore *flag) {
1696 __kmp_resume_template(target_gtid, flag);
1699template void __kmp_resume_32<false, true>(
int, kmp_flag_32<false, true> *);
1700template void __kmp_resume_32<false, false>(
int, kmp_flag_32<false, false> *);
1701template void __kmp_resume_64<false, true>(
int, kmp_flag_64<false, true> *);
1703__kmp_atomic_resume_64<false, true>(
int, kmp_atomic_flag_64<false, true> *);
#if KMP_USE_MONITOR
void __kmp_resume_monitor() {
  KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_resume);
  int status;
#ifdef KMP_DEBUG
  int gtid = TCR_4(__kmp_init_gtid) ? __kmp_get_gtid() : -1;
  KF_TRACE(30, ("__kmp_resume_monitor: T#%d wants to wakeup T#%d enter\n", gtid,
                KMP_GTID_MONITOR));
  KMP_DEBUG_ASSERT(gtid != KMP_GTID_MONITOR);
#endif
  status = pthread_mutex_lock(&__kmp_wait_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
#ifdef DEBUG_SUSPEND
  {
    char buffer[128];
    __kmp_print_cond(buffer, &__kmp_wait_cv.c_cond);
    __kmp_printf("__kmp_resume_monitor: T#%d resuming T#%d: %s\n", gtid,
                 KMP_GTID_MONITOR, buffer);
  }
#endif
  status = pthread_cond_signal(&__kmp_wait_cv.c_cond);
  KMP_CHECK_SYSFAIL("pthread_cond_signal", status);
  status = pthread_mutex_unlock(&__kmp_wait_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
  KF_TRACE(30, ("__kmp_resume_monitor: T#%d exiting after signaling wake up"
                " for T#%d\n",
                gtid, KMP_GTID_MONITOR));
}
#endif // KMP_USE_MONITOR
void __kmp_yield() { sched_yield(); }

void __kmp_gtid_set_specific(int gtid) {
  if (__kmp_init_gtid) {
    int status;
    status = pthread_setspecific(__kmp_gtid_threadprivate_key,
                                 (void *)(intptr_t)(gtid + 1));
    KMP_CHECK_SYSFAIL("pthread_setspecific", status);
  } else {
    KA_TRACE(50, ("__kmp_gtid_set_specific: runtime shutdown, returning\n"));
  }
}

int __kmp_gtid_get_specific() {
  int gtid;
  if (!__kmp_init_gtid) {
    KA_TRACE(50, ("__kmp_gtid_get_specific: runtime shutdown, returning "
                  "KMP_GTID_SHUTDOWN\n"));
    return KMP_GTID_SHUTDOWN;
  }
  gtid = (int)(size_t)pthread_getspecific(__kmp_gtid_threadprivate_key);
  if (gtid == 0) {
    gtid = KMP_GTID_DNE;
  } else {
    gtid--;
  }
  KA_TRACE(50, ("__kmp_gtid_get_specific: key:%d gtid:%d\n",
                __kmp_gtid_threadprivate_key, gtid));
  return gtid;
}
double __kmp_read_cpu_time(void) {
  struct tms buffer;

  times(&buffer);

  return (double)(buffer.tms_utime + buffer.tms_cutime) /
         (double)CLOCKS_PER_SEC;
}

int __kmp_read_system_info(struct kmp_sys_info *info) {
  int status;
  struct rusage r_usage;

  memset(info, 0, sizeof(*info));

  status = getrusage(RUSAGE_SELF, &r_usage);
  KMP_CHECK_SYSFAIL_ERRNO("getrusage", status);

  // The maximum resident set size utilized (in kilobytes)
  info->maxrss = r_usage.ru_maxrss;
  // The number of page faults serviced without any I/O
  info->minflt = r_usage.ru_minflt;
  // The number of page faults serviced that required I/O
  info->majflt = r_usage.ru_majflt;
  // The number of times a process was "swapped" out of memory
  info->nswap = r_usage.ru_nswap;
  // The number of times the file system had to perform input
  info->inblock = r_usage.ru_inblock;
  // The number of times the file system had to perform output
  info->oublock = r_usage.ru_oublock;
  // The number of times a context switch was voluntarily made
  info->nvcsw = r_usage.ru_nvcsw;
  // The number of times a context switch was forced
  info->nivcsw = r_usage.ru_nivcsw;

  return (status != 0);
}
void __kmp_read_system_time(double *delta) {
  double t_ns;
  struct timeval tval;
  struct timespec stop;
  int status;

  status = gettimeofday(&tval, NULL);
  KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
  TIMEVAL_TO_TIMESPEC(&tval, &stop);
  t_ns = (double)(TS2NS(stop) - TS2NS(__kmp_sys_timer_data.start));
  *delta = (t_ns * 1e-9);
}

void __kmp_clear_system_time(void) {
  struct timeval tval;
  int status;
  status = gettimeofday(&tval, NULL);
  KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
  TIMEVAL_TO_TIMESPEC(&tval, &__kmp_sys_timer_data.start);
}
static int __kmp_get_xproc(void) {
  int r = 0;

#if KMP_OS_LINUX
  __kmp_type_convert(sysconf(_SC_NPROCESSORS_CONF), &(r));
#elif KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_OPENBSD || \
    KMP_OS_HURD || KMP_OS_SOLARIS || KMP_OS_WASI || KMP_OS_AIX
  __kmp_type_convert(sysconf(_SC_NPROCESSORS_ONLN), &(r));
#elif KMP_OS_DARWIN
  // Find the number of available CPUs.
  kern_return_t rc;
  host_basic_info_data_t info;
  mach_msg_type_number_t num = HOST_BASIC_INFO_COUNT;
  rc = host_info(mach_host_self(), HOST_BASIC_INFO, (host_info_t)&info, &num);
  if (rc == 0 && num == HOST_BASIC_INFO_COUNT) {
    r = info.avail_cpus;
  } else {
    KMP_WARNING(CantGetNumAvailCPU);
    KMP_INFORM(AssumedNumCPU);
  }
#else
#error "Unknown or unsupported OS."
#endif

  return r > 0 ? r : 2; /* guess value of 2 if OS told us 0 */
}
int __kmp_read_from_file(char const *path, char const *format, ...) {
  int result;
  va_list args;

  va_start(args, format);
  FILE *f = fopen(path, "rb");
  if (f == NULL) {
    va_end(args);
    return 0;
  }
  result = vfscanf(f, format, args);
  fclose(f);
  va_end(args);

  return result;
}
void __kmp_runtime_initialize(void) {
  int status;
  pthread_mutexattr_t mutex_attr;
  pthread_condattr_t cond_attr;

  if (__kmp_init_runtime) {
    return;
  }

#if (KMP_ARCH_X86 || KMP_ARCH_X86_64)
  if (!__kmp_cpuinfo.initialized) {
    __kmp_query_cpuid(&__kmp_cpuinfo);
  }
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

  __kmp_xproc = __kmp_get_xproc();

#if !KMP_32_BIT_ARCH
  struct rlimit rlim;
  // Read stack size of calling thread and save it as the default for worker
  // threads; this should be done before reading environment variables.
  status = getrlimit(RLIMIT_STACK, &rlim);
  if (status == 0) { // success?
    __kmp_stksize = rlim.rlim_cur;
    __kmp_check_stksize(&__kmp_stksize); // check value and adjust if needed
  }
#endif /* KMP_32_BIT_ARCH */

  if (sysconf(_SC_THREADS)) {
    /* Query the maximum number of threads */
    __kmp_type_convert(sysconf(_SC_THREAD_THREADS_MAX), &(__kmp_sys_max_nth));
#ifdef __ve__
    if (__kmp_sys_max_nth == -1) {
      // VE's pthread supports only up to 64 threads per VE process.
      __kmp_sys_max_nth = KMP_MAX_NTH;
    }
#else
    if (__kmp_sys_max_nth == -1) {
      /* Unlimited threads for NPTL */
      __kmp_sys_max_nth = INT_MAX;
    } else if (__kmp_sys_max_nth <= 1) {
      /* Can't tell, just use PTHREAD_THREADS_MAX */
      __kmp_sys_max_nth = KMP_MAX_NTH;
    }
#endif

    /* Query the minimum stack size */
    __kmp_sys_min_stksize = sysconf(_SC_THREAD_STACK_MIN);
    if (__kmp_sys_min_stksize <= 1) {
      __kmp_sys_min_stksize = KMP_MIN_STKSIZE;
    }
  }

  /* Set up minimum number of threads to switch to TLS gtid */
  __kmp_tls_gtid_min = KMP_TLS_GTID_MIN;

  status = pthread_key_create(&__kmp_gtid_threadprivate_key,
                              __kmp_internal_end_dest);
  KMP_CHECK_SYSFAIL("pthread_key_create", status);
  status = pthread_mutexattr_init(&mutex_attr);
  KMP_CHECK_SYSFAIL("pthread_mutexattr_init", status);
  status = pthread_mutex_init(&__kmp_wait_mx.m_mutex, &mutex_attr);
  KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
  status = pthread_mutexattr_destroy(&mutex_attr);
  KMP_CHECK_SYSFAIL("pthread_mutexattr_destroy", status);
  status = pthread_condattr_init(&cond_attr);
  KMP_CHECK_SYSFAIL("pthread_condattr_init", status);
  status = pthread_cond_init(&__kmp_wait_cv.c_cond, &cond_attr);
  KMP_CHECK_SYSFAIL("pthread_cond_init", status);
  status = pthread_condattr_destroy(&cond_attr);
  KMP_CHECK_SYSFAIL("pthread_condattr_destroy", status);
#if USE_ITT_BUILD
  __kmp_itt_initialize();
#endif /* USE_ITT_BUILD */

  __kmp_init_runtime = TRUE;
}
void __kmp_runtime_destroy(void) {
  int status;

  if (!__kmp_init_runtime) {
    return; // Nothing to do.
  }

#if USE_ITT_BUILD
  __kmp_itt_destroy();
#endif /* USE_ITT_BUILD */

  status = pthread_key_delete(__kmp_gtid_threadprivate_key);
  KMP_CHECK_SYSFAIL("pthread_key_delete", status);

  status = pthread_mutex_destroy(&__kmp_wait_mx.m_mutex);
  if (status != 0 && status != EBUSY) {
    KMP_SYSFAIL("pthread_mutex_destroy", status);
  }
  status = pthread_cond_destroy(&__kmp_wait_cv.c_cond);
  if (status != 0 && status != EBUSY) {
    KMP_SYSFAIL("pthread_cond_destroy", status);
  }
#if KMP_AFFINITY_SUPPORTED
  __kmp_affinity_uninitialize();
#endif

  __kmp_init_runtime = FALSE;
}
/* Put the thread to sleep for a time period */
void __kmp_thread_sleep(int millis) { sleep((millis + 500) / 1000); }

/* Calculate the elapsed wall clock time for the user */
void __kmp_elapsed(double *t) {
  int status;
#ifdef FIX_SGI_CLOCK
  struct timespec ts;

  status = clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts);
  KMP_CHECK_SYSFAIL_ERRNO("clock_gettime", status);
  *t =
      (double)ts.tv_nsec * (1.0 / (double)KMP_NSEC_PER_SEC) + (double)ts.tv_sec;
#else
  struct timeval tv;

  status = gettimeofday(&tv, NULL);
  KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
  *t =
      (double)tv.tv_usec * (1.0 / (double)KMP_USEC_PER_SEC) + (double)tv.tv_sec;
#endif
}

/* Calculate the elapsed wall clock tick for the user */
void __kmp_elapsed_tick(double *t) { *t = 1 / (double)CLOCKS_PER_SEC; }

/* Return the current time stamp in nsec */
kmp_uint64 __kmp_now_nsec() {
  struct timeval t;
  gettimeofday(&t, NULL);
  kmp_uint64 nsec = (kmp_uint64)KMP_NSEC_PER_SEC * (kmp_uint64)t.tv_sec +
                    (kmp_uint64)1000 * (kmp_uint64)t.tv_usec;
  return nsec;
}
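// __kmp_initialize_system_tick() below calibrates __kmp_ticks_per_msec and
// __kmp_ticks_per_usec: it spins until the hardware timestamp counter has
// advanced by a fixed number of ticks, then divides the ticks actually
// elapsed by the gettimeofday() nanoseconds that passed in the meantime to
// obtain ticks per microsecond.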
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
/* Measure clock ticks per millisecond */
void __kmp_initialize_system_tick() {
  kmp_uint64 now, nsec2, diff;
  kmp_uint64 delay = 1000000; // ~450 usec on most machines.
  kmp_uint64 nsec = __kmp_now_nsec();
  kmp_uint64 goal = __kmp_hardware_timestamp() + delay;
  while ((now = __kmp_hardware_timestamp()) < goal)
    ;
  nsec2 = __kmp_now_nsec();
  diff = nsec2 - nsec;
  if (diff > 0) {
    double tpus = 1000.0 * (double)(delay + (now - goal)) / (double)diff;
    if (tpus > 0.0) {
      __kmp_ticks_per_msec = (kmp_uint64)(tpus * 1000.0);
      __kmp_ticks_per_usec = (kmp_uint64)tpus;
    }
  }
}
#endif
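// __kmp_is_address_mapped() has one implementation per OS family: parse
// /proc/<pid>/maps on Linux/Hurd, walk the kinfo_vmentry list obtained via
// sysctl() on the BSDs, probe the address with vm_read_overwrite() on
// Darwin, and compare against the linear memory size on WASI. The Linux and
// FreeBSD paths additionally require the page to be readable and writable.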
/* Determine whether the given address is mapped into the current address
   space. */
int __kmp_is_address_mapped(void *addr) {
  int found = 0;
  int rc;

#if KMP_OS_LINUX || KMP_OS_HURD
  /* On GNUish OSes, read the /proc/<pid>/maps pseudo-file to get all the
     address ranges mapped into the address space. */
  char *name = __kmp_str_format("/proc/%d/maps", getpid());
  FILE *file = NULL;

  file = fopen(name, "r");
  KMP_ASSERT(file != NULL);

  for (;;) {
    void *beginning = NULL;
    void *ending = NULL;
    char perms[5];

    rc = fscanf(file, "%p-%p %4s %*[^\n]\n", &beginning, &ending, perms);
    if (rc == EOF) {
      break;
    }
    KMP_ASSERT(rc == 3 &&
               KMP_STRLEN(perms) == 4); // Make sure all fields are read.

    // Ending address is not included in the region, but beginning is.
    if ((addr >= beginning) && (addr < ending)) {
      perms[2] = 0; // 3rd and 4th character does not matter.
      if (strcmp(perms, "rw") == 0) {
        // Memory we are looking for should be readable and writable.
        found = 1;
      }
      break;
    }
  }

  // Free resources.
  fclose(file);
  KMP_INTERNAL_FREE(name);
#elif KMP_OS_FREEBSD
  char *buf;
  size_t lstsz;
  int mib[] = {CTL_KERN, KERN_PROC, KERN_PROC_VMMAP, getpid()};
  rc = sysctl(mib, 4, NULL, &lstsz, NULL, 0);
  if (rc < 0)
    return 0;
  // We pass the size of the appropriate buffer in the next call, but the
  // memory usage might have changed in between, so account for a 4/3
  // increase to be safe.
  lstsz = lstsz * 4 / 3;
  buf = reinterpret_cast<char *>(kmpc_malloc(lstsz));
  rc = sysctl(mib, 4, buf, &lstsz, NULL, 0);
  if (rc < 0) {
    kmpc_free(buf);
    return 0;
  }

  char *lw = buf;
  char *up = buf + lstsz;

  while (lw < up) {
    struct kinfo_vmentry *cur = reinterpret_cast<struct kinfo_vmentry *>(lw);
    size_t cursz = cur->kve_structsize;
    if (cursz == 0)
      break;
    void *start = reinterpret_cast<void *>(cur->kve_start);
    void *end = reinterpret_cast<void *>(cur->kve_end);
    // Readable/Writable addresses within current map entry
    if ((addr >= start) && (addr < end)) {
      if ((cur->kve_protection & KVME_PROT_READ) != 0 &&
          (cur->kve_protection & KVME_PROT_WRITE) != 0) {
        found = 1;
        break;
      }
    }
    lw += cursz;
  }
  kmpc_free(buf);
#elif KMP_OS_DARWIN
  /* On OS X*, /proc pseudo filesystem is not available. Try to read memory
     using the vm interface. */
  int buffer;
  vm_size_t count;
  rc = vm_read_overwrite(
      mach_task_self(), // Task to read memory of.
      (vm_address_t)(addr), // Address to read from.
      1, // Number of bytes to be read.
      (vm_address_t)(&buffer), // Address of buffer to save read bytes in.
      &count // Address of var to save number of read bytes in.
  );
  if (rc == 0) {
    // Memory successfully read.
    found = 1;
  }
#elif KMP_OS_NETBSD
  int mib[5];
  mib[0] = CTL_VM;
  mib[1] = VM_PROC;
  mib[2] = VM_PROC_MAP;
  mib[3] = getpid();
  mib[4] = sizeof(struct kinfo_vmentry);

  size_t size;
  rc = sysctl(mib, __arraycount(mib), NULL, &size, NULL, 0);
  KMP_ASSERT(!rc);
  KMP_ASSERT(size);

  size = size * 4 / 3;
  struct kinfo_vmentry *kiv =
      (struct kinfo_vmentry *)KMP_INTERNAL_MALLOC(size);
  KMP_ASSERT(kiv);

  rc = sysctl(mib, __arraycount(mib), kiv, &size, NULL, 0);
  KMP_ASSERT(!rc);
  KMP_ASSERT(size);

  for (size_t i = 0; i < size; i++) {
    if (kiv[i].kve_start >= (uint64_t)addr &&
        kiv[i].kve_end <= (uint64_t)addr) {
      found = 1;
      break;
    }
  }
  KMP_INTERNAL_FREE(kiv);
#elif KMP_OS_OPENBSD
  int mib[3];
  mib[0] = CTL_KERN;
  mib[1] = KERN_PROC_VMMAP;
  mib[2] = getpid();

  size_t size;
  uint64_t end;
  rc = sysctl(mib, 3, NULL, &size, NULL, 0);
  KMP_ASSERT(!rc);
  KMP_ASSERT(size);
  end = size;

  struct kinfo_vmentry kiv = {.kve_start = 0};

  while ((rc = sysctl(mib, 3, &kiv, &size, NULL, 0)) == 0) {
    KMP_ASSERT(size);
    if (kiv.kve_end == end)
      break;

    if (kiv.kve_start >= (uint64_t)addr && kiv.kve_end <= (uint64_t)addr) {
      found = 1;
      break;
    }
    kiv.kve_start += 1;
  }
#elif KMP_OS_WASI
  found = (int)addr < (__builtin_wasm_memory_size(0) * PAGESIZE);
#elif KMP_OS_DRAGONFLY || KMP_OS_SOLARIS || KMP_OS_AIX
  // FIXME(DragonFly, Solaris, AIX): Implement this
  found = 1;
#else
#error "Unknown or unsupported OS"
#endif

  return found;
} // __kmp_is_address_mapped
#ifdef USE_LOAD_BALANCE

#if KMP_OS_DARWIN || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||   \
    KMP_OS_OPENBSD || KMP_OS_SOLARIS

// The function returns the rounded value of the system load average during
// a given time interval, which depends on the value of the
// __kmp_load_balance_interval variable (default is 60 sec, other values may
// be 300 sec or 900 sec). It returns -1 in case of error.
int __kmp_get_load_balance(int max) {
  double averages[3];
  int ret_avg = 0;

  int res = getloadavg(averages, 3);

  // Check __kmp_load_balance_interval to determine which of the averages to
  // use. getloadavg() may return fewer samples than requested (less than 3).
  if (__kmp_load_balance_interval < 180 && (res >= 1)) {
    ret_avg = (int)averages[0]; // 1 min
  } else if ((__kmp_load_balance_interval >= 180 &&
              __kmp_load_balance_interval < 600) &&
             (res >= 2)) {
    ret_avg = (int)averages[1]; // 5 min
  } else if ((__kmp_load_balance_interval >= 600) && (res == 3)) {
    ret_avg = (int)averages[2]; // 15 min
  } else { // Error occurred
    return -1;
  }

  return ret_avg;
}

#else // Linux* OS
// The function returns the number of running (not sleeping) threads, or -1
// in case of error (e.g. a kernel too old to support "/proc"). Counting
// stops once max running threads have been seen.
int __kmp_get_load_balance(int max) {
  static int permanent_error = 0;
  static int glb_running_threads = 0; // Saved count of the running threads.
  static double glb_call_time = 0; // Thread balance algorithm call time.

  int running_threads = 0; // Number of running threads in the system.

  DIR *proc_dir = NULL; // Handle of "/proc/" directory.
  struct dirent *proc_entry = NULL;

  kmp_str_buf_t task_path; // "/proc/<pid>/task/<tid>/" path.
  DIR *task_dir = NULL; // Handle of "/proc/<pid>/task/<tid>/" directory.
  struct dirent *task_entry = NULL;
  int task_path_fixed_len;

  kmp_str_buf_t stat_path; // "/proc/<pid>/task/<tid>/stat" path.
  int stat_file = -1;
  int stat_path_fixed_len;

#ifdef KMP_DEBUG
  int total_processes = 0; // Total number of processes in system.
#endif

  double call_time = 0.0;

  __kmp_str_buf_init(&task_path);
  __kmp_str_buf_init(&stat_path);

  __kmp_elapsed(&call_time);

  if (glb_call_time &&
      (call_time - glb_call_time < __kmp_load_balance_interval)) {
    running_threads = glb_running_threads;
    goto finish;
  }

  glb_call_time = call_time;

  // Do not spend time on scanning "/proc/" if we have a permanent error.
  if (permanent_error) {
    running_threads = -1;
    goto finish;
  }

  if (max <= 0) {
    max = INT_MAX;
  }

  proc_dir = opendir("/proc");
  if (proc_dir == NULL) {
    // Cannot open "/proc/". Probably the kernel does not support it. Return
    // an error now and in subsequent calls.
    running_threads = -1;
    permanent_error = 1;
    goto finish;
  }

  // Initialize the fixed part of task_path. This part will not change.
  __kmp_str_buf_cat(&task_path, "/proc/", 6);
  task_path_fixed_len = task_path.used; // Remember number of used characters.

  proc_entry = readdir(proc_dir);
  while (proc_entry != NULL) {
#if KMP_OS_AIX
    // Proc entry name starts with a digit. Assume it is a process' directory.
    if (isdigit(proc_entry->d_name[0])) {
#else
    // Proc entry is a directory and name starts with a digit. Assume it is a
    // process' directory.
    if (proc_entry->d_type == DT_DIR && isdigit(proc_entry->d_name[0])) {
#endif
#ifdef KMP_DEBUG
      ++total_processes;
#endif
      // "init" should be the very first entry in "/proc", so
      // total_processes == 1 must imply d_name == "1".
      KMP_DEBUG_ASSERT(total_processes != 1 ||
                       strcmp(proc_entry->d_name, "1") == 0);

      // Construct task_path.
      task_path.used = task_path_fixed_len; // Reset task_path to "/proc/".
      __kmp_str_buf_cat(&task_path, proc_entry->d_name,
                        KMP_STRLEN(proc_entry->d_name));
      __kmp_str_buf_cat(&task_path, "/task", 5);

      task_dir = opendir(task_path.str);
      if (task_dir == NULL) {
        // A process can finish between reading the "/proc/" entry and opening
        // its "task/" directory, so in general this is not an error. But the
        // "init" process (pid 1) always exists, so failure to open
        // "/proc/1/task/" means "task/" is not supported by this kernel:
        // report a permanent error.
        if (strcmp(proc_entry->d_name, "1") == 0) {
          running_threads = -1;
          permanent_error = 1;
          goto finish;
        }
      } else {
        // Construct the fixed part of the stat file path.
        __kmp_str_buf_clear(&stat_path);
        __kmp_str_buf_cat(&stat_path, task_path.str, task_path.used);
        __kmp_str_buf_cat(&stat_path, "/", 1);
        stat_path_fixed_len = stat_path.used;

        task_entry = readdir(task_dir);
        while (task_entry != NULL) {
#if KMP_OS_AIX
          // Task entry name starts with a digit.
          if (isdigit(task_entry->d_name[0])) {
#else
          // It is a directory and name starts with a digit.
          if (proc_entry->d_type == DT_DIR && isdigit(task_entry->d_name[0])) {
#endif
            // Construct the complete stat file path; a series of
            // __kmp_str_buf_cat() calls is a bit faster than
            // __kmp_str_buf_print().
            stat_path.used = stat_path_fixed_len; // Reset to the fixed part.
            __kmp_str_buf_cat(&stat_path, task_entry->d_name,
                              KMP_STRLEN(task_entry->d_name));
            __kmp_str_buf_cat(&stat_path, "/stat", 5);

            // Note: the low-level API (open/read/close) is used; the
            // high-level API (fopen/fclose) works ~30% slower.
            stat_file = open(stat_path.str, O_RDONLY);
            if (stat_file == -1) {
              // We cannot report an error because the task (thread) may
              // terminate just before reading this file.
            } else {
              /* The content of the "stat" file looks like:
                 24285 (program name) S ...
                 The program name is surrounded by parentheses and the task
                 state follows the last ") ". */
              char buffer[65];
              ssize_t len;
              len = read(stat_file, buffer, sizeof(buffer) - 1);
              if (len >= 0) {
                buffer[len] = 0;
                char *close_parent = strstr(buffer, ") ");
                if (close_parent != NULL) {
                  char state = *(close_parent + 2);
                  if (state == 'R') {
                    ++running_threads;
                    if (running_threads >= max) {
                      goto finish;
                    }
                  }
                }
              }
              close(stat_file);
              stat_file = -1;
            }
          }
          task_entry = readdir(task_dir);
        }
        closedir(task_dir);
        task_dir = NULL;
      }
    }
    proc_entry = readdir(proc_dir);
  }

  // There _might_ be a timing hole where the thread executing this code gets
  // skipped in the load balance, and running_threads is 0. Assert in debug
  // builds only!
  KMP_DEBUG_ASSERT(running_threads > 0);
  if (running_threads <= 0) {
    running_threads = 1;
  }

finish: // Clean up and exit.
  if (proc_dir != NULL) {
    closedir(proc_dir);
  }
  __kmp_str_buf_free(&task_path);
  if (task_dir != NULL) {
    closedir(task_dir);
  }
  __kmp_str_buf_free(&stat_path);
  if (stat_file != -1) {
    close(stat_file);
  }

  glb_running_threads = running_threads;

  return running_threads;
} // __kmp_get_load_balance

#endif // OS-specific implementations
#endif // USE_LOAD_BALANCE
#if !(KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_MIC ||                           \
      ((KMP_OS_LINUX || KMP_OS_DARWIN) && KMP_ARCH_AARCH64) ||                \
      KMP_ARCH_PPC64 || KMP_ARCH_RISCV64 || KMP_ARCH_LOONGARCH64 ||           \
      KMP_ARCH_ARM || KMP_ARCH_VE || KMP_ARCH_S390X || KMP_ARCH_PPC_XCOFF)

// Because of the argument-passing structure below, we cannot use omitted
// arguments; all of them must be passed in explicitly.
int __kmp_invoke_microtask(microtask_t pkfn, int gtid, int tid, int argc,
                           void *p_argv[]
#if OMPT_SUPPORT
                           ,
                           void **exit_frame_ptr
#endif
) {
#if OMPT_SUPPORT
  *exit_frame_ptr = OMPT_GET_FRAME_ADDRESS(0);
#endif

  switch (argc) {
  default:
    fprintf(stderr, "Too many args to microtask: %d!\n", argc);
    fflush(stderr);
    exit(-1);
  case 0:
    (*pkfn)(&gtid, &tid);
    break;
  case 1:
    (*pkfn)(&gtid, &tid, p_argv[0]);
    break;
  case 2:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1]);
    break;
  case 3:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2]);
    break;
  case 4:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3]);
    break;
  case 5:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4]);
    break;
  case 6:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5]);
    break;
  case 7:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6]);
    break;
  case 8:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7]);
    break;
  case 9:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8]);
    break;
  case 10:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9]);
    break;
  case 11:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10]);
    break;
  case 12:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
            p_argv[11]);
    break;
  case 13:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
            p_argv[11], p_argv[12]);
    break;
  case 14:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
            p_argv[11], p_argv[12], p_argv[13]);
    break;
  case 15:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
            p_argv[11], p_argv[12], p_argv[13], p_argv[14]);
    break;
  }

  return 1;
}

#endif
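// Hidden helper team support: the initialization/deinitialization handshakes
// below use mutex-protected condition variables plus a "signaled" flag to
// tolerate signal-before-wait races, while helper workers block on a
// counting semaphore so that several task signals posted at the same time
// wake up exactly as many workers. On OSes without this support, the stub
// versions at the end of the section simply assert.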
#if KMP_OS_LINUX
// Condition variable and lock for initializing the hidden helper team.
pthread_cond_t hidden_helper_threads_initz_cond_var;
pthread_mutex_t hidden_helper_threads_initz_lock;
volatile int hidden_helper_initz_signaled = FALSE;

// Condition variable and lock for deinitializing the hidden helper team.
pthread_cond_t hidden_helper_threads_deinitz_cond_var;
pthread_mutex_t hidden_helper_threads_deinitz_lock;
volatile int hidden_helper_deinitz_signaled = FALSE;

// Condition variable and lock for the wrapper function of the main thread.
pthread_cond_t hidden_helper_main_thread_cond_var;
pthread_mutex_t hidden_helper_main_thread_lock;
volatile int hidden_helper_main_thread_signaled = FALSE;

// Semaphore for worker threads. We don't use a condition variable here
// because if multiple signals are sent at the same time, only one thread
// might be woken up.
sem_t hidden_helper_task_sem;
void __kmp_hidden_helper_worker_thread_wait() {
  int status = sem_wait(&hidden_helper_task_sem);
  KMP_CHECK_SYSFAIL("sem_wait", status);
}

void __kmp_do_initialize_hidden_helper_threads() {
  // Initialize condition variables.
  int status =
      pthread_cond_init(&hidden_helper_threads_initz_cond_var, nullptr);
  KMP_CHECK_SYSFAIL("pthread_cond_init", status);

  status = pthread_cond_init(&hidden_helper_threads_deinitz_cond_var, nullptr);
  KMP_CHECK_SYSFAIL("pthread_cond_init", status);

  status = pthread_cond_init(&hidden_helper_main_thread_cond_var, nullptr);
  KMP_CHECK_SYSFAIL("pthread_cond_init", status);

  // Initialize mutexes.
  status = pthread_mutex_init(&hidden_helper_threads_initz_lock, nullptr);
  KMP_CHECK_SYSFAIL("pthread_mutex_init", status);

  status = pthread_mutex_init(&hidden_helper_threads_deinitz_lock, nullptr);
  KMP_CHECK_SYSFAIL("pthread_mutex_init", status);

  status = pthread_mutex_init(&hidden_helper_main_thread_lock, nullptr);
  KMP_CHECK_SYSFAIL("pthread_mutex_init", status);

  // Initialize the semaphore.
  status = sem_init(&hidden_helper_task_sem, 0, 0);
  KMP_CHECK_SYSFAIL("sem_init", status);

  // Create a new thread to finish the initialization.
  pthread_t handle;
  status = pthread_create(
      &handle, nullptr,
      [](void *) -> void * {
        __kmp_hidden_helper_threads_initz_routine();
        return nullptr;
      },
      nullptr);
  KMP_CHECK_SYSFAIL("pthread_create", status);
}
void __kmp_hidden_helper_threads_initz_wait() {
  // The initial thread waits here for the completion of the initialization.
  // The condition variable is notified by the main thread of the hidden
  // helper teams.
  int status = pthread_mutex_lock(&hidden_helper_threads_initz_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);

  if (!TCR_4(hidden_helper_initz_signaled)) {
    status = pthread_cond_wait(&hidden_helper_threads_initz_cond_var,
                               &hidden_helper_threads_initz_lock);
    KMP_CHECK_SYSFAIL("pthread_cond_wait", status);
  }

  status = pthread_mutex_unlock(&hidden_helper_threads_initz_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
}

void __kmp_hidden_helper_initz_release() {
  // Initialization is done.
  int status = pthread_mutex_lock(&hidden_helper_threads_initz_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);

  status = pthread_cond_signal(&hidden_helper_threads_initz_cond_var);
  KMP_CHECK_SYSFAIL("pthread_cond_signal", status);

  TCW_SYNC_4(hidden_helper_initz_signaled, TRUE);

  status = pthread_mutex_unlock(&hidden_helper_threads_initz_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
}

void __kmp_hidden_helper_main_thread_wait() {
  // The main thread of the hidden helper team is blocked here; the condition
  // variable is only signaled in the destructor of the RTL.
  int status = pthread_mutex_lock(&hidden_helper_main_thread_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);

  if (!TCR_4(hidden_helper_main_thread_signaled)) {
    status = pthread_cond_wait(&hidden_helper_main_thread_cond_var,
                               &hidden_helper_main_thread_lock);
    KMP_CHECK_SYSFAIL("pthread_cond_wait", status);
  }

  status = pthread_mutex_unlock(&hidden_helper_main_thread_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
}

void __kmp_hidden_helper_main_thread_release() {
  // Wake up the main thread of the hidden helper team, which is waiting for
  // the RTL destructor.
  int status = pthread_mutex_lock(&hidden_helper_main_thread_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);

  status = pthread_cond_signal(&hidden_helper_main_thread_cond_var);
  KMP_CHECK_SYSFAIL("pthread_cond_signal", status);

  // The hidden helper team is done here.
  TCW_SYNC_4(hidden_helper_main_thread_signaled, TRUE);

  status = pthread_mutex_unlock(&hidden_helper_main_thread_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
}

void __kmp_hidden_helper_worker_thread_signal() {
  int status = sem_post(&hidden_helper_task_sem);
  KMP_CHECK_SYSFAIL("sem_post", status);
}

void __kmp_hidden_helper_threads_deinitz_wait() {
  // The initial thread waits here for the completion of the
  // deinitialization. The condition variable is notified by the main thread
  // of the hidden helper teams.
  int status = pthread_mutex_lock(&hidden_helper_threads_deinitz_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);

  if (!TCR_4(hidden_helper_deinitz_signaled)) {
    status = pthread_cond_wait(&hidden_helper_threads_deinitz_cond_var,
                               &hidden_helper_threads_deinitz_lock);
    KMP_CHECK_SYSFAIL("pthread_cond_wait", status);
  }

  status = pthread_mutex_unlock(&hidden_helper_threads_deinitz_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
}

void __kmp_hidden_helper_threads_deinitz_release() {
  int status = pthread_mutex_lock(&hidden_helper_threads_deinitz_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);

  status = pthread_cond_signal(&hidden_helper_threads_deinitz_cond_var);
  KMP_CHECK_SYSFAIL("pthread_cond_signal", status);

  TCW_SYNC_4(hidden_helper_deinitz_signaled, TRUE);

  status = pthread_mutex_unlock(&hidden_helper_threads_deinitz_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
}
#else // KMP_OS_LINUX
void __kmp_hidden_helper_worker_thread_wait() {
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
}

void __kmp_do_initialize_hidden_helper_threads() {
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
}

void __kmp_hidden_helper_threads_initz_wait() {
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
}

void __kmp_hidden_helper_initz_release() {
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
}

void __kmp_hidden_helper_main_thread_wait() {
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
}

void __kmp_hidden_helper_main_thread_release() {
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
}

void __kmp_hidden_helper_worker_thread_signal() {
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
}

void __kmp_hidden_helper_threads_deinitz_wait() {
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
}

void __kmp_hidden_helper_threads_deinitz_release() {
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
}
#endif // KMP_OS_LINUX
bool __kmp_detect_shm() {
  DIR *dir = opendir("/dev/shm");
  if (dir) { // /dev/shm exists
    closedir(dir);
    return true;
  } else if (ENOENT == errno) { // /dev/shm does not exist
    return false;
  } else { // opendir() failed
    return false;
  }
}

bool __kmp_detect_tmp() {
  DIR *dir = opendir("/tmp");
  if (dir) { // /tmp exists
    closedir(dir);
    return true;
  } else if (ENOENT == errno) { // /tmp does not exist
    return false;
  } else { // opendir() failed
    return false;
  }
}