#include "kmp_affinity.h"
#include "kmp_wait_release.h"
#include "kmp_wrapper_getpid.h"

#if !KMP_OS_DRAGONFLY && !KMP_OS_FREEBSD && !KMP_OS_NETBSD && !KMP_OS_OPENBSD
#include <sys/resource.h>
#include <sys/syscall.h>
#include <sys/sysinfo.h>
#include <sys/sysctl.h>
#elif KMP_OS_DRAGONFLY || KMP_OS_FREEBSD
#include <sys/sysctl.h>
#include <pthread_np.h>
#elif KMP_OS_NETBSD || KMP_OS_OPENBSD
#include <sys/sysctl.h>
#endif
struct kmp_sys_timer {
  struct timespec start;
};

#define TS2NS(timespec)                                                        \
  (((timespec).tv_sec * (long int)1e9) + (timespec).tv_nsec)
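// TS2NS (above) flattens a struct timespec into a single signed nanosecond
// count, so two timestamps can be subtracted directly (see
// __kmp_read_system_time below).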
static struct kmp_sys_timer __kmp_sys_timer_data;

typedef void (*sig_func_t)(int);
STATIC_EFI2_WORKAROUND struct sigaction __kmp_sighldrs[NSIG];
static sigset_t __kmp_sigset;

static int __kmp_init_runtime = FALSE;

static int __kmp_fork_count = 0;

static pthread_condattr_t __kmp_suspend_cond_attr;
static pthread_mutexattr_t __kmp_suspend_mutex_attr;

static kmp_cond_align_t __kmp_wait_cv;
static kmp_mutex_align_t __kmp_wait_mx;

kmp_uint64 __kmp_ticks_per_msec = 1000000;
static void __kmp_print_cond(char *buffer, kmp_cond_align_t *cond) {
  KMP_SNPRINTF(buffer, 128, "(cond (lock (%ld, %d)), (descr (%p)))",
               cond->c_cond.__c_lock.__status, cond->c_cond.__c_lock.__spinlock,
               cond->c_cond.__c_waiting);
}
#if ((KMP_OS_LINUX || KMP_OS_FREEBSD) && KMP_AFFINITY_SUPPORTED)

void __kmp_affinity_bind_thread(int which) {
  KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
              "Illegal set affinity operation when not capable");

  kmp_affin_mask_t *mask;
  KMP_CPU_ALLOC_ON_STACK(mask);
  KMP_CPU_ZERO(mask);
  KMP_CPU_SET(which, mask);
  __kmp_set_system_affinity(mask, TRUE);
  KMP_CPU_FREE_FROM_STACK(mask);
}
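// Determine whether the OS supports thread affinity and, on Linux, how large
// the kernel's internal cpumask is: there is no direct query for the mask
// size, so sched_getaffinity is retried with successively larger buffers
// until a call succeeds.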
void __kmp_affinity_determine_capable(const char *env_var) {
  // Check and see if the OS supports thread affinity.

#if KMP_OS_LINUX
#define KMP_CPU_SET_SIZE_LIMIT (1024 * 1024)
#define KMP_CPU_SET_TRY_SIZE CACHE_LINE
#elif KMP_OS_FREEBSD
#define KMP_CPU_SET_SIZE_LIMIT (sizeof(cpuset_t))
#endif

  int verbose = __kmp_affinity.flags.verbose;
  int warnings = __kmp_affinity.flags.warnings;
  enum affinity_type type = __kmp_affinity.type;

#if KMP_OS_LINUX
  long gCode;
  unsigned char *buf;
  buf = (unsigned char *)KMP_INTERNAL_MALLOC(KMP_CPU_SET_SIZE_LIMIT);

  // If the syscall returns a suggestion for the size, we do not have to
  // search for an appropriate size.
  gCode = syscall(__NR_sched_getaffinity, 0, KMP_CPU_SET_TRY_SIZE, buf);
  KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                "initial getaffinity call returned %ld errno = %d\n",
                gCode, errno));
  if (gCode < 0 && errno != EINVAL) {
    // System call not supported
    if (verbose ||
        (warnings && (type != affinity_none) && (type != affinity_default) &&
         (type != affinity_disabled))) {
      int error = errno;
      kmp_msg_t err_code = KMP_ERR(error);
      __kmp_msg(kmp_ms_warning, KMP_MSG(GetAffSysCallNotSupported, env_var),
                err_code, __kmp_msg_null);
      if (__kmp_generate_warnings == kmp_warnings_off) {
        __kmp_str_free(&err_code.str);
      }
    }
    KMP_AFFINITY_DISABLE();
    KMP_INTERNAL_FREE(buf);
    return;
  } else if (gCode > 0) {
    // The optimal situation: the OS returned the size of the buffer it
    // expected.
    KMP_AFFINITY_ENABLE(gCode);
    KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                  "affinity supported (mask size %d)\n",
                  (int)__kmp_affin_mask_size));
    KMP_INTERNAL_FREE(buf);
    return;
  }
  // Call the getaffinity system call repeatedly with increasing set sizes
  // until we succeed, or reach an upper bound on the search.
  KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                "searching for proper set size\n"));
  int size;
  for (size = 1; size <= KMP_CPU_SET_SIZE_LIMIT; size *= 2) {
    gCode = syscall(__NR_sched_getaffinity, 0, size, buf);
    KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                  "getaffinity for mask size %ld returned %ld errno = %d\n",
                  size, gCode, errno));

    if (gCode < 0) {
      if (errno == ENOSYS) {
        // We shouldn't get here
        KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                      "inconsistent OS call behavior: errno == ENOSYS for mask "
                      "size %d\n",
                      size));
        if (verbose ||
            (warnings && (type != affinity_none) &&
             (type != affinity_default) && (type != affinity_disabled))) {
          int error = errno;
          kmp_msg_t err_code = KMP_ERR(error);
          __kmp_msg(kmp_ms_warning, KMP_MSG(GetAffSysCallNotSupported, env_var),
                    err_code, __kmp_msg_null);
          if (__kmp_generate_warnings == kmp_warnings_off) {
            __kmp_str_free(&err_code.str);
          }
        }
        KMP_AFFINITY_DISABLE();
        KMP_INTERNAL_FREE(buf);
        return;
      }
      continue;
    }

    KMP_AFFINITY_ENABLE(gCode);
    KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                  "affinity supported (mask size %d)\n",
                  (int)__kmp_affin_mask_size));
    KMP_INTERNAL_FREE(buf);
    return;
  }
#elif KMP_OS_FREEBSD
  long gCode;
  unsigned char *buf;
  buf = (unsigned char *)KMP_INTERNAL_MALLOC(KMP_CPU_SET_SIZE_LIMIT);
  gCode = pthread_getaffinity_np(pthread_self(), KMP_CPU_SET_SIZE_LIMIT,
                                 reinterpret_cast<cpuset_t *>(buf));
  KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                "initial getaffinity call returned %d errno = %d\n",
                gCode, errno));
  if (gCode == 0) {
    KMP_AFFINITY_ENABLE(KMP_CPU_SET_SIZE_LIMIT);
    KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                  "affinity supported (mask size %d)\n",
                  (int)__kmp_affin_mask_size));
    KMP_INTERNAL_FREE(buf);
    return;
  }
#endif
  // Affinity support not found.
  KMP_INTERNAL_FREE(buf);
  KMP_AFFINITY_DISABLE();
  KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                "cannot determine mask size - affinity not supported\n"));
  if (verbose || (warnings && (type != affinity_none) &&
                  (type != affinity_default) && (type != affinity_disabled))) {
    KMP_WARNING(AffCantGetMaskSize, env_var);
  }
}

#endif // (KMP_OS_LINUX || KMP_OS_FREEBSD) && KMP_AFFINITY_SUPPORTED
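// Check whether the kernel implements the futex syscall by issuing a harmless
// FUTEX_WAKE on a local word and testing for ENOSYS.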
int __kmp_futex_determine_capable() {
  int loc = 0;
  long rc = syscall(__NR_futex, &loc, FUTEX_WAKE, 1, NULL, NULL, 0);
  int retval = (rc == 0) || (errno != ENOSYS);

  KA_TRACE(10,
           ("__kmp_futex_determine_capable: rc = %d errno = %d\n", rc, errno));
  KA_TRACE(10, ("__kmp_futex_determine_capable: futex syscall%s supported\n",
                retval ? "" : " not"));

  return retval;
}
#if (KMP_ARCH_X86 || KMP_ARCH_X86_64) && (!KMP_ASM_INTRINS)

kmp_int8 __kmp_test_then_or8(volatile kmp_int8 *p, kmp_int8 d) {
  kmp_int8 old_value, new_value;

  old_value = TCR_1(*p);
  new_value = old_value | d;

  while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_1(*p);
    new_value = old_value | d;
  }
  return old_value;
}

kmp_int8 __kmp_test_then_and8(volatile kmp_int8 *p, kmp_int8 d) {
  kmp_int8 old_value, new_value;

  old_value = TCR_1(*p);
  new_value = old_value & d;

  while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_1(*p);
    new_value = old_value & d;
  }
  return old_value;
}

kmp_uint32 __kmp_test_then_or32(volatile kmp_uint32 *p, kmp_uint32 d) {
  kmp_uint32 old_value, new_value;

  old_value = TCR_4(*p);
  new_value = old_value | d;

  while (!KMP_COMPARE_AND_STORE_REL32(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_4(*p);
    new_value = old_value | d;
  }
  return old_value;
}

kmp_uint32 __kmp_test_then_and32(volatile kmp_uint32 *p, kmp_uint32 d) {
  kmp_uint32 old_value, new_value;

  old_value = TCR_4(*p);
  new_value = old_value & d;

  while (!KMP_COMPARE_AND_STORE_REL32(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_4(*p);
    new_value = old_value & d;
  }
  return old_value;
}

kmp_int8 __kmp_test_then_add8(volatile kmp_int8 *p, kmp_int8 d) {
  kmp_int8 old_value, new_value;

  old_value = TCR_1(*p);
  new_value = old_value + d;

  while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_1(*p);
    new_value = old_value + d;
  }
  return old_value;
}

kmp_int64 __kmp_test_then_add64(volatile kmp_int64 *p, kmp_int64 d) {
  kmp_int64 old_value, new_value;

  old_value = TCR_8(*p);
  new_value = old_value + d;

  while (!KMP_COMPARE_AND_STORE_REL64(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_8(*p);
    new_value = old_value + d;
  }
  return old_value;
}

kmp_uint64 __kmp_test_then_or64(volatile kmp_uint64 *p, kmp_uint64 d) {
  kmp_uint64 old_value, new_value;

  old_value = TCR_8(*p);
  new_value = old_value | d;
  while (!KMP_COMPARE_AND_STORE_REL64(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_8(*p);
    new_value = old_value | d;
  }
  return old_value;
}

kmp_uint64 __kmp_test_then_and64(volatile kmp_uint64 *p, kmp_uint64 d) {
  kmp_uint64 old_value, new_value;

  old_value = TCR_8(*p);
  new_value = old_value & d;
  while (!KMP_COMPARE_AND_STORE_REL64(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_8(*p);
    new_value = old_value & d;
  }
  return old_value;
}

#endif /* (KMP_ARCH_X86 || KMP_ARCH_X86_64) && (!KMP_ASM_INTRINS) */
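// Forcibly terminate the worker associated with gtid via pthread_cancel;
// ESRCH (thread already gone) is tolerated.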
void __kmp_terminate_thread(int gtid) {
  int status;
  kmp_info_t *th = __kmp_threads[gtid];

#ifdef KMP_CANCEL_THREADS
  KA_TRACE(10, ("__kmp_terminate_thread: kill (%d)\n", gtid));
  status = pthread_cancel(th->th.th_info.ds.ds_thread);
  if (status != 0 && status != ESRCH) {
    __kmp_fatal(KMP_MSG(CantTerminateWorkerThread), KMP_ERR(status),
                __kmp_msg_null);
  }
#endif
}
static kmp_int32 __kmp_set_stack_info(int gtid, kmp_info_t *th) {
  int stack_data;
#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||    \
    KMP_OS_OPENBSD || KMP_OS_HURD
  pthread_attr_t attr;
  int status;
  size_t size = 0;
  void *addr = 0;

  /* Always do incremental stack refinement for ubermaster threads since the
     initial thread stack range can be reduced by sibling thread creation so
     pthread_attr_getstack may cause thread gtid aliasing */
  if (!KMP_UBER_GTID(gtid)) {

    /* Fetch the real thread attributes */
    status = pthread_attr_init(&attr);
    KMP_CHECK_SYSFAIL("pthread_attr_init", status);
#if KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD
    status = pthread_attr_get_np(pthread_self(), &attr);
    KMP_CHECK_SYSFAIL("pthread_attr_get_np", status);
#else
    status = pthread_getattr_np(pthread_self(), &attr);
    KMP_CHECK_SYSFAIL("pthread_getattr_np", status);
#endif
    status = pthread_attr_getstack(&attr, &addr, &size);
    KMP_CHECK_SYSFAIL("pthread_attr_getstack", status);
    KA_TRACE(60,
             ("__kmp_set_stack_info: T#%d pthread_attr_getstack returned size:"
              " %lu, low addr: %p\n",
              gtid, size, addr));
    status = pthread_attr_destroy(&attr);
    KMP_CHECK_SYSFAIL("pthread_attr_destroy", status);
  }

  if (size != 0 && addr != 0) { // was stack parameter determination successful?
    /* Store the correct base and size */
    TCW_PTR(th->th.th_info.ds.ds_stackbase, (((char *)addr) + size));
    TCW_PTR(th->th.th_info.ds.ds_stacksize, size);
    TCW_4(th->th.th_info.ds.ds_stackgrow, FALSE);
    return TRUE;
  }
#endif
  /* Use incremental refinement starting from an initial conservative
     estimate */
  TCW_PTR(th->th.th_info.ds.ds_stacksize, 0);
  TCW_PTR(th->th.th_info.ds.ds_stackbase, &stack_data);
  TCW_4(th->th.th_info.ds.ds_stackgrow, TRUE);
  return FALSE;
}
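// Worker thread entry point: publish the gtid in thread-local storage, set
// the affinity mask and cancellation state, optionally offset the stack, then
// run __kmp_launch_thread.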
static void *__kmp_launch_worker(void *thr) {
  int status, old_type, old_state;
#ifdef KMP_BLOCK_SIGNALS
  sigset_t new_set, old_set;
#endif /* KMP_BLOCK_SIGNALS */
  void *exit_val;
#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||    \
    KMP_OS_OPENBSD || KMP_OS_HURD
  void *volatile padding = 0;
#endif
  int gtid;

  gtid = ((kmp_info_t *)thr)->th.th_info.ds.ds_gtid;
  __kmp_gtid_set_specific(gtid);
#ifdef KMP_TDATA_GTID
  __kmp_gtid = gtid;
#endif
#if KMP_STATS_ENABLED
  // set thread local index to point to thread-specific stats
  __kmp_stats_thread_ptr = ((kmp_info_t *)thr)->th.th_stats;
  __kmp_stats_thread_ptr->startLife();
  KMP_SET_THREAD_STATE(IDLE);
#endif

#if USE_ITT_BUILD
  __kmp_itt_thread_name(gtid);
#endif /* USE_ITT_BUILD */

#if KMP_AFFINITY_SUPPORTED
  __kmp_affinity_set_init_mask(gtid, FALSE);
#endif

#ifdef KMP_CANCEL_THREADS
  status = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old_type);
  KMP_CHECK_SYSFAIL("pthread_setcanceltype", status);
  status = pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &old_state);
  KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
#endif

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
  // Set FP control regs to be a copy of the parallel initialization thread's.
  __kmp_clear_x87_fpu_status_word();
  __kmp_load_x87_fpu_control_word(&__kmp_init_x87_fpu_control_word);
  __kmp_load_mxcsr(&__kmp_init_mxcsr);
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

#ifdef KMP_BLOCK_SIGNALS
  status = sigfillset(&new_set);
  KMP_CHECK_SYSFAIL_ERRNO("sigfillset", status);
  status = pthread_sigmask(SIG_BLOCK, &new_set, &old_set);
  KMP_CHECK_SYSFAIL("pthread_sigmask", status);
#endif /* KMP_BLOCK_SIGNALS */

#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||    \
    KMP_OS_OPENBSD
  if (__kmp_stkoffset > 0 && gtid > 0) {
    padding = KMP_ALLOCA(gtid * __kmp_stkoffset);
  }
#endif

  KMP_MB();
  __kmp_set_stack_info(gtid, (kmp_info_t *)thr);

  __kmp_check_stack_overlap((kmp_info_t *)thr);

  exit_val = __kmp_launch_thread((kmp_info_t *)thr);

#ifdef KMP_BLOCK_SIGNALS
  status = pthread_sigmask(SIG_SETMASK, &old_set, NULL);
  KMP_CHECK_SYSFAIL("pthread_sigmask", status);
#endif /* KMP_BLOCK_SIGNALS */

  return exit_val;
}
static void *__kmp_launch_monitor(void *thr) {
  int status, old_type, old_state;
#ifdef KMP_BLOCK_SIGNALS
  sigset_t new_set;
#endif /* KMP_BLOCK_SIGNALS */
  struct timespec interval;

  KMP_MB(); /* Flush all pending memory write invalidates. */

  KA_TRACE(10, ("__kmp_launch_monitor: #1 launched\n"));

  /* register us as the monitor thread */
  __kmp_gtid_set_specific(KMP_GTID_MONITOR);
#ifdef KMP_TDATA_GTID
  __kmp_gtid = KMP_GTID_MONITOR;
#endif

  KMP_MB();

#if USE_ITT_BUILD
  // Instruct Intel(R) Threading Tools to ignore monitor thread.
  __kmp_itt_thread_ignore();
#endif /* USE_ITT_BUILD */

  __kmp_set_stack_info(((kmp_info_t *)thr)->th.th_info.ds.ds_gtid,
                       (kmp_info_t *)thr);

  __kmp_check_stack_overlap((kmp_info_t *)thr);

#ifdef KMP_CANCEL_THREADS
  status = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old_type);
  KMP_CHECK_SYSFAIL("pthread_setcanceltype", status);
  status = pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &old_state);
  KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
#endif

#if KMP_REAL_TIME_FIX
  // If real-time scheduling is in effect, bump the monitor's priority so it
  // does not starve.
  int sched = sched_getscheduler(0);
  if (sched == SCHED_FIFO || sched == SCHED_RR) {
    struct sched_param param;
    int max_priority = sched_get_priority_max(sched);
    int rc;
    KMP_WARNING(RealTimeSchedNotSupported);
    sched_getparam(0, &param);
    if (param.sched_priority < max_priority) {
      param.sched_priority += 1;
      rc = sched_setscheduler(0, sched, &param);
      if (rc != 0) {
        int error = errno;
        kmp_msg_t err_code = KMP_ERR(error);
        __kmp_msg(kmp_ms_warning, KMP_MSG(CantChangeMonitorPriority),
                  err_code, KMP_MSG(MonitorWillStarve), __kmp_msg_null);
        if (__kmp_generate_warnings == kmp_warnings_off) {
          __kmp_str_free(&err_code.str);
        }
      }
    } else {
      __kmp_msg(kmp_ms_warning, KMP_MSG(RunningAtMaxPriority),
                KMP_MSG(MonitorWillStarve), KMP_HNT(RunningAtMaxPriority),
                __kmp_msg_null);
    }
  }
  // Free the thread that waits for the monitor to start.
  TCW_4(__kmp_global.g.g_time.dt.t_value, 0);
#endif // KMP_REAL_TIME_FIX

  KMP_MB();

  if (__kmp_monitor_wakeups == 1) {
    interval.tv_sec = 1;
    interval.tv_nsec = 0;
  } else {
    interval.tv_sec = 0;
    interval.tv_nsec = (KMP_NSEC_PER_SEC / __kmp_monitor_wakeups);
  }

  KA_TRACE(10, ("__kmp_launch_monitor: #2 monitor\n"));

  while (!TCR_4(__kmp_global.g.g_done)) {
    struct timespec now;
    struct timeval tval;

    KA_TRACE(15, ("__kmp_launch_monitor: update\n"));

    status = gettimeofday(&tval, NULL);
    KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
    TIMEVAL_TO_TIMESPEC(&tval, &now);

    now.tv_sec += interval.tv_sec;
    now.tv_nsec += interval.tv_nsec;

    if (now.tv_nsec >= KMP_NSEC_PER_SEC) {
      now.tv_sec += 1;
      now.tv_nsec -= KMP_NSEC_PER_SEC;
    }

    status = pthread_mutex_lock(&__kmp_wait_mx.m_mutex);
    KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
    // The monitor should not fall asleep if g_done has been set.
    if (!TCR_4(__kmp_global.g.g_done)) { // check once more under mutex
      status = pthread_cond_timedwait(&__kmp_wait_cv.c_cond,
                                      &__kmp_wait_mx.m_mutex, &now);
      if (status != 0) {
        if (status != ETIMEDOUT && status != EINTR) {
          KMP_SYSFAIL("pthread_cond_timedwait", status);
        }
      }
    }
    status = pthread_mutex_unlock(&__kmp_wait_mx.m_mutex);
    KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);

    TCW_4(__kmp_global.g.g_time.dt.t_value,
          TCR_4(__kmp_global.g.g_time.dt.t_value) + 1);

    KMP_MB();
  }

  KA_TRACE(10, ("__kmp_launch_monitor: #3 cleanup\n"));

#ifdef KMP_BLOCK_SIGNALS
  status = sigfillset(&new_set);
  KMP_CHECK_SYSFAIL_ERRNO("sigfillset", status);
  status = pthread_sigmask(SIG_UNBLOCK, &new_set, NULL);
  KMP_CHECK_SYSFAIL("pthread_sigmask", status);
#endif /* KMP_BLOCK_SIGNALS */

  KA_TRACE(10, ("__kmp_launch_monitor: #4 finished\n"));

  if (__kmp_global.g.g_abort != 0) {
    /* now we need to terminate the worker threads; the value of t_abort is
       the signal that caused the problem */
    int gtid;

    KA_TRACE(10, ("__kmp_launch_monitor: #5 terminate sig=%d\n",
                  __kmp_global.g.g_abort));

    /* terminate the OpenMP worker threads */
    for (gtid = 1; gtid < __kmp_threads_capacity; ++gtid)
      __kmp_terminate_thread(gtid);

    KA_TRACE(10, ("__kmp_launch_monitor: #6 raise sig=%d\n",
                  __kmp_global.g.g_abort));

    if (__kmp_global.g.g_abort > 0)
      raise(__kmp_global.g.g_abort);
  }

  KA_TRACE(10, ("__kmp_launch_monitor: #7 exit\n"));

  return thr;
}
void __kmp_create_worker(int gtid, kmp_info_t *th, size_t stack_size) {
  pthread_t handle;
  pthread_attr_t thread_attr;
  int status;

  th->th.th_info.ds.ds_gtid = gtid;

#if KMP_STATS_ENABLED
  // sets up worker thread stats
  __kmp_acquire_tas_lock(&__kmp_stats_lock, gtid);

  // th->th.th_stats is used to transfer the thread-specific stats pointer to
  // __kmp_launch_worker, where the new thread sets its thread-local pointer.
  if (!KMP_UBER_GTID(gtid)) {
    th->th.th_stats = __kmp_stats_list->push_back(gtid);
  } else {
    // For root threads, __kmp_stats_thread_ptr is set in
    // __kmp_register_root(), so set the th->th.th_stats field to it.
    th->th.th_stats = __kmp_stats_thread_ptr;
  }
  __kmp_release_tas_lock(&__kmp_stats_lock, gtid);
#endif // KMP_STATS_ENABLED

  if (KMP_UBER_GTID(gtid)) {
    KA_TRACE(10, ("__kmp_create_worker: uber thread (%d)\n", gtid));
    th->th.th_info.ds.ds_thread = pthread_self();
    __kmp_set_stack_info(gtid, th);
    __kmp_check_stack_overlap(th);
    return;
  }

  KA_TRACE(10, ("__kmp_create_worker: try to create thread (%d)\n", gtid));

  KMP_MB(); /* Flush all pending memory write invalidates. */

#ifdef KMP_THREAD_ATTR
  status = pthread_attr_init(&thread_attr);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantInitThreadAttrs), KMP_ERR(status), __kmp_msg_null);
  }
  status = pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_JOINABLE);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantSetWorkerState), KMP_ERR(status), __kmp_msg_null);
  }

  /* The multiple of 2 is there because on some machines, requesting an unusual
     stacksize causes the thread to have an offset before the dummy alloca()
     takes place to create the offset. Allocating twice the offset preserves
     both the requested stack space and the premade offset. */
  stack_size += gtid * __kmp_stkoffset * 2;

  KA_TRACE(10, ("__kmp_create_worker: T#%d, default stacksize = %lu bytes, "
                "__kmp_stksize = %lu bytes, final stacksize = %lu bytes\n",
                gtid, KMP_DEFAULT_STKSIZE, __kmp_stksize, stack_size));

#ifdef _POSIX_THREAD_ATTR_STACKSIZE
  /* Set stack size for this thread now. */
  status = pthread_attr_setstacksize(&thread_attr, stack_size);
#ifdef KMP_BACKUP_STKSIZE
  if (status != 0) {
    if (!__kmp_env_stksize) {
      stack_size = KMP_BACKUP_STKSIZE + gtid * __kmp_stkoffset;
      __kmp_stksize = KMP_BACKUP_STKSIZE;
      KA_TRACE(10, ("__kmp_create_worker: T#%d, default stacksize = %lu bytes, "
                    "__kmp_stksize = %lu bytes, (backup) final stacksize = %lu "
                    "bytes\n",
                    gtid, KMP_DEFAULT_STKSIZE, __kmp_stksize, stack_size));
      status = pthread_attr_setstacksize(&thread_attr, stack_size);
    }
  }
#endif /* KMP_BACKUP_STKSIZE */
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantSetWorkerStackSize, stack_size), KMP_ERR(status),
                KMP_HNT(ChangeWorkerStackSize), __kmp_msg_null);
  }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
#endif /* KMP_THREAD_ATTR */

  status =
      pthread_create(&handle, &thread_attr, __kmp_launch_worker, (void *)th);
  if (status != 0 || !handle) {
#ifdef _POSIX_THREAD_ATTR_STACKSIZE
    if (status == EINVAL) {
      __kmp_fatal(KMP_MSG(CantSetWorkerStackSize, stack_size), KMP_ERR(status),
                  KMP_HNT(IncreaseWorkerStackSize), __kmp_msg_null);
    }
    if (status == ENOMEM) {
      __kmp_fatal(KMP_MSG(CantSetWorkerStackSize, stack_size), KMP_ERR(status),
                  KMP_HNT(DecreaseWorkerStackSize), __kmp_msg_null);
    }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
    if (status == EAGAIN) {
      __kmp_fatal(KMP_MSG(NoResourcesForWorkerThread), KMP_ERR(status),
                  KMP_HNT(Decrease_NUM_THREADS), __kmp_msg_null);
    }
    KMP_SYSFAIL("pthread_create", status);
  }

  th->th.th_info.ds.ds_thread = handle;

#ifdef KMP_THREAD_ATTR
  status = pthread_attr_destroy(&thread_attr);
  if (status) {
    kmp_msg_t err_code = KMP_ERR(status);
    __kmp_msg(kmp_ms_warning, KMP_MSG(CantDestroyThreadAttrs), err_code,
              __kmp_msg_null);
    if (__kmp_generate_warnings == kmp_warnings_off) {
      __kmp_str_free(&err_code.str);
    }
  }
#endif /* KMP_THREAD_ATTR */

  KMP_MB(); /* Flush all pending memory write invalidates. */

  KA_TRACE(10, ("__kmp_create_worker: done creating thread (%d)\n", gtid));

} // __kmp_create_worker
void __kmp_create_monitor(kmp_info_t *th) {
  pthread_t handle;
  pthread_attr_t thread_attr;
  size_t size;
  int status;
  int auto_adj_size = FALSE;

  if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME) {
    // We don't need the monitor thread in case of MAX_BLOCKTIME.
    KA_TRACE(10, ("__kmp_create_monitor: skipping monitor thread because of "
                  "MAX blocktime\n"));
    th->th.th_info.ds.ds_tid = 0; // this makes reap_monitor a no-op
    th->th.th_info.ds.ds_gtid = 0;
    return;
  }
  KA_TRACE(10, ("__kmp_create_monitor: try to create monitor\n"));

  KMP_MB(); /* Flush all pending memory write invalidates. */

  th->th.th_info.ds.ds_tid = KMP_GTID_MONITOR;
  th->th.th_info.ds.ds_gtid = KMP_GTID_MONITOR;
#if KMP_REAL_TIME_FIX
  TCW_4(__kmp_global.g.g_time.dt.t_value,
        -1); // Will use it for synchronization a bit later.
#else
  TCW_4(__kmp_global.g.g_time.dt.t_value, 0);
#endif // KMP_REAL_TIME_FIX

#ifdef KMP_THREAD_ATTR
  if (__kmp_monitor_stksize == 0) {
    __kmp_monitor_stksize = KMP_DEFAULT_MONITOR_STKSIZE;
    auto_adj_size = TRUE;
  }
  status = pthread_attr_init(&thread_attr);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantInitThreadAttrs), KMP_ERR(status), __kmp_msg_null);
  }
  status = pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_JOINABLE);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantSetMonitorState), KMP_ERR(status), __kmp_msg_null);
  }

#ifdef _POSIX_THREAD_ATTR_STACKSIZE
  status = pthread_attr_getstacksize(&thread_attr, &size);
  KMP_CHECK_SYSFAIL("pthread_attr_getstacksize", status);
#else
  size = __kmp_sys_min_stksize;
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
#endif /* KMP_THREAD_ATTR */

  if (__kmp_monitor_stksize == 0) {
    __kmp_monitor_stksize = KMP_DEFAULT_MONITOR_STKSIZE;
  }
  if (__kmp_monitor_stksize < __kmp_sys_min_stksize) {
    __kmp_monitor_stksize = __kmp_sys_min_stksize;
  }

  KA_TRACE(10, ("__kmp_create_monitor: default stacksize = %lu bytes,"
                " requested stacksize = %lu bytes\n",
                size, __kmp_monitor_stksize));

retry:

/* Set stack size for this thread now. */
#ifdef _POSIX_THREAD_ATTR_STACKSIZE
  KA_TRACE(10, ("__kmp_create_monitor: setting stacksize = %lu bytes,",
                __kmp_monitor_stksize));
  status = pthread_attr_setstacksize(&thread_attr, __kmp_monitor_stksize);
  if (status != 0) {
    if (auto_adj_size) {
      __kmp_monitor_stksize *= 2;
      goto retry;
    }
    kmp_msg_t err_code = KMP_ERR(status);
    __kmp_msg(kmp_ms_warning,
              KMP_MSG(CantSetMonitorStackSize, (long int)__kmp_monitor_stksize),
              err_code, KMP_HNT(ChangeMonitorStackSize), __kmp_msg_null);
    if (__kmp_generate_warnings == kmp_warnings_off) {
      __kmp_str_free(&err_code.str);
    }
  }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */

  status =
      pthread_create(&handle, &thread_attr, __kmp_launch_monitor, (void *)th);

  if (status != 0) {
#ifdef _POSIX_THREAD_ATTR_STACKSIZE
    if (status == EINVAL) {
      if (auto_adj_size && (__kmp_monitor_stksize < (size_t)0x40000000)) {
        __kmp_monitor_stksize *= 2;
        goto retry;
      }
      __kmp_fatal(KMP_MSG(CantSetMonitorStackSize, __kmp_monitor_stksize),
                  KMP_ERR(status), KMP_HNT(IncreaseMonitorStackSize),
                  __kmp_msg_null);
    }
    if (status == ENOMEM) {
      __kmp_fatal(KMP_MSG(CantSetMonitorStackSize, __kmp_monitor_stksize),
                  KMP_ERR(status), KMP_HNT(DecreaseMonitorStackSize),
                  __kmp_msg_null);
    }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
    if (status == EAGAIN) {
      __kmp_fatal(KMP_MSG(NoResourcesForMonitorThread), KMP_ERR(status),
                  KMP_HNT(DecreaseNumberOfThreadsInUse), __kmp_msg_null);
    }
    KMP_SYSFAIL("pthread_create", status);
  }

  th->th.th_info.ds.ds_thread = handle;

#if KMP_REAL_TIME_FIX
  // Wait for the monitor thread to really start and set its priority.
  KMP_DEBUG_ASSERT(sizeof(kmp_uint32) ==
                   sizeof(__kmp_global.g.g_time.dt.t_value));
  __kmp_wait_4((kmp_uint32 volatile *)&__kmp_global.g.g_time.dt.t_value, -1,
               &__kmp_neq_4, NULL);
#endif // KMP_REAL_TIME_FIX

#ifdef KMP_THREAD_ATTR
  status = pthread_attr_destroy(&thread_attr);
  if (status != 0) {
    kmp_msg_t err_code = KMP_ERR(status);
    __kmp_msg(kmp_ms_warning, KMP_MSG(CantDestroyThreadAttrs), err_code,
              __kmp_msg_null);
    if (__kmp_generate_warnings == kmp_warnings_off) {
      __kmp_str_free(&err_code.str);
    }
  }
#endif

  KMP_MB(); /* Flush all pending memory write invalidates. */

  KA_TRACE(10, ("__kmp_create_monitor: monitor created %#.8lx\n",
                th->th.th_info.ds.ds_thread));

} // __kmp_create_monitor
void __kmp_exit_thread(int exit_status) {
  pthread_exit((void *)(intptr_t)exit_status);
} // __kmp_exit_thread
void __kmp_resume_monitor();

void __kmp_reap_monitor(kmp_info_t *th) {
  int status;
  void *exit_val;

  KA_TRACE(10, ("__kmp_reap_monitor: try to reap monitor thread with handle"
                " %#.8lx\n",
                th->th.th_info.ds.ds_thread));

  // If monitor has been created, its tid and gtid should be KMP_GTID_MONITOR.
  // If both tid and gtid are 0, it means the monitor did not ever start.
  // If both tid and gtid are KMP_GTID_DNE, the monitor has been shut down.
  KMP_DEBUG_ASSERT(th->th.th_info.ds.ds_tid == th->th.th_info.ds.ds_gtid);
  if (th->th.th_info.ds.ds_gtid != KMP_GTID_MONITOR) {
    KA_TRACE(10, ("__kmp_reap_monitor: monitor did not start, returning\n"));
    return;
  }

  KMP_MB(); /* Flush all pending memory write invalidates. */

  /* First, check whether the monitor thread exists to wake it up. This avoids
     performance problems when the monitor sleeps during a blocktime-size
     interval. */
  status = pthread_kill(th->th.th_info.ds.ds_thread, 0);
  if (status != ESRCH) {
    __kmp_resume_monitor(); // Wake up the monitor thread.
  }
  KA_TRACE(10, ("__kmp_reap_monitor: try to join with monitor\n"));
  status = pthread_join(th->th.th_info.ds.ds_thread, &exit_val);
  if (exit_val != th) {
    __kmp_fatal(KMP_MSG(ReapMonitorError), KMP_ERR(status), __kmp_msg_null);
  }

  th->th.th_info.ds.ds_tid = KMP_GTID_DNE;
  th->th.th_info.ds.ds_gtid = KMP_GTID_DNE;

  KA_TRACE(10, ("__kmp_reap_monitor: done reaping monitor thread with handle"
                " %#.8lx\n",
                th->th.th_info.ds.ds_thread));

  KMP_MB(); /* Flush all pending memory write invalidates. */
}
void __kmp_reap_worker(kmp_info_t *th) {
  int status;
  void *exit_val;

  KMP_MB(); /* Flush all pending memory write invalidates. */

  KA_TRACE(
      10, ("__kmp_reap_worker: try to reap T#%d\n", th->th.th_info.ds.ds_gtid));

  status = pthread_join(th->th.th_info.ds.ds_thread, &exit_val);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(ReapWorkerError), KMP_ERR(status), __kmp_msg_null);
  }
  if (exit_val != th) {
    KA_TRACE(10, ("__kmp_reap_worker: worker T#%d did not reap properly, "
                  "exit_val = %p\n",
                  th->th.th_info.ds.ds_gtid, exit_val));
  }

  KA_TRACE(10, ("__kmp_reap_worker: done reaping T#%d\n",
                th->th.th_info.ds.ds_gtid));

  KMP_MB(); /* Flush all pending memory write invalidates. */
}
#if KMP_HANDLE_SIGNALS

static void __kmp_null_handler(int signo) {
  // Do nothing, for doing SIG_IGN-type actions.
} // __kmp_null_handler

static void __kmp_team_handler(int signo) {
  if (__kmp_global.g.g_abort == 0) {
/* Stage 1 signal handler: shut down all of the threads. */
#ifdef KMP_DEBUG
    __kmp_debug_printf("__kmp_team_handler: caught signal = %d\n", signo);
#endif
    switch (signo) {
    case SIGHUP:
    case SIGINT:
    case SIGQUIT:
    case SIGILL:
    case SIGABRT:
    case SIGFPE:
    case SIGBUS:
    case SIGSEGV:
#ifdef SIGSYS
    case SIGSYS:
#endif
    case SIGTERM:
      if (__kmp_debug_buf) {
        __kmp_dump_debug_buffer();
      }
      __kmp_unregister_library(); // cleanup shared memory
      KMP_MB();
      TCW_4(__kmp_global.g.g_abort, signo);
      KMP_MB();
      TCW_4(__kmp_global.g.g_done, TRUE);
      KMP_MB();
      break;
    default:
#ifdef KMP_DEBUG
      __kmp_debug_printf("__kmp_team_handler: unknown signal type");
#endif
      break;
    }
  }
} // __kmp_team_handler
static void __kmp_sigaction(int signum, const struct sigaction *act,
                            struct sigaction *oldact) {
  int rc = sigaction(signum, act, oldact);
  KMP_CHECK_SYSFAIL_ERRNO("sigaction", rc);
}

static void __kmp_install_one_handler(int sig, sig_func_t handler_func,
                                      int parallel_init) {
  KB_TRACE(60,
           ("__kmp_install_one_handler( %d, ..., %d )\n", sig, parallel_init));
  if (parallel_init) {
    struct sigaction new_action;
    struct sigaction old_action;
    new_action.sa_handler = handler_func;
    new_action.sa_flags = 0;
    sigfillset(&new_action.sa_mask);
    __kmp_sigaction(sig, &new_action, &old_action);
    if (old_action.sa_handler == __kmp_sighldrs[sig].sa_handler) {
      sigaddset(&__kmp_sigset, sig);
    } else {
      // Restore/keep the user's handler if one was previously installed.
      __kmp_sigaction(sig, &old_action, NULL);
    }
  } else {
    // Save the initial/system signal handlers to see if user handlers are
    // installed later.
    __kmp_sigaction(sig, NULL, &__kmp_sighldrs[sig]);
  }
} // __kmp_install_one_handler
static void __kmp_remove_one_handler(int sig) {
  KB_TRACE(60, ("__kmp_remove_one_handler( %d )\n", sig));
  if (sigismember(&__kmp_sigset, sig)) {
    struct sigaction old;
    __kmp_sigaction(sig, &__kmp_sighldrs[sig], &old);
    if ((old.sa_handler != __kmp_team_handler) &&
        (old.sa_handler != __kmp_null_handler)) {
      // Restore the user's signal handler.
      KB_TRACE(10, ("__kmp_remove_one_handler: oops, not our handler, "
                    "restoring: sig=%d\n",
                    sig));
      __kmp_sigaction(sig, &old, NULL);
    }
    sigdelset(&__kmp_sigset, sig);
  }
} // __kmp_remove_one_handler
void __kmp_install_signals(int parallel_init) {
  KB_TRACE(10, ("__kmp_install_signals( %d )\n", parallel_init));
  if (__kmp_handle_signals || !parallel_init) {
    /* If !parallel_init, we do not install handlers, just save original
       handlers. Let us do it even if __kmp_handle_signals is 0. */
    sigemptyset(&__kmp_sigset);
    __kmp_install_one_handler(SIGHUP, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGINT, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGQUIT, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGILL, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGABRT, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGFPE, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGBUS, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGSEGV, __kmp_team_handler, parallel_init);
#ifdef SIGSYS
    __kmp_install_one_handler(SIGSYS, __kmp_team_handler, parallel_init);
#endif // SIGSYS
    __kmp_install_one_handler(SIGTERM, __kmp_team_handler, parallel_init);
#ifdef SIGPIPE
    __kmp_install_one_handler(SIGPIPE, __kmp_team_handler, parallel_init);
#endif // SIGPIPE
  }
} // __kmp_install_signals
void __kmp_remove_signals(void) {
  int sig;
  KB_TRACE(10, ("__kmp_remove_signals()\n"));
  for (sig = 1; sig < NSIG; ++sig) {
    __kmp_remove_one_handler(sig);
  }
} // __kmp_remove_signals

#endif // KMP_HANDLE_SIGNALS
void __kmp_enable(int new_state) {
#ifdef KMP_CANCEL_THREADS
  int status, old_state;
  status = pthread_setcancelstate(new_state, &old_state);
  KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
  KMP_DEBUG_ASSERT(old_state == PTHREAD_CANCEL_DISABLE);
#endif
}

void __kmp_disable(int *old_state) {
#ifdef KMP_CANCEL_THREADS
  int status;
  status = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, old_state);
  KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
#endif
}
static void __kmp_atfork_prepare(void) {
  __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);
  __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
}

static void __kmp_atfork_parent(void) {
  __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
  __kmp_release_bootstrap_lock(&__kmp_initz_lock);
}
/* Reset the library in the child so execution starts over with clean data
   structures in initial states. */
static void __kmp_atfork_child(void) {
  __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
  __kmp_release_bootstrap_lock(&__kmp_initz_lock);

#if KMP_AFFINITY_SUPPORTED
#if KMP_OS_LINUX || KMP_OS_FREEBSD
  // Reset the affinity in the child to the initial thread affinity of the
  // parent.
  kmp_set_thread_affinity_mask_initial();
#endif
  // Set default not to bind threads tightly in the child (we're expecting
  // over-subscription after the fork and this can improve things for
  // scripting languages that use OpenMP inside process-parallel code).
  if (__kmp_nested_proc_bind.bind_types != NULL) {
    __kmp_nested_proc_bind.bind_types[0] = proc_bind_false;
  }
  for (kmp_affinity_t *affinity : __kmp_affinities)
    *affinity = KMP_AFFINITY_INIT(affinity->env_var);
  __kmp_affin_fullMask = nullptr;
  __kmp_affin_origMask = nullptr;
#endif // KMP_AFFINITY_SUPPORTED

  __kmp_init_monitor = 0;
  __kmp_init_parallel = FALSE;
  __kmp_init_middle = FALSE;
  __kmp_init_serial = FALSE;
  TCW_4(__kmp_init_gtid, FALSE);
  __kmp_init_common = FALSE;

  TCW_4(__kmp_init_user_locks, FALSE);
#if !KMP_USE_DYNAMIC_LOCK
  __kmp_user_lock_table.used = 1;
  __kmp_user_lock_table.allocated = 0;
  __kmp_user_lock_table.table = NULL;
  __kmp_lock_blocks = NULL;
#endif

  TCW_4(__kmp_nth, 0);

  __kmp_thread_pool = NULL;
  __kmp_thread_pool_insert_pt = NULL;
  __kmp_team_pool = NULL;

  /* Must actually zero all the *cache arguments passed to __kmpc_threadprivate
     here so threadprivate doesn't use stale data. */
  KA_TRACE(10, ("__kmp_atfork_child: checking cache address list %p\n",
                __kmp_threadpriv_cache_list));

  while (__kmp_threadpriv_cache_list != NULL) {
    if (*__kmp_threadpriv_cache_list->addr != NULL) {
      KC_TRACE(50, ("__kmp_atfork_child: zeroing cache at address %p\n",
                    &(*__kmp_threadpriv_cache_list->addr)));
      *__kmp_threadpriv_cache_list->addr = NULL;
    }
    __kmp_threadpriv_cache_list = __kmp_threadpriv_cache_list->next;
  }

  __kmp_init_runtime = FALSE;

  /* Reset statically initialized locks. */
  __kmp_init_bootstrap_lock(&__kmp_initz_lock);
  __kmp_init_bootstrap_lock(&__kmp_stdio_lock);
  __kmp_init_bootstrap_lock(&__kmp_console_lock);
  __kmp_init_bootstrap_lock(&__kmp_task_team_lock);

  __kmp_need_register_serial = FALSE;
  __kmp_serial_initialize();
}
void __kmp_register_atfork(void) {
  if (__kmp_need_register_atfork) {
    int status = pthread_atfork(__kmp_atfork_prepare, __kmp_atfork_parent,
                                __kmp_atfork_child);
    KMP_CHECK_SYSFAIL("pthread_atfork", status);
    __kmp_need_register_atfork = FALSE;
  }
}
void __kmp_suspend_initialize(void) {
  int status;
  status = pthread_mutexattr_init(&__kmp_suspend_mutex_attr);
  KMP_CHECK_SYSFAIL("pthread_mutexattr_init", status);
  status = pthread_condattr_init(&__kmp_suspend_cond_attr);
  KMP_CHECK_SYSFAIL("pthread_condattr_init", status);
}
void __kmp_suspend_initialize_thread(kmp_info_t *th) {
  int old_value = KMP_ATOMIC_LD_RLX(&th->th.th_suspend_init_count);
  int new_value = __kmp_fork_count + 1;
  // Return if already initialized.
  if (old_value == new_value)
    return;
  // Wait, then return if another thread is busy initializing.
  if (old_value == -1 || !__kmp_atomic_compare_store(
                             &th->th.th_suspend_init_count, old_value, -1)) {
    while (KMP_ATOMIC_LD_ACQ(&th->th.th_suspend_init_count) != new_value) {
      KMP_CPU_PAUSE();
    }
  } else {
    // Claim to be the initializer and do the initializations.
    int status;
    status = pthread_cond_init(&th->th.th_suspend_cv.c_cond,
                               &__kmp_suspend_cond_attr);
    KMP_CHECK_SYSFAIL("pthread_cond_init", status);
    status = pthread_mutex_init(&th->th.th_suspend_mx.m_mutex,
                                &__kmp_suspend_mutex_attr);
    KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
    KMP_ATOMIC_ST_REL(&th->th.th_suspend_init_count, new_value);
  }
}
void __kmp_suspend_uninitialize_thread(kmp_info_t *th) {
  if (KMP_ATOMIC_LD_ACQ(&th->th.th_suspend_init_count) > __kmp_fork_count) {
    /* This means we have initialized the suspension pthread objects for this
       thread in this instance of the process. */
    int status;

    status = pthread_cond_destroy(&th->th.th_suspend_cv.c_cond);
    if (status != 0 && status != EBUSY) {
      KMP_SYSFAIL("pthread_cond_destroy", status);
    }
    status = pthread_mutex_destroy(&th->th.th_suspend_mx.m_mutex);
    if (status != 0 && status != EBUSY) {
      KMP_SYSFAIL("pthread_mutex_destroy", status);
    }
    --th->th.th_suspend_init_count;
    KMP_DEBUG_ASSERT(KMP_ATOMIC_LD_RLX(&th->th.th_suspend_init_count) ==
                     __kmp_fork_count);
  }
}
int __kmp_try_suspend_mx(kmp_info_t *th) {
  return (pthread_mutex_trylock(&th->th.th_suspend_mx.m_mutex) == 0);
}

void __kmp_lock_suspend_mx(kmp_info_t *th) {
  int status = pthread_mutex_lock(&th->th.th_suspend_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
}

void __kmp_unlock_suspend_mx(kmp_info_t *th) {
  int status = pthread_mutex_unlock(&th->th.th_suspend_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
}
template <class C>
static inline void __kmp_suspend_template(int th_gtid, C *flag) {
  KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_suspend);
  kmp_info_t *th = __kmp_threads[th_gtid];
  int status;
  typename C::flag_t old_spin;

  KF_TRACE(30, ("__kmp_suspend_template: T#%d enter for flag = %p\n", th_gtid,
                flag->get()));

  __kmp_suspend_initialize_thread(th);

  __kmp_lock_suspend_mx(th);

  KF_TRACE(10, ("__kmp_suspend_template: T#%d setting sleep bit for spin(%p)\n",
                th_gtid, flag->get()));

  old_spin = flag->set_sleeping();
  TCW_PTR(th->th.th_sleep_loc, (void *)flag);
  th->th.th_sleep_loc_type = flag->get_type();
  if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME &&
      __kmp_pause_status != kmp_soft_paused) {
    flag->unset_sleeping();
    TCW_PTR(th->th.th_sleep_loc, NULL);
    th->th.th_sleep_loc_type = flag_unset;
    __kmp_unlock_suspend_mx(th);
    return;
  }
  KF_TRACE(5, ("__kmp_suspend_template: T#%d set sleep bit for spin(%p)==%x,"
               " was %x\n",
               th_gtid, flag->get(), flag->load(), old_spin));

  if (flag->done_check_val(old_spin) || flag->done_check()) {
    flag->unset_sleeping();
    TCW_PTR(th->th.th_sleep_loc, NULL);
    th->th.th_sleep_loc_type = flag_unset;
    KF_TRACE(5, ("__kmp_suspend_template: T#%d false alarm, reset sleep bit "
                 "for spin(%p)\n",
                 th_gtid, flag->get()));
  } else {
    /* Encapsulate in a loop, as the documentation states that the wait may
       "with low probability" return when the condition variable has not been
       signaled or broadcast. */
    int deactivated = FALSE;

    while (flag->is_sleeping()) {
#ifdef DEBUG_SUSPEND
      char buffer[128];
      __kmp_suspend_count++;
      __kmp_print_cond(buffer, &th->th.th_suspend_cv);
      __kmp_printf("__kmp_suspend_template: suspending T#%d: %s\n", th_gtid,
                   buffer);
#endif
      // Mark the thread as no longer active (only in the first iteration of
      // the loop).
      if (!deactivated) {
        th->th.th_active = FALSE;
        if (th->th.th_active_in_pool) {
          th->th.th_active_in_pool = FALSE;
          KMP_ATOMIC_DEC(&__kmp_thread_pool_active_nth);
          KMP_DEBUG_ASSERT(TCR_4(__kmp_thread_pool_active_nth) >= 0);
        }
        deactivated = TRUE;
      }

      KMP_DEBUG_ASSERT(th->th.th_sleep_loc);
      KMP_DEBUG_ASSERT(flag->get_type() == th->th.th_sleep_loc_type);

#if USE_SUSPEND_TIMEOUT
      struct timespec now;
      struct timeval tval;
      int msecs;

      status = gettimeofday(&tval, NULL);
      KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
      TIMEVAL_TO_TIMESPEC(&tval, &now);

      msecs = (4 * __kmp_dflt_blocktime) + 200;
      now.tv_sec += msecs / 1000;
      now.tv_nsec += (msecs % 1000) * 1000;

      KF_TRACE(15, ("__kmp_suspend_template: T#%d about to perform "
                    "pthread_cond_timedwait\n",
                    th_gtid));
      status = pthread_cond_timedwait(&th->th.th_suspend_cv.c_cond,
                                      &th->th.th_suspend_mx.m_mutex, &now);
#else
      KF_TRACE(15, ("__kmp_suspend_template: T#%d about to perform"
                    " pthread_cond_wait\n",
                    th_gtid));
      status = pthread_cond_wait(&th->th.th_suspend_cv.c_cond,
                                 &th->th.th_suspend_mx.m_mutex);
#endif // USE_SUSPEND_TIMEOUT

      if ((status != 0) && (status != EINTR) && (status != ETIMEDOUT)) {
        KMP_SYSFAIL("pthread_cond_wait", status);
      }

      KMP_DEBUG_ASSERT(flag->get_type() == flag->get_ptr_type());

      if (!flag->is_sleeping() &&
          ((status == EINTR) || (status == ETIMEDOUT))) {
        // On interrupt or timeout, if the thread is no longer sleeping, make
        // sure sleep_loc gets reset.
        flag->unset_sleeping();
        TCW_PTR(th->th.th_sleep_loc, NULL);
        th->th.th_sleep_loc_type = flag_unset;
      }
#ifdef KMP_DEBUG
      if (status == ETIMEDOUT) {
        if (flag->is_sleeping()) {
          KF_TRACE(100,
                   ("__kmp_suspend_template: T#%d timeout wakeup\n", th_gtid));
        } else {
          KF_TRACE(2, ("__kmp_suspend_template: T#%d timeout wakeup, sleep bit "
                       "not set!\n",
                       th_gtid));
          TCW_PTR(th->th.th_sleep_loc, NULL);
          th->th.th_sleep_loc_type = flag_unset;
        }
      } else if (flag->is_sleeping()) {
        KF_TRACE(100,
                 ("__kmp_suspend_template: T#%d spurious wakeup\n", th_gtid));
      }
#endif
    } // while

    // Mark the thread as active again (if it was previously marked as
    // inactive).
    if (deactivated) {
      th->th.th_active = TRUE;
      if (TCR_4(th->th.th_in_pool)) {
        KMP_ATOMIC_INC(&__kmp_thread_pool_active_nth);
        th->th.th_active_in_pool = TRUE;
      }
    }
  }
  // We may have had the loop variable set before entering the loop body;
  // so we need to reset sleep_loc.
  TCW_PTR(th->th.th_sleep_loc, NULL);
  th->th.th_sleep_loc_type = flag_unset;

  KMP_DEBUG_ASSERT(!flag->is_sleeping());
  KMP_DEBUG_ASSERT(!th->th.th_sleep_loc);
#ifdef DEBUG_SUSPEND
  {
    char buffer[128];
    __kmp_print_cond(buffer, &th->th.th_suspend_cv);
    __kmp_printf("__kmp_suspend_template: T#%d has awakened: %s\n", th_gtid,
                 buffer);
  }
#endif

  __kmp_unlock_suspend_mx(th);
  KF_TRACE(30, ("__kmp_suspend_template: T#%d exit\n", th_gtid));
}
template <bool C, bool S>
void __kmp_suspend_32(int th_gtid, kmp_flag_32<C, S> *flag) {
  __kmp_suspend_template(th_gtid, flag);
}
template <bool C, bool S>
void __kmp_suspend_64(int th_gtid, kmp_flag_64<C, S> *flag) {
  __kmp_suspend_template(th_gtid, flag);
}
template <bool C, bool S>
void __kmp_atomic_suspend_64(int th_gtid, kmp_atomic_flag_64<C, S> *flag) {
  __kmp_suspend_template(th_gtid, flag);
}
void __kmp_suspend_oncore(int th_gtid, kmp_flag_oncore *flag) {
  __kmp_suspend_template(th_gtid, flag);
}

template void __kmp_suspend_32<false, false>(int, kmp_flag_32<false, false> *);
template void __kmp_suspend_64<false, true>(int, kmp_flag_64<false, true> *);
template void __kmp_suspend_64<true, false>(int, kmp_flag_64<true, false> *);
template void
__kmp_atomic_suspend_64<false, true>(int, kmp_atomic_flag_64<false, true> *);
template void
__kmp_atomic_suspend_64<true, false>(int, kmp_atomic_flag_64<true, false> *);
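/* Resume protocol: under the target's suspend mutex, locate the flag the
   thread actually sleeps on (it may differ from the caller's), clear the
   sleep bit, and signal the condition variable. */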
template <class C>
static inline void __kmp_resume_template(int target_gtid, C *flag) {
  KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_resume);
  kmp_info_t *th = __kmp_threads[target_gtid];
  int status;

#ifdef KMP_DEBUG
  int gtid = TCR_4(__kmp_init_gtid) ? __kmp_get_gtid() : -1;
#endif

  KF_TRACE(30, ("__kmp_resume_template: T#%d wants to wakeup T#%d enter\n",
                gtid, target_gtid));
  KMP_DEBUG_ASSERT(gtid != target_gtid);

  __kmp_suspend_initialize_thread(th);

  __kmp_lock_suspend_mx(th);

  if (!flag || flag != th->th.th_sleep_loc) {
    // Coming from __kmp_null_resume_wrapper, or the thread is now sleeping on
    // a different location; wake up at the new location.
    flag = (C *)CCAST(void *, th->th.th_sleep_loc);
  }

  // First, check if the flag is null or its type has changed. If so, someone
  // else woke it up.
  if (!flag) { // Thread doesn't appear to be sleeping on anything.
    KF_TRACE(5, ("__kmp_resume_template: T#%d exiting, thread T#%d already "
                 "awake: flag(%p)\n",
                 gtid, target_gtid, (void *)NULL));
    __kmp_unlock_suspend_mx(th);
    return;
  } else if (flag->get_type() != th->th.th_sleep_loc_type) {
    // Flag type does not appear to match this function template; possibly the
    // thread is sleeping on something else. Try null resume again.
    KF_TRACE(
        5,
        ("__kmp_resume_template: T#%d retrying, thread T#%d Mismatch flag(%p), "
         "spin(%p) type=%d ptr_type=%d\n",
         gtid, target_gtid, flag, flag->get(), flag->get_type(),
         th->th.th_sleep_loc_type));
    __kmp_unlock_suspend_mx(th);
    __kmp_null_resume_wrapper(th);
    return;
  } else { // If multiple threads are sleeping, the flag should refer to a
    // specific thread here.
    if (!flag->is_sleeping()) {
      KF_TRACE(5, ("__kmp_resume_template: T#%d exiting, thread T#%d already "
                   "awake: flag(%p): %u\n",
                   gtid, target_gtid, flag->get(), (unsigned int)flag->load()));
      __kmp_unlock_suspend_mx(th);
      return;
    }
  }
  KMP_DEBUG_ASSERT(flag);
  flag->unset_sleeping();
  TCW_PTR(th->th.th_sleep_loc, NULL);
  th->th.th_sleep_loc_type = flag_unset;

  KF_TRACE(5, ("__kmp_resume_template: T#%d about to wakeup T#%d, reset "
               "sleep bit for flag's loc(%p): %u\n",
               gtid, target_gtid, flag->get(), (unsigned int)flag->load()));

#ifdef DEBUG_SUSPEND
  {
    char buffer[128];
    __kmp_print_cond(buffer, &th->th.th_suspend_cv);
    __kmp_printf("__kmp_resume_template: T#%d resuming T#%d: %s\n", gtid,
                 target_gtid, buffer);
  }
#endif

  status = pthread_cond_signal(&th->th.th_suspend_cv.c_cond);
  KMP_CHECK_SYSFAIL("pthread_cond_signal", status);
  __kmp_unlock_suspend_mx(th);
  KF_TRACE(30, ("__kmp_resume_template: T#%d exiting after signaling wake up"
                " for T#%d\n",
                gtid, target_gtid));
}
template <bool C, bool S>
void __kmp_resume_32(int target_gtid, kmp_flag_32<C, S> *flag) {
  __kmp_resume_template(target_gtid, flag);
}
template <bool C, bool S>
void __kmp_resume_64(int target_gtid, kmp_flag_64<C, S> *flag) {
  __kmp_resume_template(target_gtid, flag);
}
template <bool C, bool S>
void __kmp_atomic_resume_64(int target_gtid, kmp_atomic_flag_64<C, S> *flag) {
  __kmp_resume_template(target_gtid, flag);
}
void __kmp_resume_oncore(int target_gtid, kmp_flag_oncore *flag) {
  __kmp_resume_template(target_gtid, flag);
}

template void __kmp_resume_32<false, true>(int, kmp_flag_32<false, true> *);
template void __kmp_resume_32<false, false>(int, kmp_flag_32<false, false> *);
template void __kmp_resume_64<false, true>(int, kmp_flag_64<false, true> *);
template void
__kmp_atomic_resume_64<false, true>(int, kmp_atomic_flag_64<false, true> *);
void __kmp_resume_monitor() {
  KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_resume);
  int status;
#ifdef KMP_DEBUG
  int gtid = TCR_4(__kmp_init_gtid) ? __kmp_get_gtid() : -1;
  KF_TRACE(30, ("__kmp_resume_monitor: T#%d wants to wakeup T#%d enter\n", gtid,
                KMP_GTID_MONITOR));
  KMP_DEBUG_ASSERT(gtid != KMP_GTID_MONITOR);
#endif
  status = pthread_mutex_lock(&__kmp_wait_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
#ifdef DEBUG_SUSPEND
  {
    char buffer[128];
    __kmp_print_cond(buffer, &__kmp_wait_cv.c_cond);
    __kmp_printf("__kmp_resume_monitor: T#%d resuming T#%d: %s\n", gtid,
                 KMP_GTID_MONITOR, buffer);
  }
#endif
  status = pthread_cond_signal(&__kmp_wait_cv.c_cond);
  KMP_CHECK_SYSFAIL("pthread_cond_signal", status);
  status = pthread_mutex_unlock(&__kmp_wait_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
  KF_TRACE(30, ("__kmp_resume_monitor: T#%d exiting after signaling wake up"
                " for T#%d\n",
                gtid, KMP_GTID_MONITOR));
}
void __kmp_yield() { sched_yield(); }

void __kmp_gtid_set_specific(int gtid) {
  if (__kmp_init_gtid) {
    int status;
    status = pthread_setspecific(__kmp_gtid_threadprivate_key,
                                 (void *)(intptr_t)(gtid + 1));
    KMP_CHECK_SYSFAIL("pthread_setspecific", status);
  } else {
    KA_TRACE(50, ("__kmp_gtid_set_specific: runtime shutdown, returning\n"));
  }
}

int __kmp_gtid_get_specific() {
  int gtid;
  if (!__kmp_init_gtid) {
    KA_TRACE(50, ("__kmp_gtid_get_specific: runtime shutdown, returning "
                  "KMP_GTID_SHUTDOWN\n"));
    return KMP_GTID_SHUTDOWN;
  }
  gtid = (int)(size_t)pthread_getspecific(__kmp_gtid_threadprivate_key);
  if (gtid == 0) {
    gtid = KMP_GTID_DNE;
  } else {
    gtid--; // The key stores gtid + 1 so that 0 means "unset".
  }
  KA_TRACE(50, ("__kmp_gtid_get_specific: key:%d gtid:%d\n",
                __kmp_gtid_threadprivate_key, gtid));
  return gtid;
}
double __kmp_read_cpu_time(void) {
  struct tms buffer;

  times(&buffer);

  return (double)(buffer.tms_utime + buffer.tms_cutime) /
         (double)CLOCKS_PER_SEC;
}
int __kmp_read_system_info(struct kmp_sys_info *info) {
  int status;
  struct rusage r_usage;

  memset(info, 0, sizeof(*info));

  status = getrusage(RUSAGE_SELF, &r_usage);
  KMP_CHECK_SYSFAIL_ERRNO("getrusage", status);

  // Maximum resident set size utilized (in kilobytes)
  info->maxrss = r_usage.ru_maxrss;
  // Number of page faults serviced without any I/O
  info->minflt = r_usage.ru_minflt;
  // Number of page faults serviced that required I/O
  info->majflt = r_usage.ru_majflt;
  // Number of times a process was "swapped" out of memory
  info->nswap = r_usage.ru_nswap;
  // Number of times the file system had to perform input
  info->inblock = r_usage.ru_inblock;
  // Number of times the file system had to perform output
  info->oublock = r_usage.ru_oublock;
  // Number of voluntary context switches
  info->nvcsw = r_usage.ru_nvcsw;
  // Number of involuntary context switches
  info->nivcsw = r_usage.ru_nivcsw;

  return (status != 0);
}
void __kmp_read_system_time(double *delta) {
  double t_ns;
  struct timeval tval;
  struct timespec stop;
  int status;

  status = gettimeofday(&tval, NULL);
  KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
  TIMEVAL_TO_TIMESPEC(&tval, &stop);
  t_ns = (double)(TS2NS(stop) - TS2NS(__kmp_sys_timer_data.start));
  *delta = (t_ns * 1e-9);
}

void __kmp_clear_system_time(void) {
  struct timeval tval;
  int status;
  status = gettimeofday(&tval, NULL);
  KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
  TIMEVAL_TO_TIMESPEC(&tval, &__kmp_sys_timer_data.start);
}
static int __kmp_get_xproc(void) {
  int r = 0;

#if KMP_OS_LINUX

  __kmp_type_convert(sysconf(_SC_NPROCESSORS_CONF), &(r));

#elif KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_OPENBSD || \
    KMP_OS_HURD

  __kmp_type_convert(sysconf(_SC_NPROCESSORS_ONLN), &(r));

#elif KMP_OS_DARWIN

  // Find the number of available CPUs.
  kern_return_t rc;
  host_basic_info_data_t info;
  mach_msg_type_number_t num = HOST_BASIC_INFO_COUNT;
  rc = host_info(mach_host_self(), HOST_BASIC_INFO, (host_info_t)&info, &num);
  if (rc == 0 && num == HOST_BASIC_INFO_COUNT) {
    r = info.avail_cpus;
  } else {
    KMP_WARNING(CantGetNumAvailCPU);
    KMP_INFORM(AssumedNumCPU);
  }

#else

#error "Unknown or unsupported OS."

#endif

  return r > 0 ? r : 2; /* guess value of 2 if OS told us 0 */
}
int __kmp_read_from_file(char const *path, char const *format, ...) {
  int result;
  va_list args;

  va_start(args, format);
  FILE *f = fopen(path, "rb");
  if (f == NULL) {
    va_end(args);
    return 0;
  }
  result = vfscanf(f, format, args);
  fclose(f);
  va_end(args);

  return result;
}
void __kmp_runtime_initialize(void) {
  int status;
  pthread_mutexattr_t mutex_attr;
  pthread_condattr_t cond_attr;

  if (__kmp_init_runtime) {
    return;
  }

#if (KMP_ARCH_X86 || KMP_ARCH_X86_64)
  if (!__kmp_cpuinfo.initialized) {
    __kmp_query_cpuid(&__kmp_cpuinfo);
  }
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

  __kmp_xproc = __kmp_get_xproc();

  // Read the stack size of the calling thread and save it as the default for
  // worker threads; this should be done before reading environment variables.
  struct rlimit rlim;
  status = getrlimit(RLIMIT_STACK, &rlim);
  if (status == 0) { // success?
    __kmp_stksize = rlim.rlim_cur;
    __kmp_check_stksize(&__kmp_stksize); // check value and adjust if needed
  }

  if (sysconf(_SC_THREADS)) {

    /* Query the maximum number of threads */
    __kmp_type_convert(sysconf(_SC_THREAD_THREADS_MAX), &(__kmp_sys_max_nth));
    if (__kmp_sys_max_nth == -1) {
      /* Unlimited threads for NPTL */
      __kmp_sys_max_nth = INT_MAX;
    } else if (__kmp_sys_max_nth <= 1) {
      /* Can't tell, just use PTHREAD_THREADS_MAX */
      __kmp_sys_max_nth = KMP_MAX_NTH;
    }

    /* Query the minimum stack size */
    __kmp_sys_min_stksize = sysconf(_SC_THREAD_STACK_MIN);
    if (__kmp_sys_min_stksize <= 1) {
      __kmp_sys_min_stksize = KMP_MIN_STKSIZE;
    }
  }

  /* Set up minimum number of threads to switch to TLS gtid */
  __kmp_tls_gtid_min = KMP_TLS_GTID_MIN;

  status = pthread_key_create(&__kmp_gtid_threadprivate_key,
                              __kmp_internal_end_dest);
  KMP_CHECK_SYSFAIL("pthread_key_create", status);
  status = pthread_mutexattr_init(&mutex_attr);
  KMP_CHECK_SYSFAIL("pthread_mutexattr_init", status);
  status = pthread_mutex_init(&__kmp_wait_mx.m_mutex, &mutex_attr);
  KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
  status = pthread_mutexattr_destroy(&mutex_attr);
  KMP_CHECK_SYSFAIL("pthread_mutexattr_destroy", status);
  status = pthread_condattr_init(&cond_attr);
  KMP_CHECK_SYSFAIL("pthread_condattr_init", status);
  status = pthread_cond_init(&__kmp_wait_cv.c_cond, &cond_attr);
  KMP_CHECK_SYSFAIL("pthread_cond_init", status);
  status = pthread_condattr_destroy(&cond_attr);
  KMP_CHECK_SYSFAIL("pthread_condattr_destroy", status);
#if USE_ITT_BUILD
  __kmp_itt_initialize();
#endif /* USE_ITT_BUILD */

  __kmp_init_runtime = TRUE;
}
void __kmp_runtime_destroy(void) {
  int status;

  if (!__kmp_init_runtime) {
    return; // Nothing to do.
  }

#if USE_ITT_BUILD
  __kmp_itt_destroy();
#endif /* USE_ITT_BUILD */

  status = pthread_key_delete(__kmp_gtid_threadprivate_key);
  KMP_CHECK_SYSFAIL("pthread_key_delete", status);

  status = pthread_mutex_destroy(&__kmp_wait_mx.m_mutex);
  if (status != 0 && status != EBUSY) {
    KMP_SYSFAIL("pthread_mutex_destroy", status);
  }
  status = pthread_cond_destroy(&__kmp_wait_cv.c_cond);
  if (status != 0 && status != EBUSY) {
    KMP_SYSFAIL("pthread_cond_destroy", status);
  }
#if KMP_AFFINITY_SUPPORTED
  __kmp_affinity_uninitialize();
#endif

  __kmp_init_runtime = FALSE;
}
/* Put the thread to sleep for a time period */
void __kmp_thread_sleep(int millis) { sleep((millis + 500) / 1000); }

/* Calculate the elapsed wall clock time for the user */
void __kmp_elapsed(double *t) {
  int status;
#ifdef FIX_SGI_CLOCK
  struct timespec ts;

  status = clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts);
  KMP_CHECK_SYSFAIL_ERRNO("clock_gettime", status);
  *t = (double)ts.tv_nsec * (1.0 / (double)KMP_NSEC_PER_SEC) +
       (double)ts.tv_sec;
#else
  struct timeval tv;

  status = gettimeofday(&tv, NULL);
  KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
  *t = (double)tv.tv_usec * (1.0 / (double)KMP_USEC_PER_SEC) +
       (double)tv.tv_sec;
#endif
}

/* Calculate the elapsed wall clock tick for the user */
void __kmp_elapsed_tick(double *t) { *t = 1 / (double)CLOCKS_PER_SEC; }
/* Return the current time stamp in nsec. */
kmp_uint64 __kmp_now_nsec() {
  struct timeval t;
  gettimeofday(&t, NULL);
  kmp_uint64 nsec = (kmp_uint64)KMP_NSEC_PER_SEC * (kmp_uint64)t.tv_sec +
                    (kmp_uint64)1000 * (kmp_uint64)t.tv_usec;
  return nsec;
}

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
/* Measure clock ticks per millisecond. */
void __kmp_initialize_system_tick() {
  kmp_uint64 now, nsec2, diff;
  kmp_uint64 delay = 100000; // ~50-100 usec on most machines.
  kmp_uint64 nsec = __kmp_now_nsec();
  kmp_uint64 goal = __kmp_hardware_timestamp() + delay;
  while ((now = __kmp_hardware_timestamp()) < goal)
    ;
  nsec2 = __kmp_now_nsec();
  diff = nsec2 - nsec;
  if (diff > 0) {
    kmp_uint64 tpms = ((kmp_uint64)1e6 * (delay + (now - goal)) / diff);
    if (tpms > 0)
      __kmp_ticks_per_msec = tpms;
  }
}
#endif
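// Check whether addr is mapped readable and writable in this process: parse
// /proc/<pid>/maps on Linux/Hurd, use sysctl vm-map queries on the BSDs, and
// attempt vm_read_overwrite on Darwin.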
int __kmp_is_address_mapped(void *addr) {

  int found = 0;
  int rc;

#if KMP_OS_LINUX || KMP_OS_HURD

  /* On GNUish OSes, read the /proc/<pid>/maps pseudo-file to get all the
     address ranges mapped into the address space. */

  char *name = __kmp_str_format("/proc/%d/maps", getpid());
  FILE *file = NULL;

  file = fopen(name, "r");
  KMP_ASSERT(file != NULL);

  for (;;) {

    void *beginning = NULL;
    void *ending = NULL;
    char perms[5];

    rc = fscanf(file, "%p-%p %4s %*[^\n]\n", &beginning, &ending, perms);
    if (rc == EOF) {
      break;
    }
    KMP_ASSERT(rc == 3 &&
               KMP_STRLEN(perms) == 4); // Make sure all fields are read.

    // Ending address is not included in the region, but beginning is.
    if ((addr >= beginning) && (addr < ending)) {
      perms[2] = 0; // 3rd and 4th character does not matter.
      if (strcmp(perms, "rw") == 0) {
        // Memory we are looking for should be readable and writable.
        found = 1;
      }
      break;
    }
  }

  fclose(file);
  KMP_INTERNAL_FREE(name);
#elif KMP_OS_FREEBSD
  char *buf;
  size_t lstsz;
  int mib[] = {CTL_KERN, KERN_PROC, KERN_PROC_VMMAP, getpid()};
  rc = sysctl(mib, 4, NULL, &lstsz, NULL, 0);
  if (rc < 0)
    return 0;
  // We pass the size
  lstsz = lstsz * 4 / 3;
  buf = reinterpret_cast<char *>(kmpc_malloc(lstsz));
  rc = sysctl(mib, 4, buf, &lstsz, NULL, 0);
  if (rc < 0) {
    kmpc_free(buf);
    return 0;
  }

  char *lw = buf;
  char *up = buf + lstsz;

  while (lw < up) {
    struct kinfo_vmentry *cur = reinterpret_cast<struct kinfo_vmentry *>(lw);
    size_t cursz = cur->kve_structsize;
    if (cursz == 0)
      break;
    void *start = reinterpret_cast<void *>(cur->kve_start);
    void *end = reinterpret_cast<void *>(cur->kve_end);
    // Readable/writable addresses within current map entry
    if ((addr >= start) && (addr < end)) {
      if ((cur->kve_protection & KVME_PROT_READ) != 0 &&
          (cur->kve_protection & KVME_PROT_WRITE) != 0) {
        found = 1;
        break;
      }
    }
    lw += cursz;
  }
  kmpc_free(buf);

#elif KMP_OS_DARWIN

  /* On OS X*, /proc pseudo filesystem is not available. Try to read memory
     using the vm interface. */

  int buffer;
  vm_size_t count;
  rc = vm_read_overwrite(
      mach_task_self(), // Task to read memory of.
      (vm_address_t)(addr), // Address to read from.
      1, // Number of bytes to be read.
      (vm_address_t)(&buffer), // Address of buffer to save read bytes in.
      &count // Address of var to save number of read bytes in.
  );
  if (rc == 0) {
    // Memory successfully read.
    found = 1;
  }

#elif KMP_OS_NETBSD

  int mib[5];
  mib[0] = CTL_VM;
  mib[1] = VM_PROC;
  mib[2] = VM_PROC_MAP;
  mib[3] = getpid();
  mib[4] = sizeof(struct kinfo_vmentry);

  size_t size;
  rc = sysctl(mib, __arraycount(mib), NULL, &size, NULL, 0);
  KMP_ASSERT(!rc);
  KMP_ASSERT(size);

  size = size * 4 / 3;
  struct kinfo_vmentry *kiv =
      (struct kinfo_vmentry *)KMP_INTERNAL_MALLOC(size);
  KMP_ASSERT(kiv);

  rc = sysctl(mib, __arraycount(mib), kiv, &size, NULL, 0);
  KMP_ASSERT(!rc);
  KMP_ASSERT(size);

  for (size_t i = 0; i < size; i++) {
    if (kiv[i].kve_start >= (uint64_t)addr &&
        kiv[i].kve_end <= (uint64_t)addr) {
      found = 1;
      break;
    }
  }
  KMP_INTERNAL_FREE(kiv);
#elif KMP_OS_OPENBSD

  int mib[3];
  mib[0] = CTL_KERN;
  mib[1] = KERN_PROC_VMMAP;
  mib[2] = getpid();

  size_t size;
  uint64_t end;
  rc = sysctl(mib, 3, NULL, &size, NULL, 0);
  KMP_ASSERT(!rc);
  KMP_ASSERT(size);
  end = size;

  struct kinfo_vmentry kiv = {.kve_start = 0};

  while ((rc = sysctl(mib, 3, &kiv, &size, NULL, 0)) == 0) {
    KMP_ASSERT(size);
    if (kiv.kve_end == end)
      break;

    if (kiv.kve_start >= (uint64_t)addr && kiv.kve_end <= (uint64_t)addr) {
      found = 1;
      break;
    }
    kiv.kve_start += 1;
  }
#elif KMP_OS_DRAGONFLY

  found = 1; // FIXME(DragonFly): Implement this.

#else

#error "Unknown or unsupported OS"

#endif

  return found;
}
#ifdef USE_LOAD_BALANCE

#if KMP_OS_DARWIN || KMP_OS_NETBSD

// The function returns the rounded value of the system load average during
// the given time interval, which depends on __kmp_load_balance_interval
// (default is 60 sec; other values may be 300 sec or 900 sec). It returns -1
// in case of error.
int __kmp_get_load_balance(int max) {
  double averages[3];
  int ret_avg = 0;

  int res = getloadavg(averages, 3);

  // Check __kmp_load_balance_interval to determine which of the averages to
  // use. getloadavg() may return fewer samples than the requested 3.
  if (__kmp_load_balance_interval < 180 && (res >= 1)) {
    ret_avg = (int)averages[0]; // 1 min
  } else if ((__kmp_load_balance_interval >= 180 &&
              __kmp_load_balance_interval < 600) &&
             (res >= 2)) {
    ret_avg = (int)averages[1]; // 5 min
  } else if ((__kmp_load_balance_interval >= 600) && (res == 3)) {
    ret_avg = (int)averages[2]; // 15 min
  } else { // Error occurred
    return -1;
  }

  return ret_avg;
}

#else // Linux* OS
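// Linux version: count running (state 'R') threads by scanning
// /proc/<pid>/task/<tid>/stat for every process, caching the result for
// __kmp_load_balance_interval seconds and bailing out once max is reached.
// It returns -1 in case of error (e.g. a kernel without "/proc" support).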
int __kmp_get_load_balance(int max) {
  static int permanent_error = 0;
  static int glb_running_threads = 0; // Saved count of the running threads
  static double glb_call_time = 0; // Thread balance algorithm call time

  int running_threads = 0; // Number of running threads in the system.

  DIR *proc_dir = NULL; // Handle of "/proc/" directory.
  struct dirent *proc_entry = NULL;

  kmp_str_buf_t task_path; // "/proc/<pid>/task/<tid>/" path.
  DIR *task_dir = NULL; // Handle of "/proc/<pid>/task/<tid>/" directory.
  struct dirent *task_entry = NULL;
  int task_path_fixed_len;

  kmp_str_buf_t stat_path; // "/proc/<pid>/task/<tid>/stat" path.
  int stat_file = -1;
  int stat_path_fixed_len;

#ifdef KMP_DEBUG
  int total_processes = 0; // Total number of processes in system.
#endif

  double call_time = 0.0;

  __kmp_str_buf_init(&task_path);
  __kmp_str_buf_init(&stat_path);

  __kmp_elapsed(&call_time);

  if (glb_call_time &&
      (call_time - glb_call_time < __kmp_load_balance_interval)) {
    running_threads = glb_running_threads;
    goto finish;
  }

  glb_call_time = call_time;

  // Do not spend time on scanning "/proc/" if we have a permanent error.
  if (permanent_error) {
    running_threads = -1;
    goto finish;
  }

  // Open "/proc/" directory.
  proc_dir = opendir("/proc");
  if (proc_dir == NULL) {
    // Cannot open "/proc/". Probably the kernel does not support it. Return
    // an error now and in subsequent calls.
    running_threads = -1;
    permanent_error = 1;
    goto finish;
  }

  // Initialize the fixed part of task_path. This part will not change.
  __kmp_str_buf_cat(&task_path, "/proc/", 6);
  task_path_fixed_len = task_path.used; // Remember number of used characters.

  proc_entry = readdir(proc_dir);
  while (proc_entry != NULL) {
    // Proc entry is a directory and its name starts with a digit. Assume it
    // is a process' directory.
    if (proc_entry->d_type == DT_DIR && isdigit(proc_entry->d_name[0])) {

#ifdef KMP_DEBUG
      ++total_processes;
#endif
      // Make sure the init process is the very first entry in "/proc".
      KMP_DEBUG_ASSERT(total_processes != 1 ||
                       strcmp(proc_entry->d_name, "1") == 0);

      // Construct task_path.
      task_path.used = task_path_fixed_len; // Reset task_path to "/proc/".
      __kmp_str_buf_cat(&task_path, proc_entry->d_name,
                        KMP_STRLEN(proc_entry->d_name));
      __kmp_str_buf_cat(&task_path, "/task", 5);

      task_dir = opendir(task_path.str);
      if (task_dir == NULL) {
        // A process can finish between reading the "/proc/" entry and opening
        // its "task/" directory, so skip it; but a missing "task/" for the
        // init process means the kernel has no "task/" support at all.
        if (strcmp(proc_entry->d_name, "1") == 0) {
          running_threads = -1;
          permanent_error = 1;
          goto finish;
        }
      } else {
        // Construct the fixed part of the stat file path.
        __kmp_str_buf_clear(&stat_path);
        __kmp_str_buf_cat(&stat_path, task_path.str, task_path.used);
        __kmp_str_buf_cat(&stat_path, "/", 1);
        stat_path_fixed_len = stat_path.used;

        task_entry = readdir(task_dir);
        while (task_entry != NULL) {
          // It is a directory and its name starts with a digit.
          if (proc_entry->d_type == DT_DIR && isdigit(task_entry->d_name[0])) {

            // Construct the complete stat file path.
            stat_path.used = stat_path_fixed_len;
            __kmp_str_buf_cat(&stat_path, task_entry->d_name,
                              KMP_STRLEN(task_entry->d_name));
            __kmp_str_buf_cat(&stat_path, "/stat", 5);

            // Note: the low-level API (open/read/close) is used; the
            // high-level one (fopen/fclose) works noticeably slower.
            stat_file = open(stat_path.str, O_RDONLY);
            if (stat_file == -1) {
              // Cannot report an error: the task (thread) may have terminated
              // just before reading this file.
            } else {
              // The thread state is the third field of the "stat" file,
              // located right after the closing parenthesis of "(comm)".
              char buffer[65];
              ssize_t len;
              len = read(stat_file, buffer, sizeof(buffer) - 1);
              if (len >= 0) {
                buffer[len] = 0;
                char *close_parent = strstr(buffer, ") ");
                if (close_parent != NULL) {
                  char state = *(close_parent + 2);
                  if (state == 'R') {
                    ++running_threads;
                    if (running_threads >= max) {
                      goto finish;
                    }
                  }
                }
              }
              close(stat_file);
              stat_file = -1;
            }
          }
          task_entry = readdir(task_dir);
        }
        closedir(task_dir);
        task_dir = NULL;
      }
    }
    proc_entry = readdir(proc_dir);
  }

  // There _might_ be a timing hole where the thread executing this code gets
  // skipped in the load balance and running_threads is 0. Assert in the debug
  // build only!
  KMP_DEBUG_ASSERT(running_threads > 0);
  if (running_threads <= 0) {
    running_threads = 1;
  }

finish: // Clean up and exit.
  if (proc_dir != NULL) {
    closedir(proc_dir);
  }
  __kmp_str_buf_free(&task_path);
  if (task_dir != NULL) {
    closedir(task_dir);
  }
  __kmp_str_buf_free(&stat_path);
  if (stat_file != -1) {
    close(stat_file);
  }

  glb_running_threads = running_threads;

  return running_threads;
} // __kmp_get_load_balance

#endif // KMP_OS_DARWIN || KMP_OS_NETBSD
#endif // USE_LOAD_BALANCE
#if !(KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_MIC ||                            \
      ((KMP_OS_LINUX || KMP_OS_DARWIN) && KMP_ARCH_AARCH64) ||                 \
      KMP_ARCH_PPC64 || KMP_ARCH_RISCV64 || KMP_ARCH_LOONGARCH64)
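// Plain C fallback for architectures without an assembly microtask invoker:
// dispatch on argc and pass the outlined function its gtid/tid plus up to 15
// arguments explicitly.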
int __kmp_invoke_microtask(microtask_t pkfn, int gtid, int tid, int argc,
                           void *p_argv[]
#if OMPT_SUPPORT
                           ,
                           void **exit_frame_ptr
#endif
) {
#if OMPT_SUPPORT
  *exit_frame_ptr = OMPT_GET_FRAME_ADDRESS(0);
#endif

  switch (argc) {
  default:
    fprintf(stderr, "Too many args to microtask: %d!\n", argc);
    fflush(stderr);
    exit(-1);
  case 0:
    (*pkfn)(&gtid, &tid);
    break;
  case 1:
    (*pkfn)(&gtid, &tid, p_argv[0]);
    break;
  case 2:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1]);
    break;
  case 3:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2]);
    break;
  case 4:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3]);
    break;
  case 5:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4]);
    break;
  case 6:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5]);
    break;
  case 7:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6]);
    break;
  case 8:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7]);
    break;
  case 9:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8]);
    break;
  case 10:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9]);
    break;
  case 11:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10]);
    break;
  case 12:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
            p_argv[11]);
    break;
  case 13:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
            p_argv[11], p_argv[12]);
    break;
  case 14:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
            p_argv[11], p_argv[12], p_argv[13]);
    break;
  case 15:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
            p_argv[11], p_argv[12], p_argv[13], p_argv[14]);
    break;
  }

  return 1;
}

#endif
#if KMP_OS_LINUX || KMP_OS_FREEBSD
// Functions for hidden helper task
namespace {
// Condition variable and mutex for initialization of hidden helper threads.
pthread_cond_t hidden_helper_threads_initz_cond_var;
pthread_mutex_t hidden_helper_threads_initz_lock;
volatile int hidden_helper_initz_signaled = FALSE;

// Condition variable and mutex for deinitialization of hidden helper threads.
pthread_cond_t hidden_helper_threads_deinitz_cond_var;
pthread_mutex_t hidden_helper_threads_deinitz_lock;
volatile int hidden_helper_deinitz_signaled = FALSE;

// Condition variable and mutex for the main thread of hidden helper threads.
pthread_cond_t hidden_helper_main_thread_cond_var;
pthread_mutex_t hidden_helper_main_thread_lock;
volatile int hidden_helper_main_thread_signaled = FALSE;

// Semaphore for worker threads. A condition variable is not used here: when
// multiple signals are sent at the same time, only one thread might be woken.
sem_t hidden_helper_task_sem;
} // namespace
void __kmp_hidden_helper_worker_thread_wait() {
  int status = sem_wait(&hidden_helper_task_sem);
  KMP_CHECK_SYSFAIL("sem_wait", status);
}
void __kmp_do_initialize_hidden_helper_threads() {
  // Initialize condition variables
  int status =
      pthread_cond_init(&hidden_helper_threads_initz_cond_var, nullptr);
  KMP_CHECK_SYSFAIL("pthread_cond_init", status);

  status = pthread_cond_init(&hidden_helper_threads_deinitz_cond_var, nullptr);
  KMP_CHECK_SYSFAIL("pthread_cond_init", status);

  status = pthread_cond_init(&hidden_helper_main_thread_cond_var, nullptr);
  KMP_CHECK_SYSFAIL("pthread_cond_init", status);

  // Initialize mutexes
  status = pthread_mutex_init(&hidden_helper_threads_initz_lock, nullptr);
  KMP_CHECK_SYSFAIL("pthread_mutex_init", status);

  status = pthread_mutex_init(&hidden_helper_threads_deinitz_lock, nullptr);
  KMP_CHECK_SYSFAIL("pthread_mutex_init", status);

  status = pthread_mutex_init(&hidden_helper_main_thread_lock, nullptr);
  KMP_CHECK_SYSFAIL("pthread_mutex_init", status);

  // Initialize the semaphore
  status = sem_init(&hidden_helper_task_sem, 0, 0);
  KMP_CHECK_SYSFAIL("sem_init", status);

  // Create a new thread to finish the initialization
  pthread_t handle;
  status = pthread_create(
      &handle, nullptr,
      [](void *) -> void * {
        __kmp_hidden_helper_threads_initz_routine();
        return nullptr;
      },
      nullptr);
  KMP_CHECK_SYSFAIL("pthread_create", status);
}
void __kmp_hidden_helper_threads_initz_wait() {
  // The initial thread waits here for the completion of the initialization;
  // the condition variable is notified by the main hidden helper thread.
  int status = pthread_mutex_lock(&hidden_helper_threads_initz_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);

  if (!TCR_4(hidden_helper_initz_signaled)) {
    status = pthread_cond_wait(&hidden_helper_threads_initz_cond_var,
                               &hidden_helper_threads_initz_lock);
    KMP_CHECK_SYSFAIL("pthread_cond_wait", status);
  }

  status = pthread_mutex_unlock(&hidden_helper_threads_initz_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
}
void __kmp_hidden_helper_initz_release() {
  // Signal the initial thread waiting in __kmp_hidden_helper_threads_initz_wait.
  int status = pthread_mutex_lock(&hidden_helper_threads_initz_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);

  status = pthread_cond_signal(&hidden_helper_threads_initz_cond_var);
  KMP_CHECK_SYSFAIL("pthread_cond_signal", status);

  TCW_SYNC_4(hidden_helper_initz_signaled, TRUE);

  status = pthread_mutex_unlock(&hidden_helper_threads_initz_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
}
void __kmp_hidden_helper_main_thread_wait() {
  // The main thread of hidden helper threads waits here for the completion of
  // hidden helper tasks.
  int status = pthread_mutex_lock(&hidden_helper_main_thread_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);

  if (!TCR_4(hidden_helper_main_thread_signaled)) {
    status = pthread_cond_wait(&hidden_helper_main_thread_cond_var,
                               &hidden_helper_main_thread_lock);
    KMP_CHECK_SYSFAIL("pthread_cond_wait", status);
  }

  status = pthread_mutex_unlock(&hidden_helper_main_thread_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
}
void __kmp_hidden_helper_main_thread_release() {
  // Signal the main thread of hidden helper threads that the hidden helper
  // tasks are done.
  int status = pthread_mutex_lock(&hidden_helper_main_thread_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);

  status = pthread_cond_signal(&hidden_helper_main_thread_cond_var);
  KMP_CHECK_SYSFAIL("pthread_cond_signal", status);

  TCW_SYNC_4(hidden_helper_main_thread_signaled, TRUE);

  status = pthread_mutex_unlock(&hidden_helper_main_thread_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
}
void __kmp_hidden_helper_worker_thread_signal() {
  int status = sem_post(&hidden_helper_task_sem);
  KMP_CHECK_SYSFAIL("sem_post", status);
}
void __kmp_hidden_helper_threads_deinitz_wait() {
  // Wait until the hidden helper threads have been deinitialized.
  int status = pthread_mutex_lock(&hidden_helper_threads_deinitz_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);

  if (!TCR_4(hidden_helper_deinitz_signaled)) {
    status = pthread_cond_wait(&hidden_helper_threads_deinitz_cond_var,
                               &hidden_helper_threads_deinitz_lock);
    KMP_CHECK_SYSFAIL("pthread_cond_wait", status);
  }

  status = pthread_mutex_unlock(&hidden_helper_threads_deinitz_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
}
void __kmp_hidden_helper_threads_deinitz_release() {
  int status = pthread_mutex_lock(&hidden_helper_threads_deinitz_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);

  status = pthread_cond_signal(&hidden_helper_threads_deinitz_cond_var);
  KMP_CHECK_SYSFAIL("pthread_cond_signal", status);

  TCW_SYNC_4(hidden_helper_deinitz_signaled, TRUE);

  status = pthread_mutex_unlock(&hidden_helper_threads_deinitz_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
}
#else // KMP_OS_LINUX || KMP_OS_FREEBSD
void __kmp_hidden_helper_worker_thread_wait() {
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
}

void __kmp_do_initialize_hidden_helper_threads() {
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
}

void __kmp_hidden_helper_threads_initz_wait() {
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
}

void __kmp_hidden_helper_initz_release() {
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
}

void __kmp_hidden_helper_main_thread_wait() {
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
}

void __kmp_hidden_helper_main_thread_release() {
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
}

void __kmp_hidden_helper_worker_thread_signal() {
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
}

void __kmp_hidden_helper_threads_deinitz_wait() {
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
}

void __kmp_hidden_helper_threads_deinitz_release() {
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
}
#endif // KMP_OS_LINUX || KMP_OS_FREEBSD