#include "kmp_affinity.h"
#include "kmp_stats.h"
#include "kmp_wait_release.h"
#include "kmp_wrapper_getpid.h"

#if !KMP_OS_DRAGONFLY && !KMP_OS_FREEBSD && !KMP_OS_NETBSD && !KMP_OS_OPENBSD
#include <alloca.h>
#endif
#include <sys/resource.h>
#include <sys/syscall.h>
#include <sys/times.h>

#if KMP_OS_LINUX && !KMP_OS_CNK
#include <sys/sysinfo.h>
#elif KMP_OS_DARWIN
#include <mach/mach.h>
#include <sys/sysctl.h>
#elif KMP_OS_DRAGONFLY || KMP_OS_FREEBSD
#include <pthread_np.h>
#elif KMP_OS_NETBSD
#include <sys/types.h>
#include <sys/sysctl.h>
#endif

#include "tsan_annotations.h"
struct kmp_sys_timer {
  struct timespec start;
};

// Convert timespec to nanoseconds.
#define TS2NS(timespec) (((timespec).tv_sec * 1e9) + (timespec).tv_nsec)

static struct kmp_sys_timer __kmp_sys_timer_data;
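// Usage sketch (illustrative, not part of the runtime): because of the 1e9
// double literal, TS2NS evaluates to a double, so interval arithmetic is done
// in floating point:
//   struct timespec a = {1, 500000000}; // 1.5 s
//   double ns = TS2NS(a);               // 1.5e9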
#if KMP_HANDLE_SIGNALS
typedef void (*sig_func_t)(int);
STATIC_EFI2_WORKAROUND struct sigaction __kmp_sighldrs[NSIG];
static sigset_t __kmp_sigset;
#endif

static int __kmp_init_runtime = FALSE;

static int __kmp_fork_count = 0;
static pthread_condattr_t __kmp_suspend_cond_attr;
static pthread_mutexattr_t __kmp_suspend_mutex_attr;

static kmp_cond_align_t __kmp_wait_cv;
static kmp_mutex_align_t __kmp_wait_mx;

kmp_uint64 __kmp_ticks_per_msec = 1000000;
#ifdef DEBUG_SUSPEND
static void __kmp_print_cond(char *buffer, kmp_cond_align_t *cond) {
  KMP_SNPRINTF(buffer, 128, "(cond (lock (%ld, %d)), (descr (%p)))",
               cond->c_cond.__c_lock.__status, cond->c_cond.__c_lock.__spinlock,
               cond->c_cond.__c_waiting);
}
#endif
#if (KMP_OS_LINUX && KMP_AFFINITY_SUPPORTED)

/* Affinity support */

void __kmp_affinity_bind_thread(int which) {
  KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
              "Illegal set affinity operation when not capable");

  kmp_affin_mask_t *mask;
  KMP_CPU_ALLOC_ON_STACK(mask);
  KMP_CPU_ZERO(mask);
  KMP_CPU_SET(which, mask);
  __kmp_set_system_affinity(mask, TRUE);
  KMP_CPU_FREE_FROM_STACK(mask);
}
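// For reference, the same pin-to-one-CPU operation with the plain glibc API (a
// sketch, assuming _GNU_SOURCE and <sched.h>; the runtime goes through the
// kmp_affin_mask_t abstraction instead so the mask size can be probed at
// runtime):
//   cpu_set_t set;
//   CPU_ZERO(&set);
//   CPU_SET(which, &set);
//   sched_setaffinity(0, sizeof(set), &set);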
void __kmp_affinity_determine_capable(const char *env_var) {
// Check and see if the OS supports thread affinity.

#define KMP_CPU_SET_SIZE_LIMIT (1024 * 1024)

  int gCode;
  int sCode;
  unsigned char *buf;
  buf = (unsigned char *)KMP_INTERNAL_MALLOC(KMP_CPU_SET_SIZE_LIMIT);

  // If the syscall fails or returns a suggestion for the size, then we don't
  // have to search for an appropriate size.
  gCode = syscall(__NR_sched_getaffinity, 0, KMP_CPU_SET_SIZE_LIMIT, buf);
  KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                "initial getaffinity call returned %d errno = %d\n",
                gCode, errno));

  if (gCode < 0) {
    // System call not supported.
    if (__kmp_affinity_verbose ||
        (__kmp_affinity_warnings && (__kmp_affinity_type != affinity_none) &&
         (__kmp_affinity_type != affinity_default) &&
         (__kmp_affinity_type != affinity_disabled))) {
      int error = errno;
      kmp_msg_t err_code = KMP_ERR(error);
      __kmp_msg(kmp_ms_warning, KMP_MSG(GetAffSysCallNotSupported, env_var),
                err_code, __kmp_msg_null);
      if (__kmp_generate_warnings == kmp_warnings_off) {
        __kmp_str_free(&err_code.str);
      }
    }
    KMP_AFFINITY_DISABLE();
    KMP_INTERNAL_FREE(buf);
    return;
  }
  if (gCode > 0) {
    // The optimal situation: the OS returns the size of the buffer it expects.
    // Verify: setaffinity on a NULL buffer with that size must fail with errno
    // set to EFAULT.
    sCode = syscall(__NR_sched_setaffinity, 0, gCode, NULL);
    KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                  "setaffinity for mask size %d returned %d errno = %d\n",
                  gCode, sCode, errno));
    if (sCode < 0) {
      if (errno == ENOSYS) {
        if (__kmp_affinity_verbose ||
            (__kmp_affinity_warnings &&
             (__kmp_affinity_type != affinity_none) &&
             (__kmp_affinity_type != affinity_default) &&
             (__kmp_affinity_type != affinity_disabled))) {
          int error = errno;
          kmp_msg_t err_code = KMP_ERR(error);
          __kmp_msg(kmp_ms_warning,
                    KMP_MSG(SetAffSysCallNotSupported, env_var), err_code,
                    __kmp_msg_null);
          if (__kmp_generate_warnings == kmp_warnings_off) {
            __kmp_str_free(&err_code.str);
          }
        }
        KMP_AFFINITY_DISABLE();
        KMP_INTERNAL_FREE(buf);
        return;
      }
      if (errno == EFAULT) {
        KMP_AFFINITY_ENABLE(gCode);
        KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                      "affinity supported (mask size %d)\n",
                      (int)__kmp_affin_mask_size));
        KMP_INTERNAL_FREE(buf);
        return;
      }
    }
  }

  // Call the getaffinity system call repeatedly with increasing set sizes
  // until we succeed, or reach an upper bound on the search.
  KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                "searching for proper set size\n"));
  int size;
  for (size = 1; size <= KMP_CPU_SET_SIZE_LIMIT; size *= 2) {
    gCode = syscall(__NR_sched_getaffinity, 0, size, buf);
    KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                  "getaffinity for mask size %d returned %d errno = %d\n",
                  size, gCode, errno));

    if (gCode < 0) {
      if (errno == ENOSYS) {
        // We shouldn't get here.
        KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                      "inconsistent OS call behavior: errno == ENOSYS for mask "
                      "size %d\n",
                      size));
        if (__kmp_affinity_verbose ||
            (__kmp_affinity_warnings &&
             (__kmp_affinity_type != affinity_none) &&
             (__kmp_affinity_type != affinity_default) &&
             (__kmp_affinity_type != affinity_disabled))) {
          int error = errno;
          kmp_msg_t err_code = KMP_ERR(error);
          __kmp_msg(kmp_ms_warning,
                    KMP_MSG(GetAffSysCallNotSupported, env_var), err_code,
                    __kmp_msg_null);
          if (__kmp_generate_warnings == kmp_warnings_off) {
            __kmp_str_free(&err_code.str);
          }
        }
        KMP_AFFINITY_DISABLE();
        KMP_INTERNAL_FREE(buf);
        return;
      }
      continue;
    }

    sCode = syscall(__NR_sched_setaffinity, 0, gCode, NULL);
    KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                  "setaffinity for mask size %d returned %d errno = %d\n",
                  gCode, sCode, errno));
    if (sCode < 0) {
      if (errno == ENOSYS) {
        KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                      "inconsistent OS call behavior: errno == ENOSYS for mask "
                      "size %d\n",
                      size));
        if (__kmp_affinity_verbose ||
            (__kmp_affinity_warnings &&
             (__kmp_affinity_type != affinity_none) &&
             (__kmp_affinity_type != affinity_default) &&
             (__kmp_affinity_type != affinity_disabled))) {
          int error = errno;
          kmp_msg_t err_code = KMP_ERR(error);
          __kmp_msg(kmp_ms_warning,
                    KMP_MSG(SetAffSysCallNotSupported, env_var), err_code,
                    __kmp_msg_null);
          if (__kmp_generate_warnings == kmp_warnings_off) {
            __kmp_str_free(&err_code.str);
          }
        }
        KMP_AFFINITY_DISABLE();
        KMP_INTERNAL_FREE(buf);
        return;
      }
      if (errno == EFAULT) {
        KMP_AFFINITY_ENABLE(gCode);
        KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                      "affinity supported (mask size %d)\n",
                      (int)__kmp_affin_mask_size));
        KMP_INTERNAL_FREE(buf);
        return;
      }
    }
  }
  KMP_INTERNAL_FREE(buf);

  // Affinity is not supported.
  KMP_AFFINITY_DISABLE();
  KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                "cannot determine mask size - affinity not supported\n"));
  if (__kmp_affinity_verbose ||
      (__kmp_affinity_warnings && (__kmp_affinity_type != affinity_none) &&
       (__kmp_affinity_type != affinity_default) &&
       (__kmp_affinity_type != affinity_disabled))) {
    KMP_WARNING(AffCantGetMaskSize, env_var);
  }
}

#endif // KMP_OS_LINUX && KMP_AFFINITY_SUPPORTED
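// The probing protocol above in one self-contained sketch (illustrative only,
// assuming <sys/syscall.h> and <errno.h>): the kernel reports the mask size it
// actually used, and setaffinity on a NULL buffer distinguishes "size
// accepted" (EFAULT) from "size rejected" (EINVAL):
//   long g = syscall(__NR_sched_getaffinity, 0, size, buf);
//   long s = syscall(__NR_sched_setaffinity, 0, g, NULL);
//   if (s < 0 && errno == EFAULT) { /* mask size g is valid */ }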
#if KMP_USE_FUTEX

int __kmp_futex_determine_capable() {
  int loc = 0;
  int rc = syscall(__NR_futex, &loc, FUTEX_WAKE, 1, NULL, NULL, 0);
  int retval = (rc == 0) || (errno != ENOSYS);

  KA_TRACE(10,
           ("__kmp_futex_determine_capable: rc = %d errno = %d\n", rc, errno));
  KA_TRACE(10, ("__kmp_futex_determine_capable: futex syscall%s supported\n",
                retval ? "" : " not"));

  return retval;
}

#endif // KMP_USE_FUTEX
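// Capability probing by issuing a harmless call and checking errno, as above,
// is a common pattern: FUTEX_WAKE on an address nobody waits on does nothing,
// but on kernels without futex(2) it fails with ENOSYS. A hedged user-space
// equivalent (sketch):
//   int word = 0;
//   if (syscall(__NR_futex, &word, FUTEX_WAKE, 1, NULL, NULL, 0) == -1 &&
//       errno == ENOSYS) { /* fall back to pthread-based waiting */ }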
#if (KMP_ARCH_X86 || KMP_ARCH_X86_64) && (!KMP_ASM_INTRINS)
/* Only 32-bit "add-exchange" instruction on IA-32 architecture causes us to
   use compare_and_store for these routines. */

kmp_int8 __kmp_test_then_or8(volatile kmp_int8 *p, kmp_int8 d) {
  kmp_int8 old_value, new_value;

  old_value = TCR_1(*p);
  new_value = old_value | d;

  while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_1(*p);
    new_value = old_value | d;
  }
  return old_value;
}

kmp_int8 __kmp_test_then_and8(volatile kmp_int8 *p, kmp_int8 d) {
  kmp_int8 old_value, new_value;

  old_value = TCR_1(*p);
  new_value = old_value & d;

  while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_1(*p);
    new_value = old_value & d;
  }
  return old_value;
}

kmp_uint32 __kmp_test_then_or32(volatile kmp_uint32 *p, kmp_uint32 d) {
  kmp_uint32 old_value, new_value;

  old_value = TCR_4(*p);
  new_value = old_value | d;

  while (!KMP_COMPARE_AND_STORE_REL32(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_4(*p);
    new_value = old_value | d;
  }
  return old_value;
}

kmp_uint32 __kmp_test_then_and32(volatile kmp_uint32 *p, kmp_uint32 d) {
  kmp_uint32 old_value, new_value;

  old_value = TCR_4(*p);
  new_value = old_value & d;

  while (!KMP_COMPARE_AND_STORE_REL32(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_4(*p);
    new_value = old_value & d;
  }
  return old_value;
}

kmp_int8 __kmp_test_then_add8(volatile kmp_int8 *p, kmp_int8 d) {
  kmp_int8 old_value, new_value;

  old_value = TCR_1(*p);
  new_value = old_value + d;

  while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_1(*p);
    new_value = old_value + d;
  }
  return old_value;
}

kmp_int64 __kmp_test_then_add64(volatile kmp_int64 *p, kmp_int64 d) {
  kmp_int64 old_value, new_value;

  old_value = TCR_8(*p);
  new_value = old_value + d;

  while (!KMP_COMPARE_AND_STORE_REL64(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_8(*p);
    new_value = old_value + d;
  }
  return old_value;
}

kmp_uint64 __kmp_test_then_or64(volatile kmp_uint64 *p, kmp_uint64 d) {
  kmp_uint64 old_value, new_value;

  old_value = TCR_8(*p);
  new_value = old_value | d;
  while (!KMP_COMPARE_AND_STORE_REL64(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_8(*p);
    new_value = old_value | d;
  }
  return old_value;
}

kmp_uint64 __kmp_test_then_and64(volatile kmp_uint64 *p, kmp_uint64 d) {
  kmp_uint64 old_value, new_value;

  old_value = TCR_8(*p);
  new_value = old_value & d;
  while (!KMP_COMPARE_AND_STORE_REL64(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_8(*p);
    new_value = old_value & d;
  }
  return old_value;
}

#endif /* (KMP_ARCH_X86 || KMP_ARCH_X86_64) && (!KMP_ASM_INTRINS) */
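// All eight routines above share one shape: read the current value, compute
// the new value, and retry the compare-and-store until no other thread has
// raced in between. The same loop with C11 atomics would look like this
// (illustrative sketch only; the runtime predates and avoids <stdatomic.h>):
//   uint64_t old = atomic_load(p);
//   while (!atomic_compare_exchange_weak(p, &old, old | d))
//     ; // 'old' is refreshed by the failed CAS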
void __kmp_terminate_thread(int gtid) {
  int status;
  kmp_info_t *th = __kmp_threads[gtid];

  if (!th)
    return;

#ifdef KMP_CANCEL_THREADS
  KA_TRACE(10, ("__kmp_terminate_thread: kill (%d)\n", gtid));
  status = pthread_cancel(th->th.th_info.ds.ds_thread);
  if (status != 0 && status != ESRCH) {
    __kmp_fatal(KMP_MSG(CantTerminateWorkerThread), KMP_ERR(status),
                __kmp_msg_null);
  }
#endif
  KMP_YIELD(TRUE);
}
/* Set thread stack info according to values returned by pthread_getattr_np().
   If values are unreasonable, assume the call failed and use the incremental
   stack refinement method instead. Returns TRUE if the stack parameters could
   be determined exactly, FALSE if incremental refinement is necessary. */
static kmp_int32 __kmp_set_stack_info(int gtid, kmp_info_t *th) {
  int stack_data;
#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||    \
    KMP_OS_HURD || KMP_OS_KFREEBSD
  pthread_attr_t attr;
  int status;
  size_t size = 0;
  void *addr = 0;

  /* Always do incremental stack refinement for ubermaster threads since the
     initial thread stack range can be reduced by sibling thread creation so
     pthread_attr_getstack may cause thread gtid aliasing */
  if (!KMP_UBER_GTID(gtid)) {

    /* Fetch the real thread attributes */
    status = pthread_attr_init(&attr);
    KMP_CHECK_SYSFAIL("pthread_attr_init", status);
#if KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD
    status = pthread_attr_get_np(pthread_self(), &attr);
    KMP_CHECK_SYSFAIL("pthread_attr_get_np", status);
#else
    status = pthread_getattr_np(pthread_self(), &attr);
    KMP_CHECK_SYSFAIL("pthread_getattr_np", status);
#endif
    status = pthread_attr_getstack(&attr, &addr, &size);
    KMP_CHECK_SYSFAIL("pthread_attr_getstack", status);
    KA_TRACE(60,
             ("__kmp_set_stack_info: T#%d pthread_attr_getstack returned size:"
              " %lu, low addr: %p\n",
              gtid, size, addr));
    status = pthread_attr_destroy(&attr);
    KMP_CHECK_SYSFAIL("pthread_attr_destroy", status);
  }

  if (size != 0 && addr != 0) { // was stack parameter determination successful?
    /* Store the correct base and size */
    TCW_PTR(th->th.th_info.ds.ds_stackbase, (((char *)addr) + size));
    TCW_PTR(th->th.th_info.ds.ds_stacksize, size);
    TCW_4(th->th.th_info.ds.ds_stackgrow, FALSE);
    return TRUE;
  }
#endif
  /* Use incremental refinement starting from initial conservative estimate */
  TCW_PTR(th->th.th_info.ds.ds_stacksize, 0);
  TCW_PTR(th->th.th_info.ds.ds_stackbase, &stack_data);
  TCW_4(th->th.th_info.ds.ds_stackgrow, TRUE);
  return FALSE;
}
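// A minimal standalone version of the query above (sketch, assuming glibc's
// non-portable pthread_getattr_np; on the BSDs the spelling is
// pthread_attr_get_np with the argument order used above):
//   pthread_attr_t a;
//   void *lo; size_t sz;
//   pthread_getattr_np(pthread_self(), &a);
//   pthread_attr_getstack(&a, &lo, &sz); // lo = lowest address, sz = bytes
//   pthread_attr_destroy(&a);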
static void *__kmp_launch_worker(void *thr) {
  int status, old_type, old_state;
#ifdef KMP_BLOCK_SIGNALS
  sigset_t new_set, old_set;
#endif /* KMP_BLOCK_SIGNALS */
  void *exit_val;
#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||    \
    KMP_OS_OPENBSD || KMP_OS_HURD || KMP_OS_KFREEBSD
  void *volatile padding = 0;
#endif
  int gtid;

  gtid = ((kmp_info_t *)thr)->th.th_info.ds.ds_gtid;
  __kmp_gtid_set_specific(gtid);
#ifdef KMP_TDATA_GTID
  __kmp_gtid = gtid;
#endif
#if KMP_STATS_ENABLED
  // set thread local index to point to thread-specific stats
  __kmp_stats_thread_ptr = ((kmp_info_t *)thr)->th.th_stats;
  __kmp_stats_thread_ptr->startLife();
  KMP_SET_THREAD_STATE(IDLE);
  KMP_INIT_PARTITIONED_TIMERS(OMP_idle);
#endif

#if USE_ITT_BUILD
  __kmp_itt_thread_name(gtid);
#endif /* USE_ITT_BUILD */

#if KMP_AFFINITY_SUPPORTED
  __kmp_affinity_set_init_mask(gtid, FALSE);
#endif

#ifdef KMP_CANCEL_THREADS
  status = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old_type);
  KMP_CHECK_SYSFAIL("pthread_setcanceltype", status);
  status = pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &old_state);
  KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
#endif

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
  // Set FP control regs to be a copy of the parallel initialization thread's.
  __kmp_clear_x87_fpu_status_word();
  __kmp_load_x87_fpu_control_word(&__kmp_init_x87_fpu_control_word);
  __kmp_load_mxcsr(&__kmp_init_mxcsr);
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

#ifdef KMP_BLOCK_SIGNALS
  status = sigfillset(&new_set);
  KMP_CHECK_SYSFAIL_ERRNO("sigfillset", status);
  status = pthread_sigmask(SIG_BLOCK, &new_set, &old_set);
  KMP_CHECK_SYSFAIL("pthread_sigmask", status);
#endif /* KMP_BLOCK_SIGNALS */

#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||    \
    KMP_OS_OPENBSD || KMP_OS_HURD || KMP_OS_KFREEBSD
  if (__kmp_stkoffset > 0 && gtid > 0) {
    padding = KMP_ALLOCA(gtid * __kmp_stkoffset);
  }
#endif

  KMP_MB();
  __kmp_set_stack_info(gtid, (kmp_info_t *)thr);

  __kmp_check_stack_overlap((kmp_info_t *)thr);

  exit_val = __kmp_launch_thread((kmp_info_t *)thr);

#ifdef KMP_BLOCK_SIGNALS
  status = pthread_sigmask(SIG_SETMASK, &old_set, NULL);
  KMP_CHECK_SYSFAIL("pthread_sigmask", status);
#endif /* KMP_BLOCK_SIGNALS */

  return exit_val;
}
#if KMP_USE_MONITOR
/* The monitor thread controls all of the threads in the complex. */

static void *__kmp_launch_monitor(void *thr) {
  int status, old_type, old_state;
#ifdef KMP_BLOCK_SIGNALS
  sigset_t new_set;
#endif /* KMP_BLOCK_SIGNALS */
  struct timespec interval;
  int yield_count;
  int yield_cycles = 0;

  KMP_MB(); /* Flush all pending memory write invalidates. */

  KA_TRACE(10, ("__kmp_launch_monitor: #1 launched\n"));

  /* register us as the monitor thread */
  __kmp_gtid_set_specific(KMP_GTID_MONITOR);
#ifdef KMP_TDATA_GTID
  __kmp_gtid = KMP_GTID_MONITOR;
#endif

  KMP_MB();

#if USE_ITT_BUILD
  // Instruct Intel(R) Threading Tools to ignore monitor thread.
  __kmp_itt_thread_ignore();
#endif /* USE_ITT_BUILD */

  __kmp_set_stack_info(((kmp_info_t *)thr)->th.th_info.ds.ds_gtid,
                       (kmp_info_t *)thr);

  __kmp_check_stack_overlap((kmp_info_t *)thr);

#ifdef KMP_CANCEL_THREADS
  status = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old_type);
  KMP_CHECK_SYSFAIL("pthread_setcanceltype", status);
  status = pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &old_state);
  KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
#endif

#if KMP_REAL_TIME_FIX
  // If the application runs under a real-time scheduling policy, try to raise
  // the monitor's priority so that it is not starved by the worker threads.
  {
    int sched = sched_getscheduler(0);
    if (sched == SCHED_FIFO || sched == SCHED_RR) {
      struct sched_param param;
      int max_priority = sched_get_priority_max(sched);
      int rc;
      KMP_WARNING(RealTimeSchedNotSupported);
      sched_getparam(0, &param);
      if (param.sched_priority < max_priority) {
        param.sched_priority += 1;
        rc = sched_setscheduler(0, sched, &param);
        if (rc != 0) {
          int error = errno;
          kmp_msg_t err_code = KMP_ERR(error);
          __kmp_msg(kmp_ms_warning, KMP_MSG(CantChangeMonitorPriority),
                    err_code, KMP_MSG(MonitorWillStarve), __kmp_msg_null);
          if (__kmp_generate_warnings == kmp_warnings_off) {
            __kmp_str_free(&err_code.str);
          }
        }
      } else {
        // We cannot abort here, because the number of CPUs may be enough for
        // all the threads, including the monitor, so the application could
        // potentially still work.
        __kmp_msg(kmp_ms_warning, KMP_MSG(RunningAtMaxPriority),
                  KMP_MSG(MonitorWillStarve), KMP_HNT(RunningAtMaxPriority),
                  __kmp_msg_null);
      }
    }
    // AC: free thread that waits for monitor started
    TCW_4(__kmp_global.g.g_time.dt.t_value, 0);
  }
#endif // KMP_REAL_TIME_FIX

  KMP_MB(); /* Flush all pending memory write invalidates. */

  if (__kmp_monitor_wakeups == 1) {
    interval.tv_sec = 1;
    interval.tv_nsec = 0;
  } else {
    interval.tv_sec = 0;
    interval.tv_nsec = (KMP_NSEC_PER_SEC / __kmp_monitor_wakeups);
  }

  KA_TRACE(10, ("__kmp_launch_monitor: #2 monitor\n"));

  if (__kmp_yield_cycle) {
    __kmp_yielding_on = 0; /* Start out with yielding shut off */
    yield_count = __kmp_yield_off_count;
  } else {
    __kmp_yielding_on = 1; /* Yielding is on permanently */
  }

  while (!TCR_4(__kmp_global.g.g_done)) {
    struct timespec now;
    struct timeval tval;

    /* This thread monitors the state of the system. */

    KA_TRACE(15, ("__kmp_launch_monitor: update\n"));

    status = gettimeofday(&tval, NULL);
    KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
    TIMEVAL_TO_TIMESPEC(&tval, &now);

    now.tv_sec += interval.tv_sec;
    now.tv_nsec += interval.tv_nsec;

    if (now.tv_nsec >= KMP_NSEC_PER_SEC) {
      now.tv_sec += 1;
      now.tv_nsec -= KMP_NSEC_PER_SEC;
    }

    status = pthread_mutex_lock(&__kmp_wait_mx.m_mutex);
    KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
    // AC: the monitor should not fall asleep if g_done has been set; check
    // once more under the mutex.
    if (!TCR_4(__kmp_global.g.g_done)) {
      status = pthread_cond_timedwait(&__kmp_wait_cv.c_cond,
                                      &__kmp_wait_mx.m_mutex, &now);
      if (status != 0) {
        if (status != ETIMEDOUT && status != EINTR) {
          KMP_SYSFAIL("pthread_cond_timedwait", status);
        }
      }
    }
    status = pthread_mutex_unlock(&__kmp_wait_mx.m_mutex);
    KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);

    if (__kmp_yield_cycle) {
      yield_cycles++;
      if ((yield_cycles % yield_count) == 0) {
        if (__kmp_yielding_on) {
          __kmp_yielding_on = 0; /* Turn it off now */
          yield_count = __kmp_yield_off_count;
        } else {
          __kmp_yielding_on = 1; /* Turn it on now */
          yield_count = __kmp_yield_on_count;
        }
        yield_cycles = 0;
      }
    } else {
      __kmp_yielding_on = 1;
    }

    TCW_4(__kmp_global.g.g_time.dt.t_value,
          TCR_4(__kmp_global.g.g_time.dt.t_value) + 1);

    KMP_MB(); /* Flush all pending memory write invalidates. */
  }

  KA_TRACE(10, ("__kmp_launch_monitor: #3 cleanup\n"));

#ifdef KMP_BLOCK_SIGNALS
  status = sigfillset(&new_set);
  KMP_CHECK_SYSFAIL_ERRNO("sigfillset", status);
  status = pthread_sigmask(SIG_UNBLOCK, &new_set, NULL);
  KMP_CHECK_SYSFAIL("pthread_sigmask", status);
#endif /* KMP_BLOCK_SIGNALS */

  KA_TRACE(10, ("__kmp_launch_monitor: #4 finished\n"));

  if (__kmp_global.g.g_abort != 0) {
    /* now we need to terminate the worker threads; the value of t_abort is
       the signal we caught */
    int gtid;

    KA_TRACE(10, ("__kmp_launch_monitor: #5 terminate sig=%d\n",
                  __kmp_global.g.g_abort));

    /* terminate the OpenMP worker threads */
    for (gtid = 1; gtid < __kmp_threads_capacity; ++gtid)
      __kmp_terminate_thread(gtid);

    __kmp_cleanup();

    KA_TRACE(10, ("__kmp_launch_monitor: #6 raise sig=%d\n",
                  __kmp_global.g.g_abort));

    if (__kmp_global.g.g_abort > 0)
      raise(__kmp_global.g.g_abort);
  }

  KA_TRACE(10, ("__kmp_launch_monitor: #7 exit\n"));

  return thr;
}
#endif // KMP_USE_MONITOR
void __kmp_create_worker(int gtid, kmp_info_t *th, size_t stack_size) {
  pthread_t handle;
  pthread_attr_t thread_attr;
  int status;

  th->th.th_info.ds.ds_gtid = gtid;

#if KMP_STATS_ENABLED
  // sets up worker thread stats before the parallel region begins
  __kmp_acquire_tas_lock(&__kmp_stats_lock, gtid);

  // th->th.th_stats is used to transfer the thread-specific stats pointer to
  // __kmp_launch_worker.
  if (!KMP_UBER_GTID(gtid)) {
    th->th.th_stats = __kmp_stats_list->push_back(gtid);
  } else {
    // The uber thread reuses the stats of the initialization thread.
    th->th.th_stats = __kmp_stats_thread_ptr;
  }
  __kmp_release_tas_lock(&__kmp_stats_lock, gtid);

#endif // KMP_STATS_ENABLED

  if (KMP_UBER_GTID(gtid)) {
    KA_TRACE(10, ("__kmp_create_worker: uber thread (%d)\n", gtid));
    th->th.th_info.ds.ds_thread = pthread_self();
    __kmp_set_stack_info(gtid, th);
    __kmp_check_stack_overlap(th);
    return;
  }

  KA_TRACE(10, ("__kmp_create_worker: try to create thread (%d)\n", gtid));

  KMP_MB(); /* Flush all pending memory write invalidates. */

#ifdef KMP_THREAD_ATTR
  status = pthread_attr_init(&thread_attr);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantInitThreadAttrs), KMP_ERR(status), __kmp_msg_null);
  }
  status = pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_JOINABLE);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantSetWorkerState), KMP_ERR(status), __kmp_msg_null);
  }

  /* Set stack size for this thread now. The multiple of 2 accounts for the
     stack offset alloca() performed later, so the user still gets the full
     requested stack space for all threads. */
  stack_size += gtid * __kmp_stkoffset * 2;

  KA_TRACE(10, ("__kmp_create_worker: T#%d, default stacksize = %lu bytes, "
                "__kmp_stksize = %lu bytes, final stacksize = %lu bytes\n",
                gtid, KMP_DEFAULT_STKSIZE, __kmp_stksize, stack_size));

#ifdef _POSIX_THREAD_ATTR_STACKSIZE
  status = pthread_attr_setstacksize(&thread_attr, stack_size);
#ifdef KMP_BACKUP_STKSIZE
  if (status != 0) {
    if (!__kmp_env_stksize) {
      stack_size = KMP_BACKUP_STKSIZE + gtid * __kmp_stkoffset;
      __kmp_stksize = KMP_BACKUP_STKSIZE;
      KA_TRACE(10, ("__kmp_create_worker: T#%d, default stacksize = %lu bytes, "
                    "__kmp_stksize = %lu bytes, (backup) final stacksize = %lu "
                    "bytes\n",
                    gtid, KMP_DEFAULT_STKSIZE, __kmp_stksize, stack_size));
      status = pthread_attr_setstacksize(&thread_attr, stack_size);
    }
  }
#endif /* KMP_BACKUP_STKSIZE */
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantSetWorkerStackSize, stack_size), KMP_ERR(status),
                KMP_HNT(ChangeWorkerStackSize), __kmp_msg_null);
  }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */

#endif /* KMP_THREAD_ATTR */

  status =
      pthread_create(&handle, &thread_attr, __kmp_launch_worker, (void *)th);
  if (status != 0 || !handle) {
#ifdef _POSIX_THREAD_ATTR_STACKSIZE
    if (status == EINVAL) {
      __kmp_fatal(KMP_MSG(CantSetWorkerStackSize, stack_size), KMP_ERR(status),
                  KMP_HNT(IncreaseWorkerStackSize), __kmp_msg_null);
    }
    if (status == ENOMEM) {
      __kmp_fatal(KMP_MSG(CantSetWorkerStackSize, stack_size), KMP_ERR(status),
                  KMP_HNT(DecreaseWorkerStackSize), __kmp_msg_null);
    }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
    if (status == EAGAIN) {
      __kmp_fatal(KMP_MSG(NoResourcesForWorkerThread), KMP_ERR(status),
                  KMP_HNT(Decrease_NUM_THREADS), __kmp_msg_null);
    }
    KMP_SYSFAIL("pthread_create", status);
  }

  th->th.th_info.ds.ds_thread = handle;

#ifdef KMP_THREAD_ATTR
  status = pthread_attr_destroy(&thread_attr);
  if (status) {
    kmp_msg_t err_code = KMP_ERR(status);
    __kmp_msg(kmp_ms_warning, KMP_MSG(CantDestroyThreadAttrs), err_code,
              __kmp_msg_null);
    if (__kmp_generate_warnings == kmp_warnings_off) {
      __kmp_str_free(&err_code.str);
    }
  }
#endif /* KMP_THREAD_ATTR */

  KMP_MB(); /* Flush all pending memory write invalidates. */

  KA_TRACE(10, ("__kmp_create_worker: done creating thread (%d)\n", gtid));

} // __kmp_create_worker
#if KMP_USE_MONITOR
void __kmp_create_monitor(kmp_info_t *th) {
  pthread_t handle;
  pthread_attr_t thread_attr;
  size_t size;
  int status;
  int auto_adj_size = FALSE;

  if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME) {
    // We don't need the monitor thread in case of MAX_BLOCKTIME.
    KA_TRACE(10, ("__kmp_create_monitor: skipping monitor thread because of "
                  "MAX blocktime\n"));
    th->th.th_info.ds.ds_tid = 0; // this makes reap_monitor a no-op
    th->th.th_info.ds.ds_gtid = 0;
    return;
  }
  KA_TRACE(10, ("__kmp_create_monitor: try to create monitor\n"));

  KMP_MB(); /* Flush all pending memory write invalidates. */

  th->th.th_info.ds.ds_tid = KMP_GTID_MONITOR;
  th->th.th_info.ds.ds_gtid = KMP_GTID_MONITOR;
#if KMP_REAL_TIME_FIX
  TCW_4(__kmp_global.g.g_time.dt.t_value,
        -1); // Will use it for synchronization a bit later.
#else
  TCW_4(__kmp_global.g.g_time.dt.t_value, 0);
#endif // KMP_REAL_TIME_FIX

#ifdef KMP_THREAD_ATTR
  if (__kmp_monitor_stksize == 0) {
    __kmp_monitor_stksize = KMP_DEFAULT_MONITOR_STKSIZE;
    auto_adj_size = TRUE;
  }
  status = pthread_attr_init(&thread_attr);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantInitThreadAttrs), KMP_ERR(status), __kmp_msg_null);
  }
  status = pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_JOINABLE);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantSetMonitorState), KMP_ERR(status), __kmp_msg_null);
  }

#ifdef _POSIX_THREAD_ATTR_STACKSIZE
  status = pthread_attr_getstacksize(&thread_attr, &size);
  KMP_CHECK_SYSFAIL("pthread_attr_getstacksize", status);
#else
  size = __kmp_sys_min_stksize;
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
#endif /* KMP_THREAD_ATTR */

  if (__kmp_monitor_stksize == 0) {
    __kmp_monitor_stksize = KMP_DEFAULT_MONITOR_STKSIZE;
  }
  if (__kmp_monitor_stksize < __kmp_sys_min_stksize) {
    __kmp_monitor_stksize = __kmp_sys_min_stksize;
  }

  KA_TRACE(10, ("__kmp_create_monitor: default stacksize = %lu bytes,"
                "requested stacksize = %lu bytes\n",
                size, __kmp_monitor_stksize));

retry:

/* Set stack size for this thread now. */
#ifdef _POSIX_THREAD_ATTR_STACKSIZE
  KA_TRACE(10, ("__kmp_create_monitor: setting stacksize = %lu bytes,",
                __kmp_monitor_stksize));
  status = pthread_attr_setstacksize(&thread_attr, __kmp_monitor_stksize);
  if (status != 0) {
    if (auto_adj_size) {
      __kmp_monitor_stksize *= 2;
      goto retry;
    }
    kmp_msg_t err_code = KMP_ERR(status);
    __kmp_msg(kmp_ms_warning, // should this be fatal?  BB
              KMP_MSG(CantSetMonitorStackSize, (long int)__kmp_monitor_stksize),
              err_code, KMP_HNT(ChangeMonitorStackSize), __kmp_msg_null);
    if (__kmp_generate_warnings == kmp_warnings_off) {
      __kmp_str_free(&err_code.str);
    }
  }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */

  status =
      pthread_create(&handle, &thread_attr, __kmp_launch_monitor, (void *)th);

  if (status != 0) {
#ifdef _POSIX_THREAD_ATTR_STACKSIZE
    if (status == EINVAL) {
      if (auto_adj_size && (__kmp_monitor_stksize < (size_t)0x40000000)) {
        __kmp_monitor_stksize *= 2;
        goto retry;
      }
      __kmp_fatal(KMP_MSG(CantSetMonitorStackSize, __kmp_monitor_stksize),
                  KMP_ERR(status), KMP_HNT(IncreaseMonitorStackSize),
                  __kmp_msg_null);
    }
    if (status == ENOMEM) {
      __kmp_fatal(KMP_MSG(CantSetMonitorStackSize, __kmp_monitor_stksize),
                  KMP_ERR(status), KMP_HNT(DecreaseMonitorStackSize),
                  __kmp_msg_null);
    }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
    if (status == EAGAIN) {
      __kmp_fatal(KMP_MSG(NoResourcesForMonitorThread), KMP_ERR(status),
                  KMP_HNT(DecreaseNumberOfThreadsInUse), __kmp_msg_null);
    }
    KMP_SYSFAIL("pthread_create", status);
  }

  th->th.th_info.ds.ds_thread = handle;

#if KMP_REAL_TIME_FIX
  // Wait until the monitor thread has really started and set its priority.
  KMP_DEBUG_ASSERT(sizeof(kmp_uint32) ==
                   sizeof(__kmp_global.g.g_time.dt.t_value));
  __kmp_wait_yield_4((kmp_uint32 volatile *)&__kmp_global.g.g_time.dt.t_value,
                     -1, &__kmp_neq_4, NULL);
#endif // KMP_REAL_TIME_FIX

#ifdef KMP_THREAD_ATTR
  status = pthread_attr_destroy(&thread_attr);
  if (status != 0) {
    kmp_msg_t err_code = KMP_ERR(status);
    __kmp_msg(kmp_ms_warning, KMP_MSG(CantDestroyThreadAttrs), err_code,
              __kmp_msg_null);
    if (__kmp_generate_warnings == kmp_warnings_off) {
      __kmp_str_free(&err_code.str);
    }
  }
#endif

  KMP_MB(); /* Flush all pending memory write invalidates. */

  KA_TRACE(10, ("__kmp_create_monitor: monitor created %#.8lx\n",
                th->th.th_info.ds.ds_thread));

} // __kmp_create_monitor
#endif // KMP_USE_MONITOR
void __kmp_exit_thread(int exit_status) {
  pthread_exit((void *)(intptr_t)exit_status);
} // __kmp_exit_thread
#if KMP_USE_MONITOR
void __kmp_resume_monitor();

void __kmp_reap_monitor(kmp_info_t *th) {
  int status;
  void *exit_val;

  KA_TRACE(10, ("__kmp_reap_monitor: try to reap monitor thread with handle"
                " %#.8lx\n",
                th->th.th_info.ds.ds_thread));

  // If monitor has been created, its tid and gtid should be KMP_GTID_MONITOR.
  // If both tid and gtid are 0, it means the monitor did not ever start.
  // If both tid and gtid are KMP_GTID_DNE, the monitor has been shut down.
  KMP_DEBUG_ASSERT(th->th.th_info.ds.ds_tid == th->th.th_info.ds.ds_gtid);
  if (th->th.th_info.ds.ds_gtid != KMP_GTID_MONITOR) {
    KA_TRACE(10, ("__kmp_reap_monitor: monitor did not start, returning\n"));
    return;
  }

  KMP_MB(); /* Flush all pending memory write invalidates. */

  /* First, check whether the monitor thread exists to wake it up. This avoids
     performance problems when the monitor sleeps during a blocktime-size
     interval. */
  status = pthread_kill(th->th.th_info.ds.ds_thread, 0);
  if (status != ESRCH) {
    __kmp_resume_monitor(); // Wake up the monitor thread.
  }
  KA_TRACE(10, ("__kmp_reap_monitor: try to join with monitor\n"));
  status = pthread_join(th->th.th_info.ds.ds_thread, &exit_val);
  if (exit_val != th) {
    __kmp_fatal(KMP_MSG(ReapMonitorError), KMP_ERR(status), __kmp_msg_null);
  }

  th->th.th_info.ds.ds_tid = KMP_GTID_DNE;
  th->th.th_info.ds.ds_gtid = KMP_GTID_DNE;

  KA_TRACE(10, ("__kmp_reap_monitor: done reaping monitor thread with handle"
                " %#.8lx\n",
                th->th.th_info.ds.ds_thread));

  KMP_MB(); /* Flush all pending memory write invalidates. */
}
#endif // KMP_USE_MONITOR
void __kmp_reap_worker(kmp_info_t *th) {
  int status;
  void *exit_val;

  KMP_MB(); /* Flush all pending memory write invalidates. */

  KA_TRACE(
      10, ("__kmp_reap_worker: try to reap T#%d\n", th->th.th_info.ds.ds_gtid));

  status = pthread_join(th->th.th_info.ds.ds_thread, &exit_val);
#ifdef KMP_DEBUG
  /* Don't expose these to the user until we understand when they trigger. */
  if (status != 0) {
    __kmp_fatal(KMP_MSG(ReapWorkerError), KMP_ERR(status), __kmp_msg_null);
  }
  if (exit_val != th) {
    KA_TRACE(10, ("__kmp_reap_worker: worker T#%d did not reap properly, "
                  "exit_val = %p\n",
                  th->th.th_info.ds.ds_gtid, exit_val));
  }
#endif /* KMP_DEBUG */

  KA_TRACE(10, ("__kmp_reap_worker: done reaping T#%d\n",
                th->th.th_info.ds.ds_gtid));

  KMP_MB(); /* Flush all pending memory write invalidates. */
}
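// Reaping relies on the pthread_join contract: the value the worker returned
// from its start routine (here, the kmp_info_t* it was launched with) comes
// back through the second argument, so a mismatch signals a corrupted handle.
// The same check in isolation (sketch):
//   void *ret;
//   if (pthread_join(t, &ret) == 0 && ret != expected) { /* investigate */ }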
#if KMP_HANDLE_SIGNALS

static void __kmp_null_handler(int signo) {
  // Do nothing, for doing SIG_IGN-type actions.
} // __kmp_null_handler

static void __kmp_team_handler(int signo) {
  if (__kmp_global.g.g_abort == 0) {
/* Stage 1 signal handler, let's shut down all of the threads. */
#ifdef KMP_DEBUG
    __kmp_debug_printf("__kmp_team_handler: caught signal = %d\n", signo);
#endif
    switch (signo) {
    case SIGHUP:
    case SIGINT:
    case SIGQUIT:
    case SIGILL:
    case SIGABRT:
    case SIGFPE:
    case SIGBUS:
    case SIGSEGV:
#ifdef SIGSYS
    case SIGSYS:
#endif
    case SIGTERM:
      if (__kmp_debug_buf) {
        __kmp_dump_debug_buffer();
      }
      KMP_MB(); // Flush all pending memory write invalidates.
      TCW_4(__kmp_global.g.g_abort, signo);
      KMP_MB(); // Flush all pending memory write invalidates.
      TCW_4(__kmp_global.g.g_done, TRUE);
      KMP_MB(); // Flush all pending memory write invalidates.
      break;
    default:
#ifdef KMP_DEBUG
      __kmp_debug_printf("__kmp_team_handler: unknown signal type");
#endif
      break;
    }
  }
} // __kmp_team_handler

static void __kmp_sigaction(int signum, const struct sigaction *act,
                            struct sigaction *oldact) {
  int rc = sigaction(signum, act, oldact);
  KMP_CHECK_SYSFAIL_ERRNO("sigaction", rc);
}

static void __kmp_install_one_handler(int sig, sig_func_t handler_func,
                                      int parallel_init) {
  KMP_MB(); // Flush all pending memory write invalidates.
  KB_TRACE(60,
           ("__kmp_install_one_handler( %d, ..., %d )\n", sig, parallel_init));
  if (parallel_init) {
    struct sigaction new_action;
    struct sigaction old_action;
    new_action.sa_handler = handler_func;
    new_action.sa_flags = 0;
    sigfillset(&new_action.sa_mask);
    __kmp_sigaction(sig, &new_action, &old_action);
    if (old_action.sa_handler == __kmp_sighldrs[sig].sa_handler) {
      sigaddset(&__kmp_sigset, sig);
    } else {
      // Restore/keep user's handler if one was previously installed.
      __kmp_sigaction(sig, &old_action, NULL);
    }
  } else {
    // Save initial/system signal handlers to see if user handlers are
    // installed later.
    __kmp_sigaction(sig, NULL, &__kmp_sighldrs[sig]);
  }
  KMP_MB(); // Flush all pending memory write invalidates.
} // __kmp_install_one_handler

static void __kmp_remove_one_handler(int sig) {
  KB_TRACE(60, ("__kmp_remove_one_handler( %d )\n", sig));
  if (sigismember(&__kmp_sigset, sig)) {
    struct sigaction old;
    KMP_MB(); // Flush all pending memory write invalidates.
    __kmp_sigaction(sig, &__kmp_sighldrs[sig], &old);
    if ((old.sa_handler != __kmp_team_handler) &&
        (old.sa_handler != __kmp_null_handler)) {
      // Restore the user's signal handler.
      KB_TRACE(10, ("__kmp_remove_one_handler: oops, not our handler, "
                    "restoring: sig=%d\n",
                    sig));
      __kmp_sigaction(sig, &old, NULL);
    }
    sigdelset(&__kmp_sigset, sig);
    KMP_MB(); // Flush all pending memory write invalidates.
  }
} // __kmp_remove_one_handler

void __kmp_install_signals(int parallel_init) {
  KB_TRACE(10, ("__kmp_install_signals( %d )\n", parallel_init));
  if (__kmp_handle_signals || !parallel_init) {
    /* If !parallel_init, we do not install handlers, just save the original
       handlers. Let us do it even if __kmp_handle_signals is 0. */
    sigemptyset(&__kmp_sigset);
    __kmp_install_one_handler(SIGHUP, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGINT, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGQUIT, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGILL, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGABRT, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGFPE, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGBUS, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGSEGV, __kmp_team_handler, parallel_init);
#ifdef SIGSYS
    __kmp_install_one_handler(SIGSYS, __kmp_team_handler, parallel_init);
#endif // SIGSYS
    __kmp_install_one_handler(SIGTERM, __kmp_team_handler, parallel_init);
#ifdef SIGPIPE
    __kmp_install_one_handler(SIGPIPE, __kmp_team_handler, parallel_init);
#endif // SIGPIPE
  }
} // __kmp_install_signals

void __kmp_remove_signals(void) {
  int sig;
  KB_TRACE(10, ("__kmp_remove_signals()\n"));
  for (sig = 1; sig < NSIG; ++sig) {
    __kmp_remove_one_handler(sig);
  }
} // __kmp_remove_signals

#endif // KMP_HANDLE_SIGNALS
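// The save/restore protocol above, in brief: at library load (parallel_init ==
// 0) every handler slot is snapshotted into __kmp_sighldrs; at parallel init a
// handler is installed only if the application has not replaced the snapshot
// in the meantime. A minimal sketch of that compare-then-install idea:
//   struct sigaction mine, saved, cur;
//   memset(&mine, 0, sizeof(mine));
//   mine.sa_handler = my_handler;
//   sigfillset(&mine.sa_mask);
//   sigaction(SIGTERM, NULL, &saved);   // snapshot at load time
//   // ... later, at parallel init:
//   sigaction(SIGTERM, &mine, &cur);    // try to install
//   if (cur.sa_handler != saved.sa_handler)
//     sigaction(SIGTERM, &cur, NULL);   // the user won; put theirs back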
void __kmp_enable(int new_state) {
#ifdef KMP_CANCEL_THREADS
  int status, old_state;
  status = pthread_setcancelstate(new_state, &old_state);
  KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
  KMP_DEBUG_ASSERT(old_state == PTHREAD_CANCEL_DISABLE);
#endif
}

void __kmp_disable(int *old_state) {
#ifdef KMP_CANCEL_THREADS
  int status;
  status = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, old_state);
  KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
#endif
}
static void __kmp_atfork_prepare(void) {
  __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);
  __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
}

static void __kmp_atfork_parent(void) {
  __kmp_release_bootstrap_lock(&__kmp_initz_lock);
  __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
}

/* Reset the library so execution in the child starts "all over again" with
   clean data structures in initial states. Don't worry about freeing memory
   allocated by parent, just abandon it to be safe. */
static void __kmp_atfork_child(void) {
  __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
  /* TODO make sure this is done right for nested/sibling */

  ++__kmp_fork_count;

#if KMP_AFFINITY_SUPPORTED
#if KMP_OS_LINUX
  // Reset the affinity in the child to the initial thread affinity in the
  // parent.
  kmp_set_thread_affinity_mask_initial();
#endif
  // Set default not to bind threads tightly in the child (we're expecting
  // over-subscription after the fork and this can improve things for
  // scripting languages that use OpenMP inside process-parallel code).
  __kmp_affinity_type = affinity_none;
#if OMP_40_ENABLED
  if (__kmp_nested_proc_bind.bind_types != NULL) {
    __kmp_nested_proc_bind.bind_types[0] = proc_bind_false;
  }
#endif // OMP_40_ENABLED
#endif // KMP_AFFINITY_SUPPORTED

  __kmp_init_runtime = FALSE;
#if KMP_USE_MONITOR
  __kmp_init_monitor = 0;
#endif
  __kmp_init_parallel = FALSE;
  __kmp_init_middle = FALSE;
  __kmp_init_serial = FALSE;
  TCW_4(__kmp_init_gtid, FALSE);
  __kmp_init_common = FALSE;

  TCW_4(__kmp_init_user_locks, FALSE);
#if !KMP_USE_DYNAMIC_LOCK
  __kmp_user_lock_table.used = 1;
  __kmp_user_lock_table.allocated = 0;
  __kmp_user_lock_table.table = NULL;
  __kmp_lock_blocks = NULL;
#endif

  __kmp_all_nth = 0;
  TCW_4(__kmp_nth, 0);

  __kmp_thread_pool = NULL;
  __kmp_thread_pool_insert_pt = NULL;
  __kmp_team_pool = NULL;

  /* Must actually zero all the *cache arguments passed to __kmpc_threadprivate
     here so threadprivate doesn't use stale data. */
  KA_TRACE(10, ("__kmp_atfork_child: checking cache address list %p\n",
                __kmp_threadpriv_cache_list));

  while (__kmp_threadpriv_cache_list != NULL) {

    if (*__kmp_threadpriv_cache_list->addr != NULL) {
      KC_TRACE(50, ("__kmp_atfork_child: zeroing cache at address %p\n",
                    &(*__kmp_threadpriv_cache_list->addr)));

      *__kmp_threadpriv_cache_list->addr = NULL;
    }
    __kmp_threadpriv_cache_list = __kmp_threadpriv_cache_list->next;
  }

  __kmp_init_runtime = FALSE;

  /* reset statically initialized locks */
  __kmp_init_bootstrap_lock(&__kmp_initz_lock);
  __kmp_init_bootstrap_lock(&__kmp_stdio_lock);
  __kmp_init_bootstrap_lock(&__kmp_console_lock);
  __kmp_init_bootstrap_lock(&__kmp_task_team_lock);
}
void __kmp_register_atfork(void) {
  if (__kmp_need_register_atfork) {
    int status = pthread_atfork(__kmp_atfork_prepare, __kmp_atfork_parent,
                                __kmp_atfork_child);
    KMP_CHECK_SYSFAIL("pthread_atfork", status);
    __kmp_need_register_atfork = FALSE;
  }
}
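// pthread_atfork(3) registers three callbacks: 'prepare' runs in the parent
// before fork(); 'parent' and 'child' run after it in the respective process.
// Taking every lock in prepare and releasing or re-initializing it on both
// sides, as done above, guarantees the child never inherits a mutex held by a
// thread that no longer exists. Minimal shape (sketch with hypothetical
// helper names):
//   pthread_atfork(take_all_locks,      // prepare
//                  release_all_locks,   // parent
//                  reset_everything);   // child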
void __kmp_suspend_initialize(void) {
  int status;
  status = pthread_mutexattr_init(&__kmp_suspend_mutex_attr);
  KMP_CHECK_SYSFAIL("pthread_mutexattr_init", status);
  status = pthread_condattr_init(&__kmp_suspend_cond_attr);
  KMP_CHECK_SYSFAIL("pthread_condattr_init", status);
}
static void __kmp_suspend_initialize_thread(kmp_info_t *th) {
  ANNOTATE_HAPPENS_AFTER(&th->th.th_suspend_init_count);
  if (th->th.th_suspend_init_count <= __kmp_fork_count) {
    /* this means we haven't initialized the suspension pthread objects for
       this thread in this instance of the process */
    int status;
    status = pthread_cond_init(&th->th.th_suspend_cv.c_cond,
                               &__kmp_suspend_cond_attr);
    KMP_CHECK_SYSFAIL("pthread_cond_init", status);
    status = pthread_mutex_init(&th->th.th_suspend_mx.m_mutex,
                                &__kmp_suspend_mutex_attr);
    KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
    *(volatile int *)&th->th.th_suspend_init_count = __kmp_fork_count + 1;
    ANNOTATE_HAPPENS_BEFORE(&th->th.th_suspend_init_count);
  }
}

void __kmp_suspend_uninitialize_thread(kmp_info_t *th) {
  if (th->th.th_suspend_init_count > __kmp_fork_count) {
    /* this means we have initialized the suspension pthread objects for this
       thread in this instance of the process */
    int status;

    status = pthread_cond_destroy(&th->th.th_suspend_cv.c_cond);
    if (status != 0 && status != EBUSY) {
      KMP_SYSFAIL("pthread_cond_destroy", status);
    }
    status = pthread_mutex_destroy(&th->th.th_suspend_mx.m_mutex);
    if (status != 0 && status != EBUSY) {
      KMP_SYSFAIL("pthread_mutex_destroy", status);
    }
    --th->th.th_suspend_init_count;
    KMP_DEBUG_ASSERT(th->th.th_suspend_init_count == __kmp_fork_count);
  }
}

// return true if lock obtained, false otherwise
int __kmp_try_suspend_mx(kmp_info_t *th) {
  return (pthread_mutex_trylock(&th->th.th_suspend_mx.m_mutex) == 0);
}

void __kmp_lock_suspend_mx(kmp_info_t *th) {
  int status = pthread_mutex_lock(&th->th.th_suspend_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
}

void __kmp_unlock_suspend_mx(kmp_info_t *th) {
  int status = pthread_mutex_unlock(&th->th.th_suspend_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
}
/* This routine puts the calling thread to sleep after setting the
   sleep bit for the indicated flag variable to true. */
template <class C>
static inline void __kmp_suspend_template(int th_gtid, C *flag) {
  KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_suspend);
  kmp_info_t *th = __kmp_threads[th_gtid];
  int status;
  typename C::flag_t old_spin;

  KF_TRACE(30, ("__kmp_suspend_template: T#%d enter for flag = %p\n", th_gtid,
                flag->get()));

  __kmp_suspend_initialize_thread(th);

  status = pthread_mutex_lock(&th->th.th_suspend_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);

  KF_TRACE(10, ("__kmp_suspend_template: T#%d setting sleep bit for spin(%p)\n",
                th_gtid, flag->get()));

  /* Turn off the sleep bit if we were woken up before getting here. */
  old_spin = flag->set_sleeping();
  if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME &&
      __kmp_pause_status != kmp_soft_paused) {
    flag->unset_sleeping();
    status = pthread_mutex_unlock(&th->th.th_suspend_mx.m_mutex);
    KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
    return;
  }
  KF_TRACE(5, ("__kmp_suspend_template: T#%d set sleep bit for spin(%p)==%x,"
               " was %x\n",
               th_gtid, flag->get(), flag->load(), old_spin));

  if (flag->done_check_val(old_spin)) {
    old_spin = flag->unset_sleeping();
    KF_TRACE(5, ("__kmp_suspend_template: T#%d false alarm, reset sleep bit "
                 "for spin(%p)\n",
                 th_gtid, flag->get()));
  } else {
    /* Encapsulate in a loop as the documentation states that this may "with
       low probability" return when the condition variable has not been
       signaled or broadcast. */
    int deactivated = FALSE;
    TCW_PTR(th->th.th_sleep_loc, (void *)flag);

    while (flag->is_sleeping()) {
#ifdef DEBUG_SUSPEND
      char buffer[128];
      __kmp_suspend_count++;
      __kmp_print_cond(buffer, &th->th.th_suspend_cv);
      __kmp_printf("__kmp_suspend_template: suspending T#%d: %s\n", th_gtid,
                   buffer);
#endif
      // Mark the thread as no longer active (only in the first iteration of
      // the loop).
      if (!deactivated) {
        th->th.th_active = FALSE;
        if (th->th.th_active_in_pool) {
          th->th.th_active_in_pool = FALSE;
          KMP_ATOMIC_DEC(&__kmp_thread_pool_active_nth);
          KMP_DEBUG_ASSERT(TCR_4(__kmp_thread_pool_active_nth) >= 0);
        }
        deactivated = TRUE;
      }

#if USE_SUSPEND_TIMEOUT
      struct timespec now;
      struct timeval tval;
      int msecs;

      status = gettimeofday(&tval, NULL);
      KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
      TIMEVAL_TO_TIMESPEC(&tval, &now);

      msecs = (4 * __kmp_dflt_blocktime) + 200;
      now.tv_sec += msecs / 1000;
      now.tv_nsec += (msecs % 1000) * 1000;

      KF_TRACE(15, ("__kmp_suspend_template: T#%d about to perform "
                    "pthread_cond_timedwait\n",
                    th_gtid));
      status = pthread_cond_timedwait(&th->th.th_suspend_cv.c_cond,
                                      &th->th.th_suspend_mx.m_mutex, &now);
#else
      KF_TRACE(15, ("__kmp_suspend_template: T#%d about to perform"
                    " pthread_cond_wait\n",
                    th_gtid));
      status = pthread_cond_wait(&th->th.th_suspend_cv.c_cond,
                                 &th->th.th_suspend_mx.m_mutex);
#endif // USE_SUSPEND_TIMEOUT

      if ((status != 0) && (status != EINTR) && (status != ETIMEDOUT)) {
        KMP_SYSFAIL("pthread_cond_wait", status);
      }
#ifdef KMP_DEBUG
      if (status == ETIMEDOUT) {
        if (flag->is_sleeping()) {
          KF_TRACE(100,
                   ("__kmp_suspend_template: T#%d timeout wakeup\n", th_gtid));
        } else {
          KF_TRACE(2, ("__kmp_suspend_template: T#%d timeout wakeup, sleep bit "
                       "not set!\n",
                       th_gtid));
        }
      } else if (flag->is_sleeping()) {
        KF_TRACE(100,
                 ("__kmp_suspend_template: T#%d spurious wakeup\n", th_gtid));
      }
#endif
    } // while

    // Mark the thread as active again (if it was previously marked inactive).
    if (deactivated) {
      th->th.th_active = TRUE;
      if (TCR_4(th->th.th_in_pool)) {
        KMP_ATOMIC_INC(&__kmp_thread_pool_active_nth);
        th->th.th_active_in_pool = TRUE;
      }
    }
  }
#ifdef DEBUG_SUSPEND
  {
    char buffer[128];
    __kmp_print_cond(buffer, &th->th.th_suspend_cv);
    __kmp_printf("__kmp_suspend_template: T#%d has awakened: %s\n", th_gtid,
                 buffer);
  }
#endif

  status = pthread_mutex_unlock(&th->th.th_suspend_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
  KF_TRACE(30, ("__kmp_suspend_template: T#%d exit\n", th_gtid));
}

void __kmp_suspend_32(int th_gtid, kmp_flag_32 *flag) {
  __kmp_suspend_template(th_gtid, flag);
}
void __kmp_suspend_64(int th_gtid, kmp_flag_64 *flag) {
  __kmp_suspend_template(th_gtid, flag);
}
void __kmp_suspend_oncore(int th_gtid, kmp_flag_oncore *flag) {
  __kmp_suspend_template(th_gtid, flag);
}
/* This routine signals the thread specified by target_gtid to wake up after
   setting the sleep bit indicated by the flag argument to FALSE. The target
   thread must already have called __kmp_suspend_template(). */
template <class C>
static inline void __kmp_resume_template(int target_gtid, C *flag) {
  KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_resume);
  kmp_info_t *th = __kmp_threads[target_gtid];
  int status;

#ifdef KMP_DEBUG
  int gtid = TCR_4(__kmp_init_gtid) ? __kmp_get_gtid() : -1;
#endif

  KF_TRACE(30, ("__kmp_resume_template: T#%d wants to wakeup T#%d enter\n",
                gtid, target_gtid));
  KMP_DEBUG_ASSERT(gtid != target_gtid);

  __kmp_suspend_initialize_thread(th);

  status = pthread_mutex_lock(&th->th.th_suspend_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);

  if (!flag) { // coming from __kmp_null_resume_wrapper
    flag = (C *)CCAST(void *, th->th.th_sleep_loc);
  }

  // First, check if the flag is null or its type has changed. If so, someone
  // else woke it up.
  if (!flag || flag->get_type() != flag->get_ptr_type()) {
    // get_ptr_type simply shows what flag was cast to.
    KF_TRACE(5, ("__kmp_resume_template: T#%d exiting, thread T#%d already "
                 "awake: flag(%p)\n",
                 gtid, target_gtid, NULL));
    status = pthread_mutex_unlock(&th->th.th_suspend_mx.m_mutex);
    KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
    return;
  } else { // if multiple threads are sleeping, the flag should be internally
    // referring to a specific thread here
    typename C::flag_t old_spin = flag->unset_sleeping();
    if (!flag->is_sleeping_val(old_spin)) {
      KF_TRACE(5, ("__kmp_resume_template: T#%d exiting, thread T#%d already "
                   "awake: flag(%p): %u => %u\n",
                   gtid, target_gtid, flag->get(), old_spin, flag->load()));
      status = pthread_mutex_unlock(&th->th.th_suspend_mx.m_mutex);
      KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
      return;
    }
    KF_TRACE(5, ("__kmp_resume_template: T#%d about to wakeup T#%d, reset "
                 "sleep bit for flag's loc(%p): %u => %u\n",
                 gtid, target_gtid, flag->get(), old_spin, flag->load()));
  }
  TCW_PTR(th->th.th_sleep_loc, NULL);

#ifdef DEBUG_SUSPEND
  {
    char buffer[128];
    __kmp_print_cond(buffer, &th->th.th_suspend_cv);
    __kmp_printf("__kmp_resume_template: T#%d resuming T#%d: %s\n", gtid,
                 target_gtid, buffer);
  }
#endif

  status = pthread_cond_signal(&th->th.th_suspend_cv.c_cond);
  KMP_CHECK_SYSFAIL("pthread_cond_signal", status);
  status = pthread_mutex_unlock(&th->th.th_suspend_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
  KF_TRACE(30, ("__kmp_resume_template: T#%d exiting after signaling wake up"
                " for T#%d\n",
                gtid, target_gtid));
}

void __kmp_resume_32(int target_gtid, kmp_flag_32 *flag) {
  __kmp_resume_template(target_gtid, flag);
}
void __kmp_resume_64(int target_gtid, kmp_flag_64 *flag) {
  __kmp_resume_template(target_gtid, flag);
}
void __kmp_resume_oncore(int target_gtid, kmp_flag_oncore *flag) {
  __kmp_resume_template(target_gtid, flag);
}
#if KMP_USE_MONITOR
void __kmp_resume_monitor() {
  KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_resume);
  int status;
#ifdef KMP_DEBUG
  int gtid = TCR_4(__kmp_init_gtid) ? __kmp_get_gtid() : -1;
  KF_TRACE(30, ("__kmp_resume_monitor: T#%d wants to wakeup T#%d enter\n", gtid,
                KMP_GTID_MONITOR));
  KMP_DEBUG_ASSERT(gtid != KMP_GTID_MONITOR);
#endif // KMP_DEBUG
  status = pthread_mutex_lock(&__kmp_wait_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
#ifdef DEBUG_SUSPEND
  {
    char buffer[128];
    __kmp_print_cond(buffer, &__kmp_wait_cv.c_cond);
    __kmp_printf("__kmp_resume_monitor: T#%d resuming T#%d: %s\n", gtid,
                 KMP_GTID_MONITOR, buffer);
  }
#endif
  status = pthread_cond_signal(&__kmp_wait_cv.c_cond);
  KMP_CHECK_SYSFAIL("pthread_cond_signal", status);
  status = pthread_mutex_unlock(&__kmp_wait_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
  KF_TRACE(30, ("__kmp_resume_monitor: T#%d exiting after signaling wake up"
                " for T#%d\n",
                gtid, KMP_GTID_MONITOR));
}
#endif // KMP_USE_MONITOR
void __kmp_yield(int cond) {
  if (!cond)
    return;
#if KMP_USE_MONITOR
  if (!__kmp_yielding_on)
    return;
#else
  if (__kmp_yield_cycle && !KMP_YIELD_NOW())
    return;
#endif
  sched_yield();
}
void __kmp_gtid_set_specific(int gtid) {
  if (__kmp_init_gtid) {
    int status;
    status = pthread_setspecific(__kmp_gtid_threadprivate_key,
                                 (void *)(intptr_t)(gtid + 1));
    KMP_CHECK_SYSFAIL("pthread_setspecific", status);
  } else {
    KA_TRACE(50, ("__kmp_gtid_set_specific: runtime shutdown, returning\n"));
  }
}

int __kmp_gtid_get_specific() {
  int gtid;
  if (!__kmp_init_gtid) {
    KA_TRACE(50, ("__kmp_gtid_get_specific: runtime shutdown, returning "
                  "KMP_GTID_SHUTDOWN\n"));
    return KMP_GTID_SHUTDOWN;
  }
  gtid = (int)(size_t)pthread_getspecific(__kmp_gtid_threadprivate_key);
  if (gtid == 0) {
    gtid = KMP_GTID_DNE;
  } else {
    gtid--;
  }
  KA_TRACE(50, ("__kmp_gtid_get_specific: key:%d gtid:%d\n",
                __kmp_gtid_threadprivate_key, gtid));
  return gtid;
}
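// The +1/-1 shifting above exists because pthread_getspecific returns NULL
// both for "never set" and for a stored value of 0; storing gtid + 1 keeps
// gtid 0 distinguishable from "no value". The convention in brief:
//   pthread_setspecific(key, (void *)(intptr_t)(gtid + 1));
//   int v = (int)(intptr_t)pthread_getspecific(key);
//   int gtid = (v == 0) ? KMP_GTID_DNE : v - 1;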
double __kmp_read_cpu_time(void) {
  struct tms buffer;

  times(&buffer);

  return (buffer.tms_utime + buffer.tms_cutime) / (double)CLOCKS_PER_SEC;
}

int __kmp_read_system_info(struct kmp_sys_info *info) {
  int status;
  struct rusage r_usage;

  memset(info, 0, sizeof(*info));

  status = getrusage(RUSAGE_SELF, &r_usage);
  KMP_CHECK_SYSFAIL_ERRNO("getrusage", status);

  // The maximum resident set size utilized (in kilobytes)
  info->maxrss = r_usage.ru_maxrss;
  // The number of page faults serviced without any I/O
  info->minflt = r_usage.ru_minflt;
  // The number of page faults serviced that required I/O
  info->majflt = r_usage.ru_majflt;
  // The number of times a process was "swapped" out of memory
  info->nswap = r_usage.ru_nswap;
  // The number of times the file system had to perform input
  info->inblock = r_usage.ru_inblock;
  // The number of times the file system had to perform output
  info->oublock = r_usage.ru_oublock;
  // The number of times a context switch was voluntarily made
  info->nvcsw = r_usage.ru_nvcsw;
  // The number of times a context switch was forced
  info->nivcsw = r_usage.ru_nivcsw;

  return (status != 0);
}
void __kmp_read_system_time(double *delta) {
  double t_ns;
  struct timeval tval;
  struct timespec stop;
  int status;

  status = gettimeofday(&tval, NULL);
  KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
  TIMEVAL_TO_TIMESPEC(&tval, &stop);
  t_ns = TS2NS(stop) - TS2NS(__kmp_sys_timer_data.start);
  *delta = (t_ns * 1e-9);
}

void __kmp_clear_system_time(void) {
  struct timeval tval;
  int status;
  status = gettimeofday(&tval, NULL);
  KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
  TIMEVAL_TO_TIMESPEC(&tval, &__kmp_sys_timer_data.start);
}
static int __kmp_get_xproc(void) {
  int r = 0;

#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||    \
    KMP_OS_OPENBSD || KMP_OS_HURD || KMP_OS_KFREEBSD
  r = sysconf(_SC_NPROCESSORS_ONLN);
#elif KMP_OS_DARWIN
  // Find the number of available CPUs.
  kern_return_t rc;
  host_basic_info_data_t info;
  mach_msg_type_number_t num = HOST_BASIC_INFO_COUNT;
  rc = host_info(mach_host_self(), HOST_BASIC_INFO, (host_info_t)&info, &num);
  if (rc == 0 && num == HOST_BASIC_INFO_COUNT) {
    r = info.avail_cpus;
  } else {
    KMP_WARNING(CantGetNumAvailCPU);
    KMP_INFORM(AssumedNumCPU);
  }
#else
#error "Unknown or unsupported OS."
#endif

  return r > 0 ? r : 2; /* guess value of 2 if OS told us 0 */
}
int __kmp_read_from_file(char const *path, char const *format, ...) {
  int result;
  va_list args;

  va_start(args, format);
  FILE *f = fopen(path, "rb");
  if (f == NULL) {
    va_end(args);
    return 0;
  }
  result = vfscanf(f, format, args);
  fclose(f);
  va_end(args); // balance va_start

  return result;
}
void __kmp_runtime_initialize(void) {
  int status;
  pthread_mutexattr_t mutex_attr;
  pthread_condattr_t cond_attr;

  if (__kmp_init_runtime) {
    return;
  }

#if (KMP_ARCH_X86 || KMP_ARCH_X86_64)
  if (!__kmp_cpuinfo.initialized) {
    __kmp_query_cpuid(&__kmp_cpuinfo);
  }
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

  __kmp_xproc = __kmp_get_xproc();

  if (sysconf(_SC_THREADS)) {

    /* Query the maximum number of threads */
    __kmp_sys_max_nth = sysconf(_SC_THREAD_THREADS_MAX);
    if (__kmp_sys_max_nth == -1) {
      /* Unlimited threads for NPTL */
      __kmp_sys_max_nth = INT_MAX;
    } else if (__kmp_sys_max_nth <= 1) {
      /* Can't tell, just use PTHREAD_THREADS_MAX */
      __kmp_sys_max_nth = KMP_MAX_NTH;
    }

    /* Query the minimum stack size */
    __kmp_sys_min_stksize = sysconf(_SC_THREAD_STACK_MIN);
    if (__kmp_sys_min_stksize <= 1) {
      __kmp_sys_min_stksize = KMP_MIN_STKSIZE;
    }
  }

  /* Set up minimum number of threads to switch to TLS gtid */
  __kmp_tls_gtid_min = KMP_TLS_GTID_MIN;

  status = pthread_key_create(&__kmp_gtid_threadprivate_key,
                              __kmp_internal_end_dest);
  KMP_CHECK_SYSFAIL("pthread_key_create", status);
  status = pthread_mutexattr_init(&mutex_attr);
  KMP_CHECK_SYSFAIL("pthread_mutexattr_init", status);
  status = pthread_mutex_init(&__kmp_wait_mx.m_mutex, &mutex_attr);
  KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
  status = pthread_condattr_init(&cond_attr);
  KMP_CHECK_SYSFAIL("pthread_condattr_init", status);
  status = pthread_cond_init(&__kmp_wait_cv.c_cond, &cond_attr);
  KMP_CHECK_SYSFAIL("pthread_cond_init", status);
#if USE_ITT_BUILD
  __kmp_itt_initialize();
#endif /* USE_ITT_BUILD */

  __kmp_init_runtime = TRUE;
}
void __kmp_runtime_destroy(void) {
  int status;

  if (!__kmp_init_runtime) {
    return; // Nothing to do.
  }

#if USE_ITT_BUILD
  __kmp_itt_destroy();
#endif /* USE_ITT_BUILD */

  status = pthread_key_delete(__kmp_gtid_threadprivate_key);
  KMP_CHECK_SYSFAIL("pthread_key_delete", status);

  status = pthread_mutex_destroy(&__kmp_wait_mx.m_mutex);
  if (status != 0 && status != EBUSY) {
    KMP_SYSFAIL("pthread_mutex_destroy", status);
  }
  status = pthread_cond_destroy(&__kmp_wait_cv.c_cond);
  if (status != 0 && status != EBUSY) {
    KMP_SYSFAIL("pthread_cond_destroy", status);
  }
#if KMP_AFFINITY_SUPPORTED
  __kmp_affinity_uninitialize();
#endif

  __kmp_init_runtime = FALSE;
}
/* Put the thread to sleep for a time period */
void __kmp_thread_sleep(int millis) { sleep((millis + 500) / 1000); }

/* Calculate the elapsed wall clock time for the user */
void __kmp_elapsed(double *t) {
  int status;
#ifdef FIX_SGI_CLOCK
  struct timespec ts;

  status = clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts);
  KMP_CHECK_SYSFAIL_ERRNO("clock_gettime", status);
  *t = (double)ts.tv_nsec * (1.0 / (double)KMP_NSEC_PER_SEC) +
       (double)ts.tv_sec;
#else
  struct timeval tv;

  status = gettimeofday(&tv, NULL);
  KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
  *t = (double)tv.tv_usec * (1.0 / (double)KMP_USEC_PER_SEC) +
       (double)tv.tv_sec;
#endif
}

/* Calculate the elapsed wall clock tick for the user */
void __kmp_elapsed_tick(double *t) { *t = 1 / (double)CLOCKS_PER_SEC; }

/* Return the current time stamp in nsec */
kmp_uint64 __kmp_now_nsec() {
  struct timeval t;
  gettimeofday(&t, NULL);
  kmp_uint64 nsec = (kmp_uint64)KMP_NSEC_PER_SEC * (kmp_uint64)t.tv_sec +
                    (kmp_uint64)1000 * (kmp_uint64)t.tv_usec;
  return nsec;
}
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
/* Measure clock ticks per millisecond */
void __kmp_initialize_system_tick() {
  kmp_uint64 now, nsec2, diff;
  kmp_uint64 delay = 100000; // ~50-100 usec on most machines.
  kmp_uint64 nsec = __kmp_now_nsec();
  kmp_uint64 goal = __kmp_hardware_timestamp() + delay;
  while ((now = __kmp_hardware_timestamp()) < goal)
    ;
  nsec2 = __kmp_now_nsec();
  diff = nsec2 - nsec;
  if (diff > 0) {
    kmp_uint64 tpms = (kmp_uint64)(1e6 * (delay + (now - goal)) / diff);
    if (tpms > 0)
      __kmp_ticks_per_msec = tpms;
  }
}
#endif
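// Calibration arithmetic: (delay + (now - goal)) hardware ticks elapsed while
// diff = nsec2 - nsec wall nanoseconds passed, so
//   ticks_per_msec = ticks_elapsed / diff * 1e6 (ns per ms).
// Worked example with illustrative numbers: 100000 + 40 ticks over 50000 ns
// gives 1e6 * 100040 / 50000 ~= 2000800 ticks/ms, i.e. a ~2.0 GHz timestamp
// counter.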
/* Determine whether the given address is mapped into the current address
   space. */
int __kmp_is_address_mapped(void *addr) {
  int found = 0;
  int rc;

#if KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_HURD || KMP_OS_KFREEBSD
  // Parse /proc/<pid>/maps to get all the address ranges mapped into the
  // address space.
  char *name = __kmp_str_format("/proc/%d/maps", getpid());
  FILE *file = NULL;

  file = fopen(name, "r");
  KMP_ASSERT(file != NULL);

  for (;;) {
    void *beginning = NULL;
    void *ending = NULL;
    char perms[5];

    rc = fscanf(file, "%p-%p %4s %*[^\n]\n", &beginning, &ending, perms);
    if (rc == EOF) {
      break;
    }
    KMP_ASSERT(rc == 3 &&
               KMP_STRLEN(perms) == 4); // Make sure all fields are read.

    // Ending address is not included in the region, but beginning is.
    if ((addr >= beginning) && (addr < ending)) {
      perms[2] = 0; // 3rd and 4th character do not matter.
      if (strcmp(perms, "rw") == 0) {
        // Memory we are looking for should be readable and writable.
        found = 1;
      }
      break;
    }
  }

  fclose(file);
  KMP_INTERNAL_FREE(name);

#elif KMP_OS_DARWIN
  // /proc is not available on Darwin; probe the address by trying to read one
  // byte through the Mach VM interface.
  int buffer;
  vm_size_t count;
  rc = vm_read_overwrite(
      mach_task_self(), // Task to read memory of.
      (vm_address_t)(addr), // Address to read from.
      1, // Number of bytes to be read.
      (vm_address_t)(&buffer), // Address of buffer to save read bytes in.
      &count); // Address of var to save number of read bytes in.
  if (rc == 0) {
    found = 1; // Memory successfully read.
  }

#elif KMP_OS_NETBSD
  int mib[5] = {CTL_VM, VM_PROC, VM_PROC_MAP, getpid(),
                sizeof(struct kinfo_vmentry)};
  size_t size;
  rc = sysctl(mib, __arraycount(mib), NULL, &size, NULL, 0);
  KMP_ASSERT(!rc);
  KMP_ASSERT(size);

  size = size * 4 / 3;
  struct kinfo_vmentry *kiv = (struct kinfo_vmentry *)KMP_INTERNAL_MALLOC(size);
  KMP_ASSERT(kiv);

  rc = sysctl(mib, __arraycount(mib), kiv, &size, NULL, 0);
  KMP_ASSERT(!rc);
  KMP_ASSERT(size);

  for (size_t i = 0; i < size; i++) {
    // The address must fall inside the entry: start <= addr < end.
    if (kiv[i].kve_start <= (uint64_t)addr && (uint64_t)addr < kiv[i].kve_end) {
      found = 1;
      break;
    }
  }
  KMP_INTERNAL_FREE(kiv);

#elif KMP_OS_DRAGONFLY || KMP_OS_OPENBSD
  // FIXME(DragonFly, OpenBSD): Implement this.
  found = 1;

#else
#error "Unknown or unsupported OS"
#endif

  return found;
}
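// For reference, each /proc/<pid>/maps line has the form
//   00400000-0040c000 r-xp 00000000 08:01 1234  /bin/cat
// so the "%p-%p %4s" scan above captures the range and the four permission
// flags; only the first two flag characters ("rw") matter for this check.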
#ifdef USE_LOAD_BALANCE

#if KMP_OS_DARWIN || KMP_OS_NETBSD

// The function returns the rounded value of the system load average during a
// given time interval, selected by __kmp_load_balance_interval (default is
// 60 sec; other sensible values are 300 sec and 900 sec). It returns -1 in
// case of error.
int __kmp_get_load_balance(int max) {
  double averages[3];
  int ret_avg = 0;

  int res = getloadavg(averages, 3);

  // Check __kmp_load_balance_interval to determine which of the averages to
  // use; getloadavg() may return fewer samples than requested, so guard each
  // index with the corresponding res value.
  if (__kmp_load_balance_interval < 180 && (res >= 1)) {
    ret_avg = averages[0]; // 1 min
  } else if ((__kmp_load_balance_interval >= 180 &&
              __kmp_load_balance_interval < 600) &&
             (res >= 2)) {
    ret_avg = averages[1]; // 5 min
  } else if ((__kmp_load_balance_interval >= 600) && (res == 3)) {
    ret_avg = averages[2]; // 15 min
  } else { // Error occurred
    return -1;
  }

  return ret_avg;
}

#else // Linux* OS
// The function returns the number of running (not sleeping) threads, or -1 in
// case of error. An error could be reported if the Linux* OS kernel is too old
// (without "/proc" support). Counting running threads stops once max running
// threads are encountered.
int __kmp_get_load_balance(int max) {
  static int permanent_error = 0;
  static int glb_running_threads = 0; // Saved count of the running threads.
  static double glb_call_time = 0; // Thread balance algorithm call time.

  int running_threads = 0; // Number of running threads in the system.

  DIR *proc_dir = NULL; // Handle of "/proc/" directory.
  struct dirent *proc_entry = NULL;

  kmp_str_buf_t task_path; // "/proc/<pid>/task/" path.
  DIR *task_dir = NULL; // Handle of "/proc/<pid>/task/" directory.
  struct dirent *task_entry = NULL;
  int task_path_fixed_len;

  kmp_str_buf_t stat_path; // "/proc/<pid>/task/<tid>/stat" path.
  int stat_file = -1;
  int stat_path_fixed_len;

  int total_processes = 0; // Total number of processes in system.
  int total_threads = 0; // Total number of threads in system.

  double call_time = 0.0;

  __kmp_str_buf_init(&task_path);
  __kmp_str_buf_init(&stat_path);

  __kmp_elapsed(&call_time);

  if (glb_call_time &&
      (call_time - glb_call_time < __kmp_load_balance_interval)) {
    running_threads = glb_running_threads;
    goto finish;
  }

  glb_call_time = call_time;

  // Do not spend time on scanning "/proc/" if we have a permanent error.
  if (permanent_error) {
    running_threads = -1;
    goto finish;
  }

  if (max <= 0) {
    max = INT_MAX;
  }

  // Open "/proc/" directory.
  proc_dir = opendir("/proc");
  if (proc_dir == NULL) {
    // Cannot open "/proc/". Probably the kernel does not support it. Return an
    // error now and in subsequent calls.
    running_threads = -1;
    permanent_error = 1;
    goto finish;
  }

  // Initialize fixed part of task_path. This part will not change.
  __kmp_str_buf_cat(&task_path, "/proc/", 6);
  task_path_fixed_len = task_path.used; // Remember number of used characters.

  proc_entry = readdir(proc_dir);
  while (proc_entry != NULL) {
    // Proc entry is a directory and its name starts with a digit. Assume it is
    // a process' directory.
    if (proc_entry->d_type == DT_DIR && isdigit(proc_entry->d_name[0])) {

      ++total_processes;
      // Make sure the init process is the very first in "/proc", so we can
      // replace strcmp(proc_entry->d_name, "1") == 0 with the simpler
      // total_processes == 1.
      KMP_DEBUG_ASSERT(total_processes != 1 ||
                       strcmp(proc_entry->d_name, "1") == 0);

      // Construct task_path.
      task_path.used = task_path_fixed_len; // Reset task_path to "/proc/".
      __kmp_str_buf_cat(&task_path, proc_entry->d_name,
                        KMP_STRLEN(proc_entry->d_name));
      __kmp_str_buf_cat(&task_path, "/task", 5);

      task_dir = opendir(task_path.str);
      if (task_dir == NULL) {
        // A process can finish between reading the "/proc/" entry and opening
        // its "task/" directory, so in general this is not an error. But the
        // "init" process (pid 1) always exists; if we cannot open
        // "/proc/1/task/", the kernel does not support "task/": report a
        // permanent error.
        if (strcmp(proc_entry->d_name, "1") == 0) {
          running_threads = -1;
          permanent_error = 1;
          goto finish;
        }
      } else {
        // Construct fixed part of stat file path.
        __kmp_str_buf_clear(&stat_path);
        __kmp_str_buf_cat(&stat_path, task_path.str, task_path.used);
        __kmp_str_buf_cat(&stat_path, "/", 1);
        stat_path_fixed_len = stat_path.used;

        task_entry = readdir(task_dir);
        while (task_entry != NULL) {
          // It is a directory and name starts with a digit.
          if (proc_entry->d_type == DT_DIR && isdigit(task_entry->d_name[0])) {

            ++total_threads;

            // Construct complete stat file path. A series of
            // __kmp_str_buf_cat calls works a bit faster than
            // __kmp_str_buf_print.
            stat_path.used = stat_path_fixed_len; // Reset to fixed part.
            __kmp_str_buf_cat(&stat_path, task_entry->d_name,
                              KMP_STRLEN(task_entry->d_name));
            __kmp_str_buf_cat(&stat_path, "/stat", 5);

            // Note: low-level open/read/close is used; the high-level
            // fopen/fclose API works ~30% slower.
            stat_file = open(stat_path.str, O_RDONLY);
            if (stat_file == -1) {
              // Cannot report an error: the task (thread) may terminate just
              // before reading this file.
            } else {
              /* Content of "stat" file looks like:
                 24285 (program name) S ...
                 The thread state is the field right after the closing
                 parenthesis of the (possibly space-containing) program
                 name. */
              char buffer[256];
              int len;
              len = read(stat_file, buffer, sizeof(buffer) - 1);
              if (len >= 0) {
                buffer[len] = 0;
                char *close_parent = strstr(buffer, ") ");
                if (close_parent != NULL) {
                  char state = *(close_parent + 2);
                  if (state == 'R') {
                    ++running_threads;
                    if (running_threads >= max) {
                      goto finish;
                    }
                  }
                }
              }
              close(stat_file);
              stat_file = -1;
            }
          }
          task_entry = readdir(task_dir);
        }
        closedir(task_dir);
        task_dir = NULL;
      }
    }
    proc_entry = readdir(proc_dir);
  }

  // There _might_ be a timing hole where the thread executing this code gets
  // skipped in the load balance and running_threads is 0. Assert in debug
  // builds only!
  KMP_DEBUG_ASSERT(running_threads > 0);
  if (running_threads <= 0) {
    running_threads = 1;
  }

finish: // Clean up and exit.
  if (proc_dir != NULL) {
    closedir(proc_dir);
  }
  __kmp_str_buf_free(&task_path);
  if (task_dir != NULL) {
    closedir(task_dir);
  }
  __kmp_str_buf_free(&stat_path);
  if (stat_file != -1) {
    close(stat_file);
  }

  glb_running_threads = running_threads;

  return running_threads;

} // __kmp_get_load_balance

#endif // KMP_OS_DARWIN
#endif // USE_LOAD_BALANCE

#if !(KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_MIC ||                           \
      ((KMP_OS_LINUX || KMP_OS_DARWIN) && KMP_ARCH_AARCH64) || KMP_ARCH_PPC64)
// We really only need the case with 1 argument, because CLANG always builds
// a struct of pointers to shared variables referenced in the outlined
// function.
int __kmp_invoke_microtask(microtask_t pkfn, int gtid, int tid, int argc,
                           void *p_argv[]
#if OMPT_SUPPORT
                           ,
                           void **exit_frame_ptr
#endif
                           ) {
#if OMPT_SUPPORT
  *exit_frame_ptr = OMPT_GET_FRAME_ADDRESS(0);
#endif

  switch (argc) {
  default:
    fprintf(stderr, "Too many args to microtask: %d!\n", argc);
    fflush(stderr);
    exit(-1);
  case 0:
    (*pkfn)(&gtid, &tid);
    break;
  case 1:
    (*pkfn)(&gtid, &tid, p_argv[0]);
    break;
  case 2:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1]);
    break;
  case 3:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2]);
    break;
  case 4:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3]);
    break;
  case 5:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4]);
    break;
  case 6:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5]);
    break;
  case 7:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6]);
    break;
  case 8:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7]);
    break;
  case 9:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8]);
    break;
  case 10:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9]);
    break;
  case 11:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10]);
    break;
  case 12:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
            p_argv[11]);
    break;
  case 13:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
            p_argv[11], p_argv[12]);
    break;
  case 14:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
            p_argv[11], p_argv[12], p_argv[13]);
    break;
  case 15:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
            p_argv[11], p_argv[12], p_argv[13], p_argv[14]);
    break;
  }

#if OMPT_SUPPORT
  *exit_frame_ptr = 0;
#endif

  return 1;
}
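// The switch exists because ISO C has no portable way to forward a variable
// number of arguments to a callee expecting them positionally; each case
// spells out one arity up to this fallback's cap of 15. In practice clang
// packs all shared variables into a single struct, so argc is almost always 1.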