/*
 * z_Linux_util.cpp -- platform specific routines.
 */

//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "kmp.h"
#include "kmp_affinity.h"
#include "kmp_i18n.h"
#include "kmp_io.h"
#include "kmp_itt.h"
#include "kmp_lock.h"
#include "kmp_stats.h"
#include "kmp_str.h"
#include "kmp_wait_release.h"
#include "kmp_wrapper_getpid.h"

#if !KMP_OS_DRAGONFLY && !KMP_OS_FREEBSD && !KMP_OS_NETBSD && !KMP_OS_OPENBSD
#include <alloca.h>
#endif
#include <math.h> // HUGE_VAL.
#if KMP_OS_LINUX
#include <semaphore.h>
#endif // KMP_OS_LINUX
#include <sys/resource.h>
#include <sys/syscall.h>
#include <sys/time.h>
#include <sys/times.h>
#include <unistd.h>

#if KMP_OS_LINUX
#include <sys/sysinfo.h>
#if KMP_USE_FUTEX
// We should really include <futex.h>, but that causes compatibility problems
// on different Linux* OS distributions that either require that you include
// (or break when you try to include) <pci/types.h>. Since all we need are the
// two macros below (which are part of the kernel ABI, so they can't change),
// we just define the constants here and don't include <futex.h>.
#ifndef FUTEX_WAIT
#define FUTEX_WAIT 0
#endif
#ifndef FUTEX_WAKE
#define FUTEX_WAKE 1
#endif
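// A minimal sketch of how these two constants reach the kernel (this mirrors
// the capability probe in __kmp_futex_determine_capable() below; it is not an
// additional API):
//   int loc = 0;
//   syscall(__NR_futex, &loc, FUTEX_WAKE, 1, NULL, NULL, 0);
// wakes up to one thread waiting on `loc` (here, none).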
#endif
#elif KMP_OS_DARWIN
#include <mach/mach.h>
#include <sys/sysctl.h>
#elif KMP_OS_DRAGONFLY || KMP_OS_FREEBSD
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/user.h>
#include <pthread_np.h>
#elif KMP_OS_NETBSD || KMP_OS_OPENBSD
#include <sys/types.h>
#include <sys/sysctl.h>
#endif

#include <ctype.h>
#include <dirent.h>
#include <fcntl.h>

struct kmp_sys_timer {
  struct timespec start;
};

// Convert timespec to nanoseconds.
#define TS2NS(timespec)                                                        \
  (((timespec).tv_sec * (long int)1e9) + (timespec).tv_nsec)
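// Worked example: for struct timespec ts = {1, 500} (tv_sec = 1,
// tv_nsec = 500), TS2NS(ts) = 1 * 1e9 + 500 = 1000000500 nanoseconds.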

static struct kmp_sys_timer __kmp_sys_timer_data;

#if KMP_HANDLE_SIGNALS
typedef void (*sig_func_t)(int);
STATIC_EFI2_WORKAROUND struct sigaction __kmp_sighldrs[NSIG];
static sigset_t __kmp_sigset;
#endif

static int __kmp_init_runtime = FALSE;

static int __kmp_fork_count = 0;

static pthread_condattr_t __kmp_suspend_cond_attr;
static pthread_mutexattr_t __kmp_suspend_mutex_attr;

static kmp_cond_align_t __kmp_wait_cv;
static kmp_mutex_align_t __kmp_wait_mx;

kmp_uint64 __kmp_ticks_per_msec = 1000000;
kmp_uint64 __kmp_ticks_per_usec = 1000;
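// A plausible reading of these defaults: they correspond to a
// nanosecond-granularity tick source (1e6 ticks per millisecond, 1e3 ticks
// per microsecond); architecture-specific code elsewhere in the runtime may
// recalibrate them.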

#ifdef DEBUG_SUSPEND
static void __kmp_print_cond(char *buffer, kmp_cond_align_t *cond) {
  KMP_SNPRINTF(buffer, 128, "(cond (lock (%ld, %d)), (descr (%p)))",
               cond->c_cond.__c_lock.__status, cond->c_cond.__c_lock.__spinlock,
               cond->c_cond.__c_waiting);
}
#endif

#if ((KMP_OS_LINUX || KMP_OS_FREEBSD) && KMP_AFFINITY_SUPPORTED)

/* Affinity support */

void __kmp_affinity_bind_thread(int which) {
  KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
              "Illegal set affinity operation when not capable");

  kmp_affin_mask_t *mask;
  KMP_CPU_ALLOC_ON_STACK(mask);
  KMP_CPU_ZERO(mask);
  KMP_CPU_SET(which, mask);
  __kmp_set_system_affinity(mask, TRUE);
  KMP_CPU_FREE_FROM_STACK(mask);
}

/* Determine if we can access affinity functionality on this version of
 * Linux* OS by checking __NR_sched_{get,set}affinity system calls, and set
 * __kmp_affin_mask_size to the appropriate value (0 means not capable). */
void __kmp_affinity_determine_capable(const char *env_var) {
  // Check and see if the OS supports thread affinity.

#if KMP_OS_LINUX
#define KMP_CPU_SET_SIZE_LIMIT (1024 * 1024)
#define KMP_CPU_SET_TRY_SIZE CACHE_LINE
#elif KMP_OS_FREEBSD
#define KMP_CPU_SET_SIZE_LIMIT (sizeof(cpuset_t))
#endif
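// Probe strategy (sketch of the code below): on Linux* OS the kernel can
// report the mask size it expects, so we first call sched_getaffinity with a
// small cache-line-sized buffer (KMP_CPU_SET_TRY_SIZE) and, failing that,
// retry with buffer sizes doubling from 1 byte up to the 1 MiB limit above.
// On FreeBSD* the mask size is simply sizeof(cpuset_t).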

  int verbose = __kmp_affinity.flags.verbose;
  int warnings = __kmp_affinity.flags.warnings;
  enum affinity_type type = __kmp_affinity.type;

#if KMP_OS_LINUX
  long gCode;
  unsigned char *buf;
  buf = (unsigned char *)KMP_INTERNAL_MALLOC(KMP_CPU_SET_SIZE_LIMIT);

  // If the syscall returns a suggestion for the size,
  // then we don't have to search for an appropriate size.
  gCode = syscall(__NR_sched_getaffinity, 0, KMP_CPU_SET_TRY_SIZE, buf);
  KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                "initial getaffinity call returned %ld errno = %d\n",
                gCode, errno));

  if (gCode < 0 && errno != EINVAL) {
    // System call not supported
    if (verbose ||
        (warnings && (type != affinity_none) && (type != affinity_default) &&
         (type != affinity_disabled))) {
      int error = errno;
      kmp_msg_t err_code = KMP_ERR(error);
      __kmp_msg(kmp_ms_warning, KMP_MSG(GetAffSysCallNotSupported, env_var),
                err_code, __kmp_msg_null);
      if (__kmp_generate_warnings == kmp_warnings_off) {
        __kmp_str_free(&err_code.str);
      }
    }
    KMP_AFFINITY_DISABLE();
    KMP_INTERNAL_FREE(buf);
    return;
  } else if (gCode > 0) {
    // The optimal situation: the OS returns the size of the buffer it expects.
    KMP_AFFINITY_ENABLE(gCode);
    KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                  "affinity supported (mask size %d)\n",
                  (int)__kmp_affin_mask_size));
    KMP_INTERNAL_FREE(buf);
    return;
  }

  // Call the getaffinity system call repeatedly with increasing set sizes
  // until we succeed, or reach an upper bound on the search.
  KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                "searching for proper set size\n"));
  int size;
  for (size = 1; size <= KMP_CPU_SET_SIZE_LIMIT; size *= 2) {
    gCode = syscall(__NR_sched_getaffinity, 0, size, buf);
    KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                  "getaffinity for mask size %d returned %ld errno = %d\n",
                  size, gCode, errno));

    if (gCode < 0) {
      if (errno == ENOSYS) {
        // We shouldn't get here
        KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                      "inconsistent OS call behavior: errno == ENOSYS for mask "
                      "size %d\n",
                      size));
        if (verbose ||
            (warnings && (type != affinity_none) &&
             (type != affinity_default) && (type != affinity_disabled))) {
          int error = errno;
          kmp_msg_t err_code = KMP_ERR(error);
          __kmp_msg(kmp_ms_warning, KMP_MSG(GetAffSysCallNotSupported, env_var),
                    err_code, __kmp_msg_null);
          if (__kmp_generate_warnings == kmp_warnings_off) {
            __kmp_str_free(&err_code.str);
          }
        }
        KMP_AFFINITY_DISABLE();
        KMP_INTERNAL_FREE(buf);
        return;
      }
      continue;
    }

    KMP_AFFINITY_ENABLE(gCode);
    KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                  "affinity supported (mask size %d)\n",
                  (int)__kmp_affin_mask_size));
    KMP_INTERNAL_FREE(buf);
    return;
  }
#elif KMP_OS_FREEBSD
  long gCode;
  unsigned char *buf;
  buf = (unsigned char *)KMP_INTERNAL_MALLOC(KMP_CPU_SET_SIZE_LIMIT);
  gCode = pthread_getaffinity_np(pthread_self(), KMP_CPU_SET_SIZE_LIMIT,
                                 reinterpret_cast<cpuset_t *>(buf));
  KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                "initial getaffinity call returned %ld errno = %d\n",
                gCode, errno));
  if (gCode == 0) {
    KMP_AFFINITY_ENABLE(KMP_CPU_SET_SIZE_LIMIT);
    KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                  "affinity supported (mask size %d)\n",
                  (int)__kmp_affin_mask_size));
    KMP_INTERNAL_FREE(buf);
    return;
  }
#endif
  KMP_INTERNAL_FREE(buf);

  // Affinity is not supported
  KMP_AFFINITY_DISABLE();
  KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                "cannot determine mask size - affinity not supported\n"));
  if (verbose || (warnings && (type != affinity_none) &&
                  (type != affinity_default) && (type != affinity_disabled))) {
    KMP_WARNING(AffCantGetMaskSize, env_var);
  }
}

#endif // (KMP_OS_LINUX || KMP_OS_FREEBSD) && KMP_AFFINITY_SUPPORTED

#if KMP_USE_FUTEX

int __kmp_futex_determine_capable() {
  int loc = 0;
  long rc = syscall(__NR_futex, &loc, FUTEX_WAKE, 1, NULL, NULL, 0);
  int retval = (rc == 0) || (errno != ENOSYS);

  KA_TRACE(10,
           ("__kmp_futex_determine_capable: rc = %ld errno = %d\n", rc, errno));
  KA_TRACE(10, ("__kmp_futex_determine_capable: futex syscall%s supported\n",
                retval ? "" : " not"));

  return retval;
}
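// The probe above asks the kernel to wake waiters on a private variable that
// nobody waits on: if futexes are supported, this is a harmless no-op that
// returns 0; on kernels without futex support, the syscall fails with ENOSYS.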

#endif // KMP_USE_FUTEX

#if (KMP_ARCH_X86 || KMP_ARCH_X86_64) && (!KMP_ASM_INTRINS)
/* The IA-32 architecture only provides a 32-bit "add-exchange" instruction,
   so we implement these routines with compare_and_store instead. */

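// All of the routines below follow the same lock-free retry pattern: read the
// current value, compute the desired value, then attempt a compare-and-store
// (the _REL variants); if another thread changed *p in the meantime, pause
// briefly and retry with a freshly loaded value.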
kmp_int8 __kmp_test_then_or8(volatile kmp_int8 *p, kmp_int8 d) {
  kmp_int8 old_value, new_value;

  old_value = TCR_1(*p);
  new_value = old_value | d;

  while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_1(*p);
    new_value = old_value | d;
  }
  return old_value;
}

kmp_int8 __kmp_test_then_and8(volatile kmp_int8 *p, kmp_int8 d) {
  kmp_int8 old_value, new_value;

  old_value = TCR_1(*p);
  new_value = old_value & d;

  while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_1(*p);
    new_value = old_value & d;
  }
  return old_value;
}

kmp_uint32 __kmp_test_then_or32(volatile kmp_uint32 *p, kmp_uint32 d) {
  kmp_uint32 old_value, new_value;

  old_value = TCR_4(*p);
  new_value = old_value | d;

  while (!KMP_COMPARE_AND_STORE_REL32(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_4(*p);
    new_value = old_value | d;
  }
  return old_value;
}

kmp_uint32 __kmp_test_then_and32(volatile kmp_uint32 *p, kmp_uint32 d) {
  kmp_uint32 old_value, new_value;

  old_value = TCR_4(*p);
  new_value = old_value & d;

  while (!KMP_COMPARE_AND_STORE_REL32(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_4(*p);
    new_value = old_value & d;
  }
  return old_value;
}

#if KMP_ARCH_X86
kmp_int8 __kmp_test_then_add8(volatile kmp_int8 *p, kmp_int8 d) {
  kmp_int8 old_value, new_value;

  old_value = TCR_1(*p);
  new_value = old_value + d;

  while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_1(*p);
    new_value = old_value + d;
  }
  return old_value;
}

kmp_int64 __kmp_test_then_add64(volatile kmp_int64 *p, kmp_int64 d) {
  kmp_int64 old_value, new_value;

  old_value = TCR_8(*p);
  new_value = old_value + d;

  while (!KMP_COMPARE_AND_STORE_REL64(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_8(*p);
    new_value = old_value + d;
  }
  return old_value;
}
#endif /* KMP_ARCH_X86 */

kmp_uint64 __kmp_test_then_or64(volatile kmp_uint64 *p, kmp_uint64 d) {
  kmp_uint64 old_value, new_value;

  old_value = TCR_8(*p);
  new_value = old_value | d;
  while (!KMP_COMPARE_AND_STORE_REL64(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_8(*p);
    new_value = old_value | d;
  }
  return old_value;
}

kmp_uint64 __kmp_test_then_and64(volatile kmp_uint64 *p, kmp_uint64 d) {
  kmp_uint64 old_value, new_value;

  old_value = TCR_8(*p);
  new_value = old_value & d;
  while (!KMP_COMPARE_AND_STORE_REL64(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_8(*p);
    new_value = old_value & d;
  }
  return old_value;
}

#endif /* (KMP_ARCH_X86 || KMP_ARCH_X86_64) && (! KMP_ASM_INTRINS) */

void __kmp_terminate_thread(int gtid) {
  int status;
  kmp_info_t *th = __kmp_threads[gtid];

  if (!th)
    return;

#ifdef KMP_CANCEL_THREADS
  KA_TRACE(10, ("__kmp_terminate_thread: kill (%d)\n", gtid));
  status = pthread_cancel(th->th.th_info.ds.ds_thread);
  if (status != 0 && status != ESRCH) {
    __kmp_fatal(KMP_MSG(CantTerminateWorkerThread), KMP_ERR(status),
                __kmp_msg_null);
  }
#endif
  KMP_YIELD(TRUE);
} // __kmp_terminate_thread

/* Set thread stack info according to values returned by pthread_getattr_np().
   If the values are unreasonable, assume the call failed and use the
   incremental stack refinement method instead. Returns TRUE if the stack
   parameters could be determined exactly, FALSE if incremental refinement is
   necessary. */
static kmp_int32 __kmp_set_stack_info(int gtid, kmp_info_t *th) {
  int stack_data;
#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||    \
    KMP_OS_HURD
  pthread_attr_t attr;
  int status;
  size_t size = 0;
  void *addr = 0;

  /* Always do incremental stack refinement for ubermaster threads since the
     initial thread stack range can be reduced by sibling thread creation, so
     pthread_attr_getstack may cause thread gtid aliasing. */
  if (!KMP_UBER_GTID(gtid)) {

    /* Fetch the real thread attributes */
    status = pthread_attr_init(&attr);
    KMP_CHECK_SYSFAIL("pthread_attr_init", status);
#if KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD
    status = pthread_attr_get_np(pthread_self(), &attr);
    KMP_CHECK_SYSFAIL("pthread_attr_get_np", status);
#else
    status = pthread_getattr_np(pthread_self(), &attr);
    KMP_CHECK_SYSFAIL("pthread_getattr_np", status);
#endif
    status = pthread_attr_getstack(&attr, &addr, &size);
    KMP_CHECK_SYSFAIL("pthread_attr_getstack", status);
    KA_TRACE(60,
             ("__kmp_set_stack_info: T#%d pthread_attr_getstack returned size:"
              " %lu, low addr: %p\n",
              gtid, size, addr));
    status = pthread_attr_destroy(&attr);
    KMP_CHECK_SYSFAIL("pthread_attr_destroy", status);
  }

  if (size != 0 && addr != 0) { // was stack parameter determination successful?
    /* Store the correct base and size */
    TCW_PTR(th->th.th_info.ds.ds_stackbase, (((char *)addr) + size));
    TCW_PTR(th->th.th_info.ds.ds_stacksize, size);
    TCW_4(th->th.th_info.ds.ds_stackgrow, FALSE);
    return TRUE;
  }
#endif /* KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD \
          || KMP_OS_HURD */
  /* Use incremental refinement starting from the initial conservative
     estimate */
  TCW_PTR(th->th.th_info.ds.ds_stacksize, 0);
  TCW_PTR(th->th.th_info.ds.ds_stackbase, &stack_data);
  TCW_4(th->th.th_info.ds.ds_stackgrow, TRUE);
  return FALSE;
}
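// In the fallback path above, the stack base is seeded with the address of a
// local variable and the size with 0 (ds_stackgrow == TRUE); the runtime can
// then refine this conservative estimate incrementally, as described in the
// comment preceding the function.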

static void *__kmp_launch_worker(void *thr) {
  int status, old_type, old_state;
#ifdef KMP_BLOCK_SIGNALS
  sigset_t new_set, old_set;
#endif /* KMP_BLOCK_SIGNALS */
  void *exit_val;
#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||    \
    KMP_OS_OPENBSD || KMP_OS_HURD
  void *volatile padding = 0;
#endif
  int gtid;

  gtid = ((kmp_info_t *)thr)->th.th_info.ds.ds_gtid;
  __kmp_gtid_set_specific(gtid);
#ifdef KMP_TDATA_GTID
  __kmp_gtid = gtid;
#endif
#if KMP_STATS_ENABLED
  // set thread local index to point to thread-specific stats
  __kmp_stats_thread_ptr = ((kmp_info_t *)thr)->th.th_stats;
  __kmp_stats_thread_ptr->startLife();
  KMP_SET_THREAD_STATE(IDLE);
  KMP_INIT_PARTITIONED_TIMERS(OMP_idle);
#endif

#if USE_ITT_BUILD
  __kmp_itt_thread_name(gtid);
#endif /* USE_ITT_BUILD */

#if KMP_AFFINITY_SUPPORTED
  __kmp_affinity_bind_init_mask(gtid);
#endif

#ifdef KMP_CANCEL_THREADS
  status = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old_type);
  KMP_CHECK_SYSFAIL("pthread_setcanceltype", status);
  // josh todo: isn't PTHREAD_CANCEL_ENABLE default for newly-created threads?
  status = pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &old_state);
  KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
#endif

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
  // Set FP control regs to be a copy of the parallel initialization thread's.
  __kmp_clear_x87_fpu_status_word();
  __kmp_load_x87_fpu_control_word(&__kmp_init_x87_fpu_control_word);
  __kmp_load_mxcsr(&__kmp_init_mxcsr);
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

#ifdef KMP_BLOCK_SIGNALS
  status = sigfillset(&new_set);
  KMP_CHECK_SYSFAIL_ERRNO("sigfillset", status);
  status = pthread_sigmask(SIG_BLOCK, &new_set, &old_set);
  KMP_CHECK_SYSFAIL("pthread_sigmask", status);
#endif /* KMP_BLOCK_SIGNALS */

#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||    \
    KMP_OS_OPENBSD
  if (__kmp_stkoffset > 0 && gtid > 0) {
    padding = KMP_ALLOCA(gtid * __kmp_stkoffset);
    (void)padding;
  }
#endif

  KMP_MB();
  __kmp_set_stack_info(gtid, (kmp_info_t *)thr);

  __kmp_check_stack_overlap((kmp_info_t *)thr);

  exit_val = __kmp_launch_thread((kmp_info_t *)thr);

#ifdef KMP_BLOCK_SIGNALS
  status = pthread_sigmask(SIG_SETMASK, &old_set, NULL);
  KMP_CHECK_SYSFAIL("pthread_sigmask", status);
#endif /* KMP_BLOCK_SIGNALS */

  return exit_val;
}

#if KMP_USE_MONITOR
/* The monitor thread controls all of the threads in the complex */

static void *__kmp_launch_monitor(void *thr) {
  int status, old_type, old_state;
#ifdef KMP_BLOCK_SIGNALS
  sigset_t new_set;
#endif /* KMP_BLOCK_SIGNALS */
  struct timespec interval;

  KMP_MB(); /* Flush all pending memory write invalidates. */

  KA_TRACE(10, ("__kmp_launch_monitor: #1 launched\n"));

  /* register us as the monitor thread */
  __kmp_gtid_set_specific(KMP_GTID_MONITOR);
#ifdef KMP_TDATA_GTID
  __kmp_gtid = KMP_GTID_MONITOR;
#endif

  KMP_MB();

#if USE_ITT_BUILD
  // Instruct Intel(R) Threading Tools to ignore monitor thread.
  __kmp_itt_thread_ignore();
#endif /* USE_ITT_BUILD */

  __kmp_set_stack_info(((kmp_info_t *)thr)->th.th_info.ds.ds_gtid,
                       (kmp_info_t *)thr);

  __kmp_check_stack_overlap((kmp_info_t *)thr);

#ifdef KMP_CANCEL_THREADS
  status = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old_type);
  KMP_CHECK_SYSFAIL("pthread_setcanceltype", status);
  // josh todo: isn't PTHREAD_CANCEL_ENABLE default for newly-created threads?
  status = pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &old_state);
  KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
#endif

#if KMP_REAL_TIME_FIX
  // This is a potential fix that allows applications with a real-time
  // scheduling policy to work. However, the decision about the fix has not
  // been made yet, so it is disabled by default.
  { // Was the program started with a real-time scheduling policy?
    int sched = sched_getscheduler(0);
    if (sched == SCHED_FIFO || sched == SCHED_RR) {
      // Yes, we are part of a real-time application. Try to increase the
      // priority of the monitor.
      struct sched_param param;
      int max_priority = sched_get_priority_max(sched);
      int rc;
      KMP_WARNING(RealTimeSchedNotSupported);
      sched_getparam(0, &param);
      if (param.sched_priority < max_priority) {
        param.sched_priority += 1;
        rc = sched_setscheduler(0, sched, &param);
        if (rc != 0) {
          int error = errno;
          kmp_msg_t err_code = KMP_ERR(error);
          __kmp_msg(kmp_ms_warning, KMP_MSG(CantChangeMonitorPriority),
                    err_code, KMP_MSG(MonitorWillStarve), __kmp_msg_null);
          if (__kmp_generate_warnings == kmp_warnings_off) {
            __kmp_str_free(&err_code.str);
          }
        }
      } else {
        // We cannot abort here, because the number of CPUs may be enough for
        // all the threads, including the monitor thread, so the application
        // could potentially work...
        __kmp_msg(kmp_ms_warning, KMP_MSG(RunningAtMaxPriority),
                  KMP_MSG(MonitorWillStarve), KMP_HNT(RunningAtMaxPriority),
                  __kmp_msg_null);
      }
    }
    // AC: free the thread that waits for the monitor to start
    TCW_4(__kmp_global.g.g_time.dt.t_value, 0);
  }
#endif // KMP_REAL_TIME_FIX

  KMP_MB(); /* Flush all pending memory write invalidates. */

  if (__kmp_monitor_wakeups == 1) {
    interval.tv_sec = 1;
    interval.tv_nsec = 0;
  } else {
    interval.tv_sec = 0;
    interval.tv_nsec = (KMP_NSEC_PER_SEC / __kmp_monitor_wakeups);
  }
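  // For example, with __kmp_monitor_wakeups == 10 the monitor wakes roughly
  // every KMP_NSEC_PER_SEC / 10 = 100 ms. The tv_sec == 1 special case keeps
  // interval.tv_nsec strictly below one second, the conventional normalized
  // form for a timespec.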

  KA_TRACE(10, ("__kmp_launch_monitor: #2 monitor\n"));

  while (!TCR_4(__kmp_global.g.g_done)) {
    struct timespec now;
    struct timeval tval;

    /* This thread monitors the state of the system */

    KA_TRACE(15, ("__kmp_launch_monitor: update\n"));

    status = gettimeofday(&tval, NULL);
    KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
    TIMEVAL_TO_TIMESPEC(&tval, &now);

    now.tv_sec += interval.tv_sec;
    now.tv_nsec += interval.tv_nsec;

    if (now.tv_nsec >= KMP_NSEC_PER_SEC) {
      now.tv_sec += 1;
      now.tv_nsec -= KMP_NSEC_PER_SEC;
    }

    status = pthread_mutex_lock(&__kmp_wait_mx.m_mutex);
    KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
    // AC: the monitor should not fall asleep if g_done has been set
    if (!TCR_4(__kmp_global.g.g_done)) { // check once more under mutex
      status = pthread_cond_timedwait(&__kmp_wait_cv.c_cond,
                                      &__kmp_wait_mx.m_mutex, &now);
      if (status != 0) {
        if (status != ETIMEDOUT && status != EINTR) {
          KMP_SYSFAIL("pthread_cond_timedwait", status);
        }
      }
    }
    status = pthread_mutex_unlock(&__kmp_wait_mx.m_mutex);
    KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);

    TCW_4(__kmp_global.g.g_time.dt.t_value,
          TCR_4(__kmp_global.g.g_time.dt.t_value) + 1);

    KMP_MB(); /* Flush all pending memory write invalidates. */
  }

  KA_TRACE(10, ("__kmp_launch_monitor: #3 cleanup\n"));

#ifdef KMP_BLOCK_SIGNALS
  status = sigfillset(&new_set);
  KMP_CHECK_SYSFAIL_ERRNO("sigfillset", status);
  status = pthread_sigmask(SIG_UNBLOCK, &new_set, NULL);
  KMP_CHECK_SYSFAIL("pthread_sigmask", status);
#endif /* KMP_BLOCK_SIGNALS */

  KA_TRACE(10, ("__kmp_launch_monitor: #4 finished\n"));

  if (__kmp_global.g.g_abort != 0) {
    /* now we need to terminate the worker threads */
    /* the value of t_abort is the signal we caught */

    int gtid;

    KA_TRACE(10, ("__kmp_launch_monitor: #5 terminate sig=%d\n",
                  __kmp_global.g.g_abort));

    /* terminate the OpenMP worker threads */
    /* TODO this is not valid for sibling threads!!
     * the uber master might not be 0 anymore.. */
    for (gtid = 1; gtid < __kmp_threads_capacity; ++gtid)
      __kmp_terminate_thread(gtid);

    __kmp_cleanup();

    KA_TRACE(10, ("__kmp_launch_monitor: #6 raise sig=%d\n",
                  __kmp_global.g.g_abort));

    if (__kmp_global.g.g_abort > 0)
      raise(__kmp_global.g.g_abort);
  }

  KA_TRACE(10, ("__kmp_launch_monitor: #7 exit\n"));

  return thr;
}
#endif // KMP_USE_MONITOR

void __kmp_create_worker(int gtid, kmp_info_t *th, size_t stack_size) {
  pthread_t handle;
  pthread_attr_t thread_attr;
  int status;

  th->th.th_info.ds.ds_gtid = gtid;

#if KMP_STATS_ENABLED
  // sets up worker thread stats
  __kmp_acquire_tas_lock(&__kmp_stats_lock, gtid);

  // th->th.th_stats is used to transfer the thread-specific stats pointer to
  // __kmp_launch_worker. So when the thread is created (and goes into
  // __kmp_launch_worker) it will set its thread-local pointer to
  // th->th.th_stats.
  if (!KMP_UBER_GTID(gtid)) {
    th->th.th_stats = __kmp_stats_list->push_back(gtid);
  } else {
    // For root threads, __kmp_stats_thread_ptr is set in __kmp_register_root(),
    // so set the th->th.th_stats field to it.
    th->th.th_stats = __kmp_stats_thread_ptr;
  }
  __kmp_release_tas_lock(&__kmp_stats_lock, gtid);

#endif // KMP_STATS_ENABLED

  if (KMP_UBER_GTID(gtid)) {
    KA_TRACE(10, ("__kmp_create_worker: uber thread (%d)\n", gtid));
    th->th.th_info.ds.ds_thread = pthread_self();
    __kmp_set_stack_info(gtid, th);
    __kmp_check_stack_overlap(th);
    return;
  }

  KA_TRACE(10, ("__kmp_create_worker: try to create thread (%d)\n", gtid));

  KMP_MB(); /* Flush all pending memory write invalidates. */

#ifdef KMP_THREAD_ATTR
  status = pthread_attr_init(&thread_attr);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantInitThreadAttrs), KMP_ERR(status), __kmp_msg_null);
  }
  status = pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_JOINABLE);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantSetWorkerState), KMP_ERR(status), __kmp_msg_null);
  }

  /* Set stack size for this thread now.
     The multiple of 2 is there because on some machines, requesting an unusual
     stacksize causes the thread to have an offset before the dummy alloca()
     takes place to create the offset. Since we want the user to have a
     sufficient stacksize AND support a stack offset, we alloca() twice the
     offset so that the upcoming alloca() does not eliminate any premade
     offset, and also gives the user the stack space they requested for all
     threads. */
  stack_size += gtid * __kmp_stkoffset * 2;
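  // Illustration (the stkoffset value here is hypothetical): with
  // __kmp_stkoffset == 8192 bytes, thread gtid 3 requests 3 * 8192 * 2 =
  // 48 KiB of extra stack, so the dummy alloca() in __kmp_launch_worker can
  // stagger stack bases without eating into the user-requested stack size.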

  KA_TRACE(10, ("__kmp_create_worker: T#%d, default stacksize = %lu bytes, "
                "__kmp_stksize = %lu bytes, final stacksize = %lu bytes\n",
                gtid, KMP_DEFAULT_STKSIZE, __kmp_stksize, stack_size));

#ifdef _POSIX_THREAD_ATTR_STACKSIZE
  status = pthread_attr_setstacksize(&thread_attr, stack_size);
#ifdef KMP_BACKUP_STKSIZE
  if (status != 0) {
    if (!__kmp_env_stksize) {
      stack_size = KMP_BACKUP_STKSIZE + gtid * __kmp_stkoffset;
      __kmp_stksize = KMP_BACKUP_STKSIZE;
      KA_TRACE(10, ("__kmp_create_worker: T#%d, default stacksize = %lu bytes, "
                    "__kmp_stksize = %lu bytes, (backup) final stacksize = %lu "
                    "bytes\n",
                    gtid, KMP_DEFAULT_STKSIZE, __kmp_stksize, stack_size));
      status = pthread_attr_setstacksize(&thread_attr, stack_size);
    }
  }
#endif /* KMP_BACKUP_STKSIZE */
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantSetWorkerStackSize, stack_size), KMP_ERR(status),
                KMP_HNT(ChangeWorkerStackSize), __kmp_msg_null);
  }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */

#endif /* KMP_THREAD_ATTR */

  status =
      pthread_create(&handle, &thread_attr, __kmp_launch_worker, (void *)th);
  if (status != 0 || !handle) { // ??? Why do we check handle??
#ifdef _POSIX_THREAD_ATTR_STACKSIZE
    if (status == EINVAL) {
      __kmp_fatal(KMP_MSG(CantSetWorkerStackSize, stack_size), KMP_ERR(status),
                  KMP_HNT(IncreaseWorkerStackSize), __kmp_msg_null);
    }
    if (status == ENOMEM) {
      __kmp_fatal(KMP_MSG(CantSetWorkerStackSize, stack_size), KMP_ERR(status),
                  KMP_HNT(DecreaseWorkerStackSize), __kmp_msg_null);
    }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
    if (status == EAGAIN) {
      __kmp_fatal(KMP_MSG(NoResourcesForWorkerThread), KMP_ERR(status),
                  KMP_HNT(Decrease_NUM_THREADS), __kmp_msg_null);
    }
    KMP_SYSFAIL("pthread_create", status);
  }

  th->th.th_info.ds.ds_thread = handle;

#ifdef KMP_THREAD_ATTR
  status = pthread_attr_destroy(&thread_attr);
  if (status) {
    kmp_msg_t err_code = KMP_ERR(status);
    __kmp_msg(kmp_ms_warning, KMP_MSG(CantDestroyThreadAttrs), err_code,
              __kmp_msg_null);
    if (__kmp_generate_warnings == kmp_warnings_off) {
      __kmp_str_free(&err_code.str);
    }
  }
#endif /* KMP_THREAD_ATTR */

  KMP_MB(); /* Flush all pending memory write invalidates. */

  KA_TRACE(10, ("__kmp_create_worker: done creating thread (%d)\n", gtid));

} // __kmp_create_worker

#if KMP_USE_MONITOR
void __kmp_create_monitor(kmp_info_t *th) {
  pthread_t handle;
  pthread_attr_t thread_attr;
  size_t size;
  int status;
  int auto_adj_size = FALSE;

  if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME) {
    // We don't need a monitor thread in the case of MAX_BLOCKTIME
    KA_TRACE(10, ("__kmp_create_monitor: skipping monitor thread because of "
                  "MAX blocktime\n"));
    th->th.th_info.ds.ds_tid = 0; // this makes reap_monitor a no-op
    th->th.th_info.ds.ds_gtid = 0;
    return;
  }
  KA_TRACE(10, ("__kmp_create_monitor: try to create monitor\n"));

  KMP_MB(); /* Flush all pending memory write invalidates. */

  th->th.th_info.ds.ds_tid = KMP_GTID_MONITOR;
  th->th.th_info.ds.ds_gtid = KMP_GTID_MONITOR;
#if KMP_REAL_TIME_FIX
  TCW_4(__kmp_global.g.g_time.dt.t_value,
        -1); // Will use it for synchronization a bit later.
#else
  TCW_4(__kmp_global.g.g_time.dt.t_value, 0);
#endif // KMP_REAL_TIME_FIX

#ifdef KMP_THREAD_ATTR
  if (__kmp_monitor_stksize == 0) {
    __kmp_monitor_stksize = KMP_DEFAULT_MONITOR_STKSIZE;
    auto_adj_size = TRUE;
  }
  status = pthread_attr_init(&thread_attr);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantInitThreadAttrs), KMP_ERR(status), __kmp_msg_null);
  }
  status = pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_JOINABLE);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantSetMonitorState), KMP_ERR(status), __kmp_msg_null);
  }

#ifdef _POSIX_THREAD_ATTR_STACKSIZE
  status = pthread_attr_getstacksize(&thread_attr, &size);
  KMP_CHECK_SYSFAIL("pthread_attr_getstacksize", status);
#else
  size = __kmp_sys_min_stksize;
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
#endif /* KMP_THREAD_ATTR */

  if (__kmp_monitor_stksize == 0) {
    __kmp_monitor_stksize = KMP_DEFAULT_MONITOR_STKSIZE;
  }
  if (__kmp_monitor_stksize < __kmp_sys_min_stksize) {
    __kmp_monitor_stksize = __kmp_sys_min_stksize;
  }

  KA_TRACE(10, ("__kmp_create_monitor: default stacksize = %lu bytes, "
                "requested stacksize = %lu bytes\n",
                size, __kmp_monitor_stksize));

retry:

/* Set stack size for this thread now. */
#ifdef _POSIX_THREAD_ATTR_STACKSIZE
  KA_TRACE(10, ("__kmp_create_monitor: setting stacksize = %lu bytes\n",
                __kmp_monitor_stksize));
  status = pthread_attr_setstacksize(&thread_attr, __kmp_monitor_stksize);
  if (status != 0) {
    if (auto_adj_size) {
      __kmp_monitor_stksize *= 2;
      goto retry;
    }
    kmp_msg_t err_code = KMP_ERR(status);
    __kmp_msg(kmp_ms_warning, // should this be fatal? BB
              KMP_MSG(CantSetMonitorStackSize, (long int)__kmp_monitor_stksize),
              err_code, KMP_HNT(ChangeMonitorStackSize), __kmp_msg_null);
    if (__kmp_generate_warnings == kmp_warnings_off) {
      __kmp_str_free(&err_code.str);
    }
  }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */

  status =
      pthread_create(&handle, &thread_attr, __kmp_launch_monitor, (void *)th);

  if (status != 0) {
#ifdef _POSIX_THREAD_ATTR_STACKSIZE
    if (status == EINVAL) {
      if (auto_adj_size && (__kmp_monitor_stksize < (size_t)0x40000000)) {
        __kmp_monitor_stksize *= 2;
        goto retry;
      }
      __kmp_fatal(KMP_MSG(CantSetMonitorStackSize, __kmp_monitor_stksize),
                  KMP_ERR(status), KMP_HNT(IncreaseMonitorStackSize),
                  __kmp_msg_null);
    }
    if (status == ENOMEM) {
      __kmp_fatal(KMP_MSG(CantSetMonitorStackSize, __kmp_monitor_stksize),
                  KMP_ERR(status), KMP_HNT(DecreaseMonitorStackSize),
                  __kmp_msg_null);
    }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
    if (status == EAGAIN) {
      __kmp_fatal(KMP_MSG(NoResourcesForMonitorThread), KMP_ERR(status),
                  KMP_HNT(DecreaseNumberOfThreadsInUse), __kmp_msg_null);
    }
    KMP_SYSFAIL("pthread_create", status);
  }

  th->th.th_info.ds.ds_thread = handle;

#if KMP_REAL_TIME_FIX
  // Wait until the monitor thread has really started and set its *priority*.
  KMP_DEBUG_ASSERT(sizeof(kmp_uint32) ==
                   sizeof(__kmp_global.g.g_time.dt.t_value));
  __kmp_wait_4((kmp_uint32 volatile *)&__kmp_global.g.g_time.dt.t_value, -1,
               &__kmp_neq_4, NULL);
#endif // KMP_REAL_TIME_FIX

#ifdef KMP_THREAD_ATTR
  status = pthread_attr_destroy(&thread_attr);
  if (status != 0) {
    kmp_msg_t err_code = KMP_ERR(status);
    __kmp_msg(kmp_ms_warning, KMP_MSG(CantDestroyThreadAttrs), err_code,
              __kmp_msg_null);
    if (__kmp_generate_warnings == kmp_warnings_off) {
      __kmp_str_free(&err_code.str);
    }
  }
#endif

  KMP_MB(); /* Flush all pending memory write invalidates. */

  KA_TRACE(10, ("__kmp_create_monitor: monitor created %#.8lx\n",
                th->th.th_info.ds.ds_thread));

} // __kmp_create_monitor
#endif // KMP_USE_MONITOR

void __kmp_exit_thread(int exit_status) {
  pthread_exit((void *)(intptr_t)exit_status);
} // __kmp_exit_thread

#if KMP_USE_MONITOR
void __kmp_resume_monitor();

extern "C" void __kmp_reap_monitor(kmp_info_t *th) {
  int status;
  void *exit_val;

  KA_TRACE(10, ("__kmp_reap_monitor: try to reap monitor thread with handle"
                " %#.8lx\n",
                th->th.th_info.ds.ds_thread));

  // If the monitor has been created, its tid and gtid should be
  // KMP_GTID_MONITOR. If both tid and gtid are 0, it means the monitor did
  // not ever start. If both tid and gtid are KMP_GTID_DNE, the monitor has
  // been shut down.
  KMP_DEBUG_ASSERT(th->th.th_info.ds.ds_tid == th->th.th_info.ds.ds_gtid);
  if (th->th.th_info.ds.ds_gtid != KMP_GTID_MONITOR) {
    KA_TRACE(10, ("__kmp_reap_monitor: monitor did not start, returning\n"));
    return;
  }

  KMP_MB(); /* Flush all pending memory write invalidates. */

  /* First, check to see whether the monitor thread exists in order to wake it
     up. This is to avoid a performance problem when the monitor sleeps during
     a blocktime-sized interval */

  status = pthread_kill(th->th.th_info.ds.ds_thread, 0);
  if (status != ESRCH) {
    __kmp_resume_monitor(); // Wake up the monitor thread
  }
  KA_TRACE(10, ("__kmp_reap_monitor: try to join with monitor\n"));
  status = pthread_join(th->th.th_info.ds.ds_thread, &exit_val);
  if (exit_val != th) {
    __kmp_fatal(KMP_MSG(ReapMonitorError), KMP_ERR(status), __kmp_msg_null);
  }

  th->th.th_info.ds.ds_tid = KMP_GTID_DNE;
  th->th.th_info.ds.ds_gtid = KMP_GTID_DNE;

  KA_TRACE(10, ("__kmp_reap_monitor: done reaping monitor thread with handle"
                " %#.8lx\n",
                th->th.th_info.ds.ds_thread));

  KMP_MB(); /* Flush all pending memory write invalidates. */
}
#else
// Empty symbol to export (see exports_so.txt) when
// monitor thread feature is disabled
extern "C" void __kmp_reap_monitor(kmp_info_t *th) {
  (void)th;
}
#endif // KMP_USE_MONITOR

void __kmp_reap_worker(kmp_info_t *th) {
  int status;
  void *exit_val;

  KMP_MB(); /* Flush all pending memory write invalidates. */

  KA_TRACE(
      10, ("__kmp_reap_worker: try to reap T#%d\n", th->th.th_info.ds.ds_gtid));

  status = pthread_join(th->th.th_info.ds.ds_thread, &exit_val);
#ifdef KMP_DEBUG
  /* Don't expose these to the user until we understand when they trigger */
  if (status != 0) {
    __kmp_fatal(KMP_MSG(ReapWorkerError), KMP_ERR(status), __kmp_msg_null);
  }
  if (exit_val != th) {
    KA_TRACE(10, ("__kmp_reap_worker: worker T#%d did not reap properly, "
                  "exit_val = %p\n",
                  th->th.th_info.ds.ds_gtid, exit_val));
  }
#else
  (void)status; // unused variable
#endif /* KMP_DEBUG */

  KA_TRACE(10, ("__kmp_reap_worker: done reaping T#%d\n",
                th->th.th_info.ds.ds_gtid));

  KMP_MB(); /* Flush all pending memory write invalidates. */
}

#if KMP_HANDLE_SIGNALS

static void __kmp_null_handler(int signo) {
  // Do nothing, for doing SIG_IGN-type actions.
} // __kmp_null_handler

static void __kmp_team_handler(int signo) {
  if (__kmp_global.g.g_abort == 0) {
/* Stage 1 signal handler, let's shut down all of the threads */
#ifdef KMP_DEBUG
    __kmp_debug_printf("__kmp_team_handler: caught signal = %d\n", signo);
#endif
    switch (signo) {
    case SIGHUP:
    case SIGINT:
    case SIGQUIT:
    case SIGILL:
    case SIGABRT:
    case SIGFPE:
    case SIGBUS:
    case SIGSEGV:
#ifdef SIGSYS
    case SIGSYS:
#endif
    case SIGTERM:
      if (__kmp_debug_buf) {
        __kmp_dump_debug_buffer();
      }
      __kmp_unregister_library(); // cleanup shared memory
      KMP_MB(); // Flush all pending memory write invalidates.
      TCW_4(__kmp_global.g.g_abort, signo);
      KMP_MB(); // Flush all pending memory write invalidates.
      TCW_4(__kmp_global.g.g_done, TRUE);
      KMP_MB(); // Flush all pending memory write invalidates.
      break;
    default:
#ifdef KMP_DEBUG
      __kmp_debug_printf("__kmp_team_handler: unknown signal type");
#endif
      break;
    }
  }
} // __kmp_team_handler

static void __kmp_sigaction(int signum, const struct sigaction *act,
                            struct sigaction *oldact) {
  int rc = sigaction(signum, act, oldact);
  KMP_CHECK_SYSFAIL_ERRNO("sigaction", rc);
}

static void __kmp_install_one_handler(int sig, sig_func_t handler_func,
                                      int parallel_init) {
  KMP_MB(); // Flush all pending memory write invalidates.
  KB_TRACE(60,
           ("__kmp_install_one_handler( %d, ..., %d )\n", sig, parallel_init));
  if (parallel_init) {
    struct sigaction new_action;
    struct sigaction old_action;
    new_action.sa_handler = handler_func;
    new_action.sa_flags = 0;
    sigfillset(&new_action.sa_mask);
    __kmp_sigaction(sig, &new_action, &old_action);
    if (old_action.sa_handler == __kmp_sighldrs[sig].sa_handler) {
      sigaddset(&__kmp_sigset, sig);
    } else {
      // Restore/keep the user's handler if one was previously installed.
      __kmp_sigaction(sig, &old_action, NULL);
    }
  } else {
    // Save the initial/system signal handlers to see if user handlers were
    // installed.
    __kmp_sigaction(sig, NULL, &__kmp_sighldrs[sig]);
  }
  KMP_MB(); // Flush all pending memory write invalidates.
} // __kmp_install_one_handler

static void __kmp_remove_one_handler(int sig) {
  KB_TRACE(60, ("__kmp_remove_one_handler( %d )\n", sig));
  if (sigismember(&__kmp_sigset, sig)) {
    struct sigaction old;
    KMP_MB(); // Flush all pending memory write invalidates.
    __kmp_sigaction(sig, &__kmp_sighldrs[sig], &old);
    if ((old.sa_handler != __kmp_team_handler) &&
        (old.sa_handler != __kmp_null_handler)) {
      // Restore the user's signal handler.
      KB_TRACE(10, ("__kmp_remove_one_handler: oops, not our handler, "
                    "restoring: sig=%d\n",
                    sig));
      __kmp_sigaction(sig, &old, NULL);
    }
    sigdelset(&__kmp_sigset, sig);
    KMP_MB(); // Flush all pending memory write invalidates.
  }
} // __kmp_remove_one_handler

void __kmp_install_signals(int parallel_init) {
  KB_TRACE(10, ("__kmp_install_signals( %d )\n", parallel_init));
  if (__kmp_handle_signals || !parallel_init) {
    // If !parallel_init, we do not install handlers, just save the original
    // handlers. Let us do it even if __kmp_handle_signals is 0.
    sigemptyset(&__kmp_sigset);
    __kmp_install_one_handler(SIGHUP, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGINT, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGQUIT, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGILL, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGABRT, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGFPE, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGBUS, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGSEGV, __kmp_team_handler, parallel_init);
#ifdef SIGSYS
    __kmp_install_one_handler(SIGSYS, __kmp_team_handler, parallel_init);
#endif // SIGSYS
    __kmp_install_one_handler(SIGTERM, __kmp_team_handler, parallel_init);
#ifdef SIGPIPE
    __kmp_install_one_handler(SIGPIPE, __kmp_team_handler, parallel_init);
#endif // SIGPIPE
  }
} // __kmp_install_signals

void __kmp_remove_signals(void) {
  int sig;
  KB_TRACE(10, ("__kmp_remove_signals()\n"));
  for (sig = 1; sig < NSIG; ++sig) {
    __kmp_remove_one_handler(sig);
  }
} // __kmp_remove_signals

#endif // KMP_HANDLE_SIGNALS

void __kmp_enable(int new_state) {
#ifdef KMP_CANCEL_THREADS
  int status, old_state;
  status = pthread_setcancelstate(new_state, &old_state);
  KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
  KMP_DEBUG_ASSERT(old_state == PTHREAD_CANCEL_DISABLE);
#endif
}

void __kmp_disable(int *old_state) {
#ifdef KMP_CANCEL_THREADS
  int status;
  status = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, old_state);
  KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
#endif
}

static void __kmp_atfork_prepare(void) {
  __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);
  __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
}

static void __kmp_atfork_parent(void) {
  __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
  __kmp_release_bootstrap_lock(&__kmp_initz_lock);
}

/* Reset the library so execution in the child starts "all over again" with
   clean data structures in initial states. Don't worry about freeing memory
   allocated by the parent, just abandon it to be safe. */
static void __kmp_atfork_child(void) {
  __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
  __kmp_release_bootstrap_lock(&__kmp_initz_lock);
  /* TODO make sure this is done right for nested/sibling */
  // ATT: Memory leaks are here? TODO: Check it and fix.
  /* KMP_ASSERT( 0 ); */

  ++__kmp_fork_count;

#if KMP_AFFINITY_SUPPORTED
#if KMP_OS_LINUX || KMP_OS_FREEBSD
  // reset the affinity in the child to the initial thread
  // affinity in the parent
  kmp_set_thread_affinity_mask_initial();
#endif
  // Set default not to bind threads tightly in the child (we're expecting
  // over-subscription after the fork and this can improve things for
  // scripting languages that use OpenMP inside process-parallel code).
  if (__kmp_nested_proc_bind.bind_types != NULL) {
    __kmp_nested_proc_bind.bind_types[0] = proc_bind_false;
  }
  for (kmp_affinity_t *affinity : __kmp_affinities)
    *affinity = KMP_AFFINITY_INIT(affinity->env_var);
  __kmp_affin_fullMask = nullptr;
  __kmp_affin_origMask = nullptr;
  __kmp_topology = nullptr;
#endif // KMP_AFFINITY_SUPPORTED

#if KMP_USE_MONITOR
  __kmp_init_monitor = 0;
#endif
  __kmp_init_parallel = FALSE;
  __kmp_init_middle = FALSE;
  __kmp_init_serial = FALSE;
  TCW_4(__kmp_init_gtid, FALSE);
  __kmp_init_common = FALSE;

  TCW_4(__kmp_init_user_locks, FALSE);
#if !KMP_USE_DYNAMIC_LOCK
  __kmp_user_lock_table.used = 1;
  __kmp_user_lock_table.allocated = 0;
  __kmp_user_lock_table.table = NULL;
  __kmp_lock_blocks = NULL;
#endif

  __kmp_all_nth = 0;
  TCW_4(__kmp_nth, 0);

  __kmp_thread_pool = NULL;
  __kmp_thread_pool_insert_pt = NULL;
  __kmp_team_pool = NULL;

  /* Must actually zero all the *cache arguments passed to __kmpc_threadprivate
     here so threadprivate doesn't use stale data */
  KA_TRACE(10, ("__kmp_atfork_child: checking cache address list %p\n",
                __kmp_threadpriv_cache_list));

  while (__kmp_threadpriv_cache_list != NULL) {

    if (*__kmp_threadpriv_cache_list->addr != NULL) {
      KC_TRACE(50, ("__kmp_atfork_child: zeroing cache at address %p\n",
                    &(*__kmp_threadpriv_cache_list->addr)));

      *__kmp_threadpriv_cache_list->addr = NULL;
    }
    __kmp_threadpriv_cache_list = __kmp_threadpriv_cache_list->next;
  }

  __kmp_init_runtime = FALSE;

  /* reset statically initialized locks */
  __kmp_init_bootstrap_lock(&__kmp_initz_lock);
  __kmp_init_bootstrap_lock(&__kmp_stdio_lock);
  __kmp_init_bootstrap_lock(&__kmp_console_lock);
  __kmp_init_bootstrap_lock(&__kmp_task_team_lock);

#if USE_ITT_BUILD
  __kmp_itt_reset(); // reset ITT's global state
#endif /* USE_ITT_BUILD */

  {
    // Child processes often get terminated without any use of OpenMP. That
    // might cause the mapped shared memory file to be left unattended. Thus we
    // postpone library registration until middle initialization in the child
    // process.
    __kmp_need_register_serial = FALSE;
    __kmp_serial_initialize();
  }

  /* This is necessary to make sure no stale data is left around */
  /* AC: customers complain that we use unsafe routines in the atfork
     handler. Mathworks: dlsym() is unsafe. We call dlsym and dlopen
     in dynamic_link when checking for the presence of the shared tbbmalloc
     library. The suggestion is to make the library initialization lazier,
     similar to what is done for __kmpc_begin(). */
  // TODO: synchronize all static initializations with regular library
  // startup; look at kmp_global.cpp and etc.
  //__kmp_internal_begin ();
}

void __kmp_register_atfork(void) {
  if (__kmp_need_register_atfork) {
    int status = pthread_atfork(__kmp_atfork_prepare, __kmp_atfork_parent,
                                __kmp_atfork_child);
    KMP_CHECK_SYSFAIL("pthread_atfork", status);
    __kmp_need_register_atfork = FALSE;
  }
}

void __kmp_suspend_initialize(void) {
  int status;
  status = pthread_mutexattr_init(&__kmp_suspend_mutex_attr);
  KMP_CHECK_SYSFAIL("pthread_mutexattr_init", status);
  status = pthread_condattr_init(&__kmp_suspend_cond_attr);
  KMP_CHECK_SYSFAIL("pthread_condattr_init", status);
}

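// Per-thread initialization below uses th_suspend_init_count as a small state
// machine: a value of __kmp_fork_count + 1 means "initialized in this fork
// generation", -1 is a transient "initialization in progress" sentinel, and
// any other value means the pthread objects still need to be (re)initialized.
// Losers of the compare-and-store race simply spin until the winner publishes
// the new count with release semantics.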
void __kmp_suspend_initialize_thread(kmp_info_t *th) {
  int old_value = KMP_ATOMIC_LD_RLX(&th->th.th_suspend_init_count);
  int new_value = __kmp_fork_count + 1;
  // Return if already initialized
  if (old_value == new_value)
    return;
  // Wait, then return if being initialized
  if (old_value == -1 || !__kmp_atomic_compare_store(
                             &th->th.th_suspend_init_count, old_value, -1)) {
    while (KMP_ATOMIC_LD_ACQ(&th->th.th_suspend_init_count) != new_value) {
      KMP_CPU_PAUSE();
    }
  } else {
    // Claim to be the initializer and do initializations
    int status;
    status = pthread_cond_init(&th->th.th_suspend_cv.c_cond,
                               &__kmp_suspend_cond_attr);
    KMP_CHECK_SYSFAIL("pthread_cond_init", status);
    status = pthread_mutex_init(&th->th.th_suspend_mx.m_mutex,
                                &__kmp_suspend_mutex_attr);
    KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
    KMP_ATOMIC_ST_REL(&th->th.th_suspend_init_count, new_value);
  }
}

void __kmp_suspend_uninitialize_thread(kmp_info_t *th) {
  if (KMP_ATOMIC_LD_ACQ(&th->th.th_suspend_init_count) > __kmp_fork_count) {
    /* this means we have initialized the suspension pthread objects for this
       thread in this instance of the process */
    int status;

    status = pthread_cond_destroy(&th->th.th_suspend_cv.c_cond);
    if (status != 0 && status != EBUSY) {
      KMP_SYSFAIL("pthread_cond_destroy", status);
    }
    status = pthread_mutex_destroy(&th->th.th_suspend_mx.m_mutex);
    if (status != 0 && status != EBUSY) {
      KMP_SYSFAIL("pthread_mutex_destroy", status);
    }
    --th->th.th_suspend_init_count;
    KMP_DEBUG_ASSERT(KMP_ATOMIC_LD_RLX(&th->th.th_suspend_init_count) ==
                     __kmp_fork_count);
  }
}

// return true if lock obtained, false otherwise
int __kmp_try_suspend_mx(kmp_info_t *th) {
  return (pthread_mutex_trylock(&th->th.th_suspend_mx.m_mutex) == 0);
}

void __kmp_lock_suspend_mx(kmp_info_t *th) {
  int status = pthread_mutex_lock(&th->th.th_suspend_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
}

void __kmp_unlock_suspend_mx(kmp_info_t *th) {
  int status = pthread_mutex_unlock(&th->th.th_suspend_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
}

/* This routine puts the calling thread to sleep after setting the
   sleep bit for the indicated flag variable to true. */
template <class C>
static inline void __kmp_suspend_template(int th_gtid, C *flag) {
  KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_suspend);
  kmp_info_t *th = __kmp_threads[th_gtid];
  int status;
  typename C::flag_t old_spin;

  KF_TRACE(30, ("__kmp_suspend_template: T#%d enter for flag = %p\n", th_gtid,
                flag->get()));

  __kmp_suspend_initialize_thread(th);

  __kmp_lock_suspend_mx(th);

  KF_TRACE(10, ("__kmp_suspend_template: T#%d setting sleep bit for spin(%p)\n",
                th_gtid, flag->get()));

  /* TODO: shouldn't this use release semantics to ensure that
     __kmp_suspend_initialize_thread gets called first? */
  old_spin = flag->set_sleeping();
  TCW_PTR(th->th.th_sleep_loc, (void *)flag);
  th->th.th_sleep_loc_type = flag->get_type();
  if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME &&
      __kmp_pause_status != kmp_soft_paused) {
    flag->unset_sleeping();
    TCW_PTR(th->th.th_sleep_loc, NULL);
    th->th.th_sleep_loc_type = flag_unset;
    __kmp_unlock_suspend_mx(th);
    return;
  }
  KF_TRACE(5, ("__kmp_suspend_template: T#%d set sleep bit for spin(%p)==%x,"
               " was %x\n",
               th_gtid, flag->get(), flag->load(), old_spin));

  if (flag->done_check_val(old_spin) || flag->done_check()) {
    flag->unset_sleeping();
    TCW_PTR(th->th.th_sleep_loc, NULL);
    th->th.th_sleep_loc_type = flag_unset;
    KF_TRACE(5, ("__kmp_suspend_template: T#%d false alarm, reset sleep bit "
                 "for spin(%p)\n",
                 th_gtid, flag->get()));
  } else {
    /* Encapsulate in a loop as the documentation states that this may
       "with low probability" return when the condition variable has
       not been signaled or broadcast */
    int deactivated = FALSE;

    while (flag->is_sleeping()) {
#ifdef DEBUG_SUSPEND
      char buffer[128];
      __kmp_suspend_count++;
      __kmp_print_cond(buffer, &th->th.th_suspend_cv);
      __kmp_printf("__kmp_suspend_template: suspending T#%d: %s\n", th_gtid,
                   buffer);
#endif
      // Mark the thread as no longer active (only in the first iteration of
      // the loop).
      if (!deactivated) {
        th->th.th_active = FALSE;
        if (th->th.th_active_in_pool) {
          th->th.th_active_in_pool = FALSE;
          KMP_ATOMIC_DEC(&__kmp_thread_pool_active_nth);
          KMP_DEBUG_ASSERT(TCR_4(__kmp_thread_pool_active_nth) >= 0);
        }
        deactivated = TRUE;
      }

      KMP_DEBUG_ASSERT(th->th.th_sleep_loc);
      KMP_DEBUG_ASSERT(flag->get_type() == th->th.th_sleep_loc_type);

#if USE_SUSPEND_TIMEOUT
      struct timespec now;
      struct timeval tval;
      int msecs;

      status = gettimeofday(&tval, NULL);
      KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
      TIMEVAL_TO_TIMESPEC(&tval, &now);

      msecs = (4 * __kmp_dflt_blocktime) + 200;
      now.tv_sec += msecs / 1000;
      now.tv_nsec += (msecs % 1000) * 1000;

      KF_TRACE(15, ("__kmp_suspend_template: T#%d about to perform "
                    "pthread_cond_timedwait\n",
                    th_gtid));
      status = pthread_cond_timedwait(&th->th.th_suspend_cv.c_cond,
                                      &th->th.th_suspend_mx.m_mutex, &now);
#else
      KF_TRACE(15, ("__kmp_suspend_template: T#%d about to perform"
                    " pthread_cond_wait\n",
                    th_gtid));
      status = pthread_cond_wait(&th->th.th_suspend_cv.c_cond,
                                 &th->th.th_suspend_mx.m_mutex);
#endif // USE_SUSPEND_TIMEOUT

      if ((status != 0) && (status != EINTR) && (status != ETIMEDOUT)) {
        KMP_SYSFAIL("pthread_cond_wait", status);
      }

      KMP_DEBUG_ASSERT(flag->get_type() == flag->get_ptr_type());

      if (!flag->is_sleeping() &&
          ((status == EINTR) || (status == ETIMEDOUT))) {
        // if interrupt or timeout, and thread is no longer sleeping, we need
        // to make sure sleep_loc gets reset; however, this shouldn't be
        // needed if we woke up with resume
        flag->unset_sleeping();
        TCW_PTR(th->th.th_sleep_loc, NULL);
        th->th.th_sleep_loc_type = flag_unset;
      }
#ifdef KMP_DEBUG
      if (status == ETIMEDOUT) {
        if (flag->is_sleeping()) {
          KF_TRACE(100,
                   ("__kmp_suspend_template: T#%d timeout wakeup\n", th_gtid));
        } else {
          KF_TRACE(2, ("__kmp_suspend_template: T#%d timeout wakeup, sleep bit "
                       "not set!\n",
                       th_gtid));
          TCW_PTR(th->th.th_sleep_loc, NULL);
          th->th.th_sleep_loc_type = flag_unset;
        }
      } else if (flag->is_sleeping()) {
        KF_TRACE(100,
                 ("__kmp_suspend_template: T#%d spurious wakeup\n", th_gtid));
      }
#endif
    } // while

    // Mark the thread as active again (if it was previously marked as
    // inactive)
    if (deactivated) {
      th->th.th_active = TRUE;
      if (TCR_4(th->th.th_in_pool)) {
        KMP_ATOMIC_INC(&__kmp_thread_pool_active_nth);
        th->th.th_active_in_pool = TRUE;
      }
    }
  }
  // We may have had the loop variable set before entering the loop body,
  // so we need to reset sleep_loc.
  TCW_PTR(th->th.th_sleep_loc, NULL);
  th->th.th_sleep_loc_type = flag_unset;

  KMP_DEBUG_ASSERT(!flag->is_sleeping());
  KMP_DEBUG_ASSERT(!th->th.th_sleep_loc);
#ifdef DEBUG_SUSPEND
  {
    char buffer[128];
    __kmp_print_cond(buffer, &th->th.th_suspend_cv);
    __kmp_printf("__kmp_suspend_template: T#%d has awakened: %s\n", th_gtid,
                 buffer);
  }
#endif

  __kmp_unlock_suspend_mx(th);
  KF_TRACE(30, ("__kmp_suspend_template: T#%d exit\n", th_gtid));
}

template <bool C, bool S>
void __kmp_suspend_32(int th_gtid, kmp_flag_32<C, S> *flag) {
  __kmp_suspend_template(th_gtid, flag);
}
template <bool C, bool S>
void __kmp_suspend_64(int th_gtid, kmp_flag_64<C, S> *flag) {
  __kmp_suspend_template(th_gtid, flag);
}
template <bool C, bool S>
void __kmp_atomic_suspend_64(int th_gtid, kmp_atomic_flag_64<C, S> *flag) {
  __kmp_suspend_template(th_gtid, flag);
}
void __kmp_suspend_oncore(int th_gtid, kmp_flag_oncore *flag) {
  __kmp_suspend_template(th_gtid, flag);
}

template void __kmp_suspend_32<false, false>(int, kmp_flag_32<false, false> *);
template void __kmp_suspend_64<false, true>(int, kmp_flag_64<false, true> *);
template void __kmp_suspend_64<true, false>(int, kmp_flag_64<true, false> *);
template void
__kmp_atomic_suspend_64<false, true>(int, kmp_atomic_flag_64<false, true> *);
template void
__kmp_atomic_suspend_64<true, false>(int, kmp_atomic_flag_64<true, false> *);
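// The explicit instantiations above pin down the <C, S> flag specializations
// that the rest of the runtime actually suspends on; the flag types come from
// kmp_wait_release.h, included at the top of this file.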
1581 
1582 /* This routine signals the thread specified by target_gtid to wake up
1583  after setting the sleep bit indicated by the flag argument to FALSE.
1584  The target thread must already have called __kmp_suspend_template() */
1585 template <class C>
1586 static inline void __kmp_resume_template(int target_gtid, C *flag) {
1587  KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_resume);
1588  kmp_info_t *th = __kmp_threads[target_gtid];
1589  int status;
1590 
1591 #ifdef KMP_DEBUG
1592  int gtid = TCR_4(__kmp_init_gtid) ? __kmp_get_gtid() : -1;
1593 #endif
1594 
1595  KF_TRACE(30, ("__kmp_resume_template: T#%d wants to wakeup T#%d enter\n",
1596  gtid, target_gtid));
1597  KMP_DEBUG_ASSERT(gtid != target_gtid);
1598 
1599  __kmp_suspend_initialize_thread(th);
1600 
1601  __kmp_lock_suspend_mx(th);
1602 
1603  if (!flag || flag != th->th.th_sleep_loc) {
1604  // coming from __kmp_null_resume_wrapper, or thread is now sleeping on a
1605  // different location; wake up at new location
1606  flag = (C *)CCAST(void *, th->th.th_sleep_loc);
1607  }
1608 
1609  // First, check if the flag is null or its type has changed. If so, someone
1610  // else woke it up.
1611  if (!flag) { // Thread doesn't appear to be sleeping on anything
1612  KF_TRACE(5, ("__kmp_resume_template: T#%d exiting, thread T#%d already "
1613  "awake: flag(%p)\n",
1614  gtid, target_gtid, (void *)NULL));
1615  __kmp_unlock_suspend_mx(th);
1616  return;
1617  } else if (flag->get_type() != th->th.th_sleep_loc_type) {
1618  // Flag type does not appear to match this function template; possibly the
1619  // thread is sleeping on something else. Try null resume again.
1620  KF_TRACE(
1621  5,
1622  ("__kmp_resume_template: T#%d retrying, thread T#%d Mismatch flag(%p), "
1623  "spin(%p) type=%d ptr_type=%d\n",
1624  gtid, target_gtid, flag, flag->get(), flag->get_type(),
1625  th->th.th_sleep_loc_type));
1626  __kmp_unlock_suspend_mx(th);
1627  __kmp_null_resume_wrapper(th);
1628  return;
1629  } else { // if multiple threads are sleeping, flag should be internally
1630  // referring to a specific thread here
1631  if (!flag->is_sleeping()) {
1632  KF_TRACE(5, ("__kmp_resume_template: T#%d exiting, thread T#%d already "
1633  "awake: flag(%p): %u\n",
1634  gtid, target_gtid, flag->get(), (unsigned int)flag->load()));
1635  __kmp_unlock_suspend_mx(th);
1636  return;
1637  }
1638  }
1639  KMP_DEBUG_ASSERT(flag);
1640  flag->unset_sleeping();
1641  TCW_PTR(th->th.th_sleep_loc, NULL);
1642  th->th.th_sleep_loc_type = flag_unset;
1643 
1644  KF_TRACE(5, ("__kmp_resume_template: T#%d about to wakeup T#%d, reset "
1645  "sleep bit for flag's loc(%p): %u\n",
1646  gtid, target_gtid, flag->get(), (unsigned int)flag->load()));
1647 
1648 #ifdef DEBUG_SUSPEND
1649  {
1650  char buffer[128];
1651  __kmp_print_cond(buffer, &th->th.th_suspend_cv);
1652  __kmp_printf("__kmp_resume_template: T#%d resuming T#%d: %s\n", gtid,
1653  target_gtid, buffer);
1654  }
1655 #endif
1656  status = pthread_cond_signal(&th->th.th_suspend_cv.c_cond);
1657  KMP_CHECK_SYSFAIL("pthread_cond_signal", status);
1658  __kmp_unlock_suspend_mx(th);
1659  KF_TRACE(30, ("__kmp_resume_template: T#%d exiting after signaling wake up"
1660  " for T#%d\n",
1661  gtid, target_gtid));
1662 }
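
// For illustration: the suspend/resume machinery above reduces to a classic
// "sleep flag + mutex + condition variable" handshake. A minimal sketch of
// that pattern follows, with hypothetical demo_* names; it is kept under
// #if 0 so it is not compiled and is not part of the runtime.
#if 0
#include <pthread.h>

static pthread_mutex_t demo_mx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t demo_cv = PTHREAD_COND_INITIALIZER;
static int demo_sleeping = 0; // analogous to the flag's sleep bit

static void demo_suspend(void) {
  pthread_mutex_lock(&demo_mx);
  demo_sleeping = 1;
  while (demo_sleeping) // re-check the predicate: wakeups may be spurious
    pthread_cond_wait(&demo_cv, &demo_mx);
  pthread_mutex_unlock(&demo_mx);
}

static void demo_resume(void) {
  pthread_mutex_lock(&demo_mx);
  if (demo_sleeping) { // only signal a thread that is really asleep;
    demo_sleeping = 0; // clear the sleep bit under the same mutex,
    pthread_cond_signal(&demo_cv); // mirroring unset_sleeping() above
  }
  pthread_mutex_unlock(&demo_mx);
}
#endif // illustrative sketch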
1663 
1664 template <bool C, bool S>
1665 void __kmp_resume_32(int target_gtid, kmp_flag_32<C, S> *flag) {
1666  __kmp_resume_template(target_gtid, flag);
1667 }
1668 template <bool C, bool S>
1669 void __kmp_resume_64(int target_gtid, kmp_flag_64<C, S> *flag) {
1670  __kmp_resume_template(target_gtid, flag);
1671 }
1672 template <bool C, bool S>
1673 void __kmp_atomic_resume_64(int target_gtid, kmp_atomic_flag_64<C, S> *flag) {
1674  __kmp_resume_template(target_gtid, flag);
1675 }
1676 void __kmp_resume_oncore(int target_gtid, kmp_flag_oncore *flag) {
1677  __kmp_resume_template(target_gtid, flag);
1678 }
1679 
1680 template void __kmp_resume_32<false, true>(int, kmp_flag_32<false, true> *);
1681 template void __kmp_resume_32<false, false>(int, kmp_flag_32<false, false> *);
1682 template void __kmp_resume_64<false, true>(int, kmp_flag_64<false, true> *);
1683 template void
1684 __kmp_atomic_resume_64<false, true>(int, kmp_atomic_flag_64<false, true> *);
1685 
1686 #if KMP_USE_MONITOR
1687 void __kmp_resume_monitor() {
1688  KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_resume);
1689  int status;
1690 #ifdef KMP_DEBUG
1691  int gtid = TCR_4(__kmp_init_gtid) ? __kmp_get_gtid() : -1;
1692  KF_TRACE(30, ("__kmp_resume_monitor: T#%d wants to wakeup T#%d enter\n", gtid,
1693  KMP_GTID_MONITOR));
1694  KMP_DEBUG_ASSERT(gtid != KMP_GTID_MONITOR);
1695 #endif
1696  status = pthread_mutex_lock(&__kmp_wait_mx.m_mutex);
1697  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
1698 #ifdef DEBUG_SUSPEND
1699  {
1700  char buffer[128];
1701  __kmp_print_cond(buffer, &__kmp_wait_cv.c_cond);
1702  __kmp_printf("__kmp_resume_monitor: T#%d resuming T#%d: %s\n", gtid,
1703  KMP_GTID_MONITOR, buffer);
1704  }
1705 #endif
1706  status = pthread_cond_signal(&__kmp_wait_cv.c_cond);
1707  KMP_CHECK_SYSFAIL("pthread_cond_signal", status);
1708  status = pthread_mutex_unlock(&__kmp_wait_mx.m_mutex);
1709  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
1710  KF_TRACE(30, ("__kmp_resume_monitor: T#%d exiting after signaling wake up"
1711  " for T#%d\n",
1712  gtid, KMP_GTID_MONITOR));
1713 }
1714 #endif // KMP_USE_MONITOR
1715 
1716 void __kmp_yield() { sched_yield(); }
1717 
1718 void __kmp_gtid_set_specific(int gtid) {
1719  if (__kmp_init_gtid) {
1720  int status;
1721  status = pthread_setspecific(__kmp_gtid_threadprivate_key,
1722  (void *)(intptr_t)(gtid + 1));
1723  KMP_CHECK_SYSFAIL("pthread_setspecific", status);
1724  } else {
1725  KA_TRACE(50, ("__kmp_gtid_set_specific: runtime shutdown, returning\n"));
1726  }
1727 }
1728 
1729 int __kmp_gtid_get_specific() {
1730  int gtid;
1731  if (!__kmp_init_gtid) {
1732  KA_TRACE(50, ("__kmp_gtid_get_specific: runtime shutdown, returning "
1733  "KMP_GTID_SHUTDOWN\n"));
1734  return KMP_GTID_SHUTDOWN;
1735  }
1736  gtid = (int)(size_t)pthread_getspecific(__kmp_gtid_threadprivate_key);
1737  if (gtid == 0) {
1738  gtid = KMP_GTID_DNE;
1739  } else {
1740  gtid--;
1741  }
1742  KA_TRACE(50, ("__kmp_gtid_get_specific: key:%d gtid:%d\n",
1743  __kmp_gtid_threadprivate_key, gtid));
1744  return gtid;
1745 }
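
// For illustration: the key stores gtid + 1 so that pthread_getspecific()'s
// default of NULL (i.e. 0) is distinguishable from a legitimate gtid of 0.
// A minimal sketch of the same encoding, with hypothetical demo_* names
// (illustrative only, not compiled):
#if 0
#include <pthread.h>
#include <stdint.h>

static pthread_key_t demo_key; // created once via pthread_key_create()

static void demo_set_gtid(int gtid) {
  // Store gtid + 1 so that 0 (NULL) unambiguously means "never set".
  pthread_setspecific(demo_key, (void *)(intptr_t)(gtid + 1));
}

static int demo_get_gtid(void) {
  intptr_t v = (intptr_t)pthread_getspecific(demo_key);
  return (v == 0) ? -1 /* "does not exist" */ : (int)(v - 1);
}
#endif // illustrative sketch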
1746 
1747 double __kmp_read_cpu_time(void) {
1748  /*clock_t t;*/
1749  struct tms buffer;
1750 
1751  /*t =*/times(&buffer);
1752 
1753  return (double)(buffer.tms_utime + buffer.tms_cutime) /
1754  (double)CLOCKS_PER_SEC;
1755 }
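
// Note: POSIX specifies that struct tms fields returned by times() are in
// units of sysconf(_SC_CLK_TCK) ticks, whereas CLOCKS_PER_SEC is the unit of
// clock(). A strictly portable conversion would look like the sketch below;
// this is illustrative only, not a change to the runtime:
#if 0
#include <sys/times.h>
#include <unistd.h>

static double demo_read_cpu_time(void) {
  struct tms buffer;
  times(&buffer);
  long ticks_per_sec = sysconf(_SC_CLK_TCK); // tick unit used by times()
  return (double)(buffer.tms_utime + buffer.tms_cutime) /
         (double)ticks_per_sec;
}
#endif // illustrative sketch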
1756 
1757 int __kmp_read_system_info(struct kmp_sys_info *info) {
1758  int status;
1759  struct rusage r_usage;
1760 
1761  memset(info, 0, sizeof(*info));
1762 
1763  status = getrusage(RUSAGE_SELF, &r_usage);
1764  KMP_CHECK_SYSFAIL_ERRNO("getrusage", status);
1765 
1766  // The maximum resident set size utilized (in kilobytes)
1767  info->maxrss = r_usage.ru_maxrss;
1768  // The number of page faults serviced without any I/O
1769  info->minflt = r_usage.ru_minflt;
1770  // The number of page faults serviced that required I/O
1771  info->majflt = r_usage.ru_majflt;
1772  // The number of times a process was "swapped" out of memory
1773  info->nswap = r_usage.ru_nswap;
1774  // The number of times the file system had to perform input
1775  info->inblock = r_usage.ru_inblock;
1776  // The number of times the file system had to perform output
1777  info->oublock = r_usage.ru_oublock;
1778  // The number of voluntary context switches
1779  info->nvcsw = r_usage.ru_nvcsw;
1780  // The number of involuntary (forced) context switches
1781  info->nivcsw = r_usage.ru_nivcsw;
1782 
1783  return (status != 0);
1784 }
1785 
1786 void __kmp_read_system_time(double *delta) {
1787  double t_ns;
1788  struct timeval tval;
1789  struct timespec stop;
1790  int status;
1791 
1792  status = gettimeofday(&tval, NULL);
1793  KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
1794  TIMEVAL_TO_TIMESPEC(&tval, &stop);
1795  t_ns = (double)(TS2NS(stop) - TS2NS(__kmp_sys_timer_data.start));
1796  *delta = (t_ns * 1e-9);
1797 }
1798 
1799 void __kmp_clear_system_time(void) {
1800  struct timeval tval;
1801  int status;
1802  status = gettimeofday(&tval, NULL);
1803  KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
1804  TIMEVAL_TO_TIMESPEC(&tval, &__kmp_sys_timer_data.start);
1805 }
1806 
1807 static int __kmp_get_xproc(void) {
1808 
1809  int r = 0;
1810 
1811 #if KMP_OS_LINUX
1812 
1813  __kmp_type_convert(sysconf(_SC_NPROCESSORS_CONF), &(r));
1814 
1815 #elif KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_OPENBSD || \
1816  KMP_OS_HURD
1817 
1818  __kmp_type_convert(sysconf(_SC_NPROCESSORS_ONLN), &(r));
1819 
1820 #elif KMP_OS_DARWIN
1821 
1822  // Bug C77011 High "OpenMP Threads and number of active cores".
1823 
1824  // Find the number of available CPUs.
1825  kern_return_t rc;
1826  host_basic_info_data_t info;
1827  mach_msg_type_number_t num = HOST_BASIC_INFO_COUNT;
1828  rc = host_info(mach_host_self(), HOST_BASIC_INFO, (host_info_t)&info, &num);
1829  if (rc == 0 && num == HOST_BASIC_INFO_COUNT) {
1830  // Cannot use KA_TRACE() here because this code works before trace support
1831  // is initialized.
1832  r = info.avail_cpus;
1833  } else {
1834  KMP_WARNING(CantGetNumAvailCPU);
1835  KMP_INFORM(AssumedNumCPU);
1836  }
1837 
1838 #else
1839 
1840 #error "Unknown or unsupported OS."
1841 
1842 #endif
1843 
1844  return r > 0 ? r : 2; /* guess value of 2 if OS told us 0 */
1845 
1846 } // __kmp_get_xproc
1847 
1848 int __kmp_read_from_file(char const *path, char const *format, ...) {
1849  int result;
1850  va_list args;
1851 
1852  va_start(args, format);
1853  FILE *f = fopen(path, "rb");
1854  if (f == NULL) {
1855  va_end(args);
1856  return 0;
1857  }
1858  result = vfscanf(f, format, args);
1859  fclose(f);
1860  va_end(args);
1861 
1862  return result;
1863 }
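
// For illustration: __kmp_read_from_file is a thin vfscanf wrapper; a typical
// use is pulling a single value out of a procfs/sysfs file. The path below is
// purely hypothetical (illustrative only, not compiled):
#if 0
static int demo_read_knob(void) {
  int value = 0;
  // Returns the number of items matched, or 0 if the file could not be opened.
  if (__kmp_read_from_file("/proc/sys/kernel/some_knob", "%d", &value) == 1)
    return value;
  return -1;
}
#endif // illustrative sketch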
1864 
1865 void __kmp_runtime_initialize(void) {
1866  int status;
1867  pthread_mutexattr_t mutex_attr;
1868  pthread_condattr_t cond_attr;
1869 
1870  if (__kmp_init_runtime) {
1871  return;
1872  }
1873 
1874 #if (KMP_ARCH_X86 || KMP_ARCH_X86_64)
1875  if (!__kmp_cpuinfo.initialized) {
1876  __kmp_query_cpuid(&__kmp_cpuinfo);
1877  }
1878 #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
1879 
1880  __kmp_xproc = __kmp_get_xproc();
1881 
1882 #if !KMP_32_BIT_ARCH
1883  struct rlimit rlim;
1884  // read stack size of calling thread, save it as default for worker threads;
1885  // this should be done before reading environment variables
1886  status = getrlimit(RLIMIT_STACK, &rlim);
1887  if (status == 0) { // success?
1888  __kmp_stksize = rlim.rlim_cur;
1889  __kmp_check_stksize(&__kmp_stksize); // check value and adjust if needed
1890  }
1891 #endif /* KMP_32_BIT_ARCH */
1892 
1893  if (sysconf(_SC_THREADS)) {
1894 
1895  /* Query the maximum number of threads */
1896  __kmp_type_convert(sysconf(_SC_THREAD_THREADS_MAX), &(__kmp_sys_max_nth));
1897  if (__kmp_sys_max_nth == -1) {
1898  /* Unlimited threads for NPTL */
1899  __kmp_sys_max_nth = INT_MAX;
1900  } else if (__kmp_sys_max_nth <= 1) {
1901  /* Can't tell, just use PTHREAD_THREADS_MAX */
1902  __kmp_sys_max_nth = KMP_MAX_NTH;
1903  }
1904 
1905  /* Query the minimum stack size */
1906  __kmp_sys_min_stksize = sysconf(_SC_THREAD_STACK_MIN);
1907  if (__kmp_sys_min_stksize <= 1) {
1908  __kmp_sys_min_stksize = KMP_MIN_STKSIZE;
1909  }
1910  }
1911 
1912  /* Set up minimum number of threads to switch to TLS gtid */
1913  __kmp_tls_gtid_min = KMP_TLS_GTID_MIN;
1914 
1915  status = pthread_key_create(&__kmp_gtid_threadprivate_key,
1916  __kmp_internal_end_dest);
1917  KMP_CHECK_SYSFAIL("pthread_key_create", status);
1918  status = pthread_mutexattr_init(&mutex_attr);
1919  KMP_CHECK_SYSFAIL("pthread_mutexattr_init", status);
1920  status = pthread_mutex_init(&__kmp_wait_mx.m_mutex, &mutex_attr);
1921  KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
1922  status = pthread_mutexattr_destroy(&mutex_attr);
1923  KMP_CHECK_SYSFAIL("pthread_mutexattr_destroy", status);
1924  status = pthread_condattr_init(&cond_attr);
1925  KMP_CHECK_SYSFAIL("pthread_condattr_init", status);
1926  status = pthread_cond_init(&__kmp_wait_cv.c_cond, &cond_attr);
1927  KMP_CHECK_SYSFAIL("pthread_cond_init", status);
1928  status = pthread_condattr_destroy(&cond_attr);
1929  KMP_CHECK_SYSFAIL("pthread_condattr_destroy", status);
1930 #if USE_ITT_BUILD
1931  __kmp_itt_initialize();
1932 #endif /* USE_ITT_BUILD */
1933 
1934  __kmp_init_runtime = TRUE;
1935 }
1936 
1937 void __kmp_runtime_destroy(void) {
1938  int status;
1939 
1940  if (!__kmp_init_runtime) {
1941  return; // Nothing to do.
1942  }
1943 
1944 #if USE_ITT_BUILD
1945  __kmp_itt_destroy();
1946 #endif /* USE_ITT_BUILD */
1947 
1948  status = pthread_key_delete(__kmp_gtid_threadprivate_key);
1949  KMP_CHECK_SYSFAIL("pthread_key_delete", status);
1950 
1951  status = pthread_mutex_destroy(&__kmp_wait_mx.m_mutex);
1952  if (status != 0 && status != EBUSY) {
1953  KMP_SYSFAIL("pthread_mutex_destroy", status);
1954  }
1955  status = pthread_cond_destroy(&__kmp_wait_cv.c_cond);
1956  if (status != 0 && status != EBUSY) {
1957  KMP_SYSFAIL("pthread_cond_destroy", status);
1958  }
1959 #if KMP_AFFINITY_SUPPORTED
1960  __kmp_affinity_uninitialize();
1961 #endif
1962 
1963  __kmp_init_runtime = FALSE;
1964 }
1965 
1966 /* Put the thread to sleep for a time period */
1967 /* NOTE: not currently used anywhere */
1968 void __kmp_thread_sleep(int millis) { sleep((millis + 500) / 1000); }
1969 
1970 /* Calculate the elapsed wall clock time for the user */
1971 void __kmp_elapsed(double *t) {
1972  int status;
1973 #ifdef FIX_SGI_CLOCK
1974  struct timespec ts;
1975 
1976  status = clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts);
1977  KMP_CHECK_SYSFAIL_ERRNO("clock_gettime", status);
1978  *t =
1979  (double)ts.tv_nsec * (1.0 / (double)KMP_NSEC_PER_SEC) + (double)ts.tv_sec;
1980 #else
1981  struct timeval tv;
1982 
1983  status = gettimeofday(&tv, NULL);
1984  KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
1985  *t =
1986  (double)tv.tv_usec * (1.0 / (double)KMP_USEC_PER_SEC) + (double)tv.tv_sec;
1987 #endif
1988 }
1989 
1990 /* Calculate the elapsed wall clock tick for the user */
1991 void __kmp_elapsed_tick(double *t) { *t = 1 / (double)CLOCKS_PER_SEC; }
1992 
1993 /* Return the current time stamp in nsec */
1994 kmp_uint64 __kmp_now_nsec() {
1995  struct timeval t;
1996  gettimeofday(&t, NULL);
1997  kmp_uint64 nsec = (kmp_uint64)KMP_NSEC_PER_SEC * (kmp_uint64)t.tv_sec +
1998  (kmp_uint64)1000 * (kmp_uint64)t.tv_usec;
1999  return nsec;
2000 }
2001 
2002 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
2003 /* Measure clock ticks per millisecond */
2004 void __kmp_initialize_system_tick() {
2005  kmp_uint64 now, nsec2, diff;
2006  kmp_uint64 delay = 1000000; // ~450 usec on most machines.
2007  kmp_uint64 nsec = __kmp_now_nsec();
2008  kmp_uint64 goal = __kmp_hardware_timestamp() + delay;
2009  while ((now = __kmp_hardware_timestamp()) < goal)
2010  ;
2011  nsec2 = __kmp_now_nsec();
2012  diff = nsec2 - nsec;
2013  if (diff > 0) {
2014  double tpus = 1000.0 * (double)(delay + (now - goal)) / (double)diff;
2015  if (tpus > 0.0) {
2016  __kmp_ticks_per_msec = (kmp_uint64)(tpus * 1000.0);
2017  __kmp_ticks_per_usec = (kmp_uint64)tpus;
2018  }
2019  }
2020 }
2021 #endif
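
// For illustration: the calibration above busy-waits through ~1 ms worth of
// hardware timestamps, then derives ticks-per-usec from the elapsed wall
// clock. Once calibrated, converting a timestamp delta to microseconds is a
// single division (illustrative sketch, not compiled):
#if 0
static double demo_elapsed_usec(kmp_uint64 t_start, kmp_uint64 t_end) {
  // Valid only after __kmp_initialize_system_tick() has run.
  return (double)(t_end - t_start) / (double)__kmp_ticks_per_usec;
}
#endif // illustrative sketch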
2022 
2023 /* Determine whether the given address is mapped into the current address
2024  space. */
2025 
2026 int __kmp_is_address_mapped(void *addr) {
2027 
2028  int found = 0;
2029  int rc;
2030 
2031 #if KMP_OS_LINUX || KMP_OS_HURD
2032 
2033  /* On GNUish OSes, read the /proc/<pid>/maps pseudo-file to get all the
2034  address ranges mapped into the address space. */
2035 
2036  char *name = __kmp_str_format("/proc/%d/maps", getpid());
2037  FILE *file = NULL;
2038 
2039  file = fopen(name, "r");
2040  KMP_ASSERT(file != NULL);
2041 
2042  for (;;) {
2043 
2044  void *beginning = NULL;
2045  void *ending = NULL;
2046  char perms[5];
2047 
2048  rc = fscanf(file, "%p-%p %4s %*[^\n]\n", &beginning, &ending, perms);
2049  if (rc == EOF) {
2050  break;
2051  }
2052  KMP_ASSERT(rc == 3 &&
2053  KMP_STRLEN(perms) == 4); // Make sure all fields are read.
2054 
2055  // Ending address is not included in the region, but beginning is.
2056  if ((addr >= beginning) && (addr < ending)) {
2057  perms[2] = 0; // 3rd and 4th characters do not matter.
2058  if (strcmp(perms, "rw") == 0) {
2059  // Memory we are looking for should be readable and writable.
2060  found = 1;
2061  }
2062  break;
2063  }
2064  }
2065 
2066  // Free resources.
2067  fclose(file);
2068  KMP_INTERNAL_FREE(name);
2069 #elif KMP_OS_FREEBSD
2070  char *buf;
2071  size_t lstsz;
2072  int mib[] = {CTL_KERN, KERN_PROC, KERN_PROC_VMMAP, getpid()};
2073  rc = sysctl(mib, 4, NULL, &lstsz, NULL, 0);
2074  if (rc < 0)
2075  return 0;
2076  // Convert from a count of vm entries to the size of the whole entry
2077  // map list, padded by 4/3 to allow the map to grow between sysctl calls.
2078  lstsz = lstsz * 4 / 3;
2079  buf = reinterpret_cast<char *>(kmpc_malloc(lstsz));
2080  rc = sysctl(mib, 4, buf, &lstsz, NULL, 0);
2081  if (rc < 0) {
2082  kmpc_free(buf);
2083  return 0;
2084  }
2085 
2086  char *lw = buf;
2087  char *up = buf + lstsz;
2088 
2089  while (lw < up) {
2090  struct kinfo_vmentry *cur = reinterpret_cast<struct kinfo_vmentry *>(lw);
2091  size_t cursz = cur->kve_structsize;
2092  if (cursz == 0)
2093  break;
2094  void *start = reinterpret_cast<void *>(cur->kve_start);
2095  void *end = reinterpret_cast<void *>(cur->kve_end);
2096  // Readable/Writable addresses within current map entry
2097  if ((addr >= start) && (addr < end)) {
2098  if ((cur->kve_protection & KVME_PROT_READ) != 0 &&
2099  (cur->kve_protection & KVME_PROT_WRITE) != 0) {
2100  found = 1;
2101  break;
2102  }
2103  }
2104  lw += cursz;
2105  }
2106  kmpc_free(buf);
2107 
2108 #elif KMP_OS_DARWIN
2109 
2110  /* On OS X*, the /proc pseudo filesystem is not available. Try to read
2111  memory using the vm interface instead. */
2112 
2113  int buffer;
2114  vm_size_t count;
2115  rc = vm_read_overwrite(
2116  mach_task_self(), // Task to read memory of.
2117  (vm_address_t)(addr), // Address to read from.
2118  1, // Number of bytes to be read.
2119  (vm_address_t)(&buffer), // Address of buffer to save read bytes in.
2120  &count // Address of var to save number of read bytes in.
2121  );
2122  if (rc == 0) {
2123  // Memory successfully read.
2124  found = 1;
2125  }
2126 
2127 #elif KMP_OS_NETBSD
2128 
2129  int mib[5];
2130  mib[0] = CTL_VM;
2131  mib[1] = VM_PROC;
2132  mib[2] = VM_PROC_MAP;
2133  mib[3] = getpid();
2134  mib[4] = sizeof(struct kinfo_vmentry);
2135 
2136  size_t size;
2137  rc = sysctl(mib, __arraycount(mib), NULL, &size, NULL, 0);
2138  KMP_ASSERT(!rc);
2139  KMP_ASSERT(size);
2140 
2141  size = size * 4 / 3;
2142  struct kinfo_vmentry *kiv = (struct kinfo_vmentry *)KMP_INTERNAL_MALLOC(size);
2143  KMP_ASSERT(kiv);
2144 
2145  rc = sysctl(mib, __arraycount(mib), kiv, &size, NULL, 0);
2146  KMP_ASSERT(!rc);
2147  KMP_ASSERT(size);
2148 
2149  for (size_t i = 0; i < size / sizeof(*kiv); i++) {
2150  if ((uint64_t)addr >= kiv[i].kve_start &&
2151  (uint64_t)addr < kiv[i].kve_end) {
2152  found = 1;
2153  break;
2154  }
2155  }
2156  KMP_INTERNAL_FREE(kiv);
2157 #elif KMP_OS_OPENBSD
2158 
2159  int mib[3];
2160  mib[0] = CTL_KERN;
2161  mib[1] = KERN_PROC_VMMAP;
2162  mib[2] = getpid();
2163 
2164  size_t size;
2165  uint64_t end;
2166  rc = sysctl(mib, 3, NULL, &size, NULL, 0);
2167  KMP_ASSERT(!rc);
2168  KMP_ASSERT(size);
2169  end = size;
2170 
2171  struct kinfo_vmentry kiv = {.kve_start = 0};
2172 
2173  while ((rc = sysctl(mib, 3, &kiv, &size, NULL, 0)) == 0) {
2174  KMP_ASSERT(size);
2175  if (kiv.kve_end == end)
2176  break;
2177 
2178  if ((uint64_t)addr >= kiv.kve_start && (uint64_t)addr < kiv.kve_end) {
2179  found = 1;
2180  break;
2181  }
2182  kiv.kve_start += 1;
2183  }
2184 #elif KMP_OS_DRAGONFLY
2185 
2186  // FIXME(DragonFly): Implement this
2187  found = 1;
2188 
2189 #else
2190 
2191 #error "Unknown or unsupported OS"
2192 
2193 #endif
2194 
2195  return found;
2196 
2197 } // __kmp_is_address_mapped
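
// For illustration: on Linux the check above boils down to scanning
// /proc/<pid>/maps lines of the form "<begin>-<end> <perms> ...". A
// self-contained sketch of the same parse, using the same fscanf pattern as
// the runtime (illustrative only, not compiled):
#if 0
#include <stdio.h>

static int demo_is_mapped_rw(const void *addr) {
  FILE *f = fopen("/proc/self/maps", "r");
  if (!f)
    return 0;
  void *lo, *hi;
  char perms[5];
  int found = 0;
  // Each line starts with "<lo>-<hi> <rwxp>"; the remainder is skipped.
  while (fscanf(f, "%p-%p %4s %*[^\n]\n", &lo, &hi, perms) == 3) {
    if (addr >= lo && addr < hi) {
      found = (perms[0] == 'r' && perms[1] == 'w');
      break;
    }
  }
  fclose(f);
  return found;
}
#endif // illustrative sketch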
2198 
2199 #ifdef USE_LOAD_BALANCE
2200 
2201 #if KMP_OS_DARWIN || KMP_OS_NETBSD
2202 
2203 // The function returns the rounded value of the system load average over a
2204 // time interval that depends on the value of the
2205 // __kmp_load_balance_interval variable (default is 60 sec; other values
2206 // may be 300 sec or 900 sec).
2207 // It returns -1 in case of error.
2208 int __kmp_get_load_balance(int max) {
2209  double averages[3];
2210  int ret_avg = 0;
2211 
2212  int res = getloadavg(averages, 3);
2213 
2214  // Check __kmp_load_balance_interval to determine which of the averages to
2215  // use. getloadavg() may return fewer samples than requested, i.e. fewer
2216  // than 3.
2217  if (__kmp_load_balance_interval < 180 && (res >= 1)) {
2218  ret_avg = (int)averages[0]; // 1 min
2219  } else if ((__kmp_load_balance_interval >= 180 &&
2220  __kmp_load_balance_interval < 600) &&
2221  (res >= 2)) {
2222  ret_avg = (int)averages[1]; // 5 min
2223  } else if ((__kmp_load_balance_interval >= 600) && (res == 3)) {
2224  ret_avg = (int)averages[2]; // 15 min
2225  } else { // Error occurred
2226  return -1;
2227  }
2228 
2229  return ret_avg;
2230 }
2231 
2232 #else // Linux* OS
2233 
2234 // The function returns the number of running (not sleeping) threads, or -1
2235 // in case of error. An error may be reported if the Linux* OS kernel is too
2236 // old (no "/proc" support). Counting stops once max running threads have
2237 // been encountered.
2238 int __kmp_get_load_balance(int max) {
2239  static int permanent_error = 0;
2240  static int glb_running_threads = 0; // Saved count of the running threads for
2241  // the thread balance algorithm
2242  static double glb_call_time = 0; /* Thread balance algorithm call time */
2243 
2244  int running_threads = 0; // Number of running threads in the system.
2245 
2246  DIR *proc_dir = NULL; // Handle of "/proc/" directory.
2247  struct dirent *proc_entry = NULL;
2248 
2249  kmp_str_buf_t task_path; // "/proc/<pid>/task/<tid>/" path.
2250  DIR *task_dir = NULL; // Handle of "/proc/<pid>/task/<tid>/" directory.
2251  struct dirent *task_entry = NULL;
2252  int task_path_fixed_len;
2253 
2254  kmp_str_buf_t stat_path; // "/proc/<pid>/task/<tid>/stat" path.
2255  int stat_file = -1;
2256  int stat_path_fixed_len;
2257 
2258 #ifdef KMP_DEBUG
2259  int total_processes = 0; // Total number of processes in system.
2260 #endif
2261 
2262  double call_time = 0.0;
2263 
2264  __kmp_str_buf_init(&task_path);
2265  __kmp_str_buf_init(&stat_path);
2266 
2267  __kmp_elapsed(&call_time);
2268 
2269  if (glb_call_time &&
2270  (call_time - glb_call_time < __kmp_load_balance_interval)) {
2271  running_threads = glb_running_threads;
2272  goto finish;
2273  }
2274 
2275  glb_call_time = call_time;
2276 
2277  // Do not spend time on scanning "/proc/" if we have a permanent error.
2278  if (permanent_error) {
2279  running_threads = -1;
2280  goto finish;
2281  }
2282 
2283  if (max <= 0) {
2284  max = INT_MAX;
2285  }
2286 
2287  // Open "/proc/" directory.
2288  proc_dir = opendir("/proc");
2289  if (proc_dir == NULL) {
2290  // Cannot open "/proc/". Probably the kernel does not support it. Return an
2291  // error now and in subsequent calls.
2292  running_threads = -1;
2293  permanent_error = 1;
2294  goto finish;
2295  }
2296 
2297  // Initialize fixed part of task_path. This part will not change.
2298  __kmp_str_buf_cat(&task_path, "/proc/", 6);
2299  task_path_fixed_len = task_path.used; // Remember number of used characters.
2300 
2301  proc_entry = readdir(proc_dir);
2302  while (proc_entry != NULL) {
2303  // Proc entry is a directory and name starts with a digit. Assume it is a
2304  // process' directory.
2305  if (proc_entry->d_type == DT_DIR && isdigit(proc_entry->d_name[0])) {
2306 
2307 #ifdef KMP_DEBUG
2308  ++total_processes;
2309 #endif
2310  // Make sure init process is the very first in "/proc", so we can replace
2311  // strcmp( proc_entry->d_name, "1" ) == 0 with simpler total_processes ==
2312  // 1. We are going to check that total_processes == 1 => d_name == "1" is
2313  // true (where "=>" is implication). Since C++ does not have => operator,
2314  // let us replace it with its equivalent: a => b == ! a || b.
2315  KMP_DEBUG_ASSERT(total_processes != 1 ||
2316  strcmp(proc_entry->d_name, "1") == 0);
2317 
2318  // Construct task_path.
2319  task_path.used = task_path_fixed_len; // Reset task_path to "/proc/".
2320  __kmp_str_buf_cat(&task_path, proc_entry->d_name,
2321  KMP_STRLEN(proc_entry->d_name));
2322  __kmp_str_buf_cat(&task_path, "/task", 5);
2323 
2324  task_dir = opendir(task_path.str);
2325  if (task_dir == NULL) {
2326  // A process can finish between reading the "/proc/" directory entry and
2327  // opening the process' "task/" directory. So, in the general case, we
2328  // should not complain, but just skip this process and read the next one.
2329  // But on systems with no "task/" support we would spend a lot of time
2330  // scanning the "/proc/" tree again and again without any benefit. The
2331  // "init" process (its pid is 1) should always exist, so if we cannot
2332  // open the "/proc/1/task/" directory, "task/" is not supported by the
2333  // kernel. Report an error now and in the future.
2334  if (strcmp(proc_entry->d_name, "1") == 0) {
2335  running_threads = -1;
2336  permanent_error = 1;
2337  goto finish;
2338  }
2339  } else {
2340  // Construct fixed part of stat file path.
2341  __kmp_str_buf_clear(&stat_path);
2342  __kmp_str_buf_cat(&stat_path, task_path.str, task_path.used);
2343  __kmp_str_buf_cat(&stat_path, "/", 1);
2344  stat_path_fixed_len = stat_path.used;
2345 
2346  task_entry = readdir(task_dir);
2347  while (task_entry != NULL) {
2348  // It is a directory and name starts with a digit.
2349  if (task_entry->d_type == DT_DIR && isdigit(task_entry->d_name[0])) {
2350 
2351  // Construct complete stat file path. Easiest way would be:
2352  // __kmp_str_buf_print( & stat_path, "%s/%s/stat", task_path.str,
2353  // task_entry->d_name );
2354  // but a series of __kmp_str_buf_cat() calls works a bit faster.
2355  stat_path.used =
2356  stat_path_fixed_len; // Reset stat path to its fixed part.
2357  __kmp_str_buf_cat(&stat_path, task_entry->d_name,
2358  KMP_STRLEN(task_entry->d_name));
2359  __kmp_str_buf_cat(&stat_path, "/stat", 5);
2360 
2361  // Note: Low-level API (open/read/close) is used. High-level API
2362  // (fopen/fclose) works ~ 30 % slower.
2363  stat_file = open(stat_path.str, O_RDONLY);
2364  if (stat_file == -1) {
2365  // We cannot report an error because task (thread) can terminate
2366  // just before reading this file.
2367  } else {
2368  /* Content of "stat" file looks like:
2369  24285 (program) S ...
2370 
2371  It is a single line (if the program name does not include funny
2372  symbols). The first number is a thread id, then the executable
2373  file name in parentheses, then the state of the thread. We need
2374  just the thread state.
2375 
2376  Good news: The length of the program name is 15 characters max;
2377  longer names are truncated.
2378 
2379  Thus, we need a rather short buffer: 15 chars for the program
2380  name + 2 parentheses + 3 spaces + ~7 digits of pid = 37.
2381 
2382  Bad news: The program name may contain special symbols like
2383  space, closing parenthesis, or even newline. This makes parsing
2384  the "stat" file not 100 % reliable. For funny program names the
2385  parsing may fail (reporting an incorrect thread state).
2386 
2387  Parsing the "status" file looks more promising (due to its
2388  different structure and escaping of special symbols), but
2389  reading and parsing of the "status" file works slower.
2390  -- ln
2391  */
2392  char buffer[65];
2393  ssize_t len;
2394  len = read(stat_file, buffer, sizeof(buffer) - 1);
2395  if (len >= 0) {
2396  buffer[len] = 0;
2397  // Using scanf:
2398  // sscanf( buffer, "%*d (%*s) %c ", & state );
2399  // looks very nice, but searching for a closing parenthesis
2400  // works a bit faster.
2401  char *close_parent = strstr(buffer, ") ");
2402  if (close_parent != NULL) {
2403  char state = *(close_parent + 2);
2404  if (state == 'R') {
2405  ++running_threads;
2406  if (running_threads >= max) {
2407  goto finish;
2408  }
2409  }
2410  }
2411  }
2412  close(stat_file);
2413  stat_file = -1;
2414  }
2415  }
2416  task_entry = readdir(task_dir);
2417  }
2418  closedir(task_dir);
2419  task_dir = NULL;
2420  }
2421  }
2422  proc_entry = readdir(proc_dir);
2423  }
2424 
2425  // There _might_ be a timing hole where the thread executing this
2426  // code gets skipped in the load balance, and running_threads is 0.
2427  // Assert in the debug builds only!!!
2428  KMP_DEBUG_ASSERT(running_threads > 0);
2429  if (running_threads <= 0) {
2430  running_threads = 1;
2431  }
2432 
2433 finish: // Clean up and exit.
2434  if (proc_dir != NULL) {
2435  closedir(proc_dir);
2436  }
2437  __kmp_str_buf_free(&task_path);
2438  if (task_dir != NULL) {
2439  closedir(task_dir);
2440  }
2441  __kmp_str_buf_free(&stat_path);
2442  if (stat_file != -1) {
2443  close(stat_file);
2444  }
2445 
2446  glb_running_threads = running_threads;
2447 
2448  return running_threads;
2449 
2450 } // __kmp_get_load_balance
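
// For illustration: the "search for the closing parenthesis" trick above can
// be seen in isolation. The thread state is the character two positions past
// the first ") " in a stat line. A minimal sketch on a canned line
// (illustrative only, not compiled):
#if 0
#include <string.h>

static char demo_stat_state(const char *stat_line) {
  // e.g. stat_line = "24285 (program) R 1 ..."
  const char *close_parent = strstr(stat_line, ") ");
  return close_parent ? *(close_parent + 2) : '?'; // 'R' means running
}
#endif // illustrative sketch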
2451 
2452 #endif // KMP_OS_DARWIN
2453 
2454 #endif // USE_LOAD_BALANCE
2455 
2456 #if !(KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_MIC || \
2457  ((KMP_OS_LINUX || KMP_OS_DARWIN) && KMP_ARCH_AARCH64) || \
2458  KMP_ARCH_PPC64 || KMP_ARCH_RISCV64 || KMP_ARCH_LOONGARCH64 || \
2459  KMP_ARCH_ARM || KMP_ARCH_VE)
2460 
2461 // We really only need the case with 1 argument, because CLANG always builds
2462 // a struct of pointers to shared variables referenced in the outlined function.
2463 int __kmp_invoke_microtask(microtask_t pkfn, int gtid, int tid, int argc,
2464  void *p_argv[]
2465 #if OMPT_SUPPORT
2466  ,
2467  void **exit_frame_ptr
2468 #endif
2469 ) {
2470 #if OMPT_SUPPORT
2471  *exit_frame_ptr = OMPT_GET_FRAME_ADDRESS(0);
2472 #endif
2473 
2474  switch (argc) {
2475  default:
2476  fprintf(stderr, "Too many args to microtask: %d!\n", argc);
2477  fflush(stderr);
2478  exit(-1);
2479  case 0:
2480  (*pkfn)(&gtid, &tid);
2481  break;
2482  case 1:
2483  (*pkfn)(&gtid, &tid, p_argv[0]);
2484  break;
2485  case 2:
2486  (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1]);
2487  break;
2488  case 3:
2489  (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2]);
2490  break;
2491  case 4:
2492  (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3]);
2493  break;
2494  case 5:
2495  (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4]);
2496  break;
2497  case 6:
2498  (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2499  p_argv[5]);
2500  break;
2501  case 7:
2502  (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2503  p_argv[5], p_argv[6]);
2504  break;
2505  case 8:
2506  (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2507  p_argv[5], p_argv[6], p_argv[7]);
2508  break;
2509  case 9:
2510  (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2511  p_argv[5], p_argv[6], p_argv[7], p_argv[8]);
2512  break;
2513  case 10:
2514  (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2515  p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9]);
2516  break;
2517  case 11:
2518  (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2519  p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10]);
2520  break;
2521  case 12:
2522  (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2523  p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
2524  p_argv[11]);
2525  break;
2526  case 13:
2527  (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2528  p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
2529  p_argv[11], p_argv[12]);
2530  break;
2531  case 14:
2532  (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2533  p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
2534  p_argv[11], p_argv[12], p_argv[13]);
2535  break;
2536  case 15:
2537  (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2538  p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
2539  p_argv[11], p_argv[12], p_argv[13], p_argv[14]);
2540  break;
2541  }
2542 
2543  return 1;
2544 }
2545 
2546 #endif
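
// For illustration: the dispatcher above assumes the outlined-function
// convention used by this file: gtid and tid are passed by pointer, followed
// by the shared-variable pointers. A hypothetical microtask matching the
// argc == 1 case (illustrative only, not compiled):
#if 0
static void demo_microtask(int *gtid, int *tid, void *shared) {
  // With clang, the shared variables arrive packed behind one pointer.
  int *counter = (int *)shared;
  *counter += *gtid + *tid;
}
// Invoked roughly as:
//   void *argv[1] = {&counter};
//   __kmp_invoke_microtask((microtask_t)demo_microtask, gtid, tid, 1, argv);
#endif // illustrative sketch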
2547 
2548 #if KMP_OS_LINUX
2549 // Functions for hidden helper task
2550 namespace {
2551 // Condition variable for initializing hidden helper team
2552 pthread_cond_t hidden_helper_threads_initz_cond_var;
2553 pthread_mutex_t hidden_helper_threads_initz_lock;
2554 volatile int hidden_helper_initz_signaled = FALSE;
2555 
2556 // Condition variable for deinitializing hidden helper team
2557 pthread_cond_t hidden_helper_threads_deinitz_cond_var;
2558 pthread_mutex_t hidden_helper_threads_deinitz_lock;
2559 volatile int hidden_helper_deinitz_signaled = FALSE;
2560 
2561 // Condition variable for the wrapper function of main thread
2562 pthread_cond_t hidden_helper_main_thread_cond_var;
2563 pthread_mutex_t hidden_helper_main_thread_lock;
2564 volatile int hidden_helper_main_thread_signaled = FALSE;
2565 
2566 // Semaphore for worker threads. We don't use a condition variable here
2567 // because, when multiple signals are sent at the same time, only one thread
2568 // might be woken.
2569 sem_t hidden_helper_task_sem;
2570 } // namespace
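
// For illustration: the comment above is about wakeup semantics. sem_post()
// performed N times releases exactly N waiters, whereas pthread_cond_signal()
// wakes at most one waiter per call and signals sent while no one is waiting
// are lost. A minimal sketch of the semaphore behavior, with hypothetical
// demo_* names (illustrative only, not compiled):
#if 0
#include <semaphore.h>

static sem_t demo_sem; // initialized once via sem_init(&demo_sem, 0, 0)

static void demo_producer(int n_tasks) {
  // One post per task: each post is guaranteed to release one waiter, even
  // if no thread is blocked yet when the post happens.
  for (int i = 0; i < n_tasks; ++i)
    sem_post(&demo_sem);
}

static void demo_worker(void) {
  sem_wait(&demo_sem); // blocks until a post is available, then decrements
  // ... run one hidden helper task ...
}
#endif // illustrative sketch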
2571 
2572 void __kmp_hidden_helper_worker_thread_wait() {
2573  int status = sem_wait(&hidden_helper_task_sem);
2574  KMP_CHECK_SYSFAIL("sem_wait", status);
2575 }
2576 
2577 void __kmp_do_initialize_hidden_helper_threads() {
2578  // Initialize condition variable
2579  int status =
2580  pthread_cond_init(&hidden_helper_threads_initz_cond_var, nullptr);
2581  KMP_CHECK_SYSFAIL("pthread_cond_init", status);
2582 
2583  status = pthread_cond_init(&hidden_helper_threads_deinitz_cond_var, nullptr);
2584  KMP_CHECK_SYSFAIL("pthread_cond_init", status);
2585 
2586  status = pthread_cond_init(&hidden_helper_main_thread_cond_var, nullptr);
2587  KMP_CHECK_SYSFAIL("pthread_cond_init", status);
2588 
2589  status = pthread_mutex_init(&hidden_helper_threads_initz_lock, nullptr);
2590  KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
2591 
2592  status = pthread_mutex_init(&hidden_helper_threads_deinitz_lock, nullptr);
2593  KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
2594 
2595  status = pthread_mutex_init(&hidden_helper_main_thread_lock, nullptr);
2596  KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
2597 
2598  // Initialize the semaphore
2599  status = sem_init(&hidden_helper_task_sem, 0, 0);
2600  KMP_CHECK_SYSFAIL("sem_init", status);
2601 
2602  // Create a new thread to finish initialization
2603  pthread_t handle;
2604  status = pthread_create(
2605  &handle, nullptr,
2606  [](void *) -> void * {
2607  __kmp_hidden_helper_threads_initz_routine();
2608  return nullptr;
2609  },
2610  nullptr);
2611  KMP_CHECK_SYSFAIL("pthread_create", status);
2612 }
2613 
2614 void __kmp_hidden_helper_threads_initz_wait() {
2615  // The initial thread waits here for the initialization to complete. The
2616  // condition variable is notified by the main thread of the hidden helper team.
2617  int status = pthread_mutex_lock(&hidden_helper_threads_initz_lock);
2618  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
2619 
2620  while (!TCR_4(hidden_helper_initz_signaled)) { // tolerate spurious wakeups
2621  status = pthread_cond_wait(&hidden_helper_threads_initz_cond_var,
2622  &hidden_helper_threads_initz_lock);
2623  KMP_CHECK_SYSFAIL("pthread_cond_wait", status);
2624  }
2625 
2626  status = pthread_mutex_unlock(&hidden_helper_threads_initz_lock);
2627  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
2628 }
2629 
2630 void __kmp_hidden_helper_initz_release() {
2631  // After all initialization, reset __kmp_init_hidden_helper_threads to false.
2632  int status = pthread_mutex_lock(&hidden_helper_threads_initz_lock);
2633  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
2634 
2635  status = pthread_cond_signal(&hidden_helper_threads_initz_cond_var);
2636  KMP_CHECK_SYSFAIL("pthread_cond_signal", status);
2637 
2638  TCW_SYNC_4(hidden_helper_initz_signaled, TRUE);
2639 
2640  status = pthread_mutex_unlock(&hidden_helper_threads_initz_lock);
2641  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
2642 }
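
// For illustration: the wait/release pairs in this section are one-shot
// events built from a {mutex, condition variable, flag} triple. The canonical
// shape, with the predicate re-checked in a loop so spurious wakeups are
// harmless, looks like this (hypothetical evt_* names; illustrative only,
// not compiled):
#if 0
#include <pthread.h>

static pthread_mutex_t evt_mx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t evt_cv = PTHREAD_COND_INITIALIZER;
static int evt_signaled = 0;

static void evt_wait(void) {
  pthread_mutex_lock(&evt_mx);
  while (!evt_signaled) // predicate loop guards against spurious wakeups
    pthread_cond_wait(&evt_cv, &evt_mx);
  pthread_mutex_unlock(&evt_mx);
}

static void evt_release(void) {
  pthread_mutex_lock(&evt_mx);
  evt_signaled = 1; // set the flag before signaling, under the same mutex
  pthread_cond_signal(&evt_cv);
  pthread_mutex_unlock(&evt_mx);
}
#endif // illustrative sketch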
2643 
2644 void __kmp_hidden_helper_main_thread_wait() {
2645  // The main thread of the hidden helper team blocks here. The condition
2646  // variable can only be signaled in the destructor of the RTL.
2647  int status = pthread_mutex_lock(&hidden_helper_main_thread_lock);
2648  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
2649 
2650  while (!TCR_4(hidden_helper_main_thread_signaled)) { // tolerate spurious wakeups
2651  status = pthread_cond_wait(&hidden_helper_main_thread_cond_var,
2652  &hidden_helper_main_thread_lock);
2653  KMP_CHECK_SYSFAIL("pthread_cond_wait", status);
2654  }
2655 
2656  status = pthread_mutex_unlock(&hidden_helper_main_thread_lock);
2657  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
2658 }
2659 
2660 void __kmp_hidden_helper_main_thread_release() {
2661  // The initial thread of OpenMP RTL should call this function to wake up the
2662  // main thread of hidden helper team.
2663  int status = pthread_mutex_lock(&hidden_helper_main_thread_lock);
2664  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
2665 
2666  status = pthread_cond_signal(&hidden_helper_main_thread_cond_var);
2667  KMP_CHECK_SYSFAIL("pthread_cond_signal", status);
2668 
2669  // The hidden helper team is done here
2670  TCW_SYNC_4(hidden_helper_main_thread_signaled, TRUE);
2671 
2672  status = pthread_mutex_unlock(&hidden_helper_main_thread_lock);
2673  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
2674 }
2675 
2676 void __kmp_hidden_helper_worker_thread_signal() {
2677  int status = sem_post(&hidden_helper_task_sem);
2678  KMP_CHECK_SYSFAIL("sem_post", status);
2679 }
2680 
2681 void __kmp_hidden_helper_threads_deinitz_wait() {
2682  // The initial thread waits here for the deinitialization to complete. The
2683  // condition variable is notified by the main thread of the hidden helper team.
2684  int status = pthread_mutex_lock(&hidden_helper_threads_deinitz_lock);
2685  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
2686 
2687  while (!TCR_4(hidden_helper_deinitz_signaled)) { // tolerate spurious wakeups
2688  status = pthread_cond_wait(&hidden_helper_threads_deinitz_cond_var,
2689  &hidden_helper_threads_deinitz_lock);
2690  KMP_CHECK_SYSFAIL("pthread_cond_wait", status);
2691  }
2692 
2693  status = pthread_mutex_unlock(&hidden_helper_threads_deinitz_lock);
2694  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
2695 }
2696 
2697 void __kmp_hidden_helper_threads_deinitz_release() {
2698  int status = pthread_mutex_lock(&hidden_helper_threads_deinitz_lock);
2699  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
2700 
2701  status = pthread_cond_signal(&hidden_helper_threads_deinitz_cond_var);
2702  KMP_CHECK_SYSFAIL("pthread_cond_signal", status);
2703 
2704  TCW_SYNC_4(hidden_helper_deinitz_signaled, TRUE);
2705 
2706  status = pthread_mutex_unlock(&hidden_helper_threads_deinitz_lock);
2707  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
2708 }
2709 #else // KMP_OS_LINUX
2710 void __kmp_hidden_helper_worker_thread_wait() {
2711  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
2712 }
2713 
2714 void __kmp_do_initialize_hidden_helper_threads() {
2715  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
2716 }
2717 
2718 void __kmp_hidden_helper_threads_initz_wait() {
2719  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
2720 }
2721 
2722 void __kmp_hidden_helper_initz_release() {
2723  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
2724 }
2725 
2726 void __kmp_hidden_helper_main_thread_wait() {
2727  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
2728 }
2729 
2730 void __kmp_hidden_helper_main_thread_release() {
2731  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
2732 }
2733 
2734 void __kmp_hidden_helper_worker_thread_signal() {
2735  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
2736 }
2737 
2738 void __kmp_hidden_helper_threads_deinitz_wait() {
2739  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
2740 }
2741 
2742 void __kmp_hidden_helper_threads_deinitz_release() {
2743  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
2744 }
2745 #endif // KMP_OS_LINUX
2746 
2747 // end of file //