/*
 * kmp_global.cpp -- KPTS global variables for runtime support library
 */

//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
13 #include "kmp.h"
14 #include "kmp_affinity.h"
15 #if KMP_USE_HIER_SCHED
16 #include "kmp_dispatch_hier.h"
17 #endif
18 
19 kmp_key_t __kmp_gtid_threadprivate_key;
20 
21 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
22 kmp_cpuinfo_t __kmp_cpuinfo = {0}; // Not initialized
23 #endif
24 
25 #if KMP_STATS_ENABLED
26 #include "kmp_stats.h"
27 // lock for modifying the global __kmp_stats_list
28 kmp_tas_lock_t __kmp_stats_lock;
29 
30 // global list of per thread stats, the head is a sentinel node which
31 // accumulates all stats produced before __kmp_create_worker is called.
32 kmp_stats_list *__kmp_stats_list;
33 
34 // thread local pointer to stats node within list
35 KMP_THREAD_LOCAL kmp_stats_list *__kmp_stats_thread_ptr = NULL;
36 
37 // gives reference tick for all events (considered the 0 tick)
38 tsc_tick_count __kmp_stats_start_time;
39 #endif
40 
41 /* ----------------------------------------------------- */
42 /* INITIALIZATION VARIABLES */
43 /* they are syncronized to write during init, but read anytime */
44 volatile int __kmp_init_serial = FALSE;
45 volatile int __kmp_init_gtid = FALSE;
46 volatile int __kmp_init_common = FALSE;
47 volatile int __kmp_init_middle = FALSE;
48 volatile int __kmp_init_parallel = FALSE;
49 #if KMP_USE_MONITOR
50 volatile int __kmp_init_monitor =
51  0; /* 1 - launched, 2 - actually started (Windows* OS only) */
52 #endif
53 volatile int __kmp_init_user_locks = FALSE;
54 
55 /* list of address of allocated caches for commons */
56 kmp_cached_addr_t *__kmp_threadpriv_cache_list = NULL;
57 
58 int __kmp_init_counter = 0;
59 int __kmp_root_counter = 0;
60 int __kmp_version = 0;
61 
62 std::atomic<kmp_int32> __kmp_team_counter = ATOMIC_VAR_INIT(0);
63 std::atomic<kmp_int32> __kmp_task_counter = ATOMIC_VAR_INIT(0);
64 
65 unsigned int __kmp_init_wait =
66  KMP_DEFAULT_INIT_WAIT; /* initial number of spin-tests */
67 unsigned int __kmp_next_wait =
68  KMP_DEFAULT_NEXT_WAIT; /* susequent number of spin-tests */
69 
70 size_t __kmp_stksize = KMP_DEFAULT_STKSIZE;
71 #if KMP_USE_MONITOR
72 size_t __kmp_monitor_stksize = 0; // auto adjust
73 #endif
74 size_t __kmp_stkoffset = KMP_DEFAULT_STKOFFSET;
75 int __kmp_stkpadding = KMP_MIN_STKPADDING;
76 
77 size_t __kmp_malloc_pool_incr = KMP_DEFAULT_MALLOC_POOL_INCR;
78 
79 // Barrier method defaults, settings, and strings.
80 // branch factor = 2^branch_bits (only relevant for tree & hyper barrier types)
81 kmp_uint32 __kmp_barrier_gather_bb_dflt = 2;
82 /* branch_factor = 4 */ /* hyper2: C78980 */
83 kmp_uint32 __kmp_barrier_release_bb_dflt = 2;
84 /* branch_factor = 4 */ /* hyper2: C78980 */
85 
86 kmp_bar_pat_e __kmp_barrier_gather_pat_dflt = bp_hyper_bar;
87 /* hyper2: C78980 */
88 kmp_bar_pat_e __kmp_barrier_release_pat_dflt = bp_hyper_bar;
89 /* hyper2: C78980 */
90 
91 kmp_uint32 __kmp_barrier_gather_branch_bits[bs_last_barrier] = {0};
92 kmp_uint32 __kmp_barrier_release_branch_bits[bs_last_barrier] = {0};
93 kmp_bar_pat_e __kmp_barrier_gather_pattern[bs_last_barrier] = {bp_linear_bar};
94 kmp_bar_pat_e __kmp_barrier_release_pattern[bs_last_barrier] = {bp_linear_bar};
95 char const *__kmp_barrier_branch_bit_env_name[bs_last_barrier] = {
96  "KMP_PLAIN_BARRIER", "KMP_FORKJOIN_BARRIER"
97 #if KMP_FAST_REDUCTION_BARRIER
98  ,
99  "KMP_REDUCTION_BARRIER"
100 #endif // KMP_FAST_REDUCTION_BARRIER
101 };
102 char const *__kmp_barrier_pattern_env_name[bs_last_barrier] = {
103  "KMP_PLAIN_BARRIER_PATTERN", "KMP_FORKJOIN_BARRIER_PATTERN"
104 #if KMP_FAST_REDUCTION_BARRIER
105  ,
106  "KMP_REDUCTION_BARRIER_PATTERN"
107 #endif // KMP_FAST_REDUCTION_BARRIER
108 };
109 char const *__kmp_barrier_type_name[bs_last_barrier] = {"plain", "forkjoin"
110 #if KMP_FAST_REDUCTION_BARRIER
111  ,
112  "reduction"
113 #endif // KMP_FAST_REDUCTION_BARRIER
114 };
115 char const *__kmp_barrier_pattern_name[bp_last_bar] = {"linear", "tree",
116  "hyper", "hierarchical"};
117 
118 int __kmp_allThreadsSpecified = 0;
119 size_t __kmp_align_alloc = CACHE_LINE;
120 
121 int __kmp_generate_warnings = kmp_warnings_low;
122 int __kmp_reserve_warn = 0;
123 int __kmp_xproc = 0;
124 int __kmp_avail_proc = 0;
125 size_t __kmp_sys_min_stksize = KMP_MIN_STKSIZE;
126 int __kmp_sys_max_nth = KMP_MAX_NTH;
127 int __kmp_max_nth = 0;
128 int __kmp_cg_max_nth = 0;
129 int __kmp_teams_max_nth = 0;
130 int __kmp_threads_capacity = 0;
131 int __kmp_dflt_team_nth = 0;
132 int __kmp_dflt_team_nth_ub = 0;
133 int __kmp_tp_capacity = 0;
134 int __kmp_tp_cached = 0;
135 int __kmp_dflt_nested = FALSE;
136 int __kmp_dispatch_num_buffers = KMP_DFLT_DISP_NUM_BUFF;
137 int __kmp_dflt_max_active_levels =
138  KMP_MAX_ACTIVE_LEVELS_LIMIT; /* max_active_levels limit */
139 #if KMP_NESTED_HOT_TEAMS
140 int __kmp_hot_teams_mode = 0; /* 0 - free extra threads when reduced */
141 /* 1 - keep extra threads when reduced */
142 int __kmp_hot_teams_max_level = 1; /* nesting level of hot teams */
143 #endif
144 enum library_type __kmp_library = library_none;
145 enum sched_type __kmp_sched =
146  kmp_sch_default; /* scheduling method for runtime scheduling */
147 enum sched_type __kmp_static =
148  kmp_sch_static_greedy; /* default static scheduling method */
149 enum sched_type __kmp_guided =
150  kmp_sch_guided_iterative_chunked; /* default guided scheduling method */
151 enum sched_type __kmp_auto =
152  kmp_sch_guided_analytical_chunked; /* default auto scheduling method */
153 #if KMP_USE_HIER_SCHED
154 int __kmp_dispatch_hand_threading = 0;
155 int __kmp_hier_max_units[kmp_hier_layer_e::LAYER_LAST + 1];
156 int __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_LAST + 1];
157 kmp_hier_sched_env_t __kmp_hier_scheds = {0, 0, NULL, NULL, NULL};
158 #endif
159 int __kmp_dflt_blocktime = KMP_DEFAULT_BLOCKTIME;
160 #if KMP_USE_MONITOR
161 int __kmp_monitor_wakeups = KMP_MIN_MONITOR_WAKEUPS;
162 int __kmp_bt_intervals = KMP_INTERVALS_FROM_BLOCKTIME(KMP_DEFAULT_BLOCKTIME,
163  KMP_MIN_MONITOR_WAKEUPS);
164 #endif
165 #ifdef KMP_ADJUST_BLOCKTIME
166 int __kmp_zero_bt = FALSE;
167 #endif /* KMP_ADJUST_BLOCKTIME */
168 #ifdef KMP_DFLT_NTH_CORES
169 int __kmp_ncores = 0;
170 #endif
171 int __kmp_chunk = 0;
172 int __kmp_abort_delay = 0;
173 #if KMP_OS_LINUX && defined(KMP_TDATA_GTID)
174 int __kmp_gtid_mode = 3; /* use __declspec(thread) TLS to store gtid */
175 int __kmp_adjust_gtid_mode = FALSE;
176 #elif KMP_OS_WINDOWS
177 int __kmp_gtid_mode = 2; /* use TLS functions to store gtid */
178 int __kmp_adjust_gtid_mode = FALSE;
179 #else
180 int __kmp_gtid_mode = 0; /* select method to get gtid based on #threads */
181 int __kmp_adjust_gtid_mode = TRUE;
182 #endif /* KMP_OS_LINUX && defined(KMP_TDATA_GTID) */
183 #ifdef KMP_TDATA_GTID
184 KMP_THREAD_LOCAL int __kmp_gtid = KMP_GTID_DNE;
185 #endif /* KMP_TDATA_GTID */
186 int __kmp_tls_gtid_min = INT_MAX;
187 int __kmp_foreign_tp = TRUE;
188 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
189 int __kmp_inherit_fp_control = TRUE;
190 kmp_int16 __kmp_init_x87_fpu_control_word = 0;
191 kmp_uint32 __kmp_init_mxcsr = 0;
192 #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
193 
194 #ifdef USE_LOAD_BALANCE
195 double __kmp_load_balance_interval = 1.0;
196 #endif /* USE_LOAD_BALANCE */
197 
198 kmp_nested_nthreads_t __kmp_nested_nth = {NULL, 0, 0};
199 
#if KMP_USE_ADAPTIVE_LOCKS

kmp_adaptive_backoff_params_t __kmp_adaptive_backoff_params = {
    1, 1024}; // TODO: tune it!

#if KMP_DEBUG_ADAPTIVE_LOCKS
// "-" means write speculative-lock statistics to the default stream.
const char *__kmp_speculative_statsfile = "-";
#endif

#endif // KMP_USE_ADAPTIVE_LOCKS

#if OMP_40_ENABLED
int __kmp_display_env = FALSE;
int __kmp_display_env_verbose = FALSE;
int __kmp_omp_cancellation = FALSE;
#endif
216 
217 /* map OMP 3.0 schedule types with our internal schedule types */
218 enum sched_type __kmp_sch_map[kmp_sched_upper - kmp_sched_lower_ext +
219  kmp_sched_upper_std - kmp_sched_lower - 2] = {
220  kmp_sch_static_chunked, // ==> kmp_sched_static = 1
221  kmp_sch_dynamic_chunked, // ==> kmp_sched_dynamic = 2
222  kmp_sch_guided_chunked, // ==> kmp_sched_guided = 3
223  kmp_sch_auto, // ==> kmp_sched_auto = 4
224  kmp_sch_trapezoidal // ==> kmp_sched_trapezoidal = 101
225  // will likely not be used, introduced here just to debug the code
226  // of public intel extension schedules
227 };
228 
229 #if KMP_OS_LINUX
230 enum clock_function_type __kmp_clock_function;
231 int __kmp_clock_function_param;
232 #endif /* KMP_OS_LINUX */
233 
234 #if KMP_MIC_SUPPORTED
235 enum mic_type __kmp_mic_type = non_mic;
236 #endif
237 
238 #if KMP_AFFINITY_SUPPORTED
239 
240 KMPAffinity *__kmp_affinity_dispatch = NULL;
241 
242 #if KMP_USE_HWLOC
243 int __kmp_hwloc_error = FALSE;
244 hwloc_topology_t __kmp_hwloc_topology = NULL;
245 int __kmp_numa_detected = FALSE;
246 int __kmp_tile_depth = 0;
247 #endif
248 
249 #if KMP_OS_WINDOWS
250 #if KMP_GROUP_AFFINITY
251 int __kmp_num_proc_groups = 1;
252 #endif /* KMP_GROUP_AFFINITY */
253 kmp_GetActiveProcessorCount_t __kmp_GetActiveProcessorCount = NULL;
254 kmp_GetActiveProcessorGroupCount_t __kmp_GetActiveProcessorGroupCount = NULL;
255 kmp_GetThreadGroupAffinity_t __kmp_GetThreadGroupAffinity = NULL;
256 kmp_SetThreadGroupAffinity_t __kmp_SetThreadGroupAffinity = NULL;
257 #endif /* KMP_OS_WINDOWS */
258 
259 size_t __kmp_affin_mask_size = 0;
260 enum affinity_type __kmp_affinity_type = affinity_default;
261 enum affinity_gran __kmp_affinity_gran = affinity_gran_default;
262 int __kmp_affinity_gran_levels = -1;
263 int __kmp_affinity_dups = TRUE;
264 enum affinity_top_method __kmp_affinity_top_method =
265  affinity_top_method_default;
266 int __kmp_affinity_compact = 0;
267 int __kmp_affinity_offset = 0;
268 int __kmp_affinity_verbose = FALSE;
269 int __kmp_affinity_warnings = TRUE;
270 int __kmp_affinity_respect_mask = affinity_respect_mask_default;
271 char *__kmp_affinity_proclist = NULL;
272 kmp_affin_mask_t *__kmp_affinity_masks = NULL;
273 unsigned __kmp_affinity_num_masks = 0;
274 
275 char *__kmp_cpuinfo_file = NULL;
276 
277 #endif /* KMP_AFFINITY_SUPPORTED */
278 
279 #if OMP_40_ENABLED
280 kmp_nested_proc_bind_t __kmp_nested_proc_bind = {NULL, 0, 0};
281 int __kmp_affinity_num_places = 0;
282 #endif
283 
284 #if OMP_50_ENABLED
285 int __kmp_display_affinity = FALSE;
286 char *__kmp_affinity_format = NULL;
287 #endif // OMP_50_ENABLED
288 
289 kmp_hws_item_t __kmp_hws_socket = {0, 0};
290 kmp_hws_item_t __kmp_hws_node = {0, 0};
291 kmp_hws_item_t __kmp_hws_tile = {0, 0};
292 kmp_hws_item_t __kmp_hws_core = {0, 0};
293 kmp_hws_item_t __kmp_hws_proc = {0, 0};
294 int __kmp_hws_requested = 0;
295 int __kmp_hws_abs_flag = 0; // absolute or per-item number requested
296 
297 #if OMP_40_ENABLED
298 kmp_int32 __kmp_default_device = 0;
299 #endif
300 
301 kmp_tasking_mode_t __kmp_tasking_mode = tskm_task_teams;
302 #if OMP_45_ENABLED
303 kmp_int32 __kmp_max_task_priority = 0;
304 kmp_uint64 __kmp_taskloop_min_tasks = 0;
305 #endif
306 
307 #if OMP_50_ENABLED
308 int __kmp_memkind_available = 0;
309 int __kmp_hbw_mem_available = 0;
310 const omp_allocator_t *OMP_NULL_ALLOCATOR = NULL;
311 const omp_allocator_t *omp_default_mem_alloc = (const omp_allocator_t *)1;
312 const omp_allocator_t *omp_large_cap_mem_alloc = (const omp_allocator_t *)2;
313 const omp_allocator_t *omp_const_mem_alloc = (const omp_allocator_t *)3;
314 const omp_allocator_t *omp_high_bw_mem_alloc = (const omp_allocator_t *)4;
315 const omp_allocator_t *omp_low_lat_mem_alloc = (const omp_allocator_t *)5;
316 const omp_allocator_t *omp_cgroup_mem_alloc = (const omp_allocator_t *)6;
317 const omp_allocator_t *omp_pteam_mem_alloc = (const omp_allocator_t *)7;
318 const omp_allocator_t *omp_thread_mem_alloc = (const omp_allocator_t *)8;
319 void *const *__kmp_def_allocator = omp_default_mem_alloc;
320 #endif
321 
322 /* This check ensures that the compiler is passing the correct data type for the
323  flags formal parameter of the function kmpc_omp_task_alloc(). If the type is
324  not a 4-byte type, then give an error message about a non-positive length
325  array pointing here. If that happens, the kmp_tasking_flags_t structure must
326  be redefined to have exactly 32 bits. */
327 KMP_BUILD_ASSERT(sizeof(kmp_tasking_flags_t) == 4);
328 
329 int __kmp_task_stealing_constraint = 1; /* Constrain task stealing by default */
330 
331 #ifdef DEBUG_SUSPEND
332 int __kmp_suspend_count = 0;
333 #endif
334 
335 int __kmp_settings = FALSE;
336 int __kmp_duplicate_library_ok = 0;
337 #if USE_ITT_BUILD
338 int __kmp_forkjoin_frames = 1;
339 int __kmp_forkjoin_frames_mode = 3;
340 #endif
341 PACKED_REDUCTION_METHOD_T __kmp_force_reduction_method =
342  reduction_method_not_defined;
343 int __kmp_determ_red = FALSE;
344 
345 #ifdef KMP_DEBUG
346 int kmp_a_debug = 0;
347 int kmp_b_debug = 0;
348 int kmp_c_debug = 0;
349 int kmp_d_debug = 0;
350 int kmp_e_debug = 0;
351 int kmp_f_debug = 0;
352 int kmp_diag = 0;
353 #endif
354 
355 /* For debug information logging using rotating buffer */
356 int __kmp_debug_buf =
357  FALSE; /* TRUE means use buffer, FALSE means print to stderr */
358 int __kmp_debug_buf_lines =
359  KMP_DEBUG_BUF_LINES_INIT; /* Lines of debug stored in buffer */
360 int __kmp_debug_buf_chars =
361  KMP_DEBUG_BUF_CHARS_INIT; /* Characters allowed per line in buffer */
362 int __kmp_debug_buf_atomic =
363  FALSE; /* TRUE means use atomic update of buffer entry pointer */
364 
365 char *__kmp_debug_buffer = NULL; /* Debug buffer itself */
366 std::atomic<int> __kmp_debug_count =
367  ATOMIC_VAR_INIT(0); /* number of lines printed in buffer so far */
368 int __kmp_debug_buf_warn_chars =
369  0; /* Keep track of char increase recommended in warnings */
370 /* end rotating debug buffer */
371 
372 #ifdef KMP_DEBUG
373 int __kmp_par_range; /* +1 => only go par for constructs in range */
374 /* -1 => only go par for constructs outside range */
375 char __kmp_par_range_routine[KMP_PAR_RANGE_ROUTINE_LEN] = {'\0'};
376 char __kmp_par_range_filename[KMP_PAR_RANGE_FILENAME_LEN] = {'\0'};
377 int __kmp_par_range_lb = 0;
378 int __kmp_par_range_ub = INT_MAX;
379 #endif /* KMP_DEBUG */
380 
381 /* For printing out dynamic storage map for threads and teams */
382 int __kmp_storage_map =
383  FALSE; /* True means print storage map for threads and teams */
384 int __kmp_storage_map_verbose =
385  FALSE; /* True means storage map includes placement info */
386 int __kmp_storage_map_verbose_specified = FALSE;
387 /* Initialize the library data structures when we fork a child process, defaults
388  * to TRUE */
389 int __kmp_need_register_atfork =
390  TRUE; /* At initialization, call pthread_atfork to install fork handler */
391 int __kmp_need_register_atfork_specified = TRUE;
392 
393 int __kmp_env_stksize = FALSE; /* KMP_STACKSIZE specified? */
394 int __kmp_env_blocktime = FALSE; /* KMP_BLOCKTIME specified? */
395 int __kmp_env_checks = FALSE; /* KMP_CHECKS specified? */
396 int __kmp_env_consistency_check = FALSE; /* KMP_CONSISTENCY_CHECK specified? */
397 
398 kmp_uint32 __kmp_yield_init = KMP_INIT_WAIT;
399 kmp_uint32 __kmp_yield_next = KMP_NEXT_WAIT;
400 
401 #if KMP_USE_MONITOR
402 kmp_uint32 __kmp_yielding_on = 1;
403 #endif
404 #if KMP_OS_CNK
405 kmp_uint32 __kmp_yield_cycle = 0;
406 #else
407 kmp_uint32 __kmp_yield_cycle = 1; /* Yield-cycle is on by default */
408 #endif
409 kmp_int32 __kmp_yield_on_count =
410  10; /* By default, yielding is on for 10 monitor periods. */
411 kmp_int32 __kmp_yield_off_count =
412  1; /* By default, yielding is off for 1 monitor periods. */
413 
414 /* ------------------------------------------------------ */
415 /* STATE mostly syncronized with global lock */
416 /* data written to rarely by masters, read often by workers */
417 /* TODO: None of this global padding stuff works consistently because the order
418  of declaration is not necessarily correlated to storage order. To fix this,
419  all the important globals must be put in a big structure instead. */
420 KMP_ALIGN_CACHE
421 kmp_info_t **__kmp_threads = NULL;
422 kmp_root_t **__kmp_root = NULL;
423 
424 /* data read/written to often by masters */
425 KMP_ALIGN_CACHE
426 volatile int __kmp_nth = 0;
427 volatile int __kmp_all_nth = 0;
428 int __kmp_thread_pool_nth = 0;
429 volatile kmp_info_t *__kmp_thread_pool = NULL;
430 volatile kmp_team_t *__kmp_team_pool = NULL;
431 
432 KMP_ALIGN_CACHE
433 std::atomic<int> __kmp_thread_pool_active_nth = ATOMIC_VAR_INIT(0);
434 
435 /* -------------------------------------------------
436  * GLOBAL/ROOT STATE */
437 KMP_ALIGN_CACHE
438 kmp_global_t __kmp_global = {{0}};
439 
440 /* ----------------------------------------------- */
441 /* GLOBAL SYNCHRONIZATION LOCKS */
442 /* TODO verify the need for these locks and if they need to be global */
443 
444 #if KMP_USE_INTERNODE_ALIGNMENT
445 /* Multinode systems have larger cache line granularity which can cause
446  * false sharing if the alignment is not large enough for these locks */
447 KMP_ALIGN_CACHE_INTERNODE
448 
449 KMP_BOOTSTRAP_LOCK_INIT(__kmp_initz_lock); /* Control initializations */
450 KMP_ALIGN_CACHE_INTERNODE
451 KMP_BOOTSTRAP_LOCK_INIT(__kmp_forkjoin_lock); /* control fork/join access */
452 KMP_ALIGN_CACHE_INTERNODE
453 KMP_BOOTSTRAP_LOCK_INIT(__kmp_exit_lock); /* exit() is not always thread-safe */
454 #if KMP_USE_MONITOR
455 /* control monitor thread creation */
456 KMP_ALIGN_CACHE_INTERNODE
457 KMP_BOOTSTRAP_LOCK_INIT(__kmp_monitor_lock);
458 #endif
459 /* used for the hack to allow threadprivate cache and __kmp_threads expansion
460  to co-exist */
461 KMP_ALIGN_CACHE_INTERNODE
462 KMP_BOOTSTRAP_LOCK_INIT(__kmp_tp_cached_lock);
463 
464 KMP_ALIGN_CACHE_INTERNODE
465 KMP_LOCK_INIT(__kmp_global_lock); /* Control OS/global access */
466 KMP_ALIGN_CACHE_INTERNODE
467 kmp_queuing_lock_t __kmp_dispatch_lock; /* Control dispatch access */
468 KMP_ALIGN_CACHE_INTERNODE
469 KMP_LOCK_INIT(__kmp_debug_lock); /* Control I/O access for KMP_DEBUG */
470 #else
471 KMP_ALIGN_CACHE
472 
473 KMP_BOOTSTRAP_LOCK_INIT(__kmp_initz_lock); /* Control initializations */
474 KMP_BOOTSTRAP_LOCK_INIT(__kmp_forkjoin_lock); /* control fork/join access */
475 KMP_BOOTSTRAP_LOCK_INIT(__kmp_exit_lock); /* exit() is not always thread-safe */
476 #if KMP_USE_MONITOR
477 /* control monitor thread creation */
478 KMP_BOOTSTRAP_LOCK_INIT(__kmp_monitor_lock);
479 #endif
480 /* used for the hack to allow threadprivate cache and __kmp_threads expansion
481  to co-exist */
482 KMP_BOOTSTRAP_LOCK_INIT(__kmp_tp_cached_lock);
483 
484 KMP_ALIGN(128)
485 KMP_LOCK_INIT(__kmp_global_lock); /* Control OS/global access */
486 KMP_ALIGN(128)
487 kmp_queuing_lock_t __kmp_dispatch_lock; /* Control dispatch access */
488 KMP_ALIGN(128)
489 KMP_LOCK_INIT(__kmp_debug_lock); /* Control I/O access for KMP_DEBUG */
490 #endif
491 
/* ----------------------------------------------- */

#if KMP_HANDLE_SIGNALS
/* Signal handling is disabled by default, because it confuses users: In case of
   sigsegv (or other trouble) in user code signal handler catches the signal,
   which then "appears" in the monitor thread (when the monitor executes raise()
   function). Users see signal in the monitor thread and blame OpenMP RTL.

   Grant said signal handling required on some older OSes (Irix?) supported by
   KAI, because bad applications hung but not aborted. Currently it is not a
   problem for Linux* OS, OS X* and Windows* OS.

   Grant: Found new hangs for EL4, EL5, and a Fedora Core machine. So I'm
   putting the default back for now to see if that fixes hangs on those
   machines.

   2010-04-13 Lev: It was a bug in Fortran RTL. Fortran RTL prints a kind of
   stack backtrace when program is aborting, but the code is not signal-safe.
   When multiple signals raised at the same time (which occurs in dynamic
   negative tests because all the worker threads detects the same error),
   Fortran RTL may hang. The bug finally fixed in Fortran RTL library provided
   by Steve R., and will be available soon. */
int __kmp_handle_signals = FALSE;
#endif
516 
#ifdef DEBUG_SUSPEND
// Drain-and-reset accessor: returns the number of suspends recorded in
// __kmp_suspend_count since the last call and zeroes the counter.
// NOTE(review): read/reset is not atomic; debug-only instrumentation, and the
// trailing underscore suggests Fortran-callable linkage -- confirm.
int get_suspend_count_(void) {
  int count = __kmp_suspend_count;
  __kmp_suspend_count = 0;
  return count;
}
// Overwrite the suspend counter with *value (pointer argument for
// Fortran-style calling convention -- see note above).
void set_suspend_count_(int *value) { __kmp_suspend_count = *value; }
#endif
525 
// Symbols for MS mutual detection.
int _You_must_link_with_exactly_one_OpenMP_library = 1;
int _You_must_link_with_Intel_OpenMP_library = 1;
#if KMP_OS_WINDOWS && (KMP_VERSION_MAJOR > 4)
int _You_must_link_with_Microsoft_OpenMP_library = 1;
#endif

#if OMP_50_ENABLED
kmp_target_offload_kind_t __kmp_target_offload = tgt_default;

// OMP Pause Resources
kmp_pause_status_t __kmp_pause_status = kmp_not_paused;
#endif // OMP_50_ENABLED
539 
// end of file //