#ifndef KMP_WAIT_RELEASE_H
#define KMP_WAIT_RELEASE_H

#include "kmp.h"
#include "kmp_itt.h"
#include "kmp_stats.h"
#include "ompt-specific.h"
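/* The definitions and functions here implement the lower-level thread
   synchronization of suspending a thread on a flag and releasing (waking) it
   again. They are used to build higher-level operations such as barriers and
   fork/join. */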
enum flag_type {
  flag32, /**< 32 bit flags */
  flag64, /**< 64 bit flags */
  flag_oncore /**< special 64-bit flag for on-core (hierarchical) barrier */
};

/*! Base class for wait/release of a flag stored as a plain volatile P. */
template <typename P> class kmp_flag_native {
  volatile P *loc; /**< Pointer to the flag storage */
  flag_type t; /**< "Type" of the flag in loc */

public:
  typedef P flag_t;
  kmp_flag_native(volatile P *p, flag_type ft) : loc(p), t(ft) {}
  volatile P *get() { return loc; }
  void *get_void_p() { return RCAST(void *, CCAST(P *, loc)); }
  void set(volatile P *new_loc) { loc = new_loc; }
  flag_type get_type() { return t; }
  P load() { return *loc; }
  void store(P val) { *loc = val; }
};
/*! Base class for wait/release of a flag stored as a std::atomic<P>. */
template <typename P> class kmp_flag {
  std::atomic<P> *loc; /**< Pointer to flag storage modified by another thread */
  flag_type t; /**< "Type" of the flag in loc */

public:
  typedef P flag_t;
  kmp_flag(std::atomic<P> *p, flag_type ft) : loc(p), t(ft) {}
  std::atomic<P> *get() { return loc; }
  void *get_void_p() { return RCAST(void *, loc); }
  void set(std::atomic<P> *new_loc) { loc = new_loc; }
  flag_type get_type() { return t; }
  P load() { return loc->load(std::memory_order_acquire); }
  void store(P val) { loc->store(val, std::memory_order_release); }
};
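// Note: the std::atomic-based kmp_flag pairs an acquire load with a release
// store; the volatile-based kmp_flag_native leaves ordering to the TCR_* and
// KMP_TEST_THEN_* macros used by its derived classes.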
static void __ompt_implicit_task_end(kmp_info_t *this_thr,
                                     ompt_state_t ompt_state,
                                     ompt_data_t *tId) {
  int ds_tid = this_thr->th.th_info.ds.ds_tid;
  if (ompt_state == ompt_state_wait_barrier_implicit) {
    this_thr->th.ompt_thread_info.state = ompt_state_overhead;
    void *codeptr = NULL;
    if (ompt_enabled.ompt_callback_sync_region_wait) {
      ompt_callbacks.ompt_callback(ompt_callback_sync_region_wait)(
          ompt_sync_region_barrier, ompt_scope_end, NULL, tId, codeptr);
    }
    if (ompt_enabled.ompt_callback_sync_region) {
      ompt_callbacks.ompt_callback(ompt_callback_sync_region)(
          ompt_sync_region_barrier, ompt_scope_end, NULL, tId, codeptr);
    }
    if (!KMP_MASTER_TID(ds_tid)) {
      if (ompt_enabled.ompt_callback_implicit_task) {
        ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
            ompt_scope_end, NULL, tId, 0, ds_tid, ompt_task_implicit);
      }
      // return to idle state after the implicit task ends
      this_thr->th.ompt_thread_info.state = ompt_state_idle;
    } else {
      this_thr->th.ompt_thread_info.state = ompt_state_overhead;
    }
  }
}
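/* Spin wait loop that first does pause/yield, then sleep. A thread that calls
   __kmp_wait_* must make certain that another thread calls __kmp_release to
   wake it back up, to prevent deadlocks!
   NOTE: We may not belong to a team at this point. */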
template <class C, int final_spin, bool cancellable = false,
          bool sleepable = true>
static inline bool
__kmp_wait_template(kmp_info_t *this_thr,
                    C *flag USE_ITT_BUILD_ARG(void *itt_sync_obj)) {
#if USE_ITT_BUILD && USE_ITT_NOTIFY
  volatile void *spin = flag->get();
#endif
  kmp_uint32 spins;
  int th_gtid;
  int tasks_completed = FALSE;
#if !KMP_USE_MONITOR
  kmp_uint64 poll_count;
  kmp_uint64 hibernate_goal;
#else
  kmp_uint32 hibernate;
#endif

  KMP_FSYNC_SPIN_INIT(spin, NULL);
  if (flag->done_check()) {
    KMP_FSYNC_SPIN_ACQUIRED(CCAST(void *, spin));
    return false;
  }
  th_gtid = this_thr->th.th_info.ds.ds_gtid;
  if (cancellable) {
    kmp_team_t *team = this_thr->th.th_team;
    if (team && team->t.t_cancel_request == cancel_parallel)
      return true;
  }
  KMP_ATOMIC_ST_REL(&this_thr->th.th_blocking, true);

  KA_TRACE(20,
           ("__kmp_wait_sleep: T#%d waiting for flag(%p)\n", th_gtid, flag));
#if KMP_STATS_ENABLED
  stats_state_e thread_state = KMP_GET_THREAD_STATE();
#endif
  ompt_state_t ompt_entry_state;
  ompt_data_t *tId;
  if (ompt_enabled.enabled) {
    ompt_entry_state = this_thr->th.ompt_thread_info.state;
    if (!final_spin || ompt_entry_state != ompt_state_wait_barrier_implicit ||
        KMP_MASTER_TID(this_thr->th.th_info.ds.ds_tid)) {
      ompt_lw_taskteam_t *team =
          this_thr->th.th_team->t.ompt_serialized_team_info;
      if (team) {
        tId = &(team->ompt_task_info.task_data);
      } else {
        tId = OMPT_CUR_TASK_DATA(this_thr);
      }
    } else {
      tId = &(this_thr->th.ompt_thread_info.task_data);
    }
    if (final_spin && (__kmp_tasking_mode == tskm_immediate_exec ||
                       this_thr->th.th_task_team == NULL)) {
      // The implicit task is done: there is no task team to execute tasks from
      __ompt_implicit_task_end(this_thr, ompt_entry_state, tId);
    }
  }
  KMP_INIT_YIELD(spins); // Setup for waiting

  if (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME ||
      __kmp_pause_status == kmp_soft_paused) {
#if KMP_USE_MONITOR
// The worker threads cannot rely on the team struct existing at this point.
// Use the bt values cached in the thread struct instead.
#ifdef KMP_ADJUST_BLOCKTIME
    if (__kmp_pause_status == kmp_soft_paused ||
        (__kmp_zero_bt && !this_thr->th.th_team_bt_set))
      // Force immediate suspend if blocktime was not set by the user
      hibernate = 0;
    else
      hibernate = this_thr->th.th_team_bt_intervals;
#else
    hibernate = this_thr->th.th_team_bt_intervals;
#endif /* KMP_ADJUST_BLOCKTIME */

    // Spin for the full blocktime interval plus up to one extra interval,
    // then add in the current time value.
    if (hibernate != 0)
      hibernate++;
    hibernate += TCR_4(__kmp_global.g.g_time.dt.t_value);
    KF_TRACE(20, ("__kmp_wait_sleep: T#%d now=%d, hibernate=%d, intervals=%d\n",
                  th_gtid, __kmp_global.g.g_time.dt.t_value, hibernate,
                  hibernate - __kmp_global.g.g_time.dt.t_value));
#else
    if (__kmp_pause_status == kmp_soft_paused) {
      // Force immediate suspend
      hibernate_goal = KMP_NOW();
    } else
      hibernate_goal = KMP_NOW() + this_thr->th.th_team_bt_intervals;
    poll_count = 0;
#endif // KMP_USE_MONITOR
  }
  int oversubscribed = (TCR_4(__kmp_nth) > __kmp_avail_proc);

  // Main wait spin loop
  while (flag->notdone_check()) {
    kmp_task_team_t *task_team = NULL;
    if (__kmp_tasking_mode != tskm_immediate_exec) {
      task_team = this_thr->th.th_task_team;
      /* The thread's task team pointer may be NULL if the team is still being
         set up, if tasking is off for this region, or if all tasks have
         completed and the task team was deallocated. */
      if (task_team != NULL) {
        if (TCR_SYNC_4(task_team->tt.tt_active)) {
          if (KMP_TASKING_ENABLED(task_team))
            flag->execute_tasks(
                this_thr, th_gtid, final_spin,
                &tasks_completed USE_ITT_BUILD_ARG(itt_sync_obj), 0);
          else
            this_thr->th.th_reap_state = KMP_SAFE_TO_REAP;
        } else {
          KMP_DEBUG_ASSERT(!KMP_MASTER_TID(this_thr->th.th_info.ds.ds_tid));
          // The task team is done now; other cases are caught above
          if (final_spin && ompt_enabled.enabled)
            __ompt_implicit_task_end(this_thr, ompt_entry_state, tId);
          this_thr->th.th_task_team = NULL;
          this_thr->th.th_reap_state = KMP_SAFE_TO_REAP;
        }
      } else {
        this_thr->th.th_reap_state = KMP_SAFE_TO_REAP;
      }
    }

    KMP_FSYNC_SPIN_PREPARE(CCAST(void *, spin));
    if (TCR_4(__kmp_global.g.g_done)) {
      if (__kmp_global.g.g_abort)
        __kmp_abort_thread();
      break;
    }
    if (oversubscribed) {
      KMP_YIELD(1);
    } else {
      KMP_YIELD_SPIN(spins);
    }

    // Track transfers between a team and the thread pool while spinning.
    int in_pool = !!TCR_4(this_thr->th.th_in_pool);
    if (in_pool != !!this_thr->th.th_active_in_pool) {
      if (in_pool) { // Recently transferred from team to pool
        KMP_ATOMIC_INC(&__kmp_thread_pool_active_nth);
        this_thr->th.th_active_in_pool = TRUE;
      } else { // Recently transferred from pool to team
        KMP_ATOMIC_DEC(&__kmp_thread_pool_active_nth);
        KMP_DEBUG_ASSERT(TCR_4(__kmp_thread_pool_active_nth) >= 0);
        this_thr->th.th_active_in_pool = FALSE;
      }
    }
#if KMP_STATS_ENABLED
    // Thread signalled to idle state => the logical join-barrier has finished.
    if (this_thr->th.th_stats->isIdle() &&
        KMP_GET_THREAD_STATE() == FORK_JOIN_BARRIER) {
      KMP_SET_THREAD_STATE(IDLE);
      KMP_PUSH_PARTITIONED_TIMER(OMP_idle);
    }
#endif
    // Stop waiting if the surrounding parallel region has been cancelled.
    if (cancellable) {
      kmp_team_t *team = this_thr->th.th_team;
      if (team && team->t.t_cancel_request == cancel_parallel)
        break;
    }
    // Don't suspend if KMP_BLOCKTIME is "infinite" or if new tasks are likely.
    if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME &&
        __kmp_pause_status != kmp_soft_paused)
      continue;
    if ((task_team != NULL) && TCR_4(task_team->tt.tt_found_tasks))
      continue;

#if KMP_USE_MONITOR
    // If we have waited a bit more, fall asleep.
    if (TCR_4(__kmp_global.g.g_time.dt.t_value) < hibernate)
      continue;
#else
    if (KMP_BLOCKING(hibernate_goal, poll_count++))
      continue;
#endif
    // Don't suspend if this wait loop was designated non-sleepable
    // by the template parameters.
    if (!sleepable)
      continue;
    if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME &&
        __kmp_pause_status != kmp_soft_paused)
      continue;

    KF_TRACE(50, ("__kmp_wait_sleep: T#%d suspend time reached\n", th_gtid));
    KMP_ATOMIC_ST_REL(&this_thr->th.th_blocking, false);
    flag->suspend(th_gtid);
    KMP_ATOMIC_ST_REL(&this_thr->th.th_blocking, true);
    if (TCR_4(__kmp_global.g.g_done)) {
      if (__kmp_global.g.g_abort)
        __kmp_abort_thread();
      break;
    } else if (__kmp_tasking_mode != tskm_immediate_exec &&
               this_thr->th.th_reap_state == KMP_SAFE_TO_REAP) {
      this_thr->th.th_reap_state = KMP_NOT_SAFE_TO_REAP;
    }
  } // while (flag->notdone_check())
  ompt_state_t ompt_exit_state = this_thr->th.ompt_thread_info.state;
  if (ompt_enabled.enabled && ompt_exit_state != ompt_state_undefined) {
    if (final_spin) {
      __ompt_implicit_task_end(this_thr, ompt_exit_state, tId);
      ompt_exit_state = this_thr->th.ompt_thread_info.state;
    }
    if (ompt_exit_state == ompt_state_idle) {
      this_thr->th.ompt_thread_info.state = ompt_state_overhead;
    }
  }
#if KMP_STATS_ENABLED
  // If we were put into idle state, pop that off the state stack.
  if (KMP_GET_THREAD_STATE() == IDLE) {
    KMP_POP_PARTITIONED_TIMER();
    KMP_SET_THREAD_STATE(thread_state);
    this_thr->th.th_stats->resetIdleFlag();
  }
#endif
  KMP_ATOMIC_ST_REL(&this_thr->th.th_blocking, false);

  KMP_FSYNC_SPIN_ACQUIRED(CCAST(void *, spin));
  if (cancellable) {
    kmp_team_t *team = this_thr->th.th_team;
    if (team && team->t.t_cancel_request == cancel_parallel) {
      if (tasks_completed) {
        // Undo the previous decrement of unfinished_threads so that the
        // thread can decrement it at the join barrier with no problem.
        kmp_task_team_t *task_team = this_thr->th.th_task_team;
        std::atomic<kmp_int32> *unfinished_threads =
            &(task_team->tt.tt_unfinished_threads);
        KMP_ATOMIC_INC(unfinished_threads);
      }
      return true;
    }
  }
  return false;
}
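/* Release any threads specified as waiting on the flag by releasing the flag
   and resuming the waiting thread if indicated by the sleep bit(s). A thread
   that calls __kmp_wait_template must call this function to wake up the
   potentially sleeping thread and prevent deadlocks! */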
template <class C> static inline void __kmp_release_template(C *flag) {
  int gtid = TCR_4(__kmp_init_gtid) ? __kmp_get_gtid() : -1;
  KF_TRACE(20, ("__kmp_release: T#%d releasing flag(%x)\n", gtid, flag->get()));
  KMP_DEBUG_ASSERT(flag->get());
  KMP_FSYNC_RELEASING(flag->get_void_p());

  flag->internal_release();

  KF_TRACE(100, ("__kmp_release: T#%d set new spin=%d\n", gtid, flag->get(),
                 flag->load()));

  if (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME) {
    // Only need to check sleep stuff if infinite block time not set.
    // Are *any* threads waiting on flag sleeping?
    if (flag->is_any_sleeping()) {
      for (unsigned int i = 0; i < flag->get_num_waiters(); ++i) {
        // If a sleeping waiter exists at i, wake it up.
        kmp_info_t *waiter = flag->get_waiter(i);
        if (waiter) {
          int wait_gtid = waiter->th.th_info.ds.ds_gtid;
          KF_TRACE(50, ("__kmp_release: T#%d waking up thread T#%d since sleep "
                        "flag(%p) set\n",
                        gtid, wait_gtid, flag->get()));
          flag->resume(wait_gtid); // unsets flag's current_waiter when done
        }
      }
    }
  }
}
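/* Illustrative usage sketch (not part of the original header; variable names
   are hypothetical): the typical pairing of the two templates above, in the
   spirit of the fork/join barrier code.

     // Worker side: wait (spin/yield, then sleep) until its b_go flag is bumped.
     kmp_flag_64 flag(&this_thr->th.th_bar[bs_forkjoin_barrier].bb.b_go,
                      (kmp_uint64)KMP_BARRIER_STATE_BUMP);
     flag.wait(this_thr, TRUE USE_ITT_BUILD_ARG(itt_sync_obj));

     // Releasing side: build a flag on the same location and release it,
     // which bumps the value and resumes the waiter if it went to sleep.
     kmp_flag_64 go_flag(&waiter_thr->th.th_bar[bs_forkjoin_barrier].bb.b_go,
                         waiter_thr);
     go_flag.release();
*/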
template <typename FlagType> struct flag_traits {};

template <> struct flag_traits<kmp_uint32> {
  typedef kmp_uint32 flag_t;
  static const flag_type t = flag32;
  static inline flag_t tcr(flag_t f) { return TCR_4(f); }
  static inline flag_t test_then_add4(volatile flag_t *f) {
    return KMP_TEST_THEN_ADD4_32(RCAST(volatile kmp_int32 *, f));
  }
  static inline flag_t test_then_or(volatile flag_t *f, flag_t v) {
    return KMP_TEST_THEN_OR32(f, v);
  }
  static inline flag_t test_then_and(volatile flag_t *f, flag_t v) {
    return KMP_TEST_THEN_AND32(f, v);
  }
};
template <> struct flag_traits<kmp_uint64> {
  typedef kmp_uint64 flag_t;
  static const flag_type t = flag64;
  static inline flag_t tcr(flag_t f) { return TCR_8(f); }
  static inline flag_t test_then_add4(volatile flag_t *f) {
    return KMP_TEST_THEN_ADD4_64(RCAST(volatile kmp_int64 *, f));
  }
  static inline flag_t test_then_or(volatile flag_t *f, flag_t v) {
    return KMP_TEST_THEN_OR64(f, v);
  }
  static inline flag_t test_then_and(volatile flag_t *f, flag_t v) {
    return KMP_TEST_THEN_AND64(f, v);
  }
};
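// flag_traits maps a flag word type to the matching lock-free primitives
// (TCR_* read and test-then-add/or/and) used by the flag classes below.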
/*! Basic flag that does not use C11 std::atomic: the flag word is read and
    modified through volatile accesses and the traits_type primitives. */
template <typename FlagType>
class kmp_basic_flag_native : public kmp_flag_native<FlagType> {
  typedef flag_traits<FlagType> traits_type;
  FlagType checker; /**< Value to compare flag to to check if flag has been
                         released. */
  kmp_info_t
      *waiting_threads[1]; /**< Array of threads sleeping on this thread. */
  kmp_uint32
      num_waiting_threads; /**< Number of threads sleeping on this thread. */
public:
  kmp_basic_flag_native(volatile FlagType *p)
      : kmp_flag_native<FlagType>(p, traits_type::t), num_waiting_threads(0) {}
  kmp_basic_flag_native(volatile FlagType *p, kmp_info_t *thr)
      : kmp_flag_native<FlagType>(p, traits_type::t), num_waiting_threads(1) {
    waiting_threads[0] = thr;
  }
  kmp_basic_flag_native(volatile FlagType *p, FlagType c)
      : kmp_flag_native<FlagType>(p, traits_type::t), checker(c),
        num_waiting_threads(0) {}
  /*! @result the thread waiting at index i */
  kmp_info_t *get_waiter(kmp_uint32 i) {
    KMP_DEBUG_ASSERT(i < num_waiting_threads);
    return waiting_threads[i];
  }
  /*! @result number of threads sleeping on this flag */
  kmp_uint32 get_num_waiters() { return num_waiting_threads; }
  /*! Record thr as the (single) waiter on this flag. */
  void set_waiter(kmp_info_t *thr) {
    waiting_threads[0] = thr;
    num_waiting_threads = 1;
  }
  /*! @result true if the flag object has been released */
  bool done_check() { return traits_type::tcr(*(this->get())) == checker; }
  /*! @result true if old_loc indicates the flag was already released */
  bool done_check_val(FlagType old_loc) { return old_loc == checker; }
  /*! @result true if the flag object is not yet released */
  bool notdone_check() {
    return traits_type::tcr(*(this->get())) != checker;
  }
  /*! Trigger waiting threads to run by modifying the flag to release state. */
  void internal_release() {
    (void)traits_type::test_then_add4((volatile FlagType *)this->get());
  }
  /*! Set sleep bit(s): at least one thread is sleeping on the flag.
      @result previous flag value */
  FlagType set_sleeping() {
    return traits_type::test_then_or((volatile FlagType *)this->get(),
                                     KMP_BARRIER_SLEEP_STATE);
  }
  /*! Clear sleep bit(s): no threads are sleeping on the flag any longer.
      @result previous flag value */
  FlagType unset_sleeping() {
    return traits_type::test_then_and((volatile FlagType *)this->get(),
                                      ~KMP_BARRIER_SLEEP_STATE);
  }
  /*! @result true if old_loc has the sleep bit(s) set */
  bool is_sleeping_val(FlagType old_loc) {
    return old_loc & KMP_BARRIER_SLEEP_STATE;
  }
  /*! @result true if any thread is sleeping on the flag */
  bool is_sleeping() { return is_sleeping_val(*(this->get())); }
  bool is_any_sleeping() { return is_sleeping_val(*(this->get())); }
  kmp_uint8 *get_stolen() { return NULL; }
  enum barrier_type get_bt() { return bs_last_barrier; }
};
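// kmp_basic_flag is the std::atomic counterpart of kmp_basic_flag_native: the
// same waiter bookkeeping and sleep-bit protocol, built on kmp_flag<FlagType>.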
template <typename FlagType> class kmp_basic_flag : public kmp_flag<FlagType> {
  typedef flag_traits<FlagType> traits_type;
  FlagType checker; /**< Value to compare flag to to check if flag has been
                         released. */
  kmp_info_t
      *waiting_threads[1]; /**< Array of threads sleeping on this thread. */
  kmp_uint32
      num_waiting_threads; /**< Number of threads sleeping on this thread. */
public:
  kmp_basic_flag(std::atomic<FlagType> *p)
      : kmp_flag<FlagType>(p, traits_type::t), num_waiting_threads(0) {}
  kmp_basic_flag(std::atomic<FlagType> *p, kmp_info_t *thr)
      : kmp_flag<FlagType>(p, traits_type::t), num_waiting_threads(1) {
    waiting_threads[0] = thr;
  }
  kmp_basic_flag(std::atomic<FlagType> *p, FlagType c)
      : kmp_flag<FlagType>(p, traits_type::t), checker(c),
        num_waiting_threads(0) {}
  kmp_info_t *get_waiter(kmp_uint32 i) {
    KMP_DEBUG_ASSERT(i < num_waiting_threads);
    return waiting_threads[i];
  }
  kmp_uint32 get_num_waiters() { return num_waiting_threads; }
  void set_waiter(kmp_info_t *thr) {
    waiting_threads[0] = thr;
    num_waiting_threads = 1;
  }
  /*! @result true if the flag object has been released */
  bool done_check() { return this->load() == checker; }
  bool done_check_val(FlagType old_loc) { return old_loc == checker; }
  bool notdone_check() { return this->load() != checker; }
  /*! Trigger waiting threads to run by modifying the flag to release state. */
  void internal_release() { KMP_ATOMIC_ADD(this->get(), 4); }
  /*! Set sleep bit(s); @result previous flag value */
  FlagType set_sleeping() {
    return KMP_ATOMIC_OR(this->get(), KMP_BARRIER_SLEEP_STATE);
  }
  /*! Clear sleep bit(s); @result previous flag value */
  FlagType unset_sleeping() {
    return KMP_ATOMIC_AND(this->get(), ~KMP_BARRIER_SLEEP_STATE);
  }
  bool is_sleeping_val(FlagType old_loc) {
    return old_loc & KMP_BARRIER_SLEEP_STATE;
  }
  bool is_sleeping() { return is_sleeping_val(this->load()); }
  bool is_any_sleeping() { return is_sleeping_val(this->load()); }
  kmp_uint8 *get_stolen() { return NULL; }
  enum barrier_type get_bt() { return bs_last_barrier; }
};
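// 32-bit flag on std::atomic storage; wires the generic wait/release templates
// to the 32-bit suspend/resume/execute_tasks entry points of the runtime.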
class kmp_flag_32 : public kmp_basic_flag<kmp_uint32> {
public:
  kmp_flag_32(std::atomic<kmp_uint32> *p) : kmp_basic_flag<kmp_uint32>(p) {}
  kmp_flag_32(std::atomic<kmp_uint32> *p, kmp_info_t *thr)
      : kmp_basic_flag<kmp_uint32>(p, thr) {}
  kmp_flag_32(std::atomic<kmp_uint32> *p, kmp_uint32 c)
      : kmp_basic_flag<kmp_uint32>(p, c) {}
  void suspend(int th_gtid) { __kmp_suspend_32(th_gtid, this); }
  void resume(int th_gtid) { __kmp_resume_32(th_gtid, this); }
  int execute_tasks(kmp_info_t *this_thr, kmp_int32 gtid, int final_spin,
                    int *thread_finished USE_ITT_BUILD_ARG(void *itt_sync_obj),
                    kmp_int32 is_constrained) {
    return __kmp_execute_tasks_32(
        this_thr, gtid, this, final_spin,
        thread_finished USE_ITT_BUILD_ARG(itt_sync_obj), is_constrained);
  }
  void wait(kmp_info_t *this_thr,
            int final_spin USE_ITT_BUILD_ARG(void *itt_sync_obj)) {
    if (final_spin)
      __kmp_wait_template<kmp_flag_32, TRUE>(
          this_thr, this USE_ITT_BUILD_ARG(itt_sync_obj));
    else
      __kmp_wait_template<kmp_flag_32, FALSE>(
          this_thr, this USE_ITT_BUILD_ARG(itt_sync_obj));
  }
  void release() { __kmp_release_template(this); }
};
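// 64-bit flag on volatile storage (kmp_basic_flag_native); also provides a
// cancellable, non-sleeping wait used when the enclosing parallel region may
// be cancelled.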
class kmp_flag_64 : public kmp_basic_flag_native<kmp_uint64> {
public:
  kmp_flag_64(volatile kmp_uint64 *p) : kmp_basic_flag_native<kmp_uint64>(p) {}
  kmp_flag_64(volatile kmp_uint64 *p, kmp_info_t *thr)
      : kmp_basic_flag_native<kmp_uint64>(p, thr) {}
  kmp_flag_64(volatile kmp_uint64 *p, kmp_uint64 c)
      : kmp_basic_flag_native<kmp_uint64>(p, c) {}
  void suspend(int th_gtid) { __kmp_suspend_64(th_gtid, this); }
  void resume(int th_gtid) { __kmp_resume_64(th_gtid, this); }
  int execute_tasks(kmp_info_t *this_thr, kmp_int32 gtid, int final_spin,
                    int *thread_finished USE_ITT_BUILD_ARG(void *itt_sync_obj),
                    kmp_int32 is_constrained) {
    return __kmp_execute_tasks_64(
        this_thr, gtid, this, final_spin,
        thread_finished USE_ITT_BUILD_ARG(itt_sync_obj), is_constrained);
  }
  void wait(kmp_info_t *this_thr,
            int final_spin USE_ITT_BUILD_ARG(void *itt_sync_obj)) {
    if (final_spin)
      __kmp_wait_template<kmp_flag_64, TRUE>(
          this_thr, this USE_ITT_BUILD_ARG(itt_sync_obj));
    else
      __kmp_wait_template<kmp_flag_64, FALSE>(
          this_thr, this USE_ITT_BUILD_ARG(itt_sync_obj));
  }
  bool wait_cancellable_nosleep(kmp_info_t *this_thr,
                                int final_spin
                                    USE_ITT_BUILD_ARG(void *itt_sync_obj)) {
    bool retval = false;
    if (final_spin)
      retval = __kmp_wait_template<kmp_flag_64, TRUE, true, false>(
          this_thr, this USE_ITT_BUILD_ARG(itt_sync_obj));
    else
      retval = __kmp_wait_template<kmp_flag_64, FALSE, true, false>(
          this_thr, this USE_ITT_BUILD_ARG(itt_sync_obj));
    return retval;
  }
  void release() { __kmp_release_template(this); }
};
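// Hierarchical ("on-core") 64-bit flag: each byte of the 64-bit word acts as a
// separate flag, selected by 'offset', and a waiter may be switched over to
// its own b_go flag mid-wait (flag_switch).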
class kmp_flag_oncore : public kmp_flag_native<kmp_uint64> {
  kmp_uint64 checker;
  kmp_info_t *waiting_threads[1];
  kmp_uint32 num_waiting_threads;
  kmp_uint32 offset; /**< Portion of flag of interest for an operation. */
  bool flag_switch; /**< Indicates a switch in flag location. */
  enum barrier_type bt; /**< Barrier type. */
  kmp_info_t *this_thr; /**< Thread to redirect to a different flag location. */
#if USE_ITT_BUILD
  void *itt_sync_obj; /**< ITT object to pass to new flag location. */
#endif
  unsigned char &byteref(volatile kmp_uint64 *loc, size_t offset) {
    return (RCAST(unsigned char *, CCAST(kmp_uint64 *, loc)))[offset];
  }

public:
  kmp_flag_oncore(volatile kmp_uint64 *p)
      : kmp_flag_native<kmp_uint64>(p, flag_oncore), num_waiting_threads(0),
        flag_switch(false) {}
  kmp_flag_oncore(volatile kmp_uint64 *p, kmp_uint32 idx)
      : kmp_flag_native<kmp_uint64>(p, flag_oncore), num_waiting_threads(0),
        offset(idx), flag_switch(false) {}
  kmp_flag_oncore(volatile kmp_uint64 *p, kmp_uint64 c, kmp_uint32 idx,
                  enum barrier_type bar_t,
                  kmp_info_t *thr USE_ITT_BUILD_ARG(void *itt))
      : kmp_flag_native<kmp_uint64>(p, flag_oncore), checker(c),
        num_waiting_threads(0), offset(idx), flag_switch(false), bt(bar_t),
        this_thr(thr) USE_ITT_BUILD_ARG(itt_sync_obj(itt)) {}
  kmp_info_t *get_waiter(kmp_uint32 i) {
    KMP_DEBUG_ASSERT(i < num_waiting_threads);
    return waiting_threads[i];
  }
  kmp_uint32 get_num_waiters() { return num_waiting_threads; }
  void set_waiter(kmp_info_t *thr) {
    waiting_threads[0] = thr;
    num_waiting_threads = 1;
  }
  bool done_check_val(kmp_uint64 old_loc) {
    return byteref(&old_loc, offset) == checker;
  }
  bool done_check() { return done_check_val(*get()); }
  bool notdone_check() {
    // Calculate flag_switch
    if (this_thr->th.th_bar[bt].bb.wait_flag == KMP_BARRIER_SWITCH_TO_OWN_FLAG)
      flag_switch = true;
    if (byteref(get(), offset) != 1 && !flag_switch)
      return true;
    else if (flag_switch) {
      this_thr->th.th_bar[bt].bb.wait_flag = KMP_BARRIER_SWITCHING;
      kmp_flag_64 flag(&this_thr->th.th_bar[bt].bb.b_go,
                       (kmp_uint64)KMP_BARRIER_STATE_BUMP);
      __kmp_wait_64(this_thr, &flag, TRUE USE_ITT_BUILD_ARG(itt_sync_obj));
    }
    return false;
  }
  void internal_release() {
    // Other threads can write their own bytes simultaneously.
    if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME) {
      byteref(get(), offset) = 1;
    } else {
      kmp_uint64 mask = 0;
      byteref(&mask, offset) = 1;
      KMP_TEST_THEN_OR64(get(), mask);
    }
  }
  kmp_uint64 set_sleeping() {
    return KMP_TEST_THEN_OR64(get(), KMP_BARRIER_SLEEP_STATE);
  }
  kmp_uint64 unset_sleeping() {
    return KMP_TEST_THEN_AND64(get(), ~KMP_BARRIER_SLEEP_STATE);
  }
  bool is_sleeping_val(kmp_uint64 old_loc) {
    return old_loc & KMP_BARRIER_SLEEP_STATE;
  }
  bool is_sleeping() { return is_sleeping_val(*get()); }
  bool is_any_sleeping() { return is_sleeping_val(*get()); }
  void wait(kmp_info_t *this_thr, int final_spin) {
    if (final_spin)
      __kmp_wait_template<kmp_flag_oncore, TRUE>(
          this_thr, this USE_ITT_BUILD_ARG(itt_sync_obj));
    else
      __kmp_wait_template<kmp_flag_oncore, FALSE>(
          this_thr, this USE_ITT_BUILD_ARG(itt_sync_obj));
  }
  void release() { __kmp_release_template(this); }
  void suspend(int th_gtid) { __kmp_suspend_oncore(th_gtid, this); }
  void resume(int th_gtid) { __kmp_resume_oncore(th_gtid, this); }
  int execute_tasks(kmp_info_t *this_thr, kmp_int32 gtid, int final_spin,
                    int *thread_finished USE_ITT_BUILD_ARG(void *itt_sync_obj),
                    kmp_int32 is_constrained) {
    return __kmp_execute_tasks_oncore(
        this_thr, gtid, this, final_spin,
        thread_finished USE_ITT_BUILD_ARG(itt_sync_obj), is_constrained);
  }
  kmp_uint8 *get_stolen() { return NULL; }
  enum barrier_type get_bt() { return bt; }
};
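// Used to wake up threads; the volatile void *flag is usually the th_sleep_loc
// associated with gtid.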
static inline void __kmp_null_resume_wrapper(int gtid, volatile void *flag) {
  if (!flag)
    return;
  switch (RCAST(kmp_flag_64 *, CCAST(void *, flag))->get_type()) {
  case flag32:
    __kmp_resume_32(gtid, NULL);
    break;
  case flag64:
    __kmp_resume_64(gtid, NULL);
    break;
  case flag_oncore:
    __kmp_resume_oncore(gtid, NULL);
    break;
  }
}
#endif // KMP_WAIT_RELEASE_H