#ifndef KMP_DISPATCH_H
#define KMP_DISPATCH_H

#include "kmp.h"
#include "kmp_error.h"
#include "kmp_i18n.h"
#include "kmp_itt.h"
#include "kmp_stats.h"
#include "kmp_str.h"
#if KMP_OS_WINDOWS && KMP_ARCH_X86
#include <float.h>
#endif

#if OMPT_SUPPORT
#include "ompt-internal.h"
#include "ompt-specific.h"
#endif
#if KMP_USE_HIER_SCHED
// Forward declarations of some hierarchical scheduling data structures
template <typename T> struct kmp_hier_t;
template <typename T> struct kmp_hier_top_unit_t;
#endif // KMP_USE_HIER_SCHED

template <typename T> struct dispatch_shared_info_template;
template <typename T> struct dispatch_private_info_template;
template <typename T>
extern void __kmp_dispatch_init_algorithm(ident_t *loc, int gtid,
                                          dispatch_private_info_template<T> *pr,
                                          enum sched_type schedule, T lb, T ub,
                                          typename traits_t<T>::signed_t st,
#if USE_ITT_BUILD
                                          kmp_uint64 *cur_chunk,
#endif
                                          typename traits_t<T>::signed_t chunk,
                                          T nproc, T unit_id);
template <typename T>
extern int __kmp_dispatch_next_algorithm(
    int gtid, dispatch_private_info_template<T> *pr,
    dispatch_shared_info_template<T> volatile *sh, kmp_int32 *p_last, T *p_lb,
    T *p_ub, typename traits_t<T>::signed_t *p_st, T nproc, T unit_id);
void __kmp_dispatch_dxo_error(int *gtid_ref, int *cid_ref, ident_t *loc_ref);
void __kmp_dispatch_deo_error(int *gtid_ref, int *cid_ref, ident_t *loc_ref);
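// Orientation note (added; not in the original header): the *_error variants
// above are the ordered-entry ("deo") and ordered-exit ("dxo") hooks installed
// when consistency checking determines that the current dispatch cannot
// legally execute an ordered construct; the templated __kmp_dispatch_deo /
// __kmp_dispatch_dxo further below implement the real ordered synchronization.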
#if KMP_STATIC_STEAL_ENABLED

// replaces dispatch_private_info{32,64} structures and
// dispatch_private_info{32,64}_t types
template <typename T> struct dispatch_private_infoXX_template {
  typedef typename traits_t<T>::unsigned_t UT;
  typedef typename traits_t<T>::signed_t ST;
  UT count; // unsigned
  T ub;
  /* Adding KMP_ALIGN_CACHE here doesn't help / can hurt performance */
  T lb;
  ST st; // signed
  UT tc; // unsigned
  T static_steal_counter; // for static_steal only; maybe better to put after ub

  /* parm[1-4] are used in different ways by different scheduling algorithms */

  // KMP_ALIGN(32) ensures (if the KMP_ALIGN macro is turned on)
  //    a) parm3 is properly aligned and
  //    b) all parm1-4 are on the same cache line.
  // Because parm1-4 are used together, performance seems to be better
  // if they are on the same cache line (not measured though).
  struct KMP_ALIGN(32) { // compiler does not accept unnamed structure
    T parm1;
    T parm2;
    T parm3;
    T parm4;
  };

  UT ordered_lower; // unsigned
  UT ordered_upper; // unsigned
#if KMP_OS_WINDOWS
  T last_upper;
#endif /* KMP_OS_WINDOWS */
};

#else /* KMP_STATIC_STEAL_ENABLED */

// replaces dispatch_private_info{32,64} structures and
// dispatch_private_info{32,64}_t types
template <typename T> struct dispatch_private_infoXX_template {
  typedef typename traits_t<T>::unsigned_t UT;
  typedef typename traits_t<T>::signed_t ST;
  T lb;
  T ub;
  ST st; // signed
  UT tc; // unsigned

  T parm1;
  T parm2;
  T parm3;
  T parm4;

  UT count; // unsigned

  UT ordered_lower; // unsigned
  UT ordered_upper; // unsigned
#if KMP_OS_WINDOWS
  T last_upper;
#endif /* KMP_OS_WINDOWS */
};

#endif /* KMP_STATIC_STEAL_ENABLED */
template <typename T> struct KMP_ALIGN_CACHE dispatch_private_info_template {
  // duplicate alignment here, otherwise size of structure is not correct in
  // our compiler
  union KMP_ALIGN_CACHE private_info_tmpl {
    dispatch_private_infoXX_template<T> p;
    dispatch_private_info64_t p64;
  } u;
  enum sched_type schedule; /* scheduling algorithm */
  kmp_sched_flags_t flags; /* flags (e.g., ordered, nomerge, etc.) */
  kmp_uint32 ordered_bumped;
  // To retain the structure size after making ordered_iteration scalar
  kmp_int32 ordered_dummy[KMP_MAX_ORDERED - 3];
  dispatch_private_info *next; /* stack of buffers for nest of serial regions */
  kmp_uint32 type_size;
#if KMP_USE_HIER_SCHED
  kmp_int32 hier_id;
  kmp_hier_top_unit_t<T> *hier_parent;
  // member functions
  kmp_int32 get_hier_id() const { return hier_id; }
  kmp_hier_top_unit_t<T> *get_parent() { return hier_parent; }
#endif
  enum cons_type pushed_ws;
};
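// Orientation note (added; not in the original header): the templated code
// below always reads per-loop state through the typed view pr->u.p, e.g.
// pr->u.p.ordered_lower in __kmp_dispatch_deo; the p64 member exists only so
// the union keeps the same size as the non-template dispatch_private_info.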
// replaces dispatch_shared_info{32,64} structures and
// dispatch_shared_info{32,64}_t types
template <typename T> struct dispatch_shared_infoXX_template {
  typedef typename traits_t<T>::unsigned_t UT;
  /* chunk index under dynamic, number of idle threads under static-steal;
     iteration index otherwise */
  volatile UT iteration;
  volatile UT num_done;
  volatile UT ordered_iteration;
  // to retain the structure size making ordered_iteration scalar
  UT ordered_dummy[KMP_MAX_ORDERED - 3];
};
// replaces dispatch_shared_info structure and dispatch_shared_info_t type
template <typename T> struct dispatch_shared_info_template {
  typedef typename traits_t<T>::unsigned_t UT;
  // we need union here to keep the structure size
  union shared_info_tmpl {
    dispatch_shared_infoXX_template<UT> s;
    dispatch_shared_info64_t s64;
  } u;
  volatile kmp_uint32 buffer_index;
#if OMP_45_ENABLED
  volatile kmp_int32 doacross_buf_idx; // teamwise index
  kmp_uint32 *doacross_flags; // array of iteration flags (0/1)
  kmp_int32 doacross_num_done; // count finished threads
#endif
#if KMP_USE_HIER_SCHED
  kmp_hier_t<T> *hier;
#endif
};
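// Orientation note (added; not in the original header): buffer_index lets a
// team rotate through a small pool of shared dispatch buffers, so a thread
// that finishes one dynamically scheduled loop can start the next before
// slower teammates have released the previous loop's buffer.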
#undef USE_TEST_LOCKS
// test_then_add template (general template should NOT be used)
template <typename T> static __forceinline T test_then_add(volatile T *p, T d);

template <>
__forceinline kmp_int32 test_then_add<kmp_int32>(volatile kmp_int32 *p,
                                                 kmp_int32 d) {
  kmp_int32 r;
  r = KMP_TEST_THEN_ADD32(p, d);
  return r;
}

template <>
__forceinline kmp_int64 test_then_add<kmp_int64>(volatile kmp_int64 *p,
                                                 kmp_int64 d) {
  kmp_int64 r;
  r = KMP_TEST_THEN_ADD64(p, d);
  return r;
}
// test_then_inc_acq template (general template should NOT be used)
template <typename T> static __forceinline T test_then_inc_acq(volatile T *p);

template <>
__forceinline kmp_int32 test_then_inc_acq<kmp_int32>(volatile kmp_int32 *p) {
  kmp_int32 r;
  r = KMP_TEST_THEN_INC_ACQ32(p);
  return r;
}

template <>
__forceinline kmp_int64 test_then_inc_acq<kmp_int64>(volatile kmp_int64 *p) {
  kmp_int64 r;
  r = KMP_TEST_THEN_INC_ACQ64(p);
  return r;
}
// test_then_inc template (general template should NOT be used)
template <typename T> static __forceinline T test_then_inc(volatile T *p);

template <>
__forceinline kmp_int32 test_then_inc<kmp_int32>(volatile kmp_int32 *p) {
  kmp_int32 r;
  r = KMP_TEST_THEN_INC32(p);
  return r;
}

template <>
__forceinline kmp_int64 test_then_inc<kmp_int64>(volatile kmp_int64 *p) {
  kmp_int64 r;
  r = KMP_TEST_THEN_INC64(p);
  return r;
}
// compare_and_swap template (general template should NOT be used)
template <typename T>
static __forceinline kmp_int32 compare_and_swap(volatile T *p, T c, T s);

template <>
__forceinline kmp_int32 compare_and_swap<kmp_int32>(volatile kmp_int32 *p,
                                                    kmp_int32 c, kmp_int32 s) {
  return KMP_COMPARE_AND_STORE_REL32(p, c, s);
}

template <>
__forceinline kmp_int32 compare_and_swap<kmp_int64>(volatile kmp_int64 *p,
                                                    kmp_int64 c, kmp_int64 s) {
  return KMP_COMPARE_AND_STORE_REL64(p, c, s);
}
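// Illustrative sketch (added; not part of the original header): these wrappers
// return nonzero iff *p still held the expected value c and was replaced by s,
// so they fit the classic read-modify-CAS retry loop, e.g. an atomic minimum
// on a shared counter (addr and candidate are hypothetical names):
//
//   kmp_int32 old_v = *addr;                 // addr: volatile kmp_int32 *
//   while (candidate < old_v) {
//     if (compare_and_swap<kmp_int32>(addr, old_v, candidate))
//       break;                               // our value was stored
//     old_v = *addr;                         // lost the race; re-read, retry
//   }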
template <typename T> kmp_uint32 __kmp_ge(T value, T checker) {
  return value >= checker;
}
template <typename T> kmp_uint32 __kmp_eq(T value, T checker) {
  return value == checker;
}
/* Spin-wait loop that pauses between checks: repeatedly evaluates
   pred(*spinner, checker) and returns the observed value of *spinner once the
   predicate holds. Yields while waiting but does NOT put the thread to
   sleep. */
template <typename UT>
static UT __kmp_wait_yield(volatile UT *spinner, UT checker,
                           kmp_uint32 (*pred)(UT, UT)
                               USE_ITT_BUILD_ARG(void *obj)) {
  // note: we may not belong to a team at this point
  volatile UT *spin = spinner;
  UT check = checker;
  kmp_uint32 spins;
  kmp_uint32 (*f)(UT, UT) = pred;
  UT r;

  KMP_FSYNC_SPIN_INIT(obj, CCAST(UT *, spin));
  KMP_INIT_YIELD(spins);
  // main wait spin loop
  while (!f(r = *spin, check)) {
    KMP_FSYNC_SPIN_PREPARE(obj);
    // If we are oversubscribed, or have waited a bit (and
    // KMP_LIBRARY=throughput), then yield.
    KMP_YIELD(TCR_4(__kmp_nth) > __kmp_avail_proc);
    KMP_YIELD_SPIN(spins);
  }
  KMP_FSYNC_SPIN_ACQUIRED(obj);
  return r;
}
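// Usage note (added; not in the original header): __kmp_dispatch_deo below
// waits on the shared ordered counter with
//   __kmp_wait_yield<UT>(&sh->u.s.ordered_iteration, lower,
//                        __kmp_ge<UT> USE_ITT_BUILD_ARG(NULL));
// i.e. it spins until ordered_iteration >= the lower bound of this thread's
// current chunk.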
template <typename UT>
void __kmp_dispatch_deo(int *gtid_ref, int *cid_ref, ident_t *loc_ref) {
  dispatch_private_info_template<UT> *pr;

  int gtid = *gtid_ref;
  kmp_info_t *th = __kmp_threads[gtid];
  KMP_DEBUG_ASSERT(th->th.th_dispatch);

  KD_TRACE(100, ("__kmp_dispatch_deo: T#%d called\n", gtid));
  if (__kmp_env_consistency_check) {
    pr = reinterpret_cast<dispatch_private_info_template<UT> *>(
        th->th.th_dispatch->th_dispatch_pr_current);
    if (pr->pushed_ws != ct_none) {
#if KMP_USE_DYNAMIC_LOCK
      __kmp_push_sync(gtid, ct_ordered_in_pdo, loc_ref, NULL, 0);
#else
      __kmp_push_sync(gtid, ct_ordered_in_pdo, loc_ref, NULL);
#endif
    }
  }

  if (!th->th.th_team->t.t_serialized) {
    dispatch_shared_info_template<UT> *sh =
        reinterpret_cast<dispatch_shared_info_template<UT> *>(
            th->th.th_dispatch->th_dispatch_sh_current);
    UT lower;

    if (!__kmp_env_consistency_check) {
      pr = reinterpret_cast<dispatch_private_info_template<UT> *>(
          th->th.th_dispatch->th_dispatch_pr_current);
    }
    lower = pr->u.p.ordered_lower;

#if !defined(KMP_GOMP_COMPAT)
    if (__kmp_env_consistency_check) {
      if (pr->ordered_bumped) {
        struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
        __kmp_error_construct2(kmp_i18n_msg_CnsMultipleNesting,
                               ct_ordered_in_pdo, loc_ref,
                               &p->stack_data[p->w_top]);
      }
    }
#endif /* !defined(KMP_GOMP_COMPAT) */

    KMP_MB();
#ifdef KMP_DEBUG
    {
      char *buff;
      // create format specifiers before the debug output
      buff = __kmp_str_format("__kmp_dispatch_deo: T#%%d before wait: "
                              "ordered_iter:%%%s lower:%%%s\n",
                              traits_t<UT>::spec, traits_t<UT>::spec);
      KD_TRACE(1000, (buff, gtid, sh->u.s.ordered_iteration, lower));
      __kmp_str_free(&buff);
    }
#endif
    __kmp_wait_yield<UT>(&sh->u.s.ordered_iteration, lower,
                         __kmp_ge<UT> USE_ITT_BUILD_ARG(NULL));
    KMP_MB();
#ifdef KMP_DEBUG
    {
      char *buff;
      // create format specifiers before the debug output
      buff = __kmp_str_format("__kmp_dispatch_deo: T#%%d after wait: "
                              "ordered_iter:%%%s lower:%%%s\n",
                              traits_t<UT>::spec, traits_t<UT>::spec);
      KD_TRACE(1000, (buff, gtid, sh->u.s.ordered_iteration, lower));
      __kmp_str_free(&buff);
    }
#endif
  }
  KD_TRACE(100, ("__kmp_dispatch_deo: T#%d returned\n", gtid));
}
template <typename UT>
void __kmp_dispatch_dxo(int *gtid_ref, int *cid_ref, ident_t *loc_ref) {
  typedef typename traits_t<UT>::signed_t ST;
  dispatch_private_info_template<UT> *pr;

  int gtid = *gtid_ref;
  kmp_info_t *th = __kmp_threads[gtid];
  KMP_DEBUG_ASSERT(th->th.th_dispatch);

  KD_TRACE(100, ("__kmp_dispatch_dxo: T#%d called\n", gtid));
  if (__kmp_env_consistency_check) {
    pr = reinterpret_cast<dispatch_private_info_template<UT> *>(
        th->th.th_dispatch->th_dispatch_pr_current);
    if (pr->pushed_ws != ct_none) {
      __kmp_pop_sync(gtid, ct_ordered_in_pdo, loc_ref);
    }
  }

  if (!th->th.th_team->t.t_serialized) {
    dispatch_shared_info_template<UT> *sh =
        reinterpret_cast<dispatch_shared_info_template<UT> *>(
            th->th.th_dispatch->th_dispatch_sh_current);

    if (!__kmp_env_consistency_check) {
      pr = reinterpret_cast<dispatch_private_info_template<UT> *>(
          th->th.th_dispatch->th_dispatch_pr_current);
    }

    KMP_FSYNC_RELEASING(CCAST(UT *, &sh->u.s.ordered_iteration));
#if !defined(KMP_GOMP_COMPAT)
    if (__kmp_env_consistency_check) {
      if (pr->ordered_bumped != 0) {
        struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
        __kmp_error_construct2(kmp_i18n_msg_CnsMultipleNesting,
                               ct_ordered_in_pdo, loc_ref,
                               &p->stack_data[p->w_top]);
      }
    }
#endif /* !defined(KMP_GOMP_COMPAT) */

    KMP_MB(); /* Flush all pending memory write invalidates. */

    pr->ordered_bumped += 1;

    KD_TRACE(1000,
             ("__kmp_dispatch_dxo: T#%d bumping ordered ordered_bumped=%d\n",
              gtid, pr->ordered_bumped));

    KMP_MB(); /* Flush all pending memory write invalidates. */

    /* TODO use general release procedure? */
    test_then_inc<ST>((volatile ST *)&sh->u.s.ordered_iteration);

    KMP_MB(); /* Flush all pending memory write invalidates. */
  }
  KD_TRACE(100, ("__kmp_dispatch_dxo: T#%d returned\n", gtid));
}
/* Computes and returns x to the power of y, where y must be a non-negative
   integer. */
template <typename UT>
static __forceinline long double __kmp_pow(long double x, UT y) {
  long double s = 1.0L;

  KMP_DEBUG_ASSERT(x > 0.0 && x < 1.0);
  while (y) {
    if (y & 1)
      s *= x;
    x *= x;
    y >>= 1;
  }
  return s;
}
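// Worked example (added; not in the original header): the loop is
// exponentiation by squaring, so only O(log2 y) multiplications are needed.
// For __kmp_pow(0.5L, 3):
//   y=3 (bit set): s = 0.5,   x = 0.25
//   y=1 (bit set): s = 0.125, x = 0.0625
//   y=0: loop exits and 0.125 == 0.5^3 is returned.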
/* Computes and returns the number of unassigned iterations after idx chunks
   have been assigned, i.e. the total number of unassigned iterations in
   chunks with index greater than or equal to idx. __forceinline seems to be
   broken here: if this function is __forceinline'd, the behavior is wrong
   (the unit test sch_guided_analytical_basic.cpp fails). */
template <typename T>
static __inline typename traits_t<T>::unsigned_t
__kmp_dispatch_guided_remaining(T tc, typename traits_t<T>::floating_t base,
                                typename traits_t<T>::unsigned_t idx) {
  typedef typename traits_t<T>::unsigned_t UT;

  long double x = tc * __kmp_pow<UT>(base, idx);
  UT r = (UT)x;
  if (x == r)
    return r;
  return r + 1;
}
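// Math note (added; not in the original header): if every chunk takes a fixed
// fraction of the work still remaining, so that chunk i leaves
// remaining(i+1) = remaining(i) * base, then after idx chunks
// remaining(idx) = tc * base^idx. The code above returns the ceiling of that
// quantity: r when x is exactly integral, r + 1 otherwise.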
// Parameters of the guided-iterative algorithm:
//    p2 = n * nproc * ( chunk + 1 )  // point of switching to dynamic
//    p3 = 1 / ( n * nproc )          // remaining iterations multiplier
// by default n = 2. For example with n = 3 the chunks distribution will be
// more flat.
// With n = 1 the first chunk is the same as for a static schedule, i.e.
// trip / nproc.
static const int guided_int_param = 2;
static const double guided_flt_param = 0.5; // = 1.0 / guided_int_param
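// Worked example (added; not in the original header): with the default n = 2,
// nproc = 8, and chunk = 1, the algorithm switches to plain dynamic once
// p2 = 2 * 8 * (1 + 1) = 32 iterations remain; until then each dispatched
// chunk covers p3 = 1/16 of the remaining iteration count.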

#endif // KMP_DISPATCH_H