#include "kmp.h"
#include "kmp_error.h"
#include "kmp_i18n.h"
#include "kmp_itt.h"
#include "kmp_stats.h"
#include "kmp_str.h"

#if OMPT_SUPPORT
#include "ompt-specific.h"
#endif
#ifdef KMP_DEBUG
// template for debug prints specification ( d, u, lld, llu )
char const *traits_t<int>::spec = "d";
char const *traits_t<unsigned int>::spec = "u";
char const *traits_t<long long>::spec = "lld";
char const *traits_t<unsigned long long>::spec = "llu";
char const *traits_t<long>::spec = "ld";
#endif
template <typename T>
static void __kmp_for_static_init(ident_t *loc, kmp_int32 global_tid,
                                  kmp_int32 schedtype, kmp_int32 *plastiter,
                                  T *plower, T *pupper,
                                  typename traits_t<T>::signed_t *pstride,
                                  typename traits_t<T>::signed_t incr,
                                  typename traits_t<T>::signed_t chunk
#if OMPT_SUPPORT && OMPT_OPTIONAL
                                  ,
                                  void *codeptr
#endif
) {
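  // On entry *plower/*pupper describe the whole iteration space; on exit they
  // hold this thread's block and *plastiter tells whether it runs the last
  // iteration. Example (assuming kmp_sch_static with the default balanced
  // mapping, 4 threads, iterations 0..99, incr=1): T#1 leaves with
  // *plower=25, *pupper=49, *pstride=100, and only T#3 sees *plastiter=1.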
  KMP_COUNT_BLOCK(OMP_LOOP_STATIC);
  KMP_PUSH_PARTITIONED_TIMER(OMP_loop_static);
  KMP_PUSH_PARTITIONED_TIMER(OMP_loop_static_scheduling);

  typedef typename traits_t<T>::unsigned_t UT;
  typedef typename traits_t<T>::signed_t ST;
  /* this all has to be changed back to TID and such.. */
  kmp_int32 gtid = global_tid;
  kmp_uint32 tid;
  kmp_uint32 nth;
  UT trip_count;
  kmp_team_t *team;
  kmp_info_t *th = __kmp_threads[gtid];
#if OMPT_SUPPORT && OMPT_OPTIONAL
  ompt_team_info_t *team_info = NULL;
  ompt_task_info_t *task_info = NULL;
  ompt_work_t ompt_work_type = ompt_work_loop;

  static kmp_int8 warn = 0;

  if (ompt_enabled.ompt_callback_work) {
    // Only fully initialize variables needed by OMPT if OMPT is enabled.
    team_info = __ompt_get_teaminfo(0, NULL);
    task_info = __ompt_get_task_info_object(0);
    // Determine workshare type from the ident flags.
    if (loc != NULL) {
      if ((loc->flags & KMP_IDENT_WORK_LOOP) != 0) {
        ompt_work_type = ompt_work_loop;
      } else if ((loc->flags & KMP_IDENT_WORK_SECTIONS) != 0) {
        ompt_work_type = ompt_work_sections;
      } else if ((loc->flags & KMP_IDENT_WORK_DISTRIBUTE) != 0) {
        ompt_work_type = ompt_work_distribute;
      } else {
        // Warn once if the compiler did not tag the workshare ident.
        kmp_int8 bool_res =
            KMP_COMPARE_AND_STORE_ACQ8(&warn, (kmp_int8)0, (kmp_int8)1);
        if (bool_res)
          KMP_WARNING(OmptOutdatedWorkshare);
      }
      KMP_DEBUG_ASSERT(ompt_work_type);
    }
  }
#endif
  KMP_DEBUG_ASSERT(plastiter && plower && pupper && pstride);
  KE_TRACE(10, ("__kmpc_for_static_init called (%d)\n", global_tid));
#ifdef KMP_DEBUG
  {
    char *buff;
    // create format specifiers before the debug output
    buff = __kmp_str_format(
        "__kmpc_for_static_init: T#%%d sched=%%d liter=%%d iter=(%%%s,"
        " %%%s, %%%s) incr=%%%s chunk=%%%s signed?<%s>\n",
        traits_t<T>::spec, traits_t<T>::spec, traits_t<ST>::spec,
        traits_t<ST>::spec, traits_t<ST>::spec, traits_t<T>::spec);
    KD_TRACE(100, (buff, global_tid, schedtype, *plastiter, *plower, *pupper,
                   *pstride, incr, chunk));
    __kmp_str_free(&buff);
  }
#endif
  if (__kmp_env_consistency_check) {
    __kmp_push_workshare(global_tid, ct_pdo, loc);
    if (incr == 0) {
      __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrZeroProhibited, ct_pdo,
                            loc);
    }
  }
  /* special handling for zero-trip loops */
  if (incr > 0 ? (*pupper < *plower) : (*plower < *pupper)) {
    if (plastiter != NULL)
      *plastiter = FALSE;
    /* leave pupper and plower set to entire iteration space */
    *pstride = incr; /* value should never be used */
#ifdef KMP_DEBUG
    {
      char *buff;
      // create format specifiers before the debug output
      buff = __kmp_str_format("__kmpc_for_static_init:(ZERO TRIP) liter=%%d "
                              "lower=%%%s upper=%%%s stride = %%%s "
                              "signed?<%s>, loc = %%s\n",
                              traits_t<T>::spec, traits_t<T>::spec,
                              traits_t<ST>::spec, traits_t<T>::spec);
      KD_TRACE(100,
               (buff, *plastiter, *plower, *pupper, *pstride, loc->psource));
      __kmp_str_free(&buff);
    }
#endif
    KE_TRACE(10, ("__kmpc_for_static_init: T#%d return\n", global_tid));

#if OMPT_SUPPORT && OMPT_OPTIONAL
    if (ompt_enabled.ompt_callback_work) {
      ompt_callbacks.ompt_callback(ompt_callback_work)(
          ompt_work_type, ompt_scope_begin, &(team_info->parallel_data),
          &(task_info->task_data), 0, codeptr);
    }
#endif
    return;
  }
  if (schedtype > kmp_ord_upper) {
    // we are in DISTRIBUTE construct
    schedtype += kmp_sch_static -
                 kmp_distribute_static; // convert to usual schedule type
    tid = th->th.th_team->t.t_master_tid;
    team = th->th.th_team->t.t_parent;
  } else {
    tid = __kmp_tid_from_gtid(global_tid);
    team = th->th.th_team;
  }
  /* determine if "for" loop is an active worksharing construct */
  if (team->t.t_serialized) {
    /* serialized parallel, each thread executes whole iteration space */
    if (plastiter != NULL)
      *plastiter = TRUE;
    /* leave pupper and plower set to entire iteration space */
    *pstride =
        (incr > 0) ? (*pupper - *plower + 1) : (-(*plower - *pupper + 1));
#ifdef KMP_DEBUG
    {
      char *buff;
      // create format specifiers before the debug output
      buff = __kmp_str_format("__kmpc_for_static_init: (serial) liter=%%d "
                              "lower=%%%s upper=%%%s stride = %%%s\n",
                              traits_t<T>::spec, traits_t<T>::spec,
                              traits_t<ST>::spec);
      KD_TRACE(100, (buff, *plastiter, *plower, *pupper, *pstride));
      __kmp_str_free(&buff);
    }
#endif
    KE_TRACE(10, ("__kmpc_for_static_init: T#%d return\n", global_tid));

#if OMPT_SUPPORT && OMPT_OPTIONAL
    if (ompt_enabled.ompt_callback_work) {
      ompt_callbacks.ompt_callback(ompt_callback_work)(
          ompt_work_type, ompt_scope_begin, &(team_info->parallel_data),
          &(task_info->task_data), *pstride, codeptr);
    }
#endif
    return;
  }
  nth = team->t.t_nproc;
  if (nth == 1) {
    if (plastiter != NULL)
      *plastiter = TRUE;
    *pstride =
        (incr > 0) ? (*pupper - *plower + 1) : (-(*plower - *pupper + 1));
#ifdef KMP_DEBUG
    {
      char *buff;
      // create format specifiers before the debug output
      buff = __kmp_str_format("__kmpc_for_static_init: (serial) liter=%%d "
                              "lower=%%%s upper=%%%s stride = %%%s\n",
                              traits_t<T>::spec, traits_t<T>::spec,
                              traits_t<ST>::spec);
      KD_TRACE(100, (buff, *plastiter, *plower, *pupper, *pstride));
      __kmp_str_free(&buff);
    }
#endif
    KE_TRACE(10, ("__kmpc_for_static_init: T#%d return\n", global_tid));

#if OMPT_SUPPORT && OMPT_OPTIONAL
    if (ompt_enabled.ompt_callback_work) {
      ompt_callbacks.ompt_callback(ompt_callback_work)(
          ompt_work_type, ompt_scope_begin, &(team_info->parallel_data),
          &(task_info->task_data), *pstride, codeptr);
    }
#endif
    return;
  }
  /* compute trip count */
  if (incr == 1) {
    trip_count = *pupper - *plower + 1;
  } else if (incr == -1) {
    trip_count = *plower - *pupper + 1;
  } else if (incr > 0) {
    // upper-lower can exceed the limit of signed type
    trip_count = (UT)(*pupper - *plower) / incr + 1;
  } else {
    trip_count = (UT)(*plower - *pupper) / (-incr) + 1;
  }
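  // e.g. (assuming *plower=0, *pupper=9, incr=2): trip_count = (9 - 0) / 2 + 1
  // = 5, covering iterations 0, 2, 4, 6, 8.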
  if (__kmp_env_consistency_check) {
    /* tripcount overflow? */
    if (trip_count == 0 && *pupper != *plower) {
      __kmp_error_construct(kmp_i18n_msg_CnsIterationRangeTooLarge, ct_pdo,
                            loc);
    }
  }
  /* compute remaining parameters */
  switch (schedtype) {
  case kmp_sch_static: {
    if (trip_count < nth) {
      KMP_DEBUG_ASSERT(
          __kmp_static == kmp_sch_static_greedy ||
          __kmp_static ==
              kmp_sch_static_balanced); // Unknown static scheduling type.
      if (tid < trip_count) {
        *pupper = *plower = *plower + tid * incr;
      } else {
        // set bounds so that this thread runs no iterations
        *plower = *pupper + incr;
      }
      if (plastiter != NULL)
        *plastiter = (tid == trip_count - 1);
    } else {
      if (__kmp_static == kmp_sch_static_balanced) {
        UT small_chunk = trip_count / nth;
        UT extras = trip_count % nth;
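        // Example (assuming trip_count=10, nth=4): small_chunk=2, extras=2;
        // the first two threads get 3 iterations ([0,2], [3,5]) and the last
        // two get 2 ([6,7], [8,9]).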
        *plower += incr * (tid * small_chunk + (tid < extras ? tid : extras));
        *pupper = *plower + small_chunk * incr - (tid < extras ? 0 : incr);
        if (plastiter != NULL)
          *plastiter = (tid == nth - 1);
      } else {
        T big_chunk_inc_count =
            (trip_count / nth + ((trip_count % nth) ? 1 : 0)) * incr;
        T old_upper = *pupper;
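        // Example (assuming trip_count=10, nth=4, incr=1): big_chunk_inc_count
        // = ceil(10/4) = 3, giving [0,2], [3,5], [6,8], [9,11]; the last range
        // is clamped to old_upper below, so tid 3 ends with [9,9].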
        KMP_DEBUG_ASSERT(__kmp_static == kmp_sch_static_greedy);
        // Unknown static scheduling type.
        *plower += tid * big_chunk_inc_count;
        *pupper = *plower + big_chunk_inc_count - incr;
        if (incr > 0) {
          if (*pupper < *plower)
            *pupper = traits_t<T>::max_value; // overflow occurred
          if (plastiter != NULL)
            *plastiter = *plower <= old_upper && *pupper > old_upper - incr;
          if (*pupper > old_upper)
            *pupper = old_upper; // tracker C73258
        } else {
          if (*pupper > *plower)
            *pupper = traits_t<T>::min_value; // overflow occurred
          if (plastiter != NULL)
            *plastiter = *plower >= old_upper && *pupper < old_upper - incr;
          if (*pupper < old_upper)
            *pupper = old_upper; // tracker C73258
        }
      }
    }
    *pstride = trip_count;
    break;
  }
  case kmp_sch_static_chunked: {
    ST span;
    if (chunk < 1)
      chunk = 1;
    span = chunk * incr;
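    // Chunks are assigned round-robin. Example (assuming chunk=2, nth=4,
    // incr=1, *plower=0, trip_count=100): span=2 and *pstride=8, so tid 1
    // starts with [2,3] and steps by *pstride to [10,11], [18,19], ...; the
    // last chunk is number (100-1)/2 = 49, owned by tid 49%4 == 1.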
    *pstride = span * nth;
    *plower = *plower + (span * tid);
    *pupper = *plower + span - incr;
    if (plastiter != NULL)
      *plastiter = (tid == ((trip_count - 1) / (UT)chunk) % nth);
    break;
  }
  case kmp_sch_static_balanced_chunked: {
    T old_upper = *pupper;
    // round up to make sure the chunk is enough to cover all iterations
    UT span = (trip_count + nth - 1) / nth;

    // perform chunk adjustment
    chunk = (span + chunk - 1) & ~(chunk - 1);
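    // The mask arithmetic rounds span up to a multiple of chunk and assumes
    // chunk is a power of two. Example (assuming trip_count=100, nth=8,
    // chunk=4): span = ceil(100/8) = 13, then chunk = (13+3) & ~3 = 16, so
    // every thread covers one 16-iteration block aligned to the chunk size.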
    span = chunk * incr;
    *plower = *plower + (span * tid);
    *pupper = *plower + span - incr;
    if (incr > 0) {
      if (*pupper > old_upper)
        *pupper = old_upper;
    } else if (*pupper < old_upper)
      *pupper = old_upper;

    if (plastiter != NULL)
      *plastiter = (tid == ((trip_count - 1) / (UT)chunk));
    break;
  }
  default:
    KMP_ASSERT2(0, "__kmpc_for_static_init: unknown scheduling type");
    break;
  }
#if USE_ITT_BUILD
  // Report loop metadata
  if (KMP_MASTER_TID(tid) && __itt_metadata_add_ptr &&
      __kmp_forkjoin_frames_mode == 3 && th->th.th_teams_microtask == NULL &&
      team->t.t_active_level == 1) {
    kmp_uint64 cur_chunk = chunk;
    // Calculate chunk in case it was not specified; it is specified for
    // kmp_sch_static_chunked
    if (schedtype == kmp_sch_static) {
      cur_chunk = trip_count / nth + ((trip_count % nth) ? 1 : 0);
    }
    // 0 - "static" schedule
    __kmp_itt_metadata_loop(loc, 0, trip_count, cur_chunk);
  }
#endif
#ifdef KMP_DEBUG
  {
    char *buff;
    // create format specifiers before the debug output
    buff = __kmp_str_format("__kmpc_for_static_init: liter=%%d lower=%%%s "
                            "upper=%%%s stride = %%%s signed?<%s>\n",
                            traits_t<T>::spec, traits_t<T>::spec,
                            traits_t<ST>::spec, traits_t<T>::spec);
    KD_TRACE(100, (buff, *plastiter, *plower, *pupper, *pstride));
    __kmp_str_free(&buff);
  }
#endif
  KE_TRACE(10, ("__kmpc_for_static_init: T#%d return\n", global_tid));

#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.ompt_callback_work) {
    ompt_callbacks.ompt_callback(ompt_callback_work)(
        ompt_work_type, ompt_scope_begin, &(team_info->parallel_data),
        &(task_info->task_data), trip_count, codeptr);
  }
#endif
#if KMP_STATS_ENABLED
  {
    kmp_int64 t;
    kmp_int64 u = (kmp_int64)(*pupper);
    kmp_int64 l = (kmp_int64)(*plower);
    kmp_int64 i = (kmp_int64)incr;
    /* compute trip count */
    if (i == 1) {
      t = u - l + 1;
    } else if (i == -1) {
      t = l - u + 1;
    } else if (i > 0) {
      t = (u - l) / i + 1;
    } else {
      t = (l - u) / (-i) + 1;
    }
    KMP_COUNT_VALUE(OMP_loop_static_iterations, t);
  }
#endif
  KMP_POP_PARTITIONED_TIMER();
  return;
}
template <typename T>
static void __kmp_dist_for_static_init(ident_t *loc, kmp_int32 gtid,
                                       kmp_int32 schedule, kmp_int32 *plastiter,
                                       T *plower, T *pupper, T *pupperDist,
                                       typename traits_t<T>::signed_t *pstride,
                                       typename traits_t<T>::signed_t incr,
                                       typename traits_t<T>::signed_t chunk) {
  KMP_COUNT_BLOCK(OMP_DISTRIBUTE);
  typedef typename traits_t<T>::unsigned_t UT;
  typedef typename traits_t<T>::signed_t ST;
  kmp_uint32 tid;
  kmp_uint32 nth;
  kmp_uint32 team_id;
  kmp_uint32 nteams;
  UT trip_count;
  kmp_team_t *team;
  kmp_info_t *th;
  KMP_DEBUG_ASSERT(plastiter && plower && pupper && pupperDist && pstride);
  KE_TRACE(10, ("__kmpc_dist_for_static_init called (%d)\n", gtid));
#ifdef KMP_DEBUG
  {
    char *buff;
    // create format specifiers before the debug output
    buff = __kmp_str_format(
        "__kmpc_dist_for_static_init: T#%%d schedLoop=%%d liter=%%d "
        "iter=(%%%s, %%%s, %%%s) chunk=%%%s signed?<%s>\n",
        traits_t<T>::spec, traits_t<T>::spec, traits_t<ST>::spec,
        traits_t<ST>::spec, traits_t<T>::spec);
    KD_TRACE(100,
             (buff, gtid, schedule, *plastiter, *plower, *pupper, incr, chunk));
    __kmp_str_free(&buff);
  }
#endif
  if (__kmp_env_consistency_check) {
    __kmp_push_workshare(gtid, ct_pdo, loc);
    if (incr == 0) {
      __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrZeroProhibited, ct_pdo,
                            loc);
    }
    if (incr > 0 ? (*pupper < *plower) : (*plower < *pupper)) {
      // The loop is illegal; the compiler does not catch all such cases.
      __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrIllegal, ct_pdo, loc);
    }
  }
  tid = __kmp_tid_from_gtid(gtid);
  th = __kmp_threads[gtid];
  nth = th->th.th_team_nproc;
  team = th->th.th_team;
  KMP_DEBUG_ASSERT(th->th.th_teams_microtask); // we are in the teams construct
  nteams = th->th.th_teams_size.nteams;
  team_id = team->t.t_master_tid;
  KMP_DEBUG_ASSERT(nteams == (kmp_uint32)team->t.t_parent->t.t_nproc);
  // compute global trip count
  if (incr == 1) {
    trip_count = *pupper - *plower + 1;
  } else if (incr == -1) {
    trip_count = *plower - *pupper + 1;
  } else if (incr > 0) {
    // upper-lower can exceed the limit of signed type
    trip_count = (UT)(*pupper - *plower) / incr + 1;
  } else {
    trip_count = (UT)(*plower - *pupper) / (-incr) + 1;
  }
  *pstride = *pupper - *plower; // just in case (can be unused)
  if (trip_count <= nteams) {
    KMP_DEBUG_ASSERT(
        __kmp_static == kmp_sch_static_greedy ||
        __kmp_static ==
            kmp_sch_static_balanced); // Unknown static scheduling type.
    // only primary threads of some teams get single iteration, others get
    // nothing
    if (team_id < trip_count && tid == 0) {
      *pupper = *pupperDist = *plower = *plower + team_id * incr;
    } else {
      *pupperDist = *pupper;
      *plower = *pupper + incr; // compiler should skip the loop body
    }
    if (plastiter != NULL)
      *plastiter = (tid == 0 && team_id == trip_count - 1);
  } else {
    // Get the team's chunk first (each team gets at most one chunk)
    if (__kmp_static == kmp_sch_static_balanced) {
      UT chunkD = trip_count / nteams;
      UT extras = trip_count % nteams;
      *plower +=
          incr * (team_id * chunkD + (team_id < extras ? team_id : extras));
      *pupperDist = *plower + chunkD * incr - (team_id < extras ? 0 : incr);
      if (plastiter != NULL)
        *plastiter = (team_id == nteams - 1);
    } else {
      T chunk_inc_count =
          (trip_count / nteams + ((trip_count % nteams) ? 1 : 0)) * incr;
      T upper = *pupper;
      KMP_DEBUG_ASSERT(__kmp_static == kmp_sch_static_greedy);
      // Unknown static scheduling type.
      *plower += team_id * chunk_inc_count;
      *pupperDist = *plower + chunk_inc_count - incr;
      // Check/correct bounds if needed
      if (incr > 0) {
        if (*pupperDist < *plower)
          *pupperDist = traits_t<T>::max_value; // overflow occurred
        if (plastiter != NULL)
          *plastiter = *plower <= upper && *pupperDist > upper - incr;
        if (*pupperDist > upper)
          *pupperDist = upper; // tracker C73258
        if (*plower > *pupperDist) {
          *pupper = *pupperDist; // no iterations available for the team
          goto end;
        }
      } else {
        if (*pupperDist > *plower)
          *pupperDist = traits_t<T>::min_value; // overflow occurred
        if (plastiter != NULL)
          *plastiter = *plower >= upper && *pupperDist < upper - incr;
        if (*pupperDist < upper)
          *pupperDist = upper; // tracker C73258
        if (*plower < *pupperDist) {
          *pupper = *pupperDist; // no iterations available for the team
          goto end;
        }
      }
    }
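    // At this point [*plower, *pupperDist] is this team's share of the
    // iteration space; below, that share is split across the team's threads
    // using the same static rules as __kmp_for_static_init.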
    // compute trip count for the team's chunk
    if (incr == 1) {
      trip_count = *pupperDist - *plower + 1;
    } else if (incr == -1) {
      trip_count = *plower - *pupperDist + 1;
    } else if (incr > 1) {
      // upper-lower can exceed the limit of signed type
      trip_count = (UT)(*pupperDist - *plower) / incr + 1;
    } else {
      trip_count = (UT)(*plower - *pupperDist) / (-incr) + 1;
    }
    KMP_DEBUG_ASSERT(trip_count);
    switch (schedule) {
    case kmp_sch_static: {
      if (trip_count <= nth) {
        KMP_DEBUG_ASSERT(
            __kmp_static == kmp_sch_static_greedy ||
            __kmp_static ==
                kmp_sch_static_balanced); // Unknown static scheduling type.
        if (tid < trip_count)
          *pupper = *plower = *plower + tid * incr;
        else
          *plower = *pupper + incr; // no iterations available
        if (plastiter != NULL)
          if (*plastiter != 0 && !(tid == trip_count - 1))
            *plastiter = 0;
      } else {
        if (__kmp_static == kmp_sch_static_balanced) {
          UT chunkL = trip_count / nth;
          UT extras = trip_count % nth;
          *plower += incr * (tid * chunkL + (tid < extras ? tid : extras));
          *pupper = *plower + chunkL * incr - (tid < extras ? 0 : incr);
          if (plastiter != NULL)
            if (*plastiter != 0 && !(tid == nth - 1))
              *plastiter = 0;
        } else {
          T chunk_inc_count =
              (trip_count / nth + ((trip_count % nth) ? 1 : 0)) * incr;
          T upper = *pupperDist;
          KMP_DEBUG_ASSERT(__kmp_static == kmp_sch_static_greedy);
          // Unknown static scheduling type.
          *plower += tid * chunk_inc_count;
          *pupper = *plower + chunk_inc_count - incr;
          if (incr > 0) {
            if (*pupper < *plower)
              *pupper = traits_t<T>::max_value; // overflow occurred
            if (plastiter != NULL)
              if (*plastiter != 0 &&
                  !(*plower <= upper && *pupper > upper - incr))
                *plastiter = 0;
            if (*pupper > upper)
              *pupper = upper; // tracker C73258
          } else {
            if (*pupper > *plower)
              *pupper = traits_t<T>::min_value; // overflow occurred
            if (plastiter != NULL)
              if (*plastiter != 0 &&
                  !(*plower >= upper && *pupper < upper - incr))
                *plastiter = 0;
            if (*pupper < upper)
              *pupper = upper; // tracker C73258
          }
        }
      }
      break;
    }
    case kmp_sch_static_chunked: {
      ST span;
      if (chunk < 1)
        chunk = 1;
      span = chunk * incr;
      *pstride = span * nth;
      *plower = *plower + (span * tid);
      *pupper = *plower + span - incr;
      if (plastiter != NULL)
        if (*plastiter != 0 && !(tid == ((trip_count - 1) / (UT)chunk) % nth))
          *plastiter = 0;
      break;
    }
632 "__kmpc_dist_for_static_init: unknown loop scheduling type");
#ifdef KMP_DEBUG
  {
    char *buff;
    // create format specifiers before the debug output
    buff = __kmp_str_format(
        "__kmpc_dist_for_static_init: last=%%d lo=%%%s up=%%%s upDist=%%%s "
        "stride=%%%s signed?<%s>\n",
        traits_t<T>::spec, traits_t<T>::spec, traits_t<T>::spec,
        traits_t<ST>::spec, traits_t<T>::spec);
    KD_TRACE(100, (buff, *plastiter, *plower, *pupper, *pupperDist, *pstride));
    __kmp_str_free(&buff);
  }
#endif
  KE_TRACE(10, ("__kmpc_dist_for_static_init: T#%d return\n", gtid));
  return;
}
template <typename T>
static void __kmp_team_static_init(ident_t *loc, kmp_int32 gtid,
                                   kmp_int32 *p_last, T *p_lb, T *p_ub,
                                   typename traits_t<T>::signed_t *p_st,
                                   typename traits_t<T>::signed_t incr,
                                   typename traits_t<T>::signed_t chunk) {
  // The routine returns the first chunk distributed to the team and
  // stride for next chunks calculation.
  // Last iteration flag set for the team that will execute
  // the last iteration of the loop.
  // The routine is called for dist_schedule(static,chunk) only.
  typedef typename traits_t<T>::unsigned_t UT;
  typedef typename traits_t<T>::signed_t ST;
  kmp_uint32 team_id;
  kmp_uint32 nteams;
  UT trip_count;
  T lower;
  T upper;
  ST span;
  kmp_team_t *team;
  kmp_info_t *th;
  KMP_DEBUG_ASSERT(p_last && p_lb && p_ub && p_st);
  KE_TRACE(10, ("__kmp_team_static_init called (%d)\n", gtid));
#ifdef KMP_DEBUG
  {
    char *buff;
    // create format specifiers before the debug output
    buff = __kmp_str_format("__kmp_team_static_init enter: T#%%d liter=%%d "
                            "iter=(%%%s, %%%s, %%%s) chunk %%%s; signed?<%s>\n",
                            traits_t<T>::spec, traits_t<T>::spec,
                            traits_t<ST>::spec, traits_t<ST>::spec,
                            traits_t<T>::spec);
    KD_TRACE(100, (buff, gtid, *p_last, *p_lb, *p_ub, *p_st, chunk));
    __kmp_str_free(&buff);
  }
#endif

  lower = *p_lb;
  upper = *p_ub;
  if (__kmp_env_consistency_check) {
    if (incr == 0) {
      __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrZeroProhibited, ct_pdo,
                            loc);
    }
    if (incr > 0 ? (upper < lower) : (lower < upper)) {
      // The loop is illegal; the compiler does not catch all such cases.
      __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrIllegal, ct_pdo, loc);
    }
  }
  th = __kmp_threads[gtid];
  team = th->th.th_team;
  KMP_DEBUG_ASSERT(th->th.th_teams_microtask); // we are in the teams construct
  nteams = th->th.th_teams_size.nteams;
  team_id = team->t.t_master_tid;
  KMP_DEBUG_ASSERT(nteams == (kmp_uint32)team->t.t_parent->t.t_nproc);
  // compute trip count
  if (incr == 1) {
    trip_count = upper - lower + 1;
  } else if (incr == -1) {
    trip_count = lower - upper + 1;
  } else if (incr > 0) {
    // upper-lower can exceed the limit of signed type
    trip_count = (UT)(upper - lower) / incr + 1;
  } else {
    trip_count = (UT)(lower - upper) / (-incr) + 1;
  }
  if (chunk < 1)
    chunk = 1;
  span = chunk * incr;
  *p_st = span * nteams;
  *p_lb = lower + (span * team_id);
  *p_ub = *p_lb + span - incr;
  if (p_last != NULL)
    *p_last = (team_id == ((trip_count - 1) / (UT)chunk) % nteams);
  // Correct upper bound if needed
  if (incr > 0) {
    if (*p_ub < *p_lb) // overflow?
      *p_ub = traits_t<T>::max_value;
    if (*p_ub > upper)
      *p_ub = upper; // tracker C73258
  } else { // incr < 0
    if (*p_ub > *p_lb) // overflow?
      *p_ub = traits_t<T>::min_value;
    if (*p_ub < upper)
      *p_ub = upper; // tracker C73258
  }
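  // Example (assuming nteams=4, chunk=10, incr=1, bounds 0..99): *p_st=40 and
  // team 2 gets [20,29] first, advancing by *p_st to [60,69]; the last chunk
  // is number (100-1)/10 = 9, owned by team 9%4 == 1, which gets *p_last.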
#ifdef KMP_DEBUG
  {
    char *buff;
    // create format specifiers before the debug output
    buff =
        __kmp_str_format("__kmp_team_static_init exit: T#%%d team%%u liter=%%d "
                         "iter=(%%%s, %%%s, %%%s) chunk %%%s\n",
                         traits_t<T>::spec, traits_t<T>::spec,
                         traits_t<ST>::spec, traits_t<ST>::spec);
    KD_TRACE(100, (buff, gtid, team_id, *p_last, *p_lb, *p_ub, *p_st, chunk));
    __kmp_str_free(&buff);
  }
#endif
}

//------------------------------------------------------------------------------
extern "C" {
void __kmpc_for_static_init_4(ident_t *loc, kmp_int32 gtid,
                              kmp_int32 schedtype, kmp_int32 *plastiter,
                              kmp_int32 *plower, kmp_int32 *pupper,
                              kmp_int32 *pstride, kmp_int32 incr,
                              kmp_int32 chunk) {
  __kmp_for_static_init<kmp_int32>(loc, gtid, schedtype, plastiter, plower,
                                   pupper, pstride, incr, chunk
#if OMPT_SUPPORT && OMPT_OPTIONAL
                                   ,
                                   OMPT_GET_RETURN_ADDRESS(0)
#endif
  );
}
void __kmpc_for_static_init_4u(ident_t *loc, kmp_int32 gtid,
                               kmp_int32 schedtype, kmp_int32 *plastiter,
                               kmp_uint32 *plower, kmp_uint32 *pupper,
                               kmp_int32 *pstride, kmp_int32 incr,
                               kmp_int32 chunk) {
  __kmp_for_static_init<kmp_uint32>(loc, gtid, schedtype, plastiter, plower,
                                    pupper, pstride, incr, chunk
#if OMPT_SUPPORT && OMPT_OPTIONAL
                                    ,
                                    OMPT_GET_RETURN_ADDRESS(0)
#endif
  );
}
void __kmpc_for_static_init_8(ident_t *loc, kmp_int32 gtid,
                              kmp_int32 schedtype, kmp_int32 *plastiter,
                              kmp_int64 *plower, kmp_int64 *pupper,
                              kmp_int64 *pstride, kmp_int64 incr,
                              kmp_int64 chunk) {
  __kmp_for_static_init<kmp_int64>(loc, gtid, schedtype, plastiter, plower,
                                   pupper, pstride, incr, chunk
#if OMPT_SUPPORT && OMPT_OPTIONAL
                                   ,
                                   OMPT_GET_RETURN_ADDRESS(0)
#endif
  );
}
void __kmpc_for_static_init_8u(ident_t *loc, kmp_int32 gtid,
                               kmp_int32 schedtype, kmp_int32 *plastiter,
                               kmp_uint64 *plower, kmp_uint64 *pupper,
                               kmp_int64 *pstride, kmp_int64 incr,
                               kmp_int64 chunk) {
  __kmp_for_static_init<kmp_uint64>(loc, gtid, schedtype, plastiter, plower,
                                    pupper, pstride, incr, chunk
#if OMPT_SUPPORT && OMPT_OPTIONAL
                                    ,
                                    OMPT_GET_RETURN_ADDRESS(0)
#endif
  );
}
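// A minimal sketch (not part of the runtime) of how a compiler might lower
// "#pragma omp for schedule(static)" onto the entry points above; loop_body
// and the literal bounds are hypothetical.
#if 0
extern void loop_body(kmp_int32 i);
static void lowered_loop(ident_t *loc, kmp_int32 gtid) {
  kmp_int32 lower = 0, upper = 99, stride = 1, lastiter = 0;
  __kmpc_for_static_init_4(loc, gtid, kmp_sch_static, &lastiter, &lower,
                           &upper, &stride, /*incr=*/1, /*chunk=*/0);
  for (kmp_int32 i = lower; i <= upper; ++i) // this thread's block only
    loop_body(i);
  __kmpc_for_static_fini(loc, gtid);
}
#endif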
void __kmpc_dist_for_static_init_4(ident_t *loc, kmp_int32 gtid,
                                   kmp_int32 schedule, kmp_int32 *plastiter,
                                   kmp_int32 *plower, kmp_int32 *pupper,
                                   kmp_int32 *pupperD, kmp_int32 *pstride,
                                   kmp_int32 incr, kmp_int32 chunk) {
  __kmp_dist_for_static_init<kmp_int32>(loc, gtid, schedule, plastiter, plower,
                                        pupper, pupperD, pstride, incr, chunk);
}
void __kmpc_dist_for_static_init_4u(ident_t *loc, kmp_int32 gtid,
                                    kmp_int32 schedule, kmp_int32 *plastiter,
                                    kmp_uint32 *plower, kmp_uint32 *pupper,
                                    kmp_uint32 *pupperD, kmp_int32 *pstride,
                                    kmp_int32 incr, kmp_int32 chunk) {
  __kmp_dist_for_static_init<kmp_uint32>(loc, gtid, schedule, plastiter, plower,
                                         pupper, pupperD, pstride, incr, chunk);
}
void __kmpc_dist_for_static_init_8(ident_t *loc, kmp_int32 gtid,
                                   kmp_int32 schedule, kmp_int32 *plastiter,
                                   kmp_int64 *plower, kmp_int64 *pupper,
                                   kmp_int64 *pupperD, kmp_int64 *pstride,
                                   kmp_int64 incr, kmp_int64 chunk) {
  __kmp_dist_for_static_init<kmp_int64>(loc, gtid, schedule, plastiter, plower,
                                        pupper, pupperD, pstride, incr, chunk);
}
void __kmpc_dist_for_static_init_8u(ident_t *loc, kmp_int32 gtid,
                                    kmp_int32 schedule, kmp_int32 *plastiter,
                                    kmp_uint64 *plower, kmp_uint64 *pupper,
                                    kmp_uint64 *pupperD, kmp_int64 *pstride,
                                    kmp_int64 incr, kmp_int64 chunk) {
  __kmp_dist_for_static_init<kmp_uint64>(loc, gtid, schedule, plastiter, plower,
                                         pupper, pupperD, pstride, incr, chunk);
}
void __kmpc_team_static_init_4(ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last,
                               kmp_int32 *p_lb, kmp_int32 *p_ub,
                               kmp_int32 *p_st, kmp_int32 incr,
                               kmp_int32 chunk) {
  KMP_DEBUG_ASSERT(__kmp_init_serial);
  __kmp_team_static_init<kmp_int32>(loc, gtid, p_last, p_lb, p_ub, p_st, incr,
                                    chunk);
}
void __kmpc_team_static_init_4u(ident_t *loc, kmp_int32 gtid,
                                kmp_int32 *p_last, kmp_uint32 *p_lb,
                                kmp_uint32 *p_ub, kmp_int32 *p_st,
                                kmp_int32 incr, kmp_int32 chunk) {
  KMP_DEBUG_ASSERT(__kmp_init_serial);
  __kmp_team_static_init<kmp_uint32>(loc, gtid, p_last, p_lb, p_ub, p_st, incr,
                                     chunk);
}
void __kmpc_team_static_init_8(ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last,
                               kmp_int64 *p_lb, kmp_int64 *p_ub,
                               kmp_int64 *p_st, kmp_int64 incr,
                               kmp_int64 chunk) {
  KMP_DEBUG_ASSERT(__kmp_init_serial);
  __kmp_team_static_init<kmp_int64>(loc, gtid, p_last, p_lb, p_ub, p_st, incr,
                                    chunk);
}
void __kmpc_team_static_init_8u(ident_t *loc, kmp_int32 gtid,
                                kmp_int32 *p_last, kmp_uint64 *p_lb,
                                kmp_uint64 *p_ub, kmp_int64 *p_st,
                                kmp_int64 incr, kmp_int64 chunk) {
  KMP_DEBUG_ASSERT(__kmp_init_serial);
  __kmp_team_static_init<kmp_uint64>(loc, gtid, p_last, p_lb, p_ub, p_st, incr,
                                     chunk);
}

} // extern "C"