#ifndef OMPT_SPECIFIC_H
#define OMPT_SPECIFIC_H

#include "kmp.h"

#if OMPT_SUPPORT

void __ompt_force_initialization();

void __ompt_team_assign_id(kmp_team_t *team, ompt_data_t ompt_pid);
void __ompt_thread_assign_wait_id(void *variable);

void __ompt_lw_taskteam_init(ompt_lw_taskteam_t *lwt, kmp_info_t *thr, int gtid,
                             ompt_data_t *ompt_pid, void *codeptr);

void __ompt_lw_taskteam_link(ompt_lw_taskteam_t *lwt, kmp_info_t *thr,
                             int on_heap, bool always = false);

void __ompt_lw_taskteam_unlink(kmp_info_t *thr);

ompt_team_info_t *__ompt_get_teaminfo(int depth, int *size);

ompt_data_t *__ompt_get_task_data();

ompt_data_t *__ompt_get_target_task_data();

ompt_task_info_t *__ompt_get_task_info_object(int depth);

int __ompt_get_parallel_info_internal(int ancestor_level,
                                      ompt_data_t **parallel_data,
                                      int *team_size);

int __ompt_get_task_info_internal(int ancestor_level, int *type,
                                  ompt_data_t **task_data,
                                  ompt_frame_t **task_frame,
                                  ompt_data_t **parallel_data, int *thread_num);

ompt_data_t *__ompt_get_thread_data_internal();

static inline void __ompt_task_init(kmp_taskdata_t *task, int tid) {
  // Callers are expected to have checked the ompt_enabled condition already.
  task->ompt_task_info.task_data.value = 0;
  task->ompt_task_info.frame.exit_frame = ompt_data_none;
  task->ompt_task_info.frame.enter_frame = ompt_data_none;
  task->ompt_task_info.frame.exit_frame_flags =
      task->ompt_task_info.frame.enter_frame_flags = OMPT_FRAME_FLAGS_RUNTIME;
  task->ompt_task_info.dispatch_chunk.start = 0;
  task->ompt_task_info.dispatch_chunk.iterations = 0;
}

ompt_sync_region_t __ompt_get_barrier_kind(enum barrier_type, kmp_info_t *);

#define OMPT_CUR_TASK_INFO(thr) (&((thr)->th.th_current_task->ompt_task_info))
#define OMPT_CUR_TASK_DATA(thr) \
  (&((thr)->th.th_current_task->ompt_task_info.task_data))
#define OMPT_CUR_TEAM_INFO(thr) (&((thr)->th.th_team->t.ompt_team_info))
#define OMPT_CUR_TEAM_DATA(thr) \
  (&((thr)->th.th_team->t.ompt_team_info.parallel_data))
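
// Minimal usage sketch (hypothetical caller, not from this file): the macros
// navigate from a kmp_info_t to the OMPT data of its current task and team.
//   kmp_info_t *thr = ompt_get_thread(); // defined below
//   if (thr) {
//     ompt_data_t *task_data = OMPT_CUR_TASK_DATA(thr);
//     ompt_data_t *parallel_data = OMPT_CUR_TEAM_DATA(thr);
//   }
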
#define OMPT_HAVE_WEAK_ATTRIBUTE KMP_HAVE_WEAK_ATTRIBUTE
#define OMPT_HAVE_PSAPI KMP_HAVE_PSAPI
#define OMPT_STR_MATCH(haystack, needle) __kmp_str_match(haystack, 0, needle)

inline void *__ompt_load_return_address(int gtid) {
  kmp_info_t *thr = __kmp_threads[gtid];
  void *return_address = thr->th.ompt_thread_info.return_address;
  thr->th.ompt_thread_info.return_address = NULL;
  return return_address;
}

#define OMPT_STORE_RETURN_ADDRESS(gtid) \
  OmptReturnAddressGuard ReturnAddressGuard{gtid, __builtin_return_address(0)};
#define OMPT_LOAD_RETURN_ADDRESS(gtid) __ompt_load_return_address(gtid)
#define OMPT_LOAD_OR_GET_RETURN_ADDRESS(gtid) \
  ((ompt_enabled.enabled && gtid >= 0 && __kmp_threads[gtid] && \
    __kmp_threads[gtid]->th.ompt_thread_info.return_address) \
       ? __ompt_load_return_address(gtid) \
       : __builtin_return_address(0))
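
// Intended pattern (sketch of an assumed caller, not from this file): a
// runtime entry point stores its caller's address on entry via an RAII guard,
// and a callback later consumes it exactly once.
//   void __kmpc_example_entry(kmp_int32 gtid) { // hypothetical entry point
//     OMPT_STORE_RETURN_ADDRESS(gtid);
//     // ... dispatch callbacks, which call OMPT_LOAD_RETURN_ADDRESS(gtid)
//   }
// OMPT_LOAD_OR_GET_RETURN_ADDRESS falls back to __builtin_return_address(0)
// when no address was stored for this thread.
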
#define OMPT_GET_DISPATCH_CHUNK(chunk, lb, ub, incr) \
  do { \
    if (incr > 0) { \
      chunk.start = static_cast<uint64_t>(lb); \
      chunk.iterations = static_cast<uint64_t>(((ub) - (lb)) / (incr) + 1); \
    } else { \
      chunk.start = static_cast<uint64_t>(ub); \
      chunk.iterations = static_cast<uint64_t>(((lb) - (ub)) / -(incr) + 1); \
    } \
  } while (0)
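
// Worked example: for lb = 0, ub = 9, incr = 2 this yields chunk.start = 0
// and chunk.iterations = (9 - 0) / 2 + 1 = 5, i.e. the iterations
// 0, 2, 4, 6, 8. For a negative increment, ub is the starting point and the
// count is computed with the increment negated.
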
inline kmp_info_t *ompt_get_thread_gtid(int gtid) {
  return (gtid >= 0) ? __kmp_thread_from_gtid(gtid) : NULL;
}

inline kmp_info_t *ompt_get_thread() {
  int gtid = __kmp_get_gtid();
  return ompt_get_thread_gtid(gtid);
}

inline void ompt_set_thread_state(kmp_info_t *thread, ompt_state_t state) {
  if (thread)
    thread->th.ompt_thread_info.state = state;
}

inline const char *ompt_get_runtime_version() {
  return &__kmp_version_lib_ver[KMP_VERSION_MAGIC_LEN];
}

inline ompt_work_t ompt_get_work_schedule(enum sched_type schedule) {
  switch (SCHEDULE_WITHOUT_MODIFIERS(schedule)) {
  case kmp_sch_static_chunked:
  case kmp_sch_static_balanced:
  case kmp_sch_static_greedy:
    return ompt_work_loop_static;
  case kmp_sch_dynamic_chunked:
  case kmp_sch_static_steal:
    return ompt_work_loop_dynamic;
  case kmp_sch_guided_iterative_chunked:
  case kmp_sch_guided_analytical_chunked:
  case kmp_sch_guided_simd:
  case kmp_sch_guided_chunked:
    return ompt_work_loop_guided;
  default:
    return ompt_work_loop_other;
  }
}
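
// For example, a worksharing loop with schedule(dynamic) reaches the runtime
// as kmp_sch_dynamic_chunked and is therefore reported to tools as
// ompt_work_loop_dynamic.
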
class OmptReturnAddressGuard {
private:
  bool SetAddress{false};
  int Gtid;

public:
  OmptReturnAddressGuard(int Gtid, void *ReturnAddress) : Gtid(Gtid) {
    if (ompt_enabled.enabled && Gtid >= 0 && __kmp_threads[Gtid] &&
        !__kmp_threads[Gtid]->th.ompt_thread_info.return_address) {
      SetAddress = true;
      __kmp_threads[Gtid]->th.ompt_thread_info.return_address = ReturnAddress;
    }
  }
  ~OmptReturnAddressGuard() {
    if (SetAddress)
      __kmp_threads[Gtid]->th.ompt_thread_info.return_address = NULL;
  }
};
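
// Design note: the RAII guard means every exit path from a scope that used
// OMPT_STORE_RETURN_ADDRESS clears the stored address, and SetAddress ensures
// that only the guard which actually stored an address resets it.
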
#endif // OMPT_SUPPORT

// macros providing the OMPT callbacks for reduction clause
#if OMPT_SUPPORT && OMPT_OPTIONAL
#define OMPT_REDUCTION_DECL(this_thr, gtid) \
  ompt_data_t *my_task_data = OMPT_CUR_TASK_DATA(this_thr); \
  ompt_data_t *my_parallel_data = OMPT_CUR_TEAM_DATA(this_thr); \
  void *return_address = OMPT_LOAD_RETURN_ADDRESS(gtid);
#define OMPT_REDUCTION_BEGIN \
  if (ompt_enabled.enabled && ompt_enabled.ompt_callback_reduction) { \
    ompt_callbacks.ompt_callback(ompt_callback_reduction)( \
        ompt_sync_region_reduction, ompt_scope_begin, my_parallel_data, \
        my_task_data, return_address); \
  }
#define OMPT_REDUCTION_END \
  if (ompt_enabled.enabled && ompt_enabled.ompt_callback_reduction) { \
    ompt_callbacks.ompt_callback(ompt_callback_reduction)( \
        ompt_sync_region_reduction, ompt_scope_end, my_parallel_data, \
        my_task_data, return_address); \
  }
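
// Usage sketch (assumed shape of a reduction entry point, not from this
// file): the three macros are meant to appear in sequence in one scope, with
// OMPT_REDUCTION_DECL declaring the locals the other two capture.
//   OMPT_REDUCTION_DECL(this_thr, gtid);
//   OMPT_REDUCTION_BEGIN;
//   /* ... perform the reduction ... */
//   OMPT_REDUCTION_END;
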
#else // OMPT_SUPPORT && OMPT_OPTIONAL
#define OMPT_REDUCTION_DECL(this_thr, gtid)
#define OMPT_REDUCTION_BEGIN
#define OMPT_REDUCTION_END
#endif // ! OMPT_SUPPORT && OMPT_OPTIONAL

#endif // OMPT_SPECIFIC_H