#include "kmp.h"
#include "kmp_i18n.h"
#include "kmp_str.h"
#include "kmp_error.h"

#define MIN_STACK 100

static char const *cons_text_c[] = {
    "(none)", // ct_none
    "\"parallel\"", // ct_parallel
    "work-sharing", // ct_pdo ("for"/"sections" are lowered to this)
    "\"ordered\" work-sharing", // ct_pdo_ordered
    "\"sections\"", // ct_psections
    "work-sharing", // ct_psingle ("single" is lowered to this)
    "\"taskq\"", // ct_taskq
    "\"taskq\"", // ct_task
    "\"taskq ordered\"", // ct_task_ordered
    "\"critical\"", // ct_critical
    "\"ordered\"", // ct_ordered_in_parallel
    "\"ordered\"", // ct_ordered_in_pdo
    "\"ordered\"", // ct_ordered_in_taskq
    "\"master\"", // ct_master
    "\"reduce\"", // ct_reduce
    "\"barrier\""}; // ct_barrier
#define get_src(ident) ((ident) == NULL ? NULL : (ident)->psource)

#define PUSH_MSG(ct, ident)                                                    \
  "\tpushing on stack: %s (%s)\n", cons_text_c[(ct)], get_src((ident))
#define POP_MSG(p)                                                             \
  "\tpopping off stack: %s (%s)\n", cons_text_c[(p)->stack_data[tos].type],    \
      get_src((p)->stack_data[tos].ident)

static int const cons_text_c_num = sizeof(cons_text_c) / sizeof(char const *);
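
/* The consistency checker keeps one construct stack per thread
   (struct cons_header, reached via th.th_cons).  stack_data[] holds one
   cons_data entry per currently open construct; p_top, w_top and s_top index
   the topmost PARALLEL, worksharing and synchronization entry respectively,
   and each entry's "prev" field chains back to the previous entry of the same
   kind so the pop routines can restore the corresponding index. */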
static void __kmp_check_null_func(void) { /* nothing to do */
}

static void __kmp_expand_cons_stack(int gtid, struct cons_header *p) {
  int i;
  struct cons_data *d;

  if (gtid < 0)
    __kmp_check_null_func();

  KE_TRACE(10, ("expand cons_stack (%d %d)\n", gtid, __kmp_get_gtid()));

  d = p->stack_data;

  p->stack_size = (p->stack_size * 2) + 100;

  p->stack_data = (struct cons_data *)__kmp_allocate(sizeof(struct cons_data) *
                                                     (p->stack_size + 1));

  for (i = p->stack_top; i >= 0; --i)
    p->stack_data[i] = d[i];

  /* NOTE: the old stack_data is not freed here. */
}
// NOTE: The function returns allocated memory, the caller must free it.
static char *__kmp_pragma(int ct, ident_t const *ident) {
  char const *cons = NULL; // Construct name.
  char *file = NULL; // File name.
  char *func = NULL; // Function (routine) name.
  char *line = NULL; // Line number.
  kmp_str_buf_t buffer;
  kmp_msg_t prgm;
  __kmp_str_buf_init(&buffer);
  if (0 < ct && ct < cons_text_c_num) {
    cons = cons_text_c[ct];
  } else {
    KMP_DEBUG_ASSERT(0);
  }
  if (ident != NULL && ident->psource != NULL) {
    char *tail = NULL;
    __kmp_str_buf_print(&buffer, "%s", ident->psource); // Copy source to buffer.
    // Split the ';'-separated psource string into file, func, and line.
    tail = buffer.str;
    __kmp_str_split(tail, ';', NULL, &tail);
    __kmp_str_split(tail, ';', &file, &tail);
    __kmp_str_split(tail, ';', &func, &tail);
    __kmp_str_split(tail, ';', &line, &tail);
  }
  prgm = __kmp_msg_format(kmp_i18n_fmt_Pragma, cons, file, func, line);
  __kmp_str_buf_free(&buffer);
  return prgm.str;
} // __kmp_pragma
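
/* Illustrative example (not taken from the runtime itself): a psource string
   of the form ";file.c;foo;12;15;;" is split above into file="file.c",
   func="foo", line="12"; the leading empty field is skipped by the first
   __kmp_str_split call, and the formatted message then names the construct
   together with that source location. */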
void __kmp_error_construct(kmp_i18n_id_t id, // Message identifier.
                           enum cons_type ct, // Construct type.
                           ident_t const *ident // Construct ident.
) {
  char *construct = __kmp_pragma(ct, ident);
  __kmp_fatal(__kmp_msg_format(id, construct), __kmp_msg_null);
  KMP_INTERNAL_FREE(construct);
}

void __kmp_error_construct2(kmp_i18n_id_t id, // Message identifier.
                            enum cons_type ct, // First construct type.
                            ident_t const *ident, // First construct ident.
                            struct cons_data const *cons // Second construct.
) {
  char *construct1 = __kmp_pragma(ct, ident);
  char *construct2 = __kmp_pragma(cons->type, cons->ident);
  __kmp_fatal(__kmp_msg_format(id, construct1, construct2), __kmp_msg_null);
  KMP_INTERNAL_FREE(construct1);
  KMP_INTERNAL_FREE(construct2);
}
struct cons_header *__kmp_allocate_cons_stack(int gtid) {
  struct cons_header *p;

  if (gtid < 0) {
    __kmp_check_null_func();
  }
  KE_TRACE(10, ("allocate cons_stack (%d)\n", gtid));
  p = (struct cons_header *)__kmp_allocate(sizeof(struct cons_header));
  p->p_top = p->w_top = p->s_top = 0;
  p->stack_data = (struct cons_data *)__kmp_allocate(sizeof(struct cons_data) *
                                                     (MIN_STACK + 1));
  p->stack_size = MIN_STACK;
  p->stack_top = 0;
  p->stack_data[0].type = ct_none;
  p->stack_data[0].prev = 0;
  p->stack_data[0].ident = NULL;
  return p;
}
void __kmp_free_cons_stack(void *ptr) {
  struct cons_header *p = (struct cons_header *)ptr;
  if (p != NULL) {
    if (p->stack_data != NULL) {
      __kmp_free(p->stack_data);
      p->stack_data = NULL;
    }
    __kmp_free(p);
  }
}
static void dump_cons_stack(int gtid, struct cons_header *p) {
  int i;
  int tos = p->stack_top;
  kmp_str_buf_t buffer;
  __kmp_str_buf_init(&buffer);
  __kmp_str_buf_print(
      &buffer,
      "+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-\n");
  __kmp_str_buf_print(&buffer,
                      "Begin construct stack with %d items for thread %d\n",
                      tos, gtid);
  __kmp_str_buf_print(&buffer, "     stack_top=%d { P=%d, W=%d, S=%d }\n", tos,
                      p->p_top, p->w_top, p->s_top);
  for (i = tos; i > 0; i--) {
    struct cons_data *c = &(p->stack_data[i]);
    __kmp_str_buf_print(
        &buffer, "        stack_data[%2d] = { %s (%s) %d %p }\n", i,
        cons_text_c[c->type], get_src(c->ident), c->prev, c->name);
  }
  __kmp_str_buf_print(&buffer, "End construct stack for thread %d\n", gtid);
  __kmp_str_buf_print(
      &buffer,
      "+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-\n");
  __kmp_debug_printf("%s", buffer.str);
  __kmp_str_buf_free(&buffer);
}
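
/* Each push routine below follows the same protocol: grow the stack if
   needed, write the new entry at ++stack_top, save the old per-category
   index in "prev", and finally point the category top (p_top, w_top or
   s_top) at the new entry.  The matching pop routine checks that the entry
   being closed really is the topmost one of its category before undoing
   those steps. */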
void __kmp_push_parallel(int gtid, ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  KMP_DEBUG_ASSERT(__kmp_threads[gtid]->th.th_cons);
  KE_TRACE(10, ("__kmp_push_parallel (%d %d)\n", gtid, __kmp_get_gtid()));
  KE_TRACE(100, (PUSH_MSG(ct_parallel, ident)));
  if (p->stack_top >= p->stack_size) {
    __kmp_expand_cons_stack(gtid, p);
  }
  tos = ++p->stack_top;
  p->stack_data[tos].type = ct_parallel;
  p->stack_data[tos].prev = p->p_top;
  p->stack_data[tos].ident = ident;
  p->stack_data[tos].name = NULL;
  p->p_top = tos;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}
void __kmp_check_workshare(int gtid, enum cons_type ct, ident_t const *ident) {
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  KMP_DEBUG_ASSERT(__kmp_threads[gtid]->th.th_cons);
  KE_TRACE(10, ("__kmp_check_workshare (%d %d)\n", gtid, __kmp_get_gtid()));

  if (p->stack_top >= p->stack_size) {
    __kmp_expand_cons_stack(gtid, p);
  }
  if (p->w_top > p->p_top &&
      !(IS_CONS_TYPE_TASKQ(p->stack_data[p->w_top].type) &&
        IS_CONS_TYPE_TASKQ(ct))) {
    /* we are already in a WORKSHARING construct for this PARALLEL region */
    __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                           &p->stack_data[p->w_top]);
  }
  if (p->s_top > p->p_top) {
    /* we are already in a SYNC construct for this PARALLEL region */
    __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                           &p->stack_data[p->s_top]);
  }
}
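
/* Illustrative user-code example (not part of the runtime) of the invalid
   nesting rejected above: a worksharing construct opened inside another one
   in the same parallel region, e.g.

     #pragma omp parallel
     {
       #pragma omp for
       for (...) {
         #pragma omp single   // reported as CnsInvalidNesting
         { ... }
       }
     }
*/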
void __kmp_push_workshare(int gtid, enum cons_type ct, ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
  KE_TRACE(10, ("__kmp_push_workshare (%d %d)\n", gtid, __kmp_get_gtid()));
  __kmp_check_workshare(gtid, ct, ident);
  KE_TRACE(100, (PUSH_MSG(ct, ident)));
  tos = ++p->stack_top;
  p->stack_data[tos].type = ct;
  p->stack_data[tos].prev = p->w_top;
  p->stack_data[tos].ident = ident;
  p->stack_data[tos].name = NULL;
  p->w_top = tos;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}
void
#if KMP_USE_DYNAMIC_LOCK
__kmp_check_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck, kmp_uint32 seq )
#else
__kmp_check_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck )
#endif
{
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  KE_TRACE(10, ("__kmp_check_sync (gtid=%d)\n", __kmp_get_gtid()));

  if (p->stack_top >= p->stack_size)
    __kmp_expand_cons_stack(gtid, p);

  if (ct == ct_ordered_in_parallel || ct == ct_ordered_in_pdo ||
      ct == ct_ordered_in_taskq) {
    if (p->w_top <= p->p_top) {
/* we are not inside a worksharing construct */
#ifdef BUILD_PARALLEL_ORDERED
      /* do not report error messages for PARALLEL ORDERED */
      KMP_ASSERT(ct == ct_ordered_in_parallel);
#else
      __kmp_error_construct(kmp_i18n_msg_CnsBoundToWorksharing, ct, ident);
#endif /* BUILD_PARALLEL_ORDERED */
    } else {
      /* inside a WORKSHARING construct for this PARALLEL region */
      if (!IS_CONS_TYPE_ORDERED(p->stack_data[p->w_top].type)) {
        if (p->stack_data[p->w_top].type == ct_taskq) {
          __kmp_error_construct2(kmp_i18n_msg_CnsNotInTaskConstruct, ct, ident,
                                 &p->stack_data[p->w_top]);
        } else {
          __kmp_error_construct2(kmp_i18n_msg_CnsNoOrderedClause, ct, ident,
                                 &p->stack_data[p->w_top]);
        }
      }
    }
    if (p->s_top > p->p_top && p->s_top > p->w_top) {
      /* inside a sync construct which is inside a worksharing construct */
      int index = p->s_top;
      enum cons_type stack_type;

      stack_type = p->stack_data[index].type;

      if (stack_type == ct_critical ||
          ((stack_type == ct_ordered_in_parallel ||
            stack_type == ct_ordered_in_pdo ||
            stack_type ==
                ct_ordered_in_taskq) &&
           /* C does not allow named ordered; ordered in ordered gets error */
           p->stack_data[index].ident != NULL &&
           (p->stack_data[index].ident->flags & KMP_IDENT_KMPC))) {
        /* we are in ORDERED which is inside an ORDERED or CRITICAL construct */
        __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                               &p->stack_data[index]);
      }
    }
  } else if (ct == ct_critical) {
#if KMP_USE_DYNAMIC_LOCK
    if (lck != NULL &&
        __kmp_get_user_lock_owner(lck, seq) ==
            gtid) { /* this thread already has lock for this critical section */
#else
    if (lck != NULL &&
        __kmp_get_user_lock_owner(lck) ==
            gtid) { /* this thread already has lock for this critical section */
#endif
      int index = p->s_top;
      struct cons_data cons = {NULL, ct_critical, 0, NULL};
      /* walk up the construct stack and try to find a critical with a name
         equal to the one we are entering */
      while (index != 0 && p->stack_data[index].name != lck) {
        index = p->stack_data[index].prev;
      }
      if (index != 0) {
        /* found a match on the stack (may not always happen because of
           interleaved critical for Fortran) */
        cons = p->stack_data[index];
      }
      /* we are in CRITICAL which is inside a CRITICAL construct of same name */
      __kmp_error_construct2(kmp_i18n_msg_CnsNestingSameName, ct, ident, &cons);
    }
  } else if (ct == ct_master || ct == ct_reduce) {
    if (p->w_top > p->p_top) {
      /* inside a WORKSHARING construct for this PARALLEL region */
      __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                             &p->stack_data[p->w_top]);
    }
    if (ct == ct_reduce && p->s_top > p->p_top) {
      /* inside another SYNC construct for this PARALLEL region */
      __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                             &p->stack_data[p->s_top]);
    }
  }
}
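
/* Illustrative user-code example (not part of the runtime) of what the
   ct_critical branch above catches: the same thread re-entering a critical
   section whose lock it already owns, e.g.

     #pragma omp critical(name)
     {
       ...
       #pragma omp critical(name)   // reported as CnsNestingSameName
       { ... }
     }
*/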
void
#if KMP_USE_DYNAMIC_LOCK
__kmp_push_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck, kmp_uint32 seq )
#else
__kmp_push_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck )
#endif
{
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  KMP_ASSERT(gtid == __kmp_get_gtid());
  KE_TRACE(10, ("__kmp_push_sync (gtid=%d)\n", gtid));
#if KMP_USE_DYNAMIC_LOCK
  __kmp_check_sync(gtid, ct, ident, lck, seq);
#else
  __kmp_check_sync(gtid, ct, ident, lck);
#endif
  KE_TRACE(100, (PUSH_MSG(ct, ident)));
  tos = ++p->stack_top;
  p->stack_data[tos].type = ct;
  p->stack_data[tos].prev = p->s_top;
  p->stack_data[tos].ident = ident;
  p->stack_data[tos].name = lck;
  p->s_top = tos;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}
void __kmp_pop_parallel(int gtid, ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
  tos = p->stack_top;
  KE_TRACE(10, ("__kmp_pop_parallel (%d %d)\n", gtid, __kmp_get_gtid()));
  if (tos == 0 || p->p_top == 0) {
    __kmp_error_construct(kmp_i18n_msg_CnsDetectedEnd, ct_parallel, ident);
  }
  if (tos != p->p_top || p->stack_data[tos].type != ct_parallel) {
    __kmp_error_construct2(kmp_i18n_msg_CnsExpectedEnd, ct_parallel, ident,
                           &p->stack_data[tos]);
  }
  KE_TRACE(100, (POP_MSG(p)));
  p->p_top = p->stack_data[tos].prev;
  p->stack_data[tos].type = ct_none;
  p->stack_data[tos].ident = NULL;
  p->stack_top = tos - 1;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}
enum cons_type __kmp_pop_workshare(int gtid, enum cons_type ct,
                                   ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  tos = p->stack_top;
  KE_TRACE(10, ("__kmp_pop_workshare (%d %d)\n", gtid, __kmp_get_gtid()));
  if (tos == 0 || p->w_top == 0) {
    __kmp_error_construct(kmp_i18n_msg_CnsDetectedEnd, ct, ident);
  }

  if (tos != p->w_top ||
      (p->stack_data[tos].type != ct &&
       // below are two exceptions to the rule that construct types must match
       !(p->stack_data[tos].type == ct_pdo_ordered && ct == ct_pdo) &&
       !(p->stack_data[tos].type == ct_task_ordered && ct == ct_task))) {
    __kmp_check_null_func();
    __kmp_error_construct2(kmp_i18n_msg_CnsExpectedEnd, ct, ident,
                           &p->stack_data[tos]);
  }
  KE_TRACE(100, (POP_MSG(p)));
  p->w_top = p->stack_data[tos].prev;
  p->stack_data[tos].type = ct_none;
  p->stack_data[tos].ident = NULL;
  p->stack_top = tos - 1;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
  return p->stack_data[p->w_top].type;
}
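
/* The value returned above is the type of the worksharing construct that
   becomes the current w_top after the pop, or ct_none when no worksharing
   construct remains open. */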
void __kmp_pop_sync(int gtid, enum cons_type ct, ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
  tos = p->stack_top;
  KE_TRACE(10, ("__kmp_pop_sync (%d %d)\n", gtid, __kmp_get_gtid()));
  if (tos == 0 || p->s_top == 0) {
    __kmp_error_construct(kmp_i18n_msg_CnsDetectedEnd, ct, ident);
  }
  if (tos != p->s_top || p->stack_data[tos].type != ct) {
    __kmp_check_null_func();
    __kmp_error_construct2(kmp_i18n_msg_CnsExpectedEnd, ct, ident,
                           &p->stack_data[tos]);
  }
  if (gtid < 0) {
    __kmp_check_null_func();
  }
  KE_TRACE(100, (POP_MSG(p)));
  p->s_top = p->stack_data[tos].prev;
  p->stack_data[tos].type = ct_none;
  p->stack_data[tos].ident = NULL;
  p->stack_top = tos - 1;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}
void __kmp_check_barrier(int gtid, enum cons_type ct, ident_t const *ident) {
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
  KE_TRACE(10, ("__kmp_check_barrier (loc: %p, gtid: %d %d)\n", ident, gtid,
                __kmp_get_gtid()));
  if (ident != NULL) {
    __kmp_check_null_func();
  }
  if (p->w_top > p->p_top) {
    /* we are already in a WORKSHARING construct for this PARALLEL region */
    __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                           &p->stack_data[p->w_top]);
  }
  if (p->s_top > p->p_top) {
    /* we are already in a SYNC construct for this PARALLEL region */
    __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                           &p->stack_data[p->s_top]);
  }
}
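
/* Note: these push/pop/check routines are normally only reached when the
   runtime's consistency checking is enabled; the __kmpc_* entry points
   typically guard calls to them with the __kmp_env_consistency_check flag,
   so an ordinary run skips them. */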