/*
 * kmp_error.cpp -- KPTS functions for error checking at runtime
 */

//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "kmp.h"
#include "kmp_error.h"
#include "kmp_i18n.h"
#include "kmp_str.h"

/* ------------------------------------------------------------------------ */

#define MIN_STACK 100

static char const *cons_text_c[] = {
    "(none)", "\"parallel\"", "work-sharing", /* this is not called "for"
                                                 because of lowering of
                                                 "sections" pragmas */
    "\"ordered\" work-sharing", /* this is not called "for ordered" because of
                                   lowering of "sections" pragmas */
    "\"sections\"",
    "work-sharing", /* this is not called "single" because of lowering of
                       "sections" pragmas */
    "\"taskq\"", "\"taskq\"", "\"taskq ordered\"", "\"critical\"",
    "\"ordered\"", /* in PARALLEL */
    "\"ordered\"", /* in PDO */
    "\"ordered\"", /* in TASKQ */
    "\"master\"", "\"reduce\"", "\"barrier\""};

#define get_src(ident) ((ident) == NULL ? NULL : (ident)->psource)

#define PUSH_MSG(ct, ident)                                                    \
  "\tpushing on stack: %s (%s)\n", cons_text_c[(ct)], get_src((ident))
#define POP_MSG(p)                                                             \
  "\tpopping off stack: %s (%s)\n", cons_text_c[(p)->stack_data[tos].type],    \
      get_src((p)->stack_data[tos].ident)

static int const cons_text_c_num = sizeof(cons_text_c) / sizeof(char const *);

/* --------------- START OF STATIC LOCAL ROUTINES ------------------------- */

static void __kmp_check_null_func(void) { /* nothing to do */
}

static void __kmp_expand_cons_stack(int gtid, struct cons_header *p) {
  int i;
  struct cons_data *d;

  /* TODO for monitor perhaps? */
  if (gtid < 0)
    __kmp_check_null_func();

  KE_TRACE(10, ("expand cons_stack (%d %d)\n", gtid, __kmp_get_gtid()));

  d = p->stack_data;

  p->stack_size = (p->stack_size * 2) + 100;

  /* TODO free the old data */
  p->stack_data = (struct cons_data *)__kmp_allocate(sizeof(struct cons_data) *
                                                     (p->stack_size + 1));

  for (i = p->stack_top; i >= 0; --i)
    p->stack_data[i] = d[i];

  /* NOTE: we do not free the old stack_data */
}

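// Growth sketch (illustrative): the stack grows geometrically with a constant
// bump, new_size = 2 * old_size + 100. Starting from MIN_STACK == 100 the
// capacity goes 100 -> 300 -> 700 -> 1500 -> ..., so n pushes trigger only
// O(log n) reallocations.
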
// NOTE: Function returns allocated memory, caller must free it!
static char *__kmp_pragma(int ct, ident_t const *ident) {
  char const *cons = NULL; // Construct name.
  char *file = NULL; // File name.
  char *func = NULL; // Function (routine) name.
  char *line = NULL; // Line number.
  kmp_str_buf_t buffer;
  kmp_msg_t prgm;
  __kmp_str_buf_init(&buffer);
  if (0 < ct && ct < cons_text_c_num) {
    cons = cons_text_c[ct];
  } else {
    KMP_DEBUG_ASSERT(0);
  }
  if (ident != NULL && ident->psource != NULL) {
    char *tail = NULL;
    __kmp_str_buf_print(&buffer, "%s",
                        ident->psource); // Copy source to buffer.
    // Split string in buffer to file, func, and line.
    tail = buffer.str;
    __kmp_str_split(tail, ';', NULL, &tail);
    __kmp_str_split(tail, ';', &file, &tail);
    __kmp_str_split(tail, ';', &func, &tail);
    __kmp_str_split(tail, ';', &line, &tail);
  }
  prgm = __kmp_msg_format(kmp_i18n_fmt_Pragma, cons, file, func, line);
  __kmp_str_buf_free(&buffer);
  return prgm.str;
} // __kmp_pragma

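// Illustrative note: per the ident_t documentation in kmp.h, psource holds
// semicolon-separated fields naming the source file, the function, and the
// line numbers delimiting the construct. For a hypothetical psource of
// ";foo.c;main;7;7;;", the splits above yield file == "foo.c",
// func == "main", line == "7". Every caller must release the returned
// string, as __kmp_error_construct() below does:
//
//   char *construct = __kmp_pragma(ct, ident); // heap-allocated
//   /* ... format the diagnostic with `construct` ... */
//   KMP_INTERNAL_FREE(construct);              // caller owns the memory
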
/* ----------------- END OF STATIC LOCAL ROUTINES ------------------------- */

void __kmp_error_construct(kmp_i18n_id_t id, // Message identifier.
                           enum cons_type ct, // Construct type.
                           ident_t const *ident // Construct ident.
                           ) {
  char *construct = __kmp_pragma(ct, ident);
  __kmp_fatal(__kmp_msg_format(id, construct), __kmp_msg_null);
  KMP_INTERNAL_FREE(construct);
}

void __kmp_error_construct2(kmp_i18n_id_t id, // Message identifier.
                            enum cons_type ct, // First construct type.
                            ident_t const *ident, // First construct ident.
                            struct cons_data const *cons // Second construct.
                            ) {
  char *construct1 = __kmp_pragma(ct, ident);
  char *construct2 = __kmp_pragma(cons->type, cons->ident);
  __kmp_fatal(__kmp_msg_format(id, construct1, construct2), __kmp_msg_null);
  KMP_INTERNAL_FREE(construct1);
  KMP_INTERNAL_FREE(construct2);
}

struct cons_header *__kmp_allocate_cons_stack(int gtid) {
  struct cons_header *p;

  /* TODO for monitor perhaps? */
  if (gtid < 0) {
    __kmp_check_null_func();
  }
  KE_TRACE(10, ("allocate cons_stack (%d)\n", gtid));
  p = (struct cons_header *)__kmp_allocate(sizeof(struct cons_header));
  p->p_top = p->w_top = p->s_top = 0;
  p->stack_data = (struct cons_data *)__kmp_allocate(sizeof(struct cons_data) *
                                                     (MIN_STACK + 1));
  p->stack_size = MIN_STACK;
  p->stack_top = 0;
  p->stack_data[0].type = ct_none;
  p->stack_data[0].prev = 0;
  p->stack_data[0].ident = NULL;
  return p;
}

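// Lifecycle sketch (illustrative; the actual calls are driven from the
// runtime's fork/join and construct entry/exit paths, with the stack stored
// in the thread's th.th_cons field):
//
//   struct cons_header *stk = __kmp_allocate_cons_stack(gtid);
//   /* ... install as __kmp_threads[gtid]->th.th_cons ... */
//   __kmp_push_parallel(gtid, loc);          // enter a parallel region
//   __kmp_push_workshare(gtid, ct_pdo, loc); // enter a loop construct
//   __kmp_pop_workshare(gtid, ct_pdo, loc);  // types must match on exit
//   __kmp_pop_parallel(gtid, loc);
//   __kmp_free_cons_stack(stk);
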
void __kmp_free_cons_stack(void *ptr) {
  struct cons_header *p = (struct cons_header *)ptr;
  if (p != NULL) {
    if (p->stack_data != NULL) {
      __kmp_free(p->stack_data);
      p->stack_data = NULL;
    }
    __kmp_free(p);
  }
}

#if KMP_DEBUG
static void dump_cons_stack(int gtid, struct cons_header *p) {
  int i;
  int tos = p->stack_top;
  kmp_str_buf_t buffer;
  __kmp_str_buf_init(&buffer);
  __kmp_str_buf_print(
      &buffer,
      "+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-\n");
  __kmp_str_buf_print(&buffer,
                      "Begin construct stack with %d items for thread %d\n",
                      tos, gtid);
  __kmp_str_buf_print(&buffer, " stack_top=%d { P=%d, W=%d, S=%d }\n", tos,
                      p->p_top, p->w_top, p->s_top);
  for (i = tos; i > 0; i--) {
    struct cons_data *c = &(p->stack_data[i]);
    __kmp_str_buf_print(
        &buffer, " stack_data[%2d] = { %s (%s) %d %p }\n", i,
        cons_text_c[c->type], get_src(c->ident), c->prev, c->name);
  }
  __kmp_str_buf_print(&buffer, "End construct stack for thread %d\n", gtid);
  __kmp_str_buf_print(
      &buffer,
      "+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-\n");
  __kmp_debug_printf("%s", buffer.str);
  __kmp_str_buf_free(&buffer);
}
#endif

void __kmp_push_parallel(int gtid, ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  KMP_DEBUG_ASSERT(__kmp_threads[gtid]->th.th_cons);
  KE_TRACE(10, ("__kmp_push_parallel (%d %d)\n", gtid, __kmp_get_gtid()));
  KE_TRACE(100, (PUSH_MSG(ct_parallel, ident)));
  if (p->stack_top >= p->stack_size) {
    __kmp_expand_cons_stack(gtid, p);
  }
  tos = ++p->stack_top;
  p->stack_data[tos].type = ct_parallel;
  p->stack_data[tos].prev = p->p_top;
  p->stack_data[tos].ident = ident;
  p->stack_data[tos].name = NULL;
  p->p_top = tos;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}

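// Stack shape sketch (illustrative): after pushing a parallel region, then a
// worksharing construct, then a sync construct, the stack holds
//
//   stack_data[1] = parallel  (p_top = 1)
//   stack_data[2] = for       (w_top = 2)
//   stack_data[3] = critical  (s_top = 3, stack_top = 3)
//
// with stack_data[0] a ct_none sentinel. Each entry's .prev records the
// previous top of its own kind, so a pop restores p_top/w_top/s_top in O(1)
// without scanning the stack.
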
void __kmp_check_workshare(int gtid, enum cons_type ct, ident_t const *ident) {
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  KMP_DEBUG_ASSERT(__kmp_threads[gtid]->th.th_cons);
  KE_TRACE(10, ("__kmp_check_workshare (%d %d)\n", gtid, __kmp_get_gtid()));

  if (p->stack_top >= p->stack_size) {
    __kmp_expand_cons_stack(gtid, p);
  }
  if (p->w_top > p->p_top &&
      !(IS_CONS_TYPE_TASKQ(p->stack_data[p->w_top].type) &&
        IS_CONS_TYPE_TASKQ(ct))) {
    // We are already in a WORKSHARE construct for this PARALLEL region.
    __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                           &p->stack_data[p->w_top]);
  }
  if (p->s_top > p->p_top) {
    // We are already in a SYNC construct for this PARALLEL region.
    __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                           &p->stack_data[p->s_top]);
  }
}

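// Example of nesting this check rejects (illustrative): a worksharing
// construct closely nested inside another worksharing construct bound to the
// same parallel region, detected above via w_top > p_top:
//
//   #pragma omp parallel
//   {
//     #pragma omp for
//     for (int i = 0; i < n; ++i) {
//       #pragma omp single // error: worksharing inside worksharing
//       { /* ... */ }
//     }
//   }
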
void __kmp_push_workshare(int gtid, enum cons_type ct, ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
  KE_TRACE(10, ("__kmp_push_workshare (%d %d)\n", gtid, __kmp_get_gtid()));
  __kmp_check_workshare(gtid, ct, ident);
  KE_TRACE(100, (PUSH_MSG(ct, ident)));
  tos = ++p->stack_top;
  p->stack_data[tos].type = ct;
  p->stack_data[tos].prev = p->w_top;
  p->stack_data[tos].ident = ident;
  p->stack_data[tos].name = NULL;
  p->w_top = tos;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}

void
#if KMP_USE_DYNAMIC_LOCK
__kmp_check_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck, kmp_uint32 seq )
#else
__kmp_check_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck )
#endif
{
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  KE_TRACE(10, ("__kmp_check_sync (gtid=%d)\n", __kmp_get_gtid()));

  if (p->stack_top >= p->stack_size)
    __kmp_expand_cons_stack(gtid, p);

  if (ct == ct_ordered_in_parallel || ct == ct_ordered_in_pdo ||
      ct == ct_ordered_in_taskq) {
    if (p->w_top <= p->p_top) {
/* we are not in a worksharing construct */
#ifdef BUILD_PARALLEL_ORDERED
      /* do not report error messages for PARALLEL ORDERED */
      KMP_ASSERT(ct == ct_ordered_in_parallel);
#else
      __kmp_error_construct(kmp_i18n_msg_CnsBoundToWorksharing, ct, ident);
#endif /* BUILD_PARALLEL_ORDERED */
    } else {
      /* inside a WORKSHARING construct for this PARALLEL region */
      if (!IS_CONS_TYPE_ORDERED(p->stack_data[p->w_top].type)) {
        if (p->stack_data[p->w_top].type == ct_taskq) {
          __kmp_error_construct2(kmp_i18n_msg_CnsNotInTaskConstruct, ct, ident,
                                 &p->stack_data[p->w_top]);
        } else {
          __kmp_error_construct2(kmp_i18n_msg_CnsNoOrderedClause, ct, ident,
                                 &p->stack_data[p->w_top]);
        }
      }
    }
    if (p->s_top > p->p_top && p->s_top > p->w_top) {
      /* inside a sync construct which is inside a worksharing construct */
      int index = p->s_top;
      enum cons_type stack_type;

      stack_type = p->stack_data[index].type;

      if (stack_type == ct_critical ||
          ((stack_type == ct_ordered_in_parallel ||
            stack_type == ct_ordered_in_pdo ||
            stack_type ==
                ct_ordered_in_taskq) && /* C doesn't allow named ordered;
                                           ordered in ordered gets error */
           p->stack_data[index].ident != NULL &&
           (p->stack_data[index].ident->flags & KMP_IDENT_KMPC))) {
        /* we are in ORDERED which is inside an ORDERED or CRITICAL construct */
        __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                               &p->stack_data[index]);
      }
    }
  } else if (ct == ct_critical) {
#if KMP_USE_DYNAMIC_LOCK
    if (lck != NULL &&
        __kmp_get_user_lock_owner(lck, seq) ==
            gtid) { /* this thread already has lock for this critical section */
#else
    if (lck != NULL &&
        __kmp_get_user_lock_owner(lck) ==
            gtid) { /* this thread already has lock for this critical section */
#endif
      int index = p->s_top;
      struct cons_data cons = {NULL, ct_critical, 0, NULL};
      /* walk up construct stack and try to find critical with matching name */
      while (index != 0 && p->stack_data[index].name != lck) {
        index = p->stack_data[index].prev;
      }
      if (index != 0) {
        /* found match on the stack (may not always because of interleaved
         * critical for Fortran) */
        cons = p->stack_data[index];
      }
      /* we are in CRITICAL which is inside a CRITICAL construct of same name */
      __kmp_error_construct2(kmp_i18n_msg_CnsNestingSameName, ct, ident, &cons);
    }
  } else if (ct == ct_master || ct == ct_reduce) {
    if (p->w_top > p->p_top) {
      /* inside a WORKSHARING construct for this PARALLEL region */
      __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                             &p->stack_data[p->w_top]);
    }
    if (ct == ct_reduce && p->s_top > p->p_top) {
      /* inside another SYNC construct for this PARALLEL region */
      __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                             &p->stack_data[p->s_top]);
    }
  }
}

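// Example of the self-deadlock this check catches (illustrative): nesting
// critical constructs with the same name. The ownership test above fires
// because the current thread already holds the lock backing the name:
//
//   #pragma omp critical(io)
//   {
//     #pragma omp critical(io) // error: same-name nesting would deadlock
//     { /* ... */ }
//   }
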
void
#if KMP_USE_DYNAMIC_LOCK
__kmp_push_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck, kmp_uint32 seq )
#else
__kmp_push_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck )
#endif
{
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  KMP_ASSERT(gtid == __kmp_get_gtid());
  KE_TRACE(10, ("__kmp_push_sync (gtid=%d)\n", gtid));
#if KMP_USE_DYNAMIC_LOCK
  __kmp_check_sync(gtid, ct, ident, lck, seq);
#else
  __kmp_check_sync(gtid, ct, ident, lck);
#endif
  KE_TRACE(100, (PUSH_MSG(ct, ident)));
  tos = ++p->stack_top;
  p->stack_data[tos].type = ct;
  p->stack_data[tos].prev = p->s_top;
  p->stack_data[tos].ident = ident;
  p->stack_data[tos].name = lck;
  p->s_top = tos;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}

/* ------------------------------------------------------------------------ */

void __kmp_pop_parallel(int gtid, ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
  tos = p->stack_top;
  KE_TRACE(10, ("__kmp_pop_parallel (%d %d)\n", gtid, __kmp_get_gtid()));
  if (tos == 0 || p->p_top == 0) {
    __kmp_error_construct(kmp_i18n_msg_CnsDetectedEnd, ct_parallel, ident);
  }
  if (tos != p->p_top || p->stack_data[tos].type != ct_parallel) {
    __kmp_error_construct2(kmp_i18n_msg_CnsExpectedEnd, ct_parallel, ident,
                           &p->stack_data[tos]);
  }
  KE_TRACE(100, (POP_MSG(p)));
  p->p_top = p->stack_data[tos].prev;
  p->stack_data[tos].type = ct_none;
  p->stack_data[tos].ident = NULL;
  p->stack_top = tos - 1;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}

enum cons_type __kmp_pop_workshare(int gtid, enum cons_type ct,
                                   ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  tos = p->stack_top;
  KE_TRACE(10, ("__kmp_pop_workshare (%d %d)\n", gtid, __kmp_get_gtid()));
  if (tos == 0 || p->w_top == 0) {
    __kmp_error_construct(kmp_i18n_msg_CnsDetectedEnd, ct, ident);
  }

  if (tos != p->w_top ||
      (p->stack_data[tos].type != ct &&
       // below are two exceptions to the rule that construct types must match
       !(p->stack_data[tos].type == ct_pdo_ordered && ct == ct_pdo) &&
       !(p->stack_data[tos].type == ct_task_ordered && ct == ct_task))) {
    __kmp_check_null_func();
    __kmp_error_construct2(kmp_i18n_msg_CnsExpectedEnd, ct, ident,
                           &p->stack_data[tos]);
  }
  KE_TRACE(100, (POP_MSG(p)));
  p->w_top = p->stack_data[tos].prev;
  p->stack_data[tos].type = ct_none;
  p->stack_data[tos].ident = NULL;
  p->stack_top = tos - 1;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
  return p->stack_data[p->w_top].type;
}

void __kmp_pop_sync(int gtid, enum cons_type ct, ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
  tos = p->stack_top;
  KE_TRACE(10, ("__kmp_pop_sync (%d %d)\n", gtid, __kmp_get_gtid()));
  if (tos == 0 || p->s_top == 0) {
    __kmp_error_construct(kmp_i18n_msg_CnsDetectedEnd, ct, ident);
  }
  if (tos != p->s_top || p->stack_data[tos].type != ct) {
    __kmp_check_null_func();
    __kmp_error_construct2(kmp_i18n_msg_CnsExpectedEnd, ct, ident,
                           &p->stack_data[tos]);
  }
  if (gtid < 0) {
    __kmp_check_null_func();
  }
  KE_TRACE(100, (POP_MSG(p)));
  p->s_top = p->stack_data[tos].prev;
  p->stack_data[tos].type = ct_none;
  p->stack_data[tos].ident = NULL;
  p->stack_top = tos - 1;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}

/* ------------------------------------------------------------------------ */

void __kmp_check_barrier(int gtid, enum cons_type ct, ident_t const *ident) {
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
  KE_TRACE(10, ("__kmp_check_barrier (loc: %p, gtid: %d %d)\n", ident, gtid,
                __kmp_get_gtid()));
  if (ident != 0) {
    __kmp_check_null_func();
  }
  if (p->w_top > p->p_top) {
    /* we are already in a WORKSHARING construct for this PARALLEL region */
    __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                           &p->stack_data[p->w_top]);
  }
  if (p->s_top > p->p_top) {
    /* we are already in a SYNC construct for this PARALLEL region */
    __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                           &p->stack_data[p->s_top]);
  }
}
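
// Example of nesting this check rejects (illustrative): a barrier closely
// nested inside a worksharing or synchronization region bound to the same
// parallel region:
//
//   #pragma omp parallel
//   {
//     #pragma omp for
//     for (int i = 0; i < n; ++i) {
//       #pragma omp barrier // error: barrier inside a worksharing region
//     }
//   }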