#define USE_CHECKS_COMMON

#define KMP_INLINE_SUBR 1

void kmp_threadprivate_insert_private_data(int gtid, void *pc_addr,
                                           void *data_addr, size_t pc_size);
struct private_common *kmp_threadprivate_insert(int gtid, void *pc_addr,
                                                void *data_addr,
                                                size_t pc_size);

struct shared_table __kmp_threadprivate_d_table;
static
#ifdef KMP_INLINE_SUBR
    __forceinline
#endif
    struct private_common *
    __kmp_threadprivate_find_task_common(struct common_table *tbl, int gtid,
                                         void *pc_addr) {
  struct private_common *tn;
#ifdef KMP_TASK_COMMON_DEBUG
  KC_TRACE(10, ("__kmp_threadprivate_find_task_common: thread#%d, called with "
                "address %p\n",
                gtid, pc_addr));
#endif

  for (tn = tbl->data[KMP_HASH(pc_addr)]; tn; tn = tn->next) {
    if (tn->gbl_addr == pc_addr) {
#ifdef KMP_TASK_COMMON_DEBUG
      KC_TRACE(10, ("__kmp_threadprivate_find_task_common: thread#%d, found "
                    "node %p on list\n",
                    gtid, pc_addr));
#endif
      return tn;
    }
  }
  return 0;
}

static
#ifdef KMP_INLINE_SUBR
    __forceinline
#endif
    struct shared_common *
    __kmp_find_shared_task_common(struct shared_table *tbl, int gtid,
                                  void *pc_addr) {
  struct shared_common *tn;
  for (tn = tbl->data[KMP_HASH(pc_addr)]; tn; tn = tn->next) {
    if (tn->gbl_addr == pc_addr) {
#ifdef KMP_TASK_COMMON_DEBUG
      KC_TRACE(
          10,
          ("__kmp_find_shared_task_common: thread#%d, found node %p on list\n",
           gtid, pc_addr));
#endif
      return tn;
    }
  }
  return 0;
}

// Create a template for the data-initialized storage. Either the template is
// NULL, indicating zero fill, or the template is a copy of the original data.
static struct private_data *__kmp_init_common_data(void *pc_addr,
                                                   size_t pc_size) {
  struct private_data *d;
  size_t i;
  char *p;
  d = (struct private_data *)__kmp_allocate(sizeof(struct private_data));
  // __kmp_allocate zeroes the memory, so d->data and d->next start out NULL.
  d->size = pc_size;
  d->more = 1;

  p = (char *)pc_addr;

  // Keep a copy of the original data only if it is not entirely zero;
  // otherwise d->data stays NULL and the copy step zero-fills instead.
  for (i = pc_size; i > 0; --i) {
    if (*p++ != '\0') {
      d->data = __kmp_allocate(pc_size);
      KMP_MEMCPY(d->data, pc_addr, pc_size);
      break;
    }
  }

  return d;
}
// Initialize the data area from the template.
static void __kmp_copy_common_data(void *pc_addr, struct private_data *d) {
  char *addr = (char *)pc_addr;
  size_t i, offset;

  for (offset = 0; d != 0; d = d->next) {
    for (i = d->more; i > 0; --i) {
      if (d->data == 0)
        memset(&addr[offset], '\0', d->size);
      else
        KMP_MEMCPY(&addr[offset], d->data, d->size);
      offset += d->size;
    }
  }
}
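/* The two helpers above implement a simple initialization "template": if the
   original common block is entirely zero, no copy is kept (d->data == NULL)
   and new storage is zero-filled; otherwise a byte-for-byte image of the
   original is replayed into each thread's copy. A minimal standalone sketch of
   that idea follows, illustrative only and not part of the runtime: it uses
   plain malloc/memcpy instead of __kmp_allocate/KMP_MEMCPY and a hypothetical
   tp_template type, and it omits the chained/repeated-block handling. */
#if 0
#include <stdlib.h>
#include <string.h>

typedef struct tp_template {
  void *data;  /* NULL means "zero fill", else a copy of the original bytes */
  size_t size; /* number of bytes covered by this template */
} tp_template;

static tp_template make_template(const void *src, size_t size) {
  tp_template t = {NULL, size};
  const char *p = (const char *)src;
  for (size_t i = 0; i < size; ++i) {
    if (p[i] != '\0') { /* not all zero: remember the original contents */
      t.data = malloc(size);
      memcpy(t.data, src, size);
      break;
    }
  }
  return t;
}

static void apply_template(void *dst, const tp_template *t) {
  if (t->data == NULL)
    memset(dst, 0, t->size); /* zero-fill case */
  else
    memcpy(dst, t->data, t->size); /* replay the saved initial image */
}
#endif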
/* We are called from __kmp_serial_initialize() with __kmp_initz_lock held. */
void __kmp_common_initialize(void) {
  if (!TCR_4(__kmp_init_common)) {
    int q;
#ifdef KMP_DEBUG
    int gtid;
#endif

    __kmp_threadpriv_cache_list = NULL;

#ifdef KMP_DEBUG
    /* verify the uber masters were initialized */
    for (gtid = 0; gtid < __kmp_threads_capacity; gtid++)
      if (__kmp_root[gtid]) {
        KMP_DEBUG_ASSERT(__kmp_root[gtid]->r.r_uber_thread);
        for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q)
          KMP_DEBUG_ASSERT(
              !__kmp_root[gtid]->r.r_uber_thread->th.th_pri_common->data[q]);
      }
#endif /* KMP_DEBUG */

    for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q)
      __kmp_threadprivate_d_table.data[q] = 0;

    TCW_4(__kmp_init_common, TRUE);
  }
}
/* Call all destructors for threadprivate data belonging to all threads. */
void __kmp_common_destroy(void) {
  if (TCR_4(__kmp_init_common)) {
    int q;

    TCW_4(__kmp_init_common, FALSE);

    for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q) {
      int gtid;
      struct private_common *tn;
      struct shared_common *d_tn;

      /* C++ destructors need to be called once per thread before exiting.
         Don't call destructors for the primary thread's copy unless the copy
         constructor was used. */

      for (d_tn = __kmp_threadprivate_d_table.data[q]; d_tn;
           d_tn = d_tn->next) {
        if (d_tn->is_vec) {
          if (d_tn->dt.dtorv != 0) {
            for (gtid = 0; gtid < __kmp_all_nth; ++gtid) {
              if (__kmp_threads[gtid]) {
                if ((__kmp_foreign_tp) ? (!KMP_INITIAL_GTID(gtid))
                                       : (!KMP_UBER_GTID(gtid))) {
                  tn = __kmp_threadprivate_find_task_common(
                      __kmp_threads[gtid]->th.th_pri_common, gtid,
                      d_tn->gbl_addr);
                  if (tn) {
                    (*d_tn->dt.dtorv)(tn->par_addr, d_tn->vec_len);
                  }
                }
              }
            }
            if (d_tn->obj_init != 0) {
              (*d_tn->dt.dtorv)(d_tn->obj_init, d_tn->vec_len);
            }
          }
        } else {
          if (d_tn->dt.dtor != 0) {
            for (gtid = 0; gtid < __kmp_all_nth; ++gtid) {
              if (__kmp_threads[gtid]) {
                if ((__kmp_foreign_tp) ? (!KMP_INITIAL_GTID(gtid))
                                       : (!KMP_UBER_GTID(gtid))) {
                  tn = __kmp_threadprivate_find_task_common(
                      __kmp_threads[gtid]->th.th_pri_common, gtid,
                      d_tn->gbl_addr);
                  if (tn) {
                    (*d_tn->dt.dtor)(tn->par_addr);
                  }
                }
              }
            }
            if (d_tn->obj_init != 0) {
              (*d_tn->dt.dtor)(d_tn->obj_init);
            }
          }
        }
      }
      __kmp_threadprivate_d_table.data[q] = 0;
    }
  }
}
/* Call all destructors for threadprivate data belonging to this thread. */
void __kmp_common_destroy_gtid(int gtid) {
  struct private_common *tn;
  struct shared_common *d_tn;

  if (!TCR_4(__kmp_init_gtid)) {
    return;
  }

  KC_TRACE(10, ("__kmp_common_destroy_gtid: T#%d called\n", gtid));
  if ((__kmp_foreign_tp) ? (!KMP_INITIAL_GTID(gtid)) : (!KMP_UBER_GTID(gtid))) {

    if (TCR_4(__kmp_init_common)) {

      for (tn = __kmp_threads[gtid]->th.th_pri_head; tn; tn = tn->link) {

        d_tn = __kmp_find_shared_task_common(&__kmp_threadprivate_d_table, gtid,
                                             tn->gbl_addr);
        if (d_tn == NULL)
          continue;
        if (d_tn->is_vec) {
          if (d_tn->dt.dtorv != 0) {
            (void)(*d_tn->dt.dtorv)(tn->par_addr, d_tn->vec_len);
          }
          if (d_tn->obj_init != 0) {
            (void)(*d_tn->dt.dtorv)(d_tn->obj_init, d_tn->vec_len);
          }
        } else {
          if (d_tn->dt.dtor != 0) {
            (void)(*d_tn->dt.dtor)(tn->par_addr);
          }
          if (d_tn->obj_init != 0) {
            (void)(*d_tn->dt.dtor)(d_tn->obj_init);
          }
        }
      }
      KC_TRACE(30, ("__kmp_common_destroy_gtid: T#%d threadprivate destructors "
                    "complete\n",
                    gtid));
    }
  }
}

#ifdef KMP_TASK_COMMON_DEBUG
static void dump_list(void) {
  int p, q;
  for (p = 0; p < __kmp_all_nth; ++p) {
    if (!__kmp_threads[p])
      continue;
    for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q) {
      if (__kmp_threads[p]->th.th_pri_common->data[q]) {
        struct private_common *tn;

        KC_TRACE(10, ("\tdump_list: gtid:%d addresses\n", p));

        for (tn = __kmp_threads[p]->th.th_pri_common->data[q]; tn;
             tn = tn->next) {
          KC_TRACE(10,
                   ("\tdump_list: THREADPRIVATE: Serial %p -> Parallel %p\n",
                    tn->gbl_addr, tn->par_addr));
        }
      }
    }
  }
}
#endif /* KMP_TASK_COMMON_DEBUG */
// NOTE: this routine is to be called only from the serial part of the program.
void kmp_threadprivate_insert_private_data(int gtid, void *pc_addr,
                                           void *data_addr, size_t pc_size) {
  struct shared_common **lnk_tn, *d_tn;

  KMP_DEBUG_ASSERT(__kmp_threads[gtid] &&
                   __kmp_threads[gtid]->th.th_root->r.r_active == 0);

  d_tn = __kmp_find_shared_task_common(&__kmp_threadprivate_d_table, gtid,
                                       pc_addr);

  if (d_tn == 0) {
    d_tn = (struct shared_common *)__kmp_allocate(sizeof(struct shared_common));

    d_tn->gbl_addr = pc_addr;
    d_tn->pod_init = __kmp_init_common_data(data_addr, pc_size);
    // Remaining fields (obj_init, ctors, dtor, is_vec, vec_len) stay zero;
    // __kmp_allocate zeroes the memory.
    d_tn->cmn_size = pc_size;

    __kmp_acquire_lock(&__kmp_global_lock, gtid);

    lnk_tn = &(__kmp_threadprivate_d_table.data[KMP_HASH(pc_addr)]);

    d_tn->next = *lnk_tn;
    *lnk_tn = d_tn;

    __kmp_release_lock(&__kmp_global_lock, gtid);
  }
}
struct private_common *kmp_threadprivate_insert(int gtid, void *pc_addr,
                                                void *data_addr,
                                                size_t pc_size) {
  struct private_common *tn, **tt;
  struct shared_common *d_tn;

  /* +++++++++ START OF CRITICAL SECTION +++++++++ */
  __kmp_acquire_lock(&__kmp_global_lock, gtid);

  tn = (struct private_common *)__kmp_allocate(sizeof(struct private_common));

  tn->gbl_addr = pc_addr;

  d_tn = __kmp_find_shared_task_common(
      &__kmp_threadprivate_d_table, gtid,
      pc_addr); /* Only the MASTER data table exists. */

  if (d_tn != 0) {
    /* This threadprivate variable has already been registered. */

    if (d_tn->pod_init == 0 && d_tn->obj_init == 0) {
      d_tn->cmn_size = pc_size;

      if (d_tn->is_vec) {
        if (d_tn->ct.ctorv != 0) {
          /* Construct from scratch so no prototype exists. */
          d_tn->obj_init = 0;
        } else if (d_tn->cct.cctorv != 0) {
          /* Now data-initialize the prototype since it was previously
             registered. */
          d_tn->obj_init = (void *)__kmp_allocate(d_tn->cmn_size);
          (void)(*d_tn->cct.cctorv)(d_tn->obj_init, pc_addr, d_tn->vec_len);
        } else {
          d_tn->pod_init = __kmp_init_common_data(data_addr, d_tn->cmn_size);
        }
      } else {
        if (d_tn->ct.ctor != 0) {
          /* Construct from scratch so no prototype exists. */
          d_tn->obj_init = 0;
        } else if (d_tn->cct.cctor != 0) {
          /* Now data-initialize the prototype since it was previously
             registered. */
          d_tn->obj_init = (void *)__kmp_allocate(d_tn->cmn_size);
          (void)(*d_tn->cct.cctor)(d_tn->obj_init, pc_addr);
        } else {
          d_tn->pod_init = __kmp_init_common_data(data_addr, d_tn->cmn_size);
        }
      }
    }
  } else {
    struct shared_common **lnk_tn;

    d_tn = (struct shared_common *)__kmp_allocate(sizeof(struct shared_common));
    d_tn->gbl_addr = pc_addr;
    d_tn->cmn_size = pc_size;
    d_tn->pod_init = __kmp_init_common_data(data_addr, pc_size);
    // Remaining fields stay zero; __kmp_allocate zeroes the memory.

    lnk_tn = &(__kmp_threadprivate_d_table.data[KMP_HASH(pc_addr)]);

    d_tn->next = *lnk_tn;
    *lnk_tn = d_tn;
  }

  tn->cmn_size = d_tn->cmn_size;

  if ((__kmp_foreign_tp) ? (KMP_INITIAL_GTID(gtid)) : (KMP_UBER_GTID(gtid))) {
    tn->par_addr = (void *)pc_addr;
  } else {
    tn->par_addr = (void *)__kmp_allocate(tn->cmn_size);
  }

  __kmp_release_lock(&__kmp_global_lock, gtid);
  /* +++++++++ END OF CRITICAL SECTION +++++++++ */

#ifdef USE_CHECKS_COMMON
  if (pc_size > d_tn->cmn_size) {
    KC_TRACE(
        10, ("__kmp_threadprivate_insert: THREADPRIVATE: %p (%" KMP_UINTPTR_SPEC
             " ,%" KMP_UINTPTR_SPEC ")\n",
             pc_addr, pc_size, d_tn->cmn_size));
    KMP_FATAL(TPCommonBlocksInconsist);
  }
#endif /* USE_CHECKS_COMMON */

  tt = &(__kmp_threads[gtid]->th.th_pri_common->data[KMP_HASH(pc_addr)]);

#ifdef KMP_TASK_COMMON_DEBUG
  if (*tt != 0) {
    KC_TRACE(
        10,
        ("__kmp_threadprivate_insert: WARNING! thread#%d: collision on %p\n",
         gtid, pc_addr));
  }
#endif
  tn->next = *tt;
  *tt = tn;

#ifdef KMP_TASK_COMMON_DEBUG
  KC_TRACE(10,
           ("__kmp_threadprivate_insert: thread#%d, inserted node %p on list\n",
            gtid, pc_addr));
  dump_list();
#endif

  /* Link the node into the thread's simple list. */
  tn->link = __kmp_threads[gtid]->th.th_pri_head;
  __kmp_threads[gtid]->th.th_pri_head = tn;

  if ((__kmp_foreign_tp) ? (KMP_INITIAL_GTID(gtid)) : (KMP_UBER_GTID(gtid)))
    return tn;

  /* C++ objects with constructors/destructors: don't call constructors for
     the primary thread's copy. */
  if (d_tn->is_vec) {
    if (d_tn->ct.ctorv != 0) {
      (void)(*d_tn->ct.ctorv)(tn->par_addr, d_tn->vec_len);
    } else if (d_tn->cct.cctorv != 0) {
      (void)(*d_tn->cct.cctorv)(tn->par_addr, d_tn->obj_init, d_tn->vec_len);
    } else if (tn->par_addr != tn->gbl_addr) {
      __kmp_copy_common_data(tn->par_addr, d_tn->pod_init);
    }
  } else {
    if (d_tn->ct.ctor != 0) {
      (void)(*d_tn->ct.ctor)(tn->par_addr);
    } else if (d_tn->cct.cctor != 0) {
      (void)(*d_tn->cct.cctor)(tn->par_addr, d_tn->obj_init);
    } else if (tn->par_addr != tn->gbl_addr) {
      __kmp_copy_common_data(tn->par_addr, d_tn->pod_init);
    }
  }

  return tn;
}
/* Register constructors and destructors for thread private data. */
void __kmpc_threadprivate_register(ident_t *loc, void *data, kmpc_ctor ctor,
                                   kmpc_cctor cctor, kmpc_dtor dtor) {
  struct shared_common *d_tn, **lnk_tn;

  KC_TRACE(10, ("__kmpc_threadprivate_register: called\n"));

#ifdef USE_CHECKS_COMMON
  /* copy constructor must be zero for current code gen */
  KMP_ASSERT(cctor == 0);
#endif /* USE_CHECKS_COMMON */

  /* Only the global data table exists. */
  d_tn = __kmp_find_shared_task_common(&__kmp_threadprivate_d_table, -1, data);

  if (d_tn == 0) {
    d_tn = (struct shared_common *)__kmp_allocate(sizeof(struct shared_common));
    d_tn->gbl_addr = data;

    d_tn->ct.ctor = ctor;
    d_tn->cct.cctor = cctor;
    d_tn->dt.dtor = dtor;
    // is_vec, vec_len, obj_init and pod_init stay zero (__kmp_allocate zeroes).

    lnk_tn = &(__kmp_threadprivate_d_table.data[KMP_HASH(data)]);

    d_tn->next = *lnk_tn;
    *lnk_tn = d_tn;
  }
}
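/* For reference, a sketch of the kind of glue a compiler generates for
   "static T x; #pragma omp threadprivate(x)": small thunks matching the
   kmpc_ctor/kmpc_dtor signatures plus one registration call at program
   start-up. The thunk and variable names below are made up for illustration;
   only the __kmpc_threadprivate_register entry point and its typedefs come
   from the runtime interface. Illustrative only, not compiled into the
   runtime. */
#if 0
static int tp_counter; /* the threadprivate variable */

static void *tp_counter_ctor(void *ptr) { /* default-construct one copy */
  *(int *)ptr = 0;
  return ptr;
}
static void tp_counter_dtor(void *ptr) { /* nothing to destroy for an int */
  (void)ptr;
}

static void tp_counter_register(void) {
  /* source-location descriptor; normally emitted by the compiler */
  static ident_t loc = {0};
  __kmpc_threadprivate_register(&loc, &tp_counter, tp_counter_ctor,
                                /*cctor=*/NULL, tp_counter_dtor);
}
#endif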
void *__kmpc_threadprivate(ident_t *loc, kmp_int32 global_tid, void *data,
                           size_t size) {
  void *ret;
  struct private_common *tn;

  KC_TRACE(10, ("__kmpc_threadprivate: T#%d called\n", global_tid));

#ifdef USE_CHECKS_COMMON
  if (!__kmp_init_serial)
    KMP_FATAL(RTLNotInitialized);
#endif /* USE_CHECKS_COMMON */

  if (!__kmp_threads[global_tid]->th.th_root->r.r_active && !__kmp_foreign_tp) {
    /* The parallel address will never overlap with the data address, so use
       data_address = data. */

    KC_TRACE(20, ("__kmpc_threadprivate: T#%d inserting private data\n",
                  global_tid));
    kmp_threadprivate_insert_private_data(global_tid, data, data, size);

    ret = data;
  } else {
    KC_TRACE(
        50,
        ("__kmpc_threadprivate: T#%d try to find private data at address %p\n",
         global_tid, data));
    tn = __kmp_threadprivate_find_task_common(
        __kmp_threads[global_tid]->th.th_pri_common, global_tid, data);

    if (tn) {
      KC_TRACE(20, ("__kmpc_threadprivate: T#%d found data\n", global_tid));
#ifdef USE_CHECKS_COMMON
      if ((size_t)size > tn->cmn_size) {
        KC_TRACE(10, ("THREADPRIVATE: %p (%" KMP_UINTPTR_SPEC
                      " ,%" KMP_UINTPTR_SPEC ")\n",
                      data, size, tn->cmn_size));
        KMP_FATAL(TPCommonBlocksInconsist);
      }
#endif /* USE_CHECKS_COMMON */
    } else {
      KC_TRACE(20, ("__kmpc_threadprivate: T#%d inserting data\n", global_tid));
      tn = kmp_threadprivate_insert(global_tid, data, data, size);
    }

    ret = tn->par_addr;
  }
  KC_TRACE(10, ("__kmpc_threadprivate: T#%d exiting; return value = %p\n",
                global_tid, ret));

  return ret;
}
static kmp_cached_addr_t *__kmp_find_cache(void *data) {
  kmp_cached_addr_t *ptr = __kmp_threadpriv_cache_list;
  while (ptr && ptr->data != data)
    ptr = ptr->next;
  return ptr;
}
/* Allocate private storage for threadprivate data. */
void *__kmpc_threadprivate_cached(ident_t *loc, kmp_int32 global_tid,
                                  void *data, size_t size, void ***cache) {
  KC_TRACE(10, ("__kmpc_threadprivate_cached: T#%d called with cache: %p, "
                "address: %p, size: %" KMP_SIZE_T_SPEC "\n",
                global_tid, *cache, data, size));

  if (TCR_PTR(*cache) == 0) {
    __kmp_acquire_lock(&__kmp_global_lock, global_tid);

    if (TCR_PTR(*cache) == 0) {
      __kmp_acquire_bootstrap_lock(&__kmp_tp_cached_lock);
      // The compiler often passes in a NULL cache even if one has already been
      // created for this variable.
      void **my_cache;
      kmp_cached_addr_t *tp_cache_addr;
      // Look for an existing cache.
      tp_cache_addr = __kmp_find_cache(data);
      if (!tp_cache_addr) { // Cache was never created; do it now.
        __kmp_tp_cached = 1;
        KMP_ITT_IGNORE(my_cache = (void **)__kmp_allocate(
                           sizeof(void *) * __kmp_tp_capacity +
                           sizeof(kmp_cached_addr_t)););
        // No need to zero the allocated memory; __kmp_allocate does that.
        KC_TRACE(50, ("__kmpc_threadprivate_cached: T#%d allocated cache at "
                      "address %p\n",
                      global_tid, my_cache));
        // Add the address of my_cache to the linked list for cleanup later.
        tp_cache_addr = (kmp_cached_addr_t *)&my_cache[__kmp_tp_capacity];
        tp_cache_addr->addr = my_cache;
        tp_cache_addr->data = data;
        tp_cache_addr->compiler_cache = cache;
        tp_cache_addr->next = __kmp_threadpriv_cache_list;
        __kmp_threadpriv_cache_list = tp_cache_addr;
      } else { // A cache was already created; use it.
        my_cache = tp_cache_addr->addr;
        tp_cache_addr->compiler_cache = cache;
      }
      KMP_MB();

      TCW_PTR(*cache, my_cache);
      __kmp_release_bootstrap_lock(&__kmp_tp_cached_lock);

      KMP_MB();
    }
    __kmp_release_lock(&__kmp_global_lock, global_tid);
  }

  void *ret;
  if ((ret = TCR_PTR((*cache)[global_tid])) == 0) {
    ret = __kmpc_threadprivate(loc, global_tid, data, (size_t)size);

    TCW_PTR((*cache)[global_tid], ret);
  }
  KC_TRACE(10,
           ("__kmpc_threadprivate_cached: T#%d exiting; return value = %p\n",
            global_tid, ret));
  return ret;
}
// This function should only be called when both __kmp_tp_cached_lock and
// kmp_forkjoin_lock are held.
void __kmp_threadprivate_resize_cache(int newCapacity) {
  KC_TRACE(10, ("__kmp_threadprivate_resize_cache: called with size: %d\n",
                newCapacity));

  kmp_cached_addr_t *ptr = __kmp_threadpriv_cache_list;

  while (ptr) {
    if (ptr->data) {
      // This location has an active cache; resize it.
      void **my_cache;
      KMP_ITT_IGNORE(my_cache =
                         (void **)__kmp_allocate(sizeof(void *) * newCapacity +
                                                 sizeof(kmp_cached_addr_t)););
      // No need to zero the allocated memory; __kmp_allocate does that.
      KC_TRACE(50, ("__kmp_threadprivate_resize_cache: allocated cache at %p\n",
                    my_cache));
      // Copy the old cache into the new cache.
      void **old_cache = ptr->addr;
      for (int i = 0; i < __kmp_tp_capacity; ++i) {
        my_cache[i] = old_cache[i];
      }

      // Add the address of the new my_cache to the linked list for cleanup.
      kmp_cached_addr_t *tp_cache_addr;
      tp_cache_addr = (kmp_cached_addr_t *)&my_cache[newCapacity];
      tp_cache_addr->addr = my_cache;
      tp_cache_addr->data = ptr->data;
      tp_cache_addr->compiler_cache = ptr->compiler_cache;
      tp_cache_addr->next = __kmp_threadpriv_cache_list;
      __kmp_threadpriv_cache_list = tp_cache_addr;

      // Publish the new cache to the compiler's location: if the compiler is
      // still pointing at the old cache, swing it to the new one; otherwise
      // __kmpc_threadprivate_cached will store the resized cache later.
      (void)KMP_COMPARE_AND_STORE_PTR(tp_cache_addr->compiler_cache, old_cache,
                                      my_cache);

      // Nullify the old cache's data pointer so it is skipped next time.
      ptr->data = NULL;
    }
    ptr = ptr->next;
  }
  // After all caches are resized, update __kmp_tp_capacity to the new size.
  *(volatile int *)&__kmp_tp_capacity = newCapacity;
}
/* Register vector constructors and destructors for thread private data. */
void __kmpc_threadprivate_register_vec(ident_t *loc, void *data,
                                       kmpc_ctor_vec ctor, kmpc_cctor_vec cctor,
                                       kmpc_dtor_vec dtor,
                                       size_t vector_length) {
  struct shared_common *d_tn, **lnk_tn;

  KC_TRACE(10, ("__kmpc_threadprivate_register_vec: called\n"));

#ifdef USE_CHECKS_COMMON
  /* copy constructor must be zero for current code gen */
  KMP_ASSERT(cctor == 0);
#endif /* USE_CHECKS_COMMON */

  d_tn = __kmp_find_shared_task_common(
      &__kmp_threadprivate_d_table, -1,
      data); /* Only the global data table exists. */

  if (d_tn == 0) {
    d_tn = (struct shared_common *)__kmp_allocate(sizeof(struct shared_common));
    d_tn->gbl_addr = data;

    d_tn->ct.ctorv = ctor;
    d_tn->cct.cctorv = cctor;
    d_tn->dt.dtorv = dtor;
    d_tn->is_vec = TRUE;
    d_tn->vec_len = (size_t)vector_length;
    // obj_init and pod_init stay zero (__kmp_allocate zeroes the memory).

    lnk_tn = &(__kmp_threadprivate_d_table.data[KMP_HASH(data)]);

    d_tn->next = *lnk_tn;
    *lnk_tn = d_tn;
  }
}
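/* The _vec entry point is the array form of registration: the constructor,
   copy constructor and destructor each receive the element count so they can
   run per element. A sketch of compiler-style glue for
   "static T arr[N]; #pragma omp threadprivate(arr)", with made-up thunk and
   variable names; only the __kmpc_threadprivate_register_vec entry point and
   its typedefs come from the runtime interface. Illustrative only, not
   compiled into the runtime. */
#if 0
#define TP_N 16
static double tp_arr[TP_N]; /* the threadprivate array */

static void *tp_arr_ctor_vec(void *ptr, size_t n) {
  for (size_t i = 0; i < n; ++i)
    ((double *)ptr)[i] = 0.0; /* default-construct each element */
  return ptr;
}
static void tp_arr_dtor_vec(void *ptr, size_t n) {
  (void)ptr;
  (void)n; /* trivial element type: nothing to destroy */
}

static void tp_arr_register(void) {
  static ident_t loc = {0}; /* normally emitted by the compiler */
  __kmpc_threadprivate_register_vec(&loc, tp_arr, tp_arr_ctor_vec,
                                    /*cctor=*/NULL, tp_arr_dtor_vec, TP_N);
}
#endif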
void __kmp_cleanup_threadprivate_caches() {
  kmp_cached_addr_t *ptr = __kmp_threadpriv_cache_list;

  while (ptr) {
    void **cache = ptr->addr;
    __kmp_threadpriv_cache_list = ptr->next;
    if (*ptr->compiler_cache)
      *ptr->compiler_cache = NULL;
    ptr->compiler_cache = NULL;
    ptr->data = NULL;
    ptr->addr = NULL;
    ptr->next = NULL;
    // The threadprivate data pointed at by the cache entries is destroyed
    // elsewhere, at thread exit; here we only free the cache storage itself.
    __kmp_free(cache); // the descriptor node lives inside this allocation
    ptr = __kmp_threadpriv_cache_list;
  }
}