#ifndef KMP_AFFINITY_H
#define KMP_AFFINITY_H

#include "kmp.h"
#include "kmp_os.h"

#if KMP_AFFINITY_SUPPORTED
#if KMP_USE_HWLOC
class KMPHwlocAffinity : public KMPAffinity {
public:
  class Mask : public KMPAffinity::Mask {
    hwloc_cpuset_t mask;

  public:
    Mask() {
      mask = hwloc_bitmap_alloc();
      this->zero();
    }
    ~Mask() { hwloc_bitmap_free(mask); }
    void set(int i) override { hwloc_bitmap_set(mask, i); }
    bool is_set(int i) const override { return hwloc_bitmap_isset(mask, i); }
    void clear(int i) override { hwloc_bitmap_clr(mask, i); }
    void zero() override { hwloc_bitmap_zero(mask); }
    void copy(const KMPAffinity::Mask *src) override {
      const Mask *convert = static_cast<const Mask *>(src);
      hwloc_bitmap_copy(mask, convert->mask);
    }
    void bitwise_and(const KMPAffinity::Mask *rhs) override {
      const Mask *convert = static_cast<const Mask *>(rhs);
      hwloc_bitmap_and(mask, mask, convert->mask);
    }
    void bitwise_or(const KMPAffinity::Mask *rhs) override {
      const Mask *convert = static_cast<const Mask *>(rhs);
      hwloc_bitmap_or(mask, mask, convert->mask);
    }
    void bitwise_not() override { hwloc_bitmap_not(mask, mask); }
    int begin() const override { return hwloc_bitmap_first(mask); }
    int end() const override { return -1; }
    int next(int previous) const override {
      return hwloc_bitmap_next(mask, previous);
    }
    int get_system_affinity(bool abort_on_error) override {
      KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
                  "Illegal get affinity operation when not capable");
      long retval =
          hwloc_get_cpubind(__kmp_hwloc_topology, mask, HWLOC_CPUBIND_THREAD);
      if (retval >= 0) {
        return 0;
      }
      int error = errno;
      if (abort_on_error)
        __kmp_fatal(KMP_MSG(FatalSysError), KMP_ERR(error), __kmp_msg_null);
      return error;
    }
    int set_system_affinity(bool abort_on_error) const override {
      KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
                  "Illegal set affinity operation when not capable");
      long retval =
          hwloc_set_cpubind(__kmp_hwloc_topology, mask, HWLOC_CPUBIND_THREAD);
      if (retval >= 0) {
        return 0;
      }
      int error = errno;
      if (abort_on_error)
        __kmp_fatal(KMP_MSG(FatalSysError), KMP_ERR(error), __kmp_msg_null);
      return error;
    }
#if KMP_OS_WINDOWS
    int set_process_affinity(bool abort_on_error) const override {
      KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
                  "Illegal set process affinity operation when not capable");
      int error = 0;
      const hwloc_topology_support *support =
          hwloc_topology_get_support(__kmp_hwloc_topology);
      if (support->cpubind->set_proc_cpubind) {
        int retval = hwloc_set_cpubind(__kmp_hwloc_topology, mask,
                                       HWLOC_CPUBIND_PROCESS);
        if (retval >= 0)
          return 0;
        error = errno;
        if (abort_on_error)
          __kmp_fatal(KMP_MSG(FatalSysError), KMP_ERR(error), __kmp_msg_null);
      }
      return error;
    }
#endif
    int get_proc_group() const override {
      int group = -1;
#if KMP_OS_WINDOWS
      if (__kmp_num_proc_groups == 1) {
        return 1;
      }
      for (int i = 0; i < __kmp_num_proc_groups; i++) {
        // On Windows, the long type is always 32 bits, so one 64-bit group
        // mask spans two ulongs.
        unsigned long first_32_bits = hwloc_bitmap_to_ith_ulong(mask, i * 2);
        unsigned long second_32_bits =
            hwloc_bitmap_to_ith_ulong(mask, i * 2 + 1);
        if (first_32_bits == 0 && second_32_bits == 0) {
          continue;
        }
        if (group >= 0) {
          return -1; // the mask spans more than one processor group
        }
        group = i;
      }
#endif /* KMP_OS_WINDOWS */
      return group;
    }
  };
  void determine_capable(const char *var) override {
    const hwloc_topology_support *topology_support;
    if (__kmp_hwloc_topology == NULL) {
      if (hwloc_topology_init(&__kmp_hwloc_topology) < 0) {
        __kmp_hwloc_error = TRUE;
        if (__kmp_affinity_verbose)
          KMP_WARNING(AffHwlocErrorOccurred, var, "hwloc_topology_init()");
      }
      if (hwloc_topology_load(__kmp_hwloc_topology) < 0) {
        __kmp_hwloc_error = TRUE;
        if (__kmp_affinity_verbose)
          KMP_WARNING(AffHwlocErrorOccurred, var, "hwloc_topology_load()");
      }
    }
    topology_support = hwloc_topology_get_support(__kmp_hwloc_topology);
    // Is the system capable of setting/getting this thread's affinity?
    // Is topology discovery possible? (pu indicates the ability to discover
    // processing units.) And did every hwloc_* call so far succeed?
    if (topology_support && topology_support->cpubind->set_thisthread_cpubind &&
        topology_support->cpubind->get_thisthread_cpubind &&
        topology_support->discovery->pu && !__kmp_hwloc_error) {
      // Enable affinity according to the KMP_AFFINITY_CAPABLE() macro.
      KMP_AFFINITY_ENABLE(TRUE);
    } else {
      // Indicate that hwloc didn't work and disable affinity.
      __kmp_hwloc_error = TRUE;
      KMP_AFFINITY_DISABLE();
    }
  }
  void bind_thread(int which) override {
    KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
                "Illegal set affinity operation when not capable");
    KMPAffinity::Mask *mask;
    KMP_CPU_ALLOC_ON_STACK(mask);
    KMP_CPU_ZERO(mask);
    KMP_CPU_SET(which, mask);
    __kmp_set_system_affinity(mask, TRUE);
    KMP_CPU_FREE_FROM_STACK(mask);
  }
  KMPAffinity::Mask *allocate_mask() override { return new Mask(); }
  void deallocate_mask(KMPAffinity::Mask *m) override { delete m; }
  KMPAffinity::Mask *allocate_mask_array(int num) override {
    return new Mask[num];
  }
  void deallocate_mask_array(KMPAffinity::Mask *array) override {
    Mask *hwloc_array = static_cast<Mask *>(array);
    delete[] hwloc_array;
  }
  KMPAffinity::Mask *index_mask_array(KMPAffinity::Mask *array,
                                      int index) override {
    Mask *hwloc_array = static_cast<Mask *>(array);
    return &(hwloc_array[index]);
  }
  api_type get_api_type() const override { return HWLOC; }
};
#endif /* KMP_USE_HWLOC */
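// Illustrative usage sketch (not part of the original header; it assumes the
// runtime's dispatch pointer, declared elsewhere as
// `extern KMPAffinity *__kmp_affinity_dispatch;`): a client of the
// KMPAffinity interface pins the calling thread to logical CPU 3 without
// knowing whether hwloc or the native OS API sits underneath.
//
//   KMPAffinity::Mask *m = __kmp_affinity_dispatch->allocate_mask();
//   m->zero();
//   m->set(3);
//   m->set_system_affinity(/*abort_on_error=*/true);
//   __kmp_affinity_dispatch->deallocate_mask(m);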
#if KMP_OS_LINUX || KMP_OS_FREEBSD
/* On some of the older OS's that we build on, these constants aren't present
   in <asm/unistd.h> #included from <sys/syscall.h>. They must be the same on
   all systems of the same arch where they are defined, and they cannot
   change. */
#if KMP_OS_LINUX
#include <sys/syscall.h>
#if KMP_ARCH_X86 || KMP_ARCH_ARM
#ifndef __NR_sched_setaffinity
#define __NR_sched_setaffinity 241
#elif __NR_sched_setaffinity != 241
#error Wrong code for setaffinity system call.
#endif /* __NR_sched_setaffinity */
#ifndef __NR_sched_getaffinity
#define __NR_sched_getaffinity 242
#elif __NR_sched_getaffinity != 242
#error Wrong code for getaffinity system call.
#endif /* __NR_sched_getaffinity */
#elif KMP_ARCH_AARCH64
#ifndef __NR_sched_setaffinity
#define __NR_sched_setaffinity 122
#elif __NR_sched_setaffinity != 122
#error Wrong code for setaffinity system call.
#endif /* __NR_sched_setaffinity */
#ifndef __NR_sched_getaffinity
#define __NR_sched_getaffinity 123
#elif __NR_sched_getaffinity != 123
#error Wrong code for getaffinity system call.
#endif /* __NR_sched_getaffinity */
#elif KMP_ARCH_X86_64
#ifndef __NR_sched_setaffinity
#define __NR_sched_setaffinity 203
#elif __NR_sched_setaffinity != 203
#error Wrong code for setaffinity system call.
#endif /* __NR_sched_setaffinity */
#ifndef __NR_sched_getaffinity
#define __NR_sched_getaffinity 204
#elif __NR_sched_getaffinity != 204
#error Wrong code for getaffinity system call.
#endif /* __NR_sched_getaffinity */
#elif KMP_ARCH_PPC64
#ifndef __NR_sched_setaffinity
#define __NR_sched_setaffinity 222
#elif __NR_sched_setaffinity != 222
#error Wrong code for setaffinity system call.
#endif /* __NR_sched_setaffinity */
#ifndef __NR_sched_getaffinity
#define __NR_sched_getaffinity 223
#elif __NR_sched_getaffinity != 223
#error Wrong code for getaffinity system call.
#endif /* __NR_sched_getaffinity */
#elif KMP_ARCH_MIPS
#ifndef __NR_sched_setaffinity
#define __NR_sched_setaffinity 4239
#elif __NR_sched_setaffinity != 4239
#error Wrong code for setaffinity system call.
#endif /* __NR_sched_setaffinity */
#ifndef __NR_sched_getaffinity
#define __NR_sched_getaffinity 4240
#elif __NR_sched_getaffinity != 4240
#error Wrong code for getaffinity system call.
#endif /* __NR_sched_getaffinity */
#elif KMP_ARCH_MIPS64
#ifndef __NR_sched_setaffinity
#define __NR_sched_setaffinity 5195
#elif __NR_sched_setaffinity != 5195
#error Wrong code for setaffinity system call.
#endif /* __NR_sched_setaffinity */
#ifndef __NR_sched_getaffinity
#define __NR_sched_getaffinity 5196
#elif __NR_sched_getaffinity != 5196
#error Wrong code for getaffinity system call.
#endif /* __NR_sched_getaffinity */
#else
#error Unknown or unsupported architecture
#endif /* KMP_ARCH_* */
#elif KMP_OS_FREEBSD
#include <pthread.h>
#include <pthread_np.h>
#endif
class KMPNativeAffinity : public KMPAffinity {
  class Mask : public KMPAffinity::Mask {
    typedef unsigned long mask_t;
    typedef decltype(__kmp_affin_mask_size) mask_size_type;
    static const unsigned int BITS_PER_MASK_T = sizeof(mask_t) * CHAR_BIT;
    static const mask_t ONE = 1;
    mask_size_type get_num_mask_types() const {
      return __kmp_affin_mask_size / sizeof(mask_t);
    }

  public:
    mask_t *mask;
    Mask() { mask = (mask_t *)__kmp_allocate(__kmp_affin_mask_size); }
    ~Mask() {
      if (mask)
        __kmp_free(mask);
    }
    void set(int i) override {
      mask[i / BITS_PER_MASK_T] |= (ONE << (i % BITS_PER_MASK_T));
    }
    bool is_set(int i) const override {
      return (mask[i / BITS_PER_MASK_T] & (ONE << (i % BITS_PER_MASK_T)));
    }
    void clear(int i) override {
      mask[i / BITS_PER_MASK_T] &= ~(ONE << (i % BITS_PER_MASK_T));
    }
    void zero() override {
      mask_size_type e = get_num_mask_types();
      for (mask_size_type i = 0; i < e; ++i)
        mask[i] = (mask_t)0;
    }
    void copy(const KMPAffinity::Mask *src) override {
      const Mask *convert = static_cast<const Mask *>(src);
      mask_size_type e = get_num_mask_types();
      for (mask_size_type i = 0; i < e; ++i)
        mask[i] = convert->mask[i];
    }
    void bitwise_and(const KMPAffinity::Mask *rhs) override {
      const Mask *convert = static_cast<const Mask *>(rhs);
      mask_size_type e = get_num_mask_types();
      for (mask_size_type i = 0; i < e; ++i)
        mask[i] &= convert->mask[i];
    }
    void bitwise_or(const KMPAffinity::Mask *rhs) override {
      const Mask *convert = static_cast<const Mask *>(rhs);
      mask_size_type e = get_num_mask_types();
      for (mask_size_type i = 0; i < e; ++i)
        mask[i] |= convert->mask[i];
    }
    void bitwise_not() override {
      mask_size_type e = get_num_mask_types();
      for (mask_size_type i = 0; i < e; ++i)
        mask[i] = ~(mask[i]);
    }
    int begin() const override {
      int retval = 0;
      while (retval < end() && !is_set(retval))
        ++retval;
      return retval;
    }
    int end() const override {
      int e;
      __kmp_type_convert(get_num_mask_types() * BITS_PER_MASK_T, &e);
      return e;
    }
    int next(int previous) const override {
      int retval = previous + 1;
      while (retval < end() && !is_set(retval))
        ++retval;
      return retval;
    }
    int get_system_affinity(bool abort_on_error) override {
      KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
                  "Illegal get affinity operation when not capable");
#if KMP_OS_LINUX
      long retval =
          syscall(__NR_sched_getaffinity, 0, __kmp_affin_mask_size, mask);
#elif KMP_OS_FREEBSD
      int r = pthread_getaffinity_np(pthread_self(), __kmp_affin_mask_size,
                                     reinterpret_cast<cpuset_t *>(mask));
      int retval = (r == 0 ? 0 : -1);
#endif
      if (retval >= 0) {
        return 0;
      }
      int error = errno;
      if (abort_on_error) {
        __kmp_fatal(KMP_MSG(FatalSysError), KMP_ERR(error), __kmp_msg_null);
      }
      return error;
    }
    int set_system_affinity(bool abort_on_error) const override {
      KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
                  "Illegal set affinity operation when not capable");
#if KMP_OS_LINUX
      long retval =
          syscall(__NR_sched_setaffinity, 0, __kmp_affin_mask_size, mask);
#elif KMP_OS_FREEBSD
      int r = pthread_setaffinity_np(pthread_self(), __kmp_affin_mask_size,
                                     reinterpret_cast<cpuset_t *>(mask));
      int retval = (r == 0 ? 0 : -1);
#endif
      if (retval >= 0) {
        return 0;
      }
      int error = errno;
      if (abort_on_error) {
        __kmp_fatal(KMP_MSG(FatalSysError), KMP_ERR(error), __kmp_msg_null);
      }
      return error;
    }
  };
  void determine_capable(const char *env_var) override {
    __kmp_affinity_determine_capable(env_var);
  }
  void bind_thread(int which) override { __kmp_affinity_bind_thread(which); }
  KMPAffinity::Mask *allocate_mask() override {
    KMPNativeAffinity::Mask *retval = new Mask();
    return retval;
  }
  void deallocate_mask(KMPAffinity::Mask *m) override {
    KMPNativeAffinity::Mask *native_mask =
        static_cast<KMPNativeAffinity::Mask *>(m);
    delete native_mask;
  }
  KMPAffinity::Mask *allocate_mask_array(int num) override {
    return new Mask[num];
  }
  void deallocate_mask_array(KMPAffinity::Mask *array) override {
    Mask *linux_array = static_cast<Mask *>(array);
    delete[] linux_array;
  }
  KMPAffinity::Mask *index_mask_array(KMPAffinity::Mask *array,
                                      int index) override {
    Mask *linux_array = static_cast<Mask *>(array);
    return &(linux_array[index]);
  }
  api_type get_api_type() const override { return NATIVE_OS; }
};
#endif /* KMP_OS_LINUX || KMP_OS_FREEBSD */
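// Worked example (illustrative, not part of the original header): with a
// 64-bit mask_t, Mask::set(70) touches word 70 / 64 = 1 and bit 70 % 64 = 6,
// i.e. mask[1] |= ONE << 6. begin()/next() later recover 70 by scanning the
// words bit by bit, and end() is the total bit capacity of the flat array.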
#if KMP_OS_WINDOWS
class KMPNativeAffinity : public KMPAffinity {
  class Mask : public KMPAffinity::Mask {
    typedef ULONG_PTR mask_t;
    static const int BITS_PER_MASK_T = sizeof(mask_t) * CHAR_BIT;
    mask_t *mask;

  public:
    Mask() {
      mask = (mask_t *)__kmp_allocate(sizeof(mask_t) * __kmp_num_proc_groups);
    }
    ~Mask() {
      if (mask)
        __kmp_free(mask);
    }
    void set(int i) override {
      mask[i / BITS_PER_MASK_T] |= ((mask_t)1 << (i % BITS_PER_MASK_T));
    }
    bool is_set(int i) const override {
      return (mask[i / BITS_PER_MASK_T] & ((mask_t)1 << (i % BITS_PER_MASK_T)));
    }
    void clear(int i) override {
      mask[i / BITS_PER_MASK_T] &= ~((mask_t)1 << (i % BITS_PER_MASK_T));
    }
    void zero() override {
      for (int i = 0; i < __kmp_num_proc_groups; ++i)
        mask[i] = 0;
    }
    void copy(const KMPAffinity::Mask *src) override {
      const Mask *convert = static_cast<const Mask *>(src);
      for (int i = 0; i < __kmp_num_proc_groups; ++i)
        mask[i] = convert->mask[i];
    }
    void bitwise_and(const KMPAffinity::Mask *rhs) override {
      const Mask *convert = static_cast<const Mask *>(rhs);
      for (int i = 0; i < __kmp_num_proc_groups; ++i)
        mask[i] &= convert->mask[i];
    }
    void bitwise_or(const KMPAffinity::Mask *rhs) override {
      const Mask *convert = static_cast<const Mask *>(rhs);
      for (int i = 0; i < __kmp_num_proc_groups; ++i)
        mask[i] |= convert->mask[i];
    }
    void bitwise_not() override {
      for (int i = 0; i < __kmp_num_proc_groups; ++i)
        mask[i] = ~(mask[i]);
    }
    int begin() const override {
      int retval = 0;
      while (retval < end() && !is_set(retval))
        ++retval;
      return retval;
    }
    int end() const override { return __kmp_num_proc_groups * BITS_PER_MASK_T; }
    int next(int previous) const override {
      int retval = previous + 1;
      while (retval < end() && !is_set(retval))
        ++retval;
      return retval;
    }
    int set_process_affinity(bool abort_on_error) const override {
      if (__kmp_num_proc_groups <= 1) {
        if (!SetProcessAffinityMask(GetCurrentProcess(), *mask)) {
          DWORD error = GetLastError();
          if (abort_on_error) {
            __kmp_fatal(KMP_MSG(CantSetThreadAffMask), KMP_ERR(error),
                        __kmp_msg_null);
          }
          return error;
        }
      }
      return 0;
    }
    int set_system_affinity(bool abort_on_error) const override {
      if (__kmp_num_proc_groups > 1) {
        // Check for a valid mask.
        GROUP_AFFINITY ga;
        int group = get_proc_group();
        if (group < 0) {
          if (abort_on_error) {
            KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
          }
          return -1;
        }
        // Transform the bit vector into a GROUP_AFFINITY struct
        // and make the system call to set affinity.
        ga.Group = group;
        ga.Mask = mask[group];
        ga.Reserved[0] = ga.Reserved[1] = ga.Reserved[2] = 0;

        KMP_DEBUG_ASSERT(__kmp_SetThreadGroupAffinity != NULL);
        if (__kmp_SetThreadGroupAffinity(GetCurrentThread(), &ga, NULL) == 0) {
          DWORD error = GetLastError();
          if (abort_on_error) {
            __kmp_fatal(KMP_MSG(CantSetThreadAffMask), KMP_ERR(error),
                        __kmp_msg_null);
          }
          return error;
        }
      } else {
        if (!SetThreadAffinityMask(GetCurrentThread(), *mask)) {
          DWORD error = GetLastError();
          if (abort_on_error) {
            __kmp_fatal(KMP_MSG(CantSetThreadAffMask), KMP_ERR(error),
                        __kmp_msg_null);
          }
          return error;
        }
      }
      return 0;
    }
    int get_system_affinity(bool abort_on_error) override {
      if (__kmp_num_proc_groups > 1) {
        this->zero();
        GROUP_AFFINITY ga;
        KMP_DEBUG_ASSERT(__kmp_GetThreadGroupAffinity != NULL);
        if (__kmp_GetThreadGroupAffinity(GetCurrentThread(), &ga) == 0) {
          DWORD error = GetLastError();
          if (abort_on_error) {
            __kmp_fatal(KMP_MSG(FunctionError, "GetThreadGroupAffinity()"),
                        KMP_ERR(error), __kmp_msg_null);
          }
          return error;
        }
        if ((ga.Group < 0) || (ga.Group > __kmp_num_proc_groups) ||
            (ga.Mask == 0))
          return -1;
        mask[ga.Group] = ga.Mask;
      } else {
        mask_t newMask, sysMask, retval;
        if (!GetProcessAffinityMask(GetCurrentProcess(), &newMask, &sysMask)) {
          DWORD error = GetLastError();
          if (abort_on_error) {
            __kmp_fatal(KMP_MSG(FunctionError, "GetProcessAffinityMask()"),
                        KMP_ERR(error), __kmp_msg_null);
          }
          return error;
        }
        // There is no "get" call for the thread mask, so set it twice:
        // once to read the old value, once to restore it.
        retval = SetThreadAffinityMask(GetCurrentThread(), newMask);
        if (!retval) {
          DWORD error = GetLastError();
          if (abort_on_error) {
            __kmp_fatal(KMP_MSG(FunctionError, "SetThreadAffinityMask()"),
                        KMP_ERR(error), __kmp_msg_null);
          }
          return error;
        }
        newMask = SetThreadAffinityMask(GetCurrentThread(), retval);
        if (!newMask) {
          DWORD error = GetLastError();
          if (abort_on_error) {
            __kmp_fatal(KMP_MSG(FunctionError, "SetThreadAffinityMask()"),
                        KMP_ERR(error), __kmp_msg_null);
          }
          return error;
        }
        *mask = retval;
      }
      return 0;
    }
    int get_proc_group() const override {
      int group = -1;
      if (__kmp_num_proc_groups == 1) {
        return 1;
      }
      for (int i = 0; i < __kmp_num_proc_groups; i++) {
        if (mask[i] == 0)
          continue;
        if (group >= 0)
          return -1; // the mask spans more than one processor group
        group = i;
      }
      return group;
    }
  };
  void determine_capable(const char *env_var) override {
    __kmp_affinity_determine_capable(env_var);
  }
  void bind_thread(int which) override { __kmp_affinity_bind_thread(which); }
  KMPAffinity::Mask *allocate_mask() override { return new Mask(); }
  void deallocate_mask(KMPAffinity::Mask *m) override { delete m; }
  KMPAffinity::Mask *allocate_mask_array(int num) override {
    return new Mask[num];
  }
  void deallocate_mask_array(KMPAffinity::Mask *array) override {
    Mask *windows_array = static_cast<Mask *>(array);
    delete[] windows_array;
  }
  KMPAffinity::Mask *index_mask_array(KMPAffinity::Mask *array,
                                      int index) override {
    Mask *windows_array = static_cast<Mask *>(array);
    return &(windows_array[index]);
  }
  api_type get_api_type() const override { return NATIVE_OS; }
};
#endif /* KMP_OS_WINDOWS */
#endif /* KMP_AFFINITY_SUPPORTED */
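// Illustrative note on the Windows Mask above (not part of the original
// header): with BITS_PER_MASK_T == 64, logical CPU 70 lives in processor
// group 70 / 64 = 1 at bit 70 % 64 = 6, so the Mask keeps one mask_t per
// group. get_proc_group() returns that single group index, or -1 when set
// bits span several groups, which one SetThreadGroupAffinity() call cannot
// express.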
typedef enum kmp_hw_core_type_t {
  KMP_HW_CORE_TYPE_UNKNOWN = 0x0,
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
  KMP_HW_CORE_TYPE_ATOM = 0x20,
  KMP_HW_CORE_TYPE_CORE = 0x40,
  KMP_HW_MAX_NUM_CORE_TYPES = 3,
#else
  KMP_HW_MAX_NUM_CORE_TYPES = 1,
#endif
} kmp_hw_core_type_t;
class kmp_hw_thread_t {
public:
  static const int UNKNOWN_ID = -1;
  static int compare_ids(const void *a, const void *b);
  static int compare_compact(const void *a, const void *b);
  int ids[KMP_HW_LAST];
  int sub_ids[KMP_HW_LAST];
  bool leader;
  int os_id;
  kmp_hw_core_type_t core_type;
  void print() const;
  void clear() {
    for (int i = 0; i < (int)KMP_HW_LAST; ++i)
      ids[i] = UNKNOWN_ID;
    leader = false;
    core_type = KMP_HW_CORE_TYPE_UNKNOWN;
  }
};
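// Illustrative example (not part of the original header): after topology
// enumeration, the second hardware thread of core 3 on the first socket of a
// depth-3 (socket/core/thread) topology might carry
// ids[0..2] = { 0, 3, 1 }, with the remaining entries left at UNKNOWN_ID.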
class kmp_topology_t {
  struct flags_t {
    int uniform : 1;
    int reserved : 31;
  };

  int depth;

  // The following arrays are all 'depth' long:
  // ordered array of the types in the topology
  kmp_hw_t *types;
  // ratio[i] holds the max number of level-i items per item one level up,
  // e.g., [ 4 packages | 6 cores / package | 2 threads / core ]
  int *ratio;
  // absolute number of items at each topology layer
  int *count;

  // Storage for the core types and the number of each core type
  // for hybrid processors
  kmp_hw_core_type_t core_types[KMP_HW_MAX_NUM_CORE_TYPES];
  int core_types_count[KMP_HW_MAX_NUM_CORE_TYPES];

  // The hardware threads array; each hw_thread's ids and sub_ids
  // are 'depth' deep
  int num_hw_threads;
  kmp_hw_thread_t *hw_threads;

  // Equivalence table: the value is the equivalent type in the types[]
  // array, or KMP_HW_UNKNOWN if there is no equivalent type
  kmp_hw_t equivalent[KMP_HW_LAST];

  // Flags describing the topology
  flags_t flags;

  // Insert a new topology layer after allocation
  void _insert_layer(kmp_hw_t type, const int *ids);

#if KMP_GROUP_AFFINITY
  // Insert topology information about Windows Processor groups
  void _insert_windows_proc_groups();
#endif

  // Count each item & get the number of x's per y
  // (e.g., the number of cores and the number of threads per core)
  void _gather_enumeration_information();

  // Remove layers that don't add information to the topology
  void _remove_radix1_layers();

  // Find out if the topology is uniform
  void _discover_uniformity();

  // Set the last level cache equivalent type
  void _set_last_level_cache();

  // Increment the count of cores of type 'type'
  void _increment_core_type(kmp_hw_core_type_t type) {
    for (int i = 0; i < KMP_HW_MAX_NUM_CORE_TYPES; ++i) {
      if (core_types[i] == KMP_HW_CORE_TYPE_UNKNOWN) {
        core_types[i] = type;
        core_types_count[i] = 1;
        break;
      } else if (core_types[i] == type) {
        core_types_count[i]++;
        break;
      }
    }
  }

public:
  // Force use of allocate()/deallocate()
  kmp_topology_t() = delete;
  kmp_topology_t(const kmp_topology_t &t) = delete;
  kmp_topology_t(kmp_topology_t &&t) = delete;
  kmp_topology_t &operator=(const kmp_topology_t &t) = delete;
  kmp_topology_t &operator=(kmp_topology_t &&t) = delete;

  static kmp_topology_t *allocate(int nproc, int ndepth,
                                  const kmp_hw_t *types);
  static void deallocate(kmp_topology_t *);

  // Functions used in create_map() routines
  kmp_hw_thread_t &at(int index) {
    KMP_DEBUG_ASSERT(index >= 0 && index < num_hw_threads);
    return hw_threads[index];
  }
  const kmp_hw_thread_t &at(int index) const {
    KMP_DEBUG_ASSERT(index >= 0 && index < num_hw_threads);
    return hw_threads[index];
  }
  int get_num_hw_threads() const { return num_hw_threads; }
  void sort_ids() {
    qsort(hw_threads, num_hw_threads, sizeof(kmp_hw_thread_t),
          kmp_hw_thread_t::compare_ids);
  }
  // Check whether the hardware ids are unique
  bool check_ids() const;

  // Functions to call after the create_map() routine
  void canonicalize();
  void canonicalize(int pkgs, int cores_per_pkg, int thr_per_core, int cores);

  // Functions used after canonicalize() is called
  bool filter_hw_subset();
  bool is_close(int hwt1, int hwt2, int level) const;
  bool is_uniform() const { return flags.uniform; }

  // Return the equivalent type for 'type' in the topology,
  // or KMP_HW_UNKNOWN if there is none
  kmp_hw_t get_equivalent_type(kmp_hw_t type) const { return equivalent[type]; }

  // Set type1 equivalent to type2
  void set_equivalent_type(kmp_hw_t type1, kmp_hw_t type2) {
    KMP_DEBUG_ASSERT_VALID_HW_TYPE(type1);
    KMP_DEBUG_ASSERT_VALID_HW_TYPE(type2);
    kmp_hw_t real_type2 = equivalent[type2];
    if (real_type2 == KMP_HW_UNKNOWN)
      real_type2 = type2;
    equivalent[type1] = real_type2;
    // Any type may have previously been set equivalent to type1;
    // all of those entries must be redirected to type2 as well.
    KMP_FOREACH_HW_TYPE(type) {
      if (equivalent[type] == type1) {
        equivalent[type] = real_type2;
      }
    }
  }

  // Calculate the number of level1 items per level2 item
  // (e.g., the number of threads per core)
  int calculate_ratio(int level1, int level2) const {
    KMP_DEBUG_ASSERT(level1 >= 0 && level1 < depth);
    KMP_DEBUG_ASSERT(level2 >= 0 && level2 < depth);
    int r = 1;
    for (int level = level1; level > level2; --level)
      r *= ratio[level];
    return r;
  }
  int get_ratio(int level) const {
    KMP_DEBUG_ASSERT(level >= 0 && level < depth);
    return ratio[level];
  }
  int get_depth() const { return depth; }
  kmp_hw_t get_type(int level) const {
    KMP_DEBUG_ASSERT(level >= 0 && level < depth);
    return types[level];
  }
  int get_level(kmp_hw_t type) const {
    KMP_DEBUG_ASSERT_VALID_HW_TYPE(type);
    int eq_type = equivalent[type];
    if (eq_type == KMP_HW_UNKNOWN)
      return -1;
    for (int i = 0; i < depth; ++i)
      if (types[i] == eq_type)
        return i;
    return -1;
  }
  int get_count(int level) const {
    KMP_DEBUG_ASSERT(level >= 0 && level < depth);
    return count[level];
  }
#if KMP_AFFINITY_SUPPORTED
  void sort_compact() {
    qsort(hw_threads, num_hw_threads, sizeof(kmp_hw_thread_t),
          kmp_hw_thread_t::compare_compact);
  }
#endif
  void print(const char *env_var = "KMP_AFFINITY") const;
};
extern kmp_topology_t *__kmp_topology;
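// Illustrative sketch (not part of the original header): querying a
// canonicalized topology for a hypothetical 2-socket, 8-core/socket,
// 2-thread/core machine. get_ratio() is "items per parent item" while
// get_count() is absolute:
//
//   for (int level = 0; level < __kmp_topology->get_depth(); ++level) {
//     kmp_hw_t type = __kmp_topology->get_type(level);   // SOCKET, CORE, ...
//     int per_parent = __kmp_topology->get_ratio(level); // 2, 8, 2
//     int total = __kmp_topology->get_count(level);      // 2, 16, 32
//   }
//   // threads per socket = 8 * 2 = 16 (levels are hypothetical indices):
//   int thr_per_pkg = __kmp_topology->calculate_ratio(thread_lvl, socket_lvl);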
class kmp_hw_subset_t {
public:
  struct item_t {
    int num;
    kmp_hw_t type;
    int offset;
  };

private:
  int depth;
  int capacity;
  item_t *items;
  kmp_uint64 set;
  bool absolute;
  // The set must be able to handle up to KMP_HW_LAST number of layers
  KMP_BUILD_ASSERT(sizeof(set) * 8 >= KMP_HW_LAST);
  // Sort the KMP_HW_SUBSET items into topology order; unknown topology
  // types end up at the beginning of the subset
  static int hw_subset_compare(const void *i1, const void *i2) {
    kmp_hw_t type1 = ((const item_t *)i1)->type;
    kmp_hw_t type2 = ((const item_t *)i2)->type;
    int level1 = __kmp_topology->get_level(type1);
    int level2 = __kmp_topology->get_level(type2);
    return level1 - level2;
  }

public:
  // Force use of allocate()/deallocate()
  kmp_hw_subset_t() = delete;
  kmp_hw_subset_t(const kmp_hw_subset_t &t) = delete;
  kmp_hw_subset_t(kmp_hw_subset_t &&t) = delete;
  kmp_hw_subset_t &operator=(const kmp_hw_subset_t &t) = delete;
  kmp_hw_subset_t &operator=(kmp_hw_subset_t &&t) = delete;

  static kmp_hw_subset_t *allocate() {
    int initial_capacity = 5;
    kmp_hw_subset_t *retval =
        (kmp_hw_subset_t *)__kmp_allocate(sizeof(kmp_hw_subset_t));
    retval->depth = 0;
    retval->capacity = initial_capacity;
    retval->set = 0ull;
    retval->absolute = false;
    retval->items =
        (item_t *)__kmp_allocate(sizeof(item_t) * initial_capacity);
    return retval;
  }
  static void deallocate(kmp_hw_subset_t *subset) {
    __kmp_free(subset->items);
    __kmp_free(subset);
  }
  void set_absolute() { absolute = true; }
  bool is_absolute() const { return absolute; }
  void push_back(int num, kmp_hw_t type, int offset) {
    if (depth == capacity - 1) {
      capacity *= 2;
      item_t *new_items = (item_t *)__kmp_allocate(sizeof(item_t) * capacity);
      for (int i = 0; i < depth; ++i)
        new_items[i] = items[i];
      __kmp_free(items);
      items = new_items;
    }
    items[depth].num = num;
    items[depth].type = type;
    items[depth].offset = offset;
    depth++;
    set |= (1ull << type);
  }
  int get_depth() const { return depth; }
  const item_t &at(int index) const {
    KMP_DEBUG_ASSERT(index >= 0 && index < depth);
    return items[index];
  }
  item_t &at(int index) {
    KMP_DEBUG_ASSERT(index >= 0 && index < depth);
    return items[index];
  }
  void remove(int index) {
    KMP_DEBUG_ASSERT(index >= 0 && index < depth);
    set &= ~(1ull << items[index].type);
    for (int j = index + 1; j < depth; ++j) {
      items[j - 1] = items[j];
    }
    depth--;
  }
  void sort() {
    KMP_DEBUG_ASSERT(__kmp_topology);
    qsort(items, depth, sizeof(item_t), hw_subset_compare);
  }
  bool specified(kmp_hw_t type) const { return ((set & (1ull << type)) > 0); }
  void dump() const {
    printf("**********************\n");
    printf("*** kmp_hw_subset: ***\n");
    printf("* depth: %d\n", depth);
    printf("* items:\n");
    for (int i = 0; i < depth; ++i) {
      printf("num: %d, type: %s, offset: %d\n", items[i].num,
             __kmp_hw_get_keyword(items[i].type), items[i].offset);
    }
    printf("* set: 0x%llx\n", set);
    printf("* absolute: %d\n", absolute);
    printf("**********************\n");
  }
};
extern kmp_hw_subset_t *__kmp_hw_subset;
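// Illustrative sketch (not part of the original header): the KMP_HW_SUBSET
// parser (elsewhere in the runtime) would populate this structure roughly
// like so for "1s,4c,2t":
//
//   kmp_hw_subset_t *s = kmp_hw_subset_t::allocate();
//   s->push_back(1, KMP_HW_SOCKET, 0);
//   s->push_back(4, KMP_HW_CORE, 0);
//   s->push_back(2, KMP_HW_THREAD, 0);
//   s->sort(); // order items by topology level
//   // s->specified(KMP_HW_CORE) is now true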
// Machine-specific hierarchy info, computed once at init: maps threads onto
// the machine hierarchy (or our best guess at it) for the purpose of an
// efficient barrier. With no machine hierarchy information, it produces a
// tree similar to the one used in the hyper barrier.
class hierarchy_info {
public:
  // Defaults for number of leaves and branching factor when no affinity
  // information is available
  static const kmp_uint32 maxLeaves = 4;
  static const kmp_uint32 minBranch = 4;
  // Number of levels in the hierarchy; oversubscription adds levels that
  // double the thread capacity each time
  kmp_uint32 maxLevels;
  // Depth of the machine hierarchy: levels along the longest root-to-leaf path
  kmp_uint32 depth;
  kmp_uint32 base_num_threads;
  enum init_status { initialized = 0, not_initialized = 1, initializing = 2 };
  volatile kmp_int8 uninitialized; // one of init_status
  volatile kmp_int8 resizing; // 0 = not resizing, 1 = resizing
  // Level 0 corresponds to leaves; numPerLevel[i] is the number of children
  // the parent of a node at level i has, and skipPerLevel[i] is the number
  // of leaves a node at level i spans
  kmp_uint32 *numPerLevel;
  kmp_uint32 *skipPerLevel;

  void deriveLevels() {
    int hier_depth = __kmp_topology->get_depth();
    for (int i = hier_depth - 1, level = 0; i >= 0; --i, ++level) {
      numPerLevel[level] = __kmp_topology->get_ratio(i);
    }
  }

  hierarchy_info()
      : maxLevels(7), depth(1), uninitialized(not_initialized), resizing(0) {}

  void fini() {
    if (!uninitialized && numPerLevel) {
      __kmp_free(numPerLevel);
      numPerLevel = NULL;
      uninitialized = not_initialized;
    }
  }

  void init(int num_addrs) {
    kmp_int8 bool_result = KMP_COMPARE_AND_STORE_ACQ8(
        &uninitialized, not_initialized, initializing);
    if (bool_result == 0) { // Wait for initialization
      while (TCR_1(uninitialized) != initialized)
        KMP_CPU_PAUSE();
      return;
    }
    KMP_DEBUG_ASSERT(bool_result == 1);

    depth = 1;
    resizing = 0;
    maxLevels = 7;
    numPerLevel =
        (kmp_uint32 *)__kmp_allocate(maxLevels * 2 * sizeof(kmp_uint32));
    skipPerLevel = &(numPerLevel[maxLevels]);
    for (kmp_uint32 i = 0; i < maxLevels; ++i) { // init levels to 1 item each
      numPerLevel[i] = 1;
      skipPerLevel[i] = 1;
    }

    // Derive levels from the topology if available, otherwise guess
    if (__kmp_topology && __kmp_topology->get_depth() > 0) {
      deriveLevels();
    } else {
      numPerLevel[0] = maxLeaves;
      numPerLevel[1] = num_addrs / maxLeaves;
      if (num_addrs % maxLeaves)
        numPerLevel[1]++;
    }

    base_num_threads = num_addrs;
    for (int i = maxLevels - 1; i >= 0;
         --i) // count non-empty levels to get depth
      if (numPerLevel[i] != 1 || depth > 1) // only count one top-level '1'
        depth++;

    kmp_uint32 branch = minBranch;
    if (numPerLevel[0] == 1)
      branch = num_addrs / maxLeaves;
    if (branch < minBranch)
      branch = minBranch;
    for (kmp_uint32 d = 0; d < depth - 1; ++d) { // optimize hierarchy width
      while (numPerLevel[d] > branch ||
             (d == 0 && numPerLevel[d] > maxLeaves)) { // max 4 on level 0!
        if (numPerLevel[d] & 1)
          numPerLevel[d]++;
        numPerLevel[d] = numPerLevel[d] >> 1;
        if (numPerLevel[d + 1] == 1) // shift size up one level
          depth++;
        numPerLevel[d + 1] = numPerLevel[d + 1] << 1;
      }
      if (numPerLevel[0] == 1) {
        branch = branch >> 1;
        if (branch < 4)
          branch = minBranch;
      }
    }

    for (kmp_uint32 i = 1; i < depth; ++i)
      skipPerLevel[i] = numPerLevel[i - 1] * skipPerLevel[i - 1];
    // Fill in hierarchy in the case of oversubscription
    for (kmp_uint32 i = depth; i < maxLevels; ++i)
      skipPerLevel[i] = 2 * skipPerLevel[i - 1];

    uninitialized = initialized; // One writer
  }
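  // Worked example (illustrative, not part of the original header): for a
  // machine with 4 packages x 4 cores x 2 threads, deriveLevels() yields
  // numPerLevel = {2, 4, 4, 1, ...} (leaves first), so skipPerLevel becomes
  // {1, 2, 8, 32, ...}: a node at level i spans
  // skipPerLevel[i] = numPerLevel[i-1] * skipPerLevel[i-1] threads, and the
  // oversubscription levels above 'depth' keep doubling that span.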
  // Resize the hierarchy if nproc changes to something larger than before
  void resize(kmp_uint32 nproc) {
    kmp_int8 bool_result = KMP_COMPARE_AND_STORE_ACQ8(&resizing, 0, 1);
    while (bool_result == 0) { // someone else is trying to resize
      KMP_CPU_PAUSE();
      if (nproc <= base_num_threads) // happy with other thread's resize
        return;
      else // try to resize
        bool_result = KMP_COMPARE_AND_STORE_ACQ8(&resizing, 0, 1);
    }
    KMP_DEBUG_ASSERT(bool_result != 0);
    if (nproc <= base_num_threads)
      return; // happy with other thread's resize

    // Calculate new maxLevels: first see if the old maxLevels is enough
    // to contain the new size
    kmp_uint32 old_sz = skipPerLevel[depth - 1];
    kmp_uint32 incs = 0, old_maxLevels = maxLevels;
    for (kmp_uint32 i = depth; i < maxLevels && nproc > old_sz; ++i) {
      skipPerLevel[i] = 2 * skipPerLevel[i - 1];
      numPerLevel[i - 1] *= 2;
      old_sz *= 2;
      depth++;
    }
    if (nproc > old_sz) { // Not enough space, need to expand hierarchy
      while (nproc > old_sz) {
        old_sz *= 2;
        incs++;
        depth++;
      }
      maxLevels += incs;
      // Resize the arrays and copy over the old elements
      kmp_uint32 *old_numPerLevel = numPerLevel;
      kmp_uint32 *old_skipPerLevel = skipPerLevel;
      numPerLevel = skipPerLevel = NULL;
      numPerLevel =
          (kmp_uint32 *)__kmp_allocate(maxLevels * 2 * sizeof(kmp_uint32));
      skipPerLevel = &(numPerLevel[maxLevels]);
      for (kmp_uint32 i = 0; i < old_maxLevels; ++i) {
        numPerLevel[i] = old_numPerLevel[i];
        skipPerLevel[i] = old_skipPerLevel[i];
      }
      // Init the new elements to 1 item per level
      for (kmp_uint32 i = old_maxLevels; i < maxLevels; ++i) {
        numPerLevel[i] = 1;
        skipPerLevel[i] = 1;
      }
      __kmp_free(old_numPerLevel);
    }

    // Fill in oversubscription levels of hierarchy
    for (kmp_uint32 i = old_maxLevels; i < maxLevels; ++i)
      skipPerLevel[i] = 2 * skipPerLevel[i - 1];

    base_num_threads = nproc;
    resizing = 0; // One writer
  }
};

#endif // KMP_AFFINITY_H