#include "kmp_affinity.h"
#include "kmp_wrapper_getpid.h"
#if KMP_USE_HIER_SCHED
#include "kmp_dispatch_hier.h"
#endif

// Copied from hwloc
#define HWLOC_GROUP_KIND_INTEL_MODULE 102
#define HWLOC_GROUP_KIND_INTEL_TILE 103
#define HWLOC_GROUP_KIND_INTEL_DIE 104
#define HWLOC_GROUP_KIND_WINDOWS_PROCESSOR_GROUP 220

// The machine topology
kmp_topology_t *__kmp_topology = nullptr;
// KMP_HW_SUBSET environment variable
kmp_hw_subset_t *__kmp_hw_subset = nullptr;

// Store the real or imagined machine hierarchy here
static hierarchy_info machine_hierarchy;

void __kmp_cleanup_hierarchy() { machine_hierarchy.fini(); }
#if KMP_AFFINITY_SUPPORTED
// Helper class to see if place lists further restrict the fullMask
class kmp_full_mask_modifier_t {
  kmp_affin_mask_t *mask;

public:
  kmp_full_mask_modifier_t() {
    KMP_CPU_ALLOC(mask);
    KMP_CPU_ZERO(mask);
  }
  ~kmp_full_mask_modifier_t() { KMP_CPU_FREE(mask); }
  void include(const kmp_affin_mask_t *other) { KMP_CPU_UNION(mask, other); }
  // Returns true if the full mask was changed, false otherwise
  bool restrict_to_mask() {
    if (KMP_CPU_EQUAL(__kmp_affin_fullMask, mask) || KMP_CPU_ISEMPTY(mask))
      return false;
    return __kmp_topology->restrict_to_mask(mask);
  }
};
static inline const char *
__kmp_get_affinity_env_var(const kmp_affinity_t &affinity,
                           bool for_binding = false) {
  if (affinity.flags.omp_places) {
    if (for_binding)
      return "OMP_PROC_BIND";
    return "OMP_PLACES";
  }
  return affinity.env_var;
}
void __kmp_get_hierarchy(kmp_uint32 nproc, kmp_bstate_t *thr_bar) {
  kmp_uint32 depth;
  // The test below is true if affinity is available, but set to "none". Need
  // to init on first use of hierarchical barrier.
  if (TCR_1(machine_hierarchy.uninitialized))
    machine_hierarchy.init(nproc);

  // Adjust the hierarchy in case num threads exceeds original
  if (nproc > machine_hierarchy.base_num_threads)
    machine_hierarchy.resize(nproc);

  depth = machine_hierarchy.depth;
  KMP_DEBUG_ASSERT(depth > 0);

  thr_bar->depth = depth;
  __kmp_type_convert(machine_hierarchy.numPerLevel[0] - 1,
                     &(thr_bar->base_leaf_kids));
  thr_bar->skip_per_level = machine_hierarchy.skipPerLevel;
}
static int nCoresPerPkg, nPackages;
static int __kmp_nThreadsPerCore;
#ifndef KMP_DFLT_NTH_CORES
static int __kmp_ncores;
#endif
const char *__kmp_hw_get_catalog_string(kmp_hw_t type, bool plural) {
  switch (type) {
  case KMP_HW_SOCKET:
    return ((plural) ? KMP_I18N_STR(Sockets) : KMP_I18N_STR(Socket));
  case KMP_HW_DIE:
    return ((plural) ? KMP_I18N_STR(Dice) : KMP_I18N_STR(Die));
  case KMP_HW_MODULE:
    return ((plural) ? KMP_I18N_STR(Modules) : KMP_I18N_STR(Module));
  case KMP_HW_TILE:
    return ((plural) ? KMP_I18N_STR(Tiles) : KMP_I18N_STR(Tile));
  case KMP_HW_NUMA:
    return ((plural) ? KMP_I18N_STR(NumaDomains) : KMP_I18N_STR(NumaDomain));
  case KMP_HW_L3:
    return ((plural) ? KMP_I18N_STR(L3Caches) : KMP_I18N_STR(L3Cache));
  case KMP_HW_L2:
    return ((plural) ? KMP_I18N_STR(L2Caches) : KMP_I18N_STR(L2Cache));
  case KMP_HW_L1:
    return ((plural) ? KMP_I18N_STR(L1Caches) : KMP_I18N_STR(L1Cache));
  case KMP_HW_LLC:
    return ((plural) ? KMP_I18N_STR(LLCaches) : KMP_I18N_STR(LLCache));
  case KMP_HW_CORE:
    return ((plural) ? KMP_I18N_STR(Cores) : KMP_I18N_STR(Core));
  case KMP_HW_THREAD:
    return ((plural) ? KMP_I18N_STR(Threads) : KMP_I18N_STR(Thread));
  case KMP_HW_PROC_GROUP:
    return ((plural) ? KMP_I18N_STR(ProcGroups) : KMP_I18N_STR(ProcGroup));
  case KMP_HW_UNKNOWN:
  case KMP_HW_LAST:
    return KMP_I18N_STR(Unknown);
  }
  KMP_ASSERT2(false, "Unhandled kmp_hw_t enumeration");
  KMP_BUILTIN_UNREACHABLE;
}
const char *__kmp_hw_get_keyword(kmp_hw_t type, bool plural) {
  switch (type) {
  case KMP_HW_SOCKET:
    return ((plural) ? "sockets" : "socket");
  case KMP_HW_DIE:
    return ((plural) ? "dice" : "die");
  case KMP_HW_MODULE:
    return ((plural) ? "modules" : "module");
  case KMP_HW_TILE:
    return ((plural) ? "tiles" : "tile");
  case KMP_HW_NUMA:
    return ((plural) ? "numa_domains" : "numa_domain");
  case KMP_HW_L3:
    return ((plural) ? "l3_caches" : "l3_cache");
  case KMP_HW_L2:
    return ((plural) ? "l2_caches" : "l2_cache");
  case KMP_HW_L1:
    return ((plural) ? "l1_caches" : "l1_cache");
  case KMP_HW_LLC:
    return ((plural) ? "ll_caches" : "ll_cache");
  case KMP_HW_CORE:
    return ((plural) ? "cores" : "core");
  case KMP_HW_THREAD:
    return ((plural) ? "threads" : "thread");
  case KMP_HW_PROC_GROUP:
    return ((plural) ? "proc_groups" : "proc_group");
  case KMP_HW_UNKNOWN:
  case KMP_HW_LAST:
    return ((plural) ? "unknowns" : "unknown");
  }
  KMP_ASSERT2(false, "Unhandled kmp_hw_t enumeration");
  KMP_BUILTIN_UNREACHABLE;
}
const char *__kmp_hw_get_core_type_string(kmp_hw_core_type_t type) {
  switch (type) {
  case KMP_HW_CORE_TYPE_UNKNOWN:
  case KMP_HW_MAX_NUM_CORE_TYPES:
    return "unknown";
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
  case KMP_HW_CORE_TYPE_ATOM:
    return "Intel Atom(R) processor";
  case KMP_HW_CORE_TYPE_CORE:
    return "Intel(R) Core(TM) processor";
#endif
  }
  KMP_ASSERT2(false, "Unhandled kmp_hw_core_type_t enumeration");
  KMP_BUILTIN_UNREACHABLE;
}
#if KMP_AFFINITY_SUPPORTED
#define KMP_AFF_WARNING(s, ...)                                                \
  if (s.flags.verbose || (s.flags.warnings && (s.type != affinity_none))) {    \
    KMP_WARNING(__VA_ARGS__);                                                  \
  }
#else
#define KMP_AFF_WARNING(s, ...) KMP_WARNING(__VA_ARGS__)
#endif
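
// Usage sketch (mirrors the call sites later in this file):
//   KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetAllFiltered);
// emits the warning only when verbose output is on, or when warnings are
// enabled and the affinity type is not affinity_none.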
int kmp_hw_thread_t::compare_ids(const void *a, const void *b) {
  const kmp_hw_thread_t *ahwthread = (const kmp_hw_thread_t *)a;
  const kmp_hw_thread_t *bhwthread = (const kmp_hw_thread_t *)b;
  int depth = __kmp_topology->get_depth();
  for (int level = 0; level < depth; ++level) {
    // Reverse sort (big to small) the core efficiencies so that
    // performance cores are listed before efficiency cores
    if (__kmp_is_hybrid_cpu() &&
        __kmp_topology->get_type(level) == KMP_HW_CORE &&
        ahwthread->attrs.is_core_eff_valid() &&
        bhwthread->attrs.is_core_eff_valid()) {
      if (ahwthread->attrs.get_core_eff() < bhwthread->attrs.get_core_eff())
        return 1;
      if (ahwthread->attrs.get_core_eff() > bhwthread->attrs.get_core_eff())
        return -1;
    }
    if (ahwthread->ids[level] == bhwthread->ids[level])
      continue;
    // If the hardware id is unknown for this level, then place hardware
    // thread further down in the sorted list as it should take last priority
    if (ahwthread->ids[level] == UNKNOWN_ID)
      return 1;
    else if (bhwthread->ids[level] == UNKNOWN_ID)
      return -1;
    else if (ahwthread->ids[level] < bhwthread->ids[level])
      return -1;
    else if (ahwthread->ids[level] > bhwthread->ids[level])
      return 1;
  }
  if (ahwthread->os_id < bhwthread->os_id)
    return -1;
  else if (ahwthread->os_id > bhwthread->os_id)
    return 1;
  return 0;
}
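
// Ordering note (illustrative ids): with ids = (socket, core, thread),
// {0,1,0} sorts before {0,1,1}, which sorts before {1,0,0}; on hybrid CPUs
// the core level compares efficiency first, so higher-efficiency cores sort
// earlier, and UNKNOWN_ID always sorts last within a level.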
#if KMP_AFFINITY_SUPPORTED
int kmp_hw_thread_t::compare_compact(const void *a, const void *b) {
  int i;
  const kmp_hw_thread_t *aa = (const kmp_hw_thread_t *)a;
  const kmp_hw_thread_t *bb = (const kmp_hw_thread_t *)b;
  int depth = __kmp_topology->get_depth();
  int compact = __kmp_topology->compact;
  KMP_DEBUG_ASSERT(compact >= 0);
  KMP_DEBUG_ASSERT(compact <= depth);
  for (i = 0; i < compact; i++) {
    int j = depth - i - 1;
    if (aa->sub_ids[j] < bb->sub_ids[j])
      return -1;
    if (aa->sub_ids[j] > bb->sub_ids[j])
      return 1;
  }
  for (; i < depth; i++) {
    int j = i - compact;
    if (aa->sub_ids[j] < bb->sub_ids[j])
      return -1;
    if (aa->sub_ids[j] > bb->sub_ids[j])
      return 1;
  }
  return 0;
}
#endif
void kmp_hw_thread_t::print() const {
  int depth = __kmp_topology->get_depth();
  printf("%4d ", os_id);
  for (int i = 0; i < depth; ++i) {
    printf("%4d (%d) ", ids[i], sub_ids[i]);
  }
  if (attrs.is_core_type_valid())
    printf(" (%s)", __kmp_hw_get_core_type_string(attrs.get_core_type()));
  if (attrs.is_core_eff_valid())
    printf(" (eff=%d)", attrs.get_core_eff());
  printf("\n");
}
////////////////////////////////////////////////////////////////////////////////
// kmp_topology_t methods

// Add a layer to the topology based on the ids. Assume the topology
// is perfectly nested (i.e., no object has more than one parent)
void kmp_topology_t::insert_layer(kmp_hw_t type, const int *ids) {
  // Figure out where the layer should go by comparing the ids of the current
  // layers with the new ids
  int target_layer;
  int previous_id = kmp_hw_thread_t::UNKNOWN_ID;
  int previous_new_id = kmp_hw_thread_t::UNKNOWN_ID;

  // Start from the highest layer and work down to find the target layer.
  // If the new layer is equal to another layer then put the new layer above.
  for (target_layer = 0; target_layer < depth; ++target_layer) {
    bool layers_equal = true;
    bool strictly_above_target_layer = false;
    for (int i = 0; i < num_hw_threads; ++i) {
      int id = hw_threads[i].ids[target_layer];
      int new_id = ids[i];
      if (id != previous_id && new_id == previous_new_id) {
        // Found the layer we are strictly above
        strictly_above_target_layer = true;
        layers_equal = false;
        break;
      } else if (id == previous_id && new_id != previous_new_id) {
        // Found a layer we are below. Move to the next layer and check.
        layers_equal = false;
        break;
      }
      previous_id = id;
      previous_new_id = new_id;
    }
    if (strictly_above_target_layer || layers_equal)
      break;
  }

  // Found the layer we are above. Now move everything down one level and
  // put the new ids and type into the topology.
  for (int i = depth - 1, j = depth; i >= target_layer; --i, --j)
    types[j] = types[i];
  types[target_layer] = type;
  for (int k = 0; k < num_hw_threads; ++k) {
    for (int i = depth - 1, j = depth; i >= target_layer; --i, --j)
      hw_threads[k].ids[j] = hw_threads[k].ids[i];
    hw_threads[k].ids[target_layer] = ids[k];
  }
  equivalent[type] = type;
  depth++;
}
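
// Sketch of the placement rule above (hypothetical ids): the new layer lands
// at the first existing layer whose ids change at least as often as the new
// ids do. If the new ids change only when socket ids change, the layer is
// inserted at (i.e., above) the socket layer and everything below shifts
// down one level.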
#if KMP_GROUP_AFFINITY
// Insert the Windows Processor Group structure into the topology
void kmp_topology_t::_insert_windows_proc_groups() {
  // Do not insert the processor group structure for a single group
  if (__kmp_num_proc_groups == 1)
    return;
  kmp_affin_mask_t *mask;
  int *ids = (int *)__kmp_allocate(sizeof(int) * num_hw_threads);
  KMP_CPU_ALLOC(mask);
  for (int i = 0; i < num_hw_threads; ++i) {
    KMP_CPU_ZERO(mask);
    KMP_CPU_SET(hw_threads[i].os_id, mask);
    ids[i] = __kmp_get_proc_group(mask);
  }
  KMP_CPU_FREE(mask);
  insert_layer(KMP_HW_PROC_GROUP, ids);
  __kmp_free(ids);

  // Sort the topology after inserting the proc group ids
  __kmp_topology->sort_ids();
}
#endif
// Remove layers that don't add information to the topology
void kmp_topology_t::_remove_radix1_layers() {
  int preference[KMP_HW_LAST];
  int top_index1, top_index2;
  // Set up preference associated with each layer
  KMP_FOREACH_HW_TYPE(type) { preference[type] = 0; }
  preference[KMP_HW_SOCKET] = 110;
  preference[KMP_HW_PROC_GROUP] = 100;
  preference[KMP_HW_CORE] = 95;
  preference[KMP_HW_THREAD] = 90;
  preference[KMP_HW_NUMA] = 85;
  preference[KMP_HW_DIE] = 80;
  preference[KMP_HW_TILE] = 75;
  preference[KMP_HW_MODULE] = 73;
  preference[KMP_HW_L3] = 70;
  preference[KMP_HW_L2] = 65;
  preference[KMP_HW_L1] = 60;
  preference[KMP_HW_LLC] = 5;
  top_index1 = 0;
  top_index2 = 1;
  while (top_index1 < depth - 1 && top_index2 < depth) {
    kmp_hw_t type1 = types[top_index1];
    kmp_hw_t type2 = types[top_index2];
    KMP_ASSERT_VALID_HW_TYPE(type1);
    KMP_ASSERT_VALID_HW_TYPE(type2);
    // Do not allow the three main topology levels (sockets, cores, threads)
    // to be compacted down
    if ((type1 == KMP_HW_THREAD || type1 == KMP_HW_CORE ||
         type1 == KMP_HW_SOCKET) &&
        (type2 == KMP_HW_THREAD || type2 == KMP_HW_CORE ||
         type2 == KMP_HW_SOCKET)) {
      top_index1 = top_index2++;
      continue;
    }
    bool radix1 = true;
    bool all_same = true;
    int id1 = hw_threads[0].ids[top_index1];
    int id2 = hw_threads[0].ids[top_index2];
    int pref1 = preference[type1];
    int pref2 = preference[type2];
    for (int hwidx = 1; hwidx < num_hw_threads; ++hwidx) {
      if (hw_threads[hwidx].ids[top_index1] == id1 &&
          hw_threads[hwidx].ids[top_index2] != id2) {
        radix1 = false;
        break;
      }
      if (hw_threads[hwidx].ids[top_index2] != id2)
        all_same = false;
      id1 = hw_threads[hwidx].ids[top_index1];
      id2 = hw_threads[hwidx].ids[top_index2];
    }
    if (radix1) {
      // Select the layer to remove based on preference
      kmp_hw_t remove_type, keep_type;
      int remove_layer, remove_layer_ids;
      if (pref1 > pref2) {
        remove_type = type2;
        keep_type = type1;
        remove_layer = remove_layer_ids = top_index2;
        if (all_same)
          remove_layer_ids = top_index1;
      } else {
        remove_type = type1;
        keep_type = type2;
        remove_layer = remove_layer_ids = top_index1;
        if (all_same)
          remove_layer_ids = top_index2;
      }
      set_equivalent_type(remove_type, keep_type);
      // Remove the layer's ids from the hardware threads and the layer
      // itself from the types array
      for (int idx = 0; idx < num_hw_threads; ++idx) {
        kmp_hw_thread_t &hw_thread = hw_threads[idx];
        for (int d = remove_layer_ids; d < depth - 1; ++d)
          hw_thread.ids[d] = hw_thread.ids[d + 1];
      }
      for (int idx = remove_layer; idx < depth - 1; ++idx)
        types[idx] = types[idx + 1];
      depth--;
    } else {
      top_index1 = top_index2++;
    }
  }
  KMP_ASSERT(depth > 0);
}
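
// Example of the collapse (assumed counts): a machine enumerated as
// 1 socket x 1 die/socket x 8 cores/die x 2 threads/core has a radix-1 die
// layer; die has lower preference than socket, so the die layer is removed,
// die becomes equivalent to socket, and the canonical topology is
// 1 socket x 8 cores/socket x 2 threads/core.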
// Set the last level cache equivalent type
void kmp_topology_t::_set_last_level_cache() {
  if (get_equivalent_type(KMP_HW_L3) != KMP_HW_UNKNOWN)
    set_equivalent_type(KMP_HW_LLC, KMP_HW_L3);
  else if (get_equivalent_type(KMP_HW_L2) != KMP_HW_UNKNOWN)
    set_equivalent_type(KMP_HW_LLC, KMP_HW_L2);
#if KMP_MIC_SUPPORTED
  else if (__kmp_mic_type == mic3) {
    if (get_equivalent_type(KMP_HW_L2) != KMP_HW_UNKNOWN)
      set_equivalent_type(KMP_HW_LLC, KMP_HW_L2);
    else if (get_equivalent_type(KMP_HW_TILE) != KMP_HW_UNKNOWN)
      set_equivalent_type(KMP_HW_LLC, KMP_HW_TILE);
    // L2/Tiles weren't detected so just say L1
    else
      set_equivalent_type(KMP_HW_LLC, KMP_HW_L1);
  }
#endif
  else if (get_equivalent_type(KMP_HW_L1) != KMP_HW_UNKNOWN)
    set_equivalent_type(KMP_HW_LLC, KMP_HW_L1);
  // Fallback is to set last level cache to socket or core
  if (get_equivalent_type(KMP_HW_LLC) == KMP_HW_UNKNOWN) {
    if (get_equivalent_type(KMP_HW_SOCKET) != KMP_HW_UNKNOWN)
      set_equivalent_type(KMP_HW_LLC, KMP_HW_SOCKET);
    else if (get_equivalent_type(KMP_HW_CORE) != KMP_HW_UNKNOWN)
      set_equivalent_type(KMP_HW_LLC, KMP_HW_CORE);
  }
  KMP_ASSERT(get_equivalent_type(KMP_HW_LLC) != KMP_HW_UNKNOWN);
}
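
// In short: LLC resolves to L3 if detected, else L2, else (on mic3) the tile
// or L1 layer, else L1, finally falling back to socket or core so that
// granularity=llc always maps onto some detected layer.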
// Gather the count for each topology layer and the ratio
void kmp_topology_t::_gather_enumeration_information() {
  int previous_id[KMP_HW_LAST];
  int max[KMP_HW_LAST];

  for (int i = 0; i < depth; ++i) {
    previous_id[i] = kmp_hw_thread_t::UNKNOWN_ID;
    max[i] = 0;
    count[i] = 0;
    ratio[i] = 0;
  }
  int core_level = get_level(KMP_HW_CORE);
  for (int i = 0; i < num_hw_threads; ++i) {
    kmp_hw_thread_t &hw_thread = hw_threads[i];
    for (int layer = 0; layer < depth; ++layer) {
      int id = hw_thread.ids[layer];
      if (id != previous_id[layer]) {
        // Add an additional increment to each count
        for (int l = layer; l < depth; ++l) {
          if (hw_thread.ids[l] != kmp_hw_thread_t::UNKNOWN_ID)
            count[l]++;
        }
        // Keep track of topology layer ratio statistics
        if (hw_thread.ids[layer] != kmp_hw_thread_t::UNKNOWN_ID)
          max[layer]++;
        for (int l = layer + 1; l < depth; ++l) {
          if (max[l] > ratio[l])
            ratio[l] = max[l];
          max[l] = 1;
        }
        // Figure out the number of different core types
        // and efficiencies for hybrid CPUs
        if (__kmp_is_hybrid_cpu() && core_level >= 0 && layer <= core_level) {
          if (hw_thread.attrs.is_core_eff_valid() &&
              hw_thread.attrs.core_eff >= num_core_efficiencies) {
            // Because efficiencies can range from 0 to max efficiency - 1,
            // the number of efficiencies is max efficiency + 1
            num_core_efficiencies = hw_thread.attrs.core_eff + 1;
          }
          if (hw_thread.attrs.is_core_type_valid()) {
            bool found = false;
            for (int j = 0; j < num_core_types; ++j) {
              if (hw_thread.attrs.get_core_type() == core_types[j]) {
                found = true;
                break;
              }
            }
            if (!found) {
              KMP_ASSERT(num_core_types < KMP_HW_MAX_NUM_CORE_TYPES);
              core_types[num_core_types++] = hw_thread.attrs.get_core_type();
            }
          }
        }
        break;
      }
    }
    for (int layer = 0; layer < depth; ++layer) {
      previous_id[layer] = hw_thread.ids[layer];
    }
  }
  for (int layer = 0; layer < depth; ++layer) {
    if (max[layer] > ratio[layer])
      ratio[layer] = max[layer];
  }
}
int kmp_topology_t::_get_ncores_with_attr(const kmp_hw_attr_t &attr,
                                          int above_level,
                                          bool find_all) const {
  int current, current_max;
  int previous_id[KMP_HW_LAST];
  for (int i = 0; i < depth; ++i)
    previous_id[i] = kmp_hw_thread_t::UNKNOWN_ID;
  int core_level = get_level(KMP_HW_CORE);
  if (find_all)
    above_level = -1;
  KMP_ASSERT(above_level < core_level);
  current_max = 0;
  current = 0;
  for (int i = 0; i < num_hw_threads; ++i) {
    kmp_hw_thread_t &hw_thread = hw_threads[i];
    if (!find_all && hw_thread.ids[above_level] != previous_id[above_level]) {
      if (current > current_max)
        current_max = current;
      current = hw_thread.attrs.contains(attr);
    } else {
      // Only count cores (not each hardware thread) with the attribute
      for (int level = above_level + 1; level <= core_level; ++level) {
        if (hw_thread.ids[level] != previous_id[level]) {
          if (hw_thread.attrs.contains(attr))
            current++;
          break;
        }
      }
    }
    for (int level = 0; level < depth; ++level)
      previous_id[level] = hw_thread.ids[level];
  }
  if (current > current_max)
    current_max = current;
  return current_max;
}
// Find out if the topology is uniform
void kmp_topology_t::_discover_uniformity() {
  int num = 1;
  for (int level = 0; level < depth; ++level)
    num *= ratio[level];
  flags.uniform = (num == count[depth - 1]);
}
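
// e.g., ratios 2 sockets x 4 cores/socket x 2 threads/core give num = 16;
// the topology is uniform exactly when count[depth - 1], the total number of
// hardware threads, is also 16. An asymmetric machine (one socket with 4
// cores, another with 6) fails the check.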
// Set all the sub_ids for each hardware thread
void kmp_topology_t::_set_sub_ids() {
  int previous_id[KMP_HW_LAST];
  int sub_id[KMP_HW_LAST];

  for (int i = 0; i < depth; ++i) {
    previous_id[i] = -1;
    sub_id[i] = -1;
  }
  for (int i = 0; i < num_hw_threads; ++i) {
    kmp_hw_thread_t &hw_thread = hw_threads[i];
    // Setup the sub_id
    for (int j = 0; j < depth; ++j) {
      if (hw_thread.ids[j] != previous_id[j]) {
        sub_id[j]++;
        for (int k = j + 1; k < depth; ++k) {
          sub_id[k] = 0;
        }
        break;
      }
    }
    // Set previous_id
    for (int j = 0; j < depth; ++j) {
      previous_id[j] = hw_thread.ids[j];
    }
    // Set the sub_ids field
    for (int j = 0; j < depth; ++j) {
      hw_thread.sub_ids[j] = sub_id[j];
    }
  }
}
// Set global affinity variables describing the number of packages, cores per
// package, threads per core, and total cores
void kmp_topology_t::_set_globals() {
  // Set nCoresPerPkg, nPackages, __kmp_nThreadsPerCore, & __kmp_ncores
  int core_level, thread_level, package_level;
  package_level = get_level(KMP_HW_SOCKET);
#if KMP_GROUP_AFFINITY
  if (package_level == -1)
    package_level = get_level(KMP_HW_PROC_GROUP);
#endif
  core_level = get_level(KMP_HW_CORE);
  thread_level = get_level(KMP_HW_THREAD);

  KMP_ASSERT(core_level != -1);
  KMP_ASSERT(thread_level != -1);

  __kmp_nThreadsPerCore = calculate_ratio(thread_level, core_level);
  if (package_level != -1) {
    nCoresPerPkg = calculate_ratio(core_level, package_level);
    nPackages = get_count(package_level);
  } else {
    // Assume one socket
    nCoresPerPkg = get_count(core_level);
    nPackages = 1;
  }
#ifndef KMP_DFLT_NTH_CORES
  __kmp_ncores = get_count(core_level);
#endif
}
kmp_topology_t *kmp_topology_t::allocate(int nproc, int ndepth,
                                         const kmp_hw_t *types) {
  kmp_topology_t *retval;
  // Allocate all data in one large allocation
  size_t size = sizeof(kmp_topology_t) + sizeof(kmp_hw_thread_t) * nproc +
                sizeof(int) * (size_t)KMP_HW_LAST * 3;
  char *bytes = (char *)__kmp_allocate(size);
  retval = (kmp_topology_t *)bytes;
  if (nproc > 0) {
    retval->hw_threads = (kmp_hw_thread_t *)(bytes + sizeof(kmp_topology_t));
  } else {
    retval->hw_threads = nullptr;
  }
  retval->num_hw_threads = nproc;
  retval->depth = ndepth;
  int *arr =
      (int *)(bytes + sizeof(kmp_topology_t) + sizeof(kmp_hw_thread_t) * nproc);
  retval->types = (kmp_hw_t *)arr;
  retval->ratio = arr + (size_t)KMP_HW_LAST;
  retval->count = arr + 2 * (size_t)KMP_HW_LAST;
  retval->num_core_efficiencies = 0;
  retval->num_core_types = 0;
  retval->compact = 0;
  for (int i = 0; i < KMP_HW_MAX_NUM_CORE_TYPES; ++i)
    retval->core_types[i] = KMP_HW_CORE_TYPE_UNKNOWN;
  KMP_FOREACH_HW_TYPE(type) { retval->equivalent[type] = KMP_HW_UNKNOWN; }
  for (int i = 0; i < ndepth; ++i) {
    retval->types[i] = types[i];
    retval->equivalent[types[i]] = types[i];
  }
  return retval;
}
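
// Memory layout of the single allocation made above:
//   [kmp_topology_t][kmp_hw_thread_t x nproc][types][ratio][count]
// where types, ratio, and count are each KMP_HW_LAST ints carved from the
// same block, so the one __kmp_free() in deallocate() releases everything.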
void kmp_topology_t::deallocate(kmp_topology_t *topology) {
  if (topology)
    __kmp_free(topology);
}
bool kmp_topology_t::check_ids() const {
  // Assume ids have been sorted
  if (num_hw_threads == 0)
    return true;
  for (int i = 1; i < num_hw_threads; ++i) {
    kmp_hw_thread_t &current_thread = hw_threads[i];
    kmp_hw_thread_t &previous_thread = hw_threads[i - 1];
    bool unique = false;
    for (int j = 0; j < depth; ++j) {
      if (previous_thread.ids[j] != current_thread.ids[j]) {
        unique = true;
        break;
      }
    }
    if (unique)
      continue;
    return false;
  }
  return true;
}
void kmp_topology_t::dump() const {
  printf("***********************\n");
  printf("*** __kmp_topology: ***\n");
  printf("***********************\n");
  printf("* depth: %d\n", depth);

  printf("* types: ");
  for (int i = 0; i < depth; ++i)
    printf("%15s ", __kmp_hw_get_keyword(types[i]));
  printf("\n");

  printf("* ratio: ");
  for (int i = 0; i < depth; ++i) {
    printf("%15d ", ratio[i]);
  }
  printf("\n");

  printf("* count: ");
  for (int i = 0; i < depth; ++i) {
    printf("%15d ", count[i]);
  }
  printf("\n");

  printf("* num_core_eff: %d\n", num_core_efficiencies);
  printf("* num_core_types: %d\n", num_core_types);
  printf("* core_types: ");
  for (int i = 0; i < num_core_types; ++i)
    printf("%3d ", core_types[i]);
  printf("\n");

  printf("* equivalent map:\n");
  KMP_FOREACH_HW_TYPE(i) {
    const char *key = __kmp_hw_get_keyword(i);
    const char *value = __kmp_hw_get_keyword(equivalent[i]);
    printf("%-15s -> %-15s\n", key, value);
  }

  printf("* uniform: %s\n", (is_uniform() ? "Yes" : "No"));

  printf("* num_hw_threads: %d\n", num_hw_threads);
  printf("* hw_threads:\n");
  for (int i = 0; i < num_hw_threads; ++i) {
    hw_threads[i].print();
  }
  printf("***********************\n");
}
void kmp_topology_t::print(const char *env_var) const {
  kmp_str_buf_t buf;
  int print_types_depth;
  __kmp_str_buf_init(&buf);
  kmp_hw_t print_types[KMP_HW_LAST + 2];

  // Num Available Threads
  if (num_hw_threads) {
    KMP_INFORM(AvailableOSProc, env_var, num_hw_threads);
  } else {
    KMP_INFORM(AvailableOSProc, env_var, __kmp_xproc);
  }

  // Uniform or not
  if (is_uniform()) {
    KMP_INFORM(Uniform, env_var);
  } else {
    KMP_INFORM(NonUniform, env_var);
  }

  // Equivalent types
  KMP_FOREACH_HW_TYPE(type) {
    kmp_hw_t eq_type = equivalent[type];
    if (eq_type != KMP_HW_UNKNOWN && eq_type != type) {
      KMP_INFORM(AffEqualTopologyTypes, env_var,
                 __kmp_hw_get_catalog_string(type),
                 __kmp_hw_get_catalog_string(eq_type));
    }
  }

  // Quick topology
  KMP_ASSERT(depth > 0 && depth <= (int)KMP_HW_LAST);
  // Create a print types array that always guarantees printing
  // the core and thread level
  print_types_depth = 0;
  for (int level = 0; level < depth; ++level)
    print_types[print_types_depth++] = types[level];
  if (equivalent[KMP_HW_CORE] != KMP_HW_CORE) {
    // Force in the core level for quick topology
    if (print_types[print_types_depth - 1] == KMP_HW_THREAD) {
      // Force core before thread e.g., 1 socket X 2 threads/socket
      // becomes 1 socket X 1 core/socket X 2 threads/core
      print_types[print_types_depth - 1] = KMP_HW_CORE;
      print_types[print_types_depth++] = KMP_HW_THREAD;
    } else {
      print_types[print_types_depth++] = KMP_HW_CORE;
    }
  }
  // Always put threads at very end of quick topology
  if (equivalent[KMP_HW_THREAD] != KMP_HW_THREAD)
    print_types[print_types_depth++] = KMP_HW_THREAD;

  __kmp_str_buf_clear(&buf);
  kmp_hw_t numerator_type;
  kmp_hw_t denominator_type = KMP_HW_UNKNOWN;
  int core_level = get_level(KMP_HW_CORE);
  int ncores = get_count(core_level);

  for (int plevel = 0, level = 0; plevel < print_types_depth; ++plevel) {
    int c;
    bool plural;
    numerator_type = print_types[plevel];
    KMP_ASSERT_VALID_HW_TYPE(numerator_type);
    if (equivalent[numerator_type] != numerator_type)
      c = 1;
    else
      c = get_ratio(level++);
    plural = (c > 1);
    if (plevel == 0) {
      __kmp_str_buf_print(&buf, "%d %s", c,
                          __kmp_hw_get_catalog_string(numerator_type, plural));
    } else {
      __kmp_str_buf_print(&buf, " x %d %s/%s", c,
                          __kmp_hw_get_catalog_string(numerator_type, plural),
                          __kmp_hw_get_catalog_string(denominator_type));
    }
    denominator_type = numerator_type;
  }
  KMP_INFORM(TopologyGeneric, env_var, buf.str, ncores);

  // Hybrid topology information
  if (__kmp_is_hybrid_cpu()) {
    for (int i = 0; i < num_core_types; ++i) {
      kmp_hw_core_type_t core_type = core_types[i];
      kmp_hw_attr_t attr;
      attr.clear();
      attr.set_core_type(core_type);
      int ncores = get_ncores_with_attr(attr);
      if (ncores > 0) {
        KMP_INFORM(TopologyHybrid, env_var, ncores,
                   __kmp_hw_get_core_type_string(core_type));
        KMP_ASSERT(num_core_efficiencies <= KMP_HW_MAX_NUM_CORE_EFFS);
        for (int eff = 0; eff < num_core_efficiencies; ++eff) {
          attr.set_core_eff(eff);
          int ncores_with_eff = get_ncores_with_attr(attr);
          if (ncores_with_eff > 0) {
            KMP_INFORM(TopologyHybridCoreEff, env_var, ncores_with_eff, eff);
          }
        }
      }
    }
  }

  if (num_hw_threads <= 0) {
    __kmp_str_buf_free(&buf);
    return;
  }

  // Full OS proc to hardware thread map
  KMP_INFORM(OSProcToPhysicalThreadMap, env_var);
  for (int i = 0; i < num_hw_threads; i++) {
    __kmp_str_buf_clear(&buf);
    for (int level = 0; level < depth; ++level) {
      if (hw_threads[i].ids[level] == kmp_hw_thread_t::UNKNOWN_ID)
        continue;
      kmp_hw_t type = types[level];
      __kmp_str_buf_print(&buf, "%s ", __kmp_hw_get_catalog_string(type));
      __kmp_str_buf_print(&buf, "%d ", hw_threads[i].ids[level]);
    }
    if (__kmp_is_hybrid_cpu())
      __kmp_str_buf_print(
          &buf, "(%s)",
          __kmp_hw_get_core_type_string(hw_threads[i].attrs.get_core_type()));
    KMP_INFORM(OSProcMapToPack, env_var, hw_threads[i].os_id, buf.str);
  }

  __kmp_str_buf_free(&buf);
}
#if KMP_AFFINITY_SUPPORTED
void kmp_topology_t::set_granularity(kmp_affinity_t &affinity) const {
  const char *env_var = __kmp_get_affinity_env_var(affinity);
  // If requested hybrid CPU attributes for granularity (either OMP_PLACES or
  // KMP_AFFINITY), but none exist, then reset granularity and have the below
  // method select a granularity and warn the user.
  if (!__kmp_is_hybrid_cpu()) {
    if (affinity.core_attr_gran.valid) {
      // OMP_PLACES with cores:<attribute> but non-hybrid arch, use cores
      // instead
      KMP_AFF_WARNING(
          affinity, AffIgnoringNonHybrid, env_var,
          __kmp_hw_get_catalog_string(KMP_HW_CORE, /*plural=*/true));
      affinity.gran = KMP_HW_CORE;
      affinity.gran_levels = -1;
      affinity.core_attr_gran = KMP_AFFINITY_ATTRS_UNKNOWN;
      affinity.flags.core_types_gran = affinity.flags.core_effs_gran = 0;
    } else if (affinity.flags.core_types_gran ||
               affinity.flags.core_effs_gran) {
      // OMP_PLACES=core_types|core_effs but non-hybrid arch, use cores instead
      if (affinity.flags.omp_places) {
        KMP_AFF_WARNING(
            affinity, AffIgnoringNonHybrid, env_var,
            __kmp_hw_get_catalog_string(KMP_HW_CORE, /*plural=*/true));
      } else {
        // KMP_AFFINITY=granularity=core_type|core_eff,...
        KMP_AFF_WARNING(affinity, AffGranularityBad, env_var,
                        "Intel(R) Hybrid Technology core attribute",
                        __kmp_hw_get_catalog_string(KMP_HW_CORE));
      }
      affinity.gran = KMP_HW_CORE;
      affinity.gran_levels = -1;
      affinity.core_attr_gran = KMP_AFFINITY_ATTRS_UNKNOWN;
      affinity.flags.core_types_gran = affinity.flags.core_effs_gran = 0;
    }
  }
  // Set the number of affinity granularity levels
  if (affinity.gran_levels < 0) {
    kmp_hw_t gran_type = get_equivalent_type(affinity.gran);
    // Check if the user's granularity request is valid
    if (gran_type == KMP_HW_UNKNOWN) {
      // First try core, then thread, then package
      kmp_hw_t gran_types[3] = {KMP_HW_CORE, KMP_HW_THREAD, KMP_HW_SOCKET};
      for (auto g : gran_types) {
        if (get_equivalent_type(g) != KMP_HW_UNKNOWN) {
          gran_type = g;
          break;
        }
      }
      KMP_ASSERT(gran_type != KMP_HW_UNKNOWN);
      // Warn the user what granularity setting will be used instead
      KMP_AFF_WARNING(affinity, AffGranularityBad, env_var,
                      __kmp_hw_get_catalog_string(affinity.gran),
                      __kmp_hw_get_catalog_string(gran_type));
      affinity.gran = gran_type;
    }
#if KMP_GROUP_AFFINITY
    // If more than one processor group exists and the level of granularity
    // specified by the user is too coarse, then the granularity must be
    // adjusted "down" to processor group affinity, because threads can only
    // exist within one processor group.
    if (__kmp_num_proc_groups > 1) {
      int gran_depth = get_level(gran_type);
      int proc_group_depth = get_level(KMP_HW_PROC_GROUP);
      if (gran_depth >= 0 && proc_group_depth >= 0 &&
          gran_depth < proc_group_depth) {
        KMP_AFF_WARNING(affinity, AffGranTooCoarseProcGroup, env_var,
                        __kmp_hw_get_catalog_string(affinity.gran));
        affinity.gran = gran_type = KMP_HW_PROC_GROUP;
      }
    }
#endif
    affinity.gran_levels = 0;
    for (int i = depth - 1; i >= 0 && get_type(i) != gran_type; --i)
      affinity.gran_levels++;
  }
}
#endif
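
// Example fallback (hypothetical request): KMP_AFFINITY=granularity=tile on
// a machine with no tile layer resolves through the core -> thread -> socket
// probe above (typically to core), and AffGranularityBad reports the
// substitution.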
void kmp_topology_t::canonicalize() {
#if KMP_GROUP_AFFINITY
  _insert_windows_proc_groups();
#endif
  _remove_radix1_layers();
  _gather_enumeration_information();
  _discover_uniformity();
  _set_sub_ids();
  _set_globals();
  _set_last_level_cache();

#if KMP_MIC_SUPPORTED
  // Manually add the L2 = Tile equivalence
  if (__kmp_mic_type == mic3) {
    if (get_level(KMP_HW_L2) != -1)
      set_equivalent_type(KMP_HW_TILE, KMP_HW_L2);
    else if (get_level(KMP_HW_TILE) != -1)
      set_equivalent_type(KMP_HW_L2, KMP_HW_TILE);
  }
#endif

  // Perform post canonicalization checking
  KMP_ASSERT(depth > 0);
  for (int level = 0; level < depth; ++level) {
    // All counts, ratios, and types must be valid
    KMP_ASSERT(count[level] > 0 && ratio[level] > 0);
    KMP_ASSERT_VALID_HW_TYPE(types[level]);
    // Detected types must point to themselves
    KMP_ASSERT(equivalent[types[level]] == types[level]);
  }
}
// Canonicalize an explicit packages x cores/pkg x threads/core topology
void kmp_topology_t::canonicalize(int npackages, int ncores_per_pkg,
                                  int nthreads_per_core, int ncores) {
  int ndepth = 3;
  depth = ndepth;
  KMP_FOREACH_HW_TYPE(i) { equivalent[i] = KMP_HW_UNKNOWN; }
  for (int level = 0; level < depth; ++level) {
    count[level] = 0;
    ratio[level] = 0;
  }
  count[0] = npackages;
  count[1] = ncores;
  count[2] = __kmp_xproc;
  ratio[0] = npackages;
  ratio[1] = ncores_per_pkg;
  ratio[2] = nthreads_per_core;
  equivalent[KMP_HW_SOCKET] = KMP_HW_SOCKET;
  equivalent[KMP_HW_CORE] = KMP_HW_CORE;
  equivalent[KMP_HW_THREAD] = KMP_HW_THREAD;
  types[0] = KMP_HW_SOCKET;
  types[1] = KMP_HW_CORE;
  types[2] = KMP_HW_THREAD;
  _discover_uniformity();
}
#if KMP_AFFINITY_SUPPORTED
static kmp_str_buf_t *
__kmp_hw_get_catalog_core_string(const kmp_hw_attr_t &attr, kmp_str_buf_t *buf,
                                 bool plural) {
  __kmp_str_buf_init(buf);
  if (attr.is_core_type_valid())
    __kmp_str_buf_print(buf, "%s %s",
                        __kmp_hw_get_core_type_string(attr.get_core_type()),
                        __kmp_hw_get_catalog_string(KMP_HW_CORE, plural));
  else
    __kmp_str_buf_print(buf, "%s eff=%d",
                        __kmp_hw_get_catalog_string(KMP_HW_CORE, plural),
                        attr.get_core_eff());
  return buf;
}
// Restrict the topology to the given mask.
// Returns true if the topology was changed, false otherwise.
bool kmp_topology_t::restrict_to_mask(const kmp_affin_mask_t *mask) {
  bool affected;
  int new_index = 0;
  for (int i = 0; i < num_hw_threads; ++i) {
    int os_id = hw_threads[i].os_id;
    if (KMP_CPU_ISSET(os_id, mask)) {
      if (i != new_index)
        hw_threads[new_index] = hw_threads[i];
      new_index++;
    } else {
      KMP_CPU_CLR(os_id, __kmp_affin_fullMask);
      __kmp_avail_proc--;
    }
  }

  KMP_DEBUG_ASSERT(new_index <= num_hw_threads);
  affected = (num_hw_threads != new_index);
  num_hw_threads = new_index;

  // Post hardware subset canonicalization
  if (affected) {
    _gather_enumeration_information();
    _discover_uniformity();
    _set_globals();
    _set_last_level_cache();
#if KMP_OS_WINDOWS
    // Copy filtered full mask if topology has single processor group
    if (__kmp_num_proc_groups <= 1)
#endif
      __kmp_affin_origMask->copy(__kmp_affin_fullMask);
  }
  return affected;
}
// Apply the KMP_HW_SUBSET envirable to the topology
// Returns true if KMP_HW_SUBSET filtered any processors
// otherwise, returns false
bool kmp_topology_t::filter_hw_subset() {
  // If KMP_HW_SUBSET wasn't requested, then do nothing.
  if (!__kmp_hw_subset)
    return false;

  // First, sort the KMP_HW_SUBSET items by the machine topology
  __kmp_hw_subset->sort();

  __kmp_hw_subset->canonicalize(__kmp_topology);

  // Check to see if KMP_HW_SUBSET is a valid subset of the detected topology
  bool using_core_types = false;
  bool using_core_effs = false;
  bool is_absolute = __kmp_hw_subset->is_absolute();
  int hw_subset_depth = __kmp_hw_subset->get_depth();
  kmp_hw_t specified[KMP_HW_LAST];
  int *topology_levels = (int *)KMP_ALLOCA(sizeof(int) * hw_subset_depth);
  KMP_ASSERT(hw_subset_depth > 0);
  KMP_FOREACH_HW_TYPE(i) { specified[i] = KMP_HW_UNKNOWN; }
  int core_level = get_level(KMP_HW_CORE);
  for (int i = 0; i < hw_subset_depth; ++i) {
    int max_count;
    const kmp_hw_subset_t::item_t &item = __kmp_hw_subset->at(i);
    int num = item.num[0];
    int offset = item.offset[0];
    kmp_hw_t type = item.type;
    kmp_hw_t equivalent_type = equivalent[type];
    int level = get_level(type);
    topology_levels[i] = level;

    // Check to see if current layer is in detected machine topology
    if (equivalent_type != KMP_HW_UNKNOWN) {
      __kmp_hw_subset->at(i).type = equivalent_type;
    } else {
      KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetNotExistGeneric,
                      __kmp_hw_get_catalog_string(type));
      return false;
    }

    // Check to see if current layer has already been specified either
    // directly or through an equivalent type
    if (specified[equivalent_type] != KMP_HW_UNKNOWN) {
      KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetEqvLayers,
                      __kmp_hw_get_catalog_string(type),
                      __kmp_hw_get_catalog_string(specified[equivalent_type]));
      return false;
    }
    specified[equivalent_type] = type;

    // Check to see if each layer's num & offset parameters are valid
    max_count = get_ratio(level);
    if (max_count < 0 ||
        (num != kmp_hw_subset_t::USE_ALL && num + offset > max_count)) {
      bool plural = (num > 1);
      KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetManyGeneric,
                      __kmp_hw_get_catalog_string(type, plural));
      return false;
    }

    // Check to see if core attributes are consistent
    if (core_level == level) {
      // Determine which core attributes are specified
      for (int j = 0; j < item.num_attrs; ++j) {
        if (item.attr[j].is_core_type_valid())
          using_core_types = true;
        if (item.attr[j].is_core_eff_valid())
          using_core_effs = true;
      }

      // A single core attribute on a non-hybrid arch only drops the
      // attribute; multiple core attributes on a non-hybrid arch drop all of
      // KMP_HW_SUBSET.
      if ((using_core_effs || using_core_types) && !__kmp_is_hybrid_cpu()) {
        if (item.num_attrs == 1) {
          if (using_core_effs) {
            KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetIgnoringAttr,
                            "efficiency");
          } else {
            KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetIgnoringAttr,
                            "core_type");
          }
          using_core_effs = false;
          using_core_types = false;
        } else {
          KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetAttrsNonHybrid);
          return false;
        }
      }

      // Check if using both core types and core efficiencies together
      if (using_core_types && using_core_effs) {
        KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetIncompat, "core_type",
                        "efficiency");
        return false;
      }

      // Check that core efficiency values are valid
      if (using_core_effs) {
        for (int j = 0; j < item.num_attrs; ++j) {
          if (item.attr[j].is_core_eff_valid()) {
            int core_eff = item.attr[j].get_core_eff();
            if (core_eff < 0 || core_eff >= num_core_efficiencies) {
              kmp_str_buf_t buf;
              __kmp_str_buf_init(&buf);
              __kmp_str_buf_print(&buf, "%d", item.attr[j].get_core_eff());
              __kmp_msg(kmp_ms_warning,
                        KMP_MSG(AffHWSubsetAttrInvalid, "efficiency", buf.str),
                        KMP_HNT(ValidValuesRange, 0, num_core_efficiencies - 1),
                        __kmp_msg_null);
              __kmp_str_buf_free(&buf);
              return false;
            }
          }
        }
      }

      // Check that the number of requested cores with attributes is valid
      if ((using_core_types || using_core_effs) && !is_absolute) {
        for (int j = 0; j < item.num_attrs; ++j) {
          int num = item.num[j];
          int offset = item.offset[j];
          int level_above = core_level - 1;
          if (level_above >= 0) {
            max_count = get_ncores_with_attr_per(item.attr[j], level_above);
            if (max_count <= 0 ||
                (num != kmp_hw_subset_t::USE_ALL && num + offset > max_count)) {
              kmp_str_buf_t buf;
              __kmp_hw_get_catalog_core_string(item.attr[j], &buf, num > 0);
              KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetManyGeneric, buf.str);
              __kmp_str_buf_free(&buf);
              return false;
            }
          }
        }
      }

      if ((using_core_types || using_core_effs) && item.num_attrs > 1) {
        for (int j = 0; j < item.num_attrs; ++j) {
          // Ambiguous use of specific core attribute + generic core
          // e.g., 4c & 3c:intel_core or 4c & 3c:eff1
          if (!item.attr[j]) {
            kmp_hw_attr_t other_attr;
            for (int k = 0; k < item.num_attrs; ++k) {
              if (item.attr[k] != item.attr[j]) {
                other_attr = item.attr[k];
                break;
              }
            }
            kmp_str_buf_t buf;
            __kmp_hw_get_catalog_core_string(other_attr, &buf, item.num[j] > 0);
            KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetIncompat,
                            __kmp_hw_get_catalog_string(KMP_HW_CORE), buf.str);
            __kmp_str_buf_free(&buf);
            return false;
          }
          // Allow specifying a specific core type or core eff exactly once
          for (int k = 0; k < j; ++k) {
            if (!item.attr[j] || !item.attr[k])
              continue;
            if (item.attr[k] == item.attr[j]) {
              kmp_str_buf_t buf;
              __kmp_hw_get_catalog_core_string(item.attr[j], &buf,
                                               item.num[j] > 0);
              KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetAttrRepeat, buf.str);
              __kmp_str_buf_free(&buf);
              return false;
            }
          }
        }
      }
    }
  }
  // For keeping track of sub_ids for an absolute KMP_HW_SUBSET
  // or core attributes (core type or efficiency)
  int prev_sub_ids[KMP_HW_LAST];
  int abs_sub_ids[KMP_HW_LAST];
  int core_eff_sub_ids[KMP_HW_MAX_NUM_CORE_EFFS];
  int core_type_sub_ids[KMP_HW_MAX_NUM_CORE_TYPES];
  for (size_t i = 0; i < KMP_HW_LAST; ++i) {
    abs_sub_ids[i] = -1;
    prev_sub_ids[i] = -1;
  }
  for (size_t i = 0; i < KMP_HW_MAX_NUM_CORE_EFFS; ++i)
    core_eff_sub_ids[i] = -1;
  for (size_t i = 0; i < KMP_HW_MAX_NUM_CORE_TYPES; ++i)
    core_type_sub_ids[i] = -1;

  // Determine which hardware threads should be filtered.

  // Helpful to determine if a topology layer is targeted by an absolute subset
  auto is_targeted = [&](int level) {
    if (is_absolute)
      return true;
    for (int i = 0; i < hw_subset_depth; ++i)
      if (topology_levels[i] == level)
        return true;
    return false;
  };

  // Helpful to index into core type sub Ids array
  auto get_core_type_index = [](const kmp_hw_thread_t &t) {
    switch (t.attrs.get_core_type()) {
    case KMP_HW_CORE_TYPE_UNKNOWN:
    case KMP_HW_MAX_NUM_CORE_TYPES:
      return 0;
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
    case KMP_HW_CORE_TYPE_ATOM:
      return 1;
    case KMP_HW_CORE_TYPE_CORE:
      return 2;
#endif
    }
    KMP_ASSERT2(false, "Unhandled kmp_hw_thread_t enumeration");
    KMP_BUILTIN_UNREACHABLE;
  };
  // Helpful to index into core efficiencies sub Ids array
  auto get_core_eff_index = [](const kmp_hw_thread_t &t) {
    return t.attrs.get_core_eff();
  };

  int num_filtered = 0;
  kmp_affin_mask_t *filtered_mask;
  KMP_CPU_ALLOC(filtered_mask);
  KMP_CPU_COPY(filtered_mask, __kmp_affin_fullMask);
  for (int i = 0; i < num_hw_threads; ++i) {
    kmp_hw_thread_t &hw_thread = hw_threads[i];

    // Update type_sub_id
    if (is_absolute || using_core_effs || using_core_types) {
      for (int level = 0; level < get_depth(); ++level) {
        if (hw_thread.sub_ids[level] != prev_sub_ids[level]) {
          bool found_targeted = false;
          for (int j = level; j < get_depth(); ++j) {
            bool targeted = is_targeted(j);
            if (!found_targeted && targeted) {
              found_targeted = true;
              abs_sub_ids[j]++;
              if (j == core_level && using_core_effs)
                core_eff_sub_ids[get_core_eff_index(hw_thread)]++;
              if (j == core_level && using_core_types)
                core_type_sub_ids[get_core_type_index(hw_thread)]++;
            } else if (targeted) {
              abs_sub_ids[j] = 0;
              if (j == core_level && using_core_effs)
                core_eff_sub_ids[get_core_eff_index(hw_thread)] = 0;
              if (j == core_level && using_core_types)
                core_type_sub_ids[get_core_type_index(hw_thread)] = 0;
            }
          }
          break;
        }
      }
      for (int level = 0; level < get_depth(); ++level)
        prev_sub_ids[level] = hw_thread.sub_ids[level];
    }

    // Check to see if this hardware thread should be filtered
    bool should_be_filtered = false;
    for (int hw_subset_index = 0; hw_subset_index < hw_subset_depth;
         ++hw_subset_index) {
      const auto &hw_subset_item = __kmp_hw_subset->at(hw_subset_index);
      int level = topology_levels[hw_subset_index];
      if (level == -1)
        continue;
      if ((using_core_effs || using_core_types) && level == core_level) {
        // Look for the core attribute in KMP_HW_SUBSET which corresponds to
        // this hardware thread's core attribute and use its num,offset plus
        // the running sub id for that attribute to decide on filtering.
        int attr_idx;
        kmp_hw_core_type_t core_type = hw_thread.attrs.get_core_type();
        int core_eff = hw_thread.attrs.get_core_eff();
        for (attr_idx = 0; attr_idx < hw_subset_item.num_attrs; ++attr_idx) {
          if (using_core_types &&
              hw_subset_item.attr[attr_idx].get_core_type() == core_type)
            break;
          if (using_core_effs &&
              hw_subset_item.attr[attr_idx].get_core_eff() == core_eff)
            break;
        }
        // This core attribute isn't in the KMP_HW_SUBSET so always filter it
        if (attr_idx == hw_subset_item.num_attrs) {
          should_be_filtered = true;
          break;
        }
        int sub_id;
        int num = hw_subset_item.num[attr_idx];
        int offset = hw_subset_item.offset[attr_idx];
        if (using_core_types)
          sub_id = core_type_sub_ids[get_core_type_index(hw_thread)];
        else
          sub_id = core_eff_sub_ids[get_core_eff_index(hw_thread)];
        if (sub_id < offset ||
            (num != kmp_hw_subset_t::USE_ALL && sub_id >= offset + num)) {
          should_be_filtered = true;
          break;
        }
      } else {
        int sub_id;
        int num = hw_subset_item.num[0];
        int offset = hw_subset_item.offset[0];
        if (is_absolute)
          sub_id = abs_sub_ids[level];
        else
          sub_id = hw_thread.sub_ids[level];
        if (hw_thread.ids[level] == kmp_hw_thread_t::UNKNOWN_ID ||
            sub_id < offset ||
            (num != kmp_hw_subset_t::USE_ALL && sub_id >= offset + num)) {
          should_be_filtered = true;
          break;
        }
      }
    }
    // Collect filtering information
    if (should_be_filtered) {
      KMP_CPU_CLR(hw_thread.os_id, filtered_mask);
      num_filtered++;
    }
  }

  // One last check that we shouldn't allow filtering the entire machine
  if (num_filtered == num_hw_threads) {
    KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetAllFiltered);
    KMP_CPU_FREE(filtered_mask);
    return false;
  }

  // Apply the filter
  restrict_to_mask(filtered_mask);
  KMP_CPU_FREE(filtered_mask);
  return true;
}
bool kmp_topology_t::is_close(int hwt1, int hwt2,
                              const kmp_affinity_t &stgs) const {
  int hw_level = stgs.gran_levels;
  if (hw_level >= depth)
    return true;
  const kmp_hw_thread_t &t1 = hw_threads[hwt1];
  const kmp_hw_thread_t &t2 = hw_threads[hwt2];
  if (stgs.flags.core_types_gran)
    return t1.attrs.get_core_type() == t2.attrs.get_core_type();
  if (stgs.flags.core_effs_gran)
    return t1.attrs.get_core_eff() == t2.attrs.get_core_eff();
  for (int i = 0; i < (depth - hw_level); ++i) {
    if (t1.ids[i] != t2.ids[i])
      return false;
  }
  return true;
}
////////////////////////////////////////////////////////////////////////////////

bool KMPAffinity::picked_api = false;

void *KMPAffinity::Mask::operator new(size_t n) { return __kmp_allocate(n); }
void *KMPAffinity::Mask::operator new[](size_t n) { return __kmp_allocate(n); }
void KMPAffinity::Mask::operator delete(void *p) { __kmp_free(p); }
void KMPAffinity::Mask::operator delete[](void *p) { __kmp_free(p); }
void *KMPAffinity::operator new(size_t n) { return __kmp_allocate(n); }
void KMPAffinity::operator delete(void *p) { __kmp_free(p); }

void KMPAffinity::pick_api() {
  KMPAffinity *affinity_dispatch;
  if (picked_api)
    return;
#if KMP_USE_HWLOC
  // Only use Hwloc if affinity isn't explicitly disabled and
  // user requests the Hwloc topology method
  if (__kmp_affinity_top_method == affinity_top_method_hwloc &&
      __kmp_affinity.type != affinity_disabled) {
    affinity_dispatch = new KMPHwlocAffinity();
    __kmp_hwloc_available = true;
  } else
#endif
  {
    affinity_dispatch = new KMPNativeAffinity();
  }
  __kmp_affinity_dispatch = affinity_dispatch;
  picked_api = true;
}

void KMPAffinity::destroy_api() {
  if (__kmp_affinity_dispatch != NULL) {
    delete __kmp_affinity_dispatch;
    __kmp_affinity_dispatch = NULL;
    picked_api = false;
  }
}
#define KMP_ADVANCE_SCAN(scan)                                                 \
  while (*scan != '\0') {                                                      \
    scan++;                                                                    \
  }

// Print the affinity mask to the character array in a pretty format.
// The format is a comma separated list of non-negative integers or integer
// ranges: e.g., 1,2,3-5,7,9-15
// The format can also be the string "{<empty>}" if no bits are set in mask
char *__kmp_affinity_print_mask(char *buf, int buf_len,
                                kmp_affin_mask_t *mask) {
  int start = 0, finish = 0, previous = 0;
  bool first_range;
  KMP_ASSERT(buf);
  KMP_ASSERT(buf_len >= 40);
  KMP_ASSERT(mask);
  char *scan = buf;
  char *end = buf + buf_len - 1;

  // Check for empty set.
  if (mask->begin() == mask->end()) {
    KMP_SNPRINTF(scan, end - scan + 1, "{<empty>}");
    KMP_ADVANCE_SCAN(scan);
    KMP_ASSERT(scan <= end);
    return buf;
  }

  first_range = true;
  start = mask->begin();
  while (true) {
    // Find next range
    // [start, previous] is inclusive range of contiguous bits in mask
    for (finish = mask->next(start), previous = start;
         finish == previous + 1 && finish != mask->end();
         finish = mask->next(finish)) {
      previous = finish;
    }

    // The first range does not need a comma printed before it, but the rest
    // of the ranges do need a comma beforehand
    if (!first_range) {
      KMP_SNPRINTF(scan, end - scan + 1, "%s", ",");
      KMP_ADVANCE_SCAN(scan);
    } else {
      first_range = false;
    }
    // Range with three or more contiguous bits in the affinity mask
    if (previous - start > 1) {
      KMP_SNPRINTF(scan, end - scan + 1, "%u-%u", start, previous);
    } else {
      // Range with one or two contiguous bits in the affinity mask
      KMP_SNPRINTF(scan, end - scan + 1, "%u", start);
      KMP_ADVANCE_SCAN(scan);
      if (previous - start > 0) {
        KMP_SNPRINTF(scan, end - scan + 1, ",%u", previous);
      }
    }
    KMP_ADVANCE_SCAN(scan);
    // Start over with new start point
    start = finish;
    if (start == mask->end())
      break;
    // Check for overflow
    if (end - scan < 2)
      break;
  }

  // Check for overflow
  KMP_ASSERT(scan <= end);
  return buf;
}
#undef KMP_ADVANCE_SCAN
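
// Example rendered masks (format described above): bits {0,1,2,3,8,10,11}
// print as "0-3,8,10-11"; a single pair {5,6} prints as "5,6"; an empty mask
// prints as "{<empty>}".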
// Print the affinity mask to the string buffer object in a pretty format,
// using the same comma separated integer/range format as above.
kmp_str_buf_t *__kmp_affinity_str_buf_mask(kmp_str_buf_t *buf,
                                           kmp_affin_mask_t *mask) {
  int start = 0, finish = 0, previous = 0;
  bool first_range;
  KMP_ASSERT(buf);
  KMP_ASSERT(mask);

  __kmp_str_buf_clear(buf);

  // Check for empty set.
  if (mask->begin() == mask->end()) {
    __kmp_str_buf_print(buf, "%s", "{<empty>}");
    return buf;
  }

  first_range = true;
  start = mask->begin();
  while (true) {
    // Find next range
    // [start, previous] is inclusive range of contiguous bits in mask
    for (finish = mask->next(start), previous = start;
         finish == previous + 1 && finish != mask->end();
         finish = mask->next(finish)) {
      previous = finish;
    }

    // The first range does not need a comma printed before it, but the rest
    // of the ranges do need a comma beforehand
    if (!first_range) {
      __kmp_str_buf_print(buf, "%s", ",");
    } else {
      first_range = false;
    }
    // Range with three or more contiguous bits in the affinity mask
    if (previous - start > 1) {
      __kmp_str_buf_print(buf, "%u-%u", start, previous);
    } else {
      // Range with one or two contiguous bits in the affinity mask
      __kmp_str_buf_print(buf, "%u", start);
      if (previous - start > 0) {
        __kmp_str_buf_print(buf, ",%u", previous);
      }
    }
    // Start over with new start point
    start = finish;
    if (start == mask->end())
      break;
  }
  return buf;
}
// Parse a CSV list of integer ranges from the file given by path into a
// newly allocated affinity mask, e.g., "1,2,4-7,9,11-15"
static kmp_affin_mask_t *__kmp_parse_cpu_list(const char *path) {
  kmp_affin_mask_t *mask;
  KMP_CPU_ALLOC(mask);
  KMP_CPU_ZERO(mask);
#if KMP_OS_LINUX
  int n, begin_cpu, end_cpu;
  kmp_safe_raii_file_t file;
  auto skip_ws = [](FILE *f) {
    int c;
    do {
      c = fgetc(f);
    } while (isspace(c));
    if (c != EOF)
      ungetc(c, f);
  };
  int status = file.try_open(path, "r");
  if (status != 0)
    return mask;
  while (!feof(file)) {
    skip_ws(file);
    n = fscanf(file, "%d", &begin_cpu);
    if (n != 1)
      break;
    skip_ws(file);
    int c = fgetc(file);
    if (c == EOF || c == ',') {
      // Just single CPU
      end_cpu = begin_cpu;
    } else if (c == '-') {
      // Range of CPUs
      skip_ws(file);
      n = fscanf(file, "%d", &end_cpu);
      if (n != 1)
        break;
      skip_ws(file);
      c = fgetc(file); // skip ','
    } else {
      break;
    }
    // Ensure a valid range of CPUs
    if (begin_cpu < 0 || begin_cpu >= __kmp_xproc || end_cpu < 0 ||
        end_cpu >= __kmp_xproc || begin_cpu > end_cpu) {
      continue;
    }
    // Insert [begin_cpu, end_cpu] into mask
    for (int cpu = begin_cpu; cpu <= end_cpu; ++cpu) {
      KMP_CPU_SET(cpu, mask);
    }
  }
#endif
  return mask;
}
// Return (possibly empty) affinity mask representing the offline CPUs
// Caller must free the mask
kmp_affin_mask_t *__kmp_affinity_get_offline_cpus() {
  return __kmp_parse_cpu_list("/sys/devices/system/cpu/offline");
}
// Return the number of available procs
int __kmp_affinity_entire_machine_mask(kmp_affin_mask_t *mask) {
  int avail_proc = 0;
  KMP_CPU_ZERO(mask);
#if KMP_GROUP_AFFINITY
  if (__kmp_num_proc_groups > 1) {
    int group;
    KMP_DEBUG_ASSERT(__kmp_GetActiveProcessorCount != NULL);
    for (group = 0; group < __kmp_num_proc_groups; group++) {
      int i;
      int num = __kmp_GetActiveProcessorCount(group);
      for (i = 0; i < num; i++) {
        KMP_CPU_SET(i + group * (CHAR_BIT * sizeof(DWORD_PTR)), mask);
        avail_proc++;
      }
    }
  } else
#endif /* KMP_GROUP_AFFINITY */
  {
    int proc;
    kmp_affin_mask_t *offline_cpus = __kmp_affinity_get_offline_cpus();
    for (proc = 0; proc < __kmp_xproc; proc++) {
      // Skip offline CPUs
      if (KMP_CPU_ISSET(proc, offline_cpus))
        continue;
      KMP_CPU_SET(proc, mask);
      avail_proc++;
    }
    KMP_CPU_FREE(offline_cpus);
  }
  return avail_proc;
}
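
// Example (assumed file contents): if /sys/devices/system/cpu/offline reads
// "4-7,12", __kmp_affinity_get_offline_cpus() returns a mask with bits
// 4,5,6,7 and 12 set and those procs are skipped above; when all CPUs are
// online the file is empty and the offline mask stays zeroed.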
kmp_affin_mask_t *__kmp_affin_fullMask = NULL;
kmp_affin_mask_t *__kmp_affin_origMask = NULL;
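
// Note: __kmp_affin_fullMask is the set of OS procs available to the
// library, and __kmp_affin_origMask preserves a copy that
// restrict_to_mask() (above) keeps in sync when the topology is filtered.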
#if KMP_USE_HWLOC
static inline bool __kmp_hwloc_is_cache_type(hwloc_obj_t obj) {
#if HWLOC_API_VERSION >= 0x00020000
  return hwloc_obj_type_is_cache(obj->type);
#else
  return obj->type == HWLOC_OBJ_CACHE;
#endif
}
// Returns KMP_HW_* type derived from HWLOC_* type
static inline kmp_hw_t __kmp_hwloc_type_2_topology_type(hwloc_obj_t obj) {

  if (__kmp_hwloc_is_cache_type(obj)) {
    if (obj->attr->cache.type == HWLOC_OBJ_CACHE_INSTRUCTION)
      return KMP_HW_UNKNOWN;
    switch (obj->attr->cache.depth) {
    case 1:
      return KMP_HW_L1;
    case 2:
#if KMP_MIC_SUPPORTED
      if (__kmp_mic_type == mic3) {
        return KMP_HW_TILE;
      }
#endif
      return KMP_HW_L2;
    case 3:
      return KMP_HW_L3;
    }
    return KMP_HW_UNKNOWN;
  }

  switch (obj->type) {
  case HWLOC_OBJ_PACKAGE:
    return KMP_HW_SOCKET;
  case HWLOC_OBJ_NUMANODE:
    return KMP_HW_NUMA;
  case HWLOC_OBJ_CORE:
    return KMP_HW_CORE;
  case HWLOC_OBJ_PU:
    return KMP_HW_THREAD;
  case HWLOC_OBJ_GROUP:
#if HWLOC_API_VERSION >= 0x00020000
    if (obj->attr->group.kind == HWLOC_GROUP_KIND_INTEL_DIE)
      return KMP_HW_DIE;
    else if (obj->attr->group.kind == HWLOC_GROUP_KIND_INTEL_TILE)
      return KMP_HW_TILE;
    else if (obj->attr->group.kind == HWLOC_GROUP_KIND_INTEL_MODULE)
      return KMP_HW_MODULE;
    else if (obj->attr->group.kind == HWLOC_GROUP_KIND_WINDOWS_PROCESSOR_GROUP)
      return KMP_HW_PROC_GROUP;
#endif
    return KMP_HW_UNKNOWN;
#if HWLOC_API_VERSION >= 0x00020100
  case HWLOC_OBJ_DIE:
    return KMP_HW_DIE;
#endif
  }
  return KMP_HW_UNKNOWN;
}
// Returns the number of objects of type 'type' below 'obj' in the topology
// tree, e.g., the number of HWLOC_OBJ_PU objects under a HWLOC_OBJ_PACKAGE
static int __kmp_hwloc_get_nobjs_under_obj(hwloc_obj_t obj,
                                           hwloc_obj_type_t type) {
  int retval = 0;
  hwloc_obj_t first;
  for (first = hwloc_get_obj_below_by_type(__kmp_hwloc_topology, obj->type,
                                           obj->logical_index, type, 0);
       first != NULL && hwloc_get_ancestor_obj_by_type(__kmp_hwloc_topology,
                                                       obj->type, first) == obj;
       first = hwloc_get_next_obj_by_type(__kmp_hwloc_topology, first->type,
                                          first))
    ++retval;
  return retval;
}
// Gets the sub_id for a lower object under a higher object in the topology
// tree
static int __kmp_hwloc_get_sub_id(hwloc_topology_t t, hwloc_obj_t higher,
                                  hwloc_obj_t lower) {
  hwloc_obj_t obj;
  hwloc_obj_type_t ltype = lower->type;
  int lindex = lower->logical_index - 1;
  int sub_id = 0;
  // Get the previous lower object
  obj = hwloc_get_obj_by_type(t, ltype, lindex);
  while (obj && lindex >= 0 &&
         hwloc_bitmap_isincluded(obj->cpuset, higher->cpuset)) {
    if (obj->userdata) {
      sub_id = (int)(RCAST(kmp_intptr_t, obj->userdata));
      break;
    }
    sub_id++;
    lindex--;
    obj = hwloc_get_obj_by_type(t, ltype, lindex);
  }
  // Store sub_id + 1 so that 0 is differed from NULL
  lower->userdata = RCAST(void *, sub_id + 1);
  return sub_id;
}
static bool __kmp_affinity_create_hwloc_map(kmp_i18n_id_t *const msg_id) {
  kmp_hw_t type;
  int hw_thread_index, sub_id;
  int depth;
  hwloc_obj_t pu, obj, root, prev;
  kmp_hw_t types[KMP_HW_LAST];
  hwloc_obj_type_t hwloc_types[KMP_HW_LAST];

  hwloc_topology_t tp = __kmp_hwloc_topology;
  *msg_id = kmp_i18n_null;
  if (__kmp_affinity.flags.verbose) {
    KMP_INFORM(AffUsingHwloc, "KMP_AFFINITY");
  }

  if (!KMP_AFFINITY_CAPABLE()) {
    // Hack to try and infer the machine topology using only the data
    // available from hwloc on the current thread, and __kmp_xproc.
    KMP_ASSERT(__kmp_affinity.type == affinity_none);
    // hwloc only guarantees existance of PU object, so check PACKAGE and CORE
    hwloc_obj_t o = hwloc_get_obj_by_type(tp, HWLOC_OBJ_PACKAGE, 0);
    if (o != NULL)
      nCoresPerPkg = __kmp_hwloc_get_nobjs_under_obj(o, HWLOC_OBJ_CORE);
    else
      nCoresPerPkg = 1; // no PACKAGE found
    o = hwloc_get_obj_by_type(tp, HWLOC_OBJ_CORE, 0);
    if (o != NULL)
      __kmp_nThreadsPerCore = __kmp_hwloc_get_nobjs_under_obj(o, HWLOC_OBJ_PU);
    else
      __kmp_nThreadsPerCore = 1; // no CORE found
    if (__kmp_nThreadsPerCore == 0)
      __kmp_nThreadsPerCore = 1;
    __kmp_ncores = __kmp_xproc / __kmp_nThreadsPerCore;
    if (nCoresPerPkg == 0)
      nCoresPerPkg = 1; // to prevent possible division by 0
    nPackages = (__kmp_xproc + nCoresPerPkg - 1) / nCoresPerPkg;
    return true;
  }

#if HWLOC_API_VERSION >= 0x00020400
  // Handle multiple types of cores if they exist on the system
  int nr_cpu_kinds = hwloc_cpukinds_get_nr(tp, 0);

  typedef struct kmp_hwloc_cpukinds_info_t {
    int efficiency;
    kmp_hw_core_type_t core_type;
    hwloc_bitmap_t mask;
  } kmp_hwloc_cpukinds_info_t;
  kmp_hwloc_cpukinds_info_t *cpukinds = nullptr;

  if (nr_cpu_kinds > 0) {
    unsigned nr_infos;
    struct hwloc_info_s *infos;
    cpukinds = (kmp_hwloc_cpukinds_info_t *)__kmp_allocate(
        sizeof(kmp_hwloc_cpukinds_info_t) * nr_cpu_kinds);
    for (unsigned idx = 0; idx < (unsigned)nr_cpu_kinds; ++idx) {
      cpukinds[idx].efficiency = -1;
      cpukinds[idx].core_type = KMP_HW_CORE_TYPE_UNKNOWN;
      cpukinds[idx].mask = hwloc_bitmap_alloc();
      if (hwloc_cpukinds_get_info(tp, idx, cpukinds[idx].mask,
                                  &cpukinds[idx].efficiency, &nr_infos, &infos,
                                  0) == 0) {
        for (unsigned i = 0; i < nr_infos; ++i) {
          if (__kmp_str_match("CoreType", 8, infos[i].name)) {
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
            if (__kmp_str_match("IntelAtom", 9, infos[i].value)) {
              cpukinds[idx].core_type = KMP_HW_CORE_TYPE_ATOM;
              break;
            } else if (__kmp_str_match("IntelCore", 9, infos[i].value)) {
              cpukinds[idx].core_type = KMP_HW_CORE_TYPE_CORE;
              break;
            }
#endif
          }
        }
      }
    }
  }
#endif

  root = hwloc_get_root_obj(tp);

  // Figure out the depth and types in the topology
  depth = 0;
  obj = hwloc_get_pu_obj_by_os_index(tp, __kmp_affin_fullMask->begin());
  while (obj && obj != root) {
#if HWLOC_API_VERSION >= 0x00020000
    if (obj->memory_arity) {
      hwloc_obj_t memory;
      for (memory = obj->memory_first_child; memory;
           memory = hwloc_get_next_child(tp, obj, memory)) {
        if (memory->type == HWLOC_OBJ_NUMANODE)
          break;
      }
      if (memory && memory->type == HWLOC_OBJ_NUMANODE) {
        types[depth] = KMP_HW_NUMA;
        hwloc_types[depth] = memory->type;
        depth++;
      }
    }
#endif
    type = __kmp_hwloc_type_2_topology_type(obj);
    if (type != KMP_HW_UNKNOWN) {
      types[depth] = type;
      hwloc_types[depth] = obj->type;
      depth++;
    }
    obj = obj->parent;
  }
  KMP_ASSERT(depth > 0);

  // Get the order for the types correct
  for (int i = 0, j = depth - 1; i < j; ++i, --j) {
    hwloc_obj_type_t hwloc_temp = hwloc_types[i];
    kmp_hw_t temp = types[i];
    types[i] = types[j];
    types[j] = temp;
    hwloc_types[i] = hwloc_types[j];
    hwloc_types[j] = hwloc_temp;
  }

  // Allocate the data structure to be returned.
  __kmp_topology = kmp_topology_t::allocate(__kmp_avail_proc, depth, types);

  hw_thread_index = 0;
  pu = NULL;
  while ((pu = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PU, pu))) {
    int index = depth - 1;
    bool included = KMP_CPU_ISSET(pu->os_index, __kmp_affin_fullMask);
    kmp_hw_thread_t &hw_thread = __kmp_topology->at(hw_thread_index);
    if (included) {
      hw_thread.clear();
      hw_thread.ids[index] = pu->logical_index;
      hw_thread.os_id = pu->os_index;
      hw_thread.original_idx = hw_thread_index;
      // If multiple core types, then set that attribute for the hw thread
#if HWLOC_API_VERSION >= 0x00020400
      if (cpukinds) {
        int cpukind_index = -1;
        for (int i = 0; i < nr_cpu_kinds; ++i) {
          if (hwloc_bitmap_isset(cpukinds[i].mask, hw_thread.os_id)) {
            cpukind_index = i;
            break;
          }
        }
        if (cpukind_index >= 0) {
          hw_thread.attrs.set_core_type(cpukinds[cpukind_index].core_type);
          hw_thread.attrs.set_core_eff(cpukinds[cpukind_index].efficiency);
        }
      }
#endif
      index--;
    }
    obj = pu;
    prev = obj;
    while (obj != root && obj != NULL) {
      obj = obj->parent;
#if HWLOC_API_VERSION >= 0x00020000
      // NUMA Nodes are handled differently since they are not within the
      // parent/child structure anymore. They are separate children
      // of obj (memory_first_child points to first memory child)
      if (obj->memory_arity) {
        hwloc_obj_t memory;
        for (memory = obj->memory_first_child; memory;
             memory = hwloc_get_next_child(tp, obj, memory)) {
          if (memory->type == HWLOC_OBJ_NUMANODE)
            break;
        }
        if (memory && memory->type == HWLOC_OBJ_NUMANODE) {
          sub_id = __kmp_hwloc_get_sub_id(tp, memory, prev);
          if (included) {
            hw_thread.ids[index] = memory->logical_index;
            hw_thread.ids[index + 1] = sub_id;
            index--;
          }
        }
        prev = obj;
      }
#endif
      type = __kmp_hwloc_type_2_topology_type(obj);
      if (type != KMP_HW_UNKNOWN) {
        sub_id = __kmp_hwloc_get_sub_id(tp, obj, prev);
        if (included) {
          hw_thread.ids[index] = obj->logical_index;
          hw_thread.ids[index + 1] = sub_id;
          index--;
        }
        prev = obj;
      }
    }
    if (included)
      hw_thread_index++;
  }

#if HWLOC_API_VERSION >= 0x00020400
  // Free the core types information
  if (cpukinds) {
    for (int idx = 0; idx < nr_cpu_kinds; ++idx)
      hwloc_bitmap_free(cpukinds[idx].mask);
    __kmp_free(cpukinds);
  }
#endif
  __kmp_topology->sort_ids();
  return true;
}
#endif // KMP_USE_HWLOC
// If affinity information is unavailable, create a flat
// socket/core/thread topology from __kmp_xproc.
static bool __kmp_affinity_create_flat_map(kmp_i18n_id_t *const msg_id) {
  *msg_id = kmp_i18n_null;
  int depth = 3;
  kmp_hw_t types[] = {KMP_HW_SOCKET, KMP_HW_CORE, KMP_HW_THREAD};

  if (__kmp_affinity.flags.verbose) {
    KMP_INFORM(UsingFlatOS, "KMP_AFFINITY");
  }

  // Even if __kmp_affinity.type == affinity_none, this routine might still
  // be called to set __kmp_ncores, as well as
  // __kmp_nThreadsPerCore, nCoresPerPkg, & nPackages.
  if (!KMP_AFFINITY_CAPABLE()) {
    KMP_ASSERT(__kmp_affinity.type == affinity_none);
    __kmp_ncores = nPackages = __kmp_xproc;
    __kmp_nThreadsPerCore = nCoresPerPkg = 1;
    return true;
  }

  // Make sure all the global topology vars are set correctly
  __kmp_ncores = nPackages = __kmp_avail_proc;
  __kmp_nThreadsPerCore = nCoresPerPkg = 1;

  // Construct the data structure to be returned.
  __kmp_topology = kmp_topology_t::allocate(__kmp_avail_proc, depth, types);
  int avail_ct = 0;
  int i;
  KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
    // Skip this proc if it is not included in the machine model.
    if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
      continue;
    }
    kmp_hw_thread_t &hw_thread = __kmp_topology->at(avail_ct);
    hw_thread.clear();
    hw_thread.os_id = i;
    hw_thread.original_idx = avail_ct;
    hw_thread.ids[0] = i;
    hw_thread.ids[1] = 0;
    hw_thread.ids[2] = 0;
    avail_ct++;
  }
  if (__kmp_affinity.flags.verbose) {
    KMP_INFORM(OSProcToPackage, "KMP_AFFINITY");
  }
  return true;
}
#if KMP_GROUP_AFFINITY
// If multiple Windows* OS processor groups exist, we can create a 2-level
// topology map with the groups at level 0 and the individual procs at level
// 1. This lets the threads float among all procs in a group, if
// granularity=group (the default when there are multiple groups).
static bool __kmp_affinity_create_proc_group_map(kmp_i18n_id_t *const msg_id) {
  *msg_id = kmp_i18n_null;
  int depth = 3;
  kmp_hw_t types[] = {KMP_HW_PROC_GROUP, KMP_HW_CORE, KMP_HW_THREAD};
  const static size_t BITS_PER_GROUP = CHAR_BIT * sizeof(DWORD_PTR);

  if (__kmp_affinity.flags.verbose) {
    KMP_INFORM(AffWindowsProcGroupMap, "KMP_AFFINITY");
  }

  // If we aren't affinity capable, then use flat topology
  if (!KMP_AFFINITY_CAPABLE()) {
    KMP_ASSERT(__kmp_affinity.type == affinity_none);
    nPackages = __kmp_num_proc_groups;
    __kmp_nThreadsPerCore = 1;
    __kmp_ncores = __kmp_xproc;
    nCoresPerPkg = nPackages / __kmp_ncores;
    return true;
  }

  // Construct the data structure to be returned.
  __kmp_topology = kmp_topology_t::allocate(__kmp_avail_proc, depth, types);
  int avail_ct = 0;
  int i;
  KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
    // Skip this proc if it is not included in the machine model.
    if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
      continue;
    }
    kmp_hw_thread_t &hw_thread = __kmp_topology->at(avail_ct);
    hw_thread.clear();
    hw_thread.os_id = i;
    hw_thread.original_idx = avail_ct;
    hw_thread.ids[0] = i / BITS_PER_GROUP;
    hw_thread.ids[1] = hw_thread.ids[2] = i % BITS_PER_GROUP;
    avail_ct++;
  }
  return true;
}
#endif /* KMP_GROUP_AFFINITY */
#if KMP_ARCH_X86 || KMP_ARCH_X86_64

template <kmp_uint32 LSB, kmp_uint32 MSB>
static inline unsigned __kmp_extract_bits(kmp_uint32 v) {
  const kmp_uint32 SHIFT_LEFT = sizeof(kmp_uint32) * 8 - 1 - MSB;
  const kmp_uint32 SHIFT_RIGHT = LSB;
  kmp_uint32 retval = v;
  retval <<= SHIFT_LEFT;
  retval >>= (SHIFT_LEFT + SHIFT_RIGHT);
  return retval;
}

static int __kmp_cpuid_mask_width(int count) {
  int r = 0;
  while ((1 << r) < count)
    ++r;
  return r;
}
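
// Worked examples: __kmp_extract_bits<0, 4>(v) keeps bits 0..4 of v, so
// __kmp_extract_bits<0, 4>(0x3F) == 0x1F. __kmp_cpuid_mask_width(count)
// returns the smallest width holding 'count' values: count = 6 gives 3,
// since 1 << 3 = 8 >= 6 while 1 << 2 = 4 < 6.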
class apicThreadInfo {
public:
  unsigned osId; // param to __kmp_affinity_bind_thread
  unsigned apicId; // from cpuid after binding
  unsigned maxCoresPerPkg; //      ""
  unsigned maxThreadsPerPkg; //      ""
  unsigned pkgId; // inferred from above values
  unsigned coreId; //      ""
  unsigned threadId; //      ""
};

static int __kmp_affinity_cmp_apicThreadInfo_phys_id(const void *a,
                                                     const void *b) {
  const apicThreadInfo *aa = (const apicThreadInfo *)a;
  const apicThreadInfo *bb = (const apicThreadInfo *)b;
  if (aa->pkgId < bb->pkgId)
    return -1;
  if (aa->pkgId > bb->pkgId)
    return 1;
  if (aa->coreId < bb->coreId)
    return -1;
  if (aa->coreId > bb->coreId)
    return 1;
  if (aa->threadId < bb->threadId)
    return -1;
  if (aa->threadId > bb->threadId)
    return 1;
  return 0;
}
class cpuid_cache_info_t {
public:
  struct info_t {
    unsigned level = 0;
    unsigned mask = 0;
    bool operator==(const info_t &rhs) const {
      return level == rhs.level && mask == rhs.mask;
    }
    bool operator!=(const info_t &rhs) const { return !operator==(rhs); }
  };
  cpuid_cache_info_t() : depth(0) {
    table[MAX_CACHE_LEVEL].level = 0;
    table[MAX_CACHE_LEVEL].mask = 0;
  }
  size_t get_depth() const { return depth; }
  info_t &operator[](size_t index) { return table[index]; }
  const info_t &operator[](size_t index) const { return table[index]; }
  bool operator==(const cpuid_cache_info_t &rhs) const {
    if (rhs.depth != depth)
      return false;
    for (size_t i = 0; i < depth; ++i)
      if (table[i] != rhs.table[i])
        return false;
    return true;
  }
  bool operator!=(const cpuid_cache_info_t &rhs) const {
    return !operator==(rhs);
  }
  // Get cache information associated with L1, L2, L3 cache, etc.
  // If level does not exist, then return the "NULL" level (level 0)
  const info_t &get_level(unsigned level) const {
    for (size_t i = 0; i < depth; ++i) {
      if (table[i].level == level)
        return table[i];
    }
    return table[MAX_CACHE_LEVEL];
  }

  static kmp_hw_t get_topology_type(unsigned level) {
    KMP_DEBUG_ASSERT(level >= 1 && level <= MAX_CACHE_LEVEL);
    switch (level) {
    case 1:
      return KMP_HW_L1;
    case 2:
      return KMP_HW_L2;
    case 3:
      return KMP_HW_L3;
    }
    return KMP_HW_UNKNOWN;
  }
  // Read the data/unified cache levels from CPUID leaf 4
  void get_leaf4_levels() {
    unsigned level = 0;
    while (depth < MAX_CACHE_LEVEL) {
      unsigned cache_type, max_threads_sharing;
      unsigned cache_level, cache_mask_width;
      kmp_cpuid buf2;
      __kmp_x86_cpuid(4, level, &buf2);
      cache_type = __kmp_extract_bits<0, 4>(buf2.eax);
      if (!cache_type)
        break;
      // Skip instruction caches
      if (cache_type == 2) {
        level++;
        continue;
      }
      max_threads_sharing = __kmp_extract_bits<14, 25>(buf2.eax) + 1;
      cache_mask_width = __kmp_cpuid_mask_width(max_threads_sharing);
      cache_level = __kmp_extract_bits<5, 7>(buf2.eax);
      table[depth].level = cache_level;
      table[depth].mask = ((0xffffffffu) << cache_mask_width);
      depth++;
      level++;
    }
  }
  static const int MAX_CACHE_LEVEL = 3;

private:
  size_t depth;
  info_t table[MAX_CACHE_LEVEL + 1];
};
// On IA-32 architecture and Intel(R) 64 architecture, cycle through the
// available os threads and extract the apic id of each thread via cpuid.
static bool __kmp_affinity_create_apicid_map(kmp_i18n_id_t *const msg_id) {
  kmp_cpuid buf;
  *msg_id = kmp_i18n_null;

  if (__kmp_affinity.flags.verbose) {
    KMP_INFORM(AffInfoStr, "KMP_AFFINITY", KMP_I18N_STR(DecodingLegacyAPIC));
  }

  // Check if cpuid leaf 4 is supported.
  __kmp_x86_cpuid(0, 0, &buf);
  if (buf.eax < 4) {
    *msg_id = kmp_i18n_str_NoLeaf4Support;
    return false;
  }

  // If we aren't affinity capable, infer the machine topology using only the
  // data available from cpuid on the current thread, and __kmp_xproc.
  if (!KMP_AFFINITY_CAPABLE()) {
    KMP_ASSERT(__kmp_affinity.type == affinity_none);

    // Upper bound on threads per package from cpuid(1)
    __kmp_x86_cpuid(1, 0, &buf);
    int maxThreadsPerPkg = (buf.ebx >> 16) & 0xff;
    if (maxThreadsPerPkg == 0) {
      maxThreadsPerPkg = 1;
    }

    // Cores per package from cpuid(4), if supported (cpuid(0).eax >= 4)
    __kmp_x86_cpuid(0, 0, &buf);
    if (buf.eax >= 4) {
      __kmp_x86_cpuid(4, 0, &buf);
      nCoresPerPkg = ((buf.eax >> 26) & 0x3f) + 1;
    } else {
      nCoresPerPkg = 1;
    }

    // Without binding to each thread there is no reliable HT detection,
    // so assume one thread per core.
    __kmp_ncores = __kmp_xproc;
    nPackages = (__kmp_xproc + nCoresPerPkg - 1) / nCoresPerPkg;
    __kmp_nThreadsPerCore = 1;
    return true;
  }

  // From here on, it is safe to call __kmp_get_system_affinity() and
  // __kmp_set_system_affinity(), even if __kmp_affinity.type =
  // affinity_none. Save the affinity mask for the current thread.
  kmp_affinity_raii_t previous_affinity;
  // Run through each of the available contexts, binding the current thread
  // to it, and obtaining the pertinent information using the cpuid instr.
  unsigned i;
  apicThreadInfo *threadInfo = (apicThreadInfo *)__kmp_allocate(
      __kmp_avail_proc * sizeof(apicThreadInfo));
  unsigned nApics = 0;
  KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
    // Skip this proc if it is not included in the machine model.
    if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
      continue;
    }
    KMP_DEBUG_ASSERT((int)nApics < __kmp_avail_proc);

    __kmp_affinity_dispatch->bind_thread(i);
    threadInfo[nApics].osId = i;

    // The apic id and max threads per pkg come from cpuid(1).
    __kmp_x86_cpuid(1, 0, &buf);
    if (((buf.edx >> 9) & 1) == 0) {
      __kmp_free(threadInfo);
      *msg_id = kmp_i18n_str_ApicNotPresent;
      return false;
    }
    threadInfo[nApics].apicId = (buf.ebx >> 24) & 0xff;
    threadInfo[nApics].maxThreadsPerPkg = (buf.ebx >> 16) & 0xff;
    if (threadInfo[nApics].maxThreadsPerPkg == 0) {
      threadInfo[nApics].maxThreadsPerPkg = 1;
    }

    // Max cores per pkg from cpuid(4), if supported (cpuid(0).eax >= 4)
    __kmp_x86_cpuid(0, 0, &buf);
    if (buf.eax >= 4) {
      __kmp_x86_cpuid(4, 0, &buf);
      threadInfo[nApics].maxCoresPerPkg = ((buf.eax >> 26) & 0x3f) + 1;
    } else {
      threadInfo[nApics].maxCoresPerPkg = 1;
    }

    // Infer the pkgId / coreId / threadId using only the info obtained
    // locally.
    int widthCT = __kmp_cpuid_mask_width(threadInfo[nApics].maxThreadsPerPkg);
    threadInfo[nApics].pkgId = threadInfo[nApics].apicId >> widthCT;

    int widthC = __kmp_cpuid_mask_width(threadInfo[nApics].maxCoresPerPkg);
    int widthT = widthCT - widthC;
    if (widthT < 0) {
      // This should never happen, unless the cpuid info is bogus
      __kmp_free(threadInfo);
      *msg_id = kmp_i18n_str_InvalidCpuidInfo;
      return false;
    }

    int maskC = (1 << widthC) - 1;
    threadInfo[nApics].coreId = (threadInfo[nApics].apicId >> widthT) & maskC;

    int maskT = (1 << widthT) - 1;
    threadInfo[nApics].threadId = threadInfo[nApics].apicId & maskT;
    nApics++;
  }
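
  // Decomposition example for the loop above (hypothetical apicId): with
  // maxThreadsPerPkg = 16 (widthCT = 4) and maxCoresPerPkg = 8 (widthC = 3,
  // widthT = 1), apicId 0x35 = 0b110101 gives pkgId = 0x35 >> 4 = 3,
  // coreId = (0x35 >> 1) & 7 = 2, threadId = 0x35 & 1 = 1.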
  // We've collected all the info we need. Restore the old affinity mask for
  // this thread.
  previous_affinity.restore();

  // Sort the threadInfo table by physical Id.
  qsort(threadInfo, nApics, sizeof(*threadInfo),
        __kmp_affinity_cmp_apicThreadInfo_phys_id);
  // The table is now sorted by pkgId / coreId / threadId, but we really
  // don't know the radix of any of the fields, so walk the table to
  // determine the radii and detect inconsistent cpuid info.
  nPackages = 1;
  nCoresPerPkg = 1;
  __kmp_nThreadsPerCore = 1;
  unsigned nCores = 1;

  unsigned pkgCt = 1; // to determine radii
  unsigned lastPkgId = threadInfo[0].pkgId;
  unsigned coreCt = 1;
  unsigned lastCoreId = threadInfo[0].coreId;
  unsigned threadCt = 1;
  unsigned lastThreadId = threadInfo[0].threadId;

  // intra-pkg consistency checks
  unsigned prevMaxCoresPerPkg = threadInfo[0].maxCoresPerPkg;
  unsigned prevMaxThreadsPerPkg = threadInfo[0].maxThreadsPerPkg;

  for (i = 1; i < nApics; i++) {
    if (threadInfo[i].pkgId != lastPkgId) {
      nCores++;
      pkgCt++;
      lastPkgId = threadInfo[i].pkgId;
      if ((int)coreCt > nCoresPerPkg)
        nCoresPerPkg = coreCt;
      coreCt = 1;
      lastCoreId = threadInfo[i].coreId;
      if ((int)threadCt > __kmp_nThreadsPerCore)
        __kmp_nThreadsPerCore = threadCt;
      threadCt = 1;
      lastThreadId = threadInfo[i].threadId;

      // This is a different package, so go on to the next iteration without
      // doing any consistency checks. Reset the consistency check vars.
      prevMaxCoresPerPkg = threadInfo[i].maxCoresPerPkg;
      prevMaxThreadsPerPkg = threadInfo[i].maxThreadsPerPkg;
      continue;
    }

    if (threadInfo[i].coreId != lastCoreId) {
      nCores++;
      coreCt++;
      lastCoreId = threadInfo[i].coreId;
      if ((int)threadCt > __kmp_nThreadsPerCore)
        __kmp_nThreadsPerCore = threadCt;
      threadCt = 1;
      lastThreadId = threadInfo[i].threadId;
    } else if (threadInfo[i].threadId != lastThreadId) {
      threadCt++;
      lastThreadId = threadInfo[i].threadId;
    } else {
      __kmp_free(threadInfo);
      *msg_id = kmp_i18n_str_LegacyApicIDsNotUnique;
      return false;
    }

    // Check to make certain that the maxCoresPerPkg and maxThreadsPerPkg
    // fields agree between all the threads bound to a given package.
    if ((prevMaxCoresPerPkg != threadInfo[i].maxCoresPerPkg) ||
        (prevMaxThreadsPerPkg != threadInfo[i].maxThreadsPerPkg)) {
      __kmp_free(threadInfo);
      *msg_id = kmp_i18n_str_InconsistentCpuidInfo;
      return false;
    }
  }
  // When affinity is off, this routine will still be called to set
  // __kmp_ncores, as well as __kmp_nThreadsPerCore, nCoresPerPkg, &
  // nPackages. Make sure all these vars are set correctly.
  nPackages = pkgCt;
  if ((int)coreCt > nCoresPerPkg)
    nCoresPerPkg = coreCt;
  if ((int)threadCt > __kmp_nThreadsPerCore)
    __kmp_nThreadsPerCore = threadCt;
  __kmp_ncores = nCores;
  KMP_DEBUG_ASSERT(nApics == (unsigned)__kmp_avail_proc);

  // Construct the data structure to be returned.
  int pkgLevel = 0;
  int coreLevel = 1;
  int threadLevel = 2;
  int depth = (pkgLevel >= 0) + (coreLevel >= 0) + (threadLevel >= 0);
  int idx = 0;
  kmp_hw_t types[3];
  if (pkgLevel >= 0)
    types[idx++] = KMP_HW_SOCKET;
  if (coreLevel >= 0)
    types[idx++] = KMP_HW_CORE;
  if (threadLevel >= 0)
    types[idx++] = KMP_HW_THREAD;

  KMP_ASSERT(depth > 0);
  __kmp_topology = kmp_topology_t::allocate(nApics, depth, types);

  for (i = 0; i < nApics; ++i) {
    idx = 0;
    unsigned os = threadInfo[i].osId;
    kmp_hw_thread_t &hw_thread = __kmp_topology->at(i);
    hw_thread.clear();

    if (pkgLevel >= 0) {
      hw_thread.ids[idx++] = threadInfo[i].pkgId;
    }
    if (coreLevel >= 0) {
      hw_thread.ids[idx++] = threadInfo[i].coreId;
    }
    if (threadLevel >= 0) {
      hw_thread.ids[idx++] = threadInfo[i].threadId;
    }
    hw_thread.os_id = os;
    hw_thread.original_idx = i;
  }

  __kmp_free(threadInfo);
  __kmp_topology->sort_ids();
  if (!__kmp_topology->check_ids()) {
    kmp_topology_t::deallocate(__kmp_topology);
    __kmp_topology = nullptr;
    *msg_id = kmp_i18n_str_LegacyApicIDsNotUnique;
    return false;
  }
  return true;
}
// Hybrid cpu detection using CPUID.1A
// Thread should be pinned to processor already
static void __kmp_get_hybrid_info(kmp_hw_core_type_t *type, int *efficiency,
                                  unsigned *native_model_id) {
  kmp_cpuid buf;
  __kmp_x86_cpuid(0x1a, 0, &buf);
  *type = (kmp_hw_core_type_t)__kmp_extract_bits<24, 31>(buf.eax);
  switch (*type) {
  case KMP_HW_CORE_TYPE_ATOM:
    *efficiency = 0;
    break;
  case KMP_HW_CORE_TYPE_CORE:
    *efficiency = 1;
    break;
  default:
    *efficiency = 0;
  }
  *native_model_id = __kmp_extract_bits<0, 23>(buf.eax);
}
// Intel(R) microarchitecture code name Nehalem, Dunnington and later
// architectures support a newer interface for specifying the x2APIC Ids,
// based on CPUID.B or CPUID.1F
enum {
  INTEL_LEVEL_TYPE_INVALID = 0, // Package level
  INTEL_LEVEL_TYPE_SMT = 1,
  INTEL_LEVEL_TYPE_CORE = 2,
  INTEL_LEVEL_TYPE_MODULE = 3,
  INTEL_LEVEL_TYPE_TILE = 4,
  INTEL_LEVEL_TYPE_DIE = 5,
  INTEL_LEVEL_TYPE_LAST = 6,
};
KMP_BUILD_ASSERT(INTEL_LEVEL_TYPE_LAST < sizeof(unsigned) * CHAR_BIT);
#define KMP_LEAF_1F_KNOWN_LEVELS ((1u << INTEL_LEVEL_TYPE_LAST) - 1u)
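
// The mask computes to (1u << 6) - 1 = 0x3F: one bit per known level type
// 0..5, so a CPUID.1F sub-leaf is accepted only when the bit for its level
// type is set in KMP_LEAF_1F_KNOWN_LEVELS.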
static kmp_hw_t __kmp_intel_type_2_topology_type(int intel_type) {
  switch (intel_type) {
  case INTEL_LEVEL_TYPE_INVALID:
    return KMP_HW_SOCKET;
  case INTEL_LEVEL_TYPE_SMT:
    return KMP_HW_THREAD;
  case INTEL_LEVEL_TYPE_CORE:
    return KMP_HW_CORE;
  case INTEL_LEVEL_TYPE_TILE:
    return KMP_HW_TILE;
  case INTEL_LEVEL_TYPE_MODULE:
    return KMP_HW_MODULE;
  case INTEL_LEVEL_TYPE_DIE:
    return KMP_HW_DIE;
  }
  return KMP_HW_UNKNOWN;
}
static int __kmp_topology_type_2_intel_type(kmp_hw_t type) {
  switch (type) {
  case KMP_HW_SOCKET:
    return INTEL_LEVEL_TYPE_INVALID;
  case KMP_HW_THREAD:
    return INTEL_LEVEL_TYPE_SMT;
  case KMP_HW_CORE:
    return INTEL_LEVEL_TYPE_CORE;
  case KMP_HW_TILE:
    return INTEL_LEVEL_TYPE_TILE;
  case KMP_HW_MODULE:
    return INTEL_LEVEL_TYPE_MODULE;
  case KMP_HW_DIE:
    return INTEL_LEVEL_TYPE_DIE;
  default:
    return INTEL_LEVEL_TYPE_INVALID;
  }
}
struct cpuid_level_info_t {
  unsigned level_type, mask, mask_width, nitems, cache_mask;
};

class cpuid_topo_desc_t {
  unsigned desc = 0;

public:
  void clear() { desc = 0; }
  bool contains(int intel_type) const {
    KMP_DEBUG_ASSERT(intel_type >= 0 && intel_type < INTEL_LEVEL_TYPE_LAST);
    if ((1u << intel_type) & desc)
      return true;
    return false;
  }
  bool contains_topology_type(kmp_hw_t type) const {
    KMP_DEBUG_ASSERT(type >= 0 && type < KMP_HW_LAST);
    int intel_type = __kmp_topology_type_2_intel_type(type);
    return contains(intel_type);
  }
  bool contains(cpuid_topo_desc_t rhs) const {
    return ((desc | rhs.desc) == desc);
  }
  void add(int intel_type) { desc |= (1u << intel_type); }
  void add(cpuid_topo_desc_t rhs) { desc |= rhs.desc; }
};

struct cpuid_proc_info_t {
  // Topology info
  int os_id;
  unsigned apic_id;
  unsigned depth;
  // Hybrid info
  unsigned native_model_id;
  int efficiency;
  kmp_hw_core_type_t type;
  cpuid_topo_desc_t description;

  cpuid_level_info_t levels[INTEL_LEVEL_TYPE_LAST];
};
static bool __kmp_x2apicid_get_levels(int leaf, cpuid_proc_info_t *info,
                                      kmp_hw_t total_types[KMP_HW_LAST],
                                      int *total_depth,
                                      cpuid_topo_desc_t *total_description) {
  unsigned level, levels_index;
  unsigned level_type, mask_width, nitems;
  kmp_cpuid buf;
  cpuid_level_info_t(&levels)[INTEL_LEVEL_TYPE_LAST] = info->levels;
  bool retval = false;

  level = levels_index = 0;
  do {
    __kmp_x86_cpuid(leaf, level, &buf);
    level_type = __kmp_extract_bits<8, 15>(buf.ecx);
    mask_width = __kmp_extract_bits<0, 4>(buf.eax);
    nitems = __kmp_extract_bits<0, 15>(buf.ebx);
    if (level_type != INTEL_LEVEL_TYPE_INVALID && nitems == 0) {
      info->depth = 0;
      return retval;
    }
    if (KMP_LEAF_1F_KNOWN_LEVELS & (1u << level_type)) {
      // Add a new level to the topology
      KMP_ASSERT(levels_index < INTEL_LEVEL_TYPE_LAST);
      levels[levels_index].level_type = level_type;
      levels[levels_index].mask_width = mask_width;
      levels[levels_index].nitems = nitems;
      levels_index++;
    } else {
      // If it is an unknown level, then logically move the previous layer up
      if (levels_index > 0) {
        levels[levels_index - 1].mask_width = mask_width;
        levels[levels_index - 1].nitems = nitems;
      }
    }
    level++;
  } while (level_type != INTEL_LEVEL_TYPE_INVALID);
  KMP_ASSERT(levels_index <= INTEL_LEVEL_TYPE_LAST);
  info->description.clear();
  info->depth = levels_index;

  // If types, depth, and total_description are uninitialized, set them now
  if (*total_depth == 0) {
    *total_depth = info->depth;
    total_description->clear();
    for (int i = *total_depth - 1, j = 0; i >= 0; --i, ++j) {
      total_types[j] =
          __kmp_intel_type_2_topology_type(info->levels[i].level_type);
      total_description->add(info->levels[i].level_type);
    }
    retval = true;
  }

  // The INTEL_LEVEL_TYPE_INVALID (package) level must not come first
  if (levels_index == 0 || levels[0].level_type == INTEL_LEVEL_TYPE_INVALID)
    return false;

  // Set the masks to & with apicid
  for (unsigned i = 0; i < levels_index; ++i) {
    if (levels[i].level_type != INTEL_LEVEL_TYPE_INVALID) {
      levels[i].mask = ~((0xffffffffu) << levels[i].mask_width);
      levels[i].cache_mask = (0xffffffffu) << levels[i].mask_width;
      for (unsigned j = 0; j < i; ++j)
        levels[i].mask ^= levels[j].mask;
    } else {
      KMP_DEBUG_ASSERT(i > 0);
      levels[i].mask = (0xffffffffu) << levels[i - 1].mask_width;
      levels[i].cache_mask = 0;
    }
    info->description.add(info->levels[i].level_type);
  }

  // If this processor has a level type not seen on other processors, then
  // make sure it is included in total_types, *total_depth, and
  // total_description
  if (!total_description->contains(info->description)) {
    for (int i = info->depth - 1, j = 0; i >= 0; --i, ++j) {
      // If this level is known already, then skip it
      if (total_description->contains(levels[i].level_type))
        continue;
      // Unknown level: insert it before the last known level
      kmp_hw_t curr_type =
          __kmp_intel_type_2_topology_type(levels[i].level_type);
      KMP_ASSERT(j != 0 && "Bad APIC Id information");
      // Move the known levels over to make room for the new level
      for (int k = info->depth - 1; k >= j; --k) {
        KMP_DEBUG_ASSERT(k + 1 < KMP_HW_LAST);
        total_types[k + 1] = total_types[k];
      }
      total_types[j] = curr_type;
      (*total_depth)++;
    }
    total_description->add(info->description);
    retval = true;
  }
  return retval;
}
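// Worked example of the mask arithmetic above (machine hypothetical): suppose
// the leaf reports an SMT level with mask_width 1 and a core level with
// mask_width 5, terminated by the package level. Then:
//   levels[0].mask = ~(0xffffffffu << 1)          = 0x00000001  (thread bits)
//   levels[1].mask = ~(0xffffffffu << 5) ^ 0x1    = 0x0000001e  (core bits)
//   levels[2].mask =  (0xffffffffu << 5)          = 0xffffffe0  (package bits)
// An x2APIC id of 0x2d therefore decomposes into thread id 1, core id
// (0x2d & 0x1e) >> 1 == 6, and package id 0x2d >> 5 == 1.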
static bool __kmp_affinity_create_x2apicid_map(kmp_i18n_id_t *const msg_id) {
  kmp_hw_t types[INTEL_LEVEL_TYPE_LAST];
  kmp_cpuid buf;
  int topology_leaf, highest_leaf;
  int num_leaves;
  int depth = 0;
  cpuid_topo_desc_t total_description;
  static int leaves[] = {0, 0};

  // If affinity is disabled, __kmp_avail_proc may be zero
  int ninfos = (__kmp_avail_proc > 0 ? __kmp_avail_proc : 1);
  cpuid_proc_info_t *proc_info = (cpuid_proc_info_t *)__kmp_allocate(
      (sizeof(cpuid_proc_info_t) + sizeof(cpuid_cache_info_t)) * ninfos);
  cpuid_cache_info_t *cache_info = (cpuid_cache_info_t *)(proc_info + ninfos);

  kmp_i18n_id_t leaf_message_id;

  *msg_id = kmp_i18n_null;
  if (__kmp_affinity.flags.verbose) {
    KMP_INFORM(AffInfoStr, "KMP_AFFINITY", KMP_I18N_STR(Decodingx2APIC));
  }

  // Get the highest cpuid leaf supported
  __kmp_x86_cpuid(0, 0, &buf);
  highest_leaf = buf.eax;

  // If a specific topology method was requested, only allow that leaf;
  // otherwise try leaf 0x1f (31) first, then leaf 0xb (11)
  if (__kmp_affinity_top_method == affinity_top_method_x2apicid) {
    num_leaves = 1;
    leaves[0] = 11;
    leaf_message_id = kmp_i18n_str_NoLeaf11Support;
  } else if (__kmp_affinity_top_method == affinity_top_method_x2apicid_1f) {
    num_leaves = 1;
    leaves[0] = 31;
    leaf_message_id = kmp_i18n_str_NoLeaf31Support;
  } else {
    num_leaves = 2;
    leaves[0] = 31;
    leaves[1] = 11;
    leaf_message_id = kmp_i18n_str_NoLeaf11Support;
  }

  // Check to see if the cpuid leaf is supported
  __kmp_nThreadsPerCore = nCoresPerPkg = nPackages = 1;
  topology_leaf = -1;
  for (int i = 0; i < num_leaves; ++i) {
    int leaf = leaves[i];
    if (highest_leaf < leaf)
      continue;
    __kmp_x86_cpuid(leaf, 0, &buf);
    if (buf.ebx == 0)
      continue;
    topology_leaf = leaf;
    __kmp_x2apicid_get_levels(leaf, &proc_info[0], types, &depth,
                              &total_description);
    if (depth == 0)
      continue;
    break;
  }
  if (topology_leaf == -1 || depth == 0) {
    *msg_id = leaf_message_id;
    __kmp_free(proc_info);
    return false;
  }
  KMP_ASSERT(depth <= INTEL_LEVEL_TYPE_LAST);

  // If we cannot bind to each proc, then infer the machine topology from the
  // cpuid info gathered on the current thread together with __kmp_xproc
  if (!KMP_AFFINITY_CAPABLE()) {
    KMP_ASSERT(__kmp_affinity.type == affinity_none);
    for (int i = 0; i < depth; ++i) {
      if (proc_info[0].levels[i].level_type == INTEL_LEVEL_TYPE_SMT) {
        __kmp_nThreadsPerCore = proc_info[0].levels[i].nitems;
      } else if (proc_info[0].levels[i].level_type == INTEL_LEVEL_TYPE_CORE) {
        nCoresPerPkg = proc_info[0].levels[i].nitems;
      }
    }
    __kmp_ncores = __kmp_xproc / __kmp_nThreadsPerCore;
    nPackages = (__kmp_xproc + nCoresPerPkg - 1) / nCoresPerPkg;
    __kmp_free(proc_info);
    return true;
  }
  // Save the affinity mask for the current thread
  kmp_affinity_raii_t previous_affinity;

  // Run through each of the available contexts, binding the current thread
  // to it, and obtaining the pertinent information using the cpuid instr
  unsigned int proc;
  int hw_thread_index = 0;
  bool uniform_caches = true;

  KMP_CPU_SET_ITERATE(proc, __kmp_affin_fullMask) {
    // Skip this proc if it is not included in the machine model
    if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
      continue;
    }
    KMP_DEBUG_ASSERT(hw_thread_index < __kmp_avail_proc);

    // Gather topology information
    __kmp_affinity_dispatch->bind_thread(proc);
    __kmp_x86_cpuid(topology_leaf, 0, &buf);
    proc_info[hw_thread_index].os_id = proc;
    proc_info[hw_thread_index].apic_id = buf.edx;
    __kmp_x2apicid_get_levels(topology_leaf, &proc_info[hw_thread_index],
                              types, &depth, &total_description);
    if (proc_info[hw_thread_index].depth == 0) {
      *msg_id = kmp_i18n_str_InvalidCpuidInfo;
      __kmp_free(proc_info);
      return false;
    }
    // Gather cache information and insert into table
    cache_info[hw_thread_index].get_leaf4_levels();
    if (uniform_caches && hw_thread_index > 0)
      if (cache_info[0] != cache_info[hw_thread_index])
        uniform_caches = false;
    // Hybrid information
    if (__kmp_is_hybrid_cpu() && highest_leaf >= 0x1a) {
      __kmp_get_hybrid_info(&proc_info[hw_thread_index].type,
                            &proc_info[hw_thread_index].efficiency,
                            &proc_info[hw_thread_index].native_model_id);
    }
    hw_thread_index++;
  }
  KMP_ASSERT(hw_thread_index > 0);
  previous_affinity.restore();
  // Allocate the data structure to be returned
  __kmp_topology = kmp_topology_t::allocate(__kmp_avail_proc, depth, types);

  // Fill in the topology ids for each hardware thread
  for (int i = 0; i < __kmp_topology->get_num_hw_threads(); ++i) {
    kmp_hw_thread_t &hw_thread = __kmp_topology->at(i);
    hw_thread.os_id = proc_info[i].os_id;
    hw_thread.original_idx = i;
    unsigned apic_id = proc_info[i].apic_id;
    // Put in topology information
    for (int j = 0, idx = depth - 1; j < depth; ++j, --idx) {
      if (!(proc_info[i].description.contains_topology_type(
              __kmp_topology->get_type(j)))) {
        hw_thread.ids[idx] = kmp_hw_thread_t::UNKNOWN_ID;
      } else {
        hw_thread.ids[idx] = apic_id & proc_info[i].levels[j].mask;
        if (j > 0)
          hw_thread.ids[idx] >>= proc_info[i].levels[j - 1].mask_width;
      }
    }
    hw_thread.attrs.set_core_type(proc_info[i].type);
    hw_thread.attrs.set_core_eff(proc_info[i].efficiency);
  }

  __kmp_topology->sort_ids();

  // Change Ids to logical Ids
  for (int j = 0; j < depth - 1; ++j) {
    int new_id = 0;
    int prev_id = __kmp_topology->at(0).ids[j];
    int curr_id = __kmp_topology->at(0).ids[j + 1];
    __kmp_topology->at(0).ids[j + 1] = new_id;
    for (int i = 1; i < __kmp_topology->get_num_hw_threads(); ++i) {
      kmp_hw_thread_t &hw_thread = __kmp_topology->at(i);
      if (hw_thread.ids[j] == prev_id && hw_thread.ids[j + 1] == curr_id) {
        hw_thread.ids[j + 1] = new_id;
      } else if (hw_thread.ids[j] == prev_id &&
                 hw_thread.ids[j + 1] != curr_id) {
        curr_id = hw_thread.ids[j + 1];
        hw_thread.ids[j + 1] = ++new_id;
      } else {
        prev_id = hw_thread.ids[j];
        curr_id = hw_thread.ids[j + 1];
        hw_thread.ids[j + 1] = ++new_id;
      }
    }
  }
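// Example of the relabeling above (raw ids hypothetical): if the sorted hw
// threads carry raw (socket, core) ids (0,4), (0,4), (0,5), (1,4), the
// core-level pass rewrites the core ids to (0,0), (0,0), (0,1), (1,2) -- the
// logical core id increases monotonically across the whole machine instead
// of restarting or jumping with each package's hardware numbering.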
  // First check for easy cache placement. This occurs when caches are
  // equivalent to a layer in the CPUID leaf 0xb or 0x1f topology.
  if (uniform_caches) {
    for (size_t i = 0; i < cache_info[0].get_depth(); ++i) {
      unsigned cache_mask = cache_info[0][i].mask;
      unsigned cache_level = cache_info[0][i].level;
      KMP_ASSERT(cache_level <= cpuid_cache_info_t::MAX_CACHE_LEVEL);
      kmp_hw_t cache_type = cpuid_cache_info_t::get_topology_type(cache_level);
      __kmp_topology->set_equivalent_type(cache_type, cache_type);
      for (int j = 0; j < depth; ++j) {
        unsigned hw_cache_mask = proc_info[0].levels[j].cache_mask;
        if (hw_cache_mask == cache_mask && j < depth - 1) {
          kmp_hw_t type = __kmp_intel_type_2_topology_type(
              proc_info[0].levels[j + 1].level_type);
          __kmp_topology->set_equivalent_type(cache_type, type);
        }
      }
    }
  } else {
    // If caches are non-uniform, then record which caches exist
    for (int i = 0; i < __kmp_topology->get_num_hw_threads(); ++i) {
      for (size_t j = 0; j < cache_info[i].get_depth(); ++j) {
        unsigned cache_level = cache_info[i][j].level;
        kmp_hw_t cache_type =
            cpuid_cache_info_t::get_topology_type(cache_level);
        if (__kmp_topology->get_equivalent_type(cache_type) == KMP_HW_UNKNOWN)
          __kmp_topology->set_equivalent_type(cache_type, cache_type);
      }
    }
  }

  // See if any cache level needs to be added manually through cache Ids
  bool unresolved_cache_levels = false;
  for (unsigned level = 1; level <= cpuid_cache_info_t::MAX_CACHE_LEVEL;
       ++level) {
    kmp_hw_t cache_type = cpuid_cache_info_t::get_topology_type(level);
    // This also filters out caches which may not be in the topology
    // since the equivalent type might be KMP_HW_UNKNOWN.
    if (__kmp_topology->get_equivalent_type(cache_type) == cache_type) {
      unresolved_cache_levels = true;
      break;
    }
  }

  // If a cache level needs to be added, then perform that adding
  if (unresolved_cache_levels) {
    int num_hw_threads = __kmp_topology->get_num_hw_threads();
    int *ids = (int *)__kmp_allocate(sizeof(int) * num_hw_threads);
    for (unsigned l = 1; l <= cpuid_cache_info_t::MAX_CACHE_LEVEL; ++l) {
      kmp_hw_t cache_type = cpuid_cache_info_t::get_topology_type(l);
      if (__kmp_topology->get_equivalent_type(cache_type) != cache_type)
        continue;
      for (int i = 0; i < num_hw_threads; ++i) {
        int original_idx = __kmp_topology->at(i).original_idx;
        ids[i] = kmp_hw_thread_t::UNKNOWN_ID;
        const cpuid_cache_info_t::info_t &info =
            cache_info[original_idx].get_level(l);
        // Check if cache info is valid
        if (info.level == 0)
          continue;
        ids[i] = info.mask & proc_info[original_idx].apic_id;
      }
      __kmp_topology->insert_layer(cache_type, ids);
    }
  }

  if (!__kmp_topology->check_ids()) {
    kmp_topology_t::deallocate(__kmp_topology);
    __kmp_topology = nullptr;
    *msg_id = kmp_i18n_str_x2ApicIDsNotUnique;
    __kmp_free(proc_info);
    return false;
  }
  __kmp_free(proc_info);
  return true;
}
#define osIdIndex 0
#define threadIdIndex 1
#define coreIdIndex 2
#define pkgIdIndex 3
#define nodeIdIndex 4

typedef unsigned *ProcCpuInfo;
static unsigned maxIndex = pkgIdIndex;

static int __kmp_affinity_cmp_ProcCpuInfo_phys_id(const void *a,
                                                  const void *b) {
  unsigned i;
  const unsigned *aa = *(unsigned *const *)a;
  const unsigned *bb = *(unsigned *const *)b;
  for (i = maxIndex;; i--) {
    if (aa[i] < bb[i])
      return -1;
    if (aa[i] > bb[i])
      return 1;
    if (i == osIdIndex)
      break;
  }
  return 0;
}
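// The comparator walks each per-proc record from the most-significant field
// down to osIdIndex, so qsort() produces a radix-style ordering: records are
// grouped by package id, then by core id within a package, then by thread
// id, and finally by OS proc id. For example, records with (pkg, core,
// thread) of (1,0,0), (0,1,0), (0,0,1) sort as (0,0,1), (0,1,0), (1,0,0).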
#if KMP_USE_HIER_SCHED
// Set the array sizes for the hierarchy layers
static void __kmp_dispatch_set_hierarchy_values() {
  // Set the maximum number of L1's to number of cores; set the maximum
  // number of L2's to number of cores / 2 for Intel(R) Xeon Phi(TM)
  // coprocessors, or number of cores otherwise
  __kmp_hier_max_units[kmp_hier_layer_e::LAYER_THREAD + 1] =
      nPackages * nCoresPerPkg * __kmp_nThreadsPerCore;
  __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L1 + 1] = __kmp_ncores;
#if KMP_ARCH_X86_64 &&                                                         \
    (KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_DRAGONFLY ||    \
     KMP_OS_WINDOWS) &&                                                        \
    KMP_MIC_SUPPORTED
  if (__kmp_mic_type >= mic3)
    __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L2 + 1] = __kmp_ncores / 2;
  else
#endif
    __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L2 + 1] = __kmp_ncores;
  __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L3 + 1] = nPackages;
  __kmp_hier_max_units[kmp_hier_layer_e::LAYER_NUMA + 1] = nPackages;
  __kmp_hier_max_units[kmp_hier_layer_e::LAYER_LOOP + 1] = 1;
  // Set the number of threads per unit
  __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_THREAD + 1] = 1;
  __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L1 + 1] =
      __kmp_nThreadsPerCore;
#if KMP_ARCH_X86_64 &&                                                         \
    (KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_DRAGONFLY ||    \
     KMP_OS_WINDOWS) &&                                                        \
    KMP_MIC_SUPPORTED
  if (__kmp_mic_type >= mic3)
    __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L2 + 1] =
        2 * __kmp_nThreadsPerCore;
  else
#endif
    __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L2 + 1] =
        __kmp_nThreadsPerCore;
  __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L3 + 1] =
      nCoresPerPkg * __kmp_nThreadsPerCore;
  __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_NUMA + 1] =
      nCoresPerPkg * __kmp_nThreadsPerCore;
  __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_LOOP + 1] =
      nPackages * nCoresPerPkg * __kmp_nThreadsPerCore;
}

// Return the index into the hierarchy for this tid and layer type (L1, L2...)
int __kmp_dispatch_get_index(int tid, kmp_hier_layer_e type) {
  int index = type + 1;
  int num_hw_threads = __kmp_hier_max_units[kmp_hier_layer_e::LAYER_THREAD + 1];
  KMP_DEBUG_ASSERT(type != kmp_hier_layer_e::LAYER_LAST);
  if (type == kmp_hier_layer_e::LAYER_THREAD)
    return tid;
  else if (type == kmp_hier_layer_e::LAYER_LOOP)
    return 0;
  KMP_DEBUG_ASSERT(__kmp_hier_max_units[index] != 0);
  if (tid >= num_hw_threads)
    tid = tid % num_hw_threads;
  return (tid / __kmp_hier_threads_per[index]) % __kmp_hier_max_units[index];
}

// Return the number of t1's per t2
int __kmp_dispatch_get_t1_per_t2(kmp_hier_layer_e t1, kmp_hier_layer_e t2) {
  int i1 = t1 + 1;
  int i2 = t2 + 1;
  KMP_DEBUG_ASSERT(i1 <= i2);
  KMP_DEBUG_ASSERT(t1 != kmp_hier_layer_e::LAYER_LAST);
  KMP_DEBUG_ASSERT(t2 != kmp_hier_layer_e::LAYER_LAST);
  KMP_DEBUG_ASSERT(__kmp_hier_threads_per[i1] != 0);
  // (nthreads / t2) / (nthreads / t1) = t1s per t2
  return __kmp_hier_threads_per[i2] / __kmp_hier_threads_per[i1];
}
#endif // KMP_USE_HIER_SCHED
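// Worked example (machine hypothetical): on 2 sockets x 4 cores x 2 SMT,
// __kmp_hier_max_units[LAYER_L1 + 1] == 8 and
// __kmp_hier_threads_per[LAYER_L1 + 1] == 2, so
// __kmp_dispatch_get_index(5, LAYER_L1) == (5 / 2) % 8 == 2 -- tid 5 shares
// an L1 (core) with tid 4. Likewise __kmp_dispatch_get_t1_per_t2(LAYER_L1,
// LAYER_L3) == 8 / 2 == 4 cores per L3 on such a machine.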
static inline const char *__kmp_cpuinfo_get_filename() {
  const char *filename;
  if (__kmp_cpuinfo_file != nullptr)
    filename = __kmp_cpuinfo_file;
  else
    filename = "/proc/cpuinfo";
  return filename;
}

static inline const char *__kmp_cpuinfo_get_envvar() {
  const char *envvar = nullptr;
  if (__kmp_cpuinfo_file != nullptr)
    envvar = "KMP_CPUINFO_FILE";
  return envvar;
}
// Set the package id for every proc sharing cpu<idx>'s core_siblings_list
static bool __kmp_package_id_from_core_siblings_list(unsigned **threadInfo,
                                                     unsigned num_avail,
                                                     unsigned idx) {
  if (!KMP_AFFINITY_CAPABLE())
    return false;

  char path[256];
  KMP_SNPRINTF(path, sizeof(path),
               "/sys/devices/system/cpu/cpu%u/topology/core_siblings_list",
               threadInfo[idx][osIdIndex]);
  kmp_affin_mask_t *siblings = __kmp_parse_cpu_list(path);
  for (unsigned i = 0; i < num_avail; ++i) {
    unsigned cpu_id = threadInfo[i][osIdIndex];
    KMP_ASSERT(cpu_id < __kmp_affin_mask_size * CHAR_BIT);
    if (!KMP_CPU_ISSET(cpu_id, siblings))
      continue;
    if (threadInfo[i][pkgIdIndex] == UINT_MAX) {
      // Use idx as the common package id; all siblings get the same value
      threadInfo[i][pkgIdIndex] = idx;
    } else if (threadInfo[i][pkgIdIndex] != idx) {
      // Contradictory sibling lists
      KMP_CPU_FREE(siblings);
      return false;
    }
  }
  KMP_ASSERT(threadInfo[idx][pkgIdIndex] != UINT_MAX);
  KMP_CPU_FREE(siblings);
  return true;
}
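// Illustrative sysfs content (hypothetical box): on a 2-socket system,
// /sys/devices/system/cpu/cpu0/topology/core_siblings_list might read
// "0-7,16-23", meaning those logical CPUs share cpu0's package.
// __kmp_parse_cpu_list() turns that list into an affinity mask, and the loop
// above stamps one common pkgIdIndex value onto every record in the mask.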
// Parse /proc/cpuinfo (or an alternate file in the same format) to obtain the
// affinity map
static bool __kmp_affinity_create_cpuinfo_map(int *line,
                                              kmp_i18n_id_t *const msg_id) {
  *msg_id = kmp_i18n_null;

#if KMP_OS_AIX
  unsigned num_records = __kmp_xproc;
#else
  const char *filename = __kmp_cpuinfo_get_filename();
  const char *envvar = __kmp_cpuinfo_get_envvar();

  if (__kmp_affinity.flags.verbose) {
    KMP_INFORM(AffParseFilename, "KMP_AFFINITY", filename);
  }

  kmp_safe_raii_file_t f(filename, "r", envvar);

  // Scan of the file, count the number of "processor" (osId) fields,
  // and find the highest value of <n> for a node_<n> field.
  char buf[256];
  unsigned num_records = 0;
  while (!feof(f)) {
    buf[sizeof(buf) - 1] = 1;
    if (!fgets(buf, sizeof(buf), f)) {
      // Read errors presumably because of EOF
      break;
    }

    char s1[] = "processor";
    if (strncmp(buf, s1, sizeof(s1) - 1) == 0) {
      num_records++;
      continue;
    }

    unsigned level;
    if (KMP_SSCANF(buf, "node_%u id", &level) == 1) {
      // validate the input fields are not overflown
      if (level > (unsigned)__kmp_xproc) { // level is too big
        level = __kmp_xproc;
      }
      if (nodeIdIndex + level >= maxIndex) {
        maxIndex = nodeIdIndex + level;
      }
      continue;
    }
  }

  // Check for empty file / no valid processor records, or too many. The
  // number of records can't exceed the number of valid bits in the mask.
  if (num_records == 0) {
    *msg_id = kmp_i18n_str_NoProcRecords;
    return false;
  }
  if (num_records > (unsigned)__kmp_xproc) {
    *msg_id = kmp_i18n_str_TooManyProcRecords;
    return false;
  }

  // Set the file pointer back to the beginning, so that we can re-scan the
  // file, this time performing a full parse of the data.
  if (fseek(f, 0, SEEK_SET) != 0) {
    *msg_id = kmp_i18n_str_CantRewindCpuinfo;
    return false;
  }
#endif // KMP_OS_AIX

  // Allocate the array of records to store the proc info in. The dummy
  // element at the end makes the logic in filling them out easier to code.
  unsigned **threadInfo =
      (unsigned **)__kmp_allocate((num_records + 1) * sizeof(unsigned *));
  unsigned i;
  for (i = 0; i <= num_records; i++) {
    threadInfo[i] =
        (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
  }

#define CLEANUP_THREAD_INFO                                                    \
  for (i = 0; i <= num_records; i++) {                                         \
    __kmp_free(threadInfo[i]);                                                 \
  }                                                                            \
  __kmp_free(threadInfo);

  // A value of UINT_MAX means that we didn't find the field
  unsigned __index;

#define INIT_PROC_INFO(p)                                                      \
  for (__index = 0; __index <= maxIndex; __index++) {                          \
    (p)[__index] = UINT_MAX;                                                   \
  }

  for (i = 0; i <= num_records; i++) {
    INIT_PROC_INFO(threadInfo[i]);
  }
#if KMP_OS_AIX
  int smt_threads;
  lpar_info_format1_t cpuinfo;
  unsigned num_avail = __kmp_xproc;

  if (__kmp_affinity.flags.verbose)
    KMP_INFORM(AffParseFilename, "KMP_AFFINITY", "system info for topology");

  // Get the number of SMT threads per core
  smt_threads = syssmt(GET_NUMBER_SMT_SETS, 0, 0, NULL);

  // Allocate a resource set containing available system resources
  rsethandle_t sys_rset = rs_alloc(RS_SYSTEM);
  if (sys_rset == NULL) {
    CLEANUP_THREAD_INFO;
    *msg_id = kmp_i18n_str_UnknownTopology;
    return false;
  }
  // Allocate a resource set for the SRAD info
  rsethandle_t srad = rs_alloc(RS_EMPTY);
  if (srad == NULL) {
    rs_free(sys_rset);
    CLEANUP_THREAD_INFO;
    *msg_id = kmp_i18n_str_UnknownTopology;
    return false;
  }
  // Get the SRAD system detail level
  int sradsdl = rs_getinfo(NULL, R_SRADSDL, 0);
  if (sradsdl < 0) {
    rs_free(srad);
    rs_free(sys_rset);
    CLEANUP_THREAD_INFO;
    *msg_id = kmp_i18n_str_UnknownTopology;
    return false;
  }
  // Get the number of RADs at that SRAD SDL
  int num_rads = rs_numrads(sys_rset, sradsdl, 0);
  if (num_rads < 0) {
    rs_free(srad);
    rs_free(sys_rset);
    CLEANUP_THREAD_INFO;
    *msg_id = kmp_i18n_str_UnknownTopology;
    return false;
  }
  // Get the maximum number of procs that may be contained in a resource set
  int max_procs = rs_getinfo(NULL, R_MAXPROCS, 0);
  if (max_procs < 0) {
    rs_free(srad);
    rs_free(sys_rset);
    CLEANUP_THREAD_INFO;
    *msg_id = kmp_i18n_str_UnknownTopology;
    return false;
  }

  int cur_rad = 0;
  int num_set = 0;
  for (int srad_idx = 0; cur_rad < num_rads && srad_idx < VMI_MAXRADS;
       ++srad_idx) {
    // Check if the SRAD is available in the RSET
    if (rs_getrad(sys_rset, srad, sradsdl, srad_idx, 0) < 0)
      continue;
    for (int cpu = 0; cpu < max_procs; cpu++) {
      // Set the info for the cpu if it is in the SRAD
      if (rs_op(RS_TESTRESOURCE, srad, NULL, R_PROCS, cpu)) {
        threadInfo[cpu][osIdIndex] = cpu;
        threadInfo[cpu][pkgIdIndex] = cur_rad;
        threadInfo[cpu][coreIdIndex] = cpu / smt_threads;
        ++num_set;
        if (num_set >= num_avail)
          break; // Done; skip the rest of the SRADs
      }
    }
    ++cur_rad;
  }
  rs_free(srad);
  rs_free(sys_rset);
#else // !KMP_OS_AIX
  // Parse the file
  unsigned num_avail = 0;
  *line = 0;
#if KMP_ARCH_S390X
  bool reading_s390x_sys_info = true;
#endif
  while (!feof(f)) {
    // Create an inner scoping level, so that all the goto targets at the end
    // of the loop appear in an outer scoping level
    {
      buf[sizeof(buf) - 1] = 1;
      bool long_line = false;
      if (!fgets(buf, sizeof(buf), f)) {
        // Read errors presumably because of EOF. If there is valid data in
        // threadInfo[num_avail], then fake a blank line to ensure that the
        // last record gets parsed.
        bool valid = false;
        for (i = 0; i <= maxIndex; i++) {
          if (threadInfo[num_avail][i] != UINT_MAX) {
            valid = true;
          }
        }
        if (!valid) {
          break;
        }
        buf[0] = 0;
      } else if (!buf[sizeof(buf) - 1]) {
        // The line is longer than the buffer. Set a flag and don't
        // emit an error if we were going to ignore it anyway.
        long_line = true;

#define CHECK_LINE                                                             \
  if (long_line) {                                                             \
    CLEANUP_THREAD_INFO;                                                       \
    *msg_id = kmp_i18n_str_LongLineCpuinfo;                                    \
    return false;                                                              \
  }
      }
      (*line)++;

#if KMP_ARCH_LOONGARCH64
      // The parsing logic of /proc/cpuinfo in this function highly depends on
      // the blank lines between each processor info block. But on LoongArch a
      // blank line exists before the first processor info block (i.e. after
      // the "system type" line). Skip this line so that the original logic
      // works on LoongArch.
      if (*buf == '\n' && *line == 2)
        continue;
#endif
#if KMP_ARCH_S390X
      // s390x /proc/cpuinfo starts with a variable number of lines containing
      // the overall system information. Skip them.
      if (reading_s390x_sys_info) {
        if (*buf == '\n')
          reading_s390x_sys_info = false;
        continue;
      }
#endif

#if KMP_ARCH_S390X
      char s1[] = "cpu number";
#else
      char s1[] = "processor";
#endif
      if (strncmp(buf, s1, sizeof(s1) - 1) == 0) {
        CHECK_LINE;
        char *p = strchr(buf + sizeof(s1) - 1, ':');
        unsigned val;
        if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
          goto no_val;
        if (threadInfo[num_avail][osIdIndex] != UINT_MAX)
#if KMP_ARCH_AARCH64
          // Handle the old AArch64 /proc/cpuinfo layout differently:
          // it lists all of the 'processor' entries in a single
          // 'Processor' section, so the normal duplicate check in that
          // section would always fail.
          num_avail++;
#else
          goto dup_field;
#endif
        threadInfo[num_avail][osIdIndex] = val;
#if KMP_OS_LINUX && !(KMP_ARCH_X86 || KMP_ARCH_X86_64)
        char path[256];
        KMP_SNPRINTF(
            path, sizeof(path),
            "/sys/devices/system/cpu/cpu%u/topology/physical_package_id",
            threadInfo[num_avail][osIdIndex]);
        __kmp_read_from_file(path, "%u", &threadInfo[num_avail][pkgIdIndex]);

#if KMP_ARCH_S390X
        // Disambiguate physical_package_id
        unsigned book_id;
        KMP_SNPRINTF(path, sizeof(path),
                     "/sys/devices/system/cpu/cpu%u/topology/book_id",
                     threadInfo[num_avail][osIdIndex]);
        __kmp_read_from_file(path, "%u", &book_id);
        threadInfo[num_avail][pkgIdIndex] |= (book_id << 8);

        unsigned drawer_id;
        KMP_SNPRINTF(path, sizeof(path),
                     "/sys/devices/system/cpu/cpu%u/topology/drawer_id",
                     threadInfo[num_avail][osIdIndex]);
        __kmp_read_from_file(path, "%u", &drawer_id);
        threadInfo[num_avail][pkgIdIndex] |= (drawer_id << 16);
#endif
        KMP_SNPRINTF(path, sizeof(path),
                     "/sys/devices/system/cpu/cpu%u/topology/core_id",
                     threadInfo[num_avail][osIdIndex]);
        __kmp_read_from_file(path, "%u", &threadInfo[num_avail][coreIdIndex]);
#endif
        continue;
      }
      char s2[] = "physical id";
      if (strncmp(buf, s2, sizeof(s2) - 1) == 0) {
        CHECK_LINE;
        char *p = strchr(buf + sizeof(s2) - 1, ':');
        unsigned val;
        if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
          goto no_val;
        if (threadInfo[num_avail][pkgIdIndex] != UINT_MAX)
          goto dup_field;
        threadInfo[num_avail][pkgIdIndex] = val;
        continue;
      }
      char s3[] = "core id";
      if (strncmp(buf, s3, sizeof(s3) - 1) == 0) {
        CHECK_LINE;
        char *p = strchr(buf + sizeof(s3) - 1, ':');
        unsigned val;
        if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
          goto no_val;
        if (threadInfo[num_avail][coreIdIndex] != UINT_MAX)
          goto dup_field;
        threadInfo[num_avail][coreIdIndex] = val;
        continue;
      }
      char s4[] = "thread id";
      if (strncmp(buf, s4, sizeof(s4) - 1) == 0) {
        CHECK_LINE;
        char *p = strchr(buf + sizeof(s4) - 1, ':');
        unsigned val;
        if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
          goto no_val;
        if (threadInfo[num_avail][threadIdIndex] != UINT_MAX)
          goto dup_field;
        threadInfo[num_avail][threadIdIndex] = val;
        continue;
      }
      unsigned level;
      if (KMP_SSCANF(buf, "node_%u id", &level) == 1) {
        CHECK_LINE;
        char *p = strchr(buf + sizeof(s4) - 1, ':');
        unsigned val;
        if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
          goto no_val;
        // validate the input before using level:
        if (level > (unsigned)__kmp_xproc) { // level is too big
          level = __kmp_xproc;
        }
        if (threadInfo[num_avail][nodeIdIndex + level] != UINT_MAX)
          goto dup_field;
        threadInfo[num_avail][nodeIdIndex + level] = val;
        continue;
      }

      // We didn't recognize the leading token on the line. There are lots of
      // leading tokens that we don't recognize - if the line isn't empty, go
      // on to the next line.
      if ((*buf != 0) && (*buf != '\n')) {
        // If the line is longer than the buffer, read characters
        // until we find a newline.
        if (long_line) {
          int ch;
          while (((ch = fgetc(f)) != EOF) && (ch != '\n'))
            ;
        }
        continue;
      }

      // A newline has signalled the end of the processor record.
      // Check that there aren't too many procs specified.
      if ((int)num_avail == __kmp_xproc) {
        CLEANUP_THREAD_INFO;
        *msg_id = kmp_i18n_str_TooManyEntries;
        return false;
      }

      // Check for missing fields. The osId field must be there; the other
      // fields are optional.
      if (threadInfo[num_avail][osIdIndex] == UINT_MAX) {
        CLEANUP_THREAD_INFO;
        *msg_id = kmp_i18n_str_MissingProcField;
        return false;
      }

      // Skip this proc if it is not included in the machine model
      if (KMP_AFFINITY_CAPABLE() &&
          !KMP_CPU_ISSET(threadInfo[num_avail][osIdIndex],
                         __kmp_affin_fullMask)) {
        INIT_PROC_INFO(threadInfo[num_avail]);
        continue;
      }

      // We have a successful parse of this proc's info.
      // Increment the counter, and prepare for the next proc.
      num_avail++;
      KMP_ASSERT(num_avail <= num_records);
      INIT_PROC_INFO(threadInfo[num_avail]);
    }
    continue;

  no_val:
    CLEANUP_THREAD_INFO;
    *msg_id = kmp_i18n_str_MissingValCpuinfo;
    return false;

  dup_field:
    CLEANUP_THREAD_INFO;
    *msg_id = kmp_i18n_str_DuplicateFieldCpuinfo;
    return false;
  }
  *line = 0;
#endif // KMP_OS_AIX
  // Check that every proc has a physical id, deriving it from the
  // core_siblings_list if the cpuinfo file did not provide one
  for (i = 0; i < num_avail; ++i) {
    if (threadInfo[i][pkgIdIndex] == UINT_MAX) {
      if (!__kmp_package_id_from_core_siblings_list(threadInfo, num_avail,
                                                    i)) {
        CLEANUP_THREAD_INFO;
        *msg_id = kmp_i18n_str_MissingPhysicalIDField;
        return false;
      }
    }
  }

#if KMP_MIC && REDUCE_TEAM_SIZE
  unsigned teamSize = 0;
#endif // KMP_MIC && REDUCE_TEAM_SIZE

  KMP_ASSERT(num_avail > 0);
  KMP_ASSERT(num_avail <= num_records);

  // Sort the threadInfo table by physical Id
  qsort(threadInfo, num_avail, sizeof(*threadInfo),
        __kmp_affinity_cmp_ProcCpuInfo_phys_id);

  // Allocate the arrays used to count unique ids at each level and to
  // remember the previous id seen at each level
  unsigned *counts =
      (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
  unsigned *maxCt =
      (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
  unsigned *totals =
      (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
  unsigned *lastId =
      (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));

  bool assign_thread_ids = false;
  unsigned threadIdCt;
  unsigned index, index2;

restart_radix_check:
  threadIdCt = 0;

  // The first proc in the sorted table starts a run at every level
  if (assign_thread_ids) {
    if (threadInfo[0][threadIdIndex] == UINT_MAX) {
      threadInfo[0][threadIdIndex] = threadIdCt++;
    } else if (threadIdCt <= threadInfo[0][threadIdIndex]) {
      threadIdCt = threadInfo[0][threadIdIndex] + 1;
    }
  }
  for (index = 0; index <= maxIndex; index++) {
    counts[index] = 1;
    maxCt[index] = 1;
    totals[index] = 1;
    lastId[index] = threadInfo[0][index];
  }

  // Run through the rest of the records
  for (i = 1; i < num_avail; i++) {
    // Find the most significant index whose id differs from the id of the
    // previous record
    for (index = maxIndex; index >= threadIdIndex; index--) {
      if (assign_thread_ids && (index == threadIdIndex)) {
        // Auto-assign the thread id field if it wasn't specified
        if (threadInfo[i][threadIdIndex] == UINT_MAX) {
          threadInfo[i][threadIdIndex] = threadIdCt++;
        }
        // Apparently the thread id field was specified for some entries and
        // not for others. Start the thread id counter at the next higher id.
        else if (threadIdCt <= threadInfo[i][threadIdIndex]) {
          threadIdCt = threadInfo[i][threadIdIndex] + 1;
        }
      }
      if (threadInfo[i][index] != lastId[index]) {
        // The run at this level is over; update the counters for this level
        // and reset the counters for all lower levels
        for (index2 = threadIdIndex; index2 < index; index2++) {
          totals[index2]++;
          if (counts[index2] > maxCt[index2]) {
            maxCt[index2] = counts[index2];
          }
          counts[index2] = 1;
          lastId[index2] = threadInfo[i][index2];
        }
        counts[index]++;
        totals[index]++;
        lastId[index] = threadInfo[i][index];
        if (assign_thread_ids && (index > threadIdIndex)) {
#if KMP_MIC && REDUCE_TEAM_SIZE
          // The default team size is the total #threads in the machine
          // minus 1 thread for every core that has 3 or more threads
          teamSize += (threadIdCt <= 2) ? (threadIdCt) : (threadIdCt - 1);
#endif // KMP_MIC && REDUCE_TEAM_SIZE
          // Restart the thread counter, as we are on a new core
          threadIdCt = 0;
          // Auto-assign the thread id field if it wasn't specified
          if (threadInfo[i][threadIdIndex] == UINT_MAX) {
            threadInfo[i][threadIdIndex] = threadIdCt++;
          }
          // Apparently the thread id field was specified for some entries
          // and not for others. Start the counter at the next higher id.
          else if (threadIdCt <= threadInfo[i][threadIdIndex]) {
            threadIdCt = threadInfo[i][threadIdIndex] + 1;
          }
        }
        break;
      }
    }
    if (index < threadIdIndex) {
      // If thread ids were specified, it is an error if they are not unique.
      // Also check that we haven't already restarted the loop with
      // assign_thread_ids set.
      if ((threadInfo[i][threadIdIndex] != UINT_MAX) || assign_thread_ids) {
        __kmp_free(lastId);
        __kmp_free(totals);
        __kmp_free(maxCt);
        __kmp_free(counts);
        CLEANUP_THREAD_INFO;
        *msg_id = kmp_i18n_str_PhysicalIDsNotUnique;
        return false;
      }
      // The thread ids were not specified and we see duplicate records:
      // restart the loop and assign the thread ids manually
      assign_thread_ids = true;
      goto restart_radix_check;
    }
  }

#if KMP_MIC && REDUCE_TEAM_SIZE
  // The default team size is the total #threads in the machine
  // minus 1 thread for every core that has 3 or more threads
  teamSize += (threadIdCt <= 2) ? (threadIdCt) : (threadIdCt - 1);
#endif // KMP_MIC && REDUCE_TEAM_SIZE

  for (index = threadIdIndex; index <= maxIndex; index++) {
    if (counts[index] > maxCt[index]) {
      maxCt[index] = counts[index];
    }
  }

  __kmp_nThreadsPerCore = maxCt[threadIdIndex];
  nCoresPerPkg = maxCt[coreIdIndex];
  nPackages = totals[pkgIdIndex];

  // When affinity is off, this routine will still be called to set
  // __kmp_ncores, as well as __kmp_nThreadsPerCore, nCoresPerPkg, and
  // nPackages. Make sure all these vars are set correctly.
  __kmp_ncores = totals[coreIdIndex];
  if (!KMP_AFFINITY_CAPABLE()) {
    KMP_ASSERT(__kmp_affinity.type == affinity_none);
    return true;
  }

#if KMP_MIC && REDUCE_TEAM_SIZE
  // Set the default team size
  if ((__kmp_dflt_team_nth == 0) && (teamSize > 0)) {
    __kmp_dflt_team_nth = teamSize;
    KA_TRACE(20, ("__kmp_affinity_create_cpuinfo_map: setting "
                  "__kmp_dflt_team_nth = %d\n",
                  __kmp_dflt_team_nth));
  }
#endif // KMP_MIC && REDUCE_TEAM_SIZE

  KMP_DEBUG_ASSERT(num_avail == (unsigned)__kmp_avail_proc);
  // Count the number of levels which have more nodes at that level than at
  // the parent's level (with there being an implicit root node of the top
  // level). This is equivalent to saying that there is at least one node at
  // this level which has a sibling. These levels are in the map, and the
  // package level is always in the map.
  bool *inMap = (bool *)__kmp_allocate((maxIndex + 1) * sizeof(bool));
  for (index = threadIdIndex; index < maxIndex; index++) {
    KMP_ASSERT(totals[index] >= totals[index + 1]);
    inMap[index] = (totals[index] > totals[index + 1]);
  }
  inMap[maxIndex] = (totals[maxIndex] > 1);
  inMap[pkgIdIndex] = true;
  inMap[coreIdIndex] = true;
  inMap[threadIdIndex] = true;

  int depth = 0;
  int idx = 0;
  kmp_hw_t types[KMP_HW_LAST];
  int pkgLevel = -1;
  int coreLevel = -1;
  int threadLevel = -1;
  for (index = threadIdIndex; index <= maxIndex; index++) {
    if (inMap[index]) {
      depth++;
    }
  }
  if (inMap[pkgIdIndex]) {
    pkgLevel = idx;
    types[idx++] = KMP_HW_SOCKET;
  }
  if (inMap[coreIdIndex]) {
    coreLevel = idx;
    types[idx++] = KMP_HW_CORE;
  }
  if (inMap[threadIdIndex]) {
    threadLevel = idx;
    types[idx++] = KMP_HW_THREAD;
  }
  KMP_ASSERT(depth > 0);

  // Construct the data structure that is to be returned
  __kmp_topology = kmp_topology_t::allocate(num_avail, depth, types);

  for (i = 0; i < num_avail; ++i) {
    unsigned os = threadInfo[i][osIdIndex];
    int src_index;
    kmp_hw_thread_t &hw_thread = __kmp_topology->at(i);
    hw_thread.os_id = os;
    hw_thread.original_idx = i;

    for (src_index = maxIndex; src_index >= threadIdIndex; src_index--) {
      if (!inMap[src_index]) {
        continue;
      }
      if (src_index == pkgIdIndex) {
        hw_thread.ids[pkgLevel] = threadInfo[i][src_index];
      } else if (src_index == coreIdIndex) {
        hw_thread.ids[coreLevel] = threadInfo[i][src_index];
      } else if (src_index == threadIdIndex) {
        hw_thread.ids[threadLevel] = threadInfo[i][src_index];
      }
    }
  }

  __kmp_free(inMap);
  __kmp_free(lastId);
  __kmp_free(totals);
  __kmp_free(maxCt);
  __kmp_free(counts);
  CLEANUP_THREAD_INFO;
  __kmp_topology->sort_ids();

  // The table is sorted: fill in any thread ids that were left unspecified,
  // making them consecutive within each core
  int tlevel = __kmp_topology->get_level(KMP_HW_THREAD);
  if (tlevel > 0) {
    // If the thread id for the first hw thread is unknown, put it at 0
    if (__kmp_topology->at(0).ids[tlevel] == kmp_hw_thread_t::UNKNOWN_ID) {
      __kmp_topology->at(0).ids[tlevel] = 0;
    }
    for (int i = 1; i < __kmp_topology->get_num_hw_threads(); ++i) {
      kmp_hw_thread_t &hw_thread = __kmp_topology->at(i);
      if (hw_thread.ids[tlevel] != kmp_hw_thread_t::UNKNOWN_ID)
        continue;
      kmp_hw_thread_t &prev_hw_thread = __kmp_topology->at(i - 1);
      // If the previous thread shares all ids above the thread level, this
      // thread belongs to the same core; otherwise it starts a new core
      for (int j = 0; j < tlevel; ++j) {
        if (hw_thread.ids[j] != prev_hw_thread.ids[j]) {
          hw_thread.ids[tlevel] = 0;
          break;
        }
      }
      if (hw_thread.ids[tlevel] == kmp_hw_thread_t::UNKNOWN_ID)
        hw_thread.ids[tlevel] = prev_hw_thread.ids[tlevel] + 1;
    }
  }

  if (!__kmp_topology->check_ids()) {
    kmp_topology_t::deallocate(__kmp_topology);
    __kmp_topology = nullptr;
    *msg_id = kmp_i18n_str_PhysicalIDsNotUnique;
    return false;
  }
  return true;
}
// Create and return a table of affinity masks, indexed by OS thread ID.
// This routine handles OR'ing together all the affinity masks of threads
// that are sufficiently close, if granularity > fine.
template <typename FindNextFunctionType>
static void __kmp_create_os_id_masks(unsigned *numUnique,
                                     kmp_affinity_t &affinity,
                                     FindNextFunctionType find_next) {
  // First form a table of affinity masks in order of OS thread id
  int maxOsId = 0;
  int i;
  int numAddrs = __kmp_topology->get_num_hw_threads();
  int depth = __kmp_topology->get_depth();
  const char *env_var = __kmp_get_affinity_env_var(affinity);
  KMP_ASSERT(numAddrs);

  for (i = numAddrs - 1;; --i) {
    int osId = __kmp_topology->at(i).os_id;
    if (osId > maxOsId) {
      maxOsId = osId;
    }
    if (i == 0)
      break;
  }
  affinity.num_os_id_masks = maxOsId + 1;
  KMP_CPU_ALLOC_ARRAY(affinity.os_id_masks, affinity.num_os_id_masks);
  KMP_ASSERT(affinity.gran_levels >= 0);
  if (affinity.flags.verbose && (affinity.gran_levels > 0)) {
    KMP_INFORM(ThreadsMigrate, env_var, affinity.gran_levels);
  }
  if (affinity.gran_levels >= (int)depth) {
    KMP_AFF_WARNING(affinity, AffThreadsMayMigrate);
  }

  // Run through the table, forming the masks for all threads on each core.
  // Threads on the same core will have identical topology ids, not
  // considering the last level, which must be the thread id. All threads on a
  // core appear consecutively in the sorted table.
  int unique = 0;
  int j = 0; // index of 1st thread on core
  int leader = 0;
  kmp_affin_mask_t *sum;
  KMP_CPU_ALLOC_ON_STACK(sum);
  KMP_CPU_ZERO(sum);

  i = j = leader = find_next(-1);
  KMP_CPU_SET(__kmp_topology->at(i).os_id, sum);
  kmp_full_mask_modifier_t full_mask;
  for (i = find_next(i); i < numAddrs; i = find_next(i)) {
    // If this thread is sufficiently close to the leader (within the
    // granularity setting), then set the bit for this os thread in the
    // affinity mask for this group, and go on to the next thread.
    if (__kmp_topology->is_close(leader, i, affinity)) {
      KMP_CPU_SET(__kmp_topology->at(i).os_id, sum);
      continue;
    }

    // For every thread in this group, copy the mask to the thread's entry in
    // the OS Id mask table. Mark the first address as a leader.
    for (; j < i; j = find_next(j)) {
      int osId = __kmp_topology->at(j).os_id;
      KMP_DEBUG_ASSERT(osId <= maxOsId);
      kmp_affin_mask_t *mask = KMP_CPU_INDEX(affinity.os_id_masks, osId);
      KMP_CPU_COPY(mask, sum);
      __kmp_topology->at(j).leader = (j == leader);
    }
    unique++;

    // Start a new mask
    leader = i;
    full_mask.include(sum);
    KMP_CPU_ZERO(sum);
    KMP_CPU_SET(__kmp_topology->at(i).os_id, sum);
  }

  // For every thread in the last group, copy the mask to the thread's
  // entry in the OS Id mask table
  for (; j < i; j = find_next(j)) {
    int osId = __kmp_topology->at(j).os_id;
    KMP_DEBUG_ASSERT(osId <= maxOsId);
    kmp_affin_mask_t *mask = KMP_CPU_INDEX(affinity.os_id_masks, osId);
    KMP_CPU_COPY(mask, sum);
    __kmp_topology->at(j).leader = (j == leader);
  }
  full_mask.include(sum);
  unique++;
  KMP_CPU_FREE_FROM_STACK(sum);

  // See if the OS Id mask table further restricts the full mask
  if (full_mask.restrict_to_mask() && affinity.flags.verbose) {
    __kmp_topology->print(env_var);
  }

  *numUnique = unique;
}
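// Example of the grouping above (proc ids hypothetical): with
// granularity=core on a machine whose cores hold OS procs {0,8}, {1,9}, ...,
// the os_id_masks entries for procs 0 and 8 both become the mask {0,8}, so a
// thread bound at core granularity may float between the two hardware
// threads of its core but not off the core. With granularity=fine
// (gran_levels == 0), every group is a single OS proc and each mask holds
// exactly one bit.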
// Stuff for the affinity proclist parsers. It's easier to declare these vars
// as file-scope statics than to try and pass them through the calling
// sequence of the recursive-descent OMP_PLACES parser.
static kmp_affin_mask_t *newMasks;
static int numNewMasks;
static int nextNewMask;

#define ADD_MASK(_mask)                                                        \
  {                                                                            \
    if (nextNewMask >= numNewMasks) {                                          \
      int i;                                                                   \
      numNewMasks *= 2;                                                        \
      kmp_affin_mask_t *temp;                                                  \
      KMP_CPU_INTERNAL_ALLOC_ARRAY(temp, numNewMasks);                         \
      for (i = 0; i < numNewMasks / 2; i++) {                                  \
        kmp_affin_mask_t *src = KMP_CPU_INDEX(newMasks, i);                    \
        kmp_affin_mask_t *dest = KMP_CPU_INDEX(temp, i);                       \
        KMP_CPU_COPY(dest, src);                                               \
      }                                                                        \
      KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks / 2);                  \
      newMasks = temp;                                                         \
    }                                                                          \
    KMP_CPU_COPY(KMP_CPU_INDEX(newMasks, nextNewMask), (_mask));               \
    nextNewMask++;                                                             \
  }

#define ADD_MASK_OSID(_osId, _osId2Mask, _maxOsId)                             \
  {                                                                            \
    if (((_osId) > _maxOsId) ||                                                \
        (!KMP_CPU_ISSET((_osId), KMP_CPU_INDEX((_osId2Mask), (_osId))))) {     \
      KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, _osId);                \
    } else {                                                                   \
      ADD_MASK(KMP_CPU_INDEX(_osId2Mask, (_osId)));                            \
    }                                                                          \
  }
// Re-parse the proclist (for the explicit affinity type), and form the list
// of affinity newMasks indexed by gtid
static void __kmp_affinity_process_proclist(kmp_affinity_t &affinity) {
  int i;
  kmp_affin_mask_t **out_masks = &affinity.masks;
  unsigned *out_numMasks = &affinity.num_masks;
  const char *proclist = affinity.proclist;
  kmp_affin_mask_t *osId2Mask = affinity.os_id_masks;
  int maxOsId = affinity.num_os_id_masks - 1;
  const char *scan = proclist;
  const char *next = proclist;

  numNewMasks = 2;
  KMP_CPU_INTERNAL_ALLOC_ARRAY(newMasks, numNewMasks);
  nextNewMask = 0;
  kmp_affin_mask_t *sumMask;
  KMP_CPU_ALLOC(sumMask);
  int setSize = 0;

  for (;;) {
    int start, end, stride;

    SKIP_WS(scan);
    next = scan;
    if (*next == '\0') {
      break;
    }

    if (*next == '{') {
      int num;
      setSize = 0;
      next++; // skip '{'
      SKIP_WS(next);
      scan = next;

      // Read the first integer in the set
      KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad proclist");
      SKIP_DIGITS(next);
      num = __kmp_str_to_int(scan, *next);
      KMP_ASSERT2(num >= 0, "bad explicit proc list");

      // Copy the mask for that osId to the sum (union) mask
      if ((num > maxOsId) ||
          (!KMP_CPU_ISSET(num, KMP_CPU_INDEX(osId2Mask, num)))) {
        KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, num);
        KMP_CPU_ZERO(sumMask);
      } else {
        KMP_CPU_COPY(sumMask, KMP_CPU_INDEX(osId2Mask, num));
        setSize = 1;
      }

      for (;;) {
        // Check for end of set
        SKIP_WS(next);
        if (*next == '}') {
          next++; // skip '}'
          break;
        }

        // Skip optional comma
        if (*next == ',') {
          next++;
        }
        SKIP_WS(next);

        // Read the next integer in the set
        scan = next;
        KMP_ASSERT2((*next >= '0') && (*next <= '9'),
                    "bad explicit proc list");
        SKIP_DIGITS(next);
        num = __kmp_str_to_int(scan, *next);
        KMP_ASSERT2(num >= 0, "bad explicit proc list");

        // Add the mask for that osId to the sum mask
        if ((num > maxOsId) ||
            (!KMP_CPU_ISSET(num, KMP_CPU_INDEX(osId2Mask, num)))) {
          KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, num);
        } else {
          KMP_CPU_UNION(sumMask, KMP_CPU_INDEX(osId2Mask, num));
          setSize++;
        }
      }
      if (setSize > 0) {
        ADD_MASK(sumMask);
      }

      SKIP_WS(next);
      if (*next == ',') {
        next++;
      }
      scan = next;
      continue;
    }

    // Read the first integer
    KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");
    SKIP_DIGITS(next);
    start = __kmp_str_to_int(scan, *next);
    KMP_ASSERT2(start >= 0, "bad explicit proc list");
    SKIP_WS(next);

    // If this isn't a range, then add a mask to the list and go on
    if (*next != '-') {
      ADD_MASK_OSID(start, osId2Mask, maxOsId);

      // Skip optional comma
      if (*next == ',') {
        next++;
      }
      scan = next;
      continue;
    }

    // This is a range. Skip over the '-' and read in the 2nd int.
    next++; // skip '-'
    SKIP_WS(next);
    scan = next;
    KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");
    SKIP_DIGITS(next);
    end = __kmp_str_to_int(scan, *next);
    KMP_ASSERT2(end >= 0, "bad explicit proc list");

    // Check for a stride parameter
    stride = 1;
    SKIP_WS(next);
    if (*next == ':') {
      // A stride is specified. Skip over the ':' and read the 3rd int.
      int sign = +1;
      next++; // skip ':'
      SKIP_WS(next);
      scan = next;
      if (*next == '-') {
        sign = -1;
        next++;
        SKIP_WS(next);
        scan = next;
      }
      KMP_ASSERT2((*next >= '0') && (*next <= '9'),
                  "bad explicit proc list");
      SKIP_DIGITS(next);
      stride = __kmp_str_to_int(scan, *next);
      KMP_ASSERT2(stride >= 0, "bad explicit proc list");
      stride *= sign;
    }

    // Do some range checks
    KMP_ASSERT2(stride != 0, "bad explicit proc list");
    if (stride > 0) {
      KMP_ASSERT2(start <= end, "bad explicit proc list");
    } else {
      KMP_ASSERT2(start >= end, "bad explicit proc list");
    }
    KMP_ASSERT2((end - start) / stride <= 65536, "bad explicit proc list");

    // Add the mask for each OS proc # to the list
    if (stride > 0) {
      do {
        ADD_MASK_OSID(start, osId2Mask, maxOsId);
        // Guard against overflow of start
        if (end - start < stride)
          break;
        start += stride;
      } while (start <= end);
    } else {
      do {
        ADD_MASK_OSID(start, osId2Mask, maxOsId);
        start += stride;
      } while (start >= end);
    }

    // Skip optional comma
    SKIP_WS(next);
    if (*next == ',') {
      next++;
    }
    scan = next;
  }

  *out_numMasks = nextNewMask;
  if (nextNewMask == 0) {
    *out_masks = NULL;
    KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
    KMP_CPU_FREE(sumMask);
    return;
  }
  KMP_CPU_ALLOC_ARRAY((*out_masks), nextNewMask);
  for (i = 0; i < nextNewMask; i++) {
    kmp_affin_mask_t *src = KMP_CPU_INDEX(newMasks, i);
    kmp_affin_mask_t *dest = KMP_CPU_INDEX((*out_masks), i);
    KMP_CPU_COPY(dest, src);
  }
  KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
  KMP_CPU_FREE(sumMask);
}
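// Illustrative KMP_AFFINITY proclist (value hypothetical) as accepted by the
// parser above:
//   "0,4-7,{2,3},8-14:2"
// yields one mask per comma-separated term: proc 0; procs 4,5,6,7 expanded
// from the range; a single mask holding the union {2,3}; and procs
// 8,10,12,14 from the strided range. Invalid or unavailable OS proc ids are
// dropped with an AffIgnoreInvalidProcID warning rather than aborting.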
static void __kmp_process_subplace_list(const char **scan,
                                        kmp_affinity_t &affinity, int maxOsId,
                                        kmp_affin_mask_t *tempMask,
                                        int *setSize) {
  const char *next;
  kmp_affin_mask_t *osId2Mask = affinity.os_id_masks;

  for (;;) {
    int start, count, stride, i;

    // Read in the starting proc id
    SKIP_WS(*scan);
    KMP_ASSERT2((**scan >= '0') && (**scan <= '9'),
                "bad explicit places list");
    next = *scan;
    SKIP_DIGITS(next);
    start = __kmp_str_to_int(*scan, *next);
    KMP_ASSERT(start >= 0);
    *scan = next;

    // valid follow sets are ',' ':' and '}'
    SKIP_WS(*scan);
    if (**scan == '}' || **scan == ',') {
      if ((start > maxOsId) ||
          (!KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start)))) {
        KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, start);
      } else {
        KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start));
        (*setSize)++;
      }
      if (**scan == '}') {
        break;
      }
      (*scan)++; // skip ','
      continue;
    }
    KMP_ASSERT2(**scan == ':', "bad explicit places list");
    (*scan)++; // skip ':'

    // Read count parameter
    SKIP_WS(*scan);
    KMP_ASSERT2((**scan >= '0') && (**scan <= '9'),
                "bad explicit places list");
    next = *scan;
    SKIP_DIGITS(next);
    count = __kmp_str_to_int(*scan, *next);
    KMP_ASSERT(count >= 0);
    *scan = next;

    // valid follow sets are ',' ':' and '}'
    SKIP_WS(*scan);
    if (**scan == '}' || **scan == ',') {
      for (i = 0; i < count; i++) {
        if ((start > maxOsId) ||
            (!KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start)))) {
          KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, start);
          break; // don't proliferate warnings for large count
        } else {
          KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start));
          start++;
          (*setSize)++;
        }
      }
      if (**scan == '}') {
        break;
      }
      (*scan)++; // skip ','
      continue;
    }
    KMP_ASSERT2(**scan == ':', "bad explicit places list");
    (*scan)++; // skip ':'

    // Read stride parameter
    int sign = +1;
    for (;;) {
      SKIP_WS(*scan);
      if (**scan == '+') {
        (*scan)++; // skip '+'
        continue;
      }
      if (**scan == '-') {
        sign *= -1;
        (*scan)++; // skip '-'
        continue;
      }
      break;
    }
    SKIP_WS(*scan);
    KMP_ASSERT2((**scan >= '0') && (**scan <= '9'),
                "bad explicit places list");
    next = *scan;
    SKIP_DIGITS(next);
    stride = __kmp_str_to_int(*scan, *next);
    KMP_ASSERT(stride >= 0);
    *scan = next;
    stride *= sign;

    // valid follow sets are ',' and '}'
    SKIP_WS(*scan);
    if (**scan == '}' || **scan == ',') {
      for (i = 0; i < count; i++) {
        if ((start > maxOsId) ||
            (!KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start)))) {
          KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, start);
          break; // don't proliferate warnings for large count
        } else {
          KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start));
          start += stride;
          (*setSize)++;
        }
      }
      if (**scan == '}') {
        break;
      }
      (*scan)++; // skip ','
      continue;
    }

    KMP_ASSERT2(0, "bad explicit places list");
  }
}
static void __kmp_process_place(const char **scan, kmp_affinity_t &affinity,
                                int maxOsId, kmp_affin_mask_t *tempMask,
                                int *setSize) {
  const char *next;
  kmp_affin_mask_t *osId2Mask = affinity.os_id_masks;

  // valid follow sets are '{' '!' and num
  SKIP_WS(*scan);
  if (**scan == '{') {
    (*scan)++; // skip '{'
    __kmp_process_subplace_list(scan, affinity, maxOsId, tempMask, setSize);
    KMP_ASSERT2(**scan == '}', "bad explicit places list");
    (*scan)++; // skip '}'
  } else if (**scan == '!') {
    (*scan)++; // skip '!'
    __kmp_process_place(scan, affinity, maxOsId, tempMask, setSize);
    KMP_CPU_COMPLEMENT(maxOsId, tempMask);
    KMP_CPU_AND(tempMask, __kmp_affin_fullMask);
  } else if ((**scan >= '0') && (**scan <= '9')) {
    next = *scan;
    SKIP_DIGITS(next);
    int num = __kmp_str_to_int(*scan, *next);
    KMP_ASSERT(num >= 0);
    if ((num > maxOsId) ||
        (!KMP_CPU_ISSET(num, KMP_CPU_INDEX(osId2Mask, num)))) {
      KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, num);
    } else {
      KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, num));
      (*setSize)++;
    }
    *scan = next; // skip num
  } else {
    KMP_ASSERT2(0, "bad explicit places list");
  }
}
void __kmp_affinity_process_placelist(kmp_affinity_t &affinity) {
  int i, j, count, stride, sign;
  kmp_affin_mask_t **out_masks = &affinity.masks;
  unsigned *out_numMasks = &affinity.num_masks;
  const char *placelist = affinity.proclist;
  kmp_affin_mask_t *osId2Mask = affinity.os_id_masks;
  int maxOsId = affinity.num_os_id_masks - 1;
  const char *scan = placelist;
  const char *next = placelist;

  numNewMasks = 2;
  KMP_CPU_INTERNAL_ALLOC_ARRAY(newMasks, numNewMasks);
  nextNewMask = 0;

  // tempMask is modified based on the previous or initial place to form the
  // current place; previousMask contains the previous place
  kmp_affin_mask_t *tempMask;
  kmp_affin_mask_t *previousMask;
  KMP_CPU_ALLOC(tempMask);
  KMP_CPU_ZERO(tempMask);
  KMP_CPU_ALLOC(previousMask);
  KMP_CPU_ZERO(previousMask);
  int setSize = 0;

  for (;;) {
    __kmp_process_place(&scan, affinity, maxOsId, tempMask, &setSize);

    // valid follow sets are ',' ':' and EOL
    SKIP_WS(scan);
    if (*scan == '\0' || *scan == ',') {
      if (setSize > 0) {
        ADD_MASK(tempMask);
      }
      KMP_CPU_ZERO(tempMask);
      setSize = 0;
      if (*scan == '\0') {
        break;
      }
      scan++; // skip ','
      continue;
    }

    KMP_ASSERT2(*scan == ':', "bad explicit places list");
    scan++; // skip ':'

    // Read count parameter
    SKIP_WS(scan);
    KMP_ASSERT2((*scan >= '0') && (*scan <= '9'),
                "bad explicit places list");
    next = scan;
    SKIP_DIGITS(next);
    count = __kmp_str_to_int(scan, *next);
    KMP_ASSERT(count >= 0);
    scan = next;

    // valid follow sets are ',' ':' and EOL
    SKIP_WS(scan);
    if (*scan == '\0' || *scan == ',') {
      stride = +1;
    } else {
      KMP_ASSERT2(*scan == ':', "bad explicit places list");
      scan++; // skip ':'

      // Read stride parameter
      sign = +1;
      for (;;) {
        SKIP_WS(scan);
        if (*scan == '+') {
          scan++; // skip '+'
          continue;
        }
        if (*scan == '-') {
          sign *= -1;
          scan++; // skip '-'
          continue;
        }
        break;
      }
      SKIP_WS(scan);
      KMP_ASSERT2((*scan >= '0') && (*scan <= '9'),
                  "bad explicit places list");
      next = scan;
      SKIP_DIGITS(next);
      stride = __kmp_str_to_int(scan, *next);
      KMP_DEBUG_ASSERT(stride >= 0);
      scan = next;
      stride *= sign;
    }

    // Add places determined by initial_place : count : stride
    for (i = 0; i < count; i++) {
      if (setSize == 0) {
        break;
      }
      // Add the current place, then build the next place (tempMask) from it
      KMP_CPU_COPY(previousMask, tempMask);
      ADD_MASK(previousMask);
      KMP_CPU_ZERO(tempMask);
      setSize = 0;
      KMP_CPU_SET_ITERATE(j, previousMask) {
        if (!KMP_CPU_ISSET(j, previousMask)) {
          continue;
        }
        if ((j + stride > maxOsId) || (j + stride < 0) ||
            (!KMP_CPU_ISSET(j, __kmp_affin_fullMask)) ||
            (!KMP_CPU_ISSET(j + stride,
                            KMP_CPU_INDEX(osId2Mask, j + stride)))) {
          if (i < count - 1) {
            KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, j + stride);
          }
          continue;
        }
        KMP_CPU_SET(j + stride, tempMask);
        setSize++;
      }
    }
    KMP_CPU_ZERO(tempMask);
    setSize = 0;

    // valid follow sets are ',' and EOL
    SKIP_WS(scan);
    if (*scan == '\0') {
      break;
    }
    if (*scan == ',') {
      scan++; // skip ','
      continue;
    }

    KMP_ASSERT2(0, "bad explicit places list");
  }

  *out_numMasks = nextNewMask;
  if (nextNewMask == 0) {
    *out_masks = NULL;
    KMP_CPU_FREE(tempMask);
    KMP_CPU_FREE(previousMask);
    KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
    return;
  }
  KMP_CPU_ALLOC_ARRAY((*out_masks), nextNewMask);
  KMP_CPU_FREE(tempMask);
  KMP_CPU_FREE(previousMask);
  for (i = 0; i < nextNewMask; i++) {
    kmp_affin_mask_t *src = KMP_CPU_INDEX(newMasks, i);
    kmp_affin_mask_t *dest = KMP_CPU_INDEX((*out_masks), i);
    KMP_CPU_COPY(dest, src);
  }
  KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
}
#undef ADD_MASK
#undef ADD_MASK_OSID

// This function figures out the deepest level at which there is at least one
// cluster/core with more than one processing unit bound to it
static int __kmp_affinity_find_core_level(int nprocs, int bottom_level) {
  int core_level = 0;

  for (int i = 0; i < nprocs; i++) {
    const kmp_hw_thread_t &hw_thread = __kmp_topology->at(i);
    for (int j = bottom_level; j > 0; j--) {
      if (hw_thread.ids[j] > 0) {
        if (core_level < (j - 1)) {
          core_level = j - 1;
        }
      }
    }
  }
  return core_level;
}

// This function counts the number of clusters/cores at the given level
static int __kmp_affinity_compute_ncores(int nprocs, int bottom_level,
                                         int core_level) {
  return __kmp_topology->get_count(core_level);
}

// This function finds to which cluster/core the given processing unit is
// bound
static int __kmp_affinity_find_core(int proc, int bottom_level,
                                    int core_level) {
  int core = 0;
  KMP_DEBUG_ASSERT(proc >= 0 && proc < __kmp_topology->get_num_hw_threads());
  for (int i = 0; i <= proc; ++i) {
    if (i + 1 <= proc) {
      for (int j = 0; j <= core_level; ++j) {
        if (__kmp_topology->at(i + 1).sub_ids[j] !=
            __kmp_topology->at(i).sub_ids[j]) {
          core++;
          break;
        }
      }
    }
  }
  return core;
}

// This function finds the maximal number of processing units bound to a
// cluster/core at the given level
static int __kmp_affinity_max_proc_per_core(int nprocs, int bottom_level,
                                            int core_level) {
  if (core_level >= bottom_level)
    return 1;
  int thread_level = __kmp_topology->get_level(KMP_HW_THREAD);
  return __kmp_topology->calculate_ratio(thread_level, core_level);
}
static int *procarr = NULL;
static int __kmp_aff_depth = 0;
static int *__kmp_osid_to_hwthread_map = NULL;

static void __kmp_affinity_get_mask_topology_info(const kmp_affin_mask_t *mask,
                                                  kmp_affinity_ids_t &ids,
                                                  kmp_affinity_attrs_t &attrs) {
  if (!KMP_AFFINITY_CAPABLE())
    return;

  // Initialize ids and attrs thread data
  for (int i = 0; i < KMP_HW_LAST; ++i)
    ids.ids[i] = kmp_hw_thread_t::UNKNOWN_ID;
  attrs = KMP_AFFINITY_ATTRS_UNKNOWN;

  // Iterate through each os id within the mask and determine
  // the topology id and attribute information
  int cpu;
  bool first = true;
  int depth = __kmp_topology->get_depth();
  KMP_CPU_SET_ITERATE(cpu, mask) {
    int osid_idx = __kmp_osid_to_hwthread_map[cpu];
    const kmp_hw_thread_t &hw_thread = __kmp_topology->at(osid_idx);
    for (int level = 0; level < depth; ++level) {
      kmp_hw_t type = __kmp_topology->get_type(level);
      int id = hw_thread.sub_ids[level];
      if (ids.ids[type] == kmp_hw_thread_t::UNKNOWN_ID || ids.ids[type] == id) {
        ids.ids[type] = id;
      } else {
        // This mask spans multiple topology units: mark this level and all
        // levels below it as multiple
        ids.ids[type] = kmp_hw_thread_t::MULTIPLE_ID;
        for (; level < depth; ++level) {
          kmp_hw_t type = __kmp_topology->get_type(level);
          ids.ids[type] = kmp_hw_thread_t::MULTIPLE_ID;
        }
      }
    }
    if (first) {
      attrs.core_type = hw_thread.attrs.get_core_type();
      attrs.core_eff = hw_thread.attrs.get_core_eff();
      first = false;
    } else {
      // The mask spans multiple hw threads: keep the attributes only if they
      // agree for all of them
      if (attrs.core_type != hw_thread.attrs.get_core_type())
        attrs.core_type = KMP_HW_CORE_TYPE_UNKNOWN;
      if (attrs.core_eff != hw_thread.attrs.get_core_eff())
        attrs.core_eff = kmp_hw_attr_t::UNKNOWN_CORE_EFF;
    }
  }
}
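// Example (mask hypothetical): for a place {0,8} where OS procs 0 and 8 are
// the two SMT threads of the same core, ids.ids[KMP_HW_SOCKET] and
// ids.ids[KMP_HW_CORE] stay well defined while ids.ids[KMP_HW_THREAD]
// becomes MULTIPLE_ID. If the mask instead mixed a P-core thread with an
// E-core thread, attrs.core_type would degrade to KMP_HW_CORE_TYPE_UNKNOWN.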
static void __kmp_affinity_get_thread_topology_info(kmp_info_t *th) {
  if (!KMP_AFFINITY_CAPABLE())
    return;
  const kmp_affin_mask_t *mask = th->th.th_affin_mask;
  kmp_affinity_ids_t &ids = th->th.th_topology_ids;
  kmp_affinity_attrs_t &attrs = th->th.th_topology_attrs;
  __kmp_affinity_get_mask_topology_info(mask, ids, attrs);
}

// Assign the topology information to each place in the place list.
// A thread can then grab not only its affinity mask, but the topology
// information associated with that mask, e.g., which socket it is on.
static void __kmp_affinity_get_topology_info(kmp_affinity_t &affinity) {
  if (!KMP_AFFINITY_CAPABLE())
    return;
  if (affinity.type != affinity_none) {
    KMP_ASSERT(affinity.num_os_id_masks);
    KMP_ASSERT(affinity.os_id_masks);
  }
  KMP_ASSERT(affinity.num_masks);
  KMP_ASSERT(affinity.masks);
  KMP_ASSERT(__kmp_affin_fullMask);

  int max_cpu = __kmp_affin_fullMask->get_max_cpu();
  int num_hw_threads = __kmp_topology->get_num_hw_threads();

  // Allocate thread topology information
  if (!affinity.ids) {
    affinity.ids = (kmp_affinity_ids_t *)__kmp_allocate(
        sizeof(kmp_affinity_ids_t) * affinity.num_masks);
  }
  if (!affinity.attrs) {
    affinity.attrs = (kmp_affinity_attrs_t *)__kmp_allocate(
        sizeof(kmp_affinity_attrs_t) * affinity.num_masks);
  }
  if (!__kmp_osid_to_hwthread_map) {
    // Want the +1 because max_cpu should be a valid index into the map
    __kmp_osid_to_hwthread_map =
        (int *)__kmp_allocate(sizeof(int) * (max_cpu + 1));
  }

  // Create the OS proc to hardware thread map
  for (int hw_thread = 0; hw_thread < num_hw_threads; ++hw_thread) {
    int os_id = __kmp_topology->at(hw_thread).os_id;
    if (KMP_CPU_ISSET(os_id, __kmp_affin_fullMask))
      __kmp_osid_to_hwthread_map[os_id] = hw_thread;
  }

  for (unsigned i = 0; i < affinity.num_masks; ++i) {
    kmp_affinity_ids_t &ids = affinity.ids[i];
    kmp_affinity_attrs_t &attrs = affinity.attrs[i];
    kmp_affin_mask_t *mask = KMP_CPU_INDEX(affinity.masks, i);
    __kmp_affinity_get_mask_topology_info(mask, ids, attrs);
  }
}

// Called during __kmp_affinity_initialize()
static void __kmp_aux_affinity_initialize_other_data(kmp_affinity_t &affinity) {
  // Initialize other data structures which depend on the topology
  if (__kmp_topology && __kmp_topology->get_num_hw_threads()) {
    machine_hierarchy.init(__kmp_topology->get_num_hw_threads());
    __kmp_affinity_get_topology_info(affinity);
#if KMP_WEIGHTED_ITERATIONS_SUPPORTED
    __kmp_first_osid_with_ecore = __kmp_get_first_osid_with_ecore();
#endif
  }
}
// Create a one-element mask array (set of places) which only contains the
// initial process's affinity mask
static void __kmp_create_affinity_none_places(kmp_affinity_t &affinity) {
  KMP_ASSERT(__kmp_affin_fullMask != NULL);
  KMP_ASSERT(affinity.type == affinity_none);
  KMP_ASSERT(__kmp_avail_proc == __kmp_topology->get_num_hw_threads());
  affinity.num_masks = 1;
  KMP_CPU_ALLOC_ARRAY(affinity.masks, affinity.num_masks);
  kmp_affin_mask_t *dest = KMP_CPU_INDEX(affinity.masks, 0);
  KMP_CPU_COPY(dest, __kmp_affin_fullMask);
  __kmp_aux_affinity_initialize_other_data(affinity);
}

static void __kmp_aux_affinity_initialize_masks(kmp_affinity_t &affinity) {
  // Create the "full" mask - this defines all of the processors that we
  // consider to be in the machine model. If respect is set, then it is the
  // initialization thread's affinity mask. Otherwise, it is all processors
  // that we know about on the machine.
  int verbose = affinity.flags.verbose;
  const char *env_var = affinity.env_var;

  // Already initialized
  if (__kmp_affin_fullMask && __kmp_affin_origMask)
    return;

  if (__kmp_affin_fullMask == NULL) {
    KMP_CPU_ALLOC(__kmp_affin_fullMask);
  }
  if (__kmp_affin_origMask == NULL) {
    KMP_CPU_ALLOC(__kmp_affin_origMask);
  }
  if (KMP_AFFINITY_CAPABLE()) {
    __kmp_get_system_affinity(__kmp_affin_fullMask, TRUE);
    // Make a copy before possibly expanding to the entire machine mask
    __kmp_affin_origMask->copy(__kmp_affin_fullMask);
    if (affinity.flags.respect) {
      // Count the number of available processors
      unsigned i;
      __kmp_avail_proc = 0;
      KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
        if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
          continue;
        }
        __kmp_avail_proc++;
      }
      if (__kmp_avail_proc > __kmp_xproc) {
        KMP_AFF_WARNING(affinity, ErrorInitializeAffinity);
        affinity.type = affinity_none;
        KMP_AFFINITY_DISABLE();
        return;
      }

      if (verbose) {
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                                  __kmp_affin_fullMask);
        KMP_INFORM(InitOSProcSetRespect, env_var, buf);
      }
    } else {
      if (verbose) {
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                                  __kmp_affin_fullMask);
        KMP_INFORM(InitOSProcSetNotRespect, env_var, buf);
      }
      __kmp_affinity_entire_machine_mask(__kmp_affin_fullMask);
      __kmp_avail_proc = __kmp_xproc;
#if KMP_OS_WINDOWS
      if (__kmp_num_proc_groups <= 1) {
        // Copy the expanded full mask if the topology has a single group
        __kmp_affin_origMask->copy(__kmp_affin_fullMask);
      }
      // Set the process affinity mask since threads' affinity
      // masks must be subsets of the process mask in Windows* OS
      __kmp_affin_fullMask->set_process_affinity(true);
#endif
    }
  }
}
static bool __kmp_aux_affinity_initialize_topology(kmp_affinity_t &affinity) {
  bool success = false;
  const char *env_var = affinity.env_var;
  kmp_i18n_id_t msg_id = kmp_i18n_null;
  int verbose = affinity.flags.verbose;

  // For backward compatibility, setting KMP_CPUINFO_FILE implies
  // KMP_TOPOLOGY_METHOD=cpuinfo
  if ((__kmp_cpuinfo_file != NULL) &&
      (__kmp_affinity_top_method == affinity_top_method_all)) {
    __kmp_affinity_top_method = affinity_top_method_cpuinfo;
  }

  if (__kmp_affinity_top_method == affinity_top_method_all) {
// In the default code path, errors are not fatal - we just try using
// another method. We only emit a warning message if affinity is on, or the
// verbose flag is set, and the nowarnings flag was not set.
#if KMP_USE_HWLOC
    if (!success &&
        __kmp_affinity_dispatch->get_api_type() == KMPAffinity::HWLOC) {
      if (!__kmp_hwloc_error) {
        success = __kmp_affinity_create_hwloc_map(&msg_id);
        if (!success && verbose) {
          KMP_INFORM(AffIgnoringHwloc, env_var);
        }
      } else if (verbose) {
        KMP_INFORM(AffIgnoringHwloc, env_var);
      }
    }
#endif

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
    if (!success) {
      success = __kmp_affinity_create_x2apicid_map(&msg_id);
      if (!success && verbose && msg_id != kmp_i18n_null) {
        KMP_INFORM(AffInfoStr, env_var, __kmp_i18n_catgets(msg_id));
      }
    }
    if (!success) {
      success = __kmp_affinity_create_apicid_map(&msg_id);
      if (!success && verbose && msg_id != kmp_i18n_null) {
        KMP_INFORM(AffInfoStr, env_var, __kmp_i18n_catgets(msg_id));
      }
    }
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

#if KMP_OS_LINUX || KMP_OS_AIX
    if (!success) {
      int line = 0;
      success = __kmp_affinity_create_cpuinfo_map(&line, &msg_id);
      if (!success && verbose && msg_id != kmp_i18n_null) {
        KMP_INFORM(AffInfoStr, env_var, __kmp_i18n_catgets(msg_id));
      }
    }
#endif /* KMP_OS_LINUX || KMP_OS_AIX */

#if KMP_GROUP_AFFINITY
    if (!success && (__kmp_num_proc_groups > 1)) {
      success = __kmp_affinity_create_proc_group_map(&msg_id);
      if (!success && verbose && msg_id != kmp_i18n_null) {
        KMP_INFORM(AffInfoStr, env_var, __kmp_i18n_catgets(msg_id));
      }
    }
#endif /* KMP_GROUP_AFFINITY */

    if (!success) {
      success = __kmp_affinity_create_flat_map(&msg_id);
      if (!success && verbose && msg_id != kmp_i18n_null) {
        KMP_INFORM(AffInfoStr, env_var, __kmp_i18n_catgets(msg_id));
      }
      KMP_ASSERT(success);
    }
  }

// If the user has specified that a particular topology discovery method is
// to be used, then we abort if that method fails. The exception is group
// affinity, which might have been implicitly set.
#if KMP_USE_HWLOC
  else if (__kmp_affinity_top_method == affinity_top_method_hwloc) {
    KMP_ASSERT(__kmp_affinity_dispatch->get_api_type() == KMPAffinity::HWLOC);
    success = __kmp_affinity_create_hwloc_map(&msg_id);
    if (!success) {
      KMP_ASSERT(msg_id != kmp_i18n_null);
      KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id));
    }
  }
#endif // KMP_USE_HWLOC

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
  else if (__kmp_affinity_top_method == affinity_top_method_x2apicid ||
           __kmp_affinity_top_method == affinity_top_method_x2apicid_1f) {
    success = __kmp_affinity_create_x2apicid_map(&msg_id);
    if (!success) {
      KMP_ASSERT(msg_id != kmp_i18n_null);
      KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id));
    }
  } else if (__kmp_affinity_top_method == affinity_top_method_apicid) {
    success = __kmp_affinity_create_apicid_map(&msg_id);
    if (!success) {
      KMP_ASSERT(msg_id != kmp_i18n_null);
      KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id));
    }
  }
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

  else if (__kmp_affinity_top_method == affinity_top_method_cpuinfo) {
    int line = 0;
    success = __kmp_affinity_create_cpuinfo_map(&line, &msg_id);
    if (!success) {
      KMP_ASSERT(msg_id != kmp_i18n_null);
      const char *filename = __kmp_cpuinfo_get_filename();
      if (line > 0) {
        KMP_FATAL(FileLineMsgExiting, filename, line,
                  __kmp_i18n_catgets(msg_id));
      } else {
        KMP_FATAL(FileMsgExiting, filename, __kmp_i18n_catgets(msg_id));
      }
    }
  }

#if KMP_GROUP_AFFINITY
  else if (__kmp_affinity_top_method == affinity_top_method_group) {
    success = __kmp_affinity_create_proc_group_map(&msg_id);
    KMP_ASSERT(success);
    if (!success) {
      KMP_ASSERT(msg_id != kmp_i18n_null);
      KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id));
    }
  }
#endif /* KMP_GROUP_AFFINITY */

  else if (__kmp_affinity_top_method == affinity_top_method_flat) {
    success = __kmp_affinity_create_flat_map(&msg_id);
    // should not fail
    KMP_ASSERT(success);
  }

  // If the topology could not be determined, fall back to a flat default
  if (!__kmp_topology) {
    if (KMP_AFFINITY_CAPABLE()) {
      KMP_AFF_WARNING(affinity, ErrorInitializeAffinity);
    }
    if (nPackages > 0 && nCoresPerPkg > 0 && __kmp_nThreadsPerCore > 0 &&
        __kmp_ncores > 0) {
      __kmp_topology = kmp_topology_t::allocate(0, 0, NULL);
      __kmp_topology->canonicalize(nPackages, nCoresPerPkg,
                                   __kmp_nThreadsPerCore, __kmp_ncores);
      if (verbose) {
        __kmp_topology->print(env_var);
      }
    }
    return false;
  }

  // Canonicalize, print (if requested), and apply KMP_HW_SUBSET
  __kmp_topology->canonicalize();
  if (verbose)
    __kmp_topology->print(env_var);
  bool filtered = __kmp_topology->filter_hw_subset();
  if (filtered && verbose)
    __kmp_topology->print("KMP_HW_SUBSET");
  return success;
}
static void __kmp_aux_affinity_initialize(kmp_affinity_t &affinity) {
  bool is_regular_affinity = (&affinity == &__kmp_affinity);
  bool is_hidden_helper_affinity = (&affinity == &__kmp_hh_affinity);
  const char *env_var = __kmp_get_affinity_env_var(affinity);

  if (affinity.flags.initialized) {
    KMP_ASSERT(__kmp_affin_fullMask != NULL);
    return;
  }

  if (is_regular_affinity && (!__kmp_affin_fullMask || !__kmp_affin_origMask))
    __kmp_aux_affinity_initialize_masks(affinity);

  if (is_regular_affinity && !__kmp_topology) {
    bool success = __kmp_aux_affinity_initialize_topology(affinity);
    if (success) {
      KMP_ASSERT(__kmp_avail_proc == __kmp_topology->get_num_hw_threads());
    } else {
      affinity.type = affinity_none;
      KMP_AFFINITY_DISABLE();
    }
  }

  // If KMP_AFFINITY=none, then only create the single "none" place
  // which is the process's initial affinity mask or the number of
  // hardware threads, depending on respect/norespect
  if (affinity.type == affinity_none) {
    __kmp_create_affinity_none_places(affinity);
#if KMP_USE_HIER_SCHED
    __kmp_dispatch_set_hierarchy_values();
#endif
    affinity.flags.initialized = TRUE;
    return;
  }

  __kmp_topology->set_granularity(affinity);
  int depth = __kmp_topology->get_depth();

  // Create the table of masks, indexed by thread Id
  unsigned numUnique = 0;
  int numAddrs = __kmp_topology->get_num_hw_threads();
  // If OMP_PLACES=cores:<attribute> was specified, then attempt
  // to make the OS Id mask table using those attributes
  if (affinity.core_attr_gran.valid) {
    __kmp_create_os_id_masks(&numUnique, affinity, [&](int idx) {
      KMP_ASSERT(idx >= -1);
      for (int i = idx + 1; i < numAddrs; ++i)
        if (__kmp_topology->at(i).attrs.contains(affinity.core_attr_gran))
          return i;
      return numAddrs;
    });
    if (!affinity.os_id_masks) {
      const char *core_attribute;
      if (affinity.core_attr_gran.core_eff != kmp_hw_attr_t::UNKNOWN_CORE_EFF)
        core_attribute = "core_efficiency";
      else
        core_attribute = "core_type";
      KMP_AFF_WARNING(affinity, AffIgnoringNotAvailable, env_var,
                      core_attribute,
                      __kmp_hw_get_catalog_string(KMP_HW_CORE, /*plural=*/true))
    }
  }

  // If core attributes did not work or none were specified, then make the
  // OS Id mask table in the typical incremental way, checking the validity
  // of each id at the specified granularity level
  if (!affinity.os_id_masks) {
    int gran = affinity.gran_levels;
    int gran_level = depth - 1 - affinity.gran_levels;
    if (gran >= 0 && gran_level >= 0 && gran_level < depth) {
      __kmp_create_os_id_masks(
          &numUnique, affinity, [depth, numAddrs, &affinity](int idx) {
            KMP_ASSERT(idx >= -1);
            int gran = affinity.gran_levels;
            int gran_level = depth - 1 - affinity.gran_levels;
            for (int i = idx + 1; i < numAddrs; ++i)
              if ((gran >= depth) ||
                  (gran < depth && __kmp_topology->at(i).ids[gran_level] !=
                                       kmp_hw_thread_t::UNKNOWN_ID))
                return i;
            return numAddrs;
          });
    }
  }
  // Final attempt to make the OS Id mask table the incremental way
  if (!affinity.os_id_masks) {
    __kmp_create_os_id_masks(&numUnique, affinity, [](int idx) {
      KMP_ASSERT(idx >= -1);
      return idx + 1;
    });
  }
  switch (affinity.type) {

  case affinity_explicit:
    KMP_DEBUG_ASSERT(affinity.proclist != NULL);
    if (is_hidden_helper_affinity ||
        __kmp_nested_proc_bind.bind_types[0] == proc_bind_intel) {
      __kmp_affinity_process_proclist(affinity);
    } else {
      __kmp_affinity_process_placelist(affinity);
    }
    if (affinity.num_masks == 0) {
      KMP_AFF_WARNING(affinity, AffNoValidProcID);
      affinity.type = affinity_none;
      __kmp_create_affinity_none_places(affinity);
      affinity.flags.initialized = TRUE;
      return;
    }
    break;

  // The other affinity types rely on sorting the hardware threads according
  // to some permutation of the machine topology tree. Set affinity.compact
  // and affinity.offset appropriately, then jump to the common code.
  case affinity_logical:
    affinity.compact = 0;
    if (affinity.offset) {
      affinity.offset =
          __kmp_nThreadsPerCore * affinity.offset % __kmp_avail_proc;
    }
    goto sortTopology;

  case affinity_physical:
    if (__kmp_nThreadsPerCore > 1) {
      affinity.compact = 1;
      if (affinity.compact >= depth) {
        affinity.compact = 0;
      }
    } else {
      affinity.compact = 0;
    }
    if (affinity.offset) {
      affinity.offset =
          __kmp_nThreadsPerCore * affinity.offset % __kmp_avail_proc;
    }
    goto sortTopology;

  case affinity_scatter:
    if (affinity.compact >= depth) {
      affinity.compact = 0;
    } else {
      affinity.compact = depth - 1 - affinity.compact;
    }
    goto sortTopology;

  case affinity_compact:
    if (affinity.compact >= depth) {
      affinity.compact = depth - 1;
    }
    goto sortTopology;

  case affinity_balanced:
    if (depth <= 1 || is_hidden_helper_affinity) {
      KMP_AFF_WARNING(affinity, AffBalancedNotAvail, env_var);
      affinity.type = affinity_none;
      __kmp_create_affinity_none_places(affinity);
      affinity.flags.initialized = TRUE;
      return;
    } else if (!__kmp_topology->is_uniform()) {
      // Save the depth for further usage
      __kmp_aff_depth = depth;

      int core_level =
          __kmp_affinity_find_core_level(__kmp_avail_proc, depth - 1);
      int ncores = __kmp_affinity_compute_ncores(__kmp_avail_proc, depth - 1,
                                                 core_level);
      int maxprocpercore = __kmp_affinity_max_proc_per_core(
          __kmp_avail_proc, depth - 1, core_level);

      int nproc = ncores * maxprocpercore;
      if ((nproc < 2) || (nproc < __kmp_avail_proc)) {
        KMP_AFF_WARNING(affinity, AffBalancedNotAvail, env_var);
        affinity.type = affinity_none;
        __kmp_create_affinity_none_places(affinity);
        affinity.flags.initialized = TRUE;
        return;
      }

      procarr = (int *)__kmp_allocate(sizeof(int) * nproc);
      for (int i = 0; i < nproc; i++) {
        procarr[i] = -1;
      }

      int lastcore = -1;
      int inlastcore = 0;
      for (int i = 0; i < __kmp_avail_proc; i++) {
        int proc = __kmp_topology->at(i).os_id;
        int core = __kmp_affinity_find_core(i, depth - 1, core_level);

        if (core == lastcore) {
          inlastcore++;
        } else {
          inlastcore = 0;
        }
        lastcore = core;

        procarr[core * maxprocpercore + inlastcore] = proc;
      }
    }
    if (affinity.compact >= depth) {
      affinity.compact = depth - 1;
    }

  sortTopology:
    // Allocate the gtid->affinity mask table
    if (affinity.flags.dups) {
      affinity.num_masks = __kmp_avail_proc;
    } else {
      affinity.num_masks = numUnique;
    }

    if ((__kmp_nested_proc_bind.bind_types[0] != proc_bind_intel) &&
        (__kmp_affinity_num_places > 0) &&
        ((unsigned)__kmp_affinity_num_places < affinity.num_masks) &&
        !is_hidden_helper_affinity) {
      affinity.num_masks = __kmp_affinity_num_places;
    }

    KMP_CPU_ALLOC_ARRAY(affinity.masks, affinity.num_masks);

    // Sort the topology table according to the current setting of
    // affinity.compact, then fill out affinity.masks
    __kmp_topology->sort_compact(affinity);
    {
      int i;
      unsigned j;
      int num_hw_threads = __kmp_topology->get_num_hw_threads();
      kmp_full_mask_modifier_t full_mask;
      for (i = 0, j = 0; i < num_hw_threads; i++) {
        if ((!affinity.flags.dups) && (!__kmp_topology->at(i).leader)) {
          continue;
        }
        int osId = __kmp_topology->at(i).os_id;

        kmp_affin_mask_t *src = KMP_CPU_INDEX(affinity.os_id_masks, osId);
        if (KMP_CPU_ISEMPTY(src))
          continue;
        kmp_affin_mask_t *dest = KMP_CPU_INDEX(affinity.masks, j);
        KMP_ASSERT(KMP_CPU_ISSET(osId, src));
        KMP_CPU_COPY(dest, src);
        full_mask.include(src);
        if (++j >= affinity.num_masks) {
          break;
        }
      }
      KMP_DEBUG_ASSERT(j == affinity.num_masks);
      // See if the places list further restricts the full mask
      if (full_mask.restrict_to_mask() && affinity.flags.verbose) {
        __kmp_topology->print(env_var);
      }
    }
    // Sort the topology back using ids
    __kmp_topology->sort_ids();
    break;

  default:
    KMP_ASSERT2(0, "Unexpected affinity setting");
  }
  __kmp_aux_affinity_initialize_other_data(affinity);
  affinity.flags.initialized = TRUE;
}
5266 void __kmp_affinity_initialize(kmp_affinity_t &affinity) {
5275 int disabled = (affinity.type == affinity_disabled);
5276 if (!KMP_AFFINITY_CAPABLE())
5277 KMP_ASSERT(disabled);
5279 affinity.type = affinity_none;
5280 __kmp_aux_affinity_initialize(affinity);
5282 affinity.type = affinity_disabled;
void __kmp_affinity_uninitialize(void) {
  for (kmp_affinity_t *affinity : __kmp_affinities) {
    if (affinity->masks != NULL)
      KMP_CPU_FREE_ARRAY(affinity->masks, affinity->num_masks);
    if (affinity->os_id_masks != NULL)
      KMP_CPU_FREE_ARRAY(affinity->os_id_masks, affinity->num_os_id_masks);
    if (affinity->proclist != NULL)
      KMP_INTERNAL_FREE(affinity->proclist);
    if (affinity->ids != NULL)
      __kmp_free(affinity->ids);
    if (affinity->attrs != NULL)
      __kmp_free(affinity->attrs);
    *affinity = KMP_AFFINITY_INIT(affinity->env_var);
  }
  if (__kmp_affin_fullMask != NULL) {
    KMP_CPU_FREE(__kmp_affin_fullMask);
    __kmp_affin_fullMask = NULL;
  }
  __kmp_avail_proc = 0;
  if (__kmp_affin_origMask != NULL) {
    if (KMP_AFFINITY_CAPABLE()) {
#if KMP_OS_AIX
      // Uninitialize by unbinding the thread.
      bindprocessor(BINDTHREAD, thread_self(), PROCESSOR_CLASS_ANY);
#else
      __kmp_set_system_affinity(__kmp_affin_origMask, FALSE);
#endif
    }
    KMP_CPU_FREE(__kmp_affin_origMask);
    __kmp_affin_origMask = NULL;
  }
  __kmp_affinity_num_places = 0;
  if (procarr != NULL) {
    __kmp_free(procarr);
    procarr = NULL;
  }
  if (__kmp_osid_to_hwthread_map) {
    __kmp_free(__kmp_osid_to_hwthread_map);
    __kmp_osid_to_hwthread_map = NULL;
  }
#if KMP_USE_HWLOC
  if (__kmp_hwloc_topology != NULL) {
    hwloc_topology_destroy(__kmp_hwloc_topology);
    __kmp_hwloc_topology = NULL;
  }
#endif
  if (__kmp_hw_subset) {
    kmp_hw_subset_t::deallocate(__kmp_hw_subset);
    __kmp_hw_subset = nullptr;
  }
  if (__kmp_topology) {
    kmp_topology_t::deallocate(__kmp_topology);
    __kmp_topology = nullptr;
  }
  KMPAffinity::destroy_api();
}
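// Helper: pick the place (index into affinity->masks) for a given gtid.
// Masks are handed out round-robin, shifted by the user-supplied offset:
//   place = (mask_idx + offset) % num_masks
// Illustrative example (numbers made up): with num_masks = 4 and offset = 1,
// consecutive mask indices 0,1,2,3,4 map to places 1,2,3,0,1. For hidden
// helper threads mask_idx is gtid - 2, skipping the regular primary thread
// and the hidden team's main thread.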
static void __kmp_select_mask_by_gtid(int gtid, const kmp_affinity_t *affinity,
                                      int *place, kmp_affin_mask_t **mask) {
  int mask_idx;
  bool is_hidden_helper = KMP_HIDDEN_HELPER_THREAD(gtid);
  if (is_hidden_helper)
    mask_idx = gtid - 2;
  else
    mask_idx = __kmp_adjust_gtid_for_hidden_helpers(gtid);
  KMP_DEBUG_ASSERT(affinity->num_masks > 0);
  *place = (mask_idx + affinity->offset) % affinity->num_masks;
  *mask = KMP_CPU_INDEX(affinity->masks, *place);
}
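// Initialize the per-thread affinity data: the thread's topology ids/attrs
// and th_affin_mask. Depending on the affinity type and whether the thread
// is the root or a hidden helper, the mask is either the full process mask
// (KMP_PLACE_ALL) or one place selected by __kmp_select_mask_by_gtid().
// Note this only computes and stores the mask; the actual OS binding happens
// later in __kmp_affinity_bind_init_mask().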
void __kmp_affinity_set_init_mask(int gtid, int isa_root) {

  kmp_info_t *th = (kmp_info_t *)TCR_SYNC_PTR(__kmp_threads[gtid]);

  // Set the thread topology information to default of unknown
  for (int id = 0; id < KMP_HW_LAST; ++id)
    th->th.th_topology_ids.ids[id] = kmp_hw_thread_t::UNKNOWN_ID;
  th->th.th_topology_attrs = KMP_AFFINITY_ATTRS_UNKNOWN;

  if (!KMP_AFFINITY_CAPABLE()) {
    return;
  }

  if (th->th.th_affin_mask == NULL) {
    KMP_CPU_ALLOC(th->th.th_affin_mask);
  } else {
    KMP_CPU_ZERO(th->th.th_affin_mask);
  }

  kmp_affin_mask_t *mask;
  int i;
  const kmp_affinity_t *affinity;
  bool is_hidden_helper = KMP_HIDDEN_HELPER_THREAD(gtid);

  if (is_hidden_helper)
    affinity = &__kmp_hh_affinity;
  else
    affinity = &__kmp_affinity;

  if (KMP_AFFINITY_NON_PROC_BIND || is_hidden_helper) {
    if ((affinity->type == affinity_none) ||
        (affinity->type == affinity_balanced) ||
        KMP_HIDDEN_HELPER_MAIN_THREAD(gtid)) {
#if KMP_GROUP_AFFINITY
      if (__kmp_num_proc_groups > 1) {
        return;
      }
#endif
      KMP_ASSERT(__kmp_affin_fullMask != NULL);
      i = KMP_PLACE_ALL;
      mask = __kmp_affin_fullMask;
    } else {
      __kmp_select_mask_by_gtid(gtid, affinity, &i, &mask);
    }
  } else {
    if (!isa_root || __kmp_nested_proc_bind.bind_types[0] == proc_bind_false) {
#if KMP_GROUP_AFFINITY
      if (__kmp_num_proc_groups > 1) {
        return;
      }
#endif
      KMP_ASSERT(__kmp_affin_fullMask != NULL);
      i = KMP_PLACE_ALL;
      mask = __kmp_affin_fullMask;
    } else {
      __kmp_select_mask_by_gtid(gtid, affinity, &i, &mask);
    }
  }

  th->th.th_current_place = i;
  if (isa_root && !is_hidden_helper) {
    th->th.th_new_place = i;
    th->th.th_first_place = 0;
    th->th.th_last_place = affinity->num_masks - 1;
  } else if (KMP_AFFINITY_NON_PROC_BIND) {
    // When using a Non-OMP_PROC_BIND affinity method,
    // set all threads' place-partition-var to the entire place list
    th->th.th_first_place = 0;
    th->th.th_last_place = affinity->num_masks - 1;
  }
  // Copy topology information associated with the place
  if (i >= 0) {
    th->th.th_topology_ids = __kmp_affinity.ids[i];
    th->th.th_topology_attrs = __kmp_affinity.attrs[i];
  }

  if (i == KMP_PLACE_ALL) {
    KA_TRACE(100, ("__kmp_affinity_set_init_mask: setting T#%d to all places\n",
                   gtid));
  } else {
    KA_TRACE(100, ("__kmp_affinity_set_init_mask: setting T#%d to place %d\n",
                   gtid, i));
  }

  KMP_CPU_COPY(th->th.th_affin_mask, mask);
}
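// Push the mask computed by __kmp_affinity_set_init_mask() down to the OS,
// optionally reporting the binding when verbosity is on. For type=none the
// boolean argument to __kmp_set_system_affinity() is FALSE so a failed bind
// is tolerated; otherwise TRUE requests abort-on-error.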
void __kmp_affinity_bind_init_mask(int gtid) {
  if (!KMP_AFFINITY_CAPABLE()) {
    return;
  }
  kmp_info_t *th = (kmp_info_t *)TCR_SYNC_PTR(__kmp_threads[gtid]);
  const kmp_affinity_t *affinity;
  const char *env_var;
  bool is_hidden_helper = KMP_HIDDEN_HELPER_THREAD(gtid);

  if (is_hidden_helper)
    affinity = &__kmp_hh_affinity;
  else
    affinity = &__kmp_affinity;
  env_var = __kmp_get_affinity_env_var(*affinity, /*for_binding=*/true);
  /* to avoid duplicate printing (will be correctly printed on barrier) */
  if (affinity->flags.verbose && (affinity->type == affinity_none ||
                                  (th->th.th_current_place != KMP_PLACE_ALL &&
                                   affinity->type != affinity_balanced)) &&
      !KMP_HIDDEN_HELPER_MAIN_THREAD(gtid)) {
    char buf[KMP_AFFIN_MASK_PRINT_LEN];
    __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                              th->th.th_affin_mask);
    KMP_INFORM(BoundToOSProcSet, env_var, (kmp_int32)getpid(), __kmp_gettid(),
               gtid, buf);
  }

#if KMP_OS_WINDOWS
  // On Windows* OS, the process affinity mask might have changed. If the user
  // didn't request affinity and this call fails, just continue silently.
  if (affinity->type == affinity_none) {
    __kmp_set_system_affinity(th->th.th_affin_mask, FALSE);
  } else
#endif
    __kmp_set_system_affinity(th->th.th_affin_mask, TRUE);
}
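// Rebind a worker to th_new_place when the place-partition changes (e.g. via
// OMP_PLACES/proc_bind). The asserts below allow both contiguous partitions
// (first <= last) and partitions that wrap around the end of the place list.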
void __kmp_affinity_bind_place(int gtid) {
  // Hidden helper threads should not be affected by OMP_PLACES/OMP_PROC_BIND
  if (!KMP_AFFINITY_CAPABLE() || KMP_HIDDEN_HELPER_THREAD(gtid)) {
    return;
  }

  kmp_info_t *th = (kmp_info_t *)TCR_SYNC_PTR(__kmp_threads[gtid]);

  KA_TRACE(100, ("__kmp_affinity_bind_place: binding T#%d to place %d (current "
                 "place = %d)\n",
                 gtid, th->th.th_new_place, th->th.th_current_place));

  // Check that the new place is within this thread's partition.
  KMP_DEBUG_ASSERT(th->th.th_affin_mask != NULL);
  KMP_ASSERT(th->th.th_new_place >= 0);
  KMP_ASSERT((unsigned)th->th.th_new_place <= __kmp_affinity.num_masks);
  if (th->th.th_first_place <= th->th.th_last_place) {
    KMP_ASSERT((th->th.th_new_place >= th->th.th_first_place) &&
               (th->th.th_new_place <= th->th.th_last_place));
  } else {
    KMP_ASSERT((th->th.th_new_place <= th->th.th_first_place) ||
               (th->th.th_new_place >= th->th.th_last_place));
  }

  // Copy the thread mask to the kmp_info_t structure,
  // and set this thread's affinity.
  kmp_affin_mask_t *mask =
      KMP_CPU_INDEX(__kmp_affinity.masks, th->th.th_new_place);
  KMP_CPU_COPY(th->th.th_affin_mask, mask);
  th->th.th_current_place = th->th.th_new_place;

  if (__kmp_affinity.flags.verbose) {
    char buf[KMP_AFFIN_MASK_PRINT_LEN];
    __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                              th->th.th_affin_mask);
    KMP_INFORM(BoundToOSProcSet, "OMP_PROC_BIND", (kmp_int32)getpid(),
               __kmp_gettid(), gtid, buf);
  }
  __kmp_set_system_affinity(th->th.th_affin_mask, TRUE);
}
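// Backing implementation for the public kmp_set_affinity() entry point.
// A minimal usage sketch of that API (hypothetical caller code, error
// handling elided):
//
//   kmp_affinity_mask_t m;
//   kmp_create_affinity_mask(&m);
//   kmp_set_affinity_mask_proc(3, &m); // ask to run on OS proc 3
//   if (kmp_set_affinity(&m) != 0) { /* mask was rejected */ }
//   kmp_destroy_affinity_mask(&m);
//
// The consistency checks below reject NULL or empty masks and masks that
// contain procs outside the process's full mask.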
int __kmp_aux_set_affinity(void **mask) {
  int gtid;
  kmp_info_t *th;
  int retval;

  if (!KMP_AFFINITY_CAPABLE()) {
    return -1;
  }

  gtid = __kmp_entry_gtid();
  KA_TRACE(
      1000, (""); {
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                                  (kmp_affin_mask_t *)(*mask));
        __kmp_debug_printf(
            "kmp_set_affinity: setting affinity mask for thread %d = %s\n",
            gtid, buf);
      });

  if (__kmp_env_consistency_check) {
    if ((mask == NULL) || (*mask == NULL)) {
      KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
    } else {
      unsigned proc;
      int num_procs = 0;

      KMP_CPU_SET_ITERATE(proc, ((kmp_affin_mask_t *)(*mask))) {
        if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
          KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
        }
        if (!KMP_CPU_ISSET(proc, (kmp_affin_mask_t *)(*mask))) {
          continue;
        }
        num_procs++;
      }
      if (num_procs == 0) {
        KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
      }

#if KMP_GROUP_AFFINITY
      if (__kmp_get_proc_group((kmp_affin_mask_t *)(*mask)) < 0) {
        KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
      }
#endif /* KMP_GROUP_AFFINITY */
    }
  }

  th = __kmp_threads[gtid];
  KMP_DEBUG_ASSERT(th->th.th_affin_mask != NULL);
  retval = __kmp_set_system_affinity((kmp_affin_mask_t *)(*mask), FALSE);
  if (retval == 0) {
    KMP_CPU_COPY(th->th.th_affin_mask, (kmp_affin_mask_t *)(*mask));
  }

  th->th.th_current_place = KMP_PLACE_UNDEFINED;
  th->th.th_new_place = KMP_PLACE_UNDEFINED;
  th->th.th_first_place = 0;
  th->th.th_last_place = __kmp_affinity.num_masks - 1;

  // Turn off 4.0 affinity for the current thread at this parallel level.
  th->th.th_current_task->td_icvs.proc_bind = proc_bind_false;

  return retval;
}
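// Backing implementation for kmp_get_affinity(). On Windows and AIX the
// stored th_affin_mask is copied back to the caller; elsewhere the mask is
// queried directly from the OS.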
int __kmp_aux_get_affinity(void **mask) {
  int gtid;
  int retval;
#if KMP_OS_WINDOWS || KMP_OS_AIX || KMP_DEBUG
  kmp_info_t *th;
#endif
  if (!KMP_AFFINITY_CAPABLE()) {
    return -1;
  }

  gtid = __kmp_entry_gtid();
#if KMP_OS_WINDOWS || KMP_OS_AIX || KMP_DEBUG
  th = __kmp_threads[gtid];
#else
  (void)gtid; // unused variable
#endif
  KMP_DEBUG_ASSERT(th->th.th_affin_mask != NULL);

  KA_TRACE(
      1000, (""); {
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                                  th->th.th_affin_mask);
        __kmp_printf(
            "kmp_get_affinity: stored affinity mask for thread %d = %s\n", gtid,
            buf);
      });

  if (__kmp_env_consistency_check) {
    if ((mask == NULL) || (*mask == NULL)) {
      KMP_FATAL(AffinityInvalidMask, "kmp_get_affinity");
    }
  }

#if !KMP_OS_WINDOWS && !KMP_OS_AIX

  retval = __kmp_get_system_affinity((kmp_affin_mask_t *)(*mask), FALSE);
  KA_TRACE(
      1000, (""); {
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                                  (kmp_affin_mask_t *)(*mask));
        __kmp_printf(
            "kmp_get_affinity: system affinity mask for thread %d = %s\n", gtid,
            buf);
      });
  return retval;

#else
  (void)retval;

  KMP_CPU_COPY((kmp_affin_mask_t *)(*mask), th->th.th_affin_mask);
  return 0;

#endif /* !KMP_OS_WINDOWS && !KMP_OS_AIX */
}
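// Upper bound (exclusive) on the OS proc ids usable with the mask-proc API
// below: the full group-mask width when multiple Windows processor groups
// are present, otherwise the machine's logical processor count.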
int __kmp_aux_get_affinity_max_proc() {
  if (!KMP_AFFINITY_CAPABLE()) {
    return 0;
  }
#if KMP_GROUP_AFFINITY
  if (__kmp_num_proc_groups > 1) {
    return (int)(__kmp_num_proc_groups * sizeof(DWORD_PTR) * CHAR_BIT);
  }
#endif
  return __kmp_xproc;
}
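// The three kmp_*_affinity_mask_proc() backing routines below edit or query
// a single OS proc bit in a user-held mask. Shared conventions: -1 for an
// out-of-range proc, -2 (or 0 for the getter) when the proc lies outside the
// process's full mask, and 0 on success.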
int __kmp_aux_set_affinity_mask_proc(int proc, void **mask) {
  if (!KMP_AFFINITY_CAPABLE()) {
    return -1;
  }

  KA_TRACE(
      1000, (""); {
        int gtid = __kmp_entry_gtid();
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                                  (kmp_affin_mask_t *)(*mask));
        __kmp_debug_printf("kmp_set_affinity_mask_proc: setting proc %d in "
                           "affinity mask for thread %d = %s\n",
                           proc, gtid, buf);
      });

  if (__kmp_env_consistency_check) {
    if ((mask == NULL) || (*mask == NULL)) {
      KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity_mask_proc");
    }
  }

  if ((proc < 0) || (proc >= __kmp_aux_get_affinity_max_proc())) {
    return -1;
  }
  if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
    return -2;
  }

  KMP_CPU_SET(proc, (kmp_affin_mask_t *)(*mask));
  return 0;
}
int __kmp_aux_unset_affinity_mask_proc(int proc, void **mask) {
  if (!KMP_AFFINITY_CAPABLE()) {
    return -1;
  }

  KA_TRACE(
      1000, (""); {
        int gtid = __kmp_entry_gtid();
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                                  (kmp_affin_mask_t *)(*mask));
        __kmp_debug_printf("kmp_unset_affinity_mask_proc: unsetting proc %d in "
                           "affinity mask for thread %d = %s\n",
                           proc, gtid, buf);
      });

  if (__kmp_env_consistency_check) {
    if ((mask == NULL) || (*mask == NULL)) {
      KMP_FATAL(AffinityInvalidMask, "kmp_unset_affinity_mask_proc");
    }
  }

  if ((proc < 0) || (proc >= __kmp_aux_get_affinity_max_proc())) {
    return -1;
  }
  if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
    return -2;
  }

  KMP_CPU_CLR(proc, (kmp_affin_mask_t *)(*mask));
  return 0;
}
int __kmp_aux_get_affinity_mask_proc(int proc, void **mask) {
  if (!KMP_AFFINITY_CAPABLE()) {
    return -1;
  }

  KA_TRACE(
      1000, (""); {
        int gtid = __kmp_entry_gtid();
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                                  (kmp_affin_mask_t *)(*mask));
        __kmp_debug_printf("kmp_get_affinity_mask_proc: getting proc %d in "
                           "affinity mask for thread %d = %s\n",
                           proc, gtid, buf);
      });

  if (__kmp_env_consistency_check) {
    if ((mask == NULL) || (*mask == NULL)) {
      KMP_FATAL(AffinityInvalidMask, "kmp_get_affinity_mask_proc");
    }
  }

  if ((proc < 0) || (proc >= __kmp_aux_get_affinity_max_proc())) {
    return -1;
  }
  if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
    return 0;
  }

  return KMP_CPU_ISSET(proc, (kmp_affin_mask_t *)(*mask));
}
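// Binary search for the first hardware thread whose core is an efficiency
// (ATOM-type) core. This relies on the topology table being sorted so that
// all performance (CORE-type) hardware threads precede the efficiency ones.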
#if KMP_WEIGHTED_ITERATIONS_SUPPORTED
// Returns first os proc id with ATOM core
int __kmp_get_first_osid_with_ecore(void) {
  int low = 0;
  int high = __kmp_topology->get_num_hw_threads() - 1;
  int mid = 0;
  while (high - low > 1) {
    mid = (high + low) / 2;
    if (__kmp_topology->at(mid).attrs.get_core_type() ==
        KMP_HW_CORE_TYPE_CORE) {
      low = mid + 1;
    } else {
      high = mid;
    }
  }
  if (__kmp_topology->at(mid).attrs.get_core_type() == KMP_HW_CORE_TYPE_ATOM) {
    return mid;
  }
  return -1;
}
#endif
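// Dynamic affinity for KMP_AFFINITY=balanced: spread nthreads as evenly as
// possible over cores, then pin each thread within its core according to the
// granularity. Worked example for the uniform path below (illustrative
// numbers): nthreads = 7, ncores = 3 gives chunk = 7/3 = 2 and
// big_cores = 7%3 = 1, so big_nth = (2+1)*1 = 3; tids 0-2 land on core 0,
// tids 3-4 on core 1, and tids 5-6 on core 2.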
void __kmp_balanced_affinity(kmp_info_t *th, int nthreads) {
  KMP_DEBUG_ASSERT(th);
  bool fine_gran = true;
  int tid = th->th.th_info.ds.ds_tid;
  const char *env_var = "KMP_AFFINITY";

  // Do not perform balanced affinity for the hidden helper threads
  if (KMP_HIDDEN_HELPER_THREAD(__kmp_gtid_from_thread(th)))
    return;

  switch (__kmp_affinity.gran) {
  case KMP_HW_THREAD:
    break;
  case KMP_HW_CORE:
    if (__kmp_nThreadsPerCore > 1) {
      fine_gran = false;
    }
    break;
  case KMP_HW_SOCKET:
    if (nCoresPerPkg > 1) {
      fine_gran = false;
    }
    break;
  default:
    fine_gran = false;
  }

  if (__kmp_topology->is_uniform()) {
    int coreID;
    int threadID;
    // Number of hyper threads per core in HT machine
    int __kmp_nth_per_core = __kmp_avail_proc / __kmp_ncores;
    // Number of cores
    int ncores = __kmp_ncores;
    if ((nPackages > 1) && (__kmp_nth_per_core <= 1)) {
      __kmp_nth_per_core = __kmp_avail_proc / nPackages;
      ncores = nPackages;
    }
    // How many threads will be bound to each core
    int chunk = nthreads / ncores;
    // How many cores will have an additional thread bound to it - "big cores"
    int big_cores = nthreads % ncores;
    // Number of threads on the big cores
    int big_nth = (chunk + 1) * big_cores;
    if (tid < big_nth) {
      coreID = tid / (chunk + 1);
      threadID = (tid % (chunk + 1)) % __kmp_nth_per_core;
    } else { // tid >= big_nth
      coreID = (tid - big_cores) / chunk;
      threadID = ((tid - big_cores) % chunk) % __kmp_nth_per_core;
    }
    KMP_DEBUG_ASSERT2(KMP_AFFINITY_CAPABLE(),
                      "Illegal set affinity operation when not capable");

    kmp_affin_mask_t *mask = th->th.th_affin_mask;
    KMP_CPU_ZERO(mask);

    if (fine_gran) {
      int osID =
          __kmp_topology->at(coreID * __kmp_nth_per_core + threadID).os_id;
      KMP_CPU_SET(osID, mask);
    } else {
      for (int i = 0; i < __kmp_nth_per_core; i++) {
        int osID;
        osID = __kmp_topology->at(coreID * __kmp_nth_per_core + i).os_id;
        KMP_CPU_SET(osID, mask);
      }
    }
    if (__kmp_affinity.flags.verbose) {
      char buf[KMP_AFFIN_MASK_PRINT_LEN];
      __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, mask);
      KMP_INFORM(BoundToOSProcSet, env_var, (kmp_int32)getpid(), __kmp_gettid(),
                 tid, buf);
    }
    __kmp_affinity_get_thread_topology_info(th);
    __kmp_set_system_affinity(mask, TRUE);
  } else { // Non-uniform topology

    kmp_affin_mask_t *mask = th->th.th_affin_mask;
    KMP_CPU_ZERO(mask);

    int core_level =
        __kmp_affinity_find_core_level(__kmp_avail_proc, __kmp_aff_depth - 1);
    int ncores = __kmp_affinity_compute_ncores(__kmp_avail_proc,
                                               __kmp_aff_depth - 1, core_level);
    int nth_per_core = __kmp_affinity_max_proc_per_core(
        __kmp_avail_proc, __kmp_aff_depth - 1, core_level);

    // For performance gain consider the special case nthreads ==
    // __kmp_avail_proc
    if (nthreads == __kmp_avail_proc) {
      if (fine_gran) {
        int osID = __kmp_topology->at(tid).os_id;
        KMP_CPU_SET(osID, mask);
      } else {
        int core =
            __kmp_affinity_find_core(tid, __kmp_aff_depth - 1, core_level);
        for (int i = 0; i < __kmp_avail_proc; i++) {
          int osID = __kmp_topology->at(i).os_id;
          if (__kmp_affinity_find_core(i, __kmp_aff_depth - 1, core_level) ==
              core) {
            KMP_CPU_SET(osID, mask);
          }
        }
      }
    } else if (nthreads <= ncores) {
      int core = 0;
      for (int i = 0; i < ncores; i++) {
        // Check if this core from procarr[] is in the mask
        int in_mask = 0;
        for (int j = 0; j < nth_per_core; j++) {
          if (procarr[i * nth_per_core + j] != -1) {
            in_mask = 1;
            break;
          }
        }
        if (in_mask) {
          if (fine_gran) {
            int osID = procarr[i * nth_per_core];
            KMP_CPU_SET(osID, mask);
          } else {
            for (int j = 0; j < nth_per_core; j++) {
              int osID = procarr[i * nth_per_core + j];
              if (osID != -1) {
                KMP_CPU_SET(osID, mask);
              }
            }
          }
          if (core == tid) {
            break;
          }
          core++;
        }
      }
    } else { // nthreads > ncores
      // Array to save the number of processors at each core
      int *nproc_at_core = (int *)KMP_ALLOCA(sizeof(int) * ncores);
      // Array to save the number of cores with "x" available processors
      int *ncores_with_x_procs =
          (int *)KMP_ALLOCA(sizeof(int) * (nth_per_core + 1));
      // Array to save the number of cores with # procs from x to nth_per_core
      int *ncores_with_x_to_max_procs =
          (int *)KMP_ALLOCA(sizeof(int) * (nth_per_core + 1));

      for (int i = 0; i <= nth_per_core; i++) {
        ncores_with_x_procs[i] = 0;
        ncores_with_x_to_max_procs[i] = 0;
      }

      for (int i = 0; i < ncores; i++) {
        int cnt = 0;
        for (int j = 0; j < nth_per_core; j++) {
          if (procarr[i * nth_per_core + j] != -1) {
            cnt++;
          }
        }
        nproc_at_core[i] = cnt;
        ncores_with_x_procs[cnt]++;
      }

      for (int i = 0; i <= nth_per_core; i++) {
        for (int j = i; j <= nth_per_core; j++) {
          ncores_with_x_to_max_procs[i] += ncores_with_x_procs[j];
        }
      }

      // Max number of processors
      int nproc = nth_per_core * ncores;
      // An array to keep number of threads per each context
      int *newarr = (int *)__kmp_allocate(sizeof(int) * nproc);
      for (int i = 0; i < nproc; i++) {
        newarr[i] = 0;
      }

      // Distribute the threads over the contexts, filling each core once
      // before assigning a second thread to any context.
      int nth = nthreads;
      int flag = 0;
      while (nth > 0) {
        for (int j = 1; j <= nth_per_core; j++) {
          int cnt = ncores_with_x_to_max_procs[j];
          for (int i = 0; i < ncores; i++) {
            // Skip the core with 0 processors
            if (nproc_at_core[i] == 0) {
              continue;
            }
            for (int k = 0; k < nth_per_core; k++) {
              if (procarr[i * nth_per_core + k] != -1) {
                if (newarr[i * nth_per_core + k] == 0) {
                  newarr[i * nth_per_core + k] = 1;
                  cnt--;
                  nth--;
                  break;
                } else {
                  if (flag != 0) {
                    newarr[i * nth_per_core + k]++;
                    cnt--;
                    nth--;
                    break;
                  }
                }
              }
            }
            if (cnt == 0 || nth == 0) {
              break;
            }
          }
          if (nth == 0) {
            break;
          }
        }
        flag = 1;
      }

      // Find which context(s) this tid was assigned to and set the mask.
      int sum = 0;
      for (int i = 0; i < nproc; i++) {
        sum += newarr[i];
        if (sum > tid) {
          if (fine_gran) {
            int osID = procarr[i];
            KMP_CPU_SET(osID, mask);
          } else {
            int coreID = i / nth_per_core;
            for (int ii = 0; ii < nth_per_core; ii++) {
              int osID = procarr[coreID * nth_per_core + ii];
              if (osID != -1) {
                KMP_CPU_SET(osID, mask);
              }
            }
          }
          break;
        }
      }
      __kmp_free(newarr);
    }

    if (__kmp_affinity.flags.verbose) {
      char buf[KMP_AFFIN_MASK_PRINT_LEN];
      __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, mask);
      KMP_INFORM(BoundToOSProcSet, env_var, (kmp_int32)getpid(), __kmp_gettid(),
                 tid, buf);
    }
    __kmp_affinity_get_thread_topology_info(th);
    __kmp_set_system_affinity(mask, TRUE);
  }
}
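// kmp_set_thread_affinity_mask_initial() below widens the calling thread's
// mask back to the full process mask. Illustrative use from a foreign
// (non-OpenMP) threading layer, assuming a Linux host (the save/restore
// calls are the caller's responsibility, not part of this file):
//
//   cpu_set_t saved;
//   sched_getaffinity(0, sizeof(saved), &saved);      // save current mask
//   if (kmp_set_thread_affinity_mask_initial() == 0) {
//     /* ... create non-OpenMP worker threads here ... */
//   }
//   sched_setaffinity(0, sizeof(saved), &saved);      // restore saved mask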
#if KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_DRAGONFLY ||    \
    KMP_OS_AIX
// We don't need this entry for Windows because
// there is the GetProcessAffinityMask() api
#ifdef __cplusplus
extern "C"
#endif
    int
    kmp_set_thread_affinity_mask_initial()
// the function returns 0 on success,
//   -1 if we cannot bind thread
//   >0 (errno) if an error happened during binding
{
  int gtid = __kmp_get_gtid();
  if (gtid < 0) {
    // Do not touch non-omp threads
    KA_TRACE(30, ("kmp_set_thread_affinity_mask_initial: "
                  "non-omp thread, returning\n"));
    return -1;
  }
  if (!KMP_AFFINITY_CAPABLE() || !__kmp_init_middle) {
    KA_TRACE(30, ("kmp_set_thread_affinity_mask_initial: "
                  "affinity not initialized, returning\n"));
    return -1;
  }
  KA_TRACE(30, ("kmp_set_thread_affinity_mask_initial: "
                "set full mask for thread %d\n",
                gtid));
  KMP_DEBUG_ASSERT(__kmp_affin_fullMask != NULL);
#if KMP_OS_AIX
  return bindprocessor(BINDTHREAD, thread_self(), PROCESSOR_CLASS_ANY);
#else
  return __kmp_set_system_affinity(__kmp_affin_fullMask, FALSE);
#endif
}
#endif
int try_open(const char *filename, const char *mode)