/*
 * kmp_affinity.cpp -- affinity management
 */

//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "kmp.h"
#include "kmp_affinity.h"
#include "kmp_i18n.h"
#include "kmp_io.h"
#include "kmp_str.h"
#include "kmp_wrapper_getpid.h"
#if KMP_USE_HIER_SCHED
#include "kmp_dispatch_hier.h"
#endif
#if KMP_USE_HWLOC
// Copied from hwloc
#define HWLOC_GROUP_KIND_INTEL_MODULE 102
#define HWLOC_GROUP_KIND_INTEL_TILE 103
#define HWLOC_GROUP_KIND_INTEL_DIE 104
#define HWLOC_GROUP_KIND_WINDOWS_PROCESSOR_GROUP 220
#endif
#include <ctype.h>

// The machine topology
kmp_topology_t *__kmp_topology = nullptr;
// KMP_HW_SUBSET environment variable
kmp_hw_subset_t *__kmp_hw_subset = nullptr;

// Store the real or imagined machine hierarchy here
static hierarchy_info machine_hierarchy;

void __kmp_cleanup_hierarchy() { machine_hierarchy.fini(); }

void __kmp_get_hierarchy(kmp_uint32 nproc, kmp_bstate_t *thr_bar) {
  kmp_uint32 depth;
  // The test below is true if affinity is available, but set to "none". Need
  // to init on first use of hierarchical barrier.
  if (TCR_1(machine_hierarchy.uninitialized))
    machine_hierarchy.init(nproc);

  // Adjust the hierarchy in case num threads exceeds original
  if (nproc > machine_hierarchy.base_num_threads)
    machine_hierarchy.resize(nproc);

  depth = machine_hierarchy.depth;
  KMP_DEBUG_ASSERT(depth > 0);

  thr_bar->depth = depth;
  __kmp_type_convert(machine_hierarchy.numPerLevel[0] - 1,
                     &(thr_bar->base_leaf_kids));
  thr_bar->skip_per_level = machine_hierarchy.skipPerLevel;
}
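// For illustration (hypothetical numbers; hierarchy_info lives elsewhere in
// the runtime): on a machine with 2 packages of 4 cores each, numPerLevel[0]
// might be 4, giving base_leaf_kids = 3 (a leaf thread plus its three
// siblings), while skip_per_level supplies the per-level strides the
// hierarchical barrier uses to locate parents in the tree.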

static int nCoresPerPkg, nPackages;
static int __kmp_nThreadsPerCore;
#ifndef KMP_DFLT_NTH_CORES
static int __kmp_ncores;
#endif

const char *__kmp_hw_get_catalog_string(kmp_hw_t type, bool plural) {
  switch (type) {
  case KMP_HW_SOCKET:
    return ((plural) ? KMP_I18N_STR(Sockets) : KMP_I18N_STR(Socket));
  case KMP_HW_DIE:
    return ((plural) ? KMP_I18N_STR(Dice) : KMP_I18N_STR(Die));
  case KMP_HW_MODULE:
    return ((plural) ? KMP_I18N_STR(Modules) : KMP_I18N_STR(Module));
  case KMP_HW_TILE:
    return ((plural) ? KMP_I18N_STR(Tiles) : KMP_I18N_STR(Tile));
  case KMP_HW_NUMA:
    return ((plural) ? KMP_I18N_STR(NumaDomains) : KMP_I18N_STR(NumaDomain));
  case KMP_HW_L3:
    return ((plural) ? KMP_I18N_STR(L3Caches) : KMP_I18N_STR(L3Cache));
  case KMP_HW_L2:
    return ((plural) ? KMP_I18N_STR(L2Caches) : KMP_I18N_STR(L2Cache));
  case KMP_HW_L1:
    return ((plural) ? KMP_I18N_STR(L1Caches) : KMP_I18N_STR(L1Cache));
  case KMP_HW_LLC:
    return ((plural) ? KMP_I18N_STR(LLCaches) : KMP_I18N_STR(LLCache));
  case KMP_HW_CORE:
    return ((plural) ? KMP_I18N_STR(Cores) : KMP_I18N_STR(Core));
  case KMP_HW_THREAD:
    return ((plural) ? KMP_I18N_STR(Threads) : KMP_I18N_STR(Thread));
  case KMP_HW_PROC_GROUP:
    return ((plural) ? KMP_I18N_STR(ProcGroups) : KMP_I18N_STR(ProcGroup));
  }
  return KMP_I18N_STR(Unknown);
}

const char *__kmp_hw_get_keyword(kmp_hw_t type, bool plural) {
  switch (type) {
  case KMP_HW_SOCKET:
    return ((plural) ? "sockets" : "socket");
  case KMP_HW_DIE:
    return ((plural) ? "dice" : "die");
  case KMP_HW_MODULE:
    return ((plural) ? "modules" : "module");
  case KMP_HW_TILE:
    return ((plural) ? "tiles" : "tile");
  case KMP_HW_NUMA:
    return ((plural) ? "numa_domains" : "numa_domain");
  case KMP_HW_L3:
    return ((plural) ? "l3_caches" : "l3_cache");
  case KMP_HW_L2:
    return ((plural) ? "l2_caches" : "l2_cache");
  case KMP_HW_L1:
    return ((plural) ? "l1_caches" : "l1_cache");
  case KMP_HW_LLC:
    return ((plural) ? "ll_caches" : "ll_cache");
  case KMP_HW_CORE:
    return ((plural) ? "cores" : "core");
  case KMP_HW_THREAD:
    return ((plural) ? "threads" : "thread");
  case KMP_HW_PROC_GROUP:
    return ((plural) ? "proc_groups" : "proc_group");
  }
  return ((plural) ? "unknowns" : "unknown");
}

const char *__kmp_hw_get_core_type_string(kmp_hw_core_type_t type) {
  switch (type) {
  case KMP_HW_CORE_TYPE_UNKNOWN:
    return "unknown";
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
  case KMP_HW_CORE_TYPE_ATOM:
    return "Intel Atom(R) processor";
  case KMP_HW_CORE_TYPE_CORE:
    return "Intel(R) Core(TM) processor";
#endif
  }
  return "unknown";
}

#if KMP_AFFINITY_SUPPORTED
// If affinity is supported, check the affinity
// verbose and warning flags before printing warning
#define KMP_AFF_WARNING(s, ...)                                                \
  if (s.flags.verbose || (s.flags.warnings && (s.type != affinity_none))) {    \
    KMP_WARNING(__VA_ARGS__);                                                  \
  }
#else
#define KMP_AFF_WARNING(s, ...) KMP_WARNING(__VA_ARGS__)
#endif

// kmp_hw_thread_t methods
int kmp_hw_thread_t::compare_ids(const void *a, const void *b) {
  const kmp_hw_thread_t *ahwthread = (const kmp_hw_thread_t *)a;
  const kmp_hw_thread_t *bhwthread = (const kmp_hw_thread_t *)b;
  int depth = __kmp_topology->get_depth();
  for (int level = 0; level < depth; ++level) {
    if (ahwthread->ids[level] < bhwthread->ids[level])
      return -1;
    else if (ahwthread->ids[level] > bhwthread->ids[level])
      return 1;
  }
  if (ahwthread->os_id < bhwthread->os_id)
    return -1;
  else if (ahwthread->os_id > bhwthread->os_id)
    return 1;
  return 0;
}
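// For illustration: with ids laid out as {socket, core, thread}, compare_ids
// orders {0,1,0} before {1,0,0} because the socket id is compared first, and
// it falls back to os_id only when every topology id matches, so qsort
// produces a depth-first ordering of the hardware threads.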

#if KMP_AFFINITY_SUPPORTED
int kmp_hw_thread_t::compare_compact(const void *a, const void *b) {
  int i;
  const kmp_hw_thread_t *aa = (const kmp_hw_thread_t *)a;
  const kmp_hw_thread_t *bb = (const kmp_hw_thread_t *)b;
  int depth = __kmp_topology->get_depth();
  int compact = __kmp_topology->compact;
  KMP_DEBUG_ASSERT(compact >= 0);
  KMP_DEBUG_ASSERT(compact <= depth);
  for (i = 0; i < compact; i++) {
    int j = depth - i - 1;
    if (aa->sub_ids[j] < bb->sub_ids[j])
      return -1;
    if (aa->sub_ids[j] > bb->sub_ids[j])
      return 1;
  }
  for (; i < depth; i++) {
    int j = i - compact;
    if (aa->sub_ids[j] < bb->sub_ids[j])
      return -1;
    if (aa->sub_ids[j] > bb->sub_ids[j])
      return 1;
  }
  return 0;
}
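// For illustration: with depth = 3 ({socket, core, thread}) and compact = 1,
// sub_ids are compared in the order thread, socket, core, so every core's
// first hardware thread sorts before any core's second hardware thread; with
// compact == depth the comparison is entirely innermost-first.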
#endif

void kmp_hw_thread_t::print() const {
  int depth = __kmp_topology->get_depth();
  printf("%4d ", os_id);
  for (int i = 0; i < depth; ++i) {
    printf("%4d ", ids[i]);
  }
  if (attrs) {
    if (attrs.is_core_type_valid())
      printf(" (%s)", __kmp_hw_get_core_type_string(attrs.get_core_type()));
    if (attrs.is_core_eff_valid())
      printf(" (eff=%d)", attrs.get_core_eff());
  }
  printf("\n");
}

// kmp_topology_t methods

// Add a layer to the topology based on the ids. Assume the topology
// is perfectly nested (i.e., no object has more than one parent).
void kmp_topology_t::_insert_layer(kmp_hw_t type, const int *ids) {
  // Figure out where the layer should go by comparing the ids of the current
  // layers with the new ids
  int target_layer;
  int previous_id = kmp_hw_thread_t::UNKNOWN_ID;
  int previous_new_id = kmp_hw_thread_t::UNKNOWN_ID;

  // Start from the highest layer and work down to find target layer
  // If new layer is equal to another layer then put the new layer above
  for (target_layer = 0; target_layer < depth; ++target_layer) {
    bool layers_equal = true;
    bool strictly_above_target_layer = false;
    for (int i = 0; i < num_hw_threads; ++i) {
      int id = hw_threads[i].ids[target_layer];
      int new_id = ids[i];
      if (id != previous_id && new_id == previous_new_id) {
        // Found the layer we are strictly above
        strictly_above_target_layer = true;
        layers_equal = false;
        break;
      } else if (id == previous_id && new_id != previous_new_id) {
        // Found a layer we are below. Move to next layer and check.
        layers_equal = false;
        break;
      }
      previous_id = id;
      previous_new_id = new_id;
    }
    if (strictly_above_target_layer || layers_equal)
      break;
  }

  // Found the layer we are above. Now move everything to accommodate the new
  // layer. And put the new ids and type into the topology.
  for (int i = depth - 1, j = depth; i >= target_layer; --i, --j)
    types[j] = types[i];
  types[target_layer] = type;
  for (int k = 0; k < num_hw_threads; ++k) {
    for (int i = depth - 1, j = depth; i >= target_layer; --i, --j)
      hw_threads[k].ids[j] = hw_threads[k].ids[i];
    hw_threads[k].ids[target_layer] = ids[k];
  }
  equivalent[type] = type;
  depth++;
}
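// For illustration: on a hypothetical 1-socket, 4-core machine (socket ids
// {0,0,0,0}, core ids {0,1,2,3}), inserting NUMA ids {0,0,1,1} is detected
// as below the socket layer but strictly above the core layer, so the new
// layer lands between them and all deeper layers shift down one slot.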

#if KMP_GROUP_AFFINITY
// Insert the Windows Processor Group structure into the topology
void kmp_topology_t::_insert_windows_proc_groups() {
  // Do not insert the processor group structure for a single group
  if (__kmp_num_proc_groups == 1)
    return;
  kmp_affin_mask_t *mask;
  int *ids = (int *)__kmp_allocate(sizeof(int) * num_hw_threads);
  KMP_CPU_ALLOC(mask);
  for (int i = 0; i < num_hw_threads; ++i) {
    KMP_CPU_ZERO(mask);
    KMP_CPU_SET(hw_threads[i].os_id, mask);
    ids[i] = __kmp_get_proc_group(mask);
  }
  KMP_CPU_FREE(mask);
  _insert_layer(KMP_HW_PROC_GROUP, ids);
  __kmp_free(ids);
}
#endif

// Remove layers that don't add information to the topology.
// This is done by having the layer take on the id = UNKNOWN_ID (-1)
void kmp_topology_t::_remove_radix1_layers() {
  int preference[KMP_HW_LAST];
  int top_index1, top_index2;
  // Set up preference associative array
  preference[KMP_HW_SOCKET] = 110;
  preference[KMP_HW_PROC_GROUP] = 100;
  preference[KMP_HW_CORE] = 95;
  preference[KMP_HW_THREAD] = 90;
  preference[KMP_HW_NUMA] = 85;
  preference[KMP_HW_DIE] = 80;
  preference[KMP_HW_TILE] = 75;
  preference[KMP_HW_MODULE] = 73;
  preference[KMP_HW_L3] = 70;
  preference[KMP_HW_L2] = 65;
  preference[KMP_HW_L1] = 60;
  preference[KMP_HW_LLC] = 5;
  top_index1 = 0;
  top_index2 = 1;
  while (top_index1 < depth - 1 && top_index2 < depth) {
    kmp_hw_t type1 = types[top_index1];
    kmp_hw_t type2 = types[top_index2];
    KMP_ASSERT_VALID_HW_TYPE(type1);
    KMP_ASSERT_VALID_HW_TYPE(type2);
    // Do not allow the three main topology levels (sockets, cores, threads) to
    // be compacted down
    if ((type1 == KMP_HW_THREAD || type1 == KMP_HW_CORE ||
         type1 == KMP_HW_SOCKET) &&
        (type2 == KMP_HW_THREAD || type2 == KMP_HW_CORE ||
         type2 == KMP_HW_SOCKET)) {
      top_index1 = top_index2++;
      continue;
    }
    bool radix1 = true;
    bool all_same = true;
    int id1 = hw_threads[0].ids[top_index1];
    int id2 = hw_threads[0].ids[top_index2];
    int pref1 = preference[type1];
    int pref2 = preference[type2];
    for (int hwidx = 1; hwidx < num_hw_threads; ++hwidx) {
      if (hw_threads[hwidx].ids[top_index1] == id1 &&
          hw_threads[hwidx].ids[top_index2] != id2) {
        radix1 = false;
        break;
      }
      if (hw_threads[hwidx].ids[top_index2] != id2)
        all_same = false;
      id1 = hw_threads[hwidx].ids[top_index1];
      id2 = hw_threads[hwidx].ids[top_index2];
    }
    if (radix1) {
      // Select the layer to remove based on preference
      kmp_hw_t remove_type, keep_type;
      int remove_layer, remove_layer_ids;
      if (pref1 > pref2) {
        remove_type = type2;
        remove_layer = remove_layer_ids = top_index2;
        keep_type = type1;
      } else {
        remove_type = type1;
        remove_layer = remove_layer_ids = top_index1;
        keep_type = type2;
      }
      // If all the ids for the second (deeper) layer are the same (e.g., all
      // are zero), then make sure to keep the first layer's ids
      if (all_same)
        remove_layer_ids = top_index2;
      // Remove radix one type by setting the equivalence, removing the id from
      // the hw threads and removing the layer from types and depth
      set_equivalent_type(remove_type, keep_type);
      for (int idx = 0; idx < num_hw_threads; ++idx) {
        kmp_hw_thread_t &hw_thread = hw_threads[idx];
        for (int d = remove_layer_ids; d < depth - 1; ++d)
          hw_thread.ids[d] = hw_thread.ids[d + 1];
      }
      for (int idx = remove_layer; idx < depth - 1; ++idx)
        types[idx] = types[idx + 1];
      depth--;
    } else {
      top_index1 = top_index2++;
    }
  }
  KMP_ASSERT(depth > 0);
}
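// For illustration: if each socket contains exactly one NUMA domain, the
// socket and numa_domain layers have identical structure (radix 1), so the
// lower-preference numa_domain layer is removed and recorded as equivalent
// to socket; granularity requests naming either type still resolve.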

void kmp_topology_t::_set_last_level_cache() {
  if (get_equivalent_type(KMP_HW_L3) != KMP_HW_UNKNOWN)
    set_equivalent_type(KMP_HW_LLC, KMP_HW_L3);
  else if (get_equivalent_type(KMP_HW_L2) != KMP_HW_UNKNOWN)
    set_equivalent_type(KMP_HW_LLC, KMP_HW_L2);
#if KMP_MIC_SUPPORTED
  else if (__kmp_mic_type == mic3) {
    if (get_equivalent_type(KMP_HW_L2) != KMP_HW_UNKNOWN)
      set_equivalent_type(KMP_HW_LLC, KMP_HW_L2);
    else if (get_equivalent_type(KMP_HW_TILE) != KMP_HW_UNKNOWN)
      set_equivalent_type(KMP_HW_LLC, KMP_HW_TILE);
    // L2/Tile wasn't detected so just say L1
    else
      set_equivalent_type(KMP_HW_LLC, KMP_HW_L1);
  }
#endif
  else if (get_equivalent_type(KMP_HW_L1) != KMP_HW_UNKNOWN)
    set_equivalent_type(KMP_HW_LLC, KMP_HW_L1);
  // Fallback is to set last level cache to socket or core
  if (get_equivalent_type(KMP_HW_LLC) == KMP_HW_UNKNOWN) {
    if (get_equivalent_type(KMP_HW_SOCKET) != KMP_HW_UNKNOWN)
      set_equivalent_type(KMP_HW_LLC, KMP_HW_SOCKET);
    else if (get_equivalent_type(KMP_HW_CORE) != KMP_HW_UNKNOWN)
      set_equivalent_type(KMP_HW_LLC, KMP_HW_CORE);
  }
  KMP_ASSERT(get_equivalent_type(KMP_HW_LLC) != KMP_HW_UNKNOWN);
}

// Gather the count of each topology layer and the ratio
void kmp_topology_t::_gather_enumeration_information() {
  int previous_id[KMP_HW_LAST];
  int max[KMP_HW_LAST];

  for (int i = 0; i < depth; ++i) {
    previous_id[i] = kmp_hw_thread_t::UNKNOWN_ID;
    max[i] = 0;
    count[i] = 0;
    ratio[i] = 0;
  }
  int core_level = get_level(KMP_HW_CORE);
  for (int i = 0; i < num_hw_threads; ++i) {
    kmp_hw_thread_t &hw_thread = hw_threads[i];
    for (int layer = 0; layer < depth; ++layer) {
      int id = hw_thread.ids[layer];
      if (id != previous_id[layer]) {
        // Add an additional increment to each count
        for (int l = layer; l < depth; ++l)
          count[l]++;
        // Keep track of topology layer ratio statistics
        max[layer]++;
        for (int l = layer + 1; l < depth; ++l) {
          if (max[l] > ratio[l])
            ratio[l] = max[l];
          max[l] = 1;
        }
        // Figure out the number of different core types
        // and efficiencies for hybrid CPUs
        if (__kmp_is_hybrid_cpu() && core_level >= 0 && layer <= core_level) {
          if (hw_thread.attrs.is_core_eff_valid() &&
              hw_thread.attrs.core_eff >= num_core_efficiencies) {
            // Because efficiencies can range from 0 to max efficiency - 1,
            // the number of efficiencies is max efficiency + 1
            num_core_efficiencies = hw_thread.attrs.core_eff + 1;
          }
          if (hw_thread.attrs.is_core_type_valid()) {
            bool found = false;
            for (int j = 0; j < num_core_types; ++j) {
              if (hw_thread.attrs.get_core_type() == core_types[j]) {
                found = true;
                break;
              }
            }
            if (!found) {
              KMP_ASSERT(num_core_types < KMP_HW_MAX_NUM_CORE_TYPES);
              core_types[num_core_types++] = hw_thread.attrs.get_core_type();
            }
          }
        }
        break;
      }
    }
    for (int layer = 0; layer < depth; ++layer) {
      previous_id[layer] = hw_thread.ids[layer];
    }
  }
  for (int layer = 0; layer < depth; ++layer) {
    if (max[layer] > ratio[layer])
      ratio[layer] = max[layer];
  }
}
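// For illustration: on a uniform 2 sockets x 4 cores/socket x 2 threads/core
// machine, the pass above produces count = {2, 8, 16} (total objects at each
// layer) and ratio = {2, 4, 2} (maximum children per parent at each layer).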

int kmp_topology_t::_get_ncores_with_attr(const kmp_hw_attr_t &attr,
                                          int above_level,
                                          bool find_all) const {
  int current, current_max;
  int previous_id[KMP_HW_LAST];
  for (int i = 0; i < depth; ++i)
    previous_id[i] = kmp_hw_thread_t::UNKNOWN_ID;
  int core_level = get_level(KMP_HW_CORE);
  if (find_all)
    above_level = -1;
  KMP_ASSERT(above_level < core_level);
  current_max = 0;
  current = 0;
  for (int i = 0; i < num_hw_threads; ++i) {
    kmp_hw_thread_t &hw_thread = hw_threads[i];
    if (!find_all && hw_thread.ids[above_level] != previous_id[above_level]) {
      if (current > current_max)
        current_max = current;
      current = hw_thread.attrs.contains(attr);
    } else {
      for (int level = above_level + 1; level <= core_level; ++level) {
        if (hw_thread.ids[level] != previous_id[level]) {
          if (hw_thread.attrs.contains(attr))
            current++;
          break;
        }
      }
    }
    for (int level = 0; level < depth; ++level)
      previous_id[level] = hw_thread.ids[level];
  }
  if (current > current_max)
    current_max = current;
  return current_max;
}

// Find out if the topology is uniform
void kmp_topology_t::_discover_uniformity() {
  int num = 1;
  for (int level = 0; level < depth; ++level)
    num *= ratio[level];
  flags.uniform = (num == count[depth - 1]);
}
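// For illustration: with ratio = {2, 4, 2} the product 2 * 4 * 2 = 16 equals
// count[depth - 1] = 16, so the topology is uniform; a hybrid CPU with, say,
// 8 two-way SMT performance cores plus 16 single-thread efficient cores
// fails this test and is marked non-uniform.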

// Set all the sub_ids for each hardware thread
void kmp_topology_t::_set_sub_ids() {
  int previous_id[KMP_HW_LAST];
  int sub_id[KMP_HW_LAST];

  for (int i = 0; i < depth; ++i) {
    previous_id[i] = -1;
    sub_id[i] = -1;
  }
  for (int i = 0; i < num_hw_threads; ++i) {
    kmp_hw_thread_t &hw_thread = hw_threads[i];
    // Setup the sub_id
    for (int j = 0; j < depth; ++j) {
      if (hw_thread.ids[j] != previous_id[j]) {
        sub_id[j]++;
        for (int k = j + 1; k < depth; ++k) {
          sub_id[k] = 0;
        }
        break;
      }
    }
    // Set previous_id
    for (int j = 0; j < depth; ++j) {
      previous_id[j] = hw_thread.ids[j];
    }
    // Set the sub_ids field
    for (int j = 0; j < depth; ++j) {
      hw_thread.sub_ids[j] = sub_id[j];
    }
  }
}
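// For illustration: unlike ids, which may be arbitrary OS-assigned values
// (e.g., socket ids {0, 3}), sub_ids are dense 0-based indices relative to
// the parent object: two sockets with two cores each always yield per-thread
// sub_ids {0,0}, {0,1}, {1,0}, {1,1} whatever the raw id values are.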

void kmp_topology_t::_set_globals() {
  // Set nCoresPerPkg, nPackages, __kmp_nThreadsPerCore, __kmp_ncores
  int core_level, thread_level, package_level;
  package_level = get_level(KMP_HW_SOCKET);
#if KMP_GROUP_AFFINITY
  if (package_level == -1)
    package_level = get_level(KMP_HW_PROC_GROUP);
#endif
  core_level = get_level(KMP_HW_CORE);
  thread_level = get_level(KMP_HW_THREAD);

  KMP_ASSERT(core_level != -1);
  KMP_ASSERT(thread_level != -1);

  __kmp_nThreadsPerCore = calculate_ratio(thread_level, core_level);
  if (package_level != -1) {
    nCoresPerPkg = calculate_ratio(core_level, package_level);
    nPackages = get_count(package_level);
  } else {
    // assume one socket
    nCoresPerPkg = get_count(core_level);
    nPackages = 1;
  }
#ifndef KMP_DFLT_NTH_CORES
  __kmp_ncores = get_count(core_level);
#endif
}

kmp_topology_t *kmp_topology_t::allocate(int nproc, int ndepth,
                                         const kmp_hw_t *types) {
  kmp_topology_t *retval;
  // Allocate all data in one large allocation
  size_t size = sizeof(kmp_topology_t) + sizeof(kmp_hw_thread_t) * nproc +
                sizeof(int) * (size_t)KMP_HW_LAST * 3;
  char *bytes = (char *)__kmp_allocate(size);
  retval = (kmp_topology_t *)bytes;
  if (nproc > 0) {
    retval->hw_threads = (kmp_hw_thread_t *)(bytes + sizeof(kmp_topology_t));
  } else {
    retval->hw_threads = nullptr;
  }
  retval->num_hw_threads = nproc;
  retval->depth = ndepth;
  int *arr =
      (int *)(bytes + sizeof(kmp_topology_t) + sizeof(kmp_hw_thread_t) * nproc);
  retval->types = (kmp_hw_t *)arr;
  retval->ratio = arr + (size_t)KMP_HW_LAST;
  retval->count = arr + 2 * (size_t)KMP_HW_LAST;
  retval->num_core_efficiencies = 0;
  retval->num_core_types = 0;
  retval->compact = 0;
  for (int i = 0; i < KMP_HW_MAX_NUM_CORE_TYPES; ++i)
    retval->core_types[i] = KMP_HW_CORE_TYPE_UNKNOWN;
  KMP_FOREACH_HW_TYPE(type) { retval->equivalent[type] = KMP_HW_UNKNOWN; }
  for (int i = 0; i < ndepth; ++i) {
    retval->types[i] = types[i];
    retval->equivalent[types[i]] = types[i];
  }
  return retval;
}
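// For illustration, the single allocation above is laid out as
//   [kmp_topology_t][kmp_hw_thread_t x nproc][types][ratio][count]
// with three trailing int arrays of KMP_HW_LAST entries each, so the one
// __kmp_free() in deallocate() releases everything at once.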

void kmp_topology_t::deallocate(kmp_topology_t *topology) {
  if (topology)
    __kmp_free(topology);
}

bool kmp_topology_t::check_ids() const {
  // Assume ids have been sorted
  if (num_hw_threads == 0)
    return true;
  for (int i = 1; i < num_hw_threads; ++i) {
    kmp_hw_thread_t &current_thread = hw_threads[i];
    kmp_hw_thread_t &previous_thread = hw_threads[i - 1];
    bool unique = false;
    for (int j = 0; j < depth; ++j) {
      if (previous_thread.ids[j] != current_thread.ids[j]) {
        unique = true;
        break;
      }
    }
    if (unique)
      continue;
    return false;
  }
  return true;
}

void kmp_topology_t::dump() const {
  printf("***********************\n");
  printf("*** __kmp_topology: ***\n");
  printf("***********************\n");
  printf("* depth: %d\n", depth);

  printf("* types: ");
  for (int i = 0; i < depth; ++i)
    printf("%15s ", __kmp_hw_get_keyword(types[i]));
  printf("\n");

  printf("* ratio: ");
  for (int i = 0; i < depth; ++i) {
    printf("%15d ", ratio[i]);
  }
  printf("\n");

  printf("* count: ");
  for (int i = 0; i < depth; ++i) {
    printf("%15d ", count[i]);
  }
  printf("\n");

  printf("* num_core_eff: %d\n", num_core_efficiencies);
  printf("* num_core_types: %d\n", num_core_types);
  printf("* core_types: ");
  for (int i = 0; i < num_core_types; ++i)
    printf("%3d ", core_types[i]);
  printf("\n");

  printf("* equivalent map:\n");
  KMP_FOREACH_HW_TYPE(i) {
    const char *key = __kmp_hw_get_keyword(i);
    const char *value = __kmp_hw_get_keyword(equivalent[i]);
    printf("%-15s -> %-15s\n", key, value);
  }

  printf("* uniform: %s\n", (is_uniform() ? "Yes" : "No"));

  printf("* num_hw_threads: %d\n", num_hw_threads);
  printf("* hw_threads:\n");
  for (int i = 0; i < num_hw_threads; ++i) {
    hw_threads[i].print();
  }
  printf("***********************\n");
}

void kmp_topology_t::print(const char *env_var) const {
  kmp_str_buf_t buf;
  int print_types_depth;
  __kmp_str_buf_init(&buf);
  kmp_hw_t print_types[KMP_HW_LAST + 2];

  // Num Available Threads
  if (num_hw_threads) {
    KMP_INFORM(AvailableOSProc, env_var, num_hw_threads);
  } else {
    KMP_INFORM(AvailableOSProc, env_var, __kmp_xproc);
  }

  // Uniform or not
  if (is_uniform()) {
    KMP_INFORM(Uniform, env_var);
  } else {
    KMP_INFORM(NonUniform, env_var);
  }

  // Equivalent types
  KMP_FOREACH_HW_TYPE(type) {
    kmp_hw_t eq_type = equivalent[type];
    if (eq_type != KMP_HW_UNKNOWN && eq_type != type) {
      KMP_INFORM(AffEqualTopologyTypes, env_var,
                 __kmp_hw_get_catalog_string(type),
                 __kmp_hw_get_catalog_string(eq_type));
    }
  }

  // Quick topology
  KMP_ASSERT(depth > 0 && depth <= (int)KMP_HW_LAST);
  // Create a print types array that always guarantees printing
  // the core and thread level
  print_types_depth = 0;
  for (int level = 0; level < depth; ++level)
    print_types[print_types_depth++] = types[level];
  if (equivalent[KMP_HW_CORE] != KMP_HW_CORE) {
    // Force in the core level for quick topology
    if (print_types[print_types_depth - 1] == KMP_HW_THREAD) {
      // Force core before thread e.g., 1 socket X 2 threads/socket
      // becomes 1 socket X 1 core/socket X 2 threads/socket
      print_types[print_types_depth - 1] = KMP_HW_CORE;
      print_types[print_types_depth++] = KMP_HW_THREAD;
    } else {
      print_types[print_types_depth++] = KMP_HW_CORE;
    }
  }
  // Always put threads at very end of quick topology
  if (equivalent[KMP_HW_THREAD] != KMP_HW_THREAD)
    print_types[print_types_depth++] = KMP_HW_THREAD;

  __kmp_str_buf_clear(&buf);
  kmp_hw_t numerator_type;
  kmp_hw_t denominator_type = KMP_HW_UNKNOWN;
  int core_level = get_level(KMP_HW_CORE);
  int ncores = get_count(core_level);

  for (int plevel = 0, level = 0; plevel < print_types_depth; ++plevel) {
    int c;
    bool plural;
    numerator_type = print_types[plevel];
    KMP_ASSERT_VALID_HW_TYPE(numerator_type);
    if (equivalent[numerator_type] != numerator_type)
      c = 1;
    else
      c = get_ratio(level++);
    plural = (c > 1);
    if (plevel == 0) {
      __kmp_str_buf_print(&buf, "%d %s", c,
                          __kmp_hw_get_catalog_string(numerator_type, plural));
    } else {
      __kmp_str_buf_print(&buf, " x %d %s/%s", c,
                          __kmp_hw_get_catalog_string(numerator_type, plural),
                          __kmp_hw_get_catalog_string(denominator_type));
    }
    denominator_type = numerator_type;
  }
  KMP_INFORM(TopologyGeneric, env_var, buf.str, ncores);

  // Hybrid topology information
  if (__kmp_is_hybrid_cpu()) {
    for (int i = 0; i < num_core_types; ++i) {
      kmp_hw_core_type_t core_type = core_types[i];
      kmp_hw_attr_t attr;
      attr.clear();
      attr.set_core_type(core_type);
      int ncores = get_ncores_with_attr(attr);
      if (ncores > 0) {
        KMP_INFORM(TopologyHybrid, env_var, ncores,
                   __kmp_hw_get_core_type_string(core_type));
        KMP_ASSERT(num_core_efficiencies <= KMP_HW_MAX_NUM_CORE_EFFS);
        for (int eff = 0; eff < num_core_efficiencies; ++eff) {
          attr.set_core_eff(eff);
          int ncores_with_eff = get_ncores_with_attr(attr);
          if (ncores_with_eff > 0) {
            KMP_INFORM(TopologyHybridCoreEff, env_var, ncores_with_eff, eff);
          }
        }
      }
    }
  }

  if (num_hw_threads <= 0) {
    __kmp_str_buf_free(&buf);
    return;
  }

  // Full OS proc to hardware thread map
  KMP_INFORM(OSProcToPhysicalThreadMap, env_var);
  for (int i = 0; i < num_hw_threads; i++) {
    __kmp_str_buf_clear(&buf);
    for (int level = 0; level < depth; ++level) {
      kmp_hw_t type = types[level];
      __kmp_str_buf_print(&buf, "%s ", __kmp_hw_get_catalog_string(type));
      __kmp_str_buf_print(&buf, "%d ", hw_threads[i].ids[level]);
    }
    if (__kmp_is_hybrid_cpu())
      __kmp_str_buf_print(
          &buf, "(%s)",
          __kmp_hw_get_core_type_string(hw_threads[i].attrs.get_core_type()));
    KMP_INFORM(OSProcMapToPack, env_var, hw_threads[i].os_id, buf.str);
  }

  __kmp_str_buf_free(&buf);
}
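// For illustration, the quick-topology line assembled above reads like
// "2 sockets x 4 cores/socket x 2 threads/core"; a layer that was folded
// away as equivalent to another is forced back in with a count of 1 so the
// core and thread levels always appear in the summary.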

#if KMP_AFFINITY_SUPPORTED
void kmp_topology_t::set_granularity(kmp_affinity_t &affinity) const {
  const char *env_var = affinity.env_var;
  // Set the number of affinity granularity levels
  if (affinity.gran_levels < 0) {
    kmp_hw_t gran_type = get_equivalent_type(affinity.gran);
    // Check if user's granularity request is valid
    if (gran_type == KMP_HW_UNKNOWN) {
      // First try core, then thread, then package
      kmp_hw_t gran_types[3] = {KMP_HW_CORE, KMP_HW_THREAD, KMP_HW_SOCKET};
      for (auto g : gran_types) {
        if (get_equivalent_type(g) != KMP_HW_UNKNOWN) {
          gran_type = g;
          break;
        }
      }
      KMP_ASSERT(gran_type != KMP_HW_UNKNOWN);
      // Warn user what granularity setting will be used instead
      KMP_AFF_WARNING(affinity, AffGranularityBad, env_var,
                      __kmp_hw_get_catalog_string(affinity.gran),
                      __kmp_hw_get_catalog_string(gran_type));
      affinity.gran = gran_type;
    }
#if KMP_GROUP_AFFINITY
    // If more than one processor group exists, and the level of
    // granularity specified by the user is too coarse, then the
    // granularity must be adjusted "down" to processor group affinity
    // because threads can only exist within one processor group.
    // For example, if a user sets granularity=socket and there are two
    // processor groups that cover a socket, then the runtime must
    // restrict the granularity down to the processor group level.
    if (__kmp_num_proc_groups > 1) {
      int gran_depth = get_level(gran_type);
      int proc_group_depth = get_level(KMP_HW_PROC_GROUP);
      if (gran_depth >= 0 && proc_group_depth >= 0 &&
          gran_depth < proc_group_depth) {
        KMP_AFF_WARNING(affinity, AffGranTooCoarseProcGroup, env_var,
                        __kmp_hw_get_catalog_string(affinity.gran));
        affinity.gran = gran_type = KMP_HW_PROC_GROUP;
      }
    }
#endif
    affinity.gran_levels = 0;
    for (int i = depth - 1; i >= 0 && get_type(i) != gran_type; --i)
      affinity.gran_levels++;
  }
}
#endif

void kmp_topology_t::canonicalize() {
#if KMP_GROUP_AFFINITY
  _insert_windows_proc_groups();
#endif
  _remove_radix1_layers();
  _gather_enumeration_information();
  _discover_uniformity();
  _set_sub_ids();
  _set_globals();
  _set_last_level_cache();

#if KMP_MIC_SUPPORTED
  // Manually Add L2 = Tile equivalence
  if (__kmp_mic_type == mic3) {
    if (get_level(KMP_HW_L2) != -1)
      set_equivalent_type(KMP_HW_TILE, KMP_HW_L2);
    else if (get_level(KMP_HW_TILE) != -1)
      set_equivalent_type(KMP_HW_L2, KMP_HW_TILE);
  }
#endif

  // Perform post canonicalization checking
  KMP_ASSERT(depth > 0);
  for (int level = 0; level < depth; ++level) {
    // All counts, ratios, and types must be valid
    KMP_ASSERT(count[level] > 0 && ratio[level] > 0);
    KMP_ASSERT_VALID_HW_TYPE(types[level]);
    // Detected types must point to themselves
    KMP_ASSERT(equivalent[types[level]] == types[level]);
  }
}

// Canonicalize an explicit packages X cores/pkg X threads/core topology
void kmp_topology_t::canonicalize(int npackages, int ncores_per_pkg,
                                  int nthreads_per_core, int ncores) {
  int ndepth = 3;
  depth = ndepth;
  KMP_FOREACH_HW_TYPE(i) { equivalent[i] = KMP_HW_UNKNOWN; }
  for (int level = 0; level < depth; ++level) {
    count[level] = 0;
    ratio[level] = 0;
  }
  count[0] = npackages;
  count[1] = ncores;
  count[2] = __kmp_xproc;
  ratio[0] = npackages;
  ratio[1] = ncores_per_pkg;
  ratio[2] = nthreads_per_core;
  equivalent[KMP_HW_SOCKET] = KMP_HW_SOCKET;
  equivalent[KMP_HW_CORE] = KMP_HW_CORE;
  equivalent[KMP_HW_THREAD] = KMP_HW_THREAD;
  types[0] = KMP_HW_SOCKET;
  types[1] = KMP_HW_CORE;
  types[2] = KMP_HW_THREAD;
  //__kmp_avail_proc = __kmp_xproc;
  _discover_uniformity();
}

// Represents running sub IDs for a single core attribute where
// attribute values have SIZE possibilities.
template <size_t SIZE, typename IndexFunc> struct kmp_sub_ids_t {
  int last_level; // last level in topology to consider for sub_ids
  int sub_id[SIZE]; // The sub ID for a given attribute value
  int prev_sub_id[KMP_HW_LAST];
  IndexFunc indexer;

public:
  kmp_sub_ids_t(int last_level) : last_level(last_level) {
    KMP_ASSERT(last_level < KMP_HW_LAST);
    for (size_t i = 0; i < SIZE; ++i)
      sub_id[i] = -1;
    for (size_t i = 0; i < KMP_HW_LAST; ++i)
      prev_sub_id[i] = -1;
  }
  void update(const kmp_hw_thread_t &hw_thread) {
    int idx = indexer(hw_thread);
    KMP_ASSERT(idx < (int)SIZE);
    for (int level = 0; level <= last_level; ++level) {
      if (hw_thread.sub_ids[level] != prev_sub_id[level]) {
        if (level < last_level)
          sub_id[idx] = -1;
        sub_id[idx]++;
        break;
      }
    }
    for (int level = 0; level <= last_level; ++level)
      prev_sub_id[level] = hw_thread.sub_ids[level];
  }
  int get_sub_id(const kmp_hw_thread_t &hw_thread) const {
    return sub_id[indexer(hw_thread)];
  }
};
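// For illustration: instantiated below as, e.g.,
// kmp_sub_ids_t<KMP_HW_MAX_NUM_CORE_TYPES, core_type_indexer>. update() is
// called once per hardware thread in topology order; each new core under the
// same parent advances the running sub ID slot selected by that core's
// attribute value, and the slot restarts when a higher-level object changes.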

static kmp_str_buf_t *
__kmp_hw_get_catalog_core_string(const kmp_hw_attr_t &attr, kmp_str_buf_t *buf,
                                 bool plural) {
  __kmp_str_buf_init(buf);
  if (attr.is_core_type_valid())
    __kmp_str_buf_print(buf, "%s %s",
                        __kmp_hw_get_core_type_string(attr.get_core_type()),
                        __kmp_hw_get_catalog_string(KMP_HW_CORE, plural));
  else
    __kmp_str_buf_print(buf, "%s eff=%d",
                        __kmp_hw_get_catalog_string(KMP_HW_CORE, plural),
                        attr.get_core_eff());
  return buf;
}

// Apply the KMP_HW_SUBSET environment variable to the topology
// Returns true if KMP_HW_SUBSET filtered any processors
// otherwise, returns false
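// For illustration: KMP_HW_SUBSET=2s,4c,2t keeps 2 sockets, 4 cores per
// socket, and 2 threads per core, and 4c@2 would skip the first two cores;
// on hybrid CPUs an item such as 4c:intel_atom or 2c:eff1 selects cores by
// core type or efficiency (example values, not an exhaustive syntax).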
bool kmp_topology_t::filter_hw_subset() {
  // If KMP_HW_SUBSET wasn't requested, then do nothing.
  if (!__kmp_hw_subset)
    return false;

  // First, sort the KMP_HW_SUBSET items by the machine topology
  __kmp_hw_subset->sort();

  // Check to see if KMP_HW_SUBSET is a valid subset of the detected topology
  bool using_core_types = false;
  bool using_core_effs = false;
  int hw_subset_depth = __kmp_hw_subset->get_depth();
  kmp_hw_t specified[KMP_HW_LAST];
  int *topology_levels = (int *)KMP_ALLOCA(sizeof(int) * hw_subset_depth);
  KMP_ASSERT(hw_subset_depth > 0);
  KMP_FOREACH_HW_TYPE(i) { specified[i] = KMP_HW_UNKNOWN; }
  int core_level = get_level(KMP_HW_CORE);
  for (int i = 0; i < hw_subset_depth; ++i) {
    int max_count;
    const kmp_hw_subset_t::item_t &item = __kmp_hw_subset->at(i);
    int num = item.num[0];
    int offset = item.offset[0];
    kmp_hw_t type = item.type;
    kmp_hw_t equivalent_type = equivalent[type];
    int level = get_level(type);
    topology_levels[i] = level;

    // Check to see if current layer is in detected machine topology
    if (equivalent_type != KMP_HW_UNKNOWN) {
      __kmp_hw_subset->at(i).type = equivalent_type;
    } else {
      KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetNotExistGeneric,
                      __kmp_hw_get_catalog_string(type));
      return false;
    }

    // Check to see if current layer has already been
    // specified either directly or through an equivalent type
    if (specified[equivalent_type] != KMP_HW_UNKNOWN) {
      KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetEqvLayers,
                      __kmp_hw_get_catalog_string(type),
                      __kmp_hw_get_catalog_string(specified[equivalent_type]));
      return false;
    }
    specified[equivalent_type] = type;

    // Check to see if each layer's num & offset parameters are valid
    max_count = get_ratio(level);
    if (max_count < 0 ||
        (num != kmp_hw_subset_t::USE_ALL && num + offset > max_count)) {
      bool plural = (num > 1);
      KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetManyGeneric,
                      __kmp_hw_get_catalog_string(type, plural));
      return false;
    }

    // Check to see if core attributes are consistent
    if (core_level == level) {
      // Determine which core attributes are specified
      for (int j = 0; j < item.num_attrs; ++j) {
        if (item.attr[j].is_core_type_valid())
          using_core_types = true;
        if (item.attr[j].is_core_eff_valid())
          using_core_effs = true;
      }

      // Check if using a single core attribute on non-hybrid arch.
      // Do not ignore all of KMP_HW_SUBSET, just ignore the attribute.
      //
      // Check if using multiple core attributes on non-hybrid arch.
      // Ignore all of KMP_HW_SUBSET if this is the case.
      if ((using_core_effs || using_core_types) && !__kmp_is_hybrid_cpu()) {
        if (item.num_attrs == 1) {
          if (using_core_effs) {
            KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetIgnoringAttr,
                            "efficiency");
          } else {
            KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetIgnoringAttr,
                            "core_type");
          }
          using_core_effs = false;
          using_core_types = false;
        } else {
          KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetAttrsNonHybrid);
          return false;
        }
      }

      // Check if using both core types and core efficiencies together
      if (using_core_types && using_core_effs) {
        KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetIncompat, "core_type",
                        "efficiency");
        return false;
      }

      // Check that core efficiency values are valid
      if (using_core_effs) {
        for (int j = 0; j < item.num_attrs; ++j) {
          if (item.attr[j].is_core_eff_valid()) {
            int core_eff = item.attr[j].get_core_eff();
            if (core_eff < 0 || core_eff >= num_core_efficiencies) {
              kmp_str_buf_t buf;
              __kmp_str_buf_init(&buf);
              __kmp_str_buf_print(&buf, "%d", item.attr[j].get_core_eff());
              __kmp_msg(kmp_ms_warning,
                        KMP_MSG(AffHWSubsetAttrInvalid, "efficiency", buf.str),
                        KMP_HNT(ValidValuesRange, 0, num_core_efficiencies - 1),
                        __kmp_msg_null);
              __kmp_str_buf_free(&buf);
              return false;
            }
          }
        }
      }

      // Check that the number of requested cores with attributes is valid
      if (using_core_types || using_core_effs) {
        for (int j = 0; j < item.num_attrs; ++j) {
          int num = item.num[j];
          int offset = item.offset[j];
          int level_above = core_level - 1;
          if (level_above >= 0) {
            max_count = get_ncores_with_attr_per(item.attr[j], level_above);
            if (max_count <= 0 ||
                (num != kmp_hw_subset_t::USE_ALL && num + offset > max_count)) {
              kmp_str_buf_t buf;
              __kmp_hw_get_catalog_core_string(item.attr[j], &buf, num > 0);
              KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetManyGeneric, buf.str);
              __kmp_str_buf_free(&buf);
              return false;
            }
          }
        }
      }

      if ((using_core_types || using_core_effs) && item.num_attrs > 1) {
        for (int j = 0; j < item.num_attrs; ++j) {
          // Ambiguous use of specific core attribute + generic core
          // e.g., 4c & 3c:intel_core or 4c & 3c:eff1
          if (!item.attr[j]) {
            kmp_hw_attr_t other_attr;
            for (int k = 0; k < item.num_attrs; ++k) {
              if (item.attr[k] != item.attr[j]) {
                other_attr = item.attr[k];
                break;
              }
            }
            kmp_str_buf_t buf;
            __kmp_hw_get_catalog_core_string(other_attr, &buf, item.num[j] > 0);
            KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetIncompat,
                            __kmp_hw_get_catalog_string(KMP_HW_CORE), buf.str);
            __kmp_str_buf_free(&buf);
            return false;
          }
          // Allow specifying a specific core type or core eff exactly once
          for (int k = 0; k < j; ++k) {
            if (!item.attr[j] || !item.attr[k])
              continue;
            if (item.attr[k] == item.attr[j]) {
              kmp_str_buf_t buf;
              __kmp_hw_get_catalog_core_string(item.attr[j], &buf,
                                               item.num[j] > 0);
              KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetAttrRepeat, buf.str);
              __kmp_str_buf_free(&buf);
              return false;
            }
          }
        }
      }
    }
  }

  struct core_type_indexer {
    int operator()(const kmp_hw_thread_t &t) const {
      switch (t.attrs.get_core_type()) {
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
      case KMP_HW_CORE_TYPE_ATOM:
        return 1;
      case KMP_HW_CORE_TYPE_CORE:
        return 2;
#endif
      case KMP_HW_CORE_TYPE_UNKNOWN:
        return 0;
      }
      KMP_ASSERT(0);
      return 0;
    }
  };
  struct core_eff_indexer {
    int operator()(const kmp_hw_thread_t &t) const {
      return t.attrs.get_core_eff();
    }
  };

  kmp_sub_ids_t<KMP_HW_MAX_NUM_CORE_TYPES, core_type_indexer> core_type_sub_ids(
      core_level);
  kmp_sub_ids_t<KMP_HW_MAX_NUM_CORE_EFFS, core_eff_indexer> core_eff_sub_ids(
      core_level);

  // Determine which hardware threads should be filtered.
  int num_filtered = 0;
  bool *filtered = (bool *)__kmp_allocate(sizeof(bool) * num_hw_threads);
  for (int i = 0; i < num_hw_threads; ++i) {
    kmp_hw_thread_t &hw_thread = hw_threads[i];
    // Update type_sub_id
    if (using_core_types)
      core_type_sub_ids.update(hw_thread);
    if (using_core_effs)
      core_eff_sub_ids.update(hw_thread);

    // Check to see if this hardware thread should be filtered
    bool should_be_filtered = false;
    for (int hw_subset_index = 0; hw_subset_index < hw_subset_depth;
         ++hw_subset_index) {
      const auto &hw_subset_item = __kmp_hw_subset->at(hw_subset_index);
      int level = topology_levels[hw_subset_index];
      if (level == -1)
        continue;
      if ((using_core_effs || using_core_types) && level == core_level) {
        // Look for the core attribute in KMP_HW_SUBSET which corresponds
        // to this hardware thread's core attribute. Use this num,offset plus
        // the running sub_id for the particular core attribute of this
        // hardware thread to determine if the hardware thread should be
        // filtered or not.
        int attr_idx;
        kmp_hw_core_type_t core_type = hw_thread.attrs.get_core_type();
        int core_eff = hw_thread.attrs.get_core_eff();
        for (attr_idx = 0; attr_idx < hw_subset_item.num_attrs; ++attr_idx) {
          if (using_core_types &&
              hw_subset_item.attr[attr_idx].get_core_type() == core_type)
            break;
          if (using_core_effs &&
              hw_subset_item.attr[attr_idx].get_core_eff() == core_eff)
            break;
        }
        // This core attribute isn't in the KMP_HW_SUBSET so always filter it.
        if (attr_idx == hw_subset_item.num_attrs) {
          should_be_filtered = true;
          break;
        }
        int sub_id;
        int num = hw_subset_item.num[attr_idx];
        int offset = hw_subset_item.offset[attr_idx];
        if (using_core_types)
          sub_id = core_type_sub_ids.get_sub_id(hw_thread);
        else
          sub_id = core_eff_sub_ids.get_sub_id(hw_thread);
        if (sub_id < offset ||
            (num != kmp_hw_subset_t::USE_ALL && sub_id >= offset + num)) {
          should_be_filtered = true;
          break;
        }
      } else {
        int num = hw_subset_item.num[0];
        int offset = hw_subset_item.offset[0];
        if (hw_thread.sub_ids[level] < offset ||
            (num != kmp_hw_subset_t::USE_ALL &&
             hw_thread.sub_ids[level] >= offset + num)) {
          should_be_filtered = true;
          break;
        }
      }
    }
    // Collect filtering information
    filtered[i] = should_be_filtered;
    if (should_be_filtered)
      num_filtered++;
  }

  // One last check that we shouldn't allow filtering entire machine
  if (num_filtered == num_hw_threads) {
    KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetAllFiltered);
    __kmp_free(filtered);
    return false;
  }

  // Apply the filter
  int new_index = 0;
  for (int i = 0; i < num_hw_threads; ++i) {
    if (!filtered[i]) {
      if (i != new_index)
        hw_threads[new_index] = hw_threads[i];
      new_index++;
    } else {
#if KMP_AFFINITY_SUPPORTED
      KMP_CPU_CLR(hw_threads[i].os_id, __kmp_affin_fullMask);
#endif
      __kmp_avail_proc--;
    }
  }

  KMP_DEBUG_ASSERT(new_index <= num_hw_threads);
  num_hw_threads = new_index;

  // Post hardware subset canonicalization
  _gather_enumeration_information();
  _discover_uniformity();
  _set_globals();
  _set_last_level_cache();
  __kmp_free(filtered);
  return true;
}

bool kmp_topology_t::is_close(int hwt1, int hwt2, int hw_level) const {
  if (hw_level >= depth)
    return true;
  bool retval = true;
  const kmp_hw_thread_t &t1 = hw_threads[hwt1];
  const kmp_hw_thread_t &t2 = hw_threads[hwt2];
  for (int i = 0; i < (depth - hw_level); ++i) {
    if (t1.ids[i] != t2.ids[i])
      return false;
  }
  return retval;
}
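// For illustration: in a socket/core/thread topology, is_close(a, b, 1) is
// true when the two hardware threads share the same socket and core (all
// levels except the innermost), while hw_level == 0 requires every id,
// including the thread level, to match.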

#if KMP_AFFINITY_SUPPORTED

bool KMPAffinity::picked_api = false;

void *KMPAffinity::Mask::operator new(size_t n) { return __kmp_allocate(n); }
void *KMPAffinity::Mask::operator new[](size_t n) { return __kmp_allocate(n); }
void KMPAffinity::Mask::operator delete(void *p) { __kmp_free(p); }
void KMPAffinity::Mask::operator delete[](void *p) { __kmp_free(p); }
void *KMPAffinity::operator new(size_t n) { return __kmp_allocate(n); }
void KMPAffinity::operator delete(void *p) { __kmp_free(p); }

void KMPAffinity::pick_api() {
  KMPAffinity *affinity_dispatch;
  if (picked_api)
    return;
#if KMP_USE_HWLOC
  // Only use Hwloc if affinity isn't explicitly disabled and
  // user requests Hwloc topology method
  if (__kmp_affinity_top_method == affinity_top_method_hwloc &&
      __kmp_affinity.type != affinity_disabled) {
    affinity_dispatch = new KMPHwlocAffinity();
  } else
#endif
  {
    affinity_dispatch = new KMPNativeAffinity();
  }
  __kmp_affinity_dispatch = affinity_dispatch;
  picked_api = true;
}

void KMPAffinity::destroy_api() {
  if (__kmp_affinity_dispatch != NULL) {
    delete __kmp_affinity_dispatch;
    __kmp_affinity_dispatch = NULL;
    picked_api = false;
  }
}

#define KMP_ADVANCE_SCAN(scan)                                                 \
  while (*scan != '\0') {                                                      \
    scan++;                                                                    \
  }

// Print the affinity mask to the character array in a pretty format.
// The format is a comma separated list of non-negative integers or integer
// ranges: e.g., 1,2,3-5,7,9-15
// The format can also be the string "{<empty>}" if no bits are set in mask
char *__kmp_affinity_print_mask(char *buf, int buf_len,
                                kmp_affin_mask_t *mask) {
  int start = 0, finish = 0, previous = 0;
  bool first_range;
  KMP_ASSERT(buf);
  KMP_ASSERT(buf_len >= 40);
  KMP_ASSERT(mask);
  char *scan = buf;
  char *end = buf + buf_len - 1;

  // Check for empty set.
  if (mask->begin() == mask->end()) {
    KMP_SNPRINTF(scan, end - scan + 1, "{<empty>}");
    KMP_ADVANCE_SCAN(scan);
    KMP_ASSERT(scan <= end);
    return buf;
  }

  first_range = true;
  start = mask->begin();
  while (1) {
    // Find next range
    // [start, previous] is inclusive range of contiguous bits in mask
    for (finish = mask->next(start), previous = start;
         finish == previous + 1 && finish != mask->end();
         finish = mask->next(finish)) {
      previous = finish;
    }

    // The first range does not need a comma printed before it, but the rest
    // of the ranges do need a comma beforehand
    if (!first_range) {
      KMP_SNPRINTF(scan, end - scan + 1, "%s", ",");
      KMP_ADVANCE_SCAN(scan);
    } else {
      first_range = false;
    }
    // Range with three or more contiguous bits in the affinity mask
    if (previous - start > 1) {
      KMP_SNPRINTF(scan, end - scan + 1, "%u-%u", start, previous);
    } else {
      // Range with one or two contiguous bits in the affinity mask
      KMP_SNPRINTF(scan, end - scan + 1, "%u", start);
      KMP_ADVANCE_SCAN(scan);
      if (previous - start > 0) {
        KMP_SNPRINTF(scan, end - scan + 1, ",%u", previous);
      }
    }
    KMP_ADVANCE_SCAN(scan);
    // Start over with new start point
    start = finish;
    if (start == mask->end())
      break;
    // Check for overflow
    if (end - scan < 2)
      break;
  }

  // Check for overflow
  KMP_ASSERT(scan <= end);
  return buf;
}
#undef KMP_ADVANCE_SCAN

// Print the affinity mask to the string buffer object in a pretty format
// The format is a comma separated list of non-negative integers or integer
// ranges: e.g., 1,2,3-5,7,9-15
// The format can also be the string "{<empty>}" if no bits are set in mask
kmp_str_buf_t *__kmp_affinity_str_buf_mask(kmp_str_buf_t *buf,
                                           kmp_affin_mask_t *mask) {
  int start = 0, finish = 0, previous = 0;
  bool first_range;
  KMP_ASSERT(buf);
  KMP_ASSERT(mask);

  __kmp_str_buf_clear(buf);

  // Check for empty set.
  if (mask->begin() == mask->end()) {
    __kmp_str_buf_print(buf, "%s", "{<empty>}");
    return buf;
  }

  first_range = true;
  start = mask->begin();
  while (1) {
    // Find next range
    // [start, previous] is inclusive range of contiguous bits in mask
    for (finish = mask->next(start), previous = start;
         finish == previous + 1 && finish != mask->end();
         finish = mask->next(finish)) {
      previous = finish;
    }

    // The first range does not need a comma printed before it, but the rest
    // of the ranges do need a comma beforehand
    if (!first_range) {
      __kmp_str_buf_print(buf, "%s", ",");
    } else {
      first_range = false;
    }
    // Range with three or more contiguous bits in the affinity mask
    if (previous - start > 1) {
      __kmp_str_buf_print(buf, "%u-%u", start, previous);
    } else {
      // Range with one or two contiguous bits in the affinity mask
      __kmp_str_buf_print(buf, "%u", start);
      if (previous - start > 0) {
        __kmp_str_buf_print(buf, ",%u", previous);
      }
    }
    // Start over with new start point
    start = finish;
    if (start == mask->end())
      break;
  }
  return buf;
}

// Return (possibly empty) affinity mask representing the offline CPUs
// Caller must free the mask
kmp_affin_mask_t *__kmp_affinity_get_offline_cpus() {
  kmp_affin_mask_t *offline;
  KMP_CPU_ALLOC(offline);
  KMP_CPU_ZERO(offline);
#if KMP_OS_LINUX
  int n, begin_cpu, end_cpu;
  kmp_safe_raii_file_t offline_file;
  auto skip_ws = [](FILE *f) {
    int c;
    do {
      c = fgetc(f);
    } while (isspace(c));
    if (c != EOF)
      ungetc(c, f);
  };
  // File contains CSV of integer ranges representing the offline CPUs
  // e.g., 1,2,4-7,9,11-15
  int status = offline_file.try_open("/sys/devices/system/cpu/offline", "r");
  if (status != 0)
    return offline;
  while (!feof(offline_file)) {
    skip_ws(offline_file);
    n = fscanf(offline_file, "%d", &begin_cpu);
    if (n != 1)
      break;
    skip_ws(offline_file);
    int c = fgetc(offline_file);
    if (c == EOF || c == ',') {
      // Just single CPU
      end_cpu = begin_cpu;
    } else if (c == '-') {
      // Range of CPUs
      skip_ws(offline_file);
      n = fscanf(offline_file, "%d", &end_cpu);
      if (n != 1)
        break;
      skip_ws(offline_file);
      c = fgetc(offline_file); // skip ','
    } else {
      // Syntax problem
      break;
    }
    // Ensure a valid range of CPUs
    if (begin_cpu < 0 || begin_cpu >= __kmp_xproc || end_cpu < 0 ||
        end_cpu >= __kmp_xproc || begin_cpu > end_cpu) {
      continue;
    }
    // Insert [begin_cpu, end_cpu] into offline mask
    for (int cpu = begin_cpu; cpu <= end_cpu; ++cpu) {
      KMP_CPU_SET(cpu, offline);
    }
  }
#endif
  return offline;
}

// Return the number of available procs
int __kmp_affinity_entire_machine_mask(kmp_affin_mask_t *mask) {
  int avail_proc = 0;
  KMP_CPU_ZERO(mask);

#if KMP_GROUP_AFFINITY

  if (__kmp_num_proc_groups > 1) {
    int group;
    KMP_DEBUG_ASSERT(__kmp_GetActiveProcessorCount != NULL);
    for (group = 0; group < __kmp_num_proc_groups; group++) {
      int i;
      int num = __kmp_GetActiveProcessorCount(group);
      for (i = 0; i < num; i++) {
        KMP_CPU_SET(i + group * (CHAR_BIT * sizeof(DWORD_PTR)), mask);
        avail_proc++;
      }
    }
  } else

#endif /* KMP_GROUP_AFFINITY */

  {
    int proc;
    kmp_affin_mask_t *offline_cpus = __kmp_affinity_get_offline_cpus();
    for (proc = 0; proc < __kmp_xproc; proc++) {
      // Skip offline CPUs
      if (KMP_CPU_ISSET(proc, offline_cpus))
        continue;
      KMP_CPU_SET(proc, mask);
      avail_proc++;
    }
    KMP_CPU_FREE(offline_cpus);
  }

  return avail_proc;
}

// All of the __kmp_affinity_create_*_map() routines should allocate the
// internal topology object and set the layer ids for it. Each routine
// returns a boolean on whether it was successful at doing so.
kmp_affin_mask_t *__kmp_affin_fullMask = NULL;
// Original mask is a subset of full mask in multiple processor groups topology
kmp_affin_mask_t *__kmp_affin_origMask = NULL;

#if KMP_USE_HWLOC
static inline bool __kmp_hwloc_is_cache_type(hwloc_obj_t obj) {
#if HWLOC_API_VERSION >= 0x00020000
  return hwloc_obj_type_is_cache(obj->type);
#else
  return obj->type == HWLOC_OBJ_CACHE;
#endif
}

// Returns KMP_HW_* type derived from HWLOC_* type
static inline kmp_hw_t __kmp_hwloc_type_2_topology_type(hwloc_obj_t obj) {

  if (__kmp_hwloc_is_cache_type(obj)) {
    if (obj->attr->cache.type == HWLOC_OBJ_CACHE_INSTRUCTION)
      return KMP_HW_UNKNOWN;
    switch (obj->attr->cache.depth) {
    case 1:
      return KMP_HW_L1;
    case 2:
#if KMP_MIC_SUPPORTED
      if (__kmp_mic_type == mic3) {
        return KMP_HW_TILE;
      }
#endif
      return KMP_HW_L2;
    case 3:
      return KMP_HW_L3;
    }
    return KMP_HW_UNKNOWN;
  }

  switch (obj->type) {
  case HWLOC_OBJ_PACKAGE:
    return KMP_HW_SOCKET;
  case HWLOC_OBJ_NUMANODE:
    return KMP_HW_NUMA;
  case HWLOC_OBJ_CORE:
    return KMP_HW_CORE;
  case HWLOC_OBJ_PU:
    return KMP_HW_THREAD;
  case HWLOC_OBJ_GROUP:
#if HWLOC_API_VERSION >= 0x00020000
    if (obj->attr->group.kind == HWLOC_GROUP_KIND_INTEL_DIE)
      return KMP_HW_DIE;
    else if (obj->attr->group.kind == HWLOC_GROUP_KIND_INTEL_TILE)
      return KMP_HW_TILE;
    else if (obj->attr->group.kind == HWLOC_GROUP_KIND_INTEL_MODULE)
      return KMP_HW_MODULE;
    else if (obj->attr->group.kind == HWLOC_GROUP_KIND_WINDOWS_PROCESSOR_GROUP)
      return KMP_HW_PROC_GROUP;
#endif
    return KMP_HW_UNKNOWN;
#if HWLOC_API_VERSION >= 0x00020100
  case HWLOC_OBJ_DIE:
    return KMP_HW_DIE;
#endif
  }
  return KMP_HW_UNKNOWN;
}

// Returns the number of objects of type 'type' below 'obj' within the topology
// tree structure. e.g., if obj is a HWLOC_OBJ_PACKAGE object, and type is
// HWLOC_OBJ_PU, then this will return the number of PU's under the SOCKET
// object.
static int __kmp_hwloc_get_nobjs_under_obj(hwloc_obj_t obj,
                                           hwloc_obj_type_t type) {
  int retval = 0;
  hwloc_obj_t first;
  for (first = hwloc_get_obj_below_by_type(__kmp_hwloc_topology, obj->type,
                                           obj->logical_index, type, 0);
       first != NULL && hwloc_get_ancestor_obj_by_type(__kmp_hwloc_topology,
                                                       obj->type, first) == obj;
       first = hwloc_get_next_obj_by_type(__kmp_hwloc_topology, first->type,
                                          first)) {
    ++retval;
  }
  return retval;
}

// This gets the sub_id for a lower object under a higher object in the
// topology tree
static int __kmp_hwloc_get_sub_id(hwloc_topology_t t, hwloc_obj_t higher,
                                  hwloc_obj_t lower) {
  hwloc_obj_t obj;
  hwloc_obj_type_t ltype = lower->type;
  int lindex = lower->logical_index - 1;
  int sub_id = 0;
  // Get the previous lower object
  obj = hwloc_get_obj_by_type(t, ltype, lindex);
  while (obj && lindex >= 0 &&
         hwloc_bitmap_isincluded(obj->cpuset, higher->cpuset)) {
    if (obj->userdata) {
      sub_id = (int)(RCAST(kmp_intptr_t, obj->userdata));
      break;
    }
    sub_id++;
    lindex--;
    obj = hwloc_get_obj_by_type(t, ltype, lindex);
  }
1644  lower->userdata = RCAST(void *, sub_id + 1);
1645  return sub_id;
1646 }
1647 
1648 static bool __kmp_affinity_create_hwloc_map(kmp_i18n_id_t *const msg_id) {
1649  kmp_hw_t type;
1650  int hw_thread_index, sub_id;
1651  int depth;
1652  hwloc_obj_t pu, obj, root, prev;
1653  kmp_hw_t types[KMP_HW_LAST];
1654  hwloc_obj_type_t hwloc_types[KMP_HW_LAST];
1655 
1656  hwloc_topology_t tp = __kmp_hwloc_topology;
1657  *msg_id = kmp_i18n_null;
1658  if (__kmp_affinity.flags.verbose) {
1659  KMP_INFORM(AffUsingHwloc, "KMP_AFFINITY");
1660  }
1661 
1662  if (!KMP_AFFINITY_CAPABLE()) {
1663  // Hack to try and infer the machine topology using only the data
1664  // available from hwloc on the current thread, and __kmp_xproc.
1665  KMP_ASSERT(__kmp_affinity.type == affinity_none);
    // hwloc only guarantees existence of PU object, so check PACKAGE and CORE
1667  hwloc_obj_t o = hwloc_get_obj_by_type(tp, HWLOC_OBJ_PACKAGE, 0);
1668  if (o != NULL)
1669  nCoresPerPkg = __kmp_hwloc_get_nobjs_under_obj(o, HWLOC_OBJ_CORE);
1670  else
1671  nCoresPerPkg = 1; // no PACKAGE found
1672  o = hwloc_get_obj_by_type(tp, HWLOC_OBJ_CORE, 0);
1673  if (o != NULL)
1674  __kmp_nThreadsPerCore = __kmp_hwloc_get_nobjs_under_obj(o, HWLOC_OBJ_PU);
1675  else
1676  __kmp_nThreadsPerCore = 1; // no CORE found
1677  __kmp_ncores = __kmp_xproc / __kmp_nThreadsPerCore;
1678  if (nCoresPerPkg == 0)
1679  nCoresPerPkg = 1; // to prevent possible division by 0
1680  nPackages = (__kmp_xproc + nCoresPerPkg - 1) / nCoresPerPkg;
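  // ceiling division, e.g., __kmp_xproc == 6 with nCoresPerPkg == 4 yields
  // nPackages == 2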
1681  return true;
1682  }
1683 
1684 #if HWLOC_API_VERSION >= 0x00020400
1685  // Handle multiple types of cores if they exist on the system
1686  int nr_cpu_kinds = hwloc_cpukinds_get_nr(tp, 0);
1687 
1688  typedef struct kmp_hwloc_cpukinds_info_t {
1689  int efficiency;
1690  kmp_hw_core_type_t core_type;
1691  hwloc_bitmap_t mask;
1692  } kmp_hwloc_cpukinds_info_t;
1693  kmp_hwloc_cpukinds_info_t *cpukinds = nullptr;
1694 
1695  if (nr_cpu_kinds > 0) {
1696  unsigned nr_infos;
1697  struct hwloc_info_s *infos;
1698  cpukinds = (kmp_hwloc_cpukinds_info_t *)__kmp_allocate(
1699  sizeof(kmp_hwloc_cpukinds_info_t) * nr_cpu_kinds);
1700  for (unsigned idx = 0; idx < (unsigned)nr_cpu_kinds; ++idx) {
1701  cpukinds[idx].efficiency = -1;
1702  cpukinds[idx].core_type = KMP_HW_CORE_TYPE_UNKNOWN;
1703  cpukinds[idx].mask = hwloc_bitmap_alloc();
1704  if (hwloc_cpukinds_get_info(tp, idx, cpukinds[idx].mask,
1705  &cpukinds[idx].efficiency, &nr_infos, &infos,
1706  0) == 0) {
1707  for (unsigned i = 0; i < nr_infos; ++i) {
1708  if (__kmp_str_match("CoreType", 8, infos[i].name)) {
1709 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
1710  if (__kmp_str_match("IntelAtom", 9, infos[i].value)) {
1711  cpukinds[idx].core_type = KMP_HW_CORE_TYPE_ATOM;
1712  break;
1713  } else if (__kmp_str_match("IntelCore", 9, infos[i].value)) {
1714  cpukinds[idx].core_type = KMP_HW_CORE_TYPE_CORE;
1715  break;
1716  }
1717 #endif
1718  }
1719  }
1720  }
1721  }
1722  }
1723 #endif
1724 
1725  root = hwloc_get_root_obj(tp);
1726 
1727  // Figure out the depth and types in the topology
1728  depth = 0;
1729  pu = hwloc_get_pu_obj_by_os_index(tp, __kmp_affin_fullMask->begin());
1730  KMP_ASSERT(pu);
1731  obj = pu;
1732  types[depth] = KMP_HW_THREAD;
1733  hwloc_types[depth] = obj->type;
1734  depth++;
1735  while (obj != root && obj != NULL) {
1736  obj = obj->parent;
1737 #if HWLOC_API_VERSION >= 0x00020000
1738  if (obj->memory_arity) {
1739  hwloc_obj_t memory;
1740  for (memory = obj->memory_first_child; memory;
1741  memory = hwloc_get_next_child(tp, obj, memory)) {
1742  if (memory->type == HWLOC_OBJ_NUMANODE)
1743  break;
1744  }
1745  if (memory && memory->type == HWLOC_OBJ_NUMANODE) {
1746  types[depth] = KMP_HW_NUMA;
1747  hwloc_types[depth] = memory->type;
1748  depth++;
1749  }
1750  }
1751 #endif
1752  type = __kmp_hwloc_type_2_topology_type(obj);
1753  if (type != KMP_HW_UNKNOWN) {
1754  types[depth] = type;
1755  hwloc_types[depth] = obj->type;
1756  depth++;
1757  }
1758  }
1759  KMP_ASSERT(depth > 0);
1760 
1761  // Reverse the arrays so the types run from outermost to innermost level
1762  for (int i = 0, j = depth - 1; i < j; ++i, --j) {
1763  hwloc_obj_type_t hwloc_temp = hwloc_types[i];
1764  kmp_hw_t temp = types[i];
1765  types[i] = types[j];
1766  types[j] = temp;
1767  hwloc_types[i] = hwloc_types[j];
1768  hwloc_types[j] = hwloc_temp;
1769  }
1770 
1771  // Allocate the data structure to be returned.
1772  __kmp_topology = kmp_topology_t::allocate(__kmp_avail_proc, depth, types);
1773 
1774  hw_thread_index = 0;
1775  pu = NULL;
1776  while ((pu = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PU, pu))) {
1777  int index = depth - 1;
1778  bool included = KMP_CPU_ISSET(pu->os_index, __kmp_affin_fullMask);
1779  kmp_hw_thread_t &hw_thread = __kmp_topology->at(hw_thread_index);
1780  if (included) {
1781  hw_thread.clear();
1782  hw_thread.ids[index] = pu->logical_index;
1783  hw_thread.os_id = pu->os_index;
1784  // If multiple core types, then set that attribute for the hardware thread
1785 #if HWLOC_API_VERSION >= 0x00020400
1786  if (cpukinds) {
1787  int cpukind_index = -1;
1788  for (int i = 0; i < nr_cpu_kinds; ++i) {
1789  if (hwloc_bitmap_isset(cpukinds[i].mask, hw_thread.os_id)) {
1790  cpukind_index = i;
1791  break;
1792  }
1793  }
1794  if (cpukind_index >= 0) {
1795  hw_thread.attrs.set_core_type(cpukinds[cpukind_index].core_type);
1796  hw_thread.attrs.set_core_eff(cpukinds[cpukind_index].efficiency);
1797  }
1798  }
1799 #endif
1800  index--;
1801  }
1802  obj = pu;
1803  prev = obj;
1804  while (obj != root && obj != NULL) {
1805  obj = obj->parent;
1806 #if HWLOC_API_VERSION >= 0x00020000
1807  // NUMA Nodes are handled differently since they are not within the
1808  // parent/child structure anymore. They are separate children
1809  // of obj (memory_first_child points to first memory child)
1810  if (obj->memory_arity) {
1811  hwloc_obj_t memory;
1812  for (memory = obj->memory_first_child; memory;
1813  memory = hwloc_get_next_child(tp, obj, memory)) {
1814  if (memory->type == HWLOC_OBJ_NUMANODE)
1815  break;
1816  }
1817  if (memory && memory->type == HWLOC_OBJ_NUMANODE) {
1818  sub_id = __kmp_hwloc_get_sub_id(tp, memory, prev);
1819  if (included) {
1820  hw_thread.ids[index] = memory->logical_index;
1821  hw_thread.ids[index + 1] = sub_id;
1822  index--;
1823  }
1824  prev = memory;
1825  }
1826  prev = obj;
1827  }
1828 #endif
1829  type = __kmp_hwloc_type_2_topology_type(obj);
1830  if (type != KMP_HW_UNKNOWN) {
1831  sub_id = __kmp_hwloc_get_sub_id(tp, obj, prev);
1832  if (included) {
1833  hw_thread.ids[index] = obj->logical_index;
1834  hw_thread.ids[index + 1] = sub_id;
1835  index--;
1836  }
1837  prev = obj;
1838  }
1839  }
1840  if (included)
1841  hw_thread_index++;
1842  }
1843 
1844 #if HWLOC_API_VERSION >= 0x00020400
1845  // Free the core types information
1846  if (cpukinds) {
1847  for (int idx = 0; idx < nr_cpu_kinds; ++idx)
1848  hwloc_bitmap_free(cpukinds[idx].mask);
1849  __kmp_free(cpukinds);
1850  }
1851 #endif
1852  __kmp_topology->sort_ids();
1853  return true;
1854 }
1855 #endif // KMP_USE_HWLOC
1856 
1857 // If we don't know how to retrieve the machine's processor topology, or
1858 // encounter an error in doing so, this routine is called to form a "flat"
1859 // mapping of os thread id's <-> processor id's.
1860 static bool __kmp_affinity_create_flat_map(kmp_i18n_id_t *const msg_id) {
1861  *msg_id = kmp_i18n_null;
1862  int depth = 3;
1863  kmp_hw_t types[] = {KMP_HW_SOCKET, KMP_HW_CORE, KMP_HW_THREAD};
1864 
1865  if (__kmp_affinity.flags.verbose) {
1866  KMP_INFORM(UsingFlatOS, "KMP_AFFINITY");
1867  }
1868 
1869  // Even if __kmp_affinity.type == affinity_none, this routine might still
1870  // be called to set __kmp_ncores, as well as
1871  // __kmp_nThreadsPerCore, nCoresPerPkg, & nPackages.
1872  if (!KMP_AFFINITY_CAPABLE()) {
1873  KMP_ASSERT(__kmp_affinity.type == affinity_none);
1874  __kmp_ncores = nPackages = __kmp_xproc;
1875  __kmp_nThreadsPerCore = nCoresPerPkg = 1;
1876  return true;
1877  }
1878 
1879  // When affinity is off, this routine will still be called to set
1880  // __kmp_ncores, as well as __kmp_nThreadsPerCore, nCoresPerPkg, & nPackages.
1881  // Make sure all these vars are set correctly here, since they are read
1882  // even when __kmp_affinity.type == affinity_none.
1883  __kmp_ncores = nPackages = __kmp_avail_proc;
1884  __kmp_nThreadsPerCore = nCoresPerPkg = 1;
1885 
1886  // Construct the data structure to be returned.
1887  __kmp_topology = kmp_topology_t::allocate(__kmp_avail_proc, depth, types);
1888  int avail_ct = 0;
1889  int i;
1890  KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
1891  // Skip this proc if it is not included in the machine model.
1892  if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
1893  continue;
1894  }
1895  kmp_hw_thread_t &hw_thread = __kmp_topology->at(avail_ct);
1896  hw_thread.clear();
1897  hw_thread.os_id = i;
1898  hw_thread.ids[0] = i;
1899  hw_thread.ids[1] = 0;
1900  hw_thread.ids[2] = 0;
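  // e.g., OS proc 5 becomes { socket 5, core 0, thread 0 } in the flat model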
1901  avail_ct++;
1902  }
1903  if (__kmp_affinity.flags.verbose) {
1904  KMP_INFORM(OSProcToPackage, "KMP_AFFINITY");
1905  }
1906  return true;
1907 }
1908 
1909 #if KMP_GROUP_AFFINITY
1910 // If multiple Windows* OS processor groups exist, we can create a 2-level
1911 // topology map with the groups at level 0 and the individual procs at level 1.
1912 // This facilitates letting the threads float among all procs in a group,
1913 // if granularity=group (the default when there are multiple groups).
1914 static bool __kmp_affinity_create_proc_group_map(kmp_i18n_id_t *const msg_id) {
1915  *msg_id = kmp_i18n_null;
1916  int depth = 3;
1917  kmp_hw_t types[] = {KMP_HW_PROC_GROUP, KMP_HW_CORE, KMP_HW_THREAD};
1918  const static size_t BITS_PER_GROUP = CHAR_BIT * sizeof(DWORD_PTR);
1919 
1920  if (__kmp_affinity.flags.verbose) {
1921  KMP_INFORM(AffWindowsProcGroupMap, "KMP_AFFINITY");
1922  }
1923 
1924  // If we aren't affinity capable, then use flat topology
1925  if (!KMP_AFFINITY_CAPABLE()) {
1926  KMP_ASSERT(__kmp_affinity.type == affinity_none);
1927  nPackages = __kmp_num_proc_groups;
1928  __kmp_nThreadsPerCore = 1;
1929  __kmp_ncores = __kmp_xproc;
1930  nCoresPerPkg = __kmp_ncores / nPackages; // cores per pkg = cores / pkgs
1931  return true;
1932  }
1933 
1934  // Construct the data structure to be returned.
1935  __kmp_topology = kmp_topology_t::allocate(__kmp_avail_proc, depth, types);
1936  int avail_ct = 0;
1937  int i;
1938  KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
1939  // Skip this proc if it is not included in the machine model.
1940  if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
1941  continue;
1942  }
1943  kmp_hw_thread_t &hw_thread = __kmp_topology->at(avail_ct++);
1944  hw_thread.clear();
1945  hw_thread.os_id = i;
1946  hw_thread.ids[0] = i / BITS_PER_GROUP;
1947  hw_thread.ids[1] = hw_thread.ids[2] = i % BITS_PER_GROUP;
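  // e.g., with 64-bit masks (BITS_PER_GROUP == 64), OS proc 70 maps to
  // processor group 1, proc 6 within that group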
1948  }
1949  return true;
1950 }
1951 #endif /* KMP_GROUP_AFFINITY */
1952 
1953 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
1954 
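// Extract bits [LSB, MSB] (inclusive) of v, shifted down to bit 0.
// For example, __kmp_extract_bits<8, 15>(0x12345678) == 0x56.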
1955 template <kmp_uint32 LSB, kmp_uint32 MSB>
1956 static inline unsigned __kmp_extract_bits(kmp_uint32 v) {
1957  const kmp_uint32 SHIFT_LEFT = sizeof(kmp_uint32) * 8 - 1 - MSB;
1958  const kmp_uint32 SHIFT_RIGHT = LSB;
1959  kmp_uint32 retval = v;
1960  retval <<= SHIFT_LEFT;
1961  retval >>= (SHIFT_LEFT + SHIFT_RIGHT);
1962  return retval;
1963 }
1964 
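// Return the number of bits needed to represent 'count' distinct values,
// e.g., a count of 6 needs a 3-bit field since 4 < 6 <= 8.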
1965 static int __kmp_cpuid_mask_width(int count) {
1966  int r = 0;
1967 
1968  while ((1 << r) < count)
1969  ++r;
1970  return r;
1971 }
1972 
1973 class apicThreadInfo {
1974 public:
1975  unsigned osId; // param to __kmp_affinity_bind_thread
1976  unsigned apicId; // from cpuid after binding
1977  unsigned maxCoresPerPkg; // ""
1978  unsigned maxThreadsPerPkg; // ""
1979  unsigned pkgId; // inferred from above values
1980  unsigned coreId; // ""
1981  unsigned threadId; // ""
1982 };
1983 
1984 static int __kmp_affinity_cmp_apicThreadInfo_phys_id(const void *a,
1985  const void *b) {
1986  const apicThreadInfo *aa = (const apicThreadInfo *)a;
1987  const apicThreadInfo *bb = (const apicThreadInfo *)b;
1988  if (aa->pkgId < bb->pkgId)
1989  return -1;
1990  if (aa->pkgId > bb->pkgId)
1991  return 1;
1992  if (aa->coreId < bb->coreId)
1993  return -1;
1994  if (aa->coreId > bb->coreId)
1995  return 1;
1996  if (aa->threadId < bb->threadId)
1997  return -1;
1998  if (aa->threadId > bb->threadId)
1999  return 1;
2000  return 0;
2001 }
2002 
2003 class kmp_cache_info_t {
2004 public:
2005  struct info_t {
2006  unsigned level, mask;
2007  };
2008  kmp_cache_info_t() : depth(0) { get_leaf4_levels(); }
2009  size_t get_depth() const { return depth; }
2010  info_t &operator[](size_t index) { return table[index]; }
2011  const info_t &operator[](size_t index) const { return table[index]; }
2012 
2013  static kmp_hw_t get_topology_type(unsigned level) {
2014  KMP_DEBUG_ASSERT(level >= 1 && level <= MAX_CACHE_LEVEL);
2015  switch (level) {
2016  case 1:
2017  return KMP_HW_L1;
2018  case 2:
2019  return KMP_HW_L2;
2020  case 3:
2021  return KMP_HW_L3;
2022  }
2023  return KMP_HW_UNKNOWN;
2024  }
2025 
2026 private:
2027  static const int MAX_CACHE_LEVEL = 3;
2028 
2029  size_t depth;
2030  info_t table[MAX_CACHE_LEVEL];
2031 
2032  void get_leaf4_levels() {
2033  unsigned level = 0;
2034  while (depth < MAX_CACHE_LEVEL) {
2035  unsigned cache_type, max_threads_sharing;
2036  unsigned cache_level, cache_mask_width;
2037  kmp_cpuid buf2;
2038  __kmp_x86_cpuid(4, level, &buf2);
2039  cache_type = __kmp_extract_bits<0, 4>(buf2.eax);
2040  if (!cache_type)
2041  break;
2042  // Skip instruction caches
2043  if (cache_type == 2) {
2044  level++;
2045  continue;
2046  }
2047  max_threads_sharing = __kmp_extract_bits<14, 25>(buf2.eax) + 1;
2048  cache_mask_width = __kmp_cpuid_mask_width(max_threads_sharing);
2049  cache_level = __kmp_extract_bits<5, 7>(buf2.eax);
2050  table[depth].level = cache_level;
2051  table[depth].mask = ((-1) << cache_mask_width);
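  // Illustrative example: an L2 shared by up to 4 threads gives
  // max_threads_sharing == 4, cache_mask_width == 2, and a mask of
  // 0xFFFFFFFC; APIC ids that are equal under the mask share this cache.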
2052  depth++;
2053  level++;
2054  }
2055  }
2056 };
2057 
2058 // On IA-32 architecture and Intel(R) 64 architecture, we attempt to use
2059 // an algorithm which cycles through the available os threads, binding
2060 // the current thread to each of them in turn, and then retrieving
2061 // the Apic Id for each thread context using the cpuid instruction.
2062 static bool __kmp_affinity_create_apicid_map(kmp_i18n_id_t *const msg_id) {
2063  kmp_cpuid buf;
2064  *msg_id = kmp_i18n_null;
2065 
2066  if (__kmp_affinity.flags.verbose) {
2067  KMP_INFORM(AffInfoStr, "KMP_AFFINITY", KMP_I18N_STR(DecodingLegacyAPIC));
2068  }
2069 
2070  // Check if cpuid leaf 4 is supported.
2071  __kmp_x86_cpuid(0, 0, &buf);
2072  if (buf.eax < 4) {
2073  *msg_id = kmp_i18n_str_NoLeaf4Support;
2074  return false;
2075  }
2076 
2077  // The algorithm used starts by setting the affinity to each available thread
2078  // and retrieving info from the cpuid instruction, so if we are not capable of
2079  // calling __kmp_get_system_affinity() and __kmp_set_system_affinity(), then we
2080  // need to do something else - use the defaults that we calculated from
2081  // issuing cpuid without binding to each proc.
2082  if (!KMP_AFFINITY_CAPABLE()) {
2083  // Hack to try and infer the machine topology using only the data
2084  // available from cpuid on the current thread, and __kmp_xproc.
2085  KMP_ASSERT(__kmp_affinity.type == affinity_none);
2086 
2087  // Get an upper bound on the number of threads per package using cpuid(1).
2088  // On some OS/chip combinations where HT is supported by the chip but is
2089  // disabled, this value will be 2 on a single core chip. Usually, it will be
2090  // 2 if HT is enabled and 1 if HT is disabled.
2091  __kmp_x86_cpuid(1, 0, &buf);
2092  int maxThreadsPerPkg = (buf.ebx >> 16) & 0xff;
2093  if (maxThreadsPerPkg == 0) {
2094  maxThreadsPerPkg = 1;
2095  }
2096 
2097  // The num cores per pkg comes from cpuid(4). 1 must be added to the encoded
2098  // value.
2099  //
2100  // The author of cpu_count.cpp treated this as only an upper bound on the
2101  // number of cores, but I haven't seen any cases where it was greater than
2102  // the actual number of cores, so we will treat it as exact in this block of
2103  // code.
2104  //
2105  // First, we need to check if cpuid(4) is supported on this chip. To see if
2106  // cpuid(n) is supported, issue cpuid(0) and check if eax has the value n or
2107  // greater.
2108  __kmp_x86_cpuid(0, 0, &buf);
2109  if (buf.eax >= 4) {
2110  __kmp_x86_cpuid(4, 0, &buf);
2111  nCoresPerPkg = ((buf.eax >> 26) & 0x3f) + 1;
2112  } else {
2113  nCoresPerPkg = 1;
2114  }
2115 
2116  // There is no way to reliably tell if HT is enabled without issuing the
2117  // cpuid instruction from every thread and correlating the cpuid info, so
2118  // if the machine is not affinity capable, we assume that HT is off. We have
2119  // seen quite a few machines where maxThreadsPerPkg is 2, yet the machine
2120  // does not support HT.
2121  //
2122  // - Older OSes are usually found on machines with older chips, which do not
2123  // support HT.
2124  // - The performance penalty for mistakenly identifying a machine as HT when
2125  // it isn't (which results in blocktime being incorrectly set to 0) is
2126  // greater than the penalty for mistakenly identifying a machine as
2127  // being 1 thread/core when it is really HT enabled (which results in
2128  // blocktime being incorrectly set to a positive value).
2129  __kmp_ncores = __kmp_xproc;
2130  nPackages = (__kmp_xproc + nCoresPerPkg - 1) / nCoresPerPkg;
2131  __kmp_nThreadsPerCore = 1;
2132  return true;
2133  }
2134 
2135  // From here on, we can assume that it is safe to call
2136  // __kmp_get_system_affinity() and __kmp_set_system_affinity(), even if
2137  // __kmp_affinity.type = affinity_none.
2138 
2139  // Save the affinity mask for the current thread.
2140  kmp_affinity_raii_t previous_affinity;
2141 
2142  // Run through each of the available contexts, binding the current thread
2143  // to it, and obtaining the pertinent information using the cpuid instr.
2144  //
2145  // The relevant information is:
2146  // - Apic Id: Bits 24:31 of ebx after issuing cpuid(1) - each thread context
2147  // has a unique Apic Id, which is of the form pkg# : core# : thread#.
2148  // - Max Threads Per Pkg: Bits 16:23 of ebx after issuing cpuid(1). The value
2149  // of this field determines the width of the core# + thread# fields in the
2150  // Apic Id. It is also an upper bound on the number of threads per
2151  // package, but it has been verified that situations happen where it is not
2152  // exact. In particular, on certain OS/chip combinations where Intel(R)
2153  // Hyper-Threading Technology is supported by the chip but has been
2154  // disabled, the value of this field will be 2 (for a single core chip).
2155  // On other OS/chip combinations supporting Intel(R) Hyper-Threading
2156  // Technology, the value of this field will be 1 when Intel(R)
2157  // Hyper-Threading Technology is disabled and 2 when it is enabled.
2158  // - Max Cores Per Pkg: Bits 26:31 of eax after issuing cpuid(4). The value
2159  // of this field (+1) determines the width of the core# field in the Apic
2160  // Id. The comments in "cpucount.cpp" say that this value is an upper
2161  // bound, but the IA-32 architecture manual says that it is exactly the
2162  // number of cores per package, and I haven't seen any case where it
2163  // wasn't.
2164  //
2165  // From this information, deduce the package Id, core Id, and thread Id,
2166  // and set the corresponding fields in the apicThreadInfo struct.
2167  unsigned i;
2168  apicThreadInfo *threadInfo = (apicThreadInfo *)__kmp_allocate(
2169  __kmp_avail_proc * sizeof(apicThreadInfo));
2170  unsigned nApics = 0;
2171  KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
2172  // Skip this proc if it is not included in the machine model.
2173  if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
2174  continue;
2175  }
2176  KMP_DEBUG_ASSERT((int)nApics < __kmp_avail_proc);
2177 
2178  __kmp_affinity_dispatch->bind_thread(i);
2179  threadInfo[nApics].osId = i;
2180 
2181  // The apic id and max threads per pkg come from cpuid(1).
2182  __kmp_x86_cpuid(1, 0, &buf);
2183  if (((buf.edx >> 9) & 1) == 0) {
2184  __kmp_free(threadInfo);
2185  *msg_id = kmp_i18n_str_ApicNotPresent;
2186  return false;
2187  }
2188  threadInfo[nApics].apicId = (buf.ebx >> 24) & 0xff;
2189  threadInfo[nApics].maxThreadsPerPkg = (buf.ebx >> 16) & 0xff;
2190  if (threadInfo[nApics].maxThreadsPerPkg == 0) {
2191  threadInfo[nApics].maxThreadsPerPkg = 1;
2192  }
2193 
2194  // Max cores per pkg comes from cpuid(4). 1 must be added to the encoded
2195  // value.
2196  //
2197  // First, we need to check if cpuid(4) is supported on this chip. To see if
2198  // cpuid(n) is supported, issue cpuid(0) and check if eax has the value n
2199  // or greater.
2200  __kmp_x86_cpuid(0, 0, &buf);
2201  if (buf.eax >= 4) {
2202  __kmp_x86_cpuid(4, 0, &buf);
2203  threadInfo[nApics].maxCoresPerPkg = ((buf.eax >> 26) & 0x3f) + 1;
2204  } else {
2205  threadInfo[nApics].maxCoresPerPkg = 1;
2206  }
2207 
2208  // Infer the pkgId / coreId / threadId using only the info obtained locally.
2209  int widthCT = __kmp_cpuid_mask_width(threadInfo[nApics].maxThreadsPerPkg);
2210  threadInfo[nApics].pkgId = threadInfo[nApics].apicId >> widthCT;
2211 
2212  int widthC = __kmp_cpuid_mask_width(threadInfo[nApics].maxCoresPerPkg);
2213  int widthT = widthCT - widthC;
2214  if (widthT < 0) {
2215  // I've never seen this one happen, but I suppose it could, if the cpuid
2216  // instruction on a chip was really screwed up. The previous_affinity RAII
2217  // object restores the affinity mask on return.
2218  __kmp_free(threadInfo);
2219  *msg_id = kmp_i18n_str_InvalidCpuidInfo;
2220  return false;
2221  }
2222 
2223  int maskC = (1 << widthC) - 1;
2224  threadInfo[nApics].coreId = (threadInfo[nApics].apicId >> widthT) & maskC;
2225 
2226  int maskT = (1 << widthT) - 1;
2227  threadInfo[nApics].threadId = threadInfo[nApics].apicId & maskT;
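  // Worked example (hypothetical values): apicId == 0b101101 with
  // maxThreadsPerPkg == 8 gives widthCT == 3, and maxCoresPerPkg == 4 gives
  // widthC == 2, hence widthT == 1. Then pkgId == 0b101 (5),
  // coreId == (0b101101 >> 1) & 0b11 == 2, and threadId == 0b101101 & 0b1 == 1.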
2228 
2229  nApics++;
2230  }
2231 
2232  // We've collected all the info we need.
2233  // Restore the old affinity mask for this thread.
2234  previous_affinity.restore();
2235 
2236  // Sort the threadInfo table by physical Id.
2237  qsort(threadInfo, nApics, sizeof(*threadInfo),
2238  __kmp_affinity_cmp_apicThreadInfo_phys_id);
2239 
2240  // The table is now sorted by pkgId / coreId / threadId, but we really don't
2241  // know the radix of any of the fields. pkgId's may be sparsely assigned among
2242  // the chips on a system. Although coreId's are usually assigned
2243  // [0 .. coresPerPkg-1] and threadId's are usually assigned
2244  // [0..threadsPerCore-1], we don't want to make any such assumptions.
2245  //
2246  // For that matter, we don't know what coresPerPkg and threadsPerCore (or the
2247  // total # packages) are at this point - we want to determine that now. We
2248  // only have an upper bound on the first two figures.
2249  //
2250  // We also perform a consistency check at this point: the values returned by
2251  // the cpuid instruction for any thread bound to a given package had better
2252  // return the same info for maxThreadsPerPkg and maxCoresPerPkg.
2253  nPackages = 1;
2254  nCoresPerPkg = 1;
2255  __kmp_nThreadsPerCore = 1;
2256  unsigned nCores = 1;
2257 
2258  unsigned pkgCt = 1; // to determine radii
2259  unsigned lastPkgId = threadInfo[0].pkgId;
2260  unsigned coreCt = 1;
2261  unsigned lastCoreId = threadInfo[0].coreId;
2262  unsigned threadCt = 1;
2263  unsigned lastThreadId = threadInfo[0].threadId;
2264 
2265  // intra-pkg consistency checks
2266  unsigned prevMaxCoresPerPkg = threadInfo[0].maxCoresPerPkg;
2267  unsigned prevMaxThreadsPerPkg = threadInfo[0].maxThreadsPerPkg;
2268 
2269  for (i = 1; i < nApics; i++) {
2270  if (threadInfo[i].pkgId != lastPkgId) {
2271  nCores++;
2272  pkgCt++;
2273  lastPkgId = threadInfo[i].pkgId;
2274  if ((int)coreCt > nCoresPerPkg)
2275  nCoresPerPkg = coreCt;
2276  coreCt = 1;
2277  lastCoreId = threadInfo[i].coreId;
2278  if ((int)threadCt > __kmp_nThreadsPerCore)
2279  __kmp_nThreadsPerCore = threadCt;
2280  threadCt = 1;
2281  lastThreadId = threadInfo[i].threadId;
2282 
2283  // This is a different package, so go on to the next iteration without
2284  // doing any consistency checks. Reset the consistency check vars, though.
2285  prevMaxCoresPerPkg = threadInfo[i].maxCoresPerPkg;
2286  prevMaxThreadsPerPkg = threadInfo[i].maxThreadsPerPkg;
2287  continue;
2288  }
2289 
2290  if (threadInfo[i].coreId != lastCoreId) {
2291  nCores++;
2292  coreCt++;
2293  lastCoreId = threadInfo[i].coreId;
2294  if ((int)threadCt > __kmp_nThreadsPerCore)
2295  __kmp_nThreadsPerCore = threadCt;
2296  threadCt = 1;
2297  lastThreadId = threadInfo[i].threadId;
2298  } else if (threadInfo[i].threadId != lastThreadId) {
2299  threadCt++;
2300  lastThreadId = threadInfo[i].threadId;
2301  } else {
2302  __kmp_free(threadInfo);
2303  *msg_id = kmp_i18n_str_LegacyApicIDsNotUnique;
2304  return false;
2305  }
2306 
2307  // Check to make certain that the maxCoresPerPkg and maxThreadsPerPkg
2308  // fields agree for all the threads bound to a given package.
2309  if ((prevMaxCoresPerPkg != threadInfo[i].maxCoresPerPkg) ||
2310  (prevMaxThreadsPerPkg != threadInfo[i].maxThreadsPerPkg)) {
2311  __kmp_free(threadInfo);
2312  *msg_id = kmp_i18n_str_InconsistentCpuidInfo;
2313  return false;
2314  }
2315  }
2316  // When affinity is off, this routine will still be called to set
2317  // __kmp_ncores, as well as __kmp_nThreadsPerCore, nCoresPerPkg, & nPackages.
2318  // Make sure all these vars are set correctly
2319  nPackages = pkgCt;
2320  if ((int)coreCt > nCoresPerPkg)
2321  nCoresPerPkg = coreCt;
2322  if ((int)threadCt > __kmp_nThreadsPerCore)
2323  __kmp_nThreadsPerCore = threadCt;
2324  __kmp_ncores = nCores;
2325  KMP_DEBUG_ASSERT(nApics == (unsigned)__kmp_avail_proc);
2326 
2327  // Now that we've determined the number of packages, the number of cores per
2328  // package, and the number of threads per core, we can construct the data
2329  // structure that is to be returned.
2330  int idx = 0;
2331  int pkgLevel = 0;
2332  int coreLevel = 1;
2333  int threadLevel = 2;
2335  int depth = (pkgLevel >= 0) + (coreLevel >= 0) + (threadLevel >= 0);
2336  kmp_hw_t types[3];
2337  if (pkgLevel >= 0)
2338  types[idx++] = KMP_HW_SOCKET;
2339  if (coreLevel >= 0)
2340  types[idx++] = KMP_HW_CORE;
2341  if (threadLevel >= 0)
2342  types[idx++] = KMP_HW_THREAD;
2343 
2344  KMP_ASSERT(depth > 0);
2345  __kmp_topology = kmp_topology_t::allocate(nApics, depth, types);
2346 
2347  for (i = 0; i < nApics; ++i) {
2348  idx = 0;
2349  unsigned os = threadInfo[i].osId;
2350  kmp_hw_thread_t &hw_thread = __kmp_topology->at(i);
2351  hw_thread.clear();
2352 
2353  if (pkgLevel >= 0) {
2354  hw_thread.ids[idx++] = threadInfo[i].pkgId;
2355  }
2356  if (coreLevel >= 0) {
2357  hw_thread.ids[idx++] = threadInfo[i].coreId;
2358  }
2359  if (threadLevel >= 0) {
2360  hw_thread.ids[idx++] = threadInfo[i].threadId;
2361  }
2362  hw_thread.os_id = os;
2363  }
2364 
2365  __kmp_free(threadInfo);
2366  __kmp_topology->sort_ids();
2367  if (!__kmp_topology->check_ids()) {
2368  kmp_topology_t::deallocate(__kmp_topology);
2369  __kmp_topology = nullptr;
2370  *msg_id = kmp_i18n_str_LegacyApicIDsNotUnique;
2371  return false;
2372  }
2373  return true;
2374 }
2375 
2376 // Hybrid cpu detection using CPUID.1A
2377 // Thread should be pinned to processor already
2378 static void __kmp_get_hybrid_info(kmp_hw_core_type_t *type, int *efficiency,
2379  unsigned *native_model_id) {
2380  kmp_cpuid buf;
2381  __kmp_x86_cpuid(0x1a, 0, &buf);
2382  *type = (kmp_hw_core_type_t)__kmp_extract_bits<24, 31>(buf.eax);
2383  switch (*type) {
2384  case KMP_HW_CORE_TYPE_ATOM:
2385  *efficiency = 0;
2386  break;
2387  case KMP_HW_CORE_TYPE_CORE:
2388  *efficiency = 1;
2389  break;
2390  default:
2391  *efficiency = 0;
2392  }
2393  *native_model_id = __kmp_extract_bits<0, 23>(buf.eax);
2394 }
2395 
2396 // Intel(R) microarchitecture code name Nehalem, Dunnington and later
2397 // architectures support a newer interface for specifying the x2APIC Ids,
2398 // based on CPUID.B or CPUID.1F
2399 /*
2400  * CPUID.B or 1F, Input ECX (sub leaf # aka level number)
2401     Bits           Bits           Bits            Bits
2402    31-16           15-8           7-5             4-0
2403 ---+-----------+--------------+-------------+-----------------+
2404 EAX| reserved  |   reserved   |   reserved  |  Bits to Shift  |
2405 ---+-----------+--------------+-------------+-----------------+
2406 EBX| reserved  |  Num logical processors at level (16 bits)   |
2407 ---+-----------+--------------+-------------------------------+
2408 ECX| reserved  |  Level Type  |     Level Number (8 bits)     |
2409 ---+-----------+--------------+-------------------------------+
2410 EDX|                    X2APIC ID (32 bits)                   |
2411 ---+-----------------------------------------------------------+
2412 */
2413 
2414 enum {
2415  INTEL_LEVEL_TYPE_INVALID = 0, // Package level
2416  INTEL_LEVEL_TYPE_SMT = 1,
2417  INTEL_LEVEL_TYPE_CORE = 2,
2418  INTEL_LEVEL_TYPE_MODULE = 3,
2419  INTEL_LEVEL_TYPE_TILE = 4,
2420  INTEL_LEVEL_TYPE_DIE = 5,
2421  INTEL_LEVEL_TYPE_LAST = 6,
2422 };
2423 
2424 struct cpuid_level_info_t {
2425  unsigned level_type, mask, mask_width, nitems, cache_mask;
2426 };
2427 
2428 static kmp_hw_t __kmp_intel_type_2_topology_type(int intel_type) {
2429  switch (intel_type) {
2430  case INTEL_LEVEL_TYPE_INVALID:
2431  return KMP_HW_SOCKET;
2432  case INTEL_LEVEL_TYPE_SMT:
2433  return KMP_HW_THREAD;
2434  case INTEL_LEVEL_TYPE_CORE:
2435  return KMP_HW_CORE;
2436  case INTEL_LEVEL_TYPE_TILE:
2437  return KMP_HW_TILE;
2438  case INTEL_LEVEL_TYPE_MODULE:
2439  return KMP_HW_MODULE;
2440  case INTEL_LEVEL_TYPE_DIE:
2441  return KMP_HW_DIE;
2442  }
2443  return KMP_HW_UNKNOWN;
2444 }
2445 
2446 // This function takes the topology leaf, a levels array to store the levels
2447 // detected and a bitmap of the known levels.
2448 // Returns the number of levels in the topology
2449 static unsigned
2450 __kmp_x2apicid_get_levels(int leaf,
2451  cpuid_level_info_t levels[INTEL_LEVEL_TYPE_LAST],
2452  kmp_uint64 known_levels) {
2453  unsigned level, levels_index;
2454  unsigned level_type, mask_width, nitems;
2455  kmp_cpuid buf;
2456 
2457  // New algorithm has known topology layers act as highest unknown topology
2458  // layers when unknown topology layers exist.
2459  // e.g., Suppose layers were SMT <X> CORE <Y> <Z> PACKAGE, where <X> <Y> <Z>
2460  // are unknown topology layers, Then SMT will take the characteristics of
2461  // (SMT x <X>) and CORE will take the characteristics of (CORE x <Y> x <Z>).
2462  // This eliminates unknown portions of the topology while still keeping the
2463  // correct structure.
2464  level = levels_index = 0;
2465  do {
2466  __kmp_x86_cpuid(leaf, level, &buf);
2467  level_type = __kmp_extract_bits<8, 15>(buf.ecx);
2468  mask_width = __kmp_extract_bits<0, 4>(buf.eax);
2469  nitems = __kmp_extract_bits<0, 15>(buf.ebx);
2470  if (level_type != INTEL_LEVEL_TYPE_INVALID && nitems == 0)
2471  return 0;
2472 
2473  if (known_levels & (1ull << level_type)) {
2474  // Add a new level to the topology
2475  KMP_ASSERT(levels_index < INTEL_LEVEL_TYPE_LAST);
2476  levels[levels_index].level_type = level_type;
2477  levels[levels_index].mask_width = mask_width;
2478  levels[levels_index].nitems = nitems;
2479  levels_index++;
2480  } else {
2481  // If it is an unknown level, then logically move the previous layer up
2482  if (levels_index > 0) {
2483  levels[levels_index - 1].mask_width = mask_width;
2484  levels[levels_index - 1].nitems = nitems;
2485  }
2486  }
2487  level++;
2488  } while (level_type != INTEL_LEVEL_TYPE_INVALID);
2489 
2490  // Ensure the INTEL_LEVEL_TYPE_INVALID (Socket) layer isn't first
2491  if (levels_index == 0 || levels[0].level_type == INTEL_LEVEL_TYPE_INVALID)
2492  return 0;
2493 
2494  // Set the masks to & with apicid
2495  for (unsigned i = 0; i < levels_index; ++i) {
2496  if (levels[i].level_type != INTEL_LEVEL_TYPE_INVALID) {
2497  levels[i].mask = ~((-1) << levels[i].mask_width);
2498  levels[i].cache_mask = (-1) << levels[i].mask_width;
2499  for (unsigned j = 0; j < i; ++j)
2500  levels[i].mask ^= levels[j].mask;
2501  } else {
2502  KMP_DEBUG_ASSERT(i > 0);
2503  levels[i].mask = (-1) << levels[i - 1].mask_width;
2504  levels[i].cache_mask = 0;
2505  }
2506  }
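  // Illustrative example: with SMT mask_width == 1 and CORE mask_width == 6
  // (mask widths are cumulative), the SMT mask is 0x1, the CORE mask is
  // (~((-1) << 6)) ^ 0x1 == 0x3E, and the package (INTEL_LEVEL_TYPE_INVALID)
  // mask is (-1) << 6, so each level selects a disjoint bit range of the
  // APIC id.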
2507  return levels_index;
2508 }
2509 
2510 static bool __kmp_affinity_create_x2apicid_map(kmp_i18n_id_t *const msg_id) {
2511 
2512  cpuid_level_info_t levels[INTEL_LEVEL_TYPE_LAST];
2513  kmp_hw_t types[INTEL_LEVEL_TYPE_LAST];
2514  unsigned levels_index;
2515  kmp_cpuid buf;
2516  kmp_uint64 known_levels;
2517  int topology_leaf, highest_leaf, apic_id;
2518  int num_leaves;
2519  static int leaves[] = {0, 0};
2520 
2521  kmp_i18n_id_t leaf_message_id;
2522 
2523  KMP_BUILD_ASSERT(sizeof(known_levels) * CHAR_BIT > KMP_HW_LAST);
2524 
2525  *msg_id = kmp_i18n_null;
2526  if (__kmp_affinity.flags.verbose) {
2527  KMP_INFORM(AffInfoStr, "KMP_AFFINITY", KMP_I18N_STR(Decodingx2APIC));
2528  }
2529 
2530  // Figure out the known topology levels
2531  known_levels = 0ull;
2532  for (int i = 0; i < INTEL_LEVEL_TYPE_LAST; ++i) {
2533  if (__kmp_intel_type_2_topology_type(i) != KMP_HW_UNKNOWN) {
2534  known_levels |= (1ull << i);
2535  }
2536  }
2537 
2538  // Get the highest cpuid leaf supported
2539  __kmp_x86_cpuid(0, 0, &buf);
2540  highest_leaf = buf.eax;
2541 
2542  // If a specific topology method was requested, only allow that specific leaf
2543  // otherwise, try both leaves 31 and 11 in that order
2544  num_leaves = 0;
2545  if (__kmp_affinity_top_method == affinity_top_method_x2apicid) {
2546  num_leaves = 1;
2547  leaves[0] = 11;
2548  leaf_message_id = kmp_i18n_str_NoLeaf11Support;
2549  } else if (__kmp_affinity_top_method == affinity_top_method_x2apicid_1f) {
2550  num_leaves = 1;
2551  leaves[0] = 31;
2552  leaf_message_id = kmp_i18n_str_NoLeaf31Support;
2553  } else {
2554  num_leaves = 2;
2555  leaves[0] = 31;
2556  leaves[1] = 11;
2557  leaf_message_id = kmp_i18n_str_NoLeaf11Support;
2558  }
2559 
2560  // Check to see if cpuid leaf 31 or 11 is supported.
2561  __kmp_nThreadsPerCore = nCoresPerPkg = nPackages = 1;
2562  topology_leaf = -1;
2563  for (int i = 0; i < num_leaves; ++i) {
2564  int leaf = leaves[i];
2565  if (highest_leaf < leaf)
2566  continue;
2567  __kmp_x86_cpuid(leaf, 0, &buf);
2568  if (buf.ebx == 0)
2569  continue;
2570  topology_leaf = leaf;
2571  levels_index = __kmp_x2apicid_get_levels(leaf, levels, known_levels);
2572  if (levels_index == 0)
2573  continue;
2574  break;
2575  }
2576  if (topology_leaf == -1 || levels_index == 0) {
2577  *msg_id = leaf_message_id;
2578  return false;
2579  }
2580  KMP_ASSERT(levels_index <= INTEL_LEVEL_TYPE_LAST);
2581 
2582  // The algorithm used starts by setting the affinity to each available thread
2583  // and retrieving info from the cpuid instruction, so if we are not capable of
2584  // calling __kmp_get_system_affinity() and __kmp_set_system_affinity(), then
2585  // we need to do something else - use the defaults that we calculated from
2586  // issuing cpuid without binding to each proc.
2587  if (!KMP_AFFINITY_CAPABLE()) {
2588  // Hack to try and infer the machine topology using only the data
2589  // available from cpuid on the current thread, and __kmp_xproc.
2590  KMP_ASSERT(__kmp_affinity.type == affinity_none);
2591  for (unsigned i = 0; i < levels_index; ++i) {
2592  if (levels[i].level_type == INTEL_LEVEL_TYPE_SMT) {
2593  __kmp_nThreadsPerCore = levels[i].nitems;
2594  } else if (levels[i].level_type == INTEL_LEVEL_TYPE_CORE) {
2595  nCoresPerPkg = levels[i].nitems;
2596  }
2597  }
2598  __kmp_ncores = __kmp_xproc / __kmp_nThreadsPerCore;
2599  nPackages = (__kmp_xproc + nCoresPerPkg - 1) / nCoresPerPkg;
2600  return true;
2601  }
2602 
2603  // Allocate the data structure to be returned.
2604  int depth = levels_index;
2605  for (int i = depth - 1, j = 0; i >= 0; --i, ++j)
2606  types[j] = __kmp_intel_type_2_topology_type(levels[i].level_type);
2607  __kmp_topology =
2608  kmp_topology_t::allocate(__kmp_avail_proc, levels_index, types);
2609 
2610  // Insert equivalent cache types if they exist
2611  kmp_cache_info_t cache_info;
2612  for (size_t i = 0; i < cache_info.get_depth(); ++i) {
2613  const kmp_cache_info_t::info_t &info = cache_info[i];
2614  unsigned cache_mask = info.mask;
2615  unsigned cache_level = info.level;
2616  for (unsigned j = 0; j < levels_index; ++j) {
2617  unsigned hw_cache_mask = levels[j].cache_mask;
2618  kmp_hw_t cache_type = kmp_cache_info_t::get_topology_type(cache_level);
2619  if (hw_cache_mask == cache_mask && j < levels_index - 1) {
2620  kmp_hw_t type =
2621  __kmp_intel_type_2_topology_type(levels[j + 1].level_type);
2622  __kmp_topology->set_equivalent_type(cache_type, type);
2623  }
2624  }
2625  }
2626 
2627  // From here on, we can assume that it is safe to call
2628  // __kmp_get_system_affinity() and __kmp_set_system_affinity(), even if
2629  // __kmp_affinity.type = affinity_none.
2630 
2631  // Save the affinity mask for the current thread.
2632  kmp_affinity_raii_t previous_affinity;
2633 
2634  // Run through each of the available contexts, binding the current thread
2635  // to it, and obtaining the pertinent information using the cpuid instr.
2636  unsigned int proc;
2637  int hw_thread_index = 0;
2638  KMP_CPU_SET_ITERATE(proc, __kmp_affin_fullMask) {
2639  cpuid_level_info_t my_levels[INTEL_LEVEL_TYPE_LAST];
2640  unsigned my_levels_index;
2641 
2642  // Skip this proc if it is not included in the machine model.
2643  if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
2644  continue;
2645  }
2646  KMP_DEBUG_ASSERT(hw_thread_index < __kmp_avail_proc);
2647 
2648  __kmp_affinity_dispatch->bind_thread(proc);
2649 
2650  // Get this thread's x2APIC Id from EDX of the topology leaf
2651  __kmp_x86_cpuid(topology_leaf, 0, &buf);
2652  apic_id = buf.edx;
2653  kmp_hw_thread_t &hw_thread = __kmp_topology->at(hw_thread_index);
2654  my_levels_index =
2655  __kmp_x2apicid_get_levels(topology_leaf, my_levels, known_levels);
2656  if (my_levels_index == 0 || my_levels_index != levels_index) {
2657  *msg_id = kmp_i18n_str_InvalidCpuidInfo;
2658  return false;
2659  }
2660  hw_thread.clear();
2661  hw_thread.os_id = proc;
2662  // Put in topology information
2663  for (unsigned j = 0, idx = depth - 1; j < my_levels_index; ++j, --idx) {
2664  hw_thread.ids[idx] = apic_id & my_levels[j].mask;
2665  if (j > 0) {
2666  hw_thread.ids[idx] >>= my_levels[j - 1].mask_width;
2667  }
2668  }
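    // Continuing the hypothetical masks above: for apic_id == 0b101101 the
    // thread id is 0b101101 & 0x1 == 1, the core id is
    // (0b101101 & 0x3E) >> 1 == 22, and the socket id is
    // (0b101101 & ((-1) << 6)) >> 6 == 0.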
2669  // Hybrid information
2670  if (__kmp_is_hybrid_cpu() && highest_leaf >= 0x1a) {
2671  kmp_hw_core_type_t type;
2672  unsigned native_model_id;
2673  int efficiency;
2674  __kmp_get_hybrid_info(&type, &efficiency, &native_model_id);
2675  hw_thread.attrs.set_core_type(type);
2676  hw_thread.attrs.set_core_eff(efficiency);
2677  }
2678  hw_thread_index++;
2679  }
2680  KMP_ASSERT(hw_thread_index > 0);
2681  __kmp_topology->sort_ids();
2682  if (!__kmp_topology->check_ids()) {
2683  kmp_topology_t::deallocate(__kmp_topology);
2684  __kmp_topology = nullptr;
2685  *msg_id = kmp_i18n_str_x2ApicIDsNotUnique;
2686  return false;
2687  }
2688  return true;
2689 }
2690 #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
2691 
2692 #define osIdIndex 0
2693 #define threadIdIndex 1
2694 #define coreIdIndex 2
2695 #define pkgIdIndex 3
2696 #define nodeIdIndex 4
2697 
2698 typedef unsigned *ProcCpuInfo;
2699 static unsigned maxIndex = pkgIdIndex;
2700 
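// Compare records starting at the most significant field (the highest index:
// node, then package, core, thread), so qsort groups OS procs by package,
// then core, then thread, and finally by osId.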
2701 static int __kmp_affinity_cmp_ProcCpuInfo_phys_id(const void *a,
2702  const void *b) {
2703  unsigned i;
2704  const unsigned *aa = *(unsigned *const *)a;
2705  const unsigned *bb = *(unsigned *const *)b;
2706  for (i = maxIndex;; i--) {
2707  if (aa[i] < bb[i])
2708  return -1;
2709  if (aa[i] > bb[i])
2710  return 1;
2711  if (i == osIdIndex)
2712  break;
2713  }
2714  return 0;
2715 }
2716 
2717 #if KMP_USE_HIER_SCHED
2718 // Set the array sizes for the hierarchy layers
2719 static void __kmp_dispatch_set_hierarchy_values() {
2720  // Set the maximum number of L1's to number of cores
2721  // Set the maximum number of L2's to either the number of cores / 2 for
2722  // Intel(R) Xeon Phi(TM) coprocessors formerly codenamed Knights Landing,
2723  // or the number of cores for Intel(R) Xeon(R) processors
2724  // Set the maximum number of NUMA nodes and L3's to number of packages
2725  __kmp_hier_max_units[kmp_hier_layer_e::LAYER_THREAD + 1] =
2726  nPackages * nCoresPerPkg * __kmp_nThreadsPerCore;
2727  __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L1 + 1] = __kmp_ncores;
2728 #if KMP_ARCH_X86_64 && (KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_WINDOWS) && \
2729  KMP_MIC_SUPPORTED
2730  if (__kmp_mic_type >= mic3)
2731  __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L2 + 1] = __kmp_ncores / 2;
2732  else
2733 #endif // KMP_ARCH_X86_64 && (KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_WINDOWS) && KMP_MIC_SUPPORTED
2734  __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L2 + 1] = __kmp_ncores;
2735  __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L3 + 1] = nPackages;
2736  __kmp_hier_max_units[kmp_hier_layer_e::LAYER_NUMA + 1] = nPackages;
2737  __kmp_hier_max_units[kmp_hier_layer_e::LAYER_LOOP + 1] = 1;
2738  // Set the number of threads per unit
2739  // Number of hardware threads per L1/L2/L3/NUMA/LOOP
2740  __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_THREAD + 1] = 1;
2741  __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L1 + 1] =
2742  __kmp_nThreadsPerCore;
2743 #if KMP_ARCH_X86_64 && (KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_WINDOWS) && \
2744  KMP_MIC_SUPPORTED
2745  if (__kmp_mic_type >= mic3)
2746  __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L2 + 1] =
2747  2 * __kmp_nThreadsPerCore;
2748  else
2749 #endif // KMP_ARCH_X86_64 && (KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_WINDOWS) && KMP_MIC_SUPPORTED
2750  __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L2 + 1] =
2751  __kmp_nThreadsPerCore;
2752  __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L3 + 1] =
2753  nCoresPerPkg * __kmp_nThreadsPerCore;
2754  __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_NUMA + 1] =
2755  nCoresPerPkg * __kmp_nThreadsPerCore;
2756  __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_LOOP + 1] =
2757  nPackages * nCoresPerPkg * __kmp_nThreadsPerCore;
2758 }
2759 
2760 // Return the index into the hierarchy for this tid and layer type (L1, L2, etc)
2761 // i.e., this thread's L1 or this thread's L2, etc.
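// For example, on 2 packages x 8 cores x 2 SMT (32 hw threads),
// LAYER_L1 has __kmp_hier_threads_per == 2 and __kmp_hier_max_units == 16,
// so tid 11 maps to L1 index (11 / 2) % 16 == 5.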
2762 int __kmp_dispatch_get_index(int tid, kmp_hier_layer_e type) {
2763  int index = type + 1;
2764  int num_hw_threads = __kmp_hier_max_units[kmp_hier_layer_e::LAYER_THREAD + 1];
2765  KMP_DEBUG_ASSERT(type != kmp_hier_layer_e::LAYER_LAST);
2766  if (type == kmp_hier_layer_e::LAYER_THREAD)
2767  return tid;
2768  else if (type == kmp_hier_layer_e::LAYER_LOOP)
2769  return 0;
2770  KMP_DEBUG_ASSERT(__kmp_hier_max_units[index] != 0);
2771  if (tid >= num_hw_threads)
2772  tid = tid % num_hw_threads;
2773  return (tid / __kmp_hier_threads_per[index]) % __kmp_hier_max_units[index];
2774 }
2775 
2776 // Return the number of t1's per t2
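// e.g., with 2 hw threads per L1 and 16 per L3 (8 cores sharing an L3),
// __kmp_dispatch_get_t1_per_t2(LAYER_L1, LAYER_L3) == 16 / 2 == 8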
2777 int __kmp_dispatch_get_t1_per_t2(kmp_hier_layer_e t1, kmp_hier_layer_e t2) {
2778  int i1 = t1 + 1;
2779  int i2 = t2 + 1;
2780  KMP_DEBUG_ASSERT(i1 <= i2);
2781  KMP_DEBUG_ASSERT(t1 != kmp_hier_layer_e::LAYER_LAST);
2782  KMP_DEBUG_ASSERT(t2 != kmp_hier_layer_e::LAYER_LAST);
2783  KMP_DEBUG_ASSERT(__kmp_hier_threads_per[i1] != 0);
2784  // (nthreads/t2) / (nthreads/t1) = t1 / t2
2785  return __kmp_hier_threads_per[i2] / __kmp_hier_threads_per[i1];
2786 }
2787 #endif // KMP_USE_HIER_SCHED
2788 
2789 static inline const char *__kmp_cpuinfo_get_filename() {
2790  const char *filename;
2791  if (__kmp_cpuinfo_file != nullptr)
2792  filename = __kmp_cpuinfo_file;
2793  else
2794  filename = "/proc/cpuinfo";
2795  return filename;
2796 }
2797 
2798 static inline const char *__kmp_cpuinfo_get_envvar() {
2799  const char *envvar = nullptr;
2800  if (__kmp_cpuinfo_file != nullptr)
2801  envvar = "KMP_CPUINFO_FILE";
2802  return envvar;
2803 }
2804 
2805 // Parse /proc/cpuinfo (or an alternate file in the same format) to obtain the
2806 // affinity map.
2807 static bool __kmp_affinity_create_cpuinfo_map(int *line,
2808  kmp_i18n_id_t *const msg_id) {
2809  const char *filename = __kmp_cpuinfo_get_filename();
2810  const char *envvar = __kmp_cpuinfo_get_envvar();
2811  *msg_id = kmp_i18n_null;
2812 
2813  if (__kmp_affinity.flags.verbose) {
2814  KMP_INFORM(AffParseFilename, "KMP_AFFINITY", filename);
2815  }
2816 
2817  kmp_safe_raii_file_t f(filename, "r", envvar);
2818 
2819  // Scan the file first to count the number of "processor" (osId) fields
2820  // and find the highest value of <n> for a node_<n> field.
2821  char buf[256];
2822  unsigned num_records = 0;
2823  while (!feof(f)) {
2824  buf[sizeof(buf) - 1] = 1;
2825  if (!fgets(buf, sizeof(buf), f)) {
2826  // Read errors presumably because of EOF
2827  break;
2828  }
2829 
2830  char s1[] = "processor";
2831  if (strncmp(buf, s1, sizeof(s1) - 1) == 0) {
2832  num_records++;
2833  continue;
2834  }
2835 
2836  // FIXME - this will match "node_<n> <garbage>"
2837  unsigned level;
2838  if (KMP_SSCANF(buf, "node_%u id", &level) == 1) {
2839  // validate the input first:
2840  if (level > (unsigned)__kmp_xproc) { // level is too big
2841  level = __kmp_xproc;
2842  }
2843  if (nodeIdIndex + level >= maxIndex) {
2844  maxIndex = nodeIdIndex + level;
2845  }
2846  continue;
2847  }
2848  }
2849 
2850  // Check for empty file / no valid processor records, or too many. The number
2851  // of records can't exceed the number of valid bits in the affinity mask.
2852  if (num_records == 0) {
2853  *msg_id = kmp_i18n_str_NoProcRecords;
2854  return false;
2855  }
2856  if (num_records > (unsigned)__kmp_xproc) {
2857  *msg_id = kmp_i18n_str_TooManyProcRecords;
2858  return false;
2859  }
2860 
2861  // Set the file pointer back to the beginning, so that we can scan the file
2862  // again, this time performing a full parse of the data. Allocate a vector of
2863  // ProcCpuInfo object, where we will place the data. Adding an extra element
2864  // at the end allows us to remove a lot of extra checks for termination
2865  // conditions.
2866  if (fseek(f, 0, SEEK_SET) != 0) {
2867  *msg_id = kmp_i18n_str_CantRewindCpuinfo;
2868  return false;
2869  }
2870 
2871  // Allocate the array of records to store the proc info in. The dummy
2872  // element at the end makes the logic in filling them out easier to code.
2873  unsigned **threadInfo =
2874  (unsigned **)__kmp_allocate((num_records + 1) * sizeof(unsigned *));
2875  unsigned i;
2876  for (i = 0; i <= num_records; i++) {
2877  threadInfo[i] =
2878  (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
2879  }
2880 
2881 #define CLEANUP_THREAD_INFO \
2882  for (i = 0; i <= num_records; i++) { \
2883  __kmp_free(threadInfo[i]); \
2884  } \
2885  __kmp_free(threadInfo);
2886 
2887  // A value of UINT_MAX means that we didn't find the field
2888  unsigned __index;
2889 
2890 #define INIT_PROC_INFO(p) \
2891  for (__index = 0; __index <= maxIndex; __index++) { \
2892  (p)[__index] = UINT_MAX; \
2893  }
2894 
2895  for (i = 0; i <= num_records; i++) {
2896  INIT_PROC_INFO(threadInfo[i]);
2897  }
2898 
2899  unsigned num_avail = 0;
2900  *line = 0;
2901  while (!feof(f)) {
2902  // Create an inner scoping level, so that all the goto targets at the end of
2903  // the loop appear in an outer scoping level. This avoids warnings about
2904  // jumping past an initialization to a target in the same block.
2905  {
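      // Plant a sentinel in the last byte of the buffer; fgets() overwrites
      // it with '\0' only when a line fills the whole buffer, which is how
      // over-long lines are detected below.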
2906  buf[sizeof(buf) - 1] = 1;
2907  bool long_line = false;
2908  if (!fgets(buf, sizeof(buf), f)) {
2909  // Read errors presumably because of EOF
2910  // If there is valid data in threadInfo[num_avail], then fake
2911  // a blank line to ensure that the last record gets parsed.
2912  bool valid = false;
2913  for (i = 0; i <= maxIndex; i++) {
2914  if (threadInfo[num_avail][i] != UINT_MAX) {
2915  valid = true;
2916  }
2917  }
2918  if (!valid) {
2919  break;
2920  }
2921  buf[0] = 0;
2922  } else if (!buf[sizeof(buf) - 1]) {
2923  // The line is longer than the buffer. Set a flag and don't
2924  // emit an error if we were going to ignore the line, anyway.
2925  long_line = true;
2926 
2927 #define CHECK_LINE \
2928  if (long_line) { \
2929  CLEANUP_THREAD_INFO; \
2930  *msg_id = kmp_i18n_str_LongLineCpuinfo; \
2931  return false; \
2932  }
2933  }
2934  (*line)++;
2935 
2936 #if KMP_ARCH_LOONGARCH64
2937  // The parsing logic of /proc/cpuinfo in this function highly depends on
2938  // the blank lines between each processor info block. But on LoongArch a
2939  // blank line exists before the first processor info block (i.e. after the
2940  // "system type" line). This blank line was added because the "system
2941  // type" line is unrelated to any of the CPUs. We must skip this line so
2942  // that the original logic works on LoongArch.
2943  if (*buf == '\n' && *line == 2)
2944  continue;
2945 #endif
2946 
2947  char s1[] = "processor";
2948  if (strncmp(buf, s1, sizeof(s1) - 1) == 0) {
2949  CHECK_LINE;
2950  char *p = strchr(buf + sizeof(s1) - 1, ':');
2951  unsigned val;
2952  if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
2953  goto no_val;
2954  if (threadInfo[num_avail][osIdIndex] != UINT_MAX)
2955 #if KMP_ARCH_AARCH64
2956  // Handle the old AArch64 /proc/cpuinfo layout differently,
2957  // it contains all of the 'processor' entries listed in a
2958  // single 'Processor' section, therefore the normal looking
2959  // for duplicates in that section will always fail.
2960  num_avail++;
2961 #else
2962  goto dup_field;
2963 #endif
2964  threadInfo[num_avail][osIdIndex] = val;
2965 #if KMP_OS_LINUX && !(KMP_ARCH_X86 || KMP_ARCH_X86_64)
2966  char path[256];
2967  KMP_SNPRINTF(
2968  path, sizeof(path),
2969  "/sys/devices/system/cpu/cpu%u/topology/physical_package_id",
2970  threadInfo[num_avail][osIdIndex]);
2971  __kmp_read_from_file(path, "%u", &threadInfo[num_avail][pkgIdIndex]);
2972 
2973  KMP_SNPRINTF(path, sizeof(path),
2974  "/sys/devices/system/cpu/cpu%u/topology/core_id",
2975  threadInfo[num_avail][osIdIndex]);
2976  __kmp_read_from_file(path, "%u", &threadInfo[num_avail][coreIdIndex]);
2977  continue;
2978 #else
2979  }
2980  char s2[] = "physical id";
2981  if (strncmp(buf, s2, sizeof(s2) - 1) == 0) {
2982  CHECK_LINE;
2983  char *p = strchr(buf + sizeof(s2) - 1, ':');
2984  unsigned val;
2985  if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
2986  goto no_val;
2987  if (threadInfo[num_avail][pkgIdIndex] != UINT_MAX)
2988  goto dup_field;
2989  threadInfo[num_avail][pkgIdIndex] = val;
2990  continue;
2991  }
2992  char s3[] = "core id";
2993  if (strncmp(buf, s3, sizeof(s3) - 1) == 0) {
2994  CHECK_LINE;
2995  char *p = strchr(buf + sizeof(s3) - 1, ':');
2996  unsigned val;
2997  if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
2998  goto no_val;
2999  if (threadInfo[num_avail][coreIdIndex] != UINT_MAX)
3000  goto dup_field;
3001  threadInfo[num_avail][coreIdIndex] = val;
3002  continue;
3003 #endif // KMP_OS_LINUX && !(KMP_ARCH_X86 || KMP_ARCH_X86_64)
3004  }
3005  char s4[] = "thread id";
3006  if (strncmp(buf, s4, sizeof(s4) - 1) == 0) {
3007  CHECK_LINE;
3008  char *p = strchr(buf + sizeof(s4) - 1, ':');
3009  unsigned val;
3010  if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
3011  goto no_val;
3012  if (threadInfo[num_avail][threadIdIndex] != UINT_MAX)
3013  goto dup_field;
3014  threadInfo[num_avail][threadIdIndex] = val;
3015  continue;
3016  }
3017  unsigned level;
3018  if (KMP_SSCANF(buf, "node_%u id", &level) == 1) {
3019  CHECK_LINE;
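      // Note: sizeof(s4) ("thread id") is reused as the offset here; the
      // "node_<n> id" token is at least as long, so the ':' separator still
      // lies at or beyond that offset.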
3020  char *p = strchr(buf + sizeof(s4) - 1, ':');
3021  unsigned val;
3022  if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
3023  goto no_val;
3024  // validate the input before using level:
3025  if (level > (unsigned)__kmp_xproc) { // level is too big
3026  level = __kmp_xproc;
3027  }
3028  if (threadInfo[num_avail][nodeIdIndex + level] != UINT_MAX)
3029  goto dup_field;
3030  threadInfo[num_avail][nodeIdIndex + level] = val;
3031  continue;
3032  }
3033 
3034  // We didn't recognize the leading token on the line. There are lots of
3035  // leading tokens that we don't recognize - if the line isn't empty, go on
3036  // to the next line.
3037  if ((*buf != 0) && (*buf != '\n')) {
3038  // If the line is longer than the buffer, read characters
3039  // until we find a newline.
3040  if (long_line) {
3041  int ch;
3042  while (((ch = fgetc(f)) != EOF) && (ch != '\n'))
3043  ;
3044  }
3045  continue;
3046  }
3047 
3048  // A newline has signalled the end of the processor record.
3049  // Check that there aren't too many procs specified.
3050  if ((int)num_avail == __kmp_xproc) {
3051  CLEANUP_THREAD_INFO;
3052  *msg_id = kmp_i18n_str_TooManyEntries;
3053  return false;
3054  }
3055 
3056  // Check for missing fields. The osId field must be there, and we
3057  // currently require that the physical id field is specified, also.
3058  if (threadInfo[num_avail][osIdIndex] == UINT_MAX) {
3059  CLEANUP_THREAD_INFO;
3060  *msg_id = kmp_i18n_str_MissingProcField;
3061  return false;
3062  }
3063  if (threadInfo[0][pkgIdIndex] == UINT_MAX) {
3064  CLEANUP_THREAD_INFO;
3065  *msg_id = kmp_i18n_str_MissingPhysicalIDField;
3066  return false;
3067  }
3068 
3069  // Skip this proc if it is not included in the machine model.
3070  if (KMP_AFFINITY_CAPABLE() &&
3071  !KMP_CPU_ISSET(threadInfo[num_avail][osIdIndex],
3072  __kmp_affin_fullMask)) {
3073  INIT_PROC_INFO(threadInfo[num_avail]);
3074  continue;
3075  }
3076 
3077  // We have a successful parse of this proc's info.
3078  // Increment the counter, and prepare for the next proc.
3079  num_avail++;
3080  KMP_ASSERT(num_avail <= num_records);
3081  INIT_PROC_INFO(threadInfo[num_avail]);
3082  }
3083  continue;
3084 
3085  no_val:
3086  CLEANUP_THREAD_INFO;
3087  *msg_id = kmp_i18n_str_MissingValCpuinfo;
3088  return false;
3089 
3090  dup_field:
3091  CLEANUP_THREAD_INFO;
3092  *msg_id = kmp_i18n_str_DuplicateFieldCpuinfo;
3093  return false;
3094  }
3095  *line = 0;
3096 
3097 #if KMP_MIC && REDUCE_TEAM_SIZE
3098  unsigned teamSize = 0;
3099 #endif // KMP_MIC && REDUCE_TEAM_SIZE
3100 
3101  // check for num_records == __kmp_xproc ???
3102 
3103  // If the code were configured to omit the package level when there is only
3104  // a single package, the logic at the end of this routine wouldn't work if
3105  // there were only a single thread
3106  KMP_ASSERT(num_avail > 0);
3107  KMP_ASSERT(num_avail <= num_records);
3108 
3109  // Sort the threadInfo table by physical Id.
3110  qsort(threadInfo, num_avail, sizeof(*threadInfo),
3111  __kmp_affinity_cmp_ProcCpuInfo_phys_id);
3112 
3113  // The table is now sorted by pkgId / coreId / threadId, but we really don't
3114  // know the radix of any of the fields. pkgId's may be sparsely assigned among
3115  // the chips on a system. Although coreId's are usually assigned
3116  // [0 .. coresPerPkg-1] and threadId's are usually assigned
3117  // [0..threadsPerCore-1], we don't want to make any such assumptions.
3118  //
3119  // For that matter, we don't know what coresPerPkg and threadsPerCore (or the
3120  // total # packages) are at this point - we want to determine that now. We
3121  // only have an upper bound on the first two figures.
3122  unsigned *counts =
3123  (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
3124  unsigned *maxCt =
3125  (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
3126  unsigned *totals =
3127  (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
3128  unsigned *lastId =
3129  (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
3130 
3131  bool assign_thread_ids = false;
3132  unsigned threadIdCt;
3133  unsigned index;
3134 
3135 restart_radix_check:
3136  threadIdCt = 0;
3137 
3138  // Initialize the counter arrays with data from threadInfo[0].
3139  if (assign_thread_ids) {
3140  if (threadInfo[0][threadIdIndex] == UINT_MAX) {
3141  threadInfo[0][threadIdIndex] = threadIdCt++;
3142  } else if (threadIdCt <= threadInfo[0][threadIdIndex]) {
3143  threadIdCt = threadInfo[0][threadIdIndex] + 1;
3144  }
3145  }
3146  for (index = 0; index <= maxIndex; index++) {
3147  counts[index] = 1;
3148  maxCt[index] = 1;
3149  totals[index] = 1;
3150  lastId[index] = threadInfo[0][index];
3152  }
3153 
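  // For each level, counts[] tracks how many distinct ids have been seen
  // under the current parent, maxCt[] is the maximum of counts[] over all
  // parents so far, totals[] is the total number of nodes at that level, and
  // lastId[] is the id seen in the previous record.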
3154  // Run through the rest of the OS procs.
3155  for (i = 1; i < num_avail; i++) {
3156  // Find the most significant index whose id differs from the id for the
3157  // previous OS proc.
3158  for (index = maxIndex; index >= threadIdIndex; index--) {
3159  if (assign_thread_ids && (index == threadIdIndex)) {
3160  // Auto-assign the thread id field if it wasn't specified.
3161  if (threadInfo[i][threadIdIndex] == UINT_MAX) {
3162  threadInfo[i][threadIdIndex] = threadIdCt++;
3163  }
3164  // Apparently the thread id field was specified for some entries and not
3165  // others. Start the thread id counter off at the next higher thread id.
3166  else if (threadIdCt <= threadInfo[i][threadIdIndex]) {
3167  threadIdCt = threadInfo[i][threadIdIndex] + 1;
3168  }
3169  }
3170  if (threadInfo[i][index] != lastId[index]) {
3171  // Run through all indices which are less significant, and reset the
3172  // counts to 1. At all levels up to and including index, we need to
3173  // increment the totals and record the last id.
3174  unsigned index2;
3175  for (index2 = threadIdIndex; index2 < index; index2++) {
3176  totals[index2]++;
3177  if (counts[index2] > maxCt[index2]) {
3178  maxCt[index2] = counts[index2];
3179  }
3180  counts[index2] = 1;
3181  lastId[index2] = threadInfo[i][index2];
3182  }
3183  counts[index]++;
3184  totals[index]++;
3185  lastId[index] = threadInfo[i][index];
3186 
3187  if (assign_thread_ids && (index > threadIdIndex)) {
3188 
3189 #if KMP_MIC && REDUCE_TEAM_SIZE
3190  // The default team size is the total #threads in the machine
3191  // minus 1 thread for every core that has 3 or more threads.
3192  teamSize += (threadIdCt <= 2) ? (threadIdCt) : (threadIdCt - 1);
3193 #endif // KMP_MIC && REDUCE_TEAM_SIZE
3194 
3195  // Restart the thread counter, as we are on a new core.
3196  threadIdCt = 0;
3197 
3198  // Auto-assign the thread id field if it wasn't specified.
3199  if (threadInfo[i][threadIdIndex] == UINT_MAX) {
3200  threadInfo[i][threadIdIndex] = threadIdCt++;
3201  }
3202 
3203  // Apparently the thread id field was specified for some entries and
3204  // not others. Start the thread id counter off at the next higher
3205  // thread id.
3206  else if (threadIdCt <= threadInfo[i][threadIdIndex]) {
3207  threadIdCt = threadInfo[i][threadIdIndex] + 1;
3208  }
3209  }
3210  break;
3211  }
3212  }
3213  if (index < threadIdIndex) {
3214  // If thread ids were specified, it is an error if they are not unique.
3215  // Also, check that we haven't already restarted the loop (to be safe -
3216  // shouldn't need to).
3217  if ((threadInfo[i][threadIdIndex] != UINT_MAX) || assign_thread_ids) {
3218  __kmp_free(lastId);
3219  __kmp_free(totals);
3220  __kmp_free(maxCt);
3221  __kmp_free(counts);
3222  CLEANUP_THREAD_INFO;
3223  *msg_id = kmp_i18n_str_PhysicalIDsNotUnique;
3224  return false;
3225  }
3226 
3227  // If the thread ids were not specified and we see entries that are
3228  // duplicates, start the loop over and assign the thread ids manually.
3229  assign_thread_ids = true;
3230  goto restart_radix_check;
3231  }
3232  }
3233 
3234 #if KMP_MIC && REDUCE_TEAM_SIZE
3235  // The default team size is the total #threads in the machine
3236  // minus 1 thread for every core that has 3 or more threads.
3237  teamSize += (threadIdCt <= 2) ? (threadIdCt) : (threadIdCt - 1);
3238 #endif // KMP_MIC && REDUCE_TEAM_SIZE
3239 
3240  for (index = threadIdIndex; index <= maxIndex; index++) {
3241  if (counts[index] > maxCt[index]) {
3242  maxCt[index] = counts[index];
3243  }
3244  }
3245 
3246  __kmp_nThreadsPerCore = maxCt[threadIdIndex];
3247  nCoresPerPkg = maxCt[coreIdIndex];
3248  nPackages = totals[pkgIdIndex];
3249 
3250  // When affinity is off, this routine will still be called to set
3251  // __kmp_ncores, as well as __kmp_nThreadsPerCore, nCoresPerPkg, & nPackages.
3252  // Make sure all these vars are set correctly, and return now if affinity is
3253  // not enabled.
3254  __kmp_ncores = totals[coreIdIndex];
3255  if (!KMP_AFFINITY_CAPABLE()) {
3256  KMP_ASSERT(__kmp_affinity.type == affinity_none);
3257  return true;
3258  }
3259 
3260 #if KMP_MIC && REDUCE_TEAM_SIZE
3261  // Set the default team size.
3262  if ((__kmp_dflt_team_nth == 0) && (teamSize > 0)) {
3263  __kmp_dflt_team_nth = teamSize;
3264  KA_TRACE(20, ("__kmp_affinity_create_cpuinfo_map: setting "
3265  "__kmp_dflt_team_nth = %d\n",
3266  __kmp_dflt_team_nth));
3267  }
3268 #endif // KMP_MIC && REDUCE_TEAM_SIZE
3269 
3270  KMP_DEBUG_ASSERT(num_avail == (unsigned)__kmp_avail_proc);
3271 
3272  // Count the number of levels which have more nodes at that level than at the
3273  // parent's level (with an implicit root node above the top level).
3274  // This is equivalent to saying that there is at least one node at this level
3275  // which has a sibling. These levels are in the map, and the package level is
3276  // always in the map.
3277  bool *inMap = (bool *)__kmp_allocate((maxIndex + 1) * sizeof(bool));
3278  for (index = threadIdIndex; index < maxIndex; index++) {
3279  KMP_ASSERT(totals[index] >= totals[index + 1]);
3280  inMap[index] = (totals[index] > totals[index + 1]);
3281  }
3282  inMap[maxIndex] = (totals[maxIndex] > 1);
3283  inMap[pkgIdIndex] = true;
3284  inMap[coreIdIndex] = true;
3285  inMap[threadIdIndex] = true;
3286 
3287  int depth = 0;
3288  int idx = 0;
3289  kmp_hw_t types[KMP_HW_LAST];
3290  int pkgLevel = -1;
3291  int coreLevel = -1;
3292  int threadLevel = -1;
3293  for (index = threadIdIndex; index <= maxIndex; index++) {
3294  if (inMap[index]) {
3295  depth++;
3296  }
3297  }
3298  if (inMap[pkgIdIndex]) {
3299  pkgLevel = idx;
3300  types[idx++] = KMP_HW_SOCKET;
3301  }
3302  if (inMap[coreIdIndex]) {
3303  coreLevel = idx;
3304  types[idx++] = KMP_HW_CORE;
3305  }
3306  if (inMap[threadIdIndex]) {
3307  threadLevel = idx;
3308  types[idx++] = KMP_HW_THREAD;
3309  }
3310  KMP_ASSERT(depth > 0);
3311 
3312  // Construct the data structure that is to be returned.
3313  __kmp_topology = kmp_topology_t::allocate(num_avail, depth, types);
3314 
3315  for (i = 0; i < num_avail; ++i) {
3316  unsigned os = threadInfo[i][osIdIndex];
3317  int src_index;
3318  kmp_hw_thread_t &hw_thread = __kmp_topology->at(i);
3319  hw_thread.clear();
3320  hw_thread.os_id = os;
3321 
3322  idx = 0;
3323  for (src_index = maxIndex; src_index >= threadIdIndex; src_index--) {
3324  if (!inMap[src_index]) {
3325  continue;
3326  }
3327  if (src_index == pkgIdIndex) {
3328  hw_thread.ids[pkgLevel] = threadInfo[i][src_index];
3329  } else if (src_index == coreIdIndex) {
3330  hw_thread.ids[coreLevel] = threadInfo[i][src_index];
3331  } else if (src_index == threadIdIndex) {
3332  hw_thread.ids[threadLevel] = threadInfo[i][src_index];
3333  }
3334  }
3335  }
3336 
3337  __kmp_free(inMap);
3338  __kmp_free(lastId);
3339  __kmp_free(totals);
3340  __kmp_free(maxCt);
3341  __kmp_free(counts);
3342  CLEANUP_THREAD_INFO;
3343  __kmp_topology->sort_ids();
3344  if (!__kmp_topology->check_ids()) {
3345  kmp_topology_t::deallocate(__kmp_topology);
3346  __kmp_topology = nullptr;
3347  *msg_id = kmp_i18n_str_PhysicalIDsNotUnique;
3348  return false;
3349  }
3350  return true;
3351 }
3352 
3353 // Create a table of affinity masks (affinity.os_id_masks), indexed by OS thread ID.
3354 // This routine handles OR'ing together all the affinity masks of threads
3355 // that are sufficiently close, if granularity > fine.
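// For instance (a sketch, assuming two hardware threads per core and
// granularity=core, i.e. gran_levels == 1): OS procs 0 and 1 share a core,
// so os_id_masks[0] and os_id_masks[1] both end up holding the union mask
// {0,1}, and a thread bound through either entry may float between the two
// hardware threads of that core.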
3356 static void __kmp_create_os_id_masks(unsigned *numUnique,
3357  kmp_affinity_t &affinity) {
3358  // First form a table of affinity masks in order of OS thread id.
3359  int maxOsId;
3360  int i;
3361  int numAddrs = __kmp_topology->get_num_hw_threads();
3362  int depth = __kmp_topology->get_depth();
3363  const char *env_var = affinity.env_var;
3364  KMP_ASSERT(numAddrs);
3365  KMP_ASSERT(depth);
3366 
3367  maxOsId = 0;
3368  for (i = numAddrs - 1;; --i) {
3369  int osId = __kmp_topology->at(i).os_id;
3370  if (osId > maxOsId) {
3371  maxOsId = osId;
3372  }
3373  if (i == 0)
3374  break;
3375  }
3376  affinity.num_os_id_masks = maxOsId + 1;
3377  KMP_CPU_ALLOC_ARRAY(affinity.os_id_masks, affinity.num_os_id_masks);
3378  KMP_ASSERT(affinity.gran_levels >= 0);
3379  if (affinity.flags.verbose && (affinity.gran_levels > 0)) {
3380  KMP_INFORM(ThreadsMigrate, env_var, affinity.gran_levels);
3381  }
3382  if (affinity.gran_levels >= (int)depth) {
3383  KMP_AFF_WARNING(affinity, AffThreadsMayMigrate);
3384  }
3385 
3386  // Run through the table, forming the masks for all threads on each core.
3387  // Threads on the same core will have identical kmp_hw_thread_t objects, not
3388  // considering the last level, which must be the thread id. All threads on a
3389  // core will appear consecutively.
3390  int unique = 0;
3391  int j = 0; // index of 1st thread on core
3392  int leader = 0;
3393  kmp_affin_mask_t *sum;
3394  KMP_CPU_ALLOC_ON_STACK(sum);
3395  KMP_CPU_ZERO(sum);
3396  KMP_CPU_SET(__kmp_topology->at(0).os_id, sum);
3397  for (i = 1; i < numAddrs; i++) {
3398  // If this thread is sufficiently close to the leader (within the
3399  // granularity setting), then set the bit for this os thread in the
3400  // affinity mask for this group, and go on to the next thread.
3401  if (__kmp_topology->is_close(leader, i, affinity.gran_levels)) {
3402  KMP_CPU_SET(__kmp_topology->at(i).os_id, sum);
3403  continue;
3404  }
3405 
3406  // For every thread in this group, copy the mask to the thread's entry in
3407  // the OS Id mask table. Mark the first address as a leader.
3408  for (; j < i; j++) {
3409  int osId = __kmp_topology->at(j).os_id;
3410  KMP_DEBUG_ASSERT(osId <= maxOsId);
3411  kmp_affin_mask_t *mask = KMP_CPU_INDEX(affinity.os_id_masks, osId);
3412  KMP_CPU_COPY(mask, sum);
3413  __kmp_topology->at(j).leader = (j == leader);
3414  }
3415  unique++;
3416 
3417  // Start a new mask.
3418  leader = i;
3419  KMP_CPU_ZERO(sum);
3420  KMP_CPU_SET(__kmp_topology->at(i).os_id, sum);
3421  }
3422 
3423  // For every thread in last group, copy the mask to the thread's
3424  // entry in the OS Id mask table.
3425  for (; j < i; j++) {
3426  int osId = __kmp_topology->at(j).os_id;
3427  KMP_DEBUG_ASSERT(osId <= maxOsId);
3428  kmp_affin_mask_t *mask = KMP_CPU_INDEX(affinity.os_id_masks, osId);
3429  KMP_CPU_COPY(mask, sum);
3430  __kmp_topology->at(j).leader = (j == leader);
3431  }
3432  unique++;
3433  KMP_CPU_FREE_FROM_STACK(sum);
3434 
3435  *numUnique = unique;
3436 }
3437 
3438 // Stuff for the affinity proclist parsers. It's easier to declare these vars
3439 // as file-static than to try to pass them through the calling sequence of
3440 // the recursive-descent OMP_PLACES parser.
3441 static kmp_affin_mask_t *newMasks;
3442 static int numNewMasks;
3443 static int nextNewMask;
3444 
3445 #define ADD_MASK(_mask) \
3446  { \
3447  if (nextNewMask >= numNewMasks) { \
3448  int i; \
3449  numNewMasks *= 2; \
3450  kmp_affin_mask_t *temp; \
3451  KMP_CPU_INTERNAL_ALLOC_ARRAY(temp, numNewMasks); \
3452  for (i = 0; i < numNewMasks / 2; i++) { \
3453  kmp_affin_mask_t *src = KMP_CPU_INDEX(newMasks, i); \
3454  kmp_affin_mask_t *dest = KMP_CPU_INDEX(temp, i); \
3455  KMP_CPU_COPY(dest, src); \
3456  } \
3457  KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks / 2); \
3458  newMasks = temp; \
3459  } \
3460  KMP_CPU_COPY(KMP_CPU_INDEX(newMasks, nextNewMask), (_mask)); \
3461  nextNewMask++; \
3462  }
3463 
3464 #define ADD_MASK_OSID(_osId, _osId2Mask, _maxOsId) \
3465  { \
3466  if (((_osId) > _maxOsId) || \
3467  (!KMP_CPU_ISSET((_osId), KMP_CPU_INDEX((_osId2Mask), (_osId))))) { \
3468  KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, _osId); \
3469  } else { \
3470  ADD_MASK(KMP_CPU_INDEX(_osId2Mask, (_osId))); \
3471  } \
3472  }
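// Usage sketch for the two macros above (hypothetical values): ADD_MASK
// appends a copy of its argument to the growable newMasks vector, doubling
// the capacity on overflow - the copy loop runs over numNewMasks / 2 entries
// because numNewMasks has already been doubled at that point. ADD_MASK_OSID
// validates the OS proc id first, e.g.
//   ADD_MASK_OSID(5, osId2Mask, maxOsId); // appends the mask of OS proc 5,
//                                         // or warns and skips if 5 is invalid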
3473 
3474 // Re-parse the proclist (for the explicit affinity type), and form the list
3475 // of affinity newMasks indexed by gtid.
3476 static void __kmp_affinity_process_proclist(kmp_affinity_t &affinity) {
3477  int i;
3478  kmp_affin_mask_t **out_masks = &affinity.masks;
3479  unsigned *out_numMasks = &affinity.num_masks;
3480  const char *proclist = affinity.proclist;
3481  kmp_affin_mask_t *osId2Mask = affinity.os_id_masks;
3482  int maxOsId = affinity.num_os_id_masks - 1;
3483  const char *scan = proclist;
3484  const char *next = proclist;
3485 
3486  // Use an internally allocated temporary mask vector; the ADD_MASK macro
3487  // doubles its capacity whenever it fills up.
3488  numNewMasks = 2;
3489  KMP_CPU_INTERNAL_ALLOC_ARRAY(newMasks, numNewMasks);
3490  nextNewMask = 0;
3491  kmp_affin_mask_t *sumMask;
3492  KMP_CPU_ALLOC(sumMask);
3493  int setSize = 0;
3494 
3495  for (;;) {
3496  int start, end, stride;
3497 
3498  SKIP_WS(scan);
3499  next = scan;
3500  if (*next == '\0') {
3501  break;
3502  }
3503 
3504  if (*next == '{') {
3505  int num;
3506  setSize = 0;
3507  next++; // skip '{'
3508  SKIP_WS(next);
3509  scan = next;
3510 
3511  // Read the first integer in the set.
3512  KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad proclist");
3513  SKIP_DIGITS(next);
3514  num = __kmp_str_to_int(scan, *next);
3515  KMP_ASSERT2(num >= 0, "bad explicit proc list");
3516 
3517  // Copy the mask for that osId to the sum (union) mask.
3518  if ((num > maxOsId) ||
3519  (!KMP_CPU_ISSET(num, KMP_CPU_INDEX(osId2Mask, num)))) {
3520  KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, num);
3521  KMP_CPU_ZERO(sumMask);
3522  } else {
3523  KMP_CPU_COPY(sumMask, KMP_CPU_INDEX(osId2Mask, num));
3524  setSize = 1;
3525  }
3526 
3527  for (;;) {
3528  // Check for end of set.
3529  SKIP_WS(next);
3530  if (*next == '}') {
3531  next++; // skip '}'
3532  break;
3533  }
3534 
3535  // Skip optional comma.
3536  if (*next == ',') {
3537  next++;
3538  }
3539  SKIP_WS(next);
3540 
3541  // Read the next integer in the set.
3542  scan = next;
3543  KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");
3544 
3545  SKIP_DIGITS(next);
3546  num = __kmp_str_to_int(scan, *next);
3547  KMP_ASSERT2(num >= 0, "bad explicit proc list");
3548 
3549  // Add the mask for that osId to the sum mask.
3550  if ((num > maxOsId) ||
3551  (!KMP_CPU_ISSET(num, KMP_CPU_INDEX(osId2Mask, num)))) {
3552  KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, num);
3553  } else {
3554  KMP_CPU_UNION(sumMask, KMP_CPU_INDEX(osId2Mask, num));
3555  setSize++;
3556  }
3557  }
3558  if (setSize > 0) {
3559  ADD_MASK(sumMask);
3560  }
3561 
3562  SKIP_WS(next);
3563  if (*next == ',') {
3564  next++;
3565  }
3566  scan = next;
3567  continue;
3568  }
3569 
3570  // Read the first integer.
3571  KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");
3572  SKIP_DIGITS(next);
3573  start = __kmp_str_to_int(scan, *next);
3574  KMP_ASSERT2(start >= 0, "bad explicit proc list");
3575  SKIP_WS(next);
3576 
3577  // If this isn't a range, then add a mask to the list and go on.
3578  if (*next != '-') {
3579  ADD_MASK_OSID(start, osId2Mask, maxOsId);
3580 
3581  // Skip optional comma.
3582  if (*next == ',') {
3583  next++;
3584  }
3585  scan = next;
3586  continue;
3587  }
3588 
3589  // This is a range. Skip over the '-' and read in the 2nd int.
3590  next++; // skip '-'
3591  SKIP_WS(next);
3592  scan = next;
3593  KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");
3594  SKIP_DIGITS(next);
3595  end = __kmp_str_to_int(scan, *next);
3596  KMP_ASSERT2(end >= 0, "bad explicit proc list");
3597 
3598  // Check for a stride parameter
3599  stride = 1;
3600  SKIP_WS(next);
3601  if (*next == ':') {
3602  // A stride is specified. Skip over the ':' and read the 3rd int.
3603  int sign = +1;
3604  next++; // skip ':'
3605  SKIP_WS(next);
3606  scan = next;
3607  if (*next == '-') {
3608  sign = -1;
3609  next++;
3610  SKIP_WS(next);
3611  scan = next;
3612  }
3613  KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");
3614  SKIP_DIGITS(next);
3615  stride = __kmp_str_to_int(scan, *next);
3616  KMP_ASSERT2(stride >= 0, "bad explicit proc list");
3617  stride *= sign;
3618  }
3619 
3620  // Do some range checks.
3621  KMP_ASSERT2(stride != 0, "bad explicit proc list");
3622  if (stride > 0) {
3623  KMP_ASSERT2(start <= end, "bad explicit proc list");
3624  } else {
3625  KMP_ASSERT2(start >= end, "bad explicit proc list");
3626  }
3627  KMP_ASSERT2((end - start) / stride <= 65536, "bad explicit proc list");
3628 
3629  // Add the mask for each OS proc # to the list.
3630  if (stride > 0) {
3631  do {
3632  ADD_MASK_OSID(start, osId2Mask, maxOsId);
3633  start += stride;
3634  } while (start <= end);
3635  } else {
3636  do {
3637  ADD_MASK_OSID(start, osId2Mask, maxOsId);
3638  start += stride;
3639  } while (start >= end);
3640  }
3641 
3642  // Skip optional comma.
3643  SKIP_WS(next);
3644  if (*next == ',') {
3645  next++;
3646  }
3647  scan = next;
3648  }
3649 
3650  *out_numMasks = nextNewMask;
3651  if (nextNewMask == 0) {
3652  *out_masks = NULL;
3653  KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
  KMP_CPU_FREE(sumMask); // fix: also release the scratch union mask on this path
3654  return;
3655  }
3656  KMP_CPU_ALLOC_ARRAY((*out_masks), nextNewMask);
3657  for (i = 0; i < nextNewMask; i++) {
3658  kmp_affin_mask_t *src = KMP_CPU_INDEX(newMasks, i);
3659  kmp_affin_mask_t *dest = KMP_CPU_INDEX((*out_masks), i);
3660  KMP_CPU_COPY(dest, src);
3661  }
3662  KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
3663  KMP_CPU_FREE(sumMask);
3664 }
3665 
3666 /*-----------------------------------------------------------------------------
3667 Re-parse the OMP_PLACES proc id list, forming the newMasks for the different
3668 places. Again, here is the grammar:
3669 
3670 place_list := place
3671 place_list := place , place_list
3672 place := num
3673 place := place : num
3674 place := place : num : signed
3675 place := { subplace_list }
3676 place := ! place // (lowest priority)
3677 subplace_list := subplace
3678 subplace_list := subplace , subplace_list
3679 subplace := num
3680 subplace := num : num
3681 subplace := num : num : signed
3682 signed := num
3683 signed := + signed
3684 signed := - signed
3685 -----------------------------------------------------------------------------*/
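// A worked example under this grammar (hypothetical OS proc numbering):
// the place list "{0,1}:4:2" is the place {0,1} with count 4 and stride 2,
// which the parser below expands into the four places
//   {0,1}, {2,3}, {4,5}, {6,7}
// and "!{0}" complements the place {0} against the set of valid OS proc ids.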
3686 static void __kmp_process_subplace_list(const char **scan,
3687  kmp_affinity_t &affinity, int maxOsId,
3688  kmp_affin_mask_t *tempMask,
3689  int *setSize) {
3690  const char *next;
3691  kmp_affin_mask_t *osId2Mask = affinity.os_id_masks;
3692 
3693  for (;;) {
3694  int start, count, stride, i;
3695 
3696  // Read in the starting proc id
3697  SKIP_WS(*scan);
3698  KMP_ASSERT2((**scan >= '0') && (**scan <= '9'), "bad explicit places list");
3699  next = *scan;
3700  SKIP_DIGITS(next);
3701  start = __kmp_str_to_int(*scan, *next);
3702  KMP_ASSERT(start >= 0);
3703  *scan = next;
3704 
3705  // valid follow sets are ',' ':' and '}'
3706  SKIP_WS(*scan);
3707  if (**scan == '}' || **scan == ',') {
3708  if ((start > maxOsId) ||
3709  (!KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start)))) {
3710  KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, start);
3711  } else {
3712  KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start));
3713  (*setSize)++;
3714  }
3715  if (**scan == '}') {
3716  break;
3717  }
3718  (*scan)++; // skip ','
3719  continue;
3720  }
3721  KMP_ASSERT2(**scan == ':', "bad explicit places list");
3722  (*scan)++; // skip ':'
3723 
3724  // Read count parameter
3725  SKIP_WS(*scan);
3726  KMP_ASSERT2((**scan >= '0') && (**scan <= '9'), "bad explicit places list");
3727  next = *scan;
3728  SKIP_DIGITS(next);
3729  count = __kmp_str_to_int(*scan, *next);
3730  KMP_ASSERT(count >= 0);
3731  *scan = next;
3732 
3733  // valid follow sets are ',' ':' and '}'
3734  SKIP_WS(*scan);
3735  if (**scan == '}' || **scan == ',') {
3736  for (i = 0; i < count; i++) {
3737  if ((start > maxOsId) ||
3738  (!KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start)))) {
3739  KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, start);
3740  break; // don't proliferate warnings for large count
3741  } else {
3742  KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start));
3743  start++;
3744  (*setSize)++;
3745  }
3746  }
3747  if (**scan == '}') {
3748  break;
3749  }
3750  (*scan)++; // skip ','
3751  continue;
3752  }
3753  KMP_ASSERT2(**scan == ':', "bad explicit places list");
3754  (*scan)++; // skip ':'
3755 
3756  // Read stride parameter
3757  int sign = +1;
3758  for (;;) {
3759  SKIP_WS(*scan);
3760  if (**scan == '+') {
3761  (*scan)++; // skip '+'
3762  continue;
3763  }
3764  if (**scan == '-') {
3765  sign *= -1;
3766  (*scan)++; // skip '-'
3767  continue;
3768  }
3769  break;
3770  }
3771  SKIP_WS(*scan);
3772  KMP_ASSERT2((**scan >= '0') && (**scan <= '9'), "bad explicit places list");
3773  next = *scan;
3774  SKIP_DIGITS(next);
3775  stride = __kmp_str_to_int(*scan, *next);
3776  KMP_ASSERT(stride >= 0);
3777  *scan = next;
3778  stride *= sign;
3779 
3780  // valid follow sets are ',' and '}'
3781  SKIP_WS(*scan);
3782  if (**scan == '}' || **scan == ',') {
3783  for (i = 0; i < count; i++) {
3784  if ((start > maxOsId) ||
3785  (!KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start)))) {
3786  KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, start);
3787  break; // don't proliferate warnings for large count
3788  } else {
3789  KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start));
3790  start += stride;
3791  (*setSize)++;
3792  }
3793  }
3794  if (**scan == '}') {
3795  break;
3796  }
3797  (*scan)++; // skip ','
3798  continue;
3799  }
3800 
3801  KMP_ASSERT2(0, "bad explicit places list");
3802  }
3803 }
3804 
3805 static void __kmp_process_place(const char **scan, kmp_affinity_t &affinity,
3806  int maxOsId, kmp_affin_mask_t *tempMask,
3807  int *setSize) {
3808  const char *next;
3809  kmp_affin_mask_t *osId2Mask = affinity.os_id_masks;
3810 
3811  // valid follow sets are '{' '!' and num
3812  SKIP_WS(*scan);
3813  if (**scan == '{') {
3814  (*scan)++; // skip '{'
3815  __kmp_process_subplace_list(scan, affinity, maxOsId, tempMask, setSize);
3816  KMP_ASSERT2(**scan == '}', "bad explicit places list");
3817  (*scan)++; // skip '}'
3818  } else if (**scan == '!') {
3819  (*scan)++; // skip '!'
3820  __kmp_process_place(scan, affinity, maxOsId, tempMask, setSize);
3821  KMP_CPU_COMPLEMENT(maxOsId, tempMask);
3822  } else if ((**scan >= '0') && (**scan <= '9')) {
3823  next = *scan;
3824  SKIP_DIGITS(next);
3825  int num = __kmp_str_to_int(*scan, *next);
3826  KMP_ASSERT(num >= 0);
3827  if ((num > maxOsId) ||
3828  (!KMP_CPU_ISSET(num, KMP_CPU_INDEX(osId2Mask, num)))) {
3829  KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, num);
3830  } else {
3831  KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, num));
3832  (*setSize)++;
3833  }
3834  *scan = next; // skip num
3835  } else {
3836  KMP_ASSERT2(0, "bad explicit places list");
3837  }
3838 }
3839 
3841 void __kmp_affinity_process_placelist(kmp_affinity_t &affinity) {
3842  int i, j, count, stride, sign;
3843  kmp_affin_mask_t **out_masks = &affinity.masks;
3844  unsigned *out_numMasks = &affinity.num_masks;
3845  const char *placelist = affinity.proclist;
3846  kmp_affin_mask_t *osId2Mask = affinity.os_id_masks;
3847  int maxOsId = affinity.num_os_id_masks - 1;
3848  const char *scan = placelist;
3849  const char *next = placelist;
3850 
3851  numNewMasks = 2;
3852  KMP_CPU_INTERNAL_ALLOC_ARRAY(newMasks, numNewMasks);
3853  nextNewMask = 0;
3854 
3855  // tempMask is modified based on the previous or initial
3856  // place to form the current place
3857  // previousMask contains the previous place
3858  kmp_affin_mask_t *tempMask;
3859  kmp_affin_mask_t *previousMask;
3860  KMP_CPU_ALLOC(tempMask);
3861  KMP_CPU_ZERO(tempMask);
3862  KMP_CPU_ALLOC(previousMask);
3863  KMP_CPU_ZERO(previousMask);
3864  int setSize = 0;
3865 
3866  for (;;) {
3867  __kmp_process_place(&scan, affinity, maxOsId, tempMask, &setSize);
3868 
3869  // valid follow sets are ',' ':' and EOL
3870  SKIP_WS(scan);
3871  if (*scan == '\0' || *scan == ',') {
3872  if (setSize > 0) {
3873  ADD_MASK(tempMask);
3874  }
3875  KMP_CPU_ZERO(tempMask);
3876  setSize = 0;
3877  if (*scan == '\0') {
3878  break;
3879  }
3880  scan++; // skip ','
3881  continue;
3882  }
3883 
3884  KMP_ASSERT2(*scan == ':', "bad explicit places list");
3885  scan++; // skip ':'
3886 
3887  // Read count parameter
3888  SKIP_WS(scan);
3889  KMP_ASSERT2((*scan >= '0') && (*scan <= '9'), "bad explicit places list");
3890  next = scan;
3891  SKIP_DIGITS(next);
3892  count = __kmp_str_to_int(scan, *next);
3893  KMP_ASSERT(count >= 0);
3894  scan = next;
3895 
3896  // valid follow sets are ',' ':' and EOL
3897  SKIP_WS(scan);
3898  if (*scan == '\0' || *scan == ',') {
3899  stride = +1;
3900  } else {
3901  KMP_ASSERT2(*scan == ':', "bad explicit places list");
3902  scan++; // skip ':'
3903 
3904  // Read stride parameter
3905  sign = +1;
3906  for (;;) {
3907  SKIP_WS(scan);
3908  if (*scan == '+') {
3909  scan++; // skip '+'
3910  continue;
3911  }
3912  if (*scan == '-') {
3913  sign *= -1;
3914  scan++; // skip '-'
3915  continue;
3916  }
3917  break;
3918  }
3919  SKIP_WS(scan);
3920  KMP_ASSERT2((*scan >= '0') && (*scan <= '9'), "bad explicit places list");
3921  next = scan;
3922  SKIP_DIGITS(next);
3923  stride = __kmp_str_to_int(scan, *next);
3924  KMP_DEBUG_ASSERT(stride >= 0);
3925  scan = next;
3926  stride *= sign;
3927  }
3928 
3929  // Add places determined by initial_place : count : stride
3930  for (i = 0; i < count; i++) {
3931  if (setSize == 0) {
3932  break;
3933  }
3934  // Add the current place, then build the next place (tempMask) from that
3935  KMP_CPU_COPY(previousMask, tempMask);
3936  ADD_MASK(previousMask);
3937  KMP_CPU_ZERO(tempMask);
3938  setSize = 0;
3939  KMP_CPU_SET_ITERATE(j, previousMask) {
3940  if (!KMP_CPU_ISSET(j, previousMask)) {
3941  continue;
3942  }
3943  if ((j + stride > maxOsId) || (j + stride < 0) ||
3944  (!KMP_CPU_ISSET(j, __kmp_affin_fullMask)) ||
3945  (!KMP_CPU_ISSET(j + stride,
3946  KMP_CPU_INDEX(osId2Mask, j + stride)))) {
3947  if (i < count - 1) {
3948  KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, j + stride);
3949  }
3950  continue;
3951  }
3952  KMP_CPU_SET(j + stride, tempMask);
3953  setSize++;
3954  }
3955  }
3956  KMP_CPU_ZERO(tempMask);
3957  setSize = 0;
3958 
3959  // valid follow sets are ',' and EOL
3960  SKIP_WS(scan);
3961  if (*scan == '\0') {
3962  break;
3963  }
3964  if (*scan == ',') {
3965  scan++; // skip ','
3966  continue;
3967  }
3968 
3969  KMP_ASSERT2(0, "bad explicit places list");
3970  }
3971 
3972  *out_numMasks = nextNewMask;
3973  if (nextNewMask == 0) {
3974  *out_masks = NULL;
3975  KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
  KMP_CPU_FREE(tempMask); // fix: release the scratch masks on this early return
  KMP_CPU_FREE(previousMask);
3976  return;
3977  }
3978  KMP_CPU_ALLOC_ARRAY((*out_masks), nextNewMask);
3979  KMP_CPU_FREE(tempMask);
3980  KMP_CPU_FREE(previousMask);
3981  for (i = 0; i < nextNewMask; i++) {
3982  kmp_affin_mask_t *src = KMP_CPU_INDEX(newMasks, i);
3983  kmp_affin_mask_t *dest = KMP_CPU_INDEX((*out_masks), i);
3984  KMP_CPU_COPY(dest, src);
3985  }
3986  KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
3987 }
3988 
3989 #undef ADD_MASK
3990 #undef ADD_MASK_OSID
3991 
3992 // This function figures out the deepest level at which there is at least one
3993 // cluster/core with more than one processing unit bound to it.
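// For example (a sketch, assuming a socket/core/thread topology where
// bottom_level == 2): any hardware thread with ids[2] > 0 shows that some
// core runs more than one processing unit, so core_level becomes 1 (the
// core level); if all thread ids are 0 but some core id is nonzero,
// core_level stays 0 (the socket level).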
3994 static int __kmp_affinity_find_core_level(int nprocs, int bottom_level) {
3995  int core_level = 0;
3996 
3997  for (int i = 0; i < nprocs; i++) {
3998  const kmp_hw_thread_t &hw_thread = __kmp_topology->at(i);
3999  for (int j = bottom_level; j > 0; j--) {
4000  if (hw_thread.ids[j] > 0) {
4001  if (core_level < (j - 1)) {
4002  core_level = j - 1;
4003  }
4004  }
4005  }
4006  }
4007  return core_level;
4008 }
4009 
4010 // This function counts the number of clusters/cores at the given level.
4011 static int __kmp_affinity_compute_ncores(int nprocs, int bottom_level,
4012  int core_level) {
4013  return __kmp_topology->get_count(core_level);
4014 }
4015 // This function finds the cluster/core to which the given processing unit is bound.
4016 static int __kmp_affinity_find_core(int proc, int bottom_level,
4017  int core_level) {
4018  int core = 0;
4019  KMP_DEBUG_ASSERT(proc >= 0 && proc < __kmp_topology->get_num_hw_threads());
4020  for (int i = 0; i <= proc; ++i) {
4021  if (i + 1 <= proc) {
4022  for (int j = 0; j <= core_level; ++j) {
4023  if (__kmp_topology->at(i + 1).sub_ids[j] !=
4024  __kmp_topology->at(i).sub_ids[j]) {
4025  core++;
4026  break;
4027  }
4028  }
4029  }
4030  }
4031  return core;
4032 }
4033 
4034 // This function finds the maximal number of processing units bound to a
4035 // single cluster/core at the given level.
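// For example (a sketch): on a uniform machine with two hardware threads
// per core, where core_level is the core level and bottom_level the thread
// level, calculate_ratio(thread_level, core_level) yields 2; when
// core_level is already the bottom level, each "core" is a single
// processing unit and the result is 1.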
4036 static int __kmp_affinity_max_proc_per_core(int nprocs, int bottom_level,
4037  int core_level) {
4038  if (core_level >= bottom_level)
4039  return 1;
4040  int thread_level = __kmp_topology->get_level(KMP_HW_THREAD);
4041  return __kmp_topology->calculate_ratio(thread_level, core_level);
4042 }
4043 
4044 static int *procarr = NULL;
4045 static int __kmp_aff_depth = 0;
4046 static int *__kmp_osid_to_hwthread_map = NULL;
4047 
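// Summarize the topology ids and attributes covered by an affinity mask.
// A sketch of the expected outcome (hypothetical mask): if the mask covers
// both hardware threads of one core, the socket and core entries of ids stay
// concrete while the thread entry becomes MULTIPLE_ID; a mask spanning two
// core types likewise degrades attrs.core_type to KMP_HW_CORE_TYPE_UNKNOWN.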
4048 static void __kmp_affinity_get_mask_topology_info(const kmp_affin_mask_t *mask,
4049  kmp_affinity_ids_t &ids,
4050  kmp_affinity_attrs_t &attrs) {
4051  if (!KMP_AFFINITY_CAPABLE())
4052  return;
4053 
4054  // Initialize the thread's ids and attrs data
4055  for (int i = 0; i < KMP_HW_LAST; ++i)
4056  ids[i] = kmp_hw_thread_t::UNKNOWN_ID;
4057  attrs = KMP_AFFINITY_ATTRS_UNKNOWN;
4058 
4059  // Iterate through each os id within the mask and determine
4060  // the topology id and attribute information
4061  int cpu;
4062  int depth = __kmp_topology->get_depth();
4063  KMP_CPU_SET_ITERATE(cpu, mask) {
4064  int osid_idx = __kmp_osid_to_hwthread_map[cpu];
4065  const kmp_hw_thread_t &hw_thread = __kmp_topology->at(osid_idx);
4066  for (int level = 0; level < depth; ++level) {
4067  kmp_hw_t type = __kmp_topology->get_type(level);
4068  int id = hw_thread.sub_ids[level];
4069  if (ids[type] == kmp_hw_thread_t::UNKNOWN_ID || ids[type] == id) {
4070  ids[type] = id;
4071  } else {
4072  // This mask spans across multiple topology units, set it as such
4073  // and mark every level below as such as well.
4074  ids[type] = kmp_hw_thread_t::MULTIPLE_ID;
4075  for (; level < depth; ++level) {
4076  kmp_hw_t type = __kmp_topology->get_type(level);
4077  ids[type] = kmp_hw_thread_t::MULTIPLE_ID;
4078  }
4079  }
4080  }
4081  if (!attrs.valid) {
4082  attrs.core_type = hw_thread.attrs.get_core_type();
4083  attrs.core_eff = hw_thread.attrs.get_core_eff();
4084  attrs.valid = 1;
4085  } else {
4086  // This mask spans across multiple attributes, set it as such
4087  if (attrs.core_type != hw_thread.attrs.get_core_type())
4088  attrs.core_type = KMP_HW_CORE_TYPE_UNKNOWN;
4089  if (attrs.core_eff != hw_thread.attrs.get_core_eff())
4090  attrs.core_eff = kmp_hw_attr_t::UNKNOWN_CORE_EFF;
4091  }
4092  }
4093 }
4094 
4095 static void __kmp_affinity_get_thread_topology_info(kmp_info_t *th) {
4096  if (!KMP_AFFINITY_CAPABLE())
4097  return;
4098  const kmp_affin_mask_t *mask = th->th.th_affin_mask;
4099  kmp_affinity_ids_t &ids = th->th.th_topology_ids;
4100  kmp_affinity_attrs_t &attrs = th->th.th_topology_attrs;
4101  __kmp_affinity_get_mask_topology_info(mask, ids, attrs);
4102 }
4103 
4104 // Assign the topology information to each place in the place list
4105 // A thread can then grab not only its affinity mask, but the topology
4106 // information associated with that mask, e.g., which socket a thread is on.
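// For instance (a sketch): with OMP_PLACES=cores on a two-socket machine,
// affinity.ids[p] for a place p on the second socket reports a concrete
// socket id and core id, while the thread level reads MULTIPLE_ID because
// the place spans every hardware thread of that core.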
4107 static void __kmp_affinity_get_topology_info(kmp_affinity_t &affinity) {
4108  if (!KMP_AFFINITY_CAPABLE())
4109  return;
4110  if (affinity.type != affinity_none) {
4111  KMP_ASSERT(affinity.num_os_id_masks);
4112  KMP_ASSERT(affinity.os_id_masks);
4113  }
4114  KMP_ASSERT(affinity.num_masks);
4115  KMP_ASSERT(affinity.masks);
4116  KMP_ASSERT(__kmp_affin_fullMask);
4117 
4118  int max_cpu = __kmp_affin_fullMask->get_max_cpu();
4119  int num_hw_threads = __kmp_topology->get_num_hw_threads();
4120 
4121  // Allocate thread topology information
4122  if (!affinity.ids) {
4123  affinity.ids = (kmp_affinity_ids_t *)__kmp_allocate(
4124  sizeof(kmp_affinity_ids_t) * affinity.num_masks);
4125  }
4126  if (!affinity.attrs) {
4127  affinity.attrs = (kmp_affinity_attrs_t *)__kmp_allocate(
4128  sizeof(kmp_affinity_attrs_t) * affinity.num_masks);
4129  }
4130  if (!__kmp_osid_to_hwthread_map) {
4131  // Want the +1 because max_cpu should be a valid index into the map
4132  __kmp_osid_to_hwthread_map =
4133  (int *)__kmp_allocate(sizeof(int) * (max_cpu + 1));
4134  }
4135 
4136  // Create the OS proc to hardware thread map
4137  for (int hw_thread = 0; hw_thread < num_hw_threads; ++hw_thread)
4138  __kmp_osid_to_hwthread_map[__kmp_topology->at(hw_thread).os_id] = hw_thread;
4139 
4140  for (unsigned i = 0; i < affinity.num_masks; ++i) {
4141  kmp_affinity_ids_t &ids = affinity.ids[i];
4142  kmp_affinity_attrs_t &attrs = affinity.attrs[i];
4143  kmp_affin_mask_t *mask = KMP_CPU_INDEX(affinity.masks, i);
4144  __kmp_affinity_get_mask_topology_info(mask, ids, attrs);
4145  }
4146 }
4147 
4148 // Create a one element mask array (set of places) which only contains the
4149 // initial process's affinity mask
4150 static void __kmp_create_affinity_none_places(kmp_affinity_t &affinity) {
4151  KMP_ASSERT(__kmp_affin_fullMask != NULL);
4152  KMP_ASSERT(affinity.type == affinity_none);
4153  affinity.num_masks = 1;
4154  KMP_CPU_ALLOC_ARRAY(affinity.masks, affinity.num_masks);
4155  kmp_affin_mask_t *dest = KMP_CPU_INDEX(affinity.masks, 0);
4156  KMP_CPU_COPY(dest, __kmp_affin_fullMask);
4157  __kmp_affinity_get_topology_info(affinity);
4158 }
4159 
4160 static void __kmp_aux_affinity_initialize_masks(kmp_affinity_t &affinity) {
4161  // Create the "full" mask - this defines all of the processors that we
4162  // consider to be in the machine model. If respect is set, then it is the
4163  // initialization thread's affinity mask. Otherwise, it is all processors that
4164  // we know about on the machine.
4165  int verbose = affinity.flags.verbose;
4166  const char *env_var = affinity.env_var;
4167 
4168  // Already initialized
4169  if (__kmp_affin_fullMask && __kmp_affin_origMask)
4170  return;
4171 
4172  if (__kmp_affin_fullMask == NULL) {
4173  KMP_CPU_ALLOC(__kmp_affin_fullMask);
4174  }
4175  if (__kmp_affin_origMask == NULL) {
4176  KMP_CPU_ALLOC(__kmp_affin_origMask);
4177  }
4178  if (KMP_AFFINITY_CAPABLE()) {
4179  __kmp_get_system_affinity(__kmp_affin_fullMask, TRUE);
4180  // Make a copy before possible expanding to the entire machine mask
4181  __kmp_affin_origMask->copy(__kmp_affin_fullMask);
4182  if (affinity.flags.respect) {
4183  // Count the number of available processors.
4184  unsigned i;
4185  __kmp_avail_proc = 0;
4186  KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
4187  if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
4188  continue;
4189  }
4190  __kmp_avail_proc++;
4191  }
4192  if (__kmp_avail_proc > __kmp_xproc) {
4193  KMP_AFF_WARNING(affinity, ErrorInitializeAffinity);
4194  affinity.type = affinity_none;
4195  KMP_AFFINITY_DISABLE();
4196  return;
4197  }
4198 
4199  if (verbose) {
4200  char buf[KMP_AFFIN_MASK_PRINT_LEN];
4201  __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
4202  __kmp_affin_fullMask);
4203  KMP_INFORM(InitOSProcSetRespect, env_var, buf);
4204  }
4205  } else {
4206  if (verbose) {
4207  char buf[KMP_AFFIN_MASK_PRINT_LEN];
4208  __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
4209  __kmp_affin_fullMask);
4210  KMP_INFORM(InitOSProcSetNotRespect, env_var, buf);
4211  }
4212  __kmp_avail_proc =
4213  __kmp_affinity_entire_machine_mask(__kmp_affin_fullMask);
4214 #if KMP_OS_WINDOWS
4215  if (__kmp_num_proc_groups <= 1) {
4216  // Copy expanded full mask if topology has single processor group
4217  __kmp_affin_origMask->copy(__kmp_affin_fullMask);
4218  }
4219  // Set the process affinity mask since threads' affinity
4220  // masks must be subset of process mask in Windows* OS
4221  __kmp_affin_fullMask->set_process_affinity(true);
4222 #endif
4223  }
4224  }
4225 }
4226 
4227 static bool __kmp_aux_affinity_initialize_topology(kmp_affinity_t &affinity) {
4228  bool success = false;
4229  const char *env_var = affinity.env_var;
4230  kmp_i18n_id_t msg_id = kmp_i18n_null;
4231  int verbose = affinity.flags.verbose;
4232 
4233  // For backward compatibility, setting KMP_CPUINFO_FILE =>
4234  // KMP_TOPOLOGY_METHOD=cpuinfo
4235  if ((__kmp_cpuinfo_file != NULL) &&
4236  (__kmp_affinity_top_method == affinity_top_method_all)) {
4237  __kmp_affinity_top_method = affinity_top_method_cpuinfo;
4238  }
4239 
4240  if (__kmp_affinity_top_method == affinity_top_method_all) {
4241 // In the default code path, errors are not fatal - we just try using
4242 // another method. We only emit a warning message if affinity is on, or the
4243 // verbose flag is set, and the nowarnings flag was not set.
4244 #if KMP_USE_HWLOC
4245  if (!success &&
4246  __kmp_affinity_dispatch->get_api_type() == KMPAffinity::HWLOC) {
4247  if (!__kmp_hwloc_error) {
4248  success = __kmp_affinity_create_hwloc_map(&msg_id);
4249  if (!success && verbose) {
4250  KMP_INFORM(AffIgnoringHwloc, env_var);
4251  }
4252  } else if (verbose) {
4253  KMP_INFORM(AffIgnoringHwloc, env_var);
4254  }
4255  }
4256 #endif
4257 
4258 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
4259  if (!success) {
4260  success = __kmp_affinity_create_x2apicid_map(&msg_id);
4261  if (!success && verbose && msg_id != kmp_i18n_null) {
4262  KMP_INFORM(AffInfoStr, env_var, __kmp_i18n_catgets(msg_id));
4263  }
4264  }
4265  if (!success) {
4266  success = __kmp_affinity_create_apicid_map(&msg_id);
4267  if (!success && verbose && msg_id != kmp_i18n_null) {
4268  KMP_INFORM(AffInfoStr, env_var, __kmp_i18n_catgets(msg_id));
4269  }
4270  }
4271 #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
4272 
4273 #if KMP_OS_LINUX
4274  if (!success) {
4275  int line = 0;
4276  success = __kmp_affinity_create_cpuinfo_map(&line, &msg_id);
4277  if (!success && verbose && msg_id != kmp_i18n_null) {
4278  KMP_INFORM(AffInfoStr, env_var, __kmp_i18n_catgets(msg_id));
4279  }
4280  }
4281 #endif /* KMP_OS_LINUX */
4282 
4283 #if KMP_GROUP_AFFINITY
4284  if (!success && (__kmp_num_proc_groups > 1)) {
4285  success = __kmp_affinity_create_proc_group_map(&msg_id);
4286  if (!success && verbose && msg_id != kmp_i18n_null) {
4287  KMP_INFORM(AffInfoStr, env_var, __kmp_i18n_catgets(msg_id));
4288  }
4289  }
4290 #endif /* KMP_GROUP_AFFINITY */
4291 
4292  if (!success) {
4293  success = __kmp_affinity_create_flat_map(&msg_id);
4294  if (!success && verbose && msg_id != kmp_i18n_null) {
4295  KMP_INFORM(AffInfoStr, env_var, __kmp_i18n_catgets(msg_id));
4296  }
4297  KMP_ASSERT(success);
4298  }
4299  }
4300 
4301 // If the user has specified that a particular topology discovery method is to be
4302 // used, then we abort if that method fails. The exception is group affinity,
4303 // which might have been implicitly set.
4304 #if KMP_USE_HWLOC
4305  else if (__kmp_affinity_top_method == affinity_top_method_hwloc) {
4306  KMP_ASSERT(__kmp_affinity_dispatch->get_api_type() == KMPAffinity::HWLOC);
4307  success = __kmp_affinity_create_hwloc_map(&msg_id);
4308  if (!success) {
4309  KMP_ASSERT(msg_id != kmp_i18n_null);
4310  KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id));
4311  }
4312  }
4313 #endif // KMP_USE_HWLOC
4314 
4315 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
4316  else if (__kmp_affinity_top_method == affinity_top_method_x2apicid ||
4317  __kmp_affinity_top_method == affinity_top_method_x2apicid_1f) {
4318  success = __kmp_affinity_create_x2apicid_map(&msg_id);
4319  if (!success) {
4320  KMP_ASSERT(msg_id != kmp_i18n_null);
4321  KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id));
4322  }
4323  } else if (__kmp_affinity_top_method == affinity_top_method_apicid) {
4324  success = __kmp_affinity_create_apicid_map(&msg_id);
4325  if (!success) {
4326  KMP_ASSERT(msg_id != kmp_i18n_null);
4327  KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id));
4328  }
4329  }
4330 #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
4331 
4332  else if (__kmp_affinity_top_method == affinity_top_method_cpuinfo) {
4333  int line = 0;
4334  success = __kmp_affinity_create_cpuinfo_map(&line, &msg_id);
4335  if (!success) {
4336  KMP_ASSERT(msg_id != kmp_i18n_null);
4337  const char *filename = __kmp_cpuinfo_get_filename();
4338  if (line > 0) {
4339  KMP_FATAL(FileLineMsgExiting, filename, line,
4340  __kmp_i18n_catgets(msg_id));
4341  } else {
4342  KMP_FATAL(FileMsgExiting, filename, __kmp_i18n_catgets(msg_id));
4343  }
4344  }
4345  }
4346 
4347 #if KMP_GROUP_AFFINITY
4348  else if (__kmp_affinity_top_method == affinity_top_method_group) {
4349  success = __kmp_affinity_create_proc_group_map(&msg_id);
4351  if (!success) {
4352  KMP_ASSERT(msg_id != kmp_i18n_null);
4353  KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id));
4354  }
4355  }
4356 #endif /* KMP_GROUP_AFFINITY */
4357 
4358  else if (__kmp_affinity_top_method == affinity_top_method_flat) {
4359  success = __kmp_affinity_create_flat_map(&msg_id);
4360  // should not fail
4361  KMP_ASSERT(success);
4362  }
4363 
4364  // Early exit if topology could not be created
4365  if (!__kmp_topology) {
4366  if (KMP_AFFINITY_CAPABLE()) {
4367  KMP_AFF_WARNING(affinity, ErrorInitializeAffinity);
4368  }
4369  if (nPackages > 0 && nCoresPerPkg > 0 && __kmp_nThreadsPerCore > 0 &&
4370  __kmp_ncores > 0) {
4371  __kmp_topology = kmp_topology_t::allocate(0, 0, NULL);
4372  __kmp_topology->canonicalize(nPackages, nCoresPerPkg,
4373  __kmp_nThreadsPerCore, __kmp_ncores);
4374  if (verbose) {
4375  __kmp_topology->print(env_var);
4376  }
4377  }
4378  return false;
4379  }
4380 
4381  // Canonicalize, print (if requested), apply KMP_HW_SUBSET
4382  __kmp_topology->canonicalize();
4383  if (verbose)
4384  __kmp_topology->print(env_var);
4385  bool filtered = __kmp_topology->filter_hw_subset();
4386  if (filtered) {
4387 #if KMP_OS_WINDOWS
4388  // Copy filtered full mask if topology has single processor group
4389  if (__kmp_num_proc_groups <= 1)
4390 #endif
4391  __kmp_affin_origMask->copy(__kmp_affin_fullMask);
4392  }
4393  if (filtered && verbose)
4394  __kmp_topology->print("KMP_HW_SUBSET");
4395  return success;
4396 }
4397 
4398 static void __kmp_aux_affinity_initialize(kmp_affinity_t &affinity) {
4399  bool is_regular_affinity = (&affinity == &__kmp_affinity);
4400  bool is_hidden_helper_affinity = (&affinity == &__kmp_hh_affinity);
4401  const char *env_var = affinity.env_var;
4402 
4403  if (affinity.flags.initialized) {
4404  KMP_ASSERT(__kmp_affin_fullMask != NULL);
4405  return;
4406  }
4407 
4408  if (is_regular_affinity && (!__kmp_affin_fullMask || !__kmp_affin_origMask))
4409  __kmp_aux_affinity_initialize_masks(affinity);
4410 
4411  if (is_regular_affinity && !__kmp_topology) {
4412  bool success = __kmp_aux_affinity_initialize_topology(affinity);
4413  if (success) {
4414  // Initialize other data structures which depend on the topology
4415  machine_hierarchy.init(__kmp_topology->get_num_hw_threads());
4416  KMP_ASSERT(__kmp_avail_proc == __kmp_topology->get_num_hw_threads());
4417  } else {
4418  affinity.type = affinity_none;
4419  KMP_AFFINITY_DISABLE();
4420  }
4421  }
4422 
4423  // If KMP_AFFINITY=none, then only create the single "none" place
4424  // which is either the process's initial affinity mask or the entire
4425  // machine, depending on the respect/norespect modifier.
4426  if (affinity.type == affinity_none) {
4427  __kmp_create_affinity_none_places(affinity);
4428 #if KMP_USE_HIER_SCHED
4429  __kmp_dispatch_set_hierarchy_values();
4430 #endif
4431  affinity.flags.initialized = TRUE;
4432  return;
4433  }
4434 
4435  __kmp_topology->set_granularity(affinity);
4436  int depth = __kmp_topology->get_depth();
4437 
4438  // Create the table of masks, indexed by thread Id.
4439  unsigned numUnique;
4440  __kmp_create_os_id_masks(&numUnique, affinity);
4441  if (affinity.gran_levels == 0) {
4442  KMP_DEBUG_ASSERT((int)numUnique == __kmp_avail_proc);
4443  }
4444 
4445  switch (affinity.type) {
4446 
4447  case affinity_explicit:
4448  KMP_DEBUG_ASSERT(affinity.proclist != NULL);
4449  if (is_hidden_helper_affinity ||
4450  __kmp_nested_proc_bind.bind_types[0] == proc_bind_intel) {
4451  __kmp_affinity_process_proclist(affinity);
4452  } else {
4453  __kmp_affinity_process_placelist(affinity);
4454  }
4455  if (affinity.num_masks == 0) {
4456  KMP_AFF_WARNING(affinity, AffNoValidProcID);
4457  affinity.type = affinity_none;
4458  __kmp_create_affinity_none_places(affinity);
4459  affinity.flags.initialized = TRUE;
4460  return;
4461  }
4462  break;
4463 
4464  // The other affinity types rely on sorting the hardware threads according to
4465  // some permutation of the machine topology tree. Set affinity.compact
4466  // and affinity.offset appropriately, then jump to a common code
4467  // fragment to do the sort and create the array of affinity masks.
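  // A worked example (hypothetical 2 sockets x 2 cores x 2 threads): with
  // affinity_compact, consecutive places stay topologically close, so places
  // 0 and 1 land on the two hardware threads of one core; affinity_scatter
  // reverses the significance (compact = depth - 1 - compact), so consecutive
  // places land on different sockets first. A nonzero offset merely rotates
  // which place a given thread starts from.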
4468  case affinity_logical:
4469  affinity.compact = 0;
4470  if (affinity.offset) {
4471  affinity.offset =
4472  __kmp_nThreadsPerCore * affinity.offset % __kmp_avail_proc;
4473  }
4474  goto sortTopology;
4475 
4476  case affinity_physical:
4477  if (__kmp_nThreadsPerCore > 1) {
4478  affinity.compact = 1;
4479  if (affinity.compact >= depth) {
4480  affinity.compact = 0;
4481  }
4482  } else {
4483  affinity.compact = 0;
4484  }
4485  if (affinity.offset) {
4486  affinity.offset =
4487  __kmp_nThreadsPerCore * affinity.offset % __kmp_avail_proc;
4488  }
4489  goto sortTopology;
4490 
4491  case affinity_scatter:
4492  if (affinity.compact >= depth) {
4493  affinity.compact = 0;
4494  } else {
4495  affinity.compact = depth - 1 - affinity.compact;
4496  }
4497  goto sortTopology;
4498 
4499  case affinity_compact:
4500  if (affinity.compact >= depth) {
4501  affinity.compact = depth - 1;
4502  }
4503  goto sortTopology;
4504 
4505  case affinity_balanced:
4506  if (depth <= 1 || is_hidden_helper_affinity) {
4507  KMP_AFF_WARNING(affinity, AffBalancedNotAvail, env_var);
4508  affinity.type = affinity_none;
4509  __kmp_create_affinity_none_places(affinity);
4510  affinity.flags.initialized = TRUE;
4511  return;
4512  } else if (!__kmp_topology->is_uniform()) {
4513  // Save the depth for further usage
4514  __kmp_aff_depth = depth;
4515 
4516  int core_level =
4517  __kmp_affinity_find_core_level(__kmp_avail_proc, depth - 1);
4518  int ncores = __kmp_affinity_compute_ncores(__kmp_avail_proc, depth - 1,
4519  core_level);
4520  int maxprocpercore = __kmp_affinity_max_proc_per_core(
4521  __kmp_avail_proc, depth - 1, core_level);
4522 
4523  int nproc = ncores * maxprocpercore;
4524  if ((nproc < 2) || (nproc < __kmp_avail_proc)) {
4525  KMP_AFF_WARNING(affinity, AffBalancedNotAvail, env_var);
4526  affinity.type = affinity_none;
4527  __kmp_create_affinity_none_places(affinity);
4528  affinity.flags.initialized = TRUE;
4529  return;
4530  }
4531 
4532  procarr = (int *)__kmp_allocate(sizeof(int) * nproc);
4533  for (int i = 0; i < nproc; i++) {
4534  procarr[i] = -1;
4535  }
4536 
4537  int lastcore = -1;
4538  int inlastcore = 0;
4539  for (int i = 0; i < __kmp_avail_proc; i++) {
4540  int proc = __kmp_topology->at(i).os_id;
4541  int core = __kmp_affinity_find_core(i, depth - 1, core_level);
4542 
4543  if (core == lastcore) {
4544  inlastcore++;
4545  } else {
4546  inlastcore = 0;
4547  }
4548  lastcore = core;
4549 
4550  procarr[core * maxprocpercore + inlastcore] = proc;
4551  }
4552  }
4553  if (affinity.compact >= depth) {
4554  affinity.compact = depth - 1;
4555  }
4556 
4557  sortTopology:
4558  // Allocate the gtid->affinity mask table.
4559  if (affinity.flags.dups) {
4560  affinity.num_masks = __kmp_avail_proc;
4561  } else {
4562  affinity.num_masks = numUnique;
4563  }
4564 
4565  if ((__kmp_nested_proc_bind.bind_types[0] != proc_bind_intel) &&
4566  (__kmp_affinity_num_places > 0) &&
4567  ((unsigned)__kmp_affinity_num_places < affinity.num_masks) &&
4568  !is_hidden_helper_affinity) {
4569  affinity.num_masks = __kmp_affinity_num_places;
4570  }
4571 
4572  KMP_CPU_ALLOC_ARRAY(affinity.masks, affinity.num_masks);
4573 
4574  // Sort the topology table according to the current setting of
4575  // affinity.compact, then fill out affinity.masks.
4576  __kmp_topology->sort_compact(affinity);
4577  {
4578  int i;
4579  unsigned j;
4580  int num_hw_threads = __kmp_topology->get_num_hw_threads();
4581  for (i = 0, j = 0; i < num_hw_threads; i++) {
4582  if ((!affinity.flags.dups) && (!__kmp_topology->at(i).leader)) {
4583  continue;
4584  }
4585  int osId = __kmp_topology->at(i).os_id;
4586 
4587  kmp_affin_mask_t *src = KMP_CPU_INDEX(affinity.os_id_masks, osId);
4588  kmp_affin_mask_t *dest = KMP_CPU_INDEX(affinity.masks, j);
4589  KMP_ASSERT(KMP_CPU_ISSET(osId, src));
4590  KMP_CPU_COPY(dest, src);
4591  if (++j >= affinity.num_masks) {
4592  break;
4593  }
4594  }
4595  KMP_DEBUG_ASSERT(j == affinity.num_masks);
4596  }
4597  // Sort the topology back using ids
4598  __kmp_topology->sort_ids();
4599  break;
4600 
4601  default:
4602  KMP_ASSERT2(0, "Unexpected affinity setting");
4603  }
4604  __kmp_affinity_get_topology_info(affinity);
4605  affinity.flags.initialized = TRUE;
4606 }
4607 
4608 void __kmp_affinity_initialize(kmp_affinity_t &affinity) {
4609  // Much of the code above was written assuming that if a machine was not
4610  // affinity capable, then affinity type == affinity_none.
4611  // We now explicitly represent this as affinity type == affinity_disabled.
4612  // There are too many checks for affinity type == affinity_none in this code.
4613  // Instead of trying to change them all, check if
4614  // affinity type == affinity_disabled, and if so, slam it with affinity_none,
4615  // call the real initialization routine, then restore affinity type to
4616  // affinity_disabled.
4617  int disabled = (affinity.type == affinity_disabled);
4618  if (!KMP_AFFINITY_CAPABLE())
4619  KMP_ASSERT(disabled);
4620  if (disabled)
4621  affinity.type = affinity_none;
4622  __kmp_aux_affinity_initialize(affinity);
4623  if (disabled)
4624  affinity.type = affinity_disabled;
4625 }
4626 
4627 void __kmp_affinity_uninitialize(void) {
4628  for (kmp_affinity_t *affinity : __kmp_affinities) {
4629  if (affinity->masks != NULL)
4630  KMP_CPU_FREE_ARRAY(affinity->masks, affinity->num_masks);
4631  if (affinity->os_id_masks != NULL)
4632  KMP_CPU_FREE_ARRAY(affinity->os_id_masks, affinity->num_os_id_masks);
4633  if (affinity->proclist != NULL)
4634  __kmp_free(affinity->proclist);
4635  if (affinity->ids != NULL)
4636  __kmp_free(affinity->ids);
4637  if (affinity->attrs != NULL)
4638  __kmp_free(affinity->attrs);
4639  *affinity = KMP_AFFINITY_INIT(affinity->env_var);
4640  }
4641  if (__kmp_affin_origMask != NULL) {
4642  if (KMP_AFFINITY_CAPABLE()) {
4643  __kmp_set_system_affinity(__kmp_affin_origMask, FALSE);
4644  }
4645  KMP_CPU_FREE(__kmp_affin_origMask);
4646  __kmp_affin_origMask = NULL;
4647  }
4648  __kmp_affinity_num_places = 0;
4649  if (procarr != NULL) {
4650  __kmp_free(procarr);
4651  procarr = NULL;
4652  }
4653  if (__kmp_osid_to_hwthread_map) {
4654  __kmp_free(__kmp_osid_to_hwthread_map);
4655  __kmp_osid_to_hwthread_map = NULL;
4656  }
4657 #if KMP_USE_HWLOC
4658  if (__kmp_hwloc_topology != NULL) {
4659  hwloc_topology_destroy(__kmp_hwloc_topology);
4660  __kmp_hwloc_topology = NULL;
4661  }
4662 #endif
4663  if (__kmp_hw_subset) {
4664  kmp_hw_subset_t::deallocate(__kmp_hw_subset);
4665  __kmp_hw_subset = nullptr;
4666  }
4667  if (__kmp_topology) {
4668  kmp_topology_t::deallocate(__kmp_topology);
4669  __kmp_topology = nullptr;
4670  }
4671  KMPAffinity::destroy_api();
4672 }
4673 
4674 static void __kmp_select_mask_by_gtid(int gtid, const kmp_affinity_t *affinity,
4675  int *place, kmp_affin_mask_t **mask) {
4676  int mask_idx;
4677  bool is_hidden_helper = KMP_HIDDEN_HELPER_THREAD(gtid);
4678  if (is_hidden_helper)
4679  // The first gtid is the regular primary thread, the second gtid is the main
4680  // thread of the hidden team, which does not participate in task execution.
4681  mask_idx = gtid - 2;
4682  else
4683  mask_idx = __kmp_adjust_gtid_for_hidden_helpers(gtid);
4684  KMP_DEBUG_ASSERT(affinity->num_masks > 0);
4685  *place = (mask_idx + affinity->offset) % affinity->num_masks;
4686  *mask = KMP_CPU_INDEX(affinity->masks, *place);
4687 }
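// Example of the resulting mapping (a sketch): gtid 0 is the regular
// primary thread and gtid 1 the main thread of the hidden team, so hidden
// helper gtids 2, 3, ... select mask indices 0, 1, ...; regular threads
// first have their gtid adjusted to skip the hidden helper range before the
// offset/modulo selection above.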
4688 
4689 // This function initializes the per-thread data concerning affinity including
4690 // the mask and topology information
4691 void __kmp_affinity_set_init_mask(int gtid, int isa_root) {
4692 
4693  kmp_info_t *th = (kmp_info_t *)TCR_SYNC_PTR(__kmp_threads[gtid]);
4694 
4695  // Set the thread topology information to default of unknown
4696  for (int id = 0; id < KMP_HW_LAST; ++id)
4697  th->th.th_topology_ids[id] = kmp_hw_thread_t::UNKNOWN_ID;
4698  th->th.th_topology_attrs = KMP_AFFINITY_ATTRS_UNKNOWN;
4699 
4700  if (!KMP_AFFINITY_CAPABLE()) {
4701  return;
4702  }
4703 
4704  if (th->th.th_affin_mask == NULL) {
4705  KMP_CPU_ALLOC(th->th.th_affin_mask);
4706  } else {
4707  KMP_CPU_ZERO(th->th.th_affin_mask);
4708  }
4709 
4710  // Copy the thread mask to the kmp_info_t structure. If
4711  // __kmp_affinity.type == affinity_none, copy the "full" mask, i.e.
4712  // one that has all of the OS proc ids set, or if
4713  // __kmp_affinity.flags.respect is set, then the full mask is the
4714  // same as the mask of the initialization thread.
4715  kmp_affin_mask_t *mask;
4716  int i;
4717  const kmp_affinity_t *affinity;
4718  const char *env_var;
4719  bool is_hidden_helper = KMP_HIDDEN_HELPER_THREAD(gtid);
4720 
4721  if (is_hidden_helper)
4722  affinity = &__kmp_hh_affinity;
4723  else
4724  affinity = &__kmp_affinity;
4725  env_var = affinity->env_var;
4726 
4727  if (KMP_AFFINITY_NON_PROC_BIND || is_hidden_helper) {
4728  if ((affinity->type == affinity_none) ||
4729  (affinity->type == affinity_balanced) ||
4730  KMP_HIDDEN_HELPER_MAIN_THREAD(gtid)) {
4731 #if KMP_GROUP_AFFINITY
4732  if (__kmp_num_proc_groups > 1) {
4733  return;
4734  }
4735 #endif
4736  KMP_ASSERT(__kmp_affin_fullMask != NULL);
4737  i = 0;
4738  mask = __kmp_affin_fullMask;
4739  } else {
4740  __kmp_select_mask_by_gtid(gtid, affinity, &i, &mask);
4741  }
4742  } else {
4743  if (!isa_root || __kmp_nested_proc_bind.bind_types[0] == proc_bind_false) {
4744 #if KMP_GROUP_AFFINITY
4745  if (__kmp_num_proc_groups > 1) {
4746  return;
4747  }
4748 #endif
4749  KMP_ASSERT(__kmp_affin_fullMask != NULL);
4750  i = KMP_PLACE_ALL;
4751  mask = __kmp_affin_fullMask;
4752  } else {
4753  __kmp_select_mask_by_gtid(gtid, affinity, &i, &mask);
4754  }
4755  }
4756 
4757  th->th.th_current_place = i;
4758  if (isa_root && !is_hidden_helper) {
4759  th->th.th_new_place = i;
4760  th->th.th_first_place = 0;
4761  th->th.th_last_place = affinity->num_masks - 1;
4762  } else if (KMP_AFFINITY_NON_PROC_BIND) {
4763  // When using a Non-OMP_PROC_BIND affinity method,
4764  // set all threads' place-partition-var to the entire place list
4765  th->th.th_first_place = 0;
4766  th->th.th_last_place = affinity->num_masks - 1;
4767  }
4768  // Copy topology information associated with the place
4769  if (i >= 0) {
4770  th->th.th_topology_ids = affinity->ids[i];
4771  th->th.th_topology_attrs = affinity->attrs[i];
4772  }
4773 
4774  if (i == KMP_PLACE_ALL) {
4775  KA_TRACE(100, ("__kmp_affinity_set_init_mask: binding T#%d to all places\n",
4776  gtid));
4777  } else {
4778  KA_TRACE(100, ("__kmp_affinity_set_init_mask: binding T#%d to place %d\n",
4779  gtid, i));
4780  }
4781 
4782  KMP_CPU_COPY(th->th.th_affin_mask, mask);
4783 
4784  /* to avoid duplicate printing (will be correctly printed on barrier) */
4785  if (affinity->flags.verbose &&
4786  (affinity->type == affinity_none ||
4787  (i != KMP_PLACE_ALL && affinity->type != affinity_balanced)) &&
4788  !KMP_HIDDEN_HELPER_MAIN_THREAD(gtid)) {
4789  char buf[KMP_AFFIN_MASK_PRINT_LEN];
4790  __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
4791  th->th.th_affin_mask);
4792  KMP_INFORM(BoundToOSProcSet, env_var, (kmp_int32)getpid(), __kmp_gettid(),
4793  gtid, buf);
4794  }
4795 
4796 #if KMP_OS_WINDOWS
4797  // On Windows* OS, the process affinity mask might have changed. If the user
4798  // didn't request affinity and this call fails, just continue silently.
4799  // See CQ171393.
4800  if (affinity->type == affinity_none) {
4801  __kmp_set_system_affinity(th->th.th_affin_mask, FALSE);
4802  } else
4803 #endif
4804  __kmp_set_system_affinity(th->th.th_affin_mask, TRUE);
4805 }
4806 
4807 void __kmp_affinity_set_place(int gtid) {
4808  // Hidden helper threads should not be affected by OMP_PLACES/OMP_PROC_BIND
4809  if (!KMP_AFFINITY_CAPABLE() || KMP_HIDDEN_HELPER_THREAD(gtid)) {
4810  return;
4811  }
4812 
4813  kmp_info_t *th = (kmp_info_t *)TCR_SYNC_PTR(__kmp_threads[gtid]);
4814 
4815  KA_TRACE(100, ("__kmp_affinity_set_place: binding T#%d to place %d (current "
4816  "place = %d)\n",
4817  gtid, th->th.th_new_place, th->th.th_current_place));
4818 
4819  // Check that the new place is within this thread's partition.
4820  KMP_DEBUG_ASSERT(th->th.th_affin_mask != NULL);
4821  KMP_ASSERT(th->th.th_new_place >= 0);
4822  KMP_ASSERT((unsigned)th->th.th_new_place < __kmp_affinity.num_masks);
4823  if (th->th.th_first_place <= th->th.th_last_place) {
4824  KMP_ASSERT((th->th.th_new_place >= th->th.th_first_place) &&
4825  (th->th.th_new_place <= th->th.th_last_place));
4826  } else {
4827  KMP_ASSERT((th->th.th_new_place <= th->th.th_first_place) ||
4828  (th->th.th_new_place >= th->th.th_last_place));
4829  }
4830 
4831  // Copy the thread mask to the kmp_info_t structure,
4832  // and set this thread's affinity.
4833  kmp_affin_mask_t *mask =
4834  KMP_CPU_INDEX(__kmp_affinity.masks, th->th.th_new_place);
4835  KMP_CPU_COPY(th->th.th_affin_mask, mask);
4836  th->th.th_current_place = th->th.th_new_place;
4837  // Copy topology information associated with the place
4838  th->th.th_topology_ids = __kmp_affinity.ids[th->th.th_new_place];
4839  th->th.th_topology_attrs = __kmp_affinity.attrs[th->th.th_new_place];
4840 
4841  if (__kmp_affinity.flags.verbose) {
4842  char buf[KMP_AFFIN_MASK_PRINT_LEN];
4843  __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
4844  th->th.th_affin_mask);
4845  KMP_INFORM(BoundToOSProcSet, "OMP_PROC_BIND", (kmp_int32)getpid(),
4846  __kmp_gettid(), gtid, buf);
4847  }
4848  __kmp_set_system_affinity(th->th.th_affin_mask, TRUE);
4849 }
4850 
4851 int __kmp_aux_set_affinity(void **mask) {
4852  int gtid;
4853  kmp_info_t *th;
4854  int retval;
4855 
4856  if (!KMP_AFFINITY_CAPABLE()) {
4857  return -1;
4858  }
4859 
4860  gtid = __kmp_entry_gtid();
4861  KA_TRACE(
4862  1000, (""); {
4863  char buf[KMP_AFFIN_MASK_PRINT_LEN];
4864  __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
4865  (kmp_affin_mask_t *)(*mask));
4866  __kmp_debug_printf(
4867  "kmp_set_affinity: setting affinity mask for thread %d = %s\n",
4868  gtid, buf);
4869  });
4870 
4871  if (__kmp_env_consistency_check) {
4872  if ((mask == NULL) || (*mask == NULL)) {
4873  KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
4874  } else {
4875  unsigned proc;
4876  int num_procs = 0;
4877 
4878  KMP_CPU_SET_ITERATE(proc, ((kmp_affin_mask_t *)(*mask))) {
4879  if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
4880  KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
4881  }
4882  if (!KMP_CPU_ISSET(proc, (kmp_affin_mask_t *)(*mask))) {
4883  continue;
4884  }
4885  num_procs++;
4886  }
4887  if (num_procs == 0) {
4888  KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
4889  }
4890 
4891 #if KMP_GROUP_AFFINITY
4892  if (__kmp_get_proc_group((kmp_affin_mask_t *)(*mask)) < 0) {
4893  KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
4894  }
4895 #endif /* KMP_GROUP_AFFINITY */
4896  }
4897  }
4898 
4899  th = __kmp_threads[gtid];
4900  KMP_DEBUG_ASSERT(th->th.th_affin_mask != NULL);
4901  retval = __kmp_set_system_affinity((kmp_affin_mask_t *)(*mask), FALSE);
4902  if (retval == 0) {
4903  KMP_CPU_COPY(th->th.th_affin_mask, (kmp_affin_mask_t *)(*mask));
4904  }
4905 
4906  th->th.th_current_place = KMP_PLACE_UNDEFINED;
4907  th->th.th_new_place = KMP_PLACE_UNDEFINED;
4908  th->th.th_first_place = 0;
4909  th->th.th_last_place = __kmp_affinity.num_masks - 1;
4910 
4911  // Turn off 4.0 affinity for the current thread at this parallel level.
4912  th->th.th_current_task->td_icvs.proc_bind = proc_bind_false;
4913 
4914  return retval;
4915 }
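// Illustrative sketch (an addition, not upstream code): __kmp_aux_set_affinity
// backs the user-level kmp_set_affinity() entry point. Assuming the kmp_*
// affinity API declared in kmp.h/omp.h, a caller might build and apply a
// mask like this:
//
//   kmp_affinity_mask_t new_mask;
//   kmp_create_affinity_mask(&new_mask);       // start from an empty mask
//   kmp_set_affinity_mask_proc(0, &new_mask);  // add OS proc 0
//   if (kmp_set_affinity(&new_mask) != 0) {
//     /* mask was rejected or could not be applied */
//   }
//
// Note that a successful call also turns off place-based (OMP 4.0) affinity
// for the calling thread at this parallel level, as done above.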
4916 
4917 int __kmp_aux_get_affinity(void **mask) {
4918  int gtid;
4919  int retval;
4920 #if KMP_OS_WINDOWS || KMP_DEBUG
4921  kmp_info_t *th;
4922 #endif
4923  if (!KMP_AFFINITY_CAPABLE()) {
4924  return -1;
4925  }
4926 
4927  gtid = __kmp_entry_gtid();
4928 #if KMP_OS_WINDOWS || KMP_DEBUG
4929  th = __kmp_threads[gtid];
4930 #else
4931  (void)gtid; // unused variable
4932 #endif
4933  KMP_DEBUG_ASSERT(th->th.th_affin_mask != NULL);
4934 
4935  KA_TRACE(
4936  1000, (""); {
4937  char buf[KMP_AFFIN_MASK_PRINT_LEN];
4938  __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
4939  th->th.th_affin_mask);
4940  __kmp_printf(
4941  "kmp_get_affinity: stored affinity mask for thread %d = %s\n", gtid,
4942  buf);
4943  });
4944 
4945  if (__kmp_env_consistency_check) {
4946  if ((mask == NULL) || (*mask == NULL)) {
4947  KMP_FATAL(AffinityInvalidMask, "kmp_get_affinity");
4948  }
4949  }
4950 
4951 #if !KMP_OS_WINDOWS
4952 
4953  retval = __kmp_get_system_affinity((kmp_affin_mask_t *)(*mask), FALSE);
4954  KA_TRACE(
4955  1000, (""); {
4956  char buf[KMP_AFFIN_MASK_PRINT_LEN];
4957  __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
4958  (kmp_affin_mask_t *)(*mask));
4959  __kmp_printf(
4960  "kmp_get_affinity: system affinity mask for thread %d = %s\n", gtid,
4961  buf);
4962  });
4963  return retval;
4964 
4965 #else
4966  (void)retval;
4967 
4968  KMP_CPU_COPY((kmp_affin_mask_t *)(*mask), th->th.th_affin_mask);
4969  return 0;
4970 
4971 #endif /* KMP_OS_WINDOWS */
4972 }
4973 
4974 int __kmp_aux_get_affinity_max_proc() {
4975  if (!KMP_AFFINITY_CAPABLE()) {
4976  return 0;
4977  }
4978 #if KMP_GROUP_AFFINITY
4979  if (__kmp_num_proc_groups > 1) {
4980  return (int)(__kmp_num_proc_groups * sizeof(DWORD_PTR) * CHAR_BIT);
4981  }
4982 #endif
4983  return __kmp_xproc;
4984 }
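// Worked example (an addition): on 64-bit Windows with
// __kmp_num_proc_groups == 2, sizeof(DWORD_PTR) == 8 and CHAR_BIT == 8,
// the value returned above is 2 * 8 * 8 = 128, i.e. one 64-bit mask word
// per processor group.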
4985 
4986 int __kmp_aux_set_affinity_mask_proc(int proc, void **mask) {
4987  if (!KMP_AFFINITY_CAPABLE()) {
4988  return -1;
4989  }
4990 
4991  KA_TRACE(
4992  1000, (""); {
4993  int gtid = __kmp_entry_gtid();
4994  char buf[KMP_AFFIN_MASK_PRINT_LEN];
4995  __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
4996  (kmp_affin_mask_t *)(*mask));
4997  __kmp_debug_printf("kmp_set_affinity_mask_proc: setting proc %d in "
4998  "affinity mask for thread %d = %s\n",
4999  proc, gtid, buf);
5000  });
5001 
5002  if (__kmp_env_consistency_check) {
5003  if ((mask == NULL) || (*mask == NULL)) {
5004  KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity_mask_proc");
5005  }
5006  }
5007 
5008  if ((proc < 0) || (proc >= __kmp_aux_get_affinity_max_proc())) {
5009  return -1;
5010  }
5011  if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
5012  return -2;
5013  }
5014 
5015  KMP_CPU_SET(proc, (kmp_affin_mask_t *)(*mask));
5016  return 0;
5017 }
5018 
5019 int __kmp_aux_unset_affinity_mask_proc(int proc, void **mask) {
5020  if (!KMP_AFFINITY_CAPABLE()) {
5021  return -1;
5022  }
5023 
5024  KA_TRACE(
5025  1000, (""); {
5026  int gtid = __kmp_entry_gtid();
5027  char buf[KMP_AFFIN_MASK_PRINT_LEN];
5028  __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
5029  (kmp_affin_mask_t *)(*mask));
5030  __kmp_debug_printf("kmp_unset_affinity_mask_proc: unsetting proc %d in "
5031  "affinity mask for thread %d = %s\n",
5032  proc, gtid, buf);
5033  });
5034 
5035  if (__kmp_env_consistency_check) {
5036  if ((mask == NULL) || (*mask == NULL)) {
5037  KMP_FATAL(AffinityInvalidMask, "kmp_unset_affinity_mask_proc");
5038  }
5039  }
5040 
5041  if ((proc < 0) || (proc >= __kmp_aux_get_affinity_max_proc())) {
5042  return -1;
5043  }
5044  if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
5045  return -2;
5046  }
5047 
5048  KMP_CPU_CLR(proc, (kmp_affin_mask_t *)(*mask));
5049  return 0;
5050 }
5051 
5052 int __kmp_aux_get_affinity_mask_proc(int proc, void **mask) {
5053  if (!KMP_AFFINITY_CAPABLE()) {
5054  return -1;
5055  }
5056 
5057  KA_TRACE(
5058  1000, (""); {
5059  int gtid = __kmp_entry_gtid();
5060  char buf[KMP_AFFIN_MASK_PRINT_LEN];
5061  __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
5062  (kmp_affin_mask_t *)(*mask));
5063  __kmp_debug_printf("kmp_get_affinity_mask_proc: getting proc %d in "
5064  "affinity mask for thread %d = %s\n",
5065  proc, gtid, buf);
5066  });
5067 
5068  if (__kmp_env_consistency_check) {
5069  if ((mask == NULL) || (*mask == NULL)) {
5070  KMP_FATAL(AffinityInvalidMask, "kmp_get_affinity_mask_proc");
5071  }
5072  }
5073 
5074  if ((proc < 0) || (proc >= __kmp_aux_get_affinity_max_proc())) {
5075  return -1;
5076  }
5077  if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
5078  return 0;
5079  }
5080 
5081  return KMP_CPU_ISSET(proc, (kmp_affin_mask_t *)(*mask));
5082 }
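// Illustrative sketch (an addition, not upstream code): the three
// *_mask_proc helpers above back the user-level
// kmp_{set,unset,get}_affinity_mask_proc() entry points, which edit one OS
// proc at a time in a user-owned mask. Assuming that API, a typical
// sequence is:
//
//   kmp_affinity_mask_t mask;
//   kmp_create_affinity_mask(&mask);
//   kmp_set_affinity_mask_proc(3, &mask);       // -1/-2 on invalid proc
//   if (kmp_get_affinity_mask_proc(3, &mask) == 1)
//     kmp_unset_affinity_mask_proc(3, &mask);   // clear it again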
5083 
5084 // Dynamic affinity settings - Affinity balanced
5085 void __kmp_balanced_affinity(kmp_info_t *th, int nthreads) {
5086  KMP_DEBUG_ASSERT(th);
5087  bool fine_gran = true;
5088  int tid = th->th.th_info.ds.ds_tid;
5089  const char *env_var = "KMP_AFFINITY";
5090 
5091  // Do not perform balanced affinity for the hidden helper threads
5092  if (KMP_HIDDEN_HELPER_THREAD(__kmp_gtid_from_thread(th)))
5093  return;
5094 
5095  switch (__kmp_affinity.gran) {
5096  case KMP_HW_THREAD:
5097  break;
5098  case KMP_HW_CORE:
5099  if (__kmp_nThreadsPerCore > 1) {
5100  fine_gran = false;
5101  }
5102  break;
5103  case KMP_HW_SOCKET:
5104  if (nCoresPerPkg > 1) {
5105  fine_gran = false;
5106  }
5107  break;
5108  default:
5109  fine_gran = false;
5110  }
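  // Summary note (an addition): fine_gran == true means each thread will be
  // pinned to a single hardware thread; when the requested granularity
  // (core or socket) spans multiple hardware threads, fine_gran is false
  // and a thread is bound to every context of its assigned core below.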
5111 
5112  if (__kmp_topology->is_uniform()) {
5113  int coreID;
5114  int threadID;
5116  // Number of hardware threads per core on a hyperthreaded (HT) machine
5116  int __kmp_nth_per_core = __kmp_avail_proc / __kmp_ncores;
5117  // Number of cores
5118  int ncores = __kmp_ncores;
5119  if ((nPackages > 1) && (__kmp_nth_per_core <= 1)) {
5120  __kmp_nth_per_core = __kmp_avail_proc / nPackages;
5121  ncores = nPackages;
5122  }
5123  // How many threads will be bound to each core
5124  int chunk = nthreads / ncores;
5126  // How many cores will have an additional thread bound to them - "big cores"
5126  int big_cores = nthreads % ncores;
5127  // Number of threads on the big cores
5128  int big_nth = (chunk + 1) * big_cores;
5129  if (tid < big_nth) {
5130  coreID = tid / (chunk + 1);
5131  threadID = (tid % (chunk + 1)) % __kmp_nth_per_core;
5132  } else { // tid >= big_nth
5133  coreID = (tid - big_cores) / chunk;
5134  threadID = ((tid - big_cores) % chunk) % __kmp_nth_per_core;
5135  }
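    // Worked example (an addition): with nthreads = 10 and ncores = 4,
    // chunk = 2, big_cores = 2 and big_nth = 6. Threads 0-5 land on the two
    // "big" cores (three threads each, coreID = tid / 3), and threads 6-9
    // are split between the remaining two cores (two threads each).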
5136  KMP_DEBUG_ASSERT2(KMP_AFFINITY_CAPABLE(),
5137  "Illegal set affinity operation when not capable");
5138 
5139  kmp_affin_mask_t *mask = th->th.th_affin_mask;
5140  KMP_CPU_ZERO(mask);
5141 
5142  if (fine_gran) {
5143  int osID =
5144  __kmp_topology->at(coreID * __kmp_nth_per_core + threadID).os_id;
5145  KMP_CPU_SET(osID, mask);
5146  } else {
5147  for (int i = 0; i < __kmp_nth_per_core; i++) {
5148  int osID;
5149  osID = __kmp_topology->at(coreID * __kmp_nth_per_core + i).os_id;
5150  KMP_CPU_SET(osID, mask);
5151  }
5152  }
5153  if (__kmp_affinity.flags.verbose) {
5154  char buf[KMP_AFFIN_MASK_PRINT_LEN];
5155  __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, mask);
5156  KMP_INFORM(BoundToOSProcSet, env_var, (kmp_int32)getpid(), __kmp_gettid(),
5157  tid, buf);
5158  }
5159  __kmp_affinity_get_thread_topology_info(th);
5160  __kmp_set_system_affinity(mask, TRUE);
5161  } else { // Non-uniform topology
5162 
5163  kmp_affin_mask_t *mask = th->th.th_affin_mask;
5164  KMP_CPU_ZERO(mask);
5165 
5166  int core_level =
5167  __kmp_affinity_find_core_level(__kmp_avail_proc, __kmp_aff_depth - 1);
5168  int ncores = __kmp_affinity_compute_ncores(__kmp_avail_proc,
5169  __kmp_aff_depth - 1, core_level);
5170  int nth_per_core = __kmp_affinity_max_proc_per_core(
5171  __kmp_avail_proc, __kmp_aff_depth - 1, core_level);
5172 
5174  // For a performance gain, handle the special case nthreads ==
5175  // __kmp_avail_proc separately
5175  if (nthreads == __kmp_avail_proc) {
5176  if (fine_gran) {
5177  int osID = __kmp_topology->at(tid).os_id;
5178  KMP_CPU_SET(osID, mask);
5179  } else {
5180  int core =
5181  __kmp_affinity_find_core(tid, __kmp_aff_depth - 1, core_level);
5182  for (int i = 0; i < __kmp_avail_proc; i++) {
5183  int osID = __kmp_topology->at(i).os_id;
5184  if (__kmp_affinity_find_core(i, __kmp_aff_depth - 1, core_level) ==
5185  core) {
5186  KMP_CPU_SET(osID, mask);
5187  }
5188  }
5189  }
5190  } else if (nthreads <= ncores) {
5191 
5192  int core = 0;
5193  for (int i = 0; i < ncores; i++) {
5195  // Check if this core from procarr[] has any available procs
5195  int in_mask = 0;
5196  for (int j = 0; j < nth_per_core; j++) {
5197  if (procarr[i * nth_per_core + j] != -1) {
5198  in_mask = 1;
5199  break;
5200  }
5201  }
5202  if (in_mask) {
5203  if (tid == core) {
5204  for (int j = 0; j < nth_per_core; j++) {
5205  int osID = procarr[i * nth_per_core + j];
5206  if (osID != -1) {
5207  KMP_CPU_SET(osID, mask);
5208  // For fine granularity it is enough to set the first available
5209  // osID for this core
5210  if (fine_gran) {
5211  break;
5212  }
5213  }
5214  }
5215  break;
5216  } else {
5217  core++;
5218  }
5219  }
5220  }
5221  } else { // nthreads > ncores
5222  // Array to save the number of processors at each core
5223  int *nproc_at_core = (int *)KMP_ALLOCA(sizeof(int) * ncores);
5225  // Array to save the number of cores with "x" available processors
5225  int *ncores_with_x_procs =
5226  (int *)KMP_ALLOCA(sizeof(int) * (nth_per_core + 1));
5228  // Array to save the number of cores with at least "x" available processors
5228  int *ncores_with_x_to_max_procs =
5229  (int *)KMP_ALLOCA(sizeof(int) * (nth_per_core + 1));
5230 
5231  for (int i = 0; i <= nth_per_core; i++) {
5232  ncores_with_x_procs[i] = 0;
5233  ncores_with_x_to_max_procs[i] = 0;
5234  }
5235 
5236  for (int i = 0; i < ncores; i++) {
5237  int cnt = 0;
5238  for (int j = 0; j < nth_per_core; j++) {
5239  if (procarr[i * nth_per_core + j] != -1) {
5240  cnt++;
5241  }
5242  }
5243  nproc_at_core[i] = cnt;
5244  ncores_with_x_procs[cnt]++;
5245  }
5246 
5247  for (int i = 0; i <= nth_per_core; i++) {
5248  for (int j = i; j <= nth_per_core; j++) {
5249  ncores_with_x_to_max_procs[i] += ncores_with_x_procs[j];
5250  }
5251  }
5252 
5253  // Max number of processors
5254  int nproc = nth_per_core * ncores;
5256  // An array to keep the number of threads per context
5256  int *newarr = (int *)__kmp_allocate(sizeof(int) * nproc);
5257  for (int i = 0; i < nproc; i++) {
5258  newarr[i] = 0;
5259  }
5260 
5261  int nth = nthreads;
5262  int flag = 0;
5263  while (nth > 0) {
5264  for (int j = 1; j <= nth_per_core; j++) {
5265  int cnt = ncores_with_x_to_max_procs[j];
5266  for (int i = 0; i < ncores; i++) {
5267  // Skip cores with 0 available processors
5268  if (nproc_at_core[i] == 0) {
5269  continue;
5270  }
5271  for (int k = 0; k < nth_per_core; k++) {
5272  if (procarr[i * nth_per_core + k] != -1) {
5273  if (newarr[i * nth_per_core + k] == 0) {
5274  newarr[i * nth_per_core + k] = 1;
5275  cnt--;
5276  nth--;
5277  break;
5278  } else {
5279  if (flag != 0) {
5280  newarr[i * nth_per_core + k]++;
5281  cnt--;
5282  nth--;
5283  break;
5284  }
5285  }
5286  }
5287  }
5288  if (cnt == 0 || nth == 0) {
5289  break;
5290  }
5291  }
5292  if (nth == 0) {
5293  break;
5294  }
5295  }
5296  flag = 1;
5297  }
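      // Worked example (an addition): with two cores, where core 0 has two
      // available contexts and core 1 has one, and nthreads = 5, the first
      // while pass places one thread on each of the three contexts; once
      // flag is set, the second pass stacks the two remaining threads on the
      // first context of each core, so newarr holds {2, 1} for core 0 and
      // {2} for core 1.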
5298  int sum = 0;
5299  for (int i = 0; i < nproc; i++) {
5300  sum += newarr[i];
5301  if (sum > tid) {
5302  if (fine_gran) {
5303  int osID = procarr[i];
5304  KMP_CPU_SET(osID, mask);
5305  } else {
5306  int coreID = i / nth_per_core;
5307  for (int ii = 0; ii < nth_per_core; ii++) {
5308  int osID = procarr[coreID * nth_per_core + ii];
5309  if (osID != -1) {
5310  KMP_CPU_SET(osID, mask);
5311  }
5312  }
5313  }
5314  break;
5315  }
5316  }
5317  __kmp_free(newarr);
5318  }
5319 
5320  if (__kmp_affinity.flags.verbose) {
5321  char buf[KMP_AFFIN_MASK_PRINT_LEN];
5322  __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, mask);
5323  KMP_INFORM(BoundToOSProcSet, env_var, (kmp_int32)getpid(), __kmp_gettid(),
5324  tid, buf);
5325  }
5326  __kmp_affinity_get_thread_topology_info(th);
5327  __kmp_set_system_affinity(mask, TRUE);
5328  }
5329 }
5330 
5331 #if KMP_OS_LINUX || KMP_OS_FREEBSD
5332 // We don't need this entry point on Windows because
5333 // the GetProcessAffinityMask() API is available there
5334 //
5335 // The intended usage is indicated by these steps (see the sketch below):
5336 // 1) The user gets the current affinity mask
5337 // 2) Then sets the affinity by calling this function
5338 // 3) Error-checks the return value
5339 // 4) Uses non-OpenMP parallelization
5340 // 5) Resets the affinity to what was stored in step 1)
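//
// A minimal sketch of that sequence (an addition; run_non_openmp_work() is a
// hypothetical stand-in for the user's non-OpenMP parallel phase):
//
//   kmp_affinity_mask_t saved;
//   kmp_create_affinity_mask(&saved);
//   kmp_get_affinity(&saved);                        // step 1
//   int rc = kmp_set_thread_affinity_mask_initial(); // step 2
//   if (rc == 0) {                                   // step 3
//     run_non_openmp_work();                         // step 4 (hypothetical)
//   }
//   kmp_set_affinity(&saved);                        // step 5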
5341 #ifdef __cplusplus
5342 extern "C"
5343 #endif
5344  int
5345  kmp_set_thread_affinity_mask_initial()
5346 // the function returns 0 on success,
5347 // -1 if we cannot bind the thread,
5348 // >0 (errno) if an error happened during binding
5349 {
5350  int gtid = __kmp_get_gtid();
5351  if (gtid < 0) {
5352  // Do not touch non-omp threads
5353  KA_TRACE(30, ("kmp_set_thread_affinity_mask_initial: "
5354  "non-omp thread, returning\n"));
5355  return -1;
5356  }
5357  if (!KMP_AFFINITY_CAPABLE() || !__kmp_init_middle) {
5358  KA_TRACE(30, ("kmp_set_thread_affinity_mask_initial: "
5359  "affinity not initialized, returning\n"));
5360  return -1;
5361  }
5362  KA_TRACE(30, ("kmp_set_thread_affinity_mask_initial: "
5363  "set full mask for thread %d\n",
5364  gtid));
5365  KMP_DEBUG_ASSERT(__kmp_affin_fullMask != NULL);
5366  return __kmp_set_system_affinity(__kmp_affin_fullMask, FALSE);
5367 }
5368 #endif
5369 
5370 #endif // KMP_AFFINITY_SUPPORTED