#include "kmp_wrapper_getpid.h"
#include "kmp_affinity.h"

void __kmp_cleanup_hierarchy() {
    machine_hierarchy.fini();
}

void __kmp_get_hierarchy(kmp_uint32 nproc, kmp_bstate_t *thr_bar) {
    kmp_uint32 depth;
    // Initialize the hierarchy on first use of a hierarchical barrier.
    if (TCR_1(machine_hierarchy.uninitialized))
        machine_hierarchy.init(NULL, nproc);

    // Adjust the hierarchy in case the number of threads grew.
    if (nproc > machine_hierarchy.base_num_threads)
        machine_hierarchy.resize(nproc);

    depth = machine_hierarchy.depth;
    KMP_DEBUG_ASSERT(depth > 0);

    thr_bar->depth = depth;
    thr_bar->base_leaf_kids = (kmp_uint8)machine_hierarchy.numPerLevel[0] - 1;
    thr_bar->skip_per_level = machine_hierarchy.skipPerLevel;
}
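// Everything below this point is compiled only when affinity support is
// enabled. First come two variants of a helper that pretty-prints an
// affinity mask into a caller-supplied buffer in "{0,1,2,...}" form.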
#if KMP_AFFINITY_SUPPORTED

#if KMP_USE_HWLOC
char *
__kmp_affinity_print_mask(char *buf, int buf_len, kmp_affin_mask_t *mask)
{
    int num_chars_to_write, num_chars_written;
    KMP_ASSERT(buf_len >= 40);

    // A buffer size of zero just queries how many characters are needed.
    num_chars_to_write = hwloc_bitmap_list_snprintf(buf, 0, (hwloc_bitmap_t)mask);

    if (hwloc_bitmap_iszero((hwloc_bitmap_t)mask)) {
        KMP_SNPRINTF(buf, buf_len, "{<empty>}");
    }
    else if (num_chars_to_write < buf_len - 3) {
        // The whole list fits: '{' + list + '}' + '\0'.
        buf[0] = '{';
        num_chars_written = hwloc_bitmap_list_snprintf(buf + 1, buf_len - 3,
          (hwloc_bitmap_t)mask);
        buf[num_chars_written + 1] = '}';
        buf[num_chars_written + 2] = '\0';
    }
    else {
        // Truncate: write as much as fits, then back up over any partially
        // printed number and append "...}".
        buf[0] = '{';
        hwloc_bitmap_list_snprintf(buf + 1, buf_len - 7, (hwloc_bitmap_t)mask);
        char *scan = buf + buf_len - 7;
        while (scan > buf && *scan >= '0' && *scan <= '9')
            scan--;
        scan[1] = '.';
        scan[2] = '.';
        scan[3] = '.';
        scan[4] = '}';
        scan[5] = '\0';
    }
    return buf;
}
#else
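// Fallback printer used when hwloc is not available: walk the fixed-size
// CPU set directly, emitting ",..." once fewer than 15 bytes remain.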
char *
__kmp_affinity_print_mask(char *buf, int buf_len, kmp_affin_mask_t *mask)
{
    KMP_ASSERT(buf_len >= 40);
    char *scan = buf;
    char *end = buf + buf_len - 1;

    // Find the first element / check for an empty set.
    size_t i;
    for (i = 0; i < KMP_CPU_SETSIZE; i++) {
        if (KMP_CPU_ISSET(i, mask)) {
            break;
        }
    }
    if (i == KMP_CPU_SETSIZE) {
        KMP_SNPRINTF(scan, end - scan + 1, "{<empty>}");
        while (*scan != '\0') scan++;
        KMP_ASSERT(scan <= end);
        return buf;
    }

    KMP_SNPRINTF(scan, end - scan + 1, "{%ld", (long)i);
    while (*scan != '\0') scan++;
    i++;
    for (; i < KMP_CPU_SETSIZE; i++) {
        if (! KMP_CPU_ISSET(i, mask)) {
            continue;
        }

        // A string of the form ",<n>" takes at most 10 characters, and we
        // want to leave room to print ",...}" if the set is too large --
        // 15 characters in all.
        if (end - scan < 15) {
            break;
        }
        KMP_SNPRINTF(scan, end - scan + 1, ",%-ld", (long)i);
        while (*scan != '\0') scan++;
    }
    if (i < KMP_CPU_SETSIZE) {
        KMP_SNPRINTF(scan, end - scan + 1, ",...");
        while (*scan != '\0') scan++;
    }
    KMP_SNPRINTF(scan, end - scan + 1, "}");
    while (*scan != '\0') scan++;
    KMP_ASSERT(scan <= end);
    return buf;
}
#endif // KMP_USE_HWLOC

void
__kmp_affinity_entire_machine_mask(kmp_affin_mask_t *mask)
{
    KMP_CPU_ZERO(mask);

# if KMP_GROUP_AFFINITY
    if (__kmp_num_proc_groups > 1) {
        int group;
        KMP_DEBUG_ASSERT(__kmp_GetActiveProcessorCount != NULL);
        for (group = 0; group < __kmp_num_proc_groups; group++) {
            int i;
            int num = __kmp_GetActiveProcessorCount(group);
            for (i = 0; i < num; i++) {
                KMP_CPU_SET(i + group * (CHAR_BIT * sizeof(DWORD_PTR)), mask);
            }
        }
    }
    else
# endif /* KMP_GROUP_AFFINITY */
    {
        int proc;
        for (proc = 0; proc < __kmp_xproc; proc++) {
            KMP_CPU_SET(proc, mask);
        }
    }
}
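// When sorting addresses by labels, __kmp_affinity_assign_child_nums() must
// first be called to renumber each level's labels into ordinal child numbers
// (childNums), since the raw labels used under one node of the hierarchy
// (e.g. packages 601/602) may differ from those under a sibling node
// (e.g. 603/604); sorting by childNums yields the intended physical order.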
static void
__kmp_affinity_assign_child_nums(AddrUnsPair *address2os, int numAddrs)
{
    KMP_DEBUG_ASSERT(numAddrs > 0);
    int depth = address2os->first.depth;
    unsigned *counts = (unsigned *)__kmp_allocate(depth * sizeof(unsigned));
    unsigned *lastLabel = (unsigned *)__kmp_allocate(depth * sizeof(unsigned));
    int labCt;
    for (labCt = 0; labCt < depth; labCt++) {
        address2os[0].first.childNums[labCt] = counts[labCt] = 0;
        lastLabel[labCt] = address2os[0].first.labels[labCt];
    }
    int i;
    for (i = 1; i < numAddrs; i++) {
        for (labCt = 0; labCt < depth; labCt++) {
            if (address2os[i].first.labels[labCt] != lastLabel[labCt]) {
                int labCt2;
                for (labCt2 = labCt + 1; labCt2 < depth; labCt2++) {
                    counts[labCt2] = 0;
                    lastLabel[labCt2] = address2os[i].first.labels[labCt2];
                }
                counts[labCt]++;
                lastLabel[labCt] = address2os[i].first.labels[labCt];
                break;
            }
        }
        for (labCt = 0; labCt < depth; labCt++) {
            address2os[i].first.childNums[labCt] = counts[labCt];
        }
        for (; labCt < (int)Address::maxDepth; labCt++) {
            address2os[i].first.childNums[labCt] = 0;
        }
    }
    __kmp_free(lastLabel);
    __kmp_free(counts);
}
// All of the __kmp_affinity_create_*_map() routines save the affinity mask
// of the initialization thread here, so it only needs to be queried once.
static kmp_affin_mask_t *fullMask = NULL;

kmp_affin_mask_t *
__kmp_affinity_get_fullMask() { return fullMask; }

static int nCoresPerPkg, nPackages;
static int __kmp_nThreadsPerCore;
#ifndef KMP_DFLT_NTH_CORES
static int __kmp_ncores;
#endif

// A topology is "uniform" when every package has the same number of cores,
// each with the same number of hardware thread contexts.
static bool
__kmp_affinity_uniform_topology()
{
    return __kmp_avail_proc == (__kmp_nThreadsPerCore * nCoresPerPkg * nPackages);
}
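// Print out the detailed machine topology map, i.e. the physical location
// of each OS proc.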
static void
__kmp_affinity_print_topology(AddrUnsPair *address2os, int len, int depth,
  int pkgLevel, int coreLevel, int threadLevel)
{
    int proc;

    KMP_INFORM(OSProcToPhysicalThreadMap, "KMP_AFFINITY");
    for (proc = 0; proc < len; proc++) {
        int level;
        kmp_str_buf_t buf;
        __kmp_str_buf_init(&buf);
        for (level = 0; level < depth; level++) {
            if (level == threadLevel) {
                __kmp_str_buf_print(&buf, "%s ", KMP_I18N_STR(Thread));
            }
            else if (level == coreLevel) {
                __kmp_str_buf_print(&buf, "%s ", KMP_I18N_STR(Core));
            }
            else if (level == pkgLevel) {
                __kmp_str_buf_print(&buf, "%s ", KMP_I18N_STR(Package));
            }
            else if (level > pkgLevel) {
                __kmp_str_buf_print(&buf, "%s_%d ", KMP_I18N_STR(Node),
                  level - pkgLevel - 1);
            }
            else {
                __kmp_str_buf_print(&buf, "L%d ", level);
            }
            __kmp_str_buf_print(&buf, "%d ",
              address2os[proc].first.labels[level]);
        }
        KMP_INFORM(OSProcMapToPack, "KMP_AFFINITY", address2os[proc].second,
          buf.str);
        __kmp_str_buf_free(&buf);
    }
}
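// Use hwloc to query the machine topology. Note that the code below assumes
// a four-level topology (machine -> package -> core -> hardware thread);
// radix-1 levels reported by hwloc are stripped out further down.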
#if KMP_USE_HWLOC
static int
__kmp_affinity_create_hwloc_map(AddrUnsPair **address2os,
  kmp_i18n_id_t *const msg_id)
{
    *msg_id = kmp_i18n_null;

    // Save the affinity mask for the current thread.
    kmp_affin_mask_t *oldMask;
    KMP_CPU_ALLOC(oldMask);
    __kmp_get_system_affinity(oldMask, TRUE);

    unsigned depth = hwloc_topology_get_depth(__kmp_hwloc_topology);
    int threadLevel = hwloc_get_type_depth(__kmp_hwloc_topology, HWLOC_OBJ_PU);
    int coreLevel = hwloc_get_type_depth(__kmp_hwloc_topology, HWLOC_OBJ_CORE);
    int pkgLevel = hwloc_get_type_depth(__kmp_hwloc_topology, HWLOC_OBJ_SOCKET);
    __kmp_nThreadsPerCore = nCoresPerPkg = nPackages = 0;

    // Count packages, cores per package, and threads per core by walking the
    // children of the root object, the first package, and the first core.
    hwloc_obj_t current_level_iterator = hwloc_get_root_obj(__kmp_hwloc_topology);
    hwloc_obj_t child_iterator;
    for(child_iterator = hwloc_get_next_child(__kmp_hwloc_topology, current_level_iterator, NULL);
        child_iterator != NULL;
        child_iterator = hwloc_get_next_child(__kmp_hwloc_topology, current_level_iterator, child_iterator))
    {
        nPackages++;
    }
    current_level_iterator = hwloc_get_obj_by_depth(__kmp_hwloc_topology, pkgLevel, 0);
    for(child_iterator = hwloc_get_next_child(__kmp_hwloc_topology, current_level_iterator, NULL);
        child_iterator != NULL;
        child_iterator = hwloc_get_next_child(__kmp_hwloc_topology, current_level_iterator, child_iterator))
    {
        nCoresPerPkg++;
    }
    current_level_iterator = hwloc_get_obj_by_depth(__kmp_hwloc_topology, coreLevel, 0);
    for(child_iterator = hwloc_get_next_child(__kmp_hwloc_topology, current_level_iterator, NULL);
        child_iterator != NULL;
        child_iterator = hwloc_get_next_child(__kmp_hwloc_topology, current_level_iterator, child_iterator))
    {
        __kmp_nThreadsPerCore++;
    }
    // If affinity is not capable, infer the remaining topology counts and
    // return without creating a map.
    if (! KMP_AFFINITY_CAPABLE()) {
        KMP_ASSERT(__kmp_affinity_type == affinity_none);

        __kmp_ncores = __kmp_xproc / __kmp_nThreadsPerCore;
        nPackages = (__kmp_xproc + nCoresPerPkg - 1) / nCoresPerPkg;
        if (__kmp_affinity_verbose) {
            KMP_INFORM(AffNotCapableUseLocCpuidL11, "KMP_AFFINITY");
            KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
            if (__kmp_affinity_uniform_topology()) {
                KMP_INFORM(Uniform, "KMP_AFFINITY");
            } else {
                KMP_INFORM(NonUniform, "KMP_AFFINITY");
            }
            KMP_INFORM(Topology, "KMP_AFFINITY", nPackages, nCoresPerPkg,
              __kmp_nThreadsPerCore, __kmp_ncores);
        }
        return 0;
    }
    // Allocate the data structure to be returned.
    AddrUnsPair *retval = (AddrUnsPair *)__kmp_allocate(
      sizeof(AddrUnsPair) * __kmp_avail_proc);

    unsigned num_hardware_threads = hwloc_get_nbobjs_by_depth(
      __kmp_hwloc_topology, threadLevel);
    unsigned i;
    hwloc_obj_t hardware_thread_iterator;
    int nActiveThreads = 0;
    for(i=0;i<num_hardware_threads;i++) {
        hardware_thread_iterator = hwloc_get_obj_by_depth(__kmp_hwloc_topology, threadLevel, i);
        Address addr(3);
        if(! KMP_CPU_ISSET(i, fullMask)) continue;
        addr.labels[0] = hardware_thread_iterator->parent->parent->logical_index;
        addr.labels[1] = hardware_thread_iterator->parent->logical_index % nCoresPerPkg;
        addr.labels[2] = hardware_thread_iterator->logical_index % __kmp_nThreadsPerCore;
        retval[nActiveThreads] = AddrUnsPair(addr, hardware_thread_iterator->os_index);
        nActiveThreads++;
    }
    // If there's only one thread context to bind to, return now.
    KMP_ASSERT(nActiveThreads > 0);
    if (nActiveThreads == 1) {
        __kmp_ncores = nPackages = 1;
        __kmp_nThreadsPerCore = nCoresPerPkg = 1;
        if (__kmp_affinity_verbose) {
            char buf[KMP_AFFIN_MASK_PRINT_LEN];
            __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, oldMask);

            KMP_INFORM(AffUsingHwloc, "KMP_AFFINITY");
            if (__kmp_affinity_respect_mask) {
                KMP_INFORM(InitOSProcSetRespect, "KMP_AFFINITY", buf);
            } else {
                KMP_INFORM(InitOSProcSetNotRespect, "KMP_AFFINITY", buf);
            }
            KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
            KMP_INFORM(Uniform, "KMP_AFFINITY");
            KMP_INFORM(Topology, "KMP_AFFINITY", nPackages, nCoresPerPkg,
              __kmp_nThreadsPerCore, __kmp_ncores);
        }

        if (__kmp_affinity_type == affinity_none) {
            __kmp_free(retval);
            KMP_CPU_FREE(oldMask);
            return 0;
        }

        // Form an Address object which only includes the package level.
        Address addr(1);
        addr.labels[0] = retval[0].first.labels[pkgLevel-1];
        retval[0].first = addr;

        if (__kmp_affinity_gran_levels < 0) {
            __kmp_affinity_gran_levels = 0;
        }

        if (__kmp_affinity_verbose) {
            __kmp_affinity_print_topology(retval, 1, 1, 0, -1, -1);
        }

        *address2os = retval;
        KMP_CPU_FREE(oldMask);
        return 1;
    }
    // Sort the table by physical Id.
    qsort(retval, nActiveThreads, sizeof(*retval),
      __kmp_affinity_cmp_Address_labels);

    // Even when affinity is off, this routine still sets __kmp_ncores,
    // __kmp_nThreadsPerCore, nCoresPerPkg, and nPackages.
    __kmp_ncores = hwloc_get_nbobjs_by_depth(__kmp_hwloc_topology, coreLevel);

    // Check whether the machine topology is uniform.
    unsigned npackages = hwloc_get_nbobjs_by_depth(__kmp_hwloc_topology, pkgLevel);
    unsigned ncores = __kmp_ncores;
    unsigned nthreads = hwloc_get_nbobjs_by_depth(__kmp_hwloc_topology, threadLevel);
    unsigned uniform = (npackages * nCoresPerPkg * __kmp_nThreadsPerCore == nthreads);

    // Print the machine topology summary.
    if (__kmp_affinity_verbose) {
        char mask[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(mask, KMP_AFFIN_MASK_PRINT_LEN, oldMask);

        KMP_INFORM(AffUsingHwloc, "KMP_AFFINITY");
        if (__kmp_affinity_respect_mask) {
            KMP_INFORM(InitOSProcSetRespect, "KMP_AFFINITY", mask);
        } else {
            KMP_INFORM(InitOSProcSetNotRespect, "KMP_AFFINITY", mask);
        }
        KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
        if (uniform) {
            KMP_INFORM(Uniform, "KMP_AFFINITY");
        } else {
            KMP_INFORM(NonUniform, "KMP_AFFINITY");
        }

        kmp_str_buf_t buf;
        __kmp_str_buf_init(&buf);

        __kmp_str_buf_print(&buf, "%d", npackages);
        KMP_INFORM(TopologyExtra, "KMP_AFFINITY", buf.str, nCoresPerPkg,
          __kmp_nThreadsPerCore, __kmp_ncores);

        __kmp_str_buf_free(&buf);
    }
    if (__kmp_affinity_type == affinity_none) {
        KMP_CPU_FREE(oldMask);
        return 0;
    }

    // Find any levels with radix 1, and remove them from the map
    // (except for the package level).
    int new_depth = 0;
    int level;
    unsigned proc;
    for (level = 1; level < (int)depth; level++) {
        if ((hwloc_get_nbobjs_by_depth(__kmp_hwloc_topology, level) == 1)
            && (level != pkgLevel)) {
            continue;
        }
        new_depth++;
    }

    // If any levels are being removed, allocate a new vector to return and
    // copy over the relevant information.
    if (new_depth != depth-1) {
        AddrUnsPair *new_retval = (AddrUnsPair *)__kmp_allocate(
          sizeof(AddrUnsPair) * nActiveThreads);
        for (proc = 0; (int)proc < nActiveThreads; proc++) {
            Address addr(new_depth);
            new_retval[proc] = AddrUnsPair(addr, retval[proc].second);
        }
        int new_level = 0;
        for (level = 1; level < (int)depth; level++) {
            if ((hwloc_get_nbobjs_by_depth(__kmp_hwloc_topology, level) == 1)
                && (level != pkgLevel)) {
                if (level == threadLevel) {
                    threadLevel = -1;
                }
                else if ((threadLevel >= 0) && (level < threadLevel)) {
                    threadLevel--;
                }
                if (level == coreLevel) {
                    coreLevel = -1;
                }
                else if ((coreLevel >= 0) && (level < coreLevel)) {
                    coreLevel--;
                }
                if (level < pkgLevel) {
                    pkgLevel--;
                }
                continue;
            }
            for (proc = 0; (int)proc < nActiveThreads; proc++) {
                new_retval[proc].first.labels[new_level]
                  = retval[proc].first.labels[level];
            }
            new_level++;
        }

        __kmp_free(retval);
        retval = new_retval;
    }

    if (__kmp_affinity_gran_levels < 0) {
        // Set the granularity level based on what levels are modeled
        // in the machine topology map.
        __kmp_affinity_gran_levels = 0;
        if ((threadLevel-1 >= 0) && (__kmp_affinity_gran > affinity_gran_thread)) {
            __kmp_affinity_gran_levels++;
        }
        if ((coreLevel-1 >= 0) && (__kmp_affinity_gran > affinity_gran_core)) {
            __kmp_affinity_gran_levels++;
        }
        if (__kmp_affinity_gran > affinity_gran_package) {
            __kmp_affinity_gran_levels++;
        }
    }

    if (__kmp_affinity_verbose) {
        __kmp_affinity_print_topology(retval, nActiveThreads, depth-1,
          pkgLevel-1, coreLevel-1, threadLevel-1);
    }

    KMP_CPU_FREE(oldMask);
    *address2os = retval;
    if (depth == 0) return 0;
    return depth;
}
#endif // KMP_USE_HWLOC

// If we don't know how to retrieve the machine's processor topology, or
// encounter an error in doing so, this routine is called to form a "flat"
// mapping of os thread ids <-> processor ids.
static int
__kmp_affinity_create_flat_map(AddrUnsPair **address2os,
  kmp_i18n_id_t *const msg_id)
{
    *msg_id = kmp_i18n_null;

    // Even if __kmp_affinity_type == affinity_none, this routine is still
    // called to set __kmp_ncores, __kmp_nThreadsPerCore, nCoresPerPkg,
    // and nPackages.
    if (! KMP_AFFINITY_CAPABLE()) {
        KMP_ASSERT(__kmp_affinity_type == affinity_none);
        __kmp_ncores = nPackages = __kmp_xproc;
        __kmp_nThreadsPerCore = nCoresPerPkg = 1;
        if (__kmp_affinity_verbose) {
            KMP_INFORM(AffFlatTopology, "KMP_AFFINITY");
            KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
            KMP_INFORM(Uniform, "KMP_AFFINITY");
            KMP_INFORM(Topology, "KMP_AFFINITY", nPackages, nCoresPerPkg,
              __kmp_nThreadsPerCore, __kmp_ncores);
        }
        return 0;
    }
    // When affinity is off, this routine still sets the topology counts.
    __kmp_ncores = nPackages = __kmp_avail_proc;
    __kmp_nThreadsPerCore = nCoresPerPkg = 1;
    if (__kmp_affinity_verbose) {
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, fullMask);

        KMP_INFORM(AffCapableUseFlat, "KMP_AFFINITY");
        if (__kmp_affinity_respect_mask) {
            KMP_INFORM(InitOSProcSetRespect, "KMP_AFFINITY", buf);
        } else {
            KMP_INFORM(InitOSProcSetNotRespect, "KMP_AFFINITY", buf);
        }
        KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
        KMP_INFORM(Uniform, "KMP_AFFINITY");
        KMP_INFORM(Topology, "KMP_AFFINITY", nPackages, nCoresPerPkg,
          __kmp_nThreadsPerCore, __kmp_ncores);
    }
    if (__kmp_affinity_type == affinity_none) {
        return 0;
    }

    // Construct the data structure to be returned.
    *address2os = (AddrUnsPair*)
      __kmp_allocate(sizeof(**address2os) * __kmp_avail_proc);
    int avail_ct = 0;
    unsigned int i;
    KMP_CPU_SET_ITERATE(i, fullMask) {
        // Skip this proc if it is not included in the machine model.
        if (! KMP_CPU_ISSET(i, fullMask)) {
            continue;
        }

        Address addr(1);
        addr.labels[0] = i;
        (*address2os)[avail_ct++] = AddrUnsPair(addr,i);
    }
    if (__kmp_affinity_verbose) {
        KMP_INFORM(OSProcToPackage, "KMP_AFFINITY");
    }

    if (__kmp_affinity_gran_levels < 0) {
        // Only the package level is modeled in the topology map, so the
        // granularity level count is either 0 or 1.
        if (__kmp_affinity_gran > affinity_gran_package) {
            __kmp_affinity_gran_levels = 1;
        } else {
            __kmp_affinity_gran_levels = 0;
        }
    }
    return 1;
}
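// On Windows* OS with multiple processor groups, form a two-level topology
// map: the group id at level 0 and the proc id within the group at level 1.
// This lets threads float among all procs in a group when granularity=group.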
# if KMP_GROUP_AFFINITY
static int
__kmp_affinity_create_proc_group_map(AddrUnsPair **address2os,
  kmp_i18n_id_t *const msg_id)
{
    *msg_id = kmp_i18n_null;

    // If we aren't affinity capable, or we don't have multiple processor
    // groups, return now -- the flat mapping will be used instead.
    if ((! KMP_AFFINITY_CAPABLE()) || (__kmp_get_proc_group(fullMask) >= 0)) {
        return -1;
    }

    // Construct the data structure to be returned.
    *address2os = (AddrUnsPair*)
      __kmp_allocate(sizeof(**address2os) * __kmp_avail_proc);
    int avail_ct = 0;
    int i;
    KMP_CPU_SET_ITERATE(i, fullMask) {
        // Skip this proc if it is not included in the machine model.
        if (! KMP_CPU_ISSET(i, fullMask)) {
            continue;
        }

        Address addr(2);
        addr.labels[0] = i / (CHAR_BIT * sizeof(DWORD_PTR));
        addr.labels[1] = i % (CHAR_BIT * sizeof(DWORD_PTR));
        (*address2os)[avail_ct++] = AddrUnsPair(addr,i);

        if (__kmp_affinity_verbose) {
            KMP_INFORM(AffOSProcToGroup, "KMP_AFFINITY", i, addr.labels[0],
              addr.labels[1]);
        }
    }

    if (__kmp_affinity_gran_levels < 0) {
        if (__kmp_affinity_gran == affinity_gran_group) {
            __kmp_affinity_gran_levels = 1;
        }
        else if ((__kmp_affinity_gran == affinity_gran_fine)
          || (__kmp_affinity_gran == affinity_gran_thread)) {
            __kmp_affinity_gran_levels = 0;
        }
        else {
            const char *gran_str = NULL;
            if (__kmp_affinity_gran == affinity_gran_core) {
                gran_str = "core";
            }
            else if (__kmp_affinity_gran == affinity_gran_package) {
                gran_str = "package";
            }
            else if (__kmp_affinity_gran == affinity_gran_node) {
                gran_str = "node";
            }
            else {
                KMP_ASSERT(0);
            }

            // Unsupported granularity with the group topology method;
            // fall back to "thread" granularity.
            __kmp_affinity_gran_levels = 0;
        }
    }
    return 2;
}
# endif /* KMP_GROUP_AFFINITY */
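// On IA-32 / Intel(R) 64, the remaining methods decode the topology from
// APIC ids read with the cpuid instruction while bound to each proc in turn.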
# if KMP_ARCH_X86 || KMP_ARCH_X86_64

static int
__kmp_cpuid_mask_width(int count) {
    int r = 0;

    while((1<<r) < count)
        ++r;
    return r;
}
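// Worked example: a package advertising at most 6 cores needs
// ceil(log2(6)) = 3 apic id bits for the core field, so
// __kmp_cpuid_mask_width(6) returns 3 (since 1<<2 < 6 <= 1<<3).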
class apicThreadInfo {
public:
    unsigned osId;              // param to __kmp_affinity_bind_thread
    unsigned apicId;            // from cpuid after binding
    unsigned maxCoresPerPkg;    //       ""
    unsigned maxThreadsPerPkg;  //       ""
    unsigned pkgId;             // inferred from apic id
    unsigned coreId;            // inferred from apic id
    unsigned threadId;          // inferred from apic id
};
static int
__kmp_affinity_cmp_apicThreadInfo_os_id(const void *a, const void *b)
{
    const apicThreadInfo *aa = (const apicThreadInfo *)a;
    const apicThreadInfo *bb = (const apicThreadInfo *)b;
    if (aa->osId < bb->osId) return -1;
    if (aa->osId > bb->osId) return 1;
    return 0;
}
static int
__kmp_affinity_cmp_apicThreadInfo_phys_id(const void *a, const void *b)
{
    const apicThreadInfo *aa = (const apicThreadInfo *)a;
    const apicThreadInfo *bb = (const apicThreadInfo *)b;
    if (aa->pkgId < bb->pkgId) return -1;
    if (aa->pkgId > bb->pkgId) return 1;
    if (aa->coreId < bb->coreId) return -1;
    if (aa->coreId > bb->coreId) return 1;
    if (aa->threadId < bb->threadId) return -1;
    if (aa->threadId > bb->threadId) return 1;
    return 0;
}
static int
__kmp_affinity_create_apicid_map(AddrUnsPair **address2os,
  kmp_i18n_id_t *const msg_id)
{
    kmp_cpuid buf;
    *msg_id = kmp_i18n_null;

    // Check if cpuid leaf 4 is supported.
    __kmp_x86_cpuid(0, 0, &buf);
    if (buf.eax < 4) {
        *msg_id = kmp_i18n_str_NoLeaf4Support;
        return -1;
    }
    // Even if __kmp_affinity_type == affinity_none, this routine is still
    // called to set __kmp_ncores, __kmp_nThreadsPerCore, nCoresPerPkg, and
    // nPackages. Infer what we can using only cpuid on the current thread
    // and __kmp_xproc.
    if (! KMP_AFFINITY_CAPABLE()) {
        KMP_ASSERT(__kmp_affinity_type == affinity_none);

        // Get an upper bound on the number of threads per package from
        // cpuid(1).
        __kmp_x86_cpuid(1, 0, &buf);
        int maxThreadsPerPkg = (buf.ebx >> 16) & 0xff;
        if (maxThreadsPerPkg == 0) {
            maxThreadsPerPkg = 1;
        }

        // The number of cores per pkg comes from cpuid(4), if supported;
        // 1 must be added to the encoded value.
        __kmp_x86_cpuid(0, 0, &buf);
        if (buf.eax >= 4) {
            __kmp_x86_cpuid(4, 0, &buf);
            nCoresPerPkg = ((buf.eax >> 26) & 0x3f) + 1;
        } else {
            nCoresPerPkg = 1;
        }

        // Without binding to every thread there is no reliable way to tell
        // whether HT is enabled, so assume one thread per core.
        __kmp_ncores = __kmp_xproc;
        nPackages = (__kmp_xproc + nCoresPerPkg - 1) / nCoresPerPkg;
        __kmp_nThreadsPerCore = 1;
        if (__kmp_affinity_verbose) {
            KMP_INFORM(AffNotCapableUseLocCpuid, "KMP_AFFINITY");
            KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
            if (__kmp_affinity_uniform_topology()) {
                KMP_INFORM(Uniform, "KMP_AFFINITY");
            } else {
                KMP_INFORM(NonUniform, "KMP_AFFINITY");
            }
            KMP_INFORM(Topology, "KMP_AFFINITY", nPackages, nCoresPerPkg,
              __kmp_nThreadsPerCore, __kmp_ncores);
        }
        return 0;
    }
    // From here on, we can assume it is safe to call
    // __kmp_get_system_affinity() and __kmp_set_system_affinity(), even if
    // __kmp_affinity_type == affinity_none.

    // Save the affinity mask for the current thread.
    kmp_affin_mask_t *oldMask;
    KMP_CPU_ALLOC(oldMask);
    KMP_ASSERT(oldMask != NULL);
    __kmp_get_system_affinity(oldMask, TRUE);
    // Run through each of the available contexts, binding the current thread
    // to it, and obtaining the pertinent information via cpuid.
    apicThreadInfo *threadInfo = (apicThreadInfo *)__kmp_allocate(
      __kmp_avail_proc * sizeof(apicThreadInfo));
    unsigned nApics = 0;
    unsigned i;
    KMP_CPU_SET_ITERATE(i, fullMask) {
        // Skip this proc if it is not included in the machine model.
        if (! KMP_CPU_ISSET(i, fullMask)) {
            continue;
        }
        KMP_DEBUG_ASSERT((int)nApics < __kmp_avail_proc);

        __kmp_affinity_bind_thread(i);
        threadInfo[nApics].osId = i;

        // The apic id and max threads per pkg come from cpuid(1).
        __kmp_x86_cpuid(1, 0, &buf);
        if (! ((buf.edx >> 9) & 1)) {   // edx bit 9: on-chip APIC present
            __kmp_set_system_affinity(oldMask, TRUE);
            __kmp_free(threadInfo);
            KMP_CPU_FREE(oldMask);
            *msg_id = kmp_i18n_str_ApicNotPresent;
            return -1;
        }
        threadInfo[nApics].apicId = (buf.ebx >> 24) & 0xff;
        threadInfo[nApics].maxThreadsPerPkg = (buf.ebx >> 16) & 0xff;
        if (threadInfo[nApics].maxThreadsPerPkg == 0) {
            threadInfo[nApics].maxThreadsPerPkg = 1;
        }

        // The max cores per pkg comes from cpuid(4), if supported.
        __kmp_x86_cpuid(0, 0, &buf);
        if (buf.eax >= 4) {
            __kmp_x86_cpuid(4, 0, &buf);
            threadInfo[nApics].maxCoresPerPkg = ((buf.eax >> 26) & 0x3f) + 1;
        } else {
            threadInfo[nApics].maxCoresPerPkg = 1;
        }

        // Infer pkgId / coreId / threadId using only locally obtained info:
        // the high apic id bits select the package, the middle bits the
        // core, and the low bits the thread.
        int widthCT = __kmp_cpuid_mask_width(
          threadInfo[nApics].maxThreadsPerPkg);
        threadInfo[nApics].pkgId = threadInfo[nApics].apicId >> widthCT;

        int widthC = __kmp_cpuid_mask_width(
          threadInfo[nApics].maxCoresPerPkg);
        int widthT = widthCT - widthC;
        if (widthT < 0) {
            // Should never happen unless cpuid is misbehaving; restore the
            // affinity mask before bailing out.
            __kmp_set_system_affinity(oldMask, TRUE);
            __kmp_free(threadInfo);
            KMP_CPU_FREE(oldMask);
            *msg_id = kmp_i18n_str_InvalidCpuidInfo;
            return -1;
        }

        int maskC = (1 << widthC) - 1;
        threadInfo[nApics].coreId = (threadInfo[nApics].apicId >> widthT)
          & maskC;

        int maskT = (1 << widthT) - 1;
        threadInfo[nApics].threadId = threadInfo[nApics].apicId & maskT;

        nApics++;
    }

    // We've collected all the info we need; restore the old affinity mask.
    __kmp_set_system_affinity(oldMask, TRUE);
    // If there's only one thread context to bind to, form an Address object
    // with depth 1 and return immediately.
    KMP_ASSERT(nApics > 0);
    if (nApics == 1) {
        __kmp_ncores = nPackages = 1;
        __kmp_nThreadsPerCore = nCoresPerPkg = 1;
        if (__kmp_affinity_verbose) {
            char buf[KMP_AFFIN_MASK_PRINT_LEN];
            __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, oldMask);

            KMP_INFORM(AffUseGlobCpuid, "KMP_AFFINITY");
            if (__kmp_affinity_respect_mask) {
                KMP_INFORM(InitOSProcSetRespect, "KMP_AFFINITY", buf);
            } else {
                KMP_INFORM(InitOSProcSetNotRespect, "KMP_AFFINITY", buf);
            }
            KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
            KMP_INFORM(Uniform, "KMP_AFFINITY");
            KMP_INFORM(Topology, "KMP_AFFINITY", nPackages, nCoresPerPkg,
              __kmp_nThreadsPerCore, __kmp_ncores);
        }

        if (__kmp_affinity_type == affinity_none) {
            __kmp_free(threadInfo);
            KMP_CPU_FREE(oldMask);
            return 0;
        }

        *address2os = (AddrUnsPair*)__kmp_allocate(sizeof(AddrUnsPair));
        Address addr(1);
        addr.labels[0] = threadInfo[0].pkgId;
        (*address2os)[0] = AddrUnsPair(addr, threadInfo[0].osId);

        if (__kmp_affinity_gran_levels < 0) {
            __kmp_affinity_gran_levels = 0;
        }

        if (__kmp_affinity_verbose) {
            __kmp_affinity_print_topology(*address2os, 1, 1, 0, -1, -1);
        }

        __kmp_free(threadInfo);
        KMP_CPU_FREE(oldMask);
        return 1;
    }
    // Sort the threadInfo table by physical Id.
    qsort(threadInfo, nApics, sizeof(*threadInfo),
      __kmp_affinity_cmp_apicThreadInfo_phys_id);

    // The table is now sorted by pkgId / coreId / threadId, but we really
    // don't know the radix of any of the fields: pkgId's may be sparsely
    // assigned, and we don't know what range of values coreId / threadId
    // actually use. Count the packages, cores per package, and threads per
    // core as we walk the sorted table.
    nPackages = 1;
    nCoresPerPkg = 1;
    __kmp_nThreadsPerCore = 1;
    unsigned nCores = 1;

    unsigned lastPkgId = threadInfo[0].pkgId;
    unsigned coreCt = 1;
    unsigned lastCoreId = threadInfo[0].coreId;
    unsigned threadCt = 1;
    unsigned lastThreadId = threadInfo[0].threadId;

    // State for the intra-package consistency checks.
    unsigned prevMaxCoresPerPkg = threadInfo[0].maxCoresPerPkg;
    unsigned prevMaxThreadsPerPkg = threadInfo[0].maxThreadsPerPkg;

    for (i = 1; i < nApics; i++) {
        if (threadInfo[i].pkgId != lastPkgId) {
            nCores++;
            nPackages++;
            lastPkgId = threadInfo[i].pkgId;
            if ((int)coreCt > nCoresPerPkg) nCoresPerPkg = coreCt;
            coreCt = 1;
            lastCoreId = threadInfo[i].coreId;
            if ((int)threadCt > __kmp_nThreadsPerCore) __kmp_nThreadsPerCore = threadCt;
            threadCt = 1;
            lastThreadId = threadInfo[i].threadId;

            // New package: skip the consistency checks for this record, but
            // reset their state.
            prevMaxCoresPerPkg = threadInfo[i].maxCoresPerPkg;
            prevMaxThreadsPerPkg = threadInfo[i].maxThreadsPerPkg;
            continue;
        }

        if (threadInfo[i].coreId != lastCoreId) {
            nCores++;
            coreCt++;
            lastCoreId = threadInfo[i].coreId;
            if ((int)threadCt > __kmp_nThreadsPerCore) __kmp_nThreadsPerCore = threadCt;
            threadCt = 1;
            lastThreadId = threadInfo[i].threadId;
        }
        else if (threadInfo[i].threadId != lastThreadId) {
            threadCt++;
            lastThreadId = threadInfo[i].threadId;
        }
        else {
            // Duplicate (pkgId, coreId, threadId) triple: the legacy apic
            // ids are not unique, so this method cannot be used.
            __kmp_free(threadInfo);
            KMP_CPU_FREE(oldMask);
            *msg_id = kmp_i18n_str_LegacyApicIDsNotUnique;
            return -1;
        }

        // Check that the maxCoresPerPkg and maxThreadsPerPkg fields agree
        // between all threads bound to the same package.
        if ((prevMaxCoresPerPkg != threadInfo[i].maxCoresPerPkg)
          || (prevMaxThreadsPerPkg != threadInfo[i].maxThreadsPerPkg)) {
            __kmp_free(threadInfo);
            KMP_CPU_FREE(oldMask);
            *msg_id = kmp_i18n_str_InconsistentCpuidInfo;
            return -1;
        }
    }
    if ((int)coreCt > nCoresPerPkg) nCoresPerPkg = coreCt;
    if ((int)threadCt > __kmp_nThreadsPerCore) __kmp_nThreadsPerCore = threadCt;
    // When affinity is off, this routine will still be called to set
    // __kmp_ncores, __kmp_nThreadsPerCore, nCoresPerPkg, and nPackages.
    __kmp_ncores = nCores;
    if (__kmp_affinity_verbose) {
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, oldMask);

        KMP_INFORM(AffUseGlobCpuid, "KMP_AFFINITY");
        if (__kmp_affinity_respect_mask) {
            KMP_INFORM(InitOSProcSetRespect, "KMP_AFFINITY", buf);
        } else {
            KMP_INFORM(InitOSProcSetNotRespect, "KMP_AFFINITY", buf);
        }
        KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
        if (__kmp_affinity_uniform_topology()) {
            KMP_INFORM(Uniform, "KMP_AFFINITY");
        } else {
            KMP_INFORM(NonUniform, "KMP_AFFINITY");
        }
        KMP_INFORM(Topology, "KMP_AFFINITY", nPackages, nCoresPerPkg,
          __kmp_nThreadsPerCore, __kmp_ncores);
    }

    if (__kmp_affinity_type == affinity_none) {
        __kmp_free(threadInfo);
        KMP_CPU_FREE(oldMask);
        return 0;
    }
    // Now that the counts are known, construct the data structure to be
    // returned, modeling only the levels that have more than one node.
    int pkgLevel = 0;
    int coreLevel = (nCoresPerPkg <= 1) ? -1 : 1;
    int threadLevel = (__kmp_nThreadsPerCore <= 1) ? -1 : ((coreLevel >= 0) ? 2 : 1);
    unsigned depth = (pkgLevel >= 0) + (coreLevel >= 0) + (threadLevel >= 0);

    KMP_ASSERT(depth > 0);
    *address2os = (AddrUnsPair*)__kmp_allocate(sizeof(AddrUnsPair) * nApics);

    for (i = 0; i < nApics; ++i) {
        Address addr(depth);
        unsigned os = threadInfo[i].osId;
        int d = 0;

        if (pkgLevel >= 0) {
            addr.labels[d++] = threadInfo[i].pkgId;
        }
        if (coreLevel >= 0) {
            addr.labels[d++] = threadInfo[i].coreId;
        }
        if (threadLevel >= 0) {
            addr.labels[d++] = threadInfo[i].threadId;
        }
        (*address2os)[i] = AddrUnsPair(addr, os);
    }

    if (__kmp_affinity_gran_levels < 0) {
        // Set the granularity level based on what levels are modeled in the
        // machine topology map.
        __kmp_affinity_gran_levels = 0;
        if ((threadLevel >= 0)
          && (__kmp_affinity_gran > affinity_gran_thread)) {
            __kmp_affinity_gran_levels++;
        }
        if ((coreLevel >= 0) && (__kmp_affinity_gran > affinity_gran_core)) {
            __kmp_affinity_gran_levels++;
        }
        if ((pkgLevel >= 0) && (__kmp_affinity_gran > affinity_gran_package)) {
            __kmp_affinity_gran_levels++;
        }
    }

    if (__kmp_affinity_verbose) {
        __kmp_affinity_print_topology(*address2os, nApics, depth, pkgLevel,
          coreLevel, threadLevel);
    }

    __kmp_free(threadInfo);
    KMP_CPU_FREE(oldMask);
    return depth;
}
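// Intel(R) microarchitecture code name Nehalem, Dunnington, and later
// systems support a newer interface: cpuid leaf 11 enumerates the topology
// levels directly, using full 32-bit x2APIC ids.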
static int
__kmp_affinity_create_x2apicid_map(AddrUnsPair **address2os,
  kmp_i18n_id_t *const msg_id)
{
    kmp_cpuid buf;
    *msg_id = kmp_i18n_null;

    // Check to see if cpuid leaf 11 is supported.
    __kmp_x86_cpuid(0, 0, &buf);
    if (buf.eax < 11) {
        *msg_id = kmp_i18n_str_NoLeaf11Support;
        return -1;
    }
    __kmp_x86_cpuid(11, 0, &buf);
    if (buf.ebx == 0) {
        *msg_id = kmp_i18n_str_NoLeaf11Support;
        return -1;
    }

    // Find the number of levels in the machine topology, and get reasonable
    // default values for __kmp_nThreadsPerCore and nCoresPerPkg now, in
    // case we have to return early.
    int level;
    int threadLevel = -1;
    int coreLevel = -1;
    int pkgLevel = -1;
    __kmp_nThreadsPerCore = nCoresPerPkg = nPackages = 1;

    for (level = 0;; level++) {
        if (level > 31) {
            // There could be 32 valid levels in the machine topology, but so
            // far every machine that fails to exit this loop before
            // iteration 32 has had broken x2APIC settings; reject the case
            // based on the loop trip count.
            *msg_id = kmp_i18n_str_InvalidCpuidInfo;
            return -1;
        }
        __kmp_x86_cpuid(11, level, &buf);
        if (buf.ebx == 0) {
            if (pkgLevel < 0) {
                // Will infer nPackages from __kmp_xproc.
                pkgLevel = level;
                level++;
            }
            break;
        }
        int kind = (buf.ecx >> 8) & 0xff;
        if (kind == 1) {
            // SMT level
            threadLevel = level;
            coreLevel = -1;
            pkgLevel = -1;
            __kmp_nThreadsPerCore = buf.ebx & 0xff;
            if (__kmp_nThreadsPerCore == 0) {
                *msg_id = kmp_i18n_str_InvalidCpuidInfo;
                return -1;
            }
        }
        else if (kind == 2) {
            // core level
            coreLevel = level;
            pkgLevel = -1;
            nCoresPerPkg = buf.ebx & 0xff;
            if (nCoresPerPkg == 0) {
                *msg_id = kmp_i18n_str_InvalidCpuidInfo;
                return -1;
            }
        }
        else {
            if (level <= 0) {
                *msg_id = kmp_i18n_str_InvalidCpuidInfo;
                return -1;
            }
            if (pkgLevel >= 0) {
                break;
            }
            pkgLevel = level;
            nPackages = buf.ebx & 0xff;
            if (nPackages == 0) {
                *msg_id = kmp_i18n_str_InvalidCpuidInfo;
                return -1;
            }
        }
    }
    int depth = level;

    // In the loop above, "level" ran from the finest level (usually thread)
    // to the coarsest. The caller expects level 0 to be the package, so
    // invert the level indices.
    if (threadLevel >= 0) {
        threadLevel = depth - threadLevel - 1;
    }
    if (coreLevel >= 0) {
        coreLevel = depth - coreLevel - 1;
    }
    KMP_DEBUG_ASSERT(pkgLevel >= 0);
    pkgLevel = depth - pkgLevel - 1;
    // The algorithm used requires binding to each available thread, so if we
    // are not affinity capable, infer what we can and return.
    if (! KMP_AFFINITY_CAPABLE())
    {
        KMP_ASSERT(__kmp_affinity_type == affinity_none);

        __kmp_ncores = __kmp_xproc / __kmp_nThreadsPerCore;
        nPackages = (__kmp_xproc + nCoresPerPkg - 1) / nCoresPerPkg;
        if (__kmp_affinity_verbose) {
            KMP_INFORM(AffNotCapableUseLocCpuidL11, "KMP_AFFINITY");
            KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
            if (__kmp_affinity_uniform_topology()) {
                KMP_INFORM(Uniform, "KMP_AFFINITY");
            } else {
                KMP_INFORM(NonUniform, "KMP_AFFINITY");
            }
            KMP_INFORM(Topology, "KMP_AFFINITY", nPackages, nCoresPerPkg,
              __kmp_nThreadsPerCore, __kmp_ncores);
        }
        return 0;
    }
    // Save the affinity mask for the current thread.
    kmp_affin_mask_t *oldMask;
    KMP_CPU_ALLOC(oldMask);
    __kmp_get_system_affinity(oldMask, TRUE);

    // Allocate the data structure to be returned.
    AddrUnsPair *retval = (AddrUnsPair *)
      __kmp_allocate(sizeof(AddrUnsPair) * __kmp_avail_proc);

    // Run through each of the available contexts, binding the current thread
    // to it, and extracting the labels for each topology level from the
    // x2APIC id.
    unsigned int proc;
    int nApics = 0;
    KMP_CPU_SET_ITERATE(proc, fullMask) {
        // Skip this proc if it is not included in the machine model.
        if (! KMP_CPU_ISSET(proc, fullMask)) {
            continue;
        }
        KMP_DEBUG_ASSERT(nApics < __kmp_avail_proc);

        __kmp_affinity_bind_thread(proc);

        Address addr(depth);
        int prev_shift = 0;

        for (level = 0; level < depth; level++) {
            __kmp_x86_cpuid(11, level, &buf);
            unsigned apicId = buf.edx;
            if (buf.ebx == 0) {
                if (level != depth - 1) {
                    KMP_CPU_FREE(oldMask);
                    *msg_id = kmp_i18n_str_InconsistentCpuidInfo;
                    return -1;
                }
                addr.labels[depth - level - 1] = apicId >> prev_shift;
                level++;
                break;
            }
            int shift = buf.eax & 0x1f;
            int mask = (1 << shift) - 1;
            addr.labels[depth - level - 1] = (apicId & mask) >> prev_shift;
            prev_shift = shift;
        }
        if (level != depth) {
            KMP_CPU_FREE(oldMask);
            *msg_id = kmp_i18n_str_InconsistentCpuidInfo;
            return -1;
        }

        retval[nApics] = AddrUnsPair(addr, proc);
        nApics++;
    }

    // We've collected all the info we need; restore the old affinity mask.
    __kmp_set_system_affinity(oldMask, TRUE);
    // If there's only one thread context to bind to, return now.
    KMP_ASSERT(nApics > 0);
    if (nApics == 1) {
        __kmp_ncores = nPackages = 1;
        __kmp_nThreadsPerCore = nCoresPerPkg = 1;
        if (__kmp_affinity_verbose) {
            char buf[KMP_AFFIN_MASK_PRINT_LEN];
            __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, oldMask);

            KMP_INFORM(AffUseGlobCpuidL11, "KMP_AFFINITY");
            if (__kmp_affinity_respect_mask) {
                KMP_INFORM(InitOSProcSetRespect, "KMP_AFFINITY", buf);
            } else {
                KMP_INFORM(InitOSProcSetNotRespect, "KMP_AFFINITY", buf);
            }
            KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
            KMP_INFORM(Uniform, "KMP_AFFINITY");
            KMP_INFORM(Topology, "KMP_AFFINITY", nPackages, nCoresPerPkg,
              __kmp_nThreadsPerCore, __kmp_ncores);
        }

        if (__kmp_affinity_type == affinity_none) {
            __kmp_free(retval);
            KMP_CPU_FREE(oldMask);
            return 0;
        }

        // Form an Address object which only includes the package level.
        Address addr(1);
        addr.labels[0] = retval[0].first.labels[pkgLevel];
        retval[0].first = addr;

        if (__kmp_affinity_gran_levels < 0) {
            __kmp_affinity_gran_levels = 0;
        }

        if (__kmp_affinity_verbose) {
            __kmp_affinity_print_topology(retval, 1, 1, 0, -1, -1);
        }

        *address2os = retval;
        KMP_CPU_FREE(oldMask);
        return 1;
    }
    // Sort the table by physical Id.
    qsort(retval, nApics, sizeof(*retval), __kmp_affinity_cmp_Address_labels);

    // Find the radix at each of the levels.
    unsigned *totals = (unsigned *)__kmp_allocate(depth * sizeof(unsigned));
    unsigned *counts = (unsigned *)__kmp_allocate(depth * sizeof(unsigned));
    unsigned *maxCt = (unsigned *)__kmp_allocate(depth * sizeof(unsigned));
    unsigned *last = (unsigned *)__kmp_allocate(depth * sizeof(unsigned));
    for (level = 0; level < depth; level++) {
        totals[level] = 1;
        maxCt[level] = 1;
        counts[level] = 1;
        last[level] = retval[0].first.labels[level];
    }

    // From here on, the iteration variable "level" runs from the coarsest
    // level (package) to the finest, i.e. forward through labels[].
    for (proc = 1; (int)proc < nApics; proc++) {
        int level;
        for (level = 0; level < depth; level++) {
            if (retval[proc].first.labels[level] != last[level]) {
                int j;
                for (j = level + 1; j < depth; j++) {
                    totals[j]++;
                    counts[j] = 1;
                    last[j] = retval[proc].first.labels[j];
                }
                totals[level]++;
                counts[level]++;
                if (counts[level] > maxCt[level]) {
                    maxCt[level] = counts[level];
                }
                last[level] = retval[proc].first.labels[level];
                break;
            }
            else if (level == depth - 1) {
                // All labels equal for two procs: the x2APIC ids are not
                // unique, so this method cannot be used.
                __kmp_free(last);
                __kmp_free(maxCt);
                __kmp_free(counts);
                __kmp_free(totals);
                __kmp_free(retval);
                KMP_CPU_FREE(oldMask);
                *msg_id = kmp_i18n_str_x2ApicIDsNotUnique;
                return -1;
            }
        }
    }
    // When affinity is off, this routine will still be called to set
    // __kmp_ncores, __kmp_nThreadsPerCore, nCoresPerPkg, and nPackages.
    if (threadLevel >= 0) {
        __kmp_nThreadsPerCore = maxCt[threadLevel];
    }
    else {
        __kmp_nThreadsPerCore = 1;
    }
    nPackages = totals[pkgLevel];

    if (coreLevel >= 0) {
        __kmp_ncores = totals[coreLevel];
        nCoresPerPkg = maxCt[coreLevel];
    }
    else {
        __kmp_ncores = nPackages;
        nCoresPerPkg = 1;
    }

    // Check to see if the machine topology is uniform.
    unsigned prod = maxCt[0];
    for (level = 1; level < depth; level++) {
        prod *= maxCt[level];
    }
    bool uniform = (prod == totals[level - 1]);

    // Print the machine topology summary.
    if (__kmp_affinity_verbose) {
        char mask[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(mask, KMP_AFFIN_MASK_PRINT_LEN, oldMask);

        KMP_INFORM(AffUseGlobCpuidL11, "KMP_AFFINITY");
        if (__kmp_affinity_respect_mask) {
            KMP_INFORM(InitOSProcSetRespect, "KMP_AFFINITY", mask);
        } else {
            KMP_INFORM(InitOSProcSetNotRespect, "KMP_AFFINITY", mask);
        }
        KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
        if (uniform) {
            KMP_INFORM(Uniform, "KMP_AFFINITY");
        } else {
            KMP_INFORM(NonUniform, "KMP_AFFINITY");
        }

        kmp_str_buf_t buf;
        __kmp_str_buf_init(&buf);

        __kmp_str_buf_print(&buf, "%d", totals[0]);
        for (level = 1; level <= pkgLevel; level++) {
            __kmp_str_buf_print(&buf, " x %d", maxCt[level]);
        }
        KMP_INFORM(TopologyExtra, "KMP_AFFINITY", buf.str, nCoresPerPkg,
          __kmp_nThreadsPerCore, __kmp_ncores);

        __kmp_str_buf_free(&buf);
    }

    if (__kmp_affinity_type == affinity_none) {
        __kmp_free(last);
        __kmp_free(maxCt);
        __kmp_free(counts);
        __kmp_free(totals);
        __kmp_free(retval);
        KMP_CPU_FREE(oldMask);
        return 0;
    }
    // Find any levels with radix 1, and remove them from the map
    // (except for the package level).
    int new_depth = 0;
    for (level = 0; level < depth; level++) {
        if ((maxCt[level] == 1) && (level != pkgLevel)) {
            continue;
        }
        new_depth++;
    }

    // If we are removing any levels, allocate a new vector to return and
    // copy the relevant information to it.
    if (new_depth != depth) {
        AddrUnsPair *new_retval = (AddrUnsPair *)__kmp_allocate(
          sizeof(AddrUnsPair) * nApics);
        for (proc = 0; (int)proc < nApics; proc++) {
            Address addr(new_depth);
            new_retval[proc] = AddrUnsPair(addr, retval[proc].second);
        }
        int new_level = 0;
        int newPkgLevel = -1;
        int newCoreLevel = -1;
        int newThreadLevel = -1;
        for (level = 0; level < depth; level++) {
            if ((maxCt[level] == 1)
              && (level != pkgLevel)) {
                // Remove this level. Never remove the package level.
                continue;
            }
            if (level == pkgLevel) {
                newPkgLevel = level;
            }
            if (level == coreLevel) {
                newCoreLevel = level;
            }
            if (level == threadLevel) {
                newThreadLevel = level;
            }
            for (proc = 0; (int)proc < nApics; proc++) {
                new_retval[proc].first.labels[new_level]
                  = retval[proc].first.labels[level];
            }
            new_level++;
        }

        __kmp_free(retval);
        retval = new_retval;
        depth = new_depth;
        pkgLevel = newPkgLevel;
        coreLevel = newCoreLevel;
        threadLevel = newThreadLevel;
    }

    if (__kmp_affinity_gran_levels < 0) {
        // Set the granularity level based on what levels are modeled
        // in the machine topology map.
        __kmp_affinity_gran_levels = 0;
        if ((threadLevel >= 0) && (__kmp_affinity_gran > affinity_gran_thread)) {
            __kmp_affinity_gran_levels++;
        }
        if ((coreLevel >= 0) && (__kmp_affinity_gran > affinity_gran_core)) {
            __kmp_affinity_gran_levels++;
        }
        if (__kmp_affinity_gran > affinity_gran_package) {
            __kmp_affinity_gran_levels++;
        }
    }

    if (__kmp_affinity_verbose) {
        __kmp_affinity_print_topology(retval, nApics, depth, pkgLevel,
          coreLevel, threadLevel);
    }

    __kmp_free(last);
    __kmp_free(maxCt);
    __kmp_free(counts);
    __kmp_free(totals);
    KMP_CPU_FREE(oldMask);
    *address2os = retval;
    return depth;
}

# endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
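// The /proc/cpuinfo parser stores one record of unsigned fields per proc,
// indexed by the positions below; node levels, if present, extend past
// pkgIdIndex.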
#define osIdIndex       0
#define threadIdIndex   1
#define coreIdIndex     2
#define pkgIdIndex      3
#define nodeIdIndex     4

typedef unsigned *ProcCpuInfo;
static unsigned maxIndex = pkgIdIndex;

static int
__kmp_affinity_cmp_ProcCpuInfo_os_id(const void *a, const void *b)
{
    const unsigned *aa = (const unsigned *)a;
    const unsigned *bb = (const unsigned *)b;
    if (aa[osIdIndex] < bb[osIdIndex]) return -1;
    if (aa[osIdIndex] > bb[osIdIndex]) return 1;
    return 0;
}

static int
__kmp_affinity_cmp_ProcCpuInfo_phys_id(const void *a, const void *b)
{
    unsigned i;
    const unsigned *aa = *((const unsigned **)a);
    const unsigned *bb = *((const unsigned **)b);
    for (i = maxIndex; ; i--) {
        if (aa[i] < bb[i]) return -1;
        if (aa[i] > bb[i]) return 1;
        if (i == osIdIndex) break;
    }
    return 0;
}
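// Parse /proc/cpuinfo (or an alternate file in the same format) to obtain
// the machine topology map. Two passes are made: the first counts the
// "processor" records, the second fills in one record per proc.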
static int
__kmp_affinity_create_cpuinfo_map(AddrUnsPair **address2os, int *line,
  kmp_i18n_id_t *const msg_id, FILE *f)
{
    *msg_id = kmp_i18n_null;

    // Scan the file once, counting the number of "processor" (osId) fields
    // and finding the highest <n> for any node_<n> field.
    char buf[256];
    unsigned num_records = 0;
    while (! feof(f)) {
        buf[sizeof(buf) - 1] = 1;
        if (! fgets(buf, sizeof(buf), f)) {
            // Read errors, presumably because of EOF.
            break;
        }

        char s1[] = "processor";
        if (strncmp(buf, s1, sizeof(s1) - 1) == 0) {
            num_records++;
            continue;
        }

        unsigned level;
        if (KMP_SSCANF(buf, "node_%d id", &level) == 1) {
            if (nodeIdIndex + level >= maxIndex) {
                maxIndex = nodeIdIndex + level;
            }
        }
    }

    // Check for an empty file / no valid processor records, or too many.
    // The number of records can't exceed the number of valid bits in the
    // affinity mask.
    if (num_records == 0) {
        *line = 0;
        *msg_id = kmp_i18n_str_NoProcRecords;
        return -1;
    }
    if (num_records > (unsigned)__kmp_xproc) {
        *line = 0;
        *msg_id = kmp_i18n_str_TooManyProcRecords;
        return -1;
    }

    // Set the file pointer back to the beginning so that we can scan the
    // file again, this time performing a full parse of the data.
    if (fseek(f, 0, SEEK_SET) != 0) {
        *line = 0;
        *msg_id = kmp_i18n_str_CantRewindCpuinfo;
        return -1;
    }
    // Allocate the array of records to store the proc info in. The dummy
    // element at the end simplifies the termination logic when filling
    // them out.
    unsigned **threadInfo = (unsigned **)__kmp_allocate((num_records + 1)
      * sizeof(unsigned *));
    unsigned i;
    for (i = 0; i <= num_records; i++) {
        threadInfo[i] = (unsigned *)__kmp_allocate((maxIndex + 1)
          * sizeof(unsigned));
    }

#define CLEANUP_THREAD_INFO \
    for (i = 0; i <= num_records; i++) {        \
        __kmp_free(threadInfo[i]);              \
    }                                           \
    __kmp_free(threadInfo);

    // A value of UINT_MAX means that we didn't find the field.
    unsigned __index;

#define INIT_PROC_INFO(p) \
    for (__index = 0; __index <= maxIndex; __index++) { \
        (p)[__index] = UINT_MAX;                        \
    }

    for (i = 0; i <= num_records; i++) {
        INIT_PROC_INFO(threadInfo[i]);
    }

    unsigned num_avail = 0;
    *line = 0;
    while (! feof(f)) {
        // An inner scoping level keeps the goto targets at the end of the
        // loop in an outer scope, avoiding warnings about jumping past
        // initializations.
        {
            buf[sizeof(buf) - 1] = 1;
            bool long_line = false;
            if (! fgets(buf, sizeof(buf), f)) {
                // Read errors, presumably because of EOF. If there is valid
                // data in threadInfo[num_avail], fake a blank line to make
                // sure the last record gets parsed.
                bool valid = false;
                for (i = 0; i <= maxIndex; i++) {
                    if (threadInfo[num_avail][i] != UINT_MAX) {
                        valid = true;
                    }
                }
                if (! valid) {
                    break;
                }
                buf[0] = 0;
            } else if (!buf[sizeof(buf) - 1]) {
                // The line is longer than the buffer. Set a flag and don't
                // emit an error if we were going to ignore it anyway.
                long_line = true;

#define CHECK_LINE \
    if (long_line) {                            \
        CLEANUP_THREAD_INFO;                    \
        *msg_id = kmp_i18n_str_LongLineCpuinfo; \
        return -1;                              \
    }
            }
            (*line)++;

            char s1[] = "processor";
            if (strncmp(buf, s1, sizeof(s1) - 1) == 0) {
                CHECK_LINE;
                char *p = strchr(buf + sizeof(s1) - 1, ':');
                unsigned val;
                if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1)) goto no_val;
                if (threadInfo[num_avail][osIdIndex] != UINT_MAX) goto dup_field;
                threadInfo[num_avail][osIdIndex] = val;
#if KMP_OS_LINUX && USE_SYSFS_INFO
                // With sysfs info available, read the package and core ids
                // directly from sysfs instead of parsing further fields.
                char path[256];
                KMP_SNPRINTF(path, sizeof(path),
                  "/sys/devices/system/cpu/cpu%u/topology/physical_package_id",
                  threadInfo[num_avail][osIdIndex]);
                __kmp_read_from_file(path, "%u", &threadInfo[num_avail][pkgIdIndex]);

                KMP_SNPRINTF(path, sizeof(path),
                  "/sys/devices/system/cpu/cpu%u/topology/core_id",
                  threadInfo[num_avail][osIdIndex]);
                __kmp_read_from_file(path, "%u", &threadInfo[num_avail][coreIdIndex]);
                continue;
            }
#else
                continue;
            }
            char s2[] = "physical id";
            if (strncmp(buf, s2, sizeof(s2) - 1) == 0) {
                CHECK_LINE;
                char *p = strchr(buf + sizeof(s2) - 1, ':');
                unsigned val;
                if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1)) goto no_val;
                if (threadInfo[num_avail][pkgIdIndex] != UINT_MAX) goto dup_field;
                threadInfo[num_avail][pkgIdIndex] = val;
                continue;
            }
            char s3[] = "core id";
            if (strncmp(buf, s3, sizeof(s3) - 1) == 0) {
                CHECK_LINE;
                char *p = strchr(buf + sizeof(s3) - 1, ':');
                unsigned val;
                if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1)) goto no_val;
                if (threadInfo[num_avail][coreIdIndex] != UINT_MAX) goto dup_field;
                threadInfo[num_avail][coreIdIndex] = val;
                continue;
            }
#endif // KMP_OS_LINUX && USE_SYSFS_INFO
            char s4[] = "thread id";
            if (strncmp(buf, s4, sizeof(s4) - 1) == 0) {
                CHECK_LINE;
                char *p = strchr(buf + sizeof(s4) - 1, ':');
                unsigned val;
                if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1)) goto no_val;
                if (threadInfo[num_avail][threadIdIndex] != UINT_MAX) goto dup_field;
                threadInfo[num_avail][threadIdIndex] = val;
                continue;
            }
            unsigned level;
            if (KMP_SSCANF(buf, "node_%d id", &level) == 1) {
                CHECK_LINE;
                char *p = strchr(buf + sizeof(s4) - 1, ':');
                unsigned val;
                if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1)) goto no_val;
                KMP_ASSERT(nodeIdIndex + level <= maxIndex);
                if (threadInfo[num_avail][nodeIdIndex + level] != UINT_MAX) goto dup_field;
                threadInfo[num_avail][nodeIdIndex + level] = val;
                continue;
            }
            // We didn't recognize the leading token on the line. There are
            // lots of leading tokens that we don't recognize -- if the line
            // isn't empty, go on to the next line.
            if ((*buf != 0) && (*buf != '\n')) {
                // If the line is longer than the buffer, read characters
                // until we find a newline.
                if (long_line) {
                    int ch;
                    while (((ch = fgetc(f)) != EOF) && (ch != '\n'));
                }
                continue;
            }

            // A newline has signalled the end of the processor record.
            // Check that there aren't too many procs specified.
            if ((int)num_avail == __kmp_xproc) {
                CLEANUP_THREAD_INFO;
                *msg_id = kmp_i18n_str_TooManyEntries;
                return -1;
            }

            // Check for missing fields. The osId field must be there, and
            // we currently require the physical id field as well.
            if (threadInfo[num_avail][osIdIndex] == UINT_MAX) {
                CLEANUP_THREAD_INFO;
                *msg_id = kmp_i18n_str_MissingProcField;
                return -1;
            }
            if (threadInfo[0][pkgIdIndex] == UINT_MAX) {
                CLEANUP_THREAD_INFO;
                *msg_id = kmp_i18n_str_MissingPhysicalIDField;
                return -1;
            }

            // Skip this proc if it is not included in the machine model.
            if (! KMP_CPU_ISSET(threadInfo[num_avail][osIdIndex], fullMask)) {
                INIT_PROC_INFO(threadInfo[num_avail]);
                continue;
            }

            // We have a successful parse of this proc's info; prepare for
            // the next one.
            num_avail++;
            KMP_ASSERT(num_avail <= num_records);
            INIT_PROC_INFO(threadInfo[num_avail]);
        }
        continue;

        no_val:
        CLEANUP_THREAD_INFO;
        *msg_id = kmp_i18n_str_MissingValCpuinfo;
        return -1;

        dup_field:
        CLEANUP_THREAD_INFO;
        *msg_id = kmp_i18n_str_DuplicateFieldCpuinfo;
        return -1;
    }
# if KMP_MIC && REDUCE_TEAM_SIZE
    unsigned teamSize = 0;
# endif // KMP_MIC && REDUCE_TEAM_SIZE

    KMP_ASSERT(num_avail > 0);
    KMP_ASSERT(num_avail <= num_records);
    // If there's only one thread context to bind to, form an Address object
    // with depth 1 and return immediately.
    if (num_avail == 1) {
        __kmp_ncores = 1;
        __kmp_nThreadsPerCore = nCoresPerPkg = nPackages = 1;
        if (__kmp_affinity_verbose) {
            if (! KMP_AFFINITY_CAPABLE()) {
                KMP_INFORM(AffNotCapableUseCpuinfo, "KMP_AFFINITY");
                KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
                KMP_INFORM(Uniform, "KMP_AFFINITY");
            }
            else {
                char buf[KMP_AFFIN_MASK_PRINT_LEN];
                __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                  fullMask);
                KMP_INFORM(AffCapableUseCpuinfo, "KMP_AFFINITY");
                if (__kmp_affinity_respect_mask) {
                    KMP_INFORM(InitOSProcSetRespect, "KMP_AFFINITY", buf);
                } else {
                    KMP_INFORM(InitOSProcSetNotRespect, "KMP_AFFINITY", buf);
                }
                KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
                KMP_INFORM(Uniform, "KMP_AFFINITY");
            }
            int index;
            kmp_str_buf_t buf;
            __kmp_str_buf_init(&buf);
            __kmp_str_buf_print(&buf, "1");
            for (index = maxIndex - 1; index > pkgIdIndex; index--) {
                __kmp_str_buf_print(&buf, " x 1");
            }
            KMP_INFORM(TopologyExtra, "KMP_AFFINITY", buf.str, 1, 1, 1);
            __kmp_str_buf_free(&buf);
        }

        if (__kmp_affinity_type == affinity_none) {
            CLEANUP_THREAD_INFO;
            return 0;
        }

        *address2os = (AddrUnsPair*)__kmp_allocate(sizeof(AddrUnsPair));
        Address addr(1);
        addr.labels[0] = threadInfo[0][pkgIdIndex];
        (*address2os)[0] = AddrUnsPair(addr, threadInfo[0][osIdIndex]);

        if (__kmp_affinity_gran_levels < 0) {
            __kmp_affinity_gran_levels = 0;
        }

        if (__kmp_affinity_verbose) {
            __kmp_affinity_print_topology(*address2os, 1, 1, 0, -1, -1);
        }

        CLEANUP_THREAD_INFO;
        return 1;
    }
    // Sort the threadInfo table by physical Id.
    qsort(threadInfo, num_avail, sizeof(*threadInfo),
      __kmp_affinity_cmp_ProcCpuInfo_phys_id);
    // The table is now sorted by pkgId / coreId / threadId, but we really
    // don't know the radix of any of the fields: pkgId's may be sparsely
    // assigned, and we don't know what range of values coreId / threadId
    // actually use. If thread ids were not specified in the file, assign
    // them a consistent sequence per core.
    unsigned *counts = (unsigned *)__kmp_allocate((maxIndex + 1)
      * sizeof(unsigned));
    unsigned *maxCt = (unsigned *)__kmp_allocate((maxIndex + 1)
      * sizeof(unsigned));
    unsigned *totals = (unsigned *)__kmp_allocate((maxIndex + 1)
      * sizeof(unsigned));
    unsigned *lastId = (unsigned *)__kmp_allocate((maxIndex + 1)
      * sizeof(unsigned));

    bool assign_thread_ids = false;
    unsigned threadIdCt;
    unsigned index;

    restart_radix_check:
    threadIdCt = 0;

    // Initialize the counter arrays with data from threadInfo[0].
    if (assign_thread_ids) {
        if (threadInfo[0][threadIdIndex] == UINT_MAX) {
            threadInfo[0][threadIdIndex] = threadIdCt++;
        }
        else if (threadIdCt <= threadInfo[0][threadIdIndex]) {
            threadIdCt = threadInfo[0][threadIdIndex] + 1;
        }
    }
    for (index = 0; index <= maxIndex; index++) {
        counts[index] = 1;
        maxCt[index] = 1;
        totals[index] = 1;
        lastId[index] = threadInfo[0][index];
    }
    // Run through the rest of the records in the table, incrementing the
    // counts as we see new unique values of each field.
    for (i = 1; i < num_avail; i++) {
        // Find the most significant index whose id differs from the id for
        // the previous record.
        for (index = maxIndex; index >= threadIdIndex; index--) {
            if (assign_thread_ids && (index == threadIdIndex)) {
                // Auto-assign the thread id field if it wasn't specified.
                if (threadInfo[i][threadIdIndex] == UINT_MAX) {
                    threadInfo[i][threadIdIndex] = threadIdCt++;
                }
                // The thread id field was specified for some entries and
                // not others: continue the counter from the next higher id.
                else if (threadIdCt <= threadInfo[i][threadIdIndex]) {
                    threadIdCt = threadInfo[i][threadIdIndex] + 1;
                }
            }
            if (threadInfo[i][index] != lastId[index]) {
                // Run through all less significant indices, resetting their
                // counts to 1 and recording their last ids; at this index
                // and above, increment the totals.
                unsigned index2;
                for (index2 = threadIdIndex; index2 < index; index2++) {
                    totals[index2]++;
                    if (counts[index2] > maxCt[index2]) {
                        maxCt[index2] = counts[index2];
                    }
                    counts[index2] = 1;
                    lastId[index2] = threadInfo[i][index2];
                }
                counts[index]++;
                totals[index]++;
                lastId[index] = threadInfo[i][index];

                if (assign_thread_ids && (index > threadIdIndex)) {
# if KMP_MIC && REDUCE_TEAM_SIZE
                    // The default team size is the total #threads in the
                    // machine minus 1 thread for every core that has 3 or
                    // more threads.
                    teamSize += ( threadIdCt <= 2 ) ? ( threadIdCt ) : ( threadIdCt - 1 );
# endif // KMP_MIC && REDUCE_TEAM_SIZE

                    // We are on a new core: restart the thread id counter.
                    threadIdCt = 0;

                    if (threadInfo[i][threadIdIndex] == UINT_MAX) {
                        threadInfo[i][threadIdIndex] = threadIdCt++;
                    }
                    else if (threadIdCt <= threadInfo[i][threadIdIndex]) {
                        threadIdCt = threadInfo[i][threadIdIndex] + 1;
                    }
                }
                break;
            }
        }
        // If the inner loop fell through, every field (including the thread
        // id) matched the previous record.
        if (index < threadIdIndex) {
            // If thread ids were specified, duplicate records mean the
            // physical ids are not unique -- an unrecoverable error.
            if ((threadInfo[i][threadIdIndex] != UINT_MAX)
              || assign_thread_ids) {
                __kmp_free(lastId);
                __kmp_free(totals);
                __kmp_free(maxCt);
                __kmp_free(counts);
                CLEANUP_THREAD_INFO;
                *msg_id = kmp_i18n_str_PhysicalIDsNotUnique;
                return -1;
            }

            // Thread ids were not specified and duplicates appeared: start
            // over, assigning the thread ids ourselves.
            assign_thread_ids = true;
            goto restart_radix_check;
        }
    }
# if KMP_MIC && REDUCE_TEAM_SIZE
    // Account for the last core's contribution to the default team size.
    teamSize += ( threadIdCt <= 2 ) ? ( threadIdCt ) : ( threadIdCt - 1 );
# endif // KMP_MIC && REDUCE_TEAM_SIZE

    for (index = threadIdIndex; index <= maxIndex; index++) {
        if (counts[index] > maxCt[index]) {
            maxCt[index] = counts[index];
        }
    }

    __kmp_nThreadsPerCore = maxCt[threadIdIndex];
    nCoresPerPkg = maxCt[coreIdIndex];
    nPackages = totals[pkgIdIndex];

    // Check to see if the machine topology is uniform.
    unsigned prod = totals[maxIndex];
    for (index = threadIdIndex; index < maxIndex; index++) {
        prod *= maxCt[index];
    }
    bool uniform = (prod == totals[threadIdIndex]);
    // When affinity is off, this routine will still be called to set
    // __kmp_ncores, __kmp_nThreadsPerCore, nCoresPerPkg, and nPackages.
    __kmp_ncores = totals[coreIdIndex];

    if (__kmp_affinity_verbose) {
        if (! KMP_AFFINITY_CAPABLE()) {
            KMP_INFORM(AffNotCapableUseCpuinfo, "KMP_AFFINITY");
            KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
            if (uniform) {
                KMP_INFORM(Uniform, "KMP_AFFINITY");
            } else {
                KMP_INFORM(NonUniform, "KMP_AFFINITY");
            }
        }
        else {
            char buf[KMP_AFFIN_MASK_PRINT_LEN];
            __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, fullMask);
            KMP_INFORM(AffCapableUseCpuinfo, "KMP_AFFINITY");
            if (__kmp_affinity_respect_mask) {
                KMP_INFORM(InitOSProcSetRespect, "KMP_AFFINITY", buf);
            } else {
                KMP_INFORM(InitOSProcSetNotRespect, "KMP_AFFINITY", buf);
            }
            KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
            if (uniform) {
                KMP_INFORM(Uniform, "KMP_AFFINITY");
            } else {
                KMP_INFORM(NonUniform, "KMP_AFFINITY");
            }
        }
        kmp_str_buf_t buf;
        __kmp_str_buf_init(&buf);

        __kmp_str_buf_print(&buf, "%d", totals[maxIndex]);
        for (index = maxIndex - 1; index >= pkgIdIndex; index--) {
            __kmp_str_buf_print(&buf, " x %d", maxCt[index]);
        }
        KMP_INFORM(TopologyExtra, "KMP_AFFINITY", buf.str, maxCt[coreIdIndex],
          maxCt[threadIdIndex], __kmp_ncores);

        __kmp_str_buf_free(&buf);
    }
# if KMP_MIC && REDUCE_TEAM_SIZE
    // Set the default team size.
    if ((__kmp_dflt_team_nth == 0) && (teamSize > 0)) {
        __kmp_dflt_team_nth = teamSize;
        KA_TRACE(20, ("__kmp_affinity_create_cpuinfo_map: setting __kmp_dflt_team_nth = %d\n",
          __kmp_dflt_team_nth));
    }
# endif // KMP_MIC && REDUCE_TEAM_SIZE

    if (__kmp_affinity_type == affinity_none) {
        __kmp_free(lastId);
        __kmp_free(totals);
        __kmp_free(maxCt);
        __kmp_free(counts);
        CLEANUP_THREAD_INFO;
        return 0;
    }
    // Count the number of levels which have more nodes at that level than
    // at the parent's level (with an implicit root node at the top). Such a
    // level has at least one node with a sibling, so it stays in the map;
    // the package level is always kept.
    bool *inMap = (bool *)__kmp_allocate((maxIndex + 1) * sizeof(bool));
    for (index = threadIdIndex; index < maxIndex; index++) {
        KMP_ASSERT(totals[index] >= totals[index + 1]);
        inMap[index] = (totals[index] > totals[index + 1]);
    }
    inMap[maxIndex] = (totals[maxIndex] > 1);
    inMap[pkgIdIndex] = true;

    int depth = 0;
    for (index = threadIdIndex; index <= maxIndex; index++) {
        if (inMap[index]) {
            depth++;
        }
    }
    KMP_ASSERT(depth > 0);

    // Construct the data structure that is to be returned.
    *address2os = (AddrUnsPair*)
      __kmp_allocate(sizeof(AddrUnsPair) * num_avail);
    int pkgLevel = -1;
    int coreLevel = -1;
    int threadLevel = -1;

    for (i = 0; i < num_avail; ++i) {
        Address addr(depth);
        unsigned os = threadInfo[i][osIdIndex];
        int src_index;
        int dst_index = 0;

        for (src_index = (int)maxIndex; src_index >= threadIdIndex; src_index--) {
            if (! inMap[src_index]) {
                continue;
            }
            addr.labels[dst_index] = threadInfo[i][src_index];
            if (src_index == pkgIdIndex) {
                pkgLevel = dst_index;
            }
            else if (src_index == coreIdIndex) {
                coreLevel = dst_index;
            }
            else if (src_index == threadIdIndex) {
                threadLevel = dst_index;
            }
            dst_index++;
        }
        (*address2os)[i] = AddrUnsPair(addr, os);
    }

    if (__kmp_affinity_gran_levels < 0) {
        // Set the granularity level based on what levels are modeled in the
        // machine topology map.
        int src_index;
        __kmp_affinity_gran_levels = 0;
        for (src_index = threadIdIndex; src_index <= (int)maxIndex; src_index++) {
            if (! inMap[src_index]) {
                continue;
            }
            switch (src_index) {
                case threadIdIndex:
                if (__kmp_affinity_gran > affinity_gran_thread) {
                    __kmp_affinity_gran_levels++;
                }
                break;

                case coreIdIndex:
                if (__kmp_affinity_gran > affinity_gran_core) {
                    __kmp_affinity_gran_levels++;
                }
                break;

                case pkgIdIndex:
                if (__kmp_affinity_gran > affinity_gran_package) {
                    __kmp_affinity_gran_levels++;
                }
                break;
            }
        }
    }

    if (__kmp_affinity_verbose) {
        __kmp_affinity_print_topology(*address2os, num_avail, depth, pkgLevel,
          coreLevel, threadLevel);
    }
    __kmp_free(inMap);
    __kmp_free(lastId);
    __kmp_free(totals);
    __kmp_free(maxCt);
    __kmp_free(counts);
    CLEANUP_THREAD_INFO;
    return depth;
}
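// Create and return a table of affinity masks, indexed by OS thread id.
// Also compute the number of unique masks at the current granularity:
// threads that are "close" (equal at all but the last
// __kmp_affinity_gran_levels levels) share one mask.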
static kmp_affin_mask_t *
__kmp_create_masks(unsigned *maxIndex, unsigned *numUnique,
  AddrUnsPair *address2os, unsigned numAddrs)
{
    // First form a table of affinity masks in order of OS thread id.
    unsigned depth;
    unsigned maxOsId;
    unsigned i;

    KMP_ASSERT(numAddrs > 0);
    depth = address2os[0].first.depth;

    maxOsId = 0;
    for (i = 0; i < numAddrs; i++) {
        unsigned osId = address2os[i].second;
        if (osId > maxOsId) {
            maxOsId = osId;
        }
    }
    kmp_affin_mask_t *osId2Mask;
    KMP_CPU_ALLOC_ARRAY(osId2Mask, (maxOsId+1));

    // Sort the address2os table according to physical order, which puts all
    // threads on the same core/package/node in consecutive locations.
    qsort(address2os, numAddrs, sizeof(*address2os),
      __kmp_affinity_cmp_Address_labels);

    KMP_ASSERT(__kmp_affinity_gran_levels >= 0);
    if (__kmp_affinity_verbose && (__kmp_affinity_gran_levels > 0)) {
        KMP_INFORM(ThreadsMigrate, "KMP_AFFINITY", __kmp_affinity_gran_levels);
    }
    if (__kmp_affinity_gran_levels >= (int)depth) {
        if (__kmp_affinity_verbose || (__kmp_affinity_warnings
          && (__kmp_affinity_type != affinity_none))) {
            KMP_WARNING(AffThreadsMayMigrate);
        }
    }

    // Run through the table, forming the masks for all threads on each
    // group. Threads in the same group will have identical Address objects
    // up to the granularity level, and appear consecutively.
    unsigned unique = 0;
    unsigned j = 0;                             // index of 1st thread in group
    unsigned leader = 0;
    Address *leaderAddr = &(address2os[0].first);
    kmp_affin_mask_t *sum;
    KMP_CPU_ALLOC_ON_STACK(sum);
    KMP_CPU_ZERO(sum);
    KMP_CPU_SET(address2os[0].second, sum);
    for (i = 1; i < numAddrs; i++) {
        // If this thread is sufficiently close to the leader (within the
        // granularity setting), add its bit to the sum mask; otherwise,
        // flush the group and start a new one.
        if (leaderAddr->isClose(address2os[i].first,
          __kmp_affinity_gran_levels)) {
            KMP_CPU_SET(address2os[i].second, sum);
        }
        else {
            // Copy the group's mask to each member's entry in the osId2Mask
            // table, marking the first address as the leader.
            for (; j < i; j++) {
                unsigned osId = address2os[j].second;
                KMP_DEBUG_ASSERT(osId <= maxOsId);
                kmp_affin_mask_t *mask = KMP_CPU_INDEX(osId2Mask, osId);
                KMP_CPU_COPY(mask, sum);
                address2os[j].first.leader = (j == leader);
            }
            unique++;

            // Start a new group.
            leaderAddr = &(address2os[i].first);
            KMP_CPU_ZERO(sum);
            KMP_CPU_SET(address2os[i].second, sum);
            leader = i;
        }
    }

    // Flush the last group.
    for (; j < i; j++) {
        unsigned osId = address2os[j].second;
        KMP_DEBUG_ASSERT(osId <= maxOsId);
        kmp_affin_mask_t *mask = KMP_CPU_INDEX(osId2Mask, osId);
        KMP_CPU_COPY(mask, sum);
        address2os[j].first.leader = (j == leader);
    }
    unique++;
    KMP_CPU_FREE_FROM_STACK(sum);

    *maxIndex = maxOsId;
    *numUnique = unique;
    return osId2Mask;
}
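// Scratch state for the proclist / placelist parsers below. The newMasks
// vector grows by doubling inside ADD_MASK whenever it fills up.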
static kmp_affin_mask_t *newMasks;
static int numNewMasks;
static int nextNewMask;

#define ADD_MASK(_mask) \
    {                                                                   \
        if (nextNewMask >= numNewMasks) {                               \
            int i;                                                      \
            numNewMasks *= 2;                                           \
            kmp_affin_mask_t* temp;                                     \
            KMP_CPU_INTERNAL_ALLOC_ARRAY(temp, numNewMasks);            \
            for(i=0;i<numNewMasks/2;i++) {                              \
                kmp_affin_mask_t* src = KMP_CPU_INDEX(newMasks, i);     \
                kmp_affin_mask_t* dest = KMP_CPU_INDEX(temp, i);        \
                KMP_CPU_COPY(dest, src);                                \
            }                                                           \
            KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks/2);       \
            newMasks = temp;                                            \
        }                                                               \
        KMP_CPU_COPY(KMP_CPU_INDEX(newMasks, nextNewMask), (_mask));    \
        nextNewMask++;                                                  \
    }

#define ADD_MASK_OSID(_osId,_osId2Mask,_maxOsId) \
    {                                                                   \
        if (((_osId) > _maxOsId) ||                                     \
          (! KMP_CPU_ISSET((_osId), KMP_CPU_INDEX((_osId2Mask), (_osId))))) { \
            if (__kmp_affinity_verbose || (__kmp_affinity_warnings      \
              && (__kmp_affinity_type != affinity_none))) {             \
                KMP_WARNING(AffIgnoreInvalidProcID, _osId);             \
            }                                                           \
        }                                                               \
        else {                                                          \
            ADD_MASK(KMP_CPU_INDEX(_osId2Mask, (_osId)));               \
        }                                                               \
    }

// Re-parse the proclist (for the explicit affinity type), and form the list
// of affinity newMasks indexed by gtid.
static void
__kmp_affinity_process_proclist(kmp_affin_mask_t **out_masks,
  unsigned int *out_numMasks, const char *proclist,
  kmp_affin_mask_t *osId2Mask, int maxOsId)
{
    unsigned int i;
    const char *scan = proclist;
    const char *next = proclist;

    numNewMasks = 2;
    KMP_CPU_INTERNAL_ALLOC_ARRAY(newMasks, numNewMasks);
    nextNewMask = 0;
    kmp_affin_mask_t *sumMask;
    KMP_CPU_ALLOC(sumMask);
    int setSize = 0;

    for (;;) {
        int start, end, stride;

        SKIP_WS(scan);
        next = scan;
        if (*next == '\0') {
            break;
        }

        if (*next == '{') {
            int num;
            setSize = 0;
            next++;     // skip '{'
            SKIP_WS(next);
            scan = next;

            // Read the first integer in the set.
            KMP_ASSERT2((*next >= '0') && (*next <= '9'),
              "bad proclist");
            SKIP_DIGITS(next);
            num = __kmp_str_to_int(scan, *next);
            KMP_ASSERT2(num >= 0, "bad explicit proc list");

            // Copy the mask for that osId to the sum (union) mask.
            if ((num > maxOsId) ||
              (! KMP_CPU_ISSET(num, KMP_CPU_INDEX(osId2Mask, num)))) {
                if (__kmp_affinity_verbose || (__kmp_affinity_warnings
                  && (__kmp_affinity_type != affinity_none))) {
                    KMP_WARNING(AffIgnoreInvalidProcID, num);
                }
                KMP_CPU_ZERO(sumMask);
            }
            else {
                KMP_CPU_COPY(sumMask, KMP_CPU_INDEX(osId2Mask, num));
                setSize = 1;
            }
            for (;;) {
                // Check for end of set.
                SKIP_WS(next);
                if (*next == '}') {
                    next++;     // skip '}'
                    break;
                }

                // Skip optional comma.
                KMP_ASSERT2(*next == ',', "bad explicit proc list");
                next++;         // skip ','
                SKIP_WS(next);

                // Read the next integer in the set.
                scan = next;
                KMP_ASSERT2((*next >= '0') && (*next <= '9'),
                  "bad explicit proc list");

                SKIP_DIGITS(next);
                num = __kmp_str_to_int(scan, *next);
                KMP_ASSERT2(num >= 0, "bad explicit proc list");

                // Add the mask for that osId to the sum mask.
                if ((num > maxOsId) ||
                  (! KMP_CPU_ISSET(num, KMP_CPU_INDEX(osId2Mask, num)))) {
                    if (__kmp_affinity_verbose || (__kmp_affinity_warnings
                      && (__kmp_affinity_type != affinity_none))) {
                        KMP_WARNING(AffIgnoreInvalidProcID, num);
                    }
                }
                else {
                    KMP_CPU_UNION(sumMask, KMP_CPU_INDEX(osId2Mask, num));
                    setSize++;
                }
            }
            if (setSize > 0) {
                ADD_MASK(sumMask);
            }
        }
        else {
            // Read the first integer.
            KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");
            SKIP_DIGITS(next);
            start = __kmp_str_to_int(scan, *next);
            KMP_ASSERT2(start >= 0, "bad explicit proc list");
            SKIP_WS(next);

            // If this isn't a range, add a mask to the list and go on.
            if (*next != '-') {
                ADD_MASK_OSID(start, osId2Mask, maxOsId);

                // Skip optional comma.
                if (*next == ',') {
                    next++;
                }
                scan = next;
                continue;
            }

            // This is a range. Skip over the '-' and read in the 2nd int.
            next++;             // skip '-'
            SKIP_WS(next);
            scan = next;
            KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");
            SKIP_DIGITS(next);
            end = __kmp_str_to_int(scan, *next);
            KMP_ASSERT2(end >= 0, "bad explicit proc list");
            // Check for a stride parameter.
            stride = 1;
            SKIP_WS(next);
            if (*next == ':') {
                // A stride is specified. Skip over the ':' and read the
                // 3rd int.
                int sign = +1;
                next++;         // skip ':'
                SKIP_WS(next);
                scan = next;
                if (*next == '-') {
                    sign = -1;
                    next++;
                    SKIP_WS(next);
                    scan = next;
                }
                KMP_ASSERT2((*next >= '0') && (*next <= '9'),
                  "bad explicit proc list");
                SKIP_DIGITS(next);
                stride = __kmp_str_to_int(scan, *next);
                KMP_ASSERT2(stride >= 0, "bad explicit proc list");
                stride *= sign;
            }

            // Do some range checks.
            KMP_ASSERT2(stride != 0, "bad explicit proc list");
            if (stride > 0) {
                KMP_ASSERT2(start <= end, "bad explicit proc list");
            }
            else {
                KMP_ASSERT2(start >= end, "bad explicit proc list");
            }
            KMP_ASSERT2((end - start) / stride <= 65536, "bad explicit proc list");

            // Add the mask for each OS proc # to the list.
            if (stride > 0) {
                do {
                    ADD_MASK_OSID(start, osId2Mask, maxOsId);
                    start += stride;
                } while (start <= end);
            }
            else {
                do {
                    ADD_MASK_OSID(start, osId2Mask, maxOsId);
                    start += stride;
                } while (start >= end);
            }

            // Skip optional comma.
            SKIP_WS(next);
            if (*next == ',') {
                next++;
            }
            scan = next;
        }
    }
    *out_numMasks = nextNewMask;
    if (nextNewMask == 0) {
        *out_masks = NULL;
        KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
        return;
    }
    KMP_CPU_ALLOC_ARRAY((*out_masks), nextNewMask);
    for(i = 0; i < nextNewMask; i++) {
        kmp_affin_mask_t* src = KMP_CPU_INDEX(newMasks, i);
        kmp_affin_mask_t* dest = KMP_CPU_INDEX((*out_masks), i);
        KMP_CPU_COPY(dest, src);
    }
    KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
    KMP_CPU_FREE(sumMask);
}
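// OMP_PLACES / place-list parsing. Inside braces, a subplace has the form
// "<lower> [: <count> [: <stride>]]"; for example "{0:4}" names procs
// 0,1,2,3 and "{0:4:2}" names procs 0,2,4,6.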
static void
__kmp_process_subplace_list(const char **scan, kmp_affin_mask_t *osId2Mask,
  int maxOsId, kmp_affin_mask_t *tempMask, int *setSize)
{
    const char *next;

    for (;;) {
        int start, count, stride, i;

        // Read in the starting proc id.
        SKIP_WS(*scan);
        KMP_ASSERT2((**scan >= '0') && (**scan <= '9'),
          "bad explicit places list");
        next = *scan;
        SKIP_DIGITS(next);
        start = __kmp_str_to_int(*scan, *next);
        KMP_ASSERT(start >= 0);
        *scan = next;

        // Valid follow sets are ',' ':' and '}'.
        SKIP_WS(*scan);
        if (**scan == '}' || **scan == ',') {
            if ((start > maxOsId) ||
              (! KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start)))) {
                if (__kmp_affinity_verbose || (__kmp_affinity_warnings
                  && (__kmp_affinity_type != affinity_none))) {
                    KMP_WARNING(AffIgnoreInvalidProcID, start);
                }
            }
            else {
                KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start));
                (*setSize)++;
            }
            if (**scan == '}') {
                break;
            }
            (*scan)++;  // skip ','
            continue;
        }
        KMP_ASSERT2(**scan == ':', "bad explicit places list");
        (*scan)++;      // skip ':'

        // Read the count parameter.
        SKIP_WS(*scan);
        KMP_ASSERT2((**scan >= '0') && (**scan <= '9'),
          "bad explicit places list");
        next = *scan;
        SKIP_DIGITS(next);
        count = __kmp_str_to_int(*scan, *next);
        KMP_ASSERT(count >= 0);
        *scan = next;

        // Valid follow sets are ',' ':' and '}'.
        SKIP_WS(*scan);
        if (**scan == '}' || **scan == ',') {
            for (i = 0; i < count; i++) {
                if ((start > maxOsId) ||
                  (! KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start)))) {
                    if (__kmp_affinity_verbose || (__kmp_affinity_warnings
                      && (__kmp_affinity_type != affinity_none))) {
                        KMP_WARNING(AffIgnoreInvalidProcID, start);
                    }
                    break;  // don't proliferate warnings for large count
                }
                else {
                    KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start));
                    start++;
                    (*setSize)++;
                }
            }
            if (**scan == '}') {
                break;
            }
            (*scan)++;  // skip ','
            continue;
        }
        KMP_ASSERT2(**scan == ':', "bad explicit places list");
        (*scan)++;      // skip ':'

        // Read the stride parameter, allowing '+'/'-' sign prefixes.
        int sign = +1;
        for (;;) {
            SKIP_WS(*scan);
            if (**scan == '+') {
                (*scan)++;      // skip '+'
                continue;
            }
            if (**scan == '-') {
                sign *= -1;
                (*scan)++;      // skip '-'
                continue;
            }
            break;
        }
        SKIP_WS(*scan);
        KMP_ASSERT2((**scan >= '0') && (**scan <= '9'),
          "bad explicit places list");
        next = *scan;
        SKIP_DIGITS(next);
        stride = __kmp_str_to_int(*scan, *next);
        KMP_ASSERT(stride >= 0);
        *scan = next;
        stride *= sign;

        // Valid follow sets are ',' and '}'.
        SKIP_WS(*scan);
        if (**scan == '}' || **scan == ',') {
            for (i = 0; i < count; i++) {
                if ((start > maxOsId) ||
                  (! KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start)))) {
                    if (__kmp_affinity_verbose || (__kmp_affinity_warnings
                      && (__kmp_affinity_type != affinity_none))) {
                        KMP_WARNING(AffIgnoreInvalidProcID, start);
                    }
                    break;  // don't proliferate warnings for large count
                }
                else {
                    KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start));
                    start += stride;
                    (*setSize)++;
                }
            }
            if (**scan == '}') {
                break;
            }
            (*scan)++;  // skip ','
            continue;
        }

        KMP_ASSERT2(0, "bad explicit places list");
    }
}
static void
__kmp_process_place(const char **scan, kmp_affin_mask_t *osId2Mask,
  int maxOsId, kmp_affin_mask_t *tempMask, int *setSize)
{
    const char *next;

    // Valid follow sets are '{' '!' and num.
    SKIP_WS(*scan);
    if (**scan == '{') {
        (*scan)++;      // skip '{'
        __kmp_process_subplace_list(scan, osId2Mask, maxOsId , tempMask,
          setSize);
        KMP_ASSERT2(**scan == '}', "bad explicit places list");
        (*scan)++;      // skip '}'
    }
    else if (**scan == '!') {
        (*scan)++;      // skip '!'
        __kmp_process_place(scan, osId2Mask, maxOsId, tempMask, setSize);
        KMP_CPU_COMPLEMENT(maxOsId, tempMask);
    }
    else if ((**scan >= '0') && (**scan <= '9')) {
        next = *scan;
        SKIP_DIGITS(next);
        int num = __kmp_str_to_int(*scan, *next);
        KMP_ASSERT(num >= 0);
        if ((num > maxOsId) ||
          (! KMP_CPU_ISSET(num, KMP_CPU_INDEX(osId2Mask, num)))) {
            if (__kmp_affinity_verbose || (__kmp_affinity_warnings
              && (__kmp_affinity_type != affinity_none))) {
                KMP_WARNING(AffIgnoreInvalidProcID, num);
            }
        }
        else {
            KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, num));
            (*setSize)++;
        }
        *scan = next;   // skip num
    }
    else {
        KMP_ASSERT2(0, "bad explicit places list");
    }
}
static void
__kmp_affinity_process_placelist(kmp_affin_mask_t **out_masks,
  unsigned int *out_numMasks, const char *placelist,
  kmp_affin_mask_t *osId2Mask, int maxOsId)
{
    int i,j,count,stride,sign;
    const char *scan = placelist;
    const char *next = placelist;

    numNewMasks = 2;
    KMP_CPU_INTERNAL_ALLOC_ARRAY(newMasks, numNewMasks);
    nextNewMask = 0;

    // tempMask is modified based on the previous or initial place to form
    // the current place; previousMask holds the previous place.
    kmp_affin_mask_t *tempMask;
    kmp_affin_mask_t *previousMask;
    KMP_CPU_ALLOC(tempMask);
    KMP_CPU_ZERO(tempMask);
    KMP_CPU_ALLOC(previousMask);
    KMP_CPU_ZERO(previousMask);
    int setSize = 0;

    for (;;) {
        __kmp_process_place(&scan, osId2Mask, maxOsId, tempMask, &setSize);

        // Valid follow sets are ',' ':' and EOL.
        SKIP_WS(scan);
        if (*scan == '\0' || *scan == ',') {
            if (setSize > 0) {
                ADD_MASK(tempMask);
            }
            KMP_CPU_ZERO(tempMask);
            setSize = 0;
            if (*scan == '\0') {
                break;
            }
            scan++;     // skip ','
            continue;
        }

        KMP_ASSERT2(*scan == ':', "bad explicit places list");
        scan++;         // skip ':'

        // Read the count parameter.
        SKIP_WS(scan);
        KMP_ASSERT2((*scan >= '0') && (*scan <= '9'),
          "bad explicit places list");
        next = scan;
        SKIP_DIGITS(next);
        count = __kmp_str_to_int(scan, *next);
        KMP_ASSERT(count >= 0);
        scan = next;

        // Valid follow sets are ',' ':' and EOL.
        SKIP_WS(scan);
        if (*scan == '\0' || *scan == ',') {
            stride = +1;
        }
        else {
            KMP_ASSERT2(*scan == ':', "bad explicit places list");
            scan++;     // skip ':'

            // Read the stride parameter, allowing '+'/'-' sign prefixes.
            sign = +1;
            for (;;) {
                SKIP_WS(scan);
                if (*scan == '+') {
                    scan++;     // skip '+'
                    continue;
                }
                if (*scan == '-') {
                    sign *= -1;
                    scan++;     // skip '-'
                    continue;
                }
                break;
            }
            SKIP_WS(scan);
            KMP_ASSERT2((*scan >= '0') && (*scan <= '9'),
              "bad explicit places list");
            next = scan;
            SKIP_DIGITS(next);
            stride = __kmp_str_to_int(scan, *next);
            KMP_DEBUG_ASSERT(stride >= 0);
            scan = next;
            stride *= sign;
        }
        // Add places determined by initial_place : count : stride.
        for (i = 0; i < count; i++) {
            if (setSize == 0) {
                break;
            }
            // Add the current place, then build the next place (tempMask)
            // from it.
            KMP_CPU_COPY(previousMask, tempMask);
            ADD_MASK(previousMask);
            KMP_CPU_ZERO(tempMask);
            setSize = 0;
            KMP_CPU_SET_ITERATE(j, previousMask) {
                if (! KMP_CPU_ISSET(j, previousMask)) {
                    continue;
                }
                else if ((j+stride > maxOsId) || (j+stride < 0) ||
                  (! KMP_CPU_ISSET(j+stride, KMP_CPU_INDEX(osId2Mask, j+stride)))) {
                    if ((__kmp_affinity_verbose || (__kmp_affinity_warnings
                      && (__kmp_affinity_type != affinity_none))) && i < count - 1) {
                        KMP_WARNING(AffIgnoreInvalidProcID, j+stride);
                    }
                }
                else {
                    KMP_CPU_SET(j+stride, tempMask);
                    setSize++;
                }
            }
        }
        KMP_CPU_ZERO(tempMask);
        setSize = 0;

        // Valid follow sets are ',' and EOL.
        SKIP_WS(scan);
        if (*scan == '\0') {
            break;
        }
        if (*scan == ',') {
            scan++;     // skip ','
            continue;
        }

        KMP_ASSERT2(0, "bad explicit places list");
    }
    *out_numMasks = nextNewMask;
    if (nextNewMask == 0) {
        *out_masks = NULL;
        KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
        return;
    }
    KMP_CPU_ALLOC_ARRAY((*out_masks), nextNewMask);
    KMP_CPU_FREE(tempMask);
    KMP_CPU_FREE(previousMask);
    for(i = 0; i < nextNewMask; i++) {
        kmp_affin_mask_t* src = KMP_CPU_INDEX(newMasks, i);
        kmp_affin_mask_t* dest = KMP_CPU_INDEX((*out_masks), i);
        KMP_CPU_COPY(dest, src);
    }
    KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
}
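// Trim the full topology map down to the subset requested via
// KMP_PLACE_THREADS: a number of sockets, cores per socket, and threads per
// core, each with an optional offset. Only uniform 3-level topologies are
// supported.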
#undef ADD_MASK
#undef ADD_MASK_OSID

static void
__kmp_apply_thread_places(AddrUnsPair **pAddr, int depth)
{
    if (__kmp_place_num_sockets == 0 &&
        __kmp_place_num_cores == 0 &&
        __kmp_place_num_threads_per_core == 0 )
        return;   // no topology limiting actions requested, exit

    if (__kmp_place_num_sockets == 0)
        __kmp_place_num_sockets = nPackages;      // use all available sockets
    if (__kmp_place_num_cores == 0)
        __kmp_place_num_cores = nCoresPerPkg;     // use all available cores
    if (__kmp_place_num_threads_per_core == 0 ||
        __kmp_place_num_threads_per_core > __kmp_nThreadsPerCore)
        __kmp_place_num_threads_per_core = __kmp_nThreadsPerCore; // use all HW contexts

    if ( !__kmp_affinity_uniform_topology() ) {
        KMP_WARNING( AffThrPlaceNonUniform );
        return; // don't support non-uniform topology
    }
    if ( depth != 3 ) {
        KMP_WARNING( AffThrPlaceNonThreeLevel );
        return; // don't support not-3-level topology
    }
    if (__kmp_place_socket_offset + __kmp_place_num_sockets > nPackages) {
        KMP_WARNING(AffThrPlaceManySockets);
        return;
    }
    if ( __kmp_place_core_offset + __kmp_place_num_cores > nCoresPerPkg ) {
        KMP_WARNING( AffThrPlaceManyCores );
        return;
    }

    AddrUnsPair *newAddr = (AddrUnsPair *)__kmp_allocate( sizeof(AddrUnsPair) *
      __kmp_place_num_sockets * __kmp_place_num_cores * __kmp_place_num_threads_per_core);

    int i, j, k, n_old = 0, n_new = 0;
    for (i = 0; i < nPackages; ++i)
        if (i < __kmp_place_socket_offset ||
            i >= __kmp_place_socket_offset + __kmp_place_num_sockets)
            n_old += nCoresPerPkg * __kmp_nThreadsPerCore;  // skip not-requested socket
        else
            for (j = 0; j < nCoresPerPkg; ++j)              // walk through requested socket
                if (j < __kmp_place_core_offset ||
                    j >= __kmp_place_core_offset + __kmp_place_num_cores)
                    n_old += __kmp_nThreadsPerCore;         // skip not-requested core
                else
                    for (k = 0; k < __kmp_nThreadsPerCore; ++k) { // walk through requested core
                        if (k < __kmp_place_num_threads_per_core) {
                            newAddr[n_new] = (*pAddr)[n_old]; // collect requested thread's data
                            n_new++;
                        }
                        n_old++;
                    }
    KMP_DEBUG_ASSERT(n_old == nPackages * nCoresPerPkg * __kmp_nThreadsPerCore);
    KMP_DEBUG_ASSERT(n_new == __kmp_place_num_sockets * __kmp_place_num_cores *
      __kmp_place_num_threads_per_core);

    nPackages = __kmp_place_num_sockets;                      // correct nPackages
    nCoresPerPkg = __kmp_place_num_cores;                     // correct nCoresPerPkg
    __kmp_nThreadsPerCore = __kmp_place_num_threads_per_core; // correct nThreadsPerCore
    __kmp_avail_proc = n_new;                                 // correct avail_proc
    __kmp_ncores = nPackages * __kmp_place_num_cores;         // correct ncores

    __kmp_free( *pAddr );
    *pAddr = newAddr;   // replace old topology with the trimmed one
}
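//
// Editor's note: the __kmp_place_* knobs trimmed above are normally set by
// the KMP_PLACE_THREADS parsing code (e.g. "1s,2c,1t" to keep 1 socket x
// 2 cores x 1 hw thread per core - exact syntax assumed here, see that
// parser).  After trimming, the global topology counters are rewritten so
// the rest of the affinity code only ever sees the reduced machine.
//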
static AddrUnsPair *address2os = NULL;
static int * procarr = NULL;
static int __kmp_aff_depth = 0;
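//
// Editor's note: file-scope state shared by the routines below.  address2os
// is the sorted (topology address, OS proc id) table, procarr is the
// per-(core, hw thread) OS-id table used by __kmp_balanced_affinity() on
// non-uniform machines, and __kmp_aff_depth caches the topology depth.
//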
static void
__kmp_aux_affinity_initialize(void)
{
    if (__kmp_affinity_masks != NULL) {
        KMP_ASSERT(fullMask != NULL);
        return;
    }

    //
    // Create the "full" mask - this defines all of the processors that we
    // consider to be in the machine model.  If respect is set, then it is
    // the initialization thread's affinity mask.  Otherwise, it is all
    // processors that we know about on the machine.
    //
    if (fullMask == NULL) {
        KMP_CPU_ALLOC(fullMask);
    }
    if (KMP_AFFINITY_CAPABLE()) {
        if (__kmp_affinity_respect_mask) {
            __kmp_get_system_affinity(fullMask, TRUE);

            // Count the number of available processors.
            unsigned i;
            __kmp_avail_proc = 0;
            KMP_CPU_SET_ITERATE(i, fullMask) {
                if (! KMP_CPU_ISSET(i, fullMask)) {
                    continue;
                }
                __kmp_avail_proc++;
            }
            if (__kmp_avail_proc > __kmp_xproc) {
                if (__kmp_affinity_verbose || (__kmp_affinity_warnings
                  && (__kmp_affinity_type != affinity_none))) {
                    KMP_WARNING(ErrorInitializeAffinity);
                }
                __kmp_affinity_type = affinity_none;
                KMP_AFFINITY_DISABLE();
                return;
            }
        }
        else {
            __kmp_affinity_entire_machine_mask(fullMask);
            __kmp_avail_proc = __kmp_xproc;
        }
    }
    int depth = -1;
    kmp_i18n_id_t msg_id = kmp_i18n_null;

    //
    // For backward compatibility, setting KMP_CPUINFO_FILE =>
    // KMP_TOPOLOGY_METHOD=cpuinfo
    //
    if ((__kmp_cpuinfo_file != NULL) &&
      (__kmp_affinity_top_method == affinity_top_method_all)) {
        __kmp_affinity_top_method = affinity_top_method_cpuinfo;
    }
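//
// Editor's note: in the "all" (default) method below, topology discovery
// methods are tried in order - hwloc (if built in), x2APIC ids, legacy APIC
// ids, /proc/cpuinfo, Windows processor groups, and finally a flat OS-id
// map - falling through to the next method whenever depth < 0.
//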
    if (__kmp_affinity_top_method == affinity_top_method_all) {
        //
        // In the default code path, errors are not fatal - we just try using
        // another method.  We only emit a warning message if affinity is on,
        // or the verbose flag is set, and the nowarnings flag was not set.
        //
        const char *file_name = NULL;
        int line = 0;

# if KMP_USE_HWLOC
        if (depth < 0) {
            if (__kmp_affinity_verbose) {
                KMP_INFORM(AffUsingHwloc, "KMP_AFFINITY");
            }
            if (!__kmp_hwloc_error) {
                depth = __kmp_affinity_create_hwloc_map(&address2os, &msg_id);
                if (depth == 0) {
                    KMP_ASSERT(__kmp_affinity_type == affinity_none);
                    KMP_ASSERT(address2os == NULL);
                    return;
                }
                else if (depth < 0 && __kmp_affinity_verbose) {
                    KMP_INFORM(AffIgnoringHwloc, "KMP_AFFINITY");
                }
            }
            else if (__kmp_affinity_verbose) {
                KMP_INFORM(AffIgnoringHwloc, "KMP_AFFINITY");
            }
        }
# endif // KMP_USE_HWLOC

# if KMP_ARCH_X86 || KMP_ARCH_X86_64

        if (depth < 0) {
            if (__kmp_affinity_verbose) {
                KMP_INFORM(AffInfoStr, "KMP_AFFINITY", KMP_I18N_STR(Decodingx2APIC));
            }

            file_name = NULL;
            depth = __kmp_affinity_create_x2apicid_map(&address2os, &msg_id);
            if (depth == 0) {
                KMP_ASSERT(__kmp_affinity_type == affinity_none);
                KMP_ASSERT(address2os == NULL);
                return;
            }

            if (depth < 0) {
                if (__kmp_affinity_verbose) {
                    if (msg_id != kmp_i18n_null) {
                        KMP_INFORM(AffInfoStrStr, "KMP_AFFINITY", __kmp_i18n_catgets(msg_id),
                          KMP_I18N_STR(DecodingLegacyAPIC));
                    }
                    else {
                        KMP_INFORM(AffInfoStr, "KMP_AFFINITY", KMP_I18N_STR(DecodingLegacyAPIC));
                    }
                }

                file_name = NULL;
                depth = __kmp_affinity_create_apicid_map(&address2os, &msg_id);
                if (depth == 0) {
                    KMP_ASSERT(__kmp_affinity_type == affinity_none);
                    KMP_ASSERT(address2os == NULL);
                    return;
                }
            }
        }

# endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

# if KMP_OS_LINUX

        if (depth < 0) {
            if (__kmp_affinity_verbose) {
                if (msg_id != kmp_i18n_null) {
                    KMP_INFORM(AffStrParseFilename, "KMP_AFFINITY",
                      __kmp_i18n_catgets(msg_id), "/proc/cpuinfo");
                }
                else {
                    KMP_INFORM(AffParseFilename, "KMP_AFFINITY", "/proc/cpuinfo");
                }
            }

            FILE *f = fopen("/proc/cpuinfo", "r");
            if (f == NULL) {
                msg_id = kmp_i18n_str_CantOpenCpuinfo;
            }
            else {
                file_name = "/proc/cpuinfo";
                depth = __kmp_affinity_create_cpuinfo_map(&address2os, &line, &msg_id, f);
                fclose(f);
                if (depth == 0) {
                    KMP_ASSERT(__kmp_affinity_type == affinity_none);
                    KMP_ASSERT(address2os == NULL);
                    return;
                }
            }
        }

# endif /* KMP_OS_LINUX */

# if KMP_GROUP_AFFINITY

        if ((depth < 0) && (__kmp_num_proc_groups > 1)) {
            if (__kmp_affinity_verbose) {
                KMP_INFORM(AffWindowsProcGroupMap, "KMP_AFFINITY");
            }

            depth = __kmp_affinity_create_proc_group_map(&address2os, &msg_id);
            KMP_ASSERT(depth != 0);
        }

# endif /* KMP_GROUP_AFFINITY */

        if (depth < 0) {
            if (__kmp_affinity_verbose && (msg_id != kmp_i18n_null)) {
                if (file_name == NULL) {
                    KMP_INFORM(UsingFlatOS, __kmp_i18n_catgets(msg_id));
                }
                else if (line == 0) {
                    KMP_INFORM(UsingFlatOSFile, file_name, __kmp_i18n_catgets(msg_id));
                }
                else {
                    KMP_INFORM(UsingFlatOSFileLine, file_name, line, __kmp_i18n_catgets(msg_id));
                }
            }

            file_name = "";
            depth = __kmp_affinity_create_flat_map(&address2os, &msg_id);
            if (depth == 0) {
                KMP_ASSERT(__kmp_affinity_type == affinity_none);
                KMP_ASSERT(address2os == NULL);
                return;
            }
            KMP_ASSERT(depth > 0);
            KMP_ASSERT(address2os != NULL);
        }
    }
    //
    // If the user has specified that a particular topology discovery method
    // is to be used, then we abort if that method fails.
    //

# if KMP_ARCH_X86 || KMP_ARCH_X86_64

    else if (__kmp_affinity_top_method == affinity_top_method_x2apicid) {
        if (__kmp_affinity_verbose) {
            KMP_INFORM(AffInfoStr, "KMP_AFFINITY", KMP_I18N_STR(Decodingx2APIC));
        }

        depth = __kmp_affinity_create_x2apicid_map(&address2os, &msg_id);
        if (depth == 0) {
            KMP_ASSERT(__kmp_affinity_type == affinity_none);
            KMP_ASSERT(address2os == NULL);
            return;
        }
        if (depth < 0) {
            KMP_ASSERT(msg_id != kmp_i18n_null);
            KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id));
        }
    }
    else if (__kmp_affinity_top_method == affinity_top_method_apicid) {
        if (__kmp_affinity_verbose) {
            KMP_INFORM(AffInfoStr, "KMP_AFFINITY", KMP_I18N_STR(DecodingLegacyAPIC));
        }

        depth = __kmp_affinity_create_apicid_map(&address2os, &msg_id);
        if (depth == 0) {
            KMP_ASSERT(__kmp_affinity_type == affinity_none);
            KMP_ASSERT(address2os == NULL);
            return;
        }
        if (depth < 0) {
            KMP_ASSERT(msg_id != kmp_i18n_null);
            KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id));
        }
    }

# endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
    else if (__kmp_affinity_top_method == affinity_top_method_cpuinfo) {
        const char *filename;
        if (__kmp_cpuinfo_file != NULL) {
            filename = __kmp_cpuinfo_file;
        }
        else {
            filename = "/proc/cpuinfo";
        }

        if (__kmp_affinity_verbose) {
            KMP_INFORM(AffParseFilename, "KMP_AFFINITY", filename);
        }

        FILE *f = fopen(filename, "r");
        if (f == NULL) {
            int code = errno;
            if (__kmp_cpuinfo_file != NULL) {
                __kmp_msg(
                    kmp_ms_fatal,
                    KMP_MSG(CantOpenFileForReading, filename),
                    KMP_ERR(code),
                    KMP_HNT(NameComesFrom_CPUINFO_FILE),
                    __kmp_msg_null
                );
            }
            else {
                __kmp_msg(
                    kmp_ms_fatal,
                    KMP_MSG(CantOpenFileForReading, filename),
                    KMP_ERR(code),
                    __kmp_msg_null
                );
            }
        }
        int line = 0;
        depth = __kmp_affinity_create_cpuinfo_map(&address2os, &line, &msg_id, f);
        fclose(f);
        if (depth < 0) {
            KMP_ASSERT(msg_id != kmp_i18n_null);
            if (line > 0) {
                KMP_FATAL(FileLineMsgExiting, filename, line, __kmp_i18n_catgets(msg_id));
            }
            else {
                KMP_FATAL(FileMsgExiting, filename, __kmp_i18n_catgets(msg_id));
            }
        }
        if (__kmp_affinity_type == affinity_none) {
            KMP_ASSERT(depth == 0);
            KMP_ASSERT(address2os == NULL);
            return;
        }
    }
# if KMP_GROUP_AFFINITY

    else if (__kmp_affinity_top_method == affinity_top_method_group) {
        if (__kmp_affinity_verbose) {
            KMP_INFORM(AffWindowsProcGroupMap, "KMP_AFFINITY");
        }

        depth = __kmp_affinity_create_proc_group_map(&address2os, &msg_id);
        KMP_ASSERT(depth != 0);
        if (depth < 0) {
            KMP_ASSERT(msg_id != kmp_i18n_null);
            KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id));
        }
    }

# endif /* KMP_GROUP_AFFINITY */
    else if (__kmp_affinity_top_method == affinity_top_method_flat) {
        if (__kmp_affinity_verbose) {
            KMP_INFORM(AffUsingFlatOS, "KMP_AFFINITY");
        }

        depth = __kmp_affinity_create_flat_map(&address2os, &msg_id);
        if (depth == 0) {
            KMP_ASSERT(__kmp_affinity_type == affinity_none);
            KMP_ASSERT(address2os == NULL);
            return;
        }
        // should not fail
        KMP_ASSERT(depth > 0);
        KMP_ASSERT(address2os != NULL);
    }
# if KMP_USE_HWLOC
    else if (__kmp_affinity_top_method == affinity_top_method_hwloc) {
        if (__kmp_affinity_verbose) {
            KMP_INFORM(AffUsingHwloc, "KMP_AFFINITY");
        }
        depth = __kmp_affinity_create_hwloc_map(&address2os, &msg_id);
        if (depth == 0) {
            KMP_ASSERT(__kmp_affinity_type == affinity_none);
            KMP_ASSERT(address2os == NULL);
            return;
        }
# if KMP_DEBUG
        // Sanity check: compare the hwloc-derived topology map against the
        // one obtained from the APIC ids.
        AddrUnsPair *otheraddress2os = NULL;
        int otherdepth = -1;
# if KMP_MIC
        otherdepth = __kmp_affinity_create_apicid_map(&otheraddress2os, &msg_id);
# else
        otherdepth = __kmp_affinity_create_x2apicid_map(&otheraddress2os, &msg_id);
# endif
        if (otheraddress2os != NULL && address2os != NULL) {
            int i;
            unsigned arent_equal_flag = 0;
            for (i = 0; i < __kmp_avail_proc; i++) {
                if (otheraddress2os[i] != address2os[i]) arent_equal_flag = 1;
            }
            if (arent_equal_flag) {
                KA_TRACE(10, ("__kmp_aux_affinity_initialize: Hwloc affinity places are different from APICID\n"));
                KA_TRACE(10, ("__kmp_aux_affinity_initialize: APICID Table:\n"));
                for (i = 0; i < __kmp_avail_proc; i++) {
                    otheraddress2os[i].print(); __kmp_printf("\n");
                }
                KA_TRACE(10, ("__kmp_aux_affinity_initialize: Hwloc Table:\n"));
                for (i = 0; i < __kmp_avail_proc; i++) {
                    address2os[i].print(); __kmp_printf("\n");
                }
            }
            else {
                KA_TRACE(10, ("__kmp_aux_affinity_initialize: Hwloc affinity places are same as APICID\n"));
            }
        }
# endif // KMP_DEBUG
    }
# endif // KMP_USE_HWLOC

    if (address2os == NULL) {
        if (KMP_AFFINITY_CAPABLE()
          && (__kmp_affinity_verbose || (__kmp_affinity_warnings
          && (__kmp_affinity_type != affinity_none)))) {
            KMP_WARNING(ErrorInitializeAffinity);
        }
        __kmp_affinity_type = affinity_none;
        KMP_AFFINITY_DISABLE();
        return;
    }
    __kmp_apply_thread_places(&address2os, depth);

    //
    // Create the table of masks, indexed by thread Id.
    //
    unsigned maxIndex;
    unsigned numUnique;
    kmp_affin_mask_t *osId2Mask = __kmp_create_masks(&maxIndex, &numUnique,
      address2os, __kmp_avail_proc);
    if (__kmp_affinity_gran_levels == 0) {
        KMP_DEBUG_ASSERT((int)numUnique == __kmp_avail_proc);
    }

    //
    // Set the childNums vector in all Address objects.  This must be done
    // before we can sort using __kmp_affinity_cmp_Address_child_num(),
    // which takes into account the setting of __kmp_affinity_compact.
    //
    __kmp_affinity_assign_child_nums(address2os, __kmp_avail_proc);
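//
// Editor's note (sketch): for the non-explicit types in the switch below,
// each case mostly just picks a value for __kmp_affinity_compact; the real
// work happens at the sortAddresses label, where qsort() permutes the
// topology levels used as the sort key.
//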
    switch (__kmp_affinity_type) {

        case affinity_explicit:
        KMP_DEBUG_ASSERT(__kmp_affinity_proclist != NULL);
# if OMP_40_ENABLED
        if (__kmp_nested_proc_bind.bind_types[0] == proc_bind_intel)
# endif
        {
            __kmp_affinity_process_proclist(&__kmp_affinity_masks,
              &__kmp_affinity_num_masks, __kmp_affinity_proclist, osId2Mask,
              maxIndex);
        }
# if OMP_40_ENABLED
        else {
            __kmp_affinity_process_placelist(&__kmp_affinity_masks,
              &__kmp_affinity_num_masks, __kmp_affinity_proclist, osId2Mask,
              maxIndex);
        }
# endif
        if (__kmp_affinity_num_masks == 0) {
            if (__kmp_affinity_verbose || (__kmp_affinity_warnings
              && (__kmp_affinity_type != affinity_none))) {
                KMP_WARNING(AffNoValidProcID);
            }
            __kmp_affinity_type = affinity_none;
            return;
        }
        break;
        //
        // The other affinity types rely on sorting the Addresses according
        // to some permutation of the machine topology tree.  Set
        // __kmp_affinity_compact and __kmp_affinity_offset appropriately,
        // then jump to a common code fragment to do the sort and create
        // the array of affinity masks.
        //

        case affinity_logical:
        __kmp_affinity_compact = 0;
        if (__kmp_affinity_offset) {
            __kmp_affinity_offset = __kmp_nThreadsPerCore * __kmp_affinity_offset
              % __kmp_avail_proc;
        }
        goto sortAddresses;

        case affinity_physical:
        if (__kmp_nThreadsPerCore > 1) {
            __kmp_affinity_compact = 1;
            if (__kmp_affinity_compact >= depth) {
                __kmp_affinity_compact = 0;
            }
        } else {
            __kmp_affinity_compact = 0;
        }
        if (__kmp_affinity_offset) {
            __kmp_affinity_offset = __kmp_nThreadsPerCore * __kmp_affinity_offset
              % __kmp_avail_proc;
        }
        goto sortAddresses;

        case affinity_scatter:
        if (__kmp_affinity_compact >= depth) {
            __kmp_affinity_compact = 0;
        }
        else {
            __kmp_affinity_compact = depth - 1 - __kmp_affinity_compact;
        }
        goto sortAddresses;

        case affinity_compact:
        if (__kmp_affinity_compact >= depth) {
            __kmp_affinity_compact = depth - 1;
        }
        goto sortAddresses;
        case affinity_balanced:
        // Balanced works only for the case of a single package
        if( nPackages > 1 ) {
            if( __kmp_affinity_verbose || __kmp_affinity_warnings ) {
                KMP_WARNING( AffBalancedNotAvail, "KMP_AFFINITY" );
            }
            __kmp_affinity_type = affinity_none;
            return;
        } else if( __kmp_affinity_uniform_topology() ) {
            break;
        } else { // Non-uniform topology

            // Save the depth for further usage
            __kmp_aff_depth = depth;

            // Number of hyper threads per core in HT machine
            int nth_per_core = __kmp_nThreadsPerCore;

            int core_level;
            if( nth_per_core > 1 ) { // HT machine
                core_level = depth - 2;
            } else { // No HT machine
                core_level = depth - 1;
            }
            int ncores = address2os[ __kmp_avail_proc - 1 ].first.labels[ core_level ] + 1;
            int nproc = nth_per_core * ncores;

            procarr = ( int * )__kmp_allocate( sizeof( int ) * nproc );
            for( int i = 0; i < nproc; i++ ) {
                procarr[ i ] = -1;
            }

            for( int i = 0; i < __kmp_avail_proc; i++ ) {
                int proc = address2os[ i ].second;
                // With depth == 3 the label levels are 0 - package, 1 - core,
                // 2 - thread; with one thread per core, depth == 2 and level
                // 1 is the core.
                int level = depth - 1;

                int thread = 0;
                int core = address2os[ i ].first.labels[ level ];
                // If the thread level exists, i.e. we have more than one
                // thread context per core
                if( nth_per_core > 1 ) {
                    thread = address2os[ i ].first.labels[ level ] % nth_per_core;
                    core = address2os[ i ].first.labels[ level - 1 ];
                }
                procarr[ core * nth_per_core + thread ] = proc;
            }

            break;
        }
        sortAddresses:
        //
        // Allocate the gtid->affinity mask table.
        //
        if (__kmp_affinity_dups) {
            __kmp_affinity_num_masks = __kmp_avail_proc;
        }
        else {
            __kmp_affinity_num_masks = numUnique;
        }

# if OMP_40_ENABLED
        if ( ( __kmp_nested_proc_bind.bind_types[0] != proc_bind_intel )
          && ( __kmp_affinity_num_places > 0 )
          && ( (unsigned)__kmp_affinity_num_places < __kmp_affinity_num_masks ) ) {
            __kmp_affinity_num_masks = __kmp_affinity_num_places;
        }
# endif

        KMP_CPU_ALLOC_ARRAY(__kmp_affinity_masks, __kmp_affinity_num_masks);

        //
        // Sort the address2os table according to the current setting of
        // __kmp_affinity_compact, then fill out __kmp_affinity_masks.
        //
        qsort(address2os, __kmp_avail_proc, sizeof(*address2os),
          __kmp_affinity_cmp_Address_child_num);

        {
            int i;
            unsigned j;
            for (i = 0, j = 0; i < __kmp_avail_proc; i++) {
                if ((! __kmp_affinity_dups) && (! address2os[i].first.leader)) {
                    continue;
                }
                unsigned osId = address2os[i].second;
                kmp_affin_mask_t *src = KMP_CPU_INDEX(osId2Mask, osId);
                kmp_affin_mask_t *dest
                  = KMP_CPU_INDEX(__kmp_affinity_masks, j);
                KMP_ASSERT(KMP_CPU_ISSET(osId, src));
                KMP_CPU_COPY(dest, src);
                if (++j >= __kmp_affinity_num_masks) {
                    break;
                }
            }
            KMP_DEBUG_ASSERT(j == __kmp_affinity_num_masks);
        }
        break;
        default:
        KMP_ASSERT2(0, "Unexpected affinity setting");
    }

    __kmp_free(osId2Mask);
    machine_hierarchy.init(address2os, __kmp_avail_proc);
}
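//
// Worked example (editor's sketch): on a depth-3 machine (package, core,
// thread), KMP_AFFINITY=compact keeps neighbors in the innermost level
// together, while scatter inverts the key above
// (compact = depth - 1 - compact), so consecutive masks land on different
// packages before reusing hardware threads of the same core.
//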
void
__kmp_affinity_initialize(void)
{
    //
    // Much of the code above assumes that if a machine is not affinity
    // capable, then __kmp_affinity_type == affinity_none.  That state is now
    // represented explicitly as affinity_disabled, so temporarily slam the
    // type to affinity_none around the real initialization routine and
    // restore it afterwards.
    //
    int disabled = (__kmp_affinity_type == affinity_disabled);
    if (! KMP_AFFINITY_CAPABLE()) {
        KMP_ASSERT(disabled);
    }
    if (disabled) {
        __kmp_affinity_type = affinity_none;
    }
    __kmp_aux_affinity_initialize();
    if (disabled) {
        __kmp_affinity_type = affinity_disabled;
    }
}
void
__kmp_affinity_uninitialize(void)
{
    if (__kmp_affinity_masks != NULL) {
        KMP_CPU_FREE_ARRAY(__kmp_affinity_masks, __kmp_affinity_num_masks);
        __kmp_affinity_masks = NULL;
    }
    if (fullMask != NULL) {
        KMP_CPU_FREE(fullMask);
        fullMask = NULL;
    }
    __kmp_affinity_num_masks = 0;
# if OMP_40_ENABLED
    __kmp_affinity_num_places = 0;
# endif
    if (__kmp_affinity_proclist != NULL) {
        __kmp_free(__kmp_affinity_proclist);
        __kmp_affinity_proclist = NULL;
    }
    if( address2os != NULL ) {
        __kmp_free( address2os );
        address2os = NULL;
    }
    if( procarr != NULL ) {
        __kmp_free( procarr );
        procarr = NULL;
    }
}
void
__kmp_affinity_set_init_mask(int gtid, int isa_root)
{
    if (! KMP_AFFINITY_CAPABLE()) {
        return;
    }

    kmp_info_t *th = (kmp_info_t *)TCR_SYNC_PTR(__kmp_threads[gtid]);
    if (th->th.th_affin_mask == NULL) {
        KMP_CPU_ALLOC(th->th.th_affin_mask);
    }
    else {
        KMP_CPU_ZERO(th->th.th_affin_mask);
    }

    // Copy the thread mask to the kmp_info_t structure.  If
    // __kmp_affinity_type == affinity_none, copy the "full" mask, i.e. one
    // that has all of the OS proc ids set, or if
    // __kmp_affinity_respect_mask is set, the mask of the initialization
    // thread.
    kmp_affin_mask_t *mask;
    int i;

# if OMP_40_ENABLED
    if (__kmp_nested_proc_bind.bind_types[0] == proc_bind_intel)
# endif
    {
        if ((__kmp_affinity_type == affinity_none) || (__kmp_affinity_type == affinity_balanced)
          ) {
# if KMP_GROUP_AFFINITY
            if (__kmp_num_proc_groups > 1) {
                return;
            }
# endif
            KMP_ASSERT(fullMask != NULL);
            i = KMP_PLACE_ALL;
            mask = fullMask;
        }
        else {
            KMP_DEBUG_ASSERT( __kmp_affinity_num_masks > 0 );
            i = (gtid + __kmp_affinity_offset) % __kmp_affinity_num_masks;
            mask = KMP_CPU_INDEX(__kmp_affinity_masks, i);
        }
    }
# if OMP_40_ENABLED
    else {
        if ((! isa_root)
          || (__kmp_nested_proc_bind.bind_types[0] == proc_bind_false)) {
# if KMP_GROUP_AFFINITY
            if (__kmp_num_proc_groups > 1) {
                return;
            }
# endif
            KMP_ASSERT(fullMask != NULL);
            i = KMP_PLACE_ALL;
            mask = fullMask;
        }
        else {
            KMP_DEBUG_ASSERT( __kmp_affinity_num_masks > 0 );
            i = (gtid + __kmp_affinity_offset) % __kmp_affinity_num_masks;
            mask = KMP_CPU_INDEX(__kmp_affinity_masks, i);
        }
    }
# endif

# if OMP_40_ENABLED
    th->th.th_current_place = i;
    if (isa_root) {
        th->th.th_new_place = i;
        th->th.th_first_place = 0;
        th->th.th_last_place = __kmp_affinity_num_masks - 1;
    }

    if (i == KMP_PLACE_ALL) {
        KA_TRACE(100, ("__kmp_affinity_set_init_mask: binding T#%d to all places\n",
          gtid));
    }
    else {
        KA_TRACE(100, ("__kmp_affinity_set_init_mask: binding T#%d to place %d\n",
          gtid, i));
    }
# else
    if (i == -1) {
        KA_TRACE(100, ("__kmp_affinity_set_init_mask: binding T#%d to fullMask\n",
          gtid));
    }
    else {
        KA_TRACE(100, ("__kmp_affinity_set_init_mask: binding T#%d to mask %d\n",
          gtid, i));
    }
# endif /* OMP_40_ENABLED */

    KMP_CPU_COPY(th->th.th_affin_mask, mask);

    if (__kmp_affinity_verbose) {
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
          th->th.th_affin_mask);
        KMP_INFORM(BoundToOSProcSet, "KMP_AFFINITY", (kmp_int32)getpid(), gtid,
          buf);
    }

# if KMP_OS_WINDOWS
    // On Windows* OS, the process affinity mask might have changed.  If the
    // user didn't request affinity and this call fails, just continue
    // silently.
    if ( __kmp_affinity_type == affinity_none ) {
        __kmp_set_system_affinity(th->th.th_affin_mask, FALSE);
    }
    else
# endif
    __kmp_set_system_affinity(th->th.th_affin_mask, TRUE);
}
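//
// Worked example (editor's sketch): with __kmp_affinity_num_masks == 4 and
// __kmp_affinity_offset == 1, the modulo above assigns threads gtid 0..4 to
// places 1, 2, 3, 0, 1 - the place list simply wraps around.
//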
# if OMP_40_ENABLED

void
__kmp_affinity_set_place(int gtid)
{
    if (! KMP_AFFINITY_CAPABLE()) {
        return;
    }

    kmp_info_t *th = (kmp_info_t *)TCR_SYNC_PTR(__kmp_threads[gtid]);

    KA_TRACE(100, ("__kmp_affinity_set_place: binding T#%d to place %d (current place = %d)\n",
      gtid, th->th.th_new_place, th->th.th_current_place));

    //
    // Check that the new place is within this thread's partition.
    //
    KMP_DEBUG_ASSERT(th->th.th_affin_mask != NULL);
    KMP_ASSERT(th->th.th_new_place >= 0);
    KMP_ASSERT((unsigned)th->th.th_new_place <= __kmp_affinity_num_masks);
    if (th->th.th_first_place <= th->th.th_last_place) {
        KMP_ASSERT((th->th.th_new_place >= th->th.th_first_place)
          && (th->th.th_new_place <= th->th.th_last_place));
    }
    else {
        KMP_ASSERT((th->th.th_new_place <= th->th.th_first_place)
          || (th->th.th_new_place >= th->th.th_last_place));
    }

    //
    // Copy the thread mask to the kmp_info_t structure,
    // and set this thread's affinity.
    //
    kmp_affin_mask_t *mask = KMP_CPU_INDEX(__kmp_affinity_masks,
      th->th.th_new_place);
    KMP_CPU_COPY(th->th.th_affin_mask, mask);
    th->th.th_current_place = th->th.th_new_place;

    if (__kmp_affinity_verbose) {
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
          th->th.th_affin_mask);
        KMP_INFORM(BoundToOSProcSet, "OMP_PROC_BIND", (kmp_int32)getpid(),
          gtid, buf);
    }
    __kmp_set_system_affinity(th->th.th_affin_mask, TRUE);
}

# endif /* OMP_40_ENABLED */
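//
// Editor's note: a thread's place partition may wrap around the end of the
// place list (th_first_place > th_last_place), which is why the assertions
// above accept both orientations rather than requiring a contiguous
// interval.
//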
int
__kmp_aux_set_affinity(void **mask)
{
    int gtid;
    kmp_info_t *th;
    int retval;

    if (! KMP_AFFINITY_CAPABLE()) {
        return -1;
    }

    gtid = __kmp_entry_gtid();
    KA_TRACE(1000, ;{
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
          (kmp_affin_mask_t *)(*mask));
        __kmp_debug_printf("kmp_set_affinity: setting affinity mask for thread %d = %s\n",
          gtid, buf);
    });

    if (__kmp_env_consistency_check) {
        if ((mask == NULL) || (*mask == NULL)) {
            KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
        }
        else {
            unsigned proc;
            int num_procs = 0;

            KMP_CPU_SET_ITERATE(proc, ((kmp_affin_mask_t*)(*mask))) {
                if (! KMP_CPU_ISSET(proc, (kmp_affin_mask_t *)(*mask))) {
                    continue;
                }
                num_procs++;
                if (! KMP_CPU_ISSET(proc, fullMask)) {
                    KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
                    break;
                }
            }
            if (num_procs == 0) {
                KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
            }

# if KMP_GROUP_AFFINITY
            if (__kmp_get_proc_group((kmp_affin_mask_t *)(*mask)) < 0) {
                KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
            }
# endif /* KMP_GROUP_AFFINITY */
        }
    }

    th = __kmp_threads[gtid];
    KMP_DEBUG_ASSERT(th->th.th_affin_mask != NULL);
    retval = __kmp_set_system_affinity((kmp_affin_mask_t *)(*mask), FALSE);
    if (retval == 0) {
        KMP_CPU_COPY(th->th.th_affin_mask, (kmp_affin_mask_t *)(*mask));
    }

# if OMP_40_ENABLED
    th->th.th_current_place = KMP_PLACE_UNDEFINED;
    th->th.th_new_place = KMP_PLACE_UNDEFINED;
    th->th.th_first_place = 0;
    th->th.th_last_place = __kmp_affinity_num_masks - 1;

    //
    // Turn off 4.0 affinity for the current thread at this parallel level.
    //
    th->th.th_current_task->td_icvs.proc_bind = proc_bind_false;
# endif

    return retval;
}
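//
// Editor's sketch of the user-visible call sequence this routine backs,
// assuming the documented KMP affinity API exported from omp.h:
//
//   kmp_affinity_mask_t m;
//   kmp_create_affinity_mask(&m);
//   kmp_set_affinity_mask_proc(3, &m);      // request OS proc 3
//   if (kmp_set_affinity(&m) != 0) {
//       /* mask was rejected by the runtime or the OS */
//   }
//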
int
__kmp_aux_get_affinity(void **mask)
{
    int gtid;
    int retval;
    kmp_info_t *th;

    if (! KMP_AFFINITY_CAPABLE()) {
        return -1;
    }

    gtid = __kmp_entry_gtid();
    th = __kmp_threads[gtid];
    KMP_DEBUG_ASSERT(th->th.th_affin_mask != NULL);

    KA_TRACE(1000, ;{
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
          th->th.th_affin_mask);
        __kmp_printf("kmp_get_affinity: stored affinity mask for thread %d = %s\n", gtid, buf);
    });

    if (__kmp_env_consistency_check) {
        if ((mask == NULL) || (*mask == NULL)) {
            KMP_FATAL(AffinityInvalidMask, "kmp_get_affinity");
        }
    }

# if !KMP_OS_WINDOWS

    retval = __kmp_get_system_affinity((kmp_affin_mask_t *)(*mask), FALSE);
    KA_TRACE(1000, ;{
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
          (kmp_affin_mask_t *)(*mask));
        __kmp_printf("kmp_get_affinity: system affinity mask for thread %d = %s\n", gtid, buf);
    });
    return retval;

# else

    KMP_CPU_COPY((kmp_affin_mask_t *)(*mask), th->th.th_affin_mask);
    return 0;

# endif /* !KMP_OS_WINDOWS */
}
int
__kmp_aux_set_affinity_mask_proc(int proc, void **mask)
{
    if (! KMP_AFFINITY_CAPABLE()) {
        return -1;
    }

    KA_TRACE(1000, ;{
        int gtid = __kmp_entry_gtid();
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
          (kmp_affin_mask_t *)(*mask));
        __kmp_debug_printf("kmp_set_affinity_mask_proc: setting proc %d in affinity mask for thread %d = %s\n",
          proc, gtid, buf);
    });

    if (__kmp_env_consistency_check) {
        if ((mask == NULL) || (*mask == NULL)) {
            KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity_mask_proc");
        }
    }

    if ((proc < 0)
# if !KMP_USE_HWLOC
      || ((unsigned)proc >= KMP_CPU_SETSIZE)
# endif
      ) {
        return -1;
    }
    if (! KMP_CPU_ISSET(proc, fullMask)) {
        return -2;
    }

    KMP_CPU_SET(proc, (kmp_affin_mask_t *)(*mask));
    return 0;
}
int
__kmp_aux_unset_affinity_mask_proc(int proc, void **mask)
{
    if (! KMP_AFFINITY_CAPABLE()) {
        return -1;
    }

    KA_TRACE(1000, ;{
        int gtid = __kmp_entry_gtid();
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
          (kmp_affin_mask_t *)(*mask));
        __kmp_debug_printf("kmp_unset_affinity_mask_proc: unsetting proc %d in affinity mask for thread %d = %s\n",
          proc, gtid, buf);
    });

    if (__kmp_env_consistency_check) {
        if ((mask == NULL) || (*mask == NULL)) {
            KMP_FATAL(AffinityInvalidMask, "kmp_unset_affinity_mask_proc");
        }
    }

    if ((proc < 0)
# if !KMP_USE_HWLOC
      || ((unsigned)proc >= KMP_CPU_SETSIZE)
# endif
      ) {
        return -1;
    }
    if (! KMP_CPU_ISSET(proc, fullMask)) {
        return -2;
    }

    KMP_CPU_CLR(proc, (kmp_affin_mask_t *)(*mask));
    return 0;
}
int
__kmp_aux_get_affinity_mask_proc(int proc, void **mask)
{
    if (! KMP_AFFINITY_CAPABLE()) {
        return -1;
    }

    KA_TRACE(1000, ;{
        int gtid = __kmp_entry_gtid();
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
          (kmp_affin_mask_t *)(*mask));
        __kmp_debug_printf("kmp_get_affinity_mask_proc: getting proc %d in affinity mask for thread %d = %s\n",
          proc, gtid, buf);
    });

    if (__kmp_env_consistency_check) {
        if ((mask == NULL) || (*mask == NULL)) {
            KMP_FATAL(AffinityInvalidMask, "kmp_get_affinity_mask_proc");
        }
    }

    if ((proc < 0)
# if !KMP_USE_HWLOC
      || ((unsigned)proc >= KMP_CPU_SETSIZE)
# endif
      ) {
        return -1;
    }
    if (! KMP_CPU_ISSET(proc, fullMask)) {
        return 0;
    }

    return KMP_CPU_ISSET(proc, (kmp_affin_mask_t *)(*mask));
}
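//
// Editor's note on the return conventions of the three *_mask_proc helpers
// above (as reconstructed): -1 for an out-of-range proc id, -2 (or 0 for
// the getter) when the proc is outside fullMask, and otherwise 0 on success
// or the queried bit value.
//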
// Dynamic affinity settings - Affinity balanced
void __kmp_balanced_affinity( int tid, int nthreads )
{
    if( __kmp_affinity_uniform_topology() ) {
        int coreID;
        int threadID;
        // Number of hyper threads per core in HT machine
        int __kmp_nth_per_core = __kmp_avail_proc / __kmp_ncores;
        // Number of cores
        int ncores = __kmp_ncores;
        // How many threads will be bound to each core
        int chunk = nthreads / ncores;
        // How many cores will have an additional thread bound to it - "big cores"
        int big_cores = nthreads % ncores;
        // Number of threads on the big cores
        int big_nth = ( chunk + 1 ) * big_cores;
        // Editor's example: nthreads == 10, ncores == 4 gives chunk == 2,
        // big_cores == 2, big_nth == 6, so tids 0-5 land on the two "big"
        // cores in threes and tids 6-9 on the remaining cores in twos.
        if( tid < big_nth ) {
            coreID = tid / (chunk + 1 );
            threadID = ( tid % (chunk + 1 ) ) % __kmp_nth_per_core ;
        } else { //tid >= big_nth
            coreID = ( tid - big_cores ) / chunk;
            threadID = ( ( tid - big_cores ) % chunk ) % __kmp_nth_per_core ;
        }

        KMP_DEBUG_ASSERT2(KMP_AFFINITY_CAPABLE(),
          "Illegal set affinity operation when not capable");

        kmp_affin_mask_t *mask;
        KMP_CPU_ALLOC_ON_STACK(mask);
        KMP_CPU_ZERO(mask);

        // Granularity == thread
        if( __kmp_affinity_gran == affinity_gran_fine || __kmp_affinity_gran == affinity_gran_thread) {
            int osID = address2os[ coreID * __kmp_nth_per_core + threadID ].second;
            KMP_CPU_SET( osID, mask);
        } else if( __kmp_affinity_gran == affinity_gran_core ) { // Granularity == core
            for( int i = 0; i < __kmp_nth_per_core; i++ ) {
                int osID;
                osID = address2os[ coreID * __kmp_nth_per_core + i ].second;
                KMP_CPU_SET( osID, mask);
            }
        }
        if (__kmp_affinity_verbose) {
            char buf[KMP_AFFIN_MASK_PRINT_LEN];
            __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, mask);
            KMP_INFORM(BoundToOSProcSet, "KMP_AFFINITY", (kmp_int32)getpid(),
              tid, buf);
        }
        __kmp_set_system_affinity( mask, TRUE );
        KMP_CPU_FREE_FROM_STACK(mask);
    } else { // Non-uniform topology

        kmp_affin_mask_t *mask;
        KMP_CPU_ALLOC_ON_STACK(mask);
        KMP_CPU_ZERO(mask);

        // Number of hyper threads per core in HT machine
        int nth_per_core = __kmp_nThreadsPerCore;
        int core_level;
        if( nth_per_core > 1 ) {
            core_level = __kmp_aff_depth - 2;
        } else {
            core_level = __kmp_aff_depth - 1;
        }

        // Number of cores - maximum value; it does not count trailing cores
        // with 0 processors
        int ncores = address2os[ __kmp_avail_proc - 1 ].first.labels[ core_level ] + 1;

        // For performance, handle the special case nthreads == __kmp_avail_proc
        if( nthreads == __kmp_avail_proc ) {
            if( __kmp_affinity_gran == affinity_gran_fine || __kmp_affinity_gran == affinity_gran_thread) {
                int osID = address2os[ tid ].second;
                KMP_CPU_SET( osID, mask);
            } else if( __kmp_affinity_gran == affinity_gran_core ) { // Granularity == core
                int coreID = address2os[ tid ].first.labels[ core_level ];
                // Count the osIDs found for the current core; there can be
                // at most nth_per_core of them.
                int cnt = 0;
                for( int i = 0; i < __kmp_avail_proc; i++ ) {
                    int osID = address2os[ i ].second;
                    int core = address2os[ i ].first.labels[ core_level ];
                    if( core == coreID ) {
                        KMP_CPU_SET( osID, mask);
                        cnt++;
                        if( cnt == nth_per_core ) {
                            break;
                        }
                    }
                }
            }
        } else if( nthreads <= __kmp_ncores ) {

            int core = 0;
            for( int i = 0; i < ncores; i++ ) {
                // Check whether this core from procarr[] has any procs
                int in_mask = 0;
                for( int j = 0; j < nth_per_core; j++ ) {
                    if( procarr[ i * nth_per_core + j ] != - 1 ) {
                        in_mask = 1;
                        break;
                    }
                }
                if( in_mask ) {
                    if( tid == core ) {
                        for( int j = 0; j < nth_per_core; j++ ) {
                            int osID = procarr[ i * nth_per_core + j ];
                            if( osID != -1 ) {
                                KMP_CPU_SET( osID, mask );
                                // For fine granularity it is enough to set
                                // the first available osID for this core.
                                if( __kmp_affinity_gran == affinity_gran_fine || __kmp_affinity_gran == affinity_gran_thread) {
                                    break;
                                }
                            }
                        }
                        break;
                    } else {
                        core++;
                    }
                }
            }

        } else { // nthreads > __kmp_ncores

            // Array to save the number of processors at each core
            int* nproc_at_core = (int*)KMP_ALLOCA(sizeof(int)*ncores);
            // Array to save the number of cores with "x" available processors
            int* ncores_with_x_procs = (int*)KMP_ALLOCA(sizeof(int)*(nth_per_core+1));
            // Array to save the number of cores with # procs from x to nth_per_core
            int* ncores_with_x_to_max_procs = (int*)KMP_ALLOCA(sizeof(int)*(nth_per_core+1));

            for( int i = 0; i <= nth_per_core; i++ ) {
                ncores_with_x_procs[ i ] = 0;
                ncores_with_x_to_max_procs[ i ] = 0;
            }

            for( int i = 0; i < ncores; i++ ) {
                int cnt = 0;
                for( int j = 0; j < nth_per_core; j++ ) {
                    if( procarr[ i * nth_per_core + j ] != -1 ) {
                        cnt++;
                    }
                }
                nproc_at_core[ i ] = cnt;       // number of procs on this core
                ncores_with_x_procs[ cnt ]++;   // number of cores with cnt procs
            }

            for( int i = 0; i <= nth_per_core; i++ ) {
                for( int j = i; j <= nth_per_core; j++ ) {
                    ncores_with_x_to_max_procs[ i ] += ncores_with_x_procs[ j ];
                }
            }

            // Max number of processors
            int nproc = nth_per_core * ncores;
            // An array to keep the number of threads per each context
            int * newarr = ( int * )__kmp_allocate( sizeof( int ) * nproc );
            for( int i = 0; i < nproc; i++ ) {
                newarr[ i ] = 0;
            }

            int nth = nthreads;
            int flag = 0;
            while( nth > 0 ) {
                for( int j = 1; j <= nth_per_core; j++ ) {
                    int cnt = ncores_with_x_to_max_procs[ j ];
                    for( int i = 0; i < ncores; i++ ) {
                        // Skip cores with 0 processors
                        if( nproc_at_core[ i ] == 0 ) {
                            continue;
                        }
                        for( int k = 0; k < nth_per_core; k++ ) {
                            if( procarr[ i * nth_per_core + k ] != -1 ) {
                                if( newarr[ i * nth_per_core + k ] == 0 ) {
                                    newarr[ i * nth_per_core + k ] = 1;
                                    cnt--;
                                    nth--;
                                    break;
                                } else {
                                    if( flag != 0 ) {
                                        newarr[ i * nth_per_core + k ] ++;
                                        cnt--;
                                        nth--;
                                        break;
                                    }
                                }
                            }
                        }
                        if( cnt == 0 || nth == 0 ) {
                            break;
                        }
                    }
                    if( nth == 0 ) {
                        break;
                    }
                }
                flag = 1;
            }
            int sum = 0;
            for( int i = 0; i < nproc; i++ ) {
                sum += newarr[ i ];
                if( sum > tid ) {
                    // Granularity == thread
                    if( __kmp_affinity_gran == affinity_gran_fine || __kmp_affinity_gran == affinity_gran_thread) {
                        int osID = procarr[ i ];
                        KMP_CPU_SET( osID, mask);
                    } else if( __kmp_affinity_gran == affinity_gran_core ) { // Granularity == core
                        int coreID = i / nth_per_core;
                        for( int ii = 0; ii < nth_per_core; ii++ ) {
                            int osID = procarr[ coreID * nth_per_core + ii ];
                            if( osID != -1 ) {
                                KMP_CPU_SET( osID, mask);
                            }
                        }
                    }
                    break;
                }
            }
            __kmp_free( newarr );
        }

        if (__kmp_affinity_verbose) {
            char buf[KMP_AFFIN_MASK_PRINT_LEN];
            __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, mask);
            KMP_INFORM(BoundToOSProcSet, "KMP_AFFINITY", (kmp_int32)getpid(),
              tid, buf);
        }
        __kmp_set_system_affinity( mask, TRUE );
        KMP_CPU_FREE_FROM_STACK(mask);
    }
}
#if KMP_OS_LINUX
// We don't need this entry for Windows because there is the
// GetProcessAffinityMask() api.
//
// The intended usage is indicated by these steps:
// 1) The user gets the current affinity mask
// 2) Then sets the affinity by calling this function
// 3) Error check the return value
// 4) Use non-OpenMP parallelization
// 5) Reset the affinity to what was stored in step 1)
#ifdef __cplusplus
extern "C"
#endif
int
kmp_set_thread_affinity_mask_initial()
// the function returns 0 on success,
//    -1 if we cannot bind the thread
//    >0 (errno) if an error happened during binding
{
    int gtid = __kmp_get_gtid();
    if (gtid < 0) {
        // Do not touch non-omp threads
        KA_TRACE(30, ("kmp_set_thread_affinity_mask_initial: "
            "non-omp thread, returning\n"));
        return -1;
    }
    if (!KMP_AFFINITY_CAPABLE() || !__kmp_init_middle) {
        KA_TRACE(30, ("kmp_set_thread_affinity_mask_initial: "
            "affinity not initialized, returning\n"));
        return -1;
    }
    KA_TRACE(30, ("kmp_set_thread_affinity_mask_initial: "
        "set full mask for thread %d\n", gtid));
    KMP_DEBUG_ASSERT(fullMask != NULL);
    return __kmp_set_system_affinity(fullMask, FALSE);
}
#endif

#endif // KMP_AFFINITY_SUPPORTED