#include "kmp_affinity.h"
#include "kmp_stats.h"
#include "kmp_wait_release.h"
#include "kmp_wrapper_getpid.h"

#if !KMP_OS_DRAGONFLY && !KMP_OS_FREEBSD && !KMP_OS_NETBSD && !KMP_OS_OPENBSD
#include <alloca.h>
#endif
#include <sys/resource.h>
#include <sys/syscall.h>
#include <sys/times.h>

#if KMP_OS_LINUX && !KMP_OS_CNK
#include <sys/sysinfo.h>
#elif KMP_OS_DARWIN
#include <mach/mach.h>
#include <sys/sysctl.h>
#elif KMP_OS_DRAGONFLY || KMP_OS_FREEBSD
#include <pthread_np.h>
#elif KMP_OS_NETBSD
#include <sys/types.h>
#include <sys/sysctl.h>
#endif

#include "tsan_annotations.h"

struct kmp_sys_timer {
  struct timespec start;
};

// Convert timespec to nanoseconds.
#define TS2NS(timespec) (((timespec).tv_sec * 1e9) + (timespec).tv_nsec)

static struct kmp_sys_timer __kmp_sys_timer_data;
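// Usage sketch for TS2NS: because tv_sec is multiplied by the floating-point
// constant 1e9, the expression evaluates as a double, so an elapsed time in
// nanoseconds is simply TS2NS(stop) - TS2NS(start), as done in
// __kmp_read_system_time() below.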
#if KMP_HANDLE_SIGNALS
typedef void (*sig_func_t)(int);
STATIC_EFI2_WORKAROUND struct sigaction __kmp_sighldrs[NSIG];
static sigset_t __kmp_sigset;
#endif
static int __kmp_init_runtime = FALSE;

static int __kmp_fork_count = 0;

static pthread_condattr_t __kmp_suspend_cond_attr;
static pthread_mutexattr_t __kmp_suspend_mutex_attr;

static kmp_cond_align_t __kmp_wait_cv;
static kmp_mutex_align_t __kmp_wait_mx;

kmp_uint64 __kmp_ticks_per_msec = 1000000;
#ifdef DEBUG_SUSPEND
static void __kmp_print_cond(char *buffer, kmp_cond_align_t *cond) {
  KMP_SNPRINTF(buffer, 128, "(cond (lock (%ld, %d)), (descr (%p)))",
               cond->c_cond.__c_lock.__status, cond->c_cond.__c_lock.__spinlock,
               cond->c_cond.__c_waiting);
}
#endif
#if (KMP_OS_LINUX && KMP_AFFINITY_SUPPORTED)

/* Affinity support */

void __kmp_affinity_bind_thread(int which) {
  KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
              "Illegal set affinity operation when not capable");

  kmp_affin_mask_t *mask;
  KMP_CPU_ALLOC_ON_STACK(mask);
  KMP_CPU_ZERO(mask); // start from an empty mask, then set the one CPU
  KMP_CPU_SET(which, mask);
  __kmp_set_system_affinity(mask, TRUE);
  KMP_CPU_FREE_FROM_STACK(mask);
}
/* Determine if we can access affinity functionality on this version of
   Linux* OS by checking __NR_sched_{get,set}affinity system calls, and set
   __kmp_affin_mask_size to the appropriate value (0 means not capable). */
void __kmp_affinity_determine_capable(const char *env_var) {
// Check and see if the OS supports thread affinity.

#define KMP_CPU_SET_SIZE_LIMIT (1024 * 1024)

  int gCode;
  int sCode;
  unsigned char *buf;
  buf = (unsigned char *)KMP_INTERNAL_MALLOC(KMP_CPU_SET_SIZE_LIMIT);

  // Determine the number of bytes the OS uses to represent an affinity mask.
  gCode = syscall(__NR_sched_getaffinity, 0, KMP_CPU_SET_SIZE_LIMIT, buf);
  KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                "initial getaffinity call returned %d errno = %d\n",
                gCode, errno));

  if (gCode < 0) {
    // System call not supported.
    if (__kmp_affinity_verbose ||
        (__kmp_affinity_warnings && (__kmp_affinity_type != affinity_none) &&
         (__kmp_affinity_type != affinity_default) &&
         (__kmp_affinity_type != affinity_disabled))) {
      int error = errno;
      kmp_msg_t err_code = KMP_ERR(error);
      __kmp_msg(kmp_ms_warning, KMP_MSG(GetAffSysCallNotSupported, env_var),
                err_code, __kmp_msg_null);
      if (__kmp_generate_warnings == kmp_warnings_off) {
        __kmp_str_free(&err_code.str);
      }
    }
    KMP_AFFINITY_DISABLE();
    KMP_INTERNAL_FREE(buf);
    return;
  }
  if (gCode > 0) { // Linux* OS only
    // The optimal situation: the OS returns the size of the buffer it expects.
    //
    // A verification of correct behavior is that setaffinity on a NULL
    // buffer with the same size fails with errno set to EFAULT.
    sCode = syscall(__NR_sched_setaffinity, 0, gCode, NULL);
    KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                  "setaffinity for mask size %d returned %d errno = %d\n",
                  gCode, sCode, errno));
    if (sCode < 0) {
      if (errno == ENOSYS) {
        if (__kmp_affinity_verbose ||
            (__kmp_affinity_warnings &&
             (__kmp_affinity_type != affinity_none) &&
             (__kmp_affinity_type != affinity_default) &&
             (__kmp_affinity_type != affinity_disabled))) {
          int error = errno;
          kmp_msg_t err_code = KMP_ERR(error);
          __kmp_msg(kmp_ms_warning, KMP_MSG(SetAffSysCallNotSupported, env_var),
                    err_code, __kmp_msg_null);
          if (__kmp_generate_warnings == kmp_warnings_off) {
            __kmp_str_free(&err_code.str);
          }
        }
        KMP_AFFINITY_DISABLE();
        KMP_INTERNAL_FREE(buf);
      }
      if (errno == EFAULT) {
        KMP_AFFINITY_ENABLE(gCode);
        KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                      "affinity supported (mask size %d)\n",
                      (int)__kmp_affin_mask_size));
        KMP_INTERNAL_FREE(buf);
      }
      return;
    }
  }

  // Call the getaffinity system call repeatedly with increasing set sizes
  // until we succeed, or reach an upper bound on the search.
  KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                "searching for proper set size\n"));
  int size;
  for (size = 1; size <= KMP_CPU_SET_SIZE_LIMIT; size *= 2) {
    gCode = syscall(__NR_sched_getaffinity, 0, size, buf);
    KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                  "getaffinity for mask size %d returned %d errno = %d\n",
                  size, gCode, errno));

    if (gCode < 0) {
      if (errno == ENOSYS) {
        // We shouldn't get here.
        KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                      "inconsistent OS call behavior: errno == ENOSYS for mask "
                      "size %d\n",
                      size));
        if (__kmp_affinity_verbose ||
            (__kmp_affinity_warnings &&
             (__kmp_affinity_type != affinity_none) &&
             (__kmp_affinity_type != affinity_default) &&
             (__kmp_affinity_type != affinity_disabled))) {
          int error = errno;
          kmp_msg_t err_code = KMP_ERR(error);
          __kmp_msg(kmp_ms_warning, KMP_MSG(GetAffSysCallNotSupported, env_var),
                    err_code, __kmp_msg_null);
          if (__kmp_generate_warnings == kmp_warnings_off) {
            __kmp_str_free(&err_code.str);
          }
        }
        KMP_AFFINITY_DISABLE();
        KMP_INTERNAL_FREE(buf);
        return;
      }
      continue;
    }

    sCode = syscall(__NR_sched_setaffinity, 0, gCode, NULL);
    KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                  "setaffinity for mask size %d returned %d errno = %d\n",
                  gCode, sCode, errno));
    if (sCode < 0) {
      if (errno == ENOSYS) { // Linux* OS only
        // We shouldn't get here.
        KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                      "inconsistent OS call behavior: errno == ENOSYS for mask "
                      "size %d\n",
                      size));
        if (__kmp_affinity_verbose ||
            (__kmp_affinity_warnings &&
             (__kmp_affinity_type != affinity_none) &&
             (__kmp_affinity_type != affinity_default) &&
             (__kmp_affinity_type != affinity_disabled))) {
          int error = errno;
          kmp_msg_t err_code = KMP_ERR(error);
          __kmp_msg(kmp_ms_warning, KMP_MSG(SetAffSysCallNotSupported, env_var),
                    err_code, __kmp_msg_null);
          if (__kmp_generate_warnings == kmp_warnings_off) {
            __kmp_str_free(&err_code.str);
          }
        }
        KMP_AFFINITY_DISABLE();
        KMP_INTERNAL_FREE(buf);
        return;
      }
      if (errno == EFAULT) {
        KMP_AFFINITY_ENABLE(gCode);
        KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                      "affinity supported (mask size %d)\n",
                      (int)__kmp_affin_mask_size));
        KMP_INTERNAL_FREE(buf);
        return;
      }
    }
  }
  KMP_INTERNAL_FREE(buf);

  // Affinity is not supported.
  KMP_AFFINITY_DISABLE();
  KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                "cannot determine mask size - affinity not supported\n"));
  if (__kmp_affinity_verbose ||
      (__kmp_affinity_warnings && (__kmp_affinity_type != affinity_none) &&
       (__kmp_affinity_type != affinity_default) &&
       (__kmp_affinity_type != affinity_disabled))) {
    KMP_WARNING(AffCantGetMaskSize, env_var);
  }
}
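// Note on the probing protocol above: sched_getaffinity() returns the number
// of bytes the kernel actually uses for a CPU mask, and a follow-up
// sched_setaffinity() with a NULL buffer of that size is expected to fail
// with EFAULT -- proof that the kernel accepted the size and only the buffer
// pointer was bad. A minimal standalone sketch of the same probe (assuming
// Linux and <sys/syscall.h>):
//
//   long n = syscall(__NR_sched_getaffinity, 0, 1024 * 1024, buf);
//   if (n > 0 && syscall(__NR_sched_setaffinity, 0, n, NULL) < 0 &&
//       errno == EFAULT)
//     /* affinity works; the kernel mask size is n bytes */;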
#endif // KMP_OS_LINUX && KMP_AFFINITY_SUPPORTED

#if KMP_USE_FUTEX

int __kmp_futex_determine_capable() {
  int loc = 0;
  int rc = syscall(__NR_futex, &loc, FUTEX_WAKE, 1, NULL, NULL, 0);
  int retval = (rc == 0) || (errno != ENOSYS);

  KA_TRACE(10,
           ("__kmp_futex_determine_capable: rc = %d errno = %d\n", rc, errno));
  KA_TRACE(10, ("__kmp_futex_determine_capable: futex syscall%s supported\n",
                retval ? "" : " not"));

  return retval;
}
#endif // KMP_USE_FUTEX

#if (KMP_ARCH_X86 || KMP_ARCH_X86_64) && (!KMP_ASM_INTRINS)

kmp_int8 __kmp_test_then_or8(volatile kmp_int8 *p, kmp_int8 d) {
  kmp_int8 old_value, new_value;

  old_value = TCR_1(*p);
  new_value = old_value | d;

  while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_1(*p);
    new_value = old_value | d;
  }
  return old_value;
}

kmp_int8 __kmp_test_then_and8(volatile kmp_int8 *p, kmp_int8 d) {
  kmp_int8 old_value, new_value;

  old_value = TCR_1(*p);
  new_value = old_value & d;

  while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_1(*p);
    new_value = old_value & d;
  }
  return old_value;
}

kmp_uint32 __kmp_test_then_or32(volatile kmp_uint32 *p, kmp_uint32 d) {
  kmp_uint32 old_value, new_value;

  old_value = TCR_4(*p);
  new_value = old_value | d;

  while (!KMP_COMPARE_AND_STORE_REL32(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_4(*p);
    new_value = old_value | d;
  }
  return old_value;
}

kmp_uint32 __kmp_test_then_and32(volatile kmp_uint32 *p, kmp_uint32 d) {
  kmp_uint32 old_value, new_value;

  old_value = TCR_4(*p);
  new_value = old_value & d;

  while (!KMP_COMPARE_AND_STORE_REL32(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_4(*p);
    new_value = old_value & d;
  }
  return old_value;
}

kmp_int8 __kmp_test_then_add8(volatile kmp_int8 *p, kmp_int8 d) {
  kmp_int8 old_value, new_value;

  old_value = TCR_1(*p);
  new_value = old_value + d;

  while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_1(*p);
    new_value = old_value + d;
  }
  return old_value;
}

kmp_int64 __kmp_test_then_add64(volatile kmp_int64 *p, kmp_int64 d) {
  kmp_int64 old_value, new_value;

  old_value = TCR_8(*p);
  new_value = old_value + d;

  while (!KMP_COMPARE_AND_STORE_REL64(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_8(*p);
    new_value = old_value + d;
  }
  return old_value;
}

kmp_uint64 __kmp_test_then_or64(volatile kmp_uint64 *p, kmp_uint64 d) {
  kmp_uint64 old_value, new_value;

  old_value = TCR_8(*p);
  new_value = old_value | d;
  while (!KMP_COMPARE_AND_STORE_REL64(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_8(*p);
    new_value = old_value | d;
  }
  return old_value;
}

kmp_uint64 __kmp_test_then_and64(volatile kmp_uint64 *p, kmp_uint64 d) {
  kmp_uint64 old_value, new_value;

  old_value = TCR_8(*p);
  new_value = old_value & d;
  while (!KMP_COMPARE_AND_STORE_REL64(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_8(*p);
    new_value = old_value & d;
  }
  return old_value;
}

#endif /* (KMP_ARCH_X86 || KMP_ARCH_X86_64) && (!KMP_ASM_INTRINS) */
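// All eight routines above share one lock-free shape: snapshot the old value,
// compute the new one, and retry the release-ordered compare-and-store until
// no other thread has raced in between; the pre-update value is returned.
// An equivalent sketch in portable C++11 (illustrative only -- the runtime
// uses its own TCR_*/KMP_COMPARE_AND_STORE_* primitives, not std::atomic):
//
//   template <class T> T test_then_or(std::atomic<T> &v, T d) {
//     T old_value = v.load(std::memory_order_relaxed);
//     // compare_exchange_weak refreshes old_value on failure
//     while (!v.compare_exchange_weak(old_value, old_value | d,
//                                     std::memory_order_release,
//                                     std::memory_order_relaxed))
//       ;
//     return old_value;
//   }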
void __kmp_terminate_thread(int gtid) {
  int status;
  kmp_info_t *th = __kmp_threads[gtid];

  if (!th)
    return;

#ifdef KMP_CANCEL_THREADS
  KA_TRACE(10, ("__kmp_terminate_thread: kill (%d)\n", gtid));
  status = pthread_cancel(th->th.th_info.ds.ds_thread);
  if (status != 0 && status != ESRCH) {
    __kmp_fatal(KMP_MSG(CantTerminateWorkerThread), KMP_ERR(status),
                __kmp_msg_null);
  }
#endif
  KMP_YIELD(TRUE);
}
/* Set thread stack info according to values returned by pthread_getattr_np().
   If values are unreasonable, assume call failed and use incremental stack
   refinement method instead. Returns TRUE if the stack parameters could be
   determined exactly, FALSE if incremental refinement is necessary. */
static kmp_int32 __kmp_set_stack_info(int gtid, kmp_info_t *th) {
  int stack_data;
#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||    \
    KMP_OS_HURD || KMP_OS_KFREEBSD
  pthread_attr_t attr;
  int status;
  size_t size = 0;
  void *addr = 0;

  /* Always do incremental stack refinement for ubermaster threads since the
     initial thread stack range can be reduced by sibling thread creation so
     pthread_attr_getstack may cause thread gtid aliasing */
  if (!KMP_UBER_GTID(gtid)) {

    /* Fetch the real thread attributes */
    status = pthread_attr_init(&attr);
    KMP_CHECK_SYSFAIL("pthread_attr_init", status);
#if KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD
    status = pthread_attr_get_np(pthread_self(), &attr);
    KMP_CHECK_SYSFAIL("pthread_attr_get_np", status);
#else
    status = pthread_getattr_np(pthread_self(), &attr);
    KMP_CHECK_SYSFAIL("pthread_getattr_np", status);
#endif
    status = pthread_attr_getstack(&attr, &addr, &size);
    KMP_CHECK_SYSFAIL("pthread_attr_getstack", status);
    KA_TRACE(60,
             ("__kmp_set_stack_info: T#%d pthread_attr_getstack returned size:"
              " %lu, low addr: %p\n",
              gtid, size, addr));
    status = pthread_attr_destroy(&attr);
    KMP_CHECK_SYSFAIL("pthread_attr_destroy", status);
  }

  if (size != 0 && addr != 0) { // was stack parameter determination successful?
    /* Store the correct base and size */
    TCW_PTR(th->th.th_info.ds.ds_stackbase, (((char *)addr) + size));
    TCW_PTR(th->th.th_info.ds.ds_stacksize, size);
    TCW_4(th->th.th_info.ds.ds_stackgrow, FALSE);
    return TRUE;
  }
#endif /* KMP_OS_LINUX || KMP_OS_DRAGONFLY || ... */
  /* Use incremental refinement starting from an initial conservative estimate */
  TCW_PTR(th->th.th_info.ds.ds_stacksize, 0);
  TCW_PTR(th->th.th_info.ds.ds_stackbase, &stack_data);
  TCW_4(th->th.th_info.ds.ds_stackgrow, TRUE);
  return FALSE;
}
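// For reference, the stack-bounds query used above looks like this in a
// standalone program (a sketch, assuming glibc's non-portable
// pthread_getattr_np; the *BSD spelling is pthread_attr_get_np):
//
//   pthread_attr_t attr;
//   void *addr;
//   size_t size;
//   pthread_getattr_np(pthread_self(), &attr);
//   pthread_attr_getstack(&attr, &addr, &size); // addr is the LOW end
//   pthread_attr_destroy(&attr);
//   void *stack_base = (char *)addr + size; // stacks grow downward here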
static void *__kmp_launch_worker(void *thr) {
  int status, old_type, old_state;
#ifdef KMP_BLOCK_SIGNALS
  sigset_t new_set, old_set;
#endif /* KMP_BLOCK_SIGNALS */
  void *exit_val;
#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||    \
    KMP_OS_OPENBSD || KMP_OS_HURD || KMP_OS_KFREEBSD
  void *volatile padding = 0;
#endif
  int gtid;

  gtid = ((kmp_info_t *)thr)->th.th_info.ds.ds_gtid;
  __kmp_gtid_set_specific(gtid);
#ifdef KMP_TDATA_GTID
  __kmp_gtid = gtid;
#endif
#if KMP_STATS_ENABLED
  // set thread local index to point to thread-specific stats
  __kmp_stats_thread_ptr = ((kmp_info_t *)thr)->th.th_stats;
  __kmp_stats_thread_ptr->startLife();
  KMP_SET_THREAD_STATE(IDLE);
  KMP_INIT_PARTITIONED_TIMERS(OMP_idle);
#endif

#if USE_ITT_BUILD
  __kmp_itt_thread_name(gtid);
#endif /* USE_ITT_BUILD */

#if KMP_AFFINITY_SUPPORTED
  __kmp_affinity_set_init_mask(gtid, FALSE);
#endif

#ifdef KMP_CANCEL_THREADS
  status = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old_type);
  KMP_CHECK_SYSFAIL("pthread_setcanceltype", status);
  status = pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &old_state);
  KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
#endif

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
  // Set FP control regs to be a copy of the parallel initialization thread's.
  __kmp_clear_x87_fpu_status_word();
  __kmp_load_x87_fpu_control_word(&__kmp_init_x87_fpu_control_word);
  __kmp_load_mxcsr(&__kmp_init_mxcsr);
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

#ifdef KMP_BLOCK_SIGNALS
  status = sigfillset(&new_set);
  KMP_CHECK_SYSFAIL_ERRNO("sigfillset", status);
  status = pthread_sigmask(SIG_BLOCK, &new_set, &old_set);
  KMP_CHECK_SYSFAIL("pthread_sigmask", status);
#endif /* KMP_BLOCK_SIGNALS */

#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||    \
    KMP_OS_OPENBSD || KMP_OS_HURD || KMP_OS_KFREEBSD
  if (__kmp_stkoffset > 0 && gtid > 0) {
    padding = KMP_ALLOCA(gtid * __kmp_stkoffset);
  }
#endif

  KMP_MB();
  __kmp_set_stack_info(gtid, (kmp_info_t *)thr);

  __kmp_check_stack_overlap((kmp_info_t *)thr);

  exit_val = __kmp_launch_thread((kmp_info_t *)thr);

#ifdef KMP_BLOCK_SIGNALS
  status = pthread_sigmask(SIG_SETMASK, &old_set, NULL);
  KMP_CHECK_SYSFAIL("pthread_sigmask", status);
#endif /* KMP_BLOCK_SIGNALS */

  return exit_val;
}
#if KMP_USE_MONITOR
/* The monitor thread controls all of the threads in the complex */
static void *__kmp_launch_monitor(void *thr) {
  int status, old_type, old_state;
#ifdef KMP_BLOCK_SIGNALS
  sigset_t new_set;
#endif /* KMP_BLOCK_SIGNALS */
  struct timespec interval;
  int yield_count;
  int yield_cycles = 0;

  KMP_MB(); /* Flush all pending memory write invalidates. */

  KA_TRACE(10, ("__kmp_launch_monitor: #1 launched\n"));

  /* register us as the monitor thread */
  __kmp_gtid_set_specific(KMP_GTID_MONITOR);
#ifdef KMP_TDATA_GTID
  __kmp_gtid = KMP_GTID_MONITOR;
#endif

  KMP_MB();

#if USE_ITT_BUILD
  // Instruct Intel(R) Threading Tools to ignore the monitor thread.
  __kmp_itt_thread_ignore();
#endif /* USE_ITT_BUILD */

  __kmp_set_stack_info(((kmp_info_t *)thr)->th.th_info.ds.ds_gtid,
                       (kmp_info_t *)thr);

  __kmp_check_stack_overlap((kmp_info_t *)thr);

#ifdef KMP_CANCEL_THREADS
  status = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old_type);
  KMP_CHECK_SYSFAIL("pthread_setcanceltype", status);
  status = pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &old_state);
  KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
#endif

#if KMP_REAL_TIME_FIX
  // This is a potential fix which allows applications with real-time
  // scheduling policy to work; it is disabled by default.
  { // Was the program started with a real-time scheduling policy?
    int sched = sched_getscheduler(0);
    if (sched == SCHED_FIFO || sched == SCHED_RR) {
      // Yes, we are a part of a real-time application. Try to increase the
      // priority of the monitor.
      struct sched_param param;
      int max_priority = sched_get_priority_max(sched);
      int rc;
      KMP_WARNING(RealTimeSchedNotSupported);
      sched_getparam(0, &param);
      if (param.sched_priority < max_priority) {
        param.sched_priority += 1;
        rc = sched_setscheduler(0, sched, &param);
        if (rc != 0) {
          int error = errno;
          kmp_msg_t err_code = KMP_ERR(error);
          __kmp_msg(kmp_ms_warning, KMP_MSG(CantChangeMonitorPriority),
                    err_code, KMP_MSG(MonitorWillStarve), __kmp_msg_null);
          if (__kmp_generate_warnings == kmp_warnings_off) {
            __kmp_str_free(&err_code.str);
          }
        }
      } else {
        // We cannot abort here, because the number of CPUs may be enough for
        // all the threads, including the monitor thread, so the application
        // could potentially work...
        __kmp_msg(kmp_ms_warning, KMP_MSG(RunningAtMaxPriority),
                  KMP_MSG(MonitorWillStarve), KMP_HNT(RunningAtMaxPriority),
                  __kmp_msg_null);
      }
    }
    // AC: free thread that waits for monitor started
    TCW_4(__kmp_global.g.g_time.dt.t_value, 0);
  }
#endif // KMP_REAL_TIME_FIX

  KMP_MB(); /* Flush all pending memory write invalidates. */

  if (__kmp_monitor_wakeups == 1) {
    interval.tv_sec = 1;
    interval.tv_nsec = 0;
  } else {
    interval.tv_sec = 0;
    interval.tv_nsec = (KMP_NSEC_PER_SEC / __kmp_monitor_wakeups);
  }

  KA_TRACE(10, ("__kmp_launch_monitor: #2 monitor\n"));

  if (__kmp_yield_cycle) {
    __kmp_yielding_on = 0; /* Start out with yielding shut off */
    yield_count = __kmp_yield_off_count;
  } else {
    __kmp_yielding_on = 1; /* Yielding is on permanently */
  }

  while (!TCR_4(__kmp_global.g.g_done)) {
    struct timespec now;
    struct timeval tval;

    /* This thread monitors the state of the system */

    KA_TRACE(15, ("__kmp_launch_monitor: update\n"));

    status = gettimeofday(&tval, NULL);
    KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
    TIMEVAL_TO_TIMESPEC(&tval, &now);

    now.tv_sec += interval.tv_sec;
    now.tv_nsec += interval.tv_nsec;

    if (now.tv_nsec >= KMP_NSEC_PER_SEC) {
      now.tv_sec += 1;
      now.tv_nsec -= KMP_NSEC_PER_SEC;
    }

    status = pthread_mutex_lock(&__kmp_wait_mx.m_mutex);
    KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
    // AC: the monitor should not fall asleep if g_done has been set
    if (!TCR_4(__kmp_global.g.g_done)) { // check once more under mutex
      status = pthread_cond_timedwait(&__kmp_wait_cv.c_cond,
                                      &__kmp_wait_mx.m_mutex, &now);
      if (status != 0) {
        if (status != ETIMEDOUT && status != EINTR) {
          KMP_SYSFAIL("pthread_cond_timedwait", status);
        }
      }
    }
    status = pthread_mutex_unlock(&__kmp_wait_mx.m_mutex);
    KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);

    if (__kmp_yield_cycle) {
      yield_cycles++;
      if ((yield_cycles % yield_count) == 0) {
        if (__kmp_yielding_on) {
          __kmp_yielding_on = 0; /* Turn it off now */
          yield_count = __kmp_yield_off_count;
        } else {
          __kmp_yielding_on = 1; /* Turn it on now */
          yield_count = __kmp_yield_on_count;
        }
        yield_cycles = 0;
      }
    } else {
      __kmp_yielding_on = 1;
    }

    TCW_4(__kmp_global.g.g_time.dt.t_value,
          TCR_4(__kmp_global.g.g_time.dt.t_value) + 1);

    KMP_MB(); /* Flush all pending memory write invalidates. */
  }

  KA_TRACE(10, ("__kmp_launch_monitor: #3 cleanup\n"));

#ifdef KMP_BLOCK_SIGNALS
  status = sigfillset(&new_set);
  KMP_CHECK_SYSFAIL_ERRNO("sigfillset", status);
  status = pthread_sigmask(SIG_UNBLOCK, &new_set, NULL);
  KMP_CHECK_SYSFAIL("pthread_sigmask", status);
#endif /* KMP_BLOCK_SIGNALS */

  KA_TRACE(10, ("__kmp_launch_monitor: #4 finished\n"));

  if (__kmp_global.g.g_abort != 0) {
    /* now we need to terminate the worker threads */
    int gtid;

    KA_TRACE(10, ("__kmp_launch_monitor: #5 terminate sig=%d\n",
                  __kmp_global.g.g_abort));

    /* terminate the OpenMP worker threads */
    for (gtid = 1; gtid < __kmp_threads_capacity; ++gtid)
      __kmp_terminate_thread(gtid);

    __kmp_cleanup();

    KA_TRACE(10, ("__kmp_launch_monitor: #6 raise sig=%d\n",
                  __kmp_global.g.g_abort));

    if (__kmp_global.g.g_abort > 0)
      raise(__kmp_global.g.g_abort);
  }

  KA_TRACE(10, ("__kmp_launch_monitor: #7 exit\n"));

  return thr;
}
#endif // KMP_USE_MONITOR

void __kmp_create_worker(int gtid, kmp_info_t *th, size_t stack_size) {
  pthread_t handle;
  pthread_attr_t thread_attr;
  int status;

  th->th.th_info.ds.ds_gtid = gtid;

#if KMP_STATS_ENABLED
  // sets up worker thread stats before the thread is created
  __kmp_acquire_tas_lock(&__kmp_stats_lock, gtid);

  // th->th.th_stats is used to transfer thread-specific stats-pointer to
  // __kmp_launch_worker. So when thread is created (goes into
  // __kmp_launch_worker) it will set its thread local pointer to
  // th->th.th_stats
  if (!KMP_UBER_GTID(gtid)) {
    th->th.th_stats = __kmp_stats_list->push_back(gtid);
  } else {
    // For root threads, __kmp_stats_thread_ptr is set in
    // __kmp_register_root(), so set the th->th.th_stats field to it.
    th->th.th_stats = __kmp_stats_thread_ptr;
  }
  __kmp_release_tas_lock(&__kmp_stats_lock, gtid);

#endif // KMP_STATS_ENABLED

  if (KMP_UBER_GTID(gtid)) {
    KA_TRACE(10, ("__kmp_create_worker: uber thread (%d)\n", gtid));
    th->th.th_info.ds.ds_thread = pthread_self();
    __kmp_set_stack_info(gtid, th);
    __kmp_check_stack_overlap(th);
    return;
  }

  KA_TRACE(10, ("__kmp_create_worker: try to create thread (%d)\n", gtid));

  KMP_MB(); /* Flush all pending memory write invalidates. */

#ifdef KMP_THREAD_ATTR
  status = pthread_attr_init(&thread_attr);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantInitThreadAttrs), KMP_ERR(status), __kmp_msg_null);
  }
  status = pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_JOINABLE);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantSetWorkerState), KMP_ERR(status), __kmp_msg_null);
  }

  /* Set stack size for this thread now.
     The multiple of 2 is there because on some machines, requesting an unusual
     stacksize causes the thread to have an offset before the dummy alloca()
     takes place to create the offset.  Since we want the user to have a
     sufficient stacksize AND support a stack offset, we alloca() twice the
     offset so that the upcoming alloca() does not eliminate any premade offset,
     and also gives the user the stack space they requested for all threads */
  stack_size += gtid * __kmp_stkoffset * 2;

  KA_TRACE(10, ("__kmp_create_worker: T#%d, default stacksize = %lu bytes, "
                "__kmp_stksize = %lu bytes, final stacksize = %lu bytes\n",
                gtid, KMP_DEFAULT_STKSIZE, __kmp_stksize, stack_size));

#ifdef _POSIX_THREAD_ATTR_STACKSIZE
  status = pthread_attr_setstacksize(&thread_attr, stack_size);
#ifdef KMP_BACKUP_STKSIZE
  if (status != 0) {
    if (!__kmp_env_stksize) {
      stack_size = KMP_BACKUP_STKSIZE + gtid * __kmp_stkoffset;
      __kmp_stksize = KMP_BACKUP_STKSIZE;
      KA_TRACE(10, ("__kmp_create_worker: T#%d, default stacksize = %lu bytes, "
                    "__kmp_stksize = %lu bytes, (backup) final stacksize = %lu "
                    "bytes\n",
                    gtid, KMP_DEFAULT_STKSIZE, __kmp_stksize, stack_size));
      status = pthread_attr_setstacksize(&thread_attr, stack_size);
    }
  }
#endif /* KMP_BACKUP_STKSIZE */
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantSetWorkerStackSize, stack_size), KMP_ERR(status),
                KMP_HNT(ChangeWorkerStackSize), __kmp_msg_null);
  }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */

#endif /* KMP_THREAD_ATTR */

  status =
      pthread_create(&handle, &thread_attr, __kmp_launch_worker, (void *)th);
  if (status != 0 || !handle) { // ??? Why do we check handle??
#ifdef _POSIX_THREAD_ATTR_STACKSIZE
    if (status == EINVAL) {
      __kmp_fatal(KMP_MSG(CantSetWorkerStackSize, stack_size), KMP_ERR(status),
                  KMP_HNT(IncreaseWorkerStackSize), __kmp_msg_null);
    }
    if (status == ENOMEM) {
      __kmp_fatal(KMP_MSG(CantSetWorkerStackSize, stack_size), KMP_ERR(status),
                  KMP_HNT(DecreaseWorkerStackSize), __kmp_msg_null);
    }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
    if (status == EAGAIN) {
      __kmp_fatal(KMP_MSG(NoResourcesForWorkerThread), KMP_ERR(status),
                  KMP_HNT(Decrease_NUM_THREADS), __kmp_msg_null);
    }
    KMP_SYSFAIL("pthread_create", status);
  }

  th->th.th_info.ds.ds_thread = handle;

#ifdef KMP_THREAD_ATTR
  status = pthread_attr_destroy(&thread_attr);
  if (status) {
    kmp_msg_t err_code = KMP_ERR(status);
    __kmp_msg(kmp_ms_warning, KMP_MSG(CantDestroyThreadAttrs), err_code,
              __kmp_msg_null);
    if (__kmp_generate_warnings == kmp_warnings_off) {
      __kmp_str_free(&err_code.str);
    }
  }
#endif /* KMP_THREAD_ATTR */

  KMP_MB(); /* Flush all pending memory write invalidates. */

  KA_TRACE(10, ("__kmp_create_worker: done creating thread (%d)\n", gtid));

} // __kmp_create_worker
#if KMP_USE_MONITOR
void __kmp_create_monitor(kmp_info_t *th) {
  pthread_t handle;
  pthread_attr_t thread_attr;
  size_t size;
  int status;
  int auto_adj_size = FALSE;

  if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME) {
    // We don't need a monitor thread in case of MAX_BLOCKTIME
    KA_TRACE(10, ("__kmp_create_monitor: skipping monitor thread because of "
                  "MAX blocktime\n"));
    th->th.th_info.ds.ds_tid = 0; // this makes reap_monitor a no-op
    th->th.th_info.ds.ds_gtid = 0;
    return;
  }
  KA_TRACE(10, ("__kmp_create_monitor: try to create monitor\n"));

  KMP_MB(); /* Flush all pending memory write invalidates. */

  th->th.th_info.ds.ds_tid = KMP_GTID_MONITOR;
  th->th.th_info.ds.ds_gtid = KMP_GTID_MONITOR;
#if KMP_REAL_TIME_FIX
  TCW_4(__kmp_global.g.g_time.dt.t_value,
        -1); // Will use it for synchronization a bit later.
#else
  TCW_4(__kmp_global.g.g_time.dt.t_value, 0);
#endif // KMP_REAL_TIME_FIX

#ifdef KMP_THREAD_ATTR
  if (__kmp_monitor_stksize == 0) {
    __kmp_monitor_stksize = KMP_DEFAULT_MONITOR_STKSIZE;
    auto_adj_size = TRUE;
  }
  status = pthread_attr_init(&thread_attr);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantInitThreadAttrs), KMP_ERR(status), __kmp_msg_null);
  }
  status = pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_JOINABLE);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantSetMonitorState), KMP_ERR(status), __kmp_msg_null);
  }

#ifdef _POSIX_THREAD_ATTR_STACKSIZE
  status = pthread_attr_getstacksize(&thread_attr, &size);
  KMP_CHECK_SYSFAIL("pthread_attr_getstacksize", status);
#else
  size = __kmp_sys_min_stksize;
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
#endif /* KMP_THREAD_ATTR */

  if (__kmp_monitor_stksize == 0) {
    __kmp_monitor_stksize = KMP_DEFAULT_MONITOR_STKSIZE;
  }
  if (__kmp_monitor_stksize < __kmp_sys_min_stksize) {
    __kmp_monitor_stksize = __kmp_sys_min_stksize;
  }

  KA_TRACE(10, ("__kmp_create_monitor: default stacksize = %lu bytes, "
                "requested stacksize = %lu bytes\n",
                size, __kmp_monitor_stksize));

retry:

/* Set stack size for this thread now. */
#ifdef _POSIX_THREAD_ATTR_STACKSIZE
  KA_TRACE(10, ("__kmp_create_monitor: setting stacksize = %lu bytes,",
                __kmp_monitor_stksize));
  status = pthread_attr_setstacksize(&thread_attr, __kmp_monitor_stksize);
  if (status != 0) {
    if (auto_adj_size) {
      __kmp_monitor_stksize *= 2;
      goto retry;
    }
    kmp_msg_t err_code = KMP_ERR(status);
    __kmp_msg(kmp_ms_warning, // should this be fatal?  BB
              KMP_MSG(CantSetMonitorStackSize, (long int)__kmp_monitor_stksize),
              err_code, KMP_HNT(ChangeMonitorStackSize), __kmp_msg_null);
    if (__kmp_generate_warnings == kmp_warnings_off) {
      __kmp_str_free(&err_code.str);
    }
  }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */

  status =
      pthread_create(&handle, &thread_attr, __kmp_launch_monitor, (void *)th);

  if (status != 0) {
#ifdef _POSIX_THREAD_ATTR_STACKSIZE
    if (status == EINVAL) {
      if (auto_adj_size && (__kmp_monitor_stksize < (size_t)0x40000000)) {
        __kmp_monitor_stksize *= 2;
        goto retry;
      }
      __kmp_fatal(KMP_MSG(CantSetMonitorStackSize, __kmp_monitor_stksize),
                  KMP_ERR(status), KMP_HNT(IncreaseMonitorStackSize),
                  __kmp_msg_null);
    }
    if (status == ENOMEM) {
      __kmp_fatal(KMP_MSG(CantSetMonitorStackSize, __kmp_monitor_stksize),
                  KMP_ERR(status), KMP_HNT(DecreaseMonitorStackSize),
                  __kmp_msg_null);
    }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
    if (status == EAGAIN) {
      __kmp_fatal(KMP_MSG(NoResourcesForMonitorThread), KMP_ERR(status),
                  KMP_HNT(DecreaseNumberOfThreadsInUse), __kmp_msg_null);
    }
    KMP_SYSFAIL("pthread_create", status);
  }

  th->th.th_info.ds.ds_thread = handle;

#if KMP_REAL_TIME_FIX
  // Wait for the monitor thread to really start and set its *priority*.
  KMP_DEBUG_ASSERT(sizeof(kmp_uint32) ==
                   sizeof(__kmp_global.g.g_time.dt.t_value));
  __kmp_wait_yield_4((kmp_uint32 volatile *)&__kmp_global.g.g_time.dt.t_value,
                     -1, &__kmp_neq_4, NULL);
#endif // KMP_REAL_TIME_FIX

#ifdef KMP_THREAD_ATTR
  status = pthread_attr_destroy(&thread_attr);
  if (status != 0) {
    kmp_msg_t err_code = KMP_ERR(status);
    __kmp_msg(kmp_ms_warning, KMP_MSG(CantDestroyThreadAttrs), err_code,
              __kmp_msg_null);
    if (__kmp_generate_warnings == kmp_warnings_off) {
      __kmp_str_free(&err_code.str);
    }
  }
#endif

  KMP_MB(); /* Flush all pending memory write invalidates. */

  KA_TRACE(10, ("__kmp_create_monitor: monitor created %#.8lx\n",
                th->th.th_info.ds.ds_thread));

} // __kmp_create_monitor
#endif // KMP_USE_MONITOR

void __kmp_exit_thread(int exit_status) {
  pthread_exit((void *)(intptr_t)exit_status);
} // __kmp_exit_thread
#if KMP_USE_MONITOR
void __kmp_resume_monitor();

void __kmp_reap_monitor(kmp_info_t *th) {
  int status;
  void *exit_val;

  KA_TRACE(10, ("__kmp_reap_monitor: try to reap monitor thread with handle"
                " %#.8lx\n",
                th->th.th_info.ds.ds_thread));

  // If monitor has been created, its tid and gtid should be KMP_GTID_MONITOR.
  // If both tid and gtid are 0, it means the monitor did not ever start.
  // If both tid and gtid are KMP_GTID_DNE, the monitor has been shut down.
  KMP_DEBUG_ASSERT(th->th.th_info.ds.ds_tid == th->th.th_info.ds.ds_gtid);
  if (th->th.th_info.ds.ds_gtid != KMP_GTID_MONITOR) {
    KA_TRACE(10, ("__kmp_reap_monitor: monitor did not start, returning\n"));
    return;
  }

  KMP_MB(); /* Flush all pending memory write invalidates. */

  /* First, check to see whether the monitor thread exists to wake it up. This
     is to avoid a performance problem when the monitor sleeps during a
     blocktime-size interval */

  status = pthread_kill(th->th.th_info.ds.ds_thread, 0);
  if (status != ESRCH) {
    __kmp_resume_monitor(); // Wake up the monitor thread
  }
  KA_TRACE(10, ("__kmp_reap_monitor: try to join with monitor\n"));
  status = pthread_join(th->th.th_info.ds.ds_thread, &exit_val);
  if (exit_val != th) {
    __kmp_fatal(KMP_MSG(ReapMonitorError), KMP_ERR(status), __kmp_msg_null);
  }

  th->th.th_info.ds.ds_tid = KMP_GTID_DNE;
  th->th.th_info.ds.ds_gtid = KMP_GTID_DNE;

  KA_TRACE(10, ("__kmp_reap_monitor: done reaping monitor thread with handle"
                " %#.8lx\n",
                th->th.th_info.ds.ds_thread));

  KMP_MB(); /* Flush all pending memory write invalidates. */
}
#endif // KMP_USE_MONITOR

void __kmp_reap_worker(kmp_info_t *th) {
  int status;
  void *exit_val;

  KMP_MB(); /* Flush all pending memory write invalidates. */

  KA_TRACE(
      10, ("__kmp_reap_worker: try to reap T#%d\n", th->th.th_info.ds.ds_gtid));

  status = pthread_join(th->th.th_info.ds.ds_thread, &exit_val);
#ifdef KMP_DEBUG
  /* Don't expose these to the user until we understand when they trigger */
  if (status != 0) {
    __kmp_fatal(KMP_MSG(ReapWorkerError), KMP_ERR(status), __kmp_msg_null);
  }
  if (exit_val != th) {
    KA_TRACE(10, ("__kmp_reap_worker: worker T#%d did not reap properly, "
                  "exit_val = %p\n",
                  th->th.th_info.ds.ds_gtid, exit_val));
  }
#endif /* KMP_DEBUG */

  KA_TRACE(10, ("__kmp_reap_worker: done reaping T#%d\n",
                th->th.th_info.ds.ds_gtid));

  KMP_MB(); /* Flush all pending memory write invalidates. */
}
#if KMP_HANDLE_SIGNALS

static void __kmp_null_handler(int signo) {
  // Do nothing, for doing SIG_IGN-type actions.
} // __kmp_null_handler

static void __kmp_team_handler(int signo) {
  if (__kmp_global.g.g_abort == 0) {
/* Stage 1 signal handler, let's shut down all of the threads */
#ifdef KMP_DEBUG
    __kmp_debug_printf("__kmp_team_handler: caught signal = %d\n", signo);
#endif
    switch (signo) {
    case SIGHUP:
    case SIGINT:
    case SIGQUIT:
    case SIGILL:
    case SIGABRT:
    case SIGFPE:
    case SIGBUS:
    case SIGSEGV:
#ifdef SIGSYS
    case SIGSYS:
#endif
    case SIGTERM:
      if (__kmp_debug_buf) {
        __kmp_dump_debug_buffer();
      }
      KMP_MB(); // Flush all pending memory write invalidates.
      TCW_4(__kmp_global.g.g_abort, signo);
      KMP_MB(); // Flush all pending memory write invalidates.
      TCW_4(__kmp_global.g.g_done, TRUE);
      KMP_MB(); // Flush all pending memory write invalidates.
      break;
    default:
#ifdef KMP_DEBUG
      __kmp_debug_printf("__kmp_team_handler: unknown signal type");
#endif
      break;
    }
  }
} // __kmp_team_handler
static void __kmp_sigaction(int signum, const struct sigaction *act,
                            struct sigaction *oldact) {
  int rc = sigaction(signum, act, oldact);
  KMP_CHECK_SYSFAIL_ERRNO("sigaction", rc);
}
static void __kmp_install_one_handler(int sig, sig_func_t handler_func,
                                      int parallel_init) {
  KMP_MB(); // Flush all pending memory write invalidates.
  KB_TRACE(60,
           ("__kmp_install_one_handler( %d, ..., %d )\n", sig, parallel_init));
  if (parallel_init) {
    struct sigaction new_action;
    struct sigaction old_action;
    new_action.sa_handler = handler_func;
    new_action.sa_flags = 0;
    sigfillset(&new_action.sa_mask);
    __kmp_sigaction(sig, &new_action, &old_action);
    if (old_action.sa_handler == __kmp_sighldrs[sig].sa_handler) {
      sigaddset(&__kmp_sigset, sig);
    } else {
      // Restore/keep the user's handler if one was previously installed.
      __kmp_sigaction(sig, &old_action, NULL);
    }
  } else {
    // Save the initial/system signal handlers to see if the user installs
    // handlers later.
    __kmp_sigaction(sig, NULL, &__kmp_sighldrs[sig]);
  }
  KMP_MB(); // Flush all pending memory write invalidates.
} // __kmp_install_one_handler
static void __kmp_remove_one_handler(int sig) {
  KB_TRACE(60, ("__kmp_remove_one_handler( %d )\n", sig));
  if (sigismember(&__kmp_sigset, sig)) {
    struct sigaction old;
    KMP_MB(); // Flush all pending memory write invalidates.
    __kmp_sigaction(sig, &__kmp_sighldrs[sig], &old);
    if ((old.sa_handler != __kmp_team_handler) &&
        (old.sa_handler != __kmp_null_handler)) {
      // Restore the user's signal handler.
      KB_TRACE(10, ("__kmp_remove_one_handler: oops, not our handler, "
                    "restoring: sig=%d\n",
                    sig));
      __kmp_sigaction(sig, &old, NULL);
    }
    sigdelset(&__kmp_sigset, sig);
    KMP_MB(); // Flush all pending memory write invalidates.
  }
} // __kmp_remove_one_handler
void __kmp_install_signals(int parallel_init) {
  KB_TRACE(10, ("__kmp_install_signals( %d )\n", parallel_init));
  if (__kmp_handle_signals || !parallel_init) {
    /* If !parallel_init, we do not install handlers, just save the original
       handlers. Let us do it even if __kmp_handle_signals is 0. */
    sigemptyset(&__kmp_sigset);
    __kmp_install_one_handler(SIGHUP, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGINT, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGQUIT, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGILL, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGABRT, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGFPE, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGBUS, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGSEGV, __kmp_team_handler, parallel_init);
#ifdef SIGSYS
    __kmp_install_one_handler(SIGSYS, __kmp_team_handler, parallel_init);
#endif // SIGSYS
    __kmp_install_one_handler(SIGTERM, __kmp_team_handler, parallel_init);
#ifdef SIGPIPE
    __kmp_install_one_handler(SIGPIPE, __kmp_team_handler, parallel_init);
#endif // SIGPIPE
  }
} // __kmp_install_signals
void __kmp_remove_signals(void) {
  int sig;
  KB_TRACE(10, ("__kmp_remove_signals()\n"));
  for (sig = 1; sig < NSIG; ++sig) {
    __kmp_remove_one_handler(sig);
  }
} // __kmp_remove_signals
#endif // KMP_HANDLE_SIGNALS

void __kmp_enable(int new_state) {
#ifdef KMP_CANCEL_THREADS
  int status, old_state;
  status = pthread_setcancelstate(new_state, &old_state);
  KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
  KMP_DEBUG_ASSERT(old_state == PTHREAD_CANCEL_DISABLE);
#endif
}

void __kmp_disable(int *old_state) {
#ifdef KMP_CANCEL_THREADS
  int status;
  status = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, old_state);
  KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
#endif
}
static void __kmp_atfork_prepare(void) {
  __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);
  __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
}

static void __kmp_atfork_parent(void) {
  __kmp_release_bootstrap_lock(&__kmp_initz_lock);
  __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
}
/* Reset the library so execution in the child starts "all over again" with
   clean data structures in initial states. Don't worry about freeing memory
   allocated by the parent; just abandon it to be safe. */
static void __kmp_atfork_child(void) {
  __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
  /* TODO make sure this is done right for nested/sibling */

  ++__kmp_fork_count;

#if KMP_AFFINITY_SUPPORTED
#if KMP_OS_LINUX
  // Reset the affinity in the child to the initial thread affinity in the
  // parent.
  kmp_set_thread_affinity_mask_initial();
#endif
  // Set default not to bind threads tightly in the child (we're expecting
  // over-subscription after the fork, and this can improve things for
  // scripting languages that use OpenMP inside process-parallel code).
  __kmp_affinity_type = affinity_none;
#if OMP_40_ENABLED
  if (__kmp_nested_proc_bind.bind_types != NULL) {
    __kmp_nested_proc_bind.bind_types[0] = proc_bind_false;
  }
#endif // OMP_40_ENABLED
#endif // KMP_AFFINITY_SUPPORTED

  __kmp_init_runtime = FALSE;
#if KMP_USE_MONITOR
  __kmp_init_monitor = 0;
#endif
  __kmp_init_parallel = FALSE;
  __kmp_init_middle = FALSE;
  __kmp_init_serial = FALSE;
  TCW_4(__kmp_init_gtid, FALSE);
  __kmp_init_common = FALSE;

  TCW_4(__kmp_init_user_locks, FALSE);
#if !KMP_USE_DYNAMIC_LOCK
  __kmp_user_lock_table.used = 1;
  __kmp_user_lock_table.allocated = 0;
  __kmp_user_lock_table.table = NULL;
  __kmp_lock_blocks = NULL;
#endif

  __kmp_all_nth = 0;
  TCW_4(__kmp_nth, 0);

  __kmp_thread_pool = NULL;
  __kmp_thread_pool_insert_pt = NULL;
  __kmp_team_pool = NULL;

  /* Must actually zero all the *cache arguments passed to __kmpc_threadprivate
     here so threadprivate doesn't use stale data */
  KA_TRACE(10, ("__kmp_atfork_child: checking cache address list %p\n",
                __kmp_threadpriv_cache_list));

  while (__kmp_threadpriv_cache_list != NULL) {

    if (*__kmp_threadpriv_cache_list->addr != NULL) {
      KC_TRACE(50, ("__kmp_atfork_child: zeroing cache at address %p\n",
                    &(*__kmp_threadpriv_cache_list->addr)));

      *__kmp_threadpriv_cache_list->addr = NULL;
    }
    __kmp_threadpriv_cache_list = __kmp_threadpriv_cache_list->next;
  }

  __kmp_init_runtime = FALSE;

  /* reset statically initialized locks */
  __kmp_init_bootstrap_lock(&__kmp_initz_lock);
  __kmp_init_bootstrap_lock(&__kmp_stdio_lock);
  __kmp_init_bootstrap_lock(&__kmp_console_lock);
  __kmp_init_bootstrap_lock(&__kmp_task_team_lock);
}
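// Rationale for the wholesale reset above: after fork() only the calling
// thread exists in the child, so every worker, every lock owner, and the
// monitor are simply gone. Rather than try to reap them, the child marks the
// whole runtime uninitialized, re-creates the statically initialized
// bootstrap locks, and deliberately abandons the parent's heap allocations;
// leaking is safer than freeing structures that other (now nonexistent)
// threads might have been mutating mid-fork.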
void __kmp_register_atfork(void) {
  if (__kmp_need_register_atfork) {
    int status = pthread_atfork(__kmp_atfork_prepare, __kmp_atfork_parent,
                                __kmp_atfork_child);
    KMP_CHECK_SYSFAIL("pthread_atfork", status);
    __kmp_need_register_atfork = FALSE;
  }
}
void __kmp_suspend_initialize(void) {
  int status;
  status = pthread_mutexattr_init(&__kmp_suspend_mutex_attr);
  KMP_CHECK_SYSFAIL("pthread_mutexattr_init", status);
  status = pthread_condattr_init(&__kmp_suspend_cond_attr);
  KMP_CHECK_SYSFAIL("pthread_condattr_init", status);
}
static void __kmp_suspend_initialize_thread(kmp_info_t *th) {
  ANNOTATE_HAPPENS_AFTER(&th->th.th_suspend_init_count);
  if (th->th.th_suspend_init_count <= __kmp_fork_count) {
    /* This means we haven't initialized the suspension pthread objects for
       this thread in this instance of the process */
    int status;
    status = pthread_cond_init(&th->th.th_suspend_cv.c_cond,
                               &__kmp_suspend_cond_attr);
    KMP_CHECK_SYSFAIL("pthread_cond_init", status);
    status = pthread_mutex_init(&th->th.th_suspend_mx.m_mutex,
                                &__kmp_suspend_mutex_attr);
    KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
    *(volatile int *)&th->th.th_suspend_init_count = __kmp_fork_count + 1;
    ANNOTATE_HAPPENS_BEFORE(&th->th.th_suspend_init_count);
  }
}
void __kmp_suspend_uninitialize_thread(kmp_info_t *th) {
  if (th->th.th_suspend_init_count > __kmp_fork_count) {
    /* This means we have initialized the suspension pthread objects for this
       thread in this instance of the process */
    int status;

    status = pthread_cond_destroy(&th->th.th_suspend_cv.c_cond);
    if (status != 0 && status != EBUSY) {
      KMP_SYSFAIL("pthread_cond_destroy", status);
    }
    status = pthread_mutex_destroy(&th->th.th_suspend_mx.m_mutex);
    if (status != 0 && status != EBUSY) {
      KMP_SYSFAIL("pthread_mutex_destroy", status);
    }
    --th->th.th_suspend_init_count;
    KMP_DEBUG_ASSERT(th->th.th_suspend_init_count == __kmp_fork_count);
  }
}
/* This routine puts the calling thread to sleep after setting the
   sleep bit for the indicated flag variable to true. */
template <class C>
static inline void __kmp_suspend_template(int th_gtid, C *flag) {
  KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_suspend);
  kmp_info_t *th = __kmp_threads[th_gtid];
  int status;
  typename C::flag_t old_spin;

  KF_TRACE(30, ("__kmp_suspend_template: T#%d enter for flag = %p\n", th_gtid,
                flag->get()));

  __kmp_suspend_initialize_thread(th);

  status = pthread_mutex_lock(&th->th.th_suspend_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);

  KF_TRACE(10, ("__kmp_suspend_template: T#%d setting sleep bit for spin(%p)\n",
                th_gtid, flag->get()));

  old_spin = flag->set_sleeping();

  KF_TRACE(5, ("__kmp_suspend_template: T#%d set sleep bit for spin(%p)==%x,"
               " was %x\n",
               th_gtid, flag->get(), flag->load(), old_spin));

  if (flag->done_check_val(old_spin)) {
    old_spin = flag->unset_sleeping();
    KF_TRACE(5, ("__kmp_suspend_template: T#%d false alarm, reset sleep bit "
                 "for spin(%p)\n",
                 th_gtid, flag->get()));
  } else {
    /* Encapsulate in a loop as the documentation states that this may "with
       low probability" return when the condition variable has not been
       signaled or broadcast */
    int deactivated = FALSE;
    TCW_PTR(th->th.th_sleep_loc, (void *)flag);

    while (flag->is_sleeping()) {
#ifdef DEBUG_SUSPEND
      char buffer[128];
      __kmp_suspend_count++;
      __kmp_print_cond(buffer, &th->th.th_suspend_cv);
      __kmp_printf("__kmp_suspend_template: suspending T#%d: %s\n", th_gtid,
                   buffer);
#endif
      // Mark the thread as no longer active (only in the first iteration of
      // the loop).
      if (!deactivated) {
        th->th.th_active = FALSE;
        if (th->th.th_active_in_pool) {
          th->th.th_active_in_pool = FALSE;
          KMP_ATOMIC_DEC(&__kmp_thread_pool_active_nth);
          KMP_DEBUG_ASSERT(TCR_4(__kmp_thread_pool_active_nth) >= 0);
        }
        deactivated = TRUE;
      }

#if USE_SUSPEND_TIMEOUT
      struct timespec now;
      struct timeval tval;
      int msecs;

      status = gettimeofday(&tval, NULL);
      KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
      TIMEVAL_TO_TIMESPEC(&tval, &now);

      msecs = (4 * __kmp_dflt_blocktime) + 200;
      now.tv_sec += msecs / 1000;
      now.tv_nsec += (msecs % 1000) * 1000;

      KF_TRACE(15, ("__kmp_suspend_template: T#%d about to perform "
                    "pthread_cond_timedwait\n",
                    th_gtid));
      status = pthread_cond_timedwait(&th->th.th_suspend_cv.c_cond,
                                      &th->th.th_suspend_mx.m_mutex, &now);
#else
      KF_TRACE(15, ("__kmp_suspend_template: T#%d about to perform"
                    " pthread_cond_wait\n",
                    th_gtid));
      status = pthread_cond_wait(&th->th.th_suspend_cv.c_cond,
                                 &th->th.th_suspend_mx.m_mutex);
#endif // USE_SUSPEND_TIMEOUT

      if ((status != 0) && (status != EINTR) && (status != ETIMEDOUT)) {
        KMP_SYSFAIL("pthread_cond_wait", status);
      }
#ifdef KMP_DEBUG
      if (status == ETIMEDOUT) {
        if (flag->is_sleeping()) {
          KF_TRACE(100,
                   ("__kmp_suspend_template: T#%d timeout wakeup\n", th_gtid));
        } else {
          KF_TRACE(2, ("__kmp_suspend_template: T#%d timeout wakeup, sleep bit "
                       "not set!\n",
                       th_gtid));
        }
      } else if (flag->is_sleeping()) {
        KF_TRACE(100,
                 ("__kmp_suspend_template: T#%d spurious wakeup\n", th_gtid));
      }
#endif
    } // while

    // Mark the thread as active again (if it was previously marked inactive).
    if (deactivated) {
      th->th.th_active = TRUE;
      if (TCR_4(th->th.th_in_pool)) {
        KMP_ATOMIC_INC(&__kmp_thread_pool_active_nth);
        th->th.th_active_in_pool = TRUE;
      }
    }
  }
#ifdef DEBUG_SUSPEND
  {
    char buffer[128];
    __kmp_print_cond(buffer, &th->th.th_suspend_cv);
    __kmp_printf("__kmp_suspend_template: T#%d has awakened: %s\n", th_gtid,
                 buffer);
  }
#endif

  status = pthread_mutex_unlock(&th->th.th_suspend_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
  KF_TRACE(30, ("__kmp_suspend_template: T#%d exit\n", th_gtid));
}
void __kmp_suspend_32(int th_gtid, kmp_flag_32 *flag) {
  __kmp_suspend_template(th_gtid, flag);
}
void __kmp_suspend_64(int th_gtid, kmp_flag_64 *flag) {
  __kmp_suspend_template(th_gtid, flag);
}
void __kmp_suspend_oncore(int th_gtid, kmp_flag_oncore *flag) {
  __kmp_suspend_template(th_gtid, flag);
}
/* This routine signals the thread specified by target_gtid to wake up
   after setting the sleep bit indicated by the flag argument to FALSE.
   The target thread must already have called __kmp_suspend_template() */
template <class C>
static inline void __kmp_resume_template(int target_gtid, C *flag) {
  KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_resume);
  kmp_info_t *th = __kmp_threads[target_gtid];
  int status;

#ifdef KMP_DEBUG
  int gtid = TCR_4(__kmp_init_gtid) ? __kmp_get_gtid() : -1;
#endif

  KF_TRACE(30, ("__kmp_resume_template: T#%d wants to wakeup T#%d enter\n",
                gtid, target_gtid));
  KMP_DEBUG_ASSERT(gtid != target_gtid);

  __kmp_suspend_initialize_thread(th);

  status = pthread_mutex_lock(&th->th.th_suspend_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);

  if (!flag) { // coming from __kmp_null_resume_wrapper
    flag = (C *)CCAST(void *, th->th.th_sleep_loc);
  }

  // First, check if the flag is null or its type has changed. If so, someone
  // else woke it up.
  if (!flag || flag->get_type() != flag->get_ptr_type()) {
    // get_ptr_type simply shows what flag was cast to.
    KF_TRACE(5, ("__kmp_resume_template: T#%d exiting, thread T#%d already "
                 "awake: flag(%p)\n",
                 gtid, target_gtid, NULL));
    status = pthread_mutex_unlock(&th->th.th_suspend_mx.m_mutex);
    KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
    return;
  } else { // if multiple threads are sleeping, the flag should refer to a
    // specific thread here
    typename C::flag_t old_spin = flag->unset_sleeping();
    if (!flag->is_sleeping_val(old_spin)) {
      KF_TRACE(5, ("__kmp_resume_template: T#%d exiting, thread T#%d already "
                   "awake: flag(%p): %u => %u\n",
                   gtid, target_gtid, flag->get(), old_spin, flag->load()));
      status = pthread_mutex_unlock(&th->th.th_suspend_mx.m_mutex);
      KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
      return;
    }
    KF_TRACE(5, ("__kmp_resume_template: T#%d about to wakeup T#%d, reset "
                 "sleep bit for flag's loc(%p): %u => %u\n",
                 gtid, target_gtid, flag->get(), old_spin, flag->load()));
  }
  TCW_PTR(th->th.th_sleep_loc, NULL);

#ifdef DEBUG_SUSPEND
  {
    char buffer[128];
    __kmp_print_cond(buffer, &th->th.th_suspend_cv);
    __kmp_printf("__kmp_resume_template: T#%d resuming T#%d: %s\n", gtid,
                 target_gtid, buffer);
  }
#endif

  status = pthread_cond_signal(&th->th.th_suspend_cv.c_cond);
  KMP_CHECK_SYSFAIL("pthread_cond_signal", status);
  status = pthread_mutex_unlock(&th->th.th_suspend_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
  KF_TRACE(30, ("__kmp_resume_template: T#%d exiting after signaling wake up"
                " for T#%d\n",
                gtid, target_gtid));
}
void __kmp_resume_32(int target_gtid, kmp_flag_32 *flag) {
  __kmp_resume_template(target_gtid, flag);
}
void __kmp_resume_64(int target_gtid, kmp_flag_64 *flag) {
  __kmp_resume_template(target_gtid, flag);
}
void __kmp_resume_oncore(int target_gtid, kmp_flag_oncore *flag) {
  __kmp_resume_template(target_gtid, flag);
}
#if KMP_USE_MONITOR
void __kmp_resume_monitor() {
  KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_resume);
  int status;
#ifdef KMP_DEBUG
  int gtid = TCR_4(__kmp_init_gtid) ? __kmp_get_gtid() : -1;
  KF_TRACE(30, ("__kmp_resume_monitor: T#%d wants to wakeup T#%d enter\n", gtid,
                KMP_GTID_MONITOR));
  KMP_DEBUG_ASSERT(gtid != KMP_GTID_MONITOR);
#endif
  status = pthread_mutex_lock(&__kmp_wait_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
#ifdef DEBUG_SUSPEND
  {
    char buffer[128];
    __kmp_print_cond(buffer, &__kmp_wait_cv.c_cond);
    __kmp_printf("__kmp_resume_monitor: T#%d resuming T#%d: %s\n", gtid,
                 KMP_GTID_MONITOR, buffer);
  }
#endif
  status = pthread_cond_signal(&__kmp_wait_cv.c_cond);
  KMP_CHECK_SYSFAIL("pthread_cond_signal", status);
  status = pthread_mutex_unlock(&__kmp_wait_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
  KF_TRACE(30, ("__kmp_resume_monitor: T#%d exiting after signaling wake up"
                " for T#%d\n",
                gtid, KMP_GTID_MONITOR));
}
#endif // KMP_USE_MONITOR

void __kmp_yield(int cond) {
  if (!cond)
    return;
#if KMP_USE_MONITOR
  if (!__kmp_yielding_on)
    return;
#else
  if (__kmp_yield_cycle && !KMP_YIELD_NOW())
    return;
#endif
  sched_yield();
}
void __kmp_gtid_set_specific(int gtid) {
  if (__kmp_init_gtid) {
    int status;
    status = pthread_setspecific(__kmp_gtid_threadprivate_key,
                                 (void *)(intptr_t)(gtid + 1));
    KMP_CHECK_SYSFAIL("pthread_setspecific", status);
  } else {
    KA_TRACE(50, ("__kmp_gtid_set_specific: runtime shutdown, returning\n"));
  }
}
int __kmp_gtid_get_specific() {
  int gtid;
  if (!__kmp_init_gtid) {
    KA_TRACE(50, ("__kmp_gtid_get_specific: runtime shutdown, returning "
                  "KMP_GTID_SHUTDOWN\n"));
    return KMP_GTID_SHUTDOWN;
  }
  gtid = (int)(size_t)pthread_getspecific(__kmp_gtid_threadprivate_key);
  if (gtid == 0) {
    gtid = KMP_GTID_DNE;
  } else {
    gtid--;
  }
  KA_TRACE(50, ("__kmp_gtid_get_specific: key:%d gtid:%d\n",
                __kmp_gtid_threadprivate_key, gtid));
  return gtid;
}
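// The +1/-1 dance around pthread_{set,get}specific exists because
// pthread_getspecific() returns NULL (0) both for "never set" and for a
// stored value of 0. Storing gtid + 1 keeps gtid 0 distinguishable: a raw 0
// read back therefore means "no value" (KMP_GTID_DNE); anything else is
// decremented to recover the real gtid.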
double __kmp_read_cpu_time(void) {
  struct tms buffer;

  times(&buffer);

  return (buffer.tms_utime + buffer.tms_cutime) / (double)CLOCKS_PER_SEC;
}
int __kmp_read_system_info(struct kmp_sys_info *info) {
  int status;
  struct rusage r_usage;

  memset(info, 0, sizeof(*info));

  status = getrusage(RUSAGE_SELF, &r_usage);
  KMP_CHECK_SYSFAIL_ERRNO("getrusage", status);

  // The maximum resident set size utilized (in kilobytes)
  info->maxrss = r_usage.ru_maxrss;
  // The number of page faults serviced without any I/O
  info->minflt = r_usage.ru_minflt;
  // The number of page faults serviced that required I/O
  info->majflt = r_usage.ru_majflt;
  // The number of times a process was "swapped" out of memory
  info->nswap = r_usage.ru_nswap;
  // The number of times the file system had to perform input
  info->inblock = r_usage.ru_inblock;
  // The number of times the file system had to perform output
  info->oublock = r_usage.ru_oublock;
  // The number of times a context switch was voluntarily made
  info->nvcsw = r_usage.ru_nvcsw;
  // The number of times a context switch was forced
  info->nivcsw = r_usage.ru_nivcsw;

  return (status != 0);
}
void __kmp_read_system_time(double *delta) {
  double t_ns;
  struct timeval tval;
  struct timespec stop;
  int status;

  status = gettimeofday(&tval, NULL);
  KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
  TIMEVAL_TO_TIMESPEC(&tval, &stop);
  t_ns = TS2NS(stop) - TS2NS(__kmp_sys_timer_data.start);
  *delta = (t_ns * 1e-9);
}
void __kmp_clear_system_time(void) {
  struct timeval tval;
  int status;
  status = gettimeofday(&tval, NULL);
  KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
  TIMEVAL_TO_TIMESPEC(&tval, &__kmp_sys_timer_data.start);
}
/* Determine the number of processors available on the machine. */
static int __kmp_get_xproc(void) {
  int r = 0;

#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||    \
    KMP_OS_OPENBSD || KMP_OS_HURD || KMP_OS_KFREEBSD

  r = sysconf(_SC_NPROCESSORS_ONLN);

#elif KMP_OS_DARWIN

  // Find the number of available CPUs via the Mach host interface.
  kern_return_t rc;
  host_basic_info_data_t info;
  mach_msg_type_number_t num = HOST_BASIC_INFO_COUNT;
  rc = host_info(mach_host_self(), HOST_BASIC_INFO, (host_info_t)&info, &num);
  if (rc == 0 && num == HOST_BASIC_INFO_COUNT) {
    // Cannot use KA_TRACE() here because this code works before trace support
    // is initialized.
    r = info.avail_cpus;
  } else {
    KMP_WARNING(CantGetNumAvailCPU);
    KMP_INFORM(AssumedNumCPU);
  }

#else

#error "Unknown or unsupported OS."

#endif

  return r > 0 ? r : 2; /* guess a value of 2 if the OS told us 0 */
}
int __kmp_read_from_file(char const *path, char const *format, ...) {
  int result;
  va_list args;

  va_start(args, format);
  FILE *f = fopen(path, "rb");
  if (f == NULL)
    return 0;
  result = vfscanf(f, format, args);
  fclose(f);

  return result;
}
void __kmp_runtime_initialize(void) {
  int status;
  pthread_mutexattr_t mutex_attr;
  pthread_condattr_t cond_attr;

  if (__kmp_init_runtime) {
    return;
  }

#if (KMP_ARCH_X86 || KMP_ARCH_X86_64)
  if (!__kmp_cpuinfo.initialized) {
    __kmp_query_cpuid(&__kmp_cpuinfo);
  }
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

  __kmp_xproc = __kmp_get_xproc();

  if (sysconf(_SC_THREADS)) {

    /* Query the maximum number of threads */
    __kmp_sys_max_nth = sysconf(_SC_THREAD_THREADS_MAX);
    if (__kmp_sys_max_nth == -1) {
      /* Unlimited threads for NPTL */
      __kmp_sys_max_nth = INT_MAX;
    } else if (__kmp_sys_max_nth <= 1) {
      /* Can't tell, just use PTHREAD_THREADS_MAX */
      __kmp_sys_max_nth = KMP_MAX_NTH;
    }

    /* Query the minimum stack size */
    __kmp_sys_min_stksize = sysconf(_SC_THREAD_STACK_MIN);
    if (__kmp_sys_min_stksize <= 1) {
      __kmp_sys_min_stksize = KMP_MIN_STKSIZE;
    }
  }

  /* Set up the minimum number of threads to switch to TLS gtid */
  __kmp_tls_gtid_min = KMP_TLS_GTID_MIN;

  status = pthread_key_create(&__kmp_gtid_threadprivate_key,
                              __kmp_internal_end_dest);
  KMP_CHECK_SYSFAIL("pthread_key_create", status);
  status = pthread_mutexattr_init(&mutex_attr);
  KMP_CHECK_SYSFAIL("pthread_mutexattr_init", status);
  status = pthread_mutex_init(&__kmp_wait_mx.m_mutex, &mutex_attr);
  KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
  status = pthread_condattr_init(&cond_attr);
  KMP_CHECK_SYSFAIL("pthread_condattr_init", status);
  status = pthread_cond_init(&__kmp_wait_cv.c_cond, &cond_attr);
  KMP_CHECK_SYSFAIL("pthread_cond_init", status);
#if USE_ITT_BUILD
  __kmp_itt_initialize();
#endif /* USE_ITT_BUILD */

  __kmp_init_runtime = TRUE;
}
void __kmp_runtime_destroy(void) {
  int status;

  if (!__kmp_init_runtime) {
    return; // Nothing to do.
  }

#if USE_ITT_BUILD
  __kmp_itt_destroy();
#endif /* USE_ITT_BUILD */

  status = pthread_key_delete(__kmp_gtid_threadprivate_key);
  KMP_CHECK_SYSFAIL("pthread_key_delete", status);

  status = pthread_mutex_destroy(&__kmp_wait_mx.m_mutex);
  if (status != 0 && status != EBUSY) {
    KMP_SYSFAIL("pthread_mutex_destroy", status);
  }
  status = pthread_cond_destroy(&__kmp_wait_cv.c_cond);
  if (status != 0 && status != EBUSY) {
    KMP_SYSFAIL("pthread_cond_destroy", status);
  }
#if KMP_AFFINITY_SUPPORTED
  __kmp_affinity_uninitialize();
#endif

  __kmp_init_runtime = FALSE;
}
/* Put the thread to sleep for a time period */
void __kmp_thread_sleep(int millis) { sleep((millis + 500) / 1000); }
/* Calculate the elapsed wall clock time for the user */
void __kmp_elapsed(double *t) {
  int status;
#ifdef FIX_SGI_CLOCK
  struct timespec ts;

  status = clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts);
  KMP_CHECK_SYSFAIL_ERRNO("clock_gettime", status);
  *t = (double)ts.tv_nsec * (1.0 / (double)KMP_NSEC_PER_SEC) +
       (double)ts.tv_sec;
#else
  struct timeval tv;

  status = gettimeofday(&tv, NULL);
  KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
  *t = (double)tv.tv_usec * (1.0 / (double)KMP_USEC_PER_SEC) +
       (double)tv.tv_sec;
#endif
}

/* Calculate the elapsed wall clock tick for the user */
void __kmp_elapsed_tick(double *t) { *t = 1 / (double)CLOCKS_PER_SEC; }
/* Return the current time stamp in nsec */
kmp_uint64 __kmp_now_nsec() {
  struct timeval t;
  gettimeofday(&t, NULL);
  kmp_uint64 nsec = (kmp_uint64)KMP_NSEC_PER_SEC * (kmp_uint64)t.tv_sec +
                    (kmp_uint64)1000 * (kmp_uint64)t.tv_usec;
  return nsec;
}
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
/* Measure clock ticks per millisecond */
void __kmp_initialize_system_tick() {
  kmp_uint64 now, nsec2, diff;
  kmp_uint64 delay = 100000; // ~50-100 usec on most machines.
  kmp_uint64 nsec = __kmp_now_nsec();
  kmp_uint64 goal = __kmp_hardware_timestamp() + delay;
  while ((now = __kmp_hardware_timestamp()) < goal)
    ;
  nsec2 = __kmp_now_nsec();
  diff = nsec2 - nsec;
  if (diff > 0) {
    kmp_uint64 tpms = (kmp_uint64)(1e6 * (delay + (now - goal)) / diff);
    if (tpms > 0)
      __kmp_ticks_per_msec = tpms;
  }
}
#endif
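// Calibration arithmetic: the loop spins until the hardware timestamp has
// advanced by at least `delay` ticks, while gettimeofday-based
// __kmp_now_nsec() measures the same span in nanoseconds (diff). Ticks per
// millisecond is then ticks / ns scaled by 1e6 ns per ms. For example, if
// delay + overshoot is 100000 ticks and diff is 50000 ns, then
// tpms = 1e6 * 100000 / 50000 = 2e6 ticks/ms, i.e. a 2 GHz timestamp counter.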
/* Determine whether the given address is mapped into the current address
   space. */

int __kmp_is_address_mapped(void *addr) {

  int found = 0;
  int rc;

#if KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_HURD || KMP_OS_KFREEBSD

  /* On GNUish OSes, read the /proc/<pid>/maps pseudo-file to get all the
     address ranges mapped into the address space. */

  char *name = __kmp_str_format("/proc/%d/maps", getpid());
  FILE *file = NULL;

  file = fopen(name, "r");
  KMP_ASSERT(file != NULL);

  for (;;) {

    void *beginning = NULL;
    void *ending = NULL;
    char perms[5];

    rc = fscanf(file, "%p-%p %4s %*[^\n]\n", &beginning, &ending, perms);
    if (rc == EOF) {
      break;
    }
    KMP_ASSERT(rc == 3 &&
               KMP_STRLEN(perms) == 4); // Make sure all fields are read.

    // Ending address is not included in the region, but beginning is.
    if ((addr >= beginning) && (addr < ending)) {
      perms[2] = 0; // 3rd and 4th characters do not matter.
      if (strcmp(perms, "rw") == 0) {
        // Memory we are looking for should be readable and writable.
        found = 1;
      }
      break;
    }
  }

  // Free resources.
  fclose(file);
  KMP_INTERNAL_FREE(name);

#elif KMP_OS_DARWIN

  /* On OS X*, the /proc pseudo filesystem is not available. Try to read memory
     using the vm interface. */

  int buffer;
  vm_size_t count;
  rc = vm_read_overwrite(
      mach_task_self(), // Task to read memory of.
      (vm_address_t)(addr), // Address to read from.
      1, // Number of bytes to be read.
      (vm_address_t)(&buffer), // Address of buffer to save read bytes in.
      &count // Address of var to save number of read bytes in.
      );
  if (rc == 0) {
    // Memory successfully read.
    found = 1;
  }

#elif KMP_OS_NETBSD

  int mib[5];
  mib[0] = CTL_VM;
  mib[1] = VM_PROC;
  mib[2] = VM_PROC_MAP;
  mib[3] = getpid();
  mib[4] = sizeof(struct kinfo_vmentry);

  size_t size;
  rc = sysctl(mib, __arraycount(mib), NULL, &size, NULL, 0);
  KMP_ASSERT(!rc);
  KMP_ASSERT(size);

  size = size * 4 / 3;
  struct kinfo_vmentry *kiv =
      (struct kinfo_vmentry *)KMP_INTERNAL_MALLOC(size);
  KMP_ASSERT(kiv);

  rc = sysctl(mib, __arraycount(mib), kiv, &size, NULL, 0);
  KMP_ASSERT(!rc);
  KMP_ASSERT(size);

  for (size_t i = 0; i < size; i++) {
    if (kiv[i].kve_start >= (uint64_t)addr &&
        kiv[i].kve_end <= (uint64_t)addr) {
      found = 1;
      break;
    }
  }
  KMP_INTERNAL_FREE(kiv);
#elif KMP_OS_DRAGONFLY || KMP_OS_OPENBSD

  // FIXME(DragonFly, OpenBSD): Implement this
  found = 1;

#else

#error "Unknown or unsupported OS"

#endif

  return found;
} // __kmp_is_address_mapped

#ifdef USE_LOAD_BALANCE

#if KMP_OS_DARWIN || KMP_OS_NETBSD

// The function returns the rounded value of the system load average during
// a given time interval, which depends on the value of the
// __kmp_load_balance_interval variable (default is 60 sec; other values may
// be 300 sec or 900 sec). It returns -1 in case of error.
int __kmp_get_load_balance(int max) {
  double averages[3];
  int ret_avg = 0;

  int res = getloadavg(averages, 3);

  // Check __kmp_load_balance_interval to determine which of the averages to
  // use. getloadavg() may return fewer samples than requested (less than 3).
  if (__kmp_load_balance_interval < 180 && (res >= 1)) {
    ret_avg = averages[0]; // 1 min
  } else if ((__kmp_load_balance_interval >= 180 &&
              __kmp_load_balance_interval < 600) &&
             (res >= 2)) {
    ret_avg = averages[1]; // 5 min
  } else if ((__kmp_load_balance_interval >= 600) && (res == 3)) {
    ret_avg = averages[2]; // 15 min
  } else { // Error occurred
    return -1;
  }

  return ret_avg;
}
#else // Linux* OS

// The function returns the number of running (not sleeping) threads, or -1 in
// case of error. It is computed by scanning the pseudo-directory
// /proc/<pid>/task/<tid> and reading every /proc/<pid>/task/<tid>/stat file.
int __kmp_get_load_balance(int max) {
  static int permanent_error = 0;
  static int glb_running_threads = 0; // Saved count of the running threads for
  // the thread balance algorithm.
  static double glb_call_time = 0; /* Thread balance algorithm call time */

  int running_threads = 0; // Number of running threads in the system.

  DIR *proc_dir = NULL; // Handle of "/proc/" directory.
  struct dirent *proc_entry = NULL;

  kmp_str_buf_t task_path; // "/proc/<pid>/task/<tid>/" path.
  DIR *task_dir = NULL; // Handle of "/proc/<pid>/task/<tid>/" directory.
  struct dirent *task_entry = NULL;
  int task_path_fixed_len;

  kmp_str_buf_t stat_path; // "/proc/<pid>/task/<tid>/stat" path.
  int stat_file = -1;
  int stat_path_fixed_len;

  int total_processes = 0; // Total number of processes in system.
  int total_threads = 0; // Total number of threads in system.

  double call_time = 0.0;

  __kmp_str_buf_init(&task_path);
  __kmp_str_buf_init(&stat_path);

  __kmp_elapsed(&call_time);

  if (glb_call_time &&
      (call_time - glb_call_time < __kmp_load_balance_interval)) {
    running_threads = glb_running_threads;
    goto finish;
  }

  glb_call_time = call_time;

  // Do not spend time on scanning "/proc/" if we have a permanent error.
  if (permanent_error) {
    running_threads = -1;
    goto finish;
  }

  if (max <= 0) {
    max = INT_MAX;
  }

  // Open "/proc/" directory.
  proc_dir = opendir("/proc");
  if (proc_dir == NULL) {
    // Cannot open "/proc/". Probably the kernel does not support it. Return an
    // error now and in subsequent calls.
    running_threads = -1;
    permanent_error = 1;
    goto finish;
  }

  // Initialize fixed part of task_path. This part will not change.
  __kmp_str_buf_cat(&task_path, "/proc/", 6);
  task_path_fixed_len = task_path.used; // Remember number of used characters.

  proc_entry = readdir(proc_dir);
  while (proc_entry != NULL) {
    // Proc entry is a directory and name starts with a digit. Assume it is a
    // process' directory.
    if (proc_entry->d_type == DT_DIR && isdigit(proc_entry->d_name[0])) {

      ++total_processes;
      // Make sure the init process is the very first in "/proc", so we can
      // replace strcmp(proc_entry->d_name, "1") == 0 with the simpler
      // total_processes == 1. ("a implies b" is equivalent to "!a || b".)
      KMP_DEBUG_ASSERT(total_processes != 1 ||
                       strcmp(proc_entry->d_name, "1") == 0);

      // Construct task_path.
      task_path.used = task_path_fixed_len; // Reset task_path to "/proc/".
      __kmp_str_buf_cat(&task_path, proc_entry->d_name,
                        KMP_STRLEN(proc_entry->d_name));
      __kmp_str_buf_cat(&task_path, "/task", 5);

      task_dir = opendir(task_path.str);
      if (task_dir == NULL) {
        // A process can finish between reading the "/proc/" directory entry
        // and opening the process' "task/" directory, so in general we should
        // not complain and just skip the process. But the "init" process (pid
        // 1) should always exist, so if we cannot open "/proc/1/task/",
        // "task/" is not supported by the kernel. Report an error now and in
        // the future.
        if (strcmp(proc_entry->d_name, "1") == 0) {
          running_threads = -1;
          permanent_error = 1;
          goto finish;
        }
      } else {
        // Construct fixed part of stat file path.
        __kmp_str_buf_clear(&stat_path);
        __kmp_str_buf_cat(&stat_path, task_path.str, task_path.used);
        __kmp_str_buf_cat(&stat_path, "/", 1);
        stat_path_fixed_len = stat_path.used;

        task_entry = readdir(task_dir);
        while (task_entry != NULL) {
          // It is a directory and name starts with a digit.
          if (proc_entry->d_type == DT_DIR && isdigit(task_entry->d_name[0])) {
            ++total_threads;

            // Construct complete stat file path.
            stat_path.used =
                stat_path_fixed_len; // Reset stat path to its fixed part.
            __kmp_str_buf_cat(&stat_path, task_entry->d_name,
                              KMP_STRLEN(task_entry->d_name));
            __kmp_str_buf_cat(&stat_path, "/stat", 5);

            // Note: the low-level API (open/read/close) is used; the
            // high-level API (fopen/fclose) works ~30% slower.
            stat_file = open(stat_path.str, O_RDONLY);
            if (stat_file == -1) {
              // We cannot report an error because the task (thread) can
              // terminate just before reading this file.
            } else {
              /* Content of a "stat" file looks like:
                     24285 (program name) S ...
                 The command name may contain spaces or parentheses, so scan
                 for the last ')' and read the state character after it. */
              char buffer[256];
              int len;
              len = read(stat_file, buffer, sizeof(buffer) - 1);
              if (len >= 0) {
                buffer[len] = 0;
                char *close_parent = strstr(buffer, ") ");
                if (close_parent != NULL) {
                  char state = *(close_parent + 2);
                  if (state == 'R') {
                    ++running_threads;
                    if (running_threads >= max) {
                      goto finish;
                    }
                  }
                }
              }
              close(stat_file);
              stat_file = -1;
            }
          }
          task_entry = readdir(task_dir);
        }
        closedir(task_dir);
        task_dir = NULL;
      }
    }
    proc_entry = readdir(proc_dir);
  }

  // There _might_ be a timing hole where the thread executing this code gets
  // skipped in the load balance, and running_threads is 0. Assert in the
  // debug builds only!!!
  KMP_DEBUG_ASSERT(running_threads > 0);
  if (running_threads <= 0) {
    running_threads = 1;
  }

finish: // Clean up and exit.
  if (proc_dir != NULL) {
    closedir(proc_dir);
  }
  __kmp_str_buf_free(&task_path);
  if (task_dir != NULL) {
    closedir(task_dir);
  }
  __kmp_str_buf_free(&stat_path);
  if (stat_file != -1) {
    close(stat_file);
  }

  glb_running_threads = running_threads;

  return running_threads;
} // __kmp_get_load_balance
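// Design note: this counts running threads by scanning every
// /proc/<pid>/task/<tid>/stat and testing the state field for 'R'. The scan
// is O(threads in the system), so results are cached in glb_running_threads
// and refreshed at most once per __kmp_load_balance_interval seconds; the
// raw open/read/close calls (rather than stdio) and the strstr(buffer, ") ")
// trick for skipping command names containing spaces keep the per-file cost
// low.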
#endif // KMP_OS_DARWIN

#endif // USE_LOAD_BALANCE

#if !(KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_MIC ||                           \
      ((KMP_OS_LINUX || KMP_OS_DARWIN) && KMP_ARCH_AARCH64) || KMP_ARCH_PPC64)

// We really only need the case with 1 argument, because CLANG always builds
// a struct of pointers to the shared variables referenced in the outlined
// function.
int __kmp_invoke_microtask(microtask_t pkfn, int gtid, int tid, int argc,
                           void *p_argv[]
#if OMPT_SUPPORT
                           ,
                           void **exit_frame_ptr
#endif
                           ) {
#if OMPT_SUPPORT
  *exit_frame_ptr = OMPT_GET_FRAME_ADDRESS(0);
#endif

  switch (argc) {
  default:
    fprintf(stderr, "Too many args to microtask: %d!\n", argc);
    fflush(stderr);
    exit(-1);
  case 0:
    (*pkfn)(&gtid, &tid);
    break;
  case 1:
    (*pkfn)(&gtid, &tid, p_argv[0]);
    break;
  case 2:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1]);
    break;
  case 3:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2]);
    break;
  case 4:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3]);
    break;
  case 5:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4]);
    break;
  case 6:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5]);
    break;
  case 7:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6]);
    break;
  case 8:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7]);
    break;
  case 9:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8]);
    break;
  case 10:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9]);
    break;
  case 11:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10]);
    break;
  case 12:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
            p_argv[11]);
    break;
  case 13:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
            p_argv[11], p_argv[12]);
    break;
  case 14:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
            p_argv[11], p_argv[12], p_argv[13]);
    break;
  case 15:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
            p_argv[11], p_argv[12], p_argv[13], p_argv[14]);
    break;
  }

#if OMPT_SUPPORT
  *exit_frame_ptr = 0;
#endif

  return 1;
}

#endif