17 #include "kmp_wrapper_getpid.h" 23 #include "kmp_stats.h" 24 #include "kmp_wait_release.h" 25 #include "kmp_affinity.h" 27 #if !KMP_OS_FREEBSD && !KMP_OS_NETBSD 33 #include <sys/times.h> 34 #include <sys/resource.h> 35 #include <sys/syscall.h> 37 #if KMP_OS_LINUX && !KMP_OS_CNK 38 # include <sys/sysinfo.h> 53 # include <sys/sysctl.h> 54 # include <mach/mach.h> 56 # include <pthread_np.h> 63 #include "tsan_annotations.h" 68 struct kmp_sys_timer {
69 struct timespec start;
73 #define TS2NS(timespec) (((timespec).tv_sec * 1e9) + (timespec).tv_nsec) 75 static struct kmp_sys_timer __kmp_sys_timer_data;
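/* Usage sketch for TS2NS (illustrative only, not part of the original source):
   the macro flattens a timespec into a double count of nanoseconds, so time
   deltas reduce to plain subtraction, exactly as __kmp_read_system_time()
   does below:

       struct timespec a, b;
       // ... fill a and b, e.g. via gettimeofday() + TIMEVAL_TO_TIMESPEC() ...
       double elapsed_ns = TS2NS(b) - TS2NS(a);

   Since tv_sec is scaled by the double constant 1e9, the result trades
   exactness for range. */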
#if KMP_HANDLE_SIGNALS
    typedef void (* sig_func_t )( int );
    STATIC_EFI2_WORKAROUND struct sigaction __kmp_sighldrs[ NSIG ];
    static sigset_t __kmp_sigset;
#endif

static int __kmp_init_runtime = FALSE;

static int __kmp_fork_count = 0;

static pthread_condattr_t  __kmp_suspend_cond_attr;
static pthread_mutexattr_t __kmp_suspend_mutex_attr;

static kmp_cond_align_t  __kmp_wait_cv;
static kmp_mutex_align_t __kmp_wait_mx;

double __kmp_ticks_per_nsec;
#ifdef DEBUG_SUSPEND
static void
__kmp_print_cond( char *buffer, kmp_cond_align_t *cond )
{
    KMP_SNPRINTF( buffer, 128, "(cond (lock (%ld, %d)), (descr (%p)))",
                  cond->c_cond.__c_lock.__status, cond->c_cond.__c_lock.__spinlock,
                  cond->c_cond.__c_waiting );
}
#endif
#if ( KMP_OS_LINUX && KMP_AFFINITY_SUPPORTED )

/* Affinity support */

void
__kmp_affinity_bind_thread( int which )
{
    KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
                "Illegal set affinity operation when not capable");

    kmp_affin_mask_t *mask;
    KMP_CPU_ALLOC_ON_STACK(mask);
    KMP_CPU_ZERO(mask);
    KMP_CPU_SET(which, mask);
    __kmp_set_system_affinity(mask, TRUE);
    KMP_CPU_FREE_FROM_STACK(mask);
}
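/* Illustrative caller (hypothetical, not from this file): pin the current
   thread to logical CPU 3 once affinity capability has been established.

       __kmp_affinity_determine_capable( "KMP_AFFINITY" );
       if ( KMP_AFFINITY_CAPABLE() ) {
           __kmp_affinity_bind_thread( 3 );
       }
*/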
/* Determine if we can access affinity functionality on this version of
 * Linux* OS, and if so, how many bits the CPU mask uses. */
void
__kmp_affinity_determine_capable( const char *env_var )
{
    // Check and see if the OS supports thread affinity.

# define KMP_CPU_SET_SIZE_LIMIT          (1024*1024)

    int gCode;
    int sCode;
    unsigned char *buf;
    buf = ( unsigned char * ) KMP_INTERNAL_MALLOC( KMP_CPU_SET_SIZE_LIMIT );

    // If the getaffinity syscall returns a suggestion for the size,
    // then we don't have to search for an appropriate size.
    gCode = syscall( __NR_sched_getaffinity, 0, KMP_CPU_SET_SIZE_LIMIT, buf );
    KA_TRACE(30, ( "__kmp_affinity_determine_capable: "
                   "initial getaffinity call returned %d errno = %d\n",
                   gCode, errno));

    if (gCode < 0) {
        // System call not supported.
        if (__kmp_affinity_verbose || (__kmp_affinity_warnings
          && (__kmp_affinity_type != affinity_none)
          && (__kmp_affinity_type != affinity_default)
          && (__kmp_affinity_type != affinity_disabled))) {
            int error = errno;
            kmp_msg_t err_code = KMP_ERR( error );
            __kmp_msg( kmp_ms_warning,
                       KMP_MSG( GetAffSysCallNotSupported, env_var ),
                       err_code, __kmp_msg_null );
            if (__kmp_generate_warnings == kmp_warnings_off) {
                __kmp_str_free(&err_code.str);
            }
        }
        KMP_AFFINITY_DISABLE();
        KMP_INTERNAL_FREE(buf);
        return;
    }

    // getaffinity succeeded; verify with a setaffinity probe. Passing a NULL
    // mask with the size the kernel just reported should fail with EFAULT.
    sCode = syscall( __NR_sched_setaffinity, 0, gCode, NULL );
    KA_TRACE(30, ( "__kmp_affinity_determine_capable: "
                   "setaffinity for mask size %d returned %d errno = %d\n",
                   gCode, sCode, errno));
    if (sCode < 0) {
        if (errno == ENOSYS) {
            if (__kmp_affinity_verbose || (__kmp_affinity_warnings
              && (__kmp_affinity_type != affinity_none)
              && (__kmp_affinity_type != affinity_default)
              && (__kmp_affinity_type != affinity_disabled))) {
                int error = errno;
                kmp_msg_t err_code = KMP_ERR( error );
                __kmp_msg( kmp_ms_warning,
                           KMP_MSG( SetAffSysCallNotSupported, env_var ),
                           err_code, __kmp_msg_null );
                if (__kmp_generate_warnings == kmp_warnings_off) {
                    __kmp_str_free(&err_code.str);
                }
            }
            KMP_AFFINITY_DISABLE();
            KMP_INTERNAL_FREE(buf);
            return;
        }
        if (errno == EFAULT) {
            KMP_AFFINITY_ENABLE(gCode);
            KA_TRACE(10, ( "__kmp_affinity_determine_capable: "
                           "affinity supported (mask size %d)\n",
                           (int)__kmp_affin_mask_size));
            KMP_INTERNAL_FREE(buf);
            return;
        }
    }

    // Call the getaffinity system call repeatedly with increasing set sizes
    // until we succeed, or reach an upper bound on the search.
    KA_TRACE(30, ( "__kmp_affinity_determine_capable: "
                   "searching for proper set size\n"));
    int size;
    for (size = 1; size <= KMP_CPU_SET_SIZE_LIMIT; size *= 2) {
        gCode = syscall( __NR_sched_getaffinity, 0, size, buf );
        KA_TRACE(30, ( "__kmp_affinity_determine_capable: "
                       "getaffinity for mask size %d returned %d errno = %d\n",
                       size, gCode, errno));

        if (gCode < 0) {
            if ( errno == ENOSYS ) {
                // We shouldn't get here.
                KA_TRACE(30, ( "__kmp_affinity_determine_capable: "
                               "inconsistent OS call behavior: errno == ENOSYS for mask size %d\n",
                               size));
                if (__kmp_affinity_verbose || (__kmp_affinity_warnings
                  && (__kmp_affinity_type != affinity_none)
                  && (__kmp_affinity_type != affinity_default)
                  && (__kmp_affinity_type != affinity_disabled))) {
                    int error = errno;
                    kmp_msg_t err_code = KMP_ERR( error );
                    __kmp_msg( kmp_ms_warning,
                               KMP_MSG( GetAffSysCallNotSupported, env_var ),
                               err_code, __kmp_msg_null );
                    if (__kmp_generate_warnings == kmp_warnings_off) {
                        __kmp_str_free(&err_code.str);
                    }
                }
                KMP_AFFINITY_DISABLE();
                KMP_INTERNAL_FREE(buf);
                return;
            }
            continue;
        }

        sCode = syscall( __NR_sched_setaffinity, 0, gCode, NULL );
        KA_TRACE(30, ( "__kmp_affinity_determine_capable: "
                       "setaffinity for mask size %d returned %d errno = %d\n",
                       gCode, sCode, errno));
        if (sCode < 0) {
            if (errno == ENOSYS) {
                // We shouldn't get here.
                KA_TRACE(30, ( "__kmp_affinity_determine_capable: "
                               "inconsistent OS call behavior: errno == ENOSYS for mask size %d\n",
                               size));
                if (__kmp_affinity_verbose || (__kmp_affinity_warnings
                  && (__kmp_affinity_type != affinity_none)
                  && (__kmp_affinity_type != affinity_default)
                  && (__kmp_affinity_type != affinity_disabled))) {
                    int error = errno;
                    kmp_msg_t err_code = KMP_ERR( error );
                    __kmp_msg( kmp_ms_warning,
                               KMP_MSG( SetAffSysCallNotSupported, env_var ),
                               err_code, __kmp_msg_null );
                    if (__kmp_generate_warnings == kmp_warnings_off) {
                        __kmp_str_free(&err_code.str);
                    }
                }
                KMP_AFFINITY_DISABLE();
                KMP_INTERNAL_FREE(buf);
                return;
            }
            if (errno == EFAULT) {
                KMP_AFFINITY_ENABLE(gCode);
                KA_TRACE(10, ( "__kmp_affinity_determine_capable: "
                               "affinity supported (mask size %d)\n",
                               (int)__kmp_affin_mask_size));
                KMP_INTERNAL_FREE(buf);
                return;
            }
        }
    }
    KMP_INTERNAL_FREE(buf);

    // Affinity is not supported.
    KMP_AFFINITY_DISABLE();
    KA_TRACE(10, ( "__kmp_affinity_determine_capable: "
                   "cannot determine mask size - affinity not supported\n"));
    if (__kmp_affinity_verbose || (__kmp_affinity_warnings
      && (__kmp_affinity_type != affinity_none)
      && (__kmp_affinity_type != affinity_default)
      && (__kmp_affinity_type != affinity_disabled))) {
        KMP_WARNING( AffCantGetMaskSize, env_var );
    }
}
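/* The probing protocol above, condensed (explanatory note): the raw
   sched_getaffinity syscall fails while the buffer is smaller than the
   kernel's internal cpumask, and returns the mask size in bytes once the
   buffer is large enough; sched_setaffinity(0, size, NULL) failing with
   EFAULT -- the kernel got far enough to dereference the NULL mask -- is,
   perversely, the success signal that `size` is accepted. A standalone
   sketch of the size search (hypothetical helper, Linux only):

       #include <unistd.h>
       #include <sys/syscall.h>

       static long probe_affinity_mask_size(void) {
           unsigned char buf[1024];
           for (long size = 1; size <= (long)sizeof(buf); size *= 2) {
               long g = syscall(__NR_sched_getaffinity, 0, size, buf);
               if (g > 0)
                   return g;   // kernel reported the mask size it used
           }
           return -1;          // not determinable within this limit
       }
*/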
#endif // KMP_OS_LINUX && KMP_AFFINITY_SUPPORTED

#if KMP_USE_FUTEX

int
__kmp_futex_determine_capable()
{
    int loc = 0;
    int rc = syscall( __NR_futex, &loc, FUTEX_WAKE, 1, NULL, NULL, 0 );
    int retval = ( rc == 0 ) || ( errno != ENOSYS );

    KA_TRACE(10, ( "__kmp_futex_determine_capable: rc = %d errno = %d\n",
                   rc, errno ) );
    KA_TRACE(10, ( "__kmp_futex_determine_capable: futex syscall%s supported\n",
                   retval ? "" : " not" ) );

    return retval;
}
#endif // KMP_USE_FUTEX

#if (KMP_ARCH_X86 || KMP_ARCH_X86_64) && (! KMP_ASM_INTRINS)
/* Only the 32-bit "add-exchange" instruction on the IA-32 architecture causes
   us to use compare_and_store for these routines. */

kmp_int8
__kmp_test_then_or8( volatile kmp_int8 *p, kmp_int8 d )
{
    kmp_int8 old_value, new_value;
    old_value = TCR_1( *p );
    new_value = old_value | d;
    while ( ! KMP_COMPARE_AND_STORE_REL8 ( p, old_value, new_value ) )
    {
        KMP_CPU_PAUSE();
        old_value = TCR_1( *p );
        new_value = old_value | d;
    }
    return old_value;
}

kmp_int8
__kmp_test_then_and8( volatile kmp_int8 *p, kmp_int8 d )
{
    kmp_int8 old_value, new_value;
    old_value = TCR_1( *p );
    new_value = old_value & d;
    while ( ! KMP_COMPARE_AND_STORE_REL8 ( p, old_value, new_value ) )
    {
        KMP_CPU_PAUSE();
        old_value = TCR_1( *p );
        new_value = old_value & d;
    }
    return old_value;
}

kmp_int32
__kmp_test_then_or32( volatile kmp_int32 *p, kmp_int32 d )
{
    kmp_int32 old_value, new_value;
    old_value = TCR_4( *p );
    new_value = old_value | d;
    while ( ! KMP_COMPARE_AND_STORE_REL32 ( p, old_value, new_value ) )
    {
        KMP_CPU_PAUSE();
        old_value = TCR_4( *p );
        new_value = old_value | d;
    }
    return old_value;
}

kmp_int32
__kmp_test_then_and32( volatile kmp_int32 *p, kmp_int32 d )
{
    kmp_int32 old_value, new_value;
    old_value = TCR_4( *p );
    new_value = old_value & d;
    while ( ! KMP_COMPARE_AND_STORE_REL32 ( p, old_value, new_value ) )
    {
        KMP_CPU_PAUSE();
        old_value = TCR_4( *p );
        new_value = old_value & d;
    }
    return old_value;
}

# if KMP_ARCH_X86 || KMP_ARCH_PPC64 || (KMP_OS_LINUX && KMP_ARCH_AARCH64)
kmp_int8
__kmp_test_then_add8( volatile kmp_int8 *p, kmp_int8 d )
{
    kmp_int8 old_value, new_value;
    old_value = TCR_1( *p );
    new_value = old_value + d;
    while ( ! KMP_COMPARE_AND_STORE_REL8 ( p, old_value, new_value ) )
    {
        KMP_CPU_PAUSE();
        old_value = TCR_1( *p );
        new_value = old_value + d;
    }
    return old_value;
}

kmp_int64
__kmp_test_then_add64( volatile kmp_int64 *p, kmp_int64 d )
{
    kmp_int64 old_value, new_value;
    old_value = TCR_8( *p );
    new_value = old_value + d;
    while ( ! KMP_COMPARE_AND_STORE_REL64 ( p, old_value, new_value ) )
    {
        KMP_CPU_PAUSE();
        old_value = TCR_8( *p );
        new_value = old_value + d;
    }
    return old_value;
}
# endif /* KMP_ARCH_X86 || KMP_ARCH_PPC64 || (KMP_OS_LINUX && KMP_ARCH_AARCH64) */

kmp_int64
__kmp_test_then_or64( volatile kmp_int64 *p, kmp_int64 d )
{
    kmp_int64 old_value, new_value;
    old_value = TCR_8( *p );
    new_value = old_value | d;
    while ( ! KMP_COMPARE_AND_STORE_REL64 ( p, old_value, new_value ) )
    {
        KMP_CPU_PAUSE();
        old_value = TCR_8( *p );
        new_value = old_value | d;
    }
    return old_value;
}

kmp_int64
__kmp_test_then_and64( volatile kmp_int64 *p, kmp_int64 d )
{
    kmp_int64 old_value, new_value;
    old_value = TCR_8( *p );
    new_value = old_value & d;
    while ( ! KMP_COMPARE_AND_STORE_REL64 ( p, old_value, new_value ) )
    {
        KMP_CPU_PAUSE();
        old_value = TCR_8( *p );
        new_value = old_value & d;
    }
    return old_value;
}

#endif /* (KMP_ARCH_X86 || KMP_ARCH_X86_64) && (! KMP_ASM_INTRINS) */
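/* All of the emulated atomics above share one shape: read the current value,
   compute the desired new value, and retry the compare-and-store until no
   other thread raced in between; the old value is returned, "test_then"
   style. A minimal sketch of the same pattern in portable C11 (illustrative
   only -- the runtime itself uses the KMP_* macros, not <stdatomic.h>):

       #include <stdatomic.h>

       static int fetch_then_or(_Atomic int *p, int d) {
           int old = atomic_load(p);
           // On failure, `old` is reloaded with the current value, and the
           // desired value `old | d` is recomputed on the next iteration.
           while (!atomic_compare_exchange_weak(p, &old, old | d)) { }
           return old;   // like __kmp_test_then_or32
       }
*/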
void
__kmp_terminate_thread( int gtid )
{
    int status;
    kmp_info_t *th = __kmp_threads[ gtid ];

    if ( !th ) return;

    #ifdef KMP_CANCEL_THREADS
        KA_TRACE( 10, ("__kmp_terminate_thread: kill (%d)\n", gtid ) );
        status = pthread_cancel( th->th.th_info.ds.ds_thread );
        if ( status != 0 && status != ESRCH ) {
            __kmp_msg( kmp_ms_fatal,
                       KMP_MSG( CantTerminateWorkerThread ),
                       KMP_ERR( status ),
                       __kmp_msg_null );
        }
    #endif
    __kmp_yield( TRUE );
}
/* Set thread stack info according to values returned by pthread_getattr_np().
   If values are unreasonable, assume the call failed and use the incremental
   stack refinement method instead. Returns TRUE if the stack parameters could
   be determined exactly, FALSE if incremental refinement is necessary. */
static kmp_int32
__kmp_set_stack_info( int gtid, kmp_info_t *th )
{
    int            stack_data;
#if KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD
    pthread_attr_t attr;
    int            status;
    size_t         size = 0;
    void *         addr = 0;

    /* Always do incremental stack refinement for ubermaster threads since the
       initial thread stack range can be reduced by sibling thread creation so
       pthread_attr_getstack may cause thread gtid aliasing. */
    if ( ! KMP_UBER_GTID(gtid) ) {

        /* Fetch the real thread attributes. */
        status = pthread_attr_init( &attr );
        KMP_CHECK_SYSFAIL( "pthread_attr_init", status );
#if KMP_OS_FREEBSD || KMP_OS_NETBSD
        status = pthread_attr_get_np( pthread_self(), &attr );
        KMP_CHECK_SYSFAIL( "pthread_attr_get_np", status );
#else
        status = pthread_getattr_np( pthread_self(), &attr );
        KMP_CHECK_SYSFAIL( "pthread_getattr_np", status );
#endif
        status = pthread_attr_getstack( &attr, &addr, &size );
        KMP_CHECK_SYSFAIL( "pthread_attr_getstack", status );
        KA_TRACE( 60, ( "__kmp_set_stack_info: T#%d pthread_attr_getstack returned size: %lu, "
                        "low addr: %p\n",
                        gtid, size, addr ));

        status = pthread_attr_destroy( &attr );
        KMP_CHECK_SYSFAIL( "pthread_attr_destroy", status );
    }

    if ( size != 0 && addr != 0 ) {  /* was stack parameter determination successful? */
        /* Store the correct base and size. */
        TCW_PTR(th->th.th_info.ds.ds_stackbase, (((char *)addr) + size));
        TCW_PTR(th->th.th_info.ds.ds_stacksize, size);
        TCW_4(th->th.th_info.ds.ds_stackgrow, FALSE);
        return TRUE;
    }
#endif /* KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD */
    /* Use incremental refinement starting from an initial conservative estimate. */
    TCW_PTR(th->th.th_info.ds.ds_stacksize, 0);
    TCW_PTR(th -> th.th_info.ds.ds_stackbase, &stack_data);
    TCW_4(th->th.th_info.ds.ds_stackgrow, TRUE);
    return FALSE;
}
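/* Worked example of the exact path (illustrative numbers): if
   pthread_attr_getstack() reports addr = 0x7f0000000000 and size = 0x800000
   (8 MiB), then ds_stackbase is stored as 0x7f0000800000 -- the *high* end,
   since stacks grow downward -- and ds_stackgrow is FALSE. The fallback path
   instead anchors ds_stackbase at a local variable and lets later stack
   probes grow the estimate incrementally. */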
static void*
__kmp_launch_worker( void *thr )
{
    int status, old_type, old_state;
#ifdef KMP_BLOCK_SIGNALS
    sigset_t    new_set, old_set;
#endif /* KMP_BLOCK_SIGNALS */
    void *exit_val;
#if KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD
    void * volatile padding = 0;
#endif
    int gtid;

    gtid = ((kmp_info_t*)thr) -> th.th_info.ds.ds_gtid;
    __kmp_gtid_set_specific( gtid );
#ifdef KMP_TDATA_GTID
    __kmp_gtid = gtid;
#endif
#if KMP_STATS_ENABLED
    // set __thread local index to point to thread-specific stats
    __kmp_stats_thread_ptr = ((kmp_info_t*)thr)->th.th_stats;
    KMP_START_EXPLICIT_TIMER(OMP_worker_thread_life);
    KMP_SET_THREAD_STATE(IDLE);
    KMP_INIT_PARTITIONED_TIMERS(OMP_idle);
#endif

#if USE_ITT_BUILD
    __kmp_itt_thread_name( gtid );
#endif /* USE_ITT_BUILD */

#if KMP_AFFINITY_SUPPORTED
    __kmp_affinity_set_init_mask( gtid, FALSE );
#endif

#ifdef KMP_CANCEL_THREADS
    status = pthread_setcanceltype( PTHREAD_CANCEL_ASYNCHRONOUS, & old_type );
    KMP_CHECK_SYSFAIL( "pthread_setcanceltype", status );
    status = pthread_setcancelstate( PTHREAD_CANCEL_ENABLE, & old_state );
    KMP_CHECK_SYSFAIL( "pthread_setcancelstate", status );
#endif

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
    // Set the FP control regs to be a copy of the parallel initialization
    // thread's.
    __kmp_clear_x87_fpu_status_word();
    __kmp_load_x87_fpu_control_word( &__kmp_init_x87_fpu_control_word );
    __kmp_load_mxcsr( &__kmp_init_mxcsr );
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

#ifdef KMP_BLOCK_SIGNALS
    status = sigfillset( & new_set );
    KMP_CHECK_SYSFAIL_ERRNO( "sigfillset", status );
    status = pthread_sigmask( SIG_BLOCK, & new_set, & old_set );
    KMP_CHECK_SYSFAIL( "pthread_sigmask", status );
#endif /* KMP_BLOCK_SIGNALS */

#if KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD
    if ( __kmp_stkoffset > 0 && gtid > 0 ) {
        padding = KMP_ALLOCA( gtid * __kmp_stkoffset );
    }
#endif

    KMP_MB();
    __kmp_set_stack_info( gtid, (kmp_info_t*)thr );

    __kmp_check_stack_overlap( (kmp_info_t*)thr );

    exit_val = __kmp_launch_thread( (kmp_info_t *) thr );

#ifdef KMP_BLOCK_SIGNALS
    status = pthread_sigmask( SIG_SETMASK, & old_set, NULL );
    KMP_CHECK_SYSFAIL( "pthread_sigmask", status );
#endif /* KMP_BLOCK_SIGNALS */

    return exit_val;
}
#if KMP_USE_MONITOR
/* The monitor thread controls all of the threads in the complex. */

static void*
__kmp_launch_monitor( void *thr )
{
    int         status, old_type, old_state;
#ifdef KMP_BLOCK_SIGNALS
    sigset_t    new_set;
#endif /* KMP_BLOCK_SIGNALS */
    struct timespec  interval;
    int yield_count;
    int yield_cycles = 0;

    KMP_MB();       /* Flush all pending memory write invalidates. */

    KA_TRACE( 10, ("__kmp_launch_monitor: #1 launched\n" ) );

    /* register us as the monitor thread */
    __kmp_gtid_set_specific( KMP_GTID_MONITOR );
#ifdef KMP_TDATA_GTID
    __kmp_gtid = KMP_GTID_MONITOR;
#endif

    KMP_MB();

#if USE_ITT_BUILD
    __kmp_itt_thread_ignore();    // Instruct Intel(R) Threading Tools to ignore the monitor thread.
#endif /* USE_ITT_BUILD */

    __kmp_set_stack_info( ((kmp_info_t*)thr)->th.th_info.ds.ds_gtid, (kmp_info_t*)thr );

    __kmp_check_stack_overlap( (kmp_info_t*)thr );

#ifdef KMP_CANCEL_THREADS
    status = pthread_setcanceltype( PTHREAD_CANCEL_ASYNCHRONOUS, & old_type );
    KMP_CHECK_SYSFAIL( "pthread_setcanceltype", status );
    status = pthread_setcancelstate( PTHREAD_CANCEL_ENABLE, & old_state );
    KMP_CHECK_SYSFAIL( "pthread_setcancelstate", status );
#endif

#if KMP_REAL_TIME_FIX
    // If the program runs under a real-time scheduling policy, try to raise
    // the monitor's priority so it is not starved by the workers.
    {
        int sched = sched_getscheduler( 0 );
        if ( sched == SCHED_FIFO || sched == SCHED_RR ) {
            struct sched_param param;
            int max_priority = sched_get_priority_max( sched );
            int rc;
            KMP_WARNING( RealTimeSchedNotSupported );
            sched_getparam( 0, & param );
            if ( param.sched_priority < max_priority ) {
                param.sched_priority += 1;
                rc = sched_setscheduler( 0, sched, & param );
                if ( rc != 0 ) {
                    int error = errno;
                    kmp_msg_t err_code = KMP_ERR( error );
                    __kmp_msg( kmp_ms_warning,
                               KMP_MSG( CantChangeMonitorPriority ),
                               err_code,
                               KMP_MSG( MonitorWillStarve ),
                               __kmp_msg_null );
                    if (__kmp_generate_warnings == kmp_warnings_off) {
                        __kmp_str_free(&err_code.str);
                    }
                }
            }
            else {
                // We cannot abort here, because the number of CPUs may be
                // enough for all the threads, including the monitor.
                __kmp_msg( kmp_ms_warning,
                           KMP_MSG( RunningAtMaxPriority ),
                           KMP_MSG( MonitorWillStarve ),
                           KMP_HNT( RunningAtMaxPriority ),
                           __kmp_msg_null );
            }
        }
        // AC: free the thread that waits for the monitor to start.
        TCW_4( __kmp_global.g.g_time.dt.t_value, 0 );
    }
#endif // KMP_REAL_TIME_FIX

    KMP_MB();       /* Flush all pending memory write invalidates. */

    if ( __kmp_monitor_wakeups == 1 ) {
        interval.tv_sec  = 1;
        interval.tv_nsec = 0;
    }
    else {
        interval.tv_sec  = 0;
        interval.tv_nsec = (KMP_NSEC_PER_SEC / __kmp_monitor_wakeups);
    }

    KA_TRACE( 10, ("__kmp_launch_monitor: #2 monitor\n" ) );

    if (__kmp_yield_cycle) {
        __kmp_yielding_on = 0;  /* Start out with yielding shut off */
        yield_count = __kmp_yield_off_count;
    } else {
        __kmp_yielding_on = 1;  /* Yielding is on permanently */
    }

    while( ! TCR_4( __kmp_global.g.g_done ) ) {
        struct timespec  now;
        struct timeval   tval;

        /* This thread monitors the state of the system. */

        KA_TRACE( 15, ( "__kmp_launch_monitor: update\n" ) );

        status = gettimeofday( &tval, NULL );
        KMP_CHECK_SYSFAIL_ERRNO( "gettimeofday", status );
        TIMEVAL_TO_TIMESPEC( &tval, &now );

        now.tv_sec  += interval.tv_sec;
        now.tv_nsec += interval.tv_nsec;

        if (now.tv_nsec >= KMP_NSEC_PER_SEC) {
            now.tv_sec  += 1;
            now.tv_nsec -= KMP_NSEC_PER_SEC;
        }

        status = pthread_mutex_lock( & __kmp_wait_mx.m_mutex );
        KMP_CHECK_SYSFAIL( "pthread_mutex_lock", status );
        // AC: the monitor should not fall asleep if g_done has been set.
        if ( !TCR_4(__kmp_global.g.g_done) ) {  // check once more under the mutex
            status = pthread_cond_timedwait( &__kmp_wait_cv.c_cond, &__kmp_wait_mx.m_mutex, &now );
            if ( status != 0 ) {
                if ( status != ETIMEDOUT && status != EINTR ) {
                    KMP_SYSFAIL( "pthread_cond_timedwait", status );
                }
            }
        }
        status = pthread_mutex_unlock( & __kmp_wait_mx.m_mutex );
        KMP_CHECK_SYSFAIL( "pthread_mutex_unlock", status );

        if (__kmp_yield_cycle) {
            yield_cycles++;
            if ( (yield_cycles % yield_count) == 0 ) {
                if (__kmp_yielding_on) {
                    __kmp_yielding_on = 0;   /* Turn it off now */
                    yield_count = __kmp_yield_off_count;
                } else {
                    __kmp_yielding_on = 1;   /* Turn it on now */
                    yield_count = __kmp_yield_on_count;
                }
                yield_cycles = 0;
            }
        } else {
            __kmp_yielding_on = 1;
        }

        TCW_4( __kmp_global.g.g_time.dt.t_value,
               TCR_4( __kmp_global.g.g_time.dt.t_value ) + 1 );

        KMP_MB();       /* Flush all pending memory write invalidates. */
    }

    KA_TRACE( 10, ("__kmp_launch_monitor: #3 cleanup\n" ) );

#ifdef KMP_BLOCK_SIGNALS
    status = sigfillset( & new_set );
    KMP_CHECK_SYSFAIL_ERRNO( "sigfillset", status );
    status = pthread_sigmask( SIG_UNBLOCK, & new_set, NULL );
    KMP_CHECK_SYSFAIL( "pthread_sigmask", status );
#endif /* KMP_BLOCK_SIGNALS */

    KA_TRACE( 10, ("__kmp_launch_monitor: #4 finished\n" ) );

    if( __kmp_global.g.g_abort != 0 ) {
        /* now we need to terminate the worker threads;
           the value of t_abort is the signal number */
        int gtid;

        KA_TRACE( 10, ("__kmp_launch_monitor: #5 terminate sig=%d\n", __kmp_global.g.g_abort ) );

        /* terminate the OpenMP worker threads */
        for (gtid = 1; gtid < __kmp_threads_capacity; ++gtid)
            __kmp_terminate_thread( gtid );

        __kmp_cleanup();

        KA_TRACE( 10, ("__kmp_launch_monitor: #6 raise sig=%d\n", __kmp_global.g.g_abort ) );

        if (__kmp_global.g.g_abort > 0)
            raise( __kmp_global.g.g_abort );
    }

    KA_TRACE( 10, ("__kmp_launch_monitor: #7 exit\n" ) );

    return thr;
}
#endif // KMP_USE_MONITOR

void
__kmp_create_worker( int gtid, kmp_info_t *th, size_t stack_size )
{
    pthread_t      handle;
    pthread_attr_t thread_attr;
    int            status;

    th->th.th_info.ds.ds_gtid = gtid;

#if KMP_STATS_ENABLED
    // sets up worker thread stats
    __kmp_acquire_tas_lock(&__kmp_stats_lock, gtid);

    // th->th.th_stats transfers the thread-specific stats pointer to
    // __kmp_launch_worker, where the worker sets its __thread local pointer.
    if(!KMP_UBER_GTID(gtid)) {
        th->th.th_stats = __kmp_stats_list->push_back(gtid);
    } else {
        // uber threads are created via __kmp_register_root() and keep the
        // current thread's stats.
        th->th.th_stats = __kmp_stats_thread_ptr;
    }
    __kmp_release_tas_lock(&__kmp_stats_lock, gtid);

#endif // KMP_STATS_ENABLED

    if ( KMP_UBER_GTID(gtid) ) {
        KA_TRACE( 10, ("__kmp_create_worker: uber thread (%d)\n", gtid ) );
        th -> th.th_info.ds.ds_thread = pthread_self();
        __kmp_set_stack_info( gtid, th );
        __kmp_check_stack_overlap( th );
        return;
    }

    KA_TRACE( 10, ("__kmp_create_worker: try to create thread (%d)\n", gtid ) );

    KMP_MB();       /* Flush all pending memory write invalidates. */

#ifdef KMP_THREAD_ATTR
    status = pthread_attr_init( &thread_attr );
    if ( status != 0 ) {
        __kmp_msg(kmp_ms_fatal, KMP_MSG( CantInitThreadAttrs ), KMP_ERR( status ), __kmp_msg_null);
    }
    status = pthread_attr_setdetachstate( & thread_attr, PTHREAD_CREATE_JOINABLE );
    if ( status != 0 ) {
        __kmp_msg(kmp_ms_fatal, KMP_MSG( CantSetWorkerState ), KMP_ERR( status ), __kmp_msg_null);
    }

    /* Set stack size for this thread now. The multiple of 2 keeps the
       requested stack size and the gtid-dependent stack offset from
       eliminating each other. */
    stack_size += gtid * __kmp_stkoffset * 2;

    KA_TRACE( 10, ( "__kmp_create_worker: T#%d, default stacksize = %lu bytes, "
                    "__kmp_stksize = %lu bytes, final stacksize = %lu bytes\n",
                    gtid, KMP_DEFAULT_STKSIZE, __kmp_stksize, stack_size ) );

# ifdef _POSIX_THREAD_ATTR_STACKSIZE
    status = pthread_attr_setstacksize( & thread_attr, stack_size );
#  ifdef KMP_BACKUP_STKSIZE
    if ( status != 0 ) {
        if ( ! __kmp_env_stksize ) {
            stack_size = KMP_BACKUP_STKSIZE + gtid * __kmp_stkoffset;
            __kmp_stksize = KMP_BACKUP_STKSIZE;
            KA_TRACE( 10, ("__kmp_create_worker: T#%d, default stacksize = %lu bytes, "
                           "__kmp_stksize = %lu bytes, (backup) final stacksize = %lu "
                           "bytes\n",
                           gtid, KMP_DEFAULT_STKSIZE, __kmp_stksize, stack_size )
                    );
            status = pthread_attr_setstacksize( &thread_attr, stack_size );
        }
    }
#  endif /* KMP_BACKUP_STKSIZE */
    if ( status != 0 ) {
        __kmp_msg(kmp_ms_fatal, KMP_MSG( CantSetWorkerStackSize, stack_size ), KMP_ERR( status ),
                  KMP_HNT( ChangeWorkerStackSize ), __kmp_msg_null);
    }
# endif /* _POSIX_THREAD_ATTR_STACKSIZE */
#endif /* KMP_THREAD_ATTR */

    status = pthread_create( & handle, & thread_attr, __kmp_launch_worker, (void *) th );
    if ( status != 0 || ! handle ) {
#ifdef _POSIX_THREAD_ATTR_STACKSIZE
        if ( status == EINVAL ) {
            __kmp_msg(kmp_ms_fatal, KMP_MSG( CantSetWorkerStackSize, stack_size ), KMP_ERR( status ),
                      KMP_HNT( IncreaseWorkerStackSize ), __kmp_msg_null);
        }
        if ( status == ENOMEM ) {
            __kmp_msg(kmp_ms_fatal, KMP_MSG( CantSetWorkerStackSize, stack_size ), KMP_ERR( status ),
                      KMP_HNT( DecreaseWorkerStackSize ), __kmp_msg_null);
        }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
        if ( status == EAGAIN ) {
            __kmp_msg(kmp_ms_fatal, KMP_MSG( NoResourcesForWorkerThread ), KMP_ERR( status ),
                      KMP_HNT( Decrease_NUM_THREADS ), __kmp_msg_null);
        }
        KMP_SYSFAIL( "pthread_create", status );
    }

    th->th.th_info.ds.ds_thread = handle;

#ifdef KMP_THREAD_ATTR
    status = pthread_attr_destroy( & thread_attr );
    if ( status ) {
        kmp_msg_t err_code = KMP_ERR( status );
        __kmp_msg(kmp_ms_warning, KMP_MSG( CantDestroyThreadAttrs ), err_code, __kmp_msg_null);
        if (__kmp_generate_warnings == kmp_warnings_off) {
            __kmp_str_free(&err_code.str);
        }
    }
#endif /* KMP_THREAD_ATTR */

    KMP_MB();       /* Flush all pending memory write invalidates. */

    KA_TRACE( 10, ("__kmp_create_worker: done creating thread (%d)\n", gtid ) );

} // __kmp_create_worker
#if KMP_USE_MONITOR
void
__kmp_create_monitor( kmp_info_t *th )
{
    pthread_t      handle;
    pthread_attr_t thread_attr;
    size_t         size;
    int            status;
    int            auto_adj_size = FALSE;

    if( __kmp_dflt_blocktime == KMP_MAX_BLOCKTIME ) {
        // We don't need the monitor thread in case of MAX_BLOCKTIME.
        KA_TRACE( 10, ("__kmp_create_monitor: skipping monitor thread because of MAX blocktime\n" ) );
        th->th.th_info.ds.ds_tid  = 0; // this makes reap_monitor a no-op
        th->th.th_info.ds.ds_gtid = 0;
        return;
    }
    KA_TRACE( 10, ("__kmp_create_monitor: try to create monitor\n" ) );

    KMP_MB();       /* Flush all pending memory write invalidates. */

    th->th.th_info.ds.ds_tid  = KMP_GTID_MONITOR;
    th->th.th_info.ds.ds_gtid = KMP_GTID_MONITOR;
#if KMP_REAL_TIME_FIX
    TCW_4( __kmp_global.g.g_time.dt.t_value, -1 ); // Will use it for synchronization a bit later.
#else
    TCW_4( __kmp_global.g.g_time.dt.t_value, 0 );
#endif // KMP_REAL_TIME_FIX

#ifdef KMP_THREAD_ATTR
    if ( __kmp_monitor_stksize == 0 ) {
        __kmp_monitor_stksize = KMP_DEFAULT_MONITOR_STKSIZE;
        auto_adj_size = TRUE;
    }
    status = pthread_attr_init( &thread_attr );
    if ( status != 0 ) {
        __kmp_msg(kmp_ms_fatal, KMP_MSG( CantInitThreadAttrs ), KMP_ERR( status ), __kmp_msg_null);
    }
    status = pthread_attr_setdetachstate( & thread_attr, PTHREAD_CREATE_JOINABLE );
    if ( status != 0 ) {
        __kmp_msg(kmp_ms_fatal, KMP_MSG( CantSetMonitorState ), KMP_ERR( status ), __kmp_msg_null);
    }

#ifdef _POSIX_THREAD_ATTR_STACKSIZE
    status = pthread_attr_getstacksize( & thread_attr, & size );
    KMP_CHECK_SYSFAIL( "pthread_attr_getstacksize", status );
#else
    size = __kmp_sys_min_stksize;
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
#endif /* KMP_THREAD_ATTR */

    if ( __kmp_monitor_stksize == 0 ) {
        __kmp_monitor_stksize = KMP_DEFAULT_MONITOR_STKSIZE;
    }
    if ( __kmp_monitor_stksize < __kmp_sys_min_stksize ) {
        __kmp_monitor_stksize = __kmp_sys_min_stksize;
    }

    KA_TRACE( 10, ( "__kmp_create_monitor: default stacksize = %lu bytes,"
                    "requested stacksize = %lu bytes\n",
                    size, __kmp_monitor_stksize ) );

    retry:

    /* Set stack size for this thread now. */
#ifdef _POSIX_THREAD_ATTR_STACKSIZE
    KA_TRACE( 10, ( "__kmp_create_monitor: setting stacksize = %lu bytes,",
                    __kmp_monitor_stksize ) );
    status = pthread_attr_setstacksize( & thread_attr, __kmp_monitor_stksize );
    if ( status != 0 ) {
        if ( auto_adj_size ) {
            __kmp_monitor_stksize *= 2;
            goto retry;
        }
        kmp_msg_t err_code = KMP_ERR( status );
        __kmp_msg( kmp_ms_warning,  // should this be fatal?
                   KMP_MSG( CantSetMonitorStackSize, (long int) __kmp_monitor_stksize ),
                   err_code,
                   KMP_HNT( ChangeMonitorStackSize ),
                   __kmp_msg_null );
        if (__kmp_generate_warnings == kmp_warnings_off) {
            __kmp_str_free(&err_code.str);
        }
    }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */

    status = pthread_create( &handle, & thread_attr, __kmp_launch_monitor, (void *) th );

    if ( status != 0 ) {
#ifdef _POSIX_THREAD_ATTR_STACKSIZE
        if ( status == EINVAL ) {
            if ( auto_adj_size && ( __kmp_monitor_stksize < (size_t)0x40000000 ) ) {
                __kmp_monitor_stksize *= 2;
                goto retry;
            }
            __kmp_msg(kmp_ms_fatal, KMP_MSG( CantSetMonitorStackSize, __kmp_monitor_stksize ),
                      KMP_ERR( status ), KMP_HNT( IncreaseMonitorStackSize ), __kmp_msg_null);
        }
        if ( status == ENOMEM ) {
            __kmp_msg(kmp_ms_fatal, KMP_MSG( CantSetMonitorStackSize, __kmp_monitor_stksize ),
                      KMP_ERR( status ), KMP_HNT( DecreaseMonitorStackSize ), __kmp_msg_null);
        }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
        if ( status == EAGAIN ) {
            __kmp_msg(kmp_ms_fatal, KMP_MSG( NoResourcesForMonitorThread ),
                      KMP_ERR( status ), KMP_HNT( DecreaseNumberOfThreadsInUse ), __kmp_msg_null);
        }
        KMP_SYSFAIL( "pthread_create", status );
    }

    th->th.th_info.ds.ds_thread = handle;

#if KMP_REAL_TIME_FIX
    // Wait until the monitor thread has really started and set its priority.
    KMP_DEBUG_ASSERT( sizeof( kmp_uint32 ) == sizeof( __kmp_global.g.g_time.dt.t_value ) );
    __kmp_wait_yield_4(
        (kmp_uint32 volatile *) & __kmp_global.g.g_time.dt.t_value, -1, & __kmp_neq_4, NULL
    );
#endif // KMP_REAL_TIME_FIX

#ifdef KMP_THREAD_ATTR
    status = pthread_attr_destroy( & thread_attr );
    if ( status != 0 ) {
        kmp_msg_t err_code = KMP_ERR( status );
        __kmp_msg(kmp_ms_warning, KMP_MSG( CantDestroyThreadAttrs ), err_code, __kmp_msg_null);
        if (__kmp_generate_warnings == kmp_warnings_off) {
            __kmp_str_free(&err_code.str);
        }
    }
#endif

    KMP_MB();       /* Flush all pending memory write invalidates. */

    KA_TRACE( 10, ( "__kmp_create_monitor: monitor created %#.8lx\n", th->th.th_info.ds.ds_thread ) );

} // __kmp_create_monitor
#endif // KMP_USE_MONITOR

void
__kmp_exit_thread( int exit_status )
{
    pthread_exit( (void *)(intptr_t) exit_status );
} // __kmp_exit_thread

#if KMP_USE_MONITOR
void __kmp_resume_monitor();

void
__kmp_reap_monitor( kmp_info_t *th )
{
    int   status;
    void *exit_val;

    KA_TRACE( 10, ("__kmp_reap_monitor: try to reap monitor thread with handle %#.8lx\n",
                   th->th.th_info.ds.ds_thread ) );

    // If the monitor has been created, its tid and gtid should be
    // KMP_GTID_MONITOR. If both are 0, the monitor never started; if both are
    // KMP_GTID_DNE, the monitor has already been shut down.
    KMP_DEBUG_ASSERT( th->th.th_info.ds.ds_tid == th->th.th_info.ds.ds_gtid );
    if ( th->th.th_info.ds.ds_gtid != KMP_GTID_MONITOR ) {
        KA_TRACE( 10, ("__kmp_reap_monitor: monitor did not start, returning\n") );
        return;
    }

    KMP_MB();       /* Flush all pending memory write invalidates. */

    /* First check whether the monitor thread exists, and wake it up if so.
       This avoids a performance problem when the monitor sleeps through a
       blocktime-sized interval. */
    status = pthread_kill( th->th.th_info.ds.ds_thread, 0 );
    if (status != ESRCH) {
        __kmp_resume_monitor();   // Wake up the monitor thread.
    }
    KA_TRACE( 10, ("__kmp_reap_monitor: try to join with monitor\n") );
    status = pthread_join( th->th.th_info.ds.ds_thread, & exit_val);
    if (exit_val != th) {
        __kmp_msg( kmp_ms_fatal,
                   KMP_MSG( ReapMonitorError ),
                   KMP_ERR( status ),
                   __kmp_msg_null );
    }

    th->th.th_info.ds.ds_tid  = KMP_GTID_DNE;
    th->th.th_info.ds.ds_gtid = KMP_GTID_DNE;

    KA_TRACE( 10, ("__kmp_reap_monitor: done reaping monitor thread with handle %#.8lx\n",
                   th->th.th_info.ds.ds_thread ) );

    KMP_MB();       /* Flush all pending memory write invalidates. */
}
#endif // KMP_USE_MONITOR

void
__kmp_reap_worker( kmp_info_t *th )
{
    int   status;
    void *exit_val;

    KMP_MB();       /* Flush all pending memory write invalidates. */

    KA_TRACE( 10, ("__kmp_reap_worker: try to reap T#%d\n", th->th.th_info.ds.ds_gtid ) );

    status = pthread_join( th->th.th_info.ds.ds_thread, & exit_val);
#ifdef KMP_DEBUG
    /* Don't expose these to the user until we understand when they trigger. */
    if ( status != 0 ) {
        __kmp_msg(kmp_ms_fatal, KMP_MSG( ReapWorkerError ), KMP_ERR( status ), __kmp_msg_null);
    }
    if ( exit_val != th ) {
        KA_TRACE( 10, ( "__kmp_reap_worker: worker T#%d did not reap properly, exit_val = %p\n",
                        th->th.th_info.ds.ds_gtid, exit_val ) );
    }
#endif /* KMP_DEBUG */

    KA_TRACE( 10, ("__kmp_reap_worker: done reaping T#%d\n", th->th.th_info.ds.ds_gtid ) );

    KMP_MB();       /* Flush all pending memory write invalidates. */
}
#if KMP_HANDLE_SIGNALS

static void
__kmp_null_handler( int signo )
{
    // Do nothing, for doing SIG_IGN-type actions.
} // __kmp_null_handler

static void
__kmp_team_handler( int signo )
{
    if ( __kmp_global.g.g_abort == 0 ) {
        /* Stage 1 signal handler: shut down all of the threads. */
        #ifdef KMP_DEBUG
            __kmp_debug_printf( "__kmp_team_handler: caught signal = %d\n", signo );
        #endif // KMP_DEBUG
        switch ( signo ) {
            case SIGHUP  :
            case SIGINT  :
            case SIGQUIT :
            case SIGILL  :
            case SIGABRT :
            case SIGFPE  :
            case SIGBUS  :
            case SIGSEGV :
            #ifdef SIGSYS
                case SIGSYS :
            #endif
            case SIGTERM :
                if ( __kmp_debug_buf ) {
                    __kmp_dump_debug_buffer( );
                }
                KMP_MB();       // Flush all pending memory write invalidates.
                TCW_4( __kmp_global.g.g_abort, signo );
                KMP_MB();       // Flush all pending memory write invalidates.
                TCW_4( __kmp_global.g.g_done, TRUE );
                KMP_MB();       // Flush all pending memory write invalidates.
                break;
            default:
                #ifdef KMP_DEBUG
                    __kmp_debug_printf( "__kmp_team_handler: unknown signal type" );
                #endif // KMP_DEBUG
                break;
        } // switch
    } // if
} // __kmp_team_handler

static
void __kmp_sigaction( int signum, const struct sigaction * act, struct sigaction * oldact ) {
    int rc = sigaction( signum, act, oldact );
    KMP_CHECK_SYSFAIL_ERRNO( "sigaction", rc );
}

static void
__kmp_install_one_handler( int sig, sig_func_t handler_func, int parallel_init )
{
    KMP_MB();       // Flush all pending memory write invalidates.
    KB_TRACE( 60, ( "__kmp_install_one_handler( %d, ..., %d )\n", sig, parallel_init ) );
    if ( parallel_init ) {
        struct sigaction new_action;
        struct sigaction old_action;
        new_action.sa_handler = handler_func;
        new_action.sa_flags   = 0;
        sigfillset( & new_action.sa_mask );
        __kmp_sigaction( sig, & new_action, & old_action );
        if ( old_action.sa_handler == __kmp_sighldrs[ sig ].sa_handler ) {
            sigaddset( & __kmp_sigset, sig );
        } else {
            // Restore/keep the user's handler if one was previously installed.
            __kmp_sigaction( sig, & old_action, NULL );
        }
    } else {
        // Save the initial/system signal handlers to detect user-installed handlers later.
        __kmp_sigaction( sig, NULL, & __kmp_sighldrs[ sig ] );
    }
    KMP_MB();       // Flush all pending memory write invalidates.
} // __kmp_install_one_handler

static void
__kmp_remove_one_handler( int sig )
{
    KB_TRACE( 60, ( "__kmp_remove_one_handler( %d )\n", sig ) );
    if ( sigismember( & __kmp_sigset, sig ) ) {
        struct sigaction old;
        KMP_MB();       // Flush all pending memory write invalidates.
        __kmp_sigaction( sig, & __kmp_sighldrs[ sig ], & old );
        if ( ( old.sa_handler != __kmp_team_handler ) && ( old.sa_handler != __kmp_null_handler ) ) {
            // Restore the user's signal handler.
            KB_TRACE( 10, ( "__kmp_remove_one_handler: oops, not our handler, restoring: sig=%d\n", sig ) );
            __kmp_sigaction( sig, & old, NULL );
        }
        sigdelset( & __kmp_sigset, sig );
        KMP_MB();       // Flush all pending memory write invalidates.
    }
} // __kmp_remove_one_handler

void
__kmp_install_signals( int parallel_init )
{
    KB_TRACE( 10, ( "__kmp_install_signals( %d )\n", parallel_init ) );
    if ( __kmp_handle_signals || ! parallel_init ) {
        // If ! parallel_init, we do not install handlers, just save the
        // original handlers. Do it even if __kmp_handle_signals is 0.
        sigemptyset( & __kmp_sigset );
        __kmp_install_one_handler( SIGHUP,  __kmp_team_handler, parallel_init );
        __kmp_install_one_handler( SIGINT,  __kmp_team_handler, parallel_init );
        __kmp_install_one_handler( SIGQUIT, __kmp_team_handler, parallel_init );
        __kmp_install_one_handler( SIGILL,  __kmp_team_handler, parallel_init );
        __kmp_install_one_handler( SIGABRT, __kmp_team_handler, parallel_init );
        __kmp_install_one_handler( SIGFPE,  __kmp_team_handler, parallel_init );
        __kmp_install_one_handler( SIGBUS,  __kmp_team_handler, parallel_init );
        __kmp_install_one_handler( SIGSEGV, __kmp_team_handler, parallel_init );
#ifdef SIGSYS
        __kmp_install_one_handler( SIGSYS,  __kmp_team_handler, parallel_init );
#endif // SIGSYS
        __kmp_install_one_handler( SIGTERM, __kmp_team_handler, parallel_init );
#ifdef SIGPIPE
        __kmp_install_one_handler( SIGPIPE, __kmp_team_handler, parallel_init );
#endif // SIGPIPE
    }
} // __kmp_install_signals

void
__kmp_remove_signals( void )
{
    int sig;
    KB_TRACE( 10, ( "__kmp_remove_signals()\n" ) );
    for ( sig = 1; sig < NSIG; ++ sig ) {
        __kmp_remove_one_handler( sig );
    }
} // __kmp_remove_signals
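/* Net effect of this block (explanatory note): with KMP_HANDLE_SIGNALS, the
   usual termination signals are routed through __kmp_team_handler, which
   records the signal number in g_abort and sets g_done; the monitor thread
   then cancels the workers and re-raises the signal (see
   __kmp_launch_monitor above). __kmp_sigset tracks exactly the signals whose
   handler the runtime owns, so __kmp_remove_one_handler() can restore a
   user-installed handler untouched. */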
#endif // KMP_HANDLE_SIGNALS

void
__kmp_enable( int new_state )
{
    #ifdef KMP_CANCEL_THREADS
        int status, old_state;
        status = pthread_setcancelstate( new_state, & old_state );
        KMP_CHECK_SYSFAIL( "pthread_setcancelstate", status );
        KMP_DEBUG_ASSERT( old_state == PTHREAD_CANCEL_DISABLE );
    #endif
}

void
__kmp_disable( int * old_state )
{
    #ifdef KMP_CANCEL_THREADS
        int status;
        status = pthread_setcancelstate( PTHREAD_CANCEL_DISABLE, old_state );
        KMP_CHECK_SYSFAIL( "pthread_setcancelstate", status );
    #endif
}
static void
__kmp_atfork_prepare (void)
{
    /* nothing to do */
}

static void
__kmp_atfork_parent (void)
{
    /* nothing to do */
}

/* Reset the library so execution in the child starts "all over again" with
   clean data structures in initial states. Don't worry about freeing memory
   allocated by the parent; just abandon it to be safe. */
static void
__kmp_atfork_child (void)
{
    /* TODO make sure this is done right for nested/sibling */

    ++__kmp_fork_count;

    __kmp_init_runtime = FALSE;
#if KMP_USE_MONITOR
    __kmp_init_monitor = 0;
#endif
    __kmp_init_parallel = FALSE;
    __kmp_init_middle = FALSE;
    __kmp_init_serial = FALSE;
    TCW_4(__kmp_init_gtid, FALSE);
    __kmp_init_common = FALSE;

    TCW_4(__kmp_init_user_locks, FALSE);
#if ! KMP_USE_DYNAMIC_LOCK
    __kmp_user_lock_table.used = 1;
    __kmp_user_lock_table.allocated = 0;
    __kmp_user_lock_table.table = NULL;
    __kmp_lock_blocks = NULL;
#endif

    __kmp_all_nth = 0;
    TCW_4(__kmp_nth, 0);

    /* Must actually zero all the *cache arguments passed to
       __kmpc_threadprivate here so threadprivate doesn't use stale data. */
    KA_TRACE( 10, ( "__kmp_atfork_child: checking cache address list %p\n",
                    __kmp_threadpriv_cache_list ) );

    while ( __kmp_threadpriv_cache_list != NULL ) {

        if ( *__kmp_threadpriv_cache_list -> addr != NULL ) {
            KC_TRACE( 50, ( "__kmp_atfork_child: zeroing cache at address %p\n",
                            &(*__kmp_threadpriv_cache_list -> addr) ) );

            *__kmp_threadpriv_cache_list -> addr = NULL;
        }
        __kmp_threadpriv_cache_list = __kmp_threadpriv_cache_list -> next;
    }

    __kmp_init_runtime = FALSE;

    /* reset statically initialized locks */
    __kmp_init_bootstrap_lock( &__kmp_initz_lock );
    __kmp_init_bootstrap_lock( &__kmp_stdio_lock );
    __kmp_init_bootstrap_lock( &__kmp_console_lock );
}

void
__kmp_register_atfork(void) {
    if ( __kmp_need_register_atfork ) {
        int status = pthread_atfork( __kmp_atfork_prepare, __kmp_atfork_parent, __kmp_atfork_child );
        KMP_CHECK_SYSFAIL( "pthread_atfork", status );
        __kmp_need_register_atfork = FALSE;
    }
}
void
__kmp_suspend_initialize( void )
{
    int status;
    status = pthread_mutexattr_init( &__kmp_suspend_mutex_attr );
    KMP_CHECK_SYSFAIL( "pthread_mutexattr_init", status );
    status = pthread_condattr_init( &__kmp_suspend_cond_attr );
    KMP_CHECK_SYSFAIL( "pthread_condattr_init", status );
}

static void
__kmp_suspend_initialize_thread( kmp_info_t *th )
{
    ANNOTATE_HAPPENS_AFTER(&th->th.th_suspend_init_count);
    if ( th->th.th_suspend_init_count <= __kmp_fork_count ) {
        /* this means we haven't initialized the suspension pthread objects for
           this thread in this instance of the process */
        int status;
        status = pthread_cond_init( &th->th.th_suspend_cv.c_cond, &__kmp_suspend_cond_attr );
        KMP_CHECK_SYSFAIL( "pthread_cond_init", status );
        status = pthread_mutex_init( &th->th.th_suspend_mx.m_mutex, & __kmp_suspend_mutex_attr );
        KMP_CHECK_SYSFAIL( "pthread_mutex_init", status );
        *(volatile int*)&th->th.th_suspend_init_count = __kmp_fork_count + 1;
        ANNOTATE_HAPPENS_BEFORE(&th->th.th_suspend_init_count);
    }
}

void
__kmp_suspend_uninitialize_thread( kmp_info_t *th )
{
    if(th->th.th_suspend_init_count > __kmp_fork_count) {
        /* this means we have initialized the suspension pthread objects for
           this thread in this instance of the process */
        int status;

        status = pthread_cond_destroy( &th->th.th_suspend_cv.c_cond );
        if ( status != 0 && status != EBUSY ) {
            KMP_SYSFAIL( "pthread_cond_destroy", status );
        }
        status = pthread_mutex_destroy( &th->th.th_suspend_mx.m_mutex );
        if ( status != 0 && status != EBUSY ) {
            KMP_SYSFAIL( "pthread_mutex_destroy", status );
        }
        --th->th.th_suspend_init_count;
        KMP_DEBUG_ASSERT(th->th.th_suspend_init_count == __kmp_fork_count);
    }
}
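/* Why th_suspend_init_count is compared against __kmp_fork_count
   (explanatory note): pthread mutexes and condition variables are not
   reliably usable in a child process after fork(). __kmp_atfork_child()
   increments __kmp_fork_count, which makes every thread's init count "stale"
   and forces __kmp_suspend_initialize_thread() to create fresh pthread
   objects in the child. */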
/* This routine puts the calling thread to sleep after setting the sleep bit
   for the indicated flag variable to true. */
template <class C>
static inline void __kmp_suspend_template( int th_gtid, C *flag )
{
    KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_suspend);
    kmp_info_t *th = __kmp_threads[th_gtid];
    int status;
    typename C::flag_t old_spin;

    KF_TRACE( 30, ("__kmp_suspend_template: T#%d enter for flag = %p\n", th_gtid, flag->get() ) );

    __kmp_suspend_initialize_thread( th );

    status = pthread_mutex_lock( &th->th.th_suspend_mx.m_mutex );
    KMP_CHECK_SYSFAIL( "pthread_mutex_lock", status );

    KF_TRACE( 10, ( "__kmp_suspend_template: T#%d setting sleep bit for spin(%p)\n",
                    th_gtid, flag->get() ) );

    old_spin = flag->set_sleeping();

    KF_TRACE( 5, ( "__kmp_suspend_template: T#%d set sleep bit for spin(%p)==%x, was %x\n",
                   th_gtid, flag->get(), *(flag->get()), old_spin ) );

    if ( flag->done_check_val(old_spin) ) {
        old_spin = flag->unset_sleeping();
        KF_TRACE( 5, ( "__kmp_suspend_template: T#%d false alarm, reset sleep bit for spin(%p)\n",
                       th_gtid, flag->get()) );
    }
    else {
        /* Encapsulate in a loop as the documentation states that this may
           "with low probability" return when the condition variable has not
           been signaled or broadcast. */
        int deactivated = FALSE;
        TCW_PTR(th->th.th_sleep_loc, (void *)flag);
        while ( flag->is_sleeping() ) {
#ifdef DEBUG_SUSPEND
            char buffer[128];
            __kmp_suspend_count++;
            __kmp_print_cond( buffer, &th->th.th_suspend_cv );
            __kmp_printf( "__kmp_suspend_template: suspending T#%d: %s\n", th_gtid, buffer );
#endif
            // Mark the thread as no longer active (only in the first iteration of the loop).
            if ( ! deactivated ) {
                th->th.th_active = FALSE;
                if ( th->th.th_active_in_pool ) {
                    th->th.th_active_in_pool = FALSE;
                    KMP_TEST_THEN_DEC32(
                        (kmp_int32 *) &__kmp_thread_pool_active_nth );
                    KMP_DEBUG_ASSERT( TCR_4(__kmp_thread_pool_active_nth) >= 0 );
                }
                deactivated = TRUE;
            }

#if USE_SUSPEND_TIMEOUT
            struct timespec now;
            struct timeval  tval;
            int msecs;

            status = gettimeofday( &tval, NULL );
            KMP_CHECK_SYSFAIL_ERRNO( "gettimeofday", status );
            TIMEVAL_TO_TIMESPEC( &tval, &now );

            msecs = (4*__kmp_dflt_blocktime) + 200;
            now.tv_sec  += msecs / 1000;
            now.tv_nsec += (msecs % 1000)*1000;

            KF_TRACE( 15, ( "__kmp_suspend_template: T#%d about to perform pthread_cond_timedwait\n",
                            th_gtid ) );
            status = pthread_cond_timedwait( &th->th.th_suspend_cv.c_cond, &th->th.th_suspend_mx.m_mutex, & now );
#else
            KF_TRACE( 15, ( "__kmp_suspend_template: T#%d about to perform pthread_cond_wait\n",
                            th_gtid ) );
            status = pthread_cond_wait( &th->th.th_suspend_cv.c_cond, &th->th.th_suspend_mx.m_mutex );
#endif

            if ( (status != 0) && (status != EINTR) && (status != ETIMEDOUT) ) {
                KMP_SYSFAIL( "pthread_cond_wait", status );
            }
#ifdef KMP_DEBUG
            if (status == ETIMEDOUT) {
                if ( flag->is_sleeping() ) {
                    KF_TRACE( 100, ( "__kmp_suspend_template: T#%d timeout wakeup\n", th_gtid ) );
                } else {
                    KF_TRACE( 2, ( "__kmp_suspend_template: T#%d timeout wakeup, sleep bit not set!\n",
                                   th_gtid ) );
                }
            }
            else if ( flag->is_sleeping() ) {
                KF_TRACE( 100, ( "__kmp_suspend_template: T#%d spurious wakeup\n", th_gtid ) );
            }
#endif
        } // while

        // Mark the thread as active again (if it was previously marked as inactive).
        if ( deactivated ) {
            th->th.th_active = TRUE;
            if ( TCR_4(th->th.th_in_pool) ) {
                KMP_TEST_THEN_INC32( (kmp_int32 *) &__kmp_thread_pool_active_nth );
                th->th.th_active_in_pool = TRUE;
            }
        }
    }

#ifdef DEBUG_SUSPEND
    {
        char buffer[128];
        __kmp_print_cond( buffer, &th->th.th_suspend_cv);
        __kmp_printf( "__kmp_suspend_template: T#%d has awakened: %s\n", th_gtid, buffer );
    }
#endif

    status = pthread_mutex_unlock( &th->th.th_suspend_mx.m_mutex );
    KMP_CHECK_SYSFAIL( "pthread_mutex_unlock", status );

    KF_TRACE( 30, ("__kmp_suspend_template: T#%d exit\n", th_gtid ) );
}

void __kmp_suspend_32( int th_gtid, kmp_flag_32 *flag) {
    __kmp_suspend_template(th_gtid, flag);
}
void __kmp_suspend_64( int th_gtid, kmp_flag_64 *flag) {
    __kmp_suspend_template(th_gtid, flag);
}
void __kmp_suspend_oncore( int th_gtid, kmp_flag_oncore *flag) {
    __kmp_suspend_template(th_gtid, flag);
}
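/* Sleep/wake protocol in brief (illustrative summary): the waiter sets the
   flag's sleep bit *under the mutex*, re-checks the done condition, and only
   then blocks on the condition variable; the waker (__kmp_resume_template
   below) clears the sleep bit under the same mutex before signaling. Because
   both sides hold th_suspend_mx while touching the sleep bit, the classic
   lost-wakeup interleaving -- the waker signaling between the waiter's check
   and its wait -- cannot occur. */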
/* This routine signals the thread specified by target_gtid to wake up after
   setting the sleep bit indicated by the flag argument to FALSE. The target
   thread must already have called __kmp_suspend_template(). */
template <class C>
static inline void __kmp_resume_template( int target_gtid, C *flag )
{
    KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_resume);
    kmp_info_t *th = __kmp_threads[target_gtid];
    int status;

#ifdef KMP_DEBUG
    int gtid = TCR_4(__kmp_init_gtid) ? __kmp_get_gtid() : -1;
#endif

    KF_TRACE( 30, ( "__kmp_resume_template: T#%d wants to wakeup T#%d enter\n", gtid, target_gtid ) );
    KMP_DEBUG_ASSERT( gtid != target_gtid );

    __kmp_suspend_initialize_thread( th );

    status = pthread_mutex_lock( &th->th.th_suspend_mx.m_mutex );
    KMP_CHECK_SYSFAIL( "pthread_mutex_lock", status );

    if (!flag) { // coming from __kmp_null_resume_wrapper
        flag = (C *)th->th.th_sleep_loc;
    }

    // First, check if the flag is null or its type has changed. If so, someone
    // else woke the thread up.
    if (!flag || flag->get_type() != flag->get_ptr_type()) { // get_ptr_type shows what the flag was cast to
        KF_TRACE( 5, ( "__kmp_resume_template: T#%d exiting, thread T#%d already awake: flag(%p)\n",
                       gtid, target_gtid, NULL ) );
        status = pthread_mutex_unlock( &th->th.th_suspend_mx.m_mutex );
        KMP_CHECK_SYSFAIL( "pthread_mutex_unlock", status );
        return;
    }
    else { // if multiple threads are sleeping, the flag refers to a specific thread here
        typename C::flag_t old_spin = flag->unset_sleeping();
        if ( ! flag->is_sleeping_val(old_spin) ) {
            KF_TRACE( 5, ( "__kmp_resume_template: T#%d exiting, thread T#%d already awake: flag(%p): "
                           "%u => %u\n",
                           gtid, target_gtid, flag->get(), old_spin, *flag->get() ) );
            status = pthread_mutex_unlock( &th->th.th_suspend_mx.m_mutex );
            KMP_CHECK_SYSFAIL( "pthread_mutex_unlock", status );
            return;
        }
        KF_TRACE( 5, ( "__kmp_resume_template: T#%d about to wakeup T#%d, reset sleep bit for flag's loc(%p): "
                       "%u => %u\n",
                       gtid, target_gtid, flag->get(), old_spin, *flag->get() ) );
    }
    TCW_PTR(th->th.th_sleep_loc, NULL);

#ifdef DEBUG_SUSPEND
    {
        char buffer[128];
        __kmp_print_cond( buffer, &th->th.th_suspend_cv );
        __kmp_printf( "__kmp_resume_template: T#%d resuming T#%d: %s\n", gtid, target_gtid, buffer );
    }
#endif

    status = pthread_cond_signal( &th->th.th_suspend_cv.c_cond );
    KMP_CHECK_SYSFAIL( "pthread_cond_signal", status );
    status = pthread_mutex_unlock( &th->th.th_suspend_mx.m_mutex );
    KMP_CHECK_SYSFAIL( "pthread_mutex_unlock", status );
    KF_TRACE( 30, ( "__kmp_resume_template: T#%d exiting after signaling wake up for T#%d\n",
                    gtid, target_gtid ) );
}

void __kmp_resume_32( int target_gtid, kmp_flag_32 *flag) {
    __kmp_resume_template(target_gtid, flag);
}
void __kmp_resume_64( int target_gtid, kmp_flag_64 *flag) {
    __kmp_resume_template(target_gtid, flag);
}
void __kmp_resume_oncore( int target_gtid, kmp_flag_oncore *flag) {
    __kmp_resume_template(target_gtid, flag);
}
#if KMP_USE_MONITOR
void
__kmp_resume_monitor()
{
    KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_resume);
    int status;
#ifdef KMP_DEBUG
    int gtid = TCR_4(__kmp_init_gtid) ? __kmp_get_gtid() : -1;
    KF_TRACE( 30, ( "__kmp_resume_monitor: T#%d wants to wakeup T#%d enter\n",
                    gtid, KMP_GTID_MONITOR ) );
    KMP_DEBUG_ASSERT( gtid != KMP_GTID_MONITOR );
#endif
    status = pthread_mutex_lock( &__kmp_wait_mx.m_mutex );
    KMP_CHECK_SYSFAIL( "pthread_mutex_lock", status );
#ifdef DEBUG_SUSPEND
    {
        char buffer[128];
        __kmp_print_cond( buffer, &__kmp_wait_cv.c_cond );
        __kmp_printf( "__kmp_resume_monitor: T#%d resuming T#%d: %s\n", gtid, KMP_GTID_MONITOR, buffer );
    }
#endif
    status = pthread_cond_signal( &__kmp_wait_cv.c_cond );
    KMP_CHECK_SYSFAIL( "pthread_cond_signal", status );
    status = pthread_mutex_unlock( &__kmp_wait_mx.m_mutex );
    KMP_CHECK_SYSFAIL( "pthread_mutex_unlock", status );
    KF_TRACE( 30, ( "__kmp_resume_monitor: T#%d exiting after signaling wake up for T#%d\n",
                    gtid, KMP_GTID_MONITOR ) );
}
#endif // KMP_USE_MONITOR

void
__kmp_yield( int cond )
{
    if ( cond
#if KMP_USE_MONITOR
         && __kmp_yielding_on
#endif
       )
    {
        sched_yield();
    }
}
void
__kmp_gtid_set_specific( int gtid )
{
    if( __kmp_init_gtid ) {
        int status;
        status = pthread_setspecific( __kmp_gtid_threadprivate_key, (void*)(intptr_t)(gtid+1) );
        KMP_CHECK_SYSFAIL( "pthread_setspecific", status );
    } else {
        KA_TRACE( 50, ( "__kmp_gtid_set_specific: runtime shutdown, returning\n" ) );
    }
}

int
__kmp_gtid_get_specific()
{
    int gtid;
    if ( !__kmp_init_gtid ) {
        KA_TRACE( 50, ( "__kmp_gtid_get_specific: runtime shutdown, returning KMP_GTID_SHUTDOWN\n" ) );
        return KMP_GTID_SHUTDOWN;
    }
    gtid = (int)(size_t)pthread_getspecific( __kmp_gtid_threadprivate_key );
    if ( gtid == 0 ) {
        gtid = KMP_GTID_DNE;
    }
    else {
        gtid--;   // stored value is gtid+1, so 0 can mean "no value set"
    }
    KA_TRACE( 50, ( "__kmp_gtid_get_specific: key:%d gtid:%d\n",
                    __kmp_gtid_threadprivate_key, gtid ));
    return gtid;
}
double
__kmp_read_cpu_time( void )
{
    struct tms buffer;

    times( & buffer );

    return (buffer.tms_utime + buffer.tms_cutime) / (double) CLOCKS_PER_SEC;
}

int
__kmp_read_system_info( struct kmp_sys_info *info )
{
    int status;
    struct rusage r_usage;

    memset( info, 0, sizeof( *info ) );

    status = getrusage( RUSAGE_SELF, &r_usage);
    KMP_CHECK_SYSFAIL_ERRNO( "getrusage", status );

    info->maxrss  = r_usage.ru_maxrss;  // maximum resident set size (in kilobytes)
    info->minflt  = r_usage.ru_minflt;  // page faults serviced without any I/O
    info->majflt  = r_usage.ru_majflt;  // page faults serviced that required I/O
    info->nswap   = r_usage.ru_nswap;   // times the process was swapped out of memory
    info->inblock = r_usage.ru_inblock; // times the file system had to perform input
    info->oublock = r_usage.ru_oublock; // times the file system had to perform output
    info->nvcsw   = r_usage.ru_nvcsw;   // voluntary context switches
    info->nivcsw  = r_usage.ru_nivcsw;  // involuntary context switches

    return (status != 0);
}
void
__kmp_read_system_time( double *delta )
{
    double          t_ns;
    struct timeval  tval;
    struct timespec stop;
    int             status;

    status = gettimeofday( &tval, NULL );
    KMP_CHECK_SYSFAIL_ERRNO( "gettimeofday", status );
    TIMEVAL_TO_TIMESPEC( &tval, &stop );
    t_ns = TS2NS(stop) - TS2NS(__kmp_sys_timer_data.start);
    *delta = (t_ns * 1e-9);
}

void
__kmp_clear_system_time( void )
{
    struct timeval tval;
    int status;
    status = gettimeofday( &tval, NULL );
    KMP_CHECK_SYSFAIL_ERRNO( "gettimeofday", status );
    TIMEVAL_TO_TIMESPEC( &tval, &__kmp_sys_timer_data.start );
}
#ifdef BUILD_TV

void
__kmp_tv_threadprivate_store( kmp_info_t *th, void *global_addr, void *thread_addr )
{
    struct tv_data *p;

    p = (struct tv_data *) __kmp_allocate( sizeof( *p ) );

    p->u.tp.global_addr = global_addr;
    p->u.tp.thread_addr = thread_addr;

    p->type = (void *) 1;

    p->next =  th->th.th_local.tv_data;
    th->th.th_local.tv_data = p;

    if ( p->next == 0 ) {
        int rc = pthread_setspecific( __kmp_tv_key, p );
        KMP_CHECK_SYSFAIL( "pthread_setspecific", rc );
    }
}

#endif /* BUILD_TV */
static int
__kmp_get_xproc( void ) {

    int r = 0;

#if KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD

    r = sysconf( _SC_NPROCESSORS_ONLN );

#elif KMP_OS_DARWIN

    // Find the number of available CPUs.
    kern_return_t          rc;
    host_basic_info_data_t info;
    mach_msg_type_number_t num = HOST_BASIC_INFO_COUNT;
    rc = host_info( mach_host_self(), HOST_BASIC_INFO, (host_info_t) & info, & num );
    if ( rc == 0 && num == HOST_BASIC_INFO_COUNT ) {
        // Cannot use KA_TRACE() here because this code works before trace
        // support is initialized.
        r = info.avail_cpus;
    } else {
        KMP_WARNING( CantGetNumAvailCPU );
        KMP_INFORM( AssumedNumCPU );
    }

#else

    #error "Unknown or unsupported OS."

#endif

    return r > 0 ? r : 2;   /* guess a value of 2 if the OS told us 0 */

} // __kmp_get_xproc
int
__kmp_read_from_file( char const *path, char const *format, ... )
{
    int result;
    va_list args;

    va_start(args, format);
    FILE *f = fopen(path, "rb");
    if ( f == NULL ) {
        va_end(args);
        return 0;
    }
    result = vfscanf(f, format, args);
    va_end(args);
    fclose(f);

    return result;
}
void
__kmp_runtime_initialize( void )
{
    int status;
    pthread_mutexattr_t mutex_attr;
    pthread_condattr_t  cond_attr;

    if ( __kmp_init_runtime ) {
        return;
    }

#if ( KMP_ARCH_X86 || KMP_ARCH_X86_64 )
    if ( ! __kmp_cpuinfo.initialized ) {
        __kmp_query_cpuid( &__kmp_cpuinfo );
    }
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

    __kmp_xproc = __kmp_get_xproc();

    if ( sysconf( _SC_THREADS ) ) {

        /* Query the maximum number of threads. */
        __kmp_sys_max_nth = sysconf( _SC_THREAD_THREADS_MAX );
        if ( __kmp_sys_max_nth == -1 ) {
            /* Unlimited threads for NPTL */
            __kmp_sys_max_nth = INT_MAX;
        }
        else if ( __kmp_sys_max_nth <= 1 ) {
            /* Can't tell, just use PTHREAD_THREADS_MAX */
            __kmp_sys_max_nth = KMP_MAX_NTH;
        }

        /* Query the minimum stack size. */
        __kmp_sys_min_stksize = sysconf( _SC_THREAD_STACK_MIN );
        if ( __kmp_sys_min_stksize <= 1 ) {
            __kmp_sys_min_stksize = KMP_MIN_STKSIZE;
        }
    }

    /* Set up the minimum number of threads to switch to TLS gtid. */
    __kmp_tls_gtid_min = KMP_TLS_GTID_MIN;

#ifdef BUILD_TV
    {
        int rc = pthread_key_create( & __kmp_tv_key, 0 );
        KMP_CHECK_SYSFAIL( "pthread_key_create", rc );
    }
#endif

    status = pthread_key_create( &__kmp_gtid_threadprivate_key, __kmp_internal_end_dest );
    KMP_CHECK_SYSFAIL( "pthread_key_create", status );
    status = pthread_mutexattr_init( & mutex_attr );
    KMP_CHECK_SYSFAIL( "pthread_mutexattr_init", status );
    status = pthread_mutex_init( & __kmp_wait_mx.m_mutex, & mutex_attr );
    KMP_CHECK_SYSFAIL( "pthread_mutex_init", status );
    status = pthread_condattr_init( & cond_attr );
    KMP_CHECK_SYSFAIL( "pthread_condattr_init", status );
    status = pthread_cond_init( & __kmp_wait_cv.c_cond, & cond_attr );
    KMP_CHECK_SYSFAIL( "pthread_cond_init", status );
#if USE_ITT_BUILD
    __kmp_itt_initialize();
#endif /* USE_ITT_BUILD */

    __kmp_init_runtime = TRUE;
}
void
__kmp_runtime_destroy( void )
{
    int status;

    if ( ! __kmp_init_runtime ) {
        return; // Nothing to do.
    }

#if USE_ITT_BUILD
    __kmp_itt_destroy();
#endif /* USE_ITT_BUILD */

    status = pthread_key_delete( __kmp_gtid_threadprivate_key );
    KMP_CHECK_SYSFAIL( "pthread_key_delete", status );
#ifdef BUILD_TV
    status = pthread_key_delete( __kmp_tv_key );
    KMP_CHECK_SYSFAIL( "pthread_key_delete", status );
#endif

    status = pthread_mutex_destroy( & __kmp_wait_mx.m_mutex );
    if ( status != 0 && status != EBUSY ) {
        KMP_SYSFAIL( "pthread_mutex_destroy", status );
    }
    status = pthread_cond_destroy( & __kmp_wait_cv.c_cond );
    if ( status != 0 && status != EBUSY ) {
        KMP_SYSFAIL( "pthread_cond_destroy", status );
    }
#if KMP_AFFINITY_SUPPORTED
    __kmp_affinity_uninitialize();
#endif

    __kmp_init_runtime = FALSE;
}
/* Put the thread to sleep for a time period. */
void
__kmp_thread_sleep( int millis )
{
    sleep( ( millis + 500 ) / 1000 );
}

/* Calculate the elapsed wall clock time for the user. */
void
__kmp_elapsed( double *t )
{
    int status;
# ifdef FIX_SGI_CLOCK
    struct timespec ts;

    status = clock_gettime( CLOCK_PROCESS_CPUTIME_ID, &ts );
    KMP_CHECK_SYSFAIL_ERRNO( "clock_gettime", status );
    *t = (double) ts.tv_nsec * (1.0 / (double) KMP_NSEC_PER_SEC) +
         (double) ts.tv_sec;
# else
    struct timeval tv;

    status = gettimeofday( & tv, NULL );
    KMP_CHECK_SYSFAIL_ERRNO( "gettimeofday", status );
    *t = (double) tv.tv_usec * (1.0 / (double) KMP_USEC_PER_SEC) +
         (double) tv.tv_sec;
# endif
}

/* Calculate the elapsed wall clock tick for the user. */
void
__kmp_elapsed_tick( double *t )
{
    *t = 1 / (double) CLOCKS_PER_SEC;
}

/* Return the current time stamp in nsec. */
kmp_uint64
__kmp_now_nsec()
{
    struct timeval t;
    gettimeofday(&t, NULL);
    return KMP_NSEC_PER_SEC*t.tv_sec + 1000*t.tv_usec;
}
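/* Unit check for __kmp_now_nsec() (illustrative): for
   t = { .tv_sec = 2, .tv_usec = 500000 } the result is
   2 * KMP_NSEC_PER_SEC + 1000 * 500000 = 2500000000 ns = 2.5 s, i.e.
   microseconds are promoted to nanoseconds by the factor 1000. */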
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
/* Measure the ratio of hardware timestamp ticks to nanoseconds. */
void
__kmp_initialize_system_tick()
{
    kmp_uint64 delay = 100000;          // ~50-100 usec on most machines.
    kmp_uint64 nsec  = __kmp_now_nsec();
    kmp_uint64 goal  = __kmp_hardware_timestamp() + delay;
    kmp_uint64 now;
    while ((now = __kmp_hardware_timestamp()) < goal);
    __kmp_ticks_per_nsec = 1.0 * (delay + (now - goal)) / (__kmp_now_nsec() - nsec);
}
#endif
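/* Calibration arithmetic (illustrative): the spin loop waits until the
   hardware timestamp has advanced by at least `delay` ticks, then divides the
   tick count (including the small overshoot now - goal) by the elapsed
   wall-clock nanoseconds. For example, if 100040 ticks elapse over 41684 ns,
   __kmp_ticks_per_nsec comes out near 2.4, corresponding to a 2.4 GHz
   invariant TSC. */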
/* Determine whether the given address is mapped into the current address space. */
int
__kmp_is_address_mapped( void * addr ) {

    int found = 0;

#if KMP_OS_LINUX || KMP_OS_FREEBSD

    /* On Linux* OS, parse /proc/<pid>/maps to find all address ranges mapped
       into this process. */
    char * name = __kmp_str_format( "/proc/%d/maps", getpid() );
    FILE * file = fopen( name, "r" );
    KMP_ASSERT( file != NULL );

    for ( ; ; ) {
        void * beginning = NULL;
        void * ending    = NULL;
        char   perms[ 5 ];

        int rc = fscanf( file, "%p-%p %4s %*[^\n]\n", & beginning, & ending, perms );
        if ( rc == EOF ) {
            break;
        }
        KMP_ASSERT( rc == 3 && KMP_STRLEN( perms ) == 4 ); // Make sure all fields are read.

        // The ending address is not included in the region, but the beginning is.
        if ( ( addr >= beginning ) && ( addr < ending ) ) {
            perms[ 2 ] = 0;    // 3rd and 4th characters do not matter.
            if ( strcmp( perms, "rw" ) == 0 ) {
                // Memory we are looking for should be readable and writable.
                found = 1;
            }
            break;
        }
    }

    // Free resources.
    fclose( file );
    KMP_INTERNAL_FREE( name );

#elif KMP_OS_DARWIN

    /* On OS X*, /proc is not available; probe the address via the VM interface. */
    int       buffer;
    vm_size_t count;
    kern_return_t rc =
        vm_read_overwrite(
            mach_task_self(),           // Task to read memory of.
            (vm_address_t)( addr ),     // Address to read from.
            1,                          // Number of bytes to be read.
            (vm_address_t)( & buffer ), // Address of buffer to save read bytes in.
            & count                     // Address of var to save number of read bytes in.
        );
    found = ( rc == 0 );   // Memory successfully read?

#elif KMP_OS_FREEBSD || KMP_OS_NETBSD

    found = 1;             // FIXME(FreeBSD, NetBSD): Implement this.

#else

    #error "Unknown or unsupported OS"

#endif

    return found;

} // __kmp_is_address_mapped

#ifdef USE_LOAD_BALANCE

# if KMP_OS_DARWIN

// Returns the rounded value of the system load average over the interval
// selected by __kmp_load_balance_interval (60, 300, or 900 sec), or -1 on error.
int
__kmp_get_load_balance( int max )
{
    double averages[3];
    int ret_avg = 0;

    int res = getloadavg( averages, 3 );

    // getloadavg() may return fewer samples than the 3 requested.
    if ( __kmp_load_balance_interval < 180 && ( res >= 1 ) ) {
        ret_avg = averages[0];   // 1 min
    }
    else if ( ( __kmp_load_balance_interval >= 180
        && __kmp_load_balance_interval < 600 ) && ( res >= 2 ) ) {
        ret_avg = averages[1];   // 5 min
    }
    else if ( ( __kmp_load_balance_interval >= 600 ) && ( res == 3 ) ) {
        ret_avg = averages[2];   // 15 min
    }
    else {                       // Error occurred.
        return -1;
    }

    return ret_avg;
}

# else // Linux* OS
// Returns the number of running (not sleeping) threads, or -1 in case of
// error (e.g. a Linux* OS kernel too old to have "/proc" support). Counting
// stops once max running threads have been seen.
int
__kmp_get_load_balance( int max )
{
    static int permanent_error = 0;

    static int    glb_running_threads = 0; // Saved count of the running threads.
    static double glb_call_time = 0;       // Thread balance algorithm call time.

    int running_threads = 0;               // Number of running threads in the system.

    DIR  *          proc_dir   = NULL;     // Handle of "/proc/" directory.
    struct dirent * proc_entry = NULL;     // Handle of one "/proc/<pid>/" directory entry.

    kmp_str_buf_t   task_path;             // "/proc/<pid>/task/<tid>/" path.
    DIR  *          task_dir   = NULL;     // Handle of "/proc/<pid>/task/<tid>/" directory.
    struct dirent * task_entry = NULL;     // Handle of one task directory entry.
    int             task_path_fixed_len;

    kmp_str_buf_t   stat_path;             // "/proc/<pid>/task/<tid>/stat" path.
    int             stat_file = -1;
    int             stat_path_fixed_len;

    int total_processes = 0;               // Total number of processes in the system.
    int total_threads   = 0;               // Total number of threads in the system.

    double call_time = 0.0;

    __kmp_str_buf_init( & task_path );
    __kmp_str_buf_init( & stat_path );

    __kmp_elapsed( & call_time );

    if ( glb_call_time &&
            ( call_time - glb_call_time < __kmp_load_balance_interval ) ) {
        running_threads = glb_running_threads;
        goto finish;
    }

    glb_call_time = call_time;

    // Do not spend time scanning "/proc/" if we have a permanent error.
    if ( permanent_error ) {
        running_threads = -1;
        goto finish;
    }

    if ( max <= 0 ) {
        max = INT_MAX;
    }

    // Open the "/proc/" directory.
    proc_dir = opendir( "/proc" );
    if ( proc_dir == NULL ) {
        // Cannot open "/proc/": the kernel probably does not support it.
        // Return an error now and in subsequent calls.
        running_threads = -1;
        permanent_error = 1;
        goto finish;
    }

    // Initialize the fixed part of task_path. This part will not change.
    __kmp_str_buf_cat( & task_path, "/proc/", 6 );
    task_path_fixed_len = task_path.used;   // Remember the number of used characters.

    proc_entry = readdir( proc_dir );
    while ( proc_entry != NULL ) {
        // An entry that is a directory whose name starts with a digit is
        // assumed to be a process' directory.
        if ( proc_entry->d_type == DT_DIR && isdigit( proc_entry->d_name[ 0 ] ) ) {

            ++ total_processes;
            // Make sure the init process is the very first in "/proc", so we
            // can replace strcmp( proc_entry->d_name, "1" ) == 0 with the
            // simpler total_processes == 1.
            KMP_DEBUG_ASSERT( total_processes != 1 || strcmp( proc_entry->d_name, "1" ) == 0 );

            // Construct task_path.
            task_path.used = task_path_fixed_len;   // Reset task_path to "/proc/".
            __kmp_str_buf_cat( & task_path, proc_entry->d_name, KMP_STRLEN( proc_entry->d_name ) );
            __kmp_str_buf_cat( & task_path, "/task", 5 );

            task_dir = opendir( task_path.str );
            if ( task_dir == NULL ) {
                // A process can finish between reading the "/proc/" entry and
                // opening its "task/" directory, so in general do not
                // complain. But failing to open "task/" of the init process
                // means the kernel has no task directory support at all.
                if ( strcmp( proc_entry->d_name, "1" ) == 0 ) {
                    running_threads = -1;
                    permanent_error = 1;
                    goto finish;
                }
            } else {
                // Construct the fixed part of the stat file path.
                __kmp_str_buf_clear( & stat_path );
                __kmp_str_buf_cat( & stat_path, task_path.str, task_path.used );
                __kmp_str_buf_cat( & stat_path, "/", 1 );
                stat_path_fixed_len = stat_path.used;

                task_entry = readdir( task_dir );
                while ( task_entry != NULL ) {
                    // It is a directory and its name starts with a digit.
                    if ( proc_entry->d_type == DT_DIR && isdigit( task_entry->d_name[ 0 ] ) ) {

                        ++ total_threads;

                        // Construct the complete stat file path.
                        stat_path.used = stat_path_fixed_len;
                        __kmp_str_buf_cat( & stat_path, task_entry->d_name, KMP_STRLEN( task_entry->d_name ) );
                        __kmp_str_buf_cat( & stat_path, "/stat", 5 );

                        // Note: low-level open/read/close is used; the
                        // high-level fopen/fclose works about 30% slower.
                        stat_file = open( stat_path.str, O_RDONLY );
                        if ( stat_file == -1 ) {
                            // Cannot report an error: the task (thread) may
                            // have terminated just before reading this file.
                        } else {
                            /* The "stat" file content looks like:
                                   24285 (program name) S ...
                               The comm field may contain spaces and digits,
                               so search for the closing parenthesis; the
                               state letter follows two characters later. */
                            char buffer[ 256 ];
                            int  len;
                            len = read( stat_file, buffer, sizeof( buffer ) - 1 );
                            if ( len >= 0 ) {
                                buffer[ len ] = 0;
                                char * close_parent = strstr( buffer, ") " );
                                if ( close_parent != NULL ) {
                                    char state = * ( close_parent + 2 );
                                    if ( state == 'R' ) {
                                        ++ running_threads;
                                        if ( running_threads >= max ) {
                                            goto finish;
                                        }
                                    }
                                }
                            }
                            close( stat_file );
                            stat_file = -1;
                        }
                    }
                    task_entry = readdir( task_dir );
                }
                closedir( task_dir );
                task_dir = NULL;
            }
        }
        proc_entry = readdir( proc_dir );
    }

    // There _might_ be a timing hole where the thread executing this code
    // gets skipped in the load balance, and running_threads is 0.
    // Assert in debug builds only!
    KMP_DEBUG_ASSERT( running_threads > 0 );
    if ( running_threads <= 0 ) {
        running_threads = 1;
    }

    finish: // Clean up and exit.
    if ( proc_dir != NULL ) {
        closedir( proc_dir );
    }
    __kmp_str_buf_free( & task_path );
    if ( task_dir != NULL ) {
        closedir( task_dir );
    }
    __kmp_str_buf_free( & stat_path );
    if ( stat_file != -1 ) {
        close( stat_file );
    }

    glb_running_threads = running_threads;

    return running_threads;

} // __kmp_get_load_balance
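/* What the parser above relies on (illustrative): a line of
   /proc/<pid>/task/<tid>/stat looks like

       24285 (program name) S 24256 ...

   The comm field between the parentheses may itself contain spaces and
   digits, so the code searches for the *closing* parenthesis and reads the
   state character two bytes later; 'R' marks a running thread. */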
# endif // KMP_OS_DARWIN

#endif // USE_LOAD_BALANCE

#if !(KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_MIC || (KMP_OS_LINUX && KMP_ARCH_AARCH64) || KMP_ARCH_PPC64)

// Portable fallback for targets without a hand-written assembly version.
int
__kmp_invoke_microtask( microtask_t pkfn,
                        int gtid, int tid,
                        int argc, void *p_argv[]
#if OMPT_SUPPORT
                        , void **exit_frame_ptr
#endif
)
{
#if OMPT_SUPPORT
    *exit_frame_ptr = __builtin_frame_address(0);
#endif

    switch (argc) {
    default:
        fprintf(stderr, "Too many args to microtask: %d!\n", argc);
        fflush(stderr);
        exit(-1);
    case 0:
        (*pkfn)(&gtid, &tid);
        break;
    case 1:
        (*pkfn)(&gtid, &tid, p_argv[0]);
        break;
    case 2:
        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1]);
        break;
    case 3:
        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2]);
        break;
    case 4:
        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3]);
        break;
    case 5:
        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4]);
        break;
    case 6:
        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
                p_argv[5]);
        break;
    case 7:
        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
                p_argv[5], p_argv[6]);
        break;
    case 8:
        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
                p_argv[5], p_argv[6], p_argv[7]);
        break;
    case 9:
        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
                p_argv[5], p_argv[6], p_argv[7], p_argv[8]);
        break;
    case 10:
        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
                p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9]);
        break;
    case 11:
        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
                p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10]);
        break;
    case 12:
        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
                p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
                p_argv[11]);
        break;
    case 13:
        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
                p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
                p_argv[11], p_argv[12]);
        break;
    case 14:
        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
                p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
                p_argv[11], p_argv[12], p_argv[13]);
        break;
    case 15:
        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
                p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
                p_argv[11], p_argv[12], p_argv[13], p_argv[14]);
        break;
    }

#if OMPT_SUPPORT
    *exit_frame_ptr = 0;
#endif

    return 1;
}

#endif
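/* Why the switch exists (explanatory note): C has no portable way to forward
   a runtime-determined number of arguments to a fixed-arity function pointer,
   so each arity up to 15 gets its own call site. Architectures excluded by
   the guard above (x86, x86_64, MIC, AArch64 on Linux, PPC64) instead build
   the call frame directly in hand-written assembly. */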