#include "kmp_affinity.h"
#include "kmp_atomic.h"
#include "kmp_environment.h"
#include "kmp_error.h"
#include "kmp_settings.h"
#include "kmp_stats.h"
#include "kmp_wait_release.h"
#include "kmp_wrapper_getpid.h"
#include "kmp_dispatch.h"
#if KMP_USE_HIER_SCHED
#include "kmp_dispatch_hier.h"
#endif

#include "ompt-specific.h"

#define KMP_USE_PRCTL 0

#include "tsan_annotations.h"

#if defined(KMP_GOMP_COMPAT)
char const __kmp_version_alt_comp[] =
    KMP_VERSION_PREFIX "alternative compiler support: yes";
#endif /* defined(KMP_GOMP_COMPAT) */

char const __kmp_version_omp_api[] = KMP_VERSION_PREFIX "API version: ";

char const __kmp_version_lock[] =
    KMP_VERSION_PREFIX "lock type: run time selectable";

#define KMP_MIN(x, y) ((x) < (y) ? (x) : (y))

kmp_info_t __kmp_monitor;
/* Forward declarations */

void __kmp_cleanup(void);

static void __kmp_initialize_info(kmp_info_t *, kmp_team_t *, int tid,
                                  int gtid);
static void __kmp_initialize_team(kmp_team_t *team, int new_nproc,
                                  kmp_internal_control_t *new_icvs,
                                  ident_t *loc);
#if OMP_40_ENABLED && KMP_AFFINITY_SUPPORTED
static void __kmp_partition_places(kmp_team_t *team,
                                   int update_master_only = 0);
#endif
static void __kmp_do_serial_initialize(void);
void __kmp_fork_barrier(int gtid, int tid);
void __kmp_join_barrier(int gtid);
void __kmp_setup_icv_copy(kmp_team_t *team, int new_nproc,
                          kmp_internal_control_t *new_icvs, ident_t *loc);

#ifdef USE_LOAD_BALANCE
static int __kmp_load_balance_nproc(kmp_root_t *root, int set_nproc);
#endif

static int __kmp_expand_threads(int nNeed);
static int __kmp_unregister_root_other_thread(int gtid);
static void __kmp_unregister_library(void);
static void __kmp_reap_thread(kmp_info_t *thread, int is_root);
kmp_info_t *__kmp_thread_pool_insert_pt = NULL;
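
/* Global thread id (gtid) lookup.  Depending on __kmp_gtid_mode the runtime
   reads a dedicated thread-local variable (TDATA), uses keyed TLS, or falls
   back to an internal algorithm that scans the stack base/size recorded in
   each __kmp_threads[] entry and picks the thread whose stack contains the
   address of a local variable on the current stack. */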
int __kmp_get_global_thread_id() {
  int i;
  kmp_info_t **other_threads;
  size_t stack_data;
  char *stack_addr;
  size_t stack_size;
  char *stack_base;

  KA_TRACE(
      1000,
      ("*** __kmp_get_global_thread_id: entering, nproc=%d  all_nproc=%d\n",
       __kmp_nth, __kmp_all_nth));

  if (!TCR_4(__kmp_init_gtid))
    return KMP_GTID_DNE;

#ifdef KMP_TDATA_GTID
  if (TCR_4(__kmp_gtid_mode) >= 3) {
    KA_TRACE(1000, ("*** __kmp_get_global_thread_id: using TDATA\n"));
    return __kmp_gtid;
  }
#endif
  if (TCR_4(__kmp_gtid_mode) >= 2) {
    KA_TRACE(1000, ("*** __kmp_get_global_thread_id: using keyed TLS\n"));
    return __kmp_gtid_get_specific();
  }
  KA_TRACE(1000, ("*** __kmp_get_global_thread_id: using internal alg.\n"));

  stack_addr = (char *)&stack_data;
  other_threads = __kmp_threads;

  for (i = 0; i < __kmp_threads_capacity; i++) {

    kmp_info_t *thr = (kmp_info_t *)TCR_SYNC_PTR(other_threads[i]);
    if (!thr)
      continue;

    stack_size = (size_t)TCR_PTR(thr->th.th_info.ds.ds_stacksize);
    stack_base = (char *)TCR_PTR(thr->th.th_info.ds.ds_stackbase);

    /* stack grows down -- search through all of the active threads */
    if (stack_addr <= stack_base) {
      size_t stack_diff = stack_base - stack_addr;

      if (stack_diff <= stack_size) {
        /* The only way we can be closer than the allocated stack size is if
           we are running on this thread. */
        KMP_DEBUG_ASSERT(__kmp_gtid_get_specific() == i);
        return i;
      }
    }
  }

  /* The internal algorithm failed; fall back to keyed TLS. */
  KA_TRACE(1000,
           ("*** __kmp_get_global_thread_id: internal alg. failed to find "
            "thread, using TLS\n"));
  i = __kmp_gtid_get_specific();

  if (i < 0)
    return i;

  /* Dynamically update the stack window for uber threads to avoid the
     get_specific call on the next lookup. */
  if (!TCR_4(other_threads[i]->th.th_info.ds.ds_stackgrow)) {
    KMP_FATAL(StackOverflow, i);
  }

  stack_base = (char *)other_threads[i]->th.th_info.ds.ds_stackbase;
  if (stack_addr > stack_base) {
    TCW_PTR(other_threads[i]->th.th_info.ds.ds_stackbase, stack_addr);
    TCW_PTR(other_threads[i]->th.th_info.ds.ds_stacksize,
            other_threads[i]->th.th_info.ds.ds_stacksize + stack_addr -
                stack_base);
  } else {
    TCW_PTR(other_threads[i]->th.th_info.ds.ds_stacksize,
            stack_base - stack_addr);
  }

  /* Reprint stack bounds for the uber master since they have been refined */
  if (__kmp_storage_map) {
    char *stack_end = (char *)other_threads[i]->th.th_info.ds.ds_stackbase;
    char *stack_beg = stack_end - other_threads[i]->th.th_info.ds.ds_stacksize;
    __kmp_print_storage_map_gtid(i, stack_beg, stack_end,
                                 other_threads[i]->th.th_info.ds.ds_stacksize,
                                 "th_%d stack (refinement)", i);
  }
  return i;
}
int __kmp_get_global_thread_id_reg() {
  int gtid;

  if (!__kmp_init_serial) {
    gtid = KMP_GTID_DNE;
  } else
#ifdef KMP_TDATA_GTID
      if (TCR_4(__kmp_gtid_mode) >= 3) {
    KA_TRACE(1000, ("*** __kmp_get_global_thread_id_reg: using TDATA\n"));
    gtid = __kmp_gtid;
  } else
#endif
      if (TCR_4(__kmp_gtid_mode) >= 2) {
    KA_TRACE(1000, ("*** __kmp_get_global_thread_id_reg: using keyed TLS\n"));
    gtid = __kmp_gtid_get_specific();
  } else {
    KA_TRACE(1000,
             ("*** __kmp_get_global_thread_id_reg: using internal alg.\n"));
    gtid = __kmp_get_global_thread_id();
  }

  /* We must be a new uber master sibling thread. */
  if (gtid == KMP_GTID_DNE) {
    KA_TRACE(10,
             ("__kmp_get_global_thread_id_reg: Encountered new root thread. "
              "Registering a new gtid.\n"));
    __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);
    if (!__kmp_init_serial) {
      __kmp_do_serial_initialize();
      gtid = __kmp_gtid_get_specific();
    } else {
      gtid = __kmp_register_root(FALSE);
    }
    __kmp_release_bootstrap_lock(&__kmp_initz_lock);
  }

  KMP_DEBUG_ASSERT(gtid >= 0);

  return gtid;
}
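
/* Stack overlap checking: when extensive environment checks are enabled,
   compare this thread's [stack_beg, stack_end) range against every other
   registered thread and abort with a StackOverlap fatal message if the
   ranges intersect. */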
void __kmp_check_stack_overlap(kmp_info_t *th) {
  int f;
  char *stack_beg = NULL;
  char *stack_end = NULL;
  int gtid;

  KA_TRACE(10, ("__kmp_check_stack_overlap: called\n"));
  if (__kmp_storage_map) {
    stack_end = (char *)th->th.th_info.ds.ds_stackbase;
    stack_beg = stack_end - th->th.th_info.ds.ds_stacksize;

    gtid = __kmp_gtid_from_thread(th);

    if (gtid == KMP_GTID_MONITOR) {
      __kmp_print_storage_map_gtid(
          gtid, stack_beg, stack_end, th->th.th_info.ds.ds_stacksize,
          "th_%s stack (%s)", "mon",
          (th->th.th_info.ds.ds_stackgrow) ? "initial" : "actual");
    } else {
      __kmp_print_storage_map_gtid(
          gtid, stack_beg, stack_end, th->th.th_info.ds.ds_stacksize,
          "th_%d stack (%s)", gtid,
          (th->th.th_info.ds.ds_stackgrow) ? "initial" : "actual");
    }
  }

  /* No point in checking ubermaster threads since they use refinement and
     cannot overlap */
  gtid = __kmp_gtid_from_thread(th);
  if (__kmp_env_checks == TRUE && !KMP_UBER_GTID(gtid)) {
    KA_TRACE(10,
             ("__kmp_check_stack_overlap: performing extensive checking\n"));
    if (stack_beg == NULL) {
      stack_end = (char *)th->th.th_info.ds.ds_stackbase;
      stack_beg = stack_end - th->th.th_info.ds.ds_stacksize;
    }

    for (f = 0; f < __kmp_threads_capacity; f++) {
      kmp_info_t *f_th = (kmp_info_t *)TCR_SYNC_PTR(__kmp_threads[f]);

      if (f_th && f_th != th) {
        char *other_stack_end =
            (char *)TCR_PTR(f_th->th.th_info.ds.ds_stackbase);
        char *other_stack_beg =
            other_stack_end - (size_t)TCR_PTR(f_th->th.th_info.ds.ds_stacksize);
        if ((stack_beg > other_stack_beg && stack_beg < other_stack_end) ||
            (stack_end > other_stack_beg && stack_end < other_stack_end)) {

          /* Print the other stack values before the abort */
          if (__kmp_storage_map)
            __kmp_print_storage_map_gtid(
                -1, other_stack_beg, other_stack_end,
                (size_t)TCR_PTR(f_th->th.th_info.ds.ds_stacksize),
                "th_%d stack (overlapped)", __kmp_gtid_from_thread(f_th));

          __kmp_fatal(KMP_MSG(StackOverlap), KMP_HNT(ChangeStackLimit),
                      __kmp_msg_null);
        }
      }
    }
  }
  KA_TRACE(10, ("__kmp_check_stack_overlap: returning\n"));
}
void __kmp_infinite_loop(void) {
  static int done = FALSE;

  while (!done) {
    KMP_YIELD(TRUE);
  }
}
#define MAX_MESSAGE 512

void __kmp_print_storage_map_gtid(int gtid, void *p1, void *p2, size_t size,
                                  char const *format, ...) {
  char buffer[MAX_MESSAGE];
  va_list ap;

  va_start(ap, format);
  KMP_SNPRINTF(buffer, sizeof(buffer), "OMP storage map: %p %p%8lu %s\n", p1,
               p2, (unsigned long)size, format);
  __kmp_acquire_bootstrap_lock(&__kmp_stdio_lock);
  __kmp_vprintf(kmp_err, buffer, ap);
#if KMP_PRINT_DATA_PLACEMENT
  int node;
  if (gtid >= 0) {
    if (p1 <= p2 && (char *)p2 - (char *)p1 == size) {
      if (__kmp_storage_map_verbose) {
        node = __kmp_get_host_node(p1);
        if (node < 0) /* doesn't work, so don't try this next time */
          __kmp_storage_map_verbose = FALSE;
        else {
          char *last;
          int lastNode;
          int localProc = __kmp_get_cpu_from_gtid(gtid);

          const int page_size = KMP_GET_PAGE_SIZE();

          p1 = (void *)((size_t)p1 & ~((size_t)page_size - 1));
          p2 = (void *)(((size_t)p2 - 1) & ~((size_t)page_size - 1));
          if (localProc >= 0)
            __kmp_printf_no_lock("  GTID %d localNode %d\n", gtid,
                                 localProc >> 1);
          else
            __kmp_printf_no_lock("  GTID %d\n", gtid);
#if KMP_USE_PRCTL
          do {
            last = p1;
            lastNode = node;
            /* This loop collates adjacent pages with the same host node. */
            do {
              (char *)p1 += page_size;
            } while (p1 <= p2 && (node = __kmp_get_host_node(p1)) == lastNode);
            __kmp_printf_no_lock("    %p-%p memNode %d\n", last, (char *)p1 - 1,
                                 lastNode);
          } while (p1 <= p2);
#else
          __kmp_printf_no_lock("    %p-%p memNode %d\n", p1,
                               (char *)p1 + (page_size - 1),
                               __kmp_get_host_node(p1));
          if (p1 < p2) /* don't print a range if it is only one page */
            __kmp_printf_no_lock("    %p-%p memNode %d\n", p2,
                                 (char *)p2 + (page_size - 1),
                                 __kmp_get_host_node(p2));
#endif
        }
      }
    } else
      __kmp_printf_no_lock("  %s\n", KMP_I18N_STR(StorageMapWarning));
  }
#endif /* KMP_PRINT_DATA_PLACEMENT */
  __kmp_release_bootstrap_lock(&__kmp_stdio_lock);
}
void __kmp_warn(char const *format, ...) {
  char buffer[MAX_MESSAGE];
  va_list ap;

  if (__kmp_generate_warnings == kmp_warnings_off) {
    return;
  }

  va_start(ap, format);

  KMP_SNPRINTF(buffer, sizeof(buffer), "OMP warning: %s\n", format);
  __kmp_acquire_bootstrap_lock(&__kmp_stdio_lock);
  __kmp_vprintf(kmp_err, buffer, ap);
  __kmp_release_bootstrap_lock(&__kmp_stdio_lock);

  va_end(ap);
}
void __kmp_abort_process() {
  // Later threads may stall here, but that's ok because abort() will kill them.
  __kmp_acquire_bootstrap_lock(&__kmp_exit_lock);

  if (__kmp_debug_buf) {
    __kmp_dump_debug_buffer();
  }

  if (KMP_OS_WINDOWS) {
    // Let other threads know of abnormal termination and prevent deadlock
    // if abort happened during library initialization or shutdown.
    __kmp_global.g.g_abort = SIGABRT;
    raise(SIGABRT);
    _exit(3); // In case the signal is ignored, exit anyway.
  } else {
    abort();
  }

  __kmp_infinite_loop();
  __kmp_release_bootstrap_lock(&__kmp_exit_lock);
} // __kmp_abort_process

void __kmp_abort_thread(void) {
  // In case of abort just call abort(); it will kill all the threads.
  __kmp_infinite_loop();
} // __kmp_abort_thread
/* Print out the storage map for the major kmp_info_t thread data structures
   that are allocated together. */

static void __kmp_print_thread_storage_map(kmp_info_t *thr, int gtid) {
  __kmp_print_storage_map_gtid(gtid, thr, thr + 1, sizeof(kmp_info_t), "th_%d",
                               gtid);

  __kmp_print_storage_map_gtid(gtid, &thr->th.th_info, &thr->th.th_team,
                               sizeof(kmp_desc_t), "th_%d.th_info", gtid);

  __kmp_print_storage_map_gtid(gtid, &thr->th.th_local, &thr->th.th_pri_head,
                               sizeof(kmp_local_t), "th_%d.th_local", gtid);

  __kmp_print_storage_map_gtid(
      gtid, &thr->th.th_bar[0], &thr->th.th_bar[bs_last_barrier],
      sizeof(kmp_balign_t) * bs_last_barrier, "th_%d.th_bar", gtid);

  __kmp_print_storage_map_gtid(gtid, &thr->th.th_bar[bs_plain_barrier],
                               &thr->th.th_bar[bs_plain_barrier + 1],
                               sizeof(kmp_balign_t), "th_%d.th_bar[plain]",
                               gtid);

  __kmp_print_storage_map_gtid(gtid, &thr->th.th_bar[bs_forkjoin_barrier],
                               &thr->th.th_bar[bs_forkjoin_barrier + 1],
                               sizeof(kmp_balign_t), "th_%d.th_bar[forkjoin]",
                               gtid);

#if KMP_FAST_REDUCTION_BARRIER
  __kmp_print_storage_map_gtid(gtid, &thr->th.th_bar[bs_reduction_barrier],
                               &thr->th.th_bar[bs_reduction_barrier + 1],
                               sizeof(kmp_balign_t), "th_%d.th_bar[reduction]",
                               gtid);
#endif // KMP_FAST_REDUCTION_BARRIER
}

/* Print out the storage map for the major kmp_team_t team data structures
   that are allocated together. */

static void __kmp_print_team_storage_map(const char *header, kmp_team_t *team,
                                         int team_id, int num_thr) {
  int num_disp_buff = team->t.t_max_nproc > 1 ? __kmp_dispatch_num_buffers : 2;
  __kmp_print_storage_map_gtid(-1, team, team + 1, sizeof(kmp_team_t), "%s_%d",
                               header, team_id);

  __kmp_print_storage_map_gtid(-1, &team->t.t_bar[0],
                               &team->t.t_bar[bs_last_barrier],
                               sizeof(kmp_balign_team_t) * bs_last_barrier,
                               "%s_%d.t_bar", header, team_id);

  __kmp_print_storage_map_gtid(-1, &team->t.t_bar[bs_plain_barrier],
                               &team->t.t_bar[bs_plain_barrier + 1],
                               sizeof(kmp_balign_team_t), "%s_%d.t_bar[plain]",
                               header, team_id);

  __kmp_print_storage_map_gtid(-1, &team->t.t_bar[bs_forkjoin_barrier],
                               &team->t.t_bar[bs_forkjoin_barrier + 1],
                               sizeof(kmp_balign_team_t),
                               "%s_%d.t_bar[forkjoin]", header, team_id);

#if KMP_FAST_REDUCTION_BARRIER
  __kmp_print_storage_map_gtid(-1, &team->t.t_bar[bs_reduction_barrier],
                               &team->t.t_bar[bs_reduction_barrier + 1],
                               sizeof(kmp_balign_team_t),
                               "%s_%d.t_bar[reduction]", header, team_id);
#endif // KMP_FAST_REDUCTION_BARRIER

  __kmp_print_storage_map_gtid(
      -1, &team->t.t_dispatch[0], &team->t.t_dispatch[num_thr],
      sizeof(kmp_disp_t) * num_thr, "%s_%d.t_dispatch", header, team_id);

  __kmp_print_storage_map_gtid(
      -1, &team->t.t_threads[0], &team->t.t_threads[num_thr],
      sizeof(kmp_info_t *) * num_thr, "%s_%d.t_threads", header, team_id);

  __kmp_print_storage_map_gtid(-1, &team->t.t_disp_buffer[0],
                               &team->t.t_disp_buffer[num_disp_buff],
                               sizeof(dispatch_shared_info_t) * num_disp_buff,
                               "%s_%d.t_disp_buffer", header, team_id);

  __kmp_print_storage_map_gtid(-1, &team->t.t_taskq, &team->t.t_copypriv_data,
                               sizeof(kmp_taskq_t), "%s_%d.t_taskq", header,
                               team_id);
}
static void __kmp_init_allocator() { __kmp_init_memkind(); }
static void __kmp_fini_allocator() { __kmp_fini_memkind(); }
/* Windows process-detach cleanup: reset bootstrap locks that might still be
   held by threads the OS has already terminated. */

static void __kmp_reset_lock(kmp_bootstrap_lock_t *lck) {
  __kmp_init_bootstrap_lock(lck); // make the lock released
}

static void __kmp_reset_locks_on_process_detach(int gtid_req) {
  int i;
  int thread_count;

  // Wait until no other registered thread (except the one requesting the
  // detach) is still alive.
  while (1) {
    thread_count = 0;
    for (i = 0; i < __kmp_threads_capacity; ++i) {
      if (!__kmp_threads)
        continue;
      kmp_info_t *th = __kmp_threads[i];
      if (th == NULL)
        continue;
      int gtid = th->th.th_info.ds.ds_gtid;
      if (gtid == gtid_req)
        continue;
      if (gtid < 0)
        continue;
      DWORD exit_val;
      int alive = __kmp_is_thread_alive(th, &exit_val);
      if (alive) {
        ++thread_count;
      }
    }
    if (thread_count == 0)
      break; // success
  }

  // Assume that we are alone; reset the locks that may still be held.
  __kmp_reset_lock(&__kmp_forkjoin_lock);
#ifdef KMP_DEBUG
  __kmp_reset_lock(&__kmp_stdio_lock);
#endif
}
BOOL WINAPI DllMain(HINSTANCE hInstDLL, DWORD fdwReason, LPVOID lpReserved) {

  switch (fdwReason) {

  case DLL_PROCESS_ATTACH:
    KA_TRACE(10, ("DllMain: PROCESS_ATTACH\n"));
    return TRUE;

  case DLL_PROCESS_DETACH:
    KA_TRACE(10, ("DllMain: PROCESS_DETACH T#%d\n", __kmp_gtid_get_specific()));

    if (lpReserved != NULL) {
      // lpReserved != NULL means the process is terminating (as opposed to
      // FreeLibrary); other threads may already have been killed by the OS,
      // so reset the locks they may still hold.
      __kmp_reset_locks_on_process_detach(__kmp_gtid_get_specific());
    }

    __kmp_internal_end_library(__kmp_gtid_get_specific());
    return TRUE;

  case DLL_THREAD_ATTACH:
    KA_TRACE(10, ("DllMain: THREAD_ATTACH\n"));
    return TRUE;

  case DLL_THREAD_DETACH:
    KA_TRACE(10, ("DllMain: THREAD_DETACH T#%d\n", __kmp_gtid_get_specific()));

    __kmp_internal_end_thread(__kmp_gtid_get_specific());
    return TRUE;
  }

  return TRUE;
}
/* Change the library type to "status" and return the old type.
   Called from within initialization routines where __kmp_initz_lock is held. */
int __kmp_change_library(int status) {
  int old_status;

  old_status = __kmp_yield_init & 1; // check whether KMP_LIBRARY=throughput
                                     // (even init count)

  if (status) {
    __kmp_yield_init |= 1; // throughput => turnaround (odd init count)
  } else {
    __kmp_yield_init &= ~1; // turnaround => throughput (even init count)
  }

  return old_status; // return previous setting of whether KMP_LIBRARY=throughput
}
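
/* Ordered-section handshaking.  __kmp_parallel_deo waits until
   team->t.t_ordered.dt.t_value equals this thread's tid (it is our turn),
   and __kmp_parallel_dxo passes the token to the next tid in the team. */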
void __kmp_parallel_deo(int *gtid_ref, int *cid_ref, ident_t *loc_ref) {
  int gtid = *gtid_ref;
#ifdef BUILD_PARALLEL_ORDERED
  kmp_team_t *team = __kmp_team_from_gtid(gtid);
#endif /* BUILD_PARALLEL_ORDERED */

  if (__kmp_env_consistency_check) {
    if (__kmp_threads[gtid]->th.th_root->r.r_active)
#if KMP_USE_DYNAMIC_LOCK
      __kmp_push_sync(gtid, ct_ordered_in_parallel, loc_ref, NULL, 0);
#else
      __kmp_push_sync(gtid, ct_ordered_in_parallel, loc_ref, NULL);
#endif
  }
#ifdef BUILD_PARALLEL_ORDERED
  if (!team->t.t_serialized) {
    KMP_MB();
    KMP_WAIT_YIELD(&team->t.t_ordered.dt.t_value, __kmp_tid_from_gtid(gtid),
                   KMP_EQ, NULL);
    KMP_MB();
  }
#endif /* BUILD_PARALLEL_ORDERED */
}
void __kmp_parallel_dxo(int *gtid_ref, int *cid_ref, ident_t *loc_ref) {
  int gtid = *gtid_ref;
#ifdef BUILD_PARALLEL_ORDERED
  int tid = __kmp_tid_from_gtid(gtid);
  kmp_team_t *team = __kmp_team_from_gtid(gtid);
#endif /* BUILD_PARALLEL_ORDERED */

  if (__kmp_env_consistency_check) {
    if (__kmp_threads[gtid]->th.th_root->r.r_active)
      __kmp_pop_sync(gtid, ct_ordered_in_parallel, loc_ref);
  }
#ifdef BUILD_PARALLEL_ORDERED
  if (!team->t.t_serialized) {
    KMP_MB(); /* Flush all pending memory write invalidates. */

    /* use the tid of the next thread in this team */
    team->t.t_ordered.dt.t_value = ((tid + 1) % team->t.t_nproc);

    KMP_MB(); /* Flush all pending memory write invalidates. */
  }
#endif /* BUILD_PARALLEL_ORDERED */
}
/* The BARRIER for a SINGLE process section is always explicit */

int __kmp_enter_single(int gtid, ident_t *id_ref, int push_ws) {
  int status;
  kmp_info_t *th;
  kmp_team_t *team;

  if (!TCR_4(__kmp_init_parallel))
    __kmp_parallel_initialize();

  th = __kmp_threads[gtid];
  team = th->th.th_team;
  status = 0;

  th->th.th_ident = id_ref;

  if (team->t.t_serialized) {
    status = 1;
  } else {
    kmp_int32 old_this = th->th.th_local.this_construct;

    ++th->th.th_local.this_construct;
    /* try to set team count to thread count--success means thread got the
       single block */
    if (team->t.t_construct == old_this) {
      status = __kmp_atomic_compare_store_acq(&team->t.t_construct, old_this,
                                              th->th.th_local.this_construct);
    }
    if (__itt_metadata_add_ptr && __kmp_forkjoin_frames_mode == 3 &&
        KMP_MASTER_GTID(gtid) && th->th.th_teams_microtask == NULL &&
        team->t.t_active_level == 1) {
      // Only report metadata by master of active team at level 1
      __kmp_itt_metadata_single(id_ref);
    }
  }

  if (__kmp_env_consistency_check) {
    if (status && push_ws) {
      __kmp_push_workshare(gtid, ct_psingle, id_ref);
    } else {
      __kmp_check_workshare(gtid, ct_psingle, id_ref);
    }
  }
  if (status) {
    __kmp_itt_single_start(gtid);
  }
  return status;
}

void __kmp_exit_single(int gtid) {
  __kmp_itt_single_end(gtid);
  if (__kmp_env_consistency_check)
    __kmp_pop_workshare(gtid, ct_psingle, NULL);
}
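
/* __kmp_reserve_threads decides how many threads a new team may actually get:
   it starts from the requested count, optionally shrinks it according to the
   dynamic adjustment mode (load balance, thread limit, or random), then clips
   it against KMP_DEVICE_THREAD_LIMIT, OMP_THREAD_LIMIT and the capacity of
   the __kmp_threads[] array (expanding the array if possible).  A return
   value of 1 means the region will be serialized. */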
static int __kmp_reserve_threads(kmp_root_t *root, kmp_team_t *parent_team,
                                 int master_tid, int set_nthreads) {
  int capacity;
  int new_nthreads;
  KMP_DEBUG_ASSERT(__kmp_init_serial);
  KMP_DEBUG_ASSERT(root && parent_team);

  // If dyn-var is set, dynamically adjust the number of desired threads,
  // according to the method specified by dynamic_mode.
  new_nthreads = set_nthreads;
  if (!get__dynamic_2(parent_team, master_tid)) {
    ;
  }
#ifdef USE_LOAD_BALANCE
  else if (__kmp_global.g.g_dynamic_mode == dynamic_load_balance) {
    new_nthreads = __kmp_load_balance_nproc(root, set_nthreads);
    if (new_nthreads == 1) {
      KC_TRACE(10, ("__kmp_reserve_threads: T#%d load balance reduced "
                    "reservation to 1 thread\n",
                    master_tid));
      return 1;
    }
    if (new_nthreads < set_nthreads) {
      KC_TRACE(10, ("__kmp_reserve_threads: T#%d load balance reduced "
                    "reservation to %d threads\n",
                    master_tid, new_nthreads));
    }
  }
#endif /* USE_LOAD_BALANCE */
  else if (__kmp_global.g.g_dynamic_mode == dynamic_thread_limit) {
    new_nthreads = __kmp_avail_proc - __kmp_nth +
                   (root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc);
    if (new_nthreads <= 1) {
      KC_TRACE(10, ("__kmp_reserve_threads: T#%d thread limit reduced "
                    "reservation to 1 thread\n",
                    master_tid));
      return 1;
    }
    if (new_nthreads < set_nthreads) {
      KC_TRACE(10, ("__kmp_reserve_threads: T#%d thread limit reduced "
                    "reservation to %d threads\n",
                    master_tid, new_nthreads));
    } else {
      new_nthreads = set_nthreads;
    }
  } else if (__kmp_global.g.g_dynamic_mode == dynamic_random) {
    if (set_nthreads > 2) {
      new_nthreads = __kmp_get_random(parent_team->t.t_threads[master_tid]);
      new_nthreads = (new_nthreads % set_nthreads) + 1;
      if (new_nthreads == 1) {
        KC_TRACE(10, ("__kmp_reserve_threads: T#%d dynamic random reduced "
                      "reservation to 1 thread\n",
                      master_tid));
        return 1;
      }
      if (new_nthreads < set_nthreads) {
        KC_TRACE(10, ("__kmp_reserve_threads: T#%d dynamic random reduced "
                      "reservation to %d threads\n",
                      master_tid, new_nthreads));
      }
    }
  }

  // Respect KMP_DEVICE_THREAD_LIMIT (__kmp_max_nth).
  if (__kmp_nth + new_nthreads -
          (root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc) >
      __kmp_max_nth) {
    int tl_nthreads = __kmp_max_nth - __kmp_nth +
                      (root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc);
    if (tl_nthreads <= 0) {
      tl_nthreads = 1;
    }

    // If dyn-var is false, emit a 1-time warning.
    if (!get__dynamic_2(parent_team, master_tid) && (!__kmp_reserve_warn)) {
      __kmp_reserve_warn = 1;
      __kmp_msg(kmp_ms_warning,
                KMP_MSG(CantFormThrTeam, set_nthreads, tl_nthreads),
                KMP_HNT(Unset_ALL_THREADS), __kmp_msg_null);
    }
    if (tl_nthreads == 1) {
      KC_TRACE(10, ("__kmp_reserve_threads: T#%d KMP_DEVICE_THREAD_LIMIT "
                    "reduced reservation to 1 thread\n",
                    master_tid));
      return 1;
    }
    KC_TRACE(10, ("__kmp_reserve_threads: T#%d KMP_DEVICE_THREAD_LIMIT reduced "
                  "reservation to %d threads\n",
                  master_tid, tl_nthreads));
    new_nthreads = tl_nthreads;
  }

  // Respect OMP_THREAD_LIMIT (__kmp_cg_max_nth).
  if (root->r.r_cg_nthreads + new_nthreads -
          (root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc) >
      __kmp_cg_max_nth) {
    int tl_nthreads = __kmp_cg_max_nth - root->r.r_cg_nthreads +
                      (root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc);
    if (tl_nthreads <= 0) {
      tl_nthreads = 1;
    }

    // If dyn-var is false, emit a 1-time warning.
    if (!get__dynamic_2(parent_team, master_tid) && (!__kmp_reserve_warn)) {
      __kmp_reserve_warn = 1;
      __kmp_msg(kmp_ms_warning,
                KMP_MSG(CantFormThrTeam, set_nthreads, tl_nthreads),
                KMP_HNT(Unset_ALL_THREADS), __kmp_msg_null);
    }
    if (tl_nthreads == 1) {
      KC_TRACE(10, ("__kmp_reserve_threads: T#%d OMP_THREAD_LIMIT "
                    "reduced reservation to 1 thread\n",
                    master_tid));
      return 1;
    }
    KC_TRACE(10, ("__kmp_reserve_threads: T#%d OMP_THREAD_LIMIT reduced "
                  "reservation to %d threads\n",
                  master_tid, tl_nthreads));
    new_nthreads = tl_nthreads;
  }

  // Check if the threads array is large enough, or needs expanding.
  capacity = __kmp_threads_capacity;
  if (TCR_PTR(__kmp_threads[0]) == NULL) {
    --capacity;
  }
  if (__kmp_nth + new_nthreads -
          (root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc) >
      capacity) {
    // Expand the threads array.
    int slotsRequired = __kmp_nth + new_nthreads -
                        (root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc) -
                        capacity;
    int slotsAdded = __kmp_expand_threads(slotsRequired);
    if (slotsAdded < slotsRequired) {
      // The threads array was not expanded enough.
      new_nthreads -= (slotsRequired - slotsAdded);
      KMP_ASSERT(new_nthreads >= 1);

      // If dyn-var is false, emit a 1-time warning.
      if (!get__dynamic_2(parent_team, master_tid) && (!__kmp_reserve_warn)) {
        __kmp_reserve_warn = 1;
        if (__kmp_tp_cached) {
          __kmp_msg(kmp_ms_warning,
                    KMP_MSG(CantFormThrTeam, set_nthreads, new_nthreads),
                    KMP_HNT(Set_ALL_THREADPRIVATE, __kmp_tp_capacity),
                    KMP_HNT(PossibleSystemLimitOnThreads), __kmp_msg_null);
        } else {
          __kmp_msg(kmp_ms_warning,
                    KMP_MSG(CantFormThrTeam, set_nthreads, new_nthreads),
                    KMP_HNT(SystemLimitOnThreads), __kmp_msg_null);
        }
      }
    }
  }

  if (new_nthreads == 1) {
    KC_TRACE(10,
             ("__kmp_reserve_threads: T#%d serializing team after reclaiming "
              "dead roots and rechecking; requested %d threads\n",
              __kmp_get_gtid(), set_nthreads));
  } else {
    KC_TRACE(10, ("__kmp_reserve_threads: T#%d allocating %d threads; requested"
                  " %d threads\n",
                  __kmp_get_gtid(), new_nthreads, set_nthreads));
  }
  return new_nthreads;
}
static void __kmp_fork_team_threads(kmp_root_t *root, kmp_team_t *team,
                                    kmp_info_t *master_th, int master_gtid) {
  int i;
  int use_hot_team;

  KA_TRACE(10, ("__kmp_fork_team_threads: new_nprocs = %d\n", team->t.t_nproc));
  KMP_DEBUG_ASSERT(master_gtid == __kmp_get_gtid());
  KMP_MB();

  /* first, let's setup the master thread */
  master_th->th.th_info.ds.ds_tid = 0;
  master_th->th.th_team = team;
  master_th->th.th_team_nproc = team->t.t_nproc;
  master_th->th.th_team_master = master_th;
  master_th->th.th_team_serialized = FALSE;
  master_th->th.th_dispatch = &team->t.t_dispatch[0];

/* make sure we are not the optimized hot team */
#if KMP_NESTED_HOT_TEAMS
  use_hot_team = 0;
  kmp_hot_team_ptr_t *hot_teams = master_th->th.th_hot_teams;
  if (hot_teams) { // hot teams array is not allocated if
    // KMP_HOT_TEAMS_MAX_LEVEL=0
    int level = team->t.t_active_level - 1; // index in array of hot teams
    if (master_th->th.th_teams_microtask) { // are we inside the teams?
      if (master_th->th.th_teams_size.nteams > 1) {
        ++level; // level was not increased in teams construct for
        // team_of_masters
      }
      if (team->t.t_pkfn != (microtask_t)__kmp_teams_master &&
          master_th->th.th_teams_level == team->t.t_level) {
        ++level; // level was not increased in teams construct for
        // team_of_workers before the parallel
      } // team->t.t_level will be increased inside parallel
    }
    if (level < __kmp_hot_teams_max_level) {
      if (hot_teams[level].hot_team) {
        // hot team has already been allocated for given level
        KMP_DEBUG_ASSERT(hot_teams[level].hot_team == team);
        use_hot_team = 1; // the team is ready to use
      } else {
        use_hot_team = 0; // AC: threads are not allocated yet
        hot_teams[level].hot_team = team; // remember new hot team
        hot_teams[level].hot_team_nth = team->t.t_nproc;
      }
    } else {
      use_hot_team = 0;
    }
  }
#else
  use_hot_team = team == root->r.r_hot_team;
#endif
  if (!use_hot_team) {

    /* install the master thread */
    team->t.t_threads[0] = master_th;
    __kmp_initialize_info(master_th, team, 0, master_gtid);

    /* now, install the worker threads */
    for (i = 1; i < team->t.t_nproc; i++) {

      /* fork or reallocate a new thread and install it in team */
      kmp_info_t *thr = __kmp_allocate_thread(root, team, i);
      team->t.t_threads[i] = thr;
      KMP_DEBUG_ASSERT(thr);
      KMP_DEBUG_ASSERT(thr->th.th_team == team);
      /* align team and thread arrived states */
      KA_TRACE(20, ("__kmp_fork_team_threads: T#%d(%d:%d) init arrived "
                    "T#%d(%d:%d) join =%llu, plain=%llu\n",
                    __kmp_gtid_from_tid(0, team), team->t.t_id, 0,
                    __kmp_gtid_from_tid(i, team), team->t.t_id, i,
                    team->t.t_bar[bs_forkjoin_barrier].b_arrived,
                    team->t.t_bar[bs_plain_barrier].b_arrived));
      thr->th.th_teams_microtask = master_th->th.th_teams_microtask;
      thr->th.th_teams_level = master_th->th.th_teams_level;
      thr->th.th_teams_size = master_th->th.th_teams_size;
      { // Initialize threads' barrier data.
        int b;
        kmp_balign_t *balign = team->t.t_threads[i]->th.th_bar;
        for (b = 0; b < bs_last_barrier; ++b) {
          balign[b].bb.b_arrived = team->t.t_bar[b].b_arrived;
          KMP_DEBUG_ASSERT(balign[b].bb.wait_flag != KMP_BARRIER_PARENT_FLAG);
#if USE_DEBUGGER
          balign[b].bb.b_worker_arrived = team->t.t_bar[b].b_team_arrived;
#endif
        }
      }
    }

#if OMP_40_ENABLED && KMP_AFFINITY_SUPPORTED
    __kmp_partition_places(team);
#endif
  }

  if (__kmp_display_affinity && team->t.t_display_affinity != 1) {
    for (i = 0; i < team->t.t_nproc; i++) {
      kmp_info_t *thr = team->t.t_threads[i];
      if (thr->th.th_prev_num_threads != team->t.t_nproc ||
          thr->th.th_prev_level != team->t.t_level) {
        team->t.t_display_affinity = 1;
        break;
      }
    }
  }

  KMP_MB();
}
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
// Propagate any changes to the floating point control registers out to the
// team.
inline static void propagateFPControl(kmp_team_t *team) {
  if (__kmp_inherit_fp_control) {
    kmp_int16 x87_fpu_control_word;
    kmp_uint32 mxcsr;

    // Get master values of FPU control flags (both X87 and vector)
    __kmp_store_x87_fpu_control_word(&x87_fpu_control_word);
    __kmp_store_mxcsr(&mxcsr);
    mxcsr &= KMP_X86_MXCSR_MASK;

    // Only write the team's copies if they differ, to avoid needless cache
    // traffic on the team structure.
    KMP_CHECK_UPDATE(team->t.t_x87_fpu_control_word, x87_fpu_control_word);
    KMP_CHECK_UPDATE(team->t.t_mxcsr, mxcsr);
    KMP_CHECK_UPDATE(team->t.t_fp_control_saved, TRUE);
  } else {
    // Similarly here. Don't write to this cache line in the team structure
    // unless we have to.
    KMP_CHECK_UPDATE(team->t.t_fp_control_saved, FALSE);
  }
}

// Do the opposite: restore the hardware registers from the values saved in
// the team structure, if they were changed inside the parallel region.
inline static void updateHWFPControl(kmp_team_t *team) {
  if (__kmp_inherit_fp_control && team->t.t_fp_control_saved) {
    kmp_int16 x87_fpu_control_word;
    kmp_uint32 mxcsr;
    __kmp_store_x87_fpu_control_word(&x87_fpu_control_word);
    __kmp_store_mxcsr(&mxcsr);
    mxcsr &= KMP_X86_MXCSR_MASK;

    if (team->t.t_x87_fpu_control_word != x87_fpu_control_word) {
      __kmp_clear_x87_fpu_status_word();
      __kmp_load_x87_fpu_control_word(&team->t.t_x87_fpu_control_word);
    }

    if (team->t.t_mxcsr != mxcsr) {
      __kmp_load_mxcsr(&team->t.t_mxcsr);
    }
  }
}
#else
#define propagateFPControl(x) ((void)0)
#define updateHWFPControl(x) ((void)0)
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

static void __kmp_alloc_argv_entries(int argc, kmp_team_t *team,
                                     int realloc); // forward declaration
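
/* __kmp_serialized_parallel executes a parallel region on a single thread:
   the current thread keeps running, but a (possibly nested) serial team is
   pushed so that team-related queries (level, nproc, ICVs, dispatch buffers)
   behave as if a one-thread team had been forked. */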
void __kmp_serialized_parallel(ident_t *loc, kmp_int32 global_tid) {
  kmp_info_t *this_thr;
  kmp_team_t *serial_team;

  KC_TRACE(10, ("__kmpc_serialized_parallel: called by T#%d\n", global_tid));

  if (!TCR_4(__kmp_init_parallel))
    __kmp_parallel_initialize();

  this_thr = __kmp_threads[global_tid];
  serial_team = this_thr->th.th_serial_team;

  /* utilize the serialized team held by this thread */
  KMP_DEBUG_ASSERT(serial_team);
  KMP_MB();

  if (__kmp_tasking_mode != tskm_immediate_exec) {
    KMP_DEBUG_ASSERT(
        this_thr->th.th_task_team ==
        this_thr->th.th_team->t.t_task_team[this_thr->th.th_task_state]);
    KMP_DEBUG_ASSERT(serial_team->t.t_task_team[this_thr->th.th_task_state] ==
                     NULL);
    KA_TRACE(20, ("__kmpc_serialized_parallel: T#%d pushing task_team %p / "
                  "team %p, new task_team = NULL\n",
                  global_tid, this_thr->th.th_task_team, this_thr->th.th_team));
    this_thr->th.th_task_team = NULL;
  }

  kmp_proc_bind_t proc_bind = this_thr->th.th_set_proc_bind;
  if (this_thr->th.th_current_task->td_icvs.proc_bind == proc_bind_false) {
    proc_bind = proc_bind_false;
  } else if (proc_bind == proc_bind_default) {
    // No proc_bind clause was specified, so use the current value
    // of proc-bind-var for this parallel region.
    proc_bind = this_thr->th.th_current_task->td_icvs.proc_bind;
  }
  // Reset for next parallel region
  this_thr->th.th_set_proc_bind = proc_bind_default;

#if OMPT_SUPPORT
  ompt_data_t ompt_parallel_data = ompt_data_none;
  ompt_data_t *implicit_task_data;
  void *codeptr = OMPT_LOAD_RETURN_ADDRESS(global_tid);
  if (ompt_enabled.enabled &&
      this_thr->th.ompt_thread_info.state != ompt_state_overhead) {

    ompt_task_info_t *parent_task_info;
    parent_task_info = OMPT_CUR_TASK_INFO(this_thr);

    parent_task_info->frame.enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
    if (ompt_enabled.ompt_callback_parallel_begin) {
      int team_size = 1;

      ompt_callbacks.ompt_callback(ompt_callback_parallel_begin)(
          &(parent_task_info->task_data), &(parent_task_info->frame),
          &ompt_parallel_data, team_size, ompt_parallel_invoker_program,
          codeptr);
    }
  }
#endif // OMPT_SUPPORT

  if (this_thr->th.th_team != serial_team) {
    // Nested level will be an index in the nested nthreads array
    int level = this_thr->th.th_team->t.t_level;

    if (serial_team->t.t_serialized) {
      /* this serial team was already used */
      kmp_team_t *new_team;

      __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);

      new_team = __kmp_allocate_team(this_thr->th.th_root, 1, 1,
                                     &this_thr->th.th_current_task->td_icvs,
                                     0 USE_NESTED_HOT_ARG(NULL));
      __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
      KMP_ASSERT(new_team);

      /* setup new serialized team and install it */
      new_team->t.t_threads[0] = this_thr;
      new_team->t.t_parent = this_thr->th.th_team;
      serial_team = new_team;
      this_thr->th.th_serial_team = serial_team;

      KF_TRACE(
          10,
          ("__kmpc_serialized_parallel: T#%d allocated new serial team %p\n",
           global_tid, serial_team));
    } else {
      KF_TRACE(
          10,
          ("__kmpc_serialized_parallel: T#%d reusing cached serial team %p\n",
           global_tid, serial_team));
    }

    /* we have to initialize this serial team */
    KMP_DEBUG_ASSERT(serial_team->t.t_threads);
    KMP_DEBUG_ASSERT(serial_team->t.t_threads[0] == this_thr);
    KMP_DEBUG_ASSERT(this_thr->th.th_team != serial_team);
    serial_team->t.t_ident = loc;
    serial_team->t.t_serialized = 1;
    serial_team->t.t_nproc = 1;
    serial_team->t.t_parent = this_thr->th.th_team;
    serial_team->t.t_sched.sched = this_thr->th.th_team->t.t_sched.sched;
    this_thr->th.th_team = serial_team;
    serial_team->t.t_master_tid = this_thr->th.th_info.ds.ds_tid;

    KF_TRACE(10, ("__kmpc_serialized_parallel: T#%d curtask=%p\n", global_tid,
                  this_thr->th.th_current_task));
    KMP_ASSERT(this_thr->th.th_current_task->td_flags.executing == 1);
    this_thr->th.th_current_task->td_flags.executing = 0;

    __kmp_push_current_task_to_thread(this_thr, serial_team, 0);

    copy_icvs(&this_thr->th.th_current_task->td_icvs,
              &this_thr->th.th_current_task->td_parent->td_icvs);

    // Thread value exists in the nested nthreads array for the next nested
    // level
    if (__kmp_nested_nth.used && (level + 1 < __kmp_nested_nth.used)) {
      this_thr->th.th_current_task->td_icvs.nproc =
          __kmp_nested_nth.nth[level + 1];
    }

    if (__kmp_nested_proc_bind.used &&
        (level + 1 < __kmp_nested_proc_bind.used)) {
      this_thr->th.th_current_task->td_icvs.proc_bind =
          __kmp_nested_proc_bind.bind_types[level + 1];
    }

    serial_team->t.t_pkfn = (microtask_t)(~0); // for the debugger
    this_thr->th.th_info.ds.ds_tid = 0;

    /* set thread cache values */
    this_thr->th.th_team_nproc = 1;
    this_thr->th.th_team_master = this_thr;
    this_thr->th.th_team_serialized = 1;

    serial_team->t.t_level = serial_team->t.t_parent->t.t_level + 1;
    serial_team->t.t_active_level = serial_team->t.t_parent->t.t_active_level;
    serial_team->t.t_def_allocator = this_thr->th.th_def_allocator; // save

    propagateFPControl(serial_team);

    /* check if we need to allocate dispatch buffers stack */
    KMP_DEBUG_ASSERT(serial_team->t.t_dispatch);
    if (!serial_team->t.t_dispatch->th_disp_buffer) {
      serial_team->t.t_dispatch->th_disp_buffer =
          (dispatch_private_info_t *)__kmp_allocate(
              sizeof(dispatch_private_info_t));
    }
    this_thr->th.th_dispatch = serial_team->t.t_dispatch;

    KMP_MB();
  } else {
    /* this serialized team is already being used,
       that's fine, just add another nested level */
    KMP_DEBUG_ASSERT(this_thr->th.th_team == serial_team);
    KMP_DEBUG_ASSERT(serial_team->t.t_threads);
    KMP_DEBUG_ASSERT(serial_team->t.t_threads[0] == this_thr);
    ++serial_team->t.t_serialized;
    this_thr->th.th_team_serialized = serial_team->t.t_serialized;

    // Nested level will be an index in the nested nthreads array
    int level = this_thr->th.th_team->t.t_level;
    // Thread value exists in the nested nthreads array for the next nested
    // level
    if (__kmp_nested_nth.used && (level + 1 < __kmp_nested_nth.used)) {
      this_thr->th.th_current_task->td_icvs.nproc =
          __kmp_nested_nth.nth[level + 1];
    }
    serial_team->t.t_level++;
    KF_TRACE(10, ("__kmpc_serialized_parallel: T#%d increasing nesting level "
                  "of serial team %p to %d\n",
                  global_tid, serial_team, serial_team->t.t_level));

    /* allocate/push dispatch buffers stack */
    KMP_DEBUG_ASSERT(serial_team->t.t_dispatch);
    {
      dispatch_private_info_t *disp_buffer =
          (dispatch_private_info_t *)__kmp_allocate(
              sizeof(dispatch_private_info_t));
      disp_buffer->next = serial_team->t.t_dispatch->th_disp_buffer;
      serial_team->t.t_dispatch->th_disp_buffer = disp_buffer;
    }
    this_thr->th.th_dispatch = serial_team->t.t_dispatch;

    KMP_MB();
  }
  KMP_CHECK_UPDATE(serial_team->t.t_cancel_request, cancel_noreq);

  // Perform the display affinity functionality for serialized parallel
  // regions.
  if (__kmp_display_affinity) {
    if (this_thr->th.th_prev_level != serial_team->t.t_level ||
        this_thr->th.th_prev_num_threads != 1) {
      // NULL means use the affinity-format-var ICV
      __kmp_aux_display_affinity(global_tid, NULL);
      this_thr->th.th_prev_level = serial_team->t.t_level;
      this_thr->th.th_prev_num_threads = 1;
    }
  }

  if (__kmp_env_consistency_check)
    __kmp_push_parallel(global_tid, NULL);
#if OMPT_SUPPORT
  serial_team->t.ompt_team_info.master_return_address = codeptr;
  if (ompt_enabled.enabled &&
      this_thr->th.ompt_thread_info.state != ompt_state_overhead) {
    OMPT_CUR_TASK_INFO(this_thr)->frame.exit_frame.ptr =
        OMPT_GET_FRAME_ADDRESS(0);

    ompt_lw_taskteam_t lw_taskteam;
    __ompt_lw_taskteam_init(&lw_taskteam, this_thr, global_tid,
                            &ompt_parallel_data, codeptr);

    __ompt_lw_taskteam_link(&lw_taskteam, this_thr, 1);
    // don't use lw_taskteam after linking; content was swapped

    /* OMPT implicit task begin */
    implicit_task_data = OMPT_CUR_TASK_DATA(this_thr);
    if (ompt_enabled.ompt_callback_implicit_task) {
      ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
          ompt_scope_begin, OMPT_CUR_TEAM_DATA(this_thr),
          OMPT_CUR_TASK_DATA(this_thr), 1, __kmp_tid_from_gtid(global_tid),
          ompt_task_implicit);
      OMPT_CUR_TASK_INFO(this_thr)->thread_num =
          __kmp_tid_from_gtid(global_tid);
    }

    /* OMPT state */
    this_thr->th.ompt_thread_info.state = ompt_state_work_parallel;
    OMPT_CUR_TASK_INFO(this_thr)->frame.exit_frame.ptr =
        OMPT_GET_FRAME_ADDRESS(0);
  }
#endif
}
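
/* __kmp_fork_call is the main entry point for starting a parallel region.
   In outline: it decides whether to serialize (nthreads == 1), reserves
   threads under __kmp_forkjoin_lock, allocates or reuses a team, copies the
   microtask arguments, forks the team threads, and finally invokes the
   microtask for the master thread.  The boolean result is used by the GNU
   entry points to decide whether they still need to run the region body
   themselves. */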
int __kmp_fork_call(ident_t *loc, int gtid,
                    enum fork_context_e call_context, // Intel, GNU, ...
                    kmp_int32 argc, microtask_t microtask, launch_t invoker,
/* In GNU C++ varargs macros can have the va_list argument omitted */
#if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX
                    va_list *ap
#else
                    va_list ap
#endif
                    ) {
  void **argv;
  int i;
  int master_tid;
  int master_this_cons;
  kmp_team_t *team;
  kmp_team_t *parent_team;
  kmp_info_t *master_th;
  kmp_root_t *root;
  int nthreads;
  int master_active;
  int master_set_numthreads;
  int level;
  int active_level;
  int teams_level;
#if KMP_NESTED_HOT_TEAMS
  kmp_hot_team_ptr_t **p_hot_teams;
#endif
  KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(KMP_fork_call);

  KA_TRACE(20, ("__kmp_fork_call: enter T#%d\n", gtid));
  if (__kmp_stkpadding > 0 && __kmp_root[gtid] != NULL) {
    /* Some systems prefer the stack for the root thread(s) to start with some
       gap from the parent stack to prevent false sharing. */
    void *dummy = KMP_ALLOCA(__kmp_stkpadding);
    /* These 2 lines below are so this does not get optimized out */
    if (__kmp_stkpadding > KMP_MAX_STKPADDING)
      __kmp_stkpadding += (short)((kmp_int64)dummy);
  }

  /* initialize if needed */
  if (!TCR_4(__kmp_init_parallel))
    __kmp_parallel_initialize();

  /* setup current data */
  master_th = __kmp_threads[gtid];
  parent_team = master_th->th.th_team;
  master_tid = master_th->th.th_info.ds.ds_tid;
  master_this_cons = master_th->th.th_local.this_construct;
  root = master_th->th.th_root;
  master_active = root->r.r_active;
  master_set_numthreads = master_th->th.th_set_nproc;

  ompt_data_t ompt_parallel_data = ompt_data_none;
  ompt_data_t *parent_task_data;
  ompt_frame_t *ompt_frame;
  ompt_data_t *implicit_task_data;
  void *return_address = NULL;

  if (ompt_enabled.enabled) {
    __ompt_get_task_info_internal(0, NULL, &parent_task_data, &ompt_frame,
                                  NULL, NULL);
    return_address = OMPT_LOAD_RETURN_ADDRESS(gtid);
  }

  // Nested level will be an index in the nested nthreads array
  level = parent_team->t.t_level;
  // used to launch non-serial teams even if nested is not allowed
  active_level = parent_team->t.t_active_level;
  // needed to check nesting inside the teams
  teams_level = master_th->th.th_teams_level;
#if KMP_NESTED_HOT_TEAMS
  p_hot_teams = &master_th->th.th_hot_teams;
  if (*p_hot_teams == NULL && __kmp_hot_teams_max_level > 0) {
    *p_hot_teams = (kmp_hot_team_ptr_t *)__kmp_allocate(
        sizeof(kmp_hot_team_ptr_t) * __kmp_hot_teams_max_level);
    (*p_hot_teams)[0].hot_team = root->r.r_hot_team;
    // it is either actual or not needed (when active_level > 0)
    (*p_hot_teams)[0].hot_team_nth = 1;
  }
#endif

  if (ompt_enabled.enabled) {
    if (ompt_enabled.ompt_callback_parallel_begin) {
      int team_size = master_set_numthreads
                          ? master_set_numthreads
                          : get__nproc_2(parent_team, master_tid);
      ompt_callbacks.ompt_callback(ompt_callback_parallel_begin)(
          parent_task_data, ompt_frame, &ompt_parallel_data, team_size,
          OMPT_INVOKER(call_context), return_address);
    }
    master_th->th.ompt_thread_info.state = ompt_state_overhead;
  }

  master_th->th.th_ident = loc;
  if (master_th->th.th_teams_microtask && ap &&
      microtask != (microtask_t)__kmp_teams_master && level == teams_level) {
    // AC: This is start of parallel that is nested inside teams construct.
    // The team is actual (hot), all workers are ready at the fork barrier.
    // No lock needed to initialize the team a bit, then free workers.
    parent_team->t.t_ident = loc;
    __kmp_alloc_argv_entries(argc, parent_team, TRUE);
    parent_team->t.t_argc = argc;
    argv = (void **)parent_team->t.t_argv;
    for (i = argc - 1; i >= 0; --i)
#if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX
      *argv++ = va_arg(*ap, void *);
#else
      *argv++ = va_arg(ap, void *);
#endif
    // Increment our nested depth levels, but not increase the serialization
    if (parent_team == master_th->th.th_serial_team) {
      // AC: we are in serialized parallel
      __kmpc_serialized_parallel(loc, gtid);
      KMP_DEBUG_ASSERT(parent_team->t.t_serialized > 1);
      // AC: need this in order for enquiry functions to work correctly,
      // will restore at join time
      parent_team->t.t_serialized--;

      void *dummy;
      void **exit_runtime_p;

      ompt_lw_taskteam_t lw_taskteam;

      if (ompt_enabled.enabled) {
        __ompt_lw_taskteam_init(&lw_taskteam, master_th, gtid,
                                &ompt_parallel_data, return_address);
        exit_runtime_p = &(lw_taskteam.ompt_task_info.frame.exit_frame.ptr);

        __ompt_lw_taskteam_link(&lw_taskteam, master_th, 0);
        // don't use lw_taskteam after linking; content was swapped

        /* OMPT implicit task begin */
        implicit_task_data = OMPT_CUR_TASK_DATA(master_th);
        if (ompt_enabled.ompt_callback_implicit_task) {
          ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
              ompt_scope_begin, OMPT_CUR_TEAM_DATA(master_th),
              implicit_task_data, 1, __kmp_tid_from_gtid(gtid),
              ompt_task_implicit);
          OMPT_CUR_TASK_INFO(master_th)->thread_num =
              __kmp_tid_from_gtid(gtid);
        }

        /* OMPT state */
        master_th->th.ompt_thread_info.state = ompt_state_work_parallel;
      } else {
        exit_runtime_p = &dummy;
      }

      {
        KMP_TIME_PARTITIONED_BLOCK(OMP_parallel);
        KMP_SET_THREAD_STATE_BLOCK(IMPLICIT_TASK);
        __kmp_invoke_microtask(microtask, gtid, 0, argc, parent_team->t.t_argv,
                               exit_runtime_p);
      }

      *exit_runtime_p = NULL;
      if (ompt_enabled.enabled) {
        OMPT_CUR_TASK_INFO(master_th)->frame.exit_frame = ompt_data_none;
        if (ompt_enabled.ompt_callback_implicit_task) {
          ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
              ompt_scope_end, NULL, implicit_task_data, 1,
              OMPT_CUR_TASK_INFO(master_th)->thread_num, ompt_task_implicit);
        }
        __ompt_lw_taskteam_unlink(master_th);

        if (ompt_enabled.ompt_callback_parallel_end) {
          ompt_callbacks.ompt_callback(ompt_callback_parallel_end)(
              OMPT_CUR_TEAM_DATA(master_th), OMPT_CUR_TASK_DATA(master_th),
              OMPT_INVOKER(call_context), return_address);
        }
        master_th->th.ompt_thread_info.state = ompt_state_overhead;
      }
      return TRUE;
    }

    parent_team->t.t_pkfn = microtask;
    parent_team->t.t_invoke = invoker;
    KMP_ATOMIC_INC(&root->r.r_in_parallel);
    parent_team->t.t_active_level++;
    parent_team->t.t_level++;
    parent_team->t.t_def_allocator = master_th->th.th_def_allocator; // save

    /* Change number of threads in the team if requested */
    if (master_set_numthreads) { // The parallel has num_threads clause
      if (master_set_numthreads < master_th->th.th_teams_size.nth) {
        // AC: only can reduce number of threads dynamically, can't increase
        kmp_info_t **other_threads = parent_team->t.t_threads;
        parent_team->t.t_nproc = master_set_numthreads;
        for (i = 0; i < master_set_numthreads; ++i) {
          other_threads[i]->th.th_team_nproc = master_set_numthreads;
        }
        // Keep extra threads hot in the team for possible next parallels
      }
      master_th->th.th_set_nproc = 0;
    }

#if USE_DEBUGGER
    if (__kmp_debugging) { // Let debugger override number of threads.
      int nth = __kmp_omp_num_threads(loc);
      if (nth > 0) { // 0 means debugger doesn't want to change num threads
        master_set_numthreads = nth;
      }
    }
#endif

    KF_TRACE(10, ("__kmp_fork_call: before internal fork: root=%p, team=%p, "
                  "master_th=%p, gtid=%d\n",
                  root, parent_team, master_th, gtid));
    __kmp_internal_fork(loc, gtid, parent_team);
    KF_TRACE(10, ("__kmp_fork_call: after internal fork: root=%p, team=%p, "
                  "master_th=%p, gtid=%d\n",
                  root, parent_team, master_th, gtid));

    /* Invoke microtask for MASTER thread */
    KA_TRACE(20, ("__kmp_fork_call: T#%d(%d:0) invoke microtask = %p\n", gtid,
                  parent_team->t.t_id, parent_team->t.t_pkfn));

    if (!parent_team->t.t_invoke(gtid)) {
      KMP_ASSERT2(0, "cannot invoke microtask for MASTER thread");
    }
    KA_TRACE(20, ("__kmp_fork_call: T#%d(%d:0) done microtask = %p\n", gtid,
                  parent_team->t.t_id, parent_team->t.t_pkfn));
    KMP_MB(); /* Flush all pending memory write invalidates. */

    KA_TRACE(20, ("__kmp_fork_call: parallel exit T#%d\n", gtid));

    return TRUE;
  } // Parallel closely nested in teams construct
  if (__kmp_tasking_mode != tskm_immediate_exec) {
    KMP_DEBUG_ASSERT(master_th->th.th_task_team ==
                     parent_team->t.t_task_team[master_th->th.th_task_state]);
  }

  /* determine how many new threads we can use */
  if (parent_team->t.t_active_level >=
      master_th->th.th_current_task->td_icvs.max_active_levels) {
    nthreads = 1;
  } else {
    int enter_teams = ((ap == NULL && active_level == 0) ||
                       (ap && teams_level > 0 && teams_level == level));
    nthreads = master_set_numthreads
                   ? master_set_numthreads
                   : get__nproc_2(parent_team, master_tid);

    // Check if we need to take the forkjoin lock (no need for a serialized
    // parallel out of teams construct).
    if (nthreads > 1) {
      if ((!get__nested(master_th) &&
           (root->r.r_in_parallel && !enter_teams)) ||
          (__kmp_library == library_serial)) {
        KC_TRACE(10, ("__kmp_fork_call: T#%d serializing team; requested %d"
                      " threads\n",
                      gtid, nthreads));
        nthreads = 1;
      }
    }
    if (nthreads > 1) {
      /* determine how many new threads we can use */
      __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);

      nthreads = __kmp_reserve_threads(root, parent_team, master_tid, nthreads);
      if (nthreads == 1) {
        // Free the lock for single-thread execution here; for multi-thread
        // execution it will be freed later, after the team of threads has
        // been created and initialized.
        __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
      }
    }
  }
  KMP_DEBUG_ASSERT(nthreads > 0);

  // If we temporarily changed the set number of threads then restore it now
  master_th->th.th_set_nproc = 0;
  if (nthreads == 1) {
#if KMP_OS_LINUX &&                                                            \
    (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
    void *args[argc];
#else
    void **args = (void **)KMP_ALLOCA(argc * sizeof(void *));
#endif

    KA_TRACE(20,
             ("__kmp_fork_call: T#%d serializing parallel region\n", gtid));

    __kmpc_serialized_parallel(loc, gtid);

    if (call_context == fork_context_intel) {
      /* TODO this sucks, use the compiler itself to pass args! :) */
      master_th->th.th_serial_team->t.t_ident = loc;
      if (!ap) {
        // revert change made in __kmpc_serialized_parallel()
        master_th->th.th_serial_team->t.t_level--;
        // Get args from parent team for teams construct

        void *dummy;
        void **exit_runtime_p;
        ompt_task_info_t *task_info;

        ompt_lw_taskteam_t lw_taskteam;

        if (ompt_enabled.enabled) {
          __ompt_lw_taskteam_init(&lw_taskteam, master_th, gtid,
                                  &ompt_parallel_data, return_address);

          __ompt_lw_taskteam_link(&lw_taskteam, master_th, 0);
          // don't use lw_taskteam after linking; content was swapped

          task_info = OMPT_CUR_TASK_INFO(master_th);
          exit_runtime_p = &(task_info->frame.exit_frame.ptr);
          if (ompt_enabled.ompt_callback_implicit_task) {
            ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
                ompt_scope_begin, OMPT_CUR_TEAM_DATA(master_th),
                &(task_info->task_data), 1, __kmp_tid_from_gtid(gtid),
                ompt_task_implicit);
            OMPT_CUR_TASK_INFO(master_th)->thread_num =
                __kmp_tid_from_gtid(gtid);
          }

          /* OMPT state */
          master_th->th.ompt_thread_info.state = ompt_state_work_parallel;
        } else {
          exit_runtime_p = &dummy;
        }

        {
          KMP_TIME_PARTITIONED_BLOCK(OMP_parallel);
          KMP_SET_THREAD_STATE_BLOCK(IMPLICIT_TASK);
          __kmp_invoke_microtask(microtask, gtid, 0, argc,
                                 parent_team->t.t_argv, exit_runtime_p);
        }

        if (ompt_enabled.enabled) {
          exit_runtime_p = NULL;
          if (ompt_enabled.ompt_callback_implicit_task) {
            ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
                ompt_scope_end, NULL, &(task_info->task_data), 1,
                OMPT_CUR_TASK_INFO(master_th)->thread_num, ompt_task_implicit);
          }

          __ompt_lw_taskteam_unlink(master_th);
          if (ompt_enabled.ompt_callback_parallel_end) {
            ompt_callbacks.ompt_callback(ompt_callback_parallel_end)(
                OMPT_CUR_TEAM_DATA(master_th), parent_task_data,
                OMPT_INVOKER(call_context), return_address);
          }
          master_th->th.ompt_thread_info.state = ompt_state_overhead;
        }
      } else if (microtask == (microtask_t)__kmp_teams_master) {
        KMP_DEBUG_ASSERT(master_th->th.th_team ==
                         master_th->th.th_serial_team);
        team = master_th->th.th_team;
        team->t.t_invoke = invoker;
        __kmp_alloc_argv_entries(argc, team, TRUE);
        team->t.t_argc = argc;
        argv = (void **)team->t.t_argv;
        if (ap) {
          for (i = argc - 1; i >= 0; --i)
#if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX
            *argv++ = va_arg(*ap, void *);
#else
            *argv++ = va_arg(ap, void *);
#endif
        } else {
          for (i = 0; i < argc; ++i)
            // Get args from parent team for teams construct
            argv[i] = parent_team->t.t_argv[i];
        }
        // AC: revert change made in __kmpc_serialized_parallel()
        //     because initial code in teams should have level = 0
        team->t.t_level--;
        // AC: call special invoker for outer "parallel" of teams construct
        invoker(gtid);
      } else {
        argv = args;
        for (i = argc - 1; i >= 0; --i)
#if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX
          *argv++ = va_arg(*ap, void *);
#else
          *argv++ = va_arg(ap, void *);
#endif
        KMP_MB();

        void *dummy;
        void **exit_runtime_p;
        ompt_task_info_t *task_info;

        ompt_lw_taskteam_t lw_taskteam;

        if (ompt_enabled.enabled) {
          __ompt_lw_taskteam_init(&lw_taskteam, master_th, gtid,
                                  &ompt_parallel_data, return_address);
          __ompt_lw_taskteam_link(&lw_taskteam, master_th, 0);
          // don't use lw_taskteam after linking; content was swapped
          task_info = OMPT_CUR_TASK_INFO(master_th);
          exit_runtime_p = &(task_info->frame.exit_frame.ptr);

          /* OMPT implicit task begin */
          implicit_task_data = OMPT_CUR_TASK_DATA(master_th);
          if (ompt_enabled.ompt_callback_implicit_task) {
            ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
                ompt_scope_begin, OMPT_CUR_TEAM_DATA(master_th),
                implicit_task_data, 1, __kmp_tid_from_gtid(gtid),
                ompt_task_implicit);
            OMPT_CUR_TASK_INFO(master_th)->thread_num =
                __kmp_tid_from_gtid(gtid);
          }

          /* OMPT state */
          master_th->th.ompt_thread_info.state = ompt_state_work_parallel;
        } else {
          exit_runtime_p = &dummy;
        }

        {
          KMP_TIME_PARTITIONED_BLOCK(OMP_parallel);
          KMP_SET_THREAD_STATE_BLOCK(IMPLICIT_TASK);
          __kmp_invoke_microtask(microtask, gtid, 0, argc, args,
                                 exit_runtime_p);
        }

        if (ompt_enabled.enabled) {
          *exit_runtime_p = NULL;
          if (ompt_enabled.ompt_callback_implicit_task) {
            ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
                ompt_scope_end, NULL, &(task_info->task_data), 1,
                OMPT_CUR_TASK_INFO(master_th)->thread_num, ompt_task_implicit);
          }

          ompt_parallel_data = *OMPT_CUR_TEAM_DATA(master_th);
          __ompt_lw_taskteam_unlink(master_th);
          if (ompt_enabled.ompt_callback_parallel_end) {
            ompt_callbacks.ompt_callback(ompt_callback_parallel_end)(
                &ompt_parallel_data, parent_task_data,
                OMPT_INVOKER(call_context), return_address);
          }
          master_th->th.ompt_thread_info.state = ompt_state_overhead;
        }
      }
    } else if (call_context == fork_context_gnu) {
      ompt_lw_taskteam_t lwt;
      __ompt_lw_taskteam_init(&lwt, master_th, gtid, &ompt_parallel_data,
                              return_address);

      lwt.ompt_task_info.frame.exit_frame = ompt_data_none;
      __ompt_lw_taskteam_link(&lwt, master_th, 1);
      // don't use lw_taskteam after linking; content was swapped

      // we were called from GNU native code
      KA_TRACE(20, ("__kmp_fork_call: T#%d serial exit\n", gtid));
      return FALSE;
    } else {
      KMP_ASSERT2(call_context < fork_context_last,
                  "__kmp_fork_call: unknown fork_context parameter");
    }

    KA_TRACE(20, ("__kmp_fork_call: T#%d serial exit\n", gtid));
    KMP_MB();
    return FALSE;
  }
  // GEH: only modify the executing flag in the case when not serialized;
  //      the serialized case is handled in kmpc_serialized_parallel
  KF_TRACE(10, ("__kmp_fork_call: parent_team_aclevel=%d, master_th=%p, "
                "curtask=%p, curtask_max_aclevel=%d\n",
                parent_team->t.t_active_level, master_th,
                master_th->th.th_current_task,
                master_th->th.th_current_task->td_icvs.max_active_levels));
  master_th->th.th_current_task->td_flags.executing = 0;

  if (!master_th->th.th_teams_microtask || level > teams_level) {
    /* Increment our nested depth level */
    KMP_ATOMIC_INC(&root->r.r_in_parallel);
  }

  // See if we need to make a copy of the ICVs.
  int nthreads_icv = master_th->th.th_current_task->td_icvs.nproc;
  if ((level + 1 < __kmp_nested_nth.used) &&
      (__kmp_nested_nth.nth[level + 1] != nthreads_icv)) {
    nthreads_icv = __kmp_nested_nth.nth[level + 1];
  } else {
    nthreads_icv = 0; // don't update
  }

  // Figure out the proc_bind_policy for the new team.
  kmp_proc_bind_t proc_bind = master_th->th.th_set_proc_bind;
  kmp_proc_bind_t proc_bind_icv =
      proc_bind_default; // proc_bind_default means don't update
  if (master_th->th.th_current_task->td_icvs.proc_bind == proc_bind_false) {
    proc_bind = proc_bind_false;
  } else {
    if (proc_bind == proc_bind_default) {
      // No proc_bind clause specified; use current proc-bind-var for this
      // parallel region.
      proc_bind = master_th->th.th_current_task->td_icvs.proc_bind;
    }
    // Figure the value of proc-bind-var for the child threads.
    if ((level + 1 < __kmp_nested_proc_bind.used) &&
        (__kmp_nested_proc_bind.bind_types[level + 1] !=
         master_th->th.th_current_task->td_icvs.proc_bind)) {
      proc_bind_icv = __kmp_nested_proc_bind.bind_types[level + 1];
    }
  }

  // Reset for next parallel region
  master_th->th.th_set_proc_bind = proc_bind_default;

  if ((nthreads_icv > 0) || (proc_bind_icv != proc_bind_default)) {
    kmp_internal_control_t new_icvs;
    copy_icvs(&new_icvs, &master_th->th.th_current_task->td_icvs);
    new_icvs.next = NULL;
    if (nthreads_icv > 0) {
      new_icvs.nproc = nthreads_icv;
    }
    if (proc_bind_icv != proc_bind_default) {
      new_icvs.proc_bind = proc_bind_icv;
    }

    /* allocate a new parallel team */
    KF_TRACE(10, ("__kmp_fork_call: before __kmp_allocate_team\n"));
    team = __kmp_allocate_team(root, nthreads, nthreads,
                               &new_icvs, argc USE_NESTED_HOT_ARG(master_th));
  } else {
    /* allocate a new parallel team */
    KF_TRACE(10, ("__kmp_fork_call: before __kmp_allocate_team\n"));
    team = __kmp_allocate_team(root, nthreads, nthreads,
                               &master_th->th.th_current_task->td_icvs,
                               argc USE_NESTED_HOT_ARG(master_th));
  }
  KF_TRACE(
      10, ("__kmp_fork_call: after __kmp_allocate_team - team = %p\n", team));

  /* setup the new team */
  KMP_CHECK_UPDATE(team->t.t_master_tid, master_tid);
  KMP_CHECK_UPDATE(team->t.t_master_this_cons, master_this_cons);
  KMP_CHECK_UPDATE(team->t.t_ident, loc);
  KMP_CHECK_UPDATE(team->t.t_parent, parent_team);
  KMP_CHECK_UPDATE_SYNC(team->t.t_pkfn, microtask);
  KMP_CHECK_UPDATE_SYNC(team->t.ompt_team_info.master_return_address,
                        return_address);
  KMP_CHECK_UPDATE(team->t.t_invoke, invoker); // TODO move to root, maybe
  if (!master_th->th.th_teams_microtask || level > teams_level) {
    int new_level = parent_team->t.t_level + 1;
    KMP_CHECK_UPDATE(team->t.t_level, new_level);
    new_level = parent_team->t.t_active_level + 1;
    KMP_CHECK_UPDATE(team->t.t_active_level, new_level);
  } else {
    // AC: Do not increase parallel level at start of the teams construct
    int new_level = parent_team->t.t_level;
    KMP_CHECK_UPDATE(team->t.t_level, new_level);
    new_level = parent_team->t.t_active_level;
    KMP_CHECK_UPDATE(team->t.t_active_level, new_level);
  }
  kmp_r_sched_t new_sched = get__sched_2(parent_team, master_tid);
  // set master's schedule as new run-time schedule
  KMP_CHECK_UPDATE(team->t.t_sched.sched, new_sched.sched);

  KMP_CHECK_UPDATE(team->t.t_cancel_request, cancel_noreq);
  KMP_CHECK_UPDATE(team->t.t_def_allocator, master_th->th.th_def_allocator);

  // Update the floating point rounding in the team if required.
  propagateFPControl(team);

  if (__kmp_tasking_mode != tskm_immediate_exec) {
    // Set master's task team to team's task team. Unless this is hot team, it
    // should be NULL.
    KMP_DEBUG_ASSERT(master_th->th.th_task_team ==
                     parent_team->t.t_task_team[master_th->th.th_task_state]);
    KA_TRACE(20, ("__kmp_fork_call: Master T#%d pushing task_team %p / team "
                  "%p, new task_team %p / team %p\n",
                  __kmp_gtid_from_thread(master_th),
                  master_th->th.th_task_team, parent_team,
                  team->t.t_task_team[master_th->th.th_task_state], team));

    if (active_level || master_th->th.th_task_team) {
      // Take a memo of master's task_state
      KMP_DEBUG_ASSERT(master_th->th.th_task_state_memo_stack);
      if (master_th->th.th_task_state_top >=
          master_th->th.th_task_state_stack_sz) { // increase size
        kmp_uint32 new_size = 2 * master_th->th.th_task_state_stack_sz;
        kmp_uint8 *old_stack, *new_stack;
        kmp_uint32 i;
        new_stack = (kmp_uint8 *)__kmp_allocate(new_size);
        for (i = 0; i < master_th->th.th_task_state_stack_sz; ++i) {
          new_stack[i] = master_th->th.th_task_state_memo_stack[i];
        }
        for (i = master_th->th.th_task_state_stack_sz; i < new_size;
             ++i) { // zero-init rest of stack
          new_stack[i] = 0;
        }
        old_stack = master_th->th.th_task_state_memo_stack;
        master_th->th.th_task_state_memo_stack = new_stack;
        master_th->th.th_task_state_stack_sz = new_size;
        __kmp_free(old_stack);
      }
      // Store master's task_state on stack
      master_th->th
          .th_task_state_memo_stack[master_th->th.th_task_state_top] =
          master_th->th.th_task_state;
      master_th->th.th_task_state_top++;
#if KMP_NESTED_HOT_TEAMS
      if (master_th->th.th_hot_teams &&
          active_level < __kmp_hot_teams_max_level &&
          team == master_th->th.th_hot_teams[active_level].hot_team) {
        // Restore master's nested state if nested hot team
        master_th->th.th_task_state =
            master_th->th
                .th_task_state_memo_stack[master_th->th.th_task_state_top];
      } else {
#endif
        master_th->th.th_task_state = 0;
#if KMP_NESTED_HOT_TEAMS
      }
#endif
    }
#if !KMP_NESTED_HOT_TEAMS
    KMP_DEBUG_ASSERT((master_th->th.th_task_team == NULL) ||
                     (team == root->r.r_hot_team));
#endif
  }

  KA_TRACE(
      20,
      ("__kmp_fork_call: T#%d(%d:%d)->(%d:0) created a team of %d threads\n",
       gtid, parent_team->t.t_id, team->t.t_master_tid, team->t.t_id,
       team->t.t_nproc));
  KMP_DEBUG_ASSERT(team != root->r.r_hot_team ||
                   (team->t.t_master_tid == 0 &&
                    (team->t.t_parent == root->r.r_root_team ||
                     team->t.t_parent->t.t_serialized)));
  KMP_MB();

  /* now, setup the arguments */
  argv = (void **)team->t.t_argv;
  if (ap) {
    for (i = argc - 1; i >= 0; --i) {
#if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX
      void *new_argv = va_arg(*ap, void *);
#else
      void *new_argv = va_arg(ap, void *);
#endif
      KMP_CHECK_UPDATE(*argv, new_argv);
      argv++;
    }
  } else {
    for (i = 0; i < argc; ++i) {
      // Get args from parent team for teams construct
      KMP_CHECK_UPDATE(argv[i], team->t.t_parent->t.t_argv[i]);
    }
  }

  /* now actually fork the threads */
  KMP_CHECK_UPDATE(team->t.t_master_active, master_active);
  if (!root->r.r_active) // Only do assignment if it prevents cache ping-pong
    root->r.r_active = TRUE;

  __kmp_fork_team_threads(root, team, master_th, gtid);
  __kmp_setup_icv_copy(team, nthreads,
                       &master_th->th.th_current_task->td_icvs, loc);

  master_th->th.ompt_thread_info.state = ompt_state_work_parallel;

  __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);

  if (team->t.t_active_level == 1 // only report frames at level 1
      && !master_th->th.th_teams_microtask) { // not in teams construct
    if ((__itt_frame_submit_v3_ptr || KMP_ITT_DEBUG) &&
        (__kmp_forkjoin_frames_mode == 3 ||
         __kmp_forkjoin_frames_mode == 1)) {
      kmp_uint64 tmp_time = 0;
      if (__itt_get_timestamp_ptr)
        tmp_time = __itt_get_timestamp();
      // Internal fork - report frame begin
      master_th->th.th_frame_time = tmp_time;
      if (__kmp_forkjoin_frames_mode == 3)
        team->t.t_region_time = tmp_time;
    } else if ((__itt_frame_begin_v3_ptr || KMP_ITT_DEBUG) &&
               __kmp_forkjoin_frames && !__kmp_forkjoin_frames_mode) {
      // Mark start of "parallel" region for Intel(R) VTune(TM) analyzer.
      __kmp_itt_region_forking(gtid, team->t.t_nproc, 0);
    }
  }

  /* now go on and do the work */
  KMP_DEBUG_ASSERT(team == __kmp_threads[gtid]->th.th_team);
  KMP_MB();
  KF_TRACE(10,
           ("__kmp_internal_fork : root=%p, team=%p, master_th=%p, gtid=%d\n",
            root, team, master_th, gtid));

  if (__itt_stack_caller_create_ptr) {
    team->t.t_stack_id =
        __kmp_itt_stack_caller_create(); // create new stack stitching id
    // before entering fork barrier
  }

  __kmp_internal_fork(loc, gtid, team);
  KF_TRACE(10, ("__kmp_internal_fork : after : root=%p, team=%p, "
                "master_th=%p, gtid=%d\n",
                root, team, master_th, gtid));

  if (call_context == fork_context_gnu) {
    KA_TRACE(20, ("__kmp_fork_call: parallel exit T#%d\n", gtid));
    return TRUE;
  }

  /* Invoke microtask for MASTER thread */
  KA_TRACE(20, ("__kmp_fork_call: T#%d(%d:0) invoke microtask = %p\n", gtid,
                team->t.t_id, team->t.t_pkfn));

  if (!team->t.t_invoke(gtid)) {
    KMP_ASSERT2(0, "cannot invoke microtask for MASTER thread");
  }
  KA_TRACE(20, ("__kmp_fork_call: T#%d(%d:0) done microtask = %p\n", gtid,
                team->t.t_id, team->t.t_pkfn));
  KMP_MB(); /* Flush all pending memory write invalidates. */

  KA_TRACE(20, ("__kmp_fork_call: parallel exit T#%d\n", gtid));

  if (ompt_enabled.enabled) {
    master_th->th.ompt_thread_info.state = ompt_state_overhead;
  }

  return TRUE;
}
static inline void __kmp_join_restore_state(kmp_info_t *thread,
                                            kmp_team_t *team) {
  // restore state outside the region
  thread->th.ompt_thread_info.state =
      ((team->t.t_serialized) ? ompt_state_work_serial
                              : ompt_state_work_parallel);
}

static inline void __kmp_join_ompt(int gtid, kmp_info_t *thread,
                                   kmp_team_t *team, ompt_data_t *parallel_data,
                                   fork_context_e fork_context, void *codeptr) {
  ompt_task_info_t *task_info = __ompt_get_task_info_object(0);
  if (ompt_enabled.ompt_callback_parallel_end) {
    ompt_callbacks.ompt_callback(ompt_callback_parallel_end)(
        parallel_data, &(task_info->task_data), OMPT_INVOKER(fork_context),
        codeptr);
  }

  task_info->frame.enter_frame = ompt_data_none;
  __kmp_join_restore_state(thread, team);
}
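
/* __kmp_join_call is the counterpart of __kmp_fork_call: it waits for the
   workers at the join barrier, pops nesting/ICV state, shrinks or releases
   the team (hot teams may stay allocated), and restores the master thread's
   bookkeeping so it continues in the parent team. */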
void __kmp_join_call(ident_t *loc, int gtid,
                     enum fork_context_e fork_context, int exit_teams) {
  KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(KMP_join_call);
  kmp_team_t *team;
  kmp_team_t *parent_team;
  kmp_info_t *master_th;
  kmp_root_t *root;
  int master_active;
  int i;

  KA_TRACE(20, ("__kmp_join_call: enter T#%d\n", gtid));

  /* setup current data */
  master_th = __kmp_threads[gtid];
  root = master_th->th.th_root;
  team = master_th->th.th_team;
  parent_team = team->t.t_parent;

  master_th->th.th_ident = loc;

  if (ompt_enabled.enabled) {
    master_th->th.ompt_thread_info.state = ompt_state_overhead;
  }

  if (__kmp_tasking_mode != tskm_immediate_exec && !exit_teams) {
    KA_TRACE(20, ("__kmp_join_call: T#%d, old team = %p old task_team = %p, "
                  "th_task_team = %p\n",
                  __kmp_gtid_from_thread(master_th), team,
                  team->t.t_task_team[master_th->th.th_task_state],
                  master_th->th.th_task_team));
    KMP_DEBUG_ASSERT(master_th->th.th_task_team ==
                     team->t.t_task_team[master_th->th.th_task_state]);
  }

  if (team->t.t_serialized) {
    if (master_th->th.th_teams_microtask) {
      // We are in teams construct
      int level = team->t.t_level;
      int tlevel = master_th->th.th_teams_level;
      if (level == tlevel) {
        // AC: we haven't incremented it earlier at start of teams construct,
        //     so do it here - at the end of teams construct
        team->t.t_level++;
      } else if (level == tlevel + 1) {
        // AC: we are exiting parallel inside teams, need to increment
        // serialization in order to restore it in the next call to
        // __kmpc_end_serialized_parallel
        team->t.t_serialized++;
      }
    }
    __kmpc_end_serialized_parallel(loc, gtid);

    if (ompt_enabled.enabled) {
      __kmp_join_restore_state(master_th, parent_team);
    }

    return;
  }

  master_active = team->t.t_master_active;

  if (!exit_teams) {
    // AC: No barrier for internal teams at exit from teams construct.
    //     But there is barrier for external team (league).
    __kmp_internal_join(loc, gtid, team);
  } else {
    master_th->th.th_task_state =
        0; // AC: no tasking in teams (out of any parallel)
  }

  KMP_MB();

  ompt_data_t *parallel_data = &(team->t.ompt_team_info.parallel_data);
  void *codeptr = team->t.ompt_team_info.master_return_address;

  if (__itt_stack_caller_create_ptr) {
    __kmp_itt_stack_caller_destroy(
        (__itt_caller)team->t
            .t_stack_id); // destroy the stack stitching id after join barrier
  }

  // Mark end of "parallel" region for Intel(R) VTune(TM) analyzer.
  if (team->t.t_active_level == 1 &&
      !master_th->th.th_teams_microtask) { /* not in teams construct */
    master_th->th.th_ident = loc;
    // only one notification scheme (either "submit" or "forking/joined",
    // not both)
    if ((__itt_frame_submit_v3_ptr || KMP_ITT_DEBUG) &&
        __kmp_forkjoin_frames_mode == 3)
      __kmp_itt_frame_submit(gtid, team->t.t_region_time,
                             master_th->th.th_frame_time, 0, loc,
                             master_th->th.th_team_nproc, 1);
    else if ((__itt_frame_end_v3_ptr || KMP_ITT_DEBUG) &&
             !__kmp_forkjoin_frames_mode && __kmp_forkjoin_frames)
      __kmp_itt_region_joined(gtid);
  } // active_level == 1

  if (master_th->th.th_teams_microtask && !exit_teams &&
      team->t.t_pkfn != (microtask_t)__kmp_teams_master &&
      team->t.t_level == master_th->th.th_teams_level + 1) {
    // AC: We need to leave the team structure intact at the end of parallel
    // inside the teams construct, so that at the next parallel the same (hot)
    // team works; only adjust nesting levels.

    /* Decrement our nested depth level */
    team->t.t_level--;
    team->t.t_active_level--;
    KMP_ATOMIC_DEC(&root->r.r_in_parallel);

    /* Restore number of threads in the team if needed */
    if (master_th->th.th_team_nproc < master_th->th.th_teams_size.nth) {
      int old_num = master_th->th.th_team_nproc;
      int new_num = master_th->th.th_teams_size.nth;
      kmp_info_t **other_threads = team->t.t_threads;
      team->t.t_nproc = new_num;
      for (i = 0; i < old_num; ++i) {
        other_threads[i]->th.th_team_nproc = new_num;
      }
      // Adjust states of non-used threads of the team
      for (i = old_num; i < new_num; ++i) {
        // Re-initialize thread's barrier data.
        int b;
        kmp_balign_t *balign = other_threads[i]->th.th_bar;
        for (b = 0; b < bs_last_barrier; ++b) {
          balign[b].bb.b_arrived = team->t.t_bar[b].b_arrived;
          KMP_DEBUG_ASSERT(balign[b].bb.wait_flag != KMP_BARRIER_PARENT_FLAG);
#if USE_DEBUGGER
          balign[b].bb.b_worker_arrived = team->t.t_bar[b].b_team_arrived;
#endif
        }
        if (__kmp_tasking_mode != tskm_immediate_exec) {
          // Synchronize thread's task state
          other_threads[i]->th.th_task_state = master_th->th.th_task_state;
        }
      }
    }

    if (ompt_enabled.enabled) {
      __kmp_join_ompt(gtid, master_th, parent_team, parallel_data,
                      fork_context, codeptr);
    }

    return;
  }

  /* do cleanup and restore the parent team */
  master_th->th.th_info.ds.ds_tid = team->t.t_master_tid;
  master_th->th.th_local.this_construct = team->t.t_master_this_cons;

  master_th->th.th_dispatch = &parent_team->t.t_dispatch[team->t.t_master_tid];
2525 __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
2528 if (!master_th->th.th_teams_microtask ||
2529 team->t.t_level > master_th->th.th_teams_level)
2533 KMP_ATOMIC_DEC(&root->r.r_in_parallel);
2535 KMP_DEBUG_ASSERT(root->r.r_in_parallel >= 0);
2538 if (ompt_enabled.enabled) {
2539 ompt_task_info_t *task_info = __ompt_get_task_info_object(0);
2540 if (ompt_enabled.ompt_callback_implicit_task) {
2541 int ompt_team_size = team->t.t_nproc;
2542 ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
2543 ompt_scope_end, NULL, &(task_info->task_data), ompt_team_size,
2544 OMPT_CUR_TASK_INFO(master_th)->thread_num, ompt_task_implicit);
2547 task_info->frame.exit_frame = ompt_data_none;
2548 task_info->task_data = ompt_data_none;
2552 KF_TRACE(10, (
"__kmp_join_call1: T#%d, this_thread=%p team=%p\n", 0,
2554 __kmp_pop_current_task_from_thread(master_th);
2556 #if OMP_40_ENABLED && KMP_AFFINITY_SUPPORTED 2558 master_th->th.th_first_place = team->t.t_first_place;
2559 master_th->th.th_last_place = team->t.t_last_place;
2562 master_th->th.th_def_allocator = team->t.t_def_allocator;
2565 updateHWFPControl(team);
2567 if (root->r.r_active != master_active)
2568 root->r.r_active = master_active;
2570 __kmp_free_team(root, team USE_NESTED_HOT_ARG(
2578 master_th->th.th_team = parent_team;
2579 master_th->th.th_team_nproc = parent_team->t.t_nproc;
2580 master_th->th.th_team_master = parent_team->t.t_threads[0];
2581 master_th->th.th_team_serialized = parent_team->t.t_serialized;
2584 if (parent_team->t.t_serialized &&
2585 parent_team != master_th->th.th_serial_team &&
2586 parent_team != root->r.r_root_team) {
2587 __kmp_free_team(root,
2588 master_th->th.th_serial_team USE_NESTED_HOT_ARG(NULL));
2589 master_th->th.th_serial_team = parent_team;
2592 if (__kmp_tasking_mode != tskm_immediate_exec) {
2593 if (master_th->th.th_task_state_top >
2595 KMP_DEBUG_ASSERT(master_th->th.th_task_state_memo_stack);
2597 master_th->th.th_task_state_memo_stack[master_th->th.th_task_state_top] =
2598 master_th->th.th_task_state;
2599 --master_th->th.th_task_state_top;
2601 master_th->th.th_task_state =
2603 .th_task_state_memo_stack[master_th->th.th_task_state_top];
2606 master_th->th.th_task_team =
2607 parent_team->t.t_task_team[master_th->th.th_task_state];
  KA_TRACE(20, ("__kmp_join_call: Master T#%d restoring task_team %p / team %p\n",
                __kmp_gtid_from_thread(master_th), master_th->th.th_task_team,
                parent_team));
2617 master_th->th.th_current_task->td_flags.executing = 1;
2619 __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
2622 if (ompt_enabled.enabled) {
2623 __kmp_join_ompt(gtid, master_th, parent_team, parallel_data, fork_context,
  KA_TRACE(20, ("__kmp_join_call: exit T#%d\n", gtid));
2634 void __kmp_save_internal_controls(kmp_info_t *thread) {
2636 if (thread->th.th_team != thread->th.th_serial_team) {
2639 if (thread->th.th_team->t.t_serialized > 1) {
2642 if (thread->th.th_team->t.t_control_stack_top == NULL) {
2645 if (thread->th.th_team->t.t_control_stack_top->serial_nesting_level !=
2646 thread->th.th_team->t.t_serialized) {
2651 kmp_internal_control_t *control =
2652 (kmp_internal_control_t *)__kmp_allocate(
2653 sizeof(kmp_internal_control_t));
2655 copy_icvs(control, &thread->th.th_current_task->td_icvs);
2657 control->serial_nesting_level = thread->th.th_team->t.t_serialized;
2659 control->next = thread->th.th_team->t.t_control_stack_top;
2660 thread->th.th_team->t.t_control_stack_top = control;
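// __kmp_set_num_threads: runtime side of omp_set_num_threads(). Clamps the
// request to __kmp_max_nth, stores it in the nproc ICV and, when the root is
// not in an active parallel region, releases surplus hot-team threads back to
// the thread pool so the hot team matches the new size.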
void __kmp_set_num_threads(int new_nth, int gtid) {
  KF_TRACE(10, ("__kmp_set_num_threads: new __kmp_nth = %d\n", new_nth));
2671 KMP_DEBUG_ASSERT(__kmp_init_serial);
2675 else if (new_nth > __kmp_max_nth)
2676 new_nth = __kmp_max_nth;
2679 thread = __kmp_threads[gtid];
2680 if (thread->th.th_current_task->td_icvs.nproc == new_nth)
2683 __kmp_save_internal_controls(thread);
2685 set__nproc(thread, new_nth);
2690 root = thread->th.th_root;
2691 if (__kmp_init_parallel && (!root->r.r_active) &&
2692 (root->r.r_hot_team->t.t_nproc > new_nth)
#if KMP_NESTED_HOT_TEAMS
      && __kmp_hot_teams_max_level && !__kmp_hot_teams_mode
2697 kmp_team_t *hot_team = root->r.r_hot_team;
2700 __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
2703 for (f = new_nth; f < hot_team->t.t_nproc; f++) {
2704 KMP_DEBUG_ASSERT(hot_team->t.t_threads[f] != NULL);
2705 if (__kmp_tasking_mode != tskm_immediate_exec) {
2708 hot_team->t.t_threads[f]->th.th_task_team = NULL;
2710 __kmp_free_thread(hot_team->t.t_threads[f]);
2711 hot_team->t.t_threads[f] = NULL;
2713 hot_team->t.t_nproc = new_nth;
#if KMP_NESTED_HOT_TEAMS
    if (thread->th.th_hot_teams) {
2716 KMP_DEBUG_ASSERT(hot_team == thread->th.th_hot_teams[0].hot_team);
2717 thread->th.th_hot_teams[0].hot_team_nth = new_nth;
2721 __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
2724 for (f = 0; f < new_nth; f++) {
2725 KMP_DEBUG_ASSERT(hot_team->t.t_threads[f] != NULL);
2726 hot_team->t.t_threads[f]->th.th_team_nproc = new_nth;
2729 hot_team->t.t_size_changed = -1;
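// __kmp_set_max_active_levels: runtime side of omp_set_max_active_levels().
// Negative values are ignored with a warning; values above
// KMP_MAX_ACTIVE_LEVELS_LIMIT are clamped with a warning; the validated value
// is stored in the current task's ICVs.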
void __kmp_set_max_active_levels(int gtid, int max_active_levels) {
  KF_TRACE(10, ("__kmp_set_max_active_levels: new max_active_levels for thread "
                "%d = (%d)\n",
                gtid, max_active_levels));
2740 KMP_DEBUG_ASSERT(__kmp_init_serial);
2743 if (max_active_levels < 0) {
2744 KMP_WARNING(ActiveLevelsNegative, max_active_levels);
    KF_TRACE(10, ("__kmp_set_max_active_levels: the call is ignored: new "
                  "max_active_levels for thread %d = (%d)\n",
                  gtid, max_active_levels));
2754 if (max_active_levels <= KMP_MAX_ACTIVE_LEVELS_LIMIT) {
2759 KMP_WARNING(ActiveLevelsExceedLimit, max_active_levels,
2760 KMP_MAX_ACTIVE_LEVELS_LIMIT);
2761 max_active_levels = KMP_MAX_ACTIVE_LEVELS_LIMIT;
2767 KF_TRACE(10, (
"__kmp_set_max_active_levels: after validation: new " 2768 "max_active_levels for thread %d = (%d)\n",
2769 gtid, max_active_levels));
2771 thread = __kmp_threads[gtid];
2773 __kmp_save_internal_controls(thread);
2775 set__max_active_levels(thread, max_active_levels);
int __kmp_get_max_active_levels(int gtid) {
  KF_TRACE(10, ("__kmp_get_max_active_levels: thread %d\n", gtid));
2783 KMP_DEBUG_ASSERT(__kmp_init_serial);
2785 thread = __kmp_threads[gtid];
2786 KMP_DEBUG_ASSERT(thread->th.th_current_task);
2787 KF_TRACE(10, (
"__kmp_get_max_active_levels: thread %d, curtask=%p, " 2788 "curtask_maxaclevel=%d\n",
2789 gtid, thread->th.th_current_task,
2790 thread->th.th_current_task->td_icvs.max_active_levels));
2791 return thread->th.th_current_task->td_icvs.max_active_levels;
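// __kmp_set_schedule / __kmp_get_schedule: translate between the public
// kmp_sched_t kinds (omp_set_schedule / omp_get_schedule) and the internal
// kmp_sch_* constants kept in the task's sched ICV; the auto kind or a chunk
// value < 1 falls back to KMP_DEFAULT_CHUNK.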
void __kmp_set_schedule(int gtid, kmp_sched_t kind, int chunk) {
  KF_TRACE(10, ("__kmp_set_schedule: new schedule for thread %d = (%d, %d)\n",
                gtid, (int)kind, chunk));
2801 KMP_DEBUG_ASSERT(__kmp_init_serial);
2807 if (kind <= kmp_sched_lower || kind >= kmp_sched_upper ||
2808 (kind <= kmp_sched_lower_ext && kind >= kmp_sched_upper_std)) {
2810 __kmp_msg(kmp_ms_warning, KMP_MSG(ScheduleKindOutOfRange, kind),
              KMP_HNT(DefaultScheduleKindUsed, "static, no chunk"),
              __kmp_msg_null);
    kind = kmp_sched_default;
2817 thread = __kmp_threads[gtid];
2819 __kmp_save_internal_controls(thread);
2821 if (kind < kmp_sched_upper_std) {
2822 if (kind == kmp_sched_static && chunk < KMP_DEFAULT_CHUNK) {
      thread->th.th_current_task->td_icvs.sched.r_sched_type = kmp_sch_static;
2827 thread->th.th_current_task->td_icvs.sched.r_sched_type =
2828 __kmp_sch_map[kind - kmp_sched_lower - 1];
2833 thread->th.th_current_task->td_icvs.sched.r_sched_type =
2834 __kmp_sch_map[kind - kmp_sched_lower_ext + kmp_sched_upper_std -
2835 kmp_sched_lower - 2];
2837 if (kind == kmp_sched_auto || chunk < 1) {
2839 thread->th.th_current_task->td_icvs.sched.chunk = KMP_DEFAULT_CHUNK;
2841 thread->th.th_current_task->td_icvs.sched.chunk = chunk;
void __kmp_get_schedule(int gtid, kmp_sched_t *kind, int *chunk) {
  KF_TRACE(10, ("__kmp_get_schedule: thread %d\n", gtid));
2851 KMP_DEBUG_ASSERT(__kmp_init_serial);
2853 thread = __kmp_threads[gtid];
2855 th_type = thread->th.th_current_task->td_icvs.sched.r_sched_type;
2859 case kmp_sch_static_greedy:
2860 case kmp_sch_static_balanced:
2861 *kind = kmp_sched_static;
2864 case kmp_sch_static_chunked:
2865 *kind = kmp_sched_static;
2867 case kmp_sch_dynamic_chunked:
2868 *kind = kmp_sched_dynamic;
2871 case kmp_sch_guided_iterative_chunked:
2872 case kmp_sch_guided_analytical_chunked:
2873 *kind = kmp_sched_guided;
2876 *kind = kmp_sched_auto;
2878 case kmp_sch_trapezoidal:
2879 *kind = kmp_sched_trapezoidal;
2881 #if KMP_STATIC_STEAL_ENABLED 2882 case kmp_sch_static_steal:
2883 *kind = kmp_sched_static_steal;
2887 KMP_FATAL(UnknownSchedulingType, th_type);
2890 *chunk = thread->th.th_current_task->td_icvs.sched.chunk;
int __kmp_get_ancestor_thread_num(int gtid, int level) {
  KF_TRACE(10, ("__kmp_get_ancestor_thread_num: thread %d %d\n", gtid, level));
2900 KMP_DEBUG_ASSERT(__kmp_init_serial);
2907 thr = __kmp_threads[gtid];
2908 team = thr->th.th_team;
2909 ii = team->t.t_level;
2914 if (thr->th.th_teams_microtask) {
2916 int tlevel = thr->th.th_teams_level;
2919 KMP_DEBUG_ASSERT(ii >= tlevel);
2932 return __kmp_tid_from_gtid(gtid);
2934 dd = team->t.t_serialized;
2936 while (ii > level) {
2937 for (dd = team->t.t_serialized; (dd > 0) && (ii > level); dd--, ii--) {
2939 if ((team->t.t_serialized) && (!dd)) {
2940 team = team->t.t_parent;
2944 team = team->t.t_parent;
2945 dd = team->t.t_serialized;
2950 return (dd > 1) ? (0) : (team->t.t_master_tid);
int __kmp_get_team_size(int gtid, int level) {
  KF_TRACE(10, ("__kmp_get_team_size: thread %d %d\n", gtid, level));
2960 KMP_DEBUG_ASSERT(__kmp_init_serial);
2967 thr = __kmp_threads[gtid];
2968 team = thr->th.th_team;
2969 ii = team->t.t_level;
2974 if (thr->th.th_teams_microtask) {
2976 int tlevel = thr->th.th_teams_level;
2979 KMP_DEBUG_ASSERT(ii >= tlevel);
2991 while (ii > level) {
2992 for (dd = team->t.t_serialized; (dd > 0) && (ii > level); dd--, ii--) {
2994 if (team->t.t_serialized && (!dd)) {
2995 team = team->t.t_parent;
2999 team = team->t.t_parent;
3004 return team->t.t_nproc;
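// __kmp_get_schedule_global: build the default run-sched (kind + chunk) from
// the global settings __kmp_static / __kmp_guided / __kmp_sched and
// __kmp_chunk; used below when the global ICVs and root teams are set up.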
3007 kmp_r_sched_t __kmp_get_schedule_global() {
3012 kmp_r_sched_t r_sched;
3020 r_sched.r_sched_type = __kmp_static;
3023 r_sched.r_sched_type = __kmp_guided;
3025 r_sched.r_sched_type = __kmp_sched;
3028 if (__kmp_chunk < KMP_DEFAULT_CHUNK) {
3030 r_sched.chunk = KMP_DEFAULT_CHUNK;
3032 r_sched.chunk = __kmp_chunk;
static void __kmp_alloc_argv_entries(int argc, kmp_team_t *team, int realloc) {
  KMP_DEBUG_ASSERT(team);
  if (!realloc || argc > team->t.t_max_argc) {
    KA_TRACE(100, ("__kmp_alloc_argv_entries: team %d: needed entries=%d, "
                   "current entries=%d\n",
                   team->t.t_id, argc, (realloc) ? team->t.t_max_argc : 0));
    if (realloc && team->t.t_argv != &team->t.t_inline_argv[0])
      __kmp_free((void *)team->t.t_argv);
3052 if (argc <= KMP_INLINE_ARGV_ENTRIES) {
3054 team->t.t_max_argc = KMP_INLINE_ARGV_ENTRIES;
      KA_TRACE(100, ("__kmp_alloc_argv_entries: team %d: inline allocate %d "
                     "argv entries\n",
                     team->t.t_id, team->t.t_max_argc));
      team->t.t_argv = &team->t.t_inline_argv[0];
      if (__kmp_storage_map) {
        __kmp_print_storage_map_gtid(
            -1, &team->t.t_inline_argv[0],
            &team->t.t_inline_argv[KMP_INLINE_ARGV_ENTRIES],
            (sizeof(void *) * KMP_INLINE_ARGV_ENTRIES), "team_%d.t_inline_argv",
            team->t.t_id);
      team->t.t_max_argc = (argc <= (KMP_MIN_MALLOC_ARGV_ENTRIES >> 1))
                               ? KMP_MIN_MALLOC_ARGV_ENTRIES
                               : 2 * argc;
      KA_TRACE(100, ("__kmp_alloc_argv_entries: team %d: dynamic allocate %d "
                     "argv entries\n",
                     team->t.t_id, team->t.t_max_argc));
      team->t.t_argv =
          (void **)__kmp_page_allocate(sizeof(void *) * team->t.t_max_argc);
3076 if (__kmp_storage_map) {
3077 __kmp_print_storage_map_gtid(-1, &team->t.t_argv[0],
3078 &team->t.t_argv[team->t.t_max_argc],
3079 sizeof(
void *) * team->t.t_max_argc,
3080 "team_%d.t_argv", team->t.t_id);
static void __kmp_allocate_team_arrays(kmp_team_t *team, int max_nth) {
  int i;
  int num_disp_buff = max_nth > 1 ? __kmp_dispatch_num_buffers : 2;
  team->t.t_threads =
      (kmp_info_t **)__kmp_allocate(sizeof(kmp_info_t *) * max_nth);
  team->t.t_disp_buffer = (dispatch_shared_info_t *)__kmp_allocate(
      sizeof(dispatch_shared_info_t) * num_disp_buff);
  team->t.t_dispatch =
      (kmp_disp_t *)__kmp_allocate(sizeof(kmp_disp_t) * max_nth);
  team->t.t_implicit_task_taskdata =
      (kmp_taskdata_t *)__kmp_allocate(sizeof(kmp_taskdata_t) * max_nth);
3097 team->t.t_max_nproc = max_nth;
3100 for (i = 0; i < num_disp_buff; ++i) {
3101 team->t.t_disp_buffer[i].buffer_index = i;
3103 team->t.t_disp_buffer[i].doacross_buf_idx = i;
3108 static void __kmp_free_team_arrays(kmp_team_t *team) {
3111 for (i = 0; i < team->t.t_max_nproc; ++i) {
3112 if (team->t.t_dispatch[i].th_disp_buffer != NULL) {
3113 __kmp_free(team->t.t_dispatch[i].th_disp_buffer);
3114 team->t.t_dispatch[i].th_disp_buffer = NULL;
#if KMP_USE_HIER_SCHED
  __kmp_dispatch_free_hierarchies(team);
3120 __kmp_free(team->t.t_threads);
3121 __kmp_free(team->t.t_disp_buffer);
3122 __kmp_free(team->t.t_dispatch);
3123 __kmp_free(team->t.t_implicit_task_taskdata);
3124 team->t.t_threads = NULL;
3125 team->t.t_disp_buffer = NULL;
3126 team->t.t_dispatch = NULL;
3127 team->t.t_implicit_task_taskdata = 0;
static void __kmp_reallocate_team_arrays(kmp_team_t *team, int max_nth) {
3131 kmp_info_t **oldThreads = team->t.t_threads;
3133 __kmp_free(team->t.t_disp_buffer);
3134 __kmp_free(team->t.t_dispatch);
3135 __kmp_free(team->t.t_implicit_task_taskdata);
3136 __kmp_allocate_team_arrays(team, max_nth);
  KMP_MEMCPY(team->t.t_threads, oldThreads,
             team->t.t_nproc * sizeof(kmp_info_t *));
3141 __kmp_free(oldThreads);
static kmp_internal_control_t __kmp_get_global_icvs(void) {
3146 kmp_r_sched_t r_sched =
3147 __kmp_get_schedule_global();
3150 KMP_DEBUG_ASSERT(__kmp_nested_proc_bind.used > 0);
3153 kmp_internal_control_t g_icvs = {
3155 (kmp_int8)__kmp_dflt_nested,
3157 (kmp_int8)__kmp_global.g.g_dynamic,
3159 (kmp_int8)__kmp_env_blocktime,
3161 __kmp_dflt_blocktime,
3166 __kmp_dflt_team_nth,
3169 __kmp_dflt_max_active_levels,
3174 __kmp_nested_proc_bind.bind_types[0],
3175 __kmp_default_device,
static kmp_internal_control_t __kmp_get_x_global_icvs(const kmp_team_t *team) {
3185 kmp_internal_control_t gx_icvs;
3186 gx_icvs.serial_nesting_level =
3188 copy_icvs(&gx_icvs, &team->t.t_threads[0]->th.th_current_task->td_icvs);
3189 gx_icvs.next = NULL;
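// __kmp_initialize_root: one-time setup of a kmp_root_t. Creates the
// (serialized) root team and the reusable hot team for this root and seeds
// their barrier, schedule and ICV state.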
3194 static void __kmp_initialize_root(kmp_root_t *root) {
3196 kmp_team_t *root_team;
3197 kmp_team_t *hot_team;
3198 int hot_team_max_nth;
3199 kmp_r_sched_t r_sched =
3200 __kmp_get_schedule_global();
3201 kmp_internal_control_t r_icvs = __kmp_get_global_icvs();
3202 KMP_DEBUG_ASSERT(root);
3203 KMP_ASSERT(!root->r.r_begin);
3206 __kmp_init_lock(&root->r.r_begin_lock);
3207 root->r.r_begin = FALSE;
3208 root->r.r_active = FALSE;
3209 root->r.r_in_parallel = 0;
3210 root->r.r_blocktime = __kmp_dflt_blocktime;
3211 root->r.r_nested = __kmp_dflt_nested;
3212 root->r.r_cg_nthreads = 1;
  KF_TRACE(10, ("__kmp_initialize_root: before root_team\n"));
3219 __kmp_allocate_team(root,
3226 __kmp_nested_proc_bind.bind_types[0],
3230 USE_NESTED_HOT_ARG(NULL)
3235 TCW_SYNC_PTR(root_team->t.t_pkfn, (microtask_t)(~0));
  KF_TRACE(10, ("__kmp_initialize_root: after root_team = %p\n", root_team));
3240 root->r.r_root_team = root_team;
3241 root_team->t.t_control_stack_top = NULL;
3244 root_team->t.t_threads[0] = NULL;
3245 root_team->t.t_nproc = 1;
3246 root_team->t.t_serialized = 1;
3248 root_team->t.t_sched.sched = r_sched.sched;
  KF_TRACE(10,
           ("__kmp_initialize_root: init root team %d arrived: join=%u, plain=%u\n",
            root_team->t.t_id, KMP_INIT_BARRIER_STATE, KMP_INIT_BARRIER_STATE));
3256 KF_TRACE(10, (
"__kmp_initialize_root: before hot_team\n"));
3259 __kmp_allocate_team(root,
3261 __kmp_dflt_team_nth_ub * 2,
3266 __kmp_nested_proc_bind.bind_types[0],
3270 USE_NESTED_HOT_ARG(NULL)
3272 KF_TRACE(10, (
"__kmp_initialize_root: after hot_team = %p\n", hot_team));
3274 root->r.r_hot_team = hot_team;
3275 root_team->t.t_control_stack_top = NULL;
3278 hot_team->t.t_parent = root_team;
3281 hot_team_max_nth = hot_team->t.t_max_nproc;
3282 for (f = 0; f < hot_team_max_nth; ++f) {
3283 hot_team->t.t_threads[f] = NULL;
3285 hot_team->t.t_nproc = 1;
3287 hot_team->t.t_sched.sched = r_sched.sched;
3288 hot_team->t.t_size_changed = 0;
typedef struct kmp_team_list_item {
  kmp_team_p const *entry;
  struct kmp_team_list_item *next;
} kmp_team_list_item_t;
typedef kmp_team_list_item_t *kmp_team_list_t;

static void __kmp_print_structure_team_accum( // Add team to list of teams.
    kmp_team_list_t list, // List of teams.
    kmp_team_p const *team // Team to add.
    ) {
3311 KMP_DEBUG_ASSERT(list != NULL);
3316 __kmp_print_structure_team_accum(list, team->t.t_parent);
3317 __kmp_print_structure_team_accum(list, team->t.t_next_pool);
3321 while (l->next != NULL && l->entry != team) {
3324 if (l->next != NULL) {
3330 while (l->next != NULL && l->entry->t.t_id <= team->t.t_id) {
3336 kmp_team_list_item_t *item = (kmp_team_list_item_t *)KMP_INTERNAL_MALLOC(
3337 sizeof(kmp_team_list_item_t));
static void __kmp_print_structure_team(char const *title,
                                       kmp_team_p const *team) {
  __kmp_printf("%s", title);
  if (team != NULL) {
    __kmp_printf("%2x %p\n", team->t.t_id, team);
  } else {
    __kmp_printf(" - (nil)\n");
  }
}
static void __kmp_print_structure_thread(char const *title,
                                         kmp_info_p const *thread) {
  __kmp_printf("%s", title);
  if (thread != NULL) {
    __kmp_printf("%2d %p\n", thread->th.th_info.ds.ds_gtid, thread);
  } else {
    __kmp_printf(" - (nil)\n");
  }
}
void __kmp_print_structure(void) {
  kmp_team_list_t list;
  list =
      (kmp_team_list_item_t *)KMP_INTERNAL_MALLOC(sizeof(kmp_team_list_item_t));
  __kmp_printf("\n------------------------------\nGlobal Thread "
               "Table\n------------------------------\n");
3379 for (gtid = 0; gtid < __kmp_threads_capacity; ++gtid) {
3380 __kmp_printf(
"%2d", gtid);
3381 if (__kmp_threads != NULL) {
3382 __kmp_printf(
" %p", __kmp_threads[gtid]);
3384 if (__kmp_root != NULL) {
3385 __kmp_printf(
" %p", __kmp_root[gtid]);
3392 __kmp_printf(
"\n------------------------------\nThreads\n--------------------" 3394 if (__kmp_threads != NULL) {
3396 for (gtid = 0; gtid < __kmp_threads_capacity; ++gtid) {
3397 kmp_info_t
const *thread = __kmp_threads[gtid];
3398 if (thread != NULL) {
3399 __kmp_printf(
"GTID %2d %p:\n", gtid, thread);
3400 __kmp_printf(
" Our Root: %p\n", thread->th.th_root);
3401 __kmp_print_structure_team(
" Our Team: ", thread->th.th_team);
3402 __kmp_print_structure_team(
" Serial Team: ",
3403 thread->th.th_serial_team);
3404 __kmp_printf(
" Threads: %2d\n", thread->th.th_team_nproc);
3405 __kmp_print_structure_thread(
" Master: ",
3406 thread->th.th_team_master);
3407 __kmp_printf(
" Serialized?: %2d\n", thread->th.th_team_serialized);
3408 __kmp_printf(
" Set NProc: %2d\n", thread->th.th_set_nproc);
3410 __kmp_printf(
" Set Proc Bind: %2d\n", thread->th.th_set_proc_bind);
3412 __kmp_print_structure_thread(
" Next in pool: ",
3413 thread->th.th_next_pool);
3415 __kmp_print_structure_team_accum(list, thread->th.th_team);
3416 __kmp_print_structure_team_accum(list, thread->th.th_serial_team);
3420 __kmp_printf(
"Threads array is not allocated.\n");
3424 __kmp_printf(
"\n------------------------------\nUbers\n----------------------" 3426 if (__kmp_root != NULL) {
3428 for (gtid = 0; gtid < __kmp_threads_capacity; ++gtid) {
3429 kmp_root_t
const *root = __kmp_root[gtid];
3431 __kmp_printf(
"GTID %2d %p:\n", gtid, root);
3432 __kmp_print_structure_team(
" Root Team: ", root->r.r_root_team);
3433 __kmp_print_structure_team(
" Hot Team: ", root->r.r_hot_team);
3434 __kmp_print_structure_thread(
" Uber Thread: ",
3435 root->r.r_uber_thread);
3436 __kmp_printf(
" Active?: %2d\n", root->r.r_active);
3437 __kmp_printf(
" Nested?: %2d\n", root->r.r_nested);
3438 __kmp_printf(
" In Parallel: %2d\n",
3439 KMP_ATOMIC_LD_RLX(&root->r.r_in_parallel));
3441 __kmp_print_structure_team_accum(list, root->r.r_root_team);
3442 __kmp_print_structure_team_accum(list, root->r.r_hot_team);
3446 __kmp_printf(
"Ubers array is not allocated.\n");
3449 __kmp_printf(
"\n------------------------------\nTeams\n----------------------" 3451 while (list->next != NULL) {
3452 kmp_team_p
const *team = list->entry;
3454 __kmp_printf(
"Team %2x %p:\n", team->t.t_id, team);
3455 __kmp_print_structure_team(
" Parent Team: ", team->t.t_parent);
3456 __kmp_printf(
" Master TID: %2d\n", team->t.t_master_tid);
3457 __kmp_printf(
" Max threads: %2d\n", team->t.t_max_nproc);
3458 __kmp_printf(
" Levels of serial: %2d\n", team->t.t_serialized);
3459 __kmp_printf(
" Number threads: %2d\n", team->t.t_nproc);
3460 for (i = 0; i < team->t.t_nproc; ++i) {
3461 __kmp_printf(
" Thread %2d: ", i);
3462 __kmp_print_structure_thread(
"", team->t.t_threads[i]);
3464 __kmp_print_structure_team(
" Next in pool: ", team->t.t_next_pool);
3470 __kmp_printf(
"\n------------------------------\nPools\n----------------------" 3472 __kmp_print_structure_thread(
"Thread pool: ",
3473 CCAST(kmp_info_t *, __kmp_thread_pool));
3474 __kmp_print_structure_team(
"Team pool: ",
3475 CCAST(kmp_team_t *, __kmp_team_pool));
3479 while (list != NULL) {
3480 kmp_team_list_item_t *item = list;
3482 KMP_INTERNAL_FREE(item);
3491 static const unsigned __kmp_primes[] = {
3492 0x9e3779b1, 0xffe6cc59, 0x2109f6dd, 0x43977ab5, 0xba5703f5, 0xb495a877,
3493 0xe1626741, 0x79695e6b, 0xbc98c09f, 0xd5bee2b3, 0x287488f9, 0x3af18231,
3494 0x9677cd4d, 0xbe3a6929, 0xadc6a877, 0xdcf0674b, 0xbe4d6fe9, 0x5f15e201,
3495 0x99afc3fd, 0xf3f16801, 0xe222cfff, 0x24ba5fdb, 0x0620452d, 0x79f149e3,
3496 0xc8b93f49, 0x972702cd, 0xb07dd827, 0x6c97d5ed, 0x085a3d61, 0x46eb5ea7,
3497 0x3d9910ed, 0x2e687b5b, 0x29609227, 0x6eb081f1, 0x0954c4e1, 0x9d114db9,
3498 0x542acfa9, 0xb3e6bd7b, 0x0742d917, 0xe9f3ffa7, 0x54581edb, 0xf2480f45,
3499 0x0bb9288f, 0xef1affc7, 0x85fa0ca7, 0x3ccc14db, 0xe6baf34b, 0x343377f7,
3500 0x5ca19031, 0xe6d9293b, 0xf0a9f391, 0x5d2e980b, 0xfc411073, 0xc3749363,
3501 0xb892d829, 0x3549366b, 0x629750ad, 0xb98294e5, 0x892d9483, 0xc235baf3,
3502 0x3d2402a3, 0x6bdef3c9, 0xbec333cd, 0x40c9520f};
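// Thread-private linear congruential generator: __kmp_init_random() picks the
// multiplier th_a from __kmp_primes based on the thread's tid, and
// __kmp_get_random() advances the state as x = a * x + 1, returning the upper
// 16 bits.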
3506 unsigned short __kmp_get_random(kmp_info_t *thread) {
3507 unsigned x = thread->th.th_x;
3508 unsigned short r = x >> 16;
3510 thread->th.th_x = x * thread->th.th_a + 1;
  KA_TRACE(30, ("__kmp_get_random: THREAD: %d, RETURN: %u\n",
                thread->th.th_info.ds.ds_tid, r));
  return r;
}
3519 void __kmp_init_random(kmp_info_t *thread) {
3520 unsigned seed = thread->th.th_info.ds.ds_tid;
  thread->th.th_a =
      __kmp_primes[seed % (sizeof(__kmp_primes) / sizeof(__kmp_primes[0]))];
  thread->th.th_x = (seed + 1) * thread->th.th_a + 1;
  KA_TRACE(30,
           ("__kmp_init_random: THREAD: %u; A: %u\n", seed, thread->th.th_a));
static int __kmp_reclaim_dead_roots(void) {
3535 for (i = 0; i < __kmp_threads_capacity; ++i) {
3536 if (KMP_UBER_GTID(i) &&
3537 !__kmp_still_running((kmp_info_t *)TCR_SYNC_PTR(__kmp_threads[i])) &&
3540 r += __kmp_unregister_root_other_thread(i);
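// __kmp_expand_threads: grow the global __kmp_threads / __kmp_root arrays so
// that at least nNeed additional slots exist, doubling the capacity up to
// __kmp_sys_max_nth. Both arrays live in one allocation; the old block is
// copied and then freed. Callers hold __kmp_forkjoin_lock (see
// __kmp_register_root below).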
static int __kmp_expand_threads(int nNeed) {
3567 int minimumRequiredCapacity;
3569 kmp_info_t **newThreads;
3570 kmp_root_t **newRoot;
#if KMP_OS_WINDOWS && !KMP_DYNAMIC_LIB
  added = __kmp_reclaim_dead_roots();
3608 KMP_DEBUG_ASSERT(__kmp_sys_max_nth >= __kmp_threads_capacity);
3611 if (__kmp_sys_max_nth - __kmp_threads_capacity < nNeed) {
3615 minimumRequiredCapacity = __kmp_threads_capacity + nNeed;
3617 newCapacity = __kmp_threads_capacity;
  do {
    newCapacity = newCapacity <= (__kmp_sys_max_nth >> 1) ? (newCapacity << 1)
                                                          : __kmp_sys_max_nth;
  } while (newCapacity < minimumRequiredCapacity);
  newThreads = (kmp_info_t **)__kmp_allocate(
      (sizeof(kmp_info_t *) + sizeof(kmp_root_t *)) * newCapacity + CACHE_LINE);
  newRoot =
      (kmp_root_t **)((char *)newThreads + sizeof(kmp_info_t *) * newCapacity);
3626 KMP_MEMCPY(newThreads, __kmp_threads,
3627 __kmp_threads_capacity *
sizeof(kmp_info_t *));
3628 KMP_MEMCPY(newRoot, __kmp_root,
3629 __kmp_threads_capacity *
sizeof(kmp_root_t *));
3631 kmp_info_t **temp_threads = __kmp_threads;
  *(kmp_info_t * *volatile *)&__kmp_threads = newThreads;
  *(kmp_root_t * *volatile *)&__kmp_root = newRoot;
  __kmp_free(temp_threads);
  added += newCapacity - __kmp_threads_capacity;
  *(volatile int *)&__kmp_threads_capacity = newCapacity;
3638 if (newCapacity > __kmp_tp_capacity) {
3639 __kmp_acquire_bootstrap_lock(&__kmp_tp_cached_lock);
3640 if (__kmp_tp_cached && newCapacity > __kmp_tp_capacity) {
3641 __kmp_threadprivate_resize_cache(newCapacity);
3643 *(
volatile int *)&__kmp_tp_capacity = newCapacity;
3645 __kmp_release_bootstrap_lock(&__kmp_tp_cached_lock);
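// __kmp_register_root: register the calling thread as an OpenMP root (uber)
// thread. Finds a free gtid slot, allocates or reuses the kmp_root_t and its
// uber kmp_info_t, creates the serial team, publishes the gtid via TLS,
// initializes barrier/affinity state and raises OMPT thread-begin callbacks
// when enabled.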
int __kmp_register_root(int initial_thread) {
  kmp_info_t *root_thread;
  __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
  KA_TRACE(20, ("__kmp_register_root: entered\n"));
3677 capacity = __kmp_threads_capacity;
3678 if (!initial_thread && TCR_PTR(__kmp_threads[0]) == NULL) {
3683 if (__kmp_all_nth >= capacity && !__kmp_expand_threads(1)) {
3684 if (__kmp_tp_cached) {
3685 __kmp_fatal(KMP_MSG(CantRegisterNewThread),
3686 KMP_HNT(Set_ALL_THREADPRIVATE, __kmp_tp_capacity),
3687 KMP_HNT(PossibleSystemLimitOnThreads), __kmp_msg_null);
3689 __kmp_fatal(KMP_MSG(CantRegisterNewThread), KMP_HNT(SystemLimitOnThreads),
  for (gtid = (initial_thread ? 0 : 1); TCR_PTR(__kmp_threads[gtid]) != NULL;
       gtid++)
    ;
  KA_TRACE(1,
           ("__kmp_register_root: found slot in threads array: T#%d\n", gtid));
  KMP_ASSERT(gtid < __kmp_threads_capacity);
3706 TCW_4(__kmp_nth, __kmp_nth + 1);
3710 if (__kmp_adjust_gtid_mode) {
3711 if (__kmp_all_nth >= __kmp_tls_gtid_min) {
3712 if (TCR_4(__kmp_gtid_mode) != 2) {
3713 TCW_4(__kmp_gtid_mode, 2);
3716 if (TCR_4(__kmp_gtid_mode) != 1) {
3717 TCW_4(__kmp_gtid_mode, 1);
#ifdef KMP_ADJUST_BLOCKTIME
  if (!__kmp_env_blocktime && (__kmp_avail_proc > 0)) {
3726 if (__kmp_nth > __kmp_avail_proc) {
3727 __kmp_zero_bt = TRUE;
3733 if (!(root = __kmp_root[gtid])) {
    root = __kmp_root[gtid] = (kmp_root_t *)__kmp_allocate(sizeof(kmp_root_t));
3735 KMP_DEBUG_ASSERT(!root->r.r_root_team);
#if KMP_STATS_ENABLED
  __kmp_stats_thread_ptr = __kmp_stats_list->push_back(gtid);
3741 __kmp_stats_thread_ptr->startLife();
3742 KMP_SET_THREAD_STATE(SERIAL_REGION);
3745 __kmp_initialize_root(root);
3748 if (root->r.r_uber_thread) {
3749 root_thread = root->r.r_uber_thread;
    root_thread = (kmp_info_t *)__kmp_allocate(sizeof(kmp_info_t));
3752 if (__kmp_storage_map) {
3753 __kmp_print_thread_storage_map(root_thread, gtid);
3755 root_thread->th.th_info.ds.ds_gtid = gtid;
3757 root_thread->th.ompt_thread_info.thread_data = ompt_data_none;
3759 root_thread->th.th_root = root;
3760 if (__kmp_env_consistency_check) {
3761 root_thread->th.th_cons = __kmp_allocate_cons_stack(gtid);
3764 __kmp_initialize_fast_memory(root_thread);
3768 KMP_DEBUG_ASSERT(root_thread->th.th_local.bget_data == NULL);
3769 __kmp_initialize_bget(root_thread);
3771 __kmp_init_random(root_thread);
3775 if (!root_thread->th.th_serial_team) {
3776 kmp_internal_control_t r_icvs = __kmp_get_global_icvs();
3777 KF_TRACE(10, (
"__kmp_register_root: before serial_team\n"));
3778 root_thread->th.th_serial_team =
3779 __kmp_allocate_team(root, 1, 1,
3786 &r_icvs, 0 USE_NESTED_HOT_ARG(NULL));
3788 KMP_ASSERT(root_thread->th.th_serial_team);
  KF_TRACE(10, ("__kmp_register_root: after serial_team = %p\n",
                root_thread->th.th_serial_team));
3793 TCW_SYNC_PTR(__kmp_threads[gtid], root_thread);
3795 root->r.r_root_team->t.t_threads[0] = root_thread;
3796 root->r.r_hot_team->t.t_threads[0] = root_thread;
3797 root_thread->th.th_serial_team->t.t_threads[0] = root_thread;
3799 root_thread->th.th_serial_team->t.t_serialized = 0;
3800 root->r.r_uber_thread = root_thread;
3803 __kmp_initialize_info(root_thread, root->r.r_root_team, 0, gtid);
3804 TCW_4(__kmp_init_gtid, TRUE);
3807 __kmp_gtid_set_specific(gtid);
3810 __kmp_itt_thread_name(gtid);
#ifdef KMP_TDATA_GTID
  __kmp_gtid = gtid;
#endif
  __kmp_create_worker(gtid, root_thread, __kmp_stksize);
3817 KMP_DEBUG_ASSERT(__kmp_gtid_get_specific() == gtid);
  KA_TRACE(20, ("__kmp_register_root: T#%d init T#%d(%d:%d) arrived: join=%u, "
                "plain=%u\n",
                gtid, __kmp_gtid_from_tid(0, root->r.r_hot_team),
                root->r.r_hot_team->t.t_id, 0, KMP_INIT_BARRIER_STATE,
                KMP_INIT_BARRIER_STATE));
3826 for (b = 0; b < bs_last_barrier; ++b) {
3827 root_thread->th.th_bar[b].bb.b_arrived = KMP_INIT_BARRIER_STATE;
3829 root_thread->th.th_bar[b].bb.b_worker_arrived = 0;
3833 KMP_DEBUG_ASSERT(root->r.r_hot_team->t.t_bar[bs_forkjoin_barrier].b_arrived ==
3834 KMP_INIT_BARRIER_STATE);
#if KMP_AFFINITY_SUPPORTED
  root_thread->th.th_current_place = KMP_PLACE_UNDEFINED;
3839 root_thread->th.th_new_place = KMP_PLACE_UNDEFINED;
3840 root_thread->th.th_first_place = KMP_PLACE_UNDEFINED;
3841 root_thread->th.th_last_place = KMP_PLACE_UNDEFINED;
3843 if (TCR_4(__kmp_init_middle)) {
3844 __kmp_affinity_set_init_mask(gtid, TRUE);
3848 root_thread->th.th_def_allocator = __kmp_def_allocator;
3849 root_thread->th.th_prev_level = 0;
3850 root_thread->th.th_prev_num_threads = 1;
3853 __kmp_root_counter++;
3856 if (!initial_thread && ompt_enabled.enabled) {
3858 kmp_info_t *root_thread = ompt_get_thread();
3860 ompt_set_thread_state(root_thread, ompt_state_overhead);
3862 if (ompt_enabled.ompt_callback_thread_begin) {
3863 ompt_callbacks.ompt_callback(ompt_callback_thread_begin)(
3864 ompt_thread_initial, __ompt_get_thread_data_internal());
3866 ompt_data_t *task_data;
3867 __ompt_get_task_info_internal(0, NULL, &task_data, NULL, NULL, NULL);
3868 if (ompt_enabled.ompt_callback_task_create) {
3869 ompt_callbacks.ompt_callback(ompt_callback_task_create)(
3870 NULL, NULL, task_data, ompt_task_initial, 0, NULL);
3874 ompt_set_thread_state(root_thread, ompt_state_work_serial);
3879 __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
#if KMP_NESTED_HOT_TEAMS
static int __kmp_free_hot_teams(kmp_root_t *root, kmp_info_t *thr, int level,
                                const int max_level) {
3888 kmp_hot_team_ptr_t *hot_teams = thr->th.th_hot_teams;
3889 if (!hot_teams || !hot_teams[level].hot_team) {
3892 KMP_DEBUG_ASSERT(level < max_level);
3893 kmp_team_t *team = hot_teams[level].hot_team;
3894 nth = hot_teams[level].hot_team_nth;
3896 if (level < max_level - 1) {
3897 for (i = 0; i < nth; ++i) {
3898 kmp_info_t *th = team->t.t_threads[i];
3899 n += __kmp_free_hot_teams(root, th, level + 1, max_level);
3900 if (i > 0 && th->th.th_hot_teams) {
3901 __kmp_free(th->th.th_hot_teams);
3902 th->th.th_hot_teams = NULL;
3906 __kmp_free_team(root, team, NULL);
static int __kmp_reset_root(int gtid, kmp_root_t *root) {
3914 kmp_team_t *root_team = root->r.r_root_team;
3915 kmp_team_t *hot_team = root->r.r_hot_team;
3916 int n = hot_team->t.t_nproc;
3919 KMP_DEBUG_ASSERT(!root->r.r_active);
3921 root->r.r_root_team = NULL;
3922 root->r.r_hot_team = NULL;
3925 __kmp_free_team(root, root_team USE_NESTED_HOT_ARG(NULL));
#if KMP_NESTED_HOT_TEAMS
  if (__kmp_hot_teams_max_level >
      0) { // need to free nested hot teams and their threads if any
3929 for (i = 0; i < hot_team->t.t_nproc; ++i) {
3930 kmp_info_t *th = hot_team->t.t_threads[i];
3931 if (__kmp_hot_teams_max_level > 1) {
3932 n += __kmp_free_hot_teams(root, th, 1, __kmp_hot_teams_max_level);
3934 if (th->th.th_hot_teams) {
3935 __kmp_free(th->th.th_hot_teams);
3936 th->th.th_hot_teams = NULL;
3941 __kmp_free_team(root, hot_team USE_NESTED_HOT_ARG(NULL));
3946 if (__kmp_tasking_mode != tskm_immediate_exec) {
3947 __kmp_wait_to_unref_task_teams();
  KA_TRACE(
      10, ("__kmp_reset_root: free handle, th = %p, handle = %" KMP_UINTPTR_SPEC
           "\n",
           (LPVOID) & (root->r.r_uber_thread->th),
           root->r.r_uber_thread->th.th_info.ds.ds_thread));
  __kmp_free_handle(root->r.r_uber_thread->th.th_info.ds.ds_thread);
3961 if (ompt_enabled.ompt_callback_thread_end) {
3962 ompt_callbacks.ompt_callback(ompt_callback_thread_end)(
3963 &(root->r.r_uber_thread->th.ompt_thread_info.thread_data));
3969 root->r.r_cg_nthreads--;
3971 __kmp_reap_thread(root->r.r_uber_thread, 1);
3975 root->r.r_uber_thread = NULL;
3977 root->r.r_begin = FALSE;
void __kmp_unregister_root_current_thread(int gtid) {
  KA_TRACE(1, ("__kmp_unregister_root_current_thread: enter T#%d\n", gtid));
  __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
  if (TCR_4(__kmp_global.g.g_done) || !__kmp_init_serial) {
    KC_TRACE(10, ("__kmp_unregister_root_current_thread: already finished, "
                  "exiting T#%d\n",
                  gtid));
    __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
3995 kmp_root_t *root = __kmp_root[gtid];
3997 KMP_DEBUG_ASSERT(__kmp_threads && __kmp_threads[gtid]);
3998 KMP_ASSERT(KMP_UBER_GTID(gtid));
3999 KMP_ASSERT(root == __kmp_threads[gtid]->th.th_root);
4000 KMP_ASSERT(root->r.r_active == FALSE);
4005 kmp_info_t *thread = __kmp_threads[gtid];
4006 kmp_team_t *team = thread->th.th_team;
4007 kmp_task_team_t *task_team = thread->th.th_task_team;
4010 if (task_team != NULL && task_team->tt.tt_found_proxy_tasks) {
4013 thread->th.ompt_thread_info.state = ompt_state_undefined;
4015 __kmp_task_team_wait(thread, team USE_ITT_BUILD_ARG(NULL));
4019 __kmp_reset_root(gtid, root);
4022 __kmp_gtid_set_specific(KMP_GTID_DNE);
#ifdef KMP_TDATA_GTID
  __kmp_gtid = KMP_GTID_DNE;
#endif
  KC_TRACE(10,
           ("__kmp_unregister_root_current_thread: T#%d unregistered\n", gtid));
  __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
static int __kmp_unregister_root_other_thread(int gtid) {
  kmp_root_t *root = __kmp_root[gtid];
  KA_TRACE(1, ("__kmp_unregister_root_other_thread: enter T#%d\n", gtid));
4043 KMP_DEBUG_ASSERT(__kmp_threads && __kmp_threads[gtid]);
4044 KMP_ASSERT(KMP_UBER_GTID(gtid));
4045 KMP_ASSERT(root == __kmp_threads[gtid]->th.th_root);
4046 KMP_ASSERT(root->r.r_active == FALSE);
4048 r = __kmp_reset_root(gtid, root);
  KA_TRACE(1,
           ("__kmp_unregister_root_other_thread: T#%d unregistered\n", gtid));
4056 void __kmp_task_info() {
4058 kmp_int32 gtid = __kmp_entry_gtid();
4059 kmp_int32 tid = __kmp_tid_from_gtid(gtid);
4060 kmp_info_t *this_thr = __kmp_threads[gtid];
4061 kmp_team_t *steam = this_thr->th.th_serial_team;
4062 kmp_team_t *team = this_thr->th.th_team;
4065 "__kmp_task_info: gtid=%d tid=%d t_thread=%p team=%p steam=%p curtask=%p " 4067 gtid, tid, this_thr, team, steam, this_thr->th.th_current_task,
4068 team->t.t_implicit_task_taskdata[tid].td_parent);
4075 static void __kmp_initialize_info(kmp_info_t *this_thr, kmp_team_t *team,
                                  int tid, int gtid) {
4080 kmp_info_t *master = team->t.t_threads[0];
4081 KMP_DEBUG_ASSERT(this_thr != NULL);
4082 KMP_DEBUG_ASSERT(this_thr->th.th_serial_team);
4083 KMP_DEBUG_ASSERT(team);
4084 KMP_DEBUG_ASSERT(team->t.t_threads);
4085 KMP_DEBUG_ASSERT(team->t.t_dispatch);
4086 KMP_DEBUG_ASSERT(master);
4087 KMP_DEBUG_ASSERT(master->th.th_root);
4091 TCW_SYNC_PTR(this_thr->th.th_team, team);
4093 this_thr->th.th_info.ds.ds_tid = tid;
4094 this_thr->th.th_set_nproc = 0;
  if (__kmp_tasking_mode != tskm_immediate_exec)
    this_thr->th.th_reap_state = KMP_NOT_SAFE_TO_REAP;
  else
    this_thr->th.th_reap_state = KMP_SAFE_TO_REAP;
4102 this_thr->th.th_set_proc_bind = proc_bind_default;
#if KMP_AFFINITY_SUPPORTED
  this_thr->th.th_new_place = this_thr->th.th_current_place;
4107 this_thr->th.th_root = master->th.th_root;
4110 this_thr->th.th_team_nproc = team->t.t_nproc;
4111 this_thr->th.th_team_master = master;
4112 this_thr->th.th_team_serialized = team->t.t_serialized;
4113 TCW_PTR(this_thr->th.th_sleep_loc, NULL);
4115 KMP_DEBUG_ASSERT(team->t.t_implicit_task_taskdata);
  KF_TRACE(10, ("__kmp_initialize_info1: T#%d:%d this_thread=%p curtask=%p\n",
                tid, gtid, this_thr, this_thr->th.th_current_task));
  __kmp_init_implicit_task(this_thr->th.th_team_master->th.th_ident, this_thr,
                           team, tid, TRUE);
  KF_TRACE(10, ("__kmp_initialize_info2: T#%d:%d this_thread=%p curtask=%p\n",
                tid, gtid, this_thr, this_thr->th.th_current_task));
4129 this_thr->th.th_dispatch = &team->t.t_dispatch[tid];
4131 this_thr->th.th_local.this_construct = 0;
4133 if (!this_thr->th.th_pri_common) {
    this_thr->th.th_pri_common =
        (struct common_table *)__kmp_allocate(sizeof(struct common_table));
    if (__kmp_storage_map) {
      __kmp_print_storage_map_gtid(
          gtid, this_thr->th.th_pri_common, this_thr->th.th_pri_common + 1,
          sizeof(struct common_table), "th_%d.th_pri_common\n", gtid);
4141 this_thr->th.th_pri_head = NULL;
4146 volatile kmp_disp_t *dispatch = this_thr->th.th_dispatch;
    size_t disp_size =
        sizeof(dispatch_private_info_t) *
        (team->t.t_max_nproc == 1 ? 1 : __kmp_dispatch_num_buffers);
    KD_TRACE(10, ("__kmp_initialize_info: T#%d max_nproc: %d\n", gtid,
                  team->t.t_max_nproc));
4153 KMP_ASSERT(dispatch);
4154 KMP_DEBUG_ASSERT(team->t.t_dispatch);
4155 KMP_DEBUG_ASSERT(dispatch == &team->t.t_dispatch[tid]);
4157 dispatch->th_disp_index = 0;
4159 dispatch->th_doacross_buf_idx = 0;
4161 if (!dispatch->th_disp_buffer) {
4162 dispatch->th_disp_buffer =
4163 (dispatch_private_info_t *)__kmp_allocate(disp_size);
4165 if (__kmp_storage_map) {
4166 __kmp_print_storage_map_gtid(
4167 gtid, &dispatch->th_disp_buffer[0],
          &dispatch->th_disp_buffer[team->t.t_max_nproc == 1
                                        ? 1
                                        : __kmp_dispatch_num_buffers],
          disp_size,
          "th_%d.th_dispatch.th_disp_buffer "
          "(team_%d.t_dispatch[%d].th_disp_buffer)",
          gtid, team->t.t_id, gtid);
    memset(&dispatch->th_disp_buffer[0], '\0', disp_size);
4179 dispatch->th_dispatch_pr_current = 0;
4180 dispatch->th_dispatch_sh_current = 0;
4182 dispatch->th_deo_fcn = 0;
4183 dispatch->th_dxo_fcn = 0;
4186 this_thr->th.th_next_pool = NULL;
4188 if (!this_thr->th.th_task_state_memo_stack) {
4190 this_thr->th.th_task_state_memo_stack =
4191 (kmp_uint8 *)__kmp_allocate(4 *
sizeof(kmp_uint8));
4192 this_thr->th.th_task_state_top = 0;
4193 this_thr->th.th_task_state_stack_sz = 4;
4194 for (i = 0; i < this_thr->th.th_task_state_stack_sz;
4196 this_thr->th.th_task_state_memo_stack[i] = 0;
4199 KMP_DEBUG_ASSERT(!this_thr->th.th_spin_here);
4200 KMP_DEBUG_ASSERT(this_thr->th.th_next_waiting == 0);
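// __kmp_allocate_thread: obtain a worker for slot new_tid of the given team.
// Reuses a thread from __kmp_thread_pool when one is available; otherwise
// allocates a fresh kmp_info_t, builds its serial team, initializes
// barrier/affinity state and spawns the OS thread via __kmp_create_worker().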
kmp_info_t *__kmp_allocate_thread(kmp_root_t *root, kmp_team_t *team,
                                  int new_tid) {
  kmp_team_t *serial_team;
  kmp_info_t *new_thr;
  KA_TRACE(20, ("__kmp_allocate_thread: T#%d\n", __kmp_get_gtid()));
4217 KMP_DEBUG_ASSERT(root && team);
#if !KMP_NESTED_HOT_TEAMS
  KMP_DEBUG_ASSERT(KMP_MASTER_GTID(__kmp_get_gtid()));
4224 if (__kmp_thread_pool) {
4226 new_thr = CCAST(kmp_info_t *, __kmp_thread_pool);
    __kmp_thread_pool = (volatile kmp_info_t *)new_thr->th.th_next_pool;
4228 if (new_thr == __kmp_thread_pool_insert_pt) {
4229 __kmp_thread_pool_insert_pt = NULL;
4231 TCW_4(new_thr->th.th_in_pool, FALSE);
4234 __kmp_thread_pool_nth--;
    KA_TRACE(20, ("__kmp_allocate_thread: T#%d using thread T#%d\n",
                  __kmp_get_gtid(), new_thr->th.th_info.ds.ds_gtid));
4238 KMP_ASSERT(!new_thr->th.th_team);
4239 KMP_DEBUG_ASSERT(__kmp_nth < __kmp_threads_capacity);
4240 KMP_DEBUG_ASSERT(__kmp_thread_pool_nth >= 0);
4243 __kmp_initialize_info(new_thr, team, new_tid,
4244 new_thr->th.th_info.ds.ds_gtid);
4245 KMP_DEBUG_ASSERT(new_thr->th.th_serial_team);
4247 TCW_4(__kmp_nth, __kmp_nth + 1);
4248 root->r.r_cg_nthreads++;
4250 new_thr->th.th_task_state = 0;
4251 new_thr->th.th_task_state_top = 0;
4252 new_thr->th.th_task_state_stack_sz = 4;
#ifdef KMP_ADJUST_BLOCKTIME
    if (!__kmp_env_blocktime && (__kmp_avail_proc > 0)) {
4258 if (__kmp_nth > __kmp_avail_proc) {
4259 __kmp_zero_bt = TRUE;
4268 kmp_balign_t *balign = new_thr->th.th_bar;
4269 for (b = 0; b < bs_last_barrier; ++b)
4270 KMP_DEBUG_ASSERT(balign[b].bb.wait_flag != KMP_BARRIER_PARENT_FLAG);
    KF_TRACE(10, ("__kmp_allocate_thread: T#%d using thread %p T#%d\n",
                  __kmp_get_gtid(), new_thr, new_thr->th.th_info.ds.ds_gtid));
4281 KMP_ASSERT(__kmp_nth == __kmp_all_nth);
4282 KMP_ASSERT(__kmp_all_nth < __kmp_threads_capacity);
4287 if (!TCR_4(__kmp_init_monitor)) {
4288 __kmp_acquire_bootstrap_lock(&__kmp_monitor_lock);
4289 if (!TCR_4(__kmp_init_monitor)) {
4290 KF_TRACE(10, (
"before __kmp_create_monitor\n"));
4291 TCW_4(__kmp_init_monitor, 1);
4292 __kmp_create_monitor(&__kmp_monitor);
4293 KF_TRACE(10, (
"after __kmp_create_monitor\n"));
4304 while (TCR_4(__kmp_init_monitor) < 2) {
4307 KF_TRACE(10, (
"after monitor thread has started\n"));
4310 __kmp_release_bootstrap_lock(&__kmp_monitor_lock);
4315 for (new_gtid = 1; TCR_PTR(__kmp_threads[new_gtid]) != NULL; ++new_gtid) {
4316 KMP_DEBUG_ASSERT(new_gtid < __kmp_threads_capacity);
  new_thr = (kmp_info_t *)__kmp_allocate(sizeof(kmp_info_t));
4322 TCW_SYNC_PTR(__kmp_threads[new_gtid], new_thr);
4324 if (__kmp_storage_map) {
4325 __kmp_print_thread_storage_map(new_thr, new_gtid);
4330 kmp_internal_control_t r_icvs = __kmp_get_x_global_icvs(team);
4331 KF_TRACE(10, (
"__kmp_allocate_thread: before th_serial/serial_team\n"));
4332 new_thr->th.th_serial_team = serial_team =
4333 (kmp_team_t *)__kmp_allocate_team(root, 1, 1,
4340 &r_icvs, 0 USE_NESTED_HOT_ARG(NULL));
4342 KMP_ASSERT(serial_team);
4343 serial_team->t.t_serialized = 0;
4345 serial_team->t.t_threads[0] = new_thr;
  KF_TRACE(10,
           ("__kmp_allocate_thread: after th_serial/serial_team : new_thr=%p\n",
            new_thr));
4351 __kmp_initialize_info(new_thr, team, new_tid, new_gtid);
4354 __kmp_initialize_fast_memory(new_thr);
4358 KMP_DEBUG_ASSERT(new_thr->th.th_local.bget_data == NULL);
4359 __kmp_initialize_bget(new_thr);
4362 __kmp_init_random(new_thr);
  KA_TRACE(20,
           ("__kmp_allocate_thread: T#%d init go fork=%u, plain=%u\n",
            __kmp_get_gtid(), KMP_INIT_BARRIER_STATE, KMP_INIT_BARRIER_STATE));
4370 kmp_balign_t *balign = new_thr->th.th_bar;
4371 for (b = 0; b < bs_last_barrier; ++b) {
4372 balign[b].bb.b_go = KMP_INIT_BARRIER_STATE;
4373 balign[b].bb.team = NULL;
4374 balign[b].bb.wait_flag = KMP_BARRIER_NOT_WAITING;
4375 balign[b].bb.use_oncore_barrier = 0;
4378 new_thr->th.th_spin_here = FALSE;
4379 new_thr->th.th_next_waiting = 0;
4381 new_thr->th.th_blocking =
false;
#if OMP_40_ENABLED && KMP_AFFINITY_SUPPORTED
  new_thr->th.th_current_place = KMP_PLACE_UNDEFINED;
4386 new_thr->th.th_new_place = KMP_PLACE_UNDEFINED;
4387 new_thr->th.th_first_place = KMP_PLACE_UNDEFINED;
4388 new_thr->th.th_last_place = KMP_PLACE_UNDEFINED;
4391 new_thr->th.th_def_allocator = __kmp_def_allocator;
4392 new_thr->th.th_prev_level = 0;
4393 new_thr->th.th_prev_num_threads = 1;
4396 TCW_4(new_thr->th.th_in_pool, FALSE);
4397 new_thr->th.th_active_in_pool = FALSE;
4398 TCW_4(new_thr->th.th_active, TRUE);
4404 root->r.r_cg_nthreads++;
4408 if (__kmp_adjust_gtid_mode) {
4409 if (__kmp_all_nth >= __kmp_tls_gtid_min) {
4410 if (TCR_4(__kmp_gtid_mode) != 2) {
4411 TCW_4(__kmp_gtid_mode, 2);
4414 if (TCR_4(__kmp_gtid_mode) != 1) {
4415 TCW_4(__kmp_gtid_mode, 1);
#ifdef KMP_ADJUST_BLOCKTIME
  if (!__kmp_env_blocktime && (__kmp_avail_proc > 0)) {
4424 if (__kmp_nth > __kmp_avail_proc) {
4425 __kmp_zero_bt = TRUE;
  KF_TRACE(
      10, ("__kmp_allocate_thread: before __kmp_create_worker: %p\n", new_thr));
  __kmp_create_worker(new_gtid, new_thr, __kmp_stksize);
  KF_TRACE(10,
           ("__kmp_allocate_thread: after __kmp_create_worker: %p\n", new_thr));
  KA_TRACE(20, ("__kmp_allocate_thread: T#%d forked T#%d\n", __kmp_get_gtid(),
                new_gtid));
static void __kmp_reinitialize_team(kmp_team_t *team,
                                    kmp_internal_control_t *new_icvs,
                                    ident_t *loc) {
  KF_TRACE(10, ("__kmp_reinitialize_team: enter this_thread=%p team=%p\n",
                team->t.t_threads[0], team));
4453 KMP_DEBUG_ASSERT(team && new_icvs);
4454 KMP_DEBUG_ASSERT((!TCR_4(__kmp_init_parallel)) || new_icvs->nproc);
4455 KMP_CHECK_UPDATE(team->t.t_ident, loc);
4457 KMP_CHECK_UPDATE(team->t.t_id, KMP_GEN_TEAM_ID());
4459 __kmp_init_implicit_task(loc, team->t.t_threads[0], team, 0, FALSE);
4460 copy_icvs(&team->t.t_implicit_task_taskdata[0].td_icvs, new_icvs);
4462 KF_TRACE(10, (
"__kmp_reinitialize_team: exit this_thread=%p team=%p\n",
4463 team->t.t_threads[0], team));
static void __kmp_initialize_team(kmp_team_t *team, int new_nproc,
                                  kmp_internal_control_t *new_icvs,
                                  ident_t *loc) {
  KF_TRACE(10, ("__kmp_initialize_team: enter: team=%p\n", team));
4475 KMP_DEBUG_ASSERT(team);
4476 KMP_DEBUG_ASSERT(new_nproc <= team->t.t_max_nproc);
4477 KMP_DEBUG_ASSERT(team->t.t_threads);
4480 team->t.t_master_tid = 0;
4482 team->t.t_serialized = new_nproc > 1 ? 0 : 1;
4483 team->t.t_nproc = new_nproc;
4486 team->t.t_next_pool = NULL;
4490 TCW_SYNC_PTR(team->t.t_pkfn, NULL);
4491 team->t.t_invoke = NULL;
4494 team->t.t_sched.sched = new_icvs->sched.sched;
4496 #if KMP_ARCH_X86 || KMP_ARCH_X86_64 4497 team->t.t_fp_control_saved = FALSE;
4498 team->t.t_x87_fpu_control_word = 0;
4499 team->t.t_mxcsr = 0;
4502 team->t.t_construct = 0;
4504 team->t.t_ordered.dt.t_value = 0;
4505 team->t.t_master_active = FALSE;
4507 memset(&team->t.t_taskq,
'\0',
sizeof(kmp_taskq_t));
4510 team->t.t_copypriv_data = NULL;
4513 team->t.t_copyin_counter = 0;
4516 team->t.t_control_stack_top = NULL;
4518 __kmp_reinitialize_team(team, new_icvs, loc);
  KF_TRACE(10, ("__kmp_initialize_team: exit: team=%p\n", team));
}

#if KMP_OS_LINUX && KMP_AFFINITY_SUPPORTED
static void
__kmp_set_thread_affinity_mask_full_tmp(kmp_affin_mask_t *old_mask) {
4528 if (KMP_AFFINITY_CAPABLE()) {
4530 if (old_mask != NULL) {
4531 status = __kmp_get_system_affinity(old_mask, TRUE);
4534 __kmp_fatal(KMP_MSG(ChangeThreadAffMaskError), KMP_ERR(error),
4538 __kmp_set_system_affinity(__kmp_affin_fullMask, TRUE);
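// __kmp_partition_places: distribute the team's threads over the affinity
// places according to the team's proc_bind policy (master, close, spread),
// giving each thread a [first,last] place partition and its th_new_place.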
#if OMP_40_ENABLED && KMP_AFFINITY_SUPPORTED
static void __kmp_partition_places(kmp_team_t *team, int update_master_only) {
4551 kmp_info_t *master_th = team->t.t_threads[0];
4552 KMP_DEBUG_ASSERT(master_th != NULL);
4553 kmp_proc_bind_t proc_bind = team->t.t_proc_bind;
4554 int first_place = master_th->th.th_first_place;
4555 int last_place = master_th->th.th_last_place;
4556 int masters_place = master_th->th.th_current_place;
4557 team->t.t_first_place = first_place;
4558 team->t.t_last_place = last_place;
  KA_TRACE(20, ("__kmp_partition_places: enter: proc_bind = %d T#%d(%d:0) "
                "bound to place %d partition = [%d,%d]\n",
                proc_bind, __kmp_gtid_from_thread(team->t.t_threads[0]),
                team->t.t_id, masters_place, first_place, last_place));
4565 switch (proc_bind) {
4567 case proc_bind_default:
4570 KMP_DEBUG_ASSERT(team->t.t_nproc == 1);
4573 case proc_bind_master: {
4575 int n_th = team->t.t_nproc;
4576 for (f = 1; f < n_th; f++) {
4577 kmp_info_t *th = team->t.t_threads[f];
4578 KMP_DEBUG_ASSERT(th != NULL);
4579 th->th.th_first_place = first_place;
4580 th->th.th_last_place = last_place;
4581 th->th.th_new_place = masters_place;
4583 if (__kmp_display_affinity && masters_place != th->th.th_current_place &&
4584 team->t.t_display_affinity != 1) {
4585 team->t.t_display_affinity = 1;
      KA_TRACE(100, ("__kmp_partition_places: master: T#%d(%d:%d) place %d "
                     "partition = [%d,%d]\n",
                     __kmp_gtid_from_thread(team->t.t_threads[f]), team->t.t_id,
                     f, masters_place, first_place, last_place));
4596 case proc_bind_close: {
4598 int n_th = team->t.t_nproc;
4600 if (first_place <= last_place) {
4601 n_places = last_place - first_place + 1;
4603 n_places = __kmp_affinity_num_masks - first_place + last_place + 1;
4605 if (n_th <= n_places) {
4606 int place = masters_place;
4607 for (f = 1; f < n_th; f++) {
4608 kmp_info_t *th = team->t.t_threads[f];
4609 KMP_DEBUG_ASSERT(th != NULL);
4611 if (place == last_place) {
4612 place = first_place;
        } else if (place == (int)(__kmp_affinity_num_masks - 1)) {
4618 th->th.th_first_place = first_place;
4619 th->th.th_last_place = last_place;
4620 th->th.th_new_place = place;
4622 if (__kmp_display_affinity && place != th->th.th_current_place &&
4623 team->t.t_display_affinity != 1) {
4624 team->t.t_display_affinity = 1;
4628 KA_TRACE(100, (
"__kmp_partition_places: close: T#%d(%d:%d) place %d " 4629 "partition = [%d,%d]\n",
4630 __kmp_gtid_from_thread(team->t.t_threads[f]),
4631 team->t.t_id, f, place, first_place, last_place));
4634 int S, rem, gap, s_count;
4635 S = n_th / n_places;
4637 rem = n_th - (S * n_places);
4638 gap = rem > 0 ? n_places / rem : n_places;
4639 int place = masters_place;
4641 for (f = 0; f < n_th; f++) {
4642 kmp_info_t *th = team->t.t_threads[f];
4643 KMP_DEBUG_ASSERT(th != NULL);
4645 th->th.th_first_place = first_place;
4646 th->th.th_last_place = last_place;
4647 th->th.th_new_place = place;
4649 if (__kmp_display_affinity && place != th->th.th_current_place &&
4650 team->t.t_display_affinity != 1) {
4651 team->t.t_display_affinity = 1;
4656 if ((s_count == S) && rem && (gap_ct == gap)) {
        } else if ((s_count == S + 1) && rem && (gap_ct == gap)) {
          if (place == last_place) {
            place = first_place;
          } else if (place == (int)(__kmp_affinity_num_masks - 1)) {
        } else if (s_count == S) {
          if (place == last_place) {
            place = first_place;
          } else if (place == (int)(__kmp_affinity_num_masks - 1)) {
4683 (
"__kmp_partition_places: close: T#%d(%d:%d) place %d " 4684 "partition = [%d,%d]\n",
4685 __kmp_gtid_from_thread(team->t.t_threads[f]), team->t.t_id, f,
4686 th->th.th_new_place, first_place, last_place));
4688 KMP_DEBUG_ASSERT(place == masters_place);
4692 case proc_bind_spread: {
4694 int n_th = team->t.t_nproc;
4697 if (first_place <= last_place) {
4698 n_places = last_place - first_place + 1;
4700 n_places = __kmp_affinity_num_masks - first_place + last_place + 1;
4702 if (n_th <= n_places) {
4705 if (n_places != static_cast<int>(__kmp_affinity_num_masks)) {
4706 int S = n_places / n_th;
4707 int s_count, rem, gap, gap_ct;
4709 place = masters_place;
4710 rem = n_places - n_th * S;
4711 gap = rem ? n_th / rem : 1;
4714 if (update_master_only == 1)
4716 for (f = 0; f < thidx; f++) {
4717 kmp_info_t *th = team->t.t_threads[f];
4718 KMP_DEBUG_ASSERT(th != NULL);
4720 th->th.th_first_place = place;
4721 th->th.th_new_place = place;
4723 if (__kmp_display_affinity && place != th->th.th_current_place &&
4724 team->t.t_display_affinity != 1) {
4725 team->t.t_display_affinity = 1;
4729 while (s_count < S) {
4730 if (place == last_place) {
4731 place = first_place;
4732 }
else if (place == (
int)(__kmp_affinity_num_masks - 1)) {
4739 if (rem && (gap_ct == gap)) {
4740 if (place == last_place) {
4741 place = first_place;
4742 }
else if (place == (
int)(__kmp_affinity_num_masks - 1)) {
4750 th->th.th_last_place = place;
4753 if (place == last_place) {
4754 place = first_place;
4755 }
else if (place == (
int)(__kmp_affinity_num_masks - 1)) {
4762 (
"__kmp_partition_places: spread: T#%d(%d:%d) place %d " 4763 "partition = [%d,%d], __kmp_affinity_num_masks: %u\n",
4764 __kmp_gtid_from_thread(team->t.t_threads[f]), team->t.t_id,
4765 f, th->th.th_new_place, th->th.th_first_place,
4766 th->th.th_last_place, __kmp_affinity_num_masks));
        double current = static_cast<double>(masters_place);
        double spacing =
            (static_cast<double>(n_places + 1) / static_cast<double>(n_th));
4779 if (update_master_only == 1)
4781 for (f = 0; f < thidx; f++) {
4782 first =
static_cast<int>(current);
4783 last =
static_cast<int>(current + spacing) - 1;
4784 KMP_DEBUG_ASSERT(last >= first);
4785 if (first >= n_places) {
4786 if (masters_place) {
4789 if (first == (masters_place + 1)) {
4790 KMP_DEBUG_ASSERT(f == n_th);
4793 if (last == masters_place) {
4794 KMP_DEBUG_ASSERT(f == (n_th - 1));
4798 KMP_DEBUG_ASSERT(f == n_th);
4803 if (last >= n_places) {
4804 last = (n_places - 1);
4809 KMP_DEBUG_ASSERT(0 <= first);
4810 KMP_DEBUG_ASSERT(n_places > first);
4811 KMP_DEBUG_ASSERT(0 <= last);
4812 KMP_DEBUG_ASSERT(n_places > last);
4813 KMP_DEBUG_ASSERT(last_place >= first_place);
4814 th = team->t.t_threads[f];
4815 KMP_DEBUG_ASSERT(th);
4816 th->th.th_first_place = first;
4817 th->th.th_new_place = place;
4818 th->th.th_last_place = last;
4820 if (__kmp_display_affinity && place != th->th.th_current_place &&
4821 team->t.t_display_affinity != 1) {
4822 team->t.t_display_affinity = 1;
4826 (
"__kmp_partition_places: spread: T#%d(%d:%d) place %d " 4827 "partition = [%d,%d], spacing = %.4f\n",
4828 __kmp_gtid_from_thread(team->t.t_threads[f]),
4829 team->t.t_id, f, th->th.th_new_place,
4830 th->th.th_first_place, th->th.th_last_place, spacing));
4834 KMP_DEBUG_ASSERT(update_master_only || place == masters_place);
4836 int S, rem, gap, s_count;
4837 S = n_th / n_places;
4839 rem = n_th - (S * n_places);
4840 gap = rem > 0 ? n_places / rem : n_places;
4841 int place = masters_place;
4844 if (update_master_only == 1)
4846 for (f = 0; f < thidx; f++) {
4847 kmp_info_t *th = team->t.t_threads[f];
4848 KMP_DEBUG_ASSERT(th != NULL);
4850 th->th.th_first_place = place;
4851 th->th.th_last_place = place;
4852 th->th.th_new_place = place;
4854 if (__kmp_display_affinity && place != th->th.th_current_place &&
4855 team->t.t_display_affinity != 1) {
4856 team->t.t_display_affinity = 1;
4861 if ((s_count == S) && rem && (gap_ct == gap)) {
4863 }
else if ((s_count == S + 1) && rem && (gap_ct == gap)) {
4865 if (place == last_place) {
4866 place = first_place;
4867 }
else if (place == (
int)(__kmp_affinity_num_masks - 1)) {
4875 }
else if (s_count == S) {
4876 if (place == last_place) {
4877 place = first_place;
4878 }
else if (place == (
int)(__kmp_affinity_num_masks - 1)) {
4887 KA_TRACE(100, (
"__kmp_partition_places: spread: T#%d(%d:%d) place %d " 4888 "partition = [%d,%d]\n",
4889 __kmp_gtid_from_thread(team->t.t_threads[f]),
4890 team->t.t_id, f, th->th.th_new_place,
4891 th->th.th_first_place, th->th.th_last_place));
4893 KMP_DEBUG_ASSERT(update_master_only || place == masters_place);
  KA_TRACE(20, ("__kmp_partition_places: exit T#%d\n", team->t.t_id));
kmp_team_t *
__kmp_allocate_team(kmp_root_t *root, int new_nproc, int max_nproc,
4911 ompt_data_t ompt_parallel_data,
4914 kmp_proc_bind_t new_proc_bind,
4916 kmp_internal_control_t *new_icvs,
4917 int argc USE_NESTED_HOT_ARG(kmp_info_t *master)) {
4918 KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(KMP_allocate_team);
4921 int use_hot_team = !root->r.r_active;
4924 KA_TRACE(20, (
"__kmp_allocate_team: called\n"));
4925 KMP_DEBUG_ASSERT(new_nproc >= 1 && argc >= 0);
4926 KMP_DEBUG_ASSERT(max_nproc >= new_nproc);
4929 #if KMP_NESTED_HOT_TEAMS 4930 kmp_hot_team_ptr_t *hot_teams;
4932 team = master->th.th_team;
4933 level = team->t.t_active_level;
4934 if (master->th.th_teams_microtask) {
4935 if (master->th.th_teams_size.nteams > 1 &&
4938 (microtask_t)__kmp_teams_master ||
4939 master->th.th_teams_level <
4945 hot_teams = master->th.th_hot_teams;
4946 if (level < __kmp_hot_teams_max_level && hot_teams &&
4956 if (use_hot_team && new_nproc > 1) {
4957 KMP_DEBUG_ASSERT(new_nproc == max_nproc);
4958 #if KMP_NESTED_HOT_TEAMS 4959 team = hot_teams[level].hot_team;
4961 team = root->r.r_hot_team;
4964 if (__kmp_tasking_mode != tskm_immediate_exec) {
      KA_TRACE(20, ("__kmp_allocate_team: hot team task_team[0] = %p "
                    "task_team[1] = %p before reinit\n",
                    team->t.t_task_team[0], team->t.t_task_team[1]));
4974 if (team->t.t_nproc == new_nproc) {
4975 KA_TRACE(20, (
"__kmp_allocate_team: reusing hot team\n"));
4978 if (team->t.t_size_changed == -1) {
4979 team->t.t_size_changed = 1;
4981 KMP_CHECK_UPDATE(team->t.t_size_changed, 0);
4985 kmp_r_sched_t new_sched = new_icvs->sched;
4987 KMP_CHECK_UPDATE(team->t.t_sched.sched, new_sched.sched);
4989 __kmp_reinitialize_team(team, new_icvs,
4990 root->r.r_uber_thread->th.th_ident);
      KF_TRACE(10, ("__kmp_allocate_team2: T#%d, this_thread=%p team=%p\n", 0,
                    team->t.t_threads[0], team));
4994 __kmp_push_current_task_to_thread(team->t.t_threads[0], team, 0);
4997 #if KMP_AFFINITY_SUPPORTED 4998 if ((team->t.t_size_changed == 0) &&
4999 (team->t.t_proc_bind == new_proc_bind)) {
5000 if (new_proc_bind == proc_bind_spread) {
5001 __kmp_partition_places(
          KA_TRACE(200, ("__kmp_allocate_team: reusing hot team #%d bindings: "
                         "proc_bind = %d, partition = [%d,%d]\n",
                         team->t.t_id, new_proc_bind, team->t.t_first_place,
                         team->t.t_last_place));
5009 KMP_CHECK_UPDATE(team->t.t_proc_bind, new_proc_bind);
5010 __kmp_partition_places(team);
5013 KMP_CHECK_UPDATE(team->t.t_proc_bind, new_proc_bind);
    } else if (team->t.t_nproc > new_nproc) {
      KA_TRACE(20,
               ("__kmp_allocate_team: decreasing hot team thread count to %d\n",
                new_nproc));
5021 team->t.t_size_changed = 1;
5022 #if KMP_NESTED_HOT_TEAMS 5023 if (__kmp_hot_teams_mode == 0) {
5026 KMP_DEBUG_ASSERT(hot_teams[level].hot_team_nth == team->t.t_nproc);
5027 hot_teams[level].hot_team_nth = new_nproc;
5028 #endif // KMP_NESTED_HOT_TEAMS 5030 for (f = new_nproc; f < team->t.t_nproc; f++) {
5031 KMP_DEBUG_ASSERT(team->t.t_threads[f]);
5032 if (__kmp_tasking_mode != tskm_immediate_exec) {
5035 team->t.t_threads[f]->th.th_task_team = NULL;
5037 __kmp_free_thread(team->t.t_threads[f]);
5038 team->t.t_threads[f] = NULL;
5040 #if KMP_NESTED_HOT_TEAMS 5045 for (f = new_nproc; f < team->t.t_nproc; ++f) {
5046 KMP_DEBUG_ASSERT(team->t.t_threads[f]);
5047 kmp_balign_t *balign = team->t.t_threads[f]->th.th_bar;
5048 for (
int b = 0; b < bs_last_barrier; ++b) {
5049 if (balign[b].bb.wait_flag == KMP_BARRIER_PARENT_FLAG) {
5050 balign[b].bb.wait_flag = KMP_BARRIER_SWITCH_TO_OWN_FLAG;
5052 KMP_CHECK_UPDATE(balign[b].bb.leaf_kids, 0);
5056 #endif // KMP_NESTED_HOT_TEAMS 5057 team->t.t_nproc = new_nproc;
5059 KMP_CHECK_UPDATE(team->t.t_sched.sched, new_icvs->sched.sched);
5060 __kmp_reinitialize_team(team, new_icvs,
5061 root->r.r_uber_thread->th.th_ident);
5064 for (f = 0; f < new_nproc; ++f) {
5065 team->t.t_threads[f]->th.th_team_nproc = new_nproc;
5069 KF_TRACE(10, (
"__kmp_allocate_team: T#%d, this_thread=%p team=%p\n", 0,
5070 team->t.t_threads[0], team));
5072 __kmp_push_current_task_to_thread(team->t.t_threads[0], team, 0);
5075 for (f = 0; f < team->t.t_nproc; f++) {
5076 KMP_DEBUG_ASSERT(team->t.t_threads[f] &&
5077 team->t.t_threads[f]->th.th_team_nproc ==
5083 KMP_CHECK_UPDATE(team->t.t_proc_bind, new_proc_bind);
5084 #if KMP_AFFINITY_SUPPORTED 5085 __kmp_partition_places(team);
5089 #if KMP_OS_LINUX && KMP_AFFINITY_SUPPORTED 5090 kmp_affin_mask_t *old_mask;
5091 if (KMP_AFFINITY_CAPABLE()) {
5092 KMP_CPU_ALLOC(old_mask);
5097 (
"__kmp_allocate_team: increasing hot team thread count to %d\n",
5100 team->t.t_size_changed = 1;
5102 #if KMP_NESTED_HOT_TEAMS 5103 int avail_threads = hot_teams[level].hot_team_nth;
5104 if (new_nproc < avail_threads)
5105 avail_threads = new_nproc;
5106 kmp_info_t **other_threads = team->t.t_threads;
5107 for (f = team->t.t_nproc; f < avail_threads; ++f) {
5111 kmp_balign_t *balign = other_threads[f]->th.th_bar;
5112 for (b = 0; b < bs_last_barrier; ++b) {
5113 balign[b].bb.b_arrived = team->t.t_bar[b].b_arrived;
5114 KMP_DEBUG_ASSERT(balign[b].bb.wait_flag != KMP_BARRIER_PARENT_FLAG);
5116 balign[b].bb.b_worker_arrived = team->t.t_bar[b].b_team_arrived;
5120 if (hot_teams[level].hot_team_nth >= new_nproc) {
5123 KMP_DEBUG_ASSERT(__kmp_hot_teams_mode == 1);
5124 team->t.t_nproc = new_nproc;
5130 hot_teams[level].hot_team_nth = new_nproc;
5131 #endif // KMP_NESTED_HOT_TEAMS 5132 if (team->t.t_max_nproc < new_nproc) {
5134 __kmp_reallocate_team_arrays(team, new_nproc);
5135 __kmp_reinitialize_team(team, new_icvs, NULL);
5138 #if KMP_OS_LINUX && KMP_AFFINITY_SUPPORTED 5143 __kmp_set_thread_affinity_mask_full_tmp(old_mask);
5147 for (f = team->t.t_nproc; f < new_nproc; f++) {
5148 kmp_info_t *new_worker = __kmp_allocate_thread(root, team, f);
5149 KMP_DEBUG_ASSERT(new_worker);
5150 team->t.t_threads[f] = new_worker;
5153 (
"__kmp_allocate_team: team %d init T#%d arrived: " 5154 "join=%llu, plain=%llu\n",
5155 team->t.t_id, __kmp_gtid_from_tid(f, team), team->t.t_id, f,
5156 team->t.t_bar[bs_forkjoin_barrier].b_arrived,
5157 team->t.t_bar[bs_plain_barrier].b_arrived));
5161 kmp_balign_t *balign = new_worker->th.th_bar;
5162 for (b = 0; b < bs_last_barrier; ++b) {
5163 balign[b].bb.b_arrived = team->t.t_bar[b].b_arrived;
5164 KMP_DEBUG_ASSERT(balign[b].bb.wait_flag !=
5165 KMP_BARRIER_PARENT_FLAG);
5167 balign[b].bb.b_worker_arrived = team->t.t_bar[b].b_team_arrived;
5173 #if KMP_OS_LINUX && KMP_AFFINITY_SUPPORTED 5174 if (KMP_AFFINITY_CAPABLE()) {
5176 __kmp_set_system_affinity(old_mask, TRUE);
5177 KMP_CPU_FREE(old_mask);
5180 #if KMP_NESTED_HOT_TEAMS 5182 #endif // KMP_NESTED_HOT_TEAMS 5184 int old_nproc = team->t.t_nproc;
5186 __kmp_initialize_team(team, new_nproc, new_icvs,
5187 root->r.r_uber_thread->th.th_ident);
5190 KMP_DEBUG_ASSERT(team->t.t_nproc == new_nproc);
5191 for (f = 0; f < team->t.t_nproc; ++f)
5192 __kmp_initialize_info(team->t.t_threads[f], team, f,
5193 __kmp_gtid_from_tid(f, team));
5200 for (f = old_nproc; f < team->t.t_nproc; ++f)
5201 team->t.t_threads[f]->th.th_task_state =
5202 team->t.t_threads[0]->th.th_task_state_memo_stack[level];
5205 team->t.t_threads[0]->th.th_task_state;
5206 for (f = old_nproc; f < team->t.t_nproc; ++f)
5207 team->t.t_threads[f]->th.th_task_state = old_state;
5211 for (f = 0; f < team->t.t_nproc; ++f) {
5212 KMP_DEBUG_ASSERT(team->t.t_threads[f] &&
5213 team->t.t_threads[f]->th.th_team_nproc ==
5219 KMP_CHECK_UPDATE(team->t.t_proc_bind, new_proc_bind);
5220 #if KMP_AFFINITY_SUPPORTED 5221 __kmp_partition_places(team);
5227 kmp_info_t *master = team->t.t_threads[0];
5228 if (master->th.th_teams_microtask) {
5229 for (f = 1; f < new_nproc; ++f) {
5231 kmp_info_t *thr = team->t.t_threads[f];
5232 thr->th.th_teams_microtask = master->th.th_teams_microtask;
5233 thr->th.th_teams_level = master->th.th_teams_level;
5234 thr->th.th_teams_size = master->th.th_teams_size;
5238 #if KMP_NESTED_HOT_TEAMS 5242 for (f = 1; f < new_nproc; ++f) {
5243 kmp_info_t *thr = team->t.t_threads[f];
5245 kmp_balign_t *balign = thr->th.th_bar;
5246 for (b = 0; b < bs_last_barrier; ++b) {
5247 balign[b].bb.b_arrived = team->t.t_bar[b].b_arrived;
5248 KMP_DEBUG_ASSERT(balign[b].bb.wait_flag != KMP_BARRIER_PARENT_FLAG);
5250 balign[b].bb.b_worker_arrived = team->t.t_bar[b].b_team_arrived;
5255 #endif // KMP_NESTED_HOT_TEAMS 5258 __kmp_alloc_argv_entries(argc, team, TRUE);
5259 KMP_CHECK_UPDATE(team->t.t_argc, argc);
5263 KF_TRACE(10, (
" hot_team = %p\n", team));
5266 if (__kmp_tasking_mode != tskm_immediate_exec) {
5267 KA_TRACE(20, (
"__kmp_allocate_team: hot team task_team[0] = %p " 5268 "task_team[1] = %p after reinit\n",
5269 team->t.t_task_team[0], team->t.t_task_team[1]));
5274 __ompt_team_assign_id(team, ompt_parallel_data);
  /* next, let's try to take one from the team pool */
  KMP_MB();
  for (team = CCAST(kmp_team_t *, __kmp_team_pool); (team);) {
    /* TODO: consider resizing undersized teams instead of reaping them, now
       that we have a resizing mechanism */
    if (team->t.t_max_nproc >= max_nproc) {
      /* take this team from the team pool */
      __kmp_team_pool = team->t.t_next_pool;

      /* set up the team for fresh use */
      __kmp_initialize_team(team, new_nproc, new_icvs, NULL);

      KA_TRACE(20, ("__kmp_allocate_team: setting task_team[0] %p and "
                    "task_team[1] %p to NULL\n",
                    &team->t.t_task_team[0], &team->t.t_task_team[1]));
      team->t.t_task_team[0] = NULL;
      team->t.t_task_team[1] = NULL;

      /* reallocate space for arguments if necessary */
      __kmp_alloc_argv_entries(argc, team, TRUE);
      KMP_CHECK_UPDATE(team->t.t_argc, argc);

      KA_TRACE(
          20, ("__kmp_allocate_team: team %d init arrived: join=%u, plain=%u\n",
               team->t.t_id, KMP_INIT_BARRIER_STATE, KMP_INIT_BARRIER_STATE));
      { // Initialize barrier data.
        int b;
        for (b = 0; b < bs_last_barrier; ++b) {
          team->t.t_bar[b].b_arrived = KMP_INIT_BARRIER_STATE;
#if USE_DEBUGGER
          team->t.t_bar[b].b_master_arrived = 0;
          team->t.t_bar[b].b_team_arrived = 0;
#endif
        }
      }

      team->t.t_proc_bind = new_proc_bind;

      KA_TRACE(20, ("__kmp_allocate_team: using team from pool %d.\n",
                    team->t.t_id));

#if OMPT_SUPPORT
      __ompt_team_assign_id(team, ompt_parallel_data);
#endif

      KMP_MB();
      return team;
    }

    /* reap team if it is too small, then loop back and check the next one */
    /* TODO: Use a technique to find the right-size hot team, don't reap them */
    team = __kmp_reap_team(team);
    __kmp_team_pool = team;
  }

  /* nothing available in the pool, no matter, make a new team! */
  KMP_MB();
  team = (kmp_team_t *)__kmp_allocate(sizeof(kmp_team_t));

  /* and set it up */
  team->t.t_max_nproc = max_nproc;
  /* NOTE: allocating one big buffer and dividing it up hurts performance on
     some targets, so allocate the arrays separately. */
  __kmp_allocate_team_arrays(team, max_nproc);

  KA_TRACE(20, ("__kmp_allocate_team: making a new team\n"));
  __kmp_initialize_team(team, new_nproc, new_icvs, NULL);

  KA_TRACE(20, ("__kmp_allocate_team: setting task_team[0] %p and task_team[1] "
                "%p to NULL\n",
                &team->t.t_task_team[0], &team->t.t_task_team[1]));
  team->t.t_task_team[0] = NULL; // __kmp_allocate zeroes the memory
  team->t.t_task_team[1] = NULL;

  if (__kmp_storage_map) {
    __kmp_print_team_storage_map("team", team, team->t.t_id, new_nproc);
  }

  /* allocate space for arguments */
  __kmp_alloc_argv_entries(argc, team, FALSE);
  team->t.t_argc = argc;

  KA_TRACE(20,
           ("__kmp_allocate_team: team %d init arrived: join=%u, plain=%u\n",
            team->t.t_id, KMP_INIT_BARRIER_STATE, KMP_INIT_BARRIER_STATE));
  { // Initialize barrier data.
    int b;
    for (b = 0; b < bs_last_barrier; ++b) {
      team->t.t_bar[b].b_arrived = KMP_INIT_BARRIER_STATE;
#if USE_DEBUGGER
      team->t.t_bar[b].b_master_arrived = 0;
      team->t.t_bar[b].b_team_arrived = 0;
#endif
    }
  }

  team->t.t_proc_bind = new_proc_bind;

#if OMPT_SUPPORT
  __ompt_team_assign_id(team, ompt_parallel_data);
  team->t.ompt_serialized_team_info = NULL;
#endif

  KMP_MB();

  KA_TRACE(20, ("__kmp_allocate_team: done creating a new team %d.\n",
                team->t.t_id));
  return team;
}
/* Free the team. Return the team to the team pool. Release all the threads
   associated with it. */
void __kmp_free_team(kmp_root_t *root,
                     kmp_team_t *team USE_NESTED_HOT_ARG(kmp_info_t *master)) {
  int f;
  KA_TRACE(20, ("__kmp_free_team: T#%d freeing team %d\n", __kmp_get_gtid(),
                team->t.t_id));

  /* verify state */
  KMP_DEBUG_ASSERT(root);
  KMP_DEBUG_ASSERT(team);
  KMP_DEBUG_ASSERT(team->t.t_nproc <= team->t.t_max_nproc);
  KMP_DEBUG_ASSERT(team->t.t_threads);

  int use_hot_team = team == root->r.r_hot_team;
#if KMP_NESTED_HOT_TEAMS
  int level;
  kmp_hot_team_ptr_t *hot_teams;
  if (master) {
    level = team->t.t_active_level - 1; // index in array of hot teams
    if (master->th.th_teams_microtask) { // in teams construct?
      if (master->th.th_teams_size.nteams > 1) {
        ++level; // level was not increased in teams construct for
        // team_of_masters
      }
      if (team->t.t_pkfn != (microtask_t)__kmp_teams_master &&
          master->th.th_teams_level == team->t.t_level) {
        ++level; // level was not increased in teams construct for
        // team_of_workers before the parallel
      }
    }
    hot_teams = master->th.th_hot_teams;
    if (level < __kmp_hot_teams_max_level) {
      KMP_DEBUG_ASSERT(team == hot_teams[level].hot_team);
      use_hot_team = 1;
    }
  }
#endif // KMP_NESTED_HOT_TEAMS

  /* team is done working */
  TCW_SYNC_PTR(team->t.t_pkfn,
               NULL); // Important for Debugging Support Library.
#if KMP_OS_WINDOWS
  team->t.t_copyin_counter = 0; // init counter for possible reuse
#endif
  // Do not reset the pointer to the parent team for hot teams.

  /* if we are not a hot team, release our threads */
  if (!use_hot_team) {
    if (__kmp_tasking_mode != tskm_immediate_exec) {
      // Wait for threads to reach a reapable state.
      for (f = 1; f < team->t.t_nproc; ++f) {
        KMP_DEBUG_ASSERT(team->t.t_threads[f]);
        kmp_info_t *th = team->t.t_threads[f];
        volatile kmp_uint32 *state = &th->th.th_reap_state;
        while (*state != KMP_SAFE_TO_REAP) {
#if KMP_OS_WINDOWS
          // On Windows a thread can be killed at any time; check this.
          int ecode;
          if (!__kmp_is_thread_alive(th, &ecode)) {
            *state = KMP_SAFE_TO_REAP; // reset the flag for the dead thread
            break;
          }
#endif
          // If the thread is sleeping, awaken it.
          kmp_flag_64 fl(&th->th.th_bar[bs_forkjoin_barrier].bb.b_go, th);
          if (fl.is_sleeping())
            fl.resume(__kmp_gtid_from_thread(th));
          KMP_CPU_PAUSE();
        }
      }

      // Delete task teams
      int tt_idx;
      for (tt_idx = 0; tt_idx < 2; ++tt_idx) {
        kmp_task_team_t *task_team = team->t.t_task_team[tt_idx];
        if (task_team != NULL) {
          for (f = 0; f < team->t.t_nproc; ++f) { // threads unref task teams
            KMP_DEBUG_ASSERT(team->t.t_threads[f]);
            team->t.t_threads[f]->th.th_task_team = NULL;
          }
          KA_TRACE(
              20,
              ("__kmp_free_team: T#%d deactivating task_team %p on team %d\n",
               __kmp_get_gtid(), task_team, team->t.t_id));
#if KMP_NESTED_HOT_TEAMS
          __kmp_free_task_team(master, task_team);
#endif
          team->t.t_task_team[tt_idx] = NULL;
        }
      }
    }

    // Reset pointer to parent team only for non-hot teams.
    team->t.t_parent = NULL;
    team->t.t_level = 0;
    team->t.t_active_level = 0;

    /* free the worker threads */
    for (f = 1; f < team->t.t_nproc; ++f) {
      KMP_DEBUG_ASSERT(team->t.t_threads[f]);
      __kmp_free_thread(team->t.t_threads[f]);
      team->t.t_threads[f] = NULL;
    }

    /* put the team back in the team pool */
    /* TODO limit size of team pool, call reap_team if pool too large */
    team->t.t_next_pool = CCAST(kmp_team_t *, __kmp_team_pool);
    __kmp_team_pool = (volatile kmp_team_t *)team;
  }

  KMP_MB();
}
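
// Illustrative sketch (not part of the runtime): a non-hot team is returned
// to __kmp_team_pool with a simple LIFO push onto a singly linked list. The
// toy pool below shows the same push/pop shape with invented types.
#if 0
struct ToyTeam {
  ToyTeam *next_pool = nullptr;
};

static ToyTeam *g_team_pool = nullptr;

static void pool_push(ToyTeam *t) { // analogous to the t_next_pool push above
  t->next_pool = g_team_pool;
  g_team_pool = t;
}

static ToyTeam *pool_pop() { // analogous to taking a team from the pool
  ToyTeam *t = g_team_pool;
  if (t)
    g_team_pool = t->next_pool;
  return t;
}
#endif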
/* reap the team: destroy it, reclaim all its resources and free its memory */
kmp_team_t *__kmp_reap_team(kmp_team_t *team) {
  kmp_team_t *next_pool = team->t.t_next_pool;

  KMP_DEBUG_ASSERT(team);
  KMP_DEBUG_ASSERT(team->t.t_dispatch);
  KMP_DEBUG_ASSERT(team->t.t_disp_buffer);
  KMP_DEBUG_ASSERT(team->t.t_threads);
  KMP_DEBUG_ASSERT(team->t.t_argv);

  /* TODO clean the threads that are a part of this? */

  /* free stuff */
  __kmp_free_team_arrays(team);
  if (team->t.t_argv != &team->t.t_inline_argv[0])
    __kmp_free((void *)team->t.t_argv);
  __kmp_free(team);

  KMP_MB();
  return next_pool;
}
// Free the thread. Don't reap it, just place it on the pool of available
// threads. The pool is kept sorted by gtid so that the OMP tid <-> gtid
// binding stays predictable for the affinity mechanism.
void __kmp_free_thread(kmp_info_t *this_th) {
  int gtid;
  kmp_info_t **scan;
  kmp_root_t *root = this_th->th.th_root;

  KA_TRACE(20, ("__kmp_free_thread: T#%d putting T#%d back on free pool.\n",
                __kmp_get_gtid(), this_th->th.th_info.ds.ds_gtid));

  KMP_DEBUG_ASSERT(this_th);

  // When moving a thread to the pool, switch it to wait on its own b_go flag
  // and an uninitialized (NULL) team.
  int b;
  kmp_balign_t *balign = this_th->th.th_bar;
  for (b = 0; b < bs_last_barrier; ++b) {
    if (balign[b].bb.wait_flag == KMP_BARRIER_PARENT_FLAG)
      balign[b].bb.wait_flag = KMP_BARRIER_SWITCH_TO_OWN_FLAG;
    balign[b].bb.team = NULL;
    balign[b].bb.leaf_kids = 0;
  }
  this_th->th.th_task_state = 0;
  this_th->th.th_reap_state = KMP_SAFE_TO_REAP;

  /* put thread back on the free pool */
  TCW_PTR(this_th->th.th_team, NULL);
  TCW_PTR(this_th->th.th_root, NULL);
  TCW_PTR(this_th->th.th_dispatch, NULL); /* NOT NEEDED */

  // Multiple threads could otherwise share the implicit task data and try to
  // free it again in __kmp_reap_thread, so free the implicit task here.
  __kmp_free_implicit_task(this_th);
  this_th->th.th_current_task = NULL;

  // If the __kmp_thread_pool_insert_pt is already past the new insert point,
  // then we need to re-scan the entire list.
  gtid = this_th->th.th_info.ds.ds_gtid;
  if (__kmp_thread_pool_insert_pt != NULL) {
    KMP_DEBUG_ASSERT(__kmp_thread_pool != NULL);
    if (__kmp_thread_pool_insert_pt->th.th_info.ds.ds_gtid > gtid) {
      __kmp_thread_pool_insert_pt = NULL;
    }
  }

  // Scan down the list to find the place to insert the thread, keeping the
  // pool sorted by gtid.
  if (__kmp_thread_pool_insert_pt != NULL) {
    scan = &(__kmp_thread_pool_insert_pt->th.th_next_pool);
  } else {
    scan = CCAST(kmp_info_t **, &__kmp_thread_pool);
  }
  for (; (*scan != NULL) && ((*scan)->th.th_info.ds.ds_gtid < gtid);
       scan = &((*scan)->th.th_next_pool))
    ;

  // Insert the new element on the list and remember it as the insert point.
  TCW_PTR(this_th->th.th_next_pool, *scan);
  __kmp_thread_pool_insert_pt = *scan = this_th;
  KMP_DEBUG_ASSERT((this_th->th.th_next_pool == NULL) ||
                   (this_th->th.th_info.ds.ds_gtid <
                    this_th->th.th_next_pool->th.th_info.ds.ds_gtid));
  TCW_4(this_th->th.th_in_pool, TRUE);
  __kmp_thread_pool_nth++;

  TCW_4(__kmp_nth, __kmp_nth - 1);
  root->r.r_cg_nthreads--;

#ifdef KMP_ADJUST_BLOCKTIME
  /* Adjust blocktime back to the user setting or default if necessary */
  /* Middle initialization might never have occurred */
  if (!__kmp_env_blocktime && (__kmp_avail_proc > 0)) {
    KMP_DEBUG_ASSERT(__kmp_avail_proc > 0);
    if (__kmp_nth <= __kmp_avail_proc) {
      __kmp_zero_bt = FALSE;
    }
  }
#endif /* KMP_ADJUST_BLOCKTIME */

  KMP_MB();
}
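
// Illustrative sketch (not part of the runtime): the thread pool above is a
// singly linked list kept sorted by gtid, with an insertion-point hint to
// avoid rescanning from the head. The toy below shows only the sorted-insert
// idea with invented types; it is not the runtime's data structure.
#if 0
struct ToyThread {
  int gtid;
  ToyThread *next = nullptr;
};

// Insert 't' so the list stays sorted by ascending gtid.
static void sorted_insert(ToyThread **head, ToyThread *t) {
  ToyThread **scan = head;
  while (*scan != nullptr && (*scan)->gtid < t->gtid)
    scan = &(*scan)->next;
  t->next = *scan;
  *scan = t;
}
#endif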
/* ------------------------------------------------------------------------ */

void *__kmp_launch_thread(kmp_info_t *this_thr) {
  int gtid = this_thr->th.th_info.ds.ds_gtid;
  kmp_team_t *(*volatile pteam);

  KMP_MB();
  KA_TRACE(10, ("__kmp_launch_thread: T#%d start\n", gtid));

  if (__kmp_env_consistency_check) {
    this_thr->th.th_cons = __kmp_allocate_cons_stack(gtid); // ATT: Memory leak?
  }

#if OMPT_SUPPORT
  ompt_data_t *thread_data;
  if (ompt_enabled.enabled) {
    thread_data = &(this_thr->th.ompt_thread_info.thread_data);
    *thread_data = ompt_data_none;

    this_thr->th.ompt_thread_info.state = ompt_state_overhead;
    this_thr->th.ompt_thread_info.wait_id = 0;
    this_thr->th.ompt_thread_info.idle_frame = OMPT_GET_FRAME_ADDRESS(0);
    if (ompt_enabled.ompt_callback_thread_begin) {
      ompt_callbacks.ompt_callback(ompt_callback_thread_begin)(
          ompt_thread_worker, thread_data);
    }
  }

  if (ompt_enabled.enabled) {
    this_thr->th.ompt_thread_info.state = ompt_state_idle;
  }
#endif

  /* This is the place where threads wait for work */
  while (!TCR_4(__kmp_global.g.g_done)) {
    KMP_DEBUG_ASSERT(this_thr == __kmp_threads[gtid]);
    KMP_MB();

    /* wait for work to do */
    KA_TRACE(20, ("__kmp_launch_thread: T#%d waiting for work\n", gtid));

    /* No tid yet since not part of a team */
    __kmp_fork_barrier(gtid, KMP_GTID_DNE);

#if OMPT_SUPPORT
    if (ompt_enabled.enabled) {
      this_thr->th.ompt_thread_info.state = ompt_state_overhead;
    }
#endif

    pteam = (kmp_team_t * (*))(&this_thr->th.th_team);

    /* have we been allocated? */
    if (TCR_SYNC_PTR(*pteam) && !TCR_4(__kmp_global.g.g_done)) {
      /* we were just woken up, so run our new task */
      if (TCR_SYNC_PTR((*pteam)->t.t_pkfn) != NULL) {
        int rc;
        KA_TRACE(20,
                 ("__kmp_launch_thread: T#%d(%d:%d) invoke microtask = %p\n",
                  gtid, (*pteam)->t.t_id, __kmp_tid_from_gtid(gtid),
                  (*pteam)->t.t_pkfn));

        updateHWFPControl(*pteam);

#if OMPT_SUPPORT
        if (ompt_enabled.enabled) {
          this_thr->th.ompt_thread_info.state = ompt_state_work_parallel;
        }
#endif

        rc = (*pteam)->t.t_invoke(gtid);
        KMP_ASSERT(rc);

        KMP_MB();
        KA_TRACE(20, ("__kmp_launch_thread: T#%d(%d:%d) done microtask = %p\n",
                      gtid, (*pteam)->t.t_id, __kmp_tid_from_gtid(gtid),
                      (*pteam)->t.t_pkfn));
      }
#if OMPT_SUPPORT
      if (ompt_enabled.enabled) {
        /* no frame set while outside task */
        __ompt_get_task_info_object(0)->frame.exit_frame = ompt_data_none;

        this_thr->th.ompt_thread_info.state = ompt_state_overhead;
      }
#endif
      /* join barrier after parallel region */
      __kmp_join_barrier(gtid);
    }
  }
  TCR_SYNC_PTR((intptr_t)__kmp_global.g.g_done);

#if OMPT_SUPPORT
  if (ompt_enabled.ompt_callback_thread_end) {
    ompt_callbacks.ompt_callback(ompt_callback_thread_end)(thread_data);
  }
#endif

  this_thr->th.th_task_team = NULL;
  /* run the destructors for the threadprivate data for this thread */
  __kmp_common_destroy_gtid(gtid);

  KA_TRACE(10, ("__kmp_launch_thread: T#%d done\n", gtid));
  KMP_MB();
  return this_thr;
}
/* ------------------------------------------------------------------------ */

void __kmp_internal_end_dest(void *specific_gtid) {
#if KMP_COMPILER_ICC
#pragma warning(push)
#pragma warning(disable : 810) // conversion from "void *" to "int" may lose
// significant bits
#endif
  // Make sure no significant bits are lost
  int gtid = (kmp_intptr_t)specific_gtid - 1;
#if KMP_COMPILER_ICC
#pragma warning(pop)
#endif

  KA_TRACE(30, ("__kmp_internal_end_dest: T#%d\n", gtid));
  /* NOTE: the gtid is stored as gtid+1 in thread-local storage because 0 is
     reserved for the nothing-stored case. */

  if (gtid >= 0 && KMP_UBER_GTID(gtid))
    __kmp_gtid_set_specific(gtid);
#ifdef KMP_TDATA_GTID
  __kmp_gtid = gtid;
#endif
  __kmp_internal_end_thread(gtid);
}

#if KMP_OS_UNIX && KMP_DYNAMIC_LIB

__attribute__((destructor)) void __kmp_internal_end_dtor(void) {
  __kmp_internal_end_atexit();
}

void __kmp_internal_end_fini(void) { __kmp_internal_end_atexit(); }

#endif

void __kmp_internal_end_atexit(void) {
  KA_TRACE(30, ("__kmp_internal_end_atexit\n"));
  // Shut the library down; -1 means the calling thread's gtid is unknown.
  __kmp_internal_end_library(-1);
#if KMP_OS_WINDOWS
  __kmp_close_console();
#endif
}
static void __kmp_reap_thread(kmp_info_t *thread, int is_root) {
  // It is assumed __kmp_forkjoin_lock is acquired.

  int gtid;

  KMP_DEBUG_ASSERT(thread != NULL);

  gtid = thread->th.th_info.ds.ds_gtid;

  if (!is_root) {
    if (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME) {
      /* Assume the threads are at the fork barrier here */
      KA_TRACE(
          20, ("__kmp_reap_thread: releasing T#%d from fork barrier for reap\n",
               gtid));
      /* Need release fence here to prevent seg faults for tree forkjoin
         barrier (GEH) */
      ANNOTATE_HAPPENS_BEFORE(thread);
      kmp_flag_64 flag(&thread->th.th_bar[bs_forkjoin_barrier].bb.b_go, thread);
      __kmp_release_64(&flag);
    }

    // Terminate the OS thread.
    __kmp_reap_worker(thread);

    // If the thread was actively spinning in the thread pool, decrement the
    // global count of active pool threads.
    if (thread->th.th_active_in_pool) {
      thread->th.th_active_in_pool = FALSE;
      KMP_ATOMIC_DEC(&__kmp_thread_pool_active_nth);
      KMP_DEBUG_ASSERT(__kmp_thread_pool_active_nth >= 0);
    }

    // Decrement # of [worker] threads in the pool.
    KMP_DEBUG_ASSERT(__kmp_thread_pool_nth > 0);
    --__kmp_thread_pool_nth;
  }

  __kmp_free_implicit_task(thread);

// Free the fast memory for tasking
#if USE_FAST_MEMORY
  __kmp_free_fast_memory(thread);
#endif /* USE_FAST_MEMORY */

  __kmp_suspend_uninitialize_thread(thread);

  KMP_DEBUG_ASSERT(__kmp_threads[gtid] == thread);
  TCW_SYNC_PTR(__kmp_threads[gtid], NULL);

#ifdef KMP_ADJUST_BLOCKTIME
  /* Adjust blocktime back to the user setting or default if necessary */
  /* Middle initialization might never have occurred */
  if (!__kmp_env_blocktime && (__kmp_avail_proc > 0)) {
    KMP_DEBUG_ASSERT(__kmp_avail_proc > 0);
    if (__kmp_nth <= __kmp_avail_proc) {
      __kmp_zero_bt = FALSE;
    }
  }
#endif /* KMP_ADJUST_BLOCKTIME */

  /* free the memory being used */
  if (__kmp_env_consistency_check) {
    if (thread->th.th_cons) {
      __kmp_free_cons_stack(thread->th.th_cons);
      thread->th.th_cons = NULL;
    }
  }

  if (thread->th.th_pri_common != NULL) {
    __kmp_free(thread->th.th_pri_common);
    thread->th.th_pri_common = NULL;
  }

  if (thread->th.th_task_state_memo_stack != NULL) {
    __kmp_free(thread->th.th_task_state_memo_stack);
    thread->th.th_task_state_memo_stack = NULL;
  }

#if KMP_USE_BGET
  if (thread->th.th_local.bget_data != NULL) {
    __kmp_finalize_bget(thread);
  }
#endif

#if KMP_AFFINITY_SUPPORTED
  if (thread->th.th_affin_mask != NULL) {
    KMP_CPU_FREE(thread->th.th_affin_mask);
    thread->th.th_affin_mask = NULL;
  }
#endif /* KMP_AFFINITY_SUPPORTED */

#if KMP_USE_HIER_SCHED
  if (thread->th.th_hier_bar_data != NULL) {
    __kmp_free(thread->th.th_hier_bar_data);
    thread->th.th_hier_bar_data = NULL;
  }
#endif

  __kmp_reap_team(thread->th.th_serial_team);
  thread->th.th_serial_team = NULL;
  __kmp_free(thread);

  KMP_MB();
}
static void __kmp_internal_end(void) {
  int i;

  /* First, unregister the library */
  __kmp_unregister_library();

#if KMP_OS_WINDOWS
  /* In a Windows static library we can't tell when a root actually dies, so
     reclaim the data structures for any root threads that have died but not
     unregistered themselves, in order to shut down cleanly. */
  __kmp_reclaim_dead_roots();
#endif

  for (i = 0; i < __kmp_threads_capacity; i++)
    if (__kmp_root[i])
      if (__kmp_root[i]->r.r_active)
        break;
  KMP_MB(); /* Flush all pending memory write invalidates. */
  TCW_SYNC_4(__kmp_global.g.g_done, TRUE);

  if (i < __kmp_threads_capacity) {
#if KMP_USE_MONITOR
    // Other alive roots found; only reap the monitor here.
    KMP_MB(); /* Flush all pending memory write invalidates. */

    // Check that the monitor was initialized before reaping it. If we are
    // called from __kmp_atfork_child (which sets __kmp_init_monitor=0), the
    // monitor data is only valid in the parent process, not the child.
    __kmp_acquire_bootstrap_lock(&__kmp_monitor_lock);
    if (TCR_4(__kmp_init_monitor)) {
      __kmp_reap_monitor(&__kmp_monitor);
      TCW_4(__kmp_init_monitor, 0);
    }
    __kmp_release_bootstrap_lock(&__kmp_monitor_lock);
    KA_TRACE(10, ("__kmp_internal_end: monitor reaped\n"));
#endif // KMP_USE_MONITOR
  } else {
#ifdef KMP_DEBUG
    /* make sure that everything has properly ended */
    for (i = 0; i < __kmp_threads_capacity; i++) {
      if (__kmp_root[i]) {
        KMP_ASSERT(!__kmp_root[i]->r.r_active);
      }
    }
#endif

    KMP_MB();

    // Reap the worker threads.
    while (__kmp_thread_pool != NULL) { // Loop through all threads in the pool.
      kmp_info_t *thread = CCAST(kmp_info_t *, __kmp_thread_pool);
      __kmp_thread_pool = thread->th.th_next_pool;
      // Reap it.
      KMP_DEBUG_ASSERT(thread->th.th_reap_state == KMP_SAFE_TO_REAP);
      thread->th.th_next_pool = NULL;
      thread->th.th_in_pool = FALSE;
      __kmp_reap_thread(thread, 0);
    }
    __kmp_thread_pool_insert_pt = NULL;

    // Reap teams.
    while (__kmp_team_pool != NULL) { // Loop through all teams in the pool.
      kmp_team_t *team = CCAST(kmp_team_t *, __kmp_team_pool);
      __kmp_team_pool = team->t.t_next_pool;
      // Reap it.
      team->t.t_next_pool = NULL;
      __kmp_reap_team(team);
    }

    __kmp_reap_task_teams();

    // Threads that are not reaped must not access resources that are about to
    // be deallocated, so wait until such threads have finished blocking.
    for (i = 0; i < __kmp_threads_capacity; i++) {
      kmp_info_t *thr = __kmp_threads[i];
      while (thr && KMP_ATOMIC_LD_ACQ(&thr->th.th_blocking))
        KMP_CPU_PAUSE();
    }

    for (i = 0; i < __kmp_threads_capacity; ++i) {
      // TBD: Add some checking, e.g. that __kmp_threads[i] was reaped.
    }

    KMP_MB();

    TCW_SYNC_4(__kmp_init_common, FALSE);

    KA_TRACE(10, ("__kmp_internal_end: all workers reaped\n"));
    KMP_MB();

#if KMP_USE_MONITOR
    // See note above: wait for the monitor to exit (reap it).
    __kmp_acquire_bootstrap_lock(&__kmp_monitor_lock);
    if (TCR_4(__kmp_init_monitor)) {
      __kmp_reap_monitor(&__kmp_monitor);
      TCW_4(__kmp_init_monitor, 0);
    }
    __kmp_release_bootstrap_lock(&__kmp_monitor_lock);
    KA_TRACE(10, ("__kmp_internal_end: monitor reaped\n"));
#endif
  }

  TCW_4(__kmp_init_gtid, FALSE);
  KMP_MB(); /* Flush all pending memory write invalidates. */

  __kmp_cleanup();
}
void __kmp_internal_end_library(int gtid_req) {
  /* If we have already cleaned up, don't try again; it wouldn't be pretty.
     This shouldn't race because __kmp_internal_end() is the only place to
     clear __kmp_serial_init; we also re-check after taking the lock. */
  if (__kmp_global.g.g_abort) {
    KA_TRACE(11, ("__kmp_internal_end_library: abort, exiting\n"));
    /* TODO abort? */
    return;
  }
  if (TCR_4(__kmp_global.g.g_done) || !__kmp_init_serial) {
    KA_TRACE(10, ("__kmp_internal_end_library: already finished\n"));
    return;
  }

  KMP_MB(); /* Flush all pending memory write invalidates. */

  /* find out who we are and what we should do */
  {
    int gtid = (gtid_req >= 0) ? gtid_req : __kmp_gtid_get_specific();
    KA_TRACE(
        10, ("__kmp_internal_end_library: enter T#%d  (%d)\n", gtid, gtid_req));
    if (gtid == KMP_GTID_SHUTDOWN) {
      KA_TRACE(10, ("__kmp_internal_end_library: !__kmp_init_runtime, system "
                    "already shutdown\n"));
      return;
    } else if (gtid == KMP_GTID_MONITOR) {
      KA_TRACE(10, ("__kmp_internal_end_library: monitor thread, gtid not "
                    "registered, or system shutdown\n"));
      return;
    } else if (gtid == KMP_GTID_DNE) {
      KA_TRACE(10, ("__kmp_internal_end_library: gtid not registered or system "
                    "shutdown\n"));
      /* we don't know who we are, but we may still shut down the library */
    } else if (KMP_UBER_GTID(gtid)) {
      /* unregister ourselves as an uber thread; gtid is no longer valid */
      if (__kmp_root[gtid]->r.r_active) {
        __kmp_global.g.g_abort = -1;
        TCW_SYNC_4(__kmp_global.g.g_done, TRUE);
        KA_TRACE(10,
                 ("__kmp_internal_end_library: root still active, abort T#%d\n",
                  gtid));
        return;
      } else {
        KA_TRACE(
            10,
            ("__kmp_internal_end_library: unregistering sibling T#%d\n", gtid));
        __kmp_unregister_root_current_thread(gtid);
      }
    } else {
/* worker threads may call this function through the atexit handler if they
   call exit(); for now just dump the debug buffer */
#ifdef DUMP_DEBUG_ON_EXIT
      if (__kmp_debug_buf)
        __kmp_dump_debug_buffer();
#endif
      return;
    }
  }
  /* synchronize the termination process */
  __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);

  /* have we already finished? */
  if (__kmp_global.g.g_abort) {
    KA_TRACE(10, ("__kmp_internal_end_library: abort, exiting\n"));
    /* TODO abort? */
    __kmp_release_bootstrap_lock(&__kmp_initz_lock);
    return;
  }
  if (TCR_4(__kmp_global.g.g_done) || !__kmp_init_serial) {
    __kmp_release_bootstrap_lock(&__kmp_initz_lock);
    return;
  }

  /* We need this lock to enforce mutual exclusion between this reading of
     __kmp_threads_capacity and the writing by __kmp_register_root. */
  __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);

  /* now we can safely conduct the actual termination */
  __kmp_internal_end();

  __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
  __kmp_release_bootstrap_lock(&__kmp_initz_lock);

  KA_TRACE(10, ("__kmp_internal_end_library: exit\n"));

#ifdef DUMP_DEBUG_ON_EXIT
  if (__kmp_debug_buf)
    __kmp_dump_debug_buffer();
#endif

#if KMP_OS_WINDOWS
  __kmp_close_console();
#endif

  __kmp_fini_allocator();
}
void __kmp_internal_end_thread(int gtid_req) {
  int i;

  /* if we have already cleaned up, don't try again; it wouldn't be pretty */
  if (__kmp_global.g.g_abort) {
    KA_TRACE(11, ("__kmp_internal_end_thread: abort, exiting\n"));
    /* TODO abort? */
    return;
  }
  if (TCR_4(__kmp_global.g.g_done) || !__kmp_init_serial) {
    KA_TRACE(10, ("__kmp_internal_end_thread: already finished\n"));
    return;
  }

  KMP_MB(); /* Flush all pending memory write invalidates. */

  /* find out who we are and what we should do */
  {
    int gtid = (gtid_req >= 0) ? gtid_req : __kmp_gtid_get_specific();
    KA_TRACE(10,
             ("__kmp_internal_end_thread: enter T#%d  (%d)\n", gtid, gtid_req));
    if (gtid == KMP_GTID_SHUTDOWN) {
      KA_TRACE(10, ("__kmp_internal_end_thread: !__kmp_init_runtime, system "
                    "already shutdown\n"));
      return;
    } else if (gtid == KMP_GTID_MONITOR) {
      KA_TRACE(10, ("__kmp_internal_end_thread: monitor thread, gtid not "
                    "registered, or system shutdown\n"));
      return;
    } else if (gtid == KMP_GTID_DNE) {
      KA_TRACE(10, ("__kmp_internal_end_thread: gtid not registered or system "
                    "shutdown\n"));
      return;
      /* we don't know who we are */
    } else if (KMP_UBER_GTID(gtid)) {
      /* unregister ourselves as an uber thread; gtid is no longer valid */
      if (__kmp_root[gtid]->r.r_active) {
        __kmp_global.g.g_abort = -1;
        TCW_SYNC_4(__kmp_global.g.g_done, TRUE);
        KA_TRACE(10,
                 ("__kmp_internal_end_thread: root still active, abort T#%d\n",
                  gtid));
        return;
      } else {
        KA_TRACE(10, ("__kmp_internal_end_thread: unregistering sibling T#%d\n",
                      gtid));
        __kmp_unregister_root_current_thread(gtid);
      }
    } else {
      /* just a worker thread, let's leave */
      KA_TRACE(10, ("__kmp_internal_end_thread: worker thread T#%d\n", gtid));

      if (gtid >= 0) {
        __kmp_threads[gtid]->th.th_task_team = NULL;
      }

      KA_TRACE(10,
               ("__kmp_internal_end_thread: worker thread done, exiting T#%d\n",
                gtid));
      return;
    }
  }
#if KMP_DYNAMIC_LIB
  // Do not shut the dynamic library down at the exit of an uber thread; the
  // library destructor will do a better job of it later.
  KA_TRACE(10, ("__kmp_internal_end_thread: exiting T#%d\n", gtid_req));
  return;
#endif
  /* synchronize the termination process */
  __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);

  /* have we already finished? */
  if (__kmp_global.g.g_abort) {
    KA_TRACE(10, ("__kmp_internal_end_thread: abort, exiting\n"));
    /* TODO abort? */
    __kmp_release_bootstrap_lock(&__kmp_initz_lock);
    return;
  }
  if (TCR_4(__kmp_global.g.g_done) || !__kmp_init_serial) {
    __kmp_release_bootstrap_lock(&__kmp_initz_lock);
    return;
  }

  /* We need this lock to enforce mutual exclusion between this reading of
     __kmp_threads_capacity and the writing by __kmp_register_root. */
  __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);

  // Is any sibling root still alive?
  for (i = 0; i < __kmp_threads_capacity; ++i) {
    if (KMP_UBER_GTID(i)) {
      KA_TRACE(
          10,
          ("__kmp_internal_end_thread: remaining sibling task: gtid==%d\n", i));
      __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
      __kmp_release_bootstrap_lock(&__kmp_initz_lock);
      return;
    }
  }

  /* now we can safely conduct the actual termination */
  __kmp_internal_end();

  __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
  __kmp_release_bootstrap_lock(&__kmp_initz_lock);

  KA_TRACE(10, ("__kmp_internal_end_thread: exit T#%d\n", gtid_req));

#ifdef DUMP_DEBUG_ON_EXIT
  if (__kmp_debug_buf)
    __kmp_dump_debug_buffer();
#endif
}
// -----------------------------------------------------------------------------
// Library registration stuff.

static long __kmp_registration_flag = 0;
// Random value used to indicate library initialization.
static char *__kmp_registration_str = NULL;
// Value to be saved in env var __KMP_REGISTERED_LIB_<pid>.

static inline char *__kmp_reg_status_name() {
  /* On RHEL 3u5 if linked statically, getpid() returns different values in
     each thread. If registration and unregistration go in different threads,
     the name of the registered_lib env var can not be found, because the name
     will contain a different pid. */
  return __kmp_str_format("__KMP_REGISTERED_LIB_%d", (int)getpid());
} // __kmp_reg_status_name

void __kmp_register_library_startup(void) {

  char *name = __kmp_reg_status_name(); // Name of the environment variable.
  int done = 0;
  union { // For convenience, take access to structure contents.
    double dtime;
    long ltime;
  } time;
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
  __kmp_initialize_system_tick();
#endif
  __kmp_read_system_time(&time.dtime);
  __kmp_registration_flag = 0xCAFE0000L | (time.ltime & 0x0000FFFFL);
  __kmp_registration_str =
      __kmp_str_format("%p-%lx-%s", &__kmp_registration_flag,
                       __kmp_registration_flag, KMP_LIBRARY_FILE);

  KA_TRACE(50, ("__kmp_register_library_startup: %s=\"%s\"\n", name,
                __kmp_registration_str));

  while (!done) {

    char *value = NULL; // Actual value of the environment variable.

    // Set the environment variable, but do not overwrite if it already exists.
    __kmp_env_set(name, __kmp_registration_str, 0);
    // Check the variable is written.
    value = __kmp_env_get(name);
    if (value != NULL && strcmp(value, __kmp_registration_str) == 0) {

      done = 1; // Ok, environment variable set successfully, exit the loop.

    } else {

      // Oops. Write failed. Another copy of the OpenMP RTL is in memory.
      // Check whether it is alive or dead.
      int neighbor = 0; // 0 -- unknown status, 1 -- alive, 2 -- dead.
      char *tail = value;
      char *flag_addr_str = NULL;
      char *flag_val_str = NULL;
      char const *file_name = NULL;
      __kmp_str_split(tail, '-', &flag_addr_str, &tail);
      __kmp_str_split(tail, '-', &flag_val_str, &tail);
      file_name = tail;
      if (tail != NULL) {
        long *flag_addr = 0;
        long flag_val = 0;
        KMP_SSCANF(flag_addr_str, "%p", RCAST(void **, &flag_addr));
        KMP_SSCANF(flag_val_str, "%lx", &flag_val);
        if (flag_addr != 0 && flag_val != 0 && strcmp(file_name, "") != 0) {
          // Check whether the address encoded in the variable is mapped into
          // this address space and still holds the expected value; if so,
          // another copy of the runtime is alive in this process.
          if (__kmp_is_address_mapped(flag_addr) && *flag_addr == flag_val) {
            neighbor = 1;
          } else {
            // The other copy of the library is no longer running.
            neighbor = 2;
          }
        }
      }
      switch (neighbor) {
      case 0: // Cannot parse environment variable -- neighbor status unknown.
        // Assume it is the incompatible format of a future version of the
        // library and that the other library is alive.
        file_name = "unknown library";
        // Attention! Falling to the next case. That's intentional.
      case 1: { // Neighbor is alive.
        // Check whether duplicates are allowed.
        char *duplicate_ok = __kmp_env_get("KMP_DUPLICATE_LIB_OK");
        if (!__kmp_str_match_true(duplicate_ok)) {
          // That's not allowed. Issue fatal error.
          __kmp_fatal(KMP_MSG(DuplicateLibrary, KMP_LIBRARY_FILE, file_name),
                      KMP_HNT(DuplicateLibrary), __kmp_msg_null);
        }
        KMP_INTERNAL_FREE(duplicate_ok);
        __kmp_duplicate_library_ok = 1;
        done = 1; // Exit the loop.
      } break;
      case 2: { // Neighbor is dead.
        // Clear the variable and try to register the library again.
        __kmp_env_unset(name);
      } break;
      default: { KMP_DEBUG_ASSERT(0); } break;
      }
    }
    KMP_INTERNAL_FREE((void *)value);
  }
  KMP_INTERNAL_FREE((void *)name);

} // func __kmp_register_library_startup

void __kmp_unregister_library(void) {

  char *name = __kmp_reg_status_name();
  char *value = __kmp_env_get(name);

  KMP_DEBUG_ASSERT(__kmp_registration_flag != 0);
  KMP_DEBUG_ASSERT(__kmp_registration_str != NULL);
  if (value != NULL && strcmp(value, __kmp_registration_str) == 0) {
    // Ok, this is our variable. Delete it.
    __kmp_env_unset(name);
  }

  KMP_INTERNAL_FREE(__kmp_registration_str);
  KMP_INTERNAL_FREE(value);
  KMP_INTERNAL_FREE(name);

  __kmp_registration_flag = 0;
  __kmp_registration_str = NULL;

} // __kmp_unregister_library
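
// Illustrative sketch (not part of the runtime): the registration protocol
// above encodes "<address>-<value>-<file>" in an environment variable and, on
// a collision, probes whether the encoded address still holds the encoded
// value. The POSIX toy below shows only the encode/parse round trip with the
// C library; the variable name and file name are invented, and no address
// probing is attempted.
#if 0
#include <cstdio>
#include <cstdlib>

int main() {
  static unsigned long reg_flag = 0xCAFE1234UL;
  char encoded[256];
  std::snprintf(encoded, sizeof(encoded), "%p-%lx-%s", (void *)&reg_flag,
                reg_flag, "libtoy.so");
  setenv("TOY_REGISTERED_LIB", encoded, 0); // do not overwrite if present

  // Parse it back the way a second copy of the library would.
  void *flag_addr = nullptr;
  unsigned long flag_val = 0;
  char file_name[128] = {0};
  if (std::sscanf(getenv("TOY_REGISTERED_LIB"), "%p-%lx-%127s", &flag_addr,
                  &flag_val, file_name) == 3) {
    std::printf("addr=%p val=%lx file=%s\n", flag_addr, flag_val, file_name);
  }
  return 0;
}
#endif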
// -----------------------------------------------------------------------------

#if KMP_MIC_SUPPORTED

static void __kmp_check_mic_type() {
  kmp_cpuid_t cpuid_state = {0};
  kmp_cpuid_t *cs_p = &cpuid_state;

  __kmp_x86_cpuid(1, 0, cs_p);
  // We don't support mic1 at the moment.
  if ((cs_p->eax & 0xff0) == 0xB10) {
    __kmp_mic_type = mic2;
  } else if ((cs_p->eax & 0xf0ff0) == 0x50670) {
    __kmp_mic_type = mic3;
  } else {
    __kmp_mic_type = non_mic;
  }
}

#endif /* KMP_MIC_SUPPORTED */
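
// Illustrative sketch (not part of the runtime): the check above reads CPUID
// leaf 1 and masks family/model bits from EAX. The standalone snippet below
// does the same with GCC/Clang's <cpuid.h>; the mask value is the one used
// above, and the program itself is only for illustration.
#if 0
#include <cpuid.h>
#include <cstdio>

int main() {
  unsigned eax = 0, ebx = 0, ecx = 0, edx = 0;
  if (__get_cpuid(1, &eax, &ebx, &ecx, &edx)) {
    // Family/model bits (plus extended model) are what the mask extracts.
    std::printf("eax=0x%x masked=0x%x\n", eax, eax & 0xf0ff0u);
  }
  return 0;
}
#endif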
static void __kmp_do_serial_initialize(void) {
  int i, gtid;
  int size;

  KA_TRACE(10, ("__kmp_do_serial_initialize: enter\n"));

  KMP_DEBUG_ASSERT(sizeof(kmp_int32) == 4);
  KMP_DEBUG_ASSERT(sizeof(kmp_uint32) == 4);
  KMP_DEBUG_ASSERT(sizeof(kmp_int64) == 8);
  KMP_DEBUG_ASSERT(sizeof(kmp_uint64) == 8);
  KMP_DEBUG_ASSERT(sizeof(kmp_intptr_t) == sizeof(void *));

  __kmp_validate_locks();

  /* Initialize internal memory allocator */
  __kmp_init_allocator();

  /* Register the library startup via an environment variable and check to see
     whether another copy of the library is already registered. */
  __kmp_register_library_startup();

  /* TODO reinitialization of library */
  if (TCR_4(__kmp_global.g.g_done)) {
    KA_TRACE(10, ("__kmp_do_serial_initialize: reinitialization of library\n"));
  }

  __kmp_global.g.g_abort = 0;
  TCW_SYNC_4(__kmp_global.g.g_done, FALSE);

/* initialize the locks */
#if KMP_USE_ADAPTIVE_LOCKS
#if KMP_DEBUG_ADAPTIVE_LOCKS
  __kmp_init_speculative_stats();
#endif
#endif
#if KMP_STATS_ENABLED
  __kmp_stats_init();
#endif
  __kmp_init_lock(&__kmp_global_lock);
  __kmp_init_queuing_lock(&__kmp_dispatch_lock);
  __kmp_init_lock(&__kmp_debug_lock);
  __kmp_init_atomic_lock(&__kmp_atomic_lock);
  __kmp_init_atomic_lock(&__kmp_atomic_lock_1i);
  __kmp_init_atomic_lock(&__kmp_atomic_lock_2i);
  __kmp_init_atomic_lock(&__kmp_atomic_lock_4i);
  __kmp_init_atomic_lock(&__kmp_atomic_lock_4r);
  __kmp_init_atomic_lock(&__kmp_atomic_lock_8i);
  __kmp_init_atomic_lock(&__kmp_atomic_lock_8r);
  __kmp_init_atomic_lock(&__kmp_atomic_lock_8c);
  __kmp_init_atomic_lock(&__kmp_atomic_lock_10r);
  __kmp_init_atomic_lock(&__kmp_atomic_lock_16r);
  __kmp_init_atomic_lock(&__kmp_atomic_lock_16c);
  __kmp_init_atomic_lock(&__kmp_atomic_lock_20c);
  __kmp_init_atomic_lock(&__kmp_atomic_lock_32c);
  __kmp_init_bootstrap_lock(&__kmp_forkjoin_lock);
  __kmp_init_bootstrap_lock(&__kmp_exit_lock);
#if KMP_USE_MONITOR
  __kmp_init_bootstrap_lock(&__kmp_monitor_lock);
#endif
  __kmp_init_bootstrap_lock(&__kmp_tp_cached_lock);

  /* conduct initialization and initial setup of configuration */
  __kmp_runtime_initialize();

#if KMP_MIC_SUPPORTED
  __kmp_check_mic_type();
#endif

  // Some global variable initialization moved here from kmp_env_initialize()
  __kmp_abort_delay = 0;

  // From __kmp_init_dflt_team_nth()
  /* assume the entire machine will be used */
  __kmp_dflt_team_nth_ub = __kmp_xproc;
  if (__kmp_dflt_team_nth_ub < KMP_MIN_NTH) {
    __kmp_dflt_team_nth_ub = KMP_MIN_NTH;
  }
  if (__kmp_dflt_team_nth_ub > __kmp_sys_max_nth) {
    __kmp_dflt_team_nth_ub = __kmp_sys_max_nth;
  }
  __kmp_max_nth = __kmp_sys_max_nth;
  __kmp_cg_max_nth = __kmp_sys_max_nth;
  __kmp_teams_max_nth = __kmp_xproc; // set a "reasonable" default
  if (__kmp_teams_max_nth > __kmp_sys_max_nth) {
    __kmp_teams_max_nth = __kmp_sys_max_nth;
  }

  // From the "KMP_BLOCKTIME" part of __kmp_env_initialize()
  __kmp_dflt_blocktime = KMP_DEFAULT_BLOCKTIME;
#if KMP_USE_MONITOR
  __kmp_monitor_wakeups =
      KMP_WAKEUPS_FROM_BLOCKTIME(__kmp_dflt_blocktime, __kmp_monitor_wakeups);
  __kmp_bt_intervals =
      KMP_INTERVALS_FROM_BLOCKTIME(__kmp_dflt_blocktime, __kmp_monitor_wakeups);
#endif
  // From the "KMP_LIBRARY" part of __kmp_env_initialize()
  __kmp_library = library_throughput;
  // From KMP_SCHEDULE initialization
  __kmp_static = kmp_sch_static_balanced;

// Barrier initialization. Reduction barrier defaults differ from the others.
#if KMP_FAST_REDUCTION_BARRIER
#define kmp_reduction_barrier_gather_bb ((int)1)
#define kmp_reduction_barrier_release_bb ((int)1)
#define kmp_reduction_barrier_gather_pat bp_hyper_bar
#define kmp_reduction_barrier_release_pat bp_hyper_bar
#endif // KMP_FAST_REDUCTION_BARRIER
  for (i = bs_plain_barrier; i < bs_last_barrier; i++) {
    __kmp_barrier_gather_branch_bits[i] = __kmp_barrier_gather_bb_dflt;
    __kmp_barrier_release_branch_bits[i] = __kmp_barrier_release_bb_dflt;
    __kmp_barrier_gather_pattern[i] = __kmp_barrier_gather_pat_dflt;
    __kmp_barrier_release_pattern[i] = __kmp_barrier_release_pat_dflt;
#if KMP_FAST_REDUCTION_BARRIER
    if (i == bs_reduction_barrier) {
      __kmp_barrier_gather_branch_bits[i] = kmp_reduction_barrier_gather_bb;
      __kmp_barrier_release_branch_bits[i] = kmp_reduction_barrier_release_bb;
      __kmp_barrier_gather_pattern[i] = kmp_reduction_barrier_gather_pat;
      __kmp_barrier_release_pattern[i] = kmp_reduction_barrier_release_pat;
    }
#endif // KMP_FAST_REDUCTION_BARRIER
  }
#if KMP_FAST_REDUCTION_BARRIER
#undef kmp_reduction_barrier_release_pat
#undef kmp_reduction_barrier_gather_pat
#undef kmp_reduction_barrier_release_bb
#undef kmp_reduction_barrier_gather_bb
#endif // KMP_FAST_REDUCTION_BARRIER
#if KMP_MIC_SUPPORTED
  if (__kmp_mic_type == mic2) { // KNC
    // plain=3,2, forkjoin=2,1 are optimal for 240 threads on KNC
    __kmp_barrier_gather_branch_bits[bs_plain_barrier] = 3; // plain gather
    __kmp_barrier_release_branch_bits[bs_forkjoin_barrier] =
        1; // forkjoin release
    __kmp_barrier_gather_pattern[bs_forkjoin_barrier] = bp_hierarchical_bar;
    __kmp_barrier_release_pattern[bs_forkjoin_barrier] = bp_hierarchical_bar;
  }
#if KMP_FAST_REDUCTION_BARRIER
  if (__kmp_mic_type == mic2) { // KNC
    __kmp_barrier_gather_pattern[bs_reduction_barrier] = bp_hierarchical_bar;
    __kmp_barrier_release_pattern[bs_reduction_barrier] = bp_hierarchical_bar;
  }
#endif // KMP_FAST_REDUCTION_BARRIER
#endif // KMP_MIC_SUPPORTED

// From KMP_CHECKS initialization
#ifdef KMP_DEBUG
  __kmp_env_checks = TRUE; /* development versions have the extra checks */
#else
  __kmp_env_checks = FALSE; /* port versions do not have the extra checks */
#endif

  // From "KMP_FOREIGN_THREADS_THREADPRIVATE" initialization
  __kmp_foreign_tp = TRUE;

  __kmp_global.g.g_dynamic = FALSE;
  __kmp_global.g.g_dynamic_mode = dynamic_default;

  __kmp_env_initialize(NULL);

// Print all messages in the message catalog for testing purposes.
#ifdef KMP_DEBUG
  char const *val = __kmp_env_get("KMP_DUMP_CATALOG");
  if (__kmp_str_match_true(val)) {
    kmp_str_buf_t buffer;
    __kmp_str_buf_init(&buffer);
    __kmp_i18n_dump_catalog(&buffer);
    __kmp_printf("%s", buffer.str);
    __kmp_str_buf_free(&buffer);
  }
  __kmp_env_free(&val);
#endif

  __kmp_threads_capacity =
      __kmp_initial_threads_capacity(__kmp_dflt_team_nth_ub);
  // Moved here from __kmp_env_initialize() "KMP_ALL_THREADPRIVATE" part
  __kmp_tp_capacity = __kmp_default_tp_capacity(
      __kmp_dflt_team_nth_ub, __kmp_max_nth, __kmp_allThreadsSpecified);

  // If the library is shut down properly, both pools must be NULL. Just in
  // case, set them to NULL -- some memory may leak, but subsequent code will
  // work even if pools are not freed.
  KMP_DEBUG_ASSERT(__kmp_thread_pool == NULL);
  KMP_DEBUG_ASSERT(__kmp_thread_pool_insert_pt == NULL);
  KMP_DEBUG_ASSERT(__kmp_team_pool == NULL);
  __kmp_thread_pool = NULL;
  __kmp_thread_pool_insert_pt = NULL;
  __kmp_team_pool = NULL;

  /* Allocate all of the variable-sized records. __kmp_threads_capacity entries
     are allocated, but the arrays are expandable. Since allocation is
     cache-aligned, just add the extra padding size. */
  size =
      (sizeof(kmp_info_t *) + sizeof(kmp_root_t *)) * __kmp_threads_capacity +
      CACHE_LINE;
  __kmp_threads = (kmp_info_t **)__kmp_allocate(size);
  __kmp_root = (kmp_root_t **)((char *)__kmp_threads +
                               sizeof(kmp_info_t *) * __kmp_threads_capacity);

  /* init thread counts */
  KMP_DEBUG_ASSERT(__kmp_all_nth == 0); // these fail if the library is
  KMP_DEBUG_ASSERT(__kmp_nth == 0); // reinitializing and termination was wrong

  /* setup the uber master thread and hierarchy */
  gtid = __kmp_register_root(TRUE);
  KA_TRACE(10, ("__kmp_do_serial_initialize  T#%d\n", gtid));
  KMP_ASSERT(KMP_UBER_GTID(gtid));
  KMP_ASSERT(KMP_INITIAL_GTID(gtid));

  KMP_MB(); /* Flush all pending memory write invalidates. */

  __kmp_common_initialize();

#if KMP_OS_UNIX
  /* invoke the child fork handler */
  __kmp_register_atfork();
#endif

#if !KMP_DYNAMIC_LIB
  {
    /* Invoke the exit handler when the program finishes, only for the static
       library. The dynamic library already has _fini / DllMain. */
    int rc = atexit(__kmp_internal_end_atexit);
    if (rc != 0) {
      __kmp_fatal(KMP_MSG(FunctionError, "atexit()"), KMP_ERR(rc),
                  __kmp_msg_null);
    }
  }
#endif

#if KMP_HANDLE_SIGNALS
#if KMP_OS_UNIX
  /* NOTE: make sure this is called before the user installs their own signal
     handlers so that the user handlers are called first. */
  __kmp_install_signals(FALSE);
#endif /* KMP_OS_UNIX */
#if KMP_OS_WINDOWS
  __kmp_install_signals(TRUE);
#endif /* KMP_OS_WINDOWS */
#endif

  /* we have finished the serial initialization */
  __kmp_init_counter++;

  __kmp_init_serial = TRUE;

  if (__kmp_settings) {
    __kmp_env_print();
  }

#if OMP_40_ENABLED
  if (__kmp_display_env || __kmp_display_env_verbose) {
    __kmp_env_print_2();
  }
#endif // OMP_40_ENABLED

  KMP_MB();

  KA_TRACE(10, ("__kmp_do_serial_initialize: exit\n"));
}
void __kmp_serial_initialize(void) {
  if (__kmp_init_serial) {
    return;
  }
  __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);
  if (__kmp_init_serial) {
    __kmp_release_bootstrap_lock(&__kmp_initz_lock);
    return;
  }
  __kmp_do_serial_initialize();
  __kmp_release_bootstrap_lock(&__kmp_initz_lock);
}
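
// Illustrative sketch (not part of the runtime): __kmp_serial_initialize and
// __kmp_middle_initialize below both use the same "check, lock, re-check"
// pattern around a bootstrap lock. The same shape in portable C++ (invented
// names; a production version would use std::atomic<bool> or std::call_once
// rather than a plain flag):
#if 0
#include <mutex>

static bool g_initialized = false;
static std::mutex g_init_lock;

static void do_initialize_once() {
  if (g_initialized) // fast path: already done
    return;
  std::lock_guard<std::mutex> guard(g_init_lock);
  if (g_initialized) // re-check under the lock: another thread won the race
    return;
  // ... expensive one-time setup goes here ...
  g_initialized = true;
}
#endif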
static void __kmp_do_middle_initialize(void) {
  int i, j;
  int prev_dflt_team_nth;

  if (!__kmp_init_serial) {
    __kmp_do_serial_initialize();
  }

  KA_TRACE(10, ("__kmp_middle_initialize: enter\n"));

  // Save the previous value for __kmp_dflt_team_nth so we can avoid some
  // reinitialization if it hasn't changed.
  prev_dflt_team_nth = __kmp_dflt_team_nth;

#if KMP_AFFINITY_SUPPORTED
  // __kmp_affinity_initialize() will try to set __kmp_ncores to the number of
  // cores on the machine.
  __kmp_affinity_initialize();

  // Run through the __kmp_threads array and set the affinity mask for each
  // root thread that is currently registered with the RTL.
  for (i = 0; i < __kmp_threads_capacity; i++) {
    if (TCR_PTR(__kmp_threads[i]) != NULL) {
      __kmp_affinity_set_init_mask(i, TRUE);
    }
  }
#endif /* KMP_AFFINITY_SUPPORTED */

  KMP_ASSERT(__kmp_xproc > 0);
  if (__kmp_avail_proc == 0) {
    __kmp_avail_proc = __kmp_xproc;
  }

  // If there were empty places in the num_threads list (OMP_NUM_THREADS=,,2,3),
  // correct them now.
  j = 0;
  while ((j < __kmp_nested_nth.used) && !__kmp_nested_nth.nth[j]) {
    __kmp_nested_nth.nth[j] = __kmp_dflt_team_nth = __kmp_dflt_team_nth_ub =
        __kmp_avail_proc;
    j++;
  }

  if (__kmp_dflt_team_nth == 0) {
#ifdef KMP_DFLT_NTH_CORES
    // Default #threads = #cores
    __kmp_dflt_team_nth = __kmp_ncores;
    KA_TRACE(20, ("__kmp_middle_initialize: setting __kmp_dflt_team_nth = "
                  "__kmp_ncores (%d)\n",
                  __kmp_dflt_team_nth));
#else
    // Default #threads = #available OS procs
    __kmp_dflt_team_nth = __kmp_avail_proc;
    KA_TRACE(20, ("__kmp_middle_initialize: setting __kmp_dflt_team_nth = "
                  "__kmp_avail_proc(%d)\n",
                  __kmp_dflt_team_nth));
#endif /* KMP_DFLT_NTH_CORES */
  }

  if (__kmp_dflt_team_nth < KMP_MIN_NTH) {
    __kmp_dflt_team_nth = KMP_MIN_NTH;
  }
  if (__kmp_dflt_team_nth > __kmp_sys_max_nth) {
    __kmp_dflt_team_nth = __kmp_sys_max_nth;
  }

  /* There's no harm in continuing even if __kmp_dflt_team_nth had to be
     clamped. */
  KMP_DEBUG_ASSERT(__kmp_dflt_team_nth <= __kmp_dflt_team_nth_ub);

  if (__kmp_dflt_team_nth != prev_dflt_team_nth) {
    // Set the num threads ICV for each registered root thread that has not
    // already set its nthreads-var with omp_set_num_threads().
    for (i = 0; i < __kmp_threads_capacity; i++) {
      kmp_info_t *thread = __kmp_threads[i];
      if (thread == NULL)
        continue;
      if (thread->th.th_current_task->td_icvs.nproc != 0)
        continue;

      set__nproc(__kmp_threads[i], __kmp_dflt_team_nth);
    }
  }
  KA_TRACE(
      20,
      ("__kmp_middle_initialize: final value for __kmp_dflt_team_nth = %d\n",
       __kmp_dflt_team_nth));

#ifdef KMP_ADJUST_BLOCKTIME
  /* Adjust blocktime to zero if necessary now that __kmp_avail_proc is set */
  if (!__kmp_env_blocktime && (__kmp_avail_proc > 0)) {
    KMP_DEBUG_ASSERT(__kmp_avail_proc > 0);
    if (__kmp_nth > __kmp_avail_proc) {
      __kmp_zero_bt = TRUE;
    }
  }
#endif /* KMP_ADJUST_BLOCKTIME */

  /* we have finished middle initialization */
  TCW_SYNC_4(__kmp_init_middle, TRUE);

  KA_TRACE(10, ("__kmp_do_middle_initialize: exit\n"));
}
void __kmp_middle_initialize(void) {
  if (__kmp_init_middle) {
    return;
  }
  __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);
  if (__kmp_init_middle) {
    __kmp_release_bootstrap_lock(&__kmp_initz_lock);
    return;
  }
  __kmp_do_middle_initialize();
  __kmp_release_bootstrap_lock(&__kmp_initz_lock);
}
void __kmp_parallel_initialize(void) {
  int gtid = __kmp_entry_gtid(); // this might be a new root

  /* synchronize parallel initialization (for sibling) */
  if (TCR_4(__kmp_init_parallel))
    return;
  __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);
  if (TCR_4(__kmp_init_parallel)) {
    __kmp_release_bootstrap_lock(&__kmp_initz_lock);
    return;
  }

  /* TODO reinitialization after we have already shut down */
  if (TCR_4(__kmp_global.g.g_done)) {
    KA_TRACE(
        10,
        ("__kmp_parallel_initialize: attempt to init while shutting down\n"));
    __kmp_infinite_loop();
  }

  /* __kmp_initz_lock is already held, so calling __kmp_serial_initialize
     would cause a deadlock; call __kmp_do_middle_initialize directly. */
  if (!__kmp_init_middle) {
    __kmp_do_middle_initialize();
  }

  /* begin initialization */
  KA_TRACE(10, ("__kmp_parallel_initialize: enter\n"));
  KMP_ASSERT(KMP_UBER_GTID(gtid));

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
  // Save the FP control regs. Worker threads will set theirs to these values
  // at thread startup.
  __kmp_store_x87_fpu_control_word(&__kmp_init_x87_fpu_control_word);
  __kmp_store_mxcsr(&__kmp_init_mxcsr);
  __kmp_init_mxcsr &= KMP_X86_MXCSR_MASK;
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

#if KMP_OS_UNIX
#if KMP_HANDLE_SIGNALS
  /* must be after __kmp_serial_initialize */
  __kmp_install_signals(TRUE);
#endif
#endif

  __kmp_suspend_initialize();

#if defined(USE_LOAD_BALANCE)
  if (__kmp_global.g.g_dynamic_mode == dynamic_default) {
    __kmp_global.g.g_dynamic_mode = dynamic_load_balance;
  }
#else
  if (__kmp_global.g.g_dynamic_mode == dynamic_default) {
    __kmp_global.g.g_dynamic_mode = dynamic_thread_limit;
  }
#endif

  if (__kmp_version) {
    __kmp_print_version_2();
  }

  /* we have finished parallel initialization */
  TCW_SYNC_4(__kmp_init_parallel, TRUE);

  KMP_MB();
  KA_TRACE(10, ("__kmp_parallel_initialize: exit\n"));

  __kmp_release_bootstrap_lock(&__kmp_initz_lock);
}
/* ------------------------------------------------------------------------ */

void __kmp_run_before_invoked_task(int gtid, int tid, kmp_info_t *this_thr,
                                   kmp_team_t *team) {
  kmp_disp_t *dispatch;

  KMP_MB();

  /* none of the threads have encountered any constructs, yet. */
  this_thr->th.th_local.this_construct = 0;
#if KMP_CACHE_MANAGE
  KMP_CACHE_PREFETCH(&this_thr->th.th_bar[bs_forkjoin_barrier].bb.b_arrived);
#endif /* KMP_CACHE_MANAGE */
  dispatch = (kmp_disp_t *)TCR_PTR(this_thr->th.th_dispatch);
  KMP_DEBUG_ASSERT(dispatch);
  KMP_DEBUG_ASSERT(team->t.t_dispatch);

  dispatch->th_disp_index = 0; /* reset the dispatch buffer counter */
#if OMP_45_ENABLED
  dispatch->th_doacross_buf_idx = 0; /* reset doacross dispatch buffer counter */
#endif
  if (__kmp_env_consistency_check)
    __kmp_push_parallel(gtid, team->t.t_ident);

  KMP_MB(); /* Flush all pending memory write invalidates. */
}

void __kmp_run_after_invoked_task(int gtid, int tid, kmp_info_t *this_thr,
                                  kmp_team_t *team) {
  if (__kmp_env_consistency_check)
    __kmp_pop_parallel(gtid, team->t.t_ident);

  __kmp_finish_implicit_task(this_thr);
}
int __kmp_invoke_task_func(int gtid) {
  int rc;
  int tid = __kmp_tid_from_gtid(gtid);
  kmp_info_t *this_thr = __kmp_threads[gtid];
  kmp_team_t *team = this_thr->th.th_team;

  __kmp_run_before_invoked_task(gtid, tid, this_thr, team);
#if USE_ITT_BUILD
  if (__itt_stack_caller_create_ptr) {
    // inform ittnotify about entering user's code
    __kmp_itt_stack_callee_enter((__itt_caller)team->t.t_stack_id);
  }
#endif /* USE_ITT_BUILD */
#if INCLUDE_SSC_MARKS
  SSC_MARK_INVOKING();
#endif

#if OMPT_SUPPORT
  void *dummy;
  void **exit_runtime_p;
  ompt_data_t *my_task_data;
  ompt_data_t *my_parallel_data;
  int ompt_team_size;

  if (ompt_enabled.enabled) {
    exit_runtime_p = &(team->t.t_implicit_task_taskdata[tid]
                           .ompt_task_info.frame.exit_frame.ptr);
  } else {
    exit_runtime_p = &dummy;
  }

  my_task_data =
      &(team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_data);
  my_parallel_data = &(team->t.ompt_team_info.parallel_data);
  if (ompt_enabled.ompt_callback_implicit_task) {
    ompt_team_size = team->t.t_nproc;
    ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
        ompt_scope_begin, my_parallel_data, my_task_data, ompt_team_size,
        __kmp_tid_from_gtid(gtid), ompt_task_implicit);
    OMPT_CUR_TASK_INFO(this_thr)->thread_num = __kmp_tid_from_gtid(gtid);
  }
#endif

  {
    KMP_TIME_PARTITIONED_BLOCK(OMP_parallel);
    KMP_SET_THREAD_STATE_BLOCK(IMPLICIT_TASK);
    rc = __kmp_invoke_microtask((microtask_t)TCR_SYNC_PTR(team->t.t_pkfn), gtid,
                                tid, (int)team->t.t_argc,
                                (void **)team->t.t_argv
#if OMPT_SUPPORT
                                ,
                                exit_runtime_p
#endif
                                );
#if OMPT_SUPPORT
    *exit_runtime_p = NULL;
#endif
  }

#if USE_ITT_BUILD
  if (__itt_stack_caller_create_ptr) {
    // inform ittnotify about leaving user's code
    __kmp_itt_stack_callee_leave((__itt_caller)team->t.t_stack_id);
  }
#endif /* USE_ITT_BUILD */
  __kmp_run_after_invoked_task(gtid, tid, this_thr, team);

  return rc;
}
void __kmp_teams_master(int gtid) {
  // This routine is called by all master threads in the teams construct.
  kmp_info_t *thr = __kmp_threads[gtid];
  kmp_team_t *team = thr->th.th_team;
  ident_t *loc = team->t.t_ident;
  thr->th.th_set_nproc = thr->th.th_teams_size.nth;
  KMP_DEBUG_ASSERT(thr->th.th_teams_microtask);
  KMP_DEBUG_ASSERT(thr->th.th_set_nproc);
  KA_TRACE(20, ("__kmp_teams_master: T#%d, Tid %d, microtask %p\n", gtid,
                __kmp_tid_from_gtid(gtid), thr->th.th_teams_microtask));

  // Launch the league of teams now, but do not let workers execute (they hang
  // on the fork barrier until the next parallel region).
#if INCLUDE_SSC_MARKS
  SSC_MARK_FORKING();
#endif
  __kmp_fork_call(loc, gtid, fork_context_intel, team->t.t_argc,
                  (microtask_t)thr->th.th_teams_microtask, // "wrapped" task
                  VOLATILE_CAST(launch_t) __kmp_invoke_task_func, NULL);
#if INCLUDE_SSC_MARKS
  SSC_MARK_JOINING();
#endif

  // The last parameter "1" eliminates the join barrier, which would not work
  // because worker threads are waiting at the fork barrier.
  __kmp_join_call(loc, gtid
#if OMPT_SUPPORT
                  ,
                  fork_context_intel
#endif
                  ,
                  1);
}

int __kmp_invoke_teams_master(int gtid) {
  kmp_info_t *this_thr = __kmp_threads[gtid];
  kmp_team_t *team = this_thr->th.th_team;
#if KMP_DEBUG
  if (!__kmp_threads[gtid]->th.th_team->t.t_serialized)
    KMP_DEBUG_ASSERT((void *)__kmp_threads[gtid]->th.th_team->t.t_pkfn ==
                     (void *)__kmp_teams_master);
#endif
  __kmp_run_before_invoked_task(gtid, 0, this_thr, team);
  __kmp_teams_master(gtid);
  __kmp_run_after_invoked_task(gtid, 0, this_thr, team);
  return 1;
}
/* This sets the requested number of threads for the next parallel region
   encountered by this team. Since this should be enclosed in the forkjoin
   critical section it should avoid race conditions with asymmetrical nested
   parallelism. */
void __kmp_push_num_threads(ident_t *id, int gtid, int num_threads) {
  kmp_info_t *thr = __kmp_threads[gtid];

  if (num_threads > 0)
    thr->th.th_set_nproc = num_threads;
}
/* This sets the requested number of teams for the teams region and/or
   the number of threads for the next parallel region encountered. */
void __kmp_push_num_teams(ident_t *id, int gtid, int num_teams,
                          int num_threads) {
  kmp_info_t *thr = __kmp_threads[gtid];
  KMP_DEBUG_ASSERT(num_teams >= 0);
  KMP_DEBUG_ASSERT(num_threads >= 0);

  if (num_teams == 0)
    num_teams = 1; // default number of teams is 1
  if (num_teams > __kmp_teams_max_nth) { // too many teams requested?
    if (!__kmp_reserve_warn) {
      __kmp_reserve_warn = 1;
      __kmp_msg(kmp_ms_warning,
                KMP_MSG(CantFormThrTeam, num_teams, __kmp_teams_max_nth),
                KMP_HNT(Unset_ALL_THREADS), __kmp_msg_null);
    }
    num_teams = __kmp_teams_max_nth;
  }
  // Set number of teams (number of threads in the outer "parallel" of the
  // teams construct).
  thr->th.th_set_nproc = thr->th.th_teams_size.nteams = num_teams;

  // Remember the number of threads for inner parallel regions.
  if (num_threads == 0) {
    if (!TCR_4(__kmp_init_middle))
      __kmp_middle_initialize(); // get __kmp_avail_proc calculated
    num_threads = __kmp_avail_proc / num_teams;
    if (num_teams * num_threads > __kmp_teams_max_nth) {
      // adjust num_threads without a warning; it is not a user setting
      num_threads = __kmp_teams_max_nth / num_teams;
    }
  } else {
    if (num_teams * num_threads > __kmp_teams_max_nth) {
      int new_threads = __kmp_teams_max_nth / num_teams;
      if (!__kmp_reserve_warn) { // user asked for too many threads
        __kmp_reserve_warn = 1;
        __kmp_msg(kmp_ms_warning,
                  KMP_MSG(CantFormThrTeam, num_threads, new_threads),
                  KMP_HNT(Unset_ALL_THREADS), __kmp_msg_null);
      }
      num_threads = new_threads;
    }
  }
  thr->th.th_teams_size.nth = num_threads;
}
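
// Illustrative sketch (not part of the runtime): when no thread count is
// given, the code above derives it as avail_proc / num_teams and re-clamps so
// that num_teams * num_threads never exceeds the overall cap. A standalone
// version of that arithmetic (invented names, no warnings):
#if 0
static int derive_threads_per_team(int requested, int num_teams,
                                   int avail_proc, int teams_max_nth) {
  if (num_teams <= 0)
    num_teams = 1;
  int nth = (requested > 0) ? requested : avail_proc / num_teams;
  if (num_teams * nth > teams_max_nth)
    nth = teams_max_nth / num_teams; // keep the product under the cap
  return nth;
}
// e.g. derive_threads_per_team(0, 4, 64, 256) == 16
#endif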
// Set the proc_bind var to use in the following parallel region.
void __kmp_push_proc_bind(ident_t *id, int gtid, kmp_proc_bind_t proc_bind) {
  kmp_info_t *thr = __kmp_threads[gtid];
  thr->th.th_set_proc_bind = proc_bind;
}
/* Launch the worker threads into the microtask. */
void __kmp_internal_fork(ident_t *id, int gtid, kmp_team_t *team) {
  kmp_info_t *this_thr = __kmp_threads[gtid];

#ifdef KMP_DEBUG
  int f;
#endif /* KMP_DEBUG */

  KMP_DEBUG_ASSERT(team);
  KMP_DEBUG_ASSERT(this_thr->th.th_team == team);
  KMP_ASSERT(KMP_MASTER_GTID(gtid));
  KMP_MB(); /* Flush all pending memory write invalidates. */

  team->t.t_construct = 0; /* no single directives seen yet */
  team->t.t_ordered.dt.t_value =
      0; /* thread 0 enters the ordered section first */

  /* Reset the identifiers on the dispatch buffer */
  KMP_DEBUG_ASSERT(team->t.t_disp_buffer);
  if (team->t.t_max_nproc > 1) {
    int i;
    for (i = 0; i < __kmp_dispatch_num_buffers; ++i) {
      team->t.t_disp_buffer[i].buffer_index = i;
#if OMP_45_ENABLED
      team->t.t_disp_buffer[i].doacross_buf_idx = i;
#endif
    }
  } else {
    team->t.t_disp_buffer[0].buffer_index = 0;
#if OMP_45_ENABLED
    team->t.t_disp_buffer[0].doacross_buf_idx = 0;
#endif
  }

  KMP_MB(); /* Flush all pending memory write invalidates. */
  KMP_ASSERT(this_thr->th.th_team == team);

#ifdef KMP_DEBUG
  for (f = 0; f < team->t.t_nproc; f++) {
    KMP_DEBUG_ASSERT(team->t.t_threads[f] &&
                     team->t.t_threads[f]->th.th_team_nproc == team->t.t_nproc);
  }
#endif /* KMP_DEBUG */

  /* release the worker threads so they may begin working */
  __kmp_fork_barrier(gtid, 0);
}
void __kmp_internal_join(ident_t *id, int gtid, kmp_team_t *team) {
  kmp_info_t *this_thr = __kmp_threads[gtid];

  KMP_DEBUG_ASSERT(team);
  KMP_DEBUG_ASSERT(this_thr->th.th_team == team);
  KMP_ASSERT(KMP_MASTER_GTID(gtid));
  KMP_MB(); /* Flush all pending memory write invalidates. */

/* Join barrier after fork */

#ifdef KMP_DEBUG
  if (__kmp_threads[gtid] &&
      __kmp_threads[gtid]->th.th_team_nproc != team->t.t_nproc) {
    __kmp_printf("GTID: %d, __kmp_threads[%d]=%p\n", gtid, gtid,
                 __kmp_threads[gtid]);
    __kmp_printf("__kmp_threads[%d]->th.th_team_nproc=%d, TEAM: %p, "
                 "team->t.t_nproc=%d\n",
                 gtid, __kmp_threads[gtid]->th.th_team_nproc, team,
                 team->t.t_nproc);
    __kmp_print_structure();
  }
  KMP_DEBUG_ASSERT(__kmp_threads[gtid] &&
                   __kmp_threads[gtid]->th.th_team_nproc == team->t.t_nproc);
#endif /* KMP_DEBUG */

  __kmp_join_barrier(gtid); /* wait for everyone */
#if OMPT_SUPPORT
  if (ompt_enabled.enabled &&
      this_thr->th.ompt_thread_info.state == ompt_state_wait_barrier_implicit) {
    int ds_tid = this_thr->th.th_info.ds.ds_tid;
    ompt_data_t *task_data = OMPT_CUR_TASK_DATA(this_thr);
    this_thr->th.ompt_thread_info.state = ompt_state_overhead;
#if OMPT_OPTIONAL
    void *codeptr = NULL;
    if (KMP_MASTER_TID(ds_tid) &&
        (ompt_callbacks.ompt_callback(ompt_callback_sync_region_wait) ||
         ompt_callbacks.ompt_callback(ompt_callback_sync_region)))
      codeptr = OMPT_CUR_TEAM_INFO(this_thr)->master_return_address;

    if (ompt_enabled.ompt_callback_sync_region_wait) {
      ompt_callbacks.ompt_callback(ompt_callback_sync_region_wait)(
          ompt_sync_region_barrier, ompt_scope_end, NULL, task_data, codeptr);
    }
    if (ompt_enabled.ompt_callback_sync_region) {
      ompt_callbacks.ompt_callback(ompt_callback_sync_region)(
          ompt_sync_region_barrier, ompt_scope_end, NULL, task_data, codeptr);
    }
#endif
    if (!KMP_MASTER_TID(ds_tid) && ompt_enabled.ompt_callback_implicit_task) {
      ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
          ompt_scope_end, NULL, task_data, 0, ds_tid, ompt_task_implicit);
    }
  }
#endif

  KMP_MB(); /* Flush all pending memory write invalidates. */
  KMP_ASSERT(this_thr->th.th_team == team);
}
/* ------------------------------------------------------------------------ */

#ifdef USE_LOAD_BALANCE

// Return the worker threads actively spinning in the hot team, if we are at
// the outermost level of parallelism. Otherwise, return 0.
static int __kmp_active_hot_team_nproc(kmp_root_t *root) {
  int i;
  int retval;
  kmp_team_t *hot_team;

  if (root->r.r_active) {
    return 0;
  }
  hot_team = root->r.r_hot_team;
  if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME) {
    return hot_team->t.t_nproc - 1; // Don't count the master thread
  }

  // Skip the master thread; it is accounted for elsewhere.
  retval = 0;
  for (i = 1; i < hot_team->t.t_nproc; i++) {
    if (hot_team->t.t_threads[i]->th.th_active) {
      retval++;
    }
  }
  return retval;
}

// Perform an automatic adjustment to the number of threads used by the next
// parallel region.
static int __kmp_load_balance_nproc(kmp_root_t *root, int set_nproc) {
  int retval;
  int pool_active;
  int hot_team_active;
  int team_curr_active;
  int system_active;

  KB_TRACE(20, ("__kmp_load_balance_nproc: called root:%p set_nproc:%d\n", root,
                set_nproc));
  KMP_DEBUG_ASSERT(root);
  KMP_DEBUG_ASSERT(root->r.r_root_team->t.t_threads[0]
                       ->th.th_current_task->td_icvs.dynamic == TRUE);
  KMP_DEBUG_ASSERT(set_nproc > 1);

  if (set_nproc == 1) {
    KB_TRACE(20, ("__kmp_load_balance_nproc: serial execution.\n"));
    return 1;
  }

  // Threads that are active in the thread pool, active in the hot team for
  // this root (if we are at the outer parallel level), and the currently
  // executing thread (to become the master) are available to add to the new
  // team, but are currently contributing to the system load and must be
  // accounted for.
  pool_active = __kmp_thread_pool_active_nth;
  hot_team_active = __kmp_active_hot_team_nproc(root);
  team_curr_active = pool_active + hot_team_active + 1;

  // Check the system load.
  system_active = __kmp_get_load_balance(__kmp_avail_proc + team_curr_active);
  KB_TRACE(30, ("__kmp_load_balance_nproc: system active = %d pool active = %d "
                "hot team active = %d\n",
                system_active, pool_active, hot_team_active));

  if (system_active < 0) {
    // There was an error reading the necessary info from /proc, so use the
    // thread limit algorithm instead. Once g_dynamic_mode is set to
    // dynamic_thread_limit, we shouldn't wind up getting back here.
    __kmp_global.g.g_dynamic_mode = dynamic_thread_limit;
    KMP_WARNING(CantLoadBalUsing, "KMP_DYNAMIC_MODE=thread limit");

    // Make this call behave like the thread limit algorithm.
    retval = __kmp_avail_proc - __kmp_nth +
             (root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc);
    if (retval > set_nproc) {
      retval = set_nproc;
    }
    if (retval < KMP_MIN_NTH) {
      retval = KMP_MIN_NTH;
    }

    KB_TRACE(20, ("__kmp_load_balance_nproc: thread limit exit. retval:%d\n",
                  retval));
    return retval;
  }

  // There is a slight delay in the load balance algorithm in detecting newly
  // running procs. The real system load at this instant should be at least as
  // large as the #active omp threads that are available to add to the team.
  if (system_active < team_curr_active) {
    system_active = team_curr_active;
  }
  retval = __kmp_avail_proc - system_active + team_curr_active;
  if (retval > set_nproc) {
    retval = set_nproc;
  }
  if (retval < KMP_MIN_NTH) {
    retval = KMP_MIN_NTH;
  }

  KB_TRACE(20, ("__kmp_load_balance_nproc: exit. retval:%d\n", retval));
  return retval;
} // __kmp_load_balance_nproc()

#endif /* USE_LOAD_BALANCE */
/* ------------------------------------------------------------------------ */

void __kmp_cleanup(void) {
  int f;

  KA_TRACE(10, ("__kmp_cleanup: enter\n"));

  if (TCR_4(__kmp_init_parallel)) {
#if KMP_HANDLE_SIGNALS
    __kmp_remove_signals();
#endif
    TCW_4(__kmp_init_parallel, FALSE);
  }

  if (TCR_4(__kmp_init_middle)) {
#if KMP_AFFINITY_SUPPORTED
    __kmp_affinity_uninitialize();
#endif /* KMP_AFFINITY_SUPPORTED */
    __kmp_cleanup_hierarchy();
    TCW_4(__kmp_init_middle, FALSE);
  }

  KA_TRACE(10, ("__kmp_cleanup: go serial cleanup\n"));

  if (__kmp_init_serial) {
    __kmp_runtime_destroy();
    __kmp_init_serial = FALSE;
  }

  __kmp_cleanup_threadprivate_caches();

  for (f = 0; f < __kmp_threads_capacity; f++) {
    if (__kmp_root[f] != NULL) {
      __kmp_free(__kmp_root[f]);
      __kmp_root[f] = NULL;
    }
  }
  __kmp_free(__kmp_threads);
  // __kmp_threads and __kmp_root were allocated as a single block, so there is
  // no need to free __kmp_root separately.
  __kmp_threads = NULL;
  __kmp_root = NULL;
  __kmp_threads_capacity = 0;

#if KMP_USE_DYNAMIC_LOCK
  __kmp_cleanup_indirect_user_locks();
#else
  __kmp_cleanup_user_locks();
#endif

#if KMP_AFFINITY_SUPPORTED
  KMP_INTERNAL_FREE(CCAST(char *, __kmp_cpuinfo_file));
  __kmp_cpuinfo_file = NULL;
#endif /* KMP_AFFINITY_SUPPORTED */

#if KMP_USE_ADAPTIVE_LOCKS
#if KMP_DEBUG_ADAPTIVE_LOCKS
  __kmp_print_speculative_stats();
#endif
#endif
  KMP_INTERNAL_FREE(__kmp_nested_nth.nth);
  __kmp_nested_nth.nth = NULL;
  __kmp_nested_nth.size = 0;
  __kmp_nested_nth.used = 0;
  KMP_INTERNAL_FREE(__kmp_nested_proc_bind.bind_types);
  __kmp_nested_proc_bind.bind_types = NULL;
  __kmp_nested_proc_bind.size = 0;
  __kmp_nested_proc_bind.used = 0;
#if OMP_50_ENABLED
  if (__kmp_affinity_format) {
    KMP_INTERNAL_FREE(__kmp_affinity_format);
    __kmp_affinity_format = NULL;
  }
#endif

  __kmp_i18n_catclose();

#if KMP_USE_HIER_SCHED
  __kmp_hier_scheds.deallocate();
#endif

#if KMP_STATS_ENABLED
  __kmp_stats_fini();
#endif

  KA_TRACE(10, ("__kmp_cleanup: exit\n"));
}
/* ------------------------------------------------------------------------ */

int __kmp_ignore_mppbeg(void) {
  char *env;

  if ((env = getenv("KMP_IGNORE_MPPBEG")) != NULL) {
    if (__kmp_str_match_false(env))
      return FALSE;
  }
  // By default __kmpc_begin() is no-op.
  return TRUE;
}

int __kmp_ignore_mppend(void) {
  char *env;

  if ((env = getenv("KMP_IGNORE_MPPEND")) != NULL) {
    if (__kmp_str_match_false(env))
      return FALSE;
  }
  // By default __kmpc_end() is no-op.
  return TRUE;
}
void __kmp_internal_begin(void) {
  int gtid;
  kmp_root_t *root;

  /* this is a very important step as it will register new sibling threads and
     assign these new uber threads a new gtid */
  gtid = __kmp_entry_gtid();
  root = __kmp_threads[gtid]->th.th_root;
  KMP_ASSERT(KMP_UBER_GTID(gtid));

  if (root->r.r_begin)
    return;
  __kmp_acquire_lock(&root->r.r_begin_lock, gtid);
  if (root->r.r_begin) {
    __kmp_release_lock(&root->r.r_begin_lock, gtid);
    return;
  }

  root->r.r_begin = TRUE;

  __kmp_release_lock(&root->r.r_begin_lock, gtid);
}
/* ------------------------------------------------------------------------ */

void __kmp_user_set_library(enum library_type arg) {
  int gtid;
  kmp_root_t *root;
  kmp_info_t *thread;

  /* first, make sure we are initialized so we can get our gtid */
  gtid = __kmp_entry_gtid();
  thread = __kmp_threads[gtid];

  root = thread->th.th_root;

  KA_TRACE(20, ("__kmp_user_set_library: enter T#%d, arg: %d, %d\n", gtid, arg,
                library_serial));
  if (root->r.r_in_parallel) { /* Must be called in the serial section of the
                                  top-level thread */
    KMP_WARNING(SetLibraryIncorrectCall);
    return;
  }

  switch (arg) {
  case library_serial:
    thread->th.th_set_nproc = 0;
    set__nproc(thread, 1);
    break;
  case library_turnaround:
    thread->th.th_set_nproc = 0;
    set__nproc(thread, __kmp_dflt_team_nth ? __kmp_dflt_team_nth
                                           : __kmp_dflt_team_nth_ub);
    break;
  case library_throughput:
    thread->th.th_set_nproc = 0;
    set__nproc(thread, __kmp_dflt_team_nth ? __kmp_dflt_team_nth
                                           : __kmp_dflt_team_nth_ub);
    break;
  default:
    KMP_FATAL(UnknownLibraryType, arg);
  }

  __kmp_aux_set_library(arg);
}
void __kmp_aux_set_stacksize(size_t arg) {
  if (!__kmp_init_serial)
    __kmp_serial_initialize();

#if KMP_OS_DARWIN
  if (arg & (0x1000 - 1)) { // not a multiple of the page size: round up
    arg &= ~(0x1000 - 1);
    if (arg + 0x1000) /* check for overflow if we round up */
      arg += 0x1000;
  }
#endif
  __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);

  /* only change the default stacksize before the first parallel region */
  if (!TCR_4(__kmp_init_parallel)) {
    size_t value = arg; /* argument is in bytes */

    if (value < __kmp_sys_min_stksize)
      value = __kmp_sys_min_stksize;
    else if (value > KMP_MAX_STKSIZE)
      value = KMP_MAX_STKSIZE;

    __kmp_stksize = value;

    __kmp_env_stksize = TRUE; /* was KMP_STACKSIZE specified? */
  }

  __kmp_release_bootstrap_lock(&__kmp_initz_lock);
}
/* set the behaviour of the runtime library */
/* TODO this can cause some odd behaviour with sibling parallelism... */
void __kmp_aux_set_library(enum library_type arg) {
  __kmp_library = arg;

  switch (__kmp_library) {
  case library_serial: {
    KMP_INFORM(LibraryIsSerial);
    (void)__kmp_change_library(TRUE);
  } break;
  case library_turnaround:
    (void)__kmp_change_library(TRUE);
    break;
  case library_throughput:
    (void)__kmp_change_library(FALSE);
    break;
  default:
    KMP_FATAL(UnknownLibraryType, arg);
  }
}
/* Getting team information common for all team API */
// Returns NULL if not in a teams construct
static kmp_team_t *__kmp_aux_get_team_info(int &teams_serialized) {
  kmp_info_t *thr = __kmp_entry_thread();
  teams_serialized = 0;
  if (thr->th.th_teams_microtask) {
    kmp_team_t *team = thr->th.th_team;
    int tlevel = thr->th.th_teams_level; // the level of the teams construct
    int ii = team->t.t_level;
    teams_serialized = team->t.t_serialized;
    int level = tlevel + 1;
    KMP_DEBUG_ASSERT(ii >= tlevel);
    while (ii > level) {
      for (teams_serialized = team->t.t_serialized;
           (teams_serialized > 0) && (ii > level); teams_serialized--, ii--) {
      }
      if (team->t.t_serialized && (!teams_serialized)) {
        team = team->t.t_parent;
        continue;
      }
      if (ii > level) {
        team = team->t.t_parent;
        ii--;
      }
    }
    return team;
  }
  return NULL;
}

int __kmp_aux_get_team_num() {
  int serialized;
  kmp_team_t *team = __kmp_aux_get_team_info(serialized);
  if (team) {
    if (serialized > 1) {
      return 0; // teams region is serialized ( 1 team of 1 thread ).
    } else {
      return team->t.t_master_tid;
    }
  }
  return 0;
}

int __kmp_aux_get_num_teams() {
  int serialized;
  kmp_team_t *team = __kmp_aux_get_team_info(serialized);
  if (team) {
    if (serialized > 1) {
      return 1;
    } else {
      return team->t.t_parent->t.t_nproc;
    }
  }
  return 1;
}
#if OMP_50_ENABLED
/* ------------------------------------------------------------------------ */

// Fields for the OMP_AFFINITY_FORMAT / affinity display routines.
typedef struct kmp_affinity_format_field_t {
  char short_name; // from spec e.g., L -> thread level
  const char *long_name; // from spec e.g., thread_level
  char field_format; // data type for snprintf ('d' for integer, 's' for string)
} kmp_affinity_format_field_t;

static const kmp_affinity_format_field_t __kmp_affinity_format_table[] = {
#if KMP_AFFINITY_SUPPORTED
    {'A', "thread_affinity", 's'},
#endif
    {'t', "team_num", 'd'},
    {'T', "num_teams", 'd'},
    {'L', "nesting_level", 'd'},
    {'n', "thread_num", 'd'},
    {'N', "num_threads", 'd'},
    {'a', "ancestor_tnum", 'd'},
    {'H', "host", 's'},
    {'P', "process_id", 'd'},
    {'i', "native_thread_id", 'd'}};
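
// Illustrative sketch (not part of the runtime): the parser below walks this
// table and accepts either the one-character short name ("%n") or the braced
// long name ("%{thread_num}"). A minimal standalone lookup over the same kind
// of table (invented names):
#if 0
#include <cstring>

struct ToyField {
  char short_name;
  const char *long_name;
};

static const ToyField kToyTable[] = {{'n', "thread_num"}, {'N', "num_threads"}};

// Returns the matching short name, or 0 if the specifier is unknown.
static char lookup_field(const char *spec, bool is_long_name) {
  for (const ToyField &f : kToyTable) {
    if (is_long_name ? (std::strncmp(spec, f.long_name,
                                     std::strlen(f.long_name)) == 0)
                     : (spec[0] == f.short_name))
      return f.short_name;
  }
  return 0;
}
#endif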
// Return the number of characters it takes to hold the field.
static int __kmp_aux_capture_affinity_field(int gtid, const kmp_info_t *th,
                                            const char **ptr,
                                            kmp_str_buf_t *field_buffer) {
  int rc, format_index, field_value;
  const char *width_left, *width_right;
  bool pad_zeros, right_justify, parse_long_name, found_valid_name;
  static const int FORMAT_SIZE = 20;
  char format[FORMAT_SIZE] = {0};
  char absolute_short_name = 0;

  KMP_DEBUG_ASSERT(gtid >= 0);
  KMP_DEBUG_ASSERT(th);
  KMP_DEBUG_ASSERT(**ptr == '%');
  KMP_DEBUG_ASSERT(field_buffer);

  __kmp_str_buf_clear(field_buffer);

  // Skip the initial %
  (*ptr)++;

  // Check for %% first
  if (**ptr == '%') {
    __kmp_str_buf_cat(field_buffer, "%", 1);
    (*ptr)++; // skip over the second %
    return 1;
  }

  // Parse field modifiers if they are present
  pad_zeros = false;
  if (**ptr == '0') {
    pad_zeros = true;
    (*ptr)++; // skip over 0
  }
  right_justify = false;
  if (**ptr == '.') {
    right_justify = true;
    (*ptr)++; // skip over .
  }
  // Parse width of field: [width_left, width_right)
  width_left = width_right = NULL;
  if (**ptr >= '0' && **ptr <= '9') {
    width_left = *ptr;
    SKIP_DIGITS(*ptr);
    width_right = *ptr;
  }

  // Create the format for KMP_SNPRINTF based on the flags parsed above
  format_index = 0;
  format[format_index++] = '%';
  if (!right_justify)
    format[format_index++] = '-';
  if (pad_zeros)
    format[format_index++] = '0';
  if (width_left && width_right) {
    int i = 0;
    // Only allow 8-digit widths; this also prevents overflowing format[].
    while (i < 8 && width_left < width_right) {
      format[format_index++] = *width_left;
      width_left++;
      i++;
    }
  }

  // Parse a name (long or short) and canonicalize it into absolute_short_name.
  found_valid_name = false;
  parse_long_name = (**ptr == '{');
  if (parse_long_name)
    (*ptr)++; // skip initial left brace
  for (size_t i = 0; i < sizeof(__kmp_affinity_format_table) /
                             sizeof(__kmp_affinity_format_table[0]);
       ++i) {
    char short_name = __kmp_affinity_format_table[i].short_name;
    const char *long_name = __kmp_affinity_format_table[i].long_name;
    char field_format = __kmp_affinity_format_table[i].field_format;
    if (parse_long_name) {
      int length = KMP_STRLEN(long_name);
      if (strncmp(*ptr, long_name, length) == 0) {
        found_valid_name = true;
        (*ptr) += length; // skip the long name
      }
    } else if (**ptr == short_name) {
      found_valid_name = true;
      (*ptr)++; // skip the short name
    }
    if (found_valid_name) {
      format[format_index++] = field_format;
      format[format_index++] = '\0';
      absolute_short_name = short_name;
      break;
    }
  }
  if (parse_long_name) {
    if (**ptr != '}') {
      absolute_short_name = 0;
    } else {
      (*ptr)++; // skip over the right brace
    }
  }

  // Attempt to fill the buffer with the requested value.
  switch (absolute_short_name) {
  case 't':
    rc = __kmp_str_buf_print(field_buffer, format, __kmp_aux_get_team_num());
    break;
  case 'T':
    rc = __kmp_str_buf_print(field_buffer, format, __kmp_aux_get_num_teams());
    break;
  case 'L':
    rc = __kmp_str_buf_print(field_buffer, format, th->th.th_team->t.t_level);
    break;
  case 'n':
    rc = __kmp_str_buf_print(field_buffer, format, __kmp_tid_from_gtid(gtid));
    break;
  case 'H': {
    static const int BUFFER_SIZE = 256;
    char buf[BUFFER_SIZE];
    __kmp_expand_host_name(buf, BUFFER_SIZE);
    rc = __kmp_str_buf_print(field_buffer, format, buf);
  } break;
  case 'P':
    rc = __kmp_str_buf_print(field_buffer, format, getpid());
    break;
  case 'i':
    rc = __kmp_str_buf_print(field_buffer, format, __kmp_gettid());
    break;
  case 'N':
    rc = __kmp_str_buf_print(field_buffer, format, th->th.th_team->t.t_nproc);
    break;
  case 'a':
    field_value =
        __kmp_get_ancestor_thread_num(gtid, th->th.th_team->t.t_level - 1);
    rc = __kmp_str_buf_print(field_buffer, format, field_value);
    break;
#if KMP_AFFINITY_SUPPORTED
  case 'A': {
    kmp_str_buf_t buf;
    __kmp_str_buf_init(&buf);
    __kmp_affinity_str_buf_mask(&buf, th->th.th_affin_mask);
    rc = __kmp_str_buf_print(field_buffer, format, buf.str);
    __kmp_str_buf_free(&buf);
  } break;
#endif
  default:
    // According to the spec, if an implementation does not have info for a
    // field type, then "undefined" is printed.
    rc = __kmp_str_buf_print(field_buffer, "%s", "undefined");
    // Skip the field
    if (parse_long_name) {
      SKIP_TOKEN(*ptr);
      if (**ptr == '}')
        (*ptr)++;
    } else {
      (*ptr)++;
    }
  }

  KMP_ASSERT(format_index <= FORMAT_SIZE);
  return rc;
}
/* Return the number of characters needed to hold the affinity string (not
   including the null byte). The resulting string is printed to buffer, which
   the caller can then handle afterwards. */
size_t __kmp_aux_capture_affinity(int gtid, const char *format,
                                  kmp_str_buf_t *buffer) {
  const char *parse_ptr;
  size_t retval;
  const kmp_info_t *th;
  kmp_str_buf_t field;

  KMP_DEBUG_ASSERT(buffer);
  KMP_DEBUG_ASSERT(gtid >= 0);

  __kmp_str_buf_init(&field);
  __kmp_str_buf_clear(buffer);

  th = __kmp_threads[gtid];
  retval = 0;

  // If format is NULL or a zero-length string, use the affinity-format-var ICV.
  parse_ptr = format;
  if (parse_ptr == NULL || *parse_ptr == '\0') {
    parse_ptr = __kmp_affinity_format;
  }
  KMP_DEBUG_ASSERT(parse_ptr);

  while (*parse_ptr != '\0') {
    // Parse a field
    if (*parse_ptr == '%') {
      // Put the field in the buffer
      int rc = __kmp_aux_capture_affinity_field(gtid, th, &parse_ptr, &field);
      __kmp_str_buf_catbuf(buffer, &field);
      retval += rc;
    } else {
      // Put the literal character in the buffer
      __kmp_str_buf_cat(buffer, parse_ptr, 1);
      retval++;
      parse_ptr++;
    }
  }
  __kmp_str_buf_free(&field);
  return retval;
}
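
// Illustrative sketch (not part of the runtime): the capture routine scans a
// format string, expanding "%<field>" specifiers and copying everything else
// verbatim. The same scan-and-dispatch shape in miniature (invented names,
// only two fields handled):
#if 0
#include <string>

static std::string toy_capture(const char *fmt, int thread_num, int nthreads) {
  std::string out;
  for (const char *p = fmt; *p != '\0';) {
    if (*p == '%') {
      ++p;
      if (*p == '\0')
        break; // trailing '%': nothing to expand
      if (*p == 'n')
        out += std::to_string(thread_num);
      else if (*p == 'N')
        out += std::to_string(nthreads);
      else
        out += "undefined"; // unknown field, per the OpenMP display spec
      ++p;
    } else {
      out += *p++; // literal character
    }
  }
  return out;
}
// toy_capture("thread %n of %N", 2, 8) == "thread 2 of 8"
#endif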
void __kmp_aux_display_affinity(int gtid, const char *format) {
  kmp_str_buf_t buf;
  __kmp_str_buf_init(&buf);
  __kmp_aux_capture_affinity(gtid, format, &buf);
  __kmp_fprintf(kmp_out, "%s" KMP_END_OF_LINE, buf.str);
  __kmp_str_buf_free(&buf);
}
#endif // OMP_50_ENABLED

void __kmp_aux_set_blocktime(int arg, kmp_info_t *thread, int tid) {
  int blocktime = arg; /* argument is in milliseconds */
#if KMP_USE_MONITOR
  int bt_intervals;
#endif
  int bt_set;

  __kmp_save_internal_controls(thread);

  /* Normalize and set blocktime for the teams */
  if (blocktime < KMP_MIN_BLOCKTIME)
    blocktime = KMP_MIN_BLOCKTIME;
  else if (blocktime > KMP_MAX_BLOCKTIME)
    blocktime = KMP_MAX_BLOCKTIME;

  set__blocktime_team(thread->th.th_team, tid, blocktime);
  set__blocktime_team(thread->th.th_serial_team, 0, blocktime);

#if KMP_USE_MONITOR
  /* Calculate and set blocktime intervals for the teams */
  bt_intervals = KMP_INTERVALS_FROM_BLOCKTIME(blocktime, __kmp_monitor_wakeups);

  set__bt_intervals_team(thread->th.th_team, tid, bt_intervals);
  set__bt_intervals_team(thread->th.th_serial_team, 0, bt_intervals);
#endif

  /* Record that blocktime was explicitly set */
  bt_set = TRUE;

  set__bt_set_team(thread->th.th_team, tid, bt_set);
  set__bt_set_team(thread->th.th_serial_team, 0, bt_set);
#if KMP_USE_MONITOR
  KF_TRACE(10, ("kmp_set_blocktime: T#%d(%d:%d), blocktime=%d, "
                "bt_intervals=%d, monitor_updates=%d\n",
                __kmp_gtid_from_tid(tid, thread->th.th_team),
                thread->th.th_team->t.t_id, tid, blocktime, bt_intervals,
                __kmp_monitor_wakeups));
#else
  KF_TRACE(10, ("kmp_set_blocktime: T#%d(%d:%d), blocktime=%d\n",
                __kmp_gtid_from_tid(tid, thread->th.th_team),
                thread->th.th_team->t.t_id, tid, blocktime));
#endif
}
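
// Hedged usage sketch (not part of the runtime): __kmp_aux_set_blocktime() is
// what ultimately services the kmp_set_blocktime() extension and the
// KMP_BLOCKTIME environment variable. The example assumes the Intel/LLVM
// extension prototypes (kmp_set_blocktime, kmp_get_blocktime) are available
// from <omp.h>.
#if 0
#include <omp.h>
#include <stdio.h>

int main(void) {
  // With blocktime 0, worker threads go to sleep right after finishing
  // parallel work instead of spin-waiting; the runtime clamps the value to
  // [KMP_MIN_BLOCKTIME, KMP_MAX_BLOCKTIME] as in the code above.
  kmp_set_blocktime(0);

#pragma omp parallel
  {
#pragma omp single
    printf("blocktime now %d ms\n", kmp_get_blocktime());
  }
  return 0;
}
#endif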
void __kmp_aux_set_defaults(char const *str, int len) {
  if (!__kmp_init_serial) {
    __kmp_serial_initialize();
  }
  __kmp_env_initialize(str);

  if (__kmp_settings
#if OMP_40_ENABLED
      || __kmp_display_env || __kmp_display_env_verbose
#endif // OMP_40_ENABLED
      ) {
    __kmp_env_print();
  }
} // __kmp_aux_set_defaults
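
// Hedged usage sketch (not part of the runtime): __kmp_aux_set_defaults()
// backs the kmp_set_defaults() extension, which lets a program inject
// KMP_*/OMP_* style settings as if they came from the environment. The
// prototype below is the Intel/LLVM extension declared alongside <omp.h>;
// it is typically called before the first parallel region so the setting is
// still honored.
#if 0
#include <omp.h>

int main(void) {
  // Inject an environment-style setting programmatically.
  kmp_set_defaults("KMP_BLOCKTIME=0");

#pragma omp parallel
  { /* ... */ }
  return 0;
}
#endif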
PACKED_REDUCTION_METHOD_T
__kmp_determine_reduction_method(
    ident_t *loc, kmp_int32 global_tid, kmp_int32 num_vars, size_t reduce_size,
    void *reduce_data, void (*reduce_func)(void *lhs_data, void *rhs_data),
    kmp_critical_name *lck) {

  // Default: protect the reduction with a critical section
  PACKED_REDUCTION_METHOD_T retval;
  int team_size;

  KMP_DEBUG_ASSERT(loc);
  KMP_DEBUG_ASSERT(lck);

#define FAST_REDUCTION_ATOMIC_METHOD_GENERATED                                 \
  ((loc->flags & (KMP_IDENT_ATOMIC_REDUCE)) == (KMP_IDENT_ATOMIC_REDUCE))
#define FAST_REDUCTION_TREE_METHOD_GENERATED ((reduce_data) && (reduce_func))

  retval = critical_reduce_block;

  team_size = __kmp_get_team_num_threads(global_tid);
  if (team_size == 1) {
    retval = empty_reduce_block;
  } else {
    int atomic_available = FAST_REDUCTION_ATOMIC_METHOD_GENERATED;

#if KMP_ARCH_X86_64 || KMP_ARCH_PPC64 || KMP_ARCH_AARCH64 || KMP_ARCH_MIPS64

#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||     \
    KMP_OS_OPENBSD || KMP_OS_WINDOWS || KMP_OS_DARWIN || KMP_OS_HURD ||        \
    KMP_OS_KFREEBSD
    int teamsize_cutoff = 4;

#if KMP_MIC_SUPPORTED
    if (__kmp_mic_type != non_mic) {
      teamsize_cutoff = 8;
    }
#endif
    int tree_available = FAST_REDUCTION_TREE_METHOD_GENERATED;
    if (tree_available) {
      if (team_size <= teamsize_cutoff) {
        if (atomic_available) {
          retval = atomic_reduce_block;
        }
      } else {
        retval = TREE_REDUCE_BLOCK_WITH_REDUCTION_BARRIER;
      }
    } else if (atomic_available) {
      retval = atomic_reduce_block;
    }
#else
#error "Unknown or unsupported OS"
#endif

#elif KMP_ARCH_X86 || KMP_ARCH_ARM || KMP_ARCH_AARCH || KMP_ARCH_MIPS

#if KMP_OS_LINUX || KMP_OS_WINDOWS || KMP_OS_HURD || KMP_OS_KFREEBSD
    // Basic tuning
    if (atomic_available) {
      if (num_vars <= 2) {
        retval = atomic_reduce_block;
      }
    } // otherwise: use critical section
#elif KMP_OS_DARWIN
    int tree_available = FAST_REDUCTION_TREE_METHOD_GENERATED;
    if (atomic_available && (num_vars <= 3)) {
      retval = atomic_reduce_block;
    } else if (tree_available) {
      if ((reduce_size > (9 * sizeof(kmp_real64))) &&
          (reduce_size < (2000 * sizeof(kmp_real64)))) {
        retval = TREE_REDUCE_BLOCK_WITH_PLAIN_BARRIER;
      }
    } // otherwise: use critical section
#else
#error "Unknown or unsupported OS"
#endif

#else
#error "Unknown or unsupported architecture"
#endif
  }

  // A user-forced reduction method overrides the heuristic choice above,
  // unless the team is serialized
  if (__kmp_force_reduction_method != reduction_method_not_defined &&
      team_size != 1) {
    PACKED_REDUCTION_METHOD_T forced_retval = critical_reduce_block;

    int atomic_available, tree_available;

    switch ((forced_retval = __kmp_force_reduction_method)) {
    case critical_reduce_block:
      KMP_ASSERT(lck); // lck should be != 0
      break;

    case atomic_reduce_block:
      atomic_available = FAST_REDUCTION_ATOMIC_METHOD_GENERATED;
      if (!atomic_available) {
        KMP_WARNING(RedMethodNotSupported, "atomic");
        forced_retval = critical_reduce_block;
      }
      break;

    case tree_reduce_block:
      tree_available = FAST_REDUCTION_TREE_METHOD_GENERATED;
      if (!tree_available) {
        KMP_WARNING(RedMethodNotSupported, "tree");
        forced_retval = critical_reduce_block;
      } else {
#if KMP_FAST_REDUCTION_BARRIER
        forced_retval = TREE_REDUCE_BLOCK_WITH_REDUCTION_BARRIER;
#endif
      }
      break;

    default:
      KMP_ASSERT(0); // unsupported method specified
    }

    retval = forced_retval;
  }

  KA_TRACE(10, ("reduction method selected=%08x\n", retval));

#undef FAST_REDUCTION_TREE_METHOD_GENERATED
#undef FAST_REDUCTION_ATOMIC_METHOD_GENERATED

  return (retval);
}

// This function is for testing the set/get/determine reduce method
kmp_int32 __kmp_get_reduce_method(void) {
  return ((__kmp_entry_thread()->th.th_local.packed_reduction_method) >> 8);
}
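
// Hedged user-side sketch (not part of the runtime): the reduction clause
// below is lowered by the compiler into calls such as __kmpc_reduce_nowait()/
// __kmpc_end_reduce_nowait() (or the blocking variants), and the runtime picks
// critical/atomic/tree via __kmp_determine_reduction_method() based on team
// size, number of reduction variables, reduce_size, and the target arch/OS.
// A forced method (__kmp_force_reduction_method, e.g. via the runtime's
// KMP_FORCE_REDUCTION setting) overrides that heuristic, as coded above.
#if 0
#include <omp.h>
#include <stdio.h>

int main(void) {
  double sum = 0.0;
#pragma omp parallel for reduction(+ : sum)
  for (int i = 0; i < 1000; ++i) {
    sum += i * 0.5;
  }
  printf("sum=%f\n", sum);
  return 0;
}
#endif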