21 #include "kmp_error.h" 24 #include "kmp_stats.h" 28 #include "ompt-specific.h" 34 char const *traits_t<int>::spec =
"d";
35 char const *traits_t<unsigned int>::spec =
"u";
36 char const *traits_t<long long>::spec =
"lld";
37 char const *traits_t<unsigned long long>::spec =
"llu";
38 char const *traits_t<long>::spec =
"ld";
template <typename T>
static void __kmp_for_static_init(ident_t *loc, kmp_int32 global_tid,
                                  kmp_int32 schedtype, kmp_int32 *plastiter,
                                  T *plower, T *pupper,
                                  typename traits_t<T>::signed_t *pstride,
                                  typename traits_t<T>::signed_t incr,
                                  typename traits_t<T>::signed_t chunk
#if OMPT_SUPPORT && OMPT_OPTIONAL
                                  ,
                                  void *codeptr
#endif
) {
  KMP_COUNT_BLOCK(OMP_LOOP_STATIC);
  KMP_PUSH_PARTITIONED_TIMER(OMP_loop_static);
  KMP_PUSH_PARTITIONED_TIMER(OMP_loop_static_scheduling);

  typedef typename traits_t<T>::unsigned_t UT;
  typedef typename traits_t<T>::signed_t ST;
  kmp_int32 gtid = global_tid;
  kmp_uint32 tid;
  kmp_uint32 nth;
  UT trip_count;
  kmp_team_t *team;
  kmp_info_t *th = __kmp_threads[gtid];
#if OMPT_SUPPORT && OMPT_OPTIONAL
  ompt_team_info_t *team_info = NULL;
  ompt_task_info_t *task_info = NULL;
  ompt_work_type_t ompt_work_type = ompt_work_loop;

  static kmp_int8 warn = 0;

  if (ompt_enabled.ompt_callback_work) {
    // Only fully initialize variables needed by OMPT if OMPT is enabled.
    team_info = __ompt_get_teaminfo(0, NULL);
    task_info = __ompt_get_task_info_object(0);
    // Determine workshare type from the ident flags.
    if ((loc->flags & KMP_IDENT_WORK_LOOP) != 0) {
      ompt_work_type = ompt_work_loop;
    } else if ((loc->flags & KMP_IDENT_WORK_SECTIONS) != 0) {
      ompt_work_type = ompt_work_sections;
    } else if ((loc->flags & KMP_IDENT_WORK_DISTRIBUTE) != 0) {
      ompt_work_type = ompt_work_distribute;
    } else {
      // Warn once if the compiler did not mark the workshare type.
      kmp_int8 bool_res =
          KMP_COMPARE_AND_STORE_ACQ8(&warn, (kmp_int8)0, (kmp_int8)1);
      if (bool_res)
        KMP_WARNING(OmptOutdatedWorkshare);
    }
    KMP_DEBUG_ASSERT(ompt_work_type);
  }
#endif
  KMP_DEBUG_ASSERT(plastiter && plower && pupper && pstride);
  KE_TRACE(10, ("__kmpc_for_static_init called (%d)\n", global_tid));
#ifdef KMP_DEBUG
  {
    char *buff;
    // create format specifiers before the debug output
    buff = __kmp_str_format(
        "__kmpc_for_static_init: T#%%d sched=%%d liter=%%d iter=(%%%s,"
        " %%%s, %%%s) incr=%%%s chunk=%%%s signed?<%s>\n",
        traits_t<T>::spec, traits_t<T>::spec, traits_t<ST>::spec,
        traits_t<ST>::spec, traits_t<ST>::spec, traits_t<T>::spec);
    KD_TRACE(100, (buff, global_tid, schedtype, *plastiter, *plower, *pupper,
                   *pstride, incr, chunk));
    __kmp_str_free(&buff);
  }
#endif
  if (__kmp_env_consistency_check) {
    __kmp_push_workshare(global_tid, ct_pdo, loc);
    if (incr == 0) {
      __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrZeroProhibited, ct_pdo,
                            loc);
    }
  }
  /* special handling for zero-trip loops */
  if (incr > 0 ? (*pupper < *plower) : (*plower < *pupper)) {
    if (plastiter != NULL)
      *plastiter = FALSE;
    /* leave pupper and plower set to entire iteration space */
    *pstride = incr; /* value should never be used */
#ifdef KMP_DEBUG
    {
      char *buff;
      // create format specifiers before the debug output
      buff = __kmp_str_format("__kmpc_for_static_init:(ZERO TRIP) liter=%%d "
                              "lower=%%%s upper=%%%s stride = %%%s "
                              "signed?<%s>, loc = %%s\n",
                              traits_t<T>::spec, traits_t<T>::spec,
                              traits_t<ST>::spec, traits_t<T>::spec);
      KD_TRACE(100,
               (buff, *plastiter, *plower, *pupper, *pstride, loc->psource));
      __kmp_str_free(&buff);
    }
#endif
    KE_TRACE(10, ("__kmpc_for_static_init: T#%d return\n", global_tid));

#if OMPT_SUPPORT && OMPT_OPTIONAL
    if (ompt_enabled.ompt_callback_work) {
      ompt_callbacks.ompt_callback(ompt_callback_work)(
          ompt_work_type, ompt_scope_begin, &(team_info->parallel_data),
          &(task_info->task_data), 0, codeptr);
    }
#endif
    return;
  }
  if (schedtype > kmp_ord_upper) {
    // we are in DISTRIBUTE construct
    schedtype += kmp_sch_static -
                 kmp_distribute_static; // AC: convert to usual schedule type
    tid = th->th.th_team->t.t_master_tid;
    team = th->th.th_team->t.t_parent;
  } else {
    tid = __kmp_tid_from_gtid(global_tid);
    team = th->th.th_team;
  }
  /* determine if "for" loop is an active worksharing construct */
  if (team->t.t_serialized) {
    /* serialized parallel: this single thread executes all iterations */
    if (plastiter != NULL)
      *plastiter = TRUE;
    /* leave pupper and plower set to entire iteration space */
    *pstride =
        (incr > 0) ? (*pupper - *plower + 1) : (-(*plower - *pupper + 1));
#ifdef KMP_DEBUG
    {
      char *buff;
      // create format specifiers before the debug output
      buff = __kmp_str_format("__kmpc_for_static_init: (serial) liter=%%d "
                              "lower=%%%s upper=%%%s stride = %%%s\n",
                              traits_t<T>::spec, traits_t<T>::spec,
                              traits_t<ST>::spec);
      KD_TRACE(100, (buff, *plastiter, *plower, *pupper, *pstride));
      __kmp_str_free(&buff);
    }
#endif
    KE_TRACE(10, ("__kmpc_for_static_init: T#%d return\n", global_tid));

#if OMPT_SUPPORT && OMPT_OPTIONAL
    if (ompt_enabled.ompt_callback_work) {
      ompt_callbacks.ompt_callback(ompt_callback_work)(
          ompt_work_type, ompt_scope_begin, &(team_info->parallel_data),
          &(task_info->task_data), *pstride, codeptr);
    }
#endif
    return;
  }
  nth = team->t.t_nproc;
  if (nth == 1) {
    if (plastiter != NULL)
      *plastiter = TRUE;
    *pstride =
        (incr > 0) ? (*pupper - *plower + 1) : (-(*plower - *pupper + 1));
#ifdef KMP_DEBUG
    {
      char *buff;
      // create format specifiers before the debug output
      buff = __kmp_str_format("__kmpc_for_static_init: (serial) liter=%%d "
                              "lower=%%%s upper=%%%s stride = %%%s\n",
                              traits_t<T>::spec, traits_t<T>::spec,
                              traits_t<ST>::spec);
      KD_TRACE(100, (buff, *plastiter, *plower, *pupper, *pstride));
      __kmp_str_free(&buff);
    }
#endif
    KE_TRACE(10, ("__kmpc_for_static_init: T#%d return\n", global_tid));

#if OMPT_SUPPORT && OMPT_OPTIONAL
    if (ompt_enabled.ompt_callback_work) {
      ompt_callbacks.ompt_callback(ompt_callback_work)(
          ompt_work_type, ompt_scope_begin, &(team_info->parallel_data),
          &(task_info->task_data), *pstride, codeptr);
    }
#endif
    return;
  }
  /* compute trip count */
  if (incr == 1) {
    trip_count = *pupper - *plower + 1;
  } else if (incr == -1) {
    trip_count = *plower - *pupper + 1;
  } else if (incr > 0) {
    // upper-lower can exceed the limit of signed type
    trip_count = (UT)(*pupper - *plower) / incr + 1;
  } else {
    trip_count = (UT)(*plower - *pupper) / (-incr) + 1;
  }
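  // Worked example: lower=0, upper=9, incr=2 iterates 0,2,4,6,8, and
  // trip_count = (9 - 0) / 2 + 1 = 5. The cast to UT matters: for bounds
  // spanning most of the signed range, upper - lower overflows the signed
  // type but is well defined in the unsigned type.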
  if (__kmp_env_consistency_check) {
    /* tripcount overflow? */
    if (trip_count == 0 && *pupper != *plower) {
      __kmp_error_construct(kmp_i18n_msg_CnsIterationRangeTooLarge, ct_pdo,
                            loc);
    }
  }
  /* compute remaining parameters */
  switch (schedtype) {
  case kmp_sch_static: {
    if (trip_count < nth) {
      KMP_DEBUG_ASSERT(
          __kmp_static == kmp_sch_static_greedy ||
          __kmp_static ==
              kmp_sch_static_balanced); // Unknown static scheduling type.
      if (tid < trip_count) {
        *pupper = *plower = *plower + tid * incr;
      } else {
        *plower = *pupper + incr; // no iterations for this thread
      }
      if (plastiter != NULL)
        *plastiter = (tid == trip_count - 1);
    } else {
      if (__kmp_static == kmp_sch_static_balanced) {
        UT small_chunk = trip_count / nth;
        UT extras = trip_count % nth;
        *plower += incr * (tid * small_chunk + (tid < extras ? tid : extras));
        *pupper = *plower + small_chunk * incr - (tid < extras ? 0 : incr);
        if (plastiter != NULL)
          *plastiter = (tid == nth - 1);
      } else {
        T big_chunk_inc_count =
            (trip_count / nth + ((trip_count % nth) ? 1 : 0)) * incr;
        T old_upper = *pupper;

        KMP_DEBUG_ASSERT(__kmp_static == kmp_sch_static_greedy);
        // Unknown static scheduling type.

        *plower += tid * big_chunk_inc_count;
        *pupper = *plower + big_chunk_inc_count - incr;
        if (incr > 0) {
          if (*pupper < *plower)
            *pupper = traits_t<T>::max_value;
          if (plastiter != NULL)
            *plastiter = *plower <= old_upper && *pupper > old_upper - incr;
          if (*pupper > old_upper)
            *pupper = old_upper; // tracker C73258
        } else {
          if (*pupper > *plower)
            *pupper = traits_t<T>::min_value;
          if (plastiter != NULL)
            *plastiter = *plower >= old_upper && *pupper < old_upper - incr;
          if (*pupper < old_upper)
            *pupper = old_upper; // tracker C73258
        }
      }
    }
    *pstride = trip_count;
    break;
  }
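  // Worked example for the kmp_sch_static flavors above with trip_count=10,
  // nth=4, incr=1: balanced gives small_chunk=2, extras=2, so threads get
  // 3,3,2,2 iterations; greedy gives big_chunk=ceil(10/4)=3, so threads get
  // 3,3,3,1 (the last thread's raw upper bound lands past the loop end and
  // is clamped back to old_upper).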
  case kmp_sch_static_chunked: {
    ST span;
    if (chunk < 1)
      chunk = 1;
    span = chunk * incr;
    *pstride = span * nth;
    *plower = *plower + (span * tid);
    *pupper = *plower + span - incr;
    if (plastiter != NULL)
      *plastiter = (tid == ((trip_count - 1) / (UT)chunk) % nth);
    break;
  }
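  // Worked example: chunk=2, nth=3, incr=1, iterations 0..9. Thread tid first
  // owns [2*tid, 2*tid+1] and then advances by *pstride=6, so chunks go
  // t0:[0,1], t1:[2,3], t2:[4,5], t0:[6,7], t1:[8,9]; the last chunk owner is
  // ((10-1)/2) % 3 = 1, matching the *plastiter test above.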
  case kmp_sch_static_balanced_chunked: {
    T old_upper = *pupper;
    // round up to make sure the chunk is enough to cover all iterations
    UT span = (trip_count + nth - 1) / nth;

    // perform chunk adjustment
    chunk = (span + chunk - 1) & ~(chunk - 1);

    span = chunk * incr;
    *plower = *plower + (span * tid);
    *pupper = *plower + span - incr;
    if (incr > 0) {
      if (*pupper > old_upper)
        *pupper = old_upper;
    } else if (*pupper < old_upper) {
      *pupper = old_upper;
    }
    if (plastiter != NULL)
      *plastiter = (tid == ((trip_count - 1) / (UT)chunk));
    break;
  }
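  // Note: "(span + chunk - 1) & ~(chunk - 1)" rounds span up to a multiple of
  // chunk; the bit trick is exact only when chunk is a power of two, which
  // this schedule evidently assumes (chunk here typically being a SIMD width).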
  default:
    KMP_ASSERT2(0, "__kmpc_for_static_init: unknown scheduling type");
    break;
  }
#if USE_ITT_BUILD
  // Report loop metadata
  if (KMP_MASTER_TID(tid) && __itt_metadata_add_ptr &&
      __kmp_forkjoin_frames_mode == 3 && th->th.th_teams_microtask == NULL &&
      team->t.t_active_level == 1) {
    kmp_uint64 cur_chunk = chunk;
    // Calculate chunk in case it was not specified; it is specified for
    // kmp_sch_static_chunked
    if (schedtype == kmp_sch_static) {
      cur_chunk = trip_count / nth + ((trip_count % nth) ? 1 : 0);
    }
    // 0 - "static" schedule
    __kmp_itt_metadata_loop(loc, 0, trip_count, cur_chunk);
  }
#endif
#ifdef KMP_DEBUG
  {
    char *buff;
    // create format specifiers before the debug output
    buff = __kmp_str_format("__kmpc_for_static_init: liter=%%d lower=%%%s "
                            "upper=%%%s stride = %%%s signed?<%s>\n",
                            traits_t<T>::spec, traits_t<T>::spec,
                            traits_t<ST>::spec, traits_t<T>::spec);
    KD_TRACE(100, (buff, *plastiter, *plower, *pupper, *pstride));
    __kmp_str_free(&buff);
  }
#endif
  KE_TRACE(10, ("__kmpc_for_static_init: T#%d return\n", global_tid));

#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.ompt_callback_work) {
    ompt_callbacks.ompt_callback(ompt_callback_work)(
        ompt_work_type, ompt_scope_begin, &(team_info->parallel_data),
        &(task_info->task_data), trip_count, codeptr);
  }
#endif
#if KMP_STATS_ENABLED
  {
    kmp_int64 t;
    kmp_int64 u = (kmp_int64)(*pupper);
    kmp_int64 l = (kmp_int64)(*plower);
    kmp_int64 i = (kmp_int64)incr;
    /* compute trip count */
    if (i == 1) {
      t = u - l + 1;
    } else if (i == -1) {
      t = l - u + 1;
    } else if (i > 0) {
      t = (u - l) / i + 1;
    } else {
      t = (l - u) / (-i) + 1;
    }
    KMP_COUNT_VALUE(OMP_loop_static_iterations, t);
    KMP_POP_PARTITIONED_TIMER();
  }
#endif
  return;
}
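/* Illustrative only: a sketch of how a compiler might lower
   "#pragma omp for schedule(static)" onto the entry points defined below;
   "loc", "gtid", N and body() are placeholders, not part of this file.

     kmp_int32 last = 0, lower = 0, upper = N - 1, stride = 1;
     __kmpc_for_static_init_4(&loc, gtid, kmp_sch_static, &last, &lower,
                              &upper, &stride, 1, 0);
     for (kmp_int32 i = lower; i <= upper; ++i)
       body(i);
     __kmpc_for_static_fini(&loc, gtid);
*/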
template <typename T>
static void __kmp_dist_for_static_init(ident_t *loc, kmp_int32 gtid,
                                       kmp_int32 schedule, kmp_int32 *plastiter,
                                       T *plower, T *pupper, T *pupperDist,
                                       typename traits_t<T>::signed_t *pstride,
                                       typename traits_t<T>::signed_t incr,
                                       typename traits_t<T>::signed_t chunk) {
  KMP_COUNT_BLOCK(OMP_DISTRIBUTE);
  typedef typename traits_t<T>::unsigned_t UT;
  typedef typename traits_t<T>::signed_t ST;
  kmp_uint32 tid;
  kmp_uint32 nth;
  kmp_uint32 team_id;
  kmp_uint32 nteams;
  UT trip_count;
  kmp_team_t *team;
  kmp_info_t *th;
  KMP_DEBUG_ASSERT(plastiter && plower && pupper && pupperDist && pstride);
  KE_TRACE(10, ("__kmpc_dist_for_static_init called (%d)\n", gtid));
#ifdef KMP_DEBUG
  {
    char *buff;
    // create format specifiers before the debug output
    buff = __kmp_str_format(
        "__kmpc_dist_for_static_init: T#%%d schedLoop=%%d liter=%%d "
        "iter=(%%%s, %%%s, %%%s) chunk=%%%s signed?<%s>\n",
        traits_t<T>::spec, traits_t<T>::spec, traits_t<ST>::spec,
        traits_t<ST>::spec, traits_t<T>::spec);
    KD_TRACE(100,
             (buff, gtid, schedule, *plastiter, *plower, *pupper, incr, chunk));
    __kmp_str_free(&buff);
  }
#endif
  if (__kmp_env_consistency_check) {
    __kmp_push_workshare(gtid, ct_pdo, loc);
    if (incr == 0) {
      __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrZeroProhibited, ct_pdo,
                            loc);
    }
    if (incr > 0 ? (*pupper < *plower) : (*plower < *pupper)) {
      // The loop is illegal: zero-trip loops are prohibited here, and some
      // illegal increments can only be detected at run time.
      __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrIllegal, ct_pdo, loc);
    }
  }
  tid = __kmp_tid_from_gtid(gtid);
  th = __kmp_threads[gtid];
  nth = th->th.th_team_nproc;
  team = th->th.th_team;
  KMP_DEBUG_ASSERT(th->th.th_teams_microtask); // we are in the teams construct
  nteams = th->th.th_teams_size.nteams;
  team_id = team->t.t_master_tid;
  KMP_DEBUG_ASSERT(nteams == team->t.t_parent->t.t_nproc);
  // compute global trip count
  if (incr == 1) {
    trip_count = *pupper - *plower + 1;
  } else if (incr == -1) {
    trip_count = *plower - *pupper + 1;
  } else if (incr > 0) {
    // upper-lower can exceed the limit of signed type
    trip_count = (UT)(*pupper - *plower) / incr + 1;
  } else {
    trip_count = (UT)(*plower - *pupper) / (-incr) + 1;
  }
  *pstride = *pupper - *plower; // just in case (can be unused)
  if (trip_count <= nteams) {
    KMP_DEBUG_ASSERT(
        __kmp_static == kmp_sch_static_greedy ||
        __kmp_static ==
            kmp_sch_static_balanced); // Unknown static scheduling type.
    // only primary threads of some teams get single iteration,
    // other threads get nothing
    if (team_id < trip_count && tid == 0) {
      *pupper = *pupperDist = *plower = *plower + team_id * incr;
    } else {
      *pupperDist = *pupper;
      *plower = *pupper + incr; // compiler should skip loop body
    }
    if (plastiter != NULL)
      *plastiter = (tid == 0 && team_id == trip_count - 1);
  } else {
    // Get the team's chunk first (each team gets at most one chunk)
    if (__kmp_static == kmp_sch_static_balanced) {
      UT chunkD = trip_count / nteams;
      UT extras = trip_count % nteams;
      *plower +=
          incr * (team_id * chunkD + (team_id < extras ? team_id : extras));
      *pupperDist = *plower + chunkD * incr - (team_id < extras ? 0 : incr);
      if (plastiter != NULL)
        *plastiter = (team_id == nteams - 1);
    } else {
      T chunk_inc_count =
          (trip_count / nteams + ((trip_count % nteams) ? 1 : 0)) * incr;
      T upper = *pupper;
      KMP_DEBUG_ASSERT(__kmp_static == kmp_sch_static_greedy);
      // Unknown static scheduling type.
      *plower += team_id * chunk_inc_count;
      *pupperDist = *plower + chunk_inc_count - incr;
      // Check/correct bounds if needed
      if (incr > 0) {
        if (*pupperDist < *plower)
          *pupperDist = traits_t<T>::max_value;
        if (plastiter != NULL)
          *plastiter = *plower <= upper && *pupperDist > upper - incr;
        if (*pupperDist > upper)
          *pupperDist = upper; // tracker C73258
        if (*plower > *pupperDist) {
          *pupper = *pupperDist; // no iterations available for the team
          goto end;
        }
      } else {
        if (*pupperDist > *plower)
          *pupperDist = traits_t<T>::min_value;
        if (plastiter != NULL)
          *plastiter = *plower >= upper && *pupperDist < upper - incr;
        if (*pupperDist < upper)
          *pupperDist = upper; // tracker C73258
        if (*plower < *pupperDist) {
          *pupper = *pupperDist; // no iterations available for the team
          goto end;
        }
      }
    }
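    // Worked example for the team-level split above: trip_count=100,
    // nteams=4, incr=1: balanced gives chunkD=25, extras=0, so team t covers
    // [25*t, 25*t+24]; greedy gives chunk_inc_count=25 here as well. The two
    // flavors differ only when nteams does not divide trip_count (e.g. 10
    // over 4 teams: balanced 3,3,2,2 vs greedy 3,3,3,1).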
    // Get the parallel loop chunk now (for thread)
    // compute trip count for the team's chunk
    if (incr == 1) {
      trip_count = *pupperDist - *plower + 1;
    } else if (incr == -1) {
      trip_count = *plower - *pupperDist + 1;
    } else if (incr > 1) {
      // upper-lower can exceed the limit of signed type
      trip_count = (UT)(*pupperDist - *plower) / incr + 1;
    } else {
      trip_count = (UT)(*plower - *pupperDist) / (-incr) + 1;
    }
    KMP_DEBUG_ASSERT(trip_count);
    switch (schedule) {
    case kmp_sch_static: {
      if (trip_count <= nth) {
        KMP_DEBUG_ASSERT(
            __kmp_static == kmp_sch_static_greedy ||
            __kmp_static ==
                kmp_sch_static_balanced); // Unknown static scheduling type.
        if (tid < trip_count)
          *pupper = *plower = *plower + tid * incr;
        else
          *plower = *pupper + incr; // no iterations for this thread
        if (plastiter != NULL)
          if (*plastiter != 0 && !(tid == trip_count - 1))
            *plastiter = 0;
      } else {
        if (__kmp_static == kmp_sch_static_balanced) {
          UT chunkL = trip_count / nth;
          UT extras = trip_count % nth;
          *plower += incr * (tid * chunkL + (tid < extras ? tid : extras));
          *pupper = *plower + chunkL * incr - (tid < extras ? 0 : incr);
          if (plastiter != NULL)
            if (*plastiter != 0 && !(tid == nth - 1))
              *plastiter = 0;
        } else {
          T chunk_inc_count =
              (trip_count / nth + ((trip_count % nth) ? 1 : 0)) * incr;
          T upper = *pupperDist;
          KMP_DEBUG_ASSERT(__kmp_static == kmp_sch_static_greedy);
          // Unknown static scheduling type.
          *plower += tid * chunk_inc_count;
          *pupper = *plower + chunk_inc_count - incr;
          if (incr > 0) {
            if (*pupper < *plower)
              *pupper = traits_t<T>::max_value;
            if (plastiter != NULL)
              if (*plastiter != 0 &&
                  !(*plower <= upper && *pupper > upper - incr))
                *plastiter = 0;
            if (*pupper > upper)
              *pupper = upper; // tracker C73258
          } else {
            if (*pupper > *plower)
              *pupper = traits_t<T>::min_value;
            if (plastiter != NULL)
              if (*plastiter != 0 &&
                  !(*plower >= upper && *pupper < upper - incr))
                *plastiter = 0;
            if (*pupper < upper)
              *pupper = upper; // tracker C73258
          }
        }
      }
      break;
    }
    case kmp_sch_static_chunked: {
      ST span;
      if (chunk < 1)
        chunk = 1;
      span = chunk * incr;
      *pstride = span * nth;
      *plower = *plower + (span * tid);
      *pupper = *plower + span - incr;
      if (plastiter != NULL)
        if (*plastiter != 0 && !(tid == ((trip_count - 1) / (UT)chunk) % nth))
          *plastiter = 0;
      break;
    }
    default:
      KMP_ASSERT2(0,
                  "__kmpc_dist_for_static_init: unknown loop scheduling type");
      break;
    }
  }
end:;
#ifdef KMP_DEBUG
  {
    char *buff;
    // create format specifiers before the debug output
    buff = __kmp_str_format(
        "__kmpc_dist_for_static_init: last=%%d lo=%%%s up=%%%s upDist=%%%s "
        "stride=%%%s signed?<%s>\n",
        traits_t<T>::spec, traits_t<T>::spec, traits_t<T>::spec,
        traits_t<ST>::spec, traits_t<T>::spec);
    KD_TRACE(100, (buff, *plastiter, *plower, *pupper, *pupperDist, *pstride));
    __kmp_str_free(&buff);
  }
#endif
  KE_TRACE(10, ("__kmpc_dist_for_static_init: T#%d return\n", gtid));
  return;
}
template <typename T>
static void __kmp_team_static_init(ident_t *loc, kmp_int32 gtid,
                                   kmp_int32 *p_last, T *p_lb, T *p_ub,
                                   typename traits_t<T>::signed_t *p_st,
                                   typename traits_t<T>::signed_t incr,
                                   typename traits_t<T>::signed_t chunk) {
  // The routine returns the first chunk distributed to the team and the
  // stride for next chunks calculation. The last-iteration flag is set for
  // the team that will execute the last iteration of the loop.
  // The routine is called for dist_schedule(static, chunk) only.
  typedef typename traits_t<T>::unsigned_t UT;
  typedef typename traits_t<T>::signed_t ST;
  kmp_uint32 team_id;
  kmp_uint32 nteams;
  UT trip_count;
  T lower;
  T upper;
  ST span;
  kmp_team_t *team;
  kmp_info_t *th;
  KMP_DEBUG_ASSERT(p_last && p_lb && p_ub && p_st);
  KE_TRACE(10, ("__kmp_team_static_init called (%d)\n", gtid));
#ifdef KMP_DEBUG
  {
    char *buff;
    // create format specifiers before the debug output
    buff = __kmp_str_format("__kmp_team_static_init enter: T#%%d liter=%%d "
                            "iter=(%%%s, %%%s, %%%s) chunk %%%s; signed?<%s>\n",
                            traits_t<T>::spec, traits_t<T>::spec,
                            traits_t<ST>::spec, traits_t<ST>::spec,
                            traits_t<T>::spec);
    KD_TRACE(100, (buff, gtid, *p_last, *p_lb, *p_ub, *p_st, chunk));
    __kmp_str_free(&buff);
  }
#endif
  lower = *p_lb;
  upper = *p_ub;
  if (__kmp_env_consistency_check) {
    if (incr == 0) {
      __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrZeroProhibited, ct_pdo,
                            loc);
    }
    if (incr > 0 ? (upper < lower) : (lower < upper)) {
      // The loop is illegal: zero-trip loops are prohibited here, and some
      // illegal increments can only be detected at run time.
      __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrIllegal, ct_pdo, loc);
    }
  }
  th = __kmp_threads[gtid];
  team = th->th.th_team;
  KMP_DEBUG_ASSERT(th->th.th_teams_microtask); // we are in the teams construct
  nteams = th->th.th_teams_size.nteams;
  team_id = team->t.t_master_tid;
  KMP_DEBUG_ASSERT(nteams == team->t.t_parent->t.t_nproc);
  // compute trip count
  if (incr == 1) {
    trip_count = upper - lower + 1;
  } else if (incr == -1) {
    trip_count = lower - upper + 1;
  } else if (incr > 0) {
    // upper-lower can exceed the limit of signed type
    trip_count = (UT)(upper - lower) / incr + 1;
  } else {
    trip_count = (UT)(lower - upper) / (-incr) + 1;
  }
  if (chunk < 1)
    chunk = 1;
  span = chunk * incr;
  *p_st = span * nteams;
  *p_lb = lower + (span * team_id);
  *p_ub = *p_lb + span - incr;
  if (p_last != NULL)
    *p_last = (team_id == ((trip_count - 1) / (UT)chunk) % nteams);
  // Correct upper bound if needed
  if (incr > 0) {
    if (*p_ub < *p_lb) // overflow?
      *p_ub = traits_t<T>::max_value;
    if (*p_ub > upper)
      *p_ub = upper; // tracker C73258
  } else { // incr < 0
    if (*p_ub > *p_lb)
      *p_ub = traits_t<T>::min_value;
    if (*p_ub < upper)
      *p_ub = upper; // tracker C73258
  }
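  // Worked example: lower=0, upper=9, incr=1, chunk=2, nteams=3: span=2 and
  // *p_st=6, so team 0 starts at 0, team 1 at 2, team 2 at 4. The last chunk
  // [8,9] belongs to team ((10-1)/2) % 3 = 1 (its second chunk, 2+6=8), so
  // only team 1 gets *p_last set.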
#ifdef KMP_DEBUG
  {
    char *buff;
    // create format specifiers before the debug output
    buff = __kmp_str_format("__kmp_team_static_init exit: T#%%d team%%u "
                            "liter=%%d iter=(%%%s, %%%s, %%%s) chunk %%%s\n",
                            traits_t<T>::spec, traits_t<T>::spec,
                            traits_t<ST>::spec, traits_t<ST>::spec);
    KD_TRACE(100, (buff, gtid, team_id, *p_last, *p_lb, *p_ub, *p_st, chunk));
    __kmp_str_free(&buff);
  }
#endif
}
//------------------------------------------------------------------------
extern "C" {
/*!
@ingroup WORK_SHARING
@param loc Source code location
@param gtid Global thread id of this thread
@param schedtype Scheduling type
@param plastiter Pointer to the "last iteration" flag
@param plower Pointer to the lower bound
@param pupper Pointer to the upper bound
@param pstride Pointer to the stride
@param incr Loop increment
@param chunk The chunk size

Each of the four functions here are identical apart from the argument types.

The functions compute the upper and lower bounds and stride to be used for the
set of iterations to be executed by the current thread from the statically
scheduled loop that is described by the initial values of the bounds, stride,
increment and chunk size.

@{
*/
void __kmpc_for_static_init_4(ident_t *loc, kmp_int32 gtid, kmp_int32 schedtype,
                              kmp_int32 *plastiter, kmp_int32 *plower,
                              kmp_int32 *pupper, kmp_int32 *pstride,
                              kmp_int32 incr, kmp_int32 chunk) {
  __kmp_for_static_init<kmp_int32>(loc, gtid, schedtype, plastiter, plower,
                                   pupper, pstride, incr, chunk
#if OMPT_SUPPORT && OMPT_OPTIONAL
                                   ,
                                   OMPT_GET_RETURN_ADDRESS(0)
#endif
  );
}

/*!
See @ref __kmpc_for_static_init_4
*/
void __kmpc_for_static_init_4u(ident_t *loc, kmp_int32 gtid,
                               kmp_int32 schedtype, kmp_int32 *plastiter,
                               kmp_uint32 *plower, kmp_uint32 *pupper,
                               kmp_int32 *pstride, kmp_int32 incr,
                               kmp_int32 chunk) {
  __kmp_for_static_init<kmp_uint32>(loc, gtid, schedtype, plastiter, plower,
                                    pupper, pstride, incr, chunk
#if OMPT_SUPPORT && OMPT_OPTIONAL
                                    ,
                                    OMPT_GET_RETURN_ADDRESS(0)
#endif
  );
}

/*!
See @ref __kmpc_for_static_init_4
*/
void __kmpc_for_static_init_8(ident_t *loc, kmp_int32 gtid, kmp_int32 schedtype,
                              kmp_int32 *plastiter, kmp_int64 *plower,
                              kmp_int64 *pupper, kmp_int64 *pstride,
                              kmp_int64 incr, kmp_int64 chunk) {
  __kmp_for_static_init<kmp_int64>(loc, gtid, schedtype, plastiter, plower,
                                   pupper, pstride, incr, chunk
#if OMPT_SUPPORT && OMPT_OPTIONAL
                                   ,
                                   OMPT_GET_RETURN_ADDRESS(0)
#endif
  );
}

/*!
See @ref __kmpc_for_static_init_4
*/
void __kmpc_for_static_init_8u(ident_t *loc, kmp_int32 gtid,
                               kmp_int32 schedtype, kmp_int32 *plastiter,
                               kmp_uint64 *plower, kmp_uint64 *pupper,
                               kmp_int64 *pstride, kmp_int64 incr,
                               kmp_int64 chunk) {
  __kmp_for_static_init<kmp_uint64>(loc, gtid, schedtype, plastiter, plower,
                                    pupper, pstride, incr, chunk
#if OMPT_SUPPORT && OMPT_OPTIONAL
                                    ,
                                    OMPT_GET_RETURN_ADDRESS(0)
#endif
  );
}
/*!
@}
*/
/*!
@ingroup WORK_SHARING
@param loc Source code location
@param gtid Global thread id of this thread
@param schedule Scheduling type for the parallel loop
@param plastiter Pointer to the "last iteration" flag
@param plower Pointer to the lower bound
@param pupper Pointer to the upper bound of loop chunk
@param pupperD Pointer to the upper bound of dist_chunk
@param pstride Pointer to the stride for parallel loop
@param incr Loop increment
@param chunk The chunk size for the parallel loop

Each of the four functions here are identical apart from the argument types.

The functions compute the upper and lower bounds and strides to be used for the
set of iterations to be executed by the current thread from the statically
scheduled loop that is described by the initial values of the bounds, strides,
increment and chunks for parallel loop and distribute constructs.

@{
*/
void __kmpc_dist_for_static_init_4(ident_t *loc, kmp_int32 gtid,
                                   kmp_int32 schedule, kmp_int32 *plastiter,
                                   kmp_int32 *plower, kmp_int32 *pupper,
                                   kmp_int32 *pupperD, kmp_int32 *pstride,
                                   kmp_int32 incr, kmp_int32 chunk) {
  __kmp_dist_for_static_init<kmp_int32>(loc, gtid, schedule, plastiter, plower,
                                        pupper, pupperD, pstride, incr, chunk);
}

/*!
See @ref __kmpc_dist_for_static_init_4
*/
void __kmpc_dist_for_static_init_4u(ident_t *loc, kmp_int32 gtid,
                                    kmp_int32 schedule, kmp_int32 *plastiter,
                                    kmp_uint32 *plower, kmp_uint32 *pupper,
                                    kmp_uint32 *pupperD, kmp_int32 *pstride,
                                    kmp_int32 incr, kmp_int32 chunk) {
  __kmp_dist_for_static_init<kmp_uint32>(loc, gtid, schedule, plastiter,
                                         plower, pupper, pupperD, pstride,
                                         incr, chunk);
}

/*!
See @ref __kmpc_dist_for_static_init_4
*/
void __kmpc_dist_for_static_init_8(ident_t *loc, kmp_int32 gtid,
                                   kmp_int32 schedule, kmp_int32 *plastiter,
                                   kmp_int64 *plower, kmp_int64 *pupper,
                                   kmp_int64 *pupperD, kmp_int64 *pstride,
                                   kmp_int64 incr, kmp_int64 chunk) {
  __kmp_dist_for_static_init<kmp_int64>(loc, gtid, schedule, plastiter, plower,
                                        pupper, pupperD, pstride, incr, chunk);
}

/*!
See @ref __kmpc_dist_for_static_init_4
*/
void __kmpc_dist_for_static_init_8u(ident_t *loc, kmp_int32 gtid,
                                    kmp_int32 schedule, kmp_int32 *plastiter,
                                    kmp_uint64 *plower, kmp_uint64 *pupper,
                                    kmp_uint64 *pupperD, kmp_int64 *pstride,
                                    kmp_int64 incr, kmp_int64 chunk) {
  __kmp_dist_for_static_init<kmp_uint64>(loc, gtid, schedule, plastiter,
                                         plower, pupper, pupperD, pstride,
                                         incr, chunk);
}
/*!
@}
*/
/*!
@ingroup WORK_SHARING
@param loc Source code location
@param gtid Global thread id of this thread
@param p_last Pointer to the "last iteration" flag
@param p_lb Pointer to the lower bound
@param p_ub Pointer to the upper bound
@param p_st Pointer to the stride
@param incr Loop increment
@param chunk The chunk size

The functions compute the upper and lower bounds and stride to be used for the
set of iterations to be executed by the current team from the statically
scheduled loop that is described by the initial values of the bounds, stride,
increment and chunk for the distribute construct. These functions are all
identical apart from the types of the arguments.

@{
*/
void __kmpc_team_static_init_4(ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last,
                               kmp_int32 *p_lb, kmp_int32 *p_ub,
                               kmp_int32 *p_st, kmp_int32 incr,
                               kmp_int32 chunk) {
  KMP_DEBUG_ASSERT(__kmp_init_serial);
  __kmp_team_static_init<kmp_int32>(loc, gtid, p_last, p_lb, p_ub, p_st, incr,
                                    chunk);
}

/*!
See @ref __kmpc_team_static_init_4
*/
void __kmpc_team_static_init_4u(ident_t *loc, kmp_int32 gtid,
                                kmp_int32 *p_last, kmp_uint32 *p_lb,
                                kmp_uint32 *p_ub, kmp_int32 *p_st,
                                kmp_int32 incr, kmp_int32 chunk) {
  KMP_DEBUG_ASSERT(__kmp_init_serial);
  __kmp_team_static_init<kmp_uint32>(loc, gtid, p_last, p_lb, p_ub, p_st, incr,
                                     chunk);
}

/*!
See @ref __kmpc_team_static_init_4
*/
void __kmpc_team_static_init_8(ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last,
                               kmp_int64 *p_lb, kmp_int64 *p_ub,
                               kmp_int64 *p_st, kmp_int64 incr,
                               kmp_int64 chunk) {
  KMP_DEBUG_ASSERT(__kmp_init_serial);
  __kmp_team_static_init<kmp_int64>(loc, gtid, p_last, p_lb, p_ub, p_st, incr,
                                    chunk);
}

/*!
See @ref __kmpc_team_static_init_4
*/
void __kmpc_team_static_init_8u(ident_t *loc, kmp_int32 gtid,
                                kmp_int32 *p_last, kmp_uint64 *p_lb,
                                kmp_uint64 *p_ub, kmp_int64 *p_st,
                                kmp_int64 incr, kmp_int64 chunk) {
  KMP_DEBUG_ASSERT(__kmp_init_serial);
  __kmp_team_static_init<kmp_uint64>(loc, gtid, p_last, p_lb, p_ub, p_st, incr,
                                     chunk);
}
/*!
@}
*/
} // extern "C"
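/* Illustrative only: a sketch of how "#pragma omp distribute
   dist_schedule(static, chunk)" might be lowered onto the team entry point;
   "loc", "gtid", N, chunk and body() are placeholders.

     kmp_int32 last = 0, lb = 0, ub = N - 1, st = 1;
     __kmpc_team_static_init_4(&loc, gtid, &last, &lb, &ub, &st, 1, chunk);
     for (kmp_int32 i = lb; i <= ub; ++i) // one chunk; advance by st for next
       body(i);
*/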