#include "ompt-specific.h"
template <> char const *traits_t<int>::spec = "d";
template <> char const *traits_t<unsigned int>::spec = "u";
template <> char const *traits_t<long long>::spec = "lld";
template <> char const *traits_t<unsigned long long>::spec = "llu";
template <> char const *traits_t<long>::spec = "ld";
#ifdef KMP_STATS_ENABLED
#define KMP_STATS_LOOP_END(stat)                                               \
  {                                                                            \
    kmp_int64 t;                                                               \
    kmp_int64 u = (kmp_int64)(*pupper);                                        \
    kmp_int64 l = (kmp_int64)(*plower);                                        \
    kmp_int64 i = (kmp_int64)incr;                                             \
    if (i == 1) {                                                              \
      t = u - l + 1;                                                           \
    } else if (i == -1) {                                                      \
      t = l - u + 1;                                                           \
    } else if (i > 0) {                                                        \
      t = (u - l) / i + 1;                                                     \
    } else {                                                                   \
      t = (l - u) / (-i) + 1;                                                  \
    }                                                                          \
    KMP_COUNT_VALUE(stat, t);                                                  \
    KMP_POP_PARTITIONED_TIMER();                                               \
  }
#else
#define KMP_STATS_LOOP_END(stat) /* Nothing */
#endif
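
// Worked example of the trip-count formula above: for a loop with lower
// bound l = 0, upper bound u = 9 and increment i = 2,
// t = (u - l) / i + 1 = 9 / 2 + 1 = 5, i.e. the iterations {0, 2, 4, 6, 8}.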
static ident_t loc_stub = {0, KMP_IDENT_KMPC, 0, 0, ";unknown;unknown;0;0;;"};
static inline void check_loc(ident_t *&loc) {
  if (loc == NULL)
    loc = &loc_stub; // may need to report location info to ittnotify
}
template <typename T>
static void __kmp_for_static_init(ident_t *loc, kmp_int32 global_tid,
                                  kmp_int32 schedtype, kmp_int32 *plastiter,
                                  T *plower, T *pupper,
                                  typename traits_t<T>::signed_t *pstride,
                                  typename traits_t<T>::signed_t incr,
                                  typename traits_t<T>::signed_t chunk
#if OMPT_SUPPORT && OMPT_OPTIONAL
                                  ,
                                  void *codeptr
#endif
) {
  KMP_PUSH_PARTITIONED_TIMER(OMP_loop_static);
  KMP_PUSH_PARTITIONED_TIMER(OMP_loop_static_scheduling);

  // Clear monotonic/nonmonotonic bits (the workshare code expects exact
  // schedule)
  schedtype = SCHEDULE_WITHOUT_MODIFIERS(schedtype);

  typedef typename traits_t<T>::unsigned_t UT;
  typedef typename traits_t<T>::signed_t ST;
  kmp_int32 gtid = global_tid;
  kmp_uint32 tid;
  kmp_uint32 nth;
  UT trip_count;
  kmp_team_t *team;
  __kmp_assert_valid_gtid(gtid);
  kmp_info_t *th = __kmp_threads[gtid];
#if OMPT_SUPPORT && OMPT_OPTIONAL
  ompt_team_info_t *team_info = NULL;
  ompt_task_info_t *task_info = NULL;
  ompt_work_t ompt_work_type = ompt_work_loop;

  static kmp_int8 warn = 0;

  if (ompt_enabled.ompt_callback_work || ompt_enabled.ompt_callback_dispatch) {
    // Only fully initialize variables needed by OMPT if OMPT is enabled.
    team_info = __ompt_get_teaminfo(0, NULL);
    task_info = __ompt_get_task_info_object(0);
    // Determine workshare type
    if (loc != NULL) {
      if ((loc->flags & KMP_IDENT_WORK_LOOP) != 0) {
        ompt_work_type = ompt_work_loop;
      } else if ((loc->flags & KMP_IDENT_WORK_SECTIONS) != 0) {
        ompt_work_type = ompt_work_sections;
      } else if ((loc->flags & KMP_IDENT_WORK_DISTRIBUTE) != 0) {
        ompt_work_type = ompt_work_distribute;
      } else {
        kmp_int8 bool_res =
            KMP_COMPARE_AND_STORE_ACQ8(&warn, (kmp_int8)0, (kmp_int8)1);
        if (bool_res)
          KMP_WARNING(OmptOutdatedWorkshare);
      }
      KMP_DEBUG_ASSERT(ompt_work_type);
    }
  }
#endif
  KMP_DEBUG_ASSERT(plastiter && plower && pupper && pstride);
  KE_TRACE(10, ("__kmpc_for_static_init called (%d)\n", global_tid));
#ifdef KMP_DEBUG
  {
    char *buff;
    // create format specifiers before the debug output
    buff = __kmp_str_format(
        "__kmpc_for_static_init: T#%%d sched=%%d liter=%%d iter=(%%%s,"
        " %%%s, %%%s) incr=%%%s chunk=%%%s signed?<%s>\n",
        traits_t<T>::spec, traits_t<T>::spec, traits_t<ST>::spec,
        traits_t<ST>::spec, traits_t<ST>::spec, traits_t<T>::spec);
    KD_TRACE(100, (buff, global_tid, schedtype, *plastiter, *plower, *pupper,
                   *pstride, incr, chunk));
    __kmp_str_free(&buff);
  }
#endif
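
// For T = kmp_int32 (spec "d") the format built above expands to:
//   "__kmpc_for_static_init: T#%d sched=%d liter=%d iter=(%d, %d, %d)
//    incr=%d chunk=%d signed?<d>"
// The doubled '%%' survives __kmp_str_format as a literal '%'.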
  if (__kmp_env_consistency_check) {
    __kmp_push_workshare(global_tid, ct_pdo, loc);
    if (incr == 0)
      __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrZeroProhibited, ct_pdo,
                            loc);
  }
  /* special handling for zero-trip loops */
  if (incr > 0 ? (*pupper < *plower) : (*plower < *pupper)) {
    if (plastiter != NULL)
      *plastiter = FALSE;
    /* leave pupper and plower set to entire iteration space */
    *pstride = incr; /* value should never be used */
#ifdef KMP_DEBUG
    {
      char *buff;
      // create format specifiers before the debug output
      buff = __kmp_str_format("__kmpc_for_static_init:(ZERO TRIP) liter=%%d "
                              "lower=%%%s upper=%%%s stride = %%%s "
                              "signed?<%s>, loc = %%s\n",
                              traits_t<T>::spec, traits_t<T>::spec,
                              traits_t<ST>::spec, traits_t<T>::spec);
      KD_TRACE(100,
               (buff, *plastiter, *plower, *pupper, *pstride, loc->psource));
      __kmp_str_free(&buff);
    }
#endif
    KE_TRACE(10, ("__kmpc_for_static_init: T#%d return\n", global_tid));

#if OMPT_SUPPORT && OMPT_OPTIONAL
    if (ompt_enabled.ompt_callback_work) {
      ompt_callbacks.ompt_callback(ompt_callback_work)(
          ompt_work_type, ompt_scope_begin, &(team_info->parallel_data),
          &(task_info->task_data), 0, codeptr);
    }
#endif
    KMP_STATS_LOOP_END(OMP_loop_static_iterations);
    return;
  }
  if (schedtype > kmp_ord_upper) {
    // we are in DISTRIBUTE construct
    schedtype += kmp_sch_static - kmp_distribute_static; // AAA
    if (th->th.th_team->t.t_serialized > 1) {
      tid = 0;
      team = th->th.th_team;
    } else {
      tid = th->th.th_team->t.t_master_tid;
      team = th->th.th_team->t.t_parent;
    }
  } else {
    tid = __kmp_tid_from_gtid(global_tid);
    team = th->th.th_team;
  }
  /* determine if "for" loop is an active worksharing construct */
  if (team->t.t_serialized) {
    /* serialized parallel, each thread executes whole iteration space */
    if (plastiter != NULL)
      *plastiter = TRUE;
    /* leave pupper and plower set to entire iteration space */
    *pstride =
        (incr > 0) ? (*pupper - *plower + 1) : (-(*plower - *pupper + 1));
#ifdef KMP_DEBUG
    {
      char *buff;
      // create format specifiers before the debug output
      buff = __kmp_str_format("__kmpc_for_static_init: (serial) liter=%%d "
                              "lower=%%%s upper=%%%s stride = %%%s\n",
                              traits_t<T>::spec, traits_t<T>::spec,
                              traits_t<ST>::spec);
      KD_TRACE(100, (buff, *plastiter, *plower, *pupper, *pstride));
      __kmp_str_free(&buff);
    }
#endif
    KE_TRACE(10, ("__kmpc_for_static_init: T#%d return\n", global_tid));

#if OMPT_SUPPORT && OMPT_OPTIONAL
    if (ompt_enabled.ompt_callback_work) {
      ompt_callbacks.ompt_callback(ompt_callback_work)(
          ompt_work_type, ompt_scope_begin, &(team_info->parallel_data),
          &(task_info->task_data), *pstride, codeptr);
    }
#endif
    KMP_STATS_LOOP_END(OMP_loop_static_iterations);
    return;
  }
  nth = team->t.t_nproc;
  if (nth == 1) {
    if (plastiter != NULL)
      *plastiter = TRUE;
    *pstride =
        (incr > 0) ? (*pupper - *plower + 1) : (-(*plower - *pupper + 1));
#ifdef KMP_DEBUG
    {
      char *buff;
      // create format specifiers before the debug output
      buff = __kmp_str_format("__kmpc_for_static_init: (serial) liter=%%d "
                              "lower=%%%s upper=%%%s stride = %%%s\n",
                              traits_t<T>::spec, traits_t<T>::spec,
                              traits_t<ST>::spec);
      KD_TRACE(100, (buff, *plastiter, *plower, *pupper, *pstride));
      __kmp_str_free(&buff);
    }
#endif
    KE_TRACE(10, ("__kmpc_for_static_init: T#%d return\n", global_tid));

#if OMPT_SUPPORT && OMPT_OPTIONAL
    if (ompt_enabled.ompt_callback_work) {
      ompt_callbacks.ompt_callback(ompt_callback_work)(
          ompt_work_type, ompt_scope_begin, &(team_info->parallel_data),
          &(task_info->task_data), *pstride, codeptr);
    }
#endif
    KMP_STATS_LOOP_END(OMP_loop_static_iterations);
    return;
  }
  /* compute trip count */
  if (incr == 1) {
    trip_count = *pupper - *plower + 1;
  } else if (incr == -1) {
    trip_count = *plower - *pupper + 1;
  } else if (incr > 0) {
    // upper-lower can exceed the limit of signed type
    trip_count = (UT)(*pupper - *plower) / incr + 1;
  } else {
    trip_count = (UT)(*plower - *pupper) / (-incr) + 1;
  }
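
// The incr == 1 / incr == -1 fast paths avoid the division; the general
// cases divide in the unsigned type UT because *pupper - *plower may not be
// representable in the signed type. E.g. lower = -2, upper = 7, incr = 3:
// trip_count = (UT)9 / 3 + 1 = 4, i.e. the iterations {-2, 1, 4, 7}.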
#if KMP_STATS_ENABLED
  if (KMP_MASTER_GTID(gtid)) {
    KMP_COUNT_VALUE(FOR_static_iterations, trip_count);
  }
#endif
  if (__kmp_env_consistency_check) {
    /* tripcount overflow? */
    if (trip_count == 0 && *pupper != *plower) {
      __kmp_error_construct(kmp_i18n_msg_CnsIterationRangeTooLarge, ct_pdo,
                            loc);
    }
  }
  /* compute remaining parameters */
  switch (schedtype) {
  case kmp_sch_static: {
    if (trip_count < nth) {
      KMP_DEBUG_ASSERT(
          __kmp_static == kmp_sch_static_greedy ||
          __kmp_static ==
              kmp_sch_static_balanced); // Unknown static scheduling type.
      if (tid < trip_count) {
        *pupper = *plower = *plower + tid * incr;
      } else {
        // set bounds so non-active threads execute no iterations
        *plower = *pupper + (incr > 0 ? 1 : -1);
      }
      if (plastiter != NULL)
        *plastiter = (tid == trip_count - 1);
    } else {
      if (__kmp_static == kmp_sch_static_balanced) {
        UT small_chunk = trip_count / nth;
        UT extras = trip_count % nth;
        *plower += incr * (tid * small_chunk + (tid < extras ? tid : extras));
        *pupper = *plower + small_chunk * incr - (tid < extras ? 0 : incr);
        if (plastiter != NULL)
          *plastiter = (tid == nth - 1);
      } else {
        T big_chunk_inc_count =
            (trip_count / nth + ((trip_count % nth) ? 1 : 0)) * incr;
        T old_upper = *pupper;

        KMP_DEBUG_ASSERT(__kmp_static == kmp_sch_static_greedy);
        // Unknown static scheduling type.

        *plower += tid * big_chunk_inc_count;
        *pupper = *plower + big_chunk_inc_count - incr;
        if (incr > 0) {
          if (*pupper < *plower)
            *pupper = traits_t<T>::max_value;
          if (plastiter != NULL)
            *plastiter = *plower <= old_upper && *pupper > old_upper - incr;
          if (*pupper > old_upper)
            *pupper = old_upper; // tracker C73258
        } else {
          if (*pupper > *plower)
            *pupper = traits_t<T>::min_value;
          if (plastiter != NULL)
            *plastiter = *plower >= old_upper && *pupper < old_upper - incr;
          if (*pupper < old_upper)
            *pupper = old_upper; // tracker C73258
        }
      }
    }
    *pstride = trip_count;
    break;
  }
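
  // Example of the two kmp_sch_static policies above with trip_count = 10
  // and nth = 4: balanced gives small_chunk = 2, extras = 2, so threads get
  // 3, 3, 2, 2 iterations; greedy gives big_chunk = ceil(10/4) = 3, so
  // threads get 3, 3, 3, 1 and the last upper bound is clipped to old_upper.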
  case kmp_sch_static_chunked: {
    ST span;
    UT nchunks;
    if (chunk < 1)
      chunk = 1;
    else if ((UT)chunk > trip_count)
      chunk = trip_count;
    nchunks = (trip_count) / (UT)chunk + (trip_count % (UT)chunk ? 1 : 0);
    span = chunk * incr;
    if (nchunks < nth) {
      *pstride = span * nchunks;
      if (tid < nchunks) {
        *plower = *plower + (span * tid);
        *pupper = *plower + span - incr;
      } else {
        *plower = *pupper + (incr > 0 ? 1 : -1);
      }
    } else {
      *pstride = span * nth;
      *plower = *plower + (span * tid);
      *pupper = *plower + span - incr;
    }
    if (plastiter != NULL)
      *plastiter = (tid == (nchunks - 1) % nth);
    break;
  }
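
  // kmp_sch_static_chunked deals chunks round-robin: *pstride = span * nth,
  // so thread tid owns chunks tid, tid + nth, tid + 2 * nth, ... For example,
  // trip_count = 10, chunk = 2, nth = 3 gives nchunks = 5; thread 0 gets
  // chunks {0, 3}, thread 1 gets {1, 4}, thread 2 gets {2}, and the last
  // chunk (index 4) belongs to tid == (5 - 1) % 3 == 1.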
  case kmp_sch_static_balanced_chunked: {
    T old_upper = *pupper;
    // round up to make sure the chunk is enough to cover all iterations
    UT span = (trip_count + nth - 1) / nth;

    // perform chunk adjustment
    chunk = (span + chunk - 1) & ~(chunk - 1);

    span = chunk * incr;
    *plower = *plower + (span * tid);
    *pupper = *plower + span - incr;
    if (incr > 0) {
      if (*pupper > old_upper)
        *pupper = old_upper;
    } else if (*pupper < old_upper)
      *pupper = old_upper;

    if (plastiter != NULL)
      *plastiter = (tid == ((trip_count - 1) / (UT)chunk));
    break;
  }
  default:
    KMP_ASSERT2(0, "__kmpc_for_static_init: unknown scheduling type");
    break;
  }
#if USE_ITT_BUILD
  // Report loop metadata
  if (KMP_MASTER_TID(tid) && __itt_metadata_add_ptr &&
      __kmp_forkjoin_frames_mode == 3 && th->th.th_teams_microtask == NULL &&
      team->t.t_active_level == 1) {
    kmp_uint64 cur_chunk = chunk;
    check_loc(loc);
    // calculate chunk in case it was not specified; it is specified for
    // kmp_sch_static_chunked
    if (schedtype == kmp_sch_static)
      cur_chunk = trip_count / nth + ((trip_count % nth) ? 1 : 0);
    __kmp_itt_metadata_loop(loc, 0, trip_count, cur_chunk);
  }
#endif
#ifdef KMP_DEBUG
  {
    char *buff;
    // create format specifiers before the debug output
    buff = __kmp_str_format("__kmpc_for_static_init: liter=%%d lower=%%%s "
                            "upper=%%%s stride = %%%s signed?<%s>\n",
                            traits_t<T>::spec, traits_t<T>::spec,
                            traits_t<ST>::spec, traits_t<T>::spec);
    KD_TRACE(100, (buff, *plastiter, *plower, *pupper, *pstride));
    __kmp_str_free(&buff);
  }
#endif
  KE_TRACE(10, ("__kmpc_for_static_init: T#%d return\n", global_tid));
#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.ompt_callback_work) {
    ompt_callbacks.ompt_callback(ompt_callback_work)(
        ompt_work_type, ompt_scope_begin, &(team_info->parallel_data),
        &(task_info->task_data), trip_count, codeptr);
  }
  if (ompt_enabled.ompt_callback_dispatch) {
    ompt_dispatch_t dispatch_type;
    ompt_data_t instance = ompt_data_none;
    ompt_dispatch_chunk_t dispatch_chunk;
    if (ompt_work_type == ompt_work_sections) {
      dispatch_type = ompt_dispatch_section;
      instance.ptr = codeptr;
    } else {
      OMPT_GET_DISPATCH_CHUNK(dispatch_chunk, *plower, *pupper, incr);
      dispatch_type = (ompt_work_type == ompt_work_distribute)
                          ? ompt_dispatch_distribute_chunk
                          : ompt_dispatch_ws_loop_chunk;
      instance.ptr = &dispatch_chunk;
    }
    ompt_callbacks.ompt_callback(ompt_callback_dispatch)(
        &(team_info->parallel_data), &(task_info->task_data), dispatch_type,
        instance);
  }
#endif

  KMP_STATS_LOOP_END(OMP_loop_static_iterations);
  return;
}
template <typename T>
static void __kmp_dist_for_static_init(ident_t *loc, kmp_int32 gtid,
                                       kmp_int32 schedule, kmp_int32 *plastiter,
                                       T *plower, T *pupper, T *pupperDist,
                                       typename traits_t<T>::signed_t *pstride,
                                       typename traits_t<T>::signed_t incr,
                                       typename traits_t<T>::signed_t chunk
#if OMPT_SUPPORT && OMPT_OPTIONAL
                                       ,
                                       void *codeptr
#endif
) {
  KMP_PUSH_PARTITIONED_TIMER(OMP_distribute);
  KMP_PUSH_PARTITIONED_TIMER(OMP_distribute_scheduling);
  typedef typename traits_t<T>::unsigned_t UT;
  typedef typename traits_t<T>::signed_t ST;
  kmp_uint32 tid;
  kmp_uint32 nth;
  kmp_uint32 team_id;
  kmp_uint32 nteams;
  UT trip_count;
  kmp_team_t *team;
  kmp_info_t *th;

  KMP_DEBUG_ASSERT(plastiter && plower && pupper && pupperDist && pstride);
  KE_TRACE(10, ("__kmpc_dist_for_static_init called (%d)\n", gtid));
  __kmp_assert_valid_gtid(gtid);
#ifdef KMP_DEBUG
  {
    char *buff;
    // create format specifiers before the debug output
    buff = __kmp_str_format(
        "__kmpc_dist_for_static_init: T#%%d schedLoop=%%d liter=%%d "
        "iter=(%%%s, %%%s, %%%s) chunk=%%%s signed?<%s>\n",
        traits_t<T>::spec, traits_t<T>::spec, traits_t<ST>::spec,
        traits_t<ST>::spec, traits_t<T>::spec);
    KD_TRACE(100,
             (buff, gtid, schedule, *plastiter, *plower, *pupper, incr, chunk));
    __kmp_str_free(&buff);
  }
#endif
  if (__kmp_env_consistency_check) {
    __kmp_push_workshare(gtid, ct_pdo, loc);
    if (incr == 0)
      __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrZeroProhibited, ct_pdo,
                            loc);
    if (incr > 0 ? (*pupper < *plower) : (*plower < *pupper)) {
      // The loop is illegal; zero-trip loops are maintained by the compiler,
      // so a backward range with a forward increment (or vice versa) means
      // the increment sign is wrong.
      __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrIllegal, ct_pdo, loc);
    }
  }
  tid = __kmp_tid_from_gtid(gtid);
  th = __kmp_threads[gtid];
  nth = th->th.th_team_nproc;
  team = th->th.th_team;
  KMP_DEBUG_ASSERT(th->th.th_teams_microtask);
  nteams = th->th.th_teams_size.nteams;
  team_id = team->t.t_master_tid;
  KMP_DEBUG_ASSERT(nteams == (kmp_uint32)team->t.t_parent->t.t_nproc);
  // compute global trip count
  if (incr == 1) {
    trip_count = *pupper - *plower + 1;
  } else if (incr == -1) {
    trip_count = *plower - *pupper + 1;
  } else if (incr > 0) {
    // upper-lower can exceed the limit of signed type
    trip_count = (UT)(*pupper - *plower) / incr + 1;
  } else {
    trip_count = (UT)(*plower - *pupper) / (-incr) + 1;
  }

  *pstride = *pupper - *plower; // just in case (can be unused)
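
  // A distribute construct is split in two levels: first the full iteration
  // space is divided among the nteams teams (the team's last iteration lands
  // in *pupperDist), then the team's portion is divided among the nth threads
  // of that team (ending at *pupper). *plower/*pupper therefore describe the
  // current thread's chunk, nested inside the team's chunk.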
  if (trip_count <= nteams) {
    KMP_DEBUG_ASSERT(
        __kmp_static == kmp_sch_static_greedy ||
        __kmp_static ==
            kmp_sch_static_balanced); // Unknown static scheduling type.
    // only primary threads of some teams get single iteration, other threads
    // get nothing
    if (team_id < trip_count && tid == 0) {
      *pupper = *pupperDist = *plower = *plower + team_id * incr;
    } else {
      *pupperDist = *pupper;
      *plower = *pupper + incr; // compiler should skip loop body
    }
    if (plastiter != NULL)
      *plastiter = (tid == 0 && team_id == trip_count - 1);
  } else {
    // Get the team's chunk first (each team gets at most one chunk)
    if (__kmp_static == kmp_sch_static_balanced) {
      UT chunkD = trip_count / nteams;
      UT extras = trip_count % nteams;
      *plower +=
          incr * (team_id * chunkD + (team_id < extras ? team_id : extras));
      *pupperDist = *plower + chunkD * incr - (team_id < extras ? 0 : incr);
      if (plastiter != NULL)
        *plastiter = (team_id == nteams - 1);
    } else {
      T chunk_inc_count =
          (trip_count / nteams + ((trip_count % nteams) ? 1 : 0)) * incr;
      T upper = *pupper;
      KMP_DEBUG_ASSERT(__kmp_static == kmp_sch_static_greedy);
      // Unknown static scheduling type.
      *plower += team_id * chunk_inc_count;
      *pupperDist = *plower + chunk_inc_count - incr;
      // Check/correct bounds if needed
      if (incr > 0) {
        if (*pupperDist < *plower)
          *pupperDist = traits_t<T>::max_value;
        if (plastiter != NULL)
          *plastiter = *plower <= upper && *pupperDist > upper - incr;
        if (*pupperDist > upper)
          *pupperDist = upper; // tracker C73258
        if (*plower > *pupperDist) {
          *pupper = *pupperDist; // no iterations available
          goto end;
        }
      } else {
        if (*pupperDist > *plower)
          *pupperDist = traits_t<T>::min_value;
        if (plastiter != NULL)
          *plastiter = *plower >= upper && *pupperDist < upper - incr;
        if (*pupperDist < upper)
          *pupperDist = upper; // tracker C73258
        if (*plower < *pupperDist) {
          *pupper = *pupperDist; // no iterations available
          goto end;
        }
      }
    }
    // Get the parallel loop chunk now (for thread)
    // compute trip count for team's chunk
    if (incr == 1) {
      trip_count = *pupperDist - *plower + 1;
    } else if (incr == -1) {
      trip_count = *plower - *pupperDist + 1;
    } else if (incr > 1) {
      // upper-lower can exceed the limit of signed type
      trip_count = (UT)(*pupperDist - *plower) / incr + 1;
    } else {
      trip_count = (UT)(*plower - *pupperDist) / (-incr) + 1;
    }
    KMP_DEBUG_ASSERT(trip_count);
    switch (schedule) {
    case kmp_sch_static: {
      if (trip_count <= nth) {
        KMP_DEBUG_ASSERT(
            __kmp_static == kmp_sch_static_greedy ||
            __kmp_static ==
                kmp_sch_static_balanced); // Unknown static scheduling type.
        if (tid < trip_count)
          *pupper = *plower = *plower + tid * incr;
        else
          *plower = *pupper + incr; // no iterations available
        if (plastiter != NULL)
          if (*plastiter != 0 && !(tid == trip_count - 1))
            *plastiter = 0;
      } else {
        if (__kmp_static == kmp_sch_static_balanced) {
          UT chunkL = trip_count / nth;
          UT extras = trip_count % nth;
          *plower += incr * (tid * chunkL + (tid < extras ? tid : extras));
          *pupper = *plower + chunkL * incr - (tid < extras ? 0 : incr);
          if (plastiter != NULL)
            if (*plastiter != 0 && !(tid == nth - 1))
              *plastiter = 0;
        } else {
          T chunk_inc_count =
              (trip_count / nth + ((trip_count % nth) ? 1 : 0)) * incr;
          T upper = *pupperDist;
          KMP_DEBUG_ASSERT(__kmp_static == kmp_sch_static_greedy);
          // Unknown static scheduling type.
          *plower += tid * chunk_inc_count;
          *pupper = *plower + chunk_inc_count - incr;
          if (incr > 0) {
            if (*pupper < *plower)
              *pupper = traits_t<T>::max_value;
            if (plastiter != NULL)
              if (*plastiter != 0 &&
                  !(*plower <= upper && *pupper > upper - incr))
                *plastiter = 0;
            if (*pupper > upper)
              *pupper = upper; // tracker C73258
          } else {
            if (*pupper > *plower)
              *pupper = traits_t<T>::min_value;
            if (plastiter != NULL)
              if (*plastiter != 0 &&
                  !(*plower >= upper && *pupper < upper - incr))
                *plastiter = 0;
            if (*pupper < upper)
              *pupper = upper; // tracker C73258
          }
        }
      }
      break;
    }
    case kmp_sch_static_chunked: {
      ST span;
      if (chunk < 1)
        chunk = 1;
      span = chunk * incr;
      *pstride = span * nth;
      *plower = *plower + (span * tid);
      *pupper = *plower + span - incr;
      if (plastiter != NULL)
        if (*plastiter != 0 && !(tid == ((trip_count - 1) / (UT)chunk) % nth))
          *plastiter = 0;
      break;
    }
    default:
      KMP_ASSERT2(
          0, "__kmpc_dist_for_static_init: unknown loop scheduling type");
      break;
    }
  }
end:;
#ifdef KMP_DEBUG
  {
    char *buff;
    // create format specifiers before the debug output
    buff = __kmp_str_format(
        "__kmpc_dist_for_static_init: last=%%d lo=%%%s up=%%%s upDist=%%%s "
        "stride=%%%s signed?<%s>\n",
        traits_t<T>::spec, traits_t<T>::spec, traits_t<T>::spec,
        traits_t<ST>::spec, traits_t<T>::spec);
    KD_TRACE(100, (buff, *plastiter, *plower, *pupper, *pupperDist, *pstride));
    __kmp_str_free(&buff);
  }
#endif
  KE_TRACE(10, ("__kmpc_dist_for_static_init: T#%d return\n", gtid));
#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.ompt_callback_work || ompt_enabled.ompt_callback_dispatch) {
    ompt_team_info_t *team_info = __ompt_get_teaminfo(0, NULL);
    ompt_task_info_t *task_info = __ompt_get_task_info_object(0);
    if (ompt_enabled.ompt_callback_work) {
      ompt_callbacks.ompt_callback(ompt_callback_work)(
          ompt_work_distribute, ompt_scope_begin, &(team_info->parallel_data),
          &(task_info->task_data), 0, codeptr);
    }
    if (ompt_enabled.ompt_callback_dispatch) {
      ompt_data_t instance = ompt_data_none;
      ompt_dispatch_chunk_t dispatch_chunk;
      OMPT_GET_DISPATCH_CHUNK(dispatch_chunk, *plower, *pupperDist, incr);
      instance.ptr = &dispatch_chunk;
      ompt_callbacks.ompt_callback(ompt_callback_dispatch)(
          &(team_info->parallel_data), &(task_info->task_data),
          ompt_dispatch_distribute_chunk, instance);
    }
  }
#endif
  KMP_STATS_LOOP_END(OMP_distribute_iterations);
  return;
}
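
// End-to-end example of the two-level split above: iterations 0..9, incr 1,
// nteams = 2, nth = 2, balanced schedule. Team level: chunkD = 5, extras = 0,
// so team 0 covers 0..4 (*pupperDist = 4) and team 1 covers 5..9. Thread
// level inside team 0: trip_count = 5, chunkL = 2, extras = 1, so tid 0 gets
// 0..2 and tid 1 gets 3..4.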
template <typename T>
static void __kmp_team_static_init(ident_t *loc, kmp_int32 gtid,
                                   kmp_int32 *p_last, T *p_lb, T *p_ub,
                                   typename traits_t<T>::signed_t *p_st,
                                   typename traits_t<T>::signed_t incr,
                                   typename traits_t<T>::signed_t chunk) {
  // The routine returns the first chunk distributed to the team and the
  // stride for next chunks calculation; called for dist_schedule(static,chunk)
  typedef typename traits_t<T>::unsigned_t UT;
  typedef typename traits_t<T>::signed_t ST;
  kmp_uint32 team_id;
  kmp_uint32 nteams;
  UT trip_count;
  T lower;
  T upper;
  ST span;
  kmp_team_t *team;
  kmp_info_t *th;

  KMP_DEBUG_ASSERT(p_last && p_lb && p_ub && p_st);
  KE_TRACE(10, ("__kmp_team_static_init called (%d)\n", gtid));
  __kmp_assert_valid_gtid(gtid);
#ifdef KMP_DEBUG
  {
    char *buff;
    // create format specifiers before the debug output
    buff = __kmp_str_format("__kmp_team_static_init enter: T#%%d liter=%%d "
                            "iter=(%%%s, %%%s, %%%s) chunk %%%s; signed?<%s>\n",
                            traits_t<T>::spec, traits_t<T>::spec,
                            traits_t<ST>::spec, traits_t<ST>::spec,
                            traits_t<T>::spec);
    KD_TRACE(100, (buff, gtid, *p_last, *p_lb, *p_ub, *p_st, chunk));
    __kmp_str_free(&buff);
  }
#endif

  lower = *p_lb;
  upper = *p_ub;
  if (__kmp_env_consistency_check) {
    if (incr == 0)
      __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrZeroProhibited, ct_pdo,
                            loc);
    if (incr > 0 ? (upper < lower) : (lower < upper)) {
      // The loop is illegal; the increment sign does not match the bounds.
      __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrIllegal, ct_pdo, loc);
    }
  }
  th = __kmp_threads[gtid];
  team = th->th.th_team;
  KMP_DEBUG_ASSERT(th->th.th_teams_microtask);
  nteams = th->th.th_teams_size.nteams;
  team_id = team->t.t_master_tid;
  KMP_DEBUG_ASSERT(nteams == (kmp_uint32)team->t.t_parent->t.t_nproc);
  // compute trip count
  if (incr == 1) {
    trip_count = upper - lower + 1;
  } else if (incr == -1) {
    trip_count = lower - upper + 1;
  } else if (incr > 0) {
    // upper-lower can exceed the limit of signed type
    trip_count = (UT)(upper - lower) / incr + 1;
  } else {
    trip_count = (UT)(lower - upper) / (-incr) + 1;
  }
  if (chunk < 1)
    chunk = 1;
  span = chunk * incr;
  *p_st = span * nteams;
  *p_lb = lower + (span * team_id);
  *p_ub = *p_lb + span - incr;
  if (p_last != NULL)
    *p_last = (team_id == ((trip_count - 1) / (UT)chunk) % nteams);
  // Correct upper bound if needed
  if (incr > 0) {
    if (*p_ub < *p_lb) // overflow?
      *p_ub = traits_t<T>::max_value;
    if (*p_ub > upper)
      *p_ub = upper; // tracker C73258
  } else { // incr < 0
    if (*p_ub > *p_lb)
      *p_ub = traits_t<T>::min_value;
    if (*p_ub < upper)
      *p_ub = upper; // tracker C73258
  }
#ifdef KMP_DEBUG
  {
    char *buff;
    // create format specifiers before the debug output
    buff = __kmp_str_format("__kmp_team_static_init exit: T#%%d team%%u "
                            "liter=%%d iter=(%%%s, %%%s, %%%s) chunk %%%s\n",
                            traits_t<T>::spec, traits_t<T>::spec,
                            traits_t<ST>::spec, traits_t<ST>::spec);
    KD_TRACE(100, (buff, gtid, team_id, *p_last, *p_lb, *p_ub, *p_st, chunk));
    __kmp_str_free(&buff);
  }
#endif
}
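
// Example of the round-robin distribution above: iterations 0..9, incr 1,
// chunk = 2, nteams = 2. span = 2 and *p_st = 4, so team 0 starts at 0 and
// owns chunks {0..1, 4..5, 8..9} as it advances by *p_st, while team 1 starts
// at 2 and owns {2..3, 6..7}. The last chunk index is (10 - 1) / 2 = 4, so
// *p_last is set for team 4 % 2 == 0.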
extern "C" {
/*!
@ingroup WORK_SHARING
Compute the upper and lower bounds and stride for the chunk of a statically
scheduled loop that the calling thread should execute.
*/
void __kmpc_for_static_init_4(ident_t *loc, kmp_int32 gtid,
                              kmp_int32 schedtype, kmp_int32 *plastiter,
                              kmp_int32 *plower, kmp_int32 *pupper,
                              kmp_int32 *pstride, kmp_int32 incr,
                              kmp_int32 chunk) {
  __kmp_for_static_init<kmp_int32>(loc, gtid, schedtype, plastiter, plower,
                                   pupper, pstride, incr, chunk
#if OMPT_SUPPORT && OMPT_OPTIONAL
                                   ,
                                   OMPT_GET_RETURN_ADDRESS(0)
#endif
  );
}
/*! See @ref __kmpc_for_static_init_4 */
void __kmpc_for_static_init_4u(ident_t *loc, kmp_int32 gtid,
                               kmp_int32 schedtype, kmp_int32 *plastiter,
                               kmp_uint32 *plower, kmp_uint32 *pupper,
                               kmp_int32 *pstride, kmp_int32 incr,
                               kmp_int32 chunk) {
  __kmp_for_static_init<kmp_uint32>(loc, gtid, schedtype, plastiter, plower,
                                    pupper, pstride, incr, chunk
#if OMPT_SUPPORT && OMPT_OPTIONAL
                                    ,
                                    OMPT_GET_RETURN_ADDRESS(0)
#endif
  );
}
/*! See @ref __kmpc_for_static_init_4 */
void __kmpc_for_static_init_8(ident_t *loc, kmp_int32 gtid,
                              kmp_int32 schedtype, kmp_int32 *plastiter,
                              kmp_int64 *plower, kmp_int64 *pupper,
                              kmp_int64 *pstride, kmp_int64 incr,
                              kmp_int64 chunk) {
  __kmp_for_static_init<kmp_int64>(loc, gtid, schedtype, plastiter, plower,
                                   pupper, pstride, incr, chunk
#if OMPT_SUPPORT && OMPT_OPTIONAL
                                   ,
                                   OMPT_GET_RETURN_ADDRESS(0)
#endif
  );
}
/*! See @ref __kmpc_for_static_init_4 */
void __kmpc_for_static_init_8u(ident_t *loc, kmp_int32 gtid,
                               kmp_int32 schedtype, kmp_int32 *plastiter,
                               kmp_uint64 *plower, kmp_uint64 *pupper,
                               kmp_int64 *pstride, kmp_int64 incr,
                               kmp_int64 chunk) {
  __kmp_for_static_init<kmp_uint64>(loc, gtid, schedtype, plastiter, plower,
                                    pupper, pstride, incr, chunk
#if OMPT_SUPPORT && OMPT_OPTIONAL
                                    ,
                                    OMPT_GET_RETURN_ADDRESS(0)
#endif
  );
}
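
// Illustrative only (not part of this file): a minimal sketch of how a
// compiler typically lowers "#pragma omp for schedule(static)" onto the
// entry points above. The locals (loc, gtid, n, body) are hypothetical.
//
//   kmp_int32 last = 0, lower = 0, upper = n - 1, stride = 1;
//   __kmpc_for_static_init_4(&loc, gtid, kmp_sch_static, &last, &lower,
//                            &upper, &stride, /*incr=*/1, /*chunk=*/0);
//   for (kmp_int32 i = lower; i <= upper; ++i)
//     body(i);
//   __kmpc_for_static_fini(&loc, gtid);
//
// Each thread calls the init routine with the full loop bounds and reads back
// its own sub-range; for schedule(static, chunk) the compiler additionally
// wraps the loop in an outer loop that advances lower/upper by stride.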
#if OMPT_SUPPORT && OMPT_OPTIONAL
#define OMPT_CODEPTR_ARG , OMPT_GET_RETURN_ADDRESS(0)
#else
#define OMPT_CODEPTR_ARG
#endif
/*!
@ingroup WORK_SHARING
Returns the bounds of the chunk of iterations assigned to this team (in
pupperD) as well as this thread's portion of that chunk.
*/
void __kmpc_dist_for_static_init_4(ident_t *loc, kmp_int32 gtid,
                                   kmp_int32 schedule, kmp_int32 *plastiter,
                                   kmp_int32 *plower, kmp_int32 *pupper,
                                   kmp_int32 *pupperD, kmp_int32 *pstride,
                                   kmp_int32 incr, kmp_int32 chunk) {
  __kmp_dist_for_static_init<kmp_int32>(loc, gtid, schedule, plastiter, plower,
                                        pupper, pupperD, pstride, incr,
                                        chunk OMPT_CODEPTR_ARG);
}
/*! See @ref __kmpc_dist_for_static_init_4 */
void __kmpc_dist_for_static_init_4u(ident_t *loc, kmp_int32 gtid,
                                    kmp_int32 schedule, kmp_int32 *plastiter,
                                    kmp_uint32 *plower, kmp_uint32 *pupper,
                                    kmp_uint32 *pupperD, kmp_int32 *pstride,
                                    kmp_int32 incr, kmp_int32 chunk) {
  __kmp_dist_for_static_init<kmp_uint32>(loc, gtid, schedule, plastiter, plower,
                                         pupper, pupperD, pstride, incr,
                                         chunk OMPT_CODEPTR_ARG);
}
/*! See @ref __kmpc_dist_for_static_init_4 */
void __kmpc_dist_for_static_init_8(ident_t *loc, kmp_int32 gtid,
                                   kmp_int32 schedule, kmp_int32 *plastiter,
                                   kmp_int64 *plower, kmp_int64 *pupper,
                                   kmp_int64 *pupperD, kmp_int64 *pstride,
                                   kmp_int64 incr, kmp_int64 chunk) {
  __kmp_dist_for_static_init<kmp_int64>(loc, gtid, schedule, plastiter, plower,
                                        pupper, pupperD, pstride, incr,
                                        chunk OMPT_CODEPTR_ARG);
}
/*! See @ref __kmpc_dist_for_static_init_4 */
void __kmpc_dist_for_static_init_8u(ident_t *loc, kmp_int32 gtid,
                                    kmp_int32 schedule, kmp_int32 *plastiter,
                                    kmp_uint64 *plower, kmp_uint64 *pupper,
                                    kmp_uint64 *pupperD, kmp_int64 *pstride,
                                    kmp_int64 incr, kmp_int64 chunk) {
  __kmp_dist_for_static_init<kmp_uint64>(loc, gtid, schedule, plastiter, plower,
                                         pupper, pupperD, pstride, incr,
                                         chunk OMPT_CODEPTR_ARG);
}
/*!
@ingroup WORK_SHARING
Returns the first chunk distributed to the team and the stride for computing
subsequent chunks; used for dist_schedule(static, chunk) only.
*/
void __kmpc_team_static_init_4(ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last,
                               kmp_int32 *p_lb, kmp_int32 *p_ub,
                               kmp_int32 *p_st, kmp_int32 incr,
                               kmp_int32 chunk) {
  KMP_DEBUG_ASSERT(__kmp_init_serial);
  __kmp_team_static_init<kmp_int32>(loc, gtid, p_last, p_lb, p_ub, p_st, incr,
                                    chunk);
}
/*! See @ref __kmpc_team_static_init_4 */
void __kmpc_team_static_init_4u(ident_t *loc, kmp_int32 gtid,
                                kmp_int32 *p_last, kmp_uint32 *p_lb,
                                kmp_uint32 *p_ub, kmp_int32 *p_st,
                                kmp_int32 incr, kmp_int32 chunk) {
  KMP_DEBUG_ASSERT(__kmp_init_serial);
  __kmp_team_static_init<kmp_uint32>(loc, gtid, p_last, p_lb, p_ub, p_st, incr,
                                     chunk);
}
/*! See @ref __kmpc_team_static_init_4 */
void __kmpc_team_static_init_8(ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last,
                               kmp_int64 *p_lb, kmp_int64 *p_ub,
                               kmp_int64 *p_st, kmp_int64 incr,
                               kmp_int64 chunk) {
  KMP_DEBUG_ASSERT(__kmp_init_serial);
  __kmp_team_static_init<kmp_int64>(loc, gtid, p_last, p_lb, p_ub, p_st, incr,
                                    chunk);
}
/*! See @ref __kmpc_team_static_init_4 */
void __kmpc_team_static_init_8u(ident_t *loc, kmp_int32 gtid,
                                kmp_int32 *p_last, kmp_uint64 *p_lb,
                                kmp_uint64 *p_ub, kmp_int64 *p_st,
                                kmp_int64 incr, kmp_int64 chunk) {
  KMP_DEBUG_ASSERT(__kmp_init_serial);
  __kmp_team_static_init<kmp_uint64>(loc, gtid, p_last, p_lb, p_ub, p_st, incr,
                                     chunk);
}

} // extern "C"