28 #include "kmp_error.h" 29 #include "kmp_stats.h" 33 #include "ompt-specific.h" 37 template<
typename T >
43 struct i_maxmin< int > {
44 static const int mx = 0x7fffffff;
45 static const int mn = 0x80000000;
48 struct i_maxmin< unsigned int > {
49 static const unsigned int mx = 0xffffffff;
50 static const unsigned int mn = 0x00000000;
53 struct i_maxmin< long long > {
54 static const long long mx = 0x7fffffffffffffffLL;
55 static const long long mn = 0x8000000000000000LL;
58 struct i_maxmin< unsigned long long > {
59 static const unsigned long long mx = 0xffffffffffffffffLL;
60 static const unsigned long long mn = 0x0000000000000000LL;
66 char const * traits_t< int >::spec =
"d";
67 char const * traits_t< unsigned int >::spec =
"u";
68 char const * traits_t< long long >::spec =
"lld";
69 char const * traits_t< unsigned long long >::spec =
"llu";
// __kmp_for_static_init< T >: template worker behind the
// __kmpc_for_static_init_* entry points.  Computes this thread's portion of a
// statically scheduled OpenMP loop -- lower bound, upper bound, stride -- and,
// when requested, whether this thread executes the last iteration.
// NOTE(review): this extract is fragmentary.  The stray leading integers are
// line numbers from the original file fused into the text, and many lines
// (return type, several parameters, braces, case labels) are missing; the
// comments below are hedged accordingly.
73 template<
typename T >
75 __kmp_for_static_init(
// Visible parameters: stride out-param plus loop increment and chunk size,
// all in the signed companion type of T.
82 typename traits_t< T >::signed_t *pstride,
83 typename traits_t< T >::signed_t incr,
84 typename traits_t< T >::signed_t chunk
// UT/ST: unsigned and signed counterparts of the loop-index type T.
89 typedef typename traits_t< T >::unsigned_t UT;
90 typedef typename traits_t< T >::signed_t ST;
92 register kmp_int32 gtid = global_tid;
93 register kmp_uint32 tid;
94 register kmp_uint32 nth;
95 register UT trip_count;
96 register kmp_team_t *team;
97 register kmp_info_t *th = __kmp_threads[ gtid ];
// OMPT tool support: cache team/task info for the loop_begin callbacks
// invoked on the return paths below.
99 #if OMPT_SUPPORT && OMPT_TRACE 100 ompt_team_info_t *team_info = NULL;
101 ompt_task_info_t *task_info = NULL;
105 team_info = __ompt_get_teaminfo(0, NULL);
106 task_info = __ompt_get_taskinfo(0);
110 KMP_DEBUG_ASSERT( plastiter && plower && pupper && pstride );
111 KE_TRACE( 10, (
"__kmpc_for_static_init called (%d)\n", global_tid));
// Entry trace: the format string is built at runtime because the conversion
// specifiers depend on T (traits_t< T >::spec).
116 buff = __kmp_str_format(
117 "__kmpc_for_static_init: T#%%d sched=%%d liter=%%d iter=(%%%s," \
118 " %%%s, %%%s) incr=%%%s chunk=%%%s signed?<%s>\n",
119 traits_t< T >::spec, traits_t< T >::spec, traits_t< ST >::spec,
120 traits_t< ST >::spec, traits_t< ST >::spec, traits_t< T >::spec );
121 KD_TRACE(100, ( buff, global_tid, schedtype, *plastiter,
122 *plower, *pupper, *pstride, incr, chunk ) );
123 __kmp_str_free( &buff );
// Consistency checking: a zero loop increment is a user error here.
127 if ( __kmp_env_consistency_check ) {
128 __kmp_push_workshare( global_tid, ct_pdo, loc );
130 __kmp_error_construct( kmp_i18n_msg_CnsLoopIncrZeroProhibited, ct_pdo, loc );
// Zero-trip loop: bounds already exclude each other for the given sign of
// incr, so every thread returns without iterations.
134 if ( incr > 0 ? (*pupper < *plower) : (*plower < *pupper) ) {
135 if( plastiter != NULL )
145 buff = __kmp_str_format(
146 "__kmpc_for_static_init:(ZERO TRIP) liter=%%d lower=%%%s upper=%%%s stride = %%%s signed?<%s>, loc = %%s\n",
147 traits_t< T >::spec, traits_t< T >::spec, traits_t< ST >::spec, traits_t< T >::spec );
148 KD_TRACE(100, ( buff, *plastiter, *plower, *pupper, *pstride, loc->
psource ) );
149 __kmp_str_free( &buff );
152 KE_TRACE( 10, (
"__kmpc_for_static_init: T#%d return\n", global_tid ) );
154 #if OMPT_SUPPORT && OMPT_TRACE 156 ompt_callbacks.ompt_callback(ompt_event_loop_begin)) {
157 ompt_callbacks.ompt_callback(ompt_event_loop_begin)(
158 team_info->parallel_id, task_info->task_id,
159 team_info->microtask);
// Presumably the teams-construct case: act as a thread of the parent team.
// TODO(review): the guarding condition was lost in extraction -- confirm.
170 tid = th->th.th_team->t.t_master_tid;
171 team = th->th.th_team->t.t_parent;
175 tid = __kmp_tid_from_gtid( global_tid );
176 team = th->th.th_team;
// Serialized team: the single thread receives the entire iteration space.
180 if ( team -> t.t_serialized ) {
182 if( plastiter != NULL )
185 *pstride = (incr > 0) ? (*pupper - *plower + 1) : (-(*plower - *pupper + 1));
191 buff = __kmp_str_format(
192 "__kmpc_for_static_init: (serial) liter=%%d lower=%%%s upper=%%%s stride = %%%s\n",
193 traits_t< T >::spec, traits_t< T >::spec, traits_t< ST >::spec );
194 KD_TRACE(100, ( buff, *plastiter, *plower, *pupper, *pstride ) );
195 __kmp_str_free( &buff );
198 KE_TRACE( 10, (
"__kmpc_for_static_init: T#%d return\n", global_tid ) );
200 #if OMPT_SUPPORT && OMPT_TRACE 202 ompt_callbacks.ompt_callback(ompt_event_loop_begin)) {
203 ompt_callbacks.ompt_callback(ompt_event_loop_begin)(
204 team_info->parallel_id, task_info->task_id,
205 team_info->microtask);
// Single-thread team: same whole-range early return as the serialized case.
210 nth = team->t.t_nproc;
212 if( plastiter != NULL )
214 *pstride = (incr > 0) ? (*pupper - *plower + 1) : (-(*plower - *pupper + 1));
219 buff = __kmp_str_format(
220 "__kmpc_for_static_init: (serial) liter=%%d lower=%%%s upper=%%%s stride = %%%s\n",
221 traits_t< T >::spec, traits_t< T >::spec, traits_t< ST >::spec );
222 KD_TRACE(100, ( buff, *plastiter, *plower, *pupper, *pstride ) );
223 __kmp_str_free( &buff );
226 KE_TRACE( 10, (
"__kmpc_for_static_init: T#%d return\n", global_tid ) );
228 #if OMPT_SUPPORT && OMPT_TRACE 230 ompt_callbacks.ompt_callback(ompt_event_loop_begin)) {
231 ompt_callbacks.ompt_callback(ompt_event_loop_begin)(
232 team_info->parallel_id, task_info->task_id,
233 team_info->microtask);
// Trip count in UT: +/-1 increments use the cheap subtraction form; general
// increments divide (positive and negative handled separately).
241 trip_count = *pupper - *plower + 1;
242 }
else if (incr == -1) {
243 trip_count = *plower - *pupper + 1;
246 trip_count = (*pupper - *plower) / incr + 1;
248 trip_count = (*plower - *pupper) / ( -incr ) + 1;
// trip_count == 0 with distinct bounds means the range did not fit in UT.
252 if ( __kmp_env_consistency_check ) {
254 if ( trip_count == 0 && *pupper != *plower ) {
255 __kmp_error_construct( kmp_i18n_msg_CnsIterationRangeTooLarge, ct_pdo, loc );
// Dispatch on the schedule kind (case labels largely lost in extraction).
261 switch ( schedtype ) {
// Fewer iterations than threads: at most one iteration per thread.
264 if ( trip_count < nth ) {
266 __kmp_static == kmp_sch_static_greedy || \
267 __kmp_static == kmp_sch_static_balanced
269 if ( tid < trip_count ) {
270 *pupper = *plower = *plower + tid * incr;
272 *plower = *pupper + incr;
274 if( plastiter != NULL )
275 *plastiter = ( tid == trip_count - 1 );
// Balanced split: remainder iterations go to the first `extras` threads.
277 if ( __kmp_static == kmp_sch_static_balanced ) {
278 register UT small_chunk = trip_count / nth;
279 register UT extras = trip_count % nth;
280 *plower += incr * ( tid * small_chunk + ( tid < extras ? tid : extras ) );
281 *pupper = *plower + small_chunk * incr - ( tid < extras ? 0 : incr );
282 if( plastiter != NULL )
283 *plastiter = ( tid == nth - 1 );
// Greedy split: equal rounded-up chunks, with overflow clamping below.
285 register T big_chunk_inc_count = ( trip_count/nth +
286 ( ( trip_count % nth ) ? 1 : 0) ) * incr;
287 register T old_upper = *pupper;
289 KMP_DEBUG_ASSERT( __kmp_static == kmp_sch_static_greedy );
292 *plower += tid * big_chunk_inc_count;
293 *pupper = *plower + big_chunk_inc_count - incr;
// Clamp *pupper when the chunk arithmetic wrapped past the type's range,
// then restore the true loop bound for the last chunk (both directions).
295 if( *pupper < *plower )
296 *pupper = i_maxmin< T >::mx;
297 if( plastiter != NULL )
298 *plastiter = *plower <= old_upper && *pupper > old_upper - incr;
299 if ( *pupper > old_upper ) *pupper = old_upper;
301 if( *pupper > *plower )
302 *pupper = i_maxmin< T >::mn;
303 if( plastiter != NULL )
304 *plastiter = *plower >= old_upper && *pupper < old_upper - incr;
305 if ( *pupper < old_upper ) *pupper = old_upper;
// Chunked static schedule: chunks of size `chunk` round-robined over threads.
311 case kmp_sch_static_chunked:
318 *pstride = span * nth;
319 *plower = *plower + (span * tid);
320 *pupper = *plower + span - incr;
321 if( plastiter != NULL )
322 *plastiter = (tid == ((trip_count - 1)/( UT )chunk) % nth);
326 KMP_ASSERT2( 0,
"__kmpc_for_static_init: unknown scheduling type" );
// ittnotify: report loop metadata once (master thread, outer level only).
332 if ( KMP_MASTER_TID(tid) && __itt_metadata_add_ptr && __kmp_forkjoin_frames_mode == 3 &&
334 th->th.th_teams_microtask == NULL &&
336 team->t.t_active_level == 1 )
338 kmp_uint64 cur_chunk = chunk;
341 cur_chunk = trip_count / nth + ( ( trip_count % nth ) ? 1 : 0);
344 __kmp_itt_metadata_loop(loc, 0, trip_count, cur_chunk);
// Exit trace with the computed bounds and stride.
351 buff = __kmp_str_format(
352 "__kmpc_for_static_init: liter=%%d lower=%%%s upper=%%%s stride = %%%s signed?<%s>\n",
353 traits_t< T >::spec, traits_t< T >::spec, traits_t< ST >::spec, traits_t< T >::spec );
354 KD_TRACE(100, ( buff, *plastiter, *plower, *pupper, *pstride ) );
355 __kmp_str_free( &buff );
358 KE_TRACE( 10, (
"__kmpc_for_static_init: T#%d return\n", global_tid ) );
360 #if OMPT_SUPPORT && OMPT_TRACE 362 ompt_callbacks.ompt_callback(ompt_event_loop_begin)) {
363 ompt_callbacks.ompt_callback(ompt_event_loop_begin)(
364 team_info->parallel_id, task_info->task_id, team_info->microtask);
// __kmp_dist_for_static_init< T >: worker for the distribute-parallel-for
// entry points.  Two phases: first carve the iteration space across the teams
// of a teams construct (writing *pupperDist), then carve this team's
// sub-range across its threads (writing *plower/*pupper).
// NOTE(review): fragmentary extract -- the stray leading integers are original
// file line numbers; return type, some parameters, braces and case labels are
// missing.  Comments are hedged accordingly.
371 template<
typename T >
373 __kmp_dist_for_static_init(
377 kmp_int32 *plastiter,
// Stride out-param, increment and chunk in the signed companion type of T.
381 typename traits_t< T >::signed_t *pstride,
382 typename traits_t< T >::signed_t incr,
383 typename traits_t< T >::signed_t chunk
386 typedef typename traits_t< T >::unsigned_t UT;
387 typedef typename traits_t< T >::signed_t ST;
388 register kmp_uint32 tid;
389 register kmp_uint32 nth;
390 register kmp_uint32 team_id;
391 register kmp_uint32 nteams;
392 register UT trip_count;
393 register kmp_team_t *team;
396 KMP_DEBUG_ASSERT( plastiter && plower && pupper && pupperDist && pstride );
397 KE_TRACE( 10, (
"__kmpc_dist_for_static_init called (%d)\n", gtid));
// Entry trace; conversion specifiers depend on T.
402 buff = __kmp_str_format(
403 "__kmpc_dist_for_static_init: T#%%d schedLoop=%%d liter=%%d "\
404 "iter=(%%%s, %%%s, %%%s) chunk=%%%s signed?<%s>\n",
405 traits_t< T >::spec, traits_t< T >::spec, traits_t< ST >::spec,
406 traits_t< ST >::spec, traits_t< T >::spec );
407 KD_TRACE(100, ( buff, gtid, schedule, *plastiter,
408 *plower, *pupper, incr, chunk ) );
409 __kmp_str_free( &buff );
// Consistency checks: zero increment rejected; empty range handled; an
// illegal increment raises CnsLoopIncrIllegal.
413 if( __kmp_env_consistency_check ) {
414 __kmp_push_workshare( gtid, ct_pdo, loc );
416 __kmp_error_construct( kmp_i18n_msg_CnsLoopIncrZeroProhibited, ct_pdo, loc );
418 if( incr > 0 ? (*pupper < *plower) : (*plower < *pupper) ) {
428 __kmp_error_construct( kmp_i18n_msg_CnsLoopIncrIllegal, ct_pdo, loc );
// Identify this thread, its team, and the enclosing teams construct.
431 tid = __kmp_tid_from_gtid( gtid );
432 th = __kmp_threads[gtid];
433 nth = th->th.th_team_nproc;
434 team = th->th.th_team;
436 KMP_DEBUG_ASSERT(th->th.th_teams_microtask);
437 nteams = th->th.th_teams_size.nteams;
439 team_id = team->t.t_master_tid;
440 KMP_DEBUG_ASSERT(nteams == team->t.t_parent->t.t_nproc);
// Whole-space trip count (cheap form for +/-1 increments).
444 trip_count = *pupper - *plower + 1;
445 }
else if(incr == -1) {
446 trip_count = *plower - *pupper + 1;
448 trip_count = (ST)(*pupper - *plower) / incr + 1;
// Distribute phase: split the whole space across nteams teams.
451 *pstride = *pupper - *plower;
452 if( trip_count <= nteams ) {
454 __kmp_static == kmp_sch_static_greedy || \
455 __kmp_static == kmp_sch_static_balanced
// Fewer iterations than teams: only tid 0 of the low-numbered teams get one.
458 if( team_id < trip_count && tid == 0 ) {
459 *pupper = *pupperDist = *plower = *plower + team_id * incr;
461 *pupperDist = *pupper;
462 *plower = *pupper + incr;
464 if( plastiter != NULL )
465 *plastiter = ( tid == 0 && team_id == trip_count - 1 );
// Balanced split across teams; remainder goes to the first `extras` teams.
468 if( __kmp_static == kmp_sch_static_balanced ) {
469 register UT chunkD = trip_count / nteams;
470 register UT extras = trip_count % nteams;
471 *plower += incr * ( team_id * chunkD + ( team_id < extras ? team_id : extras ) );
472 *pupperDist = *plower + chunkD * incr - ( team_id < extras ? 0 : incr );
473 if( plastiter != NULL )
474 *plastiter = ( team_id == nteams - 1 );
// Greedy split across teams with overflow clamping (both directions).
476 register T chunk_inc_count =
477 ( trip_count / nteams + ( ( trip_count % nteams ) ? 1 : 0) ) * incr;
478 register T upper = *pupper;
479 KMP_DEBUG_ASSERT( __kmp_static == kmp_sch_static_greedy );
481 *plower += team_id * chunk_inc_count;
482 *pupperDist = *plower + chunk_inc_count - incr;
485 if( *pupperDist < *plower )
486 *pupperDist = i_maxmin< T >::mx;
487 if( plastiter != NULL )
488 *plastiter = *plower <= upper && *pupperDist > upper - incr;
489 if( *pupperDist > upper )
491 if( *plower > *pupperDist ) {
492 *pupper = *pupperDist;
496 if( *pupperDist > *plower )
497 *pupperDist = i_maxmin< T >::mn;
498 if( plastiter != NULL )
499 *plastiter = *plower >= upper && *pupperDist < upper - incr;
500 if( *pupperDist < upper )
502 if( *plower < *pupperDist ) {
503 *pupper = *pupperDist;
// Parallel phase: trip count of this team's sub-range [*plower, *pupperDist].
511 trip_count = *pupperDist - *plower + 1;
512 }
else if(incr == -1) {
513 trip_count = *plower - *pupperDist + 1;
515 trip_count = (ST)(*pupperDist - *plower) / incr + 1;
517 KMP_DEBUG_ASSERT( trip_count );
// Split the team's sub-range across its nth threads (mirrors the team split;
// note *plastiter is only cleared here, never set, so the distribute phase's
// answer survives when this thread also ends the sub-range).
521 if( trip_count <= nth ) {
523 __kmp_static == kmp_sch_static_greedy || \
524 __kmp_static == kmp_sch_static_balanced
526 if( tid < trip_count )
527 *pupper = *plower = *plower + tid * incr;
529 *plower = *pupper + incr;
530 if( plastiter != NULL )
531 if( *plastiter != 0 && !( tid == trip_count - 1 ) )
534 if( __kmp_static == kmp_sch_static_balanced ) {
535 register UT chunkL = trip_count / nth;
536 register UT extras = trip_count % nth;
537 *plower += incr * (tid * chunkL + (tid < extras ? tid : extras));
538 *pupper = *plower + chunkL * incr - (tid < extras ? 0 : incr);
539 if( plastiter != NULL )
540 if( *plastiter != 0 && !( tid == nth - 1 ) )
543 register T chunk_inc_count =
544 ( trip_count / nth + ( ( trip_count % nth ) ? 1 : 0) ) * incr;
545 register T upper = *pupperDist;
546 KMP_DEBUG_ASSERT( __kmp_static == kmp_sch_static_greedy );
548 *plower += tid * chunk_inc_count;
549 *pupper = *plower + chunk_inc_count - incr;
551 if( *pupper < *plower )
552 *pupper = i_maxmin< T >::mx;
553 if( plastiter != NULL )
554 if( *plastiter != 0 && !(*plower <= upper && *pupper > upper - incr) )
556 if( *pupper > upper )
559 if( *pupper > *plower )
560 *pupper = i_maxmin< T >::mn;
561 if( plastiter != NULL )
562 if( *plastiter != 0 && !(*plower >= upper && *pupper < upper - incr) )
564 if( *pupper < upper )
// Chunked schedule within the team.
571 case kmp_sch_static_chunked:
577 *pstride = span * nth;
578 *plower = *plower + (span * tid);
579 *pupper = *plower + span - incr;
580 if( plastiter != NULL )
581 if( *plastiter != 0 && !(tid == ((trip_count - 1) / ( UT )chunk) % nth) )
586 KMP_ASSERT2( 0,
"__kmpc_dist_for_static_init: unknown loop scheduling type" );
// Exit trace with both the thread bounds and the team (distribute) bound.
595 buff = __kmp_str_format(
596 "__kmpc_dist_for_static_init: last=%%d lo=%%%s up=%%%s upDist=%%%s "\
597 "stride=%%%s signed?<%s>\n",
598 traits_t< T >::spec, traits_t< T >::spec, traits_t< T >::spec,
599 traits_t< ST >::spec, traits_t< T >::spec );
600 KD_TRACE(100, ( buff, *plastiter, *plower, *pupper, *pupperDist, *pstride ) );
601 __kmp_str_free( &buff );
604 KE_TRACE( 10, (
"__kmpc_dist_for_static_init: T#%d return\n", gtid ) );
// __kmp_team_static_init< T >: splits a statically scheduled loop across the
// teams of a teams construct (used by the __kmpc_team_static_init_* entry
// points); writes this team's lower/upper bound and stride.
// NOTE(review): fragmentary extract -- the stray leading integers are original
// file line numbers; the signature and several statements are incomplete.
608 template<
typename T >
610 __kmp_team_static_init(
// Stride out-param, increment and chunk in the signed companion type of T.
616 typename traits_t< T >::signed_t *p_st,
617 typename traits_t< T >::signed_t incr,
618 typename traits_t< T >::signed_t chunk
625 typedef typename traits_t< T >::unsigned_t UT;
626 typedef typename traits_t< T >::signed_t ST;
636 KMP_DEBUG_ASSERT( p_last && p_lb && p_ub && p_st );
637 KE_TRACE( 10, (
"__kmp_team_static_init called (%d)\n", gtid));
// Entry trace; conversion specifiers depend on T.
642 buff = __kmp_str_format(
"__kmp_team_static_init enter: T#%%d liter=%%d "\
643 "iter=(%%%s, %%%s, %%%s) chunk %%%s; signed?<%s>\n",
644 traits_t< T >::spec, traits_t< T >::spec, traits_t< ST >::spec,
645 traits_t< ST >::spec, traits_t< T >::spec );
646 KD_TRACE(100, ( buff, gtid, *p_last, *p_lb, *p_ub, *p_st, chunk ) );
647 __kmp_str_free( &buff );
// Consistency checks on increment and on the (by-value) lower/upper bounds.
653 if( __kmp_env_consistency_check ) {
655 __kmp_error_construct( kmp_i18n_msg_CnsLoopIncrZeroProhibited, ct_pdo, loc );
657 if( incr > 0 ? (upper < lower) : (lower < upper) ) {
667 __kmp_error_construct( kmp_i18n_msg_CnsLoopIncrIllegal, ct_pdo, loc );
// Locate the enclosing teams construct.
670 th = __kmp_threads[gtid];
671 team = th->th.th_team;
673 KMP_DEBUG_ASSERT(th->th.th_teams_microtask);
674 nteams = th->th.th_teams_size.nteams;
676 team_id = team->t.t_master_tid;
677 KMP_DEBUG_ASSERT(nteams == team->t.t_parent->t.t_nproc);
// Trip count over the whole space (cheap form for +/-1 increments).
681 trip_count = upper - lower + 1;
682 }
else if(incr == -1) {
683 trip_count = lower - upper + 1;
685 trip_count = (ST)(upper - lower) / incr + 1;
// Chunked round-robin over teams; *p_ub is clamped below when the span
// arithmetic overflows T (presumably -- the guards are partially missing).
690 *p_st = span * nteams;
691 *p_lb = lower + (span * team_id);
692 *p_ub = *p_lb + span - incr;
693 if ( p_last != NULL )
694 *p_last = (team_id == ((trip_count - 1)/(UT)chunk) % nteams);
698 *p_ub = i_maxmin< T >::mx;
703 *p_ub = i_maxmin< T >::mn;
// Exit trace.
711 buff = __kmp_str_format(
"__kmp_team_static_init exit: T#%%d team%%u liter=%%d "\
712 "iter=(%%%s, %%%s, %%%s) chunk %%%s\n",
713 traits_t< T >::spec, traits_t< T >::spec, traits_t< ST >::spec,
714 traits_t< ST >::spec );
715 KD_TRACE(100, ( buff, gtid, team_id, *p_last, *p_lb, *p_ub, *p_st, chunk ) );
716 __kmp_str_free( &buff );
// C entry points __kmpc_for_static_init_{4,4u,8,8u}: thin wrappers that
// instantiate __kmp_for_static_init for each 32/64-bit signed/unsigned
// loop-index type and forward all arguments unchanged.
// NOTE(review): the wrapper names and return types were lost in extraction;
// only the parameter tails and forwarding calls are visible here.
746 kmp_int32 *plower, kmp_int32 *pupper,
747 kmp_int32 *pstride, kmp_int32 incr, kmp_int32 chunk )
749 __kmp_for_static_init< kmp_int32 >(
750 loc, gtid, schedtype, plastiter, plower, pupper, pstride, incr, chunk );
// 32-bit unsigned variant.
758 kmp_uint32 *plower, kmp_uint32 *pupper,
759 kmp_int32 *pstride, kmp_int32 incr, kmp_int32 chunk )
761 __kmp_for_static_init< kmp_uint32 >(
762 loc, gtid, schedtype, plastiter, plower, pupper, pstride, incr, chunk );
// 64-bit signed variant.
770 kmp_int64 *plower, kmp_int64 *pupper,
771 kmp_int64 *pstride, kmp_int64 incr, kmp_int64 chunk )
773 __kmp_for_static_init< kmp_int64 >(
774 loc, gtid, schedtype, plastiter, plower, pupper, pstride, incr, chunk );
// 64-bit unsigned variant.
782 kmp_uint64 *plower, kmp_uint64 *pupper,
783 kmp_int64 *pstride, kmp_int64 incr, kmp_int64 chunk )
785 __kmp_for_static_init< kmp_uint64 >(
786 loc, gtid, schedtype, plastiter, plower, pupper, pstride, incr, chunk );
// C entry points __kmpc_dist_for_static_init_{4,4u,8,8u}: thin wrappers that
// instantiate __kmp_dist_for_static_init per loop-index type and forward all
// arguments unchanged.
// NOTE(review): the function-name/return-type lines were lost in extraction.
816 ident_t *loc, kmp_int32 gtid, kmp_int32 schedule, kmp_int32 *plastiter,
817 kmp_int32 *plower, kmp_int32 *pupper, kmp_int32 *pupperD,
818 kmp_int32 *pstride, kmp_int32 incr, kmp_int32 chunk )
820 __kmp_dist_for_static_init< kmp_int32 >(
821 loc, gtid, schedule, plastiter, plower, pupper, pupperD, pstride, incr, chunk );
// 32-bit unsigned variant.
829 ident_t *loc, kmp_int32 gtid, kmp_int32 schedule, kmp_int32 *plastiter,
830 kmp_uint32 *plower, kmp_uint32 *pupper, kmp_uint32 *pupperD,
831 kmp_int32 *pstride, kmp_int32 incr, kmp_int32 chunk )
833 __kmp_dist_for_static_init< kmp_uint32 >(
834 loc, gtid, schedule, plastiter, plower, pupper, pupperD, pstride, incr, chunk );
// 64-bit signed variant.
842 ident_t *loc, kmp_int32 gtid, kmp_int32 schedule, kmp_int32 *plastiter,
843 kmp_int64 *plower, kmp_int64 *pupper, kmp_int64 *pupperD,
844 kmp_int64 *pstride, kmp_int64 incr, kmp_int64 chunk )
846 __kmp_dist_for_static_init< kmp_int64 >(
847 loc, gtid, schedule, plastiter, plower, pupper, pupperD, pstride, incr, chunk );
// 64-bit unsigned variant.
855 ident_t *loc, kmp_int32 gtid, kmp_int32 schedule, kmp_int32 *plastiter,
856 kmp_uint64 *plower, kmp_uint64 *pupper, kmp_uint64 *pupperD,
857 kmp_int64 *pstride, kmp_int64 incr, kmp_int64 chunk )
859 __kmp_dist_for_static_init< kmp_uint64 >(
860 loc, gtid, schedule, plastiter, plower, pupper, pupperD, pstride, incr, chunk );
// C entry points __kmpc_team_static_init_{4,4u,8,8u}: assert that serial
// initialization has run, then instantiate __kmp_team_static_init per
// loop-index type, forwarding all arguments unchanged.
// NOTE(review): the function-name/return-type lines were lost in extraction.
893 ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last,
894 kmp_int32 *p_lb, kmp_int32 *p_ub, kmp_int32 *p_st, kmp_int32 incr, kmp_int32 chunk )
896 KMP_DEBUG_ASSERT( __kmp_init_serial );
897 __kmp_team_static_init< kmp_int32 >( loc, gtid, p_last, p_lb, p_ub, p_st, incr, chunk );
// 32-bit unsigned variant.
905 ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last,
906 kmp_uint32 *p_lb, kmp_uint32 *p_ub, kmp_int32 *p_st, kmp_int32 incr, kmp_int32 chunk )
908 KMP_DEBUG_ASSERT( __kmp_init_serial );
909 __kmp_team_static_init< kmp_uint32 >( loc, gtid, p_last, p_lb, p_ub, p_st, incr, chunk );
// 64-bit signed variant.
917 ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last,
918 kmp_int64 *p_lb, kmp_int64 *p_ub, kmp_int64 *p_st, kmp_int64 incr, kmp_int64 chunk )
920 KMP_DEBUG_ASSERT( __kmp_init_serial );
921 __kmp_team_static_init< kmp_int64 >( loc, gtid, p_last, p_lb, p_ub, p_st, incr, chunk );
// 64-bit unsigned variant.
929 ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last,
930 kmp_uint64 *p_lb, kmp_uint64 *p_ub, kmp_int64 *p_st, kmp_int64 incr, kmp_int64 chunk )
932 KMP_DEBUG_ASSERT( __kmp_init_serial );
933 __kmp_team_static_init< kmp_uint64 >( loc, gtid, p_last, p_lb, p_ub, p_st, incr, chunk );
void __kmpc_team_static_init_8(ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last, kmp_int64 *p_lb, kmp_int64 *p_ub, kmp_int64 *p_st, kmp_int64 incr, kmp_int64 chunk)
void __kmpc_dist_for_static_init_4u(ident_t *loc, kmp_int32 gtid, kmp_int32 schedule, kmp_int32 *plastiter, kmp_uint32 *plower, kmp_uint32 *pupper, kmp_uint32 *pupperD, kmp_int32 *pstride, kmp_int32 incr, kmp_int32 chunk)
#define KMP_TIME_BLOCK(name)
Uses the specified timer (name) to time the enclosing code block.
#define KMP_COUNT_VALUE(name, value)
Adds the given value to the specified timer (name).
void __kmpc_team_static_init_4(ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last, kmp_int32 *p_lb, kmp_int32 *p_ub, kmp_int32 *p_st, kmp_int32 incr, kmp_int32 chunk)
void __kmpc_for_static_init_8(ident_t *loc, kmp_int32 gtid, kmp_int32 schedtype, kmp_int32 *plastiter, kmp_int64 *plower, kmp_int64 *pupper, kmp_int64 *pstride, kmp_int64 incr, kmp_int64 chunk)
#define KMP_COUNT_BLOCK(name)
Increments the specified counter (name).
void __kmpc_team_static_init_4u(ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last, kmp_uint32 *p_lb, kmp_uint32 *p_ub, kmp_int32 *p_st, kmp_int32 incr, kmp_int32 chunk)
void __kmpc_for_static_init_4u(ident_t *loc, kmp_int32 gtid, kmp_int32 schedtype, kmp_int32 *plastiter, kmp_uint32 *plower, kmp_uint32 *pupper, kmp_int32 *pstride, kmp_int32 incr, kmp_int32 chunk)
void __kmpc_dist_for_static_init_4(ident_t *loc, kmp_int32 gtid, kmp_int32 schedule, kmp_int32 *plastiter, kmp_int32 *plower, kmp_int32 *pupper, kmp_int32 *pupperD, kmp_int32 *pstride, kmp_int32 incr, kmp_int32 chunk)
void __kmpc_team_static_init_8u(ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last, kmp_uint64 *p_lb, kmp_uint64 *p_ub, kmp_int64 *p_st, kmp_int64 incr, kmp_int64 chunk)
void __kmpc_dist_for_static_init_8u(ident_t *loc, kmp_int32 gtid, kmp_int32 schedule, kmp_int32 *plastiter, kmp_uint64 *plower, kmp_uint64 *pupper, kmp_uint64 *pupperD, kmp_int64 *pstride, kmp_int64 incr, kmp_int64 chunk)
void __kmpc_for_static_init_4(ident_t *loc, kmp_int32 gtid, kmp_int32 schedtype, kmp_int32 *plastiter, kmp_int32 *plower, kmp_int32 *pupper, kmp_int32 *pstride, kmp_int32 incr, kmp_int32 chunk)
void __kmpc_dist_for_static_init_8(ident_t *loc, kmp_int32 gtid, kmp_int32 schedule, kmp_int32 *plastiter, kmp_int64 *plower, kmp_int64 *pupper, kmp_int64 *pupperD, kmp_int64 *pstride, kmp_int64 incr, kmp_int64 chunk)
void __kmpc_for_static_init_8u(ident_t *loc, kmp_int32 gtid, kmp_int32 schedtype, kmp_int32 *plastiter, kmp_uint64 *plower, kmp_uint64 *pupper, kmp_int64 *pstride, kmp_int64 incr, kmp_int64 chunk)