#include "ompt-specific.h"

char const *traits_t<int>::spec = "d";
char const *traits_t<unsigned int>::spec = "u";
char const *traits_t<long long>::spec = "lld";
char const *traits_t<unsigned long long>::spec = "llu";
char const *traits_t<long>::spec = "ld";
// Count the loop's trip count into `stat` and stop the partitioned timer.
#if KMP_STATS_ENABLED
#define KMP_STATS_LOOP_END(stat)                                               \
  {                                                                            \
    kmp_int64 t;                                                               \
    kmp_int64 u = (kmp_int64)(*pupper);                                        \
    kmp_int64 l = (kmp_int64)(*plower);                                        \
    kmp_int64 i = (kmp_int64)incr;                                             \
    if (i == 1) {                                                              \
      t = u - l + 1;                                                           \
    } else if (i == -1) {                                                      \
      t = l - u + 1;                                                           \
    } else if (i > 0) {                                                        \
      t = (u - l) / i + 1;                                                     \
    } else {                                                                   \
      t = (l - u) / (-i) + 1;                                                  \
    }                                                                          \
    KMP_COUNT_VALUE(stat, t);                                                  \
    KMP_POP_PARTITIONED_TIMER();                                               \
  }
#else
#define KMP_STATS_LOOP_END(stat) /* Nothing */
#endif
static ident_t loc_stub = {0, KMP_IDENT_KMPC, 0, 0, ";unknown;unknown;0;0;;"};
static inline void check_loc(ident_t *&loc) {
  if (loc == NULL)
    loc = &loc_stub; // may need to report location info to ittnotify
}
template <typename T>
static void __kmp_for_static_init(ident_t *loc, kmp_int32 global_tid,
                                  kmp_int32 schedtype, kmp_int32 *plastiter,
                                  T *plower, T *pupper,
                                  typename traits_t<T>::signed_t *pstride,
                                  typename traits_t<T>::signed_t incr,
                                  typename traits_t<T>::signed_t chunk
#if OMPT_SUPPORT && OMPT_OPTIONAL
                                  ,
                                  void *codeptr
#endif
) {
  KMP_PUSH_PARTITIONED_TIMER(OMP_loop_static);
  KMP_PUSH_PARTITIONED_TIMER(OMP_loop_static_scheduling);
  typedef typename traits_t<T>::unsigned_t UT;
  typedef typename traits_t<T>::signed_t ST;
  kmp_int32 gtid = global_tid;
  kmp_uint32 tid;
  kmp_uint32 nth;
  UT trip_count;
  kmp_team_t *team;
  __kmp_assert_valid_gtid(gtid);
  kmp_info_t *th = __kmp_threads[gtid];
#if OMPT_SUPPORT && OMPT_OPTIONAL
  ompt_team_info_t *team_info = NULL;
  ompt_task_info_t *task_info = NULL;
  ompt_work_t ompt_work_type = ompt_work_loop;

  static kmp_int8 warn = 0;

  if (ompt_enabled.ompt_callback_work || ompt_enabled.ompt_callback_dispatch) {
    // Only fully initialize variables needed by OMPT if OMPT is enabled.
    team_info = __ompt_get_teaminfo(0, NULL);
    task_info = __ompt_get_task_info_object(0);
    // Determine workshare type from the ident flags set by the compiler.
    if (loc != NULL) {
      if ((loc->flags & KMP_IDENT_WORK_LOOP) != 0) {
        ompt_work_type = ompt_work_loop;
      } else if ((loc->flags & KMP_IDENT_WORK_SECTIONS) != 0) {
        ompt_work_type = ompt_work_sections;
      } else if ((loc->flags & KMP_IDENT_WORK_DISTRIBUTE) != 0) {
        ompt_work_type = ompt_work_distribute;
      } else {
        // Use the default set above; warn only once about outdated ident flags.
        kmp_int8 bool_res =
            KMP_COMPARE_AND_STORE_ACQ8(&warn, (kmp_int8)0, (kmp_int8)1);
        if (bool_res)
          KMP_WARNING(OmptOutdatedWorkshare);
      }
      KMP_DEBUG_ASSERT(ompt_work_type);
    }
  }
#endif
  KMP_DEBUG_ASSERT(plastiter && plower && pupper && pstride);
  KE_TRACE(10, ("__kmpc_for_static_init called (%d)\n", global_tid));
#ifdef KMP_DEBUG
  {
    char *buff;
    // create format specifiers before the debug output
    buff = __kmp_str_format(
        "__kmpc_for_static_init: T#%%d sched=%%d liter=%%d iter=(%%%s,"
        " %%%s, %%%s) incr=%%%s chunk=%%%s signed?<%s>\n",
        traits_t<T>::spec, traits_t<T>::spec, traits_t<ST>::spec,
        traits_t<ST>::spec, traits_t<ST>::spec, traits_t<T>::spec);
    KD_TRACE(100, (buff, global_tid, schedtype, *plastiter, *plower, *pupper,
                   *pstride, incr, chunk));
    __kmp_str_free(&buff);
  }
#endif
  if (__kmp_env_consistency_check) {
    __kmp_push_workshare(global_tid, ct_pdo, loc);
    if (incr == 0) {
      __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrZeroProhibited, ct_pdo,
                            loc);
    }
  }
  /* special handling for zero-trip loops */
  if (incr > 0 ? (*pupper < *plower) : (*plower < *pupper)) {
    if (plastiter != NULL)
      *plastiter = FALSE;
    /* leave pupper and plower set to entire iteration space */
    *pstride = incr; /* value should never be used */
#ifdef KMP_DEBUG
    {
      char *buff;
      // create format specifiers before the debug output
      buff = __kmp_str_format("__kmpc_for_static_init:(ZERO TRIP) liter=%%d "
                              "lower=%%%s upper=%%%s stride = %%%s "
                              "signed?<%s>, loc = %%s\n",
                              traits_t<T>::spec, traits_t<T>::spec,
                              traits_t<ST>::spec, traits_t<T>::spec);
      check_loc(loc);
      KD_TRACE(100,
               (buff, *plastiter, *plower, *pupper, *pstride, loc->psource));
      __kmp_str_free(&buff);
    }
#endif
    KE_TRACE(10, ("__kmpc_for_static_init: T#%d return\n", global_tid));

#if OMPT_SUPPORT && OMPT_OPTIONAL
    if (ompt_enabled.ompt_callback_work) {
      ompt_callbacks.ompt_callback(ompt_callback_work)(
          ompt_work_type, ompt_scope_begin, &(team_info->parallel_data),
          &(task_info->task_data), 0, codeptr);
    }
#endif
    KMP_STATS_LOOP_END(OMP_loop_static_iterations);
    return;
  }
  // Schedule enumerations above kmp_ord_upper indicate a "distribute"
  // construct; convert those to the corresponding static schedule and pick
  // the team/tid that the distribution applies to.
  if (schedtype > kmp_ord_upper) {
    // we are in DISTRIBUTE construct
    schedtype += kmp_sch_static -
                 kmp_distribute_static; // convert to usual schedule type
    if (th->th.th_team->t.t_serialized > 1) {
      tid = 0;
      team = th->th.th_team;
    } else {
      tid = th->th.th_team->t.t_master_tid;
      team = th->th.th_team->t.t_parent;
    }
  } else {
    tid = __kmp_tid_from_gtid(global_tid);
    team = th->th.th_team;
  }
  /* determine if "for" loop is an active worksharing construct */
  if (team->t.t_serialized) {
    /* serialized parallel, each thread executes whole iteration space */
    if (plastiter != NULL)
      *plastiter = TRUE;
    /* leave pupper and plower set to entire iteration space */
    *pstride =
        (incr > 0) ? (*pupper - *plower + 1) : (-(*plower - *pupper + 1));

#ifdef KMP_DEBUG
    {
      char *buff;
      // create format specifiers before the debug output
      buff = __kmp_str_format("__kmpc_for_static_init: (serial) liter=%%d "
                              "lower=%%%s upper=%%%s stride = %%%s\n",
                              traits_t<T>::spec, traits_t<T>::spec,
                              traits_t<ST>::spec);
      KD_TRACE(100, (buff, *plastiter, *plower, *pupper, *pstride));
      __kmp_str_free(&buff);
    }
#endif
    KE_TRACE(10, ("__kmpc_for_static_init: T#%d return\n", global_tid));

#if OMPT_SUPPORT && OMPT_OPTIONAL
    if (ompt_enabled.ompt_callback_work) {
      ompt_callbacks.ompt_callback(ompt_callback_work)(
          ompt_work_type, ompt_scope_begin, &(team_info->parallel_data),
          &(task_info->task_data), *pstride, codeptr);
    }
#endif
    KMP_STATS_LOOP_END(OMP_loop_static_iterations);
    return;
  }
  nth = team->t.t_nproc;
  if (nth == 1) {
    if (plastiter != NULL)
      *plastiter = TRUE;
    *pstride =
        (incr > 0) ? (*pupper - *plower + 1) : (-(*plower - *pupper + 1));
#ifdef KMP_DEBUG
    {
      char *buff;
      // create format specifiers before the debug output
      buff = __kmp_str_format("__kmpc_for_static_init: (serial) liter=%%d "
                              "lower=%%%s upper=%%%s stride = %%%s\n",
                              traits_t<T>::spec, traits_t<T>::spec,
                              traits_t<ST>::spec);
      KD_TRACE(100, (buff, *plastiter, *plower, *pupper, *pstride));
      __kmp_str_free(&buff);
    }
#endif
    KE_TRACE(10, ("__kmpc_for_static_init: T#%d return\n", global_tid));

#if OMPT_SUPPORT && OMPT_OPTIONAL
    if (ompt_enabled.ompt_callback_work) {
      ompt_callbacks.ompt_callback(ompt_callback_work)(
          ompt_work_type, ompt_scope_begin, &(team_info->parallel_data),
          &(task_info->task_data), *pstride, codeptr);
    }
#endif
    KMP_STATS_LOOP_END(OMP_loop_static_iterations);
    return;
  }
  /* compute trip count */
  if (incr == 1) {
    trip_count = *pupper - *plower + 1;
  } else if (incr == -1) {
    trip_count = *plower - *pupper + 1;
  } else if (incr > 0) {
    // upper-lower can exceed the limit of signed type
    trip_count = (UT)(*pupper - *plower) / incr + 1;
  } else {
    trip_count = (UT)(*plower - *pupper) / (-incr) + 1;
  }
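  // Worked example (illustrative): lower = 0, upper = 9, incr = 3 runs over
  // {0, 3, 6, 9}, and the formula above gives trip_count = (9 - 0) / 3 + 1
  // = 4. The cast to the unsigned type UT matters because upper - lower may
  // overflow the signed type T.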
#if KMP_STATS_ENABLED
  if (KMP_MASTER_GTID(gtid)) {
    KMP_COUNT_VALUE(FOR_static_iterations, trip_count);
  }
#endif

  if (__kmp_env_consistency_check) {
    /* tripcount overflow? */
    if (trip_count == 0 && *pupper != *plower) {
      __kmp_error_construct(kmp_i18n_msg_CnsIterationRangeTooLarge, ct_pdo,
                            loc);
    }
  }
  /* compute remaining parameters */
  switch (schedtype) {
  case kmp_sch_static: {
    if (trip_count < nth) {
      KMP_DEBUG_ASSERT(
          __kmp_static == kmp_sch_static_greedy ||
          __kmp_static ==
              kmp_sch_static_balanced); // Unknown static scheduling type.
      if (tid < trip_count) {
        *pupper = *plower = *plower + tid * incr;
      } else {
        // set bounds so non-active threads execute no iterations
        *plower = *pupper + (incr > 0 ? 1 : -1);
      }
      if (plastiter != NULL)
        *plastiter = (tid == trip_count - 1);
    } else {
      if (__kmp_static == kmp_sch_static_balanced) {
        UT small_chunk = trip_count / nth;
        UT extras = trip_count % nth;
        *plower += incr * (tid * small_chunk + (tid < extras ? tid : extras));
        *pupper = *plower + small_chunk * incr - (tid < extras ? 0 : incr);
        if (plastiter != NULL)
          *plastiter = (tid == nth - 1);
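        // Worked example (illustrative): trip_count = 10, nth = 4 gives
        // small_chunk = 2, extras = 2, so tids 0..3 receive 3, 3, 2 and 2
        // iterations; with incr = 1 starting at 0 the ranges are [0,2],
        // [3,5], [6,7], [8,9]. tids below extras absorb one remainder
        // iteration each.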
      } else {
        T big_chunk_inc_count =
            (trip_count / nth + ((trip_count % nth) ? 1 : 0)) * incr;
        T old_upper = *pupper;

        KMP_DEBUG_ASSERT(__kmp_static == kmp_sch_static_greedy);
        // Unknown static scheduling type.

        *plower += tid * big_chunk_inc_count;
        *pupper = *plower + big_chunk_inc_count - incr;
        if (incr > 0) {
          if (*pupper < *plower)
            *pupper = traits_t<T>::max_value;
          if (plastiter != NULL)
            *plastiter = *plower <= old_upper && *pupper > old_upper - incr;
          if (*pupper > old_upper)
            *pupper = old_upper;
        } else {
          if (*pupper > *plower)
            *pupper = traits_t<T>::min_value;
          if (plastiter != NULL)
            *plastiter = *plower >= old_upper && *pupper < old_upper - incr;
          if (*pupper < old_upper)
            *pupper = old_upper;
        }
      }
    }
    *pstride = trip_count;
    break;
  }
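  // In the greedy branch above each thread takes a contiguous block of
  // ceil(trip_count / nth) iterations. Illustrative example: trip_count = 10,
  // nth = 4, incr = 1 gives big_chunk_inc_count = 3 and raw ranges [0,2],
  // [3,5], [6,8], [9,11]; the bound checks clamp the last range back to the
  // original upper bound 9.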
  case kmp_sch_static_chunked: {
    ST span;
    UT nchunks;
    if (chunk < 1)
      chunk = 1;
    else if ((UT)chunk > trip_count)
      chunk = trip_count;
    nchunks = (trip_count) / (UT)chunk + (trip_count % (UT)chunk ? 1 : 0);
    span = chunk * incr;
    if (nchunks < nth) {
      *pstride = span * nchunks;
      if (tid < nchunks) {
        *plower = *plower + (span * tid);
        *pupper = *plower + span - incr;
      } else {
        *plower = *pupper + (incr > 0 ? 1 : -1);
      }
    } else {
      *pstride = span * nth;
      *plower = *plower + (span * tid);
      *pupper = *plower + span - incr;
    }
    if (plastiter != NULL)
      *plastiter = (tid == (nchunks - 1) % nth);
    break;
  }
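  // With chunked scheduling each thread starts at its tid-th chunk and then
  // advances by *pstride. Illustrative example: chunk = 2, nth = 3, incr = 1,
  // iterations 0..9 (nchunks = 5): tid 0 owns [0,1] and [6,7], tid 1 owns
  // [2,3] and [8,9], tid 2 owns [4,5]; the last chunk has index nchunks - 1,
  // so (nchunks - 1) % nth = 1 selects tid 1 as the last-iteration owner.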
  case kmp_sch_static_balanced_chunked: {
    T old_upper = *pupper;
    // round up to make sure the chunk is enough to cover all iterations
    UT span = (trip_count + nth - 1) / nth;

    // perform chunk adjustment
    chunk = (span + chunk - 1) & ~(chunk - 1);
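    // The masking step rounds span up to a multiple of chunk and assumes
    // chunk is a power of two. Illustrative example: span = 10, chunk = 4
    // gives (10 + 3) & ~3 = 12.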
    span = chunk * incr;
    *plower = *plower + (span * tid);
    *pupper = *plower + span - incr;
    if (incr > 0) {
      if (*pupper > old_upper)
        *pupper = old_upper;
    } else if (*pupper < old_upper)
      *pupper = old_upper;

    if (plastiter != NULL)
      *plastiter = (tid == ((trip_count - 1) / (UT)chunk));
    break;
  }
  default:
    KMP_ASSERT2(0, "__kmpc_for_static_init: unknown scheduling type");
    break;
  }
#if USE_ITT_BUILD
  // Report loop metadata
  if (KMP_MASTER_TID(tid) && __itt_metadata_add_ptr &&
      __kmp_forkjoin_frames_mode == 3 && th->th.th_teams_microtask == NULL &&
      team->t.t_active_level == 1) {
    kmp_uint64 cur_chunk = chunk;
    check_loc(loc);
    // Calculate chunk in case it was not specified; it is specified for
    // kmp_sch_static_chunked
    if (schedtype == kmp_sch_static) {
      cur_chunk = trip_count / nth + ((trip_count % nth) ? 1 : 0);
    }
    // 0 - "static" schedule
    __kmp_itt_metadata_loop(loc, 0, trip_count, cur_chunk);
  }
#endif
#ifdef KMP_DEBUG
  {
    char *buff;
    // create format specifiers before the debug output
    buff = __kmp_str_format("__kmpc_for_static_init: liter=%%d lower=%%%s "
                            "upper=%%%s stride = %%%s signed?<%s>\n",
                            traits_t<T>::spec, traits_t<T>::spec,
                            traits_t<ST>::spec, traits_t<T>::spec);
    KD_TRACE(100, (buff, *plastiter, *plower, *pupper, *pstride));
    __kmp_str_free(&buff);
  }
#endif
  KE_TRACE(10, ("__kmpc_for_static_init: T#%d return\n", global_tid));
#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.ompt_callback_work) {
    ompt_callbacks.ompt_callback(ompt_callback_work)(
        ompt_work_type, ompt_scope_begin, &(team_info->parallel_data),
        &(task_info->task_data), trip_count, codeptr);
  }
  if (ompt_enabled.ompt_callback_dispatch) {
    ompt_dispatch_t dispatch_type;
    ompt_data_t instance = ompt_data_none;
    ompt_dispatch_chunk_t dispatch_chunk;
    if (ompt_work_type == ompt_work_sections) {
      dispatch_type = ompt_dispatch_section;
      instance.ptr = codeptr;
    } else {
      OMPT_GET_DISPATCH_CHUNK(dispatch_chunk, *plower, *pupper, incr);
      dispatch_type = (ompt_work_type == ompt_work_distribute)
                          ? ompt_dispatch_distribute_chunk
                          : ompt_dispatch_ws_loop_chunk;
      instance.ptr = &dispatch_chunk;
    }
    ompt_callbacks.ompt_callback(ompt_callback_dispatch)(
        &(team_info->parallel_data), &(task_info->task_data), dispatch_type,
        instance);
  }
#endif

  KMP_STATS_LOOP_END(OMP_loop_static_iterations);
  return;
}
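// Illustrative lowering sketch (not part of this file): a compiler typically
// translates
//   #pragma omp for schedule(static)
//   for (int i = 0; i < n; ++i) body(i);
// into a call to the 32-bit entry point defined below, roughly:
//   kmp_int32 last = 0, lb = 0, ub = n - 1, st = 1;
//   __kmpc_for_static_init_4(&loc, gtid, kmp_sch_static, &last, &lb, &ub,
//                            &st, 1, 0);
//   for (kmp_int32 i = lb; i <= ub; ++i) body(i);
//   __kmpc_for_static_fini(&loc, gtid);
// Exact calling sequences vary by compiler and schedule kind.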
template <typename T>
static void __kmp_dist_for_static_init(ident_t *loc, kmp_int32 gtid,
                                       kmp_int32 schedule, kmp_int32 *plastiter,
                                       T *plower, T *pupper, T *pupperDist,
                                       typename traits_t<T>::signed_t *pstride,
                                       typename traits_t<T>::signed_t incr,
                                       typename traits_t<T>::signed_t chunk
#if OMPT_SUPPORT && OMPT_OPTIONAL
                                       ,
                                       void *codeptr
#endif
) {
  KMP_PUSH_PARTITIONED_TIMER(OMP_distribute);
  KMP_PUSH_PARTITIONED_TIMER(OMP_distribute_scheduling);
  typedef typename traits_t<T>::unsigned_t UT;
  typedef typename traits_t<T>::signed_t ST;
  kmp_uint32 tid;
  kmp_uint32 nth;
  kmp_uint32 team_id;
  kmp_uint32 nteams;
  UT trip_count;
  kmp_team_t *team;
  kmp_info_t *th;
  KMP_DEBUG_ASSERT(plastiter && plower && pupper && pupperDist && pstride);
  KE_TRACE(10, ("__kmpc_dist_for_static_init called (%d)\n", gtid));
  __kmp_assert_valid_gtid(gtid);
#ifdef KMP_DEBUG
  {
    char *buff;
    // create format specifiers before the debug output
    buff = __kmp_str_format(
        "__kmpc_dist_for_static_init: T#%%d schedLoop=%%d liter=%%d "
        "iter=(%%%s, %%%s, %%%s) chunk=%%%s signed?<%s>\n",
        traits_t<T>::spec, traits_t<T>::spec, traits_t<ST>::spec,
        traits_t<ST>::spec, traits_t<T>::spec);
    KD_TRACE(100,
             (buff, gtid, schedule, *plastiter, *plower, *pupper, incr, chunk));
    __kmp_str_free(&buff);
  }
#endif
  if (__kmp_env_consistency_check) {
    __kmp_push_workshare(gtid, ct_pdo, loc);
    if (incr == 0) {
      __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrZeroProhibited, ct_pdo,
                            loc);
    }
    if (incr > 0 ? (*pupper < *plower) : (*plower < *pupper)) {
      // The loop is illegal; zero-trip loops are normally handled by the
      // compiler, so this is a run-time check for the remaining cases.
      __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrIllegal, ct_pdo, loc);
    }
  }
  tid = __kmp_tid_from_gtid(gtid);
  th = __kmp_threads[gtid];
  nth = th->th.th_team_nproc;
  team = th->th.th_team;
  KMP_DEBUG_ASSERT(th->th.th_teams_microtask);
  nteams = th->th.th_teams_size.nteams;
  team_id = team->t.t_master_tid;
  KMP_DEBUG_ASSERT(nteams == (kmp_uint32)team->t.t_parent->t.t_nproc);
  // compute global trip count
  if (incr == 1) {
    trip_count = *pupper - *plower + 1;
  } else if (incr == -1) {
    trip_count = *plower - *pupper + 1;
  } else if (incr > 0) {
    // upper-lower can exceed the limit of signed type
    trip_count = (UT)(*pupper - *plower) / incr + 1;
  } else {
    trip_count = (UT)(*plower - *pupper) / (-incr) + 1;
  }
  *pstride = *pupper - *plower; // default; overwritten by the chunked schedule
  if (trip_count <= nteams) {
    KMP_DEBUG_ASSERT(
        __kmp_static == kmp_sch_static_greedy ||
        __kmp_static ==
            kmp_sch_static_balanced); // Unknown static scheduling type.
    // only primary threads of some teams get single iteration, other threads
    // get nothing
    if (team_id < trip_count && tid == 0) {
      *pupper = *pupperDist = *plower = *plower + team_id * incr;
    } else {
      *pupperDist = *pupper;
      *plower = *pupper + incr; // compiler should skip loop body
    }
    if (plastiter != NULL)
      *plastiter = (tid == 0 && team_id == trip_count - 1);
  } else {
    // Get the team's chunk first (each team gets at most one chunk)
    if (__kmp_static == kmp_sch_static_balanced) {
      UT chunkD = trip_count / nteams;
      UT extras = trip_count % nteams;
      *plower +=
          incr * (team_id * chunkD + (team_id < extras ? team_id : extras));
      *pupperDist = *plower + chunkD * incr - (team_id < extras ? 0 : incr);
      if (plastiter != NULL)
        *plastiter = (team_id == nteams - 1);
    } else {
      T chunk_inc_count =
          (trip_count / nteams + ((trip_count % nteams) ? 1 : 0)) * incr;
      T upper = *pupper;
      KMP_DEBUG_ASSERT(__kmp_static == kmp_sch_static_greedy);
      // Unknown static scheduling type.
      *plower += team_id * chunk_inc_count;
      *pupperDist = *plower + chunk_inc_count - incr;
      // Check/correct bounds if needed
      if (incr > 0) {
        if (*pupperDist < *plower)
          *pupperDist = traits_t<T>::max_value;
        if (plastiter != NULL)
          *plastiter = *plower <= upper && *pupperDist > upper - incr;
        if (*pupperDist > upper)
          *pupperDist = upper;
        if (*plower > *pupperDist) {
          *pupper = *pupperDist; // no iterations available
          goto end;
        }
      } else {
        if (*pupperDist > *plower)
          *pupperDist = traits_t<T>::min_value;
        if (plastiter != NULL)
          *plastiter = *plower >= upper && *pupperDist < upper - incr;
        if (*pupperDist < upper)
          *pupperDist = upper;
        if (*plower < *pupperDist) {
          *pupper = *pupperDist; // no iterations available
          goto end;
        }
      }
    }
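    // Worked example (illustrative): trip_count = 100, nteams = 2, incr = 1
    // under balanced scheduling gives chunkD = 50, extras = 0, so team 0
    // covers [0,49] and team 1 covers [50,99]; each team's range
    // [*plower, *pupperDist] is then subdivided among its threads below.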
    // Get the parallel loop chunk now (for thread)
    // compute trip count for team's chunk
    if (incr == 1) {
      trip_count = *pupperDist - *plower + 1;
    } else if (incr == -1) {
      trip_count = *plower - *pupperDist + 1;
    } else if (incr > 1) {
      // upper-lower can exceed the limit of signed type
      trip_count = (UT)(*pupperDist - *plower) / incr + 1;
    } else {
      trip_count = (UT)(*plower - *pupperDist) / (-incr) + 1;
    }
    KMP_DEBUG_ASSERT(trip_count);
    switch (schedule) {
    case kmp_sch_static: {
      if (trip_count <= nth) {
        KMP_DEBUG_ASSERT(
            __kmp_static == kmp_sch_static_greedy ||
            __kmp_static ==
                kmp_sch_static_balanced); // Unknown static scheduling type.
        if (tid < trip_count)
          *pupper = *plower = *plower + tid * incr;
        else
          *plower = *pupper + incr; // no iterations available
        if (plastiter != NULL)
          if (*plastiter != 0 && !(tid == trip_count - 1))
            *plastiter = 0;
      } else {
        if (__kmp_static == kmp_sch_static_balanced) {
          UT chunkL = trip_count / nth;
          UT extras = trip_count % nth;
          *plower += incr * (tid * chunkL + (tid < extras ? tid : extras));
          *pupper = *plower + chunkL * incr - (tid < extras ? 0 : incr);
          if (plastiter != NULL)
            if (*plastiter != 0 && !(tid == nth - 1))
              *plastiter = 0;
        } else {
          T chunk_inc_count =
              (trip_count / nth + ((trip_count % nth) ? 1 : 0)) * incr;
          T upper = *pupperDist;
          KMP_DEBUG_ASSERT(__kmp_static == kmp_sch_static_greedy);
          // Unknown static scheduling type.
          *plower += tid * chunk_inc_count;
          *pupper = *plower + chunk_inc_count - incr;
          if (incr > 0) {
            if (*pupper < *plower)
              *pupper = traits_t<T>::max_value;
            if (plastiter != NULL)
              if (*plastiter != 0 &&
                  !(*plower <= upper && *pupper > upper - incr))
                *plastiter = 0;
            if (*pupper > upper)
              *pupper = upper;
          } else {
            if (*pupper > *plower)
              *pupper = traits_t<T>::min_value;
            if (plastiter != NULL)
              if (*plastiter != 0 &&
                  !(*plower >= upper && *pupper < upper - incr))
                *plastiter = 0;
            if (*pupper < upper)
              *pupper = upper;
          }
        }
      }
      break;
    }
    case kmp_sch_static_chunked: {
      ST span;
      if (chunk < 1)
        chunk = 1;
      span = chunk * incr;
      *pstride = span * nth;
      *plower = *plower + (span * tid);
      *pupper = *plower + span - incr;
      if (plastiter != NULL)
        if (*plastiter != 0 && !(tid == ((trip_count - 1) / (UT)chunk) % nth))
          *plastiter = 0;
      break;
    }
    default:
      KMP_ASSERT2(
          0, "__kmpc_dist_for_static_init: unknown loop scheduling type");
      break;
    }
  }
end:;
#ifdef KMP_DEBUG
  {
    char *buff;
    // create format specifiers before the debug output
    buff = __kmp_str_format(
        "__kmpc_dist_for_static_init: last=%%d lo=%%%s up=%%%s upDist=%%%s "
        "stride=%%%s signed?<%s>\n",
        traits_t<T>::spec, traits_t<T>::spec, traits_t<T>::spec,
        traits_t<ST>::spec, traits_t<T>::spec);
    KD_TRACE(100, (buff, *plastiter, *plower, *pupper, *pupperDist, *pstride));
    __kmp_str_free(&buff);
  }
#endif
  KE_TRACE(10, ("__kmpc_dist_for_static_init: T#%d return\n", gtid));
#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.ompt_callback_work || ompt_enabled.ompt_callback_dispatch) {
    ompt_team_info_t *team_info = __ompt_get_teaminfo(0, NULL);
    ompt_task_info_t *task_info = __ompt_get_task_info_object(0);
    if (ompt_enabled.ompt_callback_work) {
      ompt_callbacks.ompt_callback(ompt_callback_work)(
          ompt_work_distribute, ompt_scope_begin, &(team_info->parallel_data),
          &(task_info->task_data), 0, codeptr);
    }
    if (ompt_enabled.ompt_callback_dispatch) {
      ompt_data_t instance = ompt_data_none;
      ompt_dispatch_chunk_t dispatch_chunk;
      OMPT_GET_DISPATCH_CHUNK(dispatch_chunk, *plower, *pupperDist, incr);
      instance.ptr = &dispatch_chunk;
      ompt_callbacks.ompt_callback(ompt_callback_dispatch)(
          &(team_info->parallel_data), &(task_info->task_data),
          ompt_dispatch_distribute_chunk, instance);
    }
  }
#endif // OMPT_SUPPORT && OMPT_OPTIONAL
  KMP_STATS_LOOP_END(OMP_distribute_iterations);
  return;
}
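// Illustrative lowering sketch (not part of this file): for a combined
//   #pragma omp teams distribute parallel for dist_schedule(static)
// a compiler typically has each thread call one of the
// __kmpc_dist_for_static_init_* wrappers below, which return both the
// thread's bounds (*plower, *pupper) and the owning team's upper bound
// (*pupperD). Exact calling sequences vary by compiler.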
template <typename T>
static void __kmp_team_static_init(ident_t *loc, kmp_int32 gtid,
                                   kmp_int32 *p_last, T *p_lb, T *p_ub,
                                   typename traits_t<T>::signed_t *p_st,
                                   typename traits_t<T>::signed_t incr,
                                   typename traits_t<T>::signed_t chunk) {
  // The routine returns the first chunk distributed to the team and the
  // stride for computing the next chunks. The last-iteration flag is set for
  // the team that will execute the last iteration of the loop.
  // The routine is called for dist_schedule(static, chunk) only.
  typedef typename traits_t<T>::unsigned_t UT;
  typedef typename traits_t<T>::signed_t ST;
  kmp_uint32 team_id;
  kmp_uint32 nteams;
  UT trip_count;
  T lower;
  T upper;
  ST span;
  kmp_team_t *team;
  kmp_info_t *th;
  KMP_DEBUG_ASSERT(p_last && p_lb && p_ub && p_st);
  KE_TRACE(10, ("__kmp_team_static_init called (%d)\n", gtid));
  __kmp_assert_valid_gtid(gtid);
#ifdef KMP_DEBUG
  {
    char *buff;
    // create format specifiers before the debug output
    buff = __kmp_str_format("__kmp_team_static_init enter: T#%%d liter=%%d "
                            "iter=(%%%s, %%%s, %%%s) chunk %%%s; signed?<%s>\n",
                            traits_t<T>::spec, traits_t<T>::spec,
                            traits_t<ST>::spec, traits_t<ST>::spec,
                            traits_t<T>::spec);
    KD_TRACE(100, (buff, gtid, *p_last, *p_lb, *p_ub, *p_st, chunk));
    __kmp_str_free(&buff);
  }
#endif
  lower = *p_lb;
  upper = *p_ub;
  if (__kmp_env_consistency_check) {
    if (incr == 0) {
      __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrZeroProhibited, ct_pdo,
                            loc);
    }
    if (incr > 0 ? (upper < lower) : (lower < upper)) {
      // The loop is illegal; zero-trip loops are normally handled by the
      // compiler, so this is a run-time check for the remaining cases.
      __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrIllegal, ct_pdo, loc);
    }
  }
  th = __kmp_threads[gtid];
  team = th->th.th_team;
  KMP_DEBUG_ASSERT(th->th.th_teams_microtask);
  nteams = th->th.th_teams_size.nteams;
  team_id = team->t.t_master_tid;
  KMP_DEBUG_ASSERT(nteams == (kmp_uint32)team->t.t_parent->t.t_nproc);
  // compute trip count
  if (incr == 1) {
    trip_count = upper - lower + 1;
  } else if (incr == -1) {
    trip_count = lower - upper + 1;
  } else if (incr > 0) {
    // upper-lower can exceed the limit of signed type
    trip_count = (UT)(upper - lower) / incr + 1;
  } else {
    trip_count = (UT)(lower - upper) / (-incr) + 1;
  }
  if (chunk < 1)
    chunk = 1;
  span = chunk * incr;
  *p_st = span * nteams;
  *p_lb = lower + (span * team_id);
  *p_ub = *p_lb + span - incr;
  if (p_last != NULL)
    *p_last = (team_id == ((trip_count - 1) / (UT)chunk) % nteams);
  // Correct upper bound if needed
  if (incr > 0) {
    if (*p_ub < *p_lb) // overflow?
      *p_ub = traits_t<T>::max_value;
    if (*p_ub > upper)
      *p_ub = upper;
  } else { // incr < 0
    if (*p_ub > *p_lb)
      *p_ub = traits_t<T>::min_value;
    if (*p_ub < upper)
      *p_ub = upper;
  }
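  // Worked example (illustrative): lower = 0, upper = 99, incr = 1,
  // chunk = 10, nteams = 4: span = 10 and *p_st = 40, so team 0 starts at
  // [0,9] and advances to [40,49], [80,89]. The last chunk is index
  // (100 - 1) / 10 = 9 and 9 % 4 == 1, so team 1 gets *p_last set.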
#ifdef KMP_DEBUG
  {
    char *buff;
    // create format specifiers before the debug output
    buff =
        __kmp_str_format("__kmp_team_static_init exit: T#%%d team%%u liter=%%d "
                         "iter=(%%%s, %%%s, %%%s) chunk %%%s\n",
                         traits_t<T>::spec, traits_t<T>::spec,
                         traits_t<ST>::spec, traits_t<ST>::spec);
    KD_TRACE(100, (buff, gtid, team_id, *p_last, *p_lb, *p_ub, *p_st, chunk));
    __kmp_str_free(&buff);
  }
#endif
}
// The _4/_4u/_8/_8u entry points below are identical apart from the argument
// types: each computes this thread's bounds and stride for a statically
// scheduled loop described by the incoming bounds, increment and chunk.
void __kmpc_for_static_init_4(ident_t *loc, kmp_int32 gtid,
                              kmp_int32 schedtype, kmp_int32 *plastiter,
                              kmp_int32 *plower, kmp_int32 *pupper,
                              kmp_int32 *pstride, kmp_int32 incr,
                              kmp_int32 chunk) {
  __kmp_for_static_init<kmp_int32>(loc, gtid, schedtype, plastiter, plower,
                                   pupper, pstride, incr, chunk
#if OMPT_SUPPORT && OMPT_OPTIONAL
                                   ,
                                   OMPT_GET_RETURN_ADDRESS(0)
#endif
  );
}
void __kmpc_for_static_init_4u(ident_t *loc, kmp_int32 gtid,
                               kmp_int32 schedtype, kmp_int32 *plastiter,
                               kmp_uint32 *plower, kmp_uint32 *pupper,
                               kmp_int32 *pstride, kmp_int32 incr,
                               kmp_int32 chunk) {
  __kmp_for_static_init<kmp_uint32>(loc, gtid, schedtype, plastiter, plower,
                                    pupper, pstride, incr, chunk
#if OMPT_SUPPORT && OMPT_OPTIONAL
                                    ,
                                    OMPT_GET_RETURN_ADDRESS(0)
#endif
  );
}
void __kmpc_for_static_init_8(ident_t *loc, kmp_int32 gtid,
                              kmp_int32 schedtype, kmp_int32 *plastiter,
                              kmp_int64 *plower, kmp_int64 *pupper,
                              kmp_int64 *pstride, kmp_int64 incr,
                              kmp_int64 chunk) {
  __kmp_for_static_init<kmp_int64>(loc, gtid, schedtype, plastiter, plower,
                                   pupper, pstride, incr, chunk
#if OMPT_SUPPORT && OMPT_OPTIONAL
                                   ,
                                   OMPT_GET_RETURN_ADDRESS(0)
#endif
  );
}
void __kmpc_for_static_init_8u(ident_t *loc, kmp_int32 gtid,
                               kmp_int32 schedtype, kmp_int32 *plastiter,
                               kmp_uint64 *plower, kmp_uint64 *pupper,
                               kmp_int64 *pstride, kmp_int64 incr,
                               kmp_int64 chunk) {
  __kmp_for_static_init<kmp_uint64>(loc, gtid, schedtype, plastiter, plower,
                                    pupper, pstride, incr, chunk
#if OMPT_SUPPORT && OMPT_OPTIONAL
                                    ,
                                    OMPT_GET_RETURN_ADDRESS(0)
#endif
  );
}
#if OMPT_SUPPORT && OMPT_OPTIONAL
#define OMPT_CODEPTR_ARG , OMPT_GET_RETURN_ADDRESS(0)
#else
#define OMPT_CODEPTR_ARG
#endif
// The __kmpc_dist_for_static_init_* wrappers additionally return the upper
// bound of this thread's team chunk via pupperD.
void __kmpc_dist_for_static_init_4(ident_t *loc, kmp_int32 gtid,
                                   kmp_int32 schedule, kmp_int32 *plastiter,
                                   kmp_int32 *plower, kmp_int32 *pupper,
                                   kmp_int32 *pupperD, kmp_int32 *pstride,
                                   kmp_int32 incr, kmp_int32 chunk) {
  __kmp_dist_for_static_init<kmp_int32>(loc, gtid, schedule, plastiter, plower,
                                        pupper, pupperD, pstride, incr,
                                        chunk OMPT_CODEPTR_ARG);
}
void __kmpc_dist_for_static_init_4u(ident_t *loc, kmp_int32 gtid,
                                    kmp_int32 schedule, kmp_int32 *plastiter,
                                    kmp_uint32 *plower, kmp_uint32 *pupper,
                                    kmp_uint32 *pupperD, kmp_int32 *pstride,
                                    kmp_int32 incr, kmp_int32 chunk) {
  __kmp_dist_for_static_init<kmp_uint32>(loc, gtid, schedule, plastiter, plower,
                                         pupper, pupperD, pstride, incr,
                                         chunk OMPT_CODEPTR_ARG);
}
void __kmpc_dist_for_static_init_8(ident_t *loc, kmp_int32 gtid,
                                   kmp_int32 schedule, kmp_int32 *plastiter,
                                   kmp_int64 *plower, kmp_int64 *pupper,
                                   kmp_int64 *pupperD, kmp_int64 *pstride,
                                   kmp_int64 incr, kmp_int64 chunk) {
  __kmp_dist_for_static_init<kmp_int64>(loc, gtid, schedule, plastiter, plower,
                                        pupper, pupperD, pstride, incr,
                                        chunk OMPT_CODEPTR_ARG);
}
void __kmpc_dist_for_static_init_8u(ident_t *loc, kmp_int32 gtid,
                                    kmp_int32 schedule, kmp_int32 *plastiter,
                                    kmp_uint64 *plower, kmp_uint64 *pupper,
                                    kmp_uint64 *pupperD, kmp_int64 *pstride,
                                    kmp_int64 incr, kmp_int64 chunk) {
  __kmp_dist_for_static_init<kmp_uint64>(loc, gtid, schedule, plastiter, plower,
                                         pupper, pupperD, pstride, incr,
                                         chunk OMPT_CODEPTR_ARG);
}
void __kmpc_team_static_init_4(ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last,
                               kmp_int32 *p_lb, kmp_int32 *p_ub,
                               kmp_int32 *p_st, kmp_int32 incr,
                               kmp_int32 chunk) {
  KMP_DEBUG_ASSERT(__kmp_init_serial);
  __kmp_team_static_init<kmp_int32>(loc, gtid, p_last, p_lb, p_ub, p_st, incr,
                                    chunk);
}
void __kmpc_team_static_init_4u(ident_t *loc, kmp_int32 gtid,
                                kmp_int32 *p_last, kmp_uint32 *p_lb,
                                kmp_uint32 *p_ub, kmp_int32 *p_st,
                                kmp_int32 incr, kmp_int32 chunk) {
  KMP_DEBUG_ASSERT(__kmp_init_serial);
  __kmp_team_static_init<kmp_uint32>(loc, gtid, p_last, p_lb, p_ub, p_st, incr,
                                     chunk);
}
void __kmpc_team_static_init_8(ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last,
                               kmp_int64 *p_lb, kmp_int64 *p_ub,
                               kmp_int64 *p_st, kmp_int64 incr,
                               kmp_int64 chunk) {
  KMP_DEBUG_ASSERT(__kmp_init_serial);
  __kmp_team_static_init<kmp_int64>(loc, gtid, p_last, p_lb, p_ub, p_st, incr,
                                    chunk);
}
void __kmpc_team_static_init_8u(ident_t *loc, kmp_int32 gtid,
                                kmp_int32 *p_last, kmp_uint64 *p_lb,
                                kmp_uint64 *p_ub, kmp_int64 *p_st,
                                kmp_int64 incr, kmp_int64 chunk) {
  KMP_DEBUG_ASSERT(__kmp_init_serial);
  __kmp_team_static_init<kmp_uint64>(loc, gtid, p_last, p_lb, p_ub, p_st, incr,
                                     chunk);
}
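// Illustrative lowering sketch (not part of this file): for
//   #pragma omp teams distribute dist_schedule(static, chunk)
// a compiler typically calls __kmpc_team_static_init_4 (or the _4u/_8/_8u
// variant) once to obtain the team's first chunk [*p_lb, *p_ub] and the
// stride *p_st, then advances the bounds by *p_st for each subsequent chunk.
// Exact calling sequences vary by compiler.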