#include "ompt-specific.h"
char const *traits_t<int>::spec = "d";
char const *traits_t<unsigned int>::spec = "u";
char const *traits_t<long long>::spec = "lld";
char const *traits_t<unsigned long long>::spec = "llu";
char const *traits_t<long>::spec = "ld";
#if KMP_STATS_ENABLED
#define KMP_STATS_LOOP_END(stat)                                               \
  {                                                                            \
    kmp_int64 t;                                                               \
    kmp_int64 u = (kmp_int64)(*pupper);                                        \
    kmp_int64 l = (kmp_int64)(*plower);                                        \
    kmp_int64 i = (kmp_int64)incr;                                             \
    if (i == 1) {                                                              \
      t = u - l + 1;                                                           \
    } else if (i == -1) {                                                      \
      t = l - u + 1;                                                           \
    } else if (i > 0) {                                                        \
      t = (u - l) / i + 1;                                                     \
    } else {                                                                   \
      t = (l - u) / (-i) + 1;                                                  \
    }                                                                          \
    KMP_COUNT_VALUE(stat, t);                                                  \
    KMP_POP_PARTITIONED_TIMER();                                               \
  }
#else
#define KMP_STATS_LOOP_END(stat) /* Nothing */
#endif
#if USE_ITT_BUILD || defined KMP_DEBUG
static ident_t loc_stub = {0, KMP_IDENT_KMPC, 0, 0, ";unknown;unknown;0;0;;"};
static inline void check_loc(ident_t *&loc) {
  if (loc == NULL)
    loc = &loc_stub; // may need to report location info to ittnotify
}
#endif
template <typename T>
static void __kmp_for_static_init(ident_t *loc, kmp_int32 global_tid,
                                  kmp_int32 schedtype, kmp_int32 *plastiter,
                                  T *plower, T *pupper,
                                  typename traits_t<T>::signed_t *pstride,
                                  typename traits_t<T>::signed_t incr,
                                  typename traits_t<T>::signed_t chunk
#if OMPT_SUPPORT && OMPT_OPTIONAL
                                  ,
                                  void *codeptr
#endif
) {
  KMP_PUSH_PARTITIONED_TIMER(OMP_loop_static);
  KMP_PUSH_PARTITIONED_TIMER(OMP_loop_static_scheduling);

  // strip the monotonic/nonmonotonic modifier bits from the schedule
  schedtype = SCHEDULE_WITHOUT_MODIFIERS(schedtype);

  typedef typename traits_t<T>::unsigned_t UT;
  typedef typename traits_t<T>::signed_t ST;
  kmp_int32 gtid = global_tid;
  kmp_uint32 tid;
  kmp_uint32 nth;
  UT trip_count;
  kmp_team_t *team;
  __kmp_assert_valid_gtid(gtid);
  kmp_info_t *th = __kmp_threads[gtid];
#if OMPT_SUPPORT && OMPT_OPTIONAL
  ompt_team_info_t *team_info = NULL;
  ompt_task_info_t *task_info = NULL;
  ompt_work_t ompt_work_type = ompt_work_loop;
  static kmp_int8 warn = 0;
  if (ompt_enabled.ompt_callback_work || ompt_enabled.ompt_callback_dispatch) {
    // Only fully initialize variables needed by OMPT if OMPT is enabled.
    team_info = __ompt_get_teaminfo(0, NULL);
    task_info = __ompt_get_task_info_object(0);
    // Determine workshare type from the location flags
    if (loc != NULL) {
      if ((loc->flags & KMP_IDENT_WORK_LOOP) != 0) {
        ompt_work_type = ompt_work_loop;
      } else if ((loc->flags & KMP_IDENT_WORK_SECTIONS) != 0) {
        ompt_work_type = ompt_work_sections;
      } else if ((loc->flags & KMP_IDENT_WORK_DISTRIBUTE) != 0) {
        ompt_work_type = ompt_work_distribute;
      } else {
        kmp_int8 bool_res =
            KMP_COMPARE_AND_STORE_ACQ8(&warn, (kmp_int8)0, (kmp_int8)1);
        if (bool_res)
          KMP_WARNING(OmptOutdatedWorkshare);
      }
    }
    KMP_DEBUG_ASSERT(ompt_work_type);
  }
#endif
  KMP_DEBUG_ASSERT(plastiter && plower && pupper && pstride);
  KE_TRACE(10, ("__kmpc_for_static_init called (%d)\n", global_tid));
#ifdef KMP_DEBUG
  {
    char *buff;
    // create format specifiers before the debug output
    buff = __kmp_str_format(
        "__kmpc_for_static_init: T#%%d sched=%%d liter=%%d iter=(%%%s,"
        " %%%s, %%%s) incr=%%%s chunk=%%%s signed?<%s>\n",
        traits_t<T>::spec, traits_t<T>::spec, traits_t<ST>::spec,
        traits_t<ST>::spec, traits_t<ST>::spec, traits_t<T>::spec);
    KD_TRACE(100, (buff, global_tid, schedtype, *plastiter, *plower, *pupper,
                   *pstride, incr, chunk));
    __kmp_str_free(&buff);
  }
#endif
  if (__kmp_env_consistency_check) {
    __kmp_push_workshare(global_tid, ct_pdo, loc);
    if (incr == 0) {
      __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrZeroProhibited, ct_pdo,
                            loc);
    }
  }
  /* special handling for zero-trip loops */
  if (incr > 0 ? (*pupper < *plower) : (*plower < *pupper)) {
    if (plastiter != NULL)
      *plastiter = FALSE;
#ifdef KMP_DEBUG
    {
      char *buff;
      buff = __kmp_str_format("__kmpc_for_static_init:(ZERO TRIP) liter=%%d "
                              "lower=%%%s upper=%%%s stride = %%%s "
                              "signed?<%s>, loc = %%s\n",
                              traits_t<T>::spec, traits_t<T>::spec,
                              traits_t<ST>::spec, traits_t<T>::spec);
      check_loc(loc);
      KD_TRACE(100,
               (buff, *plastiter, *plower, *pupper, *pstride, loc->psource));
      __kmp_str_free(&buff);
    }
#endif
    KE_TRACE(10, ("__kmpc_for_static_init: T#%d return\n", global_tid));

#if OMPT_SUPPORT && OMPT_OPTIONAL
    if (ompt_enabled.ompt_callback_work) {
      ompt_callbacks.ompt_callback(ompt_callback_work)(
          ompt_work_type, ompt_scope_begin, &(team_info->parallel_data),
          &(task_info->task_data), 0, codeptr);
    }
#endif
    KMP_STATS_LOOP_END(OMP_loop_static_iterations);
    return;
  }
  if (schedtype > kmp_ord_upper) {
    // we are in a DISTRIBUTE construct; convert to the usual schedule type
    schedtype += kmp_sch_static - kmp_distribute_static;
    if (th->th.th_team->t.t_serialized > 1) {
      tid = 0;
      team = th->th.th_team;
    } else {
      tid = th->th.th_team->t.t_master_tid;
      team = th->th.th_team->t.t_parent;
    }
  } else {
    tid = __kmp_tid_from_gtid(global_tid);
    team = th->th.th_team;
  }

  /* determine if "for" loop is an active worksharing construct */
  if (team->t.t_serialized) {
    /* serialized parallel: each thread executes the whole iteration space */
    if (plastiter != NULL)
      *plastiter = TRUE;
    *pstride =
        (incr > 0) ? (*pupper - *plower + 1) : (-(*plower - *pupper + 1));
#ifdef KMP_DEBUG
    {
      char *buff;
      buff = __kmp_str_format("__kmpc_for_static_init: (serial) liter=%%d "
                              "lower=%%%s upper=%%%s stride = %%%s\n",
                              traits_t<T>::spec, traits_t<T>::spec,
                              traits_t<ST>::spec);
      KD_TRACE(100, (buff, *plastiter, *plower, *pupper, *pstride));
      __kmp_str_free(&buff);
    }
#endif
    KE_TRACE(10, ("__kmpc_for_static_init: T#%d return\n", global_tid));

#if OMPT_SUPPORT && OMPT_OPTIONAL
    if (ompt_enabled.ompt_callback_work) {
      ompt_callbacks.ompt_callback(ompt_callback_work)(
          ompt_work_type, ompt_scope_begin, &(team_info->parallel_data),
          &(task_info->task_data), *pstride, codeptr);
    }
#endif
    KMP_STATS_LOOP_END(OMP_loop_static_iterations);
    return;
  }
  nth = team->t.t_nproc;
  if (nth == 1) {
    if (plastiter != NULL)
      *plastiter = TRUE;
    *pstride =
        (incr > 0) ? (*pupper - *plower + 1) : (-(*plower - *pupper + 1));
#ifdef KMP_DEBUG
    {
      char *buff;
      buff = __kmp_str_format("__kmpc_for_static_init: (serial) liter=%%d "
                              "lower=%%%s upper=%%%s stride = %%%s\n",
                              traits_t<T>::spec, traits_t<T>::spec,
                              traits_t<ST>::spec);
      KD_TRACE(100, (buff, *plastiter, *plower, *pupper, *pstride));
      __kmp_str_free(&buff);
    }
#endif
    KE_TRACE(10, ("__kmpc_for_static_init: T#%d return\n", global_tid));

#if OMPT_SUPPORT && OMPT_OPTIONAL
    if (ompt_enabled.ompt_callback_work) {
      ompt_callbacks.ompt_callback(ompt_callback_work)(
          ompt_work_type, ompt_scope_begin, &(team_info->parallel_data),
          &(task_info->task_data), *pstride, codeptr);
    }
#endif
    KMP_STATS_LOOP_END(OMP_loop_static_iterations);
    return;
  }
  /* compute trip count */
  if (incr == 1) {
    trip_count = *pupper - *plower + 1;
  } else if (incr == -1) {
    trip_count = *plower - *pupper + 1;
  } else if (incr > 0) {
    // upper - lower can exceed the limit of the signed type
    trip_count = (UT)(*pupper - *plower) / incr + 1;
  } else {
    trip_count = (UT)(*plower - *pupper) / (-incr) + 1;
  }
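  // Worked example (illustrative, added for exposition): for
  //   #pragma omp for schedule(static)
  //   for (i = 0; i <= 9; i += 2)
  // the runtime sees *plower = 0, *pupper = 9, incr = 2, so the general branch
  // gives trip_count = (UT)(9 - 0) / 2 + 1 = 5 iterations. The cast to the
  // unsigned type UT matters because *pupper - *plower can overflow the signed
  // type when the bounds span more than half of its range.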
  if (KMP_MASTER_GTID(gtid)) {
    KMP_COUNT_VALUE(FOR_static_iterations, trip_count);
  }

  if (__kmp_env_consistency_check) {
    /* tripcount overflow? */
    if (trip_count == 0 && *pupper != *plower) {
      __kmp_error_construct(kmp_i18n_msg_CnsIterationRangeTooLarge, ct_pdo,
                            loc);
    }
  }
  /* compute remaining parameters */
  switch (schedtype) {
  case kmp_sch_static: {
    if (trip_count < nth) {
      KMP_DEBUG_ASSERT(
          __kmp_static == kmp_sch_static_greedy ||
          __kmp_static ==
              kmp_sch_static_balanced); // Unknown static scheduling type.
      if (tid < trip_count) {
        *pupper = *plower = *plower + tid * incr;
      } else {
        // set bounds so the thread executes no iterations
        *plower = *pupper + (incr > 0 ? 1 : -1);
      }
      if (plastiter != NULL)
        *plastiter = (tid == trip_count - 1);
    } else {
      if (__kmp_static == kmp_sch_static_balanced) {
        UT small_chunk = trip_count / nth;
        UT extras = trip_count % nth;
        *plower += incr * (tid * small_chunk + (tid < extras ? tid : extras));
        *pupper = *plower + small_chunk * incr - (tid < extras ? 0 : incr);
        if (plastiter != NULL)
          *plastiter = (tid == nth - 1);
      } else {
        T big_chunk_inc_count =
            (trip_count / nth + ((trip_count % nth) ? 1 : 0)) * incr;
        T old_upper = *pupper;

        KMP_DEBUG_ASSERT(__kmp_static == kmp_sch_static_greedy);

        *plower += tid * big_chunk_inc_count;
        *pupper = *plower + big_chunk_inc_count - incr;
        if (incr > 0) {
          if (*pupper < *plower)
            *pupper = traits_t<T>::max_value;
          if (plastiter != NULL)
            *plastiter = *plower <= old_upper && *pupper > old_upper - incr;
          if (*pupper > old_upper)
            *pupper = old_upper; // clip to the original upper bound
        } else {
          if (*pupper > *plower)
            *pupper = traits_t<T>::min_value;
          if (plastiter != NULL)
            *plastiter = *plower >= old_upper && *pupper < old_upper - incr;
          if (*pupper < old_upper)
            *pupper = old_upper; // clip to the original upper bound
        }
      }
    }
    *pstride = trip_count;
    break;
  }
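  // Worked example (illustrative, added for exposition): with kmp_sch_static,
  // __kmp_static == kmp_sch_static_balanced, trip_count = 10, nth = 4,
  // incr = 1 and *plower = 0:
  //   small_chunk = 10 / 4 = 2, extras = 10 % 4 = 2
  //   tid 0 -> [0,2], tid 1 -> [3,5], tid 2 -> [6,7], tid 3 -> [8,9]
  // The first `extras` threads receive small_chunk + 1 iterations, the rest
  // receive small_chunk, and only tid == nth - 1 reports *plastiter.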
  case kmp_sch_static_chunked: {
    ST span;
    UT nchunks;
    if (chunk < 1)
      chunk = 1;
    else if ((UT)chunk > trip_count)
      chunk = trip_count;
    nchunks = (trip_count) / (UT)chunk + (trip_count % (UT)chunk ? 1 : 0);
    span = chunk * incr;
    if (nchunks < nth) {
      *pstride = span * nchunks;
      if (tid < nchunks) {
        *plower = *plower + (span * tid);
        *pupper = *plower + span - incr;
      } else {
        *plower = *pupper + (incr > 0 ? 1 : -1);
      }
    } else {
      *pstride = span * nth;
      *plower = *plower + (span * tid);
      *pupper = *plower + span - incr;
    }
    if (plastiter != NULL)
      *plastiter = (tid == (nchunks - 1) % nth);
    break;
  }
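  // Worked example (illustrative, added for exposition): with chunk = 3,
  // trip_count = 10, nth = 2, incr = 1 and *plower = 0:
  //   nchunks = 10 / 3 + (10 % 3 ? 1 : 0) = 4, span = 3, *pstride = 6
  //   tid 0 -> first chunk [0,2], tid 1 -> first chunk [3,5]
  // The compiler-generated loop then advances each thread's bounds by
  // *pstride to reach its next chunk ([6,8] for tid 0, [9,...] for tid 1,
  // clipped at the original upper bound), and *plastiter is set for
  // tid == (nchunks - 1) % nth, i.e. tid 1, which owns the last chunk.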
  case kmp_sch_static_balanced_chunked: {
    T old_upper = *pupper;
    // round up to make sure the chunk is enough to cover all iterations
    UT span = (trip_count + nth - 1) / nth;

    // perform chunk adjustment
    chunk = (span + chunk - 1) & ~(chunk - 1);

    span = chunk * incr;
    *plower = *plower + (span * tid);
    *pupper = *plower + span - incr;
    if (incr > 0) {
      if (*pupper > old_upper)
        *pupper = old_upper;
    } else if (*pupper < old_upper)
      *pupper = old_upper;

    if (plastiter != NULL)
      *plastiter = (tid == ((trip_count - 1) / (UT)chunk));
    break;
  }
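  // Note (illustrative, added for exposition): the expression
  //   chunk = (span + chunk - 1) & ~(chunk - 1)
  // rounds span up to the next multiple of chunk; the bitmask form is only
  // correct when chunk is a power of two. E.g. span = 10, chunk = 4 gives
  // (10 + 3) & ~3 = 12.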
  default:
    KMP_ASSERT2(0, "__kmpc_for_static_init: unknown scheduling type");
    break;
  }
#if USE_ITT_BUILD
  if (KMP_MASTER_TID(tid) && __itt_metadata_add_ptr &&
      __kmp_forkjoin_frames_mode == 3 && th->th.th_teams_microtask == NULL &&
      team->t.t_active_level == 1) {
    kmp_uint64 cur_chunk = chunk;
    // calculate chunk if it was not specified (it is for static_chunked)
    if (schedtype == kmp_sch_static) {
      cur_chunk = trip_count / nth + ((trip_count % nth) ? 1 : 0);
    }
    __kmp_itt_metadata_loop(loc, 0, trip_count, cur_chunk);
  }
#endif
#ifdef KMP_DEBUG
  {
    char *buff;
    buff = __kmp_str_format("__kmpc_for_static_init: liter=%%d lower=%%%s "
                            "upper=%%%s stride = %%%s signed?<%s>\n",
                            traits_t<T>::spec, traits_t<T>::spec,
                            traits_t<ST>::spec, traits_t<T>::spec);
    KD_TRACE(100, (buff, *plastiter, *plower, *pupper, *pstride));
    __kmp_str_free(&buff);
  }
#endif
  KE_TRACE(10, ("__kmpc_for_static_init: T#%d return\n", global_tid));
#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.ompt_callback_work) {
    ompt_callbacks.ompt_callback(ompt_callback_work)(
        ompt_work_type, ompt_scope_begin, &(team_info->parallel_data),
        &(task_info->task_data), trip_count, codeptr);
  }
  if (ompt_enabled.ompt_callback_dispatch) {
    ompt_dispatch_t dispatch_type;
    ompt_data_t instance = ompt_data_none;
    ompt_dispatch_chunk_t dispatch_chunk;
    if (ompt_work_type == ompt_work_sections) {
      dispatch_type = ompt_dispatch_section;
      instance.ptr = codeptr;
    } else {
      OMPT_GET_DISPATCH_CHUNK(dispatch_chunk, *plower, *pupper, incr);
      dispatch_type = (ompt_work_type == ompt_work_distribute)
                          ? ompt_dispatch_distribute_chunk
                          : ompt_dispatch_ws_loop_chunk;
      instance.ptr = &dispatch_chunk;
    }
    ompt_callbacks.ompt_callback(ompt_callback_dispatch)(
        &(team_info->parallel_data), &(task_info->task_data), dispatch_type,
        instance);
  }
#endif

  KMP_STATS_LOOP_END(OMP_loop_static_iterations);
  return;
}
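// Sketch (illustrative, added for exposition) of how compiler-generated code
// typically drives the 32-bit entry point defined later in this file for
// `#pragma omp for schedule(static)`; `loc` and `body` stand in for the
// compiler-emitted location record and loop body:
//
//   kmp_int32 last = 0, lower = 0, upper = n - 1, stride = 1;
//   __kmpc_for_static_init_4(&loc, gtid, kmp_sch_static, &last, &lower,
//                            &upper, &stride, /*incr=*/1, /*chunk=*/1);
//   for (kmp_int32 i = lower; i <= upper; ++i)
//     body(i);
//   __kmpc_for_static_fini(&loc, gtid);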
template <typename T>
static void __kmp_dist_for_static_init(ident_t *loc, kmp_int32 gtid,
                                       kmp_int32 schedule, kmp_int32 *plastiter,
                                       T *plower, T *pupper, T *pupperDist,
                                       typename traits_t<T>::signed_t *pstride,
                                       typename traits_t<T>::signed_t incr,
                                       typename traits_t<T>::signed_t chunk
#if OMPT_SUPPORT && OMPT_OPTIONAL
                                       ,
                                       void *codeptr
#endif
) {
  KMP_PUSH_PARTITIONED_TIMER(OMP_distribute);
  KMP_PUSH_PARTITIONED_TIMER(OMP_distribute_scheduling);
  typedef typename traits_t<T>::unsigned_t UT;
  typedef typename traits_t<T>::signed_t ST;
  kmp_uint32 tid;
  kmp_uint32 nth;
  kmp_uint32 team_id;
  kmp_uint32 nteams;
  UT trip_count;
  kmp_team_t *team;
  kmp_info_t *th;

  KMP_DEBUG_ASSERT(plastiter && plower && pupper && pupperDist && pstride);
  KE_TRACE(10, ("__kmpc_dist_for_static_init called (%d)\n", gtid));
  __kmp_assert_valid_gtid(gtid);
#ifdef KMP_DEBUG
  {
    char *buff;
    buff = __kmp_str_format(
        "__kmpc_dist_for_static_init: T#%%d schedLoop=%%d liter=%%d "
        "iter=(%%%s, %%%s, %%%s) chunk=%%%s signed?<%s>\n",
        traits_t<T>::spec, traits_t<T>::spec, traits_t<ST>::spec,
        traits_t<ST>::spec, traits_t<T>::spec);
    KD_TRACE(100,
             (buff, gtid, schedule, *plastiter, *plower, *pupper, incr, chunk));
    __kmp_str_free(&buff);
  }
#endif
  if (__kmp_env_consistency_check) {
    __kmp_push_workshare(gtid, ct_pdo, loc);
    if (incr == 0) {
      __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrZeroProhibited, ct_pdo,
                            loc);
    }
    if (incr > 0 ? (*pupper < *plower) : (*plower < *pupper)) {
      // bounds are inconsistent with the sign of the increment
      __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrIllegal, ct_pdo, loc);
    }
  }
  tid = __kmp_tid_from_gtid(gtid);
  th = __kmp_threads[gtid];
  nth = th->th.th_team_nproc;
  team = th->th.th_team;
  KMP_DEBUG_ASSERT(th->th.th_teams_microtask); // we are in the teams construct
  nteams = th->th.th_teams_size.nteams;
  team_id = team->t.t_master_tid;
  KMP_DEBUG_ASSERT(nteams == (kmp_uint32)team->t.t_parent->t.t_nproc);
  // compute global trip count
  if (incr == 1) {
    trip_count = *pupper - *plower + 1;
  } else if (incr == -1) {
    trip_count = *plower - *pupper + 1;
  } else if (incr > 0) {
    // upper - lower can exceed the limit of the signed type
    trip_count = (UT)(*pupper - *plower) / incr + 1;
  } else {
    trip_count = (UT)(*plower - *pupper) / (-incr) + 1;
  }
  *pstride = *pupper - *plower; // just in case (can be unused)
  if (trip_count <= nteams) {
    KMP_DEBUG_ASSERT(
        __kmp_static == kmp_sch_static_greedy ||
        __kmp_static ==
            kmp_sch_static_balanced); // Unknown static scheduling type.
    // only the primary threads of some teams get a single iteration,
    // the other threads get nothing
    if (team_id < trip_count && tid == 0) {
      *pupper = *pupperDist = *plower = *plower + team_id * incr;
    } else {
      *pupperDist = *pupper;
      *plower = *pupper + incr; // compute bounds beyond the iteration space
    }
    if (plastiter != NULL)
      *plastiter = (tid == 0 && team_id == trip_count - 1);
  } else {
    // each team gets at most one contiguous chunk of the iteration space
    if (__kmp_static == kmp_sch_static_balanced) {
      UT chunkD = trip_count / nteams;
      UT extras = trip_count % nteams;
      *plower +=
          incr * (team_id * chunkD + (team_id < extras ? team_id : extras));
      *pupperDist = *plower + chunkD * incr - (team_id < extras ? 0 : incr);
      if (plastiter != NULL)
        *plastiter = (team_id == nteams - 1);
    } else {
      T chunk_inc_count =
          (trip_count / nteams + ((trip_count % nteams) ? 1 : 0)) * incr;
      T upper = *pupper;
      KMP_DEBUG_ASSERT(__kmp_static == kmp_sch_static_greedy);
      *plower += team_id * chunk_inc_count;
      *pupperDist = *plower + chunk_inc_count - incr;
      // check/correct the bounds if needed
      if (incr > 0) {
        if (*pupperDist < *plower)
          *pupperDist = traits_t<T>::max_value;
        if (plastiter != NULL)
          *plastiter = *plower <= upper && *pupperDist > upper - incr;
        if (*pupperDist > upper)
          *pupperDist = upper; // clip to the global upper bound
        if (*plower > *pupperDist) {
          // no iterations available for the team
          *pupper = *pupperDist;
          *plower = *pupperDist + incr;
        }
      } else {
        if (*pupperDist > *plower)
          *pupperDist = traits_t<T>::min_value;
        if (plastiter != NULL)
          *plastiter = *plower >= upper && *pupperDist < upper - incr;
        if (*pupperDist < upper)
          *pupperDist = upper; // clip to the global upper bound
        if (*plower < *pupperDist) {
          // no iterations available for the team
          *pupper = *pupperDist;
          *plower = *pupperDist + incr;
        }
      }
    }
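    // Worked example (illustrative, added for exposition): with
    // __kmp_static == kmp_sch_static_balanced, trip_count = 100, nteams = 3,
    // incr = 1 and *plower = 0:
    //   chunkD = 100 / 3 = 33, extras = 100 % 3 = 1
    //   team 0 -> [0,33] (34 iters), team 1 -> [34,66], team 2 -> [67,99]
    // The per-team range [*plower, *pupperDist] computed here is then split
    // again among the team's nth threads by the code below.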
    // now compute the trip count for the team's chunk
    if (incr == 1) {
      trip_count = *pupperDist - *plower + 1;
    } else if (incr == -1) {
      trip_count = *plower - *pupperDist + 1;
    } else if (incr > 1) {
      // upper - lower can exceed the limit of the signed type
      trip_count = (UT)(*pupperDist - *plower) / incr + 1;
    } else {
      trip_count = (UT)(*plower - *pupperDist) / (-incr) + 1;
    }
    KMP_DEBUG_ASSERT(trip_count);
    switch (schedule) {
    case kmp_sch_static: {
      if (trip_count <= nth) {
        KMP_DEBUG_ASSERT(
            __kmp_static == kmp_sch_static_greedy ||
            __kmp_static ==
                kmp_sch_static_balanced); // Unknown static scheduling type.
        if (tid < trip_count)
          *pupper = *plower = *plower + tid * incr;
        else
          *plower = *pupper + incr; // no iterations for this thread
        if (plastiter != NULL)
          if (*plastiter != 0 && !(tid == trip_count - 1))
            *plastiter = 0;
      } else {
        if (__kmp_static == kmp_sch_static_balanced) {
          UT chunkL = trip_count / nth;
          UT extras = trip_count % nth;
          *plower += incr * (tid * chunkL + (tid < extras ? tid : extras));
          *pupper = *plower + chunkL * incr - (tid < extras ? 0 : incr);
          if (plastiter != NULL)
            if (*plastiter != 0 && !(tid == nth - 1))
              *plastiter = 0;
        } else {
          T chunk_inc_count =
              (trip_count / nth + ((trip_count % nth) ? 1 : 0)) * incr;
          T upper = *pupperDist;
          KMP_DEBUG_ASSERT(__kmp_static == kmp_sch_static_greedy);
          *plower += tid * chunk_inc_count;
          *pupper = *plower + chunk_inc_count - incr;
          if (incr > 0) {
            if (*pupper < *plower)
              *pupper = traits_t<T>::max_value;
            if (plastiter != NULL)
              if (*plastiter != 0 &&
                  !(*plower <= upper && *pupper > upper - incr))
                *plastiter = 0;
            if (*pupper > upper)
              *pupper = upper; // clip to the team's upper bound
          } else {
            if (*pupper > *plower)
              *pupper = traits_t<T>::min_value;
            if (plastiter != NULL)
              if (*plastiter != 0 &&
                  !(*plower >= upper && *pupper < upper - incr))
                *plastiter = 0;
            if (*pupper < upper)
              *pupper = upper; // clip to the team's upper bound
          }
        }
      }
      break;
    }
    case kmp_sch_static_chunked: {
      ST span;
      if (chunk < 1)
        chunk = 1;
      span = chunk * incr;
      *pstride = span * nth;
      *plower = *plower + (span * tid);
      *pupper = *plower + span - incr;
      if (plastiter != NULL)
        if (*plastiter != 0 && !(tid == ((trip_count - 1) / (UT)chunk) % nth))
          *plastiter = 0;
      break;
    }
    default:
      KMP_ASSERT2(
          0, "__kmpc_dist_for_static_init: unknown loop scheduling type");
      break;
    }
  }
#ifdef KMP_DEBUG
  {
    char *buff;
    buff = __kmp_str_format(
        "__kmpc_dist_for_static_init: last=%%d lo=%%%s up=%%%s upDist=%%%s "
        "stride=%%%s signed?<%s>\n",
        traits_t<T>::spec, traits_t<T>::spec, traits_t<T>::spec,
        traits_t<ST>::spec, traits_t<T>::spec);
    KD_TRACE(100, (buff, *plastiter, *plower, *pupper, *pupperDist, *pstride));
    __kmp_str_free(&buff);
  }
#endif
  KE_TRACE(10, ("__kmpc_dist_for_static_init: T#%d return\n", gtid));
#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.ompt_callback_work || ompt_enabled.ompt_callback_dispatch) {
    ompt_team_info_t *team_info = __ompt_get_teaminfo(0, NULL);
    ompt_task_info_t *task_info = __ompt_get_task_info_object(0);
    if (ompt_enabled.ompt_callback_work) {
      ompt_callbacks.ompt_callback(ompt_callback_work)(
          ompt_work_distribute, ompt_scope_begin, &(team_info->parallel_data),
          &(task_info->task_data), 0, codeptr);
    }
    if (ompt_enabled.ompt_callback_dispatch) {
      ompt_data_t instance = ompt_data_none;
      ompt_dispatch_chunk_t dispatch_chunk;
      OMPT_GET_DISPATCH_CHUNK(dispatch_chunk, *plower, *pupperDist, incr);
      instance.ptr = &dispatch_chunk;
      ompt_callbacks.ompt_callback(ompt_callback_dispatch)(
          &(team_info->parallel_data), &(task_info->task_data),
          ompt_dispatch_distribute_chunk, instance);
    }
  }
#endif // OMPT_SUPPORT && OMPT_OPTIONAL
  KMP_STATS_LOOP_END(OMP_distribute_iterations);
}
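// Note (explanatory comment, based on the code above): on return *pupperDist
// holds the upper bound of the whole chunk assigned to the team (the
// `distribute` part), while *plower/*pupper delimit the sub-range assigned to
// the calling thread within that team, and *pstride is the stride used to
// advance to further chunks when a chunked schedule is requested.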
template <typename T>
static void __kmp_team_static_init(ident_t *loc, kmp_int32 gtid,
                                   kmp_int32 *p_last, T *p_lb, T *p_ub,
                                   typename traits_t<T>::signed_t *p_st,
                                   typename traits_t<T>::signed_t incr,
                                   typename traits_t<T>::signed_t chunk) {
  // Returns the first chunk distributed to the team and the stride for
  // computing the team's subsequent chunks.
  typedef typename traits_t<T>::unsigned_t UT;
  typedef typename traits_t<T>::signed_t ST;
  kmp_uint32 team_id, nteams;
  UT trip_count;
  T lower, upper;
  ST span;
  kmp_team_t *team;
  kmp_info_t *th;
  KMP_DEBUG_ASSERT(p_last && p_lb && p_ub && p_st);
  KE_TRACE(10, ("__kmp_team_static_init called (%d)\n", gtid));
  __kmp_assert_valid_gtid(gtid);
#ifdef KMP_DEBUG
  {
    char *buff;
    buff = __kmp_str_format("__kmp_team_static_init enter: T#%%d liter=%%d "
                            "iter=(%%%s, %%%s, %%%s) chunk %%%s; signed?<%s>\n",
                            traits_t<T>::spec, traits_t<T>::spec,
                            traits_t<ST>::spec, traits_t<ST>::spec,
                            traits_t<T>::spec);
    KD_TRACE(100, (buff, gtid, *p_last, *p_lb, *p_ub, *p_st, chunk));
    __kmp_str_free(&buff);
  }
#endif
  lower = *p_lb;
  upper = *p_ub;
  if (__kmp_env_consistency_check) {
    if (incr == 0) {
      __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrZeroProhibited, ct_pdo,
                            loc);
    }
    if (incr > 0 ? (upper < lower) : (lower < upper)) {
      // bounds are inconsistent with the sign of the increment
      __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrIllegal, ct_pdo, loc);
    }
  }
  th = __kmp_threads[gtid];
  team = th->th.th_team;
  KMP_DEBUG_ASSERT(th->th.th_teams_microtask); // we are in the teams construct
  nteams = th->th.th_teams_size.nteams;
  team_id = team->t.t_master_tid;
  KMP_DEBUG_ASSERT(nteams == (kmp_uint32)team->t.t_parent->t.t_nproc);
  // compute trip count
  if (incr == 1) {
    trip_count = upper - lower + 1;
  } else if (incr == -1) {
    trip_count = lower - upper + 1;
  } else if (incr > 0) {
    // upper - lower can exceed the limit of the signed type
    trip_count = (UT)(upper - lower) / incr + 1;
  } else {
    trip_count = (UT)(lower - upper) / (-incr) + 1;
  }
  if (chunk < 1)
    chunk = 1;
  span = chunk * incr;
  *p_st = span * nteams;
  *p_lb = lower + (span * team_id);
  *p_ub = *p_lb + span - incr;
  if (p_last != NULL)
    *p_last = (team_id == ((trip_count - 1) / (UT)chunk) % nteams);
  // correct the upper bound if needed
  if (incr > 0) {
    if (*p_ub < *p_lb) // overflow?
      *p_ub = traits_t<T>::max_value;
    if (*p_ub > upper)
      *p_ub = upper; // clip to the original upper bound
  } else {
    if (*p_ub > *p_lb)
      *p_ub = traits_t<T>::min_value;
    if (*p_ub < upper)
      *p_ub = upper; // clip to the original upper bound
  }
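  // Worked example (illustrative, added for exposition): for
  //   #pragma omp distribute dist_schedule(static, 10)
  // with trip_count = 100, nteams = 4, incr = 1 and lower = 0:
  //   span = 10, *p_st = 40, team 0 -> [0,9], team 1 -> [10,19], ...
  // Each team advances its bounds by *p_st to reach its next chunk, and
  // *p_last is set for team_id == ((100 - 1) / 10) % 4 == 1, the team that
  // owns the final chunk [90,99].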
#ifdef KMP_DEBUG
  {
    char *buff;
    buff = __kmp_str_format("__kmp_team_static_init exit: T#%%d team%%u "
                            "liter=%%d iter=(%%%s, %%%s, %%%s) chunk %%%s\n",
                            traits_t<T>::spec, traits_t<T>::spec,
                            traits_t<ST>::spec, traits_t<ST>::spec);
    KD_TRACE(100, (buff, gtid, team_id, *p_last, *p_lb, *p_ub, *p_st, chunk));
    __kmp_str_free(&buff);
  }
#endif
}
void __kmpc_for_static_init_4(ident_t *loc, kmp_int32 gtid,
                              kmp_int32 schedtype, kmp_int32 *plastiter,
                              kmp_int32 *plower, kmp_int32 *pupper,
                              kmp_int32 *pstride, kmp_int32 incr,
                              kmp_int32 chunk) {
  __kmp_for_static_init<kmp_int32>(loc, gtid, schedtype, plastiter, plower,
                                   pupper, pstride, incr, chunk
#if OMPT_SUPPORT && OMPT_OPTIONAL
                                   ,
                                   OMPT_GET_RETURN_ADDRESS(0)
#endif
  );
}
void __kmpc_for_static_init_4u(ident_t *loc, kmp_int32 gtid,
                               kmp_int32 schedtype, kmp_int32 *plastiter,
                               kmp_uint32 *plower, kmp_uint32 *pupper,
                               kmp_int32 *pstride, kmp_int32 incr,
                               kmp_int32 chunk) {
  __kmp_for_static_init<kmp_uint32>(loc, gtid, schedtype, plastiter, plower,
                                    pupper, pstride, incr, chunk
#if OMPT_SUPPORT && OMPT_OPTIONAL
                                    ,
                                    OMPT_GET_RETURN_ADDRESS(0)
#endif
  );
}
void __kmpc_for_static_init_8(ident_t *loc, kmp_int32 gtid,
                              kmp_int32 schedtype, kmp_int32 *plastiter,
                              kmp_int64 *plower, kmp_int64 *pupper,
                              kmp_int64 *pstride, kmp_int64 incr,
                              kmp_int64 chunk) {
  __kmp_for_static_init<kmp_int64>(loc, gtid, schedtype, plastiter, plower,
                                   pupper, pstride, incr, chunk
#if OMPT_SUPPORT && OMPT_OPTIONAL
                                   ,
                                   OMPT_GET_RETURN_ADDRESS(0)
#endif
  );
}
void __kmpc_for_static_init_8u(ident_t *loc, kmp_int32 gtid,
                               kmp_int32 schedtype, kmp_int32 *plastiter,
                               kmp_uint64 *plower, kmp_uint64 *pupper,
                               kmp_int64 *pstride, kmp_int64 incr,
                               kmp_int64 chunk) {
  __kmp_for_static_init<kmp_uint64>(loc, gtid, schedtype, plastiter, plower,
                                    pupper, pstride, incr, chunk
#if OMPT_SUPPORT && OMPT_OPTIONAL
                                    ,
                                    OMPT_GET_RETURN_ADDRESS(0)
#endif
  );
}
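// Note (explanatory comment, based on the entry points above and below): the
// _4/_4u/_8/_8u suffixes select the induction-variable width and signedness
// (signed/unsigned 32-bit and 64-bit); each wrapper simply instantiates the
// corresponding template with the matching kmp_(u)intNN type.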
#if OMPT_SUPPORT && OMPT_OPTIONAL
#define OMPT_CODEPTR_ARG , OMPT_GET_RETURN_ADDRESS(0)
#else
#define OMPT_CODEPTR_ARG
#endif
void __kmpc_dist_for_static_init_4(ident_t *loc, kmp_int32 gtid,
                                   kmp_int32 schedule, kmp_int32 *plastiter,
                                   kmp_int32 *plower, kmp_int32 *pupper,
                                   kmp_int32 *pupperD, kmp_int32 *pstride,
                                   kmp_int32 incr, kmp_int32 chunk) {
  __kmp_dist_for_static_init<kmp_int32>(loc, gtid, schedule, plastiter, plower,
                                        pupper, pupperD, pstride, incr,
                                        chunk OMPT_CODEPTR_ARG);
}
void __kmpc_dist_for_static_init_4u(ident_t *loc, kmp_int32 gtid,
                                    kmp_int32 schedule, kmp_int32 *plastiter,
                                    kmp_uint32 *plower, kmp_uint32 *pupper,
                                    kmp_uint32 *pupperD, kmp_int32 *pstride,
                                    kmp_int32 incr, kmp_int32 chunk) {
  __kmp_dist_for_static_init<kmp_uint32>(loc, gtid, schedule, plastiter, plower,
                                         pupper, pupperD, pstride, incr,
                                         chunk OMPT_CODEPTR_ARG);
}
void __kmpc_dist_for_static_init_8(ident_t *loc, kmp_int32 gtid,
                                   kmp_int32 schedule, kmp_int32 *plastiter,
                                   kmp_int64 *plower, kmp_int64 *pupper,
                                   kmp_int64 *pupperD, kmp_int64 *pstride,
                                   kmp_int64 incr, kmp_int64 chunk) {
  __kmp_dist_for_static_init<kmp_int64>(loc, gtid, schedule, plastiter, plower,
                                        pupper, pupperD, pstride, incr,
                                        chunk OMPT_CODEPTR_ARG);
}
void __kmpc_dist_for_static_init_8u(ident_t *loc, kmp_int32 gtid,
                                    kmp_int32 schedule, kmp_int32 *plastiter,
                                    kmp_uint64 *plower, kmp_uint64 *pupper,
                                    kmp_uint64 *pupperD, kmp_int64 *pstride,
                                    kmp_int64 incr, kmp_int64 chunk) {
  __kmp_dist_for_static_init<kmp_uint64>(loc, gtid, schedule, plastiter, plower,
                                         pupper, pupperD, pstride, incr,
                                         chunk OMPT_CODEPTR_ARG);
}
void __kmpc_team_static_init_4(ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last,
                               kmp_int32 *p_lb, kmp_int32 *p_ub,
                               kmp_int32 *p_st, kmp_int32 incr,
                               kmp_int32 chunk) {
  KMP_DEBUG_ASSERT(__kmp_init_serial);
  __kmp_team_static_init<kmp_int32>(loc, gtid, p_last, p_lb, p_ub, p_st, incr,
                                    chunk);
}
void __kmpc_team_static_init_4u(ident_t *loc, kmp_int32 gtid,
                                kmp_int32 *p_last, kmp_uint32 *p_lb,
                                kmp_uint32 *p_ub, kmp_int32 *p_st,
                                kmp_int32 incr, kmp_int32 chunk) {
  KMP_DEBUG_ASSERT(__kmp_init_serial);
  __kmp_team_static_init<kmp_uint32>(loc, gtid, p_last, p_lb, p_ub, p_st, incr,
                                     chunk);
}
void __kmpc_team_static_init_8(ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last,
                               kmp_int64 *p_lb, kmp_int64 *p_ub,
                               kmp_int64 *p_st, kmp_int64 incr,
                               kmp_int64 chunk) {
  KMP_DEBUG_ASSERT(__kmp_init_serial);
  __kmp_team_static_init<kmp_int64>(loc, gtid, p_last, p_lb, p_ub, p_st, incr,
                                    chunk);
}
void __kmpc_team_static_init_8u(ident_t *loc, kmp_int32 gtid,
                                kmp_int32 *p_last, kmp_uint64 *p_lb,
                                kmp_uint64 *p_ub, kmp_int64 *p_st,
                                kmp_int64 incr, kmp_int64 chunk) {
  KMP_DEBUG_ASSERT(__kmp_init_serial);
  __kmp_team_static_init<kmp_uint64>(loc, gtid, p_last, p_lb, p_ub, p_st, incr,
                                     chunk);
}