Example #1
static void
__kmp_for_static_init(
    ident_t                          *loc,
    kmp_int32                         global_tid,
    kmp_int32                         schedtype,
    kmp_int32                        *plastiter,
    T                                *plower,
    T                                *pupper,
    typename traits_t< T >::signed_t *pstride,
    typename traits_t< T >::signed_t  incr,
    typename traits_t< T >::signed_t  chunk
) {
    KMP_COUNT_BLOCK(OMP_FOR_static);
    typedef typename traits_t< T >::unsigned_t  UT;
    typedef typename traits_t< T >::signed_t    ST;
    /*  this all has to be changed back to TID and such.. */
    register kmp_int32   gtid = global_tid;
    register kmp_uint32  tid;
    register kmp_uint32  nth;
    register UT          trip_count;
    register kmp_team_t *team;
    register kmp_info_t *th = __kmp_threads[ gtid ];

#if OMPT_SUPPORT && OMPT_TRACE
    ompt_team_info_t *team_info = __ompt_get_teaminfo(0, NULL);
    ompt_task_info_t *task_info = __ompt_get_taskinfo(0);
#endif

    KMP_DEBUG_ASSERT( plastiter && plower && pupper && pstride );
    KE_TRACE( 10, ("__kmpc_for_static_init called (%d)\n", global_tid));
    #ifdef KMP_DEBUG
    {
        const char * buff;
        // create format specifiers before the debug output
        buff = __kmp_str_format(
            "__kmpc_for_static_init: T#%%d sched=%%d liter=%%d iter=(%%%s," \
            " %%%s, %%%s) incr=%%%s chunk=%%%s signed?<%s>\n",
            traits_t< T >::spec, traits_t< T >::spec, traits_t< ST >::spec,
            traits_t< ST >::spec, traits_t< ST >::spec, traits_t< T >::spec );
        KD_TRACE(100, ( buff, global_tid, schedtype, *plastiter,
            *plower, *pupper, *pstride, incr, chunk ) );
        __kmp_str_free( &buff );
    }
    #endif

    if ( __kmp_env_consistency_check ) {
        __kmp_push_workshare( global_tid, ct_pdo, loc );
        if ( incr == 0 ) {
            __kmp_error_construct( kmp_i18n_msg_CnsLoopIncrZeroProhibited, ct_pdo, loc );
        }
    }
    /* special handling for zero-trip loops */
    if ( incr > 0 ? (*pupper < *plower) : (*plower < *pupper) ) {
        if( plastiter != NULL )
            *plastiter = FALSE;
        /* leave pupper and plower set to entire iteration space */
        *pstride = incr;   /* value should never be used */
        // *plower = *pupper - incr;
        // let compiler bypass the illegal loop (like for(i=1;i<10;i--))
        // THE LINE COMMENTED ABOVE CAUSED shape2F/h_tests_1.f TO HAVE A FAILURE
        // ON A ZERO-TRIP LOOP (lower=1, upper=0, stride=1) - JPH June 23, 2009.
        #ifdef KMP_DEBUG
        {
            const char * buff;
            // create format specifiers before the debug output
            buff = __kmp_str_format(
                "__kmpc_for_static_init:(ZERO TRIP) liter=%%d lower=%%%s upper=%%%s stride = %%%s signed?<%s>, loc = %%s\n",
                traits_t< T >::spec, traits_t< T >::spec, traits_t< ST >::spec, traits_t< T >::spec );
            KD_TRACE(100, ( buff, *plastiter, *plower, *pupper, *pstride, loc->psource ) );
            __kmp_str_free( &buff );
        }
        #endif
        KE_TRACE( 10, ("__kmpc_for_static_init: T#%d return\n", global_tid ) );

#if OMPT_SUPPORT && OMPT_TRACE
        if ((ompt_status == ompt_status_track_callback) &&
            ompt_callbacks.ompt_callback(ompt_event_loop_begin)) {
            ompt_callbacks.ompt_callback(ompt_event_loop_begin)(
                team_info->parallel_id, task_info->task_id,
                team_info->microtask);
        }
#endif
        return;
    }

    #if OMP_40_ENABLED
    if ( schedtype > kmp_ord_upper ) {
        // we are in DISTRIBUTE construct
        schedtype += kmp_sch_static - kmp_distribute_static;      // AC: convert to usual schedule type
        tid  = th->th.th_team->t.t_master_tid;
        team = th->th.th_team->t.t_parent;
    } else
    #endif
    {
        tid  = __kmp_tid_from_gtid( global_tid );
        team = th->th.th_team;
    }

    /* determine if "for" loop is an active worksharing construct */
    if ( team -> t.t_serialized ) {
        /* serialized parallel, each thread executes whole iteration space */
        if( plastiter != NULL )
            *plastiter = TRUE;
        /* leave pupper and plower set to entire iteration space */
        *pstride = (incr > 0) ? (*pupper - *plower + 1) : (-(*plower - *pupper + 1));

        #ifdef KMP_DEBUG
        {
            const char * buff;
            // create format specifiers before the debug output
            buff = __kmp_str_format(
                "__kmpc_for_static_init: (serial) liter=%%d lower=%%%s upper=%%%s stride = %%%s\n",
                traits_t< T >::spec, traits_t< T >::spec, traits_t< ST >::spec );
            KD_TRACE(100, ( buff, *plastiter, *plower, *pupper, *pstride ) );
            __kmp_str_free( &buff );
        }
        #endif
        KE_TRACE( 10, ("__kmpc_for_static_init: T#%d return\n", global_tid ) );

#if OMPT_SUPPORT && OMPT_TRACE
        if ((ompt_status == ompt_status_track_callback) &&
            ompt_callbacks.ompt_callback(ompt_event_loop_begin)) {
            ompt_callbacks.ompt_callback(ompt_event_loop_begin)(
                team_info->parallel_id, task_info->task_id,
                team_info->microtask);
        }
#endif
        return;
    }
    nth = team->t.t_nproc;
    if ( nth == 1 ) {
        if( plastiter != NULL )
            *plastiter = TRUE;
        *pstride = (incr > 0) ? (*pupper - *plower + 1) : (-(*plower - *pupper + 1));
        #ifdef KMP_DEBUG
        {
            const char * buff;
            // create format specifiers before the debug output
            buff = __kmp_str_format(
                "__kmpc_for_static_init: (serial) liter=%%d lower=%%%s upper=%%%s stride = %%%s\n",
                traits_t< T >::spec, traits_t< T >::spec, traits_t< ST >::spec );
            KD_TRACE(100, ( buff, *plastiter, *plower, *pupper, *pstride ) );
            __kmp_str_free( &buff );
        }
        #endif
        KE_TRACE( 10, ("__kmpc_for_static_init: T#%d return\n", global_tid ) );

#if OMPT_SUPPORT && OMPT_TRACE
        if ((ompt_status == ompt_status_track_callback) &&
            ompt_callbacks.ompt_callback(ompt_event_loop_begin)) {
            ompt_callbacks.ompt_callback(ompt_event_loop_begin)(
                team_info->parallel_id, task_info->task_id,
                team_info->microtask);
        }
#endif
        return;
    }

    /* compute trip count */
    if ( incr == 1 ) {
        trip_count = *pupper - *plower + 1;
    } else if (incr == -1) {
        trip_count = *plower - *pupper + 1;
    } else {
        if ( incr > 1 ) {  // the check is needed for unsigned division when incr < 0
            trip_count = (*pupper - *plower) / incr + 1;
        } else {
            trip_count = (*plower - *pupper) / ( -incr ) + 1;
        }
    }
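    /* e.g. lower=0, upper=9, incr=2 gives trip_count = (9-0)/2 + 1 = 5
       (the later revision below casts upper - lower to UT first, since the
       difference can exceed the range of the signed type) */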

    if ( __kmp_env_consistency_check ) {
        /* tripcount overflow? */
        if ( trip_count == 0 && *pupper != *plower ) {
            __kmp_error_construct( kmp_i18n_msg_CnsIterationRangeTooLarge, ct_pdo, loc );
        }
    }

    /* compute remaining parameters */
    switch ( schedtype ) {
    case kmp_sch_static:
        {
            if ( trip_count < nth ) {
                KMP_DEBUG_ASSERT(
                    __kmp_static == kmp_sch_static_greedy || \
                    __kmp_static == kmp_sch_static_balanced
                ); // Unknown static scheduling type.
                if ( tid < trip_count ) {
                    *pupper = *plower = *plower + tid * incr;
                } else {
                    *plower = *pupper + incr;
                }
                if( plastiter != NULL )
                    *plastiter = ( tid == trip_count - 1 );
            } else {
                if ( __kmp_static == kmp_sch_static_balanced ) {
                    register UT small_chunk = trip_count / nth;
                    register UT extras = trip_count % nth;
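                    // balanced split: every thread gets small_chunk iterations,
                    // the first `extras` threads one more apiece
                    // (e.g. trip_count=10, nth=4 -> 3,3,2,2)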
                    *plower += incr * ( tid * small_chunk + ( tid < extras ? tid : extras ) );
                    *pupper = *plower + small_chunk * incr - ( tid < extras ? 0 : incr );
                    if( plastiter != NULL )
                        *plastiter = ( tid == nth - 1 );
                } else {
                    register T big_chunk_inc_count = ( trip_count/nth +
                                                     ( ( trip_count % nth ) ? 1 : 0) ) * incr;
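                    // greedy split: contiguous blocks of ceil(trip_count/nth)
                    // iterations; late threads can receive an empty range,
                    // which the overflow/clipping logic below accounts for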
                    register T old_upper = *pupper;

                    KMP_DEBUG_ASSERT( __kmp_static == kmp_sch_static_greedy );
                        // Unknown static scheduling type.

                    *plower += tid * big_chunk_inc_count;
                    *pupper = *plower + big_chunk_inc_count - incr;
                    if ( incr > 0 ) {
                        if( *pupper < *plower )
                            *pupper = i_maxmin< T >::mx;
                        if( plastiter != NULL )
                            *plastiter = *plower <= old_upper && *pupper > old_upper - incr;
                        if ( *pupper > old_upper ) *pupper = old_upper; // tracker C73258
                    } else {
                        if( *pupper > *plower )
                            *pupper = i_maxmin< T >::mn;
                        if( plastiter != NULL )
                            *plastiter = *plower >= old_upper && *pupper < old_upper - incr;
                        if ( *pupper < old_upper ) *pupper = old_upper; // tracker C73258
                    }
                }
            }
            break;
        }
    case kmp_sch_static_chunked:
        {
            register ST span;
            if ( chunk < 1 ) {
                chunk = 1;
            }
            span = chunk * incr;
            *pstride = span * nth;
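            // round-robin chunks: thread tid starts at its tid-th chunk and the
            // compiler-generated loop advances by *pstride, i.e. nth chunks at a time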
            *plower = *plower + (span * tid);
            *pupper = *plower + span - incr;
            if( plastiter != NULL )
                *plastiter = (tid == ((trip_count - 1)/( UT )chunk) % nth);
            break;
        }
    default:
        KMP_ASSERT2( 0, "__kmpc_for_static_init: unknown scheduling type" );
        break;
    }

#if USE_ITT_BUILD
    // Report loop metadata
    if ( KMP_MASTER_TID(tid) && __itt_metadata_add_ptr && __kmp_forkjoin_frames_mode == 3 &&
#if OMP_40_ENABLED
        th->th.th_teams_microtask == NULL &&
#endif
        team->t.t_active_level == 1 )
    {
        kmp_uint64 cur_chunk = chunk;
        // Calculate chunk in case it was not specified; it is specified for kmp_sch_static_chunked
        if ( schedtype == kmp_sch_static ) {
            cur_chunk = trip_count / nth + ( ( trip_count % nth ) ? 1 : 0);
        }
        // 0 - "static" schedule
        __kmp_itt_metadata_loop(loc, 0, trip_count, cur_chunk);
    }
#endif
    #ifdef KMP_DEBUG
    {
        const char * buff;
        // create format specifiers before the debug output
        buff = __kmp_str_format(
            "__kmpc_for_static_init: liter=%%d lower=%%%s upper=%%%s stride = %%%s signed?<%s>\n",
            traits_t< T >::spec, traits_t< T >::spec, traits_t< ST >::spec, traits_t< T >::spec );
        KD_TRACE(100, ( buff, *plastiter, *plower, *pupper, *pstride ) );
        __kmp_str_free( &buff );
    }
    #endif
    KE_TRACE( 10, ("__kmpc_for_static_init: T#%d return\n", global_tid ) );

#if OMPT_SUPPORT && OMPT_TRACE
    if ((ompt_status == ompt_status_track_callback) &&
        ompt_callbacks.ompt_callback(ompt_event_loop_begin)) {
        ompt_callbacks.ompt_callback(ompt_event_loop_begin)(
            team_info->parallel_id, task_info->task_id, team_info->microtask);
    }
#endif

    return;
}
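
// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the runtime): a minimal, self-contained
// model of the kmp_sch_static_balanced arithmetic above, assuming int bounds
// and a positive increment. The names balanced_bounds/Bounds are invented for
// this example.
#include <cstdio>

struct Bounds { int lower, upper; };

// Mirror of the balanced split: each of the first `extras` threads receives
// one iteration more than the others.
static Bounds balanced_bounds(int lower, int upper, int incr,
                              unsigned tid, unsigned nth) {
    unsigned trip_count = (unsigned)(upper - lower) / (unsigned)incr + 1;
    unsigned small_chunk = trip_count / nth;
    unsigned extras = trip_count % nth;
    Bounds b;
    b.lower = lower + incr * (int)(tid * small_chunk + (tid < extras ? tid : extras));
    b.upper = b.lower + (int)small_chunk * incr - (tid < extras ? 0 : incr);
    return b;
}

int main() {
    // 10 iterations over [0,9] split across 4 threads -> 3,3,2,2
    for (unsigned tid = 0; tid < 4; ++tid) {
        Bounds b = balanced_bounds(0, 9, 1, tid, 4);
        std::printf("tid %u: [%d, %d]\n", tid, b.lower, b.upper);
    }
    return 0;
}
// ---------------------------------------------------------------------------
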
static void __kmp_for_static_init(ident_t *loc, kmp_int32 global_tid,
                                  kmp_int32 schedtype, kmp_int32 *plastiter,
                                  T *plower, T *pupper,
                                  typename traits_t<T>::signed_t *pstride,
                                  typename traits_t<T>::signed_t incr,
                                  typename traits_t<T>::signed_t chunk
#if OMPT_SUPPORT && OMPT_OPTIONAL
                                  ,
                                  void *codeptr
#endif
                                  ) {
  KMP_COUNT_BLOCK(OMP_FOR_static);
  KMP_TIME_PARTITIONED_BLOCK(FOR_static_scheduling);

  typedef typename traits_t<T>::unsigned_t UT;
  typedef typename traits_t<T>::signed_t ST;
  /*  this all has to be changed back to TID and such.. */
  kmp_int32 gtid = global_tid;
  kmp_uint32 tid;
  kmp_uint32 nth;
  UT trip_count;
  kmp_team_t *team;
  kmp_info_t *th = __kmp_threads[gtid];

#if OMPT_SUPPORT && OMPT_OPTIONAL
  ompt_team_info_t *team_info = NULL;
  ompt_task_info_t *task_info = NULL;
  ompt_work_type_t ompt_work_type = ompt_work_loop;

  static kmp_int8 warn = 0;
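  // `warn` makes the outdated-workshare warning below fire only once: the
  // first thread to flip it via compare-and-swap reports, the rest stay quiet.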

  if (ompt_enabled.ompt_callback_work) {
    // Only fully initialize variables needed by OMPT if OMPT is enabled.
    team_info = __ompt_get_teaminfo(0, NULL);
    task_info = __ompt_get_task_info_object(0);
    // Determine workshare type
    if (loc != NULL) {
      if ((loc->flags & KMP_IDENT_WORK_LOOP) != 0) {
        ompt_work_type = ompt_work_loop;
      } else if ((loc->flags & KMP_IDENT_WORK_SECTIONS) != 0) {
        ompt_work_type = ompt_work_sections;
      } else if ((loc->flags & KMP_IDENT_WORK_DISTRIBUTE) != 0) {
        ompt_work_type = ompt_work_distribute;
      } else {
        kmp_int8 bool_res =
            KMP_COMPARE_AND_STORE_ACQ8(&warn, (kmp_int8)0, (kmp_int8)1);
        if (bool_res)
          KMP_WARNING(OmptOutdatedWorkshare);
      }
      KMP_DEBUG_ASSERT(ompt_work_type);
    }
  }
#endif

  KMP_DEBUG_ASSERT(plastiter && plower && pupper && pstride);
  KE_TRACE(10, ("__kmpc_for_static_init called (%d)\n", global_tid));
#ifdef KMP_DEBUG
  {
    char *buff;
    // create format specifiers before the debug output
    buff = __kmp_str_format(
        "__kmpc_for_static_init: T#%%d sched=%%d liter=%%d iter=(%%%s,"
        " %%%s, %%%s) incr=%%%s chunk=%%%s signed?<%s>\n",
        traits_t<T>::spec, traits_t<T>::spec, traits_t<ST>::spec,
        traits_t<ST>::spec, traits_t<ST>::spec, traits_t<T>::spec);
    KD_TRACE(100, (buff, global_tid, schedtype, *plastiter, *plower, *pupper,
                   *pstride, incr, chunk));
    __kmp_str_free(&buff);
  }
#endif

  if (__kmp_env_consistency_check) {
    __kmp_push_workshare(global_tid, ct_pdo, loc);
    if (incr == 0) {
      __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrZeroProhibited, ct_pdo,
                            loc);
    }
  }
  /* special handling for zero-trip loops */
  if (incr > 0 ? (*pupper < *plower) : (*plower < *pupper)) {
    if (plastiter != NULL)
      *plastiter = FALSE;
    /* leave pupper and plower set to entire iteration space */
    *pstride = incr; /* value should never be used */
// *plower = *pupper - incr;
// let compiler bypass the illegal loop (like for(i=1;i<10;i--))
// THE LINE COMMENTED ABOVE CAUSED shape2F/h_tests_1.f TO HAVE A FAILURE
// ON A ZERO-TRIP LOOP (lower=1, upper=0,stride=1) - JPH June 23, 2009.
#ifdef KMP_DEBUG
    {
      char *buff;
      // create format specifiers before the debug output
      buff = __kmp_str_format("__kmpc_for_static_init:(ZERO TRIP) liter=%%d "
                              "lower=%%%s upper=%%%s stride = %%%s "
                              "signed?<%s>, loc = %%s\n",
                              traits_t<T>::spec, traits_t<T>::spec,
                              traits_t<ST>::spec, traits_t<T>::spec);
      KD_TRACE(100,
               (buff, *plastiter, *plower, *pupper, *pstride, loc->psource));
      __kmp_str_free(&buff);
    }
#endif
    KE_TRACE(10, ("__kmpc_for_static_init: T#%d return\n", global_tid));

#if OMPT_SUPPORT && OMPT_OPTIONAL
    if (ompt_enabled.ompt_callback_work) {
      ompt_callbacks.ompt_callback(ompt_callback_work)(
          ompt_work_type, ompt_scope_begin, &(team_info->parallel_data),
          &(task_info->task_data), 0, codeptr);
    }
#endif
    KMP_COUNT_VALUE(FOR_static_iterations, 0);
    return;
  }

#if OMP_40_ENABLED
  // Although there are schedule enumerations above kmp_ord_upper that are not
  // schedules for "distribute", the only useful ones are dynamic, and those
  // cannot appear here because this code path is only executed for static
  // schedules.
  if (schedtype > kmp_ord_upper) {
    // we are in DISTRIBUTE construct
    schedtype += kmp_sch_static -
                 kmp_distribute_static; // AC: convert to usual schedule type
    tid = th->th.th_team->t.t_master_tid;
    team = th->th.th_team->t.t_parent;
  } else
#endif
  {
    tid = __kmp_tid_from_gtid(global_tid);
    team = th->th.th_team;
  }

  /* determine if "for" loop is an active worksharing construct */
  if (team->t.t_serialized) {
    /* serialized parallel, each thread executes whole iteration space */
    if (plastiter != NULL)
      *plastiter = TRUE;
    /* leave pupper and plower set to entire iteration space */
    *pstride =
        (incr > 0) ? (*pupper - *plower + 1) : (-(*plower - *pupper + 1));

#ifdef KMP_DEBUG
    {
      char *buff;
      // create format specifiers before the debug output
      buff = __kmp_str_format("__kmpc_for_static_init: (serial) liter=%%d "
                              "lower=%%%s upper=%%%s stride = %%%s\n",
                              traits_t<T>::spec, traits_t<T>::spec,
                              traits_t<ST>::spec);
      KD_TRACE(100, (buff, *plastiter, *plower, *pupper, *pstride));
      __kmp_str_free(&buff);
    }
#endif
    KE_TRACE(10, ("__kmpc_for_static_init: T#%d return\n", global_tid));

#if OMPT_SUPPORT && OMPT_OPTIONAL
    if (ompt_enabled.ompt_callback_work) {
      ompt_callbacks.ompt_callback(ompt_callback_work)(
          ompt_work_type, ompt_scope_begin, &(team_info->parallel_data),
          &(task_info->task_data), *pstride, codeptr);
    }
#endif
    return;
  }
  nth = team->t.t_nproc;
  if (nth == 1) {
    if (plastiter != NULL)
      *plastiter = TRUE;
    *pstride =
        (incr > 0) ? (*pupper - *plower + 1) : (-(*plower - *pupper + 1));
#ifdef KMP_DEBUG
    {
      char *buff;
      // create format specifiers before the debug output
      buff = __kmp_str_format("__kmpc_for_static_init: (serial) liter=%%d "
                              "lower=%%%s upper=%%%s stride = %%%s\n",
                              traits_t<T>::spec, traits_t<T>::spec,
                              traits_t<ST>::spec);
      KD_TRACE(100, (buff, *plastiter, *plower, *pupper, *pstride));
      __kmp_str_free(&buff);
    }
#endif
    KE_TRACE(10, ("__kmpc_for_static_init: T#%d return\n", global_tid));

#if OMPT_SUPPORT && OMPT_OPTIONAL
    if (ompt_enabled.ompt_callback_work) {
      ompt_callbacks.ompt_callback(ompt_callback_work)(
          ompt_work_type, ompt_scope_begin, &(team_info->parallel_data),
          &(task_info->task_data), *pstride, codeptr);
    }
#endif
    return;
  }

  /* compute trip count */
  if (incr == 1) {
    trip_count = *pupper - *plower + 1;
  } else if (incr == -1) {
    trip_count = *plower - *pupper + 1;
  } else if (incr > 0) {
    // upper-lower can exceed the limit of signed type
    trip_count = (UT)(*pupper - *plower) / incr + 1;
  } else {
    trip_count = (UT)(*plower - *pupper) / (-incr) + 1;
  }

  if (__kmp_env_consistency_check) {
    /* tripcount overflow? */
    if (trip_count == 0 && *pupper != *plower) {
      __kmp_error_construct(kmp_i18n_msg_CnsIterationRangeTooLarge, ct_pdo,
                            loc);
    }
  }
  KMP_COUNT_VALUE(FOR_static_iterations, trip_count);

  /* compute remaining parameters */
  switch (schedtype) {
  case kmp_sch_static: {
    if (trip_count < nth) {
      KMP_DEBUG_ASSERT(
          __kmp_static == kmp_sch_static_greedy ||
          __kmp_static ==
              kmp_sch_static_balanced); // Unknown static scheduling type.
      if (tid < trip_count) {
        *pupper = *plower = *plower + tid * incr;
      } else {
        *plower = *pupper + incr;
      }
      if (plastiter != NULL)
        *plastiter = (tid == trip_count - 1);
    } else {
      if (__kmp_static == kmp_sch_static_balanced) {
        UT small_chunk = trip_count / nth;
        UT extras = trip_count % nth;
        *plower += incr * (tid * small_chunk + (tid < extras ? tid : extras));
        *pupper = *plower + small_chunk * incr - (tid < extras ? 0 : incr);
        if (plastiter != NULL)
          *plastiter = (tid == nth - 1);
      } else {
        T big_chunk_inc_count =
            (trip_count / nth + ((trip_count % nth) ? 1 : 0)) * incr;
        T old_upper = *pupper;

        KMP_DEBUG_ASSERT(__kmp_static == kmp_sch_static_greedy);
        // Unknown static scheduling type.

        *plower += tid * big_chunk_inc_count;
        *pupper = *plower + big_chunk_inc_count - incr;
        if (incr > 0) {
          if (*pupper < *plower)
            *pupper = traits_t<T>::max_value;
          if (plastiter != NULL)
            *plastiter = *plower <= old_upper && *pupper > old_upper - incr;
          if (*pupper > old_upper)
            *pupper = old_upper; // tracker C73258
        } else {
          if (*pupper > *plower)
            *pupper = traits_t<T>::min_value;
          if (plastiter != NULL)
            *plastiter = *plower >= old_upper && *pupper < old_upper - incr;
          if (*pupper < old_upper)
            *pupper = old_upper; // tracker C73258
        }
      }
    }
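    // Unlike the older revision above, report the overall trip count through
    // *pstride for this single-chunk-per-thread schedule.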
    *pstride = trip_count;
    break;
  }
  case kmp_sch_static_chunked: {
    ST span;
    if (chunk < 1) {
      chunk = 1;
    }
    span = chunk * incr;
    *pstride = span * nth;
    *plower = *plower + (span * tid);
    *pupper = *plower + span - incr;
    if (plastiter != NULL)
      *plastiter = (tid == ((trip_count - 1) / (UT)chunk) % nth);
    break;
  }
#if OMP_45_ENABLED
  case kmp_sch_static_balanced_chunked: {
    T old_upper = *pupper;
    // round up to make sure the chunk is enough to cover all iterations
    UT span = (trip_count + nth - 1) / nth;

    // round the per-thread span up to a multiple of the requested chunk;
    // the mask trick assumes chunk is a power of two (e.g. a simd width)
    chunk = (span + chunk - 1) & ~(chunk - 1);

    span = chunk * incr;
    *plower = *plower + (span * tid);
    *pupper = *plower + span - incr;
    if (incr > 0) {
      if (*pupper > old_upper)
        *pupper = old_upper;
    } else if (*pupper < old_upper)
      *pupper = old_upper;
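
    // e.g. trip_count=10, nth=4, chunk=4 (a simd width): span=ceil(10/4)=3 is
    // rounded up to chunk=4, threads cover [0,3] [4,7] [8,9] and an empty
    // range, and the last iteration falls to tid (10-1)/4 = 2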

    if (plastiter != NULL)
      *plastiter = (tid == ((trip_count - 1) / (UT)chunk));
    break;
  }
#endif
  default:
    KMP_ASSERT2(0, "__kmpc_for_static_init: unknown scheduling type");
    break;
  }

#if USE_ITT_BUILD
  // Report loop metadata
  if (KMP_MASTER_TID(tid) && __itt_metadata_add_ptr &&
      __kmp_forkjoin_frames_mode == 3 &&
#if OMP_40_ENABLED
      th->th.th_teams_microtask == NULL &&
#endif
      team->t.t_active_level == 1) {
    kmp_uint64 cur_chunk = chunk;
    // Calculate chunk in case it was not specified; it is specified for
    // kmp_sch_static_chunked
    if (schedtype == kmp_sch_static) {
      cur_chunk = trip_count / nth + ((trip_count % nth) ? 1 : 0);
    }
    // 0 - "static" schedule
    __kmp_itt_metadata_loop(loc, 0, trip_count, cur_chunk);
  }
#endif
#ifdef KMP_DEBUG
  {
    char *buff;
    // create format specifiers before the debug output
    buff = __kmp_str_format("__kmpc_for_static_init: liter=%%d lower=%%%s "
                            "upper=%%%s stride = %%%s signed?<%s>\n",
                            traits_t<T>::spec, traits_t<T>::spec,
                            traits_t<ST>::spec, traits_t<T>::spec);
    KD_TRACE(100, (buff, *plastiter, *plower, *pupper, *pstride));
    __kmp_str_free(&buff);
  }
#endif
  KE_TRACE(10, ("__kmpc_for_static_init: T#%d return\n", global_tid));

#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.ompt_callback_work) {
    ompt_callbacks.ompt_callback(ompt_callback_work)(
        ompt_work_type, ompt_scope_begin, &(team_info->parallel_data),
        &(task_info->task_data), trip_count, codeptr);
  }
#endif

  return;
}
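
// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the runtime): a minimal model of the
// kmp_sch_static_chunked round-robin partition above, assuming int bounds,
// incr == 1 and a chunk of 2. Thread tid owns chunks tid, tid+nth,
// tid+2*nth, ...; between chunks its bounds advance by stride = chunk*incr*nth.
// The clipping of the last partial chunk is done here for display; the runtime
// leaves that to the compiler-generated loop.
#include <cstdio>

int main() {
    const int lower = 0, upper = 9, incr = 1, chunk = 2, nth = 3;
    const int span = chunk * incr;  // iterations covered by one chunk
    const int stride = span * nth;  // distance between two chunks of one thread
    for (int tid = 0; tid < nth; ++tid) {
        // first chunk of this thread, exactly as the runtime computes it
        for (int lb = lower + span * tid; lb <= upper; lb += stride) {
            int ub = lb + span - incr;
            if (ub > upper) ub = upper;  // clip the final partial chunk
            std::printf("tid %d: [%d, %d]\n", tid, lb, ub);
        }
    }
    return 0;
}
// ---------------------------------------------------------------------------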