Example 1
void
__kmp_pop_sync( int gtid, enum cons_type ct, ident_t const * ident )
{
    int tos;
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;
    tos = p->stack_top;
    KE_TRACE( 10, ("__kmp_pop_sync (%d %d)\n", gtid, __kmp_get_gtid() ) );
    if ( tos == 0 || p->s_top == 0 ) {
        __kmp_error_construct( kmp_i18n_msg_CnsDetectedEnd, ct, ident );
    }
    if ( tos != p->s_top || p->stack_data[ tos ].type != ct ) {
        __kmp_check_null_func();
        __kmp_error_construct2(
            kmp_i18n_msg_CnsExpectedEnd,
            ct, ident,
            & p->stack_data[ tos ]
        );
    }
    if ( gtid < 0 ) {
        __kmp_check_null_func();
    }
    KE_TRACE( 100, ( POP_MSG( p ) ) );
    p->s_top = p->stack_data[ tos ].prev;
    p->stack_data[ tos ].type = ct_none;
    p->stack_data[ tos ].ident = NULL;
    p->stack_top = tos - 1;
    KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
}
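For context, this pop pairs with __kmp_push_sync (Example 10): each open SYNC construct is chained through .prev, so s_top always points at the innermost one. Below is a minimal standalone analogue of that bookkeeping; all sim_* names are invented for illustration, and asserts stand in for the __kmp_error_construct* reporting. It is a sketch, not the runtime's code.

#include <assert.h>

/* simplified, single-threaded analogue of the runtime's sync stack */
enum sim_cons_type { sim_ct_none, sim_ct_critical, sim_ct_ordered };

struct sim_entry { enum sim_cons_type type; int prev; };

struct sim_stack {
    struct sim_entry data[64];
    int top;    /* index of the topmost entry; 0 means empty     */
    int s_top;  /* index of the topmost SYNC entry; 0 means none */
};

static void sim_push_sync(struct sim_stack *s, enum sim_cons_type ct) {
    int tos = ++s->top;
    s->data[tos].type = ct;
    s->data[tos].prev = s->s_top;   /* chain to the previous sync entry */
    s->s_top = tos;
}

static void sim_pop_sync(struct sim_stack *s, enum sim_cons_type ct) {
    int tos = s->top;
    assert(tos != 0 && s->s_top != 0);                    /* CnsDetectedEnd */
    assert(tos == s->s_top && s->data[tos].type == ct);   /* CnsExpectedEnd */
    s->s_top = s->data[tos].prev;
    s->data[tos].type = sim_ct_none;
    s->top = tos - 1;
}

int main(void) {
    struct sim_stack s = { {{sim_ct_none, 0}}, 0, 0 };
    sim_push_sync(&s, sim_ct_critical);
    sim_pop_sync(&s, sim_ct_critical);   /* balanced: both asserts hold */
    return 0;
}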
Example 2
void
__kmp_check_barrier( int gtid, enum cons_type ct, ident_t const * ident )
{
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;
    KE_TRACE( 10, ("__kmp_check_barrier (loc: %p, gtid: %d %d)\n", ident, gtid, __kmp_get_gtid() ) );
    if ( ident != 0 ) {
        __kmp_check_null_func();
    }
    if ( p->w_top > p->p_top ) {
        /* we are already in a WORKSHARING construct for this PARALLEL region */
        __kmp_error_construct2(
            kmp_i18n_msg_CnsInvalidNesting,
            ct, ident,
            & p->stack_data[ p->w_top ]
        );
    }
    if (p->s_top > p->p_top) {
        /* we are already in a SYNC construct for this PARALLEL region */
        __kmp_error_construct2(
            kmp_i18n_msg_CnsInvalidNesting,
            ct, ident,
            & p->stack_data[ p->s_top ]
        );
    }
}
Example 3
OMPT_API_ROUTINE int ompt_get_partition_place_nums(int place_nums_size,
                                                   int *place_nums) {
// copied from kmp_ftn_entry.h (but modified)
#if !KMP_AFFINITY_SUPPORTED
  return 0;
#else
  if (__kmp_get_gtid() < 0)
    return 0;

  int i, gtid, place_num, first_place, last_place, start, end;
  kmp_info_t *thread;
  if (!KMP_AFFINITY_CAPABLE())
    return 0;
  gtid = __kmp_entry_gtid();
  thread = __kmp_thread_from_gtid(gtid);
  if (thread == NULL)
    return 0;
  first_place = thread->th.th_first_place;
  last_place = thread->th.th_last_place;
  if (first_place < 0 || last_place < 0)
    return 0;
  if (first_place <= last_place) {
    start = first_place;
    end = last_place;
  } else {
    start = last_place;
    end = first_place;
  }
  if (end - start + 1 <= place_nums_size) // all end - start + 1 places must fit
    for (i = 0, place_num = start; place_num <= end; ++place_num, ++i) {
      place_nums[i] = place_num;
    }
  return end - start + 1;
#endif
}
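This OMPT routine mirrors the user-level place-partition API. A hedged sketch of the analogous application-side query using the standard omp_get_partition_* functions follows; run it with a place partition configured (e.g. OMP_PLACES=cores) for meaningful output.

#include <stdio.h>
#include <omp.h>

int main(void) {
    int n = omp_get_partition_num_places();
    if (n > 0 && n <= 64) {
        int nums[64];
        omp_get_partition_place_nums(nums);   /* fills n place numbers */
        for (int i = 0; i < n; ++i)
            printf("place %d is in this thread's partition\n", nums[i]);
    }
    return 0;
}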
Example 4
void
__ompt_init_internal()
{
    if (ompt_status & ompt_status_track) {
        // initialize initial thread for OMPT
        kmp_info_t *root_thread = ompt_get_thread();
        __kmp_task_init_ompt(
            root_thread->th.th_team->t.t_implicit_task_taskdata, 0);
        __kmp_task_init_ompt(
            root_thread->th.th_serial_team->t.t_implicit_task_taskdata, 0);

        // make mandatory callback for creation of initial thread
        // this needs to occur here rather than in __kmp_register_root because
        // __kmp_register_root is called before ompt_initialize
        int gtid = __kmp_get_gtid();
        if (KMP_UBER_GTID(gtid)) {
            // initialize the initial thread's idle frame and state
            root_thread->th.ompt_thread_info.idle_frame = 0;
            root_thread->th.ompt_thread_info.state = ompt_state_overhead;
            if ((ompt_status == ompt_status_track_callback) &&
                ompt_callbacks.ompt_callback(ompt_event_thread_begin)) {
                __ompt_thread_begin(ompt_thread_initial, gtid);
            }
            root_thread->th.ompt_thread_info.state = ompt_state_work_serial;
        }
    }
}
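The code above uses a draft OMPT interface (ompt_status, __ompt_thread_begin). Under the ratified OMPT interface of OpenMP 5.x (omp-tools.h), a tool would receive the same thread-begin event by registering a callback from ompt_start_tool. A minimal sketch; build it as a shared object and point OMP_TOOL_LIBRARIES at it.

#include <omp-tools.h>
#include <stdio.h>

static void on_thread_begin(ompt_thread_t thread_type,
                            ompt_data_t *thread_data) {
    printf("OMPT: thread begin, type %d\n", (int)thread_type);
}

static int tool_initialize(ompt_function_lookup_t lookup,
                           int initial_device_num, ompt_data_t *tool_data) {
    ompt_set_callback_t set_callback =
        (ompt_set_callback_t)lookup("ompt_set_callback");
    set_callback(ompt_callback_thread_begin,
                 (ompt_callback_t)on_thread_begin);
    return 1;  /* non-zero keeps the tool active */
}

static void tool_finalize(ompt_data_t *tool_data) {}

/* the runtime looks this symbol up at startup */
ompt_start_tool_result_t *ompt_start_tool(unsigned int omp_version,
                                          const char *runtime_version) {
    static ompt_start_tool_result_t result = {tool_initialize, tool_finalize,
                                              {0}};
    return &result;
}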
Example 5
enum cons_type
__kmp_pop_workshare( int gtid, enum cons_type ct, ident_t const * ident )
{
    int tos;
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    tos = p->stack_top;
    KE_TRACE( 10, ("__kmp_pop_workshare (%d %d)\n", gtid, __kmp_get_gtid() ) );
    if ( tos == 0 || p->w_top == 0 ) {
        __kmp_error_construct( kmp_i18n_msg_CnsDetectedEnd, ct, ident );
    }

    if ( tos != p->w_top ||
         ( p->stack_data[ tos ].type != ct &&
          /* below are two exceptions to the rule that construct types must match */
          ! ( p->stack_data[ tos ].type == ct_pdo_ordered && ct == ct_pdo ) &&
          ! ( p->stack_data[ tos ].type == ct_task_ordered && ct == ct_task )
         )
       ) {
        __kmp_check_null_func();
        __kmp_error_construct2(
            kmp_i18n_msg_CnsExpectedEnd,
            ct, ident,
            & p->stack_data[ tos ]
        );
    }
    KE_TRACE( 100, ( POP_MSG( p ) ) );
    p->w_top = p->stack_data[ tos ].prev;
    p->stack_data[ tos ].type = ct_none;
    p->stack_data[ tos ].ident = NULL;
    p->stack_top = tos - 1;
    KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
    return p->stack_data[ p->w_top ].type;
}
Example 6
OMPT_API_ROUTINE int ompt_get_proc_id(void) {
#if KMP_OS_LINUX
  if (__kmp_get_gtid() < 0)
    return -1;

  return sched_getcpu();
#else
  return -1;
#endif
}
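On Linux this routine simply defers to glibc's sched_getcpu(). A standalone sketch of that call (_GNU_SOURCE is required to expose it):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void) {
    int cpu = sched_getcpu();   /* -1 with errno set on failure */
    printf("running on CPU %d\n", cpu);
    return 0;
}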
Example 7
ompt_thread_id_t
__ompt_get_thread_id_internal()
{
    // FIXME
    // until we have a better way of assigning ids, use __kmp_get_gtid
    // since the return value might be negative, we need to test that before
    // assigning it to an ompt_thread_id_t, which is unsigned.
    int id = __kmp_get_gtid();
    assert(id >= 0);

    return GTID_TO_OMPT_THREAD_ID(id);
}
Example 8
/*!
@ingroup CANCELLATION
@param loc location of the barrier encountered
@param gtid Global thread ID of encountering thread

@return returns true if a matching cancellation request has been flagged in the
RTL and the encountering thread has to cancel.

Barrier with cancellation point to send threads from the barrier to the
end of the parallel region.  Needs a special code pattern as documented
in the design document for the cancellation feature.
*/
kmp_int32 __kmpc_cancel_barrier(ident_t *loc, kmp_int32 gtid) {
  int ret = 0 /* false */;
  kmp_info_t *this_thr = __kmp_threads[gtid];
  kmp_team_t *this_team = this_thr->th.th_team;

  KMP_DEBUG_ASSERT(__kmp_get_gtid() == gtid);

  // call into the standard barrier
  __kmpc_barrier(loc, gtid);

  // if cancellation is active, check cancellation flag
  if (__kmp_omp_cancellation) {
    // depending on which construct to cancel, check the flag and
    // reset the flag
    switch (KMP_ATOMIC_LD_RLX(&(this_team->t.t_cancel_request))) {
    case cancel_parallel:
      ret = 1;
      // ensure that threads have checked the flag, when
      // leaving the above barrier
      __kmpc_barrier(loc, gtid);
      this_team->t.t_cancel_request = cancel_noreq;
      // the next barrier is the fork/join barrier, which
      // synchronizes the threads leaving here
      break;
    case cancel_loop:
    case cancel_sections:
      ret = 1;
      // ensure that threads have checked the flag, when
      // leaving the above barrier
      __kmpc_barrier(loc, gtid);
      this_team->t.t_cancel_request = cancel_noreq;
      // synchronize the threads again to make sure we do not have any run-away
      // threads that cause a race on the cancellation flag
      __kmpc_barrier(loc, gtid);
      break;
    case cancel_taskgroup:
      // this case should not occur
      KMP_ASSERT(0 /* false */);
      break;
    case cancel_noreq:
      // do nothing
      break;
    default:
      KMP_ASSERT(0 /* false */);
    }
  }

  return ret;
}
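From the application side, __kmpc_cancel_barrier is what barriers inside a cancellable parallel region compile into in the LLVM scheme. A hedged sketch of triggering it from user code; the exact lowering is compiler-dependent, and OMP_CANCELLATION=true must be set or the request is ignored.

#include <stdio.h>
#include <omp.h>

int main(void) {
    #pragma omp parallel
    {
        if (omp_get_thread_num() == 0) {
            #pragma omp cancel parallel   /* flags cancel_parallel in the team */
        }
        /* a barrier in a cancellable region acts as a cancellation point:
           threads that observe the request skip to the end of the region */
        #pragma omp barrier
        printf("thread %d: region was not cancelled\n", omp_get_thread_num());
    }
    return 0;
}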
Example 9
void
__kmp_push_workshare( int gtid, enum cons_type ct, ident_t const * ident )
{
    int         tos;
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;
    KE_TRACE( 10, ("__kmp_push_workshare (%d %d)\n", gtid, __kmp_get_gtid() ) );
    __kmp_check_workshare( gtid, ct, ident );
    KE_TRACE( 100, ( PUSH_MSG( ct, ident ) ) );
    tos = ++p->stack_top;
    p->stack_data[ tos ].type = ct;
    p->stack_data[ tos ].prev = p->w_top;
    p->stack_data[ tos ].ident = ident;
    p->stack_data[ tos ].name = NULL;
    p->w_top = tos;
    KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
}
Example 10
void
__kmp_push_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck )
{
    int         tos;
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    KMP_ASSERT( gtid == __kmp_get_gtid() );
    KE_TRACE( 10, ("__kmp_push_sync (gtid=%d)\n", gtid ) );
    __kmp_check_sync( gtid, ct, ident, lck );
    KE_TRACE( 100, ( PUSH_MSG( ct, ident ) ) );
    tos = ++ p->stack_top;
    p->stack_data[ tos ].type  = ct;
    p->stack_data[ tos ].prev  = p->s_top;
    p->stack_data[ tos ].ident = ident;
    p->stack_data[ tos ].name  = lck;
    p->s_top = tos;
    KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
}
Example 11
OMPT_API_ROUTINE int ompt_get_place_num(void) {
// copied from kmp_ftn_entry.h (but modified)
#if !KMP_AFFINITY_SUPPORTED
  return -1;
#else
  if (__kmp_get_gtid() < 0)
    return -1;

  int gtid;
  kmp_info_t *thread;
  if (!KMP_AFFINITY_CAPABLE())
    return -1;
  gtid = __kmp_entry_gtid();
  thread = __kmp_thread_from_gtid(gtid);
  if (thread == NULL || thread->th.th_current_place < 0)
    return -1;
  return thread->th.th_current_place;
#endif
}
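The user-level counterpart is omp_get_place_num(). A minimal sketch; it returns -1 unless threads are bound to places (e.g. with OMP_PLACES=cores).

#include <stdio.h>
#include <omp.h>

int main(void) {
    #pragma omp parallel
    printf("thread %d runs on place %d\n",
           omp_get_thread_num(), omp_get_place_num());
    return 0;
}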
Example 12
void
__kmp_push_parallel( int gtid, ident_t const * ident )
{
    int tos;
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    KMP_DEBUG_ASSERT( __kmp_threads[ gtid ]-> th.th_cons );
    KE_TRACE( 10, ("__kmp_push_parallel (%d %d)\n", gtid, __kmp_get_gtid() ) );
    KE_TRACE( 100, ( PUSH_MSG( ct_parallel, ident ) ) );
    if ( p->stack_top >= p->stack_size ) {
        __kmp_expand_cons_stack( gtid, p );
    } // if
    tos = ++p->stack_top;
    p->stack_data[ tos ].type = ct_parallel;
    p->stack_data[ tos ].prev = p->p_top;
    p->stack_data[ tos ].ident = ident;
    p->stack_data[ tos ].name = NULL;
    p->p_top = tos;
    KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
}
Example 13
void
__kmp_check_workshare( int gtid, enum cons_type ct, ident_t const * ident )
{
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    KMP_DEBUG_ASSERT( __kmp_threads[ gtid ]-> th.th_cons );
    KE_TRACE( 10, ("__kmp_check_workshare (%d %d)\n", gtid, __kmp_get_gtid() ) );

    if ( p->stack_top >= p->stack_size ) {
        __kmp_expand_cons_stack( gtid, p );
    } // if
    if ( p->w_top > p->p_top &&
        !(IS_CONS_TYPE_TASKQ(p->stack_data[ p->w_top ].type) && IS_CONS_TYPE_TASKQ(ct))) {
        // We are already in a WORKSHARE construct for this PARALLEL region.
        __kmp_error_construct2( kmp_i18n_msg_CnsInvalidNesting, ct, ident, & p->stack_data[ p->w_top ] );
    } // if
    if ( p->s_top > p->p_top ) {
        // We are already in a SYNC construct for this PARALLEL region.
        __kmp_error_construct2( kmp_i18n_msg_CnsInvalidNesting, ct, ident, & p->stack_data[ p->s_top ] );
    } // if
}
Example 14
static void
__kmp_expand_cons_stack( int gtid, struct cons_header *p )
{
    int    i;
    struct cons_data *d;

    /* TODO for monitor perhaps? */
    if (gtid < 0)
        __kmp_check_null_func();

    KE_TRACE( 10, ("expand cons_stack (%d %d)\n", gtid, __kmp_get_gtid() ) );

    d = p->stack_data;

    p->stack_size = (p->stack_size * 2) + 100;

    /* TODO free the old data */
    p->stack_data = (struct cons_data *) __kmp_allocate( sizeof( struct cons_data ) * (p->stack_size+1) );

    for (i = p->stack_top; i >= 0; --i)
        p->stack_data[i] = d[i];

    /* NOTE: we do not free the old stack_data */
}
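The growth policy is new_size = 2 * old_size + 100, followed by a copy of the live entries; as its comments note, the runtime deliberately leaves the old block unfreed. The same grow-and-copy idiom in a standalone sketch with invented names, where a single owner can safely free the old block:

#include <stdlib.h>
#include <string.h>

struct int_stack {
    int *data;
    int  top;    /* index of the topmost live element               */
    int  size;   /* capacity (one extra slot is allocated, as above) */
};

static void int_stack_grow(struct int_stack *s) {
    int *old = s->data;                             /* holds top + 1 entries */
    s->size = s->size * 2 + 100;                    /* same policy as above  */
    s->data = malloc(sizeof(int) * (s->size + 1));
    if (s->data == NULL)
        abort();
    memcpy(s->data, old, sizeof(int) * (s->top + 1));
    free(old);
}

int main(void) {
    struct int_stack s;
    s.size = 10;
    s.top = 0;
    s.data = malloc(sizeof(int) * (s.size + 1));
    s.data[0] = 42;
    int_stack_grow(&s);   /* capacity becomes 120, element preserved */
    free(s.data);
    return 0;
}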
Example 15
/*!
@ingroup CANCELLATION
@param loc_ref location of the original task directive
@param gtid Global thread ID of encountering thread
@param cncl_kind Cancellation kind (parallel, for, sections, taskgroup)

@return returns true if the cancellation request has been activated and the
execution thread needs to proceed to the end of the canceled region.

Request cancellation of the binding OpenMP region.
*/
kmp_int32 __kmpc_cancel(ident_t *loc_ref, kmp_int32 gtid, kmp_int32 cncl_kind) {
  kmp_info_t *this_thr = __kmp_threads[gtid];

  KC_TRACE(10, ("__kmpc_cancel: T#%d request %d OMP_CANCELLATION=%d\n", gtid,
                cncl_kind, __kmp_omp_cancellation));

  KMP_DEBUG_ASSERT(cncl_kind != cancel_noreq);
  KMP_DEBUG_ASSERT(cncl_kind == cancel_parallel || cncl_kind == cancel_loop ||
                   cncl_kind == cancel_sections ||
                   cncl_kind == cancel_taskgroup);
  KMP_DEBUG_ASSERT(__kmp_get_gtid() == gtid);

  if (__kmp_omp_cancellation) {
    switch (cncl_kind) {
    case cancel_parallel:
    case cancel_loop:
    case cancel_sections:
      // cancellation requests for parallel and worksharing constructs
      // are handled through the team structure
      {
        kmp_team_t *this_team = this_thr->th.th_team;
        KMP_DEBUG_ASSERT(this_team);
        kmp_int32 old = cancel_noreq;
        this_team->t.t_cancel_request.compare_exchange_strong(old, cncl_kind);
        if (old == cancel_noreq || old == cncl_kind) {
          // we do not have a cancellation request in this team or we do have
          // one that matches the current request -> cancel
#if OMPT_SUPPORT && OMPT_OPTIONAL
          if (ompt_enabled.ompt_callback_cancel) {
            ompt_data_t *task_data;
            __ompt_get_task_info_internal(0, NULL, &task_data, NULL, NULL,
                                          NULL);
            ompt_cancel_flag_t type = ompt_cancel_parallel;
            if (cncl_kind == cancel_parallel)
              type = ompt_cancel_parallel;
            else if (cncl_kind == cancel_loop)
              type = ompt_cancel_loop;
            else if (cncl_kind == cancel_sections)
              type = ompt_cancel_sections;
            ompt_callbacks.ompt_callback(ompt_callback_cancel)(
                task_data, type | ompt_cancel_activated,
                OMPT_GET_RETURN_ADDRESS(0));
          }
#endif
          return 1 /* true */;
        }
        break;
      }
    case cancel_taskgroup:
      // cancellation requests for a task group
      // are handled through the taskgroup structure
      {
        kmp_taskdata_t *task;
        kmp_taskgroup_t *taskgroup;

        task = this_thr->th.th_current_task;
        KMP_DEBUG_ASSERT(task);

        taskgroup = task->td_taskgroup;
        if (taskgroup) {
          kmp_int32 old = cancel_noreq;
          taskgroup->cancel_request.compare_exchange_strong(old, cncl_kind);
          if (old == cancel_noreq || old == cncl_kind) {
            // we do not have a cancellation request in this taskgroup or we do
            // have one that matches the current request -> cancel
#if OMPT_SUPPORT && OMPT_OPTIONAL
            if (ompt_enabled.ompt_callback_cancel) {
              ompt_data_t *task_data;
              __ompt_get_task_info_internal(0, NULL, &task_data, NULL, NULL,
                                            NULL);
              ompt_callbacks.ompt_callback(ompt_callback_cancel)(
                  task_data, ompt_cancel_taskgroup | ompt_cancel_activated,
                  OMPT_GET_RETURN_ADDRESS(0));
            }
#endif
            return 1 /* true */;
          }
        } else {
          // TODO: what needs to happen here?
          // the specification disallows cancellation w/o taskgroups
          // so we might do anything here, let's abort for now
          KMP_ASSERT(0 /* false */);
        }
      }
      break;
    default:
      KMP_ASSERT(0 /* false */);
    }
  }

  // ICV OMP_CANCELLATION=false, so we ignored this cancel request
  KMP_DEBUG_ASSERT(!__kmp_omp_cancellation);
  return 0 /* false */;
}
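User code reaches __kmpc_cancel through the cancel directive. A hedged sketch of loop cancellation; it requires OMP_CANCELLATION=true at startup, and the 123456 sentinel is purely illustrative.

#include <stdio.h>
#include <omp.h>

int main(void) {
    int found = -1;
    #pragma omp parallel for
    for (int i = 0; i < 1000000; ++i) {
        #pragma omp cancellation point for   /* polls the team's cancel flag */
        if (i == 123456) {
            #pragma omp atomic write
            found = i;
            #pragma omp cancel for           /* requests cancel_loop */
        }
    }
    printf("found = %d\n", found);
    return 0;
}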
Example 16
/*!
@ingroup CANCELLATION
@param loc_ref location of the original task directive
@param gtid Global thread ID of encountering thread
@param cncl_kind Cancellation kind (parallel, for, sections, taskgroup)

@return returns true if a matching cancellation request has been flagged in the
RTL and the encountering thread has to cancel.

Cancellation point for the encountering thread.
*/
kmp_int32 __kmpc_cancellationpoint(ident_t *loc_ref, kmp_int32 gtid,
                                   kmp_int32 cncl_kind) {
  kmp_info_t *this_thr = __kmp_threads[gtid];

  KC_TRACE(10,
           ("__kmpc_cancellationpoint: T#%d request %d OMP_CANCELLATION=%d\n",
            gtid, cncl_kind, __kmp_omp_cancellation));

  KMP_DEBUG_ASSERT(cncl_kind != cancel_noreq);
  KMP_DEBUG_ASSERT(cncl_kind == cancel_parallel || cncl_kind == cancel_loop ||
                   cncl_kind == cancel_sections ||
                   cncl_kind == cancel_taskgroup);
  KMP_DEBUG_ASSERT(__kmp_get_gtid() == gtid);

  if (__kmp_omp_cancellation) {
    switch (cncl_kind) {
    case cancel_parallel:
    case cancel_loop:
    case cancel_sections:
      // cancellation requests for parallel and worksharing constructs
      // are handled through the team structure
      {
        kmp_team_t *this_team = this_thr->th.th_team;
        KMP_DEBUG_ASSERT(this_team);
        if (this_team->t.t_cancel_request) {
          if (cncl_kind == this_team->t.t_cancel_request) {
            // the request in the team structure matches the type of
            // cancellation point so we can cancel
#if OMPT_SUPPORT && OMPT_OPTIONAL
            if (ompt_enabled.ompt_callback_cancel) {
              ompt_data_t *task_data;
              __ompt_get_task_info_internal(0, NULL, &task_data, NULL, NULL,
                                            NULL);
              ompt_cancel_flag_t type = ompt_cancel_parallel;
              if (cncl_kind == cancel_parallel)
                type = ompt_cancel_parallel;
              else if (cncl_kind == cancel_loop)
                type = ompt_cancel_loop;
              else if (cncl_kind == cancel_sections)
                type = ompt_cancel_sections;
              ompt_callbacks.ompt_callback(ompt_callback_cancel)(
                  task_data, type | ompt_cancel_detected,
                  OMPT_GET_RETURN_ADDRESS(0));
            }
#endif
            return 1 /* true */;
          }
          KMP_ASSERT(0 /* false */);
        } else {
          // we do not have a cancellation request pending, so we just
          // ignore this cancellation point
          return 0;
        }
        break;
      }
    case cancel_taskgroup:
      // cancellation requests for a task group
      // are handled through the taskgroup structure
      {
        kmp_taskdata_t *task;
        kmp_taskgroup_t *taskgroup;

        task = this_thr->th.th_current_task;
        KMP_DEBUG_ASSERT(task);

        taskgroup = task->td_taskgroup;
        if (taskgroup) {
          // return the current status of cancellation for the taskgroup
#if OMPT_SUPPORT && OMPT_OPTIONAL
          if (ompt_enabled.ompt_callback_cancel &&
              !!taskgroup->cancel_request) {
            ompt_data_t *task_data;
            __ompt_get_task_info_internal(0, NULL, &task_data, NULL, NULL,
                                          NULL);
            ompt_callbacks.ompt_callback(ompt_callback_cancel)(
                task_data, ompt_cancel_taskgroup | ompt_cancel_detected,
                OMPT_GET_RETURN_ADDRESS(0));
          }
#endif
          return !!taskgroup->cancel_request;
        } else {
          // if a cancellation point is encountered by a task that does not
          // belong to a taskgroup, it is OK to ignore it
          return 0 /* false */;
        }
      }
    default:
      KMP_ASSERT(0 /* false */);
    }
  }

  // ICV OMP_CANCELLATION=false, so we ignore the cancellation point
  KMP_DEBUG_ASSERT(!__kmp_omp_cancellation);
  return 0 /* false */;
}
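A matching user-level sketch for the taskgroup path: tasks poll the taskgroup's cancel_request through explicit cancellation points. Requires OMP_CANCELLATION=true; the task count and the i == 7 trigger are illustrative.

#include <stdio.h>
#include <omp.h>

int main(void) {
    #pragma omp parallel
    #pragma omp single
    #pragma omp taskgroup
    {
        for (int i = 0; i < 100; ++i) {
            #pragma omp task firstprivate(i)
            {
                if (i == 7) {
                    #pragma omp cancel taskgroup   /* sets cancel_request */
                }
                #pragma omp cancellation point taskgroup
                printf("task %d ran to completion\n", i);
            }
        }
    }
    return 0;
}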
Example 17
void
#if KMP_USE_DYNAMIC_LOCK
__kmp_check_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck, kmp_uint32 seq )
#else
__kmp_check_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck )
#endif
{
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    KE_TRACE( 10, ("__kmp_check_sync (gtid=%d)\n", __kmp_get_gtid() ) );

    if (p->stack_top >= p->stack_size)
       __kmp_expand_cons_stack( gtid, p );

    if (ct == ct_ordered_in_parallel || ct == ct_ordered_in_pdo || ct == ct_ordered_in_taskq ) {
        if (p->w_top <= p->p_top) {
            /* we are not in a worksharing construct */
            #ifdef BUILD_PARALLEL_ORDERED
                /* do not report error messages for PARALLEL ORDERED */
                KMP_ASSERT( ct == ct_ordered_in_parallel );
            #else
                __kmp_error_construct( kmp_i18n_msg_CnsBoundToWorksharing, ct, ident );
            #endif /* BUILD_PARALLEL_ORDERED */
        } else {
            /* inside a WORKSHARING construct for this PARALLEL region */
            if (!IS_CONS_TYPE_ORDERED(p->stack_data[ p->w_top ].type)) {
                if (p->stack_data[ p->w_top ].type == ct_taskq) {
                    __kmp_error_construct2(
                        kmp_i18n_msg_CnsNotInTaskConstruct,
                        ct, ident,
                        & p->stack_data[ p->w_top ]
                    );
                } else {
                    __kmp_error_construct2(
                        kmp_i18n_msg_CnsNoOrderedClause,
                        ct, ident,
                        & p->stack_data[ p->w_top ]
                    );
               }
            }
        }
        if (p->s_top > p->p_top && p->s_top > p->w_top) {
            /* inside a sync construct which is inside a worksharing construct */
            int index = p->s_top;
            enum cons_type stack_type;

            stack_type = p->stack_data[ index ].type;

            if (stack_type == ct_critical ||
                ( ( stack_type == ct_ordered_in_parallel ||
                    stack_type == ct_ordered_in_pdo      ||
                    stack_type == ct_ordered_in_taskq  ) &&     /* C doesn't allow named ordered; ordered in ordered gets error */
                 p->stack_data[ index ].ident != NULL &&
                 (p->stack_data[ index ].ident->flags & KMP_IDENT_KMPC ))) {
                /* we are in ORDERED which is inside an ORDERED or CRITICAL construct */
                __kmp_error_construct2(
                    kmp_i18n_msg_CnsInvalidNesting,
                    ct, ident,
                    & p->stack_data[ index ]
                );
            }
        }
    } else if ( ct == ct_critical ) {
#if KMP_USE_DYNAMIC_LOCK
        if ( lck != NULL && __kmp_get_user_lock_owner( lck, seq ) == gtid ) {    /* this same thread already has lock for this critical section */
#else
        if ( lck != NULL && __kmp_get_user_lock_owner( lck ) == gtid ) {    /* this same thread already has lock for this critical section */
#endif
            int index = p->s_top;
            struct cons_data cons = { NULL, ct_critical, 0, NULL };
            /* walk up construct stack and try to find critical with matching name */
            while ( index != 0 && p->stack_data[ index ].name != lck ) {
                index = p->stack_data[ index ].prev;
            }
            if ( index != 0 ) {
                /* found match on the stack (may not always because of interleaved critical for Fortran) */
                cons = p->stack_data[ index ];
            }
            /* we are in CRITICAL which is inside a CRITICAL construct of the same name */
            __kmp_error_construct2( kmp_i18n_msg_CnsNestingSameName, ct, ident, & cons );
        }
    } else if ( ct == ct_master || ct == ct_reduce ) {
        if (p->w_top > p->p_top) {
            /* inside a WORKSHARING construct for this PARALLEL region */
           __kmp_error_construct2(
               kmp_i18n_msg_CnsInvalidNesting,
               ct, ident,
               & p->stack_data[ p->w_top ]
           );
        }
        if (ct == ct_reduce && p->s_top > p->p_top) {
            /* inside another SYNC construct for this PARALLEL region */
            __kmp_error_construct2(
                kmp_i18n_msg_CnsInvalidNesting,
                ct, ident,
                & p->stack_data[ p->s_top ]
            );
        } // if
    } // if
}

void
#if KMP_USE_DYNAMIC_LOCK
__kmp_push_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck, kmp_uint32 seq )
#else
__kmp_push_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck )
#endif
{
    int         tos;
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    KMP_ASSERT( gtid == __kmp_get_gtid() );
    KE_TRACE( 10, ("__kmp_push_sync (gtid=%d)\n", gtid ) );
#if KMP_USE_DYNAMIC_LOCK
    __kmp_check_sync( gtid, ct, ident, lck, seq );
#else
    __kmp_check_sync( gtid, ct, ident, lck );
#endif
    KE_TRACE( 100, ( PUSH_MSG( ct, ident ) ) );
    tos = ++ p->stack_top;
    p->stack_data[ tos ].type  = ct;
    p->stack_data[ tos ].prev  = p->s_top;
    p->stack_data[ tos ].ident = ident;
    p->stack_data[ tos ].name  = lck;
    p->s_top = tos;
    KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
}
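As an illustration of what the ct_critical branch of __kmp_check_sync diagnoses, the following deliberately invalid program nests two CRITICAL regions of the same name. A normal build self-deadlocks at the inner region; a runtime built with consistency checking enabled reports CnsNestingSameName instead. Sketch only, not code to ship.

#include <stdio.h>
#include <omp.h>

int main(void) {
    int x = 0;
    #pragma omp parallel
    {
        #pragma omp critical(update)
        {
            x++;
            /* invalid: CRITICAL nested inside a same-name CRITICAL; the
               owning thread already holds the lock, which the check above
               detects via __kmp_get_user_lock_owner */
            #pragma omp critical(update)
            { x++; }
        }
    }
    printf("x = %d\n", x);
    return 0;
}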