Example #1
/*!
@ingroup CANCELLATION
@param loc source location information
@param gtid Global thread ID of encountering thread

@return true if a matching cancellation request has been flagged in the
RTL and the encountering thread has to cancel.

Barrier with cancellation point to send threads from the barrier to the
end of the parallel region.  Needs a special code pattern as documented
in the design document for the cancellation feature.
*/
kmp_int32 __kmpc_cancel_barrier(ident_t *loc, kmp_int32 gtid) {
  int ret = 0 /* false */;
  kmp_info_t *this_thr = __kmp_threads[gtid];
  kmp_team_t *this_team = this_thr->th.th_team;

  KMP_DEBUG_ASSERT(__kmp_get_gtid() == gtid);

  // call into the standard barrier
  __kmpc_barrier(loc, gtid);

  // if cancellation is active, check cancellation flag
  if (__kmp_omp_cancellation) {
    // depending on which construct to cancel, check the flag and
    // reset the flag
    switch (KMP_ATOMIC_LD_RLX(&(this_team->t.t_cancel_request))) {
    case cancel_parallel:
      ret = 1;
      // ensure that threads have checked the flag, when
      // leaving the above barrier
      __kmpc_barrier(loc, gtid);
      this_team->t.t_cancel_request = cancel_noreq;
      // the next barrier is the fork/join barrier, which
      // synchronizes the threads leaving here
      break;
    case cancel_loop:
    case cancel_sections:
      ret = 1;
      // ensure that threads have checked the flag, when
      // leaving the above barrier
      __kmpc_barrier(loc, gtid);
      this_team->t.t_cancel_request = cancel_noreq;
      // synchronize the threads again to make sure we do not have any run-away
      // threads that cause a race on the cancellation flag
      __kmpc_barrier(loc, gtid);
      break;
    case cancel_taskgroup:
      // this case should not occur
      KMP_ASSERT(0 /* false */);
      break;
    case cancel_noreq:
      // do nothing
      break;
    default:
      KMP_ASSERT(0 /* false */);
    }
  }

  return ret;
}
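
A minimal sketch (not taken from the runtime) of the code pattern a compiler might emit around such a barrier inside a cancellable parallel region. The outlined function, the `cancel_exit' label, and the include of "kmp.h" for the internal types are assumptions for illustration only.

#include "kmp.h" /* assumed to provide ident_t, kmp_int32 and the __kmpc_* entry points */

/* Hypothetical outlined body of a parallel region that contains a barrier. */
static void emitted_parallel_body(ident_t *loc, kmp_int32 gtid) {
  /* ... work before the barrier ... */

  /* The barrier doubles as a cancellation point: a non-zero return means a
     cancel request for the parallel region is pending and this thread must
     branch to the end of the region. */
  if (__kmpc_cancel_barrier(loc, gtid)) {
    goto cancel_exit;
  }

  /* ... work after the barrier ... */

cancel_exit:
  /* the implicit fork/join barrier at the end of the region follows */
  return;
}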
Example #2
/*!
 @ingroup THREADPRIVATE

 @param loc source location information
 @param data  pointer to data being privatized
 @param ctor  pointer to constructor function for data
 @param cctor  pointer to copy constructor function for data
 @param dtor  pointer to destructor function for data

 Register constructors and destructors for thread private data.
 This function is called when executing in parallel, when the thread ID is known.
*/
void
__kmpc_threadprivate_register(ident_t *loc, void *data, kmpc_ctor ctor, kmpc_cctor cctor, kmpc_dtor dtor)
{
    struct shared_common *d_tn, **lnk_tn;

    KC_TRACE( 10, ("__kmpc_threadprivate_register: called\n" ) );

#ifdef USE_CHECKS_COMMON
    /* copy constructor must be zero for current code gen (Nov 2002 - jph) */
    KMP_ASSERT( cctor == 0);
#endif /* USE_CHECKS_COMMON */

    /* Only the global data table exists. */
    d_tn = __kmp_find_shared_task_common( &__kmp_threadprivate_d_table, -1, data );

    if (d_tn == 0) {
        d_tn = (struct shared_common *) __kmp_allocate( sizeof( struct shared_common ) );
        d_tn->gbl_addr = data;

        d_tn->ct.ctor = ctor;
        d_tn->cct.cctor = cctor;
        d_tn->dt.dtor = dtor;
/*
        d_tn->is_vec = FALSE;  // AC: commented out because __kmp_allocate zeroes the memory
        d_tn->vec_len = 0L;
        d_tn->obj_init = 0;
        d_tn->pod_init = 0;
*/
        lnk_tn = &(__kmp_threadprivate_d_table.data[ KMP_HASH(data) ]);

        d_tn->next = *lnk_tn;
        *lnk_tn = d_tn;
    }
}
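
A minimal sketch, under assumptions, of how compiler-generated startup code might call this entry point for a threadprivate C++ object. `MyType', `tp_obj' and the thunk names are invented; "kmp.h" is assumed to provide the ident_t and kmpc_ctor/kmpc_cctor/kmpc_dtor types.

#include <new>
#include "kmp.h" /* assumed: ident_t, kmpc_ctor, kmpc_cctor, kmpc_dtor, __kmpc_threadprivate_register */

struct MyType { int value; MyType() : value(0) {} ~MyType() {} };

static MyType tp_obj; /* the original copy of the threadprivate variable */

/* Constructor thunk: construct a MyType in the per-thread storage. */
static void *tp_obj_ctor(void *p) { return new (p) MyType(); }
/* Destructor thunk: destroy the per-thread copy. */
static void tp_obj_dtor(void *p) { static_cast<MyType *>(p)->~MyType(); }

/* Registration call; the copy constructor is passed as NULL, in line with
   the USE_CHECKS_COMMON assertion above that cctor must be zero. */
static void register_tp_obj(ident_t *loc) {
  __kmpc_threadprivate_register(loc, &tp_obj, tp_obj_ctor, NULL, tp_obj_dtor);
}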
Example #3
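/*
 * Push a synchronization construct (e.g. critical or ordered) onto the
 * thread's consistency-check stack after validating its nesting with
 * __kmp_check_sync; lck names the associated user lock, if any.
 */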
void
__kmp_push_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck )
{
    int         tos;
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    KMP_ASSERT( gtid == __kmp_get_gtid() );
    KE_TRACE( 10, ("__kmp_push_sync (gtid=%d)\n", gtid ) );
    __kmp_check_sync( gtid, ct, ident, lck );
    KE_TRACE( 100, ( PUSH_MSG( ct, ident ) ) );
    tos = ++ p->stack_top;
    p->stack_data[ tos ].type  = ct;
    p->stack_data[ tos ].prev  = p->s_top;
    p->stack_data[ tos ].ident = ident;
    p->stack_data[ tos ].name  = lck;
    p->s_top = tos;
    KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
}
Example #4
/*!
@ingroup CANCELLATION
@param loc_ref source location of the cancel directive
@param gtid Global thread ID of encountering thread
@param cncl_kind Cancellation kind (parallel, for, sections, taskgroup)

@return true if the cancellation request has been activated and the
encountering thread needs to proceed to the end of the canceled region.

Request cancellation of the binding OpenMP region.
*/
kmp_int32 __kmpc_cancel(ident_t *loc_ref, kmp_int32 gtid, kmp_int32 cncl_kind) {
  kmp_info_t *this_thr = __kmp_threads[gtid];

  KC_TRACE(10, ("__kmpc_cancel: T#%d request %d OMP_CANCELLATION=%d\n", gtid,
                cncl_kind, __kmp_omp_cancellation));

  KMP_DEBUG_ASSERT(cncl_kind != cancel_noreq);
  KMP_DEBUG_ASSERT(cncl_kind == cancel_parallel || cncl_kind == cancel_loop ||
                   cncl_kind == cancel_sections ||
                   cncl_kind == cancel_taskgroup);
  KMP_DEBUG_ASSERT(__kmp_get_gtid() == gtid);

  if (__kmp_omp_cancellation) {
    switch (cncl_kind) {
    case cancel_parallel:
    case cancel_loop:
    case cancel_sections:
      // cancellation requests for parallel and worksharing constructs
      // are handled through the team structure
      {
        kmp_team_t *this_team = this_thr->th.th_team;
        KMP_DEBUG_ASSERT(this_team);
        kmp_int32 old = cancel_noreq;
        this_team->t.t_cancel_request.compare_exchange_strong(old, cncl_kind);
        if (old == cancel_noreq || old == cncl_kind) {
          // we do not have a cancellation request in this team or we do have
          // one that matches the current request -> cancel
#if OMPT_SUPPORT && OMPT_OPTIONAL
          if (ompt_enabled.ompt_callback_cancel) {
            ompt_data_t *task_data;
            __ompt_get_task_info_internal(0, NULL, &task_data, NULL, NULL,
                                          NULL);
            ompt_cancel_flag_t type = ompt_cancel_parallel;
            if (cncl_kind == cancel_parallel)
              type = ompt_cancel_parallel;
            else if (cncl_kind == cancel_loop)
              type = ompt_cancel_loop;
            else if (cncl_kind == cancel_sections)
              type = ompt_cancel_sections;
            ompt_callbacks.ompt_callback(ompt_callback_cancel)(
                task_data, type | ompt_cancel_activated,
                OMPT_GET_RETURN_ADDRESS(0));
          }
#endif
          return 1 /* true */;
        }
        break;
      }
    case cancel_taskgroup:
      // cancellation requests for a task group
      // are handled through the taskgroup structure
      {
        kmp_taskdata_t *task;
        kmp_taskgroup_t *taskgroup;

        task = this_thr->th.th_current_task;
        KMP_DEBUG_ASSERT(task);

        taskgroup = task->td_taskgroup;
        if (taskgroup) {
          kmp_int32 old = cancel_noreq;
          taskgroup->cancel_request.compare_exchange_strong(old, cncl_kind);
          if (old == cancel_noreq || old == cncl_kind) {
            // we do not have a cancellation request in this taskgroup or we do
            // have one that matches the current request -> cancel
#if OMPT_SUPPORT && OMPT_OPTIONAL
            if (ompt_enabled.ompt_callback_cancel) {
              ompt_data_t *task_data;
              __ompt_get_task_info_internal(0, NULL, &task_data, NULL, NULL,
                                            NULL);
              ompt_callbacks.ompt_callback(ompt_callback_cancel)(
                  task_data, ompt_cancel_taskgroup | ompt_cancel_activated,
                  OMPT_GET_RETURN_ADDRESS(0));
            }
#endif
            return 1 /* true */;
          }
        } else {
          // TODO: what needs to happen here?
          // the specification disallows cancellation w/o taskgroups
          // so we might do anything here, let's abort for now
          KMP_ASSERT(0 /* false */);
        }
      }
      break;
    default:
      KMP_ASSERT(0 /* false */);
    }
  }

  // ICV OMP_CANCELLATION=false, so we ignored this cancel request
  KMP_DEBUG_ASSERT(!__kmp_omp_cancellation);
  return 0 /* false */;
}
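
A minimal sketch (under assumptions) of what a compiler might emit for `#pragma omp cancel parallel' inside a parallel region. The outlined function and the `cancel_exit' label are invented, and the barrier at the end mirrors the cancellation-barrier pattern shown in Example #1.

#include "kmp.h" /* assumed: ident_t, kmp_int32, cancel_parallel, __kmpc_cancel, __kmpc_cancel_barrier */

static void emitted_body_with_cancel(ident_t *loc, kmp_int32 gtid) {
  /* ... */

  /* Request cancellation of the enclosing parallel region. */
  if (__kmpc_cancel(loc, gtid, cancel_parallel)) {
    /* cancellation is enabled and now activated: branch to the end of the
       region, where the cancellation barrier gathers the threads */
    goto cancel_exit;
  }
  /* OMP_CANCELLATION=false: the request is ignored and execution continues */

  /* ... */
cancel_exit:
  __kmpc_cancel_barrier(loc, gtid);
}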
Example #5
/*!
@ingroup CANCELLATION
@param loc_ref source location of the cancellation point directive
@param gtid Global thread ID of encountering thread
@param cncl_kind Cancellation kind (parallel, for, sections, taskgroup)

@return true if a matching cancellation request has been flagged in the
RTL and the encountering thread has to cancel.

Cancellation point for the encountering thread.
*/
kmp_int32 __kmpc_cancellationpoint(ident_t *loc_ref, kmp_int32 gtid,
                                   kmp_int32 cncl_kind) {
  kmp_info_t *this_thr = __kmp_threads[gtid];

  KC_TRACE(10,
           ("__kmpc_cancellationpoint: T#%d request %d OMP_CANCELLATION=%d\n",
            gtid, cncl_kind, __kmp_omp_cancellation));

  KMP_DEBUG_ASSERT(cncl_kind != cancel_noreq);
  KMP_DEBUG_ASSERT(cncl_kind == cancel_parallel || cncl_kind == cancel_loop ||
                   cncl_kind == cancel_sections ||
                   cncl_kind == cancel_taskgroup);
  KMP_DEBUG_ASSERT(__kmp_get_gtid() == gtid);

  if (__kmp_omp_cancellation) {
    switch (cncl_kind) {
    case cancel_parallel:
    case cancel_loop:
    case cancel_sections:
      // cancellation requests for parallel and worksharing constructs
      // are handled through the team structure
      {
        kmp_team_t *this_team = this_thr->th.th_team;
        KMP_DEBUG_ASSERT(this_team);
        if (this_team->t.t_cancel_request) {
          if (cncl_kind == this_team->t.t_cancel_request) {
            // the request in the team structure matches the type of
            // cancellation point so we can cancel
#if OMPT_SUPPORT && OMPT_OPTIONAL
            if (ompt_enabled.ompt_callback_cancel) {
              ompt_data_t *task_data;
              __ompt_get_task_info_internal(0, NULL, &task_data, NULL, NULL,
                                            NULL);
              ompt_cancel_flag_t type = ompt_cancel_parallel;
              if (cncl_kind == cancel_parallel)
                type = ompt_cancel_parallel;
              else if (cncl_kind == cancel_loop)
                type = ompt_cancel_loop;
              else if (cncl_kind == cancel_sections)
                type = ompt_cancel_sections;
              ompt_callbacks.ompt_callback(ompt_callback_cancel)(
                  task_data, type | ompt_cancel_detected,
                  OMPT_GET_RETURN_ADDRESS(0));
            }
#endif
            return 1 /* true */;
          }
          KMP_ASSERT(0 /* false */);
        } else {
          // we do not have a cancellation request pending, so we just
          // ignore this cancellation point
          return 0;
        }
        break;
      }
    case cancel_taskgroup:
      // cancellation requests for a task group
      // are handled through the taskgroup structure
      {
        kmp_taskdata_t *task;
        kmp_taskgroup_t *taskgroup;

        task = this_thr->th.th_current_task;
        KMP_DEBUG_ASSERT(task);

        taskgroup = task->td_taskgroup;
        if (taskgroup) {
          // return the current status of cancellation for the taskgroup
#if OMPT_SUPPORT && OMPT_OPTIONAL
          if (ompt_enabled.ompt_callback_cancel &&
              !!taskgroup->cancel_request) {
            ompt_data_t *task_data;
            __ompt_get_task_info_internal(0, NULL, &task_data, NULL, NULL,
                                          NULL);
            ompt_callbacks.ompt_callback(ompt_callback_cancel)(
                task_data, ompt_cancel_taskgroup | ompt_cancel_detected,
                OMPT_GET_RETURN_ADDRESS(0));
          }
#endif
          return !!taskgroup->cancel_request;
        } else {
          // if a cancellation point is encountered by a task that does not
          // belong to a taskgroup, it is OK to ignore it
          return 0 /* false */;
        }
      }
    default:
      KMP_ASSERT(0 /* false */);
    }
  }

  // ICV OMP_CANCELLATION=false, so we ignore the cancellation point
  KMP_DEBUG_ASSERT(!__kmp_omp_cancellation);
  return 0 /* false */;
}
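
A minimal sketch, with invented loop bounds and names, of what a compiler might emit for `#pragma omp cancellation point for' inside a worksharing loop chunk.

#include "kmp.h" /* assumed: ident_t, kmp_int32, cancel_loop, __kmpc_cancellationpoint */

static void emitted_loop_chunk(ident_t *loc, kmp_int32 gtid, int lb, int ub) {
  for (int i = lb; i <= ub; ++i) {
    /* Poll for a pending `cancel for' request. */
    if (__kmpc_cancellationpoint(loc, gtid, cancel_loop)) {
      /* a matching request is flagged: abandon the remaining iterations and
         proceed to the cancellation barrier at the end of the worksharing
         region */
      break;
    }
    /* ... loop body ... */
  }
}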
Example #6
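/*
 * Validate that entering a synchronization construct (ordered, critical,
 * master, or reduce) is legal given the constructs currently recorded on
 * the thread's consistency-check stack; report a construct error otherwise.
 */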
void
#if KMP_USE_DYNAMIC_LOCK
__kmp_check_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck, kmp_uint32 seq )
#else
__kmp_check_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck )
#endif
{
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    KE_TRACE( 10, ("__kmp_check_sync (gtid=%d)\n", __kmp_get_gtid() ) );

    if (p->stack_top >= p->stack_size)
       __kmp_expand_cons_stack( gtid, p );

    if (ct == ct_ordered_in_parallel || ct == ct_ordered_in_pdo || ct == ct_ordered_in_taskq ) {
        if (p->w_top <= p->p_top) {
            /* we are not in a worksharing construct */
            #ifdef BUILD_PARALLEL_ORDERED
                /* do not report error messages for PARALLEL ORDERED */
                KMP_ASSERT( ct == ct_ordered_in_parallel );
            #else
                __kmp_error_construct( kmp_i18n_msg_CnsBoundToWorksharing, ct, ident );
            #endif /* BUILD_PARALLEL_ORDERED */
        } else {
            /* inside a WORKSHARING construct for this PARALLEL region */
            if (!IS_CONS_TYPE_ORDERED(p->stack_data[ p->w_top ].type)) {
                if (p->stack_data[ p->w_top ].type == ct_taskq) {
                    __kmp_error_construct2(
                        kmp_i18n_msg_CnsNotInTaskConstruct,
                        ct, ident,
                        & p->stack_data[ p->w_top ]
                    );
                } else {
                    __kmp_error_construct2(
                        kmp_i18n_msg_CnsNoOrderedClause,
                        ct, ident,
                        & p->stack_data[ p->w_top ]
                    );
               }
            }
        }
        if (p->s_top > p->p_top && p->s_top > p->w_top) {
            /* inside a sync construct which is inside a worksharing construct */
            int index = p->s_top;
            enum cons_type stack_type;

            stack_type = p->stack_data[ index ].type;

            if (stack_type == ct_critical ||
                ( ( stack_type == ct_ordered_in_parallel ||
                    stack_type == ct_ordered_in_pdo      ||
                    stack_type == ct_ordered_in_taskq  ) &&     /* C doesn't allow named ordered; ordered in ordered gets error */
                 p->stack_data[ index ].ident != NULL &&
                 (p->stack_data[ index ].ident->flags & KMP_IDENT_KMPC ))) {
                /* we are in ORDERED which is inside an ORDERED or CRITICAL construct */
                __kmp_error_construct2(
                    kmp_i18n_msg_CnsInvalidNesting,
                    ct, ident,
                    & p->stack_data[ index ]
                );
            }
        }
    } else if ( ct == ct_critical ) {
#if KMP_USE_DYNAMIC_LOCK
        if ( lck != NULL && __kmp_get_user_lock_owner( lck, seq ) == gtid ) {    /* this same thread already has lock for this critical section */
#else
        if ( lck != NULL && __kmp_get_user_lock_owner( lck ) == gtid ) {    /* this same thread already has lock for this critical section */
#endif
            int index = p->s_top;
            struct cons_data cons = { NULL, ct_critical, 0, NULL };
            /* walk up construct stack and try to find critical with matching name */
            while ( index != 0 && p->stack_data[ index ].name != lck ) {
                index = p->stack_data[ index ].prev;
            }
            if ( index != 0 ) {
                /* found a match on the stack (this may not always happen because of interleaved critical constructs in Fortran) */
                cons = p->stack_data[ index ];
            }
            /* we are in CRITICAL which is inside a CRITICAL construct of the same name */
            __kmp_error_construct2( kmp_i18n_msg_CnsNestingSameName, ct, ident, & cons );
        }
    } else if ( ct == ct_master || ct == ct_reduce ) {
        if (p->w_top > p->p_top) {
            /* inside a WORKSHARING construct for this PARALLEL region */
           __kmp_error_construct2(
               kmp_i18n_msg_CnsInvalidNesting,
               ct, ident,
               & p->stack_data[ p->w_top ]
           );
        }
        if (ct == ct_reduce && p->s_top > p->p_top) {
            /* inside another SYNC construct for this PARALLEL region */
            __kmp_error_construct2(
                kmp_i18n_msg_CnsInvalidNesting,
                ct, ident,
                & p->stack_data[ p->s_top ]
            );
        } // if
    } // if
}

void
#if KMP_USE_DYNAMIC_LOCK
__kmp_push_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck, kmp_uint32 seq )
#else
__kmp_push_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck )
#endif
{
    int         tos;
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    KMP_ASSERT( gtid == __kmp_get_gtid() );
    KE_TRACE( 10, ("__kmp_push_sync (gtid=%d)\n", gtid ) );
#if KMP_USE_DYNAMIC_LOCK
    __kmp_check_sync( gtid, ct, ident, lck, seq );
#else
    __kmp_check_sync( gtid, ct, ident, lck );
#endif
    KE_TRACE( 100, ( PUSH_MSG( ct, ident ) ) );
    tos = ++ p->stack_top;
    p->stack_data[ tos ].type  = ct;
    p->stack_data[ tos ].prev  = p->s_top;
    p->stack_data[ tos ].ident = ident;
    p->stack_data[ tos ].name  = lck;
    p->s_top = tos;
    KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
}
Example #7
/*!
 @ingroup THREADPRIVATE

 @param loc source location information
 @param data  pointer to data being privatized
 @param ctor  pointer to constructor function for data
 @param cctor  pointer to copy constructor function for data
 @param dtor  pointer to destructor function for data

 Register constructors and destructors for thread private data.
 This function is called when executing in parallel, when the thread ID is known.
*/
void
__kmpc_threadprivate_register(ident_t *loc, void *data, kmpc_ctor ctor, kmpc_cctor cctor, kmpc_dtor dtor)
{
    struct shared_common *d_tn, **lnk_tn;

    KC_TRACE( 10, ("__kmpc_threadprivate_register: called\n" ) );

#ifdef USE_CHECKS_COMMON
    /* copy constructor must be zero for current code gen (Nov 2002 - jph) */
    KMP_ASSERT( cctor == 0);
#endif /* USE_CHECKS_COMMON */

#if KMP_THREADPRIVATE_TLS
    /*
       Check if unique. We use only data[0] here, as we need to
       maintain a unique ordering between the entries on the
       list. This is because each thread maintains a private variable
       pointing to the entries it has already initialized.  When a
       worker starts a parallel region, it checks whether its private
       pointer points to the most recent entry. If not, the thread
       calls the constructors for all entries between the top and
       the ones it has already processed.
    */
    for (d_tn = __kmp_threadprivate_d_table.data[0]; d_tn; d_tn = d_tn->next) {
        if (d_tn->gbl_addr == data) {
            // nothing to be done, already here
            return;
        }
    }
    // not found, create one
    d_tn = (struct shared_common *) __kmp_allocate( sizeof( struct shared_common ) );
    d_tn->gbl_addr = data;
    d_tn->ct.ctor = ctor;
    d_tn->cct.cctor = cctor;
    d_tn->dt.dtor = dtor;

    // use only one list (0th, arbitrary)
    lnk_tn = &(__kmp_threadprivate_d_table.data[0]);
    // set new element at head of list
    d_tn->next = __kmp_threadprivate_d_table.data[0];
    // Make sure that if a thread sees the new element, it also
    // sees the new next pointer value.
    KMP_MB();
    __kmp_threadprivate_d_table.data[0] = d_tn;
    return;
#endif

    /* Only the global data table exists. */
    d_tn = __kmp_find_shared_task_common( &__kmp_threadprivate_d_table, -1, data );

    if (d_tn == 0) {
        d_tn = (struct shared_common *) __kmp_allocate( sizeof( struct shared_common ) );
        d_tn->gbl_addr = data;

        d_tn->ct.ctor = ctor;
        d_tn->cct.cctor = cctor;
        d_tn->dt.dtor = dtor;
/*
        d_tn->is_vec = FALSE;  // AC: commented out because __kmp_allocate zeroes the memory
        d_tn->vec_len = 0L;
        d_tn->obj_init = 0;
        d_tn->pod_init = 0;
*/
        lnk_tn = &(__kmp_threadprivate_d_table.data[ KMP_HASH(data) ]);

        d_tn->next = *lnk_tn;
        *lnk_tn = d_tn;
    }
}
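
The KMP_THREADPRIVATE_TLS branch above publishes the new entry at the head of data[0] only after a memory barrier, so that any thread which sees the new head also sees its initialized fields. Below is a minimal stand-alone sketch of that publication pattern, using C++ atomics in place of KMP_MB() and with invented names (node_t, list_head, publish_node):

#include <atomic>

struct node_t {
  void *gbl_addr; /* payload: address of the original threadprivate object */
  node_t *next;
};

static std::atomic<node_t *> list_head{nullptr};

/* Link a fully initialized node at the head of the list.  The release store
   plays the role of KMP_MB(): a reader that observes the new head is
   guaranteed to observe the initialized payload and next pointer as well
   (provided it loads the head with acquire ordering). */
void publish_node(node_t *n, void *addr) {
  n->gbl_addr = addr;
  n->next = list_head.load(std::memory_order_relaxed);
  list_head.store(n, std::memory_order_release);
}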