/*
 * Pop the top synchronization construct from the thread's construct stack,
 * checking that its type matches ct.
 */
void
__kmp_pop_sync( int gtid, enum cons_type ct, ident_t const * ident )
{
    int tos;
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    tos = p->stack_top;
    KE_TRACE( 10, ("__kmp_pop_sync (%d %d)\n", gtid, __kmp_get_gtid() ) );
    if ( tos == 0 || p->s_top == 0 ) {
        __kmp_error_construct( kmp_i18n_msg_CnsDetectedEnd, ct, ident );
    };
    if ( tos != p->s_top || p->stack_data[ tos ].type != ct ) {
        __kmp_check_null_func();
        __kmp_error_construct2(
            kmp_i18n_msg_CnsExpectedEnd,
            ct, ident,
            & p->stack_data[ tos ]
        );
    };
    if ( gtid < 0 ) {
        __kmp_check_null_func();
    };
    KE_TRACE( 100, ( POP_MSG( p ) ) );
    p->s_top = p->stack_data[ tos ].prev;
    p->stack_data[ tos ].type = ct_none;
    p->stack_data[ tos ].ident = NULL;
    p->stack_top = tos - 1;
    KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
}
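/*
 * Pop the top worksharing construct from the thread's construct stack.
 * Reports an error if there is no open worksharing construct or if the type
 * on top of the stack does not match ct (allowing the pdo_ordered/pdo and
 * task_ordered/task pairings noted below). Returns the type of the enclosing
 * worksharing construct that remains on the stack.
 */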
enum cons_type
__kmp_pop_workshare( int gtid, enum cons_type ct, ident_t const * ident )
{
    int tos;
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    tos = p->stack_top;
    KE_TRACE( 10, ("__kmp_pop_workshare (%d %d)\n", gtid, __kmp_get_gtid() ) );
    if ( tos == 0 || p->w_top == 0 ) {
        __kmp_error_construct( kmp_i18n_msg_CnsDetectedEnd, ct, ident );
    }

    if ( tos != p->w_top ||
         ( p->stack_data[ tos ].type != ct &&
           /* below are two exceptions to the rule that construct types must match */
           ! ( p->stack_data[ tos ].type == ct_pdo_ordered && ct == ct_pdo ) &&
           ! ( p->stack_data[ tos ].type == ct_task_ordered && ct == ct_task )
         )
       ) {
        __kmp_check_null_func();
        __kmp_error_construct2(
            kmp_i18n_msg_CnsExpectedEnd,
            ct, ident,
            & p->stack_data[ tos ]
        );
    }
    KE_TRACE( 100, ( POP_MSG( p ) ) );
    p->w_top = p->stack_data[ tos ].prev;
    p->stack_data[ tos ].type = ct_none;
    p->stack_data[ tos ].ident = NULL;
    p->stack_top = tos - 1;
    KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
    return p->stack_data[ p->w_top ].type;
}
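/*
 * Push a worksharing construct of type ct onto the thread's construct stack
 * after __kmp_check_workshare() has validated the nesting, and make the new
 * entry the current w_top.
 */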
void
__kmp_push_workshare( int gtid, enum cons_type ct, ident_t const * ident )
{
    int tos;
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    KE_TRACE( 10, ("__kmp_push_workshare (%d %d)\n", gtid, __kmp_get_gtid() ) );
    __kmp_check_workshare( gtid, ct, ident );
    KE_TRACE( 100, ( PUSH_MSG( ct, ident ) ) );
    tos = ++p->stack_top;
    p->stack_data[ tos ].type = ct;
    p->stack_data[ tos ].prev = p->w_top;
    p->stack_data[ tos ].ident = ident;
    p->stack_data[ tos ].name = NULL;
    p->w_top = tos;
    KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
}
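/*
 * Push a synchronization construct of type ct onto the thread's construct
 * stack after __kmp_check_sync() has validated the nesting; the associated
 * lock, if any, is recorded in the entry's name field and the entry becomes
 * the current s_top.
 */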
void
__kmp_push_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck )
{
    int tos;
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    KMP_ASSERT( gtid == __kmp_get_gtid() );
    KE_TRACE( 10, ("__kmp_push_sync (gtid=%d)\n", gtid ) );
    __kmp_check_sync( gtid, ct, ident, lck );
    KE_TRACE( 100, ( PUSH_MSG( ct, ident ) ) );
    tos = ++ p->stack_top;
    p->stack_data[ tos ].type = ct;
    p->stack_data[ tos ].prev = p->s_top;
    p->stack_data[ tos ].ident = ident;
    p->stack_data[ tos ].name = lck;
    p->s_top = tos;
    KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
}
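/*
 * Push a parallel construct onto the thread's construct stack, growing the
 * stack if necessary, and make the new entry the current p_top.
 */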
void
__kmp_push_parallel( int gtid, ident_t const * ident )
{
    int tos;
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    KMP_DEBUG_ASSERT( __kmp_threads[ gtid ]->th.th_cons );
    KE_TRACE( 10, ("__kmp_push_parallel (%d %d)\n", gtid, __kmp_get_gtid() ) );
    KE_TRACE( 100, ( PUSH_MSG( ct_parallel, ident ) ) );
    if ( p->stack_top >= p->stack_size ) {
        __kmp_expand_cons_stack( gtid, p );
    }; // if
    tos = ++p->stack_top;
    p->stack_data[ tos ].type = ct_parallel;
    p->stack_data[ tos ].prev = p->p_top;
    p->stack_data[ tos ].ident = ident;
    p->stack_data[ tos ].name = NULL;
    p->p_top = tos;
    KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
}
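/*
 * Validate that a synchronization construct of type ct may be entered given
 * the thread's current construct stack: ordered must be bound to a
 * worksharing construct with an ordered clause and must not be nested inside
 * another ordered or critical; critical must not be nested inside a critical
 * guarded by the same lock (same name); master and reduce must not appear
 * inside a worksharing construct of the same parallel region, and reduce
 * additionally must not appear inside another synchronization construct.
 */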
void
#if KMP_USE_DYNAMIC_LOCK
__kmp_check_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck, kmp_uint32 seq )
#else
__kmp_check_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck )
#endif
{
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    KE_TRACE( 10, ("__kmp_check_sync (gtid=%d)\n", __kmp_get_gtid() ) );

    if (p->stack_top >= p->stack_size)
        __kmp_expand_cons_stack( gtid, p );

    if (ct == ct_ordered_in_parallel || ct == ct_ordered_in_pdo || ct == ct_ordered_in_taskq ) {
        if (p->w_top <= p->p_top) {
            /* we are not in a worksharing construct */
#ifdef BUILD_PARALLEL_ORDERED
            /* do not report error messages for PARALLEL ORDERED */
            KMP_ASSERT( ct == ct_ordered_in_parallel );
#else
            __kmp_error_construct( kmp_i18n_msg_CnsBoundToWorksharing, ct, ident );
#endif /* BUILD_PARALLEL_ORDERED */
        } else {
            /* inside a WORKSHARING construct for this PARALLEL region */
            if (!IS_CONS_TYPE_ORDERED(p->stack_data[ p->w_top ].type)) {
                if (p->stack_data[ p->w_top ].type == ct_taskq) {
                    __kmp_error_construct2(
                        kmp_i18n_msg_CnsNotInTaskConstruct,
                        ct, ident,
                        & p->stack_data[ p->w_top ]
                    );
                } else {
                    __kmp_error_construct2(
                        kmp_i18n_msg_CnsNoOrderedClause,
                        ct, ident,
                        & p->stack_data[ p->w_top ]
                    );
                }
            }
        }
        if (p->s_top > p->p_top && p->s_top > p->w_top) {
            /* inside a sync construct which is inside a worksharing construct */
            int index = p->s_top;
            enum cons_type stack_type;

            stack_type = p->stack_data[ index ].type;

            if (stack_type == ct_critical ||
                ( ( stack_type == ct_ordered_in_parallel ||
                    stack_type == ct_ordered_in_pdo      ||
                    stack_type == ct_ordered_in_taskq  ) &&  /* C doesn't allow named ordered; ordered in ordered gets error */
                  p->stack_data[ index ].ident != NULL &&
                  (p->stack_data[ index ].ident->flags & KMP_IDENT_KMPC ))) {
                /* we are in ORDERED which is inside an ORDERED or CRITICAL construct */
                __kmp_error_construct2(
                    kmp_i18n_msg_CnsInvalidNesting,
                    ct, ident,
                    & p->stack_data[ index ]
                );
            }
        }
    } else if ( ct == ct_critical ) {
#if KMP_USE_DYNAMIC_LOCK
        if ( lck != NULL && __kmp_get_user_lock_owner( lck, seq ) == gtid ) { /* this same thread already has lock for this critical section */
#else
        if ( lck != NULL && __kmp_get_user_lock_owner( lck ) == gtid ) { /* this same thread already has lock for this critical section */
#endif
            int index = p->s_top;
            struct cons_data cons = { NULL, ct_critical, 0, NULL };

            /* walk up construct stack and try to find critical with matching name */
            while ( index != 0 && p->stack_data[ index ].name != lck ) {
                index = p->stack_data[ index ].prev;
            }
            if ( index != 0 ) {
                /* found match on the stack (may not always because of interleaved critical for Fortran) */
                cons = p->stack_data[ index ];
            }
            /* we are in CRITICAL which is inside a CRITICAL construct of the same name */
            __kmp_error_construct2( kmp_i18n_msg_CnsNestingSameName, ct, ident, & cons );
        }
    } else if ( ct == ct_master || ct == ct_reduce ) {
        if (p->w_top > p->p_top) {
            /* inside a WORKSHARING construct for this PARALLEL region */
            __kmp_error_construct2(
                kmp_i18n_msg_CnsInvalidNesting,
                ct, ident,
                & p->stack_data[ p->w_top ]
            );
        }
        if (ct == ct_reduce && p->s_top > p->p_top) {
            /* inside another SYNC construct for this PARALLEL region */
            __kmp_error_construct2(
                kmp_i18n_msg_CnsInvalidNesting,
                ct, ident,
                & p->stack_data[ p->s_top ]
            );
        }; // if
    }; // if
}

void
#if KMP_USE_DYNAMIC_LOCK
__kmp_push_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck, kmp_uint32 seq )
#else
__kmp_push_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck )
#endif
{
    int tos;
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    KMP_ASSERT( gtid == __kmp_get_gtid() );
    KE_TRACE( 10, ("__kmp_push_sync (gtid=%d)\n", gtid ) );
#if KMP_USE_DYNAMIC_LOCK
    __kmp_check_sync( gtid, ct, ident, lck, seq );
#else
    __kmp_check_sync( gtid, ct, ident, lck );
#endif
    KE_TRACE( 100, ( PUSH_MSG( ct, ident ) ) );
    tos = ++ p->stack_top;
    p->stack_data[ tos ].type = ct;
    p->stack_data[ tos ].prev = p->s_top;
    p->stack_data[ tos ].ident = ident;
    p->stack_data[ tos ].name = lck;
    p->s_top = tos;
    KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
}
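/*
 * Illustrative sketch (not compiled as part of this file): each push on entry
 * to a construct is expected to be matched by a pop with the same construct
 * type on exit. The gtid/loc/lck names below are placeholders, and in
 * KMP_USE_DYNAMIC_LOCK builds __kmp_push_sync() additionally takes the lock
 * sequence number seq.
 *
 *     __kmp_push_sync( gtid, ct_critical, loc, lck );   // entering the construct
 *     // ... code executed inside the critical section ...
 *     __kmp_pop_sync( gtid, ct_critical, loc );          // leaving the construct
 */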