/*
 * _perfctr_dispatch_timer
 *
 * Signal handler invoked on a perfctr counter-overflow signal.  Builds a
 * PAPI context from the siginfo/ucontext pair, hands the overflow off to
 * the PAPI framework, and — when the framework reports that the overflow
 * came from real hardware counters — resumes the interrupted i-mode
 * counters via vperfctr_iresume().
 */
void
_perfctr_dispatch_timer( int signal, siginfo_t *si, void *context )
{
	( void ) signal;		/* unused */
	_papi_hwi_context_t ctx;
	ThreadInfo_t *master = NULL;
	int isHardware = 0;
	int retval;
	caddr_t address;
	int cidx = _perfctr_vector.cmp_info.CmpIdx;
	hwd_context_t *our_context;

	ctx.si = si;
	ctx.ucontext = ( ucontext_t * ) context;

	address = ( caddr_t ) GET_OVERFLOW_ADDRESS( ( ctx ) );

	/* si_pmc_ovf_mask identifies the hardware counters that overflowed;
	   the literal 0 marks this as a counter overflow rather than a
	   generic (timer-driven) one.  These were previously one-shot
	   function-local #defines (OVERFLOW_MASK / GEN_OVERFLOW). */
	_papi_hwi_dispatch_overflow_signal( ( void * ) &ctx, address,
										&isHardware,
										si->si_pmc_ovf_mask, 0,
										&master, cidx );

	/* We are done, resume interrupting counters */
	if ( isHardware ) {
		our_context = ( hwd_context_t * ) master->context[cidx];
		/* BUGFIX: keep the API return code in a local instead of storing
		   it into errno — the old code both clobbered errno inside a
		   signal handler and confused the return value with errno. */
		retval = vperfctr_iresume( our_context->perfctr );
		if ( retval < 0 ) {
			PAPIERROR( "vperfctr_iresume errno %d", errno );
		}
	}
}
/* * user_signal_handler * * This function is used when hardware overflows are working or when * software overflows are forced */ void user_signal_handler_IOUNIT( int hEvtSet, uint64_t address, uint64_t ovfVector, const ucontext_t *pContext ) { #ifdef DEBUG_BGQ printf( "user_signal_handler_IOUNIT\n" ); #endif int retval, i; int isHardware = 1; int cidx = _IOunit_vector.cmp_info.CmpIdx; long_long overflow_bit = 0; caddr_t address1; _papi_hwi_context_t ctx; ctx.ucontext = ( hwd_ucontext_t * ) pContext; ThreadInfo_t *thread = _papi_hwi_lookup_thread( 0 ); EventSetInfo_t *ESI; ESI = thread->running_eventset[cidx]; // Get the indices of all events which have overflowed. unsigned ovfIdxs[BGPM_MAX_OVERFLOW_EVENTS]; unsigned len = BGPM_MAX_OVERFLOW_EVENTS; retval = Bgpm_GetOverflowEventIndices( hEvtSet, ovfVector, ovfIdxs, &len ); if ( retval < 0 ) { #ifdef DEBUG_BGPM printf ( "Error: ret value is %d for BGPM API function Bgpm_GetOverflowEventIndices.\n", retval ); #endif return; } if ( thread == NULL ) { PAPIERROR( "thread == NULL in user_signal_handler!" 
); return; } if ( ESI == NULL ) { PAPIERROR( "ESI == NULL in user_signal_handler!"); return; } if ( ESI->overflow.flags == 0 ) { PAPIERROR( "ESI->overflow.flags == 0 in user_signal_handler!"); return; } for ( i = 0; i < len; i++ ) { uint64_t hProf; Bgpm_GetEventUser1( hEvtSet, ovfIdxs[i], &hProf ); if ( hProf ) { overflow_bit ^= 1 << ovfIdxs[i]; break; } } if ( ESI->overflow.flags & PAPI_OVERFLOW_FORCE_SW ) { #ifdef DEBUG_BGQ printf("OVERFLOW_SOFTWARE\n"); #endif address1 = GET_OVERFLOW_ADDRESS( ctx ); _papi_hwi_dispatch_overflow_signal( ( void * ) &ctx, address1, NULL, 0, 0, &thread, cidx ); return; } else if ( ESI->overflow.flags & PAPI_OVERFLOW_HARDWARE ) { #ifdef DEBUG_BGQ printf("OVERFLOW_HARDWARE\n"); #endif address1 = GET_OVERFLOW_ADDRESS( ctx ); _papi_hwi_dispatch_overflow_signal( ( void * ) &ctx, address1, &isHardware, overflow_bit, 0, &thread, cidx ); } else { #ifdef DEBUG_BGQ printf("OVERFLOW_NONE\n"); #endif PAPIERROR( "ESI->overflow.flags is set to something other than PAPI_OVERFLOW_HARDWARE or PAPI_OVERFLOW_FORCE_SW (%x)", thread->running_eventset[cidx]->overflow.flags); } }
/*
 * _ultra_hwd_dispatch_timer
 *
 * Overflow/timer signal handler for the Solaris UltraSPARC component.
 * Builds a PAPI context from the siginfo/ucontext pair and forwards the
 * interrupt to the PAPI framework.
 *
 * NOTE: a large '#if 0' alternative implementation previously followed
 * the live code here; it had rotted (it referenced a nonexistent 'info'
 * parameter and so could never compile) and has been removed.
 */
void
_ultra_hwd_dispatch_timer( int signal, siginfo_t *si, void *context )
{
	( void ) signal;		/* unused */
	_papi_hwi_context_t ctx;
	ThreadInfo_t *master = NULL;
	int isHardware = 0;
	caddr_t address;
	int cidx = _solaris_vector.cmp_info.CmpIdx;

	( void ) cidx;			/* retained for symmetry with sibling handlers */

	ctx.si = si;
	ctx.ucontext = ( ucontext_t * ) context;

	address = GET_OVERFLOW_ADDRESS( ctx );
	_papi_hwi_dispatch_overflow_signal( ( void * ) &ctx, address,
										&isHardware, 0, 0, &master,
										_solaris_vector.cmp_info.CmpIdx );

	/* We are done.  Unlike the perfctr component there is no counter
	   resume step implemented here — the old code in this branch was
	   commented out (a vperfctr_iresume call that does not apply on
	   this platform). */
	if ( isHardware ) {
		/* intentionally empty */
	}
}
/*
 * dispatch_emt
 *
 * Solaris EMT (overflow) signal handler.  On an EMT_CPCOVF interrupt it
 * determines which %pic counter(s) wrapped, rebuilds the overflow vector,
 * pushes the reset threshold values back into the hardware sample, and
 * dispatches the overflow to the PAPI framework before rebinding the
 * counters so counting resumes.  Any other si_code is dropped.
 */
static void
dispatch_emt( int signal, siginfo_t *sip, void *arg )
{
	int event_counter;
	_papi_hwi_context_t ctx;
	caddr_t address;

	ctx.si = sip;
	ctx.ucontext = arg;

	SUBDBG( "%d, %p, %p\n", signal, sip, arg );

	if ( sip->si_code == EMT_CPCOVF ) {
		papi_cpc_event_t *sample;
		EventSetInfo_t *ESI;
		ThreadInfo_t *thread = NULL;
		int t, overflow_vector, readvalue;

		thread = _papi_hwi_lookup_thread( 0 );
		/* BUGFIX: guard 'thread' before dereferencing it — the old code
		   had no NULL check at all on the lookup result. */
		if ( thread == NULL ) {
			OVFDBG( "Either no eventset or eventset not set to overflow.\n" );
			return;
		}
		ESI = ( EventSetInfo_t * ) thread->running_eventset;
		if ( ( ESI == NULL ) || ( ( ESI->state & PAPI_OVERFLOWING ) == 0 ) ) {
			OVFDBG( "Either no eventset or eventset not set to overflow.\n" );
			return;
		}
		/* BUGFIX: read CmpIdx only after the NULL check above; the old
		   code dereferenced ESI before verifying it. */
		int cidx = ESI->CmpIdx;

		if ( ESI->master != thread ) {
			PAPIERROR( "eventset->thread 0x%lx vs. current thread 0x%lx mismatch",
					   ESI->master, thread );
			return;
		}

		event_counter = ESI->overflow.event_counter;
		sample = &( ESI->ctl_state->counter_cmd );

		/* GROSS!  This is a hack to 'push' the correct values back into
		   the hardware, such that when PAPI handles the overflow and
		   reads the values, it gets the correct ones. */

		/* Find which HW counter overflowed */
		if ( ESI->EventInfoArray[ESI->overflow.EventIndex[0]].pos[0] == 0 )
			t = 0;
		else
			t = 1;

		if ( cpc_take_sample( &sample->cmd ) == -1 )
			return;
		if ( event_counter == 1 ) {
			/* only one event is set to be the overflow monitor */
			/* generate the overflow vector */
			overflow_vector = 1 << t;
			/* reset the threshold */
			sample->cmd.ce_pic[t] = UINT64_MAX - ESI->overflow.threshold[0];
		} else {
			/* two events are set to be the overflow monitors */
			overflow_vector = 0;
			/* NOTE(review): ce_pic is a 64-bit counter read into a
			   signed int; a value past the wrap point shows up as
			   non-negative here per the original logic — verify. */
			readvalue = sample->cmd.ce_pic[0];
			if ( readvalue >= 0 ) {
				/* the first counter overflowed */
				/* generate the overflow vector */
				overflow_vector = 1;
				/* reset the threshold */
				if ( t == 0 )
					sample->cmd.ce_pic[0] = UINT64_MAX - ESI->overflow.threshold[0];
				else
					sample->cmd.ce_pic[0] = UINT64_MAX - ESI->overflow.threshold[1];
			}
			readvalue = sample->cmd.ce_pic[1];
			if ( readvalue >= 0 ) {
				/* the second counter overflowed */
				/* generate the overflow vector */
				overflow_vector ^= 1 << 1;
				/* reset the threshold */
				if ( t == 0 )
					sample->cmd.ce_pic[1] = UINT64_MAX - ESI->overflow.threshold[1];
				else
					sample->cmd.ce_pic[1] = UINT64_MAX - ESI->overflow.threshold[0];
			}
			SUBDBG( "overflow_vector, = %d\n", overflow_vector );
			/* something is wrong here */
			if ( overflow_vector == 0 ) {
				PAPIERROR( "BUG! overflow_vector is 0, dropping interrupt" );
				return;
			}
		}

		/* Call the regular overflow function in extras.c */
		if ( thread->running_eventset[cidx]->overflow.
			 flags & PAPI_OVERFLOW_FORCE_SW ) {
			address = GET_OVERFLOW_ADDRESS( ctx );
			_papi_hwi_dispatch_overflow_signal( ( void * ) &ctx, address,
												NULL, overflow_vector, 0,
												&thread, cidx );
		} else {
			PAPIERROR( "Additional implementation needed in dispatch_emt!" );
		}
#if DEBUG
		dump_cmd( sample );
#endif
		/* push back the correct values and start counting again */
		if ( cpc_bind_event( &sample->cmd, sample->flags ) == -1 )
			return;
	} else {
		SUBDBG( "dispatch_emt() dropped, si_code = %d\n", sip->si_code );
		return;
	}
}