Example #1
// Function Specification
//
// Name: errlTestTime
//
// Description: Verify that a committed error log's timestamp falls between
//              timebase reads taken immediately before creation and after commit
//
// End Function Specification
uint32_t errlTestTime()
{
    uint32_t l_rc = 0;

    do
    {
        ERRL_DBG("START");
        errlHndl_t l_handle = NULL;
        uint64_t l_start = 0;
        uint64_t l_end = 0;


        /****************************************************/
        // Check timeStamp
        // Create one log
        l_start = ssx_timebase_get();
        l_handle = createErrl( 0x1716, 0x08, OCC_NO_EXTENDED_RC, ERRL_SEV_CALLHOME_DATA, g_trac_inf, 128, 0x1, 0x2);
        CHECK_CONDITION( l_handle != INVALID_ERR_HNDL, l_rc);


        // check time stamp
        errlHndl_t l_handle2 = l_handle;
        commitErrl( &l_handle );
        l_end = ssx_timebase_get();
        CHECK_CONDITION( (l_handle2->iv_userDetails.iv_timeStamp >= l_start) &&
                         (l_handle2->iv_userDetails.iv_timeStamp <= l_end ), l_rc);

        deleteErrl(&l_handle2);
        ERRL_DBG("END \n");

    }while(0);

    return l_rc;
}
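This test brackets createErrl()/commitErrl() between two ssx_timebase_get() reads and checks that the committed log's timestamp falls inside that window. The same bracketing idiom can time any operation; below is a minimal sketch (not from the original source) that converts the tick delta to microseconds using the SSX_TIMEBASE_FREQUENCY_HZ/1000000 divisor also seen in Examples #7 and #8. measured_operation() is a hypothetical placeholder.

// Sketch: time an operation by bracketing it with timebase reads and
// convert the tick delta to microseconds. Assumes SSX_TIMEBASE_FREQUENCY_HZ
// is at least 1 MHz so the divisor below is non-zero.
extern void measured_operation(void);   // hypothetical operation being timed

static uint32_t time_operation_us(void)
{
    SsxTimebase start = ssx_timebase_get();

    measured_operation();

    SsxTimebase elapsed_ticks = ssx_timebase_get() - start;

    // One microsecond corresponds to SSX_TIMEBASE_FREQUENCY_HZ / 1000000 ticks
    return (uint32_t)(elapsed_ticks / (SSX_TIMEBASE_FREQUENCY_HZ / 1000000));
}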
Example #2
int
ssx_timer_schedule(SsxTimer    *timer, 
                   SsxInterval interval, 
                   SsxInterval period)
{
    return ssx_timer_schedule_absolute(timer,
                                       ssx_timebase_get() + interval,
                                       period);
}
Example #3
static int 
poll_scom(SsxInterval timeout, pmc_o2p_ctrl_status_reg_t *cs)
{
    SsxTimebase start;
    int timed_out;

    start = ssx_timebase_get();
    timed_out = 0;
    do {
        cs->value = in32(PMC_O2P_CTRL_STATUS_REG);
        if (!(cs->fields.o2p_ongoing)) {
            break;
        }
        if (timed_out) {
            return -SCOM_TIMEOUT_ERROR;
        }
        timed_out = 
            ((timeout != SSX_WAIT_FOREVER) &&
             ((ssx_timebase_get() - start) > timeout));
    } while (1);

    return 0;
}
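poll_scom() shows the standard timeout-polling idiom: capture a start timebase, poll the status bit, and give up once the elapsed ticks exceed the timeout (SSX_WAIT_FOREVER disables the deadline, and the flag-based loop gives the condition one last check after the deadline passes). A generic, hedged sketch of the same idiom follows; condition_met() and POLL_TIMEOUT_RC are hypothetical names, not part of the original source.

// Sketch of the generic timeout-polling idiom built on ssx_timebase_get().
// condition_met() and POLL_TIMEOUT_RC are hypothetical placeholders;
// SsxTimebase, SsxInterval and SSX_WAIT_FOREVER come from the SSX headers
// used by the examples above.
#define POLL_TIMEOUT_RC 1   // hypothetical timeout error code

static int poll_until(bool (*condition_met)(void), SsxInterval timeout)
{
    SsxTimebase start = ssx_timebase_get();

    while (!condition_met())
    {
        if ((timeout != SSX_WAIT_FOREVER) &&
            ((ssx_timebase_get() - start) > timeout))
        {
            return -POLL_TIMEOUT_RC;
        }
    }

    return 0;
}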
Example #4
int
clock_gettime(clockid_t clock_id, struct timespec* tp)
{
    int rc;
    SsxTimebase now;

    if ((clock_id != CLOCK_MONOTONIC) || (tp == 0)) {
        rc = -EINVAL;
    } else {

        now = ssx_timebase_get();
        tp->tv_sec = now / SSX_TIMEBASE_FREQUENCY_HZ;
        tp->tv_nsec = 
            ((now % SSX_TIMEBASE_FREQUENCY_HZ) * 1000000000) / 
            SSX_TIMEBASE_FREQUENCY_HZ;
        rc = 0;
    }

    return rc;
}
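clock_gettime() splits the raw timebase into whole seconds plus a nanosecond remainder using SSX_TIMEBASE_FREQUENCY_HZ. Examples #7 and #8 use the same constant to express the current time in microseconds; a small sketch of that conversion (not part of the original source) is shown below, assuming the timebase runs at 1 MHz or faster.

// Sketch: current time in microseconds since the timebase origin, matching
// the ssx_timebase_get() / (SSX_TIMEBASE_FREQUENCY_HZ / 1000000) expression
// used in Examples #7 and #8. Assumes SSX_TIMEBASE_FREQUENCY_HZ >= 1 MHz.
static inline uint64_t timebase_now_us(void)
{
    return ssx_timebase_get() / (SSX_TIMEBASE_FREQUENCY_HZ / 1000000);
}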
Example #5
void
ssx_dump(FILE* stream, int options)
{
    int i, sep;
    SsxThread* thread;

    fprintf(stream, 
            "------------------------------------------------------------\n");
    fprintf(stream,
            "-- SSX Kernel Dump @ 0x%016llx\n"
            "--          USPRG0 = 0x%08x\n"
            "-- __ssx_run_queue = 0x%08x\n",
            ssx_timebase_get(), 
            mfspr(SPRN_USPRG0),
            __ssx_run_queue);
    fprintf(stream, 
            "------------------------------------------------------------\n");

    sep = 0;

    for (i = 0; i < SSX_THREADS; i++) {

        ssx_thread_at_priority(i, &thread);
        if (thread) {
            if (sep) {
                fprintf(stream, 
                        "*********************************************\n");
            }
            _dumpThread(stream, thread);
            sep = 1;
        }
    }

    fprintf(stream, 
            "------------------------------------------------------------\n");
}
Example #6
int
ssx_sleep(SsxInterval interval) 
{
    return ssx_sleep_absolute(ssx_timebase_get() + interval);
}
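Like ssx_timer_schedule() in Example #2, ssx_sleep() converts a relative interval into an absolute deadline by adding it to the current timebase, so intervals are expressed in timebase ticks. A hedged usage sketch (not from the original source), deriving a 10 ms interval from SSX_TIMEBASE_FREQUENCY_HZ:

// Sketch: block the calling thread for roughly 10 ms. The interval is
// computed from SSX_TIMEBASE_FREQUENCY_HZ (ticks per second); error handling
// is left generic since ssx_sleep() return codes are not shown in these examples.
static void sleep_about_10ms(void)
{
    SsxInterval ten_ms = (SsxInterval)(10 * (SSX_TIMEBASE_FREQUENCY_HZ / 1000));

    if (ssx_sleep(ten_ms) != 0)
    {
        // Handle the nonzero kernel return code as appropriate
    }
}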
Example #7
// Function Specification
//
// Name:  task_dimm_sm
//
// Description: DIMM State Machine - Called every other tick to collect all of
//              the DIMM temperatures.
//
// Task Flags: RTL_FLAG_ACTIVE
//
// End Function Specification
void task_dimm_sm(struct task *i_self)
{
    static uint8_t L_dimmIndex = 0x00;
    static uint8_t L_dimmPort  = 0x00;
    static uint8_t L_notReadyCount = 0;
#define MAX_READ_ATTEMPT 3
    static uint8_t L_readAttempt = 0;
    static bool L_readIssued = false;
    const uint8_t engine = G_sysConfigData.dimm_i2c_engine;
    static bool L_occ_owns_lock = true;

    if (G_mem_monitoring_allowed)
    {
#ifdef DEBUG_LOCK_TESTING
        // TODO: remove testing code once SIMICS_FLAG_ISSUE removed
        SIMULATE_HOST();
#endif

        // First handle any outstanding I2C reset
        if (G_dimm_i2c_reset_required)
        {
            if ((G_dimm_state != DIMM_STATE_RESET_MASTER) && (check_for_i2c_failure()))
            {
                // I2C failure occurred during a reset...
                INTR_TRAC_ERR("task_dimm_sm: Failure during I2C reset - memory monitoring disabled");
                // release I2C lock to the host for this engine and stop monitoring
                occ_i2c_lock_release(G_dimm_sm_args.i2cEngine);
                L_occ_owns_lock = false;
                G_mem_monitoring_allowed = false;
                // TODO: What else do we need to do?  go to Safe State?
            }
            else
            {
                if (G_dimm_state == DIMM_STATE_INIT)
                {
                    // Reset has completed successfully
                    TRAC_INFO("task_dimm_sm: I2C reset completed");
                    G_dimm_i2c_reset_required = false;
                    // Check if host needs I2C lock
                    L_occ_owns_lock = check_and_update_i2c_lock(engine);
                }
                else
                {
                    // Reset still in progress
                    G_dimm_state = dimm_reset_sm();
                }
            }
        }

        if (G_dimm_i2c_reset_required == false)
        {
            if ((L_occ_owns_lock == false) && ((DIMM_TICK == 0) || (DIMM_TICK == 8)))
            {
                // Check if host gave up the I2C lock
                L_occ_owns_lock = check_and_update_i2c_lock(engine);
                if (L_occ_owns_lock)
                {
                    // Start over at the INIT state after receiving the lock
                    G_dimm_state = DIMM_STATE_INIT;
                }
            }

            if (L_occ_owns_lock)
            {
                // Check for failure on prior operation
                if (check_for_i2c_failure())
                {
                    // If there was a failure, continue to the next DIMM (after I2C reset)
                    use_next_dimm(&L_dimmPort, &L_dimmIndex);
                }

                uint8_t nextState = G_dimm_state;

                if (G_dimm_state == DIMM_STATE_INIT)
                {
                    // Setup I2C Interrupt Mask Register
                    DIMM_DBG("DIMM_STATE_INIT: (I2C Engine 0x%02X, Memory Type 0x%02X)",
                             engine, G_sysConfigData.mem_type);
                    G_dimm_sm_args.i2cEngine = engine;
                    if (schedule_dimm_req(DIMM_STATE_INIT))
                    {
                        nextState = DIMM_STATE_WRITE_MODE;
                    }
                }
                else
                {
                    bool intTriggered = check_for_i2c_interrupt(engine);
                    if (intTriggered == false)
                    {
                        // Interrupt not generated, I2C operation may not have completed.
                        // After MAX_TICK_COUNT_WAIT, attempt operation anyway.
                        ++L_notReadyCount;
                    }

                    // Check if prior command completed (or timed out waiting for it)
                    if (intTriggered || (L_notReadyCount > MAX_TICK_COUNT_WAIT))
                    {
                        if (ASYNC_REQUEST_STATE_COMPLETE == G_dimm_sm_request.request.completion_state)
                        {
                            // IPC request completed, now check return code
                            if (GPE_RC_SUCCESS == G_dimm_sm_args.error.rc)
                            {
                                // last request completed without error
                                switch (G_dimm_sm_args.state)
                                {
                                    case DIMM_STATE_INIT:
                                        // Save max I2C ports
                                        if (G_maxDimmPorts != G_dimm_sm_args.maxPorts)
                                        {
                                            G_maxDimmPorts = G_dimm_sm_args.maxPorts;
                                            DIMM_DBG("task_dimm_sm: updating DIMM Max I2C Ports to %d", G_maxDimmPorts);
                                        }
                                        break;

                                    case DIMM_STATE_READ_TEMP:
                                        if (L_readIssued)
                                        {
                                            const uint8_t port = G_dimm_sm_args.i2cPort;
                                            const uint8_t dimm = G_dimm_sm_args.dimm;

                                            // Last DIMM read completed, update sensor and clear error count
                                            DIMM_DBG("task_dimm_sm: Successfully read DIMM%04X temperature: %dC, tick %d",
                                                     DIMM_AND_PORT, G_dimm_sm_args.temp, DIMM_TICK);
                                            g_amec->proc[0].memctl[port].centaur.dimm_temps[dimm].cur_temp = G_dimm_sm_args.temp;
                                            G_dimm[port][dimm].lastReading = ((ssx_timebase_get())/(SSX_TIMEBASE_FREQUENCY_HZ/1000000));
                                            G_dimm[port][dimm].errorCount = 0;

                                            // Move on to next DIMM
                                            use_next_dimm(&L_dimmPort, &L_dimmIndex);
                                            L_readIssued = false;

                                            // Check if host needs the I2C lock
                                            L_occ_owns_lock = check_and_update_i2c_lock(engine);
                                        }
                                        break;

                                    default:
                                        // Nothing to do
                                        break;
                                }
                            }
                            else
                            {
                                // last request did not return success
                                switch (G_dimm_sm_args.state)
                                {
                                    case DIMM_STATE_INITIATE_READ:
                                        if (++L_readAttempt < MAX_READ_ATTEMPT)
                                        {
                                            // The initiate_read didn't complete, retry
                                            DIMM_DBG("task_dimm_sm: initiate read didn't start (%d attempts)", L_readAttempt);
                                            // Force the read again
                                            G_dimm_state = DIMM_STATE_INITIATE_READ;
                                            nextState = G_dimm_state;
                                        }
                                        else
                                        {
                                            INTR_TRAC_ERR("task_dimm_sm: initiate read didn't start after %d attempts... forcing reset", L_readAttempt);
                                            mark_dimm_failed();
                                        }
                                        break;

                                    case DIMM_STATE_READ_TEMP:
                                        if (L_readIssued)
                                        {
                                            if (++L_readAttempt < MAX_READ_ATTEMPT)
                                            {
                                                DIMM_DBG("task_dimm_sm: read didn't complete (%d attempts)", L_readAttempt);
                                                // Force the read again
                                                G_dimm_state = DIMM_STATE_READ_TEMP;
                                                nextState = G_dimm_state;
                                            }
                                            else
                                            {
                                                INTR_TRAC_ERR("task_dimm_sm: read did not complete after %d attempts... forcing reset", L_readAttempt);
                                                mark_dimm_failed();
                                            }
                                        }
                                        break;

                                    default:
                                        // Nothing to do
                                        break;
                                }
                            }
                        }
                    }

                    if (L_occ_owns_lock)
                    {
                        if (false == G_dimm_i2c_reset_required)
                        {
                            // Handle new DIMM state
                            switch (G_dimm_state)
                            {
                                case DIMM_STATE_WRITE_MODE:
                                    // Only start a DIMM read on tick 0 or 8
                                    if ((DIMM_TICK == 0) || (DIMM_TICK == 8))
                                    {
                                        // If DIMM has huid/sensor then it should be present
                                        if ((0 != G_sysConfigData.dimm_huids[L_dimmPort][L_dimmIndex]) &&
                                            (G_dimm[L_dimmPort][L_dimmIndex].disabled == false))
                                        {
                                            G_dimm_sm_args.i2cPort = L_dimmPort;
                                            G_dimm_sm_args.dimm = L_dimmIndex;
                                            DIMM_DBG("task_dimm_sm: Starting collection for DIMM%04X at tick %d",
                                                     DIMM_AND_PORT, DIMM_TICK);
                                            if (schedule_dimm_req(DIMM_STATE_WRITE_MODE))
                                            {
                                                nextState = DIMM_STATE_WRITE_ADDR;
                                            }
                                        }
                                        else
                                        {
                                            // Skip current DIMM and move on to next one
                                            use_next_dimm(&L_dimmPort, &L_dimmIndex);
                                        }
                                    }
                                    break;

                                case DIMM_STATE_WRITE_ADDR:
                                    if (intTriggered || (L_notReadyCount > MAX_TICK_COUNT_WAIT))
                                    {
                                        G_dimm_sm_args.dimm = L_dimmIndex;
                                        G_dimm_sm_args.i2cAddr = get_dimm_addr(L_dimmIndex);
                                        if (schedule_dimm_req(DIMM_STATE_WRITE_ADDR))
                                        {
                                            nextState = DIMM_STATE_INITIATE_READ;
                                            L_readAttempt = 0;
                                            L_readIssued = false;
                                        }
                                    }
                                    break;

                                case DIMM_STATE_INITIATE_READ:
                                    if (intTriggered || (L_notReadyCount > MAX_TICK_COUNT_WAIT))
                                    {
                                        G_dimm_sm_args.dimm = L_dimmIndex;
                                        if (schedule_dimm_req(DIMM_STATE_INITIATE_READ))
                                        {
                                            nextState = DIMM_STATE_READ_TEMP;
                                        }
                                    }
                                    break;

                                case DIMM_STATE_READ_TEMP:
                                    if (intTriggered || (L_notReadyCount > MAX_TICK_COUNT_WAIT))
                                    {
                                        if (schedule_dimm_req(DIMM_STATE_READ_TEMP))
                                        {
                                            L_readIssued = true;
                                            nextState = DIMM_STATE_WRITE_MODE;
                                        }
                                    }
                                    break;

                                default:
                                    INTR_TRAC_ERR("task_dimm_sm: INVALID STATE: 0x%02X", G_dimm_state);
                                    break;
                            }
                        }
                        else
                        {
                            // Previous op triggered reset
                            nextState = dimm_reset_sm();
                        }
                    }
                    else
                    {
                        // OCC no longer holds the i2c lock (no DIMM state change required)
                        nextState = G_dimm_state;
                    }
                }

                if (nextState != G_dimm_state)
                {
                    DIMM_DBG("task_dimm_sm: Updating state to 0x%02X (DIMM%04X) end of tick %d", nextState, (L_dimmPort<<8)|L_dimmIndex, DIMM_TICK);
                    G_dimm_state = nextState;
                    L_notReadyCount = 0;
                }
            }
        }
    }

} // end task_dimm_sm()
Example #8
// Schedule a GPE request for the specified DIMM state
bool schedule_dimm_req(uint8_t i_state)
{
    bool l_scheduled = false;
    bool scheduleRequest = true;

    DIMM_DBG("dimm_sm called with state 0x%02X (tick=%d)", i_state, DIMM_TICK);

    if (!async_request_is_idle(&G_dimm_sm_request.request))
    {
        INTR_TRAC_ERR("dimm_sm: request is not idle.");
    }
    else
    {
        switch(i_state)
        {
            // Init
            case DIMM_STATE_INIT:
                break;

                // Read DIMM temp
            case DIMM_STATE_WRITE_MODE:
            case DIMM_STATE_WRITE_ADDR:
            case DIMM_STATE_INITIATE_READ:
            case DIMM_STATE_READ_TEMP:
                break;

                // I2C reset
            case DIMM_STATE_RESET_MASTER:
            case DIMM_STATE_RESET_SLAVE_P0:
            case DIMM_STATE_RESET_SLAVE_P0_COMPLETE:
            case DIMM_STATE_RESET_SLAVE_P1:
            case DIMM_STATE_RESET_SLAVE_P1_COMPLETE:
                break;

            default:
                INTR_TRAC_ERR("dimm_sm: Invalid state (0x%02X)", i_state);
                errlHndl_t err = NULL;
                /*
                 * @errortype
                 * @moduleid    DIMM_MID_DIMM_SM
                 * @reasoncode  DIMM_INVALID_STATE
                 * @userdata1   DIMM state
                 * @userdata2   0
                 * @devdesc     Invalid DIMM I2C state requested
                 */
                err = createErrl(DIMM_MID_DIMM_SM,
                                 DIMM_INVALID_STATE,
                                 OCC_NO_EXTENDED_RC,
                                 ERRL_SEV_PREDICTIVE,
                                 NULL,
                                 DEFAULT_TRACE_SIZE,
                                 i_state,
                                 0);
                // Request reset since this should never happen.
                REQUEST_RESET(err);
                scheduleRequest = false;
                break;
        }

        if (scheduleRequest)
        {
            // Clear errors and init common arguments for GPE
            G_dimm_sm_args.error.error = 0;
            G_dimm_sm_args.state = i_state;

            DIMM_DBG("dimm_sm: Scheduling GPE1 DIMM I2C state 0x%02X (tick %d)", i_state, DIMM_TICK);
            int l_rc = gpe_request_schedule(&G_dimm_sm_request);
            if (0 == l_rc)
            {
                l_scheduled = true;
            }
            else
            {
                errlHndl_t l_err = NULL;
                INTR_TRAC_ERR("dimm_sm: schedule failed w/rc=0x%08X (%d us)",
                              l_rc, (int) ((ssx_timebase_get())/(SSX_TIMEBASE_FREQUENCY_HZ/1000000)));
                /*
                 * @errortype
                 * @moduleid    DIMM_MID_DIMM_SM
                 * @reasoncode  SSX_GENERIC_FAILURE
                 * @userdata1   GPE schedule return code
                 * @userdata2   state
                 * @devdesc     dimm_sm schedule failed
                 */
                l_err = createErrl(DIMM_MID_DIMM_SM,
                                   SSX_GENERIC_FAILURE,
                                   ERC_DIMM_SCHEDULE_FAILURE,
                                   ERRL_SEV_PREDICTIVE,
                                   NULL,
                                   DEFAULT_TRACE_SIZE,
                                   l_rc,
                                   i_state);
                // Request reset since this should never happen.
                REQUEST_RESET(l_err);
            }
        }
    }

    return l_scheduled;

} // end schedule_dimm_req()
Example #9
// Function Specification
//
// Name: amec_slv_check_perf
//
// Description: Slave OCC: Detect and log degraded performance errors.
//              This function will run every tick.
//
// Thread: RealTime Loop
//
// Task Flags:
//
// End Function Specification
void amec_slv_check_perf(void)
{
    /*------------------------------------------------------------------------*/
    /*  Local Variables                                                       */
    /*------------------------------------------------------------------------*/
    static BOOLEAN          l_prev_failsafe_state = FALSE;
    static BOOLEAN          l_prev_ovs_state = FALSE;
    static BOOLEAN          l_prev_pcap_state = FALSE;
    static ERRL_SEVERITY    l_pcap_sev =  ERRL_SEV_PREDICTIVE;
    static BOOLEAN          l_throttle_traced = FALSE;
    static uint64_t         l_time = 0;

    /*------------------------------------------------------------------------*/
    /*  Code                                                                  */
    /*------------------------------------------------------------------------*/

    // Verify that cores are at proper frequency
    amec_verify_pstate();

    do
    {
        // was frequency limited by power ?
        if ( G_non_dps_power_limited != TRUE )
        {
            if(l_throttle_traced)
            {
                TRAC_INFO("Frequency not limited by power algorithms anymore");
                l_throttle_traced = FALSE;
            }
            // we are done; break and return
            break;
        }

        // frequency limited due to failsafe condition ?
        if ( AMEC_INTF_GET_FAILSAFE() == TRUE )
        {
            if ( l_prev_failsafe_state == TRUE)
            {
                // we are done; break and return
                break;
            }
            else
            {
                // log this error ONLY ONCE per IPL
                l_prev_failsafe_state = TRUE;

                TRAC_ERR("Frequency limited due to failsafe condition(mode:%d, state:%d)",
                          CURRENT_MODE(), CURRENT_STATE());
                l_throttle_traced = TRUE;
                l_time = ssx_timebase_get();

                // log error that calls out OVS procedure
                // set error severity to ERRL_SEV_PREDICTIVE

                /* @
                 * @errortype
                 * @moduleid    AMEC_SLAVE_CHECK_PERFORMANCE
                 * @reasoncode  INTERNAL_FAILURE
                 * @userdata1   Previous FailSafe State
                 * @userdata4   ERC_AMEC_SLAVE_FAILSAFE_STATE
                 * @devdesc     Frequency limited due to failsafe condition
                 */
                errlHndl_t l_errl = createErrl(AMEC_SLAVE_CHECK_PERFORMANCE, //modId
                                              INTERNAL_FAILURE,             //reasoncode
                                              ERC_AMEC_SLAVE_FAILSAFE_STATE,//Extended reason code
                                              ERRL_SEV_PREDICTIVE,          //Severity
                                              NULL,                         //Trace Buf
                                              DEFAULT_TRACE_SIZE,           //Trace Size
                                              l_prev_failsafe_state,        //userdata1
                                              0);                           //userdata2

                addCalloutToErrl(   l_errl,
                                    ERRL_CALLOUT_TYPE_COMPONENT_ID,
                                    ERRL_COMPONENT_ID_OVERSUBSCRIPTION,
                                    ERRL_CALLOUT_PRIORITY_HIGH
                                );

                // and sets the consolidate action flag
                setErrlActions( l_errl, ERRL_ACTIONS_CONSOLIDATE_ERRORS );

                // Commit Error
                commitErrl(&l_errl);

                // we are done; break
                break;
            }
        }

        // frequency limited due to oversubscription condition ?
        if ( AMEC_INTF_GET_OVERSUBSCRIPTION() == TRUE )
        {
            if ( l_prev_ovs_state == TRUE)
            {
                // we are done; break and return
                break;
            }
            else
            {
                // log this error ONLY ONCE per IPL
                l_prev_ovs_state = TRUE;

                TRAC_ERR("Frequency limited due to oversubscription condition(mode:%d, state:%d)",
                          CURRENT_MODE(), CURRENT_STATE());
                l_throttle_traced = TRUE;
                l_time = ssx_timebase_get();

                // log error that calls out OVS procedure
                // set error severity to ERRL_SEV_PREDICTIVE

                // Updated the RC to match the actual RC passed to createErrl()
                /* @
                 * @errortype
                 * @moduleid    AMEC_SLAVE_CHECK_PERFORMANCE
                 * @reasoncode  OVERSUB_LIMIT_ALERT
                 * @userdata1   Previous OVS State
                 * @userdata4   ERC_AMEC_SLAVE_OVS_STATE
                 * @devdesc     Frequency limited due to oversubscription condition
                 */
                errlHndl_t l_errl = createErrl(AMEC_SLAVE_CHECK_PERFORMANCE, //modId
                                              OVERSUB_LIMIT_ALERT,           //reasoncode
                                              ERC_AMEC_SLAVE_OVS_STATE,      //Extended reason code
                                              ERRL_SEV_PREDICTIVE,           //Severity
                                              NULL,                          //Trace Buf
                                              DEFAULT_TRACE_SIZE,            //Trace Size
                                              l_prev_ovs_state,              //userdata1
                                              0);                            //userdata2

                // Callout to Oversubscription
                addCalloutToErrl(   l_errl,
                                    ERRL_CALLOUT_TYPE_COMPONENT_ID,
                                    ERRL_COMPONENT_ID_OVERSUBSCRIPTION,
                                    ERRL_CALLOUT_PRIORITY_HIGH
                                );

                // Callout to APSS
                addCalloutToErrl(   l_errl,
                                    ERRL_CALLOUT_TYPE_HUID,
                                    G_sysConfigData.apss_huid,
                                    ERRL_CALLOUT_PRIORITY_MED
                                );

                // Callout to Firmware
                addCalloutToErrl(   l_errl,
                                    ERRL_CALLOUT_TYPE_COMPONENT_ID,
                                    ERRL_COMPONENT_ID_FIRMWARE,
                                    ERRL_CALLOUT_PRIORITY_LOW
                                );

                // and sets the consolidate action flag
                setErrlActions( l_errl, ERRL_ACTIONS_CONSOLIDATE_ERRORS );

                // Commit Error
                commitErrl(&l_errl);

                // we are done; break
                break;
            }
        }

        uint16_t l_snrBulkPwr = AMECSENSOR_PTR(PWR250US)->sample;

        // frequency limited due to system power cap condition ?
        if (( l_snrBulkPwr > (G_sysConfigData.pcap.system_pcap - PDROP_THRESH) )
            &&
            ( G_sysConfigData.pcap.current_pcap == 0 ))
        {
            if ( l_prev_pcap_state == TRUE)
            {
                // we are done; break and return
                break;
            }
            else
            {
                //log this error ONLY ONCE per IPL
                l_prev_pcap_state = TRUE;

                TRAC_ERR("Frequency limited due to power cap condition(mode:%d, state:%d)",
                         CURRENT_MODE(), CURRENT_STATE());

                TRAC_ERR("SnrBulkPwr %d > Sys Pcap %d ",l_snrBulkPwr,
                         G_sysConfigData.pcap.system_pcap );

                TRAC_ERR("SnrFanPwr %d, SnrIOPwr %d, SnrStoragePwr %d, SnrGpuPrw %d ",
                        AMECSENSOR_PTR(PWR250USFAN)->sample,
                        AMECSENSOR_PTR(PWR250USIO)->sample,
                        AMECSENSOR_PTR(PWR250USSTORE)->sample,
                        AMECSENSOR_PTR(PWR250USGPU)->sample );

                TRAC_ERR("SnrProcPwr 0 %d, SnrProcPwr 1 %d, SnrProcPwr 2 %d, SnrProcPwr 3 %d",
                        g_amec->proc_snr_pwr[0],
                        g_amec->proc_snr_pwr[1],
                        g_amec->proc_snr_pwr[2],
                        g_amec->proc_snr_pwr[3] );

                TRAC_ERR("SnrMemPwr 0 %d, SnrMemPwr 1 %d, SnrMemPwr 2 %d, SnrMemPwr 3 %d",
                        g_amec->mem_snr_pwr[0],
                        g_amec->mem_snr_pwr[1],
                        g_amec->mem_snr_pwr[2],
                        g_amec->mem_snr_pwr[3] );


                l_throttle_traced = TRUE;
                l_time = ssx_timebase_get();

                // log error that calls out firmware and APSS procedure
                // set error severity to l_pcap_sev

                /* @
                 * @errortype
                 * @moduleid    AMEC_SLAVE_CHECK_PERFORMANCE
                 * @reasoncode  PCAP_THROTTLE_POWER_LIMIT
                 * @userdata1   Current Sensor Bulk Power
                 * @userdata2   System PCAP
                 * @userdata4   ERC_AMEC_SLAVE_POWERCAP
                 * @devdesc     Frequency limited due to PowerCap condition
                 */
                errlHndl_t l_errl = createErrl(AMEC_SLAVE_CHECK_PERFORMANCE, //modId
                                              PCAP_THROTTLE_POWER_LIMIT,     //reasoncode
                                              ERC_AMEC_SLAVE_POWERCAP,       //Extended reason code
                                              l_pcap_sev,                    //Severity
                                              NULL,                          //Trace Buf
                                              DEFAULT_TRACE_SIZE,            //Trace Size
                                              l_snrBulkPwr,                  //userdata1
                                              G_sysConfigData.pcap.system_pcap);//userdata2

                addCalloutToErrl(   l_errl,
                                    ERRL_CALLOUT_TYPE_COMPONENT_ID,
                                    ERRL_COMPONENT_ID_FIRMWARE,
                                    ERRL_CALLOUT_PRIORITY_HIGH
                                );

                addCalloutToErrl(   l_errl,
                                    ERRL_CALLOUT_TYPE_HUID,
                                    G_sysConfigData.apss_huid,
                                    ERRL_CALLOUT_PRIORITY_HIGH
                                );

                // and sets the consolidate action flag
                setErrlActions( l_errl, ERRL_ACTIONS_CONSOLIDATE_ERRORS );

                // then set l_pcap_sev to informational
                l_pcap_sev = ERRL_SEV_INFORMATIONAL;

                // Commit Error
                commitErrl(&l_errl);

                // we are done; break
                break;
            }
        }

        // throttle trace to once every 3600 seconds (1 hr = 3,600,000 ms)
        if(!l_throttle_traced && ( DURATION_IN_MS_UNTIL_NOW_FROM(l_time) > 3600000 ) )
        {
            TRAC_INFO("Frequency power limited due to transient condition: PowerLimited=%x, FailSafe=%x, OverSubScription=%x CurrentBulkPwr=%x",
            G_non_dps_power_limited, AMEC_INTF_GET_FAILSAFE(), AMEC_INTF_GET_OVERSUBSCRIPTION(), l_snrBulkPwr );
            l_throttle_traced = TRUE;

            l_time = ssx_timebase_get();
        }
    }
    while( 0 );

    return;
}
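The tail of amec_slv_check_perf() rate-limits its trace to once per hour by saving the timebase in l_time and comparing the elapsed milliseconds against 3600000 via DURATION_IN_MS_UNTIL_NOW_FROM(). A generic, hedged sketch of that rate-limiting pattern (not from the original source), computing the elapsed milliseconds directly from the timebase:

// Sketch: emit a trace at most once per hour. The elapsed-millisecond
// computation assumes SSX_TIMEBASE_FREQUENCY_HZ >= 1 kHz; the static
// variable holds the last trace time across calls, mirroring l_time above.
static void trace_rate_limited(void)
{
    static SsxTimebase last_trace = 0;

    uint64_t elapsed_ms = (ssx_timebase_get() - last_trace) /
                          (SSX_TIMEBASE_FREQUENCY_HZ / 1000);

    if (elapsed_ms > 3600000)    // 1 hour in milliseconds
    {
        TRAC_INFO("periodic status trace");
        last_trace = ssx_timebase_get();
    }
}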
Example #10
void
__ssx_timer_handler()
{
    SsxTimeQueue* tq;
    SsxTimebase now;
    SsxTimer* timer;
    SsxDeque* timer_deque;
    SsxTimerCallback callback;

    tq = &__ssx_time_queue;

    if (SSX_ERROR_CHECK_KERNEL) {
        if (tq->cursor != 0) {
            SSX_PANIC(SSX_TIMER_HANDLER_INVARIANT);
        }
    }

    while ((now = ssx_timebase_get()) >= tq->next_timeout) {

        tq->next_timeout = SSX_TIMEBASE_MAX;
        timer_deque = ((SsxDeque*)tq)->next;

        while (timer_deque != (SsxDeque*)tq) {
    
            timer = (SsxTimer*)timer_deque;
            tq->cursor = timer_deque->next;

            if (timer->timeout <= now) {

                // The timer timed out.  It is removed from the queue unless
                // it is a periodic timer that needs to be rescheduled.  We do
                // rescheduling here in the critical section to correctly
                // handle timers whose callbacks may cancel the timer.  The
                // timer is rescheduled in absolute time.
                //
                // The callback may be made with interrupt preemption enabled
                // or disabled.  However to mitigate kernel interrupt latency
                // we go ahead and open up to interrupts after the callback if
                // the callback itself was not preemptible.

                if (timer->period == 0) {
                    ssx_deque_delete(timer_deque);
                } else {
                    timer->timeout += timer->period;
                    tq->next_timeout = MIN(timer->timeout, tq->next_timeout);
                }

                callback = timer->callback;
                if (callback) {
                    if (timer->options & SSX_TIMER_CALLBACK_PREEMPTIBLE) {
                        ssx_interrupt_preemption_enable();
                        callback(timer->arg);
                    } else {
                        callback(timer->arg);
                        ssx_interrupt_preemption_enable();
                    }
                }                        
                ssx_interrupt_preemption_disable();

            } else {

                // This timer has not timed out.  Its timeout will simply
                // participate in the computation of the next timeout.  For
                // interrupt latency reasons we always allow a period of
                // interrupt preemption.

                tq->next_timeout = MIN(timer->timeout, tq->next_timeout);
                ssx_interrupt_preemption_enable();
                ssx_interrupt_preemption_disable();
            }

            timer_deque = tq->cursor;
        }
    }
    
    tq->cursor = 0;

    // Finally, reschedule the next timeout

    __ssx_schedule_hardware_timeout(tq->next_timeout);
}
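__ssx_timer_handler() reschedules periodic timers by adding the period to the absolute timeout, so the period passed to ssx_timer_schedule() is also expressed in timebase ticks. A hedged sketch (not from the original source) arming an already-created timer as a 1 ms periodic tick; creation of the timer and its callback is assumed to have happened elsewhere.

// Sketch: arm an existing SsxTimer to fire roughly 1 ms from now and every
// 1 ms thereafter, using the relative-to-absolute conversion shown in
// Example #2. The SsxTimer object and its callback are assumed to have been
// set up elsewhere (timer creation is not shown in these examples).
static int arm_periodic_1ms(SsxTimer *timer)
{
    SsxInterval one_ms = (SsxInterval)(SSX_TIMEBASE_FREQUENCY_HZ / 1000);

    return ssx_timer_schedule(timer, one_ms, one_ms);
}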
Example #11
// Function Specification
//
// Name: errlTestCreateMaxLogs
//
// Description: Verify that all available error log slots can be filled, that
//              creating one more log fails, and exercise log ID overflow by
//              creating and deleting 256 logs
//
// End Function Specification
uint32_t errlTestCreateMaxLogs()
{
    uint32_t l_rc = 0;

    ERRL_DBG("START");
    do
    {

        /****************************************************/
        // Check max logs
        ERRL_SEVERITY l_sev = 0;
        errlHndl_t l_backupHandle[ERRL_MAX_SLOTS-2];
        errlHndl_t l_handle = NULL;

        uint32_t l_index = 0;
        // Fill ERRL_MAX_SLOTS-2 slots, randomly choosing ERRL_SEV_PREDICTIVE or ERRL_SEV_UNRECOVERABLE severity
        for(l_index =0; l_index < ERRL_MAX_SLOTS-2; l_index++)
        {

            uint64_t l_time = ssx_timebase_get();
            l_sev = l_time%2 ? ERRL_SEV_PREDICTIVE : ERRL_SEV_UNRECOVERABLE;
            l_handle = createErrl( TEST_MODULE_ID, 0x08, OCC_NO_EXTENDED_RC, l_sev, g_trac_inf, 512, 0x1, l_index);
            CHECK_CONDITION( (l_handle != INVALID_ERR_HNDL) &&
                             (l_handle != NULL), l_rc);

            // backup handle
            l_backupHandle[l_index] = l_handle;

            ERRL_DBG("Log Created @ %p with Sev: %d\n",l_handle, l_sev );
            // addUsrDtlsToErrl
            memset( G_data, l_index, sizeof( G_data ) );
            addUsrDtlsToErrl( l_handle, G_data, sizeof(G_data), ERRL_USR_DTL_STRUCT_VERSION_1, ERRL_USR_DTL_TRACE_DATA );

            // commitErrl( &l_handle );
        }
        // check if something wrong in for loop
        if(l_rc != 0)
            break;

        // Create one more and it should fail
        l_handle = createErrl( TEST_MODULE_ID, 0x08, OCC_NO_EXTENDED_RC, l_sev, g_trac_inf, 512, 0x1, l_index);
        CHECK_CONDITION( l_handle == INVALID_ERR_HNDL, l_rc);

        // delete errl
        for(l_index = 0; l_index < ERRL_MAX_SLOTS-2; l_index++)
        {
            deleteErrl(&l_backupHandle[l_index]);
        }
        ppdumpslot();

        /****************************************************/
        // Check log id overflow
        for(l_index = 0; l_index < 256; l_index++)
        {
            l_handle = createErrl( TEST_MODULE_ID, 0x08, OCC_NO_EXTENDED_RC, l_sev, g_trac_inf, 512, 0x1, l_index);
            CHECK_CONDITION( (l_handle != INVALID_ERR_HNDL) &&
                             (l_handle != NULL), l_rc);

            deleteErrl(&l_handle);
        }

        ERRL_DBG("END \n");
    }while(0);

    return l_rc;
}
Example #12
// Function Specification
//
// Name: dcom_initialize_roles
//
// Description: Initialize roles so we know if we are master or slave
//
// End Function Specification
void dcom_initialize_roles(void)
{
    G_occ_role = OCC_SLAVE;

    // Locals
    pba_xcfg_t pbax_cfg_reg;

    // Used as a debug tool to correlate time between OCCs & System Time
    // getscom_ffdc(OCB_OTBR, &G_dcomTime.tod, NULL); // Commits errors internally

    G_dcomTime.tod = in64(OCB_OTBR) >> 4;
    G_dcomTime.base = ssx_timebase_get();
    pbax_cfg_reg.value = in64(PBA_XCFG);

    if(pbax_cfg_reg.fields.rcv_groupid < MAX_NUM_NODES &&
       pbax_cfg_reg.fields.rcv_chipid < MAX_NUM_OCC)
    {

        TRAC_IMP("Proc ChipId (%d)  NodeId (%d)",
                 pbax_cfg_reg.fields.rcv_chipid,
                 pbax_cfg_reg.fields.rcv_groupid);

        G_pbax_id.valid     = 1;
        G_pbax_id.node_id   = pbax_cfg_reg.fields.rcv_groupid;
        G_pbax_id.chip_id   = pbax_cfg_reg.fields.rcv_chipid;
        G_pbax_id.module_id = G_pbax_id.chip_id;
        // Always start as OCC Slave
        G_occ_role = OCC_SLAVE;
        rtl_set_run_mask(RTL_FLAG_NOTMSTR);


        // Set the initial presence mask, and count the number of occ's present
        G_sysConfigData.is_occ_present |= (0x01 << G_pbax_id.chip_id);
        G_occ_num_present = __builtin_popcount(G_sysConfigData.is_occ_present);

    }
    else // Invalid chip/node ID(s)
    {
        TRAC_ERR("Proc ChipId (%d) and/or NodeId (%d) too high: request reset",
                 pbax_cfg_reg.fields.rcv_chipid,
                 pbax_cfg_reg.fields.rcv_groupid);
        /* @
         * @errortype
         * @moduleid    DCOM_MID_INIT_ROLES
         * @reasoncode  INVALID_CONFIG_DATA
         * @userdata1   PBAXCFG (upper)
         * @userdata2   PBAXCFG (lower)
         * @userdata4   ERC_CHIP_IDS_INVALID
         * @devdesc     Failure determining OCC role
         */
        errlHndl_t  l_errl = createErrl(
            DCOM_MID_INIT_ROLES,            //ModId
            INVALID_CONFIG_DATA,            //Reasoncode
            ERC_CHIP_IDS_INVALID,           //Extended reasoncode
            ERRL_SEV_UNRECOVERABLE,         //Severity
            NULL,                           //Trace Buf
            DEFAULT_TRACE_SIZE,             //Trace Size
            pbax_cfg_reg.words.high_order,  //Userdata1
            pbax_cfg_reg.words.low_order    //Userdata2
            );

        // Callout firmware
        addCalloutToErrl(l_errl,
                         ERRL_CALLOUT_TYPE_COMPONENT_ID,
                         ERRL_COMPONENT_ID_FIRMWARE,
                         ERRL_CALLOUT_PRIORITY_HIGH);

        //Add processor callout
        addCalloutToErrl(l_errl,
                         ERRL_CALLOUT_TYPE_HUID,
                         G_sysConfigData.proc_huid,
                         ERRL_CALLOUT_PRIORITY_LOW);

        G_pbax_id.valid   = 0;  // Invalid Chip/Node ID
    }

    // Initialize DCOM Thread Sem
    ssx_semaphore_create( &G_dcomThreadWakeupSem, // Semaphore
                          1,                      // Initial Count
                          0);                     // No Max Count

}