Example 1
void task_core_data( task_t * i_task )
{

    errlHndl_t  l_err = NULL;       //Error handler
    tracDesc_t  l_trace = NULL;     //Temporary trace descriptor
    int         rc = 0;     //return code
    bulk_core_data_task_t * l_bulk_core_data_ptr = (bulk_core_data_task_t *)i_task->data_ptr;
    GpeGetCoreDataParms * l_parms = (GpeGetCoreDataParms *)(l_bulk_core_data_ptr->gpe_req.parameter);
    gpe_bulk_core_data_t  * l_temp = NULL;

    do
    {
        //First, check to see if the previous GPE request is still running
        //A request is considered idle if it is not attached to any of the
        //asynchronous request queues
        if( !(async_request_is_idle(&l_bulk_core_data_ptr->gpe_req.request)) )
        {
            //This should not happen unless there's a problem
            //Trace 1 time
            if( !G_queue_not_idle_traced )
            {
                TRAC_ERR("Core data GPE is still running \n");
                G_queue_not_idle_traced = TRUE;
            }
            break;
        }

        //Once data has been collected for all assigned cores in the previous
        //interval, wait until tick 0 before starting to collect again.
        if( (l_bulk_core_data_ptr->current_core == l_bulk_core_data_ptr->end_core)
            &&
            ((CURRENT_TICK & (MAX_NUM_TICKS - 1)) != 0) )
        {
            PROC_DBG("Not collecting data. Need to wait for tick 0.\n");
            break;
        }

        //Check to see if the previous GPE request has successfully completed
        //A request is not considered complete until both the engine job
        //has finished without error and any callback has run to completion.

        if( async_request_completed(&l_bulk_core_data_ptr->gpe_req.request)
            &&
            CORE_PRESENT(l_bulk_core_data_ptr->current_core) )
        {
            //If the previous GPE request succeeded then swap core_data_ptr
            //with the global one. The gpe routine will write new data into
            //a buffer that is not being accessed by the RTLoop code.

            PROC_DBG( "Swap core_data_ptr [%x] with the global one\n",
                     l_bulk_core_data_ptr->current_core );

            //debug only
#ifdef PROC_DEBUG
            print_core_status(l_bulk_core_data_ptr->current_core);
            print_core_data_sensors(l_bulk_core_data_ptr->current_core);
#endif

            l_temp = l_bulk_core_data_ptr->core_data_ptr;
            l_bulk_core_data_ptr->core_data_ptr =
                    G_core_data_ptrs[l_bulk_core_data_ptr->current_core];
            G_core_data_ptrs[l_bulk_core_data_ptr->current_core] = l_temp;

            //Core data has been collected so set the bit in global mask.
            //AMEC code will know which cores to update sensors for. AMEC is
            //responsible for clearing the bit later on.
            G_updated_core_mask |= CORE0_PRESENT_MASK >> (l_bulk_core_data_ptr->current_core);

            // Presumptively clear the empath error mask
            G_empath_error_core_mask &=
                    ~(CORE0_PRESENT_MASK >> (l_bulk_core_data_ptr->current_core));

            // The gpe_data collection code has to handle the workaround for
            // HW280375.  Two new flags have been added to the OHA_RO_STATUS_REG
            // image to indicate whether the EMPATH collection failed, and
            // whether it was due to an "expected" error that we can ignore
            // (we can ignore the data as well), or an "unexpected" error for
            // which we will create an informational log one time.
            //
            // The "expected" errors are very rare in practice, in fact we may
            // never even see them unless running a specific type of workload.
            // If you want to test the handling of expected errors compile the
            // GPE code with -DINJECT_HW280375_ERRORS which will inject an error
            // approximately every 1024 samples
            //
            // To determine if the expected error has occurred, inspect the
            // CoreDataOha element of the CoreData structure written by the GPE
            // core data job.  The OHA element contains the oha_ro_status_reg.
            // Inside the OHA status register is a 16 bit reserved field.
            // gpe_data.h defines two masks that can be applied against the
            // reserved field to check for these errors:
            // CORE_DATA_EXPECTED_EMPATH_ERROR
            // CORE_DATA_UNEXPECTED_EMPATH_ERROR
            // Also, a 4-bit PCB parity + error code is saved at the bit
            // position given by CORE_DATA_EMPATH_ERROR_LOCATION; its length
            // is specified by CORE_DATA_EMPATH_ERROR_BITS.
            gpe_bulk_core_data_t *l_core_data =
                    G_core_data_ptrs[l_bulk_core_data_ptr->current_core];

            // We will trace the errors, but only a certain number of times;
            // the unexpected error will be logged only once.
#define OCC_EMPATH_ERROR_THRESH 10
            static uint32_t L_expected_emp_err_cnt = 0;
            static uint32_t L_unexpected_emp_err_cnt = 0;

            // Check the reserved field for the expected or the unexpected error flag
            if ((l_core_data->oha.oha_ro_status_reg.fields._reserved0 & CORE_DATA_EXPECTED_EMPATH_ERROR)
                ||
                (l_core_data->oha.oha_ro_status_reg.fields._reserved0 & CORE_DATA_UNEXPECTED_EMPATH_ERROR))
            {
                // Indicate empath error on current core
                G_empath_error_core_mask |=
                        CORE0_PRESENT_MASK >> (l_bulk_core_data_ptr->current_core);

                // Save the high and low order words of the OHA status reg
                uint32_t l_oha_reg_high = l_core_data->oha.oha_ro_status_reg.words.high_order;
                uint32_t l_oha_reg_low = l_core_data->oha.oha_ro_status_reg.words.low_order;

                // Handle each error case
                if ((l_core_data->oha.oha_ro_status_reg.fields._reserved0 & CORE_DATA_EXPECTED_EMPATH_ERROR)
                    &&
                    (L_expected_emp_err_cnt < OCC_EMPATH_ERROR_THRESH))
                {
                    L_expected_emp_err_cnt++;
                    TRAC_IMP("Expected empath collection error occurred %d time(s)! Core = %d",
                             L_expected_emp_err_cnt,
                             l_bulk_core_data_ptr->current_core);
                    TRAC_IMP("OHA status register: 0x%4.4x%4.4x",
                             l_oha_reg_high, l_oha_reg_low);
                }

                if ((l_core_data->oha.oha_ro_status_reg.fields._reserved0 & CORE_DATA_UNEXPECTED_EMPATH_ERROR)
                    &&
                    (L_unexpected_emp_err_cnt < OCC_EMPATH_ERROR_THRESH))
                {
                    L_unexpected_emp_err_cnt++;
                    TRAC_ERR("Unexpected empath collection error occurred %d time(s)! Core = %d",
                             L_unexpected_emp_err_cnt,
                             l_bulk_core_data_ptr->current_core);
                    TRAC_ERR("OHA status register: 0x%4.4x%4.4x",
                             l_oha_reg_high, l_oha_reg_low);

                    // Create and commit an informational error the first
                    // time this occurs.
                    if (L_unexpected_emp_err_cnt == 1)
                    {
                        TRAC_IMP("Logging unexpected empath collection error 1 time only.");
                        /*
                        * @errortype
                        * @moduleid    PROC_TASK_CORE_DATA_MOD
                        * @reasoncode  INTERNAL_HW_FAILURE
                        * @userdata1   OHA status reg high
                        * @userdata2   OHA status reg low
                        * @userdata4   ERC_PROC_CORE_DATA_EMPATH_ERROR
                        * @devdesc     An unexpected error occurred while
                        *              collecting core empath data.
                        */
                        l_err = createErrl(
                                PROC_TASK_CORE_DATA_MOD, //modId
                                INTERNAL_HW_FAILURE,     //reason code
                                ERC_PROC_CORE_DATA_EMPATH_ERROR, //Extended reason code
                                ERRL_SEV_INFORMATIONAL,  //Severity
                                NULL,                    //Trace
                                DEFAULT_TRACE_SIZE,      //Trace Size
                                l_oha_reg_high,          //userdata1
                                l_oha_reg_low);          //userdata2

                        commitErrl(&l_err);
                    }
                }
            }
        }
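
The swap near the top of this block is a double-buffering handoff: the GPE engine always writes into the task's private buffer, and readers only ever dereference the pointer published in G_core_data_ptrs, so exchanging the two pointers publishes a complete sample without copying it. Below is a minimal sketch of that pattern using simplified, hypothetical names and types (buffer_t, NUM_CORES, publish_core_data); it illustrates the idea only and is not the OCC implementation.

#include <stdint.h>

#define NUM_CORES 12                      /* illustrative value only */

typedef struct { uint32_t raw_cycles; uint32_t run_cycles; } buffer_t;

static buffer_t  buf_a[NUM_CORES], buf_b[NUM_CORES];
static buffer_t *global_ptrs[NUM_CORES];  /* buffers visible to readers (RTLoop/AMEC) */
static buffer_t *task_ptrs[NUM_CORES];    /* buffers the collection task writes into  */
static uint32_t  updated_mask = 0;        /* one bit per core, MSB = core 0           */

static void init_buffers(void)
{
    for (uint8_t c = 0; c < NUM_CORES; c++)
    {
        global_ptrs[c] = &buf_a[c];
        task_ptrs[c]   = &buf_b[c];
    }
}

static void publish_core_data(uint8_t core)
{
    /* Swap the freshly written private buffer with the published one. */
    buffer_t *tmp     = task_ptrs[core];
    task_ptrs[core]   = global_ptrs[core];
    global_ptrs[core] = tmp;

    /* Tell the consumer which core has new data (consumer clears the bit). */
    updated_mask |= 0x80000000u >> core;
}
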
Example 2
// Verifies that each core is at the correct frequency after it has had
// time to stabilize
void amec_verify_pstate()
{
    uint8_t                             l_core = 0;
    int8_t                              l_pstate_from_fmax = 0;
    gpe_bulk_core_data_t *              l_core_data_ptr;
    pmc_pmsr_ffcdc_data_t               l_pmc_pmsr_ffdc;
    errlHndl_t                          l_err = NULL;

    if ( (G_time_until_freq_check == 0) &&
            ( CURRENT_MODE() != OCC_MODE_DYN_POWER_SAVE ) &&
            ( CURRENT_MODE() != OCC_MODE_DYN_POWER_SAVE_FP ) &&
            (!G_sysConfigData.system_type.kvm))
    {
        // Reset the counter
        G_time_until_freq_check = FREQ_CHG_CHECK_TIME;

        // Convert fmax to the corresponding pstate
        l_pstate_from_fmax = proc_freq2pstate(g_amec->sys.fmax);

        for( l_core = 0; l_core < MAX_NUM_CORES; l_core++ )
        {
            // If the core isn't present, skip it
            if(!CORE_PRESENT(l_core))
            {
                l_pmc_pmsr_ffdc.pmsr_ffdc_data.data[l_core].value = 0;
                continue;
            }

            // Get pointer to core data
            l_core_data_ptr = proc_get_bulk_core_data_ptr(l_core);

            // Get the core's pmsr data
            l_pmc_pmsr_ffdc.pmsr_ffdc_data.data[l_core] = l_core_data_ptr->pcb_slave.pmsr;

            // Verify that the core is running at the correct frequency
            // If not, log an error
            if( (l_pstate_from_fmax != l_pmc_pmsr_ffdc.pmsr_ffdc_data.data[l_core].fields.local_pstate_actual) &&
                (l_pstate_from_fmax > l_pmc_pmsr_ffdc.pmsr_ffdc_data.data[l_core].fields.pv_min) &&
                (l_err == NULL) )
            {
                TRAC_ERR("Frequency mismatch in core %d: actual_ps[%d] req_ps[%d] fmax[%d] mode[%d].",
                    l_core,
                    l_pmc_pmsr_ffdc.pmsr_ffdc_data.data[l_core].fields.local_pstate_actual,
                    l_pstate_from_fmax,
                    g_amec->sys.fmax,
                    CURRENT_MODE());

                fill_pmc_ffdc_buffer(&l_pmc_pmsr_ffdc.pmc_ffcdc_data);

                /* @
                 * @errortype
                 * @moduleid   AMEC_VERIFY_FREQ_MID
                 * @reasoncode TARGET_FREQ_FAILURE
                 * @severity   ERRL_SEV_UNRECOVERABLE
                 * @userdata1  0
                 * @userdata2  0
                 * @userdata4  OCC_NO_EXTENDED_RC
                 * @devdesc    A core is not running at the expected frequency
                 */
                l_err = createErrl( AMEC_VERIFY_FREQ_MID,      // i_modId,
                                    TARGET_FREQ_FAILURE,       // i_reasonCode,
                                    OCC_NO_EXTENDED_RC,
                                    ERRL_SEV_UNRECOVERABLE,
                                    NULL,                      // i_trace,
                                    DEFAULT_TRACE_SIZE,        // i_traceSz,
                                    0,                         // i_userData1,
                                    0);                        // i_userData2

                //Add firmware callout
                addCalloutToErrl(l_err,
                        ERRL_CALLOUT_TYPE_COMPONENT_ID,
                        ERRL_COMPONENT_ID_FIRMWARE,
                        ERRL_CALLOUT_PRIORITY_HIGH);

                //Add processor callout
                addCalloutToErrl(l_err,
                        ERRL_CALLOUT_TYPE_HUID,
                        G_sysConfigData.proc_huid,
                        ERRL_CALLOUT_PRIORITY_MED);
            }
        }

        if( l_err != NULL)
        {
            //Add our register dump to the error log
            addUsrDtlsToErrl(l_err,
                    (uint8_t*) &l_pmc_pmsr_ffdc,
                    sizeof(l_pmc_pmsr_ffdc),
                    ERRL_USR_DTL_STRUCT_VERSION_1,
                    ERRL_USR_DTL_BINARY_DATA);

            REQUEST_RESET(l_err);
        }
    }
}
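
The per-core check above comes down to one predicate: the Pstate the core reports differs from the Pstate derived from fmax, and the expected Pstate is actually attainable (above pv_min), with l_err doubling as a latch so only the first mismatch builds an error log. A stripped-down sketch of that predicate follows; the type and field names are placeholders, and the real proc_freq2pstate() conversion and PMSR layout are not reproduced.

#include <stdint.h>
#include <stdbool.h>

/* Placeholder view of the per-core PMSR fields used by the check. */
typedef struct {
    int8_t local_pstate_actual;   /* Pstate the core reports it is running at */
    int8_t pv_min;                /* minimum Pstate bound used by the check    */
} core_pmsr_t;

/* Report a mismatch only when the expected Pstate is within the core's range. */
static bool pstate_mismatch(int8_t expected_ps, const core_pmsr_t *pmsr)
{
    return (expected_ps != pmsr->local_pstate_actual) &&
           (expected_ps > pmsr->pv_min);
}
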
Example 3
// Function Specification
//
// Name: amec_update_proc_core_sensors
//
// Description: Update all the sensors for a given proc
//
// Thread: RealTime Loop
//
// End Function Specification
void amec_update_proc_core_sensors(uint8_t i_core)
{
  gpe_bulk_core_data_t * l_core_data_ptr;
  int i;
  uint16_t               l_temp16 = 0;
  uint32_t               l_temp32 = 0;

  // Make sure the core is present, and that it has updated data.
  if(CORE_PRESENT(i_core) && CORE_UPDATED(i_core))
  {
    // Clear flag indicating core was updated by proc task
    CLEAR_CORE_UPDATED(i_core);

    // Get pointer to core data
    l_core_data_ptr = proc_get_bulk_core_data_ptr(i_core);

    //-------------------------------------------------------
    // Thermal Sensors & Calc
    //-------------------------------------------------------
    amec_calc_dts_sensors(l_core_data_ptr, i_core);

    //-------------------------------------------------------
    //CPM - Commented out as requested by Malcolm
    // ------------------------------------------------------
    // amec_calc_cpm_sensors(l_core_data_ptr, i_core);

    //-------------------------------------------------------
    // Util / Freq
    //-------------------------------------------------------
    // Skip this update if there was an empath collection error
    if (!CORE_EMPATH_ERROR(i_core))
    {
        amec_calc_freq_and_util_sensors(l_core_data_ptr,i_core);
    }

    //-------------------------------------------------------
    // Performance counter - This function should be called
    // after amec_calc_freq_and_util_sensors().
    //-------------------------------------------------------
    amec_calc_dps_util_counters(i_core);

    //-------------------------------------------------------
    // IPS
    //-------------------------------------------------------
    // Skip this update if there was an empath collection error
    if (!CORE_EMPATH_ERROR(i_core))
    {
        amec_calc_ips_sensors(l_core_data_ptr,i_core);
    }

    //-------------------------------------------------------
    // SPURR
    //-------------------------------------------------------
    amec_calc_spurr(i_core);

    // ------------------------------------------------------
    // Update PREVIOUS values for next time
    // ------------------------------------------------------
    g_amec->proc[0].core[i_core].prev_PC_RAW_Th_CYCLES = l_core_data_ptr->per_thread[0].raw_cycles;

    // Skip empath updates if there was an empath collection error on this core
    if (!CORE_EMPATH_ERROR(i_core))
    {
        g_amec->proc[0].core[i_core].prev_PC_RAW_CYCLES    = l_core_data_ptr->empath.raw_cycles;
        g_amec->proc[0].core[i_core].prev_PC_RUN_CYCLES    = l_core_data_ptr->empath.run_cycles;
        g_amec->proc[0].core[i_core].prev_PC_COMPLETED     = l_core_data_ptr->empath.completion;
        g_amec->proc[0].core[i_core].prev_PC_DISPATCH      = l_core_data_ptr->empath.dispatch;
        g_amec->proc[0].core[i_core].prev_tod_2mhz         = l_core_data_ptr->empath.tod_2mhz;
        g_amec->proc[0].core[i_core].prev_FREQ_SENS_BUSY   = l_core_data_ptr->empath.freq_sens_busy;
        g_amec->proc[0].core[i_core].prev_FREQ_SENS_FINISH = l_core_data_ptr->empath.freq_sens_finish;
    }

    for(i=0; i<MAX_THREADS_PER_CORE; i++)
    {
      g_amec->proc[0].core[i_core].thread[i].prev_PC_RUN_Th_CYCLES = l_core_data_ptr->per_thread[i].run_cycles;
    }

    // Final step is to update TOD sensors
    // Extract 32 bits with 16usec resolution
    l_temp32 = (uint32_t)(G_dcom_slv_inbox_doorbell_rx.tod>>13);
    l_temp16 = (uint16_t)(l_temp32);
    // low 16 bits is 16usec resolution with 512MHz TOD clock
    sensor_update( AMECSENSOR_PTR(TODclock0), l_temp16);
    l_temp16 = (uint16_t)(l_temp32>>16);
    // mid 16 bits is 1.05sec resolution with 512MHz TOD clock
    sensor_update( AMECSENSOR_PTR(TODclock1), l_temp16);
    l_temp16 = (uint16_t)(G_dcom_slv_inbox_doorbell_rx.tod>>45);
    // hi 16 bits is 0.796 day resolution with 512MHz TOD clock
    sensor_update( AMECSENSOR_PTR(TODclock2), l_temp16);
  }
}
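
The TOD slicing at the end of the function works out as follows, assuming the 512 MHz TOD clock named in the comments: a right shift of 13 gives 2^13 ticks = 16 us per LSB, the next 16 bits step in 16 us * 2^16 = ~1.05 s, and a right shift of 45 gives 2^45 ticks = ~68,700 s = ~0.796 days per LSB. The sketch below shows just that bit slicing with a hypothetical helper name (split_tod); the sensor_update() calls are omitted.

#include <stdint.h>

static void split_tod(uint64_t tod, uint16_t *lo_16us, uint16_t *mid_1s, uint16_t *hi_day)
{
    uint32_t t32 = (uint32_t)(tod >> 13);  /* 16 us per LSB at 512 MHz            */

    *lo_16us = (uint16_t)t32;              /* bits 13..28 -> TODclock0            */
    *mid_1s  = (uint16_t)(t32 >> 16);      /* bits 29..44 -> TODclock1, ~1.05 s   */
    *hi_day  = (uint16_t)(tod >> 45);      /* bits 45..60 -> TODclock2, ~0.796 d  */
}
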
Example 4
// Function Specification
//
// Name: amec_slv_voting_box
//
// Description: Slave OCC's voting box that decides the frequency request.
//              This function will run every tick.
//
// Thread: RealTime Loop
//
// Task Flags:
//
// End Function Specification
void amec_slv_voting_box(void)
{
    /*------------------------------------------------------------------------*/
    /*  Local Variables                                                       */
    /*------------------------------------------------------------------------*/
    uint16_t                        k = 0;
    uint16_t                        l_chip_fmax = g_amec->sys.fmax;
    uint16_t                        l_core_freq = 0;
    uint32_t                        l_chip_reason = 0;
    uint32_t                        l_core_reason = 0;
    uint8_t                         l_kvm_throt_reason = NO_THROTTLE;
    amec_part_t                     *l_part = NULL;
    bool                            l_freq_req_changed = FALSE;

    /*------------------------------------------------------------------------*/
    /*  Code                                                                  */
    /*------------------------------------------------------------------------*/

    // Voting Box for CPU speed.
    // This function implements the voting box to decide which input gets the right
    // to actuate the system.

    //Reset the maximum core frequency requested prior to recalculation.
    g_amec->proc[0].core_max_freq = 0;

    // PPB_FMAX
    if(g_amec->proc[0].pwr_votes.ppb_fmax < l_chip_fmax)
    {
        l_chip_fmax = g_amec->proc[0].pwr_votes.ppb_fmax;
        l_chip_reason = AMEC_VOTING_REASON_PPB;
        l_kvm_throt_reason = POWERCAP;
    }

    // PMAX_CLIP_FREQ
    if(g_amec->proc[0].pwr_votes.pmax_clip_freq < l_chip_fmax)
    {
        l_chip_fmax = g_amec->proc[0].pwr_votes.pmax_clip_freq;
        l_chip_reason = AMEC_VOTING_REASON_PMAX;
        l_kvm_throt_reason = POWER_SUPPLY_FAILURE;
    }

    // Pmax_clip frequency request if there is an APSS failure
    if(g_amec->proc[0].pwr_votes.apss_pmax_clip_freq < l_chip_fmax)
    {
        l_chip_fmax = g_amec->proc[0].pwr_votes.apss_pmax_clip_freq;
        l_chip_reason = AMEC_VOTING_REASON_APSS_PMAX;
        l_kvm_throt_reason = POWER_SUPPLY_FAILURE;
    }

    //THERMALPROC.FREQ_REQUEST
    //Thermal controller input based on processor temperature
    if(g_amec->thermalproc.freq_request < l_chip_fmax)
    {
        l_chip_fmax = g_amec->thermalproc.freq_request;
        l_chip_reason = AMEC_VOTING_REASON_PROC_THRM;
        l_kvm_throt_reason = CPU_OVERTEMP;
    }

    // Controller request based on VRHOT signal from processor regulator
    if(g_amec->vrhotproc.freq_request < l_chip_fmax)
    {
        l_chip_fmax = g_amec->vrhotproc.freq_request;
        l_chip_reason = AMEC_VOTING_REASON_VRHOT_THRM;
        l_kvm_throt_reason = CPU_OVERTEMP;
    }

    // CONN_OC_VOTE
    if(g_amec->proc[0].pwr_votes.conn_oc_vote < l_chip_fmax)
    {
        l_chip_fmax = g_amec->proc[0].pwr_votes.conn_oc_vote;
        l_chip_reason = AMEC_VOTING_REASON_CONN_OC;
        l_kvm_throt_reason = OVERCURRENT;
    }

    for (k=0; k<MAX_NUM_CORES; k++)
    {
        if(CORE_PRESENT(k))
        {
            l_core_freq = l_chip_fmax;
            l_core_reason = l_chip_reason;

            // Disable DPS in KVM
            if(!G_sysConfigData.system_type.kvm)
            {
                l_part = amec_part_find_by_core(&g_amec->part_config, k);

                // Check frequency request generated by DPS algorithms
                if(g_amec->proc[0].core[k].core_perf.dps_freq_request < l_core_freq)
                {
                    l_core_freq = g_amec->proc[0].core[k].core_perf.dps_freq_request;
                    l_core_reason = AMEC_VOTING_REASON_UTIL;
                }

                // Adjust frequency based on soft frequency boundaries
                if(l_part != NULL)
                {
                    if(l_core_freq < l_part->soft_fmin)
                    {
                        // Before enforcing a soft Fmin, make sure we don't
                        // have a thermal or power emergency
                        if(!(l_chip_reason & (AMEC_VOTING_REASON_PROC_THRM |
                                              AMEC_VOTING_REASON_VRHOT_THRM |
                                              AMEC_VOTING_REASON_PPB |
                                              AMEC_VOTING_REASON_PMAX |
                                              AMEC_VOTING_REASON_CONN_OC)))
                        {
                            l_core_freq = l_part->soft_fmin;
                            l_core_reason = AMEC_VOTING_REASON_SOFT_MIN;
                        }
                    }
                    else if(l_core_freq > l_part->soft_fmax)
                    {
                        l_core_freq = l_part->soft_fmax;
                        l_core_reason = AMEC_VOTING_REASON_SOFT_MAX;
                    }
                }
            }

            if(CURRENT_MODE() == OCC_MODE_NOMINAL)
            {
                // PROC_PCAP_NOM_VOTE
                if(g_amec->proc[0].pwr_votes.proc_pcap_nom_vote < l_core_freq)
                {
                    l_core_freq = g_amec->proc[0].pwr_votes.proc_pcap_nom_vote;
                    l_core_reason = AMEC_VOTING_REASON_PWR;
                    l_kvm_throt_reason = POWERCAP;
                }
            }
            else
            {
                // PROC_PCAP_VOTE
                if(g_amec->proc[0].pwr_votes.proc_pcap_vote < l_core_freq)
                {
                    l_core_freq = g_amec->proc[0].pwr_votes.proc_pcap_vote;
                    l_core_reason = AMEC_VOTING_REASON_PWR;
                    l_kvm_throt_reason = POWERCAP;
                }
            }

            // Check IPS frequency request sent by Master OCC
            if(g_amec->slv_ips_freq_request != 0)
            {
                if(g_amec->slv_ips_freq_request < l_core_freq)
                {
                    l_core_freq = g_amec->slv_ips_freq_request;
                    l_core_reason = AMEC_VOTING_REASON_IPS;
                }
            }

            // Override frequency with request from Master OCC
            if(g_amec->foverride_enable)
            {
                if(g_amec->foverride != 0)
                {
                    // Override the frequency on all cores if Master OCC sends
                    // a non-zero request
                    l_core_freq = g_amec->foverride;
                    l_core_reason = AMEC_VOTING_REASON_OVERRIDE;
                }
            }

            if(g_amec->pstate_foverride_enable)
            {
                if(g_amec->pstate_foverride != 0)
                {
                    // Override the frequency on all cores if the Global Pstate
                    // table has been modified
                    l_core_freq = g_amec->pstate_foverride;
                    l_core_reason = AMEC_VOTING_REASON_OVERRIDE;
                }
            }

            //Make sure the frequency is not less than the system min
            if(l_core_freq < g_amec->sys.fmin)
            {
                l_core_freq = g_amec->sys.fmin;
            }

            // Override frequency via Amester parameter interface
            if (g_amec->proc[0].parm_f_override_enable &&
                g_amec->proc[0].parm_f_override[k] > 0)
            {
                l_core_freq = g_amec->proc[0].parm_f_override[k];
                l_core_reason = AMEC_VOTING_REASON_OVERRIDE_CORE;
            }

            // If frequency has changed, set the flag
            if ( (l_core_freq != g_amec->proc[0].core[k].f_request) ||
                    (l_core_freq != g_amec->sys.fmax))
            {
                l_freq_req_changed = TRUE;
            }

            //STORE core frequency and reason
            g_amec->proc[0].core[k].f_request = l_core_freq;
            g_amec->proc[0].core[k].f_reason = l_core_reason;

            // Update the Amester parameter telling us the reason. Needed for
            // parameter array.
            g_amec->proc[0].parm_f_reason[k] = l_core_reason;

            //CURRENT_MODE() may be OCC_MODE_NOCHANGE because STATE change is processed
            //before MODE change
            if ((CURRENT_MODE() != OCC_MODE_DYN_POWER_SAVE) &&
                (CURRENT_MODE() != OCC_MODE_DYN_POWER_SAVE_FP) &&
                (CURRENT_MODE() != OCC_MODE_NOCHANGE) &&
                (l_core_reason & NON_DPS_POWER_LIMITED))
            {
                G_non_dps_power_limited = TRUE;
            }
            else
            {
                G_non_dps_power_limited = FALSE;
            }

            // Update the sensor telling us what the requested frequency is
            sensor_update( AMECSENSOR_ARRAY_PTR(FREQ250USP0C0,k),
                    (uint16_t) g_amec->proc[0].core[k].f_request);

#if 0
            /// TODO: This can be deleted if deemed useless
            /// This trace can be used to debug the voting
            /// box and control loops.  It will trace the reason why a
            /// controller is lowering the freq, but will only do it once in a
            /// row for the specific freq it wants to control to.  It assumes
            /// that all cores will be controlled to same freq.
            if(l_chip_fmax != g_amec->sys.fmax){
                static uint16_t L_trace = 0;
                if(l_chip_fmax != L_trace){
                    L_trace = l_chip_fmax;
                    TRAC_INFO("Core: %d, Freq: %d, Reason: %d",k,l_core_freq,l_core_reason);
                }
            }
#endif

            if(l_core_freq > g_amec->proc[0].core_max_freq)
            {
                g_amec->proc[0].core_max_freq = l_core_freq;
            }
        }
        else
        {
            l_core_freq = 0;
            l_core_reason = 0;
        }
    }//End of for loop

    // Check if the frequency is going to be changing
    if( l_freq_req_changed == TRUE )
    {
        G_time_until_freq_check = FREQ_CHG_CHECK_TIME;
    }
    else if (G_time_until_freq_check != 0)
    {
        G_time_until_freq_check--;
    }

    //convert POWERCAP reason to POWER_SUPPLY_FAILURE if ovs/failsafe is asserted
    if((l_kvm_throt_reason == POWERCAP) &&
        (AMEC_INTF_GET_FAILSAFE() || AMEC_INTF_GET_OVERSUBSCRIPTION()))
    {
        l_kvm_throt_reason = POWER_SUPPLY_FAILURE;
    }

    //check if we need to update the throttle reason in homer
    if(G_sysConfigData.system_type.kvm &&
       (l_kvm_throt_reason != G_amec_kvm_throt_reason))
    {
        //Notify dcom thread to update the table
        G_amec_kvm_throt_reason = l_kvm_throt_reason;
        ssx_semaphore_post(&G_dcomThreadWakeupSem);
    }
}
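
Every clause in the function above follows the same "voting" shape: a candidate frequency can only lower the running ceiling, and whichever vote is currently the limiter also supplies the reason code, with the system fmin applied as a floor at the end. The condensed sketch below uses hypothetical names (vote_t, apply_votes); the real voting box also layers in per-core, mode-dependent, and override votes that are not reproduced here.

#include <stdint.h>

typedef struct {
    uint16_t freq;      /* requested frequency ceiling (illustrative units) */
    uint32_t reason;    /* single-bit reason code for this vote             */
} vote_t;

static uint16_t apply_votes(const vote_t *votes, int n,
                            uint16_t fmax, uint16_t fmin, uint32_t *reason_out)
{
    uint16_t freq   = fmax;
    uint32_t reason = 0;
    int      i;

    for (i = 0; i < n; i++)
    {
        if (votes[i].freq < freq)          /* a vote only ever lowers the ceiling */
        {
            freq   = votes[i].freq;
            reason = votes[i].reason;
        }
    }

    if (freq < fmin)                       /* never go below the system minimum   */
    {
        freq = fmin;
    }

    *reason_out = reason;
    return freq;
}
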
Example 5
//*************************************************************************
// Functions
//*************************************************************************
void amec_vectorize_core_sensor(sensor_t * l_sensor,
                                vectorSensor_t * l_vector,
                                const VECTOR_SENSOR_OP l_op,
                                uint16_t l_sensor_elem_array_gsid)
{
#define VECTOR_CREATE_FAILURE   1
#define VECTOR_ADD_ELEM_FAILURE 2

  int l_idx = 0;    // Used to index the for loops for vector create
  int l_rc  = 0;    // Indicates failure to add a sensor to vector
  uint16_t l_gsid  = 0xFFFF;
  errlHndl_t l_err = NULL;

  do
  {
    // Grab GSID for errl in case of failure
    l_gsid = l_sensor->gsid;

    // Vectorize the sensor
    sensor_vectorize(l_sensor,
        l_vector,
        l_op);

    // If vectorize worked, add elements to the vector sensor
    if(NULL != l_sensor->vector)
    {
      // Loop through cores
      for(l_idx = 0; l_idx < MAX_NUM_CORES; l_idx++)
      {
        // Add elements to the vector sensor
        sensor_vector_elem_add(l_sensor->vector,
                               l_idx,
                               AMECSENSOR_ARRAY_PTR(l_sensor_elem_array_gsid, l_idx));
        // If core is not present, disable this vector element
        if(!CORE_PRESENT(l_idx))
        {
          sensor_vector_elem_enable(l_sensor->vector,
                                    l_idx,
                                    0 /* Disable */);
        }
      }

      // Sanity check, we should have MAX_NUM_CORES entries in
      // vector sensor
      if(l_sensor->vector->size != MAX_NUM_CORES)
      {
        // Set l_rc and break out so that we can create an errl
        l_rc = VECTOR_ADD_ELEM_FAILURE;
        break;
      }
    }
    else
    {
      // Set l_rc and break out so that we can create an errl
      l_rc = VECTOR_CREATE_FAILURE;
      break;
    }
  }while(0);

  if(l_rc)
  {
    //If we failed to vectorize the sensor or add an element, there is a problem.
    TRAC_ERR("Failed to vectorize sensor[0x%x, 0x%x]", l_gsid, l_rc );

    /* @
     * @errortype
     * @moduleid    AMEC_VECTORIZE_FW_SENSORS
     * @reasoncode  SSX_GENERIC_FAILURE
     * @userdata1   return code
     * @userdata2   gsid of failed sensor
     * @userdata4   OCC_NO_EXTENDED_RC
     * @devdesc     Firmware failure in call to vectorize sensor
     */
    l_err = createErrl(
        AMEC_VECTORIZE_FW_SENSORS,      //modId
        SSX_GENERIC_FAILURE,            //reasoncode
        OCC_NO_EXTENDED_RC,             //Extended reason code
        ERRL_SEV_UNRECOVERABLE,         //Severity
        NULL,//TODO: create trace       //Trace Buf
        DEFAULT_TRACE_SIZE,             //Trace Size
        l_rc,                           //userdata1
        l_gsid                          //userdata2
        );

    REQUEST_RESET(l_err);
  }
}
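
This function, like Example 1, relies on the do { ... } while(0) early-exit idiom: each failing step assigns a distinct return code and breaks out, and a single error path after the loop traces and logs it. A self-contained sketch of the idiom follows, using placeholder step functions and plain stderr output instead of the OCC error-log API.

#include <stdio.h>

#define STEP_A_FAILURE 1
#define STEP_B_FAILURE 2

static int step_a(void) { return 0; }   /* placeholder: 0 on success */
static int step_b(void) { return 0; }   /* placeholder: 0 on success */

static void run_steps(void)
{
    int rc = 0;

    do
    {
        if (step_a() != 0) { rc = STEP_A_FAILURE; break; }
        if (step_b() != 0) { rc = STEP_B_FAILURE; break; }
    } while (0);

    if (rc)
    {
        /* Single place to build and commit an error log, as in the example. */
        fprintf(stderr, "step failed, rc=%d\n", rc);
    }
}
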
Example 6
// Function Specification
//
// Name: amec_dps_update_core_util
//
// Description: Update per-core utilization variables.
//
// End Function Specification
void amec_dps_update_core_util(void)
{
    /*------------------------------------------------------------------------*/
    /*  Local Variables                                                       */
    /*------------------------------------------------------------------------*/
    uint16_t                    l_temp16 = 0;
    uint8_t                     l_tempreg = 0;
    uint8_t                     l_idx = 0;
    amec_core_perf_counter_t*   l_perf = NULL;
    amec_part_t                 *l_part = NULL;

    /*------------------------------------------------------------------------*/
    /*  Code                                                                  */
    /*------------------------------------------------------------------------*/

    // If g_amec->fw.dps_no_update_flag is set, no updating is allowed
    if (!g_amec->fw.dps_no_update_flag)
    {
        // Update moving average of util_slack and util_active for all cores
        for(l_idx=0; l_idx<MAX_NUM_CORES; l_idx++)
        {
            if (!CORE_PRESENT(l_idx))
            {
                continue; //nothing to do if the core's disabled
            }

            l_part = amec_part_find_by_core(&g_amec->part_config, l_idx);

            // If this core doesn't belong to a core group, then do nothing
            if (l_part == NULL)
            {
                continue;
            }

            // Get pointer to the perf struc of this core
            l_perf = &g_amec->proc[0].core[l_idx].core_perf;

            l_perf->ptr_putUtilslack--; // Decrement put pointer
            if (l_perf->ptr_putUtilslack > TWO_TO_THE_POWER_OF_FIFTEEN)
            {
                // Wrap circular pointer. WARNING -> buffer size must be a
                // power of 2
                l_perf->ptr_putUtilslack &= (MAX_UTIL_SLACK_AVG_LEN-1);
            }

            // Locate the oldest sample associated with the 32-bit moving average.
            // WARNING -> this sample must be read before the write below: if the
            // entire buffer is in use, the write would overwrite the oldest
            // sample before we read it.
            l_temp16=(uint16_t)(l_perf->ptr_putUtilslack+l_part->dpsalg.sample_count_util) & (MAX_UTIL_SLACK_AVG_LEN-1);
            l_tempreg=l_perf->ptr_util_slack_avg_buffer[l_temp16];
            // Bleed off oldest sample from moving average
            l_perf->util_slack_accumulator = l_perf->util_slack_accumulator-(uint32_t)l_tempreg;
            l_tempreg=l_perf->ptr_util_active_avg_buffer[l_temp16];
            // Bleed off oldest sample from moving average
            l_perf->util_active_accumulator = l_perf->util_active_accumulator-(uint32_t)l_tempreg;
            // Add in newest sample into moving average
            l_tempreg = l_perf->util_slack_core_counter;
            l_perf->util_slack_accumulator = l_perf->util_slack_accumulator+(uint32_t)l_tempreg;
            // Write new sample into buffer.
            l_perf->ptr_util_slack_avg_buffer[l_perf->ptr_putUtilslack]=l_tempreg;
            // Add in newest sample into moving average
            l_tempreg = l_perf->util_active_core_counter;
            l_perf->util_active_accumulator = l_perf->util_active_accumulator+(uint32_t)l_tempreg;
            // Write new sample into buffer.
            l_perf->ptr_util_active_avg_buffer[l_perf->ptr_putUtilslack]=l_tempreg;

            // Reset counters every 2msec
            l_perf->util_active_core_counter=0;
            l_perf->util_slack_core_counter=0;
        }
    }
}
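
The buffer arithmetic above is an incrementally maintained moving average over a power-of-two circular buffer: decrement and mask the put index, read the sample that is about to fall out of the window, subtract it from the accumulator, then add and store the new sample. The sketch below isolates that bookkeeping with a hypothetical 16-entry buffer and names (moving_avg_t, moving_avg_push); the real code keeps two such averages (slack and active) per core.

#include <stdint.h>

#define AVG_LEN 16u                       /* must be a power of two (illustrative) */

typedef struct {
    uint8_t  buf[AVG_LEN];
    uint16_t put;                         /* index of the most recent sample       */
    uint32_t accumulator;                 /* sum of the samples currently held     */
} moving_avg_t;

static void moving_avg_push(moving_avg_t *m, uint8_t sample, uint16_t window)
{
    uint16_t oldest;

    m->put = (uint16_t)(m->put - 1u) & (AVG_LEN - 1u);   /* wrap backwards */

    /* Read the sample that falls out of the window before overwriting it. */
    oldest = (uint16_t)(m->put + window) & (AVG_LEN - 1u);
    m->accumulator -= m->buf[oldest];

    m->accumulator += sample;
    m->buf[m->put]  = sample;
}
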
Example 7
// Function Specification
//
// Name: amec_dps_main
//
// Description: Main DPS function.
//
// End Function Specification
void amec_dps_main(void)
{
    /*------------------------------------------------------------------------*/
    /*  Local Variables                                                       */
    /*------------------------------------------------------------------------*/
    uint16_t                    l_idx = 0;

    /*------------------------------------------------------------------------*/
    /*  Code                                                                  */
    /*------------------------------------------------------------------------*/

    // First, update the utilization variables for all cores
    amec_dps_update_core_util();

    // Loop through all core groups and apply energy-savings policy
    for (l_idx=0; l_idx<AMEC_PART_MAX_PART; l_idx++)
    {
        if (!g_amec->part_config.part_list[l_idx].valid)
        {
            continue;
        }

        if (g_amec->part_config.part_list[l_idx].ncores == 0)
        {
            continue;
        }

        switch (g_amec->part_config.part_list[l_idx].es_policy)
        {
            case OCC_INTERNAL_MODE_DPS:
            case OCC_INTERNAL_MODE_DPS_MP:
                amec_dps_partition_update_sensors(l_idx);
                amec_dps_partition_alg(l_idx);
                break;

            default:
                // No energy-savings policy: the DPS vote was already disabled
                // when the policy was first selected for the partition or when
                // core ownership changed (amec_part.c)
                break;
        }
    }

    // For GA1, we need to send the Fwish to the Master OCC
    if ((g_amec->part_config.part_list[0].es_policy == OCC_INTERNAL_MODE_DPS) ||
        (g_amec->part_config.part_list[0].es_policy == OCC_INTERNAL_MODE_DPS_MP))
    {
        // If this core group policy is one of the DPS modes, then send the
        // frequency request from the DPS algorithm
        G_dcom_slv_outbox_tx.fwish =
            g_amec->part_config.part_list[0].dpsalg.freq_request;
    }
    else
    {
        // Else, send the nominal frequency of the system
        G_dcom_slv_outbox_tx.fwish =
            g_amec->part_mode_freq[OCC_INTERNAL_MODE_NOM].fmax;
    }
    // We also need to send the Factual to the Master OCC
    for (l_idx=0; l_idx<MAX_NUM_CORES; l_idx++)
    {
        // Find the first valid core and send its frequency
        if (CORE_PRESENT(l_idx))
        {
            G_dcom_slv_outbox_tx.factual =
                AMECSENSOR_ARRAY_PTR(FREQ250USP0C0,l_idx)->sample;
            break;
        }
    }
}
Example 8
// Function Specification
//
// Name: amec_slv_proc_voting_box
//
// Description: Slave OCC's voting box that decides the frequency request.
//              This function will run every tick.
//
// Thread: RealTime Loop
//
// Task Flags:
//
// End Function Specification
void amec_slv_proc_voting_box(void)
{
    /*------------------------------------------------------------------------*/
    /*  Local Variables                                                       */
    /*------------------------------------------------------------------------*/
    uint16_t                        k = 0;
    uint16_t                        l_chip_fmax = g_amec->sys.fmax;
    uint16_t                        l_core_freq = 0;
    uint16_t                        l_core_freq_max = 0;  // max freq across all cores
    uint16_t                        l_core_freq_min = g_amec->sys.fmax;  // min freq across all cores
    uint32_t                        l_current_reason = 0;  // used for debug purposes
    static uint32_t                 L_last_reason = 0;     // used for debug purposes
    uint32_t                        l_chip_reason = 0;
    uint32_t                        l_core_reason = 0;
    amec_proc_voting_reason_t       l_kvm_throt_reason = NO_THROTTLE;
    amec_part_t                     *l_part = NULL;

    // frequency threshold for reporting throttling
    uint16_t l_report_throttle_freq = G_sysConfigData.system_type.report_dvfs_nom ?
                                      G_sysConfigData.sys_mode_freq.table[OCC_MODE_NOMINAL] :
                                      G_sysConfigData.sys_mode_freq.table[OCC_MODE_TURBO];

    /*------------------------------------------------------------------------*/
    /*  Code                                                                  */
    /*------------------------------------------------------------------------*/
    if (!G_allowPstates)
    {
        // Don't allow pstates to be sent until after initial mode has been set
        if ( (CURRENT_MODE()) || (G_sysConfigData.system_type.kvm) )
        {
            G_allowPstates = TRUE;
        }
    }

    // Voting Box for CPU speed.
    // This function implements the voting box to decide which input gets the right
    // to actuate the system.

    // Check for oversubscription if the redundant power supply policy (oversubscription) is being enforced
    if (G_sysConfigData.system_type.non_redund_ps == false)
    {
        // If in oversubscription and there is a defined (non 0) OVERSUB frequency less than max then use it
        if( (AMEC_INTF_GET_OVERSUBSCRIPTION()) &&
            (G_sysConfigData.sys_mode_freq.table[OCC_MODE_OVERSUB]) &&
            (G_sysConfigData.sys_mode_freq.table[OCC_MODE_OVERSUB] < l_chip_fmax) )
        {
            l_chip_fmax = G_sysConfigData.sys_mode_freq.table[OCC_MODE_OVERSUB];
            l_chip_reason = AMEC_VOTING_REASON_OVERSUB;
        }
    }

    // If there is an active VRM fault and a defined (non 0) VRM N frequency less than max use it
    if( (g_amec->sys.vrm_fault_status) &&
        (G_sysConfigData.sys_mode_freq.table[OCC_MODE_VRM_N]) &&
        (G_sysConfigData.sys_mode_freq.table[OCC_MODE_VRM_N] < l_chip_fmax) )
    {
        l_chip_fmax = G_sysConfigData.sys_mode_freq.table[OCC_MODE_VRM_N];
        l_chip_reason = AMEC_VOTING_REASON_VRM_N;
    }

    // PPB_FMAX
    if(g_amec->proc[0].pwr_votes.ppb_fmax < l_chip_fmax)
    {
        l_chip_fmax = g_amec->proc[0].pwr_votes.ppb_fmax;
        l_chip_reason = AMEC_VOTING_REASON_PPB;

        if(l_report_throttle_freq <= l_chip_fmax)
        {
            l_kvm_throt_reason = PCAP_EXCEED_REPORT;
        }
        else
        {
            l_kvm_throt_reason = POWERCAP;
        }
    }

    // PMAX_CLIP_FREQ
    if(g_amec->proc[0].pwr_votes.pmax_clip_freq < l_chip_fmax)
    {
        l_chip_fmax = g_amec->proc[0].pwr_votes.pmax_clip_freq;
        l_chip_reason = AMEC_VOTING_REASON_PMAX;
        l_kvm_throt_reason = POWER_SUPPLY_FAILURE;
    }

    // Pmax_clip frequency request if there is an APSS failure
    if(g_amec->proc[0].pwr_votes.apss_pmax_clip_freq < l_chip_fmax)
    {
        l_chip_fmax = g_amec->proc[0].pwr_votes.apss_pmax_clip_freq;
        l_chip_reason = AMEC_VOTING_REASON_APSS_PMAX;
        l_kvm_throt_reason = POWER_SUPPLY_FAILURE;
    }

    //THERMALPROC.FREQ_REQUEST
    //Thermal controller input based on processor temperature
    if(g_amec->thermalproc.freq_request < l_chip_fmax)
    {
        l_chip_fmax = g_amec->thermalproc.freq_request;
        l_chip_reason = AMEC_VOTING_REASON_PROC_THRM;

        if( l_report_throttle_freq <= l_chip_fmax)
        {
            l_kvm_throt_reason = PROC_OVERTEMP_EXCEED_REPORT;
        }
        else
        {
            l_kvm_throt_reason = CPU_OVERTEMP;
        }
    }

    //Thermal controller input based on VRM Vdd temperature
    if(g_amec->thermalvdd.freq_request < l_chip_fmax)
    {
        l_chip_fmax = g_amec->thermalvdd.freq_request;
        l_chip_reason = AMEC_VOTING_REASON_VDD_THRM;

        if( l_report_throttle_freq <= l_chip_fmax)
        {
            l_kvm_throt_reason = VDD_OVERTEMP_EXCEED_REPORT;
        }
        else
        {
            l_kvm_throt_reason = VDD_OVERTEMP;
        }
    }

    for (k=0; k<MAX_NUM_CORES; k++)
    {
        if( CORE_PRESENT(k) && !CORE_OFFLINE(k) )
        {
            l_core_freq = l_chip_fmax;
            l_core_reason = l_chip_reason;

            // Disable DPS in KVM
            if(!G_sysConfigData.system_type.kvm)
            {
                l_part = amec_part_find_by_core(&g_amec->part_config, k);

                // Check frequency request generated by DPS algorithms
                if(g_amec->proc[0].core[k].core_perf.dps_freq_request < l_core_freq)
                {
                    l_core_freq = g_amec->proc[0].core[k].core_perf.dps_freq_request;
                    l_core_reason = AMEC_VOTING_REASON_UTIL;
                }

                // Adjust frequency based on soft frequency boundaries
                if(l_part != NULL)
                {
                    if(l_core_freq < l_part->soft_fmin)
                    {
                        // Before enforcing a soft Fmin, make sure we don't
                        // have a thermal or power emergency
                        if(!(l_chip_reason & (AMEC_VOTING_REASON_PROC_THRM |
                                              AMEC_VOTING_REASON_VDD_THRM |
                                              AMEC_VOTING_REASON_PPB |
                                              AMEC_VOTING_REASON_PMAX |
                                              AMEC_VOTING_REASON_CONN_OC)))
                        {
                            l_core_freq = l_part->soft_fmin;
                            l_core_reason = AMEC_VOTING_REASON_SOFT_MIN;
                        }
                    }
                    else if(l_core_freq > l_part->soft_fmax)
                    {
                        l_core_freq = l_part->soft_fmax;
                        l_core_reason = AMEC_VOTING_REASON_SOFT_MAX;
                    }
                }
            }

            if(CURRENT_MODE() == OCC_MODE_NOMINAL)
            {
                // PROC_PCAP_NOM_VOTE
                if(g_amec->proc[0].pwr_votes.proc_pcap_nom_vote < l_core_freq)
                {
                    l_core_freq = g_amec->proc[0].pwr_votes.proc_pcap_nom_vote;
                    l_core_reason = AMEC_VOTING_REASON_PWR;
                    l_kvm_throt_reason = POWERCAP;
                }
            }
            else
            {
                // PROC_PCAP_VOTE
                if(g_amec->proc[0].pwr_votes.proc_pcap_vote < l_core_freq)
                {
                    l_core_freq = g_amec->proc[0].pwr_votes.proc_pcap_vote;
                    l_core_reason = AMEC_VOTING_REASON_PWR;

                    if(l_report_throttle_freq <= l_core_freq)
                    {
                        l_kvm_throt_reason = PCAP_EXCEED_REPORT;
                    }
                    else
                    {
                        l_kvm_throt_reason = POWERCAP;
                    }
                }
            }

            // Check IPS frequency request sent by Master OCC
            if(g_amec->slv_ips_freq_request != 0)
            {
                if(g_amec->slv_ips_freq_request < l_core_freq)
                {
                    l_core_freq = g_amec->slv_ips_freq_request;
                    l_core_reason = AMEC_VOTING_REASON_IPS;
                }
            }

            // Override frequency with request from Master OCC
            if(g_amec->foverride_enable)
            {
                if(g_amec->foverride != 0)
                {
                    // Override the frequency on all cores if Master OCC sends
                    // a non-zero request
                    l_core_freq = g_amec->foverride;
                    l_core_reason = AMEC_VOTING_REASON_OVERRIDE;
                }

                l_kvm_throt_reason = MANUFACTURING_OVERRIDE;
            }

            if(g_amec->pstate_foverride_enable)
            {
                if(g_amec->pstate_foverride != 0)
                {
                    // Override the frequency on all cores if the Global Pstate
                    // table has been modified
                    l_core_freq = g_amec->pstate_foverride;
                    l_core_reason = AMEC_VOTING_REASON_OVERRIDE;
                }
            }

            //Make sure the frequency is not less than the system min
            if(l_core_freq < g_amec->sys.fmin)
            {
                l_core_freq = g_amec->sys.fmin;
            }

            // Override frequency via Amester parameter interface
            if (g_amec->proc[0].parm_f_override_enable &&
                g_amec->proc[0].parm_f_override[k] > 0)
            {
                l_core_freq = g_amec->proc[0].parm_f_override[k];
                l_core_reason = AMEC_VOTING_REASON_OVERRIDE_CORE;
            }

            //STORE core frequency and reason
            g_amec->proc[0].core[k].f_request = l_core_freq;
            g_amec->proc[0].core[k].f_reason = l_core_reason;
            if(l_core_freq < l_core_freq_min)
            {
                // store the new lowest frequency and reason to be used after all cores checked
                l_core_freq_min = l_core_freq;
                l_current_reason = l_core_reason;
            }

            // Update the Amester parameter telling us the reason. Needed for
            // parameter array.
            g_amec->proc[0].parm_f_reason[k] = l_core_reason;

            //CURRENT_MODE() may be OCC_MODE_NOCHANGE because STATE change is processed
            //before MODE change
            if ((CURRENT_MODE() != OCC_MODE_DYN_POWER_SAVE)    &&
                (CURRENT_MODE() != OCC_MODE_DYN_POWER_SAVE_FP) &&
                (CURRENT_MODE() != OCC_MODE_NOM_PERFORMANCE)   &&
                (CURRENT_MODE() != OCC_MODE_MAX_PERFORMANCE)   &&
                (CURRENT_MODE() != OCC_MODE_FMF)               &&
                (CURRENT_MODE() != OCC_MODE_NOCHANGE)          &&
                (l_core_reason & NON_DPS_POWER_LIMITED))
            {
                G_non_dps_power_limited = TRUE;
            }
            else
            {
                G_non_dps_power_limited = FALSE;
            }

            // Update the sensor telling us what the requested frequency is
            sensor_update( AMECSENSOR_ARRAY_PTR(FREQREQC0,k),
                    (uint16_t) g_amec->proc[0].core[k].f_request);

#if DEBUG_PROC_VOTING_BOX
            /// This trace can be used to debug the voting
            /// box and control loops.  It will trace the reason why a
            /// controller is lowering the freq, but will only do it once in a
            /// row for the specific freq it wants to control to.  It assumes
            /// that all cores will be controlled to same freq.
            if(l_chip_fmax != g_amec->sys.fmax){
                static uint16_t L_trace = 0;
                if(l_chip_fmax != L_trace){
                    L_trace = l_chip_fmax;
                    TRAC_INFO("Core: %d, Freq: %d, Reason: %d",k,l_core_freq,l_core_reason);
                }
            }
#endif

            if(l_core_freq > l_core_freq_max)
            {
                l_core_freq_max = l_core_freq;
            }
        } // if core present and not offline
        else
        {
            //Set f_request to 0 so this core is ignored in amec_slv_freq_smh()
            g_amec->proc[0].core[k].f_request = 0;
            g_amec->proc[0].core[k].f_reason = 0;
        }
    }//End of for loop

    // Update the max core frequency only if it is non-zero; 0 means all cores are offline (stop 2 or greater).
    // This value is used by the power capping algorithm; updating it to 0 would cause power throttling when not needed.
    if(l_core_freq_max)
    {
        g_amec->proc[0].core_max_freq = l_core_freq_max;
        // update the overall reason driving frequency across all cores
        g_amec->proc[0].f_reason = l_current_reason;
    }

    //check if there was a throttle reason change
    if(l_kvm_throt_reason != G_amec_opal_proc_throt_reason)
    {
        //Always update G_amec_opal_proc_throt_reason; it is used to set poll response bits for all system types
        G_amec_opal_proc_throt_reason = l_kvm_throt_reason;

        // Only when running OPAL do we need to notify the dcom thread to update the table in HOMER
        if(G_sysConfigData.system_type.kvm)
        {
            ssx_semaphore_post(&G_dcomThreadWakeupSem);
        }
    }
    // For debug... if the minimum is lower than max, update the variables returned in the poll response to give the clipping reason
    g_amec->proc[0].core_min_freq = l_core_freq_min;
    if(l_core_freq_min < g_amec->sys.fmax)
    {
        if(l_current_reason == L_last_reason)
        {
            // same reason INC counter
            if(g_amec->proc[0].current_clip_count != 0xFF)
            {
                 g_amec->proc[0].current_clip_count++;
            }
        }
        else
        {
            // new reason update history and set counter to 1
            L_last_reason = l_current_reason;
            g_amec->proc[0].current_clip_count = 1;
            if( (g_amec->proc[0].chip_f_reason_history & l_current_reason) == 0)
            {
                g_amec->proc[0].chip_f_reason_history |= l_current_reason;
                TRAC_IMP("First time throttling for reason[0x%08X] History[0x%08X] freq = %d",
                          l_current_reason, g_amec->proc[0].chip_f_reason_history, l_core_freq_min);
            }
        }
    }
    else // no active clipping
    {
        L_last_reason = 0;
        g_amec->proc[0].current_clip_count = 0;
    }
}
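
The tail of the function keeps lightweight debug state about why frequency is being clipped: a history word latches each reason bit the first time it becomes the limiter (so the "First time throttling" trace fires once per reason), and a saturating counter tracks how many consecutive ticks the current reason has remained the limiter. A small sketch of that bookkeeping follows, with hypothetical globals and printf standing in for TRAC_IMP.

#include <stdint.h>
#include <stdio.h>

static uint32_t g_reason_history = 0;   /* one bit per reason ever seen          */
static uint32_t g_last_reason    = 0;   /* reason seen on the previous tick      */
static uint8_t  g_clip_count     = 0;   /* consecutive ticks, saturates at 255   */

static void track_clip_reason(uint32_t reason, int clipped)
{
    if (!clipped)
    {
        /* No active clipping: clear the per-tick state, keep the history. */
        g_last_reason = 0;
        g_clip_count  = 0;
        return;
    }

    if (reason == g_last_reason)
    {
        if (g_clip_count != 0xFF)       /* saturate instead of wrapping */
            g_clip_count++;
    }
    else
    {
        g_last_reason = reason;
        g_clip_count  = 1;
        if ((g_reason_history & reason) == 0)
        {
            g_reason_history |= reason;
            printf("First time throttling for reason 0x%08X\n", (unsigned)reason);
        }
    }
}
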