// Function Specification
//
// Name: amec_calc_dps_util_counters
//
// Description: Calculate the performance counter for a core.
//
// End Function Specification
void amec_calc_dps_util_counters(const uint8_t i_core_id)
{
    /*------------------------------------------------------------------------*/
    /*  Local Variables                                                        */
    /*------------------------------------------------------------------------*/
    amec_part_t                 *l_part = NULL;
    amec_core_perf_counter_t    *l_perf = NULL;
    sensor_ptr_t                l_sensor = NULL;
    uint16_t                    l_utilization = 0;

    /*------------------------------------------------------------------------*/
    /*  Code                                                                   */
    /*------------------------------------------------------------------------*/
    l_perf = &g_amec->proc[0].core[i_core_id].core_perf;

    // Read sensor for this core
    l_sensor = AMECSENSOR_ARRAY_PTR(UTIL2MSP0C0, i_core_id);
    l_utilization = l_sensor->sample;

    l_part = amec_part_find_by_core(&g_amec->part_config, i_core_id);

    // Type 41 input: Check if the core's utilization is within
    // epsilon of the slack threshold; if yes, declare the core
    // active
    if (l_part != NULL)
    {
        if (l_utilization > (l_part->dpsalg.tlutil - l_part->dpsalg.epsilon_perc))
        {
            // Indicate core is active
            l_perf->util_active_core_counter++;

            if (l_utilization < l_part->dpsalg.tlutil)
            {
                // Indicate core has some slack
                l_perf->util_slack_core_counter++;
            }
        }
    }
}
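
// Illustrative sketch only (never called by the algorithm above): the counter
// update in amec_calc_dps_util_counters() can be read as a band check around
// the slack threshold.  The numeric values below are hypothetical examples;
// in the real code tlutil and epsilon_perc come from the partition's DPS
// settings and l_utilization comes from the per-core utilization sensor.
#if 0
static void example_dps_band_check(void)
{
    uint16_t l_tlutil  = 9800;   // hypothetical slack threshold
    uint16_t l_epsilon = 200;    // hypothetical epsilon band
    uint16_t l_util    = 9700;   // hypothetical core utilization sample

    if (l_util > (uint16_t)(l_tlutil - l_epsilon))
    {
        // Core counts as active (utilization above tlutil - epsilon)...
        if (l_util < l_tlutil)
        {
            // ...and also has slack (still below tlutil), so both
            // util_active_core_counter and util_slack_core_counter
            // would be incremented for this core.
        }
    }
}
#endif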
// Function Specification
//
// Name: amec_slv_voting_box
//
// Description: Slave OCC's voting box that decides the frequency request.
//              This function will run every tick.
//
// Thread: RealTime Loop
//
// Task Flags:
//
// End Function Specification
void amec_slv_voting_box(void)
{
    /*------------------------------------------------------------------------*/
    /*  Local Variables                                                        */
    /*------------------------------------------------------------------------*/
    uint16_t                    k = 0;
    uint16_t                    l_chip_fmax = g_amec->sys.fmax;
    uint16_t                    l_core_freq = 0;
    uint32_t                    l_chip_reason = 0;
    uint32_t                    l_core_reason = 0;
    uint8_t                     l_kvm_throt_reason = NO_THROTTLE;
    amec_part_t                 *l_part = NULL;
    bool                        l_freq_req_changed = FALSE;

    /*------------------------------------------------------------------------*/
    /*  Code                                                                   */
    /*------------------------------------------------------------------------*/

    // Voting Box for CPU speed.
    // This function implements the voting box to decide which input gets the
    // right to actuate the system.

    // Reset the maximum core frequency requested prior to recalculation.
    g_amec->proc[0].core_max_freq = 0;

    // PPB_FMAX
    if(g_amec->proc[0].pwr_votes.ppb_fmax < l_chip_fmax)
    {
        l_chip_fmax = g_amec->proc[0].pwr_votes.ppb_fmax;
        l_chip_reason = AMEC_VOTING_REASON_PPB;
        l_kvm_throt_reason = POWERCAP;
    }

    // PMAX_CLIP_FREQ
    if(g_amec->proc[0].pwr_votes.pmax_clip_freq < l_chip_fmax)
    {
        l_chip_fmax = g_amec->proc[0].pwr_votes.pmax_clip_freq;
        l_chip_reason = AMEC_VOTING_REASON_PMAX;
        l_kvm_throt_reason = POWER_SUPPLY_FAILURE;
    }

    // Pmax_clip frequency request if there is an APSS failure
    if(g_amec->proc[0].pwr_votes.apss_pmax_clip_freq < l_chip_fmax)
    {
        l_chip_fmax = g_amec->proc[0].pwr_votes.apss_pmax_clip_freq;
        l_chip_reason = AMEC_VOTING_REASON_APSS_PMAX;
        l_kvm_throt_reason = POWER_SUPPLY_FAILURE;
    }

    // THERMALPROC.FREQ_REQUEST
    // Thermal controller input based on processor temperature
    if(g_amec->thermalproc.freq_request < l_chip_fmax)
    {
        l_chip_fmax = g_amec->thermalproc.freq_request;
        l_chip_reason = AMEC_VOTING_REASON_PROC_THRM;
        l_kvm_throt_reason = CPU_OVERTEMP;
    }

    // Controller request based on VRHOT signal from processor regulator
    if(g_amec->vrhotproc.freq_request < l_chip_fmax)
    {
        l_chip_fmax = g_amec->vrhotproc.freq_request;
        l_chip_reason = AMEC_VOTING_REASON_VRHOT_THRM;
        l_kvm_throt_reason = CPU_OVERTEMP;
    }

    // CONN_OC_VOTE
    if(g_amec->proc[0].pwr_votes.conn_oc_vote < l_chip_fmax)
    {
        l_chip_fmax = g_amec->proc[0].pwr_votes.conn_oc_vote;
        l_chip_reason = AMEC_VOTING_REASON_CONN_OC;
        l_kvm_throt_reason = OVERCURRENT;
    }

    for (k=0; k<MAX_NUM_CORES; k++)
    {
        if(CORE_PRESENT(k))
        {
            l_core_freq = l_chip_fmax;
            l_core_reason = l_chip_reason;

            // Disable DPS in KVM
            if(!G_sysConfigData.system_type.kvm)
            {
                l_part = amec_part_find_by_core(&g_amec->part_config, k);

                // Check frequency request generated by DPS algorithms
                if(g_amec->proc[0].core[k].core_perf.dps_freq_request < l_core_freq)
                {
                    l_core_freq = g_amec->proc[0].core[k].core_perf.dps_freq_request;
                    l_core_reason = AMEC_VOTING_REASON_UTIL;
                }

                // Adjust frequency based on soft frequency boundaries
                if(l_part != NULL)
                {
                    if(l_core_freq < l_part->soft_fmin)
                    {
                        // Before enforcing a soft Fmin, make sure we don't
                        // have a thermal or power emergency
                        if(!(l_chip_reason & (AMEC_VOTING_REASON_PROC_THRM |
                                              AMEC_VOTING_REASON_VRHOT_THRM |
                                              AMEC_VOTING_REASON_PPB |
                                              AMEC_VOTING_REASON_PMAX |
                                              AMEC_VOTING_REASON_CONN_OC)))
                        {
                            l_core_freq = l_part->soft_fmin;
                            l_core_reason = AMEC_VOTING_REASON_SOFT_MIN;
                        }
                    }
                    else if(l_core_freq > l_part->soft_fmax)
                    {
                        l_core_freq = l_part->soft_fmax;
                        l_core_reason = AMEC_VOTING_REASON_SOFT_MAX;
                    }
                }
            }

            if(CURRENT_MODE() == OCC_MODE_NOMINAL)
            {
                // PROC_PCAP_NOM_VOTE
                if(g_amec->proc[0].pwr_votes.proc_pcap_nom_vote < l_core_freq)
                {
                    l_core_freq = g_amec->proc[0].pwr_votes.proc_pcap_nom_vote;
                    l_core_reason = AMEC_VOTING_REASON_PWR;
                    l_kvm_throt_reason = POWERCAP;
                }
            }
            else
            {
                // PROC_PCAP_VOTE
                if(g_amec->proc[0].pwr_votes.proc_pcap_vote < l_core_freq)
                {
                    l_core_freq = g_amec->proc[0].pwr_votes.proc_pcap_vote;
                    l_core_reason = AMEC_VOTING_REASON_PWR;
                    l_kvm_throt_reason = POWERCAP;
                }
            }

            // Check IPS frequency request sent by Master OCC
            if(g_amec->slv_ips_freq_request != 0)
            {
                if(g_amec->slv_ips_freq_request < l_core_freq)
                {
                    l_core_freq = g_amec->slv_ips_freq_request;
                    l_core_reason = AMEC_VOTING_REASON_IPS;
                }
            }

            // Override frequency with request from Master OCC
            if(g_amec->foverride_enable)
            {
                if(g_amec->foverride != 0)
                {
                    // Override the frequency on all cores if Master OCC sends
                    // a non-zero request
                    l_core_freq = g_amec->foverride;
                    l_core_reason = AMEC_VOTING_REASON_OVERRIDE;
                }
            }

            if(g_amec->pstate_foverride_enable)
            {
                if(g_amec->pstate_foverride != 0)
                {
                    // Override the frequency on all cores if the Global Pstate
                    // table has been modified
                    l_core_freq = g_amec->pstate_foverride;
                    l_core_reason = AMEC_VOTING_REASON_OVERRIDE;
                }
            }

            // Make sure the frequency is not less than the system min
            if(l_core_freq < g_amec->sys.fmin)
            {
                l_core_freq = g_amec->sys.fmin;
            }

            // Override frequency via Amester parameter interface
            if (g_amec->proc[0].parm_f_override_enable &&
                g_amec->proc[0].parm_f_override[k] > 0)
            {
                l_core_freq = g_amec->proc[0].parm_f_override[k];
                l_core_reason = AMEC_VOTING_REASON_OVERRIDE_CORE;
            }

            // If frequency has changed, set the flag
            if ( (l_core_freq != g_amec->proc[0].core[k].f_request) ||
                 (l_core_freq != g_amec->sys.fmax))
            {
                l_freq_req_changed = TRUE;
            }

            // Store core frequency and reason
            g_amec->proc[0].core[k].f_request = l_core_freq;
            g_amec->proc[0].core[k].f_reason = l_core_reason;

            // Update the Amester parameter telling us the reason. Needed for
            // the parameter array.
            g_amec->proc[0].parm_f_reason[k] = l_core_reason;

            // CURRENT_MODE() may be OCC_MODE_NOCHANGE because a STATE change
            // is processed before a MODE change
            if ((CURRENT_MODE() != OCC_MODE_DYN_POWER_SAVE)    &&
                (CURRENT_MODE() != OCC_MODE_DYN_POWER_SAVE_FP) &&
                (CURRENT_MODE() != OCC_MODE_NOCHANGE)          &&
                (l_core_reason & NON_DPS_POWER_LIMITED))
            {
                G_non_dps_power_limited = TRUE;
            }
            else
            {
                G_non_dps_power_limited = FALSE;
            }

            // Update the sensor telling us what the requested frequency is
            sensor_update( AMECSENSOR_ARRAY_PTR(FREQ250USP0C0,k),
                           (uint16_t) g_amec->proc[0].core[k].f_request);

#if 0
            /// TODO: This can be deleted if deemed useless
            /// This trace can be used to debug the voting box and control
            /// loops. It will trace the reason why a controller is lowering
            /// the freq, but will only do it once in a row for the specific
            /// freq it wants to control to. It assumes that all cores will be
            /// controlled to the same freq.
            if(l_chip_fmax != g_amec->sys.fmax)
            {
                static uint16_t L_trace = 0;
                if(l_chip_fmax != L_trace)
                {
                    L_trace = l_chip_fmax;
                    TRAC_INFO("Core: %d, Freq: %d, Reason: %d",k,l_core_freq,l_core_reason);
                }
            }
#endif

            if(l_core_freq > g_amec->proc[0].core_max_freq)
            {
                g_amec->proc[0].core_max_freq = l_core_freq;
            }
        }
        else
        {
            l_core_freq = 0;
            l_core_reason = 0;
        }
    } // End of for loop

    // Check if the frequency is going to be changing
    if( l_freq_req_changed == TRUE )
    {
        G_time_until_freq_check = FREQ_CHG_CHECK_TIME;
    }
    else if (G_time_until_freq_check != 0)
    {
        G_time_until_freq_check--;
    }

    // Convert POWERCAP reason to POWER_SUPPLY_FAILURE if ovs/failsafe is asserted
    if((l_kvm_throt_reason == POWERCAP) &&
       (AMEC_INTF_GET_FAILSAFE() || AMEC_INTF_GET_OVERSUBSCRIPTION()))
    {
        l_kvm_throt_reason = POWER_SUPPLY_FAILURE;
    }

    // Check if we need to update the throttle reason in HOMER
    if(G_sysConfigData.system_type.kvm &&
       (l_kvm_throt_reason != G_amec_kvm_throt_reason))
    {
        // Notify dcom thread to update the table
        G_amec_kvm_throt_reason = l_kvm_throt_reason;
        ssx_semaphore_post(&G_dcomThreadWakeupSem);
    }
}
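
// A minimal sketch of the pattern the voting box above applies repeatedly:
// each input may only lower the running frequency vote, and whichever input
// lowers it last records its reason.  The helper name below is hypothetical
// and is not part of this file; it is shown only to make the pattern explicit.
#if 0
static inline void example_apply_freq_vote(uint16_t i_vote_freq,
                                           uint32_t i_vote_reason,
                                           uint16_t *io_fmax,
                                           uint32_t *io_reason)
{
    // Lowest frequency request wins; remember which input clipped it.
    if (i_vote_freq < *io_fmax)
    {
        *io_fmax   = i_vote_freq;
        *io_reason = i_vote_reason;
    }
}

// Example usage mirroring the chip-level votes in amec_slv_voting_box():
//   example_apply_freq_vote(g_amec->proc[0].pwr_votes.ppb_fmax,
//                           AMEC_VOTING_REASON_PPB,
//                           &l_chip_fmax, &l_chip_reason);
#endif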
//*************************************************************************
// Functions
//*************************************************************************
void amec_vectorize_core_sensor(sensor_t * l_sensor,
                                vectorSensor_t * l_vector,
                                const VECTOR_SENSOR_OP l_op,
                                uint16_t l_sensor_elem_array_gsid)
{
#define VECTOR_CREATE_FAILURE   1
#define VECTOR_ADD_ELEM_FAILURE 2

    int         l_idx = 0;      // Used to index the for loops for vector create
    int         l_rc = 0;       // Indicates failure to add a sensor to vector
    uint16_t    l_gsid = 0xFFFF;
    errlHndl_t  l_err = NULL;

    do
    {
        // Grab GSID for errl in case of failure
        l_gsid = l_sensor->gsid;

        // Vectorize the sensor
        sensor_vectorize(l_sensor, l_vector, l_op);

        // If vectorize worked, add elements to the vector sensor
        if(NULL != l_sensor->vector)
        {
            // Loop through cores
            for(l_idx = 0; l_idx < MAX_NUM_CORES; l_idx++)
            {
                // Add elements to the vector sensor
                sensor_vector_elem_add(l_sensor->vector,
                                       l_idx,
                                       AMECSENSOR_ARRAY_PTR(l_sensor_elem_array_gsid, l_idx));

                // If core is not present, disable this vector element
                if(!CORE_PRESENT(l_idx))
                {
                    sensor_vector_elem_enable(l_sensor->vector,
                                              l_idx,
                                              0 /* Disable */);
                }
            }

            // Sanity check: we should have MAX_NUM_CORES entries in the
            // vector sensor
            if(l_sensor->vector->size != MAX_NUM_CORES)
            {
                // Set l_rc and break out so that we can create an errl
                l_rc = VECTOR_ADD_ELEM_FAILURE;
                break;
            }
        }
        else
        {
            // Set l_rc and break out so that we can create an errl
            l_rc = VECTOR_CREATE_FAILURE;
            break;
        }
    } while(0);

    if(l_rc)
    {
        // If we failed to vectorize the sensor, then there is a problem.
        TRAC_ERR("Failed to vectorize sensor[0x%x, 0x%x]", l_gsid, l_rc);

        /* @
         * @errortype
         * @moduleid    AMEC_VECTORIZE_FW_SENSORS
         * @reasoncode  SSX_GENERIC_FAILURE
         * @userdata1   return code
         * @userdata2   gsid of failed sensor
         * @userdata4   OCC_NO_EXTENDED_RC
         * @devdesc     Firmware failure in call to vectorize sensor
         */
        l_err = createErrl(AMEC_VECTORIZE_FW_SENSORS,   //modId
                           SSX_GENERIC_FAILURE,         //reasoncode
                           OCC_NO_EXTENDED_RC,          //Extended reason code
                           ERRL_SEV_UNRECOVERABLE,      //Severity
                           NULL,                        //TODO: create trace //Trace Buf
                           DEFAULT_TRACE_SIZE,          //Trace Size
                           l_rc,                        //userdata1
                           l_gsid);                     //userdata2

        REQUEST_RESET(l_err);
    }
}
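
// Hypothetical usage sketch for amec_vectorize_core_sensor(): the caller
// supplies a chip-level sensor, storage for its vector, a reduction
// operation, and the GSID of the first per-core element sensor.  The sensor
// name, vector variable, and operation value below are placeholders for
// illustration only and are not identifiers defined in this file.
#if 0
static vectorSensor_t G_example_freq_vector;    // hypothetical vector storage

void example_vectorize_freq_sensor(void)
{
    // Build a vector over the per-core FREQ250USP0C0..n sensors, assuming a
    // MAX-style reduction value exists in the VECTOR_SENSOR_OP enum.
    amec_vectorize_core_sensor(AMECSENSOR_PTR(FREQA),   // hypothetical chip-level sensor
                               &G_example_freq_vector,
                               VECTOR_OP_MAX,            // assumed enum value
                               FREQ250USP0C0);
}
#endif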
// Function Specification
//
// Name: amec_dps_main
//
// Description: Main DPS function.
//
// End Function Specification
void amec_dps_main(void)
{
    /*------------------------------------------------------------------------*/
    /*  Local Variables                                                        */
    /*------------------------------------------------------------------------*/
    uint16_t                    l_idx = 0;

    /*------------------------------------------------------------------------*/
    /*  Code                                                                   */
    /*------------------------------------------------------------------------*/

    // First, update the utilization variables for all cores
    amec_dps_update_core_util();

    // Loop through all core groups and apply energy-savings policy
    for (l_idx=0; l_idx<AMEC_PART_MAX_PART; l_idx++)
    {
        if (!g_amec->part_config.part_list[l_idx].valid)
        {
            continue;
        }

        if (g_amec->part_config.part_list[l_idx].ncores == 0)
        {
            continue;
        }

        switch (g_amec->part_config.part_list[l_idx].es_policy)
        {
            case OCC_INTERNAL_MODE_DPS:
            case OCC_INTERNAL_MODE_DPS_MP:
                amec_dps_partition_update_sensors(l_idx);
                amec_dps_partition_alg(l_idx);
                break;

            default:
                // No energy-savings policy: DPS vote is already disabled when
                // the policy is first selected for the partition or when core
                // ownership changes (amec_part.c)
                break;
        }
    }

    // For GA1, we need to send the Fwish to the Master OCC
    if ((g_amec->part_config.part_list[0].es_policy == OCC_INTERNAL_MODE_DPS) ||
        (g_amec->part_config.part_list[0].es_policy == OCC_INTERNAL_MODE_DPS_MP))
    {
        // If this core group policy is one of the DPS modes, then send the
        // frequency request from the DPS algorithm
        G_dcom_slv_outbox_tx.fwish =
            g_amec->part_config.part_list[0].dpsalg.freq_request;
    }
    else
    {
        // Else, send the nominal frequency of the system
        G_dcom_slv_outbox_tx.fwish =
            g_amec->part_mode_freq[OCC_INTERNAL_MODE_NOM].fmax;
    }

    // We also need to send the Factual to the Master OCC
    for (l_idx=0; l_idx<MAX_NUM_CORES; l_idx++)
    {
        // Find the first valid core and send its frequency
        if (CORE_PRESENT(l_idx))
        {
            G_dcom_slv_outbox_tx.factual =
                AMECSENSOR_ARRAY_PTR(FREQ250USP0C0,l_idx)->sample;
            break;
        }
    }
}
// Function Specification
//
// Name: amec_slv_proc_voting_box
//
// Description: Slave OCC's voting box that decides the frequency request.
//              This function will run every tick.
//
// Thread: RealTime Loop
//
// Task Flags:
//
// End Function Specification
void amec_slv_proc_voting_box(void)
{
    /*------------------------------------------------------------------------*/
    /*  Local Variables                                                        */
    /*------------------------------------------------------------------------*/
    uint16_t                    k = 0;
    uint16_t                    l_chip_fmax = g_amec->sys.fmax;
    uint16_t                    l_core_freq = 0;
    uint16_t                    l_core_freq_max = 0;                 // max freq across all cores
    uint16_t                    l_core_freq_min = g_amec->sys.fmax;  // min freq across all cores
    uint32_t                    l_current_reason = 0;                // used for debug purposes
    static uint32_t             L_last_reason = 0;                   // used for debug purposes
    uint32_t                    l_chip_reason = 0;
    uint32_t                    l_core_reason = 0;
    amec_proc_voting_reason_t   l_kvm_throt_reason = NO_THROTTLE;
    amec_part_t                 *l_part = NULL;

    // Frequency threshold for reporting throttling
    uint16_t l_report_throttle_freq =
        G_sysConfigData.system_type.report_dvfs_nom ?
            G_sysConfigData.sys_mode_freq.table[OCC_MODE_NOMINAL] :
            G_sysConfigData.sys_mode_freq.table[OCC_MODE_TURBO];

    /*------------------------------------------------------------------------*/
    /*  Code                                                                   */
    /*------------------------------------------------------------------------*/

    if (!G_allowPstates)
    {
        // Don't allow pstates to be sent until after the initial mode has been set
        if ( (CURRENT_MODE()) || (G_sysConfigData.system_type.kvm) )
        {
            G_allowPstates = TRUE;
        }
    }

    // Voting Box for CPU speed.
    // This function implements the voting box to decide which input gets the
    // right to actuate the system.

    // Check for oversubscription if the redundant power supply policy is being enforced
    if (G_sysConfigData.system_type.non_redund_ps == false)
    {
        // If in oversubscription and there is a defined (non 0) OVERSUB
        // frequency less than max, then use it
        if( (AMEC_INTF_GET_OVERSUBSCRIPTION()) &&
            (G_sysConfigData.sys_mode_freq.table[OCC_MODE_OVERSUB]) &&
            (G_sysConfigData.sys_mode_freq.table[OCC_MODE_OVERSUB] < l_chip_fmax) )
        {
            l_chip_fmax = G_sysConfigData.sys_mode_freq.table[OCC_MODE_OVERSUB];
            l_chip_reason = AMEC_VOTING_REASON_OVERSUB;
        }
    }

    // If there is an active VRM fault and a defined (non 0) VRM N frequency
    // less than max, use it
    if( (g_amec->sys.vrm_fault_status) &&
        (G_sysConfigData.sys_mode_freq.table[OCC_MODE_VRM_N]) &&
        (G_sysConfigData.sys_mode_freq.table[OCC_MODE_VRM_N] < l_chip_fmax) )
    {
        l_chip_fmax = G_sysConfigData.sys_mode_freq.table[OCC_MODE_VRM_N];
        l_chip_reason = AMEC_VOTING_REASON_VRM_N;
    }

    // PPB_FMAX
    if(g_amec->proc[0].pwr_votes.ppb_fmax < l_chip_fmax)
    {
        l_chip_fmax = g_amec->proc[0].pwr_votes.ppb_fmax;
        l_chip_reason = AMEC_VOTING_REASON_PPB;

        if(l_report_throttle_freq <= l_chip_fmax)
        {
            l_kvm_throt_reason = PCAP_EXCEED_REPORT;
        }
        else
        {
            l_kvm_throt_reason = POWERCAP;
        }
    }

    // PMAX_CLIP_FREQ
    if(g_amec->proc[0].pwr_votes.pmax_clip_freq < l_chip_fmax)
    {
        l_chip_fmax = g_amec->proc[0].pwr_votes.pmax_clip_freq;
        l_chip_reason = AMEC_VOTING_REASON_PMAX;
        l_kvm_throt_reason = POWER_SUPPLY_FAILURE;
    }

    // Pmax_clip frequency request if there is an APSS failure
    if(g_amec->proc[0].pwr_votes.apss_pmax_clip_freq < l_chip_fmax)
    {
        l_chip_fmax = g_amec->proc[0].pwr_votes.apss_pmax_clip_freq;
        l_chip_reason = AMEC_VOTING_REASON_APSS_PMAX;
        l_kvm_throt_reason = POWER_SUPPLY_FAILURE;
    }

    // THERMALPROC.FREQ_REQUEST
    // Thermal controller input based on processor temperature
    if(g_amec->thermalproc.freq_request < l_chip_fmax)
    {
        l_chip_fmax = g_amec->thermalproc.freq_request;
        l_chip_reason = AMEC_VOTING_REASON_PROC_THRM;

        if( l_report_throttle_freq <= l_chip_fmax)
        {
            l_kvm_throt_reason = PROC_OVERTEMP_EXCEED_REPORT;
        }
        else
        {
            l_kvm_throt_reason = CPU_OVERTEMP;
        }
    }

    // Thermal controller input based on VRM Vdd temperature
    if(g_amec->thermalvdd.freq_request < l_chip_fmax)
    {
        l_chip_fmax = g_amec->thermalvdd.freq_request;
        l_chip_reason = AMEC_VOTING_REASON_VDD_THRM;

        if( l_report_throttle_freq <= l_chip_fmax)
        {
            l_kvm_throt_reason = VDD_OVERTEMP_EXCEED_REPORT;
        }
        else
        {
            l_kvm_throt_reason = VDD_OVERTEMP;
        }
    }

    for (k=0; k<MAX_NUM_CORES; k++)
    {
        if( CORE_PRESENT(k) && !CORE_OFFLINE(k) )
        {
            l_core_freq = l_chip_fmax;
            l_core_reason = l_chip_reason;

            // Disable DPS in KVM
            if(!G_sysConfigData.system_type.kvm)
            {
                l_part = amec_part_find_by_core(&g_amec->part_config, k);

                // Check frequency request generated by DPS algorithms
                if(g_amec->proc[0].core[k].core_perf.dps_freq_request < l_core_freq)
                {
                    l_core_freq = g_amec->proc[0].core[k].core_perf.dps_freq_request;
                    l_core_reason = AMEC_VOTING_REASON_UTIL;
                }

                // Adjust frequency based on soft frequency boundaries
                if(l_part != NULL)
                {
                    if(l_core_freq < l_part->soft_fmin)
                    {
                        // Before enforcing a soft Fmin, make sure we don't
                        // have a thermal or power emergency
                        if(!(l_chip_reason & (AMEC_VOTING_REASON_PROC_THRM |
                                              AMEC_VOTING_REASON_VDD_THRM |
                                              AMEC_VOTING_REASON_PPB |
                                              AMEC_VOTING_REASON_PMAX |
                                              AMEC_VOTING_REASON_CONN_OC)))
                        {
                            l_core_freq = l_part->soft_fmin;
                            l_core_reason = AMEC_VOTING_REASON_SOFT_MIN;
                        }
                    }
                    else if(l_core_freq > l_part->soft_fmax)
                    {
                        l_core_freq = l_part->soft_fmax;
                        l_core_reason = AMEC_VOTING_REASON_SOFT_MAX;
                    }
                }
            }

            if(CURRENT_MODE() == OCC_MODE_NOMINAL)
            {
                // PROC_PCAP_NOM_VOTE
                if(g_amec->proc[0].pwr_votes.proc_pcap_nom_vote < l_core_freq)
                {
                    l_core_freq = g_amec->proc[0].pwr_votes.proc_pcap_nom_vote;
                    l_core_reason = AMEC_VOTING_REASON_PWR;
                    l_kvm_throt_reason = POWERCAP;
                }
            }
            else
            {
                // PROC_PCAP_VOTE
                if(g_amec->proc[0].pwr_votes.proc_pcap_vote < l_core_freq)
                {
                    l_core_freq = g_amec->proc[0].pwr_votes.proc_pcap_vote;
                    l_core_reason = AMEC_VOTING_REASON_PWR;

                    if(l_report_throttle_freq <= l_core_freq)
                    {
                        l_kvm_throt_reason = PCAP_EXCEED_REPORT;
                    }
                    else
                    {
                        l_kvm_throt_reason = POWERCAP;
                    }
                }
            }

            // Check IPS frequency request sent by Master OCC
            if(g_amec->slv_ips_freq_request != 0)
            {
                if(g_amec->slv_ips_freq_request < l_core_freq)
                {
                    l_core_freq = g_amec->slv_ips_freq_request;
                    l_core_reason = AMEC_VOTING_REASON_IPS;
                }
            }

            // Override frequency with request from Master OCC
            if(g_amec->foverride_enable)
            {
                if(g_amec->foverride != 0)
                {
                    // Override the frequency on all cores if Master OCC sends
                    // a non-zero request
                    l_core_freq = g_amec->foverride;
                    l_core_reason = AMEC_VOTING_REASON_OVERRIDE;
                }

                l_kvm_throt_reason = MANUFACTURING_OVERRIDE;
            }

            if(g_amec->pstate_foverride_enable)
            {
                if(g_amec->pstate_foverride != 0)
                {
                    // Override the frequency on all cores if the Global Pstate
                    // table has been modified
                    l_core_freq = g_amec->pstate_foverride;
                    l_core_reason = AMEC_VOTING_REASON_OVERRIDE;
                }
            }

            // Make sure the frequency is not less than the system min
            if(l_core_freq < g_amec->sys.fmin)
            {
                l_core_freq = g_amec->sys.fmin;
            }

            // Override frequency via Amester parameter interface
            if (g_amec->proc[0].parm_f_override_enable &&
                g_amec->proc[0].parm_f_override[k] > 0)
            {
                l_core_freq = g_amec->proc[0].parm_f_override[k];
                l_core_reason = AMEC_VOTING_REASON_OVERRIDE_CORE;
            }

            // Store core frequency and reason
            g_amec->proc[0].core[k].f_request = l_core_freq;
            g_amec->proc[0].core[k].f_reason = l_core_reason;

            if(l_core_freq < l_core_freq_min)
            {
                // Store the new lowest frequency and reason to be used after
                // all cores are checked
                l_core_freq_min = l_core_freq;
                l_current_reason = l_core_reason;
            }

            // Update the Amester parameter telling us the reason. Needed for
            // the parameter array.
            g_amec->proc[0].parm_f_reason[k] = l_core_reason;

            // CURRENT_MODE() may be OCC_MODE_NOCHANGE because a STATE change
            // is processed before a MODE change
            if ((CURRENT_MODE() != OCC_MODE_DYN_POWER_SAVE)    &&
                (CURRENT_MODE() != OCC_MODE_DYN_POWER_SAVE_FP) &&
                (CURRENT_MODE() != OCC_MODE_NOM_PERFORMANCE)   &&
                (CURRENT_MODE() != OCC_MODE_MAX_PERFORMANCE)   &&
                (CURRENT_MODE() != OCC_MODE_FMF)               &&
                (CURRENT_MODE() != OCC_MODE_NOCHANGE)          &&
                (l_core_reason & NON_DPS_POWER_LIMITED))
            {
                G_non_dps_power_limited = TRUE;
            }
            else
            {
                G_non_dps_power_limited = FALSE;
            }

            // Update the sensor telling us what the requested frequency is
            sensor_update( AMECSENSOR_ARRAY_PTR(FREQREQC0,k),
                           (uint16_t) g_amec->proc[0].core[k].f_request);

#if DEBUG_PROC_VOTING_BOX
            /// This trace can be used to debug the voting box and control
            /// loops. It will trace the reason why a controller is lowering
            /// the freq, but will only do it once in a row for the specific
            /// freq it wants to control to. It assumes that all cores will be
            /// controlled to the same freq.
            if(l_chip_fmax != g_amec->sys.fmax)
            {
                static uint16_t L_trace = 0;
                if(l_chip_fmax != L_trace)
                {
                    L_trace = l_chip_fmax;
                    TRAC_INFO("Core: %d, Freq: %d, Reason: %d",k,l_core_freq,l_core_reason);
                }
            }
#endif

            if(l_core_freq > l_core_freq_max)
            {
                l_core_freq_max = l_core_freq;
            }
        } // if core present and not offline
        else
        {
            // Set f_request to 0 so this core is ignored in amec_slv_freq_smh()
            g_amec->proc[0].core[k].f_request = 0;
            g_amec->proc[0].core[k].f_reason = 0;
        }
    } // End of for loop

    // Update the max core frequency only if it is not 0; it will be 0 only if
    // all cores are offline (stop 2 or greater). This is used by the power
    // capping algorithm, and updating it to 0 would cause power throttling
    // when it is not needed.
    if(l_core_freq_max)
    {
        g_amec->proc[0].core_max_freq = l_core_freq_max;

        // Update the overall reason driving frequency across all cores
        g_amec->proc[0].f_reason = l_current_reason;
    }

    // Check if there was a throttle reason change
    if(l_kvm_throt_reason != G_amec_opal_proc_throt_reason)
    {
        // Always update G_amec_opal_proc_throt_reason; this is used to set
        // poll response bits for all system types
        G_amec_opal_proc_throt_reason = l_kvm_throt_reason;

        // Only if running OPAL, notify the dcom thread to update the table in
        // HOMER for OPAL
        if(G_sysConfigData.system_type.kvm)
        {
            ssx_semaphore_post(&G_dcomThreadWakeupSem);
        }
    }

    // For debug... if lower than max, update the vars returned in the poll
    // response to give the clipping reason
    g_amec->proc[0].core_min_freq = l_core_freq_min;
    if(l_core_freq_min < g_amec->sys.fmax)
    {
        if(l_current_reason == L_last_reason)
        {
            // Same reason: increment counter
            if(g_amec->proc[0].current_clip_count != 0xFF)
            {
                g_amec->proc[0].current_clip_count++;
            }
        }
        else
        {
            // New reason: update history and set counter to 1
            L_last_reason = l_current_reason;
            g_amec->proc[0].current_clip_count = 1;
            if( (g_amec->proc[0].chip_f_reason_history & l_current_reason) == 0)
            {
                g_amec->proc[0].chip_f_reason_history |= l_current_reason;
                TRAC_IMP("First time throttling for reason[0x%08X] History[0x%08X] freq = %d",
                         l_current_reason, g_amec->proc[0].chip_f_reason_history, l_core_freq_min);
            }
        }
    }
    else // no active clipping
    {
        L_last_reason = 0;
        g_amec->proc[0].current_clip_count = 0;
    }
}
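
// Worked example of the l_report_throttle_freq checks in
// amec_slv_proc_voting_box() (the numbers are hypothetical): with
// report_dvfs_nom set, the report threshold is the nominal frequency.  If
// nominal is 3000 and the power cap clips the chip to 3200, the clipped
// frequency is still at or above nominal, so PCAP_EXCEED_REPORT is recorded
// instead of POWERCAP; if the cap clips to 2800 (below nominal), POWERCAP is
// recorded.  The same pattern applies to the thermal and Vdd overtemp
// reasons above.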
void amec_update_fw_sensors(void)
{
    errlHndl_t l_err                = NULL;
    int rc                          = 0;
    int rc2                         = 0;
    static bool l_first_call        = TRUE;
    bool l_gpe0_idle, l_gpe1_idle;
    static int L_consec_trace_count = 0;

    // ------------------------------------------------------
    // Update OCC Firmware Sensors from last tick
    // ------------------------------------------------------
    int l_last_state = G_fw_timing.amess_state;

    // RTLtickdur = duration of last tick's RTL ISR (max = 250us)
    sensor_update( AMECSENSOR_PTR(RTLtickdur), G_fw_timing.rtl_dur);

    // AMEintdur = duration of last tick's AMEC portion of RTL ISR
    sensor_update( AMECSENSOR_PTR(AMEintdur), G_fw_timing.ameint_dur);

    // AMESSdurX = duration of last tick's AMEC state
    if(l_last_state >= NUM_AMEC_SMH_STATES)
    {
        // Sanity check. Trace this out, even though it should never happen.
        TRAC_INFO("AMEC State Invalid, Sensor Not Updated");
    }
    else
    {
        // AMESSdurX = duration of last tick's AMEC state
        sensor_update( AMECSENSOR_ARRAY_PTR(AMESSdur0, l_last_state), G_fw_timing.amess_dur);
    }

    // ------------------------------------------------------
    // Kick off GPE programs to track worst-case time in GPE
    // and update the sensors.
    // ------------------------------------------------------
    if( (NULL != G_fw_timing.gpe0_timing_request) &&
        (NULL != G_fw_timing.gpe1_timing_request) )
    {
        // Check if both GPE engines were able to complete the last GPE job
        // on the queue within 1 tick.
        l_gpe0_idle = async_request_is_idle(&G_fw_timing.gpe0_timing_request->request);
        l_gpe1_idle = async_request_is_idle(&G_fw_timing.gpe1_timing_request->request);

        if(l_gpe0_idle && l_gpe1_idle)
        {
            // Reset the consecutive trace count
            L_consec_trace_count = 0;

            // Both GPE engines finished on time. Now check if they were
            // successful too.
            if( async_request_completed(&(G_fw_timing.gpe0_timing_request->request)) &&
                async_request_completed(&(G_fw_timing.gpe1_timing_request->request)) )
            {
                // GPEtickdur0 = duration of last tick's PORE-GPE0 duration
                sensor_update( AMECSENSOR_PTR(GPEtickdur0), G_fw_timing.gpe_dur[0]);

                // GPEtickdur1 = duration of last tick's PORE-GPE1 duration
                sensor_update( AMECSENSOR_PTR(GPEtickdur1), G_fw_timing.gpe_dur[1]);
            }
            else
            {
                // This case is expected on the first call of the function.
                // After that, this should not happen.
                if(!l_first_call)
                {
                    // Note: FFDC for this case is gathered by each task
                    // responsible for a GPE job.
                    TRAC_INFO("GPE task idle but GPE task did not complete");
                }
                l_first_call = FALSE;
            }

            // Update the time used to measure GPE duration.
            G_fw_timing.rtl_start_gpe = G_fw_timing.rtl_start;

            // Schedule the GPE routines that will run and update the worst-
            // case timings (via callback) after they complete. These GPE
            // routines are the last GPE routines added to the queue during
            // the RTL tick.
            rc  = pore_flex_schedule(G_fw_timing.gpe0_timing_request);
            rc2 = pore_flex_schedule(G_fw_timing.gpe1_timing_request);

            if(rc || rc2)
            {
                /* @
                 * @errortype
                 * @moduleid    AMEC_UPDATE_FW_SENSORS
                 * @reasoncode  SSX_GENERIC_FAILURE
                 * @userdata1   return code - gpe0
                 * @userdata2   return code - gpe1
                 * @userdata4   OCC_NO_EXTENDED_RC
                 * @devdesc     Failure to schedule PORE-GPE poreFlex object for FW timing
                 *              analysis.
                 */
                l_err = createErrl(AMEC_UPDATE_FW_SENSORS,  //modId
                                   SSX_GENERIC_FAILURE,     //reasoncode
                                   OCC_NO_EXTENDED_RC,      //Extended reason code
                                   ERRL_SEV_INFORMATIONAL,  //Severity
                                   NULL,                    //Trace Buf
                                   DEFAULT_TRACE_SIZE,      //Trace Size
                                   rc,                      //userdata1
                                   rc2);                    //userdata2

                // Commit error log
                commitErrl( &l_err );
            }
        }
        else if(L_consec_trace_count < MAX_CONSEC_TRACE)
        {
            uint64_t l_dbg1;

            // Reset will eventually be requested due to not having power
            // measurement data after X ticks, but add some additional FFDC
            // to the trace that will tell us what GPE job is currently
            // executing.
            if(!l_gpe0_idle)
            {
                l_dbg1 = in64(PORE_GPE0_DBG1);
                TRAC_ERR("GPE0 programs did not complete within one tick. DBG1[0x%08x%08x]",
                         l_dbg1 >> 32, l_dbg1 & 0x00000000ffffffffull);
            }