// Function Specification // // Name: SMGR_mode_transition_to_dynpowersave_fp // // Description: Transition the OCC mode to Dynamic PowerSave-Favor Performance // // End Function Specification errlHndl_t SMGR_mode_transition_to_dynpowersave_fp() { errlHndl_t l_errlHndl = NULL; TRAC_IMP("SMGR: Mode to Dynamic PowerSave-Favor Performance Transition Started"); // Set Freq Mode for AMEC to use l_errlHndl = amec_set_freq_range(OCC_MODE_DYN_POWER_SAVE_FP); CURRENT_MODE() = OCC_MODE_DYN_POWER_SAVE_FP; TRAC_IMP("SMGR: Mode to Dynamic PowerSave-Favor Performance Transition Completed"); return l_errlHndl; }
// Function Specification // // Name: SMGR_mode_transition_to_ffo // // Description: Transition the OCC mode to FFO (Fixed Frequency Override) // // End Function Specification errlHndl_t SMGR_mode_transition_to_ffo() { errlHndl_t l_errlHndl = NULL; TRAC_IMP("SMGR: Mode to FFO Transition Started"); // Set Freq Mode for AMEC to use l_errlHndl = amec_set_freq_range(OCC_MODE_FFO); CURRENT_MODE() = OCC_MODE_FFO; TRAC_IMP("SMGR: Mode to FFO Transition Completed"); return l_errlHndl; }
// Function Specification // // Name: SMGR_mode_transition_to_nominal // // Description: Transition the OCC mode to Nominal // // End Function Specification errlHndl_t SMGR_mode_transition_to_nominal() { errlHndl_t l_errlHndl = NULL; TRAC_IMP("SMGR: Mode to Nominal Transition Started"); // Set Freq Mode for AMEC to use l_errlHndl = amec_set_freq_range(OCC_MODE_NOMINAL); CURRENT_MODE() = OCC_MODE_NOMINAL; TRAC_IMP("SMGR: Mode to Nominal Transition Completed"); return l_errlHndl; }
// Function Specification // // Name: SMGR_mode_transition_to_powersave // // Description: Transition the OCC mode to PowerSave // // End Function Specification errlHndl_t SMGR_mode_transition_to_powersave() { errlHndl_t l_errlHndl = NULL; TRAC_IMP("SMGR: Mode to PowerSave Transition Started"); // Set Freq Mode for AMEC to use l_errlHndl = amec_set_freq_range(OCC_MODE_PWRSAVE); CURRENT_MODE() = OCC_MODE_PWRSAVE; TRAC_IMP("SMGR: Mode to PowerSave Transition Completed"); return l_errlHndl; }
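// The transition functions above are not called directly; SMGR_set_mode() (below) walks a
// table of {old mode, new mode, handler} entries and runs the first match. The entries below
// are an illustrative sketch only: the field names (old_state, new_state, trans_func_ptr) and
// OCC_MODE_ALL are taken from SMGR_set_mode(), but the table's type name, actual contents and
// entry count (G_smgr_mode_trans_count) are defined elsewhere in the SMGR source.
//
//   const smgr_mode_trans_example_t G_smgr_mode_trans_example[] =
//   {
//       //  old mode       new mode                     transition handler
//       { OCC_MODE_ALL,    OCC_MODE_NOMINAL,            &SMGR_mode_transition_to_nominal         },
//       { OCC_MODE_ALL,    OCC_MODE_PWRSAVE,            &SMGR_mode_transition_to_powersave       },
//       { OCC_MODE_ALL,    OCC_MODE_DYN_POWER_SAVE_FP,  &SMGR_mode_transition_to_dynpowersave_fp },
//       { OCC_MODE_ALL,    OCC_MODE_FFO,                &SMGR_mode_transition_to_ffo             },
//   };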
// Function Specification // // Name: dcom_build_occfw_msg // // Description: Copy data into occ fw msg portion // // End Function Specification void dcom_build_occfw_msg( const dcom_error_type_t i_which_msg ) { if ( i_which_msg == SLAVE_INBOX ) { uint32_t l_slv_idx = 0; // For each occ slave for(; l_slv_idx < MAX_OCCS; l_slv_idx++) { G_dcom_slv_inbox_tx[l_slv_idx].occ_fw_mailbox[0] = G_occ_external_req_state; G_dcom_slv_inbox_tx[l_slv_idx].occ_fw_mailbox[1] = G_occ_external_req_mode; G_dcom_slv_inbox_tx[l_slv_idx].occ_fw_mailbox[2] = G_master_event_flags; G_dcom_slv_inbox_tx[l_slv_idx].occ_fw_mailbox[3] = G_slave_event_flags_ack[l_slv_idx]; G_dcom_slv_inbox_tx[l_slv_idx].occ_fw_mailbox[4] = 0; } } else if ( i_which_msg == SLAVE_OUTBOX ) { G_dcom_slv_outbox_tx.occ_fw_mailbox[0] = CURRENT_STATE(); if(G_sysConfigData.system_type.kvm ) { G_dcom_slv_outbox_tx.occ_fw_mailbox[1] = G_occ_external_req_mode_kvm; } else { G_dcom_slv_outbox_tx.occ_fw_mailbox[1] = CURRENT_MODE(); } G_dcom_slv_outbox_tx.occ_fw_mailbox[2] = G_master_event_flags_ack; G_dcom_slv_outbox_tx.occ_fw_mailbox[3] = G_slave_event_flags; G_dcom_slv_outbox_tx.occ_fw_mailbox[4] = SMGR_validate_get_valid_states(); } }
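// For reference, the occ_fw_mailbox words filled in above (and parsed in
// task_dcom_parse_occfwmsg() below) carry the following payloads. The symbolic index names
// are illustrative only and are not defined in this source:
//
//   enum // hypothetical names for the five mailbox words
//   {
//       MBOX_STATE        = 0, // master: requested state    / slave: current state
//       MBOX_MODE         = 1, // master: requested mode     / slave: current mode (or KVM requested mode)
//       MBOX_EVENT_FLAGS  = 2, // master: master event flags / slave: master event flags ack
//       MBOX_EVENT_ACK    = 3, // master: slave event ack    / slave: slave event flags
//       MBOX_VALID_STATES = 4, // master: unused (0)         / slave: SMGR_validate_get_valid_states()
//   };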
void cent_update_nlimits(uint32_t i_cent) { /*------------------------------------------------------------------------*/ /* Local Variables */ /*------------------------------------------------------------------------*/ static uint32_t L_trace_throttle_count = 0; uint16_t l_mba01_mba_maxn, l_mba01_chip_maxn, l_mba23_mba_maxn, l_mba23_chip_maxn; /*------------------------------------------------------------------------*/ /* Code */ /*------------------------------------------------------------------------*/ do { centaur_throttle_t* l_active_limits01 = &G_centaurThrottleLimits[i_cent][0]; centaur_throttle_t* l_active_limits23 = &G_centaurThrottleLimits[i_cent][1]; mem_throt_config_data_t* l_state_limits01 = &G_sysConfigData.mem_throt_limits[i_cent][0]; mem_throt_config_data_t* l_state_limits23 = &G_sysConfigData.mem_throt_limits[i_cent][1]; //Minimum N value is not state dependent l_active_limits01->min_n_per_mba = l_state_limits01->min_ot_n_per_mba; l_active_limits23->min_n_per_mba = l_state_limits23->min_ot_n_per_mba; //oversubscription? if(AMEC_INTF_GET_OVERSUBSCRIPTION()) { l_mba01_mba_maxn = l_state_limits01->ovs_n_per_mba; l_mba01_chip_maxn = l_state_limits01->ovs_n_per_chip; l_mba23_mba_maxn = l_state_limits23->ovs_n_per_mba; l_mba23_chip_maxn = l_state_limits23->ovs_n_per_chip; } else if(CURRENT_MODE() == OCC_MODE_NOMINAL) { l_mba01_mba_maxn = l_state_limits01->nom_n_per_mba; l_mba01_chip_maxn = l_state_limits01->nom_n_per_chip; l_mba23_mba_maxn = l_state_limits23->nom_n_per_mba; l_mba23_chip_maxn = l_state_limits23->nom_n_per_chip; } else //DPS, TURBO, FFO, and SPS modes will use these settings { l_mba01_mba_maxn = l_state_limits01->turbo_n_per_mba; l_mba01_chip_maxn = l_state_limits01->turbo_n_per_chip; l_mba23_mba_maxn = l_state_limits23->turbo_n_per_mba; l_mba23_chip_maxn = l_state_limits23->turbo_n_per_chip; } l_active_limits01->max_n_per_chip = l_mba01_chip_maxn; l_active_limits23->max_n_per_chip = l_mba23_chip_maxn; //Trace when the MBA max N value changes if((l_mba01_mba_maxn != l_active_limits01->max_n_per_mba) || (l_mba23_mba_maxn != l_active_limits23->max_n_per_mba)) { l_active_limits01->max_n_per_mba = l_mba01_mba_maxn; l_active_limits23->max_n_per_mba = l_mba23_mba_maxn; //Don't trace every MBA changing, just one if(!L_trace_throttle_count) { L_trace_throttle_count = CENT_TRACE_THROTTLE_DELAY; TRAC_IMP("New MBA throttle max|min N values: mba01[0x%08x], mba23[0x%08x]", (uint32_t)((l_mba01_mba_maxn << 16) | l_active_limits01->min_n_per_mba), (uint32_t)((l_mba23_mba_maxn << 16) | l_active_limits23->min_n_per_mba)); break; } } if(L_trace_throttle_count) { L_trace_throttle_count--; } }while(0); }
// Function Specification // // Name: SMGR_set_mode // // Description: Set the requested OCC mode by running the matching mode transition function // // End Function Specification errlHndl_t SMGR_set_mode(const OCC_MODE i_mode, const uint8_t i_sms_type) { errlHndl_t l_errlHndl = NULL; int jj=0; OCC_MODE l_mode = i_mode; do { // Get lock for critical section if(ssx_semaphore_pend(&G_smgrModeChangeSem,SSX_WAIT_FOREVER)) { /* @ * @errortype * @moduleid MAIN_MODE_TRANSITION_MID * @reasoncode SSX_GENERIC_FAILURE * @userdata1 none * @userdata4 ERC_RUNNING_SEM_PENDING_FAILURE * @devdesc SSX semaphore related failure */ l_errlHndl = createErrl(MAIN_MODE_TRANSITION_MID, //modId SSX_GENERIC_FAILURE, //reasoncode ERC_RUNNING_SEM_PENDING_FAILURE,//Extended reason code ERRL_SEV_UNRECOVERABLE, //Severity NULL, //Trace Buf DEFAULT_TRACE_SIZE, //Trace Size 0, //userdata1 0); //userdata2 // Callout firmware addCalloutToErrl(l_errlHndl, ERRL_CALLOUT_TYPE_COMPONENT_ID, ERRL_COMPONENT_ID_FIRMWARE, ERRL_CALLOUT_PRIORITY_HIGH); break; } //Check to see if we need to make a change if(l_mode == OCC_MODE_NOCHANGE) { break; } // SAPPHIRE only accepts DPS-FE mode. In case OCC gets other modes, it should accept the request // and keep reporting back that it is in that mode. However, internally we should not // initiate any mode transition, i.e., OCC should remain internally in DPS-FE mode. if(G_sysConfigData.system_type.kvm) { G_occ_external_req_mode_kvm = l_mode; if (l_mode != OCC_MODE_DYN_POWER_SAVE) { TRAC_ERR("SAPPHIRE only accepts DPS-FE mode(6) but requested mode is : %d", l_mode); l_mode = OCC_MODE_DYN_POWER_SAVE; } } switch (l_mode) { case OCC_MODE_NOMINAL: // FALL THROUGH case OCC_MODE_PWRSAVE: // FALL THROUGH case OCC_MODE_DYN_POWER_SAVE: // FALL THROUGH case OCC_MODE_DYN_POWER_SAVE_FP: // FALL THROUGH case OCC_MODE_TURBO: // FALL THROUGH case OCC_MODE_STURBO: // FALL THROUGH case OCC_MODE_FFO: // FALL THROUGH // Notify AMEC of mode change // Change Mode via Transition Function do { // Loop through mode transition table, and find the state // transition function that matches the transition we need to do. for(jj=0; jj<G_smgr_mode_trans_count; jj++) { if( ((G_smgr_mode_trans[jj].old_state == G_occ_internal_mode) || (G_smgr_mode_trans[jj].old_state == OCC_MODE_ALL) ) && (G_smgr_mode_trans[jj].new_state == l_mode) ) { // We found the transition that matches, now run the function // that is associated with that state transition. if(NULL != G_smgr_mode_trans[jj].trans_func_ptr) { // Signal that we are now in a mode transition G_mode_transition_occuring = TRUE; // Run transition function l_errlHndl = (G_smgr_mode_trans[jj].trans_func_ptr)(); // Signal that we are done with the transition G_mode_transition_occuring = FALSE; break; } } } // Check if we hit the end of the table without finding a valid // mode transition. If we did, log an internal error. if(G_smgr_mode_trans_count == jj) { TRAC_ERR("No transition (or NULL) found for the mode change\n"); l_errlHndl = NULL; //TODO: Create Error break; } // Update the power mode for all core groups that are following system mode AMEC_part_update_sysmode_policy(CURRENT_MODE()); } while(0); break; default: //unsupported mode break; } if(l_errlHndl) { // Punt !!! :-) break; } // Load correct thermal thresholds based on the current mode l_errlHndl = AMEC_data_write_thrm_thresholds(CURRENT_MODE()); // Update the CPU speed in AME? // Register the New Mode? // Update Power Policy Requirements? // Update CPM Calibration }while(0); // If we have a mode change failure, Mode change flag needs to be set, // otherwise, it needs to be cleared/unset. 
if(l_errlHndl) { } // Unlock critical section ssx_semaphore_post(&G_smgrModeChangeSem); return l_errlHndl; }
// Verifies that each core is at the correct frequency after they have had // time to stabilize void amec_verify_pstate() { uint8_t l_core = 0; int8_t l_pstate_from_fmax = 0; gpe_bulk_core_data_t * l_core_data_ptr; pmc_pmsr_ffcdc_data_t l_pmc_pmsr_ffdc; errlHndl_t l_err = NULL; if ( (G_time_until_freq_check == 0) && ( CURRENT_MODE() != OCC_MODE_DYN_POWER_SAVE ) && ( CURRENT_MODE() != OCC_MODE_DYN_POWER_SAVE_FP ) && (!G_sysConfigData.system_type.kvm)) { // Reset the counter G_time_until_freq_check = FREQ_CHG_CHECK_TIME; // Convert fmax to the corresponding pstate l_pstate_from_fmax = proc_freq2pstate(g_amec->sys.fmax); for( l_core = 0; l_core < MAX_NUM_CORES; l_core++ ) { // If the core isn't present, skip it if(!CORE_PRESENT(l_core)) { l_pmc_pmsr_ffdc.pmsr_ffdc_data.data[l_core].value = 0; continue; } // Get pointer to core data l_core_data_ptr = proc_get_bulk_core_data_ptr(l_core); // Get the core's pmsr data l_pmc_pmsr_ffdc.pmsr_ffdc_data.data[l_core] = l_core_data_ptr->pcb_slave.pmsr; // Verify that the core is running at the correct frequency // If not, log an error if( (l_pstate_from_fmax != l_pmc_pmsr_ffdc.pmsr_ffdc_data.data[l_core].fields.local_pstate_actual) && (l_pstate_from_fmax > l_pmc_pmsr_ffdc.pmsr_ffdc_data.data[l_core].fields.pv_min) && (l_err == NULL) ) { TRAC_ERR("Frequency mismatch in core %d: actual_ps[%d] req_ps[%d] fmax[%d] mode[%d].", l_core, l_pmc_pmsr_ffdc.pmsr_ffdc_data.data[l_core].fields.local_pstate_actual, l_pstate_from_fmax, g_amec->sys.fmax, CURRENT_MODE()); fill_pmc_ffdc_buffer(&l_pmc_pmsr_ffdc.pmc_ffcdc_data); /* @ * @moduleid AMEC_VERIFY_FREQ_MID * @reasoncode TARGET_FREQ_FAILURE * @severity ERRL_SEV_UNRECOVERABLE * @userdata1 0 * @userdata2 0 * @userdata4 OCC_NO_EXTENDED_RC * @devdesc A core is not running at the expected frequency */ l_err = createErrl( AMEC_VERIFY_FREQ_MID, // i_modId, TARGET_FREQ_FAILURE, // i_reasonCode, OCC_NO_EXTENDED_RC, ERRL_SEV_UNRECOVERABLE, NULL, // i_trace, DEFAULT_TRACE_SIZE, // i_traceSz, 0, // i_userData1, 0); // i_userData2 //Add firmware callout addCalloutToErrl(l_err, ERRL_CALLOUT_TYPE_COMPONENT_ID, ERRL_COMPONENT_ID_FIRMWARE, ERRL_CALLOUT_PRIORITY_HIGH); //Add processor callout addCalloutToErrl(l_err, ERRL_CALLOUT_TYPE_HUID, G_sysConfigData.proc_huid, ERRL_CALLOUT_PRIORITY_MED); } } if( l_err != NULL) { //Add our register dump to the error log addUsrDtlsToErrl(l_err, (uint8_t*) &l_pmc_pmsr_ffdc, sizeof(l_pmc_pmsr_ffdc), ERRL_USR_DTL_STRUCT_VERSION_1, ERRL_USR_DTL_BINARY_DATA); REQUEST_RESET(l_err); } } }
// Function Specification // // Name: amec_slv_check_perf // // Description: Detect and log degraded performance errors on the Slave OCC. // This function will run every tick. // // Thread: RealTime Loop // // Task Flags: // // End Function Specification void amec_slv_check_perf(void) { /*------------------------------------------------------------------------*/ /* Local Variables */ /*------------------------------------------------------------------------*/ static BOOLEAN l_prev_failsafe_state = FALSE; static BOOLEAN l_prev_ovs_state = FALSE; static BOOLEAN l_prev_pcap_state = FALSE; static ERRL_SEVERITY l_pcap_sev = ERRL_SEV_PREDICTIVE; static BOOLEAN l_throttle_traced = FALSE; static uint64_t l_time = 0; /*------------------------------------------------------------------------*/ /* Code */ /*------------------------------------------------------------------------*/ // Verify that cores are at proper frequency amec_verify_pstate(); do { // was frequency limited by power ? if ( G_non_dps_power_limited != TRUE ) { if(l_throttle_traced) { TRAC_INFO("Frequency not limited by power algorithms anymore"); l_throttle_traced = FALSE; } // we are done, break and return break; } // frequency limited due to failsafe condition ? if ( AMEC_INTF_GET_FAILSAFE() == TRUE ) { if ( l_prev_failsafe_state == TRUE) { // we are done, break and return break; } else { // log this error ONLY ONCE per IPL l_prev_failsafe_state = TRUE; TRAC_ERR("Frequency limited due to failsafe condition(mode:%d, state:%d)", CURRENT_MODE(), CURRENT_STATE()); l_throttle_traced = TRUE; l_time = ssx_timebase_get(); // log error that calls out OVS procedure // set error severity to ERRL_SEV_PREDICTIVE /* @ * @errortype * @moduleid AMEC_SLAVE_CHECK_PERFORMANCE * @reasoncode INTERNAL_FAILURE * @userdata1 Previous FailSafe State * @userdata4 ERC_AMEC_SLAVE_FAILSAFE_STATE * @devdesc Frequency limited due to failsafe condition */ errlHndl_t l_errl = createErrl(AMEC_SLAVE_CHECK_PERFORMANCE, //modId INTERNAL_FAILURE, //reasoncode ERC_AMEC_SLAVE_FAILSAFE_STATE,//Extended reason code ERRL_SEV_PREDICTIVE, //Severity NULL, //Trace Buf DEFAULT_TRACE_SIZE, //Trace Size l_prev_failsafe_state, //userdata1 0); //userdata2 addCalloutToErrl( l_errl, ERRL_CALLOUT_TYPE_COMPONENT_ID, ERRL_COMPONENT_ID_OVERSUBSCRIPTION, ERRL_CALLOUT_PRIORITY_HIGH ); // and sets the consolidate action flag setErrlActions( l_errl, ERRL_ACTIONS_CONSOLIDATE_ERRORS ); // Commit Error commitErrl(&l_errl); // we are done, let's break break; } } // frequency limited due to oversubscription condition ? 
if ( AMEC_INTF_GET_OVERSUBSCRIPTION() == TRUE ) { if ( l_prev_ovs_state == TRUE) { // we are done, break and return break; } else { // log this error ONLY ONCE per IPL l_prev_ovs_state = TRUE; TRAC_ERR("Frequency limited due to oversubscription condition(mode:%d, state:%d)", CURRENT_MODE(), CURRENT_STATE()); l_throttle_traced = TRUE; l_time = ssx_timebase_get(); // log error that calls out OVS procedure // set error severity to ERRL_SEV_PREDICTIVE // Updated the RC to match the actual RC passed to createErrl() /* @ * @errortype * @moduleid AMEC_SLAVE_CHECK_PERFORMANCE * @reasoncode OVERSUB_LIMIT_ALERT * @userdata1 Previous OVS State * @userdata4 ERC_AMEC_SLAVE_OVS_STATE * @devdesc Frequency limited due to oversubscription condition */ errlHndl_t l_errl = createErrl(AMEC_SLAVE_CHECK_PERFORMANCE, //modId OVERSUB_LIMIT_ALERT, //reasoncode ERC_AMEC_SLAVE_OVS_STATE, //Extended reason code ERRL_SEV_PREDICTIVE, //Severity NULL, //Trace Buf DEFAULT_TRACE_SIZE, //Trace Size l_prev_ovs_state, //userdata1 0); //userdata2 // Callout to Oversubscription addCalloutToErrl( l_errl, ERRL_CALLOUT_TYPE_COMPONENT_ID, ERRL_COMPONENT_ID_OVERSUBSCRIPTION, ERRL_CALLOUT_PRIORITY_HIGH ); // Callout to APSS addCalloutToErrl( l_errl, ERRL_CALLOUT_TYPE_HUID, G_sysConfigData.apss_huid, ERRL_CALLOUT_PRIORITY_MED ); // Callout to Firmware addCalloutToErrl( l_errl, ERRL_CALLOUT_TYPE_COMPONENT_ID, ERRL_COMPONENT_ID_FIRMWARE, ERRL_CALLOUT_PRIORITY_LOW ); // and sets the consolidate action flag setErrlActions( l_errl, ERRL_ACTIONS_CONSOLIDATE_ERRORS ); // Commit Error commitErrl(&l_errl); // we are done, let's break break; } } uint16_t l_snrBulkPwr = AMECSENSOR_PTR(PWR250US)->sample; // frequency limited due to system power cap condition ? if (( l_snrBulkPwr > (G_sysConfigData.pcap.system_pcap - PDROP_THRESH) ) && ( G_sysConfigData.pcap.current_pcap == 0 )) { if ( l_prev_pcap_state == TRUE) { // we are done, break and return break; } else { //log this error ONLY ONCE per IPL l_prev_pcap_state = TRUE; TRAC_ERR("Frequency limited due to power cap condition(mode:%d, state:%d)", CURRENT_MODE(), CURRENT_STATE()); TRAC_ERR("SnrBulkPwr %d > Sys Pcap %d ",l_snrBulkPwr, G_sysConfigData.pcap.system_pcap ); TRAC_ERR("SnrFanPwr %d, SnrIOPwr %d, SnrStoragePwr %d, SnrGpuPwr %d ", AMECSENSOR_PTR(PWR250USFAN)->sample, AMECSENSOR_PTR(PWR250USIO)->sample, AMECSENSOR_PTR(PWR250USSTORE)->sample, AMECSENSOR_PTR(PWR250USGPU)->sample ); TRAC_ERR("SnrProcPwr 0 %d, SnrProcPwr 1 %d, SnrProcPwr 2 %d, SnrProcPwr 3 %d", g_amec->proc_snr_pwr[0], g_amec->proc_snr_pwr[1], g_amec->proc_snr_pwr[2], g_amec->proc_snr_pwr[3] ); TRAC_ERR("SnrMemPwr 0 %d, SnrMemPwr 1 %d, SnrMemPwr 2 %d, SnrMemPwr 3 %d", g_amec->mem_snr_pwr[0], g_amec->mem_snr_pwr[1], g_amec->mem_snr_pwr[2], g_amec->mem_snr_pwr[3] ); l_throttle_traced = TRUE; l_time = ssx_timebase_get(); // log error that calls out firmware and APSS procedure // set error severity to l_pcap_sev /* @ * @errortype * @moduleid AMEC_SLAVE_CHECK_PERFORMANCE * @reasoncode PCAP_THROTTLE_POWER_LIMIT * @userdata1 Current Sensor Bulk Power * @userdata2 System PCAP * @userdata4 ERC_AMEC_SLAVE_POWERCAP * @devdesc Frequency limited due to PowerCap condition */ errlHndl_t l_errl = createErrl(AMEC_SLAVE_CHECK_PERFORMANCE, //modId PCAP_THROTTLE_POWER_LIMIT, //reasoncode ERC_AMEC_SLAVE_POWERCAP, //Extended reason code l_pcap_sev, //Severity NULL, //Trace Buf DEFAULT_TRACE_SIZE, //Trace Size l_snrBulkPwr, //userdata1 G_sysConfigData.pcap.system_pcap);//userdata2 addCalloutToErrl( l_errl, ERRL_CALLOUT_TYPE_COMPONENT_ID, 
ERRL_COMPONENT_ID_FIRMWARE, ERRL_CALLOUT_PRIORITY_HIGH ); addCalloutToErrl( l_errl, ERRL_CALLOUT_TYPE_HUID, G_sysConfigData.apss_huid, ERRL_CALLOUT_PRIORITY_HIGH ); // and sets the consolidate action flag setErrlActions( l_errl, ERRL_ACTIONS_CONSOLIDATE_ERRORS ); // then set l_pcap_sev to informational l_pcap_sev = ERRL_SEV_INFORMATIONAL; // Commit Error commitErrl(&l_errl); // we are done, let's break break; } } // throttle trace to every 3600 seconds (1hr = 3600000 ms) if(!l_throttle_traced && ( DURATION_IN_MS_UNTIL_NOW_FROM(l_time) > 3600000 ) ) { TRAC_INFO("Frequency power limited due to transient condition: PowerLimited=%x, FailSafe=%x, OverSubScription=%x CurrentBulkPwr=%x", G_non_dps_power_limited, AMEC_INTF_GET_FAILSAFE(), AMEC_INTF_GET_OVERSUBSCRIPTION(), l_snrBulkPwr ); l_throttle_traced = TRUE; l_time = ssx_timebase_get(); } } while( 0 ); return; }
// Function Specification // // Name: amec_slv_voting_box // // Description: Slave OCC's voting box that decides the frequency request. // This function will run every tick. // // Thread: RealTime Loop // // Task Flags: // // End Function Specification void amec_slv_voting_box(void) { /*------------------------------------------------------------------------*/ /* Local Variables */ /*------------------------------------------------------------------------*/ uint16_t k = 0; uint16_t l_chip_fmax = g_amec->sys.fmax; uint16_t l_core_freq = 0; uint32_t l_chip_reason = 0; uint32_t l_core_reason = 0; uint8_t l_kvm_throt_reason = NO_THROTTLE; amec_part_t *l_part = NULL; bool l_freq_req_changed = FALSE; /*------------------------------------------------------------------------*/ /* Code */ /*------------------------------------------------------------------------*/ // Voting Box for CPU speed. // This function implements the voting box to decide which input gets the right // to actuate the system. //Reset the maximum core frequency requested prior to recalculation. g_amec->proc[0].core_max_freq = 0; // PPB_FMAX if(g_amec->proc[0].pwr_votes.ppb_fmax < l_chip_fmax) { l_chip_fmax = g_amec->proc[0].pwr_votes.ppb_fmax; l_chip_reason = AMEC_VOTING_REASON_PPB; l_kvm_throt_reason = POWERCAP; } // PMAX_CLIP_FREQ if(g_amec->proc[0].pwr_votes.pmax_clip_freq < l_chip_fmax) { l_chip_fmax = g_amec->proc[0].pwr_votes.pmax_clip_freq; l_chip_reason = AMEC_VOTING_REASON_PMAX; l_kvm_throt_reason = POWER_SUPPLY_FAILURE; } // Pmax_clip frequency request if there is an APSS failure if(g_amec->proc[0].pwr_votes.apss_pmax_clip_freq < l_chip_fmax) { l_chip_fmax = g_amec->proc[0].pwr_votes.apss_pmax_clip_freq; l_chip_reason = AMEC_VOTING_REASON_APSS_PMAX; l_kvm_throt_reason = POWER_SUPPLY_FAILURE; } //THERMALPROC.FREQ_REQUEST //Thermal controller input based on processor temperature if(g_amec->thermalproc.freq_request < l_chip_fmax) { l_chip_fmax = g_amec->thermalproc.freq_request; l_chip_reason = AMEC_VOTING_REASON_PROC_THRM; l_kvm_throt_reason = CPU_OVERTEMP; } // Controller request based on VRHOT signal from processor regulator if(g_amec->vrhotproc.freq_request < l_chip_fmax) { l_chip_fmax = g_amec->vrhotproc.freq_request; l_chip_reason = AMEC_VOTING_REASON_VRHOT_THRM; l_kvm_throt_reason = CPU_OVERTEMP; } // CONN_OC_VOTE if(g_amec->proc[0].pwr_votes.conn_oc_vote < l_chip_fmax) { l_chip_fmax = g_amec->proc[0].pwr_votes.conn_oc_vote; l_chip_reason = AMEC_VOTING_REASON_CONN_OC; l_kvm_throt_reason = OVERCURRENT; } for (k=0; k<MAX_NUM_CORES; k++) { if(CORE_PRESENT(k)) { l_core_freq = l_chip_fmax; l_core_reason = l_chip_reason; // Disable DPS in KVM if(!G_sysConfigData.system_type.kvm) { l_part = amec_part_find_by_core(&g_amec->part_config, k); // Check frequency request generated by DPS algorithms if(g_amec->proc[0].core[k].core_perf.dps_freq_request < l_core_freq) { l_core_freq = g_amec->proc[0].core[k].core_perf.dps_freq_request; l_core_reason = AMEC_VOTING_REASON_UTIL; } // Adjust frequency based on soft frequency boundaries if(l_part != NULL) { if(l_core_freq < l_part->soft_fmin) { // Before enforcing a soft Fmin, make sure we don't // have a thermal or power emergency if(!(l_chip_reason & (AMEC_VOTING_REASON_PROC_THRM | AMEC_VOTING_REASON_VRHOT_THRM | AMEC_VOTING_REASON_PPB | AMEC_VOTING_REASON_PMAX | AMEC_VOTING_REASON_CONN_OC))) { l_core_freq = l_part->soft_fmin; l_core_reason = AMEC_VOTING_REASON_SOFT_MIN; } } else if(l_core_freq > l_part->soft_fmax) { l_core_freq = l_part->soft_fmax; l_core_reason = 
AMEC_VOTING_REASON_SOFT_MAX; } } } if(CURRENT_MODE() == OCC_MODE_NOMINAL) { // PROC_PCAP_NOM_VOTE if(g_amec->proc[0].pwr_votes.proc_pcap_nom_vote < l_core_freq) { l_core_freq = g_amec->proc[0].pwr_votes.proc_pcap_nom_vote; l_core_reason = AMEC_VOTING_REASON_PWR; l_kvm_throt_reason = POWERCAP; } } else { // PROC_PCAP_VOTE if(g_amec->proc[0].pwr_votes.proc_pcap_vote < l_core_freq) { l_core_freq = g_amec->proc[0].pwr_votes.proc_pcap_vote; l_core_reason = AMEC_VOTING_REASON_PWR; l_kvm_throt_reason = POWERCAP; } } // Check IPS frequency request sent by Master OCC if(g_amec->slv_ips_freq_request != 0) { if(g_amec->slv_ips_freq_request < l_core_freq) { l_core_freq = g_amec->slv_ips_freq_request; l_core_reason = AMEC_VOTING_REASON_IPS; } } // Override frequency with request from Master OCC if(g_amec->foverride_enable) { if(g_amec->foverride != 0) { // Override the frequency on all cores if Master OCC sends // a non-zero request l_core_freq = g_amec->foverride; l_core_reason = AMEC_VOTING_REASON_OVERRIDE; } } if(g_amec->pstate_foverride_enable) { if(g_amec->pstate_foverride != 0) { // Override the frequency on all cores if the Global Pstate // table has been modified l_core_freq = g_amec->pstate_foverride; l_core_reason = AMEC_VOTING_REASON_OVERRIDE; } } //Make sure the frequency is not less than the system min if(l_core_freq < g_amec->sys.fmin) { l_core_freq = g_amec->sys.fmin; } // Override frequency via Amester parameter interface if (g_amec->proc[0].parm_f_override_enable && g_amec->proc[0].parm_f_override[k] > 0) { l_core_freq = g_amec->proc[0].parm_f_override[k]; l_core_reason = AMEC_VOTING_REASON_OVERRIDE_CORE; } // If frequency has changed, set the flag if ( (l_core_freq != g_amec->proc[0].core[k].f_request) || (l_core_freq != g_amec->sys.fmax)) { l_freq_req_changed = TRUE; } //STORE core frequency and reason g_amec->proc[0].core[k].f_request = l_core_freq; g_amec->proc[0].core[k].f_reason = l_core_reason; // Update the Amester parameter telling us the reason. Needed for // parameter array. g_amec->proc[0].parm_f_reason[k] = l_core_reason; //CURRENT_MODE() may be OCC_MODE_NOCHANGE because STATE change is processed //before MODE change if ((CURRENT_MODE() != OCC_MODE_DYN_POWER_SAVE) && (CURRENT_MODE() != OCC_MODE_DYN_POWER_SAVE_FP) && (CURRENT_MODE() != OCC_MODE_NOCHANGE) && (l_core_reason & NON_DPS_POWER_LIMITED)) { G_non_dps_power_limited = TRUE; } else { G_non_dps_power_limited = FALSE; } // Update the sensor telling us what the requested frequency is sensor_update( AMECSENSOR_ARRAY_PTR(FREQ250USP0C0,k), (uint16_t) g_amec->proc[0].core[k].f_request); #if 0 /// TODO: This can be deleted if deemed useless /// This trace can be used to debug the voting /// box and control loops. It will trace the reason why a /// controller is lowering the freq, but will only do it once in a /// row for the specific freq it wants to control to. It assumes /// that all cores will be controlled to same freq. 
if(l_chip_fmax != g_amec->sys.fmax){ static uint16_t L_trace = 0; if(l_chip_fmax != L_trace){ L_trace = l_chip_fmax; TRAC_INFO("Core: %d, Freq: %d, Reason: %d",k,l_core_freq,l_core_reason); } } #endif if(l_core_freq > g_amec->proc[0].core_max_freq) { g_amec->proc[0].core_max_freq = l_core_freq; } } else { l_core_freq = 0; l_core_reason = 0; } }//End of for loop // Check if the frequency is going to be changing if( l_freq_req_changed == TRUE ) { G_time_until_freq_check = FREQ_CHG_CHECK_TIME; } else if (G_time_until_freq_check != 0) { G_time_until_freq_check--; } //convert POWERCAP reason to POWER_SUPPLY_FAILURE if ovs/failsafe is asserted if((l_kvm_throt_reason == POWERCAP) && (AMEC_INTF_GET_FAILSAFE() || AMEC_INTF_GET_OVERSUBSCRIPTION())) { l_kvm_throt_reason = POWER_SUPPLY_FAILURE; } //check if we need to update the throttle reason in homer if(G_sysConfigData.system_type.kvm && (l_kvm_throt_reason != G_amec_kvm_throt_reason)) { //Notify dcom thread to update the table G_amec_kvm_throt_reason = l_kvm_throt_reason; ssx_semaphore_post(&G_dcomThreadWakeupSem); } }
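// Every comparison in the voting box above follows the same "lowest vote wins" pattern:
// take the minimum of the current winner and the new candidate, and remember which input
// won. A hypothetical helper capturing that pattern (not present in this source) would be:
//
//   static inline void vote_lower(uint16_t i_candidate, uint32_t i_reason,
//                                 uint16_t *io_freq, uint32_t *io_reason)
//   {
//       if(i_candidate < *io_freq)
//       {
//           *io_freq   = i_candidate;
//           *io_reason = i_reason;
//       }
//   }
//
//   // e.g. vote_lower(g_amec->thermalproc.freq_request, AMEC_VOTING_REASON_PROC_THRM,
//   //                 &l_chip_fmax, &l_chip_reason);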
// Function Specification // // Name: Dcom_thread_routine // // Description: Purpose of this task is to handle messages passed from // Master to Slave and vice versa. // // Nothing in this thread should be time-critical, but should // happen more often than the 1-second that other threads run // at. // // This thread currently runs every ~1ms, based on the RTL loop of // 250us. // // FWIW -- It is pointless to set this thread to run any more // often than the length of the RTL loop, since it is acting // on data passed back and forth via that loop. // // End Function Specification void Dcom_thread_routine(void *arg) { OCC_STATE l_newOccState = 0; OCC_MODE l_newOccMode = 0; SsxTimer l_timeout_timer; errlHndl_t l_errlHndl = NULL; // -------------------------------------------------- // Create a timer that pops every 10 seconds to wake up // this thread, in case a semaphore never gets posted. // TODO: Is this really needed? // -------------------------------------------------- ssx_timer_create(&l_timeout_timer, (SsxTimerCallback) ssx_semaphore_post, (void *) &G_dcomThreadWakeupSem); ssx_timer_schedule(&l_timeout_timer, SSX_SECONDS(10), SSX_SECONDS(10)); for(;;) { // -------------------------------------------------- // Wait on Semaphore until we get new data over DCOM // (signalled by sem_post()) or a timeout occurs. // Sem timeout is designed to be the slowest // interval we will attempt to run this thread at. // -------------------------------------------------- // Wait for sem_post before we run through this thread. ssx_semaphore_pend(&G_dcomThreadWakeupSem, SSX_WAIT_FOREVER); // -------------------------------------------------- // Counter to ensure thread is running (can wrap) // -------------------------------------------------- G_dcom_thread_counter++; // -------------------------------------------------- // Check if we need to update the sapphire table // -------------------------------------------------- if(G_sysConfigData.system_type.kvm) { proc_check_for_sapphire_updates(); } // -------------------------------------------------- // Set Mode and State Based on Master // -------------------------------------------------- l_newOccState = (G_occ_master_state == CURRENT_STATE()) ? OCC_STATE_NOCHANGE : G_occ_master_state; if(G_sysConfigData.system_type.kvm) { l_newOccMode = (G_occ_master_mode == G_occ_external_req_mode_kvm ) ? OCC_MODE_NOCHANGE : G_occ_master_mode; } else { l_newOccMode = (G_occ_master_mode == CURRENT_MODE() ) ? OCC_MODE_NOCHANGE : G_occ_master_mode; } // Override State if SAFE state is requested l_newOccState = ( isSafeStateRequested() ) ? OCC_STATE_SAFE : l_newOccState; // Override State if we are in SAFE state already l_newOccState = ( OCC_STATE_SAFE == CURRENT_STATE() ) ? 
OCC_STATE_NOCHANGE : l_newOccState; if( (OCC_STATE_NOCHANGE != l_newOccState) || (OCC_MODE_NOCHANGE != l_newOccMode) ) { // If we're active, then we should always process the mode change first // If we're not active, then we should always process the state change first if(OCC_STATE_ACTIVE == CURRENT_STATE()) { // Set the new mode l_errlHndl = SMGR_set_mode(l_newOccMode, 0 /* TODO V/F */ ); if(l_errlHndl) { commitErrl(&l_errlHndl); } // Set the new state l_errlHndl = SMGR_set_state(l_newOccState); if(l_errlHndl) { commitErrl(&l_errlHndl); } } else { // Set the new state l_errlHndl = SMGR_set_state(l_newOccState); if(l_errlHndl) { commitErrl(&l_errlHndl); } // Set the new mode l_errlHndl = SMGR_set_mode(l_newOccMode, 0 /* TODO V/F */ ); if(l_errlHndl) { commitErrl(&l_errlHndl); } } } // -------------------------------------------------- // DCM PStates // \_ can do sem_post to increment through state machine // -------------------------------------------------- if(OCC_STATE_SAFE != CURRENT_STATE()) { proc_gpsm_dcm_sync_enable_pstates_smh(); } // -------------------------------------------------- // SSX Sleep // -------------------------------------------------- // Even if semaphores are continually posted, there is no reason // for us to run this thread any more often than once every 250us // so we don't starve any other thread ssx_sleep(SSX_MICROSECONDS(250)); } }
////////////////////////// // Function Specification // // Name: amec_pcap_calc // // Description: Calculate the node, memory and processor power caps. // // Thread: Real Time Loop // // End Function Specification void amec_pcap_calc(const bool i_oversub_state) { bool l_active_pcap_changed = FALSE; uint16_t l_node_pwr = AMECSENSOR_PTR(PWRSYS)->sample; uint16_t l_p0_pwr = AMECSENSOR_PTR(PWRPROC)->sample; int32_t l_avail_power = 0; uint16_t mem_pwr_diff = 0; uint32_t l_proc_fraction = 0; static uint32_t L_prev_node_pcap = 0; static bool L_apss_error_traced = FALSE; static uint32_t L_ticks_mem_pwr_available = 0; static bool L_trace_pcap_throttle = true; static bool L_trace_pcap_unthrottle = true; // Determine the active power cap. // when in oversub (N mode) only use oversub pcap if lower than user set pcap // OCC should allow N mode to be higher than N+1 (don't compare against norm_node_pcap) // N mode may be higher on some systems due to ps issue reporting higher power in N mode if( (TRUE == i_oversub_state) && (g_amec->pcap.ovs_node_pcap < G_sysConfigData.pcap.current_pcap) ) { g_amec->pcap.active_node_pcap = g_amec->pcap.ovs_node_pcap; } // norm_node_pcap is set as lowest between sys (N+1 mode) and // user in amec_data_write_pcap() else { g_amec->pcap.active_node_pcap = g_amec->pcap.norm_node_pcap; } //Trace whenever the node pcap changes if(L_prev_node_pcap != g_amec->pcap.active_node_pcap) { TRAC_IMP("amec_pcap_calc: Node pcap set to %d watts.", g_amec->pcap.active_node_pcap); L_prev_node_pcap = g_amec->pcap.active_node_pcap; // set this pcap as valid (needed by master for comparison) g_amec->pcap_valid = 1; l_active_pcap_changed = TRUE; } l_avail_power = g_amec->pcap.active_node_pcap - l_node_pwr; // Determine GPU power cap if there are GPUs present if(G_first_proc_gpu_config) { amec_gpu_pcap(i_oversub_state, l_active_pcap_changed, l_avail_power); } if(l_node_pwr != 0) { l_proc_fraction = ((uint32_t)(l_p0_pwr) << 16)/l_node_pwr; if(L_apss_error_traced) { TRAC_ERR("PCAP: PWRSYS sensor is no longer 0."); L_apss_error_traced = FALSE; } // check if allowed to increase power AND memory throttled due to pcap if((l_avail_power > 0) && (g_amec->pcap.active_mem_level != 0)) { // un-throttle memory if there is enough available power between // current and new throttles if (CURRENT_MODE() == OCC_MODE_NOMINAL) { mem_pwr_diff = g_amec->pcap.nominal_mem_pwr; } else { mem_pwr_diff = g_amec->pcap.turbo_mem_pwr; } // currently there's only 1 mem pcap throt level so must be pcap1 mem_pwr_diff -= g_amec->pcap.pcap1_mem_pwr; if(l_avail_power >= mem_pwr_diff) { L_ticks_mem_pwr_available++; if( L_ticks_mem_pwr_available == UNTHROTTLE_MEMORY_DELAY ) { if( L_trace_pcap_unthrottle || (G_allow_trace_flags & ALLOW_MEM_TRACE) ) { TRAC_IMP("PCAP: Un-Throttling memory"); L_trace_pcap_unthrottle = false; } g_amec->pcap.active_mem_level = 0; L_ticks_mem_pwr_available = 0; // don't let the proc have any available power this tick l_avail_power = 0; } } } // check if need to reduce power and frequency is already at the min else if(l_avail_power < 0) { L_ticks_mem_pwr_available = 0; // if memory is not throttled and frequency is at min shed additional power // by throttling memory if( (g_amec->pcap.active_mem_level == 0) && (g_amec->proc[0].pwr_votes.ppb_fmax == g_amec->sys.fmin) ) { if( L_trace_pcap_throttle || (G_allow_trace_flags & ALLOW_MEM_TRACE) ) { TRAC_IMP("PCAP: Throttling memory"); L_trace_pcap_throttle = false; } g_amec->pcap.active_mem_level = 1; } } else { // no changes to memory throttles due to power } } 
else { if(!L_apss_error_traced) { TRAC_ERR("PCAP: PWRSYS sensor is showing a value of 0."); L_apss_error_traced = TRUE; } } // skip processor changes until memory is un-capped if(!g_amec->pcap.active_mem_level) { g_amec->pcap.active_proc_pcap = l_p0_pwr + ((l_proc_fraction * l_avail_power) >> 16); //NOTE: Power capping will not affect nominal cores unless a customer pcap // is set below the max pcap or oversubscription occurs. However, // nominal cores will drop below nominal if ppb_fmax drops below nominal if(g_amec->pcap.active_node_pcap < G_sysConfigData.pcap.max_pcap) { g_amec->proc[0].pwr_votes.nom_pcap_fmin = G_sysConfigData.sys_mode_freq.table[OCC_MODE_MIN_FREQUENCY]; } else { g_amec->proc[0].pwr_votes.nom_pcap_fmin = G_sysConfigData.sys_mode_freq.table[OCC_MODE_NOMINAL]; } }
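// The processor share of any power-cap headroom above is prorated with 16.16 fixed-point
// arithmetic: l_proc_fraction is the processor's fraction of node power scaled by 2^16.
// A worked example with illustrative numbers (not taken from a real system):
//
//   l_node_pwr = 1000 W, l_p0_pwr = 250 W
//       l_proc_fraction  = (250 << 16) / 1000 = 16384      (~0.25 in 16.16 format)
//   l_avail_power = 100 W of headroom under the active node pcap
//       proc share       = (16384 * 100) >> 16 = 25 W
//       active_proc_pcap = 250 + 25 = 275 W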
// Function Specification // // Name: task_dcom_parse_occfwmsg // // Description: Purpose of this task is to handle and acknowledge // fw messages passed from Master to Slave and vice versa. // // End Function Specification void task_dcom_parse_occfwmsg(task_t *i_self) { if(G_occ_role == OCC_MASTER) { // Local slave index counter uint32_t l_slv_idx = 0; // Loop and collect occ data for each slave occ for(; l_slv_idx < MAX_OCCS; l_slv_idx++) { // Verify all slaves are in the correct state and mode G_dcom_slv_outbox_rx[l_slv_idx].occ_fw_mailbox[0] = CURRENT_STATE(); if(G_sysConfigData.system_type.kvm ) { G_dcom_slv_outbox_rx[l_slv_idx].occ_fw_mailbox[1] = G_occ_external_req_mode_kvm; } else { G_dcom_slv_outbox_rx[l_slv_idx].occ_fw_mailbox[1] = CURRENT_MODE(); } // Acknowledge all slave event flags G_slave_event_flags_ack[l_slv_idx] = G_dcom_slv_outbox_rx[l_slv_idx].occ_fw_mailbox[3]; // Clear master event flags if slave has acknowledged them and the event has cleared G_master_event_flags &= ~G_dcom_slv_outbox_rx[l_slv_idx].occ_fw_mailbox[2]; } }//End master role check // Check if master has changed state or mode and update if changed // so that we can handle it in a thread. if( (G_occ_master_state != G_dcom_slv_inbox_rx.occ_fw_mailbox[0]) || (G_occ_master_mode != G_dcom_slv_inbox_rx.occ_fw_mailbox[1]) ) { if( ! isSafeStateRequested() ) { G_occ_master_state = G_dcom_slv_inbox_rx.occ_fw_mailbox[0]; G_occ_master_mode = G_dcom_slv_inbox_rx.occ_fw_mailbox[1]; ssx_semaphore_post(&G_dcomThreadWakeupSem); } } // If we are master, we don't want to update based on // the data sent to us, because it corrupts the 'golden' data // If we are in standby, we don't want to update because // the data may not have been set up yet, and would be set to zero. if(OCC_MASTER != G_occ_role ) { // Update the system mode frequencies if they have changed int l_mode = 0; bool l_change = FALSE; bool l_all_zero = TRUE; // Check if all values are zero for(l_mode = 0; l_mode<OCC_MODE_COUNT; l_mode++) { if( (0 != G_dcom_slv_inbox_rx.sys_mode_freq.table[l_mode]) ) { l_all_zero = FALSE; break; } } extern data_cnfg_t * G_data_cnfg; if( l_all_zero == FALSE) { for(l_mode =0; l_mode<OCC_MODE_COUNT; l_mode++) { // Don't trust a frequency of 0x0000 if( (0 != G_dcom_slv_inbox_rx.sys_mode_freq.table[l_mode]) ) { if(G_sysConfigData.sys_mode_freq.table[l_mode] != G_dcom_slv_inbox_rx.sys_mode_freq.table[l_mode]) { TRAC_INFO("New Frequency for Mode %d: Old: %d MHz -> New: %d MHz",l_mode, G_sysConfigData.sys_mode_freq.table[l_mode], G_dcom_slv_inbox_rx.sys_mode_freq.table[l_mode]); // Update mode frequency G_sysConfigData.sys_mode_freq.table[l_mode] = G_dcom_slv_inbox_rx.sys_mode_freq.table[l_mode]; l_change = TRUE; } } } if(l_change) { // Update "update count" for debug purposes G_sysConfigData.sys_mode_freq.update_count = G_dcom_slv_inbox_rx.sys_mode_freq.update_count; // Change Data Request Mask to indicate we got this data extern data_cnfg_t * G_data_cnfg; G_data_cnfg->data_mask |= DATA_MASK_FREQ_PRESENT; // Notify AMEC that the frequencies have changed AMEC_data_change(DATA_MASK_FREQ_PRESENT); } } else { // Clear Data Request Mask and data G_data_cnfg->data_mask &= (~DATA_MASK_FREQ_PRESENT); memset(&G_sysConfigData.sys_mode_freq.table[0], 0, sizeof(G_sysConfigData.sys_mode_freq.table)); } } // Copy mnfg parameters into g_amec structure g_amec->foverride_enable = G_dcom_slv_inbox_rx.foverride_enable; g_amec->foverride = G_dcom_slv_inbox_rx.foverride; // Copy IPS parameters sent by Master OCC g_amec->slv_ips_freq_request = 
G_dcom_slv_inbox_rx.ips_freq_request; // Copy DPS tunable parameters sent by Master OCC if required if(G_dcom_slv_inbox_rx.tunable_param_overwrite) { AMEC_part_overwrite_dps_parameters(); if(G_dcom_slv_inbox_rx.tunable_param_overwrite == 1) { g_amec->slv_dps_param_overwrite = TRUE; } else { g_amec->slv_dps_param_overwrite = FALSE; } } // Copy soft frequency boundaries sent by Master OCC g_amec->part_config.part_list[0].soft_fmin = G_dcom_slv_inbox_rx.soft_fmin; g_amec->part_config.part_list[0].soft_fmax = G_dcom_slv_inbox_rx.soft_fmax; // acknowledge all master event flags G_master_event_flags_ack = G_dcom_slv_inbox_rx.occ_fw_mailbox[2]; // clear slave event flags if master has acknowledged them and the event has cleared G_slave_event_flags = (G_slave_event_flags & (~(G_dcom_slv_inbox_rx.occ_fw_mailbox[3]))); }
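// The event-flag mailbox words above implement a simple raise/ack/clear handshake.
// The sequence below is a simplified sketch assembled from dcom_build_occfw_msg() and
// task_dcom_parse_occfwmsg(); it is not additional logic:
//
//   master build:  G_dcom_slv_inbox_tx[n].occ_fw_mailbox[2]  = G_master_event_flags;                    // raise flags
//   slave  parse:  G_master_event_flags_ack                  = G_dcom_slv_inbox_rx.occ_fw_mailbox[2];    // ack what was seen
//   slave  build:  G_dcom_slv_outbox_tx.occ_fw_mailbox[2]    = G_master_event_flags_ack;                 // echo the ack
//   master parse:  G_master_event_flags                     &= ~G_dcom_slv_outbox_rx[n].occ_fw_mailbox[2]; // clear acked bits
//
// Slave event flags travel the same way in the opposite direction through mailbox word 3
// (G_slave_event_flags / G_slave_event_flags_ack).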
// Function Specification // // Name: amec_slv_proc_voting_box // // Description: Slave OCC's voting box that decides the frequency request. // This function will run every tick. // // Thread: RealTime Loop // // Task Flags: // // End Function Specification void amec_slv_proc_voting_box(void) { /*------------------------------------------------------------------------*/ /* Local Variables */ /*------------------------------------------------------------------------*/ uint16_t k = 0; uint16_t l_chip_fmax = g_amec->sys.fmax; uint16_t l_core_freq = 0; uint16_t l_core_freq_max = 0; // max freq across all cores uint16_t l_core_freq_min = g_amec->sys.fmax; // min freq across all cores uint32_t l_current_reason = 0; // used for debug purposes static uint32_t L_last_reason = 0; // used for debug purposes uint32_t l_chip_reason = 0; uint32_t l_core_reason = 0; amec_proc_voting_reason_t l_kvm_throt_reason = NO_THROTTLE; amec_part_t *l_part = NULL; // frequency threshold for reporting throttling uint16_t l_report_throttle_freq = G_sysConfigData.system_type.report_dvfs_nom ? G_sysConfigData.sys_mode_freq.table[OCC_MODE_NOMINAL] : G_sysConfigData.sys_mode_freq.table[OCC_MODE_TURBO]; /*------------------------------------------------------------------------*/ /* Code */ /*------------------------------------------------------------------------*/ if (!G_allowPstates) { // Don't allow pstates to be sent until after initial mode has been set if ( (CURRENT_MODE()) || (G_sysConfigData.system_type.kvm) ) { G_allowPstates = TRUE; } } // Voting Box for CPU speed. // This function implements the voting box to decide which input gets the right // to actuate the system. // check for oversubscription if redundant ps policy (oversubscription) is being enforced if (G_sysConfigData.system_type.non_redund_ps == false) { // If in oversubscription and there is a defined (non 0) OVERSUB frequency less than max then use it if( (AMEC_INTF_GET_OVERSUBSCRIPTION()) && (G_sysConfigData.sys_mode_freq.table[OCC_MODE_OVERSUB]) && (G_sysConfigData.sys_mode_freq.table[OCC_MODE_OVERSUB] < l_chip_fmax) ) { l_chip_fmax = G_sysConfigData.sys_mode_freq.table[OCC_MODE_OVERSUB]; l_chip_reason = AMEC_VOTING_REASON_OVERSUB; } } // If there is an active VRM fault and a defined (non 0) VRM N frequency less than max use it if( (g_amec->sys.vrm_fault_status) && (G_sysConfigData.sys_mode_freq.table[OCC_MODE_VRM_N]) && (G_sysConfigData.sys_mode_freq.table[OCC_MODE_VRM_N] < l_chip_fmax) ) { l_chip_fmax = G_sysConfigData.sys_mode_freq.table[OCC_MODE_VRM_N]; l_chip_reason = AMEC_VOTING_REASON_VRM_N; } // PPB_FMAX if(g_amec->proc[0].pwr_votes.ppb_fmax < l_chip_fmax) { l_chip_fmax = g_amec->proc[0].pwr_votes.ppb_fmax; l_chip_reason = AMEC_VOTING_REASON_PPB; if(l_report_throttle_freq <= l_chip_fmax) { l_kvm_throt_reason = PCAP_EXCEED_REPORT; } else { l_kvm_throt_reason = POWERCAP; } } // PMAX_CLIP_FREQ if(g_amec->proc[0].pwr_votes.pmax_clip_freq < l_chip_fmax) { l_chip_fmax = g_amec->proc[0].pwr_votes.pmax_clip_freq; l_chip_reason = AMEC_VOTING_REASON_PMAX; l_kvm_throt_reason = POWER_SUPPLY_FAILURE; } // Pmax_clip frequency request if there is an APSS failure if(g_amec->proc[0].pwr_votes.apss_pmax_clip_freq < l_chip_fmax) { l_chip_fmax = g_amec->proc[0].pwr_votes.apss_pmax_clip_freq; l_chip_reason = AMEC_VOTING_REASON_APSS_PMAX; l_kvm_throt_reason = POWER_SUPPLY_FAILURE; } //THERMALPROC.FREQ_REQUEST //Thermal controller input based on processor temperature if(g_amec->thermalproc.freq_request < l_chip_fmax) { l_chip_fmax = 
g_amec->thermalproc.freq_request; l_chip_reason = AMEC_VOTING_REASON_PROC_THRM; if( l_report_throttle_freq <= l_chip_fmax) { l_kvm_throt_reason = PROC_OVERTEMP_EXCEED_REPORT; } else { l_kvm_throt_reason = CPU_OVERTEMP; } } //Thermal controller input based on VRM Vdd temperature if(g_amec->thermalvdd.freq_request < l_chip_fmax) { l_chip_fmax = g_amec->thermalvdd.freq_request; l_chip_reason = AMEC_VOTING_REASON_VDD_THRM; if( l_report_throttle_freq <= l_chip_fmax) { l_kvm_throt_reason = VDD_OVERTEMP_EXCEED_REPORT; } else { l_kvm_throt_reason = VDD_OVERTEMP; } } for (k=0; k<MAX_NUM_CORES; k++) { if( CORE_PRESENT(k) && !CORE_OFFLINE(k) ) { l_core_freq = l_chip_fmax; l_core_reason = l_chip_reason; // Disable DPS in KVM if(!G_sysConfigData.system_type.kvm) { l_part = amec_part_find_by_core(&g_amec->part_config, k); // Check frequency request generated by DPS algorithms if(g_amec->proc[0].core[k].core_perf.dps_freq_request < l_core_freq) { l_core_freq = g_amec->proc[0].core[k].core_perf.dps_freq_request; l_core_reason = AMEC_VOTING_REASON_UTIL; } // Adjust frequency based on soft frequency boundaries if(l_part != NULL) { if(l_core_freq < l_part->soft_fmin) { // Before enforcing a soft Fmin, make sure we don't // have a thermal or power emergency if(!(l_chip_reason & (AMEC_VOTING_REASON_PROC_THRM | AMEC_VOTING_REASON_VDD_THRM | AMEC_VOTING_REASON_PPB | AMEC_VOTING_REASON_PMAX | AMEC_VOTING_REASON_CONN_OC))) { l_core_freq = l_part->soft_fmin; l_core_reason = AMEC_VOTING_REASON_SOFT_MIN; } } else if(l_core_freq > l_part->soft_fmax) { l_core_freq = l_part->soft_fmax; l_core_reason = AMEC_VOTING_REASON_SOFT_MAX; } } } if(CURRENT_MODE() == OCC_MODE_NOMINAL) { // PROC_PCAP_NOM_VOTE if(g_amec->proc[0].pwr_votes.proc_pcap_nom_vote < l_core_freq) { l_core_freq = g_amec->proc[0].pwr_votes.proc_pcap_nom_vote; l_core_reason = AMEC_VOTING_REASON_PWR; l_kvm_throt_reason = POWERCAP; } } else { // PROC_PCAP_VOTE if(g_amec->proc[0].pwr_votes.proc_pcap_vote < l_core_freq) { l_core_freq = g_amec->proc[0].pwr_votes.proc_pcap_vote; l_core_reason = AMEC_VOTING_REASON_PWR; if(l_report_throttle_freq <= l_core_freq) { l_kvm_throt_reason = PCAP_EXCEED_REPORT; } else { l_kvm_throt_reason = POWERCAP; } } } // Check IPS frequency request sent by Master OCC if(g_amec->slv_ips_freq_request != 0) { if(g_amec->slv_ips_freq_request < l_core_freq) { l_core_freq = g_amec->slv_ips_freq_request; l_core_reason = AMEC_VOTING_REASON_IPS; } } // Override frequency with request from Master OCC if(g_amec->foverride_enable) { if(g_amec->foverride != 0) { // Override the frequency on all cores if Master OCC sends // a non-zero request l_core_freq = g_amec->foverride; l_core_reason = AMEC_VOTING_REASON_OVERRIDE; } l_kvm_throt_reason = MANUFACTURING_OVERRIDE; } if(g_amec->pstate_foverride_enable) { if(g_amec->pstate_foverride != 0) { // Override the frequency on all cores if the Global Pstate // table has been modified l_core_freq = g_amec->pstate_foverride; l_core_reason = AMEC_VOTING_REASON_OVERRIDE; } } //Make sure the frequency is not less than the system min if(l_core_freq < g_amec->sys.fmin) { l_core_freq = g_amec->sys.fmin; } // Override frequency via Amester parameter interface if (g_amec->proc[0].parm_f_override_enable && g_amec->proc[0].parm_f_override[k] > 0) { l_core_freq = g_amec->proc[0].parm_f_override[k]; l_core_reason = AMEC_VOTING_REASON_OVERRIDE_CORE; } //STORE core frequency and reason g_amec->proc[0].core[k].f_request = l_core_freq; g_amec->proc[0].core[k].f_reason = l_core_reason; if(l_core_freq < l_core_freq_min) { // 
store the new lowest frequency and reason to be used after all cores checked l_core_freq_min = l_core_freq; l_current_reason = l_core_reason; } // Update the Amester parameter telling us the reason. Needed for // parameter array. g_amec->proc[0].parm_f_reason[k] = l_core_reason; //CURRENT_MODE() may be OCC_MODE_NOCHANGE because STATE change is processed //before MODE change if ((CURRENT_MODE() != OCC_MODE_DYN_POWER_SAVE) && (CURRENT_MODE() != OCC_MODE_DYN_POWER_SAVE_FP) && (CURRENT_MODE() != OCC_MODE_NOM_PERFORMANCE) && (CURRENT_MODE() != OCC_MODE_MAX_PERFORMANCE) && (CURRENT_MODE() != OCC_MODE_FMF) && (CURRENT_MODE() != OCC_MODE_NOCHANGE) && (l_core_reason & NON_DPS_POWER_LIMITED)) { G_non_dps_power_limited = TRUE; } else { G_non_dps_power_limited = FALSE; } // Update the sensor telling us what the requested frequency is sensor_update( AMECSENSOR_ARRAY_PTR(FREQREQC0,k), (uint16_t) g_amec->proc[0].core[k].f_request); #if DEBUG_PROC_VOTING_BOX /// This trace can be used to debug the voting /// box and control loops. It will trace the reason why a /// controller is lowering the freq, but will only do it once in a /// row for the specific freq it wants to control to. It assumes /// that all cores will be controlled to same freq. if(l_chip_fmax != g_amec->sys.fmax){ static uint16_t L_trace = 0; if(l_chip_fmax != L_trace){ L_trace = l_chip_fmax; TRAC_INFO("Core: %d, Freq: %d, Reason: %d",k,l_core_freq,l_core_reason); } } #endif if(l_core_freq > l_core_freq_max) { l_core_freq_max = l_core_freq; } } // if core present and not offline else { //Set f_request to 0 so this core is ignored in amec_slv_freq_smh() g_amec->proc[0].core[k].f_request = 0; g_amec->proc[0].core[k].f_reason = 0; } }//End of for loop // update max core frequency if not 0, i.e. not all cores offline (stop 2 or greater) // this is used by power capping alg, updating to 0 will cause power throttling when not needed if(l_core_freq_max) { g_amec->proc[0].core_max_freq = l_core_freq_max; // update the overall reason driving frequency across all cores g_amec->proc[0].f_reason = l_current_reason; } //check if there was a throttle reason change if(l_kvm_throt_reason != G_amec_opal_proc_throt_reason) { //Always update G_amec_opal_proc_throt_reason, this is used to set poll rsp bits for all system types G_amec_opal_proc_throt_reason = l_kvm_throt_reason; // Only if running OPAL need to notify dcom thread to update the table in HOMER for OPAL if(G_sysConfigData.system_type.kvm) { ssx_semaphore_post(&G_dcomThreadWakeupSem); } } // For debug... if lower than max update vars returned in poll response to give clipping reason g_amec->proc[0].core_min_freq = l_core_freq_min; if(l_core_freq_min < g_amec->sys.fmax) { if(l_current_reason == L_last_reason) { // same reason INC counter if(g_amec->proc[0].current_clip_count != 0xFF) { g_amec->proc[0].current_clip_count++; } } else { // new reason update history and set counter to 1 L_last_reason = l_current_reason; g_amec->proc[0].current_clip_count = 1; if( (g_amec->proc[0].chip_f_reason_history & l_current_reason) == 0) { g_amec->proc[0].chip_f_reason_history |= l_current_reason; TRAC_IMP("First time throttling for reason[0x%08X] History[0x%08X] freq = %d", l_current_reason, g_amec->proc[0].chip_f_reason_history, l_core_freq_min); } } } else // no active clipping { L_last_reason = 0; g_amec->proc[0].current_clip_count = 0; } }