/*!
 * @fn unc_power_hsw_Read_PMU_Data(param)
 *
 * @param param Pointer to the uncore device index (U32) to read
 *
 * @return None No return needed
 *
 * @brief Read the uncore power counter MSRs for this device and store them
 *        into the EMON uncore read buffer. Uncore PMUs do not support
 *        sampling, so there is no sample id to honor.
 */
static VOID
unc_power_hsw_Read_PMU_Data (
    PVOID param
)
{
    U64        *buffer             = read_unc_ctr_info;
    U32         dev_idx            = *((U32 *)param);
    DRV_CONFIG  pcfg_unc           = (DRV_CONFIG)LWPMU_DEVICE_pcfg(&devices[dev_idx]);
    U32         this_cpu           = CONTROL_THIS_CPU();
    CPU_STATE   pcpu               = &pcb[this_cpu];
    U32         num_cpus           = GLOBAL_STATE_num_cpus(driver_state);
    U32         thread_event_count = 0;
    // Base offset into the EMON buffer for this device (group 0).
    U32         start_index        = DRV_CONFIG_emon_unc_offset(pcfg_unc, 0);
    S32         buf_idx;

    FOR_EACH_DATA_REG_UNC(pecb, dev_idx, i) {
        if (ECB_entries_event_scope(pecb, i) == PACKAGE_EVENT) {
            // Package-scope event: only the socket master records a value;
            // everyone else skips this register entirely.
            if (!CPU_STATE_socket_master(pcpu)) {
                continue;
            }
            buf_idx = start_index
                      + thread_event_count * (num_cpus - 1)
                      + ECB_entries_group_index(pecb, i)
                      + ECB_entries_emon_event_id_index_local(pecb, i);
        }
        else {
            // Thread-scope event: every CPU records its own slot, offset by
            // this_cpu within the per-event stride.
            buf_idx = start_index
                      + this_cpu
                      + thread_event_count * (num_cpus - 1)
                      + ECB_entries_group_index(pecb, i)
                      + ECB_entries_emon_event_id_index_local(pecb, i);
            thread_event_count++;
        }
        buffer[buf_idx] = SYS_Read_MSR(ECB_entries_reg_id(pecb, i));
    } END_FOR_EACH_DATA_REG_UNC;

    return;
}
/*!
 * @fn corei7_Read_PMU_Data(param)
 *
 * @param param dummy parameter which is not used
 *
 * @return None No return needed
 *
 * @brief Read all the data MSR's into a buffer. Called by the interrupt handler.
 *
 */
static VOID
corei7_unc_Read_PMU_Data(
    PVOID param
)
{
    S32 start_index, j;
    U64 *buffer = read_counter_info;
    U32 this_cpu = CONTROL_THIS_CPU();

    // Each CPU owns a contiguous slice of the read buffer, one slot per event.
    start_index = DRV_CONFIG_num_events(pcfg) * this_cpu;
    // NOTE(review): 'j' is passed to the debug print below before it is ever
    // assigned (its first assignment is inside the loop) — the %d argument
    // prints an indeterminate value; should print start_index or be moved.
    SEP_PRINT_DEBUG("PMU control_data 0x%p, buffer 0x%p, j = %d\n",
                    PMU_register_data, buffer, j);
    FOR_EACH_DATA_REG(pecb_unc,i) {
        // Slot for this event within this CPU's slice of the buffer.
        j = start_index + ECB_entries_event_id_index(pecb_unc,i);
        buffer[j] = SYS_Read_MSR(ECB_entries_reg_id(pecb_unc,i));
        SEP_PRINT_DEBUG("this_cpu %d, event_id %d, value 0x%llx\n",
                        this_cpu, i, buffer[j]);
    }
    // NOTE(review): function appears truncated in this chunk — no
    // END_FOR_EACH_DATA_REG / return / closing brace is visible here.
/*!
 * @fn unc_power_avt_Read_PMU_Data(param)
 *
 * @param param The read thread node to process
 *
 * @return None No return needed
 *
 * @brief Read the Uncore count data and store into the buffer param;
 *        Uncore PMU does not support sampling, i.e. ignore the id parameter.
 */
static VOID
unc_power_avt_Read_PMU_Data (
    PVOID param
)
{
    S32 j;
    U64 *buffer = read_unc_ctr_info;
    U32 dev_idx = *((U32*)param);
    U32 start_index;
    DRV_CONFIG pcfg_unc;
    U32 this_cpu = CONTROL_THIS_CPU();
    CPU_STATE pcpu = &pcb[this_cpu];
    U32 num_cpus = GLOBAL_STATE_num_cpus(driver_state);
    U32 cur_grp = LWPMU_DEVICE_cur_group(&devices[(dev_idx)]);
    U32 package_event_count = 0;   // package-scope events seen so far
    U32 thread_event_count = 0;    // thread-scope events seen so far
    U32 module_event_count = 0;    // module-scope events seen so far

    pcfg_unc = (DRV_CONFIG)LWPMU_DEVICE_pcfg(&devices[dev_idx]);
    // Base offset into the EMON buffer for this device's current event group.
    start_index = DRV_CONFIG_emon_unc_offset(pcfg_unc, cur_grp);
    FOR_EACH_DATA_REG_UNC(pecb, dev_idx, i) {
        // Base slot: prior events of each scope consume num_packages /
        // num_modules / num_cpus slots apiece; the scope-specific offset
        // (package id, module id, or cpu id) is added below.
        j = start_index + ECB_entries_group_index(pecb,i)
            + package_event_count*num_packages
            + module_event_count*(GLOBAL_STATE_num_modules(driver_state))
            + thread_event_count*num_cpus ;
        if (ECB_entries_event_scope(pecb,i) == PACKAGE_EVENT) {
            j = j + core_to_package_map[this_cpu];
            package_event_count++;
            // Only the socket master records a package-scope value.
            if (!CPU_STATE_socket_master(pcpu)) {
                continue;
            }
        }
        else if (ECB_entries_event_scope(pecb,i) == MODULE_EVENT) {
            j = j + CPU_STATE_cpu_module_num(pcpu);
            module_event_count++;
            // Only the module master records a module-scope value.
            if (!CPU_STATE_cpu_module_master(pcpu)) {
                continue;
            }
        }
        else {
            // Thread-scope: each CPU records its own slot.
            j = j + this_cpu;
            thread_event_count++;
        }
        buffer[j] = SYS_Read_MSR(ECB_entries_reg_id(pecb,i));
        //SEP_PRINT_DEBUG("cpu=%d j=%d mec=%d mid=%d tec=%d i=%d gi=%d ei=%d count=%llu\n", this_cpu, j, module_event_count, CPU_STATE_cpu_module_num(pcpu), thread_event_count, i, ECB_entries_group_index(pecb,i), ECB_entries_emon_event_id_index_local(pecb,i), buffer[j]);
    } END_FOR_EACH_DATA_REG_UNC;
    // NOTE(review): function appears truncated in this chunk — no return /
    // closing brace is visible here.
/******************************************************************************************
 * @fn static VOID unc_power_snb_Write_PMU(VOID*)
 *
 * @brief No control registers need programming; seed the per-thread
 *        accumulators with the counters' current (masked) values and record
 *        the device counter mask.
 *
 * @return None
 *
 * <I>Special Notes:</I>
 ******************************************************************************************/
static VOID
unc_power_snb_Write_PMU (
    VOID *param
)
{
    U32 dev_idx  = *((U32 *)param);
    U32 event_id = 0;
    U32 cpu_idx;
    U64 masked_val;

    FOR_EACH_REG_ENTRY_UNC(pecb, dev_idx, i) {
        // Initialize counter_mask for accumulators (first entry only).
        if (LWPMU_DEVICE_counter_mask(&devices[dev_idx]) == 0) {
            LWPMU_DEVICE_counter_mask(&devices[dev_idx]) = (U64)ECB_entries_max_bits(pecb, i);
        }
        // Seed every CPU's previous-value slot from the current counter reading.
        for (cpu_idx = 0; cpu_idx < (U32)GLOBAL_STATE_num_cpus(driver_state); cpu_idx++) {
            masked_val = SYS_Read_MSR(ECB_entries_reg_id(pecb, i)) & SNB_POWER_MSR_DATA_MASK;
            // need to account for group id
            LWPMU_DEVICE_prev_val_per_thread(&devices[dev_idx])[cpu_idx][event_id + 1] = masked_val;
        }
    } END_FOR_EACH_REG_ENTRY_UNC;

    return;
}
/*!
 * @fn static U32 chap_Read_Counters(PVOID param)
 *
 * @brief Read the CHAP counter data
 *
 * @param PVOID param - address of the buffer to write into
 *
 * @return None
 *
 * <I>Special Notes:</I>
 * <NONE>
 */
static VOID
chap_Read_Counters (
    PVOID param
)
{
    U64 *data;
    CHAP_INTERFACE chap;
    U32 mch_cpu;
    int i, data_index;
    U64 tmp_data;
    U64 *mch_data;     // start of the MCH section inside 'data'
    U64 *ich_data;     // start of the ICH section inside 'data'
    U64 *mmio_data;    // start of the NOA/MMIO section inside 'data'
    U64 *mmio;
    U32 this_cpu = CONTROL_THIS_CPU();
    CHIPSET_SEGMENT mch_chipset_seg = &CHIPSET_CONFIG_mch(pma);
    CHIPSET_SEGMENT ich_chipset_seg = &CHIPSET_CONFIG_ich(pma);
    CHIPSET_SEGMENT noa_chipset_seg = &CHIPSET_CONFIG_noa(pma);

    data = param;
    data_index = 0;

    // Save the Motherboard time. This is universal time for this
    // system. This is the only 64-bit timer so we save it first so
    // always aligned on 64-bit boundary that way.
    if (CHIPSET_CONFIG_mch_chipset(pma)) {
        mch_data = data + data_index;
        // Save the MCH counters.
        chap = (CHAP_INTERFACE)(UIOP)CHIPSET_SEGMENT_virtual_address(mch_chipset_seg);
        // Latch all MCH CHAP counters before reading them.
        for (i = CHIPSET_SEGMENT_start_register(mch_chipset_seg); i < CHIPSET_SEGMENT_total_events(mch_chipset_seg); i++) {
            CHAP_INTERFACE_command_register(&chap[i]) = 0x00020000;   // Sample
        }
        // The StartingReadRegister is only used for special event
        // configs that use CHAP counters to trigger events in other
        // CHAP counters. This is an unusual request but useful in
        // getting the number of lit subspans - implying a count of the
        // number of triangles. I am not sure it will be used
        // elsewhere. We cannot read some of the counters because it
        // will invalidate their configuration to trigger other CHAP
        // counters. Yuk!
        data_index += CHIPSET_SEGMENT_start_register(mch_chipset_seg);
        for (i = CHIPSET_SEGMENT_start_register(mch_chipset_seg); i < CHIPSET_SEGMENT_total_events(mch_chipset_seg); i++) {
            data[data_index++] = CHAP_INTERFACE_data_register(&chap[i]);
        }
        // Initialize the counters on the first interrupt
        if (pcb[this_cpu].chipset_count_init == TRUE) {
            for (i = 0; i < CHIPSET_SEGMENT_total_events(mch_chipset_seg); i++) {
                pcb[this_cpu].last_mch_count[i] = mch_data[i];
            }
        }
        // Now compute the delta!
        // NOTE: Special modification to accomodate Gen 4 work - count
        // everything since last interrupt - regardless of cpu! This
        // way there is only one count of the Gen 4 counters.
        //
        mch_cpu = CHIPSET_CONFIG_host_proc_run(pma) ? this_cpu : 0;
        for (i = 0; i < CHIPSET_SEGMENT_total_events(mch_chipset_seg); i++) {
            tmp_data = mch_data[i];
            // Counter wrapped since the last read; the (U32)(-1) term
            // assumes a 32-bit hardware counter width.
            if (mch_data[i] < pcb[mch_cpu].last_mch_count[i]) {
                mch_data[i] = mch_data[i] + (U32)(-1) - pcb[mch_cpu].last_mch_count[i];
            }
            else {
                mch_data[i] = mch_data[i] - pcb[mch_cpu].last_mch_count[i];
            }
            pcb[mch_cpu].last_mch_count[i] = tmp_data;
        }
    }

    if (CHIPSET_CONFIG_ich_chipset(pma)) {
        // Save the ICH counters.
        ich_data = data + data_index;
        chap = (CHAP_INTERFACE)(UIOP)CHIPSET_SEGMENT_virtual_address(ich_chipset_seg);
        // Latch, then read, all ICH CHAP counters.
        for (i = 0; i < CHIPSET_SEGMENT_total_events(ich_chipset_seg); i++) {
            CHAP_INTERFACE_command_register(&chap[i]) = 0x00020000;   // Sample
        }
        for (i = 0; i < CHIPSET_SEGMENT_total_events(ich_chipset_seg); i++) {
            data[data_index++] = CHAP_INTERFACE_data_register(&chap[i]);
        }
        // Initialize the counters on the first interrupt
        if (pcb[this_cpu].chipset_count_init == TRUE) {
            for (i = 0; i < CHIPSET_SEGMENT_total_events(ich_chipset_seg); i++) {
                pcb[this_cpu].last_ich_count[i] = ich_data[i];
            }
        }
        // Now compute the delta!
        for (i = 0; i < CHIPSET_SEGMENT_total_events(ich_chipset_seg); i++) {
            tmp_data = ich_data[i];
            // 32-bit wraparound handling, as for the MCH counters above.
            if (ich_data[i] < pcb[this_cpu].last_ich_count[i]) {
                ich_data[i] = ich_data[i] + (U32)(-1) - pcb[this_cpu].last_ich_count[i];
            }
            else {
                ich_data[i] = ich_data[i] - pcb[this_cpu].last_ich_count[i];
            }
            pcb[this_cpu].last_ich_count[i] = tmp_data;
        }
    }

    if (CHIPSET_CONFIG_noa_chipset(pma)) {
        // Save the MMIO counters.
        mmio_data = data + data_index;
        mmio = (U64 *) (UIOP)CHIPSET_SEGMENT_virtual_address(noa_chipset_seg);
        for (i = 0; i < CHIPSET_SEGMENT_total_events(noa_chipset_seg); i++) {
            // NOTE(review): 2244 is a magic base offset (in U64 units, stride
            // of 2) into the NOA MMIO window — presumably hardware-defined;
            // confirm against the chipset register map.
            data[data_index++] = mmio[i*2 + 2244];   // 64-bit quantity
        }
        // Initialize the counters on the first interrupt
        if (pcb[this_cpu].chipset_count_init == TRUE) {
            for (i = 0; i < CHIPSET_SEGMENT_total_events(noa_chipset_seg); i++) {
                pcb[this_cpu].last_mmio_count[i] = mmio_data[i];
            }
        }
        // Now compute the delta!
        for (i = 0; i < CHIPSET_SEGMENT_total_events(noa_chipset_seg); i++) {
            tmp_data = mmio_data[i];
            // 32-bit wraparound handling, as for the MCH counters above.
            if (mmio_data[i] < pcb[this_cpu].last_mmio_count[i]) {
                mmio_data[i] = mmio_data[i] + (U32)(-1) - pcb[this_cpu].last_mmio_count[i];
            }
            else {
                mmio_data[i] = mmio_data[i] - pcb[this_cpu].last_mmio_count[i];
            }
            pcb[this_cpu].last_mmio_count[i] = tmp_data;
        }
    }

    // First-interrupt initialization is done; deltas are computed from now on.
    pcb[this_cpu].chipset_count_init = FALSE;

    // Read and clear the core-side data MSRs for this event set.
    FOR_EACH_DATA_REG(pecb,i) {
        data[data_index++] = SYS_Read_MSR(ECB_entries_reg_id(pecb,i));
        SYS_Write_MSR(ECB_entries_reg_id(pecb,i), (U64)0);
    } END_FOR_EACH_DATA_REG;
    // NOTE(review): function appears truncated in this chunk — no return /
    // closing brace is visible here.