/*!
 * @fn extern VOID CPUMON_Remove_Cpuhooks (void)
 *
 * @brief Detach the driver's PMU interrupt handler, undoing
 *        CPUMON_Install_Cpuhooks.
 *
 * @param none
 *
 * @return none
 *
 * <I>Special Notes:</I>
 *     Which teardown path runs is decided at compile time by the
 *     PERFMON_V1 / PERFMON_V2 / PERFMON_V2_ALT configuration macros.
 */
extern VOID
CPUMON_Remove_Cpuhooks (
    void
)
{
#if defined(PERFMON_V1) || defined(PERFMON_V2_ALT)
    int status;

    SEP_PRINT_DEBUG("CPUMON_Remove_Cpuhooks: entered... pmv=0x%p \n", SYS_Read_PMV());
    /*
     * if Perfmon1 or Perfmon2_alt is set, we used the perfmon.c
     * interface to steal perfmon.c's interrupt handler for our use.
     * Now we must release it back.
     * Don't free_irq() because perfmon.c still wants to use it
     */
    status = CPUMON_REMOVE_INTERRUPT(&desc);
    if (status) {
        // removal failure is logged but not fatal; nothing else to unwind here
        SEP_PRINT_WARNING("CPUMON_Remove_Cpuhooks: CPUMON_REMOVE_INTERRUPT returned: %d\n",status);
    }
#elif !defined(PERFMON_V2)
    SEP_PRINT_DEBUG("CPUMON_Remove_Cpuhooks: entered... pmv=0x%p \n", SYS_Read_PMV());
    // atomically claim and clear the saved irqaction pointer so that only
    // one caller ever releases the IRQ (xchg returns the previous value)
    if (xchg(&pebs_irqaction, 0)) {
        free_irq(ebs_irq, NULL);
    }
#endif
    SEP_PRINT_DEBUG("CPUMON_Remove_Cpuhooks: exit... pmv=0x%p \n", SYS_Read_PMV());

    return;
}
/*!
 * @fn extern int PCI_Write_To_Memory_Address(addr, val)
 *
 * @param addr - physical address in mmio
 * @param val  - value to be written
 *
 * @return OS_SUCCESS on success, OS_INVALID on a zero address or a
 *         failed mapping
 *
 * @brief Write to memory mapped i/o physical location
 *
 * <I>Special Notes:</I>
 *     The page containing the target address is mapped uncached for the
 *     duration of the single 32-bit write, then unmapped.
 */
extern int
PCI_Write_To_Memory_Address (
    U32 addr,
    U32 val
)
{
    U32    aligned_addr, offset;
    PVOID  base;

    // addr is unsigned, so the old "addr <= 0" test only ever caught 0;
    // test for the invalid address explicitly
    if (addr == 0) {
        return OS_INVALID;
    }

    SEP_PRINT_DEBUG("PCI_Write_To_Memory_Address: writing physical address:%x with value:%x\n",addr,val);

    // split the target into a page-aligned base and an offset within the page
    offset       = addr & ~PAGE_MASK;
    aligned_addr = addr & PAGE_MASK;
    SEP_PRINT_DEBUG("PCI_Write_To_Memory_Address: aligned physical address:%x,offset:%x\n",aligned_addr,offset);

    base = ioremap_nocache(aligned_addr, PAGE_SIZE);
    if (base == NULL) {
        return OS_INVALID;
    }

    writel(val, base + offset);

    iounmap(base);

    return OS_SUCCESS;
}
/*!
 * @fn extern int PCI_Read_From_Memory_Address(addr, val)
 *
 * @param addr - physical address in mmio
 * @param val  - out: value read from this address
 *
 * @return OS_SUCCESS on success, OS_INVALID on a zero address, NULL
 *         output pointer, or a failed mapping
 *
 * @brief Read memory mapped i/o physical location
 *
 * <I>Special Notes:</I>
 *     The page containing the target address is mapped uncached for the
 *     duration of the single 32-bit read, then unmapped.
 */
extern int
PCI_Read_From_Memory_Address (
    U32  addr,
    U32 *val
)
{
    U32    aligned_addr, offset, value;
    PVOID  base;

    // addr is unsigned, so the old "addr <= 0" test only ever caught 0;
    // also reject a NULL output pointer before dereferencing it below
    if (addr == 0 || val == NULL) {
        return OS_INVALID;
    }

    SEP_PRINT_DEBUG("PCI_Read_From_Memory_Address: reading physical address:%x\n",addr);

    // split the target into a page-aligned base and an offset within the page
    offset       = addr & ~PAGE_MASK;
    aligned_addr = addr & PAGE_MASK;
    SEP_PRINT_DEBUG("PCI_Read_From_Memory_Address: aligned physical address:%x,offset:%x\n",aligned_addr,offset);

    base = ioremap_nocache(aligned_addr, PAGE_SIZE);
    if (base == NULL) {
        return OS_INVALID;
    }

    value = readl(base + offset);
    *val  = value;
    SEP_PRINT_DEBUG("PCI_Read_From_Memory_Address: value at this physical address:%x\n",value);

    iounmap(base);

    return OS_SUCCESS;
}
/*! * @fn static U32 chap_Init_Chipset(void) * * @brief Chipset PMU initialization * * @param None * * @return VT_SUCCESS if successful, otherwise error * * <I>Special Notes:</I> * <NONE> */ static U32 chap_Init_Chipset ( VOID ) { U32 i; CHIPSET_SEGMENT mch_chipset_seg = &CHIPSET_CONFIG_mch(pma); CHIPSET_SEGMENT ich_chipset_seg = &CHIPSET_CONFIG_ich(pma); CHIPSET_SEGMENT noa_chipset_seg = &CHIPSET_CONFIG_noa(pma); SEP_PRINT_DEBUG("Initializing chipset ...\n"); if (DRV_CONFIG_enable_chipset(pcfg)) { for (i=0; i < GLOBAL_STATE_num_cpus(driver_state); i++) { pcb[i].chipset_count_init = TRUE; } if (CHIPSET_CONFIG_mch_chipset(pma)) { if (CHIPSET_SEGMENT_virtual_address(mch_chipset_seg) == 0) { // Map the virtual address of the PCI CHAP interface. CHIPSET_SEGMENT_virtual_address(mch_chipset_seg) = (U64) (UIOP) ioremap_nocache( CHIPSET_SEGMENT_physical_address(mch_chipset_seg), CHIPSET_SEGMENT_size(mch_chipset_seg)); } } if (CHIPSET_CONFIG_ich_chipset(pma)) { if (CHIPSET_SEGMENT_virtual_address(ich_chipset_seg) == 0) { // Map the virtual address of the PCI CHAP interface. CHIPSET_SEGMENT_virtual_address(ich_chipset_seg) = (U64) (UIOP) ioremap_nocache( CHIPSET_SEGMENT_physical_address(ich_chipset_seg), CHIPSET_SEGMENT_size(ich_chipset_seg)); } } // Here we map the MMIO registers for the Gen X processors. if (CHIPSET_CONFIG_noa_chipset(pma)) { if (CHIPSET_SEGMENT_virtual_address(noa_chipset_seg) == 0) { // Map the virtual address of the PCI CHAP interface. CHIPSET_SEGMENT_virtual_address(noa_chipset_seg) = (U64) (UIOP) ioremap_nocache( CHIPSET_SEGMENT_physical_address(noa_chipset_seg), CHIPSET_SEGMENT_size(noa_chipset_seg)); } } // // always collect processor events // CHIPSET_CONFIG_processor(pma) = 1; } else { CHIPSET_CONFIG_processor(pma) = 0; } SEP_PRINT_DEBUG("Initializing chipset done.\n"); return VT_SUCCESS; }
/*
 * @fn OS_STATUS OUTPUT_Flush()
 *
 * @brief Flush the module buffers and sample buffers
 *
 * @return OS_STATUS
 *
 * For each CPU in the system, set buffer full to the byte count to flush.
 * Flush the modules buffer, as well.
 *
 */
extern int
OUTPUT_Flush (
    VOID
)
{
    int        i;
    int        writers = 0;
    OUTPUT     outbuf;

    /*
     * Flush all remaining data to files
     * set up a flush event
     */
    init_waitqueue_head(&flush_queue);
    SEP_PRINT_DEBUG("flush: waiting for %d writers\n",(GLOBAL_STATE_num_cpus(driver_state)+ OTHER_C_DEVICES));
    // First pass: count the active per-CPU writers and mark each CPU's
    // current buffer as "full" up to the bytes actually used, so readers
    // will drain the partial buffer.
    for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) {
        if (CPU_STATE_initial_mask(&pcb[i]) == 0) {
            continue;
        }
        outbuf = &(cpu_buf[i].outbuf);
        writers += 1;
        OUTPUT_buffer_full(outbuf,OUTPUT_current_buffer(outbuf)) =
            OUTPUT_total_buffer_size(outbuf) - OUTPUT_remaining_buffer_size(outbuf);
    }
    atomic_set(&flush_writers, writers + OTHER_C_DEVICES);
    // Flip the switch to terminate the output threads
    // Do not do this earlier, as threads may terminate before all the data is flushed
    flush = 1;
    // Second pass (after flush is visible): re-mark the buffers and wake the
    // per-CPU readers so they observe flush==1 with up-to-date byte counts.
    for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) {
        if (CPU_STATE_initial_mask(&pcb[i]) == 0) {
            continue;
        }
        outbuf = &BUFFER_DESC_outbuf(&cpu_buf[i]);
        OUTPUT_buffer_full(outbuf,OUTPUT_current_buffer(outbuf)) =
            OUTPUT_total_buffer_size(outbuf) - OUTPUT_remaining_buffer_size(outbuf);
        wake_up_interruptible_sync(&BUFFER_DESC_queue(&cpu_buf[i]));
    }

    // Flush all data from the module buffers
    outbuf = &BUFFER_DESC_outbuf(module_buf);
    OUTPUT_buffer_full(outbuf,OUTPUT_current_buffer(outbuf)) =
        OUTPUT_total_buffer_size(outbuf) - OUTPUT_remaining_buffer_size(outbuf);
    SEP_PRINT_DEBUG("OUTPUT_Flush - waking up module_queue\n");
    wake_up_interruptible_sync(&BUFFER_DESC_queue(module_buf));

    // Wait for buffers to empty; each reader decrements flush_writers when
    // it reaches end-of-file (see output_Read).
    if (wait_event_interruptible(flush_queue, atomic_read(&flush_writers)==0)) {
        return OS_RESTART_SYSCALL;
    }
    SEP_PRINT_DEBUG("OUTPUT_Flush - awakened from flush_queue\n");
    flush = 0;

    return 0;
}
/*!
 * @fn void corei7_unc_Write_PMU(param)
 *
 * @param param pointer to the uncore device index (U32)
 *
 * @return None     No return needed
 *
 * @brief Initial set up of the PMU registers
 *
 * <I>Special Notes</I>
 *       Initial write of PMU registers.
 *       Walk through the enties and write the value of the register accordingly.
 *       Assumption:  For CCCR registers the enable bit is set to value 0.
 *       When current_group = 0, then this is the first time this routine is called,
 *       initialize the locks and set up EM tables.
 */
static VOID
corei7_unc_Write_PMU (
    VOID  *param
)
{
    U32  dev_idx = *((U32*)param);

    FOR_EACH_REG_ENTRY_UNC(pecb_unc, dev_idx, i) {
        /*
         * Writing the GLOBAL Control register enables the PMU to start counting.
         * So write 0 into the register to prevent any counting from starting.
         */
        if (ECB_entries_reg_id(pecb_unc,i) == UNC_UCLK_PERF_GLOBAL_CTRL) {
            SYS_Write_MSR(ECB_entries_reg_id(pecb_unc,i), 0LL);
            continue;
        }
        // program every other register with its configured initial value
        SYS_Write_MSR(ECB_entries_reg_id(pecb_unc,i), ECB_entries_reg_value(pecb_unc,i));
#if defined(MYDEBUG)
        SEP_PRINT_DEBUG("corei7_UNC_Write_PMU Event_Data_reg = 0x%x --- value 0x%llx\n",
                        ECB_entries_reg_id(pecb_unc,i), ECB_entries_reg_value(pecb_unc,i));
#endif
        // this is needed for overflow detection of the accumulators.
        // Only the first entry's width is recorded (mask is set once).
        if (LWPMU_DEVICE_counter_mask(&devices[dev_idx]) == 0) {
            LWPMU_DEVICE_counter_mask(&devices[dev_idx]) = (U64)ECB_entries_max_bits(pecb_unc,i);
        }
    } END_FOR_EACH_REG_ENTRY_UNC;

    return;
}
/*!
 * @fn corei7_Read_PMU_Data(param)
 *
 * @param param dummy parameter which is not used
 *
 * @return None     No return needed
 *
 * @brief  Read all the data MSR's into a buffer. Called by the interrupt handler.
 *
 */
static VOID
corei7_unc_Read_PMU_Data(
    PVOID param
)
{
    S32  start_index, j;
    U64 *buffer   = read_counter_info;
    U32  this_cpu = CONTROL_THIS_CPU();

    // each CPU owns a contiguous slice of the shared results buffer,
    // indexed by (num_events * cpu) + event_id
    start_index = DRV_CONFIG_num_events(pcfg) * this_cpu;
    // NOTE(review): j is passed to this debug print before it is assigned,
    // so an indeterminate value is read here; the trace value is garbage
    // and this should be moved after j is set (or j initialized).
    SEP_PRINT_DEBUG("PMU control_data 0x%p, buffer 0x%p, j = %d\n", PMU_register_data, buffer, j);
    FOR_EACH_DATA_REG(pecb_unc,i) {
        j = start_index + ECB_entries_event_id_index(pecb_unc,i);
        buffer[j] = SYS_Read_MSR(ECB_entries_reg_id(pecb_unc,i));
        SEP_PRINT_DEBUG("this_cpu %d, event_id %d, value 0x%llx\n", this_cpu, i, buffer[j]);
    }
    // NOTE(review): the tail of this function (END_FOR_EACH_DATA_REG and the
    // closing brace) is not visible in this chunk -- appears truncated here.
/*!
 * @fn void cpumon_Save_Cpu(param)
 *
 * @param param - Unused, set up to enable parallel calls
 *
 * @return None     No return needed
 *
 * @brief  Set up the interrupt handler.
 * @brief  Save the old handler for restoration when done
 *
 */
static VOID
cpumon_Save_Cpu (
    PVOID parm
)
{
    unsigned long  eflags;
    IDTGDT_DESC    idt_base;
    CPU_STATE      pcpu = &pcb[CONTROL_THIS_CPU()];
    GATE_STRUCT    old_gate;
    GATE_STRUCT   *idt;

    // take a stable snapshot of the IDT entry with local interrupts off
    SYS_Local_Irq_Save(eflags);
    SYS_Get_IDT_Base((PVOID*)&idt_base);
    idt = idt_base.idtgdt_base;
    CPU_STATE_idt_base(pcpu) = idt;
    // copy the 16-byte gate descriptor for the PMU performance vector
    memcpy (&old_gate, &idt[CPU_PERF_VECTOR], 16);
    // reassemble the 64-bit handler address from the gate's three offset
    // fields (high:32 | middle:16 | low:16) so it can be restored later
    CPU_STATE_saved_ih(pcpu) = (PVOID) ((((U64) old_gate.offset_high) << 32)
                               | (((U64) old_gate.offset_middle) << 16)
                               | ((U64) old_gate.offset_low));
    SEP_PRINT_DEBUG("saved_ih is 0x%llx\n", CPU_STATE_saved_ih(pcpu));
    SYS_Local_Irq_Restore(eflags);

    return;
}
/*
 * @fn  output_Initialized_Buffers()
 *
 * @result OUTPUT
 * @param  BUFFER_DESC desc   - descriptor for the buffer being initialized
 * @param  U32         factor - multiplier for OUTPUT_BUFFER_SIZE.
 *                              1 for cpu buffers, 2 for module buffers.
 *
 * @brief  Allocate, initialize, and return an output data structure
 *
 * <I>Special Notes:</I>
 *     Multiple (OUTPUT_NUM_BUFFERS) buffers will be allocated
 *     Each buffer is of size (OUTPUT_BUFFER_SIZE)
 *     Each field in the buffer is initialized
 *     The event queue for the OUTPUT is initialized
 *
 */
static BUFFER_DESC
output_Initialized_Buffers (
    BUFFER_DESC   desc,
    U32           factor
)
{
    OUTPUT        outbuf;
    int           j;

    /*
     * Allocate the BUFFER_DESC, then allocate its buffers
     */
    if (desc == NULL) {
        desc = (BUFFER_DESC)CONTROL_Allocate_Memory(sizeof(BUFFER_DESC_NODE));
        if (desc == NULL) {
            SEP_PRINT_DEBUG("OUTPUT Initialize_Buffer: Failed Allocation\n");
            return(desc);
        }
    }
    outbuf = &(BUFFER_DESC_outbuf(desc));
    spin_lock_init(&OUTPUT_buffer_lock(outbuf));
    for (j = 0; j < OUTPUT_NUM_BUFFERS; j++) {
        // a buffer may already exist when a descriptor is re-initialized;
        // only allocate the ones that are still NULL
        if (OUTPUT_buffer(outbuf,j) == NULL) {
            OUTPUT_buffer(outbuf,j) = CONTROL_Allocate_Memory(OUTPUT_BUFFER_SIZE * factor);
        }
        OUTPUT_buffer_full(outbuf,j) = 0;
        if (!OUTPUT_buffer(outbuf,j)) {
            SEP_PRINT_DEBUG("OUTPUT Initialize_Buffer: Failed Allocation\n");
            /*return NULL to tell the caller that allocation failed*/
            // NOTE(review): buffers allocated on earlier iterations (and a
            // descriptor allocated above) are not released on this failure
            // path -- potential leak; verify the caller performs cleanup.
            return NULL;
        }
    }
    /*
     * Initialize the remaining fields in the BUFFER_DESC
     */
    OUTPUT_current_buffer(outbuf)        = 0;
    OUTPUT_remaining_buffer_size(outbuf) = OUTPUT_BUFFER_SIZE * factor;
    OUTPUT_total_buffer_size(outbuf)     = OUTPUT_BUFFER_SIZE * factor;
    init_waitqueue_head(&BUFFER_DESC_queue(desc));

    return(desc);
}
/*! * @fn VOID UTILITY_Configure_Chipset * * @brief Configures the chipset information * * @param none * * @return none * * <I>Special Notes:</I> * <NONE> */ extern CS_DISPATCH UTILITY_Configure_Chipset ( void ) { if (CHIPSET_CONFIG_gmch_chipset(pma)) { cs_dispatch = &gmch_dispatch; SEP_PRINT_DEBUG("UTLITY_Configure_Chipset: using GMCH dispatch table!\n"); } else if (CHIPSET_CONFIG_mch_chipset(pma) || CHIPSET_CONFIG_ich_chipset(pma)) { cs_dispatch = &chap_dispatch; SEP_PRINT_DEBUG("UTLITY_Configure_Chipset: using CHAP dispatch table!\n"); } else { SEP_PRINT_ERROR("UTLITY_Configure_Chipset: unable to map chipset dispatch table!\n"); } SEP_PRINT_DEBUG("UTLITY_Configure_Chipset: exiting with cs_dispatch=0x%p\n", cs_dispatch); return cs_dispatch; }
/*!
 * @fn ssize_t OUTPUT_Module_Read(struct file  *filp,
 *                                char         *buf,
 *                                size_t        count,
 *                                loff_t       *f_pos)
 *
 * @brief  Return a module buffer to user-mode. If not full or flush, wait
 *
 * @param *filp  a file pointer
 * @param *buf   a sampling buffer
 * @param count  size of the user's buffer
 * @param f_pos  file pointer (current offset in bytes)
 *
 * @return number of bytes read. zero indicates end of file. Neg means error
 *
 * <I>Special Notes:</I>
 *     Thin wrapper: delegates to output_Read with the shared module buffer.
 */
extern ssize_t
OUTPUT_Module_Read (
    struct file  *filp,
    char         *buf,
    size_t        count,
    loff_t       *f_pos
)
{
    SEP_PRINT_DEBUG("read request for modules on minor\n");

    return output_Read(filp, buf, count, f_pos, module_buf);
}
/*! * @fn static U32 chap_Start_Chipset(void) * @param None * @return VT_SUCCESS if successful, otherwise error * @brief Start collection on the Chipset PMU * * <I>Special Notes:</I> * <NONE> */ static VOID chap_Start_Chipset ( VOID ) { U32 i; CHAP_INTERFACE chap; CHIPSET_SEGMENT mch_chipset_seg = &CHIPSET_CONFIG_mch(pma); CHIPSET_SEGMENT ich_chipset_seg = &CHIPSET_CONFIG_ich(pma); // // reset and start chipset counters // SEP_PRINT_DEBUG("Starting chipset counters...\n"); if (pma) { chap = (CHAP_INTERFACE)(UIOP)CHIPSET_SEGMENT_virtual_address(mch_chipset_seg); if (chap != NULL) { for (i = 0; i < CHIPSET_SEGMENT_total_events(mch_chipset_seg); i++) { CHAP_INTERFACE_command_register(&chap[i]) = 0x00040000; // Reset to zero CHAP_INTERFACE_command_register(&chap[i]) = 0x00010000; // Restart } } chap = (CHAP_INTERFACE) (UIOP)CHIPSET_SEGMENT_virtual_address(ich_chipset_seg); if (chap != NULL) { for (i = 0; i < CHIPSET_SEGMENT_total_events(ich_chipset_seg); i++) { CHAP_INTERFACE_command_register(&chap[i]) = 0x00040000; // Reset to zero CHAP_INTERFACE_command_register(&chap[i]) = 0x00010000; // Restart } } } SEP_PRINT_DEBUG("Starting chipset counters done.\n"); return; }
/*!
 * @fn ssize_t OUTPUT_Sample_Read(struct file  *filp,
 *                                char         *buf,
 *                                size_t        count,
 *                                loff_t       *f_pos)
 *
 * @brief  Return a sample buffer to user-mode. If not full or flush, wait
 *
 * @param *filp  a file pointer
 * @param *buf   a sampling buffer
 * @param count  size of the user's buffer
 * @param f_pos  file pointer (current offset in bytes)
 *
 * @return number of bytes read. zero indicates end of file. Neg means error
 *
 * <I>Special Notes:</I>
 *     The device minor number selects which CPU's buffer is read.
 */
extern ssize_t
OUTPUT_Sample_Read (
    struct file  *filp,
    char         *buf,
    size_t        count,
    loff_t       *f_pos
)
{
    int minor = iminor(filp->f_dentry->d_inode);   // kernel pointer - not user pointer

    SEP_PRINT_DEBUG("read request for samples on minor %d\n", minor);

    return output_Read(filp, buf, count, f_pos, &(cpu_buf[minor]));
}
/*!
 * @fn void cpumon_Save_Cpu(param)
 *
 * @param param  unused parameter
 *
 * @return None     No return needed
 *
 * @brief  Save the old interrupt handler for restoration when done
 *
 * <I>Special Notes:</I>
 *     Runs on each CPU; the IDT entry for the PMU vector is captured with
 *     local interrupts disabled.
 */
static void
cpumon_Save_Cpu (
    PVOID parm
)
{
    unsigned long  flags;
    U64           *idt;
    CPU_STATE      cpu_state;

    // pin to the current CPU only long enough to pick up its state block
    preempt_disable();
    cpu_state = &pcb[CONTROL_THIS_CPU()];
    preempt_enable();

    SYS_Local_Irq_Save(flags);
    idt = SYS_Get_IDT_Base();
    CPU_STATE_idt_base(cpu_state) = idt;
    // save original perf. vector
    CPU_STATE_saved_ih(cpu_state) = idt[CPU_PERF_VECTOR];
    SEP_PRINT_DEBUG("saved_ih is 0x%llx\n", CPU_STATE_saved_ih(cpu_state));
    SYS_Local_Irq_Restore(flags);

    return;
}
/*!
 * @fn  VOID UTILITY_Configure_CPU
 *
 * @brief    Reads the CPU information from the hardware
 *
 * @param    param   dispatch_id -  The id of the dispatch table.
 *
 * @return   Pointer to the correct dispatch table for the CPU architecture,
 *           or NULL (with an error message) when the id is not supported.
 *
 * <I>Special Notes:</I>
 *     Pure id -> table mapping; log messages for cases 400 (SNB GT) and
 *     630 (Ivytown HA) previously named the wrong unit and are corrected.
 */
extern DISPATCH
UTILITY_Configure_CPU (
    U32 dispatch_id
)
{
    DISPATCH dispatch = NULL;

    switch (dispatch_id) {
#if defined(DRV_IA32) || defined(DRV_EM64T)
        case 1:
            SEP_PRINT_DEBUG("Set up the Core(TM)2 processor dispatch table\n");
            dispatch = &core2_dispatch;
            break;
        case 6:
            SEP_PRINT_DEBUG("Set up the Silvermont dispatch table\n");
            dispatch = &silvermont_dispatch;
            break;
        case 7:
            SEP_PRINT_DEBUG("Set up the perfver4 HTON dispatch table such as Skylake\n");
            dispatch = &perfver4_dispatch;
            break;
        case 8:
            SEP_PRINT_DEBUG("Set up the perfver4 HTOFF dispatch table such as Skylake\n");
            dispatch = &perfver4_dispatch_htoff_mode;
            break;
        case 700:
            SEP_PRINT_DEBUG("Set up the Valleyview SA dispatch table\n");
            dispatch = &valleyview_visa_dispatch;
            break;
        case 710:
        case 800:
            SEP_PRINT_DEBUG("Set up the Silvermont/Haswell Server Power dispatch table\n");
            dispatch = &avoton_power_dispatch;
            break;
        case 2:
            SEP_PRINT_DEBUG("Set up the Core i7(TM) processor dispatch table\n");
            dispatch = &corei7_dispatch;
            break;
        case 3:
            SEP_PRINT_DEBUG("Set up the Core i7(TM) dispatch table\n");
            dispatch = &corei7_dispatch_htoff_mode;
            break;
        case 4:
            SEP_PRINT_DEBUG("Set up the Sandybridge processor dispatch table\n");
            dispatch = &corei7_dispatch_2;
            break;
        case 5:
            SEP_PRINT_DEBUG("Set up the Sandybridge dispatch table\n");
            dispatch = &corei7_dispatch_htoff_mode_2;
            break;
        case 9:
            SEP_PRINT_DEBUG("Set up the Nehalem, Westemere dispatch table\n");
            dispatch = &corei7_dispatch_nehalem;
            break;
        case 200:
            SEP_PRINT_DEBUG("Set up the SNB iMC dispatch table\n");
            dispatch = &snbunc_imc_dispatch;
            break;
        case 201:
            SEP_PRINT_DEBUG("Set up the SNB Cbo dispatch table\n");
            dispatch = &snbunc_cbo_dispatch;
            break;
#if !defined (DRV_ANDROID)
        case 100:
            SEP_PRINT_DEBUG("Set up the Core i7 uncore dispatch table\n");
            dispatch = &corei7_unc_dispatch;
            break;
        case 210:
            SEP_PRINT_DEBUG("Set up the WSM-EX iMC dispatch table\n");
            dispatch = &wsmexunc_imc_dispatch;
            break;
        case 211:
            SEP_PRINT_DEBUG("Set up the WSM-EX QPI dispatch table\n");
            dispatch = &wsmexunc_qpi_dispatch;
            break;
        case 212:
            SEP_PRINT_DEBUG("Set up the WSM-EX WBOX dispatch table\n");
            dispatch = &wsmexunc_wbox_dispatch;
            break;
        case 220:
            SEP_PRINT_DEBUG("Set up the JKT IMC dispatch table\n");
            dispatch = &jktunc_imc_dispatch;
            break;
        case 221:
            SEP_PRINT_DEBUG("Set up the JKT QPILL dispatch table\n");
            dispatch = &jktunc_qpill_dispatch;
            break;
        case 222:
            SEP_PRINT_DEBUG("Set up the Jaketown UBOX dispatch table\n");
            dispatch = &jaketown_ubox_dispatch;
            break;
#endif
        case 300:
            SEP_PRINT_DEBUG("Set up the SNB Power dispatch table\n");
            dispatch = &snb_power_dispatch;
            break;
        case 400:
            // message fixed: this selects the GT table, not the Power table
            SEP_PRINT_DEBUG("Set up the SNB GT dispatch table\n");
            dispatch = &snbunc_gt_dispatch;
            break;
        case 500:
            SEP_PRINT_DEBUG("Set up the Haswell UNC NCU dispatch table\n");
            dispatch = &haswellunc_ncu_dispatch;
            break;
#if !defined (DRV_ANDROID)
        case 600:
            SEP_PRINT_DEBUG("Set up the IVT UNC CBO dispatch table\n");
            dispatch = &ivtunc_cbo_dispatch;
            break;
        case 610:
            SEP_PRINT_DEBUG("Set up the IVT UNC IMC dispatch table\n");
            dispatch = &ivtunc_imc_dispatch;
            break;
        case 620:
            SEP_PRINT_DEBUG("Set up the Ivytown UNC PCU dispatch table\n");
            dispatch = &ivytown_pcu_dispatch;
            break;
        case 630:
            // message fixed: this selects the HA table, not the PCU table
            SEP_PRINT_DEBUG("Set up the Ivytown UNC HA dispatch table\n");
            dispatch = &ivytown_ha_dispatch;
            break;
        case 640:
            SEP_PRINT_DEBUG("Set up the Ivytown QPI dispatch table\n");
            dispatch = &ivytown_qpill_dispatch;
            break;
        case 650:
            SEP_PRINT_DEBUG("Set up the Ivytown R3QPI dispatch table\n");
            dispatch = &ivytown_r3qpi_dispatch;
            break;
        case 660:
            SEP_PRINT_DEBUG("Set up the Ivytown UNC UBOX dispatch table\n");
            dispatch = &ivytown_ubox_dispatch;
            break;
        case 670:
            SEP_PRINT_DEBUG("Set up the Ivytown UNC R2PCIe dispatch table\n");
            dispatch = &ivytown_r2pcie_dispatch;
            break;
        case 680:
            SEP_PRINT_DEBUG("Set up the Ivytown UNC IRP dispatch table\n");
            dispatch = &ivytown_irp_dispatch;
            break;
#endif
        case 720:
            SEP_PRINT_DEBUG("Set up the Haswell Power dispatch table\n");
            dispatch = &haswell_power_dispatch;
            break;
#if !defined (DRV_ANDROID)
        case 790:
            SEP_PRINT_DEBUG("Set up the Haswell Server CBO dispatch table\n");
            dispatch = &haswell_server_cbo_dispatch;
            break;
        case 791:
            SEP_PRINT_DEBUG("Set up the Haswell Server PCU dispatch table\n");
            dispatch = &haswell_server_pcu_dispatch;
            break;
        case 792:
            SEP_PRINT_DEBUG("Set up the Haswell Server UBOX dispatch table\n");
            dispatch = &haswell_server_ubox_dispatch;
            break;
        case 793:
            SEP_PRINT_DEBUG("Set up the Haswell Server QPILL dispatch table\n");
            dispatch = &haswell_server_qpill_dispatch;
            break;
        case 794:
            SEP_PRINT_DEBUG("Set up the Haswell Server iMC dispatch table\n");
            dispatch = &haswell_server_imc_dispatch;
            break;
        case 795:
            SEP_PRINT_DEBUG("Set up the Haswell Server HA dispatch table\n");
            dispatch = &haswell_server_ha_dispatch;
            break;
        case 796:
            SEP_PRINT_DEBUG("Set up the Haswell Server R2PCIe dispatch table\n");
            dispatch = &haswell_server_r2pcie_dispatch;
            break;
        case 797:
            SEP_PRINT_DEBUG("Set up the Haswell Server R3QPI dispatch table\n");
            dispatch = &haswell_server_r3qpi_dispatch;
            break;
        case 798:
            SEP_PRINT_DEBUG("Set up the Haswell Server SBOX dispatch table\n");
            dispatch = &haswell_server_sbox_dispatch;
            break;
        case 799:
            SEP_PRINT_DEBUG("Set up the Haswell Server IRP dispatch table\n");
            dispatch = &haswell_server_irp_dispatch;
            break;
#endif
#endif
        default:
            dispatch = NULL;
            SEP_PRINT_ERROR("Architecture not supported (dispatch_id=%d)\n", dispatch_id);
            break;
    }

    return dispatch;
}
/*!
 * @fn  VOID UTILITY_Configure_CPU
 *
 * @brief    Reads the CPU information from the hardware
 *
 * @param    param   dispatch_id -  The id of the dispatch table.
 *
 * @return   Pointer to the correct dispatch table for the CPU architecture,
 *           or NULL (with an error message) when the id is not supported.
 *
 * <I>Special Notes:</I>
 *     Cases 620/630/660/670 previously used SEP_PRINT instead of
 *     SEP_PRINT_DEBUG; made consistent with every other case.  The case
 *     630 message previously named the wrong unit (PCU instead of HA).
 */
extern DISPATCH
UTILITY_Configure_CPU (
    U32 dispatch_id
)
{
    DISPATCH dispatch = NULL;

    switch (dispatch_id) {
#if defined(DRV_IA32) && !defined(DRV_ATOM_ONLY)
        case 0:
            SEP_PRINT_DEBUG("Set up the Core(TM) processor dispatch table\n");
            dispatch = &core_dispatch;
            break;
#endif
#if defined(DRV_IA32) || defined(DRV_EM64T)
        case 1:
            SEP_PRINT_DEBUG("Set up the Core(TM)2 processor dispatch table\n");
            dispatch = &core2_dispatch;
            break;
        case 6:
            SEP_PRINT_DEBUG("Set up the Silvermont dispatch table\n");
            dispatch = &silvermont_dispatch;
            break;
#if !defined(DRV_ATOM_ONLY)
        case 2:
            SEP_PRINT_DEBUG("Set up the Core i7(TM) processor dispatch table\n");
            dispatch = &corei7_dispatch;
            break;
        case 3:
            SEP_PRINT_DEBUG("Set up the Core i7(TM) dispatch table\n");
            dispatch = &corei7_dispatch_htoff_mode;
            break;
        case 4:
            SEP_PRINT_DEBUG("Set up the Sandybridge processor dispatch table\n");
            dispatch = &corei7_dispatch_2;
            break;
        case 5:
            SEP_PRINT_DEBUG("Set up the Sandybridge dispatch table\n");
            dispatch = &corei7_dispatch_htoff_mode_2;
            break;
        case 100:
            SEP_PRINT_DEBUG("Set up the Core i7 uncore dispatch table\n");
            dispatch = &corei7_unc_dispatch;
            break;
        case 200:
            SEP_PRINT_DEBUG("Set up the SNB iMC dispatch table\n");
            dispatch = &snbunc_imc_dispatch;
            break;
        case 201:
            SEP_PRINT_DEBUG("Set up the SNB Cbo dispatch table\n");
            dispatch = &snbunc_cbo_dispatch;
            break;
        case 210:
            SEP_PRINT_DEBUG("Set up the WSM-EX iMC dispatch table\n");
            dispatch = &wsmexunc_imc_dispatch;
            break;
        case 211:
            SEP_PRINT_DEBUG("Set up the WSM-EX QPI dispatch table\n");
            dispatch = &wsmexunc_qpi_dispatch;
            break;
        case 212:
            SEP_PRINT_DEBUG("Set up the WSM-EX WBOX dispatch table\n");
            dispatch = &wsmexunc_wbox_dispatch;
            break;
        case 220:
            SEP_PRINT_DEBUG("Set up the JKT IMC dispatch table\n");
            dispatch = &jktunc_imc_dispatch;
            break;
        case 221:
            SEP_PRINT_DEBUG("Set up the JKT QPILL dispatch table\n");
            dispatch = &jktunc_qpill_dispatch;
            break;
        case 222:
            SEP_PRINT_DEBUG("Set up the Jaketown UBOX dispatch table\n");
            dispatch = &jaketown_ubox_dispatch;
            break;
        case 500:
            SEP_PRINT_DEBUG("Set up the Haswell UNC NCU dispatch table\n");
            dispatch = &haswellunc_ncu_dispatch;
            break;
        case 600:
            SEP_PRINT_DEBUG("Set up the IVT UNC CBO dispatch table\n");
            dispatch = &ivtunc_cbo_dispatch;
            break;
        case 610:
            SEP_PRINT_DEBUG("Set up the IVT UNC IMC dispatch table\n");
            dispatch = &ivtunc_imc_dispatch;
            break;
        case 620:
            SEP_PRINT_DEBUG("Set up the Ivytown UNC PCU dispatch table\n");
            dispatch = &ivytown_pcu_dispatch;
            break;
        case 630:
            // message fixed: this selects the HA table, not the PCU table
            SEP_PRINT_DEBUG("Set up the Ivytown UNC HA dispatch table\n");
            dispatch = &ivytown_ha_dispatch;
            break;
        case 640:
            SEP_PRINT_DEBUG("Set up the Ivytown QPI dispatch table\n");
            dispatch = &ivytown_qpill_dispatch;
            break;
        case 650:
            SEP_PRINT_DEBUG("Set up the Ivytown R3QPI dispatch table\n");
            dispatch = &ivytown_r3qpi_dispatch;
            break;
        case 660:
            SEP_PRINT_DEBUG("Set up the Ivytown UNC UBOX dispatch table\n");
            dispatch = &ivytown_ubox_dispatch;
            break;
        case 670:
            SEP_PRINT_DEBUG("Set up the Ivytown UNC R2PCIe dispatch table\n");
            dispatch = &ivytown_r2pcie_dispatch;
            break;
#endif
#endif
#if defined(DRV_IA64)
        case 4:
            SEP_PRINT_DEBUG("Set up the Itanium(TM) Processor dispatch table\n");
            dispatch = &montecito_dispatch;
            break;
        case 5:
            SEP_PRINT_DEBUG("Set up the Itanium(TM) Processor dispatch table\n");
            dispatch = &poulson_dispatch;
            break;
#endif
        default:
            dispatch = NULL;
            SEP_PRINT_ERROR("Architecture not supported (dispatch_id=%d)\n", dispatch_id);
            break;
    }

    return dispatch;
}
/*!
 * @fn static VOID snbunc_imc_Write_PMU(VOID*)
 *
 * @brief       Initial write of PMU registers
 *              Walk through the enties and write the value of the register accordingly.
 *              When current_group = 0, then this is the first time this routine is called,
 *
 * @param       param - pointer to the uncore device index (U32)
 *
 * @return      None
 *
 * <I>Special Notes:</I>
 *     Decodes the iMC BAR from PCI config space, maps the counter MMIO
 *     page, and seeds the per-thread previous-value table used for
 *     overflow/delta accounting.
 */
static VOID
snbunc_imc_Write_PMU (
    VOID  *param
)
{
    DRV_PCI_DEVICE_ENTRY_NODE  dpden;
    U32                        pci_address;
    U32                        bar_lo;
    U64                        next_bar_offset;
    U64                        bar_hi;
    U64                        physical_address;
    U64                        final_bar;
    U32                        dev_idx = *((U32*)param);
    ECB                        pecb    = LWPMU_DEVICE_PMU_register_data(&devices[(dev_idx)])[0];
    U32                        j;
    U32                        event_id = 0;
    U32                        offset_delta;
    U32                        tmp_value;
    int                        me = CONTROL_THIS_CPU();

    // the package-wide uncore iMC is programmed by one designated CPU only
    if (me != invoking_processor_id) {
        return;
    }
    SEP_PRINT_DEBUG("snbunc_imc_Write_PMU Enter\n");
    dpden = ECB_pcidev_entry_node(pecb);
    pci_address = FORM_PCI_ADDR(DRV_PCI_DEVICE_ENTRY_bus_no(&dpden),
                                DRV_PCI_DEVICE_ENTRY_dev_no(&dpden),
                                DRV_PCI_DEVICE_ENTRY_func_no(&dpden),
                                0);
#if defined(MYDEBUG)
    {
        U32 device_id = PCI_Read_Ulong(pci_address);
        SEP_PRINT("Bus no = 0x%x\n",DRV_PCI_DEVICE_ENTRY_bus_no(&dpden));
        SEP_PRINT("Dev no = 0x%x\n",DRV_PCI_DEVICE_ENTRY_dev_no(&dpden));
        SEP_PRINT("Func no = 0x%x\n",DRV_PCI_DEVICE_ENTRY_func_no(&dpden));
        SEP_PRINT("value for device id = 0x%x\n",device_id);
    }
#endif
    // read the low and high halves of the 64-bit BAR from PCI config space
    pci_address = FORM_PCI_ADDR(DRV_PCI_DEVICE_ENTRY_bus_no(&dpden),
                                DRV_PCI_DEVICE_ENTRY_dev_no(&dpden),
                                DRV_PCI_DEVICE_ENTRY_func_no(&dpden),
                                DRV_PCI_DEVICE_ENTRY_bar_offset(&dpden));
    bar_lo = PCI_Read_Ulong(pci_address);
    next_bar_offset = DRV_PCI_DEVICE_ENTRY_bar_offset(&dpden) + NEXT_ADDR_OFFSET;
    pci_address = FORM_PCI_ADDR(DRV_PCI_DEVICE_ENTRY_bus_no(&dpden),
                                DRV_PCI_DEVICE_ENTRY_dev_no(&dpden),
                                DRV_PCI_DEVICE_ENTRY_func_no(&dpden),
                                next_bar_offset);
    bar_hi = PCI_Read_Ulong(pci_address);
    // combine the halves and strip the non-address bits
    final_bar = (bar_hi << SNBUNC_IMC_BAR_ADDR_SHIFT) | bar_lo;
    final_bar &= SNBUNC_IMC_BAR_ADDR_MASK;
    DRV_PCI_DEVICE_ENTRY_bar_address(&ECB_pcidev_entry_node(pecb)) = final_bar;
    physical_address = DRV_PCI_DEVICE_ENTRY_bar_address(&ECB_pcidev_entry_node(pecb))
                       + DRV_PCI_DEVICE_ENTRY_base_offset_for_mmio(&ECB_pcidev_entry_node(pecb));
    // NOTE(review): virtual_address appears to be file-scope and the mapping
    // is not iounmap'ed in this function -- presumably released elsewhere;
    // verify against the matching teardown path.
    virtual_address = ioremap_nocache(physical_address,4096);

    //Read in the counts into temporary buffer
    FOR_EACH_PCI_DATA_REG(pecb,i,dev_idx,offset_delta) {
        event_id = ECB_entries_event_id_index_local(pecb,i);
        tmp_value = readl((U32*)((char*)(virtual_address) + offset_delta));
        // seed the starting value for every CPU's delta computation
        for ( j = 0; j < (U32)GLOBAL_STATE_num_cpus(driver_state) ; j++) {
            LWPMU_DEVICE_prev_val_per_thread(&devices[dev_idx])[j][event_id + 1] = tmp_value; // need to account for group id
#if defined(MYDEBUG)
            SEP_PRINT_DEBUG("initial value for i =%d is 0x%x\n",i,LWPMU_DEVICE_prev_val_per_thread(&devices[dev_idx])[j][i]);
#endif
        }
        // this is needed for overflow detection of the accumulators.
        if (LWPMU_DEVICE_counter_mask(&devices[dev_idx]) == 0) {
            LWPMU_DEVICE_counter_mask(&devices[dev_idx]) = (U64)ECB_entries_max_bits(pecb,i);
        }
    } END_FOR_EACH_PCI_DATA_REG;
    // NOTE(review): the function's closing lines are not visible in this
    // chunk -- appears truncated here.
/*! * @fn int CPUMON_Install_Cpuhooks(VOID) * @brief Assign the PMU interrupt to the driver * * @return zero if successful, non-zero error value if something failed * * Install the driver ebs handler onto the PMU interrupt. If perfmon is * compiled in then we ask perfmon for the interrupt, otherwise we ask the * kernel... * * <I>Special Notes:</I> * * @Note This routine is for Itanium(R)-based systems only! * * For IA32, the LBRs are not frozen when a PMU interrupt is taken. * Since the LBRs capture information on every branch, for the LBR * registers to be useful, we need to freeze them as quickly as * possible after the interrupt. This means hooking the IDT directly * to call a driver specific interrupt handler. That happens in the * vtxsys.S file via samp_get_set_idt_entry. The real routine being * called first upon PMU interrupt is t_ebs (in vtxsys.S) and that * routine calls PMI_Interrupt_Handler()... * */ extern void CPUMON_Install_Cpuhooks ( void ) { int status = -1; SEP_PRINT_DEBUG("CPUMON_Install_Cpuhooks: entered... 
pmv 0x%p \n", SYS_Read_PMV()); #if defined(PERFMON_V1) || defined(PERFMON_V2_ALT) /* * if Perfmon1 or Perfmon2_alt is set, we can use the perfmon.c * interface to steal perfmon.c's interrupt handler for our use * perfmon.c has already done register_percpu_irq() */ ebs_irq = SEP_PERFMON_IRQ; desc.handler = &PMI_Interrupt_Handler; status = CPUMON_INSTALL_INTERRUPT(&desc); if (status) { SEP_PRINT_ERROR("CPUMON_Install_Cpuhooks: CPUMON_INSTALL_INTERRUPT returned %d\n",status); } #elif !defined(PERFMON_V2) if (pebs_irqaction) { return status; } #ifdef SA_PERCPU_IRQ_SUPPORTED ebs_irq = SEP_PERFMON_IRQ; pebs_irqaction = (struct irqaction *) 1; status = request_irq(SEP_PERFMON_IRQ, PMI_Interrupt_Handler, SA_INTERRUPT | SA_PERCPU_IRQ, "SEP Sampling", NULL); #else { pebs_irqaction = kmalloc(sizeof (struct irqaction), GFP_ATOMIC); if (pebs_irqaction) { memset(pebs_irqaction, 0, sizeof (struct irqaction)); ebs_irq = SEP_PERFMON_IRQ; pebs_irqaction->handler = (void *)PMI_Interrupt_Handler; pebs_irqaction->flags = SA_INTERRUPT; pebs_irqaction->name = SEP_DRIVER_NAME; pebs_irqaction->dev_id = NULL; register_percpu_irq(ebs_irq, pebs_irqaction); status = 0; } else { SEP_PRINT_WARNING("couldn't kmalloc pebs_irqaction (%d bytes)\n", (int)sizeof(struct irqaction)); } } #endif #endif SEP_PRINT("IRQ vector 0x%x will be used for handling PMU interrupts\n", SEP_PERFMON_IRQ); SEP_PRINT_DEBUG("CPUMON_Install_Cpuhooks: exit...... rc=0x%x pmv=0x%p \n", status, SYS_Read_PMV()); return; }
/*!
 * @fn ssize_t output_Read(struct file  *filp,
 *                         char         *buf,
 *                         size_t        count,
 *                         loff_t       *f_pos,
 *                         BUFFER_DESC   kernel_buf)
 *
 * @brief  Return a sample buffer to user-mode. If not full or flush, wait
 *
 * @param *filp          a file pointer
 * @param *buf           a sampling buffer
 * @param  count         size of the user's buffer
 * @param  f_pos         file pointer (current offset in bytes)
 * @param  kernel_buf    the kernel output buffer structure
 *
 * @return number of bytes read. zero indicates end of file. Neg means error
 *
 * Place no more than count bytes into the user's buffer.
 * Block if unavailable on "BUFFER_DESC_queue(buf)"
 *
 * <I>Special Notes:</I>
 *
 */
static ssize_t
output_Read (
    struct file  *filp,
    char         *buf,
    size_t        count,
    loff_t       *f_pos,
    BUFFER_DESC   kernel_buf
)
{
    ssize_t  to_copy;
    ssize_t  uncopied;
    OUTPUT   outbuf = &BUFFER_DESC_outbuf(kernel_buf);
    U32      cur_buf, i;

    /* Buffer is filled by output_fill_modules. */
    // scan the ring of buffers starting just after the one being written,
    // looking for the first one with data ready
    cur_buf = OUTPUT_current_buffer(outbuf);
    for (i=0; i<OUTPUT_NUM_BUFFERS; i++) { //iterate through all buffers
        cur_buf++;
        if (cur_buf >= OUTPUT_NUM_BUFFERS) { cur_buf = 0; } //circularly
        if ((to_copy = OUTPUT_buffer_full(outbuf, cur_buf))) {
            break;
        }
    }
    SEP_PRINT_DEBUG("buffer %d has %d bytes ready\n", (S32)cur_buf, (S32)to_copy);
    if (!flush && to_copy == 0) {
#if defined(CONFIG_PREEMPT_RT)
        // RT kernels: poll with a 1-second timeout instead of an
        // open-ended interruptible wait
        do {
            unsigned long delay;
            delay = msecs_to_jiffies(1000);
            wait_event_interruptible_timeout(BUFFER_DESC_queue(kernel_buf),
                                             flush||OUTPUT_buffer_full(outbuf, cur_buf),
                                             delay);
        } while (!(flush||OUTPUT_buffer_full(outbuf, cur_buf)));
#else
        if (wait_event_interruptible(BUFFER_DESC_queue(kernel_buf),
                                     flush||OUTPUT_buffer_full(outbuf, cur_buf))) {
            return OS_RESTART_SYSCALL;
        }
#endif
        // NOTE(review): this format string has no conversion specifier for
        // the extra cur_buf argument -- the argument is silently ignored.
        SEP_PRINT_DEBUG("Get to copy\n", (S32)cur_buf);
        to_copy = OUTPUT_buffer_full(outbuf, cur_buf);
        SEP_PRINT_DEBUG("output_Read awakened, buffer %d has %d bytes\n",cur_buf, (int)to_copy );
    }

    /* Ensure that the user's buffer is large enough */
    if (to_copy > count) {
        SEP_PRINT_DEBUG("user buffer is too small\n");
        return OS_NO_MEM;
    }

    /* Copy data to user space. Note that we use cur_buf as the source */
    if (abnormal_terminate == 0) {
        uncopied = copy_to_user(buf, OUTPUT_buffer(outbuf, cur_buf), to_copy);
        /* Mark the buffer empty */
        OUTPUT_buffer_full(outbuf, cur_buf) = 0;
        *f_pos += to_copy-uncopied;
        if (uncopied) {
            // NOTE(review): the message reads "copied X of Y bytes" but the
            // arguments passed are (to_copy, uncopied) -- they look swapped;
            // verify the intended wording.
            SEP_PRINT_DEBUG("only copied %d of %lld bytes of module records\n",
                            (S32)to_copy, (long long)uncopied);
            return (to_copy - uncopied);
        }
    }
    else {
        to_copy = 0;
        SEP_PRINT_DEBUG("to copy set to 0\n");
    }

    // At end-of-file, decrement the count of active buffer writers
    if (to_copy == 0) {
        DRV_BOOL flush_val = atomic_dec_and_test(&flush_writers);
        SEP_PRINT_DEBUG("output_Read decremented flush_writers\n");
        if (flush_val == TRUE) {
            // last writer drained -- wake OUTPUT_Flush
            wake_up_interruptible_sync(&flush_queue);
        }
    }

    return to_copy;
}