static void ppro_setup_ctrs(struct op_msrs const * const msrs)
{
	uint low, high;
	int i;

	/* clear all counters */
	for (i = 0; i < NUM_CONTROLS; ++i) {
		CTRL_READ(low, high, msrs, i);
		CTRL_CLEAR(low);
		CTRL_WRITE(low, high, msrs, i);
	}

	/* avoid a false detection of ctr overflows in NMI handler */
	for (i = 0; i < NUM_COUNTERS; ++i)
		CTR_WRITE(1, msrs, i);

	/* enable active counters */
	for (i = 0; i < NUM_COUNTERS; ++i) {
		if (sysctl.ctr[i].enabled) {
			CTR_WRITE(sysctl.ctr[i].count, msrs, i);

			CTRL_READ(low, high, msrs, i);
			CTRL_CLEAR(low);
			CTRL_SET_ENABLE(low);
			CTRL_SET_USR(low, sysctl.ctr[i].user);
			CTRL_SET_KERN(low, sysctl.ctr[i].kernel);
			CTRL_SET_UM(low, sysctl.ctr[i].unit_mask);
			CTRL_SET_EVENT(low, sysctl.ctr[i].event);
			CTRL_WRITE(low, high, msrs, i);
		}
	}
}
static void p4_check_ctrs(unsigned int const cpu,
			  struct op_msrs const * const msrs,
			  struct pt_regs * const regs)
{
	unsigned long ctr, low, high, stag, real;
	int i;

	stag = get_stagger();

	for (i = 0; i < num_counters; ++i) {
		if (!sysctl.ctr[i].enabled)
			continue;

		real = VIRT_CTR(stag, i);

		CCCR_READ(low, high, real);
		CTR_READ(ctr, high, real);
		if (CCCR_OVF_P(low) || CTR_OVERFLOW_P(ctr)) {
			op_do_profile(cpu, instruction_pointer(regs),
				      IRQ_ENABLED(regs), i);
			CTR_WRITE(oprof_data[cpu].ctr_count[i], real);
			CCCR_CLEAR_OVF(low);
			CCCR_WRITE(low, high, real);
			CTR_WRITE(oprof_data[cpu].ctr_count[i], real);
		}
	}

	/* P4 quirk: you have to re-unmask the apic vector */
	apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED);
}
static int p4_check_ctrs(unsigned int const cpu,
			 struct op_msrs const * const msrs,
			 struct pt_regs * const regs)
{
	unsigned long ctr, low, high, stag, real;
	int i;
	unsigned long eip = instruction_pointer(regs);
	int is_kernel = !user_mode(regs);

	stag = get_stagger();

	for (i = 0; i < num_counters; ++i) {
		if (!counter_config[i].event)
			continue;

		/*
		 * there is some eccentricity in the hardware which
		 * requires that we perform 2 extra corrections:
		 *
		 * - check both the CCCR:OVF flag for overflow and the
		 *   counter high bit for un-flagged overflows.
		 *
		 * - write the counter back twice to ensure it gets
		 *   updated properly.
		 *
		 * the former seems to be related to extra NMIs happening
		 * during the current NMI; the latter is reported as errata
		 * N15 in intel doc 249199-029, pentium 4 specification
		 * update, though their suggested work-around does not
		 * appear to solve the problem.
		 */
		real = VIRT_CTR(stag, i);

		CCCR_READ(low, high, real);
		CTR_READ(ctr, high, real);
		if (CCCR_OVF_P(low) || CTR_OVERFLOW_P(ctr)) {
			oprofile_add_sample(eip, is_kernel, i, cpu);
			CTR_WRITE(reset_value[i], real);
			CCCR_CLEAR_OVF(low);
			CCCR_WRITE(low, high, real);
			CTR_WRITE(reset_value[i], real);
			/* P4 quirk: you have to re-unmask the apic vector */
			apic_write(APIC_LVTPC,
				   apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED);
		}
	}

	/* P4 quirk: you have to re-unmask the apic vector */
	apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED);

	/* See op_model_ppro.c */
	return 1;
}
static int ppro_check_ctrs(struct pt_regs * const regs,
			   struct op_msrs const * const msrs)
{
	unsigned int low, high;
	int i;

	for (i = 0; i < NUM_COUNTERS; ++i) {
		CTR_READ(low, high, msrs, i);
		if (CTR_OVERFLOWED(low)) {
			oprofile_add_sample(regs, i);
			CTR_WRITE(reset_value[i], msrs, i);
		}
	}

	/* Only the P6-based Pentium M needs to re-unmask the apic vector,
	 * but it doesn't hurt other P6 variants */
	apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED);

	/* We can't work out if we really handled an interrupt. We
	 * might have caught a *second* counter just after it overflowed;
	 * the interrupt for this counter then arrives and we don't find
	 * a counter that's overflowed, so we would return 0 and get
	 * dazed + confused. Instead we always assume we found an
	 * overflow. This sucks.
	 */
	return 1;
}
static int ppro_check_ctrs(unsigned int const cpu,
			   struct op_msrs const * const msrs,
			   struct pt_regs * const regs)
{
	unsigned int low, high;
	int i;
	unsigned long eip = instruction_pointer(regs);
	int is_kernel = !user_mode(regs);

	for (i = 0; i < NUM_COUNTERS; ++i) {
		CTR_READ(low, high, msrs, i);
		if (CTR_OVERFLOWED(low)) {
			oprofile_add_sample(eip, is_kernel, i, cpu);
			CTR_WRITE(reset_value[i], msrs, i);
		}
	}

	/* We can't work out if we really handled an interrupt. We
	 * might have caught a *second* counter just after it overflowed;
	 * the interrupt for this counter then arrives and we don't find
	 * a counter that's overflowed, so we would return 0 and get
	 * dazed + confused. Instead we always assume we found an
	 * overflow. This sucks.
	 */
	return 1;
}
static void athlon_setup_ctrs(struct op_msrs const * const msrs)
{
	unsigned int low, high;
	int i;

	/* clear all counters */
	for (i = 0; i < NUM_CONTROLS; ++i) {
		if (unlikely(!CTRL_IS_RESERVED(msrs, i)))
			continue;
		CTRL_READ(low, high, msrs, i);
		CTRL_CLEAR_LO(low);
		CTRL_CLEAR_HI(high);
		CTRL_WRITE(low, high, msrs, i);
	}

	/* avoid a false detection of ctr overflows in NMI handler */
	for (i = 0; i < NUM_COUNTERS; ++i) {
		if (unlikely(!CTR_IS_RESERVED(msrs, i)))
			continue;
		CTR_WRITE(1, msrs, i);
	}

	/* enable active counters */
	for (i = 0; i < NUM_COUNTERS; ++i) {
		if ((counter_config[i].enabled) && (CTR_IS_RESERVED(msrs, i))) {
			reset_value[i] = counter_config[i].count;

			CTR_WRITE(counter_config[i].count, msrs, i);

			CTRL_READ(low, high, msrs, i);
			CTRL_CLEAR_LO(low);
			CTRL_CLEAR_HI(high);
			CTRL_SET_ENABLE(low);
			CTRL_SET_USR(low, counter_config[i].user);
			CTRL_SET_KERN(low, counter_config[i].kernel);
			CTRL_SET_UM(low, counter_config[i].unit_mask);
			CTRL_SET_EVENT_LOW(low, counter_config[i].event);
			CTRL_SET_EVENT_HIGH(high, counter_config[i].event);
			CTRL_SET_HOST_ONLY(high, 0);
			CTRL_SET_GUEST_ONLY(high, 0);

			CTRL_WRITE(low, high, msrs, i);
		} else {
			reset_value[i] = 0;
		}
	}
}
static void ppro_check_ctrs(uint const cpu,
			    struct op_msrs const * const msrs,
			    struct pt_regs * const regs)
{
	ulong low, high;
	int i;

	for (i = 0; i < NUM_COUNTERS; ++i) {
		CTR_READ(low, high, msrs, i);
		if (CTR_OVERFLOWED(low)) {
			op_do_profile(cpu, instruction_pointer(regs),
				      IRQ_ENABLED(regs), i);
			CTR_WRITE(oprof_data[cpu].ctr_count[i], msrs, i);
		}
	}
}
static void athlon_setup_ctrs(struct op_msrs const * const msrs)
{
	unsigned int low, high;
	int i;

	/* clear all counters */
	for (i = 0; i < NUM_CONTROLS; ++i) {
		CTRL_READ(low, high, msrs, i);
		CTRL_CLEAR(low);
		CTRL_WRITE(low, high, msrs, i);
	}

	/* avoid a false detection of ctr overflows in NMI handler */
	for (i = 0; i < NUM_COUNTERS; ++i) {
		CTR_WRITE(1, msrs, i);
	}

	/* enable active counters */
	for (i = 0; i < NUM_COUNTERS; ++i) {
		if (counter_config[i].event) {
			reset_value[i] = counter_config[i].count;

			CTR_WRITE(counter_config[i].count, msrs, i);

			CTRL_READ(low, high, msrs, i);
			CTRL_CLEAR(low);
			CTRL_SET_ENABLE(low);
			CTRL_SET_USR(low, counter_config[i].user);
			CTRL_SET_KERN(low, counter_config[i].kernel);
			CTRL_SET_UM(low, counter_config[i].unit_mask);
			CTRL_SET_EVENT(low, counter_config[i].event);
			CTRL_WRITE(low, high, msrs, i);
		} else {
			reset_value[i] = 0;
		}
	}
}
static int athlon_check_ctrs(struct pt_regs * const regs,
			     struct op_msrs const * const msrs)
{
	unsigned int low, high;
	int i;

	for (i = 0; i < NUM_COUNTERS; ++i) {
		if (!reset_value[i])
			continue;
		CTR_READ(low, high, msrs, i);
		if (CTR_OVERFLOWED(low)) {
			oprofile_add_sample(regs, i);
			CTR_WRITE(reset_value[i], msrs, i);
		}
	}

	/* See op_model_ppro.c */
	return 1;
}
static void p4_setup_ctrs(struct op_msrs const * const msrs)
{
	unsigned int i;
	unsigned int low, high;
	unsigned int stag;

	stag = get_stagger();

	rdmsr(MSR_IA32_MISC_ENABLE, low, high);
	if (!MISC_PMC_ENABLED_P(low)) {
		printk(KERN_ERR "oprofile: P4 PMC not available\n");
		return;
	}

	/* clear the cccrs we will use */
	for (i = 0; i < num_counters; i++) {
		if (unlikely(!CTRL_IS_RESERVED(msrs, i)))
			continue;
		rdmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high);
		CCCR_CLEAR(low);
		CCCR_SET_REQUIRED_BITS(low);
		wrmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high);
	}

	/* clear all escrs (including those outside our concern) */
	for (i = num_counters; i < num_controls; i++) {
		if (unlikely(!CTRL_IS_RESERVED(msrs, i)))
			continue;
		wrmsr(msrs->controls[i].addr, 0, 0);
	}

	/* setup all counters */
	for (i = 0; i < num_counters; ++i) {
		if ((counter_config[i].enabled) && (CTRL_IS_RESERVED(msrs, i))) {
			reset_value[i] = counter_config[i].count;
			pmc_setup_one_p4_counter(i);
			CTR_WRITE(counter_config[i].count, VIRT_CTR(stag, i));
		} else {
			reset_value[i] = 0;
		}
	}
}
static int athlon_check_ctrs(unsigned int const cpu,
			     struct op_msrs const * const msrs,
			     struct pt_regs * const regs)
{
	unsigned int low, high;
	int i;
	unsigned long eip = instruction_pointer(regs);
	int is_kernel = !user_mode(regs);

	for (i = 0; i < NUM_COUNTERS; ++i) {
		CTR_READ(low, high, msrs, i);
		if (CTR_OVERFLOWED(low)) {
			oprofile_add_sample(eip, is_kernel, i, cpu);
			CTR_WRITE(reset_value[i], msrs, i);
		}
	}

	/* See op_model_ppro.c */
	return 1;
}
static void p4_setup_ctrs(struct op_msrs const * const msrs)
{
	unsigned int i;
	unsigned int low, high;
	unsigned int addr;
	unsigned int stag;

	stag = get_stagger();

	rdmsr(MSR_IA32_MISC_ENABLE, low, high);
	if (!MISC_PMC_ENABLED_P(low)) {
		printk(KERN_ERR "oprofile: P4 PMC not available\n");
		return;
	}

	/* clear the cccrs we will use */
	for (i = 0; i < num_counters; i++) {
		rdmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high);
		CCCR_CLEAR(low);
		CCCR_SET_REQUIRED_BITS(low);
		wrmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high);
	}

	/* clear cccrs outside our concern */
	for (i = stag; i < NUM_UNUSED_CCCRS; i += addr_increment()) {
		rdmsr(p4_unused_cccr[i], low, high);
		CCCR_CLEAR(low);
		CCCR_SET_REQUIRED_BITS(low);
		wrmsr(p4_unused_cccr[i], low, high);
	}

	/* clear all escrs (including those outside our concern) */
	for (addr = MSR_P4_BSU_ESCR0 + stag;
	     addr < MSR_P4_IQ_ESCR0; addr += addr_increment()) {
		wrmsr(addr, 0, 0);
	}

	/* on older models also clear MSR_P4_IQ_ESCR0/1 */
	if (boot_cpu_data.x86_model < 0x3) {
		wrmsr(MSR_P4_IQ_ESCR0, 0, 0);
		wrmsr(MSR_P4_IQ_ESCR1, 0, 0);
	}

	for (addr = MSR_P4_RAT_ESCR0 + stag;
	     addr <= MSR_P4_SSU_ESCR0; ++i, addr += addr_increment()) {
		wrmsr(addr, 0, 0);
	}

	for (addr = MSR_P4_MS_ESCR0 + stag;
	     addr <= MSR_P4_TC_ESCR1; addr += addr_increment()) {
		wrmsr(addr, 0, 0);
	}

	for (addr = MSR_P4_IX_ESCR0 + stag;
	     addr <= MSR_P4_CRU_ESCR3; addr += addr_increment()) {
		wrmsr(addr, 0, 0);
	}

	if (num_counters == NUM_COUNTERS_NON_HT) {
		wrmsr(MSR_P4_CRU_ESCR4, 0, 0);
		wrmsr(MSR_P4_CRU_ESCR5, 0, 0);
	} else if (stag == 0) {
		wrmsr(MSR_P4_CRU_ESCR4, 0, 0);
	} else {
		wrmsr(MSR_P4_CRU_ESCR5, 0, 0);
	}

	/* setup all counters */
	for (i = 0; i < num_counters; ++i) {
		if (sysctl.ctr[i].event) {
			pmc_setup_one_p4_counter(i);
			CTR_WRITE(sysctl.ctr[i].count, VIRT_CTR(stag, i));
		}
	}
}
static void p4_setup_ctrs(struct op_msrs const * const msrs)
{
	unsigned int i;
	unsigned int low, high;
	unsigned int addr;
	unsigned int stag;

	stag = get_stagger();

	rdmsr(MSR_IA32_MISC_ENABLE, low, high);
	if (!MISC_PMC_ENABLED_P(low)) {
		printk(KERN_ERR "oprofile: P4 PMC not available\n");
		return;
	}

	/* clear the cccrs we will use */
	for (i = 0; i < num_counters; i++) {
		rdmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high);
		CCCR_CLEAR(low);
		CCCR_SET_REQUIRED_BITS(low);
		wrmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high);
	}

	/* clear cccrs outside our concern */
	for (i = stag; i < NUM_UNUSED_CCCRS; i += addr_increment()) {
		rdmsr(p4_unused_cccr[i], low, high);
		CCCR_CLEAR(low);
		CCCR_SET_REQUIRED_BITS(low);
		wrmsr(p4_unused_cccr[i], low, high);
	}

	/* clear all escrs (including those outside our concern) */
	for (addr = MSR_P4_BSU_ESCR0 + stag;
	     addr < MSR_P4_IQ_ESCR0; addr += addr_increment()) {
		wrmsr(addr, 0, 0);
	}

	/* On older models clear also MSR_P4_IQ_ESCR0/1 */
	if (boot_cpu_data.x86_model < 0x3) {
		wrmsr(MSR_P4_IQ_ESCR0, 0, 0);
		wrmsr(MSR_P4_IQ_ESCR1, 0, 0);
	}

	for (addr = MSR_P4_RAT_ESCR0 + stag;
	     addr <= MSR_P4_SSU_ESCR0; ++i, addr += addr_increment()) {
		wrmsr(addr, 0, 0);
	}

	for (addr = MSR_P4_MS_ESCR0 + stag;
	     addr <= MSR_P4_TC_ESCR1; addr += addr_increment()) {
		wrmsr(addr, 0, 0);
	}

	for (addr = MSR_P4_IX_ESCR0 + stag;
	     addr <= MSR_P4_CRU_ESCR3; addr += addr_increment()) {
		wrmsr(addr, 0, 0);
	}

	if (num_counters == NUM_COUNTERS_NON_HT) {
		wrmsr(MSR_P4_CRU_ESCR4, 0, 0);
		wrmsr(MSR_P4_CRU_ESCR5, 0, 0);
	} else if (stag == 0) {
		wrmsr(MSR_P4_CRU_ESCR4, 0, 0);
	} else {
		wrmsr(MSR_P4_CRU_ESCR5, 0, 0);
	}

	/* setup all counters */
	for (i = 0; i < num_counters; ++i) {
		if (counter_config[i].enabled) {
			reset_value[i] = counter_config[i].count;
			pmc_setup_one_p4_counter(i);
			CTR_WRITE(counter_config[i].count, VIRT_CTR(stag, i));
		} else {
			reset_value[i] = 0;
		}
	}
}