static void p4_fill_in_addresses(struct op_msrs * const msrs)
{
	unsigned int i;
	unsigned int addr, stag;

	setup_num_counters();
	stag = get_stagger();

	/* the counter registers we pay attention to */
	for (i = 0; i < num_counters; ++i) {
		msrs->counters.addrs[i] =
			p4_counters[VIRT_CTR(stag, i)].counter_address;
	}

	/* FIXME: bad feeling, we don't save the 10 counters we don't use. */

	/* 18 CCCR registers */
	for (i = 0, addr = MSR_P4_BPU_CCCR0 + stag;
	     addr <= MSR_P4_IQ_CCCR5; ++i, addr += addr_increment()) {
		msrs->controls.addrs[i] = addr;
	}

	/* 43 ESCR registers in three discontiguous groups */
	for (addr = MSR_P4_BSU_ESCR0 + stag;
	     addr <= MSR_P4_SSU_ESCR0; ++i, addr += addr_increment()) {
		msrs->controls.addrs[i] = addr;
	}

	for (addr = MSR_P4_MS_ESCR0 + stag;
	     addr <= MSR_P4_TC_ESCR1; ++i, addr += addr_increment()) {
		msrs->controls.addrs[i] = addr;
	}

	for (addr = MSR_P4_IX_ESCR0 + stag;
	     addr <= MSR_P4_CRU_ESCR3; ++i, addr += addr_increment()) {
		msrs->controls.addrs[i] = addr;
	}

	/* there are 2 remaining non-contiguously located ESCRs */
	if (num_counters == NUM_COUNTERS_NON_HT) {
		/* standard non-HT CPUs handle both remaining ESCRs */
		msrs->controls.addrs[i++] = MSR_P4_CRU_ESCR5;
		msrs->controls.addrs[i++] = MSR_P4_CRU_ESCR4;
	} else if (stag == 0) {
		/* HT CPUs give the first remainder to the even thread, as
		   the 32nd control register */
		msrs->controls.addrs[i++] = MSR_P4_CRU_ESCR4;
	} else {
		/* and two copies of the second to the odd thread, for the
		   31st and 32nd control registers */
		msrs->controls.addrs[i++] = MSR_P4_CRU_ESCR5;
		msrs->controls.addrs[i++] = MSR_P4_CRU_ESCR5;
	}
}
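/*
 * The address-filling code above leans on a few hyper-threading stagger
 * helpers that are not shown in this section. What follows is a minimal
 * sketch of how such helpers are typically defined for this driver; the
 * exact names, constants and the cpu_sibling_map lookup are assumptions for
 * illustration, not the definitive implementation.
 */
static unsigned int num_counters = NUM_COUNTERS_NON_HT;

static inline void setup_num_counters(void)
{
#ifdef CONFIG_SMP
	/* with HyperThreading each logical CPU gets only half the counters */
	if (smp_num_siblings == 2)
		num_counters = NUM_COUNTERS_HT2;
#endif
}

static unsigned int get_stagger(void)
{
#ifdef CONFIG_SMP
	int cpu = smp_processor_id();
	/* 0 for the first sibling in the core, 1 for the second */
	return cpu != first_cpu(cpu_sibling_map[cpu]);
#endif
	return 0;
}

static inline int addr_increment(void)
{
#ifdef CONFIG_SMP
	/* HT siblings interleave, so each thread steps over its sibling's MSRs */
	return smp_num_siblings == 2 ? 2 : 1;
#else
	return 1;
#endif
}

/* map a per-thread (virtual) counter index onto the physical counter table */
#define VIRT_CTR(stagger, i)	((i) + ((num_counters) * (stagger)))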
static void p4_setup_ctrs(struct op_msrs const * const msrs)
{
	unsigned int i;
	unsigned int low, high;
	unsigned int addr;
	unsigned int stag;

	stag = get_stagger();

	rdmsr(MSR_IA32_MISC_ENABLE, low, high);
	if (!MISC_PMC_ENABLED_P(low)) {
		printk(KERN_ERR "oprofile: P4 PMC not available\n");
		return;
	}

	for (i = 0; i < num_counters; i++) {
		rdmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high);
		CCCR_CLEAR(low);
		CCCR_SET_REQUIRED_BITS(low);
		wrmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high);
	}

	for (i = stag; i < NUM_UNUSED_CCCRS; i += addr_increment()) {
		rdmsr(p4_unused_cccr[i], low, high);
		CCCR_CLEAR(low);
		CCCR_SET_REQUIRED_BITS(low);
		wrmsr(p4_unused_cccr[i], low, high);
	}

	for (addr = MSR_P4_BSU_ESCR0 + stag;
	     addr < MSR_P4_IQ_ESCR0; addr += addr_increment()) {
		wrmsr(addr, 0, 0);
	}

	if (boot_cpu_data.x86_model < 0x3) {
		wrmsr(MSR_P4_IQ_ESCR0, 0, 0);
		wrmsr(MSR_P4_IQ_ESCR1, 0, 0);
	}

	for (addr = MSR_P4_RAT_ESCR0 + stag;
	     addr <= MSR_P4_SSU_ESCR0; ++i, addr += addr_increment()) {
		wrmsr(addr, 0, 0);
	}

	for (addr = MSR_P4_MS_ESCR0 + stag;
	     addr <= MSR_P4_TC_ESCR1; addr += addr_increment()) {
		wrmsr(addr, 0, 0);
	}

	for (addr = MSR_P4_IX_ESCR0 + stag;
	     addr <= MSR_P4_CRU_ESCR3; addr += addr_increment()) {
		wrmsr(addr, 0, 0);
	}

	if (num_counters == NUM_COUNTERS_NON_HT) {
		wrmsr(MSR_P4_CRU_ESCR4, 0, 0);
		wrmsr(MSR_P4_CRU_ESCR5, 0, 0);
	} else if (stag == 0) {
		wrmsr(MSR_P4_CRU_ESCR4, 0, 0);
	} else {
		wrmsr(MSR_P4_CRU_ESCR5, 0, 0);
	}

	for (i = 0; i < num_counters; ++i) {
		if (sysctl.ctr[i].event) {
			pmc_setup_one_p4_counter(i);
			CTR_WRITE(sysctl.ctr[i].count, VIRT_CTR(stag, i));
		}
	}
}
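/*
 * p4_setup_ctrs() drives the hardware through several macros whose
 * definitions fall outside this section. Below is a hedged reconstruction
 * for illustration only: the bit masks and the MISC_ENABLE bit are
 * assumptions based on the P4 CCCR/MSR layout, and CTR_WRITE is assumed to
 * arm a counter with the negated count so it overflows after "count" events.
 */

/* MSR_IA32_MISC_ENABLE bit 7: performance monitoring available */
#define MISC_PMC_ENABLED_P(x)		((x) & (1 << 7))

/* keep only the reserved CCCR bits, then force the must-be-one bits 16/17 */
#define CCCR_RESERVED_BITS		0x38030FFF
#define CCCR_CLEAR(cccr)		((cccr) &= CCCR_RESERVED_BITS)
#define CCCR_SET_REQUIRED_BITS(cccr)	((cccr) |= 0x00030000)

/* write the two's complement of the count so the counter overflows (and can
 * raise the profiling interrupt) after "l" events */
#define CTR_WRITE(l, i)							\
	do {								\
		wrmsr(p4_counters[(i)].counter_address, -(u32)(l), -1);	\
	} while (0)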
static void p4_fill_in_addresses(struct op_msrs * const msrs)
{
	unsigned int i;
	unsigned int addr, stag;

	setup_num_counters();
	stag = get_stagger();

	for (i = 0; i < num_counters; ++i) {
		msrs->counters.addrs[i] =
			p4_counters[VIRT_CTR(stag, i)].counter_address;
	}

	for (i = 0, addr = MSR_P4_BPU_CCCR0 + stag;
	     addr <= MSR_P4_IQ_CCCR5; ++i, addr += addr_increment()) {
		msrs->controls.addrs[i] = addr;
	}

	for (addr = MSR_P4_BSU_ESCR0 + stag;
	     addr < MSR_P4_IQ_ESCR0; ++i, addr += addr_increment()) {
		msrs->controls.addrs[i] = addr;
	}

	if (boot_cpu_data.x86_model >= 0x3) {
		for (addr = MSR_P4_BSU_ESCR0 + stag;
		     addr <= MSR_P4_BSU_ESCR1; ++i, addr += addr_increment()) {
			msrs->controls.addrs[i] = addr;
		}
	} else {
		for (addr = MSR_P4_IQ_ESCR0 + stag;
		     addr <= MSR_P4_IQ_ESCR1; ++i, addr += addr_increment()) {
			msrs->controls.addrs[i] = addr;
		}
	}

	for (addr = MSR_P4_RAT_ESCR0 + stag;
	     addr <= MSR_P4_SSU_ESCR0; ++i, addr += addr_increment()) {
		msrs->controls.addrs[i] = addr;
	}

	for (addr = MSR_P4_MS_ESCR0 + stag;
	     addr <= MSR_P4_TC_ESCR1; ++i, addr += addr_increment()) {
		msrs->controls.addrs[i] = addr;
	}

	for (addr = MSR_P4_IX_ESCR0 + stag;
	     addr <= MSR_P4_CRU_ESCR3; ++i, addr += addr_increment()) {
		msrs->controls.addrs[i] = addr;
	}

	if (num_counters == NUM_COUNTERS_NON_HT) {
		msrs->controls.addrs[i++] = MSR_P4_CRU_ESCR5;
		msrs->controls.addrs[i++] = MSR_P4_CRU_ESCR4;
	} else if (stag == 0) {
		msrs->controls.addrs[i++] = MSR_P4_CRU_ESCR4;
	} else {
		msrs->controls.addrs[i++] = MSR_P4_CRU_ESCR5;
		msrs->controls.addrs[i++] = MSR_P4_CRU_ESCR5;
	}
}
static void p4_setup_ctrs(struct op_msrs const * const msrs)
{
	unsigned int i;
	unsigned int low, high;
	unsigned int addr;
	unsigned int stag;

	stag = get_stagger();

	rdmsr(MSR_IA32_MISC_ENABLE, low, high);
	if (!MISC_PMC_ENABLED_P(low)) {
		printk(KERN_ERR "oprofile: P4 PMC not available\n");
		return;
	}

	/* clear the cccrs we will use */
	for (i = 0; i < num_counters; i++) {
		rdmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high);
		CCCR_CLEAR(low);
		CCCR_SET_REQUIRED_BITS(low);
		wrmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high);
	}

	/* clear cccrs outside our concern */
	for (i = stag; i < NUM_UNUSED_CCCRS; i += addr_increment()) {
		rdmsr(p4_unused_cccr[i], low, high);
		CCCR_CLEAR(low);
		CCCR_SET_REQUIRED_BITS(low);
		wrmsr(p4_unused_cccr[i], low, high);
	}

	/* clear all escrs (including those outside our concern) */
	for (addr = MSR_P4_BSU_ESCR0 + stag;
	     addr < MSR_P4_IQ_ESCR0; addr += addr_increment()) {
		wrmsr(addr, 0, 0);
	}

	/* On older models clear also MSR_P4_IQ_ESCR0/1 */
	if (boot_cpu_data.x86_model < 0x3) {
		wrmsr(MSR_P4_IQ_ESCR0, 0, 0);
		wrmsr(MSR_P4_IQ_ESCR1, 0, 0);
	}

	for (addr = MSR_P4_RAT_ESCR0 + stag;
	     addr <= MSR_P4_SSU_ESCR0; ++i, addr += addr_increment()) {
		wrmsr(addr, 0, 0);
	}

	for (addr = MSR_P4_MS_ESCR0 + stag;
	     addr <= MSR_P4_TC_ESCR1; addr += addr_increment()) {
		wrmsr(addr, 0, 0);
	}

	for (addr = MSR_P4_IX_ESCR0 + stag;
	     addr <= MSR_P4_CRU_ESCR3; addr += addr_increment()) {
		wrmsr(addr, 0, 0);
	}

	if (num_counters == NUM_COUNTERS_NON_HT) {
		wrmsr(MSR_P4_CRU_ESCR4, 0, 0);
		wrmsr(MSR_P4_CRU_ESCR5, 0, 0);
	} else if (stag == 0) {
		wrmsr(MSR_P4_CRU_ESCR4, 0, 0);
	} else {
		wrmsr(MSR_P4_CRU_ESCR5, 0, 0);
	}

	/* setup all counters */
	for (i = 0; i < num_counters; ++i) {
		if (counter_config[i].enabled) {
			reset_value[i] = counter_config[i].count;
			pmc_setup_one_p4_counter(i);
			CTR_WRITE(counter_config[i].count, VIRT_CTR(stag, i));
		} else {
			reset_value[i] = 0;
		}
	}
}
static void p4_fill_in_addresses(struct op_msrs * const msrs)
{
	unsigned int i;
	unsigned int addr, stag;

	setup_num_counters();
	stag = get_stagger();

	/* the counter registers we pay attention to */
	for (i = 0; i < num_counters; ++i) {
		msrs->counters[i].addr =
			p4_counters[VIRT_CTR(stag, i)].counter_address;
	}

	/* FIXME: bad feeling, we don't save the 10 counters we don't use. */

	/* 18 CCCR registers */
	for (i = 0, addr = MSR_P4_BPU_CCCR0 + stag;
	     addr <= MSR_P4_IQ_CCCR5; ++i, addr += addr_increment()) {
		msrs->controls[i].addr = addr;
	}

	/* 43 ESCR registers in three or four discontiguous groups */
	for (addr = MSR_P4_BSU_ESCR0 + stag;
	     addr < MSR_P4_IQ_ESCR0; ++i, addr += addr_increment()) {
		msrs->controls[i].addr = addr;
	}

	/* no IQ_ESCR0/1 on some models, we save BSU_ESCR0/1 a second time
	 * to avoid a special case in nmi_{save|restore}_registers() */
	if (boot_cpu_data.x86_model >= 0x3) {
		for (addr = MSR_P4_BSU_ESCR0 + stag;
		     addr <= MSR_P4_BSU_ESCR1; ++i, addr += addr_increment()) {
			msrs->controls[i].addr = addr;
		}
	} else {
		for (addr = MSR_P4_IQ_ESCR0 + stag;
		     addr <= MSR_P4_IQ_ESCR1; ++i, addr += addr_increment()) {
			msrs->controls[i].addr = addr;
		}
	}

	for (addr = MSR_P4_RAT_ESCR0 + stag;
	     addr <= MSR_P4_SSU_ESCR0; ++i, addr += addr_increment()) {
		msrs->controls[i].addr = addr;
	}

	for (addr = MSR_P4_MS_ESCR0 + stag;
	     addr <= MSR_P4_TC_ESCR1; ++i, addr += addr_increment()) {
		msrs->controls[i].addr = addr;
	}

	for (addr = MSR_P4_IX_ESCR0 + stag;
	     addr <= MSR_P4_CRU_ESCR3; ++i, addr += addr_increment()) {
		msrs->controls[i].addr = addr;
	}

	/* there are 2 remaining non-contiguously located ESCRs */
	if (num_counters == NUM_COUNTERS_NON_HT) {
		/* standard non-HT CPUs handle both remaining ESCRs */
		msrs->controls[i++].addr = MSR_P4_CRU_ESCR5;
		msrs->controls[i++].addr = MSR_P4_CRU_ESCR4;
	} else if (stag == 0) {
		/* HT CPUs give the first remainder to the even thread, as
		   the 32nd control register */
		msrs->controls[i++].addr = MSR_P4_CRU_ESCR4;
	} else {
		/* and two copies of the second to the odd thread, for the
		   22nd and 23rd control registers */
		msrs->controls[i++].addr = MSR_P4_CRU_ESCR5;
		msrs->controls[i++].addr = MSR_P4_CRU_ESCR5;
	}
}
static int p4_fill_in_addresses(struct op_msrs * const msrs)
{
	unsigned int i;
	unsigned int addr, cccraddr, stag;

	setup_num_counters();
	stag = get_stagger();

	/* the counter & cccr registers we pay attention to */
	for (i = 0; i < num_counters; ++i) {
		addr = p4_counters[VIRT_CTR(stag, i)].counter_address;
		cccraddr = p4_counters[VIRT_CTR(stag, i)].cccr_address;
		if (reserve_perfctr_nmi(addr)) {
			msrs->counters[i].addr = addr;
			msrs->controls[i].addr = cccraddr;
		}
	}

	/* 43 ESCR registers in three or four discontiguous groups */
	for (addr = MSR_P4_BSU_ESCR0 + stag;
	     addr < MSR_P4_IQ_ESCR0; ++i, addr += addr_increment()) {
		if (reserve_evntsel_nmi(addr))
			msrs->controls[i].addr = addr;
	}

	/* no IQ_ESCR0/1 on some models, we save BSU_ESCR0/1 a second time
	 * to avoid a special case in nmi_{save|restore}_registers() */
	if (boot_cpu_data.x86_model >= 0x3) {
		for (addr = MSR_P4_BSU_ESCR0 + stag;
		     addr <= MSR_P4_BSU_ESCR1; ++i, addr += addr_increment()) {
			if (reserve_evntsel_nmi(addr))
				msrs->controls[i].addr = addr;
		}
	} else {
		for (addr = MSR_P4_IQ_ESCR0 + stag;
		     addr <= MSR_P4_IQ_ESCR1; ++i, addr += addr_increment()) {
			if (reserve_evntsel_nmi(addr))
				msrs->controls[i].addr = addr;
		}
	}

	for (addr = MSR_P4_RAT_ESCR0 + stag;
	     addr <= MSR_P4_SSU_ESCR0; ++i, addr += addr_increment()) {
		if (reserve_evntsel_nmi(addr))
			msrs->controls[i].addr = addr;
	}

	for (addr = MSR_P4_MS_ESCR0 + stag;
	     addr <= MSR_P4_TC_ESCR1; ++i, addr += addr_increment()) {
		if (reserve_evntsel_nmi(addr))
			msrs->controls[i].addr = addr;
	}

	for (addr = MSR_P4_IX_ESCR0 + stag;
	     addr <= MSR_P4_CRU_ESCR3; ++i, addr += addr_increment()) {
		if (reserve_evntsel_nmi(addr))
			msrs->controls[i].addr = addr;
	}

	/* there are 2 remaining non-contiguously located ESCRs */
	if (num_counters == NUM_COUNTERS_NON_HT) {
		/* standard non-HT CPUs handle both remaining ESCRs */
		if (reserve_evntsel_nmi(MSR_P4_CRU_ESCR5))
			msrs->controls[i++].addr = MSR_P4_CRU_ESCR5;
		if (reserve_evntsel_nmi(MSR_P4_CRU_ESCR4))
			msrs->controls[i++].addr = MSR_P4_CRU_ESCR4;
	} else if (stag == 0) {
		/* HT CPUs give the first remainder to the even thread, as
		   the 32nd control register */
		if (reserve_evntsel_nmi(MSR_P4_CRU_ESCR4))
			msrs->controls[i++].addr = MSR_P4_CRU_ESCR4;
	} else {
		/* and two copies of the second to the odd thread, for the
		   22nd and 23rd control registers */
		if (reserve_evntsel_nmi(MSR_P4_CRU_ESCR5)) {
			msrs->controls[i++].addr = MSR_P4_CRU_ESCR5;
			msrs->controls[i++].addr = MSR_P4_CRU_ESCR5;
		}
	}

	for (i = 0; i < num_counters; ++i) {
		if (!counter_config[i].enabled)
			continue;
		if (msrs->controls[i].addr)
			continue;
		op_x86_warn_reserved(i);
		p4_shutdown(msrs);
		return -EBUSY;
	}

	return 0;
}
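/*
 * The reservation-aware variant above may leave some MSRs unreserved, so its
 * error path calls p4_shutdown() to return whatever was taken. A minimal
 * sketch of such a cleanup routine follows, assuming the matching
 * release_perfctr_nmi()/release_evntsel_nmi() helpers and a num_controls
 * count of populated control slots (both assumptions for illustration).
 */
static void p4_shutdown(struct op_msrs const * const msrs)
{
	int i;

	for (i = 0; i < num_counters; ++i) {
		/* a counter and its CCCR were claimed together via reserve_perfctr_nmi() */
		if (msrs->counters[i].addr)
			release_perfctr_nmi(msrs->counters[i].addr);
	}
	/* the remaining control slots hold ESCRs reserved via reserve_evntsel_nmi() */
	for (i = num_counters; i < num_controls; ++i) {
		if (msrs->controls[i].addr)
			release_evntsel_nmi(msrs->controls[i].addr);
	}
}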