static int ppro_fill_in_addresses(struct op_msrs * const msrs)
{
        int i;

        for (i = 0; i < num_counters; i++) {
                if (!reserve_perfctr_nmi(MSR_P6_PERFCTR0 + i))
                        goto fail;
                if (!reserve_evntsel_nmi(MSR_P6_EVNTSEL0 + i)) {
                        release_perfctr_nmi(MSR_P6_PERFCTR0 + i);
                        goto fail;
                }
                /* both MSRs reserved, record their addresses */
                msrs->counters[i].addr = MSR_P6_PERFCTR0 + i;
                msrs->controls[i].addr = MSR_P6_EVNTSEL0 + i;
                continue;
        fail:
                if (!counter_config[i].enabled)
                        continue;
                op_x86_warn_reserved(i);
                /* release everything reserved so far before bailing out */
                ppro_shutdown(msrs);
                return -EBUSY;
        }

        return 0;
}
static void ppro_fill_in_addresses(struct op_msrs * const msrs)
{
        int i;

        for (i = 0; i < num_counters; i++) {
                if (reserve_perfctr_nmi(MSR_P6_PERFCTR0 + i))
                        msrs->counters[i].addr = MSR_P6_PERFCTR0 + i;
                else
                        msrs->counters[i].addr = 0;
        }

        for (i = 0; i < num_counters; i++) {
                if (reserve_evntsel_nmi(MSR_P6_EVNTSEL0 + i))
                        msrs->controls[i].addr = MSR_P6_EVNTSEL0 + i;
                else
                        msrs->controls[i].addr = 0;
        }
}
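Note the contrast between the two variants above: the void version silently records 0 for anything it could not reserve, while the int version unwinds and reports -EBUSY so the caller can abort setup. As a minimal sketch of the caller side of the int-returning variant (the example_setup() wrapper is a hypothetical stand-in for the real NMI setup path, which is not shown in this section):

static int example_setup(struct op_msrs *msrs)
{
        int err;

        /* 0 on success; -EBUSY if an enabled counter could not be reserved */
        err = ppro_fill_in_addresses(msrs);
        if (err)
                return err;     /* ppro_fill_in_addresses() already called
                                 * ppro_shutdown(), so nothing to unwind here */

        /* ... program the reserved counter/control MSRs ... */
        return 0;
}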
static void athlon_fill_in_addresses(struct op_msrs * const msrs)
{
        int i;

        for (i = 0; i < NUM_COUNTERS; i++) {
                if (reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i))
                        msrs->counters[i].addr = MSR_K7_PERFCTR0 + i;
                else
                        msrs->counters[i].addr = 0;
        }

        for (i = 0; i < NUM_CONTROLS; i++) {
                if (reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i))
                        msrs->controls[i].addr = MSR_K7_EVNTSEL0 + i;
                else
                        msrs->controls[i].addr = 0;
        }
}
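Because the void variants leave addr == 0 for any MSR they fail to reserve, every later consumer must test the address before touching the register. A minimal sketch of that guard, assuming a CTR_IS_RESERVED()-style helper in the style of the op_model_* files (the example_setup_ctrs() function is illustrative, not from this section):

/* hypothetical guard: a counter is usable only if its MSR was reserved */
#define CTR_IS_RESERVED(msrs, c)        ((msrs)->counters[(c)].addr ? 1 : 0)

static void example_setup_ctrs(struct op_msrs * const msrs)
{
        int i;

        for (i = 0; i < NUM_COUNTERS; i++) {
                /* skip counters whose MSRs could not be reserved above */
                if (!CTR_IS_RESERVED(msrs, i))
                        continue;
                /* ... safe to program msrs->counters[i].addr here ... */
        }
}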
static int p4_fill_in_addresses(struct op_msrs * const msrs)
{
        unsigned int i;
        unsigned int addr, cccraddr, stag;

        setup_num_counters();
        stag = get_stagger();

        /* the counter & cccr registers we pay attention to */
        for (i = 0; i < num_counters; ++i) {
                addr = p4_counters[VIRT_CTR(stag, i)].counter_address;
                cccraddr = p4_counters[VIRT_CTR(stag, i)].cccr_address;
                if (reserve_perfctr_nmi(addr)) {
                        msrs->counters[i].addr = addr;
                        msrs->controls[i].addr = cccraddr;
                }
        }

        /* 43 ESCR registers in three or four discontiguous groups */
        for (addr = MSR_P4_BSU_ESCR0 + stag;
             addr < MSR_P4_IQ_ESCR0; ++i, addr += addr_increment()) {
                if (reserve_evntsel_nmi(addr))
                        msrs->controls[i].addr = addr;
        }

        /* no IQ_ESCR0/1 on some models, so reserve BSU_ESCR0/1 a second
         * time to avoid a special case in nmi_{save|restore}_registers() */
        if (boot_cpu_data.x86_model >= 0x3) {
                for (addr = MSR_P4_BSU_ESCR0 + stag;
                     addr <= MSR_P4_BSU_ESCR1; ++i, addr += addr_increment()) {
                        if (reserve_evntsel_nmi(addr))
                                msrs->controls[i].addr = addr;
                }
        } else {
                for (addr = MSR_P4_IQ_ESCR0 + stag;
                     addr <= MSR_P4_IQ_ESCR1; ++i, addr += addr_increment()) {
                        if (reserve_evntsel_nmi(addr))
                                msrs->controls[i].addr = addr;
                }
        }

        for (addr = MSR_P4_RAT_ESCR0 + stag;
             addr <= MSR_P4_SSU_ESCR0; ++i, addr += addr_increment()) {
                if (reserve_evntsel_nmi(addr))
                        msrs->controls[i].addr = addr;
        }

        for (addr = MSR_P4_MS_ESCR0 + stag;
             addr <= MSR_P4_TC_ESCR1; ++i, addr += addr_increment()) {
                if (reserve_evntsel_nmi(addr))
                        msrs->controls[i].addr = addr;
        }

        for (addr = MSR_P4_IX_ESCR0 + stag;
             addr <= MSR_P4_CRU_ESCR3; ++i, addr += addr_increment()) {
                if (reserve_evntsel_nmi(addr))
                        msrs->controls[i].addr = addr;
        }

        /* there are 2 remaining non-contiguously located ESCRs */
        if (num_counters == NUM_COUNTERS_NON_HT) {
                /* standard non-HT CPUs handle both remaining ESCRs */
                if (reserve_evntsel_nmi(MSR_P4_CRU_ESCR5))
                        msrs->controls[i++].addr = MSR_P4_CRU_ESCR5;
                if (reserve_evntsel_nmi(MSR_P4_CRU_ESCR4))
                        msrs->controls[i++].addr = MSR_P4_CRU_ESCR4;
        } else if (stag == 0) {
                /* HT CPUs give the first remainder to the even thread, as
                   the 32nd control register */
                if (reserve_evntsel_nmi(MSR_P4_CRU_ESCR4))
                        msrs->controls[i++].addr = MSR_P4_CRU_ESCR4;
        } else {
                /* and two copies of the second to the odd thread, for
                   the 22nd and 23rd control registers */
                if (reserve_evntsel_nmi(MSR_P4_CRU_ESCR5)) {
                        msrs->controls[i++].addr = MSR_P4_CRU_ESCR5;
                        msrs->controls[i++].addr = MSR_P4_CRU_ESCR5;
                }
        }

        for (i = 0; i < num_counters; ++i) {
                if (!counter_config[i].enabled)
                        continue;
                if (msrs->controls[i].addr)
                        continue;
                op_x86_warn_reserved(i);
                p4_shutdown(msrs);
                return -EBUSY;
        }

        return 0;
}
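The hyper-threading helpers used above (VIRT_CTR(), addr_increment()) are defined elsewhere in op_model_p4.c and are not shown in this section. As a rough sketch of how they interleave the two HT siblings' register sets (the exact in-tree definitions may differ in detail):

/* sketch: sibling 1's counters follow sibling 0's block of num_counters */
#define VIRT_CTR(stagger, i)    ((i) + ((num_counters) * (stagger)))

static unsigned int addr_increment(void)
{
#ifdef CONFIG_SMP
        /* with two HT siblings, each thread takes every other ESCR */
        return smp_num_siblings == 2 ? 2 : 1;
#else
        return 1;
#endif
}

This is why every ESCR loop starts at base + stag and steps by addr_increment(): on an HT CPU the even and odd threads walk disjoint, interleaved halves of each ESCR group.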