int kpc_set_running(uint32_t classes) { uint32_t all_cfg_classes = KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK; struct kpc_running_remote mp_config = { .classes = classes, .cfg_target_mask= 0ULL, .cfg_state_mask = 0ULL }; /* target all available PMCs */ mp_config.cfg_target_mask = kpc_get_configurable_pmc_mask(all_cfg_classes); /* translate the power class for the machine layer */ if (classes & KPC_CLASS_POWER_MASK) mp_config.classes |= KPC_CLASS_CONFIGURABLE_MASK; /* generate the state of each configurable PMCs */ mp_config.cfg_state_mask = kpc_get_configurable_pmc_mask(classes); return kpc_set_running_arch(&mp_config); } boolean_t kpc_register_pm_handler(kpc_pm_handler_t handler) { return kpc_reserve_pm_counters(0x38, handler, TRUE); } boolean_t kpc_reserve_pm_counters(uint64_t pmc_mask, kpc_pm_handler_t handler, boolean_t custom_config) { uint64_t all_mask = (1ULL << kpc_configurable_count()) - 1; uint64_t req_mask = 0ULL; /* pre-condition */ assert(handler != NULL); assert(kpc_pm_handler == NULL); /* check number of counters requested */ req_mask = (pmc_mask & all_mask); assert(kpc_popcount(req_mask) <= kpc_configurable_count()); /* save the power manager states */ kpc_pm_has_custom_config = custom_config; kpc_pm_pmc_mask = req_mask; kpc_pm_handler = handler; printf("kpc: pm registered pmc_mask=%llx custom_config=%d\n", req_mask, custom_config); /* post-condition */ { uint32_t cfg_count = kpc_get_counter_count(KPC_CLASS_CONFIGURABLE_MASK); uint32_t pwr_count = kpc_popcount(kpc_pm_pmc_mask); #pragma unused(cfg_count, pwr_count) assert((cfg_count + pwr_count) == kpc_configurable_count()); } return force_all_ctrs ? FALSE : TRUE; }
/*
 * Read the current CPU's counter values for the requested classes into
 * buf, optionally reporting the CPU id through curcpu.  May be called
 * from an IPI.  Returns the number of uint64_t values written.
 *
 * Fix: `enabled` is now boolean_t — it stores the boolean_t returned by
 * ml_set_interrupts_enabled() and is passed straight back to it, and the
 * sibling kpc_get_shadow_counters() already declares it boolean_t.
 */
int
kpc_get_curcpu_counters(uint32_t classes, int *curcpu, uint64_t *buf)
{
	int offset = 0;
	uint64_t pmc_mask = 0ULL;
	boolean_t enabled;

	assert(buf);

	/* disable interrupts so counters and CPU id are read atomically */
	enabled = ml_set_interrupts_enabled(FALSE);

	/* grab counters and CPU number as close as possible */
	if (curcpu) {
		*curcpu = current_processor()->cpu_id;
	}

	if (classes & KPC_CLASS_FIXED_MASK) {
		kpc_get_fixed_counters(&buf[offset]);
		offset += kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
		kpc_get_configurable_counters(&buf[offset], pmc_mask);
		offset += kpc_popcount(pmc_mask);
	}

	if (classes & KPC_CLASS_POWER_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);
		kpc_get_configurable_counters(&buf[offset], pmc_mask);
		offset += kpc_popcount(pmc_mask);
	}

	ml_set_interrupts_enabled(enabled);

	return offset;
}
/*
 * Copy the shadow (last-sampled) counter values for the requested
 * classes into buf — for the current CPU only, or for every logical CPU
 * when all_cpus is TRUE.  Optionally reports the current CPU id through
 * curcpu.  Returns the number of uint64_t values written.
 *
 * Fix: curcpu_id was initialized from current_processor()->cpu_id at
 * declaration — before interrupts were disabled — and then immediately
 * overwritten by the post-disable read.  The dead pre-disable read is
 * removed; only the race-free read remains.
 */
int
kpc_get_shadow_counters(boolean_t all_cpus, uint32_t classes,
    int *curcpu, uint64_t *buf)
{
	int curcpu_id;
	uint32_t cfg_count = kpc_configurable_count(), offset = 0;
	uint64_t pmc_mask = 0ULL;
	boolean_t enabled;

	assert(buf);

	enabled = ml_set_interrupts_enabled(FALSE);

	/* read the CPU id with interrupts off so it cannot change under us */
	curcpu_id = current_processor()->cpu_id;
	if (curcpu) {
		*curcpu = curcpu_id;
	}

	for (int cpu = 0; cpu < machine_info.logical_cpu_max; ++cpu) {
		/* filter if the caller did not request all cpus */
		if (!all_cpus && (cpu != curcpu_id)) {
			continue;
		}

		if (classes & KPC_CLASS_FIXED_MASK) {
			uint32_t count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
			memcpy(&buf[offset], &FIXED_SHADOW_CPU(cpu, 0),
			    count * sizeof(uint64_t));
			offset += count;
		}

		if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
			pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
			for (uint32_t cfg_ctr = 0; cfg_ctr < cfg_count; ++cfg_ctr) {
				if ((1ULL << cfg_ctr) & pmc_mask) {
					buf[offset++] = CONFIGURABLE_SHADOW_CPU(cpu, cfg_ctr);
				}
			}
		}

		if (classes & KPC_CLASS_POWER_MASK) {
			pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);
			for (uint32_t cfg_ctr = 0; cfg_ctr < cfg_count; ++cfg_ctr) {
				if ((1ULL << cfg_ctr) & pmc_mask) {
					buf[offset++] = CONFIGURABLE_SHADOW_CPU(cpu, cfg_ctr);
				}
			}
		}
	}

	ml_set_interrupts_enabled(enabled);

	return offset;
}
/*
 * Release the configurable PMCs previously reserved by the power
 * manager and return them to the configurable class.
 */
void
kpc_release_pm_counters(void)
{
	/* pre-condition: a PM handler must currently be registered */
	assert(kpc_pm_handler != NULL);

	/* drop all recorded power-manager state */
	kpc_pm_has_custom_config = FALSE;
	kpc_pm_pmc_mask = 0ULL;
	kpc_pm_handler = NULL;

	printf("kpc: pm released counters\n");

	/* post-condition: every configurable PMC is configurable again */
	assert(kpc_get_counter_count(KPC_CLASS_CONFIGURABLE_MASK) ==
	    kpc_configurable_count());
}
/*
 * Report the sampling period of every counter in the requested classes,
 * in class order (FIXED, CONFIGURABLE, POWER), one uint64_t per counter.
 * Periods are derived from the stored reload values.  Always returns 0.
 */
int
kpc_get_period(uint32_t classes, uint64_t *val)
{
	uint64_t pmc_mask = 0ULL;

	assert(val);

	lck_mtx_lock(&kpc_config_lock);

	if (classes & KPC_CLASS_FIXED_MASK) {
		/* period = max count - reload value */
		uint32_t fixed = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
		for (uint32_t ctr = 0; ctr < fixed; ++ctr) {
			*val++ = kpc_fixed_max() - FIXED_RELOAD(ctr);
		}
	}

	/* CONFIGURABLE and POWER share the same reload bookkeeping */
	const uint32_t cfg_classes[2] = {
		KPC_CLASS_CONFIGURABLE_MASK, KPC_CLASS_POWER_MASK
	};
	for (unsigned c = 0; c < 2; ++c) {
		if ((classes & cfg_classes[c]) == 0) {
			continue;
		}
		pmc_mask = kpc_get_configurable_pmc_mask(cfg_classes[c]);
		uint32_t n_cfg = kpc_configurable_count();
		for (uint32_t ctr = 0; ctr < n_cfg; ++ctr) {
			if (pmc_mask & (1ULL << ctr)) {
				*val++ = kpc_configurable_max() - CONFIGURABLE_RELOAD(ctr);
			}
		}
	}

	lck_mtx_unlock(&kpc_config_lock);

	return 0;
}
/*
 * Enable or disable per-thread counter accumulation for the given
 * classes.  Passing 0 classes (or classes with no counters) disables
 * thread counting.  Always returns 0.
 */
int
kpc_set_thread_counting(uint32_t classes)
{
	uint32_t n_counters;

	lck_mtx_lock(&kpc_thread_lock);

	n_counters = kpc_get_counter_count(classes);

	if ((classes != 0) && (n_counters != 0)) {
		/* remember what we are counting and how many slots it needs */
		kpc_thread_classes = classes;
		kpc_thread_classes_count = n_counters;
		assert(kpc_thread_classes_count <= KPC_MAX_COUNTERS);

		/* flip the switch */
		kpc_threads_counting = TRUE;

		/* make sure this thread gets a buffer via an AST */
		if (!current_thread()->kpc_buf) {
			current_thread()->kperf_flags |= T_KPC_ALLOC;
			act_set_kperf(current_thread());
		}
	} else {
		/* nothing (or nothing countable) requested: shut down */
		kpc_threads_counting = FALSE;
	}

	kpc_off_cpu_update();
	lck_mtx_unlock(&kpc_thread_lock);

	return 0;
}
/*
 * Install per-counter action ids from val for the requested classes, in
 * class order (FIXED, CONFIGURABLE, POWER).  Always returns 0.
 *
 * NOTE: what happens if a PMI occurs while action ids are being set is
 * undefined.
 */
int
kpc_set_actionid(uint32_t classes, uint32_t *val)
{
	uint64_t pmc_mask = 0ULL;

	assert(val);

	lck_mtx_lock(&kpc_config_lock);

	if (classes & KPC_CLASS_FIXED_MASK) {
		uint32_t fixed = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
		memcpy(&FIXED_ACTIONID(0), val, fixed * sizeof(uint32_t));
		val += fixed;
	}

	/* CONFIGURABLE and POWER both map onto the configurable action ids */
	const uint32_t cfg_classes[2] = {
		KPC_CLASS_CONFIGURABLE_MASK, KPC_CLASS_POWER_MASK
	};
	for (unsigned c = 0; c < 2; ++c) {
		if ((classes & cfg_classes[c]) == 0) {
			continue;
		}
		pmc_mask = kpc_get_configurable_pmc_mask(cfg_classes[c]);
		uint32_t n_cfg = kpc_configurable_count();
		for (uint32_t ctr = 0; ctr < n_cfg; ++ctr) {
			if (pmc_mask & (1ULL << ctr)) {
				CONFIGURABLE_ACTIONID(ctr) = *val++;
			}
		}
	}

	lck_mtx_unlock(&kpc_config_lock);

	return 0;
}
/*
 * Read the per-counter action ids for the requested classes into val,
 * in class order (FIXED, CONFIGURABLE, POWER).  Always returns 0.
 */
int
kpc_get_actionid(uint32_t classes, uint32_t *val)
{
	uint64_t pmc_mask = 0ULL;

	assert(val);

	lck_mtx_lock(&kpc_config_lock);

	if (classes & KPC_CLASS_FIXED_MASK) {
		uint32_t fixed = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
		memcpy(val, &FIXED_ACTIONID(0), fixed * sizeof(uint32_t));
		val += fixed;
	}

	/* CONFIGURABLE and POWER both read from the configurable action ids */
	const uint32_t cfg_classes[2] = {
		KPC_CLASS_CONFIGURABLE_MASK, KPC_CLASS_POWER_MASK
	};
	for (unsigned c = 0; c < 2; ++c) {
		if ((classes & cfg_classes[c]) == 0) {
			continue;
		}
		pmc_mask = kpc_get_configurable_pmc_mask(cfg_classes[c]);
		uint32_t n_cfg = kpc_configurable_count();
		for (uint32_t ctr = 0; ctr < n_cfg; ++ctr) {
			if (pmc_mask & (1ULL << ctr)) {
				*val++ = CONFIGURABLE_ACTIONID(ctr);
			}
		}
	}

	lck_mtx_unlock(&kpc_config_lock);

	return 0;
}