/* Tear down the per-CPU event-based-sampling counter for @cpu, if any. */
static void gator_event_sampling_offline_dispatch(int cpu)
{
	struct perf_event *ev = per_cpu(pevent, cpu);

	if (!ev)
		return;

	per_cpu(pevent, cpu) = NULL;
	perf_event_release_kernel(ev);
}
static void gator_event_sampling_online_dispatch(int cpu) { struct perf_event * ev; if (!event_based_sampling) return; #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0) ev = per_cpu(pevent, cpu) = perf_event_create_kernel_counter(per_cpu(pevent_attr, cpu), cpu, 0, ebs_overflow_handler); #else ev = per_cpu(pevent, cpu) = perf_event_create_kernel_counter(per_cpu(pevent_attr, cpu), cpu, 0, ebs_overflow_handler, 0); #endif if (IS_ERR(ev)) { pr_err("gator: unable to start event-based-sampling"); return; } if (ev->state != PERF_EVENT_STATE_ACTIVE) { pr_err("gator: unable to start event-based-sampling"); perf_event_release_kernel(ev); return; } ev->pmu->read(ev); per_cpu(prev_value, cpu) = local64_read(&ev->count); }
static void *perf_event_fd_array_get_ptr(struct bpf_map *map, int fd) { struct perf_event *event; const struct perf_event_attr *attr; event = perf_event_get(fd); if (IS_ERR(event)) return event; attr = perf_event_attrs(event); if (IS_ERR(attr)) goto err; if (attr->inherit) goto err; if (attr->type == PERF_TYPE_RAW) return event; if (attr->type == PERF_TYPE_HARDWARE) return event; if (attr->type == PERF_TYPE_SOFTWARE && attr->config == PERF_COUNT_SW_BPF_OUTPUT) return event; err: perf_event_release_kernel(event); return ERR_PTR(-EINVAL); }
static int op_create_counter(int cpu, int event) { struct perf_event *pevent; if (!counter_config[event].enabled || perf_events[cpu][event]) return 0; pevent = perf_event_create_kernel_counter(&counter_config[event].attr, cpu, NULL, op_overflow_handler, NULL); if (IS_ERR(pevent)) return PTR_ERR(pevent); if (pevent->state != PERF_EVENT_STATE_ACTIVE) { perf_event_release_kernel(pevent); pr_warning("oprofile: failed to enable event %d " "on CPU %d\n", event, cpu); return -EBUSY; } perf_events[cpu][event] = pevent; return 0; }
static int op_create_counter(int cpu, int event) { int ret = 0; struct perf_event *pevent; if (!counter_config[event].enabled || (perf_events[cpu][event] != NULL)) return ret; pevent = perf_event_create_kernel_counter(&counter_config[event].attr, cpu, -1, op_overflow_handler); if (IS_ERR(pevent)) { ret = PTR_ERR(pevent); } else if (pevent->state != PERF_EVENT_STATE_ACTIVE) { perf_event_release_kernel(pevent); pr_warning("oprofile: failed to enable event %d " "on CPU %d\n", event, cpu); ret = -EBUSY; } else { perf_events[cpu][event] = pevent; } return ret; }
/* Release the perf counter for @event on @cpu and clear its slot. */
static void op_destroy_counter(int cpu, int event)
{
	struct perf_event *ev = perf_events[cpu][event];

	if (!ev)
		return;

	perf_event_release_kernel(ev);
	perf_events[cpu][event] = NULL;
}
/*
 * Release the perf counter state set up by perf_counter_init(): free the
 * (global) attribute block and release the kernel counter.
 *
 * @pevent: event returned by perf_counter_init(); may be NULL.
 */
void perf_counter_term(struct perf_event *pevent)
{
	/* kfree(NULL) is a no-op, so no NULL guard is needed. */
	kfree(attr);
	attr = NULL;

	/* Release the performance event, if one was created. */
	if (pevent != NULL)
		perf_event_release_kernel(pevent);
}
/* Stop and release the hard-lockup watchdog event on @cpu, if present. */
static void watchdog_nmi_disable(unsigned int cpu)
{
	struct perf_event *ev = per_cpu(watchdog_ev, cpu);

	if (!ev)
		return;

	perf_event_disable(ev);
	per_cpu(watchdog_ev, cpu) = NULL;

	/* should be in cleanup, but blocks oprofile */
	perf_event_release_kernel(ev);
}
/* * This function sets up the performance counter registers to record. */ struct perf_event *perf_counter_init(struct task_struct *tsk){ int cpu; struct perf_event *pevent; if(attr == NULL) { attr = kmalloc(sizeof(*attr), GFP_KERNEL); memset(attr,0,sizeof(*attr)); attr->type = PERF_TYPE_HARDWARE; //attr->type = PERF_TYPE_RAW; attr->config = PERF_COUNT_HW_INSTRUCTIONS; attr->size = sizeof(*attr); attr->sample_period = 0; attr->disabled = 0; attr->inherit = 0; attr->pinned = 1; //TODO Not sure if we want this set or not yet //attr->exclusive = 1; attr->freq = 0; attr->exclude_user = 0; attr->exclude_kernel = 1; attr->exclude_hv = 1; attr->exclude_idle = 1; attr->inherit_stat = 1; //attr->precise_ip = 3; //attr->wakeup_events = 10000; // wakeup every n events } cpu = -1; // count events for this thread on all cpus pevent = perf_event_create_kernel_counter(attr, cpu, tsk, (perf_overflow_handler_t) capo_overflow_handler); if (IS_ERR(pevent)){ BUG(); printk(KERN_CRIT "***** PERFCT: Failed to create kernel counter"); //return PTR_ERR(pevent); return NULL; } if (pevent->state != PERF_EVENT_STATE_ACTIVE) { printk(KERN_CRIT "***** PERFCT: Failed to enable kernel counter"); kfree(attr); attr=NULL; perf_event_release_kernel(pevent); BUG(); //return -EBUSY; return NULL; } return pevent; }
/** Delete performance event.
 * Call this to shutdown performance event sampling
 *
 * @param stp Handle for the event to be unregistered.
 */
static void _stp_perf_del (struct stap_perf_probe *stp)
{
	if (stp && stp->events) {
		int cpu;
		/* shut down performance event sampling */
		stp_for_each_cpu(cpu) {
			struct perf_event **event = per_cpu_ptr (stp->events, cpu);
			if (*event) {
				perf_event_release_kernel(*event);
			}
		}
		_stp_free_percpu (stp->events);
		stp->events = NULL;
	}
} /* fix: the original was missing this closing brace */
/** * kvm_pmu_stop_counter - stop PMU counter * @pmc: The PMU counter pointer * * If this counter has been configured to monitor some event, release it here. */ static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc) { u64 counter, reg; if (pmc->perf_event) { counter = kvm_pmu_get_counter_value(vcpu, pmc->idx); reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX) ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx; __vcpu_sys_reg(vcpu, reg) = counter; perf_event_disable(pmc->perf_event); perf_event_release_kernel(pmc->perf_event); pmc->perf_event = NULL; } }
/*
 * Tear down the event-based-sampling counter for @cpu. The event is
 * detached from the per-CPU slot under perf_mutex and released outside
 * the lock.
 */
static void gator_event_sampling_offline_dispatch(int cpu)
{
	struct perf_event *ev;

	mutex_lock(&perf_mutex);
	ev = per_cpu(pevent_ebs, cpu);
	per_cpu(pevent_ebs, cpu) = NULL;
	mutex_unlock(&perf_mutex);

	if (ev)
		perf_event_release_kernel(ev);
}
/** * kvm_pmu_vcpu_destroy - free perf event of PMU for cpu * @vcpu: The vcpu pointer * */ void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) { int i; struct kvm_pmu *pmu = &vcpu->arch.pmu; for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) { struct kvm_pmc *pmc = &pmu->pmc[i]; if (pmc->perf_event) { perf_event_disable(pmc->perf_event); perf_event_release_kernel(pmc->perf_event); pmc->perf_event = NULL; } } }
/* Unhook the CPU-hotplug state and release every per-CPU timer event. */
static void nmi_timer_shutdown(void)
{
	int cpu;

	cpuhp_remove_state(hp_online);

	for_each_possible_cpu(cpu) {
		struct perf_event *ev = per_cpu(nmi_timer_events, cpu);

		if (!ev)
			continue;

		perf_event_disable(ev);
		per_cpu(nmi_timer_events, cpu) = NULL;
		perf_event_release_kernel(ev);
	}
}
/*
 * Stop and release the hard-lockup watchdog event on @cpu; on CPU 0 also
 * reset the saved error state for the next enable.
 */
void watchdog_nmi_disable(unsigned int cpu)
{
	struct perf_event *ev = per_cpu(watchdog_ev, cpu);

	if (ev) {
		perf_event_disable(ev);
		per_cpu(watchdog_ev, cpu) = NULL;

		/* should be in cleanup, but blocks oprofile */
		perf_event_release_kernel(ev);
	}

	/* watchdog_nmi_enable() expects this to be zero initially. */
	if (cpu == 0)
		cpu0_err = 0;
}
/* Release every per-CPU counter, free the bookkeeping arrays, and tear
 * down the driverfs interface. */
void oprofile_perf_exit(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		int id;

		for (id = 0; id < num_counters; ++id) {
			struct perf_event *ev = perf_events[cpu][id];

			if (ev)
				perf_event_release_kernel(ev);
		}
		kfree(perf_events[cpu]);
	}

	kfree(counter_config);
	exit_driverfs();
}
/*
 * Unregister the hotplug notifier and release every per-CPU timer event
 * (pre-cpuhp notifier API).
 */
static void nmi_timer_shutdown(void)
{
	int cpu;

	cpu_notifier_register_begin();
	__unregister_cpu_notifier(&nmi_timer_cpu_nb);

	for_each_possible_cpu(cpu) {
		struct perf_event *ev = per_cpu(nmi_timer_events, cpu);

		if (!ev)
			continue;

		perf_event_disable(ev);
		per_cpu(nmi_timer_events, cpu) = NULL;
		perf_event_release_kernel(ev);
	}

	cpu_notifier_register_done();
}
static void *perf_event_fd_array_get_ptr(struct bpf_map *map, int fd) { struct perf_event *event; const struct perf_event_attr *attr; event = perf_event_get(fd); if (IS_ERR(event)) return event; attr = perf_event_attrs(event); if (IS_ERR(attr)) return (void *)attr; if (attr->type != PERF_TYPE_RAW && attr->type != PERF_TYPE_HARDWARE) { perf_event_release_kernel(event); return ERR_PTR(-EINVAL); } return event; }
/* Drop the reference taken when the event was stored in the map. */
static void perf_event_fd_array_put_ptr(void *ptr)
{
	perf_event_release_kernel((struct perf_event *)ptr);
}
/**
 * unregister_hw_breakpoint - unregister a user-space hardware breakpoint
 * @bp: the breakpoint structure to unregister; NULL is tolerated.
 */
void unregister_hw_breakpoint(struct perf_event *bp)
{
	if (bp)
		perf_event_release_kernel(bp);
}
/* Stop @cpu's sampling timer, then release its perf event. */
static void hrprof_destroy_event(int cpu)
{
	hrprof_cpu_t *state = per_cpu_ptr(&hrprof_cpu_event, cpu);

	/* Cancel the timer first so its callback can't touch the event. */
	hrtimer_cancel(&state->hrtimer);
	perf_event_release_kernel(state->event);
}