static void bts_event_start(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
	struct bts_buffer *buf;

	buf = perf_aux_output_begin(&bts->handle, event);
	if (!buf)
		goto fail_stop;

	if (bts_buffer_reset(buf, &bts->handle))
		goto fail_end_stop;

	/* save the DS area settings so they can be restored when tracing stops */
	bts->ds_back.bts_buffer_base = cpuc->ds->bts_buffer_base;
	bts->ds_back.bts_absolute_maximum = cpuc->ds->bts_absolute_maximum;
	bts->ds_back.bts_interrupt_threshold = cpuc->ds->bts_interrupt_threshold;

	event->hw.itrace_started = 1;
	event->hw.state = 0;

	__bts_event_start(event);

	return;

fail_end_stop:
	perf_aux_output_end(&bts->handle, 0, false);

fail_stop:
	event->hw.state = PERF_HES_STOPPED;
}
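Note the unwinding discipline: a successful perf_aux_output_begin() must always be balanced by perf_aux_output_end(), even when the later buffer reset fails, and the event must be left in PERF_HES_STOPPED on any failure. A minimal user-space sketch of the same goto-unwind idiom (all names here are hypothetical, not kernel API):

#include <stdio.h>
#include <stdlib.h>

static int setup(void)
{
	void *buf;
	FILE *f;

	buf = malloc(64);			/* step 1: acquire a resource */
	if (!buf)
		goto fail;

	f = fopen("/dev/null", "w");		/* step 2: acquire another */
	if (!f)
		goto fail_free;

	/* success path: both resources live */
	fclose(f);
	free(buf);
	return 0;

fail_free:
	free(buf);				/* undo step 1 only */
fail:
	return -1;				/* caller sees the "stopped" state */
}

int main(void)
{
	return setup() ? 1 : 0;
}

Each label undoes exactly the steps acquired before the jump, in reverse order, which is why fail_end_stop falls through into fail_stop above.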
static int bts_event_add(struct perf_event *event, int mode)
{
	struct bts_buffer *buf;
	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int ret = -EBUSY;

	event->hw.state = PERF_HES_STOPPED;

	/* the fixed BTS counter is already in use */
	if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
		return -EBUSY;

	/* another event already owns the AUX handle */
	if (bts->handle.event)
		return -EBUSY;

	buf = perf_aux_output_begin(&bts->handle, event);
	if (!buf)
		return -EINVAL;

	ret = bts_buffer_reset(buf, &bts->handle);
	if (ret) {
		perf_aux_output_end(&bts->handle, 0, false);
		return ret;
	}

	/* save the DS area settings so they can be restored when tracing stops */
	bts->ds_back.bts_buffer_base = cpuc->ds->bts_buffer_base;
	bts->ds_back.bts_absolute_maximum = cpuc->ds->bts_absolute_maximum;
	bts->ds_back.bts_interrupt_threshold = cpuc->ds->bts_interrupt_threshold;

	if (mode & PERF_EF_START) {
		bts_event_start(event, 0);
		/* start() has no return value; it reports failure via hw.state */
		if (hwc->state & PERF_HES_STOPPED) {
			bts_event_del(event, 0);
			return -EBUSY;
		}
	}

	return 0;
}
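The start-failure convention here is worth spelling out: pmu::start() returns void, so it can only signal failure by leaving PERF_HES_STOPPED set in event->hw.state, and add() checks that bit after the call. A minimal sketch of the same convention with a made-up driver (names invented for illustration):

#include <stdio.h>

#define HES_STOPPED	(1 << 0)	/* mirrors PERF_HES_STOPPED */
#define EF_START	(1 << 0)	/* mirrors PERF_EF_START */

struct fake_event {
	unsigned int state;
};

/* start() cannot return an error, so it reports failure through state */
static void fake_start(struct fake_event *ev, int hw_ok)
{
	if (!hw_ok) {
		ev->state = HES_STOPPED;
		return;
	}
	ev->state = 0;
}

static int fake_add(struct fake_event *ev, int mode, int hw_ok)
{
	ev->state = HES_STOPPED;

	if (mode & EF_START) {
		fake_start(ev, hw_ok);
		if (ev->state & HES_STOPPED)
			return -1;	/* start() failed; undo the add */
	}
	return 0;
}

int main(void)
{
	struct fake_event ev;

	printf("add+start, hw ok:  %d\n", fake_add(&ev, EF_START, 1));
	printf("add+start, hw bad: %d\n", fake_add(&ev, EF_START, 0));
	return 0;
}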
int intel_bts_interrupt(void)
{
	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
	struct perf_event *event = bts->handle.event;
	struct bts_buffer *buf;
	s64 old_head;
	int err;

	if (!event || !bts->started)
		return 0;

	buf = perf_get_aux(&bts->handle);
	/*
	 * Skip snapshot counters: they don't use the interrupt, but
	 * there's no other way of telling, because the pointer will
	 * keep moving
	 */
	if (!buf || buf->snapshot)
		return 0;

	old_head = local_read(&buf->head);
	bts_update(bts);

	/* no new data */
	if (old_head == local_read(&buf->head))
		return 0;

	perf_aux_output_end(&bts->handle, local_xchg(&buf->data_size, 0),
			    !!local_xchg(&buf->lost, 0));

	buf = perf_aux_output_begin(&bts->handle, event);
	if (!buf)
		return 1;

	err = bts_buffer_reset(buf, &bts->handle);
	if (err)
		perf_aux_output_end(&bts->handle, 0, false);

	return 1;
}
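In both versions of the handler, the data_size and lost counters are drained with local_xchg(), which reads the accumulated value and resets it to zero in a single step, so bytes accounted between bts_update() and the flush are never double-counted across interrupts. A rough user-space analogue of that drain idiom using C11 atomics (the types and names are illustrative, not the kernel's local_t API):

#include <stdatomic.h>
#include <stdio.h>

static atomic_long data_size;	/* statics are zero-initialized */
static atomic_long lost;

/* producer side: account new data as it arrives */
static void account(long bytes, int dropped)
{
	atomic_fetch_add(&data_size, bytes);
	if (dropped)
		atomic_store(&lost, 1);
}

/* consumer side: drain both counters atomically, as the handler
 * does with local_xchg(&buf->data_size, 0) */
static void flush(void)
{
	long sz = atomic_exchange(&data_size, 0);
	int truncated = !!atomic_exchange(&lost, 0);

	printf("flushed %ld bytes%s\n", sz, truncated ? " (truncated)" : "");
}

int main(void)
{
	account(512, 0);
	account(256, 1);
	flush();	/* 768 bytes (truncated) */
	flush();	/* 0 bytes: counters were reset by the first flush */
	return 0;
}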
int intel_bts_interrupt(void)
{
	struct debug_store *ds = this_cpu_ptr(&cpu_hw_events)->ds;
	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
	struct perf_event *event = bts->handle.event;
	struct bts_buffer *buf;
	s64 old_head;
	int err = -ENOSPC, handled = 0;

	/*
	 * The only surefire way of knowing if this NMI is ours is by checking
	 * the write ptr against the PMI threshold. Note that ds may be NULL
	 * if the DS area is not set up.
	 */
	if (ds && (ds->bts_index >= ds->bts_interrupt_threshold))
		handled = 1;

	/*
	 * this is wrapped in intel_bts_enable_local/intel_bts_disable_local,
	 * so we can only be INACTIVE or STOPPED
	 */
	if (READ_ONCE(bts->state) == BTS_STATE_STOPPED)
		return handled;

	buf = perf_get_aux(&bts->handle);
	if (!buf)
		return handled;

	/*
	 * Skip snapshot counters: they don't use the interrupt, but
	 * there's no other way of telling, because the pointer will
	 * keep moving
	 */
	if (buf->snapshot)
		return 0;

	old_head = local_read(&buf->head);
	bts_update(bts);

	/* no new data */
	if (old_head == local_read(&buf->head))
		return handled;

	perf_aux_output_end(&bts->handle, local_xchg(&buf->data_size, 0),
			    !!local_xchg(&buf->lost, 0));

	buf = perf_aux_output_begin(&bts->handle, event);
	if (buf)
		err = bts_buffer_reset(buf, &bts->handle);

	if (err) {
		WRITE_ONCE(bts->state, BTS_STATE_STOPPED);

		if (buf) {
			/*
			 * BTS_STATE_STOPPED should be visible before
			 * cleared handle::event
			 */
			barrier();
			perf_aux_output_end(&bts->handle, 0, false);
		}
	}

	return 1;
}
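The barrier() here is a compiler barrier only, and that is sufficient: the NMI and the PMU callbacks run on the same CPU, so all that must be prevented is the compiler reordering the BTS_STATE_STOPPED store after the store that clears handle::event inside perf_aux_output_end(). A cross-thread version of the same invariant would need real release/acquire ordering. A hedged user-space sketch of that stronger variant (names hypothetical), where observing the cleared handle guarantees observing STOPPED:

#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

enum bts_like_state { ST_STOPPED, ST_INACTIVE, ST_ACTIVE };

static int token;			/* stands in for the real event */
static _Atomic int state = ST_ACTIVE;
static void *_Atomic handle = &token;

/* teardown: make ST_STOPPED visible no later than the cleared handle;
 * the release store on handle orders the earlier state store before it */
static void stop_on_error(void)
{
	atomic_store_explicit(&state, ST_STOPPED, memory_order_relaxed);
	atomic_store_explicit(&handle, NULL, memory_order_release);
}

/* observer: if it sees the cleared handle, the acquire load pairs with
 * the release store above, so ST_STOPPED is guaranteed to be visible */
static void observe(void)
{
	void *h = atomic_load_explicit(&handle, memory_order_acquire);

	if (!h) {
		assert(atomic_load_explicit(&state,
					    memory_order_relaxed) == ST_STOPPED);
		printf("handle gone, state correctly STOPPED\n");
	} else {
		printf("handle live\n");
	}
}

int main(void)
{
	observe();
	stop_on_error();
	observe();
	return 0;
}

The kernel gets away with the cheaper barrier() because an NMI executes atomically with respect to the code it interrupts: it sees either both stores done, only the first, or neither, never the cleared handle with a stale state.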