#include <sched.h>
#include <stdint.h>

#include "ebb.h"	/* selftest harness: FAIL_IF(), ebb_*, trace_*, mfspr()/mtspr() */

static int victim_child(union pipe read_pipe, union pipe write_pipe)
{
	FAIL_IF(wait_for_parent(read_pipe));

	/* Set up our EBB handler before the EBB event is created */
	ebb_enable_pmc_counting(1);
	setup_ebb_handler(standard_ebb_callee);
	ebb_global_enable();

	FAIL_IF(notify_parent(write_pipe));

	while (ebb_state.stats.ebb_count < 20) {
		FAIL_IF(core_busy_loop());
	}

	ebb_global_disable();
	ebb_freeze_pmcs();

	count_pmc(1, sample_period);

	dump_ebb_state();

	FAIL_IF(ebb_state.stats.ebb_count == 0);

	return 0;
}
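/*
 * For reference, a minimal sketch of the pipe handshake used above, assuming
 * union pipe (from the harness) wraps a pipe(2) read/write fd pair and that
 * the parent/child tokens are single bytes. The sketch_* names are
 * hypothetical stand-ins for the real helpers in the shared harness code,
 * which may differ in detail. Needs <unistd.h> for read()/write().
 */
static int sketch_wait_for_parent(union pipe read_pipe)
{
	char c;

	/* Block until the parent writes its one-byte go-ahead */
	FAIL_IF(read(read_pipe.read_fd, &c, 1) != 1);

	return 0;
}

static int sketch_notify_parent(union pipe write_pipe)
{
	char c = 0;

	/* Tell the parent our EBB handler is armed */
	FAIL_IF(write(write_pipe.write_fd, &c, 1) != 1);

	return 0;
}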
static void ebb_callee(void)
{
	uint64_t siar, val;

	val = mfspr(SPRN_BESCR);
	if (!(val & BESCR_PMEO)) {
		ebb_state.stats.spurious++;
		goto out;
	}

	ebb_state.stats.ebb_count++;
	trace_log_counter(ebb_state.trace, ebb_state.stats.ebb_count);

	/* Resets the PMC */
	count_pmc(1, sample_period);

out:
	if (ebb_state.stats.ebb_count == NUMBER_OF_EBBS)
		/* Reset but leave counters frozen */
		reset_ebb_with_clear_mask(MMCR0_PMAO);
	else
		/* Unfreezes */
		reset_ebb();

	/* Do some stuff to chew some cycles and pop the counter */
	siar = mfspr(SPRN_SIAR);
	trace_log_reg(ebb_state.trace, SPRN_SIAR, siar);

	val = mfspr(SPRN_PMC1);
	trace_log_reg(ebb_state.trace, SPRN_PMC1, val);

	val = mfspr(SPRN_MMCR0);
	trace_log_reg(ebb_state.trace, SPRN_MMCR0, val);
}
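/*
 * A plausible sketch of the reset helpers called above, following the
 * documented EBB re-arm sequence; the sketch_* names are hypothetical and
 * the real implementations in the shared ebb harness code may differ.
 * The idea: clear the requested MMCR0 bits, re-arm MMCR0[PMAE] so the next
 * overflow raises an EBB, then clear BESCR[PMEO] to acknowledge this one.
 */
static void sketch_reset_ebb_with_clear_mask(unsigned long mmcr0_clear_mask)
{
	uint64_t val;

	val = mfspr(SPRN_MMCR0);
	mtspr(SPRN_MMCR0, (val & ~mmcr0_clear_mask) | MMCR0_PMAE);

	/* Writing BESCRR clears the BESCR bits set in the value written */
	mtspr(SPRN_BESCRR, BESCR_PMEO);
}

static void sketch_reset_ebb(void)
{
	/* Also clearing MMCR0[FC] unfreezes the counters */
	sketch_reset_ebb_with_clear_mask(MMCR0_PMAO | MMCR0_FC);
}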
int back_to_back_ebbs(void)
{
	struct event event;

	event_init_named(&event, 0x1001e, "cycles");
	event_leader_ebb_init(&event);

	event.attr.exclude_kernel = 1;
	event.attr.exclude_hv = 1;
	event.attr.exclude_idle = 1;

	FAIL_IF(event_open(&event));

	setup_ebb_handler(ebb_callee);

	FAIL_IF(ebb_event_enable(&event));

	sample_period = 5;

	ebb_freeze_pmcs();
	mtspr(SPRN_PMC1, pmc_sample_period(sample_period));
	ebb_global_enable();
	ebb_unfreeze_pmcs();

	while (ebb_state.stats.ebb_count < NUMBER_OF_EBBS)
		FAIL_IF(core_busy_loop());

	ebb_global_disable();
	ebb_freeze_pmcs();

	count_pmc(1, sample_period);

	dump_ebb_state();

	event_close(&event);

	FAIL_IF(ebb_state.stats.ebb_count != NUMBER_OF_EBBS);

	return 0;
}
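/*
 * A minimal sketch of how this test would typically be driven.
 * test_harness() is the powerpc selftest entry point (declared in the
 * harness headers); the exact registration and test name here are an
 * assumption, not necessarily what the original file uses.
 */
int main(void)
{
	return test_harness(back_to_back_ebbs, "back_to_back_ebbs");
}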
static int test_body(void)
{
	int i, orig_period, max_period;
	struct event event;

	/* We use PMC4 to make sure the kernel switches all counters correctly */
	event_init_named(&event, 0x40002, "instructions");
	event_leader_ebb_init(&event);

	event.attr.exclude_kernel = 1;
	event.attr.exclude_hv = 1;
	event.attr.exclude_idle = 1;

	FAIL_IF(event_open(&event));

	ebb_enable_pmc_counting(4);
	setup_ebb_handler(standard_ebb_callee);
	ebb_global_enable();
	FAIL_IF(ebb_event_enable(&event));

	/*
	 * We want a low sample period, but we also want to get out of the EBB
	 * handler without tripping up again.
	 *
	 * This value was picked after much experimentation.
	 */
	orig_period = max_period = sample_period = 400;

	mtspr(SPRN_PMC4, pmc_sample_period(sample_period));

	while (ebb_state.stats.ebb_count < 1000000) {
		/*
		 * We are trying to get the EBB exception to race exactly with
		 * us entering the kernel to do the syscall. We then need the
		 * kernel to decide our timeslice is up and context switch to
		 * the other thread. When we come back our EBB will have been
		 * lost and we'll spin in this while loop forever.
		 */
		for (i = 0; i < 100000; i++)
			sched_yield();

		/* Change the sample period slightly to try and hit the race */
		if (sample_period >= (orig_period + 200))
			sample_period = orig_period;
		else
			sample_period++;

		if (sample_period > max_period)
			max_period = sample_period;
	}

	ebb_freeze_pmcs();
	ebb_global_disable();

	count_pmc(4, sample_period);
	mtspr(SPRN_PMC4, 0xdead);

	dump_summary_ebb_state();
	dump_ebb_hw_state();

	event_close(&event);

	FAIL_IF(ebb_state.stats.ebb_count == 0);

	/* We vary our sample period so we need extra fudge here */
	FAIL_IF(!ebb_check_count(4, orig_period, 2 * (max_period - orig_period)));

	return 0;
}
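/*
 * A sketch of a plausible driver for test_body(). The race above only
 * triggers if another runnable task is contending for our CPU, so the
 * harness is assumed to provide something like eat_cpu(), which spawns a
 * busy sibling before running the test function. Both eat_cpu() and the
 * test name here are assumptions about the surrounding file.
 */
static int lost_exception(void)
{
	return eat_cpu(test_body);
}

int main(void)
{
	return test_harness(lost_exception, "lost_exception");
}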