Example #1
int
ipe_init(void)
{
	spinlock_acquire(&ipe_init_alloc_lock);
	local_recv_buffer = remote_recv_buffer[lapic_id] =
		(ipe_packet_t *)KADDR_DIRECT(kalloc_pages(RBUF_PAGES));
	spinlock_release(&ipe_init_alloc_lock);
	
	eproc_open(&ipe_eproc, "ipe", (void(*)(void))proc_wait_try, NULL, 8192);
	event_open(ipe_event, &ipe_eproc.event_pool, do_ipe, NULL);
	event_open(&ipe_timer.event, &ipe_eproc.event_pool, do_ipe, NULL);
	timer_open(&ipe_timer, timer_tick + IPE_REFRESH_INV * timer_freq);

	/* ALL CPU BARRIER {{{ */	
	/* XXX: emulate an atomic increment with a naive CAS retry loop */
	while (1)
	{
		int old = ipe_ready;
		if (cmpxchg32(&ipe_ready, old, old + 1) == old) break;
	}
	
	while (ipe_ready != lcpu_count) ;
	/* }}} */
	
	return 0;
}
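The barrier above emulates an atomic increment with a naive CAS retry loop, as the XXX comment admits. If GCC-style atomic builtins are usable in this kernel (an assumption, not something the listing shows), the entry side of the barrier reduces to a sketch like this:

/* Hedged sketch: __sync_fetch_and_add is a GCC builtin, assumed usable here. */
__sync_fetch_and_add(&ipe_ready, 1);		/* atomically announce this CPU */
while (ipe_ready != lcpu_count)
	;					/* spin until every CPU has arrived */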
Example #2
/* Tests that fork clears EBB state */
int fork_cleanup(void)
{
	pid_t pid;

	event_init_named(&event, 0x1001e, "cycles");
	event_leader_ebb_init(&event);

	FAIL_IF(event_open(&event));

	ebb_enable_pmc_counting(1);
	setup_ebb_handler(standard_ebb_callee);
	ebb_global_enable();

	FAIL_IF(ebb_event_enable(&event));

	mtspr(SPRN_MMCR0, MMCR0_FC);
	mtspr(SPRN_PMC1, pmc_sample_period(sample_period));

	/* Don't need to actually take any EBBs */

	pid = fork();
	if (pid == 0)
		exit(child());

	/* Child does the actual testing */
	FAIL_IF(wait_for_child(pid));

	/* After fork */
	event_close(&event);

	return 0;
}
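FAIL_IF() is not defined anywhere in this listing; in selftest code of this style it is a small macro that reports the failing expression and line, then bails out of the test with a non-zero return. A hedged sketch of such a macro, with an illustrative message format:

#include <stdio.h>

/* Sketch of a FAIL_IF-style helper; the exact message wording is assumed. */
#define FAIL_IF(x)							\
do {									\
	if (x) {							\
		fprintf(stderr, "[FAIL] %s:%d: %s\n",			\
			__FILE__, __LINE__, #x);			\
		return 1;						\
	}								\
} while (0)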
/*
 * Tests that the L3 bank handling is correct. We fixed it in commit e9aaac1.
 */
static int l3_bank_test(void)
{
	struct event event;
	char *p;
	int i;

	p = malloc(MALLOC_SIZE);
	FAIL_IF(!p);

	event_init(&event, 0x84918F);

	FAIL_IF(event_open(&event));

	for (i = 0; i < MALLOC_SIZE; i += 0x10000)
		p[i] = i;

	event_read(&event);
	event_report(&event);

	FAIL_IF(event.result.running == 0);
	FAIL_IF(event.result.enabled == 0);

	event_close(&event);
	free(p);

	return 0;
}
adv_error mouseb_event_init(int mouseb_id)
{
	unsigned i;
	adv_bool eacces = 0;
	struct event_location map[EVENT_MOUSE_DEVICE_MAX];
	unsigned mac;

	log_std(("mouseb:event: mouseb_event_init(id:%d)\n", mouseb_id));

#if defined(USE_SVGALIB)
	/* close the SVGALIB mouse device. SVGALIB always calls mouse_init(), */
	/* even if mouse input is not requested */
	if (os_internal_svgalib_get()) {
		mouse_close();
	}
#endif

	log_std(("mouseb:event: opening mouse from 0 to %d\n", EVENT_MOUSE_DEVICE_MAX));

	mac = event_locate(map, EVENT_MOUSE_DEVICE_MAX, &eacces);

	event_state.mac = 0;
	for(i=0;i<mac;++i) {
		int f;

		if (event_state.mac >= EVENT_MOUSE_MAX)
			continue;

		f = event_open(map[i].file, event_state.map[event_state.mac].evtype_bitmask, sizeof(event_state.map[event_state.mac].evtype_bitmask));
		if (f == -1) {
			if (errno == EACCES) {
				eacces = 1;
			}
			continue;
		}

		if (!event_is_mouse(f, event_state.map[event_state.mac].evtype_bitmask)) {
			log_std(("mouseb:event: not a mouse on device %s\n", map[i].file));
			event_close(f);
			continue;
		}

		if (mouseb_setup(&event_state.map[event_state.mac], f) != 0) {
			event_close(f);
			continue;
		}

		++event_state.mac;
	}

	if (!event_state.mac) {
		if (eacces)
			error_set("No mouse found. Check the /dev/input/event* permissions.\n");
		else
			error_set("No mouse found.\n");
		return -1;
	}

	return 0;
}
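A minimal caller for the routine above might look like the following sketch. log_std() and event_state come from the listing; how the message recorded by error_set() is surfaced to the user is left abstract, since that accessor is not shown here.

/* Usage sketch: probe for mice and bail out if none is usable. */
if (mouseb_event_init(0) != 0) {
	/* error_set() above recorded a human-readable reason */
	return -1;
}
log_std(("mouseb:event: %d mouse device(s) ready\n", (int)event_state.mac));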
adv_error joystickb_event_init(int joystickb_id)
{
	unsigned i;
	adv_bool eacces = 0;
	struct event_location map[EVENT_JOYSTICK_DEVICE_MAX];
	unsigned mac;

	log_std(("josytickb:event: joystickb_event_init(id:%d)\n", joystickb_id));

	log_std(("joystickb:event: opening joystick from 0 to %d\n", EVENT_JOYSTICK_DEVICE_MAX));

	event_state.counter = 0;

	mac = event_locate(map, EVENT_JOYSTICK_DEVICE_MAX, &eacces);

	event_state.mac = 0;
	for(i=0;i<mac;++i) {
		int f;

		if (event_state.mac >= EVENT_JOYSTICK_MAX)
			continue;

		f = event_open(map[i].file, event_state.map[event_state.mac].evtype_bitmask, sizeof(event_state.map[event_state.mac].evtype_bitmask));
		if (f == -1) {
			if (errno == EACCES) {
				eacces = 1;
			}
			continue;
		}

		if (!event_is_joystick(f, event_state.map[event_state.mac].evtype_bitmask)) {
			log_std(("joystickb:event: not a joystick on device %s\n", map[i].file));
			event_close(f);
			continue;
		}

		if (joystickb_setup(&event_state.map[event_state.mac], f) != 0) {
			event_close(f);
			continue;
		}

		++event_state.mac;
	}

	if (!event_state.mac) {
		if (eacces)
			error_set("No joystick found. Check the /dev/input/event* permissions.\n");
		else
			error_set("No joystick found.\n");
		return -1;
	}

	return 0;
}
Example #6
static int no_handler_test(void)
{
	struct event event;
	u64 val;
	int i;

	SKIP_IF(!ebb_is_supported());

	event_init_named(&event, 0x1001e, "cycles");
	event_leader_ebb_init(&event);

	event.attr.exclude_kernel = 1;
	event.attr.exclude_hv = 1;
	event.attr.exclude_idle = 1;

	FAIL_IF(event_open(&event));
	FAIL_IF(ebb_event_enable(&event));

	val = mfspr(SPRN_EBBHR);
	FAIL_IF(val != 0);

	/* Make sure it overflows quickly */
	sample_period = 1000;
	mtspr(SPRN_PMC1, pmc_sample_period(sample_period));

	/* Spin to make sure the event has time to overflow */
	for (i = 0; i < 1000; i++)
		mb();

	dump_ebb_state();

	/* We expect to see the PMU frozen & PMAO set */
	val = mfspr(SPRN_MMCR0);
	FAIL_IF(val != 0x0000000080000080);

	event_close(&event);

	dump_ebb_state();

	/* The real test is that we never took an EBB at 0x0 */

	return 0;
}
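The magic value checked against SPRN_MMCR0 above matches the comment "PMU frozen & PMAO set": it is the OR of the freeze-counters bit and the PMAO bit. The named constants below are a sketch; the bit values follow the usual powerpc MMCR0 layout and are not taken from this listing.

/* Sketch: decomposition of the expected MMCR0 value (bit values assumed). */
#define SKETCH_MMCR0_FC    0x0000000080000000ull	/* freeze counters */
#define SKETCH_MMCR0_PMAO  0x0000000000000080ull	/* PM alert occurred */
/* SKETCH_MMCR0_FC | SKETCH_MMCR0_PMAO == 0x0000000080000080 */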
Example #7
int back_to_back_ebbs(void)
{
	struct event event;

	event_init_named(&event, 0x1001e, "cycles");
	event_leader_ebb_init(&event);

	event.attr.exclude_kernel = 1;
	event.attr.exclude_hv = 1;
	event.attr.exclude_idle = 1;

	FAIL_IF(event_open(&event));

	setup_ebb_handler(ebb_callee);

	FAIL_IF(ebb_event_enable(&event));

	sample_period = 5;

	ebb_freeze_pmcs();
	mtspr(SPRN_PMC1, pmc_sample_period(sample_period));
	ebb_global_enable();
	ebb_unfreeze_pmcs();

	while (ebb_state.stats.ebb_count < NUMBER_OF_EBBS)
		FAIL_IF(core_busy_loop());

	ebb_global_disable();
	ebb_freeze_pmcs();

	count_pmc(1, sample_period);

	dump_ebb_state();

	event_close(&event);

	FAIL_IF(ebb_state.stats.ebb_count != NUMBER_OF_EBBS);

	return 0;
}
static int per_event_excludes(void)
{
	struct event *e, events[4];
	char *platform;
	int i;

	platform = (char *)get_auxv_entry(AT_BASE_PLATFORM);
	FAIL_IF(!platform);
	SKIP_IF(strcmp(platform, "power8") != 0);

	/*
	 * We need to create the events disabled, otherwise the running/enabled
	 * counts don't match up.
	 */
	e = &events[0];
	event_init_opts(e, PERF_COUNT_HW_INSTRUCTIONS,
			PERF_TYPE_HARDWARE, "instructions");
	e->attr.disabled = 1;

	e = &events[1];
	event_init_opts(e, PERF_COUNT_HW_INSTRUCTIONS,
			PERF_TYPE_HARDWARE, "instructions(k)");
	e->attr.disabled = 1;
	e->attr.exclude_user = 1;
	e->attr.exclude_hv = 1;

	e = &events[2];
	event_init_opts(e, PERF_COUNT_HW_INSTRUCTIONS,
			PERF_TYPE_HARDWARE, "instructions(h)");
	e->attr.disabled = 1;
	e->attr.exclude_user = 1;
	e->attr.exclude_kernel = 1;

	e = &events[3];
	event_init_opts(e, PERF_COUNT_HW_INSTRUCTIONS,
			PERF_TYPE_HARDWARE, "instructions(u)");
	e->attr.disabled = 1;
	e->attr.exclude_hv = 1;
	e->attr.exclude_kernel = 1;

	FAIL_IF(event_open(&events[0]));

	/*
	 * The open here will fail if we don't have per event exclude support,
	 * because the second event has an incompatible set of exclude settings
	 * and we're asking for the events to be in a group.
	 */
	for (i = 1; i < 4; i++)
		FAIL_IF(event_open_with_group(&events[i], events[0].fd));

	/*
	 * Even though the above will fail without per-event excludes we keep
	 * testing in order to be thorough.
	 */
	prctl(PR_TASK_PERF_EVENTS_ENABLE);

	/* Spin for a while */
	for (i = 0; i < INT_MAX; i++)
		asm volatile("" : : : "memory");

	prctl(PR_TASK_PERF_EVENTS_DISABLE);

	for (i = 0; i < 4; i++) {
		FAIL_IF(event_read(&events[i]));
		event_report(&events[i]);
	}

	/*
	 * We should see that all events have enabled == running. That
	 * shows that they were all on the PMU at once.
	 */
	for (i = 0; i < 4; i++)
		FAIL_IF(events[i].result.running != events[i].result.enabled);

	/*
	 * We can also check that the result for instructions is >= all the
	 * other counts. That's because it is counting all instructions while
	 * the others are counting a subset.
	 */
	for (i = 1; i < 4; i++)
		FAIL_IF(events[0].result.value < events[i].result.value);

	for (i = 0; i < 4; i++)
		event_close(&events[i]);

	return 0;
}
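event_open_with_group() is not shown in this listing; the grouping it performs presumably maps onto the group_fd argument of the perf_event_open() syscall. A hedged sketch of that raw call, independent of the helpers used above:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Sketch: open an event as a member of an existing group (group_fd),
 * for the calling task (pid 0) on any CPU (-1), with no flags. */
static int raw_perf_open(struct perf_event_attr *attr, int group_fd)
{
	return syscall(__NR_perf_event_open, attr, 0, -1, group_fd, 0);
}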
static int test_body(void)
{
	int i, orig_period, max_period;
	struct event event;

	/* We use PMC4 to make sure the kernel switches all counters correctly */
	event_init_named(&event, 0x40002, "instructions");
	event_leader_ebb_init(&event);

	event.attr.exclude_kernel = 1;
	event.attr.exclude_hv = 1;
	event.attr.exclude_idle = 1;

	FAIL_IF(event_open(&event));

	ebb_enable_pmc_counting(4);
	setup_ebb_handler(standard_ebb_callee);
	ebb_global_enable();
	FAIL_IF(ebb_event_enable(&event));

	/*
	 * We want a low sample period, but we also want to get out of the EBB
	 * handler without tripping up again.
	 *
	 * This value picked after much experimentation.
	 */
	orig_period = max_period = sample_period = 400;

	mtspr(SPRN_PMC4, pmc_sample_period(sample_period));

	while (ebb_state.stats.ebb_count < 1000000) {
		/*
		 * We are trying to get the EBB exception to race exactly with
		 * us entering the kernel to do the syscall. We then need the
		 * kernel to decide our timeslice is up and context switch to
		 * the other thread. When we come back our EBB will have been
		 * lost and we'll spin in this while loop forever.
		 */

		for (i = 0; i < 100000; i++)
			sched_yield();

		/* Change the sample period slightly to try and hit the race */
		if (sample_period >= (orig_period + 200))
			sample_period = orig_period;
		else
			sample_period++;

		if (sample_period > max_period)
			max_period = sample_period;
	}

	ebb_freeze_pmcs();
	ebb_global_disable();

	count_pmc(4, sample_period);
	mtspr(SPRN_PMC4, 0xdead);

	dump_summary_ebb_state();
	dump_ebb_hw_state();

	event_close(&event);

	FAIL_IF(ebb_state.stats.ebb_count == 0);

	/* We vary our sample period so we need extra fudge here */
	FAIL_IF(!ebb_check_count(4, orig_period, 2 * (max_period - orig_period)));

	return 0;
}
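All of these selftest-style functions return 0 on success and non-zero on failure, so they are typically driven from a tiny main(). The test_harness() helper and the test name string below are assumptions; no driver code appears in the listing.

/* Sketch: driving one of the tests above from main(). */
int main(void)
{
	return test_harness(test_body, "test_body");
}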