Example #1
int
mainloop(char **arg)
{
	ctx_arg_t ctx;
	pfmlib_input_param_t inp;
	pfmlib_output_param_t outp;
	pfarg_reg_t pd[NUM_PMDS];
	pfarg_reg_t pc[NUM_PMCS];
	pfarg_load_t load_args;
	pfm_msg_t msg;
	unsigned long ovfl_count = 0UL;
	unsigned long sample_period;
	unsigned long smpl_pmd_mask = 0UL;
	pid_t pid;
	int status, ret, fd;
	unsigned int i, num_counters;

	/*
	 * initialize all locals
	 */
	memset(&ctx, 0, sizeof(ctx));
	memset(&inp,0, sizeof(inp));
	memset(&outp,0, sizeof(outp));
	memset(pd, 0, sizeof(pd));
	memset(pc, 0, sizeof(pc));

	/*
	 * locate events
	 */
	pfm_get_num_counters(&num_counters);

	if (pfm_get_cycle_event(&inp.pfp_events[0]) != PFMLIB_SUCCESS)
		fatal_error("cannot find cycle event\n");

	if (pfm_get_inst_retired_event(&inp.pfp_events[1]) != PFMLIB_SUCCESS)
		fatal_error("cannot find inst retired event\n");

	i = 2;

	if (i > num_counters) {
		i = num_counters;
		printf("too many events provided (max=%d events), using first %d event(s)\n", num_counters, i);
	}
	/*
	 * set the privilege mode:
	 * 	PFM_PLM3 : user level
	 * 	PFM_PLM0 : kernel level
	 */
	inp.pfp_dfl_plm   = PFM_PLM3;
	/*
	 * how many counters we use
	 */
	inp.pfp_event_count = i;

	/*
	 * let the library figure out the values for the PMCS
	 */
	if ((ret=pfm_dispatch_events(&inp, NULL, &outp, NULL)) != PFMLIB_SUCCESS) {
		fatal_error("cannot configure events: %s\n", pfm_strerror(ret));
	}
	
	/*
	 * Now prepare the arguments to initialize the PMDs and PMCs.
	 * We must use pfp_pmc_count to determine the number of PMCs to initialize.
	 * We must use pfp_event_count to determine the number of PMDs to initialize.
	 * Some events cause extra PMCs to be used, so pfp_pmc_count may be >= pfp_event_count.
	 *
	 * This step is new compared to libpfm-2.x. It is necessary because the library no
	 * longer knows about the kernel data structures.
	 */

	for (i=0; i < outp.pfp_pmc_count; i++) {
		pc[i].reg_num   = outp.pfp_pmcs[i].reg_num;
		pc[i].reg_value = outp.pfp_pmcs[i].reg_value;
	}

	/*
	 * the PMCs controlling the events ALWAYS come first, which is why this loop
	 * is safe even when extra PMCs are needed to support a particular event.
	 */
	for (i=0; i < inp.pfp_event_count; i++) {
		pd[i].reg_num   = pc[i].reg_num;
		/* build sampling mask */
		smpl_pmd_mask  |= 1UL << pc[i].reg_num;
	}

	printf("smpl_pmd_mask=0x%lx\n", smpl_pmd_mask);

	/*
	 * now we indicate what to record when each counter overflows.
	 * In our case, we only have one sampling period and it is set for the
	 * first event. Here we indicate that when the sampling period expires
	 * then we want to record the value of all the other counters.
	 *
	 * We exclude the first counter in this case.
	 */
	smpl_pmd_mask  &= ~(1UL << pc[0].reg_num);

	pc[0].reg_smpl_pmds[0] = smpl_pmd_mask;

	/*
	 * when our sampling counter overflows, we want to be notified.
	 * The notification will come ONLY when the sampling buffer
	 * becomes full.
	 *
	 * We also activate randomization of the sampling period.
	 */
	pc[0].reg_flags	|= PFM_REGFL_OVFL_NOTIFY | PFM_REGFL_RANDOM;

	/*
	 * we also want to reset the other PMDs on
	 * every overflow. If we do not set
	 * this, the non-overflowed counters
	 * will be untouched.
	 */
	pc[0].reg_reset_pmds[0] |= smpl_pmd_mask;

	sample_period = 1000000UL;

	pd[0].reg_value       = (~0) - sample_period + 1;
	pd[0].reg_short_reset = (~0) - sample_period + 1;
	pd[0].reg_long_reset  = (~0) - sample_period + 1;
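	/*
	 * note: the counter counts upward and overflows when it wraps, so the
	 * (~0) - sample_period + 1 preload above produces one overflow every
	 * sample_period occurrences of the first event
	 */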
	/*
	 * setup randomization parameters; the mask allows the period to vary by up to +255.
	 */
	pd[0].reg_random_seed = 5;
	pd[0].reg_random_mask = 0xff;


	printf("programming %u PMCS and %u PMDS\n", outp.pfp_pmc_count, inp.pfp_event_count);

	/*
	 * prepare context structure.
	 *
	 * format specific parameters MUST be concatenated to the regular
	 * pfarg_context_t structure. For convenience, the default sampling
	 * format provides a data structure that already combines the pfarg_context_t
	 * with what is needed for this format.
	 */

	 /*
	  * We initialize the format specific information.
	  * The format is identified by its UUID which must be copied
	  * into the ctx_buf_fmt_id field.
	  */
	memcpy(ctx.ctx_arg.ctx_smpl_buf_id, buf_fmt_id, sizeof(pfm_uuid_t));

	/*
	 * the size of the buffer is indicated in bytes (not entries).
	 *
	 * The kernel will record into the buffer up to a certain point.
	 * No partial samples are ever recorded.
	 */
	ctx.buf_arg.buf_size = 8192;

	/*
	 * now create our perfmon context.
	 */
	if (perfmonctl(0, PFM_CREATE_CONTEXT, &ctx, 1) == -1 ) {
		if (errno == ENOSYS) {
			fatal_error("Your kernel does not have performance monitoring support!\n");
		}
		fatal_error("Can't create PFM context %s\n", strerror(errno));
	}

	/*
	 * extract the file descriptor we will use to
	 * identify this newly created context
	 */
	fd = ctx.ctx_arg.ctx_fd;

	/*
	 * retrieve the virtual address at which the sampling
	 * buffer has been mapped
	 */
	buf_addr = ctx.ctx_arg.ctx_smpl_vaddr;

	printf("context [%d] buffer mapped @%p\n", fd, buf_addr);

	/*
	 * Now program the registers
	 */
	if (perfmonctl(fd, PFM_WRITE_PMCS, pc, outp.pfp_pmc_count) == -1) {
		fatal_error("perfmonctl error PFM_WRITE_PMCS errno %d\n",errno);
	}
	/*
	 * initialize the PMDs
	 */
	if (perfmonctl(fd, PFM_WRITE_PMDS, pd, inp.pfp_event_count) == -1) {
		fatal_error("perfmonctl error PFM_WRITE_PMDS errno %d\n",errno);
	}

	/*
	 * Create the child task
	 */
	if ((pid=fork()) == -1) fatal_error("Cannot fork process\n");

	/*
	 * In order to get the PFM_END_MSG message, it is important
	 * to ensure that the child task does not inherit the file
	 * descriptor of the context. By default, file descriptors
	 * are inherited across exec(). We explicitly close it
	 * here. We could have set it up through fcntl(FD_CLOEXEC)
	 * to achieve the same thing.
	 */
	if (pid == 0) {
		close(fd);
		child(arg);
	}

	/*
	 * wait for the child to exec
	 */
	waitpid(pid, &status, WUNTRACED);

	/*
	 * process is stopped at this point
	 */
	if (WIFEXITED(status)) {
		warning("task %s [%d] exited already status %d\n", arg[0], pid, WEXITSTATUS(status));
		goto terminate_session;
	}

	/*
	 * attach context to stopped task
	 */
	load_args.load_pid = pid;
	if (perfmonctl(fd, PFM_LOAD_CONTEXT, &load_args, 1) == -1) {
		fatal_error("perfmonctl error PFM_LOAD_CONTEXT errno %d\n",errno);
	}
	/*
	 * activate monitoring for the stopped task.
	 * (nothing will be measured at this point)
	 */
	if (perfmonctl(fd, PFM_START, NULL, 0) == -1) {
		fatal_error(" perfmonctl error PFM_START errno %d\n",errno);
	}
	/*
	 * detach the child. Side effects include the
	 * activation of monitoring.
	 */
	ptrace(PTRACE_DETACH, pid, NULL, 0);

	/*
	 * core loop
	 */
	for(;;) {
		/*
		 * wait for overflow/end notification messages
		 */
		ret = read(fd, &msg, sizeof(msg));
		if (ret == -1) {
			fatal_error("cannot read perfmon msg: %s\n", strerror(errno));
		}
		switch(msg.type) {
			case PFM_MSG_OVFL: /* the sampling buffer is full */
				process_smpl_buf(fd, smpl_pmd_mask, 1);
				ovfl_count++;
				break;
			case PFM_MSG_END: /* monitored task terminated */
				printf("task terminated\n");
				goto terminate_session;
			default: fatal_error("unknown message type %d\n", msg.type);
		}
	}
terminate_session:
	/*
	 * cleanup child
	 */
	waitpid(pid, &status, 0);

	/*
	 * check for any leftover samples
	 */
	process_smpl_buf(fd, smpl_pmd_mask, 0);

	/*
	 * destroy perfmon context
	 */
	close(fd);

	printf("%lu samples collected in %lu buffer overflows\n", collect_samples, ovfl_count);

	return 0;
}
Example #2
int
mainloop(char **arg)
{
	static uint64_t ovfl_count; /* static to avoid setjmp issue */
	struct pollfd pollfds[1];
	sigset_t bmask;
	int go[2], ready[2];
	size_t pgsz;
	size_t map_size = 0;
	pid_t pid;
	int status, ret;
	int i;
	char buf;

	if (pfm_initialize() != PFM_SUCCESS)
		errx(1, "libpfm initialization failed\n");

	pgsz = sysconf(_SC_PAGESIZE);
	map_size = (options.mmap_pages+1)*pgsz;
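	/*
	 * the data portion of the perf mmap buffer must span a power-of-two
	 * number of pages; the +1 accounts for the metadata header page
	 */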

	/*
	 * allocates the fds array
	 */
	ret  = perf_setup_list_events(options.events, &fds, &num_fds);
	if (ret || !num_fds)
		errx(1, "cannot setup event list");

	memset(pollfds, 0, sizeof(pollfds));

	ret = pipe(ready);
	if (ret)
		err(1, "cannot create pipe ready");

	ret = pipe(go);
	if (ret)
		err(1, "cannot create pipe go");

	/*
	 * Create the child task
	 * (disabled: this variant monitors the calling process itself
	 * instead of forking a child)
	 */
#if 0
	if ((pid=fork()) == -1)
		err(1, "cannot fork process\n");

	if (pid == 0) {
		close(ready[0]);
		close(go[1]);

		/*
		 * let the parent know we exist
		 */
		close(ready[1]);
		if (read(go[0], &buf, 1) == -1)
			err(1, "unable to read go_pipe");

		exit(child(arg));
	}
#endif
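	/*
	 * self-monitoring: use our own pid/tid. Note that gettid() only has a
	 * glibc wrapper since glibc 2.30; older systems need syscall(SYS_gettid).
	 */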
	int tid;
	pid = getpid();
	tid = gettid();
	printf("From C - pid : %d, tid : %d\n",pid, tid);	
	
	close(ready[1]);
	close(go[0]);

	if (read(ready[0], &buf, 1) == -1)
		err(1, "unable to read child_ready_pipe");

	close(ready[0]);

	fds[0].fd = -1;

	/* hard-coded sampling period: sample every 10000 occurrences of the first event */
	fds[0].hw.sample_period = 10000;
	if (!fds[0].hw.sample_period)
		errx(1, "need to set sampling period or freq on first event, use :period= or :freq=");

	for(i=0; i < num_fds; i++) {

		if (i == 0) {
			fds[i].hw.disabled = 1;
			fds[i].hw.enable_on_exec = 1; /* start immediately */
		} else
			fds[i].hw.disabled = 0;


		if (options.opt_inherit)
			fds[i].hw.inherit = 1;

		if (fds[i].hw.sample_period) {
			/*
			 * set notification threshold to be halfway through the buffer
			 */
			fds[i].hw.wakeup_watermark = (options.mmap_pages*pgsz) / 2;
			fds[i].hw.watermark = 1;

			fds[i].hw.sample_type = PERF_SAMPLE_IP|PERF_SAMPLE_TID|PERF_SAMPLE_READ|PERF_SAMPLE_TIME|PERF_SAMPLE_PERIOD;
			/*
			 * if we have more than one event, then record event identifier to help with parsing
			 */
			if (num_fds > 1)
				fds[i].hw.sample_type |= PERF_SAMPLE_IDENTIFIER;

			fprintf(options.output_file,"%s period=%"PRIu64" freq=%d\n", fds[i].name, fds[i].hw.sample_period, fds[i].hw.freq);

			fds[i].hw.read_format = PERF_FORMAT_SCALE;

			if (fds[i].hw.freq)
				fds[i].hw.sample_type |= PERF_SAMPLE_PERIOD;
			
			/* force the same 10000-event period on every sampling event */
			fds[i].hw.sample_period = 10000;
			if (options.mem_mode)
				fds[i].hw.sample_type |= PERF_SAMPLE_WEIGHT | PERF_SAMPLE_DATA_SRC | PERF_SAMPLE_ADDR;
			if (options.branch_mode) {
				fds[i].hw.sample_type |= PERF_SAMPLE_BRANCH_STACK;
				fds[i].hw.branch_sample_type = PERF_SAMPLE_BRANCH_ANY;
			}
		}
		/*
		 * we are grouping the events, so there may be a limit.
		 * Note the target here is thread tid+1 (not the calling thread),
		 * on options.cpu, with all events grouped under the first event's fd.
		 */
		fds[i].fd = perf_event_open(&fds[i].hw, tid+1, options.cpu, fds[0].fd, 0);
		if (fds[i].fd == -1) {
			if (fds[i].hw.precise_ip)
				err(1, "cannot attach event %s: precise mode may not be supported", fds[i].name);
			err(1, "cannot attach event %s", fds[i].name);
		}
	}

	/*
	 * kernel adds the header page to the size of the mmapped region
	 */
	fds[0].buf = mmap(NULL, map_size, PROT_READ|PROT_WRITE, MAP_SHARED, fds[0].fd, 0);
	if (fds[0].buf == MAP_FAILED)
		err(1, "cannot mmap buffer");

	/* does not include header page */
	fds[0].pgmsk = (options.mmap_pages*pgsz)-1;

	/*
	 * send samples for all events to first event's buffer
	 */
	for (i = 1; i < num_fds; i++) {
		if (!fds[i].hw.sample_period)
			continue;
		ret = ioctl(fds[i].fd, PERF_EVENT_IOC_SET_OUTPUT, fds[0].fd);
		if (ret)
			err(1, "cannot redirect sampling output");
	}

	if (num_fds > 1 && fds[0].fd > -1) {
		for(i = 0; i < num_fds; i++) {
			/*
			 * read the event identifier using an ioctl;
			 * this new method replaces the old trick of PERF_FORMAT_GROUP + PERF_FORMAT_ID + read()
			 */
			ret = ioctl(fds[i].fd, PERF_EVENT_IOC_ID, &fds[i].id);
			if (ret == -1)
				err(1, "cannot read ID");
			fprintf(options.output_file,"ID %"PRIu64"  %s\n", fds[i].id, fds[i].name);
		}
	}

	pollfds[0].fd = fds[0].fd;
	pollfds[0].events = POLLIN;
	
	for(i=0; i < num_fds; i++) {
		ret = ioctl(fds[i].fd, PERF_EVENT_IOC_ENABLE, 0);
		if (ret)
			err(1, "cannot enable event %s\n", fds[i].name);
	}
	signal(SIGCHLD, cld_handler);

	close(go[1]);

	if (setjmp(jbuf) == 1)
		goto terminate_session;

	sigemptyset(&bmask);
	sigaddset(&bmask, SIGCHLD);

	/*
	 * core loop
	 */
	for(;;) {
		ret = poll(pollfds, 1, -1);
		if (ret < 0 && errno == EINTR)
			break;
		ovfl_count++;
		ret = sigprocmask(SIG_SETMASK, &bmask, NULL);
		if (ret)
			err(1, "setmask");
		process_smpl_buf(&fds[0]);
		ret = sigprocmask(SIG_UNBLOCK, &bmask, NULL);
		if (ret)
			err(1, "unblock");
	}
	printf("How was you day??\n");
terminate_session:
	/*
	 * cleanup child
	 */
	wait4(pid, &status, 0, NULL);

	for(i=0; i < num_fds; i++)
		close(fds[i].fd);

	/* check for partial event buffer */
	process_smpl_buf(&fds[0]);
	munmap(fds[0].buf, map_size);

	perf_free_fds(fds, num_fds);

	fprintf(options.output_file,
		"%"PRIu64" samples collected in %"PRIu64" poll events, %"PRIu64" lost samples\n",
		collected_samples,
		ovfl_count, lost_samples);

	/* free libpfm resources cleanly */
	pfm_terminate();

	fclose(options.output_file);

	return 0;
}
Example #3
int
mainloop(char **arg)
{
	static uint64_t ovfl_count = 0; /* static to avoid setjmp issue */
	struct pollfd pollfds[1];
	int ret;
	int fd = -1;
	int i;

	if (pfm_initialize() != PFM_SUCCESS)
		errx(1, "libpfm initialization failed\n");

	pgsz = sysconf(_SC_PAGESIZE);
	map_size = (options.mmap_pages+1)*pgsz;

	if (options.cgroup) {
		fd = open_cgroup(options.cgroup);
		if (fd == -1)
			err(1, "cannot open cgroup file %s\n", options.cgroup);
	}

	setup_cpu(options.cpu, fd);
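	/*
	 * setup_cpu() presumably opens the per-CPU events; in cgroup mode the
	 * cgroup directory fd is what perf_event_open() expects as its pid
	 * argument together with PERF_FLAG_PID_CGROUP
	 */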

	signal(SIGALRM, handler);
	signal(SIGINT, handler);

	pollfds[0].fd = fds[0].fd;
	pollfds[0].events = POLLIN;

	printf("monitoring on CPU%d, session ending in %ds\n", options.cpu, options.delay);

	if (setjmp(jbuf) == 1)
		goto terminate_session;

	start_cpu();

	alarm(options.delay);
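	/*
	 * the SIGALRM/SIGINT handler presumably longjmp()s back to the
	 * setjmp() above, which then branches to terminate_session
	 */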
	/*
	 * core loop
	 */
	for(;;) {
		ret = poll(pollfds, 1, -1);
		if (ret < 0 && errno == EINTR)
			break;
		ovfl_count++;
		process_smpl_buf(&fds[0]);
	}
terminate_session:
	for(i=0; i < num_fds; i++)
		close(fds[i].fd);

	/* check for partial event buffer */
	process_smpl_buf(&fds[0]);
	munmap(fds[0].buf, map_size);

	free(fds);

	printf("%"PRIu64" samples collected in %"PRIu64" poll events, %"PRIu64" lost samples\n",
		collected_samples,
		ovfl_count, lost_samples);
	return 0;
}
Example #4
int
mainloop(char **arg)
{
	static uint64_t ovfl_count; /* static to avoid setjmp issue */
	struct pollfd pollfds[1];
	sigset_t bmask;
	int go[2], ready[2];
	uint64_t *val;
	size_t sz, pgsz;
	size_t map_size = 0;
	pid_t pid;
	int status, ret;
	int i;
	char buf;

	if (pfm_initialize() != PFM_SUCCESS)
		errx(1, "libpfm initialization failed\n");

	pgsz = sysconf(_SC_PAGESIZE);
	map_size = (options.mmap_pages+1)*pgsz;

	/*
	 * allocates the fds array
	 */
	ret  = perf_setup_list_events(options.events, &fds, &num_fds);
	if (ret || !num_fds)
		errx(1, "cannot setup event list");

	memset(pollfds, 0, sizeof(pollfds));

	ret = pipe(ready);
	if (ret)
		err(1, "cannot create pipe ready");

	ret = pipe(go);
	if (ret)
		err(1, "cannot create pipe go");

	/*
	 * Create the child task
	 */
	if ((pid=fork()) == -1)
		err(1, "cannot fork process\n");

	if (pid == 0) {
		close(ready[0]);
		close(go[1]);

		/*
		 * let the parent know we exist
		 */
		close(ready[1]);
		if (read(go[0], &buf, 1) == -1)
			err(1, "unable to read go_pipe");

		exit(child(arg));
	}
	close(ready[1]);
	close(go[0]);

	if (read(ready[0], &buf, 1) == -1)
		err(1, "unable to read child_ready_pipe");

	close(ready[0]);

	fds[0].fd = -1;

	if (!fds[0].hw.sample_period)
		errx(1, "need to set sampling period or freq on first event, use :period= or :freq=");

	for(i=0; i < num_fds; i++) {

		if (i == 0) {
			fds[i].hw.disabled = 1;
			fds[i].hw.enable_on_exec = 1; /* start immediately */
		} else
			fds[i].hw.disabled = 0;


		if (options.opt_inherit)
			fds[i].hw.inherit = 1;

		if (fds[i].hw.sample_period) {
			/*
			 * set notification threshold to be halfway through the buffer
			 */
			fds[i].hw.wakeup_watermark = (options.mmap_pages*pgsz) / 2;
			fds[i].hw.watermark = 1;

			fds[i].hw.sample_type = PERF_SAMPLE_IP|PERF_SAMPLE_TID|PERF_SAMPLE_READ|PERF_SAMPLE_TIME|PERF_SAMPLE_PERIOD|PERF_SAMPLE_STREAM_ID;
			fprintf(options.output_file,"%s period=%"PRIu64" freq=%d\n", fds[i].name, fds[i].hw.sample_period, fds[i].hw.freq);

			fds[i].hw.read_format = PERF_FORMAT_SCALE;
			if (num_fds > 1)
				fds[i].hw.read_format |= PERF_FORMAT_GROUP|PERF_FORMAT_ID;

			if (fds[i].hw.freq)
				fds[i].hw.sample_type |= PERF_SAMPLE_PERIOD;
		}

		fds[i].fd = perf_event_open(&fds[i].hw, pid, options.cpu, fds[0].fd, 0);
		if (fds[i].fd == -1) {
			if (fds[i].hw.precise_ip)
				err(1, "cannot attach event %s: precise mode may not be supported", fds[i].name);
			err(1, "cannot attach event %s", fds[i].name);
		}
	}

	/*
	 * kernel adds the header page to the size of the mmapped region
	 */
	fds[0].buf = mmap(NULL, map_size, PROT_READ|PROT_WRITE, MAP_SHARED, fds[0].fd, 0);
	if (fds[0].buf == MAP_FAILED)
		err(1, "cannot mmap buffer");

	/* does not include header page */
	fds[0].pgmsk = (options.mmap_pages*pgsz)-1;
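	/*
	 * pgmsk is (presumably) used by process_smpl_buf() as a mask to wrap
	 * offsets within the data area, whose size is a power of two
	 */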

	/*
	 * send samples for all events to first event's buffer
	 */
	for (i = 1; i < num_fds; i++) {
		if (!fds[i].hw.sample_period)
			continue;
		ret = ioctl(fds[i].fd, PERF_EVENT_IOC_SET_OUTPUT, fds[0].fd);
		if (ret)
			err(1, "cannot redirect sampling output");
	}

	/*
	 * we are using PERF_FORMAT_GROUP, therefore the structure
	 * of val is as follows:
	 *
	 *      { u64           nr;
	 *        { u64         time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
	 *        { u64         time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
	 *        { u64         value;
	 *          { u64       id;           } && PERF_FORMAT_ID
	 *        }             cntr[nr];
	 *      } && PERF_FORMAT_GROUP
	 *
	 * We are skipping the first 3 values (nr, time_enabled, time_running)
	 * and then for each event we get a pair of values.
	 */
	if (num_fds > 1) {
		sz = (3+2*num_fds)*sizeof(uint64_t);
		val = malloc(sz);
		if (!val)
			err(1, "cannot allocate memory");

		ret = read(fds[0].fd, val, sz);
		if (ret == -1)
			err(1, "cannot read id %zu", sizeof(val));


		for(i=0; i < num_fds; i++) {
			fds[i].id = val[2*i+1+3];
			fprintf(options.output_file,"%"PRIu64"  %s\n", fds[i].id, fds[i].name);
		}
		free(val);
	}

	pollfds[0].fd = fds[0].fd;
	pollfds[0].events = POLLIN;
	
	for(i=0; i < num_fds; i++) {
		ret = ioctl(fds[i].fd, PERF_EVENT_IOC_ENABLE, 0);
		if (ret)
			err(1, "cannot enable event %s\n", fds[i].name);
	}
	signal(SIGCHLD, cld_handler);

	close(go[1]);
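	/*
	 * closing the write end of the "go" pipe unblocks the child's read()
	 * and lets it proceed to run the monitored program
	 */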

	if (setjmp(jbuf) == 1)
		goto terminate_session;

	sigemptyset(&bmask);
	sigaddset(&bmask, SIGCHLD);

	/*
	 * core loop
	 */
	for(;;) {
		ret = poll(pollfds, 1, -1);
		if (ret < 0 && errno == EINTR)
			break;
		ovfl_count++;
		ret = sigprocmask(SIG_SETMASK, &bmask, NULL);
		if (ret)
			err(1, "setmask");
		process_smpl_buf(&fds[0]);
		ret = sigprocmask(SIG_UNBLOCK, &bmask, NULL);
		if (ret)
			err(1, "unblock");
	}
terminate_session:
	/*
	 * cleanup child
	 */
	wait4(pid, &status, 0, NULL);

	for(i=0; i < num_fds; i++)
		close(fds[i].fd);

	/* check for partial event buffer */
	process_smpl_buf(&fds[0]);
	munmap(fds[0].buf, map_size);

	perf_free_fds(fds, num_fds);

	fprintf(options.output_file,
		"%"PRIu64" samples collected in %"PRIu64" poll events, %"PRIu64" lost samples\n",
		collected_samples,
		ovfl_count, lost_samples);

	/* free libpfm resources cleanly */
	pfm_terminate();

	fclose(options.output_file);

	return 0;
}
Example #5
int
main(int argc, char **argv)
{
	pfarg_pmr_t pd[NUM_PMDS];
	pfarg_pmr_t pc[NUM_PMCS];
	pfarg_pmd_attr_t pa[NUM_PMDS];
	smpl_arg_t buf_arg;
	pfarg_msg_t msg;
	smpl_hdr_t *hdr;
	void *buf_addr;
	pid_t pid;
	int ret, fd, status, npmcs = 0;


	check_valid_cpu();

	if (argc < 2)
		fatal_error("you need to pass a program to sample\n");

	memset(pd, 0, sizeof(pd));
	memset(pc, 0, sizeof(pc));
	memset(&buf_arg, 0, sizeof(buf_arg));

	buf_arg.buf_size = getpagesize();
	buf_arg.cnt_reset = -SMPL_PERIOD;
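	/*
	 * -SMPL_PERIOD is the two's-complement preload: the counter counts
	 * upward and overflows after SMPL_PERIOD occurrences of the event
	 */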
	/*
	 * trigger interrupt when reached 90% of buffer
	 */
	buf_arg.intr_thres = (buf_arg.buf_size/sizeof(smpl_entry_t))*90/100;

	fd = pfm_create(PFM_FL_SMPL_FMT, NULL, FMT_NAME, &buf_arg, sizeof(buf_arg));
	if (fd == -1) {
		if (errno == ENOSYS) {
			fatal_error("Your kernel does not have performance monitoring support!\n");
		}
		fatal_error("cannot create session %s, maybe you do not have the P4/Xeon PEBS sampling format in the kernel.\n Check /sys/kernel/perfmon\n", strerror(errno));
	}

	/*
	 * retrieve the virtual address at which the sampling
	 * buffer has been mapped
	 */
	buf_addr = mmap(NULL, (size_t)buf_arg.buf_size, PROT_READ, MAP_PRIVATE, fd, 0);
	if (buf_addr == MAP_FAILED)
		fatal_error("cannot mmap sampling buffer errno %d\n", errno);

	printf("session [%d] buffer mapped @%p\n", fd, buf_addr);

	hdr = (smpl_hdr_t *)buf_addr;

	printf("pebs_base=0x%lx pebs_end=0x%lx index=0x%lx\n"
	       "intr=0x%lx version=%u.%u\n"
	       "entry_size=%zu ds_size=%zu\n",
			hdr->ds.pebs_buf_base,
			hdr->ds.pebs_abs_max,
			hdr->ds.pebs_index,
			hdr->ds.pebs_intr_thres,
			PFM_VERSION_MAJOR(hdr->version),
			PFM_VERSION_MINOR(hdr->version),
			sizeof(smpl_entry_t),
			sizeof(hdr->ds));

	if (PFM_VERSION_MAJOR(hdr->version) < 1)
		fatal_error("invalid buffer format version\n");

	/*
	 * using the replay_event event
	 *
	 * CRU_ESCR2.usr=1
	 * CRU_ESCR2.event_mask=1 (NBOGUS)
	 * CRU_ESCR2.event_select=0x9 (replay_event)
	 */
	pc[npmcs].reg_num   = 21;
	pc[npmcs].reg_value = (9ULL <<25) | (1ULL<<9) |(1ULL<<2);
	npmcs++;

	/*
	 * for PEBS, must use IQ_CCCR4 for thread0
	 * IQ_CCCR4.escr_select = 5
	 * IQ_CCCR4.enable= 1
	 * IQ_CCCR4.active_thread= 3
	 *
	 * We must disable 64-bit emulation by the kernel
	 * on the associated counter when using PEBS. Otherwise
	 * we would receive a spurious interrupt for every counter overflow.
	 */
	pc[npmcs].reg_num   = 31;
	pc[npmcs].reg_flags = PFM_REGFL_NO_EMUL64;
	pc[npmcs].reg_value = (5ULL << 13) | (1ULL<<12) | (3ULL<<16);
	npmcs++;

	/*
	 * PEBS_MATRIX_VERT.bit0=1 (1st level cache load miss retired)
	 */
	pc[npmcs].reg_num   = 63;
	pc[npmcs].reg_value = 1;
	npmcs++;

	/*
	 * PEBS_ENABLE.enable=1 (bit0)
	 * PEBS_ENABLE.uops=1 (bit 24)
	 * PEBS_ENABLE.my_thr=1 (bit 25)
	 */
	pc[npmcs].reg_num   = 64;
	pc[npmcs].reg_value = (1ULL<<25)|(1ULL<<24) | 1ULL;
	npmcs++;

	/*
	 * Must use IQ_CCCR4/IQ_CTR4 with PEBS for thread0
	 *
	 * IMPORTANT:
	 * 	SMPL_PERIOD MUST not exceed width of HW counter
	 * 	because no 64-bit virtualization is done by the
	 * 	kernel.
	 */
	pd[0].reg_num = 8;
	pd[0].reg_flags = PFM_REGFL_OVFL_NOTIFY;
	pd[0].reg_value = -SMPL_PERIOD;

	pa[0].reg_long_reset = -SMPL_PERIOD;
	pa[0].reg_short_reset = -SMPL_PERIOD;

	/*
	 * Now program the registers
	 */
	if (pfm_write(fd, 0, PFM_RW_PMC, pc, npmcs * sizeof(*pc)) == -1)
		fatal_error("pfm_writeerror errno %d\n",errno);

	if (pfm_write(fd, 0, PFM_RW_PMD_ATTR, pd, sizeof(*pd)) == -1)
		fatal_error("pfm_write(PMD) error errno %d\n",errno);

	signal(SIGCHLD, SIG_IGN);
	/*
	 * Create the child task
	 */
	if ((pid=fork()) == -1) fatal_error("Cannot fork process\n");

	/*
	 * In order to get the PFM_END_MSG message, it is important
	 * to ensure that the child task does not inherit the file
	 * descriptor of the session. By default, file descriptors
	 * are inherited across exec(). We explicitly close it
	 * here. We could have set it up through fcntl(FD_CLOEXEC)
	 * to achieve the same thing.
	 */
	if (pid == 0) {
		close(fd);
		child(argv+1);
	}

	/*
	 * wait for the child to exec
	 */
	waitpid(pid, &status, WUNTRACED);

	/*
	 * process is stopped at this point
	 */
	if (WIFEXITED(status)) {
		warning("task %s [%d] exited already status %d\n", argv[1], pid, WEXITSTATUS(status));
		goto terminate_session;
	}

	/*
	 *  attach the session
	 */
	if (pfm_attach(fd, 0, pid) == -1)
		fatal_error("pfm_attach error errno %d\n",errno);
	/*
	 * start monitoring
	 */
	if (pfm_set_state(fd, 0, PFM_ST_START) == -1)
		fatal_error("pfm_set_state(start) error errno %d\n",errno);

	/*
	 * detach child. Side effect includes
	 * activation of monitoring.
	 */
	ptrace(PTRACE_DETACH, pid, NULL, 0);

	/*
	 * core loop
	 */
	for(;;) {
		/*
		 * wait for overflow/end notification messages
		 */
		ret = read(fd, &msg, sizeof(msg));
		if (ret == -1) {
			if (errno == EINTR) {
				warning("read interrupted, retrying\n");
				continue;
			}
			fatal_error("cannot read perfmon msg: %s\n", strerror(errno));
		}
		switch(msg.type) {
			case PFM_MSG_OVFL: /* the sampling buffer is full */
				process_smpl_buf(hdr);
				/*
				 * reactivate monitoring once we are done with the samples
				 *
				 * Note that this call can fail with EBUSY in non-blocking mode
				 * as the task may have disappeared while we were processing
				 * the samples.
				 */
				if (pfm_set_state(fd, 0, PFM_ST_RESTART) == -1) {
					if (errno != EBUSY)
						fatal_error("pfm_set_state(restart)_ error errno %d\n",errno);
					else
						warning("pfm_set_state(restart): task has probably terminated \n");
				}
				break;
			case PFM_MSG_END: /* monitored task terminated */
				warning("task terminated\n");
				goto terminate_session;
			default: fatal_error("unknown message type %d\n", msg.type);
		}
	}
terminate_session:
	/*
	 * cleanup child
	 */
	wait4(pid, &status, 0, NULL);

	/*
	 * check for any leftover samples
	 */
	process_smpl_buf(hdr);

	munmap(buf_addr, (size_t)buf_arg.buf_size);

	close(fd);

	return 0;
}
Example #6
int
main(int argc, char **argv)
{
	pfmlib_input_param_t inp;
	pfmlib_output_param_t outp;
	pfmlib_core_input_param_t mod_inp;
	pfmlib_options_t pfmlib_options;
	pfarg_pmr_t pc[NUM_PMCS];
	pfarg_pmd_attr_t pd[NUM_PMDS];
	pfarg_sinfo_t sif;
	struct pollfd fds;
	smpl_arg_t buf_arg;
	pfarg_msg_t msg;
	smpl_hdr_t *hdr;
	void *buf_addr;
	uint64_t pebs_size;
	pid_t pid;
	int ret, fd, type;
	unsigned int i;
	uint32_t ctx_flags;

	if (argc < 2)
		fatal_error("you need to pass a program to sample\n");

	if (pfm_initialize() != PFMLIB_SUCCESS)
		fatal_error("libpfm intialization failed\n");

	/*
	 * check we are on an Intel Core or Intel Atom PMU
	 */
	pfm_get_pmu_type(&type);
	if (type != PFMLIB_INTEL_CORE_PMU && type != PFMLIB_INTEL_ATOM_PMU)
		fatal_error("This program only works with an Intel Core or Intel Atom processor\n");

	/*
	 * pass options to library (optional)
	 */
	memset(&pfmlib_options, 0, sizeof(pfmlib_options));
	pfmlib_options.pfm_debug   = 0; /* set to 1 for debug */
	pfmlib_options.pfm_verbose = 1; /* set to 1 for verbose */
	pfm_set_options(&pfmlib_options);

	memset(pd, 0, sizeof(pd));
	memset(pc, 0, sizeof(pc));
	memset(&inp, 0, sizeof(inp));
	memset(&outp, 0, sizeof(outp));
	memset(&mod_inp, 0, sizeof(mod_inp));
	memset(&sif, 0, sizeof(sif));

	memset(&buf_arg, 0, sizeof(buf_arg));

	memset(&fds, 0, sizeof(fds));

	/*
	 * search for our sampling event
	 */
	if (pfm_find_full_event(SMPL_EVENT, &inp.pfp_events[0]) != PFMLIB_SUCCESS)
		fatal_error("cannot find sampling event %s\n", SMPL_EVENT);

	inp.pfp_event_count = 1;
	inp.pfp_dfl_plm = PFM_PLM3;

	/*
	 * important: inform libpfm we do use PEBS
	 */
	mod_inp.pfp_core_pebs.pebs_used = 1;

	/*
	 * sampling buffer parameters
	 */
	pebs_size = 3 * getpagesize();
	buf_arg.buf_size = pebs_size;
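	/*
	 * buf_size is in bytes; the number of PEBS entries it holds is
	 * buf_size/sizeof(smpl_entry_t), which the threshold below is based on
	 */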

	/*
	 * sampling period cannot use more bits than the HW counter can support
	 */
	buf_arg.cnt_reset = -SMPL_PERIOD;

	/*
	 * We want a system-wide context for sampling
	 */
	ctx_flags = PFM_FL_SYSTEM_WIDE | PFM_FL_SMPL_FMT;

	/*
	 * trigger notification (interrupt) when 90% of
	 * the buffer is filled
	 */
	buf_arg.intr_thres = (pebs_size/sizeof(smpl_entry_t))*90/100;

	/*
	 * we want to measure CPU0, thus we pin ourselves to the CPU before invoking
	 * perfmon. This ensures that the sampling buffer will be allocated on the
	 * same NUMA node.
	 */
	ret = pin_cpu(getpid(), 0);
	if (ret)
		fatal_error("cannot pin on CPU0");

	/*
	 * create session and sampling buffer
	 */
	fd = pfm_create(ctx_flags, &sif, FMT_NAME, &buf_arg, sizeof(buf_arg));
	if (fd == -1) {
		if (errno == ENOSYS) {
			fatal_error("Your kernel does not have performance monitoring support!\n");
		}
		fatal_error("cannot create session %s, maybe you do not have the PEBS sampling format in the kernel.\nCheck /sys/kernel/perfmon/formats\n", strerror(errno));
	}

	/*
	 * map buffer into our address space
	 */
	buf_addr = mmap(NULL, (size_t)buf_arg.buf_size, PROT_READ, MAP_PRIVATE, fd, 0);
	printf("session [%d] buffer mapped @%p\n", fd, buf_addr);
	if (buf_addr == MAP_FAILED)
		fatal_error("cannot mmap sampling buffer errno %d\n", errno);

	hdr = (smpl_hdr_t *)buf_addr;

	printf("pebs_base=0x%llx pebs_end=0x%llx index=0x%llx\n"
	       "intr=0x%llx version=%u.%u\n"
	       "entry_size=%zu ds_size=%zu\n",
			(unsigned long long)hdr->ds.pebs_buf_base,
			(unsigned long long)hdr->ds.pebs_abs_max,
			(unsigned long long)hdr->ds.pebs_index,
			(unsigned long long)hdr->ds.pebs_intr_thres,
			PFM_VERSION_MAJOR(hdr->version),
			PFM_VERSION_MINOR(hdr->version),
			sizeof(smpl_entry_t),
			sizeof(hdr->ds));

	if (PFM_VERSION_MAJOR(hdr->version) < 1)
		fatal_error("invalid buffer format version\n");

	/*
	 * get which PMC registers are available
	 */
	detect_unavail_pmu_regs(&sif, &inp.pfp_unavail_pmcs, NULL);

	/*
	 * let libpfm figure out how to assign event onto PMU registers
	 */
	if (pfm_dispatch_events(&inp, &mod_inp, &outp, NULL) != PFMLIB_SUCCESS)
		fatal_error("cannot assign event %s\n", SMPL_EVENT);


	/*
	 * propagate PMC setup from libpfm to perfmon
	 */
	for (i=0; i < outp.pfp_pmc_count; i++) {
		pc[i].reg_num   = outp.pfp_pmcs[i].reg_num;
		pc[i].reg_value = outp.pfp_pmcs[i].reg_value;

		/*
		 * must disable 64-bit emulation on the PMC0 counter.
		 * PMC0 is the only counter usable with PEBS. We must disable
		 * 64-bit emulation to avoid getting interrupts for each
		 * sampling period, PEBS takes care of this part.
		 */
		if (pc[i].reg_num == 0)
			pc[i].reg_flags = PFM_REGFL_NO_EMUL64;
	}

	/*
	 * propagate PMD set from libpfm to perfmon
	 */
	for (i=0; i < outp.pfp_pmd_count; i++)
		pd[i].reg_num = outp.pfp_pmds[i].reg_num;

	/*
	 * setup sampling period for first counter
	 * we want notification on overflow, i.e., when buffer is full
	 */
	pd[0].reg_flags = PFM_REGFL_OVFL_NOTIFY;
	pd[0].reg_value = -SMPL_PERIOD;

	pd[0].reg_long_reset = -SMPL_PERIOD;
	pd[0].reg_short_reset = -SMPL_PERIOD;
	
	/*
	 * Now program the registers
	 */
	if (pfm_write(fd, 0, PFM_RW_PMC, pc, outp.pfp_pmc_count * sizeof(*pc)) == -1)
		fatal_error("pfm_write error errno %d\n",errno);

	if (pfm_write(fd, 0, PFM_RW_PMD_ATTR, pd, outp.pfp_pmd_count * sizeof(*pd)) == -1)
		fatal_error("pfm_write(PMD) error errno %d\n",errno);

	/*
	 *  attach the session to CPU0
	 */
	if (pfm_attach(fd, 0, 0) == -1)
		fatal_error("pfm_attach error errno %d\n",errno);

	/*
	 * Create the child task
	 */
	signal(SIGCHLD, handler);

	if ((pid=fork()) == -1)
		fatal_error("Cannot fork process\n");

	if (pid == 0) {
		/* child does not inherit context file descriptor */
		close(fd);

		/* if child is too short-lived we may not measure it */
		child(argv+1);
	}

	/*
	 * start monitoring
	 */
	if (pfm_set_state(fd, 0, PFM_ST_START) == -1)
		fatal_error("pfm_set_state(start) error errno %d\n",errno);

	fds.fd = fd;
	fds.events = POLLIN;
	/*
	 * core loop
	 */
	for(;done == 0;) {
		/*
		 * Must use a timeout to avoid a race condition
		 * with the SIGCHLD signal
		 */
		ret = poll(&fds, 1, 500);

		/*
		 * if timeout expired, then check done
		 */
		if (ret == 0)
			continue;

		if (ret == -1) {
			if (errno == EINTR) {
				warning("poll interrupted, retrying\n");
				continue;
			}
			fatal_error("poll failed: %s\n", strerror(errno));
		}

		ret = read(fd, &msg, sizeof(msg));
		if (ret == -1)
			fatal_error("cannot read perfmon msg: %s\n", strerror(errno));

		switch(msg.type) {
			case PFM_MSG_OVFL: /* the sampling buffer is full */
				process_smpl_buf(hdr);
				/*
				 * reactivate monitoring once we are done with the samples.
				 * In system-wide mode, the interface guarantees monitoring
				 * is active again upon return from the restart call
				 */
				if (pfm_set_state(fd, 0, PFM_ST_RESTART) == -1)
					fatal_error("pfm_set_state(restart) error errno %d\n",errno);
				break;
			default: fatal_error("unknown message type %d\n", msg.type);
		}
	}
	/*
	 * cleanup child
	 */
	waitpid(pid, NULL, 0);

	/*
	 * stop monitoring. This is required to guarantee that the PEBS buffer
	 * header is updated with the latest position, so that we see the final
	 * samples
	 */
	if (pfm_set_state(fd, 0, PFM_ST_STOP) == -1)
		fatal_error("pfm_set_state(stop) error errno %d\n",errno);

	/*
	 * check for any leftover samples. Monitoring must be stopped
	 * for the buffer contents to be guaranteed up to date
	 */
	process_smpl_buf(hdr);

	/*
	 * close session
	 */
	close(fd);

	/*
	 * unmap sampling buffer and actually free the perfmon session
	 */
	munmap(buf_addr, (size_t)buf_arg.buf_size);

	return 0;
}