/** program the perfmon counters */
static void write_pmu(struct child * self)
{
	pfarg_reg_t pc[OP_MAX_COUNTERS];
	pfarg_reg_t pd[OP_MAX_COUNTERS];
	int err;
	size_t i;

	memset(pc, 0, sizeof(pc));
	memset(pd, 0, sizeof(pd));

#define PMC_GEN_INTERRUPT (1UL << 5)
#define PMC_PRIV_MONITOR (1UL << 6)
/* McKinley requires pmc4 to have bit 23 set (enable PMU).
 * It is supposedly ignored in other pmc registers.
 */
#define PMC_MANDATORY (1UL << 23)
#define PMC_USER (1UL << 3)
#define PMC_KERNEL (1UL << 0)
	for (i = 0; i < op_nr_counters && opd_events[i].name; ++i) {
		struct opd_event * event = &opd_events[i];
		pc[i].reg_num = event->counter + 4;
		pc[i].reg_value = PMC_GEN_INTERRUPT;
		pc[i].reg_value |= PMC_PRIV_MONITOR;
		pc[i].reg_value |= PMC_MANDATORY;
		if (event->user)
			pc[i].reg_value |= PMC_USER;
		else
			pc[i].reg_value &= ~PMC_USER;
		if (event->kernel)
			pc[i].reg_value |= PMC_KERNEL;
		else
			pc[i].reg_value &= ~PMC_KERNEL;
		pc[i].reg_value &= ~(0xff << 8);
		pc[i].reg_value |= ((event->value & 0xff) << 8);
		pc[i].reg_value &= ~(0xf << 16);
		pc[i].reg_value |= ((event->um & 0xf) << 16);
		pc[i].reg_smpl_eventid = event->counter;
	}

	for (i = 0; i < op_nr_counters && opd_events[i].name; ++i) {
		struct opd_event * event = &opd_events[i];
		pd[i].reg_value = ~0UL - event->count + 1;
		pd[i].reg_short_reset = ~0UL - event->count + 1;
		pd[i].reg_num = event->counter + 4;
	}

	err = perfmonctl(self->ctx_fd, PFM_WRITE_PMCS, pc, i);
	if (err == -1) {
		perror("Couldn't write PMCs");
		exit(EXIT_FAILURE);
	}

	err = perfmonctl(self->ctx_fd, PFM_WRITE_PMDS, pd, i);
	if (err == -1) {
		perror("Couldn't write PMDs");
		exit(EXIT_FAILURE);
	}
}
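
The PMD seed values used above (and in several later examples) rely on the standard perfmon trick of loading a counter with the two's complement of the desired period, so the counter overflows after exactly that many events. A minimal sketch of that arithmetic; the helper name is illustrative and not part of any perfmon API:

static unsigned long overflow_seed(unsigned long count)
{
	/* counters count upward and interrupt when they wrap past ~0UL,
	 * so seeding with -count makes the overflow fire after `count` events */
	return ~0UL - count + 1;	/* same value as (unsigned long)-count */
}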
Example No. 2
static void perfmon_start_child(int ctx_fd)
{
	if (perfmonctl(ctx_fd, PFM_START, 0, 0) == -1) {
		perror("Couldn't start perfmon: ");
		exit(EXIT_FAILURE);
	}
}
static void
overflow_handler(int n, struct pfm_siginfo *info, struct sigcontext *sc)
{
	unsigned long mask = info->sy_pfm_ovfl[0];
	pfarg_reg_t pd[1];

	/*
	 * Check to see if we received a spurious SIGPROF, i.e., one not
	 * generated by the perfmon subsystem.
	 */
	if (info->sy_code != PROF_OVFL) {
		printf("Received spurious SIGPROF si_code=%d\n", info->sy_code);
		return;
	} 
	/*
	 * Each bit set in the overflow mask represents an overflowed counter.
	 *
	 * Here we check that the overflow was caused by our first counter.
	 */
	if ((mask & (1UL<< evt.pfp_pc[0].reg_num)) == 0) {
		printf("Something is wrong, unexpected mask 0x%lx\n", mask);
		exit(1);
	}

	/*
	 * Read the value of the second counter
	 */
	pd[0].reg_num = evt.pfp_pc[1].reg_num;

	if (perfmonctl(getpid(), PFM_READ_PMDS, pd, 1) == -1) {
		perror("PFM_READ_PMDS");
		exit(1);
	}

	printf("Notification received\n");
	process_smpl_buffer();
	/*
	 * And resume monitoring
	 */
	if (perfmonctl(getpid(), PFM_RESTART, NULL, 0) == -1) {
		perror("PFM_RESTART");
		exit(1);
	}
	/* Here we have the PMU enabled and are capturing events */
}
static void load_context(struct child * self)
{
	pfarg_load_t load_args;
	int err;

	memset(&load_args, 0, sizeof(load_args));
	load_args.load_pid = self->pid;

	err = perfmonctl(self->ctx_fd, PFM_LOAD_CONTEXT, &load_args, 1);
	if (err == -1) {
		perror("Couldn't load context");
		exit(EXIT_FAILURE);
	}
}
Example No. 5
static void
sigio_handler(int n, struct siginfo *info, struct sigcontext *sc)
{
	if (perfmonctl(ctx_fd, PFM_READ_PMDS, pd+1, 1) == -1) {
		fatal_error("PFM_READ_PMDS: %s", strerror(errno));
	}

	/*
	 * we do not need to extract the overflow message, we know
	 * where it is coming from.
	 */

	/*
	 * XXX: risky to do printf() in signal handler!
	 */
	if (event1_name)
		printf("Notification %02lu: %"PRIu64" %s\n", notification_received, pd[1].reg_value, event1_name);
	else
		printf("Notification %02lu:\n", notification_received);

	/*
	 * At this point, the counter used for the sampling period has already
	 * been reset by the kernel because we are in non-blocking, self-monitoring mode.
	 */

	/*
	 * increment our notification counter
	 */
	notification_received++;

	/*
	 * And resume monitoring
	 */
	if (perfmonctl(ctx_fd, PFM_RESTART, NULL, 0) == -1) {
		fatal_error("PFM_RESTART: %s", strerror(errno));
	}
}
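
fatal_error() (and the similar warning()) is used throughout these listings but never shown. A minimal sketch of such a helper, assuming the usual printf-style variadic signature; this is an assumption, not the original implementation:

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

static void fatal_error(char *fmt, ...)
{
	va_list ap;

	/* print the formatted message to stderr and abort */
	va_start(ap, fmt);
	vfprintf(stderr, fmt, ap);
	va_end(ap);

	exit(1);
}

A warning() variant would look the same, minus the exit(1).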
/** create the per-cpu context */
static void create_context(struct child * self)
{
	pfarg_context_t ctx;
	int err;

	memset(&ctx, 0, sizeof(pfarg_context_t));
	memcpy(&ctx.ctx_smpl_buf_id, &uuid, 16);
	ctx.ctx_flags = PFM_FL_SYSTEM_WIDE;

	err = perfmonctl(0, PFM_CREATE_CONTEXT, &ctx, 1);
	if (err == -1) {
		perror("CREATE_CONTEXT failed");
		exit(EXIT_FAILURE);
	}

	self->ctx_fd = ctx.ctx_fd;
}
Example No. 7
int
main(int argc, char **argv)
{
	unsigned int i, cnum = 0;
	pfarg_reg_t pc[NUM_PMCS];
	pfmlib_regmask_t impl_pmcs;
	unsigned int num_pmcs;

	/*
	 * Initialize pfm library (required before we can use it)
	 */
	if (pfm_initialize() != PFMLIB_SUCCESS) {
		printf("Can't initialize library\n");
		exit(1);
	}

	memset(&impl_pmcs, 0, sizeof(impl_pmcs));
	memset(pc, 0, sizeof(pc));
	
	pfm_get_impl_pmcs(&impl_pmcs);
	pfm_get_num_pmcs(&num_pmcs);

	for (i=0; num_pmcs; i++) {
		if (pfm_regmask_isset(&impl_pmcs, i) == 0) continue;
		pc[cnum++].reg_num = i;
		num_pmcs--;
	}

	if (perfmonctl(0, PFM_GET_PMC_RESET_VAL, pc, cnum) == -1 ) {
		if (errno == ENOSYS) {
			fatal_error("Your kernel does not have performance monitoring support!\n");
		}
		fatal_error("cannot get reset values: %s\n", strerror(errno));
	}

	for (i=0; i < cnum; i++) {
		printf("PMC%u 0x%lx\n", pc[i].reg_num, pc[i].reg_value);
	}
	return 0;
}
Example No. 8
int
parent(char **arg)
{
	pfmlib_input_param_t inp;
	pfmlib_output_param_t outp;
	pfarg_context_t ctx[1];
	pfarg_reg_t pc[NUM_PMCS];
	pfarg_reg_t pd[NUM_PMDS];
	pfarg_load_t load_args;
	unsigned int i, num_counters;
	int status, ret;
	int ctx_fd;
	pid_t pid;
	char name[MAX_EVT_NAME_LEN];

	memset(pc, 0, sizeof(pc));
	memset(pd, 0, sizeof(pd));
	memset(ctx, 0, sizeof(ctx));
	memset(&inp, 0, sizeof(inp));
	memset(&outp, 0, sizeof(outp));
	memset(&load_args, 0, sizeof(load_args));

	pfm_get_num_counters(&num_counters);

	if (pfm_get_cycle_event(&inp.pfp_events[0]) != PFMLIB_SUCCESS)
		fatal_error("cannot find cycle event\n");

	if (pfm_get_inst_retired_event(&inp.pfp_events[1]) != PFMLIB_SUCCESS)
		fatal_error("cannot find inst retired event\n");
	i = 2;

	if (num_counters < i) {
		i = num_counters;
		printf("too many events provided (max=%d events), using first %d event(s)\n", num_counters, i);
	}

	/*
	 * set the privilege mode:
	 * 	PFM_PLM3 : user level
	 * 	PFM_PLM0 : kernel level
	 */
	inp.pfp_dfl_plm   = PFM_PLM3;

	/*
	 * how many counters we use
	 */
	inp.pfp_event_count = i;

	/*
	 * let the library figure out the values for the PMCS
	 */
	if ((ret=pfm_dispatch_events(&inp, NULL, &outp, NULL)) != PFMLIB_SUCCESS) {
		fatal_error("cannot configure events: %s\n", pfm_strerror(ret));
	}
	/*
	 * now create a context. we will later attach it to the task we are creating.
	 */
	if (perfmonctl(0, PFM_CREATE_CONTEXT, ctx, 1) == -1) {
		if (errno == ENOSYS) {
			fatal_error("Your kernel does not have performance monitoring support!\n");
		}
		fatal_error("Can't create PFM context %s\n", strerror(errno));
	}
	/*
	 * extract the identifier for our context
	 */
	ctx_fd = ctx[0].ctx_fd;

	/*
	 * Now prepare the argument to initialize the PMDs and PMCS.
	 * We must use pfp_pmc_count to determine the number of PMCs to initialize.
	 * We must use pfp_event_count to determine the number of PMDs to initialize.
	 * Some events cause extra PMCs to be used, so pfp_pmc_count may be >= pfp_event_count.
	 *
	 * This step is new compared to libpfm-2.x. It is necessary because the library no
	 * longer knows about the kernel data structures.
	 */
	for (i=0; i < outp.pfp_pmc_count; i++) {
		pc[i].reg_num   = outp.pfp_pmcs[i].reg_num;
		pc[i].reg_value = outp.pfp_pmcs[i].reg_value;
	}

	/*
	 * the PMCs controlling the events ALWAYS come first, that's why this loop
	 * is safe even when extra PMCs are needed to support a particular event.
	 */
	for (i=0; i < inp.pfp_event_count; i++) {
		pd[i].reg_num   = pc[i].reg_num;
	}

	/*
	 * Now program the registers
	 *
	 * We don't use the same variable to indicate the number of elements passed to
	 * the kernel because, as we said earlier, pc may contain more elements than
	 * the number of events we specified, i.e., it contains more than just counting monitors.
	 */

	if (perfmonctl(ctx_fd, PFM_WRITE_PMCS, pc, outp.pfp_pmc_count) == -1) {
		fatal_error("perfmonctl error PFM_WRITE_PMCS errno %d\n",errno);
	}

	if (perfmonctl(ctx_fd, PFM_WRITE_PMDS, pd, inp.pfp_event_count) == -1) {
		fatal_error("perfmonctl error PFM_WRITE_PMDS errno %d\n",errno);
	}

	/*
	 * Create the child task
	 */
	if ((pid=fork()) == -1) fatal_error("Cannot fork process\n");

	/*
	 * and launch the child code
	 */
	if (pid == 0) exit(child(arg));

	/*
	 * wait for the child to exec
	 */
	waitpid(pid, &status, WUNTRACED);

	/*
	 * check if process exited early
	 */
	if (WIFEXITED(status)) {
		fatal_error("command %s exited too early with status %d\n", arg[0], WEXITSTATUS(status));
	}
	/*
	 * the task is stopped at this point
	 */
	
	
	/*
	 * now we load (i.e., attach) the context to the stopped child task
	 */
	load_args.load_pid = pid;

	if (perfmonctl(ctx_fd, PFM_LOAD_CONTEXT, &load_args, 1) == -1) {
		fatal_error("perfmonctl error PFM_LOAD_CONTEXT errno %d\n",errno);
	}

	/*
	 * activate monitoring. The task is still STOPPED at this point. Monitoring
	 * will not take effect until the execution of the task is resumed.
	 */
	if (perfmonctl(ctx_fd, PFM_START, NULL, 0) == -1) {
		fatal_error("perfmonctl error PFM_START errno %d\n",errno);
	}

	/*
	 * now resume execution of the task, effectively activating
	 * monitoring.
	 */
	ptrace(PTRACE_DETACH, pid, NULL, 0);

	/*
	 * now the task is running
	 */

	/*
	 * simply wait for completion
	 */
	waitpid(pid, &status, 0);

	/*
	 * the task has disappeared at this point but our context is still
	 * present and contains all the latest counts.
	 */

	/*
	 * now simply read the results.
	 */
	if (perfmonctl(ctx_fd, PFM_READ_PMDS, pd, inp.pfp_event_count) == -1) {
		fatal_error("perfmonctl error READ_PMDS errno %d\n",errno);
		return -1;
	}
	/*
	 * print the results
	 *
	 * It is important to realize that the first event we specified may not
	 * be in PMD4. Not all events can be measured by any monitor. That's why
	 * we need to use the pc[] array to figure out where event i was allocated.
	 *
	 */
	for (i=0; i < inp.pfp_event_count; i++) {
		pfm_get_full_event_name(&inp.pfp_events[i], name, MAX_EVT_NAME_LEN);
		printf("PMD%u %20"PRIu64" %s\n",
			pd[i].reg_num,
			pd[i].reg_value,
			name);
	}
	/*
	 * free the context
	 */
	close(ctx_fd);

	return 0;
}
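
child() is not part of this listing. For the waitpid(WUNTRACED)/PTRACE_DETACH sequence used by parent() (and by mainloop() in Example No. 16) to work, the child has to stop itself when it execs the command. A sketch of what such a helper presumably does, based only on how the parent synchronizes; it is an assumption, not the original code:

#include <sys/ptrace.h>
#include <unistd.h>

static int child(char **arg)
{
	/* ask to be traced so the kernel stops us at exec(); the parent then
	 * attaches the perfmon context and starts monitoring before resuming us */
	ptrace(PTRACE_TRACEME, 0, NULL, NULL);

	execvp(arg[0], arg);

	/* only reached if execvp() failed */
	return -1;
}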
Example No. 9
int
main(int argc, char **argv)
{
	char **p;
	int i, ret;
	pid_t pid = getpid();
	pfmlib_param_t evt;
	pfarg_reg_t pd[NUM_PMDS];
	pfarg_context_t ctx[1];
	pfmlib_options_t pfmlib_options;

	/*
	 * Initialize pfm library (required before we can use it)
	 */
	if (pfm_initialize() != PFMLIB_SUCCESS) {
		printf("Can't initialize library\n");
		exit(1);
	}
	
	/* 
	 * check that the user did not specify too many events
	 */
	if (argc-1 > pfm_get_num_counters()) {
		printf("Too many events specified\n");
		exit(1);
	}

	/*
	 * pass options to library (optional)
	 */
	memset(&pfmlib_options, 0, sizeof(pfmlib_options));
	pfmlib_options.pfm_debug = 0; /* set to 1 for debug */
	pfm_set_options(&pfmlib_options);

	memset(pd, 0, sizeof(pd));
	memset(ctx, 0, sizeof(ctx));

	/*
	 * prepare parameters for the library. We don't use any Itanium-specific
	 * features here, so pfp_model stays NULL.
	 */
	memset(&evt,0, sizeof(evt));

	/*
	 * be nice to user!
	 */
	p = argc > 1 ? argv+1 : event_list;
	for (i=0; *p ; i++, p++) {
		if (pfm_find_event(*p, &evt.pfp_events[i].event) != PFMLIB_SUCCESS) {
			fatal_error("Cannot find %s event\n", *p);
		}
	}

	/*
	 * set the default privilege mode for all counters:
	 * 	PFM_PLM3 : user level only
	 */
	evt.pfp_dfl_plm   = PFM_PLM3; 

	/*
	 * how many counters we use
	 */
	evt.pfp_event_count = i;

	/*
	 * let the library figure out the values for the PMCS
	 */
	if ((ret=pfm_dispatch_events(&evt)) != PFMLIB_SUCCESS) {
		fatal_error("cannot configure events: %s\n", pfm_strerror(ret));
	}
	/*
	 * for this example, we have decided not to get notified
	 * on counter overflows and the monitoring is not to be inherited
	 * in derived tasks.
	 */
	ctx[0].ctx_flags = PFM_FL_INHERIT_NONE;

	/*
	 * now create the context for self monitoring/per-task
	 */
	if (perfmonctl(pid, PFM_CREATE_CONTEXT, ctx, 1) == -1 ) {
		if (errno == ENOSYS) {
			fatal_error("Your kernel does not have performance monitoring support!\n");
		}
		fatal_error("Can't create PFM context %s\n", strerror(errno));
	}
	/* 
	 * Must be done before any PMC/PMD calls (unfreeze PMU). Initialize
	 * PMC/PMD to safe values. psr.up is cleared.
	 */
	if (perfmonctl(pid, PFM_ENABLE, NULL, 0) == -1) {
		fatal_error("perfmonctl error PFM_ENABLE errno %d\n",errno);
	}

	/*
	 * Now prepare the argument to initialize the PMDs.
	 * the memset(pd) initialized the entire array to zero already, so
	 * we just have to fill in the register numbers from the evt.pfp_pc[] array.
	 */
	for (i=0; i < evt.pfp_event_count; i++) {
		pd[i].reg_num = evt.pfp_pc[i].reg_num;
	}
	/*
	 * Now program the registers
	 *
	 * We don't use the same variable to indicate the number of elements passed to
	 * the kernel because, as we said earlier, pc may contain more elements than
	 * the number of events we specified, i.e., it contains more than just counting monitors.
	 */
	if (perfmonctl(pid, PFM_WRITE_PMCS, evt.pfp_pc, evt.pfp_pc_count) == -1) {
		fatal_error("perfmonctl error PFM_WRITE_PMCS errno %d\n",errno);
	}
	if (perfmonctl(pid, PFM_WRITE_PMDS, pd, evt.pfp_event_count) == -1) {
		{int i; for(i=0; i < evt.pfp_event_count; i++) printf("pmd%d: 0x%x\n", i, pd[i].reg_flags);}
		fatal_error("perfmonctl error PFM_WRITE_PMDS errno %d\n",errno);
	}

	/*
	 * Let's roll now
	 */
	pfm_start();

	noploop(10000000);

	pfm_stop();

	/* 
	 * now read the results
	 */
	if (perfmonctl(pid, PFM_READ_PMDS, pd, evt.pfp_event_count) == -1) {
		fatal_error( "perfmonctl error READ_PMDS errno %d\n",errno);
		return -1;
	}
	/* 
	 * print the results
	 *
	 * It is important to realize that the first event we specified may not
	 * be in PMD4. Not all events can be measured by any monitor. That's why
	 * we need to use the pc[] array to figure out where event i was allocated.
	 *
	 */
	for (i=0; i < evt.pfp_event_count; i++) {
		char *name;
		pfm_get_event_name(evt.pfp_events[i].event, &name);
		printf("PMD%u %20lu %s\n", 
			pd[i].reg_num, 
			pd[i].reg_value, 
			name);
	}
	/* 
	 * let's stop this now
	 */
	if (perfmonctl(pid, PFM_DESTROY_CONTEXT, NULL, 0) == -1) {
		fatal_error( "child: perfmonctl error PFM_DESTROY errno %d\n",errno);
	}

	return 0;
}
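
noploop() and event_list are not shown in this listing; the first only burns user-level cycles so the counters have something to count, and the second supplies default event names when none are given on the command line. A minimal sketch under those assumptions (the event names below are illustrative, any events valid for the PMU would do):

static char *event_list[] = {
	"cpu_cycles",
	"ia64_inst_retired",
	NULL
};

static void noploop(unsigned long loop)
{
	/* spin at user level so the counters advance */
	for (; loop; loop--)
		;
}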
Example No. 10
int
main(void)
{
	int ret;
	int type = 0;
	pid_t pid = getpid();
	pfmlib_ita2_param_t ita_param;
	pfarg_reg_t pd[NUM_PMDS];
	pfarg_context_t ctx[1];
	pfmlib_options_t pfmlib_options;
	struct sigaction act;

	/*
	 * Initialize pfm library (required before we can use it)
	 */
	if (pfm_initialize() != PFMLIB_SUCCESS) {
		fatal_error("Can't initialize library\n");
	}

	/*
	 * Let's make sure we run this on the right CPU
	 */
	pfm_get_pmu_type(&type);
	if (type != PFMLIB_ITANIUM2_PMU) {
		char *model; 
		pfm_get_pmu_name(&model);
		fatal_error("this program does not work with %s PMU\n", model);
	}

	/*
	 * Install the overflow handler (SIGPROF)
	 */
	memset(&act, 0, sizeof(act));
	act.sa_handler = (sig_t)overflow_handler;
	sigaction (SIGPROF, &act, 0);


	/*
	 * pass options to library (optional)
	 */
	memset(&pfmlib_options, 0, sizeof(pfmlib_options));
	pfmlib_options.pfm_debug = 0; /* set to 1 for debug */
	pfmlib_options.pfm_verbose = 0; /* set to 1 for debug */
	pfm_set_options(&pfmlib_options);



	memset(pd, 0, sizeof(pd));
	memset(ctx, 0, sizeof(ctx));

	/*
	 * prepare parameters for the library. This example uses an Itanium 2
	 * specific feature (the BTB), so the model-specific structure below is
	 * initialized and linked in via pfp_model.
	 */
	memset(&evt,0, sizeof(evt));
	memset(&ita_param,0, sizeof(ita_param));


	/*
	 * because we use a model specific feature, we must initialize the
	 * model specific pfmlib parameter structure and link it to the
	 * common structure.
	 * The magic number is a simple mechanism used by the library to check
	 * that the model specific data structure is decent. You must set it manually
	 * otherwise the model specific feature won't work.
	 */
	ita_param.pfp_magic = PFMLIB_ITA2_PARAM_MAGIC;
	evt.pfp_model       = &ita_param;

	/*
	 * Before calling pfm_dispatch_events(), we must specify what kind
	 * of branches we want to capture. We are interested in all the mispredicted
	 * branches, therefore we set the various fields of the BTB config to:
	 */
	ita_param.pfp_ita2_btb.btb_used = 1;

	ita_param.pfp_ita2_btb.btb_ds  = 0;
	ita_param.pfp_ita2_btb.btb_tm  = 0x3;
	ita_param.pfp_ita2_btb.btb_ptm = 0x3;
	ita_param.pfp_ita2_btb.btb_ppm = 0x3;
	ita_param.pfp_ita2_btb.btb_brt = 0x0;
	ita_param.pfp_ita2_btb.btb_plm = PFM_PLM3;

	/*
	 * To capture the branches, we must program a counting monitor
	 * with the BRANCH_EVENT event.
	 */
	if (pfm_find_event_byname("BRANCH_EVENT", &evt.pfp_events[0].event) != PFMLIB_SUCCESS) {
		fatal_error("cannot find event BRANCH_EVENT\n");
	}

	/*
	 * set the (global) privilege mode:
	 * 	PFM_PLM3 : user level only
	 */
	evt.pfp_dfl_plm   = PFM_PLM3; 
	/*
	 * how many counters we use
	 */
	evt.pfp_event_count = 1;

	/*
	 * let the library figure out the values for the PMCS
	 */
	if ((ret=pfm_dispatch_events(&evt)) != PFMLIB_SUCCESS) {
		fatal_error("cannot configure events: %s\n", pfm_strerror(ret));
	}
	/*
	 * for this example, we will get notified ONLY when the sampling
	 * buffer is full. The monitoring is not to be inherited
	 * in derived tasks
	 */
	ctx[0].ctx_flags        = PFM_FL_INHERIT_NONE;
	ctx[0].ctx_notify_pid   = getpid();
	ctx[0].ctx_smpl_entries = SMPL_BUF_NENTRIES;
	ctx[0].ctx_smpl_regs[0] = smpl_regs = BTB_REGS_MASK;


	/*
	 * now create the context for self monitoring/per-task
	 */
	if (perfmonctl(pid, PFM_CREATE_CONTEXT, ctx, 1) == -1 ) {
		if (errno == ENOSYS) {
			fatal_error("Your kernel does not have performance monitoring support!\n");
		}
		fatal_error("Can't create PFM context %s\n", strerror(errno));
	}

	printf("Sampling buffer mapped at %p\n", ctx[0].ctx_smpl_vaddr);

	smpl_vaddr = ctx[0].ctx_smpl_vaddr;

	/* 
	 * Must be done before any PMD/PMD calls (unfreeze PMU). Initialize
	 * PMC/PMD to safe values. psr.up is cleared.
	 */
	if (perfmonctl(pid, PFM_ENABLE, NULL, 0) == -1) {
		fatal_error("perfmonctl error PFM_ENABLE errno %d\n",errno);
	}

	/*
	 * indicate we want notification when buffer is full
	 */
	evt.pfp_pc[0].reg_flags |= PFM_REGFL_OVFL_NOTIFY;

	/*
	 * Now prepare the argument to initialize the PMD and the sampling period
	 */
	pd[0].reg_num         = evt.pfp_pc[0].reg_num;
	pd[0].reg_value       = (~0UL) - SMPL_PERIOD +1;
	pd[0].reg_long_reset  = (~0UL) - SMPL_PERIOD +1;
	pd[0].reg_short_reset = (~0UL) - SMPL_PERIOD +1;

	/*
	 * When our counter overflows, we want the BTB index to be reset, so that we keep
	 * in sync. This is required to make it possible to interpret pmd16 on overflow
	 * to avoid repeating the same branch several times.
	 */
	evt.pfp_pc[0].reg_reset_pmds[0] = M_PMD(16);

	/*
	 * reset pmd16; the short and long reset values are set to zero as well
	 */
	pd[1].reg_num         = 16;
	pd[1].reg_value       = 0UL;

	/*
	 * Now program the registers
	 *
	 * We don't use the same variable to indicate the number of elements passed to
	 * the kernel because, as we said earlier, pc may contain more elements than
	 * the number of events we specified, i.e., it contains more than just counting monitors.
	 */
	if (perfmonctl(pid, PFM_WRITE_PMCS, evt.pfp_pc, evt.pfp_pc_count) == -1) {
		fatal_error("perfmonctl error PFM_WRITE_PMCS errno %d\n",errno);
	}
	if (perfmonctl(pid, PFM_WRITE_PMDS, pd, 2) == -1) {
		fatal_error("perfmonctl error PFM_WRITE_PMDS errno %d\n",errno);
	}

	/*
	 * Let's roll now.
	 */

	do_test(100000);

	/*
	 * We must call the processing routine to cover the last entries recorded
	 * in the sampling buffer, i.e., entries present even though the buffer is not full
	 */
	process_smpl_buffer();

	/* 
	 * let's stop this now
	 */
	if (perfmonctl(pid, PFM_DESTROY_CONTEXT, NULL, 0) == -1) {
		fatal_error("perfmonctl error PFM_DESTROY errno %d\n",errno);
	}
	return 0;
}
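
do_test() is also not shown. For the BTB and opcode-matcher examples it only needs to execute a counted loop at user level so that branches (ideally a br.cloop) are retired; a sketch under that assumption:

static void do_test(unsigned long loop)
{
	/* a simple counted loop; with enough optimization the Itanium
	 * compiler may emit a br.cloop-terminated loop for this */
	while (loop--)
		;
}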
static void perfmon_start_child(int ctx_fd)
{
	if (perfmonctl(ctx_fd, PFM_START, 0, 0) == -1) {
		perror("Couldn't start perfmon");
		exit(EXIT_FAILURE);
	}
}
Example No. 12
int
main(void)
{
	pfmlib_input_param_t inp;
	pfmlib_output_param_t outp;
	pfmlib_ita2_input_param_t ita2_inp;
	pfarg_reg_t pd[NUM_PMDS];
	pfarg_reg_t pc[NUM_PMCS];
	pfarg_context_t ctx[1];
	pfarg_load_t load_args;
	pfmlib_options_t pfmlib_options;
	int ret;
	int type = 0;
	int id;
	unsigned int i;
	char name[MAX_EVT_NAME_LEN];

	/*
	 * Initialize pfm library (required before we can use it)
	 */
	if (pfm_initialize() != PFMLIB_SUCCESS) {
		fatal_error("Can't initialize library\n");
	}

	/*
	 * Let's make sure we run this on the right CPU
	 */
	pfm_get_pmu_type(&type);
	if (type != PFMLIB_ITANIUM2_PMU) {
		char model[MAX_PMU_NAME_LEN];
		pfm_get_pmu_name(model, MAX_PMU_NAME_LEN);
		fatal_error("this program does not work with the %s PMU\n", model);
	}

	/*
	 * pass options to library (optional)
	 */
	memset(&pfmlib_options, 0, sizeof(pfmlib_options));
	pfmlib_options.pfm_debug   = 0; /* set to 1 for debug */
	pfmlib_options.pfm_verbose = 0; /* set to 1 for verbose */
	pfm_set_options(&pfmlib_options);

	memset(pd, 0, sizeof(pd));
	memset(pc, 0, sizeof(pc));
	memset(ctx, 0, sizeof(ctx));
	memset(&load_args, 0, sizeof(load_args));

	memset(&inp,0, sizeof(inp));
	memset(&outp,0, sizeof(outp));
	memset(&ita2_inp,0, sizeof(ita2_inp));

	/*
	 * We indicate that we are using the PMC8 opcode matcher. This is required
	 * so that the library adds PMC8 to the list of PMCs to program during
	 * pfm_dispatch_events().
	 */
	ita2_inp.pfp_ita2_pmc8.opcm_used = 1;

	/*
	 * We want to match all the br.cloop in our test function.
	 * This branch is an IP-relative branch for which the major
	 * opcode (bits [40-37]=4) and the btype field is 5 (which represents
	 * bits[6-8]) so it is included in the match/mask fields of PMC8.
	 * It is necessarily in a B slot.
	 *
	 * We don't care which operands are used with br.cloop therefore
	 * the mask field of pmc8 is set such that only the 4 bits of the
	 * opcode and 3 bits of btype must match exactly. This is accomplished by
	 * clearing the top 4 bits and bits [6-8] of the mask field and setting the
	 * remaining bits.  Similarly, the match field only has the opcode value  and btype
	 * set according to the encoding of br.cloop, the
	 * remaining bits are zero. Bit 60 of PMC8 is set to indicate
	 * that we look only in B slots  (this is the only possibility for
	 * this instruction anyway).
	 *
	 * So the binary representation of the value for PMC8 is as follows:
	 *
	 * 6666555555555544444444443333333333222222222211111111110000000000
	 * 3210987654321098765432109876543210987654321098765432109876543210
	 * ----------------------------------------------------------------
	 * 0001010000000000000000101000000000000011111111111111000111111000
	 *
	 * which yields a value of 0x1400028003fff1f8.
	 *
	 * Depending on the level of optimization to compile this code, it may
	 * be that the count reported could be zero, if the compiler uses a br.cond
	 * instead of br.cloop.
	 *
	 *
	 * The 0x1 sets the ig_ad field to make sure we ignore any range restriction.
	 * Also bit 2 must always be set
	 */
	ita2_inp.pfp_ita2_pmc8.pmc_val = 0x1400028003fff1fa;

	/*
	 * To count the number of occurrences of this instruction, we must
	 * program a counting monitor with the IA64_TAGGED_INST_RETIRED_PMC8
	 * event.
	 */
	if (pfm_find_full_event("IA64_TAGGED_INST_RETIRED_IBRP0_PMC8", &inp.pfp_events[0]) != PFMLIB_SUCCESS) {
		fatal_error("cannot find event IA64_TAGGED_INST_RETIRED_IBRP0_PMC8\n");
	}

	/*
	 * set the privilege mode:
	 * 	PFM_PLM3 : user level only
	 */
	inp.pfp_dfl_plm   = PFM_PLM3;
	/*
	 * how many counters we use
	 */
	inp.pfp_event_count = 1;

	/*
	 * let the library figure out the values for the PMCS
	 */
	if ((ret=pfm_dispatch_events(&inp, &ita2_inp, &outp, NULL)) != PFMLIB_SUCCESS) {
		fatal_error("cannot configure events: %s\n", pfm_strerror(ret));
	}

	/*
	 * now create the context for self monitoring/per-task
	 */
	if (perfmonctl(0, PFM_CREATE_CONTEXT, ctx, 1) == -1 ) {
		if (errno == ENOSYS) {
			fatal_error("Your kernel does not have performance monitoring support!\n");
		}
		fatal_error("Can't create PFM context %s\n", strerror(errno));
	}
	/*
	 * extract the unique identifier for our context, a regular file descriptor
	 */
	id = ctx[0].ctx_fd;

	/*
	 * Now prepare the argument to initialize the PMDs and PMCS.
	 * We must use pfp_pmc_count to determine the number of PMCs to initialize.
	 * We must use pfp_event_count to determine the number of PMDs to initialize.
	 * Some events cause extra PMCs to be used, so pfp_pmc_count may be >= pfp_event_count.
	 *
	 * This step is new compared to libpfm-2.x. It is necessary because the library no
	 * longer knows about the kernel data structures.
	 */

	for (i=0; i < outp.pfp_pmc_count; i++) {
		pc[i].reg_num   = outp.pfp_pmcs[i].reg_num;
		pc[i].reg_value = outp.pfp_pmcs[i].reg_value;
	}

	/*
	 * the PMCs controlling the events ALWAYS come first, that's why this loop
	 * is safe even when extra PMCs are needed to support a particular event.
	 */
	for (i=0; i < inp.pfp_event_count; i++) {
		pd[i].reg_num   = pc[i].reg_num;
	}
	printf("event_count=%d id=%d\n",  inp.pfp_event_count, id);

	/*
	 * Now program the registers
	 *
	 * We don't use the same variable to indicate the number of elements passed to
	 * the kernel because, as we said earlier, pc may contain more elements than
	 * the number of events we specified, i.e., it contains more than just counting monitors.
	 */
	if (perfmonctl(id, PFM_WRITE_PMCS, pc, outp.pfp_pmc_count) == -1) {
		fatal_error("perfmonctl error PFM_WRITE_PMCS errno %d\n",errno);
	}
	if (perfmonctl(id, PFM_WRITE_PMDS, pd, inp.pfp_event_count) == -1) {
		fatal_error("perfmonctl error PFM_WRITE_PMDS errno %d\n",errno);
	}
	/*
	 * now we load (i.e., attach) the context to ourself
	 */
	load_args.load_pid = getpid();

	if (perfmonctl(id, PFM_LOAD_CONTEXT, &load_args, 1) == -1) {
		fatal_error("perfmonctl error PFM_LOAD_CONTEXT errno %d\n",errno);
	}

	/*
	 * Let's roll now.
	 */
	pfm_self_start(id);

	do_test(100UL);

	pfm_self_stop(id);

	/*
	 * now read the results
	 */
	if (perfmonctl(id, PFM_READ_PMDS, pd, inp.pfp_event_count) == -1) {
		fatal_error("perfmonctl error READ_PMDS errno %d\n",errno);
	}

	/*
	 * print the results
	 */
	pfm_get_full_event_name(&inp.pfp_events[0], name, MAX_EVT_NAME_LEN);
	printf("PMD%u %20lu %s\n",
			pd[0].reg_num,
			pd[0].reg_value,
			name);

	if (pd[0].reg_value != 0)
		printf("compiler used br.cloop\n");
	else
		printf("compiler did not use br.cloop\n");

	/*
	 * let's stop this now
	 */
	close(id);
	return 0;
}
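
pfm_self_start() and pfm_self_stop(), used above around do_test(), simply issue PFM_START/PFM_STOP on the context file descriptor of the calling task. A minimal sketch of what these helpers presumably look like, assuming the perfmon declarations already included by the examples; in the real programs they may come from a shared header:

static inline int pfm_self_start(int fd)
{
	/* start monitoring on our own, already loaded context */
	return perfmonctl(fd, PFM_START, NULL, 0);
}

static inline int pfm_self_stop(int fd)
{
	/* stop monitoring on our own context */
	return perfmonctl(fd, PFM_STOP, NULL, 0);
}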
Example No. 13
int
parent(pid_t pid, unsigned long delay)
{
	pfmlib_input_param_t inp;
	pfmlib_output_param_t outp;
	pfarg_context_t ctx[1];
	pfarg_reg_t pc[NUM_PMCS];
	pfarg_reg_t pd[NUM_PMDS];
	pfarg_load_t load_args;
	struct pollfd pollfd;
	pfm_msg_t msg;
	unsigned int i, num_counters;
	int status, ret;
	int ctx_fd;
	char name[MAX_EVT_NAME_LEN];


	memset(pc, 0, sizeof(pc));
	memset(pd, 0, sizeof(pd));
	memset(ctx, 0, sizeof(ctx));
	memset(&inp, 0, sizeof(inp));
	memset(&outp, 0, sizeof(outp));
	memset(&load_args, 0, sizeof(load_args));

	pfm_get_num_counters(&num_counters);

	if (pfm_get_cycle_event(&inp.pfp_events[0]) != PFMLIB_SUCCESS)
		fatal_error("cannot find cycle event\n");

	if (pfm_get_inst_retired_event(&inp.pfp_events[1]) != PFMLIB_SUCCESS)
		fatal_error("cannot find inst retired event\n");
	i = 2;

	if (num_counters < i) {
		i = num_counters;
		printf("too many events provided (max=%d events), using first %d event(s)\n", num_counters, i);
	}

	/*
	 * set the privilege mode:
	 * 	PFM_PLM3 : user level
	 * 	PFM_PLM0 : kernel level
	 */
	inp.pfp_dfl_plm   = PFM_PLM3;

	/*
	 * how many counters we use
	 */
	inp.pfp_event_count = i;

	/*
	 * let the library figure out the values for the PMCS
	 */
	if ((ret=pfm_dispatch_events(&inp, NULL, &outp, NULL)) != PFMLIB_SUCCESS) {
		fatal_error("cannot configure events: %s\n", pfm_strerror(ret));
	}
	/*
	 * now create a context. we will later attach it to the task we are creating.
	 */
	if (perfmonctl(0, PFM_CREATE_CONTEXT, ctx, 1) == -1) {
		if (errno == ENOSYS) {
			fatal_error("Your kernel does not have performance monitoring support!\n");
		}
		fatal_error("Can't create PFM context %s\n", strerror(errno));
	}
	/*
	 * extract the identifier for our context
	 */
	ctx_fd = ctx[0].ctx_fd;

	/*
	 * use our file descriptor for the poll.
	 * we are interested in read events only.
	 */
	pollfd.fd     = ctx_fd;
	pollfd.events = POLLIN;


	/*
	 * Now prepare the argument to initialize the PMDs and PMCS.
	 * We must use pfp_pmc_count to determine the number of PMCs to initialize.
	 * We must use pfp_event_count to determine the number of PMDs to initialize.
	 * Some events cause extra PMCs to be used, so pfp_pmc_count may be >= pfp_event_count.
	 *
	 * This step is new compared to libpfm-2.x. It is necessary because the library no
	 * longer knows about the kernel data structures.
	 */
	for (i=0; i < outp.pfp_pmc_count; i++) {
		pc[i].reg_num   = outp.pfp_pmcs[i].reg_num;
		pc[i].reg_value = outp.pfp_pmcs[i].reg_value;
	}

	/*
	 * the PMCs controlling the events ALWAYS come first, that's why this loop
	 * is safe even when extra PMCs are needed to support a particular event.
	 */
	for (i=0; i < inp.pfp_event_count; i++) {
		pd[i].reg_num   = pc[i].reg_num;
	}

	/*
	 * Now program the registers
	 *
	 * We don't use the same variable to indicate the number of elements passed to
	 * the kernel because, as we said earlier, pc may contain more elements than
	 * the number of events we specified, i.e., it contains more than just counting monitors.
	 */

	if (perfmonctl(ctx_fd, PFM_WRITE_PMCS, pc, outp.pfp_pmc_count) == -1) {
		fatal_error("perfmonctl error PFM_WRITE_PMCS errno %d\n",errno);
	}

	if (perfmonctl(ctx_fd, PFM_WRITE_PMDS, pd, inp.pfp_event_count) == -1) {
		fatal_error("perfmonctl error PFM_WRITE_PMDS errno %d\n",errno);
	}

	ret = ptrace(PTRACE_ATTACH, pid, NULL, 0);
	if (ret == -1) {
		fatal_error("cannot attach to %d: %s\n", pid, strerror(errno));
	}

	/*
	 * wait for the child to be actually stopped
	 */
	waitpid(pid, &status, WUNTRACED);

	/*
	 * check if process exited early
	 */
	if (WIFEXITED(status)) {
		fatal_error("command process %d exited too early with status %d\n", pid, WEXITSTATUS(status));
	}

	/*
	 * the task is stopped at this point
	 */
	
	/*
	 * now we load (i.e., attach) the context to the monitored task
	 */
	load_args.load_pid = pid;

	if (perfmonctl(ctx_fd, PFM_LOAD_CONTEXT, &load_args, 1) == -1) {
		fatal_error("perfmonctl error PFM_LOAD_CONTEXT errno %d\n",errno);
	}

	/*
	 * activate monitoring. The task is still STOPPED at this point. Monitoring
	 * will not take effect until the execution of the task is resumed.
	 */
	if (perfmonctl(ctx_fd, PFM_START, NULL, 0) == -1) {
		fatal_error("perfmonctl error PFM_START errno %d\n",errno);
	}

	/*
	 * now resume execution of the task, effectively activating
	 * monitoring.
	 */
	ptrace(PTRACE_DETACH, pid, NULL, 0);
	printf("attached to [%d], timeout set to %lu seconds\n", pid, delay);

	/*
	 * now the task is running
	 */

	/*
	 * We cannot simply do a waitpid() because we may be attaching to a process
	 * totally unrelated to our program. Instead we use a perfmon facility that
	 * notifies us when the monitoring task is exiting.
	 *
	 * When a task with a monitoring context attached to it exits, a PFM_MSG_END
	 * is generated. It can be retrieved with a simple read() on the context's descriptor.
	 *
	 * Another reason why you might return from the read is if there was a counter
	 * overflow, unlikely in this example.
	 *
	 * To measure only for short period of time, use select or poll with a timeout,
	 * see task_attach_timeout.c
	 *
	 */
	ret = poll(&pollfd, 1, delay*1000);
	switch( ret ) {
		case -1:
			fatal_error("cannot read from descriptor: %s\n", strerror(errno));
			/* no return */
		case  1:
			/*
	 		 * there is a message, i.e., the program exited before our timeout
	 		 */
			if (ret == 1) {
				/*
		 		* extract message
		 		*/
				ret = read(ctx_fd, &msg, sizeof(msg));

				if (msg.type != PFM_MSG_END) {
					fatal_error("unexpected msg type : %d\n", msg.type);
				}
			}
			break;
		case   0:
			/*
			 * we timed out, we need to stop the task to unload
			 */
			ret = ptrace(PTRACE_ATTACH, pid, NULL, 0);
			if (ret == -1) {
				fatal_error("cannot attach to %d: %s\n", pid, strerror(errno));
			}
			/*
			 * wait for task to be actually stopped
			 */
			waitpid(pid, &status, WUNTRACED);

			/*
	 		 * check if process exited, then no need to unload
	 		 */
			if (WIFEXITED(status)) goto read_results;

			if (perfmonctl(ctx_fd, PFM_UNLOAD_CONTEXT, NULL, 0) == -1) {
				fatal_error("perfmonctl error PFM_UNLOAD_CONTEXT errno %d\n",errno);
			}

			/*
			 * let it run free again
			 */
			ptrace(PTRACE_DETACH, pid, NULL, 0);
			break;
		default:
			fatal_error("unexpected return from poll: %d\n", ret);
	}

read_results:
	/*
	 * now simply read the results.
	 */
	if (perfmonctl(ctx_fd, PFM_READ_PMDS, pd, inp.pfp_event_count) == -1) {
		fatal_error("perfmonctl error READ_PMDS errno %d\n",errno);
		return -1;
	}

	/*
	 * print the results
	 *
	 * It is important to realize that the first event we specified may not
	 * be in PMD4. Not all events can be measured by any monitor. That's why
	 * we need to use the pc[] array to figure out where event i was allocated.
	 *
	 */
	for (i=0; i < inp.pfp_event_count; i++) {
		pfm_get_full_event_name(&inp.pfp_events[i], name, MAX_EVT_NAME_LEN);
		printf("PMD%u %20"PRIu64" %s\n",
			pd[i].reg_num,
			pd[i].reg_value,
			name);
	}
	/*
	 * free the context
	 */
	close(ctx_fd);

	return 0;
}
int
main(void)
{
	int ret;
	int type = 0;
	char *name;
	pid_t pid = getpid();
	pfmlib_param_t evt;
	pfmlib_ita2_param_t ita2_param;
	pfarg_reg_t pd[NUM_PMDS];
	pfarg_context_t ctx[1];
	pfmlib_options_t pfmlib_options;

	/*
	 * Initialize pfm library (required before we can use it)
	 */
	if (pfm_initialize() != PFMLIB_SUCCESS) {
		fatal_error("Can't initialize library\n");
	}

	/*
	 * Let's make sure we run this on the right CPU
	 */
	pfm_get_pmu_type(&type);
	if (type != PFMLIB_ITANIUM2_PMU) {
		char *model; 
		pfm_get_pmu_name(&model);
		fatal_error("this program does not work with the %s PMU\n", model);
	}

	/*
	 * pass options to library (optional)
	 */
	memset(&pfmlib_options, 0, sizeof(pfmlib_options));
	pfmlib_options.pfm_debug = 0; /* set to 1 for debug */
	pfmlib_options.pfm_verbose = 0; /* set to 1 for verbose */
	pfm_set_options(&pfmlib_options);



	memset(pd, 0, sizeof(pd));
	memset(ctx, 0, sizeof(ctx));

	memset(&evt,0, sizeof(evt));
	memset(&ita2_param,0, sizeof(ita2_param));

	/*
	 * because we use a model specific feature, we must initialize the
	 * model specific pfmlib parameter structure and link it to the
	 * common structure.
	 * The magic number is a simple mechanism used by the library to check
	 * that the model specific data structure is decent. You must set it manually
	 * otherwise the model specific feature won't work.
	 */
	ita2_param.pfp_magic = PFMLIB_ITA2_PARAM_MAGIC;
	evt.pfp_model       = &ita2_param;

	/*
	 * We indicate that we are using the PMC8 opcode matcher. This is required
	 * so that the library adds PMC8 to the list of PMCs to program during
	 * pfm_dispatch_events().
	 */
	ita2_param.pfp_ita2_pmc8.opcm_used = 1;

	/*
	 * We want to match all the br.cloop in our test function.
	 * This branch is an IP-relative branch for which the major
	 * opcode (bits [40-37]=4) and the btype field is 5 (which represents
	 * bits[6-8]) so it is included in the match/mask fields of PMC8. 
	 * It is necessarily in a B slot.
	 *
	 * We don't care which operands are used with br.cloop therefore
	 * the mask field of pmc8 is set such that only the 4 bits of the
	 * opcode and 3 bits of btype must match exactly. This is accomplished by 
	 * clearing the top 4 bits and bits [6-8] of the mask field and setting the 
	 * remaining bits.  Similarly, the match field only has the opcode value  and btype
	 * set according to the encoding of br.cloop, the
	 * remaining bits are zero. Bit 60 of PMC8 is set to indicate
	 * that we look only in B slots  (this is the only possibility for
	 * this instruction anyway). 
	 *
	 * So the binary representation of the value for PMC8 is as follows:
	 *
	 * 6666555555555544444444443333333333222222222211111111110000000000
	 * 3210987654321098765432109876543210987654321098765432109876543210
	 * ----------------------------------------------------------------
	 * 0001010000000000000000101000000000000011111111111111000111111000
	 * 
	 * which yields a value of 0x1400028003fff1f8.
	 *
	 * Depending on the level of optimization to compile this code, it may 
	 * be that the count reported could be zero, if the compiler uses a br.cond 
	 * instead of br.cloop.
	 *
	 *
	 * The 0x1 sets the ig_ad field to make sure we ignore any range restriction.
	 * Also bit 2 must always be set
	 */
	ita2_param.pfp_ita2_pmc8.pmc_val = 0x1400028003fff1fa;

	/*
	 * To count the number of occurrences of this instruction, we must
	 * program a counting monitor with the IA64_TAGGED_INST_RETIRED_PMC8
	 * event.
	 */
	if (pfm_find_event_byname("IA64_TAGGED_INST_RETIRED_IBRP0_PMC8", &evt.pfp_events[0].event) != PFMLIB_SUCCESS) {
		fatal_error("cannot find event IA64_TAGGED_INST_RETIRED_IBRP0_PMC8\n");
	}

	/*
	 * set the privilege mode:
	 * 	PFM_PLM3 : user level only
	 */
	evt.pfp_dfl_plm   = PFM_PLM3; 
	/*
	 * how many counters we use
	 */
	evt.pfp_event_count = 1;

	/*
	 * let the library figure out the values for the PMCS
	 */
	if ((ret=pfm_dispatch_events(&evt)) != PFMLIB_SUCCESS) {
		fatal_error("cannot configure events: %s\n", pfm_strerror(ret));
	}
	/*
	 * for this example, we have decided not to get notified
	 * on counter overflows and the monitoring is not to be inherited
	 * in derived tasks
	 */
	ctx[0].ctx_flags = PFM_FL_INHERIT_NONE;

	/*
	 * now create the context for self monitoring/per-task
	 */
	if (perfmonctl(pid, PFM_CREATE_CONTEXT, ctx, 1) == -1 ) {
		if (errno == ENOSYS) {
			fatal_error("Your kernel does not have performance monitoring support!\n");
		}
		fatal_error("Can't create PFM context %s\n", strerror(errno));
	}
	/* 
	 * Must be done before any PMC/PMD calls (unfreeze PMU). Initialize
	 * PMC/PMD to safe values. psr.up is cleared.
	 */
	if (perfmonctl(pid, PFM_ENABLE, NULL, 0) == -1) {
		fatal_error("perfmonctl error PFM_ENABLE errno %d\n",errno);
	}

	/*
	 * Now prepare the argument to initialize the PMD.
	 */
	pd[0].reg_num = evt.pfp_pc[0].reg_num;

	/*
	 * Now program the registers
	 *
	 * We don't use the same variable to indicate the number of elements passed to
	 * the kernel because, as we said earlier, pc may contain more elements than
	 * the number of events we specified, i.e., it contains more than just counting monitors.
	 */
	if (perfmonctl(pid, PFM_WRITE_PMCS, evt.pfp_pc, evt.pfp_pc_count) == -1) {
		fatal_error("perfmonctl error PFM_WRITE_PMCS errno %d\n",errno);
	}
	if (perfmonctl(pid, PFM_WRITE_PMDS, pd, evt.pfp_event_count) == -1) {
		fatal_error("perfmonctl error PFM_WRITE_PMDS errno %d\n",errno);
	}

	/*
	 * Let's roll now.
	 */
	pfm_start();

	do_test(100UL);

	pfm_stop();

	/* 
	 * now read the results
	 */
	if (perfmonctl(pid, PFM_READ_PMDS, pd, evt.pfp_event_count) == -1) {
		fatal_error("perfmonctl error READ_PMDS errno %d\n",errno);
	}

	/* 
	 * print the results
	 */
	pfm_get_event_name(evt.pfp_events[0].event, &name);
	printf("PMD%u %20lu %s\n", 
			pd[0].reg_num, 
			pd[0].reg_value, 
			name);

	if (pd[0].reg_value != 0) 
		printf("compiler used br.cloop\n");
	else
		printf("compiler did not use br.cloop\n");

	/* 
	 * let's stop this now
	 */
	if (perfmonctl(pid, PFM_DESTROY_CONTEXT, NULL, 0) == -1) {
		fatal_error("perfmonctl error PFM_DESTROY errno %d\n",errno);
	}
	return 0;
}
Example No. 15
static void
process_smpl_buf(int id, unsigned long smpl_pmd_mask, int need_restart)
{
	static unsigned long last_overflow = ~0UL; /* initialize to biggest value possible */
	smpl_hdr_t *hdr = (smpl_hdr_t *)buf_addr;
	smpl_entry_t *ent;
	unsigned long count, entry, *reg, pos, msk;
	unsigned long entry_size;
	int j;
	


	printf("processing %s buffer at %p\n", need_restart==0 ? "leftover" : "", hdr);
	if (hdr->hdr_overflows <= last_overflow && last_overflow != ~0UL) {
		warning("skipping identical set of samples %lu <= %lu\n",
			hdr->hdr_overflows, last_overflow);
		return;	
	}
	last_overflow = hdr->hdr_overflows;

	count = hdr->hdr_count;

	ent   = (smpl_entry_t *)(hdr+1);
	pos   = (unsigned long)ent;
	entry = collect_samples;

	/*
	 * in this example program, we use fixed-size entries, therefore we
	 * can compute the entry size in advance. Perfmon-2 supports variable
	 * size entries.
	 */
	entry_size = sizeof(smpl_entry_t)+(bit_weight(smpl_pmd_mask)<<3);

	while(count--) {
		printf("entry %ld PID:%d CPU:%d IIP:0x%016lx\n",
			entry,
			ent->pid,
			ent->cpu,
			ent->ip);

		printf("\tOVFL: %d LAST_VAL: %lu\n", ent->ovfl_pmd, -ent->last_reset_val);

		/*
		 * print body: additional PMDs recorded
		 * PMD are recorded in increasing index order
		 */
		reg = (unsigned long *)(ent+1);

		for(j=0, msk = smpl_pmd_mask; msk; msk >>=1, j++) {	
			if ((msk & 0x1) == 0) continue;
			printf("PMD%-2d = 0x%016lx\n", j, *reg);
			reg++;
		}
		/*
		 * we could have removed this and used:
		 * ent = (smpl_entry_t *)reg
		 * instead.
		 */
		pos += entry_size;
		ent = (smpl_entry_t *)pos;
		entry++;
	}
	collect_samples = entry;

	/*
	 * reactivate monitoring once we are done with the samples
	 *
	 * Note that this call can fail with EBUSY in non-blocking mode
	 * as the task may have disappeared while we were processing
	 * the samples.
	 */
	if (need_restart && perfmonctl(id, PFM_RESTART, 0, 0) == -1) {
		if (errno != EBUSY)
			fatal_error("perfmonctl error PFM_RESTART errno %d\n",errno);
		else
			warning("PFM_RESTART: task has probably terminated \n");
	}
}
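
bit_weight(), used above to compute the per-entry size, is just a population count of the sampled PMD mask. A minimal sketch; the original may be implemented differently:

static unsigned long bit_weight(unsigned long mask)
{
	unsigned long w = 0;

	/* count the bits set in smpl_pmd_mask: one 8-byte slot per PMD */
	for (; mask; mask >>= 1)
		w += mask & 0x1;

	return w;
}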
Example No. 16
int
mainloop(char **arg)
{
	ctx_arg_t ctx;
	pfmlib_input_param_t inp;
	pfmlib_output_param_t outp;
	pfarg_reg_t pd[NUM_PMDS];
	pfarg_reg_t pc[NUM_PMCS];
	pfarg_load_t load_args;
	pfm_msg_t msg;
	unsigned long ovfl_count = 0UL;
	unsigned long sample_period;
	unsigned long smpl_pmd_mask = 0UL;
	pid_t pid;
	int status, ret, fd;
	unsigned int i, num_counters;

	/*
	 * initialize all locals
	 */
	memset(&ctx, 0, sizeof(ctx));
	memset(&inp,0, sizeof(inp));
	memset(&outp,0, sizeof(outp));
	memset(pd, 0, sizeof(pd));
	memset(pc, 0, sizeof(pc));

	/*
	 * locate events
	 */
	pfm_get_num_counters(&num_counters);

	if (pfm_get_cycle_event(&inp.pfp_events[0]) != PFMLIB_SUCCESS)
		fatal_error("cannot find cycle event\n");

	if (pfm_get_inst_retired_event(&inp.pfp_events[1]) != PFMLIB_SUCCESS)
		fatal_error("cannot find inst retired event\n");

	i = 2;

	if (i > num_counters) {
		i = num_counters;
		printf("too many events provided (max=%d events), using first %d event(s)\n", num_counters, i);
	}
	/*
	 * set the privilege mode:
	 * 	PFM_PLM3 : user level
	 * 	PFM_PLM0 : kernel level
	 */
	inp.pfp_dfl_plm   = PFM_PLM3;
	/*
	 * how many counters we use
	 */
	inp.pfp_event_count = i;

	/*
	 * let the library figure out the values for the PMCS
	 */
	if ((ret=pfm_dispatch_events(&inp, NULL, &outp, NULL)) != PFMLIB_SUCCESS) {
		fatal_error("cannot configure events: %s\n", pfm_strerror(ret));
	}
	
	/*
	 * Now prepare the argument to initialize the PMDs and PMCS.
	 * We must use pfp_pmc_count to determine the number of PMCs to initialize.
	 * We must use pfp_event_count to determine the number of PMDs to initialize.
	 * Some events cause extra PMCs to be used, so pfp_pmc_count may be >= pfp_event_count.
	 *
	 * This step is new compared to libpfm-2.x. It is necessary because the library no
	 * longer knows about the kernel data structures.
	 */

	for (i=0; i < outp.pfp_pmc_count; i++) {
		pc[i].reg_num   = outp.pfp_pmcs[i].reg_num;
		pc[i].reg_value = outp.pfp_pmcs[i].reg_value;
	}

	/*
	 * the PMCs controlling the events ALWAYS come first, that's why this loop
	 * is safe even when extra PMCs are needed to support a particular event.
	 */
	for (i=0; i < inp.pfp_event_count; i++) {
		pd[i].reg_num   = pc[i].reg_num;
		/* build sampling mask */
		smpl_pmd_mask  |= 1UL << pc[i].reg_num;
	}

	printf("smpl_pmd_mask=0x%lx\n", smpl_pmd_mask);

	/*
	 * now we indicate what to record when each counter overflows.
	 * In our case, we only have one sampling period and it is set for the
	 * first event. Here we indicate that when the sampling period expires
	 * then we want to record the value of all the other counters.
	 *
	 * We exclude the first counter in this case.
	 */
	smpl_pmd_mask  &= ~(1UL << pc[0].reg_num);

	pc[0].reg_smpl_pmds[0] = smpl_pmd_mask;

	/*
	 * when our sampling counter overflows, we want to be notified.
	 * The notification will come ONLY when the sampling buffer
	 * becomes full.
	 *
	 * We also activate randomization of the sampling period.
	 */
	pc[0].reg_flags	|= PFM_REGFL_OVFL_NOTIFY | PFM_REGFL_RANDOM;

	/*
	 * we also want to reset the other PMDs on
	 * every overflow. If we do not set
	 * this, the non-overflowed counters
	 * will be untouched.
	 */
	pc[0].reg_reset_pmds[0] |= smpl_pmd_mask;

	sample_period = 1000000UL;

	pd[0].reg_value       = (~0UL) - sample_period + 1;
	pd[0].reg_short_reset = (~0UL) - sample_period + 1;
	pd[0].reg_long_reset  = (~0UL) - sample_period + 1;
	/*
	 * setup randomization parameters, we allow a range of up to +255 here.
	 */
	pd[0].reg_random_seed = 5;
	pd[0].reg_random_mask = 0xff;


	printf("programming %u PMCS and %u PMDS\n", outp.pfp_pmc_count, inp.pfp_event_count);

	/*
	 * prepare context structure.
	 *
	 * format specific parameters MUST be concatenated to the regular
	 * pfarg_context_t structure. For convenience, the default sampling
	 * format provides a data structure that already combines the pfarg_context_t
	 * with what is needed for this format.
	 */

	 /*
	  * We initialize the format specific information.
	  * The format is identified by its UUID which must be copied
	  * into the ctx_buf_fmt_id field.
	  */
	memcpy(ctx.ctx_arg.ctx_smpl_buf_id, buf_fmt_id, sizeof(pfm_uuid_t));

	/*
	 * the size of the buffer is indicated in bytes (not entries).
	 *
	 * The kernel will record into the buffer up to a certain point.
	 * No partial samples are ever recorded.
	 */
	ctx.buf_arg.buf_size = 8192;

	/*
	 * now create our perfmon context.
	 */
	if (perfmonctl(0, PFM_CREATE_CONTEXT, &ctx, 1) == -1 ) {
		if (errno == ENOSYS) {
			fatal_error("Your kernel does not have performance monitoring support!\n");
		}
		fatal_error("Can't create PFM context %s\n", strerror(errno));
	}

	/*
	 * extract the file descriptor we will use to
	 * identify this newly created context
	 */
	fd = ctx.ctx_arg.ctx_fd;

	/*
	 * retrieve the virtual address at which the sampling
	 * buffer has been mapped
	 */
	buf_addr = ctx.ctx_arg.ctx_smpl_vaddr;

	printf("context [%d] buffer mapped @%p\n", fd, buf_addr);

	/*
	 * Now program the registers
	 */
	if (perfmonctl(fd, PFM_WRITE_PMCS, pc, outp.pfp_pmc_count) == -1) {
		fatal_error("perfmonctl error PFM_WRITE_PMCS errno %d\n",errno);
	}
	/*
	 * initialize the PMDs
	 */
	if (perfmonctl(fd, PFM_WRITE_PMDS, pd, inp.pfp_event_count) == -1) {
		fatal_error("perfmonctl error PFM_WRITE_PMDS errno %d\n",errno);
	}

	/*
	 * Create the child task
	 */
	if ((pid=fork()) == -1) fatal_error("Cannot fork process\n");

	/*
	 * In order to get the PFM_MSG_END message, it is important
	 * to ensure that the child task does not inherit the file
	 * descriptor of the context. By default, file descriptors
	 * are inherited across exec(). We explicitly close it
	 * here. We could have set it up through fcntl(FD_CLOEXEC)
	 * to achieve the same thing.
	 */
	if (pid == 0) {
		close(fd);
		child(arg);
	}

	/*
	 * wait for the child to exec
	 */
	waitpid(pid, &status, WUNTRACED);

	/*
	 * process is stopped at this point
	 */
	if (WIFEXITED(status)) {
		warning("task %s [%d] exited already status %d\n", arg[0], pid, WEXITSTATUS(status));
		goto terminate_session;
	}

	/*
	 * attach context to stopped task
	 */
	load_args.load_pid = pid;
	if (perfmonctl(fd, PFM_LOAD_CONTEXT, &load_args, 1) == -1) {
		fatal_error("perfmonctl error PFM_LOAD_CONTEXT errno %d\n",errno);
	}
	/*
	 * activate monitoring for stopped task.
	 * (nothing will be measured at this point)
	 */
	if (perfmonctl(fd, PFM_START, NULL, 0) == -1) {
		fatal_error(" perfmonctl error PFM_START errno %d\n",errno);
	}
	/*
	 * detach child. Side effect includes
	 * activation of monitoring.
	 */
	ptrace(PTRACE_DETACH, pid, NULL, 0);

	/*
	 * core loop
	 */
	for(;;) {
		/*
		 * wait for overflow/end notification messages
		 */
		ret = read(fd, &msg, sizeof(msg));
		if (ret == -1) {
			fatal_error("cannot read perfmon msg: %s\n", strerror(errno));
		}
		switch(msg.type) {
			case PFM_MSG_OVFL: /* the sampling buffer is full */
				process_smpl_buf(fd, smpl_pmd_mask, 1);
				ovfl_count++;
				break;
			case PFM_MSG_END: /* monitored task terminated */
				printf("task terminated\n");
				goto terminate_session;
			default: fatal_error("unknown message type %d\n", msg.type);
		}
	}
terminate_session:
	/*
	 * cleanup child
	 */
	waitpid(pid, &status, 0);

	/*
	 * check for any leftover samples
	 */
	process_smpl_buf(fd, smpl_pmd_mask, 0);

	/*
	 * destroy perfmon context
	 */
	close(fd);

	printf("%lu samples collected in %lu buffer overflows\n", collect_samples, ovfl_count);

	return 0;
}
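
ctx_arg_t is not shown in this listing. As the comment in mainloop() explains, it just concatenates the sampling-format argument to the regular pfarg_context_t. A sketch assuming the default sampling format types from perfmon_default_smpl.h; the exact type names and header location vary between perfmon versions:

#include <perfmon/perfmon_default_smpl.h>	/* assumed location of the format types */

typedef struct {
	pfarg_context_t		ctx_arg;	/* regular context argument (ctx_fd, ctx_smpl_buf_id, ...) */
	pfm_default_smpl_arg_t	buf_arg;	/* format-specific argument (buf_size, ...) */
} ctx_arg_t;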
Example No. 17
int
main(int argc, char **argv)
{
	pfarg_context_t ctx[1];
	pfmlib_input_param_t inp;
	pfmlib_output_param_t outp;
	pfarg_reg_t pc[NUM_PMCS];
	pfarg_load_t load_args;
	pfmlib_options_t pfmlib_options;
	struct sigaction act;
	size_t len;
	unsigned int i, num_counters;
	int ret;

	/*
	 * Initialize pfm library (required before we can use it)
	 */
	if (pfm_initialize() != PFMLIB_SUCCESS) {
		printf("Can't initialize library\n");
		exit(1);
	}

	/*
	 * Install the signal handler (SIGIO)
	 */
	memset(&act, 0, sizeof(act));
	act.sa_handler = (sig_t)sigio_handler;
	sigaction (SIGIO, &act, 0);

	/*
	 * pass options to library (optional)
	 */
	memset(&pfmlib_options, 0, sizeof(pfmlib_options));
	pfmlib_options.pfm_debug = 0; /* set to 1 for debug */
	pfm_set_options(&pfmlib_options);

	memset(pc, 0, sizeof(pc));
	memset(ctx, 0, sizeof(ctx));
	memset(&load_args, 0, sizeof(load_args));
	memset(&inp,0, sizeof(inp));
	memset(&outp,0, sizeof(outp));

	pfm_get_num_counters(&num_counters);

	if (pfm_get_cycle_event(&inp.pfp_events[0]) != PFMLIB_SUCCESS)
		fatal_error("cannot find cycle event\n");

	if (pfm_get_inst_retired_event(&inp.pfp_events[1]) != PFMLIB_SUCCESS)
		fatal_error("cannot find inst retired event\n");

	i = 2;

	if (i > num_counters) {
		i = num_counters;
		printf("too many events provided (max=%d events), using first %d event(s)\n", num_counters, i);
	}

	/*
	 * set the default privilege mode for all counters:
	 * 	PFM_PLM3 : user level only
	 */
	inp.pfp_dfl_plm = PFM_PLM3;

	/*
	 * how many counters we use
	 */
	inp.pfp_event_count = i;

	/*
	 * if we have a second event, remember its name so the SIGIO handler
	 * can report it with each notification
	 */
	if (i > 1) {
		inp.pfp_event_count = i;

		pfm_get_max_event_name_len(&len);

		event1_name = malloc(len+1);
		if (event1_name == NULL)
			fatal_error("cannot allocate event name\n");

		pfm_get_full_event_name(&inp.pfp_events[1], event1_name, len+1);
	}

	/*
	 * let the library figure out the values for the PMCS
	 */
	if ((ret=pfm_dispatch_events(&inp, NULL, &outp, NULL)) != PFMLIB_SUCCESS) {
		fatal_error("Cannot configure events: %s\n", pfm_strerror(ret));
	}
	/*
	 * when we know we are self-monitoring and we have only one context, then
	 * when we get an overflow we know where it is coming from. Therefore we can
	 * save the call to the kernel to extract the notification message. By default,
	 * a message is generated. The queue of messages has a limited size, therefore
	 * it is important to clear the queue by reading the message on overflow. Failure
	 * to do so may result in a queue full and you will lose notification messages.
	 *
	 * With the PFM_FL_OVFL_NO_MSG, no message will be queue, but you will still get
	 * the signal. Similarly, the PFM_MSG_END will be generated.
	 */
	ctx[0].ctx_flags = PFM_FL_OVFL_NO_MSG;

	/*
	 * now create the context for self monitoring/per-task
	 */
	if (perfmonctl(0, PFM_CREATE_CONTEXT, ctx, 1) == -1 ) {
		if (errno == ENOSYS) {
			fatal_error("Your kernel does not have performance monitoring support!\n");
		}
		fatal_error("Can't create PFM context %s\n", strerror(errno));
	}
	ctx_fd = ctx->ctx_fd;

	/*
	 * Now prepare the argument to initialize the PMDs and PMCS.
	 * We use pfp_pmc_count to determine the number of registers to
	 * setup. Note that this field can be >= pfp_event_count.
	 */

	for (i=0; i < outp.pfp_pmc_count; i++) {
		pc[i].reg_num   = outp.pfp_pmcs[i].reg_num;
		pc[i].reg_value = outp.pfp_pmcs[i].reg_value;
	}

	for (i=0; i < inp.pfp_event_count; i++) {
		pd[i].reg_num   = pc[i].reg_num;
	}
	/*
	 * We want to get notified when the counter used for our first
	 * event overflows
	 */
	pc[0].reg_flags 	|= PFM_REGFL_OVFL_NOTIFY;
	pc[0].reg_reset_pmds[0] |= 1UL << outp.pfp_pmcs[1].reg_num;

	/*
	 * we arm the first counter, such that it will overflow
	 * after SMPL_PERIOD events have been observed
	 */
	pd[0].reg_value       = (~0UL) - SMPL_PERIOD + 1;
	pd[0].reg_long_reset  = (~0UL) - SMPL_PERIOD + 1;
	pd[0].reg_short_reset = (~0UL) - SMPL_PERIOD + 1;

	/*
	 * Now program the registers
	 *
	 * We don't use the same variable to indicate the number of elements passed to
	 * the kernel because, as we said earlier, pc may contain more elements than
	 * the number of events we specified, i.e., contains more than counting monitors.
	 */
	if (perfmonctl(ctx_fd, PFM_WRITE_PMCS, pc, outp.pfp_pmc_count) == -1) {
		fatal_error("perfmonctl error PFM_WRITE_PMCS errno %d\n",errno);
	}

	if (perfmonctl(ctx_fd, PFM_WRITE_PMDS, pd, inp.pfp_event_count) == -1) {
		fatal_error("perfmonctl error PFM_WRITE_PMDS errno %d\n",errno);
	}

	/*
	 * we want to monitor ourself
	 */
	load_args.load_pid = getpid();

	if (perfmonctl(ctx_fd, PFM_LOAD_CONTEXT, &load_args, 1) == -1) {
		fatal_error("perfmonctl error PFM_WRITE_PMDS errno %d\n",errno);
	}

	/*
	 * setup asynchronous notification on the file descriptor
	 */
	ret = fcntl(ctx_fd, F_SETFL, fcntl(ctx_fd, F_GETFL, 0) | O_ASYNC);
	if (ret == -1) {
		fatal_error("cannot set ASYNC: %s\n", strerror(errno));
	}

	/*
	 * get ownership of the descriptor
	 */
	ret = fcntl(ctx_fd, F_SETOWN, getpid());
	if (ret == -1) {
		fatal_error("cannot setown: %s\n", strerror(errno));
	}

	/*
	 * Let's roll now
	 */
	pfm_self_start(ctx_fd);

	busyloop();

	pfm_self_stop(ctx_fd);

	/*
	 * free our context
	 */
	close(ctx_fd);

	return 0;
}
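
busyloop() is not included in the listing; it only needs to execute user-level instructions until enough SIGIO notifications have arrived. A sketch, assuming notification_received is the (ideally volatile) global incremented by sigio_handler() in Example No. 5 and that we stop after a fixed number of notifications so the program terminates:

static void busyloop(void)
{
	/* spin at user level; sigio_handler() bumps notification_received
	 * each time the sampling counter overflows */
	while (notification_received < 10)
		;
}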