Example #1
/* Allocate per-event stat storage sized by the evsel's CPU and thread maps. */
static int perf_evsel__alloc_stats(struct perf_evsel *evsel, bool alloc_raw)
{
	int ncpus = perf_evsel__nr_cpus(evsel);
	int nthreads = thread_map__nr(evsel->threads);

	if (perf_evsel__alloc_stat_priv(evsel) < 0 ||
	    perf_evsel__alloc_counts(evsel, ncpus, nthreads) < 0 ||
	    (alloc_raw && perf_evsel__alloc_prev_raw_counts(evsel, ncpus, nthreads) < 0))
		return -ENOMEM;

	return 0;
}
Example #2
/* If an initial delay was requested, sleep for that long, then enable all counters. */
static void handle_initial_delay(void)
{
	struct perf_evsel *counter;

	if (initial_delay) {
		const int ncpus = cpu_map__nr(evsel_list->cpus),
			nthreads = thread_map__nr(evsel_list->threads);

		usleep(initial_delay * 1000);
		evlist__for_each(evsel_list, counter)
			perf_evsel__enable(counter, ncpus, nthreads);
	}
}
Example #3
/* Read and process every counter in the event list, optionally closing its fds. */
static void read_counters(bool close_counters)
{
	struct perf_evsel *counter;

	evlist__for_each(evsel_list, counter) {
		if (read_counter(counter))
			pr_debug("failed to read counter %s\n", counter->name);

		if (perf_stat_process_counter(&stat_config, counter))
			pr_warning("failed to process counter %s\n", counter->name);

		if (close_counters) {
			perf_evsel__close_fd(counter, perf_evsel__nr_cpus(counter),
					     thread_map__nr(evsel_list->threads));
		}
	}
}
Example #4
/*
 * Read out the results of a single counter:
 * do not aggregate counts across CPUs in system-wide mode
 */
static int read_counter(struct perf_evsel *counter)
{
	int nthreads = thread_map__nr(evsel_list->threads);
	int ncpus = perf_evsel__nr_cpus(counter);
	int cpu, thread;

	if (!counter->supported)
		return -ENOENT;

	if (counter->system_wide)
		nthreads = 1;

	for (thread = 0; thread < nthreads; thread++) {
		for (cpu = 0; cpu < ncpus; cpu++) {
			struct perf_counts_values *count;

			count = perf_counts(counter->counts, cpu, thread);
			if (perf_evsel__read(counter, cpu, thread, count))
				return -1;
		}
	}

	return 0;
}