static struct tep_record * get_return_for_leaf(struct trace_seq *s, int cpu, int cur_pid, unsigned long long cur_func, struct tep_record *next, struct tracecmd_ftrace *finfo) { unsigned long long val; unsigned long long type; unsigned long long pid; /* Searching a common field, can use any event */ if (tep_get_common_field_val(s, finfo->fgraph_ret_event, "common_type", next, &type, 1)) return NULL; if (type != finfo->fgraph_ret_id) return NULL; if (tep_get_common_field_val(s, finfo->fgraph_ret_event, "common_pid", next, &pid, 1)) return NULL; if (cur_pid != pid) return NULL; /* We aleady know this is a funcgraph_ret_event */ if (tep_get_field_val(s, finfo->fgraph_ret_event, "func", next, &val, 1)) return NULL; if (cur_func != val) return NULL; /* this is a leaf, now advance the iterator */ return tracecmd_read_data(tracecmd_curr_thread_handle, cpu); }
/*
 * update_filter_tasks - recompute per-record visibility for the store.
 *
 * Lazily resolves the sched events/fields used by task filtering, then
 * walks every CPU's records, marking each row visible or hidden based
 * on the event filter and the task filter.
 */
static void update_filter_tasks(TraceViewStore *store)
{
	struct tracecmd_input *handle;
	struct pevent *pevent;
	struct record *record;
	gint pid;
	gint cpu;
	gint i;

	handle = store->handle;
	pevent = tracecmd_get_pevent(store->handle);

	/* Resolve the sched events only once */
	if (!store->sched_switch_event) {
		store->sched_switch_event =
			pevent_find_event_by_name(pevent, "sched", "sched_switch");
		if (store->sched_switch_event)
			store->sched_switch_next_field =
				pevent_find_any_field(store->sched_switch_event,
						      "next_pid");
		store->sched_wakeup_event =
			pevent_find_event_by_name(pevent, "sched", "sched_wakeup");
		if (store->sched_wakeup_event)
			store->sched_wakeup_pid_field =
				pevent_find_any_field(store->sched_wakeup_event,
						      "pid");
		/*
		 * BUGFIX: this previously looked up "sched_wakeup" a second
		 * time, so the wakeup_new event/field were populated from
		 * the wrong event.
		 */
		store->sched_wakeup_new_event =
			pevent_find_event_by_name(pevent, "sched",
						  "sched_wakeup_new");
		if (store->sched_wakeup_new_event)
			store->sched_wakeup_new_pid_field =
				pevent_find_any_field(store->sched_wakeup_new_event,
						      "pid");
	}

	for (cpu = 0; cpu < store->cpus; cpu++) {
		record = tracecmd_read_cpu_first(handle, cpu);

		for (i = 0; i < store->cpu_items[cpu]; i++) {
			/* The store list must stay in sync with the file */
			g_assert(record->offset == store->cpu_list[cpu][i].offset);

			/* The record may be filtered by the events */
			if (!store->all_events) {
				int ret;
				ret = pevent_filter_match(store->event_filter,
							  record);
				if (ret != FILTER_MATCH) {
					store->cpu_list[cpu][i].visible = 0;
					goto skip;
				}
			}
			pid = pevent_data_pid(pevent, record);
			if (show_task(store, pevent, record, pid))
				store->cpu_list[cpu][i].visible = 1;
			else
				store->cpu_list[cpu][i].visible = 0;
 skip:
			free_record(record);
			record = tracecmd_read_data(handle, cpu);
		}
		/* We must have consumed exactly cpu_items[cpu] records */
		g_assert(record == NULL);
	}

	merge_sort_rows_ts(store);
}
TraceViewStore * trace_view_store_new (struct tracecmd_input *handle) { TraceViewStore *newstore; struct record *data; gint cpu, count, total=0; struct temp { guint64 offset; guint64 ts; struct temp *next; } *list, **next, *rec; newstore = (TraceViewStore*) g_object_new (TRACE_VIEW_STORE_TYPE, NULL); g_assert( newstore != NULL ); newstore->handle = handle; newstore->cpus = tracecmd_cpus(handle); tracecmd_ref(handle); newstore->event_filter = pevent_filter_alloc(tracecmd_get_pevent(handle)); newstore->cpu_list = g_new(TraceViewRecord *, newstore->cpus); g_assert(newstore->cpu_list != NULL); newstore->cpu_items = g_new(gint, newstore->cpus); g_assert(newstore->cpu_items != NULL); newstore->all_cpus = 1; newstore->all_events = 1; newstore->cpu_mask = g_new0(guint64, (newstore->cpus >> 6) + 1); g_assert(newstore->cpu_mask != NULL); mask_set_cpus(newstore, newstore->cpus); for (cpu = 0; cpu < newstore->cpus; cpu++) { count = 0; list = NULL; next = &list; data = tracecmd_read_cpu_first(handle, cpu); while (data) { *next = rec = g_malloc(sizeof(*rec)); g_assert(rec != NULL); rec->offset = data->offset; rec->ts = data->ts; rec->next = NULL; next = &rec->next; free_record(data); count++; data = tracecmd_read_data(handle, cpu); } if (count) { TraceViewRecord *trec; struct temp *t; gint i; rec = list; trec = g_new(TraceViewRecord, count); g_assert(trec != NULL); for (i = 0; i < count; i++) { g_assert(rec != NULL); trec[i].cpu = cpu; trec[i].timestamp = rec->ts; trec[i].offset = rec->offset; trec[i].visible = 1; trec[i].pos = i; t = rec; rec = rec->next; g_free(t); } g_assert(rec == NULL); newstore->cpu_list[cpu] = trec; } else newstore->cpu_list[cpu] = NULL; newstore->cpu_items[cpu] = count; total += count; } newstore->actual_rows = total; newstore->rows = g_malloc(sizeof(*newstore->rows) * total + 1); merge_sort_rows_ts(newstore); return newstore; }
static void do_trace_hist(struct tracecmd_input *handle) { struct pevent *pevent = tracecmd_get_pevent(handle); struct event_format *event; struct pevent_record *record; int cpus; int cpu; int ret; ret = tracecmd_init_data(handle); if (ret < 0) die("failed to init data"); if (ret > 0) die("trace-cmd hist does not work with latency traces\n"); cpus = tracecmd_cpus(handle); /* Need to get any event */ for (cpu = 0; cpu < cpus; cpu++) { record = tracecmd_peek_data(handle, cpu); if (record) break; } if (!record) die("No records found in file"); ret = pevent_data_type(pevent, record); event = pevent_data_event_from_type(pevent, ret); long_size = tracecmd_long_size(handle); common_type_field = pevent_find_common_field(event, "common_type"); if (!common_type_field) die("Can't find a 'type' field?"); common_pid_field = pevent_find_common_field(event, "common_pid"); if (!common_pid_field) die("Can't find a 'pid' field?"); update_sched_wakeup(pevent); update_sched_wakeup_new(pevent); update_sched_switch(pevent); update_function(pevent); update_function_graph_entry(pevent); update_function_graph_exit(pevent); update_kernel_stack(pevent); for (cpu = 0; cpu < cpus; cpu++) { for (;;) { struct pevent_record *record; record = tracecmd_read_data(handle, cpu); if (!record) break; /* If we missed events, just flush out the current stack */ if (record->missed_events) flush_stack(); process_record(pevent, record); free_record(record); } } if (current_pid >= 0) save_call_chain(current_pid, ips, ips_idx, 0); if (pending_pid >= 0) save_call_chain(pending_pid, pending_ips, pending_ips_idx, 1); save_stored_stacks(); sort_chains(); print_chains(pevent); }