void perf_session__delete(struct perf_session *session) { perf_session__destroy_kernel_maps(session); perf_session__delete_dead_threads(session); perf_session__delete_threads(session); perf_session_env__delete(&session->header.env); machines__exit(&session->machines); if (session->file) perf_data_file__close(session->file); free(session); }
/*
 * Allocate and initialize a perf_session.
 *
 * @file:   optional data file; opened here, and owned by the session on
 *          success (perf_session__delete() will close it).
 * @repipe: propagated to session->repipe.
 * @tool:   optional tool; its ordered_samples flag may be cleared if the
 *          evlist lacks sample_id_all support.
 *
 * Returns the new session, or NULL on any failure (all partially
 * acquired resources are released via the goto cleanup chain).
 */
struct perf_session *perf_session__new(struct perf_data_file *file,
				       bool repipe, struct perf_tool *tool)
{
	struct perf_session *session = zalloc(sizeof(*session));

	if (!session)
		goto out;

	session->repipe = repipe;
	INIT_LIST_HEAD(&session->ordered_samples.samples);
	INIT_LIST_HEAD(&session->ordered_samples.sample_cache);
	INIT_LIST_HEAD(&session->ordered_samples.to_free);
	machines__init(&session->machines);

	if (file) {
		if (perf_data_file__open(file))
			goto out_delete;

		session->file = file;

		if (perf_data_file__is_read(file)) {
			if (perf_session__open(session) < 0)
				goto out_close;

			perf_session__set_id_hdr_size(session);
		}
	}

	if (!file || perf_data_file__is_write(file)) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in perf_event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(session) < 0)
			goto out_delete;
	}

	if (tool && tool->ordering_requires_timestamps &&
	    tool->ordered_samples && !perf_evlist__sample_id_all(session->evlist)) {
		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
		tool->ordered_samples = false;
	}

	return session;

 out_close:
	perf_data_file__close(file);
	/*
	 * session->file was already set above; clear it so the fall-through
	 * into perf_session__delete() does not close the file a second time.
	 */
	session->file = NULL;
 out_delete:
	perf_session__delete(session);
 out:
	return NULL;
}