/*
 * Emit a complete state dump of the system into the given tracing session:
 * processes, file descriptors, interrupts and network interfaces, then
 * force every online CPU through a known (syscall-level) state.
 *
 * Relies on file-scope state defined elsewhere in this file:
 * kernel_threads_to_run (atomic countdown), cpu_work[] (per-CPU delayed
 * work items) and statedump_wq (waitqueue the workers signal) —
 * presumably lttng_statedump_work_func decrements the counter and wakes
 * statedump_wq; confirm against its definition.
 *
 * Returns 0. Must be callable from process context (it may sleep).
 */
static int do_lttng_statedump(struct lttng_session *session)
{
	int cpu;

	/* Bracket the whole dump with start/end tracepoints. */
	trace_lttng_statedump_start(session);
	lttng_enumerate_process_states(session);
	lttng_enumerate_file_descriptors(session);
	/* FIXME lttng_enumerate_vm_maps(session); */
	lttng_list_interrupts(session);
	lttng_enumerate_network_ip_interface(session);
	/* TODO lttng_dump_idt_table(session); */
	/* TODO lttng_dump_softirq_vec(session); */
	/* TODO lttng_list_modules(session); */
	/* TODO lttng_dump_swap_files(session); */
	/*
	 * Fire off a work queue on each CPU. Their sole purpose in life
	 * is to guarantee that each CPU has been in a state where it was in
	 * syscall mode (i.e. not in a trap, an IRQ or a soft IRQ).
	 */
	get_online_cpus();	/* pin the set of online CPUs during the fan-out */
	atomic_set(&kernel_threads_to_run, num_online_cpus());
	for_each_online_cpu(cpu) {
		/* Delay of 0: run as soon as the CPU's workqueue schedules it. */
		INIT_DELAYED_WORK(&cpu_work[cpu], lttng_statedump_work_func);
		schedule_delayed_work_on(cpu, &cpu_work[cpu], 0);
	}
	/* Wait for all threads to run */
	__wait_event(statedump_wq, (atomic_read(&kernel_threads_to_run) == 0));
	put_online_cpus();
	/* Our work is done */
	trace_lttng_statedump_end(session);
	return 0;
}
/*
 * Block until the next input event is available.
 *
 * Clears the pending buffered-status slot (key, unicode, and mouse
 * fields) before blocking in __wait_event(), then refreshes the
 * key autorepeat state once an event has arrived.
 */
void wait_event(void)
{
	struct buffered_status *st = store_status();

	/* Reset every field of the status slot before we block. */
	st->mouse_button = 0;
	st->mouse_moved = 0;
	st->unicode = 0;
	st->key = IKEY_UNKNOWN;

	__wait_event();
	update_autorepeat();
}