int ltt_statedump_thread(void *data)
{
	struct semaphore work_sema4;
	int cpu;

	printk(KERN_DEBUG "ltt_statedump_thread\n");
	ltt_enumerate_process_states();
	ltt_enumerate_file_descriptors();
	ltt_enumerate_modules();
	ltt_enumerate_vm_maps();
	ltt_enumerate_interrupts();
	ltt_enumerate_network_ip_interface();

	/*
	 * Fire off a work queue on each CPU. Their sole purpose in life
	 * is to guarantee that each CPU has been in a state where it was
	 * in syscall mode (i.e. not in a trap, an IRQ or a soft IRQ).
	 */
	sema_init(&work_sema4, 1 - num_online_cpus());
	lock_cpu_hotplug();
	for_each_online_cpu(cpu) {
		INIT_WORK(&cpu_work[cpu], ltt_statedump_work_func,
			  &work_sema4);
		schedule_delayed_work_on(cpu, &cpu_work[cpu], 0);
	}
	unlock_cpu_hotplug();

	/* Wait for all work queues to have completed */
	down(&work_sema4);

	/* Our work is done */
	printk(KERN_DEBUG "trace_statedump_statedump_end\n");
	trace_statedump_statedump_end();
	do_exit(0);
	return 0;
}
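The per-CPU work function itself is not part of the listing. A minimal sketch of what it would look like in this semaphore-based variant follows; the name ltt_statedump_work_func comes from the listing, but the body is an assumption. Since the semaphore starts at 1 - num_online_cpus(), the last of the N up() calls raises the count to 1 and releases the down() above.

/*
 * Sketch (assumed body), pre-2.6.20 three-argument INIT_WORK style.
 * Merely running here proves this CPU passed through the scheduler
 * in process context, i.e. was not stuck in a trap or (soft) IRQ.
 */
static void ltt_statedump_work_func(void *sema)
{
	/* The final up() brings the count from 1 - N to 1. */
	up((struct semaphore *)sema);
}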
static int do_ltt_statedump(void)
{
	int cpu;

	printk(KERN_DEBUG "do_ltt_statedump\n");
	ltt_enumerate_process_states();
	ltt_enumerate_file_descriptors();
	list_modules();
	ltt_enumerate_vm_maps();
	list_interrupts();
	ltt_enumerate_network_ip_interface();

	/*
	 * Fire off a work queue on each CPU. Their sole purpose in life
	 * is to guarantee that each CPU has been in a state where it was
	 * in syscall mode (i.e. not in a trap, an IRQ or a soft IRQ).
	 */
	lock_cpu_hotplug();
	atomic_set(&kernel_threads_to_run, num_online_cpus());
	__set_current_state(TASK_UNINTERRUPTIBLE);
	for_each_online_cpu(cpu) {
		INIT_WORK(&cpu_work[cpu], ltt_statedump_work_func, current);
		schedule_delayed_work_on(cpu, &cpu_work[cpu], 0);
	}
	unlock_cpu_hotplug();

	/* Wait for all threads to run */
	schedule();
	BUG_ON(atomic_read(&kernel_threads_to_run) != 0);

	/* Our work is done */
	printk(KERN_DEBUG "trace_statedump_statedump_end\n");
	trace_statedump_statedump_end();
	return 0;
}
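Here the work item carries current (the dumping task) instead of a semaphore. A plausible sketch of the matching work function, assuming it decrements kernel_threads_to_run and wakes the sleeper once the count reaches zero:

/*
 * Sketch (assumed body): each CPU decrements the counter; the CPU
 * that brings it to zero wakes the task sleeping in do_ltt_statedump().
 */
static void ltt_statedump_work_func(void *data)
{
	struct task_struct *owner = data;

	if (atomic_dec_and_test(&kernel_threads_to_run))
		wake_up_process(owner);
}

Note the handshake: the task state is set to TASK_UNINTERRUPTIBLE before any work is queued, so if the last wake_up_process() fires before schedule() runs, the state is already back to TASK_RUNNING and schedule() returns promptly, which is what makes the BUG_ON check safe.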
static int do_ltt_statedump(struct ltt_probe_private_data *call_data)
{
	int cpu;

	printk(KERN_DEBUG "LTT state dump thread start\n");
	ltt_enumerate_process_states(call_data);
	ltt_enumerate_file_descriptors(call_data);
	list_modules(call_data);
	ltt_enumerate_vm_maps(call_data);
	list_interrupts(call_data);
	ltt_enumerate_network_ip_interface(call_data);

	/*
	 * Fire off a work queue on each CPU. Their sole purpose in life
	 * is to guarantee that each CPU has been in a state where it was
	 * in syscall mode (i.e. not in a trap, an IRQ or a soft IRQ).
	 */
	lock_cpu_hotplug();
	atomic_set(&kernel_threads_to_run, num_online_cpus());
	work_wake_task = current;
	__set_current_state(TASK_UNINTERRUPTIBLE);
	for_each_online_cpu(cpu) {
		INIT_DELAYED_WORK(&cpu_work[cpu], ltt_statedump_work_func);
		schedule_delayed_work_on(cpu, &cpu_work[cpu], 0);
	}
	unlock_cpu_hotplug();

	/* Wait for all threads to run */
	schedule();
	BUG_ON(atomic_read(&kernel_threads_to_run) != 0);

	/* Our work is done */
	printk(KERN_DEBUG "LTT state dump end\n");
	__trace_mark(0, list_statedump_end, call_data, MARK_NOARGS);
	return 0;
}
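With the 2.6.20+ workqueue API, INIT_DELAYED_WORK no longer takes a data pointer, which is why this version stashes current in the work_wake_task global before queueing. A sketch of the corresponding work function under that assumption:

/*
 * Sketch (assumed body): new-style work function signature. The task
 * to wake comes from the work_wake_task global set in do_ltt_statedump().
 */
static void ltt_statedump_work_func(struct work_struct *work)
{
	if (atomic_dec_and_test(&kernel_threads_to_run))
		wake_up_process(work_wake_task);
}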