/*
 * Module-init hook: register the Feather-Trace overhead tracing character
 * device ("ft_trace") and reset the per-CPU IRQ-fired state.
 *
 * Returns 0 on success, or the negative error code from ftdev_init() /
 * register_ftdev() on failure (partially-initialized state is torn down
 * via the goto-cleanup chain below).
 */
static int __init init_ft_overhead_trace(void)
{
	int err, cpu;

	printk("Initializing Feather-Trace overhead tracing device.\n");
	/* One minor device is enough: overhead samples are globally merged. */
	err = ftdev_init(&overhead_dev, THIS_MODULE, 1, "ft_trace");
	if (err)
		goto err_out;

	/* Hook up the buffer-lifecycle and userspace-injection callbacks. */
	overhead_dev.alloc = alloc_timestamp_buffer;
	overhead_dev.free = free_timestamp_buffer;
	overhead_dev.write = write_timestamp_from_user;

	err = register_ftdev(&overhead_dev);
	if (err)
		goto err_dealloc;

	/* initialize IRQ flags */
	/*
	 * NOTE(review): the loop body never uses `cpu`; unless
	 * clear_irq_fired() implicitly targets a different CPU on each
	 * iteration, this clears only the current CPU's flag NR_CPUS
	 * times — confirm against clear_irq_fired()'s definition.
	 */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		clear_irq_fired();
	}

	return 0;

err_dealloc:
	/* Undo ftdev_init() before reporting failure. */
	ftdev_exit(&overhead_dev);
err_out:
	printk(KERN_WARNING "Could not register ft_trace module.\n");
	return err;
}
/*
 * Module-init hook: set up the per-CPU sched_trace event buffers and
 * register the "sched_trace" Feather-Trace device (one minor per
 * online CPU).
 *
 * Returns 0 on success, a negative error code otherwise; on any
 * failure after ftdev_init() the device state is released again via
 * ftdev_exit().
 */
static int __init init_sched_task_trace(void)
{
	struct local_buffer *cpu_buf;
	int cpu, err;
	int ready = 0;	/* number of per-CPU buffers initialized OK */

	printk("Allocated %u sched_trace_xxx() events per CPU "
	       "(buffer size: %d bytes)\n",
	       NO_EVENTS, (int) sizeof(struct local_buffer));

	err = ftdev_init(&st_dev, THIS_MODULE, num_online_cpus(),
			 "sched_trace");
	if (err)
		goto err_out;

	/* Wire each minor device to its CPU's statically allocated buffer. */
	for (cpu = 0; cpu < st_dev.minor_cnt; cpu++) {
		cpu_buf = &per_cpu(st_event_buffer, cpu);
		/* init_ft_buffer() reports success as 1, failure as 0. */
		ready += init_ft_buffer(&cpu_buf->ftbuf, NO_EVENTS,
					sizeof(struct st_event_record),
					cpu_buf->flag, cpu_buf->record);
		st_dev.minor[cpu].buf = &cpu_buf->ftbuf;
	}

	/* Bail out unless every per-CPU buffer came up. */
	if (ready != st_dev.minor_cnt) {
		err = -EINVAL;
		goto err_dealloc;
	}

	st_dev.can_open = st_dev_can_open;
	err = register_ftdev(&st_dev);
	if (err)
		goto err_dealloc;

	return 0;

err_dealloc:
	ftdev_exit(&st_dev);
err_out:
	printk(KERN_WARNING "Could not register sched_trace module\n");
	return err;
}
/* Module-exit hook: tear down the "ft_trace" overhead tracing device. */
static void __exit exit_ft_overhead_trace(void)
{
	ftdev_exit(&overhead_dev);
}
/* Module-exit hook: tear down the "sched_trace" event tracing device. */
static void __exit exit_sched_task_trace(void)
{
	ftdev_exit(&st_dev);
}