int lttng_add_vpid_to_ctx(struct lttng_ctx **ctx) { struct lttng_ctx_field *field; field = lttng_append_context(ctx); if (!field) return -ENOMEM; if (lttng_find_context(*ctx, "vpid")) { lttng_remove_context_field(ctx, field); return -EEXIST; } field->event_field.name = "vpid"; field->event_field.type.atype = atype_integer; field->event_field.type.u.basic.integer.size = sizeof(pid_t) * CHAR_BIT; field->event_field.type.u.basic.integer.alignment = lttng_alignof(pid_t) * CHAR_BIT; field->event_field.type.u.basic.integer.signedness = lttng_is_signed_type(pid_t); field->event_field.type.u.basic.integer.reverse_byte_order = 0; field->event_field.type.u.basic.integer.base = 10; field->event_field.type.u.basic.integer.encoding = lttng_encode_none; field->get_size = vpid_get_size; field->record = vpid_record; field->get_value = vpid_get_value; lttng_context_update(*ctx); wrapper_vmalloc_sync_all(); return 0; }
int lttng_add_hostname_to_ctx(struct lttng_ctx **ctx) { struct lttng_ctx_field *field; field = lttng_append_context(ctx); if (!field) return -ENOMEM; if (lttng_find_context(*ctx, "hostname")) { lttng_remove_context_field(ctx, field); return -EEXIST; } field->event_field.name = "hostname"; field->event_field.type.atype = atype_array; field->event_field.type.u.array.elem_type.atype = atype_integer; field->event_field.type.u.array.elem_type.u.basic.integer.size = sizeof(char) * CHAR_BIT; field->event_field.type.u.array.elem_type.u.basic.integer.alignment = lttng_alignof(char) * CHAR_BIT; field->event_field.type.u.array.elem_type.u.basic.integer.signedness = lttng_is_signed_type(char); field->event_field.type.u.array.elem_type.u.basic.integer.reverse_byte_order = 0; field->event_field.type.u.array.elem_type.u.basic.integer.base = 10; field->event_field.type.u.array.elem_type.u.basic.integer.encoding = lttng_encode_UTF8; field->event_field.type.u.array.length = LTTNG_HOSTNAME_CTX_LEN; field->get_size = hostname_get_size; field->record = hostname_record; wrapper_vmalloc_sync_all(); return 0; }
/*
 * lttng_test_init - module init: create the LTTng test-filter proc file
 * and register the test events.
 *
 * Returns 0 on success, a negative errno on failure. On event
 * registration failure the proc entry is removed before returning.
 */
static int __init lttng_test_init(void)
{
	int ret = 0;

	(void) wrapper_lttng_fixup_sig(THIS_MODULE);
	/* Sync vmalloc mappings so tracing code does not fault on them. */
	wrapper_vmalloc_sync_all();
	/*
	 * Fix: the fops argument had been mangled to "<tng_..." (an HTML
	 * "&lt;" artifact); restore the address-of operator.
	 */
	lttng_test_filter_event_dentry =
			proc_create_data(LTTNG_TEST_FILTER_EVENT_FILE,
				S_IRUGO | S_IWUGO, NULL,
				&lttng_test_filter_event_operations, NULL);
	if (!lttng_test_filter_event_dentry) {
		printk(KERN_ERR "Error creating LTTng test filter file\n");
		ret = -ENOMEM;
		goto error;
	}
	ret = __lttng_events_init__lttng_test();
	if (ret)
		goto error_events;
	return ret;

error_events:
	remove_proc_entry(LTTNG_TEST_FILTER_EVENT_FILE, NULL);
error:
	return ret;
}
int lttng_add_prio_to_ctx(struct lttng_ctx **ctx) { struct lttng_ctx_field *field; int ret; if (!wrapper_task_prio_sym) { ret = wrapper_task_prio_init(); if (ret) return ret; } field = lttng_append_context(ctx); if (!field) return -ENOMEM; if (lttng_find_context(*ctx, "prio")) { lttng_remove_context_field(ctx, field); return -EEXIST; } field->event_field.name = "prio"; field->event_field.type.atype = atype_integer; field->event_field.type.u.basic.integer.size = sizeof(int) * CHAR_BIT; field->event_field.type.u.basic.integer.alignment = lttng_alignof(int) * CHAR_BIT; field->event_field.type.u.basic.integer.signedness = lttng_is_signed_type(int); field->event_field.type.u.basic.integer.reverse_byte_order = 0; field->event_field.type.u.basic.integer.base = 10; field->event_field.type.u.basic.integer.encoding = lttng_encode_none; field->get_size = prio_get_size; field->record = prio_record; lttng_context_update(*ctx); wrapper_vmalloc_sync_all(); return 0; }
/*
 * lttng_types_init - one-time initialization for the type subsystem.
 *
 * Currently only syncs vmalloc mappings; real type registration is
 * still pending. Always returns 0.
 */
static int lttng_types_init(void)
{
	wrapper_vmalloc_sync_all();
	/* TODO */
	return 0;
}
/*
 * lttng_abi_init - create the "lttng" control proc file and take a clock
 * reference.
 *
 * Returns 0 on success; -ENOMEM (with the clock reference dropped) if
 * the proc file cannot be created.
 */
int __init lttng_abi_init(void)
{
	int ret = 0;

	/* Sync vmalloc mappings so tracing code does not fault on them. */
	wrapper_vmalloc_sync_all();
	lttng_clock_ref();
	/*
	 * Fix: the fops argument had been mangled to "<tng_fops" (an HTML
	 * "&lt;" artifact); restore the address-of operator.
	 */
	lttng_proc_dentry = proc_create_data("lttng", S_IRUSR | S_IWUSR,
					NULL, &lttng_fops, NULL);
	if (!lttng_proc_dentry) {
		printk(KERN_ERR "Error creating LTTng control file\n");
		ret = -ENOMEM;
		goto error;
	}
	lttng_stream_override_ring_buffer_fops();
	return 0;

error:
	lttng_clock_unref();
	return ret;
}
/*
 * lttngprofile_init - create the lttngprofile control proc file and
 * register the syscall entry/exit and process-exit probes.
 *
 * Returns 0 on success, a negative errno on failure. On failure the
 * proc entry (if created) is removed and any registered probes are
 * unregistered.
 */
int __init lttngprofile_init(void)
{
	int ret = 0;

	/* Sync vmalloc mappings so tracing code does not fault on them. */
	wrapper_vmalloc_sync_all();
	/*
	 * Fix: the fops argument had been mangled to "<tngprofile_fops" (an
	 * HTML "&lt;" artifact); restore the address-of operator.
	 */
	lttngprofile_proc_dentry = proc_create_data(LTTNGPROFILE_PROC,
			S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH,
			NULL, &lttngprofile_fops, NULL);
	if (!lttngprofile_proc_dentry) {
		printk(KERN_ERR "Error creating lttngprofile control file\n");
		ret = -ENOMEM;
		goto error;
	}

	/* Register probes. */
	if (lttng_wrapper_tracepoint_probe_register(
			"sys_enter", syscall_entry_probe, NULL) < 0
		|| lttng_wrapper_tracepoint_probe_register(
			"sys_exit", syscall_exit_probe, NULL) < 0
		|| lttng_wrapper_tracepoint_probe_register(
			"sched_process_exit", sched_process_exit_probe, NULL) < 0) {
		/*
		 * Fix: previously ret was left at 0 here, so the error path
		 * returned success; also the printk printed the (always 0)
		 * ret value.
		 */
		printk(KERN_ERR "tracepoint_probe_register failed\n");
		ret = -EINVAL;
		goto error;
	}

	printk("LTTng-profile module loaded successfully.\n");
	return ret;

error:
	if (lttngprofile_proc_dentry)
		remove_proc_entry(LTTNGPROFILE_PROC, NULL);
	/* Best-effort: unregister whichever probes did get registered. */
	probes_unregister();
	return ret;
}
/*
 * lttng_logger_init - create the LTTng logger proc file and register the
 * logger events.
 *
 * Returns 0 on success, a negative errno on failure. On event
 * registration failure the proc entry is removed before returning.
 */
int __init lttng_logger_init(void)
{
	int ret = 0;

	/* Sync vmalloc mappings so tracing code does not fault on them. */
	wrapper_vmalloc_sync_all();
	/*
	 * Fix: the fops argument had been mangled to "<tng_..." (an HTML
	 * "&lt;" artifact); restore the address-of operator.
	 */
	lttng_logger_dentry = proc_create_data(LTTNG_LOGGER_FILE,
				S_IRUGO | S_IWUGO, NULL,
				&lttng_logger_operations, NULL);
	if (!lttng_logger_dentry) {
		printk(KERN_ERR "Error creating LTTng logger file\n");
		ret = -ENOMEM;
		goto error;
	}
	ret = __lttng_events_init__lttng();
	if (ret)
		goto error_events;
	return ret;

error_events:
	/*
	 * Fix: use the same LTTNG_LOGGER_FILE constant as proc_create_data
	 * above instead of a hard-coded "lttng-logger" string, so the two
	 * cannot drift apart.
	 */
	remove_proc_entry(LTTNG_LOGGER_FILE, NULL);
error:
	return ret;
}
int lttng_syscalls_register(struct lttng_channel *chan, void *filter) { struct lttng_kernel_event ev; int ret; wrapper_vmalloc_sync_all(); if (!chan->sc_table) { /* create syscall table mapping syscall to events */ chan->sc_table = kzalloc(sizeof(struct lttng_event *) * ARRAY_SIZE(sc_table), GFP_KERNEL); if (!chan->sc_table) return -ENOMEM; } #ifdef CONFIG_COMPAT if (!chan->compat_sc_table) { /* create syscall table mapping compat syscall to events */ chan->compat_sc_table = kzalloc(sizeof(struct lttng_event *) * ARRAY_SIZE(compat_sc_table), GFP_KERNEL); if (!chan->compat_sc_table) return -ENOMEM; } #endif if (!chan->sc_unknown) { const struct lttng_event_desc *desc = &__event_desc___sys_unknown; memset(&ev, 0, sizeof(ev)); strncpy(ev.name, desc->name, LTTNG_KERNEL_SYM_NAME_LEN); ev.name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0'; ev.instrumentation = LTTNG_KERNEL_NOOP; chan->sc_unknown = lttng_event_create(chan, &ev, filter, desc); WARN_ON_ONCE(!chan->sc_unknown); if (IS_ERR(chan->sc_unknown)) { return PTR_ERR(chan->sc_unknown); } } if (!chan->sc_compat_unknown) { const struct lttng_event_desc *desc = &__event_desc___compat_sys_unknown; memset(&ev, 0, sizeof(ev)); strncpy(ev.name, desc->name, LTTNG_KERNEL_SYM_NAME_LEN); ev.name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0'; ev.instrumentation = LTTNG_KERNEL_NOOP; chan->sc_compat_unknown = lttng_event_create(chan, &ev, filter, desc); WARN_ON_ONCE(!chan->sc_unknown); if (IS_ERR(chan->sc_compat_unknown)) { return PTR_ERR(chan->sc_compat_unknown); } } if (!chan->sc_exit) { const struct lttng_event_desc *desc = &__event_desc___exit_syscall; memset(&ev, 0, sizeof(ev)); strncpy(ev.name, desc->name, LTTNG_KERNEL_SYM_NAME_LEN); ev.name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0'; ev.instrumentation = LTTNG_KERNEL_NOOP; chan->sc_exit = lttng_event_create(chan, &ev, filter, desc); WARN_ON_ONCE(!chan->sc_exit); if (IS_ERR(chan->sc_exit)) { return PTR_ERR(chan->sc_exit); } } ret = fill_table(sc_table, ARRAY_SIZE(sc_table), chan->sc_table, chan, 
filter); if (ret) return ret; #ifdef CONFIG_COMPAT ret = fill_table(compat_sc_table, ARRAY_SIZE(compat_sc_table), chan->compat_sc_table, chan, filter); if (ret) return ret; #endif ret = lttng_wrapper_tracepoint_probe_register("sys_enter", (void *) syscall_entry_probe, chan); if (ret) return ret; /* * We change the name of sys_exit tracepoint due to namespace * conflict with sys_exit syscall entry. */ ret = lttng_wrapper_tracepoint_probe_register("sys_exit", (void *) __event_probe__exit_syscall, chan->sc_exit); if (ret) { WARN_ON_ONCE(lttng_wrapper_tracepoint_probe_unregister("sys_enter", (void *) syscall_entry_probe, chan)); } return ret; }
/**
 * lib_ring_buffer_backend_allocate - allocate a channel buffer
 * @config: ring buffer instance configuration
 * @buf: the buffer struct
 * @size: total size of the buffer
 * @num_subbuf: number of subbuffers
 * @extra_reader_sb: need extra subbuffer for reader
 *
 * Allocates all backing pages and metadata tables for one ring buffer:
 * the page array, per-subbuffer backend pages, the write-side subbuffer
 * table, and the packet counter table, then wires the pages into the
 * subbuffer index. Returns 0 on success, -ENOMEM on any allocation
 * failure (with everything already allocated rolled back via the goto
 * cleanup ladder at the bottom).
 */
static int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config,
					    struct lib_ring_buffer_backend *bufb,
					    size_t size, size_t num_subbuf,
					    int extra_reader_sb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	unsigned long j, num_pages, num_pages_per_subbuf, page_idx = 0;
	unsigned long subbuf_size, mmap_offset = 0;
	unsigned long num_subbuf_alloc;
	struct page **pages;
	unsigned long i;

	num_pages = size >> PAGE_SHIFT;
	num_pages_per_subbuf = num_pages >> get_count_order(num_subbuf);
	subbuf_size = chanb->subbuf_size;
	num_subbuf_alloc = num_subbuf;

	/* The reader-owned subbuffer, if requested, adds one more subbuffer
	 * worth of pages on top of the writer's. */
	if (extra_reader_sb) {
		num_pages += num_pages_per_subbuf; /* Add pages for reader */
		num_subbuf_alloc++;
	}

	/* Temporary page-pointer array, NUMA-local to the buffer's CPU
	 * (cpu == -1 maps to node 0 via max(..., 0)); freed before return. */
	pages = vmalloc_node(ALIGN(sizeof(*pages) * num_pages,
				   1 << INTERNODE_CACHE_SHIFT),
			cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!pages))
		goto pages_error;

	bufb->array = lttng_kvmalloc_node(ALIGN(sizeof(*bufb->array)
					 * num_subbuf_alloc,
				  1 << INTERNODE_CACHE_SHIFT),
			GFP_KERNEL | __GFP_NOWARN,
			cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->array))
		goto array_error;

	/* One zeroed order-0 page at a time, so partial failure can be
	 * rolled back page by page in the depopulate path. */
	for (i = 0; i < num_pages; i++) {
		pages[i] = alloc_pages_node(cpu_to_node(max(bufb->cpu, 0)),
				GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, 0);
		if (unlikely(!pages[i]))
			goto depopulate;
	}
	bufb->num_pages_per_subbuf = num_pages_per_subbuf;

	/* Allocate backend pages array elements */
	for (i = 0; i < num_subbuf_alloc; i++) {
		bufb->array[i] =
			lttng_kvzalloc_node(ALIGN(
				sizeof(struct lib_ring_buffer_backend_pages) +
				sizeof(struct lib_ring_buffer_backend_page)
				* num_pages_per_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL | __GFP_NOWARN,
				cpu_to_node(max(bufb->cpu, 0)));
		if (!bufb->array[i])
			goto free_array;
	}

	/* Allocate write-side subbuffer table */
	bufb->buf_wsb = lttng_kvzalloc_node(ALIGN(
				sizeof(struct lib_ring_buffer_backend_subbuffer)
				* num_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL | __GFP_NOWARN,
				cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->buf_wsb))
		goto free_array;

	for (i = 0; i < num_subbuf; i++)
		bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);

	/* Assign read-side subbuffer table */
	if (extra_reader_sb)
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
						num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);

	/* Allocate subbuffer packet counter table */
	bufb->buf_cnt = lttng_kvzalloc_node(ALIGN(
				sizeof(struct lib_ring_buffer_backend_counts)
				* num_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL | __GFP_NOWARN,
				cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->buf_cnt))
		goto free_wsb;

	/* Assign pages to page index */
	for (i = 0; i < num_subbuf_alloc; i++) {
		for (j = 0; j < num_pages_per_subbuf; j++) {
			CHAN_WARN_ON(chanb, page_idx > num_pages);
			bufb->array[i]->p[j].virt = page_address(pages[page_idx]);
			bufb->array[i]->p[j].pfn = page_to_pfn(pages[page_idx]);
			page_idx++;
		}
		/* mmap output lays subbuffers out contiguously in the map. */
		if (config->output == RING_BUFFER_MMAP) {
			bufb->array[i]->mmap_offset = mmap_offset;
			mmap_offset += subbuf_size;
		}
	}

	/*
	 * If kmalloc ever uses vmalloc underneath, make sure the buffer pages
	 * will not fault.
	 */
	wrapper_vmalloc_sync_all();
	vfree(pages);
	return 0;

	/* Cleanup ladder: each label undoes the allocations that succeeded
	 * before the corresponding failure point, in reverse order. */
free_wsb:
	lttng_kvfree(bufb->buf_wsb);
free_array:
	for (i = 0; (i < num_subbuf_alloc && bufb->array[i]); i++)
		lttng_kvfree(bufb->array[i]);
depopulate:
	/* Free all allocated pages */
	for (i = 0; (i < num_pages && pages[i]); i++)
		__free_page(pages[i]);
	lttng_kvfree(bufb->array);
array_error:
	vfree(pages);
pages_error:
	return -ENOMEM;
}