/* * This function prepares the cpu buffer to write a sample. * * Struct op_entry is used during operations on the ring buffer while * struct op_sample contains the data that is stored in the ring * buffer. Struct entry can be uninitialized. The function reserves a * data array that is specified by size. Use * op_cpu_buffer_write_commit() after preparing the sample. In case of * errors a null pointer is returned, otherwise the pointer to the * sample. * */ struct op_sample *op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size) { #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,29) entry->event = ring_buffer_lock_reserve (op_ring_buffer_write, sizeof(struct op_sample) + size * sizeof(entry->sample->data[0])); #else entry->event = ring_buffer_lock_reserve (op_ring_buffer_write, sizeof(struct op_sample) + size * sizeof(entry->sample->data[0]), &entry->irq_flags); #endif if (entry->event) entry->sample = ring_buffer_event_data(entry->event); else entry->sample = NULL; if (!entry->sample) return NULL; entry->size = size; entry->data = entry->sample->data; return entry->sample; }
static inline void kmemtrace_free(enum kmemtrace_type_id type_id, unsigned long call_site, const void *ptr) { struct ftrace_event_call *call = &event_kmem_free; struct trace_array *tr = kmemtrace_array; struct kmemtrace_free_entry *entry; struct ring_buffer_event *event; event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry)); if (!event) return; entry = ring_buffer_event_data(event); tracing_generic_entry_update(&entry->ent, 0, 0); entry->ent.type = TRACE_KMEM_FREE; entry->type_id = type_id; entry->call_site = call_site; entry->ptr = ptr; if (!filter_check_discard(call, entry, tr->buffer, event)) ring_buffer_unlock_commit(tr->buffer, event); trace_wake_up(); }
/*
 * Record a kmem "alloc" event in the kmemtrace trace array.
 *
 * Reserves space in the ring buffer, fills in a kmemtrace_alloc_entry
 * (type id, call site, returned pointer, requested vs. allocated
 * sizes, gfp flags, NUMA node), and commits it unless the event
 * filter discards it. Silently does nothing if the reservation
 * fails. Wakes up any trace readers afterwards.
 */
static inline void kmemtrace_alloc(enum kmemtrace_type_id type_id,
				   unsigned long call_site,
				   const void *ptr,
				   size_t bytes_req,
				   size_t bytes_alloc,
				   gfp_t gfp_flags,
				   int node)
{
	struct trace_array *trace = kmemtrace_array;
	struct ftrace_event_call *event_call = &event_kmem_alloc;
	struct ring_buffer_event *rb_event;
	struct kmemtrace_alloc_entry *rec;

	rb_event = ring_buffer_lock_reserve(trace->buffer, sizeof(*rec));
	if (!rb_event)
		return;		/* buffer full or disabled: drop the event */

	rec = ring_buffer_event_data(rb_event);
	tracing_generic_entry_update(&rec->ent, 0, 0);
	rec->ent.type	= TRACE_KMEM_ALLOC;

	rec->type_id	= type_id;
	rec->ptr	= ptr;
	rec->call_site	= call_site;
	rec->bytes_req	= bytes_req;
	rec->bytes_alloc = bytes_alloc;
	rec->gfp_flags	= gfp_flags;
	rec->node	= node;

	if (!filter_check_discard(event_call, rec, trace->buffer, rb_event))
		ring_buffer_unlock_commit(trace->buffer, rb_event);

	trace_wake_up();
}
/*
 * Record one boot-time initcall event into the boot trace array.
 *
 * Resolves the initcall's symbol name into it->func up front (the
 * function may live in the .init section and be discarded later),
 * then reserves a ring buffer event, copies *it into a trace_boot
 * entry and commits it. No-op when boot tracing is disabled or the
 * reservation fails. Runs with preemption disabled around the buffer
 * access.
 */
void trace_boot(struct boot_trace *it, initcall_t fn)
{
	struct ring_buffer_event *event;
	struct trace_boot *entry;
	unsigned long irq_flags;
	struct trace_array *tr = boot_trace;

	if (!trace_boot_enabled)
		return;

	/* Get its name now since this function could
	 * disappear because it is in the .init section. */
	sprint_symbol(it->func, (unsigned long)fn);
	preempt_disable();

	/* Fix: the original fetched tr->data[smp_processor_id()] into a
	 * local 'data' that was never used; the dead local is removed. */
	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, 0, 0);
	entry->ent.type = TRACE_BOOT;
	entry->initcall = *it;
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);

	trace_wake_up();
 out:
	preempt_enable();
}
/* * This function prepares the cpu buffer to write a sample. * * Struct op_entry is used during operations on the ring buffer while * struct op_sample contains the data that is stored in the ring * buffer. Struct entry can be uninitialized. The function reserves a * data array that is specified by size. Use * op_cpu_buffer_write_commit() after preparing the sample. In case of * errors a null pointer is returned, otherwise the pointer to the * sample. * */ struct op_sample *op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size) { entry->event = ring_buffer_lock_reserve (op_ring_buffer, sizeof(struct op_sample) + size * sizeof(entry->sample->data[0])); if (!entry->event) return NULL; entry->sample = ring_buffer_event_data(entry->event); entry->size = size; entry->data = entry->sample->data; return entry->sample; }
/*
 * Log one hardware-branch event (from -> to) into the trace array.
 *
 * Reserves a ring buffer event, records the branch source and
 * destination addresses together with the current CPU, and commits
 * it. Silently drops the event if the reservation fails.
 */
void trace_hw_branch(struct trace_array *tr, u64 from, u64 to)
{
	struct hw_branch_entry *rec;
	struct ring_buffer_event *ev;
	unsigned long irq_flags;

	ev = ring_buffer_lock_reserve(tr->buffer, sizeof(*rec), &irq_flags);
	if (!ev)
		return;

	rec = ring_buffer_event_data(ev);
	tracing_generic_entry_update(&rec->ent, 0, from);
	rec->ent.type = TRACE_HW_BRANCHES;
	rec->ent.cpu = smp_processor_id();
	rec->from = from;
	rec->to = to;

	ring_buffer_unlock_commit(tr->buffer, ev, irq_flags);
}
/*
 * Compile-time probe, never meant to run: calls
 * ring_buffer_lock_reserve() with three arguments so that building
 * this translation unit succeeds only against the three-argument
 * (buffer, length, flags) variant of the API.
 *
 * NOTE(review): presumably used by a configure/Kbuild feature test to
 * pick between the old and new ring buffer signatures seen elsewhere
 * in this file — confirm against the build scripts. The literal 0 for
 * the third argument relies on it being a valid null pointer constant
 * (or integer flags value) for whichever signature is present.
 */
void autoconf_test(void) { (void)ring_buffer_lock_reserve(NULL, 0, 0); }