/*
 * Add a "vpid" context field (the thread group id as seen from the
 * task's own PID namespace, see task_tgid_vnr()) to the context.
 *
 * The field is appended first, then checked for duplication: on
 * duplicate the freshly appended field is removed again. This keeps
 * the append/remove pair balanced on the error path.
 *
 * Returns 0 on success, -ENOMEM if the field cannot be appended,
 * -EEXIST if a "vpid" context field is already present.
 */
int lttng_add_vpid_to_ctx(struct lttng_ctx **ctx)
{
	struct lttng_ctx_field *field;

	field = lttng_append_context(ctx);
	if (!field)
		return -ENOMEM;
	if (lttng_find_context(*ctx, "vpid")) {
		/* Undo the append before reporting the duplicate. */
		lttng_remove_context_field(ctx, field);
		return -EEXIST;
	}
	/* Describe the field as a host-endian, base-10 signed integer. */
	field->event_field.name = "vpid";
	field->event_field.type.atype = atype_integer;
	field->event_field.type.u.basic.integer.size = sizeof(pid_t) * CHAR_BIT;
	field->event_field.type.u.basic.integer.alignment = lttng_alignof(pid_t) * CHAR_BIT;
	field->event_field.type.u.basic.integer.signedness = lttng_is_signed_type(pid_t);
	field->event_field.type.u.basic.integer.reverse_byte_order = 0;
	field->event_field.type.u.basic.integer.base = 10;
	field->event_field.type.u.basic.integer.encoding = lttng_encode_none;
	/* Callbacks used by the tracer to size and serialize the field. */
	field->get_size = vpid_get_size;
	field->record = vpid_record;
	field->get_value = vpid_get_value;
	lttng_context_update(*ctx);
	/* Make sure the new context data is visible to all CPUs. */
	wrapper_vmalloc_sync_all();
	return 0;
}
/*
 * Record the parent's virtual (namespace-local) thread group id into
 * the ring buffer. Falls back to 0 when current has no nsproxy.
 */
static void vppid_record(struct lttng_ctx_field *field,
		struct lib_ring_buffer_ctx *ctx,
		struct lttng_channel *chan)
{
	struct task_struct *parent;
	pid_t vppid;

	/*
	 * current nsproxy can be NULL when scheduled out of exit. pid_vnr uses
	 * the current thread nsproxy to perform the lookup.
	 */

	/*
	 * TODO: when we eventually add RCU subsystem instrumentation,
	 * taking the rcu read lock here will trigger RCU tracing
	 * recursively. We should modify the kernel synchronization so
	 * it synchronizes both for RCU and RCU sched, and rely on
	 * rcu_read_lock_sched_notrace.
	 */
	rcu_read_lock();
	/* real_parent is RCU-protected; dereference inside the read lock. */
	parent = rcu_dereference(current->real_parent);
	if (!current->nsproxy)
		vppid = 0;
	else
		vppid = task_tgid_vnr(parent);
	rcu_read_unlock();
	lib_ring_buffer_align_ctx(ctx, lttng_alignof(vppid));
	chan->ops->event_write(ctx, &vppid, sizeof(vppid));
}
/*
 * Return the number of bytes needed to store the "vpid" field at
 * @offset, including the alignment padding required before it.
 */
static size_t vpid_get_size(size_t offset)
{
	size_t padding;

	padding = lib_ring_buffer_align(offset, lttng_alignof(pid_t));
	return padding + sizeof(pid_t);
}
/*
 * Return the number of bytes needed to store the "interruptible"
 * field at @offset, including the alignment padding before it.
 */
static size_t interruptible_get_size(size_t offset)
{
	size_t padding;

	padding = lib_ring_buffer_align(offset, lttng_alignof(int8_t));
	return padding + sizeof(int8_t);
}
/*
 * Serialize the caller's namespace-local PID (as reported by
 * wrapper_getvpid()) into the ring buffer.
 */
static void vpid_record(struct lttng_ctx_field *field,
		struct lttng_ust_lib_ring_buffer_ctx *ctx,
		struct lttng_channel *chan)
{
	pid_t pid;

	pid = wrapper_getvpid();
	lib_ring_buffer_align_ctx(ctx, lttng_alignof(pid));
	chan->ops->event_write(ctx, &pid, sizeof(pid));
}
/*
 * Serialize the calling thread's pthread identifier (pthread_self(),
 * cast to unsigned long) into the ring buffer.
 */
static void pthread_id_record(struct lttng_ctx_field *field,
		struct lttng_ust_lib_ring_buffer_ctx *ctx,
		struct lttng_channel *chan)
{
	unsigned long self_id = (unsigned long) pthread_self();

	lib_ring_buffer_align_ctx(ctx, lttng_alignof(self_id));
	chan->ops->event_write(ctx, &self_id, sizeof(self_id));
}
/*
 * Serialize the current CPU number (lttng_ust_get_cpu()) into the
 * ring buffer.
 */
static void cpu_id_record(struct lttng_ctx_field *field,
		struct lttng_ust_lib_ring_buffer_ctx *ctx,
		struct lttng_channel *chan)
{
	int cpu_id = lttng_ust_get_cpu();

	lib_ring_buffer_align_ctx(ctx, lttng_alignof(cpu_id));
	chan->ops->event_write(ctx, &cpu_id, sizeof(cpu_id));
}
/*
 * Serialize the instruction pointer of the tracepoint call site
 * (taken from ctx->ip) into the ring buffer.
 */
static void ip_record(struct lttng_ctx_field *field,
		struct lttng_ust_lib_ring_buffer_ctx *ctx,
		struct lttng_channel *chan)
{
	void *addr = ctx->ip;

	lib_ring_buffer_align_ctx(ctx, lttng_alignof(addr));
	chan->ops->event_write(ctx, &addr, sizeof(addr));
}
/*
 * Serialize the current task's global thread id (task_pid_nr())
 * into the ring buffer.
 */
static void tid_record(struct lttng_ctx_field *field,
		struct lib_ring_buffer_ctx *ctx,
		struct lttng_channel *chan)
{
	pid_t thread_id = task_pid_nr(current);

	lib_ring_buffer_align_ctx(ctx, lttng_alignof(thread_id));
	chan->ops->event_write(ctx, &thread_id, sizeof(thread_id));
}
/*
 * Serialize the current task's priority, obtained through the
 * wrapper_task_prio_sym() wrapper, into the ring buffer.
 */
static void prio_record(struct lttng_ctx_field *field,
		struct lib_ring_buffer_ctx *ctx,
		struct lttng_channel *chan)
{
	int current_prio = wrapper_task_prio_sym(current);

	lib_ring_buffer_align_ctx(ctx, lttng_alignof(current_prio));
	chan->ops->event_write(ctx, &current_prio, sizeof(current_prio));
}
/*
 * Serialize the "interruptible" flag carried by the probe context
 * (ctx->priv) into the ring buffer as an int8_t.
 */
static void interruptible_record(struct lttng_ctx_field *field,
		struct lib_ring_buffer_ctx *ctx,
		struct lttng_channel *chan)
{
	struct lttng_probe_ctx *probe_ctx = ctx->priv;
	int8_t value;

	value = probe_ctx->interruptible;
	lib_ring_buffer_align_ctx(ctx, lttng_alignof(value));
	chan->ops->event_write(ctx, &value, sizeof(value));
}
/*
 * Serialize the current task's nice value (task_nice()) into the
 * ring buffer.
 */
static void nice_record(struct lttng_ctx_field *field,
		struct lib_ring_buffer_ctx *ctx,
		struct lttng_channel *chan)
{
	int niceness = task_nice(current);

	lib_ring_buffer_align_ctx(ctx, lttng_alignof(niceness));
	chan->ops->event_write(ctx, &niceness, sizeof(niceness));
}
/*
 * Serialize the current task's namespace-local thread group id into
 * the ring buffer, or 0 when no nsproxy is available.
 */
static void vpid_record(struct lttng_ctx_field *field,
		struct lib_ring_buffer_ctx *ctx,
		struct lttng_channel *chan)
{
	pid_t pid;

	/*
	 * nsproxy can be NULL when scheduled out of exit; record 0 in
	 * that case rather than performing a namespace lookup.
	 */
	pid = current->nsproxy ? task_tgid_vnr(current) : 0;
	lib_ring_buffer_align_ctx(ctx, lttng_alignof(pid));
	chan->ops->event_write(ctx, &pid, sizeof(pid));
}
/*
 * Record the parent's global (init-namespace) thread group id into
 * the ring buffer.
 */
static void ppid_record(struct lttng_ctx_field *field,
		struct lib_ring_buffer_ctx *ctx,
		struct lttng_channel *chan)
{
	pid_t ppid;

	/*
	 * TODO: when we eventually add RCU subsystem instrumentation,
	 * taking the rcu read lock here will trigger RCU tracing
	 * recursively. We should modify the kernel synchronization so
	 * it synchronizes both for RCU and RCU sched, and rely on
	 * rcu_read_lock_sched_notrace.
	 */
	rcu_read_lock();
	/* real_parent is RCU-protected; read it inside the read lock. */
	ppid = task_tgid_nr(current->real_parent);
	rcu_read_unlock();
	lib_ring_buffer_align_ctx(ctx, lttng_alignof(ppid));
	chan->ops->event_write(ctx, &ppid, sizeof(ppid));
}