/*
 * Record the parent process's TGID as seen from the current task's PID
 * namespace (virtual PPID) into the ring buffer.
 */
static void vppid_record(struct lttng_ctx_field *field, struct lib_ring_buffer_ctx *ctx, struct lttng_channel *chan)
{
	struct task_struct *parent;
	pid_t vppid;

	/*
	 * current nsproxy can be NULL when scheduled out of exit. pid_vnr uses
	 * the current thread nsproxy to perform the lookup.
	 */

	/*
	 * TODO: when we eventually add RCU subsystem instrumentation,
	 * taking the rcu read lock here will trigger RCU tracing
	 * recursively. We should modify the kernel synchronization so
	 * it synchronizes both for RCU and RCU sched, and rely on
	 * rcu_read_lock_sched_notrace.
	 */
	rcu_read_lock();
	/* real_parent is RCU-managed: it must be loaded via rcu_dereference()
	 * and only used inside the read-side critical section. */
	parent = rcu_dereference(current->real_parent);
	if (!current->nsproxy)
		vppid = 0;	/* task is exiting: no namespace to resolve against */
	else
		vppid = task_tgid_vnr(parent);
	rcu_read_unlock();
	/* Align and copy the value into the event payload. */
	lib_ring_buffer_align_ctx(ctx, lttng_alignof(vppid));
	chan->ops->event_write(ctx, &vppid, sizeof(vppid));
}
/*
 * Write the virtual (namespace-local) PID of the current process into
 * the event payload.
 */
static void vpid_record(struct lttng_ctx_field *field,
		struct lttng_ust_lib_ring_buffer_ctx *ctx,
		struct lttng_channel *chan)
{
	pid_t value;

	value = wrapper_getvpid();
	lib_ring_buffer_align_ctx(ctx, lttng_alignof(value));
	chan->ops->event_write(ctx, &value, sizeof(value));
}
/*
 * Write the calling thread's pthread identifier (cast to an integer)
 * into the event payload.
 */
static void pthread_id_record(struct lttng_ctx_field *field,
		struct lttng_ust_lib_ring_buffer_ctx *ctx,
		struct lttng_channel *chan)
{
	unsigned long self_id = (unsigned long) pthread_self();

	lib_ring_buffer_align_ctx(ctx, lttng_alignof(self_id));
	chan->ops->event_write(ctx, &self_id, sizeof(self_id));
}
/*
 * Write the instruction pointer of the tracepoint call site, taken from
 * the ring-buffer context, into the event payload.
 */
static void ip_record(struct lttng_ctx_field *field,
		struct lttng_ust_lib_ring_buffer_ctx *ctx,
		struct lttng_channel *chan)
{
	void *call_site = ctx->ip;

	lib_ring_buffer_align_ctx(ctx, lttng_alignof(call_site));
	chan->ops->event_write(ctx, &call_site, sizeof(call_site));
}
/*
 * Write the CPU number the tracepoint fired on into the event payload.
 */
static void cpu_id_record(struct lttng_ctx_field *field,
		struct lttng_ust_lib_ring_buffer_ctx *ctx,
		struct lttng_channel *chan)
{
	int cpu_number = lttng_ust_get_cpu();

	lib_ring_buffer_align_ctx(ctx, lttng_alignof(cpu_number));
	chan->ops->event_write(ctx, &cpu_number, sizeof(cpu_number));
}
/*
 * Write the current task's thread ID (global PID, init namespace view)
 * into the event payload.
 */
static void tid_record(struct lttng_ctx_field *field,
		struct lib_ring_buffer_ctx *ctx,
		struct lttng_channel *chan)
{
	pid_t thread_id = task_pid_nr(current);

	lib_ring_buffer_align_ctx(ctx, lttng_alignof(thread_id));
	chan->ops->event_write(ctx, &thread_id, sizeof(thread_id));
}
/*
 * Write the current task's scheduling priority, obtained through the
 * task_prio wrapper, into the event payload.
 */
static void prio_record(struct lttng_ctx_field *field,
		struct lib_ring_buffer_ctx *ctx,
		struct lttng_channel *chan)
{
	int task_priority = wrapper_task_prio_sym(current);

	lib_ring_buffer_align_ctx(ctx, lttng_alignof(task_priority));
	chan->ops->event_write(ctx, &task_priority, sizeof(task_priority));
}
/*
 * Write the interruptible flag carried in the probe context (ctx->priv)
 * into the event payload as a signed 8-bit value.
 */
static void interruptible_record(struct lttng_ctx_field *field,
		struct lib_ring_buffer_ctx *ctx,
		struct lttng_channel *chan)
{
	/* Avoid shadowing the struct tag with the local name. */
	struct lttng_probe_ctx *probe_ctx = ctx->priv;
	int8_t flag = probe_ctx->interruptible;

	lib_ring_buffer_align_ctx(ctx, lttng_alignof(flag));
	chan->ops->event_write(ctx, &flag, sizeof(flag));
}
/*
 * Write the current task's nice value into the event payload.
 */
static void nice_record(struct lttng_ctx_field *field,
		struct lib_ring_buffer_ctx *ctx,
		struct lttng_channel *chan)
{
	int nice_value = task_nice(current);

	lib_ring_buffer_align_ctx(ctx, lttng_alignof(nice_value));
	chan->ops->event_write(ctx, &nice_value, sizeof(nice_value));
}
/*
 * Write the current process's TGID as seen from its own PID namespace
 * (virtual PID) into the event payload.
 */
static void vpid_record(struct lttng_ctx_field *field,
		struct lib_ring_buffer_ctx *ctx,
		struct lttng_channel *chan)
{
	pid_t value = 0;

	/*
	 * nsproxy can be NULL when scheduled out of exit: report 0 in
	 * that case instead of performing a namespace lookup.
	 */
	if (current->nsproxy)
		value = task_tgid_vnr(current);
	lib_ring_buffer_align_ctx(ctx, lttng_alignof(value));
	chan->ops->event_write(ctx, &value, sizeof(value));
}
static void ppid_record(struct lttng_ctx_field *field, struct lib_ring_buffer_ctx *ctx, struct lttng_channel *chan) { pid_t ppid; /* * TODO: when we eventually add RCU subsystem instrumentation, * taking the rcu read lock here will trigger RCU tracing * recursively. We should modify the kernel synchronization so * it synchronizes both for RCU and RCU sched, and rely on * rcu_read_lock_sched_notrace. */ rcu_read_lock(); ppid = task_tgid_nr(current->real_parent); rcu_read_unlock(); lib_ring_buffer_align_ctx(ctx, lttng_alignof(ppid)); chan->ops->event_write(ctx, &ppid, sizeof(ppid)); }