/*
 * bpf_event_output() - legacy five-u64-register entry point that emits a
 * perf event carrying the caller's register state.
 *
 * r1 is ignored; instead a per-CPU scratch 'struct pt_regs' (bpf_pt_regs)
 * is filled via perf_fetch_caller_regs() — presumably a snapshot of the
 * current register state (NOTE(review): confirm against
 * perf_fetch_caller_regs() semantics) — and its address, cast through
 * long to u64 per the legacy helper calling convention, is forwarded as
 * the ctx argument of bpf_perf_event_output().
 *
 * r2/flags/r4/size pass through unchanged: map pointer, flags word, data
 * pointer, and data length for the underlying helper.
 */
static u64 bpf_event_output(u64 r1, u64 r2, u64 flags, u64 r4, u64 size)
{
	/* Per-CPU scratch regs, safe because BPF progs run non-preemptible. */
	struct pt_regs *regs = this_cpu_ptr(&bpf_pt_regs);

	perf_fetch_caller_regs(regs);
	return bpf_perf_event_output((long)regs, r2, flags, r4, size);
}
/*
 * bpf_perf_event_output_tp() - tracepoint-flavored wrapper around
 * bpf_perf_event_output() using the legacy five-u64-register convention.
 *
 * r1:    pointer to the perf tracepoint buffer (see comment below)
 * r2:    map pointer, r4: data pointer, size: data length — forwarded as-is
 * index: flags/index word forwarded as the helper's flags argument
 */
static u64 bpf_perf_event_output_tp(u64 r1, u64 r2, u64 index, u64 r4, u64 size)
{
	/*
	 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
	 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
	 * from there and call the same bpf_perf_event_output() helper
	 */
	/* Double cast (long then uintptr_t) keeps the load well-defined on
	 * 32- and 64-bit targets before widening back to u64. */
	u64 ctx = *(long *)(uintptr_t)r1;

	return bpf_perf_event_output(ctx, r2, index, r4, size);
}
/*
 * bpf_prog1() - emit one perf event per invocation containing the current
 * pid/tgid word and a fixed cookie, routed through 'my_map'.
 * Always returns 0.
 */
int bpf_prog1(struct pt_regs *ctx)
{
	/* Per-event record pushed to user space via bpf_perf_event_output(). */
	struct event_rec {
		u64 pid;
		u64 cookie;
	} rec = {
		.pid = bpf_get_current_pid_tgid(),
		.cookie = 0x12345678,
	};

	bpf_perf_event_output(ctx, &my_map, 0, &rec, sizeof(rec));
	return 0;
}
int xdp_sample_prog(struct xdp_md *ctx) { void *data_end = (void *)(long)ctx->data_end; void *data = (void *)(long)ctx->data; /* Metadata will be in the perf event before the packet data. */ struct S { u16 cookie; u16 pkt_len; } __packed metadata; if (data < data_end) { /* The XDP perf_event_output handler will use the upper 32 bits * of the flags argument as a number of bytes to include of the * packet payload in the event data. If the size is too big, the * call to bpf_perf_event_output will fail and return -EFAULT. * * See bpf_xdp_event_output in net/core/filter.c. * * The BPF_F_CURRENT_CPU flag means that the event output fd * will be indexed by the CPU number in the event map. */ u64 flags = BPF_F_CURRENT_CPU; u16 sample_size; int ret; metadata.cookie = 0xdead; metadata.pkt_len = (u16)(data_end - data); sample_size = min(metadata.pkt_len, SAMPLE_SIZE); flags |= (u64)sample_size << 32; ret = bpf_perf_event_output(ctx, &my_map, flags, &metadata, sizeof(metadata)); if (ret) bpf_printk("perf_event_output failed: %d\n", ret); } return XDP_PASS; }
int bpf_testcb(struct bpf_sock_ops *skops) { int rv = -1; int op; op = (int) skops->op; if (bpf_ntohl(skops->remote_port) != TESTPORT) { skops->reply = -1; return 0; } switch (op) { case BPF_SOCK_OPS_TIMEOUT_INIT: case BPF_SOCK_OPS_RWND_INIT: case BPF_SOCK_OPS_NEEDS_ECN: case BPF_SOCK_OPS_BASE_RTT: case BPF_SOCK_OPS_RTO_CB: rv = 1; break; case BPF_SOCK_OPS_TCP_CONNECT_CB: case BPF_SOCK_OPS_TCP_LISTEN_CB: case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB: case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB: bpf_sock_ops_cb_flags_set(skops, (BPF_SOCK_OPS_RETRANS_CB_FLAG| BPF_SOCK_OPS_RTO_CB_FLAG)); rv = 1; break; case BPF_SOCK_OPS_RETRANS_CB: { __u32 key = 0; struct tcpnotify_globals g, *gp; struct tcp_notifier msg = { .type = 0xde, .subtype = 0xad, .source = 0xbe, .hash = 0xef, }; rv = 1; /* Update results */ gp = bpf_map_lookup_elem(&global_map, &key); if (!gp) break; g = *gp; g.total_retrans = skops->total_retrans; g.ncalls++; bpf_map_update_elem(&global_map, &key, &g, BPF_ANY); bpf_perf_event_output(skops, &perf_event_map, BPF_F_CURRENT_CPU, &msg, sizeof(msg)); } break; default: rv = -1; } skops->reply = rv; return 1; } char _license[] SEC("license") = "GPL";