Example #1
static struct sock *run_bpf(struct sock_reuseport *reuse, u16 socks,
			    struct bpf_prog *prog, struct sk_buff *skb,
			    int hdr_len)
{
	struct sk_buff *nskb = NULL;
	u32 index;

	/* pskb_pull()/__skb_push() below modify the skb, so never touch a
	 * shared one; run the program on a private clone instead.
	 */
	if (skb_shared(skb)) {
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			return NULL;
		skb = nskb;
	}

	/* temporarily advance data past protocol header */
	if (!pskb_pull(skb, hdr_len)) {
		kfree_skb(nskb);
		return NULL;
	}
	/* the BPF program returns an index into the reuse->socks[] array */
	index = bpf_prog_run_save_cb(prog, skb);
	__skb_push(skb, hdr_len);

	consume_skb(nskb);

	/* an out-of-range index means the program selected no socket */
	if (index >= socks)
		return NULL;

	return reuse->socks[index];
}
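
For context, run_bpf() above is the SO_REUSEPORT socket-selection helper; a NULL return tells the caller to fall back to hash-based selection. The sketch below is a simplified, hypothetical rendering of that call path: select_sock_sketch() is our name, and the body paraphrases the caller reuseport_select_sock() rather than quoting it.

/* Simplified sketch (not verbatim kernel source): prefer the BPF
 * program's pick, fall back to scaling the flow hash over the group.
 * Caller must hold rcu_read_lock().
 */
#include <linux/filter.h>
#include <linux/kernel.h>
#include <net/sock_reuseport.h>

static struct sock *select_sock_sketch(struct sock_reuseport *reuse,
				       u32 hash, struct sk_buff *skb,
				       int hdr_len)
{
	u16 socks = READ_ONCE(reuse->num_socks);
	struct bpf_prog *prog = rcu_dereference(reuse->prog);
	struct sock *sk = NULL;

	if (unlikely(!socks))
		return NULL;	/* empty group */

	if (prog && skb)
		sk = run_bpf(reuse, socks, prog, skb, hdr_len);

	if (!sk)	/* no program, or it declined to pick a socket */
		sk = reuse->socks[reciprocal_scale(hash, socks)];

	return sk;
}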
Example #2
static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt,
		       struct dst_entry *dst, bool can_redirect)
{
	int ret;

	/* Preempt disable is needed to protect per-cpu redirect_info between
	 * BPF prog and skb_do_redirect(). The call_rcu in bpf_prog_put() and
	 * access to maps strictly require a rcu_read_lock() for protection,
	 * mixing with BH RCU lock doesn't work.
	 */
	preempt_disable();
	bpf_compute_data_pointers(skb);
	ret = bpf_prog_run_save_cb(lwt->prog, skb);

	switch (ret) {
	case BPF_OK:
	case BPF_LWT_REROUTE:
		break;

	case BPF_REDIRECT:
		if (unlikely(!can_redirect)) {
			pr_warn_once("Illegal redirect return code in prog %s\n",
				     lwt->name ? : "<unknown>");
			ret = BPF_OK;
		} else {
			skb_reset_mac_header(skb);
			ret = skb_do_redirect(skb);
			if (ret == 0)
				ret = BPF_REDIRECT;
		}
		break;

	case BPF_DROP:
		kfree_skb(skb);
		ret = -EPERM;
		break;

	default:
		pr_warn_once("bpf-lwt: Illegal return value %u, expect packet loss!\n",
			     ret);
		kfree_skb(skb);
		ret = -EINVAL;
		break;
	}

	preempt_enable();

	return ret;
}
Example #3
static bool bpf_mt_v1(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct xt_bpf_info_v1 *info = par->matchinfo;

	/* The const is cast away because bpf_prog_run_save_cb() takes a
	 * non-const skb: it saves and restores skb->cb around the program
	 * run. A non-zero program return value means the rule matches.
	 */
	return !!bpf_prog_run_save_cb(info->filter, (struct sk_buff *) skb);
}
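
All three call sites share one pattern: fetch the attached bpf_prog under whatever protection its subsystem requires (RCU here and in Example #1, disabled preemption for the redirect path in Example #2), run it with bpf_prog_run_save_cb() so the skb control block is saved and restored around the program, and map the return code onto a subsystem-specific verdict. Below is a minimal, hypothetical sketch of that pattern; struct my_hook and my_filter_skb() are illustrative assumptions, not kernel API.

/* Hypothetical sketch of the common pattern around bpf_prog_run_save_cb().
 * Names prefixed "my_" are illustrative only.
 */
#include <linux/errno.h>
#include <linux/filter.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>

struct my_hook {
	struct bpf_prog __rcu *prog;	/* attached program, RCU-managed */
};

/* Returns 0 to accept the packet, -EPERM to drop it. */
static int my_filter_skb(struct my_hook *hook, struct sk_buff *skb)
{
	struct bpf_prog *prog;
	u32 verdict = 1;	/* accept when no program is attached */

	rcu_read_lock();	/* prog and map lifetimes are RCU-protected */
	prog = rcu_dereference(hook->prog);
	if (prog)
		/* saves skb->cb, runs the program, restores skb->cb */
		verdict = bpf_prog_run_save_cb(prog, skb);
	rcu_read_unlock();

	return verdict ? 0 : -EPERM;
}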