/**
 * sk_filter - run a packet through a socket filter
 * @sk: sock associated with &sk_buff
 * @skb: buffer to filter
 *
 * Run the filter code and then cut skb->data to correct size returned by
 * sk_run_filter. If pkt_len is 0 we toss packet. If skb->len is smaller
 * than pkt_len we keep whole skb->data. This is the socket level
 * wrapper to sk_run_filter. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 *
 */
int sk_filter(struct sock *sk, struct sk_buff *skb)
{
	int err;
	struct sk_filter *filter;

	/*
	 * If the skb was allocated from pfmemalloc reserves, only
	 * allow SOCK_MEMALLOC sockets to use it as this socket is
	 * helping free memory
	 */
	if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
		return -ENOMEM;

	err = security_sock_rcv_skb(sk, skb);
	if (err)
		return err;

	rcu_read_lock();
	filter = rcu_dereference(sk->sk_filter);
	if (filter) {
		unsigned int pkt_len = SK_RUN_FILTER(filter, skb);

		err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
	}
	rcu_read_unlock();

	return err;
}
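/*
 * A minimal sketch of a call site, in the style of the packet-socket
 * receive path. The handler name is hypothetical, but the pattern of
 * "run sk_filter(), drop on non-zero, otherwise queue the (possibly
 * trimmed) skb" is how the wrapper above is meant to be used.
 */
static int example_rcv(struct sock *sk, struct sk_buff *skb)
{
	/* Runs the attached BPF program under RCU; trims skb on accept. */
	if (sk_filter(sk, skb)) {
		kfree_skb(skb);
		return NET_RX_DROP;
	}
	/* ... queue skb to sk->sk_receive_queue ... */
	return NET_RX_SUCCESS;
}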
static int fb_bpf_netrx(const struct fblock * const fb,
			struct sk_buff * const skb,
			enum path_type * const dir)
{
	int drop = 0;
	unsigned int pkt_len;
	unsigned long flags;
	struct fb_bpf_priv __percpu *fb_priv_cpu;

	fb_priv_cpu = this_cpu_ptr(rcu_dereference_raw(fb->private_data));

	spin_lock_irqsave(&fb_priv_cpu->flock, flags);
	if (fb_priv_cpu->filter) {
		pkt_len = SK_RUN_FILTER(fb_priv_cpu->filter, skb);
		/* Unlike sk_filter(), a return value shorter than the
		 * packet drops it here instead of trimming the skb. */
		if (pkt_len < skb->len) {
			spin_unlock_irqrestore(&fb_priv_cpu->flock, flags);
			kfree_skb(skb);
			return PPE_DROPPED;
		}
	}
	write_next_idp_to_skb(skb, fb->idp, fb_priv_cpu->port[*dir]);
	if (fb_priv_cpu->port[*dir] == IDP_UNKNOWN)
		drop = 1;
	spin_unlock_irqrestore(&fb_priv_cpu->flock, flags);
	if (drop) {
		kfree_skb(skb);
		return PPE_DROPPED;
	}
	return PPE_SUCCESS;
}
/**
 * sk_filter - run a packet through a socket filter
 * @sk: sock associated with &sk_buff
 * @skb: buffer to filter
 *
 * Run the filter code and then cut skb->data to correct size returned by
 * sk_run_filter. If pkt_len is 0 we toss packet. If skb->len is smaller
 * than pkt_len we keep whole skb->data. This is the socket level
 * wrapper to sk_run_filter. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 *
 */
int sk_filter(struct sock *sk, struct sk_buff *skb)
{
	int err;
	struct sk_filter *filter;

	err = security_sock_rcv_skb(sk, skb);
	if (err)
		return err;

	rcu_read_lock_bh();
	filter = rcu_dereference_bh(sk->sk_filter);
	if (filter) {
		unsigned int pkt_len = SK_RUN_FILTER(filter, skb);

		err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
	}
	rcu_read_unlock_bh();

	return err;
}
/**
 * seccomp_run_filters - evaluates all seccomp filters against @syscall
 * @syscall: number of the current system call
 *
 * Returns valid seccomp BPF response codes.
 */
static u32 seccomp_run_filters(int syscall)
{
	struct seccomp_filter *f;
	struct seccomp_data sd;
	u32 ret = SECCOMP_RET_ALLOW;

	/* Ensure unexpected behavior doesn't result in failing open. */
	if (WARN_ON(current->seccomp.filter == NULL))
		return SECCOMP_RET_KILL;

	populate_seccomp_data(&sd);

	/*
	 * All filters in the list are evaluated and the lowest BPF return
	 * value always takes priority (ignoring the DATA).
	 */
	for (f = current->seccomp.filter; f; f = f->prev) {
		u32 cur_ret = SK_RUN_FILTER(f->prog, (void *)&sd);

		if ((cur_ret & SECCOMP_RET_ACTION) < (ret & SECCOMP_RET_ACTION))
			ret = cur_ret;
	}
	return ret;
}
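/*
 * A minimal user-space sketch of installing one of the filters the walk
 * above evaluates. The prctl()/classic-BPF plumbing is the standard
 * SECCOMP_MODE_FILTER interface; the policy itself (kill on an
 * illustrative syscall number 999, allow everything else) is only an
 * example.
 */
#include <stddef.h>
#include <sys/prctl.h>
#include <linux/filter.h>
#include <linux/seccomp.h>

static int install_example_filter(void)
{
	struct sock_filter code[] = {
		/* A = syscall number from struct seccomp_data. */
		BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
			 offsetof(struct seccomp_data, nr)),
		/* Kill on syscall 999 (illustrative), else fall through. */
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 999, 0, 1),
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_KILL),
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = sizeof(code) / sizeof(code[0]),
		.filter = code,
	};

	/* Required unless the task has CAP_SYS_ADMIN. */
	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
		return -1;
	return prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
}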
static bool lb_transmit(struct team *team, struct sk_buff *skb)
{
	struct sk_filter *fp;
	struct team_port *port;
	unsigned int hash;
	int port_index;

	fp = rcu_dereference(lb_priv(team)->fp);
	if (unlikely(!fp))
		goto drop;
	hash = SK_RUN_FILTER(fp, skb);
	port_index = hash % team->en_port_count;
	port = team_get_port_by_index_rcu(team, port_index);
	if (unlikely(!port))
		goto drop;
	skb->dev = port->dev;
	if (dev_queue_xmit(skb))
		return false;
	return true;

drop:
	dev_kfree_skb_any(skb);
	return false;
}
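/*
 * Sketch of what lb_priv(team)->fp can point at: an ordinary classic-BPF
 * program whose return value is reused verbatim as the hash above. The
 * two-instruction example below assumes the filter sees an untagged
 * Ethernet/IPv4 frame, so offset 29 would be the last byte of the source
 * address; it is purely illustrative, and in practice teamd generates
 * the program from its tx-hash configuration.
 */
static struct sock_filter example_lb_hash[] = {
	BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 29),	/* A = last byte of saddr */
	BPF_STMT(BPF_RET | BPF_A, 0),		/* hash = A */
};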
static bool bpf_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct xt_bpf_info *info = par->matchinfo;

	return SK_RUN_FILTER(info->filter, skb);
}
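/*
 * User-space side, for context: the program in info->filter is supplied
 * through the iptables "bpf" match. The example from the original xt_bpf
 * changelog accepts only TCP (load the IP protocol byte at offset 9,
 * compare against 6):
 *
 *   iptables -A OUTPUT -m bpf --bytecode \
 *       '4,48 0 0 9,21 0 1 6,6 0 0 1,6 0 0 0' -j ACCEPT
 *
 * The bytecode string is the instruction count followed by
 * "opcode jt jf k" quadruples, as produced by nfbpf_compile.
 */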