Example No. 1
0
static unsigned int classify(const struct sk_buff *skb)
{
	if (likely(skb->dev &&
		   skb->dev->phydev &&
		   skb->dev->phydev->drv))
		return sk_run_filter(skb, ptp_filter);
	else
		return PTP_CLASS_NONE;
}
Example No. 2
0
/*
 * run_filter - evaluate the socket's attached BPF filter on @skb.
 *
 * @res is the caller's default (typically the snap length) and is
 * returned unchanged if the filter vanished in the meantime;
 * otherwise the filter's verdict replaces it.
 */
static inline unsigned run_filter(struct sk_buff *skb, struct sock *sk, unsigned res)
{
	struct sk_filter *flt;

	bh_lock_sock(sk);
	/*
	 * The caller already saw a non-NULL filter, but it must be
	 * re-read under bh_lock_sock() in case it was detached
	 * concurrently.
	 */
	flt = sk->sk_filter;
	if (likely(flt))
		res = sk_run_filter(skb, flt->insns, flt->len);
	bh_unlock_sock(sk);

	return res;
}
/**
 *	sk_filter - run a packet through a socket filter
 *	@sk: sock associated with &sk_buff
 *	@skb: buffer to filter
 *
 * Run the filter code and then cut skb->data to correct size returned by
 * sk_run_filter. If pkt_len is 0 we toss packet. If skb->len is smaller
 * than pkt_len we keep whole skb->data. This is the socket level
 * wrapper to sk_run_filter. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 *
 */
int sk_filter(struct sock *sk, struct sk_buff *skb)
{
	int err;
	struct sk_filter *filter;

	err = security_sock_rcv_skb(sk, skb);
	if (err)
		return err;

	rcu_read_lock_bh();
	filter = rcu_dereference(sk->sk_filter);
	if (filter) {
		unsigned int pkt_len = sk_run_filter(skb, filter->insns);
		err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
	}
	rcu_read_unlock_bh();

	return err;
}
Example No. 4
0
/**
 * seccomp_run_filters - evaluates all seccomp filters against @syscall
 * @syscall: number of the current system call
 *
 * Returns valid seccomp BPF response codes.
 */
static u32 seccomp_run_filters(int syscall)
{
	u32 action = SECCOMP_RET_ALLOW;
	struct seccomp_filter *walker;

	/* A seccomp task with no filter is a bug; fail closed, not open. */
	if (WARN_ON(current->seccomp.filter == NULL))
		return SECCOMP_RET_KILL;

	/*
	 * Walk the filter chain from youngest to oldest; the numerically
	 * lowest action (ignoring the DATA bits) always takes priority.
	 */
	for (walker = current->seccomp.filter; walker != NULL; walker = walker->prev) {
		u32 verdict = sk_run_filter(NULL, walker->insns);

		if ((verdict & SECCOMP_RET_ACTION) < (action & SECCOMP_RET_ACTION))
			action = verdict;
	}
	return action;
}
Example No. 5
0
/*
 * tpacket_rcv - receive handler for a PACKET_RX_RING (mmap) socket.
 *
 * Instead of queueing the skb, the (possibly truncated) packet is copied
 * into the next free frame of the socket's shared-memory ring, a
 * tpacket_hdr plus sockaddr_ll is filled in, and the reader is woken via
 * sk->data_ready().  The skb is consumed on every path; always returns 0.
 *
 * NOTE(review): 2.4-era kernel code (sk->protinfo.af_packet, skb->stamp,
 * 3-argument sk_run_filter()).  Code left byte-identical; comments only.
 */
static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,  struct packet_type *pt)
{
	struct sock *sk;
	struct packet_opt *po;
	struct sockaddr_ll *sll;
	struct tpacket_hdr *h;
	u8 * skb_head = skb->data;	/* saved so a shared skb can be restored before free */
	int skb_len = skb->len;
	unsigned snaplen;
	unsigned long status = TP_STATUS_LOSING|TP_STATUS_USER;
	unsigned short macoff, netoff;
	struct sk_buff *copy_skb = NULL;

	/* Never deliver packets looped back to the sending host. */
	if (skb->pkt_type == PACKET_LOOPBACK)
		goto drop;

	sk = (struct sock *) pt->data;
	po = sk->protinfo.af_packet;

	if (dev->hard_header) {
		/* SOCK_RAW sockets get the link-level header re-exposed. */
		if (sk->type != SOCK_DGRAM)
			skb_push(skb, skb->data - skb->mac.raw);
		else if (skb->pkt_type == PACKET_OUTGOING) {
			/* Special case: outgoing packets have ll header at head */
			skb_pull(skb, skb->nh.raw - skb->data);
			if (skb->ip_summed == CHECKSUM_HW)
				status |= TP_STATUS_CSUMNOTREADY;
		}
	}

	/* snaplen = number of bytes we will copy into the ring frame. */
	snaplen = skb->len;

#ifdef CONFIG_FILTER
	if (sk->filter) {
		unsigned res = snaplen;
		struct sk_filter *filter;

		/* Re-check under the socket lock; the filter may be detached. */
		bh_lock_sock(sk);
		if ((filter = sk->filter) != NULL)
			res = sk_run_filter(skb, sk->filter->insns, sk->filter->len);
		bh_unlock_sock(sk);

		if (res == 0)
			goto drop_n_restore;	/* filter rejected the packet */
		if (snaplen > res)
			snaplen = res;		/* filter requested truncation */
	}
#endif

	/* Compute where the MAC and network headers land inside the frame. */
	if (sk->type == SOCK_DGRAM) {
		macoff = netoff = TPACKET_ALIGN(TPACKET_HDRLEN) + 16;
	} else {
		unsigned maclen = skb->nh.raw - skb->data;
		netoff = TPACKET_ALIGN(TPACKET_HDRLEN + (maclen < 16 ? 16 : maclen));
		macoff = netoff - maclen;
	}

	if (macoff + snaplen > po->frame_size) {
		/*
		 * Packet does not fit in one ring frame.  Optionally keep a
		 * full copy for delivery through the regular receive queue,
		 * then truncate the ring copy to what fits.
		 */
		if (po->copy_thresh &&
		    atomic_read(&sk->rmem_alloc) + skb->truesize < (unsigned)sk->rcvbuf) {
			if (skb_shared(skb)) {
				copy_skb = skb_clone(skb, GFP_ATOMIC);
			} else {
				copy_skb = skb_get(skb);
				skb_head = skb->data;
			}
			if (copy_skb)
				skb_set_owner_r(copy_skb, sk);
		}
		snaplen = po->frame_size - macoff;
		if ((int)snaplen < 0)
			snaplen = 0;
	}
	/* Never copy past the linear part of the skb. */
	if (snaplen > skb->len-skb->data_len)
		snaplen = skb->len-skb->data_len;

	spin_lock(&sk->receive_queue.lock);
	h = (struct tpacket_hdr *)packet_lookup_frame(po, po->head);

	/* Nonzero tp_status means user space still owns this frame. */
	if (h->tp_status)
		goto ring_is_full;
	po->head = po->head != po->frame_max ? po->head+1 : 0;
	po->stats.tp_packets++;
	if (copy_skb) {
		status |= TP_STATUS_COPY;
		__skb_queue_tail(&sk->receive_queue, copy_skb);
	}
	if (!po->stats.tp_drops)
		status &= ~TP_STATUS_LOSING;
	spin_unlock(&sk->receive_queue.lock);

	memcpy((u8*)h + macoff, skb->data, snaplen);

	h->tp_len = skb->len;
	h->tp_snaplen = snaplen;
	h->tp_mac = macoff;
	h->tp_net = netoff;
	h->tp_sec = skb->stamp.tv_sec;
	h->tp_usec = skb->stamp.tv_usec;

	/* Link-level address info lives right after the aligned header. */
	sll = (struct sockaddr_ll*)((u8*)h + TPACKET_ALIGN(sizeof(*h)));
	sll->sll_halen = 0;
	if (dev->hard_header_parse)
		sll->sll_halen = dev->hard_header_parse(skb, sll->sll_addr);
	sll->sll_family = AF_PACKET;
	sll->sll_hatype = dev->type;
	sll->sll_protocol = skb->protocol;
	sll->sll_pkttype = skb->pkt_type;
	sll->sll_ifindex = dev->ifindex;

	/* Writing tp_status hands the frame over to user space. */
	h->tp_status = status;
	mb();	/* NOTE(review): presumably orders the status store before the wakeup below — confirm */

	{
		/* Flush the D-cache for every page the frame spans so a
		 * user-space mapping of the ring sees the fresh data. */
		struct page *p_start, *p_end;
		u8 *h_end = (u8 *)h + macoff + snaplen - 1;

		p_start = virt_to_page(h);
		p_end = virt_to_page(h_end);
		while (p_start <= p_end) {
			flush_dcache_page(p_start);
			p_start++;
		}
	}

	sk->data_ready(sk, 0);

drop_n_restore:
	/* Undo our header push/pull if other users still hold this skb. */
	if (skb_head != skb->data && skb_shared(skb)) {
		skb->data = skb_head;
		skb->len = skb_len;
	}
drop:
        kfree_skb(skb);
	return 0;

ring_is_full:
	po->stats.tp_drops++;
	spin_unlock(&sk->receive_queue.lock);

	/* Wake the reader anyway so it can notice the raised drop counter. */
	sk->data_ready(sk, 0);
	if (copy_skb)
		kfree_skb(copy_skb);
	goto drop_n_restore;
}
Example No. 6
0
/*
 * packet_rcv - receive handler for a regular (non-mmap) AF_PACKET socket.
 *
 * Runs the optional socket filter, clones shared skbs, records the
 * link-level address in skb->cb as a sockaddr_ll, and queues the skb on
 * sk->receive_queue.  The skb is consumed on every path; always returns 0.
 *
 * NOTE(review): 2.4-era kernel code (sk->protinfo.af_packet, pt->data,
 * CONFIG_FILTER).  Code left byte-identical; comments only.
 */
static int packet_rcv(struct sk_buff *skb, struct net_device *dev,  struct packet_type *pt)
{
	struct sock *sk;
	struct sockaddr_ll *sll;
	struct packet_opt *po;
	u8 * skb_head = skb->data;	/* saved so a shared skb can be restored before free */
	int skb_len = skb->len;
#ifdef CONFIG_FILTER
	unsigned snaplen;
#endif

	/* Never deliver packets looped back to the sending host. */
	if (skb->pkt_type == PACKET_LOOPBACK)
		goto drop;

	sk = (struct sock *) pt->data;
	po = sk->protinfo.af_packet;

	skb->dev = dev;

	if (dev->hard_header) {
		/* The device has an explicit notion of ll header,
		   exported to higher levels.

		   Otherwise, the device hides details of its frame
		   structure, so that the corresponding packet head is
		   never delivered to user.
		 */
		if (sk->type != SOCK_DGRAM)
			skb_push(skb, skb->data - skb->mac.raw);
		else if (skb->pkt_type == PACKET_OUTGOING) {
			/* Special case: outgoing packets have ll header at head */
			skb_pull(skb, skb->nh.raw - skb->data);
		}
	}

#ifdef CONFIG_FILTER
	snaplen = skb->len;

	if (sk->filter) {
		unsigned res = snaplen;
		struct sk_filter *filter;

		/* Re-check under the socket lock; the filter may be detached. */
		bh_lock_sock(sk);
		if ((filter = sk->filter) != NULL)
			res = sk_run_filter(skb, sk->filter->insns, sk->filter->len);
		bh_unlock_sock(sk);

		if (res == 0)
			goto drop_n_restore;	/* filter rejected the packet */
		if (snaplen > res)
			snaplen = res;		/* filter requested truncation */
	}
#endif /* CONFIG_FILTER */

	/* Enforce the socket receive-buffer limit before queueing. */
	if (atomic_read(&sk->rmem_alloc) + skb->truesize >= (unsigned)sk->rcvbuf)
		goto drop_n_acct;

	if (skb_shared(skb)) {
		/* We will modify and queue the skb; work on a private clone. */
		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
		if (nskb == NULL)
			goto drop_n_acct;

		if (skb_head != skb->data) {
			/* Restore the original head before releasing the shared skb. */
			skb->data = skb_head;
			skb->len = skb_len;
		}
		kfree_skb(skb);
		skb = nskb;
	}

	/* Record the link-level address info in the skb control buffer. */
	sll = (struct sockaddr_ll*)skb->cb;
	sll->sll_family = AF_PACKET;
	sll->sll_hatype = dev->type;
	sll->sll_protocol = skb->protocol;
	sll->sll_pkttype = skb->pkt_type;
	sll->sll_ifindex = dev->ifindex;
	sll->sll_halen = 0;

	if (dev->hard_header_parse)
		sll->sll_halen = dev->hard_header_parse(skb, sll->sll_addr);

#ifdef CONFIG_FILTER
	/* Truncate to the length the filter allowed. */
	if (pskb_trim(skb, snaplen))
		goto drop_n_acct;
#endif

	skb_set_owner_r(skb, sk);
	skb->dev = NULL;
	spin_lock(&sk->receive_queue.lock);
	po->stats.tp_packets++;
	__skb_queue_tail(&sk->receive_queue, skb);
	spin_unlock(&sk->receive_queue.lock);
	sk->data_ready(sk,skb->len);
	return 0;

drop_n_acct:
	/* Count the drop under the queue lock, then fall through to free. */
	spin_lock(&sk->receive_queue.lock);
	po->stats.tp_drops++;
	spin_unlock(&sk->receive_queue.lock);

#ifdef CONFIG_FILTER
drop_n_restore:
#endif
	/* Undo our header push/pull if other users still hold this skb. */
	if (skb_head != skb->data && skb_shared(skb)) {
		skb->data = skb_head;
		skb->len = skb_len;
	}
drop:
	kfree_skb(skb);
	return 0;
}