Example #1
0
static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
			 struct net_device *dev)
{
	struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
	struct iphdr *iph;
	struct sk_buff *fp, *head = qp->q.fragments;
	int len;
	int ihlen;
	int err;
	int sum_truesize;
	u8 ecn;

	ipq_kill(qp);

	ecn = ip_frag_ecn_table[qp->ecn];
	if (unlikely(ecn == 0xff)) {
		err = -EINVAL;
		goto out_fail;
	}
	/* Make the one we just received the head. */
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);
		if (!fp)
			goto out_nomem;

		fp->next = head->next;
		if (!fp->next)
			qp->q.fragments_tail = fp;
		prev->next = fp;

		skb_morph(head, qp->q.fragments);
		head->next = qp->q.fragments->next;

		consume_skb(qp->q.fragments);
		qp->q.fragments = head;
	}

	WARN_ON(head == NULL);
	WARN_ON(FRAG_CB(head)->offset != 0);

	/* Allocate a new buffer for the datagram. */
	ihlen = ip_hdrlen(head);
	len = ihlen + qp->q.len;

	err = -E2BIG;
	if (len > 65535)
		goto out_oversize;

	/* Head of list must not be cloned. */
	if (skb_unclone(head, GFP_ATOMIC))
		goto out_nomem;

	/* If the first fragment is fragmented itself, we split
	 * it into two chunks: the first with data and paged part
	 * and the second, holding only fragments. */
	if (skb_has_frag_list(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
			goto out_nomem;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		add_frag_mem_limit(&qp->q, clone->truesize);
	}

	skb_push(head, head->data - skb_network_header(head));

	sum_truesize = head->truesize;
	for (fp = head->next; fp;) {
		bool headstolen;
		int delta;
		struct sk_buff *next = fp->next;

		sum_truesize += fp->truesize;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_COMPLETE)
			head->csum = csum_add(head->csum, fp->csum);

		if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
			kfree_skb_partial(fp, headstolen);
		} else {
			if (!skb_shinfo(head)->frag_list)
				skb_shinfo(head)->frag_list = fp;
			head->data_len += fp->len;
			head->len += fp->len;
			head->truesize += fp->truesize;
		}
		fp = next;
	}
	sub_frag_mem_limit(&qp->q, sum_truesize);

	head->next = NULL;
	head->dev = dev;
	head->tstamp = qp->q.stamp;
	IPCB(head)->frag_max_size = qp->q.max_size;

	iph = ip_hdr(head);
	/* max_size != 0 implies at least one fragment had IP_DF set */
	iph->frag_off = qp->q.max_size ? htons(IP_DF) : 0;
	iph->tot_len = htons(len);
	iph->tos |= ecn;
	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS);
	qp->q.fragments = NULL;
	qp->q.fragments_tail = NULL;
	return 0;

out_nomem:
	LIMIT_NETDEBUG(KERN_ERR pr_fmt("queue_glue: no memory for gluing queue %p\n"),
		       qp);
	err = -ENOMEM;
	goto out_fail;
out_oversize:
	net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->saddr);
out_fail:
	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
	return err;
}
Example #2
0
static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct pep_sock *pn = pep_sk(sk);
	struct pnpipehdr *hdr = pnp_hdr(skb);
	struct sk_buff_head *queue;
	int err = 0;

	BUG_ON(sk->sk_state == TCP_CLOSE_WAIT);

	switch (hdr->message_id) {
	case PNS_PEP_CONNECT_REQ:
		pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE, GFP_ATOMIC);
		break;

	case PNS_PEP_DISCONNECT_REQ:
		pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
		sk->sk_state = TCP_CLOSE_WAIT;
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_state_change(sk);
		break;

	case PNS_PEP_ENABLE_REQ:
		/* Wait for PNS_PIPE_(DISABLED|ENABLED)_IND */
		pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
		break;

	case PNS_PEP_RESET_REQ:
		switch (hdr->state_after_reset) {
		case PN_PIPE_DISABLE:
			pn->init_enable = 0;
			break;
		case PN_PIPE_ENABLE:
			pn->init_enable = 1;
			break;
		default: /* not allowed to send an error here!? */
			err = -EINVAL;
			goto out;
		}
		/* fall through */
	case PNS_PEP_DISABLE_REQ:
		atomic_set(&pn->tx_credits, 0);
		pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
		break;

	case PNS_PEP_CTRL_REQ:
		if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
			atomic_inc(&sk->sk_drops);
			break;
		}
		__skb_pull(skb, 4);
		queue = &pn->ctrlreq_queue;
		goto queue;

	case PNS_PIPE_ALIGNED_DATA:
		__skb_pull(skb, 1);
		/* fall through */
	case PNS_PIPE_DATA:
		__skb_pull(skb, 3); /* Pipe data header */
		if (!pn_flow_safe(pn->rx_fc)) {
			err = sock_queue_rcv_skb(sk, skb);
			if (!err)
				return NET_RX_SUCCESS;
			err = -ENOBUFS;
			break;
		}

		if (pn->rx_credits == 0) {
			atomic_inc(&sk->sk_drops);
			err = -ENOBUFS;
			break;
		}
		pn->rx_credits--;
		queue = &sk->sk_receive_queue;
		goto queue;

	case PNS_PEP_STATUS_IND:
		pipe_rcv_status(sk, skb);
		break;

	case PNS_PIPE_REDIRECTED_IND:
		err = pipe_rcv_created(sk, skb);
		break;

	case PNS_PIPE_CREATED_IND:
		err = pipe_rcv_created(sk, skb);
		if (err)
			break;
		/* fall through */
	case PNS_PIPE_RESET_IND:
		if (!pn->init_enable)
			break;
		/* fall through */
	case PNS_PIPE_ENABLED_IND:
		if (!pn_flow_safe(pn->tx_fc)) {
			atomic_set(&pn->tx_credits, 1);
			sk->sk_write_space(sk);
		}
		if (sk->sk_state == TCP_ESTABLISHED)
			break; /* Nothing to do */
		sk->sk_state = TCP_ESTABLISHED;
		pipe_grant_credits(sk, GFP_ATOMIC);
		break;

	case PNS_PIPE_DISABLED_IND:
		sk->sk_state = TCP_SYN_RECV;
		pn->rx_credits = 0;
		break;

	default:
		LIMIT_NETDEBUG(KERN_DEBUG"Phonet unknown PEP message: %u\n",
				hdr->message_id);
		err = -EINVAL;
	}
out:
	kfree_skb(skb);
	return (err == -ENOBUFS) ? NET_RX_DROP : NET_RX_SUCCESS;

queue:
	skb->dev = NULL;
	skb_set_owner_r(skb, sk);
	err = skb->len;
	skb_queue_tail(queue, skb);
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, err);
	return NET_RX_SUCCESS;
}
/*
 * Stuff received packets to associated sockets.
 * On error, returns non-zero and releases the skb.
 */
static int phonet_rcv(struct sk_buff *skb, struct net_device *dev,
			struct packet_type *pkttype,
			struct net_device *orig_dev)
{
	struct net *net = dev_net(dev);
	struct phonethdr *ph;
	struct sockaddr_pn sa;
	u16 len;

	/* check we have at least a full Phonet header */
	if (!pskb_pull(skb, sizeof(struct phonethdr)))
		goto out;

	/* check that the advertised length is correct */
	ph = pn_hdr(skb);
	len = get_unaligned_be16(&ph->pn_length);
	if (len < 2)
		goto out;
	len -= 2;
	if ((len > skb->len) || pskb_trim(skb, len))
		goto out;
	skb_reset_transport_header(skb);

	pn_skb_get_dst_sockaddr(skb, &sa);

	/* check if this is broadcasted */
	if (pn_sockaddr_get_addr(&sa) == PNADDR_BROADCAST) {
		pn_deliver_sock_broadcast(net, skb);
		goto out;
	}

	/* resource routing */
	if (pn_sockaddr_get_object(&sa) == 0) {
		struct sock *sk = pn_find_sock_by_res(net, sa.spn_resource);
		if (sk)
			return sk_receive_skb(sk, skb, 0);
	}

	/* check if we are the destination */
	if (phonet_address_lookup(net, pn_sockaddr_get_addr(&sa)) == 0) {
		/* Phonet packet input */
		struct sock *sk = pn_find_sock_by_sa(net, &sa);

		if (sk)
			return sk_receive_skb(sk, skb, 0);

		if (can_respond(skb)) {
			send_obj_unreachable(skb);
			send_reset_indications(skb);
		}
	} else if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
		goto out; /* Race between address deletion and loopback */
	else {
		/* Phonet packet routing */
		struct net_device *out_dev;

		out_dev = phonet_route_output(net, pn_sockaddr_get_addr(&sa));
		if (!out_dev) {
			LIMIT_NETDEBUG(KERN_WARNING"No Phonet route to %02X\n",
					pn_sockaddr_get_addr(&sa));
			goto out;
		}

		__skb_push(skb, sizeof(struct phonethdr));
		skb->dev = out_dev;
		if (out_dev == dev) {
			LIMIT_NETDEBUG(KERN_ERR"Phonet loop to %02X on %s\n",
					pn_sockaddr_get_addr(&sa), dev->name);
			goto out_dev;
		}
		/* Some drivers (e.g. TUN) do not allocate HW header space */
		if (skb_cow_head(skb, out_dev->hard_header_len))
			goto out_dev;

		if (dev_hard_header(skb, out_dev, ETH_P_PHONET, NULL, NULL,
					skb->len) < 0)
			goto out_dev;
		dev_queue_xmit(skb);
		dev_put(out_dev);
		return NET_RX_SUCCESS;
out_dev:
		dev_put(out_dev);
	}

out:
	kfree_skb(skb);
	return NET_RX_DROP;
}
Example #4
0
static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
{
    struct ipv6hdr *iph;
    struct ipv6_esp_hdr *esph;
    struct esp_data *esp = x->data;
    struct crypto_blkcipher *tfm = esp->conf.tfm;
    struct blkcipher_desc desc = { .tfm = tfm };
    struct sk_buff *trailer;
    int blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4);
    int alen = esp->auth.icv_trunc_len;
    int elen = skb->len - sizeof(struct ipv6_esp_hdr) - esp->conf.ivlen - alen;
    int hdr_len = skb_network_header_len(skb);
    int nfrags;
    int ret = 0;

    if (!pskb_may_pull(skb, sizeof(struct ipv6_esp_hdr))) {
        ret = -EINVAL;
        goto out;
    }

    if (elen <= 0 || (elen & (blksize-1))) {
        ret = -EINVAL;
        goto out;
    }

    /* If integrity check is required, do this. */
    if (esp->auth.icv_full_len) {
        u8 sum[alen];

        ret = esp_mac_digest(esp, skb, 0, skb->len - alen);
        if (ret)
            goto out;

        if (skb_copy_bits(skb, skb->len - alen, sum, alen))
            BUG();

        if (unlikely(memcmp(esp->auth.work_icv, sum, alen))) {
            x->stats.integrity_failed++;
            ret = -EINVAL;
            goto out;
        }
    }

    if ((nfrags = skb_cow_data(skb, 0, &trailer)) < 0) {
        ret = -EINVAL;
        goto out;
    }

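    /* The payload was protected by ESP; any checksum computed by
     * hardware before decryption is no longer meaningful. */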
    skb->ip_summed = CHECKSUM_NONE;

    esph = (struct ipv6_esp_hdr*)skb->data;
    iph = ipv6_hdr(skb);

    /* Get ivec. This can be wrong; check against other implementations. */
    if (esp->conf.ivlen)
        crypto_blkcipher_set_iv(tfm, esph->enc_data, esp->conf.ivlen);

    {
        u8 nexthdr[2];
        struct scatterlist *sg = &esp->sgbuf[0];
        u8 padlen;

        if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
            sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC);
            if (!sg) {
                ret = -ENOMEM;
                goto out;
            }
        }
        skb_to_sgvec(skb, sg, sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen, elen);
        ret = crypto_blkcipher_decrypt(&desc, sg, sg, elen);
        if (unlikely(sg != &esp->sgbuf[0]))
            kfree(sg);
        if (unlikely(ret))
            goto out;

        if (skb_copy_bits(skb, skb->len-alen-2, nexthdr, 2))
            BUG();

        padlen = nexthdr[0];
        if (padlen+2 >= elen) {
            LIMIT_NETDEBUG(KERN_WARNING "ipsec esp packet is garbage padlen=%d, elen=%d\n", padlen+2, elen);
            ret = -EINVAL;
            goto out;
        }
        /* ... check padding bits here. Silly. :-) */

        pskb_trim(skb, skb->len - alen - padlen - 2);
        ret = nexthdr[1];
    }

    __skb_pull(skb, sizeof(*esph) + esp->conf.ivlen);
    skb_set_transport_header(skb, -hdr_len);
out:
    return ret;
}
Example #5
0
static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
                         struct net_device *dev)
{
    struct iphdr *iph;
    struct sk_buff *fp, *head = qp->q.fragments;
    int len;
    int ihlen;
    int err;

    ipq_kill(qp);

    /* Make the one we just received the head. */
    if (prev) {
        head = prev->next;
        fp = skb_clone(head, GFP_ATOMIC);
        if (!fp)
            goto out_nomem;

        fp->next = head->next;
        prev->next = fp;

        skb_morph(head, qp->q.fragments);
        head->next = qp->q.fragments->next;

        kfree_skb(qp->q.fragments);
        qp->q.fragments = head;
    }

    WARN_ON(head == NULL);
    WARN_ON(FRAG_CB(head)->offset != 0);

    /* Allocate a new buffer for the datagram. */
    ihlen = ip_hdrlen(head);
    len = ihlen + qp->q.len;

    err = -E2BIG;
    if (len > 65535)
        goto out_oversize;

    /* Head of list must not be cloned. */
    if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
        goto out_nomem;

    /* If the first fragment is fragmented itself, we split
     * it into two chunks: the first with data and paged part
     * and the second, holding only fragments. */
    if (skb_shinfo(head)->frag_list) {
        struct sk_buff *clone;
        int i, plen = 0;

        if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
            goto out_nomem;
        clone->next = head->next;
        head->next = clone;
        skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
        skb_shinfo(head)->frag_list = NULL;
        for (i=0; i<skb_shinfo(head)->nr_frags; i++)
            plen += skb_shinfo(head)->frags[i].size;
        clone->len = clone->data_len = head->data_len - plen;
        head->data_len -= clone->len;
        head->len -= clone->len;
        clone->csum = 0;
        clone->ip_summed = head->ip_summed;
        atomic_add(clone->truesize, &qp->q.net->mem);
    }

    skb_shinfo(head)->frag_list = head->next;
    skb_push(head, head->data - skb_network_header(head));
    atomic_sub(head->truesize, &qp->q.net->mem);

    for (fp=head->next; fp; fp = fp->next) {
        head->data_len += fp->len;
        head->len += fp->len;
        if (head->ip_summed != fp->ip_summed)
            head->ip_summed = CHECKSUM_NONE;
        else if (head->ip_summed == CHECKSUM_COMPLETE)
            head->csum = csum_add(head->csum, fp->csum);
        head->truesize += fp->truesize;
        atomic_sub(fp->truesize, &qp->q.net->mem);
    }

    head->next = NULL;
    head->dev = dev;
    head->tstamp = qp->q.stamp;

    iph = ip_hdr(head);
    iph->frag_off = 0;
    iph->tot_len = htons(len);
    IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_REASMOKS);
    qp->q.fragments = NULL;
    return 0;

out_nomem:
    LIMIT_NETDEBUG(KERN_ERR "IP: queue_glue: no memory for gluing "
                   "queue %p\n", qp);
    err = -ENOMEM;
    goto out_fail;
out_oversize:
    if (net_ratelimit())
        printk(KERN_INFO
               "Oversized IP packet from " NIPQUAD_FMT ".\n",
               NIPQUAD(qp->saddr));
out_fail:
    IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_REASMFAILS);
    return err;
}
Example #6
0
static int esp6_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struct sk_buff *skb)
{
	struct ipv6hdr *iph;
	struct ipv6_esp_hdr *esph;
	struct esp_data *esp = x->data;
	struct sk_buff *trailer;
	int blksize = ALIGN(crypto_tfm_alg_blocksize(esp->conf.tfm), 4);
	int alen = esp->auth.icv_trunc_len;
	int elen = skb->len - sizeof(struct ipv6_esp_hdr) - esp->conf.ivlen - alen;

	int hdr_len = skb->h.raw - skb->nh.raw;
	int nfrags;
	unsigned char *tmp_hdr = NULL;
	int ret = 0;

	if (!pskb_may_pull(skb, sizeof(struct ipv6_esp_hdr))) {
		ret = -EINVAL;
		goto out_nofree;
	}
	esph = (struct ipv6_esp_hdr*)skb->data;

	if (elen <= 0 || (elen & (blksize-1))) {
		ret = -EINVAL;
		goto out_nofree;
	}

	tmp_hdr = kmalloc(hdr_len, GFP_ATOMIC);
	if (!tmp_hdr) {
		ret = -ENOMEM;
		goto out_nofree;
	}
	memcpy(tmp_hdr, skb->nh.raw, hdr_len);

	/* If integrity check is required, do this. */
	if (esp->auth.icv_full_len) {
		u8 sum[esp->auth.icv_full_len];
		u8 sum1[alen];

		if (x->props.replay_window && xfrm_replay_check(x, esph->seq_no)) {
			ret = -EINVAL;
			goto out;
		}

		esp->auth.icv(esp, skb, 0, skb->len-alen, sum);

		if (skb_copy_bits(skb, skb->len-alen, sum1, alen))
			BUG();

		if (unlikely(memcmp(sum, sum1, alen))) {
			x->stats.integrity_failed++;
			ret = -EINVAL;
			goto out;
		}

		if (x->props.replay_window)
			xfrm_replay_advance(x, esph->seq_no);

	}

	if ((nfrags = skb_cow_data(skb, 0, &trailer)) < 0) {
		ret = -EINVAL;
		goto out;
	}

	skb->ip_summed = CHECKSUM_NONE;

	iph = skb->nh.ipv6h;

	/* Get ivec. This can be wrong; check against other implementations. */
	if (esp->conf.ivlen)
		crypto_cipher_set_iv(esp->conf.tfm, esph->enc_data, crypto_tfm_alg_ivsize(esp->conf.tfm));

	{
		u8 nexthdr[2];
		struct scatterlist *sg = &esp->sgbuf[0];
		u8 padlen;

		if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
			sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC);
			if (!sg) {
				ret = -ENOMEM;
				goto out;
			}
		}
		skb_to_sgvec(skb, sg, sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen, elen);
		crypto_cipher_decrypt(esp->conf.tfm, sg, sg, elen);
		if (unlikely(sg != &esp->sgbuf[0]))
			kfree(sg);

		if (skb_copy_bits(skb, skb->len-alen-2, nexthdr, 2))
			BUG();

		padlen = nexthdr[0];
		if (padlen+2 >= elen) {
			LIMIT_NETDEBUG(KERN_WARNING "ipsec esp packet is garbage padlen=%d, elen=%d\n", padlen+2, elen);
			ret = -EINVAL;
			goto out;
		}
		/* ... check padding bits here. Silly. :-) */ 

		pskb_trim(skb, skb->len - alen - padlen - 2);
		skb->h.raw = skb_pull(skb, sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen);
		skb->nh.raw += sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen;
		memcpy(skb->nh.raw, tmp_hdr, hdr_len);
		skb->nh.ipv6h->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
		ret = nexthdr[1];
	}

out:
	kfree(tmp_hdr);
out_nofree:
	return ret;
}
Example #7
0
static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
			 struct net_device *dev)
{
	struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
	struct iphdr *iph;
	struct sk_buff *fp, *head = qp->q.fragments;
	int len;
	int ihlen;
	int err;
	u8 ecn;

	ipq_kill(qp);

	ecn = ip4_frag_ecn_table[qp->ecn];
	if (unlikely(ecn == 0xff)) {
		err = -EINVAL;
		goto out_fail;
	}
	/* Make the one we just received the head. */
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);
		if (!fp)
			goto out_nomem;

		fp->next = head->next;
		if (!fp->next)
			qp->q.fragments_tail = fp;
		prev->next = fp;

		skb_morph(head, qp->q.fragments);
		head->next = qp->q.fragments->next;

		kfree_skb(qp->q.fragments);
		qp->q.fragments = head;
	}

	WARN_ON(head == NULL);
	WARN_ON(FRAG_CB(head)->offset != 0);

	/* Allocate a new buffer for the datagram. */
	ihlen = ip_hdrlen(head);
	len = ihlen + qp->q.len;

	err = -E2BIG;
	if (len > 65535)
		goto out_oversize;

	/* Head of list must not be cloned. */
	if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
		goto out_nomem;

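	/* If the first fragment is fragmented itself, we split
	 * it into two chunks: the first with data and paged part
	 * and the second, holding only fragments. */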
	if (skb_has_frag_list(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
			goto out_nomem;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		atomic_add(clone->truesize, &qp->q.net->mem);
	}

	skb_shinfo(head)->frag_list = head->next;
	skb_push(head, head->data - skb_network_header(head));

	for (fp=head->next; fp; fp = fp->next) {
		head->data_len += fp->len;
		head->len += fp->len;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_COMPLETE)
			head->csum = csum_add(head->csum, fp->csum);
		head->truesize += fp->truesize;
	}
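	/* head->truesize now covers every fragment, so one subtraction
	 * releases the whole datagram from the fragment memory accounting. */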
	atomic_sub(head->truesize, &qp->q.net->mem);

	head->next = NULL;
	head->dev = dev;
	head->tstamp = qp->q.stamp;

	iph = ip_hdr(head);
	iph->frag_off = 0;
	iph->tot_len = htons(len);
	iph->tos |= ecn;
	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS);
	qp->q.fragments = NULL;
	qp->q.fragments_tail = NULL;
	return 0;

out_nomem:
	LIMIT_NETDEBUG(KERN_ERR pr_fmt("queue_glue: no memory for gluing queue %p\n"),
		       qp);
	err = -ENOMEM;
	goto out_fail;
out_oversize:
	if (net_ratelimit())
		pr_info("Oversized IP packet from %pI4\n", &qp->saddr);
out_fail:
	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
	return err;
}
Example #8
0
static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
{
	/*
	 * Before AH processing:
	 * [IPv6][Ext1][Ext2][AH][Dest][Payload]
	 * |<-------------->| hdr_len
	 *
	 * To erase AH:
	 * Keep a copy of the cleared headers. After AH processing,
	 * advance skb->network_header past the AH header using skb_pull,
	 * then copy the saved hdr_len bytes back.
	 * If a destination header follows AH, copy it in after [Ext2].
	 *
	 * |<>|[IPv6][Ext1][Ext2][Dest][Payload]
	 * An AH-sized offset remains before the IPv6 header afterwards.
	 */

	struct ipv6_auth_hdr *ah;
	struct ipv6hdr *ip6h;
	struct ah_data *ahp;
	unsigned char *tmp_hdr = NULL;
	u16 hdr_len;
	u16 ah_hlen;
	int nexthdr;
	int err = -EINVAL;

	if (!pskb_may_pull(skb, sizeof(struct ip_auth_hdr)))
		goto out;

	/* We are going to _remove_ AH header to keep sockets happy,
	 * so... Later this can change. */
	if (skb_cloned(skb) &&
	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
		goto out;

	hdr_len = skb->data - skb_network_header(skb);
	ah = (struct ipv6_auth_hdr*)skb->data;
	ahp = x->data;
	nexthdr = ah->nexthdr;
	ah_hlen = (ah->hdrlen + 2) << 2;

	if (ah_hlen != XFRM_ALIGN8(sizeof(struct ipv6_auth_hdr) + ahp->icv_full_len) &&
	    ah_hlen != XFRM_ALIGN8(sizeof(struct ipv6_auth_hdr) + ahp->icv_trunc_len))
		goto out;

	if (!pskb_may_pull(skb, ah_hlen))
		goto out;

	tmp_hdr = kmemdup(skb_network_header(skb), hdr_len, GFP_ATOMIC);
	if (!tmp_hdr)
		goto out;
	ip6h = ipv6_hdr(skb);
	if (ipv6_clear_mutable_options(ip6h, hdr_len, XFRM_POLICY_IN))
		goto free_out;
	ip6h->priority    = 0;
	ip6h->flow_lbl[0] = 0;
	ip6h->flow_lbl[1] = 0;
	ip6h->flow_lbl[2] = 0;
	ip6h->hop_limit   = 0;

	{
		u8 auth_data[MAX_AH_AUTH_LEN];

		memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len);
		memset(ah->auth_data, 0, ahp->icv_trunc_len);
		skb_push(skb, hdr_len);
		err = ah_mac_digest(ahp, skb, ah->auth_data);
		if (err)
			goto free_out;
		err = -EINVAL;
		if (memcmp(ahp->work_icv, auth_data, ahp->icv_trunc_len)) {
			LIMIT_NETDEBUG(KERN_WARNING "ipsec ah authentication error\n");
			x->stats.integrity_failed++;
			goto free_out;
		}
	}

	skb->network_header += ah_hlen;
	memcpy(skb_network_header(skb), tmp_hdr, hdr_len);
	skb->transport_header = skb->network_header;
	__skb_pull(skb, ah_hlen + hdr_len);

	kfree(tmp_hdr);

	return nexthdr;

free_out:
	kfree(tmp_hdr);
out:
	return err;
}
Пример #9
0
int ax8netfilter_udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
{
	struct udp_sock *up = udp_sk(sk);
	int rc;
	int is_udplite = IS_UDPLITE(sk);

	/*
	 *	Charge it to the socket, dropping if the queue is full.
	 */
	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto drop;
	nf_reset(skb);

	if (up->encap_type) {
		/*
		 * This is an encapsulation socket so pass the skb to
		 * the socket's udp_encap_rcv() hook. Otherwise, just
		 * fall through and pass this up the UDP socket.
		 * up->encap_rcv() returns the following value:
		 * =0 if skb was successfully passed to the encap
		 *    handler or was discarded by it.
		 * >0 if skb should be passed on to UDP.
		 * <0 if skb should be resubmitted as proto -N
		 */

		/* if we're overly short, let UDP handle it */
		if (skb->len > sizeof(struct udphdr) &&
		    up->encap_rcv != NULL) {
			int ret;

			ret = (*up->encap_rcv)(sk, skb);
			if (ret <= 0) {
				UDP_INC_STATS_BH(sock_net(sk),
						 UDP_MIB_INDATAGRAMS,
						 is_udplite);
				return -ret;
			}
		}

		/* FALLTHROUGH -- it's a UDP Packet */
	}

	/*
	 * 	UDP-Lite specific tests, ignored on UDP sockets
	 */
	if ((is_udplite & UDPLITE_RECV_CC)  &&  UDP_SKB_CB(skb)->partial_cov) {

		/*
		 * MIB statistics other than incrementing the error count are
		 * disabled for the following two types of errors: these depend
		 * on the application settings, not on the functioning of the
		 * protocol stack as such.
		 *
		 * RFC 3828 here recommends (sec 3.3): "There should also be a
		 * way ... to ... at least let the receiving application block
		 * delivery of packets with coverage values less than a value
		 * provided by the application."
		 */
		if (up->pcrlen == 0) {          /* full coverage was set  */
			LIMIT_NETDEBUG(KERN_WARNING "UDPLITE: partial coverage "
				"%d while full coverage %d requested\n",
				UDP_SKB_CB(skb)->cscov, skb->len);
			goto drop;
		}
		/* The next case involves violating the min. coverage requested
		 * by the receiver. This is subtle: if receiver wants x and x is
		 * greater than the buffersize/MTU then receiver will complain
		 * that it wants x while sender emits packets of smaller size y.
		 * Therefore the above ...()->partial_cov statement is essential.
		 */
		if (UDP_SKB_CB(skb)->cscov  <  up->pcrlen) {
			LIMIT_NETDEBUG(KERN_WARNING
				"UDPLITE: coverage %d too small, need min %d\n",
				UDP_SKB_CB(skb)->cscov, up->pcrlen);
			goto drop;
		}
	}

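	/* A socket filter may look at the payload, so make sure the
	 * checksum is verified before the filter runs. */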
	if (sk->sk_filter) {
		if (udp_lib_checksum_complete(skb))
			goto drop;
	}

	rc = 0;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk))
		rc = ax8netfilter___udp_queue_rcv_skb(sk, skb);
	else
		sk_add_backlog(sk, skb);
	bh_unlock_sock(sk);

	return rc;

drop:
	UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	kfree_skb(skb);
	return -1;
} 
Example #10
0
static int ah6_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struct sk_buff *skb)
{
	/*
	 * Before AH processing:
	 * [IPv6][Ext1][Ext2][AH][Dest][Payload]
	 * |<-------------->| hdr_len
	 *
	 * To erase AH:
	 * Keep a copy of the cleared headers. After AH processing,
	 * advance skb->nh.raw past the AH header using skb_pull,
	 * then copy the saved hdr_len bytes back.
	 * If a destination header follows AH, copy it in after [Ext2].
	 *
	 * |<>|[IPv6][Ext1][Ext2][Dest][Payload]
	 * An AH-sized offset remains before the IPv6 header afterwards.
	 */

	struct ipv6_auth_hdr *ah;
	struct ah_data *ahp;
	unsigned char *tmp_hdr = NULL;
	u16 hdr_len;
	u16 ah_hlen;
	int nexthdr;

	#if defined (CONFIG_OCTEON_NATIVE_IPSEC)
	if ((skb_is_nonlinear(skb) || skb_cloned(skb)) &&
	    skb_linearize(skb, GFP_ATOMIC) != 0) {
		printk("Input: Unable to linearize memory %d\n", skb->len);
		goto out;
	}
	#endif
	if (!pskb_may_pull(skb, sizeof(struct ip_auth_hdr)))
		goto out;

	/* We are going to _remove_ AH header to keep sockets happy,
	 * so... Later this can change. */
	if (skb_cloned(skb) &&
	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
		goto out;

	hdr_len = skb->data - skb->nh.raw;
	ah = (struct ipv6_auth_hdr*)skb->data;
	ahp = x->data;
	nexthdr = ah->nexthdr;
	ah_hlen = (ah->hdrlen + 2) << 2;

	if (ah_hlen != XFRM_ALIGN8(sizeof(struct ipv6_auth_hdr) + ahp->icv_full_len) &&
	    ah_hlen != XFRM_ALIGN8(sizeof(struct ipv6_auth_hdr) + ahp->icv_trunc_len))
		goto out;

	if (!pskb_may_pull(skb, ah_hlen))
		goto out;

	tmp_hdr = kmalloc(hdr_len, GFP_ATOMIC);
	if (!tmp_hdr)
		goto out;
	memcpy(tmp_hdr, skb->nh.raw, hdr_len);
	if (ipv6_clear_mutable_options(skb->nh.ipv6h, hdr_len))
		goto free_out;
	skb->nh.ipv6h->priority    = 0;
	skb->nh.ipv6h->flow_lbl[0] = 0;
	skb->nh.ipv6h->flow_lbl[1] = 0;
	skb->nh.ipv6h->flow_lbl[2] = 0;
	skb->nh.ipv6h->hop_limit   = 0;

	{
		u8 auth_data[MAX_AH_AUTH_LEN];

		memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len);
		memset(ah->auth_data, 0, ahp->icv_trunc_len);
		skb_push(skb, skb->data - skb->nh.raw);
#if defined (CONFIG_OCTEON_NATIVE_IPSEC)
		cavium_process_ah_pkt(ahp, skb, ah->auth_data);
#else
		ahp->icv(ahp, skb, ah->auth_data);
#endif		
		if (memcmp(ah->auth_data, auth_data, ahp->icv_trunc_len)) {
			LIMIT_NETDEBUG(KERN_WARNING "ipsec ah authentication error\n");
			x->stats.integrity_failed++;
			goto free_out;
		}
	}

	skb->nh.raw = skb_pull(skb, ah_hlen);
	memcpy(skb->nh.raw, tmp_hdr, hdr_len);
	skb->nh.ipv6h->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
	skb_pull(skb, hdr_len);
	skb->h.raw = skb->data;


	kfree(tmp_hdr);

	return nexthdr;

free_out:
	kfree(tmp_hdr);
out:
	return -EINVAL;
}
Example #11
0
int ip6_route_me_harder(struct sk_buff *skb)
{
	struct net *net = dev_net(skb_dst(skb)->dev);
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	unsigned int hh_len;
	struct dst_entry *dst;
	struct flowi6 fl6 = {
		.flowi6_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0,
		.flowi6_mark = skb->mark,
		.daddr = iph->daddr,
		.saddr = iph->saddr,
	};

	dst = ip6_route_output(net, skb->sk, &fl6);
	if (dst->error) {
		IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
		LIMIT_NETDEBUG(KERN_DEBUG "ip6_route_me_harder: No more route.\n");
		dst_release(dst);
		return -EINVAL;
	}

	/* Drop old route. */
	skb_dst_drop(skb);

	skb_dst_set(skb, dst);

#ifdef CONFIG_XFRM
	if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
	    xfrm_decode_session(skb, flowi6_to_flowi(&fl6), AF_INET6) == 0) {
		skb_dst_set(skb, NULL);
		dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), skb->sk, 0);
		if (IS_ERR(dst))
			return -1;
		skb_dst_set(skb, dst);
	}
#endif

	/* Change in oif may mean change in hh_len. */
	hh_len = skb_dst(skb)->dev->hard_header_len;
	if (skb_headroom(skb) < hh_len &&
	    pskb_expand_head(skb, HH_DATA_ALIGN(hh_len - skb_headroom(skb)),
			     0, GFP_ATOMIC))
		return -1;

	return 0;
}
EXPORT_SYMBOL(ip6_route_me_harder);

/*
 * Extra routing may be needed on local out, as the QUEUE target never
 * returns control to the table.
 */

struct ip6_rt_info {
	struct in6_addr daddr;
	struct in6_addr saddr;
	u_int32_t mark;
};

static void nf_ip6_saveroute(const struct sk_buff *skb,
			     struct nf_queue_entry *entry)
{
	struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry);

	if (entry->hook == NF_INET_LOCAL_OUT) {
		const struct ipv6hdr *iph = ipv6_hdr(skb);

		rt_info->daddr = iph->daddr;
		rt_info->saddr = iph->saddr;
		rt_info->mark = skb->mark;
	}
}

static int nf_ip6_reroute(struct sk_buff *skb,
			  const struct nf_queue_entry *entry)
{
	struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry);

	if (entry->hook == NF_INET_LOCAL_OUT) {
		const struct ipv6hdr *iph = ipv6_hdr(skb);
		if (!ipv6_addr_equal(&iph->daddr, &rt_info->daddr) ||
		    !ipv6_addr_equal(&iph->saddr, &rt_info->saddr) ||
		    skb->mark != rt_info->mark)
			return ip6_route_me_harder(skb);
	}
	return 0;
}

static int nf_ip6_route(struct net *net, struct dst_entry **dst,
			struct flowi *fl, bool strict)
{
	static const struct ipv6_pinfo fake_pinfo;
	static const struct inet_sock fake_sk = {
		/* makes ip6_route_output set RT6_LOOKUP_F_IFACE: */
		.sk.sk_bound_dev_if = 1,
		.pinet6 = (struct ipv6_pinfo *) &fake_pinfo,
	};
	const void *sk = strict ? &fake_sk : NULL;
	struct dst_entry *result;
	int err;

	result = ip6_route_output(net, sk, &fl->u.ip6);
	err = result->error;
	if (err)
		dst_release(result);
	else
		*dst = result;
	return err;
}

__sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
			     unsigned int dataoff, u_int8_t protocol)
{
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	__sum16 csum = 0;

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (hook != NF_INET_PRE_ROUTING && hook != NF_INET_LOCAL_IN)
			break;
		if (!csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
				     skb->len - dataoff, protocol,
				     csum_sub(skb->csum,
					      skb_checksum(skb, 0,
							   dataoff, 0)))) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}
		/* fall through */
	case CHECKSUM_NONE:
		skb->csum = ~csum_unfold(
				csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					     skb->len - dataoff,
					     protocol,
					     csum_sub(0,
						      skb_checksum(skb, 0,
								   dataoff, 0))));
		csum = __skb_checksum_complete(skb);
	}
	return csum;
}
EXPORT_SYMBOL(nf_ip6_checksum);

static __sum16 nf_ip6_checksum_partial(struct sk_buff *skb, unsigned int hook,
				       unsigned int dataoff, unsigned int len,
				       u_int8_t protocol)
{
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	__wsum hsum;
	__sum16 csum = 0;

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (len == skb->len - dataoff)
			return nf_ip6_checksum(skb, hook, dataoff, protocol);
		/* fall through */
	case CHECKSUM_NONE:
		hsum = skb_checksum(skb, 0, dataoff, 0);
		skb->csum = ~csum_unfold(csum_ipv6_magic(&ip6h->saddr,
							 &ip6h->daddr,
							 skb->len - dataoff,
							 protocol,
							 csum_sub(0, hsum)));
		skb->ip_summed = CHECKSUM_NONE;
		return __skb_checksum_complete_head(skb, dataoff + len);
	}
	return csum;
}

static const struct nf_afinfo nf_ip6_afinfo = {
	.family			= AF_INET6,
	.checksum		= nf_ip6_checksum,
	.checksum_partial	= nf_ip6_checksum_partial,
	.route			= nf_ip6_route,
	.saveroute		= nf_ip6_saveroute,
	.reroute		= nf_ip6_reroute,
	.route_key_size		= sizeof(struct ip6_rt_info),
};

int __init ipv6_netfilter_init(void)
{
	return nf_register_afinfo(&nf_ip6_afinfo);
}

/* This can be called from inet6_init() on errors, so it cannot
 * be marked __exit. -DaveM
 */
void ipv6_netfilter_fini(void)
{
	nf_unregister_afinfo(&nf_ip6_afinfo);
}
Example #12
0
static int dccp_check_seqno(struct sock *sk, struct sk_buff *skb)
{
	const struct dccp_hdr *dh = dccp_hdr(skb);
	struct dccp_sock *dp = dccp_sk(sk);
	u64 lswl, lawl;

	/*
	 *   Step 5: Prepare sequence numbers for Sync
	 *     If P.type == Sync or P.type == SyncAck,
	 *	  If S.AWL <= P.ackno <= S.AWH and P.seqno >= S.SWL,
	 *	     / * P is valid, so update sequence number variables
	 *		 accordingly.  After this update, P will pass the tests
	 *		 in Step 6.  A SyncAck is generated if necessary in
	 *		 Step 15 * /
	 *	     Update S.GSR, S.SWL, S.SWH
	 *	  Otherwise,
	 *	     Drop packet and return
	 */
	if (dh->dccph_type == DCCP_PKT_SYNC ||
	    dh->dccph_type == DCCP_PKT_SYNCACK) {
		if (between48(DCCP_SKB_CB(skb)->dccpd_ack_seq,
			      dp->dccps_awl, dp->dccps_awh) &&
		    !before48(DCCP_SKB_CB(skb)->dccpd_seq, dp->dccps_swl))
			dccp_update_gsr(sk, DCCP_SKB_CB(skb)->dccpd_seq);
		else
			return -1;
	}
	
	/*
	 *   Step 6: Check sequence numbers
	 *      Let LSWL = S.SWL and LAWL = S.AWL
	 *      If P.type == CloseReq or P.type == Close or P.type == Reset,
	 *	  LSWL := S.GSR + 1, LAWL := S.GAR
	 *      If LSWL <= P.seqno <= S.SWH
	 *	     and (P.ackno does not exist or LAWL <= P.ackno <= S.AWH),
	 *	  Update S.GSR, S.SWL, S.SWH
	 *	  If P.type != Sync,
	 *	     Update S.GAR
	 *      Otherwise,
	 *	  Send Sync packet acknowledging P.seqno
	 *	  Drop packet and return
	 */
	lswl = dp->dccps_swl;
	lawl = dp->dccps_awl;

	if (dh->dccph_type == DCCP_PKT_CLOSEREQ ||
	    dh->dccph_type == DCCP_PKT_CLOSE ||
	    dh->dccph_type == DCCP_PKT_RESET) {
		lswl = dp->dccps_gsr;
		dccp_inc_seqno(&lswl);
		lawl = dp->dccps_gar;
	}

	if (between48(DCCP_SKB_CB(skb)->dccpd_seq, lswl, dp->dccps_swh) &&
	    (DCCP_SKB_CB(skb)->dccpd_ack_seq == DCCP_PKT_WITHOUT_ACK_SEQ ||
	     between48(DCCP_SKB_CB(skb)->dccpd_ack_seq,
		       lawl, dp->dccps_awh))) {
		dccp_update_gsr(sk, DCCP_SKB_CB(skb)->dccpd_seq);

		if (dh->dccph_type != DCCP_PKT_SYNC &&
		    (DCCP_SKB_CB(skb)->dccpd_ack_seq !=
		     DCCP_PKT_WITHOUT_ACK_SEQ))
			dp->dccps_gar = DCCP_SKB_CB(skb)->dccpd_ack_seq;
	} else {
		LIMIT_NETDEBUG(KERN_WARNING "DCCP: Step 6 failed for %s packet, "
					    "(LSWL(%llu) <= P.seqno(%llu) <= S.SWH(%llu)) and "
					    "(P.ackno %s or LAWL(%llu) <= P.ackno(%llu) <= S.AWH(%llu), "
					    "sending SYNC...\n",
			       dccp_packet_name(dh->dccph_type),
			       (unsigned long long) lswl,
			       (unsigned long long)
			       DCCP_SKB_CB(skb)->dccpd_seq,
			       (unsigned long long) dp->dccps_swh,
			       (DCCP_SKB_CB(skb)->dccpd_ack_seq ==
			        DCCP_PKT_WITHOUT_ACK_SEQ) ? "doesn't exist" : "exists",
			       (unsigned long long) lawl,
			       (unsigned long long)
			       DCCP_SKB_CB(skb)->dccpd_ack_seq,
			       (unsigned long long) dp->dccps_awh);
		dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq, DCCP_PKT_SYNC);
		return -1;
	}

	return 0;
}