Example #1
File: ip6_output.c Project: akv95/linux
int ip6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct in6_addr final_dst_buf, *final_dst = &final_dst_buf;
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct net *net = sock_net(sk);
	struct ipv6hdr *hdr;
	struct ipv6_txoptions *opt = np->cork.opt;
	struct rt6_info *rt = (struct rt6_info *)inet->cork.base.dst;
	struct flowi6 *fl6 = &inet->cork.fl.u.ip6;
	unsigned char proto = fl6->flowi6_proto;
	int err = 0;

	if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb_network_header(skb))
		__skb_pull(skb, skb_network_offset(skb));
	while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
		__skb_pull(tmp_skb, skb_network_header_len(skb));
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	/* Allow local fragmentation. */
	if (np->pmtudisc < IPV6_PMTUDISC_DO)
		skb->local_df = 1;

	*final_dst = fl6->daddr;
	__skb_pull(skb, skb_network_header_len(skb));
	if (opt && opt->opt_flen)
		ipv6_push_frag_opts(skb, opt, &proto);
	if (opt && opt->opt_nflen)
		ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst);

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	hdr = ipv6_hdr(skb);

	ip6_flow_hdr(hdr, np->cork.tclass, fl6->flowlabel);
	hdr->hop_limit = np->cork.hop_limit;
	hdr->nexthdr = proto;
	hdr->saddr = fl6->saddr;
	hdr->daddr = *final_dst;

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	skb_dst_set(skb, dst_clone(&rt->dst));
	IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
	if (proto == IPPROTO_ICMPV6) {
		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));

		ICMP6MSGOUT_INC_STATS_BH(net, idev, icmp6_hdr(skb)->icmp6_type);
		ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
	}

	err = ip6_local_out(skb);
	if (err) {
		if (err > 0)
			err = net_xmit_errno(err);
		if (err)
			goto error;
	}

out:
	ip6_cork_release(inet, np);
	return err;
error:
	IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
	goto out;
}
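
For reference, the icmp6_hdr() accessor used in the stats branch above (and throughout the examples below) is just a cast of the transport header; a minimal sketch of its definition, as it appears in include/linux/icmpv6.h, is:

/* Reinterprets the transport header as an ICMPv6 header; the caller
 * must have set skb->transport_header to the ICMPv6 header first. */
static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb)
{
	return (struct icmp6hdr *)skb_transport_header(skb);
}
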
Example #2
struct sk_buff *__ip6_make_skb(struct sock *sk,
			       struct sk_buff_head *queue,
			       struct inet_cork_full *cork,
			       struct inet6_cork *v6_cork)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct in6_addr final_dst_buf, *final_dst = &final_dst_buf;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct net *net = sock_net(sk);
	struct ipv6hdr *hdr;
	struct ipv6_txoptions *opt = v6_cork->opt;
	struct rt6_info *rt = (struct rt6_info *)cork->base.dst;
	struct flowi6 *fl6 = &cork->fl.u.ip6;
	unsigned char proto = fl6->flowi6_proto;

	skb = __skb_dequeue(queue);
	if (skb == NULL)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb_network_header(skb))
		__skb_pull(skb, skb_network_offset(skb));
	while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
		__skb_pull(tmp_skb, skb_network_header_len(skb));
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	/* Allow local fragmentation. */
	skb->ignore_df = ip6_sk_ignore_df(sk);

	*final_dst = fl6->daddr;
	__skb_pull(skb, skb_network_header_len(skb));
	if (opt && opt->opt_flen)
		ipv6_push_frag_opts(skb, opt, &proto);
	if (opt && opt->opt_nflen)
		ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst);

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	hdr = ipv6_hdr(skb);

	ip6_flow_hdr(hdr, v6_cork->tclass,
		     ip6_make_flowlabel(net, skb, fl6->flowlabel,
					np->autoflowlabel));
	hdr->hop_limit = v6_cork->hop_limit;
	hdr->nexthdr = proto;
	hdr->saddr = fl6->saddr;
	hdr->daddr = *final_dst;

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	skb_dst_set(skb, dst_clone(&rt->dst));
	IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
	if (proto == IPPROTO_ICMPV6) {
		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));

		ICMP6MSGOUT_INC_STATS(net, idev, icmp6_hdr(skb)->icmp6_type);
		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
	}

	ip6_cork_release(cork, v6_cork);
out:
	return skb;
}
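
Example #2 is the later refactoring of the same logic as example #1: the cork state and the packet queue are passed in explicitly, so the same builder can be reused outside the socket's own write queue. In kernels that carry __ip6_make_skb(), ip6_push_pending_frames() shrinks to roughly the following wrapper (a sketch; ip6_finish_skb() is the inline that feeds the socket's own queue and cork into __ip6_make_skb()):

/* Sketch of the wrapper that replaces example #1 once __ip6_make_skb()
 * exists: build the skb from the socket's pending queue, then send it. */
int ip6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb;

	skb = ip6_finish_skb(sk);	/* __ip6_make_skb() on sk's own queue/cork */
	if (!skb)
		return 0;

	return ip6_send_skb(skb);
}
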
Example #3
File: flow.c Project: hiyao/ovs
static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
                        int nh_len)
{
    struct icmp6hdr *icmp = icmp6_hdr(skb);

    /* The ICMPv6 type and code fields use the 16-bit transport port
     * fields, so we need to store them in 16-bit network byte order.
     */
    key->tp.src = htons(icmp->icmp6_type);
    key->tp.dst = htons(icmp->icmp6_code);
    memset(&key->ipv6.nd, 0, sizeof(key->ipv6.nd));

    if (icmp->icmp6_code == 0 &&
            (icmp->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION ||
             icmp->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT)) {
        int icmp_len = skb->len - skb_transport_offset(skb);
        struct nd_msg *nd;
        int offset;

        /* In order to process neighbor discovery options, we need the
         * entire packet.
         */
        if (unlikely(icmp_len < sizeof(*nd)))
            return 0;

        if (unlikely(skb_linearize(skb)))
            return -ENOMEM;

        nd = (struct nd_msg *)skb_transport_header(skb);
        key->ipv6.nd.target = nd->target;

        icmp_len -= sizeof(*nd);
        offset = 0;
        while (icmp_len >= 8) {
            struct nd_opt_hdr *nd_opt =
                (struct nd_opt_hdr *)(nd->opt + offset);
            int opt_len = nd_opt->nd_opt_len * 8;

            if (unlikely(!opt_len || opt_len > icmp_len))
                return 0;

            /* Store the link layer address if the appropriate
             * option is provided.  It is considered an error if
             * the same link layer option is specified twice.
             */
            if (nd_opt->nd_opt_type == ND_OPT_SOURCE_LL_ADDR
                    && opt_len == 8) {
                if (unlikely(!is_zero_ether_addr(key->ipv6.nd.sll)))
                    goto invalid;
                ether_addr_copy(key->ipv6.nd.sll,
                                &nd->opt[offset+sizeof(*nd_opt)]);
            } else if (nd_opt->nd_opt_type == ND_OPT_TARGET_LL_ADDR
                       && opt_len == 8) {
                if (unlikely(!is_zero_ether_addr(key->ipv6.nd.tll)))
                    goto invalid;
                ether_addr_copy(key->ipv6.nd.tll,
                                &nd->opt[offset+sizeof(*nd_opt)]);
            }

            icmp_len -= opt_len;
            offset += opt_len;
        }
    }

    return 0;

invalid:
    memset(&key->ipv6.nd.target, 0, sizeof(key->ipv6.nd.target));
    memset(key->ipv6.nd.sll, 0, sizeof(key->ipv6.nd.sll));
    memset(key->ipv6.nd.tll, 0, sizeof(key->ipv6.nd.tll));

    return 0;
}
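
The option walk above leans on the neighbour discovery message layout from include/net/ndisc.h; a sketch of the two structures involved, for reference:

/* ND message: ICMPv6 header, target address, then a variable run of
 * options. nd_opt_len counts 8-octet units, which is why the loop
 * above computes opt_len as nd_opt->nd_opt_len * 8. */
struct nd_msg {
	struct icmp6hdr	icmph;
	struct in6_addr	target;
	__u8		opt[0];
};

struct nd_opt_hdr {
	__u8	nd_opt_type;
	__u8	nd_opt_len;
} __packed;
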
Example #4
File: ip6_output.c Project: SelfImp/m75
int ip6_forward(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct inet6_skb_parm *opt = IP6CB(skb);
	struct net *net = dev_net(dst->dev);
	u32 mtu;

	if (net->ipv6.devconf_all->forwarding == 0)
		goto error;

	if (skb_warn_if_lro(skb))
		goto drop;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
		IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	if (skb->pkt_type != PACKET_HOST)
		goto drop;

	skb_forward_csum(skb);

	/*
	 *	We DO NOT do any processing on
	 *	RA packets; we push them to user level AS IS
	 *	without any warranty that the application will be able
	 *	to interpret them. The reason is that we
	 *	cannot do anything clever here.
	 *
	 *	We are not the end node, so if the packet contains
	 *	AH/ESP we cannot do anything with it.
	 *	Defragmentation would also be a mistake; RA packets
	 *	cannot be fragmented, because there is no guarantee
	 *	that different fragments will go along the same path. --ANK
	 */
	if (unlikely(opt->flags & IP6SKB_ROUTERALERT)) {
		if (ip6_call_ra_chain(skb, ntohs(opt->ra)))
			return 0;
	}

	/*
	 *	check and decrement ttl
	 */
	if (hdr->hop_limit <= 1) {
		/* Force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0);
		IP6_INC_STATS_BH(net,
				 ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS);

		kfree_skb(skb);
		return -ETIMEDOUT;
	}

#ifdef MTK_IPV6_TETHER_NDP_MODE
	/* mtk80842: for unicast NA/NS/RA */
	{
		struct ipv6hdr *hdr = ipv6_hdr(skb);
		if (hdr->nexthdr == NEXTHDR_ICMP) {
			struct icmp6hdr *ndhdr = icmp6_hdr(skb);

			printk(KERN_WARNING "%s: icmp6_type = %d\n",
			       __FUNCTION__, ndhdr->icmp6_type);
			if (ndhdr->icmp6_type == NDISC_ROUTER_ADVERTISEMENT ||
			    ndhdr->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION ||
			    ndhdr->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) {
				ndp_forward(skb);
				kfree_skb(skb);
				return 0;
			}
		}
	}
#endif

	/* XXX: idev->cnf.proxy_ndp? */
	if (net->ipv6.devconf_all->proxy_ndp &&
	    pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
		int proxied = ip6_forward_proxy_check(skb);
		if (proxied > 0)
			return ip6_input(skb);
		else if (proxied < 0) {
			IP6_INC_STATS(net, ip6_dst_idev(dst),
				      IPSTATS_MIB_INDISCARDS);
			goto drop;
		}
	}

	if (!xfrm6_route_forward(skb)) {
		IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}
	dst = skb_dst(skb);

	/* IPv6 specs say nothing about it, but it is clear that we cannot
	   send redirects to source routed frames.
	   We don't send redirects to frames decapsulated from IPsec.
	 */
	if (skb->dev == dst->dev && opt->srcrt == 0 && !skb_sec_path(skb)) {
		struct in6_addr *target = NULL;
		struct inet_peer *peer;
		struct rt6_info *rt;

		/*
		 *	incoming and outgoing devices are the same
		 *	send a redirect.
		 */

		rt = (struct rt6_info *) dst;
		if (rt->rt6i_flags & RTF_GATEWAY)
			target = &rt->rt6i_gateway;
		else
			target = &hdr->daddr;

		peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);

		/* Limit redirects both by destination (here)
		   and by source (inside ndisc_send_redirect)
		 */
		if (inet_peer_xrlim_allow(peer, 1*HZ))
			ndisc_send_redirect(skb, target);
		if (peer)
			inet_putpeer(peer);
	} else {
		int addrtype = ipv6_addr_type(&hdr->saddr);

		/* This check is security critical. */
		if (addrtype == IPV6_ADDR_ANY ||
		    addrtype & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LOOPBACK))
			goto error;
		if (addrtype & IPV6_ADDR_LINKLOCAL) {
			icmpv6_send(skb, ICMPV6_DEST_UNREACH,
				    ICMPV6_NOT_NEIGHBOUR, 0);
			goto error;
		}
	}

	mtu = dst_mtu(dst);
	if (mtu < IPV6_MIN_MTU)
		mtu = IPV6_MIN_MTU;

	if (ip6_pkt_too_big(skb, mtu)) {
		/* Again, force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		IP6_INC_STATS_BH(net,
				 ip6_dst_idev(dst), IPSTATS_MIB_INTOOBIGERRORS);
		IP6_INC_STATS_BH(net,
				 ip6_dst_idev(dst), IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	if (skb_cow(skb, dst->dev->hard_header_len)) {
		IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTDISCARDS);
		goto drop;
	}

	hdr = ipv6_hdr(skb);

	/* Mangling hops number delayed to point after skb COW */

	hdr->hop_limit--;

	IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
	IP6_ADD_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dst->dev,
		       ip6_forward_finish);

error:
	IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS);
drop:
	kfree_skb(skb);
	return -EINVAL;
}
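
The NF_HOOK() call at the end hands the packet to netfilter's FORWARD chain and, if it is accepted, to ip6_forward_finish(); in kernels of this vintage that finisher is little more than a call into the cached route's output path, roughly:

/* Sketch of the okfn passed to NF_HOOK() above: once the FORWARD
 * chain accepts the packet, hand it to the dst entry's output hook. */
static inline int ip6_forward_finish(struct sk_buff *skb)
{
	return dst_output(skb);
}
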
Example #5
/**
 * Main Filtering and Updating ("F&U") routine. Called during the processing of every packet.
 *
 * Decides whether "skb" should be processed, updating binding and session information.
 *
 * @param[in] skb packet being translated.
 * @param[in] in_tuple skb's summary.
 * @return indicator of what should happen to skb.
 */
verdict filtering_and_updating(struct sk_buff* skb, struct tuple *in_tuple)
{
	struct ipv6hdr *hdr_ip6;
	verdict result = VER_CONTINUE;

	log_debug("Step 2: Filtering and Updating");

	switch (skb_l3_proto(skb)) {
	case L3PROTO_IPV6:
		/* ICMP errors should not be filtered or affect the tables. */
		if (skb_l4_proto(skb) == L4PROTO_ICMP && is_icmp6_error(icmp6_hdr(skb)->icmp6_type)) {
			log_debug("Packet is ICMPv6 error; skipping step...");
			return VER_CONTINUE;
		}
		/* Get rid of hairpinning loops and unwanted packets. */
		hdr_ip6 = ipv6_hdr(skb);
		if (pool6_contains(&hdr_ip6->saddr)) {
			log_debug("Hairpinning loop. Dropping...");
			inc_stats(skb, IPSTATS_MIB_INADDRERRORS);
			return VER_DROP;
		}
		if (!pool6_contains(&hdr_ip6->daddr)) {
			log_debug("Packet was rejected by pool6; dropping...");
			inc_stats(skb, IPSTATS_MIB_INADDRERRORS);
			return VER_DROP;
		}
		break;
	case L3PROTO_IPV4:
		/* ICMP errors should not be filtered or affect the tables. */
		if (skb_l4_proto(skb) == L4PROTO_ICMP && is_icmp4_error(icmp_hdr(skb)->type)) {
			log_debug("Packet is ICMPv4 error; skipping step...");
			return VER_CONTINUE;
		}
		/* Get rid of unexpected packets */
		if (!pool4_contains(ip_hdr(skb)->daddr)) {
			log_debug("Packet was rejected by pool4; dropping...");
			inc_stats(skb, IPSTATS_MIB_INADDRERRORS);
			return VER_DROP;
		}
		break;
	}

	/* Process packet, according to its protocol. */

	switch (skb_l4_proto(skb)) {
	case L4PROTO_UDP:
		switch (skb_l3_proto(skb)) {
		case L3PROTO_IPV6:
			result = ipv6_simple(skb, in_tuple);
			break;
		case L3PROTO_IPV4:
			result = ipv4_simple(skb, in_tuple);
			break;
		}
		break;

	case L4PROTO_TCP:
		result = tcp(skb, in_tuple);
		break;

	case L4PROTO_ICMP:
		switch (skb_l3_proto(skb)) {
		case L3PROTO_IPV6:
			if (filter_icmpv6_info()) {
				log_debug("Packet is ICMPv6 info (ping); dropping due to policy.");
				inc_stats(skb, IPSTATS_MIB_INDISCARDS);
				return VER_DROP;
			}

			result = ipv6_simple(skb, in_tuple);
			break;
		case L3PROTO_IPV4:
			result = ipv4_simple(skb, in_tuple);
			break;
		}
		break;
	}

	log_debug("Done: Step 2.");
	return result;
}
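
The is_icmp6_error()/is_icmp6_info() predicates used above are project helpers rather than kernel APIs. RFC 4443 splits the ICMPv6 type space so that types 0-127 are error messages and 128-255 are informational, so one plausible (hypothetical) implementation is a simple range check; the project's real helpers may be stricter about unknown types:

/* Hypothetical sketch of the predicates used in the examples above,
 * based only on the RFC 4443 type-space split. */
static bool is_icmp6_error(__u8 type)
{
	return type < 128;	/* 0-127: error messages */
}

static bool is_icmp6_info(__u8 type)
{
	return type >= 128;	/* 128-255: informational messages */
}
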
Example #6
/**
 * Extracts relevant data from "skb" and stores it in "in_tuple".
 *
 * @param skb packet the data will be extracted from.
 * @param in_tuple this function will populate this tuple using "skb"'s contents.
 * @return whether packet processing should continue.
 */
verdict determine_in_tuple(struct sk_buff *skb, struct tuple *in_tuple)
{
	struct icmphdr *icmp4;
	struct icmp6hdr *icmp6;
	verdict result = VER_CONTINUE;

	log_debug("Step 1: Determining the Incoming Tuple");

	switch (skb_l3_proto(skb)) {
	case L3PROTO_IPV4:
		switch (skb_l4_proto(skb)) {
		case L4PROTO_UDP:
			result = ipv4_udp(skb, in_tuple);
			break;
		case L4PROTO_TCP:
			result = ipv4_tcp(skb, in_tuple);
			break;
		case L4PROTO_ICMP:
			icmp4 = icmp_hdr(skb);
			if (is_icmp4_info(icmp4->type)) {
				result = ipv4_icmp_info(skb, in_tuple);
			} else if (is_icmp4_error(icmp4->type)) {
				result = ipv4_icmp_err(skb, in_tuple);
			} else {
				log_debug("Unknown ICMPv4 type: %u. Dropping packet...", icmp4->type);
				inc_stats(skb, IPSTATS_MIB_INHDRERRORS);
				result = VER_DROP;
			}
			break;
		}
		break;

	case L3PROTO_IPV6:
		switch (skb_l4_proto(skb)) {
		case L4PROTO_UDP:
			result = ipv6_udp(skb, in_tuple);
			break;
		case L4PROTO_TCP:
			result = ipv6_tcp(skb, in_tuple);
			break;
		case L4PROTO_ICMP:
			icmp6 = icmp6_hdr(skb);
			if (is_icmp6_info(icmp6->icmp6_type)) {
				result = ipv6_icmp_info(skb, in_tuple);
			} else if (is_icmp6_error(icmp6->icmp6_type)) {
				result = ipv6_icmp_err(skb, in_tuple);
			} else {
				log_debug("Unknown ICMPv6 type: %u. Dropping packet...", icmp6->icmp6_type);
				inc_stats(skb, IPSTATS_MIB_INHDRERRORS);
				result = VER_DROP;
			}
			break;
		}
		break;
	}

	/*
	 * We moved the transport-protocol-not-recognized ICMP errors to packet.c because they're
	 * covered in validations.
	 */

	log_tuple(in_tuple);
	log_debug("Done step 1.");
	return result;
}
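
Example #6 is step 1 of the pipeline whose step 2 appears in example #5: first the incoming tuple is built from the packet, then the packet is filtered against it and the state tables are updated. A hypothetical caller (the surrounding function name is made up for illustration) would chain them like this:

/* Hypothetical sketch of how the two steps above are chained;
 * only determine_in_tuple() and filtering_and_updating() are real. */
static verdict run_first_two_steps(struct sk_buff *skb)
{
	struct tuple in_tuple;
	verdict result;

	result = determine_in_tuple(skb, &in_tuple);		/* Step 1 */
	if (result != VER_CONTINUE)
		return result;

	return filtering_and_updating(skb, &in_tuple);		/* Step 2 */
}
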