Example #1
File: tx.c Project: ANFS/ANFS-kernel
/*
 * Verify that our various assumptions about sk_buffs and the conditions
 * under which TSO will be attempted hold true.  Return the protocol number.
 */
static __be16 efx_tso_check_protocol(struct sk_buff *skb)
{
	__be16 protocol = skb->protocol;

	EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
			    protocol);
	if (protocol == htons(ETH_P_8021Q)) {
		/* Find the encapsulated protocol; reset network header
		 * and transport header based on that. */
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		protocol = veh->h_vlan_encapsulated_proto;
		skb_set_network_header(skb, sizeof(*veh));
		if (protocol == htons(ETH_P_IP))
			skb_set_transport_header(skb, sizeof(*veh) +
						 4 * ip_hdr(skb)->ihl);
		else if (protocol == htons(ETH_P_IPV6))
			skb_set_transport_header(skb, sizeof(*veh) +
						 sizeof(struct ipv6hdr));
	}

	if (protocol == htons(ETH_P_IP)) {
		EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP);
	} else {
		EFX_BUG_ON_PARANOID(protocol != htons(ETH_P_IPV6));
		EFX_BUG_ON_PARANOID(ipv6_hdr(skb)->nexthdr != NEXTHDR_TCP);
	}
	EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data)
			     + (tcp_hdr(skb)->doff << 2u)) >
			    skb_headlen(skb));

	return protocol;
}
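A note on the assertions above: PTR_DIFF() and EFX_BUG_ON_PARANOID() are sfc driver helpers, not core kernel macros. PTR_DIFF is presumably the usual byte-granularity pointer difference, something like:

/* Assumed definition (sfc driver helper, not a core kernel macro): */
#define PTR_DIFF(p1, p2) ((u8 *)(p1) - (u8 *)(p2))

With that reading, the final assertion simply checks that the whole TCP header lies within the linear (non-paged) part of the skb.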
Example #2
/*
 * We need to grow the skb to accommodate the expansion of the ipcomp packet.
 *
 * The following comment comes from skb_decompress(), which does the
 * same...
 *
 * We have no way of knowing the exact length of the resulting
 * decompressed output before we have actually done the decompression.
 * For now, we guess that the packet will not be bigger than the
 * attached ipsec device's mtu or 16260, whichever is bigger.
 * This may be wrong, since the sender's mtu may be bigger yet.
 * XXX This must be dealt with later XXX
 */
static int ipsec_ocf_ipcomp_copy_expand(struct ipsec_rcv_state *irs)
{
	struct sk_buff *nskb;
	unsigned grow_to, grow_by;
	ptrdiff_t ptr_delta;

	if (!irs->skb)
		return IPSEC_RCV_IPCOMPFAILED;

	if (irs->skb->dev) {
		grow_to = irs->skb->dev->mtu <
			  16260 ? 16260 : irs->skb->dev->mtu;
	} else {
		int tot_len;
		if (lsw_ip_hdr_version(irs) == 6)
			tot_len = ntohs(lsw_ip6_hdr(irs)->payload_len) +
				  sizeof(struct ipv6hdr);
		else
			tot_len = ntohs(lsw_ip4_hdr(irs)->tot_len);
		grow_to = 65520 - tot_len;
	}
	grow_by = grow_to - irs->skb->len;
	grow_by -= skb_headroom(irs->skb);
	grow_by -= skb_tailroom(irs->skb);

	/* already big enough, nothing to grow */
	if (!grow_by)
		return IPSEC_RCV_OK;

	nskb = skb_copy_expand(irs->skb, skb_headroom(irs->skb),
			       skb_tailroom(irs->skb) + grow_by, GFP_ATOMIC);
	if (!nskb)
		return IPSEC_RCV_ERRMEMALLOC;

	memcpy(nskb->head, irs->skb->head, skb_headroom(irs->skb));

	skb_set_network_header(nskb,
			       ipsec_skb_offset(irs->skb,
						skb_network_header(irs->skb)));
	skb_set_transport_header(nskb,
				 ipsec_skb_offset(irs->skb,
						  skb_transport_header(
							  irs->skb)));

	/* update all irs pointers */
	ptr_delta = nskb->data - irs->skb->data;
	irs->authenticator = (void*)((char*)irs->authenticator + ptr_delta);
	irs->iph           = (void*)((char*)irs->iph           + ptr_delta);

	/* flip in the large one */
	irs->pre_ipcomp_skb = irs->skb;
	irs->skb = nskb;

	/* move the tail up to the end to let OCF know how big the buffer is */
	if (grow_by > (irs->skb->end - irs->skb->tail))
		grow_by = irs->skb->end - irs->skb->tail;
	skb_put(irs->skb, grow_by);

	return IPSEC_RCV_OK;
}
Example #3
File: skb_ops.c Project: pandax381/NAT64
int skb_from_pkt(void *pkt, u32 pkt_len, struct sk_buff **skb)
{
	*skb = alloc_skb(LL_MAX_HEADER + pkt_len, GFP_ATOMIC);
	if (!*skb) {
		log_err("Could not allocate a skb.");
		return -ENOMEM;
	}

	skb_reserve(*skb, LL_MAX_HEADER); /* Reserve space for Link Layer data. */
	skb_put(*skb, pkt_len); /* L3 + L4 + payload. */

	skb_set_mac_header(*skb, 0);
	skb_set_network_header(*skb, 0);
	skb_set_transport_header(*skb, net_hdr_size(pkt));

	(*skb)->ip_summed = CHECKSUM_UNNECESSARY;
	switch (get_l3_proto(pkt)) {
	case 6:
		(*skb)->protocol = htons(ETH_P_IPV6);
		break;
	case 4:
		(*skb)->protocol = htons(ETH_P_IP);
		break;
	default:
		log_err("Invalid mode: %u.", get_l3_proto(pkt));
		kfree_skb(*skb);
		return -EINVAL;
	}

	/* Copy packet content to skb. */
	memcpy(skb_network_header(*skb), pkt, pkt_len);

	return 0;
}
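A hypothetical caller, assuming `buf` holds a complete layer 3 packet (L3 header + L4 header + payload) of `len` bytes; skb_from_pkt() leaves the network and transport header offsets already set (the example_* name is invented for illustration):

#include <linux/skbuff.h>

static int example_use_skb_from_pkt(void *buf, u32 len)
{
	struct sk_buff *skb;
	int error;

	error = skb_from_pkt(buf, len, &skb);
	if (error)
		return error;

	/* ... hand skb to the translator ... */

	kfree_skb(skb);
	return 0;
}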
Example #4
/**
 * Joins out.l3_hdr, out.l4_hdr and out.payload into a single packet, placing the result in
 * out.packet.
 */
static bool create_skb(struct packet_out *out)
{
	struct sk_buff *new_skb;

	new_skb = alloc_skb(config.packet_head_room // user's reserved.
			+ LL_MAX_HEADER // kernel's reserved + layer 2.
			+ out->l3_hdr_len // layer 3.
			+ out->l4_hdr_len // layer 4.
			+ out->payload_len // packet data.
			+ config.packet_tail_room, // user's reserved+.
			GFP_ATOMIC);
	if (!new_skb) {
		log_warning("  New packet allocation failed.");
		return false;
	}
	out->packet = new_skb;

	skb_reserve(new_skb, config.packet_head_room + LL_MAX_HEADER);
	skb_put(new_skb, out->l3_hdr_len + out->l4_hdr_len + out->payload_len);

	skb_reset_mac_header(new_skb);
	skb_reset_network_header(new_skb);
	skb_set_transport_header(new_skb, out->l3_hdr_len);

	memcpy(skb_network_header(new_skb), out->l3_hdr, out->l3_hdr_len);
	memcpy(skb_transport_header(new_skb), out->l4_hdr, out->l4_hdr_len);
	memcpy(skb_transport_header(new_skb) + out->l4_hdr_len, out->payload, out->payload_len);

	return true;
}
Example #5
static unsigned int ipoptstrip_tg(struct sk_buff *skb,
	const struct xt_action_param *par)
{
	struct ip_options *opt = &(IPCB(skb)->opt);
	unsigned char *opt_ptr, *opt_end_ptr;
	struct iphdr *iphdr;
	const struct xt_ipoptstrip_tg_info *info;
	__wsum csum32;
	
	if (opt->optlen > 0) {
		iphdr = ip_hdr(skb);
		info = par->targinfo;
		
#ifdef DEBUG
		printk("flags: %x\n", info->flags);
		printk("Packet with IP options (%i bytes) from: %pI4 to: %pI4\n", 
			opt->optlen, &iphdr->saddr, &iphdr->daddr);
		print_skb_header_offsets(skb);
#endif
		if (! XT_IPOPTSTRIP_IS_SET(info->flags, XT_IPOPTSTRIP_KEEP_DST)) {
			opt_ptr = (unsigned char*) &iphdr[1];
			opt_end_ptr = opt_ptr + opt->optlen;
			
			for (; opt_ptr < opt_end_ptr; opt_ptr++) {
				
				switch (*opt_ptr) {
					case IPOPT_LSRR:
					case IPOPT_SSRR:
						/* Re-write destination field with last address */
						memcpy(&iphdr->daddr, (opt_ptr+(opt_ptr[1]))-4, 4);
						break;
				}
			}
		}
		
		/* Alter header and total lengths */
		iphdr->ihl = IPV4_HL; // 5 32-bit words in IPv4 header with no options
		iphdr->tot_len = htons(ntohs(iphdr->tot_len) - opt->optlen);
		
		/* Move transport header pointer to after network header */
		skb_set_transport_header(skb, IPV4_LEN);
		
		/* Move remaining data up the buffer */
		memmove(skb_transport_header(skb), skb_transport_header(skb) + opt->optlen,
			skb->tail - (skb->transport_header + opt->optlen));
			
		/* Remove un-needed buffer space */
		skb_trim(skb, (skb->len - opt->optlen));
		
		/* Re-calculate IP header checksum (check must be zero while summing) */
		iphdr->check = 0;
		csum32 = csum_partial(iphdr, sizeof(struct iphdr), 0);
		iphdr->check = csum_fold(csum32);
		
#ifdef DEBUG
		print_skb_header_offsets(skb);
#endif
	}
	
	return XT_CONTINUE;
}
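The csum_partial()/csum_fold() pair above is only correct because iphdr->check is cleared first. The stock helper ip_send_check() performs the clear-and-refold itself, so the same recomputation can be a single call; a minimal sketch:

#include <net/ip.h>

/* Refresh an IPv4 header checksum after editing the header. */
static void example_refresh_ip_checksum(struct iphdr *iph)
{
	ip_send_check(iph);	/* zeroes iph->check, then recomputes */
}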
Example #6
static int ipv6_mc_check_exthdrs(struct sk_buff *skb)
{
	const struct ipv6hdr *ip6h;
	int offset;
	u8 nexthdr;
	__be16 frag_off;

	ip6h = ipv6_hdr(skb);

	if (ip6h->nexthdr != IPPROTO_HOPOPTS)
		return -ENOMSG;

	nexthdr = ip6h->nexthdr;
	offset = skb_network_offset(skb) + sizeof(*ip6h);
	offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off);

	if (offset < 0)
		return -EINVAL;

	if (nexthdr != IPPROTO_ICMPV6)
		return -ENOMSG;

	skb_set_transport_header(skb, offset);

	return 0;
}
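Once ipv6_mc_check_exthdrs() has accepted the packet, the transport header offset points at the ICMPv6 message. A hypothetical continuation (the example_* name is invented for illustration):

#include <linux/icmpv6.h>

static int example_read_icmp6_type(struct sk_buff *skb)
{
	struct icmp6hdr *icmp6;
	int err;

	err = ipv6_mc_check_exthdrs(skb);
	if (err)
		return err;

	/* make sure the ICMPv6 header is in the linear area */
	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(*icmp6)))
		return -EINVAL;

	icmp6 = icmp6_hdr(skb);
	return icmp6->icmp6_type;
}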
Example #7
enum ipsec_xmit_value ipsec_xmit_ipip_setup(struct ipsec_xmit_state *ixs)
{
	lsw_ip4_hdr(ixs)->version  = 4;

	switch (sysctl_ipsec_tos) {
	case 0:
		lsw_ip4_hdr(ixs)->tos = ip_hdr(ixs->skb)->tos;
		break;
	case 1:
		lsw_ip4_hdr(ixs)->tos = 0;
		break;
	default:
		break;
	}
	lsw_ip4_hdr(ixs)->ttl      = SYSCTL_IPSEC_DEFAULT_TTL;
	lsw_ip4_hdr(ixs)->frag_off = 0;
	lsw_ip4_hdr(ixs)->saddr    =
		((struct sockaddr_in*)(ixs->ipsp->ips_addr_s))->sin_addr.s_addr;
	lsw_ip4_hdr(ixs)->daddr    =
		((struct sockaddr_in*)(ixs->ipsp->ips_addr_d))->sin_addr.s_addr;
	lsw_ip4_hdr(ixs)->protocol = IPPROTO_IPIP;
	lsw_ip4_hdr(ixs)->ihl      = sizeof(struct iphdr) >> 2;

#ifdef NET_21
	printk("THIS CODE IS NEVER CALLED\n");
	skb_set_transport_header(ixs->skb,
				 ipsec_skb_offset(ixs->skb, ip_hdr(ixs->skb)));
#endif  /* NET_21 */
	return IPSEC_XMIT_OK;
}
Example #8
File: packet.c Project: NICMx/Jool
verdict pkt_init_ipv6(struct xlation *state, struct sk_buff *skb)
{
	struct pkt_metadata meta;
	verdict result;

	state->in.skb = skb;

	/*
	 * DO NOT, UNDER ANY CIRCUMSTANCES, EXTRACT ANY BYTES FROM THE SKB'S
	 * DATA AREA DIRECTLY (ie. without using skb_hdr_ptr()) UNTIL YOU KNOW
	 * IT HAS ALREADY BEEN pskb_may_pull()ED. ASSUME THAT EVEN THE MAIN
	 * LAYER 3 HEADER CAN BE PAGED.
	 *
	 * Also, careful in this function and subfunctions. pskb_may_pull()
	 * might change pointers, so you generally don't want to store them.
	 */

	result = paranoid_validations(state, sizeof(struct ipv6hdr));
	if (result != VERDICT_CONTINUE)
		return result;

	log_debug("Packet addresses: %pI6c->%pI6c",
			&ipv6_hdr(skb)->saddr,
			&ipv6_hdr(skb)->daddr);

	if (skb->len != get_tot_len_ipv6(skb))
		return inhdr6(state, "Packet size doesn't match the IPv6 header's payload length field.");

	result = summarize_skb6(state, skb_network_offset(skb), &meta);
	if (result != VERDICT_CONTINUE)
		return result;

	if (meta.l4_proto == L4PROTO_ICMP) {
		/* Do not move this to summarize_skb6(), because it risks infinite recursion. */
		result = handle_icmp6(state, &meta);
		if (result != VERDICT_CONTINUE)
			return result;
	}

	if (!pskb_may_pull(skb, meta.payload_offset))
		return truncated(state, "headers");

	state->in.l3_proto = L3PROTO_IPV6;
	state->in.l4_proto = meta.l4_proto;
	state->in.is_inner = 0;
	state->in.is_hairpin = false;
	state->in.hdr_frag = meta.has_frag_hdr
			? skb_offset_to_ptr(skb, meta.frag_offset)
			: NULL;
	skb_set_transport_header(skb, meta.l4_offset);
	state->in.payload = skb_offset_to_ptr(skb, meta.payload_offset);
	state->in.original_pkt = &state->in;

	return VERDICT_CONTINUE;
}
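The warning above concerns paged (non-linear) skbs. The stock kernel idiom for reading a header that may not be linear is skb_header_pointer(), which falls back to copying into a caller-supplied buffer; a minimal sketch (the example_* name is invented):

#include <net/ipv6.h>

static int example_read_frag_hdr(struct sk_buff *skb, int offset, u8 *nexthdr)
{
	struct frag_hdr _fh;
	const struct frag_hdr *fh;

	fh = skb_header_pointer(skb, offset, sizeof(_fh), &_fh);
	if (!fh)
		return -EINVAL;

	/* fh points either into the skb's linear area or at _fh */
	*nexthdr = fh->nexthdr;
	return 0;
}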
Example #9
int mhost_local_deliver(struct sk_buff *skb)
{
    printk(KERN_INFO "mhost_local_deliver called\n");
    
    /* point the transport header at skb->data */
    skb_set_transport_header(skb, 0);

    /* logic here to determine appropriate L4 handler */
    
    udp_mhost_rcv(skb);
    return 0;
}
Example #10
netdev_tx_t trailer_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct sk_buff *nskb;
	int padlen;
	u8 *trailer;

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	/*
	 * We have to make sure that the trailer ends up as the very
	 * last 4 bytes of the packet.  This means that we have to pad
	 * the packet to the minimum ethernet frame size, if necessary,
	 * before adding the trailer.
	 */
	padlen = 0;
	if (skb->len < 60)
		padlen = 60 - skb->len;

	nskb = alloc_skb(NET_IP_ALIGN + skb->len + padlen + 4, GFP_ATOMIC);
	if (nskb == NULL) {
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	skb_reserve(nskb, NET_IP_ALIGN);

	skb_reset_mac_header(nskb);
	skb_set_network_header(nskb, skb_network_header(skb) - skb->head);
	skb_set_transport_header(nskb, skb_transport_header(skb) - skb->head);
	skb_copy_and_csum_dev(skb, skb_put(nskb, skb->len));
	kfree_skb(skb);

	if (padlen) {
		u8 *pad = skb_put(nskb, padlen);
		memset(pad, 0, padlen);
	}

	trailer = skb_put(nskb, 4);
	trailer[0] = 0x80;
	trailer[1] = 1 << p->port;
	trailer[2] = 0x10;
	trailer[3] = 0x00;

	nskb->protocol = htons(ETH_P_TRAILER);

	nskb->dev = p->parent->dst->master_netdev;
	dev_queue_xmit(nskb);

	return NETDEV_TX_OK;
}
Example #11
static struct sk_buff *__xip_start_skb(struct sock *sk, struct xip_dst *xdst,
	const struct xia_addr *src, int src_n, const struct xia_addr *dest,
	int dest_n, u8 dest_last_node, int transhdrlen, int noblock)
{
	struct net_device *dev = xdst->dst.dev;
	struct sk_buff *skb;
	u32 mtu, alloclen;
	int hh_len, xh_len, rc;

	if (!dev) {
		net_warn_ratelimited("XIP %s: there is a bug somewhere, tried to send a datagram, but dst.dev is NULL\n",
				     __func__);
		return ERR_PTR(-ENODEV);
	}

	mtu = dst_mtu(&xdst->dst);
	if (mtu < XIP_MIN_MTU) {
		net_warn_ratelimited("XIP %s: cannot send datagram out because mtu (= %u) of dev %s is less than minimum MTU (= %u)\n",
				     __func__, mtu, dev->name, XIP_MIN_MTU);
		return ERR_PTR(-EMSGSIZE);
	}

	hh_len = LL_RESERVED_SPACE(dev);
	alloclen = hh_len + mtu;
	skb = sock_alloc_send_skb(sk, alloclen, noblock, &rc);
	if (unlikely(!skb))
		return ERR_PTR(rc);

	/* Fill in the control structures. */

	/* Reserve space for the link layer header */
	skb_reserve(skb, hh_len);

	/* Fill XIP header. */
	skb_reset_network_header(skb);
	xh_len = xip_hdr_size(dest_n, src_n);
	skb_put(skb, xh_len);
	xip_fill_in_hdr(skb, xdst, src->s_row, src_n,
			dest->s_row, dest_n, dest_last_node);

	skb_set_transport_header(skb, xh_len);
	skb_put(skb, transhdrlen);

	/* XXX Do we need to set skb_shinfo(skb)->tx_flags? */

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;
	xdst_hold(xdst);
	skb_dst_set(skb, &xdst->dst);
	return skb;
}
Example #12
static struct sk_buff *tnl_skb_gso_segment(struct sk_buff *skb,
					   netdev_features_t features,
					   bool tx_path)
{
	struct iphdr *iph = ip_hdr(skb);
	int pkt_hlen = skb_inner_network_offset(skb); /* inner l2 + tunnel hdr. */
	int mac_offset = skb_inner_mac_offset(skb);
	struct sk_buff *skb1 = skb;
	struct sk_buff *segs;
	__be16 proto = skb->protocol;
	char cb[sizeof(skb->cb)];

	/* setup whole inner packet to get protocol. */
	__skb_pull(skb, mac_offset);
	skb->protocol = __skb_network_protocol(skb);

	/* setup l3 packet for gso, to get around a segmentation bug on older kernels. */
	__skb_pull(skb, (pkt_hlen - mac_offset));
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);

	/* From kernel 3.9, skb->cb is used by skb gso. Therefore
	 * make a copy of it so it can be restored afterwards. */
	memcpy(cb, skb->cb, sizeof(cb));

	segs = __skb_gso_segment(skb, 0, tx_path);
	if (!segs || IS_ERR(segs))
		goto free;

	skb = segs;
	while (skb) {
		__skb_push(skb, pkt_hlen);
		skb_reset_mac_header(skb);
		skb_reset_network_header(skb);
		skb_set_transport_header(skb, sizeof(struct iphdr));
		skb->mac_len = 0;

		memcpy(ip_hdr(skb), iph, pkt_hlen);
		memcpy(skb->cb, cb, sizeof(cb));
		if (OVS_GSO_CB(skb)->fix_segment)
			OVS_GSO_CB(skb)->fix_segment(skb);

		skb->protocol = proto;
		skb = skb->next;
	}
free:
	consume_skb(skb1);
	return segs;
}
Example #13
static int create_skb(int (*l3_hdr_fn)(void *, u16, u8, struct tuple *, bool, bool, u16, u8),
                      int l3_hdr_type, int l3_hdr_len, bool df, bool mf, u16 frag_offset, u8 ttl,
                      int (*l4_hdr_fn)(void *, int, u16, struct tuple *),
                      int l4_hdr_type, int l4_hdr_len, int l4_total_len,
                      int (*payload_fn)(void *, u16), u16 payload_len,
                      int (*l4_post_fn)(void *, u16, struct tuple *),
                      struct sk_buff **result, struct tuple *tuple)
{
    struct sk_buff *skb;
    int datagram_len = l4_hdr_len + payload_len;
    int error;

    skb = alloc_skb(LL_MAX_HEADER + l3_hdr_len + datagram_len, GFP_ATOMIC);
    if (!skb) {
        log_err("New packet allocation failed.");
        return -ENOMEM;
    }
    skb->protocol = htons(l3_hdr_type);

    skb_reserve(skb, LL_MAX_HEADER);
    skb_put(skb, l3_hdr_len + l4_hdr_len + payload_len);

    skb_reset_mac_header(skb);
    skb_reset_network_header(skb);
    skb_set_transport_header(skb, l3_hdr_len);

    error = l3_hdr_fn(skb_network_header(skb), datagram_len, l4_hdr_type, tuple, df, mf,
                      frag_offset, ttl);
    if (error)
        goto failure;
    error = l4_hdr_fn(skb_transport_header(skb), l3_hdr_type, l4_total_len, tuple);
    if (error)
        goto failure;

    error = payload_fn(skb_transport_header(skb) + l4_hdr_len, payload_len);
    if (error)
        goto failure;
    error = l4_post_fn(skb_transport_header(skb), datagram_len, tuple);
    if (error)
        goto failure;

    *result = skb;

    return 0;

failure:
    kfree_skb(skb);
    return error;
}
Example #14
static void init_skbuff(const struct ipq_packet_msg *ipq_msg, struct sk_buff *skb)
{
    memcpy(skb->head, ipq_msg->payload, ipq_msg->data_len);
    skb->len = skb->data_len = ipq_msg->data_len;

    // Because the payload of ipq_msg is an L3 packet, there is no MAC header.
    skb_set_mac_header(skb, ~0U);

    skb_set_network_header(skb, 0);

    struct iphdr *iph = (struct iphdr*)skb_network_header(skb);
    
    skb_set_transport_header(skb, iph->ihl<<2);
    
}
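Note that the snippet stores the full length in skb->data_len, which in the real kernel denotes only the paged portion. A more conventional sketch of the same construction, assuming skb was freshly alloc_skb()'d with room for data_len bytes (the example_* name is invented):

#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/netfilter_ipv4/ip_queue.h>

static void example_init_skb(const struct ipq_packet_msg *msg,
                             struct sk_buff *skb)
{
    struct iphdr *iph;

    /* the payload is an L3 packet, so there is no MAC header */
    skb_reset_network_header(skb);
    memcpy(skb_put(skb, msg->data_len), msg->payload, msg->data_len);

    iph = ip_hdr(skb);
    skb_set_transport_header(skb, iph->ihl << 2);
}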
Example #15
static void prepare_frame_for_deferred_tx(struct ieee80211_sub_if_data *sdata,
		struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	skb_set_mac_header(skb, 0);
	skb_set_network_header(skb, 0);
	skb_set_transport_header(skb, 0);

	skb_set_queue_mapping(skb, IEEE80211_AC_VO);
	skb->priority = 7;

	info->control.vif = &sdata->vif;
	ieee80211_set_qos_hdr(sdata, skb);
}
Example #16
/* Add route optimization header space.
 *
 * The IP header and mutable extension headers will be moved forward to make
 * space for the route optimization header.
 *
 * On exit, skb->h will be set to the start of the encapsulation header to be
 * filled in by x->type->output and skb->nh will be set to the nextheader field
 * of the extension header directly preceding the encapsulation header, or in
 * its absence, that of the top IP header.  The value of skb->data will always
 * point to the top IP header.
 */
static int xfrm6_ro_output(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ipv6hdr *iph;
	u8 *prevhdr;
	int hdr_len;

	skb_push(skb, x->props.header_len);
	iph = ipv6_hdr(skb);

	hdr_len = x->type->hdr_offset(x, skb, &prevhdr);
	skb_set_network_header(skb,
			       (prevhdr - x->props.header_len) - skb->data);
	skb_set_transport_header(skb, hdr_len);
	memmove(skb->data, iph, hdr_len);
	return 0;
}
Example #17
/*  Headroom is not adjusted.  Caller should ensure that skb has sufficient
 *  headroom in case the frame is encrypted. */
static void prepare_frame_for_deferred_tx(struct ieee80211_sub_if_data *sdata,
		struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	skb_set_mac_header(skb, 0);
	skb_set_network_header(skb, 0);
	skb_set_transport_header(skb, 0);

	/* Send all internal mgmt frames on VO. Accordingly set TID to 7. */
	skb_set_queue_mapping(skb, IEEE80211_AC_VO);
	skb->priority = 7;

	info->control.vif = &sdata->vif;
	info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
	ieee80211_set_qos_hdr(sdata, skb);
}
Example #18
File: packet.c Project: NICMx/Jool
verdict pkt_init_ipv4(struct xlation *state, struct sk_buff *skb)
{
	struct pkt_metadata meta;
	verdict result;

	state->in.skb = skb;

	/*
	 * DO NOT, UNDER ANY CIRCUMSTANCES, EXTRACT ANY BYTES FROM THE SKB'S
	 * DATA AREA DIRECTLY (ie. without using skb_hdr_ptr()) UNTIL YOU KNOW
	 * IT HAS ALREADY BEEN pskb_may_pull()ED. ASSUME THAT EVEN THE MAIN
	 * LAYER 3 HEADER CAN BE PAGED.
	 *
	 * Also, careful in this function and subfunctions. pskb_may_pull()
	 * might change pointers, so you generally don't want to store them.
	 */

	result = paranoid_validations(state, sizeof(struct iphdr));
	if (result != VERDICT_CONTINUE)
		return result;

	log_debug("Packet addresses: %pI4->%pI4",
			&ip_hdr(skb)->saddr,
			&ip_hdr(skb)->daddr);

	result = summarize_skb4(state, &meta);
	if (result != VERDICT_CONTINUE)
		return result;

	if (!pskb_may_pull(skb, meta.payload_offset)) {
		log_debug("Could not 'pull' the headers out of the skb.");
		return truncated(state, "headers");
	}

	state->in.l3_proto = L3PROTO_IPV4;
	state->in.l4_proto = meta.l4_proto;
	state->in.is_inner = false;
	state->in.is_hairpin = false;
	state->in.hdr_frag = NULL;
	skb_set_transport_header(skb, meta.l4_offset);
	state->in.payload = skb_offset_to_ptr(skb, meta.payload_offset);
	state->in.original_pkt = &state->in;

	return VERDICT_CONTINUE;
}
Example #19
File: flow.c Project: JunPark/openvswitch
static int check_iphdr(struct sk_buff *skb)
{
	unsigned int nh_ofs = skb_network_offset(skb);
	unsigned int ip_len;
	int err;

	err = check_header(skb, nh_ofs + sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	ip_len = ip_hdrlen(skb);
	if (unlikely(ip_len < sizeof(struct iphdr) ||
		     skb->len < nh_ofs + ip_len))
		return -EINVAL;

	skb_set_transport_header(skb, nh_ofs + ip_len);
	return 0;
}
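check_header() is an openvswitch helper; presumably it just makes sure the first `len` bytes are linear via pskb_may_pull(). A hypothetical open-coded equivalent of the whole validation (the example_* name is invented):

#include <linux/skbuff.h>
#include <linux/ip.h>

static int example_check_iphdr(struct sk_buff *skb)
{
	unsigned int nh_ofs = skb_network_offset(skb);
	unsigned int ip_len;

	if (unlikely(!pskb_may_pull(skb, nh_ofs + sizeof(struct iphdr))))
		return -ENOMEM;

	ip_len = ip_hdrlen(skb);
	if (unlikely(ip_len < sizeof(struct iphdr) ||
		     skb->len < nh_ofs + ip_len))
		return -EINVAL;

	/* pull the options too before pointing the transport header past them */
	if (unlikely(!pskb_may_pull(skb, nh_ofs + ip_len)))
		return -ENOMEM;

	skb_set_transport_header(skb, nh_ofs + ip_len);
	return 0;
}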
Example #20
File: flow.c Project: JunPark/openvswitch
static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key,
			 int *key_lenp)
{
	unsigned int nh_ofs = skb_network_offset(skb);
	unsigned int nh_len;
	int payload_ofs;
	struct ipv6hdr *nh;
	uint8_t nexthdr;
	__be16 frag_off;
	int err;

	*key_lenp = SW_FLOW_KEY_OFFSET(ipv6.label);

	err = check_header(skb, nh_ofs + sizeof(*nh));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);
	nexthdr = nh->nexthdr;
	payload_ofs = (u8 *)(nh + 1) - skb->data;

	key->ip.proto = NEXTHDR_NONE;
	key->ip.tos = ipv6_get_dsfield(nh);
	key->ip.ttl = nh->hop_limit;
	key->ipv6.label = *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
	key->ipv6.addr.src = nh->saddr;
	key->ipv6.addr.dst = nh->daddr;

	payload_ofs = ipv6_skip_exthdr(skb, payload_ofs, &nexthdr, &frag_off);
	if (unlikely(payload_ofs < 0))
		return -EINVAL;

	if (frag_off) {
		if (frag_off & htons(~0x7))
			key->ip.frag = OVS_FRAG_TYPE_LATER;
		else
			key->ip.frag = OVS_FRAG_TYPE_FIRST;
	}

	nh_len = payload_ofs - nh_ofs;
	skb_set_transport_header(skb, nh_ofs + nh_len);
	key->ip.proto = nexthdr;
	return nh_len;
}
Example #21
static int enic_get_skb_header(struct sk_buff *skb, void **iphdr,
	void **tcph, u64 *hdr_flags, void *priv)
{
	struct cq_enet_rq_desc *cq_desc = priv;
	unsigned int ip_len;
	struct iphdr *iph;

	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
	u8 packet_error;
	u16 q_number, completed_index, bytes_written, vlan, checksum;
	u32 rss_hash;

	cq_enet_rq_desc_dec(cq_desc,
		&type, &color, &q_number, &completed_index,
		&ingress_port, &fcoe, &eop, &sop, &rss_type,
		&csum_not_calc, &rss_hash, &bytes_written,
		&packet_error, &vlan_stripped, &vlan, &checksum,
		&fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
		&fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
		&ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
		&fcs_ok);

	if (!(ipv4 && tcp && !ipv4_fragment))
		return -1;

	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	ip_len = ip_hdrlen(skb);
	skb_set_transport_header(skb, ip_len);

	/* make sure the IP datagram is long enough to hold the full TCP header */
	if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
		return -1;

	*hdr_flags = LRO_IPV4 | LRO_TCP;
	*tcph = tcp_hdr(skb);
	*iphdr = iph;

	return 0;
}
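Callbacks with this signature plug into the legacy inet_lro receive offload manager. A hypothetical hookup, assuming the linux/inet_lro.h API of that era (the example_* name is invented):

#include <linux/inet_lro.h>

static void example_setup_lro(struct net_lro_mgr *lro_mgr)
{
	lro_mgr->get_skb_header = enic_get_skb_header;
}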
Example #22
static struct sk_buff*
nat64_alloc_skb(int tlen, int paylen)
{
        struct sk_buff *skb;
        skb = alloc_skb(LL_MAX_HEADER + tlen + paylen, GFP_ATOMIC); 

        if (!skb) {
                return NULL;
        }

        skb_reserve(skb, LL_MAX_HEADER);
        skb_reset_mac_header(skb);
        skb_reset_network_header(skb);

        skb_set_transport_header(skb, tlen);

        skb_put(skb, tlen + paylen);

        return skb;
}
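Hypothetical usage: reserve room for an IPv4 header plus payload, then fill the header through the offsets nat64_alloc_skb() just set (the example_* name is invented):

#include <net/ip.h>

static struct sk_buff *example_build_ipv4(int payload_len)
{
        struct sk_buff *skb;
        struct iphdr *iph;

        skb = nat64_alloc_skb(sizeof(struct iphdr), payload_len);
        if (!skb)
                return NULL;

        iph = ip_hdr(skb);      /* network header == skb->data */
        iph->version = 4;
        iph->ihl = sizeof(*iph) >> 2;
        iph->tot_len = htons(sizeof(*iph) + payload_len);
        /* ... addresses, protocol, ttl, then ip_send_check(iph) ... */

        return skb;
}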
Example #23
static int ipv6_mc_check_ip6hdr(struct sk_buff *skb)
{
	const struct ipv6hdr *ip6h;
	unsigned int len;
	unsigned int offset = skb_network_offset(skb) + sizeof(*ip6h);

	if (!pskb_may_pull(skb, offset))
		return -EINVAL;

	ip6h = ipv6_hdr(skb);

	if (ip6h->version != 6)
		return -EINVAL;

	len = offset + ntohs(ip6h->payload_len);
	if (skb->len < len || len <= offset)
		return -EINVAL;

	skb_set_transport_header(skb, offset);

	return 0;
}
Example #24
static unsigned int udpencap_tg(struct sk_buff **pskb, const struct xt_action_param *par)
{
	const struct xt_udpencap_tginfo *info = par->targinfo;
	struct sk_buff *skb = *pskb;
	unsigned int tproto, nlen;
	bool ipv4 = (par->family == NFPROTO_IPV4);

	if (ipv4) {
		nlen = ip_hdrlen(skb);
		if (nlen < sizeof(struct iphdr))
			return NF_DROP;
		tproto = ip_hdr(skb)->protocol;
	} else {
		nlen = sizeof(struct ipv6hdr);
		tproto = ipv6_hdr(skb)->nexthdr;
	}
	if (!info->encap && tproto != IPPROTO_UDP)
		return NF_DROP;
	skb_set_transport_header(skb, skb_network_offset(skb) + nlen);
	if (!(info->encap ? udpencap_insert_header : udpencap_remove_header)(skb, info))
		return NF_DROP;
	(ipv4 ? udpencap_fix4 : udpencap_fix6)(skb, info);
	return XT_CONTINUE;
}
Example #25
static void sc_send_80211(struct sk_buff *skb, struct net_device *dev)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr;
	int hdrlen;

	printk(KERN_DEBUG "capwap inject: %s: hdr: %p\n",
	       dev->name, skb->data);

	/* detach skb from CAPWAP */
	skb_orphan(skb);
	secpath_reset(skb);

	/* drop any routing info */
	skb_dst_drop(skb);

	/* drop conntrack reference */
	nf_reset(skb);

	hdr = (struct ieee80211_hdr *)skb->data;
	hdrlen = ieee80211_hdrlen(hdr->frame_control);

	skb->dev = dev;

	skb_set_mac_header(skb, hdrlen);
	skb_set_network_header(skb, hdrlen);
	skb_set_transport_header(skb, hdrlen);

	skb->protocol = htons(ETH_P_CONTROL);
	info->flags |= IEEE80211_TX_CTL_INJECTED;

	/* Force the device to verify it. */
	skb->ip_summed = CHECKSUM_NONE;

	dev_queue_xmit(skb);
}
Example #26
/*
 * Syn-proxy session reuse function.
 * Update syn_proxy_seq struct and clean syn-proxy related
 * members.
 */
int
ip_vs_synproxy_reuse_conn(int af, struct sk_buff *skb,
			  struct ip_vs_conn *cp,
			  struct ip_vs_protocol *pp,
			  struct ip_vs_iphdr *iph, int *verdict)
{
	struct tcphdr _tcph, *th = NULL;
	struct ip_vs_synproxy_opt opt;
	int res_cookie_check;
	u32 tcp_conn_reuse_states = 0;

	th = skb_header_pointer(skb, iph->len, sizeof(_tcph), &_tcph);
	if (unlikely(NULL == th)) {
		IP_VS_ERR_RL("skb has a invalid tcp header\n");
		*verdict = NF_DROP;
		return 0;
	}

	tcp_conn_reuse_states =
	    ((sysctl_ip_vs_synproxy_conn_reuse_cl << IP_VS_TCP_S_CLOSE) |
	     (sysctl_ip_vs_synproxy_conn_reuse_tw << IP_VS_TCP_S_TIME_WAIT) |
	     (sysctl_ip_vs_synproxy_conn_reuse_fw << IP_VS_TCP_S_FIN_WAIT) |
	     (sysctl_ip_vs_synproxy_conn_reuse_cw << IP_VS_TCP_S_CLOSE_WAIT) |
	     (sysctl_ip_vs_synproxy_conn_reuse_la << IP_VS_TCP_S_LAST_ACK));

	if (((1 << (cp->state)) & tcp_conn_reuse_states) &&
	    (cp->flags & IP_VS_CONN_F_SYNPROXY) &&
	    (!th->syn && th->ack && !th->rst && !th->fin) &&
	    (cp->syn_proxy_seq.init_seq !=
	     htonl((__u32) ((ntohl(th->ack_seq) - 1))))) {
		/*
		 * Important: set the tcp header before the cookie check, as
		 * it will be used in the cookie_check funcs.
		 */
		skb_set_transport_header(skb, iph->len);
#ifdef CONFIG_IP_VS_IPV6
		if (af == AF_INET6) {
			res_cookie_check = ip_vs_synproxy_v6_cookie_check(skb,
						ntohl(th->ack_seq) - 1, &opt);
		} else
#endif
		{
			res_cookie_check = ip_vs_synproxy_v4_cookie_check(skb,
						ntohl(th->ack_seq) - 1, &opt);
		}

		if (!res_cookie_check) {
			/* update statistics */
			IP_VS_INC_ESTATS(ip_vs_esmib, SYNPROXY_BAD_ACK);
			/*
			 * Cookie check fail, let it go.
			 */
			return 1;
		}

		/* update statistics */
		IP_VS_INC_ESTATS(ip_vs_esmib, SYNPROXY_OK_ACK);
		IP_VS_INC_ESTATS(ip_vs_esmib, SYNPROXY_CONN_REUSED);
		switch (cp->old_state) {
		case IP_VS_TCP_S_CLOSE:
			IP_VS_INC_ESTATS(ip_vs_esmib,
					 SYNPROXY_CONN_REUSED_CLOSE);
			break;
		case IP_VS_TCP_S_TIME_WAIT:
			IP_VS_INC_ESTATS(ip_vs_esmib,
					 SYNPROXY_CONN_REUSED_TIMEWAIT);
			break;
		case IP_VS_TCP_S_FIN_WAIT:
			IP_VS_INC_ESTATS(ip_vs_esmib,
					 SYNPROXY_CONN_REUSED_FINWAIT);
			break;
		case IP_VS_TCP_S_CLOSE_WAIT:
			IP_VS_INC_ESTATS(ip_vs_esmib,
					 SYNPROXY_CONN_REUSED_CLOSEWAIT);
			break;
		case IP_VS_TCP_S_LAST_ACK:
			IP_VS_INC_ESTATS(ip_vs_esmib,
					 SYNPROXY_CONN_REUSED_LASTACK);
			break;
		}

		spin_lock(&cp->lock);
		__syn_proxy_reuse_conn(cp, skb, th, pp);
		spin_unlock(&cp->lock);

		if (unlikely(!syn_proxy_send_rs_syn(af, th, cp, skb, pp, &opt))) {
			IP_VS_ERR_RL
			    ("syn_proxy_send_rs_syn failed when reuse conn!\n");
			/* release conn immediately */
			spin_lock(&cp->lock);
			cp->timeout = 0;
			spin_unlock(&cp->lock);
		}

		*verdict = NF_STOLEN;
		return 0;
	}

	return 1;
}
Example #27
/*
 * Syn-proxy step 2 logic
 * Receive client's 3-handshakes  Ack packet, do cookie check
 * and then send syn to rs after creating a session.
 *
 */
int
ip_vs_synproxy_ack_rcv(int af, struct sk_buff *skb, struct tcphdr *th,
		       struct ip_vs_protocol *pp, struct ip_vs_conn **cpp,
		       struct ip_vs_iphdr *iph, int *verdict)
{
	struct ip_vs_synproxy_opt opt;
	struct ip_vs_service *svc;
	int res_cookie_check;

	/*
	 * Don't check svc syn-proxy flag, as it may
	 * be changed after syn-proxy step 1.
	 */
	if (!th->syn && th->ack && !th->rst && !th->fin &&
	    (svc =
	     ip_vs_service_get(af, skb->mark, iph->protocol, &iph->daddr,
			       th->dest))) {
		if (ip_vs_todrop()) {
			/*
			 * It seems that we are very loaded.
			 * We have to drop this packet :(
			 */
			ip_vs_service_put(svc);
			*verdict = NF_DROP;
			return 0;
		}

		if (sysctl_ip_vs_synproxy_defer &&
		    !syn_proxy_ack_has_data(skb, iph, th)) {
			/* update statistics */
			IP_VS_INC_ESTATS(ip_vs_esmib, SYNPROXY_NULL_ACK);
			/*
			 * We expected an ack packet with payload but got
			 * a pure ack, so we have to drop it.
			 */
			ip_vs_service_put(svc);
			*verdict = NF_DROP;
			return 0;
		}

		/*
		 * Important: set the tcp header before the cookie check, as
		 * it will be used in the cookie_check funcs.
		 */
		skb_set_transport_header(skb, iph->len);
#ifdef CONFIG_IP_VS_IPV6
		if (af == AF_INET6) {
			res_cookie_check = ip_vs_synproxy_v6_cookie_check(skb,
						ntohl(th->ack_seq) - 1, &opt);
		} else
#endif
		{
			res_cookie_check = ip_vs_synproxy_v4_cookie_check(skb,
						ntohl(th->ack_seq) - 1, &opt);
		}

		if (!res_cookie_check) {
			/* update statistics */
			IP_VS_INC_ESTATS(ip_vs_esmib, SYNPROXY_BAD_ACK);
			/*
			 * Cookie check fail, drop it.
			 */
			IP_VS_DBG(6, "syn_cookie check failed seq=%u\n",
				  ntohl(th->ack_seq) - 1);
			ip_vs_service_put(svc);
			*verdict = NF_DROP;
			return 0;
		}

		/* update statistics */
		IP_VS_INC_ESTATS(ip_vs_esmib, SYNPROXY_OK_ACK);

		/*
		 * Let the virtual server select a real server for the
		 * incoming connection, and create a connection entry.
		 */
		*cpp = ip_vs_schedule(svc, skb, 1);
		if (!*cpp) {
			IP_VS_DBG(6, "ip_vs_schedule failed\n");
			*verdict = ip_vs_leave(svc, skb, pp);
			return 0;
		}

		/*
		 * Release service, we don't need it any more.
		 */
		ip_vs_service_put(svc);

		/*
		 * Do nothing but print an error msg on failure, because the
		 * session will be correctly freed in ip_vs_conn_expire.
		 */
		if (!syn_proxy_send_rs_syn(af, th, *cpp, skb, pp, &opt)) {
			IP_VS_ERR_RL("syn_proxy_send_rs_syn failed!\n");
		}

		/* count in the ack packet (STOLEN by synproxy) */
		ip_vs_in_stats(*cpp, skb);

		/*
		 * Activate the session timer, and dec the refcnt.
		 * Also steal the skb, and let the caller return immediately.
		 */
		ip_vs_conn_put(*cpp);
		*verdict = NF_STOLEN;
		return 0;
	}

	return 1;
}
Example #28
/*
 * Reuse skb for syn proxy, called by syn_proxy_syn_rcv().
 * It does the following:
 * 1) set tcp options;
 * 2) compute seq with the cookie func;
 * 3) set tcp seq and ack_seq;
 * 4) exchange ip addrs and tcp ports;
 * 5) compute the iphdr and tcp checksums.
 */
static void
syn_proxy_reuse_skb(int af, struct sk_buff *skb, struct ip_vs_synproxy_opt *opt)
{
	__u32 isn;
	unsigned short tmpport;
	unsigned int tcphoff;
	struct tcphdr *th;

#ifdef CONFIG_IP_VS_IPV6
	if (af == AF_INET6)
		tcphoff = sizeof(struct ipv6hdr);
	else
#endif
		tcphoff = ip_hdrlen(skb);

	th = (void *)skb_network_header(skb) + tcphoff;

	/* deal with tcp options */
	syn_proxy_parse_set_opts(skb, th, opt);

	/* get cookie */
	skb_set_transport_header(skb, tcphoff);
#ifdef CONFIG_IP_VS_IPV6
	if (af == AF_INET6)
		isn = ip_vs_synproxy_cookie_v6_init_sequence(skb, opt);
	else
#endif
		isn = ip_vs_synproxy_cookie_v4_init_sequence(skb, opt);

	/* Set the syn-ack flags:
	 * the tcp flags byte in a syn/ack packet is 00010010 = 0x12
	 */
	((u_int8_t *) th)[13] = 0x12;

	/* Exchange ports */
	tmpport = th->dest;
	th->dest = th->source;
	th->source = tmpport;

	/* Set seq(cookie) and ack_seq */
	th->ack_seq = htonl(ntohl(th->seq) + 1);
	th->seq = htonl(isn);

	/* Exchange addresses and compute checksums */
#ifdef CONFIG_IP_VS_IPV6
	if (af == AF_INET6) {
		struct ipv6hdr *iph = ipv6_hdr(skb);
		struct in6_addr tmpAddr;

		memcpy(&tmpAddr, &iph->saddr, sizeof(struct in6_addr));
		memcpy(&iph->saddr, &iph->daddr, sizeof(struct in6_addr));
		memcpy(&iph->daddr, &tmpAddr, sizeof(struct in6_addr));

		iph->hop_limit = sysctl_ip_vs_synproxy_synack_ttl;

		th->check = 0;
		skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0);
		th->check = csum_ipv6_magic(&iph->saddr, &iph->daddr,
					    skb->len - tcphoff,
					    IPPROTO_TCP, skb->csum);
	} else
#endif
	{
		struct iphdr *iph = ip_hdr(skb);
		__be32 tmpAddr;

		tmpAddr = iph->saddr;
		iph->saddr = iph->daddr;
		iph->daddr = tmpAddr;

		iph->ttl = sysctl_ip_vs_synproxy_synack_ttl;
		iph->tos = 0;

		ip_send_check(iph);

		th->check = 0;
		skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0);
		th->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
					      skb->len - tcphoff,
					      IPPROTO_TCP, skb->csum);
	}
}
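The raw byte poke above works because the TCP flags live in octet 13 of the header, and 00010010b (0x12) is SYN|ACK. An equivalent sketch using the tcphdr bitfields keeps that layout knowledge implicit (the example_* name is invented):

#include <linux/tcp.h>

static void example_set_synack_flags(struct tcphdr *th)
{
	th->fin = 0;
	th->syn = 1;	/* 0x12 == SYN|ACK */
	th->rst = 0;
	th->psh = 0;
	th->ack = 1;
	th->urg = 0;
	th->ece = 0;
	th->cwr = 0;
}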
Example #29
File: ax25_in.c Project: robacklin/nxc2620
static int ax25_rcv(struct sk_buff *skb, struct net_device *dev,
	ax25_address *dev_addr, struct packet_type *ptype)
{
	ax25_address src, dest, *next_digi = NULL;
	int type = 0, mine = 0, dama;
	struct sock *make, *sk;
	ax25_digi dp, reverse_dp;
	ax25_cb *ax25;
	ax25_dev *ax25_dev;

	/*
	 *	Process the AX.25/LAPB frame.
	 */

	skb_reset_transport_header(skb);

	if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL) {
		kfree_skb(skb);
		return 0;
	}

	/*
	 *	Parse the address header.
	 */

	if (ax25_addr_parse(skb->data, skb->len, &src, &dest, &dp, &type, &dama) == NULL) {
		kfree_skb(skb);
		return 0;
	}

	/*
	 *	Ours perhaps ?
	 */
	if (dp.lastrepeat + 1 < dp.ndigi)		/* Not yet digipeated completely */
		next_digi = &dp.calls[dp.lastrepeat + 1];

	/*
	 *	Pull off the AX.25 headers, leaving the CTRL/PID bytes
	 */
	skb_pull(skb, ax25_addr_size(&dp));

	/* For our port addresses ? */
	if (ax25cmp(&dest, dev_addr) == 0 && dp.lastrepeat + 1 == dp.ndigi)
		mine = 1;

	/* Also match on any registered callsign from L3/4 */
	if (!mine && ax25_listen_mine(&dest, dev) && dp.lastrepeat + 1 == dp.ndigi)
		mine = 1;

	/* UI frame - bypass LAPB processing */
	if ((*skb->data & ~0x10) == AX25_UI && dp.lastrepeat + 1 == dp.ndigi) {
		skb_set_transport_header(skb, 2); /* skip control and pid */

		ax25_send_to_raw(&dest, skb, skb->data[1]);

		if (!mine && ax25cmp(&dest, (ax25_address *)dev->broadcast) != 0) {
			kfree_skb(skb);
			return 0;
		}

		/* Now we are pointing at the pid byte */
		switch (skb->data[1]) {
		case AX25_P_IP:
			skb_pull(skb,2);		/* drop PID/CTRL */
			skb_reset_transport_header(skb);
			skb_reset_network_header(skb);
			skb->dev      = dev;
			skb->pkt_type = PACKET_HOST;
			skb->protocol = htons(ETH_P_IP);
			netif_rx(skb);
			break;

		case AX25_P_ARP:
			skb_pull(skb,2);
			skb_reset_transport_header(skb);
			skb_reset_network_header(skb);
			skb->dev      = dev;
			skb->pkt_type = PACKET_HOST;
			skb->protocol = htons(ETH_P_ARP);
			netif_rx(skb);
			break;
		case AX25_P_TEXT:
			/* Now find a suitable dgram socket */
			sk = ax25_get_socket(&dest, &src, SOCK_DGRAM);
			if (sk != NULL) {
				bh_lock_sock(sk);
				if (atomic_read(&sk->sk_rmem_alloc) >=
				    sk->sk_rcvbuf) {
					kfree_skb(skb);
				} else {
					/*
					 *	Remove the control and PID.
					 */
					skb_pull(skb, 2);
					if (sock_queue_rcv_skb(sk, skb) != 0)
						kfree_skb(skb);
				}
				bh_unlock_sock(sk);
				sock_put(sk);
			} else {
				kfree_skb(skb);
			}
			break;

		default:
			kfree_skb(skb);	/* Will scan SOCK_AX25 RAW sockets */
			break;
		}

		return 0;
	}

	/*
	 *	Is connected mode supported on this device ?
	 *	If not, should we DM the incoming frame (except DMs) or
	 *	silently ignore them. For now we stay quiet.
	 */
	if (ax25_dev->values[AX25_VALUES_CONMODE] == 0) {
		kfree_skb(skb);
		return 0;
	}

	/* LAPB */

	/* AX.25 state 1-4 */

	ax25_digi_invert(&dp, &reverse_dp);

	if ((ax25 = ax25_find_cb(&dest, &src, &reverse_dp, dev)) != NULL) {
		/*
		 *	Process the frame. If it is queued up internally it
		 *	returns one otherwise we free it immediately. This
		 *	routine itself wakes the user context layers so we do
		 *	no further work
		 */
		if (ax25_process_rx_frame(ax25, skb, type, dama) == 0)
			kfree_skb(skb);

		ax25_cb_put(ax25);
		return 0;
	}

	/* AX.25 state 0 (disconnected) */

	/* a) received not a SABM(E) */

	if ((*skb->data & ~AX25_PF) != AX25_SABM &&
	    (*skb->data & ~AX25_PF) != AX25_SABME) {
		/*
		 *	Never reply to a DM. Also ignore any connects for
		 *	addresses that are not our interfaces and not a socket.
		 */
		if ((*skb->data & ~AX25_PF) != AX25_DM && mine)
			ax25_return_dm(dev, &src, &dest, &dp);

		kfree_skb(skb);
		return 0;
	}

	/* b) received SABM(E) */

	if (dp.lastrepeat + 1 == dp.ndigi)
		sk = ax25_find_listener(&dest, 0, dev, SOCK_SEQPACKET);
	else
		sk = ax25_find_listener(next_digi, 1, dev, SOCK_SEQPACKET);

	if (sk != NULL) {
		bh_lock_sock(sk);
		if (sk_acceptq_is_full(sk) ||
		    (make = ax25_make_new(sk, ax25_dev)) == NULL) {
			if (mine)
				ax25_return_dm(dev, &src, &dest, &dp);
			kfree_skb(skb);
			bh_unlock_sock(sk);
			sock_put(sk);

			return 0;
		}

		ax25 = ax25_sk(make);
		skb_set_owner_r(skb, make);
		skb_queue_head(&sk->sk_receive_queue, skb);

		make->sk_state = TCP_ESTABLISHED;

		sk->sk_ack_backlog++;
		bh_unlock_sock(sk);
	} else {
		if (!mine) {
			kfree_skb(skb);
			return 0;
		}

		if ((ax25 = ax25_create_cb()) == NULL) {
			ax25_return_dm(dev, &src, &dest, &dp);
			kfree_skb(skb);
			return 0;
		}

		ax25_fillin_cb(ax25, ax25_dev);
	}

	ax25->source_addr = dest;
	ax25->dest_addr   = src;

	/*
	 *	Sort out any digipeated paths.
	 */
	if (dp.ndigi && !ax25->digipeat &&
	    (ax25->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) {
		kfree_skb(skb);
		ax25_destroy_socket(ax25);
		if (sk)
			sock_put(sk);
		return 0;
	}

	if (dp.ndigi == 0) {
		kfree(ax25->digipeat);
		ax25->digipeat = NULL;
	} else {
		/* Reverse the source SABM's path */
		memcpy(ax25->digipeat, &reverse_dp, sizeof(ax25_digi));
	}

	if ((*skb->data & ~AX25_PF) == AX25_SABME) {
		ax25->modulus = AX25_EMODULUS;
		ax25->window  = ax25_dev->values[AX25_VALUES_EWINDOW];
	} else {
		ax25->modulus = AX25_MODULUS;
		ax25->window  = ax25_dev->values[AX25_VALUES_WINDOW];
	}

	ax25_send_control(ax25, AX25_UA, AX25_POLLON, AX25_RESPONSE);

#ifdef CONFIG_AX25_DAMA_SLAVE
	if (dama && ax25->ax25_dev->values[AX25_VALUES_PROTOCOL] == AX25_PROTO_DAMA_SLAVE)
		ax25_dama_on(ax25);
#endif

	ax25->state = AX25_STATE_3;

	ax25_cb_add(ax25);

	ax25_start_heartbeat(ax25);
	ax25_start_t3timer(ax25);
	ax25_start_idletimer(ax25);

	if (sk) {
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_data_ready(sk, skb->len);
		sock_put(sk);
	} else
		kfree_skb(skb);

	return 0;
}
Example #30
//-----------------------------------------------------------------------------
static unsigned int
_gtpurh_tg4_rem(struct sk_buff *orig_skb_pP, const struct xt_action_param *par_pP) {
//-----------------------------------------------------------------------------

    struct iphdr   *iph_p            = ip_hdr(orig_skb_pP);
    struct iphdr   *iph2_p           = NULL;
    struct udphdr  *udph_p           = NULL;
    struct gtpuhdr *gtpuh_p          = NULL;
    struct sk_buff *skb_p            = NULL;
#if defined(NEW_SKB)
    struct sk_buff *new_skb_p        = NULL;
    struct iphdr   *new_ip_p         = NULL;
#endif
    uint16_t        gtp_payload_size = 0;

    /* Create a new copy of the original skb_p...can't avoid :-( LG: WHY???*/
#if defined(ROUTE_PACKET)
    skb_p = skb_copy(orig_skb_pP, GFP_ATOMIC);
    if (skb_p == NULL) {
        return NF_ACCEPT;
    }
    skb_p->skb_iif  = orig_skb_pP->skb_iif;
    pr_info("GTPURH: skb protocol %04X\n", orig_skb_pP->protocol);
    skb_p->protocol = orig_skb_pP->protocol;
#else
    skb_p = orig_skb_pP;
    if (skb_linearize(skb_p) < 0) {
        pr_info("GTPURH: skb DROPPED (no linearize)\n");
        return NF_DROP;
    }
#endif
    //---------------------------
    // check if it is a GTPU tunnel
    if (iph_p->protocol != IPPROTO_UDP) {
        pr_info("GTPURH: skb DROPPED Not GTPV1U packet (not UDP)\n");
        return NF_ACCEPT;
    }

    //---------------------------
    // check if it is a fragment,
    // but it should not happen since the MTU should have been set bigger than 1500 + GTP encap.
    // TODO: segment later; did not succeed in getting all fragments of an ip packet in this target!
    if (_gtpurh_ip_is_fragment(iph_p)) {
        pr_info("GTPURH: ip_is_fragment YES, FLAGS %04X & %04X = %04X\n",
                iph_p->frag_off,
                htons(IP_MF | IP_OFFSET),
                iph_p->frag_off & htons(IP_MF | IP_OFFSET));
            return NF_ACCEPT;
    }

    if (skb_p->len <= sizeof (struct udphdr) + sizeof (struct gtpuhdr) + sizeof (struct iphdr)) {
        pr_info("GTPURH: Thought was GTPV1U packet but too short length\n");
        return NF_ACCEPT;
    }
    /* Remove IP header */
    udph_p = (struct udphdr*)skb_pull(skb_p, (iph_p->ihl << 2));

    if (udph_p->dest != htons(GTPURH_PORT)) {
        pr_info("GTPURH: Not GTPV1U packet (bad UDP dest port)\n");
        skb_push(skb_p, (iph_p->ihl << 2));
        return NF_ACCEPT;
    }

    /* Remove UDP header */
    gtpuh_p = (struct gtpuhdr*)skb_pull(skb_p, sizeof(struct udphdr));
    gtp_payload_size = ntohs(gtpuh_p->length);

    skb_p->mark = ntohl(gtpuh_p->tunid);
    /* Remove GTPu header */
    skb_pull(skb_p, sizeof(struct gtpuhdr));

    /* If additional fields are present in header, remove them also */
    if (gtpuh_p->flags & GTPURH_ANY_EXT_HDR_BIT)
    {
        pr_info("GTPURH: GTPURH_ANY_EXT_HDR_BIT found\n");
        skb_pull(skb_p, sizeof(short) + sizeof(char) + sizeof(char)); /* #Seq, #N-PDU, #ExtHdr Type */
        gtp_payload_size = gtp_payload_size - sizeof(short) - sizeof(char) - sizeof(char);
    }
    skb_set_network_header(skb_p, 0);
    iph2_p   = ip_hdr(skb_p);
    skb_set_transport_header(skb_p, iph2_p->ihl << 2);

    if ((iph2_p->version  != 4 ) && (iph2_p->version  != 6)) {
        pr_info("\nGTPURH: Decapsulated packet dropped because not IPvx protocol see all GTPU packet here:\n");
        _gtpurh_print_hex_octets((unsigned char*)iph_p, ntohs(iph_p->tot_len));
        return NF_DROP;
    }
#if 0
    if ((skb_p->mark == 0) || (gtp_payload_size != ntohs(iph2_p->tot_len))) {
        pr_info("\nGTPURH: Decapsulated packet: %d.%d.%d.%d --> %d.%d.%d.%d Proto: %d, Total Len (IP): %u mark %u Frag offset %u Flags 0x%0x\n",
                iph2_p->saddr  & 0xFF,
                (iph2_p->saddr & 0x0000FF00) >> 8,
                (iph2_p->saddr & 0x00FF0000) >> 16,
                iph2_p->saddr >> 24,
                iph2_p->daddr  & 0xFF,
                (iph2_p->daddr & 0x0000FF00) >> 8,
                (iph2_p->daddr & 0x00FF0000) >> 16,
                iph2_p->daddr >> 24,
                iph2_p->protocol,
                ntohs(iph2_p->tot_len),
                skb_p->mark,
                ntohs(iph_p->frag_off) & 0x1FFF,
                ntohs(iph_p->frag_off) >> 13);

        if (gtp_payload_size != ntohs(iph2_p->tot_len)) {
            pr_info("GTPURH: Mismatch in lengths GTPU length: %u -> %u, IP length %u\n",
                    ntohs(gtpuh_p->length),
                    gtp_payload_size,
                    ntohs(iph2_p->tot_len));

            _gtpurh_print_hex_octets((unsigned char*)iph_p, ntohs(iph_p->tot_len));
            return NF_DROP;
        }
    }