Example 1
int vxlan_xmit_skb(struct vxlan_sock *vs,
		   struct rtable *rt, struct sk_buff *skb,
		   __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
		   __be16 src_port, __be16 dst_port, __be32 vni)
{
	struct vxlanhdr *vxh;
	struct udphdr *uh;
	int min_headroom;
	int err;

	min_headroom = LL_RESERVED_SPACE(rt_dst(rt).dev) + rt_dst(rt).header_len
			+ VXLAN_HLEN + sizeof(struct iphdr)
			+ (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);

	/* Need space for new headers (invalidates iph ptr) */
	err = skb_cow_head(skb, min_headroom);
	if (unlikely(err)) {
		kfree_skb(skb);
		return err;
	}

	if (vlan_tx_tag_present(skb)) {
		if (unlikely(!vlan_insert_tag_set_proto(skb,
							skb->vlan_proto,
							vlan_tx_tag_get(skb))))
			return -ENOMEM;

		vlan_set_tci(skb, 0);
	}

	skb_reset_inner_headers(skb);

	vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
	vxh->vx_flags = htonl(VXLAN_FLAGS);
	vxh->vx_vni = vni;

	__skb_push(skb, sizeof(*uh));
	skb_reset_transport_header(skb);
	uh = udp_hdr(skb);

	uh->dest = dst_port;
	uh->source = src_port;

	uh->len = htons(skb->len);
	uh->check = 0;

	vxlan_set_owner(vs->sock->sk, skb);

	skb = handle_offloads(skb);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return iptunnel_xmit(vs->sock->sk, rt, skb, src, dst, IPPROTO_UDP,
			     tos, ttl, df, false);
}
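
The pattern above is the canonical one for tunnel transmit paths: compute the worst-case headroom for every header about to be prepended (VXLAN + UDP + IP + optional VLAN), call skb_cow_head() once, then push. Below is a minimal userspace sketch of the pointer arithmetic behind __skb_push(); the mock_* names are illustrative, not kernel API.

#include <assert.h>
#include <stddef.h>

struct mock_skb {
	unsigned char buf[2048];
	unsigned char *head;	/* start of the allocated buffer */
	unsigned char *data;	/* current start of the packet */
	size_t len;		/* bytes from data to the tail */
};

static void mock_init(struct mock_skb *skb, size_t headroom)
{
	skb->head = skb->buf;
	skb->data = skb->buf + headroom;	/* like skb_reserve() */
	skb->len = 0;
}

static size_t mock_headroom(const struct mock_skb *skb)
{
	return (size_t)(skb->data - skb->head);
}

/* Analogue of __skb_push(): expose n bytes in front of the packet.
 * Like the kernel primitive, it trusts the caller to have verified
 * the headroom first (the skb_cow_head() call above). */
static unsigned char *mock_push(struct mock_skb *skb, size_t n)
{
	assert(mock_headroom(skb) >= n);
	skb->data -= n;
	skb->len += n;
	return skb->data;
}

The assert stands in for the underflow panic of the checked skb_push(); __skb_push() itself performs no check, which is why min_headroom is computed up front.
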
Example 2
int bpf_pop_vlan(struct bpf_context *pctx)
{
	struct bpf_dp_context *ctx = container_of(pctx, struct bpf_dp_context,
						  context);
	struct sk_buff *skb = ctx->skb;

	if (unlikely(!skb))
		return -EINVAL;

	ctx->context.vlan_tag = 0;
	if (vlan_tx_tag_present(skb)) {
		skb->vlan_tci = 0;
	} else {
		if (skb->protocol != htons(ETH_P_8021Q) ||
		    skb->len < VLAN_ETH_HLEN)
			return 0;

		if (!pskb_may_pull(skb, ETH_HLEN))
			return 0;

		__skb_pull(skb, ETH_HLEN);
		skb = vlan_untag(skb);
		if (!skb) {
			ctx->skb = NULL;
			return -ENOMEM;
		}
		__skb_push(skb, ETH_HLEN);

		skb->vlan_tci = 0;
		ctx->context.length = skb->len;
		ctx->skb = skb;
	}
	/* move next vlan tag to hw accel tag */
	if (skb->protocol != htons(ETH_P_8021Q) ||
	    skb->len < VLAN_ETH_HLEN)
		return 0;

	if (!pskb_may_pull(skb, ETH_HLEN))
		return 0;

	__skb_pull(skb, ETH_HLEN);
	skb = vlan_untag(skb);
	if (!skb) {
		ctx->skb = NULL;
		return -ENOMEM;
	}
	__skb_push(skb, ETH_HLEN);

	ctx->context.vlan_tag = vlan_tx_tag_get(skb);
	ctx->context.length = skb->len;
	ctx->skb = skb;

	return 0;
}
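
bpf_pop_vlan() covers two cases: a tag already in the hardware-acceleration slot (skb->vlan_tci) and a tag carried in-band in the packet, which vlan_untag() strips; a second, inner tag is then promoted to the accel slot if present. As a rough illustration of what stripping an in-band tag amounts to, here is a self-contained userspace sketch over a plain Ethernet frame buffer. The MOCK_* constants mirror <linux/if_vlan.h>, but everything here is illustrative, not the kernel implementation.

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

#define MOCK_ETH_ALEN	 6
#define MOCK_ETH_HLEN	 14
#define MOCK_VLAN_HLEN	 4
#define MOCK_ETH_P_8021Q 0x8100

/* Returns the host-order TCI and compacts the frame in place,
 * or -1 if the frame carries no 802.1Q tag. */
static int mock_pop_vlan(uint8_t *frame, size_t *len)
{
	uint16_t tpid, tci;

	if (*len < MOCK_ETH_HLEN + MOCK_VLAN_HLEN)
		return -1;
	memcpy(&tpid, frame + 2 * MOCK_ETH_ALEN, sizeof(tpid));
	if (ntohs(tpid) != MOCK_ETH_P_8021Q)
		return -1;
	memcpy(&tci, frame + 2 * MOCK_ETH_ALEN + 2, sizeof(tci));

	/* Close the 4-byte gap: slide EtherType and payload over the tag. */
	memmove(frame + 2 * MOCK_ETH_ALEN,
		frame + 2 * MOCK_ETH_ALEN + MOCK_VLAN_HLEN,
		*len - (2 * MOCK_ETH_ALEN + MOCK_VLAN_HLEN));
	*len -= MOCK_VLAN_HLEN;
	return ntohs(tci);
}

The kernel's vlan_untag() does the cheaper move in the other direction (sliding the two MAC addresses toward the payload), but the resulting frame is the same.
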
Example 3
static struct sk_buff *mpls_gso_segment(struct sk_buff *skb,
				       netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	netdev_features_t mpls_features;
	__be16 mpls_protocol;

	/* Setup inner SKB. */
	mpls_protocol = skb->protocol;
	skb->protocol = skb->inner_protocol;

	/* Push back the mac header that skb_mac_gso_segment() has pulled.
	 * It will be re-pulled by the call to skb_mac_gso_segment() below
	 */
	__skb_push(skb, skb->mac_len);

	/* Segment inner packet. */
	mpls_features = skb->dev->mpls_features & features;
	segs = skb_mac_gso_segment(skb, mpls_features);


	/* Restore outer protocol. */
	skb->protocol = mpls_protocol;

	/* Re-pull the mac header that the call to skb_mac_gso_segment()
	 * above pulled.  It will be re-pushed after returning
	 * skb_mac_gso_segment(), an indirect caller of this function.
	 */
	__skb_pull(skb, skb->data - skb_mac_header(skb));

	return segs;
}
Example 4
static bool udpencap_insert_header(struct sk_buff *skb, const struct xt_udpencap_tginfo *info)
{
	struct udphdr *uh;
	if (!skb_make_writable(&skb, skb_transport_offset(skb)))
		return false;

	if (skb->len + sizeof(struct udphdr) > 65535)
		return false;

	if (skb_cow(skb, sizeof(struct udphdr) + LL_RESERVED_SPACE(skb_dst(skb)->dev)))
		return false;

	memmove(skb->data - sizeof(struct udphdr), skb->data, skb_transport_offset(skb));
	__skb_push(skb, sizeof(struct udphdr));
	skb->network_header -= sizeof(struct udphdr);
	skb->transport_header -= sizeof(struct udphdr);

	uh = udp_hdr(skb);
	uh->source = info->sport;
	uh->dest = info->dport;
	uh->len = htons(skb->len - skb_transport_offset(skb));
	uh->check = 0;

	return true;
}
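
Note the order of operations above: the headers in front of the transport header are first slid forward into the headroom with memmove(), __skb_push() then exposes that space, and the header offsets are adjusted before the new UDP header is written. A userspace sketch of the same splice, with plain memory standing in for the skb (names illustrative):

#include <assert.h>
#include <string.h>

/* 'data' points at a packet inside a larger buffer that has at least
 * 'hdrlen' bytes of free headroom in front of it; 'offset' is where the
 * new header goes (e.g. the transport offset). Returns the new start. */
static unsigned char *insert_header(unsigned char *data, size_t pktlen,
				    size_t offset, const void *hdr,
				    size_t hdrlen)
{
	assert(offset <= pktlen);
	/* Shift the leading 'offset' bytes (e.g. the IP header) forward. */
	memmove(data - hdrlen, data, offset);
	/* Drop the new header into the gap that just opened. */
	memcpy(data - hdrlen + offset, hdr, hdrlen);
	return data - hdrlen;
}

Moving the small leading region instead of the (typically much larger) payload is the whole point of the idiom.
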
Example 5
int iptunnel_xmit(struct rtable *rt, struct sk_buff *skb,
		  __be32 src, __be32 dst, __u8 proto,
		  __u8 tos, __u8 ttl, __be16 df, bool xnet)
{
	int pkt_len = skb->len;
	struct iphdr *iph;
	int err;

	skb_scrub_packet(skb, xnet);

	skb->rxhash = 0;
	skb_dst_set(skb, &rt->dst);
	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));

	/* Push down and install the IP header. */
	__skb_push(skb, sizeof(struct iphdr));
	skb_reset_network_header(skb);

	iph = ip_hdr(skb);

	iph->version	=	4;
	iph->ihl	=	sizeof(struct iphdr) >> 2;
	iph->frag_off	=	df;
	iph->protocol	=	proto;
	iph->tos	=	tos;
	iph->daddr	=	dst;
	iph->saddr	=	src;
	iph->ttl	=	ttl;
	__ip_select_ident(iph, &rt->dst, (skb_shinfo(skb)->gso_segs ?: 1) - 1);

	err = ip_local_out(skb);
	if (unlikely(net_xmit_eval(err)))
		pkt_len = 0;
	return pkt_len;
}
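
After the push, the code fills the IPv4 header field by field and leaves tot_len and the checksum to the ip_local_out() path (__ip_local_out() sets iph->tot_len and calls ip_send_check()). For reference, a self-contained sketch of a matching header layout and the standard 16-bit ones'-complement checksum; this is a mock struct, not <linux/ip.h>.

#include <stdint.h>
#include <stddef.h>
#include <arpa/inet.h>

struct mock_iphdr {		/* multi-byte fields in network order */
	uint8_t  ver_ihl;	/* version (high nibble) | words in header */
	uint8_t  tos;
	uint16_t tot_len;
	uint16_t id;
	uint16_t frag_off;
	uint8_t  ttl;
	uint8_t  protocol;
	uint16_t check;
	uint32_t saddr;
	uint32_t daddr;
};

static uint16_t ip_checksum(const void *buf, size_t len)
{
	const uint16_t *p = buf;
	uint32_t sum = 0;

	for (; len > 1; len -= 2)
		sum += *p++;
	if (len)
		sum += *(const uint8_t *)p;
	while (sum >> 16)		/* fold the carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

static void fill_base_iphdr(struct mock_iphdr *iph, uint32_t saddr,
			    uint32_t daddr, uint8_t proto, uint16_t tot_len)
{
	iph->ver_ihl = (4 << 4) | (sizeof(*iph) / 4);	/* v4, ihl = 5 */
	iph->tos = 0;
	iph->tot_len = htons(tot_len);
	iph->id = 0;
	iph->frag_off = 0;
	iph->ttl = 64;
	iph->protocol = proto;
	iph->check = 0;			/* must be zero while summing */
	iph->saddr = saddr;		/* already in network order */
	iph->daddr = daddr;
	iph->check = ip_checksum(iph, sizeof(*iph));
}
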
Example 6
int xfrm4_transport_finish(struct sk_buff *skb, int async)
{
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct iphdr *iph = ip_hdr(skb);

	iph->protocol = XFRM_MODE_SKB_CB(skb)->protocol;

#ifndef CONFIG_NETFILTER
	if (!async)
		return -iph->protocol;
#endif

	__skb_push(skb, skb->data - skb_network_header(skb));
	iph->tot_len = htons(skb->len);
	ip_send_check(iph);

	if (xo && (xo->flags & XFRM_GRO)) {
		skb_mac_header_rebuild(skb);
		return 0;
	}

	NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING,
		dev_net(skb->dev), NULL, skb, skb->dev, NULL,
		xfrm4_rcv_encap_finish);
	return 0;
}
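
The push length here is computed rather than hard-coded: on entry skb->data sits at the transport header, so skb->data - skb_network_header(skb) is exactly how far the packet had been pulled past the network header. A minimal sketch of that rewind-to-recorded-offset idiom, with plain pointers standing in for the skb offsets (illustrative only):

#include <assert.h>
#include <stddef.h>

struct mock_pkt {
	unsigned char *data;	/* current parse position */
	unsigned char *nh;	/* recorded network-header position */
};

static void rewind_to_network_header(struct mock_pkt *p)
{
	ptrdiff_t pulled = p->data - p->nh;

	assert(pulled >= 0);	/* data never precedes the recorded header */
	p->data -= pulled;	/* same effect as __skb_push(skb, pulled) */
}

Example 28 below uses the identical expression for the IPv6 transport path.
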
Example 7
static int xfrm6_beet_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ipv6hdr *ip6h;
	const unsigned char *old_mac;
	int size = sizeof(struct ipv6hdr);
	int err;

	err = skb_cow_head(skb, size + skb->mac_len);
	if (err)
		goto out;

	__skb_push(skb, size);
	skb_reset_network_header(skb);

	old_mac = skb_mac_header(skb);
	skb_set_mac_header(skb, -skb->mac_len);
	memmove(skb_mac_header(skb), old_mac, skb->mac_len);

	xfrm6_beet_make_header(skb);

	ip6h = ipv6_hdr(skb);
	ip6h->payload_len = htons(skb->len - size);
	ipv6_addr_copy(&ip6h->daddr, (struct in6_addr *) &x->sel.daddr.a6);
	ipv6_addr_copy(&ip6h->saddr, (struct in6_addr *) &x->sel.saddr.a6);
	err = 0;
out:
	return err;
}
Example 8
static int pep_reply(struct sock *sk, struct sk_buff *oskb,
			u8 code, const void *data, int len, gfp_t priority)
{
	const struct pnpipehdr *oph = pnp_hdr(oskb);
	struct pnpipehdr *ph;
	struct sk_buff *skb;

	skb = alloc_skb(MAX_PNPIPE_HEADER + len, priority);
	if (!skb)
		return -ENOMEM;
	skb_set_owner_w(skb, sk);

	skb_reserve(skb, MAX_PNPIPE_HEADER);
	__skb_put(skb, len);
	skb_copy_to_linear_data(skb, data, len);
	__skb_push(skb, sizeof(*ph));
	skb_reset_transport_header(skb);
	ph = pnp_hdr(skb);
	ph->utid = oph->utid;
	ph->message_id = oph->message_id + 1; /* REQ -> RESP */
	ph->pipe_handle = oph->pipe_handle;
	ph->error_code = code;

	return pn_skb_send(sk, skb, &pipe_srv);
}
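
pep_reply() builds the packet back to front: reserve worst-case header space, append the payload with __skb_put(), then prepend only the header actually needed with __skb_push(). The same layout arithmetic as a self-contained userspace sketch (all names illustrative):

#include <assert.h>
#include <string.h>

#define MOCK_MAX_HDR 32

struct mock_msg {
	unsigned char buf[256];
	unsigned char *data;
	size_t len;
};

static void msg_init(struct mock_msg *m)
{
	m->data = m->buf + MOCK_MAX_HDR;	/* skb_reserve() */
	m->len = 0;
}

static void msg_put(struct mock_msg *m, const void *p, size_t n)
{
	assert(m->data + m->len + n <= m->buf + sizeof(m->buf));
	memcpy(m->data + m->len, p, n);		/* __skb_put() + copy */
	m->len += n;
}

static void *msg_push(struct mock_msg *m, size_t n)
{
	assert((size_t)(m->data - m->buf) >= n);
	m->data -= n;				/* __skb_push() */
	m->len += n;
	return m->data;
}

Example 25 below factors exactly this sequence out of pep_reply() and pipe_snd_status() into pep_alloc_skb().
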
Example 9
void gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
		      int hdr_len)
{
	struct gre_base_hdr *greh;

	__skb_push(skb, hdr_len);

	greh = (struct gre_base_hdr *)skb->data;
	greh->flags = tnl_flags_to_gre_flags(tpi->flags);
	greh->protocol = tpi->proto;

	if (tpi->flags & (TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_SEQ)) {
		__be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4);

		if (tpi->flags & TUNNEL_SEQ) {
			*ptr = tpi->seq;
			ptr--;
		}
		if (tpi->flags & TUNNEL_KEY) {
			*ptr = tpi->key;
			ptr--;
		}
		if (tpi->flags & TUNNEL_CSUM && !is_gre_gso(skb)) {
			*ptr = 0;
			*(__sum16 *)ptr = csum_fold(skb_checksum(skb, 0,
						skb->len, 0));
		}
	}
}
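
The option words are written backwards: ptr starts at the last 32-bit slot of the header and decrements, which yields the on-wire GRE order (checksum, key, sequence number) given the write order seq, key, csum. This only works if the caller sized hdr_len from the same flags; a sketch of that assumed computation, with MOCK_* constants standing in for the kernel's TUNNEL_* flags:

#include <stdint.h>
#include <stddef.h>

#define MOCK_TUNNEL_CSUM   0x01
#define MOCK_TUNNEL_KEY    0x02
#define MOCK_TUNNEL_SEQ    0x04
#define MOCK_GRE_BASE_HLEN 4	/* flags (2) + protocol (2) */

static size_t mock_gre_hdr_len(uint16_t flags)
{
	size_t len = MOCK_GRE_BASE_HLEN;

	if (flags & MOCK_TUNNEL_CSUM)
		len += 4;	/* checksum (2) + reserved (2) */
	if (flags & MOCK_TUNNEL_KEY)
		len += 4;
	if (flags & MOCK_TUNNEL_SEQ)
		len += 4;
	return len;
}
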
Example 10
bool skb_defer_rx_timestamp(struct sk_buff *skb)
{
	struct phy_device *phydev;
	unsigned int type;

	if (skb_headroom(skb) < ETH_HLEN)
		return false;
	__skb_push(skb, ETH_HLEN);

	type = classify(skb);

	__skb_pull(skb, ETH_HLEN);

	switch (type) {
	case PTP_CLASS_V1_IPV4:
	case PTP_CLASS_V1_IPV6:
	case PTP_CLASS_V2_IPV4:
	case PTP_CLASS_V2_IPV6:
	case PTP_CLASS_V2_L2:
	case PTP_CLASS_V2_VLAN:
		phydev = skb->dev->phydev;
		if (likely(phydev->drv->rxtstamp))
			return phydev->drv->rxtstamp(phydev, skb, type);
		break;
	default:
		break;
	}

	return false;
}
Example 11
/* Transmit a fully formatted Geneve frame.
 *
 * When calling this function, skb->data should point
 * to the Geneve header, which must already be fully formed.
 *
 * This function will add other UDP tunnel headers.
 */
int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
		    struct sk_buff *skb, __be32 src, __be32 dst, __u8 tos,
		    __u8 ttl, __be16 df, __be16 src_port, __be16 dst_port,
		    __be16 tun_flags, u8 vni[3], u8 opt_len, u8 *opt,
		    bool xnet)
{
	struct genevehdr *gnvh;
	int min_headroom;
	int err;

	skb = udp_tunnel_handle_offloads(skb, !gs->sock->sk->sk_no_check_tx);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
			+ GENEVE_BASE_HLEN + opt_len + sizeof(struct iphdr)
			+ (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);

	err = skb_cow_head(skb, min_headroom);
	if (unlikely(err))
		return err;

	skb = vlan_hwaccel_push_inside(skb);
	if (unlikely(!skb))
		return -ENOMEM;

	gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
	geneve_build_header(gnvh, tun_flags, vni, opt_len, opt);

	skb_set_inner_protocol(skb, htons(ETH_P_TEB));

	return udp_tunnel_xmit_skb(gs->sock, rt, skb, src, dst,
				   tos, ttl, df, src_port, dst_port, xnet);
}
Example 12
static struct sock *run_bpf(struct sock_reuseport *reuse, u16 socks,
			    struct bpf_prog *prog, struct sk_buff *skb,
			    int hdr_len)
{
	struct sk_buff *nskb = NULL;
	u32 index;

	if (skb_shared(skb)) {
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			return NULL;
		skb = nskb;
	}

	/* temporarily advance data past protocol header */
	if (!pskb_pull(skb, hdr_len)) {
		kfree_skb(nskb);
		return NULL;
	}
	index = bpf_prog_run_save_cb(prog, skb);
	__skb_push(skb, hdr_len);

	consume_skb(nskb);

	if (index >= socks)
		return NULL;

	return reuse->socks[index];
}
Example 13
static int pipe_snd_status(struct sock *sk, u8 type, u8 status, gfp_t priority)
{
	struct pep_sock *pn = pep_sk(sk);
	struct pnpipehdr *ph;
	struct sk_buff *skb;

	skb = alloc_skb(MAX_PNPIPE_HEADER + 4, priority);
	if (!skb)
		return -ENOMEM;
	skb_set_owner_w(skb, sk);

	skb_reserve(skb, MAX_PNPIPE_HEADER + 4);
	__skb_push(skb, sizeof(*ph) + 4);
	skb_reset_transport_header(skb);
	ph = pnp_hdr(skb);
	ph->utid = 0;
	ph->message_id = PNS_PEP_STATUS_IND;
	ph->pipe_handle = pn->pipe_handle;
	ph->pep_type = PN_PEP_TYPE_COMMON;
	ph->data[1] = type;
	ph->data[2] = PAD;
	ph->data[3] = PAD;
	ph->data[4] = status;

	return pn_skb_send(sk, skb, &pipe_srv);
}
Example 14
static struct sk_buff *mpls_gso_segment(struct sk_buff *skb,
				       netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	netdev_features_t mpls_features;
	__be16 mpls_protocol;

	if (unlikely(skb_shinfo(skb)->gso_type &
				~(SKB_GSO_TCPV4 |
				  SKB_GSO_TCPV6 |
				  SKB_GSO_UDP |
				  SKB_GSO_DODGY |
				  SKB_GSO_TCP_ECN |
				  SKB_GSO_GRE |
				  SKB_GSO_GRE_CSUM |
				  SKB_GSO_IPIP |
				  SKB_GSO_MPLS)))
		goto out;

	/* Setup inner SKB. */
	mpls_protocol = skb->protocol;
	skb->protocol = skb->inner_protocol;

	/* Push back the mac header that skb_mac_gso_segment() has pulled.
	 * It will be re-pulled by the call to skb_mac_gso_segment() below
	 */
	__skb_push(skb, skb->mac_len);

	/* Segment inner packet. */
	mpls_features = skb->dev->mpls_features & netif_skb_features(skb);
	segs = skb_mac_gso_segment(skb, mpls_features);


	/* Restore outer protocol. */
	skb->protocol = mpls_protocol;

	/* Re-pull the mac header that the call to skb_mac_gso_segment()
	 * above pulled.  It will be re-pushed after returning
	 * skb_mac_gso_segment(), an indirect caller of this function.
	 */
	__skb_push(skb, skb->data - skb_mac_header(skb));

out:
	return segs;
}
Example 15
static int ethertap_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net_local *lp = (struct net_local *)dev->priv;
#ifdef CONFIG_ETHERTAP_MC
	struct ethhdr *eth = (struct ethhdr*)skb->data;
#endif

	if (skb_headroom(skb) < 2) {
		static int once;
		struct sk_buff *skb2;

		if (!once) {
			once = 1;
			printk(KERN_DEBUG "%s: not aligned xmit by protocol %04x\n", dev->name, skb->protocol);
		}

		skb2 = skb_realloc_headroom(skb, 2);
		dev_kfree_skb(skb);
		if (skb2 == NULL)
			return 0;
		skb = skb2;
	}
	__skb_push(skb, 2);

	/* Make the same thing, which loopback does. */
	if (skb_shared(skb)) {
		struct sk_buff *skb2 = skb;

		skb = skb_clone(skb, GFP_ATOMIC);	/* Clone the buffer */
		if (skb == NULL) {
			dev_kfree_skb(skb2);
			return 0;
		}
		dev_kfree_skb(skb2);
	}
	/* ... but do not orphan it here, netlink does it in any case. */

	lp->stats.tx_bytes += skb->len;
	lp->stats.tx_packets++;

#ifndef CONFIG_ETHERTAP_MC
	netlink_broadcast(lp->nl, skb, 0, ~0, GFP_ATOMIC);
#else
	if (dev->flags & IFF_NOARP) {
		netlink_broadcast(lp->nl, skb, 0, ~0, GFP_ATOMIC);
		return 0;
	}

	if (!(eth->h_dest[0] & 1)) {
		/* Unicast packet */
		__u32 pid;
		memcpy(&pid, eth->h_dest+2, 4);
		netlink_unicast(lp->nl, skb, ntohl(pid), MSG_DONTWAIT);
	} else
		netlink_broadcast(lp->nl, skb, 0, ethertap_mc_hash(eth->h_dest), GFP_ATOMIC);
#endif
	return 0;
}
Example 16
static int tcf_bpf(struct sk_buff *skb, const struct tc_action *act,
		   struct tcf_result *res)
{
	struct tcf_bpf *prog = act->priv;
	int action, filter_res;
	bool at_ingress = G_TC_AT(skb->tc_verd) & AT_INGRESS;

	if (unlikely(!skb_mac_header_was_set(skb)))
		return TC_ACT_UNSPEC;

	spin_lock(&prog->tcf_lock);

	prog->tcf_tm.lastuse = jiffies;
	bstats_update(&prog->tcf_bstats, skb);

	/* Needed here for accessing maps. */
	rcu_read_lock();
	if (at_ingress) {
		__skb_push(skb, skb->mac_len);
		filter_res = BPF_PROG_RUN(prog->filter, skb);
		__skb_pull(skb, skb->mac_len);
	} else {
		filter_res = BPF_PROG_RUN(prog->filter, skb);
	}
	rcu_read_unlock();

	/* A BPF program may overwrite the default action opcode.
	 * Similarly as in cls_bpf, if filter_res == -1 we use the
	 * default action specified from tc.
	 *
	 * In case a different well-known TC_ACT opcode has been
	 * returned, it will overwrite the default one.
	 *
	 * For everything else that is unknown, TC_ACT_UNSPEC is
	 * returned.
	 */
	switch (filter_res) {
	case TC_ACT_PIPE:
	case TC_ACT_RECLASSIFY:
	case TC_ACT_OK:
		action = filter_res;
		break;
	case TC_ACT_SHOT:
		action = filter_res;
		prog->tcf_qstats.drops++;
		break;
	case TC_ACT_UNSPEC:
		action = prog->tcf_action;
		break;
	default:
		action = TC_ACT_UNSPEC;
		break;
	}

	spin_unlock(&prog->tcf_lock);
	return action;
}
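
Example 10 above, this function, and Example 17 below all share one bracket idiom: temporarily __skb_push() the MAC header so a helper that expects skb->data at the Ethernet header can run, then __skb_pull() it back so the rest of the path sees the packet unchanged. The shape of the idiom, sketched with a plain pointer (illustrative, not kernel API):

#include <stddef.h>

struct mock_pkt {
	unsigned char *data;
	size_t len;
	size_t mac_len;		/* length of the link-layer header */
};

typedef int (*pkt_fn)(struct mock_pkt *);

/* Run fn with the MAC header exposed, then restore the original view. */
static int with_mac_header(struct mock_pkt *p, pkt_fn fn)
{
	int ret;

	p->data -= p->mac_len;	/* __skb_push(skb, skb->mac_len) */
	p->len += p->mac_len;
	ret = fn(p);
	p->data += p->mac_len;	/* __skb_pull(skb, skb->mac_len) */
	p->len -= p->mac_len;
	return ret;
}
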
Example 17
static int tcf_bpf(struct sk_buff *skb, const struct tc_action *act,
		   struct tcf_result *res)
{
	struct tcf_bpf *prog = act->priv;
	struct bpf_prog *filter;
	int action, filter_res;
	bool at_ingress = G_TC_AT(skb->tc_verd) & AT_INGRESS;

	if (unlikely(!skb_mac_header_was_set(skb)))
		return TC_ACT_UNSPEC;

	tcf_lastuse_update(&prog->tcf_tm);
	bstats_cpu_update(this_cpu_ptr(prog->common.cpu_bstats), skb);

	rcu_read_lock();
	filter = rcu_dereference(prog->filter);
	if (at_ingress) {
		__skb_push(skb, skb->mac_len);
		filter_res = BPF_PROG_RUN(filter, skb);
		__skb_pull(skb, skb->mac_len);
	} else {
		filter_res = BPF_PROG_RUN(filter, skb);
	}
	rcu_read_unlock();

	/* A BPF program may overwrite the default action opcode.
	 * Similarly as in cls_bpf, if filter_res == -1 we use the
	 * default action specified from tc.
	 *
	 * In case a different well-known TC_ACT opcode has been
	 * returned, it will overwrite the default one.
	 *
	 * For everything else that is unknown, TC_ACT_UNSPEC is
	 * returned.
	 */
	switch (filter_res) {
	case TC_ACT_PIPE:
	case TC_ACT_RECLASSIFY:
	case TC_ACT_OK:
	case TC_ACT_REDIRECT:
		action = filter_res;
		break;
	case TC_ACT_SHOT:
		action = filter_res;
		qstats_drop_inc(this_cpu_ptr(prog->common.cpu_qstats));
		break;
	case TC_ACT_UNSPEC:
		action = prog->tcf_action;
		break;
	default:
		action = TC_ACT_UNSPEC;
		break;
	}

	return action;
}
Example 18
static struct sk_buff *int51x1_tx_fixup(struct usbnet *dev,
		struct sk_buff *skb, gfp_t flags)
{
	int pack_len = skb->len;
	int pack_with_header_len = pack_len + INT51X1_HEADER_SIZE;
	int headroom = skb_headroom(skb);
	int tailroom = skb_tailroom(skb);
	int need_tail = 0;
	__le16 *len;

	/* if packet and our header is smaller than 64, pad to 64 (+ ZLP) */
	if ((pack_with_header_len) < dev->maxpacket)
		need_tail = dev->maxpacket - pack_with_header_len + 1;
	/*
	 * usbnet would send a ZLP if packetlength mod urbsize == 0 for us,
	 * but we need to know ourselves, because this would add to the length
	 * we send down to the device...
	 */
	else if (!(pack_with_header_len % dev->maxpacket))
		need_tail = 1;

	if (!skb_cloned(skb) &&
			(headroom + tailroom >= need_tail + INT51X1_HEADER_SIZE)) {
		if (headroom < INT51X1_HEADER_SIZE || tailroom < need_tail) {
			skb->data = memmove(skb->head + INT51X1_HEADER_SIZE,
					skb->data, skb->len);
			skb_set_tail_pointer(skb, skb->len);
		}
	} else {
		struct sk_buff *skb2;

		skb2 = skb_copy_expand(skb,
				INT51X1_HEADER_SIZE,
				need_tail,
				flags);
		dev_kfree_skb_any(skb);
		if (!skb2)
			return NULL;
		skb = skb2;
	}

	pack_len += need_tail;
	pack_len &= 0x07ff;

	len = (__le16 *) __skb_push(skb, INT51X1_HEADER_SIZE);
	*len = cpu_to_le16(pack_len);

	if (need_tail)
		memset(__skb_put(skb, need_tail), 0, need_tail);

	return skb;
}
Example 19
static struct sk_buff *int51x1_tx_fixup(struct usbnet *dev,
		struct sk_buff *skb, gfp_t flags)
{
	int pack_len = skb->len;
	int pack_with_header_len = pack_len + INT51X1_HEADER_SIZE;
	int headroom = skb_headroom(skb);
	int tailroom = skb_tailroom(skb);
	int need_tail = 0;
	__le16 *len;

	/* if packet and our header is smaller than 64, pad to 64 (+ ZLP) */
	if ((pack_with_header_len) < dev->maxpacket)
		need_tail = dev->maxpacket - pack_with_header_len + 1;
	/*
	 * usbnet would send a ZLP if packetlength mod urbsize == 0 for us,
	 * but we need to know ourselves, because this would add to the length
	 * we send down to the device...
	 */
	else if (!(pack_with_header_len % dev->maxpacket))
		need_tail = 1;

	if (!skb_cloned(skb) &&
			(headroom + tailroom >= need_tail + INT51X1_HEADER_SIZE)) {
		if (headroom < INT51X1_HEADER_SIZE || tailroom < need_tail) {
			skb->data = memmove(skb->head + INT51X1_HEADER_SIZE,
					skb->data, skb->len);
			skb_set_tail_pointer(skb, skb->len);
		}
	} else {
		struct sk_buff *skb2;

		skb2 = skb_copy_expand(skb,
				INT51X1_HEADER_SIZE,
				need_tail,
				flags);
		dev_kfree_skb_any(skb);
		if (!skb2)
			return NULL;
		skb = skb2;
	}

	pack_len += need_tail;
	pack_len &= 0x07ff;

	len = (__le16 *) __skb_push(skb, INT51X1_HEADER_SIZE);
	*len = cpu_to_le16(pack_len);

	if (need_tail)
		memset(__skb_put(skb, need_tail), 0, need_tail);

	return skb;
}
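
The padding rule above in isolation: a frame that (with its 2-byte length header) is shorter than maxpacket is padded to one byte past maxpacket, and a frame whose total is an exact multiple of maxpacket gets one extra byte, so the USB transfer never ends on a packet boundary and no zero-length packet (ZLP) is needed to terminate it. The same arithmetic as a standalone helper (constants illustrative):

#include <stddef.h>

#define MOCK_HEADER_SIZE 2	/* the INT51X1 length header */

static size_t mock_need_tail(size_t pkt_len, size_t maxpacket)
{
	size_t total = pkt_len + MOCK_HEADER_SIZE;

	if (total < maxpacket)
		return maxpacket - total + 1;
	if (total % maxpacket == 0)
		return 1;
	return 0;
}
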
Example 20
int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk,
                         struct sk_buff *skb,
                         struct net_device *dev, struct in6_addr *saddr,
                         struct in6_addr *daddr,
                         __u8 prio, __u8 ttl, __be16 src_port,
                         __be16 dst_port, bool nocheck)
{
    struct udphdr *uh;
    struct ipv6hdr *ip6h;

    __skb_push(skb, sizeof(*uh));
    skb_reset_transport_header(skb);
    uh = udp_hdr(skb);

    uh->dest = dst_port;
    uh->source = src_port;

    uh->len = htons(skb->len);

    memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
    IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED
                          | IPSKB_REROUTED);
    skb_dst_set(skb, dst);

    udp6_set_csum(nocheck, skb, saddr, daddr, skb->len);

    __skb_push(skb, sizeof(*ip6h));
    skb_reset_network_header(skb);
    ip6h		  = ipv6_hdr(skb);
    ip6_flow_hdr(ip6h, prio, htonl(0));
    ip6h->payload_len = htons(skb->len);
    ip6h->nexthdr     = IPPROTO_UDP;
    ip6h->hop_limit   = ttl;
    ip6h->daddr	  = *daddr;
    ip6h->saddr	  = *saddr;

    ip6tunnel_xmit(sk, skb, dev);
    return 0;
}
Example 21
static void prepare_udp_hdr(struct sk_buff *skb, __be16 src_port,
			    __be16 dst_port)
{
	struct udphdr *udph;

	__skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);

	udph->dest = dst_port;
	udph->source = src_port;
	udph->len = htons(skb->len);
	udph->check = 0;
}
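
prepare_udp_hdr() sets udph->len after the push, so htons(skb->len) covers the 8-byte UDP header as well as the payload, as the UDP wire format requires; check = 0 marks the checksum as absent, which is legal for UDP over IPv4 (the IPv6 variant above calls udp6_set_csum() instead, since IPv6 mandates a checksum). The same fill against a mock header struct (illustrative, not <linux/udp.h>):

#include <stdint.h>
#include <stddef.h>
#include <arpa/inet.h>

struct mock_udphdr {
	uint16_t source;
	uint16_t dest;
	uint16_t len;	/* header + payload, network byte order */
	uint16_t check;	/* 0 = checksum absent (IPv4 only) */
};

/* Assumes the total fits in 16 bits; Example 4 above shows the
 * corresponding guard (skb->len + sizeof(struct udphdr) > 65535). */
static void fill_udp(struct mock_udphdr *uh, uint16_t sport,
		     uint16_t dport, size_t payload_len)
{
	uh->source = htons(sport);
	uh->dest = htons(dport);
	uh->len = htons((uint16_t)(sizeof(*uh) + payload_len));
	uh->check = 0;
}
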
Example 22
/*
 * When forwarding bridge frames, we save a copy of the original
 * header before processing.
 */
int nf_bridge_copy_header(struct sk_buff *skb)
{
	int err;
	int header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);

	err = skb_cow_head(skb, header_size);
	if (err)
		return err;

	skb_copy_to_linear_data_offset(skb, -header_size,
				       skb->nf_bridge->data, header_size);
	__skb_push(skb, nf_bridge_encap_header_len(skb));
	return 0;
}
Example 23
static struct sk_buff *tnl_skb_gso_segment(struct sk_buff *skb,
					   netdev_features_t features,
					   bool tx_path)
{
	struct iphdr *iph = ip_hdr(skb);
	int pkt_hlen = skb_inner_network_offset(skb); /* inner l2 + tunnel hdr. */
	int mac_offset = skb_inner_mac_offset(skb);
	struct sk_buff *skb1 = skb;
	struct sk_buff *segs;
	__be16 proto = skb->protocol;
	char cb[sizeof(skb->cb)];

	/* Set up the whole inner packet to get its protocol. */
	__skb_pull(skb, mac_offset);
	skb->protocol = __skb_network_protocol(skb);

	/* Set up an L3 packet for GSO, to get around a segmentation bug on older kernels. */
	__skb_pull(skb, (pkt_hlen - mac_offset));
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);

	/* From kernel 3.9 on, skb->cb is used by the skb GSO code, so save
	 * a copy here and restore it after segmentation. */
	memcpy(cb, skb->cb, sizeof(cb));

	segs = __skb_gso_segment(skb, 0, tx_path);
	if (!segs || IS_ERR(segs))
		goto free;

	skb = segs;
	while (skb) {
		__skb_push(skb, pkt_hlen);
		skb_reset_mac_header(skb);
		skb_reset_network_header(skb);
		skb_set_transport_header(skb, sizeof(struct iphdr));
		skb->mac_len = 0;

		memcpy(ip_hdr(skb), iph, pkt_hlen);
		memcpy(skb->cb, cb, sizeof(cb));
		if (OVS_GSO_CB(skb)->fix_segment)
			OVS_GSO_CB(skb)->fix_segment(skb);

		skb->protocol = proto;
		skb = skb->next;
	}
free:
	consume_skb(skb1);
	return segs;
}
Example 24
int iptunnel_xmit(struct rtable *rt, struct sk_buff *skb,
		  __be32 src, __be32 dst, __u8 proto,
		  __u8 tos, __u8 ttl, __be16 df, bool xnet)
{
	int pkt_len = skb->len;
	struct iphdr *iph;
	int err;

	/* inlined skb_scrub_packet() */
	if (xnet)
		skb_orphan(skb);
	skb->pkt_type = PACKET_HOST;
#if LINUX_VERSION_CODE > KERNEL_VERSION(3,0,0)
	skb->skb_iif = 0;
#endif
	skb_dst_drop(skb);
	skb->mark = 0;
	secpath_reset(skb);
	nf_reset(skb);

	skb->rxhash = 0;
	skb_dst_set(skb, &rt_dst(rt));
#if 0
	/* Do not clear ovs_skb_cb.  It will be done in gso code. */
	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
#endif

	/* Push down and install the IP header. */
	__skb_push(skb, sizeof(struct iphdr));
	skb_reset_network_header(skb);

	iph = ip_hdr(skb);

	iph->version	=	4;
	iph->ihl	=	sizeof(struct iphdr) >> 2;
	iph->frag_off	=	df;
	iph->protocol	=	proto;
	iph->tos	=	tos;
	iph->daddr	=	dst;
	iph->saddr	=	src;
	iph->ttl	=	ttl;
	tunnel_ip_select_ident(skb,
			       (const struct iphdr *)skb_inner_network_header(skb),
			       &rt_dst(rt));

	err = ip_local_out(skb);
	if (unlikely(net_xmit_eval(err)))
		pkt_len = 0;
	return pkt_len;
}
Example 25
static struct sk_buff *pep_alloc_skb(struct sock *sk, const void *payload,
					int len, gfp_t priority)
{
	struct sk_buff *skb = alloc_skb(MAX_PNPIPE_HEADER + len, priority);
	if (!skb)
		return NULL;
	skb_set_owner_w(skb, sk);

	skb_reserve(skb, MAX_PNPIPE_HEADER);
	__skb_put(skb, len);
	skb_copy_to_linear_data(skb, payload, len);
	__skb_push(skb, sizeof(struct pnpipehdr));
	skb_reset_transport_header(skb);
	return skb;
}
Example 26
void rpl_iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
                      __be32 src, __be32 dst, __u8 proto, __u8 tos, __u8 ttl,
                      __be16 df, bool xnet)
{
	struct net_device *dev = skb->dev;
	int pkt_len = skb->len - skb_inner_network_offset(skb);
	struct iphdr *iph;
	int err;

	skb_scrub_packet(skb, xnet);

	skb_clear_hash(skb);
	skb_dst_set(skb, &rt->dst);

#if 0
	/* Do not clear ovs_skb_cb.  It will be done in gso code. */
	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
#endif

	/* Push down and install the IP header. */
	__skb_push(skb, sizeof(struct iphdr));
	skb_reset_network_header(skb);

	iph = ip_hdr(skb);

	iph->version	=	4;
	iph->ihl	=	sizeof(struct iphdr) >> 2;
	iph->frag_off	=	df;
	iph->protocol	=	proto;
	iph->tos	=	tos;
	iph->daddr	=	dst;
	iph->saddr	=	src;
	iph->ttl	=	ttl;

#ifdef HAVE_IP_SELECT_IDENT_USING_DST_ENTRY
	__ip_select_ident(iph, &rt->dst, (skb_shinfo(skb)->gso_segs ?: 1) - 1);
#elif defined(HAVE_IP_SELECT_IDENT_USING_NET)
	__ip_select_ident(dev_net(rt->dst.dev), iph,
			  skb_shinfo(skb)->gso_segs ?: 1);
#else
	__ip_select_ident(iph, skb_shinfo(skb)->gso_segs ?: 1);
#endif

	err = ip_local_out(skb);
	if (unlikely(net_xmit_eval(err)))
		pkt_len = 0;
	iptunnel_xmit_stats(dev, pkt_len);
}
Example 27
/* Fill in the header for fragmented IP packets handled by
 * the IPv4 connection tracking code.
 */
int nf_bridge_copy_header(struct sk_buff *skb) {
  int err;
  unsigned int header_size;

  nf_bridge_update_protocol(skb);
  header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);
  err = skb_cow_head(skb, header_size);
  if (err) {
    return err;
  }

  skb_copy_to_linear_data_offset(skb, -header_size,
                                 skb->nf_bridge->data, header_size);
  __skb_push(skb, nf_bridge_encap_header_len(skb));
  return 0;
}
Example 28
int xfrm6_transport_finish(struct sk_buff *skb, int async)
{
	skb_network_header(skb)[IP6CB(skb)->nhoff] =
		XFRM_MODE_SKB_CB(skb)->protocol;

#ifndef CONFIG_NETFILTER
	if (!async)
		return 1;
#endif

	ipv6_hdr(skb)->payload_len = htons(skb->len);
	__skb_push(skb, skb->data - skb_network_header(skb));

	NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, skb->dev, NULL,
		ip6_rcv_finish);
	return -1;
}
Example 29
File: vxlan.c Project: hisaki/ovs
int vxlan_xmit_skb(struct vxlan_sock *vs,
		   struct rtable *rt, struct sk_buff *skb,
		   __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
		   __be16 src_port, __be16 dst_port,
		   struct vxlan_metadata *md, bool xnet, u32 vxflags)
{
	struct vxlanhdr *vxh;
	int min_headroom;
	int err;
	bool udp_sum = !!(vxflags & VXLAN_F_UDP_CSUM);

	skb = udp_tunnel_handle_offloads(skb, udp_sum, true);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	min_headroom = LL_RESERVED_SPACE(rt_dst(rt).dev) + rt_dst(rt).header_len
			+ VXLAN_HLEN + sizeof(struct iphdr)
			+ (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);

	/* Need space for new headers (invalidates iph ptr) */
	err = skb_cow_head(skb, min_headroom);
	if (unlikely(err)) {
		kfree_skb(skb);
		return err;
	}

	skb = vlan_hwaccel_push_inside(skb);
	if (WARN_ON(!skb))
		return -ENOMEM;

	vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
	vxh->vx_flags = htonl(VXLAN_HF_VNI);
	vxh->vx_vni = md->vni;

	if (vxflags & VXLAN_F_GBP)
		vxlan_build_gbp_hdr(vxh, vxflags, md);

	vxlan_set_owner(vs->sock->sk, skb);

	ovs_skb_set_inner_protocol(skb, htons(ETH_P_TEB));

	return udp_tunnel_xmit_skb(rt, skb, src, dst, tos,
				   ttl, df, src_port, dst_port, xnet,
				   !udp_sum);
}
Example 30
static struct sk_buff *
rndis_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
{
	struct rndis_data_hdr	*hdr;
	struct sk_buff		*skb2;
	unsigned		len = skb->len;

	if (likely(!skb_cloned(skb))) {
		int	room = skb_headroom(skb);

		/* enough head room as-is? */
		if (unlikely((sizeof *hdr) <= room))
			goto fill;

		/* enough room, but needs to be readjusted? */
		room += skb_tailroom(skb);
		if (likely((sizeof *hdr) <= room)) {
			skb->data = memmove(skb->head + sizeof *hdr,
					    skb->data, len);
			skb_set_tail_pointer(skb, len);
			goto fill;
		}
	}

	/* create a new skb, with the correct size (and tailpad) */
	skb2 = skb_copy_expand(skb, sizeof *hdr, 1, flags);
	dev_kfree_skb_any(skb);
	if (unlikely(!skb2))
		return skb2;
	skb = skb2;

	/* fill out the RNDIS header.  we won't bother trying to batch
	 * packets; Linux minimizes wasted bandwidth through tx queues.
	 */
fill:
	hdr = (void *) __skb_push(skb, sizeof *hdr);
	memset(hdr, 0, sizeof *hdr);
	hdr->msg_type = RNDIS_MSG_PACKET;
	hdr->msg_len = cpu_to_le32(skb->len);
	hdr->data_offset = ccpu2(sizeof(*hdr) - 8);
	hdr->data_len = cpu_to_le32(len);

	/* FIXME make the last packet always be short ... */
	return skb;
}