Example #1
/* Transmit a fully formatted Geneve frame.
 *
 * When calling this function, skb->data should point
 * to the fully formed Geneve header.
 *
 * This function will add other UDP tunnel headers.
 */
int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
		    struct sk_buff *skb, __be32 src, __be32 dst, __u8 tos,
		    __u8 ttl, __be16 df, __be16 src_port, __be16 dst_port,
		    __be16 tun_flags, u8 vni[3], u8 opt_len, u8 *opt,
		    bool xnet)
{
	struct genevehdr *gnvh;
	int min_headroom;
	int err;

	skb = udp_tunnel_handle_offloads(skb, !gs->sock->sk->sk_no_check_tx);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
			+ GENEVE_BASE_HLEN + opt_len + sizeof(struct iphdr)
			+ (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);

	err = skb_cow_head(skb, min_headroom);
	if (unlikely(err))
		return err;

	skb = vlan_hwaccel_push_inside(skb);
	if (unlikely(!skb))
		return -ENOMEM;

	gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
	geneve_build_header(gnvh, tun_flags, vni, opt_len, opt);

	skb_set_inner_protocol(skb, htons(ETH_P_TEB));

	return udp_tunnel_xmit_skb(gs->sock, rt, skb, src, dst,
				   tos, ttl, df, src_port, dst_port, xnet);
}
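
A recurring pattern in the examples on this page is "reserve headroom, then push": skb_cow_head() is called with the total size of the headers about to be prepended, and only afterwards is skb_push()/__skb_push() used, because the push itself never reallocates and assumes the headroom already exists. A minimal sketch of that pattern (a hypothetical helper, not taken from any of the files above):

#include <linux/skbuff.h>
#include <linux/string.h>

static int example_push_header(struct sk_buff *skb, const void *hdr,
			       unsigned int hlen)
{
	int err;

	/* Reallocates the skb head only if headroom is short or shared. */
	err = skb_cow_head(skb, hlen);
	if (err)
		return err;

	/* Headroom is guaranteed now, so the push cannot fail. */
	memcpy(__skb_push(skb, hlen), hdr, hlen);
	return 0;
}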
Example #2
static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
				struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	if (OVTYPE_IS_GRETAP (skb)) {
		gretap_gre_tap_xmit_in (skb) = rdtsc ();
	}

	skb = gre_handle_offloads(skb, !!(tunnel->parms.o_flags&TUNNEL_CSUM));
	if (IS_ERR(skb))
		goto out;

	if (skb_cow_head(skb, dev->needed_headroom))
		goto free_skb;

	__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_TEB));

	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
out:
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
Example #3
/*
 * Transmit a packet to the base station on behalf of the network stack
 *
 * Returns: NETDEV_TX_OK (always, even in case of error)
 *
 * In case of error, we just drop it. Reasons:
 *
 *  - we add a hw header to each skb, and if the network stack
 *    retries, we have no way to know if that skb has it or not.
 *
 *  - network protocols have their own drop-recovery mechanisms
 *
 *  - there is not much else we can do
 *
 * If the device is idle, we need to wake it up; that is an operation
 * that will sleep. See i2400m_net_wake_tx() for details.
 */
static
netdev_tx_t i2400m_hard_start_xmit(struct sk_buff *skb,
					 struct net_device *net_dev)
{
	struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
	struct device *dev = i2400m_dev(i2400m);
	int result = -1;

	d_fnstart(3, dev, "(skb %p net_dev %p)\n", skb, net_dev);

	if (skb_cow_head(skb, 0))
		goto drop;

	if (i2400m->state == I2400M_SS_IDLE)
		result = i2400m_net_wake_tx(i2400m, net_dev, skb);
	else
		result = i2400m_net_tx(i2400m, net_dev, skb);
	if (result <  0) {
drop:
		net_dev->stats.tx_dropped++;
	} else {
		net_dev->stats.tx_packets++;
		net_dev->stats.tx_bytes += skb->len;
	}
	dev_kfree_skb(skb);
	d_fnend(3, dev, "(skb %p net_dev %p) = %d\n", skb, net_dev, result);
	return NETDEV_TX_OK;
}
Example #4
static int xlgmac_prep_tso(struct sk_buff *skb,
			   struct xlgmac_pkt_info *pkt_info)
{
	int ret;

	if (!XLGMAC_GET_REG_BITS(pkt_info->attributes,
				 TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS,
				 TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN))
		return 0;

	ret = skb_cow_head(skb, 0);
	if (ret)
		return ret;

	pkt_info->header_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	pkt_info->tcp_header_len = tcp_hdrlen(skb);
	pkt_info->tcp_payload_len = skb->len - pkt_info->header_len;
	pkt_info->mss = skb_shinfo(skb)->gso_size;

	XLGMAC_PR("header_len=%u\n", pkt_info->header_len);
	XLGMAC_PR("tcp_header_len=%u, tcp_payload_len=%u\n",
		  pkt_info->tcp_header_len, pkt_info->tcp_payload_len);
	XLGMAC_PR("mss=%u\n", pkt_info->mss);

	/* Update the number of packets that will ultimately be transmitted
	 * along with the extra bytes for each extra packet
	 */
	pkt_info->tx_packets = skb_shinfo(skb)->gso_segs;
	pkt_info->tx_bytes += (pkt_info->tx_packets - 1) * pkt_info->header_len;

	return 0;
}
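
Note that skb_cow_head(skb, 0) above requests no extra headroom at all; it is only there to make sure the header part of the skb is private and writable before the driver reads and later touches the TCP header for TSO. A hedged sketch of the same idea (hypothetical function, not part of the xlgmac driver; it assumes the transport header already points at a TCP header):

#include <linux/skbuff.h>
#include <linux/tcp.h>

static int example_make_tcp_header_writable(struct sk_buff *skb)
{
	int err;

	/* No extra room requested; just unshare the header data. */
	err = skb_cow_head(skb, 0);
	if (err)
		return err;

	/* Safe to modify the TCP header now, e.g. clear the checksum
	 * before recomputing it in software.
	 */
	tcp_hdr(skb)->check = 0;
	return 0;
}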
Example #5
static int push_eth(struct sk_buff *skb, struct sw_flow_key *key,
		    const struct ovs_action_push_eth *ethh)
{
	struct ethhdr *hdr;

	/* Add the new Ethernet header */
	if (skb_cow_head(skb, ETH_HLEN) < 0)
		return -ENOMEM;

	skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb_reset_mac_len(skb);

	hdr = eth_hdr(skb);
	ether_addr_copy(hdr->h_source, ethh->addresses.eth_src);
	ether_addr_copy(hdr->h_dest, ethh->addresses.eth_dst);
	hdr->h_proto = skb->protocol;

	skb_postpush_rcsum(skb, hdr, ETH_HLEN);

	/* safe right before invalidate_flow_key */
	key->mac_proto = MAC_PROTO_ETHERNET;
	invalidate_flow_key(key);
	return 0;
}
Example #6
static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_mpls *mpls)
{
	struct mpls_shim_hdr *new_mpls_lse;

	/* The networking stack does not allow simultaneous tunnel and MPLS GSO. */
	if (skb->encapsulation)
		return -ENOTSUPP;

	if (skb_cow_head(skb, MPLS_HLEN) < 0)
		return -ENOMEM;

	if (!skb->inner_protocol) {
		skb_set_inner_network_header(skb, skb->mac_len);
		skb_set_inner_protocol(skb, skb->protocol);
	}

	skb_push(skb, MPLS_HLEN);
	memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb->mac_len);

	new_mpls_lse = mpls_hdr(skb);
	new_mpls_lse->label_stack_entry = mpls->mpls_lse;

	skb_postpush_rcsum(skb, new_mpls_lse, MPLS_HLEN);

	if (ovs_key_mac_proto(key) == MAC_PROTO_ETHERNET)
		update_ethertype(skb, eth_hdr(skb), mpls->mpls_ethertype);
	skb->protocol = mpls->mpls_ethertype;

	invalidate_flow_key(key);
	return 0;
}
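
The push_mpls() variants on this page all share one trick: after skb_push(), the existing Ethernet header is slid down with memmove() so that the newly reserved bytes end up between the L2 header and the payload rather than in front of the frame. A simplified sketch of that move (hypothetical helper; like the examples above, it assumes skb->data points at the Ethernet header on entry and skb->mac_len is set):

#include <linux/skbuff.h>
#include <linux/string.h>

static void *example_open_gap_after_l2(struct sk_buff *skb, unsigned int hlen)
{
	if (skb_cow_head(skb, hlen) < 0)
		return NULL;

	skb_push(skb, hlen);
	/* Slide the Ethernet header into the new space... */
	memmove(skb_mac_header(skb) - hlen, skb_mac_header(skb), skb->mac_len);
	skb_reset_mac_header(skb);

	/* ...so the gap now sits right after it, ready for e.g. an MPLS LSE. */
	return skb_mac_header(skb) + skb->mac_len;
}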
Example #7
static int xfrm6_beet_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ipv6hdr *ip6h;
	const unsigned char *old_mac;
	int size = sizeof(struct ipv6hdr);
	int err;

	err = skb_cow_head(skb, size + skb->mac_len);
	if (err)
		goto out;

	__skb_push(skb, size);
	skb_reset_network_header(skb);

	old_mac = skb_mac_header(skb);
	skb_set_mac_header(skb, -skb->mac_len);
	memmove(skb_mac_header(skb), old_mac, skb->mac_len);

	xfrm6_beet_make_header(skb);

	ip6h = ipv6_hdr(skb);
	ip6h->payload_len = htons(skb->len - size);
	ipv6_addr_copy(&ip6h->daddr, (struct in6_addr *) &x->sel.daddr.a6);
	ipv6_addr_copy(&ip6h->saddr, (struct in6_addr *) &x->sel.saddr.a6);
	err = 0;
out:
	return err;
}
Example #8
static struct sk_buff *lan9303_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	u16 *lan9303_tag;

	/* insert a special VLAN tag between the MAC addresses
	 * and the current ethertype field.
	 */
	if (skb_cow_head(skb, LAN9303_TAG_LEN) < 0) {
		dev_dbg(&dev->dev,
			"Cannot make room for the special tag. Dropping packet\n");
		return NULL;
	}

	/* provide 'LAN9303_TAG_LEN' bytes additional space */
	skb_push(skb, LAN9303_TAG_LEN);

	/* make room between MACs and Ether-Type */
	memmove(skb->data, skb->data + LAN9303_TAG_LEN, 2 * ETH_ALEN);

	lan9303_tag = (u16 *)(skb->data + 2 * ETH_ALEN);
	lan9303_tag[0] = htons(ETH_P_8021Q);
	lan9303_tag[1] = lan9303_xmit_use_arl(dp, skb->data) ?
				LAN9303_TAG_TX_USE_ALR :
				dp->index | LAN9303_TAG_TX_STP_OVERRIDE;
	lan9303_tag[1] = htons(lan9303_tag[1]);

	return skb;
}
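
DSA taggers like the one above follow the same recipe: make room for the switch tag, move the destination and source MAC addresses to the new front of the frame, and write the tag into the bytes that open up where the EtherType used to start. A stripped-down sketch for a hypothetical 2-byte tag (not a real tagger):

#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <linux/string.h>

static struct sk_buff *example_tag_xmit(struct sk_buff *skb, u16 tag)
{
	__be16 *tag_ptr;

	if (skb_cow_head(skb, 2) < 0)
		return NULL;		/* caller is expected to drop the skb */

	skb_push(skb, 2);
	/* Move both MAC addresses to the new start of the frame. */
	memmove(skb->data, skb->data + 2, 2 * ETH_ALEN);

	/* The 2-byte gap now sits between the MACs and the EtherType. */
	tag_ptr = (__be16 *)(skb->data + 2 * ETH_ALEN);
	*tag_ptr = htons(tag);
	return skb;
}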
Example #9
static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_mpls *mpls)
{
	__be32 *new_mpls_lse;

	/* The networking stack does not allow simultaneous tunnel and MPLS GSO. */
	if (skb->encapsulation)
		return -ENOTSUPP;

	if (skb_cow_head(skb, MPLS_HLEN) < 0)
		return -ENOMEM;

	skb_push(skb, MPLS_HLEN);
	memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);
	skb_reset_mac_header(skb);

	new_mpls_lse = (__be32 *)skb_mpls_header(skb);
	*new_mpls_lse = mpls->mpls_lse;

	skb_postpush_rcsum(skb, new_mpls_lse, MPLS_HLEN);

	update_ethertype(skb, eth_hdr(skb), mpls->mpls_ethertype);
	if (!skb->inner_protocol)
		skb_set_inner_protocol(skb, skb->protocol);
	skb->protocol = mpls->mpls_ethertype;

	invalidate_flow_key(key);
	return 0;
}
Example #10
static int push_mpls(struct sk_buff *skb,
		     const struct ovs_action_push_mpls *mpls)
{
	__be32 *new_mpls_lse;
	struct ethhdr *hdr;

	if (skb_cow_head(skb, MPLS_HLEN) < 0)
		return -ENOMEM;

	skb_push(skb, MPLS_HLEN);
	memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);
	skb_reset_mac_header(skb);

	new_mpls_lse = (__be32 *)mac_header_end(skb);
	*new_mpls_lse = mpls->mpls_lse;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_add(skb->csum, csum_partial(new_mpls_lse,
							     MPLS_HLEN, 0));

	hdr = eth_hdr(skb);
	hdr->h_proto = mpls->mpls_ethertype;
	if (!ovs_skb_get_inner_protocol(skb))
		ovs_skb_set_inner_protocol(skb, skb->protocol);
	skb->protocol = mpls->mpls_ethertype;
	return 0;
}
Example #11
static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_mpls *mpls)
{
	__be32 *new_mpls_lse;
	struct ethhdr *hdr;

	/* The networking stack does not allow simultaneous tunnel and MPLS GSO. */
	if (skb->encapsulation)
		return -ENOTSUPP;

	if (skb_cow_head(skb, MPLS_HLEN) < 0)
		return -ENOMEM;

	skb_push(skb, MPLS_HLEN);
	memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);
	skb_reset_mac_header(skb);

	new_mpls_lse = (__be32 *)skb_mpls_header(skb);
	*new_mpls_lse = mpls->mpls_lse;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_add(skb->csum, csum_partial(new_mpls_lse,
							     MPLS_HLEN, 0));

	hdr = eth_hdr(skb);
	hdr->h_proto = mpls->mpls_ethertype;

	if (!skb->inner_protocol)
		skb_set_inner_protocol(skb, skb->protocol);
	skb->protocol = mpls->mpls_ethertype;

	invalidate_flow_key(key);
	return 0;
}
Example #12
static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
			      struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr *tnl_params;

	if (OVTYPE_IS_GRE (skb)) {
		gre_ipgre_xmit_in (skb) = rdtsc ();
	}

	if (dev->header_ops) {
		/* Need space for new headers */
		if (skb_cow_head(skb, dev->needed_headroom -
				      (tunnel->hlen + sizeof(struct iphdr))))
			goto free_skb;

		tnl_params = (const struct iphdr *)skb->data;

		/* Pull skb since ip_tunnel_xmit() needs skb->data pointing
		 * to gre header.
		 */
		skb_pull(skb, tunnel->hlen + sizeof(struct iphdr));
		skb_reset_mac_header(skb);
	} else {
		if (skb_cow_head(skb, dev->needed_headroom))
			goto free_skb;

		tnl_params = &tunnel->parms.iph;
	}

	skb = gre_handle_offloads(skb, !!(tunnel->parms.o_flags&TUNNEL_CSUM));
	if (IS_ERR(skb))
		goto out;

	__gre_xmit(skb, dev, tnl_params, skb->protocol);

	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
out:
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
Example #13
static int rawsock_add_header(struct sk_buff *skb)
{

	if (skb_cow_head(skb, 1))
		return -ENOMEM;

	*skb_push(skb, 1) = 0;

	return 0;
}
Example #14
static void nci_add_rx_data_frag(struct nci_dev *ndev,
				 struct sk_buff *skb,
				 __u8 pbf, __u8 conn_id, __u8 status)
{
	int reassembly_len;
	int err = 0;

	if (status) {
		err = status;
		goto exit;
	}

	if (ndev->rx_data_reassembly) {
		reassembly_len = ndev->rx_data_reassembly->len;

		/* first, make enough room for the already accumulated data */
		if (skb_cow_head(skb, reassembly_len)) {
			pr_err("error adding room for accumulated rx data\n");

			kfree_skb(skb);
			skb = NULL;

			kfree_skb(ndev->rx_data_reassembly);
			ndev->rx_data_reassembly = NULL;

			err = -ENOMEM;
			goto exit;
		}

		/* second, combine the two fragments */
		memcpy(skb_push(skb, reassembly_len),
		       ndev->rx_data_reassembly->data,
		       reassembly_len);

		/* third, free old reassembly */
		kfree_skb(ndev->rx_data_reassembly);
		ndev->rx_data_reassembly = NULL;
	}

	if (pbf == NCI_PBF_CONT) {
		/* need to wait for next fragment, store skb and exit */
		ndev->rx_data_reassembly = skb;
		return;
	}

exit:
	if (ndev->nfc_dev->rf_mode == NFC_RF_TARGET) {
		/* Data received in Target mode, forward to nfc core */
		err = nfc_tm_data_received(ndev->nfc_dev, skb);
		if (err)
			pr_err("unable to handle received data\n");
	} else {
		nci_data_exchange_complete(ndev, skb, conn_id, err);
	}
}
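
The reassembly code above uses skb_cow_head() not for a protocol header but simply to guarantee enough headroom to prepend everything accumulated so far in a single memcpy(). The core of that pattern, as a hypothetical standalone helper:

#include <linux/skbuff.h>
#include <linux/string.h>

static int example_prepend_previous_frag(struct sk_buff *skb,
					 const struct sk_buff *head)
{
	/* Grow headroom by the length of the already accumulated data. */
	if (skb_cow_head(skb, head->len))
		return -ENOMEM;

	/* Prepend it; skb_push() returns the new start of the data. */
	memcpy(skb_push(skb, head->len), head->data, head->len);
	return 0;
}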
Example #15
int vxlan_xmit_skb(struct vxlan_sock *vs,
		   struct rtable *rt, struct sk_buff *skb,
		   __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
		   __be16 src_port, __be16 dst_port, __be32 vni)
{
	struct vxlanhdr *vxh;
	struct udphdr *uh;
	int min_headroom;
	int err;

	min_headroom = LL_RESERVED_SPACE(rt_dst(rt).dev) + rt_dst(rt).header_len
			+ VXLAN_HLEN + sizeof(struct iphdr)
			+ (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);

	/* Need space for new headers (invalidates iph ptr) */
	err = skb_cow_head(skb, min_headroom);
	if (unlikely(err)) {
		kfree_skb(skb);
		return err;
	}

	if (vlan_tx_tag_present(skb)) {
		if (unlikely(!vlan_insert_tag_set_proto(skb,
							skb->vlan_proto,
							vlan_tx_tag_get(skb))))
			return -ENOMEM;

		vlan_set_tci(skb, 0);
	}

	skb_reset_inner_headers(skb);

	vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
	vxh->vx_flags = htonl(VXLAN_FLAGS);
	vxh->vx_vni = vni;

	__skb_push(skb, sizeof(*uh));
	skb_reset_transport_header(skb);
	uh = udp_hdr(skb);

	uh->dest = dst_port;
	uh->source = src_port;

	uh->len = htons(skb->len);
	uh->check = 0;

	vxlan_set_owner(vs->sock->sk, skb);

	skb = handle_offloads(skb);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return iptunnel_xmit(vs->sock->sk, rt, skb, src, dst, IPPROTO_UDP,
			     tos, ttl, df, false);
}
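
The "(invalidates iph ptr)" remark in the comment above is worth spelling out: skb_cow_head() may reallocate the skb head, so any pointer previously taken into the packet (an iphdr, an ethhdr, a cached payload pointer) must be re-derived from the skb afterwards. A small hedged sketch of that rule (hypothetical function; it assumes the network header is already set and points at an IPv4 header):

#include <linux/skbuff.h>
#include <linux/ip.h>

static int example_refetch_after_cow(struct sk_buff *skb, unsigned int headroom)
{
	struct iphdr *iph;
	int err;

	err = skb_cow_head(skb, headroom);
	if (err)
		return err;

	/* Re-read the header pointer: the old one may now be stale. */
	iph = ip_hdr(skb);
	iph->ttl = 64;	/* example modification after the cow */
	return 0;
}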
Example #16
static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
			      struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr *tnl_params;

	skb = handle_offloads(tunnel, skb);
	if (IS_ERR(skb))
		goto out;

	if (dev->header_ops) {
		/* Need space for new headers */
		if (skb_cow_head(skb, dev->needed_headroom -
				      (tunnel->hlen + sizeof(struct iphdr))))
			goto free_skb;

		tnl_params = (const struct iphdr *)skb->data;

		/* Pull skb since ip_tunnel_xmit() needs skb->data pointing
		 * to gre header.
		 */
		skb_pull(skb, tunnel->hlen + sizeof(struct iphdr));
	} else {
		if (skb_cow_head(skb, dev->needed_headroom))
			goto free_skb;

		tnl_params = &tunnel->parms.iph;
	}

	__gre_xmit(skb, dev, tnl_params, skb->protocol);

	return NETDEV_TX_OK;

free_skb:
	dev_kfree_skb(skb);
out:
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
Example #17
/*
 * When forwarding bridge frames, we save a copy of the original
 * header before processing.
 */
int nf_bridge_copy_header(struct sk_buff *skb)
{
	int err;
	int header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);

	err = skb_cow_head(skb, header_size);
	if (err)
		return err;

	skb_copy_to_linear_data_offset(skb, -header_size,
				       skb->nf_bridge->data, header_size);
	__skb_push(skb, nf_bridge_encap_header_len(skb));
	return 0;
}
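
The negative offset passed to skb_copy_to_linear_data_offset() above writes into the headroom that skb_cow_head() has just guaranteed, and the following __skb_push() then moves skb->data back so that (part of) the restored bytes become visible packet data again. The same restore pattern as a hypothetical self-contained helper:

#include <linux/skbuff.h>

static int example_restore_saved_header(struct sk_buff *skb,
					const u8 *saved, unsigned int len)
{
	int err;

	err = skb_cow_head(skb, len);
	if (err)
		return err;

	/* Copy into the headroom, just in front of the current data... */
	skb_copy_to_linear_data_offset(skb, -(int)len, saved, len);
	/* ...and expose it as packet data. */
	__skb_push(skb, len);
	return 0;
}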
Example #18
/* Fill in the header for fragmented IP packets handled by
 * the IPv4 connection tracking code.
 */
int nf_bridge_copy_header(struct sk_buff *skb) {
  int err;
  unsigned int header_size;

  nf_bridge_update_protocol(skb);
  header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);
  err = skb_cow_head(skb, header_size);
  if (err) {
    return err;
  }

  skb_copy_to_linear_data_offset(skb, -header_size,
                                 skb->nf_bridge->data, header_size);
  __skb_push(skb, nf_bridge_encap_header_len(skb));
  return 0;
}
Example #19
File: vxlan.c Project: hisaki/ovs
int vxlan_xmit_skb(struct vxlan_sock *vs,
		   struct rtable *rt, struct sk_buff *skb,
		   __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
		   __be16 src_port, __be16 dst_port,
		   struct vxlan_metadata *md, bool xnet, u32 vxflags)
{
	struct vxlanhdr *vxh;
	int min_headroom;
	int err;
	bool udp_sum = !!(vxflags & VXLAN_F_UDP_CSUM);

	skb = udp_tunnel_handle_offloads(skb, udp_sum, true);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	min_headroom = LL_RESERVED_SPACE(rt_dst(rt).dev) + rt_dst(rt).header_len
			+ VXLAN_HLEN + sizeof(struct iphdr)
			+ (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);

	/* Need space for new headers (invalidates iph ptr) */
	err = skb_cow_head(skb, min_headroom);
	if (unlikely(err)) {
		kfree_skb(skb);
		return err;
	}

	skb = vlan_hwaccel_push_inside(skb);
	if (WARN_ON(!skb))
		return -ENOMEM;

	vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
	vxh->vx_flags = htonl(VXLAN_HF_VNI);
	vxh->vx_vni = md->vni;

	if (vxflags & VXLAN_F_GBP)
		vxlan_build_gbp_hdr(vxh, vxflags, md);

	vxlan_set_owner(vs->sock->sk, skb);

	ovs_skb_set_inner_protocol(skb, htons(ETH_P_TEB));

	return udp_tunnel_xmit_skb(rt, skb, src, dst, tos,
				   ttl, df, src_port, dst_port, xnet,
				   !udp_sum);
}
Example #20
static int br_nf_push_frag_xmit(struct sock *sk, struct sk_buff *skb)
{
    struct brnf_frag_data *data;
    int err;

    data = this_cpu_ptr(&brnf_frag_data_storage);
    err = skb_cow_head(skb, data->size);

    if (err) {
        kfree_skb(skb);
        return 0;
    }

    skb_copy_to_linear_data_offset(skb, -data->size, data->mac, data->size);
    __skb_push(skb, data->encap_size);

    return br_dev_queue_push_xmit(sk, skb);
}
Example #21
int batadv_skb_head_push(struct sk_buff *skb, unsigned int len)
{
	int result;

	/* TODO: We must check if we can release all references to non-payload
	 * data using skb_header_release in our skbs to allow skb_cow_head() to
	 * work optimally. This means that those skbs are not allowed to read
	 * or write any data which is before the current position of skb->data
	 * after that call and thus allow other skbs with the same data buffer
	 * to write freely in that area.
	 */
	result = skb_cow_head(skb, len);
	if (result < 0)
		return result;

	skb_push(skb, len);
	return 0;
}
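
batadv_skb_head_push() is essentially a checked wrapper around the pattern shown throughout this page: cow first, push second. A hypothetical caller (sketch only, building on the function above) would reserve the room through the wrapper and then fill in the bytes it exposed:

static int example_encap_with_wrapper(struct sk_buff *skb,
				      const void *hdr, unsigned int hdr_len)
{
	int ret;

	ret = batadv_skb_head_push(skb, hdr_len);
	if (ret < 0)
		return ret;

	/* skb->data now points at the freshly exposed header space. */
	memcpy(skb->data, hdr, hdr_len);
	return 0;
}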
Example #22
static bool expand_for_outer_iph(struct sk_buff *skb,
                                 const struct net_device *out_dev)
{
        unsigned int max_headroom;
        unsigned int len_to_expand;
        max_headroom = sizeof(struct iphdr) + LL_RESERVED_SPACE(out_dev);
        if (skb_headroom(skb) < max_headroom) {
                len_to_expand = max_headroom - skb_headroom(skb);
                if (unlikely(skb_cow_head(skb, len_to_expand))) {
                        printk(KERN_INFO "[FlexPath] Failed to expand sk_buff\n");
                        return false;
                }
        }
        skb = iptunnel_handle_offloads(skb, false, SKB_GSO_IPIP);
        if (IS_ERR(skb))
                return false;
        skb_push(skb, sizeof(struct iphdr));
        skb_reset_network_header(skb);
        return true;
}
Example #23
static void nci_add_rx_data_frag(struct nci_dev *ndev,
				struct sk_buff *skb,
				__u8 pbf)
{
	int reassembly_len;
	int err = 0;

	if (ndev->rx_data_reassembly) {
		reassembly_len = ndev->rx_data_reassembly->len;

		/* first, make enough room for the already accumulated data */
		if (skb_cow_head(skb, reassembly_len)) {
			nfc_err("error adding room for accumulated rx data");

			kfree_skb(skb);
			skb = NULL;

			kfree_skb(ndev->rx_data_reassembly);
			ndev->rx_data_reassembly = NULL;

			err = -ENOMEM;
			goto exit;
		}

		/* second, combine the two fragments */
		memcpy(skb_push(skb, reassembly_len),
				ndev->rx_data_reassembly->data,
				reassembly_len);

		/* third, free old reassembly */
		kfree_skb(ndev->rx_data_reassembly);
		ndev->rx_data_reassembly = NULL;
	}

	if (pbf == NCI_PBF_CONT) {
		/* need to wait for next fragment, store skb and exit */
		ndev->rx_data_reassembly = skb;
		return;
	}

exit:
	nci_data_exchange_complete(ndev, skb, err);
}
Example #24
static void nci_add_rx_data_frag(struct nci_dev *ndev,
				 struct sk_buff *skb,
				 __u8 pbf)
{
	int reassembly_len;
	int err = 0;

	if (ndev->rx_data_reassembly) {
		reassembly_len = ndev->rx_data_reassembly->len;

		/* first, make enough room for the already accumulated data */
		if (skb_cow_head(skb, reassembly_len)) {
			pr_err("error adding room for accumulated rx data\n");

			kfree_skb(skb);
			skb = NULL;

			kfree_skb(ndev->rx_data_reassembly);
			ndev->rx_data_reassembly = NULL;

			err = -ENOMEM;
			goto exit;
		}

		/* second, combine the two fragments */
		memcpy(skb_push(skb, reassembly_len),
		       ndev->rx_data_reassembly->data,
		       reassembly_len);

		/* third, free old reassembly */
		kfree_skb(ndev->rx_data_reassembly);
		ndev->rx_data_reassembly = NULL;
	}

	if (pbf == NCI_PBF_CONT) {
		/* need to wait for next fragment, store skb and exit */
		ndev->rx_data_reassembly = skb;
		return;
	}

exit:
	nci_data_exchange_complete(ndev, skb, err);
}
Example #25
static struct sk_buff *mtk_tag_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	u8 *mtk_tag;
	bool is_vlan_skb = true;

	/* Build the special tag after the MAC source address. If a VLAN header
	 * is present, the VLAN header and the special tag must be combined so
	 * that the switch can parse both the special tag and the VLAN tag at
	 * the same time and then look up the VLAN table with the VID.
	 */
	if (!skb_vlan_tagged(skb)) {
		if (skb_cow_head(skb, MTK_HDR_LEN) < 0)
			return NULL;

		skb_push(skb, MTK_HDR_LEN);
		memmove(skb->data, skb->data + MTK_HDR_LEN, 2 * ETH_ALEN);
		is_vlan_skb = false;
	}

	mtk_tag = skb->data + 2 * ETH_ALEN;

	/* Mark the tag attribute on special tag insertion to tell the hardware
	 * whether this is a special tag combined with an 802.1Q header.
	 */
	mtk_tag[0] = is_vlan_skb ? MTK_HDR_XMIT_TAGGED_TPID_8100 :
		     MTK_HDR_XMIT_UNTAGGED;
	mtk_tag[1] = (1 << dp->index) & MTK_HDR_XMIT_DP_BIT_MASK;

	/* Tag control information is kept for 802.1Q */
	if (!is_vlan_skb) {
		mtk_tag[2] = 0;
		mtk_tag[3] = 0;
	}

	return skb;
}
Example #26
/*
 * Add a new MAC header and IP header.
 */
int __remote_encapulation(struct datapath *dp, struct sk_buff *skb, int dst_ip)
{
    struct ethhdr *eth;
    struct iphdr *iph;

    if (skb_cow_head(skb, ETH_HLEN+ETH_IP_HLEN+IP_HLEN) < 0) { //make enough room at the head, new ether and ip header
        kfree_skb(skb);
        return -1;
    }

    /*add new eth_ip header*/
    skb_push(skb,ETH_IP_HLEN);
    *((unsigned short*)(skb->data)) = 0x3;

    /*add new ip header*/
    iph = (struct iphdr *)skb_push(skb,IP_HLEN);
    iph->ihl=5;
    iph->version=4;
    iph->tos=0;
    iph->tot_len = htons(skb->len);
    iph->id = 0;
    iph->frag_off = 0;
    iph->ttl = 255;
    iph->protocol = LC_REMOTE_IP_PROTO;
    iph->saddr=htonl(dp->local_ip);
    iph->daddr=htonl(dst_ip);
    iph->check=0; //MUST be set to 0 first.
    iph->check=ip_fast_csum((unsigned char *)iph,iph->ihl); //csum here

    /*add new ethernet header*/
    eth = (struct ethhdr *)skb_push(skb,ETH_HLEN);
    memcpy(skb->data, skb->data+ETH_IP_HLEN+ETH_HLEN+IP_HLEN, ETH_HLEN);
    eth->h_proto = htons(ETH_P_IP); //ip packet
    memcpy(&(eth->h_dest),gw_mac,ETH_ALEN);

    skb->mac_header -= (ETH_IP_HLEN+IP_HLEN+ETH_HLEN);

    return 0;
}
Example #27
netdev_tx_t qinq_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct dsa_slave_priv *p = netdev_priv(dev);
    struct vlan_ethhdr *veth;
    unsigned int len;
    int ret;

    if (skb_cow_head(skb, VLAN_HLEN) < 0)
        goto out_free_skb;

    veth = (struct vlan_ethhdr *)skb_push(skb, VLAN_HLEN);

    /* Move the mac addresses to the beginning of the new header. */
    memmove(skb->data, skb->data + VLAN_HLEN, 2 * VLAN_ETH_ALEN);
    skb->mac_header -= VLAN_HLEN;

    /* setup VLAN header fields */
    veth->h_vlan_proto = htons(ETH_P_QINQ);
    veth->h_vlan_TCI = htons(p->port);

    len = skb->len;
    skb->protocol = htons(ETH_P_QINQ);
    skb->dev = p->parent->dst->master_netdev;

    ret = dev_queue_xmit(skb);
    if (unlikely(ret != NET_XMIT_SUCCESS))
        goto out_dropped;

    dev->stats.tx_packets++;
    dev->stats.tx_bytes += len;

    return NETDEV_TX_OK;

out_free_skb:
    kfree_skb(skb);
out_dropped:
    dev->stats.tx_dropped++;
    return NETDEV_TX_OK;
}
Example #28
static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
				struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	skb = handle_offloads(tunnel, skb);
	if (IS_ERR(skb))
		goto out;

	if (skb_cow_head(skb, dev->needed_headroom))
		goto free_skb;

	__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_TEB));

	return NETDEV_TX_OK;

free_skb:
	dev_kfree_skb(skb);
out:
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
Example #29
static netdev_tx_t
ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	int ret;

	if (!(local->hw.flags & IEEE802154_HW_TX_OMIT_CKSUM)) {
		u16 crc = crc_ccitt(0, skb->data, skb->len);

		put_unaligned_le16(crc, skb_put(skb, 2));
	}

	if (skb_cow_head(skb, local->hw.extra_tx_headroom))
		goto err_tx;

	/* Stop the netif queue on each sub_if_data object. */
	ieee802154_stop_queue(&local->hw);

	/* async is priority, otherwise sync is fallback */
	if (local->ops->xmit_async) {
		ret = drv_xmit_async(local, skb);
		if (ret) {
			ieee802154_wake_queue(&local->hw);
			goto err_tx;
		}

		dev->stats.tx_packets++;
		dev->stats.tx_bytes += skb->len;
	} else {
		local->tx_skb = skb;
		queue_work(local->workqueue, &local->tx_work);
	}

	return NETDEV_TX_OK;

err_tx:
	kfree_skb(skb);
	return NETDEV_TX_OK;
}
Example #30
static int br_nf_push_frag_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct brnf_frag_data *data;
	int err;

	data = this_cpu_ptr(&brnf_frag_data_storage);
	err = skb_cow_head(skb, data->size);

	if (err) {
		kfree_skb(skb);
		return 0;
	}

	if (data->vlan_tci) {
		skb->vlan_tci = data->vlan_tci;
		skb->vlan_proto = data->vlan_proto;
	}

	skb_copy_to_linear_data_offset(skb, -data->size, data->mac, data->size);
	__skb_push(skb, data->encap_size);

	nf_bridge_info_free(skb);
	return br_dev_queue_push_xmit(net, sk, skb);
}