Example 1
/*
 * CFG802.11 network device handler for data transmission.
 */
static int
mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
	struct sk_buff *new_skb;
	struct mwifiex_txinfo *tx_info;
	struct timeval tv;

	dev_dbg(priv->adapter->dev, "data: %lu BSS(%d-%d): Data <= kernel\n",
		jiffies, priv->bss_type, priv->bss_num);

	if (priv->adapter->surprise_removed) {
		kfree_skb(skb);
		priv->stats.tx_dropped++;
		return 0;
	}
	if (!skb->len || (skb->len > ETH_FRAME_LEN)) {
		dev_err(priv->adapter->dev, "Tx: bad skb len %d\n", skb->len);
		kfree_skb(skb);
		priv->stats.tx_dropped++;
		return 0;
	}
	if (skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN) {
		dev_dbg(priv->adapter->dev,
			"data: Tx: insufficient skb headroom %d\n",
			skb_headroom(skb));
		/* Insufficient skb headroom - allocate a new skb */
		new_skb =
			skb_realloc_headroom(skb, MWIFIEX_MIN_DATA_HEADER_LEN);
		if (unlikely(!new_skb)) {
			dev_err(priv->adapter->dev, "Tx: cannot alloca new_skb\n");
			kfree_skb(skb);
			priv->stats.tx_dropped++;
			return 0;
		}
		kfree_skb(skb);
		skb = new_skb;
		dev_dbg(priv->adapter->dev, "info: new skb headroomd %d\n",
			skb_headroom(skb));
	}

	tx_info = MWIFIEX_SKB_TXCB(skb);
	memset(tx_info, 0, sizeof(*tx_info));
	tx_info->bss_num = priv->bss_num;
	tx_info->bss_type = priv->bss_type;

	/* Record the current time the packet was queued; used to
	 * determine the amount of time the packet was queued in
	 * the driver before it was sent to the firmware.
	 * The delay is then sent along with the packet to the
	 * firmware for aggregate delay calculation for stats and
	 * MSDU lifetime expiry.
	 */
	do_gettimeofday(&tv);
	skb->tstamp = timeval_to_ktime(tv);

	mwifiex_queue_tx_pkt(priv, skb);

	return 0;
}
Example 2
void ax25_transmit_buffer(ax25_cb *ax25, struct sk_buff *skb, int type)
{
	struct sk_buff *skbn;
	unsigned char *ptr;
	int headroom;

	if (ax25->ax25_dev == NULL) {
		ax25_disconnect(ax25, ENETUNREACH);
		return;
	}

	headroom = ax25_addr_size(ax25->digipeat);

	if (skb_headroom(skb) < headroom) {
		if ((skbn = skb_realloc_headroom(skb, headroom)) == NULL) {
			printk(KERN_CRIT "AX.25: ax25_transmit_buffer - out of memory\n");
			kfree_skb(skb);
			return;
		}

		if (skb->sk != NULL)
			skb_set_owner_w(skbn, skb->sk);

		consume_skb(skb);
		skb = skbn;
	}

	ptr = skb_push(skb, headroom);

	ax25_addr_build(ptr, &ax25->source_addr, &ax25->dest_addr, ax25->digipeat, type, ax25->modulus);

	ax25_queue_xmit(skb, ax25->ax25_dev->dev);
}
Example 3
struct sk_buff *ax25_rt_build_path(struct sk_buff *skb, ax25_address *src, ax25_address *dest, ax25_digi *digi)
{
	struct sk_buff *skbn;
	unsigned char *bp;
	int len;

	len = digi->ndigi * AX25_ADDR_LEN;

	if (skb_headroom(skb) < len) {
		if ((skbn = skb_realloc_headroom(skb, len)) == NULL) {
			printk(KERN_CRIT "AX.25: ax25_dg_build_path - out of memory\n");
			return NULL;
		}

		if (skb->sk != NULL)
			skb_set_owner_w(skbn, skb->sk);

		kfree_skb(skb);

		skb = skbn;
	}

	bp = skb_push(skb, len);

	ax25_addr_build(bp, src, dest, digi, AX25_COMMAND, AX25_MODULUS);

	return skb;
}
Example 4
static struct sk_buff *rndis_add_header(struct gether *port,
					struct sk_buff *skb)
{
	struct sk_buff *skb2;
	struct rndis_packet_msg_type *header = NULL;
	struct f_rndis *rndis = func_to_rndis(&port->func);
	struct usb_composite_dev *cdev = port->func.config->cdev;

	if (rndis->port.multi_pkt_xfer || cdev->gadget->sg_supported) {
		if (port->header) {
			header = port->header;
			header->MessageType = cpu_to_le32(RNDIS_MSG_PACKET);
			header->MessageLength = cpu_to_le32(skb->len +
							sizeof(*header));
			header->DataOffset = cpu_to_le32(36);
			header->DataLength = cpu_to_le32(skb->len);
			pr_debug("MessageLength:%d DataLength:%d\n",
						header->MessageLength,
						header->DataLength);
			return skb;
		} else {
			dev_kfree_skb_any(skb);
			pr_err("RNDIS header is NULL.\n");
			return NULL;
		}
	} else {
		skb2 = skb_realloc_headroom(skb,
				sizeof(struct rndis_packet_msg_type));
		if (skb2)
			rndis_add_hdr(skb2);

		dev_kfree_skb_any(skb);
		return skb2;
	}
}
Example 5
void ax25_transmit_buffer(ax25_cb *ax25, struct sk_buff *skb, int type)
{
	struct sk_buff *skbn;
	unsigned char *ptr;
	int headroom;

	if (ax25->ax25_dev == NULL) {
		ax25_disconnect(ax25, ENETUNREACH);
		return;
	}

	headroom = ax25_addr_size(ax25->digipeat);

	if (skb_headroom(skb) < headroom) {
		if ((skbn = skb_realloc_headroom(skb, headroom)) == NULL) {
			printk(KERN_CRIT "AX.25: ax25_transmit_buffer - out of memory\n");
			kfree_skb(skb);
			return;
		}

		if (skb->sk != NULL)
			skb_set_owner_w(skbn, skb->sk);

		kfree_skb(skb);
		skb = skbn;
	}

	ptr = skb_push(skb, headroom);

	ax25_addr_build(ptr, &ax25->source_addr, &ax25->dest_addr, ax25->digipeat, type, ax25->modulus);

	ax25_queue_xmit(skb, ax25->ax25_dev->dev);
}
Example 6
static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert)
{
	struct sk_buff *skb;
	int ihl = pkt->nh.iph->ihl<<2;
	struct igmphdr *igmp;
	struct igmpmsg *msg;
	int ret;

#ifdef CONFIG_IP_PIMSM
	if (assert == IGMPMSG_WHOLEPKT)
		skb = skb_realloc_headroom(pkt, sizeof(struct iphdr));
	else
#endif
		skb = alloc_skb(128, GFP_ATOMIC);

	if(!skb)
		return -ENOBUFS;

#ifdef CONFIG_IP_PIMSM
	if (assert == IGMPMSG_WHOLEPKT) {
		/* Ugly, but we have no choice with this interface.
		   Duplicate old header, fix ihl, length etc.
		   And all this only to mangle msg->im_msgtype and
		   to set msg->im_mbz to "mbz" :-)
		 */
		msg = (struct igmpmsg*)skb_push(skb, sizeof(struct iphdr));
		skb->nh.raw = skb->h.raw = (u8*)msg;
		memcpy(msg, pkt->nh.raw, sizeof(struct iphdr));
		msg->im_msgtype = IGMPMSG_WHOLEPKT;
		msg->im_mbz = 0;
		msg->im_vif = reg_vif_num;
		skb->nh.iph->ihl = sizeof(struct iphdr) >> 2;
		skb->nh.iph->tot_len = htons(ntohs(pkt->nh.iph->tot_len) + sizeof(struct iphdr));
	} else 
Example 7
/* When forwarding a packet, we must ensure that we've got enough headroom
 * for the encapsulation packet in the skb.  This also gives us an
 * opportunity to figure out what the payload_len, dsfield, ttl, and df
 * values should be, so that we won't need to look at the old ip header
 * again
 */
static struct sk_buff *
ip_vs_prepare_tunneled_skb(struct sk_buff *skb, int skb_af,
			   unsigned int max_headroom, __u8 *next_protocol,
			   __u32 *payload_len, __u8 *dsfield, __u8 *ttl,
			   __be16 *df)
{
	struct sk_buff *new_skb = NULL;
	struct iphdr *old_iph = NULL;
#ifdef CONFIG_IP_VS_IPV6
	struct ipv6hdr *old_ipv6h = NULL;
#endif

	ip_vs_drop_early_demux_sk(skb);

	if (skb_headroom(skb) < max_headroom || skb_cloned(skb)) {
		new_skb = skb_realloc_headroom(skb, max_headroom);
		if (!new_skb)
			goto error;
		if (skb->sk)
			skb_set_owner_w(new_skb, skb->sk);
		consume_skb(skb);
		skb = new_skb;
	}

#ifdef CONFIG_IP_VS_IPV6
	if (skb_af == AF_INET6) {
		old_ipv6h = ipv6_hdr(skb);
		*next_protocol = IPPROTO_IPV6;
		if (payload_len)
			*payload_len =
				ntohs(old_ipv6h->payload_len) +
				sizeof(*old_ipv6h);
		*dsfield = ipv6_get_dsfield(old_ipv6h);
		*ttl = old_ipv6h->hop_limit;
		if (df)
			*df = 0;
	} else
#endif
	{
		old_iph = ip_hdr(skb);
		/* Copy DF, reset fragment offset and MF */
		if (df)
			*df = (old_iph->frag_off & htons(IP_DF));
		*next_protocol = IPPROTO_IPIP;

		/* fix old IP header checksum */
		ip_send_check(old_iph);
		*dsfield = ipv4_get_dsfield(old_iph);
		*ttl = old_iph->ttl;
		if (payload_len)
			*payload_len = ntohs(old_iph->tot_len);
	}

	return skb;
error:
	kfree_skb(skb);
	return ERR_PTR(-ENOMEM);
}
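The comment above describes ip_vs_prepare_tunneled_skb() as a helper that both guarantees headroom and extracts the fields needed for the outer header. A hedged caller sketch follows; the function name example_tunnel_xmit and its error handling are assumptions, not code from the source tree:

static int example_tunnel_xmit(struct sk_buff *skb, int skb_af,
			       unsigned int max_headroom)
{
	__u8 next_protocol = 0, dsfield = 0, ttl = 0;
	__be16 df = 0;
	__u32 payload_len = 0;

	skb = ip_vs_prepare_tunneled_skb(skb, skb_af, max_headroom,
					 &next_protocol, &payload_len,
					 &dsfield, &ttl, &df);
	if (IS_ERR(skb))
		return NF_STOLEN;	/* the helper already freed the skb */

	/* ... build the outer IP/IPv6 header from next_protocol, dsfield,
	 * ttl and df, then hand the skb to the output path ... */
	return NF_STOLEN;
}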
Example 8
static int ethertap_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net_local *lp = (struct net_local *)dev->priv;
#ifdef CONFIG_ETHERTAP_MC
	struct ethhdr *eth = (struct ethhdr*)skb->data;
#endif

	if (skb_headroom(skb) < 2) {
		static int once;
	  	struct sk_buff *skb2;

		if (!once) {
			once = 1;
			printk(KERN_DEBUG "%s: not aligned xmit by protocol %04x\n", dev->name, skb->protocol);
		}

		skb2 = skb_realloc_headroom(skb, 2);
		dev_kfree_skb(skb);
		if (skb2 == NULL)
			return 0;
		skb = skb2;
	}
	__skb_push(skb, 2);

	/* Do the same thing that loopback does. */
	if (skb_shared(skb)) {
	  	struct sk_buff *skb2 = skb;
	  	skb = skb_clone(skb, GFP_ATOMIC);	/* Clone the buffer */
	  	if (skb==NULL) {
			dev_kfree_skb(skb2);
			return 0;
		}
	  	dev_kfree_skb(skb2);
	}
	/* ... but do not orphan it here, netlink does it in any case. */

	lp->stats.tx_bytes+=skb->len;
	lp->stats.tx_packets++;

#ifndef CONFIG_ETHERTAP_MC
	netlink_broadcast(lp->nl, skb, 0, ~0, GFP_ATOMIC);
#else
	if (dev->flags&IFF_NOARP) {
		netlink_broadcast(lp->nl, skb, 0, ~0, GFP_ATOMIC);
		return 0;
	}

	if (!(eth->h_dest[0]&1)) {
		/* Unicast packet */
		__u32 pid;
		memcpy(&pid, eth->h_dest+2, 4);
		netlink_unicast(lp->nl, skb, ntohl(pid), MSG_DONTWAIT);
	} else
		netlink_broadcast(lp->nl, skb, 0, ethertap_mc_hash(eth->h_dest), GFP_ATOMIC);
#endif
	return 0;
}
Example 9
/*
 * Function irlan_eth_tx (skb)
 *
 *    Transmits ethernet frames over IrDA link.
 *
 */
static netdev_tx_t irlan_eth_xmit(struct sk_buff *skb,
					struct net_device *dev)
{
	struct irlan_cb *self = netdev_priv(dev);
	int ret;
	unsigned int len;

	/* skb headroom large enough to contain all IrDA-headers? */
	if ((skb_headroom(skb) < self->max_header_size) || (skb_shared(skb))) {
		struct sk_buff *new_skb =
			skb_realloc_headroom(skb, self->max_header_size);

		/*  We have to free the original skb anyway */
		dev_kfree_skb(skb);

		/* Did the realloc succeed? */
		if (new_skb == NULL)
			return NETDEV_TX_OK;

		/* Use the new skb instead */
		skb = new_skb;
	}

	dev->trans_start = jiffies;

	len = skb->len;
	/* Now queue the packet in the transport layer */
	if (self->use_udata)
		ret = irttp_udata_request(self->tsap_data, skb);
	else
		ret = irttp_data_request(self->tsap_data, skb);

	if (ret < 0) {
		/*
		 * IrTTP's tx queue is full, so we just have to
		 * drop the frame! You might think that we should
		 * just return -1 and don't deallocate the frame,
		 * but that is dangerous since it's possible that
		 * we have replaced the original skb with a new
		 * one with larger headroom, and that would really
		 * confuse do_dev_queue_xmit() in dev.c! I have
		 * tried :-) DB
		 */
		/* irttp_data_request has already freed the packet */
		dev->stats.tx_dropped++;
	} else {
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += len;
	}

	return NETDEV_TX_OK;
}
Example 10
static int pppoatm_send(struct ppp_channel *chan, struct sk_buff *skb)
{
	struct pppoatm_vcc *pvcc = chan_to_pvcc(chan);
	ATM_SKB(skb)->vcc = pvcc->atmvcc;
	pr_debug("(skb=0x%p, vcc=0x%p)\n", skb, pvcc->atmvcc);
	if (skb->data[0] == '\0' && (pvcc->flags & SC_COMP_PROT))
		(void) skb_pull(skb, 1);
	switch (pvcc->encaps) {		/* LLC encapsulation needed */
	case e_llc:
		if (skb_headroom(skb) < LLC_LEN) {
			struct sk_buff *n;
			n = skb_realloc_headroom(skb, LLC_LEN);
			if (n != NULL &&
			    !atm_may_send(pvcc->atmvcc, n->truesize)) {
				kfree_skb(n);
				goto nospace;
			}
			kfree_skb(skb);
			skb = n;
			if (skb == NULL)
				return DROP_PACKET;
		} else if (!atm_may_send(pvcc->atmvcc, skb->truesize))
			goto nospace;
		memcpy(skb_push(skb, LLC_LEN), pppllc, LLC_LEN);
		break;
	case e_vc:
		if (!atm_may_send(pvcc->atmvcc, skb->truesize))
			goto nospace;
		break;
	case e_autodetect:
		pr_debug("Trying to send without setting encaps!\n");
		kfree_skb(skb);
		return 1;
	}

	atomic_add(skb->truesize, &sk_atm(ATM_SKB(skb)->vcc)->sk_wmem_alloc);
	ATM_SKB(skb)->atm_options = ATM_SKB(skb)->vcc->atm_options;
	pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n",
		 skb, ATM_SKB(skb)->vcc, ATM_SKB(skb)->vcc->dev);
	return ATM_SKB(skb)->vcc->send(ATM_SKB(skb)->vcc, skb)
	    ? DROP_PACKET : 1;
nospace:
	/*
	 * We don't have space to send this SKB now, but we might have
	 * already applied SC_COMP_PROT compression, so we may need to undo it.
	 */
	if ((pvcc->flags & SC_COMP_PROT) && skb_headroom(skb) > 0 &&
	    skb->data[-1] == '\0')
		(void) skb_push(skb, 1);
	return 0;
}
Example 11
static struct sk_buff *_rmnet_add_headroom(struct sk_buff **skb,
					   struct net_device *dev)
{
	struct sk_buff *skbn;

	if (skb_headroom(*skb) < dev->needed_headroom) {
		msm_rmnet_bam_headroom_check_failure++;
		skbn = skb_realloc_headroom(*skb, dev->needed_headroom);
		kfree_skb(*skb);
		*skb = skbn;
	} else {
		skbn = *skb;
	}

	return skbn;
}
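Note the contract of _rmnet_add_headroom(): on allocation failure it frees the original buffer and stores NULL through *skb, so the caller only needs to test the return value. A hedged caller sketch (example_rmnet_xmit is a hypothetical name):

static netdev_tx_t example_rmnet_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	if (!_rmnet_add_headroom(&skb, dev)) {
		/* original skb already freed, nothing left to release */
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	/* skb now has at least dev->needed_headroom; push the MAP
	 * header and queue the packet as usual */
	return NETDEV_TX_OK;
}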
Example 12
/* modelled after ip_finish_output2 */
static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct rtable *rt = (struct rtable *)dst;
	struct net_device *dev = dst->dev;
	unsigned int hh_len = LL_RESERVED_SPACE(dev);
	struct neighbour *neigh;
	u32 nexthop;
	int ret = -EINVAL;

	nf_reset(skb);

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (!skb2) {
			ret = -ENOMEM;
			goto err;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);

		consume_skb(skb);
		skb = skb2;
	}

	rcu_read_lock_bh();

	nexthop = (__force u32)rt_nexthop(rt, ip_hdr(skb)->daddr);
	neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
	if (unlikely(!neigh))
		neigh = __neigh_create(&arp_tbl, &nexthop, dev, false);
	if (!IS_ERR(neigh)) {
		sock_confirm_neigh(skb, neigh);
		ret = neigh_output(neigh, skb);
		rcu_read_unlock_bh();
		return ret;
	}

	rcu_read_unlock_bh();
err:
	vrf_tx_error(skb->dev, skb);
	return ret;
}
Example 13
static int dn_long_output(struct sk_buff *skb)
{
    struct dst_entry *dst = skb->dst;
    struct neighbour *neigh = dst->neighbour;
    struct net_device *dev = neigh->dev;
    int headroom = dev->hard_header_len + sizeof(struct dn_long_packet) + 3;
    unsigned char *data;
    struct dn_long_packet *lp;
    struct dn_skb_cb *cb = DN_SKB_CB(skb);


    if (skb_headroom(skb) < headroom) {
        struct sk_buff *skb2 = skb_realloc_headroom(skb, headroom);
        if (skb2 == NULL) {
            if (net_ratelimit())
                printk(KERN_CRIT "dn_long_output: no memory\n");
            kfree_skb(skb);
            return -ENOBUFS;
        }
        kfree_skb(skb);
        skb = skb2;
        if (net_ratelimit())
            printk(KERN_INFO "dn_long_output: Increasing headroom\n");
    }

    data = skb_push(skb, sizeof(struct dn_long_packet) + 3);
    lp = (struct dn_long_packet *)(data+3);

    *((unsigned short *)data) = dn_htons(skb->len - 2);
    *(data + 2) = 1 | DN_RT_F_PF; /* Padding */

    lp->msgflg   = DN_RT_PKT_LONG|(cb->rt_flags&(DN_RT_F_IE|DN_RT_F_RQR|DN_RT_F_RTS));
    lp->d_area   = lp->d_subarea = 0;
    dn_dn2eth(lp->d_id, dn_ntohs(cb->dst));
    lp->s_area   = lp->s_subarea = 0;
    dn_dn2eth(lp->s_id, dn_ntohs(cb->src));
    lp->nl2      = 0;
    lp->visit_ct = cb->hops & 0x3f;
    lp->s_class  = 0;
    lp->pt       = 0;

    skb->nh.raw = skb->data;

    return NF_HOOK(PF_DECnet, NF_DN_POST_ROUTING, skb, NULL, neigh->dev, dn_neigh_output_packet);
}
Example 14
static int btmrvl_tx_pkt(struct btmrvl_private *priv, struct sk_buff *skb)
{
	int ret = 0;

	if (!skb || !skb->data)
		return -EINVAL;

	if (!skb->len || ((skb->len + BTM_HEADER_LEN) > BTM_UPLD_SIZE)) {
		BT_ERR("Tx Error: Bad skb length %d : %d",
						skb->len, BTM_UPLD_SIZE);
		return -EINVAL;
	}

	if (skb_headroom(skb) < BTM_HEADER_LEN) {
		struct sk_buff *tmp = skb;

		skb = skb_realloc_headroom(skb, BTM_HEADER_LEN);
		if (!skb) {
			BT_ERR("Tx Error: realloc_headroom failed %d",
				BTM_HEADER_LEN);
			skb = tmp;
			return -EINVAL;
		}

		kfree_skb(tmp);
	}

	skb_push(skb, BTM_HEADER_LEN);

	/* header type: byte[3]
	 * HCI_COMMAND = 1, ACL_DATA = 2, SCO_DATA = 3, 0xFE = Vendor
	 * header length: byte[2][1][0]
	 */

	skb->data[0] = (skb->len & 0x0000ff);
	skb->data[1] = (skb->len & 0x00ff00) >> 8;
	skb->data[2] = (skb->len & 0xff0000) >> 16;
	skb->data[3] = bt_cb(skb)->pkt_type;

	if (priv->hw_host_to_card)
		ret = priv->hw_host_to_card(priv, skb->data, skb->len);

	return ret;
}
Example 15
/* Stolen from ip_finish_output2
 * PRE : skb->dev is set to the device we are leaving by
 *       skb->dst is not NULL
 * POST: the packet is sent with the link layer header pushed
 *       the packet is destroyed
 */
static void ip_direct_send(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct hh_cache *hh = dst->hh;
	struct net_device *dev = dst->dev;
	int hh_len = LL_RESERVED_SPACE(dev);
	unsigned seq;

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len &&
		     (dev->header_ops && dev->header_ops->create))) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (skb2 == NULL) {
			kfree_skb(skb);
			return;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);
		kfree_skb(skb);
		skb = skb2;
	}

	if (hh) {
		do {
			int hh_alen;

			seq = read_seqbegin(&hh->hh_lock);
			hh_alen = HH_DATA_ALIGN(hh->hh_len);
			memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
		} while (read_seqretry(&hh->hh_lock, seq));
		skb_push(skb, hh->hh_len);
		hh->hh_output(skb);
	} else if (dst->neighbour)
		dst->neighbour->output(skb);
	else {
		if (net_ratelimit())
			DEBUGP(KERN_DEBUG "ipt_ROUTE: no hdr & no neighbour cache!\n");
		kfree_skb(skb);
	}
}
Example 16
/** Make enough room in an skb for extra header and trailer.
 *
 * @param pskb return parameter for expanded skb
 * @param skb skb
 * @param head_n required headroom
 * @param tail_n required tailroom
 * @return 0 on success, error code otherwise
 */
int skb_make_room(struct sk_buff **pskb, struct sk_buff *skb, int head_n, int tail_n){
    int err = 0;
    int has_headroom = (head_n <= skb_headroom(skb));
    int has_tailroom = (tail_n <= skb_tailroom(skb));
    int writeable = !skb_cloned(skb) && !skb_shared(skb);

    dprintf("> skb=%p headroom=%d head_n=%d tailroom=%d tail_n=%d\n",
            skb,
            skb_headroom(skb), head_n,
            skb_tailroom(skb), tail_n);
    if(writeable && has_headroom && has_tailroom){
        // There's room! Reuse it.
        *pskb = skb;
    } else if(writeable && has_tailroom){
        // Tailroom, no headroom. Expand header the way GRE does.
        struct sk_buff *new_skb = skb_realloc_headroom(skb, head_n + 16);
        if(!new_skb){
            err = -ENOMEM;
            goto exit;
        }
        kfree_skb(skb);
        *pskb = new_skb;
    } else {
        // No room. Expand. There may be more efficient ways to do
        // this, but this is simple and correct.
        struct sk_buff *new_skb = skb_copy_expand(skb, head_n + 16, tail_n, GFP_ATOMIC);
        if(!new_skb){
            err = -ENOMEM;
            goto exit;
        }
        kfree_skb(skb);
        *pskb = new_skb;
    }
    dprintf("> skb=%p headroom=%d head_n=%d tailroom=%d tail_n=%d\n",
            *pskb,
            skb_headroom(*pskb), head_n,
            skb_tailroom(*pskb), tail_n);
  exit:
    dprintf("< err=%d\n", err);
    return err;
}
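A hedged usage sketch for skb_make_room() based on the doc comment above; the header contents and sizes are hypothetical. Unlike most helpers in this collection, skb_make_room() does not free the original skb on failure, so the caller still owns it:

static int example_add_header_trailer(struct sk_buff *skb)
{
	static const unsigned char hdr[8];	/* hypothetical 8-byte header */
	struct sk_buff *nskb;
	int err;

	err = skb_make_room(&nskb, skb, sizeof(hdr), 4);
	if (err) {
		kfree_skb(skb);		/* still ours on the error path */
		return err;
	}
	/* nskb is either skb itself or a replacement; if it was
	 * replaced, the old buffer has already been freed */
	memcpy(skb_push(nskb, sizeof(hdr)), hdr, sizeof(hdr));
	return 0;
}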
Example 17
/* Stolen from ip_finish_output2
 * PRE : skb->dev is set to the device we are leaving by
 *       skb->dst is not NULL
 * POST: the packet is sent with the link layer header pushed
 *       the packet is destroyed
 */
static void ip_direct_send(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct hh_cache *hh = dst->hh;
	struct net_device *dev = dst->dev;
	int hh_len = LL_RESERVED_SPACE(dev);

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len )) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (skb2 == NULL) {
			kfree_skb(skb);
			return;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);
		kfree_skb(skb);
		skb = skb2;
	}

	if (hh) {
		int hh_alen;

		write_seqlock_bh(&hh->hh_lock);
		hh_alen = HH_DATA_ALIGN(hh->hh_len);
		memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
		write_sequnlock_bh(&hh->hh_lock);
		skb_push(skb, hh->hh_len);
		hh->hh_output(skb);
	} else if (dst->neighbour)
		dst->neighbour->output(skb);
	else {
		if (net_ratelimit())
			pr_debug("ipt_ROUTE: no hdr & no neighbour cache!\n");
		kfree_skb(skb);
	}
}
Example 18
static int dn_short_output(struct sk_buff *skb)
{
    struct dst_entry *dst = skb->dst;
    struct neighbour *neigh = dst->neighbour;
    struct net_device *dev = neigh->dev;
    int headroom = dev->hard_header_len + sizeof(struct dn_short_packet) + 2;
    struct dn_short_packet *sp;
    unsigned char *data;
    struct dn_skb_cb *cb = DN_SKB_CB(skb);


    if (skb_headroom(skb) < headroom) {
        struct sk_buff *skb2 = skb_realloc_headroom(skb, headroom);
        if (skb2 == NULL) {
            if (net_ratelimit())
                printk(KERN_CRIT "dn_short_output: no memory\n");
            kfree_skb(skb);
            return -ENOBUFS;
        }
        kfree_skb(skb);
        skb = skb2;
        if (net_ratelimit())
            printk(KERN_INFO "dn_short_output: Increasing headroom\n");
    }

    data = skb_push(skb, sizeof(struct dn_short_packet) + 2);
    *((unsigned short *)data) = dn_htons(skb->len - 2);
    sp = (struct dn_short_packet *)(data+2);

    sp->msgflg     = DN_RT_PKT_SHORT|(cb->rt_flags&(DN_RT_F_RQR|DN_RT_F_RTS));
    sp->dstnode    = cb->dst;
    sp->srcnode    = cb->src;
    sp->forward    = cb->hops & 0x3f;

    skb->nh.raw = skb->data;

    return NF_HOOK(PF_DECnet, NF_DN_POST_ROUTING, skb, NULL, neigh->dev, dn_neigh_output_packet);
}
Example 19
/*
 * Direct send packets to output.
 * Stolen from ip_finish_output2.
 */
static inline int bcm_fast_path_output(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct net_device *dev = dst->dev;
	int hh_len = LL_RESERVED_SPACE(dev);
	int ret = 0;

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->hard_header)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, hh_len);
		if (skb2 == NULL) {
			kfree_skb(skb);
			return -ENOMEM;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);
		kfree_skb(skb);
		skb = skb2;
	}

	if (dst->hh)
		ret = neigh_hh_output(dst->hh, skb);
	else if (dst->neighbour)
		ret = dst->neighbour->output(skb);
	else {
#ifdef DEBUG
		if (net_ratelimit())
			printk(KERN_DEBUG "bcm_fast_path_output: No header cache and no neighbour!\n");
#endif
		kfree_skb(skb);
		return -EINVAL;
	}

	/* Don't return 1 */
	return (ret == 1) ? 0 : ret;
}
Example 20
static int ipmr_cache_report(struct net *net,
			     struct sk_buff *pkt, vifi_t vifi, int assert)
{
	struct sk_buff *skb;
	const int ihl = ip_hdrlen(pkt);
	struct igmphdr *igmp;
	struct igmpmsg *msg;
	int ret;

#ifdef CONFIG_IP_PIMSM
	if (assert == IGMPMSG_WHOLEPKT)
		skb = skb_realloc_headroom(pkt, sizeof(struct iphdr));
	else
#endif
		skb = alloc_skb(128, GFP_ATOMIC);

	if (!skb)
		return -ENOBUFS;

#ifdef CONFIG_IP_PIMSM
	if (assert == IGMPMSG_WHOLEPKT) {
		/* Ugly, but we have no choice with this interface.
		   Duplicate old header, fix ihl, length etc.
		   And all this only to mangle msg->im_msgtype and
		   to set msg->im_mbz to "mbz" :-)
		 */
		skb_push(skb, sizeof(struct iphdr));
		skb_reset_network_header(skb);
		skb_reset_transport_header(skb);
		msg = (struct igmpmsg *)skb_network_header(skb);
		memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr));
		msg->im_msgtype = IGMPMSG_WHOLEPKT;
		msg->im_mbz = 0;
		msg->im_vif = net->ipv4.mroute_reg_vif_num;
		ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
		ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
					     sizeof(struct iphdr));
	} else
Example 21
int ip_xfrm_me_harder(struct sk_buff **pskb)
{
	struct flowi fl;
	unsigned int hh_len;
	struct dst_entry *dst;

	if (IPCB(*pskb)->flags & IPSKB_XFRM_TRANSFORMED)
		return 0;
	if (xfrm_decode_session(*pskb, &fl, AF_INET) < 0)
		return -1;

	dst = (*pskb)->dst;
	if (dst->xfrm)
		dst = ((struct xfrm_dst *)dst)->route;
	dst_hold(dst);

	if (xfrm_lookup(&dst, &fl, (*pskb)->sk, 0) < 0)
		return -1;

	dst_release((*pskb)->dst);
	(*pskb)->dst = dst;

	/* Change in oif may mean change in hh_len. */
	hh_len = (*pskb)->dst->dev->hard_header_len;
	if (skb_headroom(*pskb) < hh_len) {
		struct sk_buff *nskb;

		nskb = skb_realloc_headroom(*pskb, hh_len);
		if (!nskb)
			return -1;
		if ((*pskb)->sk)
			skb_set_owner_w(nskb, (*pskb)->sk);
		kfree_skb(*pskb);
		*pskb = nskb;
	}
	return 0;
}
Example 22
static inline struct sk_buff *gelic_put_vlan_tag(struct sk_buff *skb,
						 unsigned short tag)
{
	struct vlan_ethhdr *veth;
	static unsigned int c;

	if (skb_headroom(skb) < VLAN_HLEN) {
		struct sk_buff *sk_tmp = skb;
		pr_debug("%s: hd=%d c=%ud\n", __func__, skb_headroom(skb), c);
		skb = skb_realloc_headroom(sk_tmp, VLAN_HLEN);
		if (!skb)
			return NULL;
		dev_kfree_skb_any(sk_tmp);
	}
	veth = (struct vlan_ethhdr *)skb_push(skb, VLAN_HLEN);

	/* Move the mac addresses to the top of buffer */
	memmove(skb->data, skb->data + VLAN_HLEN, 2 * ETH_ALEN);

	veth->h_vlan_proto = cpu_to_be16(ETH_P_8021Q);
	veth->h_vlan_TCI = htons(tag);

	return skb;
}
Example 23
int
ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
                     struct ip_vs_protocol *pp)
{
    struct rt6_info *rt;		/* Route to the other host */
    struct in6_addr saddr;		/* Source for tunnel */
    struct net_device *tdev;	/* Device to other host */
    struct ipv6hdr  *old_iph = ipv6_hdr(skb);
    struct ipv6hdr  *iph;		/* Our new IP header */
    unsigned int max_headroom;	/* The extra header space needed */
    int    mtu;
    int ret;

    EnterFunction(10);

    if (!(rt = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6,
                                     &saddr, 1, 1|2)))
        goto tx_error_icmp;
    if (__ip_vs_is_local_route6(rt)) {
        dst_release(&rt->dst);
        IP_VS_XMIT(NFPROTO_IPV6, skb, cp, 1);
    }

    tdev = rt->dst.dev;

    mtu = dst_mtu(&rt->dst) - sizeof(struct ipv6hdr);
    if (mtu < IPV6_MIN_MTU) {
        IP_VS_DBG_RL("%s(): mtu less than %d\n", __func__,
                     IPV6_MIN_MTU);
        goto tx_error_put;
    }
    if (skb_dst(skb))
        skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);

    if (mtu < ntohs(old_iph->payload_len) + sizeof(struct ipv6hdr) &&
            !skb_is_gso(skb)) {
        if (!skb->dev) {
            struct net *net = dev_net(skb_dst(skb)->dev);

            skb->dev = net->loopback_dev;
        }
        icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
        IP_VS_DBG_RL("%s(): frag needed\n", __func__);
        goto tx_error_put;
    }

    /*
     * Okay, now see if we can stuff it in the buffer as-is.
     */
    max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct ipv6hdr);

    if (skb_headroom(skb) < max_headroom
            || skb_cloned(skb) || skb_shared(skb)) {
        struct sk_buff *new_skb =
            skb_realloc_headroom(skb, max_headroom);
        if (!new_skb) {
            dst_release(&rt->dst);
            kfree_skb(skb);
            IP_VS_ERR_RL("%s(): no memory\n", __func__);
            return NF_STOLEN;
        }
        kfree_skb(skb);
        skb = new_skb;
        old_iph = ipv6_hdr(skb);
    }

    skb->transport_header = skb->network_header;

    skb_push(skb, sizeof(struct ipv6hdr));
    skb_reset_network_header(skb);
    memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));

    /* drop old route */
    skb_dst_drop(skb);
    skb_dst_set(skb, &rt->dst);

    /*
     *	Push down and install the IPIP header.
     */
    iph			=	ipv6_hdr(skb);
    iph->version		=	6;
    iph->nexthdr		=	IPPROTO_IPV6;
    iph->payload_len	=	old_iph->payload_len;
    be16_add_cpu(&iph->payload_len, sizeof(*old_iph));
    iph->priority		=	old_iph->priority;
    memset(&iph->flow_lbl, 0, sizeof(iph->flow_lbl));
    ipv6_addr_copy(&iph->daddr, &cp->daddr.in6);
    ipv6_addr_copy(&iph->saddr, &saddr);
    iph->hop_limit		=	old_iph->hop_limit;

    /* Another hack: avoid icmp_send in ip_fragment */
    skb->local_df = 1;

    ret = IP_VS_XMIT_TUNNEL(skb, cp);
    if (ret == NF_ACCEPT)
        ip6_local_out(skb);
    else if (ret == NF_DROP)
        kfree_skb(skb);

    LeaveFunction(10);

    return NF_STOLEN;

tx_error_icmp:
    dst_link_failure(skb);
tx_error:
    kfree_skb(skb);
    LeaveFunction(10);
    return NF_STOLEN;
tx_error_put:
    dst_release(&rt->dst);
    goto tx_error;
}
Example 24
/*
 *   IP Tunneling transmitter
 *
 *   This function encapsulates the packet in a new IP packet, its
 *   destination will be set to cp->daddr. Most of the code in this
 *   function is taken from ipip.c.
 *
 *   It is used in VS/TUN cluster. The load balancer selects a real
 *   server from a cluster based on a scheduling algorithm,
 *   encapsulates the request packet and forwards it to the selected
 *   server. For example, all real servers are configured with
 *   "ifconfig tunl0 <Virtual IP Address> up". When the server receives
 *   the encapsulated packet, it will decapsulate the packet, process
 *   the request and return the response packets directly to the client
 *   without passing through the load balancer. This can greatly
 *   increase the scalability of the virtual server.
 *
 *   Used for ANY protocol
 */
int
ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
                  struct ip_vs_protocol *pp)
{
    struct rtable *rt;			/* Route to the other host */
    struct net_device *tdev;		/* Device to other host */
    struct iphdr  *old_iph = ip_hdr(skb);
    u8     tos = old_iph->tos;
    __be16 df = old_iph->frag_off;
    struct iphdr  *iph;			/* Our new IP header */
    unsigned int max_headroom;		/* The extra header space needed */
    int    mtu;
    int ret;

    EnterFunction(10);

    if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
                                  RT_TOS(tos), IP_VS_RT_MODE_LOCAL |
                                  IP_VS_RT_MODE_NON_LOCAL)))
        goto tx_error_icmp;
    if (rt->rt_flags & RTCF_LOCAL) {
        ip_rt_put(rt);
        IP_VS_XMIT(NFPROTO_IPV4, skb, cp, 1);
    }

    tdev = rt->dst.dev;

    mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr);
    if (mtu < 68) {
        IP_VS_DBG_RL("%s(): mtu less than 68\n", __func__);
        goto tx_error_put;
    }
    if (skb_dst(skb))
        skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);

    df |= (old_iph->frag_off & htons(IP_DF));

    if ((old_iph->frag_off & htons(IP_DF) &&
            mtu < ntohs(old_iph->tot_len) && !skb_is_gso(skb))) {
        icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
        IP_VS_DBG_RL("%s(): frag needed\n", __func__);
        goto tx_error_put;
    }

    /*
     * Okay, now see if we can stuff it in the buffer as-is.
     */
    max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct iphdr);

    if (skb_headroom(skb) < max_headroom
            || skb_cloned(skb) || skb_shared(skb)) {
        struct sk_buff *new_skb =
            skb_realloc_headroom(skb, max_headroom);
        if (!new_skb) {
            ip_rt_put(rt);
            kfree_skb(skb);
            IP_VS_ERR_RL("%s(): no memory\n", __func__);
            return NF_STOLEN;
        }
        kfree_skb(skb);
        skb = new_skb;
        old_iph = ip_hdr(skb);
    }

    skb->transport_header = skb->network_header;

    /* fix old IP header checksum */
    ip_send_check(old_iph);

    skb_push(skb, sizeof(struct iphdr));
    skb_reset_network_header(skb);
    memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));

    /* drop old route */
    skb_dst_drop(skb);
    skb_dst_set(skb, &rt->dst);

    /*
     *	Push down and install the IPIP header.
     */
    iph			=	ip_hdr(skb);
    iph->version		=	4;
    iph->ihl		=	sizeof(struct iphdr)>>2;
    iph->frag_off		=	df;
    iph->protocol		=	IPPROTO_IPIP;
    iph->tos		=	tos;
    iph->daddr		=	rt->rt_dst;
    iph->saddr		=	rt->rt_src;
    iph->ttl		=	old_iph->ttl;
    ip_select_ident(iph, &rt->dst, NULL);

    /* Another hack: avoid icmp_send in ip_fragment */
    skb->local_df = 1;

    ret = IP_VS_XMIT_TUNNEL(skb, cp);
    if (ret == NF_ACCEPT)
        ip_local_out(skb);
    else if (ret == NF_DROP)
        kfree_skb(skb);

    LeaveFunction(10);

    return NF_STOLEN;

tx_error_icmp:
    dst_link_failure(skb);
tx_error:
    kfree_skb(skb);
    LeaveFunction(10);
    return NF_STOLEN;
tx_error_put:
    ip_rt_put(rt);
    goto tx_error;
}
Example 25
static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb)
{
	struct net_device *dev = (struct net_device *)vcc->proto_data;
	struct sk_buff *new_skb;
	eg_cache_entry *eg;
	struct mpoa_client *mpc;
	uint32_t tag;
	char *tmp;
	
	ddprintk("mpoa: (%s) mpc_push:\n", dev->name);
	if (skb == NULL) {
		dprintk("mpoa: (%s) mpc_push: null skb, closing VCC\n", dev->name);
		mpc_vcc_close(vcc, dev);
		return;
	}
	
	skb->dev = dev;
	if (memcmp(skb->data, &llc_snap_mpoa_ctrl, sizeof(struct llc_snap_hdr)) == 0) {
		struct sock *sk = sk_atm(vcc);

		dprintk("mpoa: (%s) mpc_push: control packet arrived\n", dev->name);
		/* Pass control packets to daemon */
		skb_queue_tail(&sk->sk_receive_queue, skb);
		sk->sk_data_ready(sk, skb->len);
		return;
	}

	/* data coming over the shortcut */
	atm_return(vcc, skb->truesize);

	mpc = find_mpc_by_lec(dev);
	if (mpc == NULL) {
		printk("mpoa: (%s) mpc_push: unknown MPC\n", dev->name);
		return;
	}

	if (memcmp(skb->data, &llc_snap_mpoa_data_tagged, sizeof(struct llc_snap_hdr)) == 0) { /* MPOA tagged data */
		ddprintk("mpoa: (%s) mpc_push: tagged data packet arrived\n", dev->name);

	} else if (memcmp(skb->data, &llc_snap_mpoa_data, sizeof(struct llc_snap_hdr)) == 0) { /* MPOA data */
		printk("mpoa: (%s) mpc_push: non-tagged data packet arrived\n", dev->name);
		printk("           mpc_push: non-tagged data unsupported, purging\n");
		dev_kfree_skb_any(skb);
		return;
	} else {
		printk("mpoa: (%s) mpc_push: garbage arrived, purging\n", dev->name);
		dev_kfree_skb_any(skb);
		return;
	}

	tmp = skb->data + sizeof(struct llc_snap_hdr);
	tag = *(uint32_t *)tmp;

	eg = mpc->eg_ops->get_by_tag(tag, mpc);
	if (eg == NULL) {
		printk("mpoa: (%s) mpc_push: Didn't find egress cache entry, tag = %u\n",
		       dev->name,tag);
		purge_egress_shortcut(vcc, NULL);
		dev_kfree_skb_any(skb);
		return;
	}
	
	/*
	 * See if ingress MPC is using shortcut we opened as a return channel.
	 * This means we have a bi-directional vcc opened by us.
	 */ 
	if (eg->shortcut == NULL) {
		eg->shortcut = vcc;
		printk("mpoa: (%s) mpc_push: egress SVC in use\n", dev->name);
	}

	skb_pull(skb, sizeof(struct llc_snap_hdr) + sizeof(tag)); /* get rid of LLC/SNAP header */
	new_skb = skb_realloc_headroom(skb, eg->ctrl_info.DH_length); /* LLC/SNAP is shorter than MAC header :( */
	dev_kfree_skb_any(skb);
	if (new_skb == NULL){
		mpc->eg_ops->put(eg);
		return;
	}
	skb_push(new_skb, eg->ctrl_info.DH_length);     /* add MAC header */
	memcpy(new_skb->data, eg->ctrl_info.DLL_header, eg->ctrl_info.DH_length);
	new_skb->protocol = eth_type_trans(new_skb, dev);
	new_skb->nh.raw = new_skb->data;

	eg->latest_ip_addr = new_skb->nh.iph->saddr;
	eg->packets_rcvd++;
	mpc->eg_ops->put(eg);

	memset(ATM_SKB(new_skb), 0, sizeof(struct atm_skb_data));
	netif_rx(new_skb);

	return;
}
Example 26
int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
	     struct ipv6_txoptions *opt)
{
	struct net *net = sock_net(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_addr *first_hop = &fl6->daddr;
	struct dst_entry *dst = skb_dst(skb);
	struct ipv6hdr *hdr;
	u8  proto = fl6->flowi6_proto;
	int seg_len = skb->len;
	int hlimit = -1;
	int tclass = 0;
	u32 mtu;

	if (opt) {
		unsigned int head_room;

		/* First: exthdrs may take lots of space (~8K for now)
		   MAX_HEADER is not enough.
		 */
		head_room = opt->opt_nflen + opt->opt_flen;
		seg_len += head_room;
		head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);

		if (skb_headroom(skb) < head_room) {
			struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
			if (skb2 == NULL) {
				IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
					      IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return -ENOBUFS;
			}
			kfree_skb(skb);
			skb = skb2;
			skb_set_owner_w(skb, sk);
		}
		if (opt->opt_flen)
			ipv6_push_frag_opts(skb, opt, &proto);
		if (opt->opt_nflen)
			ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
	}

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	hdr = ipv6_hdr(skb);

	/*
	 *	Fill in the IPv6 header
	 */
	if (np) {
		tclass = np->tclass;
		hlimit = np->hop_limit;
	}
	if (hlimit < 0)
		hlimit = ip6_dst_hoplimit(dst);

	*(__be32 *)hdr = htonl(0x60000000 | (tclass << 20)) | fl6->flowlabel;

	hdr->payload_len = htons(seg_len);
	hdr->nexthdr = proto;
	hdr->hop_limit = hlimit;

	ipv6_addr_copy(&hdr->saddr, &fl6->saddr);
	ipv6_addr_copy(&hdr->daddr, first_hop);

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	mtu = dst_mtu(dst);
	if ((skb->len <= mtu) || skb->local_df || skb_is_gso(skb)) {
		IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)),
			      IPSTATS_MIB_OUT, skb->len);
		return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
			       dst->dev, dst_output);
	}

	if (net_ratelimit())
		printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n");
	skb->dev = dst->dev;
	icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return -EMSGSIZE;
}
Example 27
static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct ip_tunnel *tunnel = (struct ip_tunnel*)dev->priv;
    struct net_device_stats *stats = &tunnel->stat;
    struct iphdr  *tiph = &tunnel->parms.iph;
    struct ipv6hdr *iph6 = skb->nh.ipv6h;
    u8     tos = tunnel->parms.iph.tos;
    struct rtable *rt;     			/* Route to the other host */
    struct net_device *tdev;			/* Device to other host */
    struct iphdr  *iph;			/* Our new IP header */
    int    max_headroom;			/* The extra header space needed */
    u32    dst = tiph->daddr;
    int    mtu;
    struct in6_addr *addr6;
    int addr_type;

    if (tunnel->recursion++) {
        tunnel->stat.collisions++;
        goto tx_error;
    }

    if (skb->protocol != htons(ETH_P_IPV6))
        goto tx_error;

    if (!dst)
        dst = try_6to4(&iph6->daddr);

    if (!dst) {
        struct neighbour *neigh = NULL;

        if (skb->dst)
            neigh = skb->dst->neighbour;

        if (neigh == NULL) {
            if (net_ratelimit())
                printk(KERN_DEBUG "sit: nexthop == NULL\n");
            goto tx_error;
        }

        addr6 = (struct in6_addr*)&neigh->primary_key;
        addr_type = ipv6_addr_type(addr6);

        if (addr_type == IPV6_ADDR_ANY) {
            addr6 = &skb->nh.ipv6h->daddr;
            addr_type = ipv6_addr_type(addr6);
        }

        if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
            goto tx_error_icmp;

        dst = addr6->s6_addr32[3];
    }

    {
        struct flowi fl = { .nl_u = { .ip4_u =
                {   .daddr = dst,
                    .saddr = tiph->saddr,
                    .tos = RT_TOS(tos)
                }
            },
            .oif = tunnel->parms.link
        };
        if (ip_route_output_key(&rt, &fl)) {
            tunnel->stat.tx_carrier_errors++;
            goto tx_error_icmp;
        }
    }
    if (rt->rt_type != RTN_UNICAST) {
        tunnel->stat.tx_carrier_errors++;
        goto tx_error_icmp;
    }
    tdev = rt->u.dst.dev;

    if (tdev == dev) {
        ip_rt_put(rt);
        tunnel->stat.collisions++;
        goto tx_error;
    }

    if (tiph->frag_off)
        mtu = dst_pmtu(&rt->u.dst) - sizeof(struct iphdr);
    else
        mtu = skb->dst ? dst_pmtu(skb->dst) : dev->mtu;

    if (mtu < 68) {
        tunnel->stat.collisions++;
        ip_rt_put(rt);
        goto tx_error;
    }
    if (mtu < IPV6_MIN_MTU)
        mtu = IPV6_MIN_MTU;
    if (tunnel->parms.iph.daddr && skb->dst)
        skb->dst->ops->update_pmtu(skb->dst, mtu);

    if (skb->len > mtu) {
        icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
        ip_rt_put(rt);
        goto tx_error;
    }

    if (tunnel->err_count > 0) {
        if (jiffies - tunnel->err_time < IPTUNNEL_ERR_TIMEO) {
            tunnel->err_count--;
            dst_link_failure(skb);
        } else
            tunnel->err_count = 0;
    }

    skb->h.raw = skb->nh.raw;

    /*
     * Okay, now see if we can stuff it in the buffer as-is.
     */
    max_headroom = LL_RESERVED_SPACE(tdev)+sizeof(struct iphdr);

    if (skb_headroom(skb) < max_headroom || skb_cloned(skb) || skb_shared(skb)) {
        struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
        if (!new_skb) {
            ip_rt_put(rt);
            stats->tx_dropped++;
            dev_kfree_skb(skb);
            tunnel->recursion--;
            return 0;
        }
        if (skb->sk)
            skb_set_owner_w(new_skb, skb->sk);
        dev_kfree_skb(skb);
        skb = new_skb;
        iph6 = skb->nh.ipv6h;
    }

    skb->nh.raw = skb_push(skb, sizeof(struct iphdr));
    memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
    dst_release(skb->dst);
    skb->dst = &rt->u.dst;

    /*
     *	Push down and install the IPIP header.
     */

    iph 			=	skb->nh.iph;
    iph->version		=	4;
    iph->ihl		=	sizeof(struct iphdr)>>2;
    if (mtu > IPV6_MIN_MTU)
        iph->frag_off	=	htons(IP_DF);
    else
        iph->frag_off	=	0;

    iph->protocol		=	IPPROTO_IPV6;
    iph->tos		=	INET_ECN_encapsulate(tos, ip6_get_dsfield(iph6));
    iph->daddr		=	rt->rt_dst;
    iph->saddr		=	rt->rt_src;

    if ((iph->ttl = tiph->ttl) == 0)
        iph->ttl	=	iph6->hop_limit;

#ifdef CONFIG_NETFILTER
    nf_conntrack_put(skb->nfct);
    skb->nfct = NULL;
#ifdef CONFIG_NETFILTER_DEBUG
    skb->nf_debug = 0;
#endif
#endif

    IPTUNNEL_XMIT();
    tunnel->recursion--;
    return 0;

tx_error_icmp:
    dst_link_failure(skb);
tx_error:
    stats->tx_errors++;
    dev_kfree_skb(skb);
    tunnel->recursion--;
    return 0;
}
Example 28
static int ipip_tunnel_xmit(struct sk_buff *skb, struct device *dev)
{
	struct ip_tunnel *tunnel = (struct ip_tunnel*)dev->priv;
	struct net_device_stats *stats = &tunnel->stat;
	struct iphdr  *tiph = &tunnel->parms.iph;
	u8     tos = tunnel->parms.iph.tos;
	u16    df = tiph->frag_off;
	struct rtable *rt;     			/* Route to the other host */
	struct device *tdev;			/* Device to other host */
	struct iphdr  *old_iph = skb->nh.iph;
	struct iphdr  *iph;			/* Our new IP header */
	int    max_headroom;			/* The extra header space needed */
	u32    dst = tiph->daddr;
	int    mtu;

	if (tunnel->recursion++) {
		tunnel->stat.collisions++;
		goto tx_error;
	}

	if (skb->protocol != __constant_htons(ETH_P_IP))
		goto tx_error;

	if (tos&1)
		tos = old_iph->tos;

	if (!dst) {
		/* NBMA tunnel */
		if ((rt = (struct rtable*)skb->dst) == NULL) {
			tunnel->stat.tx_fifo_errors++;
			goto tx_error;
		}
		if ((dst = rt->rt_gateway) == 0)
			goto tx_error_icmp;
	}

	if (ip_route_output(&rt, dst, tiph->saddr, RT_TOS(tos), tunnel->parms.link)) {
		tunnel->stat.tx_carrier_errors++;
		goto tx_error_icmp;
	}
	tdev = rt->u.dst.dev;

	if (tdev == dev) {
		ip_rt_put(rt);
		tunnel->stat.collisions++;
		goto tx_error;
	}

	mtu = rt->u.dst.pmtu - sizeof(struct iphdr);
	if (mtu < 68) {
		tunnel->stat.collisions++;
		ip_rt_put(rt);
		goto tx_error;
	}
	if (skb->dst && mtu < skb->dst->pmtu)
		skb->dst->pmtu = mtu;

	df |= (old_iph->frag_off&__constant_htons(IP_DF));

	if ((old_iph->frag_off&__constant_htons(IP_DF)) && mtu < ntohs(old_iph->tot_len)) {
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
		ip_rt_put(rt);
		goto tx_error;
	}

	if (tunnel->err_count > 0) {
		if (jiffies - tunnel->err_time < IPTUNNEL_ERR_TIMEO) {
			tunnel->err_count--;
			dst_link_failure(skb);
		} else
			tunnel->err_count = 0;
	}

	skb->h.raw = skb->nh.raw;

	/*
	 * Okay, now see if we can stuff it in the buffer as-is.
	 */
	max_headroom = (((tdev->hard_header_len+15)&~15)+sizeof(struct iphdr));

	if (skb_headroom(skb) < max_headroom || skb_cloned(skb) || skb_shared(skb)) {
		struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
		if (!new_skb) {
			ip_rt_put(rt);
			stats->tx_dropped++;
			dev_kfree_skb(skb);
			tunnel->recursion--;
			return 0;
		}
		if (skb->sk)
			skb_set_owner_w(new_skb, skb->sk);
		dev_kfree_skb(skb);
		skb = new_skb;
	}

	skb->nh.raw = skb_push(skb, sizeof(struct iphdr));
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	dst_release(skb->dst);
	skb->dst = &rt->u.dst;

	/*
	 *	Push down and install the IPIP header.
	 */

	iph 			=	skb->nh.iph;
	iph->version		=	4;
	iph->ihl		=	sizeof(struct iphdr)>>2;
	iph->frag_off		=	df;
	iph->protocol		=	IPPROTO_IPIP;
	iph->tos		=	tos;
	iph->daddr		=	rt->rt_dst;
	iph->saddr		=	rt->rt_src;

	if ((iph->ttl = tiph->ttl) == 0)
		iph->ttl	=	old_iph->ttl;

	iph->tot_len		=	htons(skb->len);
	iph->id			=	htons(ip_id_count++);
	ip_send_check(iph);

	stats->tx_bytes += skb->len;
	stats->tx_packets++;
	ip_send(skb);
	tunnel->recursion--;
	return 0;

tx_error_icmp:
	dst_link_failure(skb);
tx_error:
	stats->tx_errors++;
	dev_kfree_skb(skb);
	tunnel->recursion--;
	return 0;
}
Example 29
static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
{
	struct sock *sk = (struct sock *) chan->private;
	struct pppox_sock *po = pppox_sk(sk);
	struct pptp_opt *opt=&po->proto.pptp;
	struct pptp_gre_header *hdr;
	unsigned int header_len=sizeof(*hdr);
	int len=skb?skb->len:0;
	int err=0;
	int window;

	struct rtable *rt;     			/* Route to the other host */
	struct net_device *tdev;			/* Device to other host */
	struct iphdr  *iph;			/* Our new IP header */
	int    max_headroom;			/* The extra header space needed */

	INC_TX_PACKETS;

	spin_lock_bh(&opt->xmit_lock);
	
	window=WRAPPED(opt->ack_recv,opt->seq_sent)?(__u32)0xffffffff-opt->seq_sent+opt->ack_recv:opt->seq_sent-opt->ack_recv;

	if (!skb){
	    if (opt->ack_sent == opt->seq_recv) goto exit;
	}else if (window>opt->window){
		__set_bit(PPTP_FLAG_PAUSE,(unsigned long*)&opt->flags);
		#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
		mod_timer(&opt->ack_timeout_timer,opt->stat->rtt/100*HZ/10000);
		#else
		schedule_delayed_work(&opt->ack_timeout_work,opt->stat->rtt/100*HZ/10000);
		#endif
		goto exit;
	}

	#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
	{
		struct rt_key key = {
			.dst=opt->dst_addr.sin_addr.s_addr,
			.src=opt->src_addr.sin_addr.s_addr,
			.tos=RT_TOS(0),
		};
		if ((err=ip_route_output_key(&rt, &key))) {
			goto tx_error;
		}
	}
	#else
	{
		struct flowi fl = { .oif = 0,
				    .nl_u = { .ip4_u =
					      { .daddr = opt->dst_addr.sin_addr.s_addr,
						.saddr = opt->src_addr.sin_addr.s_addr,
						.tos = RT_TOS(0) } },
				    .proto = IPPROTO_GRE };
		if ((err=ip_route_output_key(&rt, &fl))) {
			goto tx_error;
		}
	}
	#endif
	tdev = rt->u.dst.dev;
	
	#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
	max_headroom = ((tdev->hard_header_len+15)&~15) + sizeof(*iph)+sizeof(*hdr)+2;
	#else
	max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(*iph)+sizeof(*hdr)+2;
	#endif
	

	if (!skb){
		skb=dev_alloc_skb(max_headroom);
		if (!skb) {
			ip_rt_put(rt);
			goto tx_error;
		}
		skb_reserve(skb,max_headroom-skb_headroom(skb));
	} else if (skb_headroom(skb) < max_headroom ||
						skb_cloned(skb) || skb_shared(skb)) {
		struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
		if (!new_skb) {
			ip_rt_put(rt);
			goto tx_error;
		}
		if (skb->sk)
			skb_set_owner_w(new_skb, skb->sk);
		kfree_skb(skb);
		skb = new_skb;
	}
	
	if (skb->len){
		int islcp;
		unsigned char *data=skb->data;
		islcp=((data[0] << 8) + data[1])== PPP_LCP && 1 <= data[2] && data[2] <= 7;		
		
		/* compress protocol field */
		if ((opt->ppp_flags & SC_COMP_PROT) && data[0]==0 && !islcp)
			skb_pull(skb,1);
		
		/*
		 * Put in the address/control bytes if necessary
		 */
		if ((opt->ppp_flags & SC_COMP_AC) == 0 || islcp) {
			data=skb_push(skb,2);
			data[0]=0xff;
			data[1]=0x03;
		}
	}
	len=skb->len;

	if (len==0) header_len-=sizeof(hdr->seq);
	if (opt->ack_sent == opt->seq_recv) header_len-=sizeof(hdr->ack);

	// Push down and install GRE header
	skb_push(skb,header_len);
	hdr=(struct pptp_gre_header *)(skb->data);

	hdr->flags       = PPTP_GRE_FLAG_K;
	hdr->ver         = PPTP_GRE_VER;
	hdr->protocol    = htons(PPTP_GRE_PROTO);
	hdr->call_id     = htons(opt->dst_addr.call_id);

	if (!len){
		hdr->payload_len = 0;
		hdr->ver |= PPTP_GRE_FLAG_A;
		/* ack is in odd place because S == 0 */
		hdr->seq = htonl(opt->seq_recv);
		opt->ack_sent = opt->seq_recv;
		opt->stat->tx_acks++;
	}else {
		hdr->flags |= PPTP_GRE_FLAG_S;
		hdr->seq    = htonl(opt->seq_sent++);
		if (log_level>=3 && opt->seq_sent<=log_packets)
			printk(KERN_INFO"PPTP[%i]: send packet: seq=%i",opt->src_addr.call_id,opt->seq_sent);
		if (opt->ack_sent != opt->seq_recv)	{
		/* send ack with this message */
			hdr->ver |= PPTP_GRE_FLAG_A;
			hdr->ack  = htonl(opt->seq_recv);
			opt->ack_sent = opt->seq_recv;
			if (log_level>=3 && opt->seq_sent<=log_packets)
				printk(" ack=%i",opt->seq_recv);
		}
		hdr->payload_len = htons(len);
		if (log_level>=3 && opt->seq_sent<=log_packets)
			printk("\n");
	}

	/*
	 *	Push down and install the IP header.
	 */

	#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
	skb->transport_header = skb->network_header;
	skb_push(skb, sizeof(*iph));
	skb_reset_network_header(skb);
	#else
	skb->h.raw = skb->nh.raw;
	skb->nh.raw = skb_push(skb, sizeof(*iph));
	#endif
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
			      IPSKB_REROUTED);
	#endif

	#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
	iph 			=	ip_hdr(skb);
	#else
	iph 			=	skb->nh.iph;
	#endif
	iph->version		=	4;
	iph->ihl		=	sizeof(struct iphdr) >> 2;
	iph->frag_off		=	0;
	iph->protocol		=	IPPROTO_GRE;
	iph->tos		=	0;
	iph->daddr		=	rt->rt_dst;
	iph->saddr		=	rt->rt_src;
	#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
	iph->ttl = sysctl_ip_default_ttl;
	#else
	iph->ttl = dst_metric(&rt->u.dst, RTAX_HOPLIMIT);
	#endif
	iph->tot_len = htons(skb->len);

	dst_release(skb->dst);
	skb->dst = &rt->u.dst;
	
	nf_reset(skb);

	skb->ip_summed = CHECKSUM_NONE;
	ip_select_ident(iph, &rt->u.dst, NULL);
	ip_send_check(iph);

	err = NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev, dst_output);
	
	wake_up(&opt->wait);

	if (err == NET_XMIT_SUCCESS || err == NET_XMIT_CN) {
		opt->stat->tx_sent++;
		if (!opt->stat->pt_seq){
			opt->stat->pt_seq  = opt->seq_sent;
			do_gettimeofday(&opt->stat->pt_time);
		}
	}else{
		INC_TX_ERRORS;
		opt->stat->tx_failed++;	
	}

	spin_unlock_bh(&opt->xmit_lock);
	return 1;

tx_error:
	INC_TX_ERRORS;
	opt->stat->tx_failed++;
	if (!len) kfree_skb(skb);
	spin_unlock_bh(&opt->xmit_lock);
	return 1;
exit:
	spin_unlock_bh(&opt->xmit_lock);
	return 0;
}
Example 30
static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct net_device_stats *stats = &tunnel->stat;
	struct iphdr  *tiph = &tunnel->parms.iph;
	struct ipv6hdr *iph6 = ipv6_hdr(skb);
	u8     tos = tunnel->parms.iph.tos;
	struct rtable *rt;     			/* Route to the other host */
	struct net_device *tdev;			/* Device to other host */
	struct iphdr  *iph;			/* Our new IP header */
	unsigned int max_headroom;		/* The extra header space needed */
	__be32 dst = tiph->daddr;
	int    mtu;

	if (tunnel->recursion++) {
		tunnel->stat.collisions++;
		goto tx_error;
	}

	if (skb->protocol != htons(ETH_P_IPV6))
		goto tx_error;

	if (extract_ipv4_endpoint(&iph6->daddr, &dst) < 0)
		goto tx_error_icmp;

	{
		struct flowi fl = { .nl_u = { .ip4_u =
					      { .daddr = dst,
						.saddr = tiph->saddr,
						.tos = RT_TOS(tos) } },
				    .oif = tunnel->parms.link,
				    .proto = IPPROTO_IPV6 };
		if (ip_route_output_key(&rt, &fl)) {
			tunnel->stat.tx_carrier_errors++;
			goto tx_error_icmp;
		}
	}
	if (rt->rt_type != RTN_UNICAST) {
		ip_rt_put(rt);
		tunnel->stat.tx_carrier_errors++;
		goto tx_error_icmp;
	}
	tdev = rt->u.dst.dev;

	if (tdev == dev) {
		ip_rt_put(rt);
		tunnel->stat.collisions++;
		goto tx_error;
	}

	if (tiph->frag_off)
		mtu = dst_mtu(&rt->u.dst) - sizeof(struct iphdr);
	else
		mtu = skb->dst ? dst_mtu(skb->dst) : dev->mtu;

	if (mtu < 68) {
		tunnel->stat.collisions++;
		ip_rt_put(rt);
		goto tx_error;
	}
	if (mtu < IPV6_MIN_MTU)
		mtu = IPV6_MIN_MTU;
	if (tunnel->parms.iph.daddr && skb->dst)
		skb->dst->ops->update_pmtu(skb->dst, mtu);

	if (skb->len > mtu) {
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
		ip_rt_put(rt);
		goto tx_error;
	}

	if (tunnel->err_count > 0) {
		if (jiffies - tunnel->err_time < IPTUNNEL_ERR_TIMEO) {
			tunnel->err_count--;
			dst_link_failure(skb);
		} else
			tunnel->err_count = 0;
	}

	/*
	 * Okay, now see if we can stuff it in the buffer as-is.
	 */
	max_headroom = LL_RESERVED_SPACE(tdev)+sizeof(struct iphdr);

	if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
		struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
		if (!new_skb) {
			ip_rt_put(rt);
			stats->tx_dropped++;
			dev_kfree_skb(skb);
			tunnel->recursion--;
			return 0;
		}
		if (skb->sk)
			skb_set_owner_w(new_skb, skb->sk);
		dev_kfree_skb(skb);
		skb = new_skb;
		iph6 = ipv6_hdr(skb);
	}

	skb->transport_header = skb->network_header;
	skb_push(skb, sizeof(struct iphdr));
	skb_reset_network_header(skb);
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	IPCB(skb)->flags = 0;
	dst_release(skb->dst);
	skb->dst = &rt->u.dst;

	/*
	 *	Push down and install the IPIP header.
	 */

	iph 			=	ip_hdr(skb);
	iph->version		=	4;
	iph->ihl		=	sizeof(struct iphdr)>>2;
	if (mtu > IPV6_MIN_MTU)
		iph->frag_off	=	htons(IP_DF);
	else
		iph->frag_off	=	0;

	iph->protocol		=	IPPROTO_IPV6;
	iph->tos		=	INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6));
	iph->daddr		=	rt->rt_dst;
	iph->saddr		=	rt->rt_src;

	if ((iph->ttl = tiph->ttl) == 0)
		iph->ttl	=	iph6->hop_limit;

	nf_reset(skb);

	IPTUNNEL_XMIT();
	tunnel->recursion--;
	return 0;

tx_error_icmp:
	dst_link_failure(skb);
tx_error:
	stats->tx_errors++;
	dev_kfree_skb(skb);
	tunnel->recursion--;
	return 0;
}
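Every example above follows the same idiom: test skb_headroom() (often together with skb_cloned() and skb_shared()), call skb_realloc_headroom() when the buffer is too small, drop the packet if the copy fails, and otherwise transfer socket ownership with skb_set_owner_w() before releasing the original. A distilled sketch of that shared pattern, illustrative only: the helper name and the `needed` parameter are assumptions, and newer callers prefer consume_skb() over kfree_skb() for the successfully replaced buffer:

static struct sk_buff *ensure_headroom(struct sk_buff *skb,
				       unsigned int needed)
{
	struct sk_buff *nskb;

	if (skb_headroom(skb) >= needed && !skb_cloned(skb) &&
	    !skb_shared(skb))
		return skb;	/* enough private room already */

	nskb = skb_realloc_headroom(skb, needed);
	if (!nskb) {
		kfree_skb(skb);	/* drop; callers bump tx_dropped etc. */
		return NULL;
	}
	if (skb->sk)
		skb_set_owner_w(nskb, skb->sk);	/* keep socket accounting */
	consume_skb(skb);	/* the old buffer is no longer needed */
	return nskb;
}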