/* Generic function for mangling variable-length address changes inside
 * NATed TCP connections (like the PORT XXX,XXX,XXX,XXX,XXX,XXX
 * command in FTP).
 *
 * Takes care of all the nasty sequence number changes, checksumming,
 * skb enlargement, ...
 */
int
nf_nat_mangle_tcp_packet(struct sk_buff **pskb,
			 struct nf_conn *ct,
			 enum ip_conntrack_info ctinfo,
			 unsigned int match_offset,
			 unsigned int match_len,
			 const char *rep_buffer,
			 unsigned int rep_len)
{
	struct iphdr *iph;
	struct tcphdr *tcph;
	int oldlen, datalen;

	if (!skb_make_writable(pskb, (*pskb)->len))
		return 0;

	if (rep_len > match_len &&
	    rep_len - match_len > skb_tailroom(*pskb) &&
	    !enlarge_skb(pskb, rep_len - match_len))
		return 0;

	SKB_LINEAR_ASSERT(*pskb);

	iph = (*pskb)->nh.iph;
	tcph = (void *)iph + iph->ihl*4;

	oldlen = (*pskb)->len - iph->ihl*4;
	mangle_contents(*pskb, iph->ihl*4 + tcph->doff*4,
			match_offset, match_len, rep_buffer, rep_len);

	datalen = (*pskb)->len - iph->ihl*4;
	if ((*pskb)->ip_summed != CHECKSUM_PARTIAL) {
		tcph->check = 0;
		tcph->check = tcp_v4_check(datalen,
					   iph->saddr, iph->daddr,
					   csum_partial((char *)tcph,
							datalen, 0));
	} else
		nf_proto_csum_replace2(&tcph->check, *pskb,
				       htons(oldlen), htons(datalen), 1);

	if (rep_len != match_len) {
		set_bit(IPS_SEQ_ADJUST_BIT, &ct->status);
		adjust_tcp_sequence(ntohl(tcph->seq),
				    (int)rep_len - (int)match_len,
				    ct, ctinfo);
		/* Tell TCP window tracking about seq change */
		nf_conntrack_tcp_update(*pskb, (*pskb)->nh.iph->ihl*4,
					ct, CTINFO2DIR(ctinfo));
	}
	return 1;
}
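Every snippet in this collection leans on the same precondition: the skb must be linear before the code indexes directly into its data area. In the stock kernel, SKB_LINEAR_ASSERT is a one-line macro in include/linux/skbuff.h, roughly as sketched below; it BUGs out when any payload lives in page fragments instead of the skb head.

static inline int skb_is_nonlinear(const struct sk_buff *skb)
{
	/* data_len counts bytes held in page fragments, not in the skb head */
	return skb->data_len;
}

#define SKB_LINEAR_ASSERT(skb)	BUG_ON(skb_is_nonlinear(skb))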
Example 2
/*
 * Called from net/core when a new frame is available.
 */
static int kingsun_hard_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct kingsun_cb *kingsun;
	int wraplen;
	int ret = 0;

	if (skb == NULL || netdev == NULL)
		return -EINVAL;

	netif_stop_queue(netdev);

	/* the IRDA wrapping routines don't deal with non-linear skbs */
	SKB_LINEAR_ASSERT(skb);

	kingsun = netdev_priv(netdev);

	spin_lock(&kingsun->lock);

	/* Append data to the end of whatever data remains to be transmitted */
	wraplen = async_wrap_skb(skb,
		kingsun->out_buf,
		KINGSUN_FIFO_SIZE);

	/* Calculate how much data can be transmitted in this urb */
	usb_fill_int_urb(kingsun->tx_urb, kingsun->usbdev,
		usb_sndintpipe(kingsun->usbdev, kingsun->ep_out),
		kingsun->out_buf, wraplen, kingsun_send_irq,
		kingsun, 1);

	if ((ret = usb_submit_urb(kingsun->tx_urb, GFP_ATOMIC))) {
		err("kingsun_hard_xmit: failed tx_urb submit: %d", ret);
		switch (ret) {
		case -ENODEV:
		case -EPIPE:
			break;
		default:
			kingsun->stats.tx_errors++;
			netif_start_queue(netdev);
		}
	} else {
		kingsun->stats.tx_packets++;
		kingsun->stats.tx_bytes += skb->len;
	}

	dev_kfree_skb(skb);
	spin_unlock(&kingsun->lock);

	return ret;
}
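Note that the queue stopped at the top of the function is not restarted on the success path; that happens asynchronously in kingsun_send_irq, the completion handler passed to usb_fill_int_urb. A minimal sketch of that callback's shape (simplified, assuming the control block keeps a back-pointer to its net_device; the real driver's body differs):

static void kingsun_send_irq(struct urb *urb)
{
	/* assumes a netdev back-pointer in struct kingsun_cb */
	struct kingsun_cb *kingsun = urb->context;

	/* runs in interrupt context once the hardware has taken the buffer */
	if (urb->status)
		kingsun->netdev->stats.tx_errors++;

	netif_wake_queue(kingsun->netdev);
}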
static netdev_tx_t kingsun_hard_xmit(struct sk_buff *skb,
					   struct net_device *netdev)
{
	struct kingsun_cb *kingsun;
	int wraplen;
	int ret = 0;

	netif_stop_queue(netdev);

	/* the IRDA wrapping routines don't deal with non-linear skbs */
	SKB_LINEAR_ASSERT(skb);

	kingsun = netdev_priv(netdev);

	spin_lock(&kingsun->lock);

	/* Append data to the end of whatever data remains to be transmitted */
	wraplen = async_wrap_skb(skb,
		kingsun->out_buf,
		KINGSUN_FIFO_SIZE);

	/* Calculate how much data can be transmitted in this urb */
	usb_fill_int_urb(kingsun->tx_urb, kingsun->usbdev,
		usb_sndintpipe(kingsun->usbdev, kingsun->ep_out),
		kingsun->out_buf, wraplen, kingsun_send_irq,
		kingsun, 1);

	if ((ret = usb_submit_urb(kingsun->tx_urb, GFP_ATOMIC))) {
		err("kingsun_hard_xmit: failed tx_urb submit: %d", ret);
		switch (ret) {
		case -ENODEV:
		case -EPIPE:
			break;
		default:
			netdev->stats.tx_errors++;
			netif_start_queue(netdev);
		}
	} else {
		netdev->stats.tx_packets++;
		netdev->stats.tx_bytes += skb->len;
	}

	dev_kfree_skb(skb);
	spin_unlock(&kingsun->lock);

	return NETDEV_TX_OK;
}
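This reworked version also fixes the return convention: an ndo_start_xmit handler must return a netdev_tx_t, because the core only understands NETDEV_TX_OK and NETDEV_TX_BUSY; a negative errno, as the first version could return, is misinterpreted. The expected shape, with hypothetical foo_* helpers standing in for real hardware calls:

static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (!foo_hw_ready(dev))		/* hypothetical readiness check */
		return NETDEV_TX_BUSY;	/* skb not consumed; core will retry */

	foo_hw_queue(dev, skb);		/* hypothetical: driver now owns the skb */
	return NETDEV_TX_OK;
}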
Example 4
/* Generic function for mangling variable-length address changes inside
 * NATed TCP connections (like the PORT XXX,XXX,XXX,XXX,XXX,XXX
 * command in FTP).
 *
 * Takes care of all the nasty sequence number changes, checksumming,
 * skb enlargement, ...
 */
int __nf_nat_mangle_tcp_packet(struct sk_buff *skb,
			       struct nf_conn *ct,
			       enum ip_conntrack_info ctinfo,
			       unsigned int match_offset,
			       unsigned int match_len,
			       const char *rep_buffer,
			       unsigned int rep_len, bool adjust)
{
	struct rtable *rt = skb_rtable(skb);
	struct iphdr *iph;
	struct tcphdr *tcph;
	int oldlen, datalen;
	unsigned char ipOffSet;	/* Leo add: IP header length in bytes */

	if (!skb_make_writable(skb, skb->len))
		return 0;

	if (rep_len > match_len &&
	    rep_len - match_len > skb_tailroom(skb) &&
	    !enlarge_skb(skb, rep_len - match_len))
		return 0;

	SKB_LINEAR_ASSERT(skb);

	iph = ip_hdr(skb);

	ipOffSet = iph->ihl << 2;
	tcph = (void *)iph + ipOffSet;

	oldlen = skb->len - ipOffSet;
	mangle_contents(skb, ipOffSet + tcph->doff*4,
			match_offset, match_len, rep_buffer, rep_len);

	datalen = skb->len - ipOffSet;
	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		if (!(rt->rt_flags & RTCF_LOCAL) &&
		    skb->dev->features & NETIF_F_V4_CSUM) {
			skb->ip_summed = CHECKSUM_PARTIAL;
			skb->csum_start = skb_headroom(skb) +
					  skb_network_offset(skb) +
					  ipOffSet;
			skb->csum_offset = offsetof(struct tcphdr, check);
			tcph->check = ~tcp_v4_check(datalen,
						    iph->saddr, iph->daddr, 0);
		} else {
			tcph->check = 0;
			tcph->check = tcp_v4_check(datalen,
						   iph->saddr, iph->daddr,
						   csum_partial(tcph,
								datalen, 0));
		}
	} else
		inet_proto_csum_replace2(&tcph->check, skb,
					 htons(oldlen), htons(datalen), 1);

	if (adjust && rep_len != match_len)
		nf_nat_set_seq_adjust(ct, ctinfo, tcph->seq,
				      (int)rep_len - (int)match_len);

	return 1;
}
Example 5
/*
 * Called from net/core when a new frame is available.
 */
static int ksdazzle_hard_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ksdazzle_cb *kingsun;
	unsigned int wraplen;
	int ret = 0;

	if (skb == NULL || netdev == NULL)
		return -EINVAL;

	netif_stop_queue(netdev);

	/* the IRDA wrapping routines don't deal with non-linear skbs */
	SKB_LINEAR_ASSERT(skb);

	kingsun = netdev_priv(netdev);

	spin_lock(&kingsun->lock);
	kingsun->new_speed = irda_get_next_speed(skb);

	/* Append data to the end of whatever data remains to be transmitted */
	wraplen =
	    async_wrap_skb(skb, kingsun->tx_buf_clear, KINGSUN_SND_FIFO_SIZE);
	kingsun->tx_buf_clear_used = wraplen;

	if ((ret = ksdazzle_submit_tx_fragment(kingsun)) != 0) {
		err("ksdazzle_hard_xmit: failed tx_urb submit: %d", ret);
		switch (ret) {
		case -ENODEV:
		case -EPIPE:
			break;
		default:
			netdev->stats.tx_errors++;
			netif_start_queue(netdev);
		}
	} else {
		netdev->stats.tx_packets++;
		netdev->stats.tx_bytes += skb->len;
	}

	dev_kfree_skb(skb);
	spin_unlock(&kingsun->lock);

	return ret;
}
int __nf_nat_mangle_tcp_packet(struct sk_buff *skb,
			       struct nf_conn *ct,
			       enum ip_conntrack_info ctinfo,
			       unsigned int match_offset,
			       unsigned int match_len,
			       const char *rep_buffer,
			       unsigned int rep_len, bool adjust)
{
	struct iphdr *iph;
	struct tcphdr *tcph;
	int oldlen, datalen;

	if (!skb_make_writable(skb, skb->len))
		return 0;

	if (rep_len > match_len &&
	    rep_len - match_len > skb_tailroom(skb) &&
	    !enlarge_skb(skb, rep_len - match_len))
		return 0;

	SKB_LINEAR_ASSERT(skb);

	iph = ip_hdr(skb);
	tcph = (void *)iph + iph->ihl*4;

	oldlen = skb->len - iph->ihl*4;
	mangle_contents(skb, iph->ihl*4 + tcph->doff*4,
			match_offset, match_len, rep_buffer, rep_len);

	datalen = skb->len - iph->ihl*4;
	nf_nat_csum(skb, iph, tcph, datalen, &tcph->check, oldlen);

	if (adjust && rep_len != match_len)
		nf_nat_set_seq_adjust(ct, ctinfo, tcph->seq,
				      (int)rep_len - (int)match_len);

	return 1;
}
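Helpers such as the FTP NAT module normally do not pass the adjust flag themselves; in the same kernel era this function was reached through a thin inline wrapper in nf_nat_helper.h that always requests sequence adjustment, roughly:

static inline int nf_nat_mangle_tcp_packet(struct sk_buff *skb,
					   struct nf_conn *ct,
					   enum ip_conntrack_info ctinfo,
					   unsigned int match_offset,
					   unsigned int match_len,
					   const char *rep_buffer,
					   unsigned int rep_len)
{
	/* adjust=true: always fix up TCP sequence numbers for the caller */
	return __nf_nat_mangle_tcp_packet(skb, ct, ctinfo,
					  match_offset, match_len,
					  rep_buffer, rep_len, true);
}

An FTP helper rewriting a PORT command then calls nf_nat_mangle_tcp_packet(skb, ct, ctinfo, matchoff, matchlen, buffer, strlen(buffer)), with buffer holding the translated address string.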