/*
 *	Check if this packet is complete.
 *	Returns -1 on failure for any reason and 1 on successful
 *	reassembly; the offset of the nexthdr field is left in
 *	IP6CB()->nhoff of the reassembled frame.
 *
 *	It is called with locked fq, and caller must check that
 *	queue is eligible for reassembly i.e. it is not COMPLETE,
 *	the last and the first frames arrived and all the bits are here.
 */
static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
			  struct net_device *dev)
{
	struct net *net = container_of(fq->q.net, struct net, ipv6.frags);
	struct sk_buff *fp, *head = fq->q.fragments;
	int    payload_len;
	unsigned int nhoff;

	fq_kill(fq);

	/* Make the one we just received the head. */
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);

		if (!fp)
			goto out_oom;

		fp->next = head->next;
		prev->next = fp;

		skb_morph(head, fq->q.fragments);
		head->next = fq->q.fragments->next;

		kfree_skb(fq->q.fragments);
		fq->q.fragments = head;
	}

	WARN_ON(head == NULL);
	WARN_ON(FRAG6_CB(head)->offset != 0);

	/* Unfragmented part is taken from the first segment. */
	payload_len = ((head->data - skb_network_header(head)) -
		       sizeof(struct ipv6hdr) + fq->q.len -
		       sizeof(struct frag_hdr));
	if (payload_len > IPV6_MAXPLEN)
		goto out_oversize;

	/* Head of list must not be cloned. */
	if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
		goto out_oom;

	/* If the first fragment is fragmented itself, we split
	 * it into two chunks: the first with data and paged part
	 * and the second, holding only fragments. */
	if (skb_shinfo(head)->frag_list) {
		struct sk_buff *clone;
		int i, plen = 0;

		if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
			goto out_oom;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_shinfo(head)->frag_list = NULL;
		for (i=0; i<skb_shinfo(head)->nr_frags; i++)
			plen += skb_shinfo(head)->frags[i].size;
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		atomic_add(clone->truesize, &fq->q.net->mem);
	}

	/* We have to remove fragment header from datagram and to relocate
	 * header in order to calculate ICV correctly. */
	nhoff = fq->nhoffset;
	skb_network_header(head)[nhoff] = skb_transport_header(head)[0];
	memmove(head->head + sizeof(struct frag_hdr), head->head,
		(head->data - head->head) - sizeof(struct frag_hdr));
	head->mac_header += sizeof(struct frag_hdr);
	head->network_header += sizeof(struct frag_hdr);

	skb_shinfo(head)->frag_list = head->next;
	skb_reset_transport_header(head);
	skb_push(head, head->data - skb_network_header(head));
	atomic_sub(head->truesize, &fq->q.net->mem);

	for (fp=head->next; fp; fp = fp->next) {
		head->data_len += fp->len;
		head->len += fp->len;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_COMPLETE)
			head->csum = csum_add(head->csum, fp->csum);
		head->truesize += fp->truesize;
		atomic_sub(fp->truesize, &fq->q.net->mem);
	}

	head->next = NULL;
	head->dev = dev;
	head->tstamp = fq->q.stamp;
	ipv6_hdr(head)->payload_len = htons(payload_len);
	IP6CB(head)->nhoff = nhoff;

	/* Yes, and fold redundant checksum back. 8) */
	if (head->ip_summed == CHECKSUM_COMPLETE)
		head->csum = csum_partial(skb_network_header(head),
					  skb_network_header_len(head),
					  head->csum);

	rcu_read_lock();
	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
	rcu_read_unlock();
	fq->q.fragments = NULL;
	return 1;

out_oversize:
	if (net_ratelimit())
		printk(KERN_DEBUG "ip6_frag_reasm: payload len = %d\n", payload_len);
	goto out_fail;
out_oom:
	if (net_ratelimit())
		printk(KERN_DEBUG "ip6_frag_reasm: no memory for reassembly\n");
out_fail:
	rcu_read_lock();
	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
	rcu_read_unlock();
	return -1;
}
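
The payload_len computation above adds the unfragmentable part kept from the first fragment to the total fragmentable length and subtracts the fragment header that gets stripped; the result must not exceed IPV6_MAXPLEN. A minimal userspace sketch of that arithmetic, assuming the standard 40-byte IPv6 header and 8-byte fragment header sizes (reassembled_payload_len is an illustrative helper, not a kernel function):

/* Illustrative sketch only -- not kernel code. */
#include <assert.h>
#include <stdio.h>

#define IPV6_HDR_LEN   40	/* sizeof(struct ipv6hdr) */
#define FRAG_HDR_LEN    8	/* sizeof(struct frag_hdr) */
#define IPV6_MAXPLEN   65535

/*
 * unfrag_len: bytes between the network header and head->data in the first
 *             fragment (IPv6 header + unfragmentable ext headers + frag header)
 * total_frag_len: fq->q.len, the length of the reassembled fragmentable part
 */
static int reassembled_payload_len(int unfrag_len, int total_frag_len)
{
	int payload_len = unfrag_len - IPV6_HDR_LEN + total_frag_len
			  - FRAG_HDR_LEN;

	return payload_len <= IPV6_MAXPLEN ? payload_len : -1;
}

int main(void)
{
	/* first fragment carries only IPv6 header + fragment header */
	assert(reassembled_payload_len(IPV6_HDR_LEN + FRAG_HDR_LEN, 1400) == 1400);
	/* oversized reassembly is rejected, mirroring the out_oversize path */
	assert(reassembled_payload_len(IPV6_HDR_LEN + FRAG_HDR_LEN, 70000) == -1);
	printf("ok\n");
	return 0;
}
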
Example 2
static int netdev_send(struct vport *vport, struct sk_buff *skb)
{
	struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
	int mtu = netdev_vport->dev->mtu;
	int len;

	if (unlikely(packet_length(skb) > mtu && !skb_is_gso(skb))) {
		if (net_ratelimit())
			pr_warn("%s: dropped over-mtu packet: %d > %d\n",
				ovs_dp_name(vport->dp), packet_length(skb), mtu);
		goto error;
	}

	if (unlikely(skb_warn_if_lro(skb)))
		goto error;

	skb->dev = netdev_vport->dev;
	forward_ip_summed(skb, true);

	if (vlan_tx_tag_present(skb) && !dev_supports_vlan_tx(skb->dev)) {
		int features;

		features = netif_skb_features(skb);

		if (!vlan_tso)
			features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
				      NETIF_F_UFO | NETIF_F_FSO);

		if (netif_needs_gso(skb, features)) {
			struct sk_buff *nskb;

			nskb = skb_gso_segment(skb, features);
			if (!nskb) {
				if (unlikely(skb_cloned(skb) &&
				    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) {
					kfree_skb(skb);
					return 0;
				}

				skb_shinfo(skb)->gso_type &= ~SKB_GSO_DODGY;
				goto tag;
			}

			if (IS_ERR(nskb)) {
				kfree_skb(skb);
				return 0;
			}
			consume_skb(skb);
			skb = nskb;

			len = 0;
			do {
				nskb = skb->next;
				skb->next = NULL;

				skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
				if (likely(skb)) {
					len += skb->len;
					vlan_set_tci(skb, 0);
					dev_queue_xmit(skb);
				}

				skb = nskb;
			} while (skb);

			return len;
		}

tag:
		skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
		if (unlikely(!skb))
			return 0;
		vlan_set_tci(skb, 0);
	}

	len = skb->len;
	dev_queue_xmit(skb);

	return len;

error:
	kfree_skb(skb);
	ovs_vport_record_error(vport, VPORT_E_TX_DROPPED);
	return 0;
}
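
In the GSO branch above, skb_gso_segment() returns a list of segments linked through skb->next; each is detached, tagged and transmitted, and the function returns the total number of bytes queued. A hedged userspace sketch of that list walk, with a made-up struct seg standing in for sk_buff and the transmit call reduced to a length accumulation:

/* Illustrative sketch only; `struct seg` is a stand-in for sk_buff. */
#include <stddef.h>
#include <stdio.h>

struct seg {
	struct seg *next;
	int len;
};

/* Mimics the do/while loop in netdev_send(): detach each segment,
 * "transmit" it, and accumulate the number of bytes sent. */
static int send_segments(struct seg *skb)
{
	int len = 0;

	do {
		struct seg *nskb = skb->next;

		skb->next = NULL;	/* segments must be sent one by one */
		len += skb->len;	/* dev_queue_xmit(skb) would go here */
		skb = nskb;
	} while (skb);

	return len;
}

int main(void)
{
	struct seg c = { NULL, 300 }, b = { &c, 200 }, a = { &b, 100 };

	printf("%d bytes queued\n", send_segments(&a));	/* 600 */
	return 0;
}
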
static inline int ip_rcv_finish(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct iphdr *iph = skb->nh.iph;

	/*
	 *	Initialise the virtual path cache for the packet. It describes
	 *	how the packet travels inside Linux networking.
	 */ 
	if (skb->dst == NULL) {
		if (ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))
			goto drop; 
	}

#ifdef CONFIG_NET_CLS_ROUTE
	if (skb->dst->tclassid) {
		struct ip_rt_acct *st = ip_rt_acct + 256*smp_processor_id();
		u32 idx = skb->dst->tclassid;
		st[idx&0xFF].o_packets++;
		st[idx&0xFF].o_bytes+=skb->len;
		st[(idx>>16)&0xFF].i_packets++;
		st[(idx>>16)&0xFF].i_bytes+=skb->len;
	}
#endif

	if (iph->ihl > 5) {
		struct ip_options *opt;

		/* It looks as overkill, because not all
		   IP options require packet mangling.
		   But it is the easiest for now, especially taking
		   into account that combination of IP options
		   and running sniffer is extremely rare condition.
		                                      --ANK (980813)
		*/

		if (skb_cow(skb, skb_headroom(skb)))
			goto drop;
		iph = skb->nh.iph;

		if (ip_options_compile(NULL, skb))
			goto inhdr_error;

		opt = &(IPCB(skb)->opt);
		if (opt->srr) {
			struct in_device *in_dev = in_dev_get(dev);
			if (in_dev) {
				if (!IN_DEV_SOURCE_ROUTE(in_dev)) {
					if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
						printk(KERN_INFO "source route option %u.%u.%u.%u -> %u.%u.%u.%u\n",
						       NIPQUAD(iph->saddr), NIPQUAD(iph->daddr));
					in_dev_put(in_dev);
					goto drop;
				}
				in_dev_put(in_dev);
			}
			if (ip_options_rcv_srr(skb))
				goto drop;
		}
	}

	return skb->dst->input(skb);

inhdr_error:
	IP_INC_STATS_BH(IpInHdrErrors);
drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}
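
The CONFIG_NET_CLS_ROUTE block above packs two 8-bit route-classifier indices into a single tclassid: the low byte selects the output accounting slot and bits 16..23 the input slot. A small sketch of that bookkeeping, with a plain array standing in for the per-CPU ip_rt_acct table (struct acct and account() are illustrative names):

/* Illustrative sketch; `struct acct` stands in for struct ip_rt_acct. */
#include <stdint.h>
#include <stdio.h>

struct acct {
	uint64_t o_packets, o_bytes;
	uint64_t i_packets, i_bytes;
};

static struct acct table[256];

static void account(uint32_t tclassid, uint32_t pkt_len)
{
	table[tclassid & 0xFF].o_packets++;		/* output class: low byte */
	table[tclassid & 0xFF].o_bytes += pkt_len;
	table[(tclassid >> 16) & 0xFF].i_packets++;	/* input class: bits 16..23 */
	table[(tclassid >> 16) & 0xFF].i_bytes += pkt_len;
}

int main(void)
{
	account(0x00050003, 1500);	/* input class 5, output class 3 */
	printf("class 3 out: %llu pkts, class 5 in: %llu pkts\n",
	       (unsigned long long)table[3].o_packets,
	       (unsigned long long)table[5].i_packets);
	return 0;
}
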
Example 4
static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
			 struct net_device *dev)
{
	struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
	struct iphdr *iph;
	struct sk_buff *fp, *head = qp->q.fragments;
	int len;
	int ihlen;
	int err;

	ipq_kill(qp);

	/* Make the one we just received the head. */
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);
		if (!fp)
			goto out_nomem;

		fp->next = head->next;
		if (!fp->next)
			qp->q.fragments_tail = fp;
		prev->next = fp;

		skb_morph(head, qp->q.fragments);
		head->next = qp->q.fragments->next;

		kfree_skb(qp->q.fragments);
		qp->q.fragments = head;
	}

	WARN_ON(head == NULL);
	WARN_ON(FRAG_CB(head)->offset != 0);

	/* Allocate a new buffer for the datagram. */
	ihlen = ip_hdrlen(head);
	len = ihlen + qp->q.len;

	err = -E2BIG;
	if (len > 65535)
		goto out_oversize;

	/* Head of list must not be cloned. */
	if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
		goto out_nomem;

	/* If the first fragment is fragmented itself, we split
	 * it into two chunks: the first with data and paged part
	 * and the second, holding only fragments. */
	if (skb_has_frag_list(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
			goto out_nomem;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i=0; i<skb_shinfo(head)->nr_frags; i++)
			plen += skb_shinfo(head)->frags[i].size;
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		atomic_add(clone->truesize, &qp->q.net->mem);
	}

	skb_shinfo(head)->frag_list = head->next;
	skb_push(head, head->data - skb_network_header(head));

	for (fp=head->next; fp; fp = fp->next) {
		head->data_len += fp->len;
		head->len += fp->len;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_COMPLETE)
			head->csum = csum_add(head->csum, fp->csum);
		head->truesize += fp->truesize;
	}
	atomic_sub(head->truesize, &qp->q.net->mem);

	head->next = NULL;
	head->dev = dev;
	head->tstamp = qp->q.stamp;

	iph = ip_hdr(head);
	iph->frag_off = 0;
	iph->tot_len = htons(len);
	/* RFC3168 5.3 Fragmentation support
	 * If one fragment had INET_ECN_NOT_ECT,
	 *	reassembled frame also has INET_ECN_NOT_ECT
	 * Elif one fragment had INET_ECN_CE
	 *	reassembled frame also has INET_ECN_CE
	 */
	if (qp->ecn & IPFRAG_ECN_CLEAR)
		iph->tos &= ~INET_ECN_MASK;
	else if (qp->ecn & IPFRAG_ECN_SET_CE)
		iph->tos |= INET_ECN_CE;

	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS);
	qp->q.fragments = NULL;
	qp->q.fragments_tail = NULL;
	return 0;

out_nomem:
	LIMIT_NETDEBUG(KERN_ERR "IP: queue_glue: no memory for gluing "
			      "queue %p\n", qp);
	err = -ENOMEM;
	goto out_fail;
out_oversize:
	if (net_ratelimit())
		printk(KERN_INFO "Oversized IP packet from %pI4.\n",
			&qp->saddr);
out_fail:
	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
	return err;
}
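
A minimal sketch of the RFC 3168 folding rule described in the comment above: if any fragment was Not-ECT the reassembled header is forced back to Not-ECT, otherwise a single CE fragment makes the result CE. The flag names here are illustrative stand-ins for the kernel's IPFRAG_ECN_* bits; the two-bit ECN mask and CE value follow the IP ToS layout.

/* Illustrative sketch; flag values are made up for the example. */
#include <stdio.h>

#define ECN_CLEAR   0x1		/* at least one fragment was Not-ECT */
#define ECN_SET_CE  0x2		/* at least one fragment carried CE  */

#define TOS_ECN_MASK 0x03
#define TOS_ECN_CE   0x03

static unsigned char fold_ecn(unsigned char tos, unsigned int ecn_flags)
{
	if (ecn_flags & ECN_CLEAR)
		tos &= ~TOS_ECN_MASK;	/* reassembled frame is Not-ECT */
	else if (ecn_flags & ECN_SET_CE)
		tos |= TOS_ECN_CE;	/* reassembled frame is CE */
	return tos;
}

int main(void)
{
	printf("0x%02x\n", (unsigned)fold_ecn(0x02 /* ECT(0) */, ECN_SET_CE));	/* 0x03 */
	printf("0x%02x\n", (unsigned)fold_ecn(0x02 /* ECT(0) */, ECN_CLEAR));	/* 0x00 */
	return 0;
}
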
Example 5
int qdisc_restart(struct net_device *dev)
{
	struct Qdisc *q = dev->qdisc;
	struct sk_buff *skb;

	/* BRCM: bail out if queue is null */
	if (!q) {
		if (net_ratelimit())
			printk(KERN_DEBUG "HELP ME! qdisc_restart called, but no Qdisc!\n");
		return 0;
	}

	/* Dequeue packet */
	if ((skb = q->dequeue(q)) != NULL) {
		if (spin_trylock(&dev->xmit_lock)) {
			/* Remember that the driver is grabbed by us. */
			dev->xmit_lock_owner = smp_processor_id();

			/* And release queue */
			spin_unlock(&dev->queue_lock);

			if (!netif_queue_stopped(dev)) {
				if (netdev_nit
#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
				    && !(skb->imq_flags & IMQ_F_ENQUEUE)
#endif
				    )
					dev_queue_xmit_nit(skb, dev);

				if (dev->hard_start_xmit(skb, dev) == 0) {
					dev->xmit_lock_owner = -1;
					spin_unlock(&dev->xmit_lock);

					spin_lock(&dev->queue_lock);
					return -1;
				}
			}

			/* Release the driver */
			dev->xmit_lock_owner = -1;
			spin_unlock(&dev->xmit_lock);
			spin_lock(&dev->queue_lock);
			q = dev->qdisc;
		} else {
			/* So, someone grabbed the driver. */

			/* It may be transient configuration error,
			   when hard_start_xmit() recurses. We detect
			   it by checking xmit owner and drop the
			   packet when deadloop is detected.
			 */
			if (dev->xmit_lock_owner == smp_processor_id()) {
				kfree_skb(skb);
				if (net_ratelimit())
					printk(KERN_DEBUG "Dead loop on netdevice %s, fix it urgently!\n", dev->name);
				return -1;
			}
			netdev_rx_stat[smp_processor_id()].cpu_collision++;
		}

		/* Device kicked us out :(
		   This is possible in three cases:

		   0. driver is locked
		   1. fastroute is enabled
		   2. device cannot determine busy state
		      before start of transmission (f.e. dialout)
		   3. device is buggy (ppp)
		 */

		q->ops->requeue(skb, q);
		netif_schedule(dev);
		return 1;
	}
	return q->q.qlen;
}
Example 6
static int
ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net_device_stats *stats = &t->stat;
	struct ipv6hdr *ipv6h = skb->nh.ipv6h;
	int encap_limit = -1;
	struct ipv6_tel_txoption opt;
	__u16 offset;
	struct flowi fl;
	struct dst_entry *dst;
	struct net_device *tdev;
	int mtu;
	int max_headroom = sizeof(struct ipv6hdr);
	u8 proto;
	int err;
	int pkt_len;
	int dsfield;

	if (t->recursion++) {
		stats->collisions++;
		goto tx_err;
	}
	if (skb->protocol != htons(ETH_P_IPV6) ||
	    !ip6_tnl_xmit_ctl(t) || ip6ip6_tnl_addr_conflict(t, ipv6h))
		goto tx_err;

	if ((offset = parse_tlv_tnl_enc_lim(skb, skb->nh.raw)) > 0) {
		struct ipv6_tlv_tnl_enc_lim *tel;
		tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->nh.raw[offset];
		if (tel->encap_limit == 0) {
			icmpv6_send(skb, ICMPV6_PARAMPROB,
				    ICMPV6_HDR_FIELD, offset + 2, skb->dev);
			goto tx_err;
		}
		encap_limit = tel->encap_limit - 1;
	} else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
		encap_limit = t->parms.encap_limit;

	memcpy(&fl, &t->fl, sizeof (fl));
	proto = fl.proto;

	dsfield = ipv6_get_dsfield(ipv6h);
	if ((t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS))
		fl.fl6_flowlabel |= (*(__be32 *) ipv6h & IPV6_TCLASS_MASK);
	if ((t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL))
		fl.fl6_flowlabel |= (*(__be32 *) ipv6h & IPV6_FLOWLABEL_MASK);

	if ((dst = ip6_tnl_dst_check(t)) != NULL)
		dst_hold(dst);
	else {
		dst = ip6_route_output(if_dev_vrf(dev), if_dev_litevrf_id(dev), NULL, &fl);

		if (dst->error || xfrm_lookup(&dst, &fl, NULL, 0) < 0)
			goto tx_err_link_failure;
	}

	tdev = dst->dev;

	if (tdev == dev) {
		stats->collisions++;
		if (net_ratelimit())
			printk(KERN_WARNING
			       "%s: Local routing loop detected!\n",
			       t->parms.name);
		goto tx_err_dst_release;
	}
	mtu = dst_mtu(dst) - sizeof (*ipv6h);
	if (encap_limit >= 0) {
		max_headroom += 8;
		mtu -= 8;
	}
	if (mtu < IPV6_MIN_MTU)
		mtu = IPV6_MIN_MTU;
	if (skb->dst)
		skb->dst->ops->update_pmtu(skb->dst, mtu);
	if (skb->len > mtu) {
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
		goto tx_err_dst_release;
	}

	/*
	 * Okay, now see if we can stuff it in the buffer as-is.
	 */
	max_headroom += LL_RESERVED_SPACE(tdev);

	if (skb_headroom(skb) < max_headroom ||
	    skb_cloned(skb) || skb_shared(skb)) {
		struct sk_buff *new_skb;

		if (!(new_skb = skb_realloc_headroom(skb, max_headroom)))
			goto tx_err_dst_release;

		if (skb->sk)
			skb_set_owner_w(new_skb, skb->sk);
		kfree_skb(skb);
		skb = new_skb;
	}
	dst_release(skb->dst);
	skb->dst = dst_clone(dst);

	skb->h.raw = skb->nh.raw;

	if (encap_limit >= 0) {
		init_tel_txopt(&opt, encap_limit);
		ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL);
	}
	skb->nh.raw = skb_push(skb, sizeof(struct ipv6hdr));
	ipv6h = skb->nh.ipv6h;
	*(__be32*)ipv6h = fl.fl6_flowlabel | htonl(0x60000000);
	dsfield = INET_ECN_encapsulate(0, dsfield);
	ipv6_change_dsfield(ipv6h, ~INET_ECN_MASK, dsfield);
	ipv6h->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
	ipv6h->hop_limit = t->parms.hop_limit;
	ipv6h->nexthdr = proto;
	ipv6_addr_copy(&ipv6h->saddr, &fl.fl6_src);
	ipv6_addr_copy(&ipv6h->daddr, &fl.fl6_dst);
	nf_reset(skb);
	ns_reset(skb);
	pkt_len = skb->len;
	err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL,
		      skb->dst->dev, dst_output);

	if (net_xmit_eval(err) == 0) {
		stats->tx_bytes += pkt_len;
		stats->tx_packets++;
	} else {
		stats->tx_errors++;
		stats->tx_aborted_errors++;
	}
	ip6_tnl_dst_store(t, dst);
	t->recursion--;
	return 0;
tx_err_link_failure:
	stats->tx_carrier_errors++;
	dst_link_failure(skb);
tx_err_dst_release:
	dst_release(dst);
tx_err:
	stats->tx_errors++;
	stats->tx_dropped++;
	kfree_skb(skb);
	t->recursion--;
	return 0;
}
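
The MTU handling above subtracts the outer IPv6 header from the route MTU, takes another 8 bytes when a tunnel encapsulation-limit option will be inserted, and never reports less than IPV6_MIN_MTU. A small sketch of that arithmetic using the same sizes (tunnel_payload_mtu is an illustrative helper):

/* Illustrative sketch of the tunnel MTU computation. */
#include <stdio.h>

#define IPV6_HDR_LEN 40		/* sizeof(struct ipv6hdr) */
#define TEL_OPT_LEN   8		/* dst-opt header carrying the encap limit */
#define IPV6_MIN_MTU 1280

static int tunnel_payload_mtu(int route_mtu, int encap_limit)
{
	int mtu = route_mtu - IPV6_HDR_LEN;

	if (encap_limit >= 0)	/* an encapsulation-limit option will be added */
		mtu -= TEL_OPT_LEN;
	if (mtu < IPV6_MIN_MTU)
		mtu = IPV6_MIN_MTU;
	return mtu;
}

int main(void)
{
	printf("%d\n", tunnel_payload_mtu(1500, -1));	/* 1460 */
	printf("%d\n", tunnel_payload_mtu(1500,  3));	/* 1452 */
	printf("%d\n", tunnel_payload_mtu(1300,  3));	/* clamped to 1280 */
	return 0;
}
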
Example 7
static int ftmac100_poll(struct napi_struct *napi, int budget)
{
	struct ftmac100 *priv = container_of(napi, struct ftmac100, napi);
	struct net_device *netdev = priv->netdev;
	unsigned int status;
	bool completed = true;
	int rx = 0;

	status = ioread32(priv->base + FTMAC100_OFFSET_ISR);

	if (status & (FTMAC100_INT_RPKT_FINISH | FTMAC100_INT_NORXBUF)) {
		/*
		 * FTMAC100_INT_RPKT_FINISH:
		 *	frames have been received into the RX buffers
		 *
		 * FTMAC100_INT_NORXBUF:
		 *	no RX buffer was available for an incoming frame
		 */
		bool retry;

		do {
			retry = ftmac100_rx_packet(priv, &rx);
		} while (retry && rx < budget);

		if (retry && rx == budget)
			completed = false;
	}

	if (status & (FTMAC100_INT_XPKT_OK | FTMAC100_INT_XPKT_LOST)) {
		/*
		 * FTMAC100_INT_XPKT_OK:
		 *	a frame was transmitted successfully
		 *
		 * FTMAC100_INT_XPKT_LOST:
		 *	a frame was lost during transmission (e.g. collisions)
		 */
		ftmac100_tx_complete(priv);
	}

	if (status & (FTMAC100_INT_NORXBUF | FTMAC100_INT_RPKT_LOST |
		      FTMAC100_INT_AHB_ERR | FTMAC100_INT_PHYSTS_CHG)) {
		if (net_ratelimit())
			netdev_info(netdev, "[ISR] = 0x%x: %s%s%s%s\n", status,
				    status & FTMAC100_INT_NORXBUF ? "NORXBUF " : "",
				    status & FTMAC100_INT_RPKT_LOST ? "RPKT_LOST " : "",
				    status & FTMAC100_INT_AHB_ERR ? "AHB_ERR " : "",
				    status & FTMAC100_INT_PHYSTS_CHG ? "PHYSTS_CHG" : "");

		if (status & FTMAC100_INT_NORXBUF) {
			/* no RX buffer available: receiver overrun */
			netdev->stats.rx_over_errors++;
		}

		if (status & FTMAC100_INT_RPKT_LOST) {
			/* received frame lost because the RX FIFO was full */
			netdev->stats.rx_fifo_errors++;
		}

		if (status & FTMAC100_INT_PHYSTS_CHG) {
			/* PHY link status changed */
			mii_check_link(&priv->mii);
		}
	}

	if (completed) {
		/* all work done: stop polling and re-enable interrupts */
		napi_complete(napi);
		ftmac100_enable_all_int(priv);
	}

	return rx;
}
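
A hedged sketch of the NAPI budget accounting in the RX branch above: keep pulling frames while the hardware has more and the budget allows, and only report the poll as complete (so interrupts can be re-enabled) when the loop stopped for lack of packets rather than lack of budget. rx_one_packet() and the pending counter are stand-ins for ftmac100_rx_packet() and the real RX ring.

/* Illustrative sketch only -- not driver code. */
#include <stdbool.h>
#include <stdio.h>

static int pending;	/* pretend number of frames in the RX ring */

static bool rx_one_packet(int *rx)
{
	if (!pending)
		return false;
	pending--;
	(*rx)++;
	return true;		/* there may be more work to do */
}

static int poll(int budget, bool *completed)
{
	int rx = 0;
	bool retry;

	do {
		retry = rx_one_packet(&rx);
	} while (retry && rx < budget);

	/* budget exhausted while packets remain: NAPI must poll again */
	*completed = !(retry && rx == budget);
	return rx;
}

int main(void)
{
	bool done;
	int rx;

	pending = 100;
	rx = poll(64, &done);
	printf("rx=%d completed=%d\n", rx, done);	/* rx=64 completed=0 */
	rx = poll(64, &done);
	printf("rx=%d completed=%d\n", rx, done);	/* rx=36 completed=1 */
	return 0;
}
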
Example 8
/*
 * 	Main IP Receive routine.
 */ 
int ip_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt)
{
	struct iphdr *iph = skb->nh.iph;

	/*
	 * 	When the interface is in promisc. mode, drop all the crap
	 * 	that it receives, do not try to analyse it.
	 */
	if (skb->pkt_type == PACKET_OTHERHOST)
		goto drop;

	ip_statistics.IpInReceives++;

	/*
	 *	RFC1122: 3.1.2.2 MUST silently discard any IP frame that fails the checksum.
	 *
	 *	Is the datagram acceptable?
	 *
	 *	1.	Length at least the size of an ip header
	 *	2.	Version of 4
	 *	3.	Checksums correctly. [Speed optimisation for later, skip loopback checksums]
	 *	4.	Doesn't have a bogus length
	 */

	if (skb->len < sizeof(struct iphdr))
		goto inhdr_error; 
	if (iph->ihl < 5 || iph->version != 4 || ip_fast_csum((u8 *)iph, iph->ihl) != 0)
		goto inhdr_error; 

	{
	__u32 len = ntohs(iph->tot_len); 
	if (skb->len < len)
		goto inhdr_error; 

	/*
	 *	Our transport medium may have padded the buffer out. Now we know it
	 *	is IP we can trim to the true length of the frame.
	 *	Note this now means skb->len holds ntohs(iph->tot_len).
	 */

	__skb_trim(skb, len);
	}
	
	/*
	 *	Initialise the virtual path cache for the packet. It describes
	 *	how the packet travels inside Linux networking.
	 */ 
	if (skb->dst == NULL) {
		if (ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))
			goto drop; 
#ifdef CONFIG_CPU_IS_SLOW
		if (net_cpu_congestion > 10 && !(iph->tos&IPTOS_RELIABILITY) &&
		    IPTOS_PREC(iph->tos) < IPTOS_PREC_INTERNETCONTROL) {
			goto drop;
		}
#endif
	}

#ifdef CONFIG_IP_ALWAYS_DEFRAG
	if (iph->frag_off & htons(IP_MF|IP_OFFSET)) {
		skb = ip_defrag(skb);
		if (!skb)
			return 0;
		iph = skb->nh.iph;
		ip_send_check(iph);
	}
#endif

	if (iph->ihl > 5) {
		struct ip_options *opt;

		/* It looks as overkill, because not all
		   IP options require packet mangling.
		   But it is the easiest for now, especially taking
		   into account that combination of IP options
		   and running sniffer is extremely rare condition.
		                                      --ANK (980813)
		*/
		   
		skb = skb_cow(skb, skb_headroom(skb));
		if (skb == NULL)
			return 0;
		iph = skb->nh.iph;

		skb->ip_summed = 0;
		if (ip_options_compile(NULL, skb))
			goto inhdr_error;

		opt = &(IPCB(skb)->opt);
		if (opt->srr) {
			struct in_device *in_dev = dev->ip_ptr;
			if (in_dev && !IN_DEV_SOURCE_ROUTE(in_dev)) {
				if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
					printk(KERN_INFO "source route option %d.%d.%d.%d -> %d.%d.%d.%d\n",
					       NIPQUAD(iph->saddr), NIPQUAD(iph->daddr));
				goto drop;
			}
			if (ip_options_rcv_srr(skb))
				goto drop;
		}
	}

	/*
	 *	See if the firewall wants to dispose of the packet. 
	 *
	 *	Note: the current standard firewall code expects that the 
	 *	destination address was already checked against the interface 
	 *	address lists.
	 *
	 *	If this code is ever moved in front of ip_route_input() you need
	 *	to fix the fw code [moving it might be a good idea anyways,
	 *	so that we can firewall against potentially bugs in the options
	 *	or routing code]
	 */
	
#ifdef	CONFIG_FIREWALL
        {
		int fwres;
		u16 rport;
#ifdef  CONFIG_IP_ROUTE_TOS
		u8  tos = iph->tos;
#endif

		if ((fwres=call_in_firewall(PF_INET, skb->dev, iph, &rport, &skb))<FW_ACCEPT) {
			if (fwres==FW_REJECT)
				icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
			goto drop;
		}

#ifdef	CONFIG_IP_TRANSPARENT_PROXY
		if (fwres==FW_REDIRECT && (IPCB(skb)->redirport = rport) != 0)
			return ip_local_deliver(skb);
#endif
#ifdef	CONFIG_IP_ROUTE_TOS
		/* It is for 2.2 only. Firewalling should make smart
		   rerouting itself, ideally, but now it is too late
		   to teach it. 			--ANK (980905)
		 */
		if (iph->tos != tos && ((struct rtable*)skb->dst)->rt_type == RTN_UNICAST) {
			dst_release(skb->dst);
			skb->dst = NULL;
			if (ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))
				goto drop; 
		}
#endif
	}
#endif

	return skb->dst->input(skb);

inhdr_error:
	ip_statistics.IpInHdrErrors++;
drop:
	kfree_skb(skb);
	return 0;
}
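
A self-contained sketch of the four acceptance tests listed in the comment above: minimum length, version 4 with a sane IHL, a verifying header checksum, and a total length that fits the frame. struct ip4_hdr, csum16() and ip_header_ok() are local stand-ins; the checksum is a plain RFC 1071 one's-complement sum rather than the kernel's ip_fast_csum().

/* Illustrative sketch only. */
#include <arpa/inet.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct ip4_hdr {
	uint8_t  ver_ihl;	/* version (high nibble) + IHL in 32-bit words */
	uint8_t  tos;
	uint16_t tot_len;
	uint16_t id;
	uint16_t frag_off;
	uint8_t  ttl;
	uint8_t  protocol;
	uint16_t check;
	uint32_t saddr;
	uint32_t daddr;
} __attribute__((packed));

/* RFC 1071 one's-complement sum over `words` 16-bit words; the sum is
 * byte-order independent, so self-verification works either way. */
static uint16_t csum16(const void *data, size_t words)
{
	const uint16_t *p = data;
	uint32_t sum = 0;

	while (words--)
		sum += *p++;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

bool ip_header_ok(const void *buf, size_t frame_len)
{
	const struct ip4_hdr *iph = buf;
	unsigned int ihl;

	if (frame_len < sizeof(*iph))
		return false;			/* 1. at least a minimal header */
	ihl = iph->ver_ihl & 0x0f;
	if (ihl < 5 || (iph->ver_ihl >> 4) != 4)
		return false;			/* 2. sane IHL, version 4 */
	if (frame_len < ihl * 4u)
		return false;			/* options must be present too */
	if (csum16(buf, ihl * 2) != 0)
		return false;			/* 3. header checksum verifies */
	if (ntohs(iph->tot_len) < ihl * 4u || frame_len < ntohs(iph->tot_len))
		return false;			/* 4. total length is not bogus */
	return true;
}

int main(void)
{
	struct ip4_hdr h = {
		.ver_ihl = 0x45, .tot_len = htons(sizeof(h)), .ttl = 64,
		.protocol = 17, .saddr = htonl(0x0a000001), .daddr = htonl(0x0a000002),
	};

	h.check = csum16(&h, sizeof(h) / 2);		/* fill in a valid checksum */
	printf("valid: %d\n", ip_header_ok(&h, sizeof(h)));	/* 1 */
	h.ttl--;					/* checksum no longer matches */
	printf("valid: %d\n", ip_header_ok(&h, sizeof(h)));	/* 0 */
	return 0;
}
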
Example 9
int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
	     struct ipv6_txoptions *opt)
{
	struct ipv6_pinfo * np = sk ? &sk->net_pinfo.af_inet6 : NULL;
	struct in6_addr *first_hop = &fl->fl6_dst;
	struct dst_entry *dst = skb->dst;
	struct ipv6hdr *hdr;
	u8  proto = fl->proto;
	int seg_len = skb->len;
	int hlimit;
	u32 mtu;

	if (opt) {
		int head_room;

		/* First: exthdrs may take lots of space (~8K for now)
		   MAX_HEADER is not enough.
		 */
		head_room = opt->opt_nflen + opt->opt_flen;
		seg_len += head_room;
		head_room += sizeof(struct ipv6hdr) + ((dst->dev->hard_header_len + 15)&~15);

		if (skb_headroom(skb) < head_room) {
			struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
			kfree_skb(skb);
			skb = skb2;
			if (skb == NULL)
				return -ENOBUFS;
			if (sk)
				skb_set_owner_w(skb, sk);
		}
		if (opt->opt_flen)
			ipv6_push_frag_opts(skb, opt, &proto);
		if (opt->opt_nflen)
			ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
	}

	hdr = skb->nh.ipv6h = (struct ipv6hdr*)skb_push(skb, sizeof(struct ipv6hdr));

	/*
	 *	Fill in the IPv6 header
	 */

	*(u32*)hdr = htonl(0x60000000) | fl->fl6_flowlabel;
	hlimit = -1;
	if (np)
		hlimit = np->hop_limit;
	if (hlimit < 0)
		hlimit = dst_metric(dst, RTAX_HOPLIMIT);

	hdr->payload_len = htons(seg_len);
	hdr->nexthdr = proto;
	hdr->hop_limit = hlimit;

	ipv6_addr_copy(&hdr->saddr, &fl->fl6_src);
	ipv6_addr_copy(&hdr->daddr, first_hop);

	mtu = dst_pmtu(dst);
	if (skb->len <= mtu) {
		IP6_INC_STATS(Ip6OutRequests);
		return NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev, ip6_maybe_reroute);
	}

	if (net_ratelimit())
		printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n");
	skb->dev = dst->dev;
	icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
	kfree_skb(skb);
	return -EMSGSIZE;
}
Example 10
static unsigned int route_oif(const struct ipt_route_target_info *route_info,
			      struct sk_buff *skb) 
{
	unsigned int ifindex = 0;
	struct net_device *dev_out = NULL;

	/* The user set the interface name to use.
	 * Getting the current interface index.
	 */
	if ((dev_out = dev_get_by_name(&init_net,route_info->oif))) {
		ifindex = dev_out->ifindex;
	} else {
		/* Unknown interface name : packet dropped */
		if (net_ratelimit()) 
			pr_debug("ipt_ROUTE: oif interface %s not found\n", route_info->oif);
		return NF_DROP;
	}

	/* Trying the standard way of routing packets */
	switch (route(skb, ifindex, route_info)) {
	case 1:
		dev_put(dev_out);
		if (route_info->flags & IPT_ROUTE_CONTINUE)
			return IPT_CONTINUE;

		ip_direct_send(skb);
		return NF_STOLEN;

	case 0:
		/* Failed to send to oif. Trying the hard way */
		if (route_info->flags & IPT_ROUTE_CONTINUE)
			return NF_DROP;

		if (net_ratelimit()) 
			pr_debug("ipt_ROUTE: forcing the use of %i\n",
			       ifindex);

		/* We have to force the use of an interface.
		 * This interface must be a tunnel interface since
		 * otherwise we can't guess the hw address for
		 * the packet. For a tunnel interface, no hw address
		 * is needed.
		 */
		if ((dev_out->type != ARPHRD_TUNNEL)
		    && (dev_out->type != ARPHRD_IPGRE)) {
			if (net_ratelimit()) 
				pr_debug("ipt_ROUTE: can't guess the hw addr !\n");
			dev_put(dev_out);
			return NF_DROP;
		}
	
		/* Send the packet. This will also free skb
		 * Do not go through the POST_ROUTING hook because 
		 * skb->dst is not set and because it will probably
		 * get confused by the destination IP address.
		 */
		skb->dev = dev_out;
		dev_direct_send(skb);
		dev_put(dev_out);
		return NF_STOLEN;
		
	default:
		/* Unexpected error */
		dev_put(dev_out);
		return NF_DROP;
	}
}
Example 11
static unsigned int ipt_route_target(struct sk_buff *skb,
				     const struct xt_action_param *par)
{
	const struct ipt_route_target_info *route_info = par->targinfo;
	//struct sk_buff *skb = *pskb;
	unsigned int res;

	if (skb->nfct == &route_tee_track.ct_general) {
		/* Loopback - a packet we already routed, is to be
		 * routed another time. Avoid that, now.
		 */
		if (net_ratelimit()) 
			pr_debug("ipt_ROUTE: loopback - DROP!\n");
		return NF_DROP;
	}

	/* If we are at PREROUTING or INPUT hook
	 * the TTL isn't decreased by the IP stack
	 */
	if (par->hooknum == NF_INET_PRE_ROUTING ||
	    par->hooknum == NF_INET_LOCAL_IN) {

		struct iphdr *iph = ip_hdr(skb);

		if (iph->ttl <= 1) {
			struct rtable *rt;
			struct flowi fl = {
				.oif = 0,
				.nl_u = {
					.ip4_u = {
						.daddr = iph->daddr,
						.saddr = iph->saddr,
						.tos = RT_TOS(iph->tos),
						.scope = ((iph->tos & RTO_ONLINK) ?
							  RT_SCOPE_LINK :
							  RT_SCOPE_UNIVERSE)
					}
				} 
			};

			if (ip_route_output_key(&init_net,&rt, &fl)) {
				return NF_DROP;
			}

			if (skb->dev == rt->dst.dev) {
				/* Drop old route. */
				skb_dst_set(skb, &rt->dst);

				/* this will traverse normal stack, and 
				 * thus call conntrack on the icmp packet */
				icmp_send(skb, ICMP_TIME_EXCEEDED, 
					  ICMP_EXC_TTL, 0);
			}

			return NF_DROP;
		}

		/*
		 * If we are at INPUT the checksum must be recalculated since
		 * the length could change as the result of a defragmentation.
		 */
		if(par->hooknum == NF_INET_LOCAL_IN) {
			iph->ttl = iph->ttl - 1;
			iph->check = 0;
			iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
		} else {
			ip_decrease_ttl(iph);
		}
	}

	if ((route_info->flags & IPT_ROUTE_TEE)) {
		/*
		 * Copy the *pskb, and route the copy. Will later return
		 * IPT_CONTINUE for the original skb, which should continue
		 * on its way as if nothing happened. The copy should be
		 * independently delivered to the ROUTE --gw.
		 */
		skb = skb_copy(skb, GFP_ATOMIC);
		if (!skb) {
			if (net_ratelimit()) 
				pr_debug("ipt_ROUTE: copy failed!\n");
			return IPT_CONTINUE;
		}
	}

	/* Tell conntrack to forget this packet since it may get confused 
	 * when a packet is leaving with dst address == our address.
	 * Good idea ? Dunno. Need advice.
	 *
	 * NEW: mark the skb with our &route_tee_track, so we avoid looping
	 * on any already routed packet.
	 */
	if (!(route_info->flags & IPT_ROUTE_CONTINUE)) {
		nf_conntrack_put(skb->nfct);
		skb->nfct = &route_tee_track.ct_general;
		skb->nfctinfo = IP_CT_NEW;
		nf_conntrack_get(skb->nfct);
	}

	if (route_info->oif[0] != '\0') {
		res = route_oif(route_info, skb);
	} else if (route_info->iif[0] != '\0') {
		res = route_iif(route_info, skb);
	} else if (route_info->gw) {
		res = route_gw(route_info, skb);
	} else {
		if (net_ratelimit()) 
			pr_debug("ipt_ROUTE: no parameter!\n");
		res = IPT_CONTINUE;
	}

	if ((route_info->flags & IPT_ROUTE_TEE))
		res = IPT_CONTINUE;

	return res;
}
Example 12
/**
 * arc_emac_rx - processing of Rx packets.
 * @ndev:	Pointer to the network device.
 * @budget:	How many BDs to process on 1 call.
 *
 * returns:	Number of processed BDs
 *
 * Iterate through Rx BDs and deliver received packages to upper layer.
 */
static int arc_emac_rx(struct net_device *ndev, int budget)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	unsigned int work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		unsigned int *last_rx_bd = &priv->last_rx_bd;
		struct net_device_stats *stats = &ndev->stats;
		struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd];
		struct arc_emac_bd *rxbd = &priv->rxbd[*last_rx_bd];
		unsigned int pktlen, info = le32_to_cpu(rxbd->info);
		struct sk_buff *skb;
		dma_addr_t addr;

		if (unlikely((info & OWN_MASK) == FOR_EMAC))
			break;

		/* Make a note that we saw a packet at this BD.
		 * So next time, driver starts from this + 1
		 */
		*last_rx_bd = (*last_rx_bd + 1) % RX_BD_NUM;

		if (unlikely((info & FIRST_OR_LAST_MASK) !=
			     FIRST_OR_LAST_MASK)) {
			/* We pre-allocate buffers of MTU size so incoming
			 * packets won't be split/chained.
			 */
			if (net_ratelimit())
				netdev_err(ndev, "incomplete packet received\n");

			/* Return ownership to EMAC */
			rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
			stats->rx_errors++;
			stats->rx_length_errors++;
			continue;
		}

		pktlen = info & LEN_MASK;
		stats->rx_packets++;
		stats->rx_bytes += pktlen;
		skb = rx_buff->skb;
		skb_put(skb, pktlen);
		skb->dev = ndev;
		skb->protocol = eth_type_trans(skb, ndev);

		dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
				 dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);

		/* Prepare the BD for next cycle */
		rx_buff->skb = netdev_alloc_skb_ip_align(ndev,
							 EMAC_BUFFER_SIZE);
		if (unlikely(!rx_buff->skb)) {
			stats->rx_errors++;
			/* Because receive_skb is below, increment rx_dropped */
			stats->rx_dropped++;
			continue;
		}

		/* receive_skb only if new skb was allocated to avoid holes */
		netif_receive_skb(skb);

		addr = dma_map_single(&ndev->dev, (void *)rx_buff->skb->data,
				      EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(&ndev->dev, addr)) {
			if (net_ratelimit())
				netdev_err(ndev, "cannot dma map\n");
			dev_kfree_skb(rx_buff->skb);
			stats->rx_errors++;
			continue;
		}
		dma_unmap_addr_set(rx_buff, addr, addr);
		dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE);

		rxbd->data = cpu_to_le32(addr);

		/* Make sure pointer to data buffer is set */
		wmb();

		/* Return ownership to EMAC */
		rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
	}

	return work_done;
}
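
A small sketch of the descriptor-ring walk above: consume only descriptors the CPU owns, advance the last-seen index modulo the ring size so the next poll resumes in the right place, and hand the slot back to the MAC once it has been refilled. The OWN_BY_HW bit and struct rx_desc are illustrative, not the EMAC's real layout.

/* Illustrative sketch; descriptor layout is made up for the example. */
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8
#define OWN_BY_HW (1u << 31)	/* illustrative "FOR_EMAC" ownership bit */

struct rx_desc {
	uint32_t info;		/* ownership + length, as in rxbd->info */
};

static struct rx_desc ring[RING_SIZE];
static unsigned int last_rx;

/* Process at most `budget` descriptors owned by the CPU. */
static int rx_poll(int budget)
{
	int done = 0;

	while (done < budget) {
		struct rx_desc *d = &ring[last_rx];

		if (d->info & OWN_BY_HW)
			break;				/* hardware still owns this slot */

		last_rx = (last_rx + 1) % RING_SIZE;	/* remember where to resume */

		/* ... deliver the frame here ... */

		d->info = OWN_BY_HW;			/* return the refilled slot to the MAC */
		done++;
	}
	return done;
}

int main(void)
{
	ring[0].info = 64;	/* two frames ready, rest owned by hardware */
	ring[1].info = 128;
	for (int i = 2; i < RING_SIZE; i++)
		ring[i].info = OWN_BY_HW;

	printf("processed %d\n", rx_poll(16));	/* 2 */
	return 0;
}
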
int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
	     struct ipv6_txoptions *opt, int tclass)
{
	struct net *net = sock_net(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_addr *first_hop = &fl6->daddr;
	struct dst_entry *dst = skb_dst(skb);
	struct ipv6hdr *hdr;
	u8  proto = fl6->flowi6_proto;
	int seg_len = skb->len;
	int hlimit = -1;
	u32 mtu;

	if (opt) {
		unsigned int head_room;

		/* First: exthdrs may take lots of space (~8K for now)
		   MAX_HEADER is not enough.
		 */
		head_room = opt->opt_nflen + opt->opt_flen;
		seg_len += head_room;
		head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);

		if (skb_headroom(skb) < head_room) {
			struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
			if (skb2 == NULL) {
				IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
					      IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return -ENOBUFS;
			}
			kfree_skb(skb);
			skb = skb2;
			skb_set_owner_w(skb, sk);
		}
		if (opt->opt_flen)
			ipv6_push_frag_opts(skb, opt, &proto);
		if (opt->opt_nflen)
			ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
	}

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	hdr = ipv6_hdr(skb);

	/*
	 *	Fill in the IPv6 header
	 */
	if (np)
		hlimit = np->hop_limit;
	if (hlimit < 0)
		hlimit = ip6_dst_hoplimit(dst);

	*(__be32 *)hdr = htonl(0x60000000 | (tclass << 20)) | fl6->flowlabel;

	hdr->payload_len = htons(seg_len);
	hdr->nexthdr = proto;
	hdr->hop_limit = hlimit;

	hdr->saddr = fl6->saddr;
	hdr->daddr = *first_hop;

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	mtu = dst_mtu(dst);
	if ((skb->len <= mtu) || skb->local_df || skb_is_gso(skb)) {
		IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)),
			      IPSTATS_MIB_OUT, skb->len);
		return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
			       dst->dev, dst_output);
	}

	if (net_ratelimit())
		printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n");
	skb->dev = dst->dev;
	icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return -EMSGSIZE;
}
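
The first 32-bit word written above packs version 6 in the top nibble, the traffic class in the next 8 bits and the 20-bit flow label below; the code ORs htonl(0x60000000 | (tclass << 20)) with a flow label that is already in network order. A hedged sketch of the same packing done entirely in host order (ipv6_first_word is an illustrative helper):

/* Illustrative sketch: pack version/traffic class/flow label in host order. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t ipv6_first_word(uint8_t tclass, uint32_t flowlabel)
{
	/* 4 bits version | 8 bits traffic class | 20 bits flow label */
	return (6u << 28) | ((uint32_t)tclass << 20) | (flowlabel & 0xFFFFF);
}

int main(void)
{
	uint32_t w = ipv6_first_word(0xB8 /* e.g. a DSCP/ECN byte */, 0x12345);

	assert((w >> 28) == 6);
	assert(((w >> 20) & 0xFF) == 0xB8);
	assert((w & 0xFFFFF) == 0x12345);
	printf("0x%08x\n", w);	/* 0x6b812345 */
	return 0;
}
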
Example 14
static int nat_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
			enum ip_conntrack_info ctinfo,
			unsigned char **data, int dataoff,
			H245_TransportAddress *taddr,
			__be16 port, __be16 rtp_port,
			struct nf_conntrack_expect *rtp_exp,
			struct nf_conntrack_expect *rtcp_exp)
{
	struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info;
	int dir = CTINFO2DIR(ctinfo);
	int i;
	u_int16_t nated_port;

	/* Set expectations for NAT */
	rtp_exp->saved_proto.udp.port = rtp_exp->tuple.dst.u.udp.port;
	rtp_exp->expectfn = nf_nat_follow_master;
	rtp_exp->dir = !dir;
	rtcp_exp->saved_proto.udp.port = rtcp_exp->tuple.dst.u.udp.port;
	rtcp_exp->expectfn = nf_nat_follow_master;
	rtcp_exp->dir = !dir;

	/* Lookup existing expects */
	for (i = 0; i < H323_RTP_CHANNEL_MAX; i++) {
		if (info->rtp_port[i][dir] == rtp_port) {
			/* Expected */

			/* Use allocated ports first. This will refresh
			 * the expects */
			rtp_exp->tuple.dst.u.udp.port = info->rtp_port[i][dir];
			rtcp_exp->tuple.dst.u.udp.port =
			    htons(ntohs(info->rtp_port[i][dir]) + 1);
			break;
		} else if (info->rtp_port[i][dir] == 0) {
			/* Not expected */
			break;
		}
	}

	/* Run out of expectations */
	if (i >= H323_RTP_CHANNEL_MAX) {
		if (net_ratelimit())
			pr_notice("nf_nat_h323: out of expectations\n");
		return 0;
	}

	/* Try to get a pair of ports. */
	for (nated_port = ntohs(rtp_exp->tuple.dst.u.udp.port);
	     nated_port != 0; nated_port += 2) {
		int ret;

		rtp_exp->tuple.dst.u.udp.port = htons(nated_port);
		ret = nf_ct_expect_related(rtp_exp);
		if (ret == 0) {
			rtcp_exp->tuple.dst.u.udp.port =
			    htons(nated_port + 1);
			ret = nf_ct_expect_related(rtcp_exp);
			if (ret == 0)
				break;
			else if (ret != -EBUSY) {
				nf_ct_unexpect_related(rtp_exp);
				nated_port = 0;
				break;
			}
		} else if (ret != -EBUSY) {
			nated_port = 0;
			break;
		}
	}

	if (nated_port == 0) {	/* No port available */
		if (net_ratelimit())
			pr_notice("nf_nat_h323: out of RTP ports\n");
		return 0;
	}

	/* Modify signal */
	if (set_h245_addr(skb, data, dataoff, taddr,
			  &ct->tuplehash[!dir].tuple.dst.u3,
			  htons((port & htons(1)) ? nated_port + 1 :
						    nated_port)) == 0) {
		/* Save ports */
		info->rtp_port[i][dir] = rtp_port;
		info->rtp_port[i][!dir] = htons(nated_port);
	} else {
		nf_ct_unexpect_related(rtp_exp);
		nf_ct_unexpect_related(rtcp_exp);
		return -1;
	}

	/* Success */
	pr_debug("nf_nat_h323: expect RTP %pI4:%hu->%pI4:%hu\n",
		 &rtp_exp->tuple.src.u3.ip,
		 ntohs(rtp_exp->tuple.src.u.udp.port),
		 &rtp_exp->tuple.dst.u3.ip,
		 ntohs(rtp_exp->tuple.dst.u.udp.port));
	pr_debug("nf_nat_h323: expect RTCP %pI4:%hu->%pI4:%hu\n",
		 &rtcp_exp->tuple.src.u3.ip,
		 ntohs(rtcp_exp->tuple.src.u.udp.port),
		 &rtcp_exp->tuple.dst.u3.ip,
		 ntohs(rtcp_exp->tuple.dst.u.udp.port));

	return 0;
}
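
The allocation loop above gives RTP an even port and RTCP the next odd one, stepping by two until a pair is accepted. A hedged sketch of that search, with a simple bitmap standing in for the conntrack expectation table (alloc_rtp_pair and in_use are illustrative):

/* Illustrative sketch; in_use[] stands in for nf_ct_expect_related() results. */
#include <stdbool.h>
#include <stdio.h>

#define PORT_MAX 65536
static bool in_use[PORT_MAX];

/* Returns the RTP port of a free (rtp, rtp + 1) pair, or 0 when the
 * search wraps around without finding one. */
static unsigned int alloc_rtp_pair(unsigned int start)
{
	unsigned int port;

	for (port = start & ~1u; port != 0; port = (port + 2) % PORT_MAX) {
		if (!in_use[port] && !in_use[port + 1]) {
			in_use[port] = in_use[port + 1] = true;
			return port;
		}
	}
	return 0;
}

int main(void)
{
	in_use[5004] = true;	/* pretend the first candidate pair is busy */

	printf("%u\n", alloc_rtp_pair(5004));	/* 5006 */
	printf("%u\n", alloc_rtp_pair(5006));	/* 5008: 5006/5007 just taken */
	return 0;
}
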
Example 15
static void receive_skb(struct net_device *dev, struct sk_buff *skb,
			unsigned len)
{
	struct virtio_net_hdr *hdr = skb_vnet_hdr(skb);

	if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
		pr_debug("%s: short packet %i\n", dev->name, len);
		dev->stats.rx_length_errors++;
		goto drop;
	}
	len -= sizeof(struct virtio_net_hdr);
	BUG_ON(len > MAX_PACKET_LEN);

	skb_trim(skb, len);
	skb->protocol = eth_type_trans(skb, dev);
	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
		 ntohs(skb->protocol), skb->len, skb->pkt_type);
	dev->stats.rx_bytes += skb->len;
	dev->stats.rx_packets++;

	if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		pr_debug("Needs csum!\n");
		if (!skb_partial_csum_set(skb,hdr->csum_start,hdr->csum_offset))
			goto frame_err;
	}

	if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		pr_debug("GSO!\n");
		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
			break;
		case VIRTIO_NET_HDR_GSO_UDP:
			skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
			break;
		case VIRTIO_NET_HDR_GSO_TCPV6:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
			break;
		default:
			if (net_ratelimit())
				printk(KERN_WARNING "%s: bad gso type %u.\n",
				       dev->name, hdr->gso_type);
			goto frame_err;
		}

		if (hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN)
			skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

		skb_shinfo(skb)->gso_size = hdr->gso_size;
		if (skb_shinfo(skb)->gso_size == 0) {
			if (net_ratelimit())
				printk(KERN_WARNING "%s: zero gso size.\n",
				       dev->name);
			goto frame_err;
		}

		/* Header must be checked, and gso_segs computed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;
	}

	netif_receive_skb(skb);
	return;

frame_err:
	dev->stats.rx_frame_errors++;
drop:
	dev_kfree_skb(skb);
}
/**
 * iwlagn_rx_replenish - Move all used packet from rx_used to rx_free
 *
 * When moving to rx_free an SKB is allocated for the slot.
 *
 * Also restock the Rx queue via iwl_rx_queue_restock.
 * This is called as a scheduled work item (except for during initialization)
 */
static void iwlagn_rx_allocate(struct iwl_trans *trans, gfp_t priority)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	struct list_head *element;
	struct iwl_rx_mem_buffer *rxb;
	struct page *page;
	unsigned long flags;
	gfp_t gfp_mask = priority;

	while (1) {
		spin_lock_irqsave(&rxq->lock, flags);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			return;
		}
		spin_unlock_irqrestore(&rxq->lock, flags);

		if (rxq->free_count > RX_LOW_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		if (hw_params(trans).rx_page_order > 0)
			gfp_mask |= __GFP_COMP;

		/* Alloc a new receive buffer */
		page = alloc_pages(gfp_mask,
				  hw_params(trans).rx_page_order);
		if (!page) {
			if (net_ratelimit())
				IWL_DEBUG_INFO(trans, "alloc_pages failed, "
					   "order: %d\n",
					   hw_params(trans).rx_page_order);

			if ((rxq->free_count <= RX_LOW_WATERMARK) &&
			    net_ratelimit())
				IWL_CRIT(trans, "Failed to alloc_pages with %s."
					 "Only %u free buffers remaining.\n",
					 priority == GFP_ATOMIC ?
					 "GFP_ATOMIC" : "GFP_KERNEL",
					 rxq->free_count);
			/* We don't reschedule replenish work here -- we will
			 * call the restock method and if it still needs
			 * more buffers it will schedule replenish */
			return;
		}

		spin_lock_irqsave(&rxq->lock, flags);

		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			__free_pages(page, hw_params(trans).rx_page_order);
			return;
		}
		element = rxq->rx_used.next;
		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
		list_del(element);

		spin_unlock_irqrestore(&rxq->lock, flags);

		BUG_ON(rxb->page);
		rxb->page = page;
		/* Get physical address of the RB */
		rxb->page_dma = dma_map_page(trans->dev, page, 0,
				PAGE_SIZE << hw_params(trans).rx_page_order,
				DMA_FROM_DEVICE);
		/* dma address must be no more than 36 bits */
		BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
		/* and also 256 byte aligned! */
		BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));

		spin_lock_irqsave(&rxq->lock, flags);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;

		spin_unlock_irqrestore(&rxq->lock, flags);
	}
}
Example 17
static int
ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
	   int type, int code, int offset, __be32 info)
{
	struct ipv6hdr *ipv6h = (struct ipv6hdr *) skb->data;
	struct ip6_tnl *t;
	int rel_msg = 0;
	int rel_type = ICMPV6_DEST_UNREACH;
	int rel_code = ICMPV6_ADDR_UNREACH;
	__u32 rel_info = 0;
	__u16 len;
	int err = -ENOENT;

	/* If the packet doesn't contain the original IPv6 header we are
	   in trouble since we might need the source address for further
	   processing of the error. */

	read_lock(&ip6ip6_lock);
	if ((t = ip6ip6_tnl_lookup(&ipv6h->daddr, &ipv6h->saddr)) == NULL)
		goto out;

	err = 0;

	switch (type) {
		__u32 teli;
		struct ipv6_tlv_tnl_enc_lim *tel;
		__u32 mtu;
	case ICMPV6_DEST_UNREACH:
		if (net_ratelimit())
			printk(KERN_WARNING
			       "%s: Path to destination invalid "
			       "or inactive!\n", t->parms.name);
		rel_msg = 1;
		break;
	case ICMPV6_TIME_EXCEED:
		if (code == ICMPV6_EXC_HOPLIMIT) {
			if (net_ratelimit())
				printk(KERN_WARNING
				       "%s: Too small hop limit or "
				       "routing loop in tunnel!\n",
				       t->parms.name);
			rel_msg = 1;
		}
		break;
	case ICMPV6_PARAMPROB:
		teli = 0;
		if (code == ICMPV6_HDR_FIELD)
			teli = parse_tlv_tnl_enc_lim(skb, skb->data);

		if (teli && teli == ntohl(info) - 2) {
			tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
			if (tel->encap_limit == 0) {
				if (net_ratelimit())
					printk(KERN_WARNING
					       "%s: Too small encapsulation "
					       "limit or routing loop in "
					       "tunnel!\n", t->parms.name);
				rel_msg = 1;
			}
		} else if (net_ratelimit()) {
			printk(KERN_WARNING
			       "%s: Recipient unable to parse tunneled "
			       "packet!\n ", t->parms.name);
		}
		break;
	case ICMPV6_PKT_TOOBIG:
		mtu = ntohl(info) - offset;
		if (mtu < IPV6_MIN_MTU)
			mtu = IPV6_MIN_MTU;
		t->dev->mtu = mtu;

		if ((len = sizeof (*ipv6h) + ntohs(ipv6h->payload_len)) > mtu) {
			rel_type = ICMPV6_PKT_TOOBIG;
			rel_code = 0;
			rel_info = mtu;
			rel_msg = 1;
		}
		break;
	}
	if (rel_msg &&  pskb_may_pull(skb, offset + sizeof (*ipv6h))) {
		struct rt6_info *rt;
		struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

		if (!skb2)
			goto out;

		dst_release(skb2->dst);
		skb2->dst = NULL;
		skb_pull(skb2, offset);
		skb2->nh.raw = skb2->data;

		/* Try to guess incoming interface */
		rt = rt6_lookup(if_dev_vrf(skb->dev), skb->litevrf_id, &skb2->nh.ipv6h->saddr, NULL, 0, 0);

		if (rt && rt->rt6i_dev)
			skb2->dev = rt->rt6i_dev;

		icmpv6_send(skb2, rel_type, rel_code, rel_info, skb2->dev);

		if (rt)
			dst_release(&rt->u.dst);

		kfree_skb(skb2);
	}
out:
	read_unlock(&ip6ip6_lock);
	return err;
}
Example 18
static void rate_control_simple_tx_status(void *priv, struct net_device *dev,
					  struct sk_buff *skb,
					  struct ieee80211_tx_status *status)
{
	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb_data(skb);
	struct sta_info *sta;
	struct sta_rate_control *srctrl;

	sta = sta_info_get(local, hdr->addr1);

	if (!sta)
	    return;

	srctrl = sta->rate_ctrl_priv;
	srctrl->tx_num_xmit++;
	if (status->excessive_retries) {
		sta->antenna_sel_tx = sta->antenna_sel_tx == 1 ? 2 : 1;
		sta->antenna_sel_rx = sta->antenna_sel_rx == 1 ? 2 : 1;
		if (local->sta_antenna_sel == STA_ANTENNA_SEL_SW_CTRL_DEBUG) {
			printk(KERN_DEBUG "%s: " MAC_FMT " TX antenna --> %d "
			       "RX antenna --> %d (@%lu)\n",
			       dev->name, MAC_ARG(hdr->addr1),
			       sta->antenna_sel_tx, sta->antenna_sel_rx, jiffies);
		}
		srctrl->tx_num_failures++;
		sta->tx_retry_failed++;
		sta->tx_num_consecutive_failures++;
		sta->tx_num_mpdu_fail++;
	} else {
		sta->last_ack_rssi[0] = sta->last_ack_rssi[1];
		sta->last_ack_rssi[1] = sta->last_ack_rssi[2];
		sta->last_ack_rssi[2] = status->ack_signal;
		sta->tx_num_consecutive_failures = 0;
		sta->tx_num_mpdu_ok++;
	}
	sta->tx_retry_count += status->retry_count;
	sta->tx_num_mpdu_fail += status->retry_count;

	if (time_after(jiffies,
		       srctrl->last_rate_change + RATE_CONTROL_INTERVAL) &&
		srctrl->tx_num_xmit > RATE_CONTROL_MIN_TX) {
		u32 per_failed;
		srctrl->last_rate_change = jiffies;

		per_failed = (100 * sta->tx_num_mpdu_fail) /
			(sta->tx_num_mpdu_fail + sta->tx_num_mpdu_ok);
		/* TODO: calculate average per_failed to make adjusting
		 * parameters easier */
#if 0
		if (net_ratelimit()) {
			printk(KERN_DEBUG "MPDU fail=%d ok=%d per_failed=%d\n",
			       sta->tx_num_mpdu_fail, sta->tx_num_mpdu_ok,
			       per_failed);
		}
#endif

		/*
		 * XXX: Make these configurable once we have an
		 * interface to the rate control algorithms
		 */
		if (per_failed > RATE_CONTROL_NUM_DOWN) {
			rate_control_rate_dec(local, sta);
		} else if (per_failed < RATE_CONTROL_NUM_UP) {
			rate_control_rate_inc(local, sta);
		}
		//srctrl->tx_avg_rate_sum += status->control.rate->rate;//FIXME
		srctrl->tx_avg_rate_num++;
		srctrl->tx_num_failures = 0;
		srctrl->tx_num_xmit = 0;
	} else if (sta->tx_num_consecutive_failures >=
		   RATE_CONTROL_EMERG_DEC) {
		rate_control_rate_dec(local, sta);
	}

	if (srctrl->avg_rate_update + 60 * HZ < jiffies) {
		srctrl->avg_rate_update = jiffies;
		if (srctrl->tx_avg_rate_num > 0) {
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
			printk(KERN_DEBUG "%s: STA " MAC_FMT " Average rate: "
			       "%d (%d/%d)\n",
			       dev->name, MAC_ARG(sta->addr),
			       srctrl->tx_avg_rate_sum /
			       srctrl->tx_avg_rate_num,
			       srctrl->tx_avg_rate_sum,
			       srctrl->tx_avg_rate_num);
#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
			srctrl->tx_avg_rate_sum = 0;
			srctrl->tx_avg_rate_num = 0;
		}
	}

	sta_info_put(sta);
}
Example 19
static bool ftmac100_rx_packet(struct ftmac100 *priv, int *processed)
{
	struct net_device *netdev = priv->netdev;
	struct ftmac100_rxdes *rxdes;
	struct sk_buff *skb;
	struct page *page;
	dma_addr_t map;
	int length;

	rxdes = ftmac100_rx_locate_first_segment(priv);
	if (!rxdes)
		return false;

	if (unlikely(ftmac100_rx_packet_error(priv, rxdes))) {
		ftmac100_rx_drop_packet(priv);
		return true;
	}

	/*
	 * Multi-segment frames should never be seen here: the RX buffers
	 * are large enough for a full frame, so a missing "last segment"
	 * flag indicates a driver or hardware bug.
	 */
	if (unlikely(!ftmac100_rxdes_last_segment(rxdes)))
		BUG();

	/* allocate a small skb; the received data stays in the page below */
	skb = netdev_alloc_skb_ip_align(netdev, 128);
	if (unlikely(!skb)) {
		if (net_ratelimit())
			netdev_err(netdev, "rx skb alloc failed\n");

		ftmac100_rx_drop_packet(priv);
		return true;
	}

	if (unlikely(ftmac100_rxdes_multicast(rxdes)))
		netdev->stats.multicast++;

	map = ftmac100_rxdes_get_dma_addr(rxdes);
	dma_unmap_page(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);

	length = ftmac100_rxdes_frame_length(rxdes);
	page = ftmac100_rxdes_get_page(rxdes);
	skb_fill_page_desc(skb, 0, page, 0, length);
	skb->len += length;
	skb->data_len += length;

	/* pull up to 64 bytes into the linear area; larger frames keep the page attached */
	if (length > 64)
		skb->truesize += PAGE_SIZE;
	__pskb_pull_tail(skb, min(length, 64));

	ftmac100_alloc_rx_page(priv, rxdes, GFP_ATOMIC);

	ftmac100_rx_pointer_advance(priv);

	skb->protocol = eth_type_trans(skb, netdev);

	netdev->stats.rx_packets++;
	netdev->stats.rx_bytes += skb->len;

	/* hand the frame to the network stack */
	netif_receive_skb(skb);

	(*processed)++;
	return true;
}
Example 20
static int ip6mr_cache_report(struct sk_buff *pkt, mifi_t mifi, int assert)
{
	struct sk_buff *skb;
	struct mrt6msg *msg;
	int ret;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (assert == MRT6MSG_WHOLEPKT)
		skb = skb_realloc_headroom(pkt, -skb_network_offset(pkt)
						+sizeof(*msg));
	else
#endif
		skb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(*msg), GFP_ATOMIC);

	if (!skb)
		return -ENOBUFS;

	/* I suppose that internal messages
	 * do not require checksums */

	skb->ip_summed = CHECKSUM_UNNECESSARY;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (assert == MRT6MSG_WHOLEPKT) {
		/* Ugly, but we have no choice with this interface.
		   Duplicate old header, fix length etc.
		   And all this only to mangle msg->im6_msgtype and
		   to set msg->im6_mbz to "mbz" :-)
		 */
		skb_push(skb, -skb_network_offset(pkt));

		skb_push(skb, sizeof(*msg));
		skb_reset_transport_header(skb);
		msg = (struct mrt6msg *)skb_transport_header(skb);
		msg->im6_mbz = 0;
		msg->im6_msgtype = MRT6MSG_WHOLEPKT;
		msg->im6_mif = reg_vif_num;
		msg->im6_pad = 0;
		ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr);
		ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr);

		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else
#endif
	{
	/*
	 *	Copy the IP header
	 */

	skb_put(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	skb_copy_to_linear_data(skb, ipv6_hdr(pkt), sizeof(struct ipv6hdr));

	/*
	 *	Add our header
	 */
	skb_put(skb, sizeof(*msg));
	skb_reset_transport_header(skb);
	msg = (struct mrt6msg *)skb_transport_header(skb);

	msg->im6_mbz = 0;
	msg->im6_msgtype = assert;
	msg->im6_mif = mifi;
	msg->im6_pad = 0;
	ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr);
	ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr);

	skb->dst = dst_clone(pkt->dst);
	skb->ip_summed = CHECKSUM_UNNECESSARY;

	skb_pull(skb, sizeof(struct ipv6hdr));
	}

	if (mroute6_socket == NULL) {
		kfree_skb(skb);
		return -EINVAL;
	}

	/*
	 *	Deliver to user space multicast routing algorithms
	 */
	if ((ret = sock_queue_rcv_skb(mroute6_socket, skb)) < 0) {
		if (net_ratelimit())
			printk(KERN_WARNING "mroute6: pending queue full, dropping entries.\n");
		kfree_skb(skb);
	}

	return ret;
}
Example 21
static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
			  struct net_device *dev)
{
	struct net *net = container_of(fq->q.net, struct net, ipv6.frags);
	struct sk_buff *fp, *head = fq->q.fragments;
	int    payload_len;
	unsigned int nhoff;

	fq_kill(fq);

	
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);

		if (!fp)
			goto out_oom;

		fp->next = head->next;
		if (!fp->next)
			fq->q.fragments_tail = fp;
		prev->next = fp;

		skb_morph(head, fq->q.fragments);
		head->next = fq->q.fragments->next;

		kfree_skb(fq->q.fragments);
		fq->q.fragments = head;
	}

	WARN_ON(head == NULL);
	WARN_ON(FRAG6_CB(head)->offset != 0);

	
	payload_len = ((head->data - skb_network_header(head)) -
		       sizeof(struct ipv6hdr) + fq->q.len -
		       sizeof(struct frag_hdr));
	if (payload_len > IPV6_MAXPLEN)
		goto out_oversize;

	
	if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
		goto out_oom;

	if (skb_has_frag_list(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
			goto out_oom;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		atomic_add(clone->truesize, &fq->q.net->mem);
	}

	nhoff = fq->nhoffset;
	skb_network_header(head)[nhoff] = skb_transport_header(head)[0];
	memmove(head->head + sizeof(struct frag_hdr), head->head,
		(head->data - head->head) - sizeof(struct frag_hdr));
	head->mac_header += sizeof(struct frag_hdr);
	head->network_header += sizeof(struct frag_hdr);

	skb_shinfo(head)->frag_list = head->next;
	skb_reset_transport_header(head);
	skb_push(head, head->data - skb_network_header(head));

	for (fp=head->next; fp; fp = fp->next) {
		head->data_len += fp->len;
		head->len += fp->len;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_COMPLETE)
			head->csum = csum_add(head->csum, fp->csum);
		head->truesize += fp->truesize;
	}
	atomic_sub(head->truesize, &fq->q.net->mem);

	head->next = NULL;
	head->dev = dev;
	head->tstamp = fq->q.stamp;
	ipv6_hdr(head)->payload_len = htons(payload_len);
	IP6CB(head)->nhoff = nhoff;

	
	if (head->ip_summed == CHECKSUM_COMPLETE)
		head->csum = csum_partial(skb_network_header(head),
					  skb_network_header_len(head),
					  head->csum);

	rcu_read_lock();
	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
	rcu_read_unlock();
	fq->q.fragments = NULL;
	fq->q.fragments_tail = NULL;
	return 1;

out_oversize:
	if (net_ratelimit())
		printk(KERN_DEBUG "ip6_frag_reasm: payload len = %d\n", payload_len);
	goto out_fail;
out_oom:
	if (net_ratelimit())
		printk(KERN_DEBUG "ip6_frag_reasm: no memory for reassembly\n");
out_fail:
	rcu_read_lock();
	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
	rcu_read_unlock();
	return -1;
}
Example 22
/* Kick device.

   Returns:  0  - queue is empty or throttled.
	    >0  - queue is not empty.

   NOTE: Called under dev->queue_lock with locally disabled BH.
*/
static inline int qdisc_restart(struct net_device *dev)
{
	struct Qdisc *q = dev->qdisc;
	struct sk_buff *skb;

	/* Dequeue packet */
	if (((skb = dev->gso_skb)) || (q && (skb = q->dequeue(q)))) {
		unsigned nolock = (dev->features & NETIF_F_LLTX);

		dev->gso_skb = NULL;

		/*
		 * When the driver has LLTX set it does its own locking
		 * in start_xmit. No need to add additional overhead by
		 * locking again. These checks are worth it because
		 * even uncongested locks can be quite expensive.
		 * The driver can do trylock like here too, in case
		 * of lock congestion it should return -1 and the packet
		 * will be requeued.
		 */
		if (!nolock) {
			if (!netif_tx_trylock(dev)) {
			collision:
				/* So, someone grabbed the driver. */

				/* It may be transient configuration error,
				   when hard_start_xmit() recurses. We detect
				   it by checking xmit owner and drop the
				   packet when deadloop is detected.
				*/
				if (dev->xmit_lock_owner == smp_processor_id()) {
					kfree_skb(skb);
					if (net_ratelimit())
						printk(KERN_DEBUG "Dead loop on netdevice %s, fix it urgently!\n", dev->name);
					goto out;
				}
				__get_cpu_var(netdev_rx_stat).cpu_collision++;
				goto requeue;
			}
		}

		{
			/* And release queue */
			spin_unlock(&dev->queue_lock);

			if (!netif_queue_stopped(dev)) {
				int ret;

				ret = dev_hard_start_xmit(skb, dev);
				if (ret == NETDEV_TX_OK) {
					if (!nolock) {
						netif_tx_unlock(dev);
					}
					spin_lock(&dev->queue_lock);
					q = dev->qdisc;
					goto out;
				}
				if (ret == NETDEV_TX_LOCKED && nolock) {
					spin_lock(&dev->queue_lock);
					q = dev->qdisc;
					goto collision;
				}
			}

			/* NETDEV_TX_BUSY - we need to requeue */
			/* Release the driver */
			if (!nolock) {
				netif_tx_unlock(dev);
			}
			spin_lock(&dev->queue_lock);
			q = dev->qdisc;
		}

		/* Device kicked us out :(
		   This is possible in three cases:

		   0. driver is locked
		   1. fastroute is enabled
		   2. device cannot determine busy state
		      before start of transmission (f.e. dialout)
		   3. device is buggy (ppp)
		 */

requeue:
		if (unlikely(q == &noop_qdisc))
			kfree_skb(skb);
		else if (skb->next)
			dev->gso_skb = skb;
		else
			q->ops->requeue(skb, q);
		netif_schedule(dev);
	}
	return 0;

out:
	BUG_ON((int) q->q.qlen < 0);
	return q->q.qlen;
}
Example 23
static void pcan_usb_read_notify(purb_t purb)
{
	int err = 0;
	struct pcan_usb_interface *usb_if = purb->context;
	struct pcandev *dev = &usb_if->dev[0];

#if 0
	DPRINTK(KERN_DEBUG "%s: %s() status=%d\n", 
	        DEVICE_NAME, __FUNCTION__, purb->status);
#endif

	// un-register outstanding urb
	atomic_dec(&usb_if->active_urbs);

	// do interleaving read
	if (!purb->status && dev->ucPhysicallyInstalled) // stop with first error
	{
		uint8_t *read_buffer_addr = purb->transfer_buffer;
		const int read_buffer_len = purb->actual_length;
		int read_buffer_size;

		// buffer interleave to increase speed
		if (read_buffer_addr == usb_if->read_buffer_addr[0])
		{
			FILL_BULK_URB(purb, usb_if->usb_dev,
			              usb_rcvbulkpipe(usb_if->usb_dev,
			                              usb_if->pipe_read.ucNumber),
			              usb_if->read_buffer_addr[1],
			              usb_if->read_buffer_size,
			              pcan_usb_read_notify, usb_if);
		}
		else
		{
			FILL_BULK_URB(purb, usb_if->usb_dev,
			              usb_rcvbulkpipe(usb_if->usb_dev,
			                              usb_if->pipe_read.ucNumber),
			              usb_if->read_buffer_addr[0],
			              usb_if->read_buffer_size,
			              pcan_usb_read_notify, usb_if);
		}

		// start next urb
		if ((err = __usb_submit_urb(purb)))
		{
			dev->nLastError = err;
			dev->dwErrorCounter++;

			printk(KERN_ERR "%s: %s() URB submit failure %d\n",
			       DEVICE_NAME, __FUNCTION__, err);
		}
		else
			atomic_inc(&usb_if->active_urbs);

#ifdef PCAN_USB_DEBUG_DECODE
		printk(KERN_INFO "%s: got %u bytes URB, "
		                 "decoding it by packets of %u bytes:\n",
		       DEVICE_NAME, read_buffer_len, usb_if->read_packet_size);
#endif

		for (read_buffer_size=0; read_buffer_size < read_buffer_len; )
		{
#ifdef PCAN_USB_DEBUG_DECODE
			printk(KERN_INFO "%s: decoding @offset %u:\n",
			       DEVICE_NAME, read_buffer_size);
#endif
			err = usb_if->device_msg_decode(usb_if, 
			                                read_buffer_addr, 
			                                usb_if->read_packet_size);
			if (err < 0)
			{
				dev->nLastError = err;
				dev->wCANStatus |= CAN_ERR_QOVERRUN;
				dev->dwErrorCounter++;

				if (net_ratelimit())
					printk(KERN_DEBUG "%s: @offset %d: message decoding error %d\n", 
					       DEVICE_NAME, read_buffer_size, err);
			}

			read_buffer_addr += usb_if->read_packet_size;
			read_buffer_size += usb_if->read_packet_size;
		}
	}
	else
	{
		if (purb->status != -ENOENT)
		{
			printk(KERN_ERR "%s: read data stream turned off caused by ", 
			       DEVICE_NAME);

			if (!dev->ucPhysicallyInstalled)
				printk("device plug out!\n");
			else if (purb->status == -ESHUTDOWN)
				printk("endpoint shutdown\n");
			else
				printk("err %d!\n", purb->status);
		}
	}
}
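A standalone plain-C sketch of the buffer interleaving shown above: while one read buffer is handed to the decoder, the URB is resubmitted pointing at the other buffer, so USB transfers and decoding can overlap. Every name below is illustrative; only the swap test mirrors the read_buffer_addr[0]/[1] comparison in the driver.

#include <stdio.h>

int main(void)
{
	char buf_a[64], buf_b[64];
	char *bufs[2] = { buf_a, buf_b };
	char *cur = bufs[0];
	int i;

	for (i = 0; i < 4; i++) {
		/* resubmit with the buffer we are NOT about to decode,
		 * exactly like the read_buffer_addr[0]/[1] test above */
		char *next = (cur == bufs[0]) ? bufs[1] : bufs[0];

		printf("decode buffer %c, resubmit URB with buffer %c\n",
		       cur == buf_a ? 'A' : 'B', next == buf_a ? 'A' : 'B');
		cur = next;
	}
	return 0;
}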
Example no. 24
static struct sk_buff *
mvswitch_mangle_tx(struct net_device *dev, struct sk_buff *skb)
{
    struct mvswitch_priv *priv;
    char *buf = NULL;
    u16 vid;

    priv = dev->phy_ptr;
    if (unlikely(!priv))
        goto error;

    if (unlikely(skb->len < 16))
        goto error;

#ifdef HEADER_MODE
    if (__vlan_hwaccel_get_tag(skb, &vid))
        goto error;

    if (skb_cloned(skb) || (skb->len <= 62) || (skb_headroom(skb) < MV_HEADER_SIZE)) {
        if (pskb_expand_head(skb, MV_HEADER_SIZE, (skb->len < 62 ? 62 - skb->len : 0), GFP_ATOMIC))
            goto error_expand;
        if (skb->len < 62)
            skb->len = 62;
    }
    buf = skb_push(skb, MV_HEADER_SIZE);
#else
    if (__vlan_get_tag(skb, &vid))
        goto error;

    if (unlikely((vid > 15 || !priv->vlans[vid])))
        goto error;

    if (skb->len <= 64) {
        if (pskb_expand_head(skb, 0, 64 + MV_TRAILER_SIZE - skb->len, GFP_ATOMIC))
            goto error_expand;

        buf = skb->data + 64;
        skb->len = 64 + MV_TRAILER_SIZE;
    } else {
        if (skb_cloned(skb) || unlikely(skb_tailroom(skb) < 4)) {
            if (pskb_expand_head(skb, 0, 4, GFP_ATOMIC))
                goto error_expand;
        }
        buf = skb_put(skb, 4);
    }

    /* move the ethernet header 4 bytes forward, overwriting the vlan tag */
    memmove(skb->data + 4, skb->data, 12);
    skb->data += 4;
    skb->len -= 4;
    skb->mac_header += 4;
#endif

    if (!buf)
        goto error;


#ifdef HEADER_MODE
    /* prepend the tag */
    *((__be16 *) buf) = cpu_to_be16(
                            ((vid << MV_HEADER_VLAN_S) & MV_HEADER_VLAN_M) |
                            ((priv->vlans[vid] << MV_HEADER_PORTS_S) & MV_HEADER_PORTS_M)
                        );
#else
    /* append the tag */
    *((__be32 *) buf) = cpu_to_be32((
                                        (MV_TRAILER_OVERRIDE << MV_TRAILER_FLAGS_S) |
                                        ((priv->vlans[vid] & MV_TRAILER_PORTS_M) << MV_TRAILER_PORTS_S)
                                    ));
#endif

    return skb;

error_expand:
    if (net_ratelimit())
        printk("%s: failed to expand/update skb for the switch\n", dev->name);

error:
    /* any errors? drop the packet! */
    dev_kfree_skb_any(skb);
    return NULL;
}
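A standalone sketch of the header-mode tag packing used above. The real MV_HEADER_* shift and mask values are not part of this excerpt, so the constants below are assumptions chosen only to demonstrate the ((value << SHIFT) & MASK) composition of the 16-bit tag.

#include <stdint.h>
#include <stdio.h>

#define HDR_VLAN_S   12           /* assumed shift for the VLAN id field */
#define HDR_VLAN_M   0xf000       /* assumed mask for the VLAN id field */
#define HDR_PORTS_S  0            /* assumed shift for the port bitmap */
#define HDR_PORTS_M  0x001f       /* assumed mask for the port bitmap */

static uint16_t mk_header_tag(uint16_t vid, uint16_t port_mask)
{
	/* same shape as the cpu_to_be16(((vid << ...) & ...) | ...) above */
	return ((vid << HDR_VLAN_S) & HDR_VLAN_M) |
	       ((port_mask << HDR_PORTS_S) & HDR_PORTS_M);
}

int main(void)
{
	/* vid 2, ports 0x3 -> 0x2003 with these illustrative constants */
	printf("0x%04x\n", (unsigned)mk_header_tag(2, 0x3));
	return 0;
}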
Example no. 25
int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
	     struct ipv6_txoptions *opt, int ipfragok)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_addr *first_hop = &fl->fl6_dst;
	struct dst_entry *dst = skb->dst;
	struct ipv6hdr *hdr;
	u8  proto = fl->proto;
	int seg_len = skb->len;
	int hlimit, tclass;
	u32 mtu;

	if (opt) {
		int head_room;

		/* First: exthdrs may take lots of space (~8K for now)
		   MAX_HEADER is not enough.
		 */
		head_room = opt->opt_nflen + opt->opt_flen;
		seg_len += head_room;
		head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);

		if (skb_headroom(skb) < head_room) {
			struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
			if (skb2 == NULL) {
				IP6_INC_STATS(ip6_dst_idev(skb->dst),
					      IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return -ENOBUFS;
			}
			kfree_skb(skb);
			skb = skb2;
			if (sk)
				skb_set_owner_w(skb, sk);
		}
		if (opt->opt_flen)
			ipv6_push_frag_opts(skb, opt, &proto);
		if (opt->opt_nflen)
			ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
	}

	hdr = skb->nh.ipv6h = (struct ipv6hdr*)skb_push(skb, sizeof(struct ipv6hdr));

	/*
	 *	Fill in the IPv6 header
	 */

	hlimit = -1;
	if (np)
		hlimit = np->hop_limit;
	if (hlimit < 0)
		hlimit = dst_metric(dst, RTAX_HOPLIMIT);
	if (hlimit < 0)
		hlimit = ipv6_get_hoplimit(dst->dev);

	tclass = -1;
	if (np)
		tclass = np->tclass;
	if (tclass < 0)
		tclass = 0;

	*(__be32 *)hdr = htonl(0x60000000 | (tclass << 20)) | fl->fl6_flowlabel;

	hdr->payload_len = htons(seg_len);
	hdr->nexthdr = proto;
	hdr->hop_limit = hlimit;

	ipv6_addr_copy(&hdr->saddr, &fl->fl6_src);
	ipv6_addr_copy(&hdr->daddr, first_hop);

	skb->priority = sk->sk_priority;

	mtu = dst_mtu(dst);
	if ((skb->len <= mtu) || ipfragok || skb_is_gso(skb)) {
		IP6_INC_STATS(ip6_dst_idev(skb->dst),
			      IPSTATS_MIB_OUTREQUESTS);
		return NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev,
				dst_output);
	}

	if (net_ratelimit())
		printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n");
	skb->dev = dst->dev;
	icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
	IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return -EMSGSIZE;
}
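A standalone sketch of the first 32-bit word of the IPv6 header assembled above: version 6 in the top nibble, the 8-bit traffic class next, and the 20-bit flow label in the low bits. The only difference from the kernel line is that fl->fl6_flowlabel is already kept pre-positioned in network byte order there, so it is OR-ed in after htonl().

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t ipv6_word0(uint8_t tclass, uint32_t flowlabel)
{
	/* version | traffic class | flow label, in host byte order */
	return (6u << 28) | ((uint32_t)tclass << 20) | (flowlabel & 0xFFFFFu);
}

int main(void)
{
	/* tclass 0, flow label 0 -> 0x60000000, the constant used above */
	printf("0x%08" PRIx32 "\n", ipv6_word0(0, 0));
	printf("0x%08" PRIx32 "\n", ipv6_word0(0x28, 0x12345));
	return 0;
}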
Example no. 26
static int help(struct sk_buff *skb, unsigned int protoff,
		struct nf_conn *ct, enum ip_conntrack_info ctinfo)
{
	unsigned int dataoff;
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct tcphdr _tcph;
	const char *data_limit;
	char *data, *ib_ptr;
	int dir = CTINFO2DIR(ctinfo);
	struct nf_conntrack_expect *exp;
	struct nf_conntrack_tuple *tuple;
	__be32 dcc_ip;
	u_int16_t dcc_port;
	__be16 port;
	int i, ret = NF_ACCEPT;
	char *addr_beg_p, *addr_end_p;
	typeof(nf_nat_irc_hook) nf_nat_irc;

	/* If packet is coming from IRC server */
	if (dir == IP_CT_DIR_REPLY)
		return NF_ACCEPT;

	/* Until there's been traffic both ways, don't look in packets. */
	if (ctinfo != IP_CT_ESTABLISHED &&
	    ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY)
		return NF_ACCEPT;

	/* Not a full tcp header? */
	th = skb_header_pointer(skb, protoff, sizeof(_tcph), &_tcph);
	if (th == NULL)
		return NF_ACCEPT;

	/* No data? */
	dataoff = protoff + th->doff*4;
	if (dataoff >= skb->len)
		return NF_ACCEPT;

	spin_lock_bh(&irc_buffer_lock);
	ib_ptr = skb_header_pointer(skb, dataoff, skb->len - dataoff,
				    irc_buffer);
	BUG_ON(ib_ptr == NULL);

	data = ib_ptr;
	data_limit = ib_ptr + skb->len - dataoff;

	/* strlen("\1DCC SENT t AAAAAAAA P\1\n")=24
	 * 5+MINMATCHLEN+strlen("t AAAAAAAA P\1\n")=14 */
	while (data < data_limit - (19 + MINMATCHLEN)) {
		if (memcmp(data, "\1DCC ", 5)) {
			data++;
			continue;
		}
		data += 5;
		/* we have at least (19+MINMATCHLEN)-5 bytes valid data left */

		iph = ip_hdr(skb);
		pr_debug("DCC found in master %pI4:%u %pI4:%u\n",
			 &iph->saddr, ntohs(th->source),
			 &iph->daddr, ntohs(th->dest));

		for (i = 0; i < ARRAY_SIZE(dccprotos); i++) {
			if (memcmp(data, dccprotos[i], strlen(dccprotos[i]))) {
				/* no match */
				continue;
			}
			data += strlen(dccprotos[i]);
			pr_debug("DCC %s detected\n", dccprotos[i]);

			/* we have at least
			 * (19+MINMATCHLEN)-5-dccprotos[i].matchlen bytes valid
			 * data left (== 14/13 bytes) */
			if (parse_dcc(data, data_limit, &dcc_ip,
				       &dcc_port, &addr_beg_p, &addr_end_p)) {
				pr_debug("unable to parse dcc command\n");
				continue;
			}

			pr_debug("DCC bound ip/port: %pI4:%u\n",
				 &dcc_ip, dcc_port);

			/* dcc_ip can be the internal OR external (NAT'ed) IP */
			tuple = &ct->tuplehash[dir].tuple;
			if (tuple->src.u3.ip != dcc_ip &&
			    tuple->dst.u3.ip != dcc_ip) {
				if (net_ratelimit())
					printk(KERN_WARNING
						"Forged DCC command from %pI4: %pI4:%u\n",
						&tuple->src.u3.ip,
						&dcc_ip, dcc_port);
				continue;
			}

			exp = nf_ct_expect_alloc(ct);
			if (exp == NULL) {
				ret = NF_DROP;
				goto out;
			}
			tuple = &ct->tuplehash[!dir].tuple;
			port = htons(dcc_port);
			nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT,
					  tuple->src.l3num,
					  NULL, &tuple->dst.u3,
					  IPPROTO_TCP, NULL, &port);

			nf_nat_irc = rcu_dereference(nf_nat_irc_hook);
			if (nf_nat_irc && ct->status & IPS_NAT_MASK)
				ret = nf_nat_irc(skb, ctinfo,
						 addr_beg_p - ib_ptr,
						 addr_end_p - addr_beg_p,
						 exp);
			else if (nf_ct_expect_related(exp) != 0)
				ret = NF_DROP;
			nf_ct_expect_put(exp);
			goto out;
		}
	}
 out:
	spin_unlock_bh(&irc_buffer_lock);
	return ret;
}
Example no. 27
/* Send RST reply */
static void send_reset(struct sk_buff *oldskb)
{
	struct sk_buff *nskb;
	struct tcphdr otcph, *tcph;
	unsigned int otcplen, hh_len;
	int tcphoff, needs_ack;
	struct ipv6hdr *oip6h = ipv6_hdr(oldskb), *ip6h;
	struct dst_entry *dst = NULL;
	u8 proto;
	struct flowi fl;

	if ((!(ipv6_addr_type(&oip6h->saddr) & IPV6_ADDR_UNICAST)) ||
	    (!(ipv6_addr_type(&oip6h->daddr) & IPV6_ADDR_UNICAST))) {
		pr_debug("ip6t_REJECT: addr is not unicast.\n");
		return;
	}

	proto = oip6h->nexthdr;
	tcphoff = ipv6_skip_exthdr(oldskb, ((u8*)(oip6h+1) - oldskb->data), &proto);

	if ((tcphoff < 0) || (tcphoff > oldskb->len)) {
		pr_debug("ip6t_REJECT: Can't get TCP header.\n");
		return;
	}

	otcplen = oldskb->len - tcphoff;

	/* IP header checks: fragment, too short. */
	if (proto != IPPROTO_TCP || otcplen < sizeof(struct tcphdr)) {
		pr_debug("ip6t_REJECT: proto(%d) != IPPROTO_TCP, "
			 "or too short. otcplen = %d\n",
			 proto, otcplen);
		return;
	}

	if (skb_copy_bits(oldskb, tcphoff, &otcph, sizeof(struct tcphdr)))
		BUG();

	/* No RST for RST. */
	if (otcph.rst) {
		pr_debug("ip6t_REJECT: RST is set\n");
		return;
	}

	/* Check checksum. */
	if (csum_ipv6_magic(&oip6h->saddr, &oip6h->daddr, otcplen, IPPROTO_TCP,
			    skb_checksum(oldskb, tcphoff, otcplen, 0))) {
		pr_debug("ip6t_REJECT: TCP checksum is invalid\n");
		return;
	}

	memset(&fl, 0, sizeof(fl));
	fl.proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl.fl6_src, &oip6h->daddr);
	ipv6_addr_copy(&fl.fl6_dst, &oip6h->saddr);
	fl.fl_ip_sport = otcph.dest;
	fl.fl_ip_dport = otcph.source;
	security_skb_classify_flow(oldskb, &fl);
	dst = ip6_route_output(NULL, &fl);
	if (dst == NULL)
		return;
	if (dst->error || xfrm_lookup(&dst, &fl, NULL, 0))
		return;

	hh_len = (dst->dev->hard_header_len + 15)&~15;
	nskb = alloc_skb(hh_len + 15 + dst->header_len + sizeof(struct ipv6hdr)
			 + sizeof(struct tcphdr) + dst->trailer_len,
			 GFP_ATOMIC);

	if (!nskb) {
		if (net_ratelimit())
			printk("ip6t_REJECT: Can't alloc skb\n");
		dst_release(dst);
		return;
	}

	nskb->dst = dst;

	skb_reserve(nskb, hh_len + dst->header_len);

	skb_put(nskb, sizeof(struct ipv6hdr));
	skb_reset_network_header(nskb);
	ip6h = ipv6_hdr(nskb);
	ip6h->version = 6;
	ip6h->hop_limit = dst_metric(dst, RTAX_HOPLIMIT);
	ip6h->nexthdr = IPPROTO_TCP;
	ip6h->payload_len = htons(sizeof(struct tcphdr));
	ipv6_addr_copy(&ip6h->saddr, &oip6h->daddr);
	ipv6_addr_copy(&ip6h->daddr, &oip6h->saddr);

	tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr));
	/* Truncate to length (no data) */
	tcph->doff = sizeof(struct tcphdr)/4;
	tcph->source = otcph.dest;
	tcph->dest = otcph.source;

	if (otcph.ack) {
		needs_ack = 0;
		tcph->seq = otcph.ack_seq;
		tcph->ack_seq = 0;
	} else {
		needs_ack = 1;
		tcph->ack_seq = htonl(ntohl(otcph.seq) + otcph.syn + otcph.fin
				      + otcplen - (otcph.doff<<2));
		tcph->seq = 0;
	}

	/* Reset flags */
	((u_int8_t *)tcph)[13] = 0;
	tcph->rst = 1;
	tcph->ack = needs_ack;
	tcph->window = 0;
	tcph->urg_ptr = 0;
	tcph->check = 0;

	/* Adjust TCP checksum */
	tcph->check = csum_ipv6_magic(&ipv6_hdr(nskb)->saddr,
				      &ipv6_hdr(nskb)->daddr,
				      sizeof(struct tcphdr), IPPROTO_TCP,
				      csum_partial(tcph,
						   sizeof(struct tcphdr), 0));

	nf_ct_attach(nskb, oldskb);

	NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, nskb, NULL, nskb->dst->dev,
		dst_output);
}
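A standalone sketch of the acknowledgment arithmetic used above when the offending segment carried no ACK: the reset must acknowledge everything that segment consumed, i.e. its sequence number plus one for SYN, one for FIN, and the payload length (total TCP length minus the header length given by doff).

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t rst_ack_seq(uint32_t seq, int syn, int fin,
                            unsigned otcplen, unsigned doff_words)
{
	/* mirrors: ntohl(otcph.seq) + otcph.syn + otcph.fin
	 *          + otcplen - (otcph.doff << 2) */
	return seq + syn + fin + (otcplen - (doff_words << 2));
}

int main(void)
{
	/* bare SYN: seq 1000, 20-byte header, no data -> ACK 1001 */
	printf("%" PRIu32 "\n", rst_ack_seq(1000, 1, 0, 20, 5));
	return 0;
}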
Example no. 28
static int cpmac_poll(struct napi_struct *napi, int budget)
{
	struct sk_buff *skb;
	struct cpmac_desc *desc, *restart;
	struct cpmac_priv *priv = container_of(napi, struct cpmac_priv, napi);
	int received = 0, processed = 0;

	spin_lock(&priv->rx_lock);
	if (unlikely(!priv->rx_head)) {
		if (netif_msg_rx_err(priv) && net_ratelimit())
			printk(KERN_WARNING "%s: rx: polling, but no queue\n",
			       priv->dev->name);
		spin_unlock(&priv->rx_lock);
		napi_complete(napi);
		return 0;
	}

	desc = priv->rx_head;
	restart = NULL;
	while (((desc->dataflags & CPMAC_OWN) == 0) && (received < budget)) {
		processed++;

		if ((desc->dataflags & CPMAC_EOQ) != 0) {
			/* The last update to eoq->hw_next didn't happen
			 * soon enough, and the receiver stopped here.
			 * Remember this descriptor so we can restart
			 * the receiver after freeing some space.
			 */
			if (unlikely(restart)) {
				if (netif_msg_rx_err(priv))
					printk(KERN_ERR "%s: poll found a"
						" duplicate EOQ: %p and %p\n",
						priv->dev->name, restart, desc);
				goto fatal_error;
			}

			restart = desc->next;
		}

		skb = cpmac_rx_one(priv, desc);
		if (likely(skb)) {
			netif_receive_skb(skb);
			received++;
		}
		desc = desc->next;
	}

	if (desc != priv->rx_head) {
		/* We freed some buffers, but not the whole ring,
		 * add what we did free to the rx list */
		desc->prev->hw_next = (u32)0;
		priv->rx_head->prev->hw_next = priv->rx_head->mapping;
	}

	/* Optimization: if we did not actually process an EOQ (perhaps because
	 * of quota limits), check to see if the tail of the queue has EOQ set.
	 * We should immediately restart in that case so that the receiver can
	 * restart and run in parallel with more packet processing.
	 * This lets us handle slightly larger bursts before running
	 * out of ring space (assuming dev->weight < ring_size). */

	if (!restart &&
	     (priv->rx_head->prev->dataflags & (CPMAC_OWN|CPMAC_EOQ))
		    == CPMAC_EOQ &&
	     (priv->rx_head->dataflags & CPMAC_OWN) != 0) {
		/* reset EOQ so the poll loop (above) doesn't try to
		 * restart this when it eventually gets to this descriptor.
		 */
		priv->rx_head->prev->dataflags &= ~CPMAC_EOQ;
		restart = priv->rx_head;
	}

	if (restart) {
		priv->dev->stats.rx_errors++;
		priv->dev->stats.rx_fifo_errors++;
		if (netif_msg_rx_err(priv) && net_ratelimit())
			printk(KERN_WARNING "%s: rx dma ring overrun\n",
			       priv->dev->name);

		if (unlikely((restart->dataflags & CPMAC_OWN) == 0)) {
			if (netif_msg_drv(priv))
				printk(KERN_ERR "%s: cpmac_poll is trying to "
					"restart rx from a descriptor that's "
					"not free: %p\n",
					priv->dev->name, restart);
			goto fatal_error;
		}

		cpmac_write(priv->regs, CPMAC_RX_PTR(0), restart->mapping);
	}

	priv->rx_head = desc;
	spin_unlock(&priv->rx_lock);
	if (unlikely(netif_msg_rx_status(priv)))
		printk(KERN_DEBUG "%s: poll processed %d packets\n",
		       priv->dev->name, received);
	if (processed == 0) {
		/* we ran out of packets to read,
		 * revert to interrupt-driven mode */
		napi_complete(napi);
		cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
		return 0;
	}

	return 1;

fatal_error:
	/* Something went horribly wrong.
	 * Reset hardware to try to recover rather than wedging. */

	if (netif_msg_drv(priv)) {
		printk(KERN_ERR "%s: cpmac_poll is confused. "
				"Resetting hardware\n", priv->dev->name);
		cpmac_dump_all_desc(priv->dev);
		printk(KERN_DEBUG "%s: RX_PTR(0)=0x%08x RX_ACK(0)=0x%08x\n",
			priv->dev->name,
			cpmac_read(priv->regs, CPMAC_RX_PTR(0)),
			cpmac_read(priv->regs, CPMAC_RX_ACK(0)));
	}

	spin_unlock(&priv->rx_lock);
	napi_complete(napi);
	netif_tx_stop_all_queues(priv->dev);
	napi_disable(&priv->napi);

	atomic_inc(&priv->reset_pending);
	cpmac_hw_stop(priv->dev);
	if (!schedule_work(&priv->reset_work))
		atomic_dec(&priv->reset_pending);
	return 0;
}
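A standalone model of the restart test in the optimization comment above: the receiver must be restarted when the descriptor at the ring tail shows EOQ with OWN already cleared while the ring head is still owned by the hardware. The flag bit values below are assumptions; only the condition mirrors the driver.

#include <stdbool.h>
#include <stdio.h>

#define OWN 0x1 /* assumed value for CPMAC_OWN */
#define EOQ 0x2 /* assumed value for CPMAC_EOQ */

static bool needs_restart(unsigned prev_flags, unsigned head_flags)
{
	/* mirrors: (rx_head->prev->dataflags & (CPMAC_OWN|CPMAC_EOQ)) == CPMAC_EOQ
	 *          && (rx_head->dataflags & CPMAC_OWN) != 0 */
	return (prev_flags & (OWN | EOQ)) == EOQ && (head_flags & OWN) != 0;
}

int main(void)
{
	printf("%d\n", needs_restart(EOQ, OWN));       /* 1: restart needed */
	printf("%d\n", needs_restart(OWN | EOQ, OWN)); /* 0: hw still owns tail */
	return 0;
}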
Example no. 29
static void enic_rq_indicate_buf(struct vnic_rq *rq,
	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
	int skipped, void *opaque)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct net_device *netdev = enic->netdev;
	struct sk_buff *skb;

	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
	u8 packet_error;
	u16 q_number, completed_index, bytes_written, vlan, checksum;
	u32 rss_hash;

	if (skipped)
		return;

	skb = buf->os_buf;
	prefetch(skb->data - NET_IP_ALIGN);
	pci_unmap_single(enic->pdev, buf->dma_addr,
		buf->len, PCI_DMA_FROMDEVICE);

	cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
		&type, &color, &q_number, &completed_index,
		&ingress_port, &fcoe, &eop, &sop, &rss_type,
		&csum_not_calc, &rss_hash, &bytes_written,
		&packet_error, &vlan_stripped, &vlan, &checksum,
		&fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
		&fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
		&ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
		&fcs_ok);

	if (packet_error) {

		if (bytes_written > 0 && !fcs_ok) {
			if (net_ratelimit())
				printk(KERN_ERR PFX
					"%s: packet error: bad FCS\n",
					netdev->name);
		}

		dev_kfree_skb_any(skb);

		return;
	}

	if (eop && bytes_written > 0) {

		/* Good receive
		 */

		skb_put(skb, bytes_written);
		skb->protocol = eth_type_trans(skb, netdev);

		if (enic->csum_rx_enabled && !csum_not_calc) {
			skb->csum = htons(checksum);
			skb->ip_summed = CHECKSUM_COMPLETE;
		}

		skb->dev = netdev;

		if (enic->vlan_group && vlan_stripped) {

			if ((netdev->features & NETIF_F_LRO) && ipv4)
				lro_vlan_hwaccel_receive_skb(&enic->lro_mgr,
					skb, enic->vlan_group,
					vlan, cq_desc);
			else
				vlan_hwaccel_receive_skb(skb,
					enic->vlan_group, vlan);

		} else {

			if ((netdev->features & NETIF_F_LRO) && ipv4)
				lro_receive_skb(&enic->lro_mgr, skb, cq_desc);
			else
				netif_receive_skb(skb);

		}

	} else {

		/* Buffer overflow
		 */

		dev_kfree_skb_any(skb);
	}
}
Example no. 30
static int help(struct sk_buff **pskb,
                struct ip_conntrack *ct, enum ip_conntrack_info ctinfo)
{
	struct ts_state ts;
	struct ip_conntrack_expect *exp;
	unsigned int dataoff, start, stop, off, i;
	char pbuf[sizeof("65535")], *tmp;
	u_int16_t port, len;
	int ret = NF_ACCEPT;

	/* Only look at packets from the Amanda server */
	if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
		return NF_ACCEPT;

	/* increase the UDP timeout of the master connection as replies from
	 * Amanda clients to the server can be quite delayed */
	ip_ct_refresh(ct, *pskb, master_timeout * HZ);

	/* No data? */
	dataoff = (*pskb)->nh.iph->ihl*4 + sizeof(struct udphdr);
	if (dataoff >= (*pskb)->len) {
		if (net_ratelimit())
			printk("amanda_help: skblen = %u\n", (*pskb)->len);
		return NF_ACCEPT;
	}

	memset(&ts, 0, sizeof(ts));
	start = skb_find_text(*pskb, dataoff, (*pskb)->len,
			      search[SEARCH_CONNECT].ts, &ts);
	if (start == UINT_MAX)
		goto out;
	start += dataoff + search[SEARCH_CONNECT].len;

	memset(&ts, 0, sizeof(ts));
	stop = skb_find_text(*pskb, start, (*pskb)->len,
			     search[SEARCH_NEWLINE].ts, &ts);
	if (stop == UINT_MAX)
		goto out;
	stop += start;

	for (i = SEARCH_DATA; i <= SEARCH_INDEX; i++) {
		memset(&ts, 0, sizeof(ts));
		off = skb_find_text(*pskb, start, stop, search[i].ts, &ts);
		if (off == UINT_MAX)
			continue;
		off += start + search[i].len;

		len = min_t(unsigned int, sizeof(pbuf) - 1, stop - off);
		if (skb_copy_bits(*pskb, off, pbuf, len))
			break;
		pbuf[len] = '\0';

		port = simple_strtoul(pbuf, &tmp, 10);
		len = tmp - pbuf;
		if (port == 0 || len > 5)
			break;

		exp = ip_conntrack_expect_alloc(ct);
		if (exp == NULL) {
			ret = NF_DROP;
			goto out;
		}

		exp->expectfn = NULL;
		exp->flags = 0;

		exp->tuple.src.ip = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip;
		exp->tuple.src.u.tcp.port = 0;
		exp->tuple.dst.ip = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.ip;
		exp->tuple.dst.protonum = IPPROTO_TCP;
		exp->tuple.dst.u.tcp.port = htons(port);

		exp->mask.src.ip = htonl(0xFFFFFFFF);
		exp->mask.src.u.tcp.port = 0;
		exp->mask.dst.ip = htonl(0xFFFFFFFF);
		exp->mask.dst.protonum = 0xFF;
		exp->mask.dst.u.tcp.port = htons(0xFFFF);

		if (ip_nat_amanda_hook)
			ret = ip_nat_amanda_hook(pskb, ctinfo, off - dataoff,
						 len, exp);
		else if (ip_conntrack_expect_related(exp) != 0)
			ret = NF_DROP;
		ip_conntrack_expect_put(exp);
	}

out:
	return ret;
}
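A standalone sketch of the port parsing performed above: copy at most five digits into a small buffer, parse them with strtoul(), and reject a zero port or an over-long number, mirroring the "port == 0 || len > 5" check. All names below are illustrative.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static unsigned long parse_port(const char *data, size_t avail)
{
	char pbuf[sizeof("65535")];
	char *end;
	size_t len = avail < sizeof(pbuf) - 1 ? avail : sizeof(pbuf) - 1;
	unsigned long port;

	memcpy(pbuf, data, len);
	pbuf[len] = '\0';

	port = strtoul(pbuf, &end, 10);
	/* same rejection as the helper: zero port or too many digits */
	if (port == 0 || end - pbuf > 5)
		return 0;
	return port;
}

int main(void)
{
	printf("%lu\n", parse_port("10080 CONNECT", 13)); /* 10080 */
	printf("%lu\n", parse_port("0 DATA", 6));         /* rejected -> 0 */
	return 0;
}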