Example #1
static int
tbf_requeue(struct sk_buff *skb, struct Qdisc* sch)
{
	/* Put the packet back at the head of the qdisc's own queue and
	 * add its length back to the backlog byte count. */
	__skb_queue_head(&sch->q, skb);
	sch->stats.backlog += skb->len;
	return 1;
}
Example #2
static int
sfq_requeue(struct sk_buff *skb, struct Qdisc* sch)
{
	struct sfq_sched_data *q = (struct sfq_sched_data *)sch->data;
	unsigned hash = sfq_hash(q, skb);
	sfq_index x;

	x = q->ht[hash];
	if (x == SFQ_DEPTH) {
		q->ht[hash] = x = q->dep[SFQ_DEPTH].next;
		q->hash[x] = hash;
	}
	__skb_queue_head(&q->qs[x], skb);
	sfq_inc(q, x);
	if (q->qs[x].qlen == 1) {		/* The flow is new */
		if (q->tail == SFQ_DEPTH) {	/* It is the first flow */
			q->tail = x;
			q->next[x] = x;
			q->allot[x] = q->quantum;
		} else {
			q->next[x] = q->next[q->tail];
			q->next[q->tail] = x;
			q->tail = x;
		}
	}
	if (++sch->q.qlen < q->limit - 1)
		return 0;

	sch->stats.drops++;
	sfq_drop(sch);
	return NET_XMIT_CN;
}
Example #3
void skb_queue_head(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	pthread_mutex_lock(&list->lock);
	__skb_queue_head(list, newsk);
	pthread_mutex_unlock(&list->lock);
}
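Every example in this listing leans on the same primitive: __skb_queue_head() links a buffer in at the front of a doubly linked sk_buff list and bumps the list's qlen, and it does so without taking any lock — callers provide their own serialization, as the wrapper in Example #3 above and the local_irq_save() pattern in skb_head_to_pool() further down show. Below is a minimal user-space sketch of that idea; struct node and struct queue are hypothetical simplified stand-ins for the real struct sk_buff and struct sk_buff_head, not the kernel implementation.

#include <stdio.h>

/* Hypothetical stand-ins for struct sk_buff / struct sk_buff_head:
 * a circular doubly linked list whose sentinel node is the list head. */
struct node {
	struct node *next, *prev;
	int id;
};

struct queue {
	struct node head;	/* sentinel: head.next is the front */
	unsigned int qlen;
};

static void q_init(struct queue *q)
{
	q->head.next = q->head.prev = &q->head;
	q->qlen = 0;
}

/* Analogue of __skb_queue_head(): link the new node directly after the
 * sentinel, i.e. at the front of the queue, and bump qlen.  No locking,
 * just like the kernel primitive. */
static void q_push_head(struct queue *q, struct node *n)
{
	struct node *first = q->head.next;

	n->next = first;
	n->prev = &q->head;
	first->prev = n;
	q->head.next = n;
	q->qlen++;
}

int main(void)
{
	struct queue q;
	struct node a = { .id = 1 }, b = { .id = 2 };

	q_init(&q);
	q_push_head(&q, &a);
	q_push_head(&q, &b);	/* b now sits in front of a */
	printf("front=%d qlen=%u\n", q.head.next->id, q.qlen);
	return 0;
}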
Example #4
static int
teql_requeue(struct sk_buff *skb, struct Qdisc* sch)
{
	struct teql_sched_data *q = (struct teql_sched_data *)sch->data;

	__skb_queue_head(&q->q, skb);
	return 0;
}
Example #5
static int
sfq_q_enqueue(struct sk_buff *skb, struct sfq_sched_data *q, int end)
{
	unsigned hash = sfq_hash(q, skb);
	sfq_index x;

	x = q->ht[hash];
	if (x == q->depth) {
		q->ht[hash] = x = q->dep[q->depth].next;
		q->hash[x] = hash;
	}

	if (end == SFQ_TAIL) {
		/* If selected queue has length q->limit, this means that
		 * all other queues are empty and that we do simple tail drop,
		 * i.e. drop _this_ packet.
		 */
		if (q->qs[x].qlen >= q->limit) {
			unsigned int drop_len = skb->len;

			kfree_skb(skb);
			return drop_len;
		}
		__skb_queue_tail(&q->qs[x], skb);
	} else { /* end == SFQ_HEAD */
		__skb_queue_head(&q->qs[x], skb);
		/* If selected queue has length q->limit+1, this means that
		 * all other queues are empty and we do simple tail drop.
		 * This packet is still requeued at head of queue, tail packet
		 * is dropped.
		 */
		if (q->qs[x].qlen > q->limit) {
			unsigned int drop_len;

			skb = q->qs[x].prev;
			drop_len = skb->len;
			__skb_unlink(skb, &q->qs[x]);
			kfree_skb(skb);
			return drop_len;
		}
	}

	sfq_inc(q, x);
	if (q->qs[x].qlen == 1) {		/* The flow is new */
		if (q->tail == q->depth) {	/* It is the first flow */
			q->tail = x;
			q->next[x] = x;
			q->allot[x] = q->quantum;
		} else {
			q->next[x] = q->next[q->tail];
			q->next[q->tail] = x;
			q->tail = x;
		}
	}

	return 0;
}
Example #6
static int start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int num, err;
	struct scatterlist sg[1+MAX_SKB_FRAGS];
	struct virtio_net_hdr *hdr;
	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
	DECLARE_MAC_BUF(mac);

	sg_init_table(sg, 1+MAX_SKB_FRAGS);

	pr_debug("%s: xmit %p %s\n", dev->name, skb, print_mac(mac, dest));

	free_old_xmit_skbs(vi);

	/* Encode metadata header at front. */
	hdr = skb_vnet_hdr(skb);
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
		hdr->csum_start = skb->csum_start - skb_headroom(skb);
		hdr->csum_offset = skb->csum_offset;
	} else {
		hdr->flags = 0;
		hdr->csum_offset = hdr->csum_start = 0;
	}

	if (skb_is_gso(skb)) {
		hdr->gso_size = skb_shinfo(skb)->gso_size;
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
			hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4_ECN;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
			hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
		else
			BUG();
	} else {
		hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;
		hdr->gso_size = 0;
	}

	vnet_hdr_to_sg(sg, skb);
	num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
	__skb_queue_head(&vi->send, skb);
	err = vi->svq->vq_ops->add_buf(vi->svq, sg, num, 0, skb);
	if (err) {
		pr_debug("%s: virtio not prepared to send\n", dev->name);
		skb_unlink(skb, &vi->send);
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}
	vi->svq->vq_ops->kick(vi->svq);

	return 0;
}
Example #7
static int
pfifo_fast_requeue(struct sk_buff *skb, struct Qdisc* qdisc)
{
	struct sk_buff_head *list;

	list = ((struct sk_buff_head*)qdisc->data) +
		prio2band[skb->priority&TC_PRIO_MAX];

	/* Put the packet back at the head of the band selected above from
	 * the packet priority. */
	__skb_queue_head(list, skb);
	qdisc->q.qlen++;
	return 0;
}
Example #8
static int
pfifo_fast_requeue(struct sk_buff *skb, struct Qdisc* qdisc)
{
	struct sk_buff_head *list = qdisc_priv(qdisc);

	list += prio2band[skb->priority&TC_PRIO_MAX];

	__skb_queue_head(list, skb);
	qdisc->q.qlen++;
	qdisc->qstats.requeues++;
	return 0;
}
Example #9
static __inline__ void skb_head_to_pool(struct sk_buff *skb)
{
	struct sk_buff_head *list = &skb_head_pool[smp_processor_id()].list;

	if (skb_queue_len(list) < sysctl_hot_list_len) {
		unsigned long flags;

		local_irq_save(flags);
		__skb_queue_head(list, skb);
		local_irq_restore(flags);

		return;
	}
	kmem_cache_free(skbuff_head_cache, skb);
}
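skb_head_to_pool() above only shows the return-to-pool half of this per-CPU hot-list scheme. For completeness, here is a hedged sketch of what the matching allocation half typically looks like; the name skb_head_from_pool and the NULL-return fallback are assumptions for illustration, not code taken from this listing.

/* Hypothetical counterpart to skb_head_to_pool() above: try to reuse a
 * recycled skb head from this CPU's hot list, returning NULL when the
 * list is empty so the caller can fall back to the slab cache.
 * Interrupts are disabled around the unlocked __skb_dequeue() for the
 * same reason they are around the __skb_queue_head() above. */
static __inline__ struct sk_buff *skb_head_from_pool(void)
{
	struct sk_buff_head *list = &skb_head_pool[smp_processor_id()].list;
	struct sk_buff *skb = NULL;

	if (skb_queue_len(list)) {
		unsigned long flags;

		local_irq_save(flags);
		skb = __skb_dequeue(list);
		local_irq_restore(flags);
	}
	return skb;
}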
Example #10
static int
sfq_requeue(struct sk_buff *skb, struct Qdisc* sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned hash = sfq_hash(q, skb);
	sfq_index x;

	x = q->ht[hash];
	if (x == SFQ_DEPTH) {
		q->ht[hash] = x = q->dep[SFQ_DEPTH].next;
		q->hash[x] = hash;
	}
	sch->qstats.backlog += skb->len;
	__skb_queue_head(&q->qs[x], skb);
	/* If selected queue has length q->limit+1, this means that
	 * all other queues are empty and we do simple tail drop.
	 * This packet is still requeued at head of queue, tail packet
	 * is dropped.
	 */
	if (q->qs[x].qlen > q->limit) {
		skb = q->qs[x].prev;
		__skb_unlink(skb, &q->qs[x]);
		sch->qstats.drops++;
		sch->qstats.backlog -= skb->len;
		kfree_skb(skb);
		return NET_XMIT_CN;
	}
	sfq_inc(q, x);
	if (q->qs[x].qlen == 1) {		/* The flow is new */
		if (q->tail == SFQ_DEPTH) {	/* It is the first flow */
			q->tail = x;
			q->next[x] = x;
			q->allot[x] = q->quantum;
		} else {
			q->next[x] = q->next[q->tail];
			q->next[q->tail] = x;
			q->tail = x;
		}
	}
	if (++sch->q.qlen <= q->limit) {
		sch->qstats.requeues++;
		return 0;
	}

	sch->qstats.drops++;
	sfq_drop(sch);
	return NET_XMIT_CN;
}
Example #11
static int try_shortcut(struct sk_buff *shortcut, struct sk_buff *skb,
			struct sk_buff_head *head, struct mptcp_cb *mpcb)
{
	struct tcp_sock *tp;
	struct sk_buff *skb1, *best_shortcut = NULL;
	u32 seq = TCP_SKB_CB(skb)->seq;
	u32 end_seq = TCP_SKB_CB(skb)->end_seq;
	u32 distance = 0xffffffff;

	/* First, check the tp's shortcut */
	if (!shortcut) {
		if (skb_queue_empty(head)) {
			__skb_queue_head(head, skb);
			return 0;
		}
	} else {
		/* Is the tp's shortcut a hit? If yes, we insert. */
		if (mptcp_ofo_queue_after(head, skb, shortcut))
			goto clean_covered;
	}

	/* Check the shortcuts of the other subsockets. */
	mptcp_for_each_tp(mpcb, tp) {
		shortcut = tp->mptcp->shortcut_ofoqueue;
		/* Can we queue it here? If yes, do so! */
		if (shortcut && mptcp_ofo_queue_after(head, skb, shortcut))
			goto clean_covered;

		/* Could not queue it, check if we are close.
		 * We are looking for a shortcut, close enough to seq to
		 * set skb1 prematurely and thus improve the subsequent lookup,
		 * which tries to find a skb1 so that skb1->seq <= seq.
		 *
		 * So, here we only take shortcuts, whose shortcut->seq > seq,
		 * and minimize the distance between shortcut->seq and seq and
		 * set best_shortcut to this one with the minimal distance.
		 *
		 * That way, the subsequent while-loop is shortest.
		 */
		if (shortcut && after(TCP_SKB_CB(shortcut)->seq, seq)) {
			/* Are we closer than the current best shortcut? */
			if ((u32)(seq - TCP_SKB_CB(shortcut)->seq) < distance) {
				distance = (u32)(seq - TCP_SKB_CB(shortcut)->seq);
				best_shortcut = shortcut;
			}
		}
	}
Example #12
/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 * 	NET_XMIT_DROP: queue length didn't change.
 *      NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	int count = 1;

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Drop packet? */
	if (loss_event(q))
		--count;

	if (count == 0) {
		sch->qstats.drops++;
		kfree_skb(skb);
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}

	skb_orphan(skb);

	/*
	 * If we need to duplicate packet, then re-insert at top of the
	 * qdisc tree, since parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = qdisc_root(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
		q->duplicate = 0;

		qdisc_enqueue_root(skb2, rootq);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * Make a copy if needed since we are modifying it.
	 * If packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
		    (skb->ip_summed == CHECKSUM_PARTIAL &&
		     skb_checksum_help(skb)))
			return qdisc_drop(skb, sch);

		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
	}

	if (unlikely(skb_queue_len(&sch->q) >= sch->limit))
		return qdisc_reshape_fail(skb, sch);

	sch->qstats.backlog += qdisc_pkt_len(skb);

	cb = netem_skb_cb(skb);
	if (q->gap == 0 ||		/* not doing reordering */
	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
	    q->reorder < get_crandom(&q->reorder_cor)) {
		psched_time_t now;
		psched_tdiff_t delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = psched_get_time();

		if (q->rate) {
			struct sk_buff_head *list = &sch->q;

			delay += packet_len_2_sched_time(skb->len, q);

			if (!skb_queue_empty(list)) {
				/*
				 * Last packet in queue is reference point (now).
				 * First packet in queue is already in flight,
				 * calculate this time bonus and subtract
				 * from delay.
				 */
				delay -= now - netem_skb_cb(skb_peek(list))->time_to_send;
				now = netem_skb_cb(skb_peek_tail(list))->time_to_send;
			}
		}

		cb->time_to_send = now + delay;
		++q->counter;
		tfifo_enqueue(skb, sch);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = psched_get_time();
		q->counter = 0;

		__skb_queue_head(&sch->q, skb);
		sch->qstats.requeues++;
	}

	return NET_XMIT_SUCCESS;
}
Example #13
static int accept(struct socket *sock, struct socket *new_sock, int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *buf;
	int res;

	lock_sock(sk);

	if (sock->state != SS_LISTENING) {
		res = -EINVAL;
		goto exit;
	}

	while (skb_queue_empty(&sk->sk_receive_queue)) {
		if (flags & O_NONBLOCK) {
			res = -EWOULDBLOCK;
			goto exit;
		}
		release_sock(sk);
		res = wait_event_interruptible(*sk_sleep(sk),
				(!skb_queue_empty(&sk->sk_receive_queue)));
		lock_sock(sk);
		if (res)
			goto exit;
	}

	buf = skb_peek(&sk->sk_receive_queue);

	res = tipc_create(sock_net(sock->sk), new_sock, 0, 0);
	if (!res) {
		struct sock *new_sk = new_sock->sk;
		struct tipc_sock *new_tsock = tipc_sk(new_sk);
		struct tipc_port *new_tport = new_tsock->p;
		u32 new_ref = new_tport->ref;
		struct tipc_msg *msg = buf_msg(buf);

		lock_sock(new_sk);

		/*
		 * Reject any stray messages received by new socket
		 * before the socket lock was taken (very, very unlikely)
		 */

		reject_rx_queue(new_sk);

		/* Connect new socket to its peer */

		new_tsock->peer_name.ref = msg_origport(msg);
		new_tsock->peer_name.node = msg_orignode(msg);
		tipc_connect2port(new_ref, &new_tsock->peer_name);
		new_sock->state = SS_CONNECTED;

		tipc_set_portimportance(new_ref, msg_importance(msg));
		if (msg_named(msg)) {
			new_tport->conn_type = msg_nametype(msg);
			new_tport->conn_instance = msg_nameinst(msg);
		}

		/*
		 * Respond to 'SYN-' by discarding it & returning 'ACK'-.
		 * Respond to 'SYN+' by queuing it on new socket.
		 */

		if (!msg_data_sz(msg)) {
			struct msghdr m = {NULL,};

			advance_rx_queue(sk);
			send_packet(NULL, new_sock, &m, 0);
		} else {
			__skb_dequeue(&sk->sk_receive_queue);
			__skb_queue_head(&new_sk->sk_receive_queue, buf);
		}
		release_sock(new_sk);
	}
exit:
	release_sock(sk);
	return res;
}
Example #14
static struct sk_buff *
tbf_dequeue(struct Qdisc* sch)
{
	struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
	struct sk_buff *skb;
	
	skb = __skb_dequeue(&sch->q);

	if (skb) {
		psched_time_t now;
		long toks;
		long ptoks = 0;

		PSCHED_GET_TIME(now);

		toks = PSCHED_TDIFF_SAFE(now, q->t_c, q->buffer, 0);

		if (q->P_tab) {
			ptoks = toks + q->ptokens;
			if (ptoks > (long)q->mtu)
				ptoks = q->mtu;
			ptoks -= L2T_P(q, skb->len);
		}
		toks += q->tokens;
		if (toks > (long)q->buffer)
			toks = q->buffer;
		toks -= L2T(q, skb->len);

		if ((toks|ptoks) >= 0) {
			q->t_c = now;
			q->tokens = toks;
			q->ptokens = ptoks;
			sch->stats.backlog -= skb->len;
			return skb;
		}

		if (!sch->dev->tbusy) {
			long delay = PSCHED_US2JIFFIE(max(-toks, -ptoks));

			if (delay == 0)
				delay = 1;

			del_timer(&q->wd_timer);
			q->wd_timer.expires = jiffies + delay;
			add_timer(&q->wd_timer);
		}

		/* Maybe we have a shorter packet in the queue,
		   which can be sent now. It sounds cool,
		   but it is wrong in principle.
		   We MUST NOT reorder packets under these circumstances.

		   Really, if we split the flow into independent
		   subflows, it would be a very good solution.
		   This is the main idea of all FQ algorithms
		   (cf. CSZ, HPFQ, HFCS)
		 */
		__skb_queue_head(&sch->q, skb);

		sch->stats.overlimits++;
	}
	return NULL;
}
Example #15
/**
 * stmmac_tx:
 * @priv: private driver structure
 * Description: it reclaims resources after transmission completes.
 */
static void stmmac_tx(struct stmmac_priv *priv)
{
	unsigned int txsize = priv->dma_tx_size;

	while (priv->dirty_tx != priv->cur_tx) {
		int last;
		unsigned int entry = priv->dirty_tx % txsize;
		struct sk_buff *skb = priv->tx_skbuff[entry];
		struct dma_desc *p = priv->dma_tx + entry;

		/* Check if the descriptor is owned by the DMA. */
		if (priv->hw->desc->get_tx_owner(p))
			break;

		/* Verify tx error by looking at the last segment */
		last = priv->hw->desc->get_tx_ls(p);
		if (likely(last)) {
			int tx_error =
				priv->hw->desc->tx_status(&priv->dev->stats,
							  &priv->xstats, p,
							  priv->ioaddr);
			if (likely(tx_error == 0)) {
				priv->dev->stats.tx_packets++;
				priv->xstats.tx_pkt_n++;
			} else
				priv->dev->stats.tx_errors++;
		}
		TX_DBG("%s: curr %d, dirty %d\n", __func__,
			priv->cur_tx, priv->dirty_tx);

		if (likely(p->des2))
			dma_unmap_single(priv->device, p->des2,
					 priv->hw->desc->get_tx_len(p),
					 DMA_TO_DEVICE);
		if (unlikely(p->des3))
			p->des3 = 0;

		if (likely(skb != NULL)) {
			/*
			 * If there's room in the queue (limit it to size)
			 * we add this skb back into the pool,
			 * if it's the right size.
			 */
			if ((skb_queue_len(&priv->rx_recycle) <
				priv->dma_rx_size) &&
				skb_recycle_check(skb, priv->dma_buf_sz))
				__skb_queue_head(&priv->rx_recycle, skb);
			else
				dev_kfree_skb(skb);

			priv->tx_skbuff[entry] = NULL;
		}

		priv->hw->desc->release_tx_desc(p);

		entry = (++priv->dirty_tx) % txsize;
	}
	if (unlikely(netif_queue_stopped(priv->dev) &&
		     stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv))) {
		netif_tx_lock(priv->dev);
		if (netif_queue_stopped(priv->dev) &&
		     stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv)) {
			TX_DBG("%s: restart transmit\n", __func__);
			netif_wake_queue(priv->dev);
		}
		netif_tx_unlock(priv->dev);
	}
}
Example #16
File: dev.c Project: 0xffea/gnumach
static void do_dev_queue_xmit(struct sk_buff *skb, struct device *dev, int pri)
{
	unsigned long flags;
	struct sk_buff_head *list;
	int retransmission = 0;	/* used to say if the packet should go	*/
				/* at the front or the back of the	*/
				/* queue - front is a retransmit try	*/

	if(pri>=0 && !skb_device_locked(skb))
		skb_device_lock(skb);	/* Shove a lock on the frame */
#if CONFIG_SKB_CHECK 
	IS_SKB(skb);
#endif    
	skb->dev = dev;

	/*
	 *	Negative priority is used to flag a frame that is being pulled from the
	 *	queue front as a retransmit attempt. It therefore goes back on the queue
	 *	start on a failure.
	 */
	 
  	if (pri < 0) 
  	{
		pri = -pri-1;
		retransmission = 1;
  	}

#ifdef CONFIG_NET_DEBUG
	if (pri >= DEV_NUMBUFFS) 
	{
		printk(KERN_WARNING "bad priority in dev_queue_xmit.\n");
		pri = 1;
	}
#endif

	/*
	 *	If the address has not been resolved, call the device header rebuilder.
	 *	This can cover all protocols and technically not just ARP either.
	 */
	 
	if (!skb->arp && dev->rebuild_header(skb->data, dev, skb->raddr, skb)) {
		return;
	}

	/*
	 *
	 * 	If dev is an alias, switch to its main device.
	 *	"arp" resolution has been made with alias device, so
	 *	arp entries refer to alias, not main.
	 *
	 */

#ifdef CONFIG_NET_ALIAS
	if (net_alias_is(dev))
	  	skb->dev = dev = net_alias_dev_tx(dev);
#endif

	/*
	 *	If we are bridging and this is directly generated output
	 *	pass the frame via the bridge.
	 */

#ifdef CONFIG_BRIDGE
	if(skb->pkt_bridged!=IS_BRIDGED && br_stats.flags & BR_UP)
	{
		if(br_tx_frame(skb))
			return;
	}
#endif

	list = dev->buffs + pri;

	save_flags(flags);
	/* if this isn't a retransmission, use the first packet instead... */
	if (!retransmission) {
		if (skb_queue_len(list)) {
			/* avoid overrunning the device queue.. */
			if (skb_queue_len(list) > dev->tx_queue_len) {
				dev_kfree_skb(skb, FREE_WRITE);
				return;
			}
		}

		/* copy outgoing packets to any sniffer packet handlers */
		if (dev_nit) {
			struct packet_type *ptype;
			skb->stamp=xtime;
			for (ptype = ptype_all; ptype!=NULL; ptype = ptype->next) 
			{
				/* Never send packets back to the socket
				 * they originated from - MvS ([email protected])
				 */
				if ((ptype->dev == dev || !ptype->dev) &&
				   ((struct sock *)ptype->data != skb->sk))
				{
					struct sk_buff *skb2;
					if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL)
						break;
					/* FIXME?: Wrong when the hard_header_len
					 * is an upper bound. Is this even
					 * used anywhere?
					 */
					skb2->h.raw = skb2->data + dev->hard_header_len;
					/* On soft header devices we
					 * yank the header before mac.raw
					 * back off. This is set by
					 * dev->hard_header().
					 */
					if (dev->flags&IFF_SOFTHEADERS)
						skb_pull(skb2,skb2->mac.raw-skb2->data);
					skb2->mac.raw = skb2->data;
					ptype->func(skb2, skb->dev, ptype);
				}
			}
		}

		if (skb_queue_len(list)) {
			cli();
			skb_device_unlock(skb);		/* Buffer is on the device queue and can be freed safely */
			__skb_queue_tail(list, skb);
			skb = __skb_dequeue(list);
			skb_device_lock(skb);		/* New buffer needs locking down */
			restore_flags(flags);
		}
	}
	if (dev->hard_start_xmit(skb, dev) == 0) {
		/*
		 *	Packet is now solely the responsibility of the driver
		 */
		return;
	}

	/*
	 *	Transmission failed, put skb back into a list. Once on the list it's safe and
	 *	no longer device locked (it can be freed safely from the device queue)
	 */
	cli();
	skb_device_unlock(skb);
	__skb_queue_head(list,skb);
	restore_flags(flags);
}
Example #17
static void sdio_mux_send_open_cmd(uint32_t id)
{
    struct sdio_mux_hdr hdr = {
        .magic_num = SDIO_MUX_HDR_MAGIC_NO,
        .cmd = SDIO_MUX_HDR_CMD_OPEN,
        .reserved = 0,
        .ch_id = id,
        .pkt_len = 0,
        .pad_len = 0
    };

    sdio_mux_write_cmd((void *)&hdr, sizeof(hdr));
}

static void sdio_mux_write_data(struct work_struct *work)
{
    int rc, reschedule = 0;
    int notify = 0;
    struct sk_buff *skb;
    unsigned long flags;
    int avail;
    int ch_id;

    spin_lock_irqsave(&sdio_mux_write_lock, flags);
    while ((skb = __skb_dequeue(&sdio_mux_write_pool))) {
        ch_id = ((struct sdio_mux_hdr *)skb->data)->ch_id;

        avail = sdio_write_avail(sdio_mux_ch);
        if (avail < skb->len) {
            /* we may have to wait for write avail
             * notification from sdio al
             */
            DBG("%s: sdio_write_avail(%d) < skb->len(%d)\n",
                __func__, avail, skb->len);

            reschedule = 1;
            break;
        }
        spin_unlock_irqrestore(&sdio_mux_write_lock, flags);
        rc = sdio_mux_write(skb);
        spin_lock_irqsave(&sdio_mux_write_lock, flags);
        if (rc == 0) {

            spin_lock(&sdio_ch[ch_id].lock);
            sdio_ch[ch_id].num_tx_pkts--;
            spin_unlock(&sdio_ch[ch_id].lock);

            if (sdio_ch[ch_id].write_done)
                sdio_ch[ch_id].write_done(
                    sdio_ch[ch_id].priv, skb);
            else
                dev_kfree_skb_any(skb);
        } else if (rc == -EAGAIN || rc == -ENOMEM) {
            /* recoverable error - retry again later */
            reschedule = 1;
            break;
        } else if (rc == -ENODEV) {
            /*
             * sdio_al suffered some kind of fatal error
             * prevent future writes and clean up pending ones
             */
            fatal_error = 1;
            do {
                ch_id = ((struct sdio_mux_hdr *) skb->data)->ch_id;
                spin_lock(&sdio_ch[ch_id].lock);
                sdio_ch[ch_id].num_tx_pkts--;
                spin_unlock(&sdio_ch[ch_id].lock);
                dev_kfree_skb_any(skb);
            } while ((skb = __skb_dequeue(&sdio_mux_write_pool)));
            spin_unlock_irqrestore(&sdio_mux_write_lock, flags);
            return;
        } else {
            /* unknown error condition - drop the
             * skb and reschedule for the
             * other skb's
             */
            pr_err("%s: sdio_mux_write error %d"
                   " for ch %d, skb=%p\n",
                   __func__, rc, ch_id, skb);
            notify = 1;
            break;
        }
    }

    if (reschedule) {
        if (sdio_ch_is_in_reset(ch_id)) {
            notify = 1;
        } else {
            __skb_queue_head(&sdio_mux_write_pool, skb);
            queue_delayed_work(sdio_mux_workqueue,
                               &delayed_work_sdio_mux_write,
                               msecs_to_jiffies(250)
                              );
        }
    }

    if (notify) {
        spin_lock(&sdio_ch[ch_id].lock);
        sdio_ch[ch_id].num_tx_pkts--;
        spin_unlock(&sdio_ch[ch_id].lock);

        if (sdio_ch[ch_id].write_done)
            sdio_ch[ch_id].write_done(
                sdio_ch[ch_id].priv, skb);
        else
            dev_kfree_skb_any(skb);
    }
    spin_unlock_irqrestore(&sdio_mux_write_lock, flags);
}
Example #18
inline bool skb_recycler_consume(struct sk_buff *skb) {
	unsigned long flags;
	struct sk_buff_head *h;

	/* Can we recycle this skb?  If not, simply return that we cannot */
	if (unlikely(!consume_skb_can_recycle(skb, SKB_RECYCLE_MIN_SIZE,
					      SKB_RECYCLE_MAX_SIZE)))
		return false;

	/*
	 * If we can, then it will be much faster for us to recycle this one
	 * later than to allocate a new one from scratch.
	 */
	preempt_disable();
	h = &__get_cpu_var(recycle_list);
	local_irq_save(flags);
	/* Attempt to enqueue the CPU hot recycle list first */
	if (likely(skb_queue_len(h) < SKB_RECYCLE_MAX_SKBS)) {
		__skb_queue_head(h, skb);
		local_irq_restore(flags);
		preempt_enable();
		return true;
	}
#ifdef CONFIG_SKB_RECYCLER_MULTI_CPU
	h = &__get_cpu_var(recycle_spare_list);

	/* The CPU hot recycle list was full; if the spare list is also full,
	 * attempt to move the spare list to the global list for other CPUs to
	 * use.
	 */
	if (unlikely(skb_queue_len(h) >= SKB_RECYCLE_SPARE_MAX_SKBS)) {
		uint8_t cur_tail, next_tail;
		spin_lock(&glob_recycler.lock);
		cur_tail = glob_recycler.tail;
		next_tail = (cur_tail + 1) & SKB_RECYCLE_MAX_SHARED_POOLS_MASK;
		if (next_tail != glob_recycler.head) {
			struct sk_buff_head *p = &glob_recycler.pool[cur_tail];

			/* Optimized, inlined SKB List splice */
			p->next = h->next;
			h->next->prev = (struct sk_buff *)p;
			p->prev = h->prev;
			h->prev->next = (struct sk_buff *)p;
			p->qlen = SKB_RECYCLE_SPARE_MAX_SKBS;

			/* Done with global list init */
			glob_recycler.tail = next_tail;
			spin_unlock(&glob_recycler.lock);

			/* Optimized, inlined spare SKB list init */
			h->next = (struct sk_buff *)h;
			h->prev = (struct sk_buff *)h;
			h->qlen = 0;

			/* We have now cleared room in the spare; enqueue */
			__skb_queue_head(h, skb);
			local_irq_restore(flags);
			preempt_enable();
			return true;
		}
		/* We still have a full spare because the global is also full */
		spin_unlock(&glob_recycler.lock);
	} else {
		/* We have room in the spare list; enqueue to spare list */
		__skb_queue_head(h, skb);
		local_irq_restore(flags);
		preempt_enable();
		return true;
	}
#endif

	local_irq_restore(flags);
	preempt_enable();

	return false;
}
Example #19
static int start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int num, err;
	struct scatterlist sg[1+MAX_SKB_FRAGS];
	struct virtio_net_hdr *hdr;
	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;

	sg_init_table(sg, 1+MAX_SKB_FRAGS);

	pr_debug("%s: xmit %p " MAC_FMT "\n", dev->name, skb,
		 dest[0], dest[1], dest[2],
		 dest[3], dest[4], dest[5]);

	/* Encode metadata header at front. */
	hdr = skb_vnet_hdr(skb);
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
		hdr->csum_start = skb->csum_start - skb_headroom(skb);
		hdr->csum_offset = skb->csum_offset;
	} else {
		hdr->flags = 0;
		hdr->csum_offset = hdr->csum_start = 0;
	}

	if (skb_is_gso(skb)) {
		hdr->hdr_len = skb_transport_header(skb) - skb->data;
		hdr->gso_size = skb_shinfo(skb)->gso_size;
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
			hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
		else
			BUG();
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
			hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
	} else {
		hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;
		hdr->gso_size = hdr->hdr_len = 0;
	}

	vnet_hdr_to_sg(sg, skb);
	num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
	__skb_queue_head(&vi->send, skb);

again:
	/* Free up any pending old buffers before queueing new ones. */
	free_old_xmit_skbs(vi);
	err = vi->svq->vq_ops->add_buf(vi->svq, sg, num, 0, skb);
	if (err) {
		pr_debug("%s: virtio not prepared to send\n", dev->name);
		netif_stop_queue(dev);

		/* Activate callback for using skbs: if this returns false it
		 * means some were used in the meantime. */
		if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
			vi->svq->vq_ops->disable_cb(vi->svq);
			netif_start_queue(dev);
			goto again;
		}
		__skb_unlink(skb, &vi->send);

		return NETDEV_TX_BUSY;
	}
	vi->svq->vq_ops->kick(vi->svq);

	return 0;
}
Example #20
/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 * 	NET_XMIT_DROP: queue length didn't change.
 *      NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	int ret;
	int count = 1;

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Drop packet? */
	if (loss_event(q))
		--count;

	if (count == 0) {
		sch->qstats.drops++;
		kfree_skb(skb);
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}

	skb_orphan(skb);

	/*
	 * If we need to duplicate packet, then re-insert at top of the
	 * qdisc tree, since parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = qdisc_root(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
		q->duplicate = 0;

		qdisc_enqueue_root(skb2, rootq);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * Make a copy if needed since we are modifying it.
	 * If packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
		    (skb->ip_summed == CHECKSUM_PARTIAL &&
		     skb_checksum_help(skb))) {
			sch->qstats.drops++;
			return NET_XMIT_DROP;
		}

		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
	}

	cb = netem_skb_cb(skb);
	if (q->gap == 0 ||		/* not doing reordering */
	    q->counter < q->gap ||	/* inside last reordering gap */
	    q->reorder < get_crandom(&q->reorder_cor)) {
		psched_time_t now;
		psched_tdiff_t delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = psched_get_time();
		cb->time_to_send = now + delay;
		++q->counter;
		ret = qdisc_enqueue(skb, q->qdisc);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = psched_get_time();
		q->counter = 0;

		__skb_queue_head(&q->qdisc->q, skb);
		sch->qstats.backlog += qdisc_pkt_len(skb);
		sch->qstats.requeues++;
		ret = NET_XMIT_SUCCESS;
	}

	if (ret != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(ret)) {
			sch->qstats.drops++;
			return ret;
		}
	}

	sch->q.qlen++;
	return NET_XMIT_SUCCESS;
}
static int
pfifo_requeue(struct sk_buff *skb, struct Qdisc* sch)
{
	__skb_queue_head(&sch->q, skb);
	return 0;
}
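Most of the *_requeue() functions in this listing implement the requeue hook of the older qdisc API, and they all obey the same rule: a packet handed back after a failed transmit goes to the head of the queue, never the tail, so intra-flow ordering is preserved. The sketch below shows, in a hedged and simplified form, how such a hook is driven by its caller; the function name and exact control flow are assumptions for illustration, loosely modelled on the old qdisc_restart() logic, and are not part of the examples above.

/* Hypothetical, simplified caller: dequeue one packet, offer it to the
 * driver, and hand it back through ->requeue() if the driver is busy. */
static int try_transmit_one(struct Qdisc *q, struct net_device *dev)
{
	struct sk_buff *skb = q->dequeue(q);

	if (skb == NULL)
		return 0;

	if (dev->hard_start_xmit(skb, dev) == 0)
		return 1;	/* the driver accepted the packet */

	/* Driver was busy: give the packet back to the qdisc.  The
	 * implementations above all use __skb_queue_head(), so this
	 * packet will be the first one retried on the next attempt. */
	q->ops->requeue(skb, q);
	return 0;
}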