Example no. 1
/**
 * filter_ack - filters incoming packets for acknowledgements
 * @hw: the mac80211 hardware device
 * @rx_hdr: received header
 * @stats: the status for the received packet
 *
 * This function looks for ACK packets and tries to match them with the
 * frames in the tx queue. If a match is found, the frame is dequeued and
 * the upper layers are informed about the successful transmission. If the
 * mac80211 queues have been stopped and the number of frames still to be
 * transmitted is low, the queues will be opened again.
 *
 * Returns 1 if the frame was an ACK, 0 if it was ignored.
 */
static int filter_ack(struct ieee80211_hw *hw, struct ieee80211_hdr *rx_hdr,
		      struct ieee80211_rx_status *stats)
{
	u16 fc = le16_to_cpu(rx_hdr->frame_control);
	struct sk_buff *skb;
	struct sk_buff_head *q;
	unsigned long flags;

	if ((fc & (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) !=
	    (IEEE80211_FTYPE_CTL | IEEE80211_STYPE_ACK))
		return 0;

	q = &zd_hw_mac(hw)->ack_wait_queue;
	spin_lock_irqsave(&q->lock, flags);
	for (skb = q->next; skb != (struct sk_buff *)q; skb = skb->next) {
		struct ieee80211_hdr *tx_hdr;

		tx_hdr = (struct ieee80211_hdr *)skb->data;
		if (likely(!compare_ether_addr(tx_hdr->addr2, rx_hdr->addr1)))
		{
			struct ieee80211_tx_status status;

			memset(&status, 0, sizeof(status));
			status.flags = IEEE80211_TX_STATUS_ACK;
			status.ack_signal = stats->ssi;
			__skb_unlink(skb, q);
			tx_status(hw, skb, &status, 1);
			goto out;
		}
	}
out:
	spin_unlock_irqrestore(&q->lock, flags);
	return 1;
}
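What filter_ack() illustrates is the convention behind __skb_unlink(): the double-underscore variant never takes the queue lock itself, so the caller must provide its own serialization (here spin_lock_irqsave() on ack_wait_queue's lock). A minimal sketch of the two removal variants follows; the helper names drop_one_locked/drop_one_unlocked are illustrative, not from the driver.

#include <linux/skbuff.h>
#include <linux/spinlock.h>

/* skb_unlink() grabs list->lock internally, so it is safe to call on a
 * queue that other contexts may modify concurrently. */
static void drop_one_locked(struct sk_buff_head *list, struct sk_buff *skb)
{
	skb_unlink(skb, list);
}

/* __skb_unlink() leaves locking to the caller; here we take the queue's
 * own lock around it, as filter_ack() does above. */
static void drop_one_unlocked(struct sk_buff_head *list, struct sk_buff *skb)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_unlink(skb, list);
	spin_unlock_irqrestore(&list->lock, flags);
}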
Example no. 2
static int
tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
{
	struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;

	__skb_queue_tail(&sch->q, skb);
	if ((sch->stats.backlog += skb->len) <= q->limit) {
		sch->stats.bytes += skb->len;
		sch->stats.packets++;
		return 1;
	}

	/* Drop action: undo the things that we just did,
	 * i.e. make tail drop
	 */

	__skb_unlink(skb, &sch->q);
	sch->stats.backlog -= skb->len;
	sch->stats.drops++;
#ifdef CONFIG_NET_CLS_POLICE
	if (sch->reshape_fail==NULL || sch->reshape_fail(skb, sch))
#endif
		kfree_skb(skb);
	return 0;
}
Example no. 3
static int virtnet_poll(struct napi_struct *napi, int budget)
{
	struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi);
	struct sk_buff *skb = NULL;
	unsigned int len, received = 0;

again:
	while (received < budget &&
	       (skb = vi->rvq->vq_ops->get_buf(vi->rvq, &len)) != NULL) {
		__skb_unlink(skb, &vi->recv);
		receive_skb(vi->dev, skb, len);
		vi->num--;
		received++;
	}

	/* FIXME: If we oom and completely run out of inbufs, we need
	 * to start a timer trying to fill more. */
	if (vi->num < vi->max / 2)
		try_fill_recv(vi);

	/* Out of packets? */
	if (received < budget) {
		netif_rx_complete(vi->dev, napi);
		if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq))
		    && napi_schedule_prep(napi)) {
			vi->rvq->vq_ops->disable_cb(vi->rvq);
			__netif_rx_schedule(vi->dev, napi);
			goto again;
		}
	}

	return received;
}
Example no. 4
/**
 *	__skb_recv_datagram - Receive a datagram skbuff
 *	@sk: socket
 *	@flags: MSG_ flags
 *	@peeked: returns non-zero if this packet has been seen before
 *	@err: error code returned
 *
 *	Get a datagram skbuff, understands the peeking, nonblocking wakeups
 *	and possible races. This replaces identical code in packet, raw and
 *	udp, as well as the IPX AX.25 and Appletalk. It also finally fixes
 *	the long standing peek and read race for datagram sockets. If you
 *	alter this routine remember it must be re-entrant.
 *
 *	This function will lock the socket if a skb is returned, so the caller
 *	needs to unlock the socket in that case (usually by calling
 *	skb_free_datagram)
 *
 *	* It does not lock socket since today. This function is
 *	* free of race conditions. This measure should/can improve
 *	* significantly datagram socket latencies at high loads,
 *	* when data copying to user space takes lots of time.
 *	* (BTW I've just killed the last cli() in IP/IPv6/core/netlink/packet
 *	*  8) Great win.)
 *	*			                    --ANK (980729)
 *
 *	The order of the tests when we find no data waiting are specified
 *	quite explicitly by POSIX 1003.1g, don't change them without having
 *	the standard around please.
 */
struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
				    int *peeked, int *err)
{
	struct sk_buff *skb;
	long timeo;
	/*
	 * Caller is allowed not to check sk->sk_err before skb_recv_datagram()
	 */
	int error = sock_error(sk);

	if (error)
		goto no_packet;

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		/* Again only user level code calls this function, so nothing
		 * interrupt level will suddenly eat the receive_queue.
		 *
		 * Look at current nfs client by the way...
		 * However, this function was correct in any case. 8)
		 */
		unsigned long cpu_flags;

		spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags);
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb) {
			*peeked = skb->peeked;
			if (flags & MSG_PEEK) {
				skb->peeked = 1;
				atomic_inc(&skb->users);
			} else {
				if (!skb->next || IS_ERR(skb->next)) {
					printk("[NET] skb->next error in %s\n", __func__);
					error = -EAGAIN;
					spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);
					goto no_packet;
				} else {
					__skb_unlink(skb, &sk->sk_receive_queue);
				}
			}
		}
		spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);

		if (skb)
			return skb;

		/* User doesn't want to wait */
		error = -EAGAIN;
		if (!timeo)
			goto no_packet;

	} while (!wait_for_packet(sk, err, &timeo));

	return NULL;

no_packet:
	*err = error;
	return NULL;
}
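For context, a typical consumer of this helper in kernels of this vintage is a protocol's recvmsg path, which goes through the skb_recv_datagram() wrapper and releases the buffer with skb_free_datagram(). The sketch below shows that calling pattern under those assumptions; my_recv_one and the flat copy into a caller buffer are illustrative, not taken from any real protocol.

#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/sock.h>

static int my_recv_one(struct sock *sk, void *buf, int len, int noblock)
{
	struct sk_buff *skb;
	int err, copy;

	/* May block according to noblock and the socket's receive timeout;
	 * returns NULL and sets err on error or timeout. */
	skb = skb_recv_datagram(sk, 0, noblock, &err);
	if (!skb)
		return err;

	copy = min_t(int, len, skb->len);
	err = skb_copy_bits(skb, 0, buf, copy);

	/* Drop our reference; the skb was already unlinked from the
	 * receive queue above (no MSG_PEEK in this sketch). */
	skb_free_datagram(sk, skb);
	return err ? err : copy;
}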
Example no. 5
static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	if (qdisc_is_throttled(sch))
		return NULL;

tfifo_dequeue:
	skb = qdisc_peek_head(sch);
	if (skb) {
		const struct netem_skb_cb *cb = netem_skb_cb(skb);

		/* has this packet's scheduled send time arrived? */
		if (cb->time_to_send <= psched_get_time()) {
			__skb_unlink(skb, &sch->q);
			sch->qstats.backlog -= qdisc_pkt_len(skb);

#ifdef CONFIG_NET_CLS_ACT
			/*
			 * If it's at ingress let's pretend the delay is
			 * from the network (tstamp will be updated).
			 */
			if (G_TC_FROM(skb->tc_verd) & AT_INGRESS)
				skb->tstamp.tv64 = 0;
#endif

			if (q->qdisc) {
				int err = qdisc_enqueue(skb, q->qdisc);

				if (unlikely(err != NET_XMIT_SUCCESS)) {
					if (net_xmit_drop_count(err)) {
						sch->qstats.drops++;
						qdisc_tree_decrease_qlen(sch, 1);
					}
				}
				goto tfifo_dequeue;
			}
deliver:
			qdisc_unthrottled(sch);
			qdisc_bstats_update(sch, skb);
			return skb;
		}

		if (q->qdisc) {
			skb = q->qdisc->ops->dequeue(q->qdisc);
			if (skb)
				goto deliver;
		}
		qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send);
	}

	if (q->qdisc) {
		skb = q->qdisc->ops->dequeue(q->qdisc);
		if (skb)
			goto deliver;
	}
	return NULL;
}
Example no. 6
/**
 *	__skb_recv_datagram - Receive a datagram skbuff
 *	@sk: socket
 *	@flags: MSG_ flags
 *	@peeked: returns non-zero if this packet has been seen before
 *	@off: an offset in bytes to peek skb from. Returns an offset
 *	      within an skb where data actually starts
 *	@err: error code returned
 *
 *	Get a datagram skbuff, understands the peeking, nonblocking wakeups
 *	and possible races. This replaces identical code in packet, raw and
 *	udp, as well as the IPX AX.25 and Appletalk. It also finally fixes
 *	the long standing peek and read race for datagram sockets. If you
 *	alter this routine remember it must be re-entrant.
 *
 *	This function will lock the socket if a skb is returned, so the caller
 *	needs to unlock the socket in that case (usually by calling
 *	skb_free_datagram)
 *
 *	* It does not lock socket since today. This function is
 *	* free of race conditions. This measure should/can improve
 *	* significantly datagram socket latencies at high loads,
 *	* when data copying to user space takes lots of time.
 *	* (BTW I've just killed the last cli() in IP/IPv6/core/netlink/packet
 *	*  8) Great win.)
 *	*			                    --ANK (980729)
 *
 *	The order of the tests when we find no data waiting are specified
 *	quite explicitly by POSIX 1003.1g, don't change them without having
 *	the standard around please.
 */
struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
				    int *peeked, int *off, int *err)
{
	struct sk_buff *skb, *last;
	long timeo;
	/*
	 * Caller is allowed not to check sk->sk_err before skb_recv_datagram()
	 */
	int error = sock_error(sk);

	if (error)
		goto no_packet;

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		/* Again only user level code calls this function, so nothing
		 * interrupt level will suddenly eat the receive_queue.
		 *
		 * Look at current nfs client by the way...
		 * However, this function was correct in any case. 8)
		 */
		unsigned long cpu_flags;
		struct sk_buff_head *queue = &sk->sk_receive_queue;
		int _off = *off;

		last = (struct sk_buff *)queue;
		spin_lock_irqsave(&queue->lock, cpu_flags);
		skb_queue_walk(queue, skb) {
			last = skb;
			*peeked = skb->peeked;
			if (flags & MSG_PEEK) {
				if (_off >= skb->len && (skb->len || _off ||
							 skb->peeked)) {
					_off -= skb->len;
					continue;
				}
				skb->peeked = 1;
				atomic_inc(&skb->users);
			} else
				__skb_unlink(skb, queue);

			spin_unlock_irqrestore(&queue->lock, cpu_flags);
			*off = _off;
			return skb;
		}
		spin_unlock_irqrestore(&queue->lock, cpu_flags);

		if (sk_can_busy_loop(sk) &&
		    sk_busy_loop(sk, flags & MSG_DONTWAIT))
			continue;

		/* User doesn't want to wait */
		error = -EAGAIN;
		if (!timeo)
			goto no_packet;

	} while (!wait_for_more_packets(sk, err, &timeo, last));

	return NULL;

no_packet:
	*err = error;
	return NULL;
}
Example no. 7
File: aoenet.c Project: E-LLP/n900
void
aoenet_xmit(struct sk_buff_head *queue)
{
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(queue, skb, tmp) {
		__skb_unlink(skb, queue);
		dev_queue_xmit(skb);
	}
}
Example no. 8
static int
sfq_q_enqueue(struct sk_buff *skb, struct sfq_sched_data *q, int end)
{
	unsigned hash = sfq_hash(q, skb);
	sfq_index x;

	x = q->ht[hash];
	if (x == q->depth) {
		q->ht[hash] = x = q->dep[q->depth].next;
		q->hash[x] = hash;
	}

	if (end == SFQ_TAIL) {
		/* If selected queue has length q->limit, this means that
		 * all other queues are empty and that we do simple tail drop,
		 * i.e. drop _this_ packet.
		 */
		if (q->qs[x].qlen >= q->limit) {
			unsigned int drop_len = skb->len;

			kfree_skb(skb);
			return drop_len;
		}
		__skb_queue_tail(&q->qs[x], skb);
	} else { /* end == SFQ_HEAD */
		__skb_queue_head(&q->qs[x], skb);
		/* If selected queue has length q->limit+1, this means that
		 * all other queues are empty and we do simple tail drop.
		 * This packet is still requeued at head of queue, tail packet
		 * is dropped.
		 */
		if (q->qs[x].qlen > q->limit) {
			unsigned int drop_len;

			skb = q->qs[x].prev;
			drop_len = skb->len;
			__skb_unlink(skb, &q->qs[x]);
			kfree_skb(skb);
			return drop_len;
		}
	}

	sfq_inc(q, x);
	if (q->qs[x].qlen == 1) {		/* The flow is new */
		if (q->tail == q->depth) {	/* It is the first flow */
			q->tail = x;
			q->next[x] = x;
			q->allot[x] = q->quantum;
		} else {
			q->next[x] = q->next[q->tail];
			q->next[q->tail] = x;
			q->tail = x;
		}
	}

	return 0;
}
Example no. 9
struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
				    int *peeked, int *off, int *err)
{
	struct sk_buff *skb;
	long timeo;
	int error = 0;

	if ((!sk) || (IS_ERR(sk)))
		goto no_packet;

	error = sock_error(sk);

	if (error)
		goto no_packet;

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		unsigned long cpu_flags;
		struct sk_buff_head *queue = &sk->sk_receive_queue;

		spin_lock_irqsave(&queue->lock, cpu_flags);
		skb_queue_walk(queue, skb) {
			*peeked = skb->peeked;
			if (flags & MSG_PEEK) {
#ifdef CONFIG_HTC_NETWORK_MODIFY
				if (*off >= skb->len && skb->len) {
#else
				if (*off >= skb->len && skb->len) {
#endif
					*off -= skb->len;
					continue;
				}
				skb->peeked = 1;
				atomic_inc(&skb->users);
			} else
				__skb_unlink(skb, queue);

			spin_unlock_irqrestore(&queue->lock, cpu_flags);
			return skb;
		}
		spin_unlock_irqrestore(&queue->lock, cpu_flags);

		
		error = -EAGAIN;
		if (!timeo)
			goto no_packet;

	} while (!wait_for_packet(sk, err, &timeo));

	return NULL;

no_packet:
	*err = error;
	return NULL;
}
Example no. 10
static unsigned int sfq_drop(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	sfq_index d = q->max_depth;
	struct sk_buff *skb;
	unsigned int len;

	/* Queue is full! Find the longest slot and
	   drop a packet from it */

	if (d > 1) {
		sfq_index x = q->dep[d + SFQ_DEPTH].next;
		skb = q->qs[x].prev;
		len = skb->len;
		__skb_unlink(skb, &q->qs[x]);
		kfree_skb(skb);
		sfq_dec(q, x);
		sch->q.qlen--;
		sch->qstats.drops++;
		sch->qstats.backlog -= len;
		return len;
	}

	if (d == 1) {
		/* It is difficult to believe, but ALL THE SLOTS HAVE LENGTH 1. */
		d = q->next[q->tail];
		q->next[q->tail] = q->next[d];
		q->allot[q->next[d]] += q->quantum;
		skb = q->qs[d].prev;
		len = skb->len;
		__skb_unlink(skb, &q->qs[d]);
		kfree_skb(skb);
		sfq_dec(q, d);
		sch->q.qlen--;
		q->ht[q->hash[d]] = SFQ_DEPTH;
		sch->qstats.drops++;
		sch->qstats.backlog -= len;
		return len;
	}

	return 0;
}
Example no. 11
static void free_old_xmit_skbs(struct virtnet_info *vi)
{
	struct sk_buff *skb;
	unsigned int len;

	while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) {
		pr_debug("Sent skb %p\n", skb);
		__skb_unlink(skb, &vi->send);
		vi->dev->stats.tx_bytes += len;
		vi->dev->stats.tx_packets++;
		kfree_skb(skb);
	}
}
Example no. 12
void
aoenet_xmit(struct sk_buff_head *queue)
{
	struct sk_buff *skb, *tmp;
	ulong flags;

	skb_queue_walk_safe(queue, skb, tmp) {
		__skb_unlink(skb, queue);
		spin_lock_irqsave(&txlock, flags);
		skb_queue_tail(&skbtxq, skb);
		spin_unlock_irqrestore(&txlock, flags);
		wake_up(&txwq);
	}
}
Example no. 13
void skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
{
	if (flags & MSG_PEEK) {
		spin_lock_bh(&sk->sk_receive_queue.lock);
		if (skb == skb_peek(&sk->sk_receive_queue)) {
			__skb_unlink(skb, &sk->sk_receive_queue);
			atomic_dec(&skb->users);
		}
		spin_unlock_bh(&sk->sk_receive_queue.lock);
	}

	kfree_skb(skb);
}
Example no. 14
static void defer_bh(struct usbnet *dev, struct sk_buff *skb, struct sk_buff_head *list)
{
	unsigned long		flags;

	/* Unlink the skb from its current list with interrupts disabled. */
	spin_lock_irqsave(&list->lock, flags);
	__skb_unlink(skb, list);
	spin_unlock(&list->lock);

	/* Interrupts stay disabled across the lock handoff; the saved
	 * flags are only restored after the done list has been updated. */
	spin_lock(&dev->done.lock);
	__skb_queue_tail(&dev->done, skb);
	if (dev->done.qlen == 1)
		tasklet_schedule(&dev->bh);
	spin_unlock_irqrestore(&dev->done.lock, flags);
}
Example no. 15
File: dev.c Project: 0xffea/gnumach
void dev_tint(struct device *dev)
{
	int i;
	unsigned long flags;
	struct sk_buff_head * head;
	
	/*
	 * aliases do not transmit (for now :) )
	 */

#ifdef CONFIG_NET_ALIAS
	if (net_alias_is(dev)) return;
#endif
	head = dev->buffs;
	save_flags(flags);
	cli();

	/*
	 *	Work the queues in priority order
	 */	 
	for(i = 0;i < DEV_NUMBUFFS; i++,head++)
	{

		while (!skb_queue_empty(head)) {
			struct sk_buff *skb;

			skb = head->next;
			__skb_unlink(skb, head);
			/*
			 *	Stop anyone freeing the buffer while we retransmit it
			 */
			skb_device_lock(skb);
			restore_flags(flags);
			/*
			 *	Feed them to the output stage and if it fails
			 *	indicate they re-queue at the front.
			 */
			do_dev_queue_xmit(skb,dev,-i - 1);
			/*
			 *	If we can take no more then stop here.
			 */
			if (dev->tbusy)
				return;
			cli();
		}
	}
	restore_flags(flags);
}
Example no. 16
static int
sfq_requeue(struct sk_buff *skb, struct Qdisc* sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned hash = sfq_hash(q, skb);
	sfq_index x;

	x = q->ht[hash];
	if (x == SFQ_DEPTH) {
		q->ht[hash] = x = q->dep[SFQ_DEPTH].next;
		q->hash[x] = hash;
	}
	sch->qstats.backlog += skb->len;
	__skb_queue_head(&q->qs[x], skb);
	/* If selected queue has length q->limit+1, this means that
	 * all other queues are empty and we do simple tail drop.
	 * This packet is still requeued at head of queue, tail packet
	 * is dropped.
	 */
	if (q->qs[x].qlen > q->limit) {
		skb = q->qs[x].prev;
		__skb_unlink(skb, &q->qs[x]);
		sch->qstats.drops++;
		sch->qstats.backlog -= skb->len;
		kfree_skb(skb);
		return NET_XMIT_CN;
	}
	sfq_inc(q, x);
	if (q->qs[x].qlen == 1) {		/* The flow is new */
		if (q->tail == SFQ_DEPTH) {	/* It is the first flow */
			q->tail = x;
			q->next[x] = x;
			q->allot[x] = q->quantum;
		} else {
			q->next[x] = q->next[q->tail];
			q->next[q->tail] = x;
			q->tail = x;
		}
	}
	if (++sch->q.qlen <= q->limit) {
		sch->qstats.requeues++;
		return 0;
	}

	sch->qstats.drops++;
	sfq_drop(sch);
	return NET_XMIT_CN;
}
Example no. 17
static void hfa384x_process_reqs(struct net_device *dev)
{
	struct hostap_interface *iface = netdev_priv(dev);
	local_info_t *local = iface->local;
	struct hostap_usb_priv *hw_priv = local->hw_priv;
	unsigned long flags;
	struct sk_buff *skb;
	int process = 0;
	int err = 0;

	printk(KERN_DEBUG "process_reqs\n");
	spin_lock_irqsave(&hw_priv->tx_queue.lock, flags);
	skb = skb_peek(&hw_priv->tx_queue);

	if (skb && (hfa384x_cb(skb)->error ||
			(hfa384x_cb(skb)->acked &&
			 (hfa384x_cb(skb)->response || hfa384x_cb(skb)->noresp)))) {
		process = 1;
		__skb_unlink(skb, &hw_priv->tx_queue);
	}

	printk(KERN_DEBUG "process_reqs %d %d %d %d %p\n",
			process,
			skb ? hfa384x_cb(skb)->error : -1,
			skb ? hfa384x_cb(skb)->acked: -1,
			skb ? hfa384x_cb(skb)->noresp: -1,
			skb ? hfa384x_cb(skb)->response: (void*)-1
			);

	if (skb)
		print_hex_dump_bytes("pr ", DUMP_PREFIX_OFFSET, skb->data, skb->len);
	spin_unlock_irqrestore(&hw_priv->tx_queue.lock, flags);

	if (process) {
		hostap_urb_calb urb_calb = hfa384x_cb(skb)->calb;
		if (hfa384x_cb(skb)->error)
			err = 1;

		complete_all(&hfa384x_cb(skb)->comp);
		if (urb_calb)
			(*urb_calb)(dev, skb);

		if (!err)
			hfa384x_usbout(dev, NULL);
	}

}
Example no. 18
static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
			  struct sk_buff_head *hitlist)
{
	struct sk_buff *skb;
	struct sk_buff *next;

	spin_lock(&x->sk_receive_queue.lock);
	receive_queue_for_each_skb(x, next, skb) {
		/*
		 *	Do we have file descriptors ?
		 */
		if (UNIXCB(skb).fp) {
			bool hit = false;
			/*
			 *	Process the descriptors of this socket
			 */
			int nfd = UNIXCB(skb).fp->count;
			struct file **fp = UNIXCB(skb).fp->fp;
			while (nfd--) {
				/*
				 *	Get the socket the fd matches
				 *	if it indeed does so
				 */
				struct sock *sk = unix_get_socket(*fp++);
				if (sk) {
					struct unix_sock *u = unix_sk(sk);

					/*
					 * Ignore non-candidates, they could
					 * have been added to the queues after
					 * starting the garbage collection
					 */
					if (u->gc_candidate) {
						hit = true;
						func(u);
					}
				}
			}
			if (hit && hitlist != NULL) {
				__skb_unlink(skb, &x->sk_receive_queue);
				__skb_queue_tail(hitlist, skb);
			}
		}
	}
	spin_unlock(&x->sk_receive_queue.lock);
}
Example no. 19
static int
teql_enqueue(struct sk_buff *skb, struct Qdisc* sch)
{
	struct net_device *dev = sch->dev;
	struct teql_sched_data *q = (struct teql_sched_data *)sch->data;

	__skb_queue_tail(&q->q, skb);
	if (q->q.qlen <= dev->tx_queue_len) {
		sch->stats.bytes += skb->len;
		sch->stats.packets++;
		return 0;
	}

	__skb_unlink(skb, &q->q);
	kfree_skb(skb);
	sch->stats.drops++;
	return NET_XMIT_DROP;
}
Example no. 20
int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
{
	int err = 0;

	if (flags & MSG_PEEK) {
		err = -ENOENT;
		spin_lock_bh(&sk->sk_receive_queue.lock);
		if (skb == skb_peek(&sk->sk_receive_queue)) {
			__skb_unlink(skb, &sk->sk_receive_queue);
			atomic_dec(&skb->users);
			err = 0;
		}
		spin_unlock_bh(&sk->sk_receive_queue.lock);
	}

	skb_free_datagram(sk, skb);
	return err;
}
Example no. 21
struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
					  struct sk_buff_head *queue,
					  unsigned int flags,
					  void (*destructor)(struct sock *sk,
							   struct sk_buff *skb),
					  int *peeked, int *off, int *err,
					  struct sk_buff **last)
{
	bool peek_at_off = false;
	struct sk_buff *skb;
	int _off = 0;

	if (unlikely(flags & MSG_PEEK && *off >= 0)) {
		peek_at_off = true;
		_off = *off;
	}

	*last = queue->prev;
	skb_queue_walk(queue, skb) {
		if (flags & MSG_PEEK) {
			if (peek_at_off && _off >= skb->len &&
			    (_off || skb->peeked)) {
				_off -= skb->len;
				continue;
			}
			if (!skb->len) {
				skb = skb_set_peeked(skb);
				if (unlikely(IS_ERR(skb))) {
					*err = PTR_ERR(skb);
					return NULL;
				}
			}
			*peeked = 1;
			refcount_inc(&skb->users);
		} else {
			__skb_unlink(skb, queue);
			if (destructor)
				destructor(sk, skb);
		}
		*off = _off;
		return skb;
	}
	return NULL;
}
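Unlike the older __skb_recv_datagram() variants earlier on this page, this helper expects the caller to hold queue->lock for the whole walk. Below is a minimal sketch of that calling convention, loosely modeled on (not copied from) the datagram code that wraps it; the name grab_one and the NULL destructor are assumptions.

#include <linux/skbuff.h>
#include <net/sock.h>

static struct sk_buff *grab_one(struct sock *sk, int *peeked, int *off,
				int *err)
{
	struct sk_buff_head *queue = &sk->sk_receive_queue;
	struct sk_buff *skb, *last;
	unsigned long flags;

	/* The helper itself never touches the lock, so hold it across the
	 * call; with flags == 0 a returned skb is already unlinked. */
	spin_lock_irqsave(&queue->lock, flags);
	skb = __skb_try_recv_from_queue(sk, queue, 0, NULL,
					peeked, off, err, &last);
	spin_unlock_irqrestore(&queue->lock, flags);
	return skb;
}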
Example no. 22
void __release_sock(struct sock *sk)
{
#ifdef CONFIG_INET
	if (!sk->prot || !sk->prot->rcv)
		return;
		
	/* See if we have any packets built up. */
	start_bh_atomic();
	while (!skb_queue_empty(&sk->back_log)) {
		struct sk_buff * skb = sk->back_log.next;
		__skb_unlink(skb, &sk->back_log);
		sk->prot->rcv(skb, skb->dev, (struct options*)skb->proto_priv,
			      skb->saddr, skb->len, skb->daddr, 1,
			      /* Only used for/by raw sockets. */
			      (struct inet_protocol *)sk->pair); 
	}
	end_bh_atomic();
#endif  
}
Example no. 23
static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb,
		struct sk_buff_head *list, enum skb_state state)
{
	unsigned long		flags;
	enum skb_state 		old_state;
	struct skb_data *entry = (struct skb_data *) skb->cb;

	spin_lock_irqsave(&list->lock, flags);
	old_state = entry->state;
	entry->state = state;
	__skb_unlink(skb, list);
	spin_unlock(&list->lock);
	spin_lock(&dev->done.lock);
	__skb_queue_tail(&dev->done, skb);
	if (dev->done.qlen == 1)
		tasklet_schedule(&dev->bh);
	spin_unlock_irqrestore(&dev->done.lock, flags);
	return old_state;
}
Example no. 24
static void defer_bh(struct usbnet *dev, struct sk_buff *skb, struct sk_buff_head *list)
{
	unsigned long		flags;


	spin_lock_irqsave(&list->lock, flags);
	//HTC: ensure the next and prev pointers are both valid before calling __skb_unlink()
	if (skb->next != NULL && skb->prev!=NULL)
		__skb_unlink(skb, list);
	else {
		pr_info("%s(%d) skb next:%p prev:%p !!!\n", __func__, __LINE__, skb->next, skb->prev);
		list->qlen--;
	}

	//HTC: if qlen is already 0, but list->next != list, it means the list is corrupted
	//     call __skb_queue_head_init() to recover the list to its initial state
	if ((list->qlen == 0) && !skb_queue_empty(list)){
		pr_info("%s(%d) __skb_queue_head_init list:%p next:%p prev:%p !!!\n", __func__, __LINE__, list, list->next, list->prev);
		__skb_queue_head_init(list);
	}
	spin_unlock(&list->lock);
	spin_lock(&dev->done.lock);
	__skb_queue_tail(&dev->done, skb);

    //HTC+++
	if (!test_bit (EVENT_DEV_ASLEEP, &dev->flags) && (dev->done.qlen > USBNET_DONE_QUEUE_HIGH_WATERMARK))
		pr_info("%s(%d) [USBNET] dev->done.qlen:%d\n", __func__, __LINE__, dev->done.qlen);
    //HTC---

	if (dev->done.qlen == 1)
		tasklet_schedule(&dev->bh);
	//HTC+++
	else if (dev->done.qlen > USBNET_DONE_QUEUE_HIGH_WATERMARK) {
		// HALT HSIC RX
		if (!test_bit (EVENT_RX_HALT, &dev->flags) && !test_bit (EVENT_DEV_ASLEEP, &dev->flags)) {
			netdev_err(dev->net, "!!! [USBNET] dev->done.qlen %d > USBNET_DONE_QUEUE_HIGH_WATERMARK, set EVENT_RX_HALT !!!\n", dev->done.qlen);
			usbnet_defer_kevent (dev, EVENT_RX_HALT);
		}
	}
	//HTC---
	spin_unlock_irqrestore(&dev->done.lock, flags);
}
Example no. 25
void cxgb3i_conn_pdu_ready(struct s3_conn *c3cn)
{
	struct sk_buff *skb;
	unsigned int read = 0;
	struct iscsi_conn *conn = c3cn->user_data;
	int err = 0;

	cxgb3i_rx_debug("cn 0x%p.\n", c3cn);

	read_lock(&c3cn->callback_lock);
	if (unlikely(!conn || conn->suspend_rx)) {
		cxgb3i_rx_debug("conn 0x%p, id %d, suspend_rx %lu!\n",
				conn, conn ? conn->id : 0xFF,
				conn ? conn->suspend_rx : 0xFF);
		read_unlock(&c3cn->callback_lock);
		return;
	}
	skb = skb_peek(&c3cn->receive_queue);
	while (!err && skb) {
		__skb_unlink(skb, &c3cn->receive_queue);
		read += skb_rx_pdulen(skb);
		cxgb3i_rx_debug("conn 0x%p, cn 0x%p, rx skb 0x%p, pdulen %u.\n",
				conn, c3cn, skb, skb_rx_pdulen(skb));
		err = cxgb3i_conn_read_pdu_skb(conn, skb);
		__kfree_skb(skb);
		skb = skb_peek(&c3cn->receive_queue);
	}
	read_unlock(&c3cn->callback_lock);
	if (c3cn) {
		c3cn->copied_seq += read;
		cxgb3i_c3cn_rx_credits(c3cn, read);
	}
	conn->rxdata_octets += read;

	if (err) {
		cxgb3i_log_info("conn 0x%p rx failed err %d.\n", conn, err);
		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
	}
}
Example no. 26
int __sk_queue_drop_skb(struct sock *sk, struct sk_buff_head *sk_queue,
			struct sk_buff *skb, unsigned int flags,
			void (*destructor)(struct sock *sk,
					   struct sk_buff *skb))
{
	int err = 0;

	if (flags & MSG_PEEK) {
		err = -ENOENT;
		spin_lock_bh(&sk_queue->lock);
		if (skb->next) {
			__skb_unlink(skb, sk_queue);
			refcount_dec(&skb->users);
			if (destructor)
				destructor(sk, skb);
			err = 0;
		}
		spin_unlock_bh(&sk_queue->lock);
	}

	atomic_inc(&sk->sk_drops);
	return err;
}
Example no. 27
static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
			  struct sk_buff_head *hitlist)
{
	struct sk_buff *skb;
	struct sk_buff *next;

	spin_lock(&x->sk_receive_queue.lock);
	receive_queue_for_each_skb(x, next, skb) {
		/*
		 *	Do we have file descriptors ?
		 */
		if (UNIXCB(skb).fp) {
			bool hit = false;
			/*
			 *	Process the descriptors of this socket
			 */
			int nfd = UNIXCB(skb).fp->count;
			struct file **fp = UNIXCB(skb).fp->fp;
			while (nfd--) {
				/*
				 *	Get the socket the fd matches
				 *	if it indeed does so
				 */
				struct sock *sk = unix_get_socket(*fp++);
				if (sk) {
					hit = true;
					func(unix_sk(sk));
				}
			}
			if (hit && hitlist != NULL) {
				__skb_unlink(skb, &x->sk_receive_queue);
				__skb_queue_tail(hitlist, skb);
			}
		}
	}
	spin_unlock(&x->sk_receive_queue.lock);
}
Example no. 28
static void netem_watchdog(unsigned long arg)
{
	struct Qdisc *sch = (struct Qdisc *)arg;
	struct netem_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = sch->dev;
	struct sk_buff *skb;
	psched_time_t now;

	pr_debug("netem_watchdog: fired @%lu\n", jiffies);

	spin_lock_bh(&dev->queue_lock);
	PSCHED_GET_TIME(now);

	while ((skb = skb_peek(&q->delayed)) != NULL) {
		const struct netem_skb_cb *cb
			= (const struct netem_skb_cb *)skb->cb;
		long delay 
			= PSCHED_US2JIFFIE(PSCHED_TDIFF(cb->time_to_send, now));
		pr_debug("netem_watchdog: skb %p@%lu %ld\n",
			 skb, jiffies, delay);

		/* is there more time remaining before time_to_send? */
		if (delay > 0) {
			mod_timer(&q->timer, jiffies + delay);
			break;
		}
		__skb_unlink(skb, &q->delayed);

		if (q->qdisc->enqueue(skb, q->qdisc)) {
			sch->q.qlen--;
			sch->qstats.drops++;
		}
	}
	qdisc_run(dev);
	spin_unlock_bh(&dev->queue_lock);
}
Example no. 29
/* NOTE:
	This function returns a list of SKBs that are suitable for aggregation.
	If no suitable SKB is found, pSendList will only contain the input SKB.
*/
u8 AMSDU_GetAggregatibleList(
	struct rtllib_device *	ieee,
	struct sk_buff *		pCurSkb,
	struct sk_buff_head 		*pSendList,
	u8				queue_index
	)
{
	struct sk_buff 			*pSkb = NULL;
	u16				nMaxAMSDUSize = 0;
	u32				AggrSize = 0;
	u32				nAggrSkbNum = 0;
	u8 				padding = 0;
	struct sta_info			*psta = NULL;	
	u8 				*addr = (u8*)(pCurSkb->data);	
	struct sk_buff_head *header;
	struct sk_buff 	  *punlinkskb = NULL;	

	padding = ((4-pCurSkb->len%4)==4)?0:(4-pCurSkb->len%4);
	AggrSize = AMSDU_SUBHEADER_LEN + pCurSkb->len + padding;
	skb_queue_tail(pSendList, pCurSkb);
	nAggrSkbNum++;

	//
	// Get A-MSDU aggregation threshold.
	//
	if(ieee->iw_mode == IW_MODE_MASTER){	
		psta = GetStaInfo(ieee, addr);
		if(NULL != psta)
			nMaxAMSDUSize = psta->htinfo.AMSDU_MaxSize;
		else
			return 1;
	}else if(ieee->iw_mode == IW_MODE_ADHOC){
		psta = GetStaInfo(ieee, addr);
		if(NULL != psta)
			nMaxAMSDUSize = psta->htinfo.AMSDU_MaxSize;
		else
			return 1;
	}else{
		nMaxAMSDUSize = ieee->pHTInfo->nCurrent_AMSDU_MaxSize;
	}
	nMaxAMSDUSize = ((nMaxAMSDUSize)==0)?HT_AMSDU_SIZE_4K:HT_AMSDU_SIZE_8K;

	if(ieee->pHTInfo->ForcedAMSDUMode == HT_AGG_FORCE_ENABLE)
	{
		nMaxAMSDUSize = ieee->pHTInfo->ForcedAMSDUMaxSize;
	}
	
	// 
	// Build pSendList
	//
	header = (&ieee->skb_aggQ[queue_index]);
	pSkb = header->next;
	while(pSkb != (struct sk_buff*)header)
	{
		//
		// Get Aggregation List. Only those frames with the same RA can be aggregated.
		// For Infrastructure mode, RA is always the AP MAC address so the frame can just be aggregated 
		// without checking RA.
		// For AP mode and IBSS mode, RA is the same as DA. Checking RA is needed before aggregation.
		//
		if((ieee->iw_mode == IW_MODE_MASTER) ||(ieee->iw_mode == IW_MODE_ADHOC))
		{
			if(memcmp(pCurSkb->data, pSkb->data, ETH_ALEN) != 0) //DA
			{
				//printk(""MAC_FMT"-"MAC_FMT"\n",MAC_ARG(pCurSkb->data), MAC_ARG(pSkb->data));
				pSkb = pSkb->next;
				continue;				
			}
		}
		//
		// Limitation shall be checked:
		// (1) A-MSDU size limitation
		//
		if((AMSDU_SUBHEADER_LEN + pSkb->len + AggrSize < nMaxAMSDUSize) )
		{
			// Unlink skb
			punlinkskb = pSkb;
			pSkb = pSkb->next;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14)
			skb_unlink(punlinkskb, header);	
#else
			/*
			 * __skb_unlink before linux2.6.14 does not use spinlock to protect list head.
			 * add spinlock function manually. john,2008/12/03
			 */
			{
				unsigned long flags;
				spin_lock_irqsave(&ieee->lock, flags);
				__skb_unlink(punlinkskb,header);
				spin_unlock_irqrestore(&ieee->lock, flags);
			}
#endif
			
			//Do aggregation
			padding = ((4-punlinkskb->len%4)==4)?0:(4-punlinkskb->len%4);
			AggrSize += AMSDU_SUBHEADER_LEN + punlinkskb->len + padding;
			//printk(""MAC_FMT": %d\n",MAC_ARG(punlinkskb->data),punlinkskb->len);
			skb_queue_tail(pSendList, punlinkskb);
			nAggrSkbNum++;
		}
		else
		{
			//Do not do aggregation because out of resources
			//printk("\nStop aggregation: ");
			if(!(AMSDU_SUBHEADER_LEN + pSkb->len + AggrSize < nMaxAMSDUSize))
				;//printk("[A-MSDU size limitation]");
		
			break; /* stop here to preserve first-in, first-out order */
		}
	}
	return nAggrSkbNum;
}
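The padding expression used twice in this function simply rounds each subframe up to the next 4-byte boundary (e.g. a 1501-byte MSDU gets 3 bytes of padding, a 1500-byte one gets none). An equivalent, more compact form is sketched below; amsdu_pad is a hypothetical helper, not part of the driver.

#include <linux/types.h>

/* Equivalent to ((4 - len % 4) == 4) ? 0 : (4 - len % 4): the number of
 * bytes needed to round len up to a multiple of 4. */
static inline u32 amsdu_pad(u32 len)
{
	return (4 - (len & 3)) & 3;
}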
Example no. 30
/* NOTE:
	This function returns a list of SKBs that are suitable for aggregation.
	If no suitable SKB is found, pSendList will only contain the input SKB.
*/
u8 AMSDU_GetAggregatibleList(
	struct rtllib_device *	ieee,
	struct sk_buff *		pCurSkb,
	struct sk_buff_head 		*pSendList,
	u8				queue_index,
	bool				is_ap
	)
{
	struct sk_buff 			*pSkb = NULL;
	u16				nMaxAMSDUSize = 0;
	u32				AggrSize = 0;
	u32				nAggrSkbNum = 0;
	u8 				padding = 0;
	struct sta_info			*psta = NULL;	
	u8 				*addr = (u8*)(pCurSkb->data);	
	struct sk_buff_head *header;
	struct sk_buff 	  *punlinkskb = NULL;	

	padding = ((4-pCurSkb->len%4)==4)?0:(4-pCurSkb->len%4);
	AggrSize = AMSDU_SUBHEADER_LEN + pCurSkb->len + padding;
	skb_queue_tail(pSendList, pCurSkb);
	nAggrSkbNum++;

	if (is_ap) {
#ifdef ASL
		if ((ieee->iw_mode == IW_MODE_MASTER || ieee->iw_mode == IW_MODE_APSTA) &&
		    (ieee->ap_state == RTLLIB_LINKED)) {
			psta = ap_get_stainfo(ieee, addr);
			if (NULL != psta)
				nMaxAMSDUSize = psta->htinfo.AMSDU_MaxSize;
			else
				return 1;
		} else
#endif
			return 1;
	} else {
		if (ieee->iw_mode == IW_MODE_ADHOC) {
			psta = GetStaInfo(ieee, addr);
			if (NULL != psta)
				nMaxAMSDUSize = psta->htinfo.AMSDU_MaxSize;
			else
				return 1;
		} else {
			nMaxAMSDUSize = ieee->pHTInfo->nCurrent_AMSDU_MaxSize;
		}
	}

	if(ieee->pHTInfo->ForcedAMSDUMode == HT_AGG_FORCE_ENABLE)
	{
		nMaxAMSDUSize = ieee->pHTInfo->ForcedAMSDUMaxSize;
	}
	
	if (is_ap) {
#ifdef ASL
		header = (&ieee->skb_apaggQ[queue_index]);
#endif
	} else
		header = (&ieee->skb_aggQ[queue_index]);
	pSkb = header->next;
	while(pSkb != (struct sk_buff*)header)
	{
		if (is_ap) {
#ifdef ASL
			if((ieee->iw_mode == IW_MODE_MASTER) ||(ieee->iw_mode == IW_MODE_APSTA))
		{
			if(memcmp(pCurSkb->data, pSkb->data, ETH_ALEN) != 0) 
			{
				pSkb = pSkb->next;
				continue;				
			}
		}
#endif
		} else {
			if(ieee->iw_mode == IW_MODE_ADHOC)
			{
				if(memcmp(pCurSkb->data, pSkb->data, ETH_ALEN) != 0) 
				{
					pSkb = pSkb->next;
					continue;				
				}
			}
		}
		if((AMSDU_SUBHEADER_LEN + pSkb->len + AggrSize < nMaxAMSDUSize) )
		{
			punlinkskb = pSkb;
			pSkb = pSkb->next;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14)
			skb_unlink(punlinkskb, header);	
#else
			/*
			 * __skb_unlink before linux2.6.14 does not use spinlock to protect list head.
			 * add spinlock function manually. john,2008/12/03
			 */
			{
				unsigned long flags;
				spin_lock_irqsave(&ieee->lock, flags);
				__skb_unlink(punlinkskb,header);
				spin_unlock_irqrestore(&ieee->lock, flags);
			}
#endif
			
			padding = ((4-punlinkskb->len%4)==4)?0:(4-punlinkskb->len%4);
			AggrSize += AMSDU_SUBHEADER_LEN + punlinkskb->len + padding;
			skb_queue_tail(pSendList, punlinkskb);
			nAggrSkbNum++;
		}
		else
		{
			if(!(AMSDU_SUBHEADER_LEN + pSkb->len + AggrSize < nMaxAMSDUSize))
				;
		
			break; 
		}
	}
	return nAggrSkbNum;
}