Example #1
0
int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	int rc = -ENOTCONN;

	lock_sock(sk);

	if (sk->sk_state == DCCP_LISTEN)
		goto out;

	switch (cmd) {
	case SIOCINQ: {
		struct sk_buff *skb;
		unsigned long amount = 0;

		skb = skb_peek(&sk->sk_receive_queue);
		if (skb != NULL) {
			/*
			 * We will only return the amount of this packet since
			 * that is all that will be read.
			 */
			amount = skb->len;
		}
		rc = put_user(amount, (int __user *)arg);
	}
		break;
	default:
		rc = -ENOIOCTLCMD;
		break;
	}
out:
	release_sock(sk);
	return rc;
}
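A minimal userspace sketch (not part of the source) of how the SIOCINQ branch above would typically be exercised; the socket is assumed to be created and connected elsewhere, and pending_bytes() is an illustrative name.

#include <sys/ioctl.h>
#include <linux/sockios.h>	/* SIOCINQ */

/* Returns the length of the first queued packet, mirroring the
 * skb_peek() logic in dccp_ioctl(), or -1 on error. */
static int pending_bytes(int sockfd)
{
	int amount = 0;

	if (ioctl(sockfd, SIOCINQ, &amount) < 0)
		return -1;
	return amount;
}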
Example #2
0
static int msm_ipc_router_extract_msg(struct msghdr *m,
				      struct sk_buff_head *msg_head)
{
	struct sockaddr_msm_ipc *addr;
	struct rr_header *hdr;
	struct sk_buff *temp;
	int offset = 0, data_len = 0, copy_len;

	/* Validate inputs before any dereference of 'm'. */
	if (!m || !msg_head) {
		pr_err("%s: Invalid pointers passed\n", __func__);
		return -EINVAL;
	}
	addr = (struct sockaddr_msm_ipc *)m->msg_name;

	temp = skb_peek(msg_head);
	hdr = (struct rr_header *)(temp->data);
	/* Only fill in the address when the caller supplied msg_name. */
	if (addr && (hdr->src_port_id != IPC_ROUTER_ADDRESS)) {
		addr->family = AF_MSM_IPC;
		addr->address.addrtype = MSM_IPC_ADDR_ID;
		addr->address.addr.port_addr.node_id = hdr->src_node_id;
		addr->address.addr.port_addr.port_id = hdr->src_port_id;
		m->msg_namelen = sizeof(struct sockaddr_msm_ipc);
	}

	data_len = hdr->size;
	skb_pull(temp, IPC_ROUTER_HDR_SIZE);
	skb_queue_walk(msg_head, temp) {
		copy_len = data_len < temp->len ? data_len : temp->len;
		if (copy_to_user(m->msg_iov->iov_base + offset, temp->data,
				 copy_len)) {
			pr_err("%s: Copy to user failed\n", __func__);
			return -EFAULT;
		}
		offset += copy_len;
		data_len -= copy_len;
	}

	return offset;	/* assumed: total bytes copied; tail of the function is truncated in the source */
}
Example #3
0
static void aun_tx_ack(unsigned long seq, int result)
{
	struct sk_buff *skb;
	unsigned long flags;
	struct ec_cb *eb;

	spin_lock_irqsave(&aun_queue_lock, flags);
	skb = skb_peek(&aun_queue);
	while (skb && skb != (struct sk_buff *)&aun_queue)
	{
		struct sk_buff *newskb = skb->next;
		eb = (struct ec_cb *)&skb->cb;
		if (eb->seq == seq)
			goto foundit;

		skb = newskb;
	}
	spin_unlock_irqrestore(&aun_queue_lock, flags);
	printk(KERN_DEBUG "AUN: unknown sequence %ld\n", seq);
	return;

foundit:
	tx_result(skb->sk, eb->cookie, result);
	skb_unlink(skb);
	spin_unlock_irqrestore(&aun_queue_lock, flags);
	kfree_skb(skb);
}
Example #4
0
static int raw_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	switch (cmd) {
	case SIOCOUTQ: {
		int amount = sk_wmem_alloc_get(sk);

		return put_user(amount, (int __user *)arg);
	}
	case SIOCINQ: {
		struct sk_buff *skb;
		int amount = 0;

		spin_lock_bh(&sk->sk_receive_queue.lock);
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb != NULL)
			amount = skb->len;
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		return put_user(amount, (int __user *)arg);
	}

	default:
#ifdef CONFIG_IP_MROUTE
		return ipmr_ioctl(sk, cmd, (void __user *)arg);
#else
		return -ENOIOCTLCMD;
#endif
	}
}
Example #5
0
/**
 * rsi_get_num_pkts_dequeue() - This function determines the number of
 *		                packets to be dequeued based on the number
 *			        of bytes calculated using txop.
 *
 * @common: Pointer to the driver private structure.
 * @q_num: the queue from which pkts have to be dequeued
 *
 * Return: pkt_num: Number of pkts to be dequeued.
 */
static u32 rsi_get_num_pkts_dequeue(struct rsi_common *common, u8 q_num)
{
	struct rsi_hw *adapter = common->priv;
	struct sk_buff *skb;
	u32 pkt_cnt = 0;
	s16 txop = common->tx_qinfo[q_num].txop * 32;
	__le16 r_txop;
	struct ieee80211_rate rate;

	rate.bitrate = RSI_RATE_MCS0 * 5 * 10; /* Convert to Kbps */
	if (q_num == VI_Q)
		txop = ((txop << 5) / 80);

	if (skb_queue_len(&common->tx_queue[q_num]))
		skb = skb_peek(&common->tx_queue[q_num]);
	else
		return 0;

	do {
		r_txop = ieee80211_generic_frame_duration(adapter->hw,
							  adapter->vifs[0],
							  common->band,
							  skb->len, &rate);
		txop -= le16_to_cpu(r_txop);
		pkt_cnt += 1;
		/*checking if pkts are still there*/
		if (skb_queue_len(&common->tx_queue[q_num]) - pkt_cnt)
			skb = skb->next;
		else
			break;

	} while (txop > 0);

	return pkt_cnt;
}
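A rough worked example of the dequeue loop above, with assumed figures not taken from the driver: txop = 94 gives a budget of 94 * 32 = 3008 us, and each MCS0 frame is taken to cost 1200 us.

static u32 example_txop_count(void)
{
	s16 budget = 94 * 32;		/* 3008 us budget */
	const s16 per_frame = 1200;	/* assumed constant frame duration, us */
	u32 pkt_cnt = 0;

	do {
		budget -= per_frame;	/* 1808, 608, -592 */
		pkt_cnt += 1;		/* 1, 2, 3 */
	} while (budget > 0);

	return pkt_cnt;			/* 3 packets fit the txop window */
}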
Example #6
0
static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
{
	const struct netdev_queue *txq = q->dev_queue;
	spinlock_t *lock = NULL;
	struct sk_buff *skb;

	if (q->flags & TCQ_F_NOLOCK) {
		lock = qdisc_lock(q);
		spin_lock(lock);
	}

	skb = skb_peek(&q->skb_bad_txq);
	if (skb) {
		/* check the reason of requeuing without tx lock first */
		txq = skb_get_tx_queue(txq->dev, skb);
		if (!netif_xmit_frozen_or_stopped(txq)) {
			skb = __skb_dequeue(&q->skb_bad_txq);
			if (qdisc_is_percpu_stats(q)) {
				qdisc_qstats_cpu_backlog_dec(q, skb);
				qdisc_qstats_cpu_qlen_dec(q);
			} else {
				qdisc_qstats_backlog_dec(q, skb);
				q->q.qlen--;
			}
		} else {
			skb = NULL;
		}
	}

	if (lock)
		spin_unlock(lock);

	return skb;
}
void dccp_write_xmit(struct sock *sk, int block)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;

	while ((skb = skb_peek(&sk->sk_write_queue))) {
		int err = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);

		if (err > 0) {
			if (!block) {
				sk_reset_timer(sk, &dp->dccps_xmit_timer,
						msecs_to_jiffies(err)+jiffies);
				break;
			} else
				err = dccp_wait_for_ccid(sk, skb, err);
			if (err && err != -EINTR)
				DCCP_BUG("err=%d after dccp_wait_for_ccid", err);
		}

		skb_dequeue(&sk->sk_write_queue);
		if (err == 0) {
			struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
			const int len = skb->len;

			if (sk->sk_state == DCCP_PARTOPEN) {
				const u32 cur_mps = dp->dccps_mss_cache - DCCP_FEATNEG_OVERHEAD;
				/*
				 * See 8.1.5 - Handshake Completion.
				 *
				 * For robustness we resend Confirm options until the client has
				 * entered OPEN. During the initial feature negotiation, the MPS
				 * is smaller than usual, reduced by the Change/Confirm options.
				 */
				if (!list_empty(&dp->dccps_featneg) && len > cur_mps) {
					DCCP_WARN("Payload too large (%d) for featneg.\n", len);
					dccp_send_ack(sk);
					dccp_feat_list_purge(&dp->dccps_featneg);
				}

				inet_csk_schedule_ack(sk);
				inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  inet_csk(sk)->icsk_rto,
						  DCCP_RTO_MAX);
				dcb->dccpd_type = DCCP_PKT_DATAACK;
			} else if (dccp_ack_pending(sk))
				dcb->dccpd_type = DCCP_PKT_DATAACK;
			else
				dcb->dccpd_type = DCCP_PKT_DATA;

			err = dccp_transmit_skb(sk, skb);
			ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, 0, len);
			if (err)
				DCCP_BUG("err=%d after ccid_hc_tx_packet_sent",
					 err);
		} else {
			dccp_pr_debug("packet discarded due to err=%d\n", err);
			kfree_skb(skb);
		}
	}
}
static int pn_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	struct sk_buff *skb;
	int answ;

	switch (cmd) {
	case SIOCINQ:
		lock_sock(sk);
		skb = skb_peek(&sk->sk_receive_queue);
		answ = skb ? skb->len : 0;
		release_sock(sk);
		return put_user(answ, (int __user *)arg);

	case SIOCPNADDRESOURCE:
	case SIOCPNDELRESOURCE: {
			u32 res;
			if (get_user(res, (u32 __user *)arg))
				return -EFAULT;
			if (res >= 256)
				return -EINVAL;
			if (cmd == SIOCPNADDRESOURCE)
				return pn_sock_bind_res(sk, res);
			else
				return pn_sock_unbind_res(sk, res);
		}
	}

	return -ENOIOCTLCMD;
}
/**
 *	__skb_recv_datagram - Receive a datagram skbuff
 *	@sk: socket
 *	@flags: MSG_ flags
 *	@peeked: returns non-zero if this packet has been seen before
 *	@err: error code returned
 *
 *	Get a datagram skbuff, understands the peeking, nonblocking wakeups
 *	and possible races. This replaces identical code in packet, raw and
 *	udp, as well as the IPX, AX.25 and Appletalk. It also finally fixes
 *	the long standing peek and read race for datagram sockets. If you
 *	alter this routine remember it must be re-entrant.
 *
 *	This function will lock the socket if a skb is returned, so the caller
 *	needs to unlock the socket in that case (usually by calling
 *	skb_free_datagram)
 *
 *	* It does not lock socket since today. This function is
 *	* free of race conditions. This measure should/can improve
 *	* significantly datagram socket latencies at high loads,
 *	* when data copying to user space takes lots of time.
 *	* (BTW I've just killed the last cli() in IP/IPv6/core/netlink/packet
 *	*  8) Great win.)
 *	*			                    --ANK (980729)
 *
 *	The order of the tests when we find no data waiting are specified
 *	quite explicitly by POSIX 1003.1g, don't change them without having
 *	the standard around please.
 */
struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
				    int *peeked, int *err)
{
	struct sk_buff *skb;
	long timeo;
	/*
	 * Caller is allowed not to check sk->sk_err before skb_recv_datagram()
	 */
	int error = sock_error(sk);

	if (error)
		goto no_packet;

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		/* Again only user level code calls this function, so nothing
		 * interrupt level will suddenly eat the receive_queue.
		 *
		 * Look at current nfs client by the way...
		 * However, this function was correct in any case. 8)
		 */
		unsigned long cpu_flags;

		spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags);
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb) {
			*peeked = skb->peeked;
			if (flags & MSG_PEEK) {
				skb->peeked = 1;
				atomic_inc(&skb->users);
			} else {
				if (!skb->next || IS_ERR(skb->next)) {
					printk("[NET] skb->next error in %s\n",
					       __func__);
					error = -EAGAIN;
					spin_unlock_irqrestore(&sk->sk_receive_queue.lock,
							       cpu_flags);
					goto no_packet;
				} else {
					__skb_unlink(skb, &sk->sk_receive_queue);
				}
			}
		}
		spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);

		if (skb)
			return skb;

		/* User doesn't want to wait */
		error = -EAGAIN;
		if (!timeo)
			goto no_packet;

	} while (!wait_for_packet(sk, err, &timeo));

	return NULL;

no_packet:
	*err = error;
	return NULL;
}
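A hedged sketch of a typical caller, modeled on how datagram recvmsg() implementations of this era consume the helper; example_recvmsg() and its minimal error handling are illustrative, not taken from the source.

static int example_recvmsg(struct sock *sk, struct msghdr *msg,
			   size_t len, int flags)
{
	int peeked, err;
	size_t copied;
	struct sk_buff *skb = __skb_recv_datagram(sk, flags, &peeked, &err);

	if (!skb)
		return err;	/* e.g. -EAGAIN when no data and MSG_DONTWAIT */

	copied = min_t(size_t, len, skb->len);
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
	skb_free_datagram(sk, skb);	/* drop the reference taken above */
	return err ? err : copied;
}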
Example #10
0
void lapb_kick(struct lapb_cb *lapb)
{
	struct sk_buff *skb, *skbn;
	unsigned short modulus, start, end;

	modulus = (lapb->mode & LAPB_EXTENDED) ? LAPB_EMODULUS : LAPB_SMODULUS;
	start = !skb_peek(&lapb->ack_queue) ? lapb->va : lapb->vs;
	end   = (lapb->va + lapb->window) % modulus;

	if (!(lapb->condition & LAPB_PEER_RX_BUSY_CONDITION) &&
	    start != end && skb_peek(&lapb->write_queue)) {
		lapb->vs = start;

		/*
		 * Dequeue the frame and copy it.
		 */
		skb = skb_dequeue(&lapb->write_queue);

		do {
			if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
				skb_queue_head(&lapb->write_queue, skb);
				break;
			}

			if (skb->sk)
				skb_set_owner_w(skbn, skb->sk);

			/*
			 * Transmit the frame copy.
			 */
			lapb_send_iframe(lapb, skbn, LAPB_POLLOFF);

			lapb->vs = (lapb->vs + 1) % modulus;

			/*
			 * Requeue the original data frame.
			 */
			skb_queue_tail(&lapb->ack_queue, skb);

		} while (lapb->vs != end && (skb = skb_dequeue(&lapb->write_queue)) != NULL);

		lapb->condition &= ~LAPB_ACK_PENDING_CONDITION;

		if (!lapb_t1timer_running(lapb))
			lapb_start_t1timer(lapb);
	}
}
struct cfpkt *cfpkt_qpeek(struct cfpktq *pktq)
{
	struct cfpkt *tmp;
	spin_lock(&pktq->lock);
	tmp = skb_to_pkt(skb_peek(&pktq->head));
	spin_unlock(&pktq->lock);
	return tmp;
}
static void cfhsi_tx_done(struct cfhsi *cfhsi)
{
	struct cfhsi_desc *desc = NULL;
	int len = 0;
	int res;

	dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	desc = (struct cfhsi_desc *)cfhsi->tx_buf;

	do {
		/*
		 * Send flow on if flow off has been previously signalled
		 * and number of packets is below low water mark.
		 */
		spin_lock_bh(&cfhsi->lock);
		if (cfhsi->flow_off_sent &&
				cfhsi->qhead.qlen <= cfhsi->q_low_mark &&
				cfhsi->cfdev.flowctrl) {

			cfhsi->flow_off_sent = 0;
			cfhsi->cfdev.flowctrl(cfhsi->ndev, ON);
		}
		spin_unlock_bh(&cfhsi->lock);

		/* Create HSI frame. */
		do {
			len = cfhsi_tx_frm(desc, cfhsi);
			if (!len) {
				spin_lock_bh(&cfhsi->lock);
				if (unlikely(skb_peek(&cfhsi->qhead))) {
					spin_unlock_bh(&cfhsi->lock);
					continue;
				}
				cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
				/* Start inactivity timer. */
				mod_timer(&cfhsi->timer,
					jiffies + CFHSI_INACTIVITY_TOUT);
				spin_unlock_bh(&cfhsi->lock);
				goto done;
			}
		} while (!len);

		/* Set up new transfer. */
		res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
		if (WARN_ON(res < 0)) {
			dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n",
				__func__, res);
		}
	} while (res < 0);

done:
	return;
}
Example #13
0
static inline struct sk_buff *qdisc_dequeue_skb_bad_txq(struct Qdisc *q)
{
	struct sk_buff *skb = skb_peek(&q->skb_bad_txq);

	if (unlikely(skb))
		skb = __skb_dequeue_bad_txq(q);

	return skb;
}
Example #14
0
static int shaper_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct shaper *shaper = dev->priv;
	struct sk_buff *ptr;

	spin_lock(&shaper->lock);
	ptr = shaper->sendq.prev;

	/*
	 *	Set up our packet details
	 */

	SHAPERCB(skb)->shapelatency = 0;
	SHAPERCB(skb)->shapeclock = shaper->recovery;
	if (time_before(SHAPERCB(skb)->shapeclock, jiffies))
		SHAPERCB(skb)->shapeclock = jiffies;
	skb->priority = 0;	/* short term bug fix */
	SHAPERCB(skb)->shapestamp = jiffies;

	/*
	 *	Time slots for this packet.
	 */

	SHAPERCB(skb)->shapelen = shaper_clocks(shaper, skb);

	{
		struct sk_buff *tmp;
		/*
		 *	Up our shape clock by the time pending on the queue
		 *	(Should keep this in the shaper as a variable..)
		 */
		for (tmp = skb_peek(&shaper->sendq);
		     tmp != NULL && tmp != (struct sk_buff *)&shaper->sendq;
		     tmp = tmp->next)
			SHAPERCB(skb)->shapeclock += SHAPERCB(tmp)->shapelen;
		/*
		 *	Queue over time. Spill packet.
		 */
		if (time_after(SHAPERCB(skb)->shapeclock, jiffies + SHAPER_LATENCY)) {
			dev_kfree_skb(skb);
			shaper->stats.tx_dropped++;
		} else
			skb_queue_tail(&shaper->sendq, skb);
	}

	if (sh_debug)
		printk("Frame queued.\n");
	if (skb_queue_len(&shaper->sendq) > SHAPER_QLEN) {
		ptr = skb_dequeue(&shaper->sendq);
		dev_kfree_skb(ptr);
		shaper->stats.collisions++;
	}
	shaper_kick(shaper);
	spin_unlock(&shaper->lock);
	return 0;
}
Example #15
0
static void unix_data_wait(unix_socket * sk)
{
	cli();
	if (!skb_peek(&sk->receive_queue)) {
		sk->socket->flags |= SO_WAITDATA;
		interruptible_sleep_on(sk->sleep);
		sk->socket->flags &= ~SO_WAITDATA;
	}
	sti();
}
Example #16
0
void dccp_write_xmit(struct sock *sk, int block)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;

	while ((skb = skb_peek(&sk->sk_write_queue))) {
		int err = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);

		if (err > 0) {
			if (!block) {
				sk_reset_timer(sk, &dp->dccps_xmit_timer,
						msecs_to_jiffies(err)+jiffies);
				break;
			} else
				err = dccp_wait_for_ccid(sk, skb, err);
			if (err && err != -EINTR)
				DCCP_BUG("err=%d after dccp_wait_for_ccid", err);
		}

		skb_dequeue(&sk->sk_write_queue);
		if (err == 0) {
			struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
			const int len = skb->len;

			if (sk->sk_state == DCCP_PARTOPEN) {
				const u32 cur_mps = dp->dccps_mss_cache - DCCP_FEATNEG_OVERHEAD;
				
				if (!list_empty(&dp->dccps_featneg) && len > cur_mps) {
					DCCP_WARN("Payload too large (%d) for featneg.\n", len);
					dccp_send_ack(sk);
					dccp_feat_list_purge(&dp->dccps_featneg);
				}

				inet_csk_schedule_ack(sk);
				inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  inet_csk(sk)->icsk_rto,
						  DCCP_RTO_MAX);
				dcb->dccpd_type = DCCP_PKT_DATAACK;
			} else if (dccp_ack_pending(sk))
				dcb->dccpd_type = DCCP_PKT_DATAACK;
			else
				dcb->dccpd_type = DCCP_PKT_DATA;

			err = dccp_transmit_skb(sk, skb);
			ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, 0, len);
			if (err)
				DCCP_BUG("err=%d after ccid_hc_tx_packet_sent",
					 err);
		} else {
			dccp_pr_debug("packet discarded due to err=%d\n", err);
			kfree_skb(skb);
		}
	}
}
Example #17
0
void dccp_write_xmit(struct sock *sk, int block)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;
	long timeo = 30000; 	/* If a packet is taking longer than 2 secs
				   we have other issues */

	while ((skb = skb_peek(&sk->sk_write_queue))) {
		int err = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb,
					 skb->len);

		if (err > 0) {
			if (!block) {
				sk_reset_timer(sk, &dp->dccps_xmit_timer,
						msecs_to_jiffies(err)+jiffies);
				break;
			} else
				err = dccp_wait_for_ccid(sk, skb, &timeo);
			if (err) {
				printk(KERN_CRIT "%s:err at dccp_wait_for_ccid"
						 " %d\n", __FUNCTION__, err);
				dump_stack();
			}
		}

		skb_dequeue(&sk->sk_write_queue);
		if (err == 0) {
			struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
			const int len = skb->len;

			if (sk->sk_state == DCCP_PARTOPEN) {
				/* See 8.1.5.  Handshake Completion */
				inet_csk_schedule_ack(sk);
				inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  inet_csk(sk)->icsk_rto,
						  DCCP_RTO_MAX);
				dcb->dccpd_type = DCCP_PKT_DATAACK;
			} else if (dccp_ack_pending(sk))
				dcb->dccpd_type = DCCP_PKT_DATAACK;
			else
				dcb->dccpd_type = DCCP_PKT_DATA;

			err = dccp_transmit_skb(sk, skb);
			ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, 0, len);
			if (err) {
				printk(KERN_CRIT "%s:err from "
					         "ccid_hc_tx_packet_sent %d\n",
					         __FUNCTION__, err);
				dump_stack();
			}
		} else
			kfree_skb(skb);	/* skbs must be freed with kfree_skb(), not kfree() */
	}
}
static struct sk_buff *pfifo_fast_peek(struct Qdisc* qdisc)
{
	int prio;
	struct sk_buff_head *list = qdisc_priv(qdisc);

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
		if (!skb_queue_empty(list + prio))
			return skb_peek(list + prio);
	}

	return NULL;
}
Example #19
0
void skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
{
	if (flags & MSG_PEEK) {
		spin_lock_bh(&sk->sk_receive_queue.lock);
		if (skb == skb_peek(&sk->sk_receive_queue)) {
			__skb_unlink(skb, &sk->sk_receive_queue);
			atomic_dec(&skb->users);
		}
		spin_unlock_bh(&sk->sk_receive_queue.lock);
	}

	kfree_skb(skb);
}
Example #20
0
static int pep_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	struct pep_sock *pn = pep_sk(sk);
	int answ;
	int ret = -ENOIOCTLCMD;

	switch (cmd) {
	case SIOCINQ:
		if (sk->sk_state == TCP_LISTEN) {
			ret = -EINVAL;
			break;
		}

		lock_sock(sk);
		if (sock_flag(sk, SOCK_URGINLINE) &&
		    !skb_queue_empty(&pn->ctrlreq_queue))
			answ = skb_peek(&pn->ctrlreq_queue)->len;
		else if (!skb_queue_empty(&sk->sk_receive_queue))
			answ = skb_peek(&sk->sk_receive_queue)->len;
		else
			answ = 0;
		release_sock(sk);
		ret = put_user(answ, (int __user *)arg);
		break;

	case SIOCPNENABLEPIPE:
		lock_sock(sk);
		if (sk->sk_state == TCP_SYN_SENT)
			ret =  -EBUSY;
		else if (sk->sk_state == TCP_ESTABLISHED)
			ret = -EISCONN;
		else
			ret = pep_sock_enable(sk, NULL, 0);
		release_sock(sk);
		break;
	}

	return ret;
}
Example #21
0
static struct sk_buff *
sfq_peek(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	sfq_index a;

	/* No active slots */
	if (q->tail == SFQ_DEPTH)
		return NULL;

	a = q->next[q->tail];
	return skb_peek(&q->qs[a]);
}
void cxgb3i_conn_pdu_ready(struct s3_conn *c3cn)
{
	struct sk_buff *skb;
	unsigned int read = 0;
	struct iscsi_conn *conn = c3cn->user_data;
	int err = 0;

	cxgb3i_rx_debug("cn 0x%p.\n", c3cn);

	read_lock(&c3cn->callback_lock);
	if (unlikely(!conn || conn->suspend_rx)) {
		cxgb3i_rx_debug("conn 0x%p, id %d, suspend_rx %lu!\n",
				conn, conn ? conn->id : 0xFF,
				conn ? conn->suspend_rx : 0xFF);
		read_unlock(&c3cn->callback_lock);
		return;
	}
	skb = skb_peek(&c3cn->receive_queue);
	while (!err && skb) {
		__skb_unlink(skb, &c3cn->receive_queue);
		read += skb_rx_pdulen(skb);
		cxgb3i_rx_debug("conn 0x%p, cn 0x%p, rx skb 0x%p, pdulen %u.\n",
				conn, c3cn, skb, skb_rx_pdulen(skb));
		err = cxgb3i_conn_read_pdu_skb(conn, skb);
		__kfree_skb(skb);
		skb = skb_peek(&c3cn->receive_queue);
	}
	read_unlock(&c3cn->callback_lock);
	if (c3cn) {
		c3cn->copied_seq += read;
		cxgb3i_c3cn_rx_credits(c3cn, read);
	}
	conn->rxdata_octets += read;

	if (err) {
		cxgb3i_log_info("conn 0x%p rx failed err %d.\n", conn, err);
		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
	}
}
Example #23
0
static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc)
{
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
	int band = bitmap2band[priv->bitmap];

	if (band >= 0) {
		struct sk_buff_head *list = band2list(priv, band);

		return skb_peek(list);
	}

	return NULL;
}
Example #24
0
static inline void wait_for_packet(struct sock * sk)
{
	struct wait_queue wait = { current, NULL };

	add_wait_queue(sk->sleep, &wait);
	current->state = TASK_INTERRUPTIBLE;

	if (skb_peek(&sk->receive_queue) == NULL)
		schedule();

	current->state = TASK_RUNNING;
	remove_wait_queue(sk->sleep, &wait);
}
/**
 *	skb_recv_datagram - Receive a datagram skbuff
 *	@sk - socket
 *	@flags - MSG_ flags
 *	@noblock - blocking operation?
 *	@err - error code returned
 *
 *	Get a datagram skbuff, understands the peeking, nonblocking wakeups
 *	and possible races. This replaces identical code in packet, raw and
 *	udp, as well as the IPX, AX.25 and Appletalk. It also finally fixes
 *	the long standing peek and read race for datagram sockets. If you
 *	alter this routine remember it must be re-entrant.
 *
 *	This function will lock the socket if a skb is returned, so the caller
 *	needs to unlock the socket in that case (usually by calling
 *	skb_free_datagram)
 *
 *	* It does not lock socket since today. This function is
 *	* free of race conditions. This measure should/can improve
 *	* significantly datagram socket latencies at high loads,
 *	* when data copying to user space takes lots of time.
 *	* (BTW I've just killed the last cli() in IP/IPv6/core/netlink/packet
 *	*  8) Great win.)
 *	*			                    --ANK (980729)
 *
 *	The order of the tests when we find no data waiting are specified
 *	quite explicitly by POSIX 1003.1g, don't change them without having
 *	the standard around please.
 */
struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
				  int noblock, int *err)
{
	struct sk_buff *skb;
	long timeo;
	/*
	 * Caller is allowed not to check sk->sk_err before skb_recv_datagram()
	 */
	int error = sock_error(sk);

	if (error)
		goto no_packet;

	timeo = sock_rcvtimeo(sk, noblock);

	do {
		/* Again only user level code calls this function, so nothing
		 * interrupt level will suddenly eat the receive_queue.
		 *
		 * Look at current nfs client by the way...
		 * However, this function was correct in any case. 8)
		 */
		if (flags & MSG_PEEK) {
			unsigned long cpu_flags;

			spin_lock_irqsave(&sk->sk_receive_queue.lock,
					  cpu_flags);
			skb = skb_peek(&sk->sk_receive_queue);
			if (skb)
				atomic_inc(&skb->users);
			spin_unlock_irqrestore(&sk->sk_receive_queue.lock,
					       cpu_flags);
		} else
			skb = skb_dequeue(&sk->sk_receive_queue);

		if (skb)
			return skb;

		/* User doesn't want to wait */
		error = -EAGAIN;
		if (!timeo)
			goto no_packet;

	} while (!wait_for_packet(sk, err, &timeo));

	return NULL;

no_packet:
	*err = error;
	return NULL;
}
Example #26
0
static int netlink_select(struct inode *inode, struct file *file, int sel_type, select_table * wait)
{
	unsigned int minor = MINOR(inode->i_rdev);
	switch (sel_type) {
	case SEL_IN:
		if (skb_peek(&skb_queue_rd[minor])!=NULL)
			return 1;
		select_wait(&read_space_wait[minor], wait);
		break;
	case SEL_OUT:
		return 1;
	}
	return 0;
}
Example #27
0
void nr_frames_acked(struct sock *sk, unsigned short nr)
{
	struct nr_sock *nrom = nr_sk(sk);
	struct sk_buff *skb;

	
	if (nrom->va != nr) {
		while (skb_peek(&nrom->ack_queue) != NULL && nrom->va != nr) {
			skb = skb_dequeue(&nrom->ack_queue);
			kfree_skb(skb);
			nrom->va = (nrom->va + 1) % NR_MODULUS;
		}
	}
}
/**
 * msm_ipc_router_hsic_remote_write() - Write to XPRT
 * @data: Data to be written to the XPRT.
 * @len: Length of the data to be written.
 * @xprt: XPRT to which the data has to be written.
 *
 * @return: Data Length on success, standard Linux error codes on failure.
 */
static int msm_ipc_router_hsic_remote_write(void *data,
		uint32_t len, struct msm_ipc_router_xprt *xprt)
{
	struct rr_packet *pkt = (struct rr_packet *)data;
	struct sk_buff *skb;
	struct ipc_bridge_platform_data *pdata;
	struct msm_ipc_router_hsic_xprt *hsic_xprtp;
	int ret;

	if (!pkt || pkt->length != len || !xprt) {
		pr_err("%s: Invalid input parameters\n", __func__);
		return -EINVAL;
	}

	hsic_xprtp = container_of(xprt, struct msm_ipc_router_hsic_xprt, xprt);
	mutex_lock(&hsic_xprtp->ss_reset_lock);
	if (hsic_xprtp->ss_reset) {
		pr_err("%s: Trying to write on a reset link\n", __func__);
		mutex_unlock(&hsic_xprtp->ss_reset_lock);
		return -ENETRESET;
	}

	if (!hsic_xprtp->pdev) {
		pr_err("%s: Trying to write on a closed link\n", __func__);
		mutex_unlock(&hsic_xprtp->ss_reset_lock);
		return -ENODEV;
	}

	pdata = hsic_xprtp->pdev->dev.platform_data;
	if (!pdata || !pdata->write) {
		pr_err("%s on an uninitialized link\n", __func__);
		mutex_unlock(&hsic_xprtp->ss_reset_lock);
		return -EFAULT;
	}

	skb = skb_peek(pkt->pkt_fragment_q);
	if (!skb) {
		pr_err("%s SKB is NULL\n", __func__);
		mutex_unlock(&hsic_xprtp->ss_reset_lock);
		return -EINVAL;
	}
	D("%s: About to write %d bytes\n", __func__, len);
	ret = pdata->write(hsic_xprtp->pdev, skb->data, skb->len);
	if (ret == skb->len)
		ret = len;
	D("%s: Finished writing %d bytes\n", __func__, len);
	mutex_unlock(&hsic_xprtp->ss_reset_lock);
	return ret;
}
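A brief caller sketch under stated assumptions: the rr_packet is taken to have been assembled elsewhere with pkt_fragment_q and length already filled in, and example_xprt_send() is a hypothetical name.

static int example_xprt_send(struct rr_packet *pkt,
			     struct msm_ipc_router_xprt *xprt)
{
	int rc = msm_ipc_router_hsic_remote_write(pkt, pkt->length, xprt);

	if (rc < 0)
		pr_err("%s: xprt write failed %d\n", __func__, rc);
	return rc;	/* data length on success, per the kernel-doc above */
}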
Example #29
0
static int pep_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	struct pep_sock *pn = pep_sk(sk);
	int answ;

	switch (cmd) {
	case SIOCINQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		lock_sock(sk);
		if (sock_flag(sk, SOCK_URGINLINE) &&
		    !skb_queue_empty(&pn->ctrlreq_queue))
			answ = skb_peek(&pn->ctrlreq_queue)->len;
		else if (!skb_queue_empty(&sk->sk_receive_queue))
			answ = skb_peek(&sk->sk_receive_queue)->len;
		else
			answ = 0;
		release_sock(sk);
		return put_user(answ, (int __user *)arg);
	}

	return -ENOIOCTLCMD;
}
Example #30
0
/*
 * This routine purges the input queue of those frames that have been
 * acknowledged. This replaces the boxes labelled "V(a) <- N(r)" on the
 * SDL diagram.
 */
void nr_frames_acked(struct sock *sk, unsigned short nr)
{
	struct sk_buff *skb;

	/*
	 * Remove all the ack-ed frames from the ack queue.
	 */
	if (sk->protinfo.nr->va != nr) {
		while (skb_peek(&sk->protinfo.nr->ack_queue) != NULL && sk->protinfo.nr->va != nr) {
		        skb = skb_dequeue(&sk->protinfo.nr->ack_queue);
			kfree_skb(skb);
			sk->protinfo.nr->va = (sk->protinfo.nr->va + 1) % NR_MODULUS;
		}
	}
}
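An illustrative walk-through of the purge loop with assumed state that is not taken from the source: NR_MODULUS = 8, va = 5, and the peer acknowledges up to nr = 7.

static unsigned short example_purge(void)
{
	unsigned short va = 5, nr = 7, modulus = 8;

	while (va != nr)
		va = (va + 1) % modulus;	/* 5 -> 6 -> 7; one frame freed per step */

	return va;	/* two frames were purged; va now equals nr */
}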