Example #1
static int recv_stream(struct kiocb *iocb, struct socket *sock,
		       struct msghdr *m, size_t buf_len, int flags)
{
	struct tipc_sock *tsock = tipc_sk(sock->sk);
	struct sk_buff *buf;
	struct tipc_msg *msg;
	unsigned int q_len;
	unsigned int sz;
	int sz_to_copy;
	int sz_copied = 0;
	int needed;
	char __user *crs = m->msg_iov->iov_base;
	unsigned char *buf_crs;
	u32 err;
	int res;

	/* Currently doesn't support receiving into multiple iovec entries */

	if (m->msg_iovlen != 1)
		return -EOPNOTSUPP;

	/* Catch invalid receive attempts */

	if (unlikely(!buf_len))
		return -EINVAL;

	if (unlikely(sock->state == SS_DISCONNECTING)) {
		if (skb_queue_len(&sock->sk->sk_receive_queue) == 0)
			return -ENOTCONN;
	} else if (unlikely(sock->state != SS_CONNECTED))
		return -ENOTCONN;

	/* Look for a message in receive queue; wait if necessary */

	if (unlikely(down_interruptible(&tsock->sem)))
		return -ERESTARTSYS;

restart:
	if (unlikely((skb_queue_len(&sock->sk->sk_receive_queue) == 0) &&
		     (flags & MSG_DONTWAIT))) {
		res = -EWOULDBLOCK;
		goto exit;
	}

	if ((res = wait_event_interruptible(
		*sock->sk->sk_sleep,
		((q_len = skb_queue_len(&sock->sk->sk_receive_queue)) ||
		 (sock->state == SS_DISCONNECTING))) )) {
		goto exit;
	}

	/* Catch attempt to receive on an already terminated connection */
	/* [THIS CHECK MAY OVERLAP WITH AN EARLIER CHECK] */

	if (!q_len) {
		res = -ENOTCONN;
		goto exit;
	}

	/* Get access to first message in receive queue */

	buf = skb_peek(&sock->sk->sk_receive_queue);
	msg = buf_msg(buf);
	sz = msg_data_sz(msg);
	err = msg_errcode(msg);

	/* Discard an empty non-errored message & try again */

	if ((!sz) && (!err)) {
		advance_queue(tsock);
		goto restart;
	}

	/* Optionally capture sender's address & ancillary data of first msg */

	if (sz_copied == 0) {
		set_orig_addr(m, msg);
		if ((res = anc_data_recv(m, msg, tsock->p)))
			goto exit;
	}

	/* Capture message data (if valid) & compute return value (always) */

	if (!err) {
		buf_crs = (unsigned char *)(TIPC_SKB_CB(buf)->handle);
		sz = skb_tail_pointer(buf) - buf_crs;

		needed = (buf_len - sz_copied);
		sz_to_copy = (sz <= needed) ? sz : needed;
		if (unlikely(copy_to_user(crs, buf_crs, sz_to_copy))) {
			res = -EFAULT;
			goto exit;
		}
		sz_copied += sz_to_copy;

		if (sz_to_copy < sz) {
			if (!(flags & MSG_PEEK))
				TIPC_SKB_CB(buf)->handle = buf_crs + sz_to_copy;
			goto exit;
		}

		crs += sz_to_copy;
	} else {
		if (sz_copied != 0)
			goto exit; /* can't add error msg to valid data */

		if ((err == TIPC_CONN_SHUTDOWN) || m->msg_control)
			res = 0;
		else
			res = -ECONNRESET;
	}

	/* Consume received message (optional) */

	if (likely(!(flags & MSG_PEEK))) {
		if (unlikely(++tsock->p->conn_unacked >= TIPC_FLOW_CONTROL_WIN))
			tipc_acknowledge(tsock->p->ref, tsock->p->conn_unacked);
		advance_queue(tsock);
	}

	/* Loop around if more data is required */

	if ((sz_copied < buf_len)    /* didn't get all requested data */
	    && (flags & MSG_WAITALL) /* ... and need to wait for more */
	    && (!(flags & MSG_PEEK)) /* ... and aren't just peeking at data */
	    && (!err)                /* ... and haven't reached a FIN */
	    )
		goto restart;

exit:
	up(&tsock->sem);
	return sz_copied ? sz_copied : res;
}
Example #2
static void
dch_l2l1(struct hisax_d_if *iface, int pr, void *arg)
{
	struct hfc4s8s_l1 *l1 = iface->ifc.priv;
	struct sk_buff *skb = (struct sk_buff *) arg;
	u_long flags;

	switch (pr) {

	case (PH_DATA | REQUEST):
		if (!l1->enabled) {
			dev_kfree_skb(skb);
			break;
		}
		spin_lock_irqsave(&l1->lock, flags);
		skb_queue_tail(&l1->d_tx_queue, skb);
		if ((skb_queue_len(&l1->d_tx_queue) == 1) &&
		    (l1->tx_cnt <= 0)) {
			l1->hw->mr.r_irq_fifo_blx[l1->st_num] |=
				0x10;
			spin_unlock_irqrestore(&l1->lock, flags);
			schedule_work(&l1->hw->tqueue);
		} else
			spin_unlock_irqrestore(&l1->lock, flags);
		break;

	case (PH_ACTIVATE | REQUEST):
		if (!l1->enabled)
			break;
		if (!l1->nt_mode) {
			if (l1->l1_state < 6) {
				spin_lock_irqsave(&l1->lock,
						  flags);

				Write_hfc8(l1->hw, R_ST_SEL,
					   l1->st_num);
				Write_hfc8(l1->hw, A_ST_WR_STA,
					   0x60);
				mod_timer(&l1->l1_timer,
					  jiffies + L1_TIMER_T3);
				spin_unlock_irqrestore(&l1->lock,
						       flags);
			} else if (l1->l1_state == 7)
				l1->d_if.ifc.l1l2(&l1->d_if.ifc,
						  PH_ACTIVATE |
						  INDICATION,
						  NULL);
		} else {
			if (l1->l1_state != 3) {
				spin_lock_irqsave(&l1->lock,
						  flags);
				Write_hfc8(l1->hw, R_ST_SEL,
					   l1->st_num);
				Write_hfc8(l1->hw, A_ST_WR_STA,
					   0x60);
				spin_unlock_irqrestore(&l1->lock,
						       flags);
			} else if (l1->l1_state == 3)
				l1->d_if.ifc.l1l2(&l1->d_if.ifc,
						  PH_ACTIVATE |
						  INDICATION,
						  NULL);
		}
		break;

	default:
		printk(KERN_INFO
		       "HFC-4S/8S: Unknown D-chan cmd 0x%x received, ignored\n",
		       pr);
		break;
	}
	if (!l1->enabled)
		l1->d_if.ifc.l1l2(&l1->d_if.ifc,
				  PH_DEACTIVATE | INDICATION, NULL);
}
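Example #2 enqueues under the driver lock and kicks the hardware only when skb_queue_len() reports exactly 1, i.e. the queue was empty before this skb arrived. Below is a minimal sketch of that "kick on first enqueue" pattern; my_dev and my_kick_hw are illustrative names, not taken from the driver above.

#include <linux/skbuff.h>
#include <linux/spinlock.h>

struct my_dev {
	spinlock_t lock;
	struct sk_buff_head tx_queue;
};

static void my_kick_hw(struct my_dev *dev);	/* hypothetical doorbell/IRQ kick */

static void my_queue_tx(struct my_dev *dev, struct sk_buff *skb)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	skb_queue_tail(&dev->tx_queue, skb);
	/* Length 1 right after the enqueue means the queue was idle,
	 * so transmission has to be (re)started explicitly. */
	if (skb_queue_len(&dev->tx_queue) == 1)
		my_kick_hw(dev);
	spin_unlock_irqrestore(&dev->lock, flags);
}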
Example #3
static int accept(struct socket *sock, struct socket *newsock, int flags)
{
	struct tipc_sock *tsock = tipc_sk(sock->sk);
	struct sk_buff *buf;
	int res = -EFAULT;

	if (sock->state == SS_READY)
		return -EOPNOTSUPP;
	if (sock->state != SS_LISTENING)
		return -EINVAL;

	if (unlikely((skb_queue_len(&sock->sk->sk_receive_queue) == 0) &&
		     (flags & O_NONBLOCK)))
		return -EWOULDBLOCK;

	if (down_interruptible(&tsock->sem))
		return -ERESTARTSYS;

	if (wait_event_interruptible(*sock->sk->sk_sleep,
				     skb_queue_len(&sock->sk->sk_receive_queue))) {
		res = -ERESTARTSYS;
		goto exit;
	}
	buf = skb_peek(&sock->sk->sk_receive_queue);

	res = tipc_create(newsock, 0);
	if (!res) {
		struct tipc_sock *new_tsock = tipc_sk(newsock->sk);
		struct tipc_portid id;
		struct tipc_msg *msg = buf_msg(buf);
		u32 new_ref = new_tsock->p->ref;

		id.ref = msg_origport(msg);
		id.node = msg_orignode(msg);
		tipc_connect2port(new_ref, &id);
		newsock->state = SS_CONNECTED;

		tipc_set_portimportance(new_ref, msg_importance(msg));
		if (msg_named(msg)) {
			new_tsock->p->conn_type = msg_nametype(msg);
			new_tsock->p->conn_instance = msg_nameinst(msg);
		}

		/*
		 * Respond to 'SYN-' by discarding it & returning 'ACK'-.
		 * Respond to 'SYN+' by queuing it on new socket.
		 */

		msg_dbg(msg,"<ACC<: ");
		if (!msg_data_sz(msg)) {
			struct msghdr m = {NULL,};

			send_packet(NULL, newsock, &m, 0);
			advance_queue(tsock);
		} else {
			sock_lock(tsock);
			skb_dequeue(&sock->sk->sk_receive_queue);
			sock_unlock(tsock);
			skb_queue_head(&newsock->sk->sk_receive_queue, buf);
		}
	}
exit:
	up(&tsock->sem);
	return res;
}
Example #4
static int recv_msg(struct kiocb *iocb, struct socket *sock,
		    struct msghdr *m, size_t buf_len, int flags)
{
	struct tipc_sock *tsock = tipc_sk(sock->sk);
	struct sk_buff *buf;
	struct tipc_msg *msg;
	unsigned int q_len;
	unsigned int sz;
	u32 err;
	int res;

	/* Currently doesn't support receiving into multiple iovec entries */

	if (m->msg_iovlen != 1)
		return -EOPNOTSUPP;

	/* Catch invalid receive attempts */

	if (unlikely(!buf_len))
		return -EINVAL;

	if (sock->type == SOCK_SEQPACKET) {
		if (unlikely(sock->state == SS_UNCONNECTED))
			return -ENOTCONN;
		if (unlikely((sock->state == SS_DISCONNECTING) &&
			     (skb_queue_len(&sock->sk->sk_receive_queue) == 0)))
			return -ENOTCONN;
	}

	/* Look for a message in receive queue; wait if necessary */

	if (unlikely(down_interruptible(&tsock->sem)))
		return -ERESTARTSYS;

restart:
	if (unlikely((skb_queue_len(&sock->sk->sk_receive_queue) == 0) &&
		     (flags & MSG_DONTWAIT))) {
		res = -EWOULDBLOCK;
		goto exit;
	}

	if ((res = wait_event_interruptible(
		*sock->sk->sk_sleep,
		((q_len = skb_queue_len(&sock->sk->sk_receive_queue)) ||
		 (sock->state == SS_DISCONNECTING))) )) {
		goto exit;
	}

	/* Catch attempt to receive on an already terminated connection */
	/* [THIS CHECK MAY OVERLAP WITH AN EARLIER CHECK] */

	if (!q_len) {
		res = -ENOTCONN;
		goto exit;
	}

	/* Get access to first message in receive queue */

	buf = skb_peek(&sock->sk->sk_receive_queue);
	msg = buf_msg(buf);
	sz = msg_data_sz(msg);
	err = msg_errcode(msg);

	/* Complete connection setup for an implied connect */

	if (unlikely(sock->state == SS_CONNECTING)) {
		if ((res = auto_connect(sock, tsock, msg)))
			goto exit;
	}

	/* Discard an empty non-errored message & try again */

	if ((!sz) && (!err)) {
		advance_queue(tsock);
		goto restart;
	}

	/* Capture sender's address (optional) */

	set_orig_addr(m, msg);

	/* Capture ancillary data (optional) */

	if ((res = anc_data_recv(m, msg, tsock->p)))
		goto exit;

	/* Capture message data (if valid) & compute return value (always) */

	if (!err) {
		if (unlikely(buf_len < sz)) {
			sz = buf_len;
			m->msg_flags |= MSG_TRUNC;
		}
		if (unlikely(copy_to_user(m->msg_iov->iov_base, msg_data(msg),
					  sz))) {
			res = -EFAULT;
			goto exit;
		}
		res = sz;
	} else {
		if ((sock->state == SS_READY) ||
		    ((err == TIPC_CONN_SHUTDOWN) || m->msg_control))
			res = 0;
		else
			res = -ECONNRESET;
	}

	/* Consume received message (optional) */

	if (likely(!(flags & MSG_PEEK))) {
		if (unlikely(++tsock->p->conn_unacked >= TIPC_FLOW_CONTROL_WIN))
			tipc_acknowledge(tsock->p->ref, tsock->p->conn_unacked);
		advance_queue(tsock);
	}
exit:
	up(&tsock->sem);
	return res;
}
Example #5
static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);
	struct tipc_sock *tsock = (struct tipc_sock *)tport->usr_handle;
	struct socket *sock;
	u32 recv_q_len;

	/* Reject message if socket is closing */

	if (!tsock)
		return TIPC_ERR_NO_PORT;

	/* Reject message if it is wrong sort of message for socket */

	/*
	 * WOULD IT BE BETTER TO JUST DISCARD THESE MESSAGES INSTEAD?
	 * "NO PORT" ISN'T REALLY THE RIGHT ERROR CODE, AND THERE MAY
	 * BE SECURITY IMPLICATIONS INHERENT IN REJECTING INVALID TRAFFIC
	 */
	sock = tsock->sk.sk_socket;
	if (sock->state == SS_READY) {
		if (msg_connected(msg)) {
			msg_dbg(msg, "dispatch filter 1\n");
			return TIPC_ERR_NO_PORT;
		}
	} else {
		if (msg_mcast(msg)) {
			msg_dbg(msg, "dispatch filter 2\n");
			return TIPC_ERR_NO_PORT;
		}
		if (sock->state == SS_CONNECTED) {
			if (!msg_connected(msg)) {
				msg_dbg(msg, "dispatch filter 3\n");
				return TIPC_ERR_NO_PORT;
			}
		}
		else if (sock->state == SS_CONNECTING) {
			if (!msg_connected(msg) && (msg_errcode(msg) == 0)) {
				msg_dbg(msg, "dispatch filter 4\n");
				return TIPC_ERR_NO_PORT;
			}
		}
		else if (sock->state == SS_LISTENING) {
			if (msg_connected(msg) || msg_errcode(msg)) {
				msg_dbg(msg, "dispatch filter 5\n");
				return TIPC_ERR_NO_PORT;
			}
		}
		else if (sock->state == SS_DISCONNECTING) {
			msg_dbg(msg, "dispatch filter 6\n");
			return TIPC_ERR_NO_PORT;
		}
		else /* (sock->state == SS_UNCONNECTED) */ {
			if (msg_connected(msg) || msg_errcode(msg)) {
				msg_dbg(msg, "dispatch filter 7\n");
				return TIPC_ERR_NO_PORT;
			}
		}
	}

	/* Reject message if there isn't room to queue it */

	if (unlikely((u32)atomic_read(&tipc_queue_size) >
		     OVERLOAD_LIMIT_BASE)) {
		if (queue_overloaded(atomic_read(&tipc_queue_size),
				     OVERLOAD_LIMIT_BASE, msg))
			return TIPC_ERR_OVERLOAD;
	}
	recv_q_len = skb_queue_len(&tsock->sk.sk_receive_queue);
	if (unlikely(recv_q_len > (OVERLOAD_LIMIT_BASE / 2))) {
		if (queue_overloaded(recv_q_len,
				     OVERLOAD_LIMIT_BASE / 2, msg))
			return TIPC_ERR_OVERLOAD;
	}

	/* Initiate connection termination for an incoming 'FIN' */

	if (unlikely(msg_errcode(msg) && (sock->state == SS_CONNECTED))) {
		sock->state = SS_DISCONNECTING;
		/* Note: Use signal since port lock is already taken! */
		tipc_k_signal((Handler)async_disconnect, tport->ref);
	}

	/* Enqueue message (finally!) */

	msg_dbg(msg,"<DISP<: ");
	TIPC_SKB_CB(buf)->handle = msg_data(msg);
	atomic_inc(&tipc_queue_size);
	skb_queue_tail(&sock->sk->sk_receive_queue, buf);

	if (waitqueue_active(sock->sk->sk_sleep))
		wake_up_interruptible(sock->sk->sk_sleep);
	return TIPC_OK;
}
Example #6
static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
		   int flags)
{
   struct tipc_sock *tsock = tipc_sk(sock->sk);
   struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
   struct msghdr m = {NULL,};
   struct sk_buff *buf;
   struct tipc_msg *msg;
   int res;

   /* For now, TIPC does not allow use of connect() with DGRAM or RDM types */

   if (sock->state == SS_READY)
	   return -EOPNOTSUPP;

   /* Issue Posix-compliant error code if socket is in the wrong state */

   if (sock->state == SS_LISTENING)
	   return -EOPNOTSUPP;
   if (sock->state == SS_CONNECTING)
	   return -EALREADY;
   if (sock->state != SS_UNCONNECTED)
	   return -EISCONN;

   /*
    * Reject connection attempt using multicast address
    *
    * Note: send_msg() validates the rest of the address fields,
    *       so there's no need to do it here
    */

   if (dst->addrtype == TIPC_ADDR_MCAST)
	   return -EINVAL;

   /* Send a 'SYN-' to destination */

   m.msg_name = dest;
   m.msg_namelen = destlen;
   if ((res = send_msg(NULL, sock, &m, 0)) < 0) {
	   sock->state = SS_DISCONNECTING;
	   return res;
   }

   if (down_interruptible(&tsock->sem))
	   return -ERESTARTSYS;

   /* Wait for destination's 'ACK' response */

   res = wait_event_interruptible_timeout(*sock->sk->sk_sleep,
					  skb_queue_len(&sock->sk->sk_receive_queue),
					  sock->sk->sk_rcvtimeo);
   buf = skb_peek(&sock->sk->sk_receive_queue);
   if (res > 0) {
	   msg = buf_msg(buf);
	   res = auto_connect(sock, tsock, msg);
	   if (!res) {
		   if (!msg_data_sz(msg))
			   advance_queue(tsock);
	   }
   } else {
	   if (res == 0) {
		   res = -ETIMEDOUT;
	   } else
		   { /* leave "res" unchanged */ }
	   sock->state = SS_DISCONNECTING;
   }

   up(&tsock->sem);
   return res;
}
Example #7
struct sk_buff *AMSDU_Aggregation(
	struct rtllib_device 	*ieee,
	struct sk_buff_head 		*pSendList
	)
{
	struct sk_buff *	pSkb;
	struct sk_buff * 	pAggrSkb;
	u8		i;
	u32		total_length = 0;
	u32		skb_len, num_skb;
	pcb_desc 	pcb;
	u8 		amsdu_shdr[AMSDU_SUBHEADER_LEN];
	u8		padding = 0;
	u8		*p = NULL, *q=NULL;
	u16		ether_type;

	//
	// Calculate the total length
	//
	num_skb = skb_queue_len(pSendList);
	if(num_skb == 0)
		return NULL;
	if(num_skb == 1)
	{
		pSkb = (struct sk_buff *)skb_dequeue(pSendList);
		memset(pSkb->cb, 0, sizeof(pSkb->cb));
		pcb = (pcb_desc)(pSkb->cb + MAX_DEV_ADDR_SIZE);
		pcb->bFromAggrQ = true;
		return pSkb;
	}

	total_length += sizeof(struct ethhdr);
	for(i=0; i<num_skb; i++)	
	{
		pSkb= (struct sk_buff *)skb_dequeue(pSendList);
		if(pSkb->len <= (ETH_ALEN*2))
		{
			dev_kfree_skb_any(pSkb);
			continue;
		}
		skb_len = pSkb->len - ETH_ALEN*2 + SNAP_SIZE + AMSDU_SUBHEADER_LEN;
		if(i < (num_skb-1))
		{
			skb_len += ((4-skb_len%4)==4)?0:(4-skb_len%4);
		}
		total_length += skb_len;
		skb_queue_tail(pSendList, pSkb);
	}
	
	//
	// Create A-MSDU
	//
	pAggrSkb = dev_alloc_skb(total_length);
	if(NULL == pAggrSkb)
	{
		skb_queue_purge(pSendList);
		printk("%s: Can not alloc skb!\n", __FUNCTION__);
		return NULL;
	}
	skb_put(pAggrSkb,total_length);
	pAggrSkb->priority = pSkb->priority;

	//
	// Fill AMSDU attributes within cb
	//
	memset(pAggrSkb->cb, 0, sizeof(pAggrSkb->cb));
	pcb = (pcb_desc)(pAggrSkb->cb + MAX_DEV_ADDR_SIZE);
	pcb->bFromAggrQ = true;
	pcb->bAMSDU = true;

	//printk("======== In %s: num_skb=%d total_length=%d\n", __FUNCTION__,num_skb, total_length);
	//
	// Make A-MSDU
	//
	memset(amsdu_shdr, 0, AMSDU_SUBHEADER_LEN);
	p = pAggrSkb->data;
	for(i=0; i<num_skb; i++)	
	{
		q = p;
		pSkb= (struct sk_buff *)skb_dequeue(pSendList);
		ether_type = ntohs(((struct ethhdr *)pSkb->data)->h_proto);

		skb_len = pSkb->len - sizeof(struct ethhdr) + AMSDU_SUBHEADER_LEN + SNAP_SIZE + sizeof(u16);
		if(i < (num_skb-1))
		{
			padding = ((4-skb_len%4)==4)?0:(4-skb_len%4);
			skb_len += padding;
		}
		if(i == 0)
		{
			memcpy(p, pSkb->data, sizeof(struct ethhdr));
			p += sizeof(struct ethhdr);
		}
		//if(memcmp(pSkb->data, pAggrSkb->data, sizeof(struct ethhdr)))
		//	printk(""MAC_FMT"-"MAC_FMT"\n",MAC_ARG(pSkb->data), MAC_ARG(pAggrSkb->data));
		memcpy(amsdu_shdr, pSkb->data, (ETH_ALEN*2));
		skb_pull(pSkb, sizeof(struct ethhdr));
		*(u16*)(amsdu_shdr+ETH_ALEN*2) = ntohs(pSkb->len + SNAP_SIZE + sizeof(u16));
		memcpy(p, amsdu_shdr, AMSDU_SUBHEADER_LEN);
		p += AMSDU_SUBHEADER_LEN;

		rtllib_put_snap(p, ether_type);
		p += SNAP_SIZE + sizeof(u16);

		memcpy(p, pSkb->data, pSkb->len);
		p += pSkb->len;
		if(padding > 0)
		{
			memset(p, 0, padding);
			p += padding;
			padding = 0;
		}
		dev_kfree_skb_any(pSkb);
	}
	
	//printk("-------%d\n",pAggrSkb->len);
	return pAggrSkb;
}
Example #8
static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
{
	struct socket *sock = sk->sk_socket;
	struct tipc_msg *msg = buf_msg(buf);
	u32 recv_q_len;

	/* Reject message if it is wrong sort of message for socket */

	/*
	 * WOULD IT BE BETTER TO JUST DISCARD THESE MESSAGES INSTEAD?
	 * "NO PORT" ISN'T REALLY THE RIGHT ERROR CODE, AND THERE MAY
	 * BE SECURITY IMPLICATIONS INHERENT IN REJECTING INVALID TRAFFIC
	 */

	if (sock->state == SS_READY) {
		if (msg_connected(msg)) {
			msg_dbg(msg, "dispatch filter 1\n");
			return TIPC_ERR_NO_PORT;
		}
	} else {
		if (msg_mcast(msg)) {
			msg_dbg(msg, "dispatch filter 2\n");
			return TIPC_ERR_NO_PORT;
		}
		if (sock->state == SS_CONNECTED) {
			if (!msg_connected(msg)) {
				msg_dbg(msg, "dispatch filter 3\n");
				return TIPC_ERR_NO_PORT;
			}
		}
		else if (sock->state == SS_CONNECTING) {
			if (!msg_connected(msg) && (msg_errcode(msg) == 0)) {
				msg_dbg(msg, "dispatch filter 4\n");
				return TIPC_ERR_NO_PORT;
			}
		}
		else if (sock->state == SS_LISTENING) {
			if (msg_connected(msg) || msg_errcode(msg)) {
				msg_dbg(msg, "dispatch filter 5\n");
				return TIPC_ERR_NO_PORT;
			}
		}
		else if (sock->state == SS_DISCONNECTING) {
			msg_dbg(msg, "dispatch filter 6\n");
			return TIPC_ERR_NO_PORT;
		}
		else /* (sock->state == SS_UNCONNECTED) */ {
			if (msg_connected(msg) || msg_errcode(msg)) {
				msg_dbg(msg, "dispatch filter 7\n");
				return TIPC_ERR_NO_PORT;
			}
		}
	}

	/* Reject message if there isn't room to queue it */

	recv_q_len = (u32)atomic_read(&tipc_queue_size);
	if (unlikely(recv_q_len >= OVERLOAD_LIMIT_BASE)) {
		if (rx_queue_full(msg, recv_q_len, OVERLOAD_LIMIT_BASE))
			return TIPC_ERR_OVERLOAD;
	}
	recv_q_len = skb_queue_len(&sk->sk_receive_queue);
	if (unlikely(recv_q_len >= (OVERLOAD_LIMIT_BASE / 2))) {
		if (rx_queue_full(msg, recv_q_len, OVERLOAD_LIMIT_BASE / 2))
			return TIPC_ERR_OVERLOAD;
	}

	/* Enqueue message (finally!) */

	msg_dbg(msg, "<DISP<: ");
	TIPC_SKB_CB(buf)->handle = msg_data(msg);
	atomic_inc(&tipc_queue_size);
	__skb_queue_tail(&sk->sk_receive_queue, buf);

	/* Initiate connection termination for an incoming 'FIN' */

	if (unlikely(msg_errcode(msg) && (sock->state == SS_CONNECTED))) {
		sock->state = SS_DISCONNECTING;
		tipc_disconnect_port(tipc_sk_port(sk));
	}

	if (waitqueue_active(sk->sk_sleep))
		wake_up_interruptible(sk->sk_sleep);
	return TIPC_OK;
}
Example #9
static void xennet_alloc_rx_buffers(struct net_device *dev)
{
	unsigned short id;
	struct netfront_info *np = netdev_priv(dev);
	struct sk_buff *skb;
	struct page *page;
	int i, batch_target, notify;
	RING_IDX req_prod = np->rx.req_prod_pvt;
	grant_ref_t ref;
	unsigned long pfn;
	void *vaddr;
	struct xen_netif_rx_request *req;

	if (unlikely(!netif_carrier_ok(dev)))
		return;

	/*
	 * Allocate skbuffs greedily, even though we batch updates to the
	 * receive ring. This creates a less bursty demand on the memory
	 * allocator, so should reduce the chance of failed allocation requests
	 * both for ourself and for other kernel subsystems.
	 */
	batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
	for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
		skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD + NET_IP_ALIGN,
					 GFP_ATOMIC | __GFP_NOWARN);
		if (unlikely(!skb))
			goto no_skb;

		/* Align IP header to a 16-byte boundary */
		skb_reserve(skb, NET_IP_ALIGN);

		page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
		if (!page) {
			kfree_skb(skb);
no_skb:
			/* Any skbuffs queued for refill? Force them out. */
			if (i != 0)
				goto refill;
			/* Could not allocate any skbuffs. Try again later. */
			mod_timer(&np->rx_refill_timer,
				  jiffies + (HZ/10));
			break;
		}

		skb_shinfo(skb)->frags[0].page = page;
		skb_shinfo(skb)->nr_frags = 1;
		__skb_queue_tail(&np->rx_batch, skb);
	}

	/* Is the batch large enough to be worthwhile? */
	if (i < (np->rx_target/2)) {
		if (req_prod > np->rx.sring->req_prod)
			goto push;
		return;
	}

	/* Adjust our fill target if we risked running out of buffers. */
	if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
	    ((np->rx_target *= 2) > np->rx_max_target))
		np->rx_target = np->rx_max_target;

 refill:
	for (i = 0; ; i++) {
		skb = __skb_dequeue(&np->rx_batch);
		if (skb == NULL)
			break;

		skb->dev = dev;

		id = xennet_rxidx(req_prod + i);

		BUG_ON(np->rx_skbs[id]);
		np->rx_skbs[id] = skb;

		ref = gnttab_claim_grant_reference(&np->gref_rx_head);
		BUG_ON((signed short)ref < 0);
		np->grant_rx_ref[id] = ref;

		pfn = page_to_pfn(skb_shinfo(skb)->frags[0].page);
		vaddr = page_address(skb_shinfo(skb)->frags[0].page);

		req = RING_GET_REQUEST(&np->rx, req_prod + i);
		gnttab_grant_foreign_access_ref(ref,
						np->xbdev->otherend_id,
						pfn_to_mfn(pfn),
						0);

		req->id = id;
		req->gref = ref;
	}

	wmb();		/* barrier so backend sees requests */

	/* Above is a suitable barrier to ensure backend will see requests. */
	np->rx.req_prod_pvt = req_prod + i;
 push:
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify);
	if (notify)
		notify_remote_via_irq(np->netdev->irq);
}
Example #10
int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
{
	struct rtllib_device *ieee = (struct rtllib_device *)netdev_priv_rsl(dev);
	struct rtllib_txb *txb = NULL;
	struct rtllib_hdr_3addrqos *frag_hdr;
	int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size;
	unsigned long flags;
	struct net_device_stats *stats = &ieee->stats;
	int ether_type = 0, encrypt;
	int bytes, fc, qos_ctl = 0, hdr_len;
	struct sk_buff *skb_frag;
	struct rtllib_hdr_3addrqos header = { /* Ensure zero initialized */
		.duration_id = 0,
		.seq_ctl = 0,
		.qos_ctl = 0
	};
	u8 dest[ETH_ALEN], src[ETH_ALEN];
	int qos_actived = ieee->current_network.qos_data.active;
	struct rtllib_crypt_data* crypt;
	cb_desc *tcb_desc;
	u8 bIsMulticast = false;
#if defined(RTL8192U) || defined(RTL8192SU) || defined(RTL8192SE)
	struct sta_info *p_sta = NULL;
#endif	
	u8 IsAmsdu = false;
#ifdef ENABLE_AMSDU	
	u8 queue_index = WME_AC_BE;
	cb_desc *tcb_desc_skb;
	u8 bIsSptAmsdu = false;
#endif	

	bool	bdhcp =false;
#ifndef _RTL8192_EXT_PATCH_
	//PRT_POWER_SAVE_CONTROL pPSC = (PRT_POWER_SAVE_CONTROL)(&(ieee->PowerSaveControl));//added by amy for Leisure PS 090402
#endif
	spin_lock_irqsave(&ieee->lock, flags);

	/* If there is no driver handler to take the TXB, don't bother
	 * creating it... */
	if ((!ieee->hard_start_xmit && !(ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE))||
	   ((!ieee->softmac_data_hard_start_xmit && (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)))) {
		printk(KERN_WARNING "%s: No xmit handler.\n",
		       ieee->dev->name);
		goto success;
	}
	

	if(likely(ieee->raw_tx == 0)){
		if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) {
			printk(KERN_WARNING "%s: skb too small (%d).\n",
			ieee->dev->name, skb->len);
			goto success;
		}

		/* Save source and destination addresses */
		memcpy(dest, skb->data, ETH_ALEN);
		memcpy(src, skb->data+ETH_ALEN, ETH_ALEN);
	
#ifdef ENABLE_AMSDU	
		if(ieee->iw_mode == IW_MODE_ADHOC)
		{
			p_sta = GetStaInfo(ieee, dest);
			if(p_sta)	{
				if(p_sta->htinfo.bEnableHT)
					bIsSptAmsdu = true;
			}
		}else if(ieee->iw_mode == IW_MODE_INFRA) {
			bIsSptAmsdu = true;
		}else
			bIsSptAmsdu = true;
		bIsSptAmsdu = (bIsSptAmsdu && ieee->pHTInfo->bCurrent_AMSDU_Support && qos_actived);
			
		//u8 *a = skb->data;
		//u8 *b = (u8*)skb->data + ETH_ALEN;
		//printk("\n&&&&&&&skb=%p len=%d dst:"MAC_FMT" src:"MAC_FMT"\n",skb,skb->len,MAC_ARG(a),MAC_ARG(b));
		tcb_desc_skb = (pcb_desc)(skb->cb + MAX_DEV_ADDR_SIZE);  //YJ,move,081104
		if(bIsSptAmsdu) {
			if(!tcb_desc_skb->bFromAggrQ)  //Normal MSDU
			{
				if(qos_actived)
				{
					queue_index = UP2AC(skb->priority);
				} else {
					queue_index = WME_AC_BE;
				}

				//printk("Normal MSDU,queue_idx=%d nic_enough=%d queue_len=%d\n", queue_index, ieee->check_nic_enough_desc(ieee->dev,queue_index), skb_queue_len(&ieee->skb_aggQ[queue_index]));
				if ((skb_queue_len(&ieee->skb_aggQ[queue_index]) != 0)||
#if defined RTL8192SE || defined RTL8192CE
				   (ieee->get_nic_desc_num(ieee->dev,queue_index)) > 1||
#else
				   (!ieee->check_nic_enough_desc(ieee->dev,queue_index))||
#endif
				   (ieee->queue_stop) ||
				   (ieee->amsdu_in_process)) //YJ,add,090409 
				{
					/* insert the skb packet to the Aggregation queue */
					//printk("!!!!!!!!!!%s(): intert to aggr queue\n", __FUNCTION__);
					skb_queue_tail(&ieee->skb_aggQ[queue_index], skb);
					spin_unlock_irqrestore(&ieee->lock, flags);
					return 0;
				}
			}
			else  //AMSDU
			{
				//printk("AMSDU!!!!!!!!!!!!!\n");
				if(tcb_desc_skb->bAMSDU)
					IsAmsdu = true;
				
				//YJ,add,090409
				ieee->amsdu_in_process = false;
			}
		}
#endif	
		memset(skb->cb, 0, sizeof(skb->cb));
		ether_type = ntohs(((struct ethhdr *)skb->data)->h_proto);

		// The following handles DHCP and ARP packets: transmit them at CCK 1M and keep
		// LPS awake for a while so the DHCP exchange does not fail.
		if (skb->len > 282){//MINIMUM_DHCP_PACKET_SIZE) {
			if (ETH_P_IP == ether_type) {// IP header
				const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data+14);
				if (IPPROTO_UDP == ip->protocol) {//FIXME windows is 11 but here UDP in linux kernel is 17.
					struct udphdr *udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2));
					//if(((ntohs(udp->source) == 68) && (ntohs(udp->dest) == 67)) ||
					 ///   ((ntohs(udp->source) == 67) && (ntohs(udp->dest) == 68))) {
					if(((((u8 *)udp)[1] == 68) && (((u8 *)udp)[3] == 67)) ||
					    ((((u8 *)udp)[1] == 67) && (((u8 *)udp)[3] == 68))) {
						// 68 : UDP BOOTP client
						// 67 : UDP BOOTP server
						printk("===>DHCP Protocol start tx DHCP pkt src port:%d, dest port:%d!!\n", ((u8 *)udp)[1],((u8 *)udp)[3]);
						// Use low rate to send DHCP packet.
						//if(pMgntInfo->IOTAction & HT_IOT_ACT_WA_IOT_Broadcom)	
						//{
						//	tcb_desc->DataRate = MgntQuery_TxRateExcludeCCKRates(ieee);//0xc;//ofdm 6m
						//	tcb_desc->bTxDisableRateFallBack = false;
						//}
						//else
						//pTcb->DataRate = Adapter->MgntInfo.LowestBasicRate; 
						//RTPRINT(FDM, WA_IOT, ("DHCP TranslateHeader(), pTcb->DataRate = 0x%x\n", pTcb->DataRate));

						bdhcp = true;
#ifdef _RTL8192_EXT_PATCH_
						ieee->LPSDelayCnt = 100;//pPSC->LPSAwakeIntvl*2; //AMY,090701
#else
						ieee->LPSDelayCnt = 100;//pPSC->LPSAwakeIntvl*2;
#endif	
					}
				}
			}else if(ETH_P_ARP == ether_type){// IP ARP packet
				printk("=================>DHCP Protocol start tx ARP pkt!!\n");
				bdhcp = true;
				ieee->LPSDelayCnt = ieee->current_network.tim.tim_count;

				//if(pMgntInfo->IOTAction & HT_IOT_ACT_WA_IOT_Broadcom)	
				//{
				//	tcb_desc->DataRate = MgntQuery_TxRateExcludeCCKRates(Adapter->MgntInfo.mBrates);//0xc;//ofdm 6m
				//	tcb_desc->bTxDisableRateFallBack = FALSE;
				//}
				//else
				//	tcb_desc->DataRate = Adapter->MgntInfo.LowestBasicRate; 
				//RTPRINT(FDM, WA_IOT, ("ARP TranslateHeader(), pTcb->DataRate = 0x%x\n", pTcb->DataRate));

			}
		}
		
		skb->priority = rtllib_classify(skb, IsAmsdu);
	
#ifdef _RTL8192_EXT_PATCH_
		crypt = ieee->sta_crypt[ieee->tx_keyidx];
#else
		crypt = ieee->crypt[ieee->tx_keyidx];
#endif	
		encrypt = !(ether_type == ETH_P_PAE && ieee->ieee802_1x) &&
			ieee->host_encrypt && crypt && crypt->ops;
	
		if (!encrypt && ieee->ieee802_1x &&
		ieee->drop_unencrypted && ether_type != ETH_P_PAE) {
			stats->tx_dropped++;
			goto success;
		}
	#ifdef CONFIG_RTLLIB_DEBUG
		if (crypt && !encrypt && ether_type == ETH_P_PAE) {
			struct eapol *eap = (struct eapol *)(skb->data +
				sizeof(struct ethhdr) - SNAP_SIZE - sizeof(u16));
			RTLLIB_DEBUG_EAP("TX: IEEE 802.11 EAPOL frame: %s\n",
				eap_get_type(eap->type));
		}
	#endif
	
		/* Advance the SKB to the start of the payload */
		skb_pull(skb, sizeof(struct ethhdr));

                /* Determine total amount of storage required for TXB packets */
#ifdef ENABLE_AMSDU	
		if(!IsAmsdu)
			bytes = skb->len + SNAP_SIZE + sizeof(u16);
		else
			bytes = skb->len;
#else
		bytes = skb->len + SNAP_SIZE + sizeof(u16);
#endif	

		if (encrypt)
			fc = RTLLIB_FTYPE_DATA | RTLLIB_FCTL_WEP;
		else 
			fc = RTLLIB_FTYPE_DATA; 
		
		//if(ieee->current_network.QoS_Enable) 
		if(qos_actived)
			fc |= RTLLIB_STYPE_QOS_DATA; 
		else
			fc |= RTLLIB_STYPE_DATA;
	
#ifdef _RTL8192_EXT_PATCH_
		if ((ieee->iw_mode == IW_MODE_INFRA) 
			//|| ((ieee->iw_mode == IW_MODE_MESH) && (ieee->only_mesh == 0)))  //YJ,test,090610
			|| (ieee->iw_mode == IW_MODE_MESH) ) 
#else
		if (ieee->iw_mode == IW_MODE_INFRA) 
#endif
		{
			fc |= RTLLIB_FCTL_TODS;
			/* To DS: Addr1 = BSSID, Addr2 = SA,
			Addr3 = DA */
			memcpy(&header.addr1, ieee->current_network.bssid, ETH_ALEN);
			memcpy(&header.addr2, &src, ETH_ALEN);
			if(IsAmsdu)
				memcpy(&header.addr3, ieee->current_network.bssid, ETH_ALEN);
			else
				memcpy(&header.addr3, &dest, ETH_ALEN);
		} else if (ieee->iw_mode == IW_MODE_ADHOC) {
			/* not From/To DS: Addr1 = DA, Addr2 = SA,
			Addr3 = BSSID */
			memcpy(&header.addr1, dest, ETH_ALEN);
			memcpy(&header.addr2, src, ETH_ALEN);
			memcpy(&header.addr3, ieee->current_network.bssid, ETH_ALEN);
		}

		bIsMulticast = is_broadcast_ether_addr(header.addr1) ||is_multicast_ether_addr(header.addr1);

                header.frame_ctl = cpu_to_le16(fc);

		/* Determine fragmentation size based on destination (multicast
		* and broadcast are not fragmented) */
		if (bIsMulticast) {
			frag_size = MAX_FRAG_THRESHOLD;
			qos_ctl |= QOS_CTL_NOTCONTAIN_ACK;
		}
		else {
#ifdef ENABLE_AMSDU	
			if(bIsSptAmsdu) {
				if(ieee->iw_mode == IW_MODE_ADHOC) {
					if(p_sta)
						frag_size = p_sta->htinfo.AMSDU_MaxSize;
					else
						frag_size = ieee->pHTInfo->nAMSDU_MaxSize;
				}
				else
					frag_size = ieee->pHTInfo->nAMSDU_MaxSize;
				qos_ctl = 0;
			}
			else
#endif	
			{
				frag_size = ieee->fts;//default:392
				qos_ctl = 0;
			}
		}
	
		if(qos_actived)
		{
			hdr_len = RTLLIB_3ADDR_LEN + 2;

                    /* in case we are a client verify acm is not set for this ac */
                    while (unlikely(ieee->wmm_acm & (0x01 << skb->priority))) {
                        printk("skb->priority = %x\n", skb->priority);
                        if (wme_downgrade_ac(skb)) {
                            break;
                        }
                        printk("converted skb->priority = %x\n", skb->priority);
                    }
                    qos_ctl |= skb->priority; //set in the rtllib_classify 	
#ifdef ENABLE_AMSDU	
			if(IsAmsdu)
			{
				qos_ctl |= QOS_CTL_AMSDU_PRESENT;
			}
                    header.qos_ctl = cpu_to_le16(qos_ctl);
#else	
                    header.qos_ctl = cpu_to_le16(qos_ctl & RTLLIB_QOS_TID);
#endif
		} else {
			hdr_len = RTLLIB_3ADDR_LEN;		
		}
		/* Determine amount of payload per fragment.  Regardless of if
		* this stack is providing the full 802.11 header, one will
		* eventually be affixed to this fragment -- so we must account for
		* it when determining the amount of payload space. */
		bytes_per_frag = frag_size - hdr_len;
		if (ieee->config &
		(CFG_RTLLIB_COMPUTE_FCS | CFG_RTLLIB_RESERVE_FCS))
			bytes_per_frag -= RTLLIB_FCS_LEN;
	
		/* Each fragment may need to have room for encryption prefix/postfix */
		if (encrypt)
			bytes_per_frag -= crypt->ops->extra_prefix_len +
				crypt->ops->extra_postfix_len;
	
		/* Number of fragments is the total bytes_per_frag /
		* payload_per_fragment */
		nr_frags = bytes / bytes_per_frag;
		bytes_last_frag = bytes % bytes_per_frag;
		if (bytes_last_frag)
			nr_frags++;
		else
			bytes_last_frag = bytes_per_frag;
	
		/* When we allocate the TXB we allocate enough space for the reserve
		* and full fragment bytes (bytes_per_frag doesn't include prefix,
		* postfix, header, FCS, etc.) */
		txb = rtllib_alloc_txb(nr_frags, frag_size + ieee->tx_headroom, GFP_ATOMIC);
		if (unlikely(!txb)) {
			printk(KERN_WARNING "%s: Could not allocate TXB\n",
			ieee->dev->name);
			goto failed;
		}
		txb->encrypted = encrypt;
		txb->payload_size = bytes;

		//if (ieee->current_network.QoS_Enable) 
		if(qos_actived)
		{
			txb->queue_index = UP2AC(skb->priority);
		} else {
			txb->queue_index = WME_AC_BE;
		}

		for (i = 0; i < nr_frags; i++) {
			skb_frag = txb->fragments[i];
			tcb_desc = (cb_desc *)(skb_frag->cb + MAX_DEV_ADDR_SIZE);
#ifdef _RTL8192_EXT_PATCH_
			tcb_desc->mesh_pkt = 0;//AMY added 090226
			if(ieee->iw_mode == IW_MODE_ADHOC)
				tcb_desc->badhoc = 1;
			else
				tcb_desc->badhoc = 0;
#endif
			if(qos_actived){
				skb_frag->priority = skb->priority;//UP2AC(skb->priority);	
				tcb_desc->queue_index =  UP2AC(skb->priority);
			} else {
				skb_frag->priority = WME_AC_BE;
				tcb_desc->queue_index = WME_AC_BE;
			}
			skb_reserve(skb_frag, ieee->tx_headroom);

			if (encrypt){
				if (ieee->hwsec_active)
					tcb_desc->bHwSec = 1;
				else
					tcb_desc->bHwSec = 0;
				skb_reserve(skb_frag, crypt->ops->extra_prefix_len);
			}
			else
			{
				tcb_desc->bHwSec = 0;
			}
			frag_hdr = (struct rtllib_hdr_3addrqos *)skb_put(skb_frag, hdr_len);
			memcpy(frag_hdr, &header, hdr_len);
	
			/* If this is not the last fragment, then add the MOREFRAGS
			* bit to the frame control */
			if (i != nr_frags - 1) {
				frag_hdr->frame_ctl = cpu_to_le16(
					fc | RTLLIB_FCTL_MOREFRAGS);
				bytes = bytes_per_frag;
		
			} else {
				/* The last fragment takes the remaining length */
				bytes = bytes_last_frag;
			}
			//if(ieee->current_network.QoS_Enable) 
			if((qos_actived) && (!bIsMulticast))
			{	
				// add 1 only indicate to corresponding seq number control 2006/7/12
				//frag_hdr->seq_ctl = cpu_to_le16(ieee->seq_ctrl[UP2AC(skb->priority)+1]<<4 | i);
				frag_hdr->seq_ctl = rtllib_query_seqnum(ieee, skb_frag, header.addr1); 
				frag_hdr->seq_ctl = cpu_to_le16(frag_hdr->seq_ctl<<4 | i);
			} else {
				frag_hdr->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0]<<4 | i);
			}
			/* Put a SNAP header on the first fragment */
#ifdef ENABLE_AMSDU	
			if ((i == 0) && (!IsAmsdu)) 
#else
			if (i == 0) 
#endif	
			{
				rtllib_put_snap(
					skb_put(skb_frag, SNAP_SIZE + sizeof(u16)),
					ether_type);
				bytes -= SNAP_SIZE + sizeof(u16);
			}
	
			memcpy(skb_put(skb_frag, bytes), skb->data, bytes);
	
			/* Advance the SKB... */
			skb_pull(skb, bytes);
	
			/* Encryption routine will move the header forward in order
			* to insert the IV between the header and the payload */
			if (encrypt)
#ifdef _RTL8192_EXT_PATCH_
				rtllib_encrypt_fragment(ieee, skb_frag, hdr_len, 0);
#else
				rtllib_encrypt_fragment(ieee, skb_frag, hdr_len);
#endif
			if (ieee->config &
			(CFG_RTLLIB_COMPUTE_FCS | CFG_RTLLIB_RESERVE_FCS))
				skb_put(skb_frag, 4);
		}

		if((qos_actived) && (!bIsMulticast))
		{
		  if (ieee->seq_ctrl[UP2AC(skb->priority) + 1] == 0xFFF)
			ieee->seq_ctrl[UP2AC(skb->priority) + 1] = 0;
		  else
			ieee->seq_ctrl[UP2AC(skb->priority) + 1]++;
		} else {
  		  if (ieee->seq_ctrl[0] == 0xFFF)
			ieee->seq_ctrl[0] = 0;
		  else
			ieee->seq_ctrl[0]++;
		}
	}else{
		if (unlikely(skb->len < sizeof(struct rtllib_hdr_3addr))) {
			printk(KERN_WARNING "%s: skb too small (%d).\n",
			ieee->dev->name, skb->len);
			goto success;
		}
	
		txb = rtllib_alloc_txb(1, skb->len, GFP_ATOMIC);
		if(!txb){
			printk(KERN_WARNING "%s: Could not allocate TXB\n",
			ieee->dev->name);
			goto failed;
		}
		
		txb->encrypted = 0;
		txb->payload_size = skb->len;
		memcpy(skb_put(txb->fragments[0],skb->len), skb->data, skb->len);
	}	

 success:
//WB add to fill data tcb_desc here. only first fragment is considered, need to change, and you may remove to other place.
	if (txb)
	{
#if 1	
		cb_desc *tcb_desc = (cb_desc *)(txb->fragments[0]->cb + MAX_DEV_ADDR_SIZE);
		tcb_desc->bTxEnableFwCalcDur = 1;
		tcb_desc->priority = skb->priority;

                if(ether_type == ETH_P_PAE) {
			if(ieee->pHTInfo->IOTAction & HT_IOT_ACT_WA_IOT_Broadcom)	
			{
				tcb_desc->data_rate = MgntQuery_TxRateExcludeCCKRates(ieee);//0xc;//ofdm 6m
				tcb_desc->bTxDisableRateFallBack = false;
			}else{
                        tcb_desc->data_rate = ieee->basic_rate;
                        tcb_desc->bTxDisableRateFallBack = 1;
			}
			
			printk("EAPOL TranslateHeader(), pTcb->DataRate = 0x%x\n", tcb_desc->data_rate);
			
                        tcb_desc->RATRIndex = 7;                        
                        tcb_desc->bTxUseDriverAssingedRate = 1;
                } else {
		if (is_multicast_ether_addr(header.addr1))
			tcb_desc->bMulticast = 1;
		if (is_broadcast_ether_addr(header.addr1))
			tcb_desc->bBroadcast = 1;
#if defined(RTL8192U) || defined(RTL8192SU) || defined(RTL8192SE)
		if ( tcb_desc->bMulticast ||  tcb_desc->bBroadcast){
			rtllib_txrate_selectmode(ieee, tcb_desc, 7);  
			tcb_desc->data_rate = ieee->basic_rate;
		}
		else
		{
			if(ieee->iw_mode == IW_MODE_ADHOC)
			{
				u8 is_peer_shortGI_40M = 0;
				u8 is_peer_shortGI_20M = 0;
				u8 is_peer_BW_40M = 0;
				p_sta = GetStaInfo(ieee, header.addr1);
				if(NULL == p_sta)
				{
					rtllib_txrate_selectmode(ieee, tcb_desc, 7);
					tcb_desc->data_rate = ieee->rate;
				}
				else
				{
					rtllib_txrate_selectmode(ieee, tcb_desc, p_sta->ratr_index);
					tcb_desc->data_rate = CURRENT_RATE(p_sta->wireless_mode, p_sta->CurDataRate, p_sta->htinfo.HTHighestOperaRate);
					is_peer_shortGI_40M = p_sta->htinfo.bCurShortGI40MHz;
					is_peer_shortGI_20M = p_sta->htinfo.bCurShortGI20MHz;
					is_peer_BW_40M = p_sta->htinfo.bCurTxBW40MHz;
				}
				rtllib_qurey_ShortPreambleMode(ieee, tcb_desc);
				rtllib_tx_query_agg_cap(ieee, txb->fragments[0], tcb_desc);
				rtllib_ibss_query_HTCapShortGI(ieee, tcb_desc,is_peer_shortGI_40M,is_peer_shortGI_20M); 
				rtllib_ibss_query_BandwidthMode(ieee, tcb_desc,is_peer_BW_40M);
				rtllib_query_protectionmode(ieee, tcb_desc, txb->fragments[0]);
				//CB_DESC_DUMP(tcb_desc, __FUNCTION__);
			}
			else {
				rtllib_txrate_selectmode(ieee, tcb_desc, 0); 
				tcb_desc->data_rate = CURRENT_RATE(ieee->mode, ieee->rate, ieee->HTCurrentOperaRate);
				if(bdhcp == true){
					// Use low rate to send DHCP packet.
					if(ieee->pHTInfo->IOTAction & HT_IOT_ACT_WA_IOT_Broadcom) {
						tcb_desc->data_rate = MGN_1M;//MgntQuery_TxRateExcludeCCKRates(ieee);//0xc;//ofdm 6m
						tcb_desc->bTxDisableRateFallBack = false;
					}else{
						tcb_desc->data_rate = MGN_1M;
						tcb_desc->bTxDisableRateFallBack = 1;
					}

					tcb_desc->RATRIndex = 7;
					tcb_desc->bTxUseDriverAssingedRate = 1;
					tcb_desc->bdhcp = 1;
				}
				rtllib_qurey_ShortPreambleMode(ieee, tcb_desc);
				rtllib_tx_query_agg_cap(ieee, txb->fragments[0], tcb_desc);
				rtllib_query_HTCapShortGI(ieee, tcb_desc); 
				rtllib_query_BandwidthMode(ieee, tcb_desc);
				rtllib_query_protectionmode(ieee, tcb_desc, txb->fragments[0]);
				
			}
		}
#else
		rtllib_txrate_selectmode(ieee, tcb_desc);
		if ( tcb_desc->bMulticast ||  tcb_desc->bBroadcast)
			tcb_desc->data_rate = ieee->basic_rate;
		else
			//tcb_desc->data_rate = CURRENT_RATE(ieee->current_network.mode, ieee->rate, ieee->HTCurrentOperaRate);
			tcb_desc->data_rate = CURRENT_RATE(ieee->mode, ieee->rate, ieee->HTCurrentOperaRate);

		if(bdhcp == true){
			// Use low rate to send DHCP packet.
			if(ieee->pHTInfo->IOTAction & HT_IOT_ACT_WA_IOT_Broadcom)	
			{
				tcb_desc->data_rate = MgntQuery_TxRateExcludeCCKRates(ieee);//0xc;//ofdm 6m
				tcb_desc->bTxDisableRateFallBack = false;
			}else{
				tcb_desc->data_rate = MGN_1M;
                    tcb_desc->bTxDisableRateFallBack = 1;
			}

			//printk("DHCP TranslateHeader(), pTcb->DataRate = 0x%x\n", tcb_desc->data_rate);
			
                    	tcb_desc->RATRIndex = 7;
                    tcb_desc->bTxUseDriverAssingedRate = 1;
                    //tcb_desc->bTxEnableFwCalcDur = 1;
                }
		
		rtllib_qurey_ShortPreambleMode(ieee, tcb_desc);
		rtllib_tx_query_agg_cap(ieee, txb->fragments[0], tcb_desc);
		rtllib_query_HTCapShortGI(ieee, tcb_desc); 
		rtllib_query_BandwidthMode(ieee, tcb_desc);
		rtllib_query_protectionmode(ieee, tcb_desc, txb->fragments[0]);
#endif
                } 		
//		rtllib_query_seqnum(ieee, txb->fragments[0], header.addr1);
//		RTLLIB_DEBUG_DATA(RTLLIB_DL_DATA, txb->fragments[0]->data, txb->fragments[0]->len);
		//RTLLIB_DEBUG_DATA(RTLLIB_DL_DATA, tcb_desc, sizeof(cb_desc));
#endif
	}
	spin_unlock_irqrestore(&ieee->lock, flags);
	dev_kfree_skb_any(skb);
	if (txb) {
		if (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE){
			rtllib_softmac_xmit(txb, ieee);
		}else{
			if ((*ieee->hard_start_xmit)(txb, dev) == 0) {
				stats->tx_packets++;
				stats->tx_bytes += txb->payload_size;
				return 0;
			}
			rtllib_txb_free(txb);
		}
	}

	return 0;

 failed:
	spin_unlock_irqrestore(&ieee->lock, flags);
	netif_stop_queue(dev);
	stats->tx_errors++;
	return 1;

}
int rtllib_xmit(struct sk_buff *skb, struct net_device *dev)
{
	memset(skb->cb, 0, sizeof(skb->cb));
	return rtllib_xmit_inter(skb, dev);
}
Example #11
int a_netbuf_queue_size(A_NETBUF_QUEUE_T *q)
{
    return(skb_queue_len((struct sk_buff_head *) q));
}
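Example #11 is just a thin wrapper: skb_queue_len() is essentially an unlocked read of the queue's qlen counter, so the value is only a snapshot unless the caller holds the queue's lock. A small sketch of both uses follows; my_queue_len_hint and my_queue_len_exact are illustrative names.

#include <linux/skbuff.h>
#include <linux/spinlock.h>

/* Snapshot read: adequate for statistics and heuristics,
 * as in the debugfs counter in Example #12 below. */
static u32 my_queue_len_hint(struct sk_buff_head *q)
{
	return skb_queue_len(q);
}

/* Exact read: hold the queue's own lock so the length cannot change
 * between the check and whatever decision is based on it. */
static u32 my_queue_len_exact(struct sk_buff_head *q)
{
	unsigned long flags;
	u32 len;

	spin_lock_irqsave(&q->lock, flags);
	len = skb_queue_len(q);
	spin_unlock_irqrestore(&q->lock, flags);
	return len;
}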
Example #12
static ssize_t ieee80211_if_fmt_num_buffered_multicast(
	const struct ieee80211_sub_if_data *sdata, char *buf, int buflen)
{
	return scnprintf(buf, buflen, "%u\n",
			 skb_queue_len(&sdata->u.ap.ps_bc_buf));
}
Example #13
static void ifb_ri_tasklet(unsigned long _txp)
{
	struct ifb_q_private *txp = (struct ifb_q_private *)_txp;
	struct netdev_queue *txq;
	struct sk_buff *skb;

	txq = netdev_get_tx_queue(txp->dev, txp->txqnum);
	skb = skb_peek(&txp->tq);
	if (!skb) {
		if (!__netif_tx_trylock(txq))
			goto resched;
		skb_queue_splice_tail_init(&txp->rq, &txp->tq);
		__netif_tx_unlock(txq);
	}

	while ((skb = __skb_dequeue(&txp->tq)) != NULL) {
		skb->tc_redirected = 0;
		skb->tc_skip_classify = 1;

		u64_stats_update_begin(&txp->tsync);
		txp->tx_packets++;
		txp->tx_bytes += skb->len;
		u64_stats_update_end(&txp->tsync);

		rcu_read_lock();
		skb->dev = dev_get_by_index_rcu(dev_net(txp->dev), skb->skb_iif);
		if (!skb->dev) {
			rcu_read_unlock();
			dev_kfree_skb(skb);
			txp->dev->stats.tx_dropped++;
			if (skb_queue_len(&txp->tq) != 0)
				goto resched;
			break;
		}
		rcu_read_unlock();
		skb->skb_iif = txp->dev->ifindex;

		if (!skb->tc_from_ingress) {
			dev_queue_xmit(skb);
		} else {
			skb_pull(skb, skb->mac_len);
			netif_receive_skb(skb);
		}
	}

	if (__netif_tx_trylock(txq)) {
		skb = skb_peek(&txp->rq);
		if (!skb) {
			txp->tasklet_pending = 0;
			if (netif_tx_queue_stopped(txq))
				netif_tx_wake_queue(txq);
		} else {
			__netif_tx_unlock(txq);
			goto resched;
		}
		__netif_tx_unlock(txq);
	} else {
resched:
		txp->tasklet_pending = 1;
		tasklet_schedule(&txp->ifb_tasklet);
	}

}
Example #14
static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
{
	struct gnttab_copy *gop = vif->tx_copy_ops, *request_gop;
	struct sk_buff *skb;
	int ret;

	while ((nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX
		< MAX_PENDING_REQS) &&
	       (skb_queue_len(&vif->tx_queue) < budget)) {
		struct xen_netif_tx_request txreq;
		struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
		struct page *page;
		struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
		u16 pending_idx;
		RING_IDX idx;
		int work_to_do;
		unsigned int data_len;
		pending_ring_idx_t index;

		if (vif->tx.sring->req_prod - vif->tx.req_cons >
		    XEN_NETIF_TX_RING_SIZE) {
			netdev_err(vif->dev,
				   "Impossible number of requests. "
				   "req_prod %d, req_cons %d, size %ld\n",
				   vif->tx.sring->req_prod, vif->tx.req_cons,
				   XEN_NETIF_TX_RING_SIZE);
			xenvif_fatal_tx_err(vif);
			continue;
		}

		work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&vif->tx);
		if (!work_to_do)
			break;

		idx = vif->tx.req_cons;
		rmb(); /* Ensure that we see the request before we copy it. */
		memcpy(&txreq, RING_GET_REQUEST(&vif->tx, idx), sizeof(txreq));

		/* Credit-based scheduling. */
		if (txreq.size > vif->remaining_credit &&
		    tx_credit_exceeded(vif, txreq.size))
			break;

		vif->remaining_credit -= txreq.size;

		work_to_do--;
		vif->tx.req_cons = ++idx;

		memset(extras, 0, sizeof(extras));
		if (txreq.flags & XEN_NETTXF_extra_info) {
			work_to_do = xenvif_get_extras(vif, extras,
						       work_to_do);
			idx = vif->tx.req_cons;
			if (unlikely(work_to_do < 0))
				break;
		}

		ret = xenvif_count_requests(vif, &txreq, txfrags, work_to_do);
		if (unlikely(ret < 0))
			break;

		idx += ret;

		if (unlikely(txreq.size < ETH_HLEN)) {
			netdev_dbg(vif->dev,
				   "Bad packet size: %d\n", txreq.size);
			xenvif_tx_err(vif, &txreq, idx);
			break;
		}

		/* No crossing a page as the payload mustn't fragment. */
		if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
			netdev_err(vif->dev,
				   "txreq.offset: %x, size: %u, end: %lu\n",
				   txreq.offset, txreq.size,
				   (txreq.offset&~PAGE_MASK) + txreq.size);
			xenvif_fatal_tx_err(vif);
			break;
		}

		index = pending_index(vif->pending_cons);
		pending_idx = vif->pending_ring[index];

		data_len = (txreq.size > PKT_PROT_LEN &&
			    ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
			PKT_PROT_LEN : txreq.size;

		skb = alloc_skb(data_len + NET_SKB_PAD + NET_IP_ALIGN,
				GFP_ATOMIC | __GFP_NOWARN);
		if (unlikely(skb == NULL)) {
			netdev_dbg(vif->dev,
				   "Can't allocate a skb in start_xmit.\n");
			xenvif_tx_err(vif, &txreq, idx);
			break;
		}

		/* Packets passed to netif_rx() must have some headroom. */
		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);

		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
			struct xen_netif_extra_info *gso;
			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

			if (xenvif_set_skb_gso(vif, skb, gso)) {
				/* Failure in xenvif_set_skb_gso is fatal. */
				kfree_skb(skb);
				break;
			}
		}

		/* XXX could copy straight to head */
		page = xenvif_alloc_page(vif, pending_idx);
		if (!page) {
			kfree_skb(skb);
			xenvif_tx_err(vif, &txreq, idx);
			break;
		}

		gop->source.u.ref = txreq.gref;
		gop->source.domid = vif->domid;
		gop->source.offset = txreq.offset;

		gop->dest.u.gmfn = virt_to_mfn(page_address(page));
		gop->dest.domid = DOMID_SELF;
		gop->dest.offset = txreq.offset;

		gop->len = txreq.size;
		gop->flags = GNTCOPY_source_gref;

		gop++;

		memcpy(&vif->pending_tx_info[pending_idx].req,
		       &txreq, sizeof(txreq));
		vif->pending_tx_info[pending_idx].head = index;
		*((u16 *)skb->data) = pending_idx;

		__skb_put(skb, data_len);

		skb_shinfo(skb)->nr_frags = ret;
		if (data_len < txreq.size) {
			skb_shinfo(skb)->nr_frags++;
			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
					     pending_idx);
		} else {
			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
					     INVALID_PENDING_IDX);
		}

		vif->pending_cons++;

		request_gop = xenvif_get_requests(vif, skb, txfrags, gop);
		if (request_gop == NULL) {
			kfree_skb(skb);
			xenvif_tx_err(vif, &txreq, idx);
			break;
		}
		gop = request_gop;

		__skb_queue_tail(&vif->tx_queue, skb);

		vif->tx.req_cons = idx;

		if ((gop-vif->tx_copy_ops) >= ARRAY_SIZE(vif->tx_copy_ops))
			break;
	}

	return gop - vif->tx_copy_ops;
}
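Example #14 checks skb_queue_len() against a budget in its loop condition, so no more than 'budget' skbs are staged per call. A hedged sketch of that bounding pattern is shown below, with hypothetical names (my_stage, my_build_one).

#include <linux/skbuff.h>

static struct sk_buff *my_build_one(void);	/* hypothetical producer of the next skb */

static unsigned int my_fill(struct sk_buff_head *my_stage, unsigned int budget)
{
	unsigned int built = 0;

	/* Stop as soon as the staging queue already holds 'budget' entries. */
	while (skb_queue_len(my_stage) < budget) {
		struct sk_buff *skb = my_build_one();

		if (!skb)
			break;
		__skb_queue_tail(my_stage, skb);	/* caller serializes access to my_stage */
		built++;
	}
	return built;
}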
Example #15
static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	u32			uid, pid, seq, sid;
	void			*data;
	struct audit_status	*status_get, status_set;
	int			err;
	struct audit_buffer	*ab;
	u16			msg_type = nlh->nlmsg_type;
	uid_t			loginuid; /* loginuid of sender */
	struct audit_sig_info   *sig_data;
	char			*ctx;
	u32			len;

	err = audit_netlink_ok(skb, msg_type);
	if (err)
		return err;

	/* As soon as there's any sign of userspace auditd,
	 * start kauditd to talk to it */
	if (!kauditd_task)
		kauditd_task = kthread_run(kauditd_thread, NULL, "kauditd");
	if (IS_ERR(kauditd_task)) {
		err = PTR_ERR(kauditd_task);
		kauditd_task = NULL;
		return err;
	}

	pid  = NETLINK_CREDS(skb)->pid;
	uid  = NETLINK_CREDS(skb)->uid;
	loginuid = NETLINK_CB(skb).loginuid;
	sid  = NETLINK_CB(skb).sid;
	seq  = nlh->nlmsg_seq;
	data = NLMSG_DATA(nlh);

	switch (msg_type) {
	case AUDIT_GET:
		status_set.enabled	 = audit_enabled;
		status_set.failure	 = audit_failure;
		status_set.pid		 = audit_pid;
		status_set.rate_limit	 = audit_rate_limit;
		status_set.backlog_limit = audit_backlog_limit;
		status_set.lost		 = atomic_read(&audit_lost);
		status_set.backlog	 = skb_queue_len(&audit_skb_queue);
		audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
				 &status_set, sizeof(status_set));
		break;
	case AUDIT_SET:
		if (nlh->nlmsg_len < sizeof(struct audit_status))
			return -EINVAL;
		status_get   = (struct audit_status *)data;
		if (status_get->mask & AUDIT_STATUS_ENABLED) {
			err = audit_set_enabled(status_get->enabled,
							loginuid, sid);
			if (err < 0) return err;
		}
		if (status_get->mask & AUDIT_STATUS_FAILURE) {
			err = audit_set_failure(status_get->failure,
							 loginuid, sid);
			if (err < 0) return err;
		}
		if (status_get->mask & AUDIT_STATUS_PID) {
			int old   = audit_pid;
			if (sid) {
				if ((err = selinux_ctxid_to_string(
						sid, &ctx, &len)))
					return err;
				else
					audit_log(NULL, GFP_KERNEL,
						AUDIT_CONFIG_CHANGE,
						"audit_pid=%d old=%d by auid=%u subj=%s",
						status_get->pid, old,
						loginuid, ctx);
				kfree(ctx);
			} else
				audit_log(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE,
					"audit_pid=%d old=%d by auid=%u",
					  status_get->pid, old, loginuid);
			audit_pid = status_get->pid;
		}
		if (status_get->mask & AUDIT_STATUS_RATE_LIMIT)
			err = audit_set_rate_limit(status_get->rate_limit,
							 loginuid, sid);
		if (status_get->mask & AUDIT_STATUS_BACKLOG_LIMIT)
			err = audit_set_backlog_limit(status_get->backlog_limit,
							loginuid, sid);
		break;
	case AUDIT_USER:
	case AUDIT_FIRST_USER_MSG...AUDIT_LAST_USER_MSG:
	case AUDIT_FIRST_USER_MSG2...AUDIT_LAST_USER_MSG2:
		if (!audit_enabled && msg_type != AUDIT_USER_AVC)
			return 0;

		err = audit_filter_user(&NETLINK_CB(skb), msg_type);
		if (err == 1) {
			err = 0;
			ab = audit_log_start(NULL, GFP_KERNEL, msg_type);
			if (ab) {
				audit_log_format(ab,
						 "user pid=%d uid=%u auid=%u",
						 pid, uid, loginuid);
				if (sid) {
					if (selinux_ctxid_to_string(
							sid, &ctx, &len)) {
						audit_log_format(ab, 
							" ssid=%u", sid);
						/* Maybe call audit_panic? */
					} else
						audit_log_format(ab, 
							" subj=%s", ctx);
					kfree(ctx);
				}
				audit_log_format(ab, " msg='%.1024s'",
					 (char *)data);
				audit_set_pid(ab, pid);
				audit_log_end(ab);
			}
		}
		break;
	case AUDIT_ADD:
	case AUDIT_DEL:
		if (nlmsg_len(nlh) < sizeof(struct audit_rule))
			return -EINVAL;
		/* fallthrough */
	case AUDIT_LIST:
		err = audit_receive_filter(nlh->nlmsg_type, NETLINK_CB(skb).pid,
					   uid, seq, data, nlmsg_len(nlh),
					   loginuid, sid);
		break;
	case AUDIT_ADD_RULE:
	case AUDIT_DEL_RULE:
		if (nlmsg_len(nlh) < sizeof(struct audit_rule_data))
			return -EINVAL;
		/* fallthrough */
	case AUDIT_LIST_RULES:
		err = audit_receive_filter(nlh->nlmsg_type, NETLINK_CB(skb).pid,
					   uid, seq, data, nlmsg_len(nlh),
					   loginuid, sid);
		break;
	case AUDIT_SIGNAL_INFO:
		err = selinux_ctxid_to_string(audit_sig_sid, &ctx, &len);
		if (err)
			return err;
		sig_data = kmalloc(sizeof(*sig_data) + len, GFP_KERNEL);
		if (!sig_data) {
			kfree(ctx);
			return -ENOMEM;
		}
		sig_data->uid = audit_sig_uid;
		sig_data->pid = audit_sig_pid;
		memcpy(sig_data->ctx, ctx, len);
		kfree(ctx);
		audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_SIGNAL_INFO, 
				0, 0, sig_data, sizeof(*sig_data) + len);
		kfree(sig_data);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err < 0 ? err : 0;
}
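Finally, Examples #1, #3, #4 and #6 all use skb_queue_len() as the wake-up condition for wait_event_interruptible(), sleeping until the receive queue is non-empty (or, in some of them, until a state change occurs). A minimal sketch of just the wait-for-data part, with illustrative names (my_wq, my_rxq):

#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/errno.h>

static int my_wait_for_data(wait_queue_head_t *my_wq, struct sk_buff_head *my_rxq)
{
	/* Returns -ERESTARTSYS if a signal interrupted the sleep, 0 once the
	 * queue is non-empty. The condition is re-evaluated after each wake-up. */
	if (wait_event_interruptible(*my_wq, skb_queue_len(my_rxq) != 0))
		return -ERESTARTSYS;
	return 0;
}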