/*
 * ccmni_v2_receive - deliver one downlink packet from CCMNI v2 shared
 * memory to the Linux network stack.
 *
 * @ccmni:     CCMNI instance owning the receiving net_device.
 * @ccmni_ptr: virtual address of the raw IP packet payload.
 * @ccmni_len: payload length in bytes.
 *
 * Returns 0 on success, -1 on invalid arguments, or
 * -CCCI_ERR_MEM_CHECK_FAIL when sk_buff allocation fails (the caller
 * uses that code to schedule a retry).
 *
 * Fix vs. previous revision: ccmni->owner used to be dereferenced
 * BEFORE the NULL check, the check's own log dereferenced
 * ccmni->channel, and on invalid arguments it only set ret = -1
 * without returning — the bad pointer/length was still processed and
 * the -1 was later overwritten.
 */
static int ccmni_v2_receive(ccmni_v2_instance_t *ccmni, const unsigned char *ccmni_ptr, int ccmni_len)
{
	int						packet_type, ret = 0;
	struct sk_buff			*skb;
	ccmni_v2_ctl_block_t	*ctl_b;
	int						md_id;

	/* Validate ccmni before touching ccmni->owner. */
	if (ccmni == NULL)
		return -1;

	ctl_b = (ccmni_v2_ctl_block_t *)ccmni->owner;
	md_id = ctl_b->m_md_id;

	if ((ccmni_ptr == NULL) || (ccmni_len <= 0))
	{
		CCCI_MSG_INF(md_id, "net", "CCMNI%d_receive: invalid private data\n", ccmni->channel);
		return -1;
	}

	skb = dev_alloc_skb(ccmni_len);

	if (skb)
	{
		/* High nibble of the first payload byte is the IP version field. */
		packet_type = ccmni_ptr[0] & 0xF0;
		memcpy(skb_put(skb, ccmni_len), ccmni_ptr, ccmni_len);
		/* Build a fake ethernet header in the skb headroom reserved
		 * by dev_alloc_skb(). */
		ccmni_make_etherframe(skb->data - ETH_HLEN, ccmni->dev->dev_addr, packet_type);
		skb_set_mac_header(skb, -ETH_HLEN);

		skb->dev = ccmni->dev;
		if(packet_type == IPV6_VERSION)
		{
			skb->protocol  = htons(ETH_P_IPV6);
		}
		else
		{
			skb->protocol  = htons(ETH_P_IP);
		}
		/* No hardware checksum offload on this path. */
		skb->ip_summed = CHECKSUM_NONE;

		ret = netif_rx(skb);

		CCCI_CCMNI_MSG(md_id, "CCMNI%d invoke netif_rx()=%d\n", ccmni->channel, ret);

		ccmni->dev->stats.rx_packets++;
		ccmni->dev->stats.rx_bytes	+= ccmni_len;
		CCCI_CCMNI_MSG(md_id, "CCMNI%d rx_pkts=%d, stats_rx_bytes=%d\n",ccmni->channel, \
			ccmni->dev->stats.rx_packets,ccmni->dev->stats.rx_bytes);

		/* netif_rx() congestion status is logged but deliberately not
		 * propagated: the skb has been consumed either way. */
		ret = 0;
	}
	else
	{
		CCCI_MSG_INF(md_id, "net", "CCMNI%d Socket buffer allocate fail\n", ccmni->channel);
		ret = -CCCI_ERR_MEM_CHECK_FAIL;
	}

	return ret;
}
/*
 * ccmni_v2_setup - net_device setup callback passed to alloc_netdev().
 * Configures the CCMNI interface as a no-ARP device (broadcast and
 * multicast cleared) and assigns a random, non-duplicate MAC address.
 */
static void ccmni_v2_setup(struct net_device *dev)
{
	ccmni_v2_instance_t *ccmni = netdev_priv(dev);
	int attempts;

	ether_setup(dev);

	dev->header_ops        = NULL;
	dev->netdev_ops        = &ccmni_v2_netdev_ops;
	dev->flags             = IFF_NOARP & (~IFF_BROADCAST & ~IFF_MULTICAST);
	dev->mtu               = CCMNI_MTU;
	dev->tx_queue_len      = CCMNI_TX_QUEUE;
	dev->addr_len          = ETH_ALEN;
	dev->destructor        = free_netdev;

	/* Try up to 10 times to get a MAC address that no other CCMNI
	 * device is already using. */
	for (attempts = 10; attempts > 0; attempts--) {
		random_ether_addr((u8 *)dev->dev_addr);
		if (!is_mac_addr_duplicate((u8 *)dev->dev_addr))
			break;
	}

	/* NOTE(review): this callback runs from inside alloc_netdev(),
	 * before ccmni_v2_create_instance() fills in the private data, so
	 * ccmni->m_md_id/channel logged here may be uninitialized — confirm. */
	CCCI_CCMNI_MSG(ccmni->m_md_id, "CCMNI%d_setup: features=0x%08x,flags=0x%08x\n", ccmni->channel, \
		dev->features, dev->flags);

	return;
}
/* (extraction artifact removed here: stray example-listing separator "Beispiel #3" / "0") */
/*
 * ccmni_read - bottom-half handler that drains the CCMNI v1 shared-memory
 * RX ring buffer, copies the framed data into ccmni->read_buffer, and
 * passes it to ccmni_receive() for unframing.
 *
 * @arg: pointer to the struct ccmni_instance_t, cast to unsigned long
 *       (standard tasklet calling convention).
 *
 * Runs with ccmni->spinlock held (BH-disabled).  After consuming data it
 * advances the shared read index by the number of bytes ccmni_receive()
 * actually consumed and ACKs the modem; if no physical CCIF channel is
 * available the ACK is retried via the instance timer.
 */
static void ccmni_read(unsigned long arg)
{
	int part, size;
	int ret;
	int read, write, consumed;
	unsigned char *string;
	struct ccmni_instance_t *ccmni = (struct ccmni_instance_t *) arg;
	struct ccci_msg_t msg;
	struct ccmni_v1_ctl_block_t *ctl_b = (struct ccmni_v1_ctl_block_t *) ccmni->owner;
	int md_id = ctl_b->m_md_id;
	char *rx_buffer;

	spin_lock_bh(&ccmni->spinlock);
	if (ctl_b->ccci_is_ready == 0) {
		CCCI_DBG_MSG(md_id, "net",
			     "CCMNI%d_read fail when modem not ready\n",
			     ccmni->channel);
		goto out;
	}

	/* Snapshot the ring indices; 'size' is the number of unread bytes
	 * between read and write (corrected below if it wrapped). */
	string = ccmni->read_buffer;
	read = ccmni->shared_mem->rx_control.read;
	write = ccmni->shared_mem->rx_control.write;
	size = write - read;
	part = 0;
	rx_buffer = ccmni->shared_mem->buffer;

	if (size < 0)
		size += ccmni->shared_mem->rx_control.length;

	/* Wrapped ring: copy the tail segment [read..length) first, then
	 * fall through to copy the head segment from offset 0. */
	if (read > write) {
		part = ccmni->shared_mem->rx_control.length - read;
		memcpy(string, &rx_buffer[read], part);

		size -= part;
		string += part;
		read = 0;
	}

	memcpy(string, &rx_buffer[read], size);
	CCCI_CCMNI_MSG(md_id, "CCMNI%d_receive[Before]: size=%d, read=%d\n",
		       ccmni->channel, (size + part), read);
	consumed = ccmni_receive(ccmni, size + part);
	CCCI_CCMNI_MSG(md_id, "CCMNI%d_receive[After]: consume=%d\n",
		       ccmni->channel, consumed);

	/*   Calculate the new position of the read pointer. */
	/*   Take into consideration the number of bytes actually consumed; */
	/*   i.e. number of bytes taken up by complete IP packets.    */
	read += size;
	if (read >= ccmni->shared_mem->rx_control.length)
		read -= ccmni->shared_mem->rx_control.length;

	/* Partially-consumed data: rewind so the unconsumed remainder is
	 * re-read on the next invocation (wrapping if needed). */
	if (consumed < (size + part)) {
		read -= ((size + part) - consumed);
		if (read < 0)
			read += ccmni->shared_mem->rx_control.length;
	}

	ccmni->shared_mem->rx_control.read = read;
	/*   Send an acknowledgement back to modem side. */
	CCCI_CCMNI_MSG(md_id, "CCMNI%d_read to write mailbox(ch%d, tty%d)\n",
		       ccmni->channel, ccmni->uart_rx_ack,
		       CCMNI_CHANNEL_OFFSET + ccmni->channel);
	/* ret = ccci_write_mailbox(ccmni->uart_rx_ack, CCMNI_CHANNEL_OFFSET + ccmni->channel);  */
	msg.magic = 0xFFFFFFFF;
	msg.id = CCMNI_CHANNEL_OFFSET + ccmni->channel;
	msg.channel = ccmni->uart_rx_ack;
	msg.reserved = 0;
	ret = ccci_message_send(md_id, &msg, 1);
	if (ret == -CCCI_ERR_CCIF_NO_PHYSICAL_CHANNEL) {
		/* No free CCIF channel: defer the ACK via the timer. */
		set_bit(CCMNI_RECV_ACK_PENDING, &ccmni->flags);
		mod_timer(&ccmni->timer, jiffies);
	} else if (ret == sizeof(struct ccci_msg_t))
		clear_bit(CCMNI_RECV_ACK_PENDING, &ccmni->flags);
 out:
	spin_unlock_bh(&ccmni->spinlock);

	/* Keep the system awake briefly so userspace can drain the data. */
	CCCI_CCMNI_MSG(md_id, "CCMNI%d_read invoke wake_lock_timeout(1s)\n",
		       ccmni->channel);
	wake_lock_timeout(&ctl_b->ccmni_wake_lock, HZ);

	return;
}
/* (extraction artifact removed here: stray example-listing separator "Beispiel #4" / "0") */
/*
 * ccmni_receive - unframe 'length' bytes from ccmni->read_buffer into
 * complete IP packets and inject each one into the network stack.
 *
 * @ccmni:  CCMNI v1 instance whose read_buffer holds the framed data.
 * @length: number of framed bytes available in read_buffer.
 *
 * Returns the number of input bytes actually consumed by complete
 * packets (the caller uses this to rewind the shared read pointer for
 * any partial trailing frame).
 *
 * pfp_unframe() may run out of complete_ippkt entries mid-stream; in
 * that case it sets try_decode_again and this function loops, resuming
 * decode from the first unparsed input byte.
 */
static int ccmni_receive(struct ccmni_instance_t *ccmni, int length)
{
	int counter, ret;
	struct packet_info_t packet_info;
	struct complete_ippkt_t *packet;
	struct complete_ippkt_t *processed_packet;
	struct sk_buff *skb;
	struct complete_ippkt_t last_packet = { 0 };
	int offset_put_pkt = 0;		/* write offset into decode_buffer */
	int offset_parse_frame = 0;	/* read offset into read_buffer */
	int packet_type;
	struct ccmni_v1_ctl_block_t *ctl_b = (struct ccmni_v1_ctl_block_t *) ccmni->owner;
	int md_id = ctl_b->m_md_id;

	CCCI_CCMNI_MSG(md_id, "CCMNI%d_receive() invoke pfp_unframe()\n",
		       ccmni->channel);
	do {
		packet_info = pfp_unframe(ccmni->decode_buffer + offset_put_pkt,
					  CCCI1_CCMNI_BUF_SIZE - offset_put_pkt,
					  ccmni->read_buffer +
					  offset_parse_frame, length,
					  ccmni->channel);
		packet = packet_info.pkt_list;

		CCCI_CCMNI_MSG(md_id,
			       "CCMNI%d num_complete_pkt=%d after pfp_unframe\n",
			       ccmni->channel,
			       packet_info.num_complete_packets);

		/* Walk the linked list of decoded packets and hand each one
		 * to the stack as an skb with a synthetic ethernet header. */
		for (counter = 0; counter < packet_info.num_complete_packets;
		     counter++) {
			skb = dev_alloc_skb(packet->pkt_size);
			if (skb) {
				/* High nibble of byte 0 = IP version field. */
				packet_type = packet->pkt_data[0] & 0xF0;
				memcpy(skb_put(skb, packet->pkt_size),
				       packet->pkt_data, packet->pkt_size);
				ccmni_make_etherframe(skb->data - ETH_HLEN,
						      ccmni->dev->dev_addr,
						      packet_type);
				skb_set_mac_header(skb, -ETH_HLEN);
				skb->dev = ccmni->dev;
				if (packet_type == IPV6_VERSION)
					skb->protocol = htons(ETH_P_IPV6);
				else
					skb->protocol = htons(ETH_P_IP);
				skb->ip_summed = CHECKSUM_NONE;

				ret = netif_rx(skb);

				CCCI_CCMNI_MSG(md_id,
					       "CCMNI%d invoke netif_rx()=%d\n",
					       ccmni->channel, ret);
				ccmni->dev->stats.rx_packets++;
				ccmni->dev->stats.rx_bytes += packet->pkt_size;
				CCCI_CCMNI_MSG(md_id,
					       "CCMNI%d rx_pkts=%ld, stats_rx_bytes=%ld\n",
					       ccmni->channel,
					       ccmni->dev->stats.rx_packets,
					       ccmni->dev->stats.rx_bytes);
			} else {
				/* Packet is dropped; its input bytes still
				 * count as consumed below. */
				CCCI_DBG_MSG(md_id, "net",
					     "CCMNI%d Socket buffer allocate fail\n",
					     ccmni->channel);
			}

			/* Keep a by-value copy of the last packet: the entry
			 * is released (and may be reused) right after. */
			processed_packet = packet;
			last_packet = *processed_packet;
			packet = packet->next;

			/* Only clear the entry_used flag as 0 */
			release_one_used_complete_ippkt_entry(processed_packet);
		};

		/* It must to check if it is necessary to invoke the pfp_unframe()
		 * again due to no available complete_ippkt entry
		 */
		if (packet_info.try_decode_again == 1) {
			offset_put_pkt +=
			    (last_packet.pkt_data - ccmni->decode_buffer +
			     last_packet.pkt_size);
			offset_parse_frame += packet_info.consumed_length;
		}
	} while (packet_info.try_decode_again == 1);

	/* Add the final iteration's consumption (intermediate iterations
	 * were accumulated inside the loop). */
	offset_parse_frame += packet_info.consumed_length;
	return offset_parse_frame;
}
/*
 * ccmni_v2_create_instance - allocate, initialize, and register one
 * CCMNI v2 network interface for modem @md_id on logical @channel (0-2).
 *
 * Sets up the net_device ("ccmniX" for modem 0, "ccNmniX" otherwise),
 * maps the shared-memory control block, seeds the RX ring with default
 * buffers, binds the CCCI logical channels, and registers the RX
 * callbacks, timer, and tasklet.
 *
 * Returns 0 on success or a negative errno; on failure the net_device
 * is unregistered (if needed) and freed.
 */
static int ccmni_v2_create_instance(int md_id, int channel)
{
	int  ret, size, count;
	int  uart_rx, uart_rx_ack;
	int  uart_tx, uart_tx_ack;
	ccmni_v2_instance_t  *ccmni;
	struct net_device *dev = NULL;
	int *ccmni_rx_base_phy;
	int *ccmni_rx_base_virt;
	unsigned char *ptr_virt;
#if CCMNI_DBG_INFO
	dbg_info_ccmni_t *dbg_info;
#endif
	ccmni_v2_ctl_block_t *ctl_b = (ccmni_v2_ctl_block_t *)ccmni_ctl_block[md_id];

	//  Network device creation and registration.
	dev = alloc_netdev(sizeof(ccmni_v2_instance_t), "", ccmni_v2_setup);
	if (dev == NULL)
	{
		CCCI_MSG_INF(md_id, "net", "CCMNI%d allocate netdev fail!\n", channel); 
		return -ENOMEM;
	}
    
	ccmni          = netdev_priv(dev);
	ccmni->dev     = dev;
	ccmni->channel = channel;
	ccmni->owner   = ccmni_ctl_block[md_id];

	// First modem keeps the legacy "ccmniX" name; later modems are
	// disambiguated as "ccNmniX" (N = md_id + 1).
	if(md_id == 0) {
		sprintf(dev->name, "ccmni%d", channel);
	} else {
		sprintf(dev->name, "cc%dmni%d", md_id+1, channel);
		//sprintf(dev->name, "ccmni%d", channel);
	}

	ret = register_netdev(dev);
	if (ret != 0)
	{
		CCCI_MSG_INF(md_id, "net", "CCMNI%d register netdev fail: %d\n", ccmni->channel, ret);        
		goto _ccmni_create_instance_exit;
	}

	// Obtain the shared-memory control region for this channel
	// (virtual pointer, physical address, and size).
	ASSERT(ccci_ccmni_v2_ctl_mem_base_req(md_id, ccmni->channel, (int*)&ccmni->shared_mem, \
		&ccmni->shared_mem_phys_addr, &size) == 0);

	if (ccmni->shared_mem == NULL)
	{
		CCCI_MSG_INF(md_id, "net", "CCMNI%d allocate memory fail\n", ccmni->channel);
		unregister_netdev(dev);        
		ret = -ENOMEM;

		goto _ccmni_create_instance_exit;
	}

	CCCI_CCMNI_MSG(md_id, "0x%08X:0x%08X:%d\n", (unsigned int)ccmni->shared_mem, \
		(unsigned int)ccmni->shared_mem_phys_addr, size);
        
	// Reset the RX ring control words and clear the ring itself.
	ccmni->shared_mem->rx_control.read_out = 0;
	ccmni->shared_mem->rx_control.avai_out = 0;
	ccmni->shared_mem->rx_control.avai_in  = CCMNI_CTRL_Q_RX_SIZE_DEFAULT - 1;
	ccmni->shared_mem->rx_control.q_length = CCMNI_CTRL_Q_RX_SIZE;
	memset(ccmni->shared_mem->q_rx_ringbuff, 0, ccmni->shared_mem->rx_control.q_length * sizeof(q_ringbuf_ccmni_t));

	ccmni_v2_dl_base_req(md_id, &ccmni_rx_base_virt, &ccmni_rx_base_phy);

	if (ccmni_rx_base_virt == NULL || ccmni_rx_base_phy == NULL)
	{
		CCCI_MSG_INF(md_id, "net", "CCMNI%d allocate memory fail\n", ccmni->channel);
		unregister_netdev(dev);        
		ret = -ENOMEM;
        
		goto _ccmni_create_instance_exit;
	}

	// Map this CCMNI channel to its four CCCI logical channels.
	switch(ccmni->channel)
	{
		case 0:
			uart_rx     = CCCI_CCMNI1_RX;
			uart_rx_ack = CCCI_CCMNI1_RX_ACK;
			uart_tx     = CCCI_CCMNI1_TX;
			uart_tx_ack = CCCI_CCMNI1_TX_ACK;
			break;            

		case 1:
			uart_rx     = CCCI_CCMNI2_RX;
			uart_rx_ack = CCCI_CCMNI2_RX_ACK;
			uart_tx     = CCCI_CCMNI2_TX;
			uart_tx_ack = CCCI_CCMNI2_TX_ACK;
			break;            

		case 2:
			uart_rx     = CCCI_CCMNI3_RX;
			uart_rx_ack = CCCI_CCMNI3_RX_ACK;
			uart_tx     = CCCI_CCMNI3_TX;
			uart_tx_ack = CCCI_CCMNI3_TX_ACK;
			break;            

		default:
			CCCI_MSG_INF(md_id, "net", "CCMNI%d, Invalid ccmni number\n", ccmni->channel);
			unregister_netdev(dev);
			ret = -ENOSYS;
			goto _ccmni_create_instance_exit;
	}
	ccmni->m_md_id = md_id;

	//Each channel has 100 RX buffers default
	for (count = 0; count < CCMNI_CTRL_Q_RX_SIZE_DEFAULT; count++)
	{
		// Ring entries store PHYSICAL addresses (shared with the
		// modem); the data pointer skips the header + debug regions.
		ccmni->shared_mem->q_rx_ringbuff[count].ptr = \
			(CCMNI_CTRL_Q_RX_SIZE_DEFAULT * ccmni->channel + count ) * CCMNI_SINGLE_BUFF_SIZE + \
			(unsigned char *)ccmni_rx_base_phy + CCMNI_BUFF_HEADER_SIZE + CCMNI_BUFF_DBG_INFO_SIZE;

		ptr_virt = ccmni_v2_phys_to_virt(md_id, (unsigned char *)(ccmni->shared_mem->q_rx_ringbuff[count].ptr));
		
		//buffer header and footer init
		//Assume int to be 32bit. May need further modifying!!!!!
		*((int*)(ptr_virt - CCMNI_BUFF_HEADER_SIZE)) = CCMNI_BUFF_HEADER;
		*((int*)(ptr_virt + CCMNI_BUFF_DATA_FIELD_SIZE)) = CCMNI_BUFF_FOOTER;

#if CCMNI_DBG_INFO
		//debug info
		dbg_info = (dbg_info_ccmni_t *)(ptr_virt - CCMNI_BUFF_HEADER_SIZE - CCMNI_BUFF_DBG_INFO_SIZE);
		dbg_info->port        = ccmni->channel;
		dbg_info->avai_in_no  = count;
#endif
	}
    
	ccmni->uart_rx      = uart_rx;
	ccmni->uart_rx_ack  = uart_rx_ack;
	ccmni->uart_tx      = uart_tx;
	ccmni->uart_tx_ack  = uart_tx_ack;
    
	// Register this ccmni instance to the ccci driver.
	// pass it the notification handler.
	ASSERT(register_to_logic_ch(md_id, uart_rx,     ccmni_v2_callback, (void *) ccmni) == 0);
	ASSERT(register_to_logic_ch(md_id, uart_tx_ack, ccmni_v2_callback, (void *) ccmni) == 0);

	// Initialize the spinlock.
	spin_lock_init(&ccmni->spinlock);
	setup_timer(&ccmni->timer, timer_func, (unsigned long)ccmni);

	// Initialize the tasklet.
	tasklet_init(&ccmni->tasklet, ccmni_v2_read, (unsigned long)ccmni);

	ctl_b->ccmni_v2_instance[channel] = ccmni;
	ccmni->ready = 1;
	ccmni->net_if_off = 0;

	// ret is 0 here (set by the successful register_netdev above).
	return ret;
    
_ccmni_create_instance_exit:
	free_netdev(dev);
    
	return ret;
}
//  The function start_xmit is called when there is one packet to transmit.
/*
 * ccmni_v2_start_xmit - net_device hard_start_xmit hook: copy one skb
 * into the next free slot of the shared-memory TX ring and notify the
 * modem via a CCCI message.
 *
 * Returns NETDEV_TX_OK when the packet was queued (or dropped, e.g.
 * oversized), NETDEV_TX_BUSY when the modem is not ready or the TX ring
 * is full (in which case the queue is stopped until the modem ACKs).
 *
 * Runs with ccmni->spinlock held (BH-disabled).  Statement order around
 * the mb() barriers is significant: data and end-byte must be visible
 * before avai_out is advanced, and avai_out before the CCCI message.
 */
static int ccmni_v2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int ret = NETDEV_TX_OK;
	int result = 0;
	int read_out, avai_in, avai_out, q_length, q_idx;
#if CCMNI_DBG_INFO
	dbg_info_ccmni_t	*dbg_info;
#endif

	unsigned char *ccmni_ptr;
	ccmni_v2_instance_t		*ccmni = netdev_priv(dev);
	ccmni_v2_ctl_block_t	*ctl_b = (ccmni_v2_ctl_block_t *)(ccmni->owner);
	int						md_id = ctl_b->m_md_id;
	ccci_msg_t				msg;
   
	spin_lock_bh(&ccmni->spinlock);

	if (ctl_b->ccci_is_ready==0) 
	{
		CCCI_DBG_MSG(md_id, "net", "CCMNI%d transfer data fail when modem not ready \n", ccmni->channel);
		ret = NETDEV_TX_BUSY;
		goto _ccmni_start_xmit_busy;
	}

	/* Snapshot the TX ring control words (shared with the modem) and
	 * sanity-check them before trusting them as indices. */
	read_out = ccmni->shared_mem->tx_control.read_out;
	avai_in  = ccmni->shared_mem->tx_control.avai_in;
	avai_out = ccmni->shared_mem->tx_control.avai_out;
	q_length = ccmni->shared_mem->tx_control.q_length;

	if ((read_out < 0) || (avai_out < 0) || (avai_in < 0) || (q_length < 0))
	{
		CCCI_DBG_MSG(md_id, "net", "CCMNI%d_read fail: avai_out=%d, read_out=%d, avai_in=%d, q_length=%d\n", \
			ccmni->channel, avai_out, read_out, avai_in, q_length);
		goto _ccmni_start_xmit_busy;
	}
	
	if ((read_out >= q_length) || (avai_out >= q_length) || (avai_in >= q_length))
	{
		CCCI_DBG_MSG(md_id, "net", "CCMNI%d_read fail: avai_out=%d, read_out=%d, avai_in=%d, q_length=%d\n", \
			ccmni->channel, avai_out, read_out, avai_in, q_length);
		goto _ccmni_start_xmit_busy;
	}

	//Choose Q index
	q_idx = avai_out;
	ccmni_ptr = ccmni->shared_mem->q_tx_ringbuff[q_idx].ptr;

	//check if too many data waiting to be read out or Q not initialized yet
	//ccmni_ptr=NULL when not initialized???? haow.wang
	if ((q_idx == avai_in) || (ccmni_ptr == NULL) )
	{
		CCCI_DBG_MSG(md_id, "net", "CCMNI%d TX busy and stop queue: q_idx=%d, skb->len=%d \n", \
			ccmni->channel, q_idx, skb->len);
        CCCI_DBG_MSG(md_id, "net", "       TX read_out = %d  avai_out = %d avai_in = %d\n", \
			ccmni->shared_mem->tx_control.read_out, ccmni->shared_mem->tx_control.avai_out, ccmni->shared_mem->tx_control.avai_in); 
        CCCI_DBG_MSG(md_id, "net", "       RX read_out = %d  avai_out = %d avai_in = %d\n", \
			ccmni->shared_mem->rx_control.read_out, ccmni->shared_mem->rx_control.avai_out, ccmni->shared_mem->rx_control.avai_in);
				
		netif_stop_queue(ccmni->dev);

		//Set CCMNI ready to ZERO, and wait for the ACK from modem side.
		ccmni->ready = 0;
        ret          = NETDEV_TX_BUSY;
		goto _ccmni_start_xmit_busy;
	}

	/* Ring entries hold physical addresses; translate to a virtual
	 * pointer before copying the payload in. */
	ccmni_ptr = ccmni_v2_phys_to_virt(md_id, (unsigned char *)(ccmni->shared_mem->q_tx_ringbuff[q_idx].ptr));

    CCCI_CCMNI_MSG(md_id, "CCMNI%d_start_xmit: skb_len=%d, ccmni_ready=%d \n", \
		ccmni->channel, skb->len, ccmni->ready);
    
	if (skb->len > CCMNI_MTU)
	{
		//Sanity check; this should not happen!
		//Digest and return OK.
		CCCI_DBG_MSG(md_id, "net", "CCMNI%d packet size exceed 1500 bytes: size=%d \n", \
			ccmni->channel, skb->len);
		dev->stats.tx_dropped++;
		goto _ccmni_start_xmit_exit;
	}

#if CCMNI_DBG_INFO
	//DBG info
	dbg_info = (dbg_info_ccmni_t *)(ccmni_ptr - CCMNI_BUFF_HEADER_SIZE - CCMNI_BUFF_DBG_INFO_SIZE);
	dbg_info->avai_out_no = q_idx;
#endif

	memcpy(ccmni_ptr, skb->data, skb->len);
	ccmni->shared_mem->q_tx_ringbuff[q_idx].len = skb->len;

	//End byte
	*(unsigned char*)(ccmni_ptr + skb->len) = CCMNI_DATA_END;

	/* Ensure payload and end-byte are visible before publishing. */
	mb();

	//Update avail_out after data buffer filled
	q_idx++;
	ccmni->shared_mem->tx_control.avai_out = (q_idx & (q_length - 1));

	mb();

	msg.addr = 0;
	msg.len = skb->len;
	msg.channel = ccmni->uart_tx;
	msg.reserved = 0;
	result = ccci_message_send(md_id, &msg, 1);
	if (result==-CCCI_ERR_CCIF_NO_PHYSICAL_CHANNEL)
	{
		/* No free CCIF channel: defer the notification via timer. */
		set_bit(CCMNI_SEND_PENDING,&ccmni->flags);
		ccmni->send_len +=skb->len;
		mod_timer(&ccmni->timer,jiffies);
	}
	else if (result==sizeof(ccci_msg_t))
		clear_bit(CCMNI_SEND_PENDING,&ccmni->flags);

	dev->stats.tx_packets++;
	dev->stats.tx_bytes  += skb->len;
    
_ccmni_start_xmit_exit:

	dev_kfree_skb(skb);

_ccmni_start_xmit_busy:
    
	spin_unlock_bh(&ccmni->spinlock);
    
	return ret;
}
/*
 * ccmni_v2_read - tasklet handler that drains the CCMNI v2 shared-memory
 * RX ring: validates each buffer, delivers it via ccmni_v2_receive(),
 * recycles the slot back to the modem (avai_in), advances read_out, and
 * ACKs the modem over CCCI.
 *
 * @arg: pointer to the ccmni_v2_instance_t, cast to unsigned long
 *       (standard tasklet calling convention).
 *
 * On sk_buff allocation failure the loop stops early and the timer is
 * armed to retry in ~10 ms; consumed slots up to that point are still
 * committed.
 *
 * Fix vs. previous revision: the NULL check on ccmni used to run AFTER
 * ccmni->owner had already been dereferenced (and its error log itself
 * dereferenced ccmni->channel); the check is now first.
 */
static void ccmni_v2_read(unsigned long arg)
{
	int ret;
	int read_out, avai_out, avai_in, q_length;
	int packet_cnt, packet_cnt_save, consumed;
	int rx_buf_res_left_cnt;
#if CCMNI_DBG_INFO
	dbg_info_ccmni_t *dbg_info;
#endif
	ccmni_v2_instance_t  *ccmni = (ccmni_v2_instance_t *) arg;
	unsigned char *ccmni_ptr;
	unsigned int ccmni_len, q_idx;
	ccmni_v2_ctl_block_t *ctl_b;
	int md_id;
	ccci_msg_t msg;

	/* Validate ccmni before any dereference. */
	if (ccmni == NULL)
		return;

	ctl_b = (ccmni_v2_ctl_block_t *)ccmni->owner;
	md_id = ctl_b->m_md_id;

	spin_lock_bh(&ccmni->spinlock);
	
	if (ctl_b->ccci_is_ready==0)  
	{
		CCCI_DBG_MSG(md_id, "net", "CCMNI%d_read fail when modem not ready\n", ccmni->channel);
		goto out;
	}

	/* Snapshot the RX ring control words (shared with the modem) and
	 * sanity-check them before trusting them as indices. */
	read_out = ccmni->shared_mem->rx_control.read_out;
	avai_out = ccmni->shared_mem->rx_control.avai_out;
	avai_in  = ccmni->shared_mem->rx_control.avai_in;
	q_length = ccmni->shared_mem->rx_control.q_length;

	if ((read_out < 0) || (avai_out < 0) || (avai_in < 0) || (q_length < 0))
	{
		CCCI_DBG_MSG(md_id, "net", "CCMNI%d_read fail: avai_out=%d, read_out=%d, avai_in=%d, q_length=%d\n", \
			ccmni->channel, avai_out, read_out, avai_in, q_length);
		goto out;
	}

	if ((read_out >= q_length) || (avai_out >= q_length) || (avai_in >= q_length))
	{
		CCCI_DBG_MSG(md_id, "net", "CCMNI%d_read fail: avai_out=%d, read_out=%d, avai_in=%d, q_length=%d\n", \
			ccmni->channel, avai_out, read_out, avai_in, q_length);
		goto out;
	}

	//Number of packets waiting to be processed
	packet_cnt = avai_out >= read_out ? (avai_out - read_out) : (avai_out - read_out + q_length);
	
	packet_cnt_save = packet_cnt;
	rx_buf_res_left_cnt = avai_in >= avai_out ? (avai_in - avai_out) : (avai_in - avai_out + q_length);

	if (packet_cnt <= 0)
	{
		CCCI_DBG_MSG(md_id, "net", "CCMNI%d_read fail: nothing to read, avai_out=%d, read_out=%d, q_length=%d\n", \
			ccmni->channel, avai_out, read_out, q_length);
		goto out;
	}

	q_idx = read_out;
	
    CCCI_CCMNI_MSG(md_id, "CCMNI%d_receive[Before]: avai_out=%d, read_out=%d, avai_in=%d, packet_cnt=%d\n", \
		ccmni->channel, avai_out, read_out, avai_in, packet_cnt);

	consumed = 0;

	for (; packet_cnt > 0; packet_cnt--)
	{
		/* q_length is assumed to be a power of two: mask wraps the index. */
		q_idx &= q_length - 1;
				
		ccmni_ptr = ccmni_v2_phys_to_virt(md_id, (unsigned char *)(ccmni->shared_mem->q_rx_ringbuff[q_idx].ptr));
		ccmni_len = ccmni->shared_mem->q_rx_ringbuff[q_idx].len;
#if CCMNI_DBG_INFO
		//DBG info
		dbg_info = (dbg_info_ccmni_t *)(ccmni_ptr - CCMNI_BUFF_HEADER_SIZE - CCMNI_BUFF_DBG_INFO_SIZE);
#endif
		/* Corrupt buffer (bad header/footer magic): drop the packet
		 * but still recycle the slot back to the modem. */
		if (-CCCI_ERR_MEM_CHECK_FAIL == ccmni_v2_check_info(md_id, ccmni->channel, ccmni_ptr, ccmni_len))
		{
			CCCI_DBG_MSG(md_id, "net", "CCMNI%d_read: check info error, read_out=%d\n", ccmni->channel, read_out);
#if CCMNI_DBG_INFO
			//dbg_info->port        = ccmni->channel;
			dbg_info->avai_in_no  = q_idx;
			//dbg_info->avai_out_no = q_idx;
			dbg_info->read_out_no = q_idx;
#endif
			avai_in++;
			avai_in &= q_length - 1;

			ccmni->shared_mem->q_rx_ringbuff[avai_in].ptr = ccmni->shared_mem->q_rx_ringbuff[q_idx].ptr;

			ccmni_ptr = ccmni_v2_phys_to_virt(md_id, (unsigned char *)(ccmni->shared_mem->q_rx_ringbuff[avai_in].ptr));
#if CCMNI_DBG_INFO
			dbg_info = (dbg_info_ccmni_t *)(ccmni_ptr - CCMNI_BUFF_HEADER_SIZE - CCMNI_BUFF_DBG_INFO_SIZE);
			dbg_info->avai_in_no  = avai_in;
#endif
			q_idx++;
			consumed++;
			continue;
		}
		ret = ccmni_v2_receive(ccmni, ccmni_ptr, ccmni_len);
		if(0 == ret)
		{
			/* Delivered: recycle the slot to the modem's free list. */
#if CCMNI_DBG_INFO
			//dbg_info->port        = ccmni->channel;
			dbg_info->avai_in_no  = q_idx;
			//dbg_info->avai_out_no = q_idx;
			dbg_info->read_out_no = q_idx;
#endif
			avai_in++;
			avai_in &= q_length - 1;
			ccmni->shared_mem->q_rx_ringbuff[avai_in].ptr = ccmni->shared_mem->q_rx_ringbuff[q_idx].ptr;

			ccmni_ptr = ccmni_v2_phys_to_virt(md_id, (unsigned char *)(ccmni->shared_mem->q_rx_ringbuff[avai_in].ptr));
#if CCMNI_DBG_INFO
			dbg_info = (dbg_info_ccmni_t *)(ccmni_ptr - CCMNI_BUFF_HEADER_SIZE - CCMNI_BUFF_DBG_INFO_SIZE);
			dbg_info->avai_in_no  = avai_in;
#endif
			q_idx++;
			consumed++;
		} else if (-CCCI_ERR_MEM_CHECK_FAIL == ret)	{
			//If dev_alloc_skb() failed, retry right now may still fail. So setup timer, and retry later.
			set_bit(CCMNI_RECV_PENDING,&ccmni->flags);
			CCCI_DBG_MSG(md_id, "net", "CCMNI%d_read: no sk_buff, retrying, read_out=%d, avai_out=%d\n", \
				ccmni->channel, q_idx, avai_out);

			mod_timer(&ccmni->timer,jiffies + msecs_to_jiffies(10));	//10 ms???

			break;
		}
	}

	read_out = (q_idx & (q_length - 1));

	CCCI_CCMNI_MSG(md_id, "CCMNI%d_receive[After]: consumed=%d\n", ccmni->channel, consumed);

	if (consumed > packet_cnt_save)
	{
		//Sanity check. This should not happen!
		CCCI_DBG_MSG(md_id, "net", "CCMNI%d_read fail: consumed more than packet_cnt, consumed = %d, packet_cnt = %d\n", \
			ccmni->channel, consumed, packet_cnt_save);

		//Should ignore all data in buffer??? haow.wang
		ccmni->shared_mem->rx_control.read_out = avai_out;
		ccmni->shared_mem->rx_control.avai_in  = avai_in;
		goto out;
	}

	/* Publish the updated indices to the shared control block. */
	ccmni->shared_mem->rx_control.read_out = read_out;
	ccmni->shared_mem->rx_control.avai_in  = avai_in;

	CCCI_CCMNI_MSG(md_id, "CCMNI%d_read to write mailbox(ch%d, tty%d)\n", ccmni->channel,
		ccmni->uart_rx_ack, CCMNI_CHANNEL_OFFSET + ccmni->channel);
	msg.magic = 0xFFFFFFFF;
	msg.id = CCMNI_CHANNEL_OFFSET + ccmni->channel;
	msg.channel = ccmni->uart_rx_ack;
	msg.reserved = 0;
	ret = ccci_message_send(md_id, &msg, 1); 
	if (ret==-CCCI_ERR_CCIF_NO_PHYSICAL_CHANNEL) {
		/* No free CCIF channel: defer the ACK via the timer. */
		set_bit(CCMNI_RECV_ACK_PENDING,&ccmni->flags);
		mod_timer(&ccmni->timer,jiffies);
	} else if (ret==sizeof(ccci_msg_t))
		clear_bit(CCMNI_RECV_ACK_PENDING,&ccmni->flags);

out:
	spin_unlock_bh(&ccmni->spinlock);

	/* Keep the system awake briefly so the stack can drain the data. */
	CCCI_CCMNI_MSG(md_id, "CCMNI%d_read invoke wake_lock_timeout(1s)\n", ccmni->channel);
	wake_lock_timeout(&ctl_b->ccmni_wake_lock, HZ);

	return;
}