Example #1
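/*
 * RX callback for a character-device port: if the target cdev node is open,
 * the skb is queued on its rx_skb_list, the RX packet counter and statistics
 * are updated and readers blocked on rx_waitq are woken; otherwise the packet
 * is dropped and accounted as an RX drop.
 */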
static KAL_INT32 eemcs_cdev_rx_callback(struct sk_buff *skb, KAL_UINT32 private_data)
{
    CCCI_BUFF_T *p_cccih = NULL;
    KAL_UINT32  port_id;

	DEBUG_LOG_FUNCTION_ENTRY;

    if (skb == NULL) {
        DEBUG_LOG_FUNCTION_LEAVE;
        return KAL_FAIL;
    }

    p_cccih = (CCCI_BUFF_T *)skb->data;
    DBGLOG(CHAR, DBG, "cdev_rx_callback: CCCI_H(0x%x)(0x%x)(0x%x)(0x%x)",\
        p_cccih->data[0], p_cccih->data[1], p_cccih->channel, p_cccih->reserved);

    port_id = ccci_ch_to_port(p_cccih->channel);
    
    if(CDEV_OPEN == atomic_read(&eemcs_cdev_inst.cdev_node[PORT2IDX(port_id)].cdev_state)){
        skb_queue_tail(&eemcs_cdev_inst.cdev_node[PORT2IDX(port_id)].rx_skb_list, skb); /* spin_lock_irqsave inside; see skbuff.c */
        atomic_inc(&eemcs_cdev_inst.cdev_node[PORT2IDX(port_id)].rx_pkt_cnt);     /* increase rx_pkt_cnt */
        eemcs_update_statistics_number(0, port_id, RX, QUEUE, \
			atomic_read(&eemcs_cdev_inst.cdev_node[PORT2IDX(port_id)].rx_pkt_cnt));
        wake_up(&eemcs_cdev_inst.cdev_node[PORT2IDX(port_id)].rx_waitq);          /* wake up rx_waitq */
    }else{
        if(port_id != CCCI_PORT_MD_LOG) /* If port_id == CCCI_PORT_MD_LOG, skip the drop log (requested by ST team) */
        {
            DBGLOG(CHAR, ERR, "!!! PKT DROP when cdev(%d) closed", port_id);
        }
        dev_kfree_skb(skb);	
        eemcs_ccci_release_rx_skb(port_id, 1, skb);
        eemcs_update_statistics(0, port_id, RX, DROP);
    }
    
    DEBUG_LOG_FUNCTION_LEAVE;
	return KAL_SUCCESS ;
}
Example #2
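/*
 * IPC RX callback: the AP unify id in the CCCI header's "reserved" field is
 * mapped to a local IPC node. For a node in kernel state the payload is
 * treated as an ilm message and forwarded to the MD bridge; for an open
 * user-space node the skb is queued and the reader is notified via SIGIO and
 * POLLIN; otherwise the packet is dropped.
 */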
static KAL_INT32 eemcs_ipc_rx_callback(struct sk_buff *skb, KAL_UINT32 private_data)
{
    CCCI_BUFF_T *p_cccih = NULL;
    KAL_UINT32  node_id;
    IPC_MSGSVC_TASKMAP_T *id_map;

	DEBUG_LOG_FUNCTION_ENTRY;

    if (skb == NULL) {
        DEBUG_LOG_FUNCTION_LEAVE;
        return KAL_FAIL;
    }

    p_cccih = (CCCI_BUFF_T *)skb->data;
    DBGLOG(IPCD,TRA,"%s: CCCI_H(0x%08X, 0x%08X, %02d, 0x%08X)", __FUNCTION__,\
        p_cccih->data[0], p_cccih->data[1], p_cccih->channel, p_cccih->reserved);
    
#ifndef _EEMCS_IPCD_LB_UT_
    /* Check IPC task id and extq_id */
    if ((id_map = unify_AP_id_2_local_id(p_cccih->reserved)) == NULL)
    {
        DBGLOG(IPCD,ERR,"Wrong AP Unify id (%#x)@RX.!!! PACKET DROP !!!\n",p_cccih->reserved);
        dev_kfree_skb(skb);
        return KAL_SUCCESS;
    }
    node_id = id_map->task_id;
#else
    node_id = 0;
#endif
    if(IPCD_KERNEL == atomic_read(&eemcs_ipc_inst.ipc_node[node_id].dev_state)){
        ipc_ilm_t* p_ilm = NULL;
        skb_pull(skb, sizeof(CCCI_BUFF_T));
        p_ilm = (ipc_ilm_t*)(skb->data);
        p_ilm->dest_mod_id = p_cccih->reserved;
        p_ilm->local_para_ptr = (local_para_struct *)(p_ilm + 1);
        mtk_conn_md_bridge_send_msg((ipc_ilm_t*)(skb->data));
        dev_kfree_skb(skb);
        DBGLOG(IPCD, TRA, "IPC(%d) MT_CONN kernel rx callback", node_id);
    }
    else if(IPCD_OPEN == atomic_read(&eemcs_ipc_inst.ipc_node[node_id].dev_state)){
        skb_queue_tail(&eemcs_ipc_inst.ipc_node[node_id].rx_skb_list, skb); /* spin_lock_irqsave inside; see skbuff.c */
        atomic_inc(&eemcs_ipc_inst.ipc_node[node_id].rx_pkt_cnt);     /* increase rx_pkt_cnt */
        kill_fasync(&eemcs_ipc_inst.ipc_node[node_id].fasync, SIGIO, POLL_IN);
        wake_up_poll(&eemcs_ipc_inst.ipc_node[node_id].rx_waitq,POLLIN); /* wake up rx_waitq */
    }else{
		DBGLOG(IPCD,ERR,"PKT DROP while ipc dev(%d) closed", node_id);
		dev_kfree_skb(skb);
        eemcs_update_statistics(0, eemcs_ipc_inst.eemcs_port_id, RX, DROP);
    }
    
    DEBUG_LOG_FUNCTION_LEAVE;
	return KAL_SUCCESS ;
}
Example #3
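/*
 * DF-to-CCCI dispatcher: reads one skb from the RX software queue, completes
 * the handle, takes a 0.5 s wake lock, distinguishes xBoot commands on queue 0
 * (MAGIC_MD_CMD) from normal CCCI packets, optionally verifies the RX sequence
 * number, and hands the packet to the per-port rx_cb. Packets for unregistered
 * ports, or for ports that are invalid during a modem exception, are dropped.
 */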
int ccci_df_to_ccci_callback(unsigned int rxq_no)
{
    int ret, hc_ret;
    bool is_xcmd = false;
    struct sk_buff * skb = NULL;
    CCCI_BUFF_T *ccci_h  = NULL;
    XBOOT_CMD *p_xcmd = NULL;
    KAL_UINT32   port_id = CCCI_PORT_CTRL;
    static KAL_UINT32 rx_err_cnt[CCCI_PORT_NUM_MAX] = {0};
#ifdef __EEMCS_EXPT_SUPPORT__
    EEMCS_EXCEPTION_STATE mode = EEMCS_EX_INVALID;
#endif
#if defined (DBG_FEATURE_ADD_CCCI_SEQNO)
    KAL_INT16 channel, seq_num, assert_bit;
#endif

    DEBUG_LOG_FUNCTION_ENTRY;
    /* Step 1. read skb from swq */
    skb = hif_dl_read_swq(rxq_no);
    if(skb == NULL) {
        DBGLOG(CCCI, DBG, "ccci_df_to_ccci_callback read NULL skb on %d", rxq_no);
#ifdef __EEMCS_EXPT_SUPPORT__
        /* "mode" only exists when exception support is compiled in */
        if(is_exception_mode(&mode))
            return KAL_FAIL;
#endif
        KAL_ASSERT(NULL != skb);
    }

    /* Step 2. call handle complete */
    hc_ret = hif_dl_pkt_handle_complete(rxq_no);
    KAL_ASSERT(0 == hc_ret);

    wake_lock_timeout(&eemcs_wake_lock, HZ/2); // Using 0.5s wake lock

	/* Step 3. buffer type */
    if (rxq_no == RXQ_Q0) {
        //is_xcmd = is_xboot_command(skb);
        p_xcmd = (XBOOT_CMD *)skb->data;
	    if (p_xcmd->magic == (KAL_UINT32)MAGIC_MD_CMD) {
			if (check_device_state() >= EEMCS_MOLY_HS_P1) {
        		DBGLOG(CCCI, ERR, "can't recv xBoot cmd when EEMCS state=%d", check_device_state());
    		} else {
    			is_xcmd = true;
	    	}
	    }
    }

    if (is_xcmd) {
        /* Step 4. callback to xBoot */
    	CDEV_LOG(port_id, CCCI, INF, "XBOOT_CMD: 0x%08X, 0x%08X, 0x%08X, 0x%08X",\
        	p_xcmd->magic, p_xcmd->msg_id, p_xcmd->status, p_xcmd->reserved[0]);
    	ret = ccci_port_info[port_id].ch.rx_cb(skb, 0);
    } else {
    	ccci_h = (CCCI_BUFF_T *)skb->data;
    	port_id = ccci_ch_to_port(ccci_h->channel);		
    	CDEV_LOG(port_id, CCCI, INF, "CCCI_H: 0x%08X, 0x%08X, 0x%08X, 0x%08X",\
        	ccci_h->data[0],ccci_h->data[1],ccci_h->channel, ccci_h->reserved);

    	/*check rx sequence number for expect*/
    	#if defined (DBG_FEATURE_ADD_CCCI_SEQNO)
    	channel = ccci_h->channel;
    	seq_num = ccci_h->seq_num;
    	assert_bit = ccci_h->assert_bit;	
    	DBGLOG(CCCI, DBG, "Port%d CCCI_H: data[0]=0x%08X, data[1]=0x%08X, ch=0x%02X, seqno=0x%02X, assert=%d, resv=0x%08X(0x%08X, 0x%08X, 0x%08X)",\
        	port_id, ccci_h->data[0],ccci_h->data[1],ccci_h->channel, ccci_h->seq_num, \
        	ccci_h->assert_bit, ccci_h->reserved, channel, seq_num, assert_bit);
		
    	if(((seq_num - ccci_seqno_tbl[channel].seqno[RX]) & 0x7FFF) != 1 && assert_bit) {
            DBGLOG(CCCI, ERR, "Port%d seqno out-of-order(0x%02X->0x%02X): data[0]=0x%08X, data[1]=0x%08X, ch=0x%02X, seqno=0x%02X, assert=%d, resv=0x%08X", \							
				port_id, seq_num, ccci_seqno_tbl[channel].seqno[RX], ccci_h->data[0], ccci_h->data[1], \
				ccci_h->channel, ccci_h->seq_num, ccci_h->assert_bit, ccci_h->reserved);						
            hif_force_md_assert_swint();
    	}					
    	ccci_seqno_tbl[channel].seqno[RX] = seq_num;
    	#endif
		
        /* Step 4. callback to CCCI device */       
        if(NULL != ccci_port_info[port_id].ch.rx_cb){
            #ifdef __EEMCS_EXPT_SUPPORT__
            if(is_exception_mode(&mode))
            {
                if(!is_valid_exception_port(port_id, true))
                {
                    ret = KAL_FAIL;
                    dev_kfree_skb(skb);
                    eemcs_ccci_release_rx_skb(port_id, 1, skb);
                    eemcs_expt_ccci_rx_drop(port_id);
                    DBGLOG(CCCI, ERR, "PKT DROP when PORT%d(rxq=%d) at md exception", \
						port_id, rxq_no);
                    goto _end;
                } else {
                    ret = ccci_port_info[port_id].ch.rx_cb(skb, 0);
                }
            }
            else
            #endif      
            {
                ret = ccci_port_info[port_id].ch.rx_cb(skb, 0);
            }
            rx_err_cnt[port_id] = 0;
        } else { 
            ret = KAL_FAIL;
            dev_kfree_skb(skb);
            eemcs_ccci_release_rx_skb(port_id, 1, skb);
            if (rx_err_cnt[port_id]%20 == 0) {
            	DBGLOG(CCCI, ERR, "PKT DROP when PORT%d rx callback(ch=%d) not registered", \
					port_id, ccci_h->channel);
            }
            rx_err_cnt[port_id]++;
            eemcs_update_statistics(0, port_id, RX, DROP);
			
        }
        eemcs_update_statistics(0, port_id, RX, NORMAL);
    }
    
_end:    
    DEBUG_LOG_FUNCTION_LEAVE;
    return ret;
}
Example #4
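/*
 * UL (TX) path: validates the CCCI header against the requested channel,
 * optionally stamps a TX sequence number, then routes the skb to software TX
 * queue 0 (forced modem reset), to the exception TX queue while in exception
 * mode, or to the port's normal txq_id, updating reserve-space accounting and
 * TX statistics. A NULL skb is tolerated and only kicks the data-flow layer.
 */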
KAL_INT32 eemcs_ccci_UL_write_skb_to_swq(CCCI_CHANNEL_T chn, struct sk_buff *skb)
{
    //KAL_DBGPRINT(KAL, DBG_INFO,("====> %s, chn: %d\n", KAL_FUNC_NAME, chn)) ;		
    //return	mtlte_df_UL_write_skb_to_swq(df_ul_ccci_ch_to_q[chn], skb) ;
    CCCI_BUFF_T *pccci_h = NULL;
    KAL_UINT32 tx_queue_idx;
    KAL_INT32 ret = KAL_SUCCESS;
#ifdef __EEMCS_EXPT_SUPPORT__
    ccci_expt_port_cfg *expt_port_info;
    EEMCS_EXCEPTION_STATE mode = EEMCS_EX_INVALID;
#endif
    KAL_UINT32 channel = 0;
    bool force_md_assert_flag = false;
#if defined (DBG_FEATURE_ADD_CCCI_SEQNO)
    KAL_UINT64 flag;
#endif

    DEBUG_LOG_FUNCTION_ENTRY;

    if(NULL != skb){
        pccci_h = (CCCI_BUFF_T *)skb->data;
    	DBGLOG(CCCI, DBG, "[TX]CCCI_H: 0x%x, 0x%x, 0x%x, 0x%x",\
            pccci_h->data[0], pccci_h->data[1], pccci_h->channel, pccci_h->reserved);

    	#if defined (DBG_FEATURE_ADD_CCCI_SEQNO)
    	channel = (pccci_h->channel)|(pccci_h->seq_num <<16)|(pccci_h->assert_bit <<31);
    	#else
    	channel = pccci_h->channel;
    	#endif
    	if (channel == CCCI_FORCE_RESET_MODEM_CHANNEL)
            force_md_assert_flag = true;
		
    	//1. seperate data and ack packet for ccmni0&1 UL
    	#if defined (TDD_DL_DROP_SOLUTION2)
    	if ((chn != CH_NET1_TX) && (chn != CH_NET2_TX) && !force_md_assert_flag) {
            KAL_ASSERT(pccci_h->channel == chn); 
    	}
    	#else
    	KAL_ASSERT(pccci_h->channel == chn);
    	#endif

    	//2. ccci channel check
    	KAL_ASSERT(pccci_h->channel < CH_NUM_MAX || force_md_assert_flag); 

    	//3. fs packet check: the value of reserve is less than fs_buf_max_num=5
    	if ((pccci_h->channel == CH_FS_TX) && (pccci_h->reserved > 0x4)) {
            int *pdata = (int *)skb->data;
            DBGLOG(CCCI, ERR, "[TX]FS: 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x",\
            	*pdata, *(pdata+1), *(pdata+2), *(pdata+3), *(pdata+4), *(pdata+5), *(pdata+6), *(pdata+7));
    	}
    } else {
    	DBGLOG(CCCI, WAR, "CH%d write NULL skb to kick DF process!", chn);
    }

    if(force_md_assert_flag){
    	tx_queue_idx = 0;
    	hif_ul_write_swq(tx_queue_idx, skb);
    }else{
#ifdef __EEMCS_EXPT_SUPPORT__
        if(is_exception_mode(&mode))
        {
            if(is_valid_exception_tx_channel(chn))
            {
            	/* add sequence number in ccci header */
            	#if defined (DBG_FEATURE_ADD_CCCI_SEQNO)
            	if (likely(pccci_h)) {
                    spin_lock_irqsave(&ccci_seqno_tbl[pccci_h->channel].tx_seqno_lock, flag);
                    pccci_h->seq_num = ccci_seqno_tbl[pccci_h->channel].seqno[TX]++;
                    pccci_h->assert_bit = 1; //why assert_bit=1 instead of assert_bit=0???? 			
                    spin_unlock_irqrestore(&ccci_seqno_tbl[pccci_h->channel].tx_seqno_lock, flag);
			
                    DBGLOG(CCCI, DBG, "[TX] CH%d:ch=%d seq_num=(0x%02X->0x%02X) assert_bit=%d channel=0x%08X", chn, \
                            pccci_h->channel, pccci_h->seq_num, ccci_seqno_tbl[pccci_h->channel].seqno[TX], \
                            pccci_h->assert_bit, channel);
			
            	}
            	#endif
				
                expt_port_info = get_expt_port_info(ccci_ch_to_port(chn));
                /* set exception TX Q*/
                tx_queue_idx = expt_port_info->expt_txq_id;
                hif_ul_write_swq(tx_queue_idx, skb);
                atomic_dec(&ccci_port_info[ccci_ch_to_port(chn)].reserve_space);
                atomic_dec(&ccci_tx_waitq[tx_queue_idx].reserve_space);
                eemcs_update_statistics(0, ccci_ch_to_port(chn), TX, NORMAL);
            } else {
                DBGLOG(CCCI, WAR, "[EXPT] Invalid exception channel(%d)!", chn);
                /*
                 * if KAL_FAIL is returned, skb is freed at device layer
                 * we don't have to free it here
                 */
                 //eemcs_ex_ccci_tx_drop(ccci_ch_to_port(chn));
                 ret = KAL_FAIL;
            }
        }
        else
#endif
        {
    	    /* add sequence number in ccci header */
    	    #if defined (DBG_FEATURE_ADD_CCCI_SEQNO)
    	    if (likely(pccci_h)) {
            	spin_lock_irqsave(&ccci_seqno_tbl[pccci_h->channel].tx_seqno_lock, flag);
            	pccci_h->seq_num = ccci_seqno_tbl[pccci_h->channel].seqno[TX]++;
            	pccci_h->assert_bit = 1; //why assert_bit=1 instead of assert_bit=0????           	
            	spin_unlock_irqrestore(&ccci_seqno_tbl[pccci_h->channel].tx_seqno_lock, flag);
            	DBGLOG(CCCI, DBG, "[TX] CH%d:ch=%d seq_num=(0x%02X->0x%02X) assert_bit=%d channel=0x%08X", chn, \
					pccci_h->channel, pccci_h->seq_num, ccci_seqno_tbl[pccci_h->channel].seqno[TX], \
					pccci_h->assert_bit, channel);
    	    }
    	    #endif
			
    	    tx_queue_idx = ccci_port_info[ccci_ch_to_port(chn)].txq_id;
    	    hif_ul_write_swq(tx_queue_idx, skb);
    	    atomic_dec(&ccci_port_info[ccci_ch_to_port(chn)].reserve_space);
    	    atomic_dec(&ccci_tx_waitq[tx_queue_idx].reserve_space);
            eemcs_update_statistics(0, ccci_ch_to_port(chn), TX, NORMAL);
        }
    }
	
    DEBUG_LOG_FUNCTION_LEAVE;
    return ret;
}
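Example #5

/*
 * Earlier revision of eemcs_ccci_UL_write_skb_to_swq() without the
 * sequence-number stamping: it only validates the CCCI header and then routes
 * the skb to TX queue 0, the exception TX queue, or the port's normal TX queue.
 */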
KAL_INT32 eemcs_ccci_UL_write_skb_to_swq(CCCI_CHANNEL_T chn, struct sk_buff *skb)
{
    //KAL_DBGPRINT(KAL, DBG_INFO,("====> %s, chn: %d\n", KAL_FUNC_NAME, chn)) ;		
    //return	mtlte_df_UL_write_skb_to_swq(df_ul_ccci_ch_to_q[chn], skb) ;
    CCCI_BUFF_T *pccci_h = NULL;
    KAL_UINT32 tx_queue_idx;
    KAL_INT32 ret = KAL_SUCCESS;
#ifdef __EEMCS_EXPT_SUPPORT__
    ccci_expt_port_cfg *expt_port_info;
    EEMCS_EXCEPTION_STATE mode = EEMCS_EX_INVALID;
#endif
	DEBUG_LOG_FUNCTION_ENTRY;

    if(NULL != skb){
        pccci_h = (CCCI_BUFF_T *)skb->data;
    	DBGLOG(CCCI, DBG, "[TX]CCCI_H: 0x%x, 0x%x, 0x%x, 0x%x",\
            pccci_h->data[0], pccci_h->data[1], pccci_h->channel, pccci_h->reserved);
        KAL_ASSERT(pccci_h->channel == chn);
        KAL_ASSERT(pccci_h->channel < CH_NUM_MAX || pccci_h->channel == CCCI_FORCE_RESET_MODEM_CHANNEL); 
    }else{
        DBGLOG(CCCI, WAR, "CH%d write NULL skb to kick DF process!", chn);
    }
	if(pccci_h && pccci_h->channel == CCCI_FORCE_RESET_MODEM_CHANNEL){
	    tx_queue_idx = 0;
		hif_ul_write_swq(tx_queue_idx, skb);
    }else{
#ifdef __EEMCS_EXPT_SUPPORT__
        if(is_exception_mode(&mode))
        {
            if(is_valid_exception_tx_channel(chn))
            {
                expt_port_info = get_expt_port_info(ccci_ch_to_port(chn));
                /* set exception TX Q*/
                tx_queue_idx = expt_port_info->expt_txq_id;
                DBGLOG(CCCI, DBG, "[EXPT] ccci_UL_write_skb_to_swq write skb to DF: ch=%d, txq=%d", chn, tx_queue_idx);
                hif_ul_write_swq(tx_queue_idx, skb);
        	    atomic_dec(&ccci_port_info[ccci_ch_to_port(chn)].reserve_space);
        	    atomic_dec(&ccci_tx_waitq[tx_queue_idx].reserve_space);
                eemcs_update_statistics(0, ccci_ch_to_port(chn), TX, NORMAL);
            }
            else
            {
                DBGLOG(CCCI, WAR, "[EXPT] Invalid exception channel(%d)!", chn);
                /*
                 * if KAL_FAIL is returned, skb is freed at device layer
                 * we don't have to free it here
                 */
                 //eemcs_ex_ccci_tx_drop(ccci_ch_to_port(chn));
                 ret = KAL_FAIL;
            }
        }
        else
#endif
        {
    	    tx_queue_idx = ccci_port_info[ccci_ch_to_port(chn)].txq_id;
    		hif_ul_write_swq(tx_queue_idx, skb);
    	    atomic_dec(&ccci_port_info[ccci_ch_to_port(chn)].reserve_space);
    	    atomic_dec(&ccci_tx_waitq[tx_queue_idx].reserve_space);
            eemcs_update_statistics(0, ccci_ch_to_port(chn), TX, NORMAL);
        }
	}
	
	DEBUG_LOG_FUNCTION_LEAVE;
	return ret;
}
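For orientation, the sketch below shows how a caller might build a CCCI-header skb before handing it to eemcs_ccci_UL_write_skb_to_swq(). It is not taken from the driver: the helper name build_and_send_ccci() is hypothetical, the driver's own headers (for CCCI_BUFF_T, CCCI_CHANNEL_T and the KAL types) plus <linux/skbuff.h> are assumed, and the interpretation of data[1] as the total length including the header is inferred from the read paths in the other examples.

/* Hypothetical helper (illustration only, not part of the driver). */
static KAL_INT32 build_and_send_ccci(CCCI_CHANNEL_T ch,
                                     const void *payload, KAL_UINT32 len)
{
    struct sk_buff *skb;
    CCCI_BUFF_T *hdr;

    skb = dev_alloc_skb(sizeof(CCCI_BUFF_T) + len);
    if (skb == NULL)
        return KAL_FAIL;

    hdr = (CCCI_BUFF_T *)skb_put(skb, sizeof(CCCI_BUFF_T));
    hdr->data[0]  = 0;                          /* unused in this sketch */
    hdr->data[1]  = sizeof(CCCI_BUFF_T) + len;  /* assumed: total length incl. header */
    hdr->channel  = ch;                         /* must match the chn argument (see KAL_ASSERT above) */
    hdr->reserved = 0;

    memcpy(skb_put(skb, len), payload, len);

    return eemcs_ccci_UL_write_skb_to_swq(ch, skb);
}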
Example #6
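/*
 * read() handler for an IPC node: blocks (unless O_NONBLOCK) until a packet is
 * queued, dequeues one skb, strips the CCCI header and copies the payload to
 * user space. If the user buffer is smaller than the payload the packet is
 * dropped and -E2BIG is returned.
 */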
static ssize_t eemcs_ipc_read(struct file *fp, char *buf, size_t count, loff_t *ppos)
{
    unsigned int flag;
    eemcs_ipc_node_t *curr_node = (eemcs_ipc_node_t *)fp->private_data;
    KAL_UINT8 node_id = curr_node->ipc_node_id;/* node_id */
    KAL_UINT8 port_id = eemcs_ipc_inst.eemcs_port_id; /* port_id */
    KAL_UINT32 rx_pkt_cnt, read_len;
    struct sk_buff *rx_skb;
    unsigned char *payload=NULL;
    CCCI_BUFF_T *ccci_header;
    int ret = 0;

    DEBUG_LOG_FUNCTION_ENTRY;
    
    flag=fp->f_flags;
    DBGLOG(IPCD,TRA,"ipc_read: deivce iminor=%d, len=%d", node_id ,count);

    if(!eemcs_device_ready())
    {
        DBGLOG(IPCD,ERR,"MD device not ready!");
        ret= -EIO;
		return ret;
    }

    /* Check receive pkt count */
    rx_pkt_cnt = atomic_read(&curr_node->rx_pkt_cnt);
    KAL_ASSERT(rx_pkt_cnt >= 0);
    
    if(rx_pkt_cnt == 0){
        if (flag&O_NONBLOCK)
        {	
            ret=-EAGAIN;
            DBGLOG(IPCD,TRA,"ipc_read: PORT%d for NONBLOCK",port_id);
            goto _exit;
        }
        ret = wait_event_interruptible(curr_node->rx_waitq, atomic_read(&curr_node->rx_pkt_cnt) > 0);
        if(ret)
        {
            ret = -EINTR;
            DBGLOG(IPCD, ERR, "[RX]PORT%d read interrupt by syscall.signal(%lld)", port_id, \
		*(long long *)current->pending.signal.sig);	
            goto _exit;
        }
    }

    /* Dequeue one packet from the RX list */
    DBGLOG(IPCD,TRA,"ipc_read: dequeue from rx_skb_list, rx_pkt_cnt(%d)",rx_pkt_cnt);
    rx_skb = skb_dequeue(&curr_node->rx_skb_list);
    /* There should be rx_skb in the list */
    KAL_ASSERT(NULL != rx_skb);
    atomic_dec(&curr_node->rx_pkt_cnt);
    rx_pkt_cnt = atomic_read(&curr_node->rx_pkt_cnt);
    KAL_ASSERT(rx_pkt_cnt >= 0);

    ccci_header = (CCCI_BUFF_T *)rx_skb->data;

    DBGLOG(IPCD,TRA,"ipc_read: PORT%d CCCI_MSG(0x%08X, 0x%08X, %02d, 0x%08X)",\
            port_id, ccci_header->data[0],ccci_header->data[1],
            ccci_header->channel, ccci_header->reserved);
    
    /* If this does not match, debug the EEMCS CCCI demux skb part */
    KAL_ASSERT(ccci_header->channel == eemcs_ipc_inst.ccci_ch.rx);
    
    read_len = ccci_header->data[1] - sizeof(CCCI_BUFF_T);
    /* remove CCCI_HEADER */
    skb_pull(rx_skb, sizeof(CCCI_BUFF_T));
 
    DBGLOG(IPCD,TRA,"ipc_read: PORT%d read_len=%d",port_id, read_len);

    payload=(unsigned char*)rx_skb->data;
    if(count < read_len)
    {
        DBGLOG(IPCD,ERR,"PKT DROP of PORT%d! want_read=%d, read_len=%d", 
                port_id, count, read_len);
        atomic_inc(&curr_node->rx_pkt_drop_cnt);
        eemcs_update_statistics(0, eemcs_ipc_inst.eemcs_port_id, RX, DROP);
        dev_kfree_skb(rx_skb);
        ret = -E2BIG;
        goto _exit;
    }

    DBGLOG(IPCD,TRA,"ipc_read: copy_to_user(len=%d), %p -> %p", read_len, payload, buf);

    ret = copy_to_user(buf, payload, read_len);
    if(ret!=0)
    {
        DBGLOG(IPCD, ERR, "[RX]PORT%d copy_to_user(len=%d, %p->%p) fail: %d", \
		port_id, read_len, payload, buf, ret);
        ret = -EFAULT;
        goto _exit;
    }       

    dev_kfree_skb(rx_skb);

    if(ret == 0){
        DEBUG_LOG_FUNCTION_LEAVE;
        return read_len;
    }
_exit:    

    DEBUG_LOG_FUNCTION_LEAVE;
	return ret;
}
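Because both read() handlers wake readers through rx_waitq (and the IPC nodes additionally signal SIGIO and POLLIN), a user-space consumer would normally poll the device node and then read a whole message. The sketch below illustrates that pattern; the device path /dev/eemcs_ipc0 is a placeholder, since the actual node names are not visible in these examples.

/* User-space sketch with an assumed device node name. */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    char buf[4096];
    int fd = open("/dev/eemcs_ipc0", O_RDONLY);  /* hypothetical node name */
    if (fd < 0) {
        perror("open");
        return 1;
    }

    struct pollfd pfd = { .fd = fd, .events = POLLIN };
    if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
        /* One read() returns one CCCI payload with the header stripped. */
        ssize_t n = read(fd, buf, sizeof(buf));
        if (n < 0)
            perror("read");
        else
            printf("read %zd bytes\n", n);
    }

    close(fd);
    return 0;
}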
Example #7
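/*
 * Later revision of the IPC RX callback from Example #2: adds a byte-wise
 * debug dump of the forwarded ilm and guards the connectivity bridge call
 * with ENABLE_CONN_COEX_MSG.
 */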
static KAL_INT32 eemcs_ipc_rx_callback(struct sk_buff *skb, KAL_UINT32 private_data)
{
    CCCI_BUFF_T *p_cccih = NULL;
    KAL_UINT32  node_id;
    IPC_MSGSVC_TASKMAP_T *id_map;
    unsigned int i = 0;
    char * addr;

    DEBUG_LOG_FUNCTION_ENTRY;

    if (skb == NULL) {
        DEBUG_LOG_FUNCTION_LEAVE;
        return KAL_FAIL;
    }

    p_cccih = (CCCI_BUFF_T *)skb->data;
    DBGLOG(IPCD,DBG,"[RX]CCCI_H(0x%08X, 0x%08X, %02d, 0x%08X)", \
        p_cccih->data[0], p_cccih->data[1], p_cccih->channel, p_cccih->reserved);
    
#ifndef _EEMCS_IPCD_LB_UT_
    /* Check IPC task id and extq_id */
    if ((id_map=unify_AP_id_2_local_id(p_cccih->reserved))==NULL)
    {
       DBGLOG(IPCD,ERR,"Wrong AP Unify id (%#x)@RX.!!! PACKET DROP !!!\n",p_cccih->reserved);
       dev_kfree_skb(skb);
       return KAL_SUCCESS ;
    }
    node_id = id_map->task_id;
#else
    node_id = 0;
#endif
    if(IPCD_KERNEL == atomic_read(&eemcs_ipc_inst.ipc_node[node_id].dev_state)){
        ipc_ilm_t* p_ilm = NULL;
        skb_pull(skb, sizeof(CCCI_BUFF_T));
        p_ilm = (ipc_ilm_t*)(skb->data);
        p_ilm->dest_mod_id = p_cccih->reserved;
        p_ilm->local_para_ptr = (local_para_struct *)(p_ilm+1);

        if (p_ilm->local_para_ptr != NULL) {
            DBGLOG(IPCD, INF, "[RX][KERN]src=%d dest=0x%x sap=0x%x msg=0x%x local_ptr=%p msg_len=%d", \
				p_ilm->src_mod_id, p_ilm->dest_mod_id, p_ilm->sap_id, p_ilm->msg_id,\
				p_ilm->local_para_ptr, p_ilm->local_para_ptr->msg_len); 
            for (i=0; i<32; i++) {
		addr = (char *)p_ilm;
		DBGLOG(IPCD, DBG, "%p=%x", (addr+i), *(addr+i));
            }
        }
        #ifdef ENABLE_CONN_COEX_MSG		
        mtk_conn_md_bridge_send_msg((ipc_ilm_t*)(skb->data));
        #endif
        dev_kfree_skb(skb);
    }
    else if(IPCD_OPEN == atomic_read(&eemcs_ipc_inst.ipc_node[node_id].dev_state)){
        skb_queue_tail(&eemcs_ipc_inst.ipc_node[node_id].rx_skb_list, skb); /* spin_lock_irqsave inside; see skbuff.c */
        atomic_inc(&eemcs_ipc_inst.ipc_node[node_id].rx_pkt_cnt);     /* increase rx_pkt_cnt */
        kill_fasync(&eemcs_ipc_inst.ipc_node[node_id].fasync, SIGIO, POLL_IN);
        wake_up_poll(&eemcs_ipc_inst.ipc_node[node_id].rx_waitq,POLLIN); /* wake up rx_waitq */
    }else{
        DBGLOG(IPCD, ERR, "PKT DROP while ipc dev(%d) closed", node_id);
        dev_kfree_skb(skb);
        eemcs_update_statistics(0, eemcs_ipc_inst.eemcs_port_id, RX, DROP);
    }
    
    DEBUG_LOG_FUNCTION_LEAVE;
	return KAL_SUCCESS ;
}
Example #8
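/*
 * read() handler for a character-device port: supports partial ("streaming")
 * reads by caching the remaining skb when the user buffer is smaller than the
 * packet, strips the CCCI header unless the port exports it (EXPORT_CCCI_H),
 * dumps the first 32 payload bytes for debugging, and copies the data to user
 * space, blocking unless O_NONBLOCK is set.
 */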
static ssize_t eemcs_cdev_read(struct file *fp, char *buf, size_t count, loff_t *ppos)
{
    unsigned int flag;
    eemcs_cdev_node_t *curr_node = (eemcs_cdev_node_t *)fp->private_data;
    KAL_UINT8 port_id = curr_node->eemcs_port_id; /* port_id */
    KAL_UINT32 p_type, rx_pkt_cnt, read_len, rx_pkt_cnt_int;
    struct sk_buff *rx_skb;
    unsigned char *payload=NULL;
    CCCI_BUFF_T *ccci_header;
    int ret = 0;

    DEBUG_LOG_FUNCTION_ENTRY;
    
    flag=fp->f_flags;
    //verbose DBGLOG(CHAR,DBG,"read deivce iminor (0x%x),length(0x%x)",port_id,count);

    p_type = ccci_get_port_type(port_id);
    if(p_type != EX_T_USER) 
    {
        DBGLOG(CHAR, ERR, "PORT%d refuse port(%d) access user port", port_id, p_type);
        goto _exit;                    
    }

    rx_pkt_cnt_int = atomic_read(&curr_node->buff.remaining_rx_cnt);
    KAL_ASSERT(rx_pkt_cnt_int >= 0);
    if(rx_pkt_cnt_int == 1)
    {
        DBGLOG(CHAR, DBG, "Streaming reading!! PORT%d len=%d\n",port_id,count);
        rx_skb = curr_node->buff.remaining_rx_skb;
        /* rx_skb shall not be null */
        KAL_ASSERT(NULL != rx_skb);
        read_len = curr_node->buff.remaining_len;
        KAL_ASSERT(read_len >= 0);
    }
    else    
    {
        rx_pkt_cnt = atomic_read(&curr_node->rx_pkt_cnt);
        KAL_ASSERT(rx_pkt_cnt >= 0);
        
        if(rx_pkt_cnt == 0) {
            if (flag&O_NONBLOCK)
            {	
                ret=-EAGAIN;
                //verbose DBGLOG(CHAR,DBG,"[CHAR] PORT(%d) eemcs_cdev_read return O_NONBLOCK for NON-BLOCKING",port_id);
                goto _exit;
            }
            ret = wait_event_interruptible(curr_node->rx_waitq, atomic_read(&curr_node->rx_pkt_cnt) > 0);
            if(ret)
            {
                ret = -EINTR;
                DBGLOG(CHAR, ERR, "PORT%d interruptted while waiting data.", port_id);			
                goto _exit;
            }
        }
       
        /*
         * Cached memory from last read fail
         */
        DBGLOG(CHAR, TRA, "eemcs_cdev_read dequeue from rx_skb_list, rx_pkt_cnt(%d)",rx_pkt_cnt); 
        rx_skb = skb_dequeue(&curr_node->rx_skb_list);
        
        /* There should be rx_skb in the list */
        KAL_ASSERT(NULL != rx_skb);
        atomic_dec(&curr_node->rx_pkt_cnt);
        rx_pkt_cnt = atomic_read(&curr_node->rx_pkt_cnt);
        KAL_ASSERT(rx_pkt_cnt >= 0);

        ccci_header = (CCCI_BUFF_T *)rx_skb->data;

        DBGLOG(CHAR, TRA, "eemcs_cdev_read: PORT%d CCCI_H(0x%x)(0x%x)(0x%x)(0x%x)",\
                port_id, ccci_header->data[0],ccci_header->data[1],
                ccci_header->channel, ccci_header->reserved);
        
        /* If this does not match, debug the EEMCS CCCI demux skb part */
        if(ccci_header->channel != curr_node->ccci_ch.rx) {
            DBGLOG(CHAR,ERR,"Assert(ccci_header->channel == curr_node->ccci_ch.rx)");
            DBGLOG(CHAR,ERR,"ccci_header->channel:%d, curr_node->ccci_ch.rx:%d, curr_node->eemcs_port_id:%d",
                    ccci_header->channel, curr_node->ccci_ch.rx, curr_node->eemcs_port_id);
            KAL_ASSERT(ccci_header->channel == curr_node->ccci_ch.rx);
        }
        //KAL_ASSERT(ccci_header->channel == curr_node->ccci_ch.rx);
        
        if(!(ccci_get_port_cflag(port_id) & EXPORT_CCCI_H))
        {
            read_len = ccci_header->data[1] - sizeof(CCCI_BUFF_T);
            /* remove CCCI_HEADER */
            skb_pull(rx_skb, sizeof(CCCI_BUFF_T));
        }else{
            if(ccci_header->data[0] == CCCI_MAGIC_NUM){
                read_len = sizeof(CCCI_BUFF_T); 
            }else{
                read_len = ccci_header->data[1];
            }
        }
    }
    
    DBGLOG(CHAR, TRA, "eemcs_cdev_read: PORT%d read_len=%d",port_id, read_len);

/* 20130816 ian add aud dump */
    {
	    char *ptr = (char *)rx_skb->data;
	    /* dump 32 byte of the !!!CCCI DATA!!! part */
		CDEV_LOG(port_id, CHAR, ERR,"[DUMP]PORT%d eemcs_cdev_read\n\
		[00..07](0x%02x)(0x%02x)(0x%02x)(0x%02x)(0x%02x)(0x%02x)(0x%02x)(0x%02x)\n\
		[08..15](0x%02x)(0x%02x)(0x%02x)(0x%02x)(0x%02x)(0x%02x)(0x%02x)(0x%02x)\n\
		[16..23](0x%02x)(0x%02x)(0x%02x)(0x%02x)(0x%02x)(0x%02x)(0x%02x)(0x%02x)\n\
		[24..31](0x%02x)(0x%02x)(0x%02x)(0x%02x)(0x%02x)(0x%02x)(0x%02x)(0x%02x)",\
		port_id,\
		(int)*(ptr+0),(int)*(ptr+1),(int)*(ptr+2),(int)*(ptr+3),(int)*(ptr+4),(int)*(ptr+5),(int)*(ptr+6),(int)*(ptr+7),\
		(int)*(ptr+8),(int)*(ptr+9),(int)*(ptr+10),(int)*(ptr+11),(int)*(ptr+12),(int)*(ptr+13),(int)*(ptr+14),(int)*(ptr+15),\
		(int)*(ptr+16),(int)*(ptr+17),(int)*(ptr+18),(int)*(ptr+19),(int)*(ptr+20),(int)*(ptr+21),(int)*(ptr+22),(int)*(ptr+23),\
		(int)*(ptr+24),(int)*(ptr+25),(int)*(ptr+26),(int)*(ptr+27),(int)*(ptr+28),(int)*(ptr+29),(int)*(ptr+30),(int)*(ptr+31));

    }

    payload=(unsigned char*)rx_skb->data;
    if(count < read_len)
    {
        /* Means 1st streaming reading*/
        if(rx_pkt_cnt_int == 0)
        {
            atomic_inc(&curr_node->buff.remaining_rx_cnt);
            curr_node->buff.remaining_rx_skb = rx_skb;
        }
        
        DBGLOG(CHAR, DBG, "PORT%d !!! USER BUFF(%d) less than DATA SIZE(%d) !!!", port_id, count, read_len);
        DBGLOG(CHAR, DBG, "copy data from %p to %p length = %d",payload,buf,count);
        ret = copy_to_user(buf, payload, count);
        if(ret == 0)
        {   
            curr_node->buff.remaining_len = read_len - count;
            skb_pull(rx_skb, count);  //move data pointer
            //update actually read length
            read_len = count;
        }
        else
        {
            // If an error occurs, discard the skb buffer
            DBGLOG(CHAR, ERR, "PORT%d !!! PKT DROP !!! fail copy_to_user buf(%d, %d)", port_id, count, ret);
            atomic_inc(&curr_node->rx_pkt_drop_cnt);
            eemcs_update_statistics(0, port_id, RX, DROP);
            dev_kfree_skb(rx_skb);
            eemcs_ccci_release_rx_skb(port_id, 1, rx_skb);
            if(rx_pkt_cnt_int == 1)
            {
                curr_node->buff.remaining_len = 0;
                curr_node->buff.remaining_rx_skb = NULL;
                atomic_dec(&curr_node->buff.remaining_rx_cnt);
            } 
        }
    }
    else
    {

        DBGLOG(CHAR, DBG, "copy data from %p to %p length = %d", payload, buf, read_len);

        ret = copy_to_user(buf, payload, read_len);
        if(ret!=0)
        {
            DBGLOG(CHAR, ERR, "copy_to_user len=%d fail: %d)", read_len, ret);
        }       

        dev_kfree_skb(rx_skb);
        eemcs_ccci_release_rx_skb(port_id, 1, rx_skb);

        if(rx_pkt_cnt_int == 1)
        {
            curr_node->buff.remaining_len = 0;
            curr_node->buff.remaining_rx_skb = NULL;
            atomic_dec(&curr_node->buff.remaining_rx_cnt);
        }
    }
    
    if(ret == 0){
        DEBUG_LOG_FUNCTION_LEAVE;
        return read_len;
    }
_exit:    

    DEBUG_LOG_FUNCTION_LEAVE;
	return ret;
}
Example #9
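/*
 * Earlier revision of the dispatcher from Example #3: no sequence-number
 * check and no rate-limited drop logging; xBoot commands are detected with
 * is_xboot_command() and a 1-second wake lock is taken instead of 0.5 s.
 */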
int ccci_df_to_ccci_callback(unsigned int rxq_no)
{
    int ret, hc_ret, is_xcmd;
    struct sk_buff * skb = NULL;
    CCCI_BUFF_T *ccci_h  = NULL;
    XBOOT_CMD *p_xcmd = NULL;
    KAL_UINT32   port_id = 0;
#ifdef __EEMCS_EXPT_SUPPORT__
    EEMCS_EXCEPTION_STATE mode = EEMCS_EX_INVALID;
#endif

    
    DEBUG_LOG_FUNCTION_ENTRY;
    /* Step 1. read skb from swq */
    skb = hif_dl_read_swq(rxq_no);
    if(skb == NULL) {
        DBGLOG(CCCI, DBG, "ccci_df_to_ccci_callback read NULL skb on %d", rxq_no);
#ifdef __EEMCS_EXPT_SUPPORT__
        /* "mode" only exists when exception support is compiled in */
        if(is_exception_mode(&mode))
            return KAL_FAIL;
#endif
        KAL_ASSERT(NULL != skb);
    }

    /* Step 2. call handle complete */
    hc_ret = hif_dl_pkt_handle_complete(rxq_no);
    KAL_ASSERT(0 == hc_ret);

    DBGLOG(CCCI, DBG, "ccci_df_to_ccci_callback() rxq_no = %d", rxq_no);
    /* Step 3. buffer type */
    if (rxq_no == RXQ_Q0) {
        is_xcmd = is_xboot_command(skb);
    } else {
        is_xcmd = false;
    }

    wake_lock_timeout(&eemcs_wake_lock, HZ); // Using 1s wake lock

    if (is_xcmd ==  true) {
        /* Step 4. callback to xBoot */
        p_xcmd = (XBOOT_CMD *)skb->data;
        KAL_ASSERT(p_xcmd->magic == (KAL_UINT32)MAGIC_MD_CMD);
        if (NULL != ccci_port_info[CCCI_PORT_CTRL].ch.rx_cb){
            DBGLOG(CCCI, DBG, "[CCCI][DF CALLBACK] XBOOT_CMD (0x%X)(0x%X)(0x%X)(0x%X)",\
                p_xcmd->magic, p_xcmd->msg_id, p_xcmd->status, p_xcmd->reserved[0]);
#ifdef __EEMCS_EXPT_SUPPORT__
            if(is_exception_mode(&mode))
            {
                if(!is_valid_exception_port(CCCI_PORT_CTRL, true))
                {
                    ret = KAL_FAIL;
                    dev_kfree_skb(skb);
                    eemcs_expt_ccci_rx_drop(port_id);
                    DBGLOG(CCCI, ERR, "[CCCI] [DF CALLBACK] EXCEPTION MODE PKT DROPPED !! Q(%d) PORT(%d)", rxq_no, CCCI_PORT_CTRL);
                    goto _end;
                }
                else
                {
                    ret = ccci_port_info[CCCI_PORT_CTRL].ch.rx_cb(skb, 0);
                }
            }
            else
#endif      
            {
                ret = ccci_port_info[CCCI_PORT_CTRL].ch.rx_cb(skb, 0);
            }
        } else {
            ret = KAL_FAIL;
            dev_kfree_skb(skb);
            DBGLOG(CCCI, ERR, "[CCCI] !!! PKT DROP !!! ccci_df_to_ccci_callback xBoot not registered");
        }
    } else {
        /* Step 4. callback to CCCI device */
        ccci_h = (CCCI_BUFF_T *)skb->data;
        KAL_ASSERT(ccci_h->channel < CH_NUM_MAX);
        port_id = ccci_ch_to_port(ccci_h->channel);
        if(NULL != ccci_port_info[port_id].ch.rx_cb){
            DBGLOG(CCCI,DBG,"ccci_df_to_ccci_callback Rx packet CCCI_H(0x%x)(0x%x)(0x%x)(0x%x)",\
                ccci_h->data[0],ccci_h->data[1],ccci_h->channel, ccci_h->reserved );

#ifdef __EEMCS_EXPT_SUPPORT__
            if(is_exception_mode(&mode))
            {
                if(!is_valid_exception_port(port_id, true))
                {
                    ret = KAL_FAIL;
                    dev_kfree_skb(skb);
                    eemcs_expt_ccci_rx_drop(port_id);
                    DBGLOG(CCCI, ERR, "[CCCI] [DF CALLBACK] EXCEPTION MODE PKT DROPPED !! Q(%d) PORT(%d)", rxq_no, port_id);
                    goto _end;
                }
                else
                {
                    ret = ccci_port_info[port_id].ch.rx_cb(skb, 0);
                }
            }
            else
#endif      
            {
                ret = ccci_port_info[port_id].ch.rx_cb(skb, 0);
            }
        }else{
            ret = KAL_FAIL;
            dev_kfree_skb(skb);
            DBGLOG(CCCI,ERR, "[CCCI] !!! PKT DROP !!! ccci_df_to_ccci_callback ccci_port(%d) channel Rx(%d) not registered", port_id, ccci_h->channel);
            eemcs_update_statistics(0, port_id, RX, DROP);
        }
        eemcs_update_statistics(0, port_id, RX, NORMAL);
    }
    
_end:    
    DEBUG_LOG_FUNCTION_LEAVE;
    return ret;
}