Example #1
/*************************************************************************
* FUNCTION
*  void ccci_exception_handshake
*
* DESCRIPTION
 *  Performs the CCCI exception-mode handshake: sends a check packet on
 *  the control channel and polls until the peer echoes it back.
*
* PARAMETERS
 *  None
 *
 * RETURNS
 *  None
*
*************************************************************************/
void ccci_exception_handshake(void){
#if defined(__MODEM_CCCI_EXIST__) && !defined(__MODEM_CARD__)
    CCCI_BUFF_T  *buff;
    kal_uint32   p_cache_aligned;
    kal_uint32 gpd_num;
    qbm_gpd *p_first_gpd, *p_last_gpd;
    //kal_uint32   rcv_size = 0; 
    // ensure we are in the exception state
    if (INT_QueryExceptionStatus() == KAL_FALSE)
        return;

    // we block here for debugging
    //if(ccci_exception_state != CCCI_EXPT_CLEAR_CH_ST) while(1);
    //- Avoid using kal_mem_cpy
    //- HW bug

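    /* re-register the exception ACK handler on both control channels
       (assumption: ccci_init(channel, callback) binds the callback to
       the channel for the polling I/O below) */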
    ccci_init(CCCI_CONTROL_CHANNEL, ccci_except_ack);
    ccci_init(CCCI_CONTROL_CHANNEL_ACK, ccci_except_ack);

    // exception mode has only a single thread, so we must use polling mode
    /* initialize the polling-mode GPDs */
    ASSERT(CCCI_EXCEPT_POLLING_MODE_BUF_SZ >= 2*CPU_CACHE_LINE_SIZE);
    /* round the GPD pointer up to a CPU_CACHE_LINE_SIZE boundary */

    p_cache_aligned = (kal_uint32)g_ccci_expect_polling_buf_tx;
    if (p_cache_aligned & CPU_CACHE_LINE_SIZE_MASK) {
        p_cache_aligned = ((kal_uint32)(g_ccci_expect_polling_buf_tx) & ~CPU_CACHE_LINE_SIZE_MASK);
        p_cache_aligned += CPU_CACHE_LINE_SIZE;
    }

    // +QBM_HEAD_SIZE is for ROME E1: CLDMA needs to record whether this buffer is cacheable or non-cacheable
    ccci_except_polling_gpd_tx = (qbm_gpd *)(p_cache_aligned + QBM_HEAD_SIZE);

    CCCIDEV_RST_CCCI_COMM_GPD_LIST(ccci_except_polling_gpd_tx, ccci_except_polling_gpd_tx);

    //format Rx GPD
    p_cache_aligned = (kal_uint32)g_ccci_expect_polling_buf_rx;
    if (p_cache_aligned & CPU_CACHE_LINE_SIZE_MASK) {
        p_cache_aligned = ((kal_uint32)(g_ccci_expect_polling_buf_rx) & ~CPU_CACHE_LINE_SIZE_MASK);
        p_cache_aligned += CPU_CACHE_LINE_SIZE;
    }

    // +QBM_HEAD_SIZE is for ROME E1: CLDMA needs to record whether this buffer is cacheable or non-cacheable
    ccci_except_polling_gpd_rx = (qbm_gpd *)(p_cache_aligned + QBM_HEAD_SIZE);

    CCCIDEV_RST_CCCI_COMM_GPD_LIST(ccci_except_polling_gpd_rx, ccci_except_polling_gpd_rx);

    //format Rx 2nd GPD
    p_cache_aligned = (kal_uint32)g_ccci_expect_polling_buf_rx2;
    if (p_cache_aligned & CPU_CACHE_LINE_SIZE_MASK) {
        p_cache_aligned = ((kal_uint32)(g_ccci_expect_polling_buf_rx2) & ~CPU_CACHE_LINE_SIZE_MASK);
        p_cache_aligned += CPU_CACHE_LINE_SIZE;
    }

    // +QBM_HEAD_SIZE is for ROME E1: CLDMA needs to record whether this buffer is cacheable or non-cacheable
    ccci_except_polling_gpd_rx2 = (qbm_gpd *)(p_cache_aligned + QBM_HEAD_SIZE);

    CCCIDEV_RST_CCCI_COMM_GPD_LIST(ccci_except_polling_gpd_rx2, ccci_except_polling_gpd_rx2);

    //step 0. config rx gpd next pointer
    QBM_DES_SET_NEXT(ccci_except_polling_gpd_rx, ccci_except_polling_gpd_rx2);

    //step 1. send TX handshake pkt
    buff = CCCIDEV_GET_QBM_DATAPTR(ccci_except_polling_gpd_tx);
    
    buff->data[0] = MD_EX_MAGIC;
    buff->data[1] = CCMSG_ID_EXCEPTION_CHECK;
    buff->channel = CCCI_CONTROL_CHANNEL;
    buff->reserved = MD_EX_CHK_ID;
    ccci_debug_add_seq(buff, CCCI_DEBUG_ASSERT_BIT); // add ccci seq
    QBM_DES_SET_DATALEN(ccci_except_polling_gpd_tx, sizeof(CCCI_BUFF_T));
    QBM_DES_SET_DATALEN(ccci_except_polling_gpd_tx->p_data_tbd,  sizeof(CCCI_BUFF_T));
    qbm_cal_set_checksum((kal_uint8 *)ccci_except_polling_gpd_tx);
    qbm_cal_set_checksum((kal_uint8 *)ccci_except_polling_gpd_tx->p_data_tbd);
    QBM_CACHE_FLUSH(ccci_except_polling_gpd_tx, sizeof(qbm_gpd));
    QBM_CACHE_FLUSH(ccci_except_polling_gpd_tx->p_data_tbd, sizeof(qbm_gpd));
    QBM_CACHE_FLUSH(buff, sizeof(CCCI_BUFF_T));

    ex_set_step_logging(EX_AUTO_STEP); //0x41

    //step 2. poll for the echoed Rx handshake pkt (needs two GPDs, one for the tail)
    // TODO: revise this API; shall use the exception API (after the channel reset flow is done)
#ifdef SWITCH_TO_EXCEPTION_IO
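    /* polling pattern used throughout exception mode: hand a GPD list to
       the HIF, then spin on hif_st + poll_gpd until at least one GPD
       completes. Exception mode runs a single thread, so no interrupt-
       or blocking-based I/O is available. */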
    p_first_gpd = p_last_gpd = ccci_except_polling_gpd_tx;
    ccci_except_set_gpd(CCCI_CONTROL_CHANNEL, p_first_gpd, p_last_gpd);
    gpd_num = 0;
    do{
        ccci_except_hif_st(CCCI_CONTROL_CHANNEL);
        ccci_except_poll_gpd(CCCI_CONTROL_CHANNEL, (void **)&p_first_gpd, (void **)&p_last_gpd, &gpd_num);
    }while(0 == gpd_num);
    CCCIDEV_RST_CCCI_COMM_GPD_LIST(p_first_gpd, p_last_gpd);

    ex_set_step_logging(EX_AUTO_STEP); //0x42
    
    p_first_gpd = ccci_except_polling_gpd_rx;
    p_last_gpd = ccci_except_polling_gpd_rx2;
    do{
        ccci_except_set_gpd(CCCI_CONTROL_CHANNEL_ACK, p_first_gpd, p_last_gpd);
        gpd_num = 0;
        do{
            ccci_except_hif_st(CCCI_CONTROL_CHANNEL_ACK);
            ccci_except_poll_gpd(CCCI_CONTROL_CHANNEL_ACK, (void **)&p_first_gpd, (void **)&p_last_gpd, &gpd_num);
        }while(0 == gpd_num);
        buff = CCCIDEV_GET_QBM_DATAPTR(ccci_except_polling_gpd_rx);
        //rcv_size = CCCIDEV_GET_QBM_DATALEN(ccci_except_polling_gpd_rx); // FIXME
        QBM_CACHE_INVALID(buff, sizeof(CCCI_BUFF_T));
        ccci_debug_check_seq(buff); // check ccci seq
        if (buff->reserved == MD_EX_CHK_ID){
            ex_set_step_logging(EX_AUTO_STEP);  //0x43
            ccci_exception_handshake_done = KAL_TRUE;
            CCCIDEV_RST_CCCI_COMM_GPD_LIST(p_first_gpd, p_last_gpd);
            ccci_except_set_gpd(CCCI_CONTROL_CHANNEL_ACK, p_first_gpd, p_last_gpd);  //reload to CCCI_CONTROL_CHANNEL_ACK
            break;
        }
        else
        {
            //dump data
            ex_fire_extern_step_logging(0xFFFFFFFF);
            ex_fire_extern_step_logging(buff->data[0]);
            ex_fire_extern_step_logging(buff->data[1]);
            ex_fire_extern_step_logging(buff->channel);
            ex_fire_extern_step_logging(buff->reserved);
            ex_fire_extern_step_logging(0xFFFFFFFF);
        }
        CCCIDEV_RST_CCCI_COMM_GPD_LIST(p_first_gpd, p_last_gpd);
    }while(1);
    
#else     
    ccci_exception_check_write_result = ccci_polling_io(CCCI_CONTROL_CHANNEL, ccci_except_polling_gpd_tx, KAL_TRUE);
    CCCIDEV_RST_CCCI_COMM_GPD_LIST(ccci_except_polling_gpd_tx, ccci_except_polling_gpd_tx);

    while(CCCI_SUCCESS == (ccci_exception_handshake_done = ccci_polling_io(CCCI_CONTROL_CHANNEL_ACK, ccci_except_polling_gpd_tx, KAL_FALSE))){
        buff = CCCIDEV_GET_QBM_DATAPTR(ccci_except_polling_gpd_tx);
        QBM_CACHE_INVALID(buff, sizeof(CCCI_BUFF_T));
        ccci_debug_check_seq(buff); // check ccci seq
        if (buff->reserved == MD_EX_CHK_ID){
            CCCIDEV_RST_CCCI_COMM_GPD_LIST(ccci_except_polling_gpd_tx, ccci_except_polling_gpd_tx);
            break;
        }
        CCCIDEV_RST_CCCI_COMM_GPD_LIST(ccci_except_polling_gpd_tx, ccci_except_polling_gpd_tx);
    }
#endif    
    ex_set_step_logging(EX_AUTO_STEP);  //0x44
    ccci_exception_state = CCCI_EXPT_HANDSHAKE_ST;
#ifdef CCCI_EXCETION_PRE_TEST
    ccci_exception_info_passed_pre();
#endif    

#endif
}
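The same round-up-to-cache-line computation appears three times above (Tx, Rx, and Rx2 buffers). Below is a minimal sketch of that idiom as a helper, assuming only that CPU_CACHE_LINE_SIZE is a power of two and CPU_CACHE_LINE_SIZE_MASK equals CPU_CACHE_LINE_SIZE - 1; align_up_to_cache_line is a hypothetical name, not part of the original driver.

/* Hypothetical helper: round an address up to the next cache-line
 * boundary. Behaviorally identical to the three inline if-blocks above;
 * for power-of-two line sizes it could also be written as
 * (addr + CPU_CACHE_LINE_SIZE_MASK) & ~CPU_CACHE_LINE_SIZE_MASK. */
static kal_uint32 align_up_to_cache_line(kal_uint32 addr)
{
    if (addr & CPU_CACHE_LINE_SIZE_MASK) {
        addr = (addr & ~CPU_CACHE_LINE_SIZE_MASK) + CPU_CACHE_LINE_SIZE;
    }
    return addr;
}

/* Usage, mirroring the Tx setup above:
 *   p_cache_aligned = align_up_to_cache_line((kal_uint32)g_ccci_expect_polling_buf_tx);
 *   ccci_except_polling_gpd_tx = (qbm_gpd *)(p_cache_aligned + QBM_HEAD_SIZE);
 */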
Example #2
void ccci_ipc_receive_msg_cb(CCCI_CHANNEL_T channel, ccci_io_request_t* ior)
{
    kal_uint32	i = 0;
    ipc_ilm_t	*temp_ipc_ilm = NULL;
    void        *local_param = NULL, *peer_buff = NULL;
    kal_uint32  len = 0;
    kal_uint32  gpd_num = 0;
    ccci_io_request_t *current_ior = ior;
    ccci_io_request_t *reload_ior = NULL;
    qbm_gpd           *current_gpd;
    qbm_gpd           *next_gpd;
    kal_bool    is_process_cb = KAL_FALSE;

    ccci_ipc_trace(CCCI_IPC_TRACE, CCCI_IPC_MOD_DATA, CCCI_RECEIVE_MSG_FUNC_TRA);
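    /*
     * Each io request (ior) chains one or more GPDs. Every GPD data
     * buffer starts with a CCCI_BUFF_T header followed by a flattened
     * ipc_ilm_t (see the layout sketch after this function). Per GPD:
     * look up the destination task by ext queue id, deep-copy the
     * local_para / peer_buff payloads out of the GPD, deliver (or loop
     * back) the ilm, then reset and reload the GPDs.
     */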
    while(current_ior != NULL){
        current_gpd = current_ior->first_gpd ;
        gpd_num = 0;
        while( current_gpd != NULL){
            CCCI_BUFF_T *bufp = CCCIDEV_GET_QBM_DATAPTR(current_gpd);
            next_gpd = current_gpd->p_next;
            local_param = NULL;
            peer_buff = NULL;
            len = 0; /* reset the per-GPD length accumulator (checked against CCCI_IPC_GPD_SIZE below) */
            ccci_ipc_trace(CCCI_IPC_TRACE, CCCI_IPC_MOD_DATA, CCCI_RECEIVE_MSG_TRA_CCCI,
                bufp->data[0], bufp->data[1], bufp->channel, bufp->reserved);
            ccci_debug_check_seq(bufp); // check ccci seq
            /* get the task id from the mapping table by ext queue id */
            for (i = 0; i < MAX_CCCI_IPC_TASKS; i++)
            {
                if (ccci_ipc_maptbl[i].extq_id == bufp->reserved)
                {
                    break;
                }
            }

            /* assert if the ext queue id cannot be found */
            if (i >= MAX_CCCI_IPC_TASKS)
            {
                EXT_ASSERT(0, i, bufp->reserved, MAX_CCCI_IPC_TASKS);
            }

            /* assert if the ext queue id is destined to the MD */
            if ((ccci_ipc_maptbl[i].extq_id & AP_UINFY_ID_FLAG) != 0)
            {
                EXT_ASSERT(0, i, bufp->reserved, ccci_ipc_maptbl[i].extq_id);
            }
        		
            temp_ipc_ilm = (ipc_ilm_t *)(bufp + 1);
            ccci_ipc_trace(CCCI_IPC_TRACE, CCCI_IPC_MOD_DATA, CCCI_RECEIVE_MSG_TRA_ILM,
                temp_ipc_ilm, temp_ipc_ilm->src_mod_id, temp_ipc_ilm->dest_mod_id,
                temp_ipc_ilm->sap_id, temp_ipc_ilm->msg_id,
                temp_ipc_ilm->local_para_ptr, temp_ipc_ilm->peer_buff_ptr);
		
            if (temp_ipc_ilm->local_para_ptr)
            {
                // copy the local_para_struct out of the GPD buffer
                temp_ipc_ilm->local_para_ptr = (local_para_struct *)((char *)temp_ipc_ilm + sizeof(ipc_ilm_t));
                len += temp_ipc_ilm->local_para_ptr->msg_len; // need 4-byte alignment or not??
                // assert if the ilm size > CCCI_IPC_GPD size
                EXT_ASSERT(len < CCCI_IPC_GPD_SIZE, len, CCCI_IPC_GPD_SIZE, 0);
                local_param = construct_local_para(temp_ipc_ilm->local_para_ptr->msg_len, 1);
                kal_mem_cpy((kal_uint8 *)local_param, temp_ipc_ilm->local_para_ptr, temp_ipc_ilm->local_para_ptr->msg_len);
                ((local_para_struct *)local_param)->ref_count = 1;
                temp_ipc_ilm->local_para_ptr = local_param;
            }

            if (temp_ipc_ilm->peer_buff_ptr)
            {
                // copy the peer_buff_struct out of the GPD buffer
                if (temp_ipc_ilm->local_para_ptr)
                {
                    /* local_para_ptr was redirected to the constructed copy above,
                       so locate the flattened peer_buff from the start of the
                       flattened ilm rather than from the copy */
                    temp_ipc_ilm->peer_buff_ptr = (peer_buff_struct *)((char *)temp_ipc_ilm + sizeof(ipc_ilm_t)
                                                                       + temp_ipc_ilm->local_para_ptr->msg_len);
                }
                else
                {
                    temp_ipc_ilm->peer_buff_ptr = (peer_buff_struct *)((char *)temp_ipc_ilm + sizeof(ipc_ilm_t));
                }
                len += sizeof(peer_buff_struct)
                       + temp_ipc_ilm->peer_buff_ptr->pdu_len
                       + temp_ipc_ilm->peer_buff_ptr->free_header_space
                       + temp_ipc_ilm->peer_buff_ptr->free_tail_space;
                // assert if the ilm size > CCCI_IPC_GPD size
                EXT_ASSERT(len < CCCI_IPC_GPD_SIZE, len, CCCI_IPC_GPD_SIZE, 0);
                peer_buff = construct_peer_buff(temp_ipc_ilm->peer_buff_ptr->pdu_len,
                                                temp_ipc_ilm->peer_buff_ptr->free_header_space,
                                                temp_ipc_ilm->peer_buff_ptr->free_tail_space, 1);
                kal_mem_cpy((kal_uint8 *)peer_buff, temp_ipc_ilm->peer_buff_ptr,
                            sizeof(peer_buff_struct)
                            + temp_ipc_ilm->peer_buff_ptr->pdu_len
                            + temp_ipc_ilm->peer_buff_ptr->free_header_space
                            + temp_ipc_ilm->peer_buff_ptr->free_tail_space);
                temp_ipc_ilm->peer_buff_ptr = peer_buff;
            }

            CCCI_AP_MSG_TO_KAL_MSG(temp_ipc_ilm->msg_id, temp_ipc_ilm->msg_id);

            if (ccci_ipc_ch.it_mode == CCCI_IPC_IT_DISABLE) { // not in IT mode, so deliver to the upper layer
                //4 <1> process the ccci ipc cb function
                is_process_cb = ccci_ipc_process_cb_funp(temp_ipc_ilm);
                //4 <2> decide whether to send the ilm to the upper layer or destroy it
                if (!is_process_cb) { // no process cb, so send the ilm to the upper layer
                    msg_send6(MOD_CCCIIPC, ccci_ipc_maptbl[i].task_id, temp_ipc_ilm->sap_id, temp_ipc_ilm->msg_id, local_param, peer_buff);
                }
            }
            #ifdef CCCI_IT_MODE_CONTROL_CCCI_IPC
            if(ccci_ipc_ch.it_mode == CCCI_IPC_IT_LB){
			    CCCI_KAL_MSG_TO_AP_MSG(temp_ipc_ilm->msg_id, temp_ipc_ilm->msg_id);
                bufp->reserved |=  (AP_UINFY_ID_FLAG | temp_ipc_ilm->src_mod_id ); // for Loopback to ccci ipc port 1
                bufp->channel   =  ccci_ipc_ch.send_channel; // for Loopback
                ccci_debug_add_seq(bufp, CCCI_DEBUG_ASSERT_BIT); // add ccci seq
            }
            #endif 
            QBM_CACHE_INVALID(temp_ipc_ilm, sizeof(ipc_ilm_t)); // prevent cache-coherence problems (sizeof(temp_ipc_ilm) would only cover the pointer)
            gpd_num++;
            if (current_gpd == current_ior->last_gpd) {
                break;
            }
            else {
                current_gpd = next_gpd;
            }
            
        }// process gpds
        reload_ior = current_ior;
        current_ior = current_ior->next_request;
        reload_ior->next_request = NULL;
        
        #ifdef __SDIOC_PULL_Q_ENH_DL__
            reload_ior->num_gpd = gpd_num;
        #endif
        
        #ifdef __CCCI_IPC_UT__
            qbmt_dest_q(reload_ior->first_gpd, reload_ior->last_gpd);
        #else
            #ifdef CCCI_IT_MODE_CONTROL_CCCI_IPC
            if(ccci_ipc_ch.it_mode == CCCI_IPC_IT_LB){
                ccci_ipc_ch.ccci_write_gpd(ccci_ipc_ch.send_channel, reload_ior, NULL);
                ccci_ipc_ch.reload_rgpd_number -= gpd_num;
                ccci_ipc_reload_rgpds();
            }else
            #endif
            {
                CCCIDEV_RST_CCCI_COMM_GPD_LIST(reload_ior->first_gpd, reload_ior->last_gpd);
                ccci_ipc_ch.ccci_write_gpd(ccci_ipc_ch.receive_channel, reload_ior, NULL);
            }
        #endif 
    }//process iors
    ccci_ipc_trace(CCCI_IPC_TRACE, CCCI_IPC_MOD_DATA, CCCI_RECEIVE_MSG_FUNC_PASS_TRA);
}
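The callback above depends on the sender having flattened each ILM into a single GPD buffer: the ipc_ilm_t header first, then the local_para payload (if any), then the peer_buff payload (if any). The embedded pointers are not valid addresses on the receiving side; they act only as present/absent flags, so the code recomputes them as offsets before deep-copying. Below is a minimal sketch of that recomputation, assuming the same struct definitions used above; unpack_flat_ilm is a hypothetical helper, and the real callback performs these steps inline.

/* Hypothetical helper: rebuild the embedded pointers of a flattened
 * ipc_ilm_t received in a GPD data buffer. The sender-side pointer
 * values are meaningless here and serve only as NULL / non-NULL flags. */
static void unpack_flat_ilm(ipc_ilm_t *ilm)
{
    char *payload = (char *)ilm + sizeof(ipc_ilm_t);

    if (ilm->local_para_ptr) {
        /* the local_para payload sits immediately after the ilm header */
        ilm->local_para_ptr = (local_para_struct *)payload;
        payload += ilm->local_para_ptr->msg_len;
    }
    if (ilm->peer_buff_ptr) {
        /* the peer_buff payload follows the local_para payload, if any */
        ilm->peer_buff_ptr = (peer_buff_struct *)payload;
    }
}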