Exemplo n.º 1
0
/* Exception-flow step: after the handshake, push the exception record
 * (EX_LOG_T) to the AP over the CCCI control channel in polling mode,
 * then advance the CCCI exception state machine to
 * CCCI_EXPT_INFO_PASS_PRE_ST.
 * Runs only in exception context (single thread, no interrupts), hence
 * the busy-wait polling on GPD completion; returns immediately when the
 * system is not in the exception state.
 */
void ccci_exception_info_passed_pre(void)  
{
#if defined(__MODEM_CCCI_EXIST__)&& !defined(__MODEM_CARD__)  

    CCCI_BUFF_T *buff;
    qbm_gpd *p_first_gpd, *p_last_gpd;
    kal_uint32 gpd_num;

    // ensure in the exception state
    if(INT_QueryExceptionStatus() == KAL_FALSE) return;


    //- Already init in ccci_exception_handshake
    //- ccci_init(CCCI_CONTROL_CHANNEL, ccci_except_ack);
    //- ccci_init(CCCI_CONTROL_CHANNEL_ACK, ccci_except_ack);

    // Build the Tx message: CCCI header followed immediately (at buff+1)
    // by the whole exception log.
    buff = CCCIDEV_GET_QBM_DATAPTR(ccci_except_polling_gpd_tx);
    buff->data[0] = MD_EX_MAGIC;
    buff->data[1] = CCMSG_ID_EXCEPTION_REC_OK;
    buff->channel = CCCI_CONTROL_CHANNEL;
    buff->reserved = MD_EX_REC_OK_CHK_ID;
    ccci_debug_add_seq(buff, CCCI_DEBUG_ASSERT_BIT); // add ccci seq
    kal_mem_cpy((void*)(buff+1), ex_log_ptr, sizeof(EX_LOG_T));
    // Set the data length on both the GPD and its data TBD, then
    // re-checksum both descriptors (HW validates the checksum).
    QBM_DES_SET_DATALEN(ccci_except_polling_gpd_tx, sizeof(CCCI_BUFF_T) + sizeof(EX_LOG_T));
    QBM_DES_SET_DATALEN(ccci_except_polling_gpd_tx->p_data_tbd,  sizeof(CCCI_BUFF_T) + sizeof(EX_LOG_T));
    qbm_cal_set_checksum((kal_uint8 *)ccci_except_polling_gpd_tx);
    qbm_cal_set_checksum((kal_uint8 *)ccci_except_polling_gpd_tx->p_data_tbd);
    // Flush descriptors and payload from cache so the HIF/DMA engine
    // observes the freshly written data.
    QBM_CACHE_FLUSH(ccci_except_polling_gpd_tx, sizeof(qbm_gpd));
    QBM_CACHE_FLUSH(ccci_except_polling_gpd_tx->p_data_tbd, sizeof(qbm_gpd));
    QBM_CACHE_FLUSH(buff, sizeof(CCCI_BUFF_T) + sizeof(EX_LOG_T));

    // TODO:Need to revise the API, shall use exception API (after channle reset flow is done)
#ifdef SWITCH_TO_EXCEPTION_IO
    // Hand the single GPD to the exception HIF and busy-poll until it is
    // reported consumed (gpd_num != 0) — interrupts are unavailable here.
    p_first_gpd = p_last_gpd = ccci_except_polling_gpd_tx;
    ccci_except_set_gpd(CCCI_CONTROL_CHANNEL, p_first_gpd, p_last_gpd);
    gpd_num = 0;
    do{
        ccci_except_hif_st(CCCI_CONTROL_CHANNEL);
        ccci_except_poll_gpd(CCCI_CONTROL_CHANNEL, (void **)&p_first_gpd, (void **)&p_last_gpd, &gpd_num);
    }while(0 == gpd_num);
    ex_set_step_logging(EX_AUTO_STEP); //0x45
    CCCIDEV_RST_CCCI_COMM_GPD_LIST(p_first_gpd, p_last_gpd);
#else
    // Legacy path: synchronous polled write through the normal CCCI I/O API.
    ccci_exception_info_write_result = ccci_polling_io(CCCI_CONTROL_CHANNEL, ccci_except_polling_gpd_tx, KAL_TRUE);
    CCCIDEV_RST_CCCI_COMM_GPD_LIST(ccci_except_polling_gpd_tx, ccci_except_polling_gpd_tx);
#endif

    ccci_exception_state = CCCI_EXPT_INFO_PASS_PRE_ST;
    #ifdef WDT_ISR_TEST
    wdt_test();
    #endif
#endif    
}
Exemplo n.º 2
0
/* Initialise the CCCI IPC channel: register the Tx/Rx GPD callbacks,
 * create the event group and semaphore on first use, and carve a
 * cache-line aligned polling-mode GPD out of g_ccci_ipc_polling_buf.
 */
void ccci_ipc_init(void)
{
    kal_uint32 aligned_addr;

    /* Tx completion uses a callback; Tx-ack needs none. */
    ccci_ipc_ch.ccci_init_gpdior(ccci_ipc_ch.send_channel ,  ccci_ipc_send_msg_cb);
//	    ccci_ipc_ch.ccci_init_gpdior(ccci_ipc_ch.send_ack_channel , ccci_ipc_error_cb);
    /* Rx needs a callback to deliver messages; Rx-ack needs none. */
    ccci_ipc_ch.ccci_init_gpdior(ccci_ipc_ch.receive_channel , ccci_ipc_receive_msg_cb);
//	    ccci_ipc_ch.ccci_init_gpdior(ccci_ipc_ch.receive_ack_channel , ccci_ipc_error_cb);

    /* Create the ack event group once. */
    if (ccci_ipc_ch.event == 0){
        ccci_ipc_ch.event = kal_create_event_group("CCCI_IPC");
    }
    /* Create the critical-section semaphore once (initial count 1). */
    if (ccci_ipc_ch.semaphore == 0){
        ccci_ipc_ch.semaphore = kal_create_sem("CCCI_IPC",1); 
    }

    /* Polling-mode GPD setup: the static buffer must be able to absorb
     * the worst-case alignment adjustment below. */
    ASSERT(CCCI_IPC_POLLING_MODE_BUF_SZ >= 2*CPU_CACHE_LINE_SIZE);

    /* Round the buffer start up to the next cache-line boundary. */
    aligned_addr = (kal_uint32)g_ccci_ipc_polling_buf;
    if (aligned_addr & CPU_CACHE_LINE_SIZE_MASK) {
        aligned_addr &= ~CPU_CACHE_LINE_SIZE_MASK;
        aligned_addr += CPU_CACHE_LINE_SIZE;
    }

    /* + QBM_HEAD_SIZE: on ROME E1 cldma records in the QBM header whether
     * this buffer is cacheable or non-cacheable. */
    ccci_ipc_ch.p_polling_gpd = (qbm_gpd *)(aligned_addr + QBM_HEAD_SIZE);

    /* Register the CCCI-IPC debug status callback. */
    ccci_debug_get_status_register(CCCI_DEBUG_GET_STATUS_MODULE_CCCIIPC, ccci_ipc_get_debug_status);

    CCCIDEV_RST_CCCI_COMM_GPD_LIST(ccci_ipc_ch.p_polling_gpd,ccci_ipc_ch.p_polling_gpd);

#ifdef CCCI_IT_MODE_CONTROL_CCCI_IPC
    ccci_ipc_it_create();
#endif
}
Exemplo n.º 3
0
/*************************************************************************
* FUNCTION
*  void ccci_exception_handshake
*
* DESCRIPTION
*  Performs the exception-mode handshake with the AP over the CCCI
*  control channel: re-initializes the control channels, builds
*  cache-line aligned polling GPDs (one Tx, two Rx), sends the
*  CCMSG_ID_EXCEPTION_CHECK message, and busy-polls until the AP echoes
*  a packet carrying MD_EX_CHK_ID. On success the CCCI exception state
*  advances to CCCI_EXPT_HANDSHAKE_ST.
*
* PARAMETERS
*  none
*
* RETURNS
*  none (returns immediately if not in exception state)
*
*************************************************************************/
void ccci_exception_handshake(void){
#if defined(__MODEM_CCCI_EXIST__)&& !defined(__MODEM_CARD__) 
    CCCI_BUFF_T  *buff;
    kal_uint32   p_cache_aligned;
    kal_uint32 gpd_num;
    qbm_gpd *p_first_gpd, *p_last_gpd;
    //kal_uint32   rcv_size = 0; 
    // ensure in the exception state
    if(INT_QueryExceptionStatus() == KAL_FALSE) 
        return;

    //we block here for debuging
    //if(ccci_exception_state != CCCI_EXPT_CLEAR_CH_ST) while(1);
    //- Avoid to use kal_mem_cpy
    //- HW bug

    ccci_init(CCCI_CONTROL_CHANNEL, ccci_except_ack);
    ccci_init(CCCI_CONTROL_CHANNEL_ACK, ccci_except_ack);

    // exception only have single thread, need to do polling mode
    /* initialize polling mode GPD */
    ASSERT(CCCI_EXCEPT_POLLING_MODE_BUF_SZ >= 2*CPU_CACHE_LINE_SIZE);
    /*make p_gpd aligned to CPU_CACHE_LINE_SIZE_MASK*/

    // Round the Tx buffer start up to the next cache-line boundary.
    p_cache_aligned = (kal_uint32)g_ccci_expect_polling_buf_tx;
    if(p_cache_aligned&CPU_CACHE_LINE_SIZE_MASK) { 
        p_cache_aligned = ((kal_uint32)(g_ccci_expect_polling_buf_tx)&~CPU_CACHE_LINE_SIZE_MASK);
        p_cache_aligned += CPU_CACHE_LINE_SIZE;
    }
    
    // The reason + QBM_HEAD_SIZE is for ROME E1, cldma needs to record this buff whether is cacheable or non-cacheable
	ccci_except_polling_gpd_tx = (qbm_gpd *)(p_cache_aligned + QBM_HEAD_SIZE);

    CCCIDEV_RST_CCCI_COMM_GPD_LIST(ccci_except_polling_gpd_tx, ccci_except_polling_gpd_tx);

    //format Rx GPD
    p_cache_aligned = (kal_uint32)g_ccci_expect_polling_buf_rx;
    if(p_cache_aligned&CPU_CACHE_LINE_SIZE_MASK) { 
        p_cache_aligned = ((kal_uint32)(g_ccci_expect_polling_buf_rx)&~CPU_CACHE_LINE_SIZE_MASK);
        p_cache_aligned += CPU_CACHE_LINE_SIZE;
    }
    
    // The reason + QBM_HEAD_SIZE is for ROME E1, cldma needs to record this buff whether is cacheable or non-cacheable
	ccci_except_polling_gpd_rx = (qbm_gpd *)(p_cache_aligned + QBM_HEAD_SIZE);

    CCCIDEV_RST_CCCI_COMM_GPD_LIST(ccci_except_polling_gpd_rx, ccci_except_polling_gpd_rx);

    //format Rx 2nd GPD (tail GPD needed by the polled Rx below)
    p_cache_aligned = (kal_uint32)g_ccci_expect_polling_buf_rx2;
    if(p_cache_aligned&CPU_CACHE_LINE_SIZE_MASK) { 
        p_cache_aligned = ((kal_uint32)(g_ccci_expect_polling_buf_rx2)&~CPU_CACHE_LINE_SIZE_MASK);
        p_cache_aligned += CPU_CACHE_LINE_SIZE;
    }
    
    // The reason + QBM_HEAD_SIZE is for ROME E1, cldma needs to record this buff whether is cacheable or non-cacheable
	ccci_except_polling_gpd_rx2 = (qbm_gpd *)(p_cache_aligned + QBM_HEAD_SIZE);

    CCCIDEV_RST_CCCI_COMM_GPD_LIST(ccci_except_polling_gpd_rx2, ccci_except_polling_gpd_rx2);

    //step 0. config rx gpd next pointer
    QBM_DES_SET_NEXT(ccci_except_polling_gpd_rx, ccci_except_polling_gpd_rx2);

    //step 1. send TX handshake pkt
    buff = CCCIDEV_GET_QBM_DATAPTR(ccci_except_polling_gpd_tx);
    
    buff->data[0] = MD_EX_MAGIC;
    buff->data[1] = CCMSG_ID_EXCEPTION_CHECK;
    buff->channel = CCCI_CONTROL_CHANNEL;
    buff->reserved = MD_EX_CHK_ID;
    ccci_debug_add_seq(buff, CCCI_DEBUG_ASSERT_BIT); // add ccci seq
    // Set lengths on GPD + data TBD, re-checksum both, then flush
    // descriptors and payload so the HIF sees the new data.
    QBM_DES_SET_DATALEN(ccci_except_polling_gpd_tx, sizeof(CCCI_BUFF_T));
    QBM_DES_SET_DATALEN(ccci_except_polling_gpd_tx->p_data_tbd,  sizeof(CCCI_BUFF_T));
    qbm_cal_set_checksum((kal_uint8 *)ccci_except_polling_gpd_tx);
    qbm_cal_set_checksum((kal_uint8 *)ccci_except_polling_gpd_tx->p_data_tbd);
    QBM_CACHE_FLUSH(ccci_except_polling_gpd_tx, sizeof(qbm_gpd));
    QBM_CACHE_FLUSH(ccci_except_polling_gpd_tx->p_data_tbd, sizeof(qbm_gpd));
    QBM_CACHE_FLUSH(buff, sizeof(CCCI_BUFF_T));

    ex_set_step_logging(EX_AUTO_STEP); //0x41

    //step 2. polling echoed rx handshake pkt (need two GPD, one for tail)
    // TODO:Need to revise the API, shall use exception API (after channle reset flow is done)
#ifdef SWITCH_TO_EXCEPTION_IO
    // Busy-poll the Tx GPD until the HIF reports it consumed.
    p_first_gpd = p_last_gpd = ccci_except_polling_gpd_tx;
    ccci_except_set_gpd(CCCI_CONTROL_CHANNEL, p_first_gpd, p_last_gpd);
    gpd_num = 0;
    do{
        ccci_except_hif_st(CCCI_CONTROL_CHANNEL);
        ccci_except_poll_gpd(CCCI_CONTROL_CHANNEL, (void **)&p_first_gpd, (void **)&p_last_gpd, &gpd_num);
    }while(0 == gpd_num);
    CCCIDEV_RST_CCCI_COMM_GPD_LIST(p_first_gpd, p_last_gpd);

    ex_set_step_logging(EX_AUTO_STEP); //0x42
    
    // Poll the ack channel until the AP echoes MD_EX_CHK_ID; any other
    // packet is dumped via step logging and the Rx GPDs are re-armed.
    p_first_gpd = ccci_except_polling_gpd_rx;
    p_last_gpd = ccci_except_polling_gpd_rx2;
    do{
        ccci_except_set_gpd(CCCI_CONTROL_CHANNEL_ACK, p_first_gpd, p_last_gpd);
        gpd_num = 0;
        do{
            ccci_except_hif_st(CCCI_CONTROL_CHANNEL_ACK);
            ccci_except_poll_gpd(CCCI_CONTROL_CHANNEL_ACK, (void **)&p_first_gpd, (void **)&p_last_gpd, &gpd_num);
        }while(0 == gpd_num);
        buff = CCCIDEV_GET_QBM_DATAPTR(ccci_except_polling_gpd_rx);
        //rcv_size = CCCIDEV_GET_QBM_DATALEN(ccci_except_polling_gpd_rx); // FIXME
        QBM_CACHE_INVALID(buff, sizeof(CCCI_BUFF_T)); // drop stale cache lines before reading DMA data
        ccci_debug_check_seq(buff); // check ccci seq
        if (buff->reserved == MD_EX_CHK_ID){
            ex_set_step_logging(EX_AUTO_STEP);  //0x43
            ccci_exception_handshake_done = KAL_TRUE;
            CCCIDEV_RST_CCCI_COMM_GPD_LIST(p_first_gpd, p_last_gpd);
            ccci_except_set_gpd(CCCI_CONTROL_CHANNEL_ACK, p_first_gpd, p_last_gpd);  //reload to CCCI_CONTROL_CHANNEL_ACK
            break;
        }
        else
        {
            //dump date
            ex_fire_extern_step_logging(0xFFFFFFFF);
            ex_fire_extern_step_logging(buff->data[0]);
            ex_fire_extern_step_logging(buff->data[1]);
            ex_fire_extern_step_logging(buff->channel);
            ex_fire_extern_step_logging(buff->reserved);
            ex_fire_extern_step_logging(0xFFFFFFFF);
        }
        CCCIDEV_RST_CCCI_COMM_GPD_LIST(p_first_gpd, p_last_gpd);
    }while(1);
    
#else     
    // Legacy path: synchronous polled write, then polled read until the
    // echoed packet carries MD_EX_CHK_ID.
    ccci_exception_check_write_result = ccci_polling_io(CCCI_CONTROL_CHANNEL, ccci_except_polling_gpd_tx, KAL_TRUE);
    CCCIDEV_RST_CCCI_COMM_GPD_LIST(ccci_except_polling_gpd_tx, ccci_except_polling_gpd_tx);

    while(CCCI_SUCCESS == (ccci_exception_handshake_done = ccci_polling_io(CCCI_CONTROL_CHANNEL_ACK, ccci_except_polling_gpd_tx, KAL_FALSE))){
        buff = CCCIDEV_GET_QBM_DATAPTR(ccci_except_polling_gpd_tx);
        QBM_CACHE_INVALID(buff, sizeof(CCCI_BUFF_T));
        ccci_debug_check_seq(buff); // check ccci seq
        if (buff->reserved == MD_EX_CHK_ID){
            CCCIDEV_RST_CCCI_COMM_GPD_LIST(ccci_except_polling_gpd_tx, ccci_except_polling_gpd_tx);
            break;
        }
        CCCIDEV_RST_CCCI_COMM_GPD_LIST(ccci_except_polling_gpd_tx, ccci_except_polling_gpd_tx);
    }
#endif    
    ex_set_step_logging(EX_AUTO_STEP);  //0x44
    ccci_exception_state = CCCI_EXPT_HANDSHAKE_ST;
#ifdef CCCI_EXCETION_PRE_TEST
    ccci_exception_info_passed_pre();
#endif    

#endif
}
Exemplo n.º 4
0
/*************************************************************************
* FUNCTION
*  ccci_ipc_send_msg
*
* DESCRIPTION
*  Internal API to send an ILM message to the AP through CCCI IPC.
*  The ILM plus its optional local_para / peer_buff payloads are
*  serialized after a CCCI header into one GPD and written to the send
*  channel. Before system init completes, the dedicated polling GPD is
*  used synchronously; afterwards a GPD is allocated and, for
*  KAL_INFINITE_WAIT, the call blocks until the per-destination send-ack
*  event fires.
*
* PARAMETERS
*  ipc_task_id     -  destination task id (mapped to an AP ext queue id)
*  buffer_ptr      -  ILM buffer; must come from ipc_msgsvc_allocate_ilm
*  msg_size        -  unused; the length is derived from the ILM payloads
*  wait_mode       -  KAL_INFINITE_WAIT blocks on the send-ack event
*  message_to_head -  unused
*
* RETURNS
*  KAL_TRUE on success, KAL_FALSE on validation/allocation/write failure
*
*************************************************************************/
kal_bool ccci_ipc_send_msg(kal_uint32 ipc_task_id, void *buffer_ptr, kal_uint16 msg_size,	kal_wait_mode wait_mode, kal_bool message_to_head)
{
    kal_uint32 i, j ;
    kal_uint32 retrieved_events = 0, orig_local_addr = 0 , orig_peer_addr = 0, update_buff_addr=0;
    kal_int32 result = CCCI_SUCCESS;
    ipc_ilm_t	*temp_ipc_ilm = (ipc_ilm_t *)buffer_ptr;
    ccci_io_request_t ior = {0};
    CCCI_BUFF_T *p_ccci_buff;
    kal_uint32 len = 0;

    ccci_ipc_trace(CCCI_IPC_TRACE, CCCI_IPC_MOD_DATA, CCCI_SEND_MSG_FUNC_TRA);	
    ccci_ipc_trace(CCCI_IPC_TRACE, CCCI_IPC_MOD_DATA, CCCI_SEND_MSG_TRA_ILM,
                temp_ipc_ilm, temp_ipc_ilm->src_mod_id, temp_ipc_ilm->dest_mod_id,
                temp_ipc_ilm->sap_id, temp_ipc_ilm->msg_id,
                temp_ipc_ilm->local_para_ptr, temp_ipc_ilm->peer_buff_ptr);

    /* get ext queue id from mapping table of task id - destination */
    for (i = 0; i < MAX_CCCI_IPC_TASKS; i++) 
    {
        if ( ccci_ipc_maptbl[i].task_id == ipc_task_id )
        {
            break;
        }
    }

    /* get ext queue id from mapping table of task id - source */
    for (j = 0; j < MAX_CCCI_IPC_TASKS; j++) 
    {
        if ( ccci_ipc_maptbl[j].task_id == temp_ipc_ilm->src_mod_id )
        {
            break;
        }
    }

    /* check src mod id, if it's not defined in CCCI IPC, don't set used bit */
    if(j >= MAX_CCCI_IPC_TASKS)
    {
        ccci_ipc_trace(CCCI_IPC_ERR, CCCI_IPC_MOD_DATA, CCCI_SEND_MSG_FUNC_TASKID_ERROR, ipc_task_id, temp_ipc_ilm->src_mod_id);
        return KAL_FALSE;
    }

    /* check if the extquque id can not be found */
    if (i >= MAX_CCCI_IPC_TASKS) 
    {
        ccci_ipc_trace(CCCI_IPC_ERR, CCCI_IPC_MOD_DATA, CCCI_SEND_MSG_FUNC_TASKID_ERROR, ipc_task_id, temp_ipc_ilm->src_mod_id);
        ((CCCI_IPC_ILM_T*)buffer_ptr)->used = 0;  
        return KAL_FALSE;
    }

    /* check if the extquque id is to AP */
    if ((ccci_ipc_maptbl[i].extq_id & AP_UINFY_ID_FLAG) == 0)
    {
        ccci_ipc_trace(CCCI_IPC_ERR, CCCI_IPC_MOD_DATA, CCCI_SEND_MSG_FUNC_DESTID_ERROR, ipc_task_id);
        ((CCCI_IPC_ILM_T*)buffer_ptr)->used = 0;   
        return KAL_FALSE;
    }

    /* check if the ilm buffer is from ipc_msgsvc_allocate_ilm or not */
    if (buffer_ptr != &ccci_ipc_ilm_arr[j].ipc_ilm){
        ccci_ipc_trace(CCCI_IPC_ERR, CCCI_IPC_MOD_DATA, CCCI_SEND_MSG_FUNC_ILM_ERROR);
        return KAL_FALSE;
    }

    /* total serialized size: CCCI header + ILM + optional payloads */
    len = sizeof(CCCI_BUFF_T) + sizeof(ipc_ilm_t);
    if (temp_ipc_ilm->local_para_ptr != NULL){          
        len += temp_ipc_ilm->local_para_ptr->msg_len ;
    }
    if( temp_ipc_ilm->peer_buff_ptr != NULL){
        len += sizeof(peer_buff_struct) 
            + temp_ipc_ilm->peer_buff_ptr->pdu_len 
            + temp_ipc_ilm->peer_buff_ptr->free_header_space 
            + temp_ipc_ilm->peer_buff_ptr->free_tail_space;
    }
    /* assert if ilm size > CCCI_IPC_GPD size */
    EXT_ASSERT(len < CCCI_IPC_GPD_SIZE, len, CCCI_IPC_GPD_SIZE, 0);

    /* Use critical section to protect ENTER */
    CCCI_IPC_ENTER_CRITICAL_SECTION
    if (KAL_TRUE == kal_query_systemInit()){ // polling mode
        ior.first_gpd = ccci_ipc_ch.p_polling_gpd;
        ior.last_gpd  = ccci_ipc_ch.p_polling_gpd;
    }
    else{
#ifdef __SDIOC_PULL_Q_ENH_DL__
        ior.num_gpd = 
#endif 
        qbmt_alloc_q_no_tail( 
                            CCCI_IPC_GPD_TYPE,            /* type */
                            1,                            /* buff_num */
                            (void **)(&ior.first_gpd),    /* pp_head */
                            (void **)(&ior.last_gpd));    /* pp_tail */
        if(ior.first_gpd == NULL){
            ccci_ipc_trace(CCCI_IPC_ERR, CCCI_IPC_MOD_DATA, CCCI_SEND_MSG_FUNC_ALLOC_GPD_ERROR);
            /* BUGFIX: this path used to return while still holding the
             * critical section (lock leak) and left the ILM slot marked
             * as used. Release both before bailing out. */
            CCCI_IPC_EXIT_CRITICAL_SECTION
            ((CCCI_IPC_ILM_T*)buffer_ptr)->used = 0;
            return KAL_FALSE;
        }
    }

    /* initialize GPD CCCI_Header content */
    p_ccci_buff = CCCIDEV_GET_QBM_DATAPTR(ior.first_gpd);
    p_ccci_buff->data[1] = (kal_uint32)len;
    p_ccci_buff->channel = (kal_uint32)ccci_ipc_ch.send_channel;
    p_ccci_buff->reserved = (kal_uint32)ccci_ipc_maptbl[i].extq_id;
    ccci_debug_add_seq(p_ccci_buff, CCCI_DEBUG_ASSERT_BIT); // add ccci seq
    QBM_DES_SET_DATALEN(ior.first_gpd, p_ccci_buff->data[1]);
    QBM_DES_SET_DATALEN(ior.first_gpd->p_data_tbd,  p_ccci_buff->data[1]);
    qbm_cal_set_checksum((kal_uint8 *)ior.first_gpd);
    qbm_cal_set_checksum((kal_uint8 *)ior.first_gpd->p_data_tbd);
    QBM_CACHE_FLUSH(ior.first_gpd, sizeof(qbm_gpd));
    QBM_CACHE_FLUSH(ior.first_gpd->p_data_tbd, sizeof(qbm_gpd));

    /* copy ilm to GPD; src_mod_id is rewritten to the AP-side queue id */
    temp_ipc_ilm->src_mod_id = ccci_ipc_maptbl[j].extq_id; 
    update_buff_addr = (kal_uint32)p_ccci_buff;
    update_buff_addr += sizeof(CCCI_BUFF_T);	
    CCCI_KAL_MSG_TO_AP_MSG(temp_ipc_ilm->msg_id, temp_ipc_ilm->msg_id);
    kal_mem_cpy((kal_uint8 *)update_buff_addr ,(kal_uint8 *)temp_ipc_ilm, sizeof(ipc_ilm_t));

    if (temp_ipc_ilm->local_para_ptr != NULL){			
        /* copy local_para_struct to GPD, right after the ILM */
        update_buff_addr += sizeof(ipc_ilm_t); //24 bytes
        orig_local_addr = update_buff_addr;
        kal_mem_cpy((kal_uint8 *)update_buff_addr,(kal_uint8 *)temp_ipc_ilm->local_para_ptr, temp_ipc_ilm->local_para_ptr->msg_len);
    }

    if( temp_ipc_ilm->peer_buff_ptr != NULL){
        /* copy peer_buff_struct to GPD, after local_para (or the ILM) */
        if (temp_ipc_ilm->local_para_ptr != NULL){	 
            update_buff_addr += temp_ipc_ilm->local_para_ptr->msg_len;//should be 4 bytes alignment?? 
        }
        else{
            update_buff_addr += sizeof(ipc_ilm_t); //24 bytes
        }
        orig_peer_addr = update_buff_addr;
        kal_mem_cpy((kal_uint8 *)update_buff_addr,(kal_uint8 *)temp_ipc_ilm->peer_buff_ptr, 
                      sizeof(peer_buff_struct) 
                      + temp_ipc_ilm->peer_buff_ptr->pdu_len 
                      + temp_ipc_ilm->peer_buff_ptr->free_header_space 
                      + temp_ipc_ilm->peer_buff_ptr->free_tail_space);
    }

    /* release the caller's payloads; keep non-NULL in-GPD addresses as
     * markers that the serialized message carries content */
    free_local_para(temp_ipc_ilm->local_para_ptr);
    temp_ipc_ilm->local_para_ptr = (local_para_struct *)orig_local_addr;

    free_peer_buff(temp_ipc_ilm->peer_buff_ptr);
    temp_ipc_ilm->peer_buff_ptr = (peer_buff_struct *)orig_peer_addr;

    QBM_CACHE_FLUSH(p_ccci_buff, len);

    if (KAL_TRUE == kal_query_systemInit()){ // polling mode
        result = ccci_polling_io(ccci_ipc_ch.send_channel, ccci_ipc_ch.p_polling_gpd, KAL_TRUE);
        CCCIDEV_RST_CCCI_COMM_GPD_LIST(ccci_ipc_ch.p_polling_gpd,ccci_ipc_ch.p_polling_gpd);
    }
    else{		
        result = ccci_ipc_ch.ccci_write_gpd(ccci_ipc_ch.send_channel, &ior, NULL);	

        if (KAL_INFINITE_WAIT == wait_mode && CCCI_SUCCESS == result){		
            /* Wait for feedback via the per-destination ack event bit */
            kal_retrieve_eg_events(ccci_ipc_ch.event, 1 << i, KAL_AND_CONSUME,  &retrieved_events, KAL_SUSPEND);
        }
    }				
    /* Exit critical section */ 
    CCCI_IPC_EXIT_CRITICAL_SECTION
    ((CCCI_IPC_ILM_T*)buffer_ptr)->used = 0;  

    ccci_ipc_trace(CCCI_IPC_TRACE, CCCI_IPC_MOD_DATA, CCCI_SEND_MSG_TRA_CCCI,
                p_ccci_buff->data[0], p_ccci_buff->data[1], p_ccci_buff->channel, p_ccci_buff->reserved);
    ccci_ipc_trace(CCCI_IPC_TRACE, CCCI_IPC_MOD_DATA, CCCI_SEND_MSG_FUNC_PASS_TRA);	

    /* Finish */	
    if (result == CCCI_SUCCESS){
        return KAL_TRUE;
    }
    else{
        return KAL_FALSE;
    }
}
Exemplo n.º 5
0
/* Rx callback for the CCCI IPC receive channel.
 * Walks every GPD of every chained IOR, rebuilds the embedded ILM
 * (deep-copying local_para / peer_buff out of the GPD into freshly
 * constructed KAL structures), dispatches it to the destination task
 * (unless a registered cb consumes it or IT mode intercepts it), then
 * resets and reloads the GPDs back to the channel.
 *
 * Fixes vs. original:
 *  - `len` is reset per message; it used to accumulate across GPDs and
 *    could trip EXT_ASSERT on a later, perfectly valid message.
 *  - the in-GPD peer_buff offset is computed from the in-GPD local_para
 *    layout; it used to be computed from the freshly constructed heap
 *    copy, reading past that allocation.
 *  - QBM_CACHE_INVALID used sizeof(pointer) instead of sizeof(ipc_ilm_t).
 */
void ccci_ipc_receive_msg_cb(CCCI_CHANNEL_T channel, ccci_io_request_t* ior)
{
    kal_uint32	i = 0;
    ipc_ilm_t	*temp_ipc_ilm = NULL;
    void        *local_param = NULL, *peer_buff = NULL;
    kal_uint32  len = 0;
    kal_uint32  gpd_num = 0;
    ccci_io_request_t *current_ior = ior;
    ccci_io_request_t *reload_ior = NULL;
    qbm_gpd           *current_gpd;
    qbm_gpd           *next_gpd;
    kal_bool    is_process_cb = KAL_FALSE;
    local_para_struct *gpd_local_para = NULL; /* local_para as laid out inside the GPD */
    peer_buff_struct  *gpd_peer_buff = NULL;  /* peer_buff as laid out inside the GPD */

    ccci_ipc_trace(CCCI_IPC_TRACE, CCCI_IPC_MOD_DATA, CCCI_RECEIVE_MSG_FUNC_TRA);
    while(current_ior != NULL){
        current_gpd = current_ior->first_gpd ;
        gpd_num = 0;
        while( current_gpd != NULL){
            CCCI_BUFF_T *bufp = CCCIDEV_GET_QBM_DATAPTR(current_gpd);
            next_gpd = current_gpd->p_next;
            local_param = NULL;
            peer_buff = NULL;
            gpd_local_para = NULL;
            len = 0; /* BUGFIX: size accounting is per message */
            ccci_ipc_trace(CCCI_IPC_TRACE, CCCI_IPC_MOD_DATA, CCCI_RECEIVE_MSG_TRA_CCCI,
                bufp->data[0], bufp->data[1], bufp->channel, bufp->reserved);
            ccci_debug_check_seq(bufp); // check ccci seq
            /* get task id from mapping table of ext queue id */
            for (i = 0; i < MAX_CCCI_IPC_TASKS; i++) 
            {
                if ( ccci_ipc_maptbl[i].extq_id == bufp->reserved )
                {
                    break;
                }
            }

            /* check if the extquque id can not be found */
            if (i >= MAX_CCCI_IPC_TASKS) 
            {
                EXT_ASSERT(0, i, bufp->reserved, MAX_CCCI_IPC_TASKS);
            }

            /* check if the extquque id is to MD */
            if ((ccci_ipc_maptbl[i].extq_id & AP_UINFY_ID_FLAG) != 0)
            {
                EXT_ASSERT(0, i, bufp->reserved, ccci_ipc_maptbl[i].extq_id);
            }

            /* the ILM sits right after the CCCI header in the GPD */
            temp_ipc_ilm = (ipc_ilm_t *)(bufp + 1);
            ccci_ipc_trace(CCCI_IPC_TRACE, CCCI_IPC_MOD_DATA, CCCI_RECEIVE_MSG_TRA_ILM,
                temp_ipc_ilm, temp_ipc_ilm->src_mod_id, temp_ipc_ilm->dest_mod_id,
                temp_ipc_ilm->sap_id, temp_ipc_ilm->msg_id,
                temp_ipc_ilm->local_para_ptr, temp_ipc_ilm->peer_buff_ptr);

            if(temp_ipc_ilm->local_para_ptr)
            {
                /* deep-copy the in-GPD local_para into a KAL local_para */
                gpd_local_para = (local_para_struct *)((char*)temp_ipc_ilm + sizeof(ipc_ilm_t));
                len += gpd_local_para->msg_len; // need 4 bytes alignment or not??
                /* assert if ilm size > CCCI_IPC_GPD size */
                EXT_ASSERT(len < CCCI_IPC_GPD_SIZE, len, CCCI_IPC_GPD_SIZE,0);
                local_param = construct_local_para(gpd_local_para->msg_len, 1);
                kal_mem_cpy((kal_uint8 *)local_param, gpd_local_para, gpd_local_para->msg_len);
                ((local_para_struct *)local_param)->ref_count = 1 ; 
                temp_ipc_ilm->local_para_ptr = local_param;
            }

            if (temp_ipc_ilm->peer_buff_ptr)
            {
                /* deep-copy the in-GPD peer_buff; it follows the in-GPD
                 * local_para (or the ILM when no local_para is present) */
                if(gpd_local_para != NULL)
                {
                    gpd_peer_buff = (peer_buff_struct *)((char*)gpd_local_para 
                                                         + gpd_local_para->msg_len);
                }
                else
                {
                    gpd_peer_buff = (peer_buff_struct *)((char*)temp_ipc_ilm + sizeof(ipc_ilm_t));
                }
                len += sizeof(peer_buff_struct) 
                       + gpd_peer_buff->pdu_len 
                       + gpd_peer_buff->free_header_space 
                       + gpd_peer_buff->free_tail_space;
                /* assert if ilm size > CCCI_IPC_GPD size */
                EXT_ASSERT(len < CCCI_IPC_GPD_SIZE, len, CCCI_IPC_GPD_SIZE, 0);
                peer_buff = construct_peer_buff(gpd_peer_buff->pdu_len, \
                gpd_peer_buff->free_header_space, \
                gpd_peer_buff->free_tail_space, 1);
                kal_mem_cpy((kal_uint8 *)peer_buff, gpd_peer_buff, 
                             sizeof(peer_buff_struct) 
                             + gpd_peer_buff->pdu_len 
                             + gpd_peer_buff->free_header_space 
                             + gpd_peer_buff->free_tail_space);	
                temp_ipc_ilm->peer_buff_ptr = peer_buff;
            }

            CCCI_AP_MSG_TO_KAL_MSG(temp_ipc_ilm->msg_id, temp_ipc_ilm->msg_id);

            if(ccci_ipc_ch.it_mode == CCCI_IPC_IT_DISABLE){ // if current is not IT mode, then need to send upper layer
                //4 <1> process ccci ipc cb function
                is_process_cb = ccci_ipc_process_cb_funp(temp_ipc_ilm);
                //4 <2> Determine send ilm to upper layer or destroy ilm
                if(!is_process_cb){ // no process cb, then send ilm to upper layer
                    msg_send6 (MOD_CCCIIPC, ccci_ipc_maptbl[i].task_id, temp_ipc_ilm->sap_id, temp_ipc_ilm->msg_id, local_param, peer_buff);
                }
            }
            #ifdef CCCI_IT_MODE_CONTROL_CCCI_IPC
            if(ccci_ipc_ch.it_mode == CCCI_IPC_IT_LB){
                CCCI_KAL_MSG_TO_AP_MSG(temp_ipc_ilm->msg_id, temp_ipc_ilm->msg_id);
                bufp->reserved |=  (AP_UINFY_ID_FLAG | temp_ipc_ilm->src_mod_id ); // for Loopback to ccci ipc port 1
                bufp->channel   =  ccci_ipc_ch.send_channel; // for Loopback
                ccci_debug_add_seq(bufp, CCCI_DEBUG_ASSERT_BIT); // add ccci seq
            }
            #endif 
            /* BUGFIX: invalidate the whole ILM struct, not just
             * sizeof(pointer) bytes (prevent cache coherence problem) */
            QBM_CACHE_INVALID(temp_ipc_ilm, sizeof(ipc_ilm_t));
            gpd_num++;
            if (current_gpd == current_ior->last_gpd){
                break;
            }
            else{    
                current_gpd = next_gpd;
            }   

        }// process gpds
        reload_ior = current_ior;
        current_ior = current_ior->next_request;
        reload_ior->next_request = NULL;

        #ifdef __SDIOC_PULL_Q_ENH_DL__
            reload_ior->num_gpd = gpd_num;
        #endif

        #ifdef __CCCI_IPC_UT__
            qbmt_dest_q(reload_ior->first_gpd, reload_ior->last_gpd);
        #else
            #ifdef CCCI_IT_MODE_CONTROL_CCCI_IPC
            if(ccci_ipc_ch.it_mode == CCCI_IPC_IT_LB){
                ccci_ipc_ch.ccci_write_gpd(ccci_ipc_ch.send_channel, reload_ior, NULL);
                ccci_ipc_ch.reload_rgpd_number -= gpd_num;
                ccci_ipc_reload_rgpds();
            }else
            #endif
            {
                /* reset and re-arm the consumed Rx GPDs */
                CCCIDEV_RST_CCCI_COMM_GPD_LIST(reload_ior->first_gpd, reload_ior->last_gpd);
                ccci_ipc_ch.ccci_write_gpd(ccci_ipc_ch.receive_channel, reload_ior, NULL);
            }
        #endif 
    }//process iors
    ccci_ipc_trace(CCCI_IPC_TRACE, CCCI_IPC_MOD_DATA, CCCI_RECEIVE_MSG_FUNC_PASS_TRA);
}