void usbc_at_exception(void)
{
    /* Call-count throttle: static, so it persists across invocations.
     * The body below only runs once this function has been entered more
     * than 5000 times; earlier calls return immediately. */
    static kal_uint32   cnt = 0;
    kal_uint32     mask; 
    kal_uint32     num_gpd;
    void          *head_gpd;
    void          *tail_gpd;
    void          *rx_gpd;
    void          *next_gpd;
    usbc_except_link_st_e   link_state;

    if (cnt++ < 5000) {
        return;
    }

    /* Allocate 2 DL GPDs for the exception RX path.
     * NOTE(review): the allocation is a side effect inside ASSERT(); if
     * ASSERT compiles to a no-op in release builds the allocation vanishes
     * and head_gpd/tail_gpd stay uninitialized — confirm ASSERT is always
     * enabled in this build (CERT C EXP31-C: no side effects in asserts). */
    ASSERT(2 == qbmt_alloc_q_no_tail(QBM_TYPE_HIF_DL, 2, &head_gpd, &tail_gpd));
    for (rx_gpd = head_gpd; rx_gpd; rx_gpd = next_gpd) {
        next_gpd = QBM_DES_GET_NEXT(rx_gpd);

        /* Program the GPD to accept up to 512 bytes, refresh its checksum,
         * and flush it from cache so the HIF hardware sees the update. */
        QBM_DES_SET_ALLOW_LEN(rx_gpd, 512);
        qbm_cal_set_checksum((kal_uint8*)rx_gpd);
        QBM_CACHE_FLUSH(rx_gpd, sizeof(qbm_gpd));
        /* NOTE(review): rx_gpd is advanced here AND by the for-update (both
         * to the same next_gpd computed above), and the tail test below runs
         * on the already-advanced pointer — so tail_gpd itself is never
         * programmed. Presumably intentional for a "no_tail" queue where the
         * tail GPD carries no data; verify against qbmt_alloc_q_no_tail. */
        rx_gpd = QBM_DES_GET_NEXT(rx_gpd);

        if (rx_gpd == tail_gpd) break;
    }

    /* Mask interrupts for the remainder of the exception-mode servicing. */
    mask = SaveAndSetIRQMask();

    /* Re-describe the device as a 3-pipe CDC ACM function (COMM-IN,
     * DATA-IN, DATA-OUT) with AT-channel callbacks; per-pipe completion
     * and stall notifications are not used in exception mode. */
    dev_info_s.class_type = USBC_CLASS_TYPE_CDC_ACM;
    dev_info_s.total_pipes = 3;
    dev_info_s.pipe_type[0] = USBC_PIPE_TYPE_CDC_ACM_COMM_IN;
    dev_info_s.pipe_type[1] = USBC_PIPE_TYPE_CDC_ACM_DATA_IN;
    dev_info_s.pipe_type[2] = USBC_PIPE_TYPE_CDC_ACM_DATA_OUT;
    dev_info_s.notify_usb_state = usbc_at_notify_usb_state;
    dev_info_s.notify_usb_speed = usbc_at_notify_usb_speed;
    dev_info_s.notify_control_setup_packet = usbc_at_notify_control_setup_packet;
    dev_info_s.notify_control_complete = usbc_at_notify_control_complete;
    dev_info_s.notify_alternate_setting = NULL;
    dev_info_s.notify_pipe_complete[0] = NULL;
    dev_info_s.notify_pipe_complete[1] = NULL;
    dev_info_s.notify_pipe_complete[2] = NULL;
    dev_info_s.notify_pipe_stall[0] = NULL;
    dev_info_s.notify_pipe_stall[1] = NULL;
    dev_info_s.notify_pipe_stall[2] = NULL;

    /* Reset the channel with the new device description, re-enumerate,
     * then submit the RX GPD chain to pipe 1's queue. Each step is fatal
     * on failure (exception context — nothing to fall back to). */
    dev_inst_s = usbc_except_reset_ch(&dev_info_s);
    ASSERT(dev_inst_s);
    ASSERT(usbc_except_enum_loop());

    ASSERT(usbc_except_init());
    ASSERT(usbc_except_submit_gpd(dev_inst_s->id, dev_inst_s->queue_no_for_pipe[1], head_gpd, tail_gpd));

    /* Busy-poll the HIF forever; exception mode never returns to normal
     * operation. */
    do {
        usbc_except_hif_poll(dev_inst_s->id);

        ASSERT(usbc_except_poll_queue(dev_inst_s->id, dev_inst_s->queue_no_for_pipe[1], &head_gpd, &tail_gpd, &num_gpd));
#if 0
/* under construction !*/
/* under construction !*/
/* under construction !*/
/* under construction !*/
/* under construction !*/
#endif

        ASSERT(usbc_except_hif_state(dev_inst_s->id, dev_inst_s->queue_no_for_pipe[1], &link_state));
#if 0
/* under construction !*/
#endif

    } while(1);

    /* NOTE(review): unreachable — the while(1) above never exits, so the
     * IRQ mask is never restored. Presumably acceptable in a terminal
     * exception handler; confirm. */
    RestoreIRQMask(mask);
}
/*!
 * @function        cccitty_ttyc_hdr_cmd_assign_rx_ior
 * @brief           TTY Core UL RPGD reload command, TTY_CMD_ASSIGN_RX_IOR, handler
 *                  Reference cdcacm_ttyhdr_cmd_assign_rx_ior -> usbc_class_device_submit_io_request
 *                  Reference ccmni_ipc_dl
 * @param port      [IN] UART port number
 * @param p_ior     [IN] Reload UL RGPD io_request
 * @param ownerid   [IN] module_type for the one opening this port, ex. MOD_xxx
 *
 * @return          DCL_STATUS
 *                  STATUS_OK:   success
 *                  STATUS_FAIL: ccci_write_gpd fail
 */
DCL_STATUS cccitty_ttyc_hdr_cmd_assign_rx_ior(UART_PORT port, ccci_io_request_t *p_ior, module_type ownerid)
{
    cccitty_dev_t       *p_cccidev = cccitty_get_dev_instance(CCCITTY_UARTP_TO_DEVID(port));
    cccitty_inst_t      *p_cccitty = cccitty_get_instance();
    ccci_io_request_t   *p_curr_ior, *p_next_ior;
    ccci_io_request_t   reload_ior;
    kal_uint32          gpd_num, gpd_num_rec;
    qbm_gpd             *p_gpd_h, *p_gpd_t;
    cccitty_deq_req_t   *cccitty_deq_req;
    kal_uint32          to_alloc, num_alloc;
    kal_int32           tmp;
    kal_bool            first_reload = KAL_FALSE;
    CCCI_RETURNVAL_T    ccci_ret;

    /* Device must be opened/active to accept RGPD reloads. */
    if(!cccitty_dev_active(p_cccidev)){
        cccitty_trace(CCCI_TTY_ERR, CCCI_TTY_MOD_UL, CCCI_TTY_TR_ASSIGN_RX_IOR_STATE_ERR, p_cccidev->state);
        return STATUS_NOT_OPENED;
    }
    /* A NULL or empty IOR is a no-op, not an error to the caller. */
    if(NULL == p_ior){
        cccitty_trace(CCCI_TTY_ERR, CCCI_TTY_MOD_UL, CCCI_TTY_TR_UL_RELOAD_NULL_IOR, port);
        return STATUS_OK;
    }
    if(NULL == p_ior->first_gpd){
        cccitty_trace(CCCI_TTY_ERR, CCCI_TTY_MOD_UL, CCCI_TTY_TR_UL_RELOAD_EMPTY_IOR, port);
        return STATUS_OK;
    }

    if(KAL_TRUE == p_cccidev->ttyc_rgpd_first_reload){
        first_reload = KAL_TRUE;
    }

    /*
    Reload RGPD to HIF before enqueue the RGPD to internal tty queue.
    Otherwise, RGPD may be returned while first reload is not finished yet.
    */
    //4 <1> 1st time Reload the same number RGPD to the HIF    
    if(first_reload){
        /* ASSERT if there's already RGPD loaded in HIF */
        EXT_ASSERT(0 >= p_cccidev->hwo_rgpd_cnt, p_cccidev->hwo_rgpd_cnt, 0, 0);

        /* Construct reload_ior for HIF reload: allocate the full UL RGPD
         * budget for this channel in one shot. */
        to_alloc = p_cccidev->hif_ul_ttl_rgpd_cnt;
        reload_ior.next_request = NULL;
        num_alloc = qbmt_alloc_q_no_tail(CCCI_TTY_UL_BUF_TYPE, to_alloc, (void **)&reload_ior.first_gpd, (void **)&reload_ior.last_gpd);
        /* Fatal if the pool cannot supply the pre-allocated RGPD budget. */
        if (num_alloc != to_alloc)
        {
            cccitty_trace(CCCI_TTY_ERR, CCCI_TTY_MOD_UL, CCCI_TTY_TR_TTYC_RGPD_INIT_ERR, to_alloc, num_alloc);
            EXT_ASSERT(KAL_FALSE, to_alloc, num_alloc, 0);
        }
#ifdef __SDIOC_PULL_Q_ENH_DL__
        reload_ior.num_gpd = num_alloc;
#endif
        ccci_ret = p_cccitty->ccci_write_gpd(p_cccidev->ccci_ch.cccitty_ch_ul, &reload_ior, NULL);
        if(CCCI_SUCCESS != ccci_ret){
            cccitty_trace(CCCI_TTY_ERR, CCCI_TTY_MOD_UL, CCCI_TTY_TR_RLD_CCCI_WRITE_FAIL, ccci_ret);
            return STATUS_FAIL;
        }
        CCCI_TTY_LOCK(p_cccidev->cccitty_mutex);
        {
            p_cccidev->hwo_rgpd_cnt += num_alloc;
            /* snapshot under lock to prevent interruption by SDIOCORE context */
            tmp = p_cccidev->hwo_rgpd_cnt;
            p_cccidev->ttyc_rgpd_first_reload = KAL_FALSE;
        }
        CCCI_TTY_UNLOCK(p_cccidev->cccitty_mutex);
        cccitty_trace(CCCI_TTY_TRACE, CCCI_TTY_MOD_UL, CCCI_TTY_TR_HIF_RGPD_RELOAD1,p_cccidev->dev_id, num_alloc, tmp);
    }
    
    //4 <2> check ttyc_rgpd_type
    if(CCCI_TTY_INVALID_GPD_TYPE == p_cccidev->ttyc_rgpd_type){
        /* if rgpd queue hasn't been initialized, latch the type from this ior */
        p_cccidev->ttyc_rgpd_type = QBM_GET_TYPE(p_ior->first_gpd);
    }else{
        /* if rgpd type has been registered, new ior must match the type */
        //4 <NOTE> only check the 1st gpd
        EXT_ASSERT(p_cccidev->ttyc_rgpd_type == QBM_GET_TYPE(p_ior->first_gpd), p_cccidev->ttyc_rgpd_type, QBM_GET_TYPE(p_ior->first_gpd), 0);
    }

    //4 <3> traverse the ior and queue all the gpd
    to_alloc = 0;
    p_curr_ior = (ccci_io_request_t*)p_ior;
    do {
        p_next_ior = p_curr_ior->next_request;
        CCCITTY_FIX_IOR_NULL_LAST(p_curr_ior);

        /* BUGFIX: read the GPD list of the IOR being visited (p_curr_ior),
         * not the head IOR (p_ior) — the original enqueued the first IOR's
         * GPD list once per chained request, dropping the rest. */
        p_gpd_h = p_curr_ior->first_gpd;
        p_gpd_t = p_curr_ior->last_gpd;
        gpd_num = CCCITTY_GET_GPD_LIST_SIZE(p_gpd_h, p_gpd_t);
        to_alloc += gpd_num;
        
        CCCI_TTY_LOCK(p_cccidev->cccitty_mutex);
        {
            CCCITTY_QBM_ENQ(p_gpd_h, p_gpd_t, (void **)&p_cccidev->ttyc_rgpd_q_h, (void **)&p_cccidev->ttyc_rgpd_q_t);
            p_cccidev->ttyc_rgpd_cnt += gpd_num;
        }
        CCCI_TTY_UNLOCK(p_cccidev->cccitty_mutex);

        p_curr_ior = p_next_ior;
    } while ( p_curr_ior != NULL );

    //4 <4> check ttyc_rgpd_cnt matches ttyc_rgpd_q size
    CCCI_TTY_LOCK(p_cccidev->cccitty_mutex);
    {
        p_gpd_h = p_cccidev->ttyc_rgpd_q_h;
        p_gpd_t = p_cccidev->ttyc_rgpd_q_t;
        gpd_num_rec = p_cccidev->ttyc_rgpd_cnt;
        gpd_num = CCCITTY_GET_GPD_LIST_SIZE(p_gpd_h, p_gpd_t);
    }
    CCCI_TTY_UNLOCK(p_cccidev->cccitty_mutex);
    
    
    if(gpd_num_rec != gpd_num){
        /* warning: the recorded RGPD count disagrees with the actual queue size */
        cccitty_trace(CCCI_TTY_ERR, CCCI_TTY_MOD_UL, CCCI_TTY_TR_TTYC_RGPD_Q_NUM_ERR, gpd_num, gpd_num_rec);
        cccitty_trace(CCCI_TTY_ERR, CCCI_TTY_MOD_UL, CCCI_TTY_TR_TTYC_RGPD_Q_NUM_ERR_ACT1, gpd_num, gpd_num_rec);
    }

    //cccitty_trace(CCCI_TTY_TRACE, CCCI_TTY_MOD_UL, CCCI_TTY_TR_RGPD_RELOAD, CCCITTY_UARTP_TO_DEVID(port), to_alloc, p_cccidev->ttyc_rgpd_cnt);
    cccitty_trace(CCCI_TTY_TRACE, CCCI_TTY_MOD_UL, CCCI_TTY_TR_RGPD_RELOAD, CCCITTY_UARTP_TO_DEVID(port), to_alloc, gpd_num_rec);
       
    //4 <5> if ( hif_ul_rgpd_cnt != 0 ), sends MSG_ID_CCCI_TTY_UL_DEQUEUE_REQ to MOD_CCCITTY
    if(p_cccidev->hif_ul_rgpd_cnt){
        if(KAL_FALSE == p_cccidev->dev_ul_processing){
            CCCI_TTY_LOCK(p_cccidev->cccitty_mutex);
            {
                p_cccidev->dev_ul_processing = KAL_TRUE;
            }
            CCCI_TTY_UNLOCK(p_cccidev->cccitty_mutex);
            cccitty_deq_req = (cccitty_deq_req_t *)construct_local_para(sizeof(cccitty_deq_req_t), 0);
            cccitty_deq_req->dev = p_cccidev;
            msg_send6(MOD_CCCITTY,                                  /* src_mod_id */
                      MOD_CCCITTY,                                  /* dest_mod_id */
                      0,                                            /* sap_id */
                      MSG_ID_CCCI_TTY_UL_DEQUEUE_REQ,               /* msg_id */
                      (struct local_para_struct *)cccitty_deq_req,  /* local_para_ptr */
                      NULL);                                        /* peer_buff_ptr */
        }else{
            /* there's MSG_ID_CCCI_TTY_UL_DEQUEUE_REQ message pending */
        }
    }
    return STATUS_OK;
}
/* NOTE: "Пример #3" / "0" — stray non-code text from source extraction,
 * commented out so the file remains compilable. */
/*************************************************************************
* FUNCTION
*  ccci_ipc_send_msg
*
* DESCRIPTION
*  This function is the internal api to send message
*
* PARAMETERS
*  ipc_task_id     -  
*  buffer_ptr      -
*  msg_size        -
*  wait_mode       -
*  message_to_head -
*
* RETURNS
*  status - success/fail
*
*************************************************************************/
kal_bool ccci_ipc_send_msg(kal_uint32 ipc_task_id, void *buffer_ptr, kal_uint16 msg_size,	kal_wait_mode wait_mode, kal_bool message_to_head)
{
    kal_uint32 i, j ;
    kal_uint32 retrieved_events = 0, orig_local_addr = 0 , orig_peer_addr = 0, update_buff_addr=0;
    kal_int32 result = CCCI_SUCCESS;
    ipc_ilm_t	*temp_ipc_ilm = (ipc_ilm_t *)buffer_ptr;
    ccci_io_request_t ior = {0};
    CCCI_BUFF_T *p_ccci_buff;
    kal_uint32 len = 0;

    ccci_ipc_trace(CCCI_IPC_TRACE, CCCI_IPC_MOD_DATA, CCCI_SEND_MSG_FUNC_TRA);
    ccci_ipc_trace(CCCI_IPC_TRACE, CCCI_IPC_MOD_DATA, CCCI_SEND_MSG_TRA_ILM,
                temp_ipc_ilm, temp_ipc_ilm->src_mod_id, temp_ipc_ilm->dest_mod_id,
                temp_ipc_ilm->sap_id, temp_ipc_ilm->msg_id,
                temp_ipc_ilm->local_para_ptr, temp_ipc_ilm->peer_buff_ptr);

    /* Resolve destination ext queue id from the task-id mapping table. */
    for (i = 0; i < MAX_CCCI_IPC_TASKS; i++)
    {
        if ( ccci_ipc_maptbl[i].task_id == ipc_task_id )
        {
            break;
        }
    }

    /* Resolve source ext queue id from the task-id mapping table. */
    for (j = 0; j < MAX_CCCI_IPC_TASKS; j++)
    {
        if ( ccci_ipc_maptbl[j].task_id == temp_ipc_ilm->src_mod_id )
        {
            break;
        }
    }

    /* Check src mod id; if it's not defined in CCCI IPC, don't clear the
     * used bit (buffer_ptr is not one of our ilm slots). */
    if(j >= MAX_CCCI_IPC_TASKS)
    {
        ccci_ipc_trace(CCCI_IPC_ERR, CCCI_IPC_MOD_DATA, CCCI_SEND_MSG_FUNC_TASKID_ERROR, ipc_task_id, temp_ipc_ilm->src_mod_id);
        return KAL_FALSE;
    }

    /* Destination ext queue id could not be found. */
    if (i >= MAX_CCCI_IPC_TASKS)
    {
        ccci_ipc_trace(CCCI_IPC_ERR, CCCI_IPC_MOD_DATA, CCCI_SEND_MSG_FUNC_TASKID_ERROR, ipc_task_id, temp_ipc_ilm->src_mod_id);
        ((CCCI_IPC_ILM_T*)buffer_ptr)->used = 0;
        return KAL_FALSE;
    }

    /* Destination ext queue id must target the AP side. */
    if ((ccci_ipc_maptbl[i].extq_id & AP_UINFY_ID_FLAG) == 0)
    {
        ccci_ipc_trace(CCCI_IPC_ERR, CCCI_IPC_MOD_DATA, CCCI_SEND_MSG_FUNC_DESTID_ERROR, ipc_task_id);
        ((CCCI_IPC_ILM_T*)buffer_ptr)->used = 0;
        return KAL_FALSE;
    }

    /* The ilm buffer must come from ipc_msgsvc_allocate_ilm (slot j). */
    if (buffer_ptr != &ccci_ipc_ilm_arr[j].ipc_ilm){
        ccci_ipc_trace(CCCI_IPC_ERR, CCCI_IPC_MOD_DATA, CCCI_SEND_MSG_FUNC_ILM_ERROR);
        return KAL_FALSE;
    }

    /* Total payload: CCCI header + ilm + optional local_para + optional
     * peer_buff (header, pdu and both slack spaces). */
    len = sizeof(CCCI_BUFF_T) + sizeof(ipc_ilm_t);
    if (temp_ipc_ilm->local_para_ptr != NULL){
        len += temp_ipc_ilm->local_para_ptr->msg_len ;
    }
    if( temp_ipc_ilm->peer_buff_ptr != NULL){
        len += sizeof(peer_buff_struct)
            + temp_ipc_ilm->peer_buff_ptr->pdu_len
            + temp_ipc_ilm->peer_buff_ptr->free_header_space
            + temp_ipc_ilm->peer_buff_ptr->free_tail_space;
    }
    /* Assert if ilm size exceeds the CCCI_IPC_GPD size. */
    EXT_ASSERT(len < CCCI_IPC_GPD_SIZE, len, CCCI_IPC_GPD_SIZE, 0);

    /* Use critical section to protect ENTER */
    CCCI_IPC_ENTER_CRITICAL_SECTION
    if (KAL_TRUE == kal_query_systemInit()){ // polling mode
        ior.first_gpd = ccci_ipc_ch.p_polling_gpd;
        ior.last_gpd  = ccci_ipc_ch.p_polling_gpd;
    }
    else{
#ifdef __SDIOC_PULL_Q_ENH_DL__
        ior.num_gpd =
#endif
        qbmt_alloc_q_no_tail(
                            CCCI_IPC_GPD_TYPE,            /* type */
                            1,                            /* buff_num */
                            (void **)(&ior.first_gpd),    /* pp_head */
                            (void **)(&ior.last_gpd));    /* pp_tail */
        if(ior.first_gpd == NULL){
            ccci_ipc_trace(CCCI_IPC_ERR, CCCI_IPC_MOD_DATA, CCCI_SEND_MSG_FUNC_ALLOC_GPD_ERROR);
            /* BUGFIX: leave the critical section and release the ilm slot
             * before bailing out — the original returned while still
             * holding the critical section and with used left set. */
            CCCI_IPC_EXIT_CRITICAL_SECTION
            ((CCCI_IPC_ILM_T*)buffer_ptr)->used = 0;
            return KAL_FALSE;
        }
    }

    /* Initialize GPD CCCI header content. */
    p_ccci_buff = CCCIDEV_GET_QBM_DATAPTR(ior.first_gpd);
    p_ccci_buff->data[1] = (kal_uint32)len;
    p_ccci_buff->channel = (kal_uint32)ccci_ipc_ch.send_channel;
    p_ccci_buff->reserved = (kal_uint32)ccci_ipc_maptbl[i].extq_id;
    ccci_debug_add_seq(p_ccci_buff, CCCI_DEBUG_ASSERT_BIT); // add ccci seq
    QBM_DES_SET_DATALEN(ior.first_gpd, p_ccci_buff->data[1]);
    QBM_DES_SET_DATALEN(ior.first_gpd->p_data_tbd,  p_ccci_buff->data[1]);
    qbm_cal_set_checksum((kal_uint8 *)ior.first_gpd);
    qbm_cal_set_checksum((kal_uint8 *)ior.first_gpd->p_data_tbd);
    QBM_CACHE_FLUSH(ior.first_gpd, sizeof(qbm_gpd));
    QBM_CACHE_FLUSH(ior.first_gpd->p_data_tbd, sizeof(qbm_gpd));


    /* Copy the ilm into the GPD, translating ids to AP conventions. */
    temp_ipc_ilm->src_mod_id = ccci_ipc_maptbl[j].extq_id;
    update_buff_addr = (kal_uint32)p_ccci_buff;
    update_buff_addr += sizeof(CCCI_BUFF_T);
    CCCI_KAL_MSG_TO_AP_MSG(temp_ipc_ilm->msg_id, temp_ipc_ilm->msg_id);
    kal_mem_cpy((kal_uint8 *)update_buff_addr ,(kal_uint8 *)temp_ipc_ilm, sizeof(ipc_ilm_t));

    if (temp_ipc_ilm->local_para_ptr != NULL){
        /* Copy local_para_struct into the GPD, right after the ilm. */
        update_buff_addr += sizeof(ipc_ilm_t); //24 bytes
        orig_local_addr = update_buff_addr;
        kal_mem_cpy((kal_uint8 *)update_buff_addr,(kal_uint8 *)temp_ipc_ilm->local_para_ptr, temp_ipc_ilm->local_para_ptr->msg_len);
    }

    if( temp_ipc_ilm->peer_buff_ptr != NULL){
        /* Copy peer_buff_struct into the GPD, after local_para (if any). */
        if (temp_ipc_ilm->local_para_ptr != NULL){
            update_buff_addr += temp_ipc_ilm->local_para_ptr->msg_len;//should be 4 bytes alignment??
        }
        else{
            update_buff_addr += sizeof(ipc_ilm_t); //24 bytes
        }
        orig_peer_addr = update_buff_addr;
        kal_mem_cpy((kal_uint8 *)update_buff_addr,(kal_uint8 *)temp_ipc_ilm->peer_buff_ptr,
                      sizeof(peer_buff_struct)
                      + temp_ipc_ilm->peer_buff_ptr->pdu_len
                      + temp_ipc_ilm->peer_buff_ptr->free_header_space
                      + temp_ipc_ilm->peer_buff_ptr->free_tail_space);
    }

    /* Free the originals; re-point the ilm at the in-GPD copies (non-NULL
     * pointer signals "content present" to the receiver). */
    free_local_para(temp_ipc_ilm->local_para_ptr);
    temp_ipc_ilm->local_para_ptr = (local_para_struct *)orig_local_addr;

    free_peer_buff(temp_ipc_ilm->peer_buff_ptr);
    temp_ipc_ilm->peer_buff_ptr = (peer_buff_struct *)orig_peer_addr;

    QBM_CACHE_FLUSH(p_ccci_buff, len);

    if (KAL_TRUE == kal_query_systemInit()){ // polling mode
        result = ccci_polling_io(ccci_ipc_ch.send_channel, ccci_ipc_ch.p_polling_gpd, KAL_TRUE);
        CCCIDEV_RST_CCCI_COMM_GPD_LIST(ccci_ipc_ch.p_polling_gpd,ccci_ipc_ch.p_polling_gpd);
    }
    else{
        result = ccci_ipc_ch.ccci_write_gpd(ccci_ipc_ch.send_channel, &ior, NULL);

        if (KAL_INFINITE_WAIT == wait_mode && CCCI_SUCCESS == result){
            /* Wait for feedback via the per-destination retrieve event. */
            kal_retrieve_eg_events(ccci_ipc_ch.event, 1 << i, KAL_AND_CONSUME,  &retrieved_events, KAL_SUSPEND);
        }
    }
    /* Exit critical section */
    CCCI_IPC_EXIT_CRITICAL_SECTION
    ((CCCI_IPC_ILM_T*)buffer_ptr)->used = 0;

    ccci_ipc_trace(CCCI_IPC_TRACE, CCCI_IPC_MOD_DATA, CCCI_SEND_MSG_TRA_CCCI,
                p_ccci_buff->data[0], p_ccci_buff->data[1], p_ccci_buff->channel, p_ccci_buff->reserved);
    ccci_ipc_trace(CCCI_IPC_TRACE, CCCI_IPC_MOD_DATA, CCCI_SEND_MSG_FUNC_PASS_TRA);

    /* Finish */
    if (result == CCCI_SUCCESS){
        return KAL_TRUE;
    }
    else{
        return KAL_FALSE;
    }

}