/*!
 * @function cccitty_ttyc_hdr_cmd_clr_rx_buf
 * @brief    Rx Flush handler for CCCI TTY.
 *           Reference: cdcacm_ttyhdr_cmd_clr_rx_buf
 *
 * @param port      [IN] UART port number
 * @param ownerid   [IN] module_type of the one opening this port, e.g. MOD_xxx
 * @param check_own [IN] check whether ownerid owns the p_cccidev
 *
 * @return DCL_STATUS, always STATUS_OK
 */
DCL_STATUS cccitty_ttyc_hdr_cmd_clr_rx_buf(UART_PORT port, module_type ownerid, kal_bool check_own)
{
    cccitty_dev_t     *p_cccidev = cccitty_get_dev_instance(CCCITTY_UARTP_TO_DEVID(port));
    cccitty_inst_t    *p_cccitty = cccitty_get_instance();
    qbm_gpd           *p_gpd_h, *p_gpd_t;
    ccci_io_request_t  ior;
    CCCI_RETURNVAL_T   ccci_ret;
    kal_uint32         gpd_num, gpd_num_rec;
    kal_int32          tmp;

    if (KAL_TRUE == check_own) {
        EXT_ASSERT(ownerid == p_cccidev->ownerid, ownerid, p_cccidev->ownerid, port);
    }
    EXT_ASSERT(((p_cccidev->state > CCCI_TTY_DEV_DETACHED) && (p_cccidev->state < CCCI_TTY_DEV_STATE_CNT)),
               p_cccidev->state, port, ownerid);

    //4 <1> Clean up the TTY core RGPD queue, ttyc_rgpd_q, by p_cccitty->dcl_rx
    /* detach the queue under the lock */
    CCCI_TTY_LOCK(p_cccidev->cccitty_mutex);
    {
        p_gpd_h = p_cccidev->ttyc_rgpd_q_h;
        p_gpd_t = p_cccidev->ttyc_rgpd_q_t;
        p_cccidev->ttyc_rgpd_q_h = NULL;
        p_cccidev->ttyc_rgpd_q_t = NULL;
        gpd_num_rec = p_cccidev->ttyc_rgpd_cnt;
        p_cccidev->ttyc_rgpd_cnt = 0;
        p_cccidev->ttyc_rgpd_type = CCCI_TTY_INVALID_GPD_TYPE; /* assign an invalid type; set again on the next RGPD reload */
    }
    CCCI_TTY_UNLOCK(p_cccidev->cccitty_mutex);

    gpd_num = CCCITTY_GET_GPD_LIST_SIZE(p_gpd_h, p_gpd_t);
    if (gpd_num_rec != gpd_num) {
        /* warning: GPD count does not match */
        cccitty_trace(CCCI_TTY_ERR, CCCI_TTY_MOD_UL, CCCI_TTY_TR_TTYC_RGPD_Q_NUM_ERR, gpd_num, gpd_num_rec);
        cccitty_trace(CCCI_TTY_ERR, CCCI_TTY_MOD_UL, CCCI_TTY_TR_TTYC_RGPD_Q_NUM_ERR_ACT2, gpd_num, gpd_num_rec);
    }
    if (0 != gpd_num) {
        /* construct the ior */
        ior.next_request = NULL;
        ior.first_gpd    = p_gpd_h;
        ior.last_gpd     = p_gpd_t;
        QBM_DES_SET_NEXT(p_gpd_t, NULL);
        p_cccitty->dcl_rx(p_cccidev->tty_handler, MOD_CCCITTY, &ior); /* DclSerialPort_DrvRx */
    }

    //4 <2> Clean up the HIF uplink queue, hif_ul_q, by ccci_write_gpd
    /* detach the queue under the lock */
    CCCI_TTY_LOCK(p_cccidev->cccitty_mutex);
    {
        p_gpd_h = p_cccidev->hif_ul_q_h;
        p_gpd_t = p_cccidev->hif_ul_q_t;
        p_cccidev->hif_ul_q_h = NULL;
        p_cccidev->hif_ul_q_t = NULL;
        gpd_num_rec = p_cccidev->hif_ul_rgpd_cnt;
        p_cccidev->hif_ul_rgpd_cnt = 0;
    }
    CCCI_TTY_UNLOCK(p_cccidev->cccitty_mutex);

    gpd_num = CCCITTY_GET_GPD_LIST_SIZE(p_gpd_h, p_gpd_t);
    if (gpd_num_rec != gpd_num) {
        /* warning: GPD count does not match */
        cccitty_trace(CCCI_TTY_ERR, CCCI_TTY_MOD_UL, CCCI_TTY_TR_HIF_UL_Q_NUM_ERR, gpd_num, gpd_num_rec);
    }
    if (0 != gpd_num) {
        /* construct the ior */
        ior.next_request = NULL;
        ior.first_gpd    = p_gpd_h;
        ior.last_gpd     = p_gpd_t;
        QBM_DES_SET_NEXT(p_gpd_t, NULL);
#ifdef __SDIOC_PULL_Q_ENH_DL__
        ior.num_gpd = gpd_num;
#endif
        ccci_ret = p_cccitty->ccci_write_gpd(p_cccidev->ccci_ch.cccitty_ch_ul, &ior, NULL); /* ccci_write_gpd */
        EXT_ASSERT(CCCI_SUCCESS == ccci_ret, ccci_ret, 0, 0); /* otherwise the input parameter is incorrect */
    }

    CCCI_TTY_LOCK(p_cccidev->cccitty_mutex);
    {
        /* since all the RGPDs were reloaded to HIF, reset the RGPD count to the first-reload state */
        p_cccidev->hwo_rgpd_cnt = p_cccidev->hif_ul_ttl_rgpd_cnt;
        /* copy under the lock to prevent interruption by the SDIOCORE context */
        tmp = p_cccidev->hwo_rgpd_cnt;
    }
    CCCI_TTY_UNLOCK(p_cccidev->cccitty_mutex);
    cccitty_trace(CCCI_TTY_TRACE, CCCI_TTY_MOD_UL, CCCI_TTY_TR_HIF_RGPD_RELOAD3, p_cccidev->dev_id, gpd_num, tmp);

    return STATUS_OK;
}
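/*
 * Illustrative sketch (not part of the original driver): the flush handler
 * above uses a detach-under-lock pattern twice. The whole GPD list is unhooked
 * while the mutex is held, then handed to dcl_rx / ccci_write_gpd outside the
 * critical section, so the lock is held for O(1) time regardless of queue
 * length. The struct and helper names below (example_gpd_q_t,
 * example_detach_all) are hypothetical stand-ins for the ttyc_rgpd_q_* and
 * hif_ul_q_* fields.
 */
#if 0
typedef struct example_gpd_q {
    qbm_gpd    *q_h;  /* queue head */
    qbm_gpd    *q_t;  /* queue tail */
    kal_uint32  cnt;  /* queued GPD count */
} example_gpd_q_t;

static kal_uint32 example_detach_all(cccitty_dev_t *dev, example_gpd_q_t *q,
                                     qbm_gpd **out_h, qbm_gpd **out_t)
{
    kal_uint32 cnt;

    CCCI_TTY_LOCK(dev->cccitty_mutex);
    {
        *out_h = q->q_h;  q->q_h = NULL;
        *out_t = q->q_t;  q->q_t = NULL;
        cnt    = q->cnt;  q->cnt = 0;
    }
    CCCI_TTY_UNLOCK(dev->cccitty_mutex);
    return cnt; /* the caller now owns the detached list and may walk it lock-free */
}
#endif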
/*!
 * @function cccitty_ccci_ul_cb
 * @brief uplink callback function registered to CCCI; CCCI calls it back during uplink processing.
 *        Context: HIF context, e.g. MOD_SDIOCORE
 *        process:
 *        <1> update the RGPD count in HIF
 *        <2> remove the CCCI header, CCCI_BUFF_T
 *        <3> enqueue the RGPD to hif_ul_q_h, or loopback if CCCITTY_LB_ENABLE
 *        <4> send MSG_ID_CCCI_TTY_UL_DEQUEUE_REQ, switching to MOD_CCCITTY context for dequeue (MOD_CCCIDEV)
 *        <5> Error Handling: reload err_reload_cnt RGPDs
 *        The function might free RGPDs in the following cases:
 *        case 1. the CCCI header is corrupt, CCCITTY_RM_CCCI_HEADERS returns false
 *        case 2. cccitty device state != CCCI_TTY_DEV_OPEN, cccitty_dev_active returns false
 *
 * @param channel    [IN] ccci_channel id
 * @param io_request [IN] pointer to the uplink io request
 *
 * @return void
 */
void cccitty_ccci_ul_cb(CCCI_CHANNEL_T channel, ccci_io_request_t *io_request)
{
    cccitty_inst_t     *p_cccitty = cccitty_get_instance();
    cccitty_dev_t      *p_cccidev;
    cccitty_device_id   dev_id = cccitty_get_ul_devid(channel);
    ccci_io_request_t  *curr_ior;
    ccci_io_request_t  *next_ior;
    kal_bool            end_of_list;
    qbm_gpd            *first_gpd;
    qbm_gpd            *last_gpd;
    qbm_gpd            *prev_gpd;
    qbm_gpd            *curr_gpd;
    qbm_gpd            *next_gpd;
    kal_int32           tmp;
    kal_uint32          num_gpd;
    kal_bool            valid_gpd = KAL_FALSE;
    kal_uint32          num_alloc; //, to_alloc;
    CCCI_RETURNVAL_T    ccci_ret;
    kal_bool            ttydev_deq_msg = KAL_FALSE;
    cccitty_deq_req_t  *cccitty_deq_req;
    /* error RGPD handling */
    ccci_io_request_t   err_reload_ior;
    kal_uint32          err_reload_cnt = 0;
    qbm_gpd            *err_gpd_h = NULL;
    qbm_gpd            *err_gpd_t = NULL;
    kal_uint32          NBPS_GPD_NUM = 0;
    CCCI_BUFF_T        *pdata;
    kal_int32           chkseqrtn = 0;

    if (CCCI_TTY_DEV_CNT == dev_id) {
        /* cannot find dev_id for this channel; please check g_cccitty_ccci_ch_mappping */
        return;
    }
    if (NULL == io_request) {
        cccitty_trace(CCCI_TTY_ERR, CCCI_TTY_MOD_UL, CCCI_TTY_TR_UL_NULL_IOR, dev_id, channel);
        return;
    }
    /* ASSERT if an invalid channel number is received */
    EXT_ASSERT(dev_id < CCCI_TTY_DEV_CNT, channel, 0, 0);

    p_cccidev = cccitty_get_dev_instance(dev_id);
    if (!cccitty_dev_active(p_cccidev)) {
        /* device is not in CCCI_TTY_DEV_OPEN state */
        cccitty_trace(CCCI_TTY_ERR, CCCI_TTY_MOD_UL, CCCI_TTY_TR_UL_DEV_NOT_OPEN, p_cccidev->dev_id, cccitty_get_dev_state(p_cccidev));
        /* for the drop-packet case, the sequence number must be checked first */
        /* if MIPS is a concern, this action can be merged into the reset function */
        cccitty_check_ul_gpd_list_sequence(io_request->first_gpd, io_request->last_gpd);
        /* reset the GPDs */
        num_alloc = cccitty_reset_ccci_comm_gpd_list(io_request->first_gpd, io_request->last_gpd);
        //ASSERT (num_alloc == CCCITTY_GET_NONBPS_GPD_LIST_SIZE(io_request->first_gpd, io_request->last_gpd));
        NBPS_GPD_NUM = CCCITTY_GET_NONBPS_GPD_LIST_SIZE(io_request->first_gpd, io_request->last_gpd);
        EXT_ASSERT((num_alloc == NBPS_GPD_NUM), num_alloc, NBPS_GPD_NUM, 0);
        io_request->num_gpd = num_alloc;
        /* reload the GPDs */
        ccci_ret = p_cccitty->ccci_write_gpd(p_cccidev->ccci_ch.cccitty_ch_ul, io_request, NULL);
        if (CCCI_SUCCESS != ccci_ret) {
            /* NOTE!! might cause RGPD leakage here */
            cccitty_trace(CCCI_TTY_ERR, CCCI_TTY_MOD_UL, CCCI_TTY_TR_UL_CCCI_WRITE_FAIL, p_cccidev->dev_id, ccci_ret);
            qbmt_dest_q(io_request->first_gpd, io_request->last_gpd);
        } else {
            CCCI_TTY_LOCK(p_cccidev->cccitty_mutex);
            {
                /* do not increase hwo_rgpd_cnt: these RGPDs belong to another user */
                //p_cccidev->hwo_rgpd_cnt += num_alloc;
                /* copy under the lock to prevent interruption by the SDIOCORE context */
                tmp = p_cccidev->hwo_rgpd_cnt;
            }
            CCCI_TTY_UNLOCK(p_cccidev->cccitty_mutex);
            cccitty_trace(CCCI_TTY_TRACE, CCCI_TTY_MOD_UL, CCCI_TTY_TR_HIF_RGPD_RELOAD5, p_cccidev->dev_id, num_alloc, tmp);
        }
        return;
    }

    for (curr_ior = io_request; curr_ior; curr_ior = next_ior) {
        next_ior  = curr_ior->next_request;
        first_gpd = curr_ior->first_gpd;
        last_gpd  = curr_ior->last_gpd;
        //3 Note: because GPDs might be freed in the following loop, curr_ior must not be accessed from this point on.
        if (first_gpd && last_gpd) {
            //4 <1> update the RGPD count in HIF
            num_gpd = CCCITTY_GET_GPD_LIST_SIZE(first_gpd, last_gpd);
            /* BPS GPDs are NOT allowed inside */
            //ASSERT(num_gpd == CCCITTY_GET_NONBPS_GPD_LIST_SIZE(first_gpd, last_gpd));
            NBPS_GPD_NUM = CCCITTY_GET_NONBPS_GPD_LIST_SIZE(first_gpd, last_gpd);
            EXT_ASSERT((num_gpd == NBPS_GPD_NUM), num_gpd, NBPS_GPD_NUM, 0);
            CCCI_TTY_LOCK(p_cccidev->cccitty_mutex);
            {
                p_cccidev->hwo_rgpd_cnt -= num_gpd;
                tmp = p_cccidev->hwo_rgpd_cnt;
            }
            CCCI_TTY_UNLOCK(p_cccidev->cccitty_mutex);
            cccitty_trace(CCCI_TTY_TRACE, CCCI_TTY_MOD_UL, CCCI_TTY_TR_UL_RCV_GPD, p_cccidev->dev_id, num_gpd, tmp);
            if (p_cccidev->hwo_rgpd_cnt < 1) {
                cccitty_trace(CCCI_TTY_WARN, CCCI_TTY_MOD_UL, CCCI_TTY_TR_UL_RGPD_EMPTY, p_cccidev->dev_id, tmp);
            }

            prev_gpd = NULL;
            end_of_list = KAL_FALSE;
            for (curr_gpd = first_gpd; curr_gpd && !end_of_list; curr_gpd = next_gpd) {
                next_gpd = QBM_DES_GET_NEXT(curr_gpd);
                end_of_list = (curr_gpd == last_gpd);
                /* check the sequence number here! */
                pdata = CCCIDEV_GET_QBM_DATAPTR(curr_gpd);
                chkseqrtn = ccci_debug_check_seq(pdata);
                if (chkseqrtn != CCCI_SUCCESS) {
                    cccitty_trace(CCCI_TTY_ERR, CCCI_TTY_MOD_UL, CCCI_TTY_CHK_UL_SEQ_FAIL, chkseqrtn, channel);
                }
                //4 <2> remove the CCCI header, CCCI_BUFF_T
                valid_gpd = CCCITTY_RM_CCCI_HEADERS(channel, curr_gpd);
                if (KAL_TRUE == valid_gpd) {
                    prev_gpd = curr_gpd;
                } else {
                    p_cccidev->ul_invalid_ttl_cnt++;
                    err_reload_cnt++;
                    cccitty_trace(CCCI_TTY_ERR, CCCI_TTY_MOD_UL, CCCI_TTY_TR_UL_RPGD_ERR, err_reload_cnt, p_cccidev->ul_invalid_ttl_cnt);
                    if (curr_gpd == first_gpd) {
                        if (curr_gpd == last_gpd) {
                            first_gpd = NULL;
                            last_gpd  = NULL;
                            EXT_ASSERT(end_of_list, 0, 0, 0);
                            end_of_list = KAL_TRUE; /* all GPDs in the list are freed; exit the loop after curr_gpd is released */
                        } else {
                            EXT_ASSERT(next_gpd, 0, 0, 0);
                            first_gpd = next_gpd;
                        }
                        prev_gpd = NULL;
                    } else {
                        EXT_ASSERT(prev_gpd, 0, 0, 0);
                        if (curr_gpd == last_gpd) {
                            last_gpd = prev_gpd;
                            QBM_DES_SET_NEXT(prev_gpd, NULL);
                            EXT_ASSERT(end_of_list, 0, 0, 0);
                            end_of_list = KAL_TRUE; /* to exit the loop after curr_gpd is released */
                        } else {
                            EXT_ASSERT(next_gpd, 0, 0, 0);
                            QBM_DES_SET_NEXT(prev_gpd, next_gpd);
                        }
                        qbm_cal_set_checksum((kal_uint8 *)prev_gpd);
                        QBM_CACHE_FLUSH(prev_gpd, sizeof(qbm_gpd));
                    }
                    CCCITTY_QBM_ENQ(curr_gpd, curr_gpd, (void **)&err_gpd_h, (void **)&err_gpd_t);
                }
            } /* for (curr_gpd) */

            //4 <3> enqueue the RGPD to hif_ul_q_h
            if (first_gpd) {
                EXT_ASSERT(last_gpd, 0, 0, 0);
                num_gpd = CCCITTY_GET_GPD_LIST_SIZE(first_gpd, last_gpd);
#if CCCITTY_LB_ENABLE
                // TODO: Provide the loopback function
                //4 nicc_ul2dl_loopback((ccci_io_request_t *)curr_ior, (nicc_dl_func)rndis_on_downlink, dev->ethc_inst);
#else
                /* enqueue to hif_ul_q */
                CCCI_TTY_LOCK(p_cccidev->cccitty_mutex);
                {
                    CCCITTY_QBM_ENQ(first_gpd, last_gpd, (void **)&p_cccidev->hif_ul_q_h, (void **)&p_cccidev->hif_ul_q_t);
                    p_cccidev->hif_ul_rgpd_cnt += num_gpd;
                    tmp = p_cccidev->hif_ul_rgpd_cnt;
                }
                CCCI_TTY_UNLOCK(p_cccidev->cccitty_mutex);
                ttydev_deq_msg = KAL_TRUE;
#endif
            } else {
                EXT_ASSERT((NULL == last_gpd), 0, 0, 0);
                cccitty_trace(CCCI_TTY_ERR, CCCI_TTY_MOD_UL, CCCI_TTY_TR_UL_EMPTY_IOR, p_cccidev->dev_id);
            }
        } else { /* if (first_gpd && last_gpd) */
            cccitty_trace(CCCI_TTY_ERR, CCCI_TTY_MOD_UL, CCCI_TTY_TR_UL_INVALID_IOR, p_cccidev->dev_id, curr_ior, first_gpd, last_gpd);
            EXT_ASSERT(KAL_FALSE, (kal_uint32)first_gpd, (kal_uint32)last_gpd, 0); /* invalid IOR */
        }
    } /* for (curr_ior ...) */

    //4 <4> send MSG_ID_CCCI_TTY_UL_DEQUEUE_REQ
    if (KAL_TRUE == ttydev_deq_msg) {
        CCCI_TTY_LOCK(p_cccidev->cccitty_mutex);
        if (KAL_FALSE == p_cccidev->dev_ul_processing) {
            {
                p_cccidev->dev_ul_processing = KAL_TRUE;
            }
            CCCI_TTY_UNLOCK(p_cccidev->cccitty_mutex);
            cccitty_deq_req = (cccitty_deq_req_t *)construct_local_para(sizeof(cccitty_deq_req_t), 0);
            cccitty_deq_req->dev = p_cccidev;
            msg_send6(MOD_CCCITTY,                                 /* src_mod_id, depending on the HIF type */
                      MOD_CCCITTY,                                 /* dest_mod_id */
                      0,                                           /* sap_id */
                      MSG_ID_CCCI_TTY_UL_DEQUEUE_REQ,              /* msg_id */
                      (struct local_para_struct *)cccitty_deq_req, /* local_para_ptr */
                      NULL);                                       /* peer_buff_ptr */
            cccitty_trace(CCCI_TTY_TRACE, CCCI_TTY_MOD_UL, CCCI_TTY_TR_UL_SEND_DEQ, p_cccidev->dev_id);
        } else {
            CCCI_TTY_UNLOCK(p_cccidev->cccitty_mutex);
            /* a MSG_ID_CCCI_TTY_UL_DEQUEUE_REQ message is already pending */
        }
    }

    //4 <5> Error Handling: reload err_reload_cnt RGPDs
    if (err_reload_cnt > 0) {
        cccitty_trace(CCCI_TTY_ERR, CCCI_TTY_MOD_UL, CCCI_TTY_TR_UL_INVALID_PKT, p_cccidev->dev_id, err_reload_cnt, p_cccidev->ul_invalid_ttl_cnt);
        /* reset the RGPD content with HWO = 1 */
        num_alloc = cccitty_reset_ccci_comm_gpd_list(err_gpd_h, err_gpd_t);
        // num_alloc = CCCITTY_GET_GPD_LIST_SIZE(err_gpd_h, err_gpd_t);
        if (num_alloc != err_reload_cnt) {
            cccitty_trace(CCCI_TTY_ERR, CCCI_TTY_MOD_UL, CCCI_TTY_TR_TTYC_ERR_RGPD_LEAK, p_cccidev->dev_id, num_alloc, err_reload_cnt);
            EXT_ASSERT(num_alloc == err_reload_cnt, num_alloc, err_reload_cnt, 0);
        }
        err_reload_ior.next_request = NULL;
        err_reload_ior.first_gpd    = err_gpd_h;
        err_reload_ior.last_gpd     = err_gpd_t;
        /* reload the GPDs */
        ccci_ret = p_cccitty->ccci_write_gpd(p_cccidev->ccci_ch.cccitty_ch_ul, &err_reload_ior, NULL);
        if (CCCI_SUCCESS != ccci_ret) {
            /* NOTE!! might cause RGPD leakage here */
            cccitty_trace(CCCI_TTY_ERR, CCCI_TTY_MOD_UL, CCCI_TTY_TR_UL_CCCI_WRITE_FAIL, p_cccidev->dev_id, ccci_ret);
            qbmt_dest_q(err_reload_ior.first_gpd, err_reload_ior.last_gpd);
        } else {
            CCCI_TTY_LOCK(p_cccidev->cccitty_mutex);
            {
                /* these error RGPDs came from this device, so hwo_rgpd_cnt is increased back */
                p_cccidev->hwo_rgpd_cnt += num_alloc;
                /* copy under the lock to prevent interruption by the SDIOCORE context */
                tmp = p_cccidev->hwo_rgpd_cnt;
            }
            CCCI_TTY_UNLOCK(p_cccidev->cccitty_mutex);
            cccitty_trace(CCCI_TTY_TRACE, CCCI_TTY_MOD_UL, CCCI_TTY_TR_HIF_RGPD_RELOAD4, p_cccidev->dev_id, num_alloc, tmp);
        }
    }
    return;
}
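/*
 * Illustrative sketch (hypothetical helper name): the dev_ul_processing flag
 * in step <4> above implements message coalescing. Only the enqueue that finds
 * the flag clear posts MSG_ID_CCCI_TTY_UL_DEQUEUE_REQ; the handler clears the
 * flag before draining, so at most one dequeue request is in flight per device.
 */
#if 0
static void example_post_dequeue_once(cccitty_dev_t *dev)
{
    kal_bool post = KAL_FALSE;

    CCCI_TTY_LOCK(dev->cccitty_mutex);
    if (KAL_FALSE == dev->dev_ul_processing) {
        dev->dev_ul_processing = KAL_TRUE; /* claim the pending slot */
        post = KAL_TRUE;
    }
    CCCI_TTY_UNLOCK(dev->cccitty_mutex);

    if (post) {
        /* msg_send6(...) as in cccitty_ccci_ul_cb(); the consumer resets
         * dev_ul_processing at the start of cccitty_ul_deq_hdr(). */
    }
}
#endif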
/*!
 * @function cccitty_ul_deq_hdr
 * @brief MSG_ID_CCCI_TTY_UL_DEQUEUE_REQ message handler; dequeues hif_ul_q and calls back to the TTY core
 *
 * @param p_cccidev [IN] pointer to the cccitty device
 *
 * @return void
 */
void cccitty_ul_deq_hdr(cccitty_dev_t *p_cccidev)
{
    cccitty_inst_t    *p_cccitty = cccitty_get_instance();
    kal_bool           need_memcpy = KAL_FALSE;
    kal_uint32         ttyc_rgpd_cnt, hif_ul_rgpd_cnt;
    kal_uint32         ul_count, deq_count;
    ccci_io_request_t  ul_ior, reload_ior;
    qbm_gpd           *hif_ul_q_pgpd_h, *hif_ul_q_pgpd_t;
    qbm_gpd           *ttyc_rgpd_q_pgpd_h, *ttyc_rgpd_q_pgpd_t;
    kal_uint32         num_alloc; //, to_alloc;
    kal_int32          tmp;
    CCCI_RETURNVAL_T   ccci_ret;

    CCCI_TTY_LOCK(p_cccidev->cccitty_mutex);
    {
        p_cccidev->dev_ul_processing = KAL_FALSE;
    }
    CCCI_TTY_UNLOCK(p_cccidev->cccitty_mutex);

    ttyc_rgpd_cnt   = p_cccidev->ttyc_rgpd_cnt - 1; /* reserve 1 for the tail, like the HIF driver */
    hif_ul_rgpd_cnt = p_cccidev->hif_ul_rgpd_cnt;
    ul_count = MIN(ttyc_rgpd_cnt, hif_ul_rgpd_cnt);
    if (ul_count == 0) {
        /* do nothing if there is no RGPD in ttyc_rgpd_q */
        return;
    }
    cccitty_trace(CCCI_TTY_TRACE, CCCI_TTY_MOD_UL, CCCI_TTY_TR_RGPD_DEQ, p_cccidev->dev_id, p_cccidev->ttyc_rgpd_cnt, p_cccidev->hif_ul_rgpd_cnt, ul_count);

    if (ttyc_rgpd_cnt != 0) {
        /* if there are GPDs in ttyc_rgpd_q, a valid type must be defined */
        if (CCCI_TTY_INVALID_GPD_TYPE == p_cccidev->ttyc_rgpd_type) {
            cccitty_trace(CCCI_TTY_ERR, CCCI_TTY_MOD_UL, CCCI_TTY_TR_TTYC_RGPD_Q_TYPE_ERR, \
                          p_cccidev->ccci_ch, CCCITTY_DEVID_TO_UARTP(p_cccidev->dev_id), p_cccidev->ttyc_rgpd_cnt, p_cccidev->ttyc_rgpd_type);
            EXT_ASSERT(KAL_FALSE, CCCI_TTY_INVALID_GPD_TYPE, p_cccidev->ttyc_rgpd_type, 0);
        }
    }
    EXT_ASSERT(CCCI_TTY_UL_BUF_TYPE == p_cccidev->hif_ul_rgpd_type, p_cccidev->ttyc_rgpd_type, 0, 0);
    need_memcpy = (p_cccidev->hif_ul_rgpd_type == p_cccidev->ttyc_rgpd_type) ? (KAL_FALSE) : (KAL_TRUE);
    if (KAL_TRUE == need_memcpy) {
        //4 <New Rx users are NOT supported>
        cccitty_trace(CCCI_TTY_ERR, CCCI_TTY_MOD_UL, CCCI_TTY_TR_TTYC_NEW_RX_ERR, \
                      p_cccidev->ccci_ch, CCCITTY_DEVID_TO_UARTP(p_cccidev->dev_id), p_cccidev->ttyc_rgpd_cnt, p_cccidev->ttyc_rgpd_type);
        EXT_ASSERT(KAL_FALSE, need_memcpy, p_cccidev->ttyc_rgpd_type, 0);
    }

    ul_ior.next_request     = NULL;
    reload_ior.next_request = NULL;

    //4 <1> dequeue hif_ul_q
    CCCI_TTY_LOCK(p_cccidev->cccitty_mutex);
    {
        deq_count = CCCIDEV_QBM_DEQ((void **)&p_cccidev->hif_ul_q_h, (void **)&p_cccidev->hif_ul_q_t,
                                    ul_count, (void **)&hif_ul_q_pgpd_h, (void **)&hif_ul_q_pgpd_t);
        EXT_ASSERT(deq_count == ul_count, deq_count, ul_count, 0);
        p_cccidev->hif_ul_rgpd_cnt -= ul_count;
    }
    CCCI_TTY_UNLOCK(p_cccidev->cccitty_mutex);
    QBM_DES_SET_NEXT(hif_ul_q_pgpd_t, NULL);

    //4 <2> dequeue ttyc_rgpd_q
    CCCI_TTY_LOCK(p_cccidev->cccitty_mutex);
    {
        deq_count = CCCIDEV_QBM_DEQ((void **)&p_cccidev->ttyc_rgpd_q_h, (void **)&p_cccidev->ttyc_rgpd_q_t,
                                    ul_count, (void **)&ttyc_rgpd_q_pgpd_h, (void **)&ttyc_rgpd_q_pgpd_t);
        EXT_ASSERT(deq_count == ul_count, deq_count, ul_count, 0);
        p_cccidev->ttyc_rgpd_cnt -= ul_count;
    }
    CCCI_TTY_UNLOCK(p_cccidev->cccitty_mutex);
    /* at least one tail RGPD must remain in ttyc_rgpd_q */
    EXT_ASSERT(p_cccidev->ttyc_rgpd_cnt >= 1, p_cccidev->ttyc_rgpd_cnt, 0, 0);
    QBM_DES_SET_NEXT(hif_ul_q_pgpd_t, NULL);

    if (KAL_FALSE == need_memcpy) {
        ul_ior.first_gpd = hif_ul_q_pgpd_h;
        ul_ior.last_gpd  = hif_ul_q_pgpd_t;
        // /* Free the entries dequeued from ttyc_rgpd_q, # of RGPDs = ul_count */
        // qbmt_dest_q(ttyc_rgpd_q_pgpd_h, ttyc_rgpd_q_pgpd_t);
    } else {
        //4 <New Rx users are NOT supported>
        cccitty_trace(CCCI_TTY_ERR, CCCI_TTY_MOD_UL, CCCI_TTY_TR_TTYC_NEW_RX_ERR, \
                      p_cccidev->ccci_ch, CCCITTY_DEVID_TO_UARTP(p_cccidev->dev_id), p_cccidev->ttyc_rgpd_cnt, p_cccidev->ttyc_rgpd_type);
        EXT_ASSERT(KAL_FALSE, p_cccidev->dev_id, p_cccidev->ttyc_rgpd_type, 0);
    }

    //4 <4> p_cccitty->dcl_rx
    p_cccitty->dcl_rx(p_cccidev->tty_handler, MOD_CCCITTY, &ul_ior);

    //4 <5> Reload RGPD
    //to_alloc = ul_count;
    //num_alloc = qbmt_alloc_q_no_tail(CCCI_TTY_UL_BUF_TYPE, to_alloc, &reload_ior.first_gpd, &reload_ior.last_gpd);
    num_alloc = cccitty_reset_ccci_comm_gpd_list(ttyc_rgpd_q_pgpd_h, ttyc_rgpd_q_pgpd_t);
    if (num_alloc != ul_count) {
        /* exactly ul_count RGPDs were dequeued above, so the reset must return the same number */
        cccitty_trace(CCCI_TTY_ERR, CCCI_TTY_MOD_UL, CCCI_TTY_TR_TTYC_RGPD_LEAK, p_cccidev->dev_id, ul_count, num_alloc);
        EXT_ASSERT(KAL_FALSE, num_alloc, ul_count, 0);
    }
    reload_ior.first_gpd = ttyc_rgpd_q_pgpd_h;
    reload_ior.last_gpd  = ttyc_rgpd_q_pgpd_t;
#ifdef __SDIOC_PULL_Q_ENH_DL__
    reload_ior.num_gpd = num_alloc; /* not used; just pass it through */
#endif
    ccci_ret = p_cccitty->ccci_write_gpd(p_cccidev->ccci_ch.cccitty_ch_ul, &reload_ior, NULL);
    if (CCCI_SUCCESS != ccci_ret) {
        cccitty_trace(CCCI_TTY_ERR, CCCI_TTY_MOD_UL, CCCI_TTY_TR_DEQ_CCCI_WRITE_FAIL, p_cccidev->dev_id, ccci_ret);
    } else {
        CCCI_TTY_LOCK(p_cccidev->cccitty_mutex);
        {
            p_cccidev->hwo_rgpd_cnt += num_alloc;
            /* copy under the lock to prevent interruption by the SDIOCORE context */
            tmp = p_cccidev->hwo_rgpd_cnt;
        }
        CCCI_TTY_UNLOCK(p_cccidev->cccitty_mutex);
        cccitty_trace(CCCI_TTY_TRACE, CCCI_TTY_MOD_UL, CCCI_TTY_TR_HIF_RGPD_RELOAD2, p_cccidev->dev_id, num_alloc, tmp);
    }
    return;
}
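/*
 * Illustrative sketch (hypothetical helper): the dequeue count above is the
 * minimum of what the TTY core can accept and what HIF has queued, with one
 * RGPD always left behind as the tail, mirroring the HIF driver convention.
 * Assumes the tail RGPD is always present (ttyc_rgpd_cnt >= 1), as asserted
 * in the handler above; otherwise the unsigned subtraction would wrap.
 */
#if 0
static kal_uint32 example_ul_budget(const cccitty_dev_t *dev)
{
    kal_uint32 ttyc_credit = dev->ttyc_rgpd_cnt - 1; /* reserve 1 tail RGPD */
    kal_uint32 hif_pending = dev->hif_ul_rgpd_cnt;
    return MIN(ttyc_credit, hif_pending);            /* 0 means nothing to move */
}
#endif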
/*************************************************************************
 * FUNCTION
 *  void ccci_exception_handshake
 *
 * DESCRIPTION
 *  Performs the CCCI exception-mode handshake: sends the EXCEPTION_CHECK
 *  packet on the control channel and polls for the echoed acknowledgment.
 *  Exception mode runs single-threaded, so polling I/O is used throughout.
 *
 * PARAMETERS
 *  void
 *
 * RETURNS
 *  void
 *
 *************************************************************************/
void ccci_exception_handshake(void)
{
#if defined(__MODEM_CCCI_EXIST__) && !defined(__MODEM_CARD__)
    CCCI_BUFF_T *buff;
    kal_uint32   p_cache_aligned;
    kal_uint32   gpd_num;
    qbm_gpd     *p_first_gpd, *p_last_gpd;
    //kal_uint32 rcv_size = 0;

    // ensure we are in the exception state
    if (INT_QueryExceptionStatus() == KAL_FALSE)
        return;

    //we block here for debugging
    //if(ccci_exception_state != CCCI_EXPT_CLEAR_CH_ST) while(1);

    //- Avoid using kal_mem_cpy
    //- HW bug
    ccci_init(CCCI_CONTROL_CHANNEL, ccci_except_ack);
    ccci_init(CCCI_CONTROL_CHANNEL_ACK, ccci_except_ack);

    // exception mode has only a single thread, so polling mode is required
    /* initialize the polling-mode GPDs */
    ASSERT(CCCI_EXCEPT_POLLING_MODE_BUF_SZ >= 2 * CPU_CACHE_LINE_SIZE);

    /* make p_gpd aligned to CPU_CACHE_LINE_SIZE_MASK */
    p_cache_aligned = (kal_uint32)g_ccci_expect_polling_buf_tx;
    if (p_cache_aligned & CPU_CACHE_LINE_SIZE_MASK) {
        p_cache_aligned = ((kal_uint32)(g_ccci_expect_polling_buf_tx) & ~CPU_CACHE_LINE_SIZE_MASK);
        p_cache_aligned += CPU_CACHE_LINE_SIZE;
    }
    // QBM_HEAD_SIZE is added for ROME E1: CLDMA needs to record whether this buffer is cacheable or non-cacheable
    ccci_except_polling_gpd_tx = (qbm_gpd *)(p_cache_aligned + QBM_HEAD_SIZE);
    CCCIDEV_RST_CCCI_COMM_GPD_LIST(ccci_except_polling_gpd_tx, ccci_except_polling_gpd_tx);

    //format the Rx GPD
    p_cache_aligned = (kal_uint32)g_ccci_expect_polling_buf_rx;
    if (p_cache_aligned & CPU_CACHE_LINE_SIZE_MASK) {
        p_cache_aligned = ((kal_uint32)(g_ccci_expect_polling_buf_rx) & ~CPU_CACHE_LINE_SIZE_MASK);
        p_cache_aligned += CPU_CACHE_LINE_SIZE;
    }
    // QBM_HEAD_SIZE is added for ROME E1: CLDMA needs to record whether this buffer is cacheable or non-cacheable
    ccci_except_polling_gpd_rx = (qbm_gpd *)(p_cache_aligned + QBM_HEAD_SIZE);
    CCCIDEV_RST_CCCI_COMM_GPD_LIST(ccci_except_polling_gpd_rx, ccci_except_polling_gpd_rx);

    //format the 2nd Rx GPD
    p_cache_aligned = (kal_uint32)g_ccci_expect_polling_buf_rx2;
    if (p_cache_aligned & CPU_CACHE_LINE_SIZE_MASK) {
        p_cache_aligned = ((kal_uint32)(g_ccci_expect_polling_buf_rx2) & ~CPU_CACHE_LINE_SIZE_MASK);
        p_cache_aligned += CPU_CACHE_LINE_SIZE;
    }
    // QBM_HEAD_SIZE is added for ROME E1: CLDMA needs to record whether this buffer is cacheable or non-cacheable
    ccci_except_polling_gpd_rx2 = (qbm_gpd *)(p_cache_aligned + QBM_HEAD_SIZE);
    CCCIDEV_RST_CCCI_COMM_GPD_LIST(ccci_except_polling_gpd_rx2, ccci_except_polling_gpd_rx2);

    //step 0. configure the Rx GPD next pointer (chain the two Rx GPDs)
    QBM_DES_SET_NEXT(ccci_except_polling_gpd_rx, ccci_except_polling_gpd_rx2);

    //step 1. send the TX handshake packet
    buff = CCCIDEV_GET_QBM_DATAPTR(ccci_except_polling_gpd_tx);
    buff->data[0]  = MD_EX_MAGIC;
    buff->data[1]  = CCMSG_ID_EXCEPTION_CHECK;
    buff->channel  = CCCI_CONTROL_CHANNEL;
    buff->reserved = MD_EX_CHK_ID;
    ccci_debug_add_seq(buff, CCCI_DEBUG_ASSERT_BIT); // add the CCCI sequence number
    QBM_DES_SET_DATALEN(ccci_except_polling_gpd_tx, sizeof(CCCI_BUFF_T));
    QBM_DES_SET_DATALEN(ccci_except_polling_gpd_tx->p_data_tbd, sizeof(CCCI_BUFF_T));
    qbm_cal_set_checksum((kal_uint8 *)ccci_except_polling_gpd_tx);
    qbm_cal_set_checksum((kal_uint8 *)ccci_except_polling_gpd_tx->p_data_tbd);
    QBM_CACHE_FLUSH(ccci_except_polling_gpd_tx, sizeof(qbm_gpd));
    QBM_CACHE_FLUSH(ccci_except_polling_gpd_tx->p_data_tbd, sizeof(qbm_gpd));
    QBM_CACHE_FLUSH(buff, sizeof(CCCI_BUFF_T));

    ex_set_step_logging(EX_AUTO_STEP); //0x41

    //step 2. poll for the echoed Rx handshake packet (needs two GPDs, one for the tail)
    // TODO: revise the API; shall use the exception API (after the channel reset flow is done)
#ifdef SWITCH_TO_EXCEPTION_IO
    p_first_gpd = p_last_gpd = ccci_except_polling_gpd_tx;
    ccci_except_set_gpd(CCCI_CONTROL_CHANNEL, p_first_gpd, p_last_gpd);
    gpd_num = 0;
    do {
        ccci_except_hif_st(CCCI_CONTROL_CHANNEL);
        ccci_except_poll_gpd(CCCI_CONTROL_CHANNEL, (void **)&p_first_gpd, (void **)&p_last_gpd, &gpd_num);
    } while (0 == gpd_num);
    CCCIDEV_RST_CCCI_COMM_GPD_LIST(p_first_gpd, p_last_gpd);
    ex_set_step_logging(EX_AUTO_STEP); //0x42

    p_first_gpd = ccci_except_polling_gpd_rx;
    p_last_gpd  = ccci_except_polling_gpd_rx2;
    do {
        ccci_except_set_gpd(CCCI_CONTROL_CHANNEL_ACK, p_first_gpd, p_last_gpd);
        gpd_num = 0;
        do {
            ccci_except_hif_st(CCCI_CONTROL_CHANNEL_ACK);
            ccci_except_poll_gpd(CCCI_CONTROL_CHANNEL_ACK, (void **)&p_first_gpd, (void **)&p_last_gpd, &gpd_num);
        } while (0 == gpd_num);
        buff = CCCIDEV_GET_QBM_DATAPTR(ccci_except_polling_gpd_rx);
        //rcv_size = CCCIDEV_GET_QBM_DATALEN(ccci_except_polling_gpd_rx); // FIXME
        QBM_CACHE_INVALID(buff, sizeof(CCCI_BUFF_T));
        ccci_debug_check_seq(buff); // check the CCCI sequence number
        if (buff->reserved == MD_EX_CHK_ID) {
            ex_set_step_logging(EX_AUTO_STEP); //0x43
            ccci_exception_handshake_done = KAL_TRUE;
            CCCIDEV_RST_CCCI_COMM_GPD_LIST(p_first_gpd, p_last_gpd);
            ccci_except_set_gpd(CCCI_CONTROL_CHANNEL_ACK, p_first_gpd, p_last_gpd); //reload to CCCI_CONTROL_CHANNEL_ACK
            break;
        } else {
            //dump data
            ex_fire_extern_step_logging(0xFFFFFFFF);
            ex_fire_extern_step_logging(buff->data[0]);
            ex_fire_extern_step_logging(buff->data[1]);
            ex_fire_extern_step_logging(buff->channel);
            ex_fire_extern_step_logging(buff->reserved);
            ex_fire_extern_step_logging(0xFFFFFFFF);
        }
        CCCIDEV_RST_CCCI_COMM_GPD_LIST(p_first_gpd, p_last_gpd);
    } while (1);
#else
    ccci_exception_check_write_result = ccci_polling_io(CCCI_CONTROL_CHANNEL, ccci_except_polling_gpd_tx, KAL_TRUE);
    CCCIDEV_RST_CCCI_COMM_GPD_LIST(ccci_except_polling_gpd_tx, ccci_except_polling_gpd_tx);
    while (CCCI_SUCCESS == (ccci_exception_handshake_done = ccci_polling_io(CCCI_CONTROL_CHANNEL_ACK, ccci_except_polling_gpd_tx, KAL_FALSE))) {
        buff = CCCIDEV_GET_QBM_DATAPTR(ccci_except_polling_gpd_tx);
        QBM_CACHE_INVALID(buff, sizeof(CCCI_BUFF_T));
        ccci_debug_check_seq(buff); // check the CCCI sequence number
        if (buff->reserved == MD_EX_CHK_ID) {
            CCCIDEV_RST_CCCI_COMM_GPD_LIST(ccci_except_polling_gpd_tx, ccci_except_polling_gpd_tx);
            break;
        }
        CCCIDEV_RST_CCCI_COMM_GPD_LIST(ccci_except_polling_gpd_tx, ccci_except_polling_gpd_tx);
    }
#endif
    ex_set_step_logging(EX_AUTO_STEP); //0x44
    ccci_exception_state = CCCI_EXPT_HANDSHAKE_ST;
#ifdef CCCI_EXCETION_PRE_TEST
    ccci_exception_info_passed_pre();
#endif
#endif
}
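/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * the three buffer set-ups above repeat the same align-up-to-cache-line
 * computation and could be factored as below. CPU_CACHE_LINE_SIZE is assumed
 * to be a power of two, with CPU_CACHE_LINE_SIZE_MASK == CPU_CACHE_LINE_SIZE - 1.
 */
#if 0
static qbm_gpd *example_gpd_from_buf(void *raw_buf)
{
    kal_uint32 addr = (kal_uint32)raw_buf;

    if (addr & CPU_CACHE_LINE_SIZE_MASK) {
        /* round up to the next cache-line boundary */
        addr = (addr & ~CPU_CACHE_LINE_SIZE_MASK) + CPU_CACHE_LINE_SIZE;
    }
    /* skip QBM_HEAD_SIZE so CLDMA can record the buffer's cacheability (ROME E1) */
    return (qbm_gpd *)(addr + QBM_HEAD_SIZE);
}
#endif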