void ccci_exception_info_passed_pre(void)
{
#if defined(__MODEM_CCCI_EXIST__) && !defined(__MODEM_CARD__)
    CCCI_BUFF_T *buff;
    qbm_gpd *p_first_gpd, *p_last_gpd;
    kal_uint32 gpd_num;

    /* ensure we are in the exception state */
    if (INT_QueryExceptionStatus() == KAL_FALSE)
        return;

    //- Already init in ccci_exception_handshake
    //- ccci_init(CCCI_CONTROL_CHANNEL, ccci_except_ack);
    //- ccci_init(CCCI_CONTROL_CHANNEL_ACK, ccci_except_ack);

    buff = CCCIDEV_GET_QBM_DATAPTR(ccci_except_polling_gpd_tx);
    buff->data[0]  = MD_EX_MAGIC;
    buff->data[1]  = CCMSG_ID_EXCEPTION_REC_OK;
    buff->channel  = CCCI_CONTROL_CHANNEL;
    buff->reserved = MD_EX_REC_OK_CHK_ID;
    ccci_debug_add_seq(buff, CCCI_DEBUG_ASSERT_BIT); /* add CCCI sequence number */
    kal_mem_cpy((void *)(buff + 1), ex_log_ptr, sizeof(EX_LOG_T));

    QBM_DES_SET_DATALEN(ccci_except_polling_gpd_tx, sizeof(CCCI_BUFF_T) + sizeof(EX_LOG_T));
    QBM_DES_SET_DATALEN(ccci_except_polling_gpd_tx->p_data_tbd, sizeof(CCCI_BUFF_T) + sizeof(EX_LOG_T));
    qbm_cal_set_checksum((kal_uint8 *)ccci_except_polling_gpd_tx);
    qbm_cal_set_checksum((kal_uint8 *)ccci_except_polling_gpd_tx->p_data_tbd);
    QBM_CACHE_FLUSH(ccci_except_polling_gpd_tx, sizeof(qbm_gpd));
    QBM_CACHE_FLUSH(ccci_except_polling_gpd_tx->p_data_tbd, sizeof(qbm_gpd));
    QBM_CACHE_FLUSH(buff, sizeof(CCCI_BUFF_T) + sizeof(EX_LOG_T));

    /* TODO: need to revise the API; shall use the exception API (after the channel reset flow is done) */
#ifdef SWITCH_TO_EXCEPTION_IO
    p_first_gpd = p_last_gpd = ccci_except_polling_gpd_tx;
    ccci_except_set_gpd(CCCI_CONTROL_CHANNEL, p_first_gpd, p_last_gpd);
    gpd_num = 0;
    do {
        ccci_except_hif_st(CCCI_CONTROL_CHANNEL);
        ccci_except_poll_gpd(CCCI_CONTROL_CHANNEL, (void **)&p_first_gpd, (void **)&p_last_gpd, &gpd_num);
    } while (0 == gpd_num);
    ex_set_step_logging(EX_AUTO_STEP); /* 0x45 */
    CCCIDEV_RST_CCCI_COMM_GPD_LIST(p_first_gpd, p_last_gpd);
#else
    ccci_exception_info_write_result = ccci_polling_io(CCCI_CONTROL_CHANNEL, ccci_except_polling_gpd_tx, KAL_TRUE);
    CCCIDEV_RST_CCCI_COMM_GPD_LIST(ccci_except_polling_gpd_tx, ccci_except_polling_gpd_tx);
#endif
    ccci_exception_state = CCCI_EXPT_INFO_PASS_PRE_ST;

#ifdef WDT_ISR_TEST
    wdt_test();
#endif
#endif /* __MODEM_CCCI_EXIST__ && !__MODEM_CARD__ */
}
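/*
 * Illustrative sketch (not part of the driver build): the polling-mode TX
 * pattern used above, factored into a helper. The helper name is
 * hypothetical; the calls inside are the ones already used in this file.
 */
#if 0
static void ccci_except_send_one_gpd(kal_uint32 channel, qbm_gpd *gpd)
{
    qbm_gpd   *first = gpd, *last = gpd;
    kal_uint32 done  = 0;

    ccci_except_set_gpd(channel, first, last);   /* hand the GPD to the HIF */
    do {
        ccci_except_hif_st(channel);             /* poll the HIF state */
        ccci_except_poll_gpd(channel, (void **)&first, (void **)&last, &done);
    } while (0 == done);                         /* busy-wait: exception context is single-threaded */
    CCCIDEV_RST_CCCI_COMM_GPD_LIST(first, last); /* recycle the GPD for reuse */
}
#endif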
/*!
 * @function [static] cccitty_process_dl_gpd_list
 * @brief Traverse the input GPD list and insert the CCCI header on the first BD->data
 *
 * @param pDevice   [IN] pointer to the CCCI_TTY device
 * @param first_gpd [IN] pointer to the first GPD in the GPD chain
 * @param last_gpd  [IN] pointer to the last GPD in the GPD chain
 *
 * @return kal_uint32 the number of GPDs between first_gpd and last_gpd
 */
static kal_uint32 cccitty_process_dl_gpd_list(cccitty_dev_t *pDevice, qbm_gpd *first_gpd, qbm_gpd *last_gpd)
{
    /* process_tx_gpd_list */
    qbm_gpd     *current_gpd = NULL;
    kal_uint32   pkt_cnt  = 0;
    kal_uint32   byte_cnt = 0;
    kal_uint32   ccci_h_size;
    CCCI_BUFF_T *p_ccci_head;
    kal_int32    addseqrtn;

    //ASSERT(first_gpd && last_gpd);
    EXT_ASSERT((first_gpd && last_gpd), pDevice->dev_id, (kal_uint32)first_gpd, (kal_uint32)last_gpd);

    current_gpd = first_gpd;
    ccci_h_size = sizeof(CCCI_BUFF_T);
    ASSERT(ccci_h_size > 0);

    do {
        //4 <1> append the CCCI header, CCCI_BUFF_T
#ifdef __CCCI_N_USE_TGPD_EXT__
        /* move the data buffer pointer forward for the CCCI header; increase the GPD/BD length */
        CCCITTY_PUSH_QBM_DATAHEAD(current_gpd, ccci_h_size);
        p_ccci_head = (CCCI_BUFF_T *)CCCITTY_GET_QBM_DATAPTR(current_gpd);
        CCCI_STREAM_SET_LEN(p_ccci_head, CCCITTY_GET_QBM_DATALEN(current_gpd));
#else
        /* store the DL CCCI header in the GPD extension part */
        QBM_DES_SET_EXTLEN(current_gpd, ccci_h_size);
        p_ccci_head = (CCCI_BUFF_T *)QBM_DES_GET_EXT(current_gpd);
        CCCI_STREAM_SET_LEN(p_ccci_head, CCCITTY_GET_QBM_DATALEN(current_gpd) + ccci_h_size);
#endif
        /* construct the CCCI header */
        CCCI_SET_CH_NO(p_ccci_head, pDevice->ccci_ch.cccitty_ch_dl);

        /* add the CCCI sequence number */
        addseqrtn = ccci_debug_add_seq(p_ccci_head, CCCI_DEBUG_ASSERT_BIT);
        if (addseqrtn != CCCI_SUCCESS) {
            cccitty_trace(CCCI_TTY_ERR, CCCI_TTY_MOD_DL, CCCI_TTY_ADD_DL_SEQ_FAIL,
                          addseqrtn, pDevice->ccci_ch.cccitty_ch_dl);
        }

#if CCCITTY_DATA_TRACE_DUMP_ENABLE
        if (pDevice->ccci_ch.cccitty_ch_dl != CCCI_MD_LOG_RX) {
            cccitty_dump_data(CCCITTY_GET_QBM_DATAPTR(current_gpd), CCCITTY_DUMP_SIZE);
        }
#endif
        QBM_CACHE_FLUSH(p_ccci_head, sizeof(CCCI_BUFF_T));

        pkt_cnt++;
        byte_cnt += QBM_DES_GET_DATALEN(current_gpd);

        if (current_gpd == last_gpd) {
            break;
        }
        current_gpd = QBM_DES_GET_NEXT(current_gpd);
    } while (current_gpd != NULL);

    return pkt_cnt;
}
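/*
 * Illustrative sketch (hypothetical, not built): how a DL path might use
 * cccitty_process_dl_gpd_list() before handing an IO request to CCCI. The
 * call shape of ccci_write_gpd is taken from the UL path in this file; its
 * use on the DL channel here is an assumption.
 */
#if 0
static void cccitty_dl_send_example(cccitty_dev_t *dev, ccci_io_request_t *ior)
{
    cccitty_inst_t *p_cccitty = cccitty_get_instance();

    /* stamp a CCCI_BUFF_T header onto every GPD in the chain */
    ior->num_gpd = cccitty_process_dl_gpd_list(dev, ior->first_gpd, ior->last_gpd);

    /* hand the prepared chain to CCCI on the DL channel (call shape assumed) */
    p_cccitty->ccci_write_gpd(dev->ccci_ch.cccitty_ch_dl, ior, NULL);
}
#endif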
/*!
 * @function cccitty_ccci_ul_cb
 * @brief Uplink callback function registered to CCCI; CCCI calls it back during uplink processing.
 *        Context: HIF context, e.g. MOD_SIODCORE
 *        process: <1> update the RGPD count in HIF
 *                 <2> remove the CCCI header, CCCI_BUFF_T
 *                 <3> enqueue the RGPDs to hif_ul_q_h, or loop back if CCCITTY_LB_ENABLE
 *                 <4> send MSG_ID_CCCI_TTY_UL_DEQUEUE_REQ, switch to the MOD_CCCITTY context for dequeue (MOD_CCCIDEV)
 *                 <5> error handling: reload err_reload_cnt RGPDs
 *        The function might free RGPDs in the following cases:
 *        case 1. the CCCI header is corrupt: CCCITTY_RM_CCCI_HEADERS returns false
 *        case 2. cccitty device state != CCCI_TTY_DEV_OPEN: cccitty_dev_active returns false
 *
 * @param channel    [IN] ccci_channel id
 * @param io_request [IN] pointer to the uplink io request
 *
 * @return void
 */
void cccitty_ccci_ul_cb(CCCI_CHANNEL_T channel, ccci_io_request_t *io_request)
{
    cccitty_inst_t     *p_cccitty = cccitty_get_instance();
    cccitty_dev_t      *p_cccidev;
    cccitty_device_id   dev_id = cccitty_get_ul_devid(channel);
    ccci_io_request_t  *curr_ior;
    ccci_io_request_t  *next_ior;
    kal_bool            end_of_list;
    qbm_gpd            *first_gpd;
    qbm_gpd            *last_gpd;
    qbm_gpd            *prev_gpd;
    qbm_gpd            *curr_gpd;
    qbm_gpd            *next_gpd;
    kal_int32           tmp;
    kal_uint32          num_gpd;
    kal_bool            valid_gpd = KAL_FALSE;
    kal_uint32          num_alloc; //, to_alloc;
    CCCI_RETURNVAL_T    ccci_ret;
    kal_bool            ttydev_deq_msg = KAL_FALSE;
    cccitty_deq_req_t  *cccitty_deq_req;
    /* error RGPD handling */
    ccci_io_request_t   err_reload_ior;
    kal_uint32          err_reload_cnt = 0;
    qbm_gpd            *err_gpd_h = NULL;
    qbm_gpd            *err_gpd_t = NULL;
    kal_uint32          NBPS_GPD_NUM = 0;
    CCCI_BUFF_T        *pdata;
    kal_int32           chkseqrtn = 0;

    if (CCCI_TTY_DEV_CNT == dev_id) {
        /* cannot find a dev_id for this channel; please check g_cccitty_ccci_ch_mappping */
        return;
    }
    if (NULL == io_request) {
        cccitty_trace(CCCI_TTY_ERR, CCCI_TTY_MOD_UL, CCCI_TTY_TR_UL_NULL_IOR, dev_id, channel);
        return;
    }
    /* assert if an invalid channel number is received */
    EXT_ASSERT(dev_id < CCCI_TTY_DEV_CNT, channel, 0, 0);

    p_cccidev = cccitty_get_dev_instance(dev_id);
    if (!cccitty_dev_active(p_cccidev)) {
        /* device is not in the CCCI_TTY_DEV_OPEN state */
        cccitty_trace(CCCI_TTY_ERR, CCCI_TTY_MOD_UL, CCCI_TTY_TR_UL_DEV_NOT_OPEN,
                      p_cccidev->dev_id, cccitty_get_dev_state(p_cccidev));
        /* for the drop-packet case, the sequence numbers must be checked first */
        /* if MIPS becomes an issue, this action can be folded into the reset function */
        cccitty_check_ul_gpd_list_sequence(io_request->first_gpd, io_request->last_gpd);
        /* reset the GPDs */
        num_alloc = cccitty_reset_ccci_comm_gpd_list(io_request->first_gpd, io_request->last_gpd);
        //ASSERT(num_alloc == CCCITTY_GET_NONBPS_GPD_LIST_SIZE(io_request->first_gpd, io_request->last_gpd));
        NBPS_GPD_NUM = CCCITTY_GET_NONBPS_GPD_LIST_SIZE(io_request->first_gpd, io_request->last_gpd);
        EXT_ASSERT((num_alloc == NBPS_GPD_NUM), num_alloc, NBPS_GPD_NUM, 0);
        io_request->num_gpd = num_alloc;
        /* reload the GPDs */
        ccci_ret = p_cccitty->ccci_write_gpd(p_cccidev->ccci_ch.cccitty_ch_ul, io_request, NULL);
        if (CCCI_SUCCESS != ccci_ret) {
            /* NOTE!! might cause RGPD leakage here */
            cccitty_trace(CCCI_TTY_ERR, CCCI_TTY_MOD_UL, CCCI_TTY_TR_UL_CCCI_WRITE_FAIL,
                          p_cccidev->dev_id, ccci_ret);
            qbmt_dest_q(io_request->first_gpd, io_request->last_gpd);
        } else {
            CCCI_TTY_LOCK(p_cccidev->cccitty_mutex);
            {
                /* do not increase hwo_rgpd_cnt: these RGPDs belong to another user */
                //p_cccidev->hwo_rgpd_cnt += num_alloc;
                /* to prevent being interrupted by the SDIOCORE context */
                tmp = p_cccidev->hwo_rgpd_cnt;
            }
            CCCI_TTY_UNLOCK(p_cccidev->cccitty_mutex);
            cccitty_trace(CCCI_TTY_TRACE, CCCI_TTY_MOD_UL, CCCI_TTY_TR_HIF_RGPD_RELOAD5,
                          p_cccidev->dev_id, num_alloc, tmp);
        }
        return;
    }

    for (curr_ior = io_request; curr_ior; curr_ior = next_ior) {
        next_ior  = curr_ior->next_request;
        first_gpd = curr_ior->first_gpd;
        last_gpd  = curr_ior->last_gpd;
        //3 Note: because GPDs might be freed in the following loop, curr_ior must not be accessed from here on.
        if (first_gpd && last_gpd) {
            //4 <1> update the RGPD count in HIF
            num_gpd = CCCITTY_GET_GPD_LIST_SIZE(first_gpd, last_gpd);
            /* BPS GPDs are NOT allowed inside */
            //ASSERT(num_gpd == CCCITTY_GET_NONBPS_GPD_LIST_SIZE(first_gpd, last_gpd));
            NBPS_GPD_NUM = CCCITTY_GET_NONBPS_GPD_LIST_SIZE(first_gpd, last_gpd);
            EXT_ASSERT((num_gpd == NBPS_GPD_NUM), num_gpd, NBPS_GPD_NUM, 0);
            CCCI_TTY_LOCK(p_cccidev->cccitty_mutex);
            {
                p_cccidev->hwo_rgpd_cnt -= num_gpd;
                tmp = p_cccidev->hwo_rgpd_cnt;
            }
            CCCI_TTY_UNLOCK(p_cccidev->cccitty_mutex);
            cccitty_trace(CCCI_TTY_TRACE, CCCI_TTY_MOD_UL, CCCI_TTY_TR_UL_RCV_GPD,
                          p_cccidev->dev_id, num_gpd, tmp);
            if (p_cccidev->hwo_rgpd_cnt < 1) {
                cccitty_trace(CCCI_TTY_WARN, CCCI_TTY_MOD_UL, CCCI_TTY_TR_UL_RGPD_EMPTY,
                              p_cccidev->dev_id, tmp);
            }

            prev_gpd = NULL;
            end_of_list = KAL_FALSE;
            for (curr_gpd = first_gpd; curr_gpd && !end_of_list; curr_gpd = next_gpd) {
                next_gpd = QBM_DES_GET_NEXT(curr_gpd);
                end_of_list = (curr_gpd == last_gpd);
                /* check the sequence number here! */
                pdata = CCCIDEV_GET_QBM_DATAPTR(curr_gpd);
                chkseqrtn = ccci_debug_check_seq(pdata);
                if (chkseqrtn != CCCI_SUCCESS) {
                    cccitty_trace(CCCI_TTY_ERR, CCCI_TTY_MOD_UL, CCCI_TTY_CHK_UL_SEQ_FAIL,
                                  chkseqrtn, channel);
                }
                //4 <2> remove the CCCI header, CCCI_BUFF_T
                valid_gpd = CCCITTY_RM_CCCI_HEADERS(channel, curr_gpd);
                if (KAL_TRUE == valid_gpd) {
                    prev_gpd = curr_gpd;
                } else {
                    p_cccidev->ul_invalid_ttl_cnt++;
                    err_reload_cnt++;
                    cccitty_trace(CCCI_TTY_ERR, CCCI_TTY_MOD_UL, CCCI_TTY_TR_UL_RPGD_ERR,
                                  err_reload_cnt, p_cccidev->ul_invalid_ttl_cnt);
                    if (curr_gpd == first_gpd) {
                        if (curr_gpd == last_gpd) {
                            first_gpd = NULL;
                            last_gpd  = NULL;
                            EXT_ASSERT(end_of_list, 0, 0, 0);
                            end_of_list = KAL_TRUE; /* all GPDs in the list are freed; exit the loop after curr_gpd is released */
                        } else {
                            EXT_ASSERT(next_gpd, 0, 0, 0);
                            first_gpd = next_gpd;
                        }
                        prev_gpd = NULL;
                    } else {
                        EXT_ASSERT(prev_gpd, 0, 0, 0);
                        if (curr_gpd == last_gpd) {
                            last_gpd = prev_gpd;
                            QBM_DES_SET_NEXT(prev_gpd, NULL);
                            EXT_ASSERT(end_of_list, 0, 0, 0);
                            end_of_list = KAL_TRUE; /* to exit the loop after curr_gpd is released */
                        } else {
                            EXT_ASSERT(next_gpd, 0, 0, 0);
                            QBM_DES_SET_NEXT(prev_gpd, next_gpd);
                        }
                        qbm_cal_set_checksum((kal_uint8 *)prev_gpd);
                        QBM_CACHE_FLUSH(prev_gpd, sizeof(qbm_gpd));
                    }
                    CCCITTY_QBM_ENQ(curr_gpd, curr_gpd, (void **)&err_gpd_h, (void **)&err_gpd_t);
                }
            } /* for (curr_gpd) */

            //4 <3> enqueue the RGPDs to hif_ul_q_h
            if (first_gpd) {
                EXT_ASSERT(last_gpd, 0, 0, 0);
                num_gpd = CCCITTY_GET_GPD_LIST_SIZE(first_gpd, last_gpd);
#if CCCITTY_LB_ENABLE
                // TODO: provide the loopback function
                //4 nicc_ul2dl_loopback((ccci_io_request_t *)curr_ior, (nicc_dl_func)rndis_on_downlink, dev->ethc_inst);
#else
                /* enqueue to hif_ul_q */
                CCCI_TTY_LOCK(p_cccidev->cccitty_mutex);
                {
                    CCCITTY_QBM_ENQ(first_gpd, last_gpd,
                                    (void **)&p_cccidev->hif_ul_q_h, (void **)&p_cccidev->hif_ul_q_t);
                    p_cccidev->hif_ul_rgpd_cnt += num_gpd;
                    tmp = p_cccidev->hif_ul_rgpd_cnt;
                }
                CCCI_TTY_UNLOCK(p_cccidev->cccitty_mutex);
                ttydev_deq_msg = KAL_TRUE;
#endif
            } else {
                EXT_ASSERT((NULL == last_gpd), 0, 0, 0);
                cccitty_trace(CCCI_TTY_ERR, CCCI_TTY_MOD_UL, CCCI_TTY_TR_UL_EMPTY_IOR, p_cccidev->dev_id);
            }
        } else { /* if (first_gpd && last_gpd) */
            cccitty_trace(CCCI_TTY_ERR, CCCI_TTY_MOD_UL, CCCI_TTY_TR_UL_INVALID_IOR,
                          p_cccidev->dev_id, curr_ior, first_gpd, last_gpd);
            EXT_ASSERT(KAL_FALSE, (kal_uint32)first_gpd, (kal_uint32)last_gpd, 0); /* invalid IOR */
        }
    } /* for (curr_ior...) */

    //4 <4> send MSG_ID_CCCI_TTY_UL_DEQUEUE_REQ
    if (KAL_TRUE == ttydev_deq_msg) {
        CCCI_TTY_LOCK(p_cccidev->cccitty_mutex);
        if (KAL_FALSE == p_cccidev->dev_ul_processing) {
            {
                p_cccidev->dev_ul_processing = KAL_TRUE;
            }
            CCCI_TTY_UNLOCK(p_cccidev->cccitty_mutex);
            cccitty_deq_req = (cccitty_deq_req_t *)construct_local_para(sizeof(cccitty_deq_req_t), 0);
            cccitty_deq_req->dev = p_cccidev;
            msg_send6(MOD_CCCITTY,                                 /* src_mod_id, depends on the HIF type */
                      MOD_CCCITTY,                                 /* dest_mod_id */
                      0,                                           /* sap_id */
                      MSG_ID_CCCI_TTY_UL_DEQUEUE_REQ,              /* msg_id */
                      (struct local_para_struct *)cccitty_deq_req, /* local_para_ptr */
                      NULL);                                       /* peer_buff_ptr */
            cccitty_trace(CCCI_TTY_TRACE, CCCI_TTY_MOD_UL, CCCI_TTY_TR_UL_SEND_DEQ, p_cccidev->dev_id);
        } else {
            CCCI_TTY_UNLOCK(p_cccidev->cccitty_mutex);
            /* a MSG_ID_CCCI_TTY_UL_DEQUEUE_REQ message is already pending */
        }
    }

    //4 <5> error handling: reload err_reload_cnt RGPDs
    if (err_reload_cnt > 0) {
        cccitty_trace(CCCI_TTY_ERR, CCCI_TTY_MOD_UL, CCCI_TTY_TR_UL_INVALID_PKT,
                      p_cccidev->dev_id, err_reload_cnt, p_cccidev->ul_invalid_ttl_cnt);
        /* reset the RGPD content with HWO = 1 */
        num_alloc = cccitty_reset_ccci_comm_gpd_list(err_gpd_h, err_gpd_t);
        // num_alloc = CCCITTY_GET_GPD_LIST_SIZE(err_gpd_h, err_gpd_t);
        if (num_alloc != err_reload_cnt) {
            cccitty_trace(CCCI_TTY_ERR, CCCI_TTY_MOD_UL, CCCI_TTY_TR_TTYC_ERR_RGPD_LEAK,
                          p_cccidev->dev_id, num_alloc, err_reload_cnt);
            EXT_ASSERT(num_alloc == err_reload_cnt, num_alloc, err_reload_cnt, 0);
        }
        err_reload_ior.next_request = NULL;
        err_reload_ior.first_gpd    = err_gpd_h;
        err_reload_ior.last_gpd     = err_gpd_t;
        /* reload the GPDs */
        ccci_ret = p_cccitty->ccci_write_gpd(p_cccidev->ccci_ch.cccitty_ch_ul, &err_reload_ior, NULL);
        if (CCCI_SUCCESS != ccci_ret) {
            /* NOTE!! might cause RGPD leakage here */
            cccitty_trace(CCCI_TTY_ERR, CCCI_TTY_MOD_UL, CCCI_TTY_TR_UL_CCCI_WRITE_FAIL,
                          p_cccidev->dev_id, ccci_ret);
            qbmt_dest_q(err_reload_ior.first_gpd, err_reload_ior.last_gpd);
        } else {
            CCCI_TTY_LOCK(p_cccidev->cccitty_mutex);
            {
                /* these RGPDs are reloaded on behalf of this device, so hwo_rgpd_cnt is increased */
                p_cccidev->hwo_rgpd_cnt += num_alloc;
                /* to prevent being interrupted by the SDIOCORE context */
                tmp = p_cccidev->hwo_rgpd_cnt;
            }
            CCCI_TTY_UNLOCK(p_cccidev->cccitty_mutex);
            cccitty_trace(CCCI_TTY_TRACE, CCCI_TTY_MOD_UL, CCCI_TTY_TR_HIF_RGPD_RELOAD4,
                          p_cccidev->dev_id, num_alloc, tmp);
        }
    }
    return;
}
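/*
 * Illustrative sketch: the unlink rule used in the loop above for dropping a
 * bad GPD from a singly linked chain while keeping first/last consistent.
 * Generic list version for clarity; the qbm checksum/flush steps are omitted.
 */
#if 0
typedef struct node { struct node *next; } node_t;

/* Remove curr from the [first, last] chain; returns the new first. */
static node_t *unlink_node(node_t *first, node_t **last, node_t *prev, node_t *curr)
{
    if (curr == first) {
        first = (curr == *last) ? NULL : curr->next; /* head removal */
        if (curr == *last) *last = NULL;             /* list becomes empty */
    } else if (curr == *last) {
        *last = prev;                                /* tail removal */
        prev->next = NULL;
    } else {
        prev->next = curr->next;                     /* middle removal */
    }
    return first;
}
#endif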
/*************************************************************************
 * FUNCTION
 *  void ccci_exception_handshake
 *
 * DESCRIPTION
 *  This function performs the CCCI exception handshake over the control
 *  channel: it sends an exception-check packet and polls for the echoed
 *  acknowledgement in polling mode (the exception context has only a
 *  single thread).
 *
 * PARAMETERS
 *  NA
 *
 * RETURNS
 *  NA
 *
 *************************************************************************/
void ccci_exception_handshake(void)
{
#if defined(__MODEM_CCCI_EXIST__) && !defined(__MODEM_CARD__)
    CCCI_BUFF_T *buff;
    kal_uint32 p_cache_aligned;
    kal_uint32 gpd_num;
    qbm_gpd *p_first_gpd, *p_last_gpd;
    //kal_uint32 rcv_size = 0;

    /* ensure we are in the exception state */
    if (INT_QueryExceptionStatus() == KAL_FALSE)
        return;

    //we block here for debugging
    //if (ccci_exception_state != CCCI_EXPT_CLEAR_CH_ST) while(1);

    //- Avoid using kal_mem_cpy
    //- HW bug
    ccci_init(CCCI_CONTROL_CHANNEL, ccci_except_ack);
    ccci_init(CCCI_CONTROL_CHANNEL_ACK, ccci_except_ack);

    /* the exception context has only a single thread, so polling mode is required */
    /* initialize the polling-mode GPDs */
    ASSERT(CCCI_EXCEPT_POLLING_MODE_BUF_SZ >= 2 * CPU_CACHE_LINE_SIZE);

    /* make p_gpd aligned to CPU_CACHE_LINE_SIZE_MASK */
    p_cache_aligned = (kal_uint32)g_ccci_expect_polling_buf_tx;
    if (p_cache_aligned & CPU_CACHE_LINE_SIZE_MASK) {
        p_cache_aligned = ((kal_uint32)(g_ccci_expect_polling_buf_tx) & ~CPU_CACHE_LINE_SIZE_MASK);
        p_cache_aligned += CPU_CACHE_LINE_SIZE;
    }
    /* + QBM_HEAD_SIZE is for ROME E1: CLDMA needs to record whether this buffer is cacheable or non-cacheable */
    ccci_except_polling_gpd_tx = (qbm_gpd *)(p_cache_aligned + QBM_HEAD_SIZE);
    CCCIDEV_RST_CCCI_COMM_GPD_LIST(ccci_except_polling_gpd_tx, ccci_except_polling_gpd_tx);

    /* format the Rx GPD */
    p_cache_aligned = (kal_uint32)g_ccci_expect_polling_buf_rx;
    if (p_cache_aligned & CPU_CACHE_LINE_SIZE_MASK) {
        p_cache_aligned = ((kal_uint32)(g_ccci_expect_polling_buf_rx) & ~CPU_CACHE_LINE_SIZE_MASK);
        p_cache_aligned += CPU_CACHE_LINE_SIZE;
    }
    /* + QBM_HEAD_SIZE is for ROME E1: CLDMA needs to record whether this buffer is cacheable or non-cacheable */
    ccci_except_polling_gpd_rx = (qbm_gpd *)(p_cache_aligned + QBM_HEAD_SIZE);
    CCCIDEV_RST_CCCI_COMM_GPD_LIST(ccci_except_polling_gpd_rx, ccci_except_polling_gpd_rx);

    /* format the second Rx GPD */
    p_cache_aligned = (kal_uint32)g_ccci_expect_polling_buf_rx2;
    if (p_cache_aligned & CPU_CACHE_LINE_SIZE_MASK) {
        p_cache_aligned = ((kal_uint32)(g_ccci_expect_polling_buf_rx2) & ~CPU_CACHE_LINE_SIZE_MASK);
        p_cache_aligned += CPU_CACHE_LINE_SIZE;
    }
    /* + QBM_HEAD_SIZE is for ROME E1: CLDMA needs to record whether this buffer is cacheable or non-cacheable */
    ccci_except_polling_gpd_rx2 = (qbm_gpd *)(p_cache_aligned + QBM_HEAD_SIZE);
    CCCIDEV_RST_CCCI_COMM_GPD_LIST(ccci_except_polling_gpd_rx2, ccci_except_polling_gpd_rx2);

    /* step 0. configure the Rx GPD next pointer */
    QBM_DES_SET_NEXT(ccci_except_polling_gpd_rx, ccci_except_polling_gpd_rx2);

    /* step 1. send the TX handshake packet */
    buff = CCCIDEV_GET_QBM_DATAPTR(ccci_except_polling_gpd_tx);
    buff->data[0]  = MD_EX_MAGIC;
    buff->data[1]  = CCMSG_ID_EXCEPTION_CHECK;
    buff->channel  = CCCI_CONTROL_CHANNEL;
    buff->reserved = MD_EX_CHK_ID;
    ccci_debug_add_seq(buff, CCCI_DEBUG_ASSERT_BIT); /* add CCCI sequence number */
    QBM_DES_SET_DATALEN(ccci_except_polling_gpd_tx, sizeof(CCCI_BUFF_T));
    QBM_DES_SET_DATALEN(ccci_except_polling_gpd_tx->p_data_tbd, sizeof(CCCI_BUFF_T));
    qbm_cal_set_checksum((kal_uint8 *)ccci_except_polling_gpd_tx);
    qbm_cal_set_checksum((kal_uint8 *)ccci_except_polling_gpd_tx->p_data_tbd);
    QBM_CACHE_FLUSH(ccci_except_polling_gpd_tx, sizeof(qbm_gpd));
    QBM_CACHE_FLUSH(ccci_except_polling_gpd_tx->p_data_tbd, sizeof(qbm_gpd));
    QBM_CACHE_FLUSH(buff, sizeof(CCCI_BUFF_T));
    ex_set_step_logging(EX_AUTO_STEP); /* 0x41 */

    /* step 2. poll for the echoed RX handshake packet (needs two GPDs, one for the tail) */
    /* TODO: need to revise the API; shall use the exception API (after the channel reset flow is done) */
#ifdef SWITCH_TO_EXCEPTION_IO
    p_first_gpd = p_last_gpd = ccci_except_polling_gpd_tx;
    ccci_except_set_gpd(CCCI_CONTROL_CHANNEL, p_first_gpd, p_last_gpd);
    gpd_num = 0;
    do {
        ccci_except_hif_st(CCCI_CONTROL_CHANNEL);
        ccci_except_poll_gpd(CCCI_CONTROL_CHANNEL, (void **)&p_first_gpd, (void **)&p_last_gpd, &gpd_num);
    } while (0 == gpd_num);
    CCCIDEV_RST_CCCI_COMM_GPD_LIST(p_first_gpd, p_last_gpd);
    ex_set_step_logging(EX_AUTO_STEP); /* 0x42 */

    p_first_gpd = ccci_except_polling_gpd_rx;
    p_last_gpd  = ccci_except_polling_gpd_rx2;
    do {
        ccci_except_set_gpd(CCCI_CONTROL_CHANNEL_ACK, p_first_gpd, p_last_gpd);
        gpd_num = 0;
        do {
            ccci_except_hif_st(CCCI_CONTROL_CHANNEL_ACK);
            ccci_except_poll_gpd(CCCI_CONTROL_CHANNEL_ACK, (void **)&p_first_gpd, (void **)&p_last_gpd, &gpd_num);
        } while (0 == gpd_num);
        buff = CCCIDEV_GET_QBM_DATAPTR(ccci_except_polling_gpd_rx);
        //rcv_size = CCCIDEV_GET_QBM_DATALEN(ccci_except_polling_gpd_rx); // FIXME
        QBM_CACHE_INVALID(buff, sizeof(CCCI_BUFF_T));
        ccci_debug_check_seq(buff); /* check CCCI sequence number */
        if (buff->reserved == MD_EX_CHK_ID) {
            ex_set_step_logging(EX_AUTO_STEP); /* 0x43 */
            ccci_exception_handshake_done = KAL_TRUE;
            CCCIDEV_RST_CCCI_COMM_GPD_LIST(p_first_gpd, p_last_gpd);
            ccci_except_set_gpd(CCCI_CONTROL_CHANNEL_ACK, p_first_gpd, p_last_gpd); /* reload to CCCI_CONTROL_CHANNEL_ACK */
            break;
        } else {
            /* dump the data */
            ex_fire_extern_step_logging(0xFFFFFFFF);
            ex_fire_extern_step_logging(buff->data[0]);
            ex_fire_extern_step_logging(buff->data[1]);
            ex_fire_extern_step_logging(buff->channel);
            ex_fire_extern_step_logging(buff->reserved);
            ex_fire_extern_step_logging(0xFFFFFFFF);
        }
        CCCIDEV_RST_CCCI_COMM_GPD_LIST(p_first_gpd, p_last_gpd);
    } while (1);
#else
    ccci_exception_check_write_result = ccci_polling_io(CCCI_CONTROL_CHANNEL, ccci_except_polling_gpd_tx, KAL_TRUE);
    CCCIDEV_RST_CCCI_COMM_GPD_LIST(ccci_except_polling_gpd_tx, ccci_except_polling_gpd_tx);
    while (CCCI_SUCCESS == (ccci_exception_handshake_done =
               ccci_polling_io(CCCI_CONTROL_CHANNEL_ACK, ccci_except_polling_gpd_tx, KAL_FALSE))) {
        buff = CCCIDEV_GET_QBM_DATAPTR(ccci_except_polling_gpd_tx);
        QBM_CACHE_INVALID(buff, sizeof(CCCI_BUFF_T));
        ccci_debug_check_seq(buff); /* check CCCI sequence number */
        if (buff->reserved == MD_EX_CHK_ID) {
            CCCIDEV_RST_CCCI_COMM_GPD_LIST(ccci_except_polling_gpd_tx, ccci_except_polling_gpd_tx);
            break;
        }
        CCCIDEV_RST_CCCI_COMM_GPD_LIST(ccci_except_polling_gpd_tx, ccci_except_polling_gpd_tx);
    }
#endif
    ex_set_step_logging(EX_AUTO_STEP); /* 0x44 */
    ccci_exception_state = CCCI_EXPT_HANDSHAKE_ST;

#ifdef CCCI_EXCETION_PRE_TEST
    ccci_exception_info_passed_pre();
#endif
#endif /* __MODEM_CCCI_EXIST__ && !__MODEM_CARD__ */
}
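/*
 * Illustrative sketch: the cache-line align-up computation repeated three
 * times above, as a helper. CPU_CACHE_LINE_SIZE(_MASK) and QBM_HEAD_SIZE are
 * the macros already used in this file; the helper itself is hypothetical.
 */
#if 0
static qbm_gpd *ccci_except_fmt_polling_gpd(void *raw_buf)
{
    kal_uint32 addr = (kal_uint32)raw_buf;

    /* round up to the next cache-line boundary if not already aligned */
    if (addr & CPU_CACHE_LINE_SIZE_MASK) {
        addr = (addr & ~CPU_CACHE_LINE_SIZE_MASK) + CPU_CACHE_LINE_SIZE;
    }
    /* + QBM_HEAD_SIZE: CLDMA records whether the buffer is cacheable (ROME E1) */
    return (qbm_gpd *)(addr + QBM_HEAD_SIZE);
}
#endif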
/*************************************************************************
 * FUNCTION
 *  void ccci_boottrc_send_log
 *
 * DESCRIPTION
 *  This function is an exported API to write a bootup trace log into the
 *  shared buffer.
 *
 * PARAMETERS
 *  index - bootup trace log key.
 *  value - bootup trace log value.
 *
 * RETURNS
 *  NA
 *
 *************************************************************************/
void ccci_boottrc_send_log(kal_uint32 index, kal_uint32 value)
{
#if defined(__MTK_TARGET__)
    qbm_gpd *p_gpd;
    void *p_cache_aligned;
    CCCI_BUFF_T *ccci_buff;
    kal_bool hif_ret;
#ifdef __boottrc_performance_measurement__
    kal_uint32 ccci_boottrc_start_time = 0;
    kal_uint32 ccci_boottrc_end_time   = 0;
    kal_uint32 ccci_boottrc_dur_time   = 0;

    ccci_boottrc_start_time = ccci_get_current_time();
#endif

    p_cache_aligned = ccci_boottrc_gpd;
    if ((kal_uint32)p_cache_aligned & CPU_CACHE_LINE_SIZE_MASK) {
        p_cache_aligned = (void *)((kal_uint32)(ccci_boottrc_gpd) & ~CPU_CACHE_LINE_SIZE_MASK);
        p_cache_aligned = (void *)((kal_uint32)p_cache_aligned + CPU_CACHE_LINE_SIZE);
    }
    p_gpd = (qbm_gpd *)p_cache_aligned;

    //kal_mem_set(ccci_hs_buff, 0, CCCI_POLLING_MODE_BUF_SZ);
    /* CCCI polling mode: QMU_BM is not ready yet */
    /* format ccci_hs_buff in GPD->BUFF format */
    CCCI_INIT_RESET_DATALEN_EXTLEN((kal_uint32 *)p_gpd);
    CCCI_INIT_RESET_COMMON_DATA((kal_uint32 *)p_gpd);
    ccci_buff = QBM_DES_GET_DATAPTR(p_gpd);
    ccci_buff->data[0]  = 0x43525442; /* pattern: "BTRC" */
    ccci_buff->data[1]  = index;
    ccci_buff->channel  = CCCI_CONTROL_CHANNEL;
    ccci_buff->reserved = value;
    ccci_debug_add_seq(ccci_buff, CCCI_DEBUG_ASSERT_BIT); /* add CCCI sequence number */
    QBM_DES_SET_DATALEN(p_gpd, sizeof(CCCI_BUFF_T));
    QBM_CACHE_FLUSH(ccci_buff, sizeof(CCCI_BUFF_T));

#if defined(__HIF_SDIO_SUPPORT__)
    hif_ret = hifsdio_simple_send_gpd(0, p_gpd, CCCI_HS_POLLING_TIMEOUT);
#elif defined(__HIF_CLDMA_SUPPORT__)
    hif_ret = hifcldma_simple_send_gpd(0, p_gpd, CCCI_HS_POLLING_TIMEOUT);
#elif defined(__CCIFCORE_SUPPORT__)
    hif_ret = ccifc_simple_send_gpd(0, p_gpd, CCCI_HS_POLLING_TIMEOUT);
#endif
    if (!hif_ret) {
        EXT_ASSERT(KAL_FALSE, hif_ret, 0, 0);
    }

#ifdef __boottrc_performance_measurement__
    ccci_boottrc_end_time = ccci_get_current_time();
    ccci_boottrc_dur_time = ccci_get_duration(ccci_boottrc_start_time, ccci_boottrc_end_time);
    ccci_boottrc_max_transmit_time = (ccci_boottrc_dur_time > ccci_boottrc_max_transmit_time) ?
                                     ccci_boottrc_dur_time : ccci_boottrc_max_transmit_time;
    ccci_boottrc_min_transmit_time = (ccci_boottrc_dur_time < ccci_boottrc_min_transmit_time) ?
                                     ccci_boottrc_dur_time : ccci_boottrc_min_transmit_time;
    ccci_boottrc_total_transmit_time += ccci_boottrc_dur_time;
    ccci_boottrc_transmit_count++;
    ccci_boottrc_avg_transmit_time = ccci_boottrc_total_transmit_time / ccci_boottrc_transmit_count;
    /* write to shared memory */
    ccci_excep_dbg_logging_InHS2(CCCI_EXCEP_DBG_HS_BOOTTRC_WAIT_TIME, (void *)&ccci_boottrc_total_transmit_time);
#endif
#endif /* __MTK_TARGET__ */
}
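/*
 * Illustrative usage (hypothetical call site): record a bootup trace point.
 * The index/value convention is defined by the integrating platform; the
 * key below is made up for the example.
 */
#if 0
void example_boot_stage(void)
{
    /* key 0x10 = "HIF init done" (example key only), payload = current timestamp */
    ccci_boottrc_send_log(0x10, ccci_get_current_time());
}
#endif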
void usbc_at_exception(void)
{
    static kal_uint32 cnt = 0;
    kal_uint32 mask;
    kal_uint32 num_gpd;
    void *head_gpd;
    void *tail_gpd;
    void *rx_gpd;
    void *next_gpd;
    usbc_except_link_st_e link_state;

    if (cnt++ < 5000) {
        return;
    }

    ASSERT(2 == qbmt_alloc_q_no_tail(QBM_TYPE_HIF_DL, 2, &head_gpd, &tail_gpd));
    for (rx_gpd = head_gpd; rx_gpd; rx_gpd = next_gpd) {
        next_gpd = QBM_DES_GET_NEXT(rx_gpd);
        QBM_DES_SET_ALLOW_LEN(rx_gpd, 512);
        qbm_cal_set_checksum((kal_uint8 *)rx_gpd);
        QBM_CACHE_FLUSH(rx_gpd, sizeof(qbm_gpd));
        rx_gpd = QBM_DES_GET_NEXT(rx_gpd);
        if (rx_gpd == tail_gpd)
            break; /* the tail GPD acts as the list terminator and is not configured here */
    }

    mask = SaveAndSetIRQMask();
    dev_info_s.class_type   = USBC_CLASS_TYPE_CDC_ACM;
    dev_info_s.total_pipes  = 3;
    dev_info_s.pipe_type[0] = USBC_PIPE_TYPE_CDC_ACM_COMM_IN;
    dev_info_s.pipe_type[1] = USBC_PIPE_TYPE_CDC_ACM_DATA_IN;
    dev_info_s.pipe_type[2] = USBC_PIPE_TYPE_CDC_ACM_DATA_OUT;
    dev_info_s.notify_usb_state = usbc_at_notify_usb_state;
    dev_info_s.notify_usb_speed = usbc_at_notify_usb_speed;
    dev_info_s.notify_control_setup_packet = usbc_at_notify_control_setup_packet;
    dev_info_s.notify_control_complete     = usbc_at_notify_control_complete;
    dev_info_s.notify_alternate_setting    = NULL;
    dev_info_s.notify_pipe_complete[0] = NULL;
    dev_info_s.notify_pipe_complete[1] = NULL;
    dev_info_s.notify_pipe_complete[2] = NULL;
    dev_info_s.notify_pipe_stall[0] = NULL;
    dev_info_s.notify_pipe_stall[1] = NULL;
    dev_info_s.notify_pipe_stall[2] = NULL;

    dev_inst_s = usbc_except_reset_ch(&dev_info_s);
    ASSERT(dev_inst_s);
    ASSERT(usbc_except_enum_loop());
    ASSERT(usbc_except_init());
    ASSERT(usbc_except_submit_gpd(dev_inst_s->id, dev_inst_s->queue_no_for_pipe[1], head_gpd, tail_gpd));

    do {
        usbc_except_hif_poll(dev_inst_s->id);
        ASSERT(usbc_except_poll_queue(dev_inst_s->id, dev_inst_s->queue_no_for_pipe[1],
                                      &head_gpd, &tail_gpd, &num_gpd));
#if 0 /* under construction ! */
#endif
        ASSERT(usbc_except_hif_state(dev_inst_s->id, dev_inst_s->queue_no_for_pipe[1], &link_state));
#if 0 /* under construction ! */
#endif
    } while (1);

    RestoreIRQMask(mask); /* unreachable while the polling loop above never exits */
}
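/*
 * Illustrative sketch (hypothetical helper): preparing a DL GPD chain for
 * hardware, as in the loop above: set the allowed length, recompute the
 * descriptor checksum, and flush it. Unlike the loop above, this variant
 * also configures the tail GPD rather than leaving it as a bare terminator.
 */
#if 0
static void usbc_except_prepare_rx_chain(void *head_gpd, void *tail_gpd, kal_uint32 allow_len)
{
    void *gpd;

    for (gpd = head_gpd; gpd; gpd = QBM_DES_GET_NEXT(gpd)) {
        QBM_DES_SET_ALLOW_LEN(gpd, allow_len);  /* max bytes HW may fill */
        qbm_cal_set_checksum((kal_uint8 *)gpd); /* descriptor checksum */
        QBM_CACHE_FLUSH(gpd, sizeof(qbm_gpd));  /* make it visible to HW */
        if (gpd == tail_gpd)
            break;
    }
}
#endif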
/*!
 * Callback function with packet information to process the filtered IP datagram.
 * Replies with a TCP RST packet to the sender of the garbage TCP SYN packet.
 *
 * @param info_p    [IN] Related information of the filtered-out GPDs.
 * @param context   [IN] A context specified while registering the filter.
 * @param filter_id [IN] Corresponding registered filter ID.
 * @param head_gpd  [IN] Head of the GPD list for the filtered IP datagram.
 * @param tail_gpd  [IN] Tail of the GPD list for the filtered IP datagram.
 * @param length    [IN] Bytes of buffers used in the GPD list.
 */
void pfm_ipc_dl_filter_with_info_cb(
    ipc_filter_info_t *info_p,
    void              *context,
    kal_int32          filter_id,
    qbm_gpd           *head_gpd,
    qbm_gpd           *tail_gpd,
    kal_uint32         length)
{
    ipc_pkt_t   ipc_pkt;
    qbm_gpd    *ul_gpd;
    qbm_gpd    *bd;
    kal_uint8  *p_data;
    kal_uint8   next_header;
    kal_uint8  *p_packet;
    kal_uint8  *src_addr;
    kal_uint8  *dst_addr;
    kal_uint8  *p_tcp_header;
    kal_uint32  ip_header_len;
    kal_bool    is_ipv4;

    if (QBM_DES_GET_BDP(head_gpd)) {
        bd = QBM_DES_GET_DATAPTR(head_gpd);
        p_packet = QBM_DES_GET_DATAPTR(bd);
    } else {
        p_packet = QBM_DES_GET_DATAPTR(head_gpd);
    }

    if (IPC_HDR_IS_V4(p_packet)) {
        is_ipv4 = KAL_TRUE;
        if (IPC_HDR_PROT_TCP != IPC_HDR_V4_GET_PROTOCOL(p_packet)) {
            /* only for TCP packets */
            goto free_gpd;
        }
        /* send an IPv4 TCP RST */
        ip_header_len = (kal_uint32)IPC_HDR_V4_GET_IHL(p_packet);
        p_tcp_header  = p_packet + ip_header_len;
        src_addr = IPC_HDR_V4_GET_DST_ADDR(p_packet);
        dst_addr = IPC_HDR_V4_GET_SRC_ADDR(p_packet);
    } else if (IPC_HDR_IS_V6(p_packet)) {
        is_ipv4 = KAL_FALSE;
        /* check whether it is TCP or not */
        ip_header_len = IPC_HDR_V6_HEADER_SIZE; /* the IPv6 base header has a fixed size */
        next_header   = IPC_HDR_V6_GET_NH_TYPE(p_packet);
        p_tcp_header  = p_packet + ip_header_len;
        while (1) {
            if ((next_header == IPC_HDR_PROT_IPV6_HOP) ||
                (next_header == IPC_HDR_PROT_IPV6_ROUTE) ||
                (next_header == IPC_HDR_PROT_IPV6_DEST)) {
                /* read the next header type */
                next_header = IPC_NE_GET_1B(p_tcp_header);
                /* move the pointer to the next extension header */
                p_tcp_header += (IPC_NE_GET_1B(p_tcp_header + 1) + 1) * 8;
            } else if (next_header == IPC_HDR_PROT_AH) {
                /* read the next header type */
                next_header = IPC_NE_GET_1B(p_tcp_header);
                /* move the pointer to the next extension header */
                p_tcp_header += (IPC_NE_GET_1B(p_tcp_header + 1) + 2) * 4;
            } else if (next_header == IPC_HDR_PROT_TCP) {
                /* found the TCP header! */
                break;
            } else {
                goto free_gpd;
            }
        }
        src_addr = IPC_HDR_V6_GET_DST_ADDR(p_packet);
        dst_addr = IPC_HDR_V6_GET_SRC_ADDR(p_packet);
    } else {
        goto free_gpd;
    }

    /* allocate a UL GPD */
    ul_gpd = QBM_ALLOC_ONE(QBM_TYPE_HIF_UL_TYPE);
    if (!ul_gpd) {
        PFM_ASSERT(KAL_FALSE);
    }
    bd = QBM_DES_GET_DATAPTR(ul_gpd);
    p_data = QBM_DES_GET_DATAPTR(bd);

    /* fill the IP/TCP headers */
    {
        kal_uint16 sum16;
        kal_uint32 tcp_header_len;
        kal_uint32 total_len;
        kal_uint8 *ip_header;  /* IP header of the output packet */
        kal_uint8 *tcp_header; /* TCP header of the output packet */

        ip_header_len  = ((is_ipv4) ? IPC_HDR_V4_HEADER_SIZE : IPC_HDR_V6_HEADER_SIZE);
        tcp_header_len = IPC_HDR_TCP_HEADER_SIZE;
        total_len      = ip_header_len + tcp_header_len;
        ip_header  = p_data;
        tcp_header = ip_header + ip_header_len;

        /* fill the TCP header */
        IPC_HDR_TCP_SET_SRC_PORT(tcp_header, IPC_HDR_TCP_GET_DST_PORT(p_tcp_header));
        IPC_HDR_TCP_SET_DST_PORT(tcp_header, IPC_HDR_TCP_GET_SRC_PORT(p_tcp_header));
        IPC_HDR_TCP_SET_SEQ_NUM(tcp_header, IPC_HDR_TCP_GET_ACK_NUM(p_tcp_header));
        IPC_HDR_TCP_SET_ACK_NUM(tcp_header, IPC_HDR_TCP_GET_SEQ_NUM(p_tcp_header) + 1);
        IPC_HDR_TCP_SET_OFFSET(tcp_header, IPC_HDR_TCP_HEADER_SIZE);
        IPC_HDR_TCP_SET_RESERVED(tcp_header, 0);
        IPC_HDR_TCP_SET_FLAGS(tcp_header, IPC_HDR_TCP_FLAG_RST | IPC_HDR_TCP_FLAG_ACK);
        IPC_HDR_TCP_SET_WINDOW(tcp_header, 0);
        IPC_HDR_TCP_SET_CHECKSUM(tcp_header, 0);
        IPC_HDR_TCP_SET_URGENT_PTR(tcp_header, 0);
        sum16 = ipc_calc_tcp_checksum(is_ipv4, src_addr, dst_addr, tcp_header, tcp_header_len);
        IPC_HDR_TCP_SET_CHECKSUM(tcp_header, sum16);

        if (is_ipv4) {
            IPC_HDR_V4_RESET_VER_IHL_DSCP_ECN(ip_header);
            IPC_HDR_V4_SET_DSCP(ip_header, IPC_HDR_V4_GET_DSCP(p_packet));
            IPC_HDR_V4_SET_TOTAL_LENGTH(ip_header, total_len);
            IPC_HDR_V4_SET_IDENTITY(ip_header, 0);
            IPC_HDR_V4_SET_FLAGS(ip_header, 0);
            IPC_HDR_V4_SET_FRAG_OFFSET(ip_header, 0);
            IPC_HDR_V4_SET_TTL(ip_header, IPC_DEF_TTL);
            IPC_HDR_V4_SET_PROTOCOL(ip_header, IPC_HDR_PROT_TCP);
            IPC_HDR_V4_SET_HEADER_CHECKSUM(ip_header, 0);
            IPC_HDR_V4_SET_SRC_ADDR(ip_header, src_addr);
            IPC_HDR_V4_SET_DST_ADDR(ip_header, dst_addr);
            sum16 = ipc_calc_ipv4_checksum(ip_header);
            IPC_HDR_V4_SET_HEADER_CHECKSUM(ip_header, sum16);
        } else {
            IPC_HDR_V6_RESET_VER_TC_FL(ip_header);
            IPC_HDR_V6_SET_TC(ip_header, IPC_HDR_V6_GET_TC(p_packet));
            IPC_HDR_V6_SET_LENGTH(ip_header, total_len - ip_header_len /* TCP length */);
            IPC_HDR_V6_SET_NH_TYPE(ip_header, IPC_HDR_PROT_TCP);
            IPC_HDR_V6_SET_HOP_LIMIT(ip_header, IPC_DEF_TTL);
            IPC_HDR_V6_SET_SRC_ADDR(ip_header, src_addr);
            IPC_HDR_V6_SET_DST_ADDR(ip_header, dst_addr);
        }

        QBM_CACHE_FLUSH(ip_header, total_len);
        QBM_DES_SET_DATALEN(bd, total_len);
        qbm_cal_set_checksum(bd);
        QBM_DES_SET_DATALEN(ul_gpd, total_len);
        qbm_cal_set_checksum(ul_gpd);
    }

    kal_mem_set(&ipc_pkt, 0, sizeof(ipc_pkt));
    ipc_pkt.isGPD = KAL_TRUE;
    ipc_pkt.head  = ul_gpd;
    ipc_pkt.tail  = ul_gpd;
    ipc_send_ul_pkt(&ipc_pkt, NULL, info_p->ebi);
    hif_trace_info(PFM_TR_GARBAGE_FILTER_REPLY_RST, 0, IPC_HDR_TCP_GET_DST_PORT(p_tcp_header));

free_gpd:
    pfm_drop_packet_trace(info_p->ebi, p_packet, PFM_DROP_PACKET_DUMP_SIZE);
    qbmt_dest_q(head_gpd, tail_gpd);
}
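/*
 * Illustrative note: the RST reply above follows the RFC 793 reset rule for
 * an unacceptable SYN: swap addresses and ports, SEQ = incoming ACK,
 * ACK = incoming SEQ + 1, flags = RST|ACK, window = 0. A hypothetical
 * helper restating just that rule with the macros used above:
 */
#if 0
static void tcp_fill_rst_from_syn(kal_uint8 *rst, const kal_uint8 *syn)
{
    IPC_HDR_TCP_SET_SRC_PORT(rst, IPC_HDR_TCP_GET_DST_PORT(syn)); /* swap ports */
    IPC_HDR_TCP_SET_DST_PORT(rst, IPC_HDR_TCP_GET_SRC_PORT(syn));
    IPC_HDR_TCP_SET_SEQ_NUM(rst, IPC_HDR_TCP_GET_ACK_NUM(syn));   /* SEQ = their ACK */
    IPC_HDR_TCP_SET_ACK_NUM(rst, IPC_HDR_TCP_GET_SEQ_NUM(syn) + 1); /* ACK = their SEQ + 1 */
    IPC_HDR_TCP_SET_FLAGS(rst, IPC_HDR_TCP_FLAG_RST | IPC_HDR_TCP_FLAG_ACK);
    IPC_HDR_TCP_SET_WINDOW(rst, 0);
}
#endif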
/*************************************************************************
 * FUNCTION
 *  ccci_ipc_send_msg
 *
 * DESCRIPTION
 *  This function is the internal API to send a message.
 *
 * PARAMETERS
 *  ipc_task_id     - destination IPC task ID
 *  buffer_ptr      - pointer to the ILM buffer to send
 *  msg_size        - message size in bytes
 *  wait_mode       - KAL_INFINITE_WAIT or KAL_NO_WAIT
 *  message_to_head - whether to queue the message at the head
 *
 * RETURNS
 *  status - success/fail
 *
 *************************************************************************/
kal_bool ccci_ipc_send_msg(kal_uint32 ipc_task_id, void *buffer_ptr, kal_uint16 msg_size,
                           kal_wait_mode wait_mode, kal_bool message_to_head)
{
    kal_uint32 i, j;
    kal_uint32 retrieved_events = 0, orig_local_addr = 0, orig_peer_addr = 0, update_buff_addr = 0;
    kal_int32 result = CCCI_SUCCESS;
    ipc_ilm_t *temp_ipc_ilm = (ipc_ilm_t *)buffer_ptr;
    ccci_io_request_t ior = {0};
    CCCI_BUFF_T *p_ccci_buff;
    kal_uint32 len = 0;

    ccci_ipc_trace(CCCI_IPC_TRACE, CCCI_IPC_MOD_DATA, CCCI_SEND_MSG_FUNC_TRA);
    ccci_ipc_trace(CCCI_IPC_TRACE, CCCI_IPC_MOD_DATA, CCCI_SEND_MSG_TRA_ILM,
                   temp_ipc_ilm,
                   temp_ipc_ilm->src_mod_id,
                   temp_ipc_ilm->dest_mod_id,
                   temp_ipc_ilm->sap_id,
                   temp_ipc_ilm->msg_id,
                   temp_ipc_ilm->local_para_ptr,
                   temp_ipc_ilm->peer_buff_ptr);

    /* get the ext queue id from the task-id mapping table - destination */
    for (i = 0; i < MAX_CCCI_IPC_TASKS; i++) {
        if (ccci_ipc_maptbl[i].task_id == ipc_task_id) {
            break;
        }
    }
    /* get the ext queue id from the task-id mapping table - source */
    for (j = 0; j < MAX_CCCI_IPC_TASKS; j++) {
        if (ccci_ipc_maptbl[j].task_id == temp_ipc_ilm->src_mod_id) {
            break;
        }
    }

    /* check the src mod id; if it is not defined in CCCI IPC, do not clear the used bit */
    if (j >= MAX_CCCI_IPC_TASKS) {
        ccci_ipc_trace(CCCI_IPC_ERR, CCCI_IPC_MOD_DATA, CCCI_SEND_MSG_FUNC_TASKID_ERROR,
                       ipc_task_id, temp_ipc_ilm->src_mod_id);
        return KAL_FALSE;
    }
    /* check whether the ext queue id cannot be found */
    if (i >= MAX_CCCI_IPC_TASKS) {
        ccci_ipc_trace(CCCI_IPC_ERR, CCCI_IPC_MOD_DATA, CCCI_SEND_MSG_FUNC_TASKID_ERROR,
                       ipc_task_id, temp_ipc_ilm->src_mod_id);
        ((CCCI_IPC_ILM_T *)buffer_ptr)->used = 0;
        return KAL_FALSE;
    }
    /* check whether the ext queue id targets the AP */
    if ((ccci_ipc_maptbl[i].extq_id & AP_UINFY_ID_FLAG) == 0) {
        ccci_ipc_trace(CCCI_IPC_ERR, CCCI_IPC_MOD_DATA, CCCI_SEND_MSG_FUNC_DESTID_ERROR, ipc_task_id);
        ((CCCI_IPC_ILM_T *)buffer_ptr)->used = 0;
        return KAL_FALSE;
    }
    /* check whether the ilm buffer comes from ipc_msgsvc_allocate_ilm or not */
    if (buffer_ptr != &ccci_ipc_ilm_arr[j].ipc_ilm) {
        ccci_ipc_trace(CCCI_IPC_ERR, CCCI_IPC_MOD_DATA, CCCI_SEND_MSG_FUNC_ILM_ERROR);
        return KAL_FALSE;
    }

    len = sizeof(CCCI_BUFF_T) + sizeof(ipc_ilm_t);
    if (temp_ipc_ilm->local_para_ptr != NULL) {
        len += temp_ipc_ilm->local_para_ptr->msg_len;
    }
    if (temp_ipc_ilm->peer_buff_ptr != NULL) {
        len += sizeof(peer_buff_struct)
             + temp_ipc_ilm->peer_buff_ptr->pdu_len
             + temp_ipc_ilm->peer_buff_ptr->free_header_space
             + temp_ipc_ilm->peer_buff_ptr->free_tail_space;
    }
    /* assert if the ilm size > CCCI_IPC_GPD size */
    EXT_ASSERT(len < CCCI_IPC_GPD_SIZE, len, CCCI_IPC_GPD_SIZE, 0);

    /* enter the critical section */
    CCCI_IPC_ENTER_CRITICAL_SECTION
    if (KAL_TRUE == kal_query_systemInit()) { /* polling mode */
        ior.first_gpd = ccci_ipc_ch.p_polling_gpd;
        ior.last_gpd  = ccci_ipc_ch.p_polling_gpd;
    } else {
#ifdef __SDIOC_PULL_Q_ENH_DL__
        ior.num_gpd =
#endif
            qbmt_alloc_q_no_tail(
                CCCI_IPC_GPD_TYPE,         /* type */
                1,                         /* buff_num */
                (void **)(&ior.first_gpd), /* pp_head */
                (void **)(&ior.last_gpd)); /* pp_tail */
        if (ior.first_gpd == NULL) {
            ccci_ipc_trace(CCCI_IPC_ERR, CCCI_IPC_MOD_DATA, CCCI_SEND_MSG_FUNC_ALLOC_GPD_ERROR);
            /* note: returns while still inside the critical section entered above */
            return KAL_FALSE;
        }
    }

    /* initialize the GPD CCCI_Header content */
    p_ccci_buff = CCCIDEV_GET_QBM_DATAPTR(ior.first_gpd);
    p_ccci_buff->data[1]  = (kal_uint32)len;
    p_ccci_buff->channel  = (kal_uint32)ccci_ipc_ch.send_channel;
    p_ccci_buff->reserved = (kal_uint32)ccci_ipc_maptbl[i].extq_id;
    ccci_debug_add_seq(p_ccci_buff, CCCI_DEBUG_ASSERT_BIT); /* add CCCI sequence number */
    QBM_DES_SET_DATALEN(ior.first_gpd, p_ccci_buff->data[1]);
    QBM_DES_SET_DATALEN(ior.first_gpd->p_data_tbd, p_ccci_buff->data[1]);
    qbm_cal_set_checksum((kal_uint8 *)ior.first_gpd);
    qbm_cal_set_checksum((kal_uint8 *)ior.first_gpd->p_data_tbd);
    QBM_CACHE_FLUSH(ior.first_gpd, sizeof(qbm_gpd));
    QBM_CACHE_FLUSH(ior.first_gpd->p_data_tbd, sizeof(qbm_gpd));

    /* copy the ilm to the GPD */
    temp_ipc_ilm->src_mod_id = ccci_ipc_maptbl[j].extq_id;
    update_buff_addr  = (kal_uint32)p_ccci_buff;
    update_buff_addr += sizeof(CCCI_BUFF_T);
    CCCI_KAL_MSG_TO_AP_MSG(temp_ipc_ilm->msg_id, temp_ipc_ilm->msg_id);
    kal_mem_cpy((kal_uint8 *)update_buff_addr, (kal_uint8 *)temp_ipc_ilm, sizeof(ipc_ilm_t));

    if (temp_ipc_ilm->local_para_ptr != NULL) {
        /* copy the local_para_struct to the GPD */
        update_buff_addr += sizeof(ipc_ilm_t); /* 24 bytes */
        orig_local_addr = update_buff_addr;
        kal_mem_cpy((kal_uint8 *)update_buff_addr, (kal_uint8 *)temp_ipc_ilm->local_para_ptr,
                    temp_ipc_ilm->local_para_ptr->msg_len);
    }
    if (temp_ipc_ilm->peer_buff_ptr != NULL) {
        /* copy the peer_buff_struct to the GPD */
        if (temp_ipc_ilm->local_para_ptr != NULL) {
            update_buff_addr += temp_ipc_ilm->local_para_ptr->msg_len; /* should this be 4-byte aligned? */
        } else {
            update_buff_addr += sizeof(ipc_ilm_t); /* 24 bytes */
        }
        orig_peer_addr = update_buff_addr;
        kal_mem_cpy((kal_uint8 *)update_buff_addr, (kal_uint8 *)temp_ipc_ilm->peer_buff_ptr,
                    sizeof(peer_buff_struct)
                    + temp_ipc_ilm->peer_buff_ptr->pdu_len
                    + temp_ipc_ilm->peer_buff_ptr->free_header_space
                    + temp_ipc_ilm->peer_buff_ptr->free_tail_space);
    }

    free_local_para(temp_ipc_ilm->local_para_ptr);
    temp_ipc_ilm->local_para_ptr = (local_para_struct *)orig_local_addr; /* a non-NULL pointer indicates there is content */
    free_peer_buff(temp_ipc_ilm->peer_buff_ptr);
    temp_ipc_ilm->peer_buff_ptr = (peer_buff_struct *)orig_peer_addr;    /* a non-NULL pointer indicates there is content */

    QBM_CACHE_FLUSH(p_ccci_buff, len);

    if (KAL_TRUE == kal_query_systemInit()) { /* polling mode */
        result = ccci_polling_io(ccci_ipc_ch.send_channel, ccci_ipc_ch.p_polling_gpd, KAL_TRUE);
        CCCIDEV_RST_CCCI_COMM_GPD_LIST(ccci_ipc_ch.p_polling_gpd, ccci_ipc_ch.p_polling_gpd);
    } else {
        result = ccci_ipc_ch.ccci_write_gpd(ccci_ipc_ch.send_channel, &ior, NULL);
        if (KAL_INFINITE_WAIT == wait_mode && CCCI_SUCCESS == result) {
            /* wait for feedback via the retrieve event */
            kal_retrieve_eg_events(ccci_ipc_ch.event, 1 << i, KAL_AND_CONSUME,
                                   &retrieved_events, KAL_SUSPEND);
        }
    }

    /* exit the critical section */
    CCCI_IPC_EXIT_CRITICAL_SECTION
    ((CCCI_IPC_ILM_T *)buffer_ptr)->used = 0;

    ccci_ipc_trace(CCCI_IPC_TRACE, CCCI_IPC_MOD_DATA, CCCI_SEND_MSG_TRA_CCCI,
                   p_ccci_buff->data[0], p_ccci_buff->data[1],
                   p_ccci_buff->channel, p_ccci_buff->reserved);
    ccci_ipc_trace(CCCI_IPC_TRACE, CCCI_IPC_MOD_DATA, CCCI_SEND_MSG_FUNC_PASS_TRA);

    /* finish */
    if (result == CCCI_SUCCESS) {
        return KAL_TRUE;
    } else {
        return KAL_FALSE;
    }
}
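/*
 * Illustrative usage (hypothetical call site): sending an ILM to an AP-side
 * task through CCCI IPC. The task and message IDs below are placeholders;
 * real IDs come from ccci_ipc_maptbl[] and the platform message catalogue,
 * and the ipc_msgsvc_allocate_ilm() signature is assumed.
 */
#if 0
void example_send_to_ap(void)
{
    ipc_ilm_t *ilm = ipc_msgsvc_allocate_ilm(MOD_EXAMPLE /* placeholder src task */);

    if (ilm == NULL)
        return;
    ilm->msg_id         = MSG_ID_EXAMPLE_REQ; /* placeholder message ID */
    ilm->local_para_ptr = NULL;               /* no payload in this example */
    ilm->peer_buff_ptr  = NULL;

    /* blocking send: KAL_INFINITE_WAIT waits for the write-done event */
    if (!ccci_ipc_send_msg(EXAMPLE_AP_TASK_ID /* placeholder */, ilm,
                           sizeof(ipc_ilm_t), KAL_INFINITE_WAIT, KAL_FALSE)) {
        /* send failed: ilm->used was cleared by ccci_ipc_send_msg() */
    }
}
#endif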