/*
 * Uplink write for the boot/control channel (CH_CTRL_TX).
 *
 * Before the modem ROM is ready (boot state < MD_ROM_BOOT_READY) the payload
 * is raw xboot traffic (XBOOT_CMD acks or binary image chunks) and is pushed
 * straight onto a software TX queue; once boot completes, the packet is a
 * normal CCCI message and is forwarded to eemcs_ccci_UL_write_skb_to_swq().
 *
 * @skb: uplink packet; NULL is legal and merely kicks the DF process.
 *       Ownership note: skb is handed to hif_ul_write_swq(); on the
 *       KAL_FAIL path the comment below says the device layer frees it.
 * @return KAL_SUCCESS, or KAL_FAIL if exception mode rejects the channel.
 */
KAL_INT32 eemcs_ccci_boot_UL_write_skb_to_swq(struct sk_buff *skb)
{
    XBOOT_CMD *p_xcmd = NULL;
    KAL_UINT32 tx_queue_idx = 0;
    KAL_INT32 ret = KAL_SUCCESS;
#ifdef __EEMCS_EXPT_SUPPORT__
    ccci_expt_port_cfg *expt_port_info;
    EEMCS_EXCEPTION_STATE mode = EEMCS_EX_INVALID;
#endif
    DEBUG_LOG_FUNCTION_ENTRY;
    // Use boot state, not EEMCS state
    //if (check_device_state() < EEMCS_MOLY_HS_P1) {
    if (eemcs_boot_get_state() < MD_ROM_BOOT_READY) {
        if (NULL != skb) {
            p_xcmd = (XBOOT_CMD *)skb->data;
            /* MAGIC_MD_CMD_ACK marks an xboot command ack; anything else is
             * treated as a chunk of the MD binary being downloaded. */
            if (p_xcmd->magic == (unsigned int)MAGIC_MD_CMD_ACK) {
                KAL_ASSERT(p_xcmd->msg_id < CMDID_MAX);
                DBGLOG(CCCI, DBG, "XBOOT_CMD: [TX]0x%X, 0x%X, 0x%X, 0x%X",
                       p_xcmd->magic, p_xcmd->msg_id, p_xcmd->status, p_xcmd->reserved[0]);
            } else {
                KAL_ASSERT(skb->len > 0);
                DBGLOG(CCCI, DBG, "XBOOT_BIN: get %dByte bin to md", skb->len);
            }
        } else {
            /* NULL skb is a deliberate "kick" to the data-flow layer. */
            DBGLOG(CCCI, WAR, "CH_CTRL_TX write NULL skb to kick DF process!");
        }
#ifdef __EEMCS_EXPT_SUPPORT__
        if (is_exception_mode(&mode)) {
            if (is_valid_exception_tx_channel(CH_CTRL_TX)) {
                expt_port_info = get_expt_port_info(ccci_ch_to_port(CH_CTRL_TX));
                /* set exception TX Q*/
                tx_queue_idx = expt_port_info->expt_txq_id;
                DBGLOG(CCCI, DBG, "[EXPT]boot_write_skb_to_swq: ch=%d, txq=%d",
                       CH_CTRL_TX, tx_queue_idx);
                hif_ul_write_swq(tx_queue_idx, skb);
            } else {
                DBGLOG(CCCI, ERR, "[EXPT]Invalid exception channel(%d)", CH_CTRL_TX);
                /*
                 * if KAL_FAIL is returned, skb is freed at device layer
                 * we don't have to free it here
                 */
                //eemcs_ex_ccci_tx_drop(ccci_ch_to_port(chn));
                ret = KAL_FAIL;
            }
        } else
#endif
        {
            /* Normal (non-exception) path: route by the port's configured TX queue. */
            tx_queue_idx = ccci_port_info[ccci_ch_to_port(CH_CTRL_TX)].txq_id;
            hif_ul_write_swq(tx_queue_idx, skb);
        }
    } else {
        /* Boot finished: hand off to the generic CCCI uplink path. */
        eemcs_ccci_UL_write_skb_to_swq(CH_CTRL_TX, skb);
    }
    DEBUG_LOG_FUNCTION_LEAVE;
    return ret;
}
/*
 * Generic CCCI uplink write (sequence-number-enabled variant).
 *
 * Validates the CCCI header against the logical channel, optionally stamps a
 * TX sequence number (DBG_FEATURE_ADD_CCCI_SEQNO), then queues the skb on the
 * appropriate software TX queue: queue 0 for a forced MD assert, the
 * exception queue in exception mode, otherwise the port's normal txq.
 *
 * @chn: logical CCCI channel the caller is writing on.
 * @skb: uplink packet whose data starts with a CCCI_BUFF_T header; NULL is
 *       legal and merely kicks the DF process.
 * @return KAL_SUCCESS, or KAL_FAIL if exception mode rejects the channel
 *         (skb is then freed at the device layer per the comment below).
 */
KAL_INT32 eemcs_ccci_UL_write_skb_to_swq(CCCI_CHANNEL_T chn, struct sk_buff *skb)
{
    //KAL_DBGPRINT(KAL, DBG_INFO,("====> %s, chn: %d\n", KAL_FUNC_NAME, chn)) ;
    //return mtlte_df_UL_write_skb_to_swq(df_ul_ccci_ch_to_q[chn], skb) ;
    CCCI_BUFF_T *pccci_h = NULL;
    KAL_UINT32 tx_queue_idx;
    KAL_INT32 ret = KAL_SUCCESS;
#ifdef __EEMCS_EXPT_SUPPORT__
    ccci_expt_port_cfg *expt_port_info;
    EEMCS_EXCEPTION_STATE mode = EEMCS_EX_INVALID;
#endif
    KAL_UINT32 channel = 0;
    bool force_md_assert_flag = false;
#if defined (DBG_FEATURE_ADD_CCCI_SEQNO)
    /* NOTE(review): spin_lock_irqsave() expects an unsigned long flags lvalue;
     * KAL_UINT64 presumably aliases that on this platform — confirm. */
    KAL_UINT64 flag;
#endif
    DEBUG_LOG_FUNCTION_ENTRY;
    if (NULL != skb) {
        pccci_h = (CCCI_BUFF_T *)skb->data;
        DBGLOG(CCCI, DBG, "[TX]CCCI_H: 0x%x, 0x%x, 0x%x, 0x%x",\
            pccci_h->data[0], pccci_h->data[1], pccci_h->channel, pccci_h->reserved);
#if defined (DBG_FEATURE_ADD_CCCI_SEQNO)
        /* With seqno enabled, fold seq_num/assert_bit into the compare word so
         * the force-reset magic still matches after stamping. */
        channel = (pccci_h->channel)|(pccci_h->seq_num <<16)|(pccci_h->assert_bit <<31);
#else
        channel = pccci_h->channel;
#endif
        if (channel == CCCI_FORCE_RESET_MODEM_CHANNEL)
            force_md_assert_flag = true;
        //1. seperate data and ack packet for ccmni0&1 UL
#if defined (TDD_DL_DROP_SOLUTION2)
        if ((chn != CH_NET1_TX) && (chn != CH_NET2_TX) && !force_md_assert_flag) {
            KAL_ASSERT(pccci_h->channel == chn);
        }
#else
        KAL_ASSERT(pccci_h->channel == chn);
#endif
        //2. ccci channel check
        KAL_ASSERT(pccci_h->channel < CH_NUM_MAX || force_md_assert_flag);
        //3. fs packet check: the value of reserve is less than fs_buf_max_num=5
        if ((pccci_h->channel == CH_FS_TX) && (pccci_h->reserved > 0x4)) {
            /* Fix: skb->data is unsigned char *; the implicit conversion to
             * int * was a constraint violation — cast explicitly, matching the
             * CCCI_BUFF_T cast above.
             * NOTE(review): dumps 8 words (32 bytes); assumes FS packets are
             * at least that long — confirm against FS port config. */
            int *pdata = (int *)skb->data;
            DBGLOG(CCCI, ERR, "[TX]FS: 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x",\
                *pdata, *(pdata+1), *(pdata+2), *(pdata+3),
                *(pdata+4), *(pdata+5), *(pdata+6), *(pdata+7));
        }
    } else {
        /* NULL skb is a deliberate "kick" to the data-flow layer. */
        DBGLOG(CCCI, WAR, "CH%d write NULL skb to kick DF process!", chn);
    }
    if (force_md_assert_flag) {
        /* Forced MD assert always goes out on TX queue 0. */
        tx_queue_idx = 0;
        hif_ul_write_swq(tx_queue_idx, skb);
    } else {
#ifdef __EEMCS_EXPT_SUPPORT__
        if (is_exception_mode(&mode)) {
            if (is_valid_exception_tx_channel(chn)) {
                /* add sequence number in ccci header */
#if defined (DBG_FEATURE_ADD_CCCI_SEQNO)
                if (likely(pccci_h)) {
                    spin_lock_irqsave(&ccci_seqno_tbl[pccci_h->channel].tx_seqno_lock, flag);
                    pccci_h->seq_num = ccci_seqno_tbl[pccci_h->channel].seqno[TX]++;
                    pccci_h->assert_bit = 1; //why assert_bit=1 instead of assert_bit=0????
                    spin_unlock_irqrestore(&ccci_seqno_tbl[pccci_h->channel].tx_seqno_lock, flag);
                    DBGLOG(CCCI, DBG, "[TX] CH%d:ch=%d seq_num=(0x%02X->0x%02X) assert_bit=%d channel=0x%08X", chn, \
                        pccci_h->channel, pccci_h->seq_num, ccci_seqno_tbl[pccci_h->channel].seqno[TX], \
                        pccci_h->assert_bit, channel);
                }
#endif
                expt_port_info = get_expt_port_info(ccci_ch_to_port(chn));
                /* set exception TX Q*/
                tx_queue_idx = expt_port_info->expt_txq_id;
                hif_ul_write_swq(tx_queue_idx, skb);
                atomic_dec(&ccci_port_info[ccci_ch_to_port(chn)].reserve_space);
                atomic_dec(&ccci_tx_waitq[tx_queue_idx].reserve_space);
                eemcs_update_statistics(0, ccci_ch_to_port(chn), TX, NORMAL);
            } else {
                DBGLOG(CCCI, WAR, "[EXPT] Invalid exception channel(%d)!", chn);
                /*
                 * if KAL_FAIL is returned, skb is freed at device layer
                 * we don't have to free it here
                 */
                //eemcs_ex_ccci_tx_drop(ccci_ch_to_port(chn));
                ret = KAL_FAIL;
            }
        } else
#endif
        {
            /* add sequence number in ccci header */
#if defined (DBG_FEATURE_ADD_CCCI_SEQNO)
            if (likely(pccci_h)) {
                spin_lock_irqsave(&ccci_seqno_tbl[pccci_h->channel].tx_seqno_lock, flag);
                pccci_h->seq_num = ccci_seqno_tbl[pccci_h->channel].seqno[TX]++;
                pccci_h->assert_bit = 1; //why assert_bit=1 instead of assert_bit=0????
                spin_unlock_irqrestore(&ccci_seqno_tbl[pccci_h->channel].tx_seqno_lock, flag);
                DBGLOG(CCCI, DBG, "[TX] CH%d:ch=%d seq_num=(0x%02X->0x%02X) assert_bit=%d channel=0x%08X", chn, \
                    pccci_h->channel, pccci_h->seq_num, ccci_seqno_tbl[pccci_h->channel].seqno[TX], \
                    pccci_h->assert_bit, channel);
            }
#endif
            tx_queue_idx = ccci_port_info[ccci_ch_to_port(chn)].txq_id;
            hif_ul_write_swq(tx_queue_idx, skb);
            atomic_dec(&ccci_port_info[ccci_ch_to_port(chn)].reserve_space);
            atomic_dec(&ccci_tx_waitq[tx_queue_idx].reserve_space);
            eemcs_update_statistics(0, ccci_ch_to_port(chn), TX, NORMAL);
        }
    }
    DEBUG_LOG_FUNCTION_LEAVE;
    return ret;
}
/*
 * Generic CCCI uplink write (plain variant, no sequence numbering).
 *
 * Validates the CCCI header against the logical channel, then queues the skb
 * on the appropriate software TX queue: queue 0 for a forced MD assert, the
 * exception queue in exception mode, otherwise the port's normal txq.
 *
 * @chn: logical CCCI channel the caller is writing on.
 * @skb: uplink packet whose data starts with a CCCI_BUFF_T header; NULL is
 *       legal and merely kicks the DF process.
 * @return KAL_SUCCESS, or KAL_FAIL if exception mode rejects the channel
 *         (skb is then freed at the device layer per the comment below).
 */
KAL_INT32 eemcs_ccci_UL_write_skb_to_swq(CCCI_CHANNEL_T chn, struct sk_buff *skb)
{
    //KAL_DBGPRINT(KAL, DBG_INFO,("====> %s, chn: %d\n", KAL_FUNC_NAME, chn)) ;
    //return mtlte_df_UL_write_skb_to_swq(df_ul_ccci_ch_to_q[chn], skb) ;
    CCCI_BUFF_T *pccci_h = NULL;
    KAL_UINT32 tx_queue_idx;
    KAL_INT32 ret = KAL_SUCCESS;
#ifdef __EEMCS_EXPT_SUPPORT__
    ccci_expt_port_cfg *expt_port_info;
    EEMCS_EXCEPTION_STATE mode = EEMCS_EX_INVALID;
#endif
    DEBUG_LOG_FUNCTION_ENTRY;
    if (NULL != skb) {
        pccci_h = (CCCI_BUFF_T *)skb->data;
        DBGLOG(CCCI, DBG, "[TX]CCCI_H: 0x%x, 0x%x, 0x%x, 0x%x",\
            pccci_h->data[0], pccci_h->data[1], pccci_h->channel, pccci_h->reserved);
        KAL_ASSERT(pccci_h->channel == chn);
        KAL_ASSERT(pccci_h->channel < CH_NUM_MAX || pccci_h->channel == CCCI_FORCE_RESET_MODEM_CHANNEL);
    } else {
        /* NULL skb is a deliberate "kick" to the data-flow layer. */
        DBGLOG(CCCI, WAR, "CH%d write NULL skb to kick DF process!", chn);
    }
    /*
     * Fix: pccci_h is NULL when the caller passed a NULL "kick" skb; the
     * original dereferenced it unconditionally here (NULL pointer deref).
     * A NULL skb now falls through to the normal queue path, matching the
     * seqno-enabled variant of this function.
     */
    if ((NULL != pccci_h) && (pccci_h->channel == CCCI_FORCE_RESET_MODEM_CHANNEL)) {
        /* Forced MD assert always goes out on TX queue 0. */
        tx_queue_idx = 0;
        hif_ul_write_swq(tx_queue_idx, skb);
    } else {
#ifdef __EEMCS_EXPT_SUPPORT__
        if (is_exception_mode(&mode)) {
            if (is_valid_exception_tx_channel(chn)) {
                expt_port_info = get_expt_port_info(ccci_ch_to_port(chn));
                /* set exception TX Q*/
                tx_queue_idx = expt_port_info->expt_txq_id;
                DBGLOG(CCCI, DBG, "[EXPT] ccci_UL_write_skb_to_swq write skb to DF: ch=%d, txq=%d", chn, tx_queue_idx);
                hif_ul_write_swq(tx_queue_idx, skb);
                atomic_dec(&ccci_port_info[ccci_ch_to_port(chn)].reserve_space);
                atomic_dec(&ccci_tx_waitq[tx_queue_idx].reserve_space);
                eemcs_update_statistics(0, ccci_ch_to_port(chn), TX, NORMAL);
            } else {
                DBGLOG(CCCI, WAR, "[EXPT] Invalid exception channel(%d)!", chn);
                /*
                 * if KAL_FAIL is returned, skb is freed at device layer
                 * we don't have to free it here
                 */
                //eemcs_ex_ccci_tx_drop(ccci_ch_to_port(chn));
                ret = KAL_FAIL;
            }
        } else
#endif
        {
            tx_queue_idx = ccci_port_info[ccci_ch_to_port(chn)].txq_id;
            hif_ul_write_swq(tx_queue_idx, skb);
            atomic_dec(&ccci_port_info[ccci_ch_to_port(chn)].reserve_space);
            atomic_dec(&ccci_tx_waitq[tx_queue_idx].reserve_space);
            eemcs_update_statistics(0, ccci_ch_to_port(chn), TX, NORMAL);
        }
    }
    DEBUG_LOG_FUNCTION_LEAVE;
    return ret;
}