/*
 * eemcs_cdev_rx_callback - RX callback for character-device CCCI ports.
 * @skb:          received packet; first bytes are a CCCI_BUFF_T header.
 * @private_data: unused.
 *
 * If the target cdev node is open, the skb is queued on its rx list and the
 * reader is woken; otherwise the packet is dropped and accounted.
 * Returns KAL_SUCCESS in all cases.
 */
static KAL_INT32 eemcs_cdev_rx_callback(struct sk_buff *skb, KAL_UINT32 private_data)
{
    CCCI_BUFF_T *p_cccih = NULL;
    KAL_UINT32 port_id;

    DEBUG_LOG_FUNCTION_ENTRY;

    /* BUGFIX: the original only guarded the debug log with "if (skb)" and then
     * unconditionally dereferenced p_cccih->channel, crashing on a NULL skb.
     * Bail out early instead (same approach as the newer revision of this
     * callback elsewhere in the file). */
    if (skb == NULL) {
        DEBUG_LOG_FUNCTION_LEAVE;
        return KAL_SUCCESS;
    }

    p_cccih = (CCCI_BUFF_T *)skb->data;
    DBGLOG(CHAR, DBG, "cdev_rx_callback: CCCI_H(0x%x)(0x%x)(0x%x)(0x%x)",\
        p_cccih->data[0],p_cccih->data[1],p_cccih->channel, p_cccih->reserved );

    port_id = ccci_ch_to_port(p_cccih->channel);

    if(CDEV_OPEN == atomic_read(&eemcs_cdev_inst.cdev_node[PORT2IDX(port_id)].cdev_state)){
        /* spin_lock_irqsave taken inside, refer to skbuff.c */
        skb_queue_tail(&eemcs_cdev_inst.cdev_node[PORT2IDX(port_id)].rx_skb_list, skb);
        atomic_inc(&eemcs_cdev_inst.cdev_node[PORT2IDX(port_id)].rx_pkt_cnt); /* increase rx_pkt_cnt */
        eemcs_update_statistics_number(0, port_id, RX, QUEUE, \
            atomic_read(&eemcs_cdev_inst.cdev_node[PORT2IDX(port_id)].rx_pkt_cnt));
        wake_up(&eemcs_cdev_inst.cdev_node[PORT2IDX(port_id)].rx_waitq); /* wake up rx_waitq */
    }else{
        if(port_id != CCCI_PORT_MD_LOG) /* If port_id == CCCI_PORT_MD_LOG, skip drop info (request by ST team)*/
        {
            DBGLOG(CHAR, ERR, "!!! PKT DROP when cdev(%d) close", port_id);
        }
        /* BUGFIX: release the RX accounting BEFORE freeing the skb; the
         * original called dev_kfree_skb() first and then handed the freed
         * pointer to eemcs_ccci_release_rx_skb() (use-after-free). */
        eemcs_ccci_release_rx_skb(port_id, 1, skb);
        dev_kfree_skb(skb);
        eemcs_update_statistics(0, port_id, RX, DROP);
    }

    DEBUG_LOG_FUNCTION_LEAVE;
    return KAL_SUCCESS ;
}
KAL_UINT32 ccci_ul_lb_queue(struct sk_buff *skb) { CCCI_BUFF_T *pccci_h; KAL_UINT32 port_id; KAL_UINT32 rx_qno; ccci_port_cfg *ccci_port_info; #ifdef _EEMCS_EXCEPTION_UT ccci_expt_port_cfg *ccci_expt_port_info; #endif pccci_h = (CCCI_BUFF_T *)skb->data; port_id = ccci_ch_to_port(pccci_h->channel); DBGLOG(CCCI,DBG, "[CCCI_UT]=========tx_ch(%d) is mapped to PORT(%d)", pccci_h->channel, port_id); #ifdef _EEMCS_EXCEPTION_UT if(is_exception_mode(NULL)) { ccci_expt_port_info = get_expt_port_info(port_id); rx_qno = SDIO_RXQ(ccci_expt_port_info->expt_rxq_id); } else #endif { ccci_port_info = ccci_get_port_info(port_id); rx_qno = SDIO_RXQ(ccci_port_info->rxq_id); } DBGLOG(CCCI,DBG, "[CCCI_UT]=========Loopback Rxqno(%d)", rx_qno); return rx_qno; }
/*
 * eemcs_cdev_rx_callback (newer revision) - RX callback for cdev CCCI ports.
 * @skb:          received packet; first bytes are a CCCI_BUFF_T header.
 * @private_data: unused.
 *
 * Unlike the older revision, a NULL skb returns early here, and the
 * CDEV_OPEN state check is commented out so packets are queued on the
 * port's rx list unconditionally.
 *
 * NOTE(review): this definition is truncated in the current view — it ends
 * inside a "#if 0" that disables the legacy drop path; the remainder
 * continues past this chunk.
 */
static KAL_INT32 eemcs_cdev_rx_callback(struct sk_buff *skb, KAL_UINT32 private_data)
{
    CCCI_BUFF_T *p_cccih = NULL;
    KAL_UINT32 port_id;
    DEBUG_LOG_FUNCTION_ENTRY;
    if (skb){
        p_cccih = (CCCI_BUFF_T *)skb->data;
        DBGLOG(CHAR, DBG, "cdev_rx_callback: CCCI_H(0x%08X, 0x%08X, %02d, 0x%08X",\
            p_cccih->data[0],p_cccih->data[1],p_cccih->channel, p_cccih->reserved );
    } else {
        /* NULL skb: nothing to deliver */
        DEBUG_LOG_FUNCTION_LEAVE;
        return KAL_SUCCESS;
    }
    port_id = ccci_ch_to_port(p_cccih->channel);
    /* open-state gate deliberately disabled in this revision: */
    //if(CDEV_OPEN == atomic_read(&eemcs_cdev_inst.cdev_node[PORT2IDX(port_id)].cdev_state))
    {
        skb_queue_tail(&eemcs_cdev_inst.cdev_node[PORT2IDX(port_id)].rx_skb_list, skb); /* spin_lock_ireqsave inside, refering skbuff.c */
        atomic_inc(&eemcs_cdev_inst.cdev_node[PORT2IDX(port_id)].rx_pkt_cnt); /* increase rx_pkt_cnt */
        eemcs_update_statistics_number(0, port_id, RX, QUEUE, \
            atomic_read(&eemcs_cdev_inst.cdev_node[PORT2IDX(port_id)].rx_pkt_cnt));
        wake_up(&eemcs_cdev_inst.cdev_node[PORT2IDX(port_id)].rx_waitq); /* wake up rx_waitq */
#if 0
    }else{
/*
 * is_valid_exception_tx_channel - check whether a CCCI TX channel may be
 * used while the modem is in exception mode.
 * @chn: CCCI channel to test.
 *
 * Returns the verdict of is_valid_exception_port() for the port the
 * channel maps to (the second argument selects the TX direction check).
 */
kal_bool is_valid_exception_tx_channel(CCCI_CHANNEL_T chn)
{
    return is_valid_exception_port(ccci_ch_to_port(chn), false);
}
/*
 * eemcs_ccci_UL_write_room_release - give back one unit of UL TX space
 * previously reserved for @chn by eemcs_ccci_UL_write_room_alloc().
 * @chn: CCCI channel whose reservation is released.
 *
 * Decrements both the per-port and the per-TX-queue reservation counters.
 * Always returns 0.
 */
KAL_UINT32 eemcs_ccci_UL_write_room_release(CCCI_CHANNEL_T chn)
{
    KAL_UINT32 port = ccci_ch_to_port(chn);
    KAL_UINT32 txq = ccci_port_info[port].txq_id;

    atomic_dec(&ccci_port_info[port].reserve_space);
    atomic_dec(&ccci_tx_waitq[txq].reserve_space);
    return 0;
}
/*
 * eemcs_ccci_boot_UL_write_room_check - query free UL space on the boot
 * (control) channel's TX software queue.
 *
 * Returns the free-slot count reported by hif_ul_swq_space() for the
 * TX queue that CH_CTRL_TX's port is configured to use.
 */
KAL_UINT32 eemcs_ccci_boot_UL_write_room_check(void)
{
    KAL_UINT32 boot_port = ccci_ch_to_port(CH_CTRL_TX);
    KAL_UINT32 txq = ccci_port_info[boot_port].txq_id;

    return hif_ul_swq_space(txq);
}
/*
 * eemcs_ccci_unregister_callback - detach the RX callback from a channel.
 * @chn: CCCI channel to unhook.
 *
 * Clears the rx_cb pointer of the port mapped to @chn so no further RX
 * delivery happens for it. Returns KAL_SUCCESS.
 */
KAL_UINT32 eemcs_ccci_unregister_callback(CCCI_CHANNEL_T chn)
{
    KAL_UINT32 port;

    DEBUG_LOG_FUNCTION_ENTRY;
    DBGLOG(CCCI, DBG, "CCCI channel (%d) UNregister callback", chn);
    port = ccci_ch_to_port(chn);
    ccci_port_info[port].ch.rx_cb = NULL;
    DEBUG_LOG_FUNCTION_LEAVE;
    return KAL_SUCCESS;
}
/*
 * eemcs_ccci_register_callback - attach an RX callback to a channel.
 * @chn:          CCCI channel to hook.
 * @func_ptr:     callback invoked for packets received on @chn's port.
 * @private_data: unused here (not stored).
 *
 * Returns KAL_SUCCESS.
 */
KAL_UINT32 eemcs_ccci_register_callback(CCCI_CHANNEL_T chn, EEMCS_CCCI_CALLBACK func_ptr , KAL_UINT32 private_data)
{
    KAL_UINT32 port;

    DEBUG_LOG_FUNCTION_ENTRY;
    port = ccci_ch_to_port(chn);
    ccci_port_info[port].ch.rx_cb = func_ptr;
    DBGLOG(CCCI, DBG, "PORT%d(ch%d) register rx callback", port, chn);
    DEBUG_LOG_FUNCTION_LEAVE;
    return KAL_SUCCESS;
}
/*
 * eemcs_ccci_UL_write_wait - block until @chn's UL TX queue has room.
 * @chn: CCCI channel whose TX queue is awaited.
 *
 * Sleeps (interruptibly, exclusive wakeup) until the software queue has
 * more free space than is already reserved. Returns the value of
 * wait_event_interruptible_exclusive() (0, or -ERESTARTSYS on signal).
 */
KAL_UINT32 eemcs_ccci_UL_write_wait(CCCI_CHANNEL_T chn)
{
    KAL_UINT32 port = ccci_ch_to_port(chn);
    KAL_UINT32 txq = ccci_port_info[port].txq_id;
    KAL_UINT32 rc;

    rc = wait_event_interruptible_exclusive(ccci_tx_waitq[txq].tx_waitq,
            (hif_ul_swq_space(txq) - atomic_read(&ccci_tx_waitq[txq].reserve_space)) > 0);
    DBGLOG(CCCI, TRA, "ccci_write_wait: wakeup port%d, txq=%d, ret=%d", port, txq, rc);
    return rc;
}
void ccci_ul_lb_channel(struct sk_buff *skb){ CCCI_BUFF_T *pccci_h; KAL_UINT32 port_id, tx_ch, rx_ch; pccci_h = (CCCI_BUFF_T *)skb->data; port_id = ccci_ch_to_port(pccci_h->channel); tx_ch = pccci_h->channel; rx_ch = ccci_port_info[port_id].ch.rx; pccci_h->channel = rx_ch; DBGLOG(CCCI,DBG, "[CCCI_UT]=========PORT(%d) tx_ch(%d) LB to rx_ch(%d)",\ port_id, tx_ch, rx_ch); if(KAL_SUCCESS == ccci_is_net_ch(tx_ch)) { eccmni_swap(skb); } }
/*
 * eemcs_ccci_UL_write_room_alloc - try to reserve one unit of UL TX queue
 * space for @chn (used by poll/write paths before queuing an skb).
 * @chn: CCCI channel requesting space.
 *
 * Returns >0 when space is available/reserved, 0 when the queue is full.
 * In exception mode the check is bypassed (always returns 1); the real
 * exception check happens when the skb is written to the software queue.
 */
KAL_UINT32 eemcs_ccci_UL_write_room_alloc(CCCI_CHANNEL_T chn)
{
    KAL_UINT32 port_id = 0;
    KAL_UINT32 tx_queue_idx=0;
    KAL_UINT32 ret = 0;
#ifdef __EEMCS_EXPT_SUPPORT__
    EEMCS_EXCEPTION_STATE mode = EEMCS_EX_INVALID;
#endif
    port_id = ccci_ch_to_port(chn);
#ifdef __EEMCS_EXPT_SUPPORT__
    /*we just return 1 to bypass this check. exception check will be taken when writing SKB to swq */
    if(is_exception_mode(&mode)) {
        return 1;
    }
#endif
    tx_queue_idx = ccci_port_info[port_id].txq_id;
    // for poll: if this port already holds a reservation, report room without taking another
    if(atomic_read(&ccci_port_info[port_id].reserve_space) >= 1){
        DBGLOG(CCCI, TRA, "ccci_write_room_alloc: port%d write space has reserved",port_id);
        return 1;
    }
    /* free slots minus what other waiters have already reserved.
     * NOTE(review): ret is unsigned — if reserve_space ever exceeded the free
     * space this subtraction would wrap to a huge value and still count as
     * "room"; presumably reservations never exceed space — confirm. */
    ret = hif_ul_swq_space(tx_queue_idx) - atomic_read(&ccci_tx_waitq[tx_queue_idx].reserve_space);
    if (ret > 0){
        /* take one reservation on both the port and its TX queue */
        atomic_inc(&ccci_port_info[port_id].reserve_space);
        atomic_inc(&ccci_tx_waitq[tx_queue_idx].reserve_space);
    } else {
        DBGLOG(CCCI, INF, "ccci_write_room_alloc: port%d, tx_qlen=%d, resv=%d, ret=%d", \
            port_id, mtlte_df_UL_pkt_in_swq(tx_queue_idx), atomic_read(&ccci_tx_waitq[tx_queue_idx].reserve_space), ret);
    }
    DBGLOG(CCCI, TRA, "ccci_write_room_alloc: txq=%d, size=%d",tx_queue_idx, ret);
    return ret;
}
/*
 * ccci_df_to_ccci_callback (seqno-aware revision) - data-flow layer RX
 * dispatcher: pops one skb from RX software queue @rxq_no and delivers it
 * to either the xBoot handler (boot-stage commands on RXQ_Q0) or the
 * registered per-port CCCI RX callback.
 * @rxq_no: RX software queue index to service.
 *
 * Steps: (1) read skb, (2) ack handle-complete, (3) classify buffer as
 * xBoot command vs. CCCI packet, (4) dispatch. Optionally verifies the
 * per-channel RX sequence number and forces an MD assert on a gap.
 * Returns the callback's result, or KAL_FAIL on drop.
 */
int ccci_df_to_ccci_callback(unsigned int rxq_no)
{
    int ret, hc_ret;
    bool is_xcmd = false;
    struct sk_buff * skb = NULL;
    CCCI_BUFF_T *ccci_h = NULL;
    XBOOT_CMD *p_xcmd = NULL;
    KAL_UINT32 port_id = CCCI_PORT_CTRL;
    /* per-port counter used to rate-limit "callback not registered" logs */
    static KAL_UINT32 rx_err_cnt[CCCI_PORT_NUM_MAX] = {0};
#ifdef __EEMCS_EXPT_SUPPORT__
    EEMCS_EXCEPTION_STATE mode = EEMCS_EX_INVALID;
#endif
#if defined (DBG_FEATURE_ADD_CCCI_SEQNO)
    KAL_INT16 channel, seq_num, assert_bit;
#endif
    DEBUG_LOG_FUNCTION_ENTRY;
    /* Step 1. read skb from swq */
    skb = hif_dl_read_swq(rxq_no);
    if(skb == NULL) {
        DBGLOG(CCCI, DBG, "ccci_df_to_ccci_callback read NULL skb on %d", rxq_no);
        /* empty queue is tolerated in exception mode, fatal otherwise */
        if(is_exception_mode(&mode))
            return KAL_FAIL;
        else
            KAL_ASSERT(NULL != skb);
    }
    /* Step 2. call handle complete */
    hc_ret = hif_dl_pkt_handle_complete(rxq_no);
    KAL_ASSERT(0 == hc_ret);
    wake_lock_timeout(&eemcs_wake_lock, HZ/2); // Using 0.5s wake lock
    /* Step 3. buffer type: only RXQ_Q0 can carry xBoot commands, and only
     * before the handshake reaches EEMCS_MOLY_HS_P1 */
    if (rxq_no == RXQ_Q0) {
        //is_xcmd = is_xboot_command(skb);
        p_xcmd = (XBOOT_CMD *)skb->data;
        if (p_xcmd->magic == (KAL_UINT32)MAGIC_MD_CMD) {
            if (check_device_state() >= EEMCS_MOLY_HS_P1) {
                DBGLOG(CCCI, ERR, "can't recv xBoot cmd when EEMCS state=%d", check_device_state());
            } else {
                is_xcmd = true;
            }
        }
    }
    if (is_xcmd) {
        /* Step 4. callback to xBoot (port_id still == CCCI_PORT_CTRL here) */
        CDEV_LOG(port_id, CCCI, INF, "XBOOT_CMD: 0x%08X, 0x%08X, 0x%08X, 0x%08X",\
            p_xcmd->magic, p_xcmd->msg_id, p_xcmd->status, p_xcmd->reserved[0]);
        ret = ccci_port_info[port_id].ch.rx_cb(skb, 0);
    } else {
        ccci_h = (CCCI_BUFF_T *)skb->data;
        port_id = ccci_ch_to_port(ccci_h->channel);
        CDEV_LOG(port_id, CCCI, INF, "CCCI_H: 0x%08X, 0x%08X, 0x%08X, 0x%08X",\
            ccci_h->data[0],ccci_h->data[1],ccci_h->channel, ccci_h->reserved);
        /*check rx sequence number for expect*/
#if defined (DBG_FEATURE_ADD_CCCI_SEQNO)
        channel = ccci_h->channel;
        seq_num = ccci_h->seq_num;
        assert_bit = ccci_h->assert_bit;
        DBGLOG(CCCI, DBG, "Port%d CCCI_H: data[0]=0x%08X, data[1]=0x%08X, ch=0x%02X, seqno=0x%02X, assert=%d, resv=0x%08X(0x%08X, 0x%08X, 0x%08X)",\
            port_id, ccci_h->data[0],ccci_h->data[1],ccci_h->channel, ccci_h->seq_num, \
            ccci_h->assert_bit, ccci_h->reserved, channel, seq_num, assert_bit);
        /* a valid successor differs from the last seen seqno by exactly 1
         * (mod 2^15); anything else with assert_bit set forces an MD assert */
        if(((seq_num - ccci_seqno_tbl[channel].seqno[RX]) & 0x7FFF) != 1 && assert_bit) {
            DBGLOG(CCCI, ERR, "Port%d seqno out-of-order(0x%02X->0x%02X): data[0]=0x%08X, data[1]=0x%08X, ch=0x%02X, seqno=0x%02X, assert=%d, resv=0x%08X", \
                port_id, seq_num, ccci_seqno_tbl[channel].seqno[RX], ccci_h->data[0], ccci_h->data[1], \
                ccci_h->channel, ccci_h->seq_num, ccci_h->assert_bit, ccci_h->reserved);
            hif_force_md_assert_swint();
        }
        ccci_seqno_tbl[channel].seqno[RX] = seq_num;
#endif
        /* Step 4. callback to CCCI device */
        if(NULL != ccci_port_info[port_id].ch.rx_cb){
#ifdef __EEMCS_EXPT_SUPPORT__
            if(is_exception_mode(&mode)) {
                if(!is_valid_exception_port(port_id, true)) {
                    /* port not allowed during exception: drop and account */
                    ret = KAL_FAIL;
                    dev_kfree_skb(skb);
                    /* NOTE(review): skb is passed to release AFTER being freed
                     * above — presumably only used for accounting; confirm. */
                    eemcs_ccci_release_rx_skb(port_id, 1, skb);
                    eemcs_expt_ccci_rx_drop(port_id);
                    DBGLOG(CCCI, ERR, "PKT DROP when PORT%d(rxq=%d) at md exception", \
                        port_id, rxq_no);
                    goto _end;
                } else {
                    ret = ccci_port_info[port_id].ch.rx_cb(skb, 0);
                }
            }
            else
#endif
            {
                ret = ccci_port_info[port_id].ch.rx_cb(skb, 0);
            }
            rx_err_cnt[port_id] = 0;
        } else {
            /* no callback registered: drop, with rate-limited logging */
            ret = KAL_FAIL;
            dev_kfree_skb(skb);
            eemcs_ccci_release_rx_skb(port_id, 1, skb);
            if (rx_err_cnt[port_id]%20 == 0) {
                DBGLOG(CCCI, ERR, "PKT DROP when PORT%d rx callback(ch=%d) not registered", \
                    port_id, ccci_h->channel);
            }
            rx_err_cnt[port_id]++;
            eemcs_update_statistics(0, port_id, RX, DROP);
        }
        /* NOTE(review): this NORMAL count also runs after the DROP branch
         * above — possibly intentional (total vs. dropped); confirm. */
        eemcs_update_statistics(0, port_id, RX, NORMAL);
    }
_end:
    DEBUG_LOG_FUNCTION_LEAVE;
    return ret;
}
/*
 * eemcs_ccci_boot_UL_write_skb_to_swq - UL write path for the boot/control
 * channel (CH_CTRL_TX).
 * @skb: packet to send; NULL is allowed and merely kicks the DF process.
 *
 * Before the modem ROM is ready (boot stage) the skb is treated as xBoot
 * traffic and written straight to the software queue (exception TX queue
 * when in exception mode); afterwards it is routed through the normal
 * eemcs_ccci_UL_write_skb_to_swq() path.
 * Returns KAL_SUCCESS, or KAL_FAIL when the exception channel is invalid
 * (the skb is then freed by the device layer, not here).
 */
KAL_INT32 eemcs_ccci_boot_UL_write_skb_to_swq(struct sk_buff *skb)
{
    XBOOT_CMD *p_xcmd = NULL;
    KAL_UINT32 tx_queue_idx = 0;
    KAL_INT32 ret = KAL_SUCCESS;
#ifdef __EEMCS_EXPT_SUPPORT__
    ccci_expt_port_cfg *expt_port_info;
    EEMCS_EXCEPTION_STATE mode = EEMCS_EX_INVALID;
#endif
    DEBUG_LOG_FUNCTION_ENTRY;
    // Use boot state, not EEMCS state
    //if (check_device_state() < EEMCS_MOLY_HS_P1) {
    if (eemcs_boot_get_state() < MD_ROM_BOOT_READY) {
        if (NULL != skb){
            p_xcmd = (XBOOT_CMD *)skb->data;
            if (p_xcmd->magic == (unsigned int)MAGIC_MD_CMD_ACK) {
                /* command ACK frame: sanity-check the message id */
                KAL_ASSERT(p_xcmd->msg_id < CMDID_MAX);
                DBGLOG(CCCI, DBG, "XBOOT_CMD: [TX]0x%X, 0x%X, 0x%X, 0x%X",
                    p_xcmd->magic, p_xcmd->msg_id, p_xcmd->status, p_xcmd->reserved[0]);
            } else {
                /* anything else at boot stage is raw binary payload */
                KAL_ASSERT(skb->len > 0);
                DBGLOG(CCCI, DBG, "XBOOT_BIN: get %dByte bin to md", skb->len);
            }
        } else {
            DBGLOG(CCCI, WAR, "CH_CTRL_TX write NULL skb to kick DF process!");
        }
#ifdef __EEMCS_EXPT_SUPPORT__
        if(is_exception_mode(&mode)) {
            if(is_valid_exception_tx_channel(CH_CTRL_TX)) {
                expt_port_info = get_expt_port_info(ccci_ch_to_port(CH_CTRL_TX));
                /* set exception TX Q*/
                tx_queue_idx = expt_port_info->expt_txq_id;
                DBGLOG(CCCI, DBG, "[EXPT]boot_write_skb_to_swq: ch=%d, txq=%d", CH_CTRL_TX, tx_queue_idx);
                hif_ul_write_swq(tx_queue_idx, skb);
            } else {
                DBGLOG(CCCI, ERR, "[EXPT]Invalid exception channel(%d)", CH_CTRL_TX);
                /*
                 * if KAL_FAIL is returned, skb is freed at device layer
                 * we don't have to free it here
                 */
                //eemcs_ex_ccci_tx_drop(ccci_ch_to_port(chn));
                ret = KAL_FAIL;
            }
        }
        else
#endif
        {
            /* normal boot-stage path: use the port's configured TX queue */
            tx_queue_idx = ccci_port_info[ccci_ch_to_port(CH_CTRL_TX)].txq_id;
            hif_ul_write_swq(tx_queue_idx, skb);
        }
    } else {
        /* ROM boot done: hand over to the regular CCCI UL write path */
        eemcs_ccci_UL_write_skb_to_swq(CH_CTRL_TX, skb);
    }
    DEBUG_LOG_FUNCTION_LEAVE;
    return ret;
}
/*
 * eemcs_ccci_UL_write_skb_to_swq (seqno-aware revision) - write one UL skb
 * to the software TX queue for channel @chn.
 * @chn: destination CCCI channel.
 * @skb: packet (CCCI_BUFF_T header first); NULL just kicks the DF process.
 *
 * Handles three cases: (a) the magic force-modem-reset channel goes to
 * queue 0 unconditionally; (b) in exception mode, only channels valid for
 * exception are written (to the exception TX queue), others fail; (c) the
 * normal path uses the port's configured TX queue. When
 * DBG_FEATURE_ADD_CCCI_SEQNO is on, a per-channel TX sequence number is
 * stamped into the header under a spinlock.
 * Returns KAL_SUCCESS, or KAL_FAIL for an invalid exception channel (the
 * skb is then freed by the device layer).
 */
KAL_INT32 eemcs_ccci_UL_write_skb_to_swq(CCCI_CHANNEL_T chn, struct sk_buff *skb)
{
    //KAL_DBGPRINT(KAL, DBG_INFO,("====> %s, chn: %d\n", KAL_FUNC_NAME, chn)) ;
    //return mtlte_df_UL_write_skb_to_swq(df_ul_ccci_ch_to_q[chn], skb) ;
    CCCI_BUFF_T *pccci_h = NULL;
    KAL_UINT32 tx_queue_idx;
    KAL_INT32 ret = KAL_SUCCESS;
#ifdef __EEMCS_EXPT_SUPPORT__
    ccci_expt_port_cfg *expt_port_info;
    EEMCS_EXCEPTION_STATE mode = EEMCS_EX_INVALID;
#endif
    KAL_UINT32 channel = 0;
    bool force_md_assert_flag = false;
#if defined (DBG_FEATURE_ADD_CCCI_SEQNO)
    /* NOTE(review): spin_lock_irqsave() expects an unsigned long flags
     * variable; KAL_UINT64 presumably matches on this platform — confirm. */
    KAL_UINT64 flag;
#endif
    DEBUG_LOG_FUNCTION_ENTRY;
    if(NULL != skb){
        pccci_h = (CCCI_BUFF_T *)skb->data;
        DBGLOG(CCCI, DBG, "[TX]CCCI_H: 0x%x, 0x%x, 0x%x, 0x%x",\
            pccci_h->data[0], pccci_h->data[1], pccci_h->channel, pccci_h->reserved);
        /* with seqno enabled, the reset-channel magic encodes seqno+assert
         * bits on top of the channel id, so build the comparable value */
#if defined (DBG_FEATURE_ADD_CCCI_SEQNO)
        channel = (pccci_h->channel)|(pccci_h->seq_num <<16)|(pccci_h->assert_bit <<31);
#else
        channel = pccci_h->channel;
#endif
        if (channel == CCCI_FORCE_RESET_MODEM_CHANNEL)
            force_md_assert_flag = true;
        //1. seperate data and ack packet for ccmni0&1 UL
#if defined (TDD_DL_DROP_SOLUTION2)
        if ((chn != CH_NET1_TX) && (chn != CH_NET2_TX) && !force_md_assert_flag) {
            KAL_ASSERT(pccci_h->channel == chn);
        }
#else
        KAL_ASSERT(pccci_h->channel == chn);
#endif
        //2. ccci channel check
        KAL_ASSERT(pccci_h->channel < CH_NUM_MAX || force_md_assert_flag);
        //3. fs packet check: the value of reserve is less than fs_buf_max_num=5
        if ((pccci_h->channel == CH_FS_TX) && (pccci_h->reserved > 0x4)) {
            /* dump the first 8 words of a suspicious FS packet */
            int *pdata = skb->data;
            DBGLOG(CCCI, ERR, "[TX]FS: 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x",\
                *pdata, *(pdata+1), *(pdata+2), *(pdata+3), *(pdata+4), *(pdata+5), *(pdata+6), *(pdata+7));
        }
    } else {
        DBGLOG(CCCI, WAR, "CH%d write NULL skb to kick DF process!", chn);
    }
    if(force_md_assert_flag){
        /* force-assert request always goes out on queue 0 */
        tx_queue_idx = 0;
        hif_ul_write_swq(tx_queue_idx, skb);
    }else{
#ifdef __EEMCS_EXPT_SUPPORT__
        if(is_exception_mode(&mode)) {
            if(is_valid_exception_tx_channel(chn)) {
                /* add sequence number in ccci header */
#if defined (DBG_FEATURE_ADD_CCCI_SEQNO)
                if (likely(pccci_h)) {
                    spin_lock_irqsave(&ccci_seqno_tbl[pccci_h->channel].tx_seqno_lock, flag);
                    pccci_h->seq_num = ccci_seqno_tbl[pccci_h->channel].seqno[TX]++;
                    pccci_h->assert_bit = 1; //why assert_bit=1 instead of assert_bit=0????
                    spin_unlock_irqrestore(&ccci_seqno_tbl[pccci_h->channel].tx_seqno_lock, flag);
                    DBGLOG(CCCI, DBG, "[TX] CH%d:ch=%d seq_num=(0x%02X->0x%02X) assert_bit=%d channel=0x%08X", chn, \
                        pccci_h->channel, pccci_h->seq_num, ccci_seqno_tbl[pccci_h->channel].seqno[TX], \
                        pccci_h->assert_bit, channel);
                }
#endif
                expt_port_info = get_expt_port_info(ccci_ch_to_port(chn));
                /* set exception TX Q*/
                tx_queue_idx = expt_port_info->expt_txq_id;
                hif_ul_write_swq(tx_queue_idx, skb);
                /* consume the reservation taken by write_room_alloc */
                atomic_dec(&ccci_port_info[ccci_ch_to_port(chn)].reserve_space);
                atomic_dec(&ccci_tx_waitq[tx_queue_idx].reserve_space);
                eemcs_update_statistics(0, ccci_ch_to_port(chn), TX, NORMAL);
            } else {
                DBGLOG(CCCI, WAR, "[EXPT] Invalid exception channel(%d)!", chn);
                /*
                 * if KAL_FAIL is returned, skb is freed at device layer
                 * we don't have to free it here
                 */
                //eemcs_ex_ccci_tx_drop(ccci_ch_to_port(chn));
                ret = KAL_FAIL;
            }
        }
        else
#endif
        {
            /* add sequence number in ccci header */
#if defined (DBG_FEATURE_ADD_CCCI_SEQNO)
            if (likely(pccci_h)) {
                spin_lock_irqsave(&ccci_seqno_tbl[pccci_h->channel].tx_seqno_lock, flag);
                pccci_h->seq_num = ccci_seqno_tbl[pccci_h->channel].seqno[TX]++;
                pccci_h->assert_bit = 1; //why assert_bit=1 instead of assert_bit=0????
                spin_unlock_irqrestore(&ccci_seqno_tbl[pccci_h->channel].tx_seqno_lock, flag);
                DBGLOG(CCCI, DBG, "[TX] CH%d:ch=%d seq_num=(0x%02X->0x%02X) assert_bit=%d channel=0x%08X", chn, \
                    pccci_h->channel, pccci_h->seq_num, ccci_seqno_tbl[pccci_h->channel].seqno[TX], \
                    pccci_h->assert_bit, channel);
            }
#endif
            tx_queue_idx = ccci_port_info[ccci_ch_to_port(chn)].txq_id;
            hif_ul_write_swq(tx_queue_idx, skb);
            /* consume the reservation taken by write_room_alloc */
            atomic_dec(&ccci_port_info[ccci_ch_to_port(chn)].reserve_space);
            atomic_dec(&ccci_tx_waitq[tx_queue_idx].reserve_space);
            eemcs_update_statistics(0, ccci_ch_to_port(chn), TX, NORMAL);
        }
    }
    DEBUG_LOG_FUNCTION_LEAVE;
    return ret;
}
/*
 * eemcs_ccci_UL_write_skb_to_swq (legacy revision) - write one UL skb to
 * the software TX queue for channel @chn.
 * @chn: destination CCCI channel.
 * @skb: packet (CCCI_BUFF_T header first); NULL is accepted and merely
 *       kicks the DF process.
 *
 * The force-modem-reset channel goes to queue 0; otherwise the skb goes to
 * the port's TX queue (or the exception TX queue in exception mode, when
 * the channel is valid there). Returns KAL_SUCCESS, or KAL_FAIL for an
 * invalid exception channel (the skb is then freed by the device layer).
 */
KAL_INT32 eemcs_ccci_UL_write_skb_to_swq(CCCI_CHANNEL_T chn, struct sk_buff *skb)
{
    //KAL_DBGPRINT(KAL, DBG_INFO,("====> %s, chn: %d\n", KAL_FUNC_NAME, chn)) ;
    //return mtlte_df_UL_write_skb_to_swq(df_ul_ccci_ch_to_q[chn], skb) ;
    CCCI_BUFF_T *pccci_h = NULL;
    KAL_UINT32 tx_queue_idx;
    KAL_INT32 ret = KAL_SUCCESS;
#ifdef __EEMCS_EXPT_SUPPORT__
    ccci_expt_port_cfg *expt_port_info;
    EEMCS_EXCEPTION_STATE mode = EEMCS_EX_INVALID;
#endif

    DEBUG_LOG_FUNCTION_ENTRY;

    if(NULL != skb){
        pccci_h = (CCCI_BUFF_T *)skb->data;
        DBGLOG(CCCI, DBG, "[TX]CCCI_H: 0x%x, 0x%x, 0x%x, 0x%x",\
            pccci_h->data[0], pccci_h->data[1], pccci_h->channel, pccci_h->reserved);
        KAL_ASSERT(pccci_h->channel == chn);
        KAL_ASSERT(pccci_h->channel < CH_NUM_MAX || pccci_h->channel == CCCI_FORCE_RESET_MODEM_CHANNEL);
    }else{
        DBGLOG(CCCI, WAR, "CH%d write NULL skb to kick DF process!", chn);
    }

    /* BUGFIX: guard pccci_h - the original dereferenced pccci_h->channel
     * here even on the NULL-skb "kick" path it explicitly supports above,
     * which is a NULL pointer dereference. A NULL skb now takes the normal
     * (non-reset) queue path, matching the newer revision's behavior. */
    if(pccci_h && pccci_h->channel == CCCI_FORCE_RESET_MODEM_CHANNEL){
        /* force-reset request always goes out on queue 0 */
        tx_queue_idx = 0;
        hif_ul_write_swq(tx_queue_idx, skb);
    }else{
#ifdef __EEMCS_EXPT_SUPPORT__
        if(is_exception_mode(&mode)) {
            if(is_valid_exception_tx_channel(chn)) {
                expt_port_info = get_expt_port_info(ccci_ch_to_port(chn));
                /* set exception TX Q*/
                tx_queue_idx = expt_port_info->expt_txq_id;
                DBGLOG(CCCI, DBG, "[EXPT] ccci_UL_write_skb_to_swq write skb to DF: ch=%d, txq=%d", chn, tx_queue_idx);
                hif_ul_write_swq(tx_queue_idx, skb);
                /* consume the reservation taken by write_room_alloc */
                atomic_dec(&ccci_port_info[ccci_ch_to_port(chn)].reserve_space);
                atomic_dec(&ccci_tx_waitq[tx_queue_idx].reserve_space);
                eemcs_update_statistics(0, ccci_ch_to_port(chn), TX, NORMAL);
            } else {
                DBGLOG(CCCI, WAR, "[EXPT] Invalid exception channel(%d)!", chn);
                /*
                 * if KAL_FAIL is returned, skb is freed at device layer
                 * we don't have to free it here
                 */
                //eemcs_ex_ccci_tx_drop(ccci_ch_to_port(chn));
                ret = KAL_FAIL;
            }
        }
        else
#endif
        {
            tx_queue_idx = ccci_port_info[ccci_ch_to_port(chn)].txq_id;
            hif_ul_write_swq(tx_queue_idx, skb);
            /* consume the reservation taken by write_room_alloc */
            atomic_dec(&ccci_port_info[ccci_ch_to_port(chn)].reserve_space);
            atomic_dec(&ccci_tx_waitq[tx_queue_idx].reserve_space);
            eemcs_update_statistics(0, ccci_ch_to_port(chn), TX, NORMAL);
        }
    }

    DEBUG_LOG_FUNCTION_LEAVE;
    return ret;
}
/*
 * ccci_df_to_ccci_callback (legacy revision) - data-flow layer RX
 * dispatcher: pops one skb from RX software queue @rxq_no and delivers it
 * to either the xBoot control handler (RXQ_Q0 boot commands) or the
 * registered per-port CCCI RX callback.
 * @rxq_no: RX software queue index to service.
 *
 * Steps: (1) read skb, (2) ack handle-complete, (3) classify buffer,
 * (4) dispatch. Returns the callback's result, or KAL_FAIL on drop.
 */
int ccci_df_to_ccci_callback(unsigned int rxq_no)
{
    int ret, hc_ret, is_xcmd;
    struct sk_buff * skb = NULL;
    CCCI_BUFF_T *ccci_h = NULL;
    XBOOT_CMD *p_xcmd = NULL;
    KAL_UINT32 port_id = 0;
#ifdef __EEMCS_EXPT_SUPPORT__
    EEMCS_EXCEPTION_STATE mode = EEMCS_EX_INVALID;
#endif
    DEBUG_LOG_FUNCTION_ENTRY;
    /* Step 1. read skb from swq */
    skb = hif_dl_read_swq(rxq_no);
    if(skb == NULL) {
        DBGLOG(CCCI, DBG, "ccci_df_to_ccci_callback read NULL skb on %d", rxq_no);
        /* empty queue is tolerated in exception mode, fatal otherwise */
        if(is_exception_mode(&mode))
            return KAL_FAIL;
        else
            KAL_ASSERT(NULL != skb);
    }
    /* Step 2. call handle complete */
    hc_ret = hif_dl_pkt_handle_complete(rxq_no);
    KAL_ASSERT(0 == hc_ret);
    DBGLOG(CCCI, DBG, "ccci_df_to_ccci_callback() rxq_no = %d", rxq_no);
    /* Step 3. buffer type: only RXQ_Q0 can carry xBoot commands */
    if (rxq_no == RXQ_Q0) {
        is_xcmd = is_xboot_command(skb);
    } else {
        is_xcmd = false;
    }
    wake_lock_timeout(&eemcs_wake_lock, HZ); // Using 1s wake lock
    if (is_xcmd == true) {
        /* Step 4. callback to xBoot (control port) */
        p_xcmd = (XBOOT_CMD *)skb->data;
        KAL_ASSERT(p_xcmd->magic == (KAL_UINT32)MAGIC_MD_CMD);
        if (NULL != ccci_port_info[CCCI_PORT_CTRL].ch.rx_cb){
            DBGLOG(CCCI, DBG, "[CCCI][DF CALLBACK] XBOOT_CMD (0x%X)(0x%X)(0x%X)(0x%X)",\
                p_xcmd->magic, p_xcmd->msg_id, p_xcmd->status, p_xcmd->reserved[0]);
#ifdef __EEMCS_EXPT_SUPPORT__
            if(is_exception_mode(&mode)) {
                if(!is_valid_exception_port(CCCI_PORT_CTRL, true)) {
                    ret = KAL_FAIL;
                    dev_kfree_skb(skb);
                    /* NOTE(review): port_id is still its initial value 0 here,
                     * while the drop is for CCCI_PORT_CTRL — presumably
                     * CCCI_PORT_CTRL == 0; confirm. */
                    eemcs_expt_ccci_rx_drop(port_id);
                    DBGLOG(CCCI, ERR, "[CCCI] [DF CALLBACK] EXCEPTION MODE PKT DROPPED !! Q(%d) PORT(%d)", rxq_no, CCCI_PORT_CTRL);
                    goto _end;
                } else {
                    ret = ccci_port_info[CCCI_PORT_CTRL].ch.rx_cb(skb, 0);
                }
            }
            else
#endif
            {
                ret = ccci_port_info[CCCI_PORT_CTRL].ch.rx_cb(skb, 0);
            }
        } else {
            /* no xBoot handler registered: drop */
            ret = KAL_FAIL;
            dev_kfree_skb(skb);
            DBGLOG(CCCI, ERR, "[CCCI] !!! PKT DROP !!! ccci_df_to_ccci_callback xBoot not registered");
        }
    } else {
        /* Step 4. callback to CCCI device */
        ccci_h = (CCCI_BUFF_T *)skb->data;
        KAL_ASSERT(ccci_h->channel < CH_NUM_MAX);
        port_id = ccci_ch_to_port(ccci_h->channel);
        if(NULL != ccci_port_info[port_id].ch.rx_cb){
            DBGLOG(CCCI,DBG,"ccci_df_to_ccci_callback Rx packet CCCI_H(0x%x)(0x%x)(0x%x)(0x%x)",\
                ccci_h->data[0],ccci_h->data[1],ccci_h->channel, ccci_h->reserved );
#ifdef __EEMCS_EXPT_SUPPORT__
            if(is_exception_mode(&mode)) {
                if(!is_valid_exception_port(port_id, true)) {
                    /* port not allowed during exception: drop and account */
                    ret = KAL_FAIL;
                    dev_kfree_skb(skb);
                    eemcs_expt_ccci_rx_drop(port_id);
                    DBGLOG(CCCI, ERR, "[CCCI] [DF CALLBACK] EXCEPTION MODE PKT DROPPED !! Q(%d) PORT(%d)", rxq_no, port_id);
                    goto _end;
                } else {
                    ret = ccci_port_info[port_id].ch.rx_cb(skb, 0);
                }
            }
            else
#endif
            {
                ret = ccci_port_info[port_id].ch.rx_cb(skb, 0);
            }
        }else{
            /* no callback registered: drop */
            ret = KAL_FAIL;
            dev_kfree_skb(skb);
            DBGLOG(CCCI,ERR, "[CCCI] !!! PKT DROP !!! ccci_df_to_ccci_callback ccci_port(%d) channel Rx(%d) not registered", port_id, ccci_h->channel);
            eemcs_update_statistics(0, port_id, RX, DROP);
        }
        /* NOTE(review): this NORMAL count also runs after the DROP branch
         * above — possibly intentional (total vs. dropped); confirm. */
        eemcs_update_statistics(0, port_id, RX, NORMAL);
    }
_end:
    DEBUG_LOG_FUNCTION_LEAVE;
    return ret;
}