KAL_UINT32 ccci_ul_lb_queue(struct sk_buff *skb) { CCCI_BUFF_T *pccci_h; KAL_UINT32 port_id; KAL_UINT32 rx_qno; ccci_port_cfg *ccci_port_info; #ifdef _EEMCS_EXCEPTION_UT ccci_expt_port_cfg *ccci_expt_port_info; #endif pccci_h = (CCCI_BUFF_T *)skb->data; port_id = ccci_ch_to_port(pccci_h->channel); DBGLOG(CCCI,DBG, "[CCCI_UT]=========tx_ch(%d) is mapped to PORT(%d)", pccci_h->channel, port_id); #ifdef _EEMCS_EXCEPTION_UT if(is_exception_mode(NULL)) { ccci_expt_port_info = get_expt_port_info(port_id); rx_qno = SDIO_RXQ(ccci_expt_port_info->expt_rxq_id); } else #endif { ccci_port_info = ccci_get_port_info(port_id); rx_qno = SDIO_RXQ(ccci_port_info->rxq_id); } DBGLOG(CCCI,DBG, "[CCCI_UT]=========Loopback Rxqno(%d)", rx_qno); return rx_qno; }
int eemcs_cdev_msg(int port_id, unsigned int message, unsigned int reserved){ struct sk_buff *new_skb; CCCI_BUFF_T *pccci_h; ccci_port_cfg *port_cfg; DEBUG_LOG_FUNCTION_ENTRY; new_skb = dev_alloc_skb(sizeof(CCCI_BUFF_T)); if(new_skb == NULL){ DBGLOG(CHAR,ERR,"dev_alloc_skb fail(size=%d).", sizeof(CCCI_BUFF_T)); DEBUG_LOG_FUNCTION_LEAVE; return KAL_FAIL; } pccci_h = (CCCI_BUFF_T *)new_skb->data; memset(pccci_h, 0, sizeof(CCCI_BUFF_T)); port_cfg = ccci_get_port_info(port_id); pccci_h->data[0] = CCCI_MAGIC_NUM; pccci_h->data[1] = message; pccci_h->channel = port_cfg->ch.rx; pccci_h->reserved = reserved; DBGLOG(CHAR, DBG, "%s(%d) send cdev_msg: 0x%08X, 0x%08X, %02d, 0x%08X", ccci_cdev_name[PORT2IDX(port_id)],\ port_id, pccci_h->data[0], pccci_h->data[1], pccci_h->channel, pccci_h->reserved); if(port_id == CCCI_PORT_CTRL){ return eemcs_boot_rx_callback(new_skb, 0); }else{ return eemcs_cdev_rx_callback(new_skb, 0); } }
/*
 * @brief Initialization of exception handling
 *
 * Builds the exception-mode Tx/Rx queue bitmaps from the per-port exception
 * queue table, registers the exception callback with the HIF layer, resets
 * all exception-mode queue/port bookkeeping, and arms (but does not start)
 * the modem-exception monitor timer.
 *
 * @param
 *  None
 * @return
 *  This function returns KAL_SUCCESS always.
 */
KAL_INT32 eemcs_expt_mod_init(void)
{
    KAL_UINT32 i = 0;
    //KAL_UINT32 except_txq = 0, except_rxq = 0;
    //KAL_UINT32 nonstop_rxq = 0; /* for Log path to output as much as possible */
    /* NOTE(review): the local declarations above are commented out but
     * except_txq/except_rxq/nonstop_rxq are still used below, so they must
     * be defined at file scope (not visible in this chunk) — and must be
     * zero-initialized there, or the set_bit() accumulation below starts
     * from stale state.  TODO confirm. */
    KAL_INT32 ret;
    ccci_port_cfg *log_queue_config;
    DEBUG_LOG_FUNCTION_ENTRY;

    //Init md exception type
    g_except_inst.md_ex_type = 0;

    /* init expt_cb and expt_cb_lock*/
    spin_lock_init(&g_except_inst.expt_cb_lock);

    /* Clear per-port exception callbacks and accumulate the bitmap of
     * Tx/Rx queues that remain usable during modem exception mode. */
    for(i = 0; i < CCCI_PORT_NUM_MAX; i++) {
        g_except_inst.expt_cb[i] = NULL;
        /* NOTE(review): set_bit() operates on an unsigned long; if these
         * accumulators are 32-bit KAL_UINT32 the cast is an out-of-bounds
         * write risk on 64-bit — verify the file-scope declarations. */
        if(TR_Q_INVALID != ccci_expt_port_info[i].expt_txq_id) {
            set_bit(SDIO_TXQ(ccci_expt_port_info[i].expt_txq_id), (unsigned long *)&except_txq);
        }
        if(TR_Q_INVALID != ccci_expt_port_info[i].expt_rxq_id) {
            set_bit(SDIO_RXQ(ccci_expt_port_info[i].expt_rxq_id), (unsigned long *)&except_rxq);
        }
    }

    eemcs_expt_ut_init();

    /* MD-log Rx queue is "non-stop": kept draining during exception so the
     * log path can output as much as possible. */
    log_queue_config = ccci_get_port_info(CCCI_PORT_MD_LOG);
    set_bit(SDIO_RXQ(log_queue_config->rxq_id), (unsigned long *)&nonstop_rxq);

    /* Hand the queue bitmaps to the HIF layer: Tx map in the upper 16 bits,
     * Rx map in the lower 16 bits of the second argument. */
    hif_except_init(nonstop_rxq, (except_txq << 16) | except_rxq);
    ret = hif_reg_expt_cb(ccci_df_to_ccci_exception_callback);
    KAL_ASSERT(ret == KAL_SUCCESS);
    DBGLOG(EXPT, TRA, "nonstop_txq=%d, nonstop_rxq=%d, exp_txq=%d, exp_rxq=%d",
        0, nonstop_rxq, except_txq, except_rxq);

    /* Init Tx Q list */
    for (i = 0; i < SDIO_TX_Q_NUM; i++) {
        g_except_inst.txq[i].id = -1;
        atomic_set(&g_except_inst.txq[i].pkt_cnt, 0);
        skb_queue_head_init(&g_except_inst.txq[i].skb_list);
    }
    /* Init Rx Q list */
    for (i = 0; i < SDIO_RX_Q_NUM; i++) {
        g_except_inst.rxq[i].id = -1;
        atomic_set(&g_except_inst.rxq[i].pkt_cnt, 0);
        skb_queue_head_init(&g_except_inst.rxq[i].skb_list);
    }
    /* Init port list */
    for (i = 0; i < CCCI_PORT_NUM; i++) {
        atomic_set(&g_except_inst.port[i].pkt_cnt, 0);
        skb_queue_head_init(&g_except_inst.port[i].skb_list);
    }

    /* initialize drop count */
    eemcs_expt_reset_statistics();

    /* initialize exception*/
    eemcs_exception_state = EEMCS_EX_NONE;

    /* initialize exception timer: set up callback and context only;
     * the timer is presumably armed elsewhere when an exception hits. */
    init_timer(&g_except_inst.md_ex_monitor);
    g_except_inst.md_ex_monitor.function = ex_monitor_func;
    g_except_inst.md_ex_monitor.data = (unsigned long)&g_except_inst;

#ifdef ENABLE_MD_WDT_PROCESS
    /* Optional: hook the modem watchdog-timeout reset callback. */
    eemcs_ccci_register_WDT_callback(eemcs_wdt_reset_callback);
#endif

    DEBUG_LOG_FUNCTION_LEAVE;
    return KAL_SUCCESS;
}