/*
 * @brief  Change the EEMCS exception state.
 * @param  new_mode [in] Exception mode defined in the EEMCS_EXCEPTION_STATE enumeration.
 * @return true indicates success; otherwise false.
 */
kal_bool set_exception_mode(EEMCS_EXCEPTION_STATE new_mode)
{
    EEMCS_EXCEPTION_STATE cur_mode;

    DEBUG_LOG_FUNCTION_ENTRY;
    KAL_ASSERT((new_mode >= EEMCS_EX_NONE) && (new_mode <= EEMCS_EX_REC_MSG_OK));

    cur_mode = get_exception_mode();

#if 0
    /* State-transition validation, currently disabled */
    switch (cur_mode) {
    case EEMCS_EX_INVALID:
        goto _err;
    case EEMCS_EX_NONE:
        if (EEMCS_EX_INIT != new_mode)
            goto _err;
        break;
    case EEMCS_EX_INIT:
        if (EEMCS_EX_DHL_DL_RDY != new_mode)
            goto _err;
        break;
    case EEMCS_EX_DHL_DL_RDY:
        if (EEMCS_EX_INIT_DONE != new_mode)
            goto _err;
        break;
    case EEMCS_EX_INIT_DONE:
        /* Only allow reset to the EEMCS_EX_INIT state */
        if (EEMCS_EX_INIT != new_mode)
            goto _err;
        break;
    default:
        goto _err;
    }
#endif

    /* TODO: LOCK PROTECTION */
    eemcs_exception_state = new_mode;
    DBGLOG(EXPT, DBG, "Exception mode: 0x%X -> 0x%X", cur_mode, new_mode);

    DEBUG_LOG_FUNCTION_LEAVE;
    return true;

#if 0
_err:
    DBGLOG(EXPT, ERR, "Invalid exception mode: 0x%X -> 0x%X", cur_mode, new_mode);
    DEBUG_LOG_FUNCTION_LEAVE;
    return false;
#endif
}
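/*
 * A minimal sketch of what the "TODO: LOCK PROTECTION" above could look
 * like, assuming a dedicated spinlock guards eemcs_exception_state. The
 * lock name and the _locked variant are hypothetical, not the driver's
 * actual locking scheme.
 */
static DEFINE_SPINLOCK(eemcs_exception_state_lock);   /* hypothetical lock */

kal_bool set_exception_mode_locked(EEMCS_EXCEPTION_STATE new_mode)
{
    unsigned long flags;
    EEMCS_EXCEPTION_STATE cur_mode;

    spin_lock_irqsave(&eemcs_exception_state_lock, flags);
    cur_mode = eemcs_exception_state;    /* snapshot the old state under the lock */
    eemcs_exception_state = new_mode;    /* publish the new state atomically */
    spin_unlock_irqrestore(&eemcs_exception_state_lock, flags);

    DBGLOG(EXPT, DBG, "Exception mode: 0x%X -> 0x%X", cur_mode, new_mode);
    return true;
}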
int mtlte_check_excetion_int(unsigned int swint_status)
{
    int i;

    if (swint_status & D2H_INT_except_init) {
        KAL_DBGPRINT(KAL, DBG_ERROR, ("[exception] MT6290m modem assertion!! Start assertion dump flow...\r\n"));

        if (lte_expt_priv.cb_ccci_expt_int) {
            lte_expt_priv.cb_ccci_expt_int(EX_INIT);
        } else {
            KAL_DBGPRINT(KAL, DBG_ERROR, ("[exception] there is no ccci callback function!!\r\n"));
            KAL_ASSERT(0);
        }

        if (mtlte_hif_expt_mode_init() != KAL_SUCCESS)
            return KAL_FAIL;

        /* eemcs_ccci_ex_ind(DHL_DL_RDY) */
        lte_expt_priv.cb_ccci_expt_int(EX_DHL_DL_RDY);

        /* [SDIO] Activate the DHL_DL queue & enable the DHL_DL-related DLQ interrupt */
        for (i = 0; i < RXQ_NUM; i++) {
            if (1 == lte_expt_priv.non_stop_dlq[i])
                mtlte_hif_expt_restart_que(1, i);
        }
        mtlte_hif_expt_unmask_swint();
        mtlte_hif_expt_enable_interrupt();

        KAL_DBGPRINT(KAL, DBG_ERROR, ("[exception] Start to transfer remaining DHL DL pkt...\r\n"));
    } else if (swint_status & D2H_INT_except_clearQ_done) {
        KAL_DBGPRINT(KAL, DBG_ERROR, ("[exception] DHL DL pkt transfer done, start reset to exception que...\r\n"));
        mtlte_hif_expt_set_reset_allQ_bit();
    }

    return KAL_SUCCESS;
}
/*
 * @brief  Configure the modem runtime data structure.
 * @param  buffer [in] The buffer containing the modem runtime data.
 * @return The size of the modem runtime data structure is always returned.
 */
KAL_INT32 eemcs_md_runtime_cfg(void *buffer)
{
    struct file *filp = NULL;
    LOGGING_MODE mdlog_flag = MODE_IDLE;
    struct MODEM_RUNTIME_st *runtime = NULL;
    int ret = 0;

    KAL_ASSERT(buffer != NULL);
    runtime = (struct MODEM_RUNTIME_st *)buffer;
    memset(runtime, 0, sizeof(struct MODEM_RUNTIME_st));

    runtime->Prefix        = 0x46494343;   /* "CCIF" */
    runtime->Postfix       = 0x46494343;   /* "CCIF" */
    runtime->BootChannel   = CH_CTRL_RX;
    runtime->DriverVersion = 0x20110118;

    filp = filp_open(MDLOGGER_FILE_PATH, O_RDONLY, 0777);
    if (!IS_ERR(filp)) {
        ret = kernel_read(filp, 0, (char *)&mdlog_flag, sizeof(int));
        if (ret != sizeof(int))
            mdlog_flag = MODE_IDLE;
    } else {
        DBGLOG(BOOT, ERR, "open %s fail: %ld", MDLOGGER_FILE_PATH, PTR_ERR(filp));
        filp = NULL;
    }

    if (filp != NULL)
        filp_close(filp, NULL);

    if (is_meta_mode() || is_advanced_meta_mode())
        runtime->BootingStartID = ((char)mdlog_flag << 8 | META_BOOT_ID);
    else
        runtime->BootingStartID = ((char)mdlog_flag << 8 | NORMAL_BOOT_ID);

    DBGLOG(BOOT, INF, "send /data/extmdl/mdl_config = %d to modem!", mdlog_flag);

    return sizeof(struct MODEM_RUNTIME_st);
}
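/*
 * Note: the kernel_read() call above uses the pre-4.14 signature
 * kernel_read(file, offset, buf, count). A hedged sketch of the same
 * mdlog_flag load against the >= 4.14 signature
 * kernel_read(file, buf, count, &pos); the helper name is hypothetical.
 */
static LOGGING_MODE eemcs_read_mdlog_flag(struct file *filp)
{
    LOGGING_MODE mdlog_flag = MODE_IDLE;
    loff_t pos = 0;

    /* a short read (or error) falls back to MODE_IDLE, as above */
    if (kernel_read(filp, &mdlog_flag, sizeof(int), &pos) != sizeof(int))
        mdlog_flag = MODE_IDLE;
    return mdlog_flag;
}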
int mtlte_df_DL_enswq_buf(MTLTE_DF_RX_QUEUE_TYPE qno, void *buf, unsigned int len)
{
    int ret = KAL_SUCCESS;
    struct sk_buff *skb = NULL;

    KAL_DBGPRINT(KAL, DBG_INFO, ("====> %s , qno: %d\n", KAL_FUNC_NAME, qno));

#if BUFFER_POOL_FOR_EACH_QUE
    if (len > lte_df_core.df_skb_alloc_size[qno]) {
        KAL_DBGPRINT(KAL, DBG_ERROR, ("[SDIO][ERR] lte_df_core.df_skb_alloc_size[%d] = %d, packet this time = %d\n",
                     qno, lte_df_core.df_skb_alloc_size[qno], len));
        KAL_DBGPRINT(KAL, DBG_ERROR, ("[SDIO][ERR] First 64 bytes of this error packet = "));
        KAL_DBGPRINT(KAL, DBG_ERROR, ("0x%08x, 0x%08x, 0x%08x, 0x%08x, ",
                     *(unsigned int *)(buf + 0),  *(unsigned int *)(buf + 4),
                     *(unsigned int *)(buf + 8),  *(unsigned int *)(buf + 12)));
        KAL_DBGPRINT(KAL, DBG_ERROR, ("0x%08x, 0x%08x, 0x%08x, 0x%08x, ",
                     *(unsigned int *)(buf + 16), *(unsigned int *)(buf + 20),
                     *(unsigned int *)(buf + 24), *(unsigned int *)(buf + 28)));
        KAL_DBGPRINT(KAL, DBG_ERROR, ("0x%08x, 0x%08x, 0x%08x, 0x%08x, ",
                     *(unsigned int *)(buf + 32), *(unsigned int *)(buf + 36),
                     *(unsigned int *)(buf + 40), *(unsigned int *)(buf + 44)));
        KAL_DBGPRINT(KAL, DBG_ERROR, ("0x%08x, 0x%08x, 0x%08x, 0x%08x, ",
                     *(unsigned int *)(buf + 48), *(unsigned int *)(buf + 52),
                     *(unsigned int *)(buf + 56), *(unsigned int *)(buf + 60)));
    }
    KAL_ASSERT(len <= lte_df_core.df_skb_alloc_size[qno]);
#else
    KAL_ASSERT(len <= DEV_MAX_PKT_SIZE);
#endif

    if (lte_df_core.cb_handle[qno].callback_func == NULL)
        return KAL_SUCCESS;

#if BUFFER_POOL_FOR_EACH_QUE
    if ((skb = skb_dequeue(&lte_df_core.dl_buffer_pool_queue[qno])) == NULL) {
        KAL_DBGPRINT(KAL, DBG_WARN, ("mtlte_df_DL_enswq_buf skb_dequeue no skb\n"));
        return KAL_FAIL;
    }
#else
    if ((skb = skb_dequeue(&lte_df_core.dl_buffer_pool_queue)) == NULL) {
        KAL_DBGPRINT(KAL, DBG_WARN, ("mtlte_df_DL_enswq_buf skb_dequeue no skb\n"));
        return KAL_FAIL;
    }
#endif

    memcpy(skb_put(skb, len), buf, len);

    KAL_MUTEXLOCK(&lte_df_core.dl_pkt_lock);
    /* NOTICE: bump the in_use count before the skb is actually enqueued, to avoid an in_use count < 0 assert */
    lte_df_core.dl_pkt_in_use[qno]++;
    skb_queue_tail(&lte_df_core.dl_recv_wait_queue[qno], skb);
    KAL_MUTEXUNLOCK(&lte_df_core.dl_pkt_lock);

#if FORMAL_DL_FLOW_CONTROL
    if (true == lte_df_core.fl_ctrl_enable[qno]) {
        atomic_inc(&lte_df_core.fl_ctrl_counter[qno]);
        if (atomic_read(&lte_df_core.fl_ctrl_counter[qno]) >= lte_df_core.fl_ctrl_limit[qno])
            lte_df_core.fl_ctrl_full[qno] = true;

        /* record the largest counter value seen so far */
        if (atomic_read(&lte_df_core.fl_ctrl_counter[qno]) > lte_df_core.fl_ctrl_record[qno])
            lte_df_core.fl_ctrl_record[qno] = atomic_read(&lte_df_core.fl_ctrl_counter[qno]);
    }
#endif

#if (USE_QUE_WORK_DISPATCH_RX || DISPATCH_AFTER_ALL_SKB_DONE)
#else
    KAL_DBGPRINT(KAL, DBG_INFO, ("RXQ %d callback , and the private data is %d\r\n",
                 qno, lte_df_core.cb_handle[qno].private_data));
    //lte_df_core.cb_handle[qno].callback_func(lte_df_core.cb_handle[qno].private_data);
    lte_df_core.cb_handle[qno].callback_func(qno);
#endif

    KAL_DBGPRINT(KAL, DBG_INFO, ("<==== %s\n", KAL_FUNC_NAME));

    return ret;
}
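/*
 * Hedged sketch of the consumer-side counterpart to the
 * FORMAL_DL_FLOW_CONTROL accounting above: when a packet is handed to
 * its reader, the counter drops and the queue re-opens once it drains
 * below the limit. The function name and the resume hook are
 * assumptions for illustration, not the driver's actual API.
 */
static void mtlte_df_DL_flctrl_release(MTLTE_DF_RX_QUEUE_TYPE qno)
{
    if (true != lte_df_core.fl_ctrl_enable[qno])
        return;

    atomic_dec(&lte_df_core.fl_ctrl_counter[qno]);
    if (lte_df_core.fl_ctrl_full[qno] &&
        atomic_read(&lte_df_core.fl_ctrl_counter[qno]) < lte_df_core.fl_ctrl_limit[qno]) {
        lte_df_core.fl_ctrl_full[qno] = false;
        /* hypothetical hook: resume the DL queue that was stopped while full */
        /* mtlte_hif_resume_dl_q(qno); */
    }
}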
KAL_UINT32 ccci_get_port_type(KAL_UINT32 ccci_port_index)
{
    KAL_ASSERT(ccci_port_index < CCCI_PORT_NUM_MAX);
    return ccci_port_info[ccci_port_index].export_type;
}
KAL_UINT32 ccci_get_port_cflag(KAL_UINT32 ccci_port_index)
{
    KAL_ASSERT(ccci_port_index < CCCI_PORT_NUM_MAX);
    return ccci_port_info[ccci_port_index].flag;
}
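/*
 * Usage sketch for the two getters above, mirroring the gating that
 * eemcs_cdev_read() performs further below: only EX_T_USER ports may be
 * read from user space, and EXPORT_CCCI_H ports keep the CCCI header in
 * the payload. The helper name is hypothetical.
 */
static int eemcs_port_exports_header(KAL_UINT32 port_id)
{
    if (ccci_get_port_type(port_id) != EX_T_USER)
        return -1;   /* not a user-exported port at all */
    return (ccci_get_port_cflag(port_id) & EXPORT_CCCI_H) ? 1 : 0;
}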
int ccci_df_to_ccci_callback(unsigned int rxq_no)
{
    int ret, hc_ret;
    bool is_xcmd = false;
    struct sk_buff *skb = NULL;
    CCCI_BUFF_T *ccci_h = NULL;
    XBOOT_CMD *p_xcmd = NULL;
    KAL_UINT32 port_id = CCCI_PORT_CTRL;
    static KAL_UINT32 rx_err_cnt[CCCI_PORT_NUM_MAX] = {0};
#ifdef __EEMCS_EXPT_SUPPORT__
    EEMCS_EXCEPTION_STATE mode = EEMCS_EX_INVALID;
#endif
#if defined (DBG_FEATURE_ADD_CCCI_SEQNO)
    KAL_INT16 channel, seq_num, assert_bit;
#endif

    DEBUG_LOG_FUNCTION_ENTRY;

    /* Step 1. read skb from swq */
    skb = hif_dl_read_swq(rxq_no);
    if (skb == NULL) {
        DBGLOG(CCCI, DBG, "ccci_df_to_ccci_callback read NULL skb on %d", rxq_no);
#ifdef __EEMCS_EXPT_SUPPORT__
        /* mode only exists with exception support; guard the check accordingly */
        if (is_exception_mode(&mode))
            return KAL_FAIL;
#endif
        KAL_ASSERT(NULL != skb);
    }

    /* Step 2. call handle complete */
    hc_ret = hif_dl_pkt_handle_complete(rxq_no);
    KAL_ASSERT(0 == hc_ret);

    wake_lock_timeout(&eemcs_wake_lock, HZ / 2);   /* use a 0.5 s wake lock */

    /* Step 3. buffer type */
    if (rxq_no == RXQ_Q0) {
        //is_xcmd = is_xboot_command(skb);
        p_xcmd = (XBOOT_CMD *)skb->data;
        if (p_xcmd->magic == (KAL_UINT32)MAGIC_MD_CMD) {
            if (check_device_state() >= EEMCS_MOLY_HS_P1)
                DBGLOG(CCCI, ERR, "can't recv xBoot cmd when EEMCS state=%d", check_device_state());
            else
                is_xcmd = true;
        }
    }

    if (is_xcmd) {
        /* Step 4. callback to xBoot */
        CDEV_LOG(port_id, CCCI, INF, "XBOOT_CMD: 0x%08X, 0x%08X, 0x%08X, 0x%08X",
                 p_xcmd->magic, p_xcmd->msg_id, p_xcmd->status, p_xcmd->reserved[0]);
        ret = ccci_port_info[port_id].ch.rx_cb(skb, 0);
    } else {
        ccci_h = (CCCI_BUFF_T *)skb->data;
        port_id = ccci_ch_to_port(ccci_h->channel);

        CDEV_LOG(port_id, CCCI, INF, "CCCI_H: 0x%08X, 0x%08X, 0x%08X, 0x%08X",
                 ccci_h->data[0], ccci_h->data[1], ccci_h->channel, ccci_h->reserved);

        /* check the rx sequence number, for exception debugging */
#if defined (DBG_FEATURE_ADD_CCCI_SEQNO)
        channel    = ccci_h->channel;
        seq_num    = ccci_h->seq_num;
        assert_bit = ccci_h->assert_bit;
        DBGLOG(CCCI, DBG, "Port%d CCCI_H: data[0]=0x%08X, data[1]=0x%08X, ch=0x%02X, seqno=0x%02X, assert=%d, resv=0x%08X(0x%08X, 0x%08X, 0x%08X)",
               port_id, ccci_h->data[0], ccci_h->data[1], ccci_h->channel, ccci_h->seq_num,
               ccci_h->assert_bit, ccci_h->reserved, channel, seq_num, assert_bit);
        if (((seq_num - ccci_seqno_tbl[channel].seqno[RX]) & 0x7FFF) != 1 && assert_bit) {
            DBGLOG(CCCI, ERR, "Port%d seqno out-of-order(0x%02X->0x%02X): data[0]=0x%08X, data[1]=0x%08X, ch=0x%02X, seqno=0x%02X, assert=%d, resv=0x%08X",
                   port_id, seq_num, ccci_seqno_tbl[channel].seqno[RX], ccci_h->data[0], ccci_h->data[1],
                   ccci_h->channel, ccci_h->seq_num, ccci_h->assert_bit, ccci_h->reserved);
            hif_force_md_assert_swint();
        }
        ccci_seqno_tbl[channel].seqno[RX] = seq_num;
#endif

        /* Step 4. callback to the CCCI device */
        if (NULL != ccci_port_info[port_id].ch.rx_cb) {
#ifdef __EEMCS_EXPT_SUPPORT__
            if (is_exception_mode(&mode)) {
                if (!is_valid_exception_port(port_id, true)) {
                    ret = KAL_FAIL;
                    /* release the rx accounting before the skb is freed */
                    eemcs_ccci_release_rx_skb(port_id, 1, skb);
                    dev_kfree_skb(skb);
                    eemcs_expt_ccci_rx_drop(port_id);
                    DBGLOG(CCCI, ERR, "PKT DROP when PORT%d(rxq=%d) at md exception",
                           port_id, rxq_no);
                    goto _end;
                } else {
                    ret = ccci_port_info[port_id].ch.rx_cb(skb, 0);
                }
            } else
#endif
            {
                ret = ccci_port_info[port_id].ch.rx_cb(skb, 0);
            }
            rx_err_cnt[port_id] = 0;
        } else {
            ret = KAL_FAIL;
            /* release the rx accounting before the skb is freed */
            eemcs_ccci_release_rx_skb(port_id, 1, skb);
            dev_kfree_skb(skb);
            if (rx_err_cnt[port_id] % 20 == 0) {
                DBGLOG(CCCI, ERR, "PKT DROP when PORT%d rx callback(ch=%d) not registered",
                       port_id, ccci_h->channel);
            }
            rx_err_cnt[port_id]++;
            eemcs_update_statistics(0, port_id, RX, DROP);
        }
        eemcs_update_statistics(0, port_id, RX, NORMAL);
    }

_end:
    DEBUG_LOG_FUNCTION_LEAVE;
    return ret;
}
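/*
 * A minimal sketch of the 15-bit sequence-number check used above: an
 * in-order packet advances the RX seqno by exactly 1 modulo 2^15, so
 * the subtract-and-mask form also survives the 0x7FFF -> 0x0000 wrap.
 * The helper is illustrative only.
 */
static inline int ccci_seqno_in_order(KAL_INT16 prev, KAL_INT16 next)
{
    /* e.g. prev=0x7FFF, next=0x0000: (0 - 0x7FFF) & 0x7FFF == 1 -> in order
     *      prev=0x0010, next=0x0012: (2)          & 0x7FFF == 2 -> a packet was lost */
    return ((next - prev) & 0x7FFF) == 1;
}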
KAL_INT32 eemcs_ccci_boot_UL_write_skb_to_swq(struct sk_buff *skb)
{
    XBOOT_CMD *p_xcmd = NULL;
    KAL_UINT32 tx_queue_idx = 0;
    KAL_INT32 ret = KAL_SUCCESS;
#ifdef __EEMCS_EXPT_SUPPORT__
    ccci_expt_port_cfg *expt_port_info;
    EEMCS_EXCEPTION_STATE mode = EEMCS_EX_INVALID;
#endif

    DEBUG_LOG_FUNCTION_ENTRY;

    /* Use the boot state, not the EEMCS state */
    //if (check_device_state() < EEMCS_MOLY_HS_P1) {
    if (eemcs_boot_get_state() < MD_ROM_BOOT_READY) {
        if (NULL != skb) {
            p_xcmd = (XBOOT_CMD *)skb->data;
            if (p_xcmd->magic == (unsigned int)MAGIC_MD_CMD_ACK) {
                KAL_ASSERT(p_xcmd->msg_id < CMDID_MAX);
                DBGLOG(CCCI, DBG, "XBOOT_CMD: [TX]0x%X, 0x%X, 0x%X, 0x%X",
                       p_xcmd->magic, p_xcmd->msg_id, p_xcmd->status, p_xcmd->reserved[0]);
            } else {
                KAL_ASSERT(skb->len > 0);
                DBGLOG(CCCI, DBG, "XBOOT_BIN: got a %d-byte bin for md", skb->len);
            }
        } else {
            DBGLOG(CCCI, WAR, "CH_CTRL_TX write NULL skb to kick DF process!");
        }

#ifdef __EEMCS_EXPT_SUPPORT__
        if (is_exception_mode(&mode)) {
            if (is_valid_exception_tx_channel(CH_CTRL_TX)) {
                expt_port_info = get_expt_port_info(ccci_ch_to_port(CH_CTRL_TX));
                /* pick the exception TX queue */
                tx_queue_idx = expt_port_info->expt_txq_id;
                DBGLOG(CCCI, DBG, "[EXPT]boot_write_skb_to_swq: ch=%d, txq=%d", CH_CTRL_TX, tx_queue_idx);
                hif_ul_write_swq(tx_queue_idx, skb);
            } else {
                DBGLOG(CCCI, ERR, "[EXPT]Invalid exception channel(%d)", CH_CTRL_TX);
                /*
                 * if KAL_FAIL is returned, the skb is freed at the device layer,
                 * so we don't have to free it here
                 */
                //eemcs_ex_ccci_tx_drop(ccci_ch_to_port(chn));
                ret = KAL_FAIL;
            }
        } else
#endif
        {
            tx_queue_idx = ccci_port_info[ccci_ch_to_port(CH_CTRL_TX)].txq_id;
            hif_ul_write_swq(tx_queue_idx, skb);
        }
    } else {
        eemcs_ccci_UL_write_skb_to_swq(CH_CTRL_TX, skb);
    }

    DEBUG_LOG_FUNCTION_LEAVE;
    return ret;
}
KAL_UINT32 ccci_ch_to_port(KAL_UINT32 ccci_ch_num)
{
    KAL_UINT32 port_index;

    port_index = ccci_ch_to_port_mapping[ccci_ch_num];
    KAL_ASSERT(port_index < CCCI_PORT_NUM_MAX);
    return port_index;
}
void eemcs_ccci_turn_on_dlq_by_port(KAL_UINT32 ccci_port_index)
{
    KAL_ASSERT(ccci_port_index < CCCI_PORT_NUM_MAX);
    DBGLOG(CCCI, DBG, "CCCI port (%d) turn on dlq(%d)", ccci_port_index,
           SDIO_RXQ(ccci_port_info[ccci_port_index].rxq_id));
    hif_turn_on_dl_q(SDIO_RXQ(ccci_port_info[ccci_port_index].rxq_id));
}
KAL_INT32 eemcs_ccci_UL_write_skb_to_swq(CCCI_CHANNEL_T chn, struct sk_buff *skb)
{
    CCCI_BUFF_T *pccci_h = NULL;
    KAL_UINT32 tx_queue_idx;
    KAL_INT32 ret = KAL_SUCCESS;
#ifdef __EEMCS_EXPT_SUPPORT__
    ccci_expt_port_cfg *expt_port_info;
    EEMCS_EXCEPTION_STATE mode = EEMCS_EX_INVALID;
#endif

    DEBUG_LOG_FUNCTION_ENTRY;

    if (NULL != skb) {
        pccci_h = (CCCI_BUFF_T *)skb->data;
        DBGLOG(CCCI, DBG, "[TX]CCCI_H: 0x%x, 0x%x, 0x%x, 0x%x",
               pccci_h->data[0], pccci_h->data[1], pccci_h->channel, pccci_h->reserved);
        KAL_ASSERT(pccci_h->channel == chn);
        KAL_ASSERT(pccci_h->channel < CH_NUM_MAX || pccci_h->channel == CCCI_FORCE_RESET_MODEM_CHANNEL);
    } else {
        DBGLOG(CCCI, WAR, "CH%d write NULL skb to kick DF process!", chn);
    }

    /* pccci_h stays NULL for a kick-only write, so it must be checked before use */
    if (pccci_h && pccci_h->channel == CCCI_FORCE_RESET_MODEM_CHANNEL) {
        tx_queue_idx = 0;
        hif_ul_write_swq(tx_queue_idx, skb);
    } else {
#ifdef __EEMCS_EXPT_SUPPORT__
        if (is_exception_mode(&mode)) {
            if (is_valid_exception_tx_channel(chn)) {
                expt_port_info = get_expt_port_info(ccci_ch_to_port(chn));
                /* pick the exception TX queue */
                tx_queue_idx = expt_port_info->expt_txq_id;
                DBGLOG(CCCI, DBG, "[EXPT] ccci_UL_write_skb_to_swq write skb to DF: ch=%d, txq=%d", chn, tx_queue_idx);
                hif_ul_write_swq(tx_queue_idx, skb);
                atomic_dec(&ccci_port_info[ccci_ch_to_port(chn)].reserve_space);
                atomic_dec(&ccci_tx_waitq[tx_queue_idx].reserve_space);
                eemcs_update_statistics(0, ccci_ch_to_port(chn), TX, NORMAL);
            } else {
                DBGLOG(CCCI, WAR, "[EXPT] Invalid exception channel(%d)!", chn);
                /*
                 * if KAL_FAIL is returned, the skb is freed at the device layer,
                 * so we don't have to free it here
                 */
                //eemcs_ex_ccci_tx_drop(ccci_ch_to_port(chn));
                ret = KAL_FAIL;
            }
        } else
#endif
        {
            tx_queue_idx = ccci_port_info[ccci_ch_to_port(chn)].txq_id;
            hif_ul_write_swq(tx_queue_idx, skb);
            atomic_dec(&ccci_port_info[ccci_ch_to_port(chn)].reserve_space);
            atomic_dec(&ccci_tx_waitq[tx_queue_idx].reserve_space);
            eemcs_update_statistics(0, ccci_ch_to_port(chn), TX, NORMAL);
        }
    }

    DEBUG_LOG_FUNCTION_LEAVE;
    return ret;
}
static ssize_t eemcs_ipc_read(struct file *fp, char *buf, size_t count, loff_t *ppos)
{
    unsigned int flag;
    eemcs_ipc_node_t *curr_node = (eemcs_ipc_node_t *)fp->private_data;
    KAL_UINT8 node_id = curr_node->ipc_node_id;          /* node_id */
    KAL_UINT8 port_id = eemcs_ipc_inst.eemcs_port_id;    /* port_id */
    KAL_UINT32 rx_pkt_cnt, read_len;
    struct sk_buff *rx_skb;
    unsigned char *payload = NULL;
    CCCI_BUFF_T *ccci_header;
    int ret = 0;

    DEBUG_LOG_FUNCTION_ENTRY;
    flag = fp->f_flags;

    DBGLOG(IPCD, TRA, "ipc_read: device iminor=%d, len=%d", node_id, count);

    if (!eemcs_device_ready()) {
        DBGLOG(IPCD, ERR, "MD device not ready!");
        ret = -EIO;
        return ret;
    }

    /* Check the received packet count */
    rx_pkt_cnt = atomic_read(&curr_node->rx_pkt_cnt);
    KAL_ASSERT(rx_pkt_cnt >= 0);

    if (rx_pkt_cnt == 0) {
        if (flag & O_NONBLOCK) {
            ret = -EAGAIN;
            DBGLOG(IPCD, TRA, "ipc_read: PORT%d for NONBLOCK", port_id);
            goto _exit;
        }
        ret = wait_event_interruptible(curr_node->rx_waitq, atomic_read(&curr_node->rx_pkt_cnt) > 0);
        if (ret) {
            ret = -EINTR;
            DBGLOG(IPCD, ERR, "[RX]PORT%d read interrupted by syscall.signal(%lld)", port_id,
                   *(long long *)current->pending.signal.sig);
            goto _exit;
        }
    }

    DBGLOG(IPCD, TRA, "ipc_read: dequeue from rx_skb_list, rx_pkt_cnt(%d)", rx_pkt_cnt);
    rx_skb = skb_dequeue(&curr_node->rx_skb_list);
    /* There should be an rx_skb in the list */
    KAL_ASSERT(NULL != rx_skb);
    atomic_dec(&curr_node->rx_pkt_cnt);
    rx_pkt_cnt = atomic_read(&curr_node->rx_pkt_cnt);
    KAL_ASSERT(rx_pkt_cnt >= 0);

    ccci_header = (CCCI_BUFF_T *)rx_skb->data;
    DBGLOG(IPCD, TRA, "ipc_read: PORT%d CCCI_MSG(0x%08X, 0x%08X, %02d, 0x%08X)",
           port_id, ccci_header->data[0], ccci_header->data[1], ccci_header->channel, ccci_header->reserved);
    /* If this does not match, debug the EEMCS CCCI demux skb part */
    KAL_ASSERT(ccci_header->channel == eemcs_ipc_inst.ccci_ch.rx);

    read_len = ccci_header->data[1] - sizeof(CCCI_BUFF_T);
    /* remove the CCCI_HEADER */
    skb_pull(rx_skb, sizeof(CCCI_BUFF_T));
    DBGLOG(IPCD, TRA, "ipc_read: PORT%d read_len=%d", port_id, read_len);

    payload = (unsigned char *)rx_skb->data;
    if (count < read_len) {
        DBGLOG(IPCD, ERR, "PKT DROP of PORT%d! want_read=%d, read_len=%d", port_id, count, read_len);
        atomic_inc(&curr_node->rx_pkt_drop_cnt);
        eemcs_update_statistics(0, eemcs_ipc_inst.eemcs_port_id, RX, DROP);
        dev_kfree_skb(rx_skb);
        ret = -E2BIG;
        goto _exit;
    }

    DBGLOG(IPCD, TRA, "ipc_read: copy_to_user(len=%d), %p -> %p", read_len, payload, buf);
    ret = copy_to_user(buf, payload, read_len);
    if (ret != 0) {
        DBGLOG(IPCD, ERR, "[RX]PORT%d copy_to_user(len=%d, %p->%p) fail: %d",
               port_id, read_len, payload, buf, ret);
        dev_kfree_skb(rx_skb);   /* the dequeued skb would otherwise leak here */
        ret = -EFAULT;
        goto _exit;
    }

    dev_kfree_skb(rx_skb);
    DEBUG_LOG_FUNCTION_LEAVE;
    return read_len;

_exit:
    DEBUG_LOG_FUNCTION_LEAVE;
    return ret;
}
ccci_expt_port_cfg* get_expt_port_info(KAL_UINT32 port_id)
{
    DEBUG_LOG_FUNCTION_ENTRY;
    KAL_ASSERT(port_id < CCCI_PORT_NUM_MAX);
    DEBUG_LOG_FUNCTION_LEAVE;
    return &ccci_expt_port_info[port_id];
}
static ssize_t eemcs_cdev_read(struct file *fp, char *buf, size_t count, loff_t *ppos)
{
    unsigned int flag;
    eemcs_cdev_node_t *curr_node = (eemcs_cdev_node_t *)fp->private_data;
    KAL_UINT8 port_id = curr_node->eemcs_port_id;   /* port_id */
    KAL_UINT32 p_type, rx_pkt_cnt, read_len, rx_pkt_cnt_int;
    struct sk_buff *rx_skb;
    unsigned char *payload = NULL;
    CCCI_BUFF_T *ccci_header;
    int ret = 0;

    DEBUG_LOG_FUNCTION_ENTRY;
    flag = fp->f_flags;

    //verbose DBGLOG(CHAR, DBG, "read device iminor (0x%x), length(0x%x)", port_id, count);

    p_type = ccci_get_port_type(port_id);
    if (p_type != EX_T_USER) {
        DBGLOG(CHAR, ERR, "PORT%d refuse port(%d) access user port", port_id, p_type);
        goto _exit;
    }

    rx_pkt_cnt_int = atomic_read(&curr_node->buff.remaining_rx_cnt);
    KAL_ASSERT(rx_pkt_cnt_int >= 0);

    if (rx_pkt_cnt_int == 1) {
        /* Continue a streaming read from the cached, partially-consumed skb */
        DBGLOG(CHAR, DBG, "Streaming reading!! PORT%d len=%d\n", port_id, count);
        rx_skb = curr_node->buff.remaining_rx_skb;
        /* rx_skb shall not be NULL */
        KAL_ASSERT(NULL != rx_skb);
        read_len = curr_node->buff.remaining_len;
        KAL_ASSERT(read_len >= 0);
    } else {
        rx_pkt_cnt = atomic_read(&curr_node->rx_pkt_cnt);
        KAL_ASSERT(rx_pkt_cnt >= 0);

        if (rx_pkt_cnt == 0) {
            if (flag & O_NONBLOCK) {
                ret = -EAGAIN;
                //verbose DBGLOG(CHAR, DBG, "[CHAR] PORT(%d) eemcs_cdev_read return O_NONBLOCK for NON-BLOCKING", port_id);
                goto _exit;
            }
            ret = wait_event_interruptible(curr_node->rx_waitq, atomic_read(&curr_node->rx_pkt_cnt) > 0);
            if (ret) {
                ret = -EINTR;
                DBGLOG(CHAR, ERR, "PORT%d interrupted while waiting for data.", port_id);
                goto _exit;
            }
        }

        DBGLOG(CHAR, TRA, "eemcs_cdev_read dequeue from rx_skb_list, rx_pkt_cnt(%d)", rx_pkt_cnt);
        rx_skb = skb_dequeue(&curr_node->rx_skb_list);
        /* There should be an rx_skb in the list */
        KAL_ASSERT(NULL != rx_skb);
        atomic_dec(&curr_node->rx_pkt_cnt);
        rx_pkt_cnt = atomic_read(&curr_node->rx_pkt_cnt);
        KAL_ASSERT(rx_pkt_cnt >= 0);

        ccci_header = (CCCI_BUFF_T *)rx_skb->data;
        DBGLOG(CHAR, TRA, "eemcs_cdev_read: PORT%d CCCI_H(0x%x)(0x%x)(0x%x)(0x%x)",
               port_id, ccci_header->data[0], ccci_header->data[1], ccci_header->channel, ccci_header->reserved);

        /* If this does not match, debug the EEMCS CCCI demux skb part */
        if (ccci_header->channel != curr_node->ccci_ch.rx) {
            DBGLOG(CHAR, ERR, "Assert(ccci_header->channel == curr_node->ccci_ch.rx)");
            DBGLOG(CHAR, ERR, "ccci_header->channel:%d, curr_node->ccci_ch.rx:%d, curr_node->eemcs_port_id:%d",
                   ccci_header->channel, curr_node->ccci_ch.rx, curr_node->eemcs_port_id);
            KAL_ASSERT(ccci_header->channel == curr_node->ccci_ch.rx);
        }

        if (!(ccci_get_port_cflag(port_id) & EXPORT_CCCI_H)) {
            read_len = ccci_header->data[1] - sizeof(CCCI_BUFF_T);
            /* remove the CCCI_HEADER */
            skb_pull(rx_skb, sizeof(CCCI_BUFF_T));
        } else {
            if (ccci_header->data[0] == CCCI_MAGIC_NUM)
                read_len = sizeof(CCCI_BUFF_T);
            else
                read_len = ccci_header->data[1];
        }
    }

    DBGLOG(CHAR, TRA, "eemcs_cdev_read: PORT%d read_len=%d", port_id, read_len);

    /* 20130816 ian add aud dump */
    {
        char *ptr = (char *)rx_skb->data;

        /* dump 32 bytes of the !!!CCCI DATA!!! part */
        CDEV_LOG(port_id, CHAR, ERR, "[DUMP]PORT%d eemcs_cdev_read\n\
[00..07](0x%02x)(0x%02x)(0x%02x)(0x%02x)(0x%02x)(0x%02x)(0x%02x)(0x%02x)\n\
[08..15](0x%02x)(0x%02x)(0x%02x)(0x%02x)(0x%02x)(0x%02x)(0x%02x)(0x%02x)\n\
[16..23](0x%02x)(0x%02x)(0x%02x)(0x%02x)(0x%02x)(0x%02x)(0x%02x)(0x%02x)\n\
[24..31](0x%02x)(0x%02x)(0x%02x)(0x%02x)(0x%02x)(0x%02x)(0x%02x)(0x%02x)",
                 port_id,
                 (int)*(ptr + 0),  (int)*(ptr + 1),  (int)*(ptr + 2),  (int)*(ptr + 3),
                 (int)*(ptr + 4),  (int)*(ptr + 5),  (int)*(ptr + 6),  (int)*(ptr + 7),
                 (int)*(ptr + 8),  (int)*(ptr + 9),  (int)*(ptr + 10), (int)*(ptr + 11),
                 (int)*(ptr + 12), (int)*(ptr + 13), (int)*(ptr + 14), (int)*(ptr + 15),
                 (int)*(ptr + 16), (int)*(ptr + 17), (int)*(ptr + 18), (int)*(ptr + 19),
                 (int)*(ptr + 20), (int)*(ptr + 21), (int)*(ptr + 22), (int)*(ptr + 23),
                 (int)*(ptr + 24), (int)*(ptr + 25), (int)*(ptr + 26), (int)*(ptr + 27),
                 (int)*(ptr + 28), (int)*(ptr + 29), (int)*(ptr + 30), (int)*(ptr + 31));
    }

    payload = (unsigned char *)rx_skb->data;
    if (count < read_len) {
        /* The first streaming read: cache the skb for the next read() call */
        if (rx_pkt_cnt_int == 0) {
            atomic_inc(&curr_node->buff.remaining_rx_cnt);
            curr_node->buff.remaining_rx_skb = rx_skb;
        }
        DBGLOG(CHAR, DBG, "PORT%d !!! USER BUFF(%d) less than DATA SIZE(%d) !!!", port_id, count, read_len);
        DBGLOG(CHAR, DBG, "copy data from %p to %p length = %d", payload, buf, count);
        ret = copy_to_user(buf, payload, count);
        if (ret == 0) {
            curr_node->buff.remaining_len = read_len - count;
            skb_pull(rx_skb, count);   /* move the data pointer */
            read_len = count;          /* update the actually-read length */
        } else {
            /* If an error occurs, discard the skb buffer */
            DBGLOG(CHAR, ERR, "PORT%d !!! PKT DROP !!! fail copy_to_user buf(%d, %d)", port_id, count, ret);
            atomic_inc(&curr_node->rx_pkt_drop_cnt);   /* was atomic_dec(); a drop increases the drop count */
            eemcs_update_statistics(0, port_id, RX, DROP);
            eemcs_ccci_release_rx_skb(port_id, 1, rx_skb);   /* release before the skb is freed */
            dev_kfree_skb(rx_skb);
            if (rx_pkt_cnt_int == 1) {
                curr_node->buff.remaining_len = 0;
                curr_node->buff.remaining_rx_skb = NULL;
                atomic_dec(&curr_node->buff.remaining_rx_cnt);
            }
            ret = -EFAULT;   /* return a proper error instead of the raw copy_to_user residue */
        }
    } else {
        DBGLOG(CHAR, DBG, "copy data from %p to %p length = %d", payload, buf, read_len);
        ret = copy_to_user(buf, payload, read_len);
        if (ret != 0) {
            DBGLOG(CHAR, ERR, "copy_to_user len=%d fail: %d", read_len, ret);
            ret = -EFAULT;   /* return a proper error instead of the raw copy_to_user residue */
        }
        eemcs_ccci_release_rx_skb(port_id, 1, rx_skb);   /* release before the skb is freed */
        dev_kfree_skb(rx_skb);
        if (rx_pkt_cnt_int == 1) {
            curr_node->buff.remaining_len = 0;
            curr_node->buff.remaining_rx_skb = NULL;
            atomic_dec(&curr_node->buff.remaining_rx_cnt);
        }
    }

    if (ret == 0) {
        DEBUG_LOG_FUNCTION_LEAVE;
        return read_len;
    }

_exit:
    DEBUG_LOG_FUNCTION_LEAVE;
    return ret;
}
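/*
 * User-space usage sketch of the "streaming read" path above: when
 * read() is called with a buffer smaller than the packet, the kernel
 * side caches the remainder in buff.remaining_rx_skb, and the next
 * read() continues where the previous one stopped. The device path is
 * an assumption for illustration.
 */
#include <fcntl.h>
#include <unistd.h>

static int read_one_packet(const char *dev_path, char *out, int pkt_len)
{
    int fd = open(dev_path, O_RDONLY);   /* e.g. a hypothetical /dev/eemcs_... node */
    int got = 0, n;

    if (fd < 0)
        return -1;
    while (got < pkt_len) {
        n = read(fd, out + got, pkt_len - got);   /* may return a partial chunk */
        if (n <= 0)
            break;                                /* error or nothing left */
        got += n;
    }
    close(fd);
    return got;
}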
/*
 * @brief  Initialization of exception handling.
 * @param  None
 * @return This function always returns KAL_SUCCESS.
 */
KAL_INT32 eemcs_expt_mod_init(void)
{
    KAL_UINT32 i = 0;
    /* these were commented out but are used below, so they are restored here */
    KAL_UINT32 except_txq = 0, except_rxq = 0;
    KAL_UINT32 nonstop_rxq = 0;   /* for the Log path, to output as much as possible */
    KAL_INT32 ret;
    ccci_port_cfg *log_queue_config;

    DEBUG_LOG_FUNCTION_ENTRY;

    /* Init the md exception type */
    g_except_inst.md_ex_type = 0;

    /* Init expt_cb and expt_cb_lock */
    spin_lock_init(&g_except_inst.expt_cb_lock);
    for (i = 0; i < CCCI_PORT_NUM_MAX; i++) {
        g_except_inst.expt_cb[i] = NULL;
        if (TR_Q_INVALID != ccci_expt_port_info[i].expt_txq_id)
            set_bit(SDIO_TXQ(ccci_expt_port_info[i].expt_txq_id), (unsigned long *)&except_txq);
        if (TR_Q_INVALID != ccci_expt_port_info[i].expt_rxq_id)
            set_bit(SDIO_RXQ(ccci_expt_port_info[i].expt_rxq_id), (unsigned long *)&except_rxq);
    }

    eemcs_expt_ut_init();

    log_queue_config = ccci_get_port_info(CCCI_PORT_MD_LOG);
    set_bit(SDIO_RXQ(log_queue_config->rxq_id), (unsigned long *)&nonstop_rxq);

    hif_except_init(nonstop_rxq, (except_txq << 16) | except_rxq);
    ret = hif_reg_expt_cb(ccci_df_to_ccci_exception_callback);
    KAL_ASSERT(ret == KAL_SUCCESS);

    DBGLOG(EXPT, TRA, "nonstop_txq=%d, nonstop_rxq=%d, exp_txq=%d, exp_rxq=%d",
           0, nonstop_rxq, except_txq, except_rxq);

    /* Init the Tx Q list */
    for (i = 0; i < SDIO_TX_Q_NUM; i++) {
        g_except_inst.txq[i].id = -1;
        atomic_set(&g_except_inst.txq[i].pkt_cnt, 0);
        skb_queue_head_init(&g_except_inst.txq[i].skb_list);
    }

    /* Init the Rx Q list */
    for (i = 0; i < SDIO_RX_Q_NUM; i++) {
        g_except_inst.rxq[i].id = -1;
        atomic_set(&g_except_inst.rxq[i].pkt_cnt, 0);
        skb_queue_head_init(&g_except_inst.rxq[i].skb_list);
    }

    /* Init the port list */
    for (i = 0; i < CCCI_PORT_NUM; i++) {
        atomic_set(&g_except_inst.port[i].pkt_cnt, 0);
        skb_queue_head_init(&g_except_inst.port[i].skb_list);
    }

    /* Initialize the drop counters */
    eemcs_expt_reset_statistics();

    /* Initialize the exception state */
    eemcs_exception_state = EEMCS_EX_NONE;

    /* Initialize the exception monitor timer */
    init_timer(&g_except_inst.md_ex_monitor);
    g_except_inst.md_ex_monitor.function = ex_monitor_func;
    g_except_inst.md_ex_monitor.data = (unsigned long)&g_except_inst;

#ifdef ENABLE_MD_WDT_PROCESS
    eemcs_ccci_register_WDT_callback(eemcs_wdt_reset_callback);
#endif

    DEBUG_LOG_FUNCTION_LEAVE;
    return KAL_SUCCESS;
}
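/*
 * Note: init_timer() and the .function/.data pair were removed in Linux
 * 4.15. A hedged sketch of the same md_ex_monitor setup with the
 * timer_setup() API; the container struct name is assumed, since the
 * type of g_except_inst is not shown here.
 */
static void ex_monitor_func_tl(struct timer_list *t)
{
    /* recover the owning instance from the timer, instead of the old .data field */
    struct eemcs_expt_inst *inst = from_timer(inst, t, md_ex_monitor);   /* type name assumed */

    /* ... original ex_monitor_func body, operating on inst ... */
}

/* in eemcs_expt_mod_init(), replacing the init_timer() block above:
 *     timer_setup(&g_except_inst.md_ex_monitor, ex_monitor_func_tl, 0);
 */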
KAL_INT32 eemcs_ccci_UL_write_skb_to_swq(CCCI_CHANNEL_T chn, struct sk_buff *skb)
{
    CCCI_BUFF_T *pccci_h = NULL;
    KAL_UINT32 tx_queue_idx;
    KAL_INT32 ret = KAL_SUCCESS;
#ifdef __EEMCS_EXPT_SUPPORT__
    ccci_expt_port_cfg *expt_port_info;
    EEMCS_EXCEPTION_STATE mode = EEMCS_EX_INVALID;
#endif
    KAL_UINT32 channel = 0;
    bool force_md_assert_flag = false;
#if defined (DBG_FEATURE_ADD_CCCI_SEQNO)
    KAL_UINT64 flag;
#endif

    DEBUG_LOG_FUNCTION_ENTRY;

    if (NULL != skb) {
        pccci_h = (CCCI_BUFF_T *)skb->data;
        DBGLOG(CCCI, DBG, "[TX]CCCI_H: 0x%x, 0x%x, 0x%x, 0x%x",
               pccci_h->data[0], pccci_h->data[1], pccci_h->channel, pccci_h->reserved);

#if defined (DBG_FEATURE_ADD_CCCI_SEQNO)
        channel = (pccci_h->channel) | (pccci_h->seq_num << 16) | (pccci_h->assert_bit << 31);
#else
        channel = pccci_h->channel;
#endif
        if (channel == CCCI_FORCE_RESET_MODEM_CHANNEL)
            force_md_assert_flag = true;

        /* 1. separate the data and ack packets for ccmni0&1 UL */
#if defined (TDD_DL_DROP_SOLUTION2)
        if ((chn != CH_NET1_TX) && (chn != CH_NET2_TX) && !force_md_assert_flag)
            KAL_ASSERT(pccci_h->channel == chn);
#else
        KAL_ASSERT(pccci_h->channel == chn);
#endif

        /* 2. ccci channel check */
        KAL_ASSERT(pccci_h->channel < CH_NUM_MAX || force_md_assert_flag);

        /* 3. fs packet check: the reserved value must be less than fs_buf_max_num = 5 */
        if ((pccci_h->channel == CH_FS_TX) && (pccci_h->reserved > 0x4)) {
            int *pdata = (int *)skb->data;
            DBGLOG(CCCI, ERR, "[TX]FS: 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x",
                   *pdata, *(pdata + 1), *(pdata + 2), *(pdata + 3),
                   *(pdata + 4), *(pdata + 5), *(pdata + 6), *(pdata + 7));
        }
    } else {
        DBGLOG(CCCI, WAR, "CH%d write NULL skb to kick DF process!", chn);
    }

    if (force_md_assert_flag) {
        tx_queue_idx = 0;
        hif_ul_write_swq(tx_queue_idx, skb);
    } else {
#ifdef __EEMCS_EXPT_SUPPORT__
        if (is_exception_mode(&mode)) {
            if (is_valid_exception_tx_channel(chn)) {
                /* add the sequence number to the ccci header */
#if defined (DBG_FEATURE_ADD_CCCI_SEQNO)
                if (likely(pccci_h)) {
                    spin_lock_irqsave(&ccci_seqno_tbl[pccci_h->channel].tx_seqno_lock, flag);
                    pccci_h->seq_num = ccci_seqno_tbl[pccci_h->channel].seqno[TX]++;
                    pccci_h->assert_bit = 1;   /* why assert_bit = 1 instead of assert_bit = 0? */
                    spin_unlock_irqrestore(&ccci_seqno_tbl[pccci_h->channel].tx_seqno_lock, flag);
                    DBGLOG(CCCI, DBG, "[TX] CH%d:ch=%d seq_num=(0x%02X->0x%02X) assert_bit=%d channel=0x%08X", chn,
                           pccci_h->channel, pccci_h->seq_num, ccci_seqno_tbl[pccci_h->channel].seqno[TX],
                           pccci_h->assert_bit, channel);
                }
#endif
                expt_port_info = get_expt_port_info(ccci_ch_to_port(chn));
                /* pick the exception TX queue */
                tx_queue_idx = expt_port_info->expt_txq_id;
                hif_ul_write_swq(tx_queue_idx, skb);
                atomic_dec(&ccci_port_info[ccci_ch_to_port(chn)].reserve_space);
                atomic_dec(&ccci_tx_waitq[tx_queue_idx].reserve_space);
                eemcs_update_statistics(0, ccci_ch_to_port(chn), TX, NORMAL);
            } else {
                DBGLOG(CCCI, WAR, "[EXPT] Invalid exception channel(%d)!", chn);
                /*
                 * if KAL_FAIL is returned, the skb is freed at the device layer,
                 * so we don't have to free it here
                 */
                //eemcs_ex_ccci_tx_drop(ccci_ch_to_port(chn));
                ret = KAL_FAIL;
            }
        } else
#endif
        {
            /* add the sequence number to the ccci header */
#if defined (DBG_FEATURE_ADD_CCCI_SEQNO)
            if (likely(pccci_h)) {
                spin_lock_irqsave(&ccci_seqno_tbl[pccci_h->channel].tx_seqno_lock, flag);
                pccci_h->seq_num = ccci_seqno_tbl[pccci_h->channel].seqno[TX]++;
                pccci_h->assert_bit = 1;   /* why assert_bit = 1 instead of assert_bit = 0? */
                spin_unlock_irqrestore(&ccci_seqno_tbl[pccci_h->channel].tx_seqno_lock, flag);
                DBGLOG(CCCI, DBG, "[TX] CH%d:ch=%d seq_num=(0x%02X->0x%02X) assert_bit=%d channel=0x%08X", chn,
                       pccci_h->channel, pccci_h->seq_num, ccci_seqno_tbl[pccci_h->channel].seqno[TX],
                       pccci_h->assert_bit, channel);
            }
#endif
            tx_queue_idx = ccci_port_info[ccci_ch_to_port(chn)].txq_id;
            hif_ul_write_swq(tx_queue_idx, skb);
            atomic_dec(&ccci_port_info[ccci_ch_to_port(chn)].reserve_space);
            atomic_dec(&ccci_tx_waitq[tx_queue_idx].reserve_space);
            eemcs_update_statistics(0, ccci_ch_to_port(chn), TX, NORMAL);
        }
    }

    DEBUG_LOG_FUNCTION_LEAVE;
    return ret;
}
ccci_expt_port_cfg* ccci_get_expt_port_info(KAL_UINT32 ccci_port_index)
{
    DEBUG_LOG_FUNCTION_ENTRY;
    KAL_ASSERT(ccci_port_index < CCCI_PORT_NUM_MAX);
    DEBUG_LOG_FUNCTION_LEAVE;
    return &ccci_expt_port_info[ccci_port_index];
}
int ccci_df_to_ccci_callback(unsigned int rxq_no)
{
    int ret, hc_ret, is_xcmd;
    struct sk_buff *skb = NULL;
    CCCI_BUFF_T *ccci_h = NULL;
    XBOOT_CMD *p_xcmd = NULL;
    KAL_UINT32 port_id = 0;
#ifdef __EEMCS_EXPT_SUPPORT__
    EEMCS_EXCEPTION_STATE mode = EEMCS_EX_INVALID;
#endif

    DEBUG_LOG_FUNCTION_ENTRY;

    /* Step 1. read skb from swq */
    skb = hif_dl_read_swq(rxq_no);
    if (skb == NULL) {
        DBGLOG(CCCI, DBG, "ccci_df_to_ccci_callback read NULL skb on %d", rxq_no);
#ifdef __EEMCS_EXPT_SUPPORT__
        /* mode only exists with exception support; guard the check accordingly */
        if (is_exception_mode(&mode))
            return KAL_FAIL;
#endif
        KAL_ASSERT(NULL != skb);
    }

    /* Step 2. call handle complete */
    hc_ret = hif_dl_pkt_handle_complete(rxq_no);
    KAL_ASSERT(0 == hc_ret);

    DBGLOG(CCCI, DBG, "ccci_df_to_ccci_callback() rxq_no = %d", rxq_no);

    /* Step 3. buffer type */
    if (rxq_no == RXQ_Q0)
        is_xcmd = is_xboot_command(skb);
    else
        is_xcmd = false;

    wake_lock_timeout(&eemcs_wake_lock, HZ);   /* use a 1 s wake lock */

    if (is_xcmd == true) {
        /* Step 4. callback to xBoot */
        p_xcmd = (XBOOT_CMD *)skb->data;
        KAL_ASSERT(p_xcmd->magic == (KAL_UINT32)MAGIC_MD_CMD);
        if (NULL != ccci_port_info[CCCI_PORT_CTRL].ch.rx_cb) {
            DBGLOG(CCCI, DBG, "[CCCI][DF CALLBACK] XBOOT_CMD (0x%X)(0x%X)(0x%X)(0x%X)",
                   p_xcmd->magic, p_xcmd->msg_id, p_xcmd->status, p_xcmd->reserved[0]);
#ifdef __EEMCS_EXPT_SUPPORT__
            if (is_exception_mode(&mode)) {
                if (!is_valid_exception_port(CCCI_PORT_CTRL, true)) {
                    ret = KAL_FAIL;
                    dev_kfree_skb(skb);
                    eemcs_expt_ccci_rx_drop(port_id);
                    DBGLOG(CCCI, ERR, "[CCCI] [DF CALLBACK] EXCEPTION MODE PKT DROPPED !! Q(%d) PORT(%d)",
                           rxq_no, CCCI_PORT_CTRL);
                    goto _end;
                } else {
                    ret = ccci_port_info[CCCI_PORT_CTRL].ch.rx_cb(skb, 0);
                }
            } else
#endif
            {
                ret = ccci_port_info[CCCI_PORT_CTRL].ch.rx_cb(skb, 0);
            }
        } else {
            ret = KAL_FAIL;
            dev_kfree_skb(skb);
            DBGLOG(CCCI, ERR, "[CCCI] !!! PKT DROP !!! ccci_df_to_ccci_callback xBoot not registered");
        }
    } else {
        /* Step 4. callback to the CCCI device */
        ccci_h = (CCCI_BUFF_T *)skb->data;
        KAL_ASSERT(ccci_h->channel < CH_NUM_MAX);
        port_id = ccci_ch_to_port(ccci_h->channel);

        if (NULL != ccci_port_info[port_id].ch.rx_cb) {
            DBGLOG(CCCI, DBG, "ccci_df_to_ccci_callback Rx packet CCCI_H(0x%x)(0x%x)(0x%x)(0x%x)",
                   ccci_h->data[0], ccci_h->data[1], ccci_h->channel, ccci_h->reserved);
#ifdef __EEMCS_EXPT_SUPPORT__
            if (is_exception_mode(&mode)) {
                if (!is_valid_exception_port(port_id, true)) {
                    ret = KAL_FAIL;
                    dev_kfree_skb(skb);
                    eemcs_expt_ccci_rx_drop(port_id);
                    DBGLOG(CCCI, ERR, "[CCCI] [DF CALLBACK] EXCEPTION MODE PKT DROPPED !! Q(%d) PORT(%d)",
                           rxq_no, port_id);
                    goto _end;
                } else {
                    ret = ccci_port_info[port_id].ch.rx_cb(skb, 0);
                }
            } else
#endif
            {
                ret = ccci_port_info[port_id].ch.rx_cb(skb, 0);
            }
        } else {
            ret = KAL_FAIL;
            dev_kfree_skb(skb);
            DBGLOG(CCCI, ERR, "[CCCI] !!! PKT DROP !!! ccci_df_to_ccci_callback ccci_port(%d) channel Rx(%d) not registered",
                   port_id, ccci_h->channel);
            eemcs_update_statistics(0, port_id, RX, DROP);
        }
        eemcs_update_statistics(0, port_id, RX, NORMAL);
    }

_end:
    DEBUG_LOG_FUNCTION_LEAVE;
    return ret;
}
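/*
 * A minimal sketch of what the "rx callback not registered" drop path
 * above implies: each port publishes an rx_cb in ccci_port_info[].ch
 * that takes ownership of the skb. The callback body and the
 * registration helper here are hypothetical illustrations, not the
 * driver's actual registration API.
 */
static int my_port_rx_cb(struct sk_buff *skb, KAL_UINT32 data)
{
    /* the callback owns the skb from here on; consume and free it */
    dev_kfree_skb(skb);
    return KAL_SUCCESS;
}

static void my_port_register_rx_cb(KAL_UINT32 port_id)
{
    KAL_ASSERT(port_id < CCCI_PORT_NUM_MAX);
    ccci_port_info[port_id].ch.rx_cb = my_port_rx_cb;
}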