/* UL APIs */
/*
 * ccci_ut_UL_write_skb_to_swq - unit-test stub for the uplink software queue.
 *
 * Fake "transmit" entry used in loopback unit testing: instead of handing
 * @skb to hardware, it duplicates the payload into a new skb and requeues
 * it on the Rx side (ccci_ut_queue) so the normal Rx callback path can be
 * exercised.  The original @skb is always consumed (freed) when non-NULL.
 *
 * @qno: Tx queue the caller intended to use; doubles as the loopback Rx
 *       queue number when exception-UT routing is compiled out.
 * @skb: packet to loop back, or NULL for a "fake kick" (no-op wakeup).
 *
 * Returns KAL_SUCCESS in every case, including allocation failure
 * (NOTE(review): the dev_alloc_skb-failure path drops the packet but still
 * reports success — presumably acceptable for UT code; confirm callers do
 * not rely on a failure code here).
 */
int ccci_ut_UL_write_skb_to_swq(MTLTE_DF_TX_QUEUE_TYPE qno , struct sk_buff *skb)
{
#ifdef _EEMCS_EXCEPTION_UT
    EEMCS_EXPT_UT_SET *eemcs_expt_inst = eemcs_expt_get_inst();
#endif
    DEBUG_LOG_FUNCTION_ENTRY;
    if(NULL != skb) {
        DBGLOG(CCCI,DBG, "[CCCI_UT] queue(%d) Tx",qno);
#ifdef _EEMCS_CCCI_LB_UT
#if defined(__EEMCS_XBOOT_SUPPORT__) && defined(_EEMCS_BOOT_UT)
        /* Before the device is up, boot-stage UT routes packets to the
         * boot queue instead of the loopback path. Ownership of skb is
         * transferred to ccci_boot_write_desc_to_q here. */
        if (!eemcs_device_ready()) {
            ccci_boot_write_desc_to_q(skb);
            return KAL_SUCCESS;
        } else
#endif // __EEMCS_XBOOT_SUPPORT__ && _EEMCS_BOOT_UT
        {
#ifdef _EEMCS_EXCEPTION_UT
            /* Exception UT can disable loopback at runtime. */
            if(eemcs_expt_inst->enable_ccci_lb == 1)
#endif /*_EEMCS_EXCEPTION_UT*/
            {
                struct sk_buff *new_skb;
                KAL_UINT32 lb_rx_qno;
                /* Duplicate the payload: the loopback Rx consumer must own
                 * its own skb because the original is freed below. */
                new_skb = dev_alloc_skb(skb->len);
                if(new_skb == NULL){
                    DBGLOG(CCCI,ERR,"[CCCI_UT] ccci_ut_UL_write_skb_to_swq dev_alloc_skb fail sz(%d).", skb->len);
                    dev_kfree_skb(skb);
                    DEBUG_LOG_FUNCTION_LEAVE;
                    return KAL_SUCCESS;
                }
                memcpy(skb_put(new_skb, skb->len), skb->data, skb->len);
                /* Rewrite the CCCI channel so the copy looks like an Rx packet. */
                ccci_ul_lb_channel(new_skb);
#ifdef _EEMCS_EXCEPTION_UT
                /*Get Rx loopback Queue number*/
                lb_rx_qno = ccci_ul_lb_queue(new_skb);
#else
                /* Without exception-UT routing, loop back on the same queue index. */
                lb_rx_qno = qno;
#endif /*_EEMCS_EXCEPTION_UT*/
                skb_queue_tail(&ccci_ut_queue[lb_rx_qno], new_skb);
                /* Simulate the DF->CCCI Rx notification. */
                ccci_df_to_ccci_callback(lb_rx_qno);
            }
        }
#else
        /* Loopback UT not compiled in: packet is simply discarded. */
        DBGLOG(CCCI,DBG, "[CCCI_UT]========= DROP!!!");
#endif
        dev_kfree_skb(skb);
    }else{
        /* NULL skb is a deliberate "kick" with nothing to transmit. */
        DBGLOG(CCCI,DBG, "[CCCI_UT] fake kick DF !!!");
    }
    DEBUG_LOG_FUNCTION_LEAVE;
    return KAL_SUCCESS;
}
/*
 * eemcs_ipc_read - character-device read handler for an IPC node.
 *
 * Dequeues one complete CCCI packet from the node's rx_skb_list, strips the
 * CCCI header and copies the payload to user space. Blocks (unless
 * O_NONBLOCK) until at least one packet is queued.
 *
 * @fp:    opened file; private_data is the eemcs_ipc_node_t.
 * @buf:   user buffer.
 * @count: user buffer size; must be >= the packet payload length.
 * @ppos:  unused (message-oriented device).
 *
 * Returns the payload length on success, or a negative errno:
 *  -EIO    device not ready, -EAGAIN no data with O_NONBLOCK,
 *  -EINTR  interrupted while waiting, -E2BIG user buffer too small
 *  (packet is dropped and counted), -EFAULT copy_to_user failed
 *  (packet is dropped).
 *
 * Fixes vs. previous revision:
 *  - rx_skb was leaked on the copy_to_user failure path; it is now freed.
 *  - the early -EIO return skipped DEBUG_LOG_FUNCTION_LEAVE, leaving the
 *    entry/leave trace unbalanced; the leave log is now emitted.
 */
static ssize_t eemcs_ipc_read(struct file *fp, char *buf, size_t count, loff_t *ppos)
{
    unsigned int flag;
    eemcs_ipc_node_t *curr_node = (eemcs_ipc_node_t *)fp->private_data;
    KAL_UINT8 node_id = curr_node->ipc_node_id;/* node_id */
    KAL_UINT8 port_id = eemcs_ipc_inst.eemcs_port_id; /* port_id */
    KAL_UINT32 rx_pkt_cnt, read_len;
    struct sk_buff *rx_skb;
    unsigned char *payload=NULL;
    CCCI_BUFF_T *ccci_header;
    int ret = 0;

    DEBUG_LOG_FUNCTION_ENTRY;
    flag=fp->f_flags;
    DBGLOG(IPCD,TRA,"ipc_read: deivce iminor=%d, len=%d", node_id ,count);

    if(!eemcs_device_ready())
    {
        DBGLOG(IPCD,ERR,"MD device not ready!");
        ret= -EIO;
        DEBUG_LOG_FUNCTION_LEAVE; /* keep ENTRY/LEAVE trace balanced */
        return ret;
    }

    /* Check receive pkt count */
    rx_pkt_cnt = atomic_read(&curr_node->rx_pkt_cnt);
    /* NOTE(review): rx_pkt_cnt is unsigned, so this assert is vacuous;
     * kept for parity with the rest of the driver. */
    KAL_ASSERT(rx_pkt_cnt >= 0);

    if(rx_pkt_cnt == 0){
        if (flag&O_NONBLOCK)
        {
            ret=-EAGAIN;
            DBGLOG(IPCD,TRA,"ipc_read: PORT%d for NONBLOCK",port_id);
            goto _exit;
        }
        /* Blocking read: sleep until the demux path queues a packet. */
        ret = wait_event_interruptible(curr_node->rx_waitq,
                                       atomic_read(&curr_node->rx_pkt_cnt) > 0);
        if(ret)
        {
            ret = -EINTR;
            DBGLOG(IPCD, ERR, "[RX]PORT%d read interrupt by syscall.signal(%lld)", port_id, \
                *(long long *)current->pending.signal.sig);
            goto _exit;
        }
    }

    DBGLOG(IPCD,TRA,"ipc_read: dequeue from rx_skb_list, rx_pkt_cnt(%d)",rx_pkt_cnt);
    rx_skb = skb_dequeue(&curr_node->rx_skb_list);
    /* There should be rx_skb in the list */
    KAL_ASSERT(NULL != rx_skb);
    atomic_dec(&curr_node->rx_pkt_cnt);
    rx_pkt_cnt = atomic_read(&curr_node->rx_pkt_cnt);
    KAL_ASSERT(rx_pkt_cnt >= 0);

    ccci_header = (CCCI_BUFF_T *)rx_skb->data;
    DBGLOG(IPCD,TRA,"ipc_read: PORT%d CCCI_MSG(0x%08X, 0x%08X, %02d, 0x%08X)",\
        port_id, ccci_header->data[0],ccci_header->data[1],
        ccci_header->channel, ccci_header->reserved);
    /*If not match please debug EEMCS CCCI demux skb part*/
    KAL_ASSERT(ccci_header->channel == eemcs_ipc_inst.ccci_ch.rx);

    /* data[1] carries total length including the CCCI header. */
    read_len = ccci_header->data[1] - sizeof(CCCI_BUFF_T);
    /* remove CCCI_HEADER */
    skb_pull(rx_skb, sizeof(CCCI_BUFF_T));
    DBGLOG(IPCD,TRA,"ipc_read: PORT%d read_len=%d",port_id, read_len);
    payload=(unsigned char*)rx_skb->data;

    if(count < read_len)
    {
        /* Message-oriented device: a partial read is not allowed, so an
         * undersized user buffer drops the whole packet. */
        DBGLOG(IPCD,ERR,"PKT DROP of PORT%d! want_read=%d, read_len=%d",
               port_id, count, read_len);
        atomic_inc(&curr_node->rx_pkt_drop_cnt);
        eemcs_update_statistics(0, eemcs_ipc_inst.eemcs_port_id, RX, DROP);
        dev_kfree_skb(rx_skb);
        ret = -E2BIG;
        goto _exit;
    }

    DBGLOG(IPCD,TRA,"ipc_read: copy_to_user(len=%d), %p -> %p", read_len, payload, buf);
    ret = copy_to_user(buf, payload, read_len);
    if(ret!=0)
    {
        DBGLOG(IPCD, ERR, "[RX]PORT%d copy_to_user(len=%d, %p->%p) fail: %d", \
            port_id, read_len, payload, buf, ret);
        dev_kfree_skb(rx_skb); /* FIX: skb was leaked on this path */
        ret = -EFAULT;
        goto _exit;
    }
    dev_kfree_skb(rx_skb);

    if(ret == 0){
        DEBUG_LOG_FUNCTION_LEAVE;
        return read_len;
    }
_exit:
    DEBUG_LOG_FUNCTION_LEAVE;
    return ret;
}
/*
 * eemcs_ipc_write - character-device write handler for an IPC node.
 *
 * Copies a user-space ILM payload into a fresh skb, prepends a CCCI header
 * (channel/length/extq id filled in by this driver) and queues it on the
 * IPC Tx CCCI channel.
 *
 * @fp:    opened file; private_data is the eemcs_ipc_node_t.
 * @buf:   user payload; interpreted as an ipc_ilm_t for dest_mod_id lookup.
 * @in_sz: payload length; silently clamped to the MTU (see below).
 * @ppos:  unused.
 *
 * Returns the (possibly clamped) byte count accepted, or a negative errno
 * (-EINVAL bad port/length/dest id, -EIO device not ready, -EAGAIN no Tx
 * space or queueing failed, -ENOMEM skb alloc failed, -EFAULT copy failed).
 *
 * NOTE(review): the early -EIO return bypasses DEBUG_LOG_FUNCTION_LEAVE,
 * unlike every other exit which goes through _exit.
 * NOTE(review): once ccci_ch_write_space_alloc() succeeds, the later error
 * paths (-ENOMEM/-EFAULT/-EINVAL/-EAGAIN) free the skb but do not appear to
 * release the reserved write space — compare eemcs_cdev_write, which calls
 * ccci_cdev_write_space_release() on error; verify the ch-level API's
 * ownership rules.
 * NOTE(review): ccci_header->data[0] is logged but never written here, so
 * the trace prints uninitialized skb memory.
 */
static ssize_t eemcs_ipc_write(struct file *fp, const char __user *buf, size_t in_sz, loff_t *ppos)
{
    ssize_t ret = 0;
    eemcs_ipc_node_t *curr_node = (eemcs_ipc_node_t *)fp->private_data;
    KAL_UINT8 node_id = curr_node->ipc_node_id;/* node_id */
    KAL_UINT8 port_id = eemcs_ipc_inst.eemcs_port_id; /* port_id */
    KAL_UINT32 p_type, control_flag;
    struct sk_buff *new_skb;
    CCCI_BUFF_T *ccci_header;
    ipc_ilm_t *ilm=NULL;
    IPC_MSGSVC_TASKMAP_T *id_map;
    size_t count = in_sz;

    DEBUG_LOG_FUNCTION_ENTRY;
    DBGLOG(IPCD,TRA,"ipc_write: deivce=%s iminor=%d len=%d", curr_node->dev_name, node_id, count);

    /* Only user-type ports may be written through this handler. */
    p_type = ccci_get_port_type(port_id);
    if(p_type != EX_T_USER) {
        DBGLOG(IPCD,ERR,"PORT%d refuse port(%d) access user port", port_id, p_type);
        ret=-EINVAL;
        goto _exit;
    }
    if(!eemcs_device_ready()) {
        DBGLOG(IPCD,ERR,"MD device not ready!");
        ret= -EIO;
        return ret;
    }

    control_flag = ccci_get_port_cflag(port_id);
    /* If the port exports the CCCI header, the write must at least
     * contain a complete header. */
    if((control_flag & EXPORT_CCCI_H) && (count < sizeof(CCCI_BUFF_T))) {
        DBGLOG(IPCD,ERR,"invalid wirte_len(%d) of PORT%d", count, port_id);
        ret=-EINVAL;
        goto _exit;
    }
    /* Clamp oversized writes to the MTU (plus header when exported)
     * rather than rejecting them; the clamped count is what is reported
     * back to the caller on success. */
    if(control_flag & EXPORT_CCCI_H){
        if(count > (MAX_TX_BYTE+sizeof(CCCI_BUFF_T))){
            DBGLOG(IPCD,WAR,"PORT%d wirte_len(%d)>MTU(%d)!", port_id, count, MAX_TX_BYTE);
            count = MAX_TX_BYTE+sizeof(CCCI_BUFF_T);
        }
    }else{
        if(count > MAX_TX_BYTE){
            DBGLOG(IPCD,WAR,"PORT%d wirte_len(%d)>MTU(%d)!", port_id, count, MAX_TX_BYTE);
            count = MAX_TX_BYTE;
        }
    }

    /* Reserve Tx flow-control space on the channel; 0 means no room. */
    if (ccci_ch_write_space_alloc(eemcs_ipc_inst.ccci_ch.tx)==0){
        DBGLOG(IPCD,WAR,"PORT%d write return 0)", port_id);
        ret = -EAGAIN;
        goto _exit;
    }

    new_skb = ccci_ipc_mem_alloc(count + CCCI_IPC_HEADER_ROOM);
    if(NULL == new_skb) {
        DBGLOG(IPCD,ERR,"PORT%d alloct tx memory fail", port_id);
        ret = -ENOMEM;
        goto _exit;
    }
    /* reserve SDIO_H header room */
    skb_reserve(new_skb, sizeof(SDIO_H));
    /* Header is driver-built here; the user payload lands after it. */
    ccci_header = (CCCI_BUFF_T *)skb_put(new_skb, sizeof(CCCI_BUFF_T)) ;
    if(copy_from_user(skb_put(new_skb, count), buf, count))
    {
        DBGLOG(IPCD,ERR,"PORT%d copy_from_user(len=%d) fail", port_id, count);
        dev_kfree_skb(new_skb);
        ret = -EFAULT;
        goto _exit;
    }

    /* The payload begins with an ipc_ilm_t; route on its dest_mod_id. */
    ilm = (ipc_ilm_t*)((char*)ccci_header + sizeof(CCCI_BUFF_T));
    /* Check IPC extq_id */
    if ((id_map=local_MD_id_2_unify_id(ilm->dest_mod_id))==NULL)
    {
        DBGLOG(IPCD,ERR,"Invalid dest_mod_id=%d",ilm->dest_mod_id);
        dev_kfree_skb(new_skb);
        ret=-EINVAL;
        goto _exit;
    }

    /* user bring down the payload only */
    ccci_header->data[1] = count + sizeof(CCCI_BUFF_T);
    ccci_header->reserved = id_map->extq_id;
    ccci_header->channel = eemcs_ipc_inst.ccci_ch.tx;
    DBGLOG(IPCD,TRA,"ipc_write: PORT%d CCCI_MSG(0x%08X, 0x%08X, %02d, 0x%08X)", port_id,
        ccci_header->data[0], ccci_header->data[1], ccci_header->channel, ccci_header->reserved);

    ret = ccci_ch_write_desc_to_q(ccci_header->channel, new_skb);
    if (KAL_SUCCESS != ret) {
        DBGLOG(IPCD,ERR,"PKT of ch%d DROP!",ccci_header->channel);
        dev_kfree_skb(new_skb);
        ret = -EAGAIN;
    } else {
        atomic_inc(&curr_node->tx_pkt_cnt);
        wake_up(&curr_node->tx_waitq); /* wake up tx_waitq for notify poll_wait of state change */
    }
_exit:
    DEBUG_LOG_FUNCTION_LEAVE;
    if(!ret){
        return count;
    }
    return ret;
}
/*
 * eemcs_ipc_kern_write - kernel-internal variant of eemcs_ipc_write.
 *
 * Sends an ILM originating inside the kernel (no copy_from_user) to the
 * modem over the IPC Tx CCCI channel. The target node is derived from
 * in_ilm->src_mod_id and must be in IPCD_KERNEL state.
 *
 * @in_ilm: message to send; if local_para_ptr is set, its msg_len bytes
 *          are sent following the ilm struct.
 *
 * Returns the byte count sent on success or a negative errno (same codes
 * as eemcs_ipc_write).
 *
 * NOTE(review): memcpy(..., in_ilm, count) with
 * count = sizeof(ipc_ilm_t) + local_para_ptr->msg_len assumes the
 * local_para payload is laid out contiguously right after *in_ilm in
 * memory — confirm against the callers that build these ilms.
 * NOTE(review): if count is clamped to the MTU, the truncated message is
 * still sent; and as in eemcs_ipc_write, the early -EIO return skips
 * DEBUG_LOG_FUNCTION_LEAVE and error paths after
 * ccci_ch_write_space_alloc() do not visibly release the reserved space.
 */
ssize_t eemcs_ipc_kern_write(ipc_ilm_t *in_ilm){
    ssize_t ret = 0;
    eemcs_ipc_node_t *curr_node = NULL;
    KAL_UINT8 node_id = 0;
    KAL_UINT8 port_id = eemcs_ipc_inst.eemcs_port_id; /* port_id */
    KAL_UINT32 p_type, control_flag;
    struct sk_buff *new_skb;
    CCCI_BUFF_T *ccci_header;
    ipc_ilm_t *ilm=NULL;
    IPC_MSGSVC_TASKMAP_T *id_map;
    size_t count = sizeof(ipc_ilm_t);

    DEBUG_LOG_FUNCTION_ENTRY;
    DBGLOG(IPCD,TRA, "ipc_kern_write: src=0x%x dest=0x%x sap=0x%x msg=0x%x local_ptr=%#X peer_ptr=%#X",
        (unsigned int)in_ilm->src_mod_id, (unsigned int)in_ilm->dest_mod_id,
        (unsigned int)in_ilm->sap_id, (unsigned int)in_ilm->msg_id,
        (unsigned int)in_ilm->local_para_ptr, (unsigned int)in_ilm->peer_buff_ptr);

    // src module id check
    node_id =(KAL_UINT8) (in_ilm->src_mod_id & (~AP_UNIFY_ID_FLAG)); // source id is ap side txq_id
    if (node_id >= EEMCS_IPCD_MAX_NUM){
        DBGLOG(IPCD,ERR,"invalid src_mod_id=0x%x", in_ilm->src_mod_id);
        ret = -EINVAL;
        goto _exit;
    }
    curr_node = (eemcs_ipc_node_t *)&eemcs_ipc_inst.ipc_node[node_id];
    node_id = curr_node->ipc_node_id;/* node_id */
    /* Only nodes claimed by a kernel user may use this entry point. */
    if (atomic_read(&curr_node->dev_state) != IPCD_KERNEL){
        DBGLOG(IPCD,ERR,"invalid dev_state(not IPCD_KERNEL), src_mod_id=0x%x", in_ilm->src_mod_id);
        ret = -EINVAL;
        goto _exit;
    }

    /* Optional trailing local_para payload extends the transfer length. */
    if(in_ilm->local_para_ptr != NULL){
        count = sizeof(ipc_ilm_t) + in_ilm->local_para_ptr->msg_len;
    }
    DBGLOG(IPCD,TRA,"ipc_kern_write: dev=%s iminor=%d len=%d", curr_node->dev_name, node_id, count);

    p_type = ccci_get_port_type(port_id);
    if(p_type != EX_T_USER) {
        DBGLOG(IPCD,ERR,"PORT%d refuse port(%d) access user port", port_id, p_type);
        ret=-EINVAL;
        goto _exit;
    }
    if(!eemcs_device_ready()) {
        DBGLOG(IPCD,ERR,"MD device not ready!");
        ret= -EIO;
        return ret;
    }

    control_flag = ccci_get_port_cflag(port_id);
    if((control_flag & EXPORT_CCCI_H) && (count < sizeof(CCCI_BUFF_T))) {
        DBGLOG(IPCD,ERR,"invalid wirte_len(%d) of PORT%d", count, port_id);
        ret=-EINVAL;
        goto _exit;
    }
    /* Clamp to MTU (plus header when the port exports it). */
    if(control_flag & EXPORT_CCCI_H){
        if(count > (MAX_TX_BYTE+sizeof(CCCI_BUFF_T))){
            DBGLOG(IPCD,WAR,"PORT%d wirte_len(%d)>MTU(%d)", port_id, count, MAX_TX_BYTE);
            count = MAX_TX_BYTE+sizeof(CCCI_BUFF_T);
        }
    }else{
        if(count > MAX_TX_BYTE){
            DBGLOG(IPCD,WAR,"PORT%d wirte_len(%d)>MTU(%d)", port_id, count, MAX_TX_BYTE);
            count = MAX_TX_BYTE;
        }
    }

    /* Reserve Tx flow-control space on the channel; 0 means no room. */
    if (ccci_ch_write_space_alloc(eemcs_ipc_inst.ccci_ch.tx)==0){
        DBGLOG(IPCD,WAR,"PORT%d write return 0)", port_id);
        ret = -EAGAIN;
        goto _exit;
    }

    new_skb = ccci_ipc_mem_alloc(count + CCCI_IPC_HEADER_ROOM);
    if(NULL == new_skb) {
        DBGLOG(IPCD,ERR,"PORT%d alloct tx memory fail", port_id);
        ret = -ENOMEM;
        goto _exit;
    }
    /* reserve SDIO_H header room */
#ifdef CCCI_SDIO_HEAD
    skb_reserve(new_skb, sizeof(SDIO_H));
#endif
    ccci_header = (CCCI_BUFF_T *)skb_put(new_skb, sizeof(CCCI_BUFF_T)) ;
    /* Kernel source: plain memcpy instead of copy_from_user. */
    memcpy(skb_put(new_skb, count), in_ilm, count);

    ilm = (ipc_ilm_t*)((char*)ccci_header + sizeof(CCCI_BUFF_T));
    /* Check IPC extq_id */
    if ((id_map=local_MD_id_2_unify_id(ilm->dest_mod_id))==NULL)
    {
        DBGLOG(IPCD,ERR,"Invalid dest_mod_id=%d",ilm->dest_mod_id);
        dev_kfree_skb(new_skb);
        ret=-EINVAL;
        goto _exit;
    }

    /* user bring down the payload only */
    ccci_header->data[1] = count + sizeof(CCCI_BUFF_T);
    ccci_header->reserved = id_map->extq_id;
    ccci_header->channel = eemcs_ipc_inst.ccci_ch.tx;
    DBGLOG(IPCD,TRA,"ipc_kern_write: PORT%d CCCI_MSG(0x%08X, 0x%08X, %02d, 0x%08X)", port_id,
        ccci_header->data[0], ccci_header->data[1], ccci_header->channel, ccci_header->reserved);

    ret = ccci_ch_write_desc_to_q(ccci_header->channel, new_skb);
    if (KAL_SUCCESS != ret) {
        DBGLOG(IPCD,ERR,"PKT DROP of ch%d!",ccci_header->channel);
        dev_kfree_skb(new_skb);
        ret = -EAGAIN;
    } else {
        atomic_inc(&curr_node->tx_pkt_cnt);
        wake_up(&curr_node->tx_waitq); /* wake up tx_waitq for notify poll_wait of state change */
    }
_exit:
    DEBUG_LOG_FUNCTION_LEAVE;
    if(!ret){
        return count;
    }
    return ret;
}
/*
 * eemcs_cdev_write - generic CCCI character-device write handler.
 *
 * Builds (or accepts from user space, when the port has EXPORT_CCCI_H)
 * a CCCI header, copies the user payload into a fresh skb and queues it
 * on the node's Tx CCCI channel. Blocks for Tx space unless O_NONBLOCK.
 *
 * @fp:    opened file; private_data is the eemcs_cdev_node_t.
 * @buf:   user data (header+payload if EXPORT_CCCI_H, else payload only).
 * @in_sz: write length; silently clamped to the MTU.
 * @ppos:  unused.
 *
 * Returns the (possibly clamped) byte count on success or a negative
 * errno; on error after space was reserved, the space is released at
 * _exit via ccci_cdev_write_space_release().
 *
 * NOTE(review): the `__blocking_IO` label uses a double-underscore name,
 * which is reserved for the implementation.
 * NOTE(review): the early -ENODEV return (META/MD_LOG ports while the
 * device is not ready) bypasses both DEBUG_LOG_FUNCTION_LEAVE and the
 * _exit cleanup.
 */
static ssize_t eemcs_cdev_write(struct file *fp, const char __user *buf, size_t in_sz, loff_t *ppos)
{
    ssize_t ret = -EINVAL;
    eemcs_cdev_node_t *curr_node = (eemcs_cdev_node_t *)fp->private_data;
    KAL_UINT8 port_id = curr_node->eemcs_port_id; /* port_id */
    KAL_UINT32 p_type, control_flag;
    struct sk_buff *new_skb;
    CCCI_BUFF_T *ccci_header;
    size_t count = in_sz;

    DEBUG_LOG_FUNCTION_ENTRY;
    DBGLOG(CHAR, DBG, "eemcs_cdev_write: %s(%d), len=%d",curr_node->cdev_name,port_id,count);

    p_type = ccci_get_port_type(port_id);
    if(curr_node->ccci_ch.tx == CH_DUMMY){
        /* if ccci channel is assigned to CH_DUMMY means tx packets should be dropped ex. muxreport port */
        DBGLOG(CHAR, ERR, "PORT%d is assigned to CH_DUMMY ccci channel !!PKT DROP!!", port_id);
        ret = -EINVAL;
        goto _exit;
    }
    if(p_type != EX_T_USER) {
        DBGLOG(CHAR, ERR, "PORT%d refuse p_type(%d) access user port", port_id, p_type);
        ret = -EINVAL;
        goto _exit;
    }
    /* META and MD-logger ports require the device to be up. */
    if(!eemcs_device_ready() && ((port_id==CCCI_PORT_META)||(port_id==CCCI_PORT_MD_LOG))) {
        ret= - ENODEV;
        DBGLOG(CHAR, DEF, "device not ready!");
        return ret;
    }

    control_flag = ccci_get_port_cflag(port_id);
    /* Ports that export the CCCI header must supply a full header. */
    if((control_flag & EXPORT_CCCI_H) && (count < sizeof(CCCI_BUFF_T))) {
        DBGLOG(CHAR, WAR, "PORT%d wirte len not support(%d) by emcs!", port_id, count);
        ret = -EINVAL;
        goto _exit;
    }
    /* Clamp oversized writes to the MTU (plus header when exported). */
    if(control_flag & EXPORT_CCCI_H){
        if(count > (MAX_TX_BYTE+sizeof(CCCI_BUFF_T))){
            DBGLOG(CHAR, WAR, "PORT%d wirte_len(%d) > MTU(%d)!", port_id, count, MAX_TX_BYTE);
            count = MAX_TX_BYTE+sizeof(CCCI_BUFF_T);
        }
    }else{
        if(count > MAX_TX_BYTE){
            DBGLOG(CHAR, WAR, "PORT%d wirte_len(%d) > MTU(%d)!", port_id, count, MAX_TX_BYTE);
            count = MAX_TX_BYTE;
        }
    }

__blocking_IO:
    /* Reserve Tx space; 0 = channel full. Non-blocking callers get
     * -EAGAIN, blocking callers sleep then retry from the label. */
    if (ccci_cdev_write_space_alloc(curr_node->ccci_ch.tx)==0){
        if (fp->f_flags & O_NONBLOCK)
        {
            ret = -EAGAIN;
            DBGLOG(CHAR, WAR, "PORT%d ccci_cdev_write_space_alloc return 0)", port_id);
            goto _exit;
        }else{
            // Blocking IO
            DBGLOG(CHAR, TRA, "PORT%d Enter Blocking I/O wait", port_id);
            ret = ccci_cdev_write_wait(curr_node->ccci_ch.tx);
            if(ret == -ERESTARTSYS)
            {
                DBGLOG(CHAR, WAR, "PORT%d Interrupted,return ERESTARTSYS", port_id);
                ret = -EINTR;
                goto _exit;
            }
            goto __blocking_IO;
        }
    }

    new_skb = ccci_cdev_mem_alloc(count + CCCI_CDEV_HEADER_ROOM);
    if(NULL == new_skb)
    {
        ret = -ENOMEM;
        DBGLOG(CHAR, ERR, "PORT%d alloct tx memory fail(%d)", port_id, ret);
        goto _exit;
    }
    /* reserve SDIO_H header room */
    skb_reserve(new_skb, sizeof(SDIO_H));

    /* EXPORT_CCCI_H: header arrives inside the user data, so it will be
     * copied in place; otherwise the driver reserves header space itself. */
    if(control_flag & EXPORT_CCCI_H){
        ccci_header = (CCCI_BUFF_T *)new_skb->data;
    }else{
        ccci_header = (CCCI_BUFF_T *)skb_put(new_skb, sizeof(CCCI_BUFF_T)) ;
    }
    if(copy_from_user(skb_put(new_skb, count), buf, count))
    {
        DBGLOG(CHAR, ERR, "PORT%d fail copy data from user space(%d)", port_id, count);
        dev_kfree_skb(new_skb);
        ret=-EFAULT;
        goto _exit;
    }

    if(control_flag & EXPORT_CCCI_H)
    {
        /* user bring down the ccci header */
        if(count == sizeof(CCCI_BUFF_T)){
            /* Header-only write: treat as a control message (magic num). */
            DBGLOG(CHAR, DBG, "eemcs_cdev_write: PORT%d, CCCI_MSG(0x%x, 0x%x, 0x%x, 0x%x)",
                port_id, ccci_header->data[0], ccci_header->data[1],
                ccci_header->channel, ccci_header->reserved);
            ccci_header->data[0]= CCCI_MAGIC_NUM;
        }else{
            ccci_header->data[1]= count;
        }
        if(ccci_header->channel != curr_node->ccci_ch.tx){
            DBGLOG(CHAR, WAR, "PORT%d Tx CCCI channel not match (%d) vs (%d)!! will correct by char_dev",\
                port_id, ccci_header->channel, curr_node->ccci_ch.tx);
        }
    } else {
        /* user bring down the payload only */
        ccci_header->data[1] = count + sizeof(CCCI_BUFF_T);
        ccci_header->reserved = 0;
    }
    /* The channel field is always forced to the node's Tx channel,
     * regardless of what the user supplied. */
    ccci_header->channel = curr_node->ccci_ch.tx;
    DBGLOG(CHAR, DBG, "eemcs_cdev_write: PORT%d, CCCI_MSG(0x%x, 0x%x, 0x%x, 0x%x)",
        port_id, ccci_header->data[0], ccci_header->data[1],
        ccci_header->channel, ccci_header->reserved);

    /* 20130816 ian add aud dump */
    {
        char *ptr = (char *)new_skb->data;
        ptr+=sizeof(CCCI_BUFF_T);
        /* dump 32 byte of the !!!CCCI DATA!!! part */
        CDEV_LOG(port_id, CHAR, INF, "[DUMP]PORT%d eemcs_cdev_write\n\
[00..07](0x%02x)(0x%02x)(0x%02x)(0x%02x)(0x%02x)(0x%02x)(0x%02x)(0x%02x)\n\
[08..15](0x%02x)(0x%02x)(0x%02x)(0x%02x)(0x%02x)(0x%02x)(0x%02x)(0x%02x)\n\
[16..23](0x%02x)(0x%02x)(0x%02x)(0x%02x)(0x%02x)(0x%02x)(0x%02x)(0x%02x)\n\
[24..31](0x%02x)(0x%02x)(0x%02x)(0x%02x)(0x%02x)(0x%02x)(0x%02x)(0x%02x)",\
            port_id,\
            (int)*(ptr+0),(int)*(ptr+1),(int)*(ptr+2),(int)*(ptr+3),(int)*(ptr+4),(int)*(ptr+5),(int)*(ptr+6),(int)*(ptr+7),\
            (int)*(ptr+8),(int)*(ptr+9),(int)*(ptr+10),(int)*(ptr+11),(int)*(ptr+12),(int)*(ptr+13),(int)*(ptr+14),(int)*(ptr+15),\
            (int)*(ptr+16),(int)*(ptr+17),(int)*(ptr+18),(int)*(ptr+19),(int)*(ptr+20),(int)*(ptr+21),(int)*(ptr+22),(int)*(ptr+23),\
            (int)*(ptr+24),(int)*(ptr+25),(int)*(ptr+26),(int)*(ptr+27),(int)*(ptr+28),(int)*(ptr+29),(int)*(ptr+30),(int)*(ptr+31));
    }

    ret = ccci_cdev_write_desc_to_q(curr_node->ccci_ch.tx, new_skb);
    if (KAL_SUCCESS != ret) {
        DBGLOG(CHAR, ERR, "Pkt drop of ch%d!",curr_node->ccci_ch.tx);
        dev_kfree_skb(new_skb);
        ret = -EAGAIN;
    } else {
        atomic_inc(&curr_node->tx_pkt_cnt);
        //wake_up(&curr_node->tx_waitq); /* wake up tx_waitq for notify poll_wait of state change */
    }

#if 0
    20130102 note that
    ret = que_wakeup_transfer(port->txq_id);
    if(ret)
    {
        DBGLOG(PORT,ERR,"PORT(%d) fail wake when write(%d)", port->id, ret);
        goto _exit;
    }
    ret = wait_event_interruptible(port->write_waitq, port->tx_pkt_id == port->tx_pkt_id_done);
    if(ret == -ERESTARTSYS)
    {
        // TODO: error handling .....
        DBGLOG(PORT,ERR,"PORT(%d) fail wait write done event successfully", port->id);
    }
#endif

_exit:
    DEBUG_LOG_FUNCTION_LEAVE;
    if(!ret){
        return count;
    }
    /* Error path: give back the Tx space reserved above. */
    ccci_cdev_write_space_release(curr_node->ccci_ch.tx);
    return ret;
}