static KAL_INT32 eemcs_cdev_rx_callback(struct sk_buff *skb, KAL_UINT32 private_data)
{
    CCCI_BUFF_T *p_cccih = NULL;
    KAL_UINT32 port_id;

    DEBUG_LOG_FUNCTION_ENTRY;

    if (skb) {
        p_cccih = (CCCI_BUFF_T *)skb->data;
        DBGLOG(CHAR, DBG, "cdev_rx_callback: CCCI_H(0x%x)(0x%x)(0x%x)(0x%x)",
            p_cccih->data[0], p_cccih->data[1], p_cccih->channel, p_cccih->reserved);
    }

    port_id = ccci_ch_to_port(p_cccih->channel);

    if (CDEV_OPEN == atomic_read(&eemcs_cdev_inst.cdev_node[PORT2IDX(port_id)].cdev_state)) {
        skb_queue_tail(&eemcs_cdev_inst.cdev_node[PORT2IDX(port_id)].rx_skb_list, skb); /* spin_lock_irqsave inside, referring to skbuff.c */
        atomic_inc(&eemcs_cdev_inst.cdev_node[PORT2IDX(port_id)].rx_pkt_cnt); /* increase rx_pkt_cnt */
        eemcs_update_statistics_number(0, port_id, RX, QUEUE,
            atomic_read(&eemcs_cdev_inst.cdev_node[PORT2IDX(port_id)].rx_pkt_cnt));
        wake_up(&eemcs_cdev_inst.cdev_node[PORT2IDX(port_id)].rx_waitq); /* wake up rx_waitq */
    } else {
        /* If port_id == CCCI_PORT_MD_LOG, skip the drop info (requested by ST team) */
        if (port_id != CCCI_PORT_MD_LOG) {
            DBGLOG(CHAR, ERR, "!!! PKT DROP when cdev(%d) close", port_id);
        }
        dev_kfree_skb(skb);
        eemcs_ccci_release_rx_skb(port_id, 1, skb);
        eemcs_update_statistics(0, port_id, RX, DROP);
    }

    DEBUG_LOG_FUNCTION_LEAVE;
    return KAL_SUCCESS;
}
static int eemcs_cdev_release(struct inode *inode, struct file *file)
{
    int id = iminor(inode);
    struct sk_buff *rx_skb;

    DEBUG_LOG_FUNCTION_ENTRY;
    DBGLOG(CHAR, INF, "cdev_release: close dev(%s, %d)",
        eemcs_cdev_inst.cdev_node[PORT2IDX(id)].cdev_name, id);

    atomic_set(&eemcs_cdev_inst.cdev_node[PORT2IDX(id)].cdev_state, CDEV_CLOSE);

    /* drain any pending rx packets */
    while ((rx_skb = skb_dequeue(&eemcs_cdev_inst.cdev_node[PORT2IDX(id)].rx_skb_list)) != NULL) {
        dev_kfree_skb(rx_skb);
        eemcs_ccci_release_rx_skb(id, 1, rx_skb);
    }
    atomic_set(&eemcs_cdev_inst.cdev_node[PORT2IDX(id)].rx_pkt_cnt, 0);

    /* unregister ccci channel */
    // Might cause a race condition while the user closes the cdev
    //ccci_cdev_unregister(eemcs_cdev_inst.cdev_node[PORT2IDX(id)].ccci_ch.rx);

    if (true == eemcs_on_reset()) {
        if (true == eemcs_cdev_rst_port_closed()) {
            eemcs_boot_user_exit_notify();
        }
    }

    DEBUG_LOG_FUNCTION_LEAVE;
    return 0;
}
static int eemcs_cdev_open(struct inode *inode, struct file *file)
{
    int id = iminor(inode);
    int ret = 0;
    struct sk_buff *rx_skb;

    DEBUG_LOG_FUNCTION_ENTRY;
    DBGLOG(CHAR, INF, "cdev_open: open dev(%s, %d)",
        eemcs_cdev_inst.cdev_node[PORT2IDX(id)].cdev_name, id);

    //4 <1> check multiple open
    if (CDEV_OPEN == atomic_read(&eemcs_cdev_inst.cdev_node[PORT2IDX(id)].cdev_state)) {
        DBGLOG(CHAR, ERR, "cdev_open: %s(%d) multi-open fail!",
            eemcs_cdev_inst.cdev_node[PORT2IDX(id)].cdev_name, id);
        return -EIO;
    }

    if (eemcs_cdev_inst.cdev_node[PORT2IDX(id)].ccci_ch.rx != CH_DUMMY) { /* CH_DUMMY should not have Rx Data */
        //4 <2> clear the rx_skb_list
        skb_queue_purge(&eemcs_cdev_inst.cdev_node[PORT2IDX(id)].rx_skb_list);
        while ((rx_skb = skb_dequeue(&eemcs_cdev_inst.cdev_node[PORT2IDX(id)].rx_skb_list)) != NULL) {
            dev_kfree_skb(rx_skb);
            eemcs_ccci_release_rx_skb(id, 1, rx_skb);
        }
        atomic_set(&eemcs_cdev_inst.cdev_node[PORT2IDX(id)].rx_pkt_cnt, 0);

        //4 <3> register ccci channel
        ret = ccci_cdev_register(eemcs_cdev_inst.cdev_node[PORT2IDX(id)].ccci_ch.rx, eemcs_cdev_rx_callback, 0);
        if (ret != KAL_SUCCESS) {
            DBGLOG(CHAR, ERR, "PORT%d register cdev fail!!", id);
            return -EIO;
        }
    }

    file->private_data = &eemcs_cdev_inst.cdev_node[PORT2IDX(id)];
    nonseekable_open(inode, file);
    atomic_set(&eemcs_cdev_inst.cdev_node[PORT2IDX(id)].cdev_state, CDEV_OPEN);

    DEBUG_LOG_FUNCTION_LEAVE;
    return ret;
}
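/*
 * Illustrative sketch only (not part of the driver): a minimal user-space
 * caller exercising the single-open policy enforced by eemcs_cdev_open()
 * above. The device path "/dev/eemcs_ipc_uart" is a placeholder; the real
 * node names come from eemcs_cdev_inst.cdev_node[].cdev_name. Kept under
 * "#if 0" so the driver build is unaffected.
 */
#if 0
#include <stdio.h>
#include <fcntl.h>
#include <errno.h>
#include <unistd.h>

int main(void)
{
    /* First open succeeds and moves the node to CDEV_OPEN. */
    int fd1 = open("/dev/eemcs_ipc_uart", O_RDWR);
    if (fd1 < 0) {
        perror("first open");
        return 1;
    }

    /* Second open is rejected by eemcs_cdev_open() with -EIO. */
    int fd2 = open("/dev/eemcs_ipc_uart", O_RDWR);
    if (fd2 < 0 && errno == EIO)
        printf("multi-open correctly refused\n");

    close(fd1);
    return 0;
}
#endif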
int ccci_df_to_ccci_callback(unsigned int rxq_no)
{
    int ret, hc_ret;
    bool is_xcmd = false;
    struct sk_buff *skb = NULL;
    CCCI_BUFF_T *ccci_h = NULL;
    XBOOT_CMD *p_xcmd = NULL;
    KAL_UINT32 port_id = CCCI_PORT_CTRL;
    static KAL_UINT32 rx_err_cnt[CCCI_PORT_NUM_MAX] = {0};
#ifdef __EEMCS_EXPT_SUPPORT__
    EEMCS_EXCEPTION_STATE mode = EEMCS_EX_INVALID;
#endif
#if defined (DBG_FEATURE_ADD_CCCI_SEQNO)
    KAL_INT16 channel, seq_num, assert_bit;
#endif
    DEBUG_LOG_FUNCTION_ENTRY;

    /* Step 1. read skb from swq */
    skb = hif_dl_read_swq(rxq_no);
    if (skb == NULL) {
        DBGLOG(CCCI, DBG, "ccci_df_to_ccci_callback read NULL skb on %d", rxq_no);
        if (is_exception_mode(&mode))
            return KAL_FAIL;
        else
            KAL_ASSERT(NULL != skb);
    }

    /* Step 2. call handle complete */
    hc_ret = hif_dl_pkt_handle_complete(rxq_no);
    KAL_ASSERT(0 == hc_ret);

    wake_lock_timeout(&eemcs_wake_lock, HZ/2); // Using 0.5s wake lock

    /* Step 3. buffer type */
    if (rxq_no == RXQ_Q0) {
        //is_xcmd = is_xboot_command(skb);
        p_xcmd = (XBOOT_CMD *)skb->data;
        if (p_xcmd->magic == (KAL_UINT32)MAGIC_MD_CMD) {
            if (check_device_state() >= EEMCS_MOLY_HS_P1) {
                DBGLOG(CCCI, ERR, "can't recv xBoot cmd when EEMCS state=%d", check_device_state());
            } else {
                is_xcmd = true;
            }
        }
    }

    if (is_xcmd) {
        /* Step 4. callback to xBoot */
        CDEV_LOG(port_id, CCCI, INF, "XBOOT_CMD: 0x%08X, 0x%08X, 0x%08X, 0x%08X",
            p_xcmd->magic, p_xcmd->msg_id, p_xcmd->status, p_xcmd->reserved[0]);
        ret = ccci_port_info[port_id].ch.rx_cb(skb, 0);
    } else {
        ccci_h = (CCCI_BUFF_T *)skb->data;
        port_id = ccci_ch_to_port(ccci_h->channel);

        CDEV_LOG(port_id, CCCI, INF, "CCCI_H: 0x%08X, 0x%08X, 0x%08X, 0x%08X",
            ccci_h->data[0], ccci_h->data[1], ccci_h->channel, ccci_h->reserved);

        /* check rx sequence number for expect */
#if defined (DBG_FEATURE_ADD_CCCI_SEQNO)
        channel = ccci_h->channel;
        seq_num = ccci_h->seq_num;
        assert_bit = ccci_h->assert_bit;
        DBGLOG(CCCI, DBG, "Port%d CCCI_H: data[0]=0x%08X, data[1]=0x%08X, ch=0x%02X, seqno=0x%02X, assert=%d, resv=0x%08X(0x%08X, 0x%08X, 0x%08X)",
            port_id, ccci_h->data[0], ccci_h->data[1], ccci_h->channel, ccci_h->seq_num,
            ccci_h->assert_bit, ccci_h->reserved, channel, seq_num, assert_bit);

        if (((seq_num - ccci_seqno_tbl[channel].seqno[RX]) & 0x7FFF) != 1 && assert_bit) {
            DBGLOG(CCCI, ERR, "Port%d seqno out-of-order(0x%02X->0x%02X): data[0]=0x%08X, data[1]=0x%08X, ch=0x%02X, seqno=0x%02X, assert=%d, resv=0x%08X",
                port_id, seq_num, ccci_seqno_tbl[channel].seqno[RX], ccci_h->data[0], ccci_h->data[1],
                ccci_h->channel, ccci_h->seq_num, ccci_h->assert_bit, ccci_h->reserved);
            hif_force_md_assert_swint();
        }
        ccci_seqno_tbl[channel].seqno[RX] = seq_num;
#endif

        /* Step 4. callback to CCCI device */
        if (NULL != ccci_port_info[port_id].ch.rx_cb) {
#ifdef __EEMCS_EXPT_SUPPORT__
            if (is_exception_mode(&mode)) {
                if (!is_valid_exception_port(port_id, true)) {
                    ret = KAL_FAIL;
                    dev_kfree_skb(skb);
                    eemcs_ccci_release_rx_skb(port_id, 1, skb);
                    eemcs_expt_ccci_rx_drop(port_id);
                    DBGLOG(CCCI, ERR, "PKT DROP when PORT%d(rxq=%d) at md exception",
                        port_id, rxq_no);
                    goto _end;
                } else {
                    ret = ccci_port_info[port_id].ch.rx_cb(skb, 0);
                }
            } else
#endif
            {
                ret = ccci_port_info[port_id].ch.rx_cb(skb, 0);
            }
            rx_err_cnt[port_id] = 0;
        } else {
            ret = KAL_FAIL;
            dev_kfree_skb(skb);
            eemcs_ccci_release_rx_skb(port_id, 1, skb);
            if (rx_err_cnt[port_id] % 20 == 0) {
                DBGLOG(CCCI, ERR, "PKT DROP when PORT%d rx callback(ch=%d) not registered",
                    port_id, ccci_h->channel);
            }
            rx_err_cnt[port_id]++;
            eemcs_update_statistics(0, port_id, RX, DROP);
        }
        eemcs_update_statistics(0, port_id, RX, NORMAL);
    }

_end:
    DEBUG_LOG_FUNCTION_LEAVE;
    return ret;
}
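/*
 * Illustrative sketch only (assumption, not used by the driver): the
 * DBG_FEATURE_ADD_CCCI_SEQNO check above treats the CCCI seqno as a 15-bit
 * counter, so a packet is "in order" exactly when (new - last) & 0x7FFF == 1,
 * which also holds across the 0x7FFF -> 0x0000 wrap. A standalone helper
 * showing the same arithmetic, kept under "#if 0".
 */
#if 0
static int ccci_seqno_in_order(KAL_UINT32 last, KAL_UINT32 cur)
{
    /* Same 15-bit modular difference as used in ccci_df_to_ccci_callback(). */
    return ((cur - last) & 0x7FFF) == 1;
}

/* e.g. ccci_seqno_in_order(0x7FFF, 0x0000) == 1 (wrap is still in order),
 *      ccci_seqno_in_order(0x0005, 0x0008) == 0 (gap of 3 -> out of order). */
#endif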
static ssize_t eemcs_cdev_read(struct file *fp, char *buf, size_t count, loff_t *ppos)
{
    unsigned int flag;
    eemcs_cdev_node_t *curr_node = (eemcs_cdev_node_t *)fp->private_data;
    KAL_UINT8 port_id = curr_node->eemcs_port_id; /* port_id */
    KAL_UINT32 p_type, rx_pkt_cnt, read_len, rx_pkt_cnt_int;
    struct sk_buff *rx_skb;
    unsigned char *payload = NULL;
    CCCI_BUFF_T *ccci_header;
    int ret = 0;

    DEBUG_LOG_FUNCTION_ENTRY;

    flag = fp->f_flags;
    //verbose DBGLOG(CHAR, DBG, "read device iminor (0x%x), length(0x%x)", port_id, count);

    p_type = ccci_get_port_type(port_id);
    if (p_type != EX_T_USER) {
        DBGLOG(CHAR, ERR, "PORT%d refuses access: port type(%d) is not a user port", port_id, p_type);
        goto _exit;
    }

    rx_pkt_cnt_int = atomic_read(&curr_node->buff.remaining_rx_cnt);
    KAL_ASSERT(rx_pkt_cnt_int >= 0);

    if (rx_pkt_cnt_int == 1) {
        /* Cached skb left over from a previous partial read */
        DBGLOG(CHAR, DBG, "Streaming reading!! PORT%d len=%d\n", port_id, count);
        rx_skb = curr_node->buff.remaining_rx_skb;
        /* rx_skb shall not be null */
        KAL_ASSERT(NULL != rx_skb);
        read_len = curr_node->buff.remaining_len;
        KAL_ASSERT(read_len >= 0);
    } else {
        rx_pkt_cnt = atomic_read(&curr_node->rx_pkt_cnt);
        KAL_ASSERT(rx_pkt_cnt >= 0);

        if (rx_pkt_cnt == 0) {
            if (flag & O_NONBLOCK) {
                ret = -EAGAIN;
                //verbose DBGLOG(CHAR, DBG, "[CHAR] PORT(%d) eemcs_cdev_read returns -EAGAIN for NON-BLOCKING", port_id);
                goto _exit;
            }
            ret = wait_event_interruptible(curr_node->rx_waitq, atomic_read(&curr_node->rx_pkt_cnt) > 0);
            if (ret) {
                ret = -EINTR;
                DBGLOG(CHAR, ERR, "PORT%d interrupted while waiting for data.", port_id);
                goto _exit;
            }
        }

        /* No cached data from a previous partial read; dequeue a fresh packet */
        DBGLOG(CHAR, TRA, "eemcs_cdev_read dequeue from rx_skb_list, rx_pkt_cnt(%d)", rx_pkt_cnt);
        rx_skb = skb_dequeue(&curr_node->rx_skb_list);
        /* There should be an rx_skb in the list */
        KAL_ASSERT(NULL != rx_skb);
        atomic_dec(&curr_node->rx_pkt_cnt);
        rx_pkt_cnt = atomic_read(&curr_node->rx_pkt_cnt);
        KAL_ASSERT(rx_pkt_cnt >= 0);

        ccci_header = (CCCI_BUFF_T *)rx_skb->data;
        DBGLOG(CHAR, TRA, "eemcs_cdev_read: PORT%d CCCI_H(0x%x)(0x%x)(0x%x)(0x%x)",
            port_id, ccci_header->data[0], ccci_header->data[1], ccci_header->channel, ccci_header->reserved);

        /* If the channel does not match, debug the EEMCS CCCI demux skb path */
        if (ccci_header->channel != curr_node->ccci_ch.rx) {
            DBGLOG(CHAR, ERR, "Assert(ccci_header->channel == curr_node->ccci_ch.rx)");
            DBGLOG(CHAR, ERR, "ccci_header->channel:%d, curr_node->ccci_ch.rx:%d, curr_node->eemcs_port_id:%d",
                ccci_header->channel, curr_node->ccci_ch.rx, curr_node->eemcs_port_id);
            KAL_ASSERT(ccci_header->channel == curr_node->ccci_ch.rx);
        }
        //KAL_ASSERT(ccci_header->channel == curr_node->ccci_ch.rx);

        if (!(ccci_get_port_cflag(port_id) & EXPORT_CCCI_H)) {
            read_len = ccci_header->data[1] - sizeof(CCCI_BUFF_T);
            /* remove CCCI_HEADER */
            skb_pull(rx_skb, sizeof(CCCI_BUFF_T));
        } else {
            if (ccci_header->data[0] == CCCI_MAGIC_NUM) {
                read_len = sizeof(CCCI_BUFF_T);
            } else {
                read_len = ccci_header->data[1];
            }
        }
    }

    DBGLOG(CHAR, TRA, "eemcs_cdev_read: PORT%d read_len=%d", port_id, read_len);

    /* 20130816 ian add aud dump */
    {
        char *ptr = (char *)rx_skb->data;

        /* dump 32 bytes of the !!!CCCI DATA!!! part */
        CDEV_LOG(port_id, CHAR, ERR,
            "[DUMP]PORT%d eemcs_cdev_read\n"
            "[00..07](0x%02x)(0x%02x)(0x%02x)(0x%02x)(0x%02x)(0x%02x)(0x%02x)(0x%02x)\n"
            "[08..15](0x%02x)(0x%02x)(0x%02x)(0x%02x)(0x%02x)(0x%02x)(0x%02x)(0x%02x)\n"
            "[16..23](0x%02x)(0x%02x)(0x%02x)(0x%02x)(0x%02x)(0x%02x)(0x%02x)(0x%02x)\n"
            "[24..31](0x%02x)(0x%02x)(0x%02x)(0x%02x)(0x%02x)(0x%02x)(0x%02x)(0x%02x)",
            port_id,
            (int)*(ptr+0), (int)*(ptr+1), (int)*(ptr+2), (int)*(ptr+3), (int)*(ptr+4), (int)*(ptr+5), (int)*(ptr+6), (int)*(ptr+7),
            (int)*(ptr+8), (int)*(ptr+9), (int)*(ptr+10), (int)*(ptr+11), (int)*(ptr+12), (int)*(ptr+13), (int)*(ptr+14), (int)*(ptr+15),
            (int)*(ptr+16), (int)*(ptr+17), (int)*(ptr+18), (int)*(ptr+19), (int)*(ptr+20), (int)*(ptr+21), (int)*(ptr+22), (int)*(ptr+23),
            (int)*(ptr+24), (int)*(ptr+25), (int)*(ptr+26), (int)*(ptr+27), (int)*(ptr+28), (int)*(ptr+29), (int)*(ptr+30), (int)*(ptr+31));
    }

    payload = (unsigned char *)rx_skb->data;

    if (count < read_len) {
        /* Means 1st streaming reading */
        if (rx_pkt_cnt_int == 0) {
            atomic_inc(&curr_node->buff.remaining_rx_cnt);
            curr_node->buff.remaining_rx_skb = rx_skb;
        }
        DBGLOG(CHAR, DBG, "PORT%d !!! USER BUFF(%d) less than DATA SIZE(%d) !!!", port_id, count, read_len);
        DBGLOG(CHAR, DBG, "copy data from %p to %p length = %d", payload, buf, count);

        ret = copy_to_user(buf, payload, count);
        if (ret == 0) {
            curr_node->buff.remaining_len = read_len - count;
            skb_pull(rx_skb, count); // move data pointer
            // update the actually read length
            read_len = count;
        } else {
            // If an error occurs, discard the skb buffer
            DBGLOG(CHAR, ERR, "PORT%d !!! PKT DROP !!! fail copy_to_user buf(%d, %d)", port_id, count, ret);
            atomic_dec(&curr_node->rx_pkt_drop_cnt);
            eemcs_update_statistics(0, port_id, RX, DROP);
            dev_kfree_skb(rx_skb);
            eemcs_ccci_release_rx_skb(port_id, 1, rx_skb);
            if (rx_pkt_cnt_int == 1) {
                curr_node->buff.remaining_len = 0;
                curr_node->buff.remaining_rx_skb = NULL;
                atomic_dec(&curr_node->buff.remaining_rx_cnt);
            }
        }
    } else {
        DBGLOG(CHAR, DBG, "copy data from %p to %p length = %d", payload, buf, read_len);
        ret = copy_to_user(buf, payload, read_len);
        if (ret != 0) {
            DBGLOG(CHAR, ERR, "copy_to_user len=%d fail: %d)", read_len, ret);
        }
        dev_kfree_skb(rx_skb);
        eemcs_ccci_release_rx_skb(port_id, 1, rx_skb);
        if (rx_pkt_cnt_int == 1) {
            curr_node->buff.remaining_len = 0;
            curr_node->buff.remaining_rx_skb = NULL;
            atomic_dec(&curr_node->buff.remaining_rx_cnt);
        }
    }

    if (ret == 0) {
        DEBUG_LOG_FUNCTION_LEAVE;
        return read_len;
    }

_exit:
    DEBUG_LOG_FUNCTION_LEAVE;
    return ret;
}
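/*
 * Illustrative sketch only (not part of the driver): how a user-space reader
 * might consume this cdev. A read() smaller than the pending packet returns
 * the first `count` bytes and parks the remainder in curr_node->buff (the
 * "streaming reading" path above), so the next read() continues where the
 * previous one stopped. The device path is a placeholder. Kept under "#if 0".
 */
#if 0
#include <stdio.h>
#include <fcntl.h>
#include <errno.h>
#include <unistd.h>

int main(void)
{
    char chunk[64];
    ssize_t n;
    int fd = open("/dev/eemcs_md_log", O_RDONLY); /* placeholder node name */

    if (fd < 0) {
        perror("open");
        return 1;
    }

    /* Each read drains at most sizeof(chunk) bytes; a large CCCI packet is
     * delivered across several consecutive reads. */
    while ((n = read(fd, chunk, sizeof(chunk))) > 0)
        fwrite(chunk, 1, (size_t)n, stdout);

    if (n < 0 && errno != EAGAIN)
        perror("read");

    close(fd);
    return 0;
}
#endif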
static long eemcs_cdev_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
{
    eemcs_cdev_node_t *curr_node = (eemcs_cdev_node_t *)fp->private_data;
    KAL_UINT8 port_id = curr_node->eemcs_port_id; /* port_id */
    int ret = 0;
    unsigned int sim_type;
    unsigned int enable_sim_type;

    DEBUG_LOG_FUNCTION_ENTRY;

    if (port_id > END_OF_CCCI_CDEV) {
        DBGLOG(CHAR, ERR, "ccci ioctl fail: unknown port id=%d", port_id);
        ret = -ENOTTY;
        goto _exit;
    }

    switch (cmd) {
    case CCCI_IOC_GET_MD_STATE:
    {
        KAL_UINT32 eemcs_state, md_state;

        eemcs_state = check_device_state();
        if (eemcs_state == EEMCS_BOOTING_DONE) {
            md_state = MD_STATE_READY;
        } else if (eemcs_state == EEMCS_EXCEPTION) {
            md_state = MD_STATE_EXPT;
        } else if (eemcs_state <= EEMCS_INIT) {
            md_state = MD_STATE_INVALID;
        } else {
            md_state = MD_STATE_INIT;
        }
        ret = put_user((unsigned int)md_state, (unsigned int __user *)arg);
        DBGLOG(CHAR, DBG, "CCCI_IOC_GET_MD_STATE(md_s=%d, eemcs_s=%d) by %s(%d)", md_state, eemcs_state,
            ccci_cdev_name[PORT2IDX(port_id)], port_id);
    }
    break;

    case CCCI_IOC_SET_EXCEPTION_DATA:
    {
        DBGLOG(CHAR, ERR, "CCCI_IOC_SET_EXCEPTION_DATA by %s(%d)", ccci_cdev_name[PORT2IDX(port_id)], port_id);
#if 0
        extern EX_LOG_T md_ex_log;
        void __user *argp = (void __user *)arg;

        if (copy_from_user(&md_ex_log, argp, MD_EX_LOG_SIZE)) {
            DBGLOG(PORT, ERR, "copy_from_user failed.");
            return -EFAULT;
        }
        md_exception(&md_ex_log);
#endif
    }
    break;

    case CCCI_IOC_SET_HEADER:
    {
        KAL_UINT32 ori_port_flag = 0;
        KAL_UINT32 new_port_flag = 0;

        //port->control_flag |= PORT_EXPORT_CCIF_BUFFER;
        ori_port_flag = ccci_get_port_cflag(port_id);
        ccci_set_port_type(port_id, (ori_port_flag | EXPORT_CCCI_H));
        new_port_flag = ccci_get_port_cflag(port_id);
        DBGLOG(CHAR, DBG, "CCCI_IOC_SET_HEADER(%d, %d) by %s(%d)", ori_port_flag, new_port_flag,
            ccci_cdev_name[PORT2IDX(port_id)], port_id);
    }
    break;

    case CCCI_IOC_CLR_HEADER:
    {
        //port->control_flag &= (~PORT_EXPORT_CCIF_BUFFER);
        KAL_UINT32 ori_port_flag = 0;
        KAL_UINT32 new_port_flag = 0;

        ori_port_flag = ccci_get_port_cflag(port_id);
        ccci_set_port_type(port_id, (ori_port_flag & (~EXPORT_CCCI_H)));
        new_port_flag = ccci_get_port_cflag(port_id);
        DBGLOG(CHAR, DBG, "CCCI_IOC_CLR_HEADER(%d, %d) by %s(%d)", ori_port_flag, new_port_flag,
            ccci_cdev_name[PORT2IDX(port_id)], port_id);
    }
    break;

    /* This ioctl will be issued from RILD */
    case CCCI_IOC_ENTER_DEEP_FLIGHT:
    case CCCI_IOC_SEND_STOP_MD_REQUEST:
    {
        DBGLOG(CHAR, INF, "IOCTL CCCI_IOC_ENTER_DEEP_FLIGHT by %s(%d)", ccci_cdev_name[PORT2IDX(port_id)], port_id);
        change_device_state(EEMCS_GATE);
        eemcs_power_off_md(0, 0); /* mtlte_sys_sdio_remove */
        eemcs_cdev_msg(CCCI_PORT_CTRL, CCCI_MD_MSG_ENTER_FLIGHT_MODE, 0);
    }
    break;

    case CCCI_IOC_LEAVE_DEEP_FLIGHT:
    case CCCI_IOC_SEND_START_MD_REQUEST:
    {
        DBGLOG(CHAR, INF, "CCCI_IOC_LEAVE_DEEP_FLIGHT by %s(%d)", ccci_cdev_name[PORT2IDX(port_id)], port_id);
        eemcs_cdev_msg(CCCI_PORT_CTRL, CCCI_MD_MSG_LEAVE_FLIGHT_MODE, 0);
    }
    break;

    case CCCI_IOC_FORCE_MD_ASSERT:
    {
        DBGLOG(CHAR, INF, "CCCI_IOC_FORCE_MD_ASSERT by %s(%d)", ccci_cdev_name[PORT2IDX(port_id)], port_id);
        /* force md assert channel is 20090215 */
        eemcs_cdev_write_force_md_rst();
        //CCCI_INIT_MAILBOX(&buff, 0);
        //ret = ccci_write_force(CCCI_FORCE_RESET_MODEM_CHANNEL, &buff);
    }
    break;

    case CCCI_IOC_MD_RESET:
    {
        DBGLOG(CHAR, INF, "CCCI_IOC_MD_RESET by %s(%d)", ccci_cdev_name[PORT2IDX(port_id)], port_id);
        eemcs_md_reset();
    }
    break;

    case CCCI_IOC_CHECK_STATE:
    {
        KAL_UINT32 state;

        state = check_device_state();
        DBGLOG(CHAR, INF, "CCCI_IOC_CHECK_STATE(%d) by %s(%d)", state, ccci_cdev_name[PORT2IDX(port_id)], port_id);
        ret = put_user((unsigned int)state, (unsigned int __user *)arg);
    }
    break;

#ifdef IT_TESTING_PURPOSE
    case CCCI_IOC_PURGE_SKBQ:
    {
        struct sk_buff *skb;

        while ((skb = skb_dequeue(&eemcs_cdev_inst.cdev_node[PORT2IDX(port_id)].rx_skb_list)) != NULL) {
            dev_kfree_skb(skb);
            eemcs_ccci_release_rx_skb(port_id, 1, skb);
        }
        atomic_set(&eemcs_cdev_inst.cdev_node[PORT2IDX(port_id)].rx_pkt_cnt, 0);
        DBGLOG(CHAR, INF, "CCCI_IOC_PURGE_SKBQ by %s(%d)", ccci_cdev_name[PORT2IDX(port_id)], port_id);
    }
    break;
#endif

    case CCCI_IOC_GET_EXT_MD_POST_FIX:
    {
        eemcs_boot_get_ext_md_post_fix((char *)arg);
        DBGLOG(CHAR, INF, "CCCI_IOC_GET_MD_POSTFIX(%s) by %s(%d)", (char *)arg,
            ccci_cdev_name[PORT2IDX(port_id)], port_id);
    }
    break;

    case CCCI_IOC_SET_BOOT_STATE:
    {
        KAL_UINT32 state = 0;

        get_user(state, (unsigned int __user *)arg);
        state = eemcs_boot_reset_test(state);
        DBGLOG(CHAR, INF, "CCCI_IOC_SET_BOOT_STATE(%d) by %s(%d)", state,
            ccci_cdev_name[PORT2IDX(port_id)], port_id);
    }
    break;

    case CCCI_IOC_GET_BOOT_STATE:
    {
        KAL_UINT32 state = 0;

        state = eemcs_boot_get_state();
        ret = put_user((unsigned int)state, (unsigned int __user *)arg);
        DBGLOG(CHAR, INF, "CCCI_IOC_GET_BOOT_STATE(%d) by %s(%d)", state,
            ccci_cdev_name[PORT2IDX(port_id)], port_id);
    }
    break;

    case CCCI_IOC_GET_MD_IMG_EXIST:
    {
        unsigned int *md_img_exist_list = eemcs_get_md_img_exist_list();

        DBGLOG(CHAR, INF, "CCCI_IOC_GET_MD_IMG_EXIST by %s(%d)", ccci_cdev_name[PORT2IDX(port_id)], port_id);
        if (copy_to_user((void __user *)arg, md_img_exist_list, (unsigned int)eemcs_get_md_img_exist_list_size())) {
            DBGLOG(CHAR, ERR, "CCCI_IOC_GET_MD_IMG_EXIST: copy_to_user fail");
            ret = -EFAULT;
        }
    }
    break;

    case CCCI_IOC_GET_MD_TYPE:
    {
        int md_type = get_ext_modem_support(eemcs_get_md_id());

        DBGLOG(CHAR, INF, "CCCI_IOC_GET_MD_TYPE(%d) by %s(%d)", md_type,
            ccci_cdev_name[PORT2IDX(port_id)], port_id);
        ret = put_user((unsigned int)md_type, (unsigned int __user *)arg);
    }
    break;

    case CCCI_IOC_RELOAD_MD_TYPE:
    {
        int md_type = 0;

        if (copy_from_user(&md_type, (void __user *)arg, sizeof(unsigned int))) {
            DBGLOG(CHAR, ERR, "CCCI_IOC_RELOAD_MD_TYPE: copy_from_user fail!");
            ret = -EFAULT;
            break;
        }
        if (md_type >= modem_lwg && md_type <= modem_ltg) {
            DBGLOG(CHAR, INF, "CCCI_IOC_RELOAD_MD_TYPE(%d) by %s(%d)", md_type,
                ccci_cdev_name[PORT2IDX(port_id)], port_id);
            ret = set_ext_modem_support(eemcs_get_md_id(), md_type);
        } else {
            DBGLOG(CHAR, ERR, "CCCI_IOC_RELOAD_MD_TYPE fail: invalid md type(%d)", md_type);
            ret = -EFAULT;
        }
        eemcs_set_reload_image(true);
    }
    break;

    case CCCI_IOC_STORE_MD_TYPE:
    {
        unsigned int md_type_saving = 0;

        //DBGLOG(CHAR, INF, "IOC_STORE_MD_TYPE ioctl by %s!", current->comm);
        if (copy_from_user(&md_type_saving, (void __user *)arg, sizeof(unsigned int))) {
            DBGLOG(CHAR, ERR, "CCCI_IOC_STORE_MD_TYPE: copy_from_user fail!");
            ret = -EFAULT;
            break;
        }
        DBGLOG(CHAR, DBG, "CCCI_IOC_STORE_MD_TYPE(%d) by %s(%s,%d)", md_type_saving, current->comm,
            ccci_cdev_name[PORT2IDX(port_id)], port_id);
        if (md_type_saving >= modem_lwg && md_type_saving <= modem_ltg) {
            if (md_type_saving != get_ext_modem_support(eemcs_get_md_id())) {
                DBGLOG(CHAR, INF, "CCCI_IOC_STORE_MD_TYPE(%d->%d)", md_type_saving, get_ext_modem_support(eemcs_get_md_id()));
            }
            // Notify the md_init daemon to store the md type in NVRAM
            eemcs_cdev_msg(CCCI_PORT_CTRL, CCCI_MD_MSG_STORE_NVRAM_MD_TYPE, md_type_saving);
        } else {
            DBGLOG(CHAR, ERR, "CCCI_IOC_STORE_MD_TYPE fail: invalid md type(%d)", md_type_saving);
            ret = -EFAULT;
        }
    }
    break;

    case CCCI_IOC_GET_MD_EX_TYPE:
    {
        int md_expt_type = get_md_expt_type();

        DBGLOG(CHAR, INF, "CCCI_IOC_GET_MD_EX_TYPE(%d) by %s(%d)", md_expt_type,
            ccci_cdev_name[PORT2IDX(port_id)], port_id);
        ret = put_user((unsigned int)md_expt_type, (unsigned int __user *)arg);
    }
    break;

    case CCCI_IOC_DL_TRAFFIC_CONTROL:
    {
        unsigned int traffic_control = 0;

        if (copy_from_user(&traffic_control, (void __user *)arg, sizeof(unsigned int))) {
            DBGLOG(CHAR, ERR, "CCCI_IOC_DL_TRAFFIC_CONTROL: copy_from_user fail!");
            ret = -EFAULT;
            break;
        }
        DBGLOG(CHAR, INF, "CCCI_IOC_DL_TRAFFIC_CONTROL(%d) by %s(%d)", traffic_control,
            ccci_cdev_name[PORT2IDX(port_id)], port_id);
        if (traffic_control == 1) {
            ccci_cdev_turn_on_dl_q(port_id);
        } else if (traffic_control == 0) {
            ccci_cdev_turn_off_dl_q(port_id);
        } else {
            DBGLOG(CHAR, ERR, "CCCI_IOC_DL_TRAFFIC_CONTROL fail: unknown value(0x%x)", traffic_control);
            ret = -EFAULT;
        }
    }
    break;

    case CCCI_IOC_GET_SIM_TYPE: // for regional phone boot animation
    {
        get_sim_type(eemcs_get_md_id(), &sim_type);
        ret = put_user((unsigned int)sim_type, (unsigned int __user *)arg);
    }
    break;

    case CCCI_IOC_ENABLE_GET_SIM_TYPE: // for regional phone boot animation
    {
        if (copy_from_user(&enable_sim_type, (void __user *)arg, sizeof(unsigned int))) {
            DBGLOG(CHAR, ERR, "CCCI_IOC_ENABLE_GET_SIM_TYPE: copy_from_user fail!\n");
            ret = -EFAULT;
        } else {
            enable_get_sim_type(eemcs_get_md_id(), enable_sim_type);
        }
    }
    break;

    default:
        DBGLOG(CHAR, ERR, "Unknown ioctl(0x%x) by %s(%d)", cmd, ccci_cdev_name[PORT2IDX(port_id)], port_id);
        ret = -EFAULT;
        break;
    }

_exit:
    DEBUG_LOG_FUNCTION_LEAVE;
    return ret;
}
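/*
 * Illustrative sketch only (not part of the driver): a user-space caller
 * querying the modem state through CCCI_IOC_GET_MD_STATE, the first case
 * handled in eemcs_cdev_ioctl() above. It assumes the CCCI_IOC_* definitions
 * are exported through a header visible to user space; the device path is a
 * placeholder. Kept under "#if 0" so the driver build is unaffected.
 */
#if 0
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
/* #include "ccci_ioctl.h"  -- hypothetical header exporting CCCI_IOC_* */

int main(void)
{
    unsigned int md_state = 0;
    int fd = open("/dev/eemcs_ioctl", O_RDWR); /* placeholder node name */

    if (fd < 0) {
        perror("open");
        return 1;
    }

    /* The driver fills md_state via put_user(): READY/EXPT/INVALID/INIT. */
    if (ioctl(fd, CCCI_IOC_GET_MD_STATE, &md_state) == 0)
        printf("md_state=%u\n", md_state);
    else
        perror("CCCI_IOC_GET_MD_STATE");

    close(fd);
    return 0;
}
#endif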