static int rmnet_xmit(struct sk_buff *skb, struct net_device *dev) { struct rmnet_private *p = netdev_priv(dev); struct xmd_ch_info *info = p->ch; int ret; #if defined (RMNET_CRITICAL_DEBUG) dynadbg_module(DYNADBG_CRIT|DYNADBG_TX,"\nRMNET[%d]: %d>\n",info->chno, skb->len); // printk("\nRMNET[%d]: %d>\n",info->chno, skb->len); #endif if((skb->len - RMNET_ETH_HDR_SIZE) <= 0) { #ifdef CONFIG_MSM_RMNET_DEBUG dynadbg_module(DYNADBG_DEBUG|DYNADBG_TX,"\nrmnet: Got only header for ch %d, return\n", info->chno); // printk("\nrmnet: Got only header for ch %d, return\n", info->chno); #endif ret = NETDEV_TX_OK; dev_kfree_skb_irq(skb); goto quit_xmit; } if ((ret = xmd_ch_write (info->chno, (void *)((char *) skb->data + RMNET_ETH_HDR_SIZE), skb->len - RMNET_ETH_HDR_SIZE)) != 0) { if(ret == -ENOMEM) { ret = NETDEV_TX_BUSY; #ifdef CONFIG_MSM_RMNET_DEBUG dynadbg_module(DYNADBG_DEBUG|DYNADBG_TX,"\nrmnet: Cannot alloc mem, so returning busy for ch %d\n", info->chno); // printk("\nrmnet: Cannot alloc mem, so returning busy for ch %d\n", // info->chno); #endif goto quit_xmit; } else if(ret == -EBUSY) { netif_stop_queue(dev); rmnet_ch_block_info[info->chno].dev = dev; rmnet_ch_block_info[info->chno].blocked = 1; #ifdef CONFIG_MSM_RMNET_DEBUG dynadbg_module(DYNADBG_DEBUG|DYNADBG_TX,"\nrmnet: Stopping queue for ch %d\n", info->chno); // printk("\nrmnet: Stopping queue for ch %d\n", info->chno); #endif ret = NETDEV_TX_BUSY; goto quit_xmit; } } else { if (count_this_packet(skb->data, skb->len)) { p->stats.tx_packets++; p->stats.tx_bytes += skb->len; #ifdef CONFIG_MSM_RMNET_DEBUG p->wakeups_xmit += rmnet_cause_wakeup(p); #endif } } ret = NETDEV_TX_OK; dev_kfree_skb_irq(skb); quit_xmit: return ret; }
/*
 * ifx_check_handle_work() - deferred work handler that probes the modem
 * link with a bare "AT" command and, when the CAWAKE feature is built
 * in, re-arms its interrupt.
 *
 * @work: the work_struct this handler was scheduled from (unused).
 */
static void ifx_check_handle_work(struct work_struct *work)
{
	/* NOTE(review): msleep() should come from <linux/delay.h>; the
	 * function-local extern is kept only because the file's include
	 * block is not visible here — confirm and move to an #include. */
	extern void msleep(unsigned int msecs);

	printk("[TEST] %s ( %d line)\n",__func__,__LINE__);

	/* Let the link settle briefly before sending the probe. */
	msleep(100);

	/* Fire a minimal AT command ("AT\r\n", 4 bytes) down channel 9. */
	xmd_ch_write(9, "AT\r\n", 4);

	//mo2haewoon.you => [START]
#if defined (HSI_SEND_ATCOMMAND_TO_CAWAKE)
	/* Re-enable the CAWAKE IRQ now that the probe has been sent. */
	enable_irq(irq_num_122);
#endif
	//mo2haewoon.you <= [END]
}