static void process_rx_w(struct work_struct *work)
{
	struct eth_dev	*dev = container_of(work, struct eth_dev, rx_work);
	struct sk_buff	*skb;
	int		status = 0;

	if (!dev->port_usb)
		return;

	while ((skb = skb_dequeue(&dev->rx_frames))) {
		if (status < 0
				|| ETH_HLEN > skb->len
				|| skb->len > ETH_FRAME_LEN) {
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			DBG(dev, "rx length %d\n", skb->len);
			dev_kfree_skb_any(skb);
			continue;
		}
		skb->protocol = eth_type_trans(skb, dev->net);
		dev->net->stats.rx_packets++;
		dev->net->stats.rx_bytes += skb->len;

		status = netif_rx_ni(skb);
	}

	if (netif_running(dev->net))
		rx_fill(dev, GFP_KERNEL);
}
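/*
 * A minimal sketch of the producer side that process_rx_w() assumes: the
 * USB rx completion handler queues each finished frame on dev->rx_frames
 * and kicks rx_work. It is simplified (the real handler also recycles the
 * usb_request back onto the rx queue), and the uether_wq workqueue name is
 * an assumption about the surrounding driver.
 */
static void rx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context;
	struct eth_dev	*dev = ep->driver_data;

	if (req->status == 0) {
		skb_put(skb, req->actual);		/* record received length */
		skb_queue_tail(&dev->rx_frames, skb);	/* hand off to the worker */
	} else {
		dev_kfree_skb_any(skb);			/* transfer failed; drop */
	}

	queue_work(uether_wq, &dev->rx_work);		/* wake process_rx_w() */
}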
static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
{
	printk(KERN_DEBUG "usb: %s ++\n", __func__);

	/* fill the rx queue */
	rx_fill(dev, gfp_flags);

	/* and open the tx floodgates */
	atomic_set(&dev->tx_qlen, 0);
	netif_wake_queue(dev->net);
}
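/*
 * For reference, the rx_fill() that every eth_start() variant here calls
 * looks roughly like this in the mainline u_ether driver; rx_submit() and
 * defer_kevent() are helpers assumed to exist in the same file.
 */
static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
{
	struct usb_request	*req;
	unsigned long		flags;

	/* fill unused rxq slots with some skb */
	spin_lock_irqsave(&dev->req_lock, flags);
	while (!list_empty(&dev->rx_reqs)) {
		req = list_first_entry(&dev->rx_reqs, struct usb_request, list);
		list_del_init(&req->list);
		spin_unlock_irqrestore(&dev->req_lock, flags);

		/* on failure, retry later from the work item */
		if (rx_submit(dev, req, gfp_flags) < 0) {
			defer_kevent(dev, WORK_RX_MEMORY);
			return;
		}

		spin_lock_irqsave(&dev->req_lock, flags);
	}
	spin_unlock_irqrestore(&dev->req_lock, flags);
}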
static void process_uether_rx(struct eth_dev *dev)
{
	struct sk_buff *skb;
	int status = 0;

	if (!dev->port_usb)
		return;

	while ((skb = skb_dequeue(&dev->rx_frames))) {
		if (status < 0
				|| ETH_HLEN > skb->len
				|| skb->len > ETH_FRAME_LEN) {
#ifdef CONFIG_USB_NCM_SUPPORT_MTU_CHANGE
			/*
			 * Need to revisit: net->mtu does not include the
			 * header size in case of a changed MTU.
			 */
			if (!strcmp(dev->port_usb->func.name, "ncm")) {
				if (status < 0
						|| ETH_HLEN > skb->len
						|| skb->len > (dev->net->mtu + ETH_HLEN)) {
					printk(KERN_ERR "usb: %s drop NCM frame, rx length %d\n",
							__func__, skb->len);
				} else {
					printk(KERN_ERR "usb: %s keep NCM frame, rx length %d\n",
							__func__, skb->len);
					goto process_frame;
				}
			}
#endif
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
#ifndef CONFIG_USB_NCM_SUPPORT_MTU_CHANGE
			DBG(dev, "rx length %d\n", skb->len);
#else
			printk(KERN_DEBUG "usb: %s drop, rx length %d\n",
					__func__, skb->len);
#endif
			dev_kfree_skb_any(skb);
			continue;
		}
#ifdef CONFIG_USB_NCM_SUPPORT_MTU_CHANGE
process_frame:
#endif
		skb->protocol = eth_type_trans(skb, dev->net);
		dev->net->stats.rx_packets++;
		dev->net->stats.rx_bytes += skb->len;

		/* may run from completion (atomic) or task context */
		if (in_interrupt())
			status = netif_rx(skb);
		else
			status = netif_rx_ni(skb);
	}

	if (netif_running(dev->net))
		rx_fill(dev, GFP_ATOMIC);
}
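/*
 * A hedged refactor sketch (not part of the original code): pulling the
 * NCM-aware length bound into a helper would make the accept/drop decision
 * above explicit and avoid the goto across the #ifdef. It assumes only the
 * fields that process_uether_rx() already uses.
 */
static bool uether_rx_len_ok(struct eth_dev *dev, struct sk_buff *skb)
{
	unsigned int max_len = ETH_FRAME_LEN;

#ifdef CONFIG_USB_NCM_SUPPORT_MTU_CHANGE
	/* NCM may run with a changed MTU; net->mtu excludes the header */
	if (!strcmp(dev->port_usb->func.name, "ncm"))
		max_len = dev->net->mtu + ETH_HLEN;
#endif
	return skb->len >= ETH_HLEN && skb->len <= max_len;
}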
static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
{
	DBG(dev, "%s\n", __func__);

	rx_fill(dev, gfp_flags);

	dev->tx_qlen = 0;
	netif_wake_queue(dev->net);
}
static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
{
	DBG(dev, "%s\n", __func__);

	/* fill the rx queue */
	rx_fill(dev, gfp_flags);

	/* and open the tx floodgates */
	dev->tx_qlen = 0;
	netif_wake_queue(dev->net);
}
static void eth_work(struct work_struct *work)
{
	struct eth_dev	*dev = container_of(work, struct eth_dev, work);

	if (test_and_clear_bit(WORK_RX_MEMORY, &dev->todo)) {
		if (netif_running(dev->net))
			rx_fill(dev, GFP_KERNEL);
	}

	if (dev->todo)
		DBG(dev, "work done, flags = 0x%lx\n", dev->todo);
}
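/*
 * The mainline driver pairs eth_work() with a defer_kevent() helper along
 * these lines: it sets the bit that eth_work() clears and schedules the
 * work item, so rx_fill() can be retried from process context after an
 * allocation failure.
 */
static void defer_kevent(struct eth_dev *dev, int flag)
{
	if (test_and_set_bit(flag, &dev->todo))
		return;		/* already pending */
	if (!schedule_work(&dev->work))
		ERROR(dev, "kevent %d may have been dropped\n", flag);
	else
		DBG(dev, "kevent %d scheduled\n", flag);
}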
static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
{
	DBG(dev, "%s\n", __func__);
	xlog_printk(ANDROID_LOG_INFO, UETHER_LOG, "%s\n", __func__);

	/* fill the rx queue */
	rx_fill(dev, gfp_flags);

	/* and open the tx floodgates */
	atomic_set(&dev->tx_qlen, 0);
	netif_wake_queue(dev->net);
}
static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
{
	DBG(dev, "%s\n", __func__);
	/* printk("%s:%d\n", __func__, __LINE__); */

	/* fill the rx queue */
	rx_fill(dev, gfp_flags);

	/* and open the tx floodgates */
	atomic_set(&dev->tx_qlen, 0);
	netif_wake_queue(dev->net);
}
static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
{
	DBG(dev, "%s\n", __func__);

	/* fill the rx queue */
	rx_fill(dev, gfp_flags);

	/* and open the tx floodgates */
	atomic_set(&dev->tx_qlen, 0);
	netif_wake_queue(dev->net);

#ifdef CONFIG_USB_SPRD_DWC
	rndis_msg_init(dev);
#endif
}
static void process_rx_w(struct work_struct *work)
{
	struct eth_dev *dev = container_of(work, struct eth_dev, rx_work);
	struct sk_buff *skb;
	int status = 0;
	unsigned int uiCurMtu = 0;

	if (!dev->port_usb)
		return;

	set_wake_up_idle(true);

	/* fall back to the standard frame size if the MTU looks bogus */
	uiCurMtu = dev->net->mtu + ETH_HLEN;
	if ((uiCurMtu <= ETH_HLEN) || (uiCurMtu > ETH_FRAME_LEN_MAX))
		uiCurMtu = ETH_FRAME_LEN;

	while ((skb = skb_dequeue(&dev->rx_frames))) {
		/* the upper length bound applies only in Ethernet link mode */
		if (status < 0
				|| ETH_HLEN > skb->len
				|| (skb->len > uiCurMtu &&
				    test_bit(RMNET_MODE_LLP_ETH, &dev->flags))) {
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			DBG(dev, "rx length %d\n", skb->len);
			dev_kfree_skb_any(skb);
			continue;
		}

		if (test_bit(RMNET_MODE_LLP_IP, &dev->flags))
			skb->protocol = ether_ip_type_trans(skb, dev->net);
		else
			skb->protocol = eth_type_trans(skb, dev->net);
		dev->net->stats.rx_packets++;
		dev->net->stats.rx_bytes += skb->len;

#if fcAUTO_PERF_LOCK
		if (skb->len >= 1024)
			auto_perf_lock_enable(1);
#endif
		status = netif_rx_ni(skb);
	}

	set_wake_up_idle(false);

	if (netif_running(dev->net))
		rx_fill(dev, GFP_KERNEL);
}
static void eth_work(struct work_struct *work)
{
	struct eth_dev *dev = container_of(work, struct eth_dev, work);
	int testwkbit;
	unsigned long flags;

	/* this variant guards ->todo updates with req_rx_lock */
	spin_lock_irqsave(&dev->req_rx_lock, flags);
	testwkbit = test_and_clear_bit(WORK_RX_MEMORY, &dev->todo);
	spin_unlock_irqrestore(&dev->req_rx_lock, flags);

	if (testwkbit) {
		if (netif_running(dev->net))
			rx_fill(dev, GFP_KERNEL);
	}

	if (dev->todo)
		DBG(dev, "work done, flags = 0x%lx\n", dev->todo);
}
static void process_rx_w(struct work_struct *work)
{
	struct eth_dev *dev = container_of(work, struct eth_dev, rx_work);
	struct sk_buff *skb;
	int status = 0;
	unsigned int uiCurMtu = 0;

	if (!dev->port_usb)
		return;

	/* fall back to the standard frame size if the MTU looks bogus */
	uiCurMtu = dev->net->mtu + ETH_HLEN;
	if ((uiCurMtu <= ETH_HLEN) || (uiCurMtu > ETH_FRAME_LEN_MAX))
		uiCurMtu = ETH_FRAME_LEN;

	while ((skb = skb_dequeue(&dev->rx_frames))) {
		if (status < 0
				|| ETH_HLEN > skb->len
				/* || skb->len > ETH_FRAME_LEN */
				|| skb->len > uiCurMtu) {
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			DBG(dev, "rx length %d\n", skb->len);
			dev_kfree_skb_any(skb);
			continue;
		}

		skb->protocol = eth_type_trans(skb, dev->net);
		dev->net->stats.rx_packets++;
		dev->net->stats.rx_bytes += skb->len;

#if fcAUTO_PERF_LOCK
		/* defer the perf-lock hook so rx processing is not stalled */
		if (skb->len >= 1024)
			schedule_work(&dev->enable_perf_lock_work);
#endif
		status = netif_rx_ni(skb);
	}

	if (netif_running(dev->net))
		rx_fill(dev, GFP_KERNEL);
}
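/*
 * A hypothetical sketch of the handler behind
 * schedule_work(&dev->enable_perf_lock_work) above. The work item lets the
 * vendor perf-lock hook run outside the rx loop; the handler name and the
 * enable_perf_lock_work field are assumptions, and auto_perf_lock_enable()
 * is taken from the other variant in this section, not confirmed code.
 */
static void enable_perf_lock_w(struct work_struct *work)
{
	struct eth_dev *dev = container_of(work, struct eth_dev,
					   enable_perf_lock_work);

	/* vendor-specific hook; assumed to boost CPU while rx is busy */
	auto_perf_lock_enable(1);
	DBG(dev, "perf lock enabled\n");
}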