/*
 * process_rx_w() - workqueue handler that drains completed RX frames
 * from dev->rx_frames and hands each skb to the network stack.
 *
 * Runs in process context (GFP_KERNEL is used for the refill below).
 * Frames whose length is outside the accepted window are counted as
 * RX errors and dropped; after one netif_rx_ni() failure (status < 0),
 * every remaining queued frame is also dropped by the same check.
 */
static void process_rx_w(struct work_struct *work)
{
	struct eth_dev *dev = container_of(work, struct eth_dev, rx_work);
	struct sk_buff *skb;
	int status = 0;
	unsigned int uiCurMtu = 0;

	/* Bail out if the USB link is already gone. */
	if (!dev->port_usb)
		return;

	/*
	 * NOTE(review): presumably blocks idle power-collapse while we are
	 * pushing packets up the stack; cleared again before returning —
	 * confirm against set_wake_up_idle()'s definition.
	 */
	set_wake_up_idle(true);

	/*
	 * Upper bound for an acceptable frame: current MTU plus the
	 * Ethernet header.  Fall back to the standard ETH_FRAME_LEN when
	 * the computed value is degenerate (<= header) or oversized.
	 */
	uiCurMtu = dev->net->mtu + ETH_HLEN;
	if ((uiCurMtu <= ETH_HLEN) || (uiCurMtu > ETH_FRAME_LEN_MAX))
		uiCurMtu = ETH_FRAME_LEN;

	while ((skb = skb_dequeue(&dev->rx_frames))) {
		/*
		 * Drop the frame when a previous netif_rx_ni() failed, when
		 * it is too short to hold an Ethernet header, or when it
		 * exceeds the MTU window while running in Ethernet link
		 * protocol mode (the length cap is not applied in IP mode).
		 */
		if (status < 0 || ETH_HLEN > skb->len ||
		    (skb->len > uiCurMtu &&
		     test_bit(RMNET_MODE_LLP_ETH, &dev->flags))) {
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			DBG(dev, "rx length %d\n", skb->len);
			dev_kfree_skb_any(skb);
			continue;
		}

		/* Set skb->protocol according to the active link protocol. */
		if (test_bit(RMNET_MODE_LLP_IP, &dev->flags))
			skb->protocol = ether_ip_type_trans(skb, dev->net);
		else
			skb->protocol = eth_type_trans(skb, dev->net);
		dev->net->stats.rx_packets++;
		dev->net->stats.rx_bytes += skb->len;
#if fcAUTO_PERF_LOCK
		/*
		 * Vendor hook: large frames (>= 1024 bytes) trigger the
		 * performance lock, presumably to boost CPU frequency for
		 * bulk transfers — semantics live in auto_perf_lock_enable().
		 */
		if (skb->len >= 1024)
			auto_perf_lock_enable(1);
#endif
		/* Non-negative on success; a failure poisons the loop above. */
		status = netif_rx_ni(skb);
	}
	set_wake_up_idle(false);

	/* Re-queue RX buffers on the USB endpoint if the netdev is up. */
	if (netif_running(dev->net))
		rx_fill(dev, GFP_KERNEL);
}
/*
 * enable_perf_lock_work_func() - deferred work item that engages the
 * performance lock from process context.
 *
 * Fix: the sibling RX path (process_rx_w) invokes auto_perf_lock_enable(1);
 * the zero-argument call that was here does not match that usage and will
 * not compile against a prototyped auto_perf_lock_enable(int).  Pass the
 * same "enable" value so both call sites agree.
 *
 * NOTE(review): argument value mirrors the auto_perf_lock_enable(1) call
 * in process_rx_w() — confirm against the function's actual prototype.
 */
static void enable_perf_lock_work_func(struct work_struct *work)
{
	auto_perf_lock_enable(1);
}