/*
 * bam_notify() - BAM DMUX event callback for an rmnet device (legacy variant).
 *
 * @dev:   opaque device cookie; treated as a struct net_device * throughout
 *         (passed to netdev_priv()/netif_wake_queue()).
 * @event: BAM_DMUX_* event code.
 * @data:  event payload; carries a struct sk_buff * for RECEIVE/WRITE_DONE.
 *
 * Dispatches downlink RX completions, uplink TX completions, and uplink
 * connect/disconnect state changes.
 *
 * NOTE(review): ul_is_connected and p->waiting_for_ul are read and written
 * here with no locking; this presumably races with the TX path that sets
 * waiting_for_ul and stops the queue — confirm against the xmit handler
 * (the sibling variant of this function uses p->lock for exactly this).
 */
static void bam_notify(void *dev, int event, unsigned long data)
{
	struct rmnet_private *p = netdev_priv(dev);

	switch (event) {
	case BAM_DMUX_RECEIVE:
		/* Downlink packet arrived; hand the SKB up the stack. */
		bam_recv_notify(dev, (struct sk_buff *)(data));
		break;
	case BAM_DMUX_WRITE_DONE:
		/* Uplink transmit of this SKB completed. */
		bam_write_done(dev, (struct sk_buff *)(data));
		break;
	case BAM_DMUX_UL_CONNECTED:
		ul_is_connected = 1;
		/* If TX stalled waiting for the uplink, restart the queue. */
		if (p->waiting_for_ul) {
			netif_wake_queue(dev);
			p->waiting_for_ul = 0;
		}
		break;
	case BAM_DMUX_UL_DISCONNECTED:
		ul_is_connected = 0;
		break;
	}
}
static void bam_notify(void *dev, int event, unsigned long data) { struct rmnet_private *p = netdev_priv(dev); unsigned long flags; switch (event) { case BAM_DMUX_RECEIVE: bam_recv_notify(dev, (struct sk_buff *)(data)); break; case BAM_DMUX_WRITE_DONE: bam_write_done(dev, (struct sk_buff *)(data)); break; case BAM_DMUX_UL_CONNECTED: spin_lock_irqsave(&p->lock, flags); if (p->waiting_for_ul_skb != NULL) { struct sk_buff *skb; int ret; skb = p->waiting_for_ul_skb; p->waiting_for_ul_skb = NULL; spin_unlock_irqrestore(&p->lock, flags); ret = _rmnet_xmit(skb, dev); if (ret) { pr_err("%s: error %d dropping delayed TX SKB %p\n", __func__, ret, skb); dev_kfree_skb_any(skb); } netif_wake_queue(dev); } else { spin_unlock_irqrestore(&p->lock, flags); } break; case BAM_DMUX_UL_DISCONNECTED: break; } }