static void bam_write_done(void *dev, struct sk_buff *skb)
{
	struct rmnet_private *p = netdev_priv(dev);
	u32 opmode = p->operation_mode;
	unsigned long flags;

	DBG1("%s: write complete\n", __func__);
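	/*
	 * In IP mode every transmitted packet is counted; in Ethernet mode
	 * count_this_packet() decides whether this frame contributes to the
	 * tx statistics.
	 */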
	if (RMNET_IS_MODE_IP(opmode) ||
				count_this_packet(skb->data, skb->len)) {
		p->stats.tx_packets++;
		p->stats.tx_bytes += skb->len;
#ifdef CONFIG_MSM_RMNET_DEBUG
		p->wakeups_xmit += rmnet_cause_wakeup(p);
#endif
	}

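	/*
	 * Acer MSM8960 fast dormancy: each tx completion restarts the fast
	 * dormancy timer. Pending work is cancelled and rescheduled 3 s out
	 * while the kernel is in early suspend (screen off), or
	 * fd_screen_on_delay seconds out when a screen-on delay is configured.
	 */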
#if defined(CONFIG_ARCH_ACER_MSM8960)
	cancel_delayed_work_sync(&fast_dormancy_rmnet->fast_dormancy_work);
	if (kernel_is_in_earlysuspend())
		schedule_delayed_work(&fast_dormancy_rmnet->fast_dormancy_work,
				      msecs_to_jiffies(3000));
	else if (fd_screen_on_delay != 0)
		schedule_delayed_work(&fast_dormancy_rmnet->fast_dormancy_work,
				      msecs_to_jiffies(fd_screen_on_delay * 1000));
#endif

	DBG1("[%s] Tx packet #%lu len=%d mark=0x%x\n",
	    ((struct net_device *)(dev))->name, p->stats.tx_packets,
	    skb->len, skb->mark);
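	/* Accounting and logging are complete; the skb must not be used below. */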
	dev_kfree_skb_any(skb);

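	/*
	 * If the queue was stopped at the high watermark, wake it now that
	 * the BAM DMUX channel has drained below the low watermark.
	 */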
	spin_lock_irqsave(&p->tx_queue_lock, flags);
	if (netif_queue_stopped(dev) &&
	    msm_bam_dmux_is_ch_low(p->ch_id)) {
		DBG0("%s: Low WM hit, waking queue on %s\n",
		      __func__, ((struct net_device *)(dev))->name);
		netif_wake_queue(dev);
	}
	spin_unlock_irqrestore(&p->tx_queue_lock, flags);
}