Example #1
static int _rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct rmnet_private *p = netdev_priv(dev);
    smd_channel_t *ch = p->ch;
    int smd_ret;

    dev->trans_start = jiffies;
    smd_ret = smd_write(ch, skb->data, skb->len);
    if (smd_ret != skb->len) {
        pr_err("%s: smd_write returned error %d", __func__, smd_ret);
        goto xmit_out;
    }

    if (count_this_packet(skb->data, skb->len)) {
        p->stats.tx_packets++;
        p->stats.tx_bytes += skb->len;
#ifdef CONFIG_MSM_RMNET_DEBUG
        p->wakeups_xmit += rmnet_cause_wakeup(p);
#endif
    }

xmit_out:
    /* data xmited, safe to release skb */
    dev_kfree_skb_irq(skb);
    return 0;
}
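Every example on this page gates its statistics update on count_this_packet(). The helper itself is never shown; in the MSM rmnet driver it is typically a small filter that refuses to count ARP frames generated by the Ethernet emulation. A minimal sketch, assuming the two-argument (data, len) form used by most of the examples below:

#include <linux/if_ether.h>

/* Sketch: count everything except ARP, so locally generated ARP
 * traffic does not inflate the interface rx/tx counters. */
static int count_this_packet(void *_hdr, int len)
{
	struct ethhdr *hdr = _hdr;

	if (len >= ETH_HLEN && hdr->h_proto == htons(ETH_P_ARP))
		return 0;

	return 1;
}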
Example #2
static void gannet_wq_func(struct work_struct *work)
{
	struct gannet_work_struct *gannet_work =
					(struct gannet_work_struct *)work;
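	/* NB: the cast assumes 'work' is the first member of
	 * struct gannet_work_struct; container_of() would make that explicit */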

	/* write skb->len bytes of skb->data to the UDP socket */
	struct gannet_private *p = gannet_work->p;
	struct sk_buff *skb = gannet_work->skb;
	char *data;
	int len;

	data = skb->data;
	len = skb->len;

	/* Remove ethernet header */
	data += 14;
	len -= 14;

	if (len != ksocket_sendto(p->tx_sock, &p->tx_addr, data, len)) {
		printk(KERN_ERR
			   "gannet sendto() failed, dropping packet\n");
	} else {
		if (count_this_packet(data, len)) {
			p->stats.tx_packets++;
			p->stats.tx_bytes += len;
		}
	}

	dev_kfree_skb(skb);

	kfree((void *)work);
}
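gannet_wq_func() frees both the skb and the work item, which implies the transmit path allocates one gannet_work_struct per packet and queues it. A hypothetical sketch of that producer side (the struct layout and the gannet_xmit name are assumptions; they do not appear on this page):

/* hypothetical per-packet work item; 'work' must stay the first
 * member because gannet_wq_func() casts it back directly */
struct gannet_work_struct {
	struct work_struct work;
	struct gannet_private *p;
	struct sk_buff *skb;
};

static int gannet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct gannet_private *p = netdev_priv(dev);
	struct gannet_work_struct *w;

	w = kmalloc(sizeof(*w), GFP_ATOMIC); /* xmit may run in softirq context */
	if (!w) {
		dev_kfree_skb_any(skb);
		p->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	INIT_WORK(&w->work, gannet_wq_func);
	w->p = p;
	w->skb = skb;
	schedule_work(&w->work);
	return NETDEV_TX_OK;
}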
/*Rx Callback, Called in Work Queue context*/
static void sdio_recv_notify(void *dev, struct sk_buff *skb)
{
	struct rmnet_private *p = netdev_priv(dev);
	unsigned long flags;
	u32 opmode;

	if (skb) {
		skb->dev = dev;
		/* Handle Rx frame format */
		spin_lock_irqsave(&p->lock, flags);
		opmode = p->operation_mode;
		spin_unlock_irqrestore(&p->lock, flags);

		if (RMNET_IS_MODE_IP(opmode)) {
			/* Driver in IP mode */
			skb->protocol = rmnet_ip_type_trans(skb, dev);
		} else {
			/* Driver in Ethernet mode */
			skb->protocol = eth_type_trans(skb, dev);
		}
		if (RMNET_IS_MODE_IP(opmode) ||
		    count_this_packet(skb->data, skb->len)) {
#ifdef CONFIG_MSM_RMNET_DEBUG
			p->wakeups_rcv += rmnet_cause_wakeup(p);
#endif
			p->stats.rx_packets++;
			p->stats.rx_bytes += skb->len;
		}
		netif_rx(skb);
	} else
		pr_err("[lte] Error - %s: No skb received", __func__);
}
static void bam_write_done(void *dev, struct sk_buff *skb)
{
	struct rmnet_private *p = netdev_priv(dev);
	u32 opmode = p->operation_mode;
	unsigned long flags;

	DBG1("%s: write complete\n", __func__);
	if (RMNET_IS_MODE_IP(opmode) ||
				count_this_packet(skb->data, skb->len)) {
		p->stats.tx_packets++;
		p->stats.tx_bytes += skb->len;
#ifdef CONFIG_MSM_RMNET_DEBUG
		p->wakeups_xmit += rmnet_cause_wakeup(p);
#endif
	}

	DBG1("[%s] Tx packet #%lu len=%d mark=0x%x\n",
	    ((struct net_device *)(dev))->name, p->stats.tx_packets,
	    skb->len, skb->mark);
	dev_kfree_skb_any(skb);

	spin_lock_irqsave(&p->tx_queue_lock, flags);
	if (netif_queue_stopped(dev) &&
	    msm_bam_dmux_is_ch_low(p->ch_id)) {
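		/* note: skb was already freed above; only its stale pointer
		 * value is logged here, it is never dereferenced */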
		DBG0("%s: Low WM hit, waking queue=%p\n",
		      __func__, skb);
		netif_wake_queue(dev);
	}
	spin_unlock_irqrestore(&p->tx_queue_lock, flags);
}
static int rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct rmnet_private *p = netdev_priv(dev);
	struct xmd_ch_info *info = p->ch;
	int ret;

#if defined (RMNET_CRITICAL_DEBUG)
      dynadbg_module(DYNADBG_CRIT|DYNADBG_TX,"\nRMNET[%d]: %d>\n",info->chno, skb->len);
//   printk("\nRMNET[%d]: %d>\n",info->chno, skb->len);
#endif
	if((skb->len - RMNET_ETH_HDR_SIZE) <= 0) {
#ifdef CONFIG_MSM_RMNET_DEBUG
             dynadbg_module(DYNADBG_DEBUG|DYNADBG_TX,"\nrmnet: Got only header for ch %d, return\n", info->chno);
//		printk("\nrmnet: Got only header for ch %d, return\n", info->chno);
#endif
		ret = NETDEV_TX_OK;
		dev_kfree_skb_irq(skb);
		goto quit_xmit;
	}

	if ((ret = xmd_ch_write (info->chno,
							(void *)((char *) skb->data + RMNET_ETH_HDR_SIZE),
							skb->len - RMNET_ETH_HDR_SIZE)) != 0) {
		if(ret == -ENOMEM) {
			ret = NETDEV_TX_BUSY;
#ifdef CONFIG_MSM_RMNET_DEBUG
             dynadbg_module(DYNADBG_DEBUG|DYNADBG_TX,"\nrmnet: Cannot alloc mem, so returning busy for ch %d\n",
						info->chno);
//                printk("\nrmnet: Cannot alloc mem, so returning busy for ch %d\n",
//						info->chno);
#endif
			goto quit_xmit;
		} else if(ret == -EBUSY) {
			netif_stop_queue(dev);
			rmnet_ch_block_info[info->chno].dev = dev;
			rmnet_ch_block_info[info->chno].blocked = 1;
#ifdef CONFIG_MSM_RMNET_DEBUG
                   dynadbg_module(DYNADBG_DEBUG|DYNADBG_TX,"\nrmnet: Stopping queue for ch %d\n", info->chno);
//			printk("\nrmnet: Stopping queue for ch %d\n", info->chno);
#endif
			ret = NETDEV_TX_BUSY;
			goto quit_xmit;
		}
	} else {
		if (count_this_packet(skb->data, skb->len)) {
			p->stats.tx_packets++;
			p->stats.tx_bytes += skb->len;
#ifdef CONFIG_MSM_RMNET_DEBUG
			p->wakeups_xmit += rmnet_cause_wakeup(p);
#endif
		}
	}
	ret = NETDEV_TX_OK;
	dev_kfree_skb_irq(skb);

quit_xmit:
	return ret;
}
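The -EBUSY branch above parks the device in rmnet_ch_block_info and stops the queue. The matching wake-up path is not shown on this page; it would run from the channel's write-ready notification, roughly like this hypothetical sketch (the xmd_ch_write_ready name is assumed):

/* hypothetical: called when the xmd channel can accept data again */
static void xmd_ch_write_ready(int chno)
{
	if (rmnet_ch_block_info[chno].blocked) {
		rmnet_ch_block_info[chno].blocked = 0;
		netif_wake_queue(rmnet_ch_block_info[chno].dev);
	}
}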
Example #6
static void smux_read_done(void *rcv_dev, const void *meta_data)
{
	struct rmnet_private *p;
	struct net_device *dev = rcv_dev;
	u32 opmode;
	unsigned long flags;
	struct sk_buff *skb = NULL;
	const struct smux_meta_read  *read_meta_info = meta_data;

	if (!dev || !read_meta_info) {
		DBG1("%s:invalid read_done callback recieved", __func__);
		return;
	}

	p = netdev_priv(dev);

	skb = (struct sk_buff *) read_meta_info->pkt_priv;

	if (!skb || skb->dev != dev) {
		DBG1("%s: ERR:skb pointer NULL in READ_DONE CALLBACK",
		      __func__);
		return;
	}

	/* Handle Rx frame format */
	spin_lock_irqsave(&p->lock, flags);
	opmode = p->operation_mode;
	spin_unlock_irqrestore(&p->lock, flags);

	if (RMNET_IS_MODE_IP(opmode)) {
		/* Driver in IP mode */
		skb->protocol = rmnet_ip_type_trans(skb, dev);
	} else {
		/* Driver in Ethernet mode */
		skb->protocol = eth_type_trans(skb, dev);
	}
	if (RMNET_IS_MODE_IP(opmode) ||
		count_this_packet(skb->data, skb->len)) {
#ifdef CONFIG_MSM_RMNET_DEBUG
		p->wakeups_rcv += rmnet_cause_wakeup(p);
#endif
		p->stats.rx_packets++;
		p->stats.rx_bytes += skb->len;
	}
	DBG2("[%s] Rx packet #%lu len=%d\n",
		 dev->name, p->stats.rx_packets,
		 skb->len);
	/* Deliver to network stack */
	netif_rx(skb);

	return;
}
Example #7
static int rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct rmnet_private *p = netdev_priv(dev);
	smd_channel_t *ch = p->ch;
#if fcENABLE_FLOW_CTRL
	int res;
	int iAvail = smd_write_avail(ch);
#endif

#if fcENABLE_FLOW_CTRL
	if ((res = smd_write_atomic(ch, skb->data, skb->len)) != skb->len) {
		pr_err("rmnet fifo full, dropping packet: %d (%d,%d), fifo size = %d\n", res, skb->len, iAvail, smd_total_fifo_size(ch));
		rmnet_Throttle(dev);
#else
	if (smd_write_atomic(ch, skb->data, skb->len) != skb->len) {
		pr_err("rmnet fifo full, dropping packet, fifo size = %d\n", smd_total_fifo_size(ch));
#endif
	} else {
#if fcENABLE_FLOW_CTRL
		if (iAvail < cTHR_RMNET_FIFO)
		{
			pr_devel(LOG_TAG1 "rmnet fifo almost full: %d (%d,%d), tx paused\n", res, skb->len, iAvail);
			rmnet_Throttle(dev);
		}
#endif
		if (count_this_packet(skb->data, skb->len)) {
			p->stats.tx_packets++;
			p->stats.tx_bytes += skb->len;
#ifdef CONFIG_MSM_RMNET_DEBUG
			p->wakeups_xmit += rmnet_cause_wakeup(p);
#endif
		}
	}

	dev_kfree_skb_irq(skb);
	return 0;
}

static struct net_device_stats *rmnet_get_stats(struct net_device *dev)
{
	struct rmnet_private *p = netdev_priv(dev);
	return &p->stats;
}

static void rmnet_set_multicast_list(struct net_device *dev)
{
}

static void rmnet_tx_timeout(struct net_device *dev)
{
	pr_info("rmnet_tx_timeout()\n");

	rmnet_check_fifo(dev);
}
Example #8
/* Called in soft-irq context */
static void smd_net_data_handler(unsigned long arg)
{
	struct net_device *dev = (struct net_device *) arg;
	struct rmnet_private *p = netdev_priv(dev);
	struct sk_buff *skb;
	void *ptr = 0;
	int sz;

	for (;;) {
		sz = smd_cur_packet_size(p->ch);
		if (sz == 0) break;
		if (smd_read_avail(p->ch) < sz) break;

		if (sz > 1514) {
			pr_err("rmnet_recv() discarding %d len\n", sz);
			ptr = 0;
		} else {
			skb = dev_alloc_skb(sz + NET_IP_ALIGN);
			if (skb == NULL) {
				pr_err("rmnet_recv() cannot allocate skb\n");
				/* out of memory, reschedule a later attempt */
				smd_net_data_tasklet.data = (unsigned long)dev;
				tasklet_schedule(&smd_net_data_tasklet);
				break;
			} else {
				skb->dev = dev;
				skb_reserve(skb, NET_IP_ALIGN);
				ptr = skb_put(skb, sz);
				wake_lock_timeout(&p->wake_lock, HZ / 2);
				if (smd_read(p->ch, ptr, sz) != sz) {
					pr_err("rmnet_recv() smd lied about avail?!");
					ptr = 0;
					dev_kfree_skb_irq(skb);
				} else {
					skb->protocol = eth_type_trans(skb, dev);
					if (count_this_packet(ptr, skb->len)) {
#ifdef CONFIG_MSM_RMNET_DEBUG
						p->wakeups_rcv +=
							rmnet_cause_wakeup(p);
#endif
						p->stats.rx_packets++;
						p->stats.rx_bytes += skb->len;
					}
					netif_rx(skb);
				}
				continue;
			}
		}
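		/* discard path (sz > 1514, ptr == 0): relies on smd_read()
		 * accepting a NULL buffer to skip the oversized payload */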
		if (smd_read(p->ch, ptr, sz) != sz)
			pr_err("rmnet_recv() smd lied about avail?!");
	}
}
static void smd_net_data_handler(unsigned long arg)
{
	struct net_device *dev = (struct net_device *) arg;
	struct smm6260net_private *p = netdev_priv(dev);
	struct sk_buff *skb;
	void *ptr = 0;
	int sz;
	int err;

	for (;;) {
		sz = smd_cur_packet_size(p->ch);
		if (sz == 0) break;
		if (smd_read_avail(p->ch) < sz) break;
		
		if (sz > SMM6260_NET_DEFAULT_MTU) 
		{
			ptr = 0;
			//pr_err("rmnet_recv() discarding %d len\n", sz);
		}else{
			skb = dev_alloc_skb(sz);
			if (skb == NULL) {
				//pr_err("smm6260net_recv() cannot allocate skb\n");
			} else {
				skb->dev = dev;
				ptr = skb_put(skb, sz);
				wake_lock_timeout(&p->wake_lock, HZ / 2);
				if (smd_read(p->ch, ptr, sz) != sz) {
					//pr_err("smm6260net_recv() smd lied about avail?!");
					ptr = 0;
					dev_kfree_skb_irq(skb);
				} else {
					skb->protocol = htons(ETH_P_IP);//eth_type_trans(skb, dev);			
					if(count_this_packet(skb))
					{
						/* update our statistics */
#ifdef CONFIG_MSM_RMNET_DEBUG
						p->wakeups_rcv += smm6260net_cause_wakeup(p);
#endif				
						p->stats.rx_packets++;
						p->stats.rx_bytes += skb->len;
					}
					skb_reset_mac_header(skb);
					netif_rx(skb);
					//pr_info("%s: smm6260net_recv() size=%d", p->chname, skb->len);
				}
				continue;
			}
		}
		if (smd_read(p->ch, ptr, sz) != sz)
			pr_err("rmnet_recv() smd lied about avail?!");
	}
}
static int _rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct rmnet_private *p = netdev_priv(dev);
	int sdio_ret;
	struct QMI_QOS_HDR_S *qmih;
	u32 opmode;
	unsigned long flags;

	if (!netif_carrier_ok(dev)) {
		pr_err("[%s] %s: channel in reset",
			dev->name, __func__);
		goto xmit_out;
	}

	/* For QoS mode, prepend QMI header and assign flow ID from skb->mark */
	spin_lock_irqsave(&p->lock, flags);
	opmode = p->operation_mode;
	spin_unlock_irqrestore(&p->lock, flags);

	if (RMNET_IS_MODE_QOS(opmode)) {
		qmih = (struct QMI_QOS_HDR_S *)
			skb_push(skb, sizeof(struct QMI_QOS_HDR_S));
		qmih->version = 1;
		qmih->flags = 0;
		qmih->flow_id = skb->mark;
	}

	dev->trans_start = jiffies;
	sdio_ret = msm_sdio_dmux_write(p->ch_id, skb);

	if (sdio_ret != 0) {
		pr_err("[%s] %s: write returned error %d",
			dev->name, __func__, sdio_ret);
		goto xmit_out;
	}

	if (count_this_packet(skb->data, skb->len)) {
		p->stats.tx_packets++;
		p->stats.tx_bytes += skb->len;
#ifdef CONFIG_MSM_RMNET_DEBUG
		p->wakeups_xmit += rmnet_cause_wakeup(p);
#endif
	}
	DBG1("[%s] Tx packet #%lu len=%d mark=0x%x\n",
	    dev->name, p->stats.tx_packets, skb->len, skb->mark);

	return 0;
xmit_out:
	dev_kfree_skb_any(skb);
	p->stats.tx_errors++;
	return 0;
}
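Several transmit paths on this page prepend a struct QMI_QOS_HDR_S in QoS mode and stamp it with skb->mark as the flow ID. The definition is not shown here; a sketch of a plausible packed layout, inferred from how the fields are filled in above:

/* sketch only: the real definition lives in the driver headers */
struct QMI_QOS_HDR_S {
	unsigned char version; /* set to 1 by the xmit paths */
	unsigned char flags;   /* set to 0 */
	u32 flow_id;           /* copied from skb->mark */
} __attribute__((__packed__));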
Example #11
static void smux_write_done(void *dev, const void *meta_data)
{
	struct rmnet_private *p;
	u32 opmode;
	struct sk_buff *skb = NULL;
	const struct smux_meta_write  *write_meta_info = meta_data;
	unsigned long flags;

	/* validate the inputs before touching netdev_priv(dev) */
	if (!dev || !write_meta_info) {
		DBG1("%s: ERR:invalid WRITE_DONE callback received", __func__);
		return;
	}

	p = netdev_priv(dev);

	skb = (struct sk_buff *) write_meta_info->pkt_priv;

	if (!skb) {
		DBG1("%s: ERR:skb pointer NULL in WRITE_DONE"
		     " CALLBACK", __func__);
		return;
	}

	spin_lock_irqsave(&p->lock, flags);
	opmode = p->operation_mode;
	spin_unlock_irqrestore(&p->lock, flags);

	DBG1("%s: write complete\n", __func__);
	if (RMNET_IS_MODE_IP(opmode) ||
		count_this_packet(skb->data, skb->len)) {
		p->stats.tx_packets++;
		p->stats.tx_bytes += skb->len;
#ifdef CONFIG_MSM_RMNET_DEBUG
		p->wakeups_xmit += rmnet_cause_wakeup(p);
#endif
	}
	DBG1("[%s] Tx packet #%lu len=%d mark=0x%x\n",
		 ((struct net_device *)(dev))->name, p->stats.tx_packets,
		 skb->len, skb->mark);
	dev_kfree_skb_any(skb);

	spin_lock_irqsave(&p->tx_queue_lock, flags);
	if (netif_queue_stopped(dev) &&
		msm_smux_is_ch_low(p->ch_id)) {
		DBG0("%s: Low WM hit, waking queue=%p\n",
			 __func__, skb);
		netif_wake_queue(dev);
	}
	spin_unlock_irqrestore(&p->tx_queue_lock, flags);
}
static int _rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct rmnet_private *p = netdev_priv(dev);
	smd_channel_t *ch = p->ch;
	int smd_ret;
	struct QMI_QOS_HDR_S *qmih;
	u32 opmode;
	unsigned long flags;

	
	spin_lock_irqsave(&p->lock, flags);
	opmode = p->operation_mode;
	spin_unlock_irqrestore(&p->lock, flags);

	if (RMNET_IS_MODE_QOS(opmode)) {
		qmih = (struct QMI_QOS_HDR_S *)
			skb_push(skb, sizeof(struct QMI_QOS_HDR_S));
		qmih->version = 1;
		qmih->flags = 0;
		qmih->flow_id = skb->mark;
	}

	dev->trans_start = jiffies;
	smd_ret = smd_write(ch, skb->data, skb->len);
	if (smd_ret != skb->len) {
		pr_err("[%s] %s: smd_write returned error %d",
			dev->name, __func__, smd_ret);
		p->stats.tx_errors++;
		goto xmit_out;
	}

	if (RMNET_IS_MODE_IP(opmode) ||
	    count_this_packet(skb->data, skb->len)) {
		p->stats.tx_packets++;
		p->stats.tx_bytes += skb->len;
#ifdef CONFIG_MSM_RMNET_DEBUG
		p->wakeups_xmit += rmnet_cause_wakeup(p);
#endif
	}
	DBG1("[%s] Tx packet #%lu len=%d mark=0x%x\n",
	    dev->name, p->stats.tx_packets, skb->len, skb->mark);

xmit_out:
	
	dev_kfree_skb_irq(skb);
	return 0;
}
Example #13
static int rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct rmnet_private *p = netdev_priv(dev);
	smd_channel_t *ch = p->ch;
	int smd_ret;

	if (netif_queue_stopped(dev)) {
		pr_err("fatal: rmnet_xmit called when netif_queue is stopped");
		return 0;
	}

	if (smd_write_avail(ch) < skb->len) {
		rmnet_stop(dev);
		/* schedule a function to poll at exponential interval */
		init_timer(&p->smd_poll_timer);
		p->smd_poll_timer.expires = jiffies
			+ ((SMD_POLL_MILLISECS*HZ)/1000);
		p->smd_poll_timer.function = rmnet_poll_smd_write;
		if (p->skb)
			pr_err("fatal: p->skb was non-zero when"
				"we tried to scheduled timer");
		p->skb = skb;
		p->smd_poll_timer.data = (unsigned long)dev;
		add_timer(&p->smd_poll_timer);
	} else {
		smd_ret = smd_write(ch, skb->data, skb->len);
		if (smd_ret != skb->len) {
			pr_err("fatal: smd_write returned error %d", smd_ret);
			return 0;
		}

		if (count_this_packet(skb->data, skb->len)) {
			p->stats.tx_packets++;
			p->stats.tx_bytes += skb->len;
#ifdef CONFIG_MSM_RMNET_DEBUG
			p->wakeups_xmit += rmnet_cause_wakeup(p);
#endif
		}
		/* data xmited, safe to release skb */
		dev_kfree_skb_irq(skb);
	}


	return 0;
}
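rmnet_poll_smd_write() is armed above but not shown. Given the scheme (park one skb in p->skb and poll until smd_write_avail() has room), a hypothetical sketch could look like this; the exponential back-off mentioned in the comment above is omitted:

/* hypothetical timer callback: retry the parked skb or re-arm */
static void rmnet_poll_smd_write(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct rmnet_private *p = netdev_priv(dev);
	struct sk_buff *skb = p->skb;

	if (skb && smd_write_avail(p->ch) >= skb->len) {
		p->skb = NULL;
		if (smd_write(p->ch, skb->data, skb->len) != skb->len)
			pr_err("fatal: smd_write failed in poll retry");
		dev_kfree_skb_irq(skb);
		netif_wake_queue(dev);
	} else {
		/* still no room: re-arm with the same period */
		mod_timer(&p->smd_poll_timer,
			  jiffies + (SMD_POLL_MILLISECS * HZ) / 1000);
	}
}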
Example #14
static int _rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct rmnet_private *p = netdev_priv(dev);
	smd_channel_t *ch = p->ch;
	int smd_ret;
	struct QMI_QOS_HDR_S *qmih;
	u32 opmode;
	unsigned long flags;

	/* For QoS mode, prepend QMI header and assign flow ID from skb->mark */
	spin_lock_irqsave(&p->lock, flags);
	opmode = p->operation_mode;
	spin_unlock_irqrestore(&p->lock, flags);

	if (RMNET_IS_MODE_QOS(opmode)) {
		qmih = (struct QMI_QOS_HDR_S *)
			skb_push(skb, sizeof(struct QMI_QOS_HDR_S));
		qmih->version = 1;
		qmih->flags = 0;
		qmih->flow_id = skb->mark;
	}

	dev->trans_start = jiffies;
	smd_ret = smd_write(ch, skb->data, skb->len);
	if (smd_ret != skb->len) {
		pr_err("%s: smd_write returned error %d", __func__, smd_ret);
		goto xmit_out;
	}

	if (RMNET_IS_MODE_IP(opmode) ||
	    count_this_packet(skb->data, skb->len)) {
		p->stats.tx_packets++;
		p->stats.tx_bytes += skb->len;
#if 0
		p->wakeups_xmit += rmnet_cause_wakeup(p);
#endif
	}

xmit_out:
	/* data xmited, safe to release skb */
	dev_kfree_skb_irq(skb);
	return 0;
}
/* Rx Callback, Called in Work Queue context */
static void sdio_recv_notify(void *dev, struct sk_buff *skb)
{
	struct rmnet_private *p = netdev_priv(dev);
	unsigned long flags;
	u32 opmode;

	if (skb) {
		skb->dev = dev;
		/* Handle Rx frame format */
		spin_lock_irqsave(&p->lock, flags);
		opmode = p->operation_mode;
		spin_unlock_irqrestore(&p->lock, flags);

		if (RMNET_IS_MODE_IP(opmode)) {
			/* Driver in IP mode */
			skb->protocol = rmnet_ip_type_trans(skb, dev);
		} else {
			/* Driver in Ethernet mode */
			skb->protocol = eth_type_trans(skb, dev);
		}
		if (RMNET_IS_MODE_IP(opmode) ||
		    count_this_packet(skb->data, skb->len)) {
#ifdef CONFIG_MSM_RMNET_DEBUG
			p->wakeups_rcv += rmnet_cause_wakeup(p);
#endif
			p->stats.rx_packets++;
			p->stats.rx_bytes += skb->len;
		}
		DBG1("[%s] Rx packet #%lu len=%d\n",
			((struct net_device *)dev)->name,
			p->stats.rx_packets, skb->len);

		/* Deliver to network stack */
		netif_rx(skb);
	} else {
		spin_lock_irqsave(&p->lock, flags);
		if (!sdio_update_reset_state((struct net_device *)dev))
			pr_err("[%s] %s: No skb received",
				((struct net_device *)dev)->name, __func__);
		spin_unlock_irqrestore(&p->lock, flags);
	}
}
Example #16
static void rx(unsigned char *buf, int len)
{
	struct sk_buff *skb;
	void *ptr = 0;
	int sz;
	int r;

	sz = len;

	if (sz > 1514) {
		printk(KERN_ERR MODULE_NAME "gannet discarding %d len\n", sz);
		ptr = 0;
	} else {
		skb = dev_alloc_skb(sz + 14 + NET_IP_ALIGN);
		if (skb == NULL) {
			printk(KERN_ERR MODULE_NAME
				   "gannet cannot allocate skb\n");
		} else {
			skb_reserve(skb, NET_IP_ALIGN);

			ptr = skb_put(skb, 14); /* ethernet hdr */
			memcpy(&((unsigned char *) ptr)[6],
				   gthread->dev->dev_addr, 6);

			ptr = skb_put(skb, sz);
			memcpy(ptr, buf, sz);

			skb->dev = gthread->dev;
			skb->protocol = eth_type_trans(skb, gthread->dev);
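			/* the eth_type_trans() result is overwritten on the next
			 * line: every frame is delivered to the stack as IPv4 */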
			skb->protocol = htons(ETH_P_IP);
			skb->ip_summed = CHECKSUM_NONE; /* check it */

			skb->pkt_type = PACKET_HOST;

			if (count_this_packet(ptr, skb->len)) {
				gthread->priv->stats.rx_packets++;
				gthread->priv->stats.rx_bytes += skb->len;
			}
			r = netif_rx(skb);
		}
	}
}
Example #17
static int rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct rmnet_private *p = netdev_priv(dev);
	smd_channel_t *ch = p->ch;

	if (smd_write_atomic(ch, skb->data, skb->len) != skb->len) {
		pr_err("rmnet fifo full, dropping packet\n");
	} else {
		if (count_this_packet(skb->data, skb->len)) {
			p->stats.tx_packets++;
			p->stats.tx_bytes += skb->len;
#ifdef CONFIG_MSM_RMNET_DEBUG
			p->wakeups_xmit += rmnet_cause_wakeup(p);
#endif
		}
	}

	dev_kfree_skb_irq(skb);
	return 0;
}
Example #18
static void bam_write_done(void *dev, struct sk_buff *skb)
{
	struct rmnet_private *p = netdev_priv(dev);
	u32 opmode = p->operation_mode;
	unsigned long flags;

	DBG1("%s: write complete\n", __func__);
	if (RMNET_IS_MODE_IP(opmode) ||
				count_this_packet(skb->data, skb->len)) {
		p->stats.tx_packets++;
		p->stats.tx_bytes += skb->len;
#ifdef CONFIG_MSM_RMNET_DEBUG
		p->wakeups_xmit += rmnet_cause_wakeup(p);
#endif
	}

#if defined(CONFIG_ARCH_ACER_MSM8960)
	cancel_delayed_work_sync(&fast_dormancy_rmnet->fast_dormancy_work);
	if (kernel_is_in_earlysuspend())
		schedule_delayed_work(&fast_dormancy_rmnet->fast_dormancy_work, msecs_to_jiffies(3000));
	else if (fd_screen_on_delay != 0) {
		schedule_delayed_work(&fast_dormancy_rmnet->fast_dormancy_work, msecs_to_jiffies(fd_screen_on_delay*1000));
	}
#endif

	DBG1("[%s] Tx packet #%lu len=%d mark=0x%x\n",
	    ((struct net_device *)(dev))->name, p->stats.tx_packets,
	    skb->len, skb->mark);
	dev_kfree_skb_any(skb);

	spin_lock_irqsave(&p->tx_queue_lock, flags);
	if (netif_queue_stopped(dev) &&
	    msm_bam_dmux_is_ch_low(p->ch_id)) {
		DBG0("%s: Low WM hit, waking queue=%p\n",
		      __func__, skb);
		netif_wake_queue(dev);
	}
	spin_unlock_irqrestore(&p->tx_queue_lock, flags);
}
Example #19
/* Called in soft-irq context */
static void smd_net_data_handler(unsigned long arg)
{
    struct net_device *dev = (struct net_device *) arg;
    struct rmnet_private *p = netdev_priv(dev);
    struct sk_buff *skb;
    void *ptr = 0;
    int sz;
    u32 opmode = p->operation_mode;
//	unsigned long flags;
//   int max_package_size;
    for (;;) {
        sz = smd_cur_packet_size(p->ch);
        if (sz == 0) break;
        if (smd_read_avail(p->ch) < sz) break;
//ZTE_RIL_WANGCHENG_20110425 start
#ifdef CONFIG_ZTE_PLATFORM

        if (RMNET_IS_MODE_IP(opmode) ? (sz > ((dev->mtu > RMNET_DEFAULT_MTU_LEN)? dev->mtu:RMNET_DEFAULT_MTU_LEN)) :
                (sz > (((dev->mtu > RMNET_DEFAULT_MTU_LEN)? dev->mtu:RMNET_DEFAULT_MTU_LEN) + ETH_HLEN))) {
#else
        if (RMNET_IS_MODE_IP(opmode) ? (sz > dev->mtu) :
                (sz > (dev->mtu + ETH_HLEN))) {

#endif

            pr_err("rmnet_recv() discarding %d len (%d mtu)\n",
                   sz, RMNET_IS_MODE_IP(opmode) ?
                   dev->mtu : (dev->mtu + ETH_HLEN));
            ptr = 0;
        } else {
            skb = dev_alloc_skb(sz + NET_IP_ALIGN);
            if (skb == NULL) {
                pr_err("rmnet_recv() cannot allocate skb\n");
            } else {
                skb->dev = dev;
                skb_reserve(skb, NET_IP_ALIGN);
                ptr = skb_put(skb, sz);
                wake_lock_timeout(&p->wake_lock, HZ / 2);
                if (smd_read(p->ch, ptr, sz) != sz) {
                    pr_err("rmnet_recv() smd lied about avail?!");
                    ptr = 0;
                    dev_kfree_skb_irq(skb);
                } else {
                    /* Handle Rx frame format */
                    //spin_lock_irqsave(&p->lock, flags);
                    //opmode = p->operation_mode;
                    //spin_unlock_irqrestore(&p->lock, flags);

                    if (RMNET_IS_MODE_IP(opmode)) {
                        /* Driver in IP mode */
                        skb->protocol =
                            rmnet_ip_type_trans(skb, dev);
                    } else {
                        /* Driver in Ethernet mode */
                        skb->protocol =
                            eth_type_trans(skb, dev);
                    }
                    if (RMNET_IS_MODE_IP(opmode) ||
                            count_this_packet(ptr, skb->len)) {
#ifdef CONFIG_MSM_RMNET_DEBUG
                        p->wakeups_rcv +=
                            rmnet_cause_wakeup(p);
#endif
                        p->stats.rx_packets++;
                        p->stats.rx_bytes += skb->len;
                    }
                    netif_rx(skb);
                }
                continue;
            }
        }
        if (smd_read(p->ch, ptr, sz) != sz)
            pr_err("rmnet_recv() smd lied about avail?!");
    }
}

//ZTE_RIL_RJG_20101103 end

static DECLARE_TASKLET(smd_net_data_tasklet, smd_net_data_handler, 0);

static int _rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct rmnet_private *p = netdev_priv(dev);
    smd_channel_t *ch = p->ch;
    int smd_ret;
    struct QMI_QOS_HDR_S *qmih;
    u32 opmode;
    unsigned long flags;

    /* For QoS mode, prepend QMI header and assign flow ID from skb->mark */
    spin_lock_irqsave(&p->lock, flags);
    opmode = p->operation_mode;
    spin_unlock_irqrestore(&p->lock, flags);

    if (RMNET_IS_MODE_QOS(opmode)) {
        qmih = (struct QMI_QOS_HDR_S *)
               skb_push(skb, sizeof(struct QMI_QOS_HDR_S));
        qmih->version = 1;
        qmih->flags = 0;
        qmih->flow_id = skb->mark;
    }

    dev->trans_start = jiffies;
    smd_ret = smd_write(ch, skb->data, skb->len);
    if (smd_ret != skb->len) {
        pr_err("%s: smd_write returned error %d", __func__, smd_ret);
        goto xmit_out;
    }

    if (RMNET_IS_MODE_IP(opmode) ||
            count_this_packet(skb->data, skb->len)) {
        p->stats.tx_packets++;
        p->stats.tx_bytes += skb->len;
#ifdef CONFIG_MSM_RMNET_DEBUG
        p->wakeups_xmit += rmnet_cause_wakeup(p);
#endif
    }

xmit_out:
    /* data xmited, safe to release skb */
    dev_kfree_skb_irq(skb);
    return 0;
}

static void _rmnet_resume_flow(unsigned long param)
{
    struct net_device *dev = (struct net_device *)param;
    struct rmnet_private *p = netdev_priv(dev);
    struct sk_buff *skb = NULL;
    unsigned long flags;

    /* xmit and enable the flow only once even if
       multiple tasklets were scheduled by smd_net_notify */
    spin_lock_irqsave(&p->lock, flags);
    if (p->skb && (smd_write_avail(p->ch) >= p->skb->len)) {
        skb = p->skb;
        p->skb = NULL;
        spin_unlock_irqrestore(&p->lock, flags);
        _rmnet_xmit(skb, dev);
        netif_wake_queue(dev);
    } else
        spin_unlock_irqrestore(&p->lock, flags);
}

static void msm_rmnet_unload_modem(void *pil)
{
    if (pil)
        pil_put(pil);
}

static void *msm_rmnet_load_modem(struct net_device *dev)
{
    void *pil;
    int rc;
    struct rmnet_private *p = netdev_priv(dev);

    pil = pil_get("modem");
    if (IS_ERR(pil))
        pr_err("%s: modem load failed\n", __func__);
    else if (msm_rmnet_modem_wait) {
        rc = wait_for_completion_interruptible_timeout(
                 &p->complete,
                 msecs_to_jiffies(msm_rmnet_modem_wait * 1000));
        if (!rc)
            rc = -ETIMEDOUT;
        if (rc < 0) {
            pr_err("%s: wait for rmnet port failed %d\n",
                   __func__, rc);
            msm_rmnet_unload_modem(pil);
            pil = ERR_PTR(rc);
        }
    }

    return pil;
}
Example #20
/* Called in soft-irq context */
static void smd_net_data_handler(unsigned long arg)
{
	struct net_device *dev = (struct net_device *) arg;
	struct rmnet_private *p = netdev_priv(dev);
	struct sk_buff *skb;
	void *ptr = 0;
	int sz;
	u32 opmode = p->operation_mode;
	unsigned long flags;

	for (;;) {
		sz = smd_cur_packet_size(p->ch);
		if (sz == 0) break;
		if (smd_read_avail(p->ch) < sz) break;

		if (RMNET_IS_MODE_IP(opmode) ? (sz > dev->mtu) :
						(sz > (dev->mtu + ETH_HLEN))) {
			pr_err("rmnet_recv() discarding %d len (%d mtu)\n",
				sz, RMNET_IS_MODE_IP(opmode) ?
					dev->mtu : (dev->mtu + ETH_HLEN));
			ptr = 0;
		} else {
			skb = dev_alloc_skb(sz + NET_IP_ALIGN);
			if (skb == NULL) {
				pr_err("rmnet_recv() cannot allocate skb\n");
			} else {
				skb->dev = dev;
				skb_reserve(skb, NET_IP_ALIGN);
				ptr = skb_put(skb, sz);
				wake_lock_timeout(&p->wake_lock, HZ / 2);
				if (smd_read(p->ch, ptr, sz) != sz) {
					pr_err("rmnet_recv() smd lied about avail?!");
					ptr = 0;
					dev_kfree_skb_irq(skb);
				} else {
					/* Handle Rx frame format */
					spin_lock_irqsave(&p->lock, flags);
					opmode = p->operation_mode;
					spin_unlock_irqrestore(&p->lock, flags);

					if (RMNET_IS_MODE_IP(opmode)) {
						/* Driver in IP mode */
						skb->protocol =
						  rmnet_ip_type_trans(skb, dev);
					} else {
						/* Driver in Ethernet mode */
						skb->protocol =
						  eth_type_trans(skb, dev);
					}
					if (RMNET_IS_MODE_IP(opmode) ||
					    count_this_packet(ptr, skb->len)) {
#if 0
						p->wakeups_rcv +=
							rmnet_cause_wakeup(p);
#endif
						p->stats.rx_packets++;
						p->stats.rx_bytes += skb->len;
					}
					netif_rx(skb);
				}
				continue;
			}
		}
		if (smd_read(p->ch, ptr, sz) != sz)
			pr_err("rmnet_recv() smd lied about avail?!");
	}
}
Example #21
/*give the packet to TCP/IP*/
static int xmd_trans_packet(
	struct net_device *dev,
	int type,
	void *buf,
	int sz)
{
	struct rmnet_private *p = netdev_priv(dev);
	struct sk_buff *skb;
	void *ptr = NULL;

	sz += ETH_HLEN; /* make room for the 14-byte ethernet header */

#if defined (RMNET_CRITICAL_DEBUG)
	printk("\nRMNET: %d<\n", sz);
#endif

	if ((type != RMNET_IPV4_VER) && (type != RMNET_IPV6_VER )
#if defined (RMNET_ARP_ENABLE)
		&& (type != RMNET_ARP_VER )
#endif		
		) {
#if defined (RMNET_ERR)
		printk("\n%s (line %d) invalid type(%x)\n", __func__, __LINE__, type);
#endif
		p->stats.rx_errors++;
		return -EINVAL;
	}


#if defined (RMNET_CHANGE_MTU)
	if (sz > dev->mtu)
#else
	if (sz > RMNET_MTU_SIZE)
#endif
	{

#if defined (RMNET_ERR)
		printk("\n%s (line %d): discarding pkt len (%d) version %d\n", __func__, __LINE__, sz, type);
#endif
		p->stats.rx_errors++;
		return -EINVAL;
	}
	else {
		skb = dev_alloc_skb(sz + NET_IP_ALIGN);
		if (skb == NULL) {
#if defined (RMNET_ERR)
			printk("\n%s (line %d): cannot allocate dev_alloc_skb type(%x) pkt len (%d) \n",
				__func__, __LINE__, type, sz);
#endif
			p->stats.rx_dropped++;
			return -ENOMEM;
		}
		else {
			skb->dev = dev;
			skb_reserve(skb, NET_IP_ALIGN);
			ptr = skb_put(skb, sz);

			if(ptr == NULL) {
#if defined (RMNET_ERR)
				printk("\n%s (line %d): skb_put fails\n",__func__, __LINE__);
#endif
				p->stats.rx_dropped++;
				dev_kfree_skb (skb);
				return -ENOMEM;
			}
			
			wake_lock_timeout(&p->wake_lock, HZ / 2);

			/* adding ethernet header */

#if 0
			{
				char temp[] = {0xB6,0x91,0x24,0xa8,0x14,0x72,0xb6,0x91,0x24,0xa8,0x14,0x72,0x08,0x0};
				struct ethhdr *eth_hdr = (struct ethhdr *) temp;

				if (type == RMNET_IPV6_VER) {
					eth_hdr->h_proto = htons(ETH_P_IPV6);
				}
#if defined (RMNET_ARP_ENABLE)
				else if (type == RMNET_ARP_VER) {
					eth_hdr->h_proto = htons(ETH_P_ARP);
				}
#endif				
				else /* RMNET_IPV4_VER */
				{
					eth_hdr->h_proto = htons(ETH_P_IP);
				}

				memcpy((void *)eth_hdr->h_dest,
					   (void*)dev->dev_addr,
					   sizeof(eth_hdr->h_dest));
				memcpy((void *)ptr,
					   (void *)eth_hdr,
					   sizeof(struct ethhdr));
			}
#else
			if (type != p->ip_type)
			{
				if (type == RMNET_IPV6_VER) {
					p->eth_hdr.h_proto = htons(ETH_P_IPV6);
				}
#if defined (RMNET_ARP_ENABLE)				
				else if (type == RMNET_ARP_VER) {
					p->eth_hdr.h_proto = htons(ETH_P_ARP);
				}
#endif
				else /* RMNET_IPV4_VER */
				{
					p->eth_hdr.h_proto = htons(ETH_P_IP);
				}

				p->ip_type = type;
			}
			
			memcpy((void *)ptr, (void *)&p->eth_hdr, ETH_HLEN);
#endif


			memcpy(ptr + ETH_HLEN, buf, sz - ETH_HLEN);

			skb->protocol = eth_type_trans(skb, dev);
			if (count_this_packet(ptr, skb->len)) {
#ifdef CONFIG_MSM_RMNET_DEBUG
				p->wakeups_rcv += rmnet_cause_wakeup(p);
#endif
				p->stats.rx_packets++;
				p->stats.rx_bytes += skb->len;
			}
			netif_rx(skb);
			wake_unlock(&p->wake_lock);
		}
	}

	return 0;
}
Example #22
/*give the packet to TCP/IP*/
static void xmd_trans_packet(
	struct net_device *dev,
	int type,
	void *buf,
	int sz)
{
	struct rmnet_private *p = netdev_priv(dev);
	struct sk_buff *skb;
	void *ptr = NULL;

	sz += RMNET_ETH_HDR_SIZE;

#if defined (RMNET_CRITICAL_DEBUG)
      dynadbg_module(DYNADBG_CRIT|DYNADBG_TX,"\nRMNET: %d<\n",sz);
	printk("\nRMNETsend to tcp/ip : %d<\n",sz);
#endif

	if (sz > (RMNET_MTU_SIZE + RMNET_ETH_HDR_SIZE)) {
#if defined (RMNET_ERR)
             dynadbg_module(DYNADBG_WARN|DYNADBG_TX,"xmd_trans_packet() discarding %d pkt len\n", sz);
		printk("xmd_trans_packet() discarding %d pkt len\n", sz);
#endif
		ptr = 0;
		return;
	}
	else {
		skb = dev_alloc_skb(sz + NET_IP_ALIGN);
		if (skb == NULL) {
#if defined (RMNET_ERR)
                   dynadbg_module(DYNADBG_WARN|DYNADBG_TX,"xmd_trans_packet() cannot allocate skb\n");
			printk("xmd_trans_packet() cannot allocate skb\n");
#endif
			return;
		}
		else {
			skb->dev = dev;
			skb_reserve(skb, NET_IP_ALIGN);
			ptr = skb_put(skb, sz);
			wake_lock_timeout(&p->wake_lock, HZ / 2);

			/* adding ethernet header */
			{
				/* struct ethhdr eth_hdr = {0xB6,0x91,0x24,0xa8,0x14,0x72,0xb6,
										0x91,0x24,0xa8,0x14,0x72,0x08,0x0};*/
				char temp[] = {0xB6,0x91,0x24,0xa8,0x14,0x72,0xb6,0x91,0x24,
							   0xa8,0x14,0x72,0x08,0x0};
				struct ethhdr *eth_hdr = (struct ethhdr *) temp;

				if (type == RMNET_IPV6_VER) {
					eth_hdr->h_proto = htons(ETH_P_IPV6);
				}

				memcpy((void *)eth_hdr->h_dest,
					   (void*)dev->dev_addr,
					   sizeof(eth_hdr->h_dest));
				memcpy((void *)ptr,
					   (void *)eth_hdr,
					   sizeof(struct ethhdr));
			}
			memcpy(ptr + RMNET_ETH_HDR_SIZE, buf, sz - RMNET_ETH_HDR_SIZE);
			skb->protocol = eth_type_trans(skb, dev);
			if (count_this_packet(ptr, skb->len)) {
#ifdef CONFIG_MSM_RMNET_DEBUG
				p->wakeups_rcv += rmnet_cause_wakeup(p);
#endif
				p->stats.rx_packets++;
				p->stats.rx_bytes += skb->len;
			}
			netif_rx(skb);
			wake_unlock(&p->wake_lock);
		}
	}
}
/* Called in soft-irq context */
static void smd_net_data_handler(unsigned long arg)
{
	struct net_device *dev = (struct net_device *) arg;
	struct rmnet_private *p = netdev_priv(dev);
	struct sk_buff *skb;
	void *ptr = 0;
	int sz;
	u32 opmode = p->operation_mode;
	unsigned long flags;

	for (;;) {
		sz = smd_cur_packet_size(p->ch);
		if (sz == 0) break;
		if (smd_read_avail(p->ch) < sz) break;

		skb = dev_alloc_skb(sz + NET_IP_ALIGN);
		if (skb == NULL) {
			pr_err("[%s] rmnet_recv() cannot allocate skb\n",
			       dev->name);
			/* out of memory, reschedule a later attempt */
			smd_net_data_tasklet.data = (unsigned long)dev;
			tasklet_schedule(&smd_net_data_tasklet);
			break;
		} else {
			skb->dev = dev;
			skb_reserve(skb, NET_IP_ALIGN);
			ptr = skb_put(skb, sz);
			wake_lock_timeout(&p->wake_lock, HZ / 2);
			if (smd_read(p->ch, ptr, sz) != sz) {
				pr_err("[%s] rmnet_recv() smd lied about avail?!",
					dev->name);
				ptr = 0;
				dev_kfree_skb_irq(skb);
			} else {
				/* Handle Rx frame format */
				spin_lock_irqsave(&p->lock, flags);
				opmode = p->operation_mode;
				spin_unlock_irqrestore(&p->lock, flags);

				if (RMNET_IS_MODE_IP(opmode)) {
					/* Driver in IP mode */
					skb->protocol =
					  rmnet_ip_type_trans(skb, dev);
				} else {
					/* Driver in Ethernet mode */
					skb->protocol =
					  eth_type_trans(skb, dev);
				}
				if (RMNET_IS_MODE_IP(opmode) ||
				    count_this_packet(ptr, skb->len)) {
#ifdef CONFIG_MSM_RMNET_DEBUG
					p->wakeups_rcv +=
					rmnet_cause_wakeup(p);
#endif
					p->stats.rx_packets++;
					p->stats.rx_bytes += skb->len;
				}
				DBG1("[%s] Rx packet #%lu len=%d\n",
					dev->name, p->stats.rx_packets,
					skb->len);

				/* Deliver to network stack */
				netif_rx(skb);
			}
			continue;
		}
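		/* note: unreachable in this variant (both branches above break
		 * or continue); leftover from versions with a discard path */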
		if (smd_read(p->ch, ptr, sz) != sz)
			pr_err("[%s] rmnet_recv() smd lied about avail?!",
				dev->name);
	}
}
Example #24
void rmnet_smux_notify(void *priv, int event_type, const void *metadata)
{
	struct rmnet_private *p;
	struct net_device *dev;
	unsigned long flags;
	struct sk_buff *skb = NULL;
	u32 opmode;
	const struct smux_meta_disconnected *ssr_info;
	const struct smux_meta_read *read_meta_info;
	const struct smux_meta_write *write_meta_info = metadata;


	if (!priv) {
		DBG0("%s: priv(cookie) NULL, ignoring notification: %d\n",
		     __func__, event_type);
		return;
	}

	switch (event_type) {
	case SMUX_CONNECTED:
		p = netdev_priv(priv);
		dev = priv;

		DBG0("[%s] SMUX_CONNECTED event dev:%s\n", __func__, dev->name);

		netif_carrier_on(dev);
		netif_start_queue(dev);

		spin_lock_irqsave(&p->lock, flags);
		p->device_state = DEVICE_ACTIVE;
		spin_unlock_irqrestore(&p->lock, flags);
		break;

	case SMUX_DISCONNECTED:
		p = netdev_priv(priv);
		dev = priv;
		ssr_info = metadata;

		DBG0("[%s] SMUX_DISCONNECTED event dev:%s\n",
		      __func__, dev->name);

		if (ssr_info && ssr_info->is_ssr == 1)
			DBG0("SSR detected on :%s\n", dev->name);

		netif_carrier_off(dev);
		netif_stop_queue(dev);

		spin_lock_irqsave(&p->lock, flags);
		p->device_state = DEVICE_INACTIVE;
		spin_unlock_irqrestore(&p->lock, flags);
		break;

	case SMUX_READ_DONE:
		smux_read_done(priv, metadata);
		break;

	case SMUX_READ_FAIL:
		p = netdev_priv(priv);
		dev = priv;
		read_meta_info = metadata;

		if (!dev || !read_meta_info) {
			DBG1("%s: ERR:invalid read failed callback"
			     " recieved", __func__);
			return;
		}

		skb = (struct sk_buff *) read_meta_info->pkt_priv;

		if (!skb) {
			DBG1("%s: ERR:skb pointer NULL in read fail"
			     " CALLBACK", __func__);
			return;
		}

		DBG0("%s: read failed\n", __func__);

		opmode = p->operation_mode;

		if (RMNET_IS_MODE_IP(opmode) ||
		    count_this_packet(skb->data, skb->len))
			p->stats.rx_dropped++;

		dev_kfree_skb_any(skb);
		break;

	case SMUX_WRITE_DONE:
		smux_write_done(priv, metadata);
		break;

	case SMUX_WRITE_FAIL:
		p = netdev_priv(priv);
		dev = priv;
		write_meta_info = metadata;

		if (!dev || !write_meta_info) {
			DBG1("%s: ERR:invalid WRITE_DONE"
			     "callback recieved", __func__);
			return;
		}

		skb = (struct sk_buff *) write_meta_info->pkt_priv;

		if (!skb) {
			DBG1("%s: ERR:skb pointer NULL in"
			     " WRITE_DONE CALLBACK", __func__);
			return;
		}

		DBG0("%s: write failed\n", __func__);

		opmode = p->operation_mode;

		if (RMNET_IS_MODE_IP(opmode) ||
		    count_this_packet(skb->data, skb->len)) {
			p->stats.tx_dropped++;
		}

		dev_kfree_skb_any(skb);
		break;

	case SMUX_LOW_WM_HIT:
		dev = priv;
		p = netdev_priv(priv);
		DBG0("[%s] Low WM hit dev:%s\n", __func__, dev->name);
		spin_lock_irqsave(&p->tx_queue_lock, flags);
		netif_wake_queue(dev);
		spin_unlock_irqrestore(&p->tx_queue_lock, flags);
		break;

	case SMUX_HIGH_WM_HIT:
		dev = priv;
		p = netdev_priv(priv);
		DBG0("[%s] High WM hit dev:%s\n", __func__, dev->name);
		spin_lock_irqsave(&p->tx_queue_lock, flags);
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&p->tx_queue_lock, flags);
		break;

	default:
		dev = priv;
		DBG0("[%s] Invalid event:%d received on"
		     " dev: %s\n", __func__, event_type, dev->name);
		break;
	}

	return;
}