Example #1
0
/*
 * rmnet_usb_rx_fixup() - usbnet rx_fixup hook for rmnet USB data.
 *
 * Tags each received skb with the protocol matching the current
 * link-layer protocol mode and returns 1 so usbnet accepts the skb.
 */
static int rmnet_usb_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
	/* RMNET_MODE_LLP_IP: link carries raw IP frames. */
	if (test_bit(RMNET_MODE_LLP_IP, &dev->data[0]))
		skb->protocol = rmnet_ip_type_trans(skb, dev->net);
	else 
		skb->protocol = 0; /* set zero for eth mode */

	#if defined(CONFIG_MONITOR_STREAMING_PORT_SOCKET) && defined(CONFIG_MSM_NONSMD_PACKET_FILTER)
	/*
	 * While a streaming socket is connected, extend the USB runtime
	 * autosuspend delay once (latched via use_extend_suspend_timer)
	 * so rx traffic does not repeatedly trigger suspend/resume.
	 */
	if (is_streaming_sock_connectted) {
	    if (!use_extend_suspend_timer) {
	        struct usb_device *udev = dev->udev;

	        if (udev) {
	            use_extend_suspend_timer = true;
	            pm_runtime_set_autosuspend_delay(&udev->dev, EXTEND_AUTOSUSPEND_TIMER);
	            dev_info(&udev->dev, "is_streaming_sock_connectted:%d pm_runtime_set_autosuspend_delay %d\n", is_streaming_sock_connectted, EXTEND_AUTOSUSPEND_TIMER);
	        }
	    }
	}
	#endif 

	DBG1("[%s] Rx packet #%lu len=%d\n",
		dev->net->name, dev->net->stats.rx_packets, skb->len);

	/* Non-zero tells usbnet the skb is valid and should be delivered. */
	return 1;
}
/*
 * sdio_recv_notify() - rx callback, runs in workqueue context.
 *
 * Stamps the skb with the protocol for the current operating mode,
 * updates rx statistics and hands the packet to the network stack.
 */
static void sdio_recv_notify(void *dev, struct sk_buff *skb)
{
	struct rmnet_private *p = netdev_priv(dev);
	unsigned long flags;
	u32 mode;

	if (!skb) {
		pr_err("[lte] Error - %s: No skb received", __func__);
		return;
	}

	skb->dev = dev;

	/* Snapshot the operating mode under the lock. */
	spin_lock_irqsave(&p->lock, flags);
	mode = p->operation_mode;
	spin_unlock_irqrestore(&p->lock, flags);

	/* Attach protocol according to the link framing (IP vs Ethernet). */
	skb->protocol = RMNET_IS_MODE_IP(mode) ?
			rmnet_ip_type_trans(skb, dev) :
			eth_type_trans(skb, dev);

	if (RMNET_IS_MODE_IP(mode) ||
	    count_this_packet(skb->data, skb->len)) {
#ifdef CONFIG_MSM_RMNET_DEBUG
		p->wakeups_rcv += rmnet_cause_wakeup(p);
#endif
		p->stats.rx_packets++;
		p->stats.rx_bytes += skb->len;
	}

	/* Deliver to network stack. */
	netif_rx(skb);
}
Example #3
0
/*
 * GobiNetDriverRxFixup() - usbnet rx_fixup hook for Gobi devices.
 *
 * The link carries raw IP frames, so every inbound skb is tagged via
 * rmnet_ip_type_trans(); returning 1 tells usbnet to keep the skb.
 */
static int GobiNetDriverRxFixup(
    struct usbnet  *pDev,
    struct sk_buff *pSKB )
{
    struct net_device *netdev = pDev->net;

    pSKB->protocol = rmnet_ip_type_trans(pSKB, netdev);

    return 1;
}
Example #4
0
/*
 * smux_read_done() - SMUX read-completion callback.
 *
 * Validates the callback arguments, stamps the skb with the protocol
 * for the current operating mode, updates rx statistics and delivers
 * the packet to the network stack.
 */
static void smux_read_done(void *rcv_dev, const void *meta_data)
{
	struct net_device *dev = rcv_dev;
	const struct smux_meta_read *meta = meta_data;
	struct rmnet_private *p;
	struct sk_buff *skb;
	unsigned long flags;
	u32 mode;

	if (!dev || !meta) {
		DBG1("%s:invalid read_done callback recieved", __func__);
		return;
	}

	p = netdev_priv(dev);
	skb = (struct sk_buff *)meta->pkt_priv;

	if (!skb || skb->dev != dev) {
		DBG1("%s: ERR:skb pointer NULL in READ_DONE CALLBACK",
		      __func__);
		return;
	}

	/* Snapshot the operating mode under the lock. */
	spin_lock_irqsave(&p->lock, flags);
	mode = p->operation_mode;
	spin_unlock_irqrestore(&p->lock, flags);

	/* Attach protocol according to the link framing (IP vs Ethernet). */
	skb->protocol = RMNET_IS_MODE_IP(mode) ?
			rmnet_ip_type_trans(skb, dev) :
			eth_type_trans(skb, dev);

	if (RMNET_IS_MODE_IP(mode) ||
	    count_this_packet(skb->data, skb->len)) {
#ifdef CONFIG_MSM_RMNET_DEBUG
		p->wakeups_rcv += rmnet_cause_wakeup(p);
#endif
		p->stats.rx_packets++;
		p->stats.rx_bytes += skb->len;
	}

	DBG2("[%s] Rx packet #%lu len=%d\n",
	     dev->name, p->stats.rx_packets, skb->len);

	/* Deliver to network stack */
	netif_rx(skb);
}
/*
 * rmnet_usb_rx_fixup() - usbnet rx_fixup hook.
 *
 * In raw-IP link-layer mode the skb gets the IP protocol from
 * rmnet_ip_type_trans(); otherwise protocol is zeroed for eth mode.
 * Returns 1 so usbnet accepts the skb.
 */
static int rmnet_usb_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
	skb->protocol = test_bit(RMNET_MODE_LLP_IP, &dev->data[0]) ?
			rmnet_ip_type_trans(skb, dev->net) : 0;

	DBG1("[%s] Rx packet #%lu len=%d\n",
		dev->net->name, dev->net->stats.rx_packets, skb->len);

	return 1;
}
/* Rx Callback, Called in Work Queue context */
/*
 * sdio_recv_notify() - stamps the received skb with the protocol for
 * the current operating mode, updates rx stats and delivers it to the
 * stack. A NULL skb signals an error/reset condition on the SDIO link.
 */
static void sdio_recv_notify(void *dev, struct sk_buff *skb)
{
	struct rmnet_private *p = netdev_priv(dev);
	unsigned long flags;
	u32 opmode;

	if (skb) {
		skb->dev = dev;
		/* Handle Rx frame format: snapshot mode under the lock. */
		spin_lock_irqsave(&p->lock, flags);
		opmode = p->operation_mode;
		spin_unlock_irqrestore(&p->lock, flags);

		if (RMNET_IS_MODE_IP(opmode)) {
			/* Driver in IP mode */
			skb->protocol = rmnet_ip_type_trans(skb, dev);
		} else {
			/* Driver in Ethernet mode */
			skb->protocol = eth_type_trans(skb, dev);
		}
		if (RMNET_IS_MODE_IP(opmode) ||
		    count_this_packet(skb->data, skb->len)) {
#ifdef CONFIG_MSM_RMNET_DEBUG
			p->wakeups_rcv += rmnet_cause_wakeup(p);
#endif
			p->stats.rx_packets++;
			p->stats.rx_bytes += skb->len;
		}
		DBG1("[%s] Rx packet #%lu len=%d\n",
			((struct net_device *)dev)->name,
			p->stats.rx_packets, skb->len);

		/* Deliver to network stack */
		netif_rx(skb);
	} else {
		/*
		 * No skb: check/record reset state under the lock; only log
		 * an error when this is not an expected reset transition.
		 */
		spin_lock_irqsave(&p->lock, flags);
		if (!sdio_update_reset_state((struct net_device *)dev))
			pr_err("[%s] %s: No skb received",
				((struct net_device *)dev)->name, __func__);
		spin_unlock_irqrestore(&p->lock, flags);
	}
}
/*
 * rmnet_usb_rx_fixup() - usbnet rx_fixup hook (vendor variant with
 * low-power-idle management).
 *
 * When rnmet_usb_hlt_enabled is set, rx traffic disables low-power idle
 * (disable_hlt, latched by rnmet_usb_hlt_timer_enabled) and re-arms a
 * 2-second idle timer under rmnet_usb_hlt_lock; a CPU-frequency timer
 * may additionally be enabled. The skb is then tagged with the protocol
 * for the current link-layer mode. Returns 1 so usbnet keeps the skb.
 */
static int rmnet_usb_rx_fixup(struct usbnet *dev,
	struct sk_buff *skb)
{
//++SSD_RIL:20120731: For tx/rx enable_hlt/disable_hlt
	if (rnmet_usb_hlt_enabled == 1 ) {
//++SSD_RIL:20120814: For CPU/Freq min default value
		int is_disable_flt = 0;
//--SSD_RIL
		unsigned long flags = 0;
		spin_lock_irqsave(&rmnet_usb_hlt_lock, flags);
		if ( rnmet_usb_hlt_timer_enabled == 0 ) {
			/* First packet of a burst: block low-power idle once. */
			rnmet_usb_hlt_timer_enabled = 1;
			disable_hlt();
//++SSD_RIL:20120814: For CPU/Freq min default value
			is_disable_flt = 1;
//--SSD_RIL
			pr_info("%s: rmnet hlt disable\n", __func__);
		}
		/* Push the idle timeout 2 s into the future on every packet. */
		del_timer(&rmnet_usb_lp2_in_idle_timer);
		mod_timer(&rmnet_usb_lp2_in_idle_timer, jiffies + msecs_to_jiffies(2000));
		spin_unlock_irqrestore(&rmnet_usb_hlt_lock, flags);
//++SSD_RIL:20120814: For CPU/Freq min default value
		/* Called outside the lock; only on the transition above. */
		if ( is_disable_flt == 1 && rnmet_usb_cpu_freq_enabled == 1 ) {
			rmnet_usb_freq_timer_enable();
		}
//--SSD_RIL
	}
//--SSD_RIL

	if (test_bit(RMNET_MODE_LLP_IP, &dev->data[0]))
		skb->protocol = rmnet_ip_type_trans(skb, dev->net);
	else /*set zero for eth mode*/
		skb->protocol = 0;

	DBG1("[%s] Rx packet #%lu len=%d\n",
		dev->net->name, dev->net->stats.rx_packets, skb->len);

	return 1;
}
/* Called in soft-irq context */
static void smd_net_data_handler(unsigned long arg)
{
	struct net_device *dev = (struct net_device *) arg;
	struct rmnet_private *p = netdev_priv(dev);
	struct sk_buff *skb;
	void *ptr = 0;
	int sz;
	u32 opmode = p->operation_mode;
	unsigned long flags;

	for (;;) {
		sz = smd_cur_packet_size(p->ch);
		if (sz == 0) break;
		if (smd_read_avail(p->ch) < sz) break;

		skb = dev_alloc_skb(sz + NET_IP_ALIGN);
		if (skb == NULL) {
			pr_err("[%s] rmnet_recv() cannot allocate skb\n",
			       dev->name);
			/* out of memory, reschedule a later attempt */
			smd_net_data_tasklet.data = (unsigned long)dev;
			tasklet_schedule(&smd_net_data_tasklet);
			break;
		} else {
			skb->dev = dev;
			skb_reserve(skb, NET_IP_ALIGN);
			ptr = skb_put(skb, sz);
			wake_lock_timeout(&p->wake_lock, HZ / 2);
			if (smd_read(p->ch, ptr, sz) != sz) {
				pr_err("[%s] rmnet_recv() smd lied about avail?!",
					dev->name);
				ptr = 0;
				dev_kfree_skb_irq(skb);
			} else {
				/* Handle Rx frame format */
				spin_lock_irqsave(&p->lock, flags);
				opmode = p->operation_mode;
				spin_unlock_irqrestore(&p->lock, flags);

				if (RMNET_IS_MODE_IP(opmode)) {
					/* Driver in IP mode */
					skb->protocol =
					  rmnet_ip_type_trans(skb, dev);
				} else {
					/* Driver in Ethernet mode */
					skb->protocol =
					  eth_type_trans(skb, dev);
				}
				if (RMNET_IS_MODE_IP(opmode) ||
				    count_this_packet(ptr, skb->len)) {
#ifdef CONFIG_MSM_RMNET_DEBUG
					p->wakeups_rcv +=
					rmnet_cause_wakeup(p);
#endif
					p->stats.rx_packets++;
					p->stats.rx_bytes += skb->len;
				}
				DBG1("[%s] Rx packet #%lu len=%d\n",
					dev->name, p->stats.rx_packets,
					skb->len);

				/* Deliver to network stack */
				netif_rx(skb);
			}
			continue;
		}
		if (smd_read(p->ch, ptr, sz) != sz)
			pr_err("[%s] rmnet_recv() smd lied about avail?!",
				dev->name);
	}
}
Example #9
0
/* Called in soft-irq context */
/*
 * smd_net_data_handler() - drain received packets from the SMD channel
 * (ZTE variant). Oversized packets (larger than MTU, or MTU + ETH_HLEN
 * in Ethernet mode) are discarded: ptr stays 0 and the trailing
 * smd_read() at the bottom of the loop consumes the packet with a NULL
 * buffer — presumably smd_read() drops data in that case; TODO confirm
 * against the SMD API. opmode is read once without the lock here (the
 * locked re-read is commented out), so a concurrent mode change may be
 * observed late — NOTE(review): verify this relaxation is intended.
 */
static void smd_net_data_handler(unsigned long arg)
{
    struct net_device *dev = (struct net_device *) arg;
    struct rmnet_private *p = netdev_priv(dev);
    struct sk_buff *skb;
    void *ptr = 0;
    int sz;
    u32 opmode = p->operation_mode;
//	unsigned long flags;
//   int max_package_size;
    for (;;) {
        sz = smd_cur_packet_size(p->ch);
        if (sz == 0) break;
        if (smd_read_avail(p->ch) < sz) break;
//ZTE_RIL_WANGCHENG_20110425 start
#ifdef CONFIG_ZTE_PLATFORM
        /* ZTE build: enforce at least RMNET_DEFAULT_MTU_LEN as the limit. */
        if (RMNET_IS_MODE_IP(opmode) ? (sz > ((dev->mtu > RMNET_DEFAULT_MTU_LEN)? dev->mtu:RMNET_DEFAULT_MTU_LEN)) :
                (sz > (((dev->mtu > RMNET_DEFAULT_MTU_LEN)? dev->mtu:RMNET_DEFAULT_MTU_LEN) + ETH_HLEN))) {
#else
        if (RMNET_IS_MODE_IP(opmode) ? (sz > dev->mtu) :
                (sz > (dev->mtu + ETH_HLEN))) {

#endif

            pr_err("rmnet_recv() discarding %d len (%d mtu)\n",
                   sz, RMNET_IS_MODE_IP(opmode) ?
                   dev->mtu : (dev->mtu + ETH_HLEN));
            /* ptr == 0: fall through to the drain smd_read() below. */
            ptr = 0;
        } else {
            skb = dev_alloc_skb(sz + NET_IP_ALIGN);
            if (skb == NULL) {
                pr_err("rmnet_recv() cannot allocate skb\n");
            } else {
                skb->dev = dev;
                skb_reserve(skb, NET_IP_ALIGN);
                ptr = skb_put(skb, sz);
                wake_lock_timeout(&p->wake_lock, HZ / 2);
                if (smd_read(p->ch, ptr, sz) != sz) {
                    pr_err("rmnet_recv() smd lied about avail?!");
                    ptr = 0;
                    dev_kfree_skb_irq(skb);
                } else {
                    /* Handle Rx frame format */
                    //spin_lock_irqsave(&p->lock, flags);
                    //opmode = p->operation_mode;
                    //spin_unlock_irqrestore(&p->lock, flags);

                    if (RMNET_IS_MODE_IP(opmode)) {
                        /* Driver in IP mode */
                        skb->protocol =
                            rmnet_ip_type_trans(skb, dev);
                    } else {
                        /* Driver in Ethernet mode */
                        skb->protocol =
                            eth_type_trans(skb, dev);
                    }
                    if (RMNET_IS_MODE_IP(opmode) ||
                            count_this_packet(ptr, skb->len)) {
#ifdef CONFIG_MSM_RMNET_DEBUG
                        p->wakeups_rcv +=
                            rmnet_cause_wakeup(p);
#endif
                        p->stats.rx_packets++;
                        p->stats.rx_bytes += skb->len;
                    }
                    netif_rx(skb);
                }
                continue;
            }
        }
        /* Reached only on discard or alloc failure: drain the packet. */
        if (smd_read(p->ch, ptr, sz) != sz)
            pr_err("rmnet_recv() smd lied about avail?!");
    }
}

//ZTE_RIL_RJG_20101103 end

/* Tasklet wrapper for smd_net_data_handler(); its data argument
 * (the net_device cast to unsigned long) starts at 0. */
static DECLARE_TASKLET(smd_net_data_tasklet, smd_net_data_handler, 0);

static int _rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct rmnet_private *p = netdev_priv(dev);
    smd_channel_t *ch = p->ch;
    int smd_ret;
    struct QMI_QOS_HDR_S *qmih;
    u32 opmode;
    unsigned long flags;

    /* For QoS mode, prepend QMI header and assign flow ID from skb->mark */
    spin_lock_irqsave(&p->lock, flags);
    opmode = p->operation_mode;
    spin_unlock_irqrestore(&p->lock, flags);

    if (RMNET_IS_MODE_QOS(opmode)) {
        qmih = (struct QMI_QOS_HDR_S *)
               skb_push(skb, sizeof(struct QMI_QOS_HDR_S));
        qmih->version = 1;
        qmih->flags = 0;
        qmih->flow_id = skb->mark;
    }

    dev->trans_start = jiffies;
    smd_ret = smd_write(ch, skb->data, skb->len);
    if (smd_ret != skb->len) {
        pr_err("%s: smd_write returned error %d", __func__, smd_ret);
        goto xmit_out;
    }

    if (RMNET_IS_MODE_IP(opmode) ||
            count_this_packet(skb->data, skb->len)) {
        p->stats.tx_packets++;
        p->stats.tx_bytes += skb->len;
#ifdef CONFIG_MSM_RMNET_DEBUG
        p->wakeups_xmit += rmnet_cause_wakeup(p);
#endif
    }

xmit_out:
    /* data xmited, safe to release skb */
    dev_kfree_skb_irq(skb);
    return 0;
}

/*
 * _rmnet_resume_flow() - retransmit the deferred skb and re-enable the
 * tx queue once channel space is available.
 *
 * The pending skb is claimed under the lock so that even if multiple
 * tasklets were scheduled by smd_net_notify, only one of them actually
 * transmits and wakes the queue.
 */
static void _rmnet_resume_flow(unsigned long param)
{
    struct net_device *dev = (struct net_device *)param;
    struct rmnet_private *p = netdev_priv(dev);
    struct sk_buff *pending = NULL;
    unsigned long flags;

    spin_lock_irqsave(&p->lock, flags);
    if (p->skb && smd_write_avail(p->ch) >= p->skb->len) {
        pending = p->skb;
        p->skb = NULL;
    }
    spin_unlock_irqrestore(&p->lock, flags);

    if (pending) {
        _rmnet_xmit(pending, dev);
        netif_wake_queue(dev);
    }
}

/* Release the PIL handle obtained by msm_rmnet_load_modem(); NULL is a no-op. */
static void msm_rmnet_unload_modem(void *pil)
{
    if (!pil)
        return;

    pil_put(pil);
}

/*
 * msm_rmnet_load_modem() - load the modem image via PIL and optionally
 * wait for the rmnet port to come up.
 *
 * Returns the PIL handle on success, or an ERR_PTR on load/wait failure
 * (the handle is released before returning an error from the wait path).
 */
static void *msm_rmnet_load_modem(struct net_device *dev)
{
    struct rmnet_private *p = netdev_priv(dev);
    void *pil = pil_get("modem");
    int rc;

    if (IS_ERR(pil)) {
        pr_err("%s: modem load failed\n", __func__);
        return pil;
    }

    /* A zero wait means callers do not block for port readiness. */
    if (!msm_rmnet_modem_wait)
        return pil;

    rc = wait_for_completion_interruptible_timeout(
             &p->complete,
             msecs_to_jiffies(msm_rmnet_modem_wait * 1000));
    if (!rc)
        rc = -ETIMEDOUT;
    if (rc < 0) {
        pr_err("%s: wait for rmnet port failed %d\n",
               __func__, rc);
        msm_rmnet_unload_modem(pil);
        pil = ERR_PTR(rc);
    }

    return pil;
}
Example #10
0
/* Called in soft-irq context */
/*
 * smd_net_data_handler() - drain received packets from the SMD channel.
 * Oversized packets (larger than MTU, or MTU + ETH_HLEN in Ethernet
 * mode) are discarded: ptr stays 0 and the trailing smd_read() at the
 * bottom of the loop consumes the packet with a NULL buffer —
 * presumably smd_read() drops the data in that case; TODO confirm
 * against the SMD API.
 */
static void smd_net_data_handler(unsigned long arg)
{
	struct net_device *dev = (struct net_device *) arg;
	struct rmnet_private *p = netdev_priv(dev);
	struct sk_buff *skb;
	void *ptr = 0;
	int sz;
	u32 opmode = p->operation_mode;
	unsigned long flags;

	for (;;) {
		sz = smd_cur_packet_size(p->ch);
		if (sz == 0) break;
		if (smd_read_avail(p->ch) < sz) break;

		if (RMNET_IS_MODE_IP(opmode) ? (sz > dev->mtu) :
						(sz > (dev->mtu + ETH_HLEN))) {
			pr_err("rmnet_recv() discarding %d len (%d mtu)\n",
				sz, RMNET_IS_MODE_IP(opmode) ?
					dev->mtu : (dev->mtu + ETH_HLEN));
			/* ptr == 0: fall through to the drain smd_read() below. */
			ptr = 0;
		} else {
			skb = dev_alloc_skb(sz + NET_IP_ALIGN);
			if (skb == NULL) {
				pr_err("rmnet_recv() cannot allocate skb\n");
			} else {
				skb->dev = dev;
				skb_reserve(skb, NET_IP_ALIGN);
				ptr = skb_put(skb, sz);
				wake_lock_timeout(&p->wake_lock, HZ / 2);
				if (smd_read(p->ch, ptr, sz) != sz) {
					pr_err("rmnet_recv() smd lied about avail?!");
					ptr = 0;
					dev_kfree_skb_irq(skb);
				} else {
					/* Handle Rx frame format: re-read mode under the lock. */
					spin_lock_irqsave(&p->lock, flags);
					opmode = p->operation_mode;
					spin_unlock_irqrestore(&p->lock, flags);

					if (RMNET_IS_MODE_IP(opmode)) {
						/* Driver in IP mode */
						skb->protocol =
						  rmnet_ip_type_trans(skb, dev);
					} else {
						/* Driver in Ethernet mode */
						skb->protocol =
						  eth_type_trans(skb, dev);
					}
					if (RMNET_IS_MODE_IP(opmode) ||
					    count_this_packet(ptr, skb->len)) {
#if 0
						p->wakeups_rcv +=
							rmnet_cause_wakeup(p);
#endif
						p->stats.rx_packets++;
						p->stats.rx_bytes += skb->len;
					}
					netif_rx(skb);
				}
				continue;
			}
		}
		/* Reached only on discard or alloc failure: drain the packet. */
		if (smd_read(p->ch, ptr, sz) != sz)
			pr_err("rmnet_recv() smd lied about avail?!");
	}
}