Example #1
static int _rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct rmnet_private *p = netdev_priv(dev);
	int bam_ret;
	struct QMI_QOS_HDR_S *qmih;
	u32 opmode;
	unsigned long flags;

	/* For QoS mode, prepend QMI header and assign flow ID from skb->mark */
	spin_lock_irqsave(&p->lock, flags);
	opmode = p->operation_mode;
	spin_unlock_irqrestore(&p->lock, flags);

	if (RMNET_IS_MODE_QOS(opmode)) {
		qmih = (struct QMI_QOS_HDR_S *)
			skb_push(skb, sizeof(struct QMI_QOS_HDR_S));
		qmih->version = 1;
		qmih->flags = 0;
		qmih->flow_id = skb->mark;
	}

	dev->trans_start = jiffies;
	/* if write() succeeds, skb access is unsafe in this process */
	bam_ret = msm_bam_dmux_write(p->ch_id, skb);

	if (bam_ret != 0 && bam_ret != -EAGAIN && bam_ret != -EFAULT) {
		pr_err("[%s] %s: write returned error %d",
			dev->name, __func__, bam_ret);
		return -EPERM;
	}

	return bam_ret;
}
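
Unlike the variants below, this BAM flavor hands -EAGAIN back to its caller without freeing the skb, so the caller can apply flow control and retry. A minimal sketch of such a caller, modeled on the rmnet_xmit() wrapper in msm_rmnet_bam.c (the wake-up handshake and the error disposition below are assumptions, not the verbatim driver):

/* Sketch of a flow-controlled caller; modeled on msm_rmnet_bam.c but
 * not verbatim -- the error disposition here is an assumption. */
static int rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct rmnet_private *p = netdev_priv(dev);
	int ret;

	if (netif_queue_stopped(dev)) {
		pr_err("[%s] fatal: xmit called while queue is stopped\n",
		       dev->name);
		return 0;
	}

	ret = _rmnet_xmit(skb, dev);
	if (ret == -EAGAIN) {
		/* No room in the BAM ring: pause the queue and let the
		 * stack requeue the skb; the dmux write-done callback is
		 * expected to call netif_wake_queue() later. */
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}
	if (ret) {
		/* Assumption: on the remaining error paths the skb was
		 * not consumed by the dmux, so drop it here. */
		dev_kfree_skb_any(skb);
		p->stats.tx_errors++;
	}

	return NETDEV_TX_OK;
}
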
Example #2
static int _rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct rmnet_private *p = netdev_priv(dev);
	int sdio_ret;
	struct QMI_QOS_HDR_S *qmih;
	u32 opmode;
	unsigned long flags;

	if (!netif_carrier_ok(dev)) {
		pr_err("[%s] %s: channel in reset",
			dev->name, __func__);
		goto xmit_out;
	}

	/* For QoS mode, prepend QMI header and assign flow ID from skb->mark */
	spin_lock_irqsave(&p->lock, flags);
	opmode = p->operation_mode;
	spin_unlock_irqrestore(&p->lock, flags);

	if (RMNET_IS_MODE_QOS(opmode)) {
		qmih = (struct QMI_QOS_HDR_S *)
			skb_push(skb, sizeof(struct QMI_QOS_HDR_S));
		qmih->version = 1;
		qmih->flags = 0;
		qmih->flow_id = skb->mark;
	}

	dev->trans_start = jiffies;
	sdio_ret = msm_sdio_dmux_write(p->ch_id, skb);

	if (sdio_ret != 0) {
		pr_err("[%s] %s: write returned error %d",
			dev->name, __func__, sdio_ret);
		goto xmit_out;
	}

	if (count_this_packet(skb->data, skb->len)) {
		p->stats.tx_packets++;
		p->stats.tx_bytes += skb->len;
#ifdef CONFIG_MSM_RMNET_DEBUG
		p->wakeups_xmit += rmnet_cause_wakeup(p);
#endif
	}
	DBG1("[%s] Tx packet #%lu len=%d mark=0x%x\n",
	    dev->name, p->stats.tx_packets, skb->len, skb->mark);

	return 0;
xmit_out:
	dev_kfree_skb_any(skb);
	p->stats.tx_errors++;
	return 0;
}
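
Note the contrast with the BAM variant: here the skb is still dereferenced for statistics after a successful msm_sdio_dmux_write(), which is only safe if the SDIO dmux copies or defers the free rather than releasing the skb inline. The stats themselves are gated on count_this_packet(); in the msm_rmnet drivers that helper simply declines to count ARP frames. A minimal sketch of it, with the exact filter treated as an assumption:

#include <linux/if_ether.h>

/* Sketch of the stats filter used above: don't count ARP traffic.
 * Assumption: modeled on count_this_packet() in msm_rmnet.c; the
 * real filter in a given tree may differ. */
static int count_this_packet(void *_hdr, int len)
{
	struct ethhdr *hdr = _hdr;

	if (len >= ETH_HLEN && hdr->h_proto == htons(ETH_P_ARP))
		return 0;

	return 1;
}
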
Example #3
static int _rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct rmnet_private *p = netdev_priv(dev);
	smd_channel_t *ch = p->ch;
	int smd_ret;
	struct QMI_QOS_HDR_S *qmih;
	u32 opmode;
	unsigned long flags;

	/* For QoS mode, prepend QMI header and assign flow ID from skb->mark */
	spin_lock_irqsave(&p->lock, flags);
	opmode = p->operation_mode;
	spin_unlock_irqrestore(&p->lock, flags);

	if (RMNET_IS_MODE_QOS(opmode)) {
		qmih = (struct QMI_QOS_HDR_S *)
			skb_push(skb, sizeof(struct QMI_QOS_HDR_S));
		qmih->version = 1;
		qmih->flags = 0;
		qmih->flow_id = skb->mark;
	}

	dev->trans_start = jiffies;
	smd_ret = smd_write(ch, skb->data, skb->len);
	if (smd_ret != skb->len) {
		pr_err("[%s] %s: smd_write returned error %d",
			dev->name, __func__, smd_ret);
		p->stats.tx_errors++;
		goto xmit_out;
	}

	if (RMNET_IS_MODE_IP(opmode) ||
	    count_this_packet(skb->data, skb->len)) {
		p->stats.tx_packets++;
		p->stats.tx_bytes += skb->len;
#ifdef CONFIG_MSM_RMNET_DEBUG
		p->wakeups_xmit += rmnet_cause_wakeup(p);
#endif
	}
	DBG1("[%s] Tx packet #%lu len=%d mark=0x%x\n",
	    dev->name, p->stats.tx_packets, skb->len, skb->mark);

xmit_out:
	/* skb is released on both the success and error paths */
	dev_kfree_skb_irq(skb);
	return 0;
}

Example #4
static int _rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct rmnet_private *p = netdev_priv(dev);
	smd_channel_t *ch = p->ch;
	int smd_ret;
	struct QMI_QOS_HDR_S *qmih;
	u32 opmode;
	unsigned long flags;

	/* For QoS mode, prepend QMI header and assign flow ID from skb->mark */
	spin_lock_irqsave(&p->lock, flags);
	opmode = p->operation_mode;
	spin_unlock_irqrestore(&p->lock, flags);

	if (RMNET_IS_MODE_QOS(opmode)) {
		qmih = (struct QMI_QOS_HDR_S *)
			skb_push(skb, sizeof(struct QMI_QOS_HDR_S));
		qmih->version = 1;
		qmih->flags = 0;
		qmih->flow_id = skb->mark;
	}

	dev->trans_start = jiffies;
	smd_ret = smd_write(ch, skb->data, skb->len);
	if (smd_ret != skb->len) {
		pr_err("%s: smd_write returned error %d", __func__, smd_ret);
		goto xmit_out;
	}

	if (RMNET_IS_MODE_IP(opmode) ||
	    count_this_packet(skb->data, skb->len)) {
		p->stats.tx_packets++;
		p->stats.tx_bytes += skb->len;
#if 0
		p->wakeups_xmit += rmnet_cause_wakeup(p);
#endif
	}

xmit_out:
	/* data transmitted or dropped; either way, release the skb */
	dev_kfree_skb_irq(skb);
	return 0;
}

Example #5
static int _rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct rmnet_private *p = netdev_priv(dev);
	struct QMI_QOS_HDR_S *qmih;
	u32 opmode;
	unsigned long flags;

	/* For QoS mode, prepend QMI header and assign flow ID from skb->mark */
	spin_lock_irqsave(&p->lock, flags);
	opmode = p->operation_mode;
	spin_unlock_irqrestore(&p->lock, flags);

	if (RMNET_IS_MODE_QOS(opmode)) {
		qmih = (struct QMI_QOS_HDR_S *)
			   skb_push(skb, sizeof(struct QMI_QOS_HDR_S));
		qmih->version = 1;
		qmih->flags = 0;
		qmih->flow_id = skb->mark;
	}

	dev->trans_start = jiffies;

	return msm_smux_write(p->ch_id, skb, skb->data, skb->len);
}
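
All five TX variants prepend the same QoS header when RMNET_IS_MODE_QOS() holds. Reconstructed from the field accesses above, the header looks roughly like this (a sketch only; the authoritative definition lives in the MSM kernel headers, and the exact field widths here are assumptions):

/* Sketch of the QoS header implied by the assignments above.
 * Assumption: packed, 6 bytes on 32-bit ARM; check the real
 * definition in the MSM headers before relying on this layout. */
struct QMI_QOS_HDR_S {
	unsigned char version;	/* always set to 1 by these drivers */
	unsigned char flags;	/* always set to 0 */
	unsigned long flow_id;	/* taken from skb->mark */
} __attribute__((__packed__));
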
Example #6
/* Called in soft-irq context */
static void smd_net_data_handler(unsigned long arg)
{
    struct net_device *dev = (struct net_device *) arg;
    struct rmnet_private *p = netdev_priv(dev);
    struct sk_buff *skb;
    void *ptr = NULL;
    int sz;
    u32 opmode = p->operation_mode;	/* read unlocked; see the locked read in _rmnet_xmit() */
    for (;;) {
        sz = smd_cur_packet_size(p->ch);
        if (sz == 0) break;
        if (smd_read_avail(p->ch) < sz) break;
//ZTE_RIL_WANGCHENG_20110425 start
#ifdef CONFIG_ZTE_PLATFORM
        /* ZTE platforms accept packets up to max(dev->mtu, RMNET_DEFAULT_MTU_LEN) */
        if (RMNET_IS_MODE_IP(opmode) ?
            (sz > ((dev->mtu > RMNET_DEFAULT_MTU_LEN) ?
                   dev->mtu : RMNET_DEFAULT_MTU_LEN)) :
            (sz > (((dev->mtu > RMNET_DEFAULT_MTU_LEN) ?
                    dev->mtu : RMNET_DEFAULT_MTU_LEN) + ETH_HLEN))) {
#else
        if (RMNET_IS_MODE_IP(opmode) ? (sz > dev->mtu) :
                (sz > (dev->mtu + ETH_HLEN))) {
#endif

            pr_err("rmnet_recv() discarding %d len (%d mtu)\n",
                   sz, RMNET_IS_MODE_IP(opmode) ?
                   dev->mtu : (dev->mtu + ETH_HLEN));
            ptr = NULL;	/* fall through and drain the oversize packet below */
        } else {
            skb = dev_alloc_skb(sz + NET_IP_ALIGN);
            if (skb == NULL) {
                pr_err("rmnet_recv() cannot allocate skb\n");
            } else {
                skb->dev = dev;
                skb_reserve(skb, NET_IP_ALIGN);
                ptr = skb_put(skb, sz);
                wake_lock_timeout(&p->wake_lock, HZ / 2);
                if (smd_read(p->ch, ptr, sz) != sz) {
                    pr_err("rmnet_recv() smd lied about avail?!");
                    ptr = 0;
                    dev_kfree_skb_irq(skb);
                } else {
                    /* Handle Rx frame format */

                    if (RMNET_IS_MODE_IP(opmode)) {
                        /* Driver in IP mode */
                        skb->protocol =
                            rmnet_ip_type_trans(skb, dev);
                    } else {
                        /* Driver in Ethernet mode */
                        skb->protocol =
                            eth_type_trans(skb, dev);
                    }
                    if (RMNET_IS_MODE_IP(opmode) ||
                            count_this_packet(ptr, skb->len)) {
#ifdef CONFIG_MSM_RMNET_DEBUG
                        p->wakeups_rcv +=
                            rmnet_cause_wakeup(p);
#endif
                        p->stats.rx_packets++;
                        p->stats.rx_bytes += skb->len;
                    }
                    netif_rx(skb);
                }
                continue;
            }
        }
        /* ptr is NULL on the discard path; the driver relies on
         * smd_read() accepting a NULL buffer to drain sz bytes */
        if (smd_read(p->ch, ptr, sz) != sz)
            pr_err("rmnet_recv() smd lied about avail?!\n");
    }
}

//ZTE_RIL_RJG_20101103 end

static DECLARE_TASKLET(smd_net_data_tasklet, smd_net_data_handler, 0);
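
/*
 * Sketch (assumption): the tasklet above is normally scheduled from the
 * SMD event callback registered when the channel is opened. That callback
 * is not part of this excerpt; the version below is modeled on the usual
 * msm_rmnet.c pattern and is not verbatim from this driver.
 */
static void smd_net_notify(void *_dev, unsigned event)
{
    if (event != SMD_EVENT_DATA)
        return;

    /* Point the shared tasklet at this device, then run it in softirq. */
    smd_net_data_tasklet.data = (unsigned long) _dev;
    tasklet_schedule(&smd_net_data_tasklet);
}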

static int _rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct rmnet_private *p = netdev_priv(dev);
    smd_channel_t *ch = p->ch;
    int smd_ret;
    struct QMI_QOS_HDR_S *qmih;
    u32 opmode;
    unsigned long flags;

    /* For QoS mode, prepend QMI header and assign flow ID from skb->mark */
    spin_lock_irqsave(&p->lock, flags);
    opmode = p->operation_mode;
    spin_unlock_irqrestore(&p->lock, flags);

    if (RMNET_IS_MODE_QOS(opmode)) {
        qmih = (struct QMI_QOS_HDR_S *)
               skb_push(skb, sizeof(struct QMI_QOS_HDR_S));
        qmih->version = 1;
        qmih->flags = 0;
        qmih->flow_id = skb->mark;
    }

    dev->trans_start = jiffies;
    smd_ret = smd_write(ch, skb->data, skb->len);
    if (smd_ret != skb->len) {
        pr_err("%s: smd_write returned error %d", __func__, smd_ret);
        goto xmit_out;
    }

    if (RMNET_IS_MODE_IP(opmode) ||
            count_this_packet(skb->data, skb->len)) {
        p->stats.tx_packets++;
        p->stats.tx_bytes += skb->len;
#ifdef CONFIG_MSM_RMNET_DEBUG
        p->wakeups_xmit += rmnet_cause_wakeup(p);
#endif
    }

xmit_out:
    /* data transmitted or dropped; either way, release the skb */
    dev_kfree_skb_irq(skb);
    return 0;
}

static void _rmnet_resume_flow(unsigned long param)
{
    struct net_device *dev = (struct net_device *)param;
    struct rmnet_private *p = netdev_priv(dev);
    struct sk_buff *skb = NULL;
    unsigned long flags;

    /* xmit and enable the flow only once even if
       multiple tasklets were scheduled by smd_net_notify */
    spin_lock_irqsave(&p->lock, flags);
    if (p->skb && (smd_write_avail(p->ch) >= p->skb->len)) {
        skb = p->skb;
        p->skb = NULL;
        spin_unlock_irqrestore(&p->lock, flags);
        _rmnet_xmit(skb, dev);
        netif_wake_queue(dev);
    } else
        spin_unlock_irqrestore(&p->lock, flags);
}

static void msm_rmnet_unload_modem(void *pil)
{
    if (pil)
        pil_put(pil);
}

static void *msm_rmnet_load_modem(struct net_device *dev)
{
    void *pil;
    int rc;
    struct rmnet_private *p = netdev_priv(dev);

    pil = pil_get("modem");
    if (IS_ERR(pil))
        pr_err("%s: modem load failed\n", __func__);
    else if (msm_rmnet_modem_wait) {
        rc = wait_for_completion_interruptible_timeout(
                 &p->complete,
                 msecs_to_jiffies(msm_rmnet_modem_wait * 1000));
        if (!rc)
            rc = -ETIMEDOUT;
        if (rc < 0) {
            pr_err("%s: wait for rmnet port failed %d\n",
                   __func__, rc);
            msm_rmnet_unload_modem(pil);
            pil = ERR_PTR(rc);
        }
    }

    return pil;
}
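
For context, msm_rmnet_load_modem() is typically invoked from the netdev open path, with the returned PIL handle cached in rmnet_private. A minimal sketch of such a call site (the 'pil' field name and the error handling are assumptions for illustration):

/* Hypothetical open handler caching the PIL handle; the field name
 * 'pil' and the error handling are assumptions, not the real driver. */
static int rmnet_open(struct net_device *dev)
{
    struct rmnet_private *p = netdev_priv(dev);
    void *pil;

    if (!p->pil) {
        pil = msm_rmnet_load_modem(dev);
        if (IS_ERR(pil))
            return PTR_ERR(pil);
        p->pil = pil;
    }

    netif_start_queue(dev);
    return 0;
}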