static int rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct rmnet_private *p = netdev_priv(dev);
	smd_channel_t *ch = p->ch;
	unsigned long flags;

	if (netif_queue_stopped(dev)) {
		pr_err("[%s] fatal: rmnet_xmit called when netif_queue is stopped",
			dev->name);
		return 0;
	}

	spin_lock_irqsave(&p->lock, flags);
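	/*
	 * Leave the SMD read interrupt enabled only while waiting for write
	 * space, so the channel notification can resume the stopped flow; it
	 * is disabled again below once the write can proceed.
	 */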
	smd_enable_read_intr(ch);
	if (smd_write_avail(ch) < skb->len) {
		netif_stop_queue(dev);
		p->skb = skb;
		spin_unlock_irqrestore(&p->lock, flags);
		return 0;
	}
	smd_disable_read_intr(ch);
	spin_unlock_irqrestore(&p->lock, flags);

	_rmnet_xmit(skb, dev);

	return 0;
}
Example #2
static int rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct rmnet_private *p = netdev_priv(dev);
	unsigned long flags;
	int ret = 0;

	if (netif_queue_stopped(dev) || (p->device_state == DEVICE_INACTIVE)) {
		pr_err("[%s] fatal: rmnet_xmit called when netif_queue is stopped",
			dev->name);
		return 0;
	}

	spin_lock_irqsave(&p->tx_queue_lock, flags);
	ret = _rmnet_xmit(skb, dev);

	if (ret == -EAGAIN) {
		/*
		 * EAGAIN means we attempted to overflow the high watermark.
		 * Clearly the queue is not stopped like it should be, so
		 * stop it and return BUSY to the TCP/IP framework.  It will
		 * retry this packet once the queue is restarted, which
		 * happens when the low watermark is hit.
		 */
		netif_stop_queue(dev);
		ret = NETDEV_TX_BUSY;
	}
	spin_unlock_irqrestore(&p->tx_queue_lock, flags);

	return ret;
}
Example #3
static int rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct rmnet_private *p = netdev_priv(dev);
	smd_channel_t *ch = p->ch;
	unsigned long flags;
	int space;

	if (netif_queue_stopped(dev)) {
		pr_err("fatal: rmnet_xmit called when netif_queue is stopped");
		return 0;
	}

	spin_lock_irqsave(&p->lock, flags);
	smd_enable_read_intr(ch);
	space = smd_write_avail(ch);
	if (space < skb->len) {
		pr_warn("rmnet_xmit: no write space on ch %s, available %d, needed %d\n",
				p->chname, space, skb->len);
		netif_stop_queue(dev);
		p->skb = skb;
		spin_unlock_irqrestore(&p->lock, flags);
		return 0;
	}
	smd_disable_read_intr(ch);
	spin_unlock_irqrestore(&p->lock, flags);

	_rmnet_xmit(skb, dev);

	return 0;
}
static int rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (netif_queue_stopped(dev)) {
		pr_err("[lte] fatal: rmnet_xmit called when netif_queue is stopped");
		return 0;
	}

	netif_stop_queue(dev);
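	/*
	 * Note: this variant stops the queue for every packet; a completion
	 * path elsewhere in the driver presumably re-wakes it.
	 */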
	_rmnet_xmit(skb, dev);

	return 0;
}
static void _rmnet_resume_flow(unsigned long param)
{
	struct net_device *dev = (struct net_device *)param;
	struct rmnet_private *p = netdev_priv(dev);
	struct sk_buff *skb = NULL;
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);
	if (p->skb && (smd_write_avail(p->ch) >= p->skb->len)) {
		skb = p->skb;
		p->skb = NULL;
		spin_unlock_irqrestore(&p->lock, flags);
		_rmnet_xmit(skb, dev);
		netif_wake_queue(dev);
	} else
		spin_unlock_irqrestore(&p->lock, flags);
}
static void _rmnet_resume_flow(unsigned long param)
{
	struct net_device *dev = (struct net_device *)param;
	struct rmnet_private *p = netdev_priv(dev);
	struct sk_buff *skb = NULL;
	unsigned long flags;

	/* xmit and enable the flow only once even if
	   multiple tasklets were scheduled by smd_net_notify */
	spin_lock_irqsave(&p->lock, flags);
	if (p->skb && (smd_write_avail(p->ch) >= p->skb->len)) {
		skb = p->skb;
		p->skb = NULL;
		spin_unlock_irqrestore(&p->lock, flags);
		_rmnet_xmit(skb, dev);
		netif_wake_queue(dev);
	} else
		spin_unlock_irqrestore(&p->lock, flags);
}
static int rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct rmnet_private *p = netdev_priv(dev);

	if (netif_queue_stopped(dev)) {
		pr_err("[%s] fatal: rmnet_xmit called when netif_queue is stopped",
			dev->name);
		return 0;
	}

	_rmnet_xmit(skb, dev);

	if (msm_sdio_dmux_is_ch_full(p->ch_id)) {
		netif_stop_queue(dev);
		DBG0("%s: High WM hit, stopping queue=%p\n",	__func__, skb);
	}

	return 0;
}
static int rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct rmnet_private *p = netdev_priv(dev);
	unsigned long flags;

	if (netif_queue_stopped(dev)) {
		pr_err("[%s] fatal: rmnet_xmit called when netif_queue is stopped",
			dev->name);
		return 0;
	}

	_rmnet_xmit(skb, dev);

	spin_lock_irqsave(&p->tx_queue_lock, flags);
	if (msm_sdio_dmux_is_ch_full(p->ch_id)) {
		netif_stop_queue(dev);
		DBG0("%s: High WM hit, stopping queue=%p\n",	__func__, skb);
	}
	spin_unlock_irqrestore(&p->tx_queue_lock, flags);

	return 0;
}
static void bam_notify(void *dev, int event, unsigned long data)
{
	struct rmnet_private *p = netdev_priv(dev);
	unsigned long flags;

	switch (event) {
	case BAM_DMUX_RECEIVE:
		bam_recv_notify(dev, (struct sk_buff *)(data));
		break;
	case BAM_DMUX_WRITE_DONE:
		bam_write_done(dev, (struct sk_buff *)(data));
		break;
	case BAM_DMUX_UL_CONNECTED:
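		/*
		 * The uplink is awake again: transmit the SKB that was parked
		 * while waiting for the wakeup and restart the queue.
		 */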
		spin_lock_irqsave(&p->lock, flags);
		if (p->waiting_for_ul_skb != NULL) {
			struct sk_buff *skb;
			int ret;

			skb = p->waiting_for_ul_skb;
			p->waiting_for_ul_skb = NULL;
			spin_unlock_irqrestore(&p->lock, flags);
			ret = _rmnet_xmit(skb, dev);
			if (ret) {
				pr_err("%s: error %d dropping delayed TX SKB %p\n",
						__func__, ret, skb);
				dev_kfree_skb_any(skb);
			}
			netif_wake_queue(dev);
		} else {
			spin_unlock_irqrestore(&p->lock, flags);
		}
		break;
	case BAM_DMUX_UL_DISCONNECTED:
		break;
	}
}
Example #10
static int rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct rmnet_private *p = netdev_priv(dev);

	if (netif_queue_stopped(dev)) {
		pr_err("[%s] fatal: rmnet_xmit called when netif_queue is stopped",
			dev->name);
		return 0;
	}

	if (!ul_is_connected) {
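		/*
		 * The uplink is not up yet: request a wakeup and let the stack
		 * requeue this packet (NETDEV_TX_BUSY) until it connects.
		 */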
		p->waiting_for_ul = 1;
		msm_bam_dmux_kickoff_ul_wakeup();
		return NETDEV_TX_BUSY;
	}
	_rmnet_xmit(skb, dev);

	if (msm_bam_dmux_is_ch_full(p->ch_id)) {
		netif_stop_queue(dev);
		DBG0("%s: High WM hit, stopping queue=%p\n",    __func__, skb);
	}

	return 0;
}
static int rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct rmnet_private *p = netdev_priv(dev);
	unsigned long flags;
	int awake;
	int ret = 0;

	if (netif_queue_stopped(dev)) {
		pr_err("[%s] fatal: rmnet_xmit called when netif_queue is stopped",
			dev->name);
		return 0;
	}

	spin_lock_irqsave(&p->lock, flags);
	awake = msm_bam_dmux_ul_power_vote();
	if (!awake) {
		/* send SKB once wakeup is complete */
		netif_stop_queue(dev);
		p->waiting_for_ul_skb = skb;
		spin_unlock_irqrestore(&p->lock, flags);
		ret = 0;
		goto exit;
	}
	spin_unlock_irqrestore(&p->lock, flags);

	ret = _rmnet_xmit(skb, dev);
	if (ret == -EPERM) {
		ret = NETDEV_TX_BUSY;
		goto exit;
	}

	/*
	 * SSR (subsystem restart) was detected a bit early.  Shut some things
	 * down now and leave the rest to the main SSR handling code when it
	 * runs later.
	 */
	if (ret == -EFAULT) {
		netif_carrier_off(dev);
		dev_kfree_skb_any(skb);
		ret = 0;
		goto exit;
	}

	if (ret == -EAGAIN) {
		/*
		 * This should not happen.
		 * EAGAIN means we attempted to overflow the high watermark.
		 * Clearly the queue is not stopped like it should be, so
		 * stop it and return BUSY to the TCP/IP framework.  It will
		 * retry this packet once the queue is restarted, which happens
		 * in the write_done callback when the low watermark is hit.
		 */
		netif_stop_queue(dev);
		ret = NETDEV_TX_BUSY;
		goto exit;
	}

	spin_lock_irqsave(&p->tx_queue_lock, flags);
	if (msm_bam_dmux_is_ch_full(p->ch_id)) {
		netif_stop_queue(dev);
		DBG0("%s: High WM hit, stopping queue=%p\n",    __func__, skb);
	}
	spin_unlock_irqrestore(&p->tx_queue_lock, flags);

exit:
	msm_bam_dmux_ul_power_unvote();
	return ret;
}
Example #12
/* Called in soft-irq context */
static void smd_net_data_handler(unsigned long arg)
{
    struct net_device *dev = (struct net_device *) arg;
    struct rmnet_private *p = netdev_priv(dev);
    struct sk_buff *skb;
    void *ptr = 0;
    int sz;
    u32 opmode = p->operation_mode;
//	unsigned long flags;
//   int max_package_size;
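    /* Drain every complete packet currently available on the SMD channel. */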
    for (;;) {
        sz = smd_cur_packet_size(p->ch);
        if (sz == 0) break;
        if (smd_read_avail(p->ch) < sz) break;
//ZTE_RIL_WANGCHENG_20110425 start
#ifdef CONFIG_ZTE_PLATFORM
        if (RMNET_IS_MODE_IP(opmode) ? (sz > ((dev->mtu > RMNET_DEFAULT_MTU_LEN) ? dev->mtu : RMNET_DEFAULT_MTU_LEN)) :
                (sz > (((dev->mtu > RMNET_DEFAULT_MTU_LEN) ? dev->mtu : RMNET_DEFAULT_MTU_LEN) + ETH_HLEN))) {
#else
        if (RMNET_IS_MODE_IP(opmode) ? (sz > dev->mtu) :
                (sz > (dev->mtu + ETH_HLEN))) {
#endif

            pr_err("rmnet_recv() discarding %d len (%d mtu)\n",
                   sz, RMNET_IS_MODE_IP(opmode) ?
                   dev->mtu : (dev->mtu + ETH_HLEN));
            ptr = 0;
        } else {
            skb = dev_alloc_skb(sz + NET_IP_ALIGN);
            if (skb == NULL) {
                pr_err("rmnet_recv() cannot allocate skb\n");
                /* make sure the discard read below does not reuse a stale ptr */
                ptr = 0;
            } else {
                skb->dev = dev;
                skb_reserve(skb, NET_IP_ALIGN);
                ptr = skb_put(skb, sz);
                wake_lock_timeout(&p->wake_lock, HZ / 2);
                if (smd_read(p->ch, ptr, sz) != sz) {
                    pr_err("rmnet_recv() smd lied about avail?!");
                    ptr = 0;
                    dev_kfree_skb_irq(skb);
                } else {
                    /* Handle Rx frame format */
                    //spin_lock_irqsave(&p->lock, flags);
                    //opmode = p->operation_mode;
                    //spin_unlock_irqrestore(&p->lock, flags);

                    if (RMNET_IS_MODE_IP(opmode)) {
                        /* Driver in IP mode */
                        skb->protocol =
                            rmnet_ip_type_trans(skb, dev);
                    } else {
                        /* Driver in Ethernet mode */
                        skb->protocol =
                            eth_type_trans(skb, dev);
                    }
                    if (RMNET_IS_MODE_IP(opmode) ||
                            count_this_packet(ptr, skb->len)) {
#ifdef CONFIG_MSM_RMNET_DEBUG
                        p->wakeups_rcv +=
                            rmnet_cause_wakeup(p);
#endif
                        p->stats.rx_packets++;
                        p->stats.rx_bytes += skb->len;
                    }
                    netif_rx(skb);
                }
                continue;
            }
        }
        if (smd_read(p->ch, ptr, sz) != sz)
            pr_err("rmnet_recv() smd lied about avail?!");
    }
}

//ZTE_RIL_RJG_20101103 end

static DECLARE_TASKLET(smd_net_data_tasklet, smd_net_data_handler, 0);

static int _rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct rmnet_private *p = netdev_priv(dev);
    smd_channel_t *ch = p->ch;
    int smd_ret;
    struct QMI_QOS_HDR_S *qmih;
    u32 opmode;
    unsigned long flags;

    /* For QoS mode, prepend QMI header and assign flow ID from skb->mark */
    spin_lock_irqsave(&p->lock, flags);
    opmode = p->operation_mode;
    spin_unlock_irqrestore(&p->lock, flags);

    if (RMNET_IS_MODE_QOS(opmode)) {
        qmih = (struct QMI_QOS_HDR_S *)
               skb_push(skb, sizeof(struct QMI_QOS_HDR_S));
        qmih->version = 1;
        qmih->flags = 0;
        qmih->flow_id = skb->mark;
    }

    dev->trans_start = jiffies;
    smd_ret = smd_write(ch, skb->data, skb->len);
    if (smd_ret != skb->len) {
        pr_err("%s: smd_write returned error %d", __func__, smd_ret);
        goto xmit_out;
    }

    if (RMNET_IS_MODE_IP(opmode) ||
            count_this_packet(skb->data, skb->len)) {
        p->stats.tx_packets++;
        p->stats.tx_bytes += skb->len;
#ifdef CONFIG_MSM_RMNET_DEBUG
        p->wakeups_xmit += rmnet_cause_wakeup(p);
#endif
    }

xmit_out:
    /* data handed to SMD (or dropped on error); safe to release the skb */
    dev_kfree_skb_irq(skb);
    return 0;
}

static void _rmnet_resume_flow(unsigned long param)
{
    struct net_device *dev = (struct net_device *)param;
    struct rmnet_private *p = netdev_priv(dev);
    struct sk_buff *skb = NULL;
    unsigned long flags;

    /* xmit and enable the flow only once even if
       multiple tasklets were scheduled by smd_net_notify */
    spin_lock_irqsave(&p->lock, flags);
    if (p->skb && (smd_write_avail(p->ch) >= p->skb->len)) {
        skb = p->skb;
        p->skb = NULL;
        spin_unlock_irqrestore(&p->lock, flags);
        _rmnet_xmit(skb, dev);
        netif_wake_queue(dev);
    } else
        spin_unlock_irqrestore(&p->lock, flags);
}

static void msm_rmnet_unload_modem(void *pil)
{
    if (pil)
        pil_put(pil);
}

static void *msm_rmnet_load_modem(struct net_device *dev)
{
    void *pil;
    int rc;
    struct rmnet_private *p = netdev_priv(dev);

    pil = pil_get("modem");
    if (IS_ERR(pil))
        pr_err("%s: modem load failed\n", __func__);
    else if (msm_rmnet_modem_wait) {
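        /*
         * Optionally block until the modem opens the rmnet port, timing out
         * after msm_rmnet_modem_wait seconds.
         */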
        rc = wait_for_completion_interruptible_timeout(
                 &p->complete,
                 msecs_to_jiffies(msm_rmnet_modem_wait * 1000));
        if (!rc)
            rc = -ETIMEDOUT;
        if (rc < 0) {
            pr_err("%s: wait for rmnet port failed %d\n",
                   __func__, rc);
            msm_rmnet_unload_modem(pil);
            pil = ERR_PTR(rc);
        }
    }

    return pil;
}