Example #1
static void *handle_sdio_mux_data(struct sdio_mux_hdr *hdr,
				  struct sk_buff *skb_mux)
{
	struct sk_buff *skb;
	void *rp = (void *)hdr;
	unsigned long flags;

	/* protect? */
	rp += sizeof(*hdr);
	if (rp < (void *)skb_mux->tail)
		rp += (hdr->pkt_len + hdr->pad_len);

	if (rp > (void *)skb_mux->tail) {
		/* partial packet */
		sdio_mux_save_partial_pkt(hdr, skb_mux);
		goto packet_done;
	}

	DBG("[lte] %s: hdr %p next %p tail %p pkt_size %d\n",
	    __func__, hdr, rp, skb_mux->tail, hdr->pkt_len + hdr->pad_len);

	skb = skb_clone(skb_mux, GFP_KERNEL);
	if (!skb) {
		pr_err("[lte] Error - %s: cannot clone skb\n", __func__);
		goto packet_done;
	}

	skb_set_data(skb, (unsigned char *)(hdr + 1), hdr->pkt_len);
	DBG("[lte] %s: head %p data %p tail %p end %p len %d\n",
	    __func__, skb->head, skb->data, skb->tail, skb->end, skb->len);

	/* probably we should check channel status */
	/* discard packet early if local side not open */
	spin_lock_irqsave(&sdio_ch[hdr->ch_id].lock, flags);
	if (sdio_ch[hdr->ch_id].receive_cb)
		sdio_ch[hdr->ch_id].receive_cb(sdio_ch[hdr->ch_id].priv, skb);
	else
		dev_kfree_skb_any(skb);
	spin_unlock_irqrestore(&sdio_ch[hdr->ch_id].lock, flags);

packet_done:
	return rp;
}
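Example #1's parser trusts three header fields (pkt_len, pad_len, ch_id), and the later examples also check magic_num and dispatch on a command. For reference, here is a minimal sketch of the header layout those accesses imply; the field order and widths are an assumption for illustration, not the driver's confirmed definition:

struct sdio_mux_hdr {
	uint16_t magic_num;	/* checked against SDIO_MUX_HDR_MAGIC_NO */
	uint8_t  reserved;	/* hypothetical padding byte */
	uint8_t  cmd;		/* hypothetical; consumed by handle_sdio_mux_command() */
	uint8_t  pad_len;	/* bytes of trailing padding after the payload */
	uint8_t  ch_id;		/* index into the sdio_ch[] table */
	uint16_t pkt_len;	/* payload length, excluding pad_len */
};

With a layout like this, rp += sizeof(*hdr) lands on the payload, and the (unsigned char *)(hdr + 1) handed to skb_set_data() is the first payload byte.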
Example #2
static void sdio_mux_read_data(struct work_struct *work)
{
	struct sk_buff *skb_mux;
	void *ptr = NULL;
	int sz, rc, len = 0;
	struct sdio_mux_hdr *hdr;

	DBG("[lte] %s: reading\n", __func__);
	/* should probably have a separate read lock */
	mutex_lock(&sdio_mux_lock);
	sz = sdio_read_avail(sdio_mux_ch);
	DBG("[lte] %s: read avail %d\n", __func__, sz);
	if (sz <= 0) {
		if (sz)
			pr_err("[lte] Error - %s: read avail failed %d\n", __func__, sz);
		mutex_unlock(&sdio_mux_lock);
		return;
	}

	/* NET_IP_ALIGN is probably not required */
	if (sdio_partial_pkt.valid)
		len = sdio_partial_pkt.skb->len;

	/* If allocation fails attempt to get a smaller chunk of mem */
	do {
		skb_mux = __dev_alloc_skb(sz + NET_IP_ALIGN + len, GFP_KERNEL);
		if (skb_mux)
			break;

		DBG("[lte] %s: cannot allocate skb of size:%d + "
				"%d (NET_SKB_PAD)\n",
				__func__, sz + NET_IP_ALIGN + len, NET_SKB_PAD);
		/* the skb structure adds NET_SKB_PAD bytes to the memory
	 * request, which may push the actual request above PAGE_SIZE;
		 * in that case, we need to iterate one more time to make sure
		 * we get the memory request under PAGE_SIZE
		 */
		if (sz + NET_IP_ALIGN + len + NET_SKB_PAD <= PAGE_SIZE) {
			pr_info("[lte] %s: allocation failed. retry later\n", __func__);
			mutex_unlock(&sdio_mux_lock);
			queue_delayed_work(sdio_mux_workqueue,
				&work_sdio_mux_read,
				msecs_to_jiffies(SDIO_OOM_RETRY_DELAY_MS));
			return;
		}
		sz /= 2;
	} while (1);

	skb_reserve(skb_mux, NET_IP_ALIGN + len);
	ptr = skb_put(skb_mux, sz);

	/* half second wakelock is fine? */
	wake_lock_timeout(&sdio_mux_ch_wakelock, HZ / 2);
	rc = sdio_read(sdio_mux_ch, ptr, sz);
	DBG("[lte] %s: read %d\n", __func__, rc);
	if (rc) {
		pr_err("[lte] Error - %s: sdio read failed %d\n", __func__, rc);
		dev_kfree_skb_any(skb_mux);
		mutex_unlock(&sdio_mux_lock);
		queue_delayed_work(sdio_mux_workqueue,
			&work_sdio_mux_read, 0);
		return;
	}
	mutex_unlock(&sdio_mux_lock);

	DBG_INC_READ_CNT(sz);
	DBG("[lte] %s: head %p data %p tail %p end %p len %d\n", __func__,
	    skb_mux->head, skb_mux->data, skb_mux->tail,
	    skb_mux->end, skb_mux->len);

	/* HTC */
	dbg_dump_buf("SDIO_RMNET->RD#", skb_mux->data, skb_mux->len);

	/* move to a separate function */
	/* probably do skb_pull instead of pointer adjustment */
	hdr = handle_sdio_partial_pkt(skb_mux);

	while ((void *)hdr < (void *)skb_mux->tail) {

		if (((void *)hdr + sizeof(*hdr)) > (void *)skb_mux->tail) {
			/* handle partial header */
			sdio_mux_save_partial_pkt(hdr, skb_mux);
			break;
		}

		if (hdr->magic_num != SDIO_MUX_HDR_MAGIC_NO) {
			pr_err("[lte] Error - %s: packet error\n", __func__);
			break;
		}

		hdr = handle_sdio_mux_command(hdr, skb_mux);
	}
	dev_kfree_skb_any(skb_mux);

	DBG("[lte] %s: read done\n", __func__);
	queue_delayed_work(sdio_mux_workqueue, &work_sdio_mux_read, 0);
}
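Examples #2 through #4 consult a module-level sdio_partial_pkt record before sizing the receive buffer. Only its .valid and .skb members appear in these listings; a hedged sketch of the state they imply (the hdr member here is purely hypothetical):

static struct {
	int valid;		/* set while a saved fragment awaits the next read */
	struct sk_buff *skb;	/* clone holding the fragment's bytes */
	struct sdio_mux_hdr *hdr;	/* hypothetical: header of the saved fragment */
} sdio_partial_pkt;

Reserving the fragment's length up front (skb_reserve(skb_mux, NET_IP_ALIGN + len)) leaves headroom so that handle_sdio_partial_pkt() can presumably place the saved bytes in front of the freshly read SDIO data before parsing resumes.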
Example #3
static void sdio_mux_read_data(struct work_struct *work)
{
	struct sk_buff *skb_mux;
	void *ptr = NULL;
	int sz, rc, len = 0;
	struct sdio_mux_hdr *hdr;
	/* [[ 2011.10.06 leecy: add Qualcomm patch */
	static int workqueue_pinned;

	if (!workqueue_pinned) {
		struct cpumask cpus;

		cpumask_clear(&cpus);
		cpumask_set_cpu(0, &cpus);

		if (sched_setaffinity(current->pid, &cpus))
			pr_err("%s: sdio_dmux set CPU affinity failed\n", __func__);

		workqueue_pinned = 1;
	}
	/* 2011.10.06 leecy: add Qualcomm patch ]] */

	DBG("%s: reading\n", __func__);
	/* should probably have a separate read lock */
	mutex_lock(&sdio_mux_lock);
	sz = sdio_read_avail(sdio_mux_ch);
	DBG("%s: read avail %d\n", __func__, sz);
	if (sz <= 0) {
		if (sz)
			pr_err("%s: read avail failed %d\n", __func__, sz);
		mutex_unlock(&sdio_mux_lock);
		return;
	}

	/* NET_IP_ALIGN is probably not required */
	if (sdio_partial_pkt.valid)
		len = sdio_partial_pkt.skb->len;

	/* If allocation fails attempt to get a smaller chunk of mem */
	do {
		skb_mux = __dev_alloc_skb(sz + NET_IP_ALIGN + len, GFP_KERNEL);
		if (skb_mux)
			break;

		pr_err("%s: cannot allocate skb of size:%d + "
			"%d (NET_SKB_PAD)\n", __func__,
			sz + NET_IP_ALIGN + len, NET_SKB_PAD);
		/* the skb structure adds NET_SKB_PAD bytes to the memory
	 * request, which may push the actual request above PAGE_SIZE;
		 * in that case, we need to iterate one more time to make sure
		 * we get the memory request under PAGE_SIZE
		 */
		if (sz + NET_IP_ALIGN + len + NET_SKB_PAD <= PAGE_SIZE) {
			pr_err("%s: allocation failed\n", __func__);
			mutex_unlock(&sdio_mux_lock);
			return;
		}
		sz /= 2;
	} while (1);

	skb_reserve(skb_mux, NET_IP_ALIGN + len);
	ptr = skb_put(skb_mux, sz);

	/* half second wakelock is fine? */
	wake_lock_timeout(&sdio_mux_ch_wakelock, HZ / 2);
	rc = sdio_read(sdio_mux_ch, ptr, sz);
	DBG("%s: read %d\n", __func__, rc);
	if (rc) {
		pr_err("%s: sdio read failed %d\n", __func__, rc);
		dev_kfree_skb_any(skb_mux);
		mutex_unlock(&sdio_mux_lock);
		queue_work(sdio_mux_workqueue, &work_sdio_mux_read);
		return;
	}
	mutex_unlock(&sdio_mux_lock);

	DBG_INC_READ_CNT(sz);
	DBG("%s: head %p data %p tail %p end %p len %d\n", __func__,
	    skb_mux->head, skb_mux->data, skb_mux->tail,
	    skb_mux->end, skb_mux->len);

	/* move to a separate function */
	/* probably do skb_pull instead of pointer adjustment */
	hdr = handle_sdio_partial_pkt(skb_mux);
	while ((void *)hdr < (void *)skb_mux->tail) {

		if (((void *)hdr + sizeof(*hdr)) > (void *)skb_mux->tail) {
			/* handle partial header */
			sdio_mux_save_partial_pkt(hdr, skb_mux);
			break;
		}

		if (hdr->magic_num != SDIO_MUX_HDR_MAGIC_NO) {
			pr_err("%s: packet error\n", __func__);
			break;
		}

		hdr = handle_sdio_mux_command(hdr, skb_mux);
	}
	dev_kfree_skb_any(skb_mux);

	DBG("%s: read done\n", __func__);
	queue_work(sdio_mux_workqueue, &work_sdio_mux_read);
}
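Relative to Example #2, this variant drops the delayed-work OOM retry and adds a one-shot affinity pin so the read handler always runs on CPU 0. Pinning from inside the work item works, but the kernel also lets callers pick the CPU at enqueue time; a sketch of that alternative (an assumption, not what this driver does):

/* Alternative sketch (assumption): queue the read work on CPU 0
 * directly instead of calling sched_setaffinity() in the handler. */
static inline void sdio_mux_queue_read_cpu0(void)
{
	queue_work_on(0, sdio_mux_workqueue, &work_sdio_mux_read);
}

queue_work_on() takes the target CPU as its first argument, so each queue_work(sdio_mux_workqueue, &work_sdio_mux_read) call site could be replaced by this helper.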
Example #4
static void sdio_mux_read_data(struct work_struct *work)
{
	struct sk_buff *skb_mux;
	void *ptr = NULL;
	int sz, rc, len = 0;
	struct sdio_mux_hdr *hdr;

	DBG("%s: reading\n", __func__);
	/* should probably have a separate read lock */
	mutex_lock(&sdio_mux_lock);
	sz = sdio_read_avail(sdio_mux_ch);
	DBG("%s: read avail %d\n", __func__, sz);
	if (sz <= 0) {
		if (sz)
			pr_err("%s: read avail failed %d\n", __func__, sz);
		mutex_unlock(&sdio_mux_lock);
		return;
	}

	/* NET_IP_ALIGN is probably not required */
	if (sdio_partial_pkt.valid)
		len = sdio_partial_pkt.skb->len;
	/* if allocation fails attempt to get a smaller chunk of mem */
	do {
		skb_mux = dev_alloc_skb(sz + NET_IP_ALIGN + len);
		if (skb_mux)
			break;
		pr_err("%s: cannot allocate skb of size:%d\n", __func__,
			sz + NET_IP_ALIGN + len);
		if (sz + NET_IP_ALIGN + len <= PAGE_SIZE) {
			pr_err("%s: allocation failed\n", __func__);
			mutex_unlock(&sdio_mux_lock);
			return;
		}
		sz /= 2;
	} while (1);

	skb_reserve(skb_mux, NET_IP_ALIGN + len);
	ptr = skb_put(skb_mux, sz);

	/* half second wakelock is fine? */
	wake_lock_timeout(&sdio_mux_ch_wakelock, HZ / 2);
	rc = sdio_read(sdio_mux_ch, ptr, sz);
	DBG("%s: read %d\n", __func__, rc);
	if (rc) {
		pr_err("%s: sdio read failed %d\n", __func__, rc);
		dev_kfree_skb_any(skb_mux);
		mutex_unlock(&sdio_mux_lock);
		queue_work(sdio_mux_workqueue, &work_sdio_mux_read);
		return;
	}
	mutex_unlock(&sdio_mux_lock);

	DBG_INC_READ_CNT(sz);
	DBG("%s: head %p data %p tail %p end %p len %d\n", __func__,
	    skb_mux->head, skb_mux->data, skb_mux->tail,
	    skb_mux->end, skb_mux->len);

	/* move to a separate function */
	/* probably do skb_pull instead of pointer adjustment */
	hdr = handle_sdio_partial_pkt(skb_mux);
	while ((void *)hdr < (void *)skb_mux->tail) {

		if (((void *)hdr + sizeof(*hdr)) > (void *)skb_mux->tail) {
			/* handle partial header */
			sdio_mux_save_partial_pkt(hdr, skb_mux);
			break;
		}

		if (hdr->magic_num != SDIO_MUX_HDR_MAGIC_NO) {
			pr_err("%s: packet error\n", __func__);
			break;
		}

		hdr = handle_sdio_mux_command(hdr, skb_mux);
	}
	dev_kfree_skb_any(skb_mux);

	DBG("%s: read done\n", __func__);
	queue_work(sdio_mux_workqueue, &work_sdio_mux_read);
}
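Example #4 is the leanest variant. It calls dev_alloc_skb(), which allocates with GFP_ATOMIC rather than the GFP_KERNEL passed to __dev_alloc_skb() in Examples #2 and #3, so this path must not sleep for memory. Its retry-threshold check also omits the NET_SKB_PAD overhead that the other examples' comments describe, and like Example #3 it gives up outright once even the halved request would fit in a page, whereas Example #2 re-queues itself after SDIO_OOM_RETRY_DELAY_MS and tries again.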