コード例 #1
0
ファイル: diagfwd_sdio.c プロジェクト: dimax754/msm_2.6.38
/*
 * __diag_sdio_send_req() - drain pending diag data from the SDIO channel.
 *
 * Reads however many bytes the channel reports as available into
 * driver->buf_in_sdio (growing it with krealloc when the payload exceeds
 * IN_BUF_SIZE but is below MAX_IN_BUF_SIZE), then forwards the buffer via
 * diag_device_write() and marks the channel busy until that write is
 * acknowledged.  Payloads of MAX_IN_BUF_SIZE or more are rejected.
 */
void __diag_sdio_send_req(void)
{
	int r = 0;
	void *buf = driver->buf_in_sdio;

	if (driver->sdio_ch && (!driver->in_busy_sdio)) {
		r = sdio_read_avail(driver->sdio_ch);

		if (r > IN_BUF_SIZE) {
			if (r < MAX_IN_BUF_SIZE) {
				pr_err("diag: SDIO sending"
					  " packets more than %d bytes\n", r);
				buf = krealloc(buf, r, GFP_KERNEL);
				/*
				 * Bug fix: krealloc may move the allocation.
				 * Keep driver->buf_in_sdio in sync so a later
				 * call does not read through the old (freed)
				 * pointer.  On failure buf is NULL and the
				 * original buffer, still referenced by
				 * driver->buf_in_sdio, stays valid.
				 */
				if (buf)
					driver->buf_in_sdio = buf;
			} else {
				pr_err("diag: SDIO sending"
			  " in packets more than %d bytes\n", MAX_IN_BUF_SIZE);
				return;
			}
		}
		if (r > 0) {
			if (!buf)
				printk(KERN_INFO "Out of diagmem for SDIO\n");
			else {
				APPEND_DEBUG('i');
				sdio_read(driver->sdio_ch, buf, r);
				APPEND_DEBUG('j');
				driver->write_ptr_mdm->length = r;
				/* Stay busy until diag_device_write consumer
				 * releases the buffer. */
				driver->in_busy_sdio = 1;
				diag_device_write(buf, SDIO_DATA,
						 driver->write_ptr_mdm);
			}
		}
	}
}
コード例 #2
0
/**
 * Loopback Test
 *
 * Echo every received chunk straight back to the sender: wait for an
 * rx notification, read whatever is available, and write the same bytes
 * back (skipping the round when the channel cannot absorb them all).
 * Runs until test_ctx->exit_flag is raised.
 */
static void loopback_test(void)
{
	int status = 0;
	u32 rx_avail = 0;
	u32 tx_room = 0;

	for (;;) {
		if (test_ctx->exit_flag) {
			pr_info(TEST_MODULE_NAME ":Exit Test.\n");
			return;
		}

		pr_info(TEST_MODULE_NAME "--LOOPBACK WAIT FOR EVENT--.\n");

		/* Block until the notify callback reports pending rx data. */
		wait_event(test_ctx->wait_q,
			   atomic_read(&test_ctx->rx_notify_count));
		atomic_dec(&test_ctx->rx_notify_count);

		rx_avail = sdio_read_avail(test_ctx->ch);
		if (rx_avail == 0)
			continue;

		tx_room = sdio_write_avail(test_ctx->ch);
		if (tx_room < rx_avail) {
			pr_info(TEST_MODULE_NAME
				":not enough write avail.\n");
			continue;
		}

		status = sdio_read(test_ctx->ch, test_ctx->buf, rx_avail);
		if (status) {
			pr_info(TEST_MODULE_NAME
			       ":worker, sdio_read err=%d.\n", -status);
			continue;
		}
		test_ctx->rx_bytes += rx_avail;

		pr_debug(TEST_MODULE_NAME ":worker total rx bytes = 0x%x.\n",
			 test_ctx->rx_bytes);

		/* Bounce the chunk back out on the same channel. */
		status = sdio_write(test_ctx->ch, test_ctx->buf, rx_avail);
		if (status) {
			pr_info(TEST_MODULE_NAME
				":loopback sdio_write err=%d.\n",
				-status);
			continue;
		}
		test_ctx->tx_bytes += rx_avail;

		pr_debug(TEST_MODULE_NAME
			 ":loopback total tx bytes = 0x%x.\n",
			 test_ctx->tx_bytes);
	}
}
コード例 #3
0
/*
 * SDIO mux channel event callback: kick the matching worker when the
 * channel reports pending room (write) or pending data (read).
 */
static void sdio_mux_notify(void *_dev, unsigned event)
{
	DBG("%s: event %d notified\n", __func__, event);

	/* write avail may not be enough for a whole packet, but should be
	 * fine -- the write worker re-checks before sending */
	if (event == SDIO_EVENT_DATA_WRITE_AVAIL &&
	    sdio_write_avail(sdio_mux_ch) != 0)
		queue_work(sdio_mux_workqueue, &work_sdio_mux_write);

	if (event == SDIO_EVENT_DATA_READ_AVAIL &&
	    sdio_read_avail(sdio_mux_ch) != 0)
		queue_work(sdio_mux_workqueue, &work_sdio_mux_read);
}
コード例 #4
0
ファイル: sdio_smem.c プロジェクト: 84506232/ef40s_jb_kernel
/*
 * Work handler: copy whatever the SDIO channel has pending into the
 * client buffer, tracking how much of the current transfer is still
 * expected in the file-scope 'bytes_avail' counter.
 *
 * Protocol as visible here: a full transfer is client.size bytes,
 * delivered in one or more chunks.  Each chunk lands at offset
 * client.size - bytes_avail; when bytes_avail reaches zero the counter
 * is rearmed and the client is told the read completed.  Errors are
 * reported via SDIO_SMEM_EVENT_READ_ERR instead.
 */
static void sdio_smem_read(struct work_struct *work)
{
	int err;
	int read_avail;
	char *data = client.buf;

	if (!sdio_ch_opened)
		return;

	read_avail = sdio_read_avail(channel);
	/* More than we still expect (or a negative return) means the
	 * channel disagrees with our bookkeeping -- treat as an error. */
	if (read_avail > bytes_avail ||
		read_avail < 0) {
		pr_err("Error: read_avail=%d bytes_avail=%d\n",
			read_avail, bytes_avail);
		goto read_err;
	}

	if (read_avail == 0)
		return;

	/* Append this chunk after what was already received. */
	err = sdio_read(channel,
			&data[client.size - bytes_avail],
			read_avail);
	if (err) {
		pr_err("sdio_read error (%d)", err);
		goto read_err;
	}

	bytes_avail -= read_avail;
	pr_debug("read %d bytes (bytes_avail = %d)\n",
			read_avail, bytes_avail);

	if (!bytes_avail) {
		/* Whole transfer received: rearm for the next one and
		 * notify the client. */
		bytes_avail = client.size;
		err = client.cb_func(SDIO_SMEM_EVENT_READ_DONE);
	}
	/* NOTE(review): err is 0 here unless the callback just failed,
	 * so this only ever reports a callback error. */
	if (err)
		pr_err("error (%d) on callback\n", err);

	return;

read_err:
	if (sdio_ch_opened)
		client.cb_func(SDIO_SMEM_EVENT_READ_ERR);
	return;
}
コード例 #5
0
/*
 * __diag_sdio_send_req() - drain pending diag data from the SDIO channel.
 *
 * Variant that drops the payload (instead of forwarding it) when USB is
 * disconnected in USB_MODE, or when logging is disabled entirely.
 * Buffers larger than IN_BUF_SIZE but below MAX_IN_BUF_SIZE are grown
 * with krealloc; MAX_IN_BUF_SIZE or more is rejected.
 */
void __diag_sdio_send_req(void)
{
    int r = 0;
    void *buf = driver->buf_in_sdio;

    if (driver->sdio_ch && (!driver->in_busy_sdio)) {
        r = sdio_read_avail(driver->sdio_ch);

        if (r > IN_BUF_SIZE) {
            if (r < MAX_IN_BUF_SIZE) {
                printk(KERN_ALERT "\n diag: SDIO sending"
                       " in packets more than %d bytes", r);
                buf = krealloc(buf, r, GFP_KERNEL);
                /*
                 * Bug fix: krealloc may move the allocation.  Store the
                 * new pointer back so later calls never dereference the
                 * stale (freed) one.  On failure buf is NULL and the old
                 * buffer, still held by driver->buf_in_sdio, stays valid.
                 */
                if (buf)
                    driver->buf_in_sdio = buf;
            } else {
                printk(KERN_ALERT "\n diag: SDIO sending"
                       " in packets more than %d bytes", MAX_IN_BUF_SIZE);
                return;
            }
        }
        if (r > 0) {
            if (!buf)
                printk(KERN_INFO "Out of diagmem for SDIO\n");
            else {
                APPEND_DEBUG('i');
                sdio_read(driver->sdio_ch, buf, r);
                /* No consumer for the data: discard it and free the
                 * channel instead of forwarding. */
                if (((!driver->usb_connected) && (driver->
                                                  logging_mode == USB_MODE)) || (driver->
                                                          logging_mode == NO_LOGGING_MODE)) {
                    /*Drop the diag payload */
                    driver->in_busy_sdio = 0;
                    return;
                }
                APPEND_DEBUG('j');
                driver->write_ptr_mdm->length = r;
                driver->in_busy_sdio = 1;
                diag_device_write(buf, SDIO_DATA,
                                  driver->write_ptr_mdm);
            }
        }
    }
}
コード例 #6
0
ファイル: u_sdio.c プロジェクト: R-N/boston-2.6.32.x
/*
 * Drain and discard any data still pending on the port's SDIO channel,
 * 1 KB at a time.  Used when there is no USB host to forward data to.
 */
void gsdio_read_pending(struct gsdio_port *port)
{
	struct sdio_channel *ch;
	char buf[1024];
	int avail;

	if (!port) {
		pr_err("%s: port is null\n", __func__);
		return;
	}

	ch = port->sport_info->ch;
	if (!ch)
		return;

	for (;;) {
		avail = sdio_read_avail(ch);
		if (!avail)
			break;

		/* Read at most one scratch-buffer's worth per pass. */
		if (avail > 1024)
			avail = 1024;
		sdio_read(ch, buf, avail);

		pr_debug("%s: flushed out %d bytes\n", __func__, avail);
	}
}
コード例 #7
0
/*
 * Work handler: read one burst of muxed data from the SDIO channel,
 * then walk the sdio_mux_hdr frames packed inside it and dispatch each
 * one.  Re-queues itself at the end so reading continues for as long
 * as the workqueue keeps running; on OOM or read failure it re-queues
 * itself with (or without) a delay instead of dropping the work.
 */
static void sdio_mux_read_data(struct work_struct *work)
{
	struct sk_buff *skb_mux;
	void *ptr = 0;
	int sz, rc, len = 0;
	struct sdio_mux_hdr *hdr;

	DBG("[lte] %s: reading\n", __func__);
	/* should probably have a separate read lock */
	mutex_lock(&sdio_mux_lock);
	sz = sdio_read_avail(sdio_mux_ch);
	DBG("[lte] %s: read avail %d\n", __func__, sz);
	if (sz <= 0) {
		if (sz)
			pr_err("[lte] Error - %s: read avail failed %d\n", __func__, sz);
		mutex_unlock(&sdio_mux_lock);
		return;
	}

	/* NET_IP_ALIGN is probably not required here */
	if (sdio_partial_pkt.valid)
		len = sdio_partial_pkt.skb->len;

	/* If allocation fails attempt to get a smaller chunk of mem */
	do {
		skb_mux = __dev_alloc_skb(sz + NET_IP_ALIGN + len, GFP_KERNEL);
		if (skb_mux)
			break;

		DBG("[lte] %s: cannot allocate skb of size:%d + "
				"%d (NET_SKB_PAD)\n",
				__func__, sz + NET_IP_ALIGN + len, NET_SKB_PAD);
		/* the skb structure adds NET_SKB_PAD bytes to the memory
		 * request, which may push the actual request above PAGE_SIZE
		 * in that case, we need to iterate one more time to make sure
		 * we get the memory request under PAGE_SIZE
		 */
		if (sz + NET_IP_ALIGN + len + NET_SKB_PAD <= PAGE_SIZE) {
			pr_info("[lte] %s: allocation failed. retry later\n", __func__);
			mutex_unlock(&sdio_mux_lock);
			queue_delayed_work(sdio_mux_workqueue,
				&work_sdio_mux_read,
				msecs_to_jiffies(SDIO_OOM_RETRY_DELAY_MS));
			return;
		}
		sz /= 2;
	} while (1);

	/* Reserve headroom so a saved partial packet can be prepended. */
	skb_reserve(skb_mux, NET_IP_ALIGN + len);
	ptr = skb_put(skb_mux, sz);

	/* half second wakelock is fine? */
	wake_lock_timeout(&sdio_mux_ch_wakelock, HZ / 2);
	rc = sdio_read(sdio_mux_ch, ptr, sz);
	DBG("[lte] %s: read %d\n", __func__, rc);
	if (rc) {
		pr_err("[lte] Error - %s: sdio read failed %d\n", __func__, rc);
		dev_kfree_skb_any(skb_mux);
		mutex_unlock(&sdio_mux_lock);
		queue_delayed_work(sdio_mux_workqueue,
			&work_sdio_mux_read, 0);
		return;
	}
	mutex_unlock(&sdio_mux_lock);

	DBG_INC_READ_CNT(sz);
	DBG("[lte] %s: head %p data %p tail %p end %p len %d\n", __func__,
	    skb_mux->head, skb_mux->data, skb_mux->tail,
	    skb_mux->end, skb_mux->len);

	/* HTC */
	dbg_dump_buf("SDIO_RMNET->RD#", skb_mux->data, skb_mux->len);

	/* move to a separate function */
	/* probably do skb_pull instead of pointer adjustment */
	hdr = handle_sdio_partial_pkt(skb_mux);

	/* Walk the mux frames in this skb: stop on a truncated header
	 * (saved for the next read) or on a bad magic number. */
	while ((void *)hdr < (void *)skb_mux->tail) {

		if (((void *)hdr + sizeof(*hdr)) > (void *)skb_mux->tail) {
			/* handle partial header */
			sdio_mux_save_partial_pkt(hdr, skb_mux);
			break;
		}

		if (hdr->magic_num != SDIO_MUX_HDR_MAGIC_NO) {
			pr_err("[lte] Error - %s: packet error\n", __func__);
			break;
		}

		hdr = handle_sdio_mux_command(hdr, skb_mux);
	}
	dev_kfree_skb_any(skb_mux);

	DBG("[lte] %s: read done\n", __func__);
	queue_delayed_work(sdio_mux_workqueue, &work_sdio_mux_read, 0);
}
コード例 #8
0
/**
 * A2 Perf Test
 *
 * Pump up to 10000 packets through the A2 channel as fast as possible
 * in both directions, then report the measured throughput.  The buffer
 * is pre-filled with a 16-bit ramp; the first and last words of every
 * tx packet carry the packet counter.
 */
static void a2_performance_test(void)
{
	int ret = 0 ;
	u32 read_avail = 0;
	u32 write_avail = 0;
	int tx_packet_count = 0;
	int rx_packet_count = 0;
	int size = 0;
	u16 *buf16 = (u16 *) test_ctx->buf;
	int i;
	int total_bytes = 0;
	int max_packets = 10000;

	u64 start_jiffy, end_jiffy, delta_jiffies;
	unsigned int time_msec = 0;

	/* Fill the tx buffer with a deterministic 16-bit ramp pattern. */
	for (i = 0; i < test_ctx->buf_size / 2; i++)
		buf16[i] = (u16) (i & 0xFFFF);

	pr_info(TEST_MODULE_NAME "--A2 PERFORMANCE TEST START --.\n");

	sdio_set_write_threshold(test_ctx->ch, 2*1024);
	sdio_set_read_threshold(test_ctx->ch, 14*1024);
	sdio_set_poll_time(test_ctx->ch, 0);

	start_jiffy = get_jiffies_64(); /* read the current time */

	while (tx_packet_count < max_packets) {

		if (test_ctx->exit_flag) {
			pr_info(TEST_MODULE_NAME ":Exit Test.\n");
			return;
		}

		/* Sleep only when neither direction can make progress. */
		write_avail = sdio_write_avail(test_ctx->ch);
		read_avail = sdio_read_avail(test_ctx->ch);
		if ((write_avail == 0) && (read_avail == 0)) {
			ret = wait_any_notify();
			if (ret)
				goto exit_err;
		}

		write_avail = sdio_write_avail(test_ctx->ch);
		if (write_avail > 0) {
			size = min(test_ctx->buf_size, write_avail);
			pr_debug(TEST_MODULE_NAME ":tx size = %d.\n", size);
			if (atomic_read(&test_ctx->tx_notify_count) > 0)
				atomic_dec(&test_ctx->tx_notify_count);
			/* Tag the packet with its sequence number. */
			test_ctx->buf[0] = tx_packet_count;
			test_ctx->buf[(size/4)-1] = tx_packet_count;

			ret = sdio_write(test_ctx->ch, test_ctx->buf, size);
			if (ret) {
				pr_info(TEST_MODULE_NAME
					":sdio_write err=%d.\n",
					-ret);
				goto exit_err;
			}
			tx_packet_count++;
			test_ctx->tx_bytes += size;
		}

		read_avail = sdio_read_avail(test_ctx->ch);
		if (read_avail > 0) {
			size = min(test_ctx->buf_size, read_avail);
			pr_debug(TEST_MODULE_NAME ":rx size = %d.\n", size);
			if (atomic_read(&test_ctx->rx_notify_count) > 0)
				atomic_dec(&test_ctx->rx_notify_count);

			ret = sdio_read(test_ctx->ch, test_ctx->buf, size);
			if (ret) {
				pr_info(TEST_MODULE_NAME
					": sdio_read err=%d.\n",
					-ret);
				goto exit_err;
			}
			rx_packet_count++;
			test_ctx->rx_bytes += size;
		}

		pr_debug(TEST_MODULE_NAME
			 ":total rx bytes = %d , rx_packet#=%d.\n",
			 test_ctx->rx_bytes, rx_packet_count);
		pr_debug(TEST_MODULE_NAME
			 ":total tx bytes = %d , tx_packet#=%d.\n",
			 test_ctx->tx_bytes, tx_packet_count);

	} /* while (tx_packet_count < max_packets ) */

	end_jiffy = get_jiffies_64(); /* read the current time */

	delta_jiffies = end_jiffy - start_jiffy;
	time_msec = jiffies_to_msecs(delta_jiffies);

	pr_info(TEST_MODULE_NAME ":total rx bytes = 0x%x , rx_packet#=%d.\n",
		test_ctx->rx_bytes, rx_packet_count);
	pr_info(TEST_MODULE_NAME ":total tx bytes = 0x%x , tx_packet#=%d.\n",
		test_ctx->tx_bytes, tx_packet_count);

	total_bytes = (test_ctx->tx_bytes + test_ctx->rx_bytes);
	pr_err(TEST_MODULE_NAME ":total bytes = %d, time msec = %d.\n",
		   total_bytes , (int) time_msec);

	/*
	 * Bug fix: guard against a zero interval -- on a fast run the
	 * elapsed time can round down to 0 ms and the division below
	 * would be a divide-by-zero.
	 */
	if (time_msec)
		pr_err(TEST_MODULE_NAME ":Performance = %d Mbit/sec.\n",
		       (total_bytes / time_msec) * 8 / 1000);
	else
		pr_err(TEST_MODULE_NAME
		       ":run shorter than 1 msec, rate not computed.\n");

	pr_err(TEST_MODULE_NAME "--A2 PERFORMANCE TEST END --.\n");

	pr_err(TEST_MODULE_NAME ": TEST PASS.\n");
	return;

exit_err:
	pr_err(TEST_MODULE_NAME ": TEST FAIL.\n");
	return;
}
コード例 #9
0
/**
 * sender Test
 *
 * Send 100 fixed-size (512 byte) packets and read each one back,
 * verifying that the first and last 32-bit words echo the packet
 * counter.  Closes the channel on completion or failure.
 */
static void sender_test(void)
{
	int ret = 0 ;
	u32 read_avail = 0;
	u32 write_avail = 0;
	int packet_count = 0;
	int size = 512;
	u16 *buf16 = (u16 *) test_ctx->buf;
	int i;

	/* Deterministic 16-bit ramp payload. */
	for (i = 0 ; i < size / 2 ; i++)
		buf16[i] = (u16) (i & 0xFFFF);

	sdio_set_write_threshold(test_ctx->ch, 4*1024);
	sdio_set_read_threshold(test_ctx->ch, 16*1024); /* N/A with Rx EOT  */
	sdio_set_poll_time(test_ctx->ch, 0); /* N/A with Rx EOT  */

	while (packet_count < 100) {

		if (test_ctx->exit_flag) {
			pr_info(TEST_MODULE_NAME ":Exit Test.\n");
			return;
		}

		pr_info(TEST_MODULE_NAME "--SENDER WAIT FOR EVENT--.\n");

		/* wait for data ready event */
		write_avail = sdio_write_avail(test_ctx->ch);
		pr_debug(TEST_MODULE_NAME ":write_avail=%d\n", write_avail);
		if (write_avail < size) {
			wait_event(test_ctx->wait_q,
				   atomic_read(&test_ctx->tx_notify_count));
			atomic_dec(&test_ctx->tx_notify_count);
		}

		/* Re-check after the wakeup; skip the round if there is
		 * still not enough room. */
		write_avail = sdio_write_avail(test_ctx->ch);
		pr_debug(TEST_MODULE_NAME ":write_avail=%d\n", write_avail);
		if (write_avail < size) {
			pr_info(TEST_MODULE_NAME ":not enough write avail.\n");
			continue;
		}

		/* Tag first and last word with the sequence number. */
		test_ctx->buf[0] = packet_count;
		test_ctx->buf[(size/4)-1] = packet_count;

		ret = sdio_write(test_ctx->ch, test_ctx->buf, size);
		if (ret) {
			pr_info(TEST_MODULE_NAME ":sender sdio_write err=%d.\n",
				-ret);
			goto exit_err;
		}

		/* wait for read data ready event */
		pr_debug(TEST_MODULE_NAME ":sender wait for rx data.\n");
		/* NOTE(review): this read_avail value is overwritten below
		 * before it is used -- looks redundant; confirm. */
		read_avail = sdio_read_avail(test_ctx->ch);
		wait_event(test_ctx->wait_q,
			   atomic_read(&test_ctx->rx_notify_count));
		atomic_dec(&test_ctx->rx_notify_count);

		read_avail = sdio_read_avail(test_ctx->ch);

		if (read_avail != size) {
			pr_info(TEST_MODULE_NAME
				":read_avail size %d not as expected.\n",
				read_avail);
			goto exit_err;
		}

		memset(test_ctx->buf, 0x00, size);

		ret = sdio_read(test_ctx->ch, test_ctx->buf, size);
		if (ret) {
			pr_info(TEST_MODULE_NAME ":sender sdio_read err=%d.\n",
				-ret);
			goto exit_err;
		}


		/* Verify the looped-back payload carries our tags. */
		if ((test_ctx->buf[0] != packet_count) ||
		    (test_ctx->buf[(size/4)-1] != packet_count)) {
			pr_info(TEST_MODULE_NAME
				":sender sdio_read WRONG DATA.\n");
			goto exit_err;
		}

		test_ctx->tx_bytes += size;
		test_ctx->rx_bytes += size;
		packet_count++;

		pr_debug(TEST_MODULE_NAME
			 ":sender total rx bytes = 0x%x , packet#=%d.\n",
			 test_ctx->rx_bytes, packet_count);
		pr_debug(TEST_MODULE_NAME
			 ":sender total tx bytes = 0x%x , packet#=%d.\n",
			 test_ctx->tx_bytes, packet_count);

	} /* end of while */

	sdio_close(test_ctx->ch);

	pr_info(TEST_MODULE_NAME ": TEST PASS.\n");
	return;

exit_err:
	sdio_close(test_ctx->ch);

	pr_info(TEST_MODULE_NAME ": TEST FAIL.\n");
	return;
}
コード例 #10
0
ファイル: sdio_dmux.c プロジェクト: crazyi/ef39s_kernel
/*
 * Work handler: read one burst of muxed data from the SDIO channel and
 * dispatch the sdio_mux_hdr frames it contains.  Pins the worker
 * thread to CPU0 on first entry, then re-queues itself when done.
 */
static void sdio_mux_read_data(struct work_struct *work)
{
	struct sk_buff *skb_mux;
	void *ptr = 0;
	int sz, rc, len = 0;
	struct sdio_mux_hdr *hdr;
//[[2011.10.06 leecy add qualcomm patch	
	static int workqueue_pinned;

	/* One-time setup: pin this worker to CPU0. */
	if (!workqueue_pinned) {
		struct cpumask cpus;
		cpumask_clear(&cpus);
		cpumask_set_cpu(0, &cpus);
		
		if (sched_setaffinity(current->pid, &cpus))
			pr_err("%s: sdio_dmux set CPU affinity failed\n", __func__);

		workqueue_pinned = 1;
	}
//2011.10.06 leecy add qualcomm patch	]]

	DBG("%s: reading\n", __func__);
	/* should probably have a separate read lock */
	mutex_lock(&sdio_mux_lock);
	sz = sdio_read_avail(sdio_mux_ch);
	DBG("%s: read avail %d\n", __func__, sz);
	if (sz <= 0) {
		if (sz)
			pr_err("%s: read avail failed %d\n", __func__, sz);
		mutex_unlock(&sdio_mux_lock);
		return;
	}

	/* NET_IP_ALIGN is probably not required here */
	if (sdio_partial_pkt.valid)
		len = sdio_partial_pkt.skb->len;

	/* If allocation fails attempt to get a smaller chunk of mem */
	do {
		skb_mux = __dev_alloc_skb(sz + NET_IP_ALIGN + len, GFP_KERNEL);
		if (skb_mux)
			break;

		pr_err("%s: cannot allocate skb of size:%d + "
			"%d (NET_SKB_PAD)\n", __func__,
			sz + NET_IP_ALIGN + len, NET_SKB_PAD);
		/* the skb structure adds NET_SKB_PAD bytes to the memory
		 * request, which may push the actual request above PAGE_SIZE
		 * in that case, we need to iterate one more time to make sure
		 * we get the memory request under PAGE_SIZE
		 */
		if (sz + NET_IP_ALIGN + len + NET_SKB_PAD <= PAGE_SIZE) {
			pr_err("%s: allocation failed\n", __func__);
			mutex_unlock(&sdio_mux_lock);
			return;
		}
		sz /= 2;
	} while (1);

	/* Reserve headroom so a saved partial packet can be prepended. */
	skb_reserve(skb_mux, NET_IP_ALIGN + len);
	ptr = skb_put(skb_mux, sz);

	/* half second wakelock is fine? */
	wake_lock_timeout(&sdio_mux_ch_wakelock, HZ / 2);
	rc = sdio_read(sdio_mux_ch, ptr, sz);
	DBG("%s: read %d\n", __func__, rc);
	if (rc) {
		pr_err("%s: sdio read failed %d\n", __func__, rc);
		dev_kfree_skb_any(skb_mux);
		mutex_unlock(&sdio_mux_lock);
		queue_work(sdio_mux_workqueue, &work_sdio_mux_read);
		return;
	}
	mutex_unlock(&sdio_mux_lock);

	DBG_INC_READ_CNT(sz);
	DBG("%s: head %p data %p tail %p end %p len %d\n", __func__,
	    skb_mux->head, skb_mux->data, skb_mux->tail,
	    skb_mux->end, skb_mux->len);

	/* move to a separate function */
	/* probably do skb_pull instead of pointer adjustment */
	hdr = handle_sdio_partial_pkt(skb_mux);
	/* Walk the mux frames: stop on a truncated header (saved for the
	 * next read) or on a bad magic number. */
	while ((void *)hdr < (void *)skb_mux->tail) {

		if (((void *)hdr + sizeof(*hdr)) > (void *)skb_mux->tail) {
			/* handle partial header */
			sdio_mux_save_partial_pkt(hdr, skb_mux);
			break;
		}

		if (hdr->magic_num != SDIO_MUX_HDR_MAGIC_NO) {
			pr_err("%s: packet error\n", __func__);
			break;
		}

		hdr = handle_sdio_mux_command(hdr, skb_mux);
	}
	dev_kfree_skb_any(skb_mux);

	DBG("%s: read done\n", __func__);
	queue_work(sdio_mux_workqueue, &work_sdio_mux_read);
}
コード例 #11
0
/*
 * __diag_sdio_send_req() - pull pending diag data from the MDM SDIO
 * channel into one of two ping-pong buffers and forward it.
 *
 * Picks whichever of buf_in_sdio_1/2 is not currently busy, reads all
 * available bytes into it and hands it on via diag_device_write().
 * When qxdm2sd_drop is set while logging over USB, incoming payloads
 * are read and discarded (up to 20 retries) instead of forwarded.
 * Optional debug hex dumps are controlled by diag9k_debug_mask.
 */
void __diag_sdio_send_req(void)
{
	int r = 0;
	void *buf = NULL;
	int *in_busy_ptr = NULL;
	struct diag_request *write_ptr_modem = NULL;
	int retry = 0;
#if defined(CONFIG_MACH_VIGOR)
	int type;
#endif

	/* Choose the first free ping-pong buffer; buf stays NULL (and we
	 * do nothing below) when both are still in flight. */
	if (!driver->in_busy_sdio_1) {
		buf = driver->buf_in_sdio_1;
		write_ptr_modem = driver->write_ptr_mdm_1;
		in_busy_ptr = &(driver->in_busy_sdio_1);
	} else if (!driver->in_busy_sdio_2) {
		buf = driver->buf_in_sdio_2;
		write_ptr_modem = driver->write_ptr_mdm_2;
		in_busy_ptr = &(driver->in_busy_sdio_2);
	}

	APPEND_DEBUG('Z');
	if (driver->sdio_ch && buf) {
		r = sdio_read_avail(driver->sdio_ch);

		if (r > MAX_IN_BUF_SIZE) {
				DIAG_ERR("\n diag: SDIO sending"
					  " in packets more than %d bytes\n", r);
		}
		if (r > 0) {
			if (!buf)
				DIAG_INFO("Out of diagmem for SDIO\n");
			else {
drop:
				APPEND_DEBUG('i');
				sdio_read(driver->sdio_ch, buf, r);
				if ((driver->qxdm2sd_drop) && (driver->logging_mode == USB_MODE)) {
					/*Drop the diag payload */
					DIAG_INFO("%s:Drop the diag payload :%d\n", __func__, retry);
					print_hex_dump(KERN_DEBUG, "Drop Packet Data"
						" from 9K(first 16 bytes)", DUMP_PREFIX_ADDRESS, 16, 1, buf, 16, 1);
					driver->in_busy_sdio_1 = 0;
					driver->in_busy_sdio_2 = 0;
					r = sdio_read_avail(driver->sdio_ch);
					/* Give up after 20 drain attempts. */
					if (++retry > 20) {
						driver->qxdm2sd_drop = 0;
						return;
					}
					if (r)
						goto drop;
					else {
						driver->qxdm2sd_drop = 0;
						return;
					}
				}
				APPEND_DEBUG('j');

				if (diag9k_debug_mask) {
					switch (diag9k_debug_mask) {
					case 1:
						print_hex_dump(KERN_DEBUG, "Read Packet Data"
						" from 9K(first 16 bytes)", DUMP_PREFIX_ADDRESS, 16, 1, buf, 16, 1);
						break;
					case 2:
						print_hex_dump(KERN_DEBUG, "Read Packet Data"
						" from 9K(first 16 bytes)", DUMP_PREFIX_ADDRESS, 16, 1, buf, 16, 1);
						/* Bug fix: prefix_type and rowsize/groupsize
						 * were swapped here (16, 1, DUMP_PREFIX_ADDRESS),
						 * producing a garbled dump. Correct order is
						 * (prefix_type, rowsize, groupsize). */
						print_hex_dump(KERN_DEBUG, "Read Packet Data"
						" from 9K(last 16 bytes) ", DUMP_PREFIX_ADDRESS, 16, 1, buf+r-16, 16, 1);
						break;
					default:
						print_hex_dump(KERN_DEBUG, "Read Packet Data"
						" from 9K ", DUMP_PREFIX_ADDRESS, 16, 1, buf, r, 1);

					}
				}
#if defined(CONFIG_MACH_VIGOR)
				/* EPST command frames are routed straight to
				 * userspace instead of the diag core. */
				type = checkcmd_modem_epst(buf);
				if (type) {
					modem_to_userspace(buf, r, type, 1);
					return;
				}
#endif

				write_ptr_modem->length = r;
				*in_busy_ptr = 1;
				diag_device_write(buf, SDIO_DATA,
						 write_ptr_modem);

			}
		}
	}
}
コード例 #12
0
ファイル: u_sdio.c プロジェクト: R-N/boston-2.6.32.x
/*
 * Work handler: pull data from the port's SDIO channel and push it to
 * the host over the USB IN endpoint, one usb_request per chunk, until
 * the request pool is exhausted or no data remains.
 *
 * Runs with port->port_lock held; the lock is dropped around the
 * blocking sdio_read() and usb_ep_queue() calls and re-taken after,
 * so the usb-connected state is re-checked on every error path.
 */
void gsdio_tx_pull(struct work_struct *w)
{
	struct gsdio_port *port = container_of(w, struct gsdio_port, pull);
	struct list_head *pool = &port->write_pool;

	pr_debug("%s: port:%p port#%d pool:%p\n", __func__,
			port, port->port_num, pool);

	if (!port->port_usb) {
		pr_err("%s: usb disconnected\n", __func__);

		/* take out all the pending data from sdio */
		gsdio_read_pending(port);

		return;
	}

	spin_lock_irq(&port->port_lock);

	while (!list_empty(pool)) {
		int avail;
		struct usb_ep *in = port->port_usb->in;
		struct sdio_channel *ch = port->sport_info->ch;
		struct usb_request *req;
		unsigned len = TX_BUF_SIZE;
		int ret;


		req = list_entry(pool->next, struct usb_request, list);

		if (!port->sdio_open) {
			pr_debug("%s: SDIO channel is not open\n", __func__);
			goto tx_pull_end;
		}

		avail = sdio_read_avail(ch);
		if (!avail) {
			/* REVISIT: for ZLP */
			pr_debug("%s: read_avail:%d port:%p port#%d\n",
					__func__, avail, port, port->port_num);
			goto tx_pull_end;
		}

		/* Clamp the read to the request buffer size. */
		if (avail > len)
			avail = len;

		list_del(&req->list);

		/* sdio_read() may block; drop the lock around it. */
		spin_unlock_irq(&port->port_lock);
		ret = sdio_read(ch, req->buf, avail);
		spin_lock_irq(&port->port_lock);
		if (ret) {
			pr_err("%s: port:%p port#%d sdio read failed err:%d",
					__func__, port, port->port_num, ret);

			/* check if usb is still active */
			if (!port->port_usb)
				gsdio_free_req(in, req);
			else
				list_add(&req->list, pool);
			goto tx_pull_end;
		}

		req->length = avail;

		spin_unlock_irq(&port->port_lock);
		ret = usb_ep_queue(in, req, GFP_KERNEL);
		spin_lock_irq(&port->port_lock);
		if (ret) {
			pr_err("%s: usb ep out queue failed"
					"port:%p, port#%d err:%d\n",
					__func__, port, port->port_num, ret);

			/* could be usb disconnected */
			if (!port->port_usb)
				gsdio_free_req(in, req);
			else
				list_add(&req->list, pool);
			goto tx_pull_end;
		}

		port->nbytes_tolaptop += avail;
	}
tx_pull_end:
	spin_unlock_irq(&port->port_lock);
}
コード例 #13
0
/*
 * Work handler: read one burst of muxed data from the SDIO channel and
 * dispatch the sdio_mux_hdr frames it contains, then re-queue itself
 * so reading continues for as long as the workqueue keeps running.
 */
static void sdio_mux_read_data(struct work_struct *work)
{
	struct sk_buff *skb_mux;
	void *ptr = 0;
	int sz, rc, len = 0;
	struct sdio_mux_hdr *hdr;

	DBG("%s: reading\n", __func__);
	/* should probably have a separate read lock */
	mutex_lock(&sdio_mux_lock);
	sz = sdio_read_avail(sdio_mux_ch);
	DBG("%s: read avail %d\n", __func__, sz);
	if (sz <= 0) {
		if (sz)
			pr_err("%s: read avail failed %d\n", __func__, sz);
		mutex_unlock(&sdio_mux_lock);
		return;
	}

	/* NET_IP_ALIGN is probably not required here */
	if (sdio_partial_pkt.valid)
		len = sdio_partial_pkt.skb->len;
	/* if allocation fails attempt to get a smaller chunk of mem */
	do {
		skb_mux = dev_alloc_skb(sz + NET_IP_ALIGN + len);
		if (skb_mux)
			break;
		pr_err("%s: cannot allocate skb of size:%d\n", __func__,
			sz + NET_IP_ALIGN + len);
		if (sz + NET_IP_ALIGN + len <= PAGE_SIZE) {
			pr_err("%s: allocation failed\n", __func__);
			mutex_unlock(&sdio_mux_lock);
			return;
		}
		sz /= 2;
	} while (1);

	/* Reserve headroom so a saved partial packet can be prepended. */
	skb_reserve(skb_mux, NET_IP_ALIGN + len);
	ptr = skb_put(skb_mux, sz);

	/* half second wakelock is fine? */
	wake_lock_timeout(&sdio_mux_ch_wakelock, HZ / 2);
	rc = sdio_read(sdio_mux_ch, ptr, sz);
	DBG("%s: read %d\n", __func__, rc);
	if (rc) {
		pr_err("%s: sdio read failed %d\n", __func__, rc);
		dev_kfree_skb_any(skb_mux);
		mutex_unlock(&sdio_mux_lock);
		queue_work(sdio_mux_workqueue, &work_sdio_mux_read);
		return;
	}
	mutex_unlock(&sdio_mux_lock);

	DBG_INC_READ_CNT(sz);
	DBG("%s: head %p data %p tail %p end %p len %d\n", __func__,
	    skb_mux->head, skb_mux->data, skb_mux->tail,
	    skb_mux->end, skb_mux->len);

	/* move to a separate function */
	/* probably do skb_pull instead of pointer adjustment */
	hdr = handle_sdio_partial_pkt(skb_mux);
	/* Walk the mux frames: stop on a truncated header (saved for the
	 * next read) or on a bad magic number. */
	while ((void *)hdr < (void *)skb_mux->tail) {

		if (((void *)hdr + sizeof(*hdr)) > (void *)skb_mux->tail) {
			/* handle partial header */
			sdio_mux_save_partial_pkt(hdr, skb_mux);
			break;
		}

		if (hdr->magic_num != SDIO_MUX_HDR_MAGIC_NO) {
			pr_err("%s: packet error\n", __func__);
			break;
		}

		hdr = handle_sdio_mux_command(hdr, skb_mux);
	}
	dev_kfree_skb_any(skb_mux);

	DBG("%s: read done\n", __func__);
	queue_work(sdio_mux_workqueue, &work_sdio_mux_read);
}