static int sdio_mux_write(struct sk_buff *skb)
{
	int rc, sz;

	/* HTC */
	dbg_dump_buf("SDIO_RMNET->WD#", skb->data, skb->len);

	mutex_lock(&sdio_mux_lock);
	sz = sdio_write_avail(sdio_mux_ch);
	DBG("[lte] %s: avail %d len %d\n", __func__, sz, skb->len);
	if (skb->len <= sz) {
		rc = sdio_write(sdio_mux_ch, skb->data, skb->len);
		DBG("[lte] %s: write returned %d\n", __func__, rc);
		if (rc)
			rc = -EAGAIN;
		else
			DBG_INC_WRITE_CNT(skb->len);
	} else {
		rc = -ENOMEM;
		/* pr_err("[lte] Error - %s, ENOMEM\n", __func__); */
	}

	mutex_unlock(&sdio_mux_lock);
	return rc;
}
Example #2
void diag_read_mdm_work_fn(struct work_struct *work)
{
#ifdef CONFIG_LGE_USB_MDM_DIAG_DISABLE
	if (mdm_diag_enable == 1)
		return;
#endif
	if (driver->sdio_ch) {
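		/* sleep until the SDIO channel has room for read_len_mdm bytes, or the channel goes away */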
		wait_event_interruptible(driver->wait_q, ((sdio_write_avail
			(driver->sdio_ch) >= driver->read_len_mdm) ||
				 !(driver->sdio_ch)));
		if (!(driver->sdio_ch)) {
			pr_alert("diag: sdio channel not valid");
			return;
		}
		if (driver->sdio_ch && driver->usb_buf_mdm_out &&
						 (driver->read_len_mdm > 0))
			sdio_write(driver->sdio_ch, driver->usb_buf_mdm_out,
							 driver->read_len_mdm);
		APPEND_DEBUG('x');
		driver->usb_read_mdm_ptr->buf = driver->usb_buf_mdm_out;
		driver->usb_read_mdm_ptr->length = USB_MAX_OUT_BUF;
		usb_diag_read(driver->mdm_ch, driver->usb_read_mdm_ptr);
		APPEND_DEBUG('y');
	}
}
void diag_read_mdm_work_fn(struct work_struct *work)
{
	if (diag9k_debug_mask)
		DIAG_INFO("%s \n", __func__);

	if (driver->sdio_ch) {
		wait_event_interruptible(driver->wait_q, (sdio_write_avail
				(driver->sdio_ch) >= driver->read_len_mdm));
		if (!strcmp(DIAG_MDM, usb_ch_name)) {
			if (driver->sdio_ch && driver->usb_buf_mdm_out &&
						 (driver->read_len_mdm > 0))
				sdio_write(driver->sdio_ch, driver->usb_buf_mdm_out,
							 driver->read_len_mdm);

		} else {
			if (driver->sdio_ch && driver->usb_read_ptr &&
						 (driver->read_len_mdm > 0))
				sdio_write(driver->sdio_ch, driver->usb_buf_out,
							 driver->read_len_mdm);

		}

		APPEND_DEBUG('x');
		if (!strcmp(DIAG_MDM, usb_ch_name)) {
			driver->usb_read_mdm_ptr->buf = driver->usb_buf_mdm_out;
			driver->usb_read_mdm_ptr->length = USB_MAX_OUT_BUF;
			usb_diag_read(driver->mdm_ch, driver->usb_read_mdm_ptr);
		} else {
			driver->usb_read_ptr->buf = driver->usb_buf_out;
			driver->usb_read_ptr->length = USB_MAX_OUT_BUF;
			usb_diag_read(driver->legacy_ch, driver->usb_read_ptr);
		}
		APPEND_DEBUG('y');
	}
}
/**
 * Loopback Test
 */
static void loopback_test(void)
{
	int ret = 0;
	u32 read_avail = 0;
	u32 write_avail = 0;

	while (1) {

		if (test_ctx->exit_flag) {
			pr_info(TEST_MODULE_NAME ":Exit Test.\n");
			return;
		}

		pr_info(TEST_MODULE_NAME "--LOOPBACK WAIT FOR EVENT--.\n");
		/* wait for data ready event */
		wait_event(test_ctx->wait_q,
			   atomic_read(&test_ctx->rx_notify_count));
		atomic_dec(&test_ctx->rx_notify_count);

		read_avail = sdio_read_avail(test_ctx->ch);
		if (read_avail == 0)
			continue;


		write_avail = sdio_write_avail(test_ctx->ch);
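		/* echo back only if the channel can absorb everything waiting to be read */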
		if (write_avail < read_avail) {
			pr_info(TEST_MODULE_NAME
				":not enough write avail.\n");
			continue;
		}

		ret = sdio_read(test_ctx->ch, test_ctx->buf, read_avail);
		if (ret) {
			pr_info(TEST_MODULE_NAME
			       ":worker, sdio_read err=%d.\n", -ret);
			continue;
		}
		test_ctx->rx_bytes += read_avail;

		pr_debug(TEST_MODULE_NAME ":worker total rx bytes = 0x%x.\n",
			 test_ctx->rx_bytes);


		ret = sdio_write(test_ctx->ch,
				 test_ctx->buf, read_avail);
		if (ret) {
			pr_info(TEST_MODULE_NAME
				":loopback sdio_write err=%d.\n",
				-ret);
			continue;
		}
		test_ctx->tx_bytes += read_avail;

		pr_debug(TEST_MODULE_NAME
			 ":loopback total tx bytes = 0x%x.\n",
			 test_ctx->tx_bytes);
	} /* end of while */
}
Example #5
void gsdio_rx_push(struct work_struct *w)
{
	struct gsdio_port *port = container_of(w, struct gsdio_port, push);
	struct list_head *q = &port->read_queue;
	int ret;

	pr_debug("%s: port:%p port#%d read_queue:%p", __func__,
			port, port->port_num, q);

	spin_lock_irq(&port->port_lock);

	while (!list_empty(q)) {
		struct usb_request *req;

		req = list_first_entry(q, struct usb_request, list);

		switch (req->status) {
		case -ESHUTDOWN:
			pr_debug("%s: req status shutdown portno#%d port:%p",
					__func__, port->port_num, port);
			goto rx_push_end;
		default:
			pr_warning("%s: port:%p port#%d"
					" Unexpected Rx Status:%d\n", __func__,
					port, port->port_num, req->status);
			/* FALL THROUGH */
		case 0:
			/* normal completion */
			break;
		}

		if (!port->sdio_open) {
			pr_err("%s: sio channel is not open\n", __func__);
			list_move(&req->list, &port->read_pool);
			goto rx_push_end;
		}

		ret = gsdio_write(port, req);
		if (ret || port->n_read)
			goto rx_push_end;

		list_move(&req->list, &port->read_pool);
	}
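
	/* if anything is still queued, re-queue the push work as soon as the channel has write space */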

	if (port->sdio_open && !list_empty(q)) {
		if (sdio_write_avail(port->sport_info->ch))
			queue_work(gsdio_wq, &port->push);
	}
rx_push_end:
	spin_unlock_irq(&port->port_lock);

	/* start queuing out requests again to host */
	gsdio_start_rx(port);
}
static void sdio_mux_notify(void *_dev, unsigned event)
{
    DBG("%s: event %d notified\n", __func__, event);

    /* write avail may not be enough for a packet, but should be fine */
    if ((event == SDIO_EVENT_DATA_WRITE_AVAIL) &&
            sdio_write_avail(sdio_mux_ch))
        queue_work(sdio_mux_workqueue, &work_sdio_mux_write);

    if ((event == SDIO_EVENT_DATA_READ_AVAIL) &&
            sdio_read_avail(sdio_mux_ch))
        queue_work(sdio_mux_workqueue, &work_sdio_mux_read);
}
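
A minimal sketch of the check-then-write pattern these examples share, assuming a hypothetical helper built only on the sdio_write_avail()/sdio_write() calls shown above (the channel pointer my_ch and the struct sdio_channel type are taken from the MSM SDIO AL headers, not from any single snippet here):

static int example_flow_controlled_write(struct sdio_channel *my_ch,
					 void *data, unsigned int len)
{
	/* write only when the whole buffer fits; otherwise report -EAGAIN
	 * so the caller can retry on the next WRITE_AVAIL notification
	 */
	if (sdio_write_avail(my_ch) < len)
		return -EAGAIN;

	return sdio_write(my_ch, data, len);
}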
Example #7
void diag_read_mdm_work_fn(struct work_struct *work)
{
	if (driver->sdio_ch) {
		wait_event_interruptible(driver->wait_q, (sdio_write_avail
				(driver->sdio_ch) >= driver->read_len_mdm));
		if (driver->sdio_ch && driver->usb_buf_mdm_out &&
						 (driver->read_len_mdm > 0))
			sdio_write(driver->sdio_ch, driver->usb_buf_mdm_out,
							 driver->read_len_mdm);
		APPEND_DEBUG('x');
		driver->usb_read_mdm_ptr->buf = driver->usb_buf_mdm_out;
		driver->usb_read_mdm_ptr->length = USB_MAX_OUT_BUF;
		usb_diag_read(driver->mdm_ch, driver->usb_read_mdm_ptr);
		APPEND_DEBUG('y');
	}
}
static int sdio_mux_write(struct sk_buff *skb)
{
    int rc, sz;

    mutex_lock(&sdio_mux_lock);
    sz = sdio_write_avail(sdio_mux_ch);
    DBG("%s: avail %d len %d\n", __func__, sz, skb->len);
    if (skb->len <= sz) {
        rc = sdio_write(sdio_mux_ch, skb->data, skb->len);
        DBG("%s: write returned %d\n", __func__, rc);
        if (rc == 0)
            DBG_INC_WRITE_CNT(skb->len);
    } else
        rc = -ENOMEM;

    mutex_unlock(&sdio_mux_lock);
    return rc;
}
static int sdio_mux_write_cmd(void *data, uint32_t len)
{
	int avail, rc;
	for (;;) {
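		/* poll every 250 ms until the command fits in the channel and the write succeeds */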
		mutex_lock(&sdio_mux_lock);
		avail = sdio_write_avail(sdio_mux_ch);
		DBG("[lte] %s: avail %d len %d\n", __func__, avail, len);
		if (avail >= len) {
			rc = sdio_write(sdio_mux_ch, data, len);
			DBG("[lte] %s: write returned %d\n", __func__, rc);
			if (!rc) {
				DBG_INC_WRITE_CNT(len);
				break;
			}
		}
		mutex_unlock(&sdio_mux_lock);
		msleep(250);
	}
	mutex_unlock(&sdio_mux_lock);
	return 0;
}
/*  Modem_request command */
static int lge_dm_tty_modem_request(const unsigned char *buf, int count)
{
	short modem_chip;
	int length;

	memcpy(&modem_chip, buf + dm_modem_request_header_length,
							sizeof(modem_chip));

	length = dm_modem_request_header_length + sizeof(modem_chip);

	if (modem_chip == Primary_modem_chip) {
		/* send masks to modem */
		diag_process_hdlc((void *)buf + length, count - length);
	} else if (modem_chip == Secondary_modem_chip) {

#ifdef CONFIG_DIAG_SDIO_PIPE
		/* send masks to 9k */
		if (driver->sdio_ch) {
			wait_event_interruptible(driver->wait_q,
				(sdio_write_avail(driver->sdio_ch)
					>= (count - length)));
			if (driver->sdio_ch && ((count - length) > 0)) {
				sdio_write(driver->sdio_ch,
					(void *)buf + length,
						count - length);
			}
		}
#endif

	} else {
		pr_info(DM_TTY_MODULE_NAME ": %s: lge_dm_tty_write "
			"modem_number %d "
			"error count = %d length = %d\n",
			__func__, modem_chip, count, length);
	}

	return count;
}
static int diagchar_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *ppos)
{
	int err, ret = 0, pkt_type;

#ifdef CONFIG_LGE_DM_APP
	char *buf_cmp;
#endif

#ifdef DIAG_DEBUG
	int length = 0, i;
#endif
	struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 };
	struct diag_hdlc_dest_type enc = { NULL, NULL, 0 };
	void *buf_copy = NULL;
	int payload_size;
#ifdef CONFIG_DIAG_OVER_USB
	if (((driver->logging_mode == USB_MODE) && (!driver->usb_connected)) ||
				(driver->logging_mode == NO_LOGGING_MODE)) {
		/*Drop the diag payload */
		return -EIO;
	}
#endif /* DIAG over USB */
	/* Get the packet type F3/log/event/Pkt response */
	err = copy_from_user((&pkt_type), buf, 4);
	/* First 4 bytes indicate the type of payload - ignore these */
	payload_size = count - 4;

#ifdef CONFIG_LGE_DM_APP
	if (driver->logging_mode == DM_APP_MODE) {
		/* only diag cmd #250 for supporting testmode tool */
		buf_cmp = (char *)buf + 4;
		if (*(buf_cmp) != 0xFA)
			return 0;
	}
#endif

	if (payload_size > USER_SPACE_DATA) {
		pr_err("diag: Dropping packet, packet payload size crosses 8KB limit. Current payload size %d\n",
				payload_size);
		driver->dropped_count++;
		return -EBADMSG;
	}
	if (pkt_type == DCI_DATA_TYPE) {
		err = copy_from_user(driver->user_space_data, buf + 4,
							 payload_size);
		if (err) {
			pr_alert("diag: copy failed for DCI data\n");
			return DIAG_DCI_SEND_DATA_FAIL;
		}
		err = diag_process_dci_client(driver->user_space_data,
							payload_size);
		return err;
	}
	if (pkt_type == USER_SPACE_LOG_TYPE) {
		err = copy_from_user(driver->user_space_data, buf + 4,
							 payload_size);
		/* Check masks for On-Device logging */
		if (driver->mask_check) {
			if (!mask_request_validate(driver->user_space_data)) {
				pr_alert("diag: mask request Invalid\n");
				return -EFAULT;
			}
		}
		buf = buf + 4;
#ifdef DIAG_DEBUG
		pr_debug("diag: user space data %d\n", payload_size);
		for (i = 0; i < payload_size; i++)
			pr_debug("\t %x", *((driver->user_space_data)+i));
#endif
#ifdef CONFIG_DIAG_SDIO_PIPE
		/* send masks to 9k too */
		if (driver->sdio_ch) {
			wait_event_interruptible(driver->wait_q,
				 (sdio_write_avail(driver->sdio_ch) >=
					 payload_size));
			if (driver->sdio_ch && (payload_size > 0)) {
				sdio_write(driver->sdio_ch, (void *)
				   (driver->user_space_data), payload_size);
			}
		}
#endif
#ifdef CONFIG_DIAG_BRIDGE_CODE
		/* send masks to 9k too */
		if (driver->hsic_ch && (payload_size > 0)) {
			/* wait sending mask updates if HSIC ch not ready */
			if (driver->in_busy_hsic_write)
				wait_event_interruptible(driver->wait_q,
					(driver->in_busy_hsic_write != 1));
			driver->in_busy_hsic_write = 1;
			driver->in_busy_hsic_read_on_device = 0;
			err = diag_bridge_write(driver->user_space_data,
							 payload_size);
			if (err) {
				pr_err("diag: err sending mask to MDM: %d\n",
									 err);
				/*
				* If the error is recoverable, then clear
				* the write flag, so we will resubmit a
				* write on the next frame.  Otherwise, don't
				* resubmit a write on the next frame.
				*/
				if ((-ESHUTDOWN) != err)
					driver->in_busy_hsic_write = 0;
			}
		}
#endif
		/* send masks to 8k now */
		diag_process_hdlc((void *)(driver->user_space_data),
							 payload_size);
		return 0;
	}

	if (payload_size > itemsize) {
		pr_err("diag: Dropping packet, packet payload size crosses"
				"4KB limit. Current payload size %d\n",
				payload_size);
		driver->dropped_count++;
		return -EBADMSG;
	}

	buf_copy = diagmem_alloc(driver, payload_size, POOL_TYPE_COPY);
	if (!buf_copy) {
		driver->dropped_count++;
		return -ENOMEM;
	}

	err = copy_from_user(buf_copy, buf + 4, payload_size);
	if (err) {
		printk(KERN_INFO "diagchar : copy_from_user failed\n");
		ret = -EFAULT;
		goto fail_free_copy;
	}
#ifdef DIAG_DEBUG
	printk(KERN_DEBUG "data is -->\n");
	for (i = 0; i < payload_size; i++)
		printk(KERN_DEBUG "\t %x \t", *(((unsigned char *)buf_copy)+i));
#endif
	send.state = DIAG_STATE_START;
	send.pkt = buf_copy;
	send.last = (void *)(buf_copy + payload_size - 1);
	send.terminate = 1;
#ifdef DIAG_DEBUG
	pr_debug("diag: Already used bytes in buffer %d, and"
	" incoming payload size is %d\n", driver->used, payload_size);
	printk(KERN_DEBUG "hdlc encoded data is -->\n");
	for (i = 0; i < payload_size + 8; i++) {
		printk(KERN_DEBUG "\t %x \t", *(((unsigned char *)buf_hdlc)+i));
		if (*(((unsigned char *)buf_hdlc)+i) != 0x7e)
			length++;
	}
#endif
	mutex_lock(&driver->diagchar_mutex);
	if (!buf_hdlc)
		buf_hdlc = diagmem_alloc(driver, HDLC_OUT_BUF_SIZE,
						 POOL_TYPE_HDLC);
	if (!buf_hdlc) {
		ret = -ENOMEM;
		goto fail_free_hdlc;
	}
	if (HDLC_OUT_BUF_SIZE - driver->used <= (2*payload_size) + 3) {
		err = diag_device_write(buf_hdlc, APPS_DATA, NULL);
		if (err) {
			/*Free the buffer right away if write failed */
			diagmem_free(driver, buf_hdlc, POOL_TYPE_HDLC);
			diagmem_free(driver, (unsigned char *)driver->
				 write_ptr_svc, POOL_TYPE_WRITE_STRUCT);
			ret = -EIO;
			goto fail_free_hdlc;
		}
		buf_hdlc = NULL;
		driver->used = 0;
		buf_hdlc = diagmem_alloc(driver, HDLC_OUT_BUF_SIZE,
							 POOL_TYPE_HDLC);
		if (!buf_hdlc) {
			ret = -ENOMEM;
			goto fail_free_hdlc;
		}
	}

	enc.dest = buf_hdlc + driver->used;
	enc.dest_last = (void *)(buf_hdlc + driver->used + 2*payload_size + 3);
	diag_hdlc_encode(&send, &enc);

	/* This is to check if after HDLC encoding, we are still within the
	 limits of aggregation buffer. If not, we write out the current buffer
	and start aggregation in a newly allocated buffer */
	if ((unsigned int) enc.dest >=
		 (unsigned int)(buf_hdlc + HDLC_OUT_BUF_SIZE)) {
		err = diag_device_write(buf_hdlc, APPS_DATA, NULL);
		if (err) {
			/*Free the buffer right away if write failed */
			diagmem_free(driver, buf_hdlc, POOL_TYPE_HDLC);
			diagmem_free(driver, (unsigned char *)driver->
				 write_ptr_svc, POOL_TYPE_WRITE_STRUCT);
			ret = -EIO;
			goto fail_free_hdlc;
		}
		buf_hdlc = NULL;
		driver->used = 0;
		buf_hdlc = diagmem_alloc(driver, HDLC_OUT_BUF_SIZE,
							 POOL_TYPE_HDLC);
		if (!buf_hdlc) {
			ret = -ENOMEM;
			goto fail_free_hdlc;
		}
		enc.dest = buf_hdlc + driver->used;
		enc.dest_last = (void *)(buf_hdlc + driver->used +
							 (2*payload_size) + 3);
		diag_hdlc_encode(&send, &enc);
	}

	driver->used = (uint32_t) enc.dest - (uint32_t) buf_hdlc;
	if (pkt_type == DATA_TYPE_RESPONSE) {
		err = diag_device_write(buf_hdlc, APPS_DATA, NULL);
		if (err) {
			/*Free the buffer right away if write failed */
			diagmem_free(driver, buf_hdlc, POOL_TYPE_HDLC);
			diagmem_free(driver, (unsigned char *)driver->
				 write_ptr_svc, POOL_TYPE_WRITE_STRUCT);
			ret = -EIO;
			goto fail_free_hdlc;
		}
		buf_hdlc = NULL;
		driver->used = 0;
	}

	mutex_unlock(&driver->diagchar_mutex);
	diagmem_free(driver, buf_copy, POOL_TYPE_COPY);
	if (!timer_in_progress)	{
		timer_in_progress = 1;
		ret = mod_timer(&drain_timer, jiffies + msecs_to_jiffies(500));
	}
	return 0;

fail_free_hdlc:
	buf_hdlc = NULL;
	driver->used = 0;
	diagmem_free(driver, buf_copy, POOL_TYPE_COPY);
	mutex_unlock(&driver->diagchar_mutex);
	return ret;

fail_free_copy:
	diagmem_free(driver, buf_copy, POOL_TYPE_COPY);
	return ret;
}
static void sdio_mux_send_open_cmd(uint32_t id)
{
    struct sdio_mux_hdr hdr = {
        .magic_num = SDIO_MUX_HDR_MAGIC_NO,
        .cmd = SDIO_MUX_HDR_CMD_OPEN,
        .reserved = 0,
        .ch_id = id,
        .pkt_len = 0,
        .pad_len = 0
    };

    sdio_mux_write_cmd((void *)&hdr, sizeof(hdr));
}

static void sdio_mux_write_data(struct work_struct *work)
{
    int rc, reschedule = 0;
    int notify = 0;
    struct sk_buff *skb;
    unsigned long flags;
    int avail;
    int ch_id;

    spin_lock_irqsave(&sdio_mux_write_lock, flags);
    while ((skb = __skb_dequeue(&sdio_mux_write_pool))) {
        ch_id = ((struct sdio_mux_hdr *)skb->data)->ch_id;

        avail = sdio_write_avail(sdio_mux_ch);
        if (avail < skb->len) {
            /* we may have to wait for write avail
             * notification from sdio al
             */
            DBG("%s: sdio_write_avail(%d) < skb->len(%d)\n",
                __func__, avail, skb->len);

            reschedule = 1;
            break;
        }
        spin_unlock_irqrestore(&sdio_mux_write_lock, flags);
        rc = sdio_mux_write(skb);
        spin_lock_irqsave(&sdio_mux_write_lock, flags);
        if (rc == 0) {
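            /* write accepted: drop the per-channel tx count and hand the skb back to its owner */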

            spin_lock(&sdio_ch[ch_id].lock);
            sdio_ch[ch_id].num_tx_pkts--;
            spin_unlock(&sdio_ch[ch_id].lock);

            if (sdio_ch[ch_id].write_done)
                sdio_ch[ch_id].write_done(
                    sdio_ch[ch_id].priv, skb);
            else
                dev_kfree_skb_any(skb);
        } else if (rc == -EAGAIN || rc == -ENOMEM) {
            /* recoverable error - retry again later */
            reschedule = 1;
            break;
        } else if (rc == -ENODEV) {
            /*
             * sdio_al suffered some kind of fatal error
             * prevent future writes and clean up pending ones
             */
            fatal_error = 1;
            do {
                ch_id = ((struct sdio_mux_hdr *) skb->data)->ch_id;
                spin_lock(&sdio_ch[ch_id].lock);
                sdio_ch[ch_id].num_tx_pkts--;
                spin_unlock(&sdio_ch[ch_id].lock);
                dev_kfree_skb_any(skb);
            } while ((skb = __skb_dequeue(&sdio_mux_write_pool)));
            spin_unlock_irqrestore(&sdio_mux_write_lock, flags);
            return;
        } else {
            /* unknown error condition - drop the
             * skb and reschedule for the
             * other skb's
             */
            pr_err("%s: sdio_mux_write error %d"
                   " for ch %d, skb=%p\n",
                   __func__, rc, ch_id, skb);
            notify = 1;
            break;
        }
    }

    if (reschedule) {
        if (sdio_ch_is_in_reset(ch_id)) {
            notify = 1;
        } else {
            __skb_queue_head(&sdio_mux_write_pool, skb);
            queue_delayed_work(sdio_mux_workqueue,
                               &delayed_work_sdio_mux_write,
                               msecs_to_jiffies(250)
                              );
        }
    }

    if (notify) {
        spin_lock(&sdio_ch[ch_id].lock);
        sdio_ch[ch_id].num_tx_pkts--;
        spin_unlock(&sdio_ch[ch_id].lock);

        if (sdio_ch[ch_id].write_done)
            sdio_ch[ch_id].write_done(
                sdio_ch[ch_id].priv, skb);
        else
            dev_kfree_skb_any(skb);
    }
    spin_unlock_irqrestore(&sdio_mux_write_lock, flags);
}
/**
 * A2 Perf Test
 */
static void a2_performance_test(void)
{
	int ret = 0;
	u32 read_avail = 0;
	u32 write_avail = 0;
	int tx_packet_count = 0;
	int rx_packet_count = 0;
	int size = 0;
	u16 *buf16 = (u16 *) test_ctx->buf;
	int i;
	int total_bytes = 0;
	int max_packets = 10000;

	u64 start_jiffy, end_jiffy, delta_jiffies;
	unsigned int time_msec = 0;

	for (i = 0; i < test_ctx->buf_size / 2; i++)
		buf16[i] = (u16) (i & 0xFFFF);

	pr_info(TEST_MODULE_NAME "--A2 PERFORMANCE TEST START --.\n");

	sdio_set_write_threshold(test_ctx->ch, 2*1024);
	sdio_set_read_threshold(test_ctx->ch, 14*1024);
	sdio_set_poll_time(test_ctx->ch, 0);

	start_jiffy = get_jiffies_64(); /* read the current time */

	while (tx_packet_count < max_packets) {

		if (test_ctx->exit_flag) {
			pr_info(TEST_MODULE_NAME ":Exit Test.\n");
			return;
		}

		/* wait for data ready event */
		/* use a func to avoid compiler optimizations */
		write_avail = sdio_write_avail(test_ctx->ch);
		read_avail = sdio_read_avail(test_ctx->ch);
		if ((write_avail == 0) && (read_avail == 0)) {
			ret = wait_any_notify();
			if (ret)
				goto exit_err;
		}

		write_avail = sdio_write_avail(test_ctx->ch);
		if (write_avail > 0) {
			size = min(test_ctx->buf_size, write_avail);
			pr_debug(TEST_MODULE_NAME ":tx size = %d.\n", size);
			if (atomic_read(&test_ctx->tx_notify_count) > 0)
				atomic_dec(&test_ctx->tx_notify_count);
			test_ctx->buf[0] = tx_packet_count;
			test_ctx->buf[(size/4)-1] = tx_packet_count;

			ret = sdio_write(test_ctx->ch, test_ctx->buf, size);
			if (ret) {
				pr_info(TEST_MODULE_NAME
					":sdio_write err=%d.\n",
					-ret);
				goto exit_err;
			}
			tx_packet_count++;
			test_ctx->tx_bytes += size;
		}

		read_avail = sdio_read_avail(test_ctx->ch);
		if (read_avail > 0) {
			size = min(test_ctx->buf_size, read_avail);
			pr_debug(TEST_MODULE_NAME ":rx size = %d.\n", size);
			if (atomic_read(&test_ctx->rx_notify_count) > 0)
				atomic_dec(&test_ctx->rx_notify_count);

			ret = sdio_read(test_ctx->ch, test_ctx->buf, size);
			if (ret) {
				pr_info(TEST_MODULE_NAME
					": sdio_read err=%d.\n",
					-ret);
				goto exit_err;
			}
			rx_packet_count++;
			test_ctx->rx_bytes += size;
		}

		pr_debug(TEST_MODULE_NAME
			 ":total rx bytes = %d , rx_packet#=%d.\n",
			 test_ctx->rx_bytes, rx_packet_count);
		pr_debug(TEST_MODULE_NAME
			 ":total tx bytes = %d , tx_packet#=%d.\n",
			 test_ctx->tx_bytes, tx_packet_count);

	   /* pr_info(TEST_MODULE_NAME ":packet#=%d.\n", tx_packet_count); */

	} /* while (tx_packet_count < max_packets ) */

	end_jiffy = get_jiffies_64(); /* read the current time */

	delta_jiffies = end_jiffy - start_jiffy;
	time_msec = jiffies_to_msecs(delta_jiffies);

	pr_info(TEST_MODULE_NAME ":total rx bytes = 0x%x , rx_packet#=%d.\n",
		test_ctx->rx_bytes, rx_packet_count);
	pr_info(TEST_MODULE_NAME ":total tx bytes = 0x%x , tx_packet#=%d.\n",
		test_ctx->tx_bytes, tx_packet_count);

	total_bytes = (test_ctx->tx_bytes + test_ctx->rx_bytes);
	pr_err(TEST_MODULE_NAME ":total bytes = %d, time msec = %d.\n",
		   total_bytes , (int) time_msec);

	pr_err(TEST_MODULE_NAME ":Performance = %d Mbit/sec.\n",
	       (total_bytes / time_msec) * 8 / 1000);

	pr_err(TEST_MODULE_NAME "--A2 PERFORMANCE TEST END --.\n");

	pr_err(TEST_MODULE_NAME ": TEST PASS.\n");
	return;

exit_err:
	pr_err(TEST_MODULE_NAME ": TEST FAIL.\n");
	return;
}
/**
 * sender Test
 */
static void sender_test(void)
{
	int ret = 0;
	u32 read_avail = 0;
	u32 write_avail = 0;
	int packet_count = 0;
	int size = 512;
	u16 *buf16 = (u16 *) test_ctx->buf;
	int i;

	for (i = 0 ; i < size / 2 ; i++)
		buf16[i] = (u16) (i & 0xFFFF);

	sdio_set_write_threshold(test_ctx->ch, 4*1024);
	sdio_set_read_threshold(test_ctx->ch, 16*1024); /* N/A with Rx EOT  */
	sdio_set_poll_time(test_ctx->ch, 0); /* N/A with Rx EOT  */

	while (packet_count < 100) {

		if (test_ctx->exit_flag) {
			pr_info(TEST_MODULE_NAME ":Exit Test.\n");
			return;
		}

		pr_info(TEST_MODULE_NAME "--SENDER WAIT FOR EVENT--.\n");

		/* wait for data ready event */
		write_avail = sdio_write_avail(test_ctx->ch);
		pr_debug(TEST_MODULE_NAME ":write_avail=%d\n", write_avail);
		if (write_avail < size) {
			wait_event(test_ctx->wait_q,
				   atomic_read(&test_ctx->tx_notify_count));
			atomic_dec(&test_ctx->tx_notify_count);
		}

		write_avail = sdio_write_avail(test_ctx->ch);
		pr_debug(TEST_MODULE_NAME ":write_avail=%d\n", write_avail);
		if (write_avail < size) {
			pr_info(TEST_MODULE_NAME ":not enough write avail.\n");
			continue;
		}

		test_ctx->buf[0] = packet_count;
		test_ctx->buf[(size/4)-1] = packet_count;

		ret = sdio_write(test_ctx->ch, test_ctx->buf, size);
		if (ret) {
			pr_info(TEST_MODULE_NAME ":sender sdio_write err=%d.\n",
				-ret);
			goto exit_err;
		}

		/* wait for read data ready event */
		pr_debug(TEST_MODULE_NAME ":sender wait for rx data.\n");
		read_avail = sdio_read_avail(test_ctx->ch);
		wait_event(test_ctx->wait_q,
			   atomic_read(&test_ctx->rx_notify_count));
		atomic_dec(&test_ctx->rx_notify_count);

		read_avail = sdio_read_avail(test_ctx->ch);

		if (read_avail != size) {
			pr_info(TEST_MODULE_NAME
				":read_avail size %d not as expected.\n",
				read_avail);
			goto exit_err;
		}

		memset(test_ctx->buf, 0x00, size);

		ret = sdio_read(test_ctx->ch, test_ctx->buf, size);
		if (ret) {
			pr_info(TEST_MODULE_NAME ":sender sdio_read err=%d.\n",
				-ret);
			goto exit_err;
		}


		if ((test_ctx->buf[0] != packet_count) ||
		    (test_ctx->buf[(size/4)-1] != packet_count)) {
			pr_info(TEST_MODULE_NAME
				":sender sdio_read WRONG DATA.\n");
			goto exit_err;
		}

		test_ctx->tx_bytes += size;
		test_ctx->rx_bytes += size;
		packet_count++;

		pr_debug(TEST_MODULE_NAME
			 ":sender total rx bytes = 0x%x , packet#=%d.\n",
			 test_ctx->rx_bytes, packet_count);
		pr_debug(TEST_MODULE_NAME
			 ":sender total tx bytes = 0x%x , packet#=%d.\n",
			 test_ctx->tx_bytes, packet_count);

	} /* end of while */

	sdio_close(test_ctx->ch);

	pr_info(TEST_MODULE_NAME ": TEST PASS.\n");
	return;

exit_err:
	sdio_close(test_ctx->ch);

	pr_info(TEST_MODULE_NAME ": TEST FAIL.\n");
	return;
}
/*  Modem_request command */
static int lge_dm_tty_modem_request(const unsigned char *buf, int count)
{
	short modem_chip;
	int length;

#ifdef CONFIG_DIAG_HSIC_PIPE
	int err = 0;
#endif

	memcpy(&modem_chip, buf + dm_modem_request_header_length,
							sizeof(modem_chip));

	length = dm_modem_request_header_length + sizeof(modem_chip);

	if (modem_chip == Primary_modem_chip) {
		/* send masks to modem */
		diag_process_hdlc((void *)buf + length, count - length);
	} else if (modem_chip == Secondary_modem_chip) {

#ifdef CONFIG_DIAG_SDIO_PIPE
		/* send masks to 9k */
		if (driver->sdio_ch) {
			wait_event_interruptible(driver->wait_q,
				(sdio_write_avail(driver->sdio_ch)
					>= (count - length)));
			if (driver->sdio_ch && ((count - length) > 0)) {
				sdio_write(driver->sdio_ch,
					(void *)buf + length,
						count - length);
			}
		}
#endif

#ifdef CONFIG_DIAG_HSIC_PIPE
		/* send masks to 9k too */
		if (driver->hsic_ch && (count - length > 0)) {
			/* wait sending mask updates if HSIC ch not ready */
			if (driver->in_busy_hsic_write)
				wait_event_interruptible(driver->wait_q,
					(driver->in_busy_hsic_write != 1));
			driver->in_busy_hsic_write = 1;
			driver->in_busy_hsic_read_on_device = 0;
			err = diag_bridge_write((void *)buf + length,
							 count - length);
			if (err) {
				pr_err("diag: err sending mask to MDM: %d\n",
									 err);
				/*
				* If the error is recoverable, then clear
				* the write flag, so we will resubmit a
				* write on the next frame.  Otherwise, don't
				* resubmit a write on the next frame.
				*/
				if ((-ESHUTDOWN) != err)
					driver->in_busy_hsic_write = 0;
			}
		}
#endif

	} else {
		pr_info(DM_TTY_MODULE_NAME ": %s: lge_dm_tty_write "
			"modem_number %d "
			"error count = %d length = %d\n",
			__func__, modem_chip, count, length);
	}

	return count;
}
/* modem_request_command */
static int lge_dm_dev_tty_modem_request(const unsigned char *buf, int count)
{
#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
	int err = 0;
#endif /*CONFIG_DIAGFWD_BRIDGE_CODE*/

	int index = 0;

#ifdef CONFIG_DIAG_SDIO_PIPE
	/* send masks to 9k */
	if (driver->sdio_ch) {
		wait_event_interruptible(driver->wait_q,
			(sdio_write_avail(driver->sdio_ch) >= count));
		if (driver->sdio_ch && (count > 0)) {
			sdio_write(driver->sdio_ch,
				(void *)buf, count);
		}
	}
#endif /* CONFIG_DIAG_SDIO_PIPE */

#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
	/* send masks to all 9k */
	for (index = 0; index < MAX_HSIC_CH; index++) {
		if (diag_hsic[index].hsic_ch && (count > 0)) {
			/* wait sending mask updates
			 * if HSIC ch not ready */
			if (diag_hsic[index].in_busy_hsic_write)
				wait_event_interruptible(driver->wait_q,
					(diag_hsic[index].
					 in_busy_hsic_write != 1));
			diag_hsic[index].in_busy_hsic_write = 1;
			diag_hsic[index].in_busy_hsic_read_on_device = 0;
			err = diag_bridge_write(index, (void *)buf, count);
			if (err) {
				pr_err("diag: err sending mask to MDM: %d\n",
				       err);
				/*
				 * If the error is recoverable, then clear
				 * the write flag, so we will resubmit a
				 * write on the next frame.  Otherwise, don't
				 * resubmit a write on the next frame.
				 */
				if ((-ESHUTDOWN) != err)
					diag_hsic[index].in_busy_hsic_write = 0;
			}
		}
	}
#endif /* CONFIG_DIAGFWD_BRIDGE_CODE */

	return count;
}
static int diagchar_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *ppos)
{
	int err, ret = 0, pkt_type;
#ifdef DIAG_DEBUG
	int length = 0, i;
#endif
	struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 };
	struct diag_hdlc_dest_type enc = { NULL, NULL, 0 };
	void *buf_copy = NULL;
	int payload_size;
#ifdef CONFIG_DIAG_OVER_USB
	if (((driver->logging_mode == USB_MODE) && (!driver->usb_connected)) ||
				(driver->logging_mode == NO_LOGGING_MODE)) {
		/*Drop the diag payload */
		return -EIO;
	}
#endif /* DIAG over USB */
	/* Get the packet type F3/log/event/Pkt response */
	err = copy_from_user((&pkt_type), buf, 4);
	/* First 4 bytes indicate the type of payload - ignore these */
	payload_size = count - 4;

	if (pkt_type == USER_SPACE_LOG_TYPE) {
		err = copy_from_user(driver->user_space_data, buf + 4,
							 payload_size);
		/* Check masks for On-Device logging */
		if (driver->mask_check) {
			if (!mask_request_validate(driver->user_space_data)) {
				pr_alert("diag: mask request Invalid\n");
				return -EFAULT;
			}
		}
		buf = buf + 4;
#ifdef DIAG_DEBUG
		pr_debug("diag: user space data %d\n", payload_size);
		for (i = 0; i < payload_size; i++)
			pr_debug("\t %x", *((driver->user_space_data)+i));
#endif
#ifdef CONFIG_DIAG_SDIO_PIPE
		/* send masks to 9k too */
		if (driver->sdio_ch) {
			wait_event_interruptible(driver->wait_q,
				 (sdio_write_avail(driver->sdio_ch) >=
					 payload_size));
			if (driver->sdio_ch && (payload_size > 0)) {
				sdio_write(driver->sdio_ch, (void *)
				   (driver->user_space_data), payload_size);
			}
		}
#endif
		/* send masks to modem now */
		diag_process_hdlc((void *)(driver->user_space_data),
							 payload_size);
		return 0;
	}

	buf_copy = diagmem_alloc(driver, payload_size, POOL_TYPE_COPY);
	if (!buf_copy) {
		driver->dropped_count++;
		return -ENOMEM;
	}

	err = copy_from_user(buf_copy, buf + 4, payload_size);
	if (err) {
		printk(KERN_INFO "diagchar : copy_from_user failed\n");
		ret = -EFAULT;
		goto fail_free_copy;
	}
#ifdef DIAG_DEBUG
	printk(KERN_DEBUG "data is -->\n");
	for (i = 0; i < payload_size; i++)
		printk(KERN_DEBUG "\t %x \t", *(((unsigned char *)buf_copy)+i));
#endif
	send.state = DIAG_STATE_START;
	send.pkt = buf_copy;
	send.last = (void *)(buf_copy + payload_size - 1);
	send.terminate = 1;
#ifdef DIAG_DEBUG
	pr_debug("diag: Already used bytes in buffer %d, and"
	" incoming payload size is %d\n", driver->used, payload_size);
	printk(KERN_DEBUG "hdlc encoded data is -->\n");
	for (i = 0; i < payload_size + 8; i++) {
		printk(KERN_DEBUG "\t %x \t", *(((unsigned char *)buf_hdlc)+i));
		if (*(((unsigned char *)buf_hdlc)+i) != 0x7e)
			length++;
	}
#endif
	mutex_lock(&driver->diagchar_mutex);
	if (!buf_hdlc)
		buf_hdlc = diagmem_alloc(driver, HDLC_OUT_BUF_SIZE,
						 POOL_TYPE_HDLC);
	if (!buf_hdlc) {
		ret = -ENOMEM;
		goto fail_free_hdlc;
	}
	if (HDLC_OUT_BUF_SIZE - driver->used <= (2*payload_size) + 3) {
		err = diag_device_write(buf_hdlc, APPS_DATA, NULL);
		if (err) {
			/*Free the buffer right away if write failed */
			diagmem_free(driver, buf_hdlc, POOL_TYPE_HDLC);
			diagmem_free(driver, (unsigned char *)driver->
				 write_ptr_svc, POOL_TYPE_WRITE_STRUCT);
			ret = -EIO;
			goto fail_free_hdlc;
		}
		buf_hdlc = NULL;
		driver->used = 0;
		buf_hdlc = diagmem_alloc(driver, HDLC_OUT_BUF_SIZE,
							 POOL_TYPE_HDLC);
		if (!buf_hdlc) {
			ret = -ENOMEM;
			goto fail_free_hdlc;
		}
	}

	enc.dest = buf_hdlc + driver->used;
	enc.dest_last = (void *)(buf_hdlc + driver->used + 2*payload_size + 3);
	diag_hdlc_encode(&send, &enc);

	/* This is to check if after HDLC encoding, we are still within the
	 limits of aggregation buffer. If not, we write out the current buffer
	and start aggregation in a newly allocated buffer */
	if ((unsigned int) enc.dest >=
		 (unsigned int)(buf_hdlc + HDLC_OUT_BUF_SIZE)) {
		err = diag_device_write(buf_hdlc, APPS_DATA, NULL);
		if (err) {
			/*Free the buffer right away if write failed */
			diagmem_free(driver, buf_hdlc, POOL_TYPE_HDLC);
			diagmem_free(driver, (unsigned char *)driver->
				 write_ptr_svc, POOL_TYPE_WRITE_STRUCT);
			ret = -EIO;
			goto fail_free_hdlc;
		}
		buf_hdlc = NULL;
		driver->used = 0;
		buf_hdlc = diagmem_alloc(driver, HDLC_OUT_BUF_SIZE,
							 POOL_TYPE_HDLC);
		if (!buf_hdlc) {
			ret = -ENOMEM;
			goto fail_free_hdlc;
		}
		enc.dest = buf_hdlc + driver->used;
		enc.dest_last = (void *)(buf_hdlc + driver->used +
							 (2*payload_size) + 3);
		diag_hdlc_encode(&send, &enc);
	}

	driver->used = (uint32_t) enc.dest - (uint32_t) buf_hdlc;
	if (pkt_type == DATA_TYPE_RESPONSE) {
		err = diag_device_write(buf_hdlc, APPS_DATA, NULL);
		if (err) {
			/*Free the buffer right away if write failed */
			diagmem_free(driver, buf_hdlc, POOL_TYPE_HDLC);
			diagmem_free(driver, (unsigned char *)driver->
				 write_ptr_svc, POOL_TYPE_WRITE_STRUCT);
			ret = -EIO;
			goto fail_free_hdlc;
		}
		buf_hdlc = NULL;
		driver->used = 0;
	}

	mutex_unlock(&driver->diagchar_mutex);
	diagmem_free(driver, buf_copy, POOL_TYPE_COPY);
	if (!timer_in_progress)	{
		timer_in_progress = 1;
		ret = mod_timer(&drain_timer, jiffies + msecs_to_jiffies(500));
	}
	return 0;

fail_free_hdlc:
	buf_hdlc = NULL;
	driver->used = 0;
	diagmem_free(driver, buf_copy, POOL_TYPE_COPY);
	mutex_unlock(&driver->diagchar_mutex);
	return ret;

fail_free_copy:
	diagmem_free(driver, buf_copy, POOL_TYPE_COPY);
	return ret;
}
Example #18
int gsdio_write(struct gsdio_port *port, struct usb_request *req)
{
	unsigned	avail;
	char		*packet = req->buf;
	unsigned	size = req->actual;
	unsigned	n;
	int		ret = 0;


	if (!port) {
		pr_err("%s: port is null\n", __func__);
		return -ENODEV;
	}

	if (!req) {
		pr_err("%s: usb request is null port#%d\n",
				__func__, port->port_num);
		return -ENODEV;
	}

	pr_debug("%s: port:%p port#%d req:%p actual:%d n_read:%d\n",
			__func__, port, port->port_num, req,
			req->actual, port->n_read);

	if (!port->sdio_open) {
		pr_debug("%s: SDIO IO is not supported\n", __func__);
		return -ENODEV;
	}

	avail = sdio_write_avail(port->sport_info->ch);

	pr_debug("%s: sdio_write_avail:%d", __func__, avail);

	if (!avail)
		return -EBUSY;

	if (!req->actual) {
		pr_debug("%s: req->actual is already zero,update bytes read\n",
				__func__);
		port->n_read = 0;
		return -ENODEV;
	}

	packet = req->buf;
	n = port->n_read;
	if (n) {
		packet += n;
		size -= n;
	}

	if (size > avail)
		size = avail;

	spin_unlock_irq(&port->port_lock);
	ret = sdio_write(port->sport_info->ch, packet, size);
	spin_lock_irq(&port->port_lock);
	if (ret) {
		pr_err("%s: port#%d sdio write failed err:%d",
				__func__, port->port_num, ret);
		/* try again later */
		return ret;
	}

	port->nbytes_tomodem += size;

	if (size + n == req->actual)
		port->n_read = 0;
	else
		port->n_read += size;

	return ret;
}