Example No. 1
0
static void
submit_one_urb(struct ks_bridge *ksb, gfp_t flags, struct data_pkt *pkt)
{
	struct urb *urb;
	int ret;

	urb = usb_alloc_urb(0, flags);
	if (!urb) {
		pr_err("unable to allocate urb");
		ksb_free_data_pkt(pkt);
		return;
	}

	usb_fill_bulk_urb(urb, ksb->udev, ksb->in_pipe,
			pkt->buf, pkt->len,
			ksb_rx_cb, pkt);
	usb_anchor_urb(urb, &ksb->submitted);

	dbg_log_event(ksb, "S RX_URB", pkt->len, 0);

	ret = usb_submit_urb(urb, flags);
	if (ret) {
		pr_err("in urb submission failed");
		usb_unanchor_urb(urb);
		usb_free_urb(urb);
		ksb_free_data_pkt(pkt);
		return;
	}

	usb_free_urb(urb);
}
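For orientation, the examples in this section all follow the same URB lifecycle: allocate, fill, anchor, submit, then drop the local reference. Below is a minimal sketch of that pattern; the demo_dev structure, its field names, and the demo_rx_complete() callback are illustrative only and not part of the driver above.

#include <linux/usb.h>
#include <linux/errno.h>

struct demo_dev {
	struct usb_device *udev;	/* target device */
	unsigned int in_pipe;		/* bulk-IN pipe handle */
	struct usb_anchor submitted;	/* init_usb_anchor()'d elsewhere */
};

static void demo_rx_complete(struct urb *urb)
{
	/* data handling goes here; see the ksb_rx_cb() examples below */
}

static int demo_submit_bulk_in(struct demo_dev *dev, void *buf, int len,
			       gfp_t flags)
{
	struct urb *urb;
	int ret;

	urb = usb_alloc_urb(0, flags);		/* holds our reference */
	if (!urb)
		return -ENOMEM;

	usb_fill_bulk_urb(urb, dev->udev, dev->in_pipe, buf, len,
			  demo_rx_complete, dev);
	usb_anchor_urb(urb, &dev->submitted);	/* so disconnect can kill it */

	ret = usb_submit_urb(urb, flags);
	if (ret) {
		usb_unanchor_urb(urb);
		usb_free_urb(urb);
		return ret;
	}

	/*
	 * The USB core holds its own reference while the URB is in
	 * flight, which is why the driver above can call usb_free_urb()
	 * immediately after a successful submit.
	 */
	usb_free_urb(urb);
	return 0;
}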
Example No. 2
0
static void ksb_start_rx_work(struct work_struct *w)
{
	struct ks_bridge *ksb =
			container_of(w, struct ks_bridge, start_rx_work);
	struct data_pkt	*pkt;
	struct urb *urb;
	int i = 0;
	int ret;
	bool put = true;

	ret = usb_autopm_get_interface(ksb->ifc);
	if (ret < 0) {
		if (ret != -EAGAIN && ret != -EACCES) {
			pr_err_ratelimited("autopm_get failed:%d", ret);
			return;
		}
		put = false;
	}
	for (i = 0; i < NO_RX_REQS; i++) {

		if (!test_bit(USB_DEV_CONNECTED, &ksb->flags))
			break;

		pkt = ksb_alloc_data_pkt(MAX_DATA_PKT_SIZE, GFP_KERNEL, ksb);
		if (IS_ERR(pkt)) {
			pr_err("unable to allocate data pkt");
			break;
		}

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			pr_err("unable to allocate urb");
			ksb_free_data_pkt(pkt);
			break;
		}

		usb_fill_bulk_urb(urb, ksb->udev, ksb->in_pipe,
				pkt->buf, pkt->len,
				ksb_rx_cb, pkt);
		usb_anchor_urb(urb, &ksb->submitted);

		dbg_log_event(ksb, "S RX_URB", pkt->len, 0);

		atomic_inc(&ksb->rx_pending_cnt);
		ret = usb_submit_urb(urb, GFP_KERNEL);
		if (ret) {
			pr_err("in urb submission failed");
			usb_unanchor_urb(urb);
			usb_free_urb(urb);
			ksb_free_data_pkt(pkt);
			atomic_dec(&ksb->rx_pending_cnt);
			wake_up(&ksb->pending_urb_wait);
			break;
		}

		usb_free_urb(urb);
	}
	if (put)
		usb_autopm_put_interface_async(ksb->ifc);
}
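The runtime-PM handling in ksb_start_rx_work() is the usual autosuspend bracket: take a PM reference on the interface before queuing I/O, release it when done, and treat -EAGAIN/-EACCES as "carry on, but skip the matching put". A minimal sketch of just that bracket, with the RX submission loop reduced to a comment:

#include <linux/usb.h>
#include <linux/errno.h>

static void demo_rx_work_pm(struct usb_interface *ifc)
{
	bool put = true;
	int ret;

	ret = usb_autopm_get_interface(ifc);	/* resume device, hold PM ref */
	if (ret < 0) {
		if (ret != -EAGAIN && ret != -EACCES)
			return;			/* genuine resume failure */
		put = false;			/* failed get: nothing to release later */
	}

	/* ... allocate and submit the RX URBs here ... */

	if (put)
		usb_autopm_put_interface_async(ifc);
}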
static void ksb_tomdm_work(struct work_struct *w)
{
	struct ks_bridge *ksb = container_of(w, struct ks_bridge, to_mdm_work);
	struct data_pkt	*pkt;
	unsigned long flags;
	struct urb *urb;
	int ret;

	spin_lock_irqsave(&ksb->lock, flags);
	while (!list_empty(&ksb->to_mdm_list)
			&& test_bit(USB_DEV_CONNECTED, &ksb->flags)) {
		pkt = list_first_entry(&ksb->to_mdm_list,
				struct data_pkt, list);
		list_del_init(&pkt->list);
		spin_unlock_irqrestore(&ksb->lock, flags);

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			pr_err_ratelimited("%s: unable to allocate urb",
					ksb->fs_dev.name);
			ksb_free_data_pkt(pkt);
			return;
		}

		ret = usb_autopm_get_interface(ksb->ifc);
		if (ret < 0 && ret != -EAGAIN && ret != -EACCES) {
			pr_err_ratelimited("%s: autopm_get failed:%d",
					ksb->fs_dev.name, ret);
			usb_free_urb(urb);
			ksb_free_data_pkt(pkt);
			return;
		}
		usb_fill_bulk_urb(urb, ksb->udev, ksb->out_pipe,
				pkt->buf, pkt->len, ksb_tx_cb, pkt);
		usb_anchor_urb(urb, &ksb->submitted);

		dbg_log_event(ksb, "S TX_URB", pkt->len, 0);

		atomic_inc(&ksb->tx_pending_cnt);
		ret = usb_submit_urb(urb, GFP_KERNEL);
		if (ret) {
			dev_err(&ksb->udev->dev, "out urb submission failed");
			usb_unanchor_urb(urb);
			usb_free_urb(urb);
			ksb_free_data_pkt(pkt);
			usb_autopm_put_interface(ksb->ifc);
			atomic_dec(&ksb->tx_pending_cnt);
			wake_up(&ksb->pending_urb_wait);
			return;
		}
		usb_mark_last_busy(ksb->udev);

		usb_free_urb(urb);

		spin_lock_irqsave(&ksb->lock, flags);
	}
	spin_unlock_irqrestore(&ksb->lock, flags);
}
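ksb_tomdm_work() also illustrates the standard way to drain a spinlock-protected list when each entry needs work that may sleep: detach one entry under the lock, drop the lock for the blocking part, then re-acquire it before testing the list again. Stripped of the USB details, with struct demo_item and demo_process() as placeholders:

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_item {
	struct list_head list;
	/* payload */
};

static void demo_process(struct demo_item *item)
{
	/* blocking work: GFP_KERNEL allocations, USB I/O, ... */
}

static void demo_drain(struct list_head *queue, spinlock_t *lock)
{
	struct demo_item *item;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	while (!list_empty(queue)) {
		item = list_first_entry(queue, struct demo_item, list);
		list_del_init(&item->list);
		spin_unlock_irqrestore(lock, flags);	/* must not sleep with it held */

		demo_process(item);

		spin_lock_irqsave(lock, flags);		/* re-take before re-testing */
	}
	spin_unlock_irqrestore(lock, flags);
}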
Example No. 4
0
static void ksb_start_rx_work(struct work_struct *w)
{
	struct ks_bridge *ksb =
			container_of(w, struct ks_bridge, start_rx_work);
	struct data_pkt	*pkt;
	struct urb *urb;
	int i = 0;
	int ret;

	for (i = 0; i < NO_RX_REQS; i++) {
		pkt = ksb_alloc_data_pkt(MAX_DATA_PKT_SIZE, GFP_KERNEL, ksb);
		if (IS_ERR(pkt)) {
			pr_err("unable to allocate data pkt");
			return;
		}

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			pr_err("unable to allocate urb");
			ksb_free_data_pkt(pkt);
			return;
		}

		ret = usb_autopm_get_interface(ksb->ifc);
		if (ret < 0 && ret != -EAGAIN && ret != -EACCES) {
			pr_err_ratelimited("autopm_get failed:%d", ret);
			usb_free_urb(urb);
			ksb_free_data_pkt(pkt);
			return;
		}
		ksb->alloced_read_pkts++;

		usb_fill_bulk_urb(urb, ksb->udev, ksb->in_pipe,
				pkt->buf, pkt->len,
				ksb_rx_cb, pkt);
		usb_anchor_urb(urb, &ksb->submitted);

		dbg_log_event(ksb, "S RX_URB", pkt->len, 0);

		ret = usb_submit_urb(urb, GFP_KERNEL);
		if (ret) {
			pr_err("in urb submission failed");
			usb_unanchor_urb(urb);
			usb_free_urb(urb);
			ksb_free_data_pkt(pkt);
			ksb->alloced_read_pkts--;
			usb_autopm_put_interface(ksb->ifc);
			return;
		}

		usb_autopm_put_interface_async(ksb->ifc);
		usb_free_urb(urb);
	}
}
static void ksb_usb_disconnect(struct usb_interface *ifc)
{
	struct ks_bridge *ksb = usb_get_intfdata(ifc);
	unsigned long flags;
	struct data_pkt *pkt;

	pr_info("%s called\n", __func__);
	dbg_log_event(ksb, "PID-DETACH", 0, 0);

	clear_bit(USB_DEV_CONNECTED, &ksb->flags);
	wake_up(&ksb->ks_wait_q);
	cancel_work_sync(&ksb->to_mdm_work);
	cancel_work_sync(&ksb->start_rx_work);

	misc_deregister(&ksb->fs_dev);

	usb_kill_anchored_urbs(&ksb->submitted);

	wait_event_interruptible_timeout(
					ksb->pending_urb_wait,
					!atomic_read(&ksb->tx_pending_cnt) &&
					!atomic_read(&ksb->rx_pending_cnt),
					msecs_to_jiffies(PENDING_URB_TIMEOUT));

	spin_lock_irqsave(&ksb->lock, flags);
	while (!list_empty(&ksb->to_ks_list)) {
		pkt = list_first_entry(&ksb->to_ks_list,
				struct data_pkt, list);
		list_del_init(&pkt->list);
		ksb_free_data_pkt(pkt);
	}
	while (!list_empty(&ksb->to_mdm_list)) {
		pkt = list_first_entry(&ksb->to_mdm_list,
				struct data_pkt, list);
		list_del_init(&pkt->list);
		ksb_free_data_pkt(pkt);
	}
	spin_unlock_irqrestore(&ksb->lock, flags);

	ifc->needs_remote_wakeup = 0;
	usb_put_dev(ksb->udev);
	ksb->ifc = NULL;
	usb_set_intfdata(ifc, NULL);

	pr_info("%s done\n", __func__);

	return;
}
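The tx_pending_cnt/rx_pending_cnt counters seen above form a small in-flight accounting scheme: submitters increment before usb_submit_urb(), completion handlers decrement and wake pending_urb_wait, and disconnect kills the anchored URBs and then waits, with a timeout, for both counts to reach zero. A condensed sketch of that handshake, using illustrative names and an arbitrary timeout value:

#include <linux/usb.h>
#include <linux/wait.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>

#define DEMO_DRAIN_TIMEOUT_MS	1000	/* placeholder value */

struct demo_bridge {
	atomic_t inflight;		/* URBs submitted but not yet completed */
	wait_queue_head_t drain_wq;	/* woken on every completion */
	struct usb_anchor submitted;
};

/* submit side */
static int demo_submit(struct demo_bridge *br, struct urb *urb, gfp_t flags)
{
	int ret;

	atomic_inc(&br->inflight);
	ret = usb_submit_urb(urb, flags);
	if (ret) {
		atomic_dec(&br->inflight);	/* undo on failure */
		wake_up(&br->drain_wq);
	}
	return ret;
}

/* called at the end of each URB completion handler */
static void demo_completion_done(struct demo_bridge *br)
{
	atomic_dec(&br->inflight);
	wake_up(&br->drain_wq);
}

/* disconnect side */
static void demo_drain_on_disconnect(struct demo_bridge *br)
{
	usb_kill_anchored_urbs(&br->submitted);
	wait_event_interruptible_timeout(br->drain_wq,
			!atomic_read(&br->inflight),
			msecs_to_jiffies(DEMO_DRAIN_TIMEOUT_MS));
}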
Example No. 6
0
static void ksb_rx_cb(struct urb *urb)
{
	struct data_pkt *pkt = urb->context;
	struct ks_bridge *ksb = pkt->ctxt;

	dbg_log_event(ksb, "C RX_URB", urb->status, urb->actual_length);

	pr_debug("status:%d actual:%d", urb->status, urb->actual_length);

	if (urb->status < 0) {
		if (urb->status != -ESHUTDOWN && urb->status != -ENOENT)
			pr_err_ratelimited("urb failed with err:%d",
					urb->status);
		ksb_free_data_pkt(pkt);
		return;
	}

	if (urb->actual_length == 0) {
		submit_one_urb(ksb, GFP_ATOMIC, pkt);
		return;
	}

	spin_lock(&ksb->lock);
	pkt->len = urb->actual_length;
	list_add_tail(&pkt->list, &ksb->to_ks_list);
	spin_unlock(&ksb->lock);

	/* wake up read thread */
	wake_up(&ksb->ks_wait_q);
}
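ksb_rx_cb() runs as a URB completion handler, i.e. in atomic context: it must not sleep, which is why the resubmission path passes GFP_ATOMIC to submit_one_urb() and why the plain spin_lock() variant is enough here while the process-context paths use spin_lock_irqsave(). A fuller version of the demo_rx_complete() stub from the sketch after Example No. 1, showing those constraints:

#include <linux/usb.h>
#include <linux/printk.h>

static void demo_rx_complete(struct urb *urb)
{
	/* Runs in atomic (interrupt) context: sleeping is not allowed. */
	if (urb->status)
		return;		/* error, unlink, or shutdown: stop resubmitting */

	/* urb->actual_length bytes are now valid in urb->transfer_buffer */

	/* Any resubmission from here must use GFP_ATOMIC, never GFP_KERNEL. */
	if (usb_submit_urb(urb, GFP_ATOMIC))
		pr_err("demo: resubmit failed\n");
}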
Example No. 7
0
static ssize_t ksb_fs_write(struct file *fp, const char __user *buf,
				 size_t count, loff_t *pos)
{
	int			ret;
	struct data_pkt		*pkt;
	unsigned long		flags;
	struct ks_bridge	*ksb = fp->private_data;

	pkt = ksb_alloc_data_pkt(count, GFP_KERNEL, ksb);
	if (IS_ERR(pkt)) {
		pr_err("unable to allocate data packet");
		return PTR_ERR(pkt);
	}

	ret = copy_from_user(pkt->buf, buf, count);
	if (ret) {
		pr_err("copy_from_user failed: err:%d", ret);
		ksb_free_data_pkt(pkt);
		return ret;
	}

	spin_lock_irqsave(&ksb->lock, flags);
	list_add_tail(&pkt->list, &ksb->to_mdm_list);
	spin_unlock_irqrestore(&ksb->lock, flags);

	queue_work(ksb->wq, &ksb->to_mdm_work);

	return count;
}
static void ksb_tx_cb(struct urb *urb)
{
	struct data_pkt *pkt = urb->context;
	struct ks_bridge *ksb = pkt->ctxt;

	dbg_log_event(ksb, "C TX_URB", urb->status, 0);

#if 0
	dev_dbg(&ksb->udev->dev, "status:%d", urb->status);
#endif
	if (test_bit(USB_DEV_CONNECTED, &ksb->flags))
		usb_autopm_put_interface_async(ksb->ifc);

	if (urb->status < 0)
		pr_err_ratelimited("%s: urb failed with err:%d",
				ksb->fs_dev.name, urb->status);

	if (ksb->ifc->cur_altsetting->desc.bInterfaceNumber == 2)
		dev_info(ksb->fs_dev.this_device, "write: %d bytes",
				urb->actual_length);

	ksb_free_data_pkt(pkt);

	atomic_dec(&ksb->tx_pending_cnt);
	wake_up(&ksb->pending_urb_wait);
}
static void ksb_rx_cb(struct urb *urb)
{
	struct data_pkt *pkt = urb->context;
	struct ks_bridge *ksb = pkt->ctxt;
	bool wakeup = true;

	usb_mark_last_busy(ksb->udev);

	dbg_log_event(ksb, "C RX_URB", urb->status, urb->actual_length);

#if 0
	dev_dbg(&ksb->udev->dev, "status:%d actual:%d", urb->status,
			urb->actual_length);
#endif

	/*non zero len of data received while unlinking urb*/
	if (urb->status == -ENOENT && (urb->actual_length > 0)) {
		/*
		 * If we wakeup the reader process now, it may
		 * queue the URB before its reject flag gets
		 * cleared.
		 */
		wakeup = false;
		goto add_to_list;
	}

	if (urb->status < 0) {
		if (urb->status != -ESHUTDOWN && urb->status != -ENOENT
				&& urb->status != -EPROTO)
			pr_err_ratelimited("%s: urb failed with err:%d",
					ksb->fs_dev.name, urb->status);
		ksb_free_data_pkt(pkt);
		goto done;
	}

	if (urb->actual_length == 0) {
		submit_one_urb(ksb, GFP_ATOMIC, pkt);
		goto done;
	}

	if (urb->actual_length == 48) {
		pr_info("%s: usage=%d, child=%d\n", __func__,
				atomic_read(&ksb->udev->dev.power.usage_count),
				atomic_read(&ksb->udev->dev.power.child_count));
	}

add_to_list:
	spin_lock(&ksb->lock);
	pkt->len = urb->actual_length;
	list_add_tail(&pkt->list, &ksb->to_ks_list);
	spin_unlock(&ksb->lock);
	/* wake up read thread */
	if (wakeup)
		wake_up(&ksb->ks_wait_q);
done:
	atomic_dec(&ksb->rx_pending_cnt);
	wake_up(&ksb->pending_urb_wait);
}
Example No. 10
0
static void ksb_rx_cb(struct urb *urb)
{
	struct data_pkt *pkt = urb->context;
	struct ks_bridge *ksb = pkt->ctxt;

	dbg_log_event(ksb, "C RX_URB", urb->status, urb->actual_length);

	pr_debug("status:%d actual:%d", urb->status, urb->actual_length);

	/*non zero len of data received while unlinking urb*/
	if (urb->status == -ENOENT && urb->actual_length > 0)
		goto add_to_list;

	if (urb->status < 0) {
		if (urb->status != -ESHUTDOWN && urb->status != -ENOENT
				&& urb->status != -EPROTO)
			pr_err_ratelimited("urb failed with err:%d",
					urb->status);
		ksb_free_data_pkt(pkt);
		ksb->alloced_read_pkts--;
		return;
	}

	if (urb->actual_length == 0) {
		ksb_free_data_pkt(pkt);
		ksb->alloced_read_pkts--;
		goto resubmit_urb;
	}

add_to_list:
	spin_lock(&ksb->lock);
	pkt->len = urb->actual_length;
	list_add_tail(&pkt->list, &ksb->to_ks_list);
	spin_unlock(&ksb->lock);

	/* wake up read thread */
	wake_up(&ksb->ks_wait_q);

resubmit_urb:
	submit_one_urb(ksb);
}
Example No. 11
0
static void ksb_usb_disconnect(struct usb_interface *ifc)
{
	struct ks_bridge *ksb = usb_get_intfdata(ifc);
	unsigned long flags;
	struct data_pkt *pkt;

	dbg_log_event(ksb, "PID-DETACH", 0, 0);

	clear_bit(USB_DEV_CONNECTED, &ksb->flags);
	wake_up(&ksb->ks_wait_q);
	cancel_work_sync(&ksb->to_mdm_work);

	usb_kill_anchored_urbs(&ksb->submitted);

	spin_lock_irqsave(&ksb->lock, flags);
	while (!list_empty(&ksb->to_ks_list)) {
		pkt = list_first_entry(&ksb->to_ks_list,
				struct data_pkt, list);
		list_del_init(&pkt->list);
		ksb_free_data_pkt(pkt);
	}
	while (!list_empty(&ksb->to_mdm_list)) {
		pkt = list_first_entry(&ksb->to_mdm_list,
				struct data_pkt, list);
		list_del_init(&pkt->list);
		ksb_free_data_pkt(pkt);
	}
	spin_unlock_irqrestore(&ksb->lock, flags);

	misc_deregister(ksb->fs_dev);

//#ifdef VENDOR_EDIT
//[email protected]  2013/03/08, Modify for can't incoming call
	ifc->needs_remote_wakeup = 0;
//#endif /* VENDOR_EDIT */

	usb_put_dev(ksb->udev);
	ksb->ifc = NULL;
	usb_set_intfdata(ifc, NULL);

	return;
}
Example No. 12
0
static void
submit_one_urb(struct ks_bridge *ksb, gfp_t flags, struct data_pkt *pkt)
{
	struct urb *urb;
	int ret;

	urb = usb_alloc_urb(0, flags);
	if (!urb) {
		pr_err("unable to allocate urb");
		ksb_free_data_pkt(pkt);
		return;
	}

	usb_fill_bulk_urb(urb, ksb->udev, ksb->in_pipe,
			pkt->buf, pkt->len,
			ksb_rx_cb, pkt);
	usb_anchor_urb(urb, &ksb->submitted);

	if (!test_bit(USB_DEV_CONNECTED, &ksb->flags)) {
		usb_unanchor_urb(urb);
		usb_free_urb(urb);
		ksb_free_data_pkt(pkt);
		return;
	}

	atomic_inc(&ksb->rx_pending_cnt);
	ret = usb_submit_urb(urb, flags);
	if (ret) {
		pr_err("in urb submission failed");
		usb_unanchor_urb(urb);
		usb_free_urb(urb);
		ksb_free_data_pkt(pkt);
		atomic_dec(&ksb->rx_pending_cnt);
		wake_up(&ksb->pending_urb_wait);
		return;
	}

	dbg_log_event(ksb, "S RX_URB", pkt->len, 0);

	usb_free_urb(urb);
}
Example No. 13
0
static void submit_one_urb(struct ks_bridge *ksb)
{
	struct data_pkt	*pkt;
	struct urb *urb;
	int ret;

	pkt = ksb_alloc_data_pkt(MAX_DATA_PKT_SIZE, GFP_ATOMIC, ksb);
	if (IS_ERR(pkt)) {
		pr_err("unable to allocate data pkt");
		return;
	}

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb) {
		pr_err("unable to allocate urb");
		ksb_free_data_pkt(pkt);
		return;
	}
	ksb->alloced_read_pkts++;

	usb_fill_bulk_urb(urb, ksb->udev, ksb->in_pipe,
			pkt->buf, pkt->len,
			ksb_rx_cb, pkt);
	usb_anchor_urb(urb, &ksb->submitted);

	dbg_log_event(ksb, "S RX_URB", pkt->len, 0);

	ret = usb_submit_urb(urb, GFP_ATOMIC);
	if (ret) {
		pr_err("in urb submission failed");
		usb_unanchor_urb(urb);
		usb_free_urb(urb);
		ksb_free_data_pkt(pkt);
		ksb->alloced_read_pkts--;
		return;
	}

	usb_free_urb(urb);
}
static void ksb_usb_disconnect(struct usb_interface *ifc)
{
	struct ks_bridge *ksb = usb_get_intfdata(ifc);
	unsigned long flags;
	struct data_pkt *pkt;

	dbg_log_event(ksb, "PID-DETACH", 0, 0);

	clear_bit(USB_DEV_CONNECTED, &ksb->flags);
	wake_up(&ksb->ks_wait_q);
	cancel_work_sync(&ksb->to_mdm_work);

	usb_kill_anchored_urbs(&ksb->submitted);

	spin_lock_irqsave(&ksb->lock, flags);
	while (!list_empty(&ksb->to_ks_list)) {
		pkt = list_first_entry(&ksb->to_ks_list,
				struct data_pkt, list);
		list_del_init(&pkt->list);
		ksb_free_data_pkt(pkt);
	}
	while (!list_empty(&ksb->to_mdm_list)) {
		pkt = list_first_entry(&ksb->to_mdm_list,
				struct data_pkt, list);
		list_del_init(&pkt->list);
		ksb_free_data_pkt(pkt);
	}
	spin_unlock_irqrestore(&ksb->lock, flags);

	misc_deregister(ksb->fs_dev);
	usb_put_dev(ksb->udev);
	ksb->ifc = NULL;
	usb_set_intfdata(ifc, NULL);

	return;
}
Example No. 15
0
static void ksb_tx_cb(struct urb *urb)
{
	struct data_pkt *pkt = urb->context;
	struct ks_bridge *ksb = pkt->ctxt;

	dbg_log_event(ksb, "C TX_URB", urb->status, 0);
	pr_debug("status:%d", urb->status);

	if (ksb->ifc)
		usb_autopm_put_interface_async(ksb->ifc);

	if (urb->status < 0)
		pr_err_ratelimited("urb failed with err:%d", urb->status);

	ksb_free_data_pkt(pkt);
}
Example No. 16
0
static void ksb_rx_cb(struct urb *urb)
{
	struct data_pkt *pkt = urb->context;
	struct ks_bridge *ksb = pkt->ctxt;
	bool wakeup = true;

	dbg_log_event(ksb, "C RX_URB", urb->status, urb->actual_length);

	pr_debug("status:%d actual:%d", urb->status, urb->actual_length);

	/*non zero len of data received while unlinking urb*/
	if (urb->status == -ENOENT && (urb->actual_length > 0)) {
		/*
		 * If we wakeup the reader process now, it may
		 * queue the URB before its reject flag gets
		 * cleared.
		 */
		wakeup = false;
		goto add_to_list;
	}

	if (urb->status < 0) {
		if (urb->status != -ESHUTDOWN && urb->status != -ENOENT
				&& urb->status != -EPROTO)
			pr_err_ratelimited("urb failed with err:%d",
					urb->status);
		ksb_free_data_pkt(pkt);
		goto done;
	}

	if (urb->actual_length == 0) {
		submit_one_urb(ksb, GFP_ATOMIC, pkt);
		goto done;
	}

add_to_list:
	spin_lock(&ksb->lock);
	pkt->len = urb->actual_length;
	list_add_tail(&pkt->list, &ksb->to_ks_list);
	spin_unlock(&ksb->lock);
	/* wake up read thread */
	if (wakeup)
		wake_up(&ksb->ks_wait_q);
done:
	atomic_dec(&ksb->rx_pending_cnt);
	wake_up(&ksb->pending_urb_wait);
}
Example No. 17
0
static void ksb_tx_cb(struct urb *urb)
{
	struct data_pkt *pkt = urb->context;
	struct ks_bridge *ksb = pkt->ctxt;

	dbg_log_event(ksb, "C TX_URB", urb->status, 0);
	pr_debug("status:%d", urb->status);

	if (test_bit(USB_DEV_CONNECTED, &ksb->flags))
		usb_autopm_put_interface_async(ksb->ifc);

	if (urb->status < 0)
		pr_err_ratelimited("urb failed with err:%d", urb->status);

	ksb_free_data_pkt(pkt);

	atomic_dec(&ksb->tx_pending_cnt);
	wake_up(&ksb->pending_urb_wait);
}
Example No. 18
0
static ssize_t ksb_fs_write(struct file *fp, const char __user *buf,
				 size_t count, loff_t *pos)
{
	int			ret;
	struct data_pkt		*pkt;
	unsigned long		flags;
	struct ks_bridge	*ksb = fp->private_data;

	if (!test_bit(USB_DEV_CONNECTED, &ksb->flags)) {
		pr_err("USB_DEV_CONNECTED is not set");
		return -ENODEV;
	}

	if (count > MAX_DATA_PKT_SIZE)
		count = MAX_DATA_PKT_SIZE;

	if (!strcmp(ksb->name, "ks_bridge:2"))
	        pr_info("count:%d cmd:%d", count, *buf);

	pkt = ksb_alloc_data_pkt(count, GFP_KERNEL, ksb);
	if (IS_ERR(pkt)) {
		pr_err("unable to allocate data packet");
		return PTR_ERR(pkt);
	}

	ret = copy_from_user(pkt->buf, buf, count);
	if (ret) {
		pr_err("copy_from_user failed: err:%d", ret);
		ksb_free_data_pkt(pkt);
		return ret;
	}

	spin_lock_irqsave(&ksb->lock, flags);
	list_add_tail(&pkt->list, &ksb->to_mdm_list);
	spin_unlock_irqrestore(&ksb->lock, flags);

	queue_work(ksb->wq, &ksb->to_mdm_work);

	return count;
}
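One detail worth noting in the write paths above: copy_from_user() returns the number of bytes it could not copy rather than a negative errno, so returning ret from the failure branch hands a positive byte count back to user space. The read-side examples further down return -EFAULT in the analogous case; in the context of ksb_fs_write(), the conventional form of that branch would be:

	if (copy_from_user(pkt->buf, buf, count)) {
		pr_err("copy_from_user failed");
		ksb_free_data_pkt(pkt);
		return -EFAULT;		/* a proper errno, not a byte count */
	}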
Example No. 19
0
static int
ksb_usb_probe(struct usb_interface *ifc, const struct usb_device_id *id)
{
	__u8				ifc_num;
	struct usb_host_interface	*ifc_desc;
	struct usb_endpoint_descriptor	*ep_desc;
	int				i;
	struct ks_bridge		*ksb;
	unsigned long			flags;
	struct data_pkt			*pkt;

	ifc_num = ifc->cur_altsetting->desc.bInterfaceNumber;

	switch (id->idProduct) {
	case 0x9008:
		if (ifc_num != 0)
			return -ENODEV;
		ksb = __ksb[BOOT_BRIDGE_INDEX];
		break;
	case 0x9048:
	case 0x904C:
	case 0x9075:
		if (ifc_num != 2)
			return -ENODEV;
		ksb = __ksb[EFS_BRIDGE_INDEX];
		break;
	default:
		return -ENODEV;
	}

	if (!ksb) {
		pr_err("ksb is not initialized");
		return -ENODEV;
	}

	ksb->udev = usb_get_dev(interface_to_usbdev(ifc));
	ksb->ifc = ifc;
	ifc_desc = ifc->cur_altsetting;

	for (i = 0; i < ifc_desc->desc.bNumEndpoints; i++) {
		ep_desc = &ifc_desc->endpoint[i].desc;

		if (!ksb->in_epAddr && usb_endpoint_is_bulk_in(ep_desc))
			ksb->in_epAddr = ep_desc->bEndpointAddress;

		if (!ksb->out_epAddr && usb_endpoint_is_bulk_out(ep_desc))
			ksb->out_epAddr = ep_desc->bEndpointAddress;
	}

	if (!(ksb->in_epAddr && ksb->out_epAddr)) {
		pr_err("could not find bulk in and bulk out endpoints");
		usb_put_dev(ksb->udev);
		ksb->ifc = NULL;
		return -ENODEV;
	}

	ksb->in_pipe = usb_rcvbulkpipe(ksb->udev, ksb->in_epAddr);
	ksb->out_pipe = usb_sndbulkpipe(ksb->udev, ksb->out_epAddr);

	usb_set_intfdata(ifc, ksb);
	set_bit(USB_DEV_CONNECTED, &ksb->flags);
	atomic_set(&ksb->tx_pending_cnt, 0);
	atomic_set(&ksb->rx_pending_cnt, 0);

	dbg_log_event(ksb, "PID-ATT", id->idProduct, 0);

	/*free up stale buffers if any from previous disconnect*/
	spin_lock_irqsave(&ksb->lock, flags);
	while (!list_empty(&ksb->to_ks_list)) {
		pkt = list_first_entry(&ksb->to_ks_list,
				struct data_pkt, list);
		list_del_init(&pkt->list);
		ksb_free_data_pkt(pkt);
	}
	while (!list_empty(&ksb->to_mdm_list)) {
		pkt = list_first_entry(&ksb->to_mdm_list,
				struct data_pkt, list);
		list_del_init(&pkt->list);
		ksb_free_data_pkt(pkt);
	}
	spin_unlock_irqrestore(&ksb->lock, flags);

	ksb->fs_dev = (struct miscdevice *)id->driver_info;
	misc_register(ksb->fs_dev);

	if (device_can_wakeup(&ksb->udev->dev)) {
		ifc->needs_remote_wakeup = 1;
		usb_enable_autosuspend(ksb->udev);
	}
	atomic_set(&ksb->pmlock_cnt, 0);

	pr_info("usb dev connected");

	return 0;
}
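The endpoint scan in ksb_usb_probe() is the standard pattern for picking out the first bulk-IN and bulk-OUT endpoints of the current altsetting. Reduced to a stand-alone helper (names are illustrative; newer kernels also offer usb_find_common_endpoints() for the same job):

#include <linux/usb.h>
#include <linux/errno.h>

static int demo_find_bulk_endpoints(struct usb_interface *ifc,
				    u8 *in_addr, u8 *out_addr)
{
	struct usb_host_interface *alt = ifc->cur_altsetting;
	struct usb_endpoint_descriptor *epd;
	int i;

	*in_addr = 0;
	*out_addr = 0;

	for (i = 0; i < alt->desc.bNumEndpoints; i++) {
		epd = &alt->endpoint[i].desc;

		if (!*in_addr && usb_endpoint_is_bulk_in(epd))
			*in_addr = epd->bEndpointAddress;
		if (!*out_addr && usb_endpoint_is_bulk_out(epd))
			*out_addr = epd->bEndpointAddress;
	}

	/* both directions are required before building the pipes */
	return (*in_addr && *out_addr) ? 0 : -ENODEV;
}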
Example No. 20
0
static ssize_t ksb_fs_read(struct file *fp, char __user *buf,
				size_t count, loff_t *pos)
{
	int ret;
	unsigned long flags;
	struct ks_bridge *ksb = fp->private_data;
	struct data_pkt *pkt = NULL;
	size_t space, copied;

read_start:
	if (!test_bit(USB_DEV_CONNECTED, &ksb->flags))
		return -ENODEV;

	spin_lock_irqsave(&ksb->lock, flags);
	if (list_empty(&ksb->to_ks_list)) {
		spin_unlock_irqrestore(&ksb->lock, flags);
		ret = wait_event_interruptible(ksb->ks_wait_q,
				!list_empty(&ksb->to_ks_list) ||
				!test_bit(USB_DEV_CONNECTED, &ksb->flags));
		if (ret < 0)
			return ret;

		goto read_start;
	}

	space = count;
	copied = 0;
	while (!list_empty(&ksb->to_ks_list) && space &&
			test_bit(USB_DEV_CONNECTED, &ksb->flags)) {
		size_t len;

		pkt = list_first_entry(&ksb->to_ks_list, struct data_pkt, list);
		list_del_init(&pkt->list);
		len = min_t(size_t, space, pkt->len - pkt->n_read);
		spin_unlock_irqrestore(&ksb->lock, flags);

		ret = copy_to_user(buf + copied, pkt->buf + pkt->n_read, len);
		if (ret) {
			pr_err("copy_to_user failed err:%d\n", ret);
			ksb_free_data_pkt(pkt);
			return -EFAULT;
		}

		pkt->n_read += len;
		space -= len;
		copied += len;

		if (pkt->n_read == pkt->len) {
			/*
			 * re-init the packet and queue it
			 * for more data.
			 */
			pkt->n_read = 0;
			pkt->len = MAX_DATA_PKT_SIZE;
			submit_one_urb(ksb, GFP_KERNEL, pkt);
			pkt = NULL;
		}
		spin_lock_irqsave(&ksb->lock, flags);
	}

	/* put the partial packet back in the list */
	if (!space && pkt && pkt->n_read != pkt->len) {
		if (test_bit(USB_DEV_CONNECTED, &ksb->flags))
			list_add(&pkt->list, &ksb->to_ks_list);
		else
			ksb_free_data_pkt(pkt);
	}
	spin_unlock_irqrestore(&ksb->lock, flags);

	dbg_log_event(ksb, "KS_READ", copied, 0);

	if (!strcmp(ksb->name, "ks_bridge:2") && count == 48 && *buf == 0x1) {
		pr_info("%s, HELLO COMMAND = 0x%x\n", __func__, *buf);
		if (!atomic_read(&ksb->pmlock_cnt)) {
			atomic_inc(&ksb->pmlock_cnt);
			pr_info("<cnt = %d> efs sync\n",
				atomic_read(&ksb->pmlock_cnt));

		} else if (atomic_read(&ksb->pmlock_cnt)) {
			atomic_inc(&ksb->pmlock_cnt);
			pr_info("<cnt = %d> get efs lock\n",
				atomic_read(&ksb->pmlock_cnt));
			pm_runtime_get(&ksb->udev->dev);
			pm_runtime_forbid(&ksb->udev->dev);
		}

	} else if (!strcmp(ksb->name, "ks_bridge:2") && count == 8 && *buf == 0x8) {
		pr_info("%s, RESET_RESPONSE = 0x%x\n", __func__, *buf);

		if (atomic_read(&ksb->pmlock_cnt) == 2) {
			atomic_dec(&ksb->pmlock_cnt);
			pr_info("<cnt = %d> release efs lock\n",
				atomic_read(&ksb->pmlock_cnt));
			pm_runtime_allow(&ksb->udev->dev);
			pm_runtime_put(&ksb->udev->dev);
		}
	}

	if (!strcmp(ksb->name, "ks_bridge:2"))
	        pr_info("count:%d space:%d copied:%d", count, space, copied);
	else
		pr_debug("count:%d space:%d copied:%d", count, space, copied);

	return copied;
}
Example No. 21
0
static ssize_t ksb_fs_read(struct file *fp, char __user *buf,
				size_t count, loff_t *pos)
{
	int ret;
	unsigned long flags;
	struct ks_bridge *ksb = fp->private_data;
	struct data_pkt *pkt;
	size_t space, copied;

read_start:
	if (!test_bit(USB_DEV_CONNECTED, &ksb->flags))
		return -ENODEV;

	spin_lock_irqsave(&ksb->lock, flags);
	if (list_empty(&ksb->to_ks_list)) {
		spin_unlock_irqrestore(&ksb->lock, flags);
		ret = wait_event_interruptible(ksb->ks_wait_q,
				!list_empty(&ksb->to_ks_list) ||
				!test_bit(USB_DEV_CONNECTED, &ksb->flags));
		if (ret < 0)
			return ret;

		goto read_start;
	}

	space = count;
	copied = 0;
	while (!list_empty(&ksb->to_ks_list) && space) {
		size_t len;

		pkt = list_first_entry(&ksb->to_ks_list, struct data_pkt, list);
		len = min_t(size_t, space, pkt->len);
		pkt->n_read += len;
		spin_unlock_irqrestore(&ksb->lock, flags);

		ret = copy_to_user(buf + copied, pkt->buf, len);
		if (ret) {
			pr_err("copy_to_user failed err:%d\n", ret);
			ksb_free_data_pkt(pkt);
			ksb->alloced_read_pkts--;
			return ret;
		}

		space -= len;
		copied += len;

		spin_lock_irqsave(&ksb->lock, flags);
		if (pkt->n_read == pkt->len) {
			list_del_init(&pkt->list);
			ksb_free_data_pkt(pkt);
			ksb->alloced_read_pkts--;
		}
	}
	spin_unlock_irqrestore(&ksb->lock, flags);

	dbg_log_event(ksb, "KS_READ", copied, 0);

	pr_debug("count:%d space:%d copied:%d", count, space, copied);

	return copied;
}
Example No. 22
0
static ssize_t ksb_fs_read(struct file *fp, char __user *buf,
				size_t count, loff_t *pos)
{
	int ret;
	unsigned long flags;
	struct ks_bridge *ksb = fp->private_data;
	struct data_pkt *pkt = NULL;
	size_t space, copied;

read_start:
	if (!test_bit(USB_DEV_CONNECTED, &ksb->flags))
		return -ENODEV;

	spin_lock_irqsave(&ksb->lock, flags);
	if (list_empty(&ksb->to_ks_list)) {
		spin_unlock_irqrestore(&ksb->lock, flags);
		ret = wait_event_interruptible(ksb->ks_wait_q,
				!list_empty(&ksb->to_ks_list) ||
				!test_bit(USB_DEV_CONNECTED, &ksb->flags));
		if (ret < 0)
			return ret;

		goto read_start;
	}

	space = count;
	copied = 0;
	while (!list_empty(&ksb->to_ks_list) && space &&
			test_bit(USB_DEV_CONNECTED, &ksb->flags)) {
		size_t len;

		pkt = list_first_entry(&ksb->to_ks_list, struct data_pkt, list);
		list_del_init(&pkt->list);
		len = min_t(size_t, space, pkt->len - pkt->n_read);
		spin_unlock_irqrestore(&ksb->lock, flags);

		ret = copy_to_user(buf + copied, pkt->buf + pkt->n_read, len);
		if (ret) {
			dev_err(ksb->fs_dev.this_device,
					"copy_to_user failed err:%d\n", ret);
			ksb_free_data_pkt(pkt);
			return -EFAULT;
		}

		pkt->n_read += len;
		space -= len;
		copied += len;

		if (pkt->n_read == pkt->len) {
			/*
			 * re-init the packet and queue it
			 * for more data.
			 */
			pkt->n_read = 0;
			pkt->len = MAX_DATA_PKT_SIZE;
			submit_one_urb(ksb, GFP_KERNEL, pkt);
			pkt = NULL;
		}
		spin_lock_irqsave(&ksb->lock, flags);
	}

	/* put the partial packet back in the list */
	if (!space && pkt && pkt->n_read != pkt->len) {
		if (test_bit(USB_DEV_CONNECTED, &ksb->flags))
			list_add(&pkt->list, &ksb->to_ks_list);
		else
			ksb_free_data_pkt(pkt);
	}
	spin_unlock_irqrestore(&ksb->lock, flags);

	dbg_log_event(ksb, "KS_READ", copied, 0);

	dev_dbg(ksb->fs_dev.this_device, "count:%d space:%d copied:%d", count,
			space, copied);

	return copied;
}