static void ksb_tx_cb(struct urb *urb)
{
	struct data_pkt *pkt = urb->context;
	struct ks_bridge *ksb = pkt->ctxt;

	dbg_log_event(ksb, "C TX_URB", urb->status, 0);
	pr_debug("status:%d", urb->status);

	if (ksb->ifc)
		usb_autopm_put_interface_async(ksb->ifc);

	if (urb->status < 0)
		pr_err_ratelimited("urb failed with err:%d", urb->status);

	ksb_free_data_pkt(pkt);
}
static void ksb_rx_cb(struct urb *urb)
{
	struct data_pkt *pkt = urb->context;
	struct ks_bridge *ksb = pkt->ctxt;
	bool wakeup = true;

	dbg_log_event(ksb, "C RX_URB", urb->status, urb->actual_length);
	pr_debug("status:%d actual:%d", urb->status, urb->actual_length);

	/* non-zero length of data received while unlinking urb */
	if (urb->status == -ENOENT && (urb->actual_length > 0)) {
		/*
		 * If we wakeup the reader process now, it may
		 * queue the URB before its reject flag gets
		 * cleared.
		 */
		wakeup = false;
		goto add_to_list;
	}

	if (urb->status < 0) {
		if (urb->status != -ESHUTDOWN && urb->status != -ENOENT &&
				urb->status != -EPROTO)
			pr_err_ratelimited("urb failed with err:%d",
					urb->status);
		ksb_free_data_pkt(pkt);
		goto done;
	}

	if (urb->actual_length == 0) {
		submit_one_urb(ksb, GFP_ATOMIC, pkt);
		goto done;
	}

add_to_list:
	spin_lock(&ksb->lock);
	pkt->len = urb->actual_length;
	list_add_tail(&pkt->list, &ksb->to_ks_list);
	spin_unlock(&ksb->lock);

	/* wake up read thread */
	if (wakeup)
		wake_up(&ksb->ks_wait_q);
done:
	atomic_dec(&ksb->rx_pending_cnt);
	wake_up(&ksb->pending_urb_wait);
}
static int ehci_hsic_bus_resume(struct usb_hcd *hcd)
{
	struct msm_hsic_hcd *mehci = hcd_to_hsic(hcd);
	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
	u32 temp;
	struct task_struct *resume_thread = NULL;

	mehci->resume_status = 0;
	resume_thread = kthread_run(msm_hsic_resume_thread,
			mehci, "hsic_resume_thread");
	if (IS_ERR(resume_thread)) {
		pr_err("Error creating resume thread:%lu\n",
				PTR_ERR(resume_thread));
		return PTR_ERR(resume_thread);
	}

	wait_for_completion(&mehci->rt_completion);

	if (mehci->resume_status < 0)
		return mehci->resume_status;

	dbg_log_event(NULL, "FPR: Wokeup", 0);
	spin_lock_irq(&ehci->lock);
	(void) ehci_readl(ehci, &ehci->regs->command);

	temp = 0;
	if (ehci->async->qh_next.qh)
		temp |= CMD_ASE;
	if (ehci->periodic_sched)
		temp |= CMD_PSE;
	if (temp) {
		ehci->command |= temp;
		ehci_writel(ehci, ehci->command, &ehci->regs->command);
	}

	ehci->next_statechange = jiffies + msecs_to_jiffies(5);
	hcd->state = HC_STATE_RUNNING;
	ehci->rh_state = EHCI_RH_RUNNING;

	/* Now we can safely re-enable irqs */
	ehci_writel(ehci, INTR_MASK, &ehci->regs->intr_enable);

	spin_unlock_irq(&ehci->lock);

	return 0;
}
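ehci_hsic_bus_resume() offloads the register-level resume sequence to a helper thread and blocks on rt_completion until that thread reports a status. A minimal, self-contained sketch of the kthread_run()/completion handshake this relies on; all names here are illustrative, not taken from the driver:

#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>

struct resume_ctx {
	struct completion done;
	int status;
};

static int worker_fn(void *data)
{
	struct resume_ctx *ctx = data;

	ctx->status = 0;		/* the real work happens here */
	complete(&ctx->done);		/* wake the waiter exactly once */
	return 0;
}

static int spawn_and_wait(struct resume_ctx *ctx)
{
	struct task_struct *t;

	init_completion(&ctx->done);
	t = kthread_run(worker_fn, ctx, "resume_worker");
	if (IS_ERR(t))
		return PTR_ERR(t);

	wait_for_completion(&ctx->done);	/* block until worker signals */
	return ctx->status;
}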
static void ksb_usb_disconnect(struct usb_interface *ifc)
{
	struct ks_bridge *ksb = usb_get_intfdata(ifc);
	unsigned long flags;
	struct data_pkt *pkt;

	dbg_log_event(ksb, "PID-DETACH", 0, 0);

	clear_bit(USB_DEV_CONNECTED, &ksb->flags);
	wake_up(&ksb->ks_wait_q);
	cancel_work_sync(&ksb->to_mdm_work);
	cancel_work_sync(&ksb->start_rx_work);

	misc_deregister(ksb->fs_dev);

	usb_kill_anchored_urbs(&ksb->submitted);

	wait_event_interruptible_timeout(
			ksb->pending_urb_wait,
			!atomic_read(&ksb->tx_pending_cnt) &&
			!atomic_read(&ksb->rx_pending_cnt),
			msecs_to_jiffies(PENDING_URB_TIMEOUT));

	spin_lock_irqsave(&ksb->lock, flags);
	while (!list_empty(&ksb->to_ks_list)) {
		pkt = list_first_entry(&ksb->to_ks_list,
				struct data_pkt, list);
		list_del_init(&pkt->list);
		ksb_free_data_pkt(pkt);
	}
	while (!list_empty(&ksb->to_mdm_list)) {
		pkt = list_first_entry(&ksb->to_mdm_list,
				struct data_pkt, list);
		list_del_init(&pkt->list);
		ksb_free_data_pkt(pkt);
	}
	spin_unlock_irqrestore(&ksb->lock, flags);

	ifc->needs_remote_wakeup = 0;
	usb_put_dev(ksb->udev);

	ksb->ifc = NULL;
	usb_set_intfdata(ifc, NULL);
}
static void ksb_tx_cb(struct urb *urb)
{
	struct data_pkt *pkt = urb->context;
	struct ks_bridge *ksb = pkt->ctxt;

	dbg_log_event(ksb, "C TX_URB", urb->status, 0);
	pr_debug("status:%d", urb->status);

	if (test_bit(USB_DEV_CONNECTED, &ksb->flags))
		usb_autopm_put_interface_async(ksb->ifc);

	if (urb->status < 0)
		pr_err_ratelimited("urb failed with err:%d", urb->status);

	ksb_free_data_pkt(pkt);
	atomic_dec(&ksb->tx_pending_cnt);
	wake_up(&ksb->pending_urb_wait);
}
static void ksb_rx_cb(struct urb *urb)
{
	struct data_pkt *pkt = urb->context;
	struct ks_bridge *ksb = pkt->ctxt;

	dbg_log_event(ksb, "C RX_URB", urb->status, urb->actual_length);
	pr_debug("status:%d actual:%d", urb->status, urb->actual_length);

	/* non-zero length of data received while unlinking urb */
	if (urb->status == -ENOENT && urb->actual_length > 0)
		goto add_to_list;

	if (urb->status < 0) {
		if (urb->status != -ESHUTDOWN && urb->status != -ENOENT &&
				urb->status != -EPROTO)
			pr_err_ratelimited("urb failed with err:%d",
					urb->status);
		ksb_free_data_pkt(pkt);
		ksb->alloced_read_pkts--;
		return;
	}

	if (urb->actual_length == 0) {
		ksb_free_data_pkt(pkt);
		ksb->alloced_read_pkts--;
		goto resubmit_urb;
	}

add_to_list:
	spin_lock(&ksb->lock);
	pkt->len = urb->actual_length;
	list_add_tail(&pkt->list, &ksb->to_ks_list);
	spin_unlock(&ksb->lock);

	/* wake up read thread */
	wake_up(&ksb->ks_wait_q);

resubmit_urb:
	submit_one_urb(ksb);
}
static void ksb_usb_disconnect(struct usb_interface *ifc)
{
	struct ks_bridge *ksb = usb_get_intfdata(ifc);
	unsigned long flags;
	struct data_pkt *pkt;

	dbg_log_event(ksb, "PID-DETACH", 0, 0);

	clear_bit(USB_DEV_CONNECTED, &ksb->flags);
	wake_up(&ksb->ks_wait_q);
	cancel_work_sync(&ksb->to_mdm_work);

	usb_kill_anchored_urbs(&ksb->submitted);

	spin_lock_irqsave(&ksb->lock, flags);
	while (!list_empty(&ksb->to_ks_list)) {
		pkt = list_first_entry(&ksb->to_ks_list,
				struct data_pkt, list);
		list_del_init(&pkt->list);
		ksb_free_data_pkt(pkt);
	}
	while (!list_empty(&ksb->to_mdm_list)) {
		pkt = list_first_entry(&ksb->to_mdm_list,
				struct data_pkt, list);
		list_del_init(&pkt->list);
		ksb_free_data_pkt(pkt);
	}
	spin_unlock_irqrestore(&ksb->lock, flags);

	misc_deregister(ksb->fs_dev);

//#ifdef VENDOR_EDIT
//[email protected] 2013/03/08, Modify for can't incoming call
	ifc->needs_remote_wakeup = 0;
//#endif /* VENDOR_EDIT */
	usb_put_dev(ksb->udev);

	ksb->ifc = NULL;
	usb_set_intfdata(ifc, NULL);
}
static int ksb_fs_open(struct inode *ip, struct file *fp)
{
	struct ks_bridge *ksb = __ksb[BOOT_BRIDGE_INDEX];

	if (!ksb) {
		pr_err("ksb is being removed");
		return -ENODEV;
	}

	pr_info(":%s", ksb->name);
	dbg_log_event(ksb, "KS-FS-OPEN", 0, 0);

	fp->private_data = ksb;
	set_bit(FILE_OPENED, &ksb->flags);

	if (test_bit(USB_DEV_CONNECTED, &ksb->flags))
		queue_work(ksb->wq, &ksb->start_rx_work);

	return 0;
}
static int msm_hsic_pm_suspend(struct device *dev)
{
	int ret;
	struct usb_hcd *hcd = dev_get_drvdata(dev);
	struct msm_hsic_hcd *mehci = hcd_to_hsic(hcd);

	dev_dbg(dev, "ehci-msm-hsic PM suspend\n");

	dbg_log_event(NULL, "PM Suspend", 0);

	if (device_may_wakeup(dev))
		enable_irq_wake(hcd->irq);

	ret = msm_hsic_suspend(mehci);

	if (ret && device_may_wakeup(dev))
		disable_irq_wake(hcd->irq);

	return ret;
}
static void submit_one_urb(struct ks_bridge *ksb, gfp_t flags,
		struct data_pkt *pkt)
{
	struct urb *urb;
	int ret;

	urb = usb_alloc_urb(0, flags);
	if (!urb) {
		pr_err("unable to allocate urb");
		ksb_free_data_pkt(pkt);
		return;
	}

	usb_fill_bulk_urb(urb, ksb->udev, ksb->in_pipe,
			pkt->buf, pkt->len,
			ksb_rx_cb, pkt);
	usb_anchor_urb(urb, &ksb->submitted);

	if (!test_bit(USB_DEV_CONNECTED, &ksb->flags)) {
		usb_unanchor_urb(urb);
		usb_free_urb(urb);
		ksb_free_data_pkt(pkt);
		return;
	}

	atomic_inc(&ksb->rx_pending_cnt);
	ret = usb_submit_urb(urb, flags);
	if (ret) {
		pr_err("in urb submission failed");
		usb_unanchor_urb(urb);
		usb_free_urb(urb);
		ksb_free_data_pkt(pkt);
		atomic_dec(&ksb->rx_pending_cnt);
		wake_up(&ksb->pending_urb_wait);
		return;
	}

	dbg_log_event(ksb, "S RX_URB", pkt->len, 0);

	usb_free_urb(urb);
}
static int ksb_fs_open(struct inode *ip, struct file *fp)
{
	struct miscdevice *mdev = fp->private_data;
	struct ks_bridge *ksb = container_of(mdev, struct ks_bridge, fs_dev);

	if (IS_ERR(ksb)) {
		pr_err("ksb device not found");
		return -ENODEV;
	}

	dev_dbg(ksb->fs_dev.this_device, ":%s", ksb->fs_dev.name);
	dbg_log_event(ksb, "FS-OPEN", 0, 0);

	fp->private_data = ksb;
	set_bit(FILE_OPENED, &ksb->flags);

	if (test_bit(USB_DEV_CONNECTED, &ksb->flags))
		queue_work(ksb->wq, &ksb->start_rx_work);

	return 0;
}
static int msm_hsic_pm_resume(struct device *dev)
{
	int ret;
	struct usb_hcd *hcd = dev_get_drvdata(dev);
	struct msm_hsic_hcd *mehci = hcd_to_hsic(hcd);

	dbg_log_event(NULL, "PM Resume", 0);

	if (device_may_wakeup(dev))
		disable_irq_wake(hcd->irq);

	ret = msm_hsic_resume(mehci);
	if (ret)
		return ret;

	/* Bring the device to full powered state upon system resume */
	pm_runtime_disable(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	return 0;
}
static void submit_one_urb(struct ks_bridge *ksb)
{
	struct data_pkt *pkt;
	struct urb *urb;
	int ret;

	pkt = ksb_alloc_data_pkt(MAX_DATA_PKT_SIZE, GFP_ATOMIC, ksb);
	if (IS_ERR(pkt)) {
		pr_err("unable to allocate data pkt");
		return;
	}

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb) {
		pr_err("unable to allocate urb");
		ksb_free_data_pkt(pkt);
		return;
	}
	ksb->alloced_read_pkts++;

	usb_fill_bulk_urb(urb, ksb->udev, ksb->in_pipe,
			pkt->buf, pkt->len,
			ksb_rx_cb, pkt);
	usb_anchor_urb(urb, &ksb->submitted);

	dbg_log_event(ksb, "S RX_URB", pkt->len, 0);

	ret = usb_submit_urb(urb, GFP_ATOMIC);
	if (ret) {
		pr_err("in urb submission failed");
		usb_unanchor_urb(urb);
		usb_free_urb(urb);
		ksb_free_data_pkt(pkt);
		ksb->alloced_read_pkts--;
		return;
	}

	usb_free_urb(urb);
}
static irqreturn_t msm_hsic_wakeup_irq(int irq, void *data)
{
	struct msm_hsic_hcd *mehci = data;
	int ret;

	mehci->wakeup_int_cnt++;
	dbg_log_event(NULL, "Remote Wakeup IRQ", mehci->wakeup_int_cnt);
	dev_dbg(mehci->dev, "%s: hsic remote wakeup interrupt cnt: %u\n",
			__func__, mehci->wakeup_int_cnt);

	wake_lock(&mehci->wlock);

	spin_lock(&mehci->wakeup_lock);
	if (mehci->wakeup_irq_enabled) {
		mehci->wakeup_irq_enabled = 0;
		disable_irq_wake(irq);
		disable_irq_nosync(irq);
	}
	spin_unlock(&mehci->wakeup_lock);

	if (!atomic_read(&mehci->pm_usage_cnt)) {
		ret = pm_runtime_get(mehci->dev);
		/*
		 * HSIC runtime resume can race with us.
		 * if we are active (ret == 1) or resuming
		 * (ret == -EINPROGRESS), decrement the
		 * PM usage counter before returning.
		 */
		if ((ret == 1) || (ret == -EINPROGRESS))
			pm_runtime_put_noidle(mehci->dev);
		else
			atomic_set(&mehci->pm_usage_cnt, 1);
	}

	return IRQ_HANDLED;
}
static void ksb_usb_disconnect(struct usb_interface *ifc)
{
	struct ks_bridge *ksb = usb_get_intfdata(ifc);
	unsigned long flags;
	struct data_pkt *pkt;

	dbg_log_event(ksb, "PID-DETACH", 0, 0);

	clear_bit(USB_DEV_CONNECTED, &ksb->flags);
	wake_up(&ksb->ks_wait_q);
	cancel_work_sync(&ksb->to_mdm_work);

	usb_kill_anchored_urbs(&ksb->submitted);

	spin_lock_irqsave(&ksb->lock, flags);
	while (!list_empty(&ksb->to_ks_list)) {
		pkt = list_first_entry(&ksb->to_ks_list,
				struct data_pkt, list);
		list_del_init(&pkt->list);
		ksb_free_data_pkt(pkt);
	}
	while (!list_empty(&ksb->to_mdm_list)) {
		pkt = list_first_entry(&ksb->to_mdm_list,
				struct data_pkt, list);
		list_del_init(&pkt->list);
		ksb_free_data_pkt(pkt);
	}
	spin_unlock_irqrestore(&ksb->lock, flags);

	misc_deregister(ksb->fs_dev);
	usb_put_dev(ksb->udev);

	ksb->ifc = NULL;
	usb_set_intfdata(ifc, NULL);
}
static irqreturn_t msm_hsic_wakeup_irq(int irq, void *data)
{
	struct msm_hsic_hcd *mehci = data;

	mehci->wakeup_int_cnt++;
	dbg_log_event(NULL, "Remote Wakeup IRQ", mehci->wakeup_int_cnt);
	dev_dbg(mehci->dev, "%s: hsic remote wakeup interrupt cnt: %u\n",
			__func__, mehci->wakeup_int_cnt);

	wake_lock(&mehci->wlock);

	if (mehci->wakeup_irq_enabled) {
		mehci->wakeup_irq_enabled = 0;
		disable_irq_wake(irq);
		disable_irq_nosync(irq);
	}

	if (!atomic_read(&mehci->pm_usage_cnt)) {
		atomic_set(&mehci->pm_usage_cnt, 1);
		pm_runtime_get(mehci->dev);
	}

	return IRQ_HANDLED;
}
static int ksb_usb_probe(struct usb_interface *ifc,
		const struct usb_device_id *id)
{
	__u8 ifc_num;
	struct usb_host_interface *ifc_desc;
	struct usb_endpoint_descriptor *ep_desc;
	int i;
	struct ks_bridge *ksb;

	ifc_num = ifc->cur_altsetting->desc.bInterfaceNumber;

	switch (id->idProduct) {
	case 0x9008:
		if (ifc_num != 0)
			return -ENODEV;
		ksb = __ksb[BOOT_BRIDGE_INDEX];
		break;
	case 0x9048:
	case 0x904C:
		if (ifc_num != 2)
			return -ENODEV;
		ksb = __ksb[EFS_BRIDGE_INDEX];
		break;
	default:
		return -ENODEV;
	}

	if (!ksb) {
		pr_err("ksb is not initialized");
		return -ENODEV;
	}

	ksb->udev = usb_get_dev(interface_to_usbdev(ifc));
	ksb->ifc = ifc;
	ifc_desc = ifc->cur_altsetting;

	for (i = 0; i < ifc_desc->desc.bNumEndpoints; i++) {
		ep_desc = &ifc_desc->endpoint[i].desc;

		if (!ksb->in_epAddr && usb_endpoint_is_bulk_in(ep_desc))
			ksb->in_epAddr = ep_desc->bEndpointAddress;

		if (!ksb->out_epAddr && usb_endpoint_is_bulk_out(ep_desc))
			ksb->out_epAddr = ep_desc->bEndpointAddress;
	}

	if (!(ksb->in_epAddr && ksb->out_epAddr)) {
		pr_err("could not find bulk in and bulk out endpoints");
		usb_put_dev(ksb->udev);
		ksb->ifc = NULL;
		return -ENODEV;
	}

	ksb->in_pipe = usb_rcvbulkpipe(ksb->udev, ksb->in_epAddr);
	ksb->out_pipe = usb_sndbulkpipe(ksb->udev, ksb->out_epAddr);

	usb_set_intfdata(ifc, ksb);
	set_bit(USB_DEV_CONNECTED, &ksb->flags);

	dbg_log_event(ksb, "PID-ATT", id->idProduct, 0);

	ksb->fs_dev = (struct miscdevice *)id->driver_info;
	misc_register(ksb->fs_dev);

	ifc->needs_remote_wakeup = 1;
	usb_enable_autosuspend(ksb->udev);

	pr_debug("usb dev connected");

	return 0;
}
static void flush_to_ldisc(struct work_struct *work)
{
	struct tty_struct *tty =
		container_of(work, struct tty_struct, buf.work);
	unsigned long flags;
	struct tty_ldisc *disc;

	disc = tty_ldisc_ref(tty);
	if (disc == NULL)	/*  !TTY_LDISC */
		return;

	spin_lock_irqsave(&tty->buf.lock, flags);

	if (!test_and_set_bit(TTY_FLUSHING, &tty->flags)) {
		struct tty_buffer *head;
		while ((head = tty->buf.head) != NULL) {
			int count;
			char *char_buf;
			unsigned char *flag_buf;
			unsigned int left = 0;
			unsigned int max_space;

			count = head->commit - head->read;
			if (!count) {
				if (head->next == NULL)
					break;
				tty->buf.head = head->next;
				tty_buffer_free(tty, head);
				continue;
			}
			/* Ldisc or user is trying to flush the buffers
			   we are feeding to the ldisc, stop feeding the
			   line discipline as we want to empty the queue */
			if (test_bit(TTY_FLUSHPENDING, &tty->flags))
				break;

			/* update receive room */
			spin_lock(&tty->read_lock);
			if (tty->update_room_in_ldisc) {
				if ((tty->read_cnt == N_TTY_BUF_SIZE - 1) &&
					(tty->receive_room ==
						N_TTY_BUF_SIZE - 1))
					tty->rr_bug++;
				left = N_TTY_BUF_SIZE - tty->read_cnt - 1;
			}
			spin_unlock(&tty->read_lock);

			if (!tty->receive_room)
				break;
			if (tty->update_room_in_ldisc && !left) {
				schedule_work(&tty->buf.work);
				break;
			}
			if (tty->update_room_in_ldisc)
				max_space = min(left, tty->receive_room);
			else
				max_space = tty->receive_room;
			if (count > max_space)
				count = max_space;
			char_buf = head->char_buf_ptr + head->read;
			flag_buf = head->flag_buf_ptr + head->read;
			head->read += count;
			spin_unlock_irqrestore(&tty->buf.lock, flags);

			if (tty->start_debug) {
				dbg_log_event(NULL, "f_t_l_d:head->read",
						head->read,
						"head->commit", head->commit,
						"receive_room ",
						tty->receive_room);
			}

			tty->ldisc_cnt += count;
			disc->ops->receive_buf(tty, char_buf,
					flag_buf, count);
			spin_lock_irqsave(&tty->buf.lock, flags);
		}
		clear_bit(TTY_FLUSHING, &tty->flags);
	}

	/* We may have a deferred request to flush the input buffer,
	   if so pull the chain under the lock and empty the queue */
	if (test_bit(TTY_FLUSHPENDING, &tty->flags)) {
		__tty_buffer_flush(tty);
		clear_bit(TTY_FLUSHPENDING, &tty->flags);
		wake_up(&tty->read_wait);
	}
	spin_unlock_irqrestore(&tty->buf.lock, flags);

	tty_ldisc_deref(disc);
}
static int ksb_usb_probe(struct usb_interface *ifc,
		const struct usb_device_id *id)
{
	__u8 ifc_num;
	struct usb_host_interface *ifc_desc;
	struct usb_endpoint_descriptor *ep_desc;
	int i;
	struct ks_bridge *ksb;
	unsigned long flags;
	struct data_pkt *pkt;

	ifc_num = ifc->cur_altsetting->desc.bInterfaceNumber;

	switch (id->idProduct) {
	case 0x9008:
		if (ifc_num != 0)
			return -ENODEV;
		ksb = __ksb[BOOT_BRIDGE_INDEX];
		break;
	case 0x9048:
	case 0x904C:
	case 0x9075:
		if (ifc_num != 2)
			return -ENODEV;
		ksb = __ksb[EFS_BRIDGE_INDEX];
		break;
	default:
		return -ENODEV;
	}

	if (!ksb) {
		pr_err("ksb is not initialized");
		return -ENODEV;
	}

	ksb->udev = usb_get_dev(interface_to_usbdev(ifc));
	ksb->ifc = ifc;
	ifc_desc = ifc->cur_altsetting;

	for (i = 0; i < ifc_desc->desc.bNumEndpoints; i++) {
		ep_desc = &ifc_desc->endpoint[i].desc;

		if (!ksb->in_epAddr && usb_endpoint_is_bulk_in(ep_desc))
			ksb->in_epAddr = ep_desc->bEndpointAddress;

		if (!ksb->out_epAddr && usb_endpoint_is_bulk_out(ep_desc))
			ksb->out_epAddr = ep_desc->bEndpointAddress;
	}

	if (!(ksb->in_epAddr && ksb->out_epAddr)) {
		pr_err("could not find bulk in and bulk out endpoints");
		usb_put_dev(ksb->udev);
		ksb->ifc = NULL;
		return -ENODEV;
	}

	ksb->in_pipe = usb_rcvbulkpipe(ksb->udev, ksb->in_epAddr);
	ksb->out_pipe = usb_sndbulkpipe(ksb->udev, ksb->out_epAddr);

	usb_set_intfdata(ifc, ksb);
	set_bit(USB_DEV_CONNECTED, &ksb->flags);
	atomic_set(&ksb->tx_pending_cnt, 0);
	atomic_set(&ksb->rx_pending_cnt, 0);

	dbg_log_event(ksb, "PID-ATT", id->idProduct, 0);

	/* free up stale buffers, if any, from a previous disconnect */
	spin_lock_irqsave(&ksb->lock, flags);
	while (!list_empty(&ksb->to_ks_list)) {
		pkt = list_first_entry(&ksb->to_ks_list,
				struct data_pkt, list);
		list_del_init(&pkt->list);
		ksb_free_data_pkt(pkt);
	}
	while (!list_empty(&ksb->to_mdm_list)) {
		pkt = list_first_entry(&ksb->to_mdm_list,
				struct data_pkt, list);
		list_del_init(&pkt->list);
		ksb_free_data_pkt(pkt);
	}
	spin_unlock_irqrestore(&ksb->lock, flags);

	ksb->fs_dev = (struct miscdevice *)id->driver_info;
	misc_register(ksb->fs_dev);

	if (device_can_wakeup(&ksb->udev->dev)) {
		ifc->needs_remote_wakeup = 1;
		usb_enable_autosuspend(ksb->udev);
	}

	atomic_set(&ksb->pmlock_cnt, 0);

	pr_info("usb dev connected");

	return 0;
}
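Both probe variants recover the per-bridge miscdevice from id->driver_info, so the USB device ID table must carry that pointer. A sketch of how such a table could be wired up; the device name, fops symbol, and the 0x05c6 (Qualcomm) vendor ID pairing are assumptions for illustration, not taken from this listing:

/* Illustrative only: one miscdevice per bridge, fed to probe via
 * driver_info. ksb_fboot_dev and ksb_fops are assumed names.
 */
static struct miscdevice ksb_fboot_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "ks_hsic_bridge",	/* assumed node name */
	.fops = &ksb_fops,
};

static const struct usb_device_id ksb_usb_ids[] = {
	{ USB_DEVICE(0x05c6, 0x9008),	/* boot bridge, interface 0 */
	  .driver_info = (unsigned long)&ksb_fboot_dev, },
	{} /* terminating entry */
};
MODULE_DEVICE_TABLE(usb, ksb_usb_ids);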
static void ksb_start_rx_work(struct work_struct *w)
{
	struct ks_bridge *ksb =
			container_of(w, struct ks_bridge, start_rx_work);
	struct data_pkt *pkt;
	struct urb *urb;
	int i = 0;
	int ret;
	bool put = true;

	ret = usb_autopm_get_interface(ksb->ifc);
	if (ret < 0) {
		if (ret != -EAGAIN && ret != -EACCES) {
			pr_err_ratelimited("%s: autopm_get failed:%d",
					ksb->fs_dev.name, ret);
			return;
		}
		put = false;
	}

	for (i = 0; i < NO_RX_REQS; i++) {

		if (!test_bit(USB_DEV_CONNECTED, &ksb->flags))
			break;

		pkt = ksb_alloc_data_pkt(MAX_DATA_PKT_SIZE, GFP_KERNEL, ksb);
		if (IS_ERR(pkt)) {
			dev_err(&ksb->udev->dev,
					"unable to allocate data pkt");
			break;
		}

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			dev_err(&ksb->udev->dev, "unable to allocate urb");
			ksb_free_data_pkt(pkt);
			break;
		}

		usb_fill_bulk_urb(urb, ksb->udev, ksb->in_pipe,
				pkt->buf, pkt->len,
				ksb_rx_cb, pkt);
		usb_anchor_urb(urb, &ksb->submitted);

		dbg_log_event(ksb, "S RX_URB", pkt->len, 0);

		atomic_inc(&ksb->rx_pending_cnt);
		ret = usb_submit_urb(urb, GFP_KERNEL);
		if (ret) {
			dev_err(&ksb->udev->dev,
					"in urb submission failed");
			usb_unanchor_urb(urb);
			usb_free_urb(urb);
			ksb_free_data_pkt(pkt);
			atomic_dec(&ksb->rx_pending_cnt);
			wake_up(&ksb->pending_urb_wait);
			break;
		}

		usb_free_urb(urb);
	}

	if (put)
		usb_autopm_put_interface_async(ksb->ifc);
}
static ssize_t ksb_fs_read(struct file *fp, char __user *buf,
				size_t count, loff_t *pos)
{
	int ret;
	unsigned long flags;
	struct ks_bridge *ksb = fp->private_data;
	struct data_pkt *pkt = NULL;
	size_t space, copied;

read_start:
	if (!test_bit(USB_DEV_CONNECTED, &ksb->flags))
		return -ENODEV;

	spin_lock_irqsave(&ksb->lock, flags);
	if (list_empty(&ksb->to_ks_list)) {
		spin_unlock_irqrestore(&ksb->lock, flags);
		ret = wait_event_interruptible(ksb->ks_wait_q,
				!list_empty(&ksb->to_ks_list) ||
				!test_bit(USB_DEV_CONNECTED, &ksb->flags));
		if (ret < 0)
			return ret;

		goto read_start;
	}

	space = count;
	copied = 0;
	while (!list_empty(&ksb->to_ks_list) && space &&
			test_bit(USB_DEV_CONNECTED, &ksb->flags)) {
		size_t len;

		pkt = list_first_entry(&ksb->to_ks_list, struct data_pkt,
				list);
		list_del_init(&pkt->list);
		len = min_t(size_t, space, pkt->len - pkt->n_read);
		spin_unlock_irqrestore(&ksb->lock, flags);

		ret = copy_to_user(buf + copied, pkt->buf + pkt->n_read, len);
		if (ret) {
			dev_err(ksb->fs_dev.this_device,
					"copy_to_user failed err:%d\n", ret);
			ksb_free_data_pkt(pkt);
			return -EFAULT;
		}

		pkt->n_read += len;
		space -= len;
		copied += len;

		if (pkt->n_read == pkt->len) {
			/*
			 * re-init the packet and queue it
			 * for more data.
			 */
			pkt->n_read = 0;
			pkt->len = MAX_DATA_PKT_SIZE;
			submit_one_urb(ksb, GFP_KERNEL, pkt);
			pkt = NULL;
		}
		spin_lock_irqsave(&ksb->lock, flags);
	}

	/* put the partial packet back in the list */
	if (!space && pkt && pkt->n_read != pkt->len) {
		if (test_bit(USB_DEV_CONNECTED, &ksb->flags))
			list_add(&pkt->list, &ksb->to_ks_list);
		else
			ksb_free_data_pkt(pkt);
	}
	spin_unlock_irqrestore(&ksb->lock, flags);

	dbg_log_event(ksb, "KS_READ", copied, 0);

	dev_dbg(ksb->fs_dev.this_device, "count:%d space:%d copied:%d",
			count, space, copied);

	return copied;
}
static ssize_t ksb_fs_read(struct file *fp, char __user *buf,
				size_t count, loff_t *pos)
{
	int ret;
	unsigned long flags;
	struct ks_bridge *ksb = fp->private_data;
	struct data_pkt *pkt;
	size_t space, copied;

read_start:
	if (!test_bit(USB_DEV_CONNECTED, &ksb->flags))
		return -ENODEV;

	spin_lock_irqsave(&ksb->lock, flags);
	if (list_empty(&ksb->to_ks_list)) {
		spin_unlock_irqrestore(&ksb->lock, flags);
		ret = wait_event_interruptible(ksb->ks_wait_q,
				!list_empty(&ksb->to_ks_list) ||
				!test_bit(USB_DEV_CONNECTED, &ksb->flags));
		if (ret < 0)
			return ret;

		goto read_start;
	}

	space = count;
	copied = 0;
	while (!list_empty(&ksb->to_ks_list) && space) {
		size_t len;

		pkt = list_first_entry(&ksb->to_ks_list, struct data_pkt,
				list);
		len = min_t(size_t, space, pkt->len);
		pkt->n_read += len;
		spin_unlock_irqrestore(&ksb->lock, flags);

		ret = copy_to_user(buf + copied, pkt->buf, len);
		if (ret) {
			pr_err("copy_to_user failed err:%d\n", ret);
			ksb_free_data_pkt(pkt);
			ksb->alloced_read_pkts--;
			return ret;
		}

		space -= len;
		copied += len;

		spin_lock_irqsave(&ksb->lock, flags);
		if (pkt->n_read == pkt->len) {
			list_del_init(&pkt->list);
			ksb_free_data_pkt(pkt);
			ksb->alloced_read_pkts--;
		}
	}
	spin_unlock_irqrestore(&ksb->lock, flags);

	dbg_log_event(ksb, "KS_READ", copied, 0);

	pr_debug("count:%d space:%d copied:%d", count, space, copied);

	return copied;
}
static ssize_t ksb_fs_read(struct file *fp, char __user *buf,
				size_t count, loff_t *pos)
{
	int ret;
	unsigned long flags;
	struct ks_bridge *ksb = fp->private_data;
	struct data_pkt *pkt = NULL;
	size_t space, copied;

read_start:
	if (!test_bit(USB_DEV_CONNECTED, &ksb->flags))
		return -ENODEV;

	spin_lock_irqsave(&ksb->lock, flags);
	if (list_empty(&ksb->to_ks_list)) {
		spin_unlock_irqrestore(&ksb->lock, flags);
		ret = wait_event_interruptible(ksb->ks_wait_q,
				!list_empty(&ksb->to_ks_list) ||
				!test_bit(USB_DEV_CONNECTED, &ksb->flags));
		if (ret < 0)
			return ret;

		goto read_start;
	}

	space = count;
	copied = 0;
	while (!list_empty(&ksb->to_ks_list) && space &&
			test_bit(USB_DEV_CONNECTED, &ksb->flags)) {
		size_t len;

		pkt = list_first_entry(&ksb->to_ks_list, struct data_pkt,
				list);
		list_del_init(&pkt->list);
		len = min_t(size_t, space, pkt->len - pkt->n_read);
		spin_unlock_irqrestore(&ksb->lock, flags);

		ret = copy_to_user(buf + copied, pkt->buf + pkt->n_read, len);
		if (ret) {
			pr_err("copy_to_user failed err:%d\n", ret);
			ksb_free_data_pkt(pkt);
			return -EFAULT;
		}

		pkt->n_read += len;
		space -= len;
		copied += len;

		if (pkt->n_read == pkt->len) {
			/*
			 * re-init the packet and queue it
			 * for more data.
			 */
			pkt->n_read = 0;
			pkt->len = MAX_DATA_PKT_SIZE;
			submit_one_urb(ksb, GFP_KERNEL, pkt);
			pkt = NULL;
		}
		spin_lock_irqsave(&ksb->lock, flags);
	}

	/* put the partial packet back in the list */
	if (!space && pkt && pkt->n_read != pkt->len) {
		if (test_bit(USB_DEV_CONNECTED, &ksb->flags))
			list_add(&pkt->list, &ksb->to_ks_list);
		else
			ksb_free_data_pkt(pkt);
	}
	spin_unlock_irqrestore(&ksb->lock, flags);

	dbg_log_event(ksb, "KS_READ", copied, 0);

	if (!strcmp(ksb->name, "ks_bridge:2") && count == 48 && *buf == 0x1) {
		pr_info("%s, HELLO COMMAND = 0x%x\n", __func__, *buf);
		if (!atomic_read(&ksb->pmlock_cnt)) {
			atomic_inc(&ksb->pmlock_cnt);
			pr_info("<cnt = %d> efs sync\n",
					atomic_read(&ksb->pmlock_cnt));
		} else if (atomic_read(&ksb->pmlock_cnt)) {
			atomic_inc(&ksb->pmlock_cnt);
			pr_info("<cnt = %d> get efs lock\n",
					atomic_read(&ksb->pmlock_cnt));
			pm_runtime_get(&ksb->udev->dev);
			pm_runtime_forbid(&ksb->udev->dev);
		}
	} else if (!strcmp(ksb->name, "ks_bridge:2") && count == 8 &&
			*buf == 0x8) {
		pr_info("%s, RESET_RESPONSE = 0x%x\n", __func__, *buf);
		if (atomic_read(&ksb->pmlock_cnt) == 2) {
			atomic_dec(&ksb->pmlock_cnt);
			pr_info("<cnt = %d> release efs lock\n",
					atomic_read(&ksb->pmlock_cnt));
			pm_runtime_allow(&ksb->udev->dev);
			pm_runtime_put(&ksb->udev->dev);
		}
	}

	if (!strcmp(ksb->name, "ks_bridge:2"))
		pr_info("count:%d space:%d copied:%d", count, space, copied);
	else
		pr_debug("count:%d space:%d copied:%d", count, space, copied);

	return copied;
}
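From userspace the bridge behaves like an ordinary character device: open the misc node and loop on read(), which blocks in ksb_fs_read() until packets land on to_ks_list. A hedged illustration; the node path below is an assumption, since the actual miscdevice name is supplied via driver_info and is not shown in this listing:

/* Illustrative userspace consumer of the bridge device. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[2048];
	ssize_t n;
	int fd = open("/dev/ks_hsic_bridge", O_RDONLY); /* assumed node */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* each read() drains queued packets copied up by ksb_fs_read() */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);
	close(fd);
	return 0;
}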
static int msm_hsic_resume_thread(void *data)
{
	struct msm_hsic_hcd *mehci = data;
	struct usb_hcd *hcd = hsic_to_hcd(mehci);
	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
	u32 temp;
	unsigned long resume_needed = 0;
	int retry_cnt = 0;
	int tight_resume = 0;

	dbg_log_event(NULL, "Resume RH", 0);

	/* keep delay between bus states */
	if (time_before(jiffies, ehci->next_statechange))
		usleep_range(5000, 5000);

	spin_lock_irq(&ehci->lock);
	if (!HCD_HW_ACCESSIBLE(hcd)) {
		spin_unlock_irq(&ehci->lock);
		mehci->resume_status = -ESHUTDOWN;
		complete(&mehci->rt_completion);
		return 0;
	}

	if (unlikely(ehci->debug)) {
		if (!dbgp_reset_prep())
			ehci->debug = NULL;
		else
			dbgp_external_startup();
	}

	/* at least some APM implementations will try to deliver
	 * IRQs right away, so delay them until we're ready.
	 */
	ehci_writel(ehci, 0, &ehci->regs->intr_enable);

	/* re-init operational registers */
	ehci_writel(ehci, 0, &ehci->regs->segment);
	ehci_writel(ehci, ehci->periodic_dma, &ehci->regs->frame_list);
	ehci_writel(ehci, (u32) ehci->async->qh_dma, &ehci->regs->async_next);

	/* CMD_RUN will be set after PORT_RESUME gets cleared */
	if (ehci->resume_sof_bug)
		ehci->command &= ~CMD_RUN;

	/* restore CMD_RUN, framelist size, and irq threshold */
	ehci_writel(ehci, ehci->command, &ehci->regs->command);

	/* manually resume the ports we suspended during bus_suspend() */
resume_again:
	if (retry_cnt >= RESUME_RETRY_LIMIT) {
		pr_info("retry count(%d) reached max, resume in tight loop\n",
				retry_cnt);
		tight_resume = 1;
	}

	temp = ehci_readl(ehci, &ehci->regs->port_status[0]);
	temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
	if (test_bit(0, &ehci->bus_suspended) && (temp & PORT_SUSPEND)) {
		temp |= PORT_RESUME;
		set_bit(0, &resume_needed);
	}
	dbg_log_event(NULL, "FPR: Set", temp);
	ehci_writel(ehci, temp, &ehci->regs->port_status[0]);

	/*
	 * The HSIC controller has a h/w bug due to which it can try to send
	 * SOFs (start of frames) during port resume, resulting in phy
	 * lockup. The HSIC h/w controller in MSM clears the FPR bit after
	 * driving the resume signal for 20ms. The workaround is to stop
	 * SOFs before driving resume and then start sending SOFs
	 * immediately. SOFs must be sent within 3ms of resume completion,
	 * otherwise the peripheral may enter an undefined state. As
	 * usleep_range() does not guarantee an exact sleep time, a GPTimer
	 * is used to time the resume sequence. If the driver exceeds the
	 * allowable time to restart SOFs, the resume process is repeated.
	 */
	if (ehci->resume_sof_bug && resume_needed) {
		if (!tight_resume) {
			mehci->resume_again = 0;
			ehci_writel(ehci, GPT_LD(RESUME_SIGNAL_TIME_MS),
					&mehci->timer->gptimer0_ld);
			ehci_writel(ehci, GPT_RESET | GPT_RUN,
					&mehci->timer->gptimer0_ctrl);
			ehci_writel(ehci, INTR_MASK | STS_GPTIMER0_INTERRUPT,
					&ehci->regs->intr_enable);

			ehci_writel(ehci, GPT_LD(RESUME_SIGNAL_TIME_SOF_MS),
					&mehci->timer->gptimer1_ld);
			ehci_writel(ehci, GPT_RESET | GPT_RUN,
					&mehci->timer->gptimer1_ctrl);

			spin_unlock_irq(&ehci->lock);
			wait_for_completion(&mehci->gpt0_completion);
			spin_lock_irq(&ehci->lock);
		} else {
			dbg_log_event(NULL, "FPR: Tightloop", 0);
			/* do the resume in a tight loop */
			handshake(ehci, &ehci->regs->port_status[0],
					PORT_RESUME, 0, 22 * 1000);
			ehci_writel(ehci, ehci_readl(ehci,
					&ehci->regs->command) | CMD_RUN,
					&ehci->regs->command);
		}

		if (mehci->resume_again) {
			int temp;

			dbg_log_event(NULL, "FPR: Re-Resume", retry_cnt);
			pr_info("FPR: retry count: %d\n", retry_cnt);
			spin_unlock_irq(&ehci->lock);
			temp = ehci_readl(ehci, &ehci->regs->port_status[0]);
			temp &= ~PORT_RWC_BITS;
			temp |= PORT_SUSPEND;
			ehci_writel(ehci, temp, &ehci->regs->port_status[0]);

			/* Keep the bus idle for 5ms so that peripheral
			 * can detect and initiate suspend
			 */
			usleep_range(5000, 5000);
			dbg_log_event(NULL, "FPR: RResume",
					ehci_readl(ehci,
					&ehci->regs->port_status[0]));
			spin_lock_irq(&ehci->lock);
			mehci->resume_again = 0;
			retry_cnt++;
			goto resume_again;
		}
	}

	dbg_log_event(NULL, "FPR: RT-Done", 0);
	mehci->resume_status = 1;
	spin_unlock_irq(&ehci->lock);
	complete(&mehci->rt_completion);

	return 0;
}
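The resume thread parks on gpt0_completion and expects the controller interrupt handler to complete it when GPTIMER0 fires, setting resume_again if the SOF deadline tracked by GPTIMER1 was missed. That interrupt path is not part of this listing; a sketch of how it could look, with the register-access and field names assumed from the code above:

/* Sketch only: GPTIMER0 expiry marks the end of the resume signal.
 * If GPTIMER1 (the SOF deadline) has already run out, ask the resume
 * thread to retry; otherwise start SOFs right away by setting CMD_RUN.
 */
if (status & STS_GPTIMER0_INTERRUPT) {
	dbg_log_event(NULL, "FPR: gpt0 expired", 0);

	if (!GPT_CNT(ehci_readl(ehci, &mehci->timer->gptimer1_ctrl)))
		mehci->resume_again = 1;	/* missed the SOF window */
	else
		ehci_writel(ehci, ehci_readl(ehci, &ehci->regs->command) |
				CMD_RUN, &ehci->regs->command);

	complete(&mehci->gpt0_completion);	/* unblock resume thread */
}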
static int ehci_hsic_bus_suspend(struct usb_hcd *hcd)
{
	dbg_log_event(NULL, "Suspend RH", 0);
	return ehci_bus_suspend(hcd);
}
static int ehci_hsic_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
		gfp_t mem_flags)
{
	dbg_log_event(urb, event_to_str(URB_SUBMIT), 0);
	return ehci_urb_enqueue(hcd, urb, mem_flags);
}