static void ksb_rx_cb(struct urb *urb) { struct data_pkt *pkt = urb->context; struct ks_bridge *ksb = pkt->ctxt; dbg_log_event(ksb, "C RX_URB", urb->status, urb->actual_length); pr_debug("status:%d actual:%d", urb->status, urb->actual_length); if (urb->status < 0) { if (urb->status != -ESHUTDOWN && urb->status != -ENOENT) pr_err_ratelimited("urb failed with err:%d", urb->status); ksb_free_data_pkt(pkt); return; } if (urb->actual_length == 0) { submit_one_urb(ksb, GFP_ATOMIC, pkt); return; } spin_lock(&ksb->lock); pkt->len = urb->actual_length; list_add_tail(&pkt->list, &ksb->to_ks_list); spin_unlock(&ksb->lock); /* wake up read thread */ wake_up(&ksb->ks_wait_q); }
static void ksb_rx_cb(struct urb *urb) { struct data_pkt *pkt = urb->context; struct ks_bridge *ksb = pkt->ctxt; bool wakeup = true; usb_mark_last_busy(ksb->udev); dbg_log_event(ksb, "C RX_URB", urb->status, urb->actual_length); #if 0 dev_dbg(&ksb->udev->dev, "status:%d actual:%d", urb->status, urb->actual_length); #endif /*non zero len of data received while unlinking urb*/ if (urb->status == -ENOENT && (urb->actual_length > 0)) { /* * If we wakeup the reader process now, it may * queue the URB before its reject flag gets * cleared. */ wakeup = false; goto add_to_list; } if (urb->status < 0) { if (urb->status != -ESHUTDOWN && urb->status != -ENOENT && urb->status != -EPROTO) pr_err_ratelimited("%s: urb failed with err:%d", ksb->fs_dev.name, urb->status); ksb_free_data_pkt(pkt); goto done; } if (urb->actual_length == 0) { submit_one_urb(ksb, GFP_ATOMIC, pkt); goto done; } if (urb->actual_length == 48) { pr_info("%s: usage=%d, child=%d\n", __func__, atomic_read(&ksb->udev->dev.power.usage_count), atomic_read(&ksb->udev->dev.power.child_count)); } add_to_list: spin_lock(&ksb->lock); pkt->len = urb->actual_length; list_add_tail(&pkt->list, &ksb->to_ks_list); spin_unlock(&ksb->lock); /* wake up read thread */ if (wakeup) wake_up(&ksb->ks_wait_q); done: atomic_dec(&ksb->rx_pending_cnt); wake_up(&ksb->pending_urb_wait); }
static void ksb_rx_cb(struct urb *urb) { struct data_pkt *pkt = urb->context; struct ks_bridge *ksb = pkt->ctxt; bool wakeup = true; dbg_log_event(ksb, "C RX_URB", urb->status, urb->actual_length); pr_debug("status:%d actual:%d", urb->status, urb->actual_length); /*non zero len of data received while unlinking urb*/ if (urb->status == -ENOENT && (urb->actual_length > 0)) { /* * If we wakeup the reader process now, it may * queue the URB before its reject flag gets * cleared. */ wakeup = false; goto add_to_list; } if (urb->status < 0) { if (urb->status != -ESHUTDOWN && urb->status != -ENOENT && urb->status != -EPROTO) pr_err_ratelimited("urb failed with err:%d", urb->status); ksb_free_data_pkt(pkt); goto done; } if (urb->actual_length == 0) { submit_one_urb(ksb, GFP_ATOMIC, pkt); goto done; } add_to_list: spin_lock(&ksb->lock); pkt->len = urb->actual_length; list_add_tail(&pkt->list, &ksb->to_ks_list); spin_unlock(&ksb->lock); /* wake up read thread */ if (wakeup) wake_up(&ksb->ks_wait_q); done: atomic_dec(&ksb->rx_pending_cnt); wake_up(&ksb->pending_urb_wait); }
/*
 * ksb_rx_cb() - completion handler for bulk IN URBs (alloced_read_pkts
 * accounting revision).
 *
 * Runs in interrupt context.  Errors and empty completions release the
 * packet and drop the outstanding-packet counter; real data is queued
 * on to_ks_list for the reader.  Unlike the other revisions in this
 * file, this one unconditionally resubmits a fresh URB at the end of
 * the data path (add_to_list falls through to resubmit_urb).
 *
 * NOTE(review): ksb->alloced_read_pkts is decremented here without
 * holding ksb->lock — safe only if every other accessor runs with this
 * endpoint's interrupts excluded; confirm against the rest of the
 * driver.
 *
 * NOTE(review): the -ENOENT + data path jumps to add_to_list and then
 * falls through to resubmit_urb, i.e. it submits a new URB while an
 * unlink is in progress — the later revisions deliberately avoid
 * that; confirm this fallthrough is intended.
 */
static void ksb_rx_cb(struct urb *urb)
{
	struct data_pkt *pkt = urb->context;
	struct ks_bridge *ksb = pkt->ctxt;

	dbg_log_event(ksb, "C RX_URB", urb->status, urb->actual_length);

	pr_debug("status:%d actual:%d", urb->status, urb->actual_length);

	/*non zero len of data received while unlinking urb*/
	if (urb->status == -ENOENT && urb->actual_length > 0)
		goto add_to_list;

	if (urb->status < 0) {
		/* -ESHUTDOWN/-ENOENT/-EPROTO are expected on unlink */
		if (urb->status != -ESHUTDOWN && urb->status != -ENOENT &&
				urb->status != -EPROTO)
			pr_err_ratelimited("urb failed with err:%d",
					urb->status);
		ksb_free_data_pkt(pkt);
		ksb->alloced_read_pkts--;
		return;
	}

	if (urb->actual_length == 0) {
		/* empty completion: drop this packet, submit a fresh one */
		ksb_free_data_pkt(pkt);
		ksb->alloced_read_pkts--;
		goto resubmit_urb;
	}

add_to_list:
	spin_lock(&ksb->lock);
	pkt->len = urb->actual_length;
	list_add_tail(&pkt->list, &ksb->to_ks_list);
	spin_unlock(&ksb->lock);

	/* wake up read thread */
	wake_up(&ksb->ks_wait_q);

resubmit_urb:
	submit_one_urb(ksb);
}
/*
 * ksb_fs_read() - read() handler for the ks_bridge character device.
 *
 * Blocks (interruptibly) until the RX completion handler has queued
 * data on to_ks_list, then drains as many packets as fit into the
 * user buffer.  Fully-consumed packets are re-initialized and handed
 * back to the IN endpoint; a partially-consumed packet is pushed back
 * to the head of the list.  Returns the number of bytes copied, a
 * negative errno on disconnect, fault, or interrupted wait.
 *
 * For "ks_bridge:2" the first byte of the returned data doubles as an
 * EFS command: 0x1 in a 48-byte read (HELLO) takes a runtime-PM
 * wakelock; 0x8 in an 8-byte read (RESET_RESPONSE) releases it.
 *
 * Fixes over the previous revision:
 *  - buf is a __user pointer, so the command byte is fetched with
 *    get_user() instead of being dereferenced directly (which is not
 *    a valid kernel access on all architectures);
 *  - size_t values are printed with %zu, not %d.
 */
static ssize_t ksb_fs_read(struct file *fp, char __user *buf,
				size_t count, loff_t *pos)
{
	int ret;
	unsigned long flags;
	struct ks_bridge *ksb = fp->private_data;
	struct data_pkt *pkt = NULL;
	size_t space, copied;
	char first_byte = 0;

read_start:
	if (!test_bit(USB_DEV_CONNECTED, &ksb->flags))
		return -ENODEV;

	spin_lock_irqsave(&ksb->lock, flags);
	if (list_empty(&ksb->to_ks_list)) {
		spin_unlock_irqrestore(&ksb->lock, flags);
		ret = wait_event_interruptible(ksb->ks_wait_q,
				!list_empty(&ksb->to_ks_list) ||
				!test_bit(USB_DEV_CONNECTED, &ksb->flags));
		if (ret < 0)
			return ret;
		/* re-check connection state and the list under the lock */
		goto read_start;
	}

	space = count;
	copied = 0;
	while (!list_empty(&ksb->to_ks_list) && space &&
			test_bit(USB_DEV_CONNECTED, &ksb->flags)) {
		size_t len;

		pkt = list_first_entry(&ksb->to_ks_list, struct data_pkt,
				list);
		list_del_init(&pkt->list);
		len = min_t(size_t, space, pkt->len - pkt->n_read);

		/* drop the lock around the (possibly faulting) user copy */
		spin_unlock_irqrestore(&ksb->lock, flags);
		ret = copy_to_user(buf + copied, pkt->buf + pkt->n_read, len);
		if (ret) {
			pr_err("copy_to_user failed err:%d\n", ret);
			ksb_free_data_pkt(pkt);
			return -EFAULT;
		}
		pkt->n_read += len;
		space -= len;
		copied += len;

		if (pkt->n_read == pkt->len) {
			/*
			 * re-init the packet and queue it
			 * for more data.
			 */
			pkt->n_read = 0;
			pkt->len = MAX_DATA_PKT_SIZE;
			submit_one_urb(ksb, GFP_KERNEL, pkt);
			pkt = NULL;
		}

		spin_lock_irqsave(&ksb->lock, flags);
	}

	/* put the partial packet back in the list */
	if (!space && pkt && pkt->n_read != pkt->len) {
		if (test_bit(USB_DEV_CONNECTED, &ksb->flags))
			list_add(&pkt->list, &ksb->to_ks_list);
		else
			ksb_free_data_pkt(pkt);
	}
	spin_unlock_irqrestore(&ksb->lock, flags);

	dbg_log_event(ksb, "KS_READ", copied, 0);

	/*
	 * Peek at the first byte just handed to userspace; on fault treat
	 * it as "no command" rather than failing the whole read.
	 */
	if (copied && get_user(first_byte, buf))
		first_byte = 0;

	if (!strcmp(ksb->name, "ks_bridge:2") && count == 48 &&
			first_byte == 0x1) {
		pr_info("%s, HELLO COMMAND = 0x%x\n", __func__, first_byte);
		if (!atomic_read(&ksb->pmlock_cnt)) {
			atomic_inc(&ksb->pmlock_cnt);
			pr_info("<cnt = %d> efs sync\n",
					atomic_read(&ksb->pmlock_cnt));
		} else if (atomic_read(&ksb->pmlock_cnt)) {
			atomic_inc(&ksb->pmlock_cnt);
			pr_info("<cnt = %d> get efs lock\n",
					atomic_read(&ksb->pmlock_cnt));
			/* hold the device awake for the EFS transaction */
			pm_runtime_get(&ksb->udev->dev);
			pm_runtime_forbid(&ksb->udev->dev);
		}
	} else if (!strcmp(ksb->name, "ks_bridge:2") && count == 8 &&
			first_byte == 0x8) {
		pr_info("%s, RESET_RESPONSE = 0x%x\n", __func__, first_byte);
		if (atomic_read(&ksb->pmlock_cnt) == 2) {
			atomic_dec(&ksb->pmlock_cnt);
			pr_info("<cnt = %d> release efs lock\n",
					atomic_read(&ksb->pmlock_cnt));
			pm_runtime_allow(&ksb->udev->dev);
			pm_runtime_put(&ksb->udev->dev);
		}
	}

	if (!strcmp(ksb->name, "ks_bridge:2"))
		pr_info("count:%zu space:%zu copied:%zu", count, space,
				copied);
	else
		pr_debug("count:%zu space:%zu copied:%zu", count, space,
				copied);

	return copied;
}
/*
 * ksb_fs_read() - read() handler for the ks_bridge character device.
 *
 * Blocks (interruptibly) until the RX completion handler has queued
 * data on to_ks_list, then drains as many packets as fit into the
 * user buffer.  A fully-consumed packet is re-initialized and handed
 * back to the IN endpoint; a partially-consumed packet is pushed back
 * to the head of the list so the next read continues where this one
 * stopped.  Returns the number of bytes copied, -ENODEV on
 * disconnect, -EFAULT on a failed user copy, or the negative errno
 * from an interrupted wait.
 *
 * Fix: count/space/copied are size_t and must be printed with %zu
 * (the previous %d is a format/argument type mismatch).
 */
static ssize_t ksb_fs_read(struct file *fp, char __user *buf,
				size_t count, loff_t *pos)
{
	int ret;
	unsigned long flags;
	struct ks_bridge *ksb = fp->private_data;
	struct data_pkt *pkt = NULL;
	size_t space, copied;

read_start:
	if (!test_bit(USB_DEV_CONNECTED, &ksb->flags))
		return -ENODEV;

	spin_lock_irqsave(&ksb->lock, flags);
	if (list_empty(&ksb->to_ks_list)) {
		spin_unlock_irqrestore(&ksb->lock, flags);
		ret = wait_event_interruptible(ksb->ks_wait_q,
				!list_empty(&ksb->to_ks_list) ||
				!test_bit(USB_DEV_CONNECTED, &ksb->flags));
		if (ret < 0)
			return ret;
		/* re-check connection state and the list under the lock */
		goto read_start;
	}

	space = count;
	copied = 0;
	while (!list_empty(&ksb->to_ks_list) && space &&
			test_bit(USB_DEV_CONNECTED, &ksb->flags)) {
		size_t len;

		pkt = list_first_entry(&ksb->to_ks_list, struct data_pkt,
				list);
		list_del_init(&pkt->list);
		len = min_t(size_t, space, pkt->len - pkt->n_read);

		/* drop the lock around the (possibly faulting) user copy */
		spin_unlock_irqrestore(&ksb->lock, flags);
		ret = copy_to_user(buf + copied, pkt->buf + pkt->n_read, len);
		if (ret) {
			dev_err(ksb->fs_dev.this_device,
					"copy_to_user failed err:%d\n", ret);
			ksb_free_data_pkt(pkt);
			return -EFAULT;
		}
		pkt->n_read += len;
		space -= len;
		copied += len;

		if (pkt->n_read == pkt->len) {
			/*
			 * re-init the packet and queue it
			 * for more data.
			 */
			pkt->n_read = 0;
			pkt->len = MAX_DATA_PKT_SIZE;
			submit_one_urb(ksb, GFP_KERNEL, pkt);
			pkt = NULL;
		}

		spin_lock_irqsave(&ksb->lock, flags);
	}

	/* put the partial packet back in the list */
	if (!space && pkt && pkt->n_read != pkt->len) {
		if (test_bit(USB_DEV_CONNECTED, &ksb->flags))
			list_add(&pkt->list, &ksb->to_ks_list);
		else
			ksb_free_data_pkt(pkt);
	}
	spin_unlock_irqrestore(&ksb->lock, flags);

	dbg_log_event(ksb, "KS_READ", copied, 0);

	dev_dbg(ksb->fs_dev.this_device, "count:%zu space:%zu copied:%zu",
			count, space, copied);

	return copied;
}