/*
 * ksb_start_rx_work() - workqueue handler that primes the bulk-IN pipe.
 *
 * Allocates up to NO_RX_REQS receive packets, wraps each in a bulk URB and
 * submits them anchored on ksb->submitted; completions arrive in ksb_rx_cb.
 * Holds a single autopm reference across the whole batch.
 */
static void ksb_start_rx_work(struct work_struct *w)
{
	struct ks_bridge *ksb = container_of(w, struct ks_bridge, start_rx_work);
	struct data_pkt *pkt;
	struct urb *urb;
	int i = 0;
	int ret;
	bool put = true;

	ret = usb_autopm_get_interface(ksb->ifc);
	if (ret < 0) {
		if (ret != -EAGAIN && ret != -EACCES) {
			pr_err_ratelimited("autopm_get failed:%d", ret);
			return;
		}
		/*
		 * -EAGAIN/-EACCES: resume not possible right now, and the
		 * failed get did not leave a usage-count reference behind,
		 * so skip the matching put below but still try to queue RX.
		 */
		put = false;
	}
	for (i = 0; i < NO_RX_REQS; i++) {
		/* stop refilling the pipe once the device is gone */
		if (!test_bit(USB_DEV_CONNECTED, &ksb->flags))
			break;

		pkt = ksb_alloc_data_pkt(MAX_DATA_PKT_SIZE, GFP_KERNEL, ksb);
		if (IS_ERR(pkt)) {
			pr_err("unable to allocate data pkt");
			break;
		}

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			pr_err("unable to allocate urb");
			ksb_free_data_pkt(pkt);
			break;
		}

		usb_fill_bulk_urb(urb, ksb->udev, ksb->in_pipe,
				pkt->buf, pkt->len,
				ksb_rx_cb, pkt);
		/* anchor before submit so disconnect can kill it reliably */
		usb_anchor_urb(urb, &ksb->submitted);

		dbg_log_event(ksb, "S RX_URB", pkt->len, 0);

		/* counted before submit; ksb_rx_cb drops it on completion */
		atomic_inc(&ksb->rx_pending_cnt);
		ret = usb_submit_urb(urb, GFP_KERNEL);
		if (ret) {
			pr_err("in urb submission failed");
			usb_unanchor_urb(urb);
			usb_free_urb(urb);
			ksb_free_data_pkt(pkt);
			atomic_dec(&ksb->rx_pending_cnt);
			/* waiters (e.g. suspend/disconnect) watch this count */
			wake_up(&ksb->pending_urb_wait);
			break;
		}

		/* drop our ref; the anchor/USB core keeps the URB alive */
		usb_free_urb(urb);
	}
	if (put)
		usb_autopm_put_interface_async(ksb->ifc);
}
/*
 * ksb_fs_write() - write() handler for the bridge character device.
 *
 * Copies up to MAX_DATA_PKT_SIZE bytes of user data into a freshly
 * allocated data_pkt, queues it on ksb->to_mdm_list and kicks the
 * to_mdm_work worker, which performs the actual USB transfer.
 *
 * Returns the number of bytes accepted (possibly less than @count),
 * -EFAULT if the user buffer is unreadable, or a negative errno from
 * packet allocation.
 */
static ssize_t ksb_fs_write(struct file *fp, const char __user *buf,
		size_t count, loff_t *pos)
{
	struct data_pkt *pkt;
	unsigned long flags;
	struct ks_bridge *ksb = fp->private_data;

	/* bound the user-controlled size before allocating */
	if (count > MAX_DATA_PKT_SIZE)
		count = MAX_DATA_PKT_SIZE;

	pkt = ksb_alloc_data_pkt(count, GFP_KERNEL, ksb);
	if (IS_ERR(pkt)) {
		pr_err("unable to allocate data packet");
		return PTR_ERR(pkt);
	}

	/*
	 * copy_from_user() returns the number of bytes NOT copied, not a
	 * negative errno; a partial copy must be reported as -EFAULT rather
	 * than returned as a bogus positive byte count.
	 */
	if (copy_from_user(pkt->buf, buf, count)) {
		pr_err("copy_from_user failed");
		ksb_free_data_pkt(pkt);
		return -EFAULT;
	}

	spin_lock_irqsave(&ksb->lock, flags);
	list_add_tail(&pkt->list, &ksb->to_mdm_list);
	spin_unlock_irqrestore(&ksb->lock, flags);

	queue_work(ksb->wq, &ksb->to_mdm_work);

	return count;
}
static void ksb_start_rx_work(struct work_struct *w) { struct ks_bridge *ksb = container_of(w, struct ks_bridge, start_rx_work); struct data_pkt *pkt; struct urb *urb; int i = 0; int ret; for (i = 0; i < NO_RX_REQS; i++) { pkt = ksb_alloc_data_pkt(MAX_DATA_PKT_SIZE, GFP_KERNEL, ksb); if (IS_ERR(pkt)) { pr_err("unable to allocate data pkt"); return; } urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) { pr_err("unable to allocate urb"); ksb_free_data_pkt(pkt); return; } ret = usb_autopm_get_interface(ksb->ifc); if (ret < 0 && ret != -EAGAIN && ret != -EACCES) { pr_err_ratelimited("autopm_get failed:%d", ret); usb_free_urb(urb); ksb_free_data_pkt(pkt); return; } ksb->alloced_read_pkts++; usb_fill_bulk_urb(urb, ksb->udev, ksb->in_pipe, pkt->buf, pkt->len, ksb_rx_cb, pkt); usb_anchor_urb(urb, &ksb->submitted); dbg_log_event(ksb, "S RX_URB", pkt->len, 0); ret = usb_submit_urb(urb, GFP_KERNEL); if (ret) { pr_err("in urb submission failed"); usb_unanchor_urb(urb); usb_free_urb(urb); ksb_free_data_pkt(pkt); ksb->alloced_read_pkts--; usb_autopm_put_interface(ksb->ifc); return; } usb_autopm_put_interface_async(ksb->ifc); usb_free_urb(urb); } }
/*
 * ksb_fs_write() - write() handler for the bridge character device.
 *
 * Copies up to MAX_DATA_PKT_SIZE bytes of user data into a freshly
 * allocated data_pkt, queues it on ksb->to_mdm_list and kicks the
 * to_mdm_work worker, which performs the actual USB transfer.
 *
 * Fixes: (1) the debug pr_info dereferenced the __user pointer @buf
 * directly in kernel context and printed a size_t with %d — the log is
 * now emitted after copy_from_user() from the kernel-side copy, with
 * %zu; (2) copy_from_user() returns the number of bytes NOT copied,
 * so a failure must be reported as -EFAULT instead of returned as a
 * bogus positive byte count.
 *
 * Returns the number of bytes accepted (possibly less than @count),
 * -ENODEV if the device is gone, -EFAULT if the user buffer is
 * unreadable, or a negative errno from packet allocation.
 */
static ssize_t ksb_fs_write(struct file *fp, const char __user *buf,
		size_t count, loff_t *pos)
{
	struct data_pkt *pkt;
	unsigned long flags;
	struct ks_bridge *ksb = fp->private_data;

	if (!test_bit(USB_DEV_CONNECTED, &ksb->flags)) {
		pr_err("USB_DEV_CONNECTED is not set");
		return -ENODEV;
	}

	/* bound the user-controlled size before allocating */
	if (count > MAX_DATA_PKT_SIZE)
		count = MAX_DATA_PKT_SIZE;

	pkt = ksb_alloc_data_pkt(count, GFP_KERNEL, ksb);
	if (IS_ERR(pkt)) {
		pr_err("unable to allocate data packet");
		return PTR_ERR(pkt);
	}

	if (copy_from_user(pkt->buf, buf, count)) {
		pr_err("copy_from_user failed");
		ksb_free_data_pkt(pkt);
		return -EFAULT;
	}

	/* log from the kernel copy; never dereference the __user pointer */
	if (!strcmp(ksb->name, "ks_bridge:2"))
		pr_info("count:%zu cmd:%d", count, ((char *)pkt->buf)[0]);

	spin_lock_irqsave(&ksb->lock, flags);
	list_add_tail(&pkt->list, &ksb->to_mdm_list);
	spin_unlock_irqrestore(&ksb->lock, flags);

	queue_work(ksb->wq, &ksb->to_mdm_work);

	return count;
}
static void submit_one_urb(struct ks_bridge *ksb) { struct data_pkt *pkt; struct urb *urb; int ret; pkt = ksb_alloc_data_pkt(MAX_DATA_PKT_SIZE, GFP_ATOMIC, ksb); if (IS_ERR(pkt)) { pr_err("unable to allocate data pkt"); return; } urb = usb_alloc_urb(0, GFP_ATOMIC); if (!urb) { pr_err("unable to allocate urb"); ksb_free_data_pkt(pkt); return; } ksb->alloced_read_pkts++; usb_fill_bulk_urb(urb, ksb->udev, ksb->in_pipe, pkt->buf, pkt->len, ksb_rx_cb, pkt); usb_anchor_urb(urb, &ksb->submitted); dbg_log_event(ksb, "S RX_URB", pkt->len, 0); ret = usb_submit_urb(urb, GFP_ATOMIC); if (ret) { pr_err("in urb submission failed"); usb_unanchor_urb(urb); usb_free_urb(urb); ksb_free_data_pkt(pkt); ksb->alloced_read_pkts--; return; } usb_free_urb(urb); }