static void null_bind(struct usb_endpoint **ept, void *_ctxt) { struct null_context *ctxt = _ctxt; ctxt->out = ept[0]; printk(KERN_INFO "null_bind() %p\n", ctxt->out); ctxt->req0 = usb_ept_alloc_req(ctxt->out, 4096); ctxt->req1 = usb_ept_alloc_req(ctxt->out, 4096); }
static void zero_bind(struct usb_endpoint **ept, void *_ctxt) { struct zero_context *ctxt = _ctxt; ctxt->in = ept[0]; printk(KERN_INFO "zero_bind() %p\n", ctxt->in); ctxt->req0 = usb_ept_alloc_req(ctxt->in, 4096); ctxt->req1 = usb_ept_alloc_req(ctxt->in, 4096); memset(ctxt->req0->buf, 0, 4096); memset(ctxt->req1->buf, 0, 4096); }
static void adb_bind(struct usb_endpoint **ept, void *_ctxt) { struct adb_context *ctxt = _ctxt; struct usb_request *req; int n; ctxt->out = ept[0]; ctxt->in = ept[1]; printk(KERN_INFO "adb_bind() %p, %p\n", ctxt->out, ctxt->in); for (n = 0; n < RX_REQ_MAX; n++) { req = usb_ept_alloc_req(ctxt->out, 4096); if (req == 0) goto fail; req->context = ctxt; req->complete = adb_complete_out; req_put(ctxt, &ctxt->rx_idle, req); } for (n = 0; n < TX_REQ_MAX; n++) { req = usb_ept_alloc_req(ctxt->in, 4096); if (req == 0) goto fail; req->context = ctxt; req->complete = adb_complete_in; req_put(ctxt, &ctxt->tx_idle, req); } printk(KERN_INFO "adb_bind() allocated %d rx and %d tx requests\n", RX_REQ_MAX, TX_REQ_MAX); misc_register(&adb_device); misc_register(&adb_enable_device); return; fail: printk(KERN_ERR "adb_bind() could not allocate requests\n"); adb_unbind(ctxt); }
/*
 * diag_alloc_req_entry() - allocate a diag request wrapper plus its
 * underlying USB request on endpoint @ep.
 *
 * Returns the new entry, or ERR_PTR(-ENOMEM) if either allocation
 * fails (the wrapper is freed again if the USB request cannot be
 * obtained).  The USB request's context points back at the wrapper.
 *
 * NOTE(review): @len is accepted but never forwarded — the USB request
 * is allocated with a zero-length buffer.  Confirm the buffer is
 * attached elsewhere.
 */
static struct diag_req_entry *diag_alloc_req_entry(struct usb_endpoint *ep,
						   unsigned len,
						   gfp_t kmalloc_flags)
{
	struct diag_req_entry *entry;

	entry = kmalloc(sizeof(*entry), kmalloc_flags);
	if (entry == NULL)
		return ERR_PTR(-ENOMEM);

	entry->usb_req = usb_ept_alloc_req(ep, 0);
	if (entry->usb_req == NULL) {
		kfree(entry);
		return ERR_PTR(-ENOMEM);
	}

	entry->usb_req->context = entry;
	return entry;
}
/*
 * usb_prepare() - one-time software initialization of the controller state.
 *
 * Initializes the lock, zeroes the 4096-byte controller buffer (ui->head
 * aliases its start), resets the endpoint table and allocation cursors,
 * initializes the endpoints, fixes ep0's maxpacket at 64 bytes, and
 * preallocates the request used for control (setup) transfers.
 */
static void usb_prepare(struct usb_info *ui)
{
	spin_lock_init(&ui->lock);

	memset(ui->buf, 0, 4096);
	/* ui->head points at the very start of the buffer */
	ui->head = (void *) (ui->buf + 0);

	/* only important for reset/reinit */
	memset(ui->ept, 0, sizeof(ui->ept));
	ui->next_item = 0;
	ui->next_ifc_num = 0;

	init_endpoints(ui);

	/* control endpoint always uses a 64-byte max packet size */
	ui->ep0in.ep.maxpacket = 64;
	ui->ep0out.ep.maxpacket = 64;

	/* NOTE(review): result is not checked — a failed allocation leaves
	 * setup_req NULL; confirm users of setup_req tolerate that. */
	ui->setup_req = usb_ept_alloc_req(&ui->ep0in, SETUP_BUF_SIZE, GFP_KERNEL);

	INIT_WORK(&ui->work, usb_do_work);
}
/*
 * msm72k_alloc_request() - gadget-framework usb_ep_ops allocator.
 *
 * Thin adapter: converts the generic endpoint to the driver-private
 * endpoint type and delegates to usb_ept_alloc_req() with a zero-length
 * buffer.
 */
static struct usb_request * msm72k_alloc_request(struct usb_ep *_ep,
						 gfp_t gfp_flags)
{
	struct msm_endpoint *ept = to_msm_endpoint(_ep);

	return usb_ept_alloc_req(ept, 0, gfp_flags);
}
static void mtp_tunnel_bind(struct usb_endpoint **ept, void *_ctxt) { struct mtp_tunnel_context *ctxt = _ctxt; struct usb_request *req; int ret; #ifndef ALLOCATE_16K_BUFF int n; #endif ctxt->registered = 0; ctxt->out = ept[0]; ctxt->in = ept[1]; printk(KERN_DEBUG "mtp_tunnel_bind() %p, %p\n", ctxt->out, ctxt->in); #ifndef ALLOCATE_16K_BUFF for (n = 0; n < RX_REQ_MAX; n++) #endif { req = usb_ept_alloc_req(ctxt->out, TXN_MAX); if (req == 0) goto fail; req->context = ctxt; req->complete = mtp_tunnel_complete_out; req_put(ctxt, &ctxt->rx_idle, req); } #ifndef ALLOCATE_16K_BUFF for (n = 0; n < TX_REQ_MAX; n++) #endif { req = usb_ept_alloc_req(ctxt->in, TXN_MAX); if (req == 0) goto fail; req->context = ctxt; req->complete = mtp_tunnel_complete_in; req_put(ctxt, &ctxt->tx_idle, req); } #ifndef ALLOCATE_16K_BUFF printk(KERN_DEBUG "mtp_tunnel_bind() allocated %d rx and %d tx requests\n", RX_REQ_MAX, TX_REQ_MAX); #else printk(KERN_DEBUG "%s(): allocated buffer: %d\n", __func__, TXN_MAX); #endif misc_register(&mtp_tunnel_device); misc_register(&mtp_tunnel_enable_device); mtp_tunnel_dev.release = mtp_tunnel_dev_release; mtp_tunnel_dev.parent = &ctxt->pdev->dev; strcpy(mtp_tunnel_dev.bus_id, "interface"); ret = device_register(&mtp_tunnel_dev); if (ret != 0) { printk(KERN_WARNING "mtp_tunnel_dev failed to register device: %d\n", ret); goto fail_dev_register_fail; } ret = device_create_file(&mtp_tunnel_dev, &dev_attr_mtp_tunnel_status); if (ret != 0) { printk(KERN_WARNING "mtp_tunnel_dev device_create_file failed: %d\n", ret); device_unregister(&mtp_tunnel_dev); goto fail_dev_register_fail; } ctxt->registered = 1; return; fail_dev_register_fail: printk(KERN_ERR "%s() could not allocate requests\n", __func__); fail: printk(KERN_WARNING "mtp_tunnel_bind() could not allocate requests\n"); mtp_tunnel_unbind(ctxt); }