static int thor_eps_setup(struct usb_function *f) { struct usb_composite_dev *cdev = f->config->cdev; struct usb_gadget *gadget = cdev->gadget; struct thor_dev *dev = thor_func->dev; struct usb_endpoint_descriptor *d; struct usb_request *req; struct usb_ep *ep; int result; ep = dev->in_ep; d = ep_desc(gadget, &hs_in_desc, &fs_in_desc); debug("(d)bEndpointAddress: 0x%x\n", d->bEndpointAddress); result = usb_ep_enable(ep, d); if (result) goto exit; ep->driver_data = cdev; /* claim */ req = thor_start_ep(ep); if (!req) { usb_ep_disable(ep); result = -EIO; goto exit; } dev->in_req = req; ep = dev->out_ep; d = ep_desc(gadget, &hs_out_desc, &fs_out_desc); debug("(d)bEndpointAddress: 0x%x\n", d->bEndpointAddress); result = usb_ep_enable(ep, d); if (result) goto exit; ep->driver_data = cdev; /* claim */ req = thor_start_ep(ep); if (!req) { usb_ep_disable(ep); result = -EIO; goto exit; } dev->out_req = req; out_req_buf = dev->out_req->buf; /* ACM control EP */ ep = dev->int_ep; ep->driver_data = cdev; /* claim */ exit: return result; }
/*
 * set_source_sink_config() - bring up the source/sink function.
 *
 * Walks the gadget's endpoints and enables the two bulk data
 * endpoints: the IN endpoint sources zeroes to the host, the OUT
 * endpoint sinks anything the host sends.  Each enabled endpoint
 * gets its first request started via source_sink_start_ep().
 * Any other endpoint is ignored.
 *
 * Returns 0 on success or a negative error code; the caller is
 * responsible for cleanup on error.
 */
static int set_source_sink_config(struct zero_dev *dev)
{
	struct usb_gadget *gadget = dev->gadget;
	struct usb_ep *ep;
	int status = 0;

	gadget_for_each_ep(ep, gadget) {
		const struct usb_endpoint_descriptor *d;
		int is_in;

		/* one endpoint writes (sources) zeroes in (to the host) */
		if (strcmp(ep->name, EP_IN_NAME) == 0) {
			is_in = 1;
			d = ep_desc(gadget, &hs_source_desc, &fs_source_desc);
		/* one endpoint reads (sinks) anything out (from the host) */
		} else if (strcmp(ep->name, EP_OUT_NAME) == 0) {
			is_in = 0;
			d = ep_desc(gadget, &hs_sink_desc, &fs_sink_desc);
		/* ignore any other endpoints */
		} else {
			continue;
		}

		status = usb_ep_enable(ep, d);
		if (status == 0) {
			ep->driver_data = dev;
			if (source_sink_start_ep(ep) != NULL) {
				if (is_in)
					dev->in_ep = ep;
				else
					dev->out_ep = ep;
				continue;
			}
			usb_ep_disable(ep);
			status = -EIO;
		}

		/* stop on error */
		ERROR(dev, "can't start %s, result %d\n", ep->name, status);
		break;
	}

	if (status == 0)
		DBG(dev, "buflen %d\n", buflen);

	/* caller is responsible for cleanup on error */
	return status;
}
/*
 * set_loopback_config() - bring up the loopback function.
 *
 * Walks the gadget's endpoints and enables the two bulk data
 * endpoints (IN echoes data back to the host, OUT receives it),
 * then pre-allocates and queues up to 'qlen' read requests of
 * 'buflen' bytes each on the OUT endpoint, with loopback_complete
 * as their completion handler.  Any other endpoint is ignored.
 *
 * Returns 0 on success or a negative error code; the caller is
 * responsible for cleanup on error.
 */
static int set_loopback_config(struct zero_dev *dev)
{
	struct usb_gadget *gadget = dev->gadget;
	struct usb_ep *ep;
	int status = 0;

	gadget_for_each_ep(ep, gadget) {
		const struct usb_endpoint_descriptor *d;
		struct usb_ep **slot;

		/* one endpoint writes data back IN to the host */
		if (strcmp(ep->name, EP_IN_NAME) == 0) {
			d = ep_desc(gadget, &hs_source_desc, &fs_source_desc);
			slot = &dev->in_ep;
		/* one endpoint just reads OUT packets */
		} else if (strcmp(ep->name, EP_OUT_NAME) == 0) {
			d = ep_desc(gadget, &hs_sink_desc, &fs_sink_desc);
			slot = &dev->out_ep;
		/* ignore any other endpoints */
		} else {
			continue;
		}

		status = usb_ep_enable(ep, d);
		if (status == 0) {
			ep->driver_data = dev;
			*slot = ep;
			continue;
		}

		/* stop on error */
		ERROR(dev, "can't enable %s, result %d\n", ep->name, status);
		break;
	}

	/* allocate a bunch of read buffers and queue them all at once.
	 * we buffer at most 'qlen' transfers; fewer if any need more
	 * than 'buflen' bytes each.
	 */
	if (status == 0) {
		unsigned i;

		ep = dev->out_ep;
		for (i = 0; i < qlen && status == 0; i++) {
			struct usb_request *req = alloc_ep_req(ep, buflen);

			if (!req) {
				status = -ENOMEM;
				continue;
			}
			req->complete = loopback_complete;
			status = usb_ep_queue(ep, req, GFP_ATOMIC);
			if (status)
				DBG(dev, "%s queue req --> %d\n",
						ep->name, status);
		}
	}

	if (status == 0)
		DBG(dev, "qlen %d, buflen %d\n", qlen, buflen);

	/* caller is responsible for cleanup on error */
	return status;
}