/*
 * alloc_ep_req - allocate a usb_request on @ep with a buflen-sized buffer.
 *
 * Uses GFP_ATOMIC so it is callable from atomic context.  The buffer size
 * comes from the module-wide 'buflen'.  Returns the request, or NULL if
 * either the request or its buffer cannot be allocated (nothing leaks on
 * the failure path).  Free with the matching free_ep_req().
 */
struct usb_request *alloc_ep_req(struct usb_ep *ep)
{
    struct usb_request *req = usb_ep_alloc_request(ep, GFP_ATOMIC);

    if (!req)
        return NULL;

    req->length = buflen;
    req->buf = kmalloc(buflen, GFP_ATOMIC);
    if (!req->buf) {
        /* Buffer allocation failed: release the request as well. */
        usb_ep_free_request(ep, req);
        return NULL;
    }

    return req;
}
/*
 * acc_request_new - allocate a usb_request plus a @buffer_size byte
 * transfer buffer on @ep (GFP_KERNEL, process context only).
 *
 * Returns the request or NULL; on buffer-allocation failure the request
 * is released before returning.
 */
static struct usb_request *acc_request_new(struct usb_ep *ep, int buffer_size)
{
    struct usb_request *req;

    req = usb_ep_alloc_request(ep, GFP_KERNEL);
    if (req) {
        /* now allocate buffers for the requests */
        req->buf = kmalloc(buffer_size, GFP_KERNEL);
        if (!req->buf) {
            usb_ep_free_request(ep, req);
            req = NULL;
        }
    }

    return req;
}
/*
 * uvc_unbind - unbind the UVC function from configuration @c.
 *
 * Unregisters the V4L2 side, frees the preallocated ep0 control request
 * and its backing buffer, and releases all copied descriptors.
 */
static void uvc_unbind(struct usb_configuration *c, struct usb_function *f)
{
    struct usb_composite_dev *cdev = c->cdev;
    struct uvc_device *uvc = to_uvc(f);

    INFO(cdev, "%s\n", __func__);

    /* Tear down the V4L2 device node first so userspace can no longer
     * reach the function while we free its resources. */
    video_unregister_device(&uvc->vdev);
    v4l2_device_unregister(&uvc->v4l2_dev);

    /* Release the ep0 control request and the buffer it pointed at. */
    usb_ep_free_request(cdev->gadget->ep0, uvc->control_req);
    kfree(uvc->control_buf);

    usb_free_all_descriptors(f);
}
/*
 * alloc_ep_req - allocate a request on @ep with a @length byte kmalloc'd
 * buffer (GFP_ATOMIC, safe from atomic context).
 *
 * Returns NULL if either allocation fails; nothing leaks on failure.
 */
static struct usb_request *alloc_ep_req(struct usb_ep *ep, unsigned length)
{
    struct usb_request *req = usb_ep_alloc_request(ep, GFP_ATOMIC);

    if (!req)
        return NULL;

    req->length = length;
    req->buf = kmalloc(length, GFP_ATOMIC);
    if (!req->buf) {
        /* Undo the request allocation on buffer failure. */
        usb_ep_free_request(ep, req);
        return NULL;
    }

    return req;
}
/*
 * fastboot_unbind - gadget unbind callback.
 *
 * Frees the ep0 control request (the tx/rx request teardown is compiled
 * out together with its allocation counterpart) and detaches the driver
 * data from the gadget.
 */
static void fastboot_unbind(struct usb_gadget *gadget)
{
    struct fastboot_dev *dev = get_gadget_data(gadget);

    debug("%s...\n", __func__);

    /* we've already been disconnected ... no i/o is active */
    if (dev->req) {
        usb_ep_free_request(gadget->ep0, dev->req);
        dev->req = NULL;
    }

#if 0
    if (dev->tx_req) {
        usb_ep_free_request(dev->in_ep, dev->tx_req);
        dev->tx_req = NULL;
    }

    if (dev->rx_req) {
        usb_ep_free_request(dev->out_ep, dev->rx_req);
        dev->rx_req = NULL;
    }
#endif

    set_gadget_data(gadget, NULL);
}
/*
 * alloc_ep_req - allocate a request on @ep sized to @len, falling back to
 * @default_len when @len is zero (GFP_ATOMIC).
 *
 * Returns the request or NULL; the request is released if the buffer
 * allocation fails.
 */
struct usb_request *alloc_ep_req(struct usb_ep *ep, int len, int default_len)
{
    struct usb_request *req = usb_ep_alloc_request(ep, GFP_ATOMIC);

    if (!req)
        return NULL;

    /* A zero @len selects the caller-provided default size. */
    req->length = len ? len : default_len;
    req->buf = kmalloc(req->length, GFP_ATOMIC);
    if (!req->buf) {
        usb_ep_free_request(ep, req);
        return NULL;
    }

    return req;
}
/*
 * rndis_unbind - tear down the RNDIS function bound to configuration @c.
 *
 * Deregisters from the RNDIS core, frees descriptors, the notification
 * request plus its buffer, and finally the function instance itself.
 */
static void rndis_unbind(struct usb_configuration *c, struct usb_function *f)
{
    struct f_rndis *rndis = func_to_rndis(f);

    rndis_deregister(rndis->config);
    rndis_exit();

    usb_free_all_descriptors(f);

    /* The notify request owns a kmalloc'd buffer: free it first. */
    kfree(rndis->notify_req->buf);
    usb_ep_free_request(rndis->notify, rndis->notify_req);

    kfree(rndis);
    /* Clear the module-wide singleton pointer. */
    __rndis = NULL;
}
/*
 * alloc_ep_req - allocate a request with buffer on @ep (GFP_ATOMIC).
 *
 * For OUT endpoints @len is rounded up with usb_ep_align() so the buffer
 * satisfies the endpoint's alignment/packet requirements; IN endpoints
 * use @len unchanged.  Returns NULL on any allocation failure.
 */
struct usb_request *alloc_ep_req(struct usb_ep *ep, size_t len)
{
    struct usb_request *req = usb_ep_alloc_request(ep, GFP_ATOMIC);

    if (!req)
        return NULL;

    if (usb_endpoint_dir_out(ep->desc))
        req->length = usb_ep_align(ep, len);
    else
        req->length = len;

    req->buf = kmalloc(req->length, GFP_ATOMIC);
    if (!req->buf) {
        usb_ep_free_request(ep, req);
        return NULL;
    }

    return req;
}
/*
 * ecm_unbind - undo ecm_bind(): free descriptors, the notification
 * request and its buffer, then the function instance.
 */
static void ecm_unbind(struct usb_configuration *c, struct usb_function *f)
{
    struct f_ecm *ecm = func_to_ecm(f);

    DBG(c->cdev, "ecm unbind\n");

    /* High-speed descriptors only exist on dual-speed gadgets. */
    if (gadget_is_dualspeed(c->cdev->gadget))
        usb_free_descriptors(f->hs_descriptors);
    usb_free_descriptors(f->descriptors);

    /* Buffer first, then the request that referenced it. */
    kfree(ecm->notify_req->buf);
    usb_ep_free_request(ecm->notify, ecm->notify_req);

    /* Drop the dynamically assigned MAC string before freeing. */
    ecm_string_defs[1].s = NULL;
    kfree(ecm);
}
/*
 * sysfs_endpoint_disable - disable @sys_ep's hardware endpoint and release
 * its preallocated request.
 *
 * Returns 0 when the endpoint was already disabled or on success, -EINVAL
 * for a NULL argument, otherwise the value returned by usb_ep_disable().
 *
 * BUGFIX: the original left sys_ep->request pointing at freed memory, so
 * a second disable (or any later teardown that frees the request again)
 * would double-free it.  The pointer is now cleared after the free.
 */
int sysfs_endpoint_disable(sysfs_endpoint* sys_ep)
{
    int error = 0;

    if (!sys_ep) {
        return -EINVAL;
    }

    DBG(sys_ep, "%s\n", __func__);

    /* Nothing to do if the endpoint was never enabled. */
    if (!sys_ep->enabled_desc) {
        return 0;
    }

    error = usb_ep_disable(sys_ep->ep);
    sys_ep->enabled_desc = 0;

    if (sys_ep->request) {
        /* This call will flag the request as "dead" if it's still in use. */
        usb_ep_free_request(sys_ep->ep, sys_ep->request);
        /* Clear the stale pointer so repeated disables cannot
         * double-free the request. */
        sys_ep->request = NULL;
    }

    return error;
}
/*-------------------------------------------------------------------------*/

/*
 * alloc_ep_req - allocate a request on @ep with a cache-line aligned
 * @length byte buffer (suitable for DMA).
 *
 * Returns the request, or NULL if either allocation fails; the request
 * is released when the buffer cannot be obtained.
 */
static struct usb_request *alloc_ep_req(struct usb_ep *ep, unsigned length)
{
    struct usb_request *req = usb_ep_alloc_request(ep, 0);

    if (!req)
        return NULL;

    req->length = length;
    /* Align to the CPU cache line so cache maintenance is safe. */
    req->buf = memalign(CONFIG_SYS_CACHELINE_SIZE, length);
    if (!req->buf) {
        usb_ep_free_request(ep, req);
        return NULL;
    }

    return req;
}
/*
 * gs_alloc_req
 *
 * Allocate a usb_request and its buffer.  Returns a pointer to the
 * usb_request or NULL if there is an error.
 */
struct usb_request *
gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags)
{
    struct usb_request *req = usb_ep_alloc_request(ep, kmalloc_flags);

    if (!req)
        return NULL;

    req->length = len;
    req->buf = kmalloc(len, kmalloc_flags);
    if (!req->buf) {
        /* No buffer: give the request back to the endpoint. */
        usb_ep_free_request(ep, req);
        return NULL;
    }

    return req;
}
/*
 * rockusb_start_ep - allocate a zeroed EP_BUFFER_SIZE request for @ep.
 *
 * The buffer is cache-line aligned (DMA-safe) and cleared before use.
 * Returns the request or NULL on allocation failure.
 */
static struct usb_request *rockusb_start_ep(struct usb_ep *ep)
{
    struct usb_request *req = usb_ep_alloc_request(ep, 0);

    if (!req)
        return NULL;

    req->length = EP_BUFFER_SIZE;
    req->buf = memalign(CONFIG_SYS_CACHELINE_SIZE, EP_BUFFER_SIZE);
    if (!req->buf) {
        usb_ep_free_request(ep, req);
        return NULL;
    }

    /* Start from a clean buffer. */
    memset(req->buf, 0, req->length);

    return req;
}
/*
 * alloc_ep_req - allocate a request on @ep sized to @len, or to the
 * module-wide default 'buflen' when @len is zero (GFP_ATOMIC).
 *
 * Returns NULL if either allocation fails; the request never leaks.
 */
struct usb_request *alloc_ep_req(struct usb_ep *ep, int len)
{
    struct usb_request *req = usb_ep_alloc_request(ep, GFP_ATOMIC);

    if (!req)
        return NULL;

    /* Zero @len falls back to the default buffer size. */
    req->length = len ? len : buflen;
    req->buf = kmalloc(req->length, GFP_ATOMIC);
    if (!req->buf) {
        usb_ep_free_request(ep, req);
        return NULL;
    }

    return req;
}
/*
 * uvc_function_unbind - undo uvc_function_bind().
 *
 * Unregisters the video device, releases the claimed endpoints, frees the
 * preallocated ep0 control request with its buffer, the descriptor copies,
 * and finally the uvc_device instance itself.
 */
static void uvc_function_unbind(struct usb_configuration *c,
                struct usb_function *f)
{
    struct usb_composite_dev *cdev = c->cdev;
    struct uvc_device *uvc = to_uvc(f);

    INFO(cdev, "uvc_function_unbind\n");

    video_unregister_device(uvc->vdev);

    /* Release our claim on the endpoints. */
    uvc->control_ep->driver_data = NULL;
    uvc->video.ep->driver_data = NULL;

    /* Reset the string id so a rebind reallocates it. */
    uvc_en_us_strings[UVC_STRING_ASSOCIATION_IDX].id = 0;
    usb_ep_free_request(cdev->gadget->ep0, uvc->control_req);
    kfree(uvc->control_buf);

    usb_free_all_descriptors(f);

    kfree(uvc);
}
/*
 * dtf_request_new - allocate a usb_request plus a @buffer_size byte
 * transfer buffer on @ep (GFP_KERNEL).
 *
 * Returns the request or NULL on failure; the request is released when
 * the buffer cannot be allocated.
 *
 * BUGFIX: the original called usb_ep_alloc_request() twice in a row and
 * immediately overwrote the first result, leaking one request per call.
 * The request is now allocated exactly once.
 */
static struct usb_request *dtf_request_new(struct usb_ep *ep, int buffer_size)
{
    struct usb_request *req;

    _dbgmsg_gadget( "_dbgmsg_gadget\n" );

    req = usb_ep_alloc_request(ep, GFP_KERNEL);
    if (!req) {
        _dbgmsg_gadget( "usb_ep_alloc_request error\n" );
        return NULL;
    }

    /* now allocate buffers for the requests */
    req->buf = kmalloc(buffer_size, GFP_KERNEL);
    if (!req->buf) {
        _dbgmsg_gadget( "usb_ep_free_request\n" );
        usb_ep_free_request(ep, req);
        return NULL;
    }

    return req;
}
/*
 * gsdio_alloc_req - allocate a usb_request with a @len byte buffer on @ep
 * using the caller-supplied allocation @flags.
 *
 * Logs and returns NULL on failure; the request is released when its
 * buffer cannot be allocated.
 */
struct usb_request *
gsdio_alloc_req(struct usb_ep *ep, unsigned len, gfp_t flags)
{
    struct usb_request *req = usb_ep_alloc_request(ep, flags);

    if (!req) {
        pr_err("%s: usb alloc request failed\n", __func__);
        return NULL;
    }

    req->buf = kmalloc(len, flags);
    if (!req->buf) {
        pr_err("%s: request buf allocation failed\n", __func__);
        usb_ep_free_request(ep, req);
        return NULL;
    }
    req->length = len;

    return req;
}
/*
 * uvc_video_free_requests - release every preallocated video request and
 * its buffer, then reset the free-request bookkeeping.
 *
 * Always returns 0.
 */
static int uvc_video_free_requests(struct uvc_video *video)
{
    unsigned int n;

    for (n = 0; n < UVC_NUM_REQUESTS; ++n) {
        if (video->req[n]) {
            usb_ep_free_request(video->ep, video->req[n]);
            video->req[n] = NULL;
        }

        if (video->req_buffer[n]) {
            kfree(video->req_buffer[n]);
            video->req_buffer[n] = NULL;
        }
    }

    /* Nothing is queued any more: start the free list from scratch. */
    INIT_LIST_HEAD(&video->req_free);
    video->req_size = 0;

    return 0;
}
/*
 * prealloc - make sure @list holds exactly @n requests for @ep.
 *
 * If the list already has @n or more entries the surplus is freed; if it
 * has fewer, new requests are allocated (GFP_ATOMIC) and prepended.  A
 * request of zero is rejected with -ENOMEM.  When allocation fails midway
 * the function returns 0 as long as the list is non-empty (partial
 * preallocation is usable), otherwise -ENOMEM.
 */
static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
{
    unsigned i;
    struct usb_request *req;

    if (!n)
        return -ENOMEM;

    /* queue/recycle up to N requests */
    i = n;
    /* Count existing entries; if we hit @n before the end, trim extras. */
    list_for_each_entry(req, list, list) {
        if (i-- == 0)
            goto extra;
    }
    /* @i is now the shortfall: allocate that many more. */
    while (i--) {
        req = usb_ep_alloc_request(ep, GFP_ATOMIC);
        if (!req){
            printk(KERN_ERR "%s@%d: usb_ep_alloc_request fail: %d\n",
                __func__, __LINE__, i);
            /* Partial success is acceptable if anything was kept. */
            return list_empty(list) ? -ENOMEM : 0;
        }
        list_add(&req->list, list);
    }
    return 0;

extra:
    /* free extras */
    for (;;) {
        struct list_head *next;

        /* Remember the successor before unlinking the current node. */
        next = req->list.next;
        list_del(&req->list);
        usb_ep_free_request(ep, req);

        if (next == list)
            break;

        req = container_of(next, struct usb_request, list);
    }
    return 0;
}
/*
 * pxa910_gs_alloc_req
 *
 * Allocate a usb_request and its buffer.  Returns a pointer to the
 * usb_request or NULL if there is an error.
 */
struct usb_request *
pxa910_gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags)
{
    struct usb_request *req = usb_ep_alloc_request(ep, kmalloc_flags);

    if (!req)
        return NULL;

    req->length = len;
#ifdef CONFIG_PXA910_1G_DDR_WORKAROUND
    /* Board workaround: keep the buffer inside the DMA zone. */
    req->buf = kmalloc(len, kmalloc_flags | GFP_DMA);
#else
    req->buf = kmalloc(len, kmalloc_flags);
#endif
    if (!req->buf) {
        usb_ep_free_request(ep, req);
        return NULL;
    }

    return req;
}
/*
 * prealloc - ensure @list contains exactly @n requests for @ep.
 *
 * Surplus entries are freed; a shortfall is filled with GFP_ATOMIC
 * allocations prepended to the list.  Returns -ENOMEM when @n is zero or
 * when allocation fails with an empty list; returns 0 otherwise (a
 * partially filled list is considered usable).
 */
static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
{
    unsigned i;
    struct usb_request *req;

    if (!n)
        return -ENOMEM;

    /* Count what is already on the list; jump out if there are extras. */
    i = n;
    list_for_each_entry(req, list, list) {
        if (i-- == 0)
            goto extra;
    }
    /* @i is the remaining shortfall. */
    while (i--) {
        req = usb_ep_alloc_request(ep, GFP_ATOMIC);
        if (!req)
            return list_empty(list) ? -ENOMEM : 0;
        list_add(&req->list, list);
    }
    return 0;

extra:
    /* Free everything from the current entry to the end of the list. */
    for (;;) {
        struct list_head *next;

        next = req->list.next;
        list_del(&req->list);
        usb_ep_free_request(ep, req);

        if (next == list)
            break;

        req = container_of(next, struct usb_request, list);
    }
    return 0;
}
/*
 * __usbhsg_recip_send_status - queue a 2-byte status reply on the DCP
 * (default control pipe) for a recipient GET_STATUS request.
 *
 * Allocates a fresh request and a little-endian u16 buffer holding
 * @status; both are freed later by __usbhsg_recip_send_complete().
 * Failures are logged and silently dropped (no reply is queued).
 */
static void __usbhsg_recip_send_status(struct usbhsg_gpriv *gpriv,
                       unsigned short status)
{
    struct usbhsg_uep *dcp = usbhsg_gpriv_to_dcp(gpriv);
    struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(dcp);
    struct device *dev = usbhsg_gpriv_to_dev(gpriv);
    struct usb_request *req;
    unsigned short *buf;

    /* alloc new usb_request for recip */
    req = usb_ep_alloc_request(&dcp->ep, GFP_ATOMIC);
    if (!req) {
        dev_err(dev, "recip request allocation fail\n");
        return;
    }

    /* alloc recip data buffer */
    buf = kmalloc(sizeof(*buf), GFP_ATOMIC);
    if (!buf) {
        /* Don't leak the request allocated above. */
        usb_ep_free_request(&dcp->ep, req);
        dev_err(dev, "recip data allocation fail\n");
        return;
    }

    /* recip data is status */
    *buf = cpu_to_le16(status);

    /* allocated usb_request/buffer will be freed */
    req->complete = __usbhsg_recip_send_complete;
    req->buf = buf;
    req->length = sizeof(*buf);
    req->zero = 0;

    /* push packet */
    pipe->handler = &usbhs_fifo_pio_push_handler;
    usbhsg_queue_push(dcp, usbhsg_req_to_ureq(req));
}
/*
 * alloc_requests - preallocate tx/rx requests for the fastboot endpoints.
 *
 * Currently a no-op that always reports success: the real allocation is
 * compiled out with #if 0 (kept for reference together with its matching
 * #if 0 teardown in fastboot_unbind()).
 */
static int alloc_requests(struct fastboot_dev *dev, unsigned n, gfp_t gfp_flags)
{
#if 0
    dev->tx_req = usb_ep_alloc_request(dev->in_ep, 0);
    if (!dev->tx_req)
        goto fail1;

    dev->rx_req = usb_ep_alloc_request(dev->out_ep, 0);
    if (!dev->rx_req)
        goto fail2;

    return 0;

fail2:
    usb_ep_free_request(dev->in_ep, dev->tx_req);
fail1:
    error("can't alloc requests");
    return -1;
#endif
    return 0;
}
/*
 * free_ep_req - counterpart to alloc_ep_req(): release @req and the
 * kmalloc'd transfer buffer attached to it.
 */
void free_ep_req(struct usb_ep *ep, struct usb_request *req)
{
    /* Free the buffer first, then the request that referenced it. */
    kfree(req->buf);
    usb_ep_free_request(ep, req);
}
/*
 * tx_complete - completion handler for TX (IN endpoint) requests.
 *
 * Updates the netdev TX statistics according to the request status,
 * recycles the finished request onto dev->tx_reqs, and — in
 * multi-packet-transfer mode — immediately requeues the next aggregated
 * request if one is pending.
 *
 * NOTE(review): this function appears TRUNCATED in this chunk — the text
 * ends mid-body at the final "} else {" with no closing braces visible.
 * The code below reproduces exactly what is present; recover the missing
 * tail from the original source before building.
 */
static void tx_complete(struct usb_ep *ep, struct usb_request *req)
{
    struct sk_buff *skb;
    struct eth_dev *dev;
    struct net_device *net;
    struct usb_request *new_req;
    struct usb_ep *in;
    int length;
    int retval;

    /* Endpoint already torn down: just discard the request. */
    if (!ep->driver_data) {
        usb_ep_free_request(ep, req);
        return;
    }

    dev = ep->driver_data;
    net = dev->net;

    /* Link went away while this request was in flight. */
    if (!dev->port_usb) {
        usb_ep_free_request(ep, req);
        return;
    }

    switch (req->status) {
    default:
        dev->net->stats.tx_errors++;
        VDBG(dev, "tx err %d\n", req->status);
        /* FALLTHROUGH */
    case -ECONNRESET:   /* unlink */
    case -ESHUTDOWN:    /* disconnect etc */
        break;
    case 0:
        /* A forced short packet (see below) added one pad byte that
         * must not be counted as payload. */
        if (!req->zero)
            dev->net->stats.tx_bytes += req->length-1;
        else
            dev->net->stats.tx_bytes += req->length;
    }
    dev->net->stats.tx_packets++;

    spin_lock(&dev->req_lock);
    list_add_tail(&req->list, &dev->tx_reqs);

    if (dev->port_usb->multi_pkt_xfer) {
        dev->no_tx_req_used--;
        req->length = 0;
        in = dev->port_usb->in_ep;

        if (!list_empty(&dev->tx_reqs)) {
            new_req = container_of(dev->tx_reqs.next,
                    struct usb_request, list);
            list_del(&new_req->list);
            spin_unlock(&dev->req_lock);
            if (new_req->length > 0) {
                length = new_req->length;

                /* NCM requires no zlp if transfer is
                 * dwNtbInMaxSize */
                if (dev->port_usb->is_fixed &&
                    length == dev->port_usb->fixed_in_len &&
                    (length % in->maxpacket) == 0)
                    new_req->zero = 0;
                else
                    new_req->zero = 1;

                /* use zlp framing on tx for strict CDC-Ether
                 * conformance, though any robust network rx
                 * path ignores extra padding. and some hardware
                 * doesn't like to write zlps.
                 */
                if (new_req->zero && !dev->zlp &&
                    (length % in->maxpacket) == 0) {
                    new_req->zero = 0;
                    /* Pad by one byte instead of sending a zlp. */
                    length++;
                }

                new_req->length = length;
                retval = usb_ep_queue(in, new_req, GFP_ATOMIC);
                switch (retval) {
                default:
#ifndef CONFIG_USB_NCM_SUPPORT_MTU_CHANGE
                    DBG(dev, "tx queue err %d\n", retval);
#else
                    printk(KERN_ERR"usb:%s tx queue err %d\n",__func__, retval);
#endif
                    /* Queueing failed: recycle the request. */
                    new_req->length = 0;
                    spin_lock(&dev->req_lock);
                    list_add_tail(&new_req->list, &dev->tx_reqs);
                    spin_unlock(&dev->req_lock);
                    break;
                case 0:
                    spin_lock(&dev->req_lock);
                    dev->no_tx_req_used++;
                    spin_unlock(&dev->req_lock);
                    net->trans_start = jiffies;
                }
            } else {
                spin_lock(&dev->req_lock);
                /*
                 * Put the idle request at the back of the
                 * queue. The xmit function will put the
                 * unfinished request at the beginning of the
                 * queue.
                 */
                list_add_tail(&new_req->list, &dev->tx_reqs);
                spin_unlock(&dev->req_lock);
            }
        } else {
static int uvc_function_bind(struct usb_configuration *c, struct usb_function *f) { struct usb_composite_dev *cdev = c->cdev; struct uvc_device *uvc = to_uvc(f); struct usb_string *us; unsigned int max_packet_mult; unsigned int max_packet_size; struct usb_ep *ep; struct f_uvc_opts *opts; int ret = -EINVAL; INFO(cdev, "uvc_function_bind\n"); opts = fi_to_f_uvc_opts(f->fi); /* Sanity check the streaming endpoint module parameters. */ opts->streaming_interval = clamp(opts->streaming_interval, 1U, 16U); opts->streaming_maxpacket = clamp(opts->streaming_maxpacket, 1U, 3072U); opts->streaming_maxburst = min(opts->streaming_maxburst, 15U); /* For SS, wMaxPacketSize has to be 1024 if bMaxBurst is not 0 */ if (opts->streaming_maxburst && (opts->streaming_maxpacket % 1024) != 0) { opts->streaming_maxpacket = roundup(opts->streaming_maxpacket, 1024); INFO(cdev, "overriding streaming_maxpacket to %d\n", opts->streaming_maxpacket); } /* Fill in the FS/HS/SS Video Streaming specific descriptors from the * module parameters. * * NOTE: We assume that the user knows what they are doing and won't * give parameters that their UDC doesn't support. 
*/ if (opts->streaming_maxpacket <= 1024) { max_packet_mult = 1; max_packet_size = opts->streaming_maxpacket; } else if (opts->streaming_maxpacket <= 2048) { max_packet_mult = 2; max_packet_size = opts->streaming_maxpacket / 2; } else { max_packet_mult = 3; max_packet_size = opts->streaming_maxpacket / 3; } uvc_fs_streaming_ep.wMaxPacketSize = cpu_to_le16(min(opts->streaming_maxpacket, 1023U)); uvc_fs_streaming_ep.bInterval = opts->streaming_interval; uvc_hs_streaming_ep.wMaxPacketSize = cpu_to_le16(max_packet_size | ((max_packet_mult - 1) << 11)); uvc_hs_streaming_ep.bInterval = opts->streaming_interval; uvc_ss_streaming_ep.wMaxPacketSize = cpu_to_le16(max_packet_size); uvc_ss_streaming_ep.bInterval = opts->streaming_interval; uvc_ss_streaming_comp.bmAttributes = max_packet_mult - 1; uvc_ss_streaming_comp.bMaxBurst = opts->streaming_maxburst; uvc_ss_streaming_comp.wBytesPerInterval = cpu_to_le16(max_packet_size * max_packet_mult * (opts->streaming_maxburst + 1)); /* Allocate endpoints. 
*/ ep = usb_ep_autoconfig(cdev->gadget, &uvc_control_ep); if (!ep) { INFO(cdev, "Unable to allocate control EP\n"); goto error; } uvc->control_ep = ep; if (gadget_is_superspeed(c->cdev->gadget)) ep = usb_ep_autoconfig_ss(cdev->gadget, &uvc_ss_streaming_ep, &uvc_ss_streaming_comp); else if (gadget_is_dualspeed(cdev->gadget)) ep = usb_ep_autoconfig(cdev->gadget, &uvc_hs_streaming_ep); else ep = usb_ep_autoconfig(cdev->gadget, &uvc_fs_streaming_ep); if (!ep) { INFO(cdev, "Unable to allocate streaming EP\n"); goto error; } uvc->video.ep = ep; uvc_fs_streaming_ep.bEndpointAddress = uvc->video.ep->address; uvc_hs_streaming_ep.bEndpointAddress = uvc->video.ep->address; uvc_ss_streaming_ep.bEndpointAddress = uvc->video.ep->address; us = usb_gstrings_attach(cdev, uvc_function_strings, ARRAY_SIZE(uvc_en_us_strings)); if (IS_ERR(us)) { ret = PTR_ERR(us); goto error; } uvc_iad.iFunction = us[UVC_STRING_CONTROL_IDX].id; uvc_control_intf.iInterface = us[UVC_STRING_CONTROL_IDX].id; ret = us[UVC_STRING_STREAMING_IDX].id; uvc_streaming_intf_alt0.iInterface = ret; uvc_streaming_intf_alt1.iInterface = ret; /* Allocate interface IDs. 
*/ if ((ret = usb_interface_id(c, f)) < 0) goto error; uvc_iad.bFirstInterface = ret; uvc_control_intf.bInterfaceNumber = ret; uvc->control_intf = ret; if ((ret = usb_interface_id(c, f)) < 0) goto error; uvc_streaming_intf_alt0.bInterfaceNumber = ret; uvc_streaming_intf_alt1.bInterfaceNumber = ret; uvc->streaming_intf = ret; /* Copy descriptors */ f->fs_descriptors = uvc_copy_descriptors(uvc, USB_SPEED_FULL); if (IS_ERR(f->fs_descriptors)) { ret = PTR_ERR(f->fs_descriptors); f->fs_descriptors = NULL; goto error; } if (gadget_is_dualspeed(cdev->gadget)) { f->hs_descriptors = uvc_copy_descriptors(uvc, USB_SPEED_HIGH); if (IS_ERR(f->hs_descriptors)) { ret = PTR_ERR(f->hs_descriptors); f->hs_descriptors = NULL; goto error; } } if (gadget_is_superspeed(c->cdev->gadget)) { f->ss_descriptors = uvc_copy_descriptors(uvc, USB_SPEED_SUPER); if (IS_ERR(f->ss_descriptors)) { ret = PTR_ERR(f->ss_descriptors); f->ss_descriptors = NULL; goto error; } } /* Preallocate control endpoint request. */ uvc->control_req = usb_ep_alloc_request(cdev->gadget->ep0, GFP_KERNEL); uvc->control_buf = kmalloc(UVC_MAX_REQUEST_SIZE, GFP_KERNEL); if (uvc->control_req == NULL || uvc->control_buf == NULL) { ret = -ENOMEM; goto error; } uvc->control_req->buf = uvc->control_buf; uvc->control_req->complete = uvc_function_ep0_complete; uvc->control_req->context = uvc; if (v4l2_device_register(&cdev->gadget->dev, &uvc->v4l2_dev)) { printk(KERN_INFO "v4l2_device_register failed\n"); goto error; } /* Initialise video. */ ret = uvcg_video_init(&uvc->video); if (ret < 0) goto error; /* Register a V4L2 device. */ ret = uvc_register_video(uvc); if (ret < 0) { printk(KERN_INFO "Unable to register video device\n"); goto error; } return 0; error: v4l2_device_unregister(&uvc->v4l2_dev); if (uvc->control_req) usb_ep_free_request(cdev->gadget->ep0, uvc->control_req); kfree(uvc->control_buf); usb_free_all_descriptors(f); return ret; }
static void bam_data_epout_complete(struct usb_ep *ep, struct usb_request *req) { struct bam_data_port *port = ep->driver_data; struct bam_data_ch_info *d = &port->data_ch; struct sk_buff *skb = req->context; int status = req->status; int queue = 0; switch (status) { case 0: skb_put(skb, req->actual); queue = 1; break; case -ECONNRESET: case -ESHUTDOWN: /* cable disconnection */ bam_data_free_skb_to_pool(port, skb); req->buf = 0; usb_ep_free_request(ep, req); return; default: pr_err("%s: %s response error %d, %d/%d\n", __func__, ep->name, status, req->actual, req->length); bam_data_free_skb_to_pool(port, skb); break; } spin_lock(&port->port_lock_ul); if (queue) { __skb_queue_tail(&d->rx_skb_q, skb); if (!usb_bam_get_prod_granted(d->dst_connection_idx)) { list_add_tail(&req->list, &d->rx_idle); spin_unlock(&port->port_lock_ul); return; } else queue_work(bam_data_wq, &d->write_tobam_w); } if (bam_mux_rx_fctrl_support && d->rx_skb_q.qlen >= bam_ipa_rx_fctrl_en_thld) { if (!d->rx_flow_control_triggered) { d->rx_flow_control_triggered = 1; d->rx_flow_control_enable++; } list_add_tail(&req->list, &d->rx_idle); spin_unlock(&port->port_lock_ul); return; } skb = bam_data_alloc_skb_from_pool(port); spin_unlock(&port->port_lock_ul); if (!skb) { list_add_tail(&req->list, &d->rx_idle); return; } skb_reserve(skb, BAM_MUX_HDR); req->buf = skb->data; req->length = d->rx_buffer_size; req->context = skb; status = usb_ep_queue(ep, req, GFP_ATOMIC); if (status) { bam_data_free_skb_to_pool(port, skb); pr_err("%s: data rx enqueue err %d\n", __func__, status); spin_lock(&port->port_lock_ul); list_add_tail(&req->list, &d->rx_idle); spin_unlock(&port->port_lock_ul); } }
/*
 * uvc_function_bind - bind the UVC function to configuration @c
 * (module-parameter driven, __init variant).
 *
 * Validates the streaming module parameters, fills the speed-dependent
 * streaming endpoint descriptors, claims endpoints, allocates interface
 * IDs, copies descriptor sets, preallocates the ep0 control request,
 * deactivates the function until the userspace server attaches, then
 * registers the V4L2 device and video node.
 *
 * Returns 0 on success or a negative errno.
 *
 * BUGFIX: after usb_function_deactivate() succeeds, ret is 0; the
 * original then jumped to the error label on a v4l2_device_register()
 * failure and returned 0 — reporting SUCCESS for a failed bind.  The
 * register return value is now captured and propagated.
 */
static int __init
uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
{
    struct usb_composite_dev *cdev = c->cdev;
    struct uvc_device *uvc = to_uvc(f);
    unsigned int max_packet_mult;
    unsigned int max_packet_size;
    struct usb_ep *ep;
    int ret = -EINVAL;

    INFO(cdev, "uvc_function_bind\n");

    /* Sanity check the streaming endpoint module parameters. */
    streaming_interval = clamp(streaming_interval, 1U, 16U);
    streaming_maxpacket = clamp(streaming_maxpacket, 1U, 3072U);
    streaming_maxburst = min(streaming_maxburst, 15U);

    /* Fill in the FS/HS/SS Video Streaming specific descriptors from the
     * module parameters.
     *
     * NOTE: We assume that the user knows what they are doing and won't
     * give parameters that their UDC doesn't support.
     */
    if (streaming_maxpacket <= 1024) {
        max_packet_mult = 1;
        max_packet_size = streaming_maxpacket;
    } else if (streaming_maxpacket <= 2048) {
        max_packet_mult = 2;
        max_packet_size = streaming_maxpacket / 2;
    } else {
        max_packet_mult = 3;
        max_packet_size = streaming_maxpacket / 3;
    }

    /* NOTE(review): these wMaxPacketSize stores are not wrapped in
     * cpu_to_le16() — suspect on big-endian hosts; confirm against the
     * descriptor field types before changing. */
    uvc_fs_streaming_ep.wMaxPacketSize = min(streaming_maxpacket, 1023U);
    uvc_fs_streaming_ep.bInterval = streaming_interval;

    uvc_hs_streaming_ep.wMaxPacketSize = max_packet_size;
    uvc_hs_streaming_ep.wMaxPacketSize |= ((max_packet_mult - 1) << 11);
    uvc_hs_streaming_ep.bInterval = streaming_interval;

    uvc_ss_streaming_ep.wMaxPacketSize = max_packet_size;
    uvc_ss_streaming_ep.bInterval = streaming_interval;
    uvc_ss_streaming_comp.bmAttributes = max_packet_mult - 1;
    uvc_ss_streaming_comp.bMaxBurst = streaming_maxburst;
    uvc_ss_streaming_comp.wBytesPerInterval = max_packet_size *
        max_packet_mult * streaming_maxburst;

    /* Allocate endpoints. */
    ep = usb_ep_autoconfig(cdev->gadget, &uvc_control_ep);
    if (!ep) {
        INFO(cdev, "Unable to allocate control EP\n");
        goto error;
    }
    uvc->control_ep = ep;
    ep->driver_data = uvc;

    if (gadget_is_superspeed(c->cdev->gadget))
        ep = usb_ep_autoconfig_ss(cdev->gadget, &uvc_ss_streaming_ep,
                      &uvc_ss_streaming_comp);
    else if (gadget_is_dualspeed(cdev->gadget))
        ep = usb_ep_autoconfig(cdev->gadget, &uvc_hs_streaming_ep);
    else
        ep = usb_ep_autoconfig(cdev->gadget, &uvc_fs_streaming_ep);
    if (!ep) {
        INFO(cdev, "Unable to allocate streaming EP\n");
        goto error;
    }
    uvc->video.ep = ep;
    ep->driver_data = uvc;

    /* All speed variants share the autoconfigured endpoint address. */
    uvc_fs_streaming_ep.bEndpointAddress = uvc->video.ep->address;
    uvc_hs_streaming_ep.bEndpointAddress = uvc->video.ep->address;
    uvc_ss_streaming_ep.bEndpointAddress = uvc->video.ep->address;

    /* Allocate interface IDs. */
    if ((ret = usb_interface_id(c, f)) < 0)
        goto error;
    uvc_iad.bFirstInterface = ret;
    uvc_control_intf.bInterfaceNumber = ret;
    uvc->control_intf = ret;

    if ((ret = usb_interface_id(c, f)) < 0)
        goto error;
    uvc_streaming_intf_alt0.bInterfaceNumber = ret;
    uvc_streaming_intf_alt1.bInterfaceNumber = ret;
    uvc->streaming_intf = ret;

    /* Copy descriptors */
    f->fs_descriptors = uvc_copy_descriptors(uvc, USB_SPEED_FULL);
    if (gadget_is_dualspeed(cdev->gadget))
        f->hs_descriptors = uvc_copy_descriptors(uvc, USB_SPEED_HIGH);
    if (gadget_is_superspeed(c->cdev->gadget))
        f->ss_descriptors = uvc_copy_descriptors(uvc, USB_SPEED_SUPER);

    /* Preallocate control endpoint request. */
    uvc->control_req = usb_ep_alloc_request(cdev->gadget->ep0, GFP_KERNEL);
    uvc->control_buf = kmalloc(UVC_MAX_REQUEST_SIZE, GFP_KERNEL);
    if (uvc->control_req == NULL || uvc->control_buf == NULL) {
        ret = -ENOMEM;
        goto error;
    }

    uvc->control_req->buf = uvc->control_buf;
    uvc->control_req->complete = uvc_function_ep0_complete;
    uvc->control_req->context = uvc;

    /* Avoid letting this gadget enumerate until the userspace server is
     * active.
     */
    if ((ret = usb_function_deactivate(f)) < 0)
        goto error;

    /* Propagate the v4l2 error code: ret is 0 here after a successful
     * deactivate, so the old unconditional goto returned "success". */
    ret = v4l2_device_register(&cdev->gadget->dev, &uvc->v4l2_dev);
    if (ret < 0) {
        printk(KERN_INFO "v4l2_device_register failed\n");
        goto error;
    }

    /* Initialise video. */
    ret = uvc_video_init(&uvc->video);
    if (ret < 0)
        goto error;

    /* Register a V4L2 device. */
    ret = uvc_register_video(uvc);
    if (ret < 0) {
        printk(KERN_INFO "Unable to register video device\n");
        goto error;
    }

    return 0;

error:
    v4l2_device_unregister(&uvc->v4l2_dev);
    if (uvc->vdev)
        video_device_release(uvc->vdev);

    if (uvc->control_ep)
        uvc->control_ep->driver_data = NULL;
    if (uvc->video.ep)
        uvc->video.ep->driver_data = NULL;

    if (uvc->control_req) {
        usb_ep_free_request(cdev->gadget->ep0, uvc->control_req);
        kfree(uvc->control_buf);
    }

    usb_free_all_descriptors(f);

    return ret;
}
/*
 * hidg_bind - bind the HID gadget function to configuration @c.
 *
 * Allocates the interface ID and IN endpoint, preallocates the report
 * request and its buffer, patches the HID descriptors with the
 * instance-specific report length and descriptor length, assigns the
 * descriptor sets, initialises locking/waitqueues, and creates the
 * /dev/hidgN char device.
 *
 * Returns 0 on success or a negative errno; the fail path frees the
 * request/buffer and descriptor copies.
 */
static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
{
    struct usb_ep *ep;
    struct f_hidg *hidg = func_to_hidg(f);
    int status;
    dev_t dev;

    pr_info("%s: creating device %p\n", __func__, hidg);

    /* allocate instance-specific interface IDs, and patch descriptors */
    status = usb_interface_id(c, f);
    if (status < 0)
        goto fail;
    hidg_interface_desc.bInterfaceNumber = status;

    /* allocate instance-specific endpoints */
    status = -ENODEV;
    ep = usb_ep_autoconfig(c->cdev->gadget, &hidg_fs_in_ep_desc);
    if (!ep)
        goto fail;
    ep->driver_data = c->cdev;    /* claim */
    hidg->in_ep = ep;

    /* preallocate request and buffer */
    status = -ENOMEM;
    hidg->req = usb_ep_alloc_request(hidg->in_ep, GFP_KERNEL);
    if (!hidg->req)
        goto fail;

    hidg->req->buf = kmalloc(hidg->report_length, GFP_KERNEL);
    if (!hidg->req->buf)
        goto fail;

    /* set descriptor dynamic values */
    hidg_interface_desc.bInterfaceSubClass = hidg->bInterfaceSubClass;
    hidg_interface_desc.bInterfaceProtocol = hidg->bInterfaceProtocol;
    hidg_hs_in_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
    hidg_fs_in_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
    hidg_desc.desc[0].bDescriptorType = HID_DT_REPORT;
    hidg_desc.desc[0].wDescriptorLength =
        cpu_to_le16(hidg->report_desc_length);
    hidg->set_report_buff = NULL;

    /* HS reuses the endpoint address autoconfigured for FS above. */
    hidg_hs_in_ep_desc.bEndpointAddress =
        hidg_fs_in_ep_desc.bEndpointAddress;

    status = usb_assign_descriptors(f, hidg_fs_descriptors,
            hidg_hs_descriptors, NULL);
    if (status)
        goto fail;

    mutex_init(&hidg->lock);
    spin_lock_init(&hidg->spinlock);
    init_waitqueue_head(&hidg->write_queue);
    init_waitqueue_head(&hidg->read_queue);

    /* create char device */
    cdev_init(&hidg->cdev, &f_hidg_fops);
    dev = MKDEV(major, hidg->minor);
    status = cdev_add(&hidg->cdev, dev, 1);
    if (status)
        goto fail;

    device_create(hidg_class, NULL, dev, NULL, "%s%d", "hidg", hidg->minor);

    hacky_device_list_add(hidg);

    return 0;

fail:
    ERROR(f->config->cdev, "hidg_bind FAILED\n");
    if (hidg->req != NULL) {
        /* kfree(NULL) is a no-op, so a failed buffer alloc is fine. */
        kfree(hidg->req->buf);
        if (hidg->in_ep != NULL)
            usb_ep_free_request(hidg->in_ep, hidg->req);
    }

    usb_free_all_descriptors(f);
    return status;
}
/**
 * gether_disconnect - notify network layer that USB link is inactive
 * @link: the USB link, on which gether_connect() was called
 * Context: irqs blocked
 *
 * This is called to deactivate endpoints and let the network layer know
 * the connection went inactive ("no carrier").
 *
 * On return, the state is as if gether_connect() had never been called.
 * The endpoints are inactive, and accordingly without active USB I/O.
 * Pointers to endpoint descriptors and endpoint private data are nulled.
 */
void gether_disconnect(struct gether *link)
{
    struct eth_dev *dev = link->ioport;
    struct usb_request *req;

    if (!dev)
        return;

    DBG(dev, "%s\n", __func__);

    netif_stop_queue(dev->net);
    netif_carrier_off(dev->net);

    /* disable endpoints, forcing (synchronous) completion
     * of all pending i/o. then free the request objects
     * and forget about the endpoints.
     */
    usb_ep_disable(link->in_ep);
    spin_lock(&dev->req_lock);
    while (!list_empty(&dev->tx_reqs)) {
        req = container_of(dev->tx_reqs.next,
                    struct usb_request, list);
        list_del(&req->list);

        /* The lock is dropped around each free and re-taken before
         * re-checking the list; NOTE(review): presumably because
         * usb_ep_free_request() must not be called under this
         * spinlock — confirm against the UDC driver's requirements. */
        spin_unlock(&dev->req_lock);
        usb_ep_free_request(link->in_ep, req);
        spin_lock(&dev->req_lock);
    }
    spin_unlock(&dev->req_lock);
#ifndef CONFIG_USB_GADGET_DYNAMIC_ENDPOINT
    link->in_ep->driver_data = NULL;
#endif
    link->in = NULL;

    usb_ep_disable(link->out_ep);
    spin_lock(&dev->req_lock);
    while (!list_empty(&dev->rx_reqs)) {
        req = container_of(dev->rx_reqs.next,
                    struct usb_request, list);
        list_del(&req->list);

        /* Same drop-free-retake pattern as the tx loop above. */
        spin_unlock(&dev->req_lock);
        usb_ep_free_request(link->out_ep, req);
        spin_lock(&dev->req_lock);
    }
    spin_unlock(&dev->req_lock);
#ifndef CONFIG_USB_GADGET_DYNAMIC_ENDPOINT
    link->out_ep->driver_data = NULL;
#endif
    link->out = NULL;

    /* finish forgetting about this USB link episode */
    dev->header_len = 0;
    dev->unwrap = NULL;
    dev->wrap = NULL;

    spin_lock(&dev->lock);
    dev->port_usb = NULL;
    link->ioport = NULL;
    spin_unlock(&dev->lock);
}