static int usbhsg_recip_handler_std_get_device(struct usbhs_priv *priv,
					       struct usbhsg_uep *uep,
					       struct usb_ctrlrequest *ctrl)
{
	struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
	unsigned short status = 0;

	if (usbhsg_status_has(gpriv, USBHSG_STATUS_SELF_POWERED))
		status = 1 << USB_DEVICE_SELF_POWERED;

	__usbhsg_recip_send_status(gpriv, status);

	return 0;
}
static int usbhsg_recip_handler_std_get_endpoint(struct usbhs_priv *priv,
						 struct usbhsg_uep *uep,
						 struct usb_ctrlrequest *ctrl)
{
	struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
	struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
	unsigned short status = 0;

	if (usbhs_pipe_is_stall(pipe))
		status = 1 << USB_ENDPOINT_HALT;

	__usbhsg_recip_send_status(gpriv, status);

	return 0;
}
static int usbhsg_dma_map_ctrl(struct usbhs_pkt *pkt, int map)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhsg_uep *uep = usbhsg_pipe_to_uep(pipe);
	struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
	struct device *dev = usbhsg_gpriv_to_dev(gpriv);
	enum dma_data_direction dir;

	dir = usbhs_pipe_is_dir_in(pipe) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	if (map)
		return usbhsg_dma_map(dev, pkt, dir);
	else
		return usbhsg_dma_unmap(dev, pkt, dir);
}
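/*
 * usbhsg_dma_map() / usbhsg_dma_unmap() are not part of this listing.
 * The sketch below is a hypothetical minimal version only, assuming the
 * packet carries a plain kernel-virtual buffer in pkt->buf with pkt->length
 * and pkt->dma fields; the real helpers may also handle pre-mapped requests.
 */
static int usbhsg_dma_map(struct device *dev, struct usbhs_pkt *pkt,
			  enum dma_data_direction dir)
{
	/* map the packet buffer for the DMA engine (assumed fields) */
	pkt->dma = dma_map_single(dev, pkt->buf, pkt->length, dir);
	if (dma_mapping_error(dev, pkt->dma)) {
		dev_err(dev, "dma mapping error\n");
		return -EIO;
	}

	return 0;
}

static int usbhsg_dma_unmap(struct device *dev, struct usbhs_pkt *pkt,
			    enum dma_data_direction dir)
{
	/* release the mapping created in usbhsg_dma_map() */
	dma_unmap_single(dev, pkt->dma, pkt->length, dir);

	return 0;
}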
/*
 *		queue push/pop
 */
static void __usbhsg_queue_pop(struct usbhsg_uep *uep,
			       struct usbhsg_request *ureq,
			       int status)
{
	struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
	struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
	struct device *dev = usbhsg_gpriv_to_dev(gpriv);
	struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);

	dev_dbg(dev, "pipe %d : queue pop\n", usbhs_pipe_number(pipe));

	ureq->req.status = status;
	spin_unlock(usbhs_priv_to_lock(priv));
	usb_gadget_giveback_request(&uep->ep, &ureq->req);
	spin_lock(usbhs_priv_to_lock(priv));
}
static int __usbhsg_queue_handler(struct usbhsg_uep *uep, int prepare)
{
	struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
	struct device *dev = usbhsg_gpriv_to_dev(gpriv);
	struct usbhsg_request *ureq;
	spinlock_t *lock;
	unsigned long flags;
	int ret = 0;

	if (!uep->handler) {
		dev_err(dev, "no handler function\n");
		return -EIO;
	}

	/*
	 * CAUTION [*queue handler*]
	 *
	 * This function is called to start or restart queue operation.
	 * On the other hand, the biggest worry for a USB driver is
	 * spinlock nesting, especially between
	 *  - usb_ep_ops :: queue
	 *  - usb_request :: complete
	 *
	 * The caller of this function does not need to care about the
	 * spinlock; this function uses usbhsg_trylock for that.
	 * If usbhsg_trylock() returns the lock, it was taken here;
	 * if it returns NULL, the lock is already held by the caller.
	 * see also
	 *  CAUTION [*endpoint queue*]
	 *  CAUTION [*request complete*]
	 */

	/******************  spin try lock *******************/
	lock = usbhsg_trylock(gpriv, &flags);

	ureq = usbhsg_queue_get(uep);
	if (ureq) {
		if (prepare)
			ret = uep->handler->prepare(uep, ureq);
		else
			ret = uep->handler->try_run(uep, ureq);
	}

	usbhsg_unlock(lock, &flags);
	/******************** spin unlock ******************/

	return ret;
}
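/*
 * The usbhsg_trylock()/usbhsg_unlock() pair referenced by the CAUTION
 * comment above is not shown in this listing.  The sketch below is only a
 * plausible illustration of the pattern it describes, assuming the device
 * lock is reachable through usbhs_priv_to_lock() as in __usbhsg_queue_pop().
 */
static spinlock_t *usbhsg_trylock(struct usbhsg_gpriv *gpriv,
				  unsigned long *flags)
{
	spinlock_t *lock = usbhs_priv_to_lock(usbhsg_gpriv_to_priv(gpriv));

	/* take the lock only if nobody holds it yet */
	if (!spin_trylock_irqsave(lock, *flags))
		return NULL;	/* already locked: the caller holds it */

	return lock;
}

static void usbhsg_unlock(spinlock_t *lock, unsigned long *flags)
{
	/* no-op when usbhsg_trylock() did not take the lock */
	if (!lock)
		return;

	spin_unlock_irqrestore(lock, *flags);
}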
static int usbhsg_ep_queue(struct usb_ep *ep, struct usb_request *req,
			   gfp_t gfp_flags)
{
	struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep);
	struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
	struct usbhsg_request *ureq = usbhsg_req_to_ureq(req);
	struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
	spinlock_t *lock;
	unsigned long flags;
	int ret = 0;

	/*
	 * CAUTION [*endpoint queue*]
	 *
	 * This function can be called from usb_request :: complete
	 * or directly from the USB function driver.
	 * If it is called from usb_request :: complete, this driver's
	 * spinlock is already held.
	 * If it is called from the function driver, this function has to
	 * take the spinlock itself.
	 *
	 * It uses usbhsg_trylock to solve this issue.
	 * If usbhsg_trylock() returns the lock, it was taken here;
	 * if it returns NULL, the lock was already held.
	 * see also
	 *  CAUTION [*queue handler*]
	 *  CAUTION [*request complete*]
	 */

	/********************  spin lock ********************/
	lock = usbhsg_trylock(gpriv, &flags);

	/* param check */
	if (usbhsg_is_not_connected(gpriv)	||
	    unlikely(!gpriv->driver)		||
	    unlikely(!pipe))
		ret = -ESHUTDOWN;
	else
		usbhsg_queue_push(uep, ureq);

	usbhsg_unlock(lock, &flags);
	/******************** spin unlock ******************/

	usbhsg_queue_prepare(uep);

	return ret;
}
/*
 *
 *		usb_ep_ops
 *
 */
static int usbhsg_ep_enable(struct usb_ep *ep,
			    const struct usb_endpoint_descriptor *desc)
{
	struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep);
	struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
	struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
	struct usbhs_pipe *pipe;
	int ret = -EIO;

	/*
	 * if this endpoint already has a pipe,
	 * nothing to do
	 */
	if (uep->pipe) {
		usbhs_pipe_clear(uep->pipe);
		usbhs_pipe_sequence_data0(uep->pipe);
		return 0;
	}

	pipe = usbhs_pipe_malloc(priv,
				 usb_endpoint_type(desc),
				 usb_endpoint_dir_in(desc));
	if (pipe) {
		uep->pipe		= pipe;
		pipe->mod_private	= uep;

		/* set epnum / maxp */
		usbhs_pipe_config_update(pipe, 0,
					 usb_endpoint_num(desc),
					 usb_endpoint_maxp(desc));

		/*
		 * usbhs_fifo_dma_push/pop_handler try to
		 * use dmaengine if possible.
		 * They fall back to the pio handler if that is not possible.
		 */
		if (usb_endpoint_dir_in(desc))
			pipe->handler = &usbhs_fifo_dma_push_handler;
		else
			pipe->handler = &usbhs_fifo_dma_pop_handler;

		ret = 0;
	}

	return ret;
}
static int usbhsg_recip_handler_std_clear_endpoint(struct usbhs_priv *priv,
						   struct usbhsg_uep *uep,
						   struct usb_ctrlrequest *ctrl)
{
	struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
	struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);

	if (!usbhsg_status_has(gpriv, USBHSG_STATUS_WEDGE)) {
		usbhs_pipe_disable(pipe);
		usbhs_pipe_clear_sequence(pipe);
		usbhs_pipe_enable(pipe);
	}

	usbhsg_recip_handler_std_control_done(priv, uep, ctrl);

	return 0;
}
/*
 *		queue push/pop
 */
static void usbhsg_queue_push(struct usbhsg_uep *uep,
			      struct usbhsg_request *ureq)
{
	struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
	struct device *dev = usbhsg_gpriv_to_dev(gpriv);
	struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
	struct usbhs_pkt *pkt = usbhsg_ureq_to_pkt(ureq);
	struct usb_request *req = &ureq->req;

	req->actual = 0;
	req->status = -EINPROGRESS;
	usbhs_pkt_push(pipe, pkt, uep->handler,
		       req->buf, req->length, req->zero);

	dev_dbg(dev, "pipe %d : queue push (%d)\n",
		usbhs_pipe_number(pipe),
		req->length);
}
static int usbhsg_ep_queue(struct usb_ep *ep, struct usb_request *req,
			   gfp_t gfp_flags)
{
	struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep);
	struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
	struct usbhsg_request *ureq = usbhsg_req_to_ureq(req);
	struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);

	/* param check */
	if (usbhsg_is_not_connected(gpriv)	||
	    unlikely(!gpriv->driver)		||
	    unlikely(!pipe))
		return -ESHUTDOWN;

	usbhsg_queue_push(uep, ureq);

	return 0;
}
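/*
 * For context: this op is reached through the gadget framework, never called
 * directly by a USB function driver.  The snippet below is a hypothetical
 * caller using the standard gadget API (<linux/usb/gadget.h>); the endpoint,
 * helper names and completion handler are illustrative only.
 */
static void my_bulk_complete(struct usb_ep *ep, struct usb_request *req)
{
	/* req->status and req->actual are valid when this runs */
}

static int my_queue_bulk(struct usb_ep *ep, void *buf, unsigned int len)
{
	struct usb_request *req;

	req = usb_ep_alloc_request(ep, GFP_ATOMIC);
	if (!req)
		return -ENOMEM;

	req->buf	= buf;
	req->length	= len;
	req->complete	= my_bulk_complete;

	/* ends up in usbhsg_ep_queue() via ep->ops->queue */
	return usb_ep_queue(ep, req, GFP_ATOMIC);
}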
static int usbhsg_ep_disable(struct usb_ep *ep)
{
	struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep);
	struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
	spinlock_t *lock;
	unsigned long flags;
	int ret;

	/********************  spin lock ********************/
	lock = usbhsg_trylock(gpriv, &flags);

	ret = usbhsg_pipe_disable(uep);

	usbhsg_unlock(lock, &flags);
	/******************** spin unlock ******************/

	return ret;
}
/*
 *		list push/pop
 */
static void usbhsg_queue_push(struct usbhsg_uep *uep,
			      struct usbhsg_request *ureq)
{
	struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
	struct device *dev = usbhsg_gpriv_to_dev(gpriv);
	struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);

	/* ********* assume under spin lock ********* */
	list_del_init(&ureq->node);
	list_add_tail(&ureq->node, &uep->list);
	ureq->req.actual = 0;
	ureq->req.status = -EINPROGRESS;

	dev_dbg(dev, "pipe %d : queue push (%d)\n",
		usbhs_pipe_number(pipe),
		ureq->req.length);
}
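/*
 * usbhsg_queue_get(), used by __usbhsg_queue_handler() and
 * __usbhsg_ep_set_halt_wedge(), is not shown in this listing.  Since
 * requests are queued on uep->list above, it is presumably just a head
 * lookup; the sketch below is a minimal version under that assumption.
 */
static struct usbhsg_request *usbhsg_queue_get(struct usbhsg_uep *uep)
{
	/* ********* assume under spin lock ********* */
	if (list_empty(&uep->list))
		return NULL;

	/* first queued request, or NULL when the endpoint is idle */
	return list_first_entry(&uep->list, struct usbhsg_request, node);
}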
static int __usbhsg_ep_set_halt_wedge(struct usb_ep *ep, int halt, int wedge)
{
	struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep);
	struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
	struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
	struct device *dev = usbhsg_gpriv_to_dev(gpriv);
	spinlock_t *lock;
	unsigned long flags;
	int ret = -EAGAIN;

	/*
	 * see
	 *  CAUTION [*queue handler*]
	 *  CAUTION [*endpoint queue*]
	 *  CAUTION [*request complete*]
	 */

	/********************  spin lock ********************/
	lock = usbhsg_trylock(gpriv, &flags);
	if (!usbhsg_queue_get(uep)) {
		dev_dbg(dev, "set halt %d (pipe %d)\n",
			halt, usbhs_pipe_number(pipe));

		if (halt)
			usbhs_fifo_stall(pipe);
		else
			usbhs_fifo_disable(pipe);

		if (halt && wedge)
			usbhsg_status_set(gpriv, USBHSG_STATUS_WEDGE);
		else
			usbhsg_status_clr(gpriv, USBHSG_STATUS_WEDGE);

		ret = 0;
	}

	usbhsg_unlock(lock, &flags);
	/******************** spin unlock ******************/

	return ret;
}
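/*
 * __usbhsg_ep_set_halt_wedge() is not exposed to the gadget core directly;
 * it would be reached through thin .set_halt/.set_wedge wrappers in the
 * endpoint ops table.  The table below is a sketch that only wires up the
 * functions present in this listing plus those two hypothetical wrappers;
 * the real ops table may contain additional entries.
 */
static int usbhsg_ep_set_halt(struct usb_ep *ep, int value)
{
	return __usbhsg_ep_set_halt_wedge(ep, value, 0);
}

static int usbhsg_ep_set_wedge(struct usb_ep *ep)
{
	return __usbhsg_ep_set_halt_wedge(ep, 1, 1);
}

static const struct usb_ep_ops usbhsg_ep_ops = {
	.enable		= usbhsg_ep_enable,
	.disable	= usbhsg_ep_disable,
	.queue		= usbhsg_ep_queue,
	.set_halt	= usbhsg_ep_set_halt,
	.set_wedge	= usbhsg_ep_set_wedge,
};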
/*
 *
 *		usb_dcp_ops
 *
 */
static int usbhsg_dcp_enable(struct usbhsg_uep *uep)
{
	struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
	struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
	struct usbhs_pipe *pipe;

	/* ********* assume under spin lock ********* */

	pipe = usbhs_dcp_malloc(priv);
	if (!pipe)
		return -EIO;

	uep->pipe		= pipe;
	uep->pipe->mod_private	= uep;
	INIT_LIST_HEAD(&uep->list);

	return 0;
}
static void usbhsg_queue_pop(struct usbhsg_uep *uep,
			     struct usbhsg_request *ureq,
			     int status)
{
	struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
	struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
	struct device *dev = usbhsg_gpriv_to_dev(gpriv);

	/* ********* assume under spin lock ********* */

	/*
	 * CAUTION [*request complete*]
	 *
	 * If "complete" were called without the spinlock held,
	 * completions could be delivered out of order.
	 *
	 * So this function assumes it runs under the spinlock,
	 * and calls usb_request :: complete there.
	 *
	 * But that "complete" callback may push the next usb_request.
	 * This means "usb_ep_ops :: queue", which also takes the spinlock,
	 * is then called while the lock is already held.
	 *
	 * To avoid that deadlock, this driver uses usbhsg_trylock.
	 *  CAUTION [*endpoint queue*]
	 *  CAUTION [*queue handler*]
	 */

	dev_dbg(dev, "pipe %d : queue pop\n", usbhs_pipe_number(pipe));

	list_del_init(&ureq->node);

	ureq->req.status = status;
	ureq->req.complete(&uep->ep, &ureq->req);

	/* more request ? */
	if (0 == status)
		usbhsg_queue_prepare(uep);
}
/*
 *
 *		usb_ep_ops
 *
 */
static int usbhsg_ep_enable(struct usb_ep *ep,
			    const struct usb_endpoint_descriptor *desc)
{
	struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep);
	struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
	struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
	struct usbhs_pipe *pipe;
	spinlock_t *lock;
	unsigned long flags;
	int ret = -EIO;

	/*
	 * if this endpoint already has a pipe,
	 * nothing to do
	 */
	if (uep->pipe)
		return 0;

	/********************  spin lock ********************/
	lock = usbhsg_trylock(gpriv, &flags);

	pipe = usbhs_pipe_malloc(priv, desc);
	if (pipe) {
		uep->pipe		= pipe;
		pipe->mod_private	= uep;
		INIT_LIST_HEAD(&uep->list);

		if (usb_endpoint_dir_in(desc))
			uep->handler = &usbhsg_handler_send_packet;
		else
			uep->handler = &usbhsg_handler_recv_packet;

		ret = 0;
	}

	usbhsg_unlock(lock, &flags);
	/******************** spin unlock ******************/

	return ret;
}
static int usbhsg_try_run_send_packet(struct usbhsg_uep *uep,
				      struct usbhsg_request *ureq)
{
	struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
	struct usb_request *req = &ureq->req;
	struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
	struct device *dev = usbhsg_gpriv_to_dev(gpriv);
	void *buf;
	int remainder, send;
	int is_done = 0;
	int enable;
	int maxp;

	/* ********* assume under spin lock ********* */

	maxp = usbhs_pipe_get_maxpacket(pipe);
	buf = req->buf + req->actual;
	remainder = req->length - req->actual;

	send = usbhs_fifo_write(pipe, buf, remainder);

	/*
	 * send < 0 : pipe busy
	 * send = 0 : send zero packet
	 * send > 0 : send data
	 *
	 * send <= max_packet
	 */
	if (send > 0)
		req->actual += send;

	/* has the whole request been sent ? */
	if (send < remainder)
		is_done = 0;		/* there is remaining data */
	else if (send < maxp)
		is_done = 1;		/* short packet */
	else
		is_done = !req->zero;	/* send zero packet ? */

	dev_dbg(dev, "  send %d (%d/ %d/ %d/ %d)\n",
		usbhs_pipe_number(pipe),
		remainder, send, is_done, req->zero);

	/*
	 * enable the interrupt and send again from the irq handler
	 * if there is still remaining data to be sent.
	 */
	enable = !is_done;
	uep->handler->irq_mask(uep, enable);

	/*
	 * usbhs_fifo_enable must execute
	 *  - after callback_update,
	 *  - before queue_pop / stage_end
	 */
	usbhs_fifo_enable(pipe);

	/*
	 * were all data sent ?
	 */
	if (is_done) {
		/* the call below matters in "function mode" */
		if (usbhsg_is_dcp(uep))
			usbhs_dcp_control_transfer_done(pipe);

		usbhsg_queue_pop(uep, ureq, 0);
	}

	return 0;
}