/*
 * Common backend for usb_ep_ops :: set_halt and :: set_wedge.
 *
 * @ep:    gadget endpoint to halt / un-halt
 * @halt:  non-zero to stall the pipe, zero to clear the stall
 * @wedge: non-zero (together with @halt) to latch the WEDGE status so a
 *         later ClearFeature(HALT) will not un-stall the endpoint
 *
 * Returns 0.
 */
static int __usbhsg_ep_set_halt_wedge(struct usb_ep *ep, int halt, int wedge)
{
	struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep);
	struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
	struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
	struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
	struct device *dev = usbhsg_gpriv_to_dev(gpriv);
	unsigned long flags;

	/*
	 * NOTE(review): the pipe teardown below runs before the driver lock
	 * is taken — confirm no request can be queued concurrently at this
	 * point.
	 */
	usbhsg_pipe_disable(uep);

	dev_dbg(dev, "set halt %d (pipe %d)\n",
		halt, usbhs_pipe_number(pipe));

	/********************  spin lock ********************/
	usbhs_lock(priv, flags);

	/* halt -> stall the pipe; clear-halt -> just disable it */
	if (halt)
		usbhs_pipe_stall(pipe);
	else
		usbhs_pipe_disable(pipe);

	/* WEDGE status only sticks when halting; otherwise clear it */
	if (halt && wedge)
		usbhsg_status_set(gpriv, USBHSG_STATUS_WEDGE);
	else
		usbhsg_status_clr(gpriv, USBHSG_STATUS_WEDGE);

	usbhs_unlock(priv, flags);
	/********************  spin unlock ******************/

	return 0;
}
static int usbhsg_pipe_disable(struct usbhsg_uep *uep) { struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep); struct usbhsg_request *ureq; int disable = 0; /* ********* assume under spin lock ********* */ usbhs_fifo_disable(pipe); /* * disable pipe irq */ usbhsg_irq_empty_ctrl(uep, disable); usbhsg_irq_ready_ctrl(uep, disable); while (1) { ureq = usbhsg_queue_get(uep); if (!ureq) break; usbhsg_queue_pop(uep, ureq, -ECONNRESET); } return 0; }
/* * USB_TYPE handler */ static int usbhsg_recip_run_handle(struct usbhs_priv *priv, struct usbhsg_recip_handle *handler, struct usb_ctrlrequest *ctrl) { struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv); struct device *dev = usbhsg_gpriv_to_dev(gpriv); struct usbhsg_uep *uep; struct usbhs_pipe *pipe; int recip = ctrl->bRequestType & USB_RECIP_MASK; int nth = le16_to_cpu(ctrl->wIndex) & USB_ENDPOINT_NUMBER_MASK; int ret; int (*func)(struct usbhs_priv *priv, struct usbhsg_uep *uep, struct usb_ctrlrequest *ctrl); char *msg; uep = usbhsg_gpriv_to_nth_uep(gpriv, nth); pipe = usbhsg_uep_to_pipe(uep); if (!pipe) { dev_err(dev, "wrong recip request\n"); ret = -EINVAL; goto usbhsg_recip_run_handle_end; } switch (recip) { case USB_RECIP_DEVICE: msg = "DEVICE"; func = handler->device; break; case USB_RECIP_INTERFACE: msg = "INTERFACE"; func = handler->interface; break; case USB_RECIP_ENDPOINT: msg = "ENDPOINT"; func = handler->endpoint; break; default: dev_warn(dev, "unsupported RECIP(%d)\n", recip); func = NULL; ret = -EINVAL; } if (func) { unsigned long flags; dev_dbg(dev, "%s (pipe %d :%s)\n", handler->name, nth, msg); /******************** spin lock ********************/ usbhs_lock(priv, flags); ret = func(priv, uep, ctrl); usbhs_unlock(priv, flags); /******************** spin unlock ******************/ } usbhsg_recip_run_handle_end: usbhs_pkt_start(pipe); return ret; }
/*
 * usb_ep_ops :: dequeue
 *
 * Fix: if the endpoint was already disabled, usbhsg_uep_to_pipe() yields
 * NULL and the unconditional usbhs_pkt_pop() dereferenced it.  Guard the
 * hardware pop with a pipe check, but still complete the request back to
 * the gadget driver.
 */
static int usbhsg_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
{
	struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep);
	struct usbhsg_request *ureq = usbhsg_req_to_ureq(req);
	struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);

	if (pipe)
		usbhs_pkt_pop(pipe, usbhsg_ureq_to_pkt(ureq));

	/*
	 * To dequeue a request, this driver should call the usbhsg_queue_pop()
	 * even if the pipe is NULL.
	 */
	usbhsg_queue_pop(uep, ureq, -ECONNRESET);

	return 0;
}
/*
 *	USB_TYPE_STANDARD / clear feature functions
 */
static int usbhsg_recip_handler_std_control_done(struct usbhs_priv *priv,
						 struct usbhsg_uep *uep,
						 struct usb_ctrlrequest *ctrl)
{
	/* complete the control transfer on the DCP (default control pipe) */
	struct usbhsg_uep *dcp =
		usbhsg_gpriv_to_dcp(usbhsg_priv_to_gpriv(priv));

	usbhs_dcp_control_transfer_done(usbhsg_uep_to_pipe(dcp));

	return 0;
}
static int usbhsg_recip_handler_std_set_endpoint(struct usbhs_priv *priv,
						 struct usbhsg_uep *uep,
						 struct usb_ctrlrequest *ctrl)
{
	/* stall the target pipe, then acknowledge the control transfer */
	usbhs_pipe_stall(usbhsg_uep_to_pipe(uep));

	usbhsg_recip_handler_std_control_done(priv, uep, ctrl);

	return 0;
}
/*
 * Pull as much data as is available from the pipe FIFO into the current
 * request buffer.  Called again from the irq handler until the transfer
 * is complete.
 *
 * Returns -EBUSY when the pipe FIFO is busy, 0 otherwise.
 */
static int usbhsg_try_run_receive_packet(struct usbhsg_uep *uep,
					 struct usbhsg_request *ureq)
{
	struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
	struct usb_request *req = &ureq->req;
	struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
	struct device *dev = usbhsg_gpriv_to_dev(gpriv);
	void *buf;
	int maxp;
	int remainder, recv;
	int is_done = 0;

	/* ********* assume under spin lock ********* */

	maxp = usbhs_pipe_get_maxpacket(pipe);
	buf = req->buf + req->actual;		/* resume where we left off */
	remainder = req->length - req->actual;

	recv = usbhs_fifo_read(pipe, buf, remainder);
	/*
	 * recv < 0  : pipe busy
	 * recv >= 0 : receive data
	 *
	 * recv <= max_packet
	 */
	if (recv < 0)
		return -EBUSY;

	/* update parameters */
	req->actual += recv;

	/* transfer ends on full buffer or on a short packet */
	if ((recv == remainder) ||	/* receive all data */
	    (recv < maxp))		/* short packet */
		is_done = 1;

	dev_dbg(dev, "  recv %d (%d/ %d/ %d/ %d)\n",
		usbhs_pipe_number(pipe),
		remainder, recv, is_done, req->zero);

	/* read all data ? */
	if (is_done) {
		int disable = 0;

		/* mask the irq and shut the FIFO before completing */
		uep->handler->irq_mask(uep, disable);
		usbhs_fifo_disable(pipe);
		usbhsg_queue_pop(uep, ureq, 0);
	}

	return 0;
}
/*
 *		queue push/pop
 */
static void usbhsg_queue_pop(struct usbhsg_uep *uep,
			     struct usbhsg_request *ureq,
			     int status)
{
	struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
	struct device *dev = usbhsg_gpriv_to_dev(gpriv);

	dev_dbg(dev, "pipe %d : queue pop\n",
		usbhs_pipe_number(usbhsg_uep_to_pipe(uep)));

	/* record the completion code, then hand the request back */
	ureq->req.status = status;
	usb_gadget_giveback_request(&uep->ep, &ureq->req);
}
static int usbhsg_prepare_send_packet(struct usbhsg_uep *uep,
				      struct usbhsg_request *ureq)
{
	/* ********* assume under spin lock ********* */

	/* arm the FIFO for writing, then push the first chunk right away */
	usbhs_fifo_prepare_write(usbhsg_uep_to_pipe(uep));
	usbhsg_try_run_send_packet(uep, ureq);

	return 0;
}
/*
 *		handler function
 */
static int usbhsg_try_run_ctrl_stage_end(struct usbhsg_uep *uep,
					 struct usbhsg_request *ureq)
{
	struct usbhs_pipe *dcp_pipe = usbhsg_uep_to_pipe(uep);

	/* ********* assume under spin lock ********* */

	/* finish the control transfer, then complete the request */
	usbhs_dcp_control_transfer_done(dcp_pipe);
	usbhsg_queue_pop(uep, ureq, 0);

	return 0;
}
/*
 *
 *		usb_dcp_ops
 *
 */
static int usbhsg_pipe_disable(struct usbhsg_uep *uep)
{
	struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);

	usbhs_pipe_disable(pipe);

	/* drain every packet still queued on this pipe */
	while (usbhs_pkt_pop(pipe, NULL))
		;

	return 0;
}
static int usbhsg_recip_handler_std_get_endpoint(struct usbhs_priv *priv,
						 struct usbhsg_uep *uep,
						 struct usb_ctrlrequest *ctrl)
{
	struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
	struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
	unsigned short status = usbhs_pipe_is_stall(pipe) ?
				(1 << USB_ENDPOINT_HALT) : 0;

	/* report the endpoint's halt state via the status stage */
	__usbhsg_recip_send_status(gpriv, status);

	return 0;
}
/*
 * usb_ep_ops :: disable
 *
 * Tears down the pipe bound to @ep and severs the uep <-> pipe links.
 * Returns -EINVAL when no pipe is attached, 0 otherwise.
 *
 * NOTE(review): no locking is taken here and uep->pipe is still
 * dereferenced after usbhs_pipe_free() — presumably pipe objects are
 * statically allocated and "free" only marks them available; confirm
 * against the pipe layer.
 */
static int usbhsg_ep_disable(struct usb_ep *ep)
{
	struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep);
	struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);

	if (!pipe)
		return -EINVAL;

	usbhsg_pipe_disable(uep);
	usbhs_pipe_free(pipe);

	/* drop both directions of the uep <-> pipe association */
	uep->pipe->mod_private = NULL;
	uep->pipe = NULL;

	return 0;
}
static int usbhsg_recip_handler_std_clear_endpoint(struct usbhs_priv *priv,
						   struct usbhsg_uep *uep,
						   struct usb_ctrlrequest *ctrl)
{
	struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
	struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
	int wedged = usbhsg_status_has(gpriv, USBHSG_STATUS_WEDGE);

	/*
	 * Reset the pipe (disable, clear data toggle, re-enable) unless it
	 * was wedged — a wedged endpoint must keep its halt condition.
	 */
	if (!wedged) {
		usbhs_pipe_disable(pipe);
		usbhs_pipe_clear_sequence(pipe);
		usbhs_pipe_enable(pipe);
	}

	usbhsg_recip_handler_std_control_done(priv, uep, ctrl);

	return 0;
}
/*
 *
 *		usb_dcp_ops
 *
 */
static int usbhsg_pipe_disable(struct usbhsg_uep *uep)
{
	struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
	struct usbhs_pkt *pkt;

	/* flush queued packets, completing each request as -ECONNRESET */
	while ((pkt = usbhs_pkt_pop(pipe, NULL)) != NULL)
		usbhsg_queue_pop(uep, usbhsg_pkt_to_ureq(pkt), -ECONNRESET);

	usbhs_pipe_disable(pipe);

	return 0;
}
static int usbhsg_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
{
	struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep);
	struct usbhsg_request *ureq = usbhsg_req_to_ureq(req);
	struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);

	/* detach from the hardware queue only while a pipe is attached */
	if (pipe)
		usbhs_pkt_pop(pipe, usbhsg_ureq_to_pkt(ureq));

	/*
	 * The request must always be completed back to the gadget driver,
	 * even when the pipe has already been torn down (pipe == NULL).
	 */
	usbhsg_queue_pop(uep, ureq, -ECONNRESET);

	return 0;
}
/*
 * usb_ep_ops :: queue (trylock variant)
 *
 * Pushes @req onto the endpoint queue and kicks queue preparation.
 * Returns 0 on success, -ESHUTDOWN when disconnected, driver unbound,
 * or no pipe is attached.
 */
static int usbhsg_ep_queue(struct usb_ep *ep, struct usb_request *req,
			   gfp_t gfp_flags)
{
	struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep);
	struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
	struct usbhsg_request *ureq = usbhsg_req_to_ureq(req);
	struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
	spinlock_t *lock;
	unsigned long flags;
	int ret = 0;

	/*
	 * CAUTION [*endpoint queue*]
	 *
	 * This function will be called from usb_request :: complete
	 * or usb driver timing.
	 * If this function is called from usb_request :: complete,
	 * it is already under spinlock on this driver.
	 * But if it is called from the usb driver, this function should
	 * take the spinlock itself.
	 *
	 * This function is using usbhsg_trylock to solve this issue.
	 * If the returned lock is non-NULL, it means this function took
	 * the lock here; if it is NULL, the caller already holds it.
	 * see also
	 *	CAUTION [*queue handler*]
	 *	CAUTION [*request complete*]
	 */

	/********************  spin lock ********************/
	lock = usbhsg_trylock(gpriv, &flags);

	/* param check */
	if (usbhsg_is_not_connected(gpriv) ||
	    unlikely(!gpriv->driver) ||
	    unlikely(!pipe))
		ret = -ESHUTDOWN;
	else
		usbhsg_queue_push(uep, ureq);

	usbhsg_unlock(lock, &flags);
	/********************  spin unlock ******************/

	usbhsg_queue_prepare(uep);

	return ret;
}
static int usbhsg_ep_queue(struct usb_ep *ep, struct usb_request *req,
			   gfp_t gfp_flags)
{
	struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep);
	struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
	struct usbhsg_request *ureq = usbhsg_req_to_ureq(req);
	struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);

	/* reject queuing while disconnected, unbound, or pipe-less */
	if (usbhsg_is_not_connected(gpriv))
		return -ESHUTDOWN;
	if (unlikely(!gpriv->driver))
		return -ESHUTDOWN;
	if (unlikely(!pipe))
		return -ESHUTDOWN;

	usbhsg_queue_push(uep, ureq);

	return 0;
}
/* * queue push/pop */ static void usbhsg_queue_push(struct usbhsg_uep *uep, struct usbhsg_request *ureq) { struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep); struct device *dev = usbhsg_gpriv_to_dev(gpriv); struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep); struct usbhs_pkt *pkt = usbhsg_ureq_to_pkt(ureq); struct usb_request *req = &ureq->req; req->actual = 0; req->status = -EINPROGRESS; usbhs_pkt_push(pipe, pkt, uep->handler, req->buf, req->length, req->zero); dev_dbg(dev, "pipe %d : queue push (%d)\n", usbhs_pipe_number(pipe), req->length); }
/* * list push/pop */ static void usbhsg_queue_push(struct usbhsg_uep *uep, struct usbhsg_request *ureq) { struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep); struct device *dev = usbhsg_gpriv_to_dev(gpriv); struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep); /* ********* assume under spin lock ********* */ list_del_init(&ureq->node); list_add_tail(&ureq->node, &uep->list); ureq->req.actual = 0; ureq->req.status = -EINPROGRESS; dev_dbg(dev, "pipe %d : queue push (%d)\n", usbhs_pipe_number(pipe), ureq->req.length); }
/*
 * Common backend for usb_ep_ops :: set_halt / set_wedge (trylock variant).
 *
 * Only applies the halt/wedge change when the endpoint queue is empty;
 * otherwise returns -EAGAIN so the caller can retry.
 */
static int __usbhsg_ep_set_halt_wedge(struct usb_ep *ep, int halt, int wedge)
{
	struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep);
	struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
	struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
	struct device *dev = usbhsg_gpriv_to_dev(gpriv);
	spinlock_t *lock;
	unsigned long flags;
	int ret = -EAGAIN;

	/*
	 * see
	 *	CAUTION [*queue handler*]
	 *	CAUTION [*endpoint queue*]
	 *	CAUTION [*request complete*]
	 */

	/********************  spin lock ********************/
	lock = usbhsg_trylock(gpriv, &flags);

	/* refuse to (un)halt while requests are still queued */
	if (!usbhsg_queue_get(uep)) {

		dev_dbg(dev, "set halt %d (pipe %d)\n",
			halt, usbhs_pipe_number(pipe));

		/* halt -> stall the FIFO; clear-halt -> just disable it */
		if (halt)
			usbhs_fifo_stall(pipe);
		else
			usbhs_fifo_disable(pipe);

		/* WEDGE status only sticks when halting */
		if (halt && wedge)
			usbhsg_status_set(gpriv, USBHSG_STATUS_WEDGE);
		else
			usbhsg_status_clr(gpriv, USBHSG_STATUS_WEDGE);

		ret = 0;
	}

	usbhsg_unlock(lock, &flags);
	/********************  spin unlock ******************/

	return ret;
}
/*
 * Remove @ureq from the endpoint list, record @status and call the
 * gadget driver's completion callback; on success, kick preparation of
 * the next queued request.
 */
static void usbhsg_queue_pop(struct usbhsg_uep *uep,
			     struct usbhsg_request *ureq,
			     int status)
{
	struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
	struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
	struct device *dev = usbhsg_gpriv_to_dev(gpriv);

	/* ********* assume under spin lock ********* */

	/*
	 * CAUTION [*request complete*]
	 *
	 * There is a possibility not to be called in correct order
	 * if "complete" is called without spinlock.
	 *
	 * So, this function assumes it is under spinlock,
	 * and calls usb_request :: complete.
	 *
	 * But this "complete" may push the next usb_request.
	 * That means "usb_ep_ops :: queue", which uses a spinlock,
	 * is called under spinlock.
	 *
	 * To avoid dead-lock, this driver is using usbhsg_trylock.
	 *	CAUTION [*endpoint queue*]
	 *	CAUTION [*queue handler*]
	 */

	dev_dbg(dev, "pipe %d : queue pop\n", usbhs_pipe_number(pipe));

	list_del_init(&ureq->node);

	ureq->req.status = status;
	ureq->req.complete(&uep->ep, &ureq->req);

	/* more request ? */
	if (0 == status)
		usbhsg_queue_prepare(uep);
}
static int usbhsg_ep_dequeue(struct usb_ep *ep, struct usb_request *req) { struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep); struct usbhsg_request *ureq = usbhsg_req_to_ureq(req); struct usbhs_pipe *pipe; unsigned long flags; spin_lock_irqsave(&uep->lock, flags); pipe = usbhsg_uep_to_pipe(uep); if (pipe) usbhs_pkt_pop(pipe, usbhsg_ureq_to_pkt(ureq)); /* * To dequeue a request, this driver should call the usbhsg_queue_pop() * even if the pipe is NULL. */ usbhsg_queue_pop(uep, ureq, -ECONNRESET); spin_unlock_irqrestore(&uep->lock, flags); return 0; }
static int usbhsg_ep_disable(struct usb_ep *ep)
{
	struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep);
	struct usbhs_pipe *pipe;
	unsigned long flags;

	spin_lock_irqsave(&uep->lock, flags);

	/* nothing to do when no pipe is attached */
	pipe = usbhsg_uep_to_pipe(uep);
	if (pipe) {
		usbhsg_pipe_disable(uep);
		usbhs_pipe_free(pipe);

		/* drop both directions of the uep <-> pipe association */
		uep->pipe->mod_private = NULL;
		uep->pipe = NULL;
	}

	spin_unlock_irqrestore(&uep->lock, flags);

	return 0;
}
static int usbhsg_prepare_receive_packet(struct usbhsg_uep *uep,
					 struct usbhsg_request *ureq)
{
	int ret;

	/* ********* assume under spin lock ********* */

	ret = usbhs_fifo_prepare_read(usbhsg_uep_to_pipe(uep));
	if (ret < 0)
		return ret;

	/* the actual read happens later, from the interrupt handler */
	uep->handler->irq_mask(uep, 1 /* enable */);

	return ret;
}
/*
 * Queue a 2-byte status reply on the DCP for a GetStatus-style recip
 * request.  Allocates a usb_request and buffer here; both are released
 * later by __usbhsg_recip_send_complete.  Errors are logged and the
 * reply silently dropped.
 */
static void __usbhsg_recip_send_status(struct usbhsg_gpriv *gpriv,
				       unsigned short status)
{
	struct usbhsg_uep *dcp = usbhsg_gpriv_to_dcp(gpriv);
	struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(dcp);
	struct device *dev = usbhsg_gpriv_to_dev(gpriv);
	struct usb_request *req;
	unsigned short *buf;

	/* alloc new usb_request for recip */
	req = usb_ep_alloc_request(&dcp->ep, GFP_ATOMIC);
	if (!req) {
		dev_err(dev, "recip request allocation fail\n");
		return;
	}

	/* alloc recip data buffer */
	buf = kmalloc(sizeof(*buf), GFP_ATOMIC);
	if (!buf) {
		/* free the request too — buffer allocation failed */
		usb_ep_free_request(&dcp->ep, req);
		dev_err(dev, "recip data allocation fail\n");
		return;
	}

	/* recip data is status */
	*buf = cpu_to_le16(status);

	/* allocated usb_request/buffer will be freed */
	req->complete = __usbhsg_recip_send_complete;
	req->buf = buf;
	req->length = sizeof(*buf);
	req->zero = 0;

	/* push packet */
	pipe->handler = &usbhs_fifo_pio_push_handler;
	usbhsg_queue_push(dcp, usbhsg_req_to_ureq(req));
}
/*
 * Control-transfer stage-transition interrupt handler.
 *
 * Selects the DCP packet handler for the new stage, then (for the setup
 * stage) either runs the recip handler (ClearFeature) or forwards the
 * request to the gadget driver's setup().  A negative result stalls the
 * control pipe.
 */
static int usbhsg_irq_ctrl_stage(struct usbhs_priv *priv,
				 struct usbhs_irq_state *irq_state)
{
	struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv);
	struct usbhsg_uep *dcp = usbhsg_gpriv_to_dcp(gpriv);
	struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(dcp);
	struct device *dev = usbhsg_gpriv_to_dev(gpriv);
	struct usb_ctrlrequest ctrl;
	struct usbhsg_recip_handle *recip_handler = NULL;
	int stage = usbhs_status_get_ctrl_stage(irq_state);
	int ret = 0;

	dev_dbg(dev, "stage = %d\n", stage);

	/*
	 * see Manual
	 *
	 *  "Operation"
	 *  - "Interrupt Function"
	 *    - "Control Transfer Stage Transition Interrupt"
	 *      - Fig. "Control Transfer Stage Transitions"
	 */

	switch (stage) {
	case READ_DATA_STAGE:
		dcp->handler = &usbhsg_handler_send_ctrl;
		break;
	case WRITE_DATA_STAGE:
		dcp->handler = &usbhsg_handler_recv_ctrl;
		break;
	case NODATA_STATUS_STAGE:
		dcp->handler = &usbhsg_handler_ctrl_stage_end;
		break;
	default:
		/* other stage transitions need no action here */
		return ret;
	}

	/*
	 * get usb request
	 */
	usbhs_usbreq_get_val(priv, &ctrl);

	/* only standard ClearFeature gets the driver's recip handling */
	switch (ctrl.bRequestType & USB_TYPE_MASK) {
	case USB_TYPE_STANDARD:
		switch (ctrl.bRequest) {
		case USB_REQ_CLEAR_FEATURE:
			recip_handler = &req_clear_feature;
			break;
		}
	}

	/*
	 * setup stage / run recip
	 */
	if (recip_handler)
		ret = usbhsg_recip_run_handle(priv, recip_handler, &ctrl);
	else
		ret = gpriv->driver->setup(&gpriv->gadget, &ctrl);

	if (ret < 0)
		usbhs_fifo_stall(pipe);

	return ret;
}
/*
 * Push as much of the current request as fits into the pipe FIFO.
 * Re-armed from the irq handler until the whole request (including an
 * optional trailing zero-length packet) has been sent.
 *
 * Returns 0 (a busy FIFO just leaves the request pending for the irq).
 */
static int usbhsg_try_run_send_packet(struct usbhsg_uep *uep,
				      struct usbhsg_request *ureq)
{
	struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
	struct usb_request *req = &ureq->req;
	struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
	struct device *dev = usbhsg_gpriv_to_dev(gpriv);
	void *buf;
	int remainder, send;
	int is_done = 0;
	int enable;
	int maxp;

	/* ********* assume under spin lock ********* */

	maxp = usbhs_pipe_get_maxpacket(pipe);
	buf = req->buf + req->actual;		/* resume where we left off */
	remainder = req->length - req->actual;

	send = usbhs_fifo_write(pipe, buf, remainder);

	/*
	 * send < 0 : pipe busy
	 * send = 0 : send zero packet
	 * send > 0 : send data
	 *
	 * send <= max_packet
	 */
	if (send > 0)
		req->actual += send;

	/* send all packet ? */
	if (send < remainder)
		is_done = 0;		/* there are remainder data */
	else if (send < maxp)
		is_done = 1;		/* short packet */
	else
		is_done = !req->zero;	/* send zero packet ? */

	dev_dbg(dev, "  send %d (%d/ %d/ %d/ %d)\n",
		usbhs_pipe_number(pipe),
		remainder, send, is_done, req->zero);

	/*
	 * enable interrupt and send again in irq handler
	 * if it still have remainder data which should be sent.
	 */
	enable = !is_done;
	uep->handler->irq_mask(uep, enable);

	/*
	 * usbhs_fifo_enable execute
	 *  - after callback_update,
	 *  - before queue_pop / stage_end
	 */
	usbhs_fifo_enable(pipe);

	/*
	 * all data were sent ?
	 */
	if (is_done) {
		/* it care below call in
		   "function mode" */
		if (usbhsg_is_dcp(uep))
			usbhs_dcp_control_transfer_done(pipe);

		usbhsg_queue_pop(uep, ureq, 0);
	}

	return 0;
}