/*
 *		PIPEnTRN/PIPEnTRE functions
 */
static void usbhsp_pipe_trn_set(struct usbhs_pipe *pipe, u16 mask, u16 val)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct device *dev = usbhs_priv_to_dev(priv);
	int num = usbhs_pipe_number(pipe);
	u16 reg;

	/*
	 * The PIPEnTRN register addresses are not laid out regularly,
	 * so the address cannot be calculated from the pipe number;
	 * select it with a switch table instead.
	 */
#define CASE_PIPExTRN(a)		\
	case 0x ## a:			\
		reg = PIPE ## a ## TRN;	\
		break;

	switch (num) {
	CASE_PIPExTRN(1);
	CASE_PIPExTRN(2);
	CASE_PIPExTRN(3);
	CASE_PIPExTRN(4);
	CASE_PIPExTRN(5);
	CASE_PIPExTRN(B);
	CASE_PIPExTRN(C);
	CASE_PIPExTRN(D);
	CASE_PIPExTRN(E);
	CASE_PIPExTRN(F);
	CASE_PIPExTRN(9);
	CASE_PIPExTRN(A);
	default:
		dev_err(dev, "unknown pipe (%d)\n", num);
		return;
	}

	__usbhsp_pipe_xxx_set(pipe, 0, reg, mask, val);
}
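/*
 * Illustration only (not part of the driver): with the CASE_PIPExTRN()
 * macro above, CASE_PIPExTRN(1) expands via token pasting to
 *
 *	case 0x1:
 *		reg = PIPE1TRN;
 *		break;
 *
 * so each case simply maps a pipe number to its individually named
 * PIPEnTRN register constant.
 */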
static int __usbhsg_ep_set_halt_wedge(struct usb_ep *ep, int halt, int wedge)
{
	struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep);
	struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
	struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
	struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
	struct device *dev = usbhsg_gpriv_to_dev(gpriv);
	unsigned long flags;

	usbhsg_pipe_disable(uep);

	dev_dbg(dev, "set halt %d (pipe %d)\n",
		halt, usbhs_pipe_number(pipe));

	/********************  spin lock ********************/
	usbhs_lock(priv, flags);

	if (halt)
		usbhs_pipe_stall(pipe);
	else
		usbhs_pipe_disable(pipe);

	if (halt && wedge)
		usbhsg_status_set(gpriv, USBHSG_STATUS_WEDGE);
	else
		usbhsg_status_clr(gpriv, USBHSG_STATUS_WEDGE);

	usbhs_unlock(priv, flags);
	/********************  spin unlock ******************/

	return 0;
}
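/*
 * Note (not from the original source): in the USB gadget API, "wedge"
 * is a halt that the host must not be able to clear with a
 * ClearFeature(ENDPOINT_HALT) request; only the gadget driver may
 * clear it.  That is why the WEDGE status flag is set only when both
 * halt and wedge are requested, and cleared otherwise.
 */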
/*
 *
 *		usb_ep_ops
 *
 */
static int usbhsg_ep_enable(struct usb_ep *ep,
			    const struct usb_endpoint_descriptor *desc)
{
	struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep);
	struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
	struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
	struct usbhs_pipe *pipe;
	int ret = -EIO;
	unsigned long flags;

	usbhs_lock(priv, flags);

	/*
	 * if this endpoint already has a pipe,
	 * there is nothing to do
	 */
	if (uep->pipe) {
		usbhs_pipe_clear(uep->pipe);
		usbhs_pipe_sequence_data0(uep->pipe);
		ret = 0;
		goto usbhsg_ep_enable_end;
	}

	pipe = usbhs_pipe_malloc(priv,
				 usb_endpoint_type(desc),
				 usb_endpoint_dir_in(desc));
	if (pipe) {
		uep->pipe		= pipe;
		pipe->mod_private	= uep;

		/* set epnum / maxp */
		usbhs_pipe_config_update(pipe, 0,
					 usb_endpoint_num(desc),
					 usb_endpoint_maxp(desc));

		/*
		 * usbhs_fifo_dma_push/pop_handler will try to use the
		 * dmaengine if possible, and fall back to the pio
		 * handler otherwise.
		 */
		if (usb_endpoint_dir_in(desc)) {
			pipe->handler = &usbhs_fifo_dma_push_handler;
		} else {
			pipe->handler = &usbhs_fifo_dma_pop_handler;
			usbhs_xxxsts_clear(priv, BRDYSTS,
					   usbhs_pipe_number(pipe));
		}

		ret = 0;
	}

usbhsg_ep_enable_end:
	usbhs_unlock(priv, flags);

	return ret;
}
static int usbhsg_try_run_receive_packet(struct usbhsg_uep *uep,
					 struct usbhsg_request *ureq)
{
	struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
	struct usb_request *req = &ureq->req;
	struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
	struct device *dev = usbhsg_gpriv_to_dev(gpriv);
	void *buf;
	int maxp;
	int remainder, recv;
	int is_done = 0;

	/*
	 *********  assume under spin lock  *********
	 */

	maxp = usbhs_pipe_get_maxpacket(pipe);
	buf = req->buf + req->actual;
	remainder = req->length - req->actual;

	recv = usbhs_fifo_read(pipe, buf, remainder);
	/*
	 * recv < 0  : pipe busy
	 * recv >= 0 : received data size
	 *
	 * recv <= max_packet
	 */
	if (recv < 0)
		return -EBUSY;

	/* update parameters */
	req->actual += recv;

	if ((recv == remainder) ||	/* received all data */
	    (recv < maxp))		/* short packet */
		is_done = 1;

	dev_dbg(dev, "  recv %d (%d/ %d/ %d/ %d)\n",
		usbhs_pipe_number(pipe),
		remainder, recv, is_done, req->zero);

	/* read all data ? */
	if (is_done) {
		int disable = 0;

		uep->handler->irq_mask(uep, disable);
		usbhs_fifo_disable(pipe);
		usbhsg_queue_pop(uep, ureq, 0);
	}

	return 0;
}
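/*
 * Worked example (illustrative numbers, not from the source): with
 * maxp = 512 and req->length = 1024, a first usbhs_fifo_read() that
 * returns 512 leaves is_done = 0 (recv == maxp and recv < remainder),
 * so the transfer waits for more data.  A later read that returns the
 * remaining 512 bytes (recv == remainder), or any short packet
 * (recv < maxp), sets is_done = 1, disables the FIFO and pops the
 * request from the queue.
 */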
/*
 *		queue push/pop
 */
static void usbhsg_queue_pop(struct usbhsg_uep *uep,
			     struct usbhsg_request *ureq,
			     int status)
{
	struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
	struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
	struct device *dev = usbhsg_gpriv_to_dev(gpriv);

	dev_dbg(dev, "pipe %d : queue pop\n", usbhs_pipe_number(pipe));

	ureq->req.status = status;
	usb_gadget_giveback_request(&uep->ep, &ureq->req);
}
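/*
 * Note (editorial, not from the source): usb_gadget_giveback_request()
 * is the gadget-core helper that invokes req->complete() on the
 * driver's behalf; it replaces the direct ureq->req.complete() call
 * seen in the older variant of this function further below.
 */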
/*
 *		queue push/pop
 */
static void usbhsg_queue_push(struct usbhsg_uep *uep,
			      struct usbhsg_request *ureq)
{
	struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
	struct device *dev = usbhsg_gpriv_to_dev(gpriv);
	struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
	struct usbhs_pkt *pkt = usbhsg_ureq_to_pkt(ureq);
	struct usb_request *req = &ureq->req;

	req->actual = 0;
	req->status = -EINPROGRESS;
	usbhs_pkt_push(pipe, pkt, uep->handler,
		       req->buf, req->length, req->zero);

	dev_dbg(dev, "pipe %d : queue push (%d)\n",
		usbhs_pipe_number(pipe),
		req->length);
}
/*
 *		list push/pop
 */
static void usbhsg_queue_push(struct usbhsg_uep *uep,
			      struct usbhsg_request *ureq)
{
	struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
	struct device *dev = usbhsg_gpriv_to_dev(gpriv);
	struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);

	/*
	 *********  assume under spin lock  *********
	 */
	list_del_init(&ureq->node);
	list_add_tail(&ureq->node, &uep->list);
	ureq->req.actual = 0;
	ureq->req.status = -EINPROGRESS;

	dev_dbg(dev, "pipe %d : queue push (%d)\n",
		usbhs_pipe_number(pipe),
		ureq->req.length);
}
static int __usbhsg_ep_set_halt_wedge(struct usb_ep *ep, int halt, int wedge)
{
	struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep);
	struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
	struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
	struct device *dev = usbhsg_gpriv_to_dev(gpriv);
	spinlock_t *lock;
	unsigned long flags;
	int ret = -EAGAIN;

	/*
	 * see
	 *   CAUTION [*queue handler*]
	 *   CAUTION [*endpoint queue*]
	 *   CAUTION [*request complete*]
	 */

	/********************  spin lock ********************/
	lock = usbhsg_trylock(gpriv, &flags);

	if (!usbhsg_queue_get(uep)) {

		dev_dbg(dev, "set halt %d (pipe %d)\n",
			halt, usbhs_pipe_number(pipe));

		if (halt)
			usbhs_fifo_stall(pipe);
		else
			usbhs_fifo_disable(pipe);

		if (halt && wedge)
			usbhsg_status_set(gpriv, USBHSG_STATUS_WEDGE);
		else
			usbhsg_status_clr(gpriv, USBHSG_STATUS_WEDGE);

		ret = 0;
	}

	usbhsg_unlock(lock, &flags);
	/********************  spin unlock ******************/

	return ret;
}
static void usbhsg_queue_pop(struct usbhsg_uep *uep,
			     struct usbhsg_request *ureq,
			     int status)
{
	struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
	struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
	struct device *dev = usbhsg_gpriv_to_dev(gpriv);

	/*
	 *********  assume under spin lock  *********
	 */

	/*
	 *  CAUTION [*request complete*]
	 *
	 * Requests may be completed out of order if "complete" is
	 * called without holding the spinlock.
	 *
	 * So this function assumes it runs under the spinlock, and
	 * calls usb_request::complete there.
	 *
	 * But "complete" may push the next usb_request, which means
	 * "usb_ep_ops::queue" (which takes the spinlock) is then
	 * called while the spinlock is already held.
	 *
	 * To avoid this dead-lock, the driver uses usbhsg_trylock.
	 *   CAUTION [*endpoint queue*]
	 *   CAUTION [*queue handler*]
	 */

	dev_dbg(dev, "pipe %d : queue pop\n", usbhs_pipe_number(pipe));

	list_del_init(&ureq->node);

	ureq->req.status = status;
	ureq->req.complete(&uep->ep, &ureq->req);

	/* more request ? */
	if (0 == status)
		usbhsg_queue_prepare(uep);
}
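/*
 * Illustration (hypothetical gadget-side code, not from this driver):
 * the re-entrancy described in the CAUTION comment above happens when
 * a function driver's completion callback immediately queues the next
 * request, e.g.
 *
 *	static void my_complete(struct usb_ep *ep, struct usb_request *req)
 *	{
 *		// runs from usbhsg_queue_pop(), possibly with the
 *		// driver's spinlock already held
 *		usb_ep_queue(ep, req, GFP_ATOMIC);
 *	}
 *
 * which re-enters usb_ep_ops::queue while the lock is held, hence the
 * usbhsg_trylock() approach.
 */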
/*
 *		pipe control functions
 */
static void usbhsp_pipe_select(struct usbhs_pipe *pipe)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);

	/*
	 * Selecting the pipe here is necessary before accessing the
	 * registers below (the helper that touches each is noted):
	 *
	 * PIPESEL	: usbhsp_pipe_select
	 * PIPECFG	: usbhsp_pipe_cfg_xxx
	 * PIPEBUF	: usbhsp_pipe_buf_xxx
	 * PIPEMAXP	: usbhsp_pipe_maxp_xxx
	 * PIPEPERI
	 */

	/*
	 * If the pipe is the DCP, pipe 0 is written, i.e. no pipe is
	 * selected.  That is not a problem, because the DCP has its
	 * own dedicated registers.
	 */
	usbhs_write(priv, PIPESEL, 0xF & usbhs_pipe_number(pipe));
}
static int usbhsf_fifo_select(struct usbhs_pipe *pipe,
			      struct usbhs_fifo *fifo,
			      int write)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct device *dev = usbhs_priv_to_dev(priv);
	int timeout = 1024;
	u16 mask = ((1 << 5) | 0xF);		/* mask of ISEL | CURPIPE */
	u16 base = usbhs_pipe_number(pipe);	/* CURPIPE */

	if (usbhs_pipe_is_busy(pipe) ||
	    usbhsf_fifo_is_busy(fifo))
		return -EBUSY;

	if (usbhs_pipe_is_dcp(pipe)) {
		base |= (1 == write) << 5;	/* ISEL */

		if (usbhs_mod_is_host(priv))
			usbhs_dcp_dir_for_host(pipe, write);
	}

	/* "base" will be used below */
	if (usbhs_get_dparam(priv, has_sudmac) && !usbhsf_is_cfifo(priv, fifo))
		usbhs_write(priv, fifo->sel, base);
	else
		usbhs_write(priv, fifo->sel, base | MBW_32);

	/* check ISEL and CURPIPE value */
	while (timeout--) {
		if (base == (mask & usbhs_read(priv, fifo->sel))) {
			usbhs_pipe_select_fifo(pipe, fifo);
			return 0;
		}
		udelay(10);
	}

	dev_err(dev, "fifo select error\n");

	return -EIO;
}
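/*
 * Illustration (not part of the driver): per the mask above, the bits
 * written to fifo->sel are CURPIPE (bits 3:0, the pipe number) and
 * ISEL (bit 5).  For example, selecting the DCP (pipe 0) for writing
 * gives base = 0x0 | (1 << 5) = 0x20, and the polling loop waits
 * until the controller reports exactly that ISEL/CURPIPE value.
 */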
static int usbhsg_try_run_send_packet(struct usbhsg_uep *uep,
				      struct usbhsg_request *ureq)
{
	struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
	struct usb_request *req = &ureq->req;
	struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
	struct device *dev = usbhsg_gpriv_to_dev(gpriv);
	void *buf;
	int remainder, send;
	int is_done = 0;
	int enable;
	int maxp;

	/*
	 *********  assume under spin lock  *********
	 */

	maxp = usbhs_pipe_get_maxpacket(pipe);
	buf = req->buf + req->actual;
	remainder = req->length - req->actual;

	send = usbhs_fifo_write(pipe, buf, remainder);

	/*
	 * send < 0 : pipe busy
	 * send = 0 : send zero packet
	 * send > 0 : send data
	 *
	 * send <= max_packet
	 */
	if (send > 0)
		req->actual += send;

	/* sent all packets ? */
	if (send < remainder)
		is_done = 0;		/* there is remainder data */
	else if (send < maxp)
		is_done = 1;		/* short packet */
	else
		is_done = !req->zero;	/* send zero packet ? */

	dev_dbg(dev, "  send %d (%d/ %d/ %d/ %d)\n",
		usbhs_pipe_number(pipe),
		remainder, send, is_done, req->zero);

	/*
	 * enable the interrupt and send again from the irq handler
	 * if there is still remainder data to be sent.
	 */
	enable = !is_done;
	uep->handler->irq_mask(uep, enable);

	/*
	 * usbhs_fifo_enable must execute
	 *  - after callback_update,
	 *  - before queue_pop / stage_end
	 */
	usbhs_fifo_enable(pipe);

	/*
	 * all data were sent ?
	 */
	if (is_done) {
		/* the call below is needed in "function mode" */
		if (usbhsg_is_dcp(uep))
			usbhs_dcp_control_transfer_done(pipe);

		usbhsg_queue_pop(uep, ureq, 0);
	}

	return 0;
}
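/*
 * Worked example (illustrative numbers, not from the source): with
 * maxp = 64, req->length = 64 and req->zero = 1, the first
 * usbhs_fifo_write() sends 64 bytes; send == remainder and
 * send == maxp, so is_done = !req->zero = 0 and the interrupt stays
 * enabled.  The next pass has remainder = 0, so the write sends a
 * zero-length packet (send = 0, send < maxp), which sets is_done = 1
 * and completes the request via usbhsg_queue_pop().
 */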