/**
 * hw_ep_flush: flush endpoint fifo (execute without interruption)
 * @ci: the controller
 * @num: endpoint number
 * @dir: endpoint direction
 *
 * This function returns an error code
 */
static int hw_ep_flush(struct ci13xxx *ci, int num, int dir)
{
	int n = hw_ep_bit(num, dir);

	do {
		/* flush any pending transfer */
		hw_write(ci, OP_ENDPTFLUSH, ~0, BIT(n));
		while (hw_read(ci, OP_ENDPTFLUSH, BIT(n)))
			cpu_relax();
	} while (hw_read(ci, OP_ENDPTSTAT, BIT(n)));

	return 0;
}
/**
 * hw_ep_prime: primes endpoint (execute without interruption)
 * @ci: the controller
 * @num: endpoint number
 * @dir: endpoint direction
 * @is_ctrl: true if control endpoint
 *
 * This function returns an error code
 */
static int hw_ep_prime(struct ci13xxx *ci, int num, int dir, int is_ctrl)
{
	int n = hw_ep_bit(num, dir);

	/* a pending setup packet invalidates the prime of an RX control
	 * endpoint; let the caller retry */
	if (is_ctrl && dir == RX && hw_read(ci, OP_ENDPTSETUPSTAT, BIT(num)))
		return -EAGAIN;

	hw_write(ci, OP_ENDPTPRIME, ~0, BIT(n));

	while (hw_read(ci, OP_ENDPTPRIME, BIT(n)))
		cpu_relax();
	if (is_ctrl && dir == RX && hw_read(ci, OP_ENDPTSETUPSTAT, BIT(num)))
		return -EAGAIN;

	/* status should be tested according to the manual, but it doesn't work */
	return 0;
}
/**
 * _hardware_enqueue: configures a request at hardware level
 * @mEp: endpoint
 * @mReq: request
 *
 * This function returns an error code
 */
static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
{
	struct ci13xxx *ci = mEp->ci;
	unsigned i;
	int ret = 0;
	unsigned length = mReq->req.length;

	/* don't queue twice */
	if (mReq->req.status == -EALREADY)
		return -EALREADY;

	mReq->req.status = -EALREADY;

	if (mReq->req.zero && length && (length % mEp->ep.maxpacket == 0)) {
		/* allocate an extra TD for the zero-length packet that
		 * terminates a max-packet-multiple transfer */
		mReq->zptr = dma_pool_alloc(mEp->td_pool, GFP_ATOMIC,
					    &mReq->zdma);
		if (mReq->zptr == NULL)
			return -ENOMEM;
		memset(mReq->zptr, 0, sizeof(*mReq->zptr));
		mReq->zptr->next  = TD_TERMINATE;
		mReq->zptr->token = TD_STATUS_ACTIVE;
		if (!mReq->req.no_interrupt)
			mReq->zptr->token |= TD_IOC;
	}
	ret = usb_gadget_map_request(&ci->gadget, &mReq->req, mEp->dir);
	if (ret)
		return ret;

	/*
	 * TD configuration
	 * TODO - handle requests which span several TDs
	 */
	memset(mReq->ptr, 0, sizeof(*mReq->ptr));
	mReq->ptr->token  = length << ffs_nr(TD_TOTAL_BYTES);
	mReq->ptr->token &= TD_TOTAL_BYTES;
	mReq->ptr->token |= TD_STATUS_ACTIVE;
	if (mReq->zptr) {
		mReq->ptr->next = mReq->zdma;
	} else {
		mReq->ptr->next = TD_TERMINATE;
		if (!mReq->req.no_interrupt)
			mReq->ptr->token |= TD_IOC;
	}
	mReq->ptr->page[0] = mReq->req.dma;
	for (i = 1; i < 5; i++)
		mReq->ptr->page[i] = (mReq->req.dma + i * CI13XXX_PAGE_SIZE) &
			~TD_RESERVED_MASK;

	if (!list_empty(&mEp->qh.queue)) {
		struct ci13xxx_req *mReqPrev;
		int n = hw_ep_bit(mEp->num, mEp->dir);
		int tmp_stat;

		/* link the new TD after the last one already queued */
		mReqPrev = list_entry(mEp->qh.queue.prev,
				struct ci13xxx_req, queue);
		if (mReqPrev->zptr)
			mReqPrev->zptr->next = mReq->dma & TD_ADDR_MASK;
		else
			mReqPrev->ptr->next = mReq->dma & TD_ADDR_MASK;
		wmb();
		if (hw_read(ci, OP_ENDPTPRIME, BIT(n)))
			goto done;
		do {
			/* ATDTW tripwire: take a consistent snapshot of
			 * ENDPTSTAT while the controller may be fetching TDs */
			hw_write(ci, OP_USBCMD, USBCMD_ATDTW, USBCMD_ATDTW);
			tmp_stat = hw_read(ci, OP_ENDPTSTAT, BIT(n));
		} while (!hw_read(ci, OP_USBCMD, USBCMD_ATDTW));
		hw_write(ci, OP_USBCMD, USBCMD_ATDTW, 0);
		if (tmp_stat)
			goto done;
	}

	/* QH configuration */
	mEp->qh.ptr->td.next   = mReq->dma;	/* TERMINATE = 0 */
	mEp->qh.ptr->td.token &= ~TD_STATUS;	/* clear status */
	mEp->qh.ptr->cap      |= QH_ZLT;

	wmb();	 /* synchronize before ep prime */

	ret = hw_ep_prime(ci, mEp->num, mEp->dir,
			  mEp->type == USB_ENDPOINT_XFER_CONTROL);
done:
	return ret;
}