/* * Stops transfer processing for an endpoint and throws away all unprocessed * TRBs by setting the xHC's dequeue pointer to our enqueue pointer. The next * xhci_bulk_tx/xhci_ctrl_tx on this enpoint will add new transfers there and * ring the doorbell, causing this endpoint to start working again. * (Careful: This will BUG() when there was no transfer in progress. Shouldn't * happen in practice for current uses and is too complicated to fix right now.) */ static void abort_td(struct usb_device *udev, int ep_index) { struct xhci_ctrl *ctrl = udev->controller; struct xhci_ring *ring = ctrl->devs[udev->slot_id]->eps[ep_index].ring; union xhci_trb *event; u32 field; xhci_queue_command(ctrl, NULL, udev->slot_id, ep_index, TRB_STOP_RING); event = xhci_wait_for_event(ctrl, TRB_TRANSFER); field = le32_to_cpu(event->trans_event.flags); BUG_ON(TRB_TO_SLOT_ID(field) != udev->slot_id); BUG_ON(TRB_TO_EP_INDEX(field) != ep_index); BUG_ON(GET_COMP_CODE(le32_to_cpu(event->trans_event.transfer_len != COMP_STOP))); xhci_acknowledge_event(ctrl); event = xhci_wait_for_event(ctrl, TRB_COMPLETION); BUG_ON(TRB_TO_SLOT_ID(le32_to_cpu(event->event_cmd.flags)) != udev->slot_id || GET_COMP_CODE(le32_to_cpu( event->event_cmd.status)) != COMP_SUCCESS); xhci_acknowledge_event(ctrl); xhci_queue_command(ctrl, (void *)((uintptr_t)ring->enqueue | ring->cycle_state), udev->slot_id, ep_index, TRB_SET_DEQ); event = xhci_wait_for_event(ctrl, TRB_COMPLETION); BUG_ON(TRB_TO_SLOT_ID(le32_to_cpu(event->event_cmd.flags)) != udev->slot_id || GET_COMP_CODE(le32_to_cpu( event->event_cmd.status)) != COMP_SUCCESS); xhci_acknowledge_event(ctrl); }
static void record_transfer_result(struct usb_device *udev, union xhci_trb *event, int length) { udev->act_len = min(length, length - EVENT_TRB_LEN(le32_to_cpu(event->trans_event.transfer_len))); switch (GET_COMP_CODE(le32_to_cpu(event->trans_event.transfer_len))) { case COMP_SUCCESS: BUG_ON(udev->act_len != length); /* fallthrough */ case COMP_SHORT_TX: udev->status = 0; break; case COMP_STALL: udev->status = USB_ST_STALLED; break; case COMP_DB_ERR: case COMP_TRB_ERR: udev->status = USB_ST_BUF_ERR; break; case COMP_BABBLE: udev->status = USB_ST_BABBLE_DET; break; default: udev->status = 0x80; /* USB_ST_TOO_LAZY_TO_MAKE_A_NEW_MACRO */ } }
/**
 * Waits for a specific type of event and returns it. Discards unexpected
 * events. Caller *must* call xhci_acknowledge_event() after it is finished
 * processing the event, and must not access the returned pointer afterwards.
 *
 * @param ctrl		Host controller data structure
 * @param expected	TRB type expected from Event TRB
 * @return pointer to event trb; NULL on timeout, but only for TRB_TRANSFER
 *	   (any other expected type BUG()s on timeout)
 */
union xhci_trb *xhci_wait_for_event(struct xhci_ctrl *ctrl, trb_type expected)
{
	trb_type type;
	unsigned long ts = get_timer(0);
	int retry = 50;

try_again:
	do {
		union xhci_trb *event = ctrl->event_ring->dequeue;

		if (!event_ready(ctrl))
			continue;

		type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->event_cmd.flags));
		if (type == expected)
			return event;

		if (type == TRB_PORT_STATUS) {
			/* TODO: remove this once enumeration has been reworked */
			/*
			 * Port status change events always have a
			 * successful completion code
			 */
			BUG_ON(GET_COMP_CODE(
				le32_to_cpu(event->generic.field[2])) !=
								COMP_SUCCESS);
		} else {
			printf("Unexpected XHCI event TRB, skipping... "
				"(%08x %08x %08x %08x)\n",
				le32_to_cpu(event->generic.field[0]),
				le32_to_cpu(event->generic.field[1]),
				le32_to_cpu(event->generic.field[2]),
				le32_to_cpu(event->generic.field[3]));
		}

		xhci_acknowledge_event(ctrl);
	} while (get_timer(ts) < (XHCI_TIMEOUT * 50000));

	/*
	 * The timer base (ts) is deliberately NOT reset here, so once the
	 * deadline has passed each retry pass polls the event ring at least
	 * once more (do-while body runs before the condition) before the
	 * retry budget is exhausted.
	 */
	retry--;
	if (retry > 0)
		goto try_again;

	/* Transfer timeouts are recoverable — the caller aborts the TD */
	if (expected == TRB_TRANSFER)
		return NULL;

	printf("\n\n\nXHCI timeout on event type %d... cannot recover.\n\n\n",
		expected);
	BUG();
}
/** * Debug a transfer request block (TRB). */ void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb) { u64 address; u32 type = le32_to_cpu(trb->link.control) & TRB_TYPE_BITMASK; switch (type) { case TRB_TYPE(TRB_LINK): xhci_dbg(xhci, "Link TRB:\n"); xhci_print_trb_offsets(xhci, trb); address = le64_to_cpu(trb->link.segment_ptr); xhci_dbg(xhci, "Next ring segment DMA address = 0x%llx\n", address); xhci_dbg(xhci, "Interrupter target = 0x%x\n", GET_INTR_TARGET(le32_to_cpu(trb->link.intr_target))); xhci_dbg(xhci, "Cycle bit = %u\n", le32_to_cpu(trb->link.control) & TRB_CYCLE); xhci_dbg(xhci, "Toggle cycle bit = %u\n", le32_to_cpu(trb->link.control) & LINK_TOGGLE); xhci_dbg(xhci, "No Snoop bit = %u\n", le32_to_cpu(trb->link.control) & TRB_NO_SNOOP); break; case TRB_TYPE(TRB_TRANSFER): address = le64_to_cpu(trb->trans_event.buffer); /* * FIXME: look at flags to figure out if it's an address or if * the data is directly in the buffer field. */ xhci_dbg(xhci, "DMA address or buffer contents= %llu\n", address); break; case TRB_TYPE(TRB_COMPLETION): address = le64_to_cpu(trb->event_cmd.cmd_trb); xhci_dbg(xhci, "Command TRB pointer = %llu\n", address); xhci_dbg(xhci, "Completion status = %u\n", GET_COMP_CODE(le32_to_cpu(trb->event_cmd.status))); xhci_dbg(xhci, "Flags = 0x%x\n", le32_to_cpu(trb->event_cmd.flags)); break; default: xhci_dbg(xhci, "Unknown TRB with TRB type ID %u\n", (unsigned int) type>>10); xhci_print_trb_offsets(xhci, trb); break; } }
/** * Queues up the Control Transfer Request * * @param udev pointer to the USB device structure * @param pipe contains the DIR_IN or OUT , devnum * @param req request type * @param length length of the buffer * @param buffer buffer to be read/written based on the request * @return returns 0 if successful else error code on failure */ int xhci_ctrl_tx(struct usb_device *udev, unsigned long pipe, struct devrequest *req, int length, void *buffer) { int ret; int start_cycle; int num_trbs; u32 field; u32 length_field; u64 buf_64 = 0; struct xhci_generic_trb *start_trb; struct xhci_ctrl *ctrl = udev->controller; int slot_id = udev->slot_id; int ep_index; u32 trb_fields[4]; struct xhci_virt_device *virt_dev = ctrl->devs[slot_id]; struct xhci_ring *ep_ring; union xhci_trb *event; XHCI_RING_PRINTF("req=%u (%#x), type=%u (%#x), value=%u (%#x), index=%u\n", req->request, req->request, req->requesttype, req->requesttype, le16_to_cpu(req->value), le16_to_cpu(req->value), le16_to_cpu(req->index)); ep_index = usb_pipe_ep_index(pipe); ep_ring = virt_dev->eps[ep_index].ring; /* * Check to see if the max packet size for the default control * endpoint changed during FS device enumeration */ if (udev->speed == USB_SPEED_FULL) { ret = xhci_check_maxpacket(udev); if (ret < 0) return ret; } xhci_inval_cache((uint32_t)virt_dev->out_ctx->bytes, virt_dev->out_ctx->size); struct xhci_ep_ctx *ep_ctx = NULL; ep_ctx = xhci_get_ep_ctx(ctrl, virt_dev->out_ctx, ep_index); /* 1 TRB for setup, 1 for status */ num_trbs = 2; /* * Don't need to check if we need additional event data and normal TRBs, * since data in control transfers will never get bigger than 16MB * XXX: can we get a buffer that crosses 64KB boundaries? */ if (length > 0) num_trbs++; /* * XXX: Calling routine prepare_ring() called in place of * prepare_trasfer() as there in 'Linux' since we are not * maintaining multiple TDs/transfer at the same time. 
*/ ret = prepare_ring(ctrl, ep_ring, le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK); if (ret < 0) return ret; /* * Don't give the first TRB to the hardware (by toggling the cycle bit) * until we've finished creating all the other TRBs. The ring's cycle * state may change as we enqueue the other TRBs, so save it too. */ start_trb = &ep_ring->enqueue->generic; start_cycle = ep_ring->cycle_state; XHCI_RING_PRINTF("start_trb %p, start_cycle %d\n", start_trb, start_cycle); /* Queue setup TRB - see section 6.4.1.2.1 */ /* FIXME better way to translate setup_packet into two u32 fields? */ field = 0; field |= TRB_IDT | (TRB_SETUP << TRB_TYPE_SHIFT); if (start_cycle == 0) field |= 0x1; /* xHCI 1.0 6.4.1.2.1: Transfer Type field */ //if (HC_VERSION(xhci_readl(&ctrl->hccr->cr_capbase)) == 0x100) { // XHCI_MTK if (1) { if (length > 0) { if (req->requesttype & USB_DIR_IN) field |= (TRB_DATA_IN << TRB_TX_TYPE_SHIFT); else field |= (TRB_DATA_OUT << TRB_TX_TYPE_SHIFT); } } XHCI_RING_PRINTF("req->requesttype = %d, req->request = %d," "le16_to_cpu(req->value) = %d," "le16_to_cpu(req->index) = %d," "le16_to_cpu(req->length) = %d\n", req->requesttype, req->request, le16_to_cpu(req->value), le16_to_cpu(req->index), le16_to_cpu(req->length)); trb_fields[0] = req->requesttype | req->request << 8 | le16_to_cpu(req->value) << 16; trb_fields[1] = le16_to_cpu(req->index) | le16_to_cpu(req->length) << 16; /* TRB_LEN | (TRB_INTR_TARGET) */ trb_fields[2] = (8 | ((0 & TRB_INTR_TARGET_MASK) << TRB_INTR_TARGET_SHIFT)); /* Immediate data in pointer */ trb_fields[3] = field; queue_trb(ctrl, ep_ring, true, trb_fields); /* Re-initializing field to zero */ field = 0; /* If there's data, queue data TRBs */ /* Only set interrupt on short packet for IN endpoints */ if (usb_pipein(pipe)) field = TRB_ISP | (TRB_DATA << TRB_TYPE_SHIFT); else field = (TRB_DATA << TRB_TYPE_SHIFT); length_field = (length & TRB_LEN_MASK) | 0 | //length_field = (length & TRB_LEN_MASK) | xhci_td_remainder(length) | ((0 & 
TRB_INTR_TARGET_MASK) << TRB_INTR_TARGET_SHIFT); //XHCI_RING_PRINTF("length_field = %d, length = %d," // "xhci_td_remainder(length) = %d , TRB_INTR_TARGET(0) = %d\n", // length_field, (length & TRB_LEN_MASK), // xhci_td_remainder(length), 0); if (length > 0) { if (req->requesttype & USB_DIR_IN) field |= TRB_DIR_IN; buf_64 = (uintptr_t)buffer; trb_fields[0] = lower_32_bits(buf_64); trb_fields[1] = upper_32_bits(buf_64); trb_fields[2] = length_field; trb_fields[3] = field | ep_ring->cycle_state; xhci_flush_cache((uint32_t)buffer, length); queue_trb(ctrl, ep_ring, true, trb_fields); } /* * Queue status TRB - * see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */ /* If the device sent data, the status stage is an OUT transfer */ field = 0; if (length > 0 && req->requesttype & USB_DIR_IN) field = 0; else field = TRB_DIR_IN; trb_fields[0] = 0; trb_fields[1] = 0; trb_fields[2] = ((0 & TRB_INTR_TARGET_MASK) << TRB_INTR_TARGET_SHIFT); /* Event on completion */ trb_fields[3] = field | TRB_IOC | (TRB_STATUS << TRB_TYPE_SHIFT) | ep_ring->cycle_state; queue_trb(ctrl, ep_ring, false, trb_fields); giveback_first_trb(udev, ep_index, start_cycle, start_trb); event = xhci_wait_for_event(ctrl, TRB_TRANSFER); if (!event) goto abort; field = le32_to_cpu(event->trans_event.flags); BUG_ON(TRB_TO_SLOT_ID(field) != slot_id); BUG_ON(TRB_TO_EP_INDEX(field) != ep_index); record_transfer_result(udev, event, length); xhci_acknowledge_event(ctrl); /* Invalidate buffer to make it available to usb-core */ if (length > 0) xhci_inval_cache((uint32_t)buffer, length); if (GET_COMP_CODE(le32_to_cpu(event->trans_event.transfer_len)) == COMP_SHORT_TX) { /* Short data stage, clear up additional status stage event */ event = xhci_wait_for_event(ctrl, TRB_TRANSFER); if (!event) goto abort; BUG_ON(TRB_TO_SLOT_ID(field) != slot_id); BUG_ON(TRB_TO_EP_INDEX(field) != ep_index); xhci_acknowledge_event(ctrl); } return (udev->status != USB_ST_NOT_PROC) ? 
0 : -1; abort: XHCI_RING_PRINTF("XHCI control transfer timed out, aborting...\n"); abort_td(udev, ep_index); udev->status = USB_ST_NAK_REC; udev->act_len = 0; return -ETIMEDOUT; }
/** * Debug a transfer request block (TRB). */ void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb) { u64 address; #if 0 /* original code */ u32 type = xhci_readl(xhci, &trb->link.control) & TRB_TYPE_BITMASK; #else /* 2010/6/28,2010/7/16 modified by Panasonic for little-endian access to the data structures in host memory */ u32 type = xhci_desc_readl(xhci, &trb->link.control) & TRB_TYPE_BITMASK; #endif switch (type) { case TRB_TYPE(TRB_LINK): xhci_dbg(xhci, "Link TRB:\n"); xhci_print_trb_offsets(xhci, trb); #if 0 /* original code */ address = trb->link.segment_ptr; xhci_dbg(xhci, "Next ring segment DMA address = 0x%llx\n", address); xhci_dbg(xhci, "Interrupter target = 0x%x\n", GET_INTR_TARGET(trb->link.intr_target)); xhci_dbg(xhci, "Cycle bit = %u\n", (unsigned int) (trb->link.control & TRB_CYCLE)); xhci_dbg(xhci, "Toggle cycle bit = %u\n", (unsigned int) (trb->link.control & LINK_TOGGLE)); xhci_dbg(xhci, "No Snoop bit = %u\n", (unsigned int) (trb->link.control & TRB_NO_SNOOP)); #else /* 2010/6/28,2010/7/16 modified by Panasonic for little-endian access to the data structures in host memory */ address = xhci_desc_read_64(xhci, &trb->link.segment_ptr); xhci_dbg(xhci, "Next ring segment DMA address = 0x%llx\n", address); xhci_dbg(xhci, "Interrupter target = 0x%x\n", GET_INTR_TARGET(xhci_desc_readl(xhci, &trb->link.intr_target))); xhci_dbg(xhci, "Cycle bit = %u\n", (unsigned int) (xhci_desc_readl(xhci, &trb->link.control) & TRB_CYCLE)); xhci_dbg(xhci, "Toggle cycle bit = %u\n", (unsigned int) (xhci_desc_readl(xhci, &trb->link.control) & LINK_TOGGLE)); xhci_dbg(xhci, "No Snoop bit = %u\n", (unsigned int) (xhci_desc_readl(xhci, &trb->link.control) & TRB_NO_SNOOP)); #endif break; case TRB_TYPE(TRB_TRANSFER): #if 0 /* original code */ address = trb->trans_event.buffer; #else /* 2010/6/28,2010/7/16 modified by Panasonic for little-endian access to the data structures in host memory */ address = xhci_desc_read_64(xhci, &trb->trans_event.buffer); #endif /* * 
FIXME: look at flags to figure out if it's an address or if * the data is directly in the buffer field. */ xhci_dbg(xhci, "DMA address or buffer contents= %llu\n", address); break; case TRB_TYPE(TRB_COMPLETION): #if 0 /* original code */ address = trb->event_cmd.cmd_trb; xhci_dbg(xhci, "Command TRB pointer = %llu\n", address); xhci_dbg(xhci, "Completion status = %u\n", (unsigned int) GET_COMP_CODE(trb->event_cmd.status)); xhci_dbg(xhci, "Flags = 0x%x\n", (unsigned int) trb->event_cmd.flags); #else /* 2010/6/28,2010/7/16 modified by Panasonic for little-endian access to the data structures in host memory */ address = xhci_desc_read_64(xhci, &trb->event_cmd.cmd_trb); xhci_dbg(xhci, "Command TRB pointer = %llu\n", address); xhci_dbg(xhci, "Completion status = %u\n", (unsigned int) GET_COMP_CODE(xhci_desc_readl(xhci, &trb->event_cmd.status))); xhci_dbg(xhci, "Flags = 0x%x\n", (unsigned int) xhci_desc_readl(xhci, &trb->event_cmd.flags)); #endif break; default: xhci_dbg(xhci, "Unknown TRB with TRB type ID %u\n", (unsigned int) type>>10); xhci_print_trb_offsets(xhci, trb); break; } }