/* Issue a Disable Slot command for the given slot and wait for completion. */
int
xhci_cmd_disable_slot(xhci_t *const xhci, const int slot_id)
{
	trb_t *const trb = xhci_next_command_trb(xhci);
	TRB_SET(ID, trb, slot_id);
	TRB_SET(TT, trb, TRB_CMD_DISABLE_SLOT);
	xhci_post_command(xhci);
	return xhci_wait_for_command(xhci, trb, 1);
}
/* Issue a Stop Endpoint command for the given slot/endpoint pair and wait
   for its completion. */
int
xhci_cmd_stop_endpoint(xhci_t *const xhci, const int slot_id, const int ep)
{
	trb_t *const stop = xhci_next_command_trb(xhci);
	TRB_SET(ID, stop, slot_id);
	TRB_SET(EP, stop, ep);
	TRB_SET(TT, stop, TRB_CMD_STOP_EP);
	xhci_post_command(xhci);
	return xhci_wait_for_command(xhci, stop, 1);
}
/* read one intr-packet from queue, if available. extend the queue for new
   input. return NULL if nothing new available.
   Recommended use: while (data=poll_intr_queue(q)) process(data); */
static u8 *
xhci_poll_intr_queue(void *const q)
{
	if (!q)
		return NULL;

	intrq_t *const intrq = (intrq_t *)q;
	endpoint_t *const ep = intrq->ep;
	xhci_t *const xhci = XHCI_INST(ep->dev->controller);

	/* TODO: Reset interrupt queue if it gets halted? */

	/* Process pending events first so intrq->ready reflects the latest
	   completed transfers (xhci_handle_transfer_event updates it). */
	xhci_handle_events(xhci);

	u8 *reqdata = NULL;
	/* Loop until we find a packet with data, or run out of ready TRBs.
	   Each iteration consumes one completed TRB and immediately reposts
	   its buffer so the queue keeps running. */
	while (!reqdata && intrq->ready) {
		const int ep_id = xhci_ep_id(ep);
		transfer_ring_t *const tr =
			xhci->dev[ep->dev->address].transfer_rings[ep_id];

		/* Fetch the request's buffer */
		reqdata = phys_to_virt(intrq->next->ptr_low);
		/* Enqueue the last (spare) TRB and ring doorbell */
		xhci_enqueue_trb(tr);
		xhci->dbreg[ep->dev->address] = ep_id;
		/* Reuse the current buffer for the next spare TRB */
		xhci_clear_trb(tr->cur, tr->pcs);
		tr->cur->ptr_low = virt_to_phys(reqdata);
		tr->cur->ptr_high = 0;
		TRB_SET(TL, tr->cur, intrq->size);
		TRB_SET(TT, tr->cur, TRB_NORMAL);
		TRB_SET(ISP, tr->cur, 1);
		TRB_SET(IOC, tr->cur, 1);

		/* Check if anything was transferred; the event handler stored
		   the actual transfer length in the TRB's TL field. */
		const size_t read = TRB_GET(TL, intrq->next);
		if (!read)
			reqdata = NULL; /* empty packet: keep looking */
		else if (read < intrq->size)
			/* At least zero it, poll interface is rather limited */
			memset(reqdata + read, 0x00, intrq->size - read);

		/* Advance the interrupt queue */
		if (intrq->ready == intrq->next)
			/* This was last TRB being ready */
			intrq->ready = NULL;
		intrq->next = xhci_next_trb(intrq->next, NULL);
	}

	return reqdata;
}
/* Initialize a transfer/command ring as one cyclic segment: clear all TRBs,
   make the final TRB a Link (with Toggle Cycle) back to the start, and reset
   the producer cycle state and enqueue pointer. */
void
xhci_init_cycle_ring(transfer_ring_t *const tr, const size_t ring_size)
{
	trb_t *const last = &tr->ring[ring_size - 1];

	memset((void *)tr->ring, 0, ring_size * sizeof(*tr->ring));

	/* only one segment that points to itself */
	TRB_SET(TT, last, TRB_LINK);
	TRB_SET(TC, last, 1);
	last->ptr_low = virt_to_phys(tr->ring);

	tr->pcs = 1;
	tr->cur = tr->ring;
}
/* Issue a Set TR Dequeue Pointer command: point the endpoint's transfer ring
   dequeue pointer at dq_trb with dequeue cycle state dcs, then wait for
   completion. */
int
xhci_cmd_set_tr_dq(xhci_t *const xhci, const int slot_id, const int ep,
		   trb_t *const dq_trb, const int dcs)
{
	trb_t *const trb = xhci_next_command_trb(xhci);
	/* The DCS bit is carried in the low bit of the pointer. */
	trb->ptr_low = virt_to_phys(dq_trb) | dcs;
	TRB_SET(ID, trb, slot_id);
	TRB_SET(EP, trb, ep);
	TRB_SET(TT, trb, TRB_CMD_SET_TR_DQ);
	xhci_post_command(xhci);
	return xhci_wait_for_command(xhci, trb, 1);
}
/* Issue an Evaluate Context command with the given input context and wait
   for completion. */
int
xhci_cmd_evaluate_context(xhci_t *const xhci, const int slot_id,
			  inputctx_t *const ic)
{
	trb_t *const trb = xhci_next_command_trb(xhci);
	trb->ptr_low = virt_to_phys(ic->raw);
	TRB_SET(ID, trb, slot_id);
	TRB_SET(TT, trb, TRB_CMD_EVAL_CTX);
	xhci_post_command(xhci);
	return xhci_wait_for_command(xhci, trb, 1);
}
/* Issue an Address Device command with the given input context and wait
   for completion. */
int
xhci_cmd_address_device(xhci_t *const xhci, const int slot_id,
			inputctx_t *const ic)
{
	trb_t *const trb = xhci_next_command_trb(xhci);
	trb->ptr_low = virt_to_phys(ic->raw);
	TRB_SET(ID, trb, slot_id);
	TRB_SET(TT, trb, TRB_CMD_ADDRESS_DEV);
	xhci_post_command(xhci);
	return xhci_wait_for_command(xhci, trb, 1);
}
/* Issue a Configure Endpoint command with the given input context; a
   config_id of 0 turns it into a deconfigure (DC flag). Waits for
   completion. */
int
xhci_cmd_configure_endpoint(xhci_t *const xhci, const int slot_id,
			    const int config_id, inputctx_t *const ic)
{
	trb_t *const trb = xhci_next_command_trb(xhci);
	trb->ptr_low = virt_to_phys(ic->raw);
	TRB_SET(ID, trb, slot_id);
	if (!config_id)
		TRB_SET(DC, trb, 1); /* deconfigure the device */
	TRB_SET(TT, trb, TRB_CMD_CONFIGURE_EP);
	xhci_post_command(xhci);
	return xhci_wait_for_command(xhci, trb, 1);
}
/* Hand the just-filled TRB at tr->cur to the controller by setting its cycle
   bit, then advance tr->cur, following any Link TRBs. The chain bit of the
   enqueued TRB is propagated into each Link TRB so a chained TD stays intact
   across the ring wrap; a Toggle Cycle link flips the producer cycle state. */
static void
xhci_enqueue_trb(transfer_ring_t *const tr)
{
	const int chain_bit = TRB_GET(CH, tr->cur);

	TRB_SET(C, tr->cur, tr->pcs);
	++tr->cur;

	while (TRB_GET(TT, tr->cur) == TRB_LINK) {
		xhci_spew("Handling LINK pointer\n");
		trb_t *const link = tr->cur;
		const int toggle = TRB_GET(TC, link);
		TRB_SET(CH, link, chain_bit);
		TRB_SET(C, link, tr->pcs);
		tr->cur = phys_to_virt(link->ptr_low);
		if (toggle)
			tr->pcs ^= 1;
	}
}
/* Enqueue one transfer descriptor for dalen bytes at data: the buffer is
   split into chained TRBs at 64KiB boundaries, followed by a final Event
   Data TRB with IOC set. For ep == 1 the first TRB is emitted as a Data
   Stage TRB with the given direction (control transfer data stage);
   otherwise all TRBs are Normal TRBs. mps is the endpoint's max packet
   size, used to compute the TD Size (remaining packets) field. */
static void
xhci_enqueue_td(transfer_ring_t *const tr, const int ep, const size_t mps,
		const int dalen, void *const data, const int dir)
{
	trb_t *trb = NULL;				/* cur TRB */
	u8 *cur_start = data;				/* cur data pointer */
	size_t length = dalen;				/* remaining bytes */
	size_t packets = (length + mps - 1) / mps;	/* remaining packets */
	size_t residue = 0;				/* residue from last TRB */
	size_t trb_count = 0;				/* TRBs added so far */

	while (length || !trb_count /* enqueue at least one */) {
		/* This TRB's chunk ends at the next 64KiB boundary. */
		const size_t cur_end = ((size_t)cur_start + 0x10000) & ~0xffff;

		size_t cur_length = cur_end - (size_t)cur_start;
		if (length < cur_length) {
			/* Last TRB: takes whatever is left. */
			cur_length = length;
			packets = 0;
			length = 0;
		} else {
			/* Account for the packets covered by this TRB;
			   residue carries the partial packet across TRBs so
			   the remaining-packets count stays exact. */
			packets -= (residue + cur_length) / mps;
			residue = (residue + cur_length) % mps;
			length -= cur_length;
		}

		trb = tr->cur;
		xhci_clear_trb(trb, tr->pcs);
		trb->ptr_low = virt_to_phys(cur_start);
		TRB_SET(TL, trb, cur_length);	/* transfer length */
		TRB_SET(TDS, trb, packets);	/* TD size: packets still to go */
		TRB_SET(CH, trb, 1);		/* chain all TRBs into one TD */

		/* Check for first, data stage TRB */
		if (!trb_count && ep == 1) {
			TRB_SET(DIR, trb, dir);
			TRB_SET(TT, trb, TRB_DATA_STAGE);
		} else {
			TRB_SET(TT, trb, TRB_NORMAL);
		}

		xhci_enqueue_trb(tr);
		cur_start += cur_length;
		++trb_count;
	}

	/* Terminate the TD with an Event Data TRB that raises the completion
	   interrupt (the data TRBs themselves have IOC clear). */
	trb = tr->cur;
	xhci_clear_trb(trb, tr->pcs);
	trb->ptr_low = virt_to_phys(trb); /* for easier debugging only */
	TRB_SET(TT, trb, TRB_EVENT_DATA);
	TRB_SET(IOC, trb, 1);

	xhci_enqueue_trb(tr);
}
/* Consume one Transfer Event TRB from the event ring. If it belongs to a
   running interrupt queue, mark the referenced transfer TRB as ready and
   store the actually transferred byte count in its TL field (0 on error)
   for xhci_poll_intr_queue() to pick up. Forced-stop events are ignored;
   anything else is logged as spurious. Always advances the event ring. */
static void
xhci_handle_transfer_event(xhci_t *const xhci)
{
	const trb_t *const ev = xhci->er.cur;

	const int cc = TRB_GET(CC, ev);	/* completion code */
	const int id = TRB_GET(ID, ev);	/* slot id */
	const int ep = TRB_GET(EP, ev);	/* endpoint id */

	devinfo_t *di;
	intrq_t *intrq;

	if (id && id <= xhci->max_slots_en &&
	    (di = DEVINFO_FROM_XHCI(xhci, id)) &&
	    (intrq = di->interrupt_queues[ep])) {
		/* It's a running interrupt endpoint */
		/* The event's pointer identifies the completed transfer TRB. */
		intrq->ready = phys_to_virt(ev->ptr_low);
		if (cc == CC_SUCCESS || cc == CC_SHORT_PACKET) {
			/* Actual length = requested size minus the event's
			   residual length (EVTL). Stored in the TRB's now
			   otherwise unused TL field. */
			TRB_SET(TL, intrq->ready,
				intrq->size - TRB_GET(EVTL, ev));
		} else {
			xhci_debug("Interrupt Transfer failed: %d\n", cc);
			TRB_SET(TL, intrq->ready, 0);
		}
	} else if (cc == CC_STOPPED || cc == CC_STOPPED_LENGTH_INVALID) {
		/* Ignore 'Forced Stop Events' */
	} else {
		xhci_debug("Warning: "
			   "Spurious transfer event for ID %d, EP %d:\n"
			   " Pointer: 0x%08x%08x\n"
			   " TL: 0x%06x\n"
			   " CC: %d\n",
			   id, ep,
			   ev->ptr_high, ev->ptr_low,
			   TRB_GET(EVTL, ev), cc);
	}

	xhci_advance_event_ring(xhci);
}
/* Hand the prepared command TRB at cr.cur to the controller: set its cycle
   bit to the producer cycle state, ring the command doorbell, and advance
   cr.cur past any Link TRBs (toggling pcs on a Toggle Cycle link) so it
   points at the next free command slot. */
void
xhci_post_command(xhci_t *const xhci)
{
	xhci_debug("Command %d (@%p)\n",
		   TRB_GET(TT, xhci->cr.cur), xhci->cr.cur);

	/* Setting the cycle bit transfers TRB ownership to the controller. */
	TRB_SET(C, xhci->cr.cur, xhci->cr.pcs);
	++xhci->cr.cur;

	/* pass command trb to hardware */
	/* The write barrier must come before the doorbell write: the TRB
	   contents have to be globally visible before the controller is
	   told to fetch them. */
	wmb();
	/* Ring the doorbell */
	xhci->dbreg[0] = 0; /* doorbell 0 targets the command ring */

	while (TRB_GET(TT, xhci->cr.cur) == TRB_LINK) {
		xhci_debug("Handling LINK pointer (@%p)\n", xhci->cr.cur);
		const int tc = TRB_GET(TC, xhci->cr.cur);
		TRB_SET(C, xhci->cr.cur, xhci->cr.pcs);
		xhci->cr.cur = phys_to_virt(xhci->cr.cur->ptr_low);
		if (tc)
			xhci->cr.pcs ^= 1;
	}
}
/* Issue an Enable Slot command; on success store the newly assigned slot id
   in *slot_id. Returns the command completion code (CC_SUCCESS on success,
   CONTROLLER_ERROR if the returned slot id exceeds max_slots_en) or a
   negative value from xhci_wait_for_command(). */
int
xhci_cmd_enable_slot(xhci_t *const xhci, int *const slot_id)
{
	trb_t *const cmd = xhci_next_command_trb(xhci);
	TRB_SET(TT, cmd, TRB_CMD_ENABLE_SLOT);
	xhci_post_command(xhci);

	/* NOTE(review): last argument is 0 here (other commands pass 1) —
	   presumably "don't clear the event", since the slot id must still
	   be read from the completion event at er.cur below; confirm against
	   xhci_wait_for_command(). */
	int cc = xhci_wait_for_command(xhci, cmd, 0);
	if (cc >= 0) {
		if (cc == CC_SUCCESS) {
			/* The slot id comes from the completion event. */
			*slot_id = TRB_GET(ID, xhci->er.cur);
			if (*slot_id > xhci->max_slots_en)
				cc = CONTROLLER_ERROR;
		}
		/* Only now may the event ring advance past the event. */
		xhci_advance_event_ring(xhci);
		xhci_handle_events(xhci);
	}
	return cc;
}
/* create and hook-up an intr queue into device schedule */
/* Prepares reqcount Normal TRBs (each with its own reqsize-byte buffer, ISP
   and IOC set) on the endpoint's transfer ring, enqueues all but the last
   one (kept as a spare for xhci_poll_intr_queue to recycle), and rings the
   doorbell. Returns the opaque intrq_t pointer, or NULL on any failure
   (with all allocations rolled back). */
static void *
xhci_create_intr_queue(endpoint_t *const ep,
		       const int reqsize, const int reqcount,
		       const int reqtiming)
{
	/* reqtiming: We ignore it and use the interval from the
	   endpoint descriptor configured earlier. */

	xhci_t *const xhci = XHCI_INST(ep->dev->controller);
	const int slot_id = ep->dev->address;
	const int ep_id = xhci_ep_id(ep);
	transfer_ring_t *const tr =
		xhci->dev[slot_id].transfer_rings[ep_id];

	if (reqcount > (TRANSFER_RING_SIZE - 2)) {
		xhci_debug("reqcount is too high, at most %d supported\n",
			   TRANSFER_RING_SIZE - 2);
		return NULL;
	}
	if (reqsize > 0x10000) {
		xhci_debug("reqsize is too large, at most 64KiB supported\n");
		return NULL;
	}
	if (xhci->dev[slot_id].interrupt_queues[ep_id]) {
		xhci_debug("Only one interrupt queue per endpoint supported\n");
		return NULL;
	}

	/* Allocate intrq structure and reqdata chunks */

	intrq_t *const intrq = malloc(sizeof(*intrq));
	if (!intrq) {
		xhci_debug("Out of memory\n");
		return NULL;
	}

	int i;
	int pcs = tr->pcs;
	trb_t *cur = tr->cur;
	/* Fill reqcount TRBs with fresh buffers; nothing is handed to the
	   controller yet (pcs/cur are tracked locally, tr is untouched). */
	for (i = 0; i < reqcount; ++i) {
		/* NOTE(review): cycle bit == pcs appears to mean this slot
		   is still owned by the controller, i.e. the ring is full —
		   confirm against the ring's cycle-bit convention. */
		if (TRB_GET(C, cur) == pcs) {
			xhci_debug("Not enough empty TRBs\n");
			goto _free_return;
		}
		void *const reqdata = xhci_align(1, reqsize);
		if (!reqdata) {
			xhci_debug("Out of memory\n");
			goto _free_return;
		}
		xhci_clear_trb(cur, pcs);
		cur->ptr_low = virt_to_phys(reqdata);
		cur->ptr_high = 0;
		TRB_SET(TL, cur, reqsize);
		TRB_SET(TT, cur, TRB_NORMAL);
		TRB_SET(ISP, cur, 1);	/* interrupt on short packet */
		TRB_SET(IOC, cur, 1);	/* interrupt on completion */

		cur = xhci_next_trb(cur, &pcs);
	}

	intrq->size = reqsize;
	intrq->count = reqcount;
	intrq->next = tr->cur;	/* first TRB to be completed */
	intrq->ready = NULL;	/* nothing completed yet */
	intrq->ep = ep;
	xhci->dev[slot_id].interrupt_queues[ep_id] = intrq;

	/* Now enqueue all the prepared TRBs but the last
	   and ring the doorbell. */
	for (i = 0; i < (reqcount - 1); ++i)
		xhci_enqueue_trb(tr);
	xhci->dbreg[slot_id] = ep_id;

	return intrq;

_free_return:
	/* Roll back: free the buffers of the i TRBs prepared so far,
	   then the intrq itself. */
	cur = tr->cur;
	for (--i; i >= 0; --i) {
		free(phys_to_virt(cur->ptr_low));
		cur = xhci_next_trb(cur, NULL);
	}
	free(intrq);
	return NULL;
}
/* Perform a control transfer on the device's default endpoint (EP0):
   setup stage, optional data stage, status stage. Bounces non-DMA-coherent
   caller buffers through xhci->dma_buffer. Returns the number of bytes
   transferred, or a negative error code.
   NOTE(review): drlen is unused — the setup packet length is fixed at 8. */
static int
xhci_control(usbdev_t *const dev, const direction_t dir,
	     const int drlen, void *const devreq,
	     const int dalen, unsigned char *const src)
{
	unsigned char *data = src;
	xhci_t *const xhci = XHCI_INST(dev->controller);
	epctx_t *const epctx = xhci->dev[dev->address].ctx.ep0;
	transfer_ring_t *const tr =
		xhci->dev[dev->address].transfer_rings[1];

	/* Worst case: the data's page offset plus its length must fit into
	   the TRBs available on the ring (each covers at most 64KiB). */
	const size_t off = (size_t)data & 0xffff;
	if ((off + dalen) > ((TRANSFER_RING_SIZE - 4) << 16)) {
		xhci_debug("Unsupported transfer size\n");
		return -1;
	}

	/* Reset endpoint if it's halted */
	/* NOTE(review): states 2 and 4 look like Halted and Error per the
	   xHCI endpoint context state field — confirm against EC_GET/spec. */
	const unsigned ep_state = EC_GET(STATE, epctx);
	if (ep_state == 2 || ep_state == 4) {
		if (xhci_reset_endpoint(dev, NULL, 0))
			return -1;
	}

	/* Bounce through the internal DMA buffer if the caller's buffer is
	   not DMA-coherent. */
	if (dalen && !dma_coherent(src)) {
		data = xhci->dma_buffer;
		if (dalen > DMA_SIZE) {
			xhci_debug("Control transfer too large: %d\n", dalen);
			return -1;
		}
		if (dir == OUT)
			memcpy(data, src, dalen);
	}

	/* Fill and enqueue setup TRB */
	trb_t *const setup = tr->cur;
	xhci_clear_trb(setup, tr->pcs);
	/* IDT: the 8-byte setup packet is stored immediately in the TRB's
	   pointer field rather than referenced by address. */
	setup->ptr_low = ((u32 *)devreq)[0];
	setup->ptr_high = ((u32 *)devreq)[1];
	TRB_SET(TL, setup, 8);
	TRB_SET(TRT, setup, (dalen)
			? ((dir == OUT) ? TRB_TRT_OUT_DATA : TRB_TRT_IN_DATA)
			: TRB_TRT_NO_DATA);
	TRB_SET(TT, setup, TRB_SETUP_STAGE);
	TRB_SET(IDT, setup, 1);
	TRB_SET(IOC, setup, 1);
	xhci_enqueue_trb(tr);

	/* Fill and enqueue data TRBs (if any) */
	if (dalen) {
		const unsigned mps = EC_GET(MPS, epctx);
		const unsigned dt_dir = (dir == OUT) ? TRB_DIR_OUT : TRB_DIR_IN;
		xhci_enqueue_td(tr, 1, mps, dalen, data, dt_dir);
	}

	/* Fill status TRB */
	trb_t *const status = tr->cur;
	xhci_clear_trb(status, tr->pcs);
	/* Status stage runs opposite to the data direction. */
	TRB_SET(DIR, status, (dir == OUT) ? TRB_DIR_IN : TRB_DIR_OUT);
	TRB_SET(TT, status, TRB_STATUS_STAGE);
	TRB_SET(IOC, status, 1);
	xhci_enqueue_trb(tr);

	/* Ring doorbell for EP0 */
	xhci->dbreg[dev->address] = 1;

	/* Wait for transfer events */
	int i, transferred = 0;
	const int n_stages = 2 + !!dalen; /* setup + status (+ data) */
	for (i = 0; i < n_stages; ++i) {
		const int ret = xhci_wait_for_transfer(xhci, dev->address, 1);
		transferred += ret;
		if (ret < 0) {
			if (ret == TIMEOUT) {
				xhci_debug("Stopping ID %d EP 1\n",
					   dev->address);
				xhci_cmd_stop_endpoint(xhci, dev->address, 1);
			}
			xhci_debug("Stage %d/%d failed: %d\n"
				   " trb ring: @%p\n"
				   " setup trb: @%p\n"
				   " status trb: @%p\n"
				   " ep state: %d -> %d\n"
				   " usbsts: 0x%08"PRIx32"\n",
				   i, n_stages, ret,
				   tr->ring, setup, status,
				   ep_state, EC_GET(STATE, epctx),
				   xhci->opreg->usbsts);
			return ret;
		}
	}

	/* IN transfers that went through the bounce buffer: copy back. */
	if (dir == IN && data != src)
		memcpy(src, data, transferred);
	return transferred;
}
/* (Re)initialize the controller's operational state: program slots and
   DCBAA, set up the command ring and the single-segment event ring with
   its one-entry segment table, then start the controller. With USB_DEBUG,
   fire a batch of NOOP commands as a smoke test. */
static void
xhci_reinit (hci_t *controller)
{
	xhci_t *const xhci = XHCI_INST(controller);

	if (xhci_wait_ready(xhci))
		return;

	/* Enable all available slots */
	xhci->opreg->config = xhci->max_slots_en;

	/* Set DCBAA */
	xhci->opreg->dcbaap_lo = virt_to_phys(xhci->dcbaa);
	xhci->opreg->dcbaap_hi = 0;

	/* Initialize command ring */
	xhci_init_cycle_ring(&xhci->cr, COMMAND_RING_SIZE);
	xhci_debug("command ring @%p (0x%08x)\n",
		   xhci->cr.ring, virt_to_phys(xhci->cr.ring));
	/* CRCR_RCS: initial ring cycle state, matching cr.pcs == 1. */
	xhci->opreg->crcr_lo = virt_to_phys(xhci->cr.ring) | CRCR_RCS;
	xhci->opreg->crcr_hi = 0;

	/* Make sure interrupts are disabled */
	xhci->opreg->usbcmd &= ~USBCMD_INTE;

	/* Initialize event ring */
	xhci_reset_event_ring(&xhci->er);
	xhci_debug("event ring @%p (0x%08x)\n",
		   xhci->er.ring, virt_to_phys(xhci->er.ring));
	xhci_debug("ERST Max: 0x%lx -> 0x%lx entries\n",
		   xhci->capreg->ERST_Max, 1 << xhci->capreg->ERST_Max);
	/* Single-entry event ring segment table. */
	memset((void*)xhci->ev_ring_table, 0x00, sizeof(erst_entry_t));
	xhci->ev_ring_table[0].seg_base_lo = virt_to_phys(xhci->er.ring);
	xhci->ev_ring_table[0].seg_base_hi = 0;
	xhci->ev_ring_table[0].seg_size = EVENT_RING_SIZE;

	/* Initialize primary interrupter */
	xhci->hcrreg->intrrs[0].erstsz = 1;
	xhci_update_event_dq(xhci);
	/* erstba has to be written at last */
	xhci->hcrreg->intrrs[0].erstba_lo =
		virt_to_phys(xhci->ev_ring_table);
	xhci->hcrreg->intrrs[0].erstba_hi = 0;

	xhci_start(controller);

#ifdef USB_DEBUG
	int i;
	for (i = 0; i < 32; ++i) {
		xhci_debug("NOOP run #%d\n", i);
		trb_t *const cmd = xhci_next_command_trb(xhci);
		TRB_SET(TT, cmd, TRB_CMD_NOOP);

		xhci_post_command(xhci);

		/* Wait for result in event ring */
		xhci_wait_for_command_done(xhci, cmd, 1);
		xhci_debug("Command ring is %srunning\n",
			   (xhci->opreg->crcr_lo & CRCR_CRR) ? "" : "not ");
	}
#endif
}