/* finalize == 1: if data is of packet aligned size, add a zero length packet */
static int
uhci_bulk (endpoint_t *ep, int size, u8 *data, int finalize)
{
	const int maxpsize = ep->maxpacketsize;
	if (maxpsize == 0)
		fatal("MaxPacketSize == 0!!!");

	/* Number of packets needed for the payload; if the caller asked to
	   finalize an aligned transfer, account for one extra zero-length
	   packet as well. */
	int numpackets = (size + maxpsize - 1) / maxpsize;
	if (finalize && ((size % maxpsize) == 0))
		numpackets++;
	if (numpackets == 0)
		return 0;

	td_t *tds = create_schedule (numpackets);

	/* Fill one TD per packet. The final packet is short (or zero length)
	   when the remaining byte count is below maxpsize. */
	int data_toggle = ep->toggle;
	int remaining = size;
	u8 *pos = data;
	for (int idx = 0; idx < numpackets; idx++) {
		fill_schedule (&tds[idx], ep, min (remaining, maxpsize), pos,
			       &data_toggle);
		pos += maxpsize;
		remaining -= maxpsize;
	}

	if (run_schedule (ep->dev, tds) == 1) {
		usb_debug("Stalled. Trying to clean up.\n");
		clear_stall (ep);
		free (tds);
		return 1;
	}

	/* Only commit the toggle state on success. */
	ep->toggle = data_toggle;
	free (tds);
	return 0;
}
/*
 * Execute a list of register-write pairs over USB bulk transfers.
 *
 * Writes are batched in runs of up to MAX_REGWRITES_PER_REQ consecutive
 * entries; an entry with reg == 0 acts as a request separator and is
 * skipped. In strict mode any failure aborts with B_ERROR; otherwise
 * transient errors are retried up to MAX_RETRIES times and a stall is
 * cleared via clear_stall().
 *
 * Returns B_OK on success, B_ERROR on a fatal/strict-mode failure.
 */
status_t
aes_usb_exec(status_t (*bulk_transfer)(), status_t (*clear_stall)(),
	bool strict, const pairs *cmd, unsigned int num)
{
	unsigned int i;
	int skip = 0, add_offset = 0;

	for (i = 0; i < num; i += add_offset + skip) {
		int limit = MIN(num, i + MAX_REGWRITES_PER_REQ), j;
		status_t res;

		skip = 0;
		/* handle 0 reg, i.e. request separator */
		if (!cmd[i].reg) {
			skip = 1;
			add_offset = 0;
			continue;
		}
		/* extend the batch up to: limit || new separator */
		for (j = i; j < limit; j++)
			if (!cmd[j].reg) {
				skip = 1;
				break;
			}
		add_offset = j - i;

		/* strict mode: single attempt; otherwise retry transients */
		limit = strict ? 0 : MAX_RETRIES;
		for (j = 0; j <= limit; j++) {
			res = usb_write(bulk_transfer, &cmd[i], add_offset);
			if (res == B_OK)
				break;
			else if (res == B_TIMED_OUT) {
				if (strict)
					return B_ERROR;
				break;
			} else if (res == B_BUSY
					|| res == B_DEV_FIFO_UNDERRUN
					|| res == B_DEV_FIFO_OVERRUN) {
				/* BUGFIX: the original condition was
				 * `res == B_BUSY || B_DEV_FIFO_UNDERRUN ||
				 * B_DEV_FIFO_OVERRUN`, which is always true
				 * because the bare constants are non-zero —
				 * every other error (including B_DEV_STALLED)
				 * was misclassified as transient and the
				 * stall-recovery branch below was dead code. */
				if (strict)
					return B_ERROR;
				continue;
			} else if (res == B_DEV_STALLED) {
				if (strict)
					return B_ERROR;
				/* give up on this batch if recovery fails */
				if (clear_stall() != B_OK)
					break;
			} else
				return B_ERROR;
		}
	}
	return B_OK;
}
/*
 * Recover an endpoint of @dev after an error/halt condition.
 *
 * Sequence: issue an xHC Reset Endpoint command if the endpoint context is
 * Halted, optionally clear the device-side halt (CLEAR_FEATURE), then move
 * the transfer-ring dequeue pointer back to the ring start and reinitialize
 * the ring's cycle state.
 *
 * @dev        device owning the endpoint
 * @ep         endpoint to reset; NULL selects the default control endpoint
 * @clear_halt non-zero to also clear the halt on the device itself
 * Returns 0 on success, 1 if an xHC command failed.
 */
static int
xhci_reset_endpoint(usbdev_t *const dev, endpoint_t *const ep,
		    const int clear_halt)
{
	xhci_t *const xhci = XHCI_INST(dev->controller);
	const int slot_id = dev->address;
	/* ep_id 1 is the default control endpoint (used when ep == NULL) */
	const int ep_id = ep ? xhci_ep_id(ep) : 1;
	epctx_t *const epctx = xhci->dev[slot_id].ctx.ep[ep_id];

	xhci_debug("Resetting ID %d EP %d (ep state: %d)\n",
		   slot_id, ep_id, EC_GET(STATE, epctx));

	/* Run Reset Endpoint Command if the EP is in Halted state */
	if (EC_GET(STATE, epctx) == 2) {
		const int cc = xhci_cmd_reset_endpoint(xhci, slot_id, ep_id);
		if (cc != CC_SUCCESS) {
			xhci_debug("Reset Endpoint Command failed: %d\n", cc);
			return 1;
		}
	}

	/* Clear TT buffer for bulk and control endpoints behind a TT */
	const int hub = dev->hub;
	if (hub && dev->speed < HIGH_SPEED
			&& dev->controller->devices[hub]->speed == HIGH_SPEED)
		/* TODO: send Clear_TT_Buffer to the hub — currently a no-op */;

	/* Try clearing the device' halt condition on non-control endpoints */
	if (clear_halt && ep)
		clear_stall(ep);

	/* Reset transfer ring if the endpoint is in the right state.
	   NOTE(review): states 3/4 are presumably Stopped and Error per the
	   xHCI endpoint-state encoding — confirm against the spec. */
	const unsigned ep_state = EC_GET(STATE, epctx);
	if (ep_state == 3 || ep_state == 4) {
		/* Rewind the dequeue pointer to the ring base (cycle = 1)
		   and reinitialize our producer-side cycle bookkeeping. */
		transfer_ring_t *const tr =
				xhci->dev[slot_id].transfer_rings[ep_id];
		const int cc = xhci_cmd_set_tr_dq(xhci, slot_id, ep_id,
						  tr->ring, 1);
		if (cc != CC_SUCCESS) {
			xhci_debug("Set TR Dequeue Command failed: %d\n", cc);
			return 1;
		}
		xhci_init_cycle_ring(tr, TRANSFER_RING_SIZE);
	}

	xhci_debug("Finished resetting ID %d EP %d (ep state: %d)\n",
		   slot_id, ep_id, EC_GET(STATE, epctx));

	return 0;
}
/* finalize == 1: if data is of packet aligned size, add a zero length packet */
static int
ohci_bulk (endpoint_t *ep, int dalen, u8 *data, int finalize)
{
	/*
	 * Perform an OHCI bulk transfer of dalen bytes at data on endpoint ep.
	 * Builds a chain of general TDs (each TD can span at most two 4K
	 * pages), links them to a freshly allocated ED, hands the ED to the
	 * controller's bulk list and waits for completion.
	 * Returns the (non-zero) failure status from wait_for_ed, 0 on success.
	 */
	int i;
	debug("bulk: %x bytes from %x, finalize: %x, maxpacketsize: %x\n",
	      dalen, data, finalize, ep->maxpacketsize);
	td_t *cur;

	// pages are specified as 4K in OHCI, so don't use getpagesize()
	int first_page = (unsigned long)data / 4096;
	int last_page = (unsigned long)(data+dalen-1)/4096;
	if (last_page < first_page)
		last_page = first_page;
	int pages = (dalen==0)?0:(last_page - first_page + 1);
	/* each TD covers up to two pages, so ceil(pages / 2) data TDs */
	int td_count = (pages+1)/2;

	/* aligned-size transfer with finalize: add a zero-length-packet TD */
	if (finalize && ((dalen % ep->maxpacketsize) == 0)) {
		td_count++;
	}

	/* td_count real TDs plus one dummy tail TD (required by OHCI: the
	   ED's tail pointer must reference a TD the HC never processes) */
	td_t *tds = memalign(sizeof(td_t), (td_count+1)*sizeof(td_t));
	memset((void*)tds, 0, (td_count+1)*sizeof(td_t));

	/* link each TD to its successor; the dummy tail keeps next_td == 0,
	   which terminates the fill loop below */
	for (i=0; i < td_count; i++) {
		tds[i].next_td = virt_to_phys(&tds[i+1]);
	}

	for (cur = tds; cur->next_td != 0; cur++) {
		cur->toggle_from_td = 0;	/* toggle carried by the ED */
		cur->error_count = 0;
		cur->delay_interrupt = 7;	/* no interrupt on completion */
		/* NOTE(review): 0xf is presumably the "not accessed" OHCI
		   condition code — confirm against the OHCI spec */
		cur->condition_code = 0xf;
		cur->direction = (ep->direction==IN)?OHCI_IN:OHCI_OUT;
		cur->current_buffer_pointer = virt_to_phys(data);
		pages--;
		if (dalen == 0) {
			/* magic TD for empty packet transfer */
			cur->current_buffer_pointer = 0;
			cur->buffer_end = 0;
			/* assert((pages == 0) && finalize); */
		}
		/* bytes of data left in the current 4K page */
		int consumed = (4096 - ((unsigned long)data % 4096));
		if (consumed >= dalen) {
			// end of data is within same page
			cur->buffer_end = virt_to_phys(data + dalen - 1);
			dalen = 0;
			/* assert(pages == finalize); */
		} else {
			/* TD spills into a second page: consume the rest of
			   this page plus up to 4K of the next one */
			dalen -= consumed;
			data += consumed;
			pages--;
			int second_page_size = dalen;
			if (dalen > 4096) {
				second_page_size = 4096;
			}
			cur->buffer_end = virt_to_phys(data + second_page_size - 1);
			dalen -= second_page_size;
			data += second_page_size;
		}
	}

	/* Data structures */
	ed_t *head = memalign(sizeof(ed_t), sizeof(ed_t));
	memset((void*)head, 0, sizeof(*head));
	head->function_address = ep->dev->address;
	head->endpoint_number = ep->endpoint & 0xf;
	head->direction = (ep->direction==IN)?OHCI_IN:OHCI_OUT;
	head->lowspeed = ep->dev->speed;
	head->format = 0;	/* general TD format (not isochronous) */
	head->maximum_packet_size = ep->maxpacketsize;
	/* cur now points at the dummy tail TD; head..tail bounds the work */
	head->tail_pointer = virt_to_phys(cur);
	head->head_pointer = virt_to_phys(tds);
	head->halted = 0;
	head->toggle = ep->toggle;

	debug("doing bulk transfer with %x(%x). first_td at %x, last %x\n",
	      head->function_address, head->endpoint_number,
	      virt_to_phys(tds), virt_to_phys(cur));

	/* activate schedule */
	OHCI_INST(ep->dev->controller)->opreg->HcBulkHeadED = virt_to_phys(head);
	OHCI_INST(ep->dev->controller)->opreg->HcControl |= BulkListEnable;
	OHCI_INST(ep->dev->controller)->opreg->HcCommandStatus = BulkListFilled;

	int failure = wait_for_ed(ep->dev, head);
	OHCI_INST(ep->dev->controller)->opreg->HcControl &= ~BulkListEnable;
	/* the HC updated the toggle in the ED; carry it over for next time */
	ep->toggle = head->toggle;

	/* free memory */
	free((void*)tds);
	free((void*)head);

	if (failure) {
		/* try cleanup */
		clear_stall(ep);
	}
	return failure;
}