Beispiel #1
0
/*
 * Dispatch the event TRB currently at the event-ring dequeue position.
 * Handles the standard event types:
 *  - transfer event
 *  - command completion event
 *  - port status change event (only logged; PORTSC registers are polled)
 *  - host controller event
 * Unknown types are logged and skipped.
 */
static void
xhci_handle_event(xhci_t *const xhci)
{
	const trb_t *const ev = xhci->er.cur;
	const int type = TRB_GET(TT, ev);

	/* Either pass along the event or advance event ring */
	if (type == TRB_EV_TRANSFER) {
		xhci_handle_transfer_event(xhci);
	} else if (type == TRB_EV_CMD_CMPL) {
		xhci_handle_command_completion_event(xhci);
	} else if (type == TRB_EV_PORTSC) {
		xhci_debug("Port Status Change Event for %d: %d\n",
			   TRB_GET(PORT, ev), TRB_GET(CC, ev));
		/* We ignore the event as we look for the PORTSC
		   registers instead, at a time when it suits _us_. */
		xhci_advance_event_ring(xhci);
	} else if (type == TRB_EV_HOST) {
		xhci_handle_host_controller_event(xhci);
	} else {
		xhci_debug("Warning: Spurious event: %d, Completion Code: %d\n",
			   type, TRB_GET(CC, ev));
		xhci_advance_event_ring(xhci);
	}
}
Beispiel #2
0
/*
 * Handle a Host Controller Event TRB at the current dequeue position.
 * Only the Event Ring Full error needs action; everything else is
 * logged and skipped.
 */
static void
xhci_handle_host_controller_event(xhci_t *const xhci)
{
	const trb_t *const ev = xhci->er.cur;

	const int cc = TRB_GET(CC, ev);
	switch (cc) {
	case CC_EVENT_RING_FULL_ERROR:
		xhci_debug("Event ring full! (@%p)\n", xhci->er.cur);
		/*
		 * If we get here, we have processed the whole queue:
		 * xHC pushes this event, when it sees the ring full,
		 * full of other events.
		 * IMO it's safe and necessary to update the dequeue
		 * pointer here.
		 */
		xhci_advance_event_ring(xhci);
		xhci_update_event_dq(xhci);
		break;
	default:
		xhci_debug("Warning: Spurious host controller event: %d\n", cc);
		xhci_advance_event_ring(xhci);
		break;
	}
}
Beispiel #3
0
/*
 * Dump an input context: the add/drop control bitmaps followed by the
 * device context entries selected by the add bitmap.
 */
void
xhci_dump_inputctx(const inputctx_t *const ic)
{
	xhci_debug("Input Control  add: 0x%08"PRIx32"\n", *ic->add);
	xhci_debug("Input Control drop: 0x%08"PRIx32"\n", *ic->drop);
	xhci_dump_devctx(&ic->dev, *ic->add);
}
Beispiel #4
0
/* On Panther Point: switch all ports back to EHCI */
static void
xhci_switchback_ppt_ports(pcidev_t addr)
{
	/* 0x1e318086: PCI device/vendor ID of the Intel Panther Point xHCI
	   controller (vendor 0x8086 in the low word). */
	if (pci_read_config32(addr, 0x00) == 0x1e318086) {
		/* Config register 0xd0 holds the USB2 port routing bits;
		   presumably XUSB2PR per Intel PCH docs — TODO confirm. */
		u32 reg32 = pci_read_config32(addr, 0xd0) & 0xf;
		xhci_debug("Switching ports back:   0x%"PRIx32"\n", reg32);
		/* Writing 0 routes all shared ports back to the EHCI
		   controller. */
		pci_write_config32(addr, 0xd0, 0x00000000);
		/* Read back to report any ports that stayed on xHCI. */
		reg32 = pci_read_config32(addr, 0xd0) & 0xf;
		xhci_debug("Still switched to xHCI: 0x%"PRIx32"\n", reg32);
	}
}
Beispiel #5
0
/*
 * Recover an endpoint from a halted/error state:
 *  - issue a Reset Endpoint command if the endpoint is Halted,
 *  - clear the device-side halt (stall) if requested,
 *  - reset the transfer ring via Set TR Dequeue when possible.
 *
 * @param dev        USB device owning the endpoint
 * @param ep         endpoint to reset, or NULL for the default control
 *                   endpoint (EP0, ep_id 1)
 * @param clear_halt if non-zero and ep is given, also send CLEAR_FEATURE
 *                   to the device to clear its halt condition
 * @return 0 on success, 1 if a controller command failed
 */
static int
xhci_reset_endpoint(usbdev_t *const dev, endpoint_t *const ep,
		    const int clear_halt)
{
	xhci_t *const xhci = XHCI_INST(dev->controller);
	const int slot_id = dev->address;
	const int ep_id = ep ? xhci_ep_id(ep) : 1;
	epctx_t *const epctx = xhci->dev[slot_id].ctx.ep[ep_id];

	xhci_debug("Resetting ID %d EP %d (ep state: %d)\n",
		   slot_id, ep_id, EC_GET(STATE, epctx));

	/* Run Reset Endpoint Command if the EP is in Halted state (2) */
	if (EC_GET(STATE, epctx) == 2) {
		const int cc = xhci_cmd_reset_endpoint(xhci, slot_id, ep_id);
		if (cc != CC_SUCCESS) {
			xhci_debug("Reset Endpoint Command failed: %d\n", cc);
			return 1;
		}
	}

	/* Clear TT buffer for bulk and control endpoints behind a TT */
	const int hub = dev->hub;
	if (hub && dev->speed < HIGH_SPEED &&
			dev->controller->devices[hub]->speed == HIGH_SPEED)
		/* TODO */;	/* Clear_TT_Buffer not implemented yet */

	/* Try clearing the device' halt condition on non-control endpoints */
	if (clear_halt && ep)
		clear_stall(ep);

	/* Reset transfer ring if the endpoint is in the right state
	   (3 = Stopped, 4 = Error — per the xHCI endpoint state field) */
	const unsigned ep_state = EC_GET(STATE, epctx);
	if (ep_state == 3 || ep_state == 4) {
		transfer_ring_t *const tr =
				xhci->dev[slot_id].transfer_rings[ep_id];
		/* Point the controller's dequeue back to the ring start
		   with cycle state 1, then reinitialize our producer side. */
		const int cc = xhci_cmd_set_tr_dq(xhci, slot_id, ep_id,
						  tr->ring, 1);
		if (cc != CC_SUCCESS) {
			xhci_debug("Set TR Dequeue Command failed: %d\n", cc);
			return 1;
		}
		xhci_init_cycle_ring(tr, TRANSFER_RING_SIZE);
	}

	xhci_debug("Finished resetting ID %d EP %d (ep state: %d)\n",
		   slot_id, ep_id, EC_GET(STATE, epctx));

	return 0;
}
Beispiel #6
0
/*
 * returns cc of command in question (pointed to by `address`)
 * caller should abort command if cc is TIMEOUT
 *
 * Skims command completion events off the event ring until one whose
 * TRB pointer matches `address` appears, or the timeout elapses.
 * Completion events for other commands are handled (and logged) along
 * the way.
 *
 * @param clear_event if non-zero, consume the matching event from the
 *                    ring; otherwise leave it for the caller to inspect
 */
int
xhci_wait_for_command_done(xhci_t *const xhci,
			   const trb_t *const address,
			   const int clear_event)
{
	/*
	 * The Address Device Command should take most time, as it has to
	 * communicate with the USB device. Set address processing shouldn't
	 * take longer than 50ms (at the slave). Let's take a timeout of
	 * 100ms.
	 */
	unsigned long timeout_us = 100 * 1000; /* 100ms */
	int cc = TIMEOUT;
	while (xhci_wait_for_event_type(xhci, TRB_EV_CMD_CMPL, &timeout_us)) {
		/* Match on the physical address of the command TRB;
		   the high pointer word is always 0 in this driver. */
		if ((xhci->er.cur->ptr_low == virt_to_phys(address)) &&
				(xhci->er.cur->ptr_high == 0)) {
			cc = TRB_GET(CC, xhci->er.cur);
			break;
		}

		/* Not ours: log and consume the spurious completion. */
		xhci_handle_command_completion_event(xhci);
	}
	if (!timeout_us) {
		xhci_debug("Warning: Timed out waiting for TRB_EV_CMD_CMPL.\n");
	} else if (clear_event) {
		xhci_advance_event_ring(xhci);
	}
	/* Tell the hardware how far we have processed the event ring. */
	xhci_update_event_dq(xhci);
	return cc;
}
Beispiel #7
0
/* remove queue from device schedule, dropping all data that came in */
static void
xhci_destroy_intr_queue(endpoint_t *const ep, void *const q)
{
	xhci_t *const xhci = XHCI_INST(ep->dev->controller);
	const int slot_id = ep->dev->address;
	const int ep_id = xhci_ep_id(ep);
	transfer_ring_t *const tr = xhci->dev[slot_id].transfer_rings[ep_id];

	intrq_t *const intrq = (intrq_t *)q;

	/* Make sure the endpoint is stopped: state 1 is Running, so issue
	   a Stop Endpoint command before touching its transfer ring. */
	if (EC_GET(STATE, xhci->dev[slot_id].ctx.ep[ep_id]) == 1) {
		const int cc = xhci_cmd_stop_endpoint(xhci, slot_id, ep_id);
		if (cc != CC_SUCCESS)
			xhci_debug("Warning: Failed to stop endpoint\n");
	}

	/* Process all remaining transfer events */
	xhci_handle_events(xhci);

	/* Free all pending transfers and the interrupt queue structure.
	   Each queued TRB's ptr_low holds the physical address of its
	   request-data buffer. */
	int i;
	for (i = 0; i < intrq->count; ++i) {
		free(phys_to_virt(intrq->next->ptr_low));
		intrq->next = xhci_next_trb(intrq->next, NULL);
	}
	/* Unhook the queue before freeing so event handling can no longer
	   reach it. */
	xhci->dev[slot_id].interrupt_queues[ep_id] = NULL;
	free((void *)intrq);

	/* Reset the controller's dequeue pointer and reinitialize the ring */
	xhci_cmd_set_tr_dq(xhci, slot_id, ep_id, tr->ring, 1);
	xhci_init_cycle_ring(tr, TRANSFER_RING_SIZE);
}
Beispiel #8
0
/*
 * Start the controller: set the Run/Stop bit and wait up to 1s for the
 * HCHalted status bit to clear.
 */
static void
xhci_start (hci_t *controller)
{
	xhci_t *const xhci = XHCI_INST(controller);

	xhci->opreg->usbcmd |= USBCMD_RS;
	/* Wait for USBSTS.HCH to read 0 (controller running). */
	if (!xhci_handshake(&xhci->opreg->usbsts, USBSTS_HCH, 0, 1000000L))
		xhci_debug("Controller didn't start within 1s\n");
}
Beispiel #9
0
/* returns cc of command in question (pointed to by `address`) */
int
xhci_wait_for_command_aborted(xhci_t *const xhci, const trb_t *const address)
{
	/*
	 * Specification says that something might be seriously wrong, if
	 * we don't get a response after 5s. Still, let the caller decide,
	 * what to do then.
	 */
	unsigned long timeout_us = 5 * 1000 * 1000; /* 5s */
	int cc = TIMEOUT;
	/*
	 * Expects two command completion events:
	 * The first with CC == COMMAND_ABORTED should point to address,
	 * the second with CC == COMMAND_RING_STOPPED should point to new dq.
	 */
	while (xhci_wait_for_event_type(xhci, TRB_EV_CMD_CMPL, &timeout_us)) {
		/* First phase: find the completion for our aborted command. */
		if ((xhci->er.cur->ptr_low == virt_to_phys(address)) &&
				(xhci->er.cur->ptr_high == 0)) {
			cc = TRB_GET(CC, xhci->er.cur);
			xhci_advance_event_ring(xhci);
			break;
		}

		xhci_handle_command_completion_event(xhci);
	}
	if (!timeout_us)
		xhci_debug("Warning: Timed out waiting for COMMAND_ABORTED.\n");
	/* Second phase: wait for the ring-stopped event, which carries the
	   controller's new command-ring dequeue pointer. Note that any time
	   left over from the first phase is what remains of the budget. */
	while (xhci_wait_for_event_type(xhci, TRB_EV_CMD_CMPL, &timeout_us)) {
		if (TRB_GET(CC, xhci->er.cur) == CC_COMMAND_RING_STOPPED) {
			/* Resynchronize our enqueue pointer with the
			   hardware's reported dequeue position. */
			xhci->cr.cur = phys_to_virt(xhci->er.cur->ptr_low);
			xhci_advance_event_ring(xhci);
			break;
		}

		xhci_handle_command_completion_event(xhci);
	}
	if (!timeout_us)
		xhci_debug("Warning: Timed out "
			   "waiting for COMMAND_RING_STOPPED.\n");
	xhci_update_event_dq(xhci);
	return cc;
}
Beispiel #10
0
/*
 * Wait up to 100ms for the Controller Not Ready (CNR) bit to clear.
 *
 * @return 0 when the controller is ready, -1 on timeout
 */
static int
xhci_wait_ready(xhci_t *const xhci)
{
	xhci_debug("Waiting for controller to be ready... ");
	if (!xhci_handshake(&xhci->opreg->usbsts, USBSTS_CNR, 0, 100000L)) {
		usb_debug("timeout!\n");
		return -1;
	}
	usb_debug("ok.\n");
	return 0;
}
Beispiel #11
0
/* On Panther Point: switch ports shared with EHCI to xHCI */
static void
xhci_switch_ppt_ports(pcidev_t addr)
{
	/* 0x1e318086: Intel Panther Point xHCI PCI device/vendor ID. */
	if (pci_read_config32(addr, 0x00) == 0x1e318086) {
		/* 0xdc: mask of ports capable of SuperSpeed (read-only);
		   0xd8: SuperSpeed enable; 0xd4: mask of switchable ports;
		   0xd0: USB2 port routing — register roles inferred from the
		   debug messages below; TODO confirm against PCH datasheet. */
		u32 reg32 = pci_read_config32(addr, 0xdc) & 0xf;
		xhci_debug("Ports capable of SuperSpeed: 0x%"PRIx32"\n", reg32);

		/* For now, do not enable SuperSpeed on any ports */
		//pci_write_config32(addr, 0xd8, reg32);
		pci_write_config32(addr, 0xd8, 0x00000000);
		reg32 = pci_read_config32(addr, 0xd8) & 0xf;
		xhci_debug("Configured for SuperSpeed:   0x%"PRIx32"\n", reg32);

		reg32 = pci_read_config32(addr, 0xd4) & 0xf;
		xhci_debug("Trying to switch over:       0x%"PRIx32"\n", reg32);

		/* Route all switchable USB2 ports to the xHCI controller. */
		pci_write_config32(addr, 0xd0, reg32);
		reg32 = pci_read_config32(addr, 0xd0) & 0xf;
		xhci_debug("Actually switched over:      0x%"PRIx32"\n", reg32);
	}
}
Beispiel #12
0
/*
 * Handle a Transfer Event TRB at the current dequeue position.
 *
 * If the event belongs to a running interrupt queue, mark the completed
 * TRB as ready and record the number of bytes actually transferred in
 * its TL field (0 on failure). Forced-stop events are silently ignored;
 * anything else is logged as spurious. The event is always consumed.
 */
static void
xhci_handle_transfer_event(xhci_t *const xhci)
{
	const trb_t *const ev = xhci->er.cur;

	const int cc = TRB_GET(CC, ev);	/* completion code */
	const int id = TRB_GET(ID, ev);	/* slot id */
	const int ep = TRB_GET(EP, ev);	/* endpoint id */

	devinfo_t *di;
	intrq_t *intrq;

	if (id && id <= xhci->max_slots_en &&
			(di = DEVINFO_FROM_XHCI(xhci, id)) &&
			(intrq = di->interrupt_queues[ep])) {
		/* It's a running interrupt endpoint */
		intrq->ready = phys_to_virt(ev->ptr_low);
		if (cc == CC_SUCCESS || cc == CC_SHORT_PACKET) {
			/* EVTL holds the residue; transferred bytes =
			   request size minus residue. */
			TRB_SET(TL, intrq->ready,
				intrq->size - TRB_GET(EVTL, ev));
		} else {
			xhci_debug("Interrupt Transfer failed: %d\n",
				   cc);
			TRB_SET(TL, intrq->ready, 0);
		}
	} else if (cc == CC_STOPPED || cc == CC_STOPPED_LENGTH_INVALID) {
		/* Ignore 'Forced Stop Events' */
	} else {
		xhci_debug("Warning: "
			   "Spurious transfer event for ID %d, EP %d:\n"
			   "  Pointer: 0x%08x%08x\n"
			   "       TL: 0x%06x\n"
			   "       CC: %d\n",
			   id, ep,
			   ev->ptr_high, ev->ptr_low,
			   TRB_GET(EVTL, ev), cc);
	}
	xhci_advance_event_ring(xhci);
}
Beispiel #13
0
/*
 * Stop the controller, then issue a Host Controller Reset and wait up
 * to 1s for the HCRST bit to clear (hardware clears it when the reset
 * completes).
 */
static void
xhci_reset(hci_t *const controller)
{
	xhci_t *const xhci = XHCI_INST(controller);

	/* Controller must be halted before resetting. */
	xhci_stop(controller);

	xhci->opreg->usbcmd |= USBCMD_HCRST;
	xhci_debug("Resetting controller... ");
	if (!xhci_handshake(&xhci->opreg->usbcmd, USBCMD_HCRST, 0, 1000000L))
		usb_debug("timeout!\n");
	else
		usb_debug("ok.\n");
}
Beispiel #14
0
/*
 * Consume a command completion event that no one was waiting for.
 * Callers that expect a completion match it themselves (see
 * xhci_wait_for_command_done()); anything that falls through to here
 * is logged as spurious and dropped.
 */
static void
xhci_handle_command_completion_event(xhci_t *const xhci)
{
	const trb_t *const ev = xhci->er.cur;

	xhci_debug("Warning: Spurious command completion event:\n"
		   "  Pointer: 0x%08x%08x\n"
		   "       CC: %d\n"
		   "  Slot ID: %d\n"
		   "    Cycle: %d\n",
		   ev->ptr_high, ev->ptr_low,
		   TRB_GET(CC, ev), TRB_GET(ID, ev), ev->control & TRB_CYCLE);
	xhci_advance_event_ring(xhci);
}
Beispiel #15
0
/*
 * Hand the command TRB at the current enqueue position over to the
 * hardware: set its cycle bit, ring doorbell 0, then advance our
 * enqueue pointer past any LINK TRB at the end of the ring.
 */
void
xhci_post_command(xhci_t *const xhci)
{
	xhci_debug("Command %d (@%p)\n",
		   TRB_GET(TT, xhci->cr.cur), xhci->cr.cur);

	/* Setting the cycle bit to the producer cycle state makes the
	   TRB visible to the controller — it must be the last field
	   written. */
	TRB_SET(C, xhci->cr.cur, xhci->cr.pcs);
	++xhci->cr.cur;

	/* pass command trb to hardware */
	wmb();	/* ensure the TRB is in memory before the doorbell */
	/* Ring the doorbell */
	xhci->dbreg[0] = 0;

	/* If we reached the LINK TRB, follow it back to the ring start
	   and toggle our cycle state if the TRB's TC bit says so. */
	while (TRB_GET(TT, xhci->cr.cur) == TRB_LINK) {
		xhci_debug("Handling LINK pointer (@%p)\n", xhci->cr.cur);
		const int tc = TRB_GET(TC, xhci->cr.cur);
		TRB_SET(C, xhci->cr.cur, xhci->cr.pcs);
		xhci->cr.cur = phys_to_virt(xhci->cr.cur->ptr_low);
		if (tc)
			xhci->cr.pcs ^= 1;
	}
}
Beispiel #16
0
/*
 * Dump a transfer TRB: raw words first, then the decoded fields.
 */
void
xhci_dump_transfer_trb(const trb_t *const cur)
{
	xhci_debug("Transfer TRB (@%p):\n", cur);
	usb_debug(" PTR_L\t0x%08"PRIx32"\n", cur->ptr_low);
	usb_debug(" PTR_H\t0x%08"PRIx32"\n", cur->ptr_high);
	usb_debug(" STATUS\t0x%08"PRIx32"\n", cur->status);
	usb_debug(" CNTRL\t0x%08"PRIx32"\n", cur->control);
	TRB_DUMP(TL,	cur);
	TRB_DUMP(TDS,	cur);
	TRB_DUMP(C,	cur);
	TRB_DUMP(ISP,	cur);
	TRB_DUMP(CH,	cur);
	TRB_DUMP(IOC,	cur);
	TRB_DUMP(IDT,	cur);
	TRB_DUMP(TT,	cur);
	TRB_DUMP(DIR,	cur);
}
Beispiel #17
0
/*
 * Dump an endpoint context: raw fields first, then the decoded values.
 */
void
xhci_dump_epctx(const epctx_t *const ec)
{
	xhci_debug("Endpoint Context (@%p):\n", ec);
	usb_debug(" FIELD1\t0x%08"PRIx32"\n", ec->f1);
	usb_debug(" FIELD2\t0x%08"PRIx32"\n", ec->f2);
	usb_debug(" TRDQ_L\t0x%08"PRIx32"\n", ec->tr_dq_low);
	usb_debug(" TRDQ_H\t0x%08"PRIx32"\n", ec->tr_dq_high);
	usb_debug(" FIELD5\t0x%08"PRIx32"\n", ec->f5);
	EC_DUMP(STATE,  ec);
	EC_DUMP(INTVAL, ec);
	EC_DUMP(CERR,   ec);
	EC_DUMP(TYPE,   ec);
	EC_DUMP(MBS,    ec);
	EC_DUMP(MPS,    ec);
	EC_DUMP(DCS,    ec);
	EC_DUMP(AVRTRB, ec);
	EC_DUMP(MXESIT, ec);
}
Beispiel #18
0
/*
 * Dump a slot context: raw fields first, then the decoded values.
 */
void
xhci_dump_slotctx(const slotctx_t *const sc)
{
	xhci_debug("Slot Context (@%p):\n", sc);
	usb_debug(" FIELD1\t0x%08"PRIx32"\n", sc->f1);
	usb_debug(" FIELD2\t0x%08"PRIx32"\n", sc->f2);
	usb_debug(" FIELD3\t0x%08"PRIx32"\n", sc->f3);
	usb_debug(" FIELD4\t0x%08"PRIx32"\n", sc->f4);
	SC_DUMP(ROUTE,  sc);
	SC_DUMP(SPEED1, sc);
	SC_DUMP(MTT,    sc);
	SC_DUMP(HUB,    sc);
	SC_DUMP(CTXENT, sc);
	SC_DUMP(RHPORT, sc);
	SC_DUMP(NPORTS, sc);
	SC_DUMP(TTID,   sc);
	SC_DUMP(TTPORT, sc);
	SC_DUMP(TTT,    sc);
	SC_DUMP(UADDR,  sc);
	SC_DUMP(STATE,  sc);
}
Beispiel #19
0
/* returns cc of transfer for given slot/endpoint pair */
int
xhci_wait_for_transfer(xhci_t *const xhci, const int slot_id, const int ep_id)
{
	xhci_spew("Waiting for transfer on ID %d EP %d\n", slot_id, ep_id);
	/* 2s for all types of transfers */ /* TODO: test, wait longer? */
	unsigned long timeout_us = 2 * 1000 * 1000;
	int cc = TIMEOUT;
	while (xhci_wait_for_event_type(xhci, TRB_EV_TRANSFER, &timeout_us)) {
		/* Consume the event only if it is for our slot/endpoint;
		   other transfer events are handled generically. */
		if (TRB_GET(ID, xhci->er.cur) == slot_id &&
				TRB_GET(EP, xhci->er.cur) == ep_id) {
			cc = TRB_GET(CC, xhci->er.cur);
			xhci_advance_event_ring(xhci);
			break;
		}

		xhci_handle_transfer_event(xhci);
	}
	if (!timeout_us)
		xhci_debug("Warning: Timed out waiting for TRB_EV_TRANSFER.\n");
	/* Report our progress on the event ring to the hardware. */
	xhci_update_event_dq(xhci);
	return cc;
}
Beispiel #20
0
/*
 * Wait for a command to complete; on timeout, abort it by setting the
 * Command Stop/Abort bits in CRCR and wait for the abort handshake.
 *
 * @return the command's completion code, or TIMEOUT if even the abort
 *         never completed
 */
static int
xhci_wait_for_command(xhci_t *const xhci,
		      const trb_t *const cmd_trb,
		      const int clear_event)
{
	int cc;

	cc = xhci_wait_for_command_done(xhci, cmd_trb, clear_event);
	if (cc != TIMEOUT)
		return cc;

	/* Abort command on timeout */
	xhci_debug("Aborting command (@%p), CRCR: 0x%"PRIx32"\n",
		   cmd_trb, xhci->opreg->crcr_lo);
	xhci->opreg->crcr_lo |= CRCR_CS | CRCR_CA;
	xhci->opreg->crcr_hi = 0;
	cc = xhci_wait_for_command_aborted(xhci, cmd_trb);

	/* If the command ring is still running after the abort, we can't
	   recover — fail hard rather than continue in an unknown state. */
	if (xhci->opreg->crcr_lo & CRCR_CRR)
		fatal("xhci_wait_for_command: Command ring still running\n");

	return cc;
}
Beispiel #21
0
/*
 * Create and initialize an xHCI controller instance for the MMIO
 * capability registers at `physical_bar`.
 *
 * Allocates all static and dynamic structures first (so failure paths
 * need not touch the hardware), then resets and configures the
 * controller and initializes the root hub.
 *
 * @return the new controller, or NULL on any failure (all partially
 *         allocated resources are freed via the goto cleanup ladder)
 */
hci_t *
xhci_init (unsigned long physical_bar)
{
	int i;

	/* First, allocate and initialize static controller structures */

	hci_t *const controller = new_controller();
	if (!controller) {
		xhci_debug("Could not create USB controller instance\n");
		return controller;
	}

	controller->type		= XHCI;
	controller->start		= xhci_start;
	controller->stop		= xhci_stop;
	controller->reset		= xhci_reset;
	controller->init		= xhci_reinit;
	controller->shutdown		= xhci_shutdown;
	controller->bulk		= xhci_bulk;
	controller->control		= xhci_control;
	controller->set_address		= xhci_set_address;
	controller->finish_device_config= xhci_finish_device_config;
	controller->destroy_device	= xhci_destroy_dev;
	controller->create_intr_queue	= xhci_create_intr_queue;
	controller->destroy_intr_queue	= xhci_destroy_intr_queue;
	controller->poll_intr_queue	= xhci_poll_intr_queue;
	controller->pcidev		= 0;
	for (i = 0; i < 128; ++i) {
		controller->devices[i] = NULL;
	}

	controller->instance = malloc(sizeof(xhci_t));
	if (!controller->instance) {
		xhci_debug("Out of memory creating xHCI controller instance\n");
		goto _free_controller;
	}
	xhci_t *const xhci = (xhci_t *)controller->instance;
	memset(xhci, 0x00, sizeof(*xhci));

	/* Device entry 0 is the root hub. Rings and the event ring
	   segment table require 64-byte alignment. */
	init_device_entry(controller, 0);
	xhci->roothub = controller->devices[0];
	xhci->cr.ring = xhci_align(64, COMMAND_RING_SIZE * sizeof(trb_t));
	xhci->er.ring = xhci_align(64, EVENT_RING_SIZE * sizeof(trb_t));
	xhci->ev_ring_table = xhci_align(64, sizeof(erst_entry_t));
	if (!xhci->roothub || !xhci->cr.ring ||
			!xhci->er.ring || !xhci->ev_ring_table) {
		xhci_debug("Out of memory\n");
		goto _free_xhci;
	}

	/* Locate the operational, runtime and doorbell register sets at
	   their offsets from the capability registers. */
	xhci->capreg	= phys_to_virt(physical_bar);
	xhci->opreg	= ((void *)xhci->capreg) + xhci->capreg->caplength;
	xhci->hcrreg	= ((void *)xhci->capreg) + xhci->capreg->rtsoff;
	xhci->dbreg	= ((void *)xhci->capreg) + xhci->capreg->dboff;
	xhci_debug("regbase: 0x%"PRIx32"\n", physical_bar);
	xhci_debug("caplen:  0x%"PRIx32"\n", xhci->capreg->caplength);
	xhci_debug("rtsoff:  0x%"PRIx32"\n", xhci->capreg->rtsoff);
	xhci_debug("dboff:   0x%"PRIx32"\n", xhci->capreg->dboff);

	xhci_debug("hciversion: %"PRIx8".%"PRIx8"\n",
		   xhci->capreg->hciver_hi, xhci->capreg->hciver_lo);
	/* Only xHCI versions 0.96 through 1.00 are supported. */
	if ((xhci->capreg->hciversion < 0x96) ||
			(xhci->capreg->hciversion > 0x100)) {
		xhci_debug("Unsupported xHCI version\n");
		goto _free_xhci;
	}

	xhci_debug("context size: %dB\n", CTXSIZE(xhci));
	xhci_debug("maxslots: 0x%02lx\n", xhci->capreg->MaxSlots);
	xhci_debug("maxports: 0x%02lx\n", xhci->capreg->MaxPorts);
	const unsigned pagesize = xhci->opreg->pagesize << 12;
	xhci_debug("pagesize: 0x%04x\n", pagesize);

	/*
	 * We haven't touched the hardware yet. So we allocate all dynamic
	 * structures at first and can still chicken out easily if we run out
	 * of memory.
	 */
	xhci->max_slots_en = xhci->capreg->MaxSlots & CONFIG_LP_MASK_MaxSlotsEn;
	/* One DCBAA entry per slot, plus entry 0 for the scratchpad array. */
	xhci->dcbaa = xhci_align(64, (xhci->max_slots_en + 1) * sizeof(u64));
	xhci->dev = malloc((xhci->max_slots_en + 1) * sizeof(*xhci->dev));
	if (!xhci->dcbaa || !xhci->dev) {
		xhci_debug("Out of memory\n");
		goto _free_xhci;
	}
	memset(xhci->dcbaa, 0x00, (xhci->max_slots_en + 1) * sizeof(u64));
	memset(xhci->dev, 0x00, (xhci->max_slots_en + 1) * sizeof(*xhci->dev));

	/*
	 * Let dcbaa[0] point to another array of pointers, sp_ptrs.
	 * The pointers therein point to scratchpad buffers (pages).
	 */
	const size_t max_sp_bufs = xhci->capreg->Max_Scratchpad_Bufs;
	xhci_debug("max scratchpad bufs: 0x%zx\n", max_sp_bufs);
	if (max_sp_bufs) {
		const size_t sp_ptrs_size = max_sp_bufs * sizeof(u64);
		xhci->sp_ptrs = xhci_align(64, sp_ptrs_size);
		if (!xhci->sp_ptrs) {
			xhci_debug("Out of memory\n");
			goto _free_xhci_structs;
		}
		memset(xhci->sp_ptrs, 0x00, sp_ptrs_size);
		for (i = 0; i < max_sp_bufs; ++i) {
			/* Could use mmap() here if we had it.
			   Maybe there is another way. */
			void *const page = memalign(pagesize, pagesize);
			if (!page) {
				xhci_debug("Out of memory\n");
				goto _free_xhci_structs;
			}
			xhci->sp_ptrs[i] = virt_to_phys(page);
		}
		xhci->dcbaa[0] = virt_to_phys(xhci->sp_ptrs);
	}

	if (dma_initialized()) {
		/* Bounce buffer for payloads outside DMA-coherent memory. */
		xhci->dma_buffer = dma_memalign(64 * 1024, DMA_SIZE);
		if (!xhci->dma_buffer) {
			xhci_debug("Not enough memory for DMA bounce buffer\n");
			goto _free_xhci_structs;
		}
	}

	/* Now start working on the hardware */
	if (xhci_wait_ready(xhci))
		goto _free_xhci_structs;

	/* TODO: Check if BIOS claims ownership (and hand over) */

	xhci_reset(controller);
	xhci_reinit(controller);

	xhci->roothub->controller = controller;
	xhci->roothub->init = xhci_rh_init;
	xhci->roothub->init(xhci->roothub);

	return controller;

/* Cleanup ladder: each label frees what was allocated up to its
   corresponding failure point, then falls through to the next. */
_free_xhci_structs:
	if (xhci->sp_ptrs) {
		for (i = 0; i < max_sp_bufs; ++i) {
			if (xhci->sp_ptrs[i])
				free(phys_to_virt(xhci->sp_ptrs[i]));
		}
	}
	free(xhci->sp_ptrs);
	free(xhci->dcbaa);
_free_xhci:
	free((void *)xhci->ev_ring_table);
	free((void *)xhci->er.ring);
	free((void *)xhci->cr.ring);
	free(xhci->roothub);
	free(xhci->dev);
	free(xhci);
_free_controller:
	detach_controller(controller);
	free(controller);
	return NULL;
}
Beispiel #22
0
/*
 * (Re)program the controller's operational and runtime registers:
 * slot count, DCBAA, command ring, event ring and primary interrupter,
 * then start the controller. Assumes the controller is halted/reset.
 */
static void
xhci_reinit (hci_t *controller)
{
	xhci_t *const xhci = XHCI_INST(controller);

	if (xhci_wait_ready(xhci))
		return;

	/* Enable all available slots */
	xhci->opreg->config = xhci->max_slots_en;

	/* Set DCBAA */
	xhci->opreg->dcbaap_lo = virt_to_phys(xhci->dcbaa);
	xhci->opreg->dcbaap_hi = 0;

	/* Initialize command ring */
	xhci_init_cycle_ring(&xhci->cr, COMMAND_RING_SIZE);
	xhci_debug("command ring @%p (0x%08x)\n",
		   xhci->cr.ring, virt_to_phys(xhci->cr.ring));
	/* CRCR_RCS: initial ring cycle state is 1. */
	xhci->opreg->crcr_lo = virt_to_phys(xhci->cr.ring) | CRCR_RCS;
	xhci->opreg->crcr_hi = 0;

	/* Make sure interrupts are disabled */
	xhci->opreg->usbcmd &= ~USBCMD_INTE;

	/* Initialize event ring */
	xhci_reset_event_ring(&xhci->er);
	xhci_debug("event ring @%p (0x%08x)\n",
		   xhci->er.ring, virt_to_phys(xhci->er.ring));
	xhci_debug("ERST Max: 0x%lx ->  0x%lx entries\n",
		   xhci->capreg->ERST_Max, 1 << xhci->capreg->ERST_Max);
	/* We use a single-segment event ring: one ERST entry. */
	memset((void*)xhci->ev_ring_table, 0x00, sizeof(erst_entry_t));
	xhci->ev_ring_table[0].seg_base_lo = virt_to_phys(xhci->er.ring);
	xhci->ev_ring_table[0].seg_base_hi = 0;
	xhci->ev_ring_table[0].seg_size = EVENT_RING_SIZE;

	/* Initialize primary interrupter */
	xhci->hcrreg->intrrs[0].erstsz = 1;
	xhci_update_event_dq(xhci);
	/* erstba has to be written at last */
	xhci->hcrreg->intrrs[0].erstba_lo = virt_to_phys(xhci->ev_ring_table);
	xhci->hcrreg->intrrs[0].erstba_hi = 0;

	xhci_start(controller);

#ifdef USB_DEBUG
	/* Smoke-test the command ring with a batch of NOOP commands. */
	int i;
	for (i = 0; i < 32; ++i) {
		xhci_debug("NOOP run #%d\n", i);
		trb_t *const cmd = xhci_next_command_trb(xhci);
		TRB_SET(TT, cmd, TRB_CMD_NOOP);

		xhci_post_command(xhci);

		/* Wait for result in event ring */
		xhci_wait_for_command_done(xhci, cmd, 1);
		xhci_debug("Command ring is %srunning\n",
			   (xhci->opreg->crcr_lo & CRCR_CRR) ? "" : "not ");
	}
#endif
}
Beispiel #23
0
/*
 * Run a control transfer on EP0 of the given device.
 *
 * @param dev    target device
 * @param dir    data stage direction (IN/OUT); ignored if dalen == 0
 * @param drlen  length of the device request (setup packet)
 * @param devreq the 8-byte setup packet
 * @param dalen  data stage length in bytes (0 for no data stage)
 * @param src    data buffer; bounced through the DMA buffer if not
 *               DMA-coherent
 * @return total bytes transferred across all stages, or a negative
 *         completion code / -1 on failure
 */
static int
xhci_control(usbdev_t *const dev, const direction_t dir,
	     const int drlen, void *const devreq,
	     const int dalen, unsigned char *const src)
{
	unsigned char *data = src;
	xhci_t *const xhci = XHCI_INST(dev->controller);
	epctx_t *const epctx = xhci->dev[dev->address].ctx.ep0;
	transfer_ring_t *const tr = xhci->dev[dev->address].transfer_rings[1];

	/* Each TRB covers at most 64KiB; reject transfers that would need
	   more TRBs than the ring can hold (minus setup/status/link). */
	const size_t off = (size_t)data & 0xffff;
	if ((off + dalen) > ((TRANSFER_RING_SIZE - 4) << 16)) {
		xhci_debug("Unsupported transfer size\n");
		return -1;
	}

	/* Reset endpoint if it's halted (2) or in error state (4) */
	const unsigned ep_state = EC_GET(STATE, epctx);
	if (ep_state == 2 || ep_state == 4) {
		if (xhci_reset_endpoint(dev, NULL, 0))
			return -1;
	}

	/* Bounce through the DMA buffer if the caller's buffer is not
	   DMA-coherent. */
	if (dalen && !dma_coherent(src)) {
		data = xhci->dma_buffer;
		if (dalen > DMA_SIZE) {
			xhci_debug("Control transfer too large: %d\n", dalen);
			return -1;
		}
		if (dir == OUT)
			memcpy(data, src, dalen);
	}

	/* Fill and enqueue setup TRB. The 8-byte setup packet is carried
	   immediately in the TRB (IDT = immediate data). */
	trb_t *const setup = tr->cur;
	xhci_clear_trb(setup, tr->pcs);
	setup->ptr_low = ((u32 *)devreq)[0];
	setup->ptr_high = ((u32 *)devreq)[1];
	TRB_SET(TL, setup, 8);
	TRB_SET(TRT, setup, (dalen)
			? ((dir == OUT) ? TRB_TRT_OUT_DATA : TRB_TRT_IN_DATA)
			: TRB_TRT_NO_DATA);
	TRB_SET(TT, setup, TRB_SETUP_STAGE);
	TRB_SET(IDT, setup, 1);
	TRB_SET(IOC, setup, 1);
	xhci_enqueue_trb(tr);

	/* Fill and enqueue data TRBs (if any) */
	if (dalen) {
		const unsigned mps = EC_GET(MPS, epctx);
		const unsigned dt_dir = (dir == OUT) ? TRB_DIR_OUT : TRB_DIR_IN;
		xhci_enqueue_td(tr, 1, mps, dalen, data, dt_dir);
	}

	/* Fill status TRB — direction is opposite to the data stage. */
	trb_t *const status = tr->cur;
	xhci_clear_trb(status, tr->pcs);
	TRB_SET(DIR, status, (dir == OUT) ? TRB_DIR_IN : TRB_DIR_OUT);
	TRB_SET(TT, status, TRB_STATUS_STAGE);
	TRB_SET(IOC, status, 1);
	xhci_enqueue_trb(tr);

	/* Ring doorbell for EP0 */
	xhci->dbreg[dev->address] = 1;

	/* Wait for transfer events — one per stage (setup, optional data,
	   status); each stage set IOC so each produces an event. */
	int i, transferred = 0;
	const int n_stages = 2 + !!dalen;
	for (i = 0; i < n_stages; ++i) {
		const int ret = xhci_wait_for_transfer(xhci, dev->address, 1);
		transferred += ret;
		if (ret < 0) {
			if (ret == TIMEOUT) {
				xhci_debug("Stopping ID %d EP 1\n",
					   dev->address);
				xhci_cmd_stop_endpoint(xhci, dev->address, 1);
			}
			xhci_debug("Stage %d/%d failed: %d\n"
				   "  trb ring:   @%p\n"
				   "  setup trb:  @%p\n"
				   "  status trb: @%p\n"
				   "  ep state:   %d -> %d\n"
				   "  usbsts:     0x%08"PRIx32"\n",
				   i, n_stages, ret,
				   tr->ring, setup, status,
				   ep_state, EC_GET(STATE, epctx),
				   xhci->opreg->usbsts);
			return ret;
		}
	}

	/* Copy IN data back out of the bounce buffer if one was used. */
	if (dir == IN && data != src)
		memcpy(src, data, transferred);
	return transferred;
}
Beispiel #24
0
/* finalize == 1: if data is of packet aligned size, add a zero length packet */
/*
 * Run a bulk transfer on the given endpoint.
 *
 * @param ep       bulk endpoint (direction taken from ep->direction)
 * @param size     transfer length in bytes
 * @param src      data buffer; bounced through the DMA buffer if not
 *                 DMA-coherent
 * @param finalize see above — effectively ignored here
 * @return bytes transferred, or a negative completion code on failure
 */
static int
xhci_bulk(endpoint_t *const ep, const int size, u8 *const src,
	  const int finalize)
{
	/* finalize: Hopefully the xHCI controller always does this.
		     We have no control over the packets. */

	u8 *data = src;
	xhci_t *const xhci = XHCI_INST(ep->dev->controller);
	const int slot_id = ep->dev->address;
	const int ep_id = xhci_ep_id(ep);
	epctx_t *const epctx = xhci->dev[slot_id].ctx.ep[ep_id];
	transfer_ring_t *const tr = xhci->dev[slot_id].transfer_rings[ep_id];

	/* Each TRB covers at most 64KiB; reject transfers needing more
	   TRBs than the ring can hold (minus the link TRB slack). */
	const size_t off = (size_t)data & 0xffff;
	if ((off + size) > ((TRANSFER_RING_SIZE - 2) << 16)) {
		xhci_debug("Unsupported transfer size\n");
		return -1;
	}

	/* Bounce through the DMA buffer if the caller's buffer is not
	   DMA-coherent. */
	if (!dma_coherent(src)) {
		data = xhci->dma_buffer;
		if (size > DMA_SIZE) {
			xhci_debug("Bulk transfer too large: %d\n", size);
			return -1;
		}
		if (ep->direction == OUT)
			memcpy(data, src, size);
	}

	/* Reset endpoint if it's halted (2) or in error state (4) */
	const unsigned ep_state = EC_GET(STATE, epctx);
	if (ep_state == 2 || ep_state == 4) {
		if (xhci_reset_endpoint(ep->dev, ep, 0))
			return -1;
	}

	/* Enqueue transfer and ring doorbell */
	const unsigned mps = EC_GET(MPS, epctx);
	const unsigned dir = (ep->direction == OUT) ? TRB_DIR_OUT : TRB_DIR_IN;
	xhci_enqueue_td(tr, ep_id, mps, size, data, dir);
	xhci->dbreg[ep->dev->address] = ep_id;

	/* Wait for transfer event */
	const int ret = xhci_wait_for_transfer(xhci, ep->dev->address, ep_id);
	if (ret < 0) {
		if (ret == TIMEOUT) {
			xhci_debug("Stopping ID %d EP %d\n",
				   ep->dev->address, ep_id);
			xhci_cmd_stop_endpoint(xhci, ep->dev->address, ep_id);
		} else if (ret == -CC_STALL_ERROR) {
			/* Stall: recover the endpoint and clear the
			   device-side halt condition too. */
			xhci_reset_endpoint(ep->dev, ep, 1);
		}
		xhci_debug("Bulk transfer failed: %d\n"
			   "  ep state: %d -> %d\n"
			   "  usbsts:   0x%08"PRIx32"\n",
			   ret, ep_state,
			   EC_GET(STATE, epctx),
			   xhci->opreg->usbsts);
		return ret;
	}

	/* Copy IN data back out of the bounce buffer if one was used. */
	if (ep->direction == IN && data != src)
		memcpy(src, data, ret);
	return ret;
}
Beispiel #25
0
/* create and hook-up an intr queue into device schedule */
/*
 * Pre-fill the endpoint's transfer ring with `reqcount` Normal TRBs of
 * `reqsize` bytes each, enqueue all but the last, and ring the doorbell.
 *
 * @param ep        interrupt endpoint
 * @param reqsize   size of each request buffer (max 64KiB)
 * @param reqcount  number of outstanding requests (max ring size - 2)
 * @param reqtiming ignored; the interval from the endpoint descriptor
 *                  configured earlier is used instead
 * @return opaque queue handle (intrq_t *), or NULL on failure
 */
static void *
xhci_create_intr_queue(endpoint_t *const ep,
		       const int reqsize, const int reqcount,
		       const int reqtiming)
{
	/* reqtiming: We ignore it and use the interval from the
		      endpoint descriptor configured earlier. */

	xhci_t *const xhci = XHCI_INST(ep->dev->controller);
	const int slot_id = ep->dev->address;
	const int ep_id = xhci_ep_id(ep);
	transfer_ring_t *const tr = xhci->dev[slot_id].transfer_rings[ep_id];

	if (reqcount > (TRANSFER_RING_SIZE - 2)) {
		xhci_debug("reqcount is too high, at most %d supported\n",
			   TRANSFER_RING_SIZE - 2);
		return NULL;
	}
	if (reqsize > 0x10000) {
		xhci_debug("reqsize is too large, at most 64KiB supported\n");
		return NULL;
	}
	if (xhci->dev[slot_id].interrupt_queues[ep_id]) {
		xhci_debug("Only one interrupt queue per endpoint supported\n");
		return NULL;
	}

	/* Allocate intrq structure and reqdata chunks */

	intrq_t *const intrq = malloc(sizeof(*intrq));
	if (!intrq) {
		xhci_debug("Out of memory\n");
		return NULL;
	}

	/* Walk the ring from the current enqueue position, writing one
	   Normal TRB per request without publishing them yet (cycle bit
	   still owned by us — xhci_clear_trb sets it to !pcs). */
	int i;
	int pcs = tr->pcs;
	trb_t *cur = tr->cur;
	for (i = 0; i < reqcount; ++i) {
		/* A TRB whose cycle bit equals pcs is still owned by the
		   hardware — the ring has no more free entries. */
		if (TRB_GET(C, cur) == pcs) {
			xhci_debug("Not enough empty TRBs\n");
			goto _free_return;
		}
		void *const reqdata = xhci_align(1, reqsize);
		if (!reqdata) {
			xhci_debug("Out of memory\n");
			goto _free_return;
		}
		xhci_clear_trb(cur, pcs);
		cur->ptr_low = virt_to_phys(reqdata);
		cur->ptr_high = 0;
		TRB_SET(TL,	cur, reqsize);
		TRB_SET(TT,	cur, TRB_NORMAL);
		TRB_SET(ISP,	cur, 1);	/* interrupt on short packet */
		TRB_SET(IOC,	cur, 1);	/* interrupt on completion */

		cur = xhci_next_trb(cur, &pcs);
	}

	intrq->size	= reqsize;
	intrq->count	= reqcount;
	intrq->next	= tr->cur;
	intrq->ready	= NULL;
	intrq->ep	= ep;
	xhci->dev[slot_id].interrupt_queues[ep_id] = intrq;

	/* Now enqueue all the prepared TRBs but the last
	   and ring the doorbell. */
	for (i = 0; i < (reqcount - 1); ++i)
		xhci_enqueue_trb(tr);
	xhci->dbreg[slot_id] = ep_id;

	return intrq;

_free_return:
	/* Roll back: free the i request buffers allocated so far. */
	cur = tr->cur;
	for (--i; i >= 0; --i) {
		free(phys_to_virt(cur->ptr_low));
		cur = xhci_next_trb(cur, NULL);
	}
	free(intrq);
	return NULL;
}