/*
 * Restore the operational and interrupter register state that was saved
 * in xhci->s3 before the controller lost power (system suspend/resume).
 * The write order follows the saved-state layout: operational registers
 * first, then the primary interrupter set.
 */
static void xhci_restore_registers(struct xhci_hcd *xhci)
{
	writel(xhci->s3.command, &xhci->op_regs->command);
	writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
	xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
	writel(xhci->s3.config_reg, &xhci->op_regs->config_reg);
	/* Restore the primary interrupter's event ring registers */
	writel(xhci->s3.erst_size, &xhci->ir_set->erst_size);
	xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
	xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue);
	writel(xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
	writel(xhci->s3.irq_control, &xhci->ir_set->irq_control);
}
/*
 * Acknowledge pending interrupt status, process one event from the event
 * ring, and advance the event-ring dequeue pointer.
 *
 * NOTE(review): the read-backs of irq_pending after each write look like
 * posted-write flushes — confirm against the enclosing driver's MMIO
 * conventions.  Statement order here is significant; do not reorder.
 */
static void xhci_work(struct xhci_hcd *xhci)
{
	u32 temp;
	u64 temp_64;

	/* Ack the event interrupt bit in the status register (presumably RW1C) */
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	temp |= STS_EINT;
	xhci_writel(xhci, temp, &xhci->op_regs->status);
	/* Set bits 0 and 1 of the interrupter's irq_pending register */
	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	temp |= 0x3;
	xhci_writel(xhci, temp, &xhci->ir_set->irq_pending);
	/* Read back to flush the posted MMIO write */
	xhci_readl(xhci, &xhci->ir_set->irq_pending);

	xhci_handle_event(xhci);

	/* Write ERST_EHB into the dequeue pointer to clear the busy flag */
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	xhci_write_64(xhci, temp_64 | ERST_EHB, &xhci->ir_set->erst_dequeue);
	/* Read back to flush the posted MMIO write */
	xhci_readl(xhci, &xhci->ir_set->irq_pending);
}
static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci) { u64 val_64; /* step 2: initialize command ring buffer */ val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) | (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, xhci->cmd_ring->dequeue) & (u64) ~CMD_RING_RSVD_BITS) | xhci->cmd_ring->cycle_state; xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Setting command ring address to 0x%llx", (long unsigned long) val_64); xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring); }
/*
 * Initialize the Debug Capability (DbC) contexts: the info context with
 * the DMA addresses of the four descriptor strings, the bulk-in and
 * bulk-out endpoint contexts, and the DbC context/device-info registers.
 *
 * @xhci:          host controller; dbc state is read from xhci->dbc
 * @string_length: combined length of the DbC string descriptors, written
 *                 into the info context's length field
 *
 * No-op if the controller has no DbC instance.
 *
 * Fix: values passed to writel() must be in CPU byte order — writel()
 * itself performs the CPU-to-little-endian conversion.  The original
 * wrapped the devinfo values in cpu_to_le32(), which double-swaps on
 * big-endian hosts (and mismatches __le32 vs u32 under sparse).  The
 * cpu_to_le32()/cpu_to_le64() calls on the in-memory context fields are
 * correct and are kept: those structures are read directly by the
 * little-endian hardware via DMA.
 */
static void xhci_dbc_init_contexts(struct xhci_hcd *xhci, u32 string_length)
{
	struct xhci_dbc		*dbc;
	struct dbc_info_context	*info;
	struct xhci_ep_ctx	*ep_ctx;
	u32			dev_info;
	dma_addr_t		deq, dma;
	unsigned int		max_burst;

	dbc = xhci->dbc;
	if (!dbc)
		return;

	/* Populate info Context: */
	info = (struct dbc_info_context *)dbc->ctx->bytes;
	dma = dbc->string_dma;
	info->string0 = cpu_to_le64(dma);
	info->manufacturer = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH);
	info->product = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 2);
	info->serial = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 3);
	info->length = cpu_to_le32(string_length);

	/* Populate bulk out endpoint context: */
	ep_ctx = dbc_bulkout_ctx(dbc);
	max_burst = DBC_CTRL_MAXBURST(readl(&dbc->regs->control));
	deq = dbc_bulkout_enq(dbc);
	ep_ctx->ep_info = 0;
	ep_ctx->ep_info2 = dbc_epctx_info2(BULK_OUT_EP, 1024, max_burst);
	ep_ctx->deq = cpu_to_le64(deq | dbc->ring_out->cycle_state);

	/* Populate bulk in endpoint context: */
	ep_ctx = dbc_bulkin_ctx(dbc);
	deq = dbc_bulkin_enq(dbc);
	ep_ctx->ep_info = 0;
	ep_ctx->ep_info2 = dbc_epctx_info2(BULK_IN_EP, 1024, max_burst);
	ep_ctx->deq = cpu_to_le64(deq | dbc->ring_in->cycle_state);

	/* Set DbC context and info registers: */
	xhci_write_64(xhci, dbc->ctx->dma, &dbc->regs->dccp);

	dev_info = (DBC_VENDOR_ID << 16) | DBC_PROTOCOL;
	writel(dev_info, &dbc->regs->devinfo1);

	dev_info = (DBC_DEVICE_REV << 16) | DBC_PRODUCT_ID;
	writel(dev_info, &dbc->regs->devinfo2);
}