/**
 * Allocates a generic ring segment from the ring pool, sets the dma address,
 * initializes the segment to zero, and sets the private next pointer to NULL.
 *
 * Section 4.11.1.1:
 * "All components of all Command and Transfer TRBs shall be initialized to '0'"
 *
 * @param none
 * @return pointer to the newly allocated SEGMENT
 */
static struct xhci_segment *xhci_segment_alloc(void)
{
	struct xhci_segment *seg;

	seg = (struct xhci_segment *)KSEG1ADDR(
			xhci_malloc(sizeof(struct xhci_segment)));
	BUG_ON(!seg);

	seg->trbs = (union xhci_trb *)KSEG1ADDR(xhci_malloc(SEGMENT_SIZE));

	seg->next = NULL;

	return seg;
}
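/*
 * xhci_malloc() is not defined in this excerpt. The sketch below shows one
 * plausible implementation, inferred from how it is used above: the
 * allocation must come back zero-filled (per section 4.11.1.1 quoted in the
 * comment on xhci_segment_alloc()) and cache-line aligned so that
 * xhci_flush_cache() can safely operate on it. XHCI_ALIGNMENT and
 * CACHELINE_SIZE are assumed constants, not taken from this file.
 */
static void *xhci_malloc(unsigned int size)
{
	void *ptr;
	size_t align = max(XHCI_ALIGNMENT, CACHELINE_SIZE);

	ptr = memalign(align, ALIGN(size, align));
	BUG_ON(!ptr);
	memset(ptr, '\0', size);

	/* push the zeroed contents out to memory for the controller */
	xhci_flush_cache((uintptr_t)ptr, size);

	return ptr;
}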
/**
 * Set up the scratchpad buffer array and scratchpad buffers
 *
 * @param ctrl	host controller data structure
 * @return -ENOMEM if buffer allocation fails, 0 on success
 */
static int xhci_scratchpad_alloc(struct xhci_ctrl *ctrl)
{
	struct xhci_hccr *hccr = ctrl->hccr;
	struct xhci_hcor *hcor = ctrl->hcor;
	struct xhci_scratchpad *scratchpad;
	int num_sp;
	uint32_t page_size;
	void *buf;
	int i;

	num_sp = HCS_MAX_SCRATCHPAD(xhci_readl(&hccr->cr_hcsparams2));
	if (!num_sp)
		return 0;

	scratchpad = malloc(sizeof(*scratchpad));
	if (!scratchpad)
		goto fail_sp;
	ctrl->scratchpad = scratchpad;

	scratchpad->sp_array = xhci_malloc(num_sp * sizeof(u64));
	if (!scratchpad->sp_array)
		goto fail_sp2;
	ctrl->dcbaa->dev_context_ptrs[0] =
		cpu_to_le64((uintptr_t)scratchpad->sp_array);

	xhci_flush_cache((uintptr_t)&ctrl->dcbaa->dev_context_ptrs[0],
			sizeof(ctrl->dcbaa->dev_context_ptrs[0]));

	page_size = xhci_readl(&hcor->or_pagesize) & 0xffff;
	for (i = 0; i < 16; i++) {
		if ((0x1 & page_size) != 0)
			break;
		page_size = page_size >> 1;
	}
	BUG_ON(i == 16);

	page_size = 1 << (i + 12);
	buf = memalign(page_size, num_sp * page_size);
	if (!buf)
		goto fail_sp3;
	memset(buf, '\0', num_sp * page_size);
	xhci_flush_cache((uintptr_t)buf, num_sp * page_size);

	for (i = 0; i < num_sp; i++) {
		uintptr_t ptr = (uintptr_t)buf + i * page_size;

		scratchpad->sp_array[i] = cpu_to_le64(ptr);
	}

	return 0;

fail_sp3:
	free(scratchpad->sp_array);

fail_sp2:
	free(scratchpad);
	ctrl->scratchpad = NULL;

fail_sp:
	return -ENOMEM;
}
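/*
 * A matching teardown path, as a minimal sketch (not taken from this
 * excerpt): since the scratchpad buffers above are allocated as one
 * contiguous memalign() block whose start address is recorded in
 * sp_array[0], the whole block can be released through that entry.
 */
static void xhci_scratchpad_free(struct xhci_ctrl *ctrl)
{
	if (!ctrl->scratchpad)
		return;

	/* Detach the scratchpad array from the DCBAA first */
	ctrl->dcbaa->dev_context_ptrs[0] = 0;

	free((void *)(uintptr_t)le64_to_cpu(ctrl->scratchpad->sp_array[0]));
	free(ctrl->scratchpad->sp_array);
	free(ctrl->scratchpad);
	ctrl->scratchpad = NULL;
}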
/**
 * Allocates a virtual device for the given USB device
 *
 * @param udev	pointer to USB device structure
 * @return 0 on success, negative error code on failure
 */
int xhci_alloc_virt_device(struct usb_device *udev)
{
	u64 byte_64 = 0;
	unsigned int slot_id = udev->slot_id;
	struct xhci_virt_device *virt_dev;
	struct xhci_ctrl *ctrl = udev->controller;

	/* Slot ID 0 is reserved */
	if (ctrl->devs[slot_id]) {
		printf("Virt dev for slot[%d] already allocated\n", slot_id);
		return -EEXIST;
	}

	ctrl->devs[slot_id] = (struct xhci_virt_device *)KSEG1ADDR(
			xhci_malloc(sizeof(struct xhci_virt_device)));
	if (!ctrl->devs[slot_id]) {
		puts("Failed to allocate virtual device\n");
		return -ENOMEM;
	}

	memset(ctrl->devs[slot_id], 0, sizeof(struct xhci_virt_device));
	virt_dev = ctrl->devs[slot_id];

	/* Allocate the (output) device context that will be used in the HC. */
	virt_dev->out_ctx = xhci_alloc_container_ctx(ctrl,
					XHCI_CTX_TYPE_DEVICE);
	if (!virt_dev->out_ctx) {
		puts("Failed to allocate out context for virt dev\n");
		return -ENOMEM;
	}

	/* Allocate the (input) device context for address device command */
	virt_dev->in_ctx = xhci_alloc_container_ctx(ctrl,
					XHCI_CTX_TYPE_INPUT);
	if (!virt_dev->in_ctx) {
		puts("Failed to allocate in context for virt dev\n");
		return -ENOMEM;
	}

	/* Allocate endpoint 0 ring */
	virt_dev->eps[0].ring = xhci_ring_alloc(1, true);

	byte_64 = (uintptr_t)(virt_dev->out_ctx->bytes);

	/* Point to output device context in dcbaa. */
	ctrl->dcbaa->dev_context_ptrs[slot_id] = byte_64;

	xhci_flush_cache((uintptr_t)&ctrl->dcbaa->dev_context_ptrs[slot_id],
			sizeof(__le64));

	return 0;
}
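/*
 * Usage sketch (hypothetical caller, not from this excerpt): the slot ID
 * is assumed to have been obtained from an Enable Slot command completion
 * before the virtual device can be allocated; the subsequent Address
 * Device command would then consume virt_dev->in_ctx.
 */
static int xhci_setup_device_sketch(struct usb_device *udev)
{
	/* udev->slot_id assumed filled in from the Enable Slot completion */
	int ret = xhci_alloc_virt_device(udev);

	if (ret)
		return ret;

	/* issue Address Device here, using the freshly allocated in_ctx */
	return 0;
}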
/**
 * Allocates the Container context
 *
 * @param ctrl	Host controller data structure
 * @param type	type of XHCI Container Context
 * @return NULL if failed else pointer to the context on success
 */
static struct xhci_container_ctx
		*xhci_alloc_container_ctx(struct xhci_ctrl *ctrl, int type)
{
	struct xhci_container_ctx *ctx;

	ctx = (struct xhci_container_ctx *)
		malloc(sizeof(struct xhci_container_ctx));
	BUG_ON(!ctx);

	BUG_ON((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT));
	ctx->type = type;
	ctx->size = (MAX_EP_CTX_NUM + 1) *
			CTX_SIZE(readl(&ctrl->hccr->cr_hccparams));
	if (type == XHCI_CTX_TYPE_INPUT)
		ctx->size += CTX_SIZE(readl(&ctrl->hccr->cr_hccparams));

	ctx->bytes = (u8 *)xhci_malloc(ctx->size);

	return ctx;
}
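/*
 * Sizing example (a sketch, assuming the usual xHCI layout where
 * MAX_EP_CTX_NUM is 31 and CTX_SIZE() evaluates to 32 bytes, or 64 bytes
 * when the CSZ bit of HCCPARAMS is set): a device context is one slot
 * context plus 31 endpoint contexts, so (31 + 1) * 64 = 2048 bytes with
 * 64-byte contexts; an input context prepends one input control context
 * on top of that, giving (31 + 2) * 64 = 2112 bytes.
 */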
/**
 * Create a new ring with zero or more segments.
 * TODO: current code only uses one-time-allocated single-segment rings
 * of 1KB anyway, so we might as well get rid of all the segment and
 * linking code (and maybe increase the size a bit, e.g. 4KB).
 *
 * Link each segment together into a ring.
 * Set the end flag and the cycle toggle bit on the last segment.
 * See section 4.9.2 and figures 15 and 16 of XHCI spec rev1.0.
 *
 * @param num_segs	number of segments in the ring
 * @param link_trbs	flag to indicate whether to link the trbs or NOT
 * @return pointer to the newly created RING
 */
struct xhci_ring *xhci_ring_alloc(unsigned int num_segs, int link_trbs)
{
	struct xhci_ring *ring;
	struct xhci_segment *prev;

	ring = (struct xhci_ring *)KSEG1ADDR(
			xhci_malloc(sizeof(struct xhci_ring)));
	BUG_ON(!ring);

	if (num_segs == 0)
		return ring;

	ring->first_seg = xhci_segment_alloc();
	BUG_ON(!ring->first_seg);

	num_segs--;

	prev = ring->first_seg;
	while (num_segs > 0) {
		struct xhci_segment *next;

		next = xhci_segment_alloc();
		BUG_ON(!next);

		xhci_link_segments(prev, next, link_trbs);

		prev = next;
		num_segs--;
	}
	xhci_link_segments(prev, ring->first_seg, link_trbs);
	if (link_trbs) {
		/* See section 4.9.2.1 and 6.4.4.1 */
		prev->trbs[TRBS_PER_SEGMENT-1].link.control |=
					cpu_to_le32(LINK_TOGGLE);
	}
	xhci_initialize_ring_info(ring);

	return ring;
}
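/*
 * xhci_link_segments() is not defined in this excerpt; the sketch below
 * shows one plausible implementation consistent with how it is called
 * above: chain the software 'next' pointers and, when link_trbs is set,
 * turn the last TRB of 'prev' into a Link TRB pointing at 'next'.
 * TRB_TYPE, TRB_LINK and TRB_TYPE_BITMASK are assumed to come from the
 * driver headers.
 */
static void xhci_link_segments(struct xhci_segment *prev,
			       struct xhci_segment *next, int link_trbs)
{
	u32 val;

	if (!prev || !next)
		return;
	prev->next = next;
	if (link_trbs) {
		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr =
			cpu_to_le64((uintptr_t)next->trbs);

		/* Set the last TRB in the segment to a Link TRB type ID */
		val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
		val &= ~TRB_TYPE_BITMASK;
		val |= TRB_TYPE(TRB_LINK);
		prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
	}
}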
/**
 * Allocates the necessary data structures
 * for XHCI host controller
 *
 * @param ctrl	Host controller data structure
 * @param hccr	pointer to HOST Controller Control Registers
 * @param hcor	pointer to HOST Controller Operational Registers
 * @return 0 on success, -ENOMEM on failure
 */
int xhci_mem_init(struct xhci_ctrl *ctrl, struct xhci_hccr *hccr,
					struct xhci_hcor *hcor)
{
	uint64_t val_64;
	uint64_t trb_64;
	uint32_t val;
	unsigned long deq;
	int i;
	struct xhci_segment *seg;

	/* DCBAA initialization */
	ctrl->dcbaa = (struct xhci_device_context_array *)
			xhci_malloc(sizeof(struct xhci_device_context_array));
	if (ctrl->dcbaa == NULL) {
		puts("unable to allocate DCBAA\n");
		return -ENOMEM;
	}

	val_64 = (uintptr_t)ctrl->dcbaa;
	/* Set the pointer in the DCBAA register */
	xhci_writeq(&hcor->or_dcbaap, val_64);

	/* Command ring control pointer register initialization */
	ctrl->cmd_ring = xhci_ring_alloc(1, true);

	/* Set the address in the Command Ring Control register */
	trb_64 = (uintptr_t)ctrl->cmd_ring->first_seg->trbs;
	val_64 = xhci_readq(&hcor->or_crcr);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		(trb_64 & (u64) ~CMD_RING_RSVD_BITS) |
		ctrl->cmd_ring->cycle_state;
	xhci_writeq(&hcor->or_crcr, val_64);

	/* write the address of db register */
	val = xhci_readl(&hccr->cr_dboff);
	val &= DBOFF_MASK;
	ctrl->dba = (struct xhci_doorbell_array *)((char *)hccr + val);

	/* write the address of runtime register */
	val = xhci_readl(&hccr->cr_rtsoff);
	val &= RTSOFF_MASK;
	ctrl->run_regs = (struct xhci_run_regs *)((char *)hccr + val);

	/* writing the address of the ir_set structure */
	ctrl->ir_set = &ctrl->run_regs->ir_set[0];

	/* Event ring does not maintain link TRB */
	ctrl->event_ring = xhci_ring_alloc(ERST_NUM_SEGS, false);
	ctrl->erst.entries = (struct xhci_erst_entry *)
		xhci_malloc(sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS);

	ctrl->erst.num_entries = ERST_NUM_SEGS;

	for (val = 0, seg = ctrl->event_ring->first_seg;
			val < ERST_NUM_SEGS;
			val++) {
		struct xhci_erst_entry *entry = &ctrl->erst.entries[val];

		trb_64 = (uintptr_t)seg->trbs;
		xhci_writeq(&entry->seg_addr, trb_64);
		entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
		entry->rsvd = 0;
		seg = seg->next;
	}
	xhci_flush_cache((uintptr_t)ctrl->erst.entries,
			ERST_NUM_SEGS * sizeof(struct xhci_erst_entry));

	deq = (unsigned long)ctrl->event_ring->dequeue;

	/* Update HC event ring dequeue pointer */
	xhci_writeq(&ctrl->ir_set->erst_dequeue,
			(u64)deq & (u64)~ERST_PTR_MASK);

	/* set ERST count with the number of entries in the segment table */
	val = xhci_readl(&ctrl->ir_set->erst_size);
	val &= ERST_SIZE_MASK;
	val |= ERST_NUM_SEGS;
	xhci_writel(&ctrl->ir_set->erst_size, val);

	/* this is the event ring segment table pointer */
	val_64 = xhci_readq(&ctrl->ir_set->erst_base);
	val_64 &= ERST_PTR_MASK;
	val_64 |= ((uintptr_t)(ctrl->erst.entries) & ~ERST_PTR_MASK);

	xhci_writeq(&ctrl->ir_set->erst_base, val_64);

	/* initializing the virtual devices to NULL */
	for (i = 0; i < MAX_HC_SLOTS; ++i)
		ctrl->devs[i] = NULL;

	/*
	 * Zero this register completely, or some spurious Device
	 * Notification Events might mess things up here.
	 */
	xhci_writel(&hcor->or_dnctrl, 0x0);

	return 0;
}
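/*
 * Bring-up sketch (hypothetical caller, not from this excerpt): the usual
 * ordering is to halt/reset the controller, build the data structures with
 * xhci_mem_init(), and only then start it. xhci_reset() and xhci_start()
 * are assumed helpers, not defined here.
 */
static int xhci_lowlevel_init_sketch(struct xhci_ctrl *ctrl,
				     struct xhci_hccr *hccr,
				     struct xhci_hcor *hcor)
{
	if (xhci_reset(hcor))			/* assumed helper */
		return -EIO;

	if (xhci_mem_init(ctrl, hccr, hcor) < 0)
		return -ENOMEM;

	xhci_start(hcor);			/* assumed helper */

	return 0;
}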