/* All the xhci_tds in the ring's TD list should be freed at this point */
void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
{
	struct xhci_virt_device *dev;
	int i;

	/* Slot ID 0 is reserved */
	if (slot_id == 0 || !xhci->devs[slot_id])
		return;

	dev = xhci->devs[slot_id];
	xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
	if (!dev)
		return;

	for (i = 0; i < 31; ++i)
		if (dev->eps[i].ring)
			xhci_ring_free(xhci, dev->eps[i].ring);

	if (dev->ring_cache) {
		for (i = 0; i < dev->num_rings_cached; i++)
			xhci_ring_free(xhci, dev->ring_cache[i]);
		kfree(dev->ring_cache);
	}

	if (dev->in_ctx)
		xhci_free_container_ctx(xhci, dev->in_ctx);
	if (dev->out_ctx)
		xhci_free_container_ctx(xhci, dev->out_ctx);

	kfree(xhci->devs[slot_id]);
	xhci->devs[slot_id] = NULL;
}
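/*
 * Usage sketch (assumption, not taken from this source): the free routine
 * is normally reached from the slot-disable path, after the host controller
 * has released the slot.  "xhci_example_disable_slot" is a hypothetical
 * caller named only for illustration.
 */
static void xhci_example_disable_slot(struct xhci_hcd *xhci, int slot_id)
{
	/* Step 1: issue a Disable Slot command and wait for it (elided). */
	/* Step 2: only then reclaim the host-side bookkeeping. */
	xhci_free_virt_device(xhci, slot_id);
}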
/**
 * Frees all the memory allocated
 *
 * @param ctrl	pointer to the "xhci_ctrl" to be cleaned up
 * @return none
 */
void xhci_cleanup(struct xhci_ctrl *ctrl)
{
	xhci_ring_free(ctrl->event_ring);
	xhci_ring_free(ctrl->cmd_ring);
	xhci_free_virt_devices(ctrl);
	free(ctrl->erst.entries);
	free(ctrl->dcbaa);
	memset(ctrl, '\0', sizeof(struct xhci_ctrl));
}
/**
 * Frees all the memory allocated
 *
 * @param ctrl	pointer to the "xhci_ctrl" to be cleaned up
 * @return none
 */
void xhci_cleanup(struct xhci_ctrl *ctrl)
{
	if (ctrl->event_ring)
		xhci_ring_free(ctrl->event_ring);
	if (ctrl->cmd_ring)
		xhci_ring_free(ctrl->cmd_ring);
	xhci_free_virt_devices(ctrl);
	if (ctrl->erst.entries)
		free((void *)KSEG0ADDR(ctrl->erst.entries));
	if (ctrl->dcbaa)
		free((void *)KSEG0ADDR(ctrl->dcbaa));
	memset(ctrl, '\0', sizeof(struct xhci_ctrl));
}
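/*
 * Background sketch (assumption, illustrative only): on MIPS U-Boot ports
 * the xHC tables are usually accessed through an uncached KSEG1 alias,
 * while malloc()/free() deal in cached KSEG0 pointers.  KSEG0ADDR()
 * converts the alias back to the address free() originally handed out.
 */
static void kseg_alias_example(void)
{
	void *buf = memalign(64, 4096);		/* cached KSEG0 pointer */
	void *dma = (void *)KSEG1ADDR(buf);	/* uncached alias for the xHC */

	/* ... the controller reads and writes through "dma" ... */

	free((void *)KSEG0ADDR(dma));		/* back to the malloc'd address */
}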
/**
 * Frees the virtual devices for the "xhci_ctrl" pointer passed
 *
 * @param ctrl	pointer to the "xhci_ctrl" whose virtual devices are to be freed
 * @return none
 */
static void xhci_free_virt_devices(struct xhci_ctrl *ctrl)
{
	int i;
	int slot_id;
	struct xhci_virt_device *virt_dev;

	/*
	 * Loop through every virt_dev entry.  Slot ID 0 is reserved, so
	 * ctrl->devs[0] stays NULL and is skipped by the check below.
	 */
	for (slot_id = 0; slot_id < MAX_HC_SLOTS; slot_id++) {
		virt_dev = ctrl->devs[slot_id];
		if (!virt_dev)
			continue;

		ctrl->dcbaa->dev_context_ptrs[slot_id] = 0;

		for (i = 0; i < 31; ++i)
			if (virt_dev->eps[i].ring)
				xhci_ring_free(virt_dev->eps[i].ring);

		if (virt_dev->in_ctx)
			xhci_free_container_ctx(virt_dev->in_ctx);
		if (virt_dev->out_ctx)
			xhci_free_container_ctx(virt_dev->out_ctx);

		free(virt_dev);
		/* make sure we are pointing to NULL */
		ctrl->devs[slot_id] = NULL;
	}
}
void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;
	int i, ret;

	ret = xhci_check_args(hcd, udev, NULL, 0, __func__);
	if (ret <= 0)
		return;
	xhci = hcd_to_xhci(hcd);

	if (!xhci->devs || !xhci->devs[udev->slot_id]) {
		xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
				__func__);
		return;
	}
	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	virt_dev = xhci->devs[udev->slot_id];
	/* Free any rings allocated for newly added endpoints */
	for (i = 0; i < 31; ++i) {
		if (virt_dev->eps[i].new_ring) {
			xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
			virt_dev->eps[i].new_ring = NULL;
		}
	}
	xhci_zero_in_ctx(xhci, virt_dev);
}
/* All the xhci_tds in the ring's TD list should be freed at this point */
void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
{
	struct xhci_virt_device *dev;
	int i;

	/* Slot ID 0 is reserved */
	if (slot_id == 0 || !xhci->devs[slot_id])
		return;

	dev = xhci->devs[slot_id];
	xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
	UBI_DMA_FLUSH(&xhci->dcbaa->dev_context_ptrs[slot_id], sizeof(u64));
	if (!dev)
		return;

	for (i = 0; i < 31; ++i)
		if (dev->ep_rings[i])
			xhci_ring_free(xhci, dev->ep_rings[i]);

	if (dev->in_ctx)
		xhci_free_container_ctx(xhci, dev->in_ctx);
	if (dev->out_ctx)
		xhci_free_container_ctx(xhci, dev->out_ctx);

	kfree(xhci->devs[slot_id]);
	xhci->devs[slot_id] = NULL;
}
/**
 * Create a new ring with zero or more segments.
 *
 * Link each segment together into a ring.
 * Set the end flag and the cycle toggle bit on the last segment.
 * See section 4.9.1 and figures 15 and 16.
 */
static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
		unsigned int num_segs, bool link_trbs, gfp_t flags)
{
	struct xhci_ring *ring;
	struct xhci_segment *prev;

	ring = kzalloc(sizeof(*ring), flags);
	xhci_dbg(xhci, "Allocating ring at %p\n", ring);
	if (!ring)
		return NULL;

	INIT_LIST_HEAD(&ring->td_list);
	INIT_LIST_HEAD(&ring->cancelled_td_list);
	if (num_segs == 0)
		return ring;

	ring->first_seg = xhci_segment_alloc(xhci, flags);
	if (!ring->first_seg)
		goto fail;
	num_segs--;

	prev = ring->first_seg;
	while (num_segs > 0) {
		struct xhci_segment *next;

		next = xhci_segment_alloc(xhci, flags);
		if (!next)
			goto fail;
		xhci_link_segments(xhci, prev, next, link_trbs);

		prev = next;
		num_segs--;
	}
	xhci_link_segments(xhci, prev, ring->first_seg, link_trbs);

	if (link_trbs) {
		/* See section 4.9.2.1 and 6.4.4.1 */
		prev->trbs[TRBS_PER_SEGMENT-1].link.control |= (LINK_TOGGLE);
		UBI_DMA_FLUSH(&prev->trbs[TRBS_PER_SEGMENT-1],
				sizeof(prev->trbs[TRBS_PER_SEGMENT-1]));
		xhci_dbg(xhci, "Wrote link toggle flag to"
				" segment %p (virtual), 0x%llx (DMA)\n",
				prev, (unsigned long long)prev->dma);
	}
	/* The ring is empty, so the enqueue pointer == dequeue pointer */
	ring->enqueue = ring->first_seg->trbs;
	ring->enq_seg = ring->first_seg;
	ring->dequeue = ring->enqueue;
	ring->deq_seg = ring->first_seg;
	/* The ring is initialized to 0. The producer must write 1 to the
	 * cycle bit to handover ownership of the TRB, so PCS = 1.  The
	 * consumer must compare CCS to the cycle bit to check ownership,
	 * so CCS = 1.
	 */
	ring->cycle_state = 1;

	return ring;

fail:
	xhci_ring_free(xhci, ring);
	return NULL;
}
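/*
 * Usage sketch (assumption, not from this source): how a memory-init path
 * might allocate a one-segment command ring.  With link_trbs set, the
 * segment's link TRB points back at the first segment and carries the
 * toggle bit, as set up above.
 */
static int xhci_example_alloc_cmd_ring(struct xhci_hcd *xhci, gfp_t flags)
{
	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, flags);
	if (!xhci->cmd_ring)
		return -ENOMEM;
	return 0;
}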
int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
	int i;
	int ret = 0;
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;

	ret = xhci_check_args(hcd, udev, NULL, 0, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);

	if (!udev->slot_id || !xhci->devs || !xhci->devs[udev->slot_id]) {
		xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
				__func__);
		return -EINVAL;
	}
	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	virt_dev = xhci->devs[udev->slot_id];

	/* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
	ctrl_ctx->add_flags |= SLOT_FLAG;
	ctrl_ctx->add_flags &= ~EP0_FLAG;
	ctrl_ctx->drop_flags &= ~SLOT_FLAG;
	ctrl_ctx->drop_flags &= ~EP0_FLAG;
	xhci_dbg(xhci, "New Input Control Context:\n");
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	xhci_dbg_ctx(xhci, virt_dev->in_ctx,
			LAST_CTX_TO_EP_NUM(slot_ctx->dev_info));

	ret = xhci_configure_endpoint(xhci, udev, NULL, false, false);
	if (ret)
		return ret;

	xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
	xhci_dbg_ctx(xhci, virt_dev->out_ctx,
			LAST_CTX_TO_EP_NUM(slot_ctx->dev_info));

	xhci_zero_in_ctx(xhci, virt_dev);
	/* Install new rings and free any old rings */
	for (i = 1; i < 31; ++i) {
		if (virt_dev->eps[i].new_ring) {
			xhci_ring_free(xhci, virt_dev->eps[i].ring);
			virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
			virt_dev->eps[i].new_ring = NULL;
		}
	}

	return ret;
}
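/*
 * Sketch (assumption, hypothetical helper): xhci_check_bandwidth() expects
 * each added endpoint to already be marked in the input control context.
 * In that context, bit 0 covers the slot context and bit N+1 covers
 * endpoint index N, so marking an endpoint looks roughly like this.
 */
static void example_mark_added_ep(struct xhci_input_control_ctx *ctrl_ctx,
		unsigned int ep_index)
{
	ctrl_ctx->add_flags |= 1 << (ep_index + 1);
	ctrl_ctx->drop_flags &= ~(1 << (ep_index + 1));
}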
/**
 * Create a new ring with zero or more segments.
 *
 * Link each segment together into a ring.
 * Set the end flag and the cycle toggle bit on the last segment.
 * See section 4.9.1 and figures 15 and 16.
 */
static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
		unsigned int num_segs, bool link_trbs, gfp_t flags)
{
	struct xhci_ring *ring;
	struct xhci_segment *prev;

	ring = kzalloc(sizeof(*ring), flags);
	xhci_dbg(xhci, "Allocating ring at %p\n", ring);
	if (!ring)
		return NULL;

	INIT_LIST_HEAD(&ring->td_list);
	if (num_segs == 0)
		return ring;

	ring->first_seg = xhci_segment_alloc(xhci, flags);
	if (!ring->first_seg)
		goto fail;
	num_segs--;

	prev = ring->first_seg;
	while (num_segs > 0) {
		struct xhci_segment *next;

		next = xhci_segment_alloc(xhci, flags);
		if (!next)
			goto fail;
		xhci_link_segments(xhci, prev, next, link_trbs);

		prev = next;
		num_segs--;
	}
	xhci_link_segments(xhci, prev, ring->first_seg, link_trbs);

	if (link_trbs) {
		/* See section 4.9.2.1 and 6.4.4.1 */
		prev->trbs[TRBS_PER_SEGMENT-1].link.control |= (LINK_TOGGLE);
		xhci_dbg(xhci, "Wrote link toggle flag to"
				" segment %p (virtual), 0x%llx (DMA)\n",
				prev, (unsigned long long)prev->dma);
	}
	xhci_initialize_ring_info(ring);
	return ring;

fail:
	xhci_ring_free(xhci, ring);
	return NULL;
}
void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		unsigned int ep_index)
{
	int rings_cached;

	rings_cached = virt_dev->num_rings_cached;
	if (rings_cached < XHCI_MAX_RINGS_CACHED) {
		/* Store at the pre-increment index so slot 0 is used and the
		 * write stays inside ring_cache[0..XHCI_MAX_RINGS_CACHED-1].
		 */
		virt_dev->ring_cache[rings_cached] =
			virt_dev->eps[ep_index].ring;
		virt_dev->num_rings_cached++;
		xhci_dbg(xhci, "Cached old ring, "
				"%d ring%s cached\n",
				virt_dev->num_rings_cached,
				(virt_dev->num_rings_cached > 1) ? "s" : "");
	} else {
		xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
		xhci_dbg(xhci, "Ring cache full (%d rings), "
				"freeing ring\n",
				virt_dev->num_rings_cached);
	}
	virt_dev->eps[ep_index].ring = NULL;
}
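/*
 * Allocation sketch (assumption, mirroring how virt-device setup typically
 * sizes the cache): ring_cache must hold XHCI_MAX_RINGS_CACHED pointers for
 * the indexing above to stay in bounds.
 */
static int example_alloc_ring_cache(struct xhci_virt_device *virt_dev,
		gfp_t flags)
{
	virt_dev->ring_cache = kzalloc(sizeof(struct xhci_ring *) *
			XHCI_MAX_RINGS_CACHED, flags);
	if (!virt_dev->ring_cache)
		return -ENOMEM;
	virt_dev->num_rings_cached = 0;
	return 0;
}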