static void cancel_userptr(struct work_struct *work)
{
	struct i915_mmu_object *mo = container_of(work, typeof(*mo), work);
	struct drm_i915_gem_object *obj = mo->obj;
	struct work_struct *active;

	/* Cancel any active worker and force us to re-evaluate gup */
	mutex_lock(&obj->mm.lock);
	active = fetch_and_zero(&obj->userptr.work);
	mutex_unlock(&obj->mm.lock);
	if (active)
		goto out;

	i915_gem_object_wait(obj, I915_WAIT_ALL, MAX_SCHEDULE_TIMEOUT, NULL);

	mutex_lock(&obj->base.dev->struct_mutex);

	/* We are inside a kthread context and can't be interrupted */
	if (i915_gem_object_unbind(obj) == 0)
		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
	WARN_ONCE(i915_gem_object_has_pages(obj),
		  "Failed to release pages: bind_count=%d, pages_pin_count=%d, pin_global=%d\n",
		  obj->bind_count,
		  atomic_read(&obj->mm.pages_pin_count),
		  obj->pin_global);

	mutex_unlock(&obj->base.dev->struct_mutex);

out:
	i915_gem_object_put(obj);
}
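/*
 * For reference: fetch_and_zero() used above is the small i915 helper that
 * returns the old value and writes zero/NULL back. It is not atomic by
 * itself - in cancel_userptr() it relies on obj->mm.lock being held around
 * it. It is defined in i915_utils.h roughly along these lines (sketch, check
 * your tree for the exact definition):
 */
#define fetch_and_zero(ptr) ({				\
	typeof(*ptr) __T = *(ptr);			\
	*(ptr) = (typeof(*ptr))0;			\
	__T;						\
})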
/**
 * i915_gem_shrink - Shrink buffer object caches
 * @dev_priv: i915 device
 * @target: amount of memory to make available, in pages
 * @nr_scanned: optional output for number of pages scanned (incremental)
 * @flags: control flags for selecting cache types
 *
 * This function is the main interface to the shrinker. It will try to release
 * up to @target pages of main memory backing storage from buffer objects.
 * Selection of the specific caches can be done with @flags. This is e.g. useful
 * when purgeable objects should be removed from caches preferentially.
 *
 * Note that it's not guaranteed that the amount released is actually available
 * as free system memory - the pages might still be in use due to other reasons
 * (like cpu mmaps), or the mm core may have reused them before we could grab
 * them. Therefore code that needs to explicitly shrink buffer object caches
 * (e.g. to avoid deadlocks in memory reclaim) must fall back to
 * i915_gem_shrink_all() (sketched after this function).
 *
 * Also note that any kind of pinning (both per-vma address space pins and
 * backing storage pins at the buffer object level) results in the shrinker
 * code having to skip the object.
 *
 * Returns:
 * The number of pages of backing storage actually released.
 */
unsigned long
i915_gem_shrink(struct drm_i915_private *dev_priv,
		unsigned long target,
		unsigned long *nr_scanned,
		unsigned flags)
{
	const struct {
		struct list_head *list;
		unsigned int bit;
	} phases[] = {
		{ &dev_priv->mm.unbound_list, I915_SHRINK_UNBOUND },
		{ &dev_priv->mm.bound_list, I915_SHRINK_BOUND },
		{ NULL, 0 },
	}, *phase;
	unsigned long count = 0;
	unsigned long scanned = 0;
	bool unlock;

	if (!shrinker_lock(dev_priv, &unlock))
		return 0;

	/*
	 * When shrinking the active list, also consider active contexts.
	 * Active contexts are pinned until they are retired, and so can
	 * not be simply unbound to retire and unpin their pages. To shrink
	 * the contexts, we must wait until the gpu is idle.
	 *
	 * We don't care about errors here; if we cannot wait upon the GPU,
	 * we will free as much as we can and hope to get a second chance.
	 */
	if (flags & I915_SHRINK_ACTIVE)
		i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED);

	trace_i915_gem_shrink(dev_priv, target, flags);
	i915_gem_retire_requests(dev_priv);

	/*
	 * Unbinding of objects will require HW access; let us not wake the
	 * device just to recover a little memory. If absolutely necessary,
	 * we will force the wake during oom-notifier.
	 */
	if ((flags & I915_SHRINK_BOUND) &&
	    !intel_runtime_pm_get_if_in_use(dev_priv))
		flags &= ~I915_SHRINK_BOUND;

	/*
	 * As we may completely rewrite the (un)bound list whilst unbinding
	 * (due to retiring requests) we have to strictly process only
	 * one element of the list at the time, and recheck the list
	 * on every iteration.
	 *
	 * In particular, we must hold a reference whilst removing the
	 * object as we may end up waiting for and/or retiring the objects.
	 * This might release the final reference (held by the active list)
	 * and result in the object being freed from under us. This is
	 * similar to the precautions the eviction code must take whilst
	 * removing objects.
	 *
	 * Also note that although these lists do not hold a reference to
	 * the object we can safely grab one here: The final object
	 * unreferencing and the bound_list are both protected by the
	 * dev->struct_mutex and so we won't ever be able to observe an
	 * object on the bound_list with a zero reference count.
	 */
	for (phase = phases; phase->list; phase++) {
		struct list_head still_in_list;
		struct drm_i915_gem_object *obj;

		if ((flags & phase->bit) == 0)
			continue;

		INIT_LIST_HEAD(&still_in_list);

		/*
		 * We serialize our access to unreferenced objects through
		 * the use of the struct_mutex. While the objects are not
		 * yet freed (due to RCU then a workqueue) we still want
		 * to be able to shrink their pages, so they remain on
		 * the unbound/bound list until actually freed.
		 */
		spin_lock(&dev_priv->mm.obj_lock);
		while (count < target &&
		       (obj = list_first_entry_or_null(phase->list,
						       typeof(*obj),
						       mm.link))) {
			list_move_tail(&obj->mm.link, &still_in_list);

			if (flags & I915_SHRINK_PURGEABLE &&
			    obj->mm.madv != I915_MADV_DONTNEED)
				continue;

			if (flags & I915_SHRINK_VMAPS &&
			    !is_vmalloc_addr(obj->mm.mapping))
				continue;

			if (!(flags & I915_SHRINK_ACTIVE) &&
			    (i915_gem_object_is_active(obj) ||
			     i915_gem_object_is_framebuffer(obj)))
				continue;

			if (!can_release_pages(obj))
				continue;

			spin_unlock(&dev_priv->mm.obj_lock);

			if (unsafe_drop_pages(obj)) {
				/* May arrive from get_pages on another bo */
				mutex_lock_nested(&obj->mm.lock,
						  I915_MM_SHRINKER);
				if (!i915_gem_object_has_pages(obj)) {
					__i915_gem_object_invalidate(obj);
					count += obj->base.size >> PAGE_SHIFT;
				}
				mutex_unlock(&obj->mm.lock);
			}
			scanned += obj->base.size >> PAGE_SHIFT;

			spin_lock(&dev_priv->mm.obj_lock);
		}
		list_splice_tail(&still_in_list, phase->list);
		spin_unlock(&dev_priv->mm.obj_lock);
	}
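/*
 * As promised by the kernel-doc above, callers that must guarantee forward
 * progress fall back to i915_gem_shrink_all(). A minimal sketch of that
 * wrapper in this era of the driver - it grabs an explicit runtime-pm wakeref
 * so bound objects can be unbound even while the device is asleep, then scans
 * everything including active objects. Flag set and pm helpers may differ
 * between kernel versions; treat this as illustrative, not authoritative:
 */
unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
{
	unsigned long freed;

	intel_runtime_pm_get(dev_priv);
	freed = i915_gem_shrink(dev_priv, -1UL, NULL,
				I915_SHRINK_BOUND |
				I915_SHRINK_UNBOUND |
				I915_SHRINK_ACTIVE);
	intel_runtime_pm_put(dev_priv);

	return freed;
}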
static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
{
	if (i915_gem_object_unbind(obj) == 0)
		__i915_gem_object_put_pages(obj, I915_MM_SHRINKER);

	return !i915_gem_object_has_pages(obj);
}
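/*
 * For reference: I915_MM_NORMAL and I915_MM_SHRINKER seen above are lockdep
 * subclasses for obj->mm.lock, so that re-taking the lock from the shrinker
 * while another thread holds it during get_pages does not raise a false
 * deadlock report. In this era they are declared roughly as (sketch):
 */
enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock */
	I915_MM_NORMAL = 0,
	I915_MM_SHRINKER
};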
static void __i915_do_clflush(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	drm_clflush_sg(obj->mm.pages);
	intel_fb_obj_flush(obj, ORIGIN_CPU);
}
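/*
 * Illustration only: __i915_do_clflush() requires the backing pages to be
 * populated (hence the GEM_BUG_ON above). A hypothetical synchronous caller -
 * example_clflush_sync() is not part of the driver - would pin the pages
 * around the flush, e.g.:
 */
static int example_clflush_sync(struct drm_i915_gem_object *obj)
{
	int err;

	/* Populate and pin obj->mm.pages so drm_clflush_sg() sees a valid sg_table */
	err = i915_gem_object_pin_pages(obj);
	if (err)
		return err;

	__i915_do_clflush(obj);

	i915_gem_object_unpin_pages(obj);
	return 0;
}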