static void cancel_userptr(struct work_struct *work)
{
	struct i915_mmu_object *mo = container_of(work, typeof(*mo), work);
	struct drm_i915_gem_object *obj = mo->obj;
	struct work_struct *active;

	/* Cancel any active worker and force us to re-evaluate gup */
	mutex_lock(&obj->mm.lock);
	active = fetch_and_zero(&obj->userptr.work);
	mutex_unlock(&obj->mm.lock);
	if (active)
		goto out;

	i915_gem_object_wait(obj, I915_WAIT_ALL, MAX_SCHEDULE_TIMEOUT, NULL);

	mutex_lock(&obj->base.dev->struct_mutex);

	/* We are inside a kthread context and can't be interrupted */
	if (i915_gem_object_unbind(obj) == 0)
		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
	WARN_ONCE(i915_gem_object_has_pages(obj),
		  "Failed to release pages: bind_count=%d, pages_pin_count=%d, pin_global=%d\n",
		  obj->bind_count,
		  atomic_read(&obj->mm.pages_pin_count),
		  obj->pin_global);

	mutex_unlock(&obj->base.dev->struct_mutex);

out:
	i915_gem_object_put(obj);
}
static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
{
	if (i915_gem_object_unbind(obj) == 0)
		__i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
	return !i915_gem_object_has_pages(obj);
}
static int
userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
				  const struct mmu_notifier_range *range)
{
	struct i915_mmu_notifier *mn =
		container_of(_mn, struct i915_mmu_notifier, mn);
	struct interval_tree_node *it;
	struct mutex *unlock = NULL;
	unsigned long end;
	int ret = 0;

	if (RB_EMPTY_ROOT(&mn->objects.rb_root))
		return 0;

	/* interval ranges are inclusive, but invalidate range is exclusive */
	end = range->end - 1;

	spin_lock(&mn->lock);
	it = interval_tree_iter_first(&mn->objects, range->start, end);
	while (it) {
		struct drm_i915_gem_object *obj;

		if (!mmu_notifier_range_blockable(range)) {
			ret = -EAGAIN;
			break;
		}

		/*
		 * The mmu_object is released late when destroying the
		 * GEM object so it is entirely possible to gain a
		 * reference on an object in the process of being freed
		 * since our serialisation is via the spinlock and not
		 * the struct_mutex - and consequently use it after it
		 * is freed and then double free it. To prevent that
		 * use-after-free we only acquire a reference on the
		 * object if it is not in the process of being destroyed.
		 */
		obj = container_of(it, struct i915_mmu_object, it)->obj;
		if (!kref_get_unless_zero(&obj->base.refcount)) {
			it = interval_tree_iter_next(it, range->start, end);
			continue;
		}
		spin_unlock(&mn->lock);

		if (!unlock) {
			unlock = &mn->mm->i915->drm.struct_mutex;

			switch (mutex_trylock_recursive(unlock)) {
			default:
			case MUTEX_TRYLOCK_FAILED:
				if (mutex_lock_killable_nested(unlock, I915_MM_SHRINKER)) {
					i915_gem_object_put(obj);
					return -EINTR;
				}
				/* fall through */
			case MUTEX_TRYLOCK_SUCCESS:
				break;

			case MUTEX_TRYLOCK_RECURSIVE:
				unlock = ERR_PTR(-EEXIST);
				break;
			}
		}

		ret = i915_gem_object_unbind(obj);
		if (ret == 0)
			ret = __i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
		i915_gem_object_put(obj);
		if (ret)
			goto unlock;

		spin_lock(&mn->lock);

		/*
		 * As we do not (yet) protect the mmu from concurrent insertion
		 * over this range, there is no guarantee that this search will
		 * terminate given a pathologic workload.
		 */
		it = interval_tree_iter_first(&mn->objects, range->start, end);
	}
	spin_unlock(&mn->lock);

unlock:
	if (!IS_ERR_OR_NULL(unlock))
		mutex_unlock(unlock);

	return ret;
}