Example #1
static int kcmp_lock(struct mutex *m1, struct mutex *m2)
{
	int err;

	/* Lock in a stable order (by address) to avoid ABBA deadlock. */
	if (m2 > m1)
		swap(m1, m2);

	err = mutex_lock_killable(m1);
	if (!err && likely(m1 != m2)) {
		/* Second lock of the same class: annotate the nesting for lockdep. */
		err = mutex_lock_killable_nested(m2, SINGLE_DEPTH_NESTING);
		if (err)
			mutex_unlock(m1);
	}

	return err;
}
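For symmetry, here is a minimal sketch of a matching unlock helper. It is written purely for illustration (the name kcmp_unlock and its body are not part of the example above): it reproduces the same pointer ordering, drops the inner (nested) lock first, then the outer one.

static void kcmp_unlock(struct mutex *m1, struct mutex *m2)
{
	/* Reproduce the address ordering used by kcmp_lock(). */
	if (m2 > m1)
		swap(m1, m2);

	/* Release the nested (inner) lock first, then the outer one. */
	if (likely(m1 != m2))
		mutex_unlock(m2);
	mutex_unlock(m1);
}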
Example #2
int mt_mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	return mutex_lock_killable_nested(lock, subclass);
}
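The wrapper above does nothing more than forward to mutex_lock_killable_nested(), presumably so the call can be reached through a separately named symbol. A minimal, hypothetical caller might look like the sketch below; the lock, the function name and the choice of SINGLE_DEPTH_NESTING are illustrative only.

static DEFINE_MUTEX(mt_demo_lock);	/* hypothetical lock, for illustration */

static int mt_demo(void)
{
	int err;

	err = mt_mutex_lock_killable_nested(&mt_demo_lock, SINGLE_DEPTH_NESTING);
	if (err)
		return err;	/* a fatal signal arrived while waiting */

	/* ... critical section ... */

	mutex_unlock(&mt_demo_lock);
	return 0;
}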
Example #3
static int
userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
				  const struct mmu_notifier_range *range)
{
	struct i915_mmu_notifier *mn =
		container_of(_mn, struct i915_mmu_notifier, mn);
	struct interval_tree_node *it;
	struct mutex *unlock = NULL;
	unsigned long end;
	int ret = 0;

	if (RB_EMPTY_ROOT(&mn->objects.rb_root))
		return 0;

	/* interval ranges are inclusive, but invalidate range is exclusive */
	end = range->end - 1;

	spin_lock(&mn->lock);
	it = interval_tree_iter_first(&mn->objects, range->start, end);
	while (it) {
		struct drm_i915_gem_object *obj;

		if (!mmu_notifier_range_blockable(range)) {
			ret = -EAGAIN;
			break;
		}

		/*
		 * The mmu_object is released late when destroying the
		 * GEM object so it is entirely possible to gain a
		 * reference on an object in the process of being freed
		 * since our serialisation is via the spinlock and not
		 * the struct_mutex - and consequently use it after it
		 * is freed and then double free it. To prevent that
		 * use-after-free we only acquire a reference on the
		 * object if it is not in the process of being destroyed.
		 */
		obj = container_of(it, struct i915_mmu_object, it)->obj;
		if (!kref_get_unless_zero(&obj->base.refcount)) {
			it = interval_tree_iter_next(it, range->start, end);
			continue;
		}
		spin_unlock(&mn->lock);

		if (!unlock) {
			unlock = &mn->mm->i915->drm.struct_mutex;

			switch (mutex_trylock_recursive(unlock)) {
			default:
			case MUTEX_TRYLOCK_FAILED:
				if (mutex_lock_killable_nested(unlock, I915_MM_SHRINKER)) {
					i915_gem_object_put(obj);
					return -EINTR;
				}
				/* fall through */
			case MUTEX_TRYLOCK_SUCCESS:
				break;

			case MUTEX_TRYLOCK_RECURSIVE:
				unlock = ERR_PTR(-EEXIST);
				break;
			}
		}

		ret = i915_gem_object_unbind(obj);
		if (ret == 0)
			ret = __i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
		i915_gem_object_put(obj);
		if (ret)
			goto unlock;

		spin_lock(&mn->lock);

		/*
		 * As we do not (yet) protect the mmu from concurrent insertion
		 * over this range, there is no guarantee that this search will
		 * terminate given a pathologic workload.
		 */
		it = interval_tree_iter_first(&mn->objects, range->start, end);
	}
	spin_unlock(&mn->lock);

unlock:
	if (!IS_ERR_OR_NULL(unlock))
		mutex_unlock(unlock);

	return ret;
}
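The locking pattern worth noting in this i915 MMU-notifier callback is the conditional acquisition of struct_mutex: the callback may already run with that mutex held, so it only takes the lock (killably, with a lockdep nesting annotation) when the current context does not hold it, and only releases what it actually took. Below is a stripped-down sketch of that pattern with a hypothetical lock and an illustrative subclass, assuming mutex_trylock_recursive() is available as in the example above.

static int do_work_under_lock(struct mutex *lock)
{
	struct mutex *unlock = NULL;	/* set only if we must release the lock */

	switch (mutex_trylock_recursive(lock)) {
	default:
	case MUTEX_TRYLOCK_FAILED:
		/* Contended: sleep for the lock, but let fatal signals abort. */
		if (mutex_lock_killable_nested(lock, SINGLE_DEPTH_NESTING))
			return -EINTR;
		/* fall through */
	case MUTEX_TRYLOCK_SUCCESS:
		unlock = lock;
		break;

	case MUTEX_TRYLOCK_RECURSIVE:
		/* Already held by this context: leave it locked on exit. */
		break;
	}

	/* ... work that requires the lock ... */

	if (unlock)
		mutex_unlock(unlock);
	return 0;
}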