Example 1
static void test_cycle_work(struct work_struct *work)
{
	struct test_cycle *cycle = container_of(work, typeof(*cycle), work);
	struct ww_acquire_ctx ctx;
	int err, erra = 0;

	ww_acquire_init(&ctx, &ww_class);
	ww_mutex_lock(&cycle->a_mutex, &ctx);

	complete(cycle->a_signal);
	wait_for_completion(&cycle->b_signal);

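	/*
	 * The worker holds its own a_mutex and then needs a lock owned by
	 * another worker, forming a cycle.  The wait/wound algorithm breaks
	 * it by returning -EDEADLK to a younger context, which backs off and
	 * takes the contended lock via ww_mutex_lock_slow().
	 */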
	err = ww_mutex_lock(cycle->b_mutex, &ctx);
	if (err == -EDEADLK) {
		err = 0;
		ww_mutex_unlock(&cycle->a_mutex);
		ww_mutex_lock_slow(cycle->b_mutex, &ctx);
		erra = ww_mutex_lock(&cycle->a_mutex, &ctx);
	}

	/* only release the locks that are actually held */
	if (!err)
		ww_mutex_unlock(cycle->b_mutex);
	if (!erra)
		ww_mutex_unlock(&cycle->a_mutex);
	ww_acquire_fini(&ctx);

	cycle->result = err ?: erra;
}
Example 2
static void test_abba_work(struct work_struct *work)
{
	struct test_abba *abba = container_of(work, typeof(*abba), work);
	struct ww_acquire_ctx ctx;
	int err;

	ww_acquire_init(&ctx, &ww_class);
	ww_mutex_lock(&abba->b_mutex, &ctx);

	complete(&abba->b_ready);
	wait_for_completion(&abba->a_ready);

	err = ww_mutex_lock(&abba->a_mutex, &ctx);
	if (abba->resolve && err == -EDEADLK) {
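		/*
		 * We are the younger context: drop our own lock, wait for the
		 * contended one on the slow path (the winner may still hold
		 * it), then reacquire the lock we released.
		 */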
		ww_mutex_unlock(&abba->b_mutex);
		ww_mutex_lock_slow(&abba->a_mutex, &ctx);
		err = ww_mutex_lock(&abba->b_mutex, &ctx);
	}

	if (!err)
		ww_mutex_unlock(&abba->a_mutex);
	ww_mutex_unlock(&abba->b_mutex);
	ww_acquire_fini(&ctx);

	abba->result = err;
}
Example 3
static void ww_test_edeadlk_normal_slow(void)
{
	int ret;

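	/*
	 * Fake a conflict: make o2 look as if it were already held by a
	 * second, older acquire context (t2, whose stamp is decremented
	 * below), so locking it with t must return -EDEADLK and the slow
	 * path at the end is the legitimate retry.
	 */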
	mutex_lock(&o2.base);
	mutex_release(&o2.base.dep_map, 1, _THIS_IP_);
	o2.ctx = &t2;

	WWAI(&t);
	t2 = t;
	t2.stamp--;

	ret = WWL(&o, &t);
	WARN_ON(ret);

	ret = WWL(&o2, &t);
	WARN_ON(ret != -EDEADLK);

	o2.ctx = NULL;
	mutex_acquire(&o2.base.dep_map, 0, 1, _THIS_IP_);
	mutex_unlock(&o2.base);
	WWU(&o);

	ww_mutex_lock_slow(&o2, &t);
}
Example 4
static void stress_inorder_work(struct work_struct *work)
{
	struct stress *stress = container_of(work, typeof(*stress), work);
	const int nlocks = stress->nlocks;
	struct ww_mutex *locks = stress->locks;
	struct ww_acquire_ctx ctx;
	int *order;

	order = get_random_order(nlocks);
	if (!order)
		return;

	do {
		int contended = -1;
		int n, err;

		ww_acquire_init(&ctx, &ww_class);
retry:
		err = 0;
		for (n = 0; n < nlocks; n++) {
			if (n == contended)
				continue;

			err = ww_mutex_lock(&locks[order[n]], &ctx);
			if (err < 0)
				break;
		}
		if (!err)
			dummy_load(stress);

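		/*
		 * Drop everything we hold: all locks below the point of
		 * contention, plus the previously contended lock if it lies
		 * beyond it (it was taken with ww_mutex_lock_slow() on the
		 * last pass and skipped by the loop above).
		 */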
		if (contended > n)
			ww_mutex_unlock(&locks[order[contended]]);
		contended = n;
		while (n--)
			ww_mutex_unlock(&locks[order[n]]);

		if (err == -EDEADLK) {
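			/*
			 * Wait for the contended lock, then retry the whole
			 * sequence; the loop above skips it because we now
			 * hold it.
			 */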
			ww_mutex_lock_slow(&locks[order[contended]], &ctx);
			goto retry;
		}

		if (err) {
			pr_err_once("stress (%s) failed with %d\n",
				    __func__, err);
			break;
		}

		ww_acquire_fini(&ctx);
	} while (!time_after(jiffies, stress->timeout));

	kfree(order);
	kfree(stress);
}
Example 5
static int test_abba(bool resolve)
{
	struct test_abba abba;
	struct ww_acquire_ctx ctx;
	int err, ret;

	ww_mutex_init(&abba.a_mutex, &ww_class);
	ww_mutex_init(&abba.b_mutex, &ww_class);
	INIT_WORK_ONSTACK(&abba.work, test_abba_work);
	init_completion(&abba.a_ready);
	init_completion(&abba.b_ready);
	abba.resolve = resolve;

	schedule_work(&abba.work);

	ww_acquire_init(&ctx, &ww_class);
	ww_mutex_lock(&abba.a_mutex, &ctx);

	complete(&abba.a_ready);
	wait_for_completion(&abba.b_ready);

	err = ww_mutex_lock(&abba.b_mutex, &ctx);
	if (resolve && err == -EDEADLK) {
		ww_mutex_unlock(&abba.a_mutex);
		ww_mutex_lock_slow(&abba.b_mutex, &ctx);
		err = ww_mutex_lock(&abba.a_mutex, &ctx);
	}

	if (!err)
		ww_mutex_unlock(&abba.b_mutex);
	ww_mutex_unlock(&abba.a_mutex);
	ww_acquire_fini(&ctx);

	flush_work(&abba.work);
	destroy_work_on_stack(&abba.work);

	ret = 0;
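	/*
	 * With resolve set, both threads must have ended up holding both
	 * locks; without it, at least one of them must have seen -EDEADLK.
	 */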
	if (resolve) {
		if (err || abba.result) {
			pr_err("%s: failed to resolve ABBA deadlock, A err=%d, B err=%d\n",
			       __func__, err, abba.result);
			ret = -EINVAL;
		}
	} else {
		if (err != -EDEADLK && abba.result != -EDEADLK) {
			pr_err("%s: missed ABBA deadlock, A err=%d, B err=%d\n",
			       __func__, err, abba.result);
			ret = -EINVAL;
		}
	}
	return ret;
}
Example 6
int drm_lock_reservations(struct reservation_object **resvs,
			  unsigned int num_resvs, struct ww_acquire_ctx *ctx)
{
	unsigned int r;
	struct reservation_object *slow_res = NULL;

	ww_acquire_init(ctx, &reservation_ww_class);

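	/*
	 * Lock every reservation in array order.  On contention, release
	 * everything, take the contended reservation on the slow path and
	 * retry, skipping it on the next pass.
	 */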
retry:
	for (r = 0; r < num_resvs; r++) {
		int ret;
		/* skip the resv we locked with slow lock */
		if (resvs[r] == slow_res) {
			slow_res = NULL;
			continue;
		}
		ret = ww_mutex_lock(&resvs[r]->lock, ctx);
		if (ret < 0) {
			unsigned int slow_r = r;
			/*
			 * Undo all the locks we have already taken,
			 * in reverse order.
			 */
			while (r > 0) {
				r--;
				ww_mutex_unlock(&resvs[r]->lock);
			}
			if (slow_res)
				ww_mutex_unlock(&slow_res->lock);
			if (ret == -EDEADLK) {
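				/*
				 * All locks are dropped; wait for the
				 * contended reservation on the slow path and
				 * restart from the top.
				 */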
				slow_res = resvs[slow_r];
				ww_mutex_lock_slow(&slow_res->lock, ctx);
				goto retry;
			}
			ww_acquire_fini(ctx);
			return ret;
		}
	}

	ww_acquire_done(ctx);
	return 0;
}
Example 7
static inline int modeset_lock(struct drm_modeset_lock *lock,
		struct drm_modeset_acquire_ctx *ctx,
		bool interruptible, bool slow)
{
	int ret;

	WARN_ON(ctx->contended);

	if (ctx->trylock_only) {
		lockdep_assert_held(&ctx->ww_ctx);

		if (!ww_mutex_trylock(&lock->mutex))
			return -EBUSY;
		else
			return 0;
	} else if (interruptible && slow) {
		ret = ww_mutex_lock_slow_interruptible(&lock->mutex, &ctx->ww_ctx);
	} else if (interruptible) {
		ret = ww_mutex_lock_interruptible(&lock->mutex, &ctx->ww_ctx);
	} else if (slow) {
		ww_mutex_lock_slow(&lock->mutex, &ctx->ww_ctx);
		ret = 0;
	} else {
		ret = ww_mutex_lock(&lock->mutex, &ctx->ww_ctx);
	}
	if (!ret) {
		WARN_ON(!list_empty(&lock->head));
		list_add(&lock->head, &ctx->locked);
	} else if (ret == -EALREADY) {
	} else if (ret == -EALREADY) {
		/* we already hold the lock; this is fine.  For atomic
		 * we will need to be able to drm_modeset_lock() things
		 * without having to keep track of what is already locked
		 * or not.
		 */
		ret = 0;
	} else if (ret == -EDEADLK) {
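		/*
		 * Record the contended lock; the caller is expected to drop
		 * everything and reacquire it via the slow path (see
		 * drm_modeset_backoff()) before retrying.
		 */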
		ctx->contended = lock;
	}

	return ret;
}
Example 8
static void ww_test_unneeded_slow(void)
{
	WWAI(&t);

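	/*
	 * The slow path is only valid after ww_mutex_lock() has returned
	 * -EDEADLK for this context; calling it here, with no contended lock
	 * recorded, is expected to trip the ww_mutex debug checks.
	 */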
	ww_mutex_lock_slow(&o, &t);
}