Example #1
static void test_cycle_work(struct work_struct *work)
{
	struct test_cycle *cycle = container_of(work, typeof(*cycle), work);
	struct ww_acquire_ctx ctx;
	int err;

	ww_acquire_init(&ctx, &ww_class);
	ww_mutex_lock(&cycle->a_mutex, &ctx);

	complete(cycle->a_signal);
	wait_for_completion(&cycle->b_signal);

	err = ww_mutex_lock(cycle->b_mutex, &ctx);
	if (err == -EDEADLK) {
		ww_mutex_unlock(&cycle->a_mutex);
		ww_mutex_lock_slow(cycle->b_mutex, &ctx);
		err = ww_mutex_lock(&cycle->a_mutex, &ctx);
	}

	if (!err)
		ww_mutex_unlock(cycle->b_mutex);
	ww_mutex_unlock(&cycle->a_mutex);
	ww_acquire_fini(&ctx);

	cycle->result = err;
}
Example #2
static void test_abba_work(struct work_struct *work)
{
	struct test_abba *abba = container_of(work, typeof(*abba), work);
	struct ww_acquire_ctx ctx;
	int err;

	ww_acquire_init(&ctx, &ww_class);
	ww_mutex_lock(&abba->b_mutex, &ctx);

	complete(&abba->b_ready);
	wait_for_completion(&abba->a_ready);

	err = ww_mutex_lock(&abba->a_mutex, &ctx);
	if (abba->resolve && err == -EDEADLK) {
		ww_mutex_unlock(&abba->b_mutex);
		ww_mutex_lock_slow(&abba->a_mutex, &ctx);
		err = ww_mutex_lock(&abba->b_mutex, &ctx);
	}

	if (!err)
		ww_mutex_unlock(&abba->a_mutex);
	ww_mutex_unlock(&abba->b_mutex);
	ww_acquire_fini(&ctx);

	abba->result = err;
}
Example #3
struct drm_gem_object *
amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
				 struct dma_buf_attachment *attach,
				 struct sg_table *sg)
{
	struct reservation_object *resv = attach->dmabuf->resv;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_bo *bo;
	int ret;

	ww_mutex_lock(&resv->lock, NULL);
	ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE,
			       AMDGPU_GEM_DOMAIN_CPU, 0, ttm_bo_type_sg,
			       resv, &bo);
	if (ret)
		goto error;

	bo->tbo.sg = sg;
	bo->tbo.ttm->sg = sg;
	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
	if (attach->dmabuf->ops != &amdgpu_dmabuf_ops)
		bo->prime_shared_count = 1;

	ww_mutex_unlock(&resv->lock);
	return &bo->gem_base;

error:
	ww_mutex_unlock(&resv->lock);
	return ERR_PTR(ret);
}
Example #4
static int test_aa(void)
{
	struct ww_mutex mutex;
	struct ww_acquire_ctx ctx;
	int ret;

	ww_mutex_init(&mutex, &ww_class);
	ww_acquire_init(&ctx, &ww_class);

	ww_mutex_lock(&mutex, &ctx);

	if (ww_mutex_trylock(&mutex)) {
		pr_err("%s: trylocked itself!\n", __func__);
		ww_mutex_unlock(&mutex);
		ret = -EINVAL;
		goto out;
	}

	ret = ww_mutex_lock(&mutex, &ctx);
	if (ret != -EALREADY) {
		pr_err("%s: missed deadlock for recursing, ret=%d\n",
		       __func__, ret);
		if (!ret)
			ww_mutex_unlock(&mutex);
		ret = -EINVAL;
		goto out;
	}

	ret = 0;
out:
	ww_mutex_unlock(&mutex);
	ww_acquire_fini(&ctx);
	return ret;
}
Example #5
static void stress_inorder_work(struct work_struct *work)
{
	struct stress *stress = container_of(work, typeof(*stress), work);
	const int nlocks = stress->nlocks;
	struct ww_mutex *locks = stress->locks;
	struct ww_acquire_ctx ctx;
	int *order;

	order = get_random_order(nlocks);
	if (!order)
		return;

	do {
		int contended = -1;
		int n, err;

		ww_acquire_init(&ctx, &ww_class);
retry:
		err = 0;
		for (n = 0; n < nlocks; n++) {
			if (n == contended)
				continue;

			err = ww_mutex_lock(&locks[order[n]], &ctx);
			if (err < 0)
				break;
		}
		if (!err)
			dummy_load(stress);

		if (contended > n)
			ww_mutex_unlock(&locks[order[contended]]);
		contended = n;
		while (n--)
			ww_mutex_unlock(&locks[order[n]]);

		if (err == -EDEADLK) {
			ww_mutex_lock_slow(&locks[order[contended]], &ctx);
			goto retry;
		}

		if (err) {
			pr_err_once("stress (%s) failed with %d\n",
				    __func__, err);
			break;
		}

		ww_acquire_fini(&ctx);
	} while (!time_after(jiffies, stress->timeout));

	kfree(order);
	kfree(stress);
}
Example #6
static int test_abba(bool resolve)
{
	struct test_abba abba;
	struct ww_acquire_ctx ctx;
	int err, ret;

	ww_mutex_init(&abba.a_mutex, &ww_class);
	ww_mutex_init(&abba.b_mutex, &ww_class);
	INIT_WORK_ONSTACK(&abba.work, test_abba_work);
	init_completion(&abba.a_ready);
	init_completion(&abba.b_ready);
	abba.resolve = resolve;

	schedule_work(&abba.work);

	ww_acquire_init(&ctx, &ww_class);
	ww_mutex_lock(&abba.a_mutex, &ctx);

	complete(&abba.a_ready);
	wait_for_completion(&abba.b_ready);

	err = ww_mutex_lock(&abba.b_mutex, &ctx);
	if (resolve && err == -EDEADLK) {
		ww_mutex_unlock(&abba.a_mutex);
		ww_mutex_lock_slow(&abba.b_mutex, &ctx);
		err = ww_mutex_lock(&abba.a_mutex, &ctx);
	}

	if (!err)
		ww_mutex_unlock(&abba.b_mutex);
	ww_mutex_unlock(&abba.a_mutex);
	ww_acquire_fini(&ctx);

	flush_work(&abba.work);
	destroy_work_on_stack(&abba.work);

	ret = 0;
	if (resolve) {
		if (err || abba.result) {
			pr_err("%s: failed to resolve ABBA deadlock, A err=%d, B err=%d\n",
			       __func__, err, abba.result);
			ret = -EINVAL;
		}
	} else {
		if (err != -EDEADLK && abba.result != -EDEADLK) {
			pr_err("%s: missed ABBA deadlock, A err=%d, B err=%d\n",
			       __func__, err, abba.result);
			ret = -EINVAL;
		}
	}
	return ret;
}
Example #7
static void submit_unlock_object(struct etnaviv_gem_submit *submit, int i)
{
	if (submit->bos[i].flags & BO_LOCKED) {
		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;

		ww_mutex_unlock(&etnaviv_obj->resv->lock);
		submit->bos[i].flags &= ~BO_LOCKED;
	}
}
Example #8
int drm_lock_reservations(struct reservation_object **resvs,
			  unsigned int num_resvs, struct ww_acquire_ctx *ctx)
{
	unsigned int r;
	struct reservation_object *slow_res = NULL;

	ww_acquire_init(ctx, &reservation_ww_class);

retry:
	for (r = 0; r < num_resvs; r++) {
		int ret;

		/* skip the resv we already locked on the slow path */
		if (resvs[r] == slow_res) {
			slow_res = NULL;
			continue;
		}
		ret = ww_mutex_lock(&resvs[r]->lock, ctx);
		if (ret < 0) {
			unsigned int slow_r = r;
			/*
			 * Undo all the locks we have already taken,
			 * in reverse order.
			 */
			while (r > 0) {
				r--;
				ww_mutex_unlock(&resvs[r]->lock);
			}
			if (slow_res)
				ww_mutex_unlock(&slow_res->lock);
			if (ret == -EDEADLK) {
				slow_res = resvs[slow_r];
				ww_mutex_lock_slow(&slow_res->lock, ctx);
				goto retry;
			}
			ww_acquire_fini(ctx);
			return ret;
		}
	}

	ww_acquire_done(ctx);
	return 0;
}
Example #9
void drm_unlock_reservations(struct reservation_object **resvs,
			     unsigned int num_resvs,
			     struct ww_acquire_ctx *ctx)
{
	unsigned int r;

	for (r = 0; r < num_resvs; r++)
		ww_mutex_unlock(&resvs[r]->lock);

	ww_acquire_fini(ctx);
}
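
Examples #8 and #9 are intended to be used as a pair: drm_lock_reservations() takes every reservation lock under a single acquire context, backing off and retrying via ww_mutex_lock_slow() on -EDEADLK, while drm_unlock_reservations() drops them all again. A minimal caller sketch, assuming a made-up struct example_job that simply carries the array of reservation objects, might look like this:

/*
 * Hypothetical caller sketch (not taken from the examples above).
 * "example_job" and "example_submit" are illustrative names only.
 */
struct example_job {
	struct reservation_object **resvs;
	unsigned int num_resvs;
};

static int example_submit(struct example_job *job)
{
	struct ww_acquire_ctx ctx;
	int ret;

	ret = drm_lock_reservations(job->resvs, job->num_resvs, &ctx);
	if (ret)
		return ret;	/* nothing is left locked on failure */

	/* ... add fences / update the reservation objects here ... */

	drm_unlock_reservations(job->resvs, job->num_resvs, &ctx);
	return 0;
}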
Example #10
/**
 * i915_gem_batch_pool_get() - allocate a buffer from the pool
 * @pool: the batch buffer pool
 * @size: the minimum desired size of the returned buffer
 *
 * Returns an inactive buffer from @pool with at least @size bytes,
 * with the pages pinned. The caller must call i915_gem_object_unpin_pages()
 * on the returned object.
 *
 * Note: Callers must hold the struct_mutex
 *
 * Return: the buffer object or an error pointer
 */
struct drm_i915_gem_object *
i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
			size_t size)
{
	struct drm_i915_gem_object *obj = NULL;
	struct drm_i915_gem_object *tmp;
	struct list_head *list;
	int n, ret;

	lockdep_assert_held(&pool->engine->i915->drm.struct_mutex);

	/* Compute a power-of-two bucket, but throw everything greater than
	 * 16KiB into the same bucket: i.e. the buckets hold objects of
	 * (1 page, 2 pages, 4 pages, 8+ pages).
	 */
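	/*
	 * For example, assuming 4KiB pages and four buckets: a 4KiB
	 * request lands in bucket 0, 8KiB in bucket 1, 16KiB in bucket 2,
	 * and anything of 32KiB or more ends up in the last bucket.
	 */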
	n = fls(size >> PAGE_SHIFT) - 1;
	if (n >= ARRAY_SIZE(pool->cache_list))
		n = ARRAY_SIZE(pool->cache_list) - 1;
	list = &pool->cache_list[n];

	list_for_each_entry(tmp, list, batch_pool_link) {
		/* The batches are strictly LRU ordered */
		if (i915_gem_object_is_active(tmp))
			break;

		GEM_BUG_ON(!reservation_object_test_signaled_rcu(tmp->resv,
								 true));

		if (tmp->base.size >= size) {
			/* Clear the set of shared fences early */
			ww_mutex_lock(&tmp->resv->lock, NULL);
			reservation_object_add_excl_fence(tmp->resv, NULL);
			ww_mutex_unlock(&tmp->resv->lock);

			obj = tmp;
			break;
		}
	}

	if (obj == NULL) {
		obj = i915_gem_object_create_internal(pool->engine->i915, size);
		if (IS_ERR(obj))
			return obj;
	}

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ERR_PTR(ret);

	list_move_tail(&obj->batch_pool_link, list);
	return obj;
}
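
As the kerneldoc above spells out, callers must hold struct_mutex and must unpin the pages once they are done with the buffer. A hypothetical caller sketch, with the function name and the "fill and submit" step purely as placeholders, could look like this:

/*
 * Hypothetical caller sketch: struct_mutex is assumed to be held, as
 * required by the kerneldoc above; "use_pooled_batch" is a made-up name.
 */
static int use_pooled_batch(struct i915_gem_batch_pool *pool, size_t size)
{
	struct drm_i915_gem_object *obj;

	lockdep_assert_held(&pool->engine->i915->drm.struct_mutex);

	obj = i915_gem_batch_pool_get(pool, size);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	/* ... fill and submit the batch buffer ... */

	/* The pool returned the object with its pages pinned; drop that. */
	i915_gem_object_unpin_pages(obj);
	return 0;
}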
Example #11
static int __test_mutex(unsigned int flags)
{
#define TIMEOUT (HZ / 16)
	struct test_mutex mtx;
	struct ww_acquire_ctx ctx;
	int ret;

	ww_mutex_init(&mtx.mutex, &ww_class);
	ww_acquire_init(&ctx, &ww_class);

	INIT_WORK_ONSTACK(&mtx.work, test_mutex_work);
	init_completion(&mtx.ready);
	init_completion(&mtx.go);
	init_completion(&mtx.done);
	mtx.flags = flags;

	schedule_work(&mtx.work);

	wait_for_completion(&mtx.ready);
	ww_mutex_lock(&mtx.mutex, (flags & TEST_MTX_CTX) ? &ctx : NULL);
	complete(&mtx.go);
	if (flags & TEST_MTX_SPIN) {
		unsigned long timeout = jiffies + TIMEOUT;

		ret = 0;
		do {
			if (completion_done(&mtx.done)) {
				ret = -EINVAL;
				break;
			}
			cond_resched();
		} while (time_before(jiffies, timeout));
	} else {
		ret = wait_for_completion_timeout(&mtx.done, TIMEOUT);
	}
	ww_mutex_unlock(&mtx.mutex);
	ww_acquire_fini(&ctx);

	if (ret) {
		pr_err("%s(flags=%x): mutual exclusion failure\n",
		       __func__, flags);
		ret = -EINVAL;
	}

	flush_work(&mtx.work);
	destroy_work_on_stack(&mtx.work);
	return ret;
#undef TIMEOUT
}
Example #12
static void test_mutex_work(struct work_struct *work)
{
	struct test_mutex *mtx = container_of(work, typeof(*mtx), work);

	complete(&mtx->ready);
	wait_for_completion(&mtx->go);

	if (mtx->flags & TEST_MTX_TRY) {
		while (!ww_mutex_trylock(&mtx->mutex))
			cond_resched();
	} else {
		ww_mutex_lock(&mtx->mutex, NULL);
	}
	complete(&mtx->done);
	ww_mutex_unlock(&mtx->mutex);
}
Example #13
static void ttm_eu_backoff_reservation_locked(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		if (!entry->reserved)
			continue;

		entry->reserved = false;
		if (entry->removed) {
			ttm_bo_add_to_lru(bo);
			entry->removed = false;
		}
		ww_mutex_unlock(&bo->resv->lock);
	}
}
Example #14
struct drm_gem_object *amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
							struct dma_buf_attachment *attach,
							struct sg_table *sg)
{
	struct reservation_object *resv = attach->dmabuf->resv;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_bo *bo;
	int ret;

	ww_mutex_lock(&resv->lock, NULL);
	ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE, false,
			       AMDGPU_GEM_DOMAIN_GTT, 0, sg, resv, &bo);
	ww_mutex_unlock(&resv->lock);
	if (ret)
		return ERR_PTR(ret);

	return &bo->gem_base;
}
Example #15
File: radeon_prime.c Project: bluhm/sys
struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
							struct dma_buf_attachment *attach,
							struct sg_table *sg)
{
	struct reservation_object *resv = attach->dmabuf->resv;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_bo *bo;
	int ret;

	ww_mutex_lock(&resv->lock, NULL);
	ret = radeon_bo_create(rdev, attach->dmabuf->size, PAGE_SIZE, false,
			       RADEON_GEM_DOMAIN_GTT, 0, sg, resv, &bo);
	ww_mutex_unlock(&resv->lock);
	if (ret)
		return ERR_PTR(ret);

	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&bo->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);

	bo->prime_shared_count = 1;
	return &bo->gem_base;
}
Example #16
/**
 * drm_modeset_unlock - drop modeset lock
 * @lock: lock to release
 */
void drm_modeset_unlock(struct drm_modeset_lock *lock)
{
	list_del_init(&lock->head);
	ww_mutex_unlock(&lock->mutex);
}
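
drm_modeset_unlock() pairs with drm_modeset_lock(); a minimal hypothetical sketch that takes and drops a single lock without an acquire context (the crtc pointer and function name are illustrative) might look like this:

/*
 * Hypothetical sketch: lock and unlock one modeset lock with no acquire
 * context. "crtc" stands in for any object embedding a drm_modeset_lock.
 */
static int touch_crtc_state(struct drm_crtc *crtc)
{
	int ret;

	ret = drm_modeset_lock(&crtc->mutex, NULL);
	if (ret)
		return ret;

	/* ... inspect or modify state protected by crtc->mutex ... */

	drm_modeset_unlock(&crtc->mutex);
	return 0;
}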