static int test_aa(void)
{
	struct ww_mutex mutex;
	struct ww_acquire_ctx ctx;
	int ret;

	ww_mutex_init(&mutex, &ww_class);
	ww_acquire_init(&ctx, &ww_class);

	ww_mutex_lock(&mutex, &ctx);

	if (ww_mutex_trylock(&mutex)) {
		pr_err("%s: trylocked itself!\n", __func__);
		ww_mutex_unlock(&mutex);
		ret = -EINVAL;
		goto out;
	}

	ret = ww_mutex_lock(&mutex, &ctx);
	if (ret != -EALREADY) {
		pr_err("%s: missed deadlock for recursing, ret=%d\n",
		       __func__, ret);
		if (!ret)
			ww_mutex_unlock(&mutex);
		ret = -EINVAL;
		goto out;
	}

	ret = 0;
out:
	ww_mutex_unlock(&mutex);
	ww_acquire_fini(&ctx);
	return ret;
}
static void test_cycle_work(struct work_struct *work)
{
	struct test_cycle *cycle = container_of(work, typeof(*cycle), work);
	struct ww_acquire_ctx ctx;
	int err;

	ww_acquire_init(&ctx, &ww_class);
	ww_mutex_lock(&cycle->a_mutex, &ctx);

	complete(cycle->a_signal);
	wait_for_completion(&cycle->b_signal);

	err = ww_mutex_lock(cycle->b_mutex, &ctx);
	if (err == -EDEADLK) {
		/*
		 * Wounded: drop our own lock, take the contended one in the
		 * slow path, then reacquire what we gave up.
		 */
		ww_mutex_unlock(&cycle->a_mutex);
		ww_mutex_lock_slow(cycle->b_mutex, &ctx);
		err = ww_mutex_lock(&cycle->a_mutex, &ctx);
	}

	if (!err)
		ww_mutex_unlock(cycle->b_mutex);
	ww_mutex_unlock(&cycle->a_mutex);
	ww_acquire_fini(&ctx);

	cycle->result = err;
}
static void test_abba_work(struct work_struct *work)
{
	struct test_abba *abba = container_of(work, typeof(*abba), work);
	struct ww_acquire_ctx ctx;
	int err;

	ww_acquire_init(&ctx, &ww_class);
	ww_mutex_lock(&abba->b_mutex, &ctx);

	complete(&abba->b_ready);
	wait_for_completion(&abba->a_ready);

	err = ww_mutex_lock(&abba->a_mutex, &ctx);
	if (abba->resolve && err == -EDEADLK) {
		ww_mutex_unlock(&abba->b_mutex);
		ww_mutex_lock_slow(&abba->a_mutex, &ctx);
		err = ww_mutex_lock(&abba->b_mutex, &ctx);
	}

	if (!err)
		ww_mutex_unlock(&abba->a_mutex);
	ww_mutex_unlock(&abba->b_mutex);
	ww_acquire_fini(&ctx);

	abba->result = err;
}
void drm_unlock_reservations(struct reservation_object **resvs,
			     unsigned int num_resvs,
			     struct ww_acquire_ctx *ctx)
{
	unsigned int r;

	for (r = 0; r < num_resvs; r++)
		ww_mutex_unlock(&resvs[r]->lock);

	ww_acquire_fini(ctx);
}
static void stress_inorder_work(struct work_struct *work)
{
	struct stress *stress = container_of(work, typeof(*stress), work);
	const int nlocks = stress->nlocks;
	struct ww_mutex *locks = stress->locks;
	struct ww_acquire_ctx ctx;
	int *order;

	order = get_random_order(nlocks);
	if (!order)
		return;

	do {
		int contended = -1;
		int n, err;

		ww_acquire_init(&ctx, &ww_class);
retry:
		err = 0;
		for (n = 0; n < nlocks; n++) {
			if (n == contended)
				continue;

			err = ww_mutex_lock(&locks[order[n]], &ctx);
			if (err < 0)
				break;
		}
		if (!err)
			dummy_load(stress);

		/*
		 * If we bailed out before reaching the lock taken via the
		 * slow path on a previous pass, it is still held and must
		 * be released separately.
		 */
		if (contended > n)
			ww_mutex_unlock(&locks[order[contended]]);
		contended = n;
		while (n--)
			ww_mutex_unlock(&locks[order[n]]);

		if (err == -EDEADLK) {
			/*
			 * All locks were dropped above; take the contended
			 * one in the slow path and retry the full sequence,
			 * skipping it this time around.
			 */
			ww_mutex_lock_slow(&locks[order[contended]], &ctx);
			goto retry;
		}

		if (err) {
			pr_err_once("stress (%s) failed with %d\n",
				    __func__, err);
			break;
		}

		ww_acquire_fini(&ctx);
	} while (!time_after(jiffies, stress->timeout));

	kfree(order);
	kfree(stress);
}
static int test_abba(bool resolve)
{
	struct test_abba abba;
	struct ww_acquire_ctx ctx;
	int err, ret;

	ww_mutex_init(&abba.a_mutex, &ww_class);
	ww_mutex_init(&abba.b_mutex, &ww_class);
	INIT_WORK_ONSTACK(&abba.work, test_abba_work);
	init_completion(&abba.a_ready);
	init_completion(&abba.b_ready);
	abba.resolve = resolve;

	schedule_work(&abba.work);

	ww_acquire_init(&ctx, &ww_class);
	ww_mutex_lock(&abba.a_mutex, &ctx);

	complete(&abba.a_ready);
	wait_for_completion(&abba.b_ready);

	err = ww_mutex_lock(&abba.b_mutex, &ctx);
	if (resolve && err == -EDEADLK) {
		ww_mutex_unlock(&abba.a_mutex);
		ww_mutex_lock_slow(&abba.b_mutex, &ctx);
		err = ww_mutex_lock(&abba.a_mutex, &ctx);
	}

	if (!err)
		ww_mutex_unlock(&abba.b_mutex);
	ww_mutex_unlock(&abba.a_mutex);
	ww_acquire_fini(&ctx);

	flush_work(&abba.work);
	destroy_work_on_stack(&abba.work);

	ret = 0;
	if (resolve) {
		if (err || abba.result) {
			pr_err("%s: failed to resolve ABBA deadlock, A err=%d, B err=%d\n",
			       __func__, err, abba.result);
			ret = -EINVAL;
		}
	} else {
		if (err != -EDEADLK && abba.result != -EDEADLK) {
			pr_err("%s: missed ABBA deadlock, A err=%d, B err=%d\n",
			       __func__, err, abba.result);
			ret = -EINVAL;
		}
	}
	return ret;
}
static int __test_mutex(unsigned int flags)
{
#define TIMEOUT (HZ / 16)
	struct test_mutex mtx;
	struct ww_acquire_ctx ctx;
	int ret;

	ww_mutex_init(&mtx.mutex, &ww_class);
	ww_acquire_init(&ctx, &ww_class);

	INIT_WORK_ONSTACK(&mtx.work, test_mutex_work);
	init_completion(&mtx.ready);
	init_completion(&mtx.go);
	init_completion(&mtx.done);
	mtx.flags = flags;

	schedule_work(&mtx.work);
	wait_for_completion(&mtx.ready);
	ww_mutex_lock(&mtx.mutex, (flags & TEST_MTX_CTX) ? &ctx : NULL);
	complete(&mtx.go);
	if (flags & TEST_MTX_SPIN) {
		unsigned long timeout = jiffies + TIMEOUT;

		ret = 0;
		do {
			if (completion_done(&mtx.done)) {
				ret = -EINVAL;
				break;
			}
			cond_resched();
		} while (time_before(jiffies, timeout));
	} else {
		ret = wait_for_completion_timeout(&mtx.done, TIMEOUT);
	}
	ww_mutex_unlock(&mtx.mutex);
	ww_acquire_fini(&ctx);

	if (ret) {
		pr_err("%s(flags=%x): mutual exclusion failure\n",
		       __func__, flags);
		ret = -EINVAL;
	}

	flush_work(&mtx.work);
	destroy_work_on_stack(&mtx.work);
	return ret;
#undef TIMEOUT
}
void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
				struct list_head *list)
{
	struct ttm_validate_buffer *entry;
	struct ttm_bo_global *glob;

	if (list_empty(list))
		return;

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;
	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	ttm_eu_backoff_reservation_locked(list, ticket);
	ww_acquire_fini(ticket);
	lockmgr(&glob->lru_lock, LK_RELEASE);
}
void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
				struct list_head *list)
{
	struct ttm_validate_buffer *entry;
	struct ttm_bo_global *glob;

	if (list_empty(list))
		return;

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;
	spin_lock(&glob->lru_lock);
	ttm_eu_backoff_reservation_locked(list);
	if (ticket)
		ww_acquire_fini(ticket);
	spin_unlock(&glob->lru_lock);
}
int drm_lock_reservations(struct reservation_object **resvs,
			  unsigned int num_resvs, struct ww_acquire_ctx *ctx)
{
	unsigned int r;
	struct reservation_object *slow_res = NULL;

	ww_acquire_init(ctx, &reservation_ww_class);

retry:
	for (r = 0; r < num_resvs; r++) {
		int ret;

		/* skip the resv we locked with the slow lock */
		if (resvs[r] == slow_res) {
			slow_res = NULL;
			continue;
		}

		ret = ww_mutex_lock(&resvs[r]->lock, ctx);
		if (ret < 0) {
			unsigned int slow_r = r;

			/*
			 * undo all the locks we already took,
			 * in reverse order
			 */
			while (r > 0) {
				r--;
				ww_mutex_unlock(&resvs[r]->lock);
			}
			if (slow_res)
				ww_mutex_unlock(&slow_res->lock);

			if (ret == -EDEADLK) {
				slow_res = resvs[slow_r];
				ww_mutex_lock_slow(&slow_res->lock, ctx);
				goto retry;
			}

			ww_acquire_fini(ctx);
			return ret;
		}
	}

	ww_acquire_done(ctx);
	return 0;
}
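/*
 * Hypothetical usage sketch (not part of the original sources): how the
 * drm_lock_reservations()/drm_unlock_reservations() pair above might be
 * used to update a set of reservation objects. The update_fences_locked()
 * helper and the resvs/num_resvs parameters are assumptions made purely
 * for illustration.
 */
static int example_update_resvs(struct reservation_object **resvs,
				unsigned int num_resvs)
{
	struct ww_acquire_ctx ctx;
	int ret;

	/* Takes every lock, handling -EDEADLK backoff internally. */
	ret = drm_lock_reservations(resvs, num_resvs, &ctx);
	if (ret)
		return ret;

	update_fences_locked(resvs, num_resvs);	/* assumed helper */

	/* Drops every lock and finishes the acquire context. */
	drm_unlock_reservations(resvs, num_resvs, &ctx);
	return 0;
}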
/**
 * drm_modeset_acquire_fini - cleanup acquire context
 * @ctx: the acquire context
 */
void drm_modeset_acquire_fini(struct drm_modeset_acquire_ctx *ctx)
{
	ww_acquire_fini(&ctx->ww_ctx);
}
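/*
 * Hedged sketch (not from the original file): the retry loop that normally
 * pairs with drm_modeset_acquire_fini() above, backing off on -EDEADLK the
 * same way the ww_mutex examples do. The crtc argument and the do_change()
 * helper are assumptions for illustration only.
 */
static int example_modeset(struct drm_crtc *crtc)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = drm_modeset_lock(&crtc->mutex, &ctx);
	if (ret == -EDEADLK) {
		/* Another context won; drop everything and wait our turn. */
		drm_modeset_backoff(&ctx);
		goto retry;
	}
	if (!ret)
		ret = do_change(crtc);	/* assumed helper */

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	return ret;
}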
int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
			     struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_etnaviv_gem_submit *args = data;
	struct drm_etnaviv_gem_submit_reloc *relocs;
	struct drm_etnaviv_gem_submit_pmr *pmrs;
	struct drm_etnaviv_gem_submit_bo *bos;
	struct etnaviv_gem_submit *submit;
	struct etnaviv_gpu *gpu;
	struct sync_file *sync_file = NULL;
	struct ww_acquire_ctx ticket;
	int out_fence_fd = -1;
	void *stream;
	int ret;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	if (args->stream_size % 4) {
		DRM_ERROR("non-aligned cmdstream buffer size: %u\n",
			  args->stream_size);
		return -EINVAL;
	}

	if (args->exec_state != ETNA_PIPE_3D &&
	    args->exec_state != ETNA_PIPE_2D &&
	    args->exec_state != ETNA_PIPE_VG) {
		DRM_ERROR("invalid exec_state: 0x%x\n", args->exec_state);
		return -EINVAL;
	}

	if (args->flags & ~ETNA_SUBMIT_FLAGS) {
		DRM_ERROR("invalid flags: 0x%x\n", args->flags);
		return -EINVAL;
	}

	/*
	 * Copy the command submission and bo array to kernel space in
	 * one go, and do this outside of any locks.
	 */
	bos = kvmalloc_array(args->nr_bos, sizeof(*bos), GFP_KERNEL);
	relocs = kvmalloc_array(args->nr_relocs, sizeof(*relocs), GFP_KERNEL);
	pmrs = kvmalloc_array(args->nr_pmrs, sizeof(*pmrs), GFP_KERNEL);
	stream = kvmalloc_array(1, args->stream_size, GFP_KERNEL);
	if (!bos || !relocs || !pmrs || !stream) {
		ret = -ENOMEM;
		goto err_submit_cmds;
	}

	ret = copy_from_user(bos, u64_to_user_ptr(args->bos),
			     args->nr_bos * sizeof(*bos));
	if (ret) {
		ret = -EFAULT;
		goto err_submit_cmds;
	}

	ret = copy_from_user(relocs, u64_to_user_ptr(args->relocs),
			     args->nr_relocs * sizeof(*relocs));
	if (ret) {
		ret = -EFAULT;
		goto err_submit_cmds;
	}

	ret = copy_from_user(pmrs, u64_to_user_ptr(args->pmrs),
			     args->nr_pmrs * sizeof(*pmrs));
	if (ret) {
		ret = -EFAULT;
		goto err_submit_cmds;
	}

	ret = copy_from_user(stream, u64_to_user_ptr(args->stream),
			     args->stream_size);
	if (ret) {
		ret = -EFAULT;
		goto err_submit_cmds;
	}

	if (args->flags & ETNA_SUBMIT_FENCE_FD_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
			ret = out_fence_fd;
			goto err_submit_cmds;
		}
	}

	ww_acquire_init(&ticket, &reservation_ww_class);

	submit = submit_create(dev, gpu, args->nr_bos, args->nr_pmrs);
	if (!submit) {
		ret = -ENOMEM;
		goto err_submit_ww_acquire;
	}

	ret = etnaviv_cmdbuf_init(gpu->cmdbuf_suballoc, &submit->cmdbuf,
				  ALIGN(args->stream_size, 8) + 8);
	if (ret)
		goto err_submit_objects;

	submit->cmdbuf.ctx = file->driver_priv;
	submit->exec_state = args->exec_state;
	submit->flags = args->flags;

	ret = submit_lookup_objects(submit, file, bos, args->nr_bos);
	if (ret)
		goto err_submit_objects;

	ret = submit_lock_objects(submit, &ticket);
	if (ret)
		goto err_submit_objects;

	if (!etnaviv_cmd_validate_one(gpu, stream, args->stream_size / 4,
				      relocs, args->nr_relocs)) {
		ret = -EINVAL;
		goto err_submit_objects;
	}

	if (args->flags & ETNA_SUBMIT_FENCE_FD_IN) {
		submit->in_fence = sync_file_get_fence(args->fence_fd);
		if (!submit->in_fence) {
			ret = -EINVAL;
			goto err_submit_objects;
		}
	}

	ret = submit_fence_sync(submit);
	if (ret)
		goto err_submit_objects;

	ret = submit_pin_objects(submit);
	if (ret)
		goto err_submit_objects;

	ret = submit_reloc(submit, stream, args->stream_size / 4,
			   relocs, args->nr_relocs);
	if (ret)
		goto err_submit_objects;

	ret = submit_perfmon_validate(submit, args->exec_state, pmrs);
	if (ret)
		goto err_submit_objects;

	memcpy(submit->cmdbuf.vaddr, stream, args->stream_size);
	submit->cmdbuf.user_size = ALIGN(args->stream_size, 8);

	ret = etnaviv_gpu_submit(gpu, submit);
	if (ret)
		goto err_submit_objects;

	submit_attach_object_fences(submit);

	if (args->flags & ETNA_SUBMIT_FENCE_FD_OUT) {
		/*
		 * This can be improved: ideally we want to allocate the sync
		 * file before kicking off the GPU job and just attach the
		 * fence to the sync file here, eliminating the ENOMEM
		 * possibility at this stage.
		 */
		sync_file = sync_file_create(submit->out_fence);
		if (!sync_file) {
			ret = -ENOMEM;
			goto err_submit_objects;
		}
		fd_install(out_fence_fd, sync_file->file);
	}

	args->fence_fd = out_fence_fd;
	args->fence = submit->out_fence->seqno;

err_submit_objects:
	etnaviv_submit_put(submit);

err_submit_ww_acquire:
	ww_acquire_fini(&ticket);

err_submit_cmds:
	if (ret && (out_fence_fd >= 0))
		put_unused_fd(out_fence_fd);
	if (stream)
		kvfree(stream);
	if (bos)
		kvfree(bos);
	if (relocs)
		kvfree(relocs);
	if (pmrs)
		kvfree(pmrs);

	return ret;
}