/*
 * test_abba - provoke a classic ABBA deadlock between this thread and
 * test_abba_work(), and verify ww_mutex behaviour.
 *
 * This thread takes a_mutex then b_mutex; the worker takes b_mutex then
 * a_mutex.  The a_ready/b_ready completions guarantee both sides hold
 * their first lock before either attempts its second, so the ABBA
 * conflict is certain to occur.
 *
 * @resolve: if true, back off on -EDEADLK per the ww_mutex protocol and
 *           expect both sides to eventually succeed; if false, expect
 *           at least one side to report -EDEADLK.
 *
 * Returns 0 on success, -EINVAL if the expected outcome was not seen.
 */
static int test_abba(bool resolve)
{
	struct test_abba abba;
	struct ww_acquire_ctx ctx;
	int err, ret;

	ww_mutex_init(&abba.a_mutex, &ww_class);
	ww_mutex_init(&abba.b_mutex, &ww_class);
	INIT_WORK_ONSTACK(&abba.work, test_abba_work);
	init_completion(&abba.a_ready);
	init_completion(&abba.b_ready);
	abba.resolve = resolve;

	schedule_work(&abba.work);

	ww_acquire_init(&ctx, &ww_class);
	ww_mutex_lock(&abba.a_mutex, &ctx);

	/* Rendezvous: both sides hold their first lock before proceeding. */
	complete(&abba.a_ready);
	wait_for_completion(&abba.b_ready);

	err = ww_mutex_lock(&abba.b_mutex, &ctx);
	if (resolve && err == -EDEADLK) {
		/*
		 * We lost the tie-break: release the lock we hold, wait for
		 * the contended one with lock_slow, then reacquire - the
		 * standard ww_mutex deadlock-backoff dance.
		 */
		ww_mutex_unlock(&abba.a_mutex);
		ww_mutex_lock_slow(&abba.b_mutex, &ctx);
		err = ww_mutex_lock(&abba.a_mutex, &ctx);
	}

	if (!err)
		ww_mutex_unlock(&abba.b_mutex);
	ww_mutex_unlock(&abba.a_mutex);
	ww_acquire_fini(&ctx);

	/* The work item lives on our stack: it must finish before return. */
	flush_work(&abba.work);
	destroy_work_on_stack(&abba.work);

	ret = 0;
	if (resolve) {
		/* With backoff enabled, both sides must have succeeded. */
		if (err || abba.result) {
			pr_err("%s: failed to resolve ABBA deadlock, A err=%d, B err=%d\n",
			       __func__, err, abba.result);
			ret = -EINVAL;
		}
	} else {
		/* Without backoff, at least one side must see -EDEADLK. */
		if (err != -EDEADLK && abba.result != -EDEADLK) {
			pr_err("%s: missed ABBA deadlock, A err=%d, B err=%d\n",
			       __func__, err, abba.result);
			ret = -EINVAL;
		}
	}
	return ret;
}
/*
 * test_aa - check self-deadlock detection on a single ww_mutex.
 *
 * With the lock already held under @ctx, a trylock on the same mutex
 * must fail, and a second ww_mutex_lock() with the same context must
 * return -EALREADY rather than succeeding or deadlocking.
 *
 * Returns 0 on success, -EINVAL on any unexpected outcome.
 */
static int test_aa(void)
{
	struct ww_mutex mutex;
	struct ww_acquire_ctx ctx;
	int ret;

	ww_mutex_init(&mutex, &ww_class);
	ww_acquire_init(&ctx, &ww_class);

	ww_mutex_lock(&mutex, &ctx);

	if (ww_mutex_trylock(&mutex)) {
		pr_err("%s: trylocked itself!\n", __func__);
		/* Drop the bogus trylock acquisition; 'out' drops the first. */
		ww_mutex_unlock(&mutex);
		ret = -EINVAL;
		goto out;
	}

	ret = ww_mutex_lock(&mutex, &ctx);
	if (ret != -EALREADY) {
		pr_err("%s: missed deadlock for recursing, ret=%d\n",
		       __func__, ret);
		/* If the recursive lock "succeeded", undo that one first. */
		if (!ret)
			ww_mutex_unlock(&mutex);
		ret = -EINVAL;
		goto out;
	}

	ret = 0;
out:
	/* Release the initial acquisition taken at the top. */
	ww_mutex_unlock(&mutex);
	ww_acquire_fini(&ctx);
	return ret;
}
/*
 * reset_locks - restore all selftest lock/lockdep state between runs.
 *
 * Individual selftests may deliberately corrupt lockdep's view of the
 * test locks, so before the next test every lock, key and context used
 * by the suite is torn down and reinitialised.  Runs with interrupts
 * disabled so the reset appears atomic to lockdep.
 *
 * NOTE(review): the I1/I2/I_WW macros are defined elsewhere in this
 * file; presumably I1/I_WW reinitialise the individual lock instances
 * before lockdep_reset() and I2 re-registers the classes afterwards -
 * confirm against the macro definitions.
 */
static void reset_locks(void)
{
	local_irq_disable();
	/* Forget everything lockdep learned about the ww selftest keys. */
	lockdep_free_key_range(&ww_lockdep.acquire_key, 1);
	lockdep_free_key_range(&ww_lockdep.mutex_key, 1);

	I1(A); I1(B); I1(C); I1(D);
	I1(X1); I1(X2); I1(Y1); I1(Y2); I1(Z1); I1(Z2);
	I_WW(t); I_WW(t2); I_WW(o.base); I_WW(o2.base); I_WW(o3.base);
	lockdep_reset();
	I2(A); I2(B); I2(C); I2(D);
	init_shared_classes();

	/* Fresh ww mutexes and zeroed acquire contexts for the next test. */
	ww_mutex_init(&o, &ww_lockdep);
	ww_mutex_init(&o2, &ww_lockdep);
	ww_mutex_init(&o3, &ww_lockdep);
	memset(&t, 0, sizeof(t));
	memset(&t2, 0, sizeof(t2));
	memset(&ww_lockdep.acquire_key, 0, sizeof(ww_lockdep.acquire_key));
	memset(&ww_lockdep.mutex_key, 0, sizeof(ww_lockdep.mutex_key));
	local_irq_enable();
}
static int __test_cycle(unsigned int nthreads) { struct test_cycle *cycles; unsigned int n, last = nthreads - 1; int ret; cycles = kmalloc_array(nthreads, sizeof(*cycles), GFP_KERNEL); if (!cycles) return -ENOMEM; for (n = 0; n < nthreads; n++) { struct test_cycle *cycle = &cycles[n]; ww_mutex_init(&cycle->a_mutex, &ww_class); if (n == last) cycle->b_mutex = &cycles[0].a_mutex; else cycle->b_mutex = &cycles[n + 1].a_mutex; if (n == 0) cycle->a_signal = &cycles[last].b_signal; else cycle->a_signal = &cycles[n - 1].b_signal; init_completion(&cycle->b_signal); INIT_WORK(&cycle->work, test_cycle_work); cycle->result = 0; } for (n = 0; n < nthreads; n++) queue_work(wq, &cycles[n].work); flush_workqueue(wq); ret = 0; for (n = 0; n < nthreads; n++) { struct test_cycle *cycle = &cycles[n]; if (!cycle->result) continue; pr_err("cylic deadlock not resolved, ret[%d/%d] = %d\n", n, nthreads, cycle->result); ret = -EINVAL; break; } for (n = 0; n < nthreads; n++) ww_mutex_destroy(&cycles[n].a_mutex); kfree(cycles); return ret; }
/*
 * __test_mutex - verify basic mutual exclusion against a worker thread.
 *
 * The worker (test_mutex_work) signals 'ready', waits for 'go', then
 * tries to take mtx.mutex itself and completes 'done' when it gets it.
 * We take the lock first, release the worker, and check that it cannot
 * complete while we still hold the lock.
 *
 * @flags: TEST_MTX_CTX  - take the lock with an acquire context;
 *         TEST_MTX_SPIN - busy-poll for TIMEOUT instead of sleeping.
 *
 * Returns 0 on success, -EINVAL if the worker ran concurrently.
 */
static int __test_mutex(unsigned int flags)
{
#define TIMEOUT (HZ / 16)
	struct test_mutex mtx;
	struct ww_acquire_ctx ctx;
	int ret;

	ww_mutex_init(&mtx.mutex, &ww_class);
	ww_acquire_init(&ctx, &ww_class);

	INIT_WORK_ONSTACK(&mtx.work, test_mutex_work);
	init_completion(&mtx.ready);
	init_completion(&mtx.go);
	init_completion(&mtx.done);
	mtx.flags = flags;

	schedule_work(&mtx.work);

	wait_for_completion(&mtx.ready);
	ww_mutex_lock(&mtx.mutex, (flags & TEST_MTX_CTX) ? &ctx : NULL);
	complete(&mtx.go);
	if (flags & TEST_MTX_SPIN) {
		unsigned long timeout = jiffies + TIMEOUT;

		ret = 0;
		do {
			/* Worker finishing while we hold the lock = failure. */
			if (completion_done(&mtx.done)) {
				ret = -EINVAL;
				break;
			}
			cond_resched();
		} while (time_before(jiffies, timeout));
	} else {
		/*
		 * Nonzero return means 'done' fired before the timeout,
		 * i.e. the worker got the lock while we still held it -
		 * so nonzero here is deliberately treated as failure.
		 */
		ret = wait_for_completion_timeout(&mtx.done, TIMEOUT);
	}
	ww_mutex_unlock(&mtx.mutex);
	ww_acquire_fini(&ctx);

	if (ret) {
		pr_err("%s(flags=%x): mutual exclusion failure\n",
		       __func__, flags);
		ret = -EINVAL;
	}

	/* The work item is on our stack: flush before it goes out of scope. */
	flush_work(&mtx.work);
	destroy_work_on_stack(&mtx.work);
	return ret;
#undef TIMEOUT
}
/**
 * drm_modeset_lock_init - initialize lock
 * @lock: lock to init
 *
 * Prepares @lock for first use: the list head is cleared and the
 * underlying ww_mutex is initialized against the global crtc_ww_class.
 */
void drm_modeset_lock_init(struct drm_modeset_lock *lock)
{
	INIT_LIST_HEAD(&lock->head);
	ww_mutex_init(&lock->mutex, &crtc_ww_class);
}