Example 1
/**
 * Cancel every sub-lock that this striped (LOV) lock has enqueued.
 */
static void lov_lock_cancel(const struct lu_env *env,
			    const struct cl_lock_slice *slice)
{
	struct cl_lock *lock = slice->cls_lock;
	struct lov_lock *lovlck = cl2lov_lock(slice);
	int i;

	for (i = 0; i < lovlck->lls_nr; ++i) {
		struct lov_lock_sub *lls = &lovlck->lls_sub[i];
		struct cl_lock *sublock = &lls->sub_lock;
		struct lov_sublock_env *subenv;

		if (!lls->sub_is_enqueued)
			continue;	/* this stripe was never enqueued */

		lls->sub_is_enqueued = 0;
		subenv = lov_sublock_env_get(env, lock, lls);
		if (!IS_ERR(subenv)) {
			cl_lock_cancel(subenv->lse_env, sublock);
			lov_sublock_env_put(subenv);
		} else {
			CL_LOCK_DEBUG(D_ERROR, env, slice->cls_lock,
				      "lov_lock_cancel fails with %ld.\n",
				      PTR_ERR(subenv));
		}
	}
}
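
Below is a minimal, self-contained sketch of the pattern Example 1 applies. This is not Lustre code: struct sub_lock, cancel_one() and cancel_all() are hypothetical stand-ins for lov_lock_sub, cl_lock_cancel() and the loop body above, reduced to what the control flow shows.

#include <stdio.h>
#include <stddef.h>

struct sub_lock {
	int enqueued;	/* stand-in for lls->sub_is_enqueued */
	int id;
};

static void cancel_one(struct sub_lock *s)
{
	printf("cancelled sub-lock %d\n", s->id);
}

static void cancel_all(struct sub_lock *subs, size_t nr)
{
	size_t i;

	for (i = 0; i < nr; i++) {
		if (!subs[i].enqueued)
			continue;	/* nothing to undo for this stripe */
		subs[i].enqueued = 0;	/* clear state before cancelling */
		cancel_one(&subs[i]);
	}
}

int main(void)
{
	struct sub_lock subs[] = { { 1, 0 }, { 0, 1 }, { 1, 2 } };

	cancel_all(subs, sizeof(subs) / sizeof(subs[0]));
	return 0;
}

Note the ordering: the enqueued flag is cleared before the cancel is attempted, mirroring the original, where sub_is_enqueued is reset even when lov_sublock_env_get() fails, so a stripe is never revisited.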
Example 2
/**
 * Core of osc_ldlm_blocking_ast() logic.
 */
static void osc_lock_blocking(const struct lu_env *env,
			      struct ldlm_lock *dlmlock,
			      struct osc_lock *olck, int blocking)
{
	struct cl_lock *lock = olck->ols_cl.cls_lock;

	LASSERT(olck->ols_lock == dlmlock);
	CLASSERT(OLS_BLOCKED < OLS_CANCELLED);
	LASSERT(!osc_lock_is_lockless(olck));

	/*
	 * The lock might still be addref-ed here if, e.g., the blocking AST
	 * is sent for a failed lock.
	 */
	osc_lock_unhold(olck);

	if (blocking && olck->ols_state < OLS_BLOCKED)
		/*
		 * Move the osc_lock into OLS_BLOCKED before cancelling the
		 * lock, because cancellation recursively re-enters
		 * osc_lock_blocking() with the state set to OLS_CANCELLED.
		 */
		olck->ols_state = OLS_BLOCKED;
	/*
	 * Cancel and destroy the lock at least once, no matter how the
	 * blocking AST is entered (see the comment above
	 * osc_ldlm_blocking_ast() for the use cases). cl_lock_cancel() and
	 * cl_lock_delete() are idempotent.
	 */
	cl_lock_cancel(env, lock);
	cl_lock_delete(env, lock);
}
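
Example 2 depends on cl_lock_cancel() and cl_lock_delete() being idempotent, which is what makes it safe for the blocking AST to reach this path more than once. The toy model below (not the Lustre implementation; struct toy_lock, toy_cancel() and toy_delete() are hypothetical) shows the usual way such idempotence is achieved: each step records that it has run and returns early on re-entry.

#include <stdio.h>
#include <stdbool.h>

struct toy_lock {
	bool cancelled;
	bool deleted;
};

static void toy_cancel(struct toy_lock *lk)
{
	if (lk->cancelled)
		return;		/* idempotent: repeat calls are no-ops */
	lk->cancelled = true;
	printf("cancelled\n");
}

static void toy_delete(struct toy_lock *lk)
{
	if (lk->deleted)
		return;
	lk->deleted = true;
	printf("deleted\n");
}

int main(void)
{
	struct toy_lock lk = { false, false };

	/* A blocking AST may fire more than once; both passes are safe. */
	toy_cancel(&lk);
	toy_delete(&lk);
	toy_cancel(&lk);
	toy_delete(&lk);
	return 0;
}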