/**
 * Lock upcall function that is executed either when a reply to ENQUEUE rpc is
 * received from a server, or after osc_enqueue_base() matched a local DLM
 * lock.
 */
static int osc_lock_upcall(void *cookie, struct lustre_handle *lockh,
			   int errcode)
{
	struct osc_lock         *oscl  = cookie;
	struct cl_lock_slice    *slice = &oscl->ols_cl;
	struct lu_env           *env;
	int			rc;

	ENTRY;

	env = cl_env_percpu_get();
	/* should never happen, similar to osc_ldlm_blocking_ast(). */
	LASSERT(!IS_ERR(env));

	rc = ldlm_error2errno(errcode);
	if (oscl->ols_state == OLS_ENQUEUED) {
		oscl->ols_state = OLS_UPCALL_RECEIVED;
	} else if (oscl->ols_state == OLS_CANCELLED) {
		rc = -EIO;
	} else {
		CERROR("Impossible state: %d\n", oscl->ols_state);
		LBUG();
	}

	if (rc == 0)
		osc_lock_granted(env, oscl, lockh, errcode == ELDLM_OK);

	/* Error handling, some errors are tolerable. */
	if (oscl->ols_locklessable && rc == -EUSERS) {
		/* This is a tolerable error, turn this lock into
		 * lockless lock.
		 */
		osc_object_set_contended(cl2osc(slice->cls_obj));
		LASSERT(slice->cls_ops != oscl->ols_lockless_ops);

		/* Change this lock to ldlmlock-less lock. */
		osc_lock_to_lockless(env, oscl, 1);
		oscl->ols_state = OLS_GRANTED;
		rc = 0;
	} else if (oscl->ols_glimpse && rc == -ENAVAIL) {
		LASSERT(oscl->ols_flags & LDLM_FL_LVB_READY);
		osc_lock_lvb_update(env, cl2osc(slice->cls_obj),
				    NULL, &oscl->ols_lvb);
		/* Hide the error. */
		rc = 0;
	} else if (rc < 0 && oscl->ols_flags & LDLM_FL_NDELAY) {
		rc = -EWOULDBLOCK;
	}

	if (oscl->ols_owner != NULL)
		cl_sync_io_note(env, oscl->ols_owner, rc);
	cl_env_percpu_put(env);

	RETURN(rc);
}
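The function above is the completion half of a cookie/upcall pattern: osc_enqueue_base() keeps the osc_lock as an opaque cookie, and whichever path resolves the enqueue (server reply or local DLM match) invokes the upcall once with the DLM status, which is translated to a negative errno and either drives the lock to OLS_GRANTED or fails it, tolerating a few specific errors. Below is a minimal, self-contained sketch of that pattern; it is not Lustre code, and every name in it (demo_lock, demo_enqueue, ...) is hypothetical.

#include <errno.h>
#include <stdio.h>

enum demo_lock_state { DLS_ENQUEUED, DLS_CANCELLED, DLS_GRANTED };

struct demo_lock {
	enum demo_lock_state dl_state;
	int		     dl_locklessable;	/* may fall back to lockless I/O */
};

typedef int (*demo_upcall_t)(void *cookie, int errcode);

/* Mirrors the shape of osc_lock_upcall(): check the state machine,
 * accept the (already errno-translated) status, tolerate selected errors. */
static int demo_lock_upcall(void *cookie, int errcode)
{
	struct demo_lock *dl = cookie;
	int rc = errcode;

	if (dl->dl_state == DLS_ENQUEUED)
		dl->dl_state = DLS_GRANTED;
	else if (dl->dl_state == DLS_CANCELLED)
		rc = -EIO;

	/* Tolerable error: resource is contended, fall back to lockless mode. */
	if (dl->dl_locklessable && rc == -EUSERS)
		rc = 0;

	return rc;
}

/* Stand-in for the enqueue path: it only records whom to call back. */
static int demo_enqueue(struct demo_lock *dl, demo_upcall_t upcall, void *cookie)
{
	dl->dl_state = DLS_ENQUEUED;
	/* ... the real code would send the ENQUEUE RPC here; the reply
	 * handler (or a local DLM match) then invokes the upcall ... */
	return upcall(cookie, 0);
}

int main(void)
{
	struct demo_lock dl = { .dl_locklessable = 1 };
	int rc = demo_enqueue(&dl, demo_lock_upcall, &dl);

	printf("rc = %d, state = %d\n", rc, dl.dl_state);
	return 0;
}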
Example #2
/**
 * Lock upcall function that is executed either when a reply to ENQUEUE rpc is
 * received from a server, or after osc_enqueue_base() matched a local DLM
 * lock.
 */
static int osc_lock_upcall(void *cookie, int errcode)
{
	struct osc_lock	 *olck  = cookie;
	struct cl_lock_slice    *slice = &olck->ols_cl;
	struct cl_lock	  *lock  = slice->cls_lock;
	struct lu_env	   *env;
	struct cl_env_nest       nest;

	ENTRY;
	env = cl_env_nested_get(&nest);
	if (!IS_ERR(env)) {
		int rc;

		cl_lock_mutex_get(env, lock);

		LASSERT(lock->cll_state >= CLS_QUEUING);
		if (olck->ols_state == OLS_ENQUEUED) {
			olck->ols_state = OLS_UPCALL_RECEIVED;
			rc = ldlm_error2errno(errcode);
		} else if (olck->ols_state == OLS_CANCELLED) {
			rc = -EIO;
		} else {
			CERROR("Impossible state: %d\n", olck->ols_state);
			LBUG();
		}
		if (rc) {
			struct ldlm_lock *dlmlock;

			dlmlock = ldlm_handle2lock(&olck->ols_handle);
			if (dlmlock != NULL) {
				lock_res_and_lock(dlmlock);
				spin_lock(&osc_ast_guard);
				LASSERT(olck->ols_lock == NULL);
				dlmlock->l_ast_data = NULL;
				olck->ols_handle.cookie = 0ULL;
				spin_unlock(&osc_ast_guard);
				ldlm_lock_fail_match_locked(dlmlock);
				unlock_res_and_lock(dlmlock);
				LDLM_LOCK_PUT(dlmlock);
			}
		} else {
			if (olck->ols_glimpse)
				olck->ols_glimpse = 0;
			osc_lock_upcall0(env, olck);
		}

		/* Error handling, some errors are tolerable. */
		if (olck->ols_locklessable && rc == -EUSERS) {
			/* This is a tolerable error, turn this lock into
			 * lockless lock.
			 */
			osc_object_set_contended(cl2osc(slice->cls_obj));
			LASSERT(slice->cls_ops == &osc_lock_ops);

			/* Change this lock to ldlmlock-less lock. */
			osc_lock_to_lockless(env, olck, 1);
			olck->ols_state = OLS_GRANTED;
			rc = 0;
		} else if (olck->ols_glimpse && rc == -ENAVAIL) {
			osc_lock_lvb_update(env, olck, rc);
			cl_lock_delete(env, lock);
			/* Hide the error. */
			rc = 0;
		}

		if (rc == 0) {
			/* For the AGL case, the RPC sponsor may exit the
			 * cl_lock processing without wait() being called
			 * before the related OSC lock upcall(). So update
			 * the lock status according to the enqueue result
			 * inside the AGL upcall(). */
			if (olck->ols_agl) {
				lock->cll_flags |= CLF_FROM_UPCALL;
				cl_wait_try(env, lock);
				lock->cll_flags &= ~CLF_FROM_UPCALL;
				if (!olck->ols_glimpse)
					olck->ols_agl = 0;
			}
			cl_lock_signal(env, lock);
			/* del user for lock upcall cookie */
			cl_unuse_try(env, lock);
		} else {
			/* del user for lock upcall cookie */
			cl_lock_user_del(env, lock);
			cl_lock_error(env, lock, rc);
		}

		/* release cookie reference, acquired by osc_lock_enqueue() */
		cl_lock_hold_release(env, lock, "upcall", lock);
		cl_lock_mutex_put(env, lock);

		lu_ref_del(&lock->cll_reference, "upcall", lock);
		/* This may be the last reference, so it must be called
		 * after cl_lock_mutex_put(). */
		cl_lock_put(env, lock);

		cl_env_nested_put(&nest, env);
	} else {
		/* should never happen, similar to osc_ldlm_blocking_ast(). */
		LBUG();
	}
	RETURN(errcode);
}
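In both versions the raw DLM status is first funnelled through ldlm_error2errno(), so the tolerable-error checks above (-EUSERS for contention, -ENAVAIL for a failed glimpse) operate on ordinary negative errnos rather than protocol codes. The sketch below shows roughly what such a translation looks like; the DEMO_ELDLM_* names and values are illustrative placeholders, not the real ELDLM_* constants.

#include <errno.h>
#include <stdio.h>

/* Illustrative stand-ins for DLM status codes; the numeric values are made
 * up for this sketch and do not match the real Lustre constants. */
enum demo_ldlm_error {
	DEMO_ELDLM_OK		= 0,
	DEMO_ELDLM_LOCK_CHANGED	= 300,
	DEMO_ELDLM_LOCK_ABORTED	= 301,
	DEMO_ELDLM_NO_LOCK_DATA	= 302,
};

/* Rough shape of an ldlm_error2errno()-style mapping: success stays 0,
 * every recognised failure becomes a negative errno, and anything unknown
 * is treated as -EIO so callers never see a positive protocol code. */
static int demo_ldlm_error2errno(enum demo_ldlm_error error)
{
	switch (error) {
	case DEMO_ELDLM_OK:
		return 0;
	case DEMO_ELDLM_LOCK_CHANGED:
		return -ESTALE;
	case DEMO_ELDLM_LOCK_ABORTED:
		return -ENAVAIL;	/* tolerated for glimpse locks above */
	case DEMO_ELDLM_NO_LOCK_DATA:
		return -ENOENT;
	default:
		return -EIO;
	}
}

int main(void)
{
	printf("aborted -> %d\n",
	       demo_ldlm_error2errno(DEMO_ELDLM_LOCK_ABORTED));
	return 0;
}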