Example #1
/**
 * Callback handler for receiving incoming glimpse ASTs.
 *
 * This can only happen on the client side.  After handling the glimpse AST
 * we also consider dropping the lock here if it has been unused locally for
 * a long time.
 */
static void ldlm_handle_gl_callback(struct ptlrpc_request *req,
				    struct ldlm_namespace *ns,
				    struct ldlm_request *dlm_req,
				    struct ldlm_lock *lock)
{
	int rc = -ENOSYS;

	LDLM_DEBUG(lock, "client glimpse AST callback handler");

	if (lock->l_glimpse_ast != NULL)
		rc = lock->l_glimpse_ast(lock, req);

	if (req->rq_repmsg != NULL) {
		ptlrpc_reply(req);
	} else {
		req->rq_status = rc;
		ptlrpc_error(req);
	}

	lock_res_and_lock(lock);
	if (lock->l_granted_mode == LCK_PW &&
	    !lock->l_readers && !lock->l_writers &&
	    cfs_time_after(cfs_time_current(),
			   cfs_time_add(lock->l_last_used,
					cfs_time_seconds(10)))) {
		unlock_res_and_lock(lock);
		if (ldlm_bl_to_thread_lock(ns, NULL, lock))
			ldlm_handle_bl_callback(ns, NULL, lock);

		return;
	}
	unlock_res_and_lock(lock);
	LDLM_LOCK_RELEASE(lock);
}
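
A side note on Example #1: the decision to drop the lock rests on a simple check made under the resource lock: no local readers or writers, and a last-use timestamp older than a threshold. Below is a minimal userspace sketch of that check; all names (toy_lock, IDLE_SECONDS, the pthread mutex standing in for the resource lock) are hypothetical, not the Lustre API.

#include <pthread.h>
#include <stdbool.h>
#include <time.h>

#define IDLE_SECONDS 10

struct toy_lock {
	pthread_mutex_t tl_res_lock;   /* stands in for lock_res_and_lock() */
	int             tl_readers;    /* stands in for l_readers */
	int             tl_writers;    /* stands in for l_writers */
	time_t          tl_last_used;  /* stands in for l_last_used */
};

/* Return true if the lock is locally unused and has been idle too long. */
static bool toy_lock_is_idle(struct toy_lock *lk)
{
	bool idle;

	pthread_mutex_lock(&lk->tl_res_lock);
	idle = lk->tl_readers == 0 && lk->tl_writers == 0 &&
	       time(NULL) > lk->tl_last_used + IDLE_SECONDS;
	pthread_mutex_unlock(&lk->tl_res_lock);

	return idle;
}
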
Example #2
/**
 * Helper for mdc_dlm_blocking_ast() handling discrepancies between cl_lock
 * and ldlm_lock caches.
 */
static int mdc_dlm_blocking_ast0(const struct lu_env *env,
				 struct ldlm_lock *dlmlock,
				 void *data, int flag)
{
	struct cl_object *obj = NULL;
	int result = 0;
	bool discard;
	enum cl_lock_mode mode = CLM_READ;

	ENTRY;

	LASSERT(flag == LDLM_CB_CANCELING);
	LASSERT(dlmlock != NULL);

	lock_res_and_lock(dlmlock);
	if (dlmlock->l_granted_mode != dlmlock->l_req_mode) {
		dlmlock->l_ast_data = NULL;
		unlock_res_and_lock(dlmlock);
		RETURN(0);
	}

	discard = ldlm_is_discard_data(dlmlock);
	if (dlmlock->l_granted_mode & (LCK_PW | LCK_GROUP))
		mode = CLM_WRITE;

	if (dlmlock->l_ast_data != NULL) {
		obj = osc2cl(dlmlock->l_ast_data);
		dlmlock->l_ast_data = NULL;
		cl_object_get(obj);
	}
	ldlm_set_kms_ignore(dlmlock);
	unlock_res_and_lock(dlmlock);

	/* if l_ast_data is NULL, the dlmlock was enqueued by AGL or
	 * the object has been destroyed. */
	if (obj != NULL) {
		struct cl_attr *attr = &osc_env_info(env)->oti_attr;

		/* Destroy pages covered by the extent of the DLM lock */
		result = mdc_lock_flush(env, cl2osc(obj), cl_index(obj, 0),
					CL_PAGE_EOF, mode, discard);
		/* Losing a lock, set KMS to 0.
		 * NB: it is assumed that the DOM lock covers the whole data on the MDT.
		 */
		lock_res_and_lock(dlmlock);
		cl_object_attr_lock(obj);
		attr->cat_kms = 0;
		cl_object_attr_update(env, obj, attr, CAT_KMS);
		cl_object_attr_unlock(obj);
		unlock_res_and_lock(dlmlock);
		cl_object_put(env, obj);
	}
	RETURN(result);
}
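
Example #2 follows a hand-off idiom that recurs throughout this section: detach l_ast_data and take an object reference while the resource lock is held, then do the heavy work (page flush, KMS update) only after dropping it. A userspace sketch of that idiom follows; the toy_* names and the pthread mutex are hypothetical stand-ins, not the real Lustre API.

#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>

struct toy_obj {
	atomic_int to_refs;            /* stands in for the cl_object refcount */
};

struct toy_lock {
	pthread_mutex_t tl_res_lock;   /* stands in for lock_res_and_lock() */
	struct toy_obj *tl_ast_data;   /* stands in for l_ast_data */
};

static void toy_obj_get(struct toy_obj *o) { atomic_fetch_add(&o->to_refs, 1); }
static void toy_obj_put(struct toy_obj *o) { atomic_fetch_sub(&o->to_refs, 1); }

static void toy_blocking_ast(struct toy_lock *lk)
{
	struct toy_obj *obj = NULL;

	pthread_mutex_lock(&lk->tl_res_lock);
	if (lk->tl_ast_data != NULL) {
		obj = lk->tl_ast_data;
		lk->tl_ast_data = NULL;  /* detach under the lock */
		toy_obj_get(obj);        /* keep the object alive for us */
	}
	pthread_mutex_unlock(&lk->tl_res_lock);

	if (obj != NULL) {
		/* the heavy work (flush, attribute update) happens here,
		 * outside the lock, in the real code */
		toy_obj_put(obj);
	}
}
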
Example #3
static struct osc_lock *osc_ast_data_get(struct ldlm_lock *dlm_lock)
{
	struct osc_lock *olck;

	lock_res_and_lock(dlm_lock);
	spin_lock(&osc_ast_guard);
	olck = dlm_lock->l_ast_data;
	if (olck != NULL) {
		struct cl_lock *lock = olck->ols_cl.cls_lock;
		/*
		 * If osc_lock holds a reference on ldlm lock, return it even
		 * when cl_lock is in CLS_FREEING state. This way
		 *
		 *	 osc_ast_data_get(dlmlock) == NULL
		 *
		 * guarantees that all osc references on dlmlock were
		 * released. osc_dlm_blocking_ast0() relies on that.
		 */
		if (lock->cll_state < CLS_FREEING || olck->ols_has_ref) {
			cl_lock_get_trust(lock);
			lu_ref_add_atomic(&lock->cll_reference,
					  "ast", current);
		} else
			olck = NULL;
	}
	spin_unlock(&osc_ast_guard);
	unlock_res_and_lock(dlm_lock);
	return olck;
}
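
The comment in Example #3 documents an invariant rather than a mechanism: the getter hands out the osc_lock only while it is still usable or still pinned, so a NULL return proves that no osc references remain. A small userspace sketch of such a gate is shown below, with purely hypothetical names; a NULL result from toy_ast_data_get() then plays the same "quiescence proof" role the comment describes.

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

enum toy_state { TOY_LIVE, TOY_FREEING };

struct toy_payload {
	enum toy_state tp_state;    /* analogue of cll_state */
	bool           tp_has_ref;  /* analogue of ols_has_ref */
	int            tp_refs;
};

struct toy_lock {
	pthread_mutex_t     tl_guard;     /* analogue of osc_ast_guard */
	struct toy_payload *tl_ast_data;  /* analogue of l_ast_data */
};

static struct toy_payload *toy_ast_data_get(struct toy_lock *lk)
{
	struct toy_payload *p;

	pthread_mutex_lock(&lk->tl_guard);
	p = lk->tl_ast_data;
	if (p != NULL) {
		if (p->tp_state != TOY_FREEING || p->tp_has_ref)
			p->tp_refs++;  /* pin the payload for the caller */
		else
			p = NULL;      /* freeing and unreferenced: nothing left */
	}
	pthread_mutex_unlock(&lk->tl_guard);

	return p;
}
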
Example #4
/**
 * Callback handler for receiving incoming blocking ASTs.
 *
 * This can only happen on the client side.
 */
void ldlm_handle_bl_callback(struct ldlm_namespace *ns,
			     struct ldlm_lock_desc *ld, struct ldlm_lock *lock)
{
	int do_ast;

	LDLM_DEBUG(lock, "client blocking AST callback handler");

	lock_res_and_lock(lock);
	lock->l_flags |= LDLM_FL_CBPENDING;

	if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK)
		lock->l_flags |= LDLM_FL_CANCEL;

	do_ast = (!lock->l_readers && !lock->l_writers);
	unlock_res_and_lock(lock);

	if (do_ast) {
		CDEBUG(D_DLMTRACE, "Lock %p already unused, calling callback (%p)\n",
		       lock, lock->l_blocking_ast);
		if (lock->l_blocking_ast != NULL)
			lock->l_blocking_ast(lock, ld, lock->l_ast_data,
					     LDLM_CB_BLOCKING);
	} else {
		CDEBUG(D_DLMTRACE, "Lock %p is referenced, will be cancelled later\n",
		       lock);
	}

	LDLM_DEBUG(lock, "client blocking callback handler END");
	LDLM_LOCK_RELEASE(lock);
}
Example #5
/**
 * Return qsd_qtype_info structure associated with a global lock
 *
 * \param lock - is the global lock from which we should extract the qqi
 * \param reset - whether lock->l_ast_data should be cleared
 */
static struct qsd_qtype_info *qsd_glb_ast_data_get(struct ldlm_lock *lock,
						   bool reset)
{
	struct qsd_qtype_info *qqi;
	ENTRY;

	lock_res_and_lock(lock);
	qqi = lock->l_ast_data;
	if (qqi != NULL) {
		qqi_getref(qqi);
		if (reset)
			lock->l_ast_data = NULL;
	}
	unlock_res_and_lock(lock);

	if (qqi != NULL)
		/* it is not safe to call lu_ref_add() under spinlock */
		lu_ref_add(&qqi->qqi_reference, "ast_data_get", lock);

	if (reset && qqi != NULL) {
		/* release the qqi reference held for the lock */
		lu_ref_del(&qqi->qqi_reference, "glb_lock", lock);
		qqi_putref(qqi);
	}
	RETURN(qqi);
}
Example #6
static int osc_lock_upcall_agl(void *cookie, struct lustre_handle *lockh,
			       int errcode)
{
	struct osc_object	*osc = cookie;
	struct ldlm_lock	*dlmlock;
	struct lu_env           *env;
	struct cl_env_nest       nest;
	ENTRY;

	env = cl_env_nested_get(&nest);
	LASSERT(!IS_ERR(env));

	if (errcode == ELDLM_LOCK_MATCHED)
		GOTO(out, errcode = ELDLM_OK);

	if (errcode != ELDLM_OK)
		GOTO(out, errcode);

	dlmlock = ldlm_handle2lock(lockh);
	LASSERT(dlmlock != NULL);

	lock_res_and_lock(dlmlock);
	LASSERT(dlmlock->l_granted_mode == dlmlock->l_req_mode);

	/* there is no osc_lock associated with AGL lock */
	osc_lock_lvb_update(env, osc, dlmlock, NULL);

	unlock_res_and_lock(dlmlock);
	LDLM_LOCK_PUT(dlmlock);

out:
	cl_object_put(env, osc2cl(osc));
	cl_env_nested_put(&nest, env);
	RETURN(ldlm_error2errno(errcode));
}
Example #7
static void osc_lock_granted(const struct lu_env *env, struct osc_lock *oscl,
			     struct lustre_handle *lockh, bool lvb_update)
{
	struct ldlm_lock *dlmlock;

	dlmlock = ldlm_handle2lock_long(lockh, 0);
	LASSERT(dlmlock);

	/* lock reference taken by ldlm_handle2lock_long() is
	 * owned by osc_lock and released in osc_lock_detach()
	 */
	lu_ref_add(&dlmlock->l_reference, "osc_lock", oscl);
	oscl->ols_has_ref = 1;

	LASSERT(!oscl->ols_dlmlock);
	oscl->ols_dlmlock = dlmlock;

	/* This may be a matched lock for a glimpse request, do not hold
	 * a lock reference in that case.
	 */
	if (!oscl->ols_glimpse) {
		/* hold a reference for a non-glimpse lock which will
		 * be released in osc_lock_cancel()
		 */
		lustre_handle_copy(&oscl->ols_handle, lockh);
		ldlm_lock_addref(lockh, oscl->ols_einfo.ei_mode);
		oscl->ols_hold = 1;
	}

	/* Lock must have been granted. */
	lock_res_and_lock(dlmlock);
	if (dlmlock->l_granted_mode == dlmlock->l_req_mode) {
		struct ldlm_extent *ext = &dlmlock->l_policy_data.l_extent;
		struct cl_lock_descr *descr = &oscl->ols_cl.cls_lock->cll_descr;

		/* extend the lock extent, otherwise it will cause problems
		 * when we decide whether to grant a lockless lock.
		 */
		descr->cld_mode = osc_ldlm2cl_lock(dlmlock->l_granted_mode);
		descr->cld_start = cl_index(descr->cld_obj, ext->start);
		descr->cld_end = cl_index(descr->cld_obj, ext->end);
		descr->cld_gid = ext->gid;

		/* no lvb update for matched lock */
		if (lvb_update) {
			LASSERT(oscl->ols_flags & LDLM_FL_LVB_READY);
			osc_lock_lvb_update(env, cl2osc(oscl->ols_cl.cls_obj),
					    dlmlock, NULL);
		}
		LINVRNT(osc_lock_invariant(oscl));
	}
	unlock_res_and_lock(dlmlock);

	LASSERT(oscl->ols_state != OLS_GRANTED);
	oscl->ols_state = OLS_GRANTED;
}
Example #8
struct inode *llu_inode_from_resource_lock(struct ldlm_lock *lock)
{
	struct inode *inode;
	lock_res_and_lock(lock);

	if (lock->l_resource->lr_lvb_inode) {
		inode = (struct inode *)lock->l_resource->lr_lvb_inode;
		I_REF(inode);
	} else
		inode = NULL;

	unlock_res_and_lock(lock);
	return inode;
}
Example #9
/**
 * Return lquota entry structure associated with a per-ID lock
 *
 * \param lock - is the per-ID lock from which we should extract the lquota
 *               entry
 * \param reset - whether lock->l_ast_data should be cleared
 */
static struct lquota_entry *qsd_id_ast_data_get(struct ldlm_lock *lock,
						bool reset)
{
	struct lquota_entry *lqe;
	ENTRY;

	lock_res_and_lock(lock);
	lqe = lock->l_ast_data;
	if (lqe != NULL) {
		lqe_getref(lqe);
		if (reset)
			lock->l_ast_data = NULL;
	}
	unlock_res_and_lock(lock);

	if (reset && lqe != NULL)
		/* release the lqe reference held for the lock */
		lqe_putref(lqe);
	RETURN(lqe);
}
Example #10
/**
 * Breaks a link between osc_lock and dlm_lock.
 */
static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck)
{
	struct ldlm_lock *dlmlock;

	spin_lock(&osc_ast_guard);
	dlmlock = olck->ols_lock;
	if (dlmlock == NULL) {
		spin_unlock(&osc_ast_guard);
		return;
	}

	olck->ols_lock = NULL;
	/* wb(); --- for all callers that check (ols->ols_lock != NULL) before
	 * calling osc_lock_detach() */
	dlmlock->l_ast_data = NULL;
	olck->ols_handle.cookie = 0ULL;
	spin_unlock(&osc_ast_guard);

	lock_res_and_lock(dlmlock);
	if (dlmlock->l_granted_mode == dlmlock->l_req_mode) {
		struct cl_object *obj = olck->ols_cl.cls_obj;
		struct cl_attr *attr  = &osc_env_info(env)->oti_attr;
		__u64 old_kms;

		cl_object_attr_lock(obj);
		/* Must get the value under the lock to avoid possible races. */
		old_kms = cl2osc(obj)->oo_oinfo->loi_kms;
		/* Update the kms. Need to loop over all granted locks.
		 * Not a problem for the client. */
		attr->cat_kms = ldlm_extent_shift_kms(dlmlock, old_kms);

		cl_object_attr_set(env, obj, attr, CAT_KMS);
		cl_object_attr_unlock(obj);
	}
	unlock_res_and_lock(dlmlock);

	/* release a reference taken in osc_lock_upcall0(). */
	LASSERT(olck->ols_has_ref);
	lu_ref_del(&dlmlock->l_reference, "osc_lock", olck);
	LDLM_LOCK_RELEASE(dlmlock);
	olck->ols_has_ref = 0;
}
Example #11
/**
 * Called when a lock is granted, either from an upcall (when the server
 * returned a granted lock) or from a completion AST (when the server
 * returned a blocked lock).
 *
 * Called under the lock and resource spin-locks, which are released
 * temporarily here.
 */
static void osc_lock_granted(const struct lu_env *env, struct osc_lock *olck,
			     struct ldlm_lock *dlmlock, int rc)
{
	struct ldlm_extent   *ext;
	struct cl_lock       *lock;
	struct cl_lock_descr *descr;

	LASSERT(dlmlock->l_granted_mode == dlmlock->l_req_mode);

	ENTRY;
	if (olck->ols_state < OLS_GRANTED) {
		lock  = olck->ols_cl.cls_lock;
		ext   = &dlmlock->l_policy_data.l_extent;
		descr = &osc_env_info(env)->oti_descr;
		descr->cld_obj = lock->cll_descr.cld_obj;

		/* XXX check that ->l_granted_mode is valid. */
		descr->cld_mode  = osc_ldlm2cl_lock(dlmlock->l_granted_mode);
		descr->cld_start = cl_index(descr->cld_obj, ext->start);
		descr->cld_end   = cl_index(descr->cld_obj, ext->end);
		descr->cld_gid   = ext->gid;
		/*
		 * tell upper layers the extent of the lock that was actually
		 * granted
		 */
		olck->ols_state = OLS_GRANTED;
		osc_lock_lvb_update(env, olck, rc);

		/* release DLM spin-locks to allow cl_lock_{modify,signal}()
		 * to take a semaphore on a parent lock. This is safe, because
		 * spin-locks are needed to protect consistency of
		 * dlmlock->l_*_mode and LVB, and we have finished processing
		 * them. */
		unlock_res_and_lock(dlmlock);
		cl_lock_modify(env, lock, descr);
		cl_lock_signal(env, lock);
		LINVRNT(osc_lock_invariant(olck));
		lock_res_and_lock(dlmlock);
	}
	EXIT;
}
Example #12
static int mdc_set_dom_lock_data(const struct lu_env *env,
				 struct ldlm_lock *lock, void *data)
{
	struct osc_object *obj = data;
	int set = 0;

	LASSERT(lock != NULL);
	LASSERT(lock->l_glimpse_ast == mdc_ldlm_glimpse_ast);

	lock_res_and_lock(lock);
	if (lock->l_ast_data == NULL) {
		lock->l_ast_data = data;
		mdc_lock_lvb_update(env, obj, lock, NULL);
	}

	if (lock->l_ast_data == data)
		set = 1;

	unlock_res_and_lock(lock);

	return set;
}
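
Example #12 uses a compact "set if unset, then verify" idiom: the back-pointer is installed only when the slot is empty, and success is reported whenever the slot ends up pointing at our object, whether we set it or an earlier caller already did. A userspace sketch of the idiom, with hypothetical toy_* names rather than the MDC API, might look like this.

#include <pthread.h>
#include <stddef.h>

struct toy_lock {
	pthread_mutex_t  tl_res_lock;  /* stands in for lock_res_and_lock() */
	void            *tl_ast_data;  /* stands in for l_ast_data */
};

static int toy_set_lock_data(struct toy_lock *lk, void *data)
{
	int set = 0;

	pthread_mutex_lock(&lk->tl_res_lock);
	if (lk->tl_ast_data == NULL)
		lk->tl_ast_data = data;    /* empty slot: claim it */
	if (lk->tl_ast_data == data)
		set = 1;                   /* slot points at our object */
	pthread_mutex_unlock(&lk->tl_res_lock);

	return set;
}
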
Example #13
static void osc_lock_upcall0(const struct lu_env *env, struct osc_lock *olck)

{
	struct ldlm_lock *dlmlock;

	ENTRY;

	dlmlock = ldlm_handle2lock_long(&olck->ols_handle, 0);
	LASSERT(dlmlock != NULL);

	lock_res_and_lock(dlmlock);
	spin_lock(&osc_ast_guard);
	LASSERT(dlmlock->l_ast_data == olck);
	LASSERT(olck->ols_lock == NULL);
	olck->ols_lock = dlmlock;
	spin_unlock(&osc_ast_guard);

	/*
	 * The lock might not be granted yet. In this case, the completion
	 * AST (osc_ldlm_completion_ast()) comes later and finishes lock
	 * granting.
	 */
	if (dlmlock->l_granted_mode == dlmlock->l_req_mode)
		osc_lock_granted(env, olck, dlmlock, 0);
	unlock_res_and_lock(dlmlock);

	/*
	 * osc_enqueue_interpret() decrefs asynchronous locks; add a
	 * reference here to counter that.
	 */
	ldlm_lock_addref(&olck->ols_handle, olck->ols_einfo.ei_mode);
	olck->ols_hold = 1;

	/* lock reference taken by ldlm_handle2lock_long() is owned by
	 * osc_lock and released in osc_lock_detach() */
	lu_ref_add(&dlmlock->l_reference, "osc_lock", olck);
	olck->ols_has_ref = 1;
}
Example #14
static int osc_lock_upcall_agl(void *cookie, struct lustre_handle *lockh,
			       int errcode)
{
	struct osc_object *osc = cookie;
	struct ldlm_lock *dlmlock;
	struct lu_env *env;
	u16 refcheck;

	env = cl_env_get(&refcheck);
	LASSERT(!IS_ERR(env));

	if (errcode == ELDLM_LOCK_MATCHED) {
		errcode = ELDLM_OK;
		goto out;
	}

	if (errcode != ELDLM_OK)
		goto out;

	dlmlock = ldlm_handle2lock(lockh);
	LASSERT(dlmlock);

	lock_res_and_lock(dlmlock);
	LASSERT(dlmlock->l_granted_mode == dlmlock->l_req_mode);

	/* there is no osc_lock associated with AGL lock */
	osc_lock_lvb_update(env, osc, dlmlock, NULL);

	unlock_res_and_lock(dlmlock);
	LDLM_LOCK_PUT(dlmlock);

out:
	cl_object_put(env, osc2cl(osc));
	cl_env_put(env, &refcheck);
	return ldlm_error2errno(errcode);
}
Example #15
/* TODO: handle requests in a similar way as MDT: see mdt_handle_common() */
static int ldlm_callback_handler(struct ptlrpc_request *req)
{
	struct ldlm_namespace *ns;
	struct ldlm_request *dlm_req;
	struct ldlm_lock *lock;
	int rc;

	/* Requests arrive in sender's byte order.  The ptlrpc service
	 * handler has already checked and, if necessary, byte-swapped the
	 * incoming request message body, but I am responsible for the
	 * message buffers. */

	/* do nothing for sec context finalize */
	if (lustre_msg_get_opc(req->rq_reqmsg) == SEC_CTX_FINI)
		return 0;

	req_capsule_init(&req->rq_pill, req, RCL_SERVER);

	if (req->rq_export == NULL) {
		rc = ldlm_callback_reply(req, -ENOTCONN);
		ldlm_callback_errmsg(req, "Operate on unconnected server",
				     rc, NULL);
		return 0;
	}

	LASSERT(req->rq_export != NULL);
	LASSERT(req->rq_export->exp_obd != NULL);

	switch (lustre_msg_get_opc(req->rq_reqmsg)) {
	case LDLM_BL_CALLBACK:
		if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_BL_CALLBACK_NET))
			return 0;
		break;
	case LDLM_CP_CALLBACK:
		if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CP_CALLBACK_NET))
			return 0;
		break;
	case LDLM_GL_CALLBACK:
		if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_GL_CALLBACK_NET))
			return 0;
		break;
	case LDLM_SET_INFO:
		rc = ldlm_handle_setinfo(req);
		ldlm_callback_reply(req, rc);
		return 0;
	case OBD_LOG_CANCEL: /* remove this eventually - for 1.4.0 compat */
		CERROR("shouldn't be handling OBD_LOG_CANCEL on DLM thread\n");
		req_capsule_set(&req->rq_pill, &RQF_LOG_CANCEL);
		if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOG_CANCEL_NET))
			return 0;
		rc = llog_origin_handle_cancel(req);
		if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOG_CANCEL_REP))
			return 0;
		ldlm_callback_reply(req, rc);
		return 0;
	case LLOG_ORIGIN_HANDLE_CREATE:
		req_capsule_set(&req->rq_pill, &RQF_LLOG_ORIGIN_HANDLE_CREATE);
		if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
			return 0;
		rc = llog_origin_handle_open(req);
		ldlm_callback_reply(req, rc);
		return 0;
	case LLOG_ORIGIN_HANDLE_NEXT_BLOCK:
		req_capsule_set(&req->rq_pill,
				&RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK);
		if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
			return 0;
		rc = llog_origin_handle_next_block(req);
		ldlm_callback_reply(req, rc);
		return 0;
	case LLOG_ORIGIN_HANDLE_READ_HEADER:
		req_capsule_set(&req->rq_pill,
				&RQF_LLOG_ORIGIN_HANDLE_READ_HEADER);
		if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
			return 0;
		rc = llog_origin_handle_read_header(req);
		ldlm_callback_reply(req, rc);
		return 0;
	case LLOG_ORIGIN_HANDLE_CLOSE:
		if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
			return 0;
		rc = llog_origin_handle_close(req);
		ldlm_callback_reply(req, rc);
		return 0;
	case OBD_QC_CALLBACK:
		req_capsule_set(&req->rq_pill, &RQF_QC_CALLBACK);
		if (OBD_FAIL_CHECK(OBD_FAIL_OBD_QC_CALLBACK_NET))
			return 0;
		rc = ldlm_handle_qc_callback(req);
		ldlm_callback_reply(req, rc);
		return 0;
	default:
		CERROR("unknown opcode %u\n",
		       lustre_msg_get_opc(req->rq_reqmsg));
		ldlm_callback_reply(req, -EPROTO);
		return 0;
	}

	ns = req->rq_export->exp_obd->obd_namespace;
	LASSERT(ns != NULL);

	req_capsule_set(&req->rq_pill, &RQF_LDLM_CALLBACK);

	dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
	if (dlm_req == NULL) {
		rc = ldlm_callback_reply(req, -EPROTO);
		ldlm_callback_errmsg(req, "Operate without parameter", rc,
				     NULL);
		return 0;
	}

	/* Force a known safe race: send a cancel to the server for a lock
	 * on which the server has already started a blocking callback. */
	if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE) &&
	    lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) {
		rc = ldlm_cli_cancel(&dlm_req->lock_handle[0], 0);
		if (rc < 0)
			CERROR("ldlm_cli_cancel: %d\n", rc);
	}

	lock = ldlm_handle2lock_long(&dlm_req->lock_handle[0], 0);
	if (!lock) {
		CDEBUG(D_DLMTRACE, "callback on lock "LPX64" - lock "
		       "disappeared\n", dlm_req->lock_handle[0].cookie);
		rc = ldlm_callback_reply(req, -EINVAL);
		ldlm_callback_errmsg(req, "Operate with invalid parameter", rc,
				     &dlm_req->lock_handle[0]);
		return 0;
	}

	if ((lock->l_flags & LDLM_FL_FAIL_LOC) &&
	    lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK)
		OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);

	/* Copy hints/flags (e.g. LDLM_FL_DISCARD_DATA) from AST. */
	lock_res_and_lock(lock);
	lock->l_flags |= ldlm_flags_from_wire(dlm_req->lock_flags &
					      LDLM_AST_FLAGS);
	if (lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) {
		/* If somebody cancels the lock and the cache is already
		 * dropped, or the lock failed before the cp_ast was received
		 * on the client, we can tell the server we have no lock.
		 * Otherwise, we should send the cancel after dropping the
		 * cache. */
		if (((lock->l_flags & LDLM_FL_CANCELING) &&
		    (lock->l_flags & LDLM_FL_BL_DONE)) ||
		    (lock->l_flags & LDLM_FL_FAILED)) {
			LDLM_DEBUG(lock, "callback on lock "
				   LPX64" - lock disappeared\n",
				   dlm_req->lock_handle[0].cookie);
			unlock_res_and_lock(lock);
			LDLM_LOCK_RELEASE(lock);
			rc = ldlm_callback_reply(req, -EINVAL);
			ldlm_callback_errmsg(req, "Operate on stale lock", rc,
					     &dlm_req->lock_handle[0]);
			return 0;
		}
		/* BL_AST locks are not needed in LRU.
		 * Let ldlm_cancel_lru() be fast. */
		ldlm_lock_remove_from_lru(lock);
		lock->l_flags |= LDLM_FL_BL_AST;
	}
	unlock_res_and_lock(lock);

	/* We want the ost thread to get this reply so that it can respond
	 * to ost requests (write cache writeback) that might be triggered
	 * in the callback.
	 *
	 * But we'd also like to be able to indicate in the reply that we're
	 * cancelling right now, because it's unused, or have an intent result
	 * in the reply, so we might have to push the responsibility for sending
	 * the reply down into the AST handlers, alas. */

	switch (lustre_msg_get_opc(req->rq_reqmsg)) {
	case LDLM_BL_CALLBACK:
		CDEBUG(D_INODE, "blocking ast\n");
		req_capsule_extend(&req->rq_pill, &RQF_LDLM_BL_CALLBACK);
		if (!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK)) {
			rc = ldlm_callback_reply(req, 0);
			if (req->rq_no_reply || rc)
				ldlm_callback_errmsg(req, "Normal process", rc,
						     &dlm_req->lock_handle[0]);
		}
		if (ldlm_bl_to_thread_lock(ns, &dlm_req->lock_desc, lock))
			ldlm_handle_bl_callback(ns, &dlm_req->lock_desc, lock);
		break;
	case LDLM_CP_CALLBACK:
		CDEBUG(D_INODE, "completion ast\n");
		req_capsule_extend(&req->rq_pill, &RQF_LDLM_CP_CALLBACK);
		ldlm_callback_reply(req, 0);
		ldlm_handle_cp_callback(req, ns, dlm_req, lock);
		break;
	case LDLM_GL_CALLBACK:
		CDEBUG(D_INODE, "glimpse ast\n");
		req_capsule_extend(&req->rq_pill, &RQF_LDLM_GL_CALLBACK);
		ldlm_handle_gl_callback(req, ns, dlm_req, lock);
		break;
	default:
		LBUG();			 /* checked above */
	}

	return 0;
}
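
Example #15 illustrates a dispatch shape worth noting: each opcode branch sends its own reply (or delegates that to the AST handler), and the handler itself always returns 0, so protocol errors are reported to the peer rather than to the service loop. A minimal sketch of that shape follows, with hypothetical opcodes and helpers instead of the real ptlrpc API.

#include <errno.h>
#include <stdio.h>

enum toy_opc { TOY_BL_CALLBACK, TOY_CP_CALLBACK, TOY_GL_CALLBACK };

struct toy_req {
	enum toy_opc tr_opc;
};

static void toy_reply(struct toy_req *req, int rc)
{
	printf("reply to opc %d with rc %d\n", req->tr_opc, rc);
}

static int toy_callback_handler(struct toy_req *req)
{
	switch (req->tr_opc) {
	case TOY_BL_CALLBACK:
		toy_reply(req, 0);     /* blocking AST: ack, then hand off */
		break;
	case TOY_CP_CALLBACK:
		toy_reply(req, 0);     /* completion AST: ack, then grant locally */
		break;
	case TOY_GL_CALLBACK:
		break;                 /* glimpse AST: the handler replies itself */
	default:
		toy_reply(req, -EPROTO);
		break;
	}
	return 0;  /* errors go back in the reply, never to the service loop */
}
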
Example #16
/**
 * Callback handler for receiving incoming completion ASTs.
 *
 * This can only happen on the client side.
 */
static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
				    struct ldlm_namespace *ns,
				    struct ldlm_request *dlm_req,
				    struct ldlm_lock *lock)
{
	int lvb_len;
	LIST_HEAD(ast_list);
	int rc = 0;

	LDLM_DEBUG(lock, "client completion callback handler START");

	if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE)) {
		int to = cfs_time_seconds(1);
		while (to > 0) {
			schedule_timeout_and_set_state(
				TASK_INTERRUPTIBLE, to);
			if (lock->l_granted_mode == lock->l_req_mode ||
			    lock->l_flags & LDLM_FL_DESTROYED)
				break;
		}
	}

	lvb_len = req_capsule_get_size(&req->rq_pill, &RMF_DLM_LVB, RCL_CLIENT);
	if (lvb_len < 0) {
		LDLM_ERROR(lock, "Fail to get lvb_len, rc = %d", lvb_len);
		GOTO(out, rc = lvb_len);
	} else if (lvb_len > 0) {
		if (lock->l_lvb_len > 0) {
			/* for extent lock, lvb contains ost_lvb{}. */
			LASSERT(lock->l_lvb_data != NULL);

			if (unlikely(lock->l_lvb_len < lvb_len)) {
				LDLM_ERROR(lock, "Replied LVB is larger than "
					   "expectation, expected = %d, "
					   "replied = %d",
					   lock->l_lvb_len, lvb_len);
				GOTO(out, rc = -EINVAL);
			}
		} else if (ldlm_has_layout(lock)) { /* for layout lock, lvb has
						     * variable length */
			void *lvb_data;

			OBD_ALLOC(lvb_data, lvb_len);
			if (lvb_data == NULL) {
				LDLM_ERROR(lock, "No memory: %d.\n", lvb_len);
				GOTO(out, rc = -ENOMEM);
			}

			lock_res_and_lock(lock);
			LASSERT(lock->l_lvb_data == NULL);
			lock->l_lvb_data = lvb_data;
			lock->l_lvb_len = lvb_len;
			unlock_res_and_lock(lock);
		}
	}

	lock_res_and_lock(lock);
	if ((lock->l_flags & LDLM_FL_DESTROYED) ||
	    lock->l_granted_mode == lock->l_req_mode) {
		/* bug 11300: the lock has already been granted */
		unlock_res_and_lock(lock);
		LDLM_DEBUG(lock, "Double grant race happened");
		GOTO(out, rc = 0);
	}

	/* If we receive the completion AST before the actual enqueue returned,
	 * then we might need to switch lock modes, resources, or extents. */
	if (dlm_req->lock_desc.l_granted_mode != lock->l_req_mode) {
		lock->l_req_mode = dlm_req->lock_desc.l_granted_mode;
		LDLM_DEBUG(lock, "completion AST, new lock mode");
	}

	if (lock->l_resource->lr_type != LDLM_PLAIN) {
		ldlm_convert_policy_to_local(req->rq_export,
					  dlm_req->lock_desc.l_resource.lr_type,
					  &dlm_req->lock_desc.l_policy_data,
					  &lock->l_policy_data);
		LDLM_DEBUG(lock, "completion AST, new policy data");
	}

	ldlm_resource_unlink_lock(lock);
	if (memcmp(&dlm_req->lock_desc.l_resource.lr_name,
		   &lock->l_resource->lr_name,
		   sizeof(lock->l_resource->lr_name)) != 0) {
		unlock_res_and_lock(lock);
		rc = ldlm_lock_change_resource(ns, lock,
				&dlm_req->lock_desc.l_resource.lr_name);
		if (rc < 0) {
			LDLM_ERROR(lock, "Failed to allocate resource");
			GOTO(out, rc);
		}
		LDLM_DEBUG(lock, "completion AST, new resource");
		CERROR("change resource!\n");
		lock_res_and_lock(lock);
	}

	if (dlm_req->lock_flags & LDLM_FL_AST_SENT) {
		/* BL_AST locks are not needed in LRU.
		 * Let ldlm_cancel_lru() be fast. */
		ldlm_lock_remove_from_lru(lock);
		lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_BL_AST;
		LDLM_DEBUG(lock, "completion AST includes blocking AST");
	}

	if (lock->l_lvb_len > 0) {
		rc = ldlm_fill_lvb(lock, &req->rq_pill, RCL_CLIENT,
				   lock->l_lvb_data, lvb_len);
		if (rc < 0) {
			unlock_res_and_lock(lock);
			GOTO(out, rc);
		}
	}

	ldlm_grant_lock(lock, &ast_list);
	unlock_res_and_lock(lock);

	LDLM_DEBUG(lock, "callback handler finished, about to run_ast_work");

	/* Let Enqueue call osc_lock_upcall() and initialize
	 * l_ast_data */
	OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 2);

	ldlm_run_ast_work(ns, &ast_list, LDLM_WORK_CP_AST);

	LDLM_DEBUG_NOLOCK("client completion callback handler END (lock %p)",
			  lock);
	GOTO(out, rc);

out:
	if (rc < 0) {
		lock_res_and_lock(lock);
		lock->l_flags |= LDLM_FL_FAILED;
		unlock_res_and_lock(lock);
		wake_up(&lock->l_waitq);
	}
	LDLM_LOCK_RELEASE(lock);
}
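
One detail of Example #16 that is easy to miss is the LVB size handling: a fixed-size client buffer must be at least as large as the server's reply, while a variable-length (layout) LVB is allocated to the replied size on first use. A hedged userspace sketch of that check follows; the names are hypothetical and reply_len is assumed to have been validated as non-negative by the caller, as in the example above.

#include <errno.h>
#include <stdlib.h>
#include <string.h>

struct toy_lock {
	void *tl_lvb_data;  /* stands in for l_lvb_data */
	int   tl_lvb_len;   /* stands in for l_lvb_len */
};

static int toy_fill_lvb(struct toy_lock *lk, const void *reply, int reply_len)
{
	if (lk->tl_lvb_len > 0) {
		/* fixed-size LVB: the reply must fit the existing buffer */
		if (reply_len > lk->tl_lvb_len)
			return -EINVAL;
	} else {
		/* variable-length LVB: size the buffer from the reply */
		lk->tl_lvb_data = malloc(reply_len);
		if (lk->tl_lvb_data == NULL)
			return -ENOMEM;
		lk->tl_lvb_len = reply_len;
	}
	memcpy(lk->tl_lvb_data, reply, reply_len);
	return 0;
}
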
Example #17
/**
 * Helper for osc_dlm_blocking_ast() handling discrepancies between cl_lock
 * and ldlm_lock caches.
 */
static int osc_dlm_blocking_ast0(const struct lu_env *env,
				 struct ldlm_lock *dlmlock,
				 void *data, int flag)
{
	struct cl_object *obj = NULL;
	int result = 0;
	int discard;
	enum cl_lock_mode mode = CLM_READ;

	LASSERT(flag == LDLM_CB_CANCELING);

	lock_res_and_lock(dlmlock);
	if (dlmlock->l_granted_mode != dlmlock->l_req_mode) {
		dlmlock->l_ast_data = NULL;
		unlock_res_and_lock(dlmlock);
		return 0;
	}

	discard = ldlm_is_discard_data(dlmlock);
	if (dlmlock->l_granted_mode & (LCK_PW | LCK_GROUP))
		mode = CLM_WRITE;

	if (dlmlock->l_ast_data) {
		obj = osc2cl(dlmlock->l_ast_data);
		dlmlock->l_ast_data = NULL;

		cl_object_get(obj);
	}

	unlock_res_and_lock(dlmlock);

	/* if l_ast_data is NULL, the dlmlock was enqueued by AGL or
	 * the object has been destroyed.
	 */
	if (obj) {
		struct ldlm_extent *extent = &dlmlock->l_policy_data.l_extent;
		struct cl_attr *attr = &osc_env_info(env)->oti_attr;
		__u64 old_kms;

		/* Destroy pages covered by the extent of the DLM lock */
		result = osc_lock_flush(cl2osc(obj),
					cl_index(obj, extent->start),
					cl_index(obj, extent->end),
					mode, discard);

		/* losing a lock, update kms */
		lock_res_and_lock(dlmlock);
		cl_object_attr_lock(obj);
		/* Must get the value under the lock to avoid race. */
		old_kms = cl2osc(obj)->oo_oinfo->loi_kms;
		/* Update the kms. Need to loop over all granted locks.
		 * Not a problem for the client.
		 */
		attr->cat_kms = ldlm_extent_shift_kms(dlmlock, old_kms);

		cl_object_attr_set(env, obj, attr, CAT_KMS);
		cl_object_attr_unlock(obj);
		unlock_res_and_lock(dlmlock);

		cl_object_put(env, obj);
	}
	return result;
}
Example #18
/**
 * Lock upcall function that is executed either when a reply to an ENQUEUE
 * RPC is received from the server, or after osc_enqueue_base() matched a
 * local DLM lock.
 */
static int osc_lock_upcall(void *cookie, int errcode)
{
	struct osc_lock	 *olck  = cookie;
	struct cl_lock_slice    *slice = &olck->ols_cl;
	struct cl_lock	  *lock  = slice->cls_lock;
	struct lu_env	   *env;
	struct cl_env_nest       nest;

	ENTRY;
	env = cl_env_nested_get(&nest);
	if (!IS_ERR(env)) {
		int rc;

		cl_lock_mutex_get(env, lock);

		LASSERT(lock->cll_state >= CLS_QUEUING);
		if (olck->ols_state == OLS_ENQUEUED) {
			olck->ols_state = OLS_UPCALL_RECEIVED;
			rc = ldlm_error2errno(errcode);
		} else if (olck->ols_state == OLS_CANCELLED) {
			rc = -EIO;
		} else {
			CERROR("Impossible state: %d\n", olck->ols_state);
			LBUG();
		}
		if (rc) {
			struct ldlm_lock *dlmlock;

			dlmlock = ldlm_handle2lock(&olck->ols_handle);
			if (dlmlock != NULL) {
				lock_res_and_lock(dlmlock);
				spin_lock(&osc_ast_guard);
				LASSERT(olck->ols_lock == NULL);
				dlmlock->l_ast_data = NULL;
				olck->ols_handle.cookie = 0ULL;
				spin_unlock(&osc_ast_guard);
				ldlm_lock_fail_match_locked(dlmlock);
				unlock_res_and_lock(dlmlock);
				LDLM_LOCK_PUT(dlmlock);
			}
		} else {
			if (olck->ols_glimpse)
				olck->ols_glimpse = 0;
			osc_lock_upcall0(env, olck);
		}

		/* Error handling, some errors are tolerable. */
		if (olck->ols_locklessable && rc == -EUSERS) {
			/* This is a tolerable error, turn this lock into
			 * lockless lock.
			 */
			osc_object_set_contended(cl2osc(slice->cls_obj));
			LASSERT(slice->cls_ops == &osc_lock_ops);

			/* Change this lock to ldlmlock-less lock. */
			osc_lock_to_lockless(env, olck, 1);
			olck->ols_state = OLS_GRANTED;
			rc = 0;
		} else if (olck->ols_glimpse && rc == -ENAVAIL) {
			osc_lock_lvb_update(env, olck, rc);
			cl_lock_delete(env, lock);
			/* Hide the error. */
			rc = 0;
		}

		if (rc == 0) {
			/* For the AGL case, the RPC sponsor may exit the
			 * cl_lock processing without wait() being called
			 * before the related OSC lock upcall(). So update the
			 * lock status according to the enqueue result inside
			 * the AGL upcall(). */
			if (olck->ols_agl) {
				lock->cll_flags |= CLF_FROM_UPCALL;
				cl_wait_try(env, lock);
				lock->cll_flags &= ~CLF_FROM_UPCALL;
				if (!olck->ols_glimpse)
					olck->ols_agl = 0;
			}
			cl_lock_signal(env, lock);
			/* del user for lock upcall cookie */
			cl_unuse_try(env, lock);
		} else {
			/* del user for lock upcall cookie */
			cl_lock_user_del(env, lock);
			cl_lock_error(env, lock, rc);
		}

		/* release cookie reference, acquired by osc_lock_enqueue() */
		cl_lock_hold_release(env, lock, "upcall", lock);
		cl_lock_mutex_put(env, lock);

		lu_ref_del(&lock->cll_reference, "upcall", lock);
		/* This may be the last reference, so it must be called after
		 * cl_lock_mutex_put(). */
		cl_lock_put(env, lock);

		cl_env_nested_put(&nest, env);
	} else {
		/* should never happen, similar to osc_ldlm_blocking_ast(). */
		LBUG();
	}
	RETURN(errcode);
}