Example No. 1
/**
 * Callback handler for receiving incoming blocking ASTs.
 *
 * This can only happen on the client side.
 */
void ldlm_handle_bl_callback(struct ldlm_namespace *ns,
			     struct ldlm_lock_desc *ld, struct ldlm_lock *lock)
{
	int do_ast;

	LDLM_DEBUG(lock, "client blocking AST callback handler");

	lock_res_and_lock(lock);
	lock->l_flags |= LDLM_FL_CBPENDING;

	if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK)
		lock->l_flags |= LDLM_FL_CANCEL;

	do_ast = (!lock->l_readers && !lock->l_writers);
	unlock_res_and_lock(lock);

	if (do_ast) {
		CDEBUG(D_DLMTRACE, "Lock %p already unused, calling callback (%p)\n",
		       lock, lock->l_blocking_ast);
		if (lock->l_blocking_ast != NULL)
			lock->l_blocking_ast(lock, ld, lock->l_ast_data,
					     LDLM_CB_BLOCKING);
	} else {
		CDEBUG(D_DLMTRACE, "Lock %p is referenced, will be cancelled later\n",
		       lock);
	}

	LDLM_DEBUG(lock, "client blocking callback handler END");
	LDLM_LOCK_RELEASE(lock);
}
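
A minimal standalone sketch of the pattern above (plain C with pthreads; the toy_* names are invented, this is not Lustre code): the handler records its decision while holding the lock, then invokes the blocking callback only after dropping it, so the callback is free to take locks itself.

#include <pthread.h>
#include <stdio.h>

struct toy_lock {
	pthread_mutex_t	 mutex;
	int		 readers;
	int		 writers;
	int		 cb_pending;		/* cf. LDLM_FL_CBPENDING */
	void		(*blocking_cb)(struct toy_lock *);
};

static void toy_handle_blocking(struct toy_lock *lk)
{
	int do_cb;

	pthread_mutex_lock(&lk->mutex);
	lk->cb_pending = 1;			/* mark cancel pending */
	do_cb = (lk->readers == 0 && lk->writers == 0);
	pthread_mutex_unlock(&lk->mutex);

	/* as above, the callback runs with the mutex already dropped */
	if (do_cb && lk->blocking_cb != NULL)
		lk->blocking_cb(lk);
	else
		printf("lock still referenced, cancel deferred\n");
}

static void toy_cb(struct toy_lock *lk)
{
	printf("blocking callback fired (cb_pending=%d)\n", lk->cb_pending);
}

int main(void)
{
	struct toy_lock lk = {
		.mutex = PTHREAD_MUTEX_INITIALIZER,
		.blocking_cb = toy_cb,
	};

	toy_handle_blocking(&lk);	/* unused lock: callback fires */
	return 0;
}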
Example No. 2
/**
 * Updates object attributes from a lock value block (lvb) received together
 * with the DLM lock reply from the server. Copy of osc_update_enqueue()
 * logic.
 *
 * This can be optimized to not update attributes when the lock is the
 * result of a local match.
 *
 * Called under lock and resource spin-locks.
 */
static void osc_lock_lvb_update(const struct lu_env *env, struct osc_lock *olck,
				int rc)
{
	struct ost_lvb    *lvb;
	struct cl_object  *obj;
	struct lov_oinfo  *oinfo;
	struct cl_attr    *attr;
	unsigned	   valid;

	if (!(olck->ols_flags & LDLM_FL_LVB_READY))
		return;

	lvb   = &olck->ols_lvb;
	obj   = olck->ols_cl.cls_obj;
	oinfo = cl2osc(obj)->oo_oinfo;
	attr  = &osc_env_info(env)->oti_attr;
	valid = CAT_BLOCKS | CAT_ATIME | CAT_CTIME | CAT_MTIME | CAT_SIZE;
	cl_lvb2attr(attr, lvb);

	cl_object_attr_lock(obj);
	if (rc == 0) {
		struct ldlm_lock  *dlmlock;
		__u64 size;

		dlmlock = olck->ols_lock;
		LASSERT(dlmlock != NULL);

		/* re-grab LVB from a dlm lock under DLM spin-locks. */
		*lvb = *(struct ost_lvb *)dlmlock->l_lvb_data;
		size = lvb->lvb_size;
		/* Extend KMS up to the end of this lock and no further.
		 * A lock on [x,y] means a KMS of up to y + 1 bytes! */
		if (size > dlmlock->l_policy_data.l_extent.end)
			size = dlmlock->l_policy_data.l_extent.end + 1;
		if (size >= oinfo->loi_kms) {
			LDLM_DEBUG(dlmlock, "lock acquired, setting rss="LPU64
				   ", kms="LPU64, lvb->lvb_size, size);
			valid |= CAT_KMS;
			attr->cat_kms = size;
		} else {
			LDLM_DEBUG(dlmlock, "lock acquired, setting rss="
				   LPU64"; leaving kms="LPU64", end="LPU64,
				   lvb->lvb_size, oinfo->loi_kms,
				   dlmlock->l_policy_data.l_extent.end);
		}
		ldlm_lock_allow_match_locked(dlmlock);
	} else if (rc == -ENAVAIL && olck->ols_glimpse) {
		CDEBUG(D_INODE, "glimpsed, setting rss="LPU64"; leaving"
		       " kms="LPU64"\n", lvb->lvb_size, oinfo->loi_kms);
	} else
		valid = 0;

	if (valid != 0)
		cl_object_attr_set(env, obj, attr, valid);

	cl_object_attr_unlock(obj);
}
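
The KMS step above in isolation: a lock covering bytes [x, y] justifies a known-minimal-size of at most y + 1, and KMS only ever grows. A standalone sketch (plain C; kms_after_lock is an invented name, not Lustre API):

#include <stdint.h>
#include <stdio.h>

/* Clamp the server-reported size to the lock extent, then take the max
 * with the old KMS, mirroring the branch structure above. */
static uint64_t kms_after_lock(uint64_t lvb_size, uint64_t extent_end,
			       uint64_t old_kms)
{
	uint64_t size = lvb_size;

	if (size > extent_end)
		size = extent_end + 1;	/* lock on [x,y] covers y + 1 bytes */
	return size >= old_kms ? size : old_kms;
}

int main(void)
{
	/* lock covers [0, 4095], server reports a 1 MiB object */
	printf("kms=%llu\n", (unsigned long long)
	       kms_after_lock(1ULL << 20, 4095, 0));	/* prints kms=4096 */
	return 0;
}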
Example No. 3
/**
 * Blocking callback handler for the global index lock.
 *
 * \param lock - is the lock for which the AST occurred.
 * \param desc - is the description of a conflicting lock in case of a
 *               blocking AST.
 * \param data - is the value of lock->l_ast_data
 * \param flag - LDLM_CB_BLOCKING or LDLM_CB_CANCELING. Used to distinguish
 *               cancellation and blocking ASTs.
 */
static int qsd_glb_blocking_ast(struct ldlm_lock *lock,
				struct ldlm_lock_desc *desc, void *data,
				int flag)
{
	int rc = 0;
	ENTRY;

	switch (flag) {
	case LDLM_CB_BLOCKING: {
		struct lustre_handle lockh;

		LDLM_DEBUG(lock, "blocking AST on global quota lock");
		ldlm_lock2handle(lock, &lockh);
		rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
		break;
	}
	case LDLM_CB_CANCELING: {
		struct qsd_qtype_info *qqi;

		LDLM_DEBUG(lock, "canceling global quota lock");

		qqi = qsd_glb_ast_data_get(lock, true);
		if (qqi == NULL)
			break;

		/* we are losing the global index lock, so let's mark the
		 * global & slave indexes as not up-to-date any more */
		write_lock(&qqi->qqi_qsd->qsd_lock);
		qqi->qqi_glb_uptodate = false;
		qqi->qqi_slv_uptodate = false;
		if (lock->l_handle.h_cookie == qqi->qqi_lockh.cookie)
			memset(&qqi->qqi_lockh, 0, sizeof(qqi->qqi_lockh));
		write_unlock(&qqi->qqi_qsd->qsd_lock);

		CDEBUG(D_QUOTA, "%s: losing global index lock for %s type\n",
		       qqi->qqi_qsd->qsd_svname, QTYPE_NAME((qqi->qqi_qtype)));

		/* Kick off the reintegration thread if it isn't running
		 * already. If this is just a local cancel (for stack cleanup
		 * or eviction), don't re-trigger the reintegration. */
		if (!ldlm_is_local_only(lock))
			qsd_start_reint_thread(qqi);

		lu_ref_del(&qqi->qqi_reference, "ast_data_get", lock);
		qqi_putref(qqi);
		break;
	}
	default:
		LASSERTF(0, "invalid flags for blocking ast %d", flag);
	}

	RETURN(rc);
}
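
A standalone sketch of the blocking/canceling split above (plain C; the toy_* enum and handlers are invented, not Lustre API): blocking ASTs only queue an asynchronous cancel, while canceling ASTs perform the actual teardown.

#include <assert.h>
#include <stdio.h>

enum toy_cb_flag { TOY_CB_BLOCKING, TOY_CB_CANCELING };

/* Two-armed dispatcher mirroring the control flow of
 * qsd_glb_blocking_ast() above. */
static int toy_blocking_ast(enum toy_cb_flag flag)
{
	switch (flag) {
	case TOY_CB_BLOCKING:
		printf("queue async cancel\n");
		return 0;
	case TOY_CB_CANCELING:
		printf("mark indexes stale, kick reintegration\n");
		return 0;
	}
	assert(0 && "invalid AST flag");	/* cf. LASSERTF(0, ...) */
	return -1;
}

int main(void)
{
	toy_blocking_ast(TOY_CB_BLOCKING);
	toy_blocking_ast(TOY_CB_CANCELING);
	return 0;
}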
Example No. 4
/**
 * Updates object attributes from a lock value block (lvb) received together
 * with the DLM lock reply from the server. Copy of osc_update_enqueue()
 * logic.
 *
 * This can be optimized to not update attributes when the lock is the
 * result of a local match.
 *
 * Called under lock and resource spin-locks.
 */
static void osc_lock_lvb_update(const struct lu_env *env,
				struct osc_object *osc,
				struct ldlm_lock *dlmlock,
				struct ost_lvb *lvb)
{
	struct cl_object  *obj = osc2cl(osc);
	struct lov_oinfo  *oinfo = osc->oo_oinfo;
	struct cl_attr    *attr = &osc_env_info(env)->oti_attr;
	unsigned           valid;

	ENTRY;

	valid = CAT_BLOCKS | CAT_ATIME | CAT_CTIME | CAT_MTIME | CAT_SIZE;
	if (lvb == NULL) {
		LASSERT(dlmlock != NULL);
		lvb = dlmlock->l_lvb_data;
	}
	cl_lvb2attr(attr, lvb);

	cl_object_attr_lock(obj);
	if (dlmlock != NULL) {
		__u64 size;

		check_res_locked(dlmlock->l_resource);

		LASSERT(lvb == dlmlock->l_lvb_data);
		size = lvb->lvb_size;

		/* Extend KMS up to the end of this lock and no further.
		 * A lock on [x,y] means a KMS of up to y + 1 bytes! */
		if (size > dlmlock->l_policy_data.l_extent.end)
			size = dlmlock->l_policy_data.l_extent.end + 1;
		if (size >= oinfo->loi_kms) {
			LDLM_DEBUG(dlmlock, "lock acquired, setting rss=%llu"
				   ", kms=%llu", lvb->lvb_size, size);
			valid |= CAT_KMS;
			attr->cat_kms = size;
		} else {
			LDLM_DEBUG(dlmlock, "lock acquired, setting rss="
				   "%llu; leaving kms=%llu, end=%llu",
				   lvb->lvb_size, oinfo->loi_kms,
				   dlmlock->l_policy_data.l_extent.end);
		}
		ldlm_lock_allow_match_locked(dlmlock);
	}

	cl_object_attr_update(env, obj, attr, valid);
	cl_object_attr_unlock(obj);

	EXIT;
}
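
One difference from the earlier variant is that this version accepts an explicit lvb and falls back to the lock's cached copy when the caller passes NULL. That defaulting step, as a standalone sketch (plain C; the toy_* types are hypothetical):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct toy_lvb  { uint64_t size; };
struct toy_lock { struct toy_lvb lvb_data; };

/* Mirrors the "if (lvb == NULL) lvb = dlmlock->l_lvb_data" step above. */
static const struct toy_lvb *pick_lvb(const struct toy_lock *lock,
				      const struct toy_lvb *lvb)
{
	if (lvb == NULL) {
		assert(lock != NULL);	/* cf. LASSERT(dlmlock != NULL) */
		lvb = &lock->lvb_data;
	}
	return lvb;
}

int main(void)
{
	struct toy_lock lk = { .lvb_data = { .size = 42 } };

	printf("size=%llu\n",
	       (unsigned long long)pick_lvb(&lk, NULL)->size);	/* 42 */
	return 0;
}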
Example No. 5
/**
 * Callback handler for receiving incoming glimpse ASTs.
 *
 * This can only happen on the client side.  After handling the glimpse AST
 * we also consider dropping the lock here if it is unused locally for a
 * long time.
 */
static void ldlm_handle_gl_callback(struct ptlrpc_request *req,
				    struct ldlm_namespace *ns,
				    struct ldlm_request *dlm_req,
				    struct ldlm_lock *lock)
{
	int rc = -ENOSYS;

	LDLM_DEBUG(lock, "client glimpse AST callback handler");

	if (lock->l_glimpse_ast != NULL)
		rc = lock->l_glimpse_ast(lock, req);

	if (req->rq_repmsg != NULL) {
		ptlrpc_reply(req);
	} else {
		req->rq_status = rc;
		ptlrpc_error(req);
	}

	lock_res_and_lock(lock);
	if (lock->l_granted_mode == LCK_PW &&
	    !lock->l_readers && !lock->l_writers &&
	    cfs_time_after(cfs_time_current(),
			   cfs_time_add(lock->l_last_used,
					cfs_time_seconds(10)))) {
		unlock_res_and_lock(lock);
		if (ldlm_bl_to_thread_lock(ns, NULL, lock))
			ldlm_handle_bl_callback(ns, NULL, lock);

		return;
	}
	unlock_res_and_lock(lock);
	LDLM_LOCK_RELEASE(lock);
}
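
The drop-if-idle test above combines reference counts with a time comparison. A standalone sketch using POSIX monotonic time in place of cfs_time_* (plain C; the ten-second grace period comes from the code above, the toy_* names are invented):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define TOY_IDLE_GRACE_SEC 10

/* True when the lock has no users and was last used more than the
 * grace period ago, mirroring the condition above. */
static bool toy_lock_idle(time_t last_used, int readers, int writers)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	return readers == 0 && writers == 0 &&
	       now.tv_sec > last_used + TOY_IDLE_GRACE_SEC;
}

int main(void)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	printf("idle=%d\n", toy_lock_idle(now.tv_sec - 11, 0, 0)); /* 1 */
	return 0;
}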
Example No. 6
/* TODO: handle requests in a similar way as MDT: see mdt_handle_common() */
static int ldlm_callback_handler(struct ptlrpc_request *req)
{
	struct ldlm_namespace *ns;
	struct ldlm_request *dlm_req;
	struct ldlm_lock *lock;
	int rc;

	/* Requests arrive in sender's byte order.  The ptlrpc service
	 * handler has already checked and, if necessary, byte-swapped the
	 * incoming request message body, but I am responsible for the
	 * message buffers. */

	/* do nothing for sec context finalize */
	if (lustre_msg_get_opc(req->rq_reqmsg) == SEC_CTX_FINI)
		return 0;

	req_capsule_init(&req->rq_pill, req, RCL_SERVER);

	if (req->rq_export == NULL) {
		rc = ldlm_callback_reply(req, -ENOTCONN);
		ldlm_callback_errmsg(req, "Operate on unconnected server",
				     rc, NULL);
		return 0;
	}

	LASSERT(req->rq_export != NULL);
	LASSERT(req->rq_export->exp_obd != NULL);

	switch (lustre_msg_get_opc(req->rq_reqmsg)) {
	case LDLM_BL_CALLBACK:
		if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_BL_CALLBACK_NET))
			return 0;
		break;
	case LDLM_CP_CALLBACK:
		if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CP_CALLBACK_NET))
			return 0;
		break;
	case LDLM_GL_CALLBACK:
		if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_GL_CALLBACK_NET))
			return 0;
		break;
	case LDLM_SET_INFO:
		rc = ldlm_handle_setinfo(req);
		ldlm_callback_reply(req, rc);
		return 0;
	case OBD_LOG_CANCEL: /* remove this eventually - for 1.4.0 compat */
		CERROR("shouldn't be handling OBD_LOG_CANCEL on DLM thread\n");
		req_capsule_set(&req->rq_pill, &RQF_LOG_CANCEL);
		if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOG_CANCEL_NET))
			return 0;
		rc = llog_origin_handle_cancel(req);
		if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOG_CANCEL_REP))
			return 0;
		ldlm_callback_reply(req, rc);
		return 0;
	case LLOG_ORIGIN_HANDLE_CREATE:
		req_capsule_set(&req->rq_pill, &RQF_LLOG_ORIGIN_HANDLE_CREATE);
		if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
			return 0;
		rc = llog_origin_handle_open(req);
		ldlm_callback_reply(req, rc);
		return 0;
	case LLOG_ORIGIN_HANDLE_NEXT_BLOCK:
		req_capsule_set(&req->rq_pill,
				&RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK);
		if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
			return 0;
		rc = llog_origin_handle_next_block(req);
		ldlm_callback_reply(req, rc);
		return 0;
	case LLOG_ORIGIN_HANDLE_READ_HEADER:
		req_capsule_set(&req->rq_pill,
				&RQF_LLOG_ORIGIN_HANDLE_READ_HEADER);
		if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
			return 0;
		rc = llog_origin_handle_read_header(req);
		ldlm_callback_reply(req, rc);
		return 0;
	case LLOG_ORIGIN_HANDLE_CLOSE:
		if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
			return 0;
		rc = llog_origin_handle_close(req);
		ldlm_callback_reply(req, rc);
		return 0;
	case OBD_QC_CALLBACK:
		req_capsule_set(&req->rq_pill, &RQF_QC_CALLBACK);
		if (OBD_FAIL_CHECK(OBD_FAIL_OBD_QC_CALLBACK_NET))
			return 0;
		rc = ldlm_handle_qc_callback(req);
		ldlm_callback_reply(req, rc);
		return 0;
	default:
		CERROR("unknown opcode %u\n",
		       lustre_msg_get_opc(req->rq_reqmsg));
		ldlm_callback_reply(req, -EPROTO);
		return 0;
	}

	ns = req->rq_export->exp_obd->obd_namespace;
	LASSERT(ns != NULL);

	req_capsule_set(&req->rq_pill, &RQF_LDLM_CALLBACK);

	dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
	if (dlm_req == NULL) {
		rc = ldlm_callback_reply(req, -EPROTO);
		ldlm_callback_errmsg(req, "Operate without parameter", rc,
				     NULL);
		return 0;
	}

	/* Force a known safe race: send a cancel to the server for a lock
	 * on which the server has already started a blocking callback. */
	if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE) &&
	    lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) {
		rc = ldlm_cli_cancel(&dlm_req->lock_handle[0], 0);
		if (rc < 0)
			CERROR("ldlm_cli_cancel: %d\n", rc);
	}

	lock = ldlm_handle2lock_long(&dlm_req->lock_handle[0], 0);
	if (!lock) {
		CDEBUG(D_DLMTRACE, "callback on lock "LPX64" - lock "
		       "disappeared\n", dlm_req->lock_handle[0].cookie);
		rc = ldlm_callback_reply(req, -EINVAL);
		ldlm_callback_errmsg(req, "Operate with invalid parameter", rc,
				     &dlm_req->lock_handle[0]);
		return 0;
	}

	if ((lock->l_flags & LDLM_FL_FAIL_LOC) &&
	    lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK)
		OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);

	/* Copy hints/flags (e.g. LDLM_FL_DISCARD_DATA) from AST. */
	lock_res_and_lock(lock);
	lock->l_flags |= ldlm_flags_from_wire(dlm_req->lock_flags &
					      LDLM_AST_FLAGS);
	if (lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) {
		/* If somebody cancels the lock and the cache is already
		 * dropped, or the lock fails before the CP AST is received
		 * on the client, we can tell the server we have no lock.
		 * Otherwise, we should send the cancel after dropping the
		 * cache. */
		if (((lock->l_flags & LDLM_FL_CANCELING) &&
		    (lock->l_flags & LDLM_FL_BL_DONE)) ||
		    (lock->l_flags & LDLM_FL_FAILED)) {
			LDLM_DEBUG(lock, "callback on lock "
				   LPX64" - lock disappeared\n",
				   dlm_req->lock_handle[0].cookie);
			unlock_res_and_lock(lock);
			LDLM_LOCK_RELEASE(lock);
			rc = ldlm_callback_reply(req, -EINVAL);
			ldlm_callback_errmsg(req, "Operate on stale lock", rc,
					     &dlm_req->lock_handle[0]);
			return 0;
		}
		/* BL_AST locks are not needed in LRU.
		 * Let ldlm_cancel_lru() be fast. */
		ldlm_lock_remove_from_lru(lock);
		lock->l_flags |= LDLM_FL_BL_AST;
	}
	unlock_res_and_lock(lock);

	/* We want the ost thread to get this reply so that it can respond
	 * to ost requests (write cache writeback) that might be triggered
	 * in the callback.
	 *
	 * But we'd also like to be able to indicate in the reply that we're
	 * cancelling right now, because it's unused, or have an intent result
	 * in the reply, so we might have to push the responsibility for sending
	 * the reply down into the AST handlers, alas. */

	switch (lustre_msg_get_opc(req->rq_reqmsg)) {
	case LDLM_BL_CALLBACK:
		CDEBUG(D_INODE, "blocking ast\n");
		req_capsule_extend(&req->rq_pill, &RQF_LDLM_BL_CALLBACK);
		if (!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK)) {
			rc = ldlm_callback_reply(req, 0);
			if (req->rq_no_reply || rc)
				ldlm_callback_errmsg(req, "Normal process", rc,
						     &dlm_req->lock_handle[0]);
		}
		if (ldlm_bl_to_thread_lock(ns, &dlm_req->lock_desc, lock))
			ldlm_handle_bl_callback(ns, &dlm_req->lock_desc, lock);
		break;
	case LDLM_CP_CALLBACK:
		CDEBUG(D_INODE, "completion ast\n");
		req_capsule_extend(&req->rq_pill, &RQF_LDLM_CP_CALLBACK);
		ldlm_callback_reply(req, 0);
		ldlm_handle_cp_callback(req, ns, dlm_req, lock);
		break;
	case LDLM_GL_CALLBACK:
		CDEBUG(D_INODE, "glimpse ast\n");
		req_capsule_extend(&req->rq_pill, &RQF_LDLM_GL_CALLBACK);
		ldlm_handle_gl_callback(req, ns, dlm_req, lock);
		break;
	default:
		LBUG();			 /* checked above */
	}

	return 0;
}
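
The TODO at the top of this function suggests replacing the long opcode switch with something closer to the MDT's common handler. A standalone sketch of the table-driven shape that implies (plain C; the opcodes and handlers are invented, not Lustre API):

#include <stdio.h>

typedef int (*toy_handler_t)(void *req);

static int toy_bl_cb(void *req) { (void)req; return 0; }
static int toy_cp_cb(void *req) { (void)req; return 0; }

enum { TOY_OP_BL, TOY_OP_CP, TOY_OP_MAX };

/* One designated-initializer table replaces the switch arms. */
static const toy_handler_t toy_dispatch[TOY_OP_MAX] = {
	[TOY_OP_BL] = toy_bl_cb,
	[TOY_OP_CP] = toy_cp_cb,
};

static int toy_handle(unsigned int opc, void *req)
{
	if (opc >= TOY_OP_MAX || toy_dispatch[opc] == NULL)
		return -1;	/* cf. replying -EPROTO for unknown opcodes */
	return toy_dispatch[opc](req);
}

int main(void)
{
	printf("rc=%d rc=%d\n", toy_handle(TOY_OP_BL, NULL),
	       toy_handle(99, NULL));	/* prints rc=0 rc=-1 */
	return 0;
}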
Example No. 7
/**
 * Callback handler for receiving incoming completion ASTs.
 *
 * This can only happen on the client side.
 */
static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
				    struct ldlm_namespace *ns,
				    struct ldlm_request *dlm_req,
				    struct ldlm_lock *lock)
{
	int lvb_len;
	LIST_HEAD(ast_list);
	int rc = 0;

	LDLM_DEBUG(lock, "client completion callback handler START");

	if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE)) {
		int to = cfs_time_seconds(1);
		while (to > 0) {
			schedule_timeout_and_set_state(
				TASK_INTERRUPTIBLE, to);
			if (lock->l_granted_mode == lock->l_req_mode ||
			    lock->l_flags & LDLM_FL_DESTROYED)
				break;
		}
	}

	lvb_len = req_capsule_get_size(&req->rq_pill, &RMF_DLM_LVB, RCL_CLIENT);
	if (lvb_len < 0) {
		LDLM_ERROR(lock, "Fail to get lvb_len, rc = %d", lvb_len);
		GOTO(out, rc = lvb_len);
	} else if (lvb_len > 0) {
		if (lock->l_lvb_len > 0) {
			/* for extent lock, lvb contains ost_lvb{}. */
			LASSERT(lock->l_lvb_data != NULL);

			if (unlikely(lock->l_lvb_len < lvb_len)) {
				LDLM_ERROR(lock, "Replied LVB is larger than "
					   "expectation, expected = %d, "
					   "replied = %d",
					   lock->l_lvb_len, lvb_len);
				GOTO(out, rc = -EINVAL);
			}
		} else if (ldlm_has_layout(lock)) { /* for layout lock, lvb has
						     * variable length */
			void *lvb_data;

			OBD_ALLOC(lvb_data, lvb_len);
			if (lvb_data == NULL) {
				LDLM_ERROR(lock, "No memory: %d.\n", lvb_len);
				GOTO(out, rc = -ENOMEM);
			}

			lock_res_and_lock(lock);
			LASSERT(lock->l_lvb_data == NULL);
			lock->l_lvb_data = lvb_data;
			lock->l_lvb_len = lvb_len;
			unlock_res_and_lock(lock);
		}
	}

	lock_res_and_lock(lock);
	if ((lock->l_flags & LDLM_FL_DESTROYED) ||
	    lock->l_granted_mode == lock->l_req_mode) {
		/* bug 11300: the lock has already been granted */
		unlock_res_and_lock(lock);
		LDLM_DEBUG(lock, "Double grant race happened");
		GOTO(out, rc = 0);
	}

	/* If we receive the completion AST before the actual enqueue returned,
	 * then we might need to switch lock modes, resources, or extents. */
	if (dlm_req->lock_desc.l_granted_mode != lock->l_req_mode) {
		lock->l_req_mode = dlm_req->lock_desc.l_granted_mode;
		LDLM_DEBUG(lock, "completion AST, new lock mode");
	}

	if (lock->l_resource->lr_type != LDLM_PLAIN) {
		ldlm_convert_policy_to_local(req->rq_export,
					  dlm_req->lock_desc.l_resource.lr_type,
					  &dlm_req->lock_desc.l_policy_data,
					  &lock->l_policy_data);
		LDLM_DEBUG(lock, "completion AST, new policy data");
	}

	ldlm_resource_unlink_lock(lock);
	if (memcmp(&dlm_req->lock_desc.l_resource.lr_name,
		   &lock->l_resource->lr_name,
		   sizeof(lock->l_resource->lr_name)) != 0) {
		unlock_res_and_lock(lock);
		rc = ldlm_lock_change_resource(ns, lock,
				&dlm_req->lock_desc.l_resource.lr_name);
		if (rc < 0) {
			LDLM_ERROR(lock, "Failed to allocate resource");
			GOTO(out, rc);
		}
		LDLM_DEBUG(lock, "completion AST, new resource");
		CERROR("change resource!\n");
		lock_res_and_lock(lock);
	}

	if (dlm_req->lock_flags & LDLM_FL_AST_SENT) {
		/* BL_AST locks are not needed in LRU.
		 * Let ldlm_cancel_lru() be fast. */
		ldlm_lock_remove_from_lru(lock);
		lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_BL_AST;
		LDLM_DEBUG(lock, "completion AST includes blocking AST");
	}

	if (lock->l_lvb_len > 0) {
		rc = ldlm_fill_lvb(lock, &req->rq_pill, RCL_CLIENT,
				   lock->l_lvb_data, lvb_len);
		if (rc < 0) {
			unlock_res_and_lock(lock);
			GOTO(out, rc);
		}
	}

	ldlm_grant_lock(lock, &ast_list);
	unlock_res_and_lock(lock);

	LDLM_DEBUG(lock, "callback handler finished, about to run_ast_work");

	/* Let the enqueue path call osc_lock_upcall() and initialize
	 * l_ast_data. */
	OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 2);

	ldlm_run_ast_work(ns, &ast_list, LDLM_WORK_CP_AST);

	LDLM_DEBUG_NOLOCK("client completion callback handler END (lock %p)",
			  lock);
	GOTO(out, rc);

out:
	if (rc < 0) {
		lock_res_and_lock(lock);
		lock->l_flags |= LDLM_FL_FAILED;
		unlock_res_and_lock(lock);
		wake_up(&lock->l_waitq);
	}
	LDLM_LOCK_RELEASE(lock);
}
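
The LVB sizing rules above, reduced to a standalone sketch (plain C; the toy_* names are invented): a negative length is an error to propagate, a reply larger than a preallocated fixed-size buffer is a protocol error, and a variable-length LVB (the layout-lock case) is allocated on first use.

#include <errno.h>
#include <stdlib.h>

struct toy_lock {
	void	*lvb_data;
	int	 lvb_len;
};

static int toy_check_lvb(struct toy_lock *lk, int replied_len)
{
	if (replied_len < 0)
		return replied_len;	/* propagate the error */

	if (lk->lvb_len > 0)		/* fixed-size LVB preallocated */
		return replied_len > lk->lvb_len ? -EINVAL : 0;

	if (replied_len > 0) {		/* variable-length LVB */
		lk->lvb_data = malloc(replied_len);
		if (lk->lvb_data == NULL)
			return -ENOMEM;
		lk->lvb_len = replied_len;
	}
	return 0;
}

int main(void)
{
	struct toy_lock lk = { .lvb_data = NULL, .lvb_len = 0 };
	int rc = toy_check_lvb(&lk, 64);	/* allocates a 64-byte LVB */

	free(lk.lvb_data);
	return rc;
}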
Example No. 8
/**
 * Blocking callback handler for per-ID lock
 *
 * \param lock - is the lock for which the AST occurred.
 * \param desc - is the description of a conflicting lock in case of a
 *               blocking AST.
 * \param data - is the value of lock->l_ast_data
 * \param flag - LDLM_CB_BLOCKING or LDLM_CB_CANCELING. Used to distinguish
 *               cancellation and blocking ASTs.
 */
static int qsd_id_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
			       void *data, int flag)
{
	struct lustre_handle	lockh;
	int			rc = 0;
	ENTRY;

	switch (flag) {
	case LDLM_CB_BLOCKING: {

		LDLM_DEBUG(lock, "blocking AST on ID quota lock");
		ldlm_lock2handle(lock, &lockh);
		rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
		break;
	}
	case LDLM_CB_CANCELING: {
		struct lu_env           *env;
		struct lquota_entry	*lqe;
		bool			 rel = false;

		LDLM_DEBUG(lock, "canceling global quota lock");
		lqe = qsd_id_ast_data_get(lock, true);
		if (lqe == NULL)
			break;

		LQUOTA_DEBUG(lqe, "losing ID lock");

		/* just a local cancel (for stack cleanup or eviction); don't
		 * release quota space in this case */
		if (ldlm_is_local_only(lock)) {
			lqe_putref(lqe);
			break;
		}

		/* allocate environment */
		OBD_ALLOC_PTR(env);
		if (env == NULL) {
			lqe_putref(lqe);
			rc = -ENOMEM;
			break;
		}

		/* initialize environment */
		rc = lu_env_init(env, LCT_DT_THREAD);
		if (rc) {
			OBD_FREE_PTR(env);
			lqe_putref(lqe);
			break;
		}

		ldlm_lock2handle(lock, &lockh);
		lqe_write_lock(lqe);
		if (lustre_handle_equal(&lockh, &lqe->lqe_lockh)) {
			/* Clear lqe_lockh & reset qunit to 0 */
			qsd_set_qunit(lqe, 0);
			memset(&lqe->lqe_lockh, 0, sizeof(lqe->lqe_lockh));
			lqe->lqe_edquot = false;
			rel = true;
		}
		lqe_write_unlock(lqe);

		/* If there is a dqacq in flight, the release will be skipped
		 * at this time and triggered on dqacq completion later,
		 * which means there could be a short window where the slave
		 * holds a spare grant without the per-ID lock. */
		if (rel)
			rc = qsd_adjust(env, lqe);

		/* release lqe reference grabbed by qsd_id_ast_data_get() */
		lqe_putref(lqe);
		lu_env_fini(env);
		OBD_FREE_PTR(env);
		break;
	}
	default:
		LASSERTF(0, "invalid flags for blocking ast %d", flag);
	}

	RETURN(rc);
}
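
The guarded reset in the canceling arm, standalone (plain C; the toy_* types are invented): state cached under the lock handle is cleared only when the handle being cancelled still matches the cached one, so a stale cancel cannot wipe a newer lock's state.

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

struct toy_handle { uint64_t cookie; };

struct toy_entry {
	struct toy_handle	lockh;	/* cached lock handle */
	unsigned long		qunit;
	bool			edquot;
};

/* Returns true when quota space should be released afterwards,
 * mirroring the "rel" flag above. */
static bool toy_cancel(struct toy_entry *e, const struct toy_handle *h)
{
	if (h->cookie != e->lockh.cookie)
		return false;		/* stale cancel, ignore */

	e->qunit = 0;			/* cf. qsd_set_qunit(lqe, 0) */
	e->edquot = false;
	memset(&e->lockh, 0, sizeof(e->lockh));
	return true;
}

int main(void)
{
	struct toy_entry e = { .lockh = { .cookie = 7 }, .qunit = 1024 };
	struct toy_handle h = { .cookie = 7 };

	return toy_cancel(&e, &h) ? 0 : 1;	/* handles match: returns 0 */
}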
Example No. 9
/**
 * Handle quota request from slave.
 *
 * \param env  - is the environment passed by the caller
 * \param ld   - is the lu device associated with the qmt
 * \param req  - is the quota acquire request
 */
static int qmt_dqacq(const struct lu_env *env, struct lu_device *ld,
		     struct ptlrpc_request *req)
{
	struct qmt_device	*qmt = lu2qmt_dev(ld);
	struct quota_body	*qbody, *repbody;
	struct obd_uuid		*uuid;
	struct ldlm_lock	*lock;
	struct lquota_entry	*lqe;
	int			 pool_id, pool_type, qtype;
	int			 rc;
	ENTRY;

	qbody = req_capsule_client_get(&req->rq_pill, &RMF_QUOTA_BODY);
	if (qbody == NULL)
		RETURN(err_serious(-EPROTO));

	repbody = req_capsule_server_get(&req->rq_pill, &RMF_QUOTA_BODY);
	if (repbody == NULL)
		RETURN(err_serious(-EFAULT));

	/* verify if global lock is stale */
	if (!lustre_handle_is_used(&qbody->qb_glb_lockh))
		RETURN(-ENOLCK);

	lock = ldlm_handle2lock(&qbody->qb_glb_lockh);
	if (lock == NULL)
		RETURN(-ENOLCK);
	LDLM_LOCK_PUT(lock);

	uuid = &req->rq_export->exp_client_uuid;

	if (req_is_rel(qbody->qb_flags) + req_is_acq(qbody->qb_flags) +
	    req_is_preacq(qbody->qb_flags) > 1) {
		CERROR("%s: malformed quota request with conflicting flags set "
		       "(%x) from slave %s\n", qmt->qmt_svname,
		       qbody->qb_flags, obd_uuid2str(uuid));
		RETURN(-EPROTO);
	}

	if (req_is_acq(qbody->qb_flags) || req_is_preacq(qbody->qb_flags)) {
		/* acquire and pre-acquire should use a valid ID lock */

		if (!lustre_handle_is_used(&qbody->qb_lockh))
			RETURN(-ENOLCK);

		lock = ldlm_handle2lock(&qbody->qb_lockh);
		if (lock == NULL)
			/* no lock associated with this handle */
			RETURN(-ENOLCK);

		LDLM_DEBUG(lock, "%sacquire request",
			   req_is_preacq(qbody->qb_flags) ? "pre" : "");

		if (!obd_uuid_equals(&lock->l_export->exp_client_uuid, uuid)) {
			/* sorry, no way to cheat ... */
			LDLM_LOCK_PUT(lock);
			RETURN(-ENOLCK);
		}

		if ((lock->l_flags & LDLM_FL_AST_SENT) != 0) {
			struct ptlrpc_service_part	*svc;
			unsigned int			 timeout;

			svc = req->rq_rqbd->rqbd_svcpt;
			timeout = at_est2timeout(at_get(&svc->scp_at_estimate));
			timeout = max(timeout, ldlm_timeout);

			/* lock is being cancelled, prolong timeout */
			ldlm_refresh_waiting_lock(lock, timeout);
		}
		LDLM_LOCK_PUT(lock);
	}

	/* extract pool & quota information from global index FID packed in the
	 * request */
	rc = lquota_extract_fid(&qbody->qb_fid, &pool_id, &pool_type, &qtype);
	if (rc)
		RETURN(-EINVAL);

	/* Find the quota entry associated with the quota id */
	lqe = qmt_pool_lqe_lookup(env, qmt, pool_id, pool_type, qtype,
				  &qbody->qb_id);
	if (IS_ERR(lqe))
		RETURN(PTR_ERR(lqe));

	/* process quota request */
	rc = qmt_dqacq0(env, lqe, qmt, uuid, qbody->qb_flags, qbody->qb_count,
			qbody->qb_usage, repbody);

	if (lustre_handle_is_used(&qbody->qb_lockh))
		/* return the current qunit value only to slaves owning a
		 * per-ID quota lock. For enqueue, the qunit value will be
		 * returned in the LVB */
		 repbody->qb_qunit = lqe->lqe_qunit;
	lqe_putref(lqe);
	RETURN(rc);
}
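
The conflicting-flags test near the top sums three boolean predicates and rejects the request if more than one is set. An equivalent standalone check (plain C; the TOY_* bits are invented stand-ins for the qb_flags bits):

#include <stdbool.h>
#include <stdio.h>

#define TOY_REL		0x1	/* release */
#define TOY_ACQ		0x2	/* acquire */
#define TOY_PREACQ	0x4	/* pre-acquire */

/* True iff at most one of the three operation bits is set; x & (x - 1)
 * clears the lowest set bit, so the result is 0 for 0 or 1 bits set. */
static bool toy_flags_valid(unsigned int flags)
{
	unsigned int set = flags & (TOY_REL | TOY_ACQ | TOY_PREACQ);

	return (set & (set - 1)) == 0;
}

int main(void)
{
	printf("%d %d\n", toy_flags_valid(TOY_ACQ),
	       toy_flags_valid(TOY_REL | TOY_ACQ));	/* prints 1 0 */
	return 0;
}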