Code example #1
File: qsd_lock.c Project: Lezval/lustre
/*
 * Blocking callback handler for global index lock
 *
 * \param lock - the lock for which the AST occurred
 * \param desc - the description of the conflicting lock in case of a
 *               blocking AST
 * \param data - the value of lock->l_ast_data
 * \param flag - LDLM_CB_BLOCKING or LDLM_CB_CANCELING, used to distinguish
 *               cancellation from blocking ASTs
 */
static int qsd_glb_blocking_ast(struct ldlm_lock *lock,
				struct ldlm_lock_desc *desc, void *data,
				int flag)
{
	int rc = 0;
	ENTRY;

	switch (flag) {
	case LDLM_CB_BLOCKING: {
		struct lustre_handle lockh;

		LDLM_DEBUG(lock, "blocking AST on global quota lock");
		ldlm_lock2handle(lock, &lockh);
		rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
		break;
	}
	case LDLM_CB_CANCELING: {
		struct qsd_qtype_info *qqi;

		LDLM_DEBUG(lock, "canceling global quota lock");

		qqi = qsd_glb_ast_data_get(lock, true);
		if (qqi == NULL)
			break;

		/* we are losing the global index lock, so let's mark the
		 * global & slave indexes as not up-to-date any more */
		write_lock(&qqi->qqi_qsd->qsd_lock);
		qqi->qqi_glb_uptodate = false;
		qqi->qqi_slv_uptodate = false;
		if (lock->l_handle.h_cookie == qqi->qqi_lockh.cookie)
			memset(&qqi->qqi_lockh, 0, sizeof(qqi->qqi_lockh));
		write_unlock(&qqi->qqi_qsd->qsd_lock);

		CDEBUG(D_QUOTA, "%s: losing global index lock for %s type\n",
		       qqi->qqi_qsd->qsd_svname, QTYPE_NAME((qqi->qqi_qtype)));

		/* Kick off the reintegration thread if it isn't running
		 * already. If this is just a local cancel (for stack cleanup
		 * or eviction), don't re-trigger the reintegration. */
		if (!ldlm_is_local_only(lock))
			qsd_start_reint_thread(qqi);

		lu_ref_del(&qqi->qqi_reference, "ast_data_get", lock);
		qqi_putref(qqi);
		break;
	}
	default:
		LASSERTF(0, "invalid flags for blocking ast %d", flag);
	}

	RETURN(rc);
}
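All of the handlers in this listing follow the same two-phase protocol: the DLM invokes the callback with LDLM_CB_BLOCKING when a conflicting lock appears (the handler converts the lock to a handle and requests cancellation via ldlm_cli_cancel()), and again with LDLM_CB_CANCELING when the cancel actually runs (the handler tears down cached state). The following self-contained user-space sketch models that control flow only; every name in it (ast_flag, fake_lock, cancel_lock, my_blocking_ast) is a hypothetical stand-in, not Lustre code.

#include <stdio.h>

/* Hypothetical stand-ins for LDLM_CB_BLOCKING / LDLM_CB_CANCELING */
enum ast_flag { AST_BLOCKING, AST_CANCELING };

struct fake_lock {
	const char *name;
	int (*blocking_ast)(struct fake_lock *lock, enum ast_flag flag);
	int cached_state;	/* state the CANCELING phase must drop */
};

/* Models ldlm_cli_cancel(): a cancel request eventually re-invokes the
 * same callback with the CANCELING flag. */
static int cancel_lock(struct fake_lock *lock)
{
	return lock->blocking_ast(lock, AST_CANCELING);
}

/* Models the shape of qsd_glb_blocking_ast() above. */
static int my_blocking_ast(struct fake_lock *lock, enum ast_flag flag)
{
	switch (flag) {
	case AST_BLOCKING:
		printf("%s: blocking AST, requesting cancel\n", lock->name);
		return cancel_lock(lock);
	case AST_CANCELING:
		printf("%s: canceling, dropping cached state\n", lock->name);
		lock->cached_state = 0;	/* e.g. qqi_glb_uptodate = false */
		return 0;
	}
	return -1;
}

int main(void)
{
	struct fake_lock lock = { "glb-lock", my_blocking_ast, 1 };

	/* A conflicting request arrives: the DLM fires the blocking AST. */
	return lock.blocking_ast(&lock, AST_BLOCKING);
}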
Code example #2
File: mdc_lib.c Project: Xyratex/lustre-stable
static void mdc_close_intent_pack(struct ptlrpc_request *req,
				  struct md_op_data *op_data)
{
	struct close_data	*data;
	struct ldlm_lock	*lock;
	enum mds_op_bias	 bias = op_data->op_bias;

	if (!(bias & (MDS_CLOSE_INTENT | MDS_CLOSE_MIGRATE)))
		return;

	data = req_capsule_client_get(&req->rq_pill, &RMF_CLOSE_DATA);
	LASSERT(data != NULL);

	lock = ldlm_handle2lock(&op_data->op_lease_handle);
	if (lock != NULL) {
		data->cd_handle = lock->l_remote_handle;
		LDLM_LOCK_PUT(lock);
	}
	ldlm_cli_cancel(&op_data->op_lease_handle, LCF_LOCAL);

	data->cd_data_version = op_data->op_data_version;
	data->cd_fid = op_data->op_fid2;

	if (bias & MDS_CLOSE_LAYOUT_SPLIT) {
		data->cd_mirror_id = op_data->op_mirror_id;
	} else if (bias & MDS_CLOSE_RESYNC_DONE) {
		struct close_data_resync_done *sync = &data->cd_resync;

		CLASSERT(sizeof(data->cd_resync) <= sizeof(data->cd_reserved));
		sync->resync_count = op_data->op_data_size / sizeof(__u32);
		if (sync->resync_count <= INLINE_RESYNC_ARRAY_SIZE) {
			memcpy(sync->resync_ids_inline, op_data->op_data,
			       op_data->op_data_size);
		} else {
			size_t count = sync->resync_count;

			memcpy(req_capsule_client_get(&req->rq_pill, &RMF_U32),
				op_data->op_data, count * sizeof(__u32));
		}
	}
}
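The resync branch above picks between a small array embedded in the close data (resync_ids_inline) and a separately sized request buffer when the ID count is too large. Below is a minimal user-space sketch of that inline-or-overflow layout; the struct, the INLINE_IDS capacity, and pack_ids() are hypothetical names for illustration only.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define INLINE_IDS 8	/* hypothetical inline capacity */

struct resync_msg {
	uint32_t count;
	uint32_t ids_inline[INLINE_IDS];	/* small payloads live here */
	uint32_t *overflow;			/* large payloads go to a
						 * separately sized buffer */
};

/* Copy `count` IDs inline when they fit, otherwise into the overflow
 * buffer the caller sized for exactly this purpose. */
static void pack_ids(struct resync_msg *msg, const uint32_t *ids,
		     uint32_t count)
{
	msg->count = count;
	if (count <= INLINE_IDS)
		memcpy(msg->ids_inline, ids, count * sizeof(uint32_t));
	else
		memcpy(msg->overflow, ids, count * sizeof(uint32_t));
}

int main(void)
{
	uint32_t small[3] = { 1, 2, 3 };
	uint32_t big_buf[16];
	struct resync_msg msg = { .overflow = big_buf };

	pack_ids(&msg, small, 3);
	printf("packed %u ids inline\n", msg.count);
	return 0;
}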
Code example #3
File: ldlm_lockd.c Project: IIosTaJI/linux-2.6
/* TODO: handle requests in a similar way as MDT: see mdt_handle_common() */
static int ldlm_callback_handler(struct ptlrpc_request *req)
{
	struct ldlm_namespace *ns;
	struct ldlm_request *dlm_req;
	struct ldlm_lock *lock;
	int rc;

	/* Requests arrive in sender's byte order.  The ptlrpc service
	 * handler has already checked and, if necessary, byte-swapped the
	 * incoming request message body, but I am responsible for the
	 * message buffers. */

	/* do nothing for sec context finalize */
	if (lustre_msg_get_opc(req->rq_reqmsg) == SEC_CTX_FINI)
		return 0;

	req_capsule_init(&req->rq_pill, req, RCL_SERVER);

	if (req->rq_export == NULL) {
		rc = ldlm_callback_reply(req, -ENOTCONN);
		ldlm_callback_errmsg(req, "Operate on unconnected server",
				     rc, NULL);
		return 0;
	}

	LASSERT(req->rq_export != NULL);
	LASSERT(req->rq_export->exp_obd != NULL);

	switch (lustre_msg_get_opc(req->rq_reqmsg)) {
	case LDLM_BL_CALLBACK:
		if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_BL_CALLBACK_NET))
			return 0;
		break;
	case LDLM_CP_CALLBACK:
		if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CP_CALLBACK_NET))
			return 0;
		break;
	case LDLM_GL_CALLBACK:
		if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_GL_CALLBACK_NET))
			return 0;
		break;
	case LDLM_SET_INFO:
		rc = ldlm_handle_setinfo(req);
		ldlm_callback_reply(req, rc);
		return 0;
	case OBD_LOG_CANCEL: /* remove this eventually - for 1.4.0 compat */
		CERROR("shouldn't be handling OBD_LOG_CANCEL on DLM thread\n");
		req_capsule_set(&req->rq_pill, &RQF_LOG_CANCEL);
		if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOG_CANCEL_NET))
			return 0;
		rc = llog_origin_handle_cancel(req);
		if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOG_CANCEL_REP))
			return 0;
		ldlm_callback_reply(req, rc);
		return 0;
	case LLOG_ORIGIN_HANDLE_CREATE:
		req_capsule_set(&req->rq_pill, &RQF_LLOG_ORIGIN_HANDLE_CREATE);
		if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
			return 0;
		rc = llog_origin_handle_open(req);
		ldlm_callback_reply(req, rc);
		return 0;
	case LLOG_ORIGIN_HANDLE_NEXT_BLOCK:
		req_capsule_set(&req->rq_pill,
				&RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK);
		if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
			return 0;
		rc = llog_origin_handle_next_block(req);
		ldlm_callback_reply(req, rc);
		return 0;
	case LLOG_ORIGIN_HANDLE_READ_HEADER:
		req_capsule_set(&req->rq_pill,
				&RQF_LLOG_ORIGIN_HANDLE_READ_HEADER);
		if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
			return 0;
		rc = llog_origin_handle_read_header(req);
		ldlm_callback_reply(req, rc);
		return 0;
	case LLOG_ORIGIN_HANDLE_CLOSE:
		if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
			return 0;
		rc = llog_origin_handle_close(req);
		ldlm_callback_reply(req, rc);
		return 0;
	case OBD_QC_CALLBACK:
		req_capsule_set(&req->rq_pill, &RQF_QC_CALLBACK);
		if (OBD_FAIL_CHECK(OBD_FAIL_OBD_QC_CALLBACK_NET))
			return 0;
		rc = ldlm_handle_qc_callback(req);
		ldlm_callback_reply(req, rc);
		return 0;
	default:
		CERROR("unknown opcode %u\n",
		       lustre_msg_get_opc(req->rq_reqmsg));
		ldlm_callback_reply(req, -EPROTO);
		return 0;
	}

	ns = req->rq_export->exp_obd->obd_namespace;
	LASSERT(ns != NULL);

	req_capsule_set(&req->rq_pill, &RQF_LDLM_CALLBACK);

	dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
	if (dlm_req == NULL) {
		rc = ldlm_callback_reply(req, -EPROTO);
		ldlm_callback_errmsg(req, "Operate without parameter", rc,
				     NULL);
		return 0;
	}

	/* Force a known safe race, send a cancel to the server for a lock
	 * which the server has already started a blocking callback on. */
	if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE) &&
	    lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) {
		rc = ldlm_cli_cancel(&dlm_req->lock_handle[0], 0);
		if (rc < 0)
			CERROR("ldlm_cli_cancel: %d\n", rc);
	}

	lock = ldlm_handle2lock_long(&dlm_req->lock_handle[0], 0);
	if (!lock) {
		CDEBUG(D_DLMTRACE, "callback on lock "LPX64" - lock "
		       "disappeared\n", dlm_req->lock_handle[0].cookie);
		rc = ldlm_callback_reply(req, -EINVAL);
		ldlm_callback_errmsg(req, "Operate with invalid parameter", rc,
				     &dlm_req->lock_handle[0]);
		return 0;
	}

	if ((lock->l_flags & LDLM_FL_FAIL_LOC) &&
	    lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK)
		OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);

	/* Copy hints/flags (e.g. LDLM_FL_DISCARD_DATA) from AST. */
	lock_res_and_lock(lock);
	lock->l_flags |= ldlm_flags_from_wire(dlm_req->lock_flags &
					      LDLM_AST_FLAGS);
	if (lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) {
		/* If somebody cancels the lock while the cache is already
		 * dropped, or the lock failed before the cp_ast was received
		 * on the client, we can tell the server we have no lock.
		 * Otherwise, we should send the cancel after dropping the
		 * cache. */
		if (((lock->l_flags & LDLM_FL_CANCELING) &&
		    (lock->l_flags & LDLM_FL_BL_DONE)) ||
		    (lock->l_flags & LDLM_FL_FAILED)) {
			LDLM_DEBUG(lock, "callback on lock "
				   LPX64" - lock disappeared\n",
				   dlm_req->lock_handle[0].cookie);
			unlock_res_and_lock(lock);
			LDLM_LOCK_RELEASE(lock);
			rc = ldlm_callback_reply(req, -EINVAL);
			ldlm_callback_errmsg(req, "Operate on stale lock", rc,
					     &dlm_req->lock_handle[0]);
			return 0;
		}
		/* BL_AST locks are not needed in LRU.
		 * Let ldlm_cancel_lru() be fast. */
		ldlm_lock_remove_from_lru(lock);
		lock->l_flags |= LDLM_FL_BL_AST;
	}
	unlock_res_and_lock(lock);

	/* We want the ost thread to get this reply so that it can respond
	 * to ost requests (write cache writeback) that might be triggered
	 * in the callback.
	 *
	 * But we'd also like to be able to indicate in the reply that we're
	 * cancelling right now, because it's unused, or have an intent result
	 * in the reply, so we might have to push the responsibility for sending
	 * the reply down into the AST handlers, alas. */

	switch (lustre_msg_get_opc(req->rq_reqmsg)) {
	case LDLM_BL_CALLBACK:
		CDEBUG(D_INODE, "blocking ast\n");
		req_capsule_extend(&req->rq_pill, &RQF_LDLM_BL_CALLBACK);
		if (!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK)) {
			rc = ldlm_callback_reply(req, 0);
			if (req->rq_no_reply || rc)
				ldlm_callback_errmsg(req, "Normal process", rc,
						     &dlm_req->lock_handle[0]);
		}
		if (ldlm_bl_to_thread_lock(ns, &dlm_req->lock_desc, lock))
			ldlm_handle_bl_callback(ns, &dlm_req->lock_desc, lock);
		break;
	case LDLM_CP_CALLBACK:
		CDEBUG(D_INODE, "completion ast\n");
		req_capsule_extend(&req->rq_pill, &RQF_LDLM_CP_CALLBACK);
		ldlm_callback_reply(req, 0);
		ldlm_handle_cp_callback(req, ns, dlm_req, lock);
		break;
	case LDLM_GL_CALLBACK:
		CDEBUG(D_INODE, "glimpse ast\n");
		req_capsule_extend(&req->rq_pill, &RQF_LDLM_GL_CALLBACK);
		ldlm_handle_gl_callback(req, ns, dlm_req, lock);
		break;
	default:
		LBUG();			 /* checked above */
	}

	return 0;
}
Code example #4
File: namei.c Project: hocks/lustre-release
int llu_md_blocking_ast(struct ldlm_lock *lock,
			struct ldlm_lock_desc *desc,
			void *data, int flag)
{
	struct lustre_handle lockh;
	int rc;
	ENTRY;

	switch (flag) {
	case LDLM_CB_BLOCKING:
		ldlm_lock2handle(lock, &lockh);
		rc = ldlm_cli_cancel(&lockh, 0);
		if (rc < 0) {
			CDEBUG(D_INODE, "ldlm_cli_cancel: %d\n", rc);
			RETURN(rc);
		}
		break;
	case LDLM_CB_CANCELING: {
		struct inode *inode = llu_inode_from_resource_lock(lock);
		struct llu_inode_info *lli;
		struct intnl_stat *st;
		__u64 bits = lock->l_policy_data.l_inodebits.bits;
		struct lu_fid *fid;

		/* Inode is set to lock->l_resource->lr_lvb_inode
		 * for mdc - bug 24555 */
		LASSERT(lock->l_ast_data == NULL);

		/* Invalidate all dentries associated with this inode */
		if (inode == NULL)
			break;

		lli = llu_i2info(inode);
		st = llu_i2stat(inode);

		if (bits & MDS_INODELOCK_UPDATE)
			lli->lli_flags &= ~LLIF_MDS_SIZE_LOCK;

		fid = &lli->lli_fid;
		if (!fid_res_name_eq(fid, &lock->l_resource->lr_name))
			LDLM_ERROR(lock, "data mismatch with object "
				   DFID" (%p)", PFID(fid), inode);
		if (S_ISDIR(st->st_mode) &&
		    (bits & MDS_INODELOCK_UPDATE)) {
			CDEBUG(D_INODE, "invalidating inode %llu\n",
			       (long long)st->st_ino);

			llu_invalidate_inode_pages(inode);
		}

/*
		if (inode->i_sb->s_root &&
		    inode != inode->i_sb->s_root->d_inode)
			ll_unhash_aliases(inode);
*/
		I_RELE(inode);
		break;
	}
	default:
		LBUG();
	}

	RETURN(0);
}
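The CANCELING branch above reads the inodebits policy (bits & MDS_INODELOCK_UPDATE) to decide which cached metadata to drop, so state protected by bits that are still held survives the cancel. A minimal sketch of that bit-driven invalidation in plain C follows; the bit names and the cache struct are hypothetical, loosely modeled on MDS_INODELOCK_*.

#include <stdio.h>

/* Hypothetical inode-bit flags, modeled on MDS_INODELOCK_*. */
#define BIT_LOOKUP (1u << 0)	/* name -> inode mapping */
#define BIT_UPDATE (1u << 1)	/* size, timestamps, directory pages */

struct cached_inode {
	int lookup_valid;
	int update_valid;
};

/* Drop exactly the cached state covered by the bits being canceled,
 * keeping everything a still-held bit protects. */
static void invalidate(struct cached_inode *ci, unsigned int bits)
{
	if (bits & BIT_LOOKUP)
		ci->lookup_valid = 0;
	if (bits & BIT_UPDATE)
		ci->update_valid = 0;
}

int main(void)
{
	struct cached_inode ci = { 1, 1 };

	invalidate(&ci, BIT_UPDATE);	/* lose only the UPDATE bit */
	printf("lookup=%d update=%d\n", ci.lookup_valid, ci.update_valid);
	return 0;
}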
Code example #5
File: qsd_lock.c Project: Lezval/lustre
/**
 * Blocking callback handler for per-ID lock
 *
 * \param lock - the lock for which the AST occurred
 * \param desc - the description of the conflicting lock in case of a
 *               blocking AST
 * \param data - the value of lock->l_ast_data
 * \param flag - LDLM_CB_BLOCKING or LDLM_CB_CANCELING, used to distinguish
 *               cancellation from blocking ASTs
 */
static int qsd_id_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
			       void *data, int flag)
{
	struct lustre_handle	lockh;
	int			rc = 0;
	ENTRY;

	switch (flag) {
	case LDLM_CB_BLOCKING: {
		LDLM_DEBUG(lock, "blocking AST on ID quota lock");
		ldlm_lock2handle(lock, &lockh);
		rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
		break;
	}
	case LDLM_CB_CANCELING: {
		struct lu_env           *env;
		struct lquota_entry	*lqe;
		bool			 rel = false;

		LDLM_DEBUG(lock, "canceling global quota lock");
		lqe = qsd_id_ast_data_get(lock, true);
		if (lqe == NULL)
			break;

		LQUOTA_DEBUG(lqe, "losing ID lock");

		/* just local cancel (for stack clean up or eviction), don't
		 * release quota space in this case */
		if (ldlm_is_local_only(lock)) {
			lqe_putref(lqe);
			break;
		}

		/* allocate environment */
		OBD_ALLOC_PTR(env);
		if (env == NULL) {
			lqe_putref(lqe);
			rc = -ENOMEM;
			break;
		}

		/* initialize environment */
		rc = lu_env_init(env, LCT_DT_THREAD);
		if (rc) {
			OBD_FREE_PTR(env);
			lqe_putref(lqe);
			break;
		}

		ldlm_lock2handle(lock, &lockh);
		lqe_write_lock(lqe);
		if (lustre_handle_equal(&lockh, &lqe->lqe_lockh)) {
			/* Clear lqe_lockh & reset qunit to 0 */
			qsd_set_qunit(lqe, 0);
			memset(&lqe->lqe_lockh, 0, sizeof(lqe->lqe_lockh));
			lqe->lqe_edquot = false;
			rel = true;
		}
		lqe_write_unlock(lqe);

		/* If there is a dqacq inflight, the release will be skipped
		 * at this time, and triggered on dqacq completion later,
		 * which means there could be a short window where the slave
		 * holds spare grant without the per-ID lock. */
		if (rel)
			rc = qsd_adjust(env, lqe);

		/* release lqe reference grabbed by qsd_id_ast_data_get() */
		lqe_putref(lqe);
		lu_env_fini(env);
		OBD_FREE_PTR(env);
		break;
	}
	default:
		LASSERTF(0, "invalid flags for blocking ast %d", flag);
	}

	RETURN(rc);
}
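The CANCELING branch here acquires resources in a fixed order (the lqe reference, the env allocation, the env initialization) and unwinds exactly what it already holds at each failure point. The same ladder is often written with goto-based cleanup, a common kernel idiom; below is a standalone sketch under that assumption, with env_init/env_fini/put_ref as hypothetical stand-ins for lu_env_init()/lu_env_fini()/lqe_putref().

#include <stdio.h>
#include <stdlib.h>

struct env { int initialized; };

static int env_init(struct env *e) { e->initialized = 1; return 0; }
static void env_fini(struct env *e) { e->initialized = 0; }
static void put_ref(void) { printf("reference released\n"); }

/* Acquire ref -> alloc env -> init env, releasing in reverse order on
 * any failure: the classic goto-cleanup ladder. */
static int do_cancel_work(void)
{
	struct env *env;
	int rc = 0;

	/* step 1: the reference is already held by the caller's lookup */

	env = malloc(sizeof(*env));	/* step 2: models OBD_ALLOC_PTR */
	if (env == NULL) {
		rc = -12;		/* -ENOMEM */
		goto out_ref;
	}

	rc = env_init(env);		/* step 3: models lu_env_init() */
	if (rc)
		goto out_free;

	printf("doing the real work\n");

	env_fini(env);			/* the normal path unwinds too */
out_free:
	free(env);
out_ref:
	put_ref();
	return rc;
}

int main(void) { return do_cancel_work(); }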
Code example #6
File: osc_lock.c Project: Cool-Joe/imx23-audio
/**
 * Helper for osc_dlm_blocking_ast() handling discrepancies between cl_lock
 * and ldlm_lock caches.
 */
static int osc_dlm_blocking_ast0(const struct lu_env *env,
				 struct ldlm_lock *dlmlock,
				 void *data, int flag)
{
	struct osc_lock *olck;
	struct cl_lock  *lock;
	int result;
	int cancel;

	LASSERT(flag == LDLM_CB_BLOCKING || flag == LDLM_CB_CANCELING);

	cancel = 0;
	olck = osc_ast_data_get(dlmlock);
	if (olck != NULL) {
		lock = olck->ols_cl.cls_lock;
		cl_lock_mutex_get(env, lock);
		LINVRNT(osc_lock_invariant(olck));
		if (olck->ols_ast_wait) {
			/* wake up osc_lock_use() */
			cl_lock_signal(env, lock);
			olck->ols_ast_wait = 0;
		}
		/*
		 * The lock might have been canceled while this thread was
		 * sleeping on the lock mutex, but olck is pinned in memory.
		 */
		if (olck == dlmlock->l_ast_data) {
			/*
			 * NOTE: the DLM sends blocking ASTs for failed locks
			 *       (that are still in the pre-OLS_GRANTED state)
			 *       too, and they have to be canceled, otherwise
			 *       the DLM lock is never destroyed and stays
			 *       stuck in memory.
			 *
			 *       Alternatively, ldlm_cli_cancel() can be
			 *       called here directly for osc_locks with
			 *       ols_state < OLS_GRANTED to maintain the
			 *       invariant that ->clo_cancel() is only called
			 *       for locks that were granted.
			 */
			LASSERT(data == olck);
			osc_lock_blocking(env, dlmlock,
					  olck, flag == LDLM_CB_BLOCKING);
		} else
			cancel = 1;
		cl_lock_mutex_put(env, lock);
		osc_ast_data_put(env, olck);
	} else
		/*
		 * The DLM lock exists, but there is no cl_lock attached to
		 * it. This is a `normal' race: a cl_object and its cl_locks
		 * can be removed by memory pressure, together with all pages.
		 */
		cancel = (flag == LDLM_CB_BLOCKING);

	if (cancel) {
		struct lustre_handle *lockh;

		lockh = &osc_env_info(env)->oti_handle;
		ldlm_lock2handle(dlmlock, lockh);
		result = ldlm_cli_cancel(lockh, LCF_ASYNC);
	} else
		result = 0;
	return result;
}
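Note the shape of this handler: the `cancel` decision is computed while the cl_lock mutex is held, but the actual ldlm_cli_cancel() runs only after cl_lock_mutex_put(), so the potentially slow cancel never executes under the mutex. A pthread sketch of that decide-then-act pattern follows; the guarded_state structure and slow_cancel() are hypothetical stand-ins for the cl_lock state and the cancel RPC.

#include <pthread.h>
#include <stdio.h>

struct guarded_state {
	pthread_mutex_t mu;
	int attached;	/* is higher-level state still attached? */
};

/* Slow operation that must never run under the mutex. */
static void slow_cancel(void)
{
	printf("issuing cancel outside the lock\n");
}

static void blocking_event(struct guarded_state *s)
{
	int cancel;

	pthread_mutex_lock(&s->mu);
	/* Decide under the lock, from a consistent snapshot... */
	cancel = !s->attached;
	pthread_mutex_unlock(&s->mu);

	/* ...but act only after dropping it. */
	if (cancel)
		slow_cancel();
}

int main(void)
{
	struct guarded_state s = { PTHREAD_MUTEX_INITIALIZER, 0 };

	blocking_event(&s);
	return 0;
}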