Example no. 1
0
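/*
 * Look up an already-granted DOM lock on @res_id matching the requested
 * @type/@mode. If a match is found and @data is provided, try to attach
 * @data to the matched lock; if that fails, the reference taken by the
 * match is dropped and 0 is returned (no usable lock).
 */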
int mdc_dom_lock_match(const struct lu_env *env, struct obd_export *exp,
		       struct ldlm_res_id *res_id,
		       enum ldlm_type type, union ldlm_policy_data *policy,
		       enum ldlm_mode mode, __u64 *flags, void *data,
		       struct lustre_handle *lockh, int unref)
{
	struct obd_device *obd = exp->exp_obd;
	__u64 lflags = *flags;
	enum ldlm_mode rc;

	ENTRY;

	rc = ldlm_lock_match(obd->obd_namespace, lflags,
			     res_id, type, policy, mode, lockh, unref);
	if (rc == 0 || lflags & LDLM_FL_TEST_LOCK)
		RETURN(rc);

	if (data != NULL) {
		struct ldlm_lock *lock = ldlm_handle2lock(lockh);

		LASSERT(lock != NULL);
		if (!mdc_set_dom_lock_data(env, lock, data)) {
			ldlm_lock_decref(lockh, rc);
			rc = 0;
		}
		LDLM_LOCK_PUT(lock);
	}
	RETURN(rc);
}
Example no. 2
0
/**
 * Check whether a slave already owns an ldlm lock for the quota identifier
 * \qid.
 *
 * \param lockh  - is the local lock handle from the lquota entry.
 * \param rlockh - is the remote lock handle of the matched lock, if any.
 *
 * \retval 0      : on successful lookup; \lockh contains the lock handle.
 * \retval -ENOENT: no lock found
 */
int qsd_id_lock_match(struct lustre_handle *lockh, struct lustre_handle *rlockh)
{
	struct ldlm_lock	*lock;
	int			 rc;
	ENTRY;

	LASSERT(lockh);

	if (!lustre_handle_is_used(lockh))
		RETURN(-ENOENT);

	rc = ldlm_lock_addref_try(lockh, qsd_id_einfo.ei_mode);
	if (rc)
		RETURN(-ENOENT);

	LASSERT(lustre_handle_is_used(lockh));
	ldlm_lock_dump_handle(D_QUOTA, lockh);

	if (rlockh == NULL)
		/* caller not interested in remote handle */
		RETURN(0);

	/* look up lock associated with local handle and extract remote handle
	 * to be packed in quota request */
	lock = ldlm_handle2lock(lockh);
	LASSERT(lock != NULL);
	lustre_handle_copy(rlockh, &lock->l_remote_handle);
	LDLM_LOCK_PUT(lock);

	RETURN(0);
}
Example no. 3
0
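/*
 * Enqueue upcall for asynchronous glimpse (AGL) locks. There is no osc_lock
 * attached to an AGL lock, so when a new lock was granted the object's LVB
 * attributes are refreshed directly from the DLM lock.
 */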
static int osc_lock_upcall_agl(void *cookie, struct lustre_handle *lockh,
			       int errcode)
{
	struct osc_object	*osc = cookie;
	struct ldlm_lock	*dlmlock;
	struct lu_env           *env;
	struct cl_env_nest       nest;
	ENTRY;

	env = cl_env_nested_get(&nest);
	LASSERT(!IS_ERR(env));

	if (errcode == ELDLM_LOCK_MATCHED)
		GOTO(out, errcode = ELDLM_OK);

	if (errcode != ELDLM_OK)
		GOTO(out, errcode);

	dlmlock = ldlm_handle2lock(lockh);
	LASSERT(dlmlock != NULL);

	lock_res_and_lock(dlmlock);
	LASSERT(dlmlock->l_granted_mode == dlmlock->l_req_mode);

	/* there is no osc_lock associated with an AGL lock */
	osc_lock_lvb_update(env, osc, dlmlock, NULL);

	unlock_res_and_lock(dlmlock);
	LDLM_LOCK_PUT(dlmlock);

out:
	cl_object_put(env, osc2cl(osc));
	cl_env_nested_put(&nest, env);
	RETURN(ldlm_error2errno(errcode));
}
Example no. 4
0
/**
 * Check whether page @page is covered by an additional lock; if it is not,
 * discard the page.
 */
static int mdc_check_and_discard_cb(const struct lu_env *env, struct cl_io *io,
				    struct osc_page *ops, void *cbdata)
{
	struct osc_thread_info *info = osc_env_info(env);
	struct osc_object *osc = cbdata;
	pgoff_t index;

	index = osc_index(ops);
	if (index >= info->oti_fn_index) {
		struct ldlm_lock *tmp;
		struct cl_page *page = ops->ops_cl.cpl_page;

		/* refresh non-overlapped index */
		tmp = mdc_dlmlock_at_pgoff(env, osc, index,
					   OSC_DAP_FL_TEST_LOCK);
		if (tmp != NULL) {
			info->oti_fn_index = CL_PAGE_EOF;
			LDLM_LOCK_PUT(tmp);
		} else if (cl_page_own(env, io, page) == 0) {
			/* discard the page */
			cl_page_discard(env, io, page);
			cl_page_disown(env, io, page);
		} else {
			LASSERT(page->cp_state == CPS_FREEING);
		}
	}

	info->oti_next_index = index + 1;
	return CLP_GANG_OKAY;
}
Example no. 5
0
/**
 * Check whether a qsd instance is all set to send a quota request to the
 * master. This includes checking whether:
 * - the connection to the master is set up and usable,
 * - the qsd isn't stopping,
 * - reintegration has been successfully completed and all indexes are
 *   up-to-date.
 *
 * \param lqe - is the lquota entry for which we would like to send a quota
 *              request
 * \param lockh - is the remote handle of the global lock returned on success
 *
 * \retval 0 on success, appropriate error on failure
 */
static int qsd_ready(struct lquota_entry *lqe, struct lustre_handle *lockh)
{
	struct qsd_qtype_info	*qqi = lqe2qqi(lqe);
	struct qsd_instance	*qsd = qqi->qqi_qsd;
	struct obd_import	*imp = NULL;
	struct ldlm_lock	*lock;
	ENTRY;

	read_lock(&qsd->qsd_lock);
	/* is the qsd about to shut down? */
	if (qsd->qsd_stopping) {
		read_unlock(&qsd->qsd_lock);
		LQUOTA_DEBUG(lqe, "dropping quota req since qsd is stopping");
		/* Target is about to shut down, client will retry */
		RETURN(-EINPROGRESS);
	}

	/* is the connection to the quota master ready? */
	if (qsd->qsd_exp_valid)
		imp = class_exp2cliimp(qsd->qsd_exp);
	if (imp == NULL || imp->imp_invalid) {
		read_unlock(&qsd->qsd_lock);
		LQUOTA_DEBUG(lqe, "connection to master not ready");
		RETURN(-ENOTCONN);
	}

	/* In most cases, reintegration must have been triggered (when enabling
	 * quota or on OST start); however, in a rare race condition (enabling
	 * quota when starting OSTs), we might miss triggering reintegration
	 * for some qqi.
	 *
	 * If the previous reintegration failed for some reason, we'll
	 * re-trigger it here as well. */
	if (!qqi->qqi_glb_uptodate || !qqi->qqi_slv_uptodate) {
		read_unlock(&qsd->qsd_lock);
		LQUOTA_DEBUG(lqe, "not up-to-date, dropping request and "
			     "kicking off reintegration");
		qsd_start_reint_thread(qqi);
		RETURN(-EINPROGRESS);
	}

	/* Fill the remote global lock handle; the master will check this
	 * handle to see if the slave is sending a request with a stale lock */
	lustre_handle_copy(lockh, &qqi->qqi_lockh);
	read_unlock(&qsd->qsd_lock);

	if (!lustre_handle_is_used(lockh))
		RETURN(-ENOLCK);

	lock = ldlm_handle2lock(lockh);
	if (lock == NULL)
		RETURN(-ENOLCK);

	/* return remote lock handle to be packed in quota request */
	lustre_handle_copy(lockh, &lock->l_remote_handle);
	LDLM_LOCK_PUT(lock);

	RETURN(0);
}
Example no. 6
0
/**
 * Returns a weak pointer to the ldlm lock identified by a handle. The
 * returned pointer cannot be dereferenced, as the lock is not protected from
 * concurrent reclaim. This function is a helper for osc_lock_invariant().
 */
static struct ldlm_lock *osc_handle_ptr(struct lustre_handle *handle)
{
	struct ldlm_lock *lock;

	lock = ldlm_handle2lock(handle);
	if (lock)
		LDLM_LOCK_PUT(lock);
	return lock;
}
Example no. 7
0
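/*
 * Read-ahead release callback: drop the LCK_PR reference held on the DLM
 * lock for the duration of read-ahead, then release the lock itself.
 */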
static void osc_read_ahead_release(const struct lu_env *env,
				   void *cbdata)
{
	struct ldlm_lock *dlmlock = cbdata;
	struct lustre_handle lockh;

	ldlm_lock2handle(dlmlock, &lockh);
	ldlm_lock_decref(&lockh, LCK_PR);
	LDLM_LOCK_PUT(dlmlock);
}
Example no. 8
0
/**
 * Implementation of dt_object_operations::do_object_lock
 *
 * Enqueue a lock (via ldlm_cli_enqueue()) on a remote object on the remote
 * MDT, which locks the object in the global namespace. Because cross-MDT
 * locks are relatively rare compared with normal local MDT operations, the
 * lock is released right away instead of being put into the LRU list.
 *
 * \param[in] env	execution environment
 * \param[in] dt	object to be locked
 * \param[out] lh	lock handle
 * \param[in] einfo	enqueue information
 * \param[in] policy	lock policy
 *
 * \retval		ELDLM_OK if locking the object succeeds.
 * \retval		negative errno if locking fails.
 */
static int osp_md_object_lock(const struct lu_env *env,
			      struct dt_object *dt,
			      struct lustre_handle *lh,
			      struct ldlm_enqueue_info *einfo,
			      union ldlm_policy_data *policy)
{
	struct ldlm_res_id	*res_id;
	struct dt_device	*dt_dev = lu2dt_dev(dt->do_lu.lo_dev);
	struct osp_device	*osp = dt2osp_dev(dt_dev);
	struct ptlrpc_request	*req;
	int			rc = 0;
	__u64			flags = 0;
	enum ldlm_mode		mode;

	res_id = einfo->ei_res_id;
	LASSERT(res_id != NULL);

	mode = ldlm_lock_match(osp->opd_obd->obd_namespace,
			       LDLM_FL_BLOCK_GRANTED, res_id,
			       einfo->ei_type, policy,
			       einfo->ei_mode, lh, 0);
	if (mode > 0)
		return ELDLM_OK;

	if (einfo->ei_nonblock)
		flags |= LDLM_FL_BLOCK_NOWAIT;

	req = ldlm_enqueue_pack(osp->opd_exp, 0);
	if (IS_ERR(req))
		RETURN(PTR_ERR(req));

	rc = ldlm_cli_enqueue(osp->opd_exp, &req, einfo, res_id,
			      (const union ldlm_policy_data *)policy,
			      &flags, NULL, 0, LVB_T_NONE, lh, 0);

	ptlrpc_req_finished(req);
	if (rc == ELDLM_OK) {
		struct ldlm_lock *lock;

		lock = __ldlm_handle2lock(lh, 0);
		ldlm_set_cbpending(lock);
		LDLM_LOCK_PUT(lock);
	}

	return rc == ELDLM_OK ? 0 : -EIO;
}
Example no. 9
0
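/*
 * Pack close-intent data into the close request: convert the local lease
 * lock handle into the remote handle expected by the server, cancel the
 * lease locally, and fill in the data version, fid and, for layout
 * split/resync closes, the mirror id or resync id array.
 */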
static void mdc_close_intent_pack(struct ptlrpc_request *req,
				  struct md_op_data *op_data)
{
	struct close_data	*data;
	struct ldlm_lock	*lock;
	enum mds_op_bias	 bias = op_data->op_bias;

	if (!(bias & (MDS_CLOSE_INTENT | MDS_CLOSE_MIGRATE)))
		return;

	data = req_capsule_client_get(&req->rq_pill, &RMF_CLOSE_DATA);
	LASSERT(data != NULL);

	lock = ldlm_handle2lock(&op_data->op_lease_handle);
	if (lock != NULL) {
		data->cd_handle = lock->l_remote_handle;
		LDLM_LOCK_PUT(lock);
	}
	ldlm_cli_cancel(&op_data->op_lease_handle, LCF_LOCAL);

	data->cd_data_version = op_data->op_data_version;
	data->cd_fid = op_data->op_fid2;

	if (bias & MDS_CLOSE_LAYOUT_SPLIT) {
		data->cd_mirror_id = op_data->op_mirror_id;
	} else if (bias & MDS_CLOSE_RESYNC_DONE) {
		struct close_data_resync_done *sync = &data->cd_resync;

		CLASSERT(sizeof(data->cd_resync) <= sizeof(data->cd_reserved));
		sync->resync_count = op_data->op_data_size / sizeof(__u32);
		if (sync->resync_count <= INLINE_RESYNC_ARRAY_SIZE) {
			memcpy(sync->resync_ids_inline, op_data->op_data,
			       op_data->op_data_size);
		} else {
			size_t count = sync->resync_count;

			memcpy(req_capsule_client_get(&req->rq_pill, &RMF_U32),
				op_data->op_data, count * sizeof(__u32));
		}
	}
}
Example no. 10
0
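/*
 * Enqueue upcall for asynchronous glimpse (AGL) locks. No osc_lock is
 * associated with an AGL lock, so when a new lock was granted the object's
 * LVB attributes are refreshed directly from the DLM lock before the object
 * reference passed in through @cookie is dropped.
 */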
static int osc_lock_upcall_agl(void *cookie, struct lustre_handle *lockh,
			       int errcode)
{
	struct osc_object *osc = cookie;
	struct ldlm_lock *dlmlock;
	struct lu_env *env;
	u16 refcheck;

	env = cl_env_get(&refcheck);
	LASSERT(!IS_ERR(env));

	if (errcode == ELDLM_LOCK_MATCHED) {
		errcode = ELDLM_OK;
		goto out;
	}

	if (errcode != ELDLM_OK)
		goto out;

	dlmlock = ldlm_handle2lock(lockh);
	LASSERT(dlmlock);

	lock_res_and_lock(dlmlock);
	LASSERT(dlmlock->l_granted_mode == dlmlock->l_req_mode);

	/* there is no osc_lock associated with an AGL lock */
	osc_lock_lvb_update(env, osc, dlmlock, NULL);

	unlock_res_and_lock(dlmlock);
	LDLM_LOCK_PUT(dlmlock);

out:
	cl_object_put(env, osc2cl(osc));
	cl_env_put(env, &refcheck);
	return ldlm_error2errno(errcode);
}
Example no. 11
0
/**
 * Implementation of struct cl_object_operations::coo_req_attr_set() for osc
 * layer. osc is responsible for struct obdo::o_id and struct obdo::o_seq
 * fields.
 */
static void osc_req_attr_set(const struct lu_env *env, struct cl_object *obj,
			     struct cl_req_attr *attr)
{
	struct lov_oinfo *oinfo;
	struct obdo *oa;
	struct ost_lvb *lvb;
	u64 flags = attr->cra_flags;

	oinfo = cl2osc(obj)->oo_oinfo;
	lvb = &oinfo->loi_lvb;
	oa = attr->cra_oa;

	if ((flags & OBD_MD_FLMTIME) != 0) {
		oa->o_mtime = lvb->lvb_mtime;
		oa->o_valid |= OBD_MD_FLMTIME;
	}
	if ((flags & OBD_MD_FLATIME) != 0) {
		oa->o_atime = lvb->lvb_atime;
		oa->o_valid |= OBD_MD_FLATIME;
	}
	if ((flags & OBD_MD_FLCTIME) != 0) {
		oa->o_ctime = lvb->lvb_ctime;
		oa->o_valid |= OBD_MD_FLCTIME;
	}
	if (flags & OBD_MD_FLGROUP) {
		ostid_set_seq(&oa->o_oi, ostid_seq(&oinfo->loi_oi));
		oa->o_valid |= OBD_MD_FLGROUP;
	}
	if (flags & OBD_MD_FLID) {
		ostid_set_id(&oa->o_oi, ostid_id(&oinfo->loi_oi));
		oa->o_valid |= OBD_MD_FLID;
	}
	if (flags & OBD_MD_FLHANDLE) {
		struct ldlm_lock *lock;
		struct osc_page *opg;

		opg = osc_cl_page_osc(attr->cra_page, cl2osc(obj));
		lock = osc_dlmlock_at_pgoff(env, cl2osc(obj), osc_index(opg),
					    OSC_DAP_FL_TEST_LOCK |
					    OSC_DAP_FL_CANCELING);
		if (lock == NULL && !opg->ops_srvlock) {
			struct ldlm_resource *res;
			struct ldlm_res_id *resname;

			CL_PAGE_DEBUG(D_ERROR, env, attr->cra_page,
				      "uncovered page!\n");

			resname = &osc_env_info(env)->oti_resname;
			ostid_build_res_name(&oinfo->loi_oi, resname);
			res = ldlm_resource_get(
				osc_export(cl2osc(obj))->exp_obd->obd_namespace,
				NULL, resname, LDLM_EXTENT, 0);
			ldlm_resource_dump(D_ERROR, res);

			libcfs_debug_dumpstack(NULL);
			LBUG();
		}

		/* check for lockless io. */
		if (lock != NULL) {
			oa->o_handle = lock->l_remote_handle;
			oa->o_valid |= OBD_MD_FLHANDLE;
			LDLM_LOCK_PUT(lock);
		}
	}
}
Example no. 12
0
/*
 * Get an intent per-ID lock or global-index lock from the master.
 *
 * \param env    - the environment passed by the caller
 * \param exp    - is the export to use to send the intent RPC
 * \param qbody  - quota body to be packed in request
 * \param sync   - synchronous or asynchronous (pre-acquire)
 * \param it_op  - IT_QUOTA_DQACQ or IT_QUOTA_CONN
 * \param completion - completion callback
 * \param qqi    - is the qsd_qtype_info structure to pass to the completion
 *                 function
 * \param lvb    - is the lvb associated with the lock and returned by the
 *                 server
 * \param arg    - is an opaque argument passed to the completion callback
 *
 * \retval 0     - success
 * \retval -ve   - appropriate errors
 */
int qsd_intent_lock(const struct lu_env *env, struct obd_export *exp,
		    struct quota_body *qbody, bool sync, int it_op,
		    qsd_req_completion_t completion, struct qsd_qtype_info *qqi,
		    struct lquota_lvb *lvb, void *arg)
{
	struct qsd_thread_info	*qti = qsd_info(env);
	struct ptlrpc_request	*req;
	struct qsd_async_args	*aa = NULL;
	struct ldlm_intent	*lit;
	struct quota_body	*req_qbody;
	__u64			 flags = LDLM_FL_HAS_INTENT;
	int			 rc;
	ENTRY;

	LASSERT(exp != NULL);
	LASSERT(!lustre_handle_is_used(&qbody->qb_lockh));

	memset(&qti->qti_lockh, 0, sizeof(qti->qti_lockh));

	req = ptlrpc_request_alloc(class_exp2cliimp(exp),
				   &RQF_LDLM_INTENT_QUOTA);
	if (req == NULL)
		GOTO(out, rc = -ENOMEM);

	req->rq_no_retry_einprogress = 1;
	rc = ldlm_prep_enqueue_req(exp, req, NULL, 0);
	if (rc) {
		ptlrpc_request_free(req);
		GOTO(out, rc);
	}

	lit = req_capsule_client_get(&req->rq_pill, &RMF_LDLM_INTENT);
	lit->opc = (__u64)it_op;

	req_qbody = req_capsule_client_get(&req->rq_pill, &RMF_QUOTA_BODY);
	*req_qbody = *qbody;

	req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
			     sizeof(*lvb));
	ptlrpc_request_set_replen(req);

	switch(it_op) {
	case IT_QUOTA_CONN:
		/* build resource name associated with global index */
		fid_build_reg_res_name(&qbody->qb_fid, &qti->qti_resid);

		/* copy einfo template and fill ei_cbdata with qqi pointer */
		memcpy(&qti->qti_einfo, &qsd_glb_einfo, sizeof(qti->qti_einfo));
		qti->qti_einfo.ei_cbdata = qqi;

		/* don't cancel global lock on memory pressure */
		flags |= LDLM_FL_NO_LRU;
		break;
	case IT_QUOTA_DQACQ:
		/* build resource name associated with the per-ID quota lock */
		fid_build_quota_res_name(&qbody->qb_fid, &qbody->qb_id,
					 &qti->qti_resid);

		/* copy einfo template and fill ei_cbdata with lqe pointer */
		memcpy(&qti->qti_einfo, &qsd_id_einfo, sizeof(qti->qti_einfo));
		qti->qti_einfo.ei_cbdata = arg;
		break;
	default:
		LASSERTF(0, "invalid it_op %d", it_op);
	}

	/* build lock enqueue request */
	rc = ldlm_cli_enqueue(exp, &req, &qti->qti_einfo, &qti->qti_resid, NULL,
			      &flags, (void *)lvb, sizeof(*lvb), LVB_T_LQUOTA,
			      &qti->qti_lockh, 1);
	if (rc < 0) {
		ptlrpc_req_finished(req);
		GOTO(out, rc);
	}

	/* grab reference on backend structure for the new lock */
	switch(it_op) {
	case IT_QUOTA_CONN:
		/* grab reference on qqi for new lock */
#ifdef USE_LU_REF
	{
		struct ldlm_lock	*lock;

		lock = ldlm_handle2lock(&qti->qti_lockh);
		if (lock == NULL) {
			ptlrpc_req_finished(req);
			GOTO(out, rc = -ENOLCK);
		}
		lu_ref_add(&qqi->qqi_reference, "glb_lock", lock);
		LDLM_LOCK_PUT(lock);
	}
#endif
		qqi_getref(qqi);
		break;
	case IT_QUOTA_DQACQ:
		/* grab reference on lqe for new lock */
		lqe_getref((struct lquota_entry *)arg);
		/* all acquire/release requests are sent with the no_resend and
		 * no_delay flags */
		req->rq_no_resend = req->rq_no_delay = 1;
		break;
	default:
		break;
	}

	CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
	aa = ptlrpc_req_async_args(req);
	aa->aa_exp = exp;
	aa->aa_qqi = qqi;
	aa->aa_arg = arg;
	aa->aa_lvb = lvb;
	aa->aa_completion = completion;
	lustre_handle_copy(&aa->aa_lockh, &qti->qti_lockh);

	if (sync) {
		/* send lock enqueue request and wait for completion */
		rc = ptlrpc_queue_wait(req);
		rc = qsd_intent_interpret(env, req, aa, rc);
		ptlrpc_req_finished(req);
	} else {
		/* queue lock request and return */
		req->rq_interpret_reply = qsd_intent_interpret;
		ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1);
	}

	RETURN(rc);
out:
	completion(env, qqi, qbody, NULL, &qti->qti_lockh, lvb, arg, rc);
	return rc;
}
Example no. 13
0
/**
 * Lock upcall function that is executed either when a reply to an ENQUEUE RPC
 * is received from a server, or after osc_enqueue_base() matched a local DLM
 * lock.
 */
static int osc_lock_upcall(void *cookie, int errcode)
{
	struct osc_lock	 *olck  = cookie;
	struct cl_lock_slice    *slice = &olck->ols_cl;
	struct cl_lock	  *lock  = slice->cls_lock;
	struct lu_env	   *env;
	struct cl_env_nest       nest;

	ENTRY;
	env = cl_env_nested_get(&nest);
	if (!IS_ERR(env)) {
		int rc;

		cl_lock_mutex_get(env, lock);

		LASSERT(lock->cll_state >= CLS_QUEUING);
		if (olck->ols_state == OLS_ENQUEUED) {
			olck->ols_state = OLS_UPCALL_RECEIVED;
			rc = ldlm_error2errno(errcode);
		} else if (olck->ols_state == OLS_CANCELLED) {
			rc = -EIO;
		} else {
			CERROR("Impossible state: %d\n", olck->ols_state);
			LBUG();
		}
		if (rc) {
			struct ldlm_lock *dlmlock;

			dlmlock = ldlm_handle2lock(&olck->ols_handle);
			if (dlmlock != NULL) {
				lock_res_and_lock(dlmlock);
				spin_lock(&osc_ast_guard);
				LASSERT(olck->ols_lock == NULL);
				dlmlock->l_ast_data = NULL;
				olck->ols_handle.cookie = 0ULL;
				spin_unlock(&osc_ast_guard);
				ldlm_lock_fail_match_locked(dlmlock);
				unlock_res_and_lock(dlmlock);
				LDLM_LOCK_PUT(dlmlock);
			}
		} else {
			if (olck->ols_glimpse)
				olck->ols_glimpse = 0;
			osc_lock_upcall0(env, olck);
		}

		/* Error handling, some errors are tolerable. */
		if (olck->ols_locklessable && rc == -EUSERS) {
			/* This is a tolerable error, turn this lock into a
			 * lockless lock.
			 */
			osc_object_set_contended(cl2osc(slice->cls_obj));
			LASSERT(slice->cls_ops == &osc_lock_ops);

			/* Change this lock to an ldlmlock-less lock. */
			osc_lock_to_lockless(env, olck, 1);
			olck->ols_state = OLS_GRANTED;
			rc = 0;
		} else if (olck->ols_glimpse && rc == -ENAVAIL) {
			osc_lock_lvb_update(env, olck, rc);
			cl_lock_delete(env, lock);
			/* Hide the error. */
			rc = 0;
		}

		if (rc == 0) {
			/* In the AGL case, the RPC sponsor may exit the
			 * cl_lock processing without calling wait() before the
			 * related OSC lock upcall(). So update the lock status
			 * according to the enqueue result inside the AGL
			 * upcall(). */
			if (olck->ols_agl) {
				lock->cll_flags |= CLF_FROM_UPCALL;
				cl_wait_try(env, lock);
				lock->cll_flags &= ~CLF_FROM_UPCALL;
				if (!olck->ols_glimpse)
					olck->ols_agl = 0;
			}
			cl_lock_signal(env, lock);
			/* del user for lock upcall cookie */
			cl_unuse_try(env, lock);
		} else {
			/* del user for lock upcall cookie */
			cl_lock_user_del(env, lock);
			cl_lock_error(env, lock, rc);
		}

		/* release cookie reference, acquired by osc_lock_enqueue() */
		cl_lock_hold_release(env, lock, "upcall", lock);
		cl_lock_mutex_put(env, lock);

		lu_ref_del(&lock->cll_reference, "upcall", lock);
		/* This may be the last reference, so it must be called after
		 * cl_lock_mutex_put(). */
		cl_lock_put(env, lock);

		cl_env_nested_put(&nest, env);
	} else {
		/* should never happen, similar to osc_ldlm_blocking_ast(). */
		LBUG();
	}
	RETURN(errcode);
}
Example no. 14
0
/*
 * Handle a quota request from a slave.
 *
 * \param env  - is the environment passed by the caller
 * \param ld   - is the lu device associated with the qmt
 * \param req  - is the quota acquire request
 */
static int qmt_dqacq(const struct lu_env *env, struct lu_device *ld,
		     struct ptlrpc_request *req)
{
	struct qmt_device	*qmt = lu2qmt_dev(ld);
	struct quota_body	*qbody, *repbody;
	struct obd_uuid		*uuid;
	struct ldlm_lock	*lock;
	struct lquota_entry	*lqe;
	int			 pool_id, pool_type, qtype;
	int			 rc;
	ENTRY;

	qbody = req_capsule_client_get(&req->rq_pill, &RMF_QUOTA_BODY);
	if (qbody == NULL)
		RETURN(err_serious(-EPROTO));

	repbody = req_capsule_server_get(&req->rq_pill, &RMF_QUOTA_BODY);
	if (repbody == NULL)
		RETURN(err_serious(-EFAULT));

	/* verify if global lock is stale */
	if (!lustre_handle_is_used(&qbody->qb_glb_lockh))
		RETURN(-ENOLCK);

	lock = ldlm_handle2lock(&qbody->qb_glb_lockh);
	if (lock == NULL)
		RETURN(-ENOLCK);
	LDLM_LOCK_PUT(lock);

	uuid = &req->rq_export->exp_client_uuid;

	if (req_is_rel(qbody->qb_flags) + req_is_acq(qbody->qb_flags) +
	    req_is_preacq(qbody->qb_flags) > 1) {
		CERROR("%s: malformed quota request with conflicting flags set "
		       "(%x) from slave %s\n", qmt->qmt_svname,
		       qbody->qb_flags, obd_uuid2str(uuid));
		RETURN(-EPROTO);
	}

	if (req_is_acq(qbody->qb_flags) || req_is_preacq(qbody->qb_flags)) {
		/* acquire and pre-acquire should use a valid ID lock */

		if (!lustre_handle_is_used(&qbody->qb_lockh))
			RETURN(-ENOLCK);

		lock = ldlm_handle2lock(&qbody->qb_lockh);
		if (lock == NULL)
			/* no lock associated with this handle */
			RETURN(-ENOLCK);

		LDLM_DEBUG(lock, "%sacquire request",
			   req_is_preacq(qbody->qb_flags) ? "pre" : "");

		if (!obd_uuid_equals(&lock->l_export->exp_client_uuid, uuid)) {
			/* sorry, no way to cheat ... */
			LDLM_LOCK_PUT(lock);
			RETURN(-ENOLCK);
		}

		if ((lock->l_flags & LDLM_FL_AST_SENT) != 0) {
			struct ptlrpc_service_part	*svc;
			unsigned int			 timeout;

			svc = req->rq_rqbd->rqbd_svcpt;
			timeout = at_est2timeout(at_get(&svc->scp_at_estimate));
			timeout = max(timeout, ldlm_timeout);

			/* lock is being cancelled, prolong timeout */
			ldlm_refresh_waiting_lock(lock, timeout);
		}
		LDLM_LOCK_PUT(lock);
	}

	/* extract pool & quota information from global index FID packed in the
	 * request */
	rc = lquota_extract_fid(&qbody->qb_fid, &pool_id, &pool_type, &qtype);
	if (rc)
		RETURN(-EINVAL);

	/* Find the quota entry associated with the quota id */
	lqe = qmt_pool_lqe_lookup(env, qmt, pool_id, pool_type, qtype,
				  &qbody->qb_id);
	if (IS_ERR(lqe))
		RETURN(PTR_ERR(lqe));

	/* process quota request */
	rc = qmt_dqacq0(env, lqe, qmt, uuid, qbody->qb_flags, qbody->qb_count,
			qbody->qb_usage, repbody);

	if (lustre_handle_is_used(&qbody->qb_lockh))
		/* return the current qunit value only to slaves owning a
		 * per-ID quota lock. For enqueue, the qunit value will be
		 * returned in the LVB */
		repbody->qb_qunit = lqe->lqe_qunit;
	lqe_putref(lqe);
	RETURN(rc);
}