Example #1
static int osc_io_read_ahead(const struct lu_env *env,
			     const struct cl_io_slice *ios,
			     pgoff_t start, struct cl_read_ahead *ra)
{
	struct osc_object	*osc = cl2osc(ios->cis_obj);
	struct ldlm_lock	*dlmlock;
	int			result = -ENODATA;
	ENTRY;

	dlmlock = osc_dlmlock_at_pgoff(env, osc, start, 0);
	if (dlmlock != NULL) {
		if (dlmlock->l_req_mode != LCK_PR) {
			struct lustre_handle lockh;
			ldlm_lock2handle(dlmlock, &lockh);
			ldlm_lock_addref(&lockh, LCK_PR);
			ldlm_lock_decref(&lockh, dlmlock->l_req_mode);
		}

		ra->cra_end = cl_index(osc2cl(osc),
				       dlmlock->l_policy_data.l_extent.end);
		ra->cra_release = osc_read_ahead_release;
		ra->cra_cbdata = dlmlock;
		result = 0;
	}

	RETURN(result);
}
Example #2
static void osc_read_ahead_release(const struct lu_env *env,
				   void *cbdata)
{
	struct ldlm_lock *dlmlock = cbdata;
	struct lustre_handle lockh;

	ldlm_lock2handle(dlmlock, &lockh);
	ldlm_lock_decref(&lockh, LCK_PR);
	LDLM_LOCK_PUT(dlmlock);
}
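
Examples 1 and 2 work as a pair: the read-ahead callback converts the DLM lock it found into a lustre_handle, makes sure it holds an LCK_PR reference on it, and registers osc_read_ahead_release() in cra_release; the release callback later drops the PR reference and the lock reference. Below is a minimal sketch of this handle-based pin/unpin pattern, using only the calls shown above; it assumes `dlmlock` was returned, already referenced, by a lookup such as osc_dlmlock_at_pgoff() in Example 1.

/* Sketch: pin a DLM lock through its handle, use it, then release it. */
struct lustre_handle lockh;

ldlm_lock2handle(dlmlock, &lockh);	/* lock pointer -> opaque handle */
ldlm_lock_addref(&lockh, LCK_PR);	/* take a PR mode reference */

/* ... the covered extent can be used while the PR reference is held ... */

ldlm_lock_decref(&lockh, LCK_PR);	/* drop the PR mode reference */
LDLM_LOCK_PUT(dlmlock);			/* drop the lookup reference */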
Example #3
/**
 * Blocking callback handler for the global index lock.
 *
 * \param lock - the lock for which the AST occurred
 * \param desc - the description of a conflicting lock, in case of a blocking
 *               AST
 * \param data - the value of lock->l_ast_data
 * \param flag - LDLM_CB_BLOCKING or LDLM_CB_CANCELING, used to distinguish
 *               cancellation from blocking ASTs
 */
static int qsd_glb_blocking_ast(struct ldlm_lock *lock,
				struct ldlm_lock_desc *desc, void *data,
				int flag)
{
	int rc = 0;
	ENTRY;

	switch (flag) {
	case LDLM_CB_BLOCKING: {
		struct lustre_handle lockh;

		LDLM_DEBUG(lock, "blocking AST on global quota lock");
		ldlm_lock2handle(lock, &lockh);
		rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
		break;
	}
	case LDLM_CB_CANCELING: {
		struct qsd_qtype_info *qqi;

		LDLM_DEBUG(lock, "canceling global quota lock");

		qqi = qsd_glb_ast_data_get(lock, true);
		if (qqi == NULL)
			break;

		/* we are losing the global index lock, so let's mark the
		 * global & slave indexes as not up-to-date any more */
		write_lock(&qqi->qqi_qsd->qsd_lock);
		qqi->qqi_glb_uptodate = false;
		qqi->qqi_slv_uptodate = false;
		if (lock->l_handle.h_cookie == qqi->qqi_lockh.cookie)
			memset(&qqi->qqi_lockh, 0, sizeof(qqi->qqi_lockh));
		write_unlock(&qqi->qqi_qsd->qsd_lock);

		CDEBUG(D_QUOTA, "%s: losing global index lock for %s type\n",
		       qqi->qqi_qsd->qsd_svname, QTYPE_NAME((qqi->qqi_qtype)));

		/* Kick off the reintegration thread if it isn't running
		 * already. If this is just a local cancel (for stack cleanup
		 * or eviction), don't re-trigger the reintegration. */
		if (!ldlm_is_local_only(lock))
			qsd_start_reint_thread(qqi);

		lu_ref_del(&qqi->qqi_reference, "ast_data_get", lock);
		qqi_putref(qqi);
		break;
	}
	default:
		LASSERTF(0, "invalid flags for blocking ast %d", flag);
	}

	RETURN(rc);
}
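
The switch on `flag` above is the canonical shape of an LDLM blocking AST, and the same shape recurs in Examples 6 and 7 below: on LDLM_CB_BLOCKING the lock is converted to a handle and cancelled asynchronously, on LDLM_CB_CANCELING the state the lock protected is invalidated. A stripped-down sketch of that shape follows; my_invalidate_state() is a hypothetical placeholder for the subsystem-specific cleanup, not a real Lustre function.

/* Sketch: minimal blocking AST following the pattern of Examples 3, 6 and 7. */
static int my_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
			   void *data, int flag)
{
	struct lustre_handle lockh;
	int rc = 0;

	switch (flag) {
	case LDLM_CB_BLOCKING:
		/* A conflicting request is waiting: cancel our lock. */
		ldlm_lock2handle(lock, &lockh);
		rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
		break;
	case LDLM_CB_CANCELING:
		/* The lock is going away: invalidate whatever it covered.
		 * my_invalidate_state() is a hypothetical placeholder. */
		my_invalidate_state(lock);
		break;
	default:
		LBUG();
	}
	return rc;
}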
Example #4
static int mgs_completion_ast_ir(struct ldlm_lock *lock, int flags,
                                 void *cbdata)
{
        ENTRY;

        if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
                       LDLM_FL_BLOCK_CONV))) {
                struct fs_db *fsdb = (struct fs_db *)lock->l_ast_data;
                struct lustre_handle lockh;

                mgs_ir_notify_complete(fsdb);

                ldlm_lock2handle(lock, &lockh);
                ldlm_lock_decref_and_cancel(&lockh, LCK_EX);
        }

        RETURN(ldlm_completion_ast(lock, flags, cbdata));
}
Example #5
static int mgs_completion_ast_config(struct ldlm_lock *lock, int flags,
                                     void *cbdata)
{
        ENTRY;

        if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
                       LDLM_FL_BLOCK_CONV))) {
                struct fs_db *fsdb = (struct fs_db *)lock->l_ast_data;
                struct lustre_handle lockh;

                /* clear the bit before lock put */
                cfs_clear_bit(FSDB_REVOKING_LOCK, &fsdb->fsdb_flags);

                ldlm_lock2handle(lock, &lockh);
                ldlm_lock_decref_and_cancel(&lockh, LCK_EX);
        }

        RETURN(ldlm_completion_ast(lock, flags, cbdata));
}
Example #6
int llu_md_blocking_ast(struct ldlm_lock *lock,
                        struct ldlm_lock_desc *desc,
                        void *data, int flag)
{
        struct lustre_handle lockh;
        int rc;
        ENTRY;


        switch (flag) {
        case LDLM_CB_BLOCKING:
                ldlm_lock2handle(lock, &lockh);
		rc = ldlm_cli_cancel(&lockh, 0);
                if (rc < 0) {
                        CDEBUG(D_INODE, "ldlm_cli_cancel: %d\n", rc);
                        RETURN(rc);
                }
                break;
        case LDLM_CB_CANCELING: {
		struct inode *inode = llu_inode_from_resource_lock(lock);
                struct llu_inode_info *lli;
                struct intnl_stat *st;
                __u64 bits = lock->l_policy_data.l_inodebits.bits;
                struct lu_fid *fid;

		/* Inode is set to lock->l_resource->lr_lvb_inode
		 * for mdc - bug 24555 */
		LASSERT(lock->l_ast_data == NULL);

                /* Invalidate all dentries associated with this inode */
                if (inode == NULL)
                        break;

                lli = llu_i2info(inode);
                st = llu_i2stat(inode);

                if (bits & MDS_INODELOCK_UPDATE)
                        lli->lli_flags &= ~LLIF_MDS_SIZE_LOCK;

		fid = &lli->lli_fid;
		if (!fid_res_name_eq(fid, &lock->l_resource->lr_name))
			LDLM_ERROR(lock, "data mismatch with object "
				   DFID" (%p)", PFID(fid), inode);
                if (S_ISDIR(st->st_mode) &&
                    (bits & MDS_INODELOCK_UPDATE)) {
                        CDEBUG(D_INODE, "invalidating inode %llu\n",
                               (long long)st->st_ino);

                        llu_invalidate_inode_pages(inode);
                }

/*
                if (inode->i_sb->s_root &&
                    inode != inode->i_sb->s_root->d_inode)
                        ll_unhash_aliases(inode);
*/
                I_RELE(inode);
                break;
        }
        default:
                LBUG();
        }

        RETURN(0);
}
Example #7
/**
 * Blocking callback handler for a per-ID lock.
 *
 * \param lock - the lock for which the AST occurred
 * \param desc - the description of a conflicting lock, in case of a blocking
 *               AST
 * \param data - the value of lock->l_ast_data
 * \param flag - LDLM_CB_BLOCKING or LDLM_CB_CANCELING, used to distinguish
 *               cancellation from blocking ASTs
 */
static int qsd_id_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
			       void *data, int flag)
{
	struct lustre_handle	lockh;
	int			rc = 0;
	ENTRY;

	switch (flag) {
	case LDLM_CB_BLOCKING: {

		LDLM_DEBUG(lock, "blocking AST on ID quota lock");
		ldlm_lock2handle(lock, &lockh);
		rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
		break;
	}
	case LDLM_CB_CANCELING: {
		struct lu_env           *env;
		struct lquota_entry	*lqe;
		bool			 rel = false;

		LDLM_DEBUG(lock, "canceling global quota lock");
		lqe = qsd_id_ast_data_get(lock, true);
		if (lqe == NULL)
			break;

		LQUOTA_DEBUG(lqe, "losing ID lock");

		/* Just a local cancel (for stack cleanup or eviction);
		 * don't release quota space in this case. */
		if (ldlm_is_local_only(lock)) {
			lqe_putref(lqe);
			break;
		}

		/* allocate environment */
		OBD_ALLOC_PTR(env);
		if (env == NULL) {
			lqe_putref(lqe);
			rc = -ENOMEM;
			break;
		}

		/* initialize environment */
		rc = lu_env_init(env, LCT_DT_THREAD);
		if (rc) {
			OBD_FREE_PTR(env);
			lqe_putref(lqe);
			break;
		}

		ldlm_lock2handle(lock, &lockh);
		lqe_write_lock(lqe);
		if (lustre_handle_equal(&lockh, &lqe->lqe_lockh)) {
			/* Clear lqe_lockh & reset qunit to 0 */
			qsd_set_qunit(lqe, 0);
			memset(&lqe->lqe_lockh, 0, sizeof(lqe->lqe_lockh));
			lqe->lqe_edquot = false;
			rel = true;
		}
		lqe_write_unlock(lqe);

		/* If there is a dqacq in flight, the release will be skipped
		 * at this time and triggered on dqacq completion later,
		 * which means there could be a short window where the slave
		 * holds spare grant without the per-ID lock. */
		if (rel)
			rc = qsd_adjust(env, lqe);

		/* release lqe reference grabbed by qsd_id_ast_data_get() */
		lqe_putref(lqe);
		lu_env_fini(env);
		OBD_FREE_PTR(env);
		break;
	}
	default:
		LASSERTF(0, "invalid flags for blocking ast %d", flag);
	}

	RETURN(rc);
}
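
Besides the AST skeleton, Example 7 shows how to obtain a struct lu_env in a callback that is not handed one: allocate it, initialize it for the right thread context, and tear it down afterwards. A minimal sketch of that allocate/init/fini/free sequence, using only the calls visible above:

/* Sketch: temporary lu_env for code running in a context that has no env,
 * mirroring the LDLM_CB_CANCELING branch of Example 7. */
struct lu_env *env;
int rc;

OBD_ALLOC_PTR(env);			/* allocate the environment */
if (env == NULL)
	return -ENOMEM;

rc = lu_env_init(env, LCT_DT_THREAD);	/* set up thread context keys */
if (rc == 0) {
	/* ... use env, e.g. qsd_adjust(env, lqe) as in Example 7 ... */
	lu_env_fini(env);		/* tear down the context keys */
}
OBD_FREE_PTR(env);			/* free the environment */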
Example #8
/**
 * Helper for osc_dlm_blocking_ast() handling discrepancies between cl_lock
 * and ldlm_lock caches.
 */
static int osc_dlm_blocking_ast0(const struct lu_env *env,
				 struct ldlm_lock *dlmlock,
				 void *data, int flag)
{
	struct osc_lock *olck;
	struct cl_lock  *lock;
	int result;
	int cancel;

	LASSERT(flag == LDLM_CB_BLOCKING || flag == LDLM_CB_CANCELING);

	cancel = 0;
	olck = osc_ast_data_get(dlmlock);
	if (olck != NULL) {
		lock = olck->ols_cl.cls_lock;
		cl_lock_mutex_get(env, lock);
		LINVRNT(osc_lock_invariant(olck));
		if (olck->ols_ast_wait) {
			/* wake up osc_lock_use() */
			cl_lock_signal(env, lock);
			olck->ols_ast_wait = 0;
		}
		/*
		 * Lock might have been canceled while this thread was
		 * sleeping for lock mutex, but olck is pinned in memory.
		 */
		if (olck == dlmlock->l_ast_data) {
			/*
			 * NOTE: DLM sends blocking AST's for failed locks
			 *       (that are still in pre-OLS_GRANTED state)
			 *       too, and they have to be canceled otherwise
			 *       DLM lock is never destroyed and stuck in
			 *       the memory.
			 *
			 *       Alternatively, ldlm_cli_cancel() can be
			 *       called here directly for osc_locks with
			 *       ols_state < OLS_GRANTED to maintain an
			 *       invariant that ->clo_cancel() is only called
			 *       for locks that were granted.
			 */
			LASSERT(data == olck);
			osc_lock_blocking(env, dlmlock,
					  olck, flag == LDLM_CB_BLOCKING);
		} else
			cancel = 1;
		cl_lock_mutex_put(env, lock);
		osc_ast_data_put(env, olck);
	} else
		/*
		 * DLM lock exists, but there is no cl_lock attached to it.
		 * This is a `normal' race. cl_object and its cl_lock's can be
		 * removed by memory pressure, together with all pages.
		 */
		cancel = (flag == LDLM_CB_BLOCKING);

	if (cancel) {
		struct lustre_handle *lockh;

		lockh = &osc_env_info(env)->oti_handle;
		ldlm_lock2handle(dlmlock, lockh);
		result = ldlm_cli_cancel(lockh, LCF_ASYNC);
	} else
		result = 0;
	return result;
}
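
Example 8 hinges on the pairing of osc_ast_data_get() and osc_ast_data_put(): the cl_lock attached to the DLM lock may already have been reclaimed under memory pressure, so it has to be looked up and pinned before it is touched. A minimal sketch of that pairing, under the same assumptions as the code above:

/* Sketch: safe access to the osc_lock attached to a DLM lock,
 * following the get/put pairing used in Example 8. */
struct osc_lock *olck;

olck = osc_ast_data_get(dlmlock);	/* pin the attached osc_lock, if any */
if (olck != NULL) {
	/* ... inspect or update olck while it is pinned ... */
	osc_ast_data_put(env, olck);	/* drop the pin taken above */
} else {
	/* No cl_lock attached: a normal race, the cl_object and its
	 * locks can be reclaimed under memory pressure. */
}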