Example #1
/**
 * Discard pages protected by the given lock. This function traverses the
 * radix tree to find all covered pages and discards them. If a page is
 * also covered by another lock, it remains in the cache.
 *
 * If an error occurs at any step, the process continues anyway (the
 * reasoning behind this being that lock cancellation cannot be delayed
 * indefinitely).
 */
static int mdc_lock_discard_pages(const struct lu_env *env,
				  struct osc_object *osc,
				  pgoff_t start, pgoff_t end,
				  bool discard)
{
	struct osc_thread_info *info = osc_env_info(env);
	struct cl_io *io = &info->oti_io;
	osc_page_gang_cbt cb;
	int res;
	int result;

	ENTRY;

	io->ci_obj = cl_object_top(osc2cl(osc));
	io->ci_ignore_layout = 1;
	result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
	if (result != 0)
		GOTO(out, result);

	cb = discard ? osc_discard_cb : mdc_check_and_discard_cb;
	info->oti_fn_index = info->oti_next_index = start;
	do {
		res = osc_page_gang_lookup(env, io, osc, info->oti_next_index,
					   end, cb, (void *)osc);
		if (info->oti_next_index > end)
			break;

		if (res == CLP_GANG_RESCHED)
			cond_resched();
	} while (res != CLP_GANG_OKAY);
out:
	cl_io_fini(env, io);
	RETURN(result);
}
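
The gang walk above relies on the callback contract of osc_page_gang_lookup(): the callback is invoked for every page found in the range, must advance oti_next_index past the pages it has handled, and returns CLP_GANG_OKAY to continue or CLP_GANG_RESCHED to request a reschedule. A minimal sketch of such a callback follows; example_discard_cb is a hypothetical name, and the real osc_discard_cb/mdc_check_and_discard_cb carry more logic.

/* Minimal sketch of an osc_page_gang_cbt callback (hypothetical name).
 * It must advance oti_next_index past the page it handled so the
 * do/while loop above can resume after a CLP_GANG_RESCHED. */
static int example_discard_cb(const struct lu_env *env, struct cl_io *io,
			      struct osc_page *ops, void *cbdata)
{
	struct osc_thread_info *info = osc_env_info(env);

	/* ... check whether the page at osc_index(ops) is still covered
	 * by another lock and, if not, discard it ... */

	info->oti_next_index = osc_index(ops) + 1;
	return CLP_GANG_OKAY;
}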
Example #2
static int osc_io_read_ahead(const struct lu_env *env,
			     const struct cl_io_slice *ios,
			     pgoff_t start, struct cl_read_ahead *ra)
{
	struct osc_object	*osc = cl2osc(ios->cis_obj);
	struct ldlm_lock	*dlmlock;
	int			result = -ENODATA;
	ENTRY;

	dlmlock = osc_dlmlock_at_pgoff(env, osc, start, 0);
	if (dlmlock != NULL) {
		if (dlmlock->l_req_mode != LCK_PR) {
			struct lustre_handle lockh;
			ldlm_lock2handle(dlmlock, &lockh);
			ldlm_lock_addref(&lockh, LCK_PR);
			ldlm_lock_decref(&lockh, dlmlock->l_req_mode);
		}

		ra->cra_end = cl_index(osc2cl(osc),
				       dlmlock->l_policy_data.l_extent.end);
		ra->cra_release = osc_read_ahead_release;
		ra->cra_cbdata = dlmlock;
		result = 0;
	}

	RETURN(result);
}
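
The cra_release callback installed above is expected to undo the LCK_PR reference once read-ahead is done with the lock. A plausible sketch of osc_read_ahead_release(), assuming cra_cbdata still carries the dlmlock; not the verbatim function:

/* Sketch of the release callback paired with osc_io_read_ahead():
 * drop the LCK_PR mode reference taken there, then the lock
 * reference held through cra_cbdata. */
static void osc_read_ahead_release(const struct lu_env *env, void *cbdata)
{
	struct ldlm_lock *dlmlock = cbdata;
	struct lustre_handle lockh;

	ldlm_lock2handle(dlmlock, &lockh);
	ldlm_lock_decref(&lockh, LCK_PR);
	LDLM_LOCK_PUT(dlmlock);
}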
Example #3
static int osc_lock_upcall_agl(void *cookie, struct lustre_handle *lockh,
			       int errcode)
{
	struct osc_object	*osc = cookie;
	struct ldlm_lock	*dlmlock;
	struct lu_env		*env;
	struct cl_env_nest	 nest;
	ENTRY;

	env = cl_env_nested_get(&nest);
	LASSERT(!IS_ERR(env));

	if (errcode == ELDLM_LOCK_MATCHED)
		GOTO(out, errcode = ELDLM_OK);

	if (errcode != ELDLM_OK)
		GOTO(out, errcode);

	dlmlock = ldlm_handle2lock(lockh);
	LASSERT(dlmlock != NULL);

	lock_res_and_lock(dlmlock);
	LASSERT(dlmlock->l_granted_mode == dlmlock->l_req_mode);

	/* there is no osc_lock associated with AGL lock */
	osc_lock_lvb_update(env, osc, dlmlock, NULL);

	unlock_res_and_lock(dlmlock);
	LDLM_LOCK_PUT(dlmlock);

out:
	cl_object_put(env, osc2cl(osc));
	cl_env_nested_put(&nest, env);
	RETURN(ldlm_error2errno(errcode));
}
Example #4
/**
 * Helper for mdc_dlm_blocking_ast() handling discrepancies between cl_lock
 * and ldlm_lock caches.
 */
static int mdc_dlm_blocking_ast0(const struct lu_env *env,
				 struct ldlm_lock *dlmlock,
				 void *data, int flag)
{
	struct cl_object *obj = NULL;
	int result = 0;
	bool discard;
	enum cl_lock_mode mode = CLM_READ;

	ENTRY;

	LASSERT(flag == LDLM_CB_CANCELING);
	LASSERT(dlmlock != NULL);

	lock_res_and_lock(dlmlock);
	if (dlmlock->l_granted_mode != dlmlock->l_req_mode) {
		dlmlock->l_ast_data = NULL;
		unlock_res_and_lock(dlmlock);
		RETURN(0);
	}

	discard = ldlm_is_discard_data(dlmlock);
	if (dlmlock->l_granted_mode & (LCK_PW | LCK_GROUP))
		mode = CLM_WRITE;

	if (dlmlock->l_ast_data != NULL) {
		obj = osc2cl(dlmlock->l_ast_data);
		dlmlock->l_ast_data = NULL;
		cl_object_get(obj);
	}
	ldlm_set_kms_ignore(dlmlock);
	unlock_res_and_lock(dlmlock);

	/* if l_ast_data is NULL, the dlmlock was enqueued by AGL or
	 * the object has been destroyed. */
	if (obj != NULL) {
		struct cl_attr *attr = &osc_env_info(env)->oti_attr;

		/* Destroy pages covered by the extent of the DLM lock */
		result = mdc_lock_flush(env, cl2osc(obj), cl_index(obj, 0),
					CL_PAGE_EOF, mode, discard);
		/* Losing a lock, set KMS to 0.
		 * NB: it is assumed that the DOM lock covers the whole data
		 * on the MDT. */
		lock_res_and_lock(dlmlock);
		cl_object_attr_lock(obj);
		attr->cat_kms = 0;
		cl_object_attr_update(env, obj, attr, CAT_KMS);
		cl_object_attr_unlock(obj);
		unlock_res_and_lock(dlmlock);
		cl_object_put(env, obj);
	}
	RETURN(result);
}
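
mdc_lock_flush() itself is not among these examples. A plausible sketch of its shape, assuming it mirrors the OSC flush path: write back dirty pages when a write lock is lost, then discard cached pages via mdc_lock_discard_pages() from Example #1.

/* Plausible sketch of mdc_lock_flush(), not the verbatim function:
 * write back dirty pages for a CLM_WRITE lock, then discard the
 * cached pages in the range. */
static int mdc_lock_flush(const struct lu_env *env, struct osc_object *obj,
			  pgoff_t start, pgoff_t end, enum cl_lock_mode mode,
			  bool discard)
{
	int rc = 0;
	int rc2;

	if (mode == CLM_WRITE)
		rc = osc_cache_writeback_range(env, obj, start, end, 1,
					       discard);

	rc2 = mdc_lock_discard_pages(env, obj, start, end, discard);
	if (rc == 0 && rc2 < 0)
		rc = rc2;

	return rc;
}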
Example #5
/**
 * Updates object attributes from a lock value block (lvb) received together
 * with the DLM lock reply from the server. Copy of osc_update_enqueue()
 * logic.
 *
 * This can be optimized to not update attributes when the lock is the result
 * of a local match.
 *
 * Called under lock and resource spin-locks.
 */
static void osc_lock_lvb_update(const struct lu_env *env,
				struct osc_object *osc,
				struct ldlm_lock *dlmlock,
				struct ost_lvb *lvb)
{
	struct cl_object  *obj = osc2cl(osc);
	struct lov_oinfo  *oinfo = osc->oo_oinfo;
	struct cl_attr    *attr = &osc_env_info(env)->oti_attr;
	unsigned           valid;

	ENTRY;

	valid = CAT_BLOCKS | CAT_ATIME | CAT_CTIME | CAT_MTIME | CAT_SIZE;
	if (lvb == NULL) {
		LASSERT(dlmlock != NULL);
		lvb = dlmlock->l_lvb_data;
	}
	cl_lvb2attr(attr, lvb);

	cl_object_attr_lock(obj);
	if (dlmlock != NULL) {
		__u64 size;

		check_res_locked(dlmlock->l_resource);

		LASSERT(lvb == dlmlock->l_lvb_data);
		size = lvb->lvb_size;

		/* Extend KMS up to the end of this lock and no further.
		 * A lock on [x, y] means a KMS of up to y + 1 bytes! */
		if (size > dlmlock->l_policy_data.l_extent.end)
			size = dlmlock->l_policy_data.l_extent.end + 1;
		if (size >= oinfo->loi_kms) {
			LDLM_DEBUG(dlmlock,
				   "lock acquired, setting rss=%llu, kms=%llu",
				   lvb->lvb_size, size);
			valid |= CAT_KMS;
			attr->cat_kms = size;
		} else {
			LDLM_DEBUG(dlmlock,
				   "lock acquired, setting rss=%llu; leaving kms=%llu, end=%llu",
				   lvb->lvb_size, oinfo->loi_kms,
				   dlmlock->l_policy_data.l_extent.end);
		}
		ldlm_lock_allow_match_locked(dlmlock);
	}

	cl_object_attr_update(env, obj, attr, valid);
	cl_object_attr_unlock(obj);

	EXIT;
}
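
To make the capping rule concrete, a small illustration with hypothetical numbers:

	/* Hypothetical numbers illustrating the rule above: a lock on
	 * bytes [0, 4095] justifies a KMS of at most 4096, so a
	 * server-reported size of 1 MiB is clamped before being
	 * compared with loi_kms. */
	__u64 size = 1048576;	/* lvb->lvb_size reported by the server */
	__u64 end = 4095;	/* l_policy_data.l_extent.end */

	if (size > end)
		size = end + 1;	/* size is now 4096 */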
Example #6
int osc_object_invalidate(const struct lu_env *env, struct osc_object *osc)
{
	struct l_wait_info lwi = { 0 };
	ENTRY;

	CDEBUG(D_INODE, "Invalidate osc object: %p, # of active IOs: %d\n",
	       osc, atomic_read(&osc->oo_nr_ios));

	l_wait_event(osc->oo_io_waitq, atomic_read(&osc->oo_nr_ios) == 0, &lwi);

	/* Discard all dirty pages of this object. */
	osc_cache_truncate_start(env, osc, 0, NULL);

	/* Discard all cached pages. */
	osc_lock_discard_pages(env, osc, 0, CL_PAGE_EOF, CLM_WRITE);

	/* Clear the ast data of the dlm lock. Do this after discarding all
	 * pages. */
	osc_object_prune(env, osc2cl(osc));

	RETURN(0);
}
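
The wait on oo_io_waitq assumes that every IO against the object maintains oo_nr_ios. Hypothetical helpers showing that convention (the names are illustrative, not from the source):

/* Hypothetical helpers showing the counting convention the wait above
 * relies on: bump oo_nr_ios when an IO starts, wake the invalidation
 * waiter when the last one finishes. */
static void example_io_start(struct osc_object *osc)
{
	atomic_inc(&osc->oo_nr_ios);
}

static void example_io_finish(struct osc_object *osc)
{
	if (atomic_dec_and_test(&osc->oo_nr_ios))
		wake_up_all(&osc->oo_io_waitq);
}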
Example #7
static int osc_lock_upcall_agl(void *cookie, struct lustre_handle *lockh,
			       int errcode)
{
	struct osc_object *osc = cookie;
	struct ldlm_lock *dlmlock;
	struct lu_env *env;
	u16 refcheck;

	env = cl_env_get(&refcheck);
	LASSERT(!IS_ERR(env));

	if (errcode == ELDLM_LOCK_MATCHED) {
		errcode = ELDLM_OK;
		goto out;
	}

	if (errcode != ELDLM_OK)
		goto out;

	dlmlock = ldlm_handle2lock(lockh);
	LASSERT(dlmlock);

	lock_res_and_lock(dlmlock);
	LASSERT(dlmlock->l_granted_mode == dlmlock->l_req_mode);

	/* there is no osc_lock associated with AGL lock */
	osc_lock_lvb_update(env, osc, dlmlock, NULL);

	unlock_res_and_lock(dlmlock);
	LDLM_LOCK_PUT(dlmlock);

out:
	cl_object_put(env, osc2cl(osc));
	cl_env_put(env, &refcheck);
	return ldlm_error2errno(errcode);
}
Example #8
/**
 * Helper for osc_dlm_blocking_ast() handling discrepancies between cl_lock
 * and ldlm_lock caches.
 */
static int osc_dlm_blocking_ast0(const struct lu_env *env,
				 struct ldlm_lock *dlmlock,
				 void *data, int flag)
{
	struct cl_object *obj = NULL;
	int result = 0;
	int discard;
	enum cl_lock_mode mode = CLM_READ;

	LASSERT(flag == LDLM_CB_CANCELING);

	lock_res_and_lock(dlmlock);
	if (dlmlock->l_granted_mode != dlmlock->l_req_mode) {
		dlmlock->l_ast_data = NULL;
		unlock_res_and_lock(dlmlock);
		return 0;
	}

	discard = ldlm_is_discard_data(dlmlock);
	if (dlmlock->l_granted_mode & (LCK_PW | LCK_GROUP))
		mode = CLM_WRITE;

	if (dlmlock->l_ast_data) {
		obj = osc2cl(dlmlock->l_ast_data);
		dlmlock->l_ast_data = NULL;

		cl_object_get(obj);
	}

	unlock_res_and_lock(dlmlock);

	/* if l_ast_data is NULL, the dlmlock was enqueued by AGL or
	 * the object has been destroyed.
	 */
	if (obj) {
		struct ldlm_extent *extent = &dlmlock->l_policy_data.l_extent;
		struct cl_attr *attr = &osc_env_info(env)->oti_attr;
		__u64 old_kms;

		/* Destroy pages covered by the extent of the DLM lock */
		result = osc_lock_flush(cl2osc(obj),
					cl_index(obj, extent->start),
					cl_index(obj, extent->end),
					mode, discard);

		/* losing a lock, update kms */
		lock_res_and_lock(dlmlock);
		cl_object_attr_lock(obj);
		/* Must get the value under the lock to avoid race. */
		old_kms = cl2osc(obj)->oo_oinfo->loi_kms;
		/* Update the kms. Need to loop all granted locks.
		 * Not a problem for the client
		 */
		attr->cat_kms = ldlm_extent_shift_kms(dlmlock, old_kms);

		cl_object_attr_set(env, obj, attr, CAT_KMS);
		cl_object_attr_unlock(obj);
		unlock_res_and_lock(dlmlock);

		cl_object_put(env, obj);
	}
	return result;
}
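
Unlike the MDC variant in Example #4, which zeroes KMS outright because a DOM lock covers the whole object, this path recomputes KMS from the locks that remain granted on the resource. A hypothetical illustration of the recomputation:

	/* Hypothetical illustration: suppose the resource has granted
	 * extents [0, 4095] and [4096, 8191] and old_kms == 8192.
	 * Cancelling the second lock leaves [0, 4095] as the highest
	 * covered extent, so ldlm_extent_shift_kms() yields 4096. */
	__u64 remaining_end = 4095;		/* end of highest surviving extent */
	__u64 new_kms = remaining_end + 1;	/* == 4096, the recomputed KMS */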
Example #9
static int osc_io_commit_async(const struct lu_env *env,
				const struct cl_io_slice *ios,
				struct cl_page_list *qin, int from, int to,
				cl_commit_cbt cb)
{
	struct cl_io    *io = ios->cis_io;
	struct osc_io   *oio = cl2osc_io(env, ios);
	struct osc_object *osc = cl2osc(ios->cis_obj);
	struct cl_page  *page;
	struct cl_page  *last_page;
	struct osc_page *opg;
	int result = 0;
	ENTRY;

	LASSERT(qin->pl_nr > 0);

	/* Handle partial page cases */
	last_page = cl_page_list_last(qin);
	if (oio->oi_lockless) {
		page = cl_page_list_first(qin);
		if (page == last_page) {
			cl_page_clip(env, page, from, to);
		} else {
			if (from != 0)
				cl_page_clip(env, page, from, PAGE_SIZE);
			if (to != PAGE_SIZE)
				cl_page_clip(env, last_page, 0, to);
		}
	}

	while (qin->pl_nr > 0) {
		struct osc_async_page *oap;

		page = cl_page_list_first(qin);
		opg = osc_cl_page_osc(page, osc);
		oap = &opg->ops_oap;

		if (!list_empty(&oap->oap_rpc_item)) {
			CDEBUG(D_CACHE, "Busy oap %p page %p for submit.\n",
			       oap, opg);
			result = -EBUSY;
			break;
		}

		/* The page may be already in dirty cache. */
		if (list_empty(&oap->oap_pending_item)) {
			result = osc_page_cache_add(env, &opg->ops_cl, io);
			if (result != 0)
				break;
		}

		osc_page_touch_at(env, osc2cl(osc), osc_index(opg),
				  page == last_page ? to : PAGE_SIZE);

		cl_page_list_del(env, qin, page);

		(*cb)(env, io, page);
		/* Can't access the page any more; it may be in transfer
		 * and complete at any time. */
	}

	/* For a sync write, the kernel will wait for this page to be flushed
	 * before osc_io_end() is called, so release it earlier.
	 * For mkwrite(), it is known that there are no further pages. */
	if (cl_io_is_sync_write(io) && oio->oi_active != NULL) {
		osc_extent_release(env, oio->oi_active);
		oio->oi_active = NULL;
	}

	CDEBUG(D_INFO, "%d %d\n", qin->pl_nr, result);
	RETURN(result);
}
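
The cb argument has type cl_commit_cbt and is invoked once per page after the page has been queued. A stub showing the expected shape; example_commit_cb is a hypothetical name, with the typical duties summarized in the comment:

/* Hypothetical cl_commit_cbt implementation stub: called once per page
 * after osc_io_commit_async() has queued it. A real callback typically
 * marks the backing page dirty and drops its submission reference;
 * after it runs, the caller must not touch the page, as noted above. */
static void example_commit_cb(const struct lu_env *env, struct cl_io *io,
			      struct cl_page *page)
{
	/* ... mark the page dirty/uptodate and release the reference ... */
}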