Example #1
static int vvp_io_prepare_write(const struct lu_env *env,
                                const struct cl_io_slice *ios,
                                const struct cl_page_slice *slice,
                                unsigned from, unsigned to)
{
        struct cl_object *obj    = slice->cpl_obj;
        struct ccc_page  *cp     = cl2ccc_page(slice);
        struct cl_page   *pg     = slice->cpl_page;
        struct page      *vmpage = cp->cpg_page;

        int result;

        ENTRY;

        LINVRNT(cl_page_is_vmlocked(env, pg));
        LASSERT(vmpage->mapping->host == ccc_object_inode(obj));

        result = 0;

        CL_PAGE_HEADER(D_PAGE, env, pg, "preparing: [%d, %d]\n", from, to);
        if (!PageUptodate(vmpage)) {
                /*
                 * We're completely overwriting an existing page, so _don't_
                 * set it up to date until commit_write
                 */
                if (from == 0 && to == PAGE_CACHE_SIZE) {
                        CL_PAGE_HEADER(D_PAGE, env, pg, "full page write\n");
                        /* the original snippet poisons the undefined 'page';
                         * 'vmpage' is the VM page actually in scope here */
                        POISON_PAGE(vmpage, 0x11);
                } else
                        result = vvp_io_prepare_partial(env, ios->cis_io, obj,
                                                        pg, cp, from, to);
        } else
                CL_PAGE_HEADER(D_PAGE, env, pg, "uptodate\n");
        RETURN(result);
}
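Every example below leans on LINVRNT, Lustre's "expensive invariant" assertion, alongside the unconditional LASSERT. A minimal sketch of how LINVRNT is plausibly conditionally compiled (names follow libcfs conventions; treat the exact definition as an assumption):

/* Sketch, assuming libcfs conventions: LINVRNT() acts like LASSERT()
 * only when expensive checking is configured in; otherwise it reduces
 * to a no-op that at most type-checks its argument. */
#ifdef CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK
# define LINVRNT(exp) LASSERT(exp)
#else
# define LINVRNT(exp) ((void)sizeof !!(exp))
#endif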
Example #2
static int osd_oi_iam_refresh(struct osd_thread_info *oti, struct osd_oi *oi,
			     const struct dt_rec *rec, const struct dt_key *key,
			     struct thandle *th, bool insert)
{
	struct iam_container	*bag;
	struct iam_path_descr	*ipd;
	struct osd_thandle	*oh;
	int			rc;
	ENTRY;

	LASSERT(oi);
	LASSERT(oi->oi_inode);
	ll_vfs_dq_init(oi->oi_inode);

	bag = &oi->oi_dir.od_container;
	ipd = osd_idx_ipd_get(oti->oti_env, bag);
	if (unlikely(ipd == NULL))
		RETURN(-ENOMEM);

	oh = container_of0(th, struct osd_thandle, ot_super);
	LASSERT(oh->ot_handle != NULL);
	LASSERT(oh->ot_handle->h_transaction != NULL);
	if (insert)
		rc = iam_insert(oh->ot_handle, bag, (const struct iam_key *)key,
				(const struct iam_rec *)rec, ipd);
	else
		rc = iam_update(oh->ot_handle, bag, (const struct iam_key *)key,
				(const struct iam_rec *)rec, ipd);
	osd_ipd_put(oti->oti_env, bag, ipd);
	/* 'obj' is not defined in this function; the check compiles only
	 * because osd_invariant()/LINVRNT compile away in non-debug builds */
	LINVRNT(osd_invariant(obj));
	RETURN(rc);
}
Example #3
static int osd_oi_iam_delete(struct osd_thread_info *oti, struct osd_oi *oi,
			     const struct dt_key *key, handle_t *th)
{
	struct iam_container	*bag;
	struct iam_path_descr	*ipd;
	int			 rc;
	ENTRY;

	LASSERT(oi);
	LASSERT(oi->oi_inode);
	ll_vfs_dq_init(oi->oi_inode);

	bag = &oi->oi_dir.od_container;
	ipd = osd_idx_ipd_get(oti->oti_env, bag);
	if (unlikely(ipd == NULL))
		RETURN(-ENOMEM);

	LASSERT(th != NULL);
	LASSERT(th->h_transaction != NULL);

	rc = iam_delete(th, bag, (const struct iam_key *)key, ipd);
	osd_ipd_put(oti->oti_env, bag, ipd);
	/* as in Example #2, 'obj' is not defined in this scope */
	LINVRNT(osd_invariant(obj));
	RETURN(rc);
}
Example #4
static int osd_oi_iam_lookup(struct osd_thread_info *oti,
                             struct osd_oi *oi, struct dt_rec *rec,
                             const struct dt_key *key)
{
        struct iam_container  *bag;
        struct iam_iterator   *it = &oti->oti_idx_it;
        struct iam_path_descr *ipd;
        int                    rc;
        ENTRY;

        LASSERT(oi);
        LASSERT(oi->oi_inode);

        bag = &oi->oi_dir.od_container;
        ipd = osd_idx_ipd_get(oti->oti_env, bag);
        /* the sibling functions above test 'ipd == NULL'; an IS_ERR()
         * check would miss a NULL return from osd_idx_ipd_get() */
        if (IS_ERR(ipd))
                RETURN(-ENOMEM);

        /* got ipd now we can start iterator. */
        iam_it_init(it, bag, 0, ipd);

        rc = iam_it_get(it, (struct iam_key *)key);
        if (rc > 0)
                iam_reccpy(&it->ii_path.ip_leaf, (struct iam_rec *)rec);
        iam_it_put(it);
        iam_it_fini(it);
        osd_ipd_put(oti->oti_env, bag, ipd);

        /* as in Examples #2 and #3, 'obj' is not defined in this scope */
        LINVRNT(osd_invariant(obj));

        RETURN(rc);
}
Example #5
static int osc_lock_unuse(const struct lu_env *env,
			  const struct cl_lock_slice *slice)
{
	struct osc_lock *ols = cl2osc_lock(slice);

	LINVRNT(osc_lock_invariant(ols));

	switch (ols->ols_state) {
	case OLS_NEW:
		LASSERT(!ols->ols_hold);
		LASSERT(ols->ols_agl);
		return 0;
	case OLS_UPCALL_RECEIVED:
		osc_lock_unhold(ols);
		/* fall through: an unheld upcall-received lock is detached
		 * the same way as an enqueued one */
	case OLS_ENQUEUED:
		LASSERT(!ols->ols_hold);
		osc_lock_detach(env, ols);
		ols->ols_state = OLS_NEW;
		return 0;
	case OLS_GRANTED:
		LASSERT(!ols->ols_glimpse);
		LASSERT(ols->ols_hold);
		/*
		 * Move lock into OLS_RELEASED state before calling
		 * osc_cancel_base() so that possible synchronous cancellation
		 * (that always happens e.g., for liblustre) sees that lock is
		 * released.
		 */
		ols->ols_state = OLS_RELEASED;
		return osc_lock_unhold(ols);
	default:
		CERROR("Impossible state: %d\n", ols->ols_state);
		LBUG();
	}
}
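The switch above steps through the osc_lock state machine. For orientation, here is an abridged sketch of the states these examples reference, in their assumed lifecycle order (the real enum in osc_cl_internal.h has additional members):

/* Abridged sketch of the lock lifecycle assumed by osc_lock_unuse():
 * NEW -> ENQUEUED -> UPCALL_RECEIVED -> GRANTED -> RELEASED */
enum osc_lock_state {
	OLS_NEW,		/* created, nothing enqueued yet */
	OLS_ENQUEUED,		/* enqueue RPC sent to the server */
	OLS_UPCALL_RECEIVED,	/* server replied, upcall being processed */
	OLS_GRANTED,		/* DLM lock granted and held */
	OLS_RELEASED,		/* hold dropped, lock on its way out */
};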
Example #6
static struct osc_io *cl2osc_io(const struct lu_env *env,
                                const struct cl_io_slice *slice)
{
        struct osc_io *oio = container_of0(slice, struct osc_io, oi_cl);
        LINVRNT(oio == osc_env_io(env));
        return oio;
}
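container_of0() is Lustre's NULL/ERR-tolerant take on the kernel's container_of(); slice-to-wrapper conversions like the one above use it so an error pointer propagates instead of being offset into garbage. A minimal sketch of the behavior (the real macro differs in detail):

/* Sketch only: like container_of(), but NULL and ERR_PTR() values
 * pass through unchanged instead of having the member offset applied. */
#define container_of0(ptr, type, member)                        \
	(IS_ERR_OR_NULL(ptr) ? (type *)(ptr)                    \
			     : container_of(ptr, type, member))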
Example #7
static void lov_key_fini(const struct lu_context *ctx,
			 struct lu_context_key *key, void *data)
{
	struct lov_thread_info *info = data;
	LINVRNT(list_empty(&info->lti_closure.clc_list));
	OBD_SLAB_FREE_PTR(info, lov_thread_kmem);
}
Example #8
File: lov_dev.c Project: Chong-Li/cse522
static void lov_key_fini(const struct lu_context *ctx,
			 struct lu_context_key *key, void *data)
{
	struct lov_thread_info *info = data;

	LINVRNT(list_empty(&info->lti_closure.clc_list));
	kmem_cache_free(lov_thread_kmem, info);
}
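Examples #7 and #8 are the same function from two different trees: the older one frees through Lustre's OBD_SLAB_FREE_PTR() wrapper, while the staging-kernel version calls kmem_cache_free() directly. A rough sketch of the relationship (the real wrapper also does OBD-layer memory accounting; treat the exact expansion as an assumption):

/* Sketch: OBD_SLAB_FREE_PTR() is essentially kmem_cache_free() plus
 * debug-build memory bookkeeping in the OBD layer. */
#define OBD_SLAB_FREE_PTR(ptr, slab) kmem_cache_free((slab), (ptr))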
Example #9
void osc_lock_fini(const struct lu_env *env, struct cl_lock_slice *slice)
{
	struct osc_lock  *ols = cl2osc_lock(slice);

	LINVRNT(osc_lock_invariant(ols));
	LASSERT(ols->ols_dlmlock == NULL);

	OBD_SLAB_FREE_PTR(ols, osc_lock_kmem);
}
Example #10
File: lov_page.c Project: Chong-Li/cse522
static int lov_page_own(const struct lu_env *env,
			const struct cl_page_slice *slice, struct cl_io *io,
			int nonblock)
{
	struct lov_io     *lio = lov_env_io(env);
	struct lov_io_sub *sub;

	LINVRNT(lov_page_invariant(slice));
	LINVRNT(!cl2lov_page(slice)->lps_invalid);

	sub = lov_page_subio(env, lio, slice);
	if (!IS_ERR(sub)) {
		lov_sub_page(slice)->cp_owner = sub->sub_io;
		lov_sub_put(sub);
	} else
		LBUG(); /* Arrgh */
	return 0;
}
Example #11
File: osc_lock.c Project: AK101111/linux
static void osc_lock_granted(const struct lu_env *env, struct osc_lock *oscl,
			     struct lustre_handle *lockh, bool lvb_update)
{
	struct ldlm_lock *dlmlock;

	dlmlock = ldlm_handle2lock_long(lockh, 0);
	LASSERT(dlmlock);

	/* lock reference taken by ldlm_handle2lock_long() is
	 * owned by osc_lock and released in osc_lock_detach()
	 */
	lu_ref_add(&dlmlock->l_reference, "osc_lock", oscl);
	oscl->ols_has_ref = 1;

	LASSERT(!oscl->ols_dlmlock);
	oscl->ols_dlmlock = dlmlock;

	/* This may be a matched lock for glimpse request, do not hold
	 * lock reference in that case.
	 */
	if (!oscl->ols_glimpse) {
		/* hold a refc for non glimpse lock which will
		 * be released in osc_lock_cancel()
		 */
		lustre_handle_copy(&oscl->ols_handle, lockh);
		ldlm_lock_addref(lockh, oscl->ols_einfo.ei_mode);
		oscl->ols_hold = 1;
	}

	/* Lock must have been granted. */
	lock_res_and_lock(dlmlock);
	if (dlmlock->l_granted_mode == dlmlock->l_req_mode) {
		struct ldlm_extent *ext = &dlmlock->l_policy_data.l_extent;
		struct cl_lock_descr *descr = &oscl->ols_cl.cls_lock->cll_descr;

		/* extend the lock extent, otherwise it will have problem when
		 * we decide whether to grant a lockless lock.
		 */
		descr->cld_mode = osc_ldlm2cl_lock(dlmlock->l_granted_mode);
		descr->cld_start = cl_index(descr->cld_obj, ext->start);
		descr->cld_end = cl_index(descr->cld_obj, ext->end);
		descr->cld_gid = ext->gid;

		/* no lvb update for matched lock */
		if (lvb_update) {
			LASSERT(oscl->ols_flags & LDLM_FL_LVB_READY);
			osc_lock_lvb_update(env, cl2osc(oscl->ols_cl.cls_obj),
					    dlmlock, NULL);
		}
		LINVRNT(osc_lock_invariant(oscl));
	}
	unlock_res_and_lock(dlmlock);

	LASSERT(oscl->ols_state != OLS_GRANTED);
	oscl->ols_state = OLS_GRANTED;
}
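osc_lock_granted() widens the cl_lock description to the extent the server actually granted, converting byte offsets to page indices with cl_index(). Conceptually it is a shift by the page size (a sketch; cl_index_sketch is a hypothetical stand-in for the real helper):

/* Hypothetical sketch: cl_index() maps a byte offset within a
 * cl_object to the index of the page covering it. */
static inline pgoff_t cl_index_sketch(const struct cl_object *obj,
				      loff_t offset)
{
	return (pgoff_t)(offset >> PAGE_SHIFT);
}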
Example #12
File: osc_lock.c Project: AK101111/linux
static void osc_lock_fini(const struct lu_env *env,
			  struct cl_lock_slice *slice)
{
	struct osc_lock *ols = cl2osc_lock(slice);

	LINVRNT(osc_lock_invariant(ols));
	LASSERT(!ols->ols_dlmlock);

	kmem_cache_free(osc_lock_kmem, ols);
}
Example #13
File: lov_page.c Project: Chong-Li/cse522
static int lov_page_cache_add(const struct lu_env *env,
			      const struct cl_page_slice *slice,
			      struct cl_io *io)
{
	struct lov_io     *lio = lov_env_io(env);
	struct lov_io_sub *sub;
	int rc = 0;

	LINVRNT(lov_page_invariant(slice));
	LINVRNT(!cl2lov_page(slice)->lps_invalid);

	sub = lov_page_subio(env, lio, slice);
	if (!IS_ERR(sub)) {
		rc = cl_page_cache_add(sub->sub_env, sub->sub_io,
				       slice->cpl_page->cp_child, CRT_WRITE);
		lov_sub_put(sub);
	} else {
		rc = PTR_ERR(sub);
		CL_PAGE_DEBUG(D_ERROR, env, slice->cpl_page, "rc = %d\n", rc);
	}
	return rc;
}
Example #14
File: lov_page.c Project: Chong-Li/cse522
static void lov_page_fini(const struct lu_env *env,
			  struct cl_page_slice *slice)
{
	struct cl_page  *sub = lov_sub_page(slice);

	LINVRNT(lov_page_invariant(slice));

	if (sub != NULL) {
		LASSERT(sub->cp_state == CPS_FREEING);
		lu_ref_del(&sub->cp_reference, "lov", sub->cp_parent);
		sub->cp_parent = NULL;
		slice->cpl_page->cp_child = NULL;
		cl_page_put(env, sub);
	}
}
Example #15
static void osc_lock_fini(const struct lu_env *env,
			  struct cl_lock_slice *slice)
{
	struct osc_lock  *ols = cl2osc_lock(slice);

	LINVRNT(osc_lock_invariant(ols));
	/*
	 * ->ols_hold can still be true at this point if, for example, a
	 * thread that requested a lock was killed (and released a reference
	 * to the lock), before reply from a server was received. In this case
	 * lock is destroyed immediately after upcall.
	 */
	osc_lock_unhold(ols);
	LASSERT(ols->ols_lock == NULL);
	LASSERT(atomic_read(&ols->ols_pageref) == 0 ||
		atomic_read(&ols->ols_pageref) == _PAGEREF_MAGIC);

	OBD_SLAB_FREE_PTR(ols, osc_lock_kmem);
}
Example #16
/**
 * Called when a lock is granted, from an upcall (when server returned a
 * granted lock), or from completion AST, when server returned a blocked lock.
 *
 * Called under lock and resource spin-locks, that are released temporarily
 * here.
 */
static void osc_lock_granted(const struct lu_env *env, struct osc_lock *olck,
			     struct ldlm_lock *dlmlock, int rc)
{
	struct ldlm_extent   *ext;
	struct cl_lock       *lock;
	struct cl_lock_descr *descr;

	LASSERT(dlmlock->l_granted_mode == dlmlock->l_req_mode);

	ENTRY;
	if (olck->ols_state < OLS_GRANTED) {
		lock  = olck->ols_cl.cls_lock;
		ext   = &dlmlock->l_policy_data.l_extent;
		descr = &osc_env_info(env)->oti_descr;
		descr->cld_obj = lock->cll_descr.cld_obj;

		/* XXX check that ->l_granted_mode is valid. */
		descr->cld_mode  = osc_ldlm2cl_lock(dlmlock->l_granted_mode);
		descr->cld_start = cl_index(descr->cld_obj, ext->start);
		descr->cld_end   = cl_index(descr->cld_obj, ext->end);
		descr->cld_gid   = ext->gid;
		/*
		 * tell upper layers the extent of the lock that was actually
		 * granted
		 */
		olck->ols_state = OLS_GRANTED;
		osc_lock_lvb_update(env, olck, rc);

		/* release DLM spin-locks to allow cl_lock_{modify,signal}()
		 * to take a semaphore on a parent lock. This is safe, because
		 * spin-locks are needed to protect consistency of
		 * dlmlock->l_*_mode and LVB, and we have finished processing
		 * them. */
		unlock_res_and_lock(dlmlock);
		cl_lock_modify(env, lock, descr);
		cl_lock_signal(env, lock);
		LINVRNT(osc_lock_invariant(olck));
		lock_res_and_lock(dlmlock);
	}
	EXIT;
}
Example #17
File: osc_object.c Project: 19Dan01/linux
static struct osc_object *lu2osc(const struct lu_object *obj)
{
	LINVRNT(osc_is_object(obj));
	return container_of0(obj, struct osc_object, oo_cl.co_lu);
}
Example #18
File: osc_io.c Project: 383530895/linux
static struct osc_req *cl2osc_req(const struct cl_req_slice *slice)
{
	LINVRNT(slice->crs_dev->cd_lu_dev.ld_type == &osc_device_type);
	return container_of0(slice, struct osc_req, or_cl);
}
Example #19
static int vvp_io_commit_write(const struct lu_env *env,
                               const struct cl_io_slice *ios,
                               const struct cl_page_slice *slice,
                               unsigned from, unsigned to)
{
        struct cl_object  *obj    = slice->cpl_obj;
        struct cl_io      *io     = ios->cis_io;
        struct ccc_page   *cp     = cl2ccc_page(slice);
        struct cl_page    *pg     = slice->cpl_page;
        struct inode      *inode  = ccc_object_inode(obj);
        struct ll_sb_info *sbi    = ll_i2sbi(inode);
        struct ll_inode_info *lli = ll_i2info(inode);
        struct page       *vmpage = cp->cpg_page;

        int    result;
        int    tallyop;
        loff_t size;

        ENTRY;

        LINVRNT(cl_page_is_vmlocked(env, pg));
        LASSERT(vmpage->mapping->host == inode);

        LU_OBJECT_HEADER(D_INODE, env, &obj->co_lu, "committing page write\n");
        CL_PAGE_HEADER(D_PAGE, env, pg, "committing: [%d, %d]\n", from, to);

        /*
         * queue a write for some time in the future the first time we
         * dirty the page.
         *
         * This is different from what other file systems do: they usually
         * just mark page (and some of its buffers) dirty and rely on
         * balance_dirty_pages() to start a write-back. Lustre wants write-back
         * to be started earlier for the following reasons:
         *
         *     (1) with a large number of clients we need to limit the amount
         *     of cached data on the clients a lot;
         *
         *     (2) large compute jobs generally want compute-only then io-only
         *     and the IO should complete as quickly as possible;
         *
         *     (3) IO is batched up to the RPC size and is async until the
         *     client max cache is hit
         *     (/proc/fs/lustre/osc/OSC.../max_dirty_mb)
         *
         */
        if (!PageDirty(vmpage)) {
                tallyop = LPROC_LL_DIRTY_MISSES;
                result = cl_page_cache_add(env, io, pg, CRT_WRITE);
                if (result == 0) {
                        /* page was added into cache successfully. */
                        set_page_dirty(vmpage);
                        vvp_write_pending(cl2ccc(obj), cp);
                } else if (result == -EDQUOT) {
                        pgoff_t last_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;
                        bool need_clip = true;

                        /*
                         * Client ran out of disk space grant. Possible
                         * strategies are:
                         *
                         *     (a) do a sync write, renewing grant;
                         *
                         *     (b) stop writing on this stripe, switch to the
                         *     next one.
                         *
                         * (b) is a part of "parallel io" design that is the
                         * ultimate goal. (a) is what "old" client did, and
                         * what the new code continues to do for the time
                         * being.
                         */
                        if (last_index > pg->cp_index) {
                                to = PAGE_CACHE_SIZE;
                                need_clip = false;
                        } else if (last_index == pg->cp_index) {
                                int size_to = i_size_read(inode) & ~CFS_PAGE_MASK;
                                if (to < size_to)
                                        to = size_to;
                        }
                        if (need_clip)
                                cl_page_clip(env, pg, 0, to);
                        result = vvp_page_sync_io(env, io, pg, cp, CRT_WRITE);
                        if (result)
                                CERROR("Write page %lu of inode %p failed %d\n",
                                       pg->cp_index, inode, result);
                }
                /* the snippet is truncated at this point in the source; the
                 * remainder of vvp_io_commit_write() is not shown */
Example #20
/**
 * Helper for osc_dlm_blocking_ast() handling discrepancies between cl_lock
 * and ldlm_lock caches.
 */
static int osc_dlm_blocking_ast0(const struct lu_env *env,
				 struct ldlm_lock *dlmlock,
				 void *data, int flag)
{
	struct osc_lock *olck;
	struct cl_lock  *lock;
	int result;
	int cancel;

	LASSERT(flag == LDLM_CB_BLOCKING || flag == LDLM_CB_CANCELING);

	cancel = 0;
	olck = osc_ast_data_get(dlmlock);
	if (olck != NULL) {
		lock = olck->ols_cl.cls_lock;
		cl_lock_mutex_get(env, lock);
		LINVRNT(osc_lock_invariant(olck));
		if (olck->ols_ast_wait) {
			/* wake up osc_lock_use() */
			cl_lock_signal(env, lock);
			olck->ols_ast_wait = 0;
		}
		/*
		 * Lock might have been canceled while this thread was
		 * sleeping for lock mutex, but olck is pinned in memory.
		 */
		if (olck == dlmlock->l_ast_data) {
			/*
			 * NOTE: DLM sends blocking AST's for failed locks
			 *       (that are still in pre-OLS_GRANTED state)
			 *       too, and they have to be canceled otherwise
			 *       DLM lock is never destroyed and stuck in
			 *       the memory.
			 *
			 *       Alternatively, ldlm_cli_cancel() can be
			 *       called here directly for osc_locks with
			 *       ols_state < OLS_GRANTED to maintain an
			 *       invariant that ->clo_cancel() is only called
			 *       for locks that were granted.
			 */
			LASSERT(data == olck);
			osc_lock_blocking(env, dlmlock,
					  olck, flag == LDLM_CB_BLOCKING);
		} else
			cancel = 1;
		cl_lock_mutex_put(env, lock);
		osc_ast_data_put(env, olck);
	} else
		/*
		 * DLM lock exists, but there is no cl_lock attached to it.
		 * This is a `normal' race. cl_object and its cl_lock's can be
		 * removed by memory pressure, together with all pages.
		 */
		cancel = (flag == LDLM_CB_BLOCKING);

	if (cancel) {
		struct lustre_handle *lockh;

		lockh = &osc_env_info(env)->oti_handle;
		ldlm_lock2handle(dlmlock, lockh);
		result = ldlm_cli_cancel(lockh, LCF_ASYNC);
	} else
		result = 0;
	return result;
}
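The cancel path above shows the handle-based cancellation idiom: convert the ldlm_lock to a lustre_handle, then cancel through the handle. A minimal hedged sketch of the same idiom in a standalone blocking AST (demo_blocking_ast is hypothetical; the signature follows the LDLM_CB_* flag convention used above):

/* Hypothetical minimal blocking AST reusing the idiom from
 * osc_dlm_blocking_ast0(): cancel the lock via its handle. */
static int demo_blocking_ast(struct ldlm_lock *dlmlock,
			     struct ldlm_lock_desc *desc,
			     void *data, int flag)
{
	struct lustre_handle lockh;

	if (flag == LDLM_CB_BLOCKING) {
		ldlm_lock2handle(dlmlock, &lockh);
		return ldlm_cli_cancel(&lockh, LCF_ASYNC);
	}
	return 0;
}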