Example #1
int cl_get_grouplock(struct cl_object *obj, unsigned long gid, int nonblock,
                     struct ccc_grouplock *cg)
{
        struct lu_env          *env;
        struct cl_io           *io;
        struct cl_lock         *lock;
        struct cl_lock_descr   *descr;
        __u32                   enqflags;
        int                     refcheck;
        int                     rc;

        env = cl_env_get(&refcheck);
        if (IS_ERR(env))
                return PTR_ERR(env);

        io = ccc_env_thread_io(env);
        io->ci_obj = obj;
	io->ci_ignore_layout = 1;

	rc = cl_io_init(env, io, CIT_MISC, io->ci_obj);
	if (rc) {
		cl_io_fini(env, io);
		cl_env_put(env, &refcheck);
		/* Does not make sense to take GL for released layout */
		if (rc > 0)
			rc = -ENOTSUPP;
		return rc;
        }

        descr = &ccc_env_info(env)->cti_descr;
        descr->cld_obj = obj;
        descr->cld_start = 0;
        descr->cld_end = CL_PAGE_EOF;
        descr->cld_gid = gid;
        descr->cld_mode = CLM_GROUP;

	enqflags = CEF_MUST | (nonblock ? CEF_NONBLOCK : 0);
	descr->cld_enq_flags = enqflags;

	lock = cl_lock_request(env, io, descr, GROUPLOCK_SCOPE, current);
	if (IS_ERR(lock)) {
		cl_io_fini(env, io);
		cl_env_put(env, &refcheck);
		return PTR_ERR(lock);
	}

        cg->cg_env  = cl_env_get(&refcheck);
        cg->cg_io   = io;
        cg->cg_lock = lock;
        cg->cg_gid  = gid;
        LASSERT(cg->cg_env == env);

        cl_env_unplant(env, &refcheck);
        return 0;
}
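
All of the snippets in this collection share the same per-thread environment discipline: take a reference on the cl environment with cl_env_get(), do the work, and drop the reference with cl_env_put() on every exit path. A minimal, hypothetical sketch of that pattern (the function name and body are illustrative only, not code from the tree):

static int example_with_env(struct cl_object *obj)
{
	struct lu_env *env;
	int refcheck;	/* __u16 in newer trees, see Examples 4, 9 and 10 */
	int rc = 0;

	env = cl_env_get(&refcheck);
	if (IS_ERR(env))
		return PTR_ERR(env);

	/* ... use env with the cl_ and lu_ APIs, e.g. cl_io_init()/cl_io_fini() ... */

	cl_env_put(env, &refcheck);
	return rc;
}
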
Example #2
int cl_get_grouplock(struct cl_object *obj, unsigned long gid, int nonblock,
                     struct ccc_grouplock *cg)
{
        struct lu_env          *env;
        struct cl_io           *io;
        struct cl_lock         *lock;
        struct cl_lock_descr   *descr;
        __u32                   enqflags;
        int                     refcheck;
        int                     rc;

        env = cl_env_get(&refcheck);
        if (IS_ERR(env))
                return PTR_ERR(env);

        io = &ccc_env_info(env)->cti_io;
        io->ci_obj = obj;

        rc = cl_io_init(env, io, CIT_MISC, io->ci_obj);
        if (rc) {
                LASSERT(rc < 0);
                cl_env_put(env, &refcheck);
                return rc;
        }

        descr = &ccc_env_info(env)->cti_descr;
        descr->cld_obj = obj;
        descr->cld_start = 0;
        descr->cld_end = CL_PAGE_EOF;
        descr->cld_gid = gid;
        descr->cld_mode = CLM_GROUP;

        enqflags = CEF_MUST | (nonblock ? CEF_NONBLOCK : 0);
        descr->cld_enq_flags = enqflags;

        lock = cl_lock_request(env, io, descr, GROUPLOCK_SCOPE, cfs_current());
        if (IS_ERR(lock)) {
                cl_io_fini(env, io);
                cl_env_put(env, &refcheck);
                return PTR_ERR(lock);
        }

        cg->cg_env = cl_env_get(&refcheck);
        cg->cg_lock = lock;
        cg->cg_gid = gid;
        LASSERT(cg->cg_env == env);

        cl_env_unplant(env, &refcheck);
        return 0;
}
Example #3
int cl_sb_fini(struct super_block *sb)
{
    struct ll_sb_info *sbi;
    struct lu_env     *env;
    struct cl_device  *cld;
    int		refcheck;
    int		result;

    sbi = ll_s2sbi(sb);
    env = cl_env_get(&refcheck);
    if (!IS_ERR(env)) {
        cld = sbi->ll_cl;

        if (cld != NULL) {
            cl_stack_fini(env, cld);
            sbi->ll_cl = NULL;
            sbi->ll_site = NULL;
        }
        cl_env_put(env, &refcheck);
        result = 0;
    } else {
        CERROR("Cannot cleanup cl-stack due to memory shortage.\n");
        result = PTR_ERR(env);
    }
    /*
     * If the mount failed (sbi->ll_cl == NULL) and there are no other
     * mounts, stop device types manually (this usually happens
     * automatically when last device is destroyed).
     */
    lu_types_stop();
    return result;
}
Example #4
static int osc_lock_flush(struct osc_object *obj, pgoff_t start, pgoff_t end,
			  enum cl_lock_mode mode, bool discard)
{
	struct lu_env		*env;
	__u16			refcheck;
	int			rc = 0;
	int			rc2 = 0;

	ENTRY;

	env = cl_env_get(&refcheck);
	if (IS_ERR(env))
		RETURN(PTR_ERR(env));

	if (mode == CLM_WRITE) {
		rc = osc_cache_writeback_range(env, obj, start, end, 1,
					       discard);
		CDEBUG(D_CACHE, "object %p: [%lu -> %lu] %d pages were %s.\n",
		       obj, start, end, rc,
		       discard ? "discarded" : "written back");
		if (rc > 0)
			rc = 0;
	}

	rc2 = osc_lock_discard_pages(env, obj, start, end, discard);
	if (rc == 0 && rc2 < 0)
		rc = rc2;

	cl_env_put(env, &refcheck);
	RETURN(rc);
}
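
osc_lock_flush() first writes back (or discards) the dirty cache in the given range when the lock was a write lock, then discards any remaining pages; the first error encountered wins. A hedged caller sketch, reusing only names visible in these examples (the wrapper itself is hypothetical):

static int example_flush_whole_object(struct osc_object *obj, bool force)
{
	/* Flush everything covered by a CLM_WRITE lock; passing discard = true
	 * drops the pages instead of writing them back. */
	return osc_lock_flush(obj, 0, CL_PAGE_EOF, CLM_WRITE, force);
}
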
Example #5
int cl_sb_fini(struct super_block *sb)
{
	struct ll_sb_info *sbi;
	struct lu_env     *env;
	struct cl_device  *cld;
	u16 refcheck;
	int		result;

	sbi = ll_s2sbi(sb);
	env = cl_env_get(&refcheck);
	if (!IS_ERR(env)) {
		cld = sbi->ll_cl;

		if (cld) {
			cl_stack_fini(env, cld);
			sbi->ll_cl = NULL;
			sbi->ll_site = NULL;
		}
		cl_env_put(env, &refcheck);
		result = 0;
	} else {
		CERROR("Cannot cleanup cl-stack due to memory shortage.\n");
		result = PTR_ERR(env);
	}
	return result;
}
Example #6
int cl_sb_fini(struct llu_sb_info *sbi)
{
        struct lu_env *env;
        int refcheck;

        ENTRY;

        env = cl_env_get(&refcheck);
        if (IS_ERR(env))
                RETURN(PTR_ERR(env));

        if (sbi->ll_cl != NULL) {
                cl_stack_fini(env, sbi->ll_cl);
                sbi->ll_cl = NULL;
                sbi->ll_site = NULL;
        }
        cl_env_put(env, &refcheck);
        /*
         * If the mount failed (sbi->ll_cl == NULL) and there are no other
         * mounts, stop device types manually (this usually happens
         * automatically when last device is destroyed).
         */
        lu_types_stop();
        cl_env_cache_purge(~0);
        RETURN(0);
}
Example #7
static int cl_io_get(struct inode *inode, struct lu_env **envout,
		     struct cl_io **ioout, int *refcheck)
{
	struct lu_env	  *env;
	struct cl_io	   *io;
	struct ll_inode_info	*lli = ll_i2info(inode);
	struct cl_object       *clob = lli->lli_clob;
	int result;

	if (S_ISREG(inode->i_mode)) {
		env = cl_env_get(refcheck);
		if (!IS_ERR(env)) {
			io = vvp_env_thread_io(env);
			io->ci_obj = clob;
			*envout = env;
			*ioout  = io;
			result = 1;
		} else {
			result = PTR_ERR(env);
		}
	} else {
		result = 0;
	}
	return result;
}
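
cl_io_get() has a tri-state return: a positive value means env and io were set up (and the caller now owns the env reference), zero means the inode is not a regular file and there is nothing to do, and a negative value is an error. A hypothetical caller sketch built only from calls that appear elsewhere in these examples:

static int example_misc_io(struct inode *inode)
{
	struct lu_env *env;
	struct cl_io *io;
	int refcheck;
	int rc;

	rc = cl_io_get(inode, &env, &io, &refcheck);
	if (rc <= 0)
		return rc;	/* not a regular file, or an error */

	rc = cl_io_init(env, io, CIT_MISC, io->ci_obj);
	if (rc == 0)
		rc = cl_io_loop(env, io);
	cl_io_fini(env, io);
	cl_env_put(env, &refcheck);
	return rc;
}
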
Example #8
static int lov_layout_change(const struct lu_env *unused,
                             struct lov_object *lov,
                             const struct cl_object_conf *conf)
{
	int result;
	enum lov_layout_type llt = LLT_EMPTY;
	union lov_layout_state *state = &lov->u;
	const struct lov_layout_operations *old_ops;
	const struct lov_layout_operations *new_ops;

	struct cl_object_header *hdr = cl_object_header(&lov->lo_cl);
	void *cookie;
	struct lu_env *env;
	int refcheck;
	ENTRY;

	LASSERT(0 <= lov->lo_type && lov->lo_type < ARRAY_SIZE(lov_dispatch));

	if (conf->u.coc_md != NULL)
		llt = lov_type(conf->u.coc_md->lsm);
	LASSERT(0 <= llt && llt < ARRAY_SIZE(lov_dispatch));

	cookie = cl_env_reenter();
	env = cl_env_get(&refcheck);
	if (IS_ERR(env)) {
		cl_env_reexit(cookie);
		RETURN(PTR_ERR(env));
	}

	old_ops = &lov_dispatch[lov->lo_type];
	new_ops = &lov_dispatch[llt];

	result = old_ops->llo_delete(env, lov, &lov->u);
	if (result == 0) {
		old_ops->llo_fini(env, lov, &lov->u);

		LASSERT(cfs_atomic_read(&lov->lo_active_ios) == 0);
		LASSERT(hdr->coh_tree.rnode == NULL);
		LASSERT(hdr->coh_pages == 0);

		lov->lo_type = LLT_EMPTY;
		result = new_ops->llo_init(env,
					lu2lov_dev(lov->lo_cl.co_lu.lo_dev),
					lov, conf, state);
		if (result == 0) {
			new_ops->llo_install(env, lov, state);
			lov->lo_type = llt;
		} else {
			new_ops->llo_delete(env, lov, state);
			new_ops->llo_fini(env, lov, state);
			/* this file becomes an EMPTY file. */
		}
	}

	cl_env_put(env, &refcheck);
	cl_env_reexit(cookie);
	RETURN(result);
}
Example #9
int cl_get_grouplock(struct cl_object *obj, unsigned long gid, int nonblock,
		     struct ll_grouplock *lg)
{
        struct lu_env          *env;
        struct cl_io           *io;
        struct cl_lock         *lock;
        struct cl_lock_descr   *descr;
        __u32                   enqflags;
	__u16                   refcheck;
        int                     rc;

        env = cl_env_get(&refcheck);
        if (IS_ERR(env))
                return PTR_ERR(env);

	io = vvp_env_thread_io(env);
        io->ci_obj = obj;

	rc = cl_io_init(env, io, CIT_MISC, io->ci_obj);
	if (rc != 0) {
		cl_io_fini(env, io);
		cl_env_put(env, &refcheck);
		/* Does not make sense to take GL for released layout */
		if (rc > 0)
			rc = -ENOTSUPP;
		return rc;
	}

	lock = vvp_env_lock(env);
	descr = &lock->cll_descr;
        descr->cld_obj = obj;
        descr->cld_start = 0;
        descr->cld_end = CL_PAGE_EOF;
        descr->cld_gid = gid;
        descr->cld_mode = CLM_GROUP;

	enqflags = CEF_MUST | (nonblock ? CEF_NONBLOCK : 0);
	descr->cld_enq_flags = enqflags;

	rc = cl_lock_request(env, io, lock);
	if (rc < 0) {
		cl_io_fini(env, io);
		cl_env_put(env, &refcheck);
		return rc;
	}

	lg->lg_env = env;
	lg->lg_io = io;
	lg->lg_lock = lock;
	lg->lg_gid = gid;

	return 0;
}
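
In this newer variant the lock is embedded in the environment (vvp_env_lock()) and the env, io and lock handles stay alive inside struct ll_grouplock until the group lock is dropped. A hedged sketch of what the matching teardown has to do (the function name is hypothetical and the actual lock-release call is omitted, since it is not shown in these examples):

static void example_put_grouplock(struct ll_grouplock *lg)
{
	/* 1. release lg->lg_lock (the cl lock release call is not shown here) */
	/* 2. finish the io and drop the env taken in cl_get_grouplock();
	 *    passing NULL as refcheck skips the reference cross-check */
	cl_io_fini(lg->lg_env, lg->lg_io);
	cl_env_put(lg->lg_env, NULL);
	memset(lg, 0, sizeof(*lg));
}
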
Example #10
/**
 * Lock upcall function that is executed either when a reply to an ENQUEUE RPC is
 * received from a server, or after osc_enqueue_base() matched a local DLM
 * lock.
 */
static int osc_lock_upcall(void *cookie, struct lustre_handle *lockh,
			   int errcode)
{
	struct osc_lock *oscl = cookie;
	struct cl_lock_slice *slice = &oscl->ols_cl;
	struct lu_env *env;
	int rc;
	u16 refcheck;

	env = cl_env_get(&refcheck);
	/* should never happen, similar to osc_ldlm_blocking_ast(). */
	LASSERT(!IS_ERR(env));

	rc = ldlm_error2errno(errcode);
	if (oscl->ols_state == OLS_ENQUEUED) {
		oscl->ols_state = OLS_UPCALL_RECEIVED;
	} else if (oscl->ols_state == OLS_CANCELLED) {
		rc = -EIO;
	} else {
		CERROR("Impossible state: %d\n", oscl->ols_state);
		LBUG();
	}

	if (rc == 0)
		osc_lock_granted(env, oscl, lockh, errcode == ELDLM_OK);

	/* Error handling, some errors are tolerable. */
	if (oscl->ols_locklessable && rc == -EUSERS) {
		/* This is a tolerable error, turn this lock into
		 * lockless lock.
		 */
		osc_object_set_contended(cl2osc(slice->cls_obj));
		LASSERT(slice->cls_ops == &osc_lock_ops);

		/* Change this lock to ldlmlock-less lock. */
		osc_lock_to_lockless(env, oscl, 1);
		oscl->ols_state = OLS_GRANTED;
		rc = 0;
	} else if (oscl->ols_glimpse && rc == -ENAVAIL) {
		LASSERT(oscl->ols_flags & LDLM_FL_LVB_READY);
		osc_lock_lvb_update(env, cl2osc(slice->cls_obj),
				    NULL, &oscl->ols_lvb);
		/* Hide the error. */
		rc = 0;
	}

	if (oscl->ols_owner)
		cl_sync_io_note(env, oscl->ols_owner, rc);
	cl_env_put(env, &refcheck);

	return rc;
}
Example #11
/**
 * Implements Linux VM address_space::invalidatepage() method. This method is
 * called when the page is truncated from a file, either as a result of an
 * explicit truncate, or when the inode is removed from memory (as a result of
 * final iput(), umount, or memory pressure induced icache shrinking).
 *
 * [0, offset] bytes of the page remain valid (this covers a non-page-aligned
 * truncate). Lustre leaves the partially truncated page in the cache,
 * relying on struct inode::i_size to limit further accesses.
 */
static void ll_invalidatepage(struct page *vmpage,
#ifdef HAVE_INVALIDATE_RANGE
				unsigned int offset, unsigned int length
#else
				unsigned long offset
#endif
			     )
{
        struct inode     *inode;
        struct lu_env    *env;
        struct cl_page   *page;
        struct cl_object *obj;

        int refcheck;

        LASSERT(PageLocked(vmpage));
        LASSERT(!PageWriteback(vmpage));

	/*
	 * It is safe to not check anything in invalidatepage/releasepage
	 * below because they are run with page locked and all our io is
	 * happening with locked page too
	 */
#ifdef HAVE_INVALIDATE_RANGE
	if (offset == 0 && length == PAGE_CACHE_SIZE) {
#else
	if (offset == 0) {
#endif
                env = cl_env_get(&refcheck);
                if (!IS_ERR(env)) {
                        inode = vmpage->mapping->host;
                        obj = ll_i2info(inode)->lli_clob;
                        if (obj != NULL) {
                                page = cl_vmpage_page(vmpage, obj);
                                if (page != NULL) {
                                        cl_page_delete(env, page);
                                        cl_page_put(env, page);
                                }
                        } else
                                LASSERT(vmpage->private == 0);
                        cl_env_put(env, &refcheck);
                }
        }
}
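
For context, a hook like this is installed through the kernel's address_space_operations table. A minimal, hypothetical registration sketch (the real ll_aops in the tree contains many more methods):

static const struct address_space_operations example_aops = {
#ifdef HAVE_INVALIDATE_RANGE
	.invalidatepage	= ll_invalidatepage,	/* (page, offset, length) */
#else
	.invalidatepage	= ll_invalidatepage,	/* (page, offset) */
#endif
};
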
Example #12
File: rw26.c Project: hpc/lustre
/**
 * Implements Linux VM address_space::invalidatepage() method. This method is
 * called when the page is truncated from a file, either as a result of an
 * explicit truncate, or when the inode is removed from memory (as a result of
 * final iput(), umount, or memory pressure induced icache shrinking).
 *
 * [0, offset] bytes of the page remain valid (this covers a non-page-aligned
 * truncate). Lustre leaves the partially truncated page in the cache,
 * relying on struct inode::i_size to limit further accesses.
 */
static int cl_invalidatepage(struct page *vmpage, unsigned long offset)
{
        struct inode     *inode;
        struct lu_env    *env;
        struct cl_page   *page;
        struct cl_object *obj;

        int result;
        int refcheck;

        LASSERT(PageLocked(vmpage));
        LASSERT(!PageWriteback(vmpage));

        /*
         * It is safe to not check anything in invalidatepage/releasepage
         * below because they are run with page locked and all our io is
         * happening with locked page too
         */
        result = 0;
        if (offset == 0) {
                env = cl_env_get(&refcheck);
                if (!IS_ERR(env)) {
                        inode = vmpage->mapping->host;
                        obj = ll_i2info(inode)->lli_clob;
                        if (obj != NULL) {
                                page = cl_vmpage_page(vmpage, obj);
                                if (page != NULL) {
                                        lu_ref_add(&page->cp_reference,
                                                   "delete", vmpage);
                                        cl_page_delete(env, page);
                                        result = 1;
                                        lu_ref_del(&page->cp_reference,
                                                   "delete", vmpage);
                                        cl_page_put(env, page);
                                }
                        } else
                                LASSERT(vmpage->private == 0);
                        cl_env_put(env, &refcheck);
                }
        }
        return result;
}
Example #13
int cl_sb_init(struct llu_sb_info *sbi)
{
        struct cl_device  *cl;
        struct lu_env     *env;
        int rc = 0;
        int refcheck;

        env = cl_env_get(&refcheck);
        if (IS_ERR(env))
                RETURN(PTR_ERR(env));

        cl = cl_type_setup(env, NULL, &slp_device_type,
                           sbi->ll_dt_exp->exp_obd->obd_lu_dev);
        if (IS_ERR(cl))
                GOTO(out, rc = PTR_ERR(cl));

        sbi->ll_cl = cl;
        sbi->ll_site = cl2lu_dev(cl)->ld_site;
out:
        cl_env_put(env, &refcheck);
        RETURN(rc);
}
Example #14
int cl_sb_init(struct super_block *sb)
{
	struct ll_sb_info *sbi;
	struct cl_device  *cl;
	struct lu_env     *env;
	int rc = 0;
	u16 refcheck;

	sbi  = ll_s2sbi(sb);
	env = cl_env_get(&refcheck);
	if (!IS_ERR(env)) {
		cl = cl_type_setup(env, NULL, &vvp_device_type,
				   sbi->ll_dt_exp->exp_obd->obd_lu_dev);
		if (!IS_ERR(cl)) {
			sbi->ll_cl = cl;
			sbi->ll_site = cl2lu_dev(cl)->ld_site;
		}
		cl_env_put(env, &refcheck);
	} else {
		rc = PTR_ERR(env);
	}
	return rc;
}
Example #15
static int osc_lock_upcall_agl(void *cookie, struct lustre_handle *lockh,
			       int errcode)
{
	struct osc_object *osc = cookie;
	struct ldlm_lock *dlmlock;
	struct lu_env *env;
	u16 refcheck;

	env = cl_env_get(&refcheck);
	LASSERT(!IS_ERR(env));

	if (errcode == ELDLM_LOCK_MATCHED) {
		errcode = ELDLM_OK;
		goto out;
	}

	if (errcode != ELDLM_OK)
		goto out;

	dlmlock = ldlm_handle2lock(lockh);
	LASSERT(dlmlock);

	lock_res_and_lock(dlmlock);
	LASSERT(dlmlock->l_granted_mode == dlmlock->l_req_mode);

	/* there is no osc_lock associated with AGL lock */
	osc_lock_lvb_update(env, osc, dlmlock, NULL);

	unlock_res_and_lock(dlmlock);
	LDLM_LOCK_PUT(dlmlock);

out:
	cl_object_put(env, osc2cl(osc));
	cl_env_put(env, &refcheck);
	return ldlm_error2errno(errcode);
}
Example #16
static int osc_lock_upcall_speculative(void *cookie,
				       struct lustre_handle *lockh,
				       int errcode)
{
	struct osc_object	*osc = cookie;
	struct ldlm_lock	*dlmlock;
	struct lu_env           *env;
	__u16			 refcheck;
	ENTRY;

	env = cl_env_get(&refcheck);
	LASSERT(!IS_ERR(env));

	if (errcode == ELDLM_LOCK_MATCHED)
		GOTO(out, errcode = ELDLM_OK);

	if (errcode != ELDLM_OK)
		GOTO(out, errcode);

	dlmlock = ldlm_handle2lock(lockh);
	LASSERT(dlmlock != NULL);

	lock_res_and_lock(dlmlock);
	LASSERT(dlmlock->l_granted_mode == dlmlock->l_req_mode);

	/* there is no osc_lock associated with speculative locks */
	osc_lock_lvb_update(env, osc, dlmlock, NULL);

	unlock_res_and_lock(dlmlock);
	LDLM_LOCK_PUT(dlmlock);

out:
	cl_object_put(env, osc2cl(osc));
	cl_env_put(env, &refcheck);
	RETURN(ldlm_error2errno(errcode));
}
Example #17
static int lov_layout_change(const struct lu_env *unused,
			     struct lov_object *lov,
			     const struct cl_object_conf *conf)
{
	int result;
	enum lov_layout_type llt = LLT_EMPTY;
	union lov_layout_state *state = &lov->u;
	const struct lov_layout_operations *old_ops;
	const struct lov_layout_operations *new_ops;

	void *cookie;
	struct lu_env *env;
	int refcheck;

	LASSERT(0 <= lov->lo_type && lov->lo_type < ARRAY_SIZE(lov_dispatch));

	if (conf->u.coc_md)
		llt = lov_type(conf->u.coc_md->lsm);
	LASSERT(0 <= llt && llt < ARRAY_SIZE(lov_dispatch));

	cookie = cl_env_reenter();
	env = cl_env_get(&refcheck);
	if (IS_ERR(env)) {
		cl_env_reexit(cookie);
		return PTR_ERR(env);
	}

	CDEBUG(D_INODE, DFID" from %s to %s\n",
	       PFID(lu_object_fid(lov2lu(lov))),
	       llt2str(lov->lo_type), llt2str(llt));

	old_ops = &lov_dispatch[lov->lo_type];
	new_ops = &lov_dispatch[llt];

	result = cl_object_prune(env, &lov->lo_cl);
	if (result != 0)
		goto out;

	result = old_ops->llo_delete(env, lov, &lov->u);
	if (result == 0) {
		old_ops->llo_fini(env, lov, &lov->u);

		LASSERT(atomic_read(&lov->lo_active_ios) == 0);

		lov->lo_type = LLT_EMPTY;
		result = new_ops->llo_init(env,
					lu2lov_dev(lov->lo_cl.co_lu.lo_dev),
					lov, conf, state);
		if (result == 0) {
			new_ops->llo_install(env, lov, state);
			lov->lo_type = llt;
		} else {
			new_ops->llo_delete(env, lov, state);
			new_ops->llo_fini(env, lov, state);
			/* this file becomes an EMPTY file. */
		}
	}

out:
	cl_env_put(env, &refcheck);
	cl_env_reexit(cookie);
	return result;
}
Example #18
/**
 * Lustre implementation of a vm_operations_struct::fault() method, called by
 * the VM to serve a page fault (both in kernel and user space).
 *
 * \param vma - virtual memory area struct related to the page fault
 * \param vmf - structure describing the type and address of the fault
 *
 * \return allocated and filled _locked_ page for the address
 * \retval VM_FAULT_ERROR on general error
 * \retval NOPAGE_OOM when there is no memory to allocate a new page
 */
static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct lu_env           *env;
	struct cl_io            *io;
	struct vvp_io           *vio = NULL;
	struct page             *vmpage;
	unsigned long            ra_flags;
	int                      result = 0;
	int                      fault_ret = 0;
	__u16			 refcheck;
	ENTRY;

	env = cl_env_get(&refcheck);
	if (IS_ERR(env))
		RETURN(PTR_ERR(env));

	if (ll_sbi_has_fast_read(ll_i2sbi(file_inode(vma->vm_file)))) {
		/* do fast fault */
		ll_cl_add(vma->vm_file, env, NULL, LCC_MMAP);
		fault_ret = filemap_fault(vma, vmf);
		ll_cl_remove(vma->vm_file, env);

		/* - If there is no error, then the page was found in cache and
		 *   uptodate;
		 * - If VM_FAULT_RETRY is set, the page existed but failed to
		 *   lock. It will return to kernel and retry;
		 * - Otherwise, it should try normal fault under DLM lock. */
		if ((fault_ret & VM_FAULT_RETRY) ||
		    !(fault_ret & VM_FAULT_ERROR))
			GOTO(out, result = 0);

		fault_ret = 0;
	}

	io = ll_fault_io_init(env, vma, vmf->pgoff, &ra_flags);
	if (IS_ERR(io))
		GOTO(out, result = PTR_ERR(io));

	result = io->ci_result;
	if (result == 0) {
		vio = vvp_env_io(env);
		vio->u.fault.ft_vma       = vma;
		vio->u.fault.ft_vmpage    = NULL;
		vio->u.fault.ft_vmf = vmf;
		vio->u.fault.ft_flags = 0;
		vio->u.fault.ft_flags_valid = 0;

		/* May call ll_readpage() */
		ll_cl_add(vma->vm_file, env, io, LCC_MMAP);

		result = cl_io_loop(env, io);

		ll_cl_remove(vma->vm_file, env);

		/* ft_flags are only valid if we reached
		 * the call to filemap_fault */
		if (vio->u.fault.ft_flags_valid)
			fault_ret = vio->u.fault.ft_flags;

		vmpage = vio->u.fault.ft_vmpage;
		if (result != 0 && vmpage != NULL) {
			put_page(vmpage);
			vmf->page = NULL;
		}
        }
	cl_io_fini(env, io);

	vma->vm_flags |= ra_flags;

out:
	cl_env_put(env, &refcheck);
	if (result != 0 && !(fault_ret & VM_FAULT_RETRY))
		fault_ret |= to_fault_error(result);

	CDEBUG(D_MMAP, "%s fault %d/%d\n", current->comm, fault_ret, result);
	RETURN(fault_ret);
}
Example #19
static int lov_layout_change(const struct lu_env *unused,
			     struct lov_object *lov, struct lov_stripe_md *lsm,
			     const struct cl_object_conf *conf)
{
	enum lov_layout_type llt = lov_type(lsm);
	union lov_layout_state *state = &lov->u;
	const struct lov_layout_operations *old_ops;
	const struct lov_layout_operations *new_ops;
	void *cookie;
	struct lu_env *env;
	__u16 refcheck;
	int rc;
	ENTRY;

	LASSERT(0 <= lov->lo_type && lov->lo_type < ARRAY_SIZE(lov_dispatch));

	cookie = cl_env_reenter();
	env = cl_env_get(&refcheck);
	if (IS_ERR(env)) {
		cl_env_reexit(cookie);
		RETURN(PTR_ERR(env));
	}

	LASSERT(0 <= llt && llt < ARRAY_SIZE(lov_dispatch));

	CDEBUG(D_INODE, DFID" from %s to %s\n",
	       PFID(lu_object_fid(lov2lu(lov))),
	       llt2str(lov->lo_type), llt2str(llt));

	old_ops = &lov_dispatch[lov->lo_type];
	new_ops = &lov_dispatch[llt];

	rc = cl_object_prune(env, &lov->lo_cl);
	if (rc != 0)
		GOTO(out, rc);

	rc = old_ops->llo_delete(env, lov, &lov->u);
	if (rc != 0)
		GOTO(out, rc);

	old_ops->llo_fini(env, lov, &lov->u);

	LASSERT(atomic_read(&lov->lo_active_ios) == 0);

	lov->lo_type = LLT_EMPTY;

	/* page bufsize fixup */
	cl_object_header(&lov->lo_cl)->coh_page_bufsize -=
		lov_page_slice_fixup(lov, NULL);

	rc = new_ops->llo_init(env, lov_object_dev(lov), lov, lsm, conf, state);
	if (rc != 0) {
		new_ops->llo_delete(env, lov, state);
		new_ops->llo_fini(env, lov, state);
		/* this file becomes an EMPTY file. */
		GOTO(out, rc);
	}

	new_ops->llo_install(env, lov, state);
	lov->lo_type = llt;

out:
	cl_env_put(env, &refcheck);
	cl_env_reexit(cookie);

	RETURN(rc);
}
Example #20
/* Shared code of the page_mkwrite method for rhel5 and rhel6 */
static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
			    bool *retry)
{
	struct lu_env	   *env;
	struct cl_io	    *io;
	struct vvp_io	   *vio;
	int		      result;
	u16 refcheck;
	sigset_t	     set;
	struct inode	     *inode;
	struct ll_inode_info     *lli;

	env = cl_env_get(&refcheck);
	if (IS_ERR(env))
		return PTR_ERR(env);

	io = ll_fault_io_init(env, vma, vmpage->index, NULL);
	if (IS_ERR(io)) {
		result = PTR_ERR(io);
		goto out;
	}

	result = io->ci_result;
	if (result < 0)
		goto out_io;

	io->u.ci_fault.ft_mkwrite = 1;
	io->u.ci_fault.ft_writable = 1;

	vio = vvp_env_io(env);
	vio->u.fault.ft_vma    = vma;
	vio->u.fault.ft_vmpage = vmpage;

	set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM));

	inode = vvp_object_inode(io->ci_obj);
	lli = ll_i2info(inode);

	result = cl_io_loop(env, io);

	cfs_restore_sigs(set);

	if (result == 0) {
		struct inode *inode = file_inode(vma->vm_file);
		struct ll_inode_info *lli = ll_i2info(inode);

		lock_page(vmpage);
		if (!vmpage->mapping) {
			unlock_page(vmpage);

			/* page was truncated and lock was cancelled, return
			 * ENODATA so that VM_FAULT_NOPAGE will be returned
			 * to handle_mm_fault().
			 */
			if (result == 0)
				result = -ENODATA;
		} else if (!PageDirty(vmpage)) {
			/* race, the page has been cleaned by ptlrpcd after
			 * it was unlocked, it has to be added into dirty
			 * cache again otherwise this soon-to-dirty page won't
			 * consume any grants, even worse if this page is being
			 * transferred because it will break RPC checksum.
			 */
			unlock_page(vmpage);

			CDEBUG(D_MMAP,
			       "Race on page_mkwrite %p/%lu, page has been written out, retry.\n",
			       vmpage, vmpage->index);

			*retry = true;
			result = -EAGAIN;
		}

		if (!result)
			set_bit(LLIF_DATA_MODIFIED, &lli->lli_flags);
	}

out_io:
	cl_io_fini(env, io);
out:
	cl_env_put(env, &refcheck);
	CDEBUG(D_MMAP, "%s mkwrite with %d\n", current->comm, result);
	LASSERT(ergo(result == 0, PageLocked(vmpage)));

	return result;
}
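
ll_page_mkwrite0() reports the "page cleaned by ptlrpcd" race through the *retry out parameter together with -EAGAIN, so a caller is expected to loop. A hypothetical caller sketch (the wrapper name is illustrative; the tree's real entry point also translates the result into VM_FAULT_* codes):

static int example_page_mkwrite(struct vm_area_struct *vma, struct page *vmpage)
{
	bool retry;
	int rc;

	do {
		retry = false;
		rc = ll_page_mkwrite0(vma, vmpage, &retry);
	} while (retry);

	return rc;
}
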
Example #21
/**
 * Lustre implementation of a vm_operations_struct::fault() method, called by
 * the VM to serve a page fault (both in kernel and user space).
 *
 * \param vma - virtual memory area struct related to the page fault
 * \param vmf - structure describing the type and address of the fault
 *
 * \return allocated and filled _locked_ page for the address
 * \retval VM_FAULT_ERROR on general error
 * \retval NOPAGE_OOM when there is no memory to allocate a new page
 */
static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct lu_env	   *env;
	struct cl_io	    *io;
	struct vvp_io	   *vio = NULL;
	struct page	     *vmpage;
	unsigned long	    ra_flags;
	int		      result = 0;
	int		      fault_ret = 0;
	u16 refcheck;

	env = cl_env_get(&refcheck);
	if (IS_ERR(env))
		return PTR_ERR(env);

	io = ll_fault_io_init(env, vma, vmf->pgoff, &ra_flags);
	if (IS_ERR(io)) {
		result = to_fault_error(PTR_ERR(io));
		goto out;
	}

	result = io->ci_result;
	if (result == 0) {
		vio = vvp_env_io(env);
		vio->u.fault.ft_vma       = vma;
		vio->u.fault.ft_vmpage    = NULL;
		vio->u.fault.ft_vmf = vmf;
		vio->u.fault.ft_flags = 0;
		vio->u.fault.ft_flags_valid = false;

		/* May call ll_readpage() */
		ll_cl_add(vma->vm_file, env, io);

		result = cl_io_loop(env, io);

		ll_cl_remove(vma->vm_file, env);

		/* ft_flags are only valid if we reached
		 * the call to filemap_fault
		 */
		if (vio->u.fault.ft_flags_valid)
			fault_ret = vio->u.fault.ft_flags;

		vmpage = vio->u.fault.ft_vmpage;
		if (result != 0 && vmpage) {
			put_page(vmpage);
			vmf->page = NULL;
		}
	}
	cl_io_fini(env, io);

	vma->vm_flags |= ra_flags;

out:
	cl_env_put(env, &refcheck);
	if (result != 0 && !(fault_ret & VM_FAULT_RETRY))
		fault_ret |= to_fault_error(result);

	CDEBUG(D_MMAP, "%s fault %d/%d\n", current->comm, fault_ret, result);
	return fault_ret;
}
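
As the comment block above describes, the return value is a VM_FAULT_* bit mask, so a handler like this is meant to be plugged into a vm_operations_struct. A hedged registration sketch (illustrative only; the tree's actual ll_file_vm_ops wraps ll_fault0() in a signal-blocking ll_fault() entry point rather than exposing it directly):

static const struct vm_operations_struct example_vm_ops = {
	.fault		= ll_fault0,	/* returns VM_FAULT_* bits */
};
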