Ejemplo n.º 1
0
/**
 * Acquire a CLM_GROUP lock covering the whole file [0, CL_PAGE_EOF].
 *
 * On success the env, lock and gid are stashed in \a cg for a later
 * cl_put_grouplock(); the env reference taken into cg_env is deliberately
 * kept alive across the unplant so the lock can be released from another
 * thread context.
 *
 * \param obj      object to lock
 * \param gid      group lock identifier
 * \param nonblock if non-zero, enqueue with CEF_NONBLOCK instead of waiting
 * \param cg       out: group-lock state handed back to the caller
 *
 * \retval 0 on success, negative errno on failure
 */
int cl_get_grouplock(struct cl_object *obj, unsigned long gid, int nonblock,
                     struct ccc_grouplock *cg)
{
        struct lu_env          *env;
        struct cl_io           *io;
        struct cl_lock         *lock;
        struct cl_lock_descr   *descr;
        __u32                   enqflags;
        int                     refcheck;
        int                     rc;

        env = cl_env_get(&refcheck);
        if (IS_ERR(env))
                return PTR_ERR(env);

        io = &ccc_env_info(env)->cti_io;
        io->ci_obj = obj;

        rc = cl_io_init(env, io, CIT_MISC, io->ci_obj);
        if (rc) {
                LASSERT(rc < 0);
                /* cl_io_init() may have partially initialized the io even
                 * when it fails, so it must be finalized before bailing out
                 * (the original code skipped this and leaked the io). */
                cl_io_fini(env, io);
                cl_env_put(env, &refcheck);
                return rc;
        }

        descr = &ccc_env_info(env)->cti_descr;
        descr->cld_obj = obj;
        descr->cld_start = 0;
        descr->cld_end = CL_PAGE_EOF;   /* group lock spans the whole file */
        descr->cld_gid = gid;
        descr->cld_mode = CLM_GROUP;

        enqflags = CEF_MUST | (nonblock ? CEF_NONBLOCK : 0);
        descr->cld_enq_flags = enqflags;

        lock = cl_lock_request(env, io, descr, GROUPLOCK_SCOPE, cfs_current());
        if (IS_ERR(lock)) {
                cl_io_fini(env, io);
                cl_env_put(env, &refcheck);
                return PTR_ERR(lock);
        }

        /* take an extra env reference that lives in cg until
         * cl_put_grouplock(); it must resolve to the same env */
        cg->cg_env = cl_env_get(&refcheck);
        cg->cg_lock = lock;
        cg->cg_gid = gid;
        LASSERT(cg->cg_env == env);

        /* NOTE(review): cl_env_unplant() presumably detaches the env from
         * this thread so cl_put_grouplock() can re-implant it elsewhere —
         * confirm against cl_env_implant() in cl_put_grouplock(). */
        cl_env_unplant(env, &refcheck);
        return 0;
}
Ejemplo n.º 2
0
/**
 * Acquire a CLM_GROUP lock covering the whole file [0, CL_PAGE_EOF].
 *
 * On success the env, io, lock and gid are stashed in \a cg for a later
 * cl_put_grouplock(); the env reference taken into cg_env is deliberately
 * kept alive across the unplant so the lock can be released from another
 * thread context.
 *
 * \param obj      object to lock
 * \param gid      group lock identifier
 * \param nonblock if non-zero, enqueue with CEF_NONBLOCK instead of waiting
 * \param cg       out: group-lock state handed back to the caller
 *
 * \retval 0 on success, negative errno on failure
 */
int cl_get_grouplock(struct cl_object *obj, unsigned long gid, int nonblock,
		     struct ccc_grouplock *cg)
{
	struct lu_env		*env;
	struct cl_io		*io;
	struct cl_lock		*lock;
	struct cl_lock_descr	*descr;
	__u32			 enqflags;
	int			 refcheck;
	int			 rc;

	env = cl_env_get(&refcheck);
	if (IS_ERR(env))
		return PTR_ERR(env);

	io = ccc_env_thread_io(env);
	io->ci_obj = obj;
	/* take the group lock regardless of the layout state */
	io->ci_ignore_layout = 1;

	rc = cl_io_init(env, io, CIT_MISC, io->ci_obj);
	if (rc) {
		cl_io_fini(env, io);
		cl_env_put(env, &refcheck);
		/* Does not make sense to take GL for released layout */
		if (rc > 0)
			rc = -ENOTSUPP;
		return rc;
	}

	descr = &ccc_env_info(env)->cti_descr;
	descr->cld_obj = obj;
	descr->cld_start = 0;
	descr->cld_end = CL_PAGE_EOF;	/* group lock spans the whole file */
	descr->cld_gid = gid;
	descr->cld_mode = CLM_GROUP;

	enqflags = CEF_MUST | (nonblock ? CEF_NONBLOCK : 0);
	descr->cld_enq_flags = enqflags;

	lock = cl_lock_request(env, io, descr, GROUPLOCK_SCOPE, current);
	if (IS_ERR(lock)) {
		cl_io_fini(env, io);
		cl_env_put(env, &refcheck);
		return PTR_ERR(lock);
	}

	/* take an extra env reference that lives in cg until
	 * cl_put_grouplock(); it must resolve to the same env */
	cg->cg_env  = cl_env_get(&refcheck);
	cg->cg_io   = io;
	cg->cg_lock = lock;
	cg->cg_gid  = gid;
	LASSERT(cg->cg_env == env);

	/* NOTE(review): cl_env_unplant() presumably detaches the env from
	 * this thread so the group lock can be dropped from another context
	 * — confirm against the matching cl_env_implant() in the release
	 * path. */
	cl_env_unplant(env, &refcheck);
	return 0;
}
Ejemplo n.º 3
0
/**
 * Release a group lock previously taken by cl_get_grouplock().
 *
 * Re-attaches the env that cl_get_grouplock() unplanted to the current
 * thread, drops the extra env reference stored in cg_env, then releases
 * the lock and finalizes the io before putting the env for good.
 *
 * \param cg group-lock state filled in by cl_get_grouplock()
 */
void cl_put_grouplock(struct ccc_grouplock *cg)
{
        struct lu_env          *env = cg->cg_env;
        struct cl_lock         *lock = cg->cg_lock;
        int                     refcheck;

        LASSERT(cg->cg_env);
        LASSERT(cg->cg_gid);

        /* re-plant the env into this thread, then drop the extra reference
         * taken by cl_get_grouplock() into cg_env */
        cl_env_implant(env, &refcheck);
        cl_env_put(env, &refcheck);

        cl_unuse(env, lock);
        cl_lock_release(env, lock, GROUPLOCK_SCOPE, cfs_current());
        /* finalize the CIT_MISC io set up in cl_get_grouplock() */
        cl_io_fini(env, &ccc_env_info(env)->cti_io);
        /* final put: refcheck pointer is NULL — presumably because the env
         * was planted/unplanted rather than freshly got; confirm against
         * cl_env_put() semantics */
        cl_env_put(env, NULL);
}
Ejemplo n.º 4
0
/**
 * Refresh the inode's size from locally cached lock state, without doing
 * any RPC: peek at an existing lock covering the whole file and, if one is
 * found, merge its lvb attributes into the inode.
 *
 * \param inode inode whose size should be refreshed
 *
 * \retval 0        size refreshed from a cached lock
 * \retval -ENODATA no cached lock covers the file
 * \retval <=0      other value propagated from cl_io_get()/cl_io_init()
 */
int cl_local_size(struct inode *inode)
{
	struct lu_env		*env = NULL;
	struct cl_io		*io  = NULL;
	struct cl_object	*clob;
	struct cl_lock_descr	*descr;
	struct cl_lock		*lock;
	int			 rc;
	int			 refcheck;

	ENTRY;

	/* nothing to do for an inode without striping metadata */
	if (!cl_i2info(inode)->lli_has_smd)
		RETURN(0);

	rc = cl_io_get(inode, &env, &io, &refcheck);
	if (rc <= 0)
		RETURN(rc);

	clob = io->ci_obj;
	rc = cl_io_init(env, io, CIT_MISC, clob);
	if (rc == 0) {
		descr = &ccc_env_info(env)->cti_descr;
		*descr = whole_file;
		descr->cld_obj = clob;

		/* peek only: never enqueue a new lock here */
		lock = cl_lock_peek(env, io, descr, "localsize", current);
		if (lock == NULL) {
			rc = -ENODATA;
		} else {
			cl_merge_lvb(env, inode);
			cl_unuse(env, lock);
			cl_lock_release(env, lock, "localsize", current);
			rc = 0;
		}
	} else if (rc > 0) {
		/* io could not proceed; report the io's own result */
		rc = io->ci_result;
	}
	cl_io_fini(env, io);
	cl_env_put(env, &refcheck);
	RETURN(rc);
}
Ejemplo n.º 5
0
/**
 * Enqueue DLM lock requests covering the user-space buffers of a normal
 * read/write io, so that page faults on mmapped regions of those buffers
 * cannot deadlock against the io's own extent locks.
 *
 * \param env environment for this thread
 * \param vio ccc io state holding the user iovec (cui_iov/cui_nrsegs)
 * \param io  the CIT_READ/CIT_WRITE io being set up
 *
 * \retval 0 on success, negative errno from cl_io_lock_alloc_add()
 */
static int vvp_mmap_locks(const struct lu_env *env,
                          struct ccc_io *vio, struct cl_io *io)
{
        struct ccc_thread_info *cti = ccc_env_info(env);
        struct mm_struct       *mm = current->mm;
        struct vm_area_struct  *vma;
        struct cl_lock_descr   *descr = &cti->cti_descr;
        ldlm_policy_data_t      policy;
        unsigned long           addr;
        unsigned long           seg;
        ssize_t                 count;
        int                     result;
        ENTRY;

        LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);

        if (!cl_is_normalio(env, io))
                RETURN(0);

        if (vio->cui_iov == NULL) /* nfs or loop back device write */
                RETURN(0);

        /* No MM (e.g. NFS)? No vmas too. */
        if (mm == NULL)
                RETURN(0);

        for (seg = 0; seg < vio->cui_nrsegs; seg++) {
                const struct iovec *iv = &vio->cui_iov[seg];

                addr = (unsigned long)iv->iov_base;
                count = iv->iov_len;
                if (count == 0)
                        continue;

                /* round the segment out to whole pages: count absorbs the
                 * sub-page offset, addr drops to a page boundary */
                count += addr & (~CFS_PAGE_MASK);
                addr &= CFS_PAGE_MASK;

                down_read(&mm->mmap_sem);
                while ((vma = our_vma(mm, addr, count)) != NULL) {
                        struct inode *inode = vma->vm_file->f_dentry->d_inode;
                        int flags = CEF_MUST;

                        if (ll_file_nolock(vma->vm_file)) {
                                /*
                                 * For no lock case, a lockless lock will be
                                 * generated.
                                 */
                                flags = CEF_NEVER;
                        }

                        /*
                         * XXX: Required lock mode can be weakened: CIT_WRITE
                         * io only ever reads user level buffer, and CIT_READ
                         * only writes on it.
                         */
                        policy_from_vma(&policy, vma, addr, count);
                        descr->cld_mode = vvp_mode_from_vma(vma);
                        descr->cld_obj = ll_i2info(inode)->lli_clob;
                        descr->cld_start = cl_index(descr->cld_obj,
                                                    policy.l_extent.start);
                        descr->cld_end = cl_index(descr->cld_obj,
                                                  policy.l_extent.end);
                        descr->cld_enq_flags = flags;
                        result = cl_io_lock_alloc_add(env, io, descr);

                        CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n",
                               descr->cld_mode, descr->cld_start,
                               descr->cld_end);

                        if (result < 0) {
                                /* must pair the down_read() above on every
                                 * exit path; the original returned here with
                                 * mmap_sem still held, leaking the semaphore */
                                up_read(&mm->mmap_sem);
                                RETURN(result);
                        }

                        if (vma->vm_end - addr >= count)
                                break;

                        /* advance to the next vma covering this segment */
                        count -= vma->vm_end - addr;
                        addr = vma->vm_end;
                }
                up_read(&mm->mmap_sem);
        }
        RETURN(0);
}
Ejemplo n.º 6
0
/**
 * Glimpse the file size: issue a glimpse (or AGL) request so that clients
 * holding conflicting DLM locks report their sizes, then merge the result
 * into the inode attributes.
 *
 * \param env   environment for this thread
 * \param io    io against which the glimpse lock is requested
 * \param inode inode whose attributes are refreshed
 * \param clob  cl_object backing \a inode
 * \param agl   non-zero for asynchronous glimpse lock (CEF_AGL)
 *
 * \retval 0 on success (or when MDS holds the size), negative errno on error
 */
int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io,
		    struct inode *inode, struct cl_object *clob, int agl)
{
	struct cl_lock_descr *descr = &ccc_env_info(env)->cti_descr;
	struct cl_inode_info *lli   = cl_i2info(inode);
	const struct lu_fid  *fid   = lu_object_fid(&clob->co_lu);
	struct ccc_io	*cio   = ccc_env_io(env);
	struct cl_lock       *lock;
	int result;

	ENTRY;
	result = 0;
	/* if the MDS already holds the size lock, the cached attributes are
	 * authoritative and no glimpse is needed */
	if (!(lli->lli_flags & LLIF_MDS_SIZE_LOCK)) {
		CDEBUG(D_DLMTRACE, "Glimpsing inode "DFID"\n", PFID(fid));
		if (lli->lli_has_smd) {
			/* NOTE: this looks like DLM lock request, but it may
			 *       not be one. Due to CEF_ASYNC flag (translated
			 *       to LDLM_FL_HAS_INTENT by osc), this is
			 *       glimpse request, that won't revoke any
			 *       conflicting DLM locks held. Instead,
			 *       ll_glimpse_callback() will be called on each
			 *       client holding a DLM lock against this file,
			 *       and resulting size will be returned for each
			 *       stripe. DLM lock on [0, EOF] is acquired only
			 *       if there were no conflicting locks. If there
			 *       were conflicting locks, enqueuing or waiting
			 *       fails with -ENAVAIL, but valid inode
			 *       attributes are returned anyway. */
			*descr = whole_file;
			descr->cld_obj   = clob;
			descr->cld_mode  = CLM_PHANTOM;
			descr->cld_enq_flags = CEF_ASYNC | CEF_MUST;
			if (agl)
				descr->cld_enq_flags |= CEF_AGL;
			/* mark the io as a glimpse for the duration of the
			 * request */
			cio->cui_glimpse = 1;
			/*
			 * CEF_ASYNC is used because glimpse sub-locks cannot
			 * deadlock (because they never conflict with other
			 * locks) and, hence, can be enqueued out-of-order.
			 *
			 * CEF_MUST protects glimpse lock from conversion into
			 * a lockless mode.
			 */
			lock = cl_lock_request(env, io, descr, "glimpse",
					       current);
			cio->cui_glimpse = 0;

			/* NULL (not an error) means nothing to wait for —
			 * presumably the AGL request completed asynchronously;
			 * confirm against cl_lock_request() semantics */
			if (lock == NULL)
				RETURN(0);

			if (IS_ERR(lock))
				RETURN(PTR_ERR(lock));

			/* a lock to wait on only happens in the synchronous
			 * (non-AGL) case */
			LASSERT(agl == 0);
			result = cl_wait(env, lock);
			if (result == 0) {
				cl_merge_lvb(env, inode);
				if (cl_isize_read(inode) > 0 &&
				    inode->i_blocks == 0) {
					/*
					 * LU-417: Add dirty pages block count
					 * lest i_blocks reports 0, some "cp" or
					 * "tar" may think it's a completely
					 * sparse file and skip it.
					 */
					inode->i_blocks = dirty_cnt(inode);
				}
				cl_unuse(env, lock);
			}
			cl_lock_release(env, lock, "glimpse", current);
		} else {
			/* no stripes, so size comes from the MDS attributes
			 * already merged locally */
			CDEBUG(D_DLMTRACE, "No objects for inode\n");
			cl_merge_lvb(env, inode);
		}
	}

	RETURN(result);
}