Example #1
static int slp_io_rw_lock(const struct lu_env *env,
                          const struct cl_io_slice *ios)
{
        struct ccc_io *cio = ccc_env_io(env);
        struct cl_io *io   = ios->cis_io;
        loff_t start;
        loff_t end;

        if (cl_io_is_append(io)) {
                start = 0;
                end   = OBD_OBJECT_EOF;
        } else {
                start = io->u.ci_wr.wr.crw_pos;
                end   = start + io->u.ci_wr.wr.crw_count - 1;
        }

        ccc_io_update_iov(env, cio, io);

        /*
         * This acquires real DLM lock only in O_APPEND case, because of
         * the io->ci_lockreq setting in llu_io_init().
         */
        LASSERT(ergo(cl_io_is_append(io), io->ci_lockreq == CILR_MANDATORY));
        LASSERT(ergo(!cl_io_is_append(io), io->ci_lockreq == CILR_NEVER));
        return ccc_io_one_lock(env, io, 0,
                               io->ci_type == CIT_READ ? CLM_READ : CLM_WRITE,
                               start, end);
}
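
The two LASSERT()s above use the libcfs ergo() helper, which encodes logical implication. A minimal standalone sketch of its semantics follows; the one-line macro mirrors the libcfs definition, while the surrounding demo is illustrative only:

#include <assert.h>

/* Logical implication: ergo(a, b) is false only when a holds and b does not.
 * This mirrors the libcfs macro. */
#define ergo(a, b) (!(a) || (b))

int main(void)
{
        /* "if the io is an append, the lock request must be mandatory" */
        int is_append = 1, lockreq_mandatory = 1;

        assert(ergo(is_append, lockreq_mandatory)); /* 1 -> 1: holds */
        assert(ergo(0, 0));                         /* vacuously true */
        return 0;
}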
Example #2
static int slp_io_init(const struct lu_env *env, struct cl_object *obj,
                       struct cl_io *io)
{
        struct ccc_io      *vio   = ccc_env_io(env);
        int result = 0;

        CLOBINVRNT(env, obj, ccc_object_invariant(obj));

        cl_io_slice_add(io, &vio->cui_cl, obj, &ccc_io_ops);
        if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE) {
                size_t count;

                count = io->u.ci_rw.crw_count;
                /* "If nbyte is 0, read() will return 0 and have no other
                 *  results."  -- Single Unix Spec */
                if (count == 0) {
                        result = 1;
                } else {
                        vio->cui_tot_count = count;
                        vio->cui_tot_nrsegs = 0;
                }
        }
        return result;
}
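
A positive return from slp_io_init() propagates out of cl_io_init() and tells the caller the io is already complete, which is how the zero-byte case above satisfies the Single Unix Spec. A hedged sketch of a caller honoring that convention (sketch_do_read() is a hypothetical name; error handling is elided):

/* Sketch only: drive a read io, honoring the "positive means already
 * done" convention used by slp_io_init(). */
static int sketch_do_read(const struct lu_env *env, struct cl_io *io,
                          struct cl_object *obj)
{
        int result;

        result = cl_io_init(env, io, CIT_READ, obj);
        if (result == 0)
                result = cl_io_loop(env, io);   /* run the io state machine */
        else if (result > 0)
                result = 0;                     /* e.g. 0-byte read: done */
        cl_io_fini(env, io);
        return result;
}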
Example #3
/**
 * For swapping layout. The file's layout may have changed.
 * To avoid populating pages to a wrong stripe, we have to verify the
 * correctness of layout. It works because swapping layout processes
 * have to acquire group lock.
 */
static bool can_populate_pages(const struct lu_env *env, struct cl_io *io,
				struct inode *inode)
{
	struct ll_inode_info	*lli = ll_i2info(inode);
	struct ccc_io		*cio = ccc_env_io(env);
	bool rc = true;

	switch (io->ci_type) {
	case CIT_READ:
	case CIT_WRITE:
		/* no lock is needed here to check lli_layout_gen: we hold the
		 * extent lock, and the GROUP lock must be held to swap layouts */
		if (lli->lli_layout_gen != cio->cui_layout_gen) {
			io->ci_need_restart = 1;
			/* the application will see a short read/write */
			io->ci_continue = 0;
			rc = false;
		}
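		/* fall through */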
	case CIT_FAULT:
		/* fault is okay because we've already had a page. */
	default:
		break;
	}

	return rc;
}
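
When ci_need_restart is set, the top-level io loop is expected to tear the io down and replay it against the new layout. A hedged sketch of that consumer side, loosely modeled on ll_file_io_generic() (sketch_io_restart_loop() is a hypothetical name; setup and bookkeeping are elided):

/* Replay the io when can_populate_pages() flagged a layout change. */
static ssize_t sketch_io_restart_loop(const struct lu_env *env,
                                      struct cl_io *io, enum cl_io_type iot,
                                      loff_t pos, size_t count)
{
        ssize_t result;

restart:
        result = cl_io_rw_init(env, io, iot, pos, count);
        if (result == 0)
                result = cl_io_loop(env, io);   /* may set ci_need_restart */
        cl_io_fini(env, io);
        if (io->ci_need_restart)
                goto restart;                   /* layout swapped: replay io */
        return result;
}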
Example #4
static int vvp_io_rw_lock(const struct lu_env *env, struct cl_io *io,
                          enum cl_lock_mode mode, loff_t start, loff_t end)
{
        struct ccc_io *cio = ccc_env_io(env);
        int result;
        int ast_flags = 0;

        LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
        ENTRY;

        ccc_io_update_iov(env, cio, io);

        if (io->u.ci_rw.crw_nonblock)
                ast_flags |= CEF_NONBLOCK;
        result = vvp_mmap_locks(env, cio, io);
        if (result == 0)
                result = ccc_io_one_lock(env, io, ast_flags, mode, start, end);
        RETURN(result);
}
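
vvp_io_rw_lock() is the shared helper; the per-type cio_lock methods compute the byte range and pick the lock mode before calling it. A sketch of the write side, modeled on vvp_io_write_lock() (kept close to the upstream shape, but treat the details as illustrative):

static int vvp_io_write_lock(const struct lu_env *env,
                             const struct cl_io_slice *ios)
{
        struct cl_io *io = ios->cis_io;
        loff_t start;
        loff_t end;

        if (io->u.ci_wr.wr_append) {    /* O_APPEND: lock the whole file */
                start = 0;
                end   = OBD_OBJECT_EOF;
        } else {
                start = io->u.ci_wr.wr.crw_pos;
                end   = start + io->u.ci_wr.wr.crw_count - 1;
        }
        return vvp_io_rw_lock(env, io, CLM_WRITE, start, end);
}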
Example #5
/**
 * Implementation of cl_io_operations::cio_lock() method for CIT_SETATTR io.
 *
 * Handles "lockless io" mode when extent locking is done by the server.
 */
static int vvp_io_setattr_lock(const struct lu_env *env,
                               const struct cl_io_slice *ios)
{
        struct ccc_io *cio = ccc_env_io(env);
        struct cl_io  *io  = ios->cis_io;
        __u64 new_size;
        __u32 enqflags = 0;

        if (cl_io_is_trunc(io)) {
                new_size = io->u.ci_setattr.sa_attr.lvb_size;
                if (new_size == 0)
                        enqflags = CEF_DISCARD_DATA;
        } else {
                if ((io->u.ci_setattr.sa_attr.lvb_mtime >=
                     io->u.ci_setattr.sa_attr.lvb_ctime) ||
                    (io->u.ci_setattr.sa_attr.lvb_atime >=
                     io->u.ci_setattr.sa_attr.lvb_ctime))
                        return 0;
                new_size = 0;
        }
        cio->u.setattr.cui_local_lock = SETATTR_EXTENT_LOCK;
        return ccc_io_one_lock(env, io, enqflags, CLM_WRITE,
                               new_size, OBD_OBJECT_EOF);
}
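
The truncate test above comes from the cl_io_is_trunc() helper. A sketch of its definition, mirroring the cl_object.h inline, shown here for context:

static inline int cl_io_is_trunc(const struct cl_io *io)
{
        /* a setattr io that carries a size change is a truncate */
        return io->ci_type == CIT_SETATTR &&
               (io->u.ci_setattr.sa_valid & ATTR_SIZE);
}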
Example #6
/**
 * API independent part of page fault initialization.
 * \param vma - virtual memory area that triggered the page fault
 * \param env_ret - corresponding lu_env, returned to the caller
 * \param nest - nesting level
 * \param index - page index corresponding to the fault
 * \param ra_flags - vma readahead flags, saved for the caller
 *
 * \return allocated and initialized env for the fault operation.
 * \retval -EINVAL if the env cannot be allocated
 * \return other error codes from cl_io_init.
 */
static struct cl_io *
ll_fault_io_init(struct vm_area_struct *vma, struct lu_env **env_ret,
		 struct cl_env_nest *nest, pgoff_t index,
		 unsigned long *ra_flags)
{
	struct file	       *file = vma->vm_file;
	struct inode	       *inode = file_inode(file);
	struct cl_io	       *io;
	struct cl_fault_io     *fio;
	struct lu_env	       *env;
	int			rc;

	*env_ret = NULL;
	if (ll_file_nolock(file))
		return ERR_PTR(-EOPNOTSUPP);

	/*
	 * A page fault can be triggered while Lustre IO is already active
	 * for the current thread, e.g., when doing read/write against a
	 * user-level buffer mapped from a Lustre buffer. To avoid stomping
	 * on the existing context, a nested env is requested for the fault.
	 */
	env = cl_env_nested_get(nest);
	if (IS_ERR(env))
		return ERR_PTR(-EINVAL);

	*env_ret = env;

	io = ccc_env_thread_io(env);
	io->ci_obj = ll_i2info(inode)->lli_clob;
	LASSERT(io->ci_obj);

	fio = &io->u.ci_fault;
	fio->ft_index      = index;
	fio->ft_executable = vma->vm_flags & VM_EXEC;

	/*
	 * disable VM_SEQ_READ and use VM_RAND_READ to make sure that
	 * the kernel will not read other pages not covered by ldlm in
	 * filemap_nopage. We do our own readahead in ll_readpage.
	 */
	if (ra_flags)
		*ra_flags = vma->vm_flags & (VM_RAND_READ|VM_SEQ_READ);
	vma->vm_flags &= ~VM_SEQ_READ;
	vma->vm_flags |= VM_RAND_READ;

	CDEBUG(D_MMAP, "vm_flags: %lx (%lu %d)\n", vma->vm_flags,
	       fio->ft_index, fio->ft_executable);

	rc = cl_io_init(env, io, CIT_FAULT, io->ci_obj);
	if (rc == 0) {
		struct ccc_io *cio = ccc_env_io(env);
		struct ll_file_data *fd = LUSTRE_FPRIVATE(file);

		LASSERT(cio->cui_cl.cis_io == io);

		/* mmap lock must be MANDATORY because it has to cache pages. */
		io->ci_lockreq = CILR_MANDATORY;
		cio->cui_fd = fd;
	} else {
		LASSERT(rc < 0);
		cl_io_fini(env, io);
		cl_env_nested_put(nest, env);
		io = ERR_PTR(rc);
	}

	return io;
}
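
A hedged sketch of how a fault handler consumes ll_fault_io_init(), loosely modeled on ll_fault0() (sketch_fault() is a hypothetical name; vmf handling and page bookkeeping are elided):

static int sketch_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct lu_env *env;
        struct cl_env_nest nest;
        unsigned long ra_flags;
        struct cl_io *io;
        int result;

        io = ll_fault_io_init(vma, &env, &nest, vmf->pgoff, &ra_flags);
        if (IS_ERR(io))
                return PTR_ERR(io);

        result = io->ci_result;
        if (result == 0)
                result = cl_io_loop(env, io);   /* run the CIT_FAULT io */

        cl_io_fini(env, io);
        cl_env_nested_put(nest, env);   /* pairs with cl_env_nested_get() */
        vma->vm_flags |= ra_flags;      /* restore saved readahead flags */
        return result;
}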
Example #7
int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io,
		    struct inode *inode, struct cl_object *clob, int agl)
{
	struct cl_lock_descr *descr = &ccc_env_info(env)->cti_descr;
	struct cl_inode_info *lli   = cl_i2info(inode);
	const struct lu_fid  *fid   = lu_object_fid(&clob->co_lu);
	struct ccc_io	*cio   = ccc_env_io(env);
	struct cl_lock       *lock;
	int result;

	ENTRY;
	result = 0;
	if (!(lli->lli_flags & LLIF_MDS_SIZE_LOCK)) {
		CDEBUG(D_DLMTRACE, "Glimpsing inode "DFID"\n", PFID(fid));
		if (lli->lli_has_smd) {
			/* NOTE: this looks like a DLM lock request, but it
			 *       may not be one. Due to the CEF_ASYNC flag
			 *       (translated to LDLM_FL_HAS_INTENT by osc),
			 *       this is a glimpse request that won't revoke
			 *       any conflicting DLM locks held. Instead,
			 *       ll_glimpse_callback() will be called on each
			 *       client holding a DLM lock against this file,
			 *       and the resulting size will be returned for
			 *       each stripe. A DLM lock on [0, EOF] is
			 *       acquired only if there were no conflicting
			 *       locks; if there were, enqueuing or waiting
			 *       fails with -ENAVAIL, but valid inode
			 *       attributes are returned anyway. */
			*descr = whole_file;
			descr->cld_obj   = clob;
			descr->cld_mode  = CLM_PHANTOM;
			descr->cld_enq_flags = CEF_ASYNC | CEF_MUST;
			if (agl)
				descr->cld_enq_flags |= CEF_AGL;
			cio->cui_glimpse = 1;
			/*
			 * CEF_ASYNC is used because glimpse sub-locks cannot
			 * deadlock (because they never conflict with other
			 * locks) and, hence, can be enqueued out-of-order.
			 *
			 * CEF_MUST protects glimpse lock from conversion into
			 * a lockless mode.
			 */
			lock = cl_lock_request(env, io, descr, "glimpse",
					       current);
			cio->cui_glimpse = 0;

			if (lock == NULL)
				RETURN(0);

			if (IS_ERR(lock))
				RETURN(PTR_ERR(lock));

			LASSERT(agl == 0);
			result = cl_wait(env, lock);
			if (result == 0) {
				cl_merge_lvb(env, inode);
				if (cl_isize_read(inode) > 0 &&
				    inode->i_blocks == 0) {
					/*
					 * LU-417: add the dirty page block
					 * count; if i_blocks reported 0, some
					 * "cp" or "tar" implementations would
					 * treat the file as completely sparse
					 * and skip it.
					 */
					inode->i_blocks = dirty_cnt(inode);
				}
				cl_unuse(env, lock);
			}
			cl_lock_release(env, lock, "glimpse", current);
		} else {
			CDEBUG(D_DLMTRACE, "No objects for inode\n");
			cl_merge_lvb(env, inode);
		}
	}

	RETURN(result);
}
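
cl_glimpse_lock() is normally driven by a small wrapper that builds a CIT_MISC io first. A hedged sketch modeled on cl_glimpse_size() (sketch_glimpse_size() is a hypothetical name; env setup is elided):

static int sketch_glimpse_size(const struct lu_env *env, struct inode *inode)
{
        struct cl_io *io;
        int result;

        io = ccc_env_thread_io(env);
        io->ci_obj = ll_i2info(inode)->lli_clob;

        result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
        if (result > 0)
                result = io->ci_result;         /* init already decided */
        else if (result == 0)                   /* slices ready: glimpse */
                result = cl_glimpse_lock(env, io, inode, io->ci_obj, 0);
        cl_io_fini(env, io);
        return result;
}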