Example #1
/**
 * API-independent part of page fault initialization.
 * \param env - lu_env corresponding to this processing
 * \param vma - virtual memory area the fault is addressed to
 * \param index - page index corresponding to the fault
 * \param ra_flags - vma readahead flags
 *
 * \return error codes from cl_io_init.
 */
static struct cl_io *
ll_fault_io_init(struct lu_env *env, struct vm_area_struct *vma,
		 pgoff_t index, unsigned long *ra_flags)
{
	struct file	       *file = vma->vm_file;
	struct inode	       *inode = file_inode(file);
	struct cl_io	       *io;
	struct cl_fault_io     *fio;
	int			rc;
	ENTRY;

	if (ll_file_nolock(file))
		RETURN(ERR_PTR(-EOPNOTSUPP));

restart:
	io = vvp_env_thread_io(env);
	io->ci_obj = ll_i2info(inode)->lli_clob;
	LASSERT(io->ci_obj != NULL);

	fio = &io->u.ci_fault;
	fio->ft_index      = index;
	fio->ft_executable = vma->vm_flags & VM_EXEC;

	/*
	 * disable VM_SEQ_READ and use VM_RAND_READ to make sure that
	 * the kernel will not read other pages not covered by ldlm in
	 * filemap_nopage. we do our readahead in ll_readpage.
	 */
	if (ra_flags != NULL)
		*ra_flags = vma->vm_flags & (VM_RAND_READ|VM_SEQ_READ);
	vma->vm_flags &= ~VM_SEQ_READ;
	vma->vm_flags |= VM_RAND_READ;

	CDEBUG(D_MMAP, "vm_flags: %lx (%lu %d)\n", vma->vm_flags,
	       fio->ft_index, fio->ft_executable);

	rc = cl_io_init(env, io, CIT_FAULT, io->ci_obj);
	if (rc == 0) {
		struct vvp_io *vio = vvp_env_io(env);
		struct ll_file_data *fd = LUSTRE_FPRIVATE(file);

		LASSERT(vio->vui_cl.cis_io == io);

		/* mmap lock must be MANDATORY because it has to cache
		 * pages. */
		io->ci_lockreq = CILR_MANDATORY;
		vio->vui_fd = fd;
	} else {
		LASSERT(rc < 0);
		cl_io_fini(env, io);
		if (io->ci_need_restart)
			goto restart;

		io = ERR_PTR(rc);
	}

	RETURN(io);
}
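
For context, ll_fault_io_init() only builds the CIT_FAULT io; the surrounding fault handler is expected to obtain a per-thread lu_env, call this helper, drive the io with cl_io_loop(), and release everything afterwards. Below is a minimal sketch of such a caller under those assumptions: the function name is hypothetical, the vvp_io fault descriptor setup is omitted, and the exact env helpers and refcheck type may differ between Lustre releases.

static int ll_fault_caller_sketch(struct vm_area_struct *vma,
				  struct vm_fault *vmf)
{
	unsigned long ra_flags;
	struct lu_env *env;
	struct cl_io *io;
	__u16 refcheck;
	int rc;

	env = cl_env_get(&refcheck);		/* per-thread client env */
	if (IS_ERR(env))
		return PTR_ERR(env);

	io = ll_fault_io_init(env, vma, vmf->pgoff, &ra_flags);
	if (IS_ERR(io)) {
		rc = PTR_ERR(io);
		goto out_env;
	}

	/* The real handler also fills the vvp_io fault descriptor
	 * (vma/vmf) before running the io; omitted in this sketch. */
	rc = cl_io_loop(env, io);		/* execute the CIT_FAULT io */
	cl_io_fini(env, io);

	vma->vm_flags |= ra_flags;		/* restore saved readahead hints */
out_env:
	cl_env_put(env, &refcheck);
	return rc;
}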
Example #2
int ll_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	int rc;

	if (ll_file_nolock(file))
		return -EOPNOTSUPP;

	ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_MAP, 1);
	rc = generic_file_mmap(file, vma);
	if (rc == 0) {
		vma->vm_ops = &ll_file_vm_ops;
		vma->vm_ops->open(vma);
		/* update the inode's size and mtime */
		rc = ll_glimpse_size(inode);
	}

	return rc;
}
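
ll_file_mmap() is installed as the ->mmap method of the regular-file file_operations, so the VFS reaches it from the mmap(2) path; it calls vma->vm_ops->open() by hand because the kernel only invokes ->open for VMAs that are later copied or split, not for the initial mapping. A hedged sketch of how such a handler is wired up is shown below; the table name and the other methods are placeholders, not Lustre's actual operations table.

static const struct file_operations ll_file_operations_sketch = {
	.owner		= THIS_MODULE,
	.llseek		= generic_file_llseek,		/* placeholder */
	.read_iter	= generic_file_read_iter,	/* placeholder */
	.write_iter	= generic_file_write_iter,	/* placeholder */
	.mmap		= ll_file_mmap,			/* handler shown above */
};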
Example #3
/**
 * API-independent part of page fault initialization.
 * \param vma - virtual memory area the fault is addressed to
 * \param env_ret - lu_env corresponding to this processing
 * \param nest - nesting level
 * \param index - page index corresponding to the fault
 * \param ra_flags - vma readahead flags
 *
 * \return allocated and initialized env for the fault operation.
 * \retval -EINVAL if the env can't be allocated
 * \return other error codes from cl_io_init.
 */
static struct cl_io *
ll_fault_io_init(struct vm_area_struct *vma, struct lu_env **env_ret,
		 struct cl_env_nest *nest, pgoff_t index,
		 unsigned long *ra_flags)
{
	struct file	       *file = vma->vm_file;
	struct inode	       *inode = file_inode(file);
	struct cl_io	       *io;
	struct cl_fault_io     *fio;
	struct lu_env	       *env;
	int			rc;

	*env_ret = NULL;
	if (ll_file_nolock(file))
		return ERR_PTR(-EOPNOTSUPP);

	/*
	 * A page fault can occur when Lustre I/O is already active for the
	 * current thread, e.g. when doing read/write against a user-level
	 * buffer that is itself mmapped from a Lustre file. To avoid
	 * stomping on the existing context, optionally force allocation of
	 * a new one.
	 */
	env = cl_env_nested_get(nest);
	if (IS_ERR(env))
		return ERR_PTR(-EINVAL);

	*env_ret = env;

	io = ccc_env_thread_io(env);
	io->ci_obj = ll_i2info(inode)->lli_clob;
	LASSERT(io->ci_obj);

	fio = &io->u.ci_fault;
	fio->ft_index      = index;
	fio->ft_executable = vma->vm_flags & VM_EXEC;

	/*
	 * disable VM_SEQ_READ and use VM_RAND_READ to make sure that
	 * the kernel will not read other pages not covered by ldlm in
	 * filemap_nopage. we do our readahead in ll_readpage.
	 */
	if (ra_flags)
		*ra_flags = vma->vm_flags & (VM_RAND_READ|VM_SEQ_READ);
	vma->vm_flags &= ~VM_SEQ_READ;
	vma->vm_flags |= VM_RAND_READ;

	CDEBUG(D_MMAP, "vm_flags: %lx (%lu %d)\n", vma->vm_flags,
	       fio->ft_index, fio->ft_executable);

	rc = cl_io_init(env, io, CIT_FAULT, io->ci_obj);
	if (rc == 0) {
		struct ccc_io *cio = ccc_env_io(env);
		struct ll_file_data *fd = LUSTRE_FPRIVATE(file);

		LASSERT(cio->cui_cl.cis_io == io);

		/* mmap lock must be MANDATORY because it has to cache pages. */
		io->ci_lockreq = CILR_MANDATORY;
		cio->cui_fd = fd;
	} else {
		LASSERT(rc < 0);
		cl_io_fini(env, io);
		cl_env_nested_put(nest, env);
		io = ERR_PTR(rc);
	}

	return io;
}
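
In this older variant the env is handed back through env_ret and is tied to a cl_env_nest, so the ownership rules differ from Example #1: on the error path the helper has already dropped the nested env, while on success the caller must release the io first and the env second. A minimal sketch of a caller under those assumptions follows; the function name is hypothetical, and the ccc_io fault descriptor setup and error-to-VM_FAULT conversion are left out.

static int ll_fault_caller_old_sketch(struct vm_area_struct *vma,
				      struct vm_fault *vmf)
{
	struct cl_env_nest nest;
	unsigned long ra_flags;
	struct lu_env *env;
	struct cl_io *io;
	int rc;

	io = ll_fault_io_init(vma, &env, &nest, vmf->pgoff, &ra_flags);
	if (IS_ERR(io))
		return PTR_ERR(io);	/* env already put on this path */

	rc = cl_io_loop(env, io);	/* run the CIT_FAULT io */

	cl_io_fini(env, io);
	cl_env_nested_put(&nest, env);	/* pairs with cl_env_nested_get() */

	vma->vm_flags |= ra_flags;	/* restore saved readahead hints */
	return rc;
}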
Example #4
static int vvp_mmap_locks(const struct lu_env *env,
                          struct ccc_io *vio, struct cl_io *io)
{
        struct ccc_thread_info *cti = ccc_env_info(env);
        struct mm_struct       *mm = current->mm;
        struct vm_area_struct  *vma;
        struct cl_lock_descr   *descr = &cti->cti_descr;
        ldlm_policy_data_t      policy;
        unsigned long           addr;
        unsigned long           seg;
        ssize_t                 count;
        int                     result;
        ENTRY;

        LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);

        if (!cl_is_normalio(env, io))
                RETURN(0);

        if (vio->cui_iov == NULL) /* nfs or loop back device write */
                RETURN(0);

        /* No MM (e.g. NFS)? Then there are no VMAs either. */
        if (mm == NULL)
                RETURN(0);

        for (seg = 0; seg < vio->cui_nrsegs; seg++) {
                const struct iovec *iv = &vio->cui_iov[seg];

                addr = (unsigned long)iv->iov_base;
                count = iv->iov_len;
                if (count == 0)
                        continue;

                count += addr & (~CFS_PAGE_MASK);
                addr &= CFS_PAGE_MASK;

                down_read(&mm->mmap_sem);
                while ((vma = our_vma(mm, addr, count)) != NULL) {
                        struct inode *inode = vma->vm_file->f_dentry->d_inode;
                        int flags = CEF_MUST;

                        if (ll_file_nolock(vma->vm_file)) {
                                /*
                                 * For no lock case, a lockless lock will be
                                 * generated.
                                 */
                                flags = CEF_NEVER;
                        }

                        /*
                         * XXX: Required lock mode can be weakened: CIT_WRITE
                         * io only ever reads user level buffer, and CIT_READ
                         * only writes on it.
                         */
                        policy_from_vma(&policy, vma, addr, count);
                        descr->cld_mode = vvp_mode_from_vma(vma);
                        descr->cld_obj = ll_i2info(inode)->lli_clob;
                        descr->cld_start = cl_index(descr->cld_obj,
                                                    policy.l_extent.start);
                        descr->cld_end = cl_index(descr->cld_obj,
                                                  policy.l_extent.end);
                        descr->cld_enq_flags = flags;
                        result = cl_io_lock_alloc_add(env, io, descr);

                        CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n",
                               descr->cld_mode, descr->cld_start,
                               descr->cld_end);

                        if (result < 0) {
                                up_read(&mm->mmap_sem);
                                RETURN(result);
                        }

                        if (vma->vm_end - addr >= count)
                                break;

                        count -= vma->vm_end - addr;
                        addr = vma->vm_end;
                }
                up_read(&mm->mmap_sem);
        }
        RETURN(0);
}
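
The extent stored in descr comes from policy_from_vma(), which maps the user address range onto file offsets through the vma's vm_pgoff. The sketch below illustrates that translation; treat it as illustrative rather than the authoritative llite helper, and note that it uses the standard PAGE_MASK/PAGE_SHIFT spelling where the example above uses CFS_PAGE_MASK.

static void policy_from_vma_sketch(ldlm_policy_data_t *policy,
                                   struct vm_area_struct *vma,
                                   unsigned long addr, size_t count)
{
        /* The file offset of vma->vm_start is vm_pgoff << PAGE_SHIFT; add
         * the page-aligned distance of addr from vm_start to get the start
         * of the extent, and round the end up to the last byte of its page. */
        policy->l_extent.start = ((addr - vma->vm_start) & PAGE_MASK) +
                                 (vma->vm_pgoff << PAGE_SHIFT);
        policy->l_extent.end = (policy->l_extent.start + count - 1) |
                               ~PAGE_MASK;
}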