int cl_get_grouplock(struct cl_object *obj, unsigned long gid, int nonblock,
		     struct ccc_grouplock *cg)
{
	struct lu_env *env;
	struct cl_io *io;
	struct cl_lock *lock;
	struct cl_lock_descr *descr;
	__u32 enqflags;
	int refcheck;
	int rc;

	env = cl_env_get(&refcheck);
	if (IS_ERR(env))
		return PTR_ERR(env);

	io = ccc_env_thread_io(env);
	io->ci_obj = obj;
	io->ci_ignore_layout = 1;

	rc = cl_io_init(env, io, CIT_MISC, io->ci_obj);
	if (rc) {
		cl_io_fini(env, io);
		cl_env_put(env, &refcheck);
		/* Does not make sense to take GL for released layout */
		if (rc > 0)
			rc = -ENOTSUPP;
		return rc;
	}

	/* Describe a whole-file group lock for the given gid. */
	descr = &ccc_env_info(env)->cti_descr;
	descr->cld_obj = obj;
	descr->cld_start = 0;
	descr->cld_end = CL_PAGE_EOF;
	descr->cld_gid = gid;
	descr->cld_mode = CLM_GROUP;

	enqflags = CEF_MUST | (nonblock ? CEF_NONBLOCK : 0);
	descr->cld_enq_flags = enqflags;

	lock = cl_lock_request(env, io, descr, GROUPLOCK_SCOPE, current);
	if (IS_ERR(lock)) {
		cl_io_fini(env, io);
		cl_env_put(env, &refcheck);
		return PTR_ERR(lock);
	}

	/* Hand an extra env reference to the caller through *cg. */
	cg->cg_env = cl_env_get(&refcheck);
	cg->cg_io = io;
	cg->cg_lock = lock;
	cg->cg_gid = gid;
	LASSERT(cg->cg_env == env);

	cl_env_unplant(env, &refcheck);
	return 0;
}
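
/*
 * A minimal caller sketch (not part of the original source), loosely
 * modelled on ll_get_grouplock() in llite/file.c: take the group lock,
 * run the IO it covers, then release everything through the counterpart
 * cl_put_grouplock().  "example_with_grouplock" is a hypothetical name
 * used for illustration only.
 */
static int example_with_grouplock(struct inode *inode, unsigned long gid,
				  int nonblock)
{
	struct ccc_grouplock grouplock;
	int rc;

	rc = cl_get_grouplock(ll_i2info(inode)->lli_clob, gid, nonblock,
			      &grouplock);
	if (rc)
		return rc;

	/* ... IO covered by the group lock runs here ... */

	/* Releases the lock and drops the io and env taken above. */
	cl_put_grouplock(&grouplock);
	return 0;
}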
static int cl_io_get(struct inode *inode, struct lu_env **envout,
		     struct cl_io **ioout, int *refcheck)
{
	struct lu_env *env;
	struct cl_io *io;
	struct cl_inode_info *lli = cl_i2info(inode);
	struct cl_object *clob = lli->lli_clob;
	int result;

	/* Only regular files carry a cl_object; +1 signals that an
	 * env/io pair was set up for the caller.
	 */
	if (S_ISREG(cl_inode_mode(inode))) {
		env = cl_env_get(refcheck);
		if (!IS_ERR(env)) {
			io = ccc_env_thread_io(env);
			io->ci_obj = clob;
			*envout = env;
			*ioout = io;
			result = +1;
		} else {
			result = PTR_ERR(env);
		}
	} else {
		result = 0;
	}
	return result;
}
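
/*
 * A hedged usage sketch (not in the original source) of the calling
 * convention cl_io_get() expects, along the lines of callers such as
 * cl_glimpse_size0(): result > 0 means an env/io pair was handed out and
 * must be torn down with cl_io_fini()/cl_env_put(); 0 means a
 * non-regular file with nothing to do.  "example_misc_io" is a
 * hypothetical name.
 */
static int example_misc_io(struct inode *inode)
{
	struct lu_env *env = NULL;
	struct cl_io *io = NULL;
	int refcheck;
	int result;

	result = cl_io_get(inode, &env, &io, &refcheck);
	if (result <= 0)
		return result;	/* error, or non-regular file: no io needed */

	result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
	if (result == 0) {
		/* ... drive the initialized io here ... */
	} else if (result > 0) {
		/* cl_io_init() reported there is nothing to do */
		result = 0;
	}

	cl_io_fini(env, io);
	cl_env_put(env, &refcheck);
	return result;
}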
/**
 * API-independent part of page fault initialization.
 * \param vma - virtual memory area addressed by the page fault
 * \param env_ret - corresponding lu_env for processing
 * \param nest - nesting level
 * \param index - page index corresponding to the fault.
 * \param ra_flags - vma readahead flags.
 *
 * \return allocated and initialized cl_io for the fault operation.
 * \retval -EINVAL if the env cannot be allocated
 * \return other error codes from cl_io_init.
 */
static struct cl_io *
ll_fault_io_init(struct vm_area_struct *vma, struct lu_env **env_ret,
		 struct cl_env_nest *nest, pgoff_t index,
		 unsigned long *ra_flags)
{
	struct file *file = vma->vm_file;
	struct inode *inode = file_inode(file);
	struct cl_io *io;
	struct cl_fault_io *fio;
	struct lu_env *env;
	int rc;

	*env_ret = NULL;
	if (ll_file_nolock(file))
		return ERR_PTR(-EOPNOTSUPP);

	/*
	 * A page fault can be triggered while Lustre IO is already active
	 * for the current thread, e.g., when doing read/write against a
	 * user-level buffer mapped from a Lustre buffer.  To avoid stomping
	 * on the existing context, force the allocation of a nested one.
	 */
	env = cl_env_nested_get(nest);
	if (IS_ERR(env))
		return ERR_PTR(-EINVAL);

	*env_ret = env;

	io = ccc_env_thread_io(env);
	io->ci_obj = ll_i2info(inode)->lli_clob;
	LASSERT(io->ci_obj);

	fio = &io->u.ci_fault;
	fio->ft_index = index;
	fio->ft_executable = vma->vm_flags & VM_EXEC;

	/*
	 * Disable VM_SEQ_READ and use VM_RAND_READ to make sure that
	 * the kernel will not read other pages not covered by ldlm in
	 * filemap_nopage.  We do our readahead in ll_readpage.
	 */
	if (ra_flags)
		*ra_flags = vma->vm_flags & (VM_RAND_READ | VM_SEQ_READ);
	vma->vm_flags &= ~VM_SEQ_READ;
	vma->vm_flags |= VM_RAND_READ;

	CDEBUG(D_MMAP, "vm_flags: %lx (%lu %d)\n", vma->vm_flags,
	       fio->ft_index, fio->ft_executable);

	rc = cl_io_init(env, io, CIT_FAULT, io->ci_obj);
	if (rc == 0) {
		struct ccc_io *cio = ccc_env_io(env);
		struct ll_file_data *fd = LUSTRE_FPRIVATE(file);

		LASSERT(cio->cui_cl.cis_io == io);

		/* mmap lock must be MANDATORY as it has to cache pages. */
		io->ci_lockreq = CILR_MANDATORY;
		cio->cui_fd = fd;
	} else {
		LASSERT(rc < 0);
		cl_io_fini(env, io);
		cl_env_nested_put(nest, env);
		io = ERR_PTR(rc);
	}

	return io;
}
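
/*
 * A simplified sketch (not in the original source) of how
 * ll_fault_io_init() is consumed, loosely following ll_fault0() in
 * llite_mmap.c: build the CIT_FAULT io, run the io loop, then tear down
 * the nested env and restore the saved readahead flags.  The wiring of
 * the vm_fault state into the io (done by the real ll_fault0()) is
 * omitted, "example_fault" is a hypothetical name, and to_fault_error()
 * is the errno-to-VM_FAULT_* helper in llite_mmap.c.
 */
static int example_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct lu_env *env;
	struct cl_env_nest nest;
	struct cl_io *io;
	unsigned long ra_flags;
	int result;

	io = ll_fault_io_init(vma, &env, &nest, vmf->pgoff, &ra_flags);
	if (IS_ERR(io))
		return to_fault_error(PTR_ERR(io));

	result = io->ci_result;
	if (result == 0)
		result = cl_io_loop(env, io);

	cl_io_fini(env, io);
	cl_env_nested_put(&nest, env);

	/* Restore the readahead flags saved by ll_fault_io_init(). */
	vma->vm_flags |= ra_flags;

	return to_fault_error(result);
}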