/**
 * Discard pages protected by the given lock. This function traverses the
 * radix tree to find all covering pages and discards them. If a page is
 * covered by another lock, it should remain in cache.
 *
 * If an error happens at any step, the process continues anyway (the
 * reasoning behind this being that lock cancellation cannot be delayed
 * indefinitely).
 */
static int mdc_lock_discard_pages(const struct lu_env *env,
                                  struct osc_object *osc,
                                  pgoff_t start, pgoff_t end,
                                  bool discard)
{
        struct osc_thread_info *info = osc_env_info(env);
        struct cl_io *io = &info->oti_io;
        osc_page_gang_cbt cb;
        int res;
        int result;

        ENTRY;

        io->ci_obj = cl_object_top(osc2cl(osc));
        io->ci_ignore_layout = 1;
        result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
        if (result != 0)
                GOTO(out, result);

        cb = discard ? osc_discard_cb : mdc_check_and_discard_cb;
        info->oti_fn_index = info->oti_next_index = start;
        do {
                res = osc_page_gang_lookup(env, io, osc, info->oti_next_index,
                                           end, cb, (void *)osc);
                if (info->oti_next_index > end)
                        break;

                if (res == CLP_GANG_RESCHED)
                        cond_resched();
        } while (res != CLP_GANG_OKAY);
out:
        cl_io_fini(env, io);
        RETURN(result);
}
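For context, here is a hedged sketch of how a lock-cancellation path might drive this helper: write-mode locks first flush covered dirty pages, then the cached pages are discarded. The wrapper name and the call to osc_cache_writeback_range() follow the usual osc/mdc convention but are assumptions for illustration, not part of the snippet above.

/*
 * Hedged sketch of a caller (assumed names): flush dirty pages for CLM_WRITE
 * locks first, then discard the pages protected only by the cancelled lock.
 */
static int mdc_lock_flush_sketch(const struct lu_env *env,
                                 struct osc_object *obj, pgoff_t start,
                                 pgoff_t end, enum cl_lock_mode mode,
                                 bool discard)
{
        int result = 0;
        int rc;

        if (mode == CLM_WRITE) {
                /* write out covered dirty pages before dropping them */
                result = osc_cache_writeback_range(env, obj, start, end,
                                                   1, discard);
                if (result > 0)
                        result = 0;
        }

        /* walk the radix tree and drop pages only this lock protects */
        rc = mdc_lock_discard_pages(env, obj, start, end, discard);
        if (result == 0 && rc < 0)
                result = rc;

        return result;
}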
/**
 * API-independent part of page fault initialization.
 * \param env      - corresponding lu_env for processing
 * \param vma      - virtual memory area the fault is addressed to
 * \param index    - page index corresponding to the fault
 * \param ra_flags - vma readahead flags
 *
 * \return error codes from cl_io_init.
 */
static struct cl_io *
ll_fault_io_init(struct lu_env *env, struct vm_area_struct *vma,
                 pgoff_t index, unsigned long *ra_flags)
{
        struct file *file = vma->vm_file;
        struct inode *inode = file_inode(file);
        struct cl_io *io;
        struct cl_fault_io *fio;
        int rc;

        ENTRY;

        if (ll_file_nolock(file))
                RETURN(ERR_PTR(-EOPNOTSUPP));

restart:
        io = vvp_env_thread_io(env);
        io->ci_obj = ll_i2info(inode)->lli_clob;
        LASSERT(io->ci_obj != NULL);

        fio = &io->u.ci_fault;
        fio->ft_index = index;
        fio->ft_executable = vma->vm_flags & VM_EXEC;

        /*
         * disable VM_SEQ_READ and use VM_RAND_READ to make sure that
         * the kernel will not read other pages not covered by ldlm in
         * filemap_nopage. we do our readahead in ll_readpage.
         */
        if (ra_flags != NULL)
                *ra_flags = vma->vm_flags & (VM_RAND_READ | VM_SEQ_READ);
        vma->vm_flags &= ~VM_SEQ_READ;
        vma->vm_flags |= VM_RAND_READ;

        CDEBUG(D_MMAP, "vm_flags: %lx (%lu %d)\n", vma->vm_flags,
               fio->ft_index, fio->ft_executable);

        rc = cl_io_init(env, io, CIT_FAULT, io->ci_obj);
        if (rc == 0) {
                struct vvp_io *vio = vvp_env_io(env);
                struct ll_file_data *fd = LUSTRE_FPRIVATE(file);

                LASSERT(vio->vui_cl.cis_io == io);

                /* mmap lock must be MANDATORY because it has to cache
                 * pages. */
                io->ci_lockreq = CILR_MANDATORY;
                vio->vui_fd = fd;
        } else {
                LASSERT(rc < 0);
                cl_io_fini(env, io);
                if (io->ci_need_restart)
                        goto restart;

                io = ERR_PTR(rc);
        }

        RETURN(io);
}
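A hedged sketch of the consumer side may help: the fault handler takes the initialized io, fills in the vvp-specific fault context, runs the generic cl_io state machine, and then restores the readahead hints cleared above. The helper name, the vio->u.fault field names and the flag restoration follow the vvp convention but are assumptions here, not code taken from this excerpt.

/*
 * Hedged sketch of a fault handler built on ll_fault_io_init() (assumed
 * structure; simplified error handling).
 */
static int ll_fault_sketch(struct lu_env *env, struct vm_area_struct *vma,
                           struct vm_fault *vmf)
{
        struct cl_io *io;
        struct vvp_io *vio;
        unsigned long ra_flags;
        int result;

        io = ll_fault_io_init(env, vma, vmf->pgoff, &ra_flags);
        if (IS_ERR(io))
                return PTR_ERR(io);

        result = io->ci_result;
        if (result == 0) {
                vio = vvp_env_io(env);
                vio->u.fault.ft_vma = vma;
                vio->u.fault.ft_vmpage = NULL;
                vio->u.fault.ft_vmf = vmf;

                /* drive the layered io through the usual state machine */
                result = cl_io_loop(env, io);
        }
        cl_io_fini(env, io);

        /* restore the readahead hints saved by ll_fault_io_init() */
        vma->vm_flags |= ra_flags;

        return result;
}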
int cl_get_grouplock(struct cl_object *obj, unsigned long gid, int nonblock,
                     struct ccc_grouplock *cg)
{
        struct lu_env *env;
        struct cl_io *io;
        struct cl_lock *lock;
        struct cl_lock_descr *descr;
        __u32 enqflags;
        int refcheck;
        int rc;

        env = cl_env_get(&refcheck);
        if (IS_ERR(env))
                return PTR_ERR(env);

        io = ccc_env_thread_io(env);
        io->ci_obj = obj;
        io->ci_ignore_layout = 1;

        rc = cl_io_init(env, io, CIT_MISC, io->ci_obj);
        if (rc) {
                cl_io_fini(env, io);
                cl_env_put(env, &refcheck);
                /* Does not make sense to take GL for released layout */
                if (rc > 0)
                        rc = -ENOTSUPP;
                return rc;
        }

        descr = &ccc_env_info(env)->cti_descr;
        descr->cld_obj = obj;
        descr->cld_start = 0;
        descr->cld_end = CL_PAGE_EOF;
        descr->cld_gid = gid;
        descr->cld_mode = CLM_GROUP;

        enqflags = CEF_MUST | (nonblock ? CEF_NONBLOCK : 0);
        descr->cld_enq_flags = enqflags;

        lock = cl_lock_request(env, io, descr, GROUPLOCK_SCOPE, current);
        if (IS_ERR(lock)) {
                cl_io_fini(env, io);
                cl_env_put(env, &refcheck);
                return PTR_ERR(lock);
        }

        cg->cg_env = cl_env_get(&refcheck);
        cg->cg_io = io;
        cg->cg_lock = lock;
        cg->cg_gid = gid;
        LASSERT(cg->cg_env == env);

        cl_env_unplant(env, &refcheck);
        return 0;
}
int cl_get_grouplock(struct cl_object *obj, unsigned long gid, int nonblock,
                     struct ll_grouplock *lg)
{
        struct lu_env *env;
        struct cl_io *io;
        struct cl_lock *lock;
        struct cl_lock_descr *descr;
        __u32 enqflags;
        __u16 refcheck;
        int rc;

        env = cl_env_get(&refcheck);
        if (IS_ERR(env))
                return PTR_ERR(env);

        io = vvp_env_thread_io(env);
        io->ci_obj = obj;

        rc = cl_io_init(env, io, CIT_MISC, io->ci_obj);
        if (rc != 0) {
                cl_io_fini(env, io);
                cl_env_put(env, &refcheck);
                /* Does not make sense to take GL for released layout */
                if (rc > 0)
                        rc = -ENOTSUPP;
                return rc;
        }

        lock = vvp_env_lock(env);
        descr = &lock->cll_descr;
        descr->cld_obj = obj;
        descr->cld_start = 0;
        descr->cld_end = CL_PAGE_EOF;
        descr->cld_gid = gid;
        descr->cld_mode = CLM_GROUP;

        enqflags = CEF_MUST | (nonblock ? CEF_NONBLOCK : 0);
        descr->cld_enq_flags = enqflags;

        rc = cl_lock_request(env, io, lock);
        if (rc < 0) {
                cl_io_fini(env, io);
                cl_env_put(env, &refcheck);
                return rc;
        }

        lg->lg_env = env;
        lg->lg_io = io;
        lg->lg_lock = lock;
        lg->lg_gid = gid;

        return 0;
}
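The environment, io and lock saved in struct ll_grouplock are released again by the matching put path. A hedged sketch of that release helper (a cl_put_grouplock()-style counterpart; the name and LASSERTs are assumptions here) tears everything down in reverse order:

/*
 * Hedged sketch of the matching release path for the grouplock acquired
 * above (assumed helper name).
 */
void cl_put_grouplock_sketch(struct ll_grouplock *lg)
{
        struct lu_env *env = lg->lg_env;
        struct cl_io *io = lg->lg_io;
        struct cl_lock *lock = lg->lg_lock;

        LASSERT(lg->lg_env != NULL);
        LASSERT(lg->lg_gid != 0);

        /* drop the group lock, then the io and the environment */
        cl_lock_release(env, lock);
        cl_io_fini(env, io);
        cl_env_put(env, NULL);
}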
int cl_get_grouplock(struct cl_object *obj, unsigned long gid, int nonblock,
                     struct ccc_grouplock *cg)
{
        struct lu_env *env;
        struct cl_io *io;
        struct cl_lock *lock;
        struct cl_lock_descr *descr;
        __u32 enqflags;
        int refcheck;
        int rc;

        env = cl_env_get(&refcheck);
        if (IS_ERR(env))
                return PTR_ERR(env);

        io = &ccc_env_info(env)->cti_io;
        io->ci_obj = obj;

        rc = cl_io_init(env, io, CIT_MISC, io->ci_obj);
        if (rc) {
                LASSERT(rc < 0);
                cl_env_put(env, &refcheck);
                return rc;
        }

        descr = &ccc_env_info(env)->cti_descr;
        descr->cld_obj = obj;
        descr->cld_start = 0;
        descr->cld_end = CL_PAGE_EOF;
        descr->cld_gid = gid;
        descr->cld_mode = CLM_GROUP;

        enqflags = CEF_MUST | (nonblock ? CEF_NONBLOCK : 0);
        descr->cld_enq_flags = enqflags;

        lock = cl_lock_request(env, io, descr, GROUPLOCK_SCOPE, cfs_current());
        if (IS_ERR(lock)) {
                cl_io_fini(env, io);
                cl_env_put(env, &refcheck);
                return PTR_ERR(lock);
        }

        cg->cg_env = cl_env_get(&refcheck);
        cg->cg_lock = lock;
        cg->cg_gid = gid;
        LASSERT(cg->cg_env == env);

        cl_env_unplant(env, &refcheck);
        return 0;
}
int cl_glimpse_size0(struct inode *inode, int agl)
{
        /*
         * We don't need the ast_flags argument to cl_glimpse_size(), because
         * osc_lock_enqueue() takes care of the possible deadlock that said
         * argument was introduced to avoid.
         */
        /*
         * XXX but note that ll_file_seek() passes LDLM_FL_BLOCK_NOWAIT to
         * cl_glimpse_size(), which doesn't make sense: glimpse locks are not
         * blocking anyway.
         */
        struct lu_env *env = NULL;
        struct cl_io *io = NULL;
        __u16 refcheck;
        int retried = 0;
        int result;

        ENTRY;

        result = cl_io_get(inode, &env, &io, &refcheck);
        if (result <= 0)
                RETURN(result);

        do {
                io->ci_ndelay_tried = retried++;
                io->ci_ndelay = io->ci_verify_layout = 1;
                result = cl_io_init(env, io, CIT_GLIMPSE, io->ci_obj);
                if (result > 0) {
                        /*
                         * nothing to do for this io. This currently happens
                         * when stripe sub-objects are not yet created.
                         */
                        result = io->ci_result;
                } else if (result == 0) {
                        result = cl_glimpse_lock(env, io, inode, io->ci_obj,
                                                 agl);
                        if (!agl && result == -EWOULDBLOCK)
                                io->ci_need_restart = 1;
                }

                OBD_FAIL_TIMEOUT(OBD_FAIL_GLIMPSE_DELAY, 2);
                cl_io_fini(env, io);
        } while (unlikely(io->ci_need_restart));

        cl_env_put(env, &refcheck);
        RETURN(result);
}
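Callers usually reach this entry point through thin inline wrappers that fix the agl argument. A hedged sketch of how such wrappers commonly look (the wrapper names mirror the usual llite convention but are assumptions here):

/*
 * Hedged sketch: wrappers selecting between a regular glimpse and an
 * asynchronous glimpse lock (AGL) request.
 */
static inline int cl_glimpse_size(struct inode *inode)
{
        return cl_glimpse_size0(inode, 0);
}

static inline int cl_agl(struct inode *inode)
{
        return cl_glimpse_size0(inode, 1);
}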
int cl_local_size(struct inode *inode)
{
        struct lu_env *env = NULL;
        struct cl_io *io = NULL;
        struct ccc_thread_info *cti;
        struct cl_object *clob;
        struct cl_lock_descr *descr;
        struct cl_lock *lock;
        int result;
        int refcheck;

        ENTRY;

        if (!cl_i2info(inode)->lli_has_smd)
                RETURN(0);

        result = cl_io_get(inode, &env, &io, &refcheck);
        if (result <= 0)
                RETURN(result);

        clob = io->ci_obj;
        result = cl_io_init(env, io, CIT_MISC, clob);
        if (result > 0) {
                result = io->ci_result;
        } else if (result == 0) {
                cti = ccc_env_info(env);
                descr = &cti->cti_descr;

                *descr = whole_file;
                descr->cld_obj = clob;
                lock = cl_lock_peek(env, io, descr, "localsize", current);
                if (lock != NULL) {
                        cl_merge_lvb(env, inode);
                        cl_unuse(env, lock);
                        cl_lock_release(env, lock, "localsize", current);
                        result = 0;
                } else {
                        result = -ENODATA;
                }
        }
        cl_io_fini(env, io);
        cl_env_put(env, &refcheck);
        RETURN(result);
}
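The whole_file descriptor copied into cti_descr above describes a lock extent spanning the entire file. A hedged sketch of how such a descriptor is conventionally defined (the initializer values are an assumption here, not taken from this excerpt):

/*
 * Hedged sketch of the whole_file descriptor used by cl_local_size():
 * a read-mode extent covering every page of the file.
 */
static const struct cl_lock_descr whole_file = {
        .cld_start = 0,
        .cld_end   = CL_PAGE_EOF,
        .cld_mode  = CLM_READ
};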
/**
 * API-independent part of page fault initialization.
 * \param vma      - virtual memory area the fault is addressed to
 * \param env_ret  - corresponding lu_env for processing
 * \param nest     - nesting level
 * \param index    - page index corresponding to the fault
 * \param ra_flags - vma readahead flags
 *
 * \return allocated and initialized env for the fault operation.
 * \retval EINVAL if the env cannot be allocated
 * \return other error codes from cl_io_init.
 */
static struct cl_io *
ll_fault_io_init(struct vm_area_struct *vma, struct lu_env **env_ret,
                 struct cl_env_nest *nest, pgoff_t index,
                 unsigned long *ra_flags)
{
        struct file *file = vma->vm_file;
        struct inode *inode = file_inode(file);
        struct cl_io *io;
        struct cl_fault_io *fio;
        struct lu_env *env;
        int rc;

        *env_ret = NULL;
        if (ll_file_nolock(file))
                return ERR_PTR(-EOPNOTSUPP);

        /*
         * page fault can be called when lustre IO is
         * already active for the current thread, e.g., when doing read/write
         * against user level buffer mapped from Lustre buffer. To avoid
         * stomping on existing context, optionally force an allocation of a
         * new one.
         */
        env = cl_env_nested_get(nest);
        if (IS_ERR(env))
                return ERR_PTR(-EINVAL);

        *env_ret = env;

        io = ccc_env_thread_io(env);
        io->ci_obj = ll_i2info(inode)->lli_clob;
        LASSERT(io->ci_obj);

        fio = &io->u.ci_fault;
        fio->ft_index = index;
        fio->ft_executable = vma->vm_flags & VM_EXEC;

        /*
         * disable VM_SEQ_READ and use VM_RAND_READ to make sure that
         * the kernel will not read other pages not covered by ldlm in
         * filemap_nopage. we do our readahead in ll_readpage.
         */
        if (ra_flags)
                *ra_flags = vma->vm_flags & (VM_RAND_READ | VM_SEQ_READ);
        vma->vm_flags &= ~VM_SEQ_READ;
        vma->vm_flags |= VM_RAND_READ;

        CDEBUG(D_MMAP, "vm_flags: %lx (%lu %d)\n", vma->vm_flags,
               fio->ft_index, fio->ft_executable);

        rc = cl_io_init(env, io, CIT_FAULT, io->ci_obj);
        if (rc == 0) {
                struct ccc_io *cio = ccc_env_io(env);
                struct ll_file_data *fd = LUSTRE_FPRIVATE(file);

                LASSERT(cio->cui_cl.cis_io == io);

                /* mmap lock must be MANDATORY because it has to cache pages. */
                io->ci_lockreq = CILR_MANDATORY;
                cio->cui_fd = fd;
        } else {
                LASSERT(rc < 0);
                cl_io_fini(env, io);
                cl_env_nested_put(nest, env);
                io = ERR_PTR(rc);
        }

        return io;
}