static int lov_page_cache_add(const struct lu_env *env,
                              const struct cl_page_slice *slice,
                              struct cl_io *io)
{
        struct lov_io     *lio = lov_env_io(env);
        struct lov_io_sub *sub;
        int rc = 0;

        LINVRNT(lov_page_invariant(slice));
        LINVRNT(!cl2lov_page(slice)->lps_invalid);

        sub = lov_page_subio(env, lio, slice);
        if (!IS_ERR(sub)) {
                rc = cl_page_cache_add(sub->sub_env, sub->sub_io,
                                       slice->cpl_page->cp_child, CRT_WRITE);
                lov_sub_put(sub);
        } else {
                rc = PTR_ERR(sub);
                CL_PAGE_DEBUG(D_ERROR, env, slice->cpl_page,
                              "rc = %d\n", rc);
        }
        return rc;
}
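/*
 * For context, a hedged sketch (not verbatim from lov_page.c, and the
 * exact fields may differ between Lustre versions) of how a slice method
 * like lov_page_cache_add() is typically registered: the LOV layer places
 * it in its cl_page_operations vector under the per-request-type io[]
 * table, so cl_page_cache_add() on the top-level page reaches it for
 * CRT_WRITE and it can forward the call to the sub-io of the stripe that
 * owns the page.
 */
static const struct cl_page_operations lov_page_ops_sketch = {
        /* ... .cpo_fini, .cpo_own, .cpo_print, etc. ... */
        .io = {
                [CRT_WRITE] = {
                        .cpo_cache_add = lov_page_cache_add
                }
        }
};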
static int vvp_io_commit_write(const struct lu_env *env,
                               const struct cl_io_slice *ios,
                               const struct cl_page_slice *slice,
                               unsigned from, unsigned to)
{
        struct cl_object     *obj    = slice->cpl_obj;
        struct cl_io         *io     = ios->cis_io;
        struct ccc_page      *cp     = cl2ccc_page(slice);
        struct cl_page       *pg     = slice->cpl_page;
        struct inode         *inode  = ccc_object_inode(obj);
        struct ll_sb_info    *sbi    = ll_i2sbi(inode);
        struct ll_inode_info *lli    = ll_i2info(inode);
        struct page          *vmpage = cp->cpg_page;

        int    result;
        int    tallyop;
        loff_t size;

        ENTRY;

        LINVRNT(cl_page_is_vmlocked(env, pg));
        LASSERT(vmpage->mapping->host == inode);

        LU_OBJECT_HEADER(D_INODE, env, &obj->co_lu, "committing page write\n");
        CL_PAGE_HEADER(D_PAGE, env, pg, "committing: [%d, %d]\n", from, to);

        /*
         * Queue a write for some time in the future the first time we
         * dirty the page.
         *
         * This is different from what other file systems do: they usually
         * just mark the page (and some of its buffers) dirty and rely on
         * balance_dirty_pages() to start a write-back. Lustre wants
         * write-back to be started earlier for the following reasons:
         *
         *     (1) with a large number of clients we need to limit the
         *     amount of cached data on the clients a lot;
         *
         *     (2) large compute jobs generally want compute-only then
         *     io-only, and the IO should complete as quickly as possible;
         *
         *     (3) IO is batched up to the RPC size and is async until the
         *     client max cache is hit
         *     (/proc/fs/lustre/osc/OSC.../max_dirty_mb)
         */
        if (!PageDirty(vmpage)) {
                tallyop = LPROC_LL_DIRTY_MISSES;
                result = cl_page_cache_add(env, io, pg, CRT_WRITE);
                if (result == 0) {
                        /* page was added into cache successfully. */
                        set_page_dirty(vmpage);
                        vvp_write_pending(cl2ccc(obj), cp);
                } else if (result == -EDQUOT) {
                        pgoff_t last_index = i_size_read(inode) >>
                                             PAGE_CACHE_SHIFT;
                        bool need_clip = true;

                        /*
                         * Client ran out of disk space grant. Possible
                         * strategies are:
                         *
                         *     (a) do a sync write, renewing grant;
                         *
                         *     (b) stop writing on this stripe, switch to
                         *     the next one.
                         *
                         * (b) is a part of the "parallel io" design that
                         * is the ultimate goal. (a) is what the "old"
                         * client did, and what the new code continues to
                         * do for the time being.
                         */
                        if (last_index > pg->cp_index) {
                                to = PAGE_CACHE_SIZE;
                                need_clip = false;
                        } else if (last_index == pg->cp_index) {
                                int size_to = i_size_read(inode) &
                                              ~CFS_PAGE_MASK;
                                if (to < size_to)
                                        to = size_to;
                        }
                        if (need_clip)
                                cl_page_clip(env, pg, 0, to);
                        result = vvp_page_sync_io(env, io, pg, cp, CRT_WRITE);
                        if (result)
                                CERROR("Write page %lu of inode %p failed %d\n",
                                       pg->cp_index, inode, result);
                }
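/*
 * Illustrative sketch only (a hypothetical helper, not part of the
 * driver): the -EDQUOT fallback above decides how much of the page to
 * flush with a synchronous write once the grant is exhausted. Factored
 * out, that clip decision looks like this:
 */
static unsigned vvp_sync_write_to(loff_t isize, pgoff_t index, unsigned to)
{
        pgoff_t last_index = isize >> PAGE_CACHE_SHIFT;

        if (last_index > index)
                return PAGE_CACHE_SIZE;         /* page fully below EOF */
        if (last_index == index) {
                unsigned size_to = isize & ~CFS_PAGE_MASK;

                if (to < size_to)
                        to = size_to;           /* EOF page: cover up to i_size */
        }
        return to;                              /* beyond EOF: keep caller's range */
}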
static int vvp_io_fault_start(const struct lu_env *env,
                              const struct cl_io_slice *ios)
{
        struct vvp_io       *vio   = cl2vvp_io(env, ios);
        struct cl_io        *io    = ios->cis_io;
        struct cl_object    *obj   = io->ci_obj;
        struct inode        *inode = ccc_object_inode(obj);
        struct cl_fault_io  *fio   = &io->u.ci_fault;
        struct vvp_fault_io *cfio  = &vio->u.fault;
        loff_t               offset;
        int                  result = 0;
        struct page         *vmpage = NULL;
        struct cl_page      *page;
        loff_t               size;
        pgoff_t              last; /* last page in a file data region */

        if (fio->ft_executable &&
            LTIME_S(inode->i_mtime) != vio->u.fault.ft_mtime)
                CWARN("binary "DFID" changed while waiting for the page fault lock\n",
                      PFID(lu_object_fid(&obj->co_lu)));

        /* offset of the last byte on the page */
        offset = cl_offset(obj, fio->ft_index + 1) - 1;
        LASSERT(cl_index(obj, offset) == fio->ft_index);
        result = ccc_prep_size(env, obj, io, 0, offset + 1, NULL);
        if (result != 0)
                return result;

        /* must return locked page */
        if (fio->ft_mkwrite) {
                LASSERT(cfio->ft_vmpage != NULL);
                lock_page(cfio->ft_vmpage);
        } else {
                result = vvp_io_kernel_fault(cfio);
                if (result != 0)
                        return result;
        }

        vmpage = cfio->ft_vmpage;
        LASSERT(PageLocked(vmpage));

        if (OBD_FAIL_CHECK(OBD_FAIL_LLITE_FAULT_TRUNC_RACE))
                ll_invalidate_page(vmpage);

        size = i_size_read(inode);
        /* Though we have already held a cl_lock upon this page, it can
         * still be truncated locally. */
        if (unlikely((vmpage->mapping != inode->i_mapping) ||
                     (page_offset(vmpage) > size))) {
                CDEBUG(D_PAGE, "llite: fault and truncate race happened!\n");

                /* return +1 to stop cl_io_loop(); ll_fault() will catch
                 * it and retry. */
                GOTO(out, result = +1);
        }

        if (fio->ft_mkwrite) {
                pgoff_t last_index;
                /*
                 * Capture the size while holding the lli_trunc_sem from
                 * above; we want to make sure that we complete the
                 * mkwrite action while holding this lock. We need to make
                 * sure that we are not past the end of the file.
                 */
                last_index = cl_index(obj, size - 1);
                if (last_index < fio->ft_index) {
                        CDEBUG(D_PAGE,
                               "llite: mkwrite and truncate race happened: %p: 0x%lx 0x%lx\n",
                               vmpage->mapping, fio->ft_index, last_index);
                        /*
                         * We need to return if we are past the end of the
                         * file. This will propagate up the call stack to
                         * ll_page_mkwrite, where we will return
                         * VM_FAULT_NOPAGE. Any non-negative value
                         * returned here will be silently converted to 0.
                         * If vmpage->mapping is NULL, the error code is
                         * converted back to ENODATA in ll_page_mkwrite0.
                         * Thus we return -ENODATA to handle both cases.
                         */
                        GOTO(out, result = -ENODATA);
                }
        }

        page = cl_page_find(env, obj, fio->ft_index, vmpage, CPT_CACHEABLE);
        if (IS_ERR(page))
                GOTO(out, result = PTR_ERR(page));

        /* if the page is going to be written, we should add it into the
         * cache earlier. */
        if (fio->ft_mkwrite) {
                wait_on_page_writeback(vmpage);
                if (set_page_dirty(vmpage)) {
                        struct ccc_page *cp;

                        /* vvp_page_assume() calls wait_on_page_writeback(). */
                        cl_page_assume(env, io, page);

                        cp = cl2ccc_page(cl_page_at(page, &vvp_device_type));
                        vvp_write_pending(cl2ccc(obj), cp);

                        /* Do not set the Dirty bit here so that in case
                         * IO is started before the page is really made
                         * dirty, we still have a chance to detect it. */
                        result = cl_page_cache_add(env, io, page, CRT_WRITE);
                        LASSERT(cl_page_is_owned(page, io));

                        vmpage = NULL;
                        if (result < 0) {
                                cl_page_unmap(env, io, page);
                                cl_page_discard(env, io, page);
                                cl_page_disown(env, io, page);
                                cl_page_put(env, page);

                                /* we're in big trouble, what can we do now? */
                                if (result == -EDQUOT)
                                        result = -ENOSPC;
                                GOTO(out, result);
                        } else {
                                cl_page_disown(env, io, page);
                        }
                }
        }

        last = cl_index(obj, size - 1);
        /*
         * ft_index is only used in the case of a mkwrite action. Check
         * that the assertion holds: the truncate race should have been
         * caught above.
         */
        LASSERT(!fio->ft_mkwrite || fio->ft_index <= last);
        if (fio->ft_index == last)
                /* last page is mapped partially */
                fio->ft_nob = size - cl_offset(obj, fio->ft_index);
        else
                fio->ft_nob = cl_page_size(obj);

        lu_ref_add(&page->cp_reference, "fault", io);
        fio->ft_page = page;
        EXIT;

out:
        /* return an unlocked vmpage to avoid deadlocking */
        if (vmpage != NULL)
                unlock_page(vmpage);

        cfio->fault.ft_flags &= ~VM_FAULT_LOCKED;

        return result;
}
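/*
 * Hypothetical illustration (not actual llite code) of the return-value
 * contract documented in vvp_io_fault_start() above: +1 stops
 * cl_io_loop() so that ll_fault() retries after a fault/truncate race,
 * while -ENODATA propagates up to ll_page_mkwrite0(), which turns it
 * into VM_FAULT_NOPAGE.
 */
static unsigned int mkwrite_result_to_fault_flags(int result)
{
        if (result == -ENODATA)
                return VM_FAULT_NOPAGE; /* mkwrite raced with truncate */
        if (result < 0)
                return VM_FAULT_SIGBUS; /* any other failure */
        return 0;                       /* served (0) or retried (+1) */
}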