static int vvp_io_prepare_write(const struct lu_env *env,
                                const struct cl_io_slice *ios,
                                const struct cl_page_slice *slice,
                                unsigned from, unsigned to)
{
        struct cl_object *obj    = slice->cpl_obj;
        struct ccc_page  *cp     = cl2ccc_page(slice);
        struct cl_page   *pg     = slice->cpl_page;
        struct page      *vmpage = cp->cpg_page;

        int result;

        ENTRY;

        LINVRNT(cl_page_is_vmlocked(env, pg));
        LASSERT(vmpage->mapping->host == ccc_object_inode(obj));

        result = 0;

        CL_PAGE_HEADER(D_PAGE, env, pg, "preparing: [%d, %d]\n", from, to);
        if (!PageUptodate(vmpage)) {
                /*
                 * We're completely overwriting an existing page, so _don't_
                 * set it up to date until commit_write.
                 */
                if (from == 0 && to == PAGE_CACHE_SIZE) {
                        CL_PAGE_HEADER(D_PAGE, env, pg, "full page write\n");
                        POISON_PAGE(vmpage, 0x11);
                } else
                        result = vvp_io_prepare_partial(env, ios->cis_io, obj,
                                                        pg, cp, from, to);
        } else
                CL_PAGE_HEADER(D_PAGE, env, pg, "uptodate\n");

        RETURN(result);
}

static int vvp_io_fault_iter_init(const struct lu_env *env,
                                  const struct cl_io_slice *ios)
{
        struct vvp_io *vio   = cl2vvp_io(env, ios);
        struct inode  *inode = ccc_object_inode(ios->cis_obj);

        LASSERT(inode ==
                cl2ccc_io(env, ios)->cui_fd->fd_file->f_dentry->d_inode);
        vio->u.fault.ft_mtime = LTIME_S(inode->i_mtime);
        return 0;
}

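/*
 * Note: the mtime captured in vvp_io_fault_iter_init() is compared against
 * the inode's current mtime in vvp_io_fault_start() below, which warns when
 * an executable's backing file changed while the fault was waiting for the
 * page fault lock.
 */
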
static int vvp_prune(const struct lu_env *env, struct cl_object *obj)
{
        struct inode *inode = ccc_object_inode(obj);
        int rc;
        ENTRY;

        rc = cl_sync_file_range(inode, 0, OBD_OBJECT_EOF, CL_FSYNC_ALL, 1);
        if (rc == 0)
                truncate_inode_pages(inode->i_mapping, 0);

        RETURN(rc);
}

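/*
 * Ordering note: vvp_prune() flushes the whole object (CL_FSYNC_ALL over
 * [0, OBD_OBJECT_EOF]) before calling truncate_inode_pages(), so no page
 * with unwritten data is dropped from the page cache.
 */
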
static int vvp_io_read_page(const struct lu_env *env,
                            const struct cl_io_slice *ios,
                            const struct cl_page_slice *slice)
{
        struct cl_io              *io     = ios->cis_io;
        struct cl_object          *obj    = slice->cpl_obj;
        struct ccc_page           *cp     = cl2ccc_page(slice);
        struct cl_page            *page   = slice->cpl_page;
        struct inode              *inode  = ccc_object_inode(obj);
        struct ll_sb_info         *sbi    = ll_i2sbi(inode);
        struct ll_file_data       *fd     = cl2ccc_io(env, ios)->cui_fd;
        struct ll_readahead_state *ras    = &fd->fd_ras;
        struct page               *vmpage = cp->cpg_page;
        struct cl_2queue          *queue  = &io->ci_queue;
        int rc;

        CLOBINVRNT(env, obj, ccc_object_invariant(obj));
        LASSERT(slice->cpl_obj == obj);

        ENTRY;

        if (sbi->ll_ra_info.ra_max_pages_per_file &&
            sbi->ll_ra_info.ra_max_pages)
                ras_update(sbi, inode, ras, page->cp_index,
                           cp->cpg_defer_uptodate);

        /* Sanity check whether the page is protected by a lock. */
        rc = cl_page_is_under_lock(env, io, page);
        if (rc != -EBUSY) {
                CL_PAGE_HEADER(D_WARNING, env, page, "%s: %d\n",
                               rc == -ENODATA ? "without a lock" :
                               "match failed", rc);
                if (rc != -ENODATA)
                        RETURN(rc);
        }

        if (cp->cpg_defer_uptodate) {
                cp->cpg_ra_used = 1;
                cl_page_export(env, page, 1);
        }

        /*
         * Add page into the queue even when it is marked uptodate above;
         * this will unlock it automatically as part of
         * cl_page_list_disown().
         */
        cl_2queue_add(queue, page);
        if (sbi->ll_ra_info.ra_max_pages_per_file &&
            sbi->ll_ra_info.ra_max_pages)
                ll_readahead(env, io, ras, vmpage->mapping, &queue->c2_qin,
                             fd->fd_flags);

        RETURN(0);
}

static void vvp_io_setattr_end(const struct lu_env *env,
                               const struct cl_io_slice *ios)
{
        struct cl_io *io    = ios->cis_io;
        struct inode *inode = ccc_object_inode(io->ci_obj);

        if (cl_io_is_trunc(io)) {
                /* Truncate in-memory pages - they must be clean pages
                 * because osc has already been notified to destroy the
                 * affected osc_extents. */
                vvp_do_vmtruncate(inode, io->u.ci_setattr.sa_attr.lvb_size);
                inode_dio_write_done(inode);
        }
        mutex_unlock(&inode->i_mutex);
}

static int slp_attr_get(const struct lu_env *env, struct cl_object *obj,
                        struct cl_attr *attr)
{
        struct inode *inode = ccc_object_inode(obj);
        struct intnl_stat *st = llu_i2stat(inode);

        attr->cat_size   = st->st_size;
        attr->cat_blocks = st->st_blocks;
        attr->cat_mtime  = st->st_mtime;
        attr->cat_atime  = st->st_atime;
        attr->cat_ctime  = st->st_ctime;
        /* KMS is not known by this layer. */
        return 0; /* layers below have to fill in the rest */
}

static int vvp_io_setattr_start(const struct lu_env *env,
                                const struct cl_io_slice *ios)
{
        struct cl_io *io     = ios->cis_io;
        struct inode *inode  = ccc_object_inode(io->ci_obj);
        int           result = 0;

        mutex_lock(&inode->i_mutex);
        if (cl_io_is_trunc(io))
                result = vvp_io_setattr_trunc(env, ios, inode,
                                        io->u.ci_setattr.sa_attr.lvb_size);
        if (result == 0)
                result = vvp_io_setattr_time(env, ios);
        return result;
}

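/*
 * Lock pairing: the i_mutex taken in vvp_io_setattr_start() is released in
 * vvp_io_setattr_end() above, so both the truncate (vvp_do_vmtruncate())
 * and the timestamp update run under a single hold of the inode mutex.
 */
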
static int vvp_io_read_lock(const struct lu_env *env,
                            const struct cl_io_slice *ios)
{
        struct cl_io         *io  = ios->cis_io;
        struct ll_inode_info *lli = ll_i2info(ccc_object_inode(io->ci_obj));
        int result;

        ENTRY;
        /* XXX: Layer violation, we shouldn't see lsm at llite level. */
        if (lli->lli_has_smd)
                result = vvp_io_rw_lock(env, io, CLM_READ,
                                        io->u.ci_rd.rd.crw_pos,
                                        io->u.ci_rd.rd.crw_pos +
                                        io->u.ci_rd.rd.crw_count - 1);
        else
                result = 0; /* an lsm-less file needs no lock */
        RETURN(result);
}

static int vvp_io_write_start(const struct lu_env *env,
                              const struct cl_io_slice *ios)
{
        struct ccc_io    *cio   = cl2ccc_io(env, ios);
        struct cl_io     *io    = ios->cis_io;
        struct cl_object *obj   = io->ci_obj;
        struct inode     *inode = ccc_object_inode(obj);
        struct file      *file  = cio->cui_fd->fd_file;
        ssize_t result = 0;
        loff_t  pos = io->u.ci_wr.wr.crw_pos;
        size_t  cnt = io->u.ci_wr.wr.crw_count;

        ENTRY;

        if (!can_populate_pages(env, io, inode))
                return 0;

        if (cl_io_is_append(io)) {
                /*
                 * PARALLEL IO: this has to be changed for parallel IO doing
                 * out-of-order writes.
                 */
                pos = io->u.ci_wr.wr.crw_pos = i_size_read(inode);
                cio->cui_iocb->ki_pos = pos;
        }

        CDEBUG(D_VFSTRACE, "write: [%lli, %lli)\n", pos,
               pos + (long long)cnt);

        if (cio->cui_iov == NULL) /* from a temp io in ll_cl_init() */
                result = 0;
        else
                result = lustre_generic_file_write(file, cio, &pos);

        if (result > 0) {
                if (result < cnt)
                        io->ci_continue = 0;
                io->ci_nob += result;
                ll_rw_stats_tally(ll_i2sbi(inode), current->pid, cio->cui_fd,
                                  pos, result, WRITE);
                result = 0;
        }
        RETURN(result);
}

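/*
 * Short-write protocol used above: a positive result smaller than the
 * requested count clears io->ci_continue so cl_io_loop() will not iterate
 * further, io->ci_nob accumulates the bytes actually transferred, and the
 * slice then returns 0 so the iteration itself is treated as successful.
 */
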
static int vvp_attr_get(const struct lu_env *env, struct cl_object *obj,
                        struct cl_attr *attr)
{
        struct inode *inode = ccc_object_inode(obj);

        /*
         * lov overwrites most of these fields in
         * lov_attr_get()->...lov_merge_lvb_kms(), except when inode
         * attributes are newer.
         */
        attr->cat_size   = i_size_read(inode);
        attr->cat_mtime  = LTIME_S(inode->i_mtime);
        attr->cat_atime  = LTIME_S(inode->i_atime);
        attr->cat_ctime  = LTIME_S(inode->i_ctime);
        attr->cat_blocks = inode->i_blocks;
        attr->cat_uid    = from_kuid(&init_user_ns, inode->i_uid);
        attr->cat_gid    = from_kgid(&init_user_ns, inode->i_gid);
        /* KMS is not known by this layer. */
        return 0; /* layers below have to fill in the rest */
}

static int vvp_attr_set(const struct lu_env *env, struct cl_object *obj,
                        const struct cl_attr *attr, unsigned valid)
{
        struct inode *inode = ccc_object_inode(obj);

        if (valid & CAT_UID)
                inode->i_uid = make_kuid(&init_user_ns, attr->cat_uid);
        if (valid & CAT_GID)
                inode->i_gid = make_kgid(&init_user_ns, attr->cat_gid);
        if (valid & CAT_ATIME)
                LTIME_S(inode->i_atime) = attr->cat_atime;
        if (valid & CAT_MTIME)
                LTIME_S(inode->i_mtime) = attr->cat_mtime;
        if (valid & CAT_CTIME)
                LTIME_S(inode->i_ctime) = attr->cat_ctime;
        if (0 && valid & CAT_SIZE)
                cl_isize_write_nolock(inode, attr->cat_size);
        /* not currently necessary */
        if (0 && valid & (CAT_UID | CAT_GID | CAT_SIZE))
                mark_inode_dirty(inode);
        return 0;
}

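/*
 * Illustrative sketch (not built): how a caller could push uid/gid changes
 * down to vvp_attr_set() through the generic cl_object attribute interface.
 * The cl_object_attr_lock()/cl_object_attr_set() helpers are assumed to be
 * the standard cl_object ones; the concrete values are made up.
 */
#if 0
static void example_set_owner(const struct lu_env *env, struct cl_object *obj,
                              uid_t uid, gid_t gid)
{
        struct cl_attr attr = {
                .cat_uid = uid,
                .cat_gid = gid,
        };

        cl_object_attr_lock(obj);
        cl_object_attr_set(env, obj, &attr, CAT_UID | CAT_GID);
        cl_object_attr_unlock(obj);
}
#endif
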
/* Sharing code of page_mkwrite method for rhel5 and rhel6 */
static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
                            bool *retry)
{
        struct lu_env        *env;
        struct cl_io         *io;
        struct vvp_io        *vio;
        struct cl_env_nest    nest;
        int                   result;
        sigset_t              set;
        struct inode         *inode;
        struct ll_inode_info *lli;

        io = ll_fault_io_init(vma, &env, &nest, vmpage->index, NULL);
        if (IS_ERR(io)) {
                result = PTR_ERR(io);
                goto out;
        }

        result = io->ci_result;
        if (result < 0)
                goto out_io;

        io->u.ci_fault.ft_mkwrite  = 1;
        io->u.ci_fault.ft_writable = 1;

        vio = vvp_env_io(env);
        vio->u.fault.ft_vma    = vma;
        vio->u.fault.ft_vmpage = vmpage;

        set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM));

        /* We grab lli_trunc_sem to exclude the truncate case.
         * Otherwise, we could add dirty pages into osc cache
         * while truncate is on-going. */
        inode = ccc_object_inode(io->ci_obj);
        lli = ll_i2info(inode);
        down_read(&lli->lli_trunc_sem);

        result = cl_io_loop(env, io);

        up_read(&lli->lli_trunc_sem);

        cfs_restore_sigs(set);

        if (result == 0) {
                lock_page(vmpage);
                if (vmpage->mapping == NULL) {
                        unlock_page(vmpage);

                        /* page was truncated and lock was cancelled, return
                         * ENODATA so that VM_FAULT_NOPAGE will be returned
                         * to handle_mm_fault(). */
                        if (result == 0)
                                result = -ENODATA;
                } else if (!PageDirty(vmpage)) {
                        /* race, the page has been cleaned by ptlrpcd after
                         * it was unlocked, it has to be added into dirty
                         * cache again otherwise this soon-to-dirty page won't
                         * consume any grants, even worse if this page is
                         * being transferred because it will break RPC
                         * checksum. */
                        unlock_page(vmpage);

                        CDEBUG(D_MMAP, "Race on page_mkwrite %p/%lu, page "
                               "has been written out, retry.\n",
                               vmpage, vmpage->index);

                        *retry = true;
                        result = -EAGAIN;
                }

                if (result == 0) {
                        spin_lock(&lli->lli_lock);
                        lli->lli_flags |= LLIF_DATA_MODIFIED;
                        spin_unlock(&lli->lli_lock);
                }
        }

out_io:
        cl_io_fini(env, io);
        cl_env_nested_put(&nest, env);
out:
        CDEBUG(D_MMAP, "%s mkwrite with %d\n", current->comm, result);
        LASSERT(ergo(result == 0, PageLocked(vmpage)));

        return result;
}

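/*
 * Retry contract: when ll_page_mkwrite0() returns -EAGAIN it has also set
 * *retry, so ll_page_mkwrite (the page_mkwrite handler this helper serves)
 * is expected to re-drive the fault, since ptlrpcd cleaned the page between
 * unlock and re-dirtying.
 */
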
static int slp_io_start(const struct lu_env *env, const struct cl_io_slice *ios)
{
        struct ccc_io           *cio   = cl2ccc_io(env, ios);
        struct cl_io            *io    = ios->cis_io;
        struct cl_object        *obj   = io->ci_obj;
        struct inode            *inode = ccc_object_inode(obj);
        int    err, ret;
        loff_t pos;
        long   cnt;
        struct llu_io_group    *iogroup;
        struct lustre_rw_params p = {0};
        int iovidx;
        struct intnl_stat      *st  = llu_i2stat(inode);
        struct llu_inode_info  *lli = llu_i2info(inode);
        struct llu_io_session  *session = cl2slp_io(env, ios)->sio_session;
        int write  = io->ci_type == CIT_WRITE;
        int exceed = 0;

        CLOBINVRNT(env, obj, ccc_object_invariant(obj));

        if (write) {
                pos = io->u.ci_wr.wr.crw_pos;
                cnt = io->u.ci_wr.wr.crw_count;
        } else {
                pos = io->u.ci_rd.rd.crw_pos;
                cnt = io->u.ci_rd.rd.crw_count;
        }
        if (io->u.ci_wr.wr_append) {
                p.lrp_lock_mode = LCK_PW;
        } else {
                p.lrp_brw_flags = OBD_BRW_SRVLOCK;
                p.lrp_lock_mode = LCK_NL;
        }

        iogroup = get_io_group(inode, max_io_pages(cnt, cio->cui_nrsegs), &p);
        if (IS_ERR(iogroup))
                RETURN(PTR_ERR(iogroup));

        err = ccc_prep_size(env, obj, io, pos, cnt, &exceed);
        if (err != 0 || (write == 0 && exceed != 0))
                GOTO(out, err);

        CDEBUG(D_INODE,
               "%s ino %lu, %lu bytes, offset "LPU64", i_size "LPU64"\n",
               write ? "Write" : "Read", (unsigned long)st->st_ino,
               cnt, (__u64)pos, (__u64)st->st_size);

        if (write && io->u.ci_wr.wr_append)
                pos = io->u.ci_wr.wr.crw_pos = st->st_size;
        /* XXX? Do we need to change io content too here? */
        /* XXX What about if one write syscall writes at 2 different offsets? */

        for (iovidx = 0; iovidx < cio->cui_nrsegs; iovidx++) {
                char *buf   = (char *)cio->cui_iov[iovidx].iov_base;
                long  count = cio->cui_iov[iovidx].iov_len;

                if (!count)
                        continue;
                if (cnt < count)
                        count = cnt;
                if (IS_BAD_PTR(buf) || IS_BAD_PTR(buf + count))
                        GOTO(out, err = -EFAULT);

                if (io->ci_type == CIT_READ) {
                        if (/* local_lock && */ pos >= st->st_size)
                                break;
                } else if (io->ci_type == CIT_WRITE) {
                        if (pos >= lli->lli_maxbytes)
                                GOTO(out, err = -EFBIG);
                        if (pos + count >= lli->lli_maxbytes)
                                count = lli->lli_maxbytes - pos;
                } else {
                        LBUG();
                }

                ret = llu_queue_pio(env, io, iogroup, buf, count, pos);
                if (ret < 0) {
                        GOTO(out, err = ret);
                } else {
                        io->ci_nob += ret;
                        pos += ret;
                        cnt -= ret;
                        if (io->ci_type == CIT_WRITE) {
                                /* obd_adjust_kms(exp, lsm, pos, 0); XXX */
                                if (pos > st->st_size)
                                        st->st_size = pos;
                        }
                        if (!cnt)
                                break;
                }
        }
        LASSERT(cnt == 0 || io->ci_type == CIT_READ); /* libsysio should
                                                       * guarantee this */

        if (!iogroup->lig_rc)
                session->lis_rwcount += iogroup->lig_rwcount;
        else if (!session->lis_rc)
                session->lis_rc = iogroup->lig_rc;
        err = 0;

out:
        put_io_group(iogroup);
        return err;
}

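/*
 * Locking choice above: an appending write takes a client PW lock (LCK_PW),
 * presumably because the append offset is sampled from st->st_size and must
 * stay stable, while non-append io is sent with OBD_BRW_SRVLOCK and LCK_NL,
 * leaving the extent locking to the server.
 */
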
static int llu_queue_pio(const struct lu_env *env, struct cl_io *io,
                         struct llu_io_group *group,
                         char *buf, size_t count, loff_t pos)
{
        struct cl_object  *obj   = io->ci_obj;
        struct inode      *inode = ccc_object_inode(obj);
        struct intnl_stat *st    = llu_i2stat(inode);
        struct obd_export *exp   = llu_i2obdexp(inode);
        struct page       *page;
        int  rc = 0, ret_bytes = 0;
        struct cl_page    *clp;
        struct cl_2queue  *queue;
        ENTRY;

        if (!exp)
                RETURN(-EINVAL);

        queue = &io->ci_queue;
        cl_2queue_init(queue);

        /* prepare the pages array */
        do {
                unsigned long index, offset, bytes;

                offset = (pos & ~CFS_PAGE_MASK);
                index  = pos >> PAGE_CACHE_SHIFT;
                bytes  = PAGE_CACHE_SIZE - offset;
                if (bytes > count)
                        bytes = count;

                /* prevent read beyond file range */
                if (/* local_lock && */
                    io->ci_type == CIT_READ && pos + bytes >= st->st_size) {
                        if (pos >= st->st_size)
                                break;
                        bytes = st->st_size - pos;
                }

                /* prepare page for this index */
                page = llu_get_user_page(index, buf - offset, offset, bytes);
                if (!page) {
                        rc = -ENOMEM;
                        break;
                }

                clp = cl_page_find(env, obj, cl_index(obj, pos), page,
                                   CPT_TRANSIENT);
                if (IS_ERR(clp)) {
                        rc = PTR_ERR(clp);
                        break;
                }

                rc = cl_page_own(env, io, clp);
                if (rc) {
                        LASSERT(clp->cp_state == CPS_FREEING);
                        cl_page_put(env, clp);
                        break;
                }

                cl_2queue_add(queue, clp);

                /* drop the reference count for cl_page_find, so that the
                 * page will be freed in cl_2queue_fini. */
                cl_page_put(env, clp);

                cl_page_clip(env, clp, offset, offset + bytes);

                count -= bytes;
                pos   += bytes;
                buf   += bytes;

                group->lig_rwcount += bytes;
                ret_bytes += bytes;
        } while (count);

        if (rc == 0) {
                enum cl_req_type iot;

                iot = io->ci_type == CIT_READ ? CRT_READ : CRT_WRITE;
                rc = cl_io_submit_sync(env, io, iot, queue, 0);
        }

        group->lig_rc = rc;

        cl_2queue_discard(env, io, queue);
        cl_2queue_disown(env, io, queue);
        cl_2queue_fini(env, queue);

        RETURN(ret_bytes);
}

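/*
 * The transient-page submission pattern used by llu_queue_pio() above, in
 * outline (illustrative sketch only, not built; error handling and clipping
 * elided, identifiers match the calls made above):
 */
#if 0
        cl_2queue_init(queue);
        do {
                clp = cl_page_find(env, obj, index, vmpage, CPT_TRANSIENT);
                rc = cl_page_own(env, io, clp); /* take ownership for this io */
                cl_2queue_add(queue, clp);      /* the queue holds its own ref */
                cl_page_put(env, clp);          /* drop the cl_page_find() ref */
        } while (more_user_memory);

        rc = cl_io_submit_sync(env, io, iot, queue, 0); /* synchronous RPC */

        cl_2queue_discard(env, io, queue);
        cl_2queue_disown(env, io, queue);
        cl_2queue_fini(env, queue);
#endif
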
static int vvp_io_commit_write(const struct lu_env *env,
                               const struct cl_io_slice *ios,
                               const struct cl_page_slice *slice,
                               unsigned from, unsigned to)
{
        struct cl_object     *obj    = slice->cpl_obj;
        struct cl_io         *io     = ios->cis_io;
        struct ccc_page      *cp     = cl2ccc_page(slice);
        struct cl_page       *pg     = slice->cpl_page;
        struct inode         *inode  = ccc_object_inode(obj);
        struct ll_sb_info    *sbi    = ll_i2sbi(inode);
        struct ll_inode_info *lli    = ll_i2info(inode);
        struct page          *vmpage = cp->cpg_page;

        int    result;
        int    tallyop;
        loff_t size;

        ENTRY;

        LINVRNT(cl_page_is_vmlocked(env, pg));
        LASSERT(vmpage->mapping->host == inode);

        LU_OBJECT_HEADER(D_INODE, env, &obj->co_lu, "committing page write\n");
        CL_PAGE_HEADER(D_PAGE, env, pg, "committing: [%d, %d]\n", from, to);

        /*
         * queue a write for some time in the future the first time we
         * dirty the page.
         *
         * This is different from what other file systems do: they usually
         * just mark page (and some of its buffers) dirty and rely on
         * balance_dirty_pages() to start a write-back. Lustre wants
         * write-back to be started earlier for the following reasons:
         *
         *     (1) with a large number of clients we need to limit the amount
         *     of cached data on the clients a lot;
         *
         *     (2) large compute jobs generally want compute-only then
         *     io-only and the IO should complete as quickly as possible;
         *
         *     (3) IO is batched up to the RPC size and is async until the
         *     client max cache is hit
         *     (/proc/fs/lustre/osc/OSC.../max_dirty_mb)
         */
        if (!PageDirty(vmpage)) {
                tallyop = LPROC_LL_DIRTY_MISSES;
                result = cl_page_cache_add(env, io, pg, CRT_WRITE);
                if (result == 0) {
                        /* page was added into cache successfully. */
                        set_page_dirty(vmpage);
                        vvp_write_pending(cl2ccc(obj), cp);
                } else if (result == -EDQUOT) {
                        pgoff_t last_index = i_size_read(inode) >>
                                             PAGE_CACHE_SHIFT;
                        bool need_clip = true;

                        /*
                         * Client ran out of disk space grant. Possible
                         * strategies are:
                         *
                         *     (a) do a sync write, renewing grant;
                         *
                         *     (b) stop writing on this stripe, switch to the
                         *     next one.
                         *
                         * (b) is a part of "parallel io" design that is the
                         * ultimate goal. (a) is what "old" client did, and
                         * what the new code continues to do for the time
                         * being.
                         */
                        if (last_index > pg->cp_index) {
                                to = PAGE_CACHE_SIZE;
                                need_clip = false;
                        } else if (last_index == pg->cp_index) {
                                int size_to = i_size_read(inode) &
                                              ~CFS_PAGE_MASK;
                                if (to < size_to)
                                        to = size_to;
                        }
                        if (need_clip)
                                cl_page_clip(env, pg, 0, to);
                        result = vvp_page_sync_io(env, io, pg, cp, CRT_WRITE);
                        if (result)
                                CERROR("Write page %lu of inode %p failed %d\n",
                                       pg->cp_index, inode, result);
                }
        } else {
                tallyop = LPROC_LL_DIRTY_HITS;
                result = 0;
        }

        ll_stats_ops_tally(sbi, tallyop, 1);

        /* Inode should be marked DIRTY even if no new page was marked DIRTY
         * because page could have been not flushed between 2 modifications.
         * It is important the file is marked DIRTY as soon as the I/O is
         * done. Indeed, when cache is flushed, file could be already closed
         * and it is impossible to get the mtime attribute.
         * In most cases we are working on a local copy of the mtime value. */
        LTIME_S(inode->i_mtime) = LTIME_S(CURRENT_TIME);

        size = cl_offset(obj, pg->cp_index) + to;

        ll_inode_size_lock(inode);
        if (result == 0) {
                if (size > i_size_read(inode)) {
                        cl_isize_write_nolock(inode, size);
                        CDEBUG(D_VFSTRACE, DFID" updating i_size %lu\n",
                               PFID(lu_object_fid(&obj->co_lu)),
                               (unsigned long)size);
                }
                cl_page_export(env, pg, 1);
        } else {
                if (size > i_size_read(inode))
                        cl_page_discard(env, io, pg);
        }
        ll_inode_size_unlock(inode);

        if (result == 0) {
                spin_lock(&lli->lli_lock);
                lli->lli_flags |= LLIF_DATA_MODIFIED;
                spin_unlock(&lli->lli_lock);
        }

        RETURN(result);
}

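/*
 * Worked example of the -EDQUOT clipping above, assuming 4096-byte pages and
 * i_size = 10000 (so last_index = 2): a grant-less write to page 1, which
 * lies entirely below i_size, is sent whole (to = PAGE_CACHE_SIZE, no clip);
 * a write of bytes [0, 100) to page 2 is extended to the valid tail of the
 * file, to = 10000 & ~CFS_PAGE_MASK = 1808, before the synchronous transfer.
 */
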
static void vvp_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
{
        struct cl_io     *io    = ios->cis_io;
        struct cl_object *obj   = io->ci_obj;
        struct ccc_io    *cio   = cl2ccc_io(env, ios);
        struct inode     *inode = ccc_object_inode(obj);

        CLOBINVRNT(env, obj, ccc_object_invariant(obj));

        CDEBUG(D_VFSTRACE, DFID" ignore/verify layout %d/%d, layout version "
               "%d restore needed %d\n",
               PFID(lu_object_fid(&obj->co_lu)),
               io->ci_ignore_layout, io->ci_verify_layout,
               cio->cui_layout_gen, io->ci_restore_needed);

        if (io->ci_restore_needed == 1) {
                int rc;

                /* file was detected as released; we need to restore it
                 * before finishing the io. */
                rc = ll_layout_restore(inode, 0, OBD_OBJECT_EOF);
                /* if restore registration failed, no restart;
                 * we will return -ENODATA */

                /* The layout will change after restore, so we need to
                 * block on the layout lock held by the MDT. As the MDT
                 * will not send the new layout in lvb (see LU-3124), we
                 * have to explicitly fetch it; all this will be done by
                 * ll_layout_refresh(). */
                if (rc == 0) {
                        io->ci_restore_needed = 0;
                        io->ci_need_restart   = 1;
                        io->ci_verify_layout  = 1;
                } else {
                        io->ci_restore_needed = 1;
                        io->ci_need_restart   = 0;
                        io->ci_verify_layout  = 0;
                        io->ci_result         = rc;
                }
        }

        if (!io->ci_ignore_layout && io->ci_verify_layout) {
                __u32 gen = 0;

                /* check layout version */
                ll_layout_refresh(inode, &gen);
                io->ci_need_restart = cio->cui_layout_gen != gen;
                if (io->ci_need_restart) {
                        CDEBUG(D_VFSTRACE,
                               DFID" layout changed from %d to %d.\n",
                               PFID(lu_object_fid(&obj->co_lu)),
                               cio->cui_layout_gen, gen);
                        /* today, a successful restore is the only possible
                         * case; restore was done, clear the restoring state */
                        ll_i2info(ccc_object_inode(obj))->lli_flags &=
                                ~LLIF_FILE_RESTORING;
                }
        }
}

static int vvp_io_read_start(const struct lu_env *env,
                             const struct cl_io_slice *ios)
{
        struct vvp_io     *vio   = cl2vvp_io(env, ios);
        struct ccc_io     *cio   = cl2ccc_io(env, ios);
        struct cl_io      *io    = ios->cis_io;
        struct cl_object  *obj   = io->ci_obj;
        struct inode      *inode = ccc_object_inode(obj);
        struct ll_ra_read *bead  = &vio->cui_bead;
        struct file       *file  = cio->cui_fd->fd_file;

        int     result;
        loff_t  pos = io->u.ci_rd.rd.crw_pos;
        long    cnt = io->u.ci_rd.rd.crw_count;
        long    tot = cio->cui_tot_count;
        int     exceed = 0;

        CLOBINVRNT(env, obj, ccc_object_invariant(obj));

        CDEBUG(D_VFSTRACE, "read: -> [%lli, %lli)\n", pos, pos + cnt);

        if (!can_populate_pages(env, io, inode))
                return 0;

        result = ccc_prep_size(env, obj, io, pos, tot, &exceed);
        if (result != 0)
                return result;
        else if (exceed != 0)
                goto out;

        LU_OBJECT_HEADER(D_INODE, env, &obj->co_lu,
                         "Read ino %lu, %lu bytes, offset %lld, size %llu\n",
                         inode->i_ino, cnt, pos, i_size_read(inode));

        /* turn off the kernel's read-ahead */
        cio->cui_fd->fd_file->f_ra.ra_pages = 0;

        /* initialize read-ahead window once per syscall */
        if (!vio->cui_ra_window_set) {
                vio->cui_ra_window_set = 1;
                bead->lrr_start = cl_index(obj, pos);
                bead->lrr_count = cl_index(obj, tot + PAGE_CACHE_SIZE - 1);
                ll_ra_read_in(file, bead);
        }

        /* BUG: 5972 */
        file_accessed(file);
        switch (vio->cui_io_subtype) {
        case IO_NORMAL:
                result = lustre_generic_file_read(file, cio, &pos);
                break;
        case IO_SPLICE:
                result = generic_file_splice_read(file, &pos,
                                                  vio->u.splice.cui_pipe, cnt,
                                                  vio->u.splice.cui_flags);
                /* LU-1109: do splice read stripe by stripe, otherwise it may
                 * make nfsd stuck if this read occupies all internal pipe
                 * buffers. */
                io->ci_continue = 0;
                break;
        default:
                CERROR("Wrong IO type %u\n", vio->cui_io_subtype);
                LBUG();
        }

out:
        if (result >= 0) {
                if (result < cnt)
                        io->ci_continue = 0;
                io->ci_nob += result;
                ll_rw_stats_tally(ll_i2sbi(inode), current->pid,
                                  cio->cui_fd, pos, result, READ);
                result = 0;
        }
        return result;
}

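/*
 * Worked example for the read-ahead window set above, assuming 4096-byte
 * pages: a 10000-byte read starting at pos = 0 gives lrr_start = 0 and
 * lrr_count = cl_index(obj, 10000 + 4095) = 3, i.e. the window covers the
 * three pages the whole syscall will touch. cui_ra_window_set guarantees
 * the window is computed once per syscall even when the read is split
 * across several cl_io iterations.
 */
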
static int vvp_io_fault_start(const struct lu_env *env,
                              const struct cl_io_slice *ios)
{
        struct vvp_io       *vio    = cl2vvp_io(env, ios);
        struct cl_io        *io     = ios->cis_io;
        struct cl_object    *obj    = io->ci_obj;
        struct inode        *inode  = ccc_object_inode(obj);
        struct cl_fault_io  *fio    = &io->u.ci_fault;
        struct vvp_fault_io *cfio   = &vio->u.fault;
        loff_t               offset;
        int                  result = 0;
        struct page         *vmpage = NULL;
        struct cl_page      *page;
        loff_t               size;
        pgoff_t              last; /* last page in a file data region */

        if (fio->ft_executable &&
            LTIME_S(inode->i_mtime) != vio->u.fault.ft_mtime)
                CWARN("binary "DFID
                      " changed while waiting for the page fault lock\n",
                      PFID(lu_object_fid(&obj->co_lu)));

        /* offset of the last byte on the page */
        offset = cl_offset(obj, fio->ft_index + 1) - 1;
        LASSERT(cl_index(obj, offset) == fio->ft_index);
        result = ccc_prep_size(env, obj, io, 0, offset + 1, NULL);
        if (result != 0)
                return result;

        /* must return locked page */
        if (fio->ft_mkwrite) {
                LASSERT(cfio->ft_vmpage != NULL);
                lock_page(cfio->ft_vmpage);
        } else {
                result = vvp_io_kernel_fault(cfio);
                if (result != 0)
                        return result;
        }

        vmpage = cfio->ft_vmpage;
        LASSERT(PageLocked(vmpage));

        if (OBD_FAIL_CHECK(OBD_FAIL_LLITE_FAULT_TRUNC_RACE))
                ll_invalidate_page(vmpage);

        size = i_size_read(inode);
        /* Though we have already held a cl_lock upon this page, it can
         * still be truncated locally. */
        if (unlikely((vmpage->mapping != inode->i_mapping) ||
                     (page_offset(vmpage) > size))) {
                CDEBUG(D_PAGE, "llite: fault and truncate race happened!\n");

                /* return +1 to stop cl_io_loop() and ll_fault() will catch
                 * and retry. */
                GOTO(out, result = +1);
        }

        if (fio->ft_mkwrite) {
                pgoff_t last_index;
                /*
                 * Capture the size while holding the lli_trunc_sem from
                 * above; we want to make sure that we complete the mkwrite
                 * action while holding this lock. We need to make sure that
                 * we are not past the end of the file.
                 */
                last_index = cl_index(obj, size - 1);
                if (last_index < fio->ft_index) {
                        CDEBUG(D_PAGE,
                               "llite: mkwrite and truncate race happened: "
                               "%p: 0x%lx 0x%lx\n",
                               vmpage->mapping, fio->ft_index, last_index);
                        /*
                         * We need to return if we are past the end of the
                         * file. This will propagate up the call stack to
                         * ll_page_mkwrite, where we will return
                         * VM_FAULT_NOPAGE. Any non-negative value returned
                         * here will be silently converted to 0. If
                         * vmpage->mapping is NULL, the error code would be
                         * converted back to ENODATA in ll_page_mkwrite0.
                         * Thus we return -ENODATA to handle both cases.
                         */
                        GOTO(out, result = -ENODATA);
                }
        }

        page = cl_page_find(env, obj, fio->ft_index, vmpage, CPT_CACHEABLE);
        if (IS_ERR(page))
                GOTO(out, result = PTR_ERR(page));

        /* if the page is going to be written, we should add it into cache
         * earlier. */
        if (fio->ft_mkwrite) {
                wait_on_page_writeback(vmpage);
                if (set_page_dirty(vmpage)) {
                        struct ccc_page *cp;

                        /* vvp_page_assume() calls wait_on_page_writeback(). */
                        cl_page_assume(env, io, page);

                        cp = cl2ccc_page(cl_page_at(page, &vvp_device_type));
                        vvp_write_pending(cl2ccc(obj), cp);

                        /* Do not set Dirty bit here so that in case IO is
                         * started before the page is really made dirty, we
                         * still have a chance to detect it. */
                        result = cl_page_cache_add(env, io, page, CRT_WRITE);
                        LASSERT(cl_page_is_owned(page, io));

                        vmpage = NULL;
                        if (result < 0) {
                                cl_page_unmap(env, io, page);
                                cl_page_discard(env, io, page);
                                cl_page_disown(env, io, page);

                                cl_page_put(env, page);

                                /* we're in big trouble, what can we do now? */
                                if (result == -EDQUOT)
                                        result = -ENOSPC;
                                GOTO(out, result);
                        } else
                                cl_page_disown(env, io, page);
                }
        }

        last = cl_index(obj, size - 1);
        /*
         * The ft_index is only used in the case of a mkwrite action. We
         * need to check that our assertions are correct, since we should
         * have caught this above.
         */
        LASSERT(!fio->ft_mkwrite || fio->ft_index <= last);
        if (fio->ft_index == last)
                /* Last page is mapped partially. */
                fio->ft_nob = size - cl_offset(obj, fio->ft_index);
        else
                fio->ft_nob = cl_page_size(obj);

        lu_ref_add(&page->cp_reference, "fault", io);
        fio->ft_page = page;
        EXIT;

out:
        /* return unlocked vmpage to avoid deadlocking */
        if (vmpage != NULL)
                unlock_page(vmpage);
        cfio->fault.ft_flags &= ~VM_FAULT_LOCKED;
        return result;
}

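/*
 * Return conventions in vvp_io_fault_start(): +1 stops cl_io_loop() so that
 * ll_fault() can retry after a fault/truncate race; -ENODATA propagates up
 * to ll_page_mkwrite0() where it becomes VM_FAULT_NOPAGE; and -EDQUOT is
 * mapped to -ENOSPC, presumably because the VM has no way to report a quota
 * error from a page fault.
 */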