static struct slp_io *cl2slp_io(const struct lu_env *env,
                                const struct cl_io_slice *slice)
{
        /* We call it just for assertion here */
        cl2ccc_io(env, slice);
        return slp_env_io(env);
}
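
/*
 * Finalize a read io slice: tear down the Lustre read-ahead window if
 * vvp_io_read_start() set one up for this syscall, then fall through to
 * the generic vvp_io_fini().
 */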
static void vvp_io_read_fini(const struct lu_env *env,
                             const struct cl_io_slice *ios)
{
        struct vvp_io *vio = cl2vvp_io(env, ios);
        struct ccc_io *cio = cl2ccc_io(env, ios);

        if (vio->cui_ra_window_set)
                ll_ra_read_ex(cio->cui_fd->fd_file, &vio->cui_bead);

        vvp_io_fini(env, ios);
}
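
/*
 * Prepare a page-fault iteration: assert that the io runs against the
 * same inode the file descriptor refers to, and snapshot the inode's
 * mtime (presumably so the fault handler can later notice a concurrent
 * modification).
 */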
static int vvp_io_fault_iter_init(const struct lu_env *env,
                                  const struct cl_io_slice *ios)
{
        struct vvp_io *vio   = cl2vvp_io(env, ios);
        struct inode  *inode = ccc_object_inode(ios->cis_obj);

        LASSERT(inode ==
                cl2ccc_io(env, ios)->cui_fd->fd_file->f_dentry->d_inode);
        vio->u.fault.ft_mtime = LTIME_S(inode->i_mtime);
        return 0;
}
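
/*
 * Called for each page of a read io: update the per-file read-ahead
 * state, sanity-check that the page is covered by a lock, export pages
 * that read-ahead already brought uptodate, queue the page for transfer
 * and kick off Lustre read-ahead for the pages that follow it.
 */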
static int vvp_io_read_page(const struct lu_env *env,
                            const struct cl_io_slice *ios,
                            const struct cl_page_slice *slice)
{
        struct cl_io              *io     = ios->cis_io;
        struct cl_object          *obj    = slice->cpl_obj;
        struct ccc_page           *cp     = cl2ccc_page(slice);
        struct cl_page            *page   = slice->cpl_page;
        struct inode              *inode  = ccc_object_inode(obj);
        struct ll_sb_info         *sbi    = ll_i2sbi(inode);
        struct ll_file_data       *fd     = cl2ccc_io(env, ios)->cui_fd;
        struct ll_readahead_state *ras    = &fd->fd_ras;
        struct page               *vmpage = cp->cpg_page;
        struct cl_2queue          *queue  = &io->ci_queue;
        int rc;

        CLOBINVRNT(env, obj, ccc_object_invariant(obj));
        LASSERT(slice->cpl_obj == obj);

        ENTRY;

        if (sbi->ll_ra_info.ra_max_pages_per_file &&
            sbi->ll_ra_info.ra_max_pages)
                ras_update(sbi, inode, ras, page->cp_index,
                           cp->cpg_defer_uptodate);

        /* Sanity check whether the page is protected by a lock. */
        rc = cl_page_is_under_lock(env, io, page);
        if (rc != -EBUSY) {
                CL_PAGE_HEADER(D_WARNING, env, page, "%s: %d\n",
                               rc == -ENODATA ? "without a lock" :
                               "match failed", rc);
                if (rc != -ENODATA)
                        RETURN(rc);
        }

        if (cp->cpg_defer_uptodate) {
                cp->cpg_ra_used = 1;
                cl_page_export(env, page, 1);
        }

        /*
         * Add page into the queue even when it is marked uptodate above.
         * This will unlock it automatically as part of
         * cl_page_list_disown().
         */
        cl_2queue_add(queue, page);
        if (sbi->ll_ra_info.ra_max_pages_per_file &&
            sbi->ll_ra_info.ra_max_pages)
                ll_readahead(env, io, ras, vmpage->mapping, &queue->c2_qin,
                             fd->fd_flags);

        RETURN(0);
}
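
/*
 * Start a write io: for append writes rewind the io position to the
 * current file size, hand the iovec to the generic write path and fold
 * the result into io->ci_nob, clearing ci_continue on a short write.
 */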
static int vvp_io_write_start(const struct lu_env *env,
                              const struct cl_io_slice *ios)
{
        struct ccc_io    *cio   = cl2ccc_io(env, ios);
        struct cl_io     *io    = ios->cis_io;
        struct cl_object *obj   = io->ci_obj;
        struct inode     *inode = ccc_object_inode(obj);
        struct file      *file  = cio->cui_fd->fd_file;
        ssize_t result = 0;
        loff_t pos = io->u.ci_wr.wr.crw_pos;
        size_t cnt = io->u.ci_wr.wr.crw_count;

        ENTRY;

        if (!can_populate_pages(env, io, inode))
                return 0;

        if (cl_io_is_append(io)) {
                /*
                 * PARALLEL IO: this has to be changed for parallel IO doing
                 * out-of-order writes.
                 */
                pos = io->u.ci_wr.wr.crw_pos = i_size_read(inode);
                cio->cui_iocb->ki_pos = pos;
        }

        CDEBUG(D_VFSTRACE, "write: [%lli, %lli)\n", pos, pos + (long long)cnt);

        if (cio->cui_iov == NULL) /* from a temp io in ll_cl_init(). */
                result = 0;
        else
                result = lustre_generic_file_write(file, cio, &pos);

        if (result > 0) {
                if (result < cnt)
                        io->ci_continue = 0;
                io->ci_nob += result;
                ll_rw_stats_tally(ll_i2sbi(inode), current->pid, cio->cui_fd,
                                  pos, result, WRITE);
                result = 0;
        }
        RETURN(result);
}
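
/*
 * liblustre counterpart of the io start handlers: build an io group for
 * the user iovec, queue the pio for each segment while stopping reads at
 * EOF and clamping writes at lli_maxbytes, and fold the group's result
 * into the io session.
 */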
static int slp_io_start(const struct lu_env *env, const struct cl_io_slice *ios)
{
        struct ccc_io *cio = cl2ccc_io(env, ios);
        struct cl_io *io = ios->cis_io;
        struct cl_object *obj = io->ci_obj;
        struct inode *inode = ccc_object_inode(obj);
        int err, ret;
        loff_t pos;
        long cnt;
        struct llu_io_group *iogroup;
        struct lustre_rw_params p = {0};
        int iovidx;
        struct intnl_stat *st = llu_i2stat(inode);
        struct llu_inode_info *lli = llu_i2info(inode);
        struct llu_io_session *session = cl2slp_io(env, ios)->sio_session;
        int write = io->ci_type == CIT_WRITE;
        int exceed = 0;

        CLOBINVRNT(env, obj, ccc_object_invariant(obj));

        if (write) {
                pos = io->u.ci_wr.wr.crw_pos;
                cnt = io->u.ci_wr.wr.crw_count;
        } else {
                pos = io->u.ci_rd.rd.crw_pos;
                cnt = io->u.ci_rd.rd.crw_count;
        }
        if (io->u.ci_wr.wr_append) {
                p.lrp_lock_mode = LCK_PW;
        } else {
                p.lrp_brw_flags = OBD_BRW_SRVLOCK;
                p.lrp_lock_mode = LCK_NL;
        }
        iogroup = get_io_group(inode, max_io_pages(cnt, cio->cui_nrsegs), &p);
        if (IS_ERR(iogroup))
                RETURN(PTR_ERR(iogroup));

        err = ccc_prep_size(env, obj, io, pos, cnt, &exceed);
        if (err != 0 || (write == 0 && exceed != 0))
                GOTO(out, err);

        CDEBUG(D_INODE,
               "%s ino %lu, %lu bytes, offset "LPU64", i_size "LPU64"\n",
               write ? "Write" : "Read", (unsigned long)st->st_ino,
               cnt, (__u64)pos, (__u64)st->st_size);

        if (write && io->u.ci_wr.wr_append)
                pos = io->u.ci_wr.wr.crw_pos = st->st_size;
        /* XXX? Do we need to change io content too here? */
        /* XXX What if one write syscall writes at two different offsets? */

        for (iovidx = 0; iovidx < cio->cui_nrsegs; iovidx++) {
                char *buf = (char *)cio->cui_iov[iovidx].iov_base;
                long count = cio->cui_iov[iovidx].iov_len;

                if (!count)
                        continue;
                if (cnt < count)
                        count = cnt;
                if (IS_BAD_PTR(buf) || IS_BAD_PTR(buf + count))
                        GOTO(out, err = -EFAULT);

                if (io->ci_type == CIT_READ) {
                        if (/* local_lock && */ pos >= st->st_size)
                                break;
                } else if (io->ci_type == CIT_WRITE) {
                        if (pos >= lli->lli_maxbytes)
                                GOTO(out, err = -EFBIG);
                        if (pos + count >= lli->lli_maxbytes)
                                count = lli->lli_maxbytes - pos;
                } else {
                        LBUG();
                }

                ret = llu_queue_pio(env, io, iogroup, buf, count, pos);
                if (ret < 0) {
                        GOTO(out, err = ret);
                } else {
                        io->ci_nob += ret;
                        pos += ret;
                        cnt -= ret;
                        if (io->ci_type == CIT_WRITE) {
                                // obd_adjust_kms(exp, lsm, pos, 0); // XXX
                                if (pos > st->st_size)
                                        st->st_size = pos;
                        }
                        if (!cnt)
                                break;
                }
        }
        LASSERT(cnt == 0 || io->ci_type == CIT_READ); /* libsysio should guarantee this */

        if (!iogroup->lig_rc)
                session->lis_rwcount += iogroup->lig_rwcount;
        else if (!session->lis_rc)
                session->lis_rc = iogroup->lig_rc;
        err = 0;

out:
        put_io_group(iogroup);
        return err;
}
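
/*
 * Start a read io: check that the read does not run past end of file,
 * disable the kernel's own read-ahead in favour of Lustre's, set up the
 * read-ahead window once per syscall, and dispatch to the generic or
 * splice read path according to the io subtype.
 */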
static int vvp_io_read_start(const struct lu_env *env,
                             const struct cl_io_slice *ios)
{
        struct vvp_io     *vio   = cl2vvp_io(env, ios);
        struct ccc_io     *cio   = cl2ccc_io(env, ios);
        struct cl_io      *io    = ios->cis_io;
        struct cl_object  *obj   = io->ci_obj;
        struct inode      *inode = ccc_object_inode(obj);
        struct ll_ra_read *bead  = &vio->cui_bead;
        struct file       *file  = cio->cui_fd->fd_file;

        int     result;
        loff_t  pos = io->u.ci_rd.rd.crw_pos;
        long    cnt = io->u.ci_rd.rd.crw_count;
        long    tot = cio->cui_tot_count;
        int     exceed = 0;

        CLOBINVRNT(env, obj, ccc_object_invariant(obj));

        CDEBUG(D_VFSTRACE, "read: -> [%lli, %lli)\n", pos, pos + cnt);

        if (!can_populate_pages(env, io, inode))
                return 0;

        result = ccc_prep_size(env, obj, io, pos, tot, &exceed);
        if (result != 0)
                return result;
        else if (exceed != 0)
                goto out;

        LU_OBJECT_HEADER(D_INODE, env, &obj->co_lu,
                         "Read ino %lu, %lu bytes, offset %lld, size %llu\n",
                         inode->i_ino, cnt, pos, i_size_read(inode));

        /* turn off the kernel's read-ahead */
        cio->cui_fd->fd_file->f_ra.ra_pages = 0;

        /* initialize read-ahead window once per syscall */
        if (!vio->cui_ra_window_set) {
                vio->cui_ra_window_set = 1;
                bead->lrr_start = cl_index(obj, pos);
                bead->lrr_count = cl_index(obj, tot + PAGE_CACHE_SIZE - 1);
                ll_ra_read_in(file, bead);
        }

        /* BUG: 5972 */
        file_accessed(file);
        switch (vio->cui_io_subtype) {
        case IO_NORMAL:
                result = lustre_generic_file_read(file, cio, &pos);
                break;
        case IO_SPLICE:
                result = generic_file_splice_read(file, &pos,
                                                  vio->u.splice.cui_pipe, cnt,
                                                  vio->u.splice.cui_flags);
                /* LU-1109: do splice read stripe by stripe, otherwise it may
                 * make nfsd stuck if this read occupies all internal pipe
                 * buffers. */
                io->ci_continue = 0;
                break;
        default:
                CERROR("Wrong IO type %u\n", vio->cui_io_subtype);
                LBUG();
        }

out:
        if (result >= 0) {
                if (result < cnt)
                        io->ci_continue = 0;
                io->ci_nob += result;
                ll_rw_stats_tally(ll_i2sbi(inode), current->pid, cio->cui_fd,
                                  pos, result, READ);
                result = 0;
        }
        return result;
}
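
/*
 * Generic io finalization: if the file was detected as released, request
 * a restore and arrange for the io to be restarted; then, unless layout
 * checking is suppressed, refresh the layout and flag a restart when the
 * layout generation changed under the io.
 */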
static void vvp_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
{
        struct cl_io     *io    = ios->cis_io;
        struct cl_object *obj   = io->ci_obj;
        struct ccc_io    *cio   = cl2ccc_io(env, ios);
        struct inode     *inode = ccc_object_inode(obj);

        CLOBINVRNT(env, obj, ccc_object_invariant(obj));

        CDEBUG(D_VFSTRACE, DFID" ignore/verify layout %d/%d, layout version %d "
                           "restore needed %d\n",
               PFID(lu_object_fid(&obj->co_lu)),
               io->ci_ignore_layout, io->ci_verify_layout,
               cio->cui_layout_gen, io->ci_restore_needed);

        if (io->ci_restore_needed == 1) {
                int rc;

                /* file was detected as released, we need to restore it
                 * before finishing the io */
                rc = ll_layout_restore(inode, 0, OBD_OBJECT_EOF);
                /* if restore registration failed, no restart,
                 * we will return -ENODATA */
                /* The layout will change after restore, so we need to
                 * block on the layout lock held by the MDT. As the MDT
                 * will not send the new layout in the lvb (see LU-3124),
                 * we have to fetch it explicitly; all of this is done by
                 * ll_layout_refresh(). */
                if (rc == 0) {
                        io->ci_restore_needed = 0;
                        io->ci_need_restart = 1;
                        io->ci_verify_layout = 1;
                } else {
                        io->ci_restore_needed = 1;
                        io->ci_need_restart = 0;
                        io->ci_verify_layout = 0;
                        io->ci_result = rc;
                }
        }

        if (!io->ci_ignore_layout && io->ci_verify_layout) {
                __u32 gen = 0;

                /* check layout version */
                ll_layout_refresh(inode, &gen);
                io->ci_need_restart = cio->cui_layout_gen != gen;
                if (io->ci_need_restart) {
                        CDEBUG(D_VFSTRACE,
                               DFID" layout changed from %d to %d.\n",
                               PFID(lu_object_fid(&obj->co_lu)),
                               cio->cui_layout_gen, gen);
                        /* today a successful restore is the only possible
                         * case; restore was done, so clear the restoring
                         * state */
                        ll_i2info(ccc_object_inode(obj))->lli_flags &=
                                ~LLIF_FILE_RESTORING;
                }
        }
}