static int slp_io_rw_lock(const struct lu_env *env, const struct cl_io_slice *ios) { struct ccc_io *cio = ccc_env_io(env); struct cl_io *io = ios->cis_io; loff_t start; loff_t end; if (cl_io_is_append(io)) { start = 0; end = OBD_OBJECT_EOF; } else { start = io->u.ci_wr.wr.crw_pos; end = start + io->u.ci_wr.wr.crw_count - 1; } ccc_io_update_iov(env, cio, io); /* * This acquires real DLM lock only in O_APPEND case, because of * the io->ci_lockreq setting in llu_io_init(). */ LASSERT(ergo(cl_io_is_append(io), io->ci_lockreq == CILR_MANDATORY)); LASSERT(ergo(!cl_io_is_append(io), io->ci_lockreq == CILR_NEVER)); return ccc_io_one_lock(env, io, 0, io->ci_type == CIT_READ ? CLM_READ : CLM_WRITE, start, end); }
static int vvp_io_write_start(const struct lu_env *env, const struct cl_io_slice *ios) { struct ccc_io *cio = cl2ccc_io(env, ios); struct cl_io *io = ios->cis_io; struct cl_object *obj = io->ci_obj; struct inode *inode = ccc_object_inode(obj); struct file *file = cio->cui_fd->fd_file; ssize_t result = 0; loff_t pos = io->u.ci_wr.wr.crw_pos; size_t cnt = io->u.ci_wr.wr.crw_count; ENTRY; if (!can_populate_pages(env, io, inode)) return 0; if (cl_io_is_append(io)) { /* * PARALLEL IO This has to be changed for parallel IO doing * out-of-order writes. */ pos = io->u.ci_wr.wr.crw_pos = i_size_read(inode); cio->cui_iocb->ki_pos = pos; } CDEBUG(D_VFSTRACE, "write: [%lli, %lli)\n", pos, pos + (long long)cnt); if (cio->cui_iov == NULL) /* from a temp io in ll_cl_init(). */ result = 0; else result = lustre_generic_file_write(file, cio, &pos); if (result > 0) { if (result < cnt) io->ci_continue = 0; io->ci_nob += result; ll_rw_stats_tally(ll_i2sbi(inode), current->pid, cio->cui_fd, pos, result, WRITE); result = 0; } RETURN(result); }
static int osc_io_write_iter_init(const struct lu_env *env, const struct cl_io_slice *ios) { struct cl_io *io = ios->cis_io; struct osc_io *oio = osc_env_io(env); struct osc_object *osc = cl2osc(ios->cis_obj); unsigned long npages; ENTRY; if (cl_io_is_append(io)) RETURN(osc_io_iter_init(env, ios)); npages = io->u.ci_rw.crw_count >> PAGE_CACHE_SHIFT; if (io->u.ci_rw.crw_pos & ~PAGE_MASK) ++npages; oio->oi_lru_reserved = osc_lru_reserve(osc_cli(osc), npages); RETURN(osc_io_iter_init(env, ios)); }
/*
 * Per-iteration setup for a read/write: try to reserve client LRU page
 * slots (cl_lru_left, presumably the count of free slots — confirm against
 * its definition) for the pages this io will touch, capped at one full
 * round of RPCs in flight.  The reservation is best-effort: if it cannot
 * be made, oi_lru_reserved stays unset and the io proceeds anyway.
 * Append writes skip reservation since their extent is unknown here.
 */
static int osc_io_rw_iter_init(const struct lu_env *env, const struct cl_io_slice *ios)
{
	struct cl_io *io = ios->cis_io;
	struct osc_io *oio = osc_env_io(env);
	struct osc_object *osc = cl2osc(ios->cis_obj);
	struct client_obd *cli = osc_cli(osc);
	unsigned long c;
	unsigned int npages;
	unsigned int max_pages;
	ENTRY;
	if (cl_io_is_append(io))
		RETURN(0);
	/*
	 * Estimate the pages spanned: one per full page of the count, plus
	 * one when the start offset is not page aligned.
	 * NOTE(review): this undercounts by one page when crw_pos is
	 * aligned but crw_count is not a page multiple; harmless as a
	 * best-effort reservation, but verify that is intended.
	 */
	npages = io->u.ci_rw.crw_count >> PAGE_CACHE_SHIFT;
	if (io->u.ci_rw.crw_pos & ~CFS_PAGE_MASK)
		++npages;
	/* Never reserve more than one full round of in-flight RPCs. */
	max_pages = cli->cl_max_pages_per_rpc * cli->cl_max_rpcs_in_flight;
	if (npages > max_pages)
		npages = max_pages;
	c = atomic_read(cli->cl_lru_left);
	/* Not enough slots left: try to reclaim some, then re-read. */
	if (c < npages && osc_lru_reclaim(cli) > 0)
		c = atomic_read(cli->cl_lru_left);
	/*
	 * Lock-free reservation: atomically take npages slots via
	 * compare-and-swap, retrying with a fresh read whenever another
	 * thread changed cl_lru_left between the read and the cmpxchg.
	 * Gives up (no reservation) once fewer than npages slots remain.
	 */
	while (c >= npages) {
		if (c == atomic_cmpxchg(cli->cl_lru_left, c, c - npages)) {
			oio->oi_lru_reserved = npages;
			break;
		}
		c = atomic_read(cli->cl_lru_left);
	}
	RETURN(0);
}