/*
 * Finish a read/write IO iteration: return any LRU page slots that were
 * reserved at iter-init time but never consumed back to the client-wide
 * budget (cl_lru_left), then clear the per-IO reservation count.
 */
static void osc_io_rw_iter_fini(const struct lu_env *env,
				const struct cl_io_slice *ios)
{
	struct osc_io *oio = osc_env_io(env);
	struct client_obd *cli = osc_cli(cl2osc(ios->cis_obj));

	if (oio->oi_lru_reserved > 0) {
		/* hand the unused reservation back to the shared counter */
		atomic_add(oio->oi_lru_reserved, cli->cl_lru_left);
		oio->oi_lru_reserved = 0;
	}
}
/*
 * Tear down state set up by the write iteration: give back any unconsumed
 * LRU page reservation, drop the cached write lock reference, and finish
 * with the common per-iteration cleanup.
 */
static void osc_io_write_iter_fini(const struct lu_env *env,
				   const struct cl_io_slice *ios)
{
	struct osc_io *oio = osc_env_io(env);

	if (oio->oi_lru_reserved > 0) {
		/* release what osc_lru_reserve() handed out at init time */
		osc_lru_unreserve(osc_cli(cl2osc(ios->cis_obj)),
				  oio->oi_lru_reserved);
		oio->oi_lru_reserved = 0;
	}
	oio->oi_write_osclock = NULL;

	osc_io_iter_fini(env, ios);
}
static int osc_io_iter_init(const struct lu_env *env, const struct cl_io_slice *ios) { struct osc_object *osc = cl2osc(ios->cis_obj); struct obd_import *imp = osc_cli(osc)->cl_import; int rc = -EIO; spin_lock(&imp->imp_lock); if (likely(!imp->imp_invalid)) { struct osc_io *oio = osc_env_io(env); atomic_inc(&osc->oo_nr_ios); oio->oi_is_active = 1; rc = 0; } spin_unlock(&imp->imp_lock); return rc; }
static int osc_io_write_iter_init(const struct lu_env *env, const struct cl_io_slice *ios) { struct cl_io *io = ios->cis_io; struct osc_io *oio = osc_env_io(env); struct osc_object *osc = cl2osc(ios->cis_obj); unsigned long npages; ENTRY; if (cl_io_is_append(io)) RETURN(osc_io_iter_init(env, ios)); npages = io->u.ci_rw.crw_count >> PAGE_CACHE_SHIFT; if (io->u.ci_rw.crw_pos & ~PAGE_MASK) ++npages; oio->oi_lru_reserved = osc_lru_reserve(osc_cli(osc), npages); RETURN(osc_io_iter_init(env, ios)); }
/*
 * Reserve LRU page slots for an upcoming read/write IO, best-effort.
 *
 * The reservation is capped at one RPC window's worth of pages
 * (max_pages_per_rpc * max_rpcs_in_flight) and is taken from the shared
 * cl_lru_left counter with a lock-free compare-and-swap loop.  Failing to
 * reserve is not an error: oi_lru_reserved simply stays 0 and the IO
 * proceeds without a reservation, hence the unconditional RETURN(0).
 *
 * NOTE(review): this uses CFS_PAGE_MASK while osc_io_write_iter_init()
 * uses PAGE_MASK, and manipulates cl_lru_left directly where the write
 * path goes through osc_lru_reserve()/osc_lru_unreserve() — presumably
 * equivalent, but worth confirming the two paths are intentionally
 * different.
 */
static int osc_io_rw_iter_init(const struct lu_env *env,
			       const struct cl_io_slice *ios)
{
	struct cl_io *io = ios->cis_io;
	struct osc_io *oio = osc_env_io(env);
	struct osc_object *osc = cl2osc(ios->cis_obj);
	struct client_obd *cli = osc_cli(osc);
	unsigned long c;
	unsigned int npages;
	unsigned int max_pages;
	ENTRY;

	/* appends have no known extent; nothing to reserve */
	if (cl_io_is_append(io))
		RETURN(0);

	npages = io->u.ci_rw.crw_count >> PAGE_CACHE_SHIFT;
	/* an unaligned start offset spills into one extra page */
	if (io->u.ci_rw.crw_pos & ~CFS_PAGE_MASK)
		++npages;

	/* never reserve more than one full RPC pipeline can use at once */
	max_pages = cli->cl_max_pages_per_rpc * cli->cl_max_rpcs_in_flight;
	if (npages > max_pages)
		npages = max_pages;

	c = atomic_read(cli->cl_lru_left);
	/* low on slots: try reclaiming once before attempting the CAS */
	if (c < npages && osc_lru_reclaim(cli) > 0)
		c = atomic_read(cli->cl_lru_left);
	while (c >= npages) {
		/* claim npages slots atomically; retry if another thread
		 * changed the counter between the read and the cmpxchg */
		if (c == atomic_cmpxchg(cli->cl_lru_left, c, c - npages)) {
			oio->oi_lru_reserved = npages;
			break;
		}
		c = atomic_read(cli->cl_lru_left);
	}

	RETURN(0);
}
/**
 * An implementation of cl_io_operations::cio_io_submit() method for osc
 * layer. Iterates over pages in the in-queue, prepares each for io by calling
 * cl_page_prep() and then either submits them through osc_io_submit_page()
 * or, if page is already submitted, changes osc flags through
 * osc_set_async_flags().
 *
 * Pages that pass cl_page_prep() are moved from @queue->c2_qin to
 * @queue->c2_qout and collected into a local list which is flushed to
 * osc_queue_sync_pages() in batches of cl_max_pages_per_rpc.  As long as
 * at least one page made it into the out-queue the function reports
 * success (0), letting the caller make progress on the partial submission.
 */
static int osc_io_submit(const struct lu_env *env,
			 const struct cl_io_slice *ios,
			 enum cl_req_type crt, struct cl_2queue *queue)
{
	struct cl_page *page;
	struct cl_page *tmp;
	struct client_obd *cli = NULL;
	struct osc_object *osc = NULL; /* to keep gcc happy */
	struct osc_page *opg;
	struct cl_io *io;
	LIST_HEAD(list);

	struct cl_page_list *qin = &queue->c2_qin;
	struct cl_page_list *qout = &queue->c2_qout;
	int queued = 0;
	int result = 0;
	int cmd;
	int brw_flags;
	int max_pages;

	LASSERT(qin->pl_nr > 0);

	CDEBUG(D_CACHE, "%d %d\n", qin->pl_nr, crt);

	osc = cl2osc(ios->cis_obj);
	cli = osc_cli(osc);
	/* batch size for osc_queue_sync_pages(): one RPC's worth of pages */
	max_pages = cli->cl_max_pages_per_rpc;

	cmd = crt == CRT_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ;
	brw_flags = osc_io_srvlock(cl2osc_io(env, ios)) ? OBD_BRW_SRVLOCK : 0;

	/*
	 * NOTE: here @page is a top-level page. This is done to avoid
	 * creation of sub-page-list.
	 */
	cl_page_list_for_each_safe(page, tmp, qin) {
		struct osc_async_page *oap;

		/* Top level IO. */
		io = page->cp_owner;
		LASSERT(io != NULL);

		opg = osc_cl_page_osc(page);
		oap = &opg->ops_oap;
		LASSERT(osc == oap->oap_obj);

		/* the page is already queued for an RPC elsewhere; bail
		 * out rather than submitting it twice */
		if (!list_empty(&oap->oap_pending_item) ||
		    !list_empty(&oap->oap_rpc_item)) {
			CDEBUG(D_CACHE, "Busy oap %p page %p for submit.\n",
			       oap, opg);
			result = -EBUSY;
			break;
		}

		result = cl_page_prep(env, io, page, crt);
		if (result != 0) {
			LASSERT(result < 0);
			if (result != -EALREADY)
				break;
			/*
			 * Handle -EALREADY error: for read case, the page is
			 * already in UPTODATE state; for write, the page
			 * is not dirty.
			 */
			result = 0;
			continue;
		}

		cl_page_list_move(qout, qin, page);
		/* NOTE(review): oap_async_flags is updated without taking a
		 * lock here — presumably safe because the oap is not yet on
		 * any pending/rpc list; verify against other writers */
		oap->oap_async_flags = ASYNC_URGENT|ASYNC_READY;
		oap->oap_async_flags |= ASYNC_COUNT_STABLE;

		osc_page_submit(env, opg, crt, brw_flags);
		list_add_tail(&oap->oap_pending_item, &list);
		/* flush a full batch as soon as it is assembled */
		if (++queued == max_pages) {
			queued = 0;
			result = osc_queue_sync_pages(env, osc, &list, cmd,
						      brw_flags);
			if (result < 0)
				break;
		}
	}

	/* flush the final, partial batch (if any) */
	if (queued > 0)
		result = osc_queue_sync_pages(env, osc, &list, cmd, brw_flags);

	CDEBUG(D_INFO, "%d/%d %d\n", qin->pl_nr, qout->pl_nr, result);
	/* partial success counts as success: caller retries the remainder */
	return qout->pl_nr > 0 ? 0 : result;
}