/**
 * Discard pages protected by the given lock. This function traverses radix
 * tree to find all covering pages and discard them. If a page is being covered
 * by other locks, it should remain in cache.
 *
 * If error happens on any step, the process continues anyway (the reasoning
 * behind this being that lock cancellation cannot be delayed indefinitely).
 *
 * \param env     execution environment
 * \param osc     object whose pages are scanned
 * \param start   first page index covered by the lock
 * \param end     last page index covered by the lock (inclusive)
 * \param discard true to discard pages unconditionally; false to first check
 *                whether another lock still covers each page
 *
 * \retval 0 on success, negative errno from cl_io_init() on failure
 */
static int mdc_lock_discard_pages(const struct lu_env *env,
				  struct osc_object *osc,
				  pgoff_t start, pgoff_t end,
				  bool discard)
{
	struct osc_thread_info *info = osc_env_info(env);
	struct cl_io *io = &info->oti_io;
	osc_page_gang_cbt cb;
	int res;
	int result;

	ENTRY;

	/* Run the scan against the top object of the cl_object stack; layout
	 * changes are irrelevant to page discard, hence ci_ignore_layout. */
	io->ci_obj = cl_object_top(osc2cl(osc));
	io->ci_ignore_layout = 1;
	result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
	if (result != 0)
		GOTO(out, result);

	/* Unconditional discard vs. "discard only if no other lock covers
	 * the page" — the callback makes the difference. */
	cb = discard ? osc_discard_cb : mdc_check_and_discard_cb;
	info->oti_fn_index = info->oti_next_index = start;
	do {
		res = osc_page_gang_lookup(env, io, osc, info->oti_next_index,
					   end, cb, (void *)osc);
		/* oti_next_index is advanced by the gang lookup; once it
		 * passes 'end' the whole range has been covered. */
		if (info->oti_next_index > end)
			break;

		/* Lookup yielded to avoid hogging the CPU; reschedule and
		 * resume from oti_next_index. */
		if (res == CLP_GANG_RESCHED)
			cond_resched();
	} while (res != CLP_GANG_OKAY);
out:
	cl_io_fini(env, io);
	/* Errors from the gang lookup itself are deliberately not propagated
	 * (see function comment); only cl_io_init() failure is returned. */
	RETURN(result);
}
/**
 * Sanity check after a truncate: scan the truncated region and complain
 * (via trunc_check_cb) about any page still cached there.
 *
 * \param env  execution environment
 * \param io   I/O context the page scan runs under
 * \param oio  osc I/O slice; identifies the object being truncated
 * \param size new object size in bytes
 */
static void osc_trunc_check(const struct lu_env *env, struct cl_io *io,
			    struct osc_io *oio, __u64 size)
{
	struct cl_object *obj = oio->oi_cl.cis_obj;
	/* Index of the page containing the new EOF offset. */
	pgoff_t index = cl_index(obj, size);
	/* Non-zero when the new size falls inside that page, i.e. the page
	 * is only partially truncated and is allowed to stay in cache. */
	int in_page = cl_offset(obj, index) < size;

	/* Start the scan just past any partially-truncated page; every page
	 * from there to EOF should have been discarded already. */
	osc_page_gang_lookup(env, io, cl2osc(obj), index + in_page,
			     CL_PAGE_EOF, trunc_check_cb, (void *)&size);
}