/**
 * Check if page @page is covered by an extra lock or discard it.
 *
 * Gang-walk callback: for each page at or beyond the cached
 * "first-not-covered" index, probe for another DLM lock covering it.
 * If one exists the page (and, given the CL_PAGE_EOF cache update, all
 * later pages) is left alone; otherwise the page is discarded.
 *
 * \retval CLP_GANG_OKAY always, so the walk continues.
 */
static int mdc_check_and_discard_cb(const struct lu_env *env, struct cl_io *io,
				    struct osc_page *ops, void *cbdata)
{
	struct osc_thread_info *info = osc_env_info(env);
	struct osc_object *osc = cbdata;
	pgoff_t index;

	index = osc_index(ops);
	/* Pages below oti_fn_index were already determined to be covered
	 * by some other lock; skip the lookup for them. */
	if (index >= info->oti_fn_index) {
		struct ldlm_lock *tmp;
		struct cl_page *page = ops->ops_cl.cpl_page;

		/* refresh non-overlapped index */
		tmp = mdc_dlmlock_at_pgoff(env, osc, index,
					   OSC_DAP_FL_TEST_LOCK);
		if (tmp != NULL) {
			/* Another lock covers this page.  Caching
			 * CL_PAGE_EOF means no further lookups are done for
			 * the rest of the walk — presumably the matched lock
			 * extends to EOF in the MDC (DoM) case; confirm
			 * against mdc_dlmlock_at_pgoff(). */
			info->oti_fn_index = CL_PAGE_EOF;
			LDLM_LOCK_PUT(tmp);
		} else if (cl_page_own(env, io, page) == 0) {
			/* discard the page */
			cl_page_discard(env, io, page);
			cl_page_disown(env, io, page);
		} else {
			/* cl_page_own() may fail only while the page is
			 * already being freed. */
			LASSERT(page->cp_state == CPS_FREEING);
		}
	}

	/* Record where the gang walk should resume. */
	info->oti_next_index = index + 1;
	return CLP_GANG_OKAY;
}
/** * Checks that there are no pages being written in the extent being truncated. */ static int trunc_check_cb(const struct lu_env *env, struct cl_io *io, struct osc_page *ops , void *cbdata) { struct cl_page *page = ops->ops_cl.cpl_page; struct osc_async_page *oap; __u64 start = *(__u64 *)cbdata; oap = &ops->ops_oap; if (oap->oap_cmd & OBD_BRW_WRITE && !list_empty(&oap->oap_pending_item)) CL_PAGE_DEBUG(D_ERROR, env, page, "exists " LPU64 "/%s.\n", start, current->comm); if (PageLocked(page->cp_vmpage)) CDEBUG(D_CACHE, "page %p index %lu locked for %d.\n", ops, osc_index(ops), oap->oap_cmd & OBD_BRW_RWMASK); return CLP_GANG_OKAY; }
/**
 * Implementation of struct cl_object_operations::coo_req_attr_set() for osc
 * layer. osc is responsible for struct obdo::o_id and struct obdo::o_seq
 * fields.
 *
 * Copies cached attributes (timestamps from the lov_oinfo LVB, object id
 * and sequence) into the request obdo for every bit set in
 * attr->cra_flags, and — for OBD_MD_FLHANDLE — the remote handle of the
 * DLM lock covering attr->cra_page.
 */
static void osc_req_attr_set(const struct lu_env *env, struct cl_object *obj,
			     struct cl_req_attr *attr)
{
	struct lov_oinfo *oinfo;
	struct obdo *oa;
	struct ost_lvb *lvb;
	u64 flags = attr->cra_flags;

	oinfo = cl2osc(obj)->oo_oinfo;
	lvb = &oinfo->loi_lvb;
	oa = attr->cra_oa;

	/* Each timestamp is copied only when its flag was requested, and
	 * the corresponding o_valid bit is set so the server trusts it. */
	if ((flags & OBD_MD_FLMTIME) != 0) {
		oa->o_mtime = lvb->lvb_mtime;
		oa->o_valid |= OBD_MD_FLMTIME;
	}
	if ((flags & OBD_MD_FLATIME) != 0) {
		oa->o_atime = lvb->lvb_atime;
		oa->o_valid |= OBD_MD_FLATIME;
	}
	if ((flags & OBD_MD_FLCTIME) != 0) {
		oa->o_ctime = lvb->lvb_ctime;
		oa->o_valid |= OBD_MD_FLCTIME;
	}
	if (flags & OBD_MD_FLGROUP) {
		ostid_set_seq(&oa->o_oi, ostid_seq(&oinfo->loi_oi));
		oa->o_valid |= OBD_MD_FLGROUP;
	}
	if (flags & OBD_MD_FLID) {
		ostid_set_id(&oa->o_oi, ostid_id(&oinfo->loi_oi));
		oa->o_valid |= OBD_MD_FLID;
	}
	if (flags & OBD_MD_FLHANDLE) {
		struct ldlm_lock *lock;
		struct osc_page *opg;

		opg = osc_cl_page_osc(attr->cra_page, cl2osc(obj));
		/* Look up the DLM lock covering this page's offset.
		 * OSC_DAP_FL_CANCELING presumably also matches locks in the
		 * process of being cancelled — confirm against the dap flag
		 * definitions. */
		lock = osc_dlmlock_at_pgoff(env, cl2osc(obj), osc_index(opg),
					    OSC_DAP_FL_TEST_LOCK |
					    OSC_DAP_FL_CANCELING);
		if (lock == NULL && !opg->ops_srvlock) {
			/* A non-server-locked page with no covering client
			 * lock is a fatal inconsistency: dump the page, the
			 * LDLM resource, and the stack, then LBUG. */
			struct ldlm_resource *res;
			struct ldlm_res_id *resname;

			CL_PAGE_DEBUG(D_ERROR, env, attr->cra_page,
				      "uncovered page!\n");

			resname = &osc_env_info(env)->oti_resname;
			ostid_build_res_name(&oinfo->loi_oi, resname);
			res = ldlm_resource_get(
				osc_export(cl2osc(obj))->exp_obd->obd_namespace,
				NULL, resname, LDLM_EXTENT, 0);
			ldlm_resource_dump(D_ERROR, res);

			libcfs_debug_dumpstack(NULL);
			LBUG();
		}

		/* check for lockless io. */
		if (lock != NULL) {
			oa->o_handle = lock->l_remote_handle;
			oa->o_valid |= OBD_MD_FLHANDLE;
			LDLM_LOCK_PUT(lock);
		}
	}
}
/**
 * Commit the pages in @qin to the dirty cache for asynchronous write-out.
 *
 * For lockless IO the first/last pages are clipped to [from, to) first.
 * Each page is then added to the dirty cache (unless already pending) and
 * handed to @cb, after which it must not be touched.  Stops early with
 * -EBUSY if a page is already part of an in-flight RPC, or with the
 * error from osc_page_cache_add().
 *
 * \retval 0 on success, negative errno on failure (remaining pages are
 *	   left on @qin).
 */
static int osc_io_commit_async(const struct lu_env *env,
			       const struct cl_io_slice *ios,
			       struct cl_page_list *qin, int from, int to,
			       cl_commit_cbt cb)
{
	struct cl_io *io = ios->cis_io;
	struct osc_io *oio = cl2osc_io(env, ios);
	struct osc_object *osc = cl2osc(ios->cis_obj);
	struct cl_page *page;
	struct cl_page *last_page;
	struct osc_page *opg;
	int result = 0;
	ENTRY;

	LASSERT(qin->pl_nr > 0);

	/* Handle partial page cases */
	last_page = cl_page_list_last(qin);
	if (oio->oi_lockless) {
		page = cl_page_list_first(qin);
		if (page == last_page) {
			/* Single page: clip both ends. */
			cl_page_clip(env, page, from, to);
		} else {
			/* Clip only the partial first and last pages;
			 * interior pages stay full. */
			if (from != 0)
				cl_page_clip(env, page, from, PAGE_SIZE);
			if (to != PAGE_SIZE)
				cl_page_clip(env, last_page, 0, to);
		}
	}

	/* Consume qin front-to-back; successfully committed pages are
	 * removed from the list, so pl_nr reflects what remains on error. */
	while (qin->pl_nr > 0) {
		struct osc_async_page *oap;

		page = cl_page_list_first(qin);
		opg = osc_cl_page_osc(page, osc);
		oap = &opg->ops_oap;

		if (!list_empty(&oap->oap_rpc_item)) {
			/* Page is already part of an RPC in flight. */
			CDEBUG(D_CACHE, "Busy oap %p page %p for submit.\n",
			       oap, opg);
			result = -EBUSY;
			break;
		}

		/* The page may be already in dirty cache. */
		if (list_empty(&oap->oap_pending_item)) {
			result = osc_page_cache_add(env, &opg->ops_cl, io);
			if (result != 0)
				break;
		}

		/* Last page may be partial: report only [0, to) for it. */
		osc_page_touch_at(env, osc2cl(osc), osc_index(opg),
				  page == last_page ? to : PAGE_SIZE);

		cl_page_list_del(env, qin, page);

		(*cb)(env, io, page);
		/* Can't access page any more. Page can be in transfer and
		 * complete at any time. */
	}

	/* for sync write, kernel will wait for this page to be flushed before
	 * osc_io_end() is called, so release it earlier.
	 * for mkwrite(), it's known there is no further pages. */
	if (cl_io_is_sync_write(io) && oio->oi_active != NULL) {
		osc_extent_release(env, oio->oi_active);
		oio->oi_active = NULL;
	}

	CDEBUG(D_INFO, "%d %d\n", qin->pl_nr, result);
	RETURN(result);
}
/**
 * Implementation of struct cl_req_operations::cro_attr_set() for osc
 * layer. osc is responsible for struct obdo::o_id and struct obdo::o_seq
 * fields.
 *
 * NOTE(review): an osc_req_attr_set() with a different (newer,
 * cl_object_operations-based) signature also appears earlier in this
 * file — these look like two Lustre releases' versions of the same
 * function; confirm only one is actually compiled.
 *
 * Copies LVB timestamps and object id/seq into the request obdo for each
 * requested flag; for OBD_MD_FLHANDLE, finds the cl_lock covering some
 * page of the request and copies its remote DLM handle.
 */
static void osc_req_attr_set(const struct lu_env *env,
			     const struct cl_req_slice *slice,
			     const struct cl_object *obj,
			     struct cl_req_attr *attr, obd_valid flags)
{
	struct lov_oinfo *oinfo;
	struct cl_req *clerq;
	struct cl_page *apage; /* _some_ page in @clerq */
	struct cl_lock *lock;  /* _some_ lock protecting @apage */
	struct osc_lock *olck;
	struct osc_page *opg;
	struct obdo *oa;
	struct ost_lvb *lvb;

	oinfo = cl2osc(obj)->oo_oinfo;
	lvb = &oinfo->loi_lvb;
	oa = attr->cra_oa;

	/* Timestamps come from the cached LVB; each copy also sets the
	 * matching o_valid bit. */
	if ((flags & OBD_MD_FLMTIME) != 0) {
		oa->o_mtime = lvb->lvb_mtime;
		oa->o_valid |= OBD_MD_FLMTIME;
	}
	if ((flags & OBD_MD_FLATIME) != 0) {
		oa->o_atime = lvb->lvb_atime;
		oa->o_valid |= OBD_MD_FLATIME;
	}
	if ((flags & OBD_MD_FLCTIME) != 0) {
		oa->o_ctime = lvb->lvb_ctime;
		oa->o_valid |= OBD_MD_FLCTIME;
	}
	if (flags & OBD_MD_FLGROUP) {
		ostid_set_seq(&oa->o_oi, ostid_seq(&oinfo->loi_oi));
		oa->o_valid |= OBD_MD_FLGROUP;
	}
	if (flags & OBD_MD_FLID) {
		ostid_set_id(&oa->o_oi, ostid_id(&oinfo->loi_oi));
		oa->o_valid |= OBD_MD_FLID;
	}
	if (flags & OBD_MD_FLHANDLE) {
		struct cl_object *subobj;

		clerq = slice->crs_req;
		LASSERT(!cfs_list_empty(&clerq->crq_pages));
		/* Any page of the request will do: they are all covered by
		 * the same criteria the lookup below uses. */
		apage = container_of(clerq->crq_pages.next,
				     struct cl_page, cp_flight);
		opg = osc_cl_page_osc(apage, NULL);
		subobj = opg->ops_cl.cpl_obj;
		lock = cl_lock_at_pgoff(env, subobj, osc_index(opg),
					NULL, 1, 1);
		if (lock == NULL) {
			/* No covering lock is a fatal inconsistency: dump
			 * every lock on the object, the page, and the
			 * stack, then LBUG. */
			struct cl_object_header *head;
			struct cl_lock *scan;

			head = cl_object_header(subobj);
			cfs_list_for_each_entry(scan, &head->coh_locks,
						cll_linkage)
				CL_LOCK_DEBUG(D_ERROR, env, scan,
					      "no cover page!\n");
			CL_PAGE_DEBUG(D_ERROR, env, apage,
				      "dump uncover page!\n");
			libcfs_debug_dumpstack(NULL);
			LBUG();
		}

		olck = osc_lock_at(lock);
		LASSERT(olck != NULL);
		/* Server-locked (lockless) pages must have no client-side
		 * DLM lock attached. */
		LASSERT(ergo(opg->ops_srvlock, olck->ols_lock == NULL));
		/* check for lockless io. */
		if (olck->ols_lock != NULL) {
			oa->o_handle = olck->ols_lock->l_remote_handle;
			oa->o_valid |= OBD_MD_FLHANDLE;
		}
		cl_lock_put(env, lock);
	}
}