/**
 * Lock upcall function that is executed either when a reply to an ENQUEUE RPC
 * is received from the server, or after osc_enqueue_base() matched a local
 * DLM lock.
 */
static int osc_lock_upcall(void *cookie, struct lustre_handle *lockh,
			   int errcode)
{
	struct osc_lock		*oscl  = cookie;
	struct cl_lock_slice	*slice = &oscl->ols_cl;
	struct lu_env		*env;
	int			 rc;
	ENTRY;

	env = cl_env_percpu_get();
	/* should never happen, similar to osc_ldlm_blocking_ast(). */
	LASSERT(!IS_ERR(env));

	rc = ldlm_error2errno(errcode);
	if (oscl->ols_state == OLS_ENQUEUED) {
		oscl->ols_state = OLS_UPCALL_RECEIVED;
	} else if (oscl->ols_state == OLS_CANCELLED) {
		rc = -EIO;
	} else {
		CERROR("Impossible state: %d\n", oscl->ols_state);
		LBUG();
	}

	if (rc == 0)
		osc_lock_granted(env, oscl, lockh, errcode == ELDLM_OK);

	/* Error handling, some errors are tolerable. */
	if (oscl->ols_locklessable && rc == -EUSERS) {
		/* This is a tolerable error, turn this lock into
		 * lockless lock.
		 */
		osc_object_set_contended(cl2osc(slice->cls_obj));
		LASSERT(slice->cls_ops != oscl->ols_lockless_ops);

		/* Change this lock to ldlmlock-less lock. */
		osc_lock_to_lockless(env, oscl, 1);
		oscl->ols_state = OLS_GRANTED;
		rc = 0;
	} else if (oscl->ols_glimpse && rc == -ENAVAIL) {
		LASSERT(oscl->ols_flags & LDLM_FL_LVB_READY);
		osc_lock_lvb_update(env, cl2osc(slice->cls_obj),
				    NULL, &oscl->ols_lvb);
		/* Hide the error. */
		rc = 0;
	} else if (rc < 0 && oscl->ols_flags & LDLM_FL_NDELAY) {
		rc = -EWOULDBLOCK;
	}

	if (oscl->ols_owner != NULL)
		cl_sync_io_note(env, oscl->ols_owner, rc);
	cl_env_percpu_put(env);

	RETURN(rc);
}
static int osc_io_ladvise_start(const struct lu_env *env,
				const struct cl_io_slice *slice)
{
	int			 result = 0;
	struct cl_io		*io = slice->cis_io;
	struct osc_io		*oio = cl2osc_io(env, slice);
	struct cl_object	*obj = slice->cis_obj;
	struct lov_oinfo	*loi = cl2osc(obj)->oo_oinfo;
	struct cl_ladvise_io	*lio = &io->u.ci_ladvise;
	struct obdo		*oa = &oio->oi_oa;
	struct osc_async_cbargs	*cbargs = &oio->oi_cbarg;
	struct lu_ladvise	*ladvise;
	struct ladvise_hdr	*ladvise_hdr;
	int			 buf_size;
	int			 num_advise = 1;
	ENTRY;

	/* TODO: add multiple ladvise support in CLIO */
	buf_size = offsetof(typeof(*ladvise_hdr), lah_advise[num_advise]);
	if (osc_env_info(env)->oti_ladvise_buf.lb_len < buf_size)
		lu_buf_realloc(&osc_env_info(env)->oti_ladvise_buf, buf_size);

	ladvise_hdr = osc_env_info(env)->oti_ladvise_buf.lb_buf;
	if (ladvise_hdr == NULL)
		RETURN(-ENOMEM);

	memset(ladvise_hdr, 0, buf_size);
	ladvise_hdr->lah_magic = LADVISE_MAGIC;
	ladvise_hdr->lah_count = num_advise;
	ladvise_hdr->lah_flags = lio->li_flags;

	memset(oa, 0, sizeof(*oa));
	oa->o_oi = loi->loi_oi;
	oa->o_valid = OBD_MD_FLID;
	obdo_set_parent_fid(oa, lio->li_fid);

	ladvise = ladvise_hdr->lah_advise;
	ladvise->lla_start = lio->li_start;
	ladvise->lla_end = lio->li_end;
	ladvise->lla_advice = lio->li_advice;

	if (lio->li_flags & LF_ASYNC) {
		result = osc_ladvise_base(osc_export(cl2osc(obj)), oa,
					  ladvise_hdr, NULL, NULL, NULL);
	} else {
		init_completion(&cbargs->opc_sync);
		result = osc_ladvise_base(osc_export(cl2osc(obj)), oa,
					  ladvise_hdr, osc_async_upcall,
					  cbargs, PTLRPCD_SET);
		cbargs->opc_rpc_sent = result == 0;
	}
	RETURN(result);
}
static int osc_io_read_ahead(const struct lu_env *env,
			     const struct cl_io_slice *ios,
			     pgoff_t start, struct cl_read_ahead *ra)
{
	struct osc_object	*osc = cl2osc(ios->cis_obj);
	struct ldlm_lock	*dlmlock;
	int			 result = -ENODATA;
	ENTRY;

	dlmlock = osc_dlmlock_at_pgoff(env, osc, start, 0);
	if (dlmlock != NULL) {
		if (dlmlock->l_req_mode != LCK_PR) {
			struct lustre_handle lockh;

			ldlm_lock2handle(dlmlock, &lockh);
			ldlm_lock_addref(&lockh, LCK_PR);
			ldlm_lock_decref(&lockh, dlmlock->l_req_mode);
		}

		ra->cra_end = cl_index(osc2cl(osc),
				       dlmlock->l_policy_data.l_extent.end);
		ra->cra_release = osc_read_ahead_release;
		ra->cra_cbdata = dlmlock;
		result = 0;
	}

	RETURN(result);
}
static void osc_page_touch_at(const struct lu_env *env,
			      struct cl_object *obj, pgoff_t idx, unsigned to)
{
	struct lov_oinfo *loi  = cl2osc(obj)->oo_oinfo;
	struct cl_attr	 *attr = &osc_env_info(env)->oti_attr;
	int		  valid;
	__u64		  kms;

	/* offset within stripe */
	kms = cl_offset(obj, idx) + to;

	cl_object_attr_lock(obj);
	/*
	 * XXX old code used
	 *
	 *         ll_inode_size_lock(inode, 0); lov_stripe_lock(lsm);
	 *
	 * here
	 */
	CDEBUG(D_INODE, "stripe KMS %sincreasing %llu->%llu %llu\n",
	       kms > loi->loi_kms ? "" : "not ", loi->loi_kms, kms,
	       loi->loi_lvb.lvb_size);

	valid = 0;
	if (kms > loi->loi_kms) {
		attr->cat_kms = kms;
		valid |= CAT_KMS;
	}
	if (kms > loi->loi_lvb.lvb_size) {
		attr->cat_size = kms;
		valid |= CAT_SIZE;
	}
	cl_object_attr_set(env, obj, attr, valid);
	cl_object_attr_unlock(obj);
}
static int osc_io_commit_write(const struct lu_env *env,
			       const struct cl_io_slice *ios,
			       const struct cl_page_slice *slice,
			       unsigned from, unsigned to)
{
	struct osc_io		*oio = cl2osc_io(env, ios);
	struct osc_page		*opg = cl2osc_page(slice);
	struct osc_object	*obj = cl2osc(opg->ops_cl.cpl_obj);
	struct osc_async_page	*oap = &opg->ops_oap;

	LASSERT(to > 0);
	/*
	 * XXX instead of calling osc_page_touch() here and in
	 * osc_io_fault_start() it might be more logical to introduce
	 * cl_page_touch() method, that generic cl_io_commit_write() and page
	 * fault code calls.
	 */
	osc_page_touch(env, cl2osc_page(slice), to);

	if (!client_is_remote(osc_export(obj)) &&
	    capable(CFS_CAP_SYS_RESOURCE))
		oap->oap_brw_flags |= OBD_BRW_NOQUOTA;

	if (oio->oi_lockless)
		/* see osc_io_prepare_write() for lockless io handling. */
		cl_page_clip(env, slice->cpl_page, from, to);

	return 0;
}
static void osc_io_setattr_end(const struct lu_env *env,
			       const struct cl_io_slice *slice)
{
	struct cl_io		*io = slice->cis_io;
	struct osc_io		*oio = cl2osc_io(env, slice);
	struct cl_object	*obj = slice->cis_obj;
	struct osc_async_cbargs	*cbargs = &oio->oi_cbarg;
	int result = 0;

	if (cbargs->opc_rpc_sent) {
		wait_for_completion(&cbargs->opc_sync);
		result = io->ci_result = cbargs->opc_rc;
	}

	if (result == 0) {
		if (oio->oi_lockless) {
			/* lockless truncate */
			struct osc_device *osd = lu2osc_dev(obj->co_lu.lo_dev);

			LASSERT(cl_io_is_trunc(io));
			/* XXX: Need a lock. */
			osd->od_stats.os_lockless_truncates++;
		}
	}

	if (cl_io_is_trunc(io)) {
		__u64 size = io->u.ci_setattr.sa_attr.lvb_size;

		osc_trunc_check(env, io, oio, size);
		if (oio->oi_trunc != NULL) {
			osc_cache_truncate_end(env, oio, cl2osc(obj));
			oio->oi_trunc = NULL;
		}
	}
}
/**
 * Updates object attributes from a lock value block (lvb) received together
 * with the DLM lock reply from the server. Copy of osc_update_enqueue()
 * logic.
 *
 * This can be optimized to not update attributes when lock is a result of a
 * local match.
 *
 * Called under lock and resource spin-locks.
 */
static void osc_lock_lvb_update(const struct lu_env *env,
				struct osc_lock *olck, int rc)
{
	struct ost_lvb		*lvb;
	struct cl_object	*obj;
	struct lov_oinfo	*oinfo;
	struct cl_attr		*attr;
	unsigned		 valid;

	if (!(olck->ols_flags & LDLM_FL_LVB_READY))
		return;

	lvb   = &olck->ols_lvb;
	obj   = olck->ols_cl.cls_obj;
	oinfo = cl2osc(obj)->oo_oinfo;
	attr  = &osc_env_info(env)->oti_attr;
	valid = CAT_BLOCKS | CAT_ATIME | CAT_CTIME | CAT_MTIME | CAT_SIZE;
	cl_lvb2attr(attr, lvb);

	cl_object_attr_lock(obj);
	if (rc == 0) {
		struct ldlm_lock *dlmlock;
		__u64 size;

		dlmlock = olck->ols_lock;
		LASSERT(dlmlock != NULL);

		/* re-grab LVB from a dlm lock under DLM spin-locks. */
		*lvb = *(struct ost_lvb *)dlmlock->l_lvb_data;
		size = lvb->lvb_size;
		/* Extend KMS up to the end of this lock and no further
		 * A lock on [x,y] means a KMS of up to y + 1 bytes!
		 */
		if (size > dlmlock->l_policy_data.l_extent.end)
			size = dlmlock->l_policy_data.l_extent.end + 1;
		if (size >= oinfo->loi_kms) {
			LDLM_DEBUG(dlmlock, "lock acquired, setting rss="LPU64
				   ", kms="LPU64, lvb->lvb_size, size);
			valid |= CAT_KMS;
			attr->cat_kms = size;
		} else {
			LDLM_DEBUG(dlmlock, "lock acquired, setting rss="
				   LPU64"; leaving kms="LPU64", end="LPU64,
				   lvb->lvb_size, oinfo->loi_kms,
				   dlmlock->l_policy_data.l_extent.end);
		}
		ldlm_lock_allow_match_locked(dlmlock);
	} else if (rc == -ENAVAIL && olck->ols_glimpse) {
		CDEBUG(D_INODE, "glimpsed, setting rss="LPU64"; leaving"
		       " kms="LPU64"\n", lvb->lvb_size, oinfo->loi_kms);
	} else
		valid = 0;

	if (valid != 0)
		cl_object_attr_set(env, obj, attr, valid);

	cl_object_attr_unlock(obj);
}
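/*
 * Illustrative sketch only (clamp_kms() is a hypothetical helper, not Lustre
 * code): the "a lock on [x,y] means a KMS of up to y + 1 bytes" rule above
 * amounts to clamping the server-reported size to the end of the granted
 * extent plus one.
 */
#include <stdint.h>

static uint64_t clamp_kms(uint64_t lvb_size, uint64_t ext_end)
{
	/* The lock only covers bytes up to ext_end, so the client may trust
	 * a known-minimum-size of at most ext_end + 1. */
	return lvb_size > ext_end ? ext_end + 1 : lvb_size;
}
/* Example: lvb_size = 10000 with a lock extent ending at 4095 gives
 * kms = 4096; lvb_size = 2000 is already within the extent and is kept. */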
static int osc_attr_get(const struct lu_env *env, struct cl_object *obj,
			struct cl_attr *attr)
{
	struct lov_oinfo *oinfo = cl2osc(obj)->oo_oinfo;

	cl_lvb2attr(attr, &oinfo->loi_lvb);
	attr->cat_kms = oinfo->loi_kms_valid ? oinfo->loi_kms : 0;
	return 0;
}
static int osc_object_glimpse(const struct lu_env *env,
			      const struct cl_object *obj, struct ost_lvb *lvb)
{
	struct lov_oinfo *oinfo = cl2osc(obj)->oo_oinfo;

	lvb->lvb_size   = oinfo->loi_kms;
	lvb->lvb_blocks = oinfo->loi_lvb.lvb_blocks;
	return 0;
}
static void osc_lock_granted(const struct lu_env *env, struct osc_lock *oscl,
			     struct lustre_handle *lockh, bool lvb_update)
{
	struct ldlm_lock *dlmlock;

	dlmlock = ldlm_handle2lock_long(lockh, 0);
	LASSERT(dlmlock);

	/* lock reference taken by ldlm_handle2lock_long() is
	 * owned by osc_lock and released in osc_lock_detach()
	 */
	lu_ref_add(&dlmlock->l_reference, "osc_lock", oscl);
	oscl->ols_has_ref = 1;

	LASSERT(!oscl->ols_dlmlock);
	oscl->ols_dlmlock = dlmlock;

	/* This may be a matched lock for glimpse request, do not hold
	 * lock reference in that case.
	 */
	if (!oscl->ols_glimpse) {
		/* hold a refc for non glimpse lock which will
		 * be released in osc_lock_cancel()
		 */
		lustre_handle_copy(&oscl->ols_handle, lockh);
		ldlm_lock_addref(lockh, oscl->ols_einfo.ei_mode);
		oscl->ols_hold = 1;
	}

	/* Lock must have been granted. */
	lock_res_and_lock(dlmlock);
	if (dlmlock->l_granted_mode == dlmlock->l_req_mode) {
		struct ldlm_extent   *ext = &dlmlock->l_policy_data.l_extent;
		struct cl_lock_descr *descr = &oscl->ols_cl.cls_lock->cll_descr;

		/* extend the lock extent, otherwise it will have problem when
		 * we decide whether to grant a lockless lock.
		 */
		descr->cld_mode  = osc_ldlm2cl_lock(dlmlock->l_granted_mode);
		descr->cld_start = cl_index(descr->cld_obj, ext->start);
		descr->cld_end   = cl_index(descr->cld_obj, ext->end);
		descr->cld_gid   = ext->gid;

		/* no lvb update for matched lock */
		if (lvb_update) {
			LASSERT(oscl->ols_flags & LDLM_FL_LVB_READY);
			osc_lock_lvb_update(env, cl2osc(oscl->ols_cl.cls_obj),
					    dlmlock, NULL);
		}
		LINVRNT(osc_lock_invariant(oscl));
	}
	unlock_res_and_lock(dlmlock);

	LASSERT(oscl->ols_state != OLS_GRANTED);
	oscl->ols_state = OLS_GRANTED;
}
/**
 * Helper for osc_dlm_blocking_ast() handling discrepancies between cl_lock
 * and ldlm_lock caches.
 */
static int mdc_dlm_blocking_ast0(const struct lu_env *env,
				 struct ldlm_lock *dlmlock,
				 void *data, int flag)
{
	struct cl_object *obj = NULL;
	int result = 0;
	bool discard;
	enum cl_lock_mode mode = CLM_READ;
	ENTRY;

	LASSERT(flag == LDLM_CB_CANCELING);
	LASSERT(dlmlock != NULL);

	lock_res_and_lock(dlmlock);
	if (dlmlock->l_granted_mode != dlmlock->l_req_mode) {
		dlmlock->l_ast_data = NULL;
		unlock_res_and_lock(dlmlock);
		RETURN(0);
	}

	discard = ldlm_is_discard_data(dlmlock);
	if (dlmlock->l_granted_mode & (LCK_PW | LCK_GROUP))
		mode = CLM_WRITE;

	if (dlmlock->l_ast_data != NULL) {
		obj = osc2cl(dlmlock->l_ast_data);
		dlmlock->l_ast_data = NULL;
		cl_object_get(obj);
	}
	ldlm_set_kms_ignore(dlmlock);
	unlock_res_and_lock(dlmlock);

	/* if l_ast_data is NULL, the dlmlock was enqueued by AGL or
	 * the object has been destroyed. */
	if (obj != NULL) {
		struct cl_attr *attr = &osc_env_info(env)->oti_attr;

		/* Destroy pages covered by the extent of the DLM lock */
		result = mdc_lock_flush(env, cl2osc(obj), cl_index(obj, 0),
					CL_PAGE_EOF, mode, discard);

		/* Losing a lock, set KMS to 0.
		 * NB: assumed that DOM lock covers whole data on MDT.
		 */
		lock_res_and_lock(dlmlock);
		cl_object_attr_lock(obj);
		attr->cat_kms = 0;
		cl_object_attr_update(env, obj, attr, CAT_KMS);
		cl_object_attr_unlock(obj);
		unlock_res_and_lock(dlmlock);

		cl_object_put(env, obj);
	}
	RETURN(result);
}
static void osc_io_rw_iter_fini(const struct lu_env *env,
				const struct cl_io_slice *ios)
{
	struct osc_io *oio = osc_env_io(env);
	struct osc_object *osc = cl2osc(ios->cis_obj);
	struct client_obd *cli = osc_cli(osc);

	if (oio->oi_lru_reserved > 0) {
		atomic_add(oio->oi_lru_reserved, cli->cl_lru_left);
		oio->oi_lru_reserved = 0;
	}
}
static int osc_object_prune(const struct lu_env *env, struct cl_object *obj)
{
	struct osc_object  *osc = cl2osc(obj);
	struct ldlm_res_id *resname = &osc_env_info(env)->oti_resname;

	/* DLM locks don't hold a reference of osc_object so we have to
	 * clear it before the object is being destroyed. */
	ostid_build_res_name(&osc->oo_oinfo->loi_oi, resname);
	ldlm_resource_iterate(osc_export(osc)->exp_obd->obd_namespace, resname,
			      osc_object_ast_clear, osc);
	return 0;
}
static int osc_io_data_version_start(const struct lu_env *env,
				     const struct cl_io_slice *slice)
{
	struct cl_data_version_io *dv	= &slice->cis_io->u.ci_data_version;
	struct osc_io		*oio	= cl2osc_io(env, slice);
	struct obdo		*oa	= &oio->oi_oa;
	struct osc_async_cbargs	*cbargs	= &oio->oi_cbarg;
	struct osc_object	*obj	= cl2osc(slice->cis_obj);
	struct lov_oinfo	*loi	= obj->oo_oinfo;
	struct obd_export	*exp	= osc_export(obj);
	struct ptlrpc_request	*req;
	struct ost_body		*body;
	struct osc_data_version_args *dva;
	int rc;
	ENTRY;

	memset(oa, 0, sizeof(*oa));
	oa->o_oi = loi->loi_oi;
	oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;

	if (dv->dv_flags & (LL_DV_RD_FLUSH | LL_DV_WR_FLUSH)) {
		oa->o_valid |= OBD_MD_FLFLAGS;
		oa->o_flags |= OBD_FL_SRVLOCK;
		if (dv->dv_flags & LL_DV_WR_FLUSH)
			oa->o_flags |= OBD_FL_FLUSH;
	}

	init_completion(&cbargs->opc_sync);

	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
	if (req == NULL)
		RETURN(-ENOMEM);

	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
	if (rc < 0) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

	body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
	lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);

	ptlrpc_request_set_replen(req);
	req->rq_interpret_reply = osc_data_version_interpret;
	CLASSERT(sizeof(*dva) <= sizeof(req->rq_async_args));
	dva = ptlrpc_req_async_args(req);
	dva->dva_oio = oio;

	ptlrpcd_add_req(req);

	RETURN(0);
}
static void osc_io_write_iter_fini(const struct lu_env *env,
				   const struct cl_io_slice *ios)
{
	struct osc_io *oio = osc_env_io(env);
	struct osc_object *osc = cl2osc(ios->cis_obj);

	if (oio->oi_lru_reserved > 0) {
		osc_lru_unreserve(osc_cli(osc), oio->oi_lru_reserved);
		oio->oi_lru_reserved = 0;
	}
	oio->oi_write_osclock = NULL;

	osc_io_iter_fini(env, ios);
}
static void osc_io_iter_fini(const struct lu_env *env,
			     const struct cl_io_slice *ios)
{
	struct osc_io *oio = osc_env_io(env);

	if (oio->oi_is_active) {
		struct osc_object *osc = cl2osc(ios->cis_obj);

		oio->oi_is_active = 0;
		LASSERT(atomic_read(&osc->oo_nr_ios) > 0);
		if (atomic_dec_and_test(&osc->oo_nr_ios))
			wake_up_all(&osc->oo_io_waitq);
	}
}
static int osc_object_prune(const struct lu_env *env, struct cl_object *obj)
{
	struct osc_object  *osc = cl2osc(obj);
	struct ldlm_res_id *resname = &osc_env_info(env)->oti_resname;

	LASSERTF(osc->oo_npages == 0,
		 DFID "still have %lu pages, obj: %p, osc: %p\n",
		 PFID(lu_object_fid(&obj->co_lu)), osc->oo_npages, obj, osc);

	/* DLM locks don't hold a reference of osc_object so we have to
	 * clear it before the object is being destroyed. */
	ostid_build_res_name(&osc->oo_oinfo->loi_oi, resname);
	ldlm_resource_iterate(osc_export(osc)->exp_obd->obd_namespace, resname,
			      osc_object_ast_clear, osc);
	return 0;
}
void mdc_lock_lockless_cancel(const struct lu_env *env,
			      const struct cl_lock_slice *slice)
{
	struct osc_lock		*ols = cl2osc_lock(slice);
	struct osc_object	*osc = cl2osc(slice->cls_obj);
	struct cl_lock_descr	*descr = &slice->cls_lock->cll_descr;
	int rc;

	LASSERT(ols->ols_dlmlock == NULL);
	rc = mdc_lock_flush(env, osc, descr->cld_start, descr->cld_end,
			    descr->cld_mode, 0);
	if (rc != 0)
		CERROR("Pages for lockless lock %p were not purged(%d)\n",
		       ols, rc);

	osc_lock_wake_waiters(env, osc, ols);
}
static void osc_trunc_check(const struct lu_env *env, struct cl_io *io,
			    struct osc_io *oio, __u64 size)
{
	struct cl_object *clob;
	int	partial;
	pgoff_t	start;

	clob = oio->oi_cl.cis_obj;
	start = cl_index(clob, size);
	partial = cl_offset(clob, start) < size;

	/*
	 * Complain if there are pages in the truncated region.
	 */
	osc_page_gang_lookup(env, io, cl2osc(clob),
			     start + partial, CL_PAGE_EOF,
			     trunc_check_cb, (void *)&size);
}
static int osc_io_iter_init(const struct lu_env *env,
			    const struct cl_io_slice *ios)
{
	struct osc_object *osc = cl2osc(ios->cis_obj);
	struct obd_import *imp = osc_cli(osc)->cl_import;
	int rc = -EIO;

	spin_lock(&imp->imp_lock);
	if (likely(!imp->imp_invalid)) {
		struct osc_io *oio = osc_env_io(env);

		atomic_inc(&osc->oo_nr_ios);
		oio->oi_is_active = 1;
		rc = 0;
	}
	spin_unlock(&imp->imp_lock);

	return rc;
}
/**
 * Find any ldlm lock that covers the osc object.
 *
 * \retval 0	not found
 * \retval 1	found one
 * \retval < 0	error
 */
static int osc_object_find_cbdata(const struct lu_env *env,
				  struct cl_object *obj, ldlm_iterator_t iter,
				  void *data)
{
	struct ldlm_res_id res_id;
	struct obd_device *obd;
	int rc = 0;

	ostid_build_res_name(&cl2osc(obj)->oo_oinfo->loi_oi, &res_id);
	obd = obj->co_lu.lo_dev->ld_obd;
	rc = ldlm_resource_iterate(obd->obd_namespace, &res_id, iter, data);
	if (rc == LDLM_ITER_STOP)
		return 1;

	if (rc == LDLM_ITER_CONTINUE)
		return 0;

	return rc;
}
/**
 * Breaks a link between osc_lock and dlm_lock.
 */
static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck)
{
	struct ldlm_lock *dlmlock;

	spin_lock(&osc_ast_guard);
	dlmlock = olck->ols_lock;
	if (dlmlock == NULL) {
		spin_unlock(&osc_ast_guard);
		return;
	}

	olck->ols_lock = NULL;
	/* wb(); --- for all who checks (ols->ols_lock != NULL) before
	 * call to osc_lock_detach() */
	dlmlock->l_ast_data = NULL;
	olck->ols_handle.cookie = 0ULL;
	spin_unlock(&osc_ast_guard);

	lock_res_and_lock(dlmlock);
	if (dlmlock->l_granted_mode == dlmlock->l_req_mode) {
		struct cl_object *obj = olck->ols_cl.cls_obj;
		struct cl_attr *attr  = &osc_env_info(env)->oti_attr;
		__u64 old_kms;

		cl_object_attr_lock(obj);
		/* Must get the value under the lock to avoid possible races. */
		old_kms = cl2osc(obj)->oo_oinfo->loi_kms;
		/* Update the kms. Need to loop all granted locks.
		 * Not a problem for the client */
		attr->cat_kms = ldlm_extent_shift_kms(dlmlock, old_kms);

		cl_object_attr_set(env, obj, attr, CAT_KMS);
		cl_object_attr_unlock(obj);
	}
	unlock_res_and_lock(dlmlock);

	/* release a reference taken in osc_lock_upcall0(). */
	LASSERT(olck->ols_has_ref);
	lu_ref_del(&dlmlock->l_reference, "osc_lock", olck);
	LDLM_LOCK_RELEASE(dlmlock);
	olck->ols_has_ref = 0;
}
static int osc_io_write_iter_init(const struct lu_env *env,
				  const struct cl_io_slice *ios)
{
	struct cl_io *io = ios->cis_io;
	struct osc_io *oio = osc_env_io(env);
	struct osc_object *osc = cl2osc(ios->cis_obj);
	unsigned long npages;
	ENTRY;

	if (cl_io_is_append(io))
		RETURN(osc_io_iter_init(env, ios));

	npages = io->u.ci_rw.crw_count >> PAGE_CACHE_SHIFT;
	if (io->u.ci_rw.crw_pos & ~PAGE_MASK)
		++npages;

	oio->oi_lru_reserved = osc_lru_reserve(osc_cli(osc), npages);

	RETURN(osc_io_iter_init(env, ios));
}
static void osc_io_fsync_end(const struct lu_env *env,
			     const struct cl_io_slice *slice)
{
	struct cl_fsync_io *fio = &slice->cis_io->u.ci_fsync;
	struct cl_object   *obj = slice->cis_obj;
	pgoff_t start = cl_index(obj, fio->fi_start);
	pgoff_t end   = cl_index(obj, fio->fi_end);
	int result = 0;

	if (fio->fi_mode == CL_FSYNC_LOCAL) {
		result = osc_cache_wait_range(env, cl2osc(obj), start, end);
	} else if (fio->fi_mode == CL_FSYNC_ALL) {
		struct osc_io		*oio    = cl2osc_io(env, slice);
		struct osc_async_cbargs	*cbargs = &oio->oi_cbarg;

		wait_for_completion(&cbargs->opc_sync);
		if (result == 0)
			result = cbargs->opc_rc;
	}
	slice->cis_io->ci_result = result;
}
static int osc_io_fsync_start(const struct lu_env *env,
			      const struct cl_io_slice *slice)
{
	struct cl_io       *io  = slice->cis_io;
	struct cl_fsync_io *fio = &io->u.ci_fsync;
	struct cl_object   *obj = slice->cis_obj;
	struct osc_object  *osc = cl2osc(obj);
	pgoff_t start = cl_index(obj, fio->fi_start);
	pgoff_t end   = cl_index(obj, fio->fi_end);
	int     result = 0;
	ENTRY;

	if (fio->fi_end == OBD_OBJECT_EOF)
		end = CL_PAGE_EOF;

	result = osc_cache_writeback_range(env, osc, start, end, 0,
					   fio->fi_mode == CL_FSYNC_DISCARD);
	if (result > 0) {
		fio->fi_nr_written += result;
		result = 0;
	}
	if (fio->fi_mode == CL_FSYNC_ALL) {
		int rc;

		/* we have to wait for writeback to finish before we can
		 * send OST_SYNC RPC. This is bad because it causes extents
		 * to be written osc by osc. However, we usually start
		 * writeback before CL_FSYNC_ALL so this won't have any real
		 * problem. */
		rc = osc_cache_wait_range(env, osc, start, end);
		if (result == 0)
			result = rc;
		rc = osc_fsync_ost(env, osc, fio);
		if (result == 0)
			result = rc;
	}

	RETURN(result);
}
int osc_attr_set(const struct lu_env *env, struct cl_object *obj,
		 const struct cl_attr *attr, unsigned valid)
{
	struct lov_oinfo *oinfo = cl2osc(obj)->oo_oinfo;
	struct ost_lvb   *lvb   = &oinfo->loi_lvb;

	if (valid & CAT_SIZE)
		lvb->lvb_size = attr->cat_size;
	if (valid & CAT_MTIME)
		lvb->lvb_mtime = attr->cat_mtime;
	if (valid & CAT_ATIME)
		lvb->lvb_atime = attr->cat_atime;
	if (valid & CAT_CTIME)
		lvb->lvb_ctime = attr->cat_ctime;
	if (valid & CAT_BLOCKS)
		lvb->lvb_blocks = attr->cat_blocks;
	if (valid & CAT_KMS) {
		CDEBUG(D_CACHE, "set kms from %llu to %llu\n",
		       oinfo->loi_kms, (__u64)attr->cat_kms);
		loi_kms_set(oinfo, attr->cat_kms);
	}
	return 0;
}
static int osc_io_rw_iter_init(const struct lu_env *env,
			       const struct cl_io_slice *ios)
{
	struct cl_io *io = ios->cis_io;
	struct osc_io *oio = osc_env_io(env);
	struct osc_object *osc = cl2osc(ios->cis_obj);
	struct client_obd *cli = osc_cli(osc);
	unsigned long c;
	unsigned int npages;
	unsigned int max_pages;
	ENTRY;

	if (cl_io_is_append(io))
		RETURN(0);

	npages = io->u.ci_rw.crw_count >> PAGE_CACHE_SHIFT;
	if (io->u.ci_rw.crw_pos & ~CFS_PAGE_MASK)
		++npages;

	max_pages = cli->cl_max_pages_per_rpc * cli->cl_max_rpcs_in_flight;
	if (npages > max_pages)
		npages = max_pages;

	c = atomic_read(cli->cl_lru_left);
	if (c < npages && osc_lru_reclaim(cli) > 0)
		c = atomic_read(cli->cl_lru_left);
	while (c >= npages) {
		if (c == atomic_cmpxchg(cli->cl_lru_left, c, c - npages)) {
			oio->oi_lru_reserved = npages;
			break;
		}
		c = atomic_read(cli->cl_lru_left);
	}

	RETURN(0);
}
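/*
 * Illustrative sketch only, using plain C11 atomics rather than the kernel
 * atomic_t API (reserve_slots() and its arguments are hypothetical): the loop
 * in osc_io_rw_iter_init() above reserves LRU slots lock-free by re-reading
 * the counter and retrying the compare-and-swap until it either succeeds or
 * too few slots remain.
 */
#include <stdatomic.h>
#include <stdbool.h>

static bool reserve_slots(atomic_long *left, long want)
{
	long c = atomic_load(left);

	while (c >= want) {
		/* On success the counter drops by 'want'; on failure 'c' is
		 * refreshed with the current value and the check repeats. */
		if (atomic_compare_exchange_weak(left, &c, c - want))
			return true;
	}
	return false;
}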
/**
 * This is called when a page is accessed within a file in a way that creates
 * a new page, if one was missing (i.e., if there was a hole at that place in
 * the file, or the accessed page is beyond the current file size).
 *
 * Expand stripe KMS if necessary.
 */
static void osc_page_touch_at(const struct lu_env *env, struct cl_object *obj,
			      pgoff_t idx, size_t to)
{
	struct lov_oinfo *loi  = cl2osc(obj)->oo_oinfo;
	struct cl_attr	 *attr = &osc_env_info(env)->oti_attr;
	int valid;
	__u64 kms;

	/* offset within stripe */
	kms = cl_offset(obj, idx) + to;

	cl_object_attr_lock(obj);
	/*
	 * XXX old code used
	 *
	 *         ll_inode_size_lock(inode, 0); lov_stripe_lock(lsm);
	 *
	 * here
	 */
	CDEBUG(D_INODE, "stripe KMS %sincreasing "LPU64"->"LPU64" "LPU64"\n",
	       kms > loi->loi_kms ? "" : "not ", loi->loi_kms, kms,
	       loi->loi_lvb.lvb_size);

	attr->cat_mtime = attr->cat_ctime = LTIME_S(CFS_CURRENT_TIME);
	valid = CAT_MTIME | CAT_CTIME;
	if (kms > loi->loi_kms) {
		attr->cat_kms = kms;
		valid |= CAT_KMS;
	}
	if (kms > loi->loi_lvb.lvb_size) {
		attr->cat_size = kms;
		valid |= CAT_SIZE;
	}
	cl_object_attr_update(env, obj, attr, valid);
	cl_object_attr_unlock(obj);
}
/**
 * Implementation of struct cl_object_operations::coo_req_attr_set() for osc
 * layer. osc is responsible for struct obdo::o_id and struct obdo::o_seq
 * fields.
 */
static void osc_req_attr_set(const struct lu_env *env, struct cl_object *obj,
			     struct cl_req_attr *attr)
{
	struct lov_oinfo *oinfo;
	struct obdo	 *oa;
	struct ost_lvb	 *lvb;
	u64		  flags = attr->cra_flags;

	oinfo = cl2osc(obj)->oo_oinfo;
	lvb = &oinfo->loi_lvb;
	oa = attr->cra_oa;

	if ((flags & OBD_MD_FLMTIME) != 0) {
		oa->o_mtime = lvb->lvb_mtime;
		oa->o_valid |= OBD_MD_FLMTIME;
	}
	if ((flags & OBD_MD_FLATIME) != 0) {
		oa->o_atime = lvb->lvb_atime;
		oa->o_valid |= OBD_MD_FLATIME;
	}
	if ((flags & OBD_MD_FLCTIME) != 0) {
		oa->o_ctime = lvb->lvb_ctime;
		oa->o_valid |= OBD_MD_FLCTIME;
	}
	if (flags & OBD_MD_FLGROUP) {
		ostid_set_seq(&oa->o_oi, ostid_seq(&oinfo->loi_oi));
		oa->o_valid |= OBD_MD_FLGROUP;
	}
	if (flags & OBD_MD_FLID) {
		ostid_set_id(&oa->o_oi, ostid_id(&oinfo->loi_oi));
		oa->o_valid |= OBD_MD_FLID;
	}
	if (flags & OBD_MD_FLHANDLE) {
		struct ldlm_lock *lock;
		struct osc_page *opg;

		opg = osc_cl_page_osc(attr->cra_page, cl2osc(obj));
		lock = osc_dlmlock_at_pgoff(env, cl2osc(obj), osc_index(opg),
				OSC_DAP_FL_TEST_LOCK | OSC_DAP_FL_CANCELING);
		if (lock == NULL && !opg->ops_srvlock) {
			struct ldlm_resource *res;
			struct ldlm_res_id *resname;

			CL_PAGE_DEBUG(D_ERROR, env, attr->cra_page,
				      "uncovered page!\n");

			resname = &osc_env_info(env)->oti_resname;
			ostid_build_res_name(&oinfo->loi_oi, resname);
			res = ldlm_resource_get(
				osc_export(cl2osc(obj))->exp_obd->obd_namespace,
				NULL, resname, LDLM_EXTENT, 0);
			ldlm_resource_dump(D_ERROR, res);

			libcfs_debug_dumpstack(NULL);
			LBUG();
		}

		/* check for lockless io. */
		if (lock != NULL) {
			oa->o_handle = lock->l_remote_handle;
			oa->o_valid |= OBD_MD_FLHANDLE;
			LDLM_LOCK_PUT(lock);
		}
	}
}
static int osc_object_fiemap(const struct lu_env *env, struct cl_object *obj,
			     struct ll_fiemap_info_key *fmkey,
			     struct fiemap *fiemap, size_t *buflen)
{
	struct obd_export *exp = osc_export(cl2osc(obj));
	struct ldlm_res_id resid;
	union ldlm_policy_data policy;
	struct lustre_handle lockh;
	enum ldlm_mode mode = LCK_MINMODE;
	struct ptlrpc_request *req;
	struct fiemap *reply;
	char *tmp;
	int rc;
	ENTRY;

	fmkey->lfik_oa.o_oi = cl2osc(obj)->oo_oinfo->loi_oi;
	if (!(fmkey->lfik_fiemap.fm_flags & FIEMAP_FLAG_SYNC))
		goto skip_locking;

	policy.l_extent.start = fmkey->lfik_fiemap.fm_start & PAGE_MASK;

	if (OBD_OBJECT_EOF - fmkey->lfik_fiemap.fm_length <=
	    fmkey->lfik_fiemap.fm_start + PAGE_SIZE - 1)
		policy.l_extent.end = OBD_OBJECT_EOF;
	else
		policy.l_extent.end = (fmkey->lfik_fiemap.fm_start +
				       fmkey->lfik_fiemap.fm_length +
				       PAGE_SIZE - 1) & PAGE_MASK;

	ostid_build_res_name(&fmkey->lfik_oa.o_oi, &resid);
	mode = ldlm_lock_match(exp->exp_obd->obd_namespace,
			       LDLM_FL_BLOCK_GRANTED | LDLM_FL_LVB_READY,
			       &resid, LDLM_EXTENT, &policy,
			       LCK_PR | LCK_PW, &lockh, 0);
	if (mode) { /* lock is cached on client */
		if (mode != LCK_PR) {
			ldlm_lock_addref(&lockh, LCK_PR);
			ldlm_lock_decref(&lockh, LCK_PW);
		}
	} else { /* no cached lock, needs acquire lock on server side */
		fmkey->lfik_oa.o_valid |= OBD_MD_FLFLAGS;
		fmkey->lfik_oa.o_flags |= OBD_FL_SRVLOCK;
	}

skip_locking:
	req = ptlrpc_request_alloc(class_exp2cliimp(exp),
				   &RQF_OST_GET_INFO_FIEMAP);
	if (req == NULL)
		GOTO(drop_lock, rc = -ENOMEM);

	req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_KEY, RCL_CLIENT,
			     sizeof(*fmkey));
	req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL, RCL_CLIENT,
			     *buflen);
	req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL, RCL_SERVER,
			     *buflen);

	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO);
	if (rc != 0) {
		ptlrpc_request_free(req);
		GOTO(drop_lock, rc);
	}
	tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_KEY);
	memcpy(tmp, fmkey, sizeof(*fmkey));
	tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_VAL);
	memcpy(tmp, fiemap, *buflen);
	ptlrpc_request_set_replen(req);

	rc = ptlrpc_queue_wait(req);
	if (rc != 0)
		GOTO(fini_req, rc);

	reply = req_capsule_server_get(&req->rq_pill, &RMF_FIEMAP_VAL);
	if (reply == NULL)
		GOTO(fini_req, rc = -EPROTO);

	memcpy(fiemap, reply, *buflen);
fini_req:
	ptlrpc_req_finished(req);
drop_lock:
	if (mode)
		ldlm_lock_decref(&lockh, LCK_PR);
	RETURN(rc);
}
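/*
 * Illustrative sketch only (fiemap_extent_end() and the EX_* constants are
 * hypothetical, not part of the Lustre API): the extent-end computation in
 * osc_object_fiemap() above rounds the requested range up to a page boundary,
 * falling back to end-of-object when the addition would run past it.
 */
#include <stdint.h>

#define EX_PAGE_SIZE	4096ULL
#define EX_PAGE_MASK	(~(EX_PAGE_SIZE - 1))
#define EX_OBJECT_EOF	0xffffffffffffffffULL

static uint64_t fiemap_extent_end(uint64_t start, uint64_t length)
{
	/* If rounding start + length up to a page boundary would exceed the
	 * largest representable object offset, lock out to end-of-object. */
	if (EX_OBJECT_EOF - length <= start + EX_PAGE_SIZE - 1)
		return EX_OBJECT_EOF;
	return (start + length + EX_PAGE_SIZE - 1) & EX_PAGE_MASK;
}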