static int osc_io_read_ahead(const struct lu_env *env,
                             const struct cl_io_slice *ios,
                             pgoff_t start, struct cl_read_ahead *ra)
{
        struct osc_object *osc = cl2osc(ios->cis_obj);
        struct ldlm_lock *dlmlock;
        int result = -ENODATA;

        ENTRY;

        dlmlock = osc_dlmlock_at_pgoff(env, osc, start, 0);
        if (dlmlock != NULL) {
                if (dlmlock->l_req_mode != LCK_PR) {
                        struct lustre_handle lockh;

                        /* Convert the reference held on the matched lock to
                         * LCK_PR so that the release callback can always
                         * drop a PR reference, whatever the original mode. */
                        ldlm_lock2handle(dlmlock, &lockh);
                        ldlm_lock_addref(&lockh, LCK_PR);
                        ldlm_lock_decref(&lockh, dlmlock->l_req_mode);
                }

                /* Read ahead is allowed up to the end of the lock extent. */
                ra->cra_end = cl_index(osc2cl(osc),
                                       dlmlock->l_policy_data.l_extent.end);
                ra->cra_release = osc_read_ahead_release;
                ra->cra_cbdata = dlmlock;
                result = 0;
        }

        RETURN(result);
}
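/*
 * The release callback stored in cra_release is expected to undo exactly what
 * osc_io_read_ahead() handed over: the LCK_PR reference converted above and
 * the ldlm_lock reference stashed in cra_cbdata.  A minimal sketch of such a
 * callback follows; it assumes osc_read_ahead_release() does nothing beyond
 * this, which is not shown in the excerpt above.
 */
static void osc_read_ahead_release(const struct lu_env *env, void *cbdata)
{
        struct ldlm_lock *dlmlock = cbdata;     /* lock saved in cra_cbdata */
        struct lustre_handle lockh;

        ldlm_lock2handle(dlmlock, &lockh);
        ldlm_lock_decref(&lockh, LCK_PR);       /* drop the PR reference */
        LDLM_LOCK_PUT(dlmlock);                 /* drop the lock reference */
}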
static void osc_lock_granted(const struct lu_env *env, struct osc_lock *oscl,
                             struct lustre_handle *lockh, bool lvb_update)
{
        struct ldlm_lock *dlmlock;

        dlmlock = ldlm_handle2lock_long(lockh, 0);
        LASSERT(dlmlock);

        /* lock reference taken by ldlm_handle2lock_long() is
         * owned by osc_lock and released in osc_lock_detach() */
        lu_ref_add(&dlmlock->l_reference, "osc_lock", oscl);
        oscl->ols_has_ref = 1;

        LASSERT(!oscl->ols_dlmlock);
        oscl->ols_dlmlock = dlmlock;

        /* This may be a matched lock for glimpse request, do not hold
         * lock reference in that case. */
        if (!oscl->ols_glimpse) {
                /* hold a refc for non glimpse lock which will
                 * be released in osc_lock_cancel() */
                lustre_handle_copy(&oscl->ols_handle, lockh);
                ldlm_lock_addref(lockh, oscl->ols_einfo.ei_mode);
                oscl->ols_hold = 1;
        }

        /* Lock must have been granted. */
        lock_res_and_lock(dlmlock);
        if (dlmlock->l_granted_mode == dlmlock->l_req_mode) {
                struct ldlm_extent *ext = &dlmlock->l_policy_data.l_extent;
                struct cl_lock_descr *descr = &oscl->ols_cl.cls_lock->cll_descr;

                /* extend the lock extent, otherwise it will have problem when
                 * we decide whether to grant a lockless lock. */
                descr->cld_mode = osc_ldlm2cl_lock(dlmlock->l_granted_mode);
                descr->cld_start = cl_index(descr->cld_obj, ext->start);
                descr->cld_end = cl_index(descr->cld_obj, ext->end);
                descr->cld_gid = ext->gid;

                /* no lvb update for matched lock */
                if (lvb_update) {
                        LASSERT(oscl->ols_flags & LDLM_FL_LVB_READY);
                        osc_lock_lvb_update(env, cl2osc(oscl->ols_cl.cls_obj),
                                            dlmlock, NULL);
                }
                LINVRNT(osc_lock_invariant(oscl));
        }
        unlock_res_and_lock(dlmlock);

        LASSERT(oscl->ols_state != OLS_GRANTED);
        oscl->ols_state = OLS_GRANTED;
}
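/*
 * osc_ldlm2cl_lock() above translates the granted DLM mode into the CLIO lock
 * mode stored in the descriptor.  The helper body is not part of this excerpt;
 * the following is only a plausible sketch, assuming the usual PR/PW/GROUP to
 * CLM_READ/CLM_WRITE/CLM_GROUP correspondence.
 */
static enum cl_lock_mode osc_ldlm2cl_lock(enum ldlm_mode mode)
{
        /* Assumed mapping; the real helper may differ in its handling of
         * unexpected modes. */
        switch (mode) {
        case LCK_PR:
                return CLM_READ;
        case LCK_PW:
                return CLM_WRITE;
        case LCK_GROUP:
                return CLM_GROUP;
        default:
                LBUG();         /* unexpected mode for an OSC extent lock */
                return CLM_READ;
        }
}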
static void osc_lock_upcall0(const struct lu_env *env, struct osc_lock *olck)
{
        struct ldlm_lock *dlmlock;

        ENTRY;

        dlmlock = ldlm_handle2lock_long(&olck->ols_handle, 0);
        LASSERT(dlmlock != NULL);

        lock_res_and_lock(dlmlock);
        spin_lock(&osc_ast_guard);
        LASSERT(dlmlock->l_ast_data == olck);
        LASSERT(olck->ols_lock == NULL);
        olck->ols_lock = dlmlock;
        spin_unlock(&osc_ast_guard);

        /*
         * Lock might be not yet granted. In this case, completion ast
         * (osc_ldlm_completion_ast()) comes later and finishes lock
         * granting.
         */
        if (dlmlock->l_granted_mode == dlmlock->l_req_mode)
                osc_lock_granted(env, olck, dlmlock, 0);
        unlock_res_and_lock(dlmlock);

        /*
         * osc_enqueue_interpret() decrefs asynchronous locks, counter
         * this.
         */
        ldlm_lock_addref(&olck->ols_handle, olck->ols_einfo.ei_mode);
        olck->ols_hold = 1;

        /* lock reference taken by ldlm_handle2lock_long() is owned by
         * osc_lock and released in osc_lock_detach() */
        lu_ref_add(&dlmlock->l_reference, "osc_lock", olck);
        olck->ols_has_ref = 1;
}
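/*
 * Both functions above note that the reference taken by
 * ldlm_handle2lock_long() is owned by the osc_lock and released in
 * osc_lock_detach().  The teardown itself is not part of this excerpt; the
 * sketch below assumes it simply reverses the hold and reference state set up
 * in osc_lock_upcall0(), using the field names of this excerpt.
 */
static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck)
{
        struct ldlm_lock *dlmlock = olck->ols_lock;

        if (dlmlock == NULL)
                return;

        if (olck->ols_hold) {
                /* drop the mode reference taken for the enqueue */
                olck->ols_hold = 0;
                ldlm_lock_decref(&olck->ols_handle, olck->ols_einfo.ei_mode);
                olck->ols_handle.cookie = 0ULL;
        }

        olck->ols_lock = NULL;

        /* release the reference taken by ldlm_handle2lock_long() */
        lu_ref_del(&dlmlock->l_reference, "osc_lock", olck);
        LDLM_LOCK_RELEASE(dlmlock);
        olck->ols_has_ref = 0;
}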
static int osc_object_fiemap(const struct lu_env *env, struct cl_object *obj,
                             struct ll_fiemap_info_key *fmkey,
                             struct fiemap *fiemap, size_t *buflen)
{
        struct obd_export *exp = osc_export(cl2osc(obj));
        struct ldlm_res_id resid;
        union ldlm_policy_data policy;
        struct lustre_handle lockh;
        enum ldlm_mode mode = LCK_MINMODE;
        struct ptlrpc_request *req;
        struct fiemap *reply;
        char *tmp;
        int rc;

        ENTRY;

        fmkey->lfik_oa.o_oi = cl2osc(obj)->oo_oinfo->loi_oi;
        if (!(fmkey->lfik_fiemap.fm_flags & FIEMAP_FLAG_SYNC))
                goto skip_locking;

        /* Round the requested byte range out to page-aligned lock extent
         * boundaries, clamping the end at OBD_OBJECT_EOF to avoid overflow. */
        policy.l_extent.start = fmkey->lfik_fiemap.fm_start & PAGE_MASK;

        if (OBD_OBJECT_EOF - fmkey->lfik_fiemap.fm_length <=
            fmkey->lfik_fiemap.fm_start + PAGE_SIZE - 1)
                policy.l_extent.end = OBD_OBJECT_EOF;
        else
                policy.l_extent.end = (fmkey->lfik_fiemap.fm_start +
                                       fmkey->lfik_fiemap.fm_length +
                                       PAGE_SIZE - 1) & PAGE_MASK;

        ostid_build_res_name(&fmkey->lfik_oa.o_oi, &resid);
        mode = ldlm_lock_match(exp->exp_obd->obd_namespace,
                               LDLM_FL_BLOCK_GRANTED | LDLM_FL_LVB_READY,
                               &resid, LDLM_EXTENT, &policy,
                               LCK_PR | LCK_PW, &lockh, 0);
        if (mode) { /* lock is cached on client */
                if (mode != LCK_PR) {
                        ldlm_lock_addref(&lockh, LCK_PR);
                        ldlm_lock_decref(&lockh, LCK_PW);
                }
        } else { /* no cached lock, needs acquire lock on server side */
                fmkey->lfik_oa.o_valid |= OBD_MD_FLFLAGS;
                fmkey->lfik_oa.o_flags |= OBD_FL_SRVLOCK;
        }

skip_locking:
        req = ptlrpc_request_alloc(class_exp2cliimp(exp),
                                   &RQF_OST_GET_INFO_FIEMAP);
        if (req == NULL)
                GOTO(drop_lock, rc = -ENOMEM);

        req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_KEY, RCL_CLIENT,
                             sizeof(*fmkey));
        req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL, RCL_CLIENT,
                             *buflen);
        req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL, RCL_SERVER,
                             *buflen);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO);
        if (rc != 0) {
                ptlrpc_request_free(req);
                GOTO(drop_lock, rc);
        }

        tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_KEY);
        memcpy(tmp, fmkey, sizeof(*fmkey));
        tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_VAL);
        memcpy(tmp, fiemap, *buflen);
        ptlrpc_request_set_replen(req);

        rc = ptlrpc_queue_wait(req);
        if (rc != 0)
                GOTO(fini_req, rc);

        reply = req_capsule_server_get(&req->rq_pill, &RMF_FIEMAP_VAL);
        if (reply == NULL)
                GOTO(fini_req, rc = -EPROTO);

        memcpy(fiemap, reply, *buflen);

fini_req:
        ptlrpc_req_finished(req);
drop_lock:
        if (mode)
                ldlm_lock_decref(&lockh, LCK_PR);
        RETURN(rc);
}
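/*
 * The RMF_FIEMAP_VAL buffer is sized by *buflen on both the client and server
 * sides, and exactly *buflen bytes are copied into and out of the request, so
 * callers must pass a length that covers the fiemap header plus the requested
 * number of extents.  The helper below is only illustrative (its name is not
 * from this excerpt); it derives that length from the standard fiemap layout.
 */
#include <linux/fiemap.h>

static size_t fiemap_buf_size(const struct fiemap *fm)
{
        /* header followed by fm_extent_count extent records */
        return sizeof(struct fiemap) +
               fm->fm_extent_count * sizeof(struct fiemap_extent);
}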