static void osc_lock_granted(const struct lu_env *env, struct osc_lock *oscl,
			     struct lustre_handle *lockh, bool lvb_update)
{
	struct ldlm_lock *dlmlock;

	dlmlock = ldlm_handle2lock_long(lockh, 0);
	LASSERT(dlmlock);

	/* The lock reference taken by ldlm_handle2lock_long() is owned by
	 * the osc_lock and released in osc_lock_detach().
	 */
	lu_ref_add(&dlmlock->l_reference, "osc_lock", oscl);
	oscl->ols_has_ref = 1;

	LASSERT(!oscl->ols_dlmlock);
	oscl->ols_dlmlock = dlmlock;

	/* This may be a matched lock for a glimpse request; do not hold a
	 * lock reference in that case.
	 */
	if (!oscl->ols_glimpse) {
		/* Hold a reference for a non-glimpse lock; it will be
		 * released in osc_lock_cancel().
		 */
		lustre_handle_copy(&oscl->ols_handle, lockh);
		ldlm_lock_addref(lockh, oscl->ols_einfo.ei_mode);
		oscl->ols_hold = 1;
	}

	/* Lock must have been granted. */
	lock_res_and_lock(dlmlock);
	if (dlmlock->l_granted_mode == dlmlock->l_req_mode) {
		struct ldlm_extent *ext = &dlmlock->l_policy_data.l_extent;
		struct cl_lock_descr *descr = &oscl->ols_cl.cls_lock->cll_descr;

		/* Extend the lock extent; otherwise it will cause problems
		 * when we later decide whether to grant a lockless lock.
		 */
		descr->cld_mode = osc_ldlm2cl_lock(dlmlock->l_granted_mode);
		descr->cld_start = cl_index(descr->cld_obj, ext->start);
		descr->cld_end = cl_index(descr->cld_obj, ext->end);
		descr->cld_gid = ext->gid;

		/* No LVB update for a matched lock. */
		if (lvb_update) {
			LASSERT(oscl->ols_flags & LDLM_FL_LVB_READY);
			osc_lock_lvb_update(env, cl2osc(oscl->ols_cl.cls_obj),
					    dlmlock, NULL);
		}
		LINVRNT(osc_lock_invariant(oscl));
	}
	unlock_res_and_lock(dlmlock);

	LASSERT(oscl->ols_state != OLS_GRANTED);
	oscl->ols_state = OLS_GRANTED;
}
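
/*
 * Illustrative only: a minimal sketch (not the actual Lustre caller) of the
 * kind of enqueue upcall that would invoke the osc_lock_granted() variant
 * above once the server has replied.  The name sample_osc_lock_upcall() and
 * its control flow are assumptions made for this sketch; only
 * osc_lock_granted(), ldlm_error2errno() and ELDLM_LOCK_MATCHED come from
 * the code above or the LDLM API.
 */
static int sample_osc_lock_upcall(const struct lu_env *env,
				  struct osc_lock *oscl,
				  struct lustre_handle *lockh, int errcode)
{
	/* ELDLM_LOCK_MATCHED means an existing DLM lock was matched rather
	 * than a new one granted, so the LVB must not be refreshed from the
	 * reply; this maps directly onto the lvb_update argument above.
	 */
	bool matched = (errcode == ELDLM_LOCK_MATCHED);
	int rc = matched ? 0 : ldlm_error2errno(errcode);

	if (rc == 0)
		osc_lock_granted(env, oscl, lockh, !matched);
	return rc;
}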
/**
 * Called when a lock is granted, either from an upcall (when the server
 * returned a granted lock) or from the completion AST (when the server
 * returned a blocked lock).
 *
 * Called under the lock and resource spin-locks, which are released
 * temporarily here.
 */
static void osc_lock_granted(const struct lu_env *env, struct osc_lock *olck,
			     struct ldlm_lock *dlmlock, int rc)
{
	struct ldlm_extent   *ext;
	struct cl_lock       *lock;
	struct cl_lock_descr *descr;

	LASSERT(dlmlock->l_granted_mode == dlmlock->l_req_mode);

	ENTRY;
	if (olck->ols_state < OLS_GRANTED) {
		lock  = olck->ols_cl.cls_lock;
		ext   = &dlmlock->l_policy_data.l_extent;
		descr = &osc_env_info(env)->oti_descr;
		descr->cld_obj = lock->cll_descr.cld_obj;

		/* XXX check that ->l_granted_mode is valid. */
		descr->cld_mode  = osc_ldlm2cl_lock(dlmlock->l_granted_mode);
		descr->cld_start = cl_index(descr->cld_obj, ext->start);
		descr->cld_end   = cl_index(descr->cld_obj, ext->end);
		descr->cld_gid   = ext->gid;

		/* Tell upper layers the extent of the lock that was actually
		 * granted.
		 */
		olck->ols_state = OLS_GRANTED;
		osc_lock_lvb_update(env, olck, rc);

		/* Release the DLM spin-locks to allow cl_lock_{modify,signal}()
		 * to take a semaphore on a parent lock.  This is safe because
		 * the spin-locks only protect the consistency of
		 * dlmlock->l_*_mode and the LVB, and we have finished
		 * processing them.
		 */
		unlock_res_and_lock(dlmlock);
		cl_lock_modify(env, lock, descr);
		cl_lock_signal(env, lock);
		LINVRNT(osc_lock_invariant(olck));
		lock_res_and_lock(dlmlock);
	}
	EXIT;
}
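
/*
 * Illustrative only: a minimal sketch (not the actual Lustre caller) showing
 * the locking contract documented for the older osc_lock_granted() variant
 * above: the DLM lock and resource spin-locks are held around the call and
 * are dropped temporarily inside it.  The name sample_osc_completion() and
 * the surrounding control flow are assumptions; the granted-mode check, the
 * OLS_GRANTED state test and lock_res_and_lock()/unlock_res_and_lock() come
 * from the code above.
 */
static void sample_osc_completion(const struct lu_env *env,
				  struct osc_lock *olck,
				  struct ldlm_lock *dlmlock, int rc)
{
	lock_res_and_lock(dlmlock);
	/* osc_lock_granted() asserts that the requested mode has actually
	 * been granted and does nothing once the state reaches OLS_GRANTED,
	 * so only call it for a newly granted, not yet processed lock.
	 */
	if (dlmlock->l_granted_mode == dlmlock->l_req_mode &&
	    olck->ols_state < OLS_GRANTED)
		osc_lock_granted(env, olck, dlmlock, rc);
	unlock_res_and_lock(dlmlock);
}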