/**
 * Lock upcall function that is executed either when a reply to an ENQUEUE
 * RPC is received from a server, or after osc_enqueue_base() matched a
 * local DLM lock.
 */
static int osc_lock_upcall(void *cookie, struct lustre_handle *lockh,
                           int errcode)
{
        struct osc_lock         *oscl  = cookie;
        struct cl_lock_slice    *slice = &oscl->ols_cl;
        struct lu_env           *env;
        int                      rc;

        ENTRY;

        env = cl_env_percpu_get();
        /* should never happen, similar to osc_ldlm_blocking_ast(). */
        LASSERT(!IS_ERR(env));

        rc = ldlm_error2errno(errcode);
        if (oscl->ols_state == OLS_ENQUEUED) {
                oscl->ols_state = OLS_UPCALL_RECEIVED;
        } else if (oscl->ols_state == OLS_CANCELLED) {
                rc = -EIO;
        } else {
                CERROR("Impossible state: %d\n", oscl->ols_state);
                LBUG();
        }

        if (rc == 0)
                osc_lock_granted(env, oscl, lockh, errcode == ELDLM_OK);

        /* Error handling; some errors are tolerable. */
        if (oscl->ols_locklessable && rc == -EUSERS) {
                /* This is a tolerable error: turn this lock into a
                 * lockless lock.
                 */
                osc_object_set_contended(cl2osc(slice->cls_obj));
                LASSERT(slice->cls_ops != oscl->ols_lockless_ops);

                /* Change this lock to an ldlm-lock-less lock. */
                osc_lock_to_lockless(env, oscl, 1);
                oscl->ols_state = OLS_GRANTED;
                rc = 0;
        } else if (oscl->ols_glimpse && rc == -ENAVAIL) {
                LASSERT(oscl->ols_flags & LDLM_FL_LVB_READY);
                osc_lock_lvb_update(env, cl2osc(slice->cls_obj),
                                    NULL, &oscl->ols_lvb);
                /* Hide the error. */
                rc = 0;
        } else if (rc < 0 && oscl->ols_flags & LDLM_FL_NDELAY) {
                rc = -EWOULDBLOCK;
        }

        if (oscl->ols_owner != NULL)
                cl_sync_io_note(env, oscl->ols_owner, rc);
        cl_env_percpu_put(env);

        RETURN(rc);
}
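/*
 * Illustration (not part of the original source): a minimal, self-contained
 * sketch of the cookie/upcall pattern used by osc_lock_upcall() above. All
 * names here (toy_lock, toy_enqueue, toy_upcall, TOY_*) are hypothetical
 * stand-ins, not Lustre APIs; the sketch only shows how an enqueue path
 * hands a cookie to an asynchronous reply handler that advances a small
 * per-lock state machine, with a raced cancellation mapped to -EIO.
 */
#include <assert.h>
#include <errno.h>
#include <stdio.h>

enum toy_state {
        TOY_ENQUEUED,
        TOY_UPCALL_RECEIVED,
        TOY_CANCELLED,
        TOY_GRANTED,
};

struct toy_lock {
        enum toy_state state;
};

/* Mirrors osc_lock_upcall(): the cookie is the lock that was enqueued. */
static int toy_upcall(void *cookie, int errcode)
{
        struct toy_lock *lk = cookie;
        int rc = errcode;       /* stands in for ldlm_error2errno() */

        if (lk->state == TOY_ENQUEUED)
                lk->state = TOY_UPCALL_RECEIVED;
        else if (lk->state == TOY_CANCELLED)
                rc = -EIO;      /* reply raced with a cancellation */

        if (rc == 0)
                lk->state = TOY_GRANTED;
        return rc;
}

/* Stands in for the ENQUEUE RPC; here the "reply" arrives synchronously. */
static int toy_enqueue(struct toy_lock *lk,
                       int (*upcall)(void *cookie, int errcode))
{
        lk->state = TOY_ENQUEUED;
        return upcall(lk, 0);
}

int main(void)
{
        struct toy_lock lk;

        assert(toy_enqueue(&lk, toy_upcall) == 0);
        assert(lk.state == TOY_GRANTED);
        printf("granted\n");
        return 0;
}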
static void osc_lock_upcall0(const struct lu_env *env, struct osc_lock *olck)
{
        struct ldlm_lock *dlmlock;

        ENTRY;

        dlmlock = ldlm_handle2lock_long(&olck->ols_handle, 0);
        LASSERT(dlmlock != NULL);
        lock_res_and_lock(dlmlock);
        spin_lock(&osc_ast_guard);
        LASSERT(dlmlock->l_ast_data == olck);
        LASSERT(olck->ols_lock == NULL);
        olck->ols_lock = dlmlock;
        spin_unlock(&osc_ast_guard);

        /*
         * The lock might not be granted yet. In that case the completion
         * AST (osc_ldlm_completion_ast()) comes later and finishes the
         * granting.
         */
        if (dlmlock->l_granted_mode == dlmlock->l_req_mode)
                osc_lock_granted(env, olck, dlmlock, 0);
        unlock_res_and_lock(dlmlock);

        /*
         * osc_enqueue_interpret() decrefs asynchronous locks; counter
         * this.
         */
        ldlm_lock_addref(&olck->ols_handle, olck->ols_einfo.ei_mode);
        olck->ols_hold = 1;

        /* The lock reference taken by ldlm_handle2lock_long() is owned by
         * the osc_lock and released in osc_lock_detach(). */
        lu_ref_add(&dlmlock->l_reference, "osc_lock", olck);
        olck->ols_has_ref = 1;
}
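/*
 * Illustration (not part of the original source): a minimal sketch of the
 * reference-counting balance that osc_lock_upcall0() maintains. The names
 * (toy_ref, toy_get, toy_put, toy_async_interpret) are hypothetical; the
 * point is that when an asynchronous path is known to drop one reference
 * later, a holder that wants to keep the object must take an extra
 * reference now, just as ldlm_lock_addref() counters the decref performed
 * by osc_enqueue_interpret().
 */
#include <assert.h>

struct toy_ref {
        int count;
};

static void toy_get(struct toy_ref *r)
{
        r->count++;
}

static void toy_put(struct toy_ref *r)
{
        assert(r->count > 0);
        r->count--;
}

/* Async completion path: drops the reference the enqueue path took. */
static void toy_async_interpret(struct toy_ref *r)
{
        toy_put(r);
}

int main(void)
{
        struct toy_ref r = { .count = 1 };      /* ref taken at enqueue */

        toy_get(&r);                    /* counter the upcoming async decref,
                                         * like ldlm_lock_addref() above */
        toy_async_interpret(&r);        /* async path drops its reference */

        assert(r.count == 1);           /* ours survives; it is released
                                         * later, as in osc_lock_detach() */
        toy_put(&r);
        assert(r.count == 0);
        return 0;
}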