/**
 * Helper for mdc_dlm_blocking_ast() handling discrepancies between cl_lock
 * and ldlm_lock caches.
 *
 * Runs at LDLM_CB_CANCELING time for a Data-on-MDT lock: detaches the
 * cl_object from the dlmlock, flushes (or discards) every cached page the
 * lock covered, and resets KMS to 0 since the DOM lock is assumed to cover
 * all of the file's data on the MDT.
 *
 * \param env     execution environment (per-thread cl_attr scratch space)
 * \param dlmlock the DLM lock being cancelled
 * \param data    unused here (blocking AST opaque cookie)
 * \param flag    must be LDLM_CB_CANCELING
 *
 * \retval 0 on success, or the error from mdc_lock_flush()
 */
static int mdc_dlm_blocking_ast0(const struct lu_env *env,
				 struct ldlm_lock *dlmlock,
				 void *data, int flag)
{
	struct cl_object *obj = NULL;
	int result = 0;
	bool discard;
	enum cl_lock_mode mode = CLM_READ;

	ENTRY;

	LASSERT(flag == LDLM_CB_CANCELING);
	LASSERT(dlmlock != NULL);

	lock_res_and_lock(dlmlock);
	if (dlmlock->l_granted_mode != dlmlock->l_req_mode) {
		/* Lock was never granted: nothing cached under it, just
		 * drop the ast_data reference and bail out. */
		dlmlock->l_ast_data = NULL;
		unlock_res_and_lock(dlmlock);
		RETURN(0);
	}

	discard = ldlm_is_discard_data(dlmlock);
	if (dlmlock->l_granted_mode & (LCK_PW | LCK_GROUP))
		mode = CLM_WRITE;

	/* Detach the cl_object under the resource lock; take a reference
	 * so it survives until the flush below completes. */
	if (dlmlock->l_ast_data != NULL) {
		obj = osc2cl(dlmlock->l_ast_data);
		dlmlock->l_ast_data = NULL;
		cl_object_get(obj);
	}
	ldlm_set_kms_ignore(dlmlock);
	unlock_res_and_lock(dlmlock);

	/* if l_ast_data is NULL, the dlmlock was enqueued by AGL or
	 * the object has been destroyed. */
	if (obj != NULL) {
		struct cl_attr *attr = &osc_env_info(env)->oti_attr;

		/* Destroy pages covered by the extent of the DLM lock */
		result = mdc_lock_flush(env, cl2osc(obj), cl_index(obj, 0),
					CL_PAGE_EOF, mode, discard);

		/* Losing a lock, set KMS to 0.
		 * NB: assumed that DOM lock covers whole data on MDT.
		 */
		lock_res_and_lock(dlmlock);
		cl_object_attr_lock(obj);
		attr->cat_kms = 0;
		cl_object_attr_update(env, obj, attr, CAT_KMS);
		cl_object_attr_unlock(obj);
		unlock_res_and_lock(dlmlock);
		cl_object_put(env, obj);
	}
	RETURN(result);
}
/** * Helper for osc_dlm_blocking_ast() handling discrepancies between cl_lock * and ldlm_lock caches. */ static int osc_dlm_blocking_ast0(const struct lu_env *env, struct ldlm_lock *dlmlock, void *data, int flag) { struct cl_object *obj = NULL; int result = 0; int discard; enum cl_lock_mode mode = CLM_READ; LASSERT(flag == LDLM_CB_CANCELING); lock_res_and_lock(dlmlock); if (dlmlock->l_granted_mode != dlmlock->l_req_mode) { dlmlock->l_ast_data = NULL; unlock_res_and_lock(dlmlock); return 0; } discard = ldlm_is_discard_data(dlmlock); if (dlmlock->l_granted_mode & (LCK_PW | LCK_GROUP)) mode = CLM_WRITE; if (dlmlock->l_ast_data) { obj = osc2cl(dlmlock->l_ast_data); dlmlock->l_ast_data = NULL; cl_object_get(obj); } unlock_res_and_lock(dlmlock); /* if l_ast_data is NULL, the dlmlock was enqueued by AGL or * the object has been destroyed. */ if (obj) { struct ldlm_extent *extent = &dlmlock->l_policy_data.l_extent; struct cl_attr *attr = &osc_env_info(env)->oti_attr; __u64 old_kms; /* Destroy pages covered by the extent of the DLM lock */ result = osc_lock_flush(cl2osc(obj), cl_index(obj, extent->start), cl_index(obj, extent->end), mode, discard); /* losing a lock, update kms */ lock_res_and_lock(dlmlock); cl_object_attr_lock(obj); /* Must get the value under the lock to avoid race. */ old_kms = cl2osc(obj)->oo_oinfo->loi_kms; /* Update the kms. Need to loop all granted locks. * Not a problem for the client */ attr->cat_kms = ldlm_extent_shift_kms(dlmlock, old_kms); cl_object_attr_set(env, obj, attr, CAT_KMS); cl_object_attr_unlock(obj); unlock_res_and_lock(dlmlock); cl_object_put(env, obj); } return result; }