/**
 * Drop the per-ID quota lock cached in \a lqe, but only if the entry is
 * completely idle.
 *
 * The handle is copied and cleared under the lqe write lock so no other
 * thread can reuse it; the actual ldlm cancel is then done outside the
 * lock.
 *
 * \param env - thread environment providing per-thread scratch info
 * \param lqe - quota entry whose ID lock should be released
 *
 * \retval 0   on success, including when the lock is still needed or the
 *             handle no longer matches a live lock was already invalid
 *             (see qsd_id_lock_match below)
 * \retval rc  whatever non-zero value qsd_id_lock_match() reported
 */
int qsd_id_lock_cancel(const struct lu_env *env, struct lquota_entry *lqe)
{
	struct qsd_thread_info *qti = qsd_info(env);
	int rc;
	ENTRY;

	lqe_write_lock(lqe);
	/* The lock is still wanted while there is in-flight or queued
	 * quota work, or while the entry still accounts usage/granted
	 * space — in any of those cases, keep it and bail out. */
	if (lqe->lqe_pending_write || lqe->lqe_waiting_write ||
	    lqe->lqe_usage || lqe->lqe_granted) {
		lqe_write_unlock(lqe);
		RETURN(0);
	}

	/* Take a private copy of the handle and wipe the cached one so
	 * that nobody else can grab it once we start cancelling. */
	lustre_handle_copy(&qti->qti_lockh, &lqe->lqe_lockh);
	if (lustre_handle_is_used(&qti->qti_lockh)) {
		memset(&lqe->lqe_lockh, 0, sizeof(lqe->lqe_lockh));
		/* reset qunit/edquot state along with the lock */
		qsd_set_qunit(lqe, 0);
		lqe->lqe_edquot = false;
	}
	lqe_write_unlock(lqe);

	/* NOTE(review): presumably this validates the copied handle and
	 * takes the reference that decref_and_cancel below releases —
	 * confirm against qsd_id_lock_match(). Done outside the lqe
	 * write lock. */
	rc = qsd_id_lock_match(&qti->qti_lockh, NULL);
	if (rc)
		RETURN(rc);

	ldlm_lock_decref_and_cancel(&qti->qti_lockh, qsd_id_einfo.ei_mode);
	RETURN(0);
}
static int mgs_completion_ast_ir(struct ldlm_lock *lock, int flags, void *cbdata) { ENTRY; if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED | LDLM_FL_BLOCK_CONV))) { struct fs_db *fsdb = (struct fs_db *)lock->l_ast_data; struct lustre_handle lockh; mgs_ir_notify_complete(fsdb); ldlm_lock2handle(lock, &lockh); ldlm_lock_decref_and_cancel(&lockh, LCK_EX); } RETURN(ldlm_completion_ast(lock, flags, cbdata)); }
static int mgs_completion_ast_config(struct ldlm_lock *lock, int flags, void *cbdata) { ENTRY; if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED | LDLM_FL_BLOCK_CONV))) { struct fs_db *fsdb = (struct fs_db *)lock->l_ast_data; struct lustre_handle lockh; /* clear the bit before lock put */ cfs_clear_bit(FSDB_REVOKING_LOCK, &fsdb->fsdb_flags); ldlm_lock2handle(lock, &lockh); ldlm_lock_decref_and_cancel(&lockh, LCK_EX); } RETURN(ldlm_completion_ast(lock, flags, cbdata)); }