static int dlm_purge_lockres(struct dlm_ctxt *dlm,
			     struct dlm_lock_resource *res)
{
	int master;
	int ret = 0;

	spin_lock(&res->spinlock);
	if (!__dlm_lockres_unused(res)) {
		spin_unlock(&res->spinlock);
		mlog(0, "%s:%.*s: tried to purge but not unused\n",
		     dlm->name, res->lockname.len, res->lockname.name);
		return -ENOTEMPTY;
	}
	master = (res->owner == dlm->node_num);
	if (!master)
		res->state |= DLM_LOCK_RES_DROPPING_REF;
	spin_unlock(&res->spinlock);

	mlog(0, "purging lockres %.*s, master = %d\n",
	     res->lockname.len, res->lockname.name, master);

	if (!master) {
		spin_lock(&res->spinlock);
		/* This ensures that clear refmap is sent after the set */
		__dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG);
		spin_unlock(&res->spinlock);

		/* drop spinlock to do messaging, retake below */
		spin_unlock(&dlm->spinlock);
		/* clear our bit from the master's refmap, ignore errors */
		ret = dlm_drop_lockres_ref(dlm, res);
		if (ret < 0) {
			mlog_errno(ret);
			if (!dlm_is_host_down(ret))
				BUG();
		}
		mlog(0, "%s:%.*s: dlm_deref_lockres returned %d\n",
		     dlm->name, res->lockname.len, res->lockname.name, ret);
		spin_lock(&dlm->spinlock);
	}

	if (!list_empty(&res->purge)) {
		mlog(0, "removing lockres %.*s:%p from purgelist, "
		     "master = %d\n",
		     res->lockname.len, res->lockname.name, res, master);
		list_del_init(&res->purge);
		dlm_lockres_put(res);
		dlm->purge_count--;
	}
	__dlm_unhash_lockres(res);

	/* lockres is not in the hash now.  drop the flag and wake up
	 * any processes waiting in dlm_get_lock_resource. */
	if (!master) {
		spin_lock(&res->spinlock);
		res->state &= ~DLM_LOCK_RES_DROPPING_REF;
		spin_unlock(&res->spinlock);
		wake_up(&res->wq);
	}

	return 0;
}
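/*
 * Illustration (not from the kernel source): the tail of the purge path
 * above is a flag-then-wake handshake -- DLM_LOCK_RES_DROPPING_REF is
 * cleared under res->spinlock, then wake_up(&res->wq) releases anyone
 * blocked in dlm_get_lock_resource().  A minimal userspace sketch of the
 * same discipline using a pthread condition variable; all names here are
 * hypothetical stand-ins.
 */
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t res_lock = PTHREAD_MUTEX_INITIALIZER; /* ~ res->spinlock */
static pthread_cond_t res_wq = PTHREAD_COND_INITIALIZER;     /* ~ res->wq */
static bool dropping_ref;            /* ~ DLM_LOCK_RES_DROPPING_REF */

/* ~ dlm_get_lock_resource(): wait until the purge path has finished */
static void lookup_side(void)
{
	pthread_mutex_lock(&res_lock);
	while (dropping_ref)
		pthread_cond_wait(&res_wq, &res_lock);
	/* safe to use or re-create the resource from here */
	pthread_mutex_unlock(&res_lock);
}

/* ~ end of dlm_purge_lockres(): clear the flag, then wake all waiters */
static void purge_side(void)
{
	pthread_mutex_lock(&res_lock);
	dropping_ref = false;
	pthread_mutex_unlock(&res_lock);
	pthread_cond_broadcast(&res_wq);	/* ~ wake_up(&res->wq) */
}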
/*
 * locking:
 *   caller needs:  none
 *   taken:         none
 *   held on exit:  none
 * returns: DLM_NORMAL, DLM_NOLOCKMGR, status from network
 */
static enum dlm_status dlm_send_remote_unlock_request(struct dlm_ctxt *dlm,
						      struct dlm_lock_resource *res,
						      struct dlm_lock *lock,
						      struct dlm_lockstatus *lksb,
						      int flags,
						      u8 owner)
{
	struct dlm_unlock_lock unlock;
	int tmpret;
	enum dlm_status ret;
	int status = 0;
	struct kvec vec[2];
	size_t veclen = 1;

	mlog_entry("%.*s\n", res->lockname.len, res->lockname.name);

	if (owner == dlm->node_num) {
		/* ended up trying to contact ourself.  this means
		 * that the lockres had been remote but became local
		 * via a migration.  just retry it, now as local */
		mlog(0, "%s:%.*s: this node became the master due to a "
		     "migration, re-evaluate now\n",
		     dlm->name, res->lockname.len, res->lockname.name);
		return DLM_FORWARD;
	}

	memset(&unlock, 0, sizeof(unlock));
	unlock.node_idx = dlm->node_num;
	unlock.flags = cpu_to_be32(flags);
	unlock.cookie = lock->ml.cookie;
	unlock.namelen = res->lockname.len;
	memcpy(unlock.name, res->lockname.name, unlock.namelen);

	vec[0].iov_len = sizeof(struct dlm_unlock_lock);
	vec[0].iov_base = &unlock;

	if (flags & LKM_PUT_LVB) {
		/* extra data to send if we are updating lvb */
		vec[1].iov_len = DLM_LVB_LEN;
		vec[1].iov_base = lock->lksb->lvb;
		veclen++;
	}

	tmpret = o2net_send_message_vec(DLM_UNLOCK_LOCK_MSG, dlm->key,
					vec, veclen, owner, &status);
	if (tmpret >= 0) {
		/* successfully sent and received */
		if (status == DLM_FORWARD)
			mlog(0, "master was in-progress.  retry\n");
		ret = status;
	} else {
		mlog_errno(tmpret);
		if (dlm_is_host_down(tmpret)) {
			/* NOTE: this seems strange, but it is what we want.
			 * when the master goes down during a cancel or
			 * unlock, the recovery code completes the operation
			 * as if the master had not died, then passes the
			 * updated state to the recovery master.  this thread
			 * just needs to finish out the operation and call
			 * the unlockast. */
			ret = DLM_NORMAL;
		} else {
			/* something bad.  this will BUG in ocfs2 */
			ret = dlm_err_to_dlm_status(tmpret);
		}
	}

	return ret;
}
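/*
 * Illustration (not from the kernel source): the kvec assembly above is the
 * standard scatter-gather pattern -- a fixed-size header in vec[0] plus an
 * optional LVB payload in vec[1] -- so both pieces go out in one message
 * with no bounce-buffer copy.  A minimal userspace analogue using
 * writev(2); all names here are hypothetical stand-ins, not o2net API.
 */
#include <string.h>
#include <sys/uio.h>

struct unlock_hdr {			/* stand-in for struct dlm_unlock_lock */
	unsigned char node_idx;
	unsigned char namelen;
	char name[32];
};

#define LVB_LEN 64			/* stand-in for DLM_LVB_LEN */

static ssize_t send_unlock(int fd, const char *name, size_t namelen,
			   const char *lvb /* NULL if no LVB update */)
{
	struct unlock_hdr hdr;
	struct iovec vec[2];
	int veclen = 1;

	if (namelen > sizeof(hdr.name))
		return -1;

	memset(&hdr, 0, sizeof(hdr));
	hdr.namelen = namelen;
	memcpy(hdr.name, name, namelen);

	vec[0].iov_base = &hdr;		/* fixed header always goes first */
	vec[0].iov_len = sizeof(hdr);

	if (lvb) {			/* optional payload, like LKM_PUT_LVB */
		vec[1].iov_base = (void *)lvb;
		vec[1].iov_len = LVB_LEN;
		veclen++;
	}

	/* one vectored syscall sends header + payload together */
	return writev(fd, vec, veclen);
}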
static void dlm_purge_lockres(struct dlm_ctxt *dlm,
			      struct dlm_lock_resource *res)
{
	int master;
	int ret = 0;

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	master = (res->owner == dlm->node_num);

	mlog(0, "%s: Purging res %.*s, master %d\n", dlm->name,
	     res->lockname.len, res->lockname.name, master);

	if (!master) {
		res->state |= DLM_LOCK_RES_DROPPING_REF;
		/* drop spinlock...  retake below */
		spin_unlock(&res->spinlock);
		spin_unlock(&dlm->spinlock);

		spin_lock(&res->spinlock);
		/* This ensures that clear refmap is sent after the set */
		__dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG);
		spin_unlock(&res->spinlock);

		/* clear our bit from the master's refmap, ignore errors */
		ret = dlm_drop_lockres_ref(dlm, res);
		if (ret < 0) {
			if (!dlm_is_host_down(ret))
				BUG();
		}

		spin_lock(&dlm->spinlock);
		spin_lock(&res->spinlock);
	}

	if (!list_empty(&res->purge)) {
		mlog(0, "%s: Removing res %.*s from purgelist, master %d\n",
		     dlm->name, res->lockname.len, res->lockname.name, master);
		list_del_init(&res->purge);
		dlm_lockres_put(res);
		dlm->purge_count--;
	}

	if (!__dlm_lockres_unused(res)) {
		mlog(ML_ERROR, "%s: res %.*s in use after deref\n",
		     dlm->name, res->lockname.len, res->lockname.name);
		__dlm_print_one_lock_resource(res);
		BUG();
	}

	__dlm_unhash_lockres(dlm, res);

	/* lockres is not in the hash now.  drop the flag and wake up
	 * any processes waiting in dlm_get_lock_resource. */
	if (!master) {
		res->state &= ~DLM_LOCK_RES_DROPPING_REF;
		spin_unlock(&res->spinlock);
		wake_up(&res->wq);
	} else
		spin_unlock(&res->spinlock);
}
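/*
 * Illustration (not from the kernel source): both purge paths above follow
 * the same lock-ordering discipline -- dlm->spinlock is the outer lock and
 * res->spinlock the inner one.  To message the master via
 * dlm_drop_lockres_ref(), which may block, the function releases
 * inner-then-outer and reacquires outer-then-inner, with
 * DLM_LOCK_RES_DROPPING_REF marking the unlocked window.  A minimal
 * pthread sketch of that drop-and-retake ordering; names hypothetical.
 */
#include <pthread.h>

static pthread_mutex_t outer = PTHREAD_MUTEX_INITIALIZER; /* ~ dlm->spinlock */
static pthread_mutex_t inner = PTHREAD_MUTEX_INITIALIZER; /* ~ res->spinlock */
static int dropping_ref;	/* ~ DLM_LOCK_RES_DROPPING_REF */

static void blocking_operation(void)
{
	/* e.g. a network round trip, like dlm_drop_lockres_ref() */
}

static void purge_with_message(void)
{
	pthread_mutex_lock(&outer);	/* always outer first ... */
	pthread_mutex_lock(&inner);	/* ... then inner */

	dropping_ref = 1;		/* flag the unlocked window */
	pthread_mutex_unlock(&inner);	/* release inner first ... */
	pthread_mutex_unlock(&outer);	/* ... then outer */

	blocking_operation();		/* may sleep: no lock held here */

	pthread_mutex_lock(&outer);	/* retake in the same outer-then-inner order */
	pthread_mutex_lock(&inner);
	dropping_ref = 0;

	pthread_mutex_unlock(&inner);
	pthread_mutex_unlock(&outer);
}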