static void gfs2_control_func(struct work_struct *work)
{
	struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd,
					    sd_control_work.work);
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	uint32_t block_gen, start_gen, lvb_gen, flags;
	int recover_set = 0;
	int write_lvb = 0;
	int recover_size;
	int i, error;

	spin_lock(&ls->ls_recover_spin);
	/*
	 * No MOUNT_DONE means we're still mounting; control_mount()
	 * will set this flag, after which this thread will take over
	 * all further clearing of BLOCK_LOCKS.
	 *
	 * FIRST_MOUNT means this node is doing first mounter recovery,
	 * for which recovery control is handled by
	 * control_mount()/control_first_done(), not this thread.
	 */
	if (!test_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags) ||
	    test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) {
		spin_unlock(&ls->ls_recover_spin);
		return;
	}
	block_gen = ls->ls_recover_block;
	start_gen = ls->ls_recover_start;
	spin_unlock(&ls->ls_recover_spin);

	/*
	 * Equal block_gen and start_gen implies we are between
	 * recover_prep and recover_done callbacks, which means
	 * dlm recovery is in progress and dlm locking is blocked.
	 * There's no point trying to do any work until recover_done.
	 */

	if (block_gen == start_gen)
		return;

	/*
	 * Propagate recover_submit[] and recover_result[] to lvb:
	 * dlm_recoverd adds to recover_submit[] jids needing recovery
	 * gfs2_recover adds to recover_result[] journal recovery results
	 *
	 * set lvb bit for jids in recover_submit[] if the lvb has not
	 * yet been updated for the generation of the failure
	 *
	 * clear lvb bit for jids in recover_result[] if the result of
	 * the journal recovery is SUCCESS
	 */

	error = control_lock(sdp, DLM_LOCK_EX, DLM_LKF_CONVERT|DLM_LKF_VALBLK);
	if (error) {
		fs_err(sdp, "control lock EX error %d\n", error);
		return;
	}

	control_lvb_read(ls, &lvb_gen, ls->ls_lvb_bits);

	spin_lock(&ls->ls_recover_spin);
	if (block_gen != ls->ls_recover_block ||
	    start_gen != ls->ls_recover_start) {
		fs_info(sdp, "recover generation %u block1 %u %u\n",
			start_gen, block_gen, ls->ls_recover_block);
		spin_unlock(&ls->ls_recover_spin);
		control_lock(sdp, DLM_LOCK_NL, DLM_LKF_CONVERT);
		return;
	}

	recover_size = ls->ls_recover_size;

	if (lvb_gen <= start_gen) {
		/*
		 * Clear lvb bits for jids we've successfully recovered.
		 * Because all nodes attempt to recover failed journals,
		 * a journal can be recovered multiple times successfully
		 * in succession.  Only the first will really do recovery,
		 * the others find it clean, but still report a successful
		 * recovery.  So, another node may have already recovered
		 * the jid and cleared the lvb bit for it.
		 */
		for (i = 0; i < recover_size; i++) {
			if (ls->ls_recover_result[i] != LM_RD_SUCCESS)
				continue;

			ls->ls_recover_result[i] = 0;

			if (!test_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET))
				continue;

			__clear_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET);
			write_lvb = 1;
		}
	}

	if (lvb_gen == start_gen) {
		/*
		 * Failed slots before start_gen are already set in lvb.
		 */
		for (i = 0; i < recover_size; i++) {
			if (!ls->ls_recover_submit[i])
				continue;
			if (ls->ls_recover_submit[i] < lvb_gen)
				ls->ls_recover_submit[i] = 0;
		}
	} else if (lvb_gen < start_gen) {
		/*
		 * Failed slots before start_gen are not yet set in lvb.
		 */
		for (i = 0; i < recover_size; i++) {
			if (!ls->ls_recover_submit[i])
				continue;
			if (ls->ls_recover_submit[i] < start_gen) {
				ls->ls_recover_submit[i] = 0;
				__set_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET);
			}
		}
		/* even if there are no bits to set, we need to write the
		   latest generation to the lvb */
		write_lvb = 1;
	} else {
		/*
		 * we should be getting a recover_done() for lvb_gen soon
		 */
	}
	spin_unlock(&ls->ls_recover_spin);

	if (write_lvb) {
		control_lvb_write(ls, start_gen, ls->ls_lvb_bits);
		flags = DLM_LKF_CONVERT | DLM_LKF_VALBLK;
	} else {
		flags = DLM_LKF_CONVERT;
	}

	error = control_lock(sdp, DLM_LOCK_NL, flags);
	if (error) {
		fs_err(sdp, "control lock NL error %d\n", error);
		return;
	}

	/*
	 * Everyone will see jid bits set in the lvb, run gfs2_recover_set(),
	 * and clear a jid bit in the lvb if the recovery is a success.
	 * Eventually all journals will be recovered, all jid bits will
	 * be cleared in the lvb, and everyone will clear BLOCK_LOCKS.
	 */

	for (i = 0; i < recover_size; i++) {
		if (test_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET)) {
			fs_info(sdp, "recover generation %u jid %d\n",
				start_gen, i);
			gfs2_recover_set(sdp, i);
			recover_set++;
		}
	}
	if (recover_set)
		return;

	/*
	 * No more jid bits set in lvb, all recovery is done, unblock locks
	 * (unless a new recover_prep callback has occurred blocking locks
	 * again while working above)
	 */

	spin_lock(&ls->ls_recover_spin);
	if (ls->ls_recover_block == block_gen &&
	    ls->ls_recover_start == start_gen) {
		clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
		spin_unlock(&ls->ls_recover_spin);
		fs_info(sdp, "recover generation %u done\n", start_gen);
		gfs2_glock_thaw(sdp);
	} else {
		fs_info(sdp, "recover generation %u block2 %u %u\n",
			start_gen, block_gen, ls->ls_recover_block);
		spin_unlock(&ls->ls_recover_spin);
	}
}
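/*
 * Illustration (not kernel code): gfs2_control_func() treats the control
 * lock's LVB as a generation number plus a little-endian bitmap of journal
 * IDs, read via control_lvb_read() and written back via control_lvb_write().
 * The standalone sketch below models that layout under stated assumptions:
 * the 32-byte LVB size, the generation stored little-endian in the first
 * four bytes, and the bitmap starting at byte 8 are illustrative values, and
 * lvb_read()/lvb_write()/set_bit_le()/test_bit_le() are hypothetical
 * userspace stand-ins for the kernel helpers, not the kernel symbols.
 */
#include <stdint.h>
#include <stdio.h>

#define GDLM_LVB_SIZE		32	/* assumed LVB size */
#define JID_BITMAP_OFFSET	8	/* jid bits assumed to start at byte 8 */

/* bit n lives in byte n/8, at position n%8: the *_bit_le() numbering */
static void set_bit_le(unsigned int bit, uint8_t *bits)
{
	bits[bit / 8] |= 1u << (bit % 8);
}

static int test_bit_le(unsigned int bit, const uint8_t *bits)
{
	return (bits[bit / 8] >> (bit % 8)) & 1;
}

/* store/load the recovery generation little-endian in the first 4 bytes */
static void lvb_write(uint8_t *lvb, uint32_t gen)
{
	lvb[0] = gen & 0xff;
	lvb[1] = (gen >> 8) & 0xff;
	lvb[2] = (gen >> 16) & 0xff;
	lvb[3] = (gen >> 24) & 0xff;
}

static uint32_t lvb_read(const uint8_t *lvb)
{
	return (uint32_t)lvb[0] | ((uint32_t)lvb[1] << 8) |
	       ((uint32_t)lvb[2] << 16) | ((uint32_t)lvb[3] << 24);
}

int main(void)
{
	uint8_t lvb[GDLM_LVB_SIZE] = { 0 };

	lvb_write(lvb, 7);			/* generation 7 */
	set_bit_le(3, lvb + JID_BITMAP_OFFSET);	/* jid 3 needs recovery */

	printf("gen %u jid3 %d jid4 %d\n", lvb_read(lvb),
	       test_bit_le(3, lvb + JID_BITMAP_OFFSET),
	       test_bit_le(4, lvb + JID_BITMAP_OFFSET));
	return 0;
}
/*
 * Because every byte of the LVB then has a fixed meaning independent of
 * host endianness, all nodes in the cluster see the same image, which is
 * why the kernel code uses the *_bit_le() family rather than native bitops.
 */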
static inline void log_set_bit(struct log_c *l,
			       uint32_t *bs, unsigned bit)
{
	/* set a bit in the little-endian bitset and note the change
	 * so a subsequent flush writes out the updated log */
	__set_bit_le(bit, bs);
	l->touched_cleaned = 1;
}
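/*
 * Illustration (not kernel code): log_set_bit() uses __set_bit_le() because
 * the bitset it updates is persisted, and little-endian bit numbering gives
 * the same byte image on any host. The hypothetical userspace sketch below
 * contrasts that with native unsigned-long bitops, whose byte placement
 * depends on host endianness; set_bit_le() here is a stand-in, not the
 * kernel symbol.
 */
#include <stdint.h>
#include <stdio.h>

static void set_bit_le(unsigned int bit, uint8_t *bits)
{
	bits[bit / 8] |= 1u << (bit % 8);	/* byte bit/8, position bit%8 */
}

int main(void)
{
	uint8_t disk[4] = { 0 };
	unsigned long native = 0;
	const uint8_t *nb = (const uint8_t *)&native;

	set_bit_le(9, disk);	/* always lands in disk[1] as 0x02 */
	native |= 1ul << 9;	/* byte placement varies with host endianness */

	printf("le bitset bytes: %02x %02x %02x %02x\n",
	       disk[0], disk[1], disk[2], disk[3]);
	printf("native long bytes 0-1: %02x %02x\n", nb[0], nb[1]);
	return 0;
}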