Code example #1
static int control_mount(struct gfs2_sbd *sdp)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	uint32_t start_gen, block_gen, mount_gen, lvb_gen;
	int mounted_mode;
	int retries = 0;
	int error;

	memset(&ls->ls_mounted_lksb, 0, sizeof(struct dlm_lksb));
	memset(&ls->ls_control_lksb, 0, sizeof(struct dlm_lksb));
	memset(&ls->ls_control_lvb, 0, GDLM_LVB_SIZE);
	ls->ls_control_lksb.sb_lvbptr = ls->ls_control_lvb;
	init_completion(&ls->ls_sync_wait);

	set_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);

	error = control_lock(sdp, DLM_LOCK_NL, DLM_LKF_VALBLK);
	if (error) {
		fs_err(sdp, "control_mount control_lock NL error %d\n", error);
		return error;
	}

	error = mounted_lock(sdp, DLM_LOCK_NL, 0);
	if (error) {
		fs_err(sdp, "control_mount mounted_lock NL error %d\n", error);
		control_unlock(sdp);
		return error;
	}
	mounted_mode = DLM_LOCK_NL;

restart:
	if (retries++ && signal_pending(current)) {
		error = -EINTR;
		goto fail;
	}

	/*
	 * We always start with both locks in NL. control_lock is
	 * demoted to NL below so we don't need to do it here.
	 */

	if (mounted_mode != DLM_LOCK_NL) {
		error = mounted_lock(sdp, DLM_LOCK_NL, DLM_LKF_CONVERT);
		if (error)
			goto fail;
		mounted_mode = DLM_LOCK_NL;
	}

	/*
	 * Other nodes need to do some work in dlm recovery and gfs2_control
	 * before the recover_done and control_lock will be ready for us below.
	 * A delay here is not required but often avoids having to retry.
	 */

	msleep_interruptible(500);

	/*
	 * Acquire control_lock in EX and mounted_lock in either EX or PR.
	 * control_lock lvb keeps track of any pending journal recoveries.
	 * mounted_lock indicates if any other nodes have the fs mounted.
	 */

	error = control_lock(sdp, DLM_LOCK_EX, DLM_LKF_CONVERT|DLM_LKF_NOQUEUE|DLM_LKF_VALBLK);
	if (error == -EAGAIN) {
		goto restart;
	} else if (error) {
		fs_err(sdp, "control_mount control_lock EX error %d\n", error);
		goto fail;
	}

	error = mounted_lock(sdp, DLM_LOCK_EX, DLM_LKF_CONVERT|DLM_LKF_NOQUEUE);
	if (!error) {
		mounted_mode = DLM_LOCK_EX;
		goto locks_done;
	} else if (error != -EAGAIN) {
		fs_err(sdp, "control_mount mounted_lock EX error %d\n", error);
		goto fail;
	}

	error = mounted_lock(sdp, DLM_LOCK_PR, DLM_LKF_CONVERT|DLM_LKF_NOQUEUE);
	if (!error) {
		mounted_mode = DLM_LOCK_PR;
		goto locks_done;
	} else {
		/* not even -EAGAIN should happen here */
		fs_err(sdp, "control_mount mounted_lock PR error %d\n", error);
		goto fail;
	}

locks_done:
	/*
	 * If we got both locks above in EX, then we're the first mounter.
	 * If not, then we need to wait for the control_lock lvb to be
	 * updated by other mounted nodes to reflect our mount generation.
	 *
	 * In simple first mounter cases, first mounter will see zero lvb_gen,
	 * but in cases where all existing nodes leave/fail before mounting
	 * nodes finish control_mount, then all nodes will be mounting and
	 * lvb_gen will be non-zero.
	 */

	control_lvb_read(ls, &lvb_gen, ls->ls_lvb_bits);

	if (lvb_gen == 0xFFFFFFFF) {
		/* special value to force mount attempts to fail */
		fs_err(sdp, "control_mount control_lock disabled\n");
		error = -EINVAL;
		goto fail;
	}

	if (mounted_mode == DLM_LOCK_EX) {
		/* first mounter, keep both EX while doing first recovery */
		spin_lock(&ls->ls_recover_spin);
		clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
		set_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags);
		set_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags);
		spin_unlock(&ls->ls_recover_spin);
		fs_info(sdp, "first mounter control generation %u\n", lvb_gen);
		return 0;
	}

	error = control_lock(sdp, DLM_LOCK_NL, DLM_LKF_CONVERT);
	if (error)
		goto fail;

	/*
	 * We are not first mounter, now we need to wait for the control_lock
	 * lvb generation to be >= the generation from our first recover_done
	 * and all lvb bits to be clear (no pending journal recoveries.)
	 */

	if (!all_jid_bits_clear(ls->ls_lvb_bits)) {
		/* journals need recovery, wait until all are clear */
		fs_info(sdp, "control_mount wait for journal recovery\n");
		goto restart;
	}

	spin_lock(&ls->ls_recover_spin);
	block_gen = ls->ls_recover_block;
	start_gen = ls->ls_recover_start;
	mount_gen = ls->ls_recover_mount;

	if (lvb_gen < mount_gen) {
		/* wait for mounted nodes to update control_lock lvb to our
		   generation, which might include new recovery bits set */
		fs_info(sdp, "control_mount wait1 block %u start %u mount %u "
			"lvb %u flags %lx\n", block_gen, start_gen, mount_gen,
			lvb_gen, ls->ls_recover_flags);
		spin_unlock(&ls->ls_recover_spin);
		goto restart;
	}

	if (lvb_gen != start_gen) {
		/* wait for mounted nodes to update control_lock lvb to the
		   latest recovery generation */
		fs_info(sdp, "control_mount wait2 block %u start %u mount %u "
			"lvb %u flags %lx\n", block_gen, start_gen, mount_gen,
			lvb_gen, ls->ls_recover_flags);
		spin_unlock(&ls->ls_recover_spin);
		goto restart;
	}

	if (block_gen == start_gen) {
		/* dlm recovery in progress, wait for it to finish */
		fs_info(sdp, "control_mount wait3 block %u start %u mount %u "
			"lvb %u flags %lx\n", block_gen, start_gen, mount_gen,
			lvb_gen, ls->ls_recover_flags);
		spin_unlock(&ls->ls_recover_spin);
		goto restart;
	}

	clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
	set_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags);
	memset(ls->ls_recover_submit, 0, ls->ls_recover_size*sizeof(uint32_t));
	memset(ls->ls_recover_result, 0, ls->ls_recover_size*sizeof(uint32_t));
	spin_unlock(&ls->ls_recover_spin);
	return 0;

fail:
	mounted_unlock(sdp);
	control_unlock(sdp);
	return error;
}
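
Example #1 reads and writes the recovery generation and jid bitmap through the control_lvb_read()/control_lvb_write() helpers, which are not shown above. Below is a minimal sketch of what they plausibly look like, assuming the control_lock LVB stores a little-endian 32-bit generation counter at offset 0 followed by the jid bitmap (GDLM_LVB_SIZE and JID_BITMAP_OFFSET as used in the code above); this is a sketch under those assumptions, not the authoritative implementation.

/* Sketch only: copy the whole LVB into/out of the caller's bitmap
 * buffer and convert the generation counter at offset 0 between
 * little-endian and host byte order (assumed LVB layout). */
static void control_lvb_read(struct lm_lockstruct *ls, uint32_t *lvb_gen,
			     char *lvb_bits)
{
	__le32 gen;

	memcpy(lvb_bits, ls->ls_control_lvb, GDLM_LVB_SIZE);
	memcpy(&gen, lvb_bits, sizeof(__le32));
	*lvb_gen = le32_to_cpu(gen);
}

static void control_lvb_write(struct lm_lockstruct *ls, uint32_t lvb_gen,
			      char *lvb_bits)
{
	__le32 gen;

	memcpy(ls->ls_control_lvb, lvb_bits, GDLM_LVB_SIZE);
	gen = cpu_to_le32(lvb_gen);
	memcpy(ls->ls_control_lvb, &gen, sizeof(__le32));
}
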
Code example #2
static int control_mount(struct gfs2_sbd *sdp)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	char lvb_bits[GDLM_LVB_SIZE];
	uint32_t start_gen, block_gen, mount_gen, lvb_gen;
	int mounted_mode;
	int retries = 0;
	int error;

	memset(&ls->ls_mounted_lksb, 0, sizeof(struct dlm_lksb));
	memset(&ls->ls_control_lksb, 0, sizeof(struct dlm_lksb));
	memset(&ls->ls_control_lvb, 0, GDLM_LVB_SIZE);
	ls->ls_control_lksb.sb_lvbptr = ls->ls_control_lvb;
	init_completion(&ls->ls_sync_wait);

	set_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);

	error = control_lock(sdp, DLM_LOCK_NL, DLM_LKF_VALBLK);
	if (error) {
		fs_err(sdp, "control_mount control_lock NL error %d\n", error);
		return error;
	}

	error = mounted_lock(sdp, DLM_LOCK_NL, 0);
	if (error) {
		fs_err(sdp, "control_mount mounted_lock NL error %d\n", error);
		control_unlock(sdp);
		return error;
	}
	mounted_mode = DLM_LOCK_NL;

restart:
	if (retries++ && signal_pending(current)) {
		error = -EINTR;
		goto fail;
	}

	/*
	 * We always start with both locks in NL. control_lock is
	 * demoted to NL below so we don't need to do it here.
	 */

	if (mounted_mode != DLM_LOCK_NL) {
		error = mounted_lock(sdp, DLM_LOCK_NL, DLM_LKF_CONVERT);
		if (error)
			goto fail;
		mounted_mode = DLM_LOCK_NL;
	}

	/*
	 * Other nodes need to do some work in dlm recovery and gfs2_control
	 * before the recover_done and control_lock will be ready for us below.
	 * A delay here is not required but often avoids having to retry.
	 */

	msleep_interruptible(500);

	/*
	 * Acquire control_lock in EX and mounted_lock in either EX or PR.
	 * control_lock lvb keeps track of any pending journal recoveries.
	 * mounted_lock indicates if any other nodes have the fs mounted.
	 */

	error = control_lock(sdp, DLM_LOCK_EX, DLM_LKF_CONVERT|DLM_LKF_NOQUEUE|DLM_LKF_VALBLK);
	if (error == -EAGAIN) {
		goto restart;
	} else if (error) {
		fs_err(sdp, "control_mount control_lock EX error %d\n", error);
		goto fail;
	}

	error = mounted_lock(sdp, DLM_LOCK_EX, DLM_LKF_CONVERT|DLM_LKF_NOQUEUE);
	if (!error) {
		mounted_mode = DLM_LOCK_EX;
		goto locks_done;
	} else if (error != -EAGAIN) {
		fs_err(sdp, "control_mount mounted_lock EX error %d\n", error);
		goto fail;
	}

	error = mounted_lock(sdp, DLM_LOCK_PR, DLM_LKF_CONVERT|DLM_LKF_NOQUEUE);
	if (!error) {
		mounted_mode = DLM_LOCK_PR;
		goto locks_done;
	} else {
		/* not even -EAGAIN should happen here */
		fs_err(sdp, "control_mount mounted_lock PR error %d\n", error);
		goto fail;
	}

locks_done:
	/*
	 * If we got both locks above in EX, then we're the first mounter.
	 * If not, then we need to wait for the control_lock lvb to be
	 * updated by other mounted nodes to reflect our mount generation.
	 *
	 * In simple first mounter cases, first mounter will see zero lvb_gen,
	 * but in cases where all existing nodes leave/fail before mounting
	 * nodes finish control_mount, then all nodes will be mounting and
	 * lvb_gen will be non-zero.
	 */

	control_lvb_read(ls, &lvb_gen, lvb_bits);

	if (lvb_gen == 0xFFFFFFFF) {
		/* special value to force mount attempts to fail */
		fs_err(sdp, "control_mount control_lock disabled\n");
		error = -EINVAL;
		goto fail;
	}

	if (mounted_mode == DLM_LOCK_EX) {
		/* first mounter, keep both EX while doing first recovery */
		spin_lock(&ls->ls_recover_spin);
		clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
		set_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags);
		set_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags);
		spin_unlock(&ls->ls_recover_spin);
		fs_info(sdp, "first mounter control generation %u\n", lvb_gen);
		return 0;
	}

	error = control_lock(sdp, DLM_LOCK_NL, DLM_LKF_CONVERT);
	if (error)
		goto fail;

	/*
	 * We are not first mounter, now we need to wait for the control_lock
	 * lvb generation to be >= the generation from our first recover_done
	 * and all lvb bits to be clear (no pending journal recoveries.)
	 */

	if (!all_jid_bits_clear(lvb_bits)) {
		/* journals need recovery, wait until all are clear */
		fs_info(sdp, "control_mount wait for journal recovery\n");
		goto restart;
	}

	spin_lock(&ls->ls_recover_spin);
	block_gen = ls->ls_recover_block;
	start_gen = ls->ls_recover_start;
	mount_gen = ls->ls_recover_mount;

	if (lvb_gen < mount_gen) {
		/* wait for mounted nodes to update control_lock lvb to our
		   generation, which might include new recovery bits set */
		fs_info(sdp, "control_mount wait1 block %u start %u mount %u "
			"lvb %u flags %lx\n", block_gen, start_gen, mount_gen,
			lvb_gen, ls->ls_recover_flags);
		spin_unlock(&ls->ls_recover_spin);
		goto restart;
	}

	if (lvb_gen != start_gen) {
		/* wait for mounted nodes to update control_lock lvb to the
		   latest recovery generation */
		fs_info(sdp, "control_mount wait2 block %u start %u mount %u "
			"lvb %u flags %lx\n", block_gen, start_gen, mount_gen,
			lvb_gen, ls->ls_recover_flags);
		spin_unlock(&ls->ls_recover_spin);
		goto restart;
	}

	if (block_gen == start_gen) {
		/* dlm recovery in progress, wait for it to finish */
		fs_info(sdp, "control_mount wait3 block %u start %u mount %u "
			"lvb %u flags %lx\n", block_gen, start_gen, mount_gen,
			lvb_gen, ls->ls_recover_flags);
		spin_unlock(&ls->ls_recover_spin);
		goto restart;
	}

	clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
	set_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags);
	memset(ls->ls_recover_submit, 0, ls->ls_recover_size*sizeof(uint32_t));
	memset(ls->ls_recover_result, 0, ls->ls_recover_size*sizeof(uint32_t));
	spin_unlock(&ls->ls_recover_spin);
	return 0;

fail:
	mounted_unlock(sdp);
	control_unlock(sdp);
	return error;
}
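
The only functional difference from example #1 is that lvb_bits here is a GDLM_LVB_SIZE char array on the kernel stack, whereas example #1 works through the preallocated ls->ls_lvb_bits buffer. For the #1 variant that buffer must be set up once per lockspace; a minimal sketch of that lifecycle follows, assuming it happens on the mount and unmount paths (the allocation site is an assumption, not shown in these examples).

/* Sketch: one-time allocation of the shared bitmap buffer that
 * example #1 dereferences as ls->ls_lvb_bits (assumed mount path). */
ls->ls_lvb_bits = kzalloc(GDLM_LVB_SIZE, GFP_NOFS);
if (!ls->ls_lvb_bits)
	return -ENOMEM;

/* Sketch: matching teardown (assumed unmount path). */
kfree(ls->ls_lvb_bits);
ls->ls_lvb_bits = NULL;
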
Code example #3
static void gfs2_control_func(struct work_struct *work)
{
	struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_control_work.work);
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	uint32_t block_gen, start_gen, lvb_gen, flags;
	int recover_set = 0;
	int write_lvb = 0;
	int recover_size;
	int i, error;

	spin_lock(&ls->ls_recover_spin);
	/*
	 * No MOUNT_DONE means we're still mounting; control_mount()
	 * will set this flag, after which this thread will take over
	 * all further clearing of BLOCK_LOCKS.
	 *
	 * FIRST_MOUNT means this node is doing first mounter recovery,
	 * for which recovery control is handled by
	 * control_mount()/control_first_done(), not this thread.
	 */
	if (!test_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags) ||
	     test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) {
		spin_unlock(&ls->ls_recover_spin);
		return;
	}
	block_gen = ls->ls_recover_block;
	start_gen = ls->ls_recover_start;
	spin_unlock(&ls->ls_recover_spin);

	/*
	 * Equal block_gen and start_gen implies we are between
	 * recover_prep and recover_done callbacks, which means
	 * dlm recovery is in progress and dlm locking is blocked.
	 * There's no point trying to do any work until recover_done.
	 */

	if (block_gen == start_gen)
		return;

	/*
	 * Propagate recover_submit[] and recover_result[] to lvb:
	 * dlm_recoverd adds to recover_submit[] jids needing recovery
	 * gfs2_recover adds to recover_result[] journal recovery results
	 *
	 * set lvb bit for jids in recover_submit[] if the lvb has not
	 * yet been updated for the generation of the failure
	 *
	 * clear lvb bit for jids in recover_result[] if the result of
	 * the journal recovery is SUCCESS
	 */

	error = control_lock(sdp, DLM_LOCK_EX, DLM_LKF_CONVERT|DLM_LKF_VALBLK);
	if (error) {
		fs_err(sdp, "control lock EX error %d\n", error);
		return;
	}

	control_lvb_read(ls, &lvb_gen, ls->ls_lvb_bits);

	spin_lock(&ls->ls_recover_spin);
	if (block_gen != ls->ls_recover_block ||
	    start_gen != ls->ls_recover_start) {
		fs_info(sdp, "recover generation %u block1 %u %u\n",
			start_gen, block_gen, ls->ls_recover_block);
		spin_unlock(&ls->ls_recover_spin);
		control_lock(sdp, DLM_LOCK_NL, DLM_LKF_CONVERT);
		return;
	}

	recover_size = ls->ls_recover_size;

	if (lvb_gen <= start_gen) {
		/*
		 * Clear lvb bits for jids we've successfully recovered.
		 * Because all nodes attempt to recover failed journals,
		 * a journal can be recovered multiple times successfully
		 * in succession.  Only the first will really do recovery,
		 * the others find it clean, but still report a successful
		 * recovery.  So, another node may have already recovered
		 * the jid and cleared the lvb bit for it.
		 */
		for (i = 0; i < recover_size; i++) {
			if (ls->ls_recover_result[i] != LM_RD_SUCCESS)
				continue;

			ls->ls_recover_result[i] = 0;

			if (!test_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET))
				continue;

			__clear_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET);
			write_lvb = 1;
		}
	}

	if (lvb_gen == start_gen) {
		/*
		 * Failed slots before start_gen are already set in lvb.
		 */
		for (i = 0; i < recover_size; i++) {
			if (!ls->ls_recover_submit[i])
				continue;
			if (ls->ls_recover_submit[i] < lvb_gen)
				ls->ls_recover_submit[i] = 0;
		}
	} else if (lvb_gen < start_gen) {
		/*
		 * Failed slots before start_gen are not yet set in lvb.
		 */
		for (i = 0; i < recover_size; i++) {
			if (!ls->ls_recover_submit[i])
				continue;
			if (ls->ls_recover_submit[i] < start_gen) {
				ls->ls_recover_submit[i] = 0;
				__set_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET);
			}
		}
		/* even if there are no bits to set, we need to write the
		   latest generation to the lvb */
		write_lvb = 1;
	} else {
		/*
		 * we should be getting a recover_done() for lvb_gen soon
		 */
	}
	spin_unlock(&ls->ls_recover_spin);

	if (write_lvb) {
		control_lvb_write(ls, start_gen, ls->ls_lvb_bits);
		flags = DLM_LKF_CONVERT | DLM_LKF_VALBLK;
	} else {
		flags = DLM_LKF_CONVERT;
	}

	error = control_lock(sdp, DLM_LOCK_NL, flags);
	if (error) {
		fs_err(sdp, "control lock NL error %d\n", error);
		return;
	}

	/*
	 * Everyone will see jid bits set in the lvb, run gfs2_recover_set(),
	 * and clear a jid bit in the lvb if the recovery is a success.
	 * Eventually all journals will be recovered, all jid bits will
	 * be cleared in the lvb, and everyone will clear BLOCK_LOCKS.
	 */

	for (i = 0; i < recover_size; i++) {
		if (test_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET)) {
			fs_info(sdp, "recover generation %u jid %d\n",
				start_gen, i);
			gfs2_recover_set(sdp, i);
			recover_set++;
		}
	}
	if (recover_set)
		return;

	/*
	 * No more jid bits set in lvb, all recovery is done, unblock locks
	 * (unless a new recover_prep callback has occurred blocking locks
	 * again while working above)
	 */

	spin_lock(&ls->ls_recover_spin);
	if (ls->ls_recover_block == block_gen &&
	    ls->ls_recover_start == start_gen) {
		clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
		spin_unlock(&ls->ls_recover_spin);
		fs_info(sdp, "recover generation %u done\n", start_gen);
		gfs2_glock_thaw(sdp);
	} else {
		fs_info(sdp, "recover generation %u block2 %u %u\n",
			start_gen, block_gen, ls->ls_recover_block);
		spin_unlock(&ls->ls_recover_spin);
	}
}
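
The container_of() call shows gfs2_control_func() to be the handler of the delayed work item sd_control_work. As a minimal sketch of how such a handler is wired up and kicked, assuming a dedicated workqueue (the name gfs2_control_wq and the exact call sites are assumptions):

/* Sketch: bind the handler to the work item once (assumed init path). */
INIT_DELAYED_WORK(&sdp->sd_control_work, gfs2_control_func);

/* Sketch: schedule a control pass immediately, e.g. after a dlm
 * recovery callback updates ls_recover_* state (assumed call site). */
queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work, 0);
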
Code example #4
static void gfs2_control_func(struct work_struct *work)
{
	struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_control_work.work);
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	char lvb_bits[GDLM_LVB_SIZE];
	uint32_t block_gen, start_gen, lvb_gen, flags;
	int recover_set = 0;
	int write_lvb = 0;
	int recover_size;
	int i, error;

	spin_lock(&ls->ls_recover_spin);
	/*
	 * No MOUNT_DONE means we're still mounting; control_mount()
	 * will set this flag, after which this thread will take over
	 * all further clearing of BLOCK_LOCKS.
	 *
	 * FIRST_MOUNT means this node is doing first mounter recovery,
	 * for which recovery control is handled by
	 * control_mount()/control_first_done(), not this thread.
	 */
	if (!test_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags) ||
	     test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) {
		spin_unlock(&ls->ls_recover_spin);
		return;
	}
	block_gen = ls->ls_recover_block;
	start_gen = ls->ls_recover_start;
	spin_unlock(&ls->ls_recover_spin);

	/*
	 * Equal block_gen and start_gen implies we are between
	 * recover_prep and recover_done callbacks, which means
	 * dlm recovery is in progress and dlm locking is blocked.
	 * There's no point trying to do any work until recover_done.
	 */

	if (block_gen == start_gen)
		return;

	/*
	 * Propagate recover_submit[] and recover_result[] to lvb:
	 * dlm_recoverd adds to recover_submit[] jids needing recovery
	 * gfs2_recover adds to recover_result[] journal recovery results
	 *
	 * set lvb bit for jids in recover_submit[] if the lvb has not
	 * yet been updated for the generation of the failure
	 *
	 * clear lvb bit for jids in recover_result[] if the result of
	 * the journal recovery is SUCCESS
	 */

	error = control_lock(sdp, DLM_LOCK_EX, DLM_LKF_CONVERT|DLM_LKF_VALBLK);
	if (error) {
		fs_err(sdp, "control lock EX error %d\n", error);
		return;
	}

	control_lvb_read(ls, &lvb_gen, lvb_bits);

	spin_lock(&ls->ls_recover_spin);
	if (block_gen != ls->ls_recover_block ||
	    start_gen != ls->ls_recover_start) {
		fs_info(sdp, "recover generation %u block1 %u %u\n",
			start_gen, block_gen, ls->ls_recover_block);
		spin_unlock(&ls->ls_recover_spin);
		control_lock(sdp, DLM_LOCK_NL, DLM_LKF_CONVERT);
		return;
	}

	recover_size = ls->ls_recover_size;

	if (lvb_gen <= start_gen) {
		/*
		 * Clear lvb bits for jids we've successfully recovered.
		 * Because all nodes attempt to recover failed journals,
		 * a journal can be recovered multiple times successfully
		 * in succession.  Only the first will really do recovery,
		 * the others find it clean, but still report a successful
		 * recovery.  So, another node may have already recovered
		 * the jid and cleared the lvb bit for it.
		 */
		for (i = 0; i < recover_size; i++) {
			if (ls->ls_recover_result[i] != LM_RD_SUCCESS)
				continue;

			ls->ls_recover_result[i] = 0;

			if (!test_bit_le(i, lvb_bits + JID_BITMAP_OFFSET))
				continue;

			__clear_bit_le(i, lvb_bits + JID_BITMAP_OFFSET);
			write_lvb = 1;
		}
	}

	if (lvb_gen == start_gen) {
		/*
		 * Failed slots before start_gen are already set in lvb.
		 */
		for (i = 0; i < recover_size; i++) {
			if (!ls->ls_recover_submit[i])
				continue;
			if (ls->ls_recover_submit[i] < lvb_gen)
				ls->ls_recover_submit[i] = 0;
		}
	} else if (lvb_gen < start_gen) {
		/*
		 * Failed slots before start_gen are not yet set in lvb.
		 */
		for (i = 0; i < recover_size; i++) {
			if (!ls->ls_recover_submit[i])
				continue;
			if (ls->ls_recover_submit[i] < start_gen) {
				ls->ls_recover_submit[i] = 0;
				__set_bit_le(i, lvb_bits + JID_BITMAP_OFFSET);
			}
		}
		/* even if there are no bits to set, we need to write the
		   latest generation to the lvb */
		write_lvb = 1;
	} else {
		/*
		 * we should be getting a recover_done() for lvb_gen soon
		 */
	}
	spin_unlock(&ls->ls_recover_spin);

	if (write_lvb) {
		control_lvb_write(ls, start_gen, lvb_bits);
		flags = DLM_LKF_CONVERT | DLM_LKF_VALBLK;
	} else {
		flags = DLM_LKF_CONVERT;
	}

	error = control_lock(sdp, DLM_LOCK_NL, flags);
	if (error) {
		fs_err(sdp, "control lock NL error %d\n", error);
		return;
	}

	/*
	 * Everyone will see jid bits set in the lvb, run gfs2_recover_set(),
	 * and clear a jid bit in the lvb if the recovery is a success.
	 * Eventually all journals will be recovered, all jid bits will
	 * be cleared in the lvb, and everyone will clear BLOCK_LOCKS.
	 */

	for (i = 0; i < recover_size; i++) {
		if (test_bit_le(i, lvb_bits + JID_BITMAP_OFFSET)) {
			fs_info(sdp, "recover generation %u jid %d\n",
				start_gen, i);
			gfs2_recover_set(sdp, i);
			recover_set++;
		}
	}
	if (recover_set)
		return;

	/*
	 * No more jid bits set in lvb, all recovery is done, unblock locks
	 * (unless a new recover_prep callback has occurred blocking locks
	 * again while working above)
	 */

	spin_lock(&ls->ls_recover_spin);
	if (ls->ls_recover_block == block_gen &&
	    ls->ls_recover_start == start_gen) {
		clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
		spin_unlock(&ls->ls_recover_spin);
		fs_info(sdp, "recover generation %u done\n", start_gen);
		gfs2_glock_thaw(sdp);
	} else {
		fs_info(sdp, "recover generation %u block2 %u %u\n",
			start_gen, block_gen, ls->ls_recover_block);
		spin_unlock(&ls->ls_recover_spin);
	}
}
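
Both control_mount() variants also rely on all_jid_bits_clear() to decide whether any journal recoveries are still pending in the lvb. A minimal sketch of that predicate, assuming the jid bitmap spans the LVB from JID_BITMAP_OFFSET to GDLM_LVB_SIZE (as in the sketches above); memchr_inv() returns NULL when the range contains only the given byte:

/* Sketch: true when no jid bit is set anywhere in the bitmap
 * region of the LVB (assumed layout). */
static int all_jid_bits_clear(const char *lvb)
{
	return !memchr_inv(lvb + JID_BITMAP_OFFSET, 0,
			   GDLM_LVB_SIZE - JID_BITMAP_OFFSET);
}
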