Code example #1
/*
 * Remove a volume.
 */
int
hammer_ioc_volume_del(hammer_transaction_t trans, hammer_inode_t ip,
		struct hammer_ioc_volume *ioc)
{
	struct hammer_mount *hmp = trans->hmp;
	struct mount *mp = hmp->mp;
	hammer_volume_t volume;
	int error = 0;

	if (mp->mnt_flag & MNT_RDONLY) {
		kprintf("Cannot delete volume from read-only HAMMER filesystem\n");
		return (EINVAL);
	}

	if (hammer_lock_ex_try(&hmp->volume_lock) != 0) {
		kprintf("Another volume operation is in progress!\n");
		return (EAGAIN);
	}

	volume = NULL;

	/*
	 * Find the volume by matching vol_name against the requested
	 * device name.
	 */
	for (int vol_no = 0; vol_no < HAMMER_MAX_VOLUMES; ++vol_no) {
		volume = hammer_get_volume(hmp, vol_no, &error);
		if (volume == NULL && error == ENOENT) {
			/*
			 * Skip unused volume numbers
			 */
			error = 0;
			continue;
		}
		KKASSERT(volume != NULL && error == 0);
		if (strcmp(volume->vol_name, ioc->device_name) == 0) {
			break;
		}
		hammer_rel_volume(volume, 0);
		volume = NULL;
	}

	if (volume == NULL) {
		kprintf("Couldn't find volume\n");
		error = EINVAL;
		goto end;
	}

	if (volume == trans->rootvol) {
		kprintf("Cannot remove the root volume\n");
		hammer_rel_volume(volume, 0);
		error = EINVAL;
		goto end;
	}

	/*
	 * Reblock the entire filesystem to evacuate the departing
	 * volume.  With volume_to_remove set, the reblocker will not
	 * allocate big-blocks on that volume, so its data migrates to
	 * the remaining volumes.
	 */

	hmp->volume_to_remove = volume->vol_no;

	struct hammer_ioc_reblock reblock;
	bzero(&reblock, sizeof(reblock));

	reblock.key_beg.localization = HAMMER_MIN_LOCALIZATION;
	reblock.key_beg.obj_id = HAMMER_MIN_OBJID;
	reblock.key_end.localization = HAMMER_MAX_LOCALIZATION;
	reblock.key_end.obj_id = HAMMER_MAX_OBJID;
	reblock.head.flags = HAMMER_IOC_DO_FLAGS;
	reblock.free_level = 0;

	error = hammer_ioc_reblock(trans, ip, &reblock);

	if (reblock.head.flags & HAMMER_IOC_HEAD_INTR) {
		error = EINTR;
	}

	if (error) {
		if (error == EINTR) {
			kprintf("reblock was interrupted\n");
		} else {
			kprintf("reblock failed: %d\n", error);
		}
		hmp->volume_to_remove = -1;
		hammer_rel_volume(volume, 0);
		goto end;
	}

	/*
	 * Sync the filesystem and wait for the flusher to finish.
	 */
	int count = 0;
	while (hammer_flusher_haswork(hmp)) {
		hammer_flusher_sync(hmp);
		++count;
		if (count >= 5) {
			if (count == 5)
				kprintf("HAMMER: flushing.");
			else
				kprintf(".");
			tsleep(&count, 0, "hmrufl", hz);
		}
		if (count == 30) {
			kprintf("giving up\n");
			break;
		}
	}
	if (count >= 5 && count < 30)
		kprintf("\n");

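	/*
	 * Hold the sync lock shared and the blockmap lock exclusively
	 * while the volume's freemap is torn down and the on-disk
	 * statistics are adjusted.
	 */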
	hammer_sync_lock_sh(trans);
	hammer_lock_ex(&hmp->blkmap_lock);

	/*
	 * We use stat later to update rootvol's bigblock stats
	 */
	struct bigblock_stat stat;
	error = hammer_free_freemap(trans, volume, &stat);
	if (error) {
		kprintf("Failed to free volume. Volume not empty!\n");
		hmp->volume_to_remove = -1;
		hammer_rel_volume(volume, 0);
		hammer_unlock(&hmp->blkmap_lock);
		hammer_sync_unlock(trans);
		goto end;
	}

	hmp->volume_to_remove = -1;

	hammer_rel_volume(volume, 0);

	/*
	 * Unload buffers belonging to the departing volume.  Passing
	 * the volume as the scan argument restricts the
	 * hammer_unload_buffer() callback to that volume's buffers.
	 */
	RB_SCAN(hammer_buf_rb_tree, &hmp->rb_bufs_root, NULL,
		hammer_unload_buffer, volume);

	error = hammer_unload_volume(volume, NULL);
	if (error == -1) {
		kprintf("Failed to unload volume\n");
		hammer_unlock(&hmp->blkmap_lock);
		hammer_sync_unlock(trans);
		goto end;
	}

	volume = NULL;
	--hmp->nvolumes;

	/*
	 * Update the vol_count field of each remaining volume.
	 */
	for (int vol_no = 0; vol_no < HAMMER_MAX_VOLUMES; ++vol_no) {
		volume = hammer_get_volume(hmp, vol_no, &error);
		if (volume == NULL && error == ENOENT) {
			/*
			 * Skip unused volume numbers
			 */
			error = 0;
			continue;
		}

		KKASSERT(volume != NULL && error == 0);
		hammer_modify_volume_field(trans, volume, vol_count);
		volume->ondisk->vol_count = hmp->nvolumes;
		hammer_modify_volume_done(volume);

		/*
		 * Only changes to the header of the root volume
		 * are automatically flushed to disk. For all
		 * other volumes that we modify we do it here.
		 *
		 * No interlock is needed, volume buffers are not
		 * messed with by bioops.
		 */
		if (volume != trans->rootvol && volume->io.modified) {
			hammer_crc_set_volume(volume->ondisk);
			hammer_io_flush(&volume->io, 0);
		}

		hammer_rel_volume(volume, 0);
	}

	/*
	 * Update the total number of bigblocks
	 */
	hammer_modify_volume_field(trans, trans->rootvol,
		vol0_stat_bigblocks);
	trans->rootvol->ondisk->vol0_stat_bigblocks -= stat.total_bigblocks;
	hammer_modify_volume_done(trans->rootvol);

	/*
	 * Update the number of free bigblocks
	 * (including the copy in hmp)
	 */
	hammer_modify_volume_field(trans, trans->rootvol,
		vol0_stat_freebigblocks);
	trans->rootvol->ondisk->vol0_stat_freebigblocks -= stat.total_free_bigblocks;
	hmp->copy_stat_freebigblocks =
		trans->rootvol->ondisk->vol0_stat_freebigblocks;
	hammer_modify_volume_done(trans->rootvol);

	/*
	 * Bigblock count changed so recompute the total number of blocks.
	 */
	mp->mnt_stat.f_blocks = trans->rootvol->ondisk->vol0_stat_bigblocks *
	    (HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
	mp->mnt_vstat.f_blocks = trans->rootvol->ondisk->vol0_stat_bigblocks *
	    (HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);

	hammer_unlock(&hmp->blkmap_lock);
	hammer_sync_unlock(trans);

	/*
	 * Erase the volume header of the removed device so that it
	 * cannot accidentally be mounted as a HAMMER volume again.
	 */
	struct vnode *devvp = NULL;
	error = hammer_setup_device(&devvp, ioc->device_name, 0);
	if (error) {
		kprintf("Failed to open device: %s\n", ioc->device_name);
		goto end;
	}
	KKASSERT(devvp);
	error = hammer_clear_volume_header(devvp);
	if (error) {
		kprintf("Failed to clear volume header of device: %s\n",
			ioc->device_name);
		hammer_close_device(&devvp, 0);
		goto end;
	}
	hammer_close_device(&devvp, 0);

	KKASSERT(error == 0);
end:
	hammer_unlock(&hmp->volume_lock);
	return (error);
}
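
For context, this handler sits behind HAMMER's ioctl interface. The sketch below shows roughly how a userland tool such as hammer(8) volume-del might reach it. The HAMMERIOC_DEL_VOLUME command and the device_name field of struct hammer_ioc_volume come from <vfs/hammer/hammer_ioctl.h>; the program structure and error handling here are illustrative only, not the actual utility code.

#include <sys/types.h>
#include <sys/ioctl.h>
#include <vfs/hammer/hammer_ioctl.h>

#include <err.h>
#include <fcntl.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>

int
main(int argc, char **argv)
{
	struct hammer_ioc_volume ioc;
	int fd;

	if (argc != 3)
		errx(1, "usage: voldel <mountpoint> <device>");

	/* Any descriptor on the mounted HAMMER filesystem will do. */
	fd = open(argv[1], O_RDONLY);
	if (fd < 0)
		err(1, "%s", argv[1]);

	bzero(&ioc, sizeof(ioc));
	strlcpy(ioc.device_name, argv[2], sizeof(ioc.device_name));

	/* Dispatches to hammer_ioc_volume_del() in the kernel. */
	if (ioctl(fd, HAMMERIOC_DEL_VOLUME, &ioc) < 0)
		err(1, "HAMMERIOC_DEL_VOLUME");

	close(fd);
	return (0);
}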
Code example #2
/*
 * Clean up the internal mount structure and disassociate it from the mount.
 * This may issue I/O.
 *
 * Called with fs_token held.
 */
static void
hammer_free_hmp(struct mount *mp)
{
	hammer_mount_t hmp = (void *)mp->mnt_data;
	hammer_flush_group_t flg;
	int count;
	int dummy;

	/*
	 * Flush anything dirty.  This won't even run if the
	 * filesystem errored-out.
	 */
	count = 0;
	while (hammer_flusher_haswork(hmp)) {
		hammer_flusher_sync(hmp);
		++count;
		if (count >= 5) {
			if (count == 5)
				kprintf("HAMMER: umount flushing.");
			else
				kprintf(".");
			tsleep(&dummy, 0, "hmrufl", hz);
		}
		if (count == 30) {
			kprintf("giving up\n");
			break;
		}
	}
	if (count >= 5 && count < 30)
		kprintf("\n");

	/*
	 * If the mount had a critical error we have to destroy any
	 * remaining inodes before we can finish cleaning up the flusher.
	 */
	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) {
		RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
			hammer_destroy_inode_callback, NULL);
	}

	/*
	 * There shouldn't be any inodes left now and any left over
	 * flush groups should now be empty.
	 */
	KKASSERT(RB_EMPTY(&hmp->rb_inos_root));
	while ((flg = TAILQ_FIRST(&hmp->flush_group_list)) != NULL) {
		TAILQ_REMOVE(&hmp->flush_group_list, flg, flush_entry);
		KKASSERT(RB_EMPTY(&flg->flush_tree));
		if (flg->refs) {
			kprintf("HAMMER: Warning, flush_group %p was "
				"not empty on umount!\n", flg);
		}
		kfree(flg, hmp->m_misc);
	}

	/*
	 * We can finally destroy the flusher
	 */
	hammer_flusher_destroy(hmp);

	/*
	 * We may have held recovered buffers due to a read-only mount.
	 * These must be discarded.
	 */
	if (hmp->ronly)
		hammer_recover_flush_buffers(hmp, NULL, -1);

	/*
	 * Unload buffers and then volumes.  The NULL scan argument
	 * tells the callbacks to unload everything.
	 */
	RB_SCAN(hammer_buf_rb_tree, &hmp->rb_bufs_root, NULL,
		hammer_unload_buffer, NULL);
	RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
		hammer_unload_volume, NULL);

	mp->mnt_data = NULL;
	mp->mnt_flag &= ~MNT_LOCAL;
	hmp->mp = NULL;
	hammer_destroy_objid_cache(hmp);
	hammer_destroy_dedup_cache(hmp);
	if (hmp->dedup_free_cache != NULL) {
		kfree(hmp->dedup_free_cache, hmp->m_misc);
		hmp->dedup_free_cache = NULL;
	}
	kmalloc_destroy(&hmp->m_misc);
	kmalloc_destroy(&hmp->m_inodes);
	lwkt_reltoken(&hmp->fs_token);
	kfree(hmp, M_HAMMER);
}
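
The bounded flush loop at the top of hammer_free_hmp() (the same shape appears in code example #1) is a small reusable pattern: retry quietly for a grace period, then emit progress dots once per pass, then give up after a hard cap. A userland sketch of the pattern follows; drain_with_progress() and its haswork/sync callbacks are hypothetical stand-ins for hammer_flusher_haswork() and hammer_flusher_sync().

#include <stdio.h>
#include <unistd.h>

/*
 * Retry sync() while haswork() reports pending work.  Stay silent for
 * the first few passes, then print progress dots once per second, and
 * give up after 30 passes.  Returns 0 if drained, -1 if we gave up.
 */
static int
drain_with_progress(int (*haswork)(void *), void (*sync)(void *), void *arg)
{
	int count = 0;

	while (haswork(arg)) {
		sync(arg);
		++count;
		if (count >= 5) {
			if (count == 5)
				printf("flushing.");
			else
				printf(".");
			fflush(stdout);
			sleep(1);	/* the kernel loop uses tsleep(..., hz) */
		}
		if (count == 30) {
			printf("giving up\n");
			return (-1);
		}
	}
	if (count >= 5)
		printf("\n");
	return (0);
}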
Code example #3
/*
 * Flush the next sequence number until an open flush group is encountered
 * or we reach (next).  Not all sequence numbers will have flush groups
 * associated with them.  These require that the UNDO/REDO FIFO still be
 * flushed since it can take at least one additional run to synchronize
 * the FIFO, and more to also synchronize the reserve structures.
 */
static int
hammer_flusher_flush(hammer_mount_t hmp, int *nomorep)
{
	hammer_flusher_info_t info;
	hammer_flush_group_t flg;
	hammer_reserve_t resv;
	int count;
	int seq;

	/*
	 * Just in case there's a flush race on mount.  The sequence
	 * number does not change.
	 */
	if (TAILQ_FIRST(&hmp->flusher.ready_list) == NULL) {
		*nomorep = 1;
		return (hmp->flusher.done);
	}
	*nomorep = 0;

	/*
	 * Flush the next sequence number.  Sequence numbers can exist
	 * without an assigned flush group, indicating that just a FIFO flush
	 * should occur.
	 */
	seq = hmp->flusher.done + 1;
	flg = TAILQ_FIRST(&hmp->flush_group_list);
	if (flg == NULL) {
		if (seq == hmp->flusher.next) {
			*nomorep = 1;
			return (hmp->flusher.done);
		}
	} else if (seq == flg->seq) {
		if (flg->closed) {
			KKASSERT(flg->running == 0);
			flg->running = 1;
			if (hmp->fill_flush_group == flg) {
				hmp->fill_flush_group =
					TAILQ_NEXT(flg, flush_entry);
			}
		} else {
			*nomorep = 1;
			return (hmp->flusher.done);
		}
	} else {
		/*
		 * Sequence number problems can only happen if a critical
		 * filesystem error occurred which forced the filesystem into
		 * read-only mode.
		 */
		KKASSERT(flg->seq - seq > 0 || hmp->ronly >= 2);
		flg = NULL;
	}

	/*
	 * We only do one flg but we may have to loop/retry.
	 *
	 * Due to various races it is possible to come across a flush
	 * group which has not yet been closed.
	 */
	count = 0;
	while (flg && flg->running) {
		++count;
		if (hammer_debug_general & 0x0001) {
			hdkprintf("%d ttl=%d recs=%d\n",
				flg->seq, flg->total_count, flg->refs);
		}
		if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
			break;
		hammer_start_transaction_fls(&hmp->flusher.trans, hmp);

		/*
		 * If the previous flush cycle just about exhausted our
		 * UNDO space we may have to do a dummy cycle to move the
		 * first_offset up before actually digging into a new cycle,
		 * or the new cycle will not have sufficient undo space.
		 */
		if (hammer_flusher_undo_exhausted(&hmp->flusher.trans, 3))
			hammer_flusher_finalize(&hmp->flusher.trans, 0);

		KKASSERT(hmp->next_flush_group != flg);

		/*
		 * Place the flg in the flusher structure and start the
		 * slaves running.  The slaves will compete for inodes
		 * to flush.
		 *
		 * Make a per-thread copy of the transaction.
		 */
		while ((info = TAILQ_FIRST(&hmp->flusher.ready_list)) != NULL) {
			TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
			info->flg = flg;
			info->runstate = 1;
			info->trans = hmp->flusher.trans;
			TAILQ_INSERT_TAIL(&hmp->flusher.run_list, info, entry);
			wakeup(&info->runstate);
		}

		/*
		 * Wait for all slaves to finish running
		 */
		while (TAILQ_FIRST(&hmp->flusher.run_list) != NULL)
			tsleep(&hmp->flusher.ready_list, 0, "hmrfcc", 0);

		/*
		 * Do the final finalization, clean up
		 */
		hammer_flusher_finalize(&hmp->flusher.trans, 1);
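		/* Remember the TID this flush cycle synchronized to. */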
		hmp->flusher.tid = hmp->flusher.trans.tid;

		hammer_done_transaction(&hmp->flusher.trans);

		/*
		 * Loop up on the same flg.  If the flg is done clean it up
		 * and break out.  We only flush one flg.
		 */
		if (RB_EMPTY(&flg->flush_tree)) {
			KKASSERT(flg->refs == 0);
			TAILQ_REMOVE(&hmp->flush_group_list, flg, flush_entry);
			kfree(flg, hmp->m_misc);
			break;
		}
		KKASSERT(TAILQ_FIRST(&hmp->flush_group_list) == flg);
	}

	/*
	 * We may have pure meta-data to flush, or we may have to finish
	 * cycling the UNDO FIFO, even if there were no flush groups.
	 */
	if (count == 0 && hammer_flusher_haswork(hmp)) {
		hammer_start_transaction_fls(&hmp->flusher.trans, hmp);
		hammer_flusher_finalize(&hmp->flusher.trans, 1);
		hammer_done_transaction(&hmp->flusher.trans);
	}

	/*
	 * Clean up any freed big-blocks (typically zone-2).
	 * resv->flg_no is typically set several flush groups ahead
	 * of the free so the freed block is not reused until flush
	 * cycles that may still reference it have completed.
	 */
	while ((resv = TAILQ_FIRST(&hmp->delay_list)) != NULL) {
		if (resv->flg_no - seq > 0)
			break;
		hammer_reserve_clrdelay(hmp, resv);
	}
	return (seq);
}
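
Both sequence-number tests in hammer_flusher_flush() (flg->seq - seq > 0 and resv->flg_no - seq > 0) rely on signed-difference ordering so comparisons stay correct when the counters wrap. The standalone sketch below illustrates the idiom; seq_after() is an illustrative helper, not a HAMMER function, and it uses unsigned arithmetic so the wrap is well defined before the signed reinterpretation (which assumes two's complement, as kernel code does).

#include <assert.h>

/*
 * Return nonzero if sequence number a is logically after b.  Unsigned
 * subtraction wraps modulo 2^32, so any two numbers less than 2^31
 * apart order correctly even across the wrap point.
 */
static int
seq_after(unsigned int a, unsigned int b)
{
	return ((int)(a - b) > 0);
}

int
main(void)
{
	assert(seq_after(5, 3));		/* ordinary case */
	assert(!seq_after(3, 5));
	assert(seq_after(2, 0xfffffffeU));	/* "after", across the wrap */
	assert(!seq_after(0xfffffffeU, 2));
	return (0);
}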