Example #1
/*
 * Enter an undo into the history.  Return EALREADY if a previous request
 * completely covers the new request.
 */
int
hammer_enter_undo_history(hammer_mount_t hmp, hammer_off_t offset, int bytes)
{
	hammer_undo_t node;
	hammer_undo_t onode __debugvar;

	node = RB_LOOKUP(hammer_und_rb_tree, &hmp->rb_undo_root, offset);
	if (node) {
		TAILQ_REMOVE(&hmp->undo_lru_list, node, lru_entry);
		TAILQ_INSERT_TAIL(&hmp->undo_lru_list, node, lru_entry);
		if (bytes <= node->bytes)
			return(EALREADY);
		node->bytes = bytes;
		return(0);
	}
	if (hmp->undo_alloc != HAMMER_MAX_UNDOS) {
		node = &hmp->undos[hmp->undo_alloc++];
	} else {
		node = TAILQ_FIRST(&hmp->undo_lru_list);
		TAILQ_REMOVE(&hmp->undo_lru_list, node, lru_entry);
		RB_REMOVE(hammer_und_rb_tree, &hmp->rb_undo_root, node);
	}
	node->offset = offset;
	node->bytes = bytes;
	TAILQ_INSERT_TAIL(&hmp->undo_lru_list, node, lru_entry);
	onode = RB_INSERT(hammer_und_rb_tree, &hmp->rb_undo_root, node);
	KKASSERT(onode == NULL);
	return(0);
}
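
A minimal usage sketch (hypothetical caller; assumes a valid
hammer_mount_t from an active mount) showing the coverage semantics:

static int
undo_history_example(hammer_mount_t hmp)
{
	int error;

	/* First entry at this offset is recorded and returns 0. */
	error = hammer_enter_undo_history(hmp, 0x10000, 512);
	KKASSERT(error == 0);

	/* A smaller request at the same offset is already covered. */
	error = hammer_enter_undo_history(hmp, 0x10000, 256);
	return (error);		/* EALREADY */
}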
Example #2
__inline hammer_dedup_cache_t
hammer_dedup_cache_lookup(hammer_mount_t hmp, hammer_crc_t crc)
{
	hammer_dedup_cache_t dcp;

	dcp = RB_LOOKUP(hammer_dedup_crc_rb_tree,
				&hmp->rb_dedup_crc_root, crc);
	return dcp;
}
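
A hedged sketch of a CRC-keyed consultation of the cache (hypothetical
helper; assumes hmp is a valid mount and crc was computed over the
candidate data block):

static int
dedup_candidate_exists(hammer_mount_t hmp, hammer_crc_t crc)
{
	hammer_dedup_cache_t dcp;

	dcp = hammer_dedup_cache_lookup(hmp, crc);
	return (dcp != NULL);
}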
Example #3
hammer_dedup_cache_t
hammer_dedup_cache_add(hammer_inode_t ip, hammer_btree_leaf_elm_t leaf)
{
	hammer_dedup_cache_t dcp, tmp;
	hammer_mount_t hmp = ip->hmp;

	/*
	 * Pre-allocate a spare entry.  kmalloc() with M_WAITOK can block,
	 * so re-check dedup_free_cache afterwards; another thread may have
	 * refilled it while we slept.
	 */
	if (hmp->dedup_free_cache == NULL) {
		tmp = kmalloc(sizeof(*tmp), hmp->m_misc, M_WAITOK | M_ZERO);
		if (hmp->dedup_free_cache == NULL)
			hmp->dedup_free_cache = tmp;
		else
			kfree(tmp, hmp->m_misc);
	}

	KKASSERT(leaf != NULL);

	dcp = RB_LOOKUP(hammer_dedup_crc_rb_tree,
				&hmp->rb_dedup_crc_root, leaf->data_crc);
	if (dcp != NULL) {
		RB_REMOVE(hammer_dedup_off_rb_tree,
				&hmp->rb_dedup_off_root, dcp);
		TAILQ_REMOVE(&hmp->dedup_lru_list, dcp, lru_entry);
		goto populate;
	}

	if (hmp->dedup_cache_count < hammer_live_dedup_cache_size) {
		dcp = hmp->dedup_free_cache;
		hmp->dedup_free_cache = NULL;
		++hmp->dedup_cache_count;
	} else {
		dcp = TAILQ_FIRST(&hmp->dedup_lru_list);
		RB_REMOVE(hammer_dedup_crc_rb_tree,
				&hmp->rb_dedup_crc_root, dcp);
		RB_REMOVE(hammer_dedup_off_rb_tree,
				&hmp->rb_dedup_off_root, dcp);
		TAILQ_REMOVE(&hmp->dedup_lru_list, dcp, lru_entry);
	}

	dcp->crc = leaf->data_crc;
	tmp = RB_INSERT(hammer_dedup_crc_rb_tree, &hmp->rb_dedup_crc_root, dcp);
	KKASSERT(tmp == NULL);

populate:
	dcp->hmp = ip->hmp;
	dcp->obj_id = ip->obj_id;
	dcp->localization = ip->obj_localization;
	dcp->file_offset = leaf->base.key - leaf->data_len;
	dcp->bytes = leaf->data_len;
	dcp->data_offset = leaf->data_offset;

	tmp = RB_INSERT(hammer_dedup_off_rb_tree, &hmp->rb_dedup_off_root, dcp);
	KKASSERT(tmp == NULL);
	TAILQ_INSERT_TAIL(&hmp->dedup_lru_list, dcp, lru_entry);

	return (dcp);
}
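
A hedged sketch of adding a freshly written record to the cache
(hypothetical helper; assumes leaf describes a just-committed data
record of ip):

static void
record_for_dedup(hammer_inode_t ip, hammer_btree_leaf_elm_t leaf)
{
	hammer_dedup_cache_t dcp;

	/* Insert (or refresh) the entry keyed by leaf->data_crc. */
	dcp = hammer_dedup_cache_add(ip, leaf);

	/* The entry is now findable by CRC until it ages off the LRU. */
	KKASSERT(hammer_dedup_cache_lookup(ip->hmp, leaf->data_crc) == dcp);
}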
Example #4
/*
 * Lookup an inode by inode number
 */
hammer2_inode_t *
hammer2_inode_lookup(hammer2_pfsmount_t *pmp, hammer2_tid_t inum)
{
	hammer2_inode_t *ip;

	if (pmp) {
		spin_lock(&pmp->inum_spin);
		ip = RB_LOOKUP(hammer2_inode_tree, &pmp->inum_tree, inum);
		if (ip)
			hammer2_inode_ref(ip);
		spin_unlock(&pmp->inum_spin);
	} else {
		ip = NULL;
	}
	return(ip);
}
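
The lookup returns a referenced inode, so a caller must drop that
reference when done. A hedged sketch (hypothetical helper; assumes
hammer2_inode_drop() as the release counterpart of hammer2_inode_ref()):

static int
inode_exists(hammer2_pfsmount_t *pmp, hammer2_tid_t inum)
{
	hammer2_inode_t *ip;

	ip = hammer2_inode_lookup(pmp, inum);
	if (ip == NULL)
		return (0);
	hammer2_inode_drop(ip);		/* release the lookup's ref */
	return (1);
}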
Example #5
/*
 * Lookup an inode by inode number
 */
hammer2_inode_t *
hammer2_inode_lookup(hammer2_pfs_t *pmp, hammer2_tid_t inum)
{
	hammer2_inode_t *ip;

	KKASSERT(pmp);
	if (pmp->spmp_hmp) {
		ip = NULL;
	} else {
		hammer2_spin_ex(&pmp->inum_spin);
		ip = RB_LOOKUP(hammer2_inode_tree, &pmp->inum_tree, inum);
		if (ip)
			hammer2_inode_ref(ip);
		hammer2_spin_unex(&pmp->inum_spin);
	}
	return(ip);
}
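
This later variant asserts a non-NULL pmp and special-cases the
super-root PFS: its inodes are not indexed in inum_tree, so the lookup
deliberately reports NULL there. A hedged sketch (hypothetical caller):

static void
superroot_lookup_note(hammer2_pfs_t *pmp)
{
	/* Inodes of the super-root PFS are never found by inum. */
	if (pmp->spmp_hmp != NULL)
		KKASSERT(hammer2_inode_lookup(pmp, 1) == NULL);
}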
Example #6
int
hammer_ioc_volume_add(hammer_transaction_t trans, hammer_inode_t ip,
		struct hammer_ioc_volume *ioc)
{
	struct hammer_mount *hmp = trans->hmp;
	struct mount *mp = hmp->mp;
	hammer_volume_t volume;
	int error;

	if (mp->mnt_flag & MNT_RDONLY) {
		kprintf("Cannot add volume to read-only HAMMER filesystem\n");
		return (EINVAL);
	}

	if (hmp->nvolumes >= HAMMER_MAX_VOLUMES) {
		kprintf("Max number of HAMMER volumes exceeded\n");
		return (EINVAL);
	}

	if (hammer_lock_ex_try(&hmp->volume_lock) != 0) {
		kprintf("Another volume operation is in progress!\n");
		return (EAGAIN);
	}

	/*
	 * Find an unused volume number.
	 */
	int free_vol_no = 0;
	while (free_vol_no < HAMMER_MAX_VOLUMES &&
	       RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, free_vol_no)) {
		++free_vol_no;
	}
	if (free_vol_no >= HAMMER_MAX_VOLUMES) {
		kprintf("Max number of HAMMER volumes exceeded\n");
		hammer_unlock(&hmp->volume_lock);
		return (EINVAL);
	}

	struct vnode *devvp = NULL;
	error = hammer_setup_device(&devvp, ioc->device_name, 0);
	if (error)
		goto end;
	KKASSERT(devvp);
	error = hammer_format_volume_header(
		hmp,
		devvp,
		hmp->rootvol->ondisk->vol_name,
		free_vol_no,
		hmp->nvolumes+1,
		ioc->vol_size,
		ioc->boot_area_size,
		ioc->mem_area_size);
	hammer_close_device(&devvp, 0);
	if (error)
		goto end;

	error = hammer_install_volume(hmp, ioc->device_name, NULL);
	if (error)
		goto end;

	hammer_sync_lock_sh(trans);
	hammer_lock_ex(&hmp->blkmap_lock);

	++hmp->nvolumes;

	/*
	 * Set each volume's vol_count field to the new total.
	 */
	for (int vol_no = 0; vol_no < HAMMER_MAX_VOLUMES; ++vol_no) {
		volume = hammer_get_volume(hmp, vol_no, &error);
		if (volume == NULL && error == ENOENT) {
			/*
			 * Skip unused volume numbers
			 */
			error = 0;
			continue;
		}
		KKASSERT(volume != NULL && error == 0);
		hammer_modify_volume_field(trans, volume, vol_count);
		volume->ondisk->vol_count = hmp->nvolumes;
		hammer_modify_volume_done(volume);

		/*
		 * Only changes to the header of the root volume
		 * are automatically flushed to disk.  All other
		 * volumes that we modify are flushed here.
		 *
		 * No interlock is needed; volume buffers are not
		 * messed with by bioops.
		 */
		if (volume != trans->rootvol && volume->io.modified) {
			hammer_crc_set_volume(volume->ondisk);
			hammer_io_flush(&volume->io, 0);
		}

		hammer_rel_volume(volume, 0);
	}

	volume = hammer_get_volume(hmp, free_vol_no, &error);
	KKASSERT(volume != NULL && error == 0);

	struct bigblock_stat stat;
	error = hammer_format_freemap(trans, volume, &stat);
	KKASSERT(error == 0);

	/*
	 * Increase the total number of bigblocks and update stat/vstat totals.
	 */
	hammer_modify_volume_field(trans, trans->rootvol,
		vol0_stat_bigblocks);
	trans->rootvol->ondisk->vol0_stat_bigblocks += stat.total_bigblocks;
	hammer_modify_volume_done(trans->rootvol);
	/*
	 * Bigblock count changed so recompute the total number of blocks.
	 */
	mp->mnt_stat.f_blocks = trans->rootvol->ondisk->vol0_stat_bigblocks *
	    (HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
	mp->mnt_vstat.f_blocks = trans->rootvol->ondisk->vol0_stat_bigblocks *
	    (HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);

	/*
	 * Increase the number of free bigblocks
	 * (including the copy in hmp)
	 */
	hammer_modify_volume_field(trans, trans->rootvol,
		vol0_stat_freebigblocks);
	trans->rootvol->ondisk->vol0_stat_freebigblocks += stat.total_free_bigblocks;
	hmp->copy_stat_freebigblocks =
		trans->rootvol->ondisk->vol0_stat_freebigblocks;
	hammer_modify_volume_done(trans->rootvol);

	hammer_rel_volume(volume, 0);

	hammer_unlock(&hmp->blkmap_lock);
	hammer_sync_unlock(trans);

	KKASSERT(error == 0);
end:
	hammer_unlock(&hmp->volume_lock);
	if (error)
		kprintf("An error occurred: %d\n", error);
	return (error);
}
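
For context, a hedged sketch of how userland reaches this path. It
assumes the HAMMERIOC_ADD_VOLUME ioctl and the struct hammer_ioc_volume
layout from hammer_ioctl.h, issued on a descriptor opened inside the
mounted filesystem (roughly what `hammer volume-add` does; the size
fields would normally be probed from the device first):

#include <sys/ioctl.h>
#include <string.h>
/* #include <vfs/hammer/hammer_ioctl.h>  -- HAMMERIOC_ADD_VOLUME */

static int
add_volume(int fd, const char *device, int64_t vol_size)
{
	struct hammer_ioc_volume ioc;

	memset(&ioc, 0, sizeof(ioc));
	strlcpy(ioc.device_name, device, sizeof(ioc.device_name));
	ioc.vol_size = vol_size;	/* probed from the device in practice */
	if (ioctl(fd, HAMMERIOC_ADD_VOLUME, &ioc) < 0)
		return (-1);
	return (0);
}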