Example #1
0
/*
 * Convert a zone-3 undo offset into a zone-2 buffer offset.
 */
hammer_off_t
hammer_undo_lookup(hammer_mount_t hmp, hammer_off_t zone3_off, int *errorp)
{
	hammer_volume_t root_volume;
	hammer_blockmap_t undomap __debugvar;
	hammer_off_t result_offset;
	int i;

	KKASSERT((zone3_off & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_UNDO);
	root_volume = hammer_get_root_volume(hmp, errorp);
	if (*errorp)
		return(0);
	undomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
	KKASSERT(HAMMER_ZONE_DECODE(undomap->alloc_offset) == HAMMER_ZONE_UNDO_INDEX);
	KKASSERT(zone3_off < undomap->alloc_offset);

	/*
	 * The zone-2 base recorded in undo offsets[i] plus the
	 * big-block-relative offset of the zone-3 address yields the
	 * zone-2 address.
	 */
	i = (zone3_off & HAMMER_OFF_SHORT_MASK) / HAMMER_BIGBLOCK_SIZE;
	result_offset = root_volume->ondisk->vol0_undo_array[i] +
			(zone3_off & HAMMER_BIGBLOCK_MASK64);

	hammer_rel_volume(root_volume, 0);
	return(result_offset);
}
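
For reference, the index/offset split above relies on HAMMER's 8MB big blocks; a worked example with an assumed zone-3 short offset:

	/*
	 * zone3 short offset 0x01000010, HAMMER_BIGBLOCK_SIZE = 0x800000:
	 *   i             = 0x01000010 / 0x800000        = 2
	 *   result_offset = vol0_undo_array[2] + (0x01000010 & 0x7FFFFF)
	 *                 = vol0_undo_array[2] + 0x10
	 */
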
static int
hammer_vfs_statvfs(struct mount *mp, struct statvfs *sbp, struct ucred *cred)
{
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	hammer_volume_t volume;
	hammer_volume_ondisk_t ondisk;
	int error;
	int64_t bfree;
	int64_t breserved;

	lwkt_gettoken(&hmp->fs_token);
	volume = hammer_get_root_volume(hmp, &error);
	if (error) {
		lwkt_reltoken(&hmp->fs_token);
		return(error);
	}
	ondisk = volume->ondisk;

	/*
	 * Basic stats
	 */
	_hammer_checkspace(hmp, HAMMER_CHKSPC_WRITE, &breserved);
	mp->mnt_vstat.f_files = ondisk->vol0_stat_inodes;
	bfree = ondisk->vol0_stat_freebigblocks * HAMMER_LARGEBLOCK_SIZE;
	hammer_rel_volume(volume, 0);

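	/*
	 * Convert the free space (bytes, minus the write reservation)
	 * into f_bfree/f_bavail counts of HAMMER_BUFSIZE blocks.
	 */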
	mp->mnt_vstat.f_bfree = (bfree - breserved) / HAMMER_BUFSIZE;
	mp->mnt_vstat.f_bavail = mp->mnt_vstat.f_bfree;
	if (mp->mnt_vstat.f_files < 0)
		mp->mnt_vstat.f_files = 0;
	*sbp = mp->mnt_vstat;
	lwkt_reltoken(&hmp->fs_token);
	return(0);
}
Example #3
0
/*
 * nnode is a newly allocated node.  If elm is non-NULL it is the element
 * within nnode's parent that will be repointed at nnode; if elm is NULL,
 * nnode becomes the root node.
 */
static void
hammer_move_node(hammer_cursor_t cursor, hammer_btree_elm_t elm,
		 hammer_node_t onode, hammer_node_t nnode)
{
	int error, i;

	bcopy(onode->ondisk, nnode->ondisk, sizeof(*nnode->ondisk));

	/*
	 * Adjust the parent's pointer to us first.
	 */
	if (elm) {
		/*
		 * We are not the root of the B-Tree
		 */
		KKASSERT(hammer_is_internal_node_elm(elm));
		hammer_modify_node(cursor->trans, cursor->parent,
				   &elm->internal.subtree_offset,
				   sizeof(elm->internal.subtree_offset));
		elm->internal.subtree_offset = nnode->node_offset;
		hammer_modify_node_done(cursor->parent);
	} else {
		/*
		 * We are the root of the B-Tree
		 */
		hammer_volume_t volume;
		volume = hammer_get_root_volume(cursor->trans->hmp, &error);
		KKASSERT(error == 0);

		hammer_modify_volume_field(cursor->trans, volume,
					   vol0_btree_root);
		volume->ondisk->vol0_btree_root = nnode->node_offset;
		hammer_modify_volume_done(volume);
		hammer_rel_volume(volume, 0);
	}

	/*
	 * Now adjust our children's pointers to us
	 * if we are an internal node.
	 */
	if (nnode->ondisk->type == HAMMER_BTREE_TYPE_INTERNAL) {
		for (i = 0; i < nnode->ondisk->count; ++i) {
			error = btree_set_parent_of_child(cursor->trans, nnode,
					&nnode->ondisk->elms[i]);
			if (error)
				hpanic("reblock internal node: fixup problem");
		}
	}
}
Example #4
0
/*
 * Start a simple read-only transaction.  This will not stall.
 */
void
hammer_simple_transaction(struct hammer_transaction *trans,
			  struct hammer_mount *hmp)
{
	struct timeval tv;
	int error;

	trans->type = HAMMER_TRANS_RO;
	trans->hmp = hmp;
	trans->rootvol = hammer_get_root_volume(hmp, &error);
	KKASSERT(error == 0);
	trans->tid = 0;
	trans->sync_lock_refs = 0;
	trans->flags = 0;

	getmicrotime(&tv);
	trans->time = (unsigned long)tv.tv_sec * 1000000ULL + tv.tv_usec;
	trans->time32 = (u_int32_t)tv.tv_sec;
}
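
A minimal usage sketch, assuming hammer_done_transaction() is the matching teardown as at other HAMMER call sites:

	struct hammer_transaction trans;

	hammer_simple_transaction(&trans, hmp);
	/* ... perform read-only lookups against trans ... */
	hammer_done_transaction(&trans);
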
Example #5
0
/*
 * Convert a zone-3 undo offset into a zone-2 buffer offset.
 */
hammer_off_t
hammer_undo_lookup(hammer_mount_t hmp, hammer_off_t zone3_off, int *errorp)
{
	hammer_volume_t root_volume;
	hammer_blockmap_t undomap __debugvar;
	hammer_off_t result_offset;

	KKASSERT(hammer_is_zone_undo(zone3_off));
	root_volume = hammer_get_root_volume(hmp, errorp);
	if (*errorp)
		return(0);
	undomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
	KKASSERT(hammer_is_zone_undo(undomap->alloc_offset));
	KKASSERT(zone3_off < undomap->alloc_offset);

	result_offset = hammer_xlate_to_undo(root_volume->ondisk, zone3_off);

	hammer_rel_volume(root_volume, 0);
	return(result_offset);
}
Example #6
0
/*
 * Start a transaction using a particular TID.  Used by the sync code.
 * This does not stall.
 *
 * This routine may only be called from the flusher thread.  We predispose
 * sync_lock_refs, implying serialization against the synchronization stage
 * (which the flusher is responsible for).
 */
void
hammer_start_transaction_fls(struct hammer_transaction *trans,
			     struct hammer_mount *hmp)
{
	struct timeval tv;
	int error;

	bzero(trans, sizeof(*trans));

	trans->type = HAMMER_TRANS_FLS;
	trans->hmp = hmp;
	trans->rootvol = hammer_get_root_volume(hmp, &error);
	KKASSERT(error == 0);
	trans->tid = hammer_alloc_tid(hmp, 1);
	trans->sync_lock_refs = 1;
	trans->flags = 0;

	getmicrotime(&tv);
	trans->time = (unsigned long)tv.tv_sec * 1000000ULL + tv.tv_usec;
	trans->time32 = (u_int32_t)tv.tv_sec;
}
static int
hammer_vfs_mount(struct mount *mp, char *mntpt, caddr_t data,
		 struct ucred *cred)
{
	struct hammer_mount_info info;
	hammer_mount_t hmp;
	hammer_volume_t rootvol;
	struct vnode *rootvp;
	struct vnode *devvp = NULL;
	const char *upath;	/* volume name in userspace */
	char *path;		/* volume name in system space */
	int error;
	int i;
	int master_id;
	char *next_volume_ptr = NULL;

	/*
	 * Accept hammer_mount_info.  mntpt is NULL for root mounts at boot.
	 */
	if (mntpt == NULL) {
		bzero(&info, sizeof(info));
		info.asof = 0;
		info.hflags = 0;
		info.nvolumes = 1;

		next_volume_ptr = mp->mnt_stat.f_mntfromname;

		/* Count number of volumes separated by ':' */
		for (char *p = next_volume_ptr; *p != '\0'; ++p) {
			if (*p == ':') {
				++info.nvolumes;
			}
		}

		mp->mnt_flag &= ~MNT_RDONLY; /* mount R/W */
	} else {
		if ((error = copyin(data, &info, sizeof(info))) != 0)
			return (error);
	}

	/*
	 * updating or new mount
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		hmp = (void *)mp->mnt_data;
		KKASSERT(hmp != NULL);
	} else {
		if (info.nvolumes <= 0 || info.nvolumes > HAMMER_MAX_VOLUMES)
			return (EINVAL);
		hmp = NULL;
	}

	/*
	 * master-id validation.  The master id may not be changed by a
	 * mount update.
	 */
	if (info.hflags & HMNT_MASTERID) {
		if (hmp && hmp->master_id != info.master_id) {
			kprintf("hammer: cannot change master id "
				"with mount update\n");
			return(EINVAL);
		}
		master_id = info.master_id;
		if (master_id < -1 || master_id >= HAMMER_MAX_MASTERS)
			return (EINVAL);
	} else {
		if (hmp)
			master_id = hmp->master_id;
		else
			master_id = 0;
	}

	/*
	 * Internal mount data structure
	 */
	if (hmp == NULL) {
		hmp = kmalloc(sizeof(*hmp), M_HAMMER, M_WAITOK | M_ZERO);
		mp->mnt_data = (qaddr_t)hmp;
		hmp->mp = mp;
		/*TAILQ_INIT(&hmp->recycle_list);*/

		/*
		 * Make sure kmalloc type limits are set appropriately.
		 *
		 * Our inode kmalloc group is sized based on maxvnodes
		 * (controlled by the system, not us).
		 */
		kmalloc_create(&hmp->m_misc, "HAMMER-others");
		kmalloc_create(&hmp->m_inodes, "HAMMER-inodes");

		kmalloc_raise_limit(hmp->m_inodes, 0);	/* unlimited */

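		/*
		 * root_btree_beg/root_btree_end bracket the entire B-Tree
		 * key space and serve as the boundary elements for scans
		 * that start at the root node.
		 */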
		hmp->root_btree_beg.localization = 0x00000000U;
		hmp->root_btree_beg.obj_id = -0x8000000000000000LL;
		hmp->root_btree_beg.key = -0x8000000000000000LL;
		hmp->root_btree_beg.create_tid = 1;
		hmp->root_btree_beg.delete_tid = 1;
		hmp->root_btree_beg.rec_type = 0;
		hmp->root_btree_beg.obj_type = 0;

		hmp->root_btree_end.localization = 0xFFFFFFFFU;
		hmp->root_btree_end.obj_id = 0x7FFFFFFFFFFFFFFFLL;
		hmp->root_btree_end.key = 0x7FFFFFFFFFFFFFFFLL;
		hmp->root_btree_end.create_tid = 0xFFFFFFFFFFFFFFFFULL;
		hmp->root_btree_end.delete_tid = 0;   /* special case */
		hmp->root_btree_end.rec_type = 0xFFFFU;
		hmp->root_btree_end.obj_type = 0;

		hmp->krate.freq = 1;	/* maximum reporting rate (hz) */
		hmp->krate.count = -16;	/* initial burst */

		hmp->sync_lock.refs = 1;
		hmp->free_lock.refs = 1;
		hmp->undo_lock.refs = 1;
		hmp->blkmap_lock.refs = 1;
		hmp->snapshot_lock.refs = 1;
		hmp->volume_lock.refs = 1;

		TAILQ_INIT(&hmp->delay_list);
		TAILQ_INIT(&hmp->flush_group_list);
		TAILQ_INIT(&hmp->objid_cache_list);
		TAILQ_INIT(&hmp->undo_lru_list);
		TAILQ_INIT(&hmp->reclaim_list);

		RB_INIT(&hmp->rb_dedup_crc_root);
		RB_INIT(&hmp->rb_dedup_off_root);	
		TAILQ_INIT(&hmp->dedup_lru_list);
	}
	hmp->hflags &= ~HMNT_USERFLAGS;
	hmp->hflags |= info.hflags & HMNT_USERFLAGS;

	hmp->master_id = master_id;

	if (info.asof) {
		mp->mnt_flag |= MNT_RDONLY;
		hmp->asof = info.asof;
	} else {
		hmp->asof = HAMMER_MAX_TID;
	}

	hmp->volume_to_remove = -1;

	/*
	 * Re-open read-write if originally read-only, or vice versa.
	 *
	 * When going from read-only to read-write execute the stage2
	 * recovery if it has not already been run.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		lwkt_gettoken(&hmp->fs_token);
		error = 0;
		if (hmp->ronly && (mp->mnt_kern_flag & MNTK_WANTRDWR)) {
			kprintf("HAMMER read-only -> read-write\n");
			hmp->ronly = 0;
			RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
				hammer_adjust_volume_mode, NULL);
			rootvol = hammer_get_root_volume(hmp, &error);
			if (rootvol) {
				hammer_recover_flush_buffers(hmp, rootvol, 1);
				error = hammer_recover_stage2(hmp, rootvol);
				bcopy(rootvol->ondisk->vol0_blockmap,
				      hmp->blockmap,
				      sizeof(hmp->blockmap));
				hammer_rel_volume(rootvol, 0);
			}
			RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
				hammer_reload_inode, NULL);
			/* kernel clears MNT_RDONLY */
		} else if (hmp->ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
			kprintf("HAMMER read-write -> read-only\n");
			hmp->ronly = 1;	/* messy */
			RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
				hammer_reload_inode, NULL);
			hmp->ronly = 0;
			hammer_flusher_sync(hmp);
			hammer_flusher_sync(hmp);
			hammer_flusher_sync(hmp);
			hmp->ronly = 1;
			RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
				hammer_adjust_volume_mode, NULL);
		}
		lwkt_reltoken(&hmp->fs_token);
		return(error);
	}

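	/*
	 * First-time mount (not an update): initialize the in-memory
	 * RB trees, queues, and tokens before loading the volumes.
	 */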
	RB_INIT(&hmp->rb_vols_root);
	RB_INIT(&hmp->rb_inos_root);
	RB_INIT(&hmp->rb_redo_root);
	RB_INIT(&hmp->rb_nods_root);
	RB_INIT(&hmp->rb_undo_root);
	RB_INIT(&hmp->rb_resv_root);
	RB_INIT(&hmp->rb_bufs_root);
	RB_INIT(&hmp->rb_pfsm_root);

	hmp->ronly = ((mp->mnt_flag & MNT_RDONLY) != 0);

	RB_INIT(&hmp->volu_root);
	RB_INIT(&hmp->undo_root);
	RB_INIT(&hmp->data_root);
	RB_INIT(&hmp->meta_root);
	RB_INIT(&hmp->lose_root);
	TAILQ_INIT(&hmp->iorun_list);

	lwkt_token_init(&hmp->fs_token, "hammerfs");
	lwkt_token_init(&hmp->io_token, "hammerio");

	lwkt_gettoken(&hmp->fs_token);

	/*
	 * Load volumes
	 */
	path = objcache_get(namei_oc, M_WAITOK);
	hmp->nvolumes = -1;
	for (i = 0; i < info.nvolumes; ++i) {
		if (mntpt == NULL) {
			/*
			 * Root mount.
			 */
			KKASSERT(next_volume_ptr != NULL);
			strcpy(path, "");
			if (*next_volume_ptr != '/') {
				/* relative path */
				strcpy(path, "/dev/");
			}
			int k;
			for (k = strlen(path); k < MAXPATHLEN-1; ++k) {
				if (*next_volume_ptr == '\0') {
					break;
				} else if (*next_volume_ptr == ':') {
					++next_volume_ptr;
					break;
				} else {
					path[k] = *next_volume_ptr;
					++next_volume_ptr;
				}
			}
			path[k] = '\0';

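			/*
			 * Translate the device path to a vnode for
			 * hammer_install_volume() below.
			 */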
			error = 0;
			cdev_t dev = kgetdiskbyname(path);
			error = bdevvp(dev, &devvp);
			if (error) {
				kprintf("hammer_mountroot: can't find devvp\n");
			}
		} else {
			error = copyin(&info.volumes[i], &upath,
				       sizeof(char *));
			if (error == 0)
				error = copyinstr(upath, path,
						  MAXPATHLEN, NULL);
		}
		if (error == 0)
			error = hammer_install_volume(hmp, path, devvp);
		if (error)
			break;
	}
	objcache_put(namei_oc, path);

	/*
	 * Make sure we found a root volume
	 */
	if (error == 0 && hmp->rootvol == NULL) {
		kprintf("hammer_mount: No root volume found!\n");
		error = EINVAL;
	}

	/*
	 * Check that all required volumes are available
	 */
	if (error == 0 && hammer_mountcheck_volumes(hmp)) {
		kprintf("hammer_mount: Missing volumes, cannot mount!\n");
		error = EINVAL;
	}

	if (error) {
		/* called with fs_token held */
		hammer_free_hmp(mp);
		return (error);
	}

	/*
	 * No errors, setup enough of the mount point so we can lookup the
	 * root vnode.
	 */
	mp->mnt_iosize_max = MAXPHYS;
	mp->mnt_kern_flag |= MNTK_FSMID;
	mp->mnt_kern_flag |= MNTK_THR_SYNC;	/* new vsyncscan semantics */

	/*
	 * MPSAFE code.  Note that VOPs and VFSops which are not MPSAFE
	 * will acquire a per-mount token prior to entry and release it
	 * on return, so even if we do not specify it we no longer get
	 * the BGL regardless of how we are flagged.
	 */
	mp->mnt_kern_flag |= MNTK_ALL_MPSAFE;
	/*MNTK_RD_MPSAFE | MNTK_GA_MPSAFE | MNTK_IN_MPSAFE;*/

	/* 
	 * note: f_iosize is used by vnode_pager_haspage() when constructing
	 * its VOP_BMAP call.
	 */
	mp->mnt_stat.f_iosize = HAMMER_BUFSIZE;
	mp->mnt_stat.f_bsize = HAMMER_BUFSIZE;

	mp->mnt_vstat.f_frsize = HAMMER_BUFSIZE;
	mp->mnt_vstat.f_bsize = HAMMER_BUFSIZE;

	mp->mnt_maxsymlinklen = 255;
	mp->mnt_flag |= MNT_LOCAL;

	vfs_add_vnodeops(mp, &hammer_vnode_vops, &mp->mnt_vn_norm_ops);
	vfs_add_vnodeops(mp, &hammer_spec_vops, &mp->mnt_vn_spec_ops);
	vfs_add_vnodeops(mp, &hammer_fifo_vops, &mp->mnt_vn_fifo_ops);

	/*
	 * The root volume's ondisk pointer is only valid if we hold a
	 * reference to it.
	 */
	rootvol = hammer_get_root_volume(hmp, &error);
	if (error)
		goto failed;

	/*
	 * Perform any necessary UNDO operations.  The recovery code does
	 * call hammer_undo_lookup() so we have to pre-cache the blockmap,
	 * and then re-copy it again after recovery is complete.
	 *
	 * If this is a read-only mount the UNDO information is retained
	 * in memory in the form of dirty buffer cache buffers, and not
	 * written back to the media.
	 */
	bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap,
	      sizeof(hmp->blockmap));

	/*
	 * Check filesystem version
	 */
	hmp->version = rootvol->ondisk->vol_version;
	if (hmp->version < HAMMER_VOL_VERSION_MIN ||
	    hmp->version > HAMMER_VOL_VERSION_MAX) {
		kprintf("HAMMER: mount unsupported fs version %d\n",
			hmp->version);
		error = ERANGE;
		goto done;
	}

	/*
	 * The undo_rec_limit limits the size of flush groups to avoid
	 * blowing out the UNDO FIFO.  The calculated limit is typically in
	 * the tens of thousands and matters primarily for small HAMMER
	 * filesystems.
	 */
	hmp->undo_rec_limit = hammer_undo_max(hmp) / 8192 + 100;
	if (hammer_debug_general & 0x0001)
		kprintf("HAMMER: undo_rec_limit %d\n", hmp->undo_rec_limit);
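	/*
	 * For example (hypothetical sizing): a 512MB UNDO FIFO yields
	 * 512MB / 8192 + 100 = 65636, limiting each flush group to
	 * roughly that many records.
	 */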

	/*
	 * NOTE: Recover stage1 not only handles meta-data recovery, it
	 * 	 also sets hmp->undo_seqno for HAMMER VERSION 4+ filesystems.
	 */
	error = hammer_recover_stage1(hmp, rootvol);
	if (error) {
		kprintf("Failed to recover HAMMER filesystem on mount\n");
		goto done;
	}

	/*
	 * Finish setup now that we have a good root volume.
	 *
	 * The top 16 bits of fsid.val[1] is a pfs id.
	 */
	ksnprintf(mp->mnt_stat.f_mntfromname,
		  sizeof(mp->mnt_stat.f_mntfromname), "%s",
		  rootvol->ondisk->vol_name);
	mp->mnt_stat.f_fsid.val[0] =
		crc32((char *)&rootvol->ondisk->vol_fsid + 0, 8);
	mp->mnt_stat.f_fsid.val[1] =
		crc32((char *)&rootvol->ondisk->vol_fsid + 8, 8);
	mp->mnt_stat.f_fsid.val[1] &= 0x0000FFFF;

	mp->mnt_vstat.f_fsid_uuid = rootvol->ondisk->vol_fsid;
	mp->mnt_vstat.f_fsid = crc32(&mp->mnt_vstat.f_fsid_uuid,
				     sizeof(mp->mnt_vstat.f_fsid_uuid));

	/*
	 * Certain often-modified fields in the root volume are cached in
	 * the hammer_mount structure so we do not have to generate lots
	 * of little UNDO structures for them.
	 *
	 * Recopy after recovery.  This also has the side effect of
	 * setting our cached undo FIFO's first_offset, which serves to
	 * placemark the FIFO start for the NEXT flush cycle while the
	 * on-disk first_offset represents the LAST flush cycle.
	 */
	hmp->next_tid = rootvol->ondisk->vol0_next_tid;
	hmp->flush_tid1 = hmp->next_tid;
	hmp->flush_tid2 = hmp->next_tid;
	bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap,
	      sizeof(hmp->blockmap));
	hmp->copy_stat_freebigblocks = rootvol->ondisk->vol0_stat_freebigblocks;

	hammer_flusher_create(hmp);

	/*
	 * Locate the root directory using the root cluster's B-Tree as a
	 * starting point.  The root directory uses an obj_id of 1.
	 *
	 * FUTURE: Leave the root directory cached referenced but unlocked
	 * in hmp->rootvp (need to flush it on unmount).
	 */
	error = hammer_vfs_vget(mp, NULL, 1, &rootvp);
	if (error)
		goto done;
	vput(rootvp);
	/*vn_unlock(hmp->rootvp);*/
	if (hmp->ronly == 0)
		error = hammer_recover_stage2(hmp, rootvol);

	/*
	 * If the stage2 recovery fails be sure to clean out all cached
	 * vnodes before throwing away the mount structure or bad things
	 * will happen.
	 */
	if (error)
		vflush(mp, 0, 0);

done:
	if ((mp->mnt_flag & MNT_UPDATE) == 0) {
		/* New mount */

		/* Populate info for mount point (NULL pad)*/
		bzero(mp->mnt_stat.f_mntonname, MNAMELEN);
		size_t size;
		if (mntpt) {
			copyinstr(mntpt, mp->mnt_stat.f_mntonname,
							MNAMELEN -1, &size);
		} else { /* Root mount */
			mp->mnt_stat.f_mntonname[0] = '/';
		}
	}
	(void)VFS_STATFS(mp, &mp->mnt_stat, cred);
	hammer_rel_volume(rootvol, 0);
failed:
	/*
	 * Cleanup and return.
	 */
	if (error) {
		/* called with fs_token held */
		hammer_free_hmp(mp);
	} else {
		lwkt_reltoken(&hmp->fs_token);
	}
	return (error);
}
Example #8
0
/*
 * Set version info
 */
static
int
hammer_ioc_set_version(hammer_transaction_t trans, hammer_inode_t ip,
		   struct hammer_ioc_version *ver)
{
	hammer_mount_t hmp = trans->hmp;
	struct hammer_cursor cursor;
	hammer_volume_t volume;
	int error;
	int over = hmp->version;

	/*
	 * Generally do not allow downgrades.  However, version 4 can
	 * be downgraded to version 3.
	 */
	if (ver->cur_version < hmp->version) {
		if (!(ver->cur_version == 3 && hmp->version == 4))
			return(EINVAL);
	}
	if (ver->cur_version == hmp->version)
		return(0);
	if (ver->cur_version > HAMMER_VOL_VERSION_MAX)
		return(EINVAL);
	if (hmp->ronly)
		return(EROFS);

	/*
	 * Update the root volume header and the version cached in
	 * the hammer_mount structure.
	 */
	error = hammer_init_cursor(trans, &cursor, NULL, NULL);
	if (error)
		goto failed;
	hammer_lock_ex(&hmp->flusher.finalize_lock);
	hammer_sync_lock_ex(trans);
	hmp->version = ver->cur_version;

	/*
	 * If upgrading from version < 4 to version >= 4 the UNDO FIFO
	 * must be reinitialized.
	 */
	if (over < HAMMER_VOL_VERSION_FOUR &&
	    ver->cur_version >= HAMMER_VOL_VERSION_FOUR) {
		hkprintf("upgrade undo to version 4\n");
		error = hammer_upgrade_undo_4(trans);
		if (error)
			goto failed;
	}

	/*
	 * Adjust the version in the volume header
	 */
	volume = hammer_get_root_volume(hmp, &error);
	KKASSERT(error == 0);
	hammer_modify_volume_field(cursor.trans, volume, vol_version);
	volume->ondisk->vol_version = ver->cur_version;
	hammer_modify_volume_done(volume);
	hammer_rel_volume(volume, 0);

	hammer_sync_unlock(trans);
	hammer_unlock(&hmp->flusher.finalize_lock);
failed:
	ver->head.error = error;
	hammer_done_cursor(&cursor);
	return(0);
}
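
A hedged userspace-side sketch of driving this path, assuming the HAMMERIOC_SET_VERSION ioctl dispatches to this handler as in the HAMMER ioctl interface (the fd handling is illustrative):

	struct hammer_ioc_version ver;

	bzero(&ver, sizeof(ver));
	ver.cur_version = 4;			/* requested version */
	if (ioctl(fd, HAMMERIOC_SET_VERSION, &ver) < 0)
		err(1, "HAMMERIOC_SET_VERSION");
	/* the kernel reports the per-operation status in ver.head.error */
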
Example #9
0
/*
 * Reblock a B-Tree internal node.  The parent must be adjusted to point to
 * the new copy of the internal node, and the node's children's parent
 * pointers must also be adjusted to point to the new copy.
 *
 * elm is a pointer to the parent element pointing at cursor.node.
 */
static int
hammer_reblock_int_node(struct hammer_ioc_reblock *reblock,
			 hammer_cursor_t cursor, hammer_btree_elm_t elm)
{
	struct hammer_node_lock lockroot;
	hammer_node_t onode;
	hammer_node_t nnode;
	int error;
	int i;

	hammer_node_lock_init(&lockroot, cursor->node);
	error = hammer_btree_lock_children(cursor, 1, &lockroot, NULL);
	if (error)
		goto done;

	onode = cursor->node;
	nnode = hammer_alloc_btree(cursor->trans, 0, &error);

	if (nnode == NULL)
		goto done;

	/*
	 * Move the node.  Adjust the parent's pointer to us first.
	 */
	hammer_lock_ex(&nnode->lock);
	hammer_modify_node_noundo(cursor->trans, nnode);
	bcopy(onode->ondisk, nnode->ondisk, sizeof(*nnode->ondisk));

	if (elm) {
		/*
		 * We are not the root of the B-Tree 
		 */
		hammer_modify_node(cursor->trans, cursor->parent,
				   &elm->internal.subtree_offset,
				   sizeof(elm->internal.subtree_offset));
		elm->internal.subtree_offset = nnode->node_offset;
		hammer_modify_node_done(cursor->parent);
	} else {
		/*
		 * We are the root of the B-Tree
		 */
		hammer_volume_t volume;

		volume = hammer_get_root_volume(cursor->trans->hmp, &error);
		KKASSERT(error == 0);

		hammer_modify_volume_field(cursor->trans, volume,
					   vol0_btree_root);
		volume->ondisk->vol0_btree_root = nnode->node_offset;
		hammer_modify_volume_done(volume);
		hammer_rel_volume(volume, 0);
	}

	/*
	 * Now adjust our children's pointers to us.
	 */
	for (i = 0; i < nnode->ondisk->count; ++i) {
		elm = &nnode->ondisk->elms[i];
		error = btree_set_parent(cursor->trans, nnode, elm);
		if (error)
			panic("reblock internal node: fixup problem");
	}

	/*
	 * Clean up.
	 *
	 * The new node replaces the current node in the cursor.  The cursor
	 * expects it to be locked so leave it locked.  Discard onode.
	 */
	hammer_cursor_replaced_node(onode, nnode);
	hammer_delete_node(cursor->trans, onode);

	if (hammer_debug_general & 0x4000) {
		kprintf("REBLOCK INODE %016llx -> %016llx\n",
			(long long)onode->node_offset,
			(long long)nnode->node_offset);
	}
	hammer_modify_node_done(nnode);
	cursor->node = nnode;

	hammer_unlock(&onode->lock);
	hammer_rel_node(onode);

done:
	hammer_btree_unlock_children(cursor->trans->hmp, &lockroot, NULL);
	return (error);
}
Example #10
0
/*
 * Reblock a B-Tree leaf node.  The parent must be adjusted to point to
 * the new copy of the leaf node.
 *
 * elm is a pointer to the parent element pointing at cursor.node.
 */
static int
hammer_reblock_leaf_node(struct hammer_ioc_reblock *reblock,
			 hammer_cursor_t cursor, hammer_btree_elm_t elm)
{
	hammer_node_t onode;
	hammer_node_t nnode;
	int error;

	/*
	 * Don't supply a hint when allocating the leaf.  Fills are done
	 * from the leaf upwards.
	 */
	onode = cursor->node;
	nnode = hammer_alloc_btree(cursor->trans, 0, &error);

	if (nnode == NULL)
		return (error);

	/*
	 * Move the node
	 */
	hammer_lock_ex(&nnode->lock);
	hammer_modify_node_noundo(cursor->trans, nnode);
	bcopy(onode->ondisk, nnode->ondisk, sizeof(*nnode->ondisk));

	if (elm) {
		/*
		 * We are not the root of the B-Tree 
		 */
		hammer_modify_node(cursor->trans, cursor->parent,
				   &elm->internal.subtree_offset,
				   sizeof(elm->internal.subtree_offset));
		elm->internal.subtree_offset = nnode->node_offset;
		hammer_modify_node_done(cursor->parent);
	} else {
		/*
		 * We are the root of the B-Tree
		 */
		hammer_volume_t volume;

		volume = hammer_get_root_volume(cursor->trans->hmp, &error);
		KKASSERT(error == 0);

		hammer_modify_volume_field(cursor->trans, volume,
					   vol0_btree_root);
		volume->ondisk->vol0_btree_root = nnode->node_offset;
		hammer_modify_volume_done(volume);
		hammer_rel_volume(volume, 0);
	}

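	/*
	 * Unlike the internal-node case there are no children to fix up;
	 * leaf elements do not reference subtrees.
	 */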
	hammer_cursor_replaced_node(onode, nnode);
	hammer_delete_node(cursor->trans, onode);

	if (hammer_debug_general & 0x4000) {
		kprintf("REBLOCK LNODE %016llx -> %016llx\n",
			(long long)onode->node_offset,
			(long long)nnode->node_offset);
	}
	hammer_modify_node_done(nnode);
	cursor->node = nnode;

	hammer_unlock(&onode->lock);
	hammer_rel_node(onode);

	return (error);
}