Example #1
/************************************************************************
 *				VOLUMES					*
 ************************************************************************
 *
 * Load a HAMMER volume by name.  Returns 0 on success or a positive error
 * code on failure.  Volumes must be loaded at mount time; get_volume() will
 * not load a new volume.
 *
 * The passed devvp is vref()'d but not locked.  This function consumes the
 * ref (typically by associating it with the volume structure).
 *
 * Calls made to hammer_load_volume() are single-threaded.
 */
int
hammer_install_volume(struct hammer_mount *hmp, const char *volname,
		      struct vnode *devvp)
{
	struct mount *mp;
	hammer_volume_t volume;
	struct hammer_volume_ondisk *ondisk;
	struct nlookupdata nd;
	struct buf *bp = NULL;
	int error;
	int ronly;
	int setmp = 0;

	mp = hmp->mp;
	ronly = ((mp->mnt_flag & MNT_RDONLY) ? 1 : 0);

	/*
	 * Allocate a volume structure
	 */
	++hammer_count_volumes;
	volume = kmalloc(sizeof(*volume), hmp->m_misc, M_WAITOK|M_ZERO);
	volume->vol_name = kstrdup(volname, hmp->m_misc);
	volume->io.hmp = hmp;	/* bootstrap */
	hammer_io_init(&volume->io, volume, HAMMER_STRUCTURE_VOLUME);
	volume->io.offset = 0LL;
	volume->io.bytes = HAMMER_BUFSIZE;

	/*
	 * Get the device vnode
	 */
	if (devvp == NULL) {
		error = nlookup_init(&nd, volume->vol_name, UIO_SYSSPACE, NLC_FOLLOW);
		if (error == 0)
			error = nlookup(&nd);
		if (error == 0)
			error = cache_vref(&nd.nl_nch, nd.nl_cred, &volume->devvp);
		nlookup_done(&nd);
	} else {
		error = 0;
		volume->devvp = devvp;
	}

	if (error == 0) {
		if (vn_isdisk(volume->devvp, &error)) {
			error = vfs_mountedon(volume->devvp);
		}
	}
	if (error == 0 && vcount(volume->devvp) > 0)
		error = EBUSY;
	if (error == 0) {
		vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
		error = vinvalbuf(volume->devvp, V_SAVE, 0, 0);
		if (error == 0) {
			error = VOP_OPEN(volume->devvp, 
					 (ronly ? FREAD : FREAD|FWRITE),
					 FSCRED, NULL);
		}
		vn_unlock(volume->devvp);
	}
	if (error) {
		hammer_free_volume(volume);
		return(error);
	}
	volume->devvp->v_rdev->si_mountpoint = mp;
	setmp = 1;

	/*
	 * Extract the volume number from the volume header and do various
	 * sanity checks.
	 */
	error = bread(volume->devvp, 0LL, HAMMER_BUFSIZE, &bp);
	if (error)
		goto late_failure;
	ondisk = (void *)bp->b_data;
	if (ondisk->vol_signature != HAMMER_FSBUF_VOLUME) {
		kprintf("hammer_mount: volume %s has an invalid header\n",
			volume->vol_name);
		error = EFTYPE;
		goto late_failure;
	}
	volume->vol_no = ondisk->vol_no;
	volume->buffer_base = ondisk->vol_buf_beg;
	volume->vol_flags = ondisk->vol_flags;
	volume->nblocks = ondisk->vol_nblocks; 
	volume->maxbuf_off = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no,
				    ondisk->vol_buf_end - ondisk->vol_buf_beg);
	volume->maxraw_off = ondisk->vol_buf_end;

	if (RB_EMPTY(&hmp->rb_vols_root)) {
		hmp->fsid = ondisk->vol_fsid;
	} else if (bcmp(&hmp->fsid, &ondisk->vol_fsid, sizeof(uuid_t))) {
		kprintf("hammer_mount: volume %s's fsid does not match "
			"other volumes\n", volume->vol_name);
		error = EFTYPE;
		goto late_failure;
	}

	/*
	 * Insert the volume structure into the red-black tree.
	 */
	if (RB_INSERT(hammer_vol_rb_tree, &hmp->rb_vols_root, volume)) {
		kprintf("hammer_mount: volume %s has a duplicate vol_no %d\n",
			volume->vol_name, volume->vol_no);
		error = EEXIST;
	}

	/*
	 * Set the root volume.  HAMMER special-cases the rootvol structure.
	 * We do not hold a ref because this would prevent related I/O
	 * from being flushed.
	 */
	if (error == 0 && ondisk->vol_rootvol == ondisk->vol_no) {
		hmp->rootvol = volume;
		hmp->nvolumes = ondisk->vol_count;
		if (bp) {
			brelse(bp);
			bp = NULL;
		}
		hmp->mp->mnt_stat.f_blocks += ondisk->vol0_stat_bigblocks *
			(HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
		hmp->mp->mnt_vstat.f_blocks += ondisk->vol0_stat_bigblocks *
			(HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
	}
late_failure:
	if (bp)
		brelse(bp);
	if (error) {
		/*vinvalbuf(volume->devvp, V_SAVE, 0, 0);*/
		if (setmp)
			volume->devvp->v_rdev->si_mountpoint = NULL;
		vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
		VOP_CLOSE(volume->devvp, ronly ? FREAD : FREAD|FWRITE);
		vn_unlock(volume->devvp);
		hammer_free_volume(volume);
	}
	return (error);
}
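
A hypothetical mount-time caller for hammer_install_volume(), illustrating the
contract spelled out in the header comment above (positive error codes, devvp
reference consumed by the function).  The function name and message below are
illustrative only, not taken from the HAMMER sources; it uses the NULL-devvp
path, in which hammer_install_volume() looks the device up by name itself.

static int
example_install_root_volume(struct hammer_mount *hmp, const char *volname)
{
	int error;

	/*
	 * Passing devvp == NULL makes hammer_install_volume() perform the
	 * nlookup()/cache_vref() itself.  On failure the volume structure
	 * has already been freed and a positive error code is returned.
	 */
	error = hammer_install_volume(hmp, volname, NULL);
	if (error) {
		kprintf("hammer_mount: unable to install volume %s (%d)\n",
			volname, error);
	}
	return (error);
}
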
static int
format_callback(hammer_transaction_t trans, hammer_volume_t volume,
	hammer_buffer_t *bufferp,
	struct hammer_blockmap_layer1 *layer1,
	struct hammer_blockmap_layer2 *layer2,
	hammer_off_t phys_off,
	hammer_off_t block_off,
	void *data)
{
	struct bigblock_stat *stat = (struct bigblock_stat*)data;

	/*
	 * Calculate the usable size of the volume, which must be aligned
	 * at a bigblock (8 MB) boundary.
	 */
	hammer_off_t aligned_buf_end_off;
	aligned_buf_end_off = (HAMMER_ENCODE_RAW_BUFFER(volume->ondisk->vol_no,
		(volume->ondisk->vol_buf_end - volume->ondisk->vol_buf_beg)
		& ~HAMMER_LARGEBLOCK_MASK64));

	if (layer1) {
		KKASSERT(layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL);

		hammer_modify_buffer(trans, *bufferp, layer1, sizeof(*layer1));
		bzero(layer1, sizeof(*layer1));
		layer1->phys_offset = phys_off;
		layer1->blocks_free = stat->counter;
		layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
		hammer_modify_buffer_done(*bufferp);

		stat->total_free_bigblocks += stat->counter;
		stat->counter = 0; /* reset */
	} else if (layer2) {
		hammer_modify_buffer(trans, *bufferp, layer2, sizeof(*layer2));
		bzero(layer2, sizeof(*layer2));

		if (block_off == 0) {
			/*
			 * The first entry represents the L2 bigblock itself.
			 */
			layer2->zone = HAMMER_ZONE_FREEMAP_INDEX;
			layer2->append_off = HAMMER_LARGEBLOCK_SIZE;
			layer2->bytes_free = 0;
			++stat->total_bigblocks;
		} else if (phys_off + block_off < aligned_buf_end_off) {
			/*
			 * Available bigblock
			 */
			layer2->zone = 0;
			layer2->append_off = 0;
			layer2->bytes_free = HAMMER_LARGEBLOCK_SIZE;
			++stat->total_bigblocks;
			++stat->counter;
		} else {
			/*
			 * Bigblock outside of physically available
			 * space
			 */
			layer2->zone = HAMMER_ZONE_UNAVAIL_INDEX;
			layer2->append_off = HAMMER_LARGEBLOCK_SIZE;
			layer2->bytes_free = 0;
		}

		layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
		hammer_modify_buffer_done(*bufferp);
	} else {
		KKASSERT(0);
	}

	return 0;
}
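
Both format_callback() above and hammer_iterate_l1l2_entries() further below
round the volume's buffer area down to an 8 MB big-block boundary before
encoding it as a raw-buffer offset.  The small stand-alone program below works
through that masking with concrete numbers; the EX_* constants are
illustrative stand-ins for HAMMER_LARGEBLOCK_SIZE and HAMMER_LARGEBLOCK_MASK64,
not the real header definitions.

#include <stdio.h>
#include <stdint.h>

#define EX_LARGEBLOCK_SIZE	(8ULL * 1024 * 1024)	/* 8 MiB big-block */
#define EX_LARGEBLOCK_MASK64	(EX_LARGEBLOCK_SIZE - 1)

int
main(void)
{
	/* Pretend vol_buf_end - vol_buf_beg is 100 MiB. */
	uint64_t buf_bytes = 100ULL * 1024 * 1024;

	/* Round down to whole 8 MiB big-blocks: 100 MiB -> 96 MiB. */
	uint64_t aligned = buf_bytes & ~EX_LARGEBLOCK_MASK64;

	printf("usable: %llu bytes (%llu big-blocks), %llu bytes ignored\n",
	       (unsigned long long)aligned,
	       (unsigned long long)(aligned / EX_LARGEBLOCK_SIZE),
	       (unsigned long long)(buf_bytes - aligned));
	return 0;
}
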
Example #3
// corresponds to hammer_install_volume
static int
hammerfs_install_volume(struct hammer_mount *hmp, struct super_block *sb) {
    struct buffer_head * bh;
    hammer_volume_t volume;
    struct hammer_volume_ondisk *ondisk;
    int error = 0;

    /*
     * Allocate a volume structure
     */
    ++hammer_count_volumes;
    volume = kzalloc(sizeof(struct hammer_volume), GFP_KERNEL);
    volume->vol_name = kstrdup(sb->s_id, GFP_KERNEL);
    volume->io.hmp = hmp;   /* bootstrap */
    volume->io.offset = 0LL;
    volume->io.bytes = HAMMER_BUFSIZE;

    volume->sb = sb;

    /*
     * Extract the volume number from the volume header and do various
     * sanity checks.
     */
    bh = sb_bread(sb, 0);
    if (!bh) {
        printk(KERN_ERR "HAMMER: %s: unable to read superblock\n", sb->s_id);
        error = -EINVAL;
        goto failed;
    }

    ondisk = (struct hammer_volume_ondisk *)bh->b_data;
    if (ondisk->vol_signature != HAMMER_FSBUF_VOLUME) {
        printk(KERN_ERR "hammer_mount: volume %s has an invalid header\n",
                volume->vol_name);
        error = -EINVAL;
        goto failed;
    }

    volume->ondisk = ondisk;
    volume->vol_no = ondisk->vol_no;
    volume->buffer_base = ondisk->vol_buf_beg;
    volume->vol_flags = ondisk->vol_flags;
    volume->nblocks = ondisk->vol_nblocks; 
    volume->maxbuf_off = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no,
                                ondisk->vol_buf_end - ondisk->vol_buf_beg);
    volume->maxraw_off = ondisk->vol_buf_end;

    if (RB_EMPTY(&hmp->rb_vols_root)) {
        hmp->fsid = ondisk->vol_fsid;
    } else if (bcmp(&hmp->fsid, &ondisk->vol_fsid, sizeof(uuid_t))) {
        printk(KERN_ERR "hammer_mount: volume %s's fsid does not match "
                        "other volumes\n", volume->vol_name);
        error = -EINVAL;
        goto failed;
    }

    /*
     * Insert the volume structure into the red-black tree.
     */
    if (RB_INSERT(hammer_vol_rb_tree, &hmp->rb_vols_root, volume)) {
        printk(KERN_ERR "hammer_mount: volume %s has a duplicate vol_no %d\n",
            volume->vol_name, volume->vol_no);
        error = -EEXIST;
    }

    /*
     * Set the root volume.  HAMMER special-cases the rootvol structure.
     * We do not hold a ref because this would prevent related I/O
     * from being flushed.
     */
    if (error == 0 && ondisk->vol_rootvol == ondisk->vol_no) {
        hmp->rootvol = volume;
        hmp->nvolumes = ondisk->vol_count;
    }

    return(0);

failed:
    if (bh)
        brelse(bh);
    return(error);
}
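
The Linux port above would typically be reached from a fill_super-style mount
hook.  The sketch below is hypothetical wiring (example_hammerfs_fill_super and
the bare kzalloc of the mount structure are illustrative, not taken from the
port); its main point is that the port follows the Linux convention of negative
errno values, unlike the positive error codes returned in Example #1.

static int
example_hammerfs_fill_super(struct super_block *sb, void *data, int silent)
{
    struct hammer_mount *hmp;
    int error;

    /* Assumed: the port keeps its per-mount state in sb->s_fs_info. */
    hmp = kzalloc(sizeof(*hmp), GFP_KERNEL);
    if (!hmp)
        return -ENOMEM;
    sb->s_fs_info = hmp;

    /* Reads block 0, validates the volume header, registers the volume. */
    error = hammerfs_install_volume(hmp, sb);
    if (error) {
        sb->s_fs_info = NULL;
        kfree(hmp);
    }
    return error;
}
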
/*
 * Iterate over all usable L1 entries of the volume and
 * the corresponding L2 entries.
 */
static int
hammer_iterate_l1l2_entries(hammer_transaction_t trans, hammer_volume_t volume,
	int (*callback)(hammer_transaction_t, hammer_volume_t, hammer_buffer_t*,
		struct hammer_blockmap_layer1*, struct hammer_blockmap_layer2*,
		hammer_off_t, hammer_off_t, void*),
	void *data)
{
	struct hammer_mount *hmp = trans->hmp;
	hammer_blockmap_t freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	hammer_buffer_t buffer = NULL;
	int error = 0;

	hammer_off_t phys_off;
	hammer_off_t block_off;
	hammer_off_t layer1_off;
	hammer_off_t layer2_off;
	hammer_off_t aligned_buf_end_off;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;

	/*
	 * Calculate the usable size of the volume, which
	 * must be aligned at a bigblock (8 MB) boundary.
	 */
	aligned_buf_end_off = (HAMMER_ENCODE_RAW_BUFFER(volume->ondisk->vol_no,
		(volume->ondisk->vol_buf_end - volume->ondisk->vol_buf_beg)
		& ~HAMMER_LARGEBLOCK_MASK64));

	/*
	 * Iterate the volume's address space in chunks of 4 TB, where each
	 * chunk consists of at least one physically available 8 MB bigblock.
	 *
	 * For each chunk we need one L1 entry and one L2 bigblock.
	 * We use the first bigblock of each chunk as the L2 block.
	 */
	for (phys_off = HAMMER_ENCODE_RAW_BUFFER(volume->ondisk->vol_no, 0);
	     phys_off < aligned_buf_end_off;
	     phys_off += HAMMER_BLOCKMAP_LAYER2) {
		for (block_off = 0;
		     block_off < HAMMER_BLOCKMAP_LAYER2;
		     block_off += HAMMER_LARGEBLOCK_SIZE) {
			layer2_off = phys_off +
				HAMMER_BLOCKMAP_LAYER2_OFFSET(block_off);
			layer2 = hammer_bread(hmp, layer2_off, &error, &buffer);
			if (error)
				goto end;

			error = callback(trans, volume, &buffer, NULL,
					 layer2, phys_off, block_off, data);
			if (error)
				goto end;
		}

		layer1_off = freemap->phys_offset +
				HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_off);
		layer1 = hammer_bread(hmp, layer1_off, &error, &buffer);
		if (error)
			goto end;

		error = callback(trans, volume, &buffer, layer1, NULL,
				 phys_off, 0, data);
		if (error)
			goto end;
	}

end:
	if (buffer) {
		hammer_rel_buffer(buffer, 0);
		buffer = NULL;
	}

	return error;
}
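
The comment inside the iterator describes the layout it walks: one L1 entry per
4 TB chunk, with the first 8 MB big-block of each chunk reserved as the L2
block.  The sketch below shows how the iterator and format_callback() shown
earlier fit together, e.g. when formatting the freemap of a newly added volume.
struct bigblock_stat is not defined in the excerpts above; the layout here is
an assumption based on the fields format_callback() touches, and
example_format_freemap() is an illustrative name.

struct bigblock_stat {
	uint64_t total_bigblocks;
	uint64_t total_free_bigblocks;
	uint64_t counter;	/* free big-blocks in the current L1 chunk */
};

static int
example_format_freemap(hammer_transaction_t trans, hammer_volume_t volume)
{
	struct bigblock_stat stat;
	int error;

	bzero(&stat, sizeof(stat));

	/*
	 * For each 4 TB chunk the iterator hands every L2 entry to the
	 * callback first and the owning L1 entry last, so format_callback()
	 * can accumulate the chunk's free count in stat.counter before the
	 * L1 entry is written out.
	 */
	error = hammer_iterate_l1l2_entries(trans, volume, format_callback,
					    &stat);
	if (error == 0) {
		kprintf("formatted %ju big-blocks, %ju free\n",
			(uintmax_t)stat.total_bigblocks,
			(uintmax_t)stat.total_free_bigblocks);
	}
	return (error);
}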