Example #1
/*
 * Remove a volume.
 */
int
hammer_ioc_volume_del(hammer_transaction_t trans, hammer_inode_t ip,
		struct hammer_ioc_volume *ioc)
{
	struct hammer_mount *hmp = trans->hmp;
	struct mount *mp = hmp->mp;
	struct hammer_volume_ondisk *ondisk;
	struct bigblock_stat stat;
	hammer_volume_t volume;
	int vol_no;
	int error = 0;

	if (mp->mnt_flag & MNT_RDONLY) {
		hmkprintf(hmp, "Cannot del volume from read-only HAMMER filesystem\n");
		return (EINVAL);
	}

	if (hammer_lock_ex_try(&hmp->volume_lock) != 0) {
		hmkprintf(hmp, "Another volume operation is in progress!\n");
		return (EAGAIN);
	}

	/*
	 * find volume by volname
	 */
	volume = NULL;
	HAMMER_VOLUME_NUMBER_FOREACH(hmp, vol_no) {
		volume = hammer_get_volume(hmp, vol_no, &error);
		KKASSERT(volume != NULL && error == 0);
		if (strcmp(volume->vol_name, ioc->device_name) == 0) {
			break;
		}
		hammer_rel_volume(volume, 0);
		volume = NULL;
	}
Example #2
int
hammer_ioc_volume_list(hammer_transaction_t trans, hammer_inode_t ip,
    struct hammer_ioc_volume_list *ioc)
{
	struct hammer_mount *hmp = trans->hmp;
	hammer_volume_t volume;
	int error = 0;
	int i, cnt, len;

	for (i = 0, cnt = 0; i < HAMMER_MAX_VOLUMES && cnt < ioc->nvols; i++) {
		volume = hammer_get_volume(hmp, i, &error);
		if (volume == NULL && error == ENOENT) {
			error = 0;
			continue;
		}
		KKASSERT(volume != NULL && error == 0);

		len = strlen(volume->vol_name) + 1;
		KKASSERT(len <= MAXPATHLEN);

		error = copyout(volume->vol_name, ioc->vols[cnt].device_name,
				len);
		if (error) {
			hammer_rel_volume(volume, 0);
			return (error);
		}
		cnt++;
		hammer_rel_volume(volume, 0);
	}
	ioc->nvols = cnt;

	return (error);
}
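
The handler above copies each mounted volume's device name out to userland and rewrites ioc->nvols with the number of entries it actually filled in. A minimal userland sketch of driving it through ioctl(2) could look like the following; the HAMMERIOC_LIST_VOLUMES command name, the header path, and the exact structure layout are assumptions inferred from the handler, not taken from this listing.

/*
 * Userland sketch (assumed API): list the volumes of a mounted HAMMER
 * filesystem by calling the ioctl handled by hammer_ioc_volume_list().
 */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <vfs/hammer/hammer_ioctl.h>	/* assumed header path */

static int
list_hammer_volumes(const char *mountpoint)
{
	struct hammer_ioc_volume_list ioc;
	int fd, i;

	fd = open(mountpoint, O_RDONLY);
	if (fd < 0)
		return (-1);

	memset(&ioc, 0, sizeof(ioc));
	ioc.nvols = HAMMER_MAX_VOLUMES;
	ioc.vols = calloc(ioc.nvols, sizeof(*ioc.vols));
	if (ioc.vols == NULL) {
		close(fd);
		return (-1);
	}

	/* The kernel trims ioc.nvols to the number of volumes it copied out. */
	if (ioctl(fd, HAMMERIOC_LIST_VOLUMES, &ioc) < 0) {
		perror("HAMMERIOC_LIST_VOLUMES");
	} else {
		for (i = 0; i < ioc.nvols; ++i)
			printf("%s\n", ioc.vols[i].device_name);
	}

	free(ioc.vols);
	close(fd);
	return (0);
}
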
Example #3
int
hammer_ioc_volume_add(hammer_transaction_t trans, hammer_inode_t ip,
		struct hammer_ioc_volume *ioc)
{
	hammer_mount_t hmp = trans->hmp;
	struct mount *mp = hmp->mp;
	struct hammer_volume_ondisk ondisk;
	hammer_volume_t volume;
	int64_t total_bigblocks, empty_bigblocks;
	int free_vol_no = 0;
	int error;

	if (mp->mnt_flag & MNT_RDONLY) {
		hmkprintf(hmp, "Cannot add volume to read-only HAMMER filesystem\n");
		return (EINVAL);
	}

	if (hammer_lock_ex_try(&hmp->volume_lock) != 0) {
		hmkprintf(hmp, "Another volume operation is in progress!\n");
		return (EAGAIN);
	}

	if (hmp->nvolumes >= HAMMER_MAX_VOLUMES) {
		hammer_unlock(&hmp->volume_lock);
		hmkprintf(hmp, "Max number of HAMMER volumes exceeded\n");
		return (EINVAL);
	}

	/*
	 * Find an unused volume number.
	 */
	while (free_vol_no < HAMMER_MAX_VOLUMES &&
		hammer_volume_number_test(hmp, free_vol_no)) {
		++free_vol_no;
	}
	if (free_vol_no >= HAMMER_MAX_VOLUMES) {
		hmkprintf(hmp, "Max number of HAMMER volumes exceeded\n");
		error = EINVAL;
		goto end;
	}

	error = hammer_format_volume_header(hmp, ioc, &ondisk, free_vol_no);
	if (error)
		goto end;

	error = hammer_install_volume(hmp, ioc->device_name, NULL, &ondisk);
	if (error)
		goto end;

	hammer_sync_lock_sh(trans);
	hammer_lock_ex(&hmp->blkmap_lock);

	volume = hammer_get_volume(hmp, free_vol_no, &error);
	KKASSERT(volume != NULL && error == 0);

	error =	hammer_format_freemap(trans, volume);
	KKASSERT(error == 0);

	error = hammer_count_bigblocks(hmp, volume,
			&total_bigblocks, &empty_bigblocks);
	KKASSERT(error == 0);
	KKASSERT(total_bigblocks == empty_bigblocks);

	hammer_rel_volume(volume, 0);

	++hmp->nvolumes;
	error = hammer_update_volumes_header(trans,
			total_bigblocks, empty_bigblocks);
	KKASSERT(error == 0);

	hammer_unlock(&hmp->blkmap_lock);
	hammer_sync_unlock(trans);

	KKASSERT(error == 0);
end:
	hammer_unlock(&hmp->volume_lock);
	if (error)
		hmkprintf(hmp, "An error occurred: %d\n", error);
	return (error);
}
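
The handler above takes the new device's path in ioc->device_name, formats a volume header on it, installs the volume, and then updates the on-disk volume count and big-block statistics. A hedged userland sketch of invoking it is shown below; the HAMMERIOC_ADD_VOLUME command name and the header path are assumptions, and in practice additional fields of struct hammer_ioc_volume (volume size, boot/memory area sizes) may also need to be populated.

/*
 * Userland sketch (assumed API): grow a mounted HAMMER filesystem by one
 * volume via the ioctl handled by hammer_ioc_volume_add().
 */
#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <vfs/hammer/hammer_ioctl.h>	/* assumed header path */

static int
add_hammer_volume(const char *mountpoint, const char *device)
{
	struct hammer_ioc_volume ioc;
	int fd, ret;

	fd = open(mountpoint, O_RDONLY);
	if (fd < 0)
		return (-1);

	memset(&ioc, 0, sizeof(ioc));
	/* Only the device path is shown here; size fields may also apply. */
	snprintf(ioc.device_name, sizeof(ioc.device_name), "%s", device);

	ret = ioctl(fd, HAMMERIOC_ADD_VOLUME, &ioc);
	if (ret < 0)
		perror("HAMMERIOC_ADD_VOLUME");

	close(fd);
	return (ret);
}
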
Example #4
int
hammer_ioc_volume_add(hammer_transaction_t trans, hammer_inode_t ip,
		struct hammer_ioc_volume *ioc)
{
	struct hammer_mount *hmp = trans->hmp;
	struct mount *mp = hmp->mp;
	hammer_volume_t volume;
	int error;

	if (mp->mnt_flag & MNT_RDONLY) {
		kprintf("Cannot add volume to read-only HAMMER filesystem\n");
		return (EINVAL);
	}

	if (hmp->nvolumes + 1 >= HAMMER_MAX_VOLUMES) {
		kprintf("Max number of HAMMER volumes exceeded\n");
		return (EINVAL);
	}

	if (hammer_lock_ex_try(&hmp->volume_lock) != 0) {
		kprintf("Another volume operation is in progress!\n");
		return (EAGAIN);
	}

	/*
	 * Find an unused volume number.
	 */
	int free_vol_no = 0;
	while (free_vol_no < HAMMER_MAX_VOLUMES &&
	       RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, free_vol_no)) {
		++free_vol_no;
	}
	if (free_vol_no >= HAMMER_MAX_VOLUMES) {
		kprintf("Max number of HAMMER volumes exceeded\n");
		hammer_unlock(&hmp->volume_lock);
		return (EINVAL);
	}

	struct vnode *devvp = NULL;
	error = hammer_setup_device(&devvp, ioc->device_name, 0);
	if (error)
		goto end;
	KKASSERT(devvp);
	error = hammer_format_volume_header(
		hmp,
		devvp,
		hmp->rootvol->ondisk->vol_name,
		free_vol_no,
		hmp->nvolumes+1,
		ioc->vol_size,
		ioc->boot_area_size,
		ioc->mem_area_size);
	hammer_close_device(&devvp, 0);
	if (error)
		goto end;

	error = hammer_install_volume(hmp, ioc->device_name, NULL);
	if (error)
		goto end;

	hammer_sync_lock_sh(trans);
	hammer_lock_ex(&hmp->blkmap_lock);

	++hmp->nvolumes;

	/*
	 * Set each volume's new value of the vol_count field.
	 */
	for (int vol_no = 0; vol_no < HAMMER_MAX_VOLUMES; ++vol_no) {
		volume = hammer_get_volume(hmp, vol_no, &error);
		if (volume == NULL && error == ENOENT) {
			/*
			 * Skip unused volume numbers
			 */
			error = 0;
			continue;
		}
		KKASSERT(volume != NULL && error == 0);
		hammer_modify_volume_field(trans, volume, vol_count);
		volume->ondisk->vol_count = hmp->nvolumes;
		hammer_modify_volume_done(volume);

		/*
		 * Only changes to the header of the root volume
		 * are automatically flushed to disk. For all
		 * other volumes that we modify we do it here.
		 *
		 * No interlock is needed, volume buffers are not
		 * messed with by bioops.
		 */
		if (volume != trans->rootvol && volume->io.modified) {
			hammer_crc_set_volume(volume->ondisk);
			hammer_io_flush(&volume->io, 0);
		}

		hammer_rel_volume(volume, 0);
	}

	volume = hammer_get_volume(hmp, free_vol_no, &error);
	KKASSERT(volume != NULL && error == 0);

	struct bigblock_stat stat;
	error =	hammer_format_freemap(trans, volume, &stat);
	KKASSERT(error == 0);

	/*
	 * Increase the total number of bigblocks and update stat/vstat totals.
	 */
	hammer_modify_volume_field(trans, trans->rootvol,
		vol0_stat_bigblocks);
	trans->rootvol->ondisk->vol0_stat_bigblocks += stat.total_bigblocks;
	hammer_modify_volume_done(trans->rootvol);
	/*
	 * Bigblock count changed so recompute the total number of blocks.
	 */
	mp->mnt_stat.f_blocks = trans->rootvol->ondisk->vol0_stat_bigblocks *
	    (HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
	mp->mnt_vstat.f_blocks = trans->rootvol->ondisk->vol0_stat_bigblocks *
	    (HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);

	/*
	 * Increase the number of free bigblocks
	 * (including the copy in hmp)
	 */
	hammer_modify_volume_field(trans, trans->rootvol,
		vol0_stat_freebigblocks);
	trans->rootvol->ondisk->vol0_stat_freebigblocks += stat.total_free_bigblocks;
	hmp->copy_stat_freebigblocks =
		trans->rootvol->ondisk->vol0_stat_freebigblocks;
	hammer_modify_volume_done(trans->rootvol);

	hammer_rel_volume(volume, 0);

	hammer_unlock(&hmp->blkmap_lock);
	hammer_sync_unlock(trans);

	KKASSERT(error == 0);
end:
	hammer_unlock(&hmp->volume_lock);
	if (error)
		kprintf("An error occurred: %d\n", error);
	return (error);
}
Example #5
/*
 * Remove a volume.
 */
int
hammer_ioc_volume_del(hammer_transaction_t trans, hammer_inode_t ip,
		struct hammer_ioc_volume *ioc)
{
	struct hammer_mount *hmp = trans->hmp;
	struct mount *mp = hmp->mp;
	hammer_volume_t volume;
	int error = 0;

	if (mp->mnt_flag & MNT_RDONLY) {
		kprintf("Cannot del volume from read-only HAMMER filesystem\n");
		return (EINVAL);
	}

	if (hammer_lock_ex_try(&hmp->volume_lock) != 0) {
		kprintf("Another volume operation is in progress!\n");
		return (EAGAIN);
	}

	volume = NULL;

	/*
	 * find volume by volname
	 */
	for (int vol_no = 0; vol_no < HAMMER_MAX_VOLUMES; ++vol_no) {
		volume = hammer_get_volume(hmp, vol_no, &error);
		if (volume == NULL && error == ENOENT) {
			/*
			 * Skip unused volume numbers
			 */
			error = 0;
			continue;
		}
		KKASSERT(volume != NULL && error == 0);
		if (strcmp(volume->vol_name, ioc->device_name) == 0) {
			break;
		}
		hammer_rel_volume(volume, 0);
		volume = NULL;
	}

	if (volume == NULL) {
		kprintf("Couldn't find volume\n");
		error = EINVAL;
		goto end;
	}

	if (volume == trans->rootvol) {
		kprintf("Cannot remove root-volume\n");
		hammer_rel_volume(volume, 0);
		error = EINVAL;
		goto end;
	}

	/*
	 * Reblock the filesystem so that no data is left on the volume
	 * being removed before its freemap is freed below.
	 */

	hmp->volume_to_remove = volume->vol_no;

	struct hammer_ioc_reblock reblock;
	bzero(&reblock, sizeof(reblock));

	reblock.key_beg.localization = HAMMER_MIN_LOCALIZATION;
	reblock.key_beg.obj_id = HAMMER_MIN_OBJID;
	reblock.key_end.localization = HAMMER_MAX_LOCALIZATION;
	reblock.key_end.obj_id = HAMMER_MAX_OBJID;
	reblock.head.flags = HAMMER_IOC_DO_FLAGS;
	reblock.free_level = 0;

	error = hammer_ioc_reblock(trans, ip, &reblock);

	if (reblock.head.flags & HAMMER_IOC_HEAD_INTR) {
		error = EINTR;
	}

	if (error) {
		if (error == EINTR) {
			kprintf("reblock was interrupted\n");
		} else {
			kprintf("reblock failed: %d\n", error);
		}
		hmp->volume_to_remove = -1;
		hammer_rel_volume(volume, 0);
		goto end;
	}

	/*
	 * Sync filesystem
	 */
	int count = 0;
	while (hammer_flusher_haswork(hmp)) {
		hammer_flusher_sync(hmp);
		++count;
		if (count >= 5) {
			if (count == 5)
				kprintf("HAMMER: flushing.");
			else
				kprintf(".");
			tsleep(&count, 0, "hmrufl", hz);
		}
		if (count == 30) {
			kprintf("giving up");
			break;
		}
	}
	kprintf("\n");

	hammer_sync_lock_sh(trans);
	hammer_lock_ex(&hmp->blkmap_lock);

	/*
	 * We use stat later to update rootvol's bigblock stats
	 */
	struct bigblock_stat stat;
	error = hammer_free_freemap(trans, volume, &stat);
	if (error) {
		kprintf("Failed to free volume. Volume not empty!\n");
		hmp->volume_to_remove = -1;
		hammer_rel_volume(volume, 0);
		hammer_unlock(&hmp->blkmap_lock);
		hammer_sync_unlock(trans);
		goto end;
	}

	hmp->volume_to_remove = -1;

	hammer_rel_volume(volume, 0);

	/*
	 * Unload buffers
	 */
	RB_SCAN(hammer_buf_rb_tree, &hmp->rb_bufs_root, NULL,
		hammer_unload_buffer, volume);

	error = hammer_unload_volume(volume, NULL);
	if (error == -1) {
		kprintf("Failed to unload volume\n");
		hammer_unlock(&hmp->blkmap_lock);
		hammer_sync_unlock(trans);
		goto end;
	}

	volume = NULL;
	--hmp->nvolumes;

	/*
	 * Set each volume's new value of the vol_count field.
	 */
	for (int vol_no = 0; vol_no < HAMMER_MAX_VOLUMES; ++vol_no) {
		volume = hammer_get_volume(hmp, vol_no, &error);
		if (volume == NULL && error == ENOENT) {
			/*
			 * Skip unused volume numbers
			 */
			error = 0;
			continue;
		}

		KKASSERT(volume != NULL && error == 0);
		hammer_modify_volume_field(trans, volume, vol_count);
		volume->ondisk->vol_count = hmp->nvolumes;
		hammer_modify_volume_done(volume);

		/*
		 * Only changes to the header of the root volume
		 * are automatically flushed to disk. For all
		 * other volumes that we modify we do it here.
		 *
		 * No interlock is needed, volume buffers are not
		 * messed with by bioops.
		 */
		if (volume != trans->rootvol && volume->io.modified) {
			hammer_crc_set_volume(volume->ondisk);
			hammer_io_flush(&volume->io, 0);
		}

		hammer_rel_volume(volume, 0);
	}

	/*
	 * Update the total number of bigblocks
	 */
	hammer_modify_volume_field(trans, trans->rootvol,
		vol0_stat_bigblocks);
	trans->rootvol->ondisk->vol0_stat_bigblocks -= stat.total_bigblocks;
	hammer_modify_volume_done(trans->rootvol);

	/*
	 * Update the number of free bigblocks
	 * (including the copy in hmp)
	 */
	hammer_modify_volume_field(trans, trans->rootvol,
		vol0_stat_freebigblocks);
	trans->rootvol->ondisk->vol0_stat_freebigblocks -= stat.total_free_bigblocks;
	hmp->copy_stat_freebigblocks =
		trans->rootvol->ondisk->vol0_stat_freebigblocks;
	hammer_modify_volume_done(trans->rootvol);
	/*
	 * Bigblock count changed so recompute the total number of blocks.
	 */
	mp->mnt_stat.f_blocks = trans->rootvol->ondisk->vol0_stat_bigblocks *
	    (HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
	mp->mnt_vstat.f_blocks = trans->rootvol->ondisk->vol0_stat_bigblocks *
	    (HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);

	hammer_unlock(&hmp->blkmap_lock);
	hammer_sync_unlock(trans);

	/*
	 * Erase the volume header of the removed device so the volume
	 * cannot accidentally be mounted again.
	 */
	struct vnode *devvp = NULL;
	error = hammer_setup_device(&devvp, ioc->device_name, 0);
	if (error) {
		kprintf("Failed to open device: %s\n", ioc->device_name);
		goto end;
	}
	KKASSERT(devvp);
	error = hammer_clear_volume_header(devvp);
	if (error) {
		kprintf("Failed to clear volume header of device: %s\n",
			ioc->device_name);
		goto end;
	}
	hammer_close_device(&devvp, 0);

	KKASSERT(error == 0);
end:
	hammer_unlock(&hmp->volume_lock);
	return (error);
}
Example #6
// corresponds to hammer_vop_strategy_read
int hammerfs_readpage(struct file *file, struct page *page) 
{
    void *page_addr;
    hammer_mount_t hmp;
    struct buffer_head *bh;
    struct super_block *sb;
    struct hammer_transaction trans;
    struct hammer_cursor cursor;
    struct inode *inode;
    struct hammer_inode *ip;
    hammer_base_elm_t base;
    hammer_off_t disk_offset;
    int64_t rec_offset;
    int64_t file_offset;
    int error = 0;
    int boff;
    int roff;
    int n;
    int i=0;
    int block_num;
    int block_offset;
    int bytes_read;
    int64_t sb_offset;
    hammer_off_t zone2_offset;
    int vol_no;
    hammer_volume_t volume;

    printk ("hammerfs_readpage(page->index=%d)\n", (int) page->index);

    inode = file->f_path.dentry->d_inode;
    ip = (struct hammer_inode *)inode->i_private;
    sb = inode->i_sb;
    hmp = (hammer_mount_t)sb->s_fs_info;
    hammer_simple_transaction(&trans, ip->hmp);
    hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
    file_offset = page->index * PAGE_SIZE;

    if (file_offset > inode->i_size) {
        error = -ENOSPC;
        goto done;
    }

    SetPageUptodate (page);
    page_addr = kmap (page);

    if(!page_addr) {
        error = -ENOSPC;
        goto failed;
    }

   /*
    * Key range (begin and end inclusive) to scan.  Note that the keys
    * stored in the actual records represent BASE+LEN, not BASE.  The
    * first record containing bio_offset will have a key > bio_offset.
    */
    cursor.key_beg.localization = ip->obj_localization +
                                  HAMMER_LOCALIZE_MISC;
    cursor.key_beg.obj_id = ip->obj_id;
    cursor.key_beg.create_tid = 0;
    cursor.key_beg.delete_tid = 0;
    cursor.key_beg.obj_type = 0;
    cursor.key_beg.key = file_offset + 1;
    cursor.asof = ip->obj_asof;
    cursor.flags |= HAMMER_CURSOR_ASOF;

    cursor.key_end = cursor.key_beg;
    KKASSERT(ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE);

    cursor.key_beg.rec_type = HAMMER_RECTYPE_DATA;
    cursor.key_end.rec_type = HAMMER_RECTYPE_DATA;
    cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
    cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;

    error = hammer_ip_first(&cursor);
    boff = 0;

    while(error == 0) {
       /*
        * Get the base file offset of the record.  The key for
        * data records is (base + bytes) rather than (base).
        */
        base = &cursor.leaf->base;
        rec_offset = base->key - cursor.leaf->data_len;

       /*
        * Calculate the gap, if any, and zero-fill it.
        *
        * n is the offset of the start of the record versus our
        * current seek offset in the bio.
        */
        n = (int)(rec_offset - (file_offset + boff));
        if (n > 0) {
            if (n > PAGE_SIZE - boff)
                n = PAGE_SIZE - boff;
            bzero((char *)page_addr + boff, n);
            boff += n;
            n = 0;
        }

       /*
        * Calculate the data offset in the record and the number
        * of bytes we can copy.
        *
        * There are two degenerate cases.  First, boff may already
        * be at bp->b_bufsize.  Secondly, the data offset within
        * the record may exceed the record's size.
        */
        roff = -n;
        rec_offset += roff;
        n = cursor.leaf->data_len - roff;
        if (n <= 0) {
            printk("hammerfs_readpage: bad n=%d roff=%d\n", n, roff);
            n = 0;
        } else if (n > PAGE_SIZE - boff) {
            n = PAGE_SIZE - boff;
        }

       /*
        * Deal with cached truncations.  This cool bit of code
        * allows truncate()/ftruncate() to avoid having to sync
        * the file.
        *
        * If the frontend is truncated then all backend records are
        * subject to the frontend's truncation.
        *
        * If the backend is truncated then backend records on-disk
        * (but not in-memory) are subject to the backend's
        * truncation.  In-memory records owned by the backend
        * represent data written after the truncation point on the
        * backend and must not be truncated.
        *
        * Truncate operations deal with frontend buffer cache
        * buffers and frontend-owned in-memory records synchronously.
        */
       if (ip->flags & HAMMER_INODE_TRUNCATED) {
               if (hammer_cursor_ondisk(&cursor) ||
                   cursor.iprec->flush_state == HAMMER_FST_FLUSH) {
                       if (ip->trunc_off <= rec_offset)
                               n = 0;
                       else if (ip->trunc_off < rec_offset + n)
                               n = (int)(ip->trunc_off - rec_offset);
               }
       }
       if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
               if (hammer_cursor_ondisk(&cursor)) {
                       if (ip->sync_trunc_off <= rec_offset)
                               n = 0;
                       else if (ip->sync_trunc_off < rec_offset + n)
                               n = (int)(ip->sync_trunc_off - rec_offset);
               }
       }

       /*
        * Calculate the data offset in the record and the number
        * of bytes we can copy.
        */
        disk_offset = cursor.leaf->data_offset + roff;

        // move this to hammerfs_direct_io_read
        zone2_offset = hammer_blockmap_lookup(hmp, disk_offset, &error);
        vol_no = HAMMER_VOL_DECODE(zone2_offset);
        volume = hammer_get_volume(hmp, vol_no, &error);

        // n is the number of bytes we should read, sb_offset the
        // offset on disk
        sb_offset = volume->ondisk->vol_buf_beg + (zone2_offset & HAMMER_OFF_SHORT_MASK);

        while (n > 0 && boff != PAGE_SIZE) {
            block_num = sb_offset / BLOCK_SIZE;
            block_offset = sb_offset % BLOCK_SIZE;

            // read at most the rest of this block, and no more than the page
            // (or the current record) can still take
            bytes_read = min(BLOCK_SIZE - (int)block_offset, PAGE_SIZE - (int)boff);
            if (bytes_read > n)
                bytes_read = n;

            bh = sb_bread(sb, block_num + i);
            if (!bh) {
                error = -ENOMEM;
                goto failed;
            }
            // copy into the page at its current fill offset (boff) from the
            // block-relative offset of the data on disk
            memcpy((char *)page_addr + boff, (char *)bh->b_data + block_offset, bytes_read);
            brelse(bh);

            n -= bytes_read;
            boff += bytes_read;
            roff += bytes_read;
            sb_offset += bytes_read;
        }

       /*
        * Iterate until we have filled the request.
        */
        if (boff == PAGE_SIZE)
            break;
        error = hammer_ip_next(&cursor);
    }

    hammer_done_cursor(&cursor);
    hammer_done_transaction(&trans);

failed:
    if (PageLocked (page))
        unlock_page (page);
    kunmap (page);
done:
    return error;
}
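
hammerfs_readpage() follows the Linux readpage contract: it maps the page, walks the HAMMER B-Tree for the file's DATA records, zero-fills any gaps, and copies record data into the page via sb_bread(). For context, a minimal sketch of how such a routine is normally wired into an inode is shown below; the hammerfs_aops and hammerfs_init_file_inode names are hypothetical, and only the .readpage hook itself comes from the example above.

/*
 * Sketch (hypothetical glue code): register the readpage callback so the
 * page cache calls hammerfs_readpage() on a read(2) miss or a read fault.
 */
#include <linux/fs.h>
#include <linux/pagemap.h>

static const struct address_space_operations hammerfs_aops = {
    .readpage = hammerfs_readpage,  /* fill one page from HAMMER records */
};

/* Called while setting up a regular-file inode (name is hypothetical). */
static void hammerfs_init_file_inode(struct inode *inode)
{
    inode->i_mapping->a_ops = &hammerfs_aops;
}
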