Example #1
void ubh_bforget (struct ufs_buffer_head * ubh)
{
	unsigned i;
	if (!ubh) 
		return;
	for (i = 0; i < ubh->count; i++)
		if (ubh->bh[i])
			bforget(ubh->bh[i]);
}
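
The loop above guards each slot for NULL and then drops the buffer with bforget(), which, unlike brelse(), also discards any pending dirty state so the block is never written back. A minimal userspace sketch of that contrast, with hypothetical toy_* names standing in for the kernel API:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for struct buffer_head. */
struct toy_bh {
	int refcount;
	bool dirty;
	char data[512];
};

/* Like brelse(): drop a reference; dirty data would still reach disk. */
static void toy_brelse(struct toy_bh *bh)
{
	if (!bh)
		return;
	if (--bh->refcount == 0) {
		if (bh->dirty)
			printf("writing block back before freeing\n");
		free(bh);
	}
}

/* Like bforget(): discard pending changes, then drop the reference. */
static void toy_bforget(struct toy_bh *bh)
{
	if (!bh)
		return;
	bh->dirty = false;
	toy_brelse(bh);
}

int main(void)
{
	struct toy_bh *bh = calloc(1, sizeof(*bh));

	if (!bh)
		return 1;
	bh->refcount = 1;
	bh->dirty = true;
	toy_bforget(bh);	/* no writeback: the dirty flag was discarded */
	return 0;
}
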
Example #2
/*
 * The ext4 forget function must perform a revoke if we are freeing data
 * which has been journaled.  Metadata (e.g. indirect blocks) must be
 * revoked in all cases.
 *
 * "bh" may be NULL: a metadata block may have been freed from memory
 * but there may still be a record of it in the journal, and that record
 * still needs to be revoked.
 *
 * If the handle isn't valid we're not journaling, so there is nothing
 * to revoke; a plain bforget() of the buffer is enough.
 */
int __ext4_forget(const char *where, unsigned int line, handle_t *handle,
		  int is_metadata, struct inode *inode,
		  struct buffer_head *bh, ext4_fsblk_t blocknr)
{
	int err;

	might_sleep();

	trace_ext4_forget(inode, is_metadata, blocknr);
	BUFFER_TRACE(bh, "enter");

	jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, "
		  "data mode %x\n",
		  bh, is_metadata, inode->i_mode,
		  test_opt(inode->i_sb, DATA_FLAGS));

	/* In the no journal case, we can just do a bforget and return */
	if (!ext4_handle_valid(handle)) {
		bforget(bh);
		return 0;
	}

	/* Never use the revoke function if we are doing full data
	 * journaling: there is no need to, and a V1 superblock won't
	 * support it.  Otherwise, only skip the revoke on un-journaled
	 * data blocks. */

	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA ||
	    (!is_metadata && !ext4_should_journal_data(inode))) {
		if (bh) {
			BUFFER_TRACE(bh, "call jbd2_journal_forget");
			err = jbd2_journal_forget(handle, bh);
			if (err)
				ext4_journal_abort_handle(where, line, __func__,
							  bh, handle, err);
			return err;
		}
		return 0;
	}

	/*
	 * data!=journal && (is_metadata || should_journal_data(inode))
	 */
	BUFFER_TRACE(bh, "call jbd2_journal_revoke");
	err = jbd2_journal_revoke(handle, blocknr, bh);
	if (err) {
		ext4_journal_abort_handle(where, line, __func__,
					  bh, handle, err);
		__ext4_abort(inode->i_sb, where, line,
			   "error %d when attempting revoke", err);
	}
	BUFFER_TRACE(bh, "exit");
	return err;
}
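
The two branches above reduce to a three-way decision: is full data journaling mounted, is the block metadata, and does the inode journal its data. A hedged sketch distilling just that predicate; the names are illustrative, not part of the ext4 API:

/* Mirrors the forget-vs-revoke choice made in __ext4_forget(). */
enum forget_action { DO_FORGET, DO_REVOKE };

static enum forget_action forget_action(int data_journal_mount,
					int is_metadata,
					int inode_journals_data)
{
	/* Full data journaling, or an un-journaled data block: plain forget. */
	if (data_journal_mount || (!is_metadata && !inode_journals_data))
		return DO_FORGET;
	/* data != journal && (is_metadata || should_journal_data(inode)) */
	return DO_REVOKE;
}

int main(void)
{
	/* a metadata block on an ordered-data mount must be revoked */
	return forget_action(0, 1, 0) == DO_REVOKE ? 0 : 1;
}
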
Example #3
static void simplefs_destroy_inode(struct inode *vfs_inode) 
{
	struct simple_fs_inode_i *inode = SIMPLEFS_INODE(vfs_inode);
	struct simple_fs_sb_i *sb = SIMPLEFS_SB(vfs_inode->i_sb);
	if (inode->indirect_block) {
		/* flush a still-dirty indirect block before dropping it */
		if (buffer_dirty(inode->indirect_block))
			sync_dirty_buffer(inode->indirect_block);
		bforget(inode->indirect_block);
	}
	kmem_cache_free(sb->inode_cachep, inode);
}
Example #4
/*
 * Returns a pointer to a buffer containing at least LEN bytes of
 * filesystem starting at byte offset OFFSET into the filesystem.
 */
static void *cramfs_read(struct super_block *sb, unsigned int offset, unsigned int len)
{
	struct buffer_head * bh_array[BLKS_PER_BUF];
	unsigned i, blocknr, buffer;
	char *data;

	if (!len)
		return NULL;
	blocknr = offset >> PAGE_CACHE_SHIFT;
	offset &= PAGE_CACHE_SIZE - 1;

	/* Check if an existing buffer already has the data.. */
	for (i = 0; i < READ_BUFFERS; i++) {
		unsigned int blk_offset;

		if (buffer_dev[i] != sb)
			continue;
		if (blocknr < buffer_blocknr[i])
			continue;
		blk_offset = (blocknr - buffer_blocknr[i]) << PAGE_CACHE_SHIFT;
		blk_offset += offset;
		if (blk_offset + len > BUFFER_SIZE)
			continue;
		return read_buffers[i] + blk_offset;
	}

	/* Ok, read in BLKS_PER_BUF pages completely first. */
	for (i = 0; i < BLKS_PER_BUF; i++)
		bh_array[i] = bread(sb->s_dev, blocknr + i, PAGE_CACHE_SIZE);

	/* Ok, copy them to the staging area without sleeping. */
	buffer = next_buffer;
	next_buffer = NEXT_BUFFER(buffer);
	buffer_blocknr[buffer] = blocknr;
	buffer_dev[buffer] = sb;

	data = read_buffers[buffer];
	for (i = 0; i < BLKS_PER_BUF; i++) {
		struct buffer_head * bh = bh_array[i];
		if (bh) {
			memcpy(data, bh->b_data, PAGE_CACHE_SIZE);
			bforget(bh);
		} else
			memset(data, 0, PAGE_CACHE_SIZE);
		data += PAGE_CACHE_SIZE;
	}
	return read_buffers[buffer] + offset;
}
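
cramfs_read() splits the byte offset into a page-sized block number and an in-block remainder using a shift and a mask. A standalone check of that arithmetic, assuming 4096-byte pages (TOY_PAGE_SHIFT is a stand-in for PAGE_CACHE_SHIFT):

#include <stdio.h>

#define TOY_PAGE_SHIFT 12
#define TOY_PAGE_SIZE  (1u << TOY_PAGE_SHIFT)

int main(void)
{
	unsigned int offset = 10000;
	unsigned int blocknr = offset >> TOY_PAGE_SHIFT;      /* 10000 / 4096 = 2 */
	unsigned int in_blk  = offset & (TOY_PAGE_SIZE - 1);  /* 10000 % 4096 = 1808 */

	printf("block %u, in-block offset %u\n", blocknr, in_blk);
	return 0;
}
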
Example #5
static int alloc_branch(struct inode *inode,
			     int num,
			     int *offsets,
			     Indirect *branch)
{
	int n = 0;
	int i;
	int parent = minix_new_block(inode);
	printk("parent new block %d\n",parent);

	branch[0].key = cpu_to_block(parent);
	if (parent) for (n = 1; n < num; n++) {
		struct buffer_head *bh;
		/* Allocate the next block */
		int nr = minix_new_block(inode);
		printk("loop new block %d",nr);
		if (!nr)
			break;
		branch[n].key = cpu_to_block(nr);
		bh = sb_getblk(inode->i_sb, parent);
		lock_buffer(bh);
		memset(bh->b_data, 0, bh->b_size);
		branch[n].bh = bh;
		branch[n].p = (block_t*) bh->b_data + offsets[n];
		*branch[n].p = branch[n].key;
		set_buffer_uptodate(bh);
		unlock_buffer(bh);
		mark_buffer_dirty_inode(bh, inode);
		parent = nr;
	}
	if (n == num)
		return 0;

	/* Allocation failed, free what we already allocated */
	for (i = 1; i < n; i++)
		bforget(branch[i].bh);
	for (i = 0; i < n; i++)
		minix_free_block(inode, block_to_cpu(branch[i].key));
	return -ENOSPC;
}
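
alloc_branch() follows a common rollback shape: acquire up to num resources, and if the loop stops early, release exactly what was acquired and report -ENOSPC. A standalone userspace sketch of that shape, with illustrative names:

#include <errno.h>
#include <stdlib.h>

/* Allocate num slots; on partial failure, release what we got. */
static int alloc_all(void **slots, int num)
{
	int n;

	for (n = 0; n < num; n++) {
		slots[n] = malloc(64);
		if (!slots[n])
			break;
	}
	if (n == num)
		return 0;

	/* Allocation failed, free what we already allocated */
	while (n-- > 0)
		free(slots[n]);
	return -ENOSPC;
}

int main(void)
{
	void *slots[8];

	return alloc_all(slots, 8) ? 1 : 0;
}
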
Example #6
static inline int splice_branch(struct inode *inode,
                                Indirect chain[DEPTH],
                                Indirect *where,
                                int num)
{
    int i;

    /* Verify that place we are splicing to is still there and vacant */

    /* Writer: pointers */
    if (!verify_chain(chain, where-1) || *where->p)
        /* Writer: end */
        goto changed;

    /* That's it */

    *where->p = where->key;

    /* Writer: end */

    /* We are done with atomic stuff, now do the rest of housekeeping */

    inode->i_ctime = CURRENT_TIME;

    /* had we spliced it onto indirect block? */
    if (where->bh)
        mark_buffer_dirty_inode(where->bh, inode);

    mark_inode_dirty(inode);
    return 0;

changed:
    for (i = 1; i < num; i++)
        bforget(where[i].bh);
    for (i = 0; i < num; i++)
        minix_free_block(inode, block_to_cpu(where[i].key));
    return -EAGAIN;
}
Example #7
static void free_branches(struct inode *inode, block_t *p, block_t *q, int depth)
{
	struct buffer_head * bh;
	unsigned long nr;

	if (depth--) {
		for ( ; p < q ; p++) {
			nr = block_to_cpu(*p);
			if (!nr)
				continue;
			*p = 0;
			bh = sb_bread(inode->i_sb, nr);
			if (!bh)
				continue;
			free_branches(inode, (block_t*)bh->b_data,
				      block_end(bh), depth);
			bforget(bh);
			xiafs_free_block(inode, nr);
			mark_inode_dirty(inode);
		}
	} else
		free_data(inode, p, q);
}
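
free_branches() recurses with an explicit depth counter: while depth remains it descends through each non-zero pointer, and at depth zero it frees data directly. A standalone sketch of the same depth-bounded recursion over a toy in-memory tree (not the xiafs on-disk layout):

#include <stdlib.h>

#define FANOUT 4

struct toy_node {
	struct toy_node *child[FANOUT];
};

/* Free a tree of indirect nodes; depth 0 means "this is a leaf". */
static void toy_free_branches(struct toy_node *node, int depth)
{
	int i;

	if (!node)
		return;
	if (depth--) {
		for (i = 0; i < FANOUT; i++)
			toy_free_branches(node->child[i], depth);
	}
	free(node);
}

int main(void)
{
	struct toy_node *root = calloc(1, sizeof(*root));

	if (!root)
		return 1;
	root->child[0] = calloc(1, sizeof(*root));
	toy_free_branches(root, 1);
	return 0;
}
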
Example #8
static inline int splice_branch(struct inode *inode,
				     Indirect chain[DEPTH],
				     Indirect *where,
				     int num)
{
	int i;

	write_lock(&pointers_lock);

	/* Verify that place we are splicing to is still there and vacant */
	if (!verify_chain(chain, where-1) || *where->p)
		goto changed;

	*where->p = where->key;

	write_unlock(&pointers_lock);

	/* We are done with atomic stuff, now do the rest of housekeeping */

	inode->i_ctime = current_time(inode);

	/* had we spliced it onto indirect block? */
	if (where->bh)
		mark_buffer_dirty_inode(where->bh, inode);

	mark_inode_dirty(inode);
	return 0;

changed:
	write_unlock(&pointers_lock);
	for (i = 1; i < num; i++)
		bforget(where[i].bh);
	for (i = 0; i < num; i++)
		xiafs_free_block(inode, block_to_cpu(where[i].key));
	return -EAGAIN;
}
Example #9
static inline int splice_branch(struct inode *inode,
				     Indirect chain[DEPTH],
				     Indirect *where,
				     int num)
{
	int i;

	write_lock(&pointers_lock);

	/* Verify that place we are splicing to is still there and vacant */
	if (!verify_chain(chain, where-1) || *where->p)
		goto changed;

	*where->p = where->key;

	write_unlock(&pointers_lock);

	/* We are done with atomic stuff, now do the rest of housekeeping */

	inode->i_ctime = CURRENT_TIME_SEC;

	/* had we spliced it onto indirect block? */
	if (where->bh)
		mark_buffer_dirty_inode(where->bh, inode);

	mark_inode_dirty(inode);
	return 0;

changed:
	write_unlock(&pointers_lock);
	for (i = 1; i < num; i++)
		bforget(where[i].bh);
	for (i = 0; i < num; i++)
		minix_free_block(inode, block_to_cpu(where[i].key));
	return -EAGAIN;
}
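
Both splice_branch() variants are check-then-commit under a writer lock: re-verify the slot is still vacant, publish the key, and on a lost race unlock, roll back, and return -EAGAIN so the caller retries. A standalone pthreads sketch of that shape, with illustrative names:

#include <errno.h>
#include <pthread.h>

static pthread_rwlock_t pointers_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Publish key into *slot unless another writer got there first. */
static int toy_splice(int *slot, int key)
{
	pthread_rwlock_wrlock(&pointers_lock);
	if (*slot != 0) {
		/* chain changed under us: caller frees its branch and retries */
		pthread_rwlock_unlock(&pointers_lock);
		return -EAGAIN;
	}
	*slot = key;
	pthread_rwlock_unlock(&pointers_lock);
	return 0;
}

int main(void)
{
	int slot = 0;

	return toy_splice(&slot, 42);	/* 0 on success */
}
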
Example #10
File: fs.c Project: hugh712/Jollen
/* Called to mount a filesystem by read_super() in fs/super.c.
 * Return a super block, the main structure of a filesystem.
 *
 * NOTE : Don't store a pointer to an option, as the page containing the
 * options is freed after ntfs_read_super() returns.
 *
 * NOTE : A context switch can happen in kernel code only if the code blocks
 * (= calls schedule() in kernel/sched.c). */
struct super_block *ntfs_read_super(struct super_block *sb, void *options,
		int silent)
{
	ntfs_volume *vol;
	struct buffer_head *bh;
	int i, to_read, blocksize;

	ntfs_debug(DEBUG_OTHER, "ntfs_read_super\n");
	vol = NTFS_SB2VOL(sb);
	init_ntfs_super_block(vol);
	if (!parse_options(vol, (char*)options))
		goto ntfs_read_super_vol;
	blocksize = get_hardsect_size(sb->s_dev);
	if (blocksize < 512)
		blocksize = 512;
	if (set_blocksize(sb->s_dev, blocksize) < 0) {
		ntfs_error("Unable to set blocksize %d.\n", blocksize);
		goto ntfs_read_super_vol;
	}
	sb->s_blocksize = blocksize;
	/* Read the super block (boot block). */
	if (!(bh = sb_bread(sb, 0))) {
		ntfs_error("Reading super block failed\n");
		goto ntfs_read_super_unl;
	}
	ntfs_debug(DEBUG_OTHER, "Done reading boot block\n");
	/* Check for valid 'NTFS' boot sector. */
	if (!is_boot_sector_ntfs(bh->b_data)) {
		ntfs_debug(DEBUG_OTHER, "Not a NTFS volume\n");
		bforget(bh);
		goto ntfs_read_super_unl;
	}
	ntfs_debug(DEBUG_OTHER, "Going to init volume\n");
	if (ntfs_init_volume(vol, bh->b_data) < 0) {
		ntfs_debug(DEBUG_OTHER, "Init volume failed.\n");
		bforget(bh);
		goto ntfs_read_super_unl;
	}
	ntfs_debug(DEBUG_OTHER, "$Mft at cluster 0x%lx\n", vol->mft_lcn);
	brelse(bh);
	NTFS_SB(vol) = sb;
	if (vol->cluster_size > PAGE_SIZE) {
		ntfs_error("Partition cluster size is not supported yet (it "
			   "is > max kernel blocksize).\n");
		goto ntfs_read_super_unl;
	}
	ntfs_debug(DEBUG_OTHER, "Done to init volume\n");
	/* Inform the kernel that a device block is a NTFS cluster. */
	sb->s_blocksize = vol->cluster_size;
	sb->s_blocksize_bits = vol->cluster_size_bits;
	if (blocksize != vol->cluster_size &&
			set_blocksize(sb->s_dev, sb->s_blocksize) < 0) {
		ntfs_error("Cluster size too small for device.\n");
		goto ntfs_read_super_unl;
	}
	ntfs_debug(DEBUG_OTHER, "set_blocksize\n");
	/* Allocate an MFT record (MFT record can be smaller than a cluster). */
	i = vol->cluster_size;
	if (i < vol->mft_record_size)
		i = vol->mft_record_size;
	if (!(vol->mft = ntfs_malloc(i)))
		goto ntfs_read_super_unl;

	/* Read at least the MFT record for $Mft. */
	to_read = vol->mft_clusters_per_record;
	if (to_read < 1)
		to_read = 1;
	for (i = 0; i < to_read; i++) {
		if (!(bh = sb_bread(sb, vol->mft_lcn + i))) {
			ntfs_error("Could not read $Mft record 0\n");
			goto ntfs_read_super_mft;
		}
		ntfs_memcpy(vol->mft + ((__s64)i << vol->cluster_size_bits),
						bh->b_data, vol->cluster_size);
		brelse(bh);
		ntfs_debug(DEBUG_OTHER, "Read cluster 0x%x\n",
							 vol->mft_lcn + i);
	}
	/* Check and fixup this MFT record */
	if (!ntfs_check_mft_record(vol, vol->mft)){
		ntfs_error("Invalid $Mft record 0\n");
		goto ntfs_read_super_mft;
	}
	/* Inform the kernel about which super operations are available. */
	sb->s_op = &ntfs_super_operations;
	sb->s_magic = NTFS_SUPER_MAGIC;
	sb->s_maxbytes = ~0ULL >> 1;
	ntfs_debug(DEBUG_OTHER, "Reading special files\n");
	if (ntfs_load_special_files(vol)) {
		ntfs_error("Error loading special files\n");
		goto ntfs_read_super_mft;
	}
	ntfs_debug(DEBUG_OTHER, "Getting RootDir\n");
	/* Get the root directory. */
	if (!(sb->s_root = d_alloc_root(iget(sb, FILE_root)))) {
		ntfs_error("Could not get root dir inode\n");
		goto ntfs_read_super_mft;
	}
ntfs_read_super_ret:
	ntfs_debug(DEBUG_OTHER, "read_super: done\n");
	return sb;
ntfs_read_super_mft:
	ntfs_free(vol->mft);
ntfs_read_super_unl:
ntfs_read_super_vol:
	sb = NULL;
	goto ntfs_read_super_ret;
}
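
The boot-sector handling above is read/validate/discard: read the block, check a magic signature, and on failure drop the buffer with bforget() so the bad contents are not kept cached. A userspace sketch of the same shape against an in-memory device; all names here are hypothetical:

#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 512

/* Validate a magic string at a fixed offset, the way
 * is_boot_sector_ntfs() validates the NTFS signature. */
static int toy_is_valid(const char *block)
{
	return memcmp(block + 3, "TOYFS", 5) == 0;
}

static int toy_read_super(const char *device, size_t dev_size)
{
	char block[BLOCK_SIZE];

	if (dev_size < BLOCK_SIZE)
		return -1;
	memcpy(block, device, BLOCK_SIZE);	/* "bread" block 0 */
	if (!toy_is_valid(block)) {
		/* kernel code would bforget() the buffer here */
		fprintf(stderr, "not a TOYFS volume\n");
		return -1;
	}
	return 0;
}

int main(void)
{
	char device[BLOCK_SIZE] = { 0 };

	memcpy(device + 3, "TOYFS", 5);
	return toy_read_super(device, sizeof(device)) ? 1 : 0;
}
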
Example #11
int reiserfs_resize (struct super_block * s, unsigned long block_count_new)
{
	struct reiserfs_super_block * sb;
        struct reiserfs_bitmap_info *bitmap;
	struct buffer_head * bh;
	struct reiserfs_transaction_handle th;
	unsigned int bmap_nr_new, bmap_nr;
	unsigned int block_r_new, block_r;
	
	struct reiserfs_list_bitmap * jb;
	struct reiserfs_list_bitmap jbitmap[JOURNAL_NUM_BITMAPS];
	
	unsigned long int block_count, free_blocks;
	int i;
	int copy_size ;

	sb = SB_DISK_SUPER_BLOCK(s);

	if (SB_BLOCK_COUNT(s) >= block_count_new) {
		printk("can\'t shrink filesystem on-line\n");
		return -EINVAL;
	}

	/* check the device size */
	bh = sb_bread(s, block_count_new - 1);
	if (!bh) {
		printk("reiserfs_resize: can\'t read last block\n");
		return -EINVAL;
	}	
	bforget(bh);

	/* old disk layout detection; those partitions can be mounted, but
	 * cannot be resized */
	if (SB_BUFFER_WITH_SB(s)->b_blocknr *	SB_BUFFER_WITH_SB(s)->b_size 
		!= REISERFS_DISK_OFFSET_IN_BYTES ) {
		printk("reiserfs_resize: unable to resize a reiserfs without distributed bitmap (fs version < 3.5.12)\n");
		return -ENOTSUPP;
	}
       
	/* count used bits in last bitmap block */
	block_r = SB_BLOCK_COUNT(s) -
	        (SB_BMAP_NR(s) - 1) * s->s_blocksize * 8;
	
	/* count bitmap blocks in new fs */
	bmap_nr_new = block_count_new / ( s->s_blocksize * 8 );
	block_r_new = block_count_new - bmap_nr_new * s->s_blocksize * 8;
	if (block_r_new) 
		bmap_nr_new++;
	else
		block_r_new = s->s_blocksize * 8;

	/* save old values */
	block_count = SB_BLOCK_COUNT(s);
	bmap_nr     = SB_BMAP_NR(s);

	/* resizing of reiserfs bitmaps (journal and real), if needed */
	if (bmap_nr_new > bmap_nr) {	    
	    /* reallocate journal bitmaps */
	    if (reiserfs_allocate_list_bitmaps(s, jbitmap, bmap_nr_new) < 0) {
		printk("reiserfs_resize: unable to allocate memory for journal bitmaps\n");
		unlock_super(s) ;
		return -ENOMEM ;
	    }
	    /* the new journal bitmaps are zero filled, now we copy in the bitmap
	    ** node pointers from the old journal bitmap structs, and then
	    ** transfer the new data structures into the journal struct.
	    **
	    ** using the copy_size var below allows this code to work for
	    ** both shrinking and expanding the FS.
	    */
	    copy_size = bmap_nr_new < bmap_nr ? bmap_nr_new : bmap_nr ;
	    copy_size = copy_size * sizeof(struct reiserfs_list_bitmap_node *) ;
	    for (i = 0 ; i < JOURNAL_NUM_BITMAPS ; i++) {
		struct reiserfs_bitmap_node **node_tmp ;
		jb = SB_JOURNAL(s)->j_list_bitmap + i ;
		memcpy(jbitmap[i].bitmaps, jb->bitmaps, copy_size) ;

		/* just in case vfree schedules on us, copy the new
		** pointer into the journal struct before freeing the 
		** old one
		*/
		node_tmp = jb->bitmaps ;
		jb->bitmaps = jbitmap[i].bitmaps ;
		vfree(node_tmp) ;
	    }	
	
	    /* allocate additional bitmap blocks, reallocate array of bitmap
	     * block pointers */
	    bitmap = vmalloc(sizeof(struct reiserfs_bitmap_info) * bmap_nr_new);
	    if (!bitmap) {
		printk("reiserfs_resize: unable to allocate memory.\n");
		return -ENOMEM;
	    }
	    memset (bitmap, 0, sizeof (struct reiserfs_bitmap_info) * SB_BMAP_NR(s));
	    for (i = 0; i < bmap_nr; i++)
		bitmap[i] = SB_AP_BITMAP(s)[i];
	    for (i = bmap_nr; i < bmap_nr_new; i++) {
		bitmap[i].bh = sb_getblk(s, i * s->s_blocksize * 8);
		memset(bitmap[i].bh->b_data, 0, sb_blocksize(sb));
		reiserfs_test_and_set_le_bit(0, bitmap[i].bh->b_data);

		set_buffer_uptodate(bitmap[i].bh);
		mark_buffer_dirty(bitmap[i].bh) ;
		sync_dirty_buffer(bitmap[i].bh);
		// update bitmap_info stuff
		bitmap[i].first_zero_hint=1;
		bitmap[i].free_count = sb_blocksize(sb) * 8 - 1;
	    }	
	    /* free old bitmap blocks array */
	    vfree(SB_AP_BITMAP(s));
	    SB_AP_BITMAP(s) = bitmap;
	}
	
	/* begin transaction */
	journal_begin(&th, s, 10);

	/* correct last bitmap blocks in old and new disk layout */
	reiserfs_prepare_for_journal(s, SB_AP_BITMAP(s)[bmap_nr - 1].bh, 1);
	for (i = block_r; i < s->s_blocksize * 8; i++)
	    reiserfs_test_and_clear_le_bit(i, 
					   SB_AP_BITMAP(s)[bmap_nr - 1].bh->b_data);
	SB_AP_BITMAP(s)[bmap_nr - 1].free_count += s->s_blocksize * 8 - block_r;
	if ( !SB_AP_BITMAP(s)[bmap_nr - 1].first_zero_hint)
	    SB_AP_BITMAP(s)[bmap_nr - 1].first_zero_hint = block_r;

	journal_mark_dirty(&th, s, SB_AP_BITMAP(s)[bmap_nr - 1].bh);

	reiserfs_prepare_for_journal(s, SB_AP_BITMAP(s)[bmap_nr_new - 1].bh, 1);
	for (i = block_r_new; i < s->s_blocksize * 8; i++)
	    reiserfs_test_and_set_le_bit(i,
					 SB_AP_BITMAP(s)[bmap_nr_new - 1].bh->b_data);
	journal_mark_dirty(&th, s, SB_AP_BITMAP(s)[bmap_nr_new - 1].bh);
 
	SB_AP_BITMAP(s)[bmap_nr_new - 1].free_count -= s->s_blocksize * 8 - block_r_new;
	/* Extreme case where last bitmap is the only valid block in itself. */
	if ( !SB_AP_BITMAP(s)[bmap_nr_new - 1].free_count )
	    SB_AP_BITMAP(s)[bmap_nr_new - 1].first_zero_hint = 0;
 	/* update super */
	reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1) ;
	free_blocks = SB_FREE_BLOCKS(s);
	PUT_SB_FREE_BLOCKS(s, free_blocks + (block_count_new - block_count - (bmap_nr_new - bmap_nr)));
	PUT_SB_BLOCK_COUNT(s, block_count_new);
	PUT_SB_BMAP_NR(s, bmap_nr_new);
	s->s_dirt = 1;

	journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB(s));
	
	SB_JOURNAL(s)->j_must_wait = 1;
	journal_end(&th, s, 10);

	return 0;
}
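
The bitmap sizing above is a ceiling division: one bitmap block tracks s_blocksize * 8 filesystem blocks, and block_r_new counts the bits of the last bitmap block actually in use. A standalone check of that arithmetic, assuming a 4 KiB block size:

#include <stdio.h>

int main(void)
{
	unsigned long blocksize = 4096;
	unsigned long bits_per_bmap = blocksize * 8;	/* 32768 blocks per bitmap block */
	unsigned long block_count_new = 100000;
	unsigned long bmap_nr_new, block_r_new;

	bmap_nr_new = block_count_new / bits_per_bmap;			/* 3 */
	block_r_new = block_count_new - bmap_nr_new * bits_per_bmap;	/* 1696 */
	if (block_r_new)
		bmap_nr_new++;			/* partial last bitmap block */
	else
		block_r_new = bits_per_bmap;	/* last bitmap block is full */

	printf("%lu bitmap blocks, last tracks %lu blocks\n",
	       bmap_nr_new, block_r_new);	/* 4 bitmap blocks, last tracks 1696 */
	return 0;
}
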
Example #12
/*
 * extents_bforget: Release the corresponding buffer head.
 * NOTE: The data cached by @bh is marked invalid (uptodate is cleared).
 *
 * @bh: The buffer head that is going to be released.
 *
 * The pages underlying the buffer head will be unlocked.
 */
void extents_bforget(struct buffer_head *bh)
{
    clear_buffer_uptodate(bh);
    bforget(bh);
}
Example #13
int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
{
	int rc = 0;
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	struct inode *ipbmap = sbi->ipbmap;
	struct inode *ipbmap2;
	struct inode *ipimap = sbi->ipimap;
	struct jfs_log *log = sbi->log;
	struct bmap *bmp = sbi->bmap;
	s64 newLogAddress, newFSCKAddress;
	int newFSCKSize;
	s64 newMapSize = 0, mapSize;
	s64 XAddress, XSize, nblocks, xoff, xaddr, t64;
	s64 oldLVSize;
	s64 newFSSize;
	s64 VolumeSize;
	int newNpages = 0, nPages, newPage, xlen, t32;
	int tid;
	int log_formatted = 0;
	struct inode *iplist[1];
	struct jfs_superblock *j_sb, *j_sb2;
	s64 old_agsize;
	int agsizechanged = 0;
	struct buffer_head *bh, *bh2;

	/* If the volume hasn't grown, get out now */

	if (sbi->mntflag & JFS_INLINELOG)
		oldLVSize = addressPXD(&sbi->logpxd) + lengthPXD(&sbi->logpxd);
	else
		oldLVSize = addressPXD(&sbi->fsckpxd) +
		    lengthPXD(&sbi->fsckpxd);

	if (oldLVSize >= newLVSize) {
		printk(KERN_WARNING
		       "jfs_extendfs: volume hasn't grown, returning\n");
		goto out;
	}

	VolumeSize = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits;

	if (VolumeSize) {
		if (newLVSize > VolumeSize) {
			printk(KERN_WARNING "jfs_extendfs: invalid size\n");
			rc = -EINVAL;
			goto out;
		}
	} else {
		/* check the device */
		bh = sb_bread(sb, newLVSize - 1);
		if (!bh) {
			printk(KERN_WARNING "jfs_extendfs: invalid size\n");
			rc = -EINVAL;
			goto out;
		}
		bforget(bh);
	}

	/* Can't extend write-protected drive */

	if (isReadOnly(ipbmap)) {
		printk(KERN_WARNING "jfs_extendfs: read-only file system\n");
		rc = -EROFS;
		goto out;
	}


	if ((sbi->mntflag & JFS_INLINELOG)) {
		if (newLogSize == 0) {
			newLogSize = newLVSize >> 8;
			t32 = (1 << (20 - sbi->l2bsize)) - 1;
			newLogSize = (newLogSize + t32) & ~t32;
			newLogSize =
			    min(newLogSize, MEGABYTE32 >> sbi->l2bsize);
		} else {
Example #14
int reiserfs_resize(struct super_block *s, unsigned long block_count_new)
{
	int err = 0;
	struct reiserfs_super_block *sb;
	struct reiserfs_bitmap_info *bitmap;
	struct reiserfs_bitmap_info *info;
	struct reiserfs_bitmap_info *old_bitmap = SB_AP_BITMAP(s);
	struct buffer_head *bh;
	struct reiserfs_transaction_handle th;
	unsigned int bmap_nr_new, bmap_nr;
	unsigned int block_r_new, block_r;

	struct reiserfs_list_bitmap *jb;
	struct reiserfs_list_bitmap jbitmap[JOURNAL_NUM_BITMAPS];

	unsigned long int block_count, free_blocks;
	int i;
	int copy_size;

	sb = SB_DISK_SUPER_BLOCK(s);

	if (SB_BLOCK_COUNT(s) >= block_count_new) {
		printk("can\'t shrink filesystem on-line\n");
		return -EINVAL;
	}

	/* check the device size */
	bh = sb_bread(s, block_count_new - 1);
	if (!bh) {
		printk("reiserfs_resize: can\'t read last block\n");
		return -EINVAL;
	}
	bforget(bh);

	/* old disk layout detection; those partitions can be mounted, but
	 * cannot be resized */
	if (SB_BUFFER_WITH_SB(s)->b_blocknr * SB_BUFFER_WITH_SB(s)->b_size
	    != REISERFS_DISK_OFFSET_IN_BYTES) {
		printk
		    ("reiserfs_resize: unable to resize a reiserfs without distributed bitmap (fs version < 3.5.12)\n");
		return -ENOTSUPP;
	}

	/* count used bits in last bitmap block */
	block_r = SB_BLOCK_COUNT(s) -
			(reiserfs_bmap_count(s) - 1) * s->s_blocksize * 8;

	/* count bitmap blocks in new fs */
	bmap_nr_new = block_count_new / (s->s_blocksize * 8);
	block_r_new = block_count_new - bmap_nr_new * s->s_blocksize * 8;
	if (block_r_new)
		bmap_nr_new++;
	else
		block_r_new = s->s_blocksize * 8;

	/* save old values */
	block_count = SB_BLOCK_COUNT(s);
	bmap_nr = reiserfs_bmap_count(s);

	/* resizing of reiserfs bitmaps (journal and real), if needed */
	if (bmap_nr_new > bmap_nr) {
		/* reallocate journal bitmaps */
		if (reiserfs_allocate_list_bitmaps(s, jbitmap, bmap_nr_new) < 0) {
			printk
			    ("reiserfs_resize: unable to allocate memory for journal bitmaps\n");
			return -ENOMEM;
		}
		/* the new journal bitmaps are zero filled, now we copy in the bitmap
		 ** node pointers from the old journal bitmap structs, and then
		 ** transfer the new data structures into the journal struct.
		 **
		 ** using the copy_size var below allows this code to work for
		 ** both shrinking and expanding the FS.
		 */
		copy_size = bmap_nr_new < bmap_nr ? bmap_nr_new : bmap_nr;
		copy_size =
		    copy_size * sizeof(struct reiserfs_list_bitmap_node *);
		for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
			struct reiserfs_bitmap_node **node_tmp;
			jb = SB_JOURNAL(s)->j_list_bitmap + i;
			memcpy(jbitmap[i].bitmaps, jb->bitmaps, copy_size);

			/* just in case vfree schedules on us, copy the new
			 ** pointer into the journal struct before freeing the
			 ** old one
			 */
			node_tmp = jb->bitmaps;
			jb->bitmaps = jbitmap[i].bitmaps;
			vfree(node_tmp);
		}

		/* allocate additional bitmap blocks, reallocate array of bitmap
		 * block pointers */
		bitmap =
		    vzalloc(sizeof(struct reiserfs_bitmap_info) * bmap_nr_new);
		if (!bitmap) {
			/* Journal bitmaps are still supersized, but the memory isn't
			 * leaked, so I guess it's ok */
			printk("reiserfs_resize: unable to allocate memory.\n");
			return -ENOMEM;
		}
		for (i = 0; i < bmap_nr; i++)
			bitmap[i] = old_bitmap[i];

		/* This doesn't go through the journal, but it doesn't have to.
		 * The changes are still atomic: We're synced up when the journal
		 * transaction begins, and the new bitmaps don't matter if the
		 * transaction fails. */
		for (i = bmap_nr; i < bmap_nr_new; i++) {
			/* don't use read_bitmap_block since it will cache
			 * the uninitialized bitmap */
			bh = sb_bread(s, i * s->s_blocksize * 8);
			if (!bh) {
				vfree(bitmap);
				return -EIO;
			}
			memset(bh->b_data, 0, sb_blocksize(sb));
			reiserfs_set_le_bit(0, bh->b_data);
			reiserfs_cache_bitmap_metadata(s, bh, bitmap + i);

			set_buffer_uptodate(bh);
			mark_buffer_dirty(bh);
			reiserfs_write_unlock(s);
			sync_dirty_buffer(bh);
			reiserfs_write_lock(s);
			// update bitmap_info stuff
			bitmap[i].free_count = sb_blocksize(sb) * 8 - 1;
			brelse(bh);
		}
		/* free old bitmap blocks array */
		SB_AP_BITMAP(s) = bitmap;
		vfree(old_bitmap);
	}

	/* begin transaction, if there was an error, it's fine. Yes, we have
	 * incorrect bitmaps now, but none of it is ever going to touch the
	 * disk anyway. */
	err = journal_begin(&th, s, 10);
	if (err)
		return err;

	/* Extend old last bitmap block - new blocks have been made available */
	info = SB_AP_BITMAP(s) + bmap_nr - 1;
	bh = reiserfs_read_bitmap_block(s, bmap_nr - 1);
	if (!bh) {
		int jerr = journal_end(&th, s, 10);
		if (jerr)
			return jerr;
		return -EIO;
	}

	reiserfs_prepare_for_journal(s, bh, 1);
	for (i = block_r; i < s->s_blocksize * 8; i++)
		reiserfs_clear_le_bit(i, bh->b_data);
	info->free_count += s->s_blocksize * 8 - block_r;

	journal_mark_dirty(&th, s, bh);
	brelse(bh);

	/* Correct new last bitmap block - It may not be full */
	info = SB_AP_BITMAP(s) + bmap_nr_new - 1;
	bh = reiserfs_read_bitmap_block(s, bmap_nr_new - 1);
	if (!bh) {
		int jerr = journal_end(&th, s, 10);
		if (jerr)
			return jerr;
		return -EIO;
	}

	reiserfs_prepare_for_journal(s, bh, 1);
	for (i = block_r_new; i < s->s_blocksize * 8; i++)
		reiserfs_set_le_bit(i, bh->b_data);
	journal_mark_dirty(&th, s, bh);
	brelse(bh);

	info->free_count -= s->s_blocksize * 8 - block_r_new;
	/* update super */
	reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
	free_blocks = SB_FREE_BLOCKS(s);
	PUT_SB_FREE_BLOCKS(s,
			   free_blocks + (block_count_new - block_count -
					  (bmap_nr_new - bmap_nr)));
	PUT_SB_BLOCK_COUNT(s, block_count_new);
	PUT_SB_BMAP_NR(s, bmap_would_wrap(bmap_nr_new) ? : bmap_nr_new);

	journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB(s));

	SB_JOURNAL(s)->j_must_wait = 1;
	return journal_end(&th, s, 10);
}
Example #15
static void go_through (reiserfs_filsys_t fs)
{
    struct buffer_head * bh;
    int i;
    int what_node;
    unsigned long done = 0, total;

    if (fsck_mode (fs) == DO_TEST) {
	/* just to test pass0_correct_leaf */
	bh = bread (fs->s_dev, stats(fs)->test, fs->s_blocksize);

	/*
	if (is_leaf_bad (bh)) {
	    fsck_progress ("###############  bad #################\n");
	}
	*/
	pass0_correct_leaf (fs, bh);
	
	print_block (stdout, fs, bh, 3, -1, -1);

	if (is_leaf_bad (bh)) {
	    fsck_progress ("############### still bad #################\n");
	}
	brelse (bh);
	reiserfs_free (fs);
	exit(4);
    }


    total = reiserfs_bitmap_ones (fsck_disk_bitmap (fs));
    fsck_progress ("\nPass 0 (%lu (of %lu) blocks will be read):\n",
		   total, SB_BLOCK_COUNT (fs));


    for (i = 0; i < SB_BLOCK_COUNT (fs); i ++) {
	if (!is_to_be_read (fs, i))
	    continue;

	print_how_far (&done, total, 1, fsck_quiet (fs));

	bh = bread (fs->s_dev, i, fs->s_blocksize);
	if (!bh) {
	    /* we were reading one block at a time, and failed, so mark
	       the block bad */
	    fsck_progress ("pass0: reading block %lu failed\n", i);
	    continue;
	}

	if (not_data_block (fs, i))
	    reiserfs_panic ("not data block found");
	
	stats (fs)->analyzed ++;
	what_node = who_is_this (bh->b_data, fs->s_blocksize);
	if ( what_node != THE_LEAF ) {
	    brelse (bh);
	    continue;
	}
	pass0_correct_leaf (fs, bh);
	brelse (bh);
    }


#if 0
    for (i = 0; i < SB_BLOCK_COUNT (fs); i += nr_to_read) {
	to_scan = how_many_to_scan (fs, i, nr_to_read);
	if (to_scan) {
	    print_how_far (&done, total, to_scan, fsck_quiet (fs));

	    /* at least one of nr_to_read blocks is to be checked */
	    bbh = bread (fs->s_dev, i / nr_to_read, fs->s_blocksize * nr_to_read);
	    if (bbh) {
		for (j = 0; j < nr_to_read; j ++) {
		    if (!is_to_be_read (fs, i + j))
			continue;

		    if (not_data_block (fs, i + j))
			reiserfs_panic ("not data block found");

		    stats (fs)->analyzed ++;

		    data = bbh->b_data + j * fs->s_blocksize;
		    what_node = who_is_this (data, fs->s_blocksize);
		    if ( what_node != THE_LEAF ) {
			continue;
		    }

		    /* the node looks like a leaf, but it still can be
		       not perfect */
		    bh = make_buffer (fs->s_dev, i + j, fs->s_blocksize, data);

		    /*printf ("block %lu .. ", bh->b_blocknr);fflush(stdout);*/
		    pass0_correct_leaf (fs, bh);
		    /*printf ("ok\n");fflush(stdout);*/

		    brelse (bh);
		}

		if (buffer_dirty (bbh))
		    bwrite (bbh);
		bforget (bbh);
	    } else {
		done -= to_scan;
		/* bread failed */
		if (nr_to_read != 1) {
		    /* we tried to read a bunch of blocks; try reading them one at a time */
		    nr_to_read = 1;
		    i --;
		    continue;
		} else {
		    /* we were reading one block at a time, and failed, so mark
                       the block bad */
		    fsck_progress ("pass0: block %lu is bad, marked used\n", i);
		}
	    }
	}

	if (nr_to_read == 1 && ((i + 1) % NR_TO_READ) == 0) {
	    /* we have read NR_TO_READ blocks one at a time; switch back to
               reading NR_TO_READ blocks at a time */
	    i -= (NR_TO_READ - 1);
	    nr_to_read = NR_TO_READ;
	}
    }
#endif


    /* just in case */
    mark_objectid_really_used (proper_id_map (fs), REISERFS_ROOT_OBJECTID);

    fsck_progress ("\n");

    if (fsck_save_leaf_bitmap (fs)) {
	reiserfs_bitmap_save (stats (fs)->new_bitmap_file_name, leaves_bitmap);
    }
}
Example #16
/*
 *	jfs_extendfs()
 *
 * function: extend file system;
 *
 *   |-------------------------------|----------|----------|
 *   file system space               fsck       inline log
 *                                   workspace  space
 *
 * input:
 *	new LVSize: in LV blocks (required)
 *	new LogSize: in LV blocks (optional)
 *	new FSSize: in LV blocks (optional)
 *
 * new configuration:
 * 1. set new LogSize as specified or default from new LVSize;
 * 2. compute new FSCKSize from new LVSize;
 * 3. set new FSSize as MIN(FSSize, LVSize-(LogSize+FSCKSize)) where
 *    assert(new FSSize >= old FSSize),
 *    i.e., the file system must not be shrunk;
 */
int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
{
	int rc = 0;
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	struct inode *ipbmap = sbi->ipbmap;
	struct inode *ipbmap2;
	struct inode *ipimap = sbi->ipimap;
	struct jfs_log *log = sbi->log;
	struct bmap *bmp = sbi->bmap;
	s64 newLogAddress, newFSCKAddress;
	int newFSCKSize;
	s64 newMapSize = 0, mapSize;
	s64 XAddress, XSize, nblocks, xoff, xaddr, t64;
	s64 oldLVSize;
	s64 newFSSize;
	s64 VolumeSize;
	int newNpages = 0, nPages, newPage, xlen, t32;
	int tid;
	int log_formatted = 0;
	struct inode *iplist[1];
	struct jfs_superblock *j_sb, *j_sb2;
	uint old_agsize;
	struct buffer_head *bh, *bh2;

	/* If the volume hasn't grown, get out now */

	if (sbi->mntflag & JFS_INLINELOG)
		oldLVSize = addressPXD(&sbi->logpxd) + lengthPXD(&sbi->logpxd);
	else
		oldLVSize = addressPXD(&sbi->fsckpxd) +
		    lengthPXD(&sbi->fsckpxd);

	if (oldLVSize >= newLVSize) {
		printk(KERN_WARNING
		       "jfs_extendfs: volume hasn't grown, returning\n");
		goto out;
	}

	VolumeSize = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits;

	if (VolumeSize) {
		if (newLVSize > VolumeSize) {
			printk(KERN_WARNING "jfs_extendfs: invalid size\n");
			rc = -EINVAL;
			goto out;
		}
	} else {
		/* check the device */
		bh = sb_bread(sb, newLVSize - 1);
		if (!bh) {
			printk(KERN_WARNING "jfs_extendfs: invalid size\n");
			rc = -EINVAL;
			goto out;
		}
		bforget(bh);
	}

	/* Can't extend write-protected drive */

	if (isReadOnly(ipbmap)) {
		printk(KERN_WARNING "jfs_extendfs: read-only file system\n");
		rc = -EROFS;
		goto out;
	}

	/*
	 *	reconfigure LV spaces
	 *	---------------------
	 *
	 * validate new size, or, if not specified, determine new size
	 */

	/*
	 * reconfigure inline log space:
	 */
	if ((sbi->mntflag & JFS_INLINELOG)) {
		if (newLogSize == 0) {
			/*
			 * no size specified: default to 1/256 of aggregate
			 * size; rounded up to a megabyte boundary;
			 */
			newLogSize = newLVSize >> 8;
			t32 = (1 << (20 - sbi->l2bsize)) - 1;
			newLogSize = (newLogSize + t32) & ~t32;
			newLogSize =
			    min(newLogSize, MEGABYTE32 >> sbi->l2bsize);
		} else {
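
The inline-log sizing above rounds newLogSize up to a megabyte boundary with the mask idiom (x + t32) & ~t32, where t32 + 1 is the number of filesystem blocks per MiB. A standalone check of the idiom, assuming 4 KiB blocks (l2bsize = 12):

#include <stdio.h>

int main(void)
{
	int l2bsize = 12;			/* log2 of a 4 KiB fs block */
	int t32 = (1 << (20 - l2bsize)) - 1;	/* 255: blocks-per-MiB minus one */
	int newLogSize = 1000;			/* in fs blocks */

	newLogSize = (newLogSize + t32) & ~t32;
	printf("%d\n", newLogSize);		/* 1024: next multiple of 256 */
	return 0;
}
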
Example #17
void pack_partition (struct super_block * s)
{
    int i, j, k;
    uint32_t blocknumber32;
    uint16_t reclen16, data16;
    __u32 done = 0;
    char * data;
    long long bytes_to_transfer = 0;
    struct buffer_head * bh;
    int total_block_number;


    total_block_number = get_total_block_number ();
    

    /* write filesystem's block size to stdout as 16 bit number */
    reclen16 = htons (s->s_blocksize);
    if (opt_pack == 'p' || opt_pack_all == 'p')
	write (1, &reclen16, sizeof (uint16_t));
    bytes_to_transfer = sizeof (uint16_t);

    /* go through blocks which are marked used in cautious bitmap */
    for (i = 0; i < SB_BMAP_NR (s); i ++) {
	for (j = 0; j < s->s_blocksize; j ++) {
	    /* make sure that we are not out of the device */
	    if (i * s->s_blocksize * 8 + j * 8 == SB_BLOCK_COUNT (s))
		goto out_of_bitmap;

	    if (i * s->s_blocksize * 8 + j * 8 + 8 > SB_BLOCK_COUNT (s))
		die ("build_the_tree: Out of bitmap");

	    if (opt_pack_all == 0)
		if (SB_AP_BITMAP (s)[i]->b_data[j] == 0) {
		    /* skip busy block if 'a' not specified */
		    continue;
		}

	    /* read 8 blocks at once */
	    bh = bread (s->s_dev, i * s->s_blocksize + j, s->s_blocksize * 8);
	    for (k = 0; k < 8; k ++) {
		__u32 block;
		
		block = i * s->s_blocksize * 8 + j * 8 + k;
		
		if (opt_pack_all == 0 && (SB_AP_BITMAP (s)[i]->b_data[j] & (1 << k)) == 0)
		    continue;
#if 0
		if ((SB_AP_BITMAP (s)[i]->b_data[j] & (1 << k)) == 0  || /* k-th block is free */
		    block < SB_BUFFER_WITH_SB (s)->b_blocknr) /* is in skipped for drive manager area */
		    continue;
#endif
		
		print_how_far (&done, total_block_number);
		
		data = bh->b_data + k * s->s_blocksize;

		if (not_formatted_node (data, s->s_blocksize)) {
		    /* ok, could not find a formatted node here. But
                       this can be a commit block, or a bitmap which
                       has to be transferred */
		    if (!not_data_block (s, block)) {
			/* this is a usual unformatted node. Transfer
                           its number only, to erase previously existing
                           formatted nodes on the partition we will
                           apply the transferred metadata to */
	    
			/* size of following record in network byte order */
			reclen16 = htons (2);

			/* the record itself */
			data16 = htons (MAX_HEIGHT + 1);/*?*/
			data = (char *)&data16;
		    } else {
			/* super block and bitmap blocks must be transferred as they are */
			/* size of record  */
			reclen16 = htons (s->s_blocksize);
	    
			/* the record itself: data already points at it */
		    }
		} else {
		    /* any kind of formatted nodes gets here (super
                       block, desc block of journal): FIXME: it would
                       be useful to be able to find commit blocks */
		    zero_direct_items (data);
		    /* FIXME: do other packing */
		    /* write size of following record */
		    reclen16 = htons (s->s_blocksize);
		    
		    /* the record itself: data already points at it */

#if 0
		    if (blkh->blk_level > DISK_LEAF_NODE_LEVEL) {
			/* block must look like internal node on the target
			   partition. But (currently) fsck do not consider internal
			   nodes, therefore we do not have to transfer contents of
			   internal nodes */
	    
			/* size of following record in network byte order */
			reclen16 = htons (2);
	    
			/* the record itself */
			data16 = htons (DISK_LEAF_NODE_LEVEL + 1);
			data = (char *)&data16;	  
		    } else {
	    
			/* leaf node found */
			ih = (struct item_head *)(blkh + 1);
	    
			/* fill direct items with 0s */
			for (l = 0; l < blkh->blk_nr_item; l ++, ih ++)
			    if (I_IS_DIRECT_ITEM (ih)) {
				direct_items ++;
				direct_item_total_length += ih->ih_item_len;
				memset ((char *)blkh + ih->ih_item_location, 0, ih->ih_item_len);
			    }
	    
			/* write size of following record */
			reclen16 = htons (s->s_blocksize);
	    
			/* the record itself */
			data = (char *)blkh;
		    }
#endif
		}
	  
		/*fprintf (stderr, "block %d, reclen %d\n", block, ntohs (reclen16));*/
	
		/* write block number */
		blocknumber32 = htonl (block);
		bytes_to_transfer += sizeof (uint32_t) + sizeof (uint16_t) + ntohs (reclen16);
		if (opt_pack == 'p' || opt_pack_all == 'p') {
		    write (1, &blocknumber32, sizeof (uint32_t));
		    /* write record len */
		    write (1, &reclen16, sizeof (uint16_t));
		    /* write the record */
		    write (1, data, ntohs (reclen16));
		}
	    }
      
	    bforget (bh);
	}
    }
    
 out_of_bitmap:
    fprintf (stderr, "done\n");
    if (opt_pack == 'c' || opt_pack_all == 'c')
	fprintf (stderr, "Bytes to transfer %Ld, sequential 0s %d in %d sequences (%d items (%d unreachable))\n",
		 bytes_to_transfer, direct_item_total_length, direct_items, items, unreachable_items);
    else
	fprintf (stderr, "Bytes dumped %Ld, sequential 0s %d in %d sequences\n",
		 bytes_to_transfer, direct_item_total_length, direct_items);
}
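
pack_partition() walks each bitmap block a byte at a time: byte j covers 8 filesystem blocks, and bit k of that byte selects block i * blocksize * 8 + j * 8 + k. A standalone sketch of that block-number arithmetic over a tiny bitmap:

#include <stdio.h>

#define TOY_BLOCKSIZE 4	/* bytes per bitmap block, so 32 fs blocks each */

int main(void)
{
	/* one bitmap block (i = 0): blocks 0, 9, and 17 are marked used */
	unsigned char bitmap[TOY_BLOCKSIZE] = { 0x01, 0x02, 0x02, 0x00 };
	int i = 0, j, k;

	for (j = 0; j < TOY_BLOCKSIZE; j++)
		for (k = 0; k < 8; k++)
			if (bitmap[j] & (1 << k))
				printf("block %d is used\n",
				       i * TOY_BLOCKSIZE * 8 + j * 8 + k);
	return 0;
}
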