Code Example #1
File: super.c  Project: niubl/camera_project
void check_bitmap (struct super_block * s)
{
  int i = 0;
  int free = 0;
  char * buf;

  while (i < SB_BLOCK_COUNT (s)) {
    buf = SB_AP_BITMAP (s)[i / (s->s_blocksize * 8)]->b_data;
    if (!reiserfs_test_le_bit (i % (s->s_blocksize * 8), buf))
      free ++;
    i ++;
  }

  if (free != SB_FREE_BLOCKS (s))
    reiserfs_warning ("vs-4000: check_bitmap: %d free blocks, must be %d\n",
		      free, SB_FREE_BLOCKS (s));
}
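
The loop above indexes the right bitmap buffer with plain integer arithmetic: each bitmap block of s->s_blocksize bytes covers s_blocksize * 8 filesystem blocks. Below is a minimal standalone sketch (not reiserfs code; the block size and block number are assumptions) of that index math.

#include <stdio.h>

int main(void)
{
    unsigned long blocksize = 4096;                    /* bytes per FS block (assumed) */
    unsigned long bits_per_bmap = blocksize * 8;       /* blocks covered by one bitmap block */
    unsigned long block = 100000;                      /* arbitrary block number */

    unsigned long bmap_index = block / bits_per_bmap;  /* which bitmap block */
    unsigned long bit_offset = block % bits_per_bmap;  /* which bit inside it */

    printf("block %lu -> bitmap block %lu, bit %lu\n",
           block, bmap_index, bit_offset);             /* bitmap block 3, bit 1696 */
    return 0;
}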
Code Example #2
/* Tries to find a contiguous zero-bit window (of a given size) in a given
 * region of the bitmap and place new blocks there. Returns the number of
 * allocated blocks. */
static int scan_bitmap (struct reiserfs_transaction_handle *th,
                        unsigned long *start, unsigned long finish,
                        int min, int max, int unfm, unsigned long file_block)
{
    int nr_allocated=0;
    struct super_block * s = th->t_super;
    /* find every bm and bmap and bmap_nr in this file, and change them all to bitmap_blocknr
     * - Hans, it is not a block number - Zam. */

    int bm, off;
    int end_bm, end_off;
    int off_max = s->s_blocksize << 3;

    PROC_INFO_INC( s, scan_bitmap.call );
    if ( SB_FREE_BLOCKS(s) <= 0)
        return 0; // No point in looking for more free blocks

    get_bit_address (s, *start, &bm, &off);
    get_bit_address (s, finish, &end_bm, &end_off);

    // With this option set first we try to find a bitmap that is at least 10%
    // free, and if that fails, then we fall back to old whole bitmap scanning
    if ( TEST_OPTION(skip_busy, s) && SB_FREE_BLOCKS(s) > SB_BLOCK_COUNT(s)/20 ) {
        for (; bm < end_bm; bm++, off = 0) {
            if ( ( off && (!unfm || (file_block != 0))) || SB_AP_BITMAP(s)[bm].free_count > (s->s_blocksize << 3) / 10 )
                nr_allocated = scan_bitmap_block(th, bm, &off, off_max, min, max, unfm);
            if (nr_allocated)
                goto ret;
        }
        get_bit_address (s, *start, &bm, &off);
    }

    for (; bm < end_bm; bm++, off = 0) {
        nr_allocated = scan_bitmap_block(th, bm, &off, off_max, min, max, unfm);
        if (nr_allocated)
            goto ret;
    }

    nr_allocated = scan_bitmap_block(th, bm, &off, end_off + 1, min, max, unfm);

ret:
    *start = bm * off_max + off;
    return nr_allocated;

}
Code Example #3
File: bitmap.c  Project: Dronevery/JetsonTK1-kernel
/* This function estimates how many pages we will be able to write to the FS;
   used for reiserfs_file_write() purposes for now. */
int reiserfs_can_fit_pages ( struct super_block *sb /* superblock of filesystem
						       to estimate space */ )
{
    int space;

    spin_lock(&REISERFS_SB(sb)->bitmap_lock);
    space = (SB_FREE_BLOCKS(sb) - REISERFS_SB(sb)->reserved_blocks) >> ( PAGE_CACHE_SHIFT - sb->s_blocksize_bits);
    spin_unlock(&REISERFS_SB(sb)->bitmap_lock);

    return space>0?space:0;
}
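
The shift by (PAGE_CACHE_SHIFT - s_blocksize_bits) converts a count of free blocks into a count of whole pages. A standalone arithmetic sketch (not kernel code; the 4 KB page size and 1 KB block size are assumptions):

#include <stdio.h>

int main(void)
{
    int page_shift = 12;            /* 4096-byte pages (assumed) */
    int blocksize_bits = 10;        /* 1024-byte blocks (assumed), so 4 blocks per page */
    long free_blocks = 1000;
    long reserved_blocks = 8;

    long pages = (free_blocks - reserved_blocks) >> (page_shift - blocksize_bits);
    printf("%ld pages fit\n", pages > 0 ? pages : 0);   /* (1000 - 8) >> 2 = 248 */
    return 0;
}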
Code Example #4
File: bitmap.c  Project: Dronevery/JetsonTK1-kernel
int reiserfs_allocate_blocknrs(reiserfs_blocknr_hint_t *hint,
                               b_blocknr_t * new_blocknrs, int amount_needed,
                               int reserved_by_us /* Amount of blocks we have
						      already reserved */)
{
    int initial_amount_needed = amount_needed;
    int ret;
    struct super_block *s = hint->th->t_super;

    /* Check if there is enough space, taking into account reserved space */
    if ( SB_FREE_BLOCKS(s) - REISERFS_SB(s)->reserved_blocks <
            amount_needed - reserved_by_us)
        return NO_DISK_SPACE;
    /* should this be if !hint->inode &&  hint->preallocate? */
    /* do you mean hint->formatted_node can be removed ? - Zam */
    /* hint->formatted_node cannot be removed because we try to access
       inode information here, and there is often no inode associated with
       metadata allocations - green */

    if (!hint->formatted_node && hint->preallocate) {
        amount_needed = use_preallocated_list_if_available
                        (hint, new_blocknrs, amount_needed);
        if (amount_needed == 0)	/* we got all the blocknrs we need from the
                                   prealloc. list */
            return CARRY_ON;
        new_blocknrs += (initial_amount_needed - amount_needed);
    }

    /* find search start and save it in hint structure */
    determine_search_start(hint, amount_needed);
    if (hint->search_start >= SB_BLOCK_COUNT(s))
        hint->search_start = SB_BLOCK_COUNT(s) - 1;

    /* allocation itself; fill new_blocknrs and preallocation arrays */
    ret = blocknrs_and_prealloc_arrays_from_search_start
          (hint, new_blocknrs, amount_needed);

    /* we used prealloc. list to fill (partially) new_blocknrs array. If final allocation fails we
     * need to return blocks back to prealloc. list or just free them. -- Zam (I chose second
     * variant) */

    if (ret != CARRY_ON) {
        while (amount_needed ++ < initial_amount_needed) {
            reiserfs_free_block(hint->th, hint->inode, *(--new_blocknrs), 1);
        }
    }
    return ret;
}
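
On failure, the while loop at the end walks the new_blocknrs pointer backwards to release exactly the blocks that were already taken from the preallocation list. A standalone sketch of that rollback pattern (not kernel code; free_one_block() is a hypothetical stand-in for reiserfs_free_block()):

#include <stdio.h>

static void free_one_block(unsigned long blk)
{
    printf("freeing block %lu\n", blk);        /* stand-in for reiserfs_free_block() */
}

int main(void)
{
    unsigned long blocks[4] = { 101, 102, 103, 104 };
    int initial_amount_needed = 4;
    int amount_needed = 2;          /* 2 blocks were still missing when allocation failed */
    /* pointer was advanced past the 2 entries filled from the prealloc list */
    unsigned long *new_blocknrs = blocks + (initial_amount_needed - amount_needed);

    /* same loop shape as above: undo the blocks already handed out */
    while (amount_needed++ < initial_amount_needed)
        free_one_block(*(--new_blocknrs));      /* frees 102, then 101 */
    return 0;
}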
Code Example #5
File: bitmap.c  Project: Dronevery/JetsonTK1-kernel
/* Tries to find a contiguous zero-bit window (of a given size) in a given
 * region of the bitmap and place new blocks there. Returns the number of
 * allocated blocks. */
static int scan_bitmap (struct reiserfs_transaction_handle *th,
                        b_blocknr_t *start, b_blocknr_t finish,
                        int min, int max, int unfm, unsigned long file_block)
{
    int nr_allocated=0;
    struct super_block * s = th->t_super;
    /* find every bm and bmap and bmap_nr in this file, and change them all to bitmap_blocknr
     * - Hans, it is not a block number - Zam. */

    int bm, off;
    int end_bm, end_off;
    int off_max = s->s_blocksize << 3;

    BUG_ON (!th->t_trans_id);

    PROC_INFO_INC( s, scan_bitmap.call );
    if ( SB_FREE_BLOCKS(s) <= 0)
        return 0; // No point in looking for more free blocks

    get_bit_address (s, *start, &bm, &off);
    get_bit_address (s, finish, &end_bm, &end_off);
    if (bm > SB_BMAP_NR(s))
        return 0;
    if (end_bm > SB_BMAP_NR(s))
        end_bm = SB_BMAP_NR(s);

    /* When the bitmap is more than 10% free, anyone can allocate.
     * When it's less than 10% free, only files that already use the
     * bitmap are allowed. Once we pass 80% full, this restriction
     * is lifted.
     *
     * We do this so that files that grow later still have space close to
     * their original allocation. This improves locality, and presumably
     * performance as a result.
     *
     * This is only an allocation policy and does not make up for getting a
     * bad hint. Decent hinting must be implemented for this to work well.
     */
    if ( TEST_OPTION(skip_busy, s) && SB_FREE_BLOCKS(s) > SB_BLOCK_COUNT(s)/20 ) {
        for (; bm < end_bm; bm++, off = 0) {
            if ( ( off && (!unfm || (file_block != 0))) || SB_AP_BITMAP(s)[bm].free_count > (s->s_blocksize << 3) / 10 )
                nr_allocated = scan_bitmap_block(th, bm, &off, off_max, min, max, unfm);
            if (nr_allocated)
                goto ret;
        }
        /* we know from above that start is a reasonable number */
        get_bit_address (s, *start, &bm, &off);
    }

    for (; bm < end_bm; bm++, off = 0) {
        nr_allocated = scan_bitmap_block(th, bm, &off, off_max, min, max, unfm);
        if (nr_allocated)
            goto ret;
    }

    nr_allocated = scan_bitmap_block(th, bm, &off, end_off + 1, min, max, unfm);

ret:
    *start = bm * off_max + off;
    return nr_allocated;

}
Code Example #6
File: bitmap.c  Project: Dronevery/JetsonTK1-kernel
/* it searches for a window of zero bits with given minimum and maximum lengths in one bitmap
 * block; */
static int scan_bitmap_block (struct reiserfs_transaction_handle *th,
                              int bmap_n, int *beg, int boundary, int min, int max, int unfm)
{
    struct super_block *s = th->t_super;
    struct reiserfs_bitmap_info *bi=&SB_AP_BITMAP(s)[bmap_n];
    int end, next;
    int org = *beg;

    BUG_ON (!th->t_trans_id);

    RFALSE(bmap_n >= SB_BMAP_NR (s), "Bitmap %d is out of range (0..%d)",bmap_n, SB_BMAP_NR (s) - 1);
    PROC_INFO_INC( s, scan_bitmap.bmap );
    /* this is unclear and lacks comments, explain how journal bitmaps
       work here for the reader.  Convey a sense of the design here. What
       is a window? */
    /* - I mean `a window of zero bits' as in description of this function - Zam. */

    if ( !bi ) {
        reiserfs_warning (s, "NULL bitmap info pointer for bitmap %d", bmap_n);
        return 0;
    }
    if (buffer_locked (bi->bh)) {
        PROC_INFO_INC( s, scan_bitmap.wait );
        __wait_on_buffer (bi->bh);
    }

    while (1) {
cont:
        if (bi->free_count < min)
            return 0; // No free blocks in this bitmap

        /* search for the first zero bit -- beginning of a window */
        *beg = reiserfs_find_next_zero_le_bit
               ((unsigned long*)(bi->bh->b_data), boundary, *beg);

        if (*beg + min > boundary) {
            /* the search for a zero bit failed, or the rest of the bitmap
             * block cannot contain a zero window of the minimum size */
            return 0;
        }

        if (unfm && is_block_in_journal(s,bmap_n, *beg, beg))
            continue;
        /* first zero bit found; we check next bits */
        for (end = *beg + 1;; end ++) {
            if (end >= *beg + max || end >= boundary || reiserfs_test_le_bit (end, bi->bh->b_data)) {
                next = end;
                break;
            }
            /* finding the other end of zero bit window requires looking into journal structures (in
             * case of searching for free blocks for unformatted nodes) */
            if (unfm && is_block_in_journal(s, bmap_n, end, &next))
                break;
        }

        /* now (*beg) points to beginning of zero bits window,
         * (end) points to one bit after the window end */
        if (end - *beg >= min) { /* it seems we have found window of proper size */
            int i;
            reiserfs_prepare_for_journal (s, bi->bh, 1);
            /* try to mark all blocks as used, checking whether they are still free */
            for (i = *beg; i < end; i++) {
                /* It seems that we should not check in journal again. */
                if (reiserfs_test_and_set_le_bit (i, bi->bh->b_data)) {
                    /* bit was set by another process
                     * while we slept in prepare_for_journal() */
                    PROC_INFO_INC( s, scan_bitmap.stolen );
                    if (i >= *beg + min)	{
                        /* we can continue with a smaller set of allocated blocks,
                         * if the length of this set is greater than or equal to `min' */
                        end = i;
                        break;
                    }
                    /* otherwise we clear all bits that were set ... */
                    while (--i >= *beg)
                        reiserfs_test_and_clear_le_bit (i, bi->bh->b_data);
                    reiserfs_restore_prepared_buffer (s, bi->bh);
                    *beg = org;
                    /* ... and search again in current block from beginning */
                    goto cont;
                }
            }
            bi->free_count -= (end - *beg);
            journal_mark_dirty (th, s, bi->bh);

            /* free block count calculation */
            reiserfs_prepare_for_journal (s, SB_BUFFER_WITH_SB(s), 1);
            PUT_SB_FREE_BLOCKS(s, SB_FREE_BLOCKS(s) - (end - *beg));
            journal_mark_dirty (th, s, SB_BUFFER_WITH_SB(s));

            return end - (*beg);
        } else {
            *beg = next;
        }
    }
}
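
The core of scan_bitmap_block() is a search for a run of zero bits at least min long and at most max long. The following user-space sketch (not reiserfs code; find_zero_window() and test_bit() are simplified stand-ins for the reiserfs_*_le_bit helpers, without the journal and concurrency handling) shows that search in isolation:

#include <stdio.h>
#include <string.h>

static int test_bit(const unsigned char *map, int nr)
{
    return (map[nr / 8] >> (nr % 8)) & 1;
}

/* Return the start of a run of >= min zero bits (capped at max), or -1. */
static int find_zero_window(const unsigned char *map, int nbits,
                            int min, int max, int *len)
{
    for (int beg = 0; beg + min <= nbits; beg++) {
        if (test_bit(map, beg))
            continue;                          /* a window must start at a zero bit */
        int end = beg + 1;
        while (end < nbits && end < beg + max && !test_bit(map, end))
            end++;
        if (end - beg >= min) {                /* window of proper size found */
            *len = end - beg;
            return beg;
        }
        beg = end;                             /* skip past the too-short run */
    }
    return -1;
}

int main(void)
{
    unsigned char map[2];
    memset(map, 0xff, sizeof(map));            /* all bits used ... */
    for (int i = 5; i <= 9; i++)               /* ... except a 5-bit hole at 5..9 */
        map[i / 8] &= ~(1u << (i % 8));

    int len;
    int beg = find_zero_window(map, 16, 3, 8, &len);
    printf("window at bit %d, length %d\n", beg, len);   /* bit 5, length 5 */
    return 0;
}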
Code Example #7
int reiserfs_resize (struct super_block * s, unsigned long block_count_new)
{
	struct reiserfs_super_block * sb;
        struct reiserfs_bitmap_info *bitmap;
	struct buffer_head * bh;
	struct reiserfs_transaction_handle th;
	unsigned int bmap_nr_new, bmap_nr;
	unsigned int block_r_new, block_r;
	
	struct reiserfs_list_bitmap * jb;
	struct reiserfs_list_bitmap jbitmap[JOURNAL_NUM_BITMAPS];
	
	unsigned long int block_count, free_blocks;
	int i;
	int copy_size ;

	sb = SB_DISK_SUPER_BLOCK(s);

	if (SB_BLOCK_COUNT(s) >= block_count_new) {
		printk("can\'t shrink filesystem on-line\n");
		return -EINVAL;
	}

	/* check the device size */
	bh = sb_bread(s, block_count_new - 1);
	if (!bh) {
		printk("reiserfs_resize: can\'t read last block\n");
		return -EINVAL;
	}	
	bforget(bh);

	/* old disk layout detection; those partitions can be mounted, but
	 * cannot be resized */
	if (SB_BUFFER_WITH_SB(s)->b_blocknr *	SB_BUFFER_WITH_SB(s)->b_size 
		!= REISERFS_DISK_OFFSET_IN_BYTES ) {
		printk("reiserfs_resize: unable to resize a reiserfs without distributed bitmap (fs version < 3.5.12)\n");
		return -ENOTSUPP;
	}
       
	/* count used bits in last bitmap block */
	block_r = SB_BLOCK_COUNT(s) -
	        (SB_BMAP_NR(s) - 1) * s->s_blocksize * 8;
	
	/* count bitmap blocks in new fs */
	bmap_nr_new = block_count_new / ( s->s_blocksize * 8 );
	block_r_new = block_count_new - bmap_nr_new * s->s_blocksize * 8;
	if (block_r_new) 
		bmap_nr_new++;
	else
		block_r_new = s->s_blocksize * 8;

	/* save old values */
	block_count = SB_BLOCK_COUNT(s);
	bmap_nr     = SB_BMAP_NR(s);

	/* resizing of reiserfs bitmaps (journal and real), if needed */
	if (bmap_nr_new > bmap_nr) {	    
	    /* reallocate journal bitmaps */
	    if (reiserfs_allocate_list_bitmaps(s, jbitmap, bmap_nr_new) < 0) {
		printk("reiserfs_resize: unable to allocate memory for journal bitmaps\n");
		unlock_super(s) ;
		return -ENOMEM ;
	    }
	    /* the new journal bitmaps are zero filled, now we copy in the bitmap
	    ** node pointers from the old journal bitmap structs, and then
	    ** transfer the new data structures into the journal struct.
	    **
	    ** using the copy_size var below allows this code to work for
	    ** both shrinking and expanding the FS.
	    */
	    copy_size = bmap_nr_new < bmap_nr ? bmap_nr_new : bmap_nr ;
	    copy_size = copy_size * sizeof(struct reiserfs_list_bitmap_node *) ;
	    for (i = 0 ; i < JOURNAL_NUM_BITMAPS ; i++) {
		struct reiserfs_bitmap_node **node_tmp ;
		jb = SB_JOURNAL(s)->j_list_bitmap + i ;
		memcpy(jbitmap[i].bitmaps, jb->bitmaps, copy_size) ;

		/* just in case vfree schedules on us, copy the new
		** pointer into the journal struct before freeing the 
		** old one
		*/
		node_tmp = jb->bitmaps ;
		jb->bitmaps = jbitmap[i].bitmaps ;
		vfree(node_tmp) ;
	    }	
	
	    /* allocate additional bitmap blocks, reallocate array of bitmap
	     * block pointers */
	    bitmap = vmalloc(sizeof(struct reiserfs_bitmap_info) * bmap_nr_new);
	    if (!bitmap) {
		printk("reiserfs_resize: unable to allocate memory.\n");
		return -ENOMEM;
	    }
	    memset (bitmap, 0, sizeof (struct reiserfs_bitmap_info) * bmap_nr_new);
	    for (i = 0; i < bmap_nr; i++)
		bitmap[i] = SB_AP_BITMAP(s)[i];
	    for (i = bmap_nr; i < bmap_nr_new; i++) {
		bitmap[i].bh = sb_getblk(s, i * s->s_blocksize * 8);
		memset(bitmap[i].bh->b_data, 0, sb_blocksize(sb));
		reiserfs_test_and_set_le_bit(0, bitmap[i].bh->b_data);

		set_buffer_uptodate(bitmap[i].bh);
		mark_buffer_dirty(bitmap[i].bh) ;
		sync_dirty_buffer(bitmap[i].bh);
		// update bitmap_info stuff
		bitmap[i].first_zero_hint=1;
		bitmap[i].free_count = sb_blocksize(sb) * 8 - 1;
	    }	
	    /* free old bitmap blocks array */
	    vfree(SB_AP_BITMAP(s));
	    SB_AP_BITMAP(s) = bitmap;
	}
	
	/* begin transaction */
	journal_begin(&th, s, 10);

	/* correct last bitmap blocks in old and new disk layout */
	reiserfs_prepare_for_journal(s, SB_AP_BITMAP(s)[bmap_nr - 1].bh, 1);
	for (i = block_r; i < s->s_blocksize * 8; i++)
	    reiserfs_test_and_clear_le_bit(i, 
					   SB_AP_BITMAP(s)[bmap_nr - 1].bh->b_data);
	SB_AP_BITMAP(s)[bmap_nr - 1].free_count += s->s_blocksize * 8 - block_r;
	if ( !SB_AP_BITMAP(s)[bmap_nr - 1].first_zero_hint)
	    SB_AP_BITMAP(s)[bmap_nr - 1].first_zero_hint = block_r;

	journal_mark_dirty(&th, s, SB_AP_BITMAP(s)[bmap_nr - 1].bh);

	reiserfs_prepare_for_journal(s, SB_AP_BITMAP(s)[bmap_nr_new - 1].bh, 1);
	for (i = block_r_new; i < s->s_blocksize * 8; i++)
	    reiserfs_test_and_set_le_bit(i,
					 SB_AP_BITMAP(s)[bmap_nr_new - 1].bh->b_data);
	journal_mark_dirty(&th, s, SB_AP_BITMAP(s)[bmap_nr_new - 1].bh);
 
	SB_AP_BITMAP(s)[bmap_nr_new - 1].free_count -= s->s_blocksize * 8 - block_r_new;
	/* Extreme case where last bitmap is the only valid block in itself. */
	if ( !SB_AP_BITMAP(s)[bmap_nr_new - 1].free_count )
	    SB_AP_BITMAP(s)[bmap_nr_new - 1].first_zero_hint = 0;
 	/* update super */
	reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1) ;
	free_blocks = SB_FREE_BLOCKS(s);
	PUT_SB_FREE_BLOCKS(s, free_blocks + (block_count_new - block_count - (bmap_nr_new - bmap_nr)));
	PUT_SB_BLOCK_COUNT(s, block_count_new);
	PUT_SB_BMAP_NR(s, bmap_nr_new);
	s->s_dirt = 1;

	journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB(s));
	
	SB_JOURNAL(s)->j_must_wait = 1;
	journal_end(&th, s, 10);

	return 0;
}
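
The number of bitmap blocks for the new size is a ceiling division, and block_r_new is how many bits of the last bitmap block are actually in use. A standalone sketch of that arithmetic (not kernel code; the block size and new block count are assumptions):

#include <stdio.h>

int main(void)
{
    unsigned long blocksize = 4096;
    unsigned long bits_per_bmap = blocksize * 8;    /* 32768 blocks per bitmap block */
    unsigned long block_count_new = 100000;

    unsigned int  bmap_nr_new = block_count_new / bits_per_bmap;
    unsigned long block_r_new = block_count_new - bmap_nr_new * bits_per_bmap;
    if (block_r_new)
        bmap_nr_new++;                    /* partially used last bitmap block */
    else
        block_r_new = bits_per_bmap;      /* last bitmap block exactly full */

    printf("%u bitmap blocks, %lu bits used in the last one\n",
           bmap_nr_new, block_r_new);     /* 4 bitmap blocks, 1696 bits */
    return 0;
}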
Code Example #8
int shrink_fs(reiserfs_filsys_t reiserfs, unsigned long blocks)
{
	unsigned long n_root_block;
	unsigned int bmap_nr_new;
	unsigned long int i;
	
	fs = reiserfs;
	rs = fs->s_rs;
	
	/* warn about alpha version */
	{
		int c;

		printf(
			"You are running BETA version of reiserfs shrinker.\n"
			"This version is only for testing or VERY CAREFUL use.\n"
			"Backup of you data is recommended.\n\n"
			"Do you want to continue? [y/N]:"
			);
		c = getchar();
		if (c != 'y' && c != 'Y')
			exit(1);
	}

	bmap_nr_new = (blocks - 1) / (8 * fs->s_blocksize) + 1;
	
	/* is shrinking possible ? */
	if (rs_block_count(rs) - blocks > rs_free_blocks(rs) + rs_bmap_nr(rs) - bmap_nr_new) {
	    fprintf(stderr, "resize_reiserfs: can\'t shrink fs; too many blocks already allocated\n");
	    return -1;
	}

	reiserfs_reopen(fs, O_RDWR);
	set_state (fs->s_rs, REISERFS_ERROR_FS);
	mark_buffer_uptodate(SB_BUFFER_WITH_SB(fs), 1);
	mark_buffer_dirty(SB_BUFFER_WITH_SB(fs));
	bwrite(SB_BUFFER_WITH_SB(fs));

	/* calculate number of data blocks */		
	blocks_used = 
	    SB_BLOCK_COUNT(fs)
	    - SB_FREE_BLOCKS(fs)
	    - SB_BMAP_NR(fs)
	    - SB_JOURNAL_SIZE(fs)
	    - REISERFS_DISK_OFFSET_IN_BYTES / fs->s_blocksize
	    - 2; /* superblock itself and 1 descriptor after the journal */

	bmp = reiserfs_create_bitmap(rs_block_count(rs));
	reiserfs_fetch_disk_bitmap(bmp, fs);

	unused_block = 1;

	if (opt_verbose) {
		printf("Processing the tree: ");
		fflush(stdout);
	}

	n_root_block = move_formatted_block(rs_root_block(rs), blocks, 0);
	if (n_root_block) {
		set_root_block (rs, n_root_block);
	}

	if (opt_verbose)
	    printf ("\n\nnodes processed (moved):\n"
		    "int        %lu (%lu),\n"
		    "leaves     %lu (%lu),\n" 
		    "unfm       %lu (%lu),\n"
		    "total      %lu (%lu).\n\n",
		    int_node_cnt, int_moved_cnt,
		    leaf_node_cnt, leaf_moved_cnt, 
		    unfm_node_cnt, unfm_moved_cnt,
		    (unsigned long)total_node_cnt, total_moved_cnt);

	if (block_count_mismatch) {
	    fprintf(stderr, "resize_reiserfs: data block count %lu"
		    " doesn\'t match data block count %lu from super block\n",
		    (unsigned long)total_node_cnt, blocks_used);
	}
#if 0
	printf("check for used blocks in truncated region\n");
	{
		unsigned long l;
		for (l = blocks; l < rs_block_count(rs); l++)
			if (is_block_used(bmp, l))
				printf("<%lu>", l);
		printf("\n");
	}
#endif /* 0 */

	reiserfs_free_bitmap_blocks(fs);
	
	set_free_blocks (rs, rs_free_blocks(rs) - (rs_block_count(rs) - blocks) + (rs_bmap_nr(rs) - bmap_nr_new));
	set_block_count (rs, blocks);
	set_bmap_nr (rs, bmap_nr_new);

	reiserfs_read_bitmap_blocks(fs);
	
	for (i = blocks; i < bmap_nr_new * fs->s_blocksize; i++)
		reiserfs_bitmap_set_bit(bmp, i);
#if 0
	PUT_SB_FREE_BLOCKS(s, SB_FREE_BLOCKS(s) - (SB_BLOCK_COUNT(s) - blocks) + (SB_BMAP_NR(s) - bmap_nr_new));
	PUT_SB_BLOCK_COUNT(s, blocks);
	PUT_SB_BMAP_NR(s, bmap_nr_new);
#endif
	reiserfs_flush_bitmap(bmp, fs);

	return 0;
}
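
The feasibility test near the top of shrink_fs() says the blocks being cut off must all come out of the free pool, after crediting the bitmap blocks that disappear with them. A standalone sketch of that check (not resize_reiserfs code; all numbers are assumptions):

#include <stdio.h>

int main(void)
{
    unsigned long block_count = 262144, blocks = 131072;   /* shrink to half */
    unsigned long free_blocks = 140000;
    unsigned long blocksize   = 4096;
    unsigned int  bmap_nr     = 8;                          /* 262144 / 32768 */
    unsigned int  bmap_nr_new = (blocks - 1) / (8 * blocksize) + 1;   /* 4 */

    if (block_count - blocks > free_blocks + bmap_nr - bmap_nr_new)
        printf("cannot shrink: too many blocks already allocated\n");
    else
        printf("shrink is possible\n");
    return 0;
}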
Code Example #9
int reiserfs_resize(struct super_block *s, unsigned long block_count_new)
{
	int err = 0;
	struct reiserfs_super_block *sb;
	struct reiserfs_bitmap_info *bitmap;
	struct reiserfs_bitmap_info *info;
	struct reiserfs_bitmap_info *old_bitmap = SB_AP_BITMAP(s);
	struct buffer_head *bh;
	struct reiserfs_transaction_handle th;
	unsigned int bmap_nr_new, bmap_nr;
	unsigned int block_r_new, block_r;

	struct reiserfs_list_bitmap *jb;
	struct reiserfs_list_bitmap jbitmap[JOURNAL_NUM_BITMAPS];

	unsigned long int block_count, free_blocks;
	int i;
	int copy_size;

	sb = SB_DISK_SUPER_BLOCK(s);

	if (SB_BLOCK_COUNT(s) >= block_count_new) {
		printk("can\'t shrink filesystem on-line\n");
		return -EINVAL;
	}

	/* check the device size */
	bh = sb_bread(s, block_count_new - 1);
	if (!bh) {
		printk("reiserfs_resize: can\'t read last block\n");
		return -EINVAL;
	}
	bforget(bh);

	/* old disk layout detection; those partitions can be mounted, but
	 * cannot be resized */
	if (SB_BUFFER_WITH_SB(s)->b_blocknr * SB_BUFFER_WITH_SB(s)->b_size
	    != REISERFS_DISK_OFFSET_IN_BYTES) {
		printk
		    ("reiserfs_resize: unable to resize a reiserfs without distributed bitmap (fs version < 3.5.12)\n");
		return -ENOTSUPP;
	}

	/* count used bits in last bitmap block */
	block_r = SB_BLOCK_COUNT(s) -
			(reiserfs_bmap_count(s) - 1) * s->s_blocksize * 8;

	/* count bitmap blocks in new fs */
	bmap_nr_new = block_count_new / (s->s_blocksize * 8);
	block_r_new = block_count_new - bmap_nr_new * s->s_blocksize * 8;
	if (block_r_new)
		bmap_nr_new++;
	else
		block_r_new = s->s_blocksize * 8;

	/* save old values */
	block_count = SB_BLOCK_COUNT(s);
	bmap_nr = reiserfs_bmap_count(s);

	/* resizing of reiserfs bitmaps (journal and real), if needed */
	if (bmap_nr_new > bmap_nr) {
		/* reallocate journal bitmaps */
		if (reiserfs_allocate_list_bitmaps(s, jbitmap, bmap_nr_new) < 0) {
			printk
			    ("reiserfs_resize: unable to allocate memory for journal bitmaps\n");
			return -ENOMEM;
		}
		/* the new journal bitmaps are zero filled, now we copy in the bitmap
		 ** node pointers from the old journal bitmap structs, and then
		 ** transfer the new data structures into the journal struct.
		 **
		 ** using the copy_size var below allows this code to work for
		 ** both shrinking and expanding the FS.
		 */
		copy_size = bmap_nr_new < bmap_nr ? bmap_nr_new : bmap_nr;
		copy_size =
		    copy_size * sizeof(struct reiserfs_list_bitmap_node *);
		for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
			struct reiserfs_bitmap_node **node_tmp;
			jb = SB_JOURNAL(s)->j_list_bitmap + i;
			memcpy(jbitmap[i].bitmaps, jb->bitmaps, copy_size);

			/* just in case vfree schedules on us, copy the new
			 ** pointer into the journal struct before freeing the
			 ** old one
			 */
			node_tmp = jb->bitmaps;
			jb->bitmaps = jbitmap[i].bitmaps;
			vfree(node_tmp);
		}

		/* allocate additional bitmap blocks, reallocate array of bitmap
		 * block pointers */
		bitmap =
		    vzalloc(sizeof(struct reiserfs_bitmap_info) * bmap_nr_new);
		if (!bitmap) {
			/* Journal bitmaps are still supersized, but the memory isn't
			 * leaked, so I guess it's ok */
			printk("reiserfs_resize: unable to allocate memory.\n");
			return -ENOMEM;
		}
		for (i = 0; i < bmap_nr; i++)
			bitmap[i] = old_bitmap[i];

		/* This doesn't go through the journal, but it doesn't have to.
		 * The changes are still atomic: We're synced up when the journal
		 * transaction begins, and the new bitmaps don't matter if the
		 * transaction fails. */
		for (i = bmap_nr; i < bmap_nr_new; i++) {
			/* don't use read_bitmap_block since it will cache
			 * the uninitialized bitmap */
			bh = sb_bread(s, i * s->s_blocksize * 8);
			if (!bh) {
				vfree(bitmap);
				return -EIO;
			}
			memset(bh->b_data, 0, sb_blocksize(sb));
			reiserfs_set_le_bit(0, bh->b_data);
			reiserfs_cache_bitmap_metadata(s, bh, bitmap + i);

			set_buffer_uptodate(bh);
			mark_buffer_dirty(bh);
			reiserfs_write_unlock(s);
			sync_dirty_buffer(bh);
			reiserfs_write_lock(s);
			// update bitmap_info stuff
			bitmap[i].free_count = sb_blocksize(sb) * 8 - 1;
			brelse(bh);
		}
		/* free old bitmap blocks array */
		SB_AP_BITMAP(s) = bitmap;
		vfree(old_bitmap);
	}

	/* begin transaction, if there was an error, it's fine. Yes, we have
	 * incorrect bitmaps now, but none of it is ever going to touch the
	 * disk anyway. */
	err = journal_begin(&th, s, 10);
	if (err)
		return err;

	/* Extend old last bitmap block - new blocks have been made available */
	info = SB_AP_BITMAP(s) + bmap_nr - 1;
	bh = reiserfs_read_bitmap_block(s, bmap_nr - 1);
	if (!bh) {
		int jerr = journal_end(&th, s, 10);
		if (jerr)
			return jerr;
		return -EIO;
	}

	reiserfs_prepare_for_journal(s, bh, 1);
	for (i = block_r; i < s->s_blocksize * 8; i++)
		reiserfs_clear_le_bit(i, bh->b_data);
	info->free_count += s->s_blocksize * 8 - block_r;

	journal_mark_dirty(&th, s, bh);
	brelse(bh);

	/* Correct new last bitmap block - It may not be full */
	info = SB_AP_BITMAP(s) + bmap_nr_new - 1;
	bh = reiserfs_read_bitmap_block(s, bmap_nr_new - 1);
	if (!bh) {
		int jerr = journal_end(&th, s, 10);
		if (jerr)
			return jerr;
		return -EIO;
	}

	reiserfs_prepare_for_journal(s, bh, 1);
	for (i = block_r_new; i < s->s_blocksize * 8; i++)
		reiserfs_set_le_bit(i, bh->b_data);
	journal_mark_dirty(&th, s, bh);
	brelse(bh);

	info->free_count -= s->s_blocksize * 8 - block_r_new;
	/* update super */
	reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
	free_blocks = SB_FREE_BLOCKS(s);
	PUT_SB_FREE_BLOCKS(s,
			   free_blocks + (block_count_new - block_count -
					  (bmap_nr_new - bmap_nr)));
	PUT_SB_BLOCK_COUNT(s, block_count_new);
	PUT_SB_BMAP_NR(s, bmap_would_wrap(bmap_nr_new) ? : bmap_nr_new);

	journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB(s));

	SB_JOURNAL(s)->j_must_wait = 1;
	return journal_end(&th, s, 10);
}
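
The superblock update at the end credits every added block as free except the ones consumed by the new bitmap blocks. A standalone arithmetic sketch (not kernel code; the concrete values are assumptions):

#include <stdio.h>

int main(void)
{
    unsigned long block_count = 65536,  block_count_new = 100000;
    unsigned int  bmap_nr     = 2,      bmap_nr_new     = 4;
    unsigned long free_blocks = 1200;

    /* same formula as the PUT_SB_FREE_BLOCKS() call above */
    free_blocks += (block_count_new - block_count) - (bmap_nr_new - bmap_nr);
    printf("free blocks after resize: %lu\n", free_blocks);   /* 35662 */
    return 0;
}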
Code Example #10
File: bitmap.c  Project: liexusong/Linux-2.4.16
/* 
** We pre-allocate 8 blocks.  Pre-allocation is used for files > 16 KB only.
** This lowers fragmentation on large files by grabbing a contiguous set of
** blocks at once.  It also limits the number of times the bitmap block is
** logged by making X number of allocation changes in a single transaction.
**
** We are using a border to divide the disk into two parts.  The first part
** is used for tree blocks, which have a very high turnover rate (they
** are constantly allocated then freed)
**
** The second part of the disk is for the unformatted nodes of larger files.
** Putting them away from the tree blocks lowers fragmentation, and makes
** it easier to group files together.  There are a number of different
** allocation schemes being tried right now, each is documented below.
**
** A great deal of the allocator's speed comes because reiserfs_get_block
** sends us the block number of the last unformatted node in the file.  Once
** a given block is allocated past the border, we don't collide with the
** blocks near the search_start again.
** 
*/
int reiserfs_new_unf_blocknrs2 (struct reiserfs_transaction_handle *th, 
				struct inode       * p_s_inode,
				unsigned long      * free_blocknrs,
				unsigned long        search_start)
{
  int ret=0, blks_gotten=0;
  unsigned long border = 0;
  unsigned long bstart = 0;
  unsigned long hash_in, hash_out;
  unsigned long saved_search_start=search_start;
  int allocated[PREALLOCATION_SIZE];
  int blks;

  if (!reiserfs_no_border(th->t_super)) {
    /* we default to having the border at the 10% mark of the disk.  This
    ** is an arbitrary decision and it needs tuning.  It also needs a limit
    ** to prevent it from taking too much space on huge drives.
    */
    bstart = (SB_BLOCK_COUNT(th->t_super) / 10); 
  }
  if (!reiserfs_no_unhashed_relocation(th->t_super)) {
    /* this is a very simple first attempt at preventing too much grouping
    ** around the border value.  Since k_dir_id is never larger than the
    ** highest allocated oid, it is far from perfect, and files will tend
    ** to be grouped towards the start of the border
    */
    border = le32_to_cpu(INODE_PKEY(p_s_inode)->k_dir_id) % (SB_BLOCK_COUNT(th->t_super) - bstart - 1) ;
  } else {
    /* why would we want to declare a local variable to this if statement
    ** name border????? -chris
    ** unsigned long border = 0;
    */
    if (!reiserfs_hashed_relocation(th->t_super)) {
      hash_in = le32_to_cpu((INODE_PKEY(p_s_inode))->k_dir_id);
				/* I wonder if the CPU cost of the
                                   hash will obscure the layout
                                   effect? Of course, whether that
                                   effect is good or bad we don't
                                   know.... :-) */
      
      hash_out = keyed_hash(((char *) (&hash_in)), 4);
      border = hash_out % (SB_BLOCK_COUNT(th->t_super) - bstart - 1) ;
    }
  }
  border += bstart ;
  allocated[0] = 0 ; /* important.  Allows a check later on to see if at
                      * least one block was allocated.  This prevents false
		      * no disk space returns
		      */

  if ( (p_s_inode->i_size < 4 * 4096) || 
       !(S_ISREG(p_s_inode->i_mode)) )
    {
      if ( search_start < border 
	   || (
				/* allow us to test whether it is a
                                   good idea to prevent files from
                                   getting too far away from their
                                   packing locality by some unexpected
                                   means.  This might be poor code for
                                   directories whose files total
                                   larger than 1/10th of the disk, and
                                   it might be good code for
                                   suffering from old insertions when the disk
                                   was almost full. */
               /* changed from !reiserfs_test3(th->t_super), which doesn't
               ** seem like a good idea.  Think about adding blocks to
               ** a large file.  If you've allocated 10% of the disk
               ** in contiguous blocks, you start over at the border value
               ** for every new allocation.  This throws away all the
               ** information sent in about the last block that was allocated
               ** in the file.  Not a good general case at all.
               ** -chris
               */
	       reiserfs_test4(th->t_super) && 
	       (search_start > border + (SB_BLOCK_COUNT(th->t_super) / 10))
	       )
	   )
	search_start=border;
  
      ret = do_reiserfs_new_blocknrs(th, free_blocknrs, search_start, 
				     1/*amount_needed*/, 
				     0/*use reserved blocks for root */,
				     1/*for_formatted*/,
				     0/*for prealloc */) ;  
      return ret;
    }

  /* take a block off the prealloc list and return it -Hans */
  if (p_s_inode->u.reiserfs_i.i_prealloc_count > 0) {
    p_s_inode->u.reiserfs_i.i_prealloc_count--;
    *free_blocknrs = p_s_inode->u.reiserfs_i.i_prealloc_block++;

    /* if no more preallocated blocks, remove inode from list */
    if (! p_s_inode->u.reiserfs_i.i_prealloc_count) {
      list_del(&p_s_inode->u.reiserfs_i.i_prealloc_list);
    }
    
    return ret;
  }

				/* else get a new preallocation for the file */
  reiserfs_discard_prealloc (th, p_s_inode);
  /* this uses the last preallocated block as the search_start.  discard
  ** prealloc does not zero out this number.
  */
  if (search_start <= p_s_inode->u.reiserfs_i.i_prealloc_block) {
    search_start = p_s_inode->u.reiserfs_i.i_prealloc_block;
  }
  
  /* doing the compare again forces search_start to be >= the border,
  ** even if the file already had prealloction done.  This seems extra,
  ** and should probably be removed
  */
  if ( search_start < border ) search_start=border; 

  /* If the disk free space is already below 10% we should 
  ** start looking for the free blocks from the beginning 
  ** of the partition, before the border line.
  */
  if ( SB_FREE_BLOCKS(th->t_super) <= (SB_BLOCK_COUNT(th->t_super) / 10) ) {
    search_start=saved_search_start;
  }

  *free_blocknrs = 0;
  blks = PREALLOCATION_SIZE-1;
  for (blks_gotten=0; blks_gotten<PREALLOCATION_SIZE; blks_gotten++) {
    ret = do_reiserfs_new_blocknrs(th, free_blocknrs, search_start, 
				   1/*amount_needed*/, 
				   0/*for root reserved*/,
				   1/*for_formatted*/,
				   (blks_gotten > 0)/*must_be_contiguous*/) ;
    /* if we didn't find a block this time, adjust blks to reflect
    ** the actual number of blocks allocated
    */ 
    if (ret != CARRY_ON) {
      blks = blks_gotten > 0 ? (blks_gotten - 1) : 0 ;
      break ;
    }
    allocated[blks_gotten]= *free_blocknrs;
#ifdef CONFIG_REISERFS_CHECK
    if ( (blks_gotten>0) && (allocated[blks_gotten] - allocated[blks_gotten-1]) != 1 ) {
      /* this should be caught by new_blocknrs now, checking code */
      reiserfs_warning("yura-1, reiserfs_new_unf_blocknrs2: pre-allocated not contiguous set of blocks!\n") ;
      reiserfs_free_block(th, allocated[blks_gotten]);
      blks = blks_gotten-1; 
      break;
    }
#endif
    if (blks_gotten==0) {
      p_s_inode->u.reiserfs_i.i_prealloc_block = *free_blocknrs;
    }
    search_start = *free_blocknrs; 
    *free_blocknrs = 0;
  }
  p_s_inode->u.reiserfs_i.i_prealloc_count = blks;
  *free_blocknrs = p_s_inode->u.reiserfs_i.i_prealloc_block;
  p_s_inode->u.reiserfs_i.i_prealloc_block++;

  /* if inode has preallocated blocks, link him to list */
  if (p_s_inode->u.reiserfs_i.i_prealloc_count) {
    list_add(&p_s_inode->u.reiserfs_i.i_prealloc_list,
	     &SB_JOURNAL(th->t_super)->j_prealloc_list);
  } 
  /* we did actually manage to get 1 block */
  if (ret != CARRY_ON && allocated[0] > 0) {
    return CARRY_ON ;
  }
  /* NO_MORE_UNUSED_CONTIGUOUS_BLOCKS should only mean something to
  ** the preallocation code.  The rest of the filesystem asks for a block
  ** and should either get it, or know the disk is full.  The code
  ** above should never allow ret == NO_MORE_UNUSED_CONTIGUOUS_BLOCK,
  ** as it doesn't send for_prealloc = 1 to do_reiserfs_new_blocknrs
  ** unless it has already successfully allocated at least one block.
  ** Just in case, we translate into a return value the rest of the
  ** filesystem can understand.
  **
  ** It is an error to change this without making the
  ** rest of the filesystem understand NO_MORE_UNUSED_CONTIGUOUS_BLOCKS
  ** If you consider it a bug to return NO_DISK_SPACE here, fix the rest
  ** of the fs first.
  */
  if (ret == NO_MORE_UNUSED_CONTIGUOUS_BLOCKS) {
#ifdef CONFIG_REISERFS_CHECK
    reiserfs_warning("reiser-2015: this shouldn't happen, may cause false out of disk space error");
#endif
     return NO_DISK_SPACE; 
  }
  return ret;
}
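
The border logic reserves roughly the first 10% of the disk for tree blocks and steers a file's unformatted nodes toward a per-directory position in the remaining space. A user-space sketch of that placement (not kernel code; toy_hash() is a hypothetical stand-in for keyed_hash(), and the sizes are assumptions):

#include <stdio.h>

static unsigned long toy_hash(unsigned long x)
{
    /* any deterministic mixing function works for the illustration */
    x ^= x >> 13;
    x *= 2654435761UL;
    return x ^ (x >> 17);
}

int main(void)
{
    unsigned long block_count = 1000000;
    unsigned long dir_id = 42;                   /* k_dir_id of the inode (assumed) */

    unsigned long bstart = block_count / 10;     /* tree-block region at the front */
    unsigned long border = toy_hash(dir_id) % (block_count - bstart - 1);
    border += bstart;                            /* same adjustment as above */

    printf("unformatted-node search for this directory starts near block %lu\n",
           border);
    return 0;
}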
Code Example #11
File: bitmap.c  Project: liexusong/Linux-2.4.16
static int do_reiserfs_new_blocknrs (struct reiserfs_transaction_handle *th,
                                     unsigned long * free_blocknrs, 
				     unsigned long search_start, 
				     int amount_needed, int priority, 
				     int for_unformatted,
				     int for_prealloc)
{
  struct super_block * s = th->t_super;
  int i, j;
  unsigned long * block_list_start = free_blocknrs;
  int init_amount_needed = amount_needed;
  unsigned long new_block = 0 ; 

    if (SB_FREE_BLOCKS (s) < SPARE_SPACE && !priority)
	/* we can answer NO_DISK_SPACE when asked for a new block with
	   priority 0 */
	return NO_DISK_SPACE;

  RFALSE( !s, "vs-4090: trying to get new block from nonexistent device");
  RFALSE( search_start == MAX_B_NUM,
	  "vs-4100: we are optimizing location based on "
	  "the bogus location of a temp buffer (%lu).", search_start);
  RFALSE( amount_needed < 1 || amount_needed > 2,
	  "vs-4110: amount_needed parameter incorrect (%d)", amount_needed);

  /* We continue the while loop if another process snatches our found
   * free block from us after we find it but before we successfully
   * mark it as in use */

  while (amount_needed--) {
    /* skip over any blocknrs already gotten last time. */
    if (*(free_blocknrs) != 0) {
      RFALSE( is_reusable (s, *free_blocknrs, 1) == 0, 
	      "vs-4120: bad blocknr on free_blocknrs list");
      free_blocknrs++;
      continue;
    }
    /* look for zero bits in bitmap */
    if (find_zero_bit_in_bitmap(s,search_start, &i, &j,for_unformatted) == 0) {
      if (find_zero_bit_in_bitmap(s,search_start,&i,&j, for_unformatted) == 0) {
				/* recode without the goto and without
				   the if.  It will require a
				   duplicate for.  This is worth the
				   code clarity.  Your way was
				   admirable, and just a bit too
				   clever in saving instructions.:-)
				   I'd say create a new function, but
				   that would slow things also, yes?
				   -Hans */
free_and_return:
	for ( ; block_list_start != free_blocknrs; block_list_start++) {
	  reiserfs_free_block (th, *block_list_start);
	  *block_list_start = 0;
	}
	if (for_prealloc) 
	    return NO_MORE_UNUSED_CONTIGUOUS_BLOCKS;
	else
	    return NO_DISK_SPACE;
      }
    }
    
    /* i and j now contain the results of the search. i = bitmap block
       number containing free block, j = offset in this block.  we
       compute the blocknr which is our result, store it in
       free_blocknrs, and increment the pointer so that on the next
       loop we will insert into the next location in the array.  Also
       in preparation for the next loop, search_start is changed so
       that the next search will not rescan the same range but will
       start where this search finished.  Note that while it is
       possible that schedule has occurred and blocks have been freed
       in that range, it is perhaps more important that the blocks
       returned be near each other than that they be near their other
       neighbors, and it also simplifies and speeds the code this way.  */

    /* journal: we need to make sure the block we are giving out is not
    ** a log block, horrible things would happen there.
    */
    new_block = (i * (s->s_blocksize << 3)) + j; 
    if (for_prealloc && (new_block - 1) != search_start) {
      /* preallocated blocks must be contiguous, bail if we didn't find one.
      ** this is not a bug.  We want to do the check here, before the
      ** bitmap block is prepared, and before we set the bit and log the
      ** bitmap. 
      **
      ** If we do the check after this function returns, we have to 
      ** call reiserfs_free_block for new_block, which would be pure
      ** overhead.
      **
      ** for_prealloc should only be set if the caller can deal with the
      ** NO_MORE_UNUSED_CONTIGUOUS_BLOCKS return value.  This can be
      ** returned before the disk is actually full
      */
      goto free_and_return ;
    }
    search_start = new_block ;
    if (search_start >= reiserfs_get_journal_block(s) &&
        search_start < (reiserfs_get_journal_block(s) + JOURNAL_BLOCK_COUNT)) {
	reiserfs_warning("vs-4130: reiserfs_new_blocknrs: trying to allocate log block %lu\n",
			 search_start) ;
	search_start++ ;
	amount_needed++ ;
	continue ;
    }
       

    reiserfs_prepare_for_journal(s, SB_AP_BITMAP(s)[i], 1) ;

    RFALSE( buffer_locked (SB_AP_BITMAP (s)[i]) || 
	    is_reusable (s, search_start, 0) == 0,
	    "vs-4140: bitmap block is locked or bad block number found");

    /* if this bit was already set, we've scheduled, and someone else
    ** has allocated it.  loop around and try again
    */
    if (reiserfs_test_and_set_le_bit (j, SB_AP_BITMAP (s)[i]->b_data)) {
	reiserfs_warning("vs-4150: reiserfs_new_blocknrs, block not free");
	reiserfs_restore_prepared_buffer(s, SB_AP_BITMAP(s)[i]) ;
	amount_needed++ ;
	continue ;
    }    
    journal_mark_dirty (th, s, SB_AP_BITMAP (s)[i]); 
    *free_blocknrs = search_start ;
    free_blocknrs ++;
  }

  reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1) ;
  /* update free block count in super block */
  PUT_SB_FREE_BLOCKS( s, SB_FREE_BLOCKS(s) - init_amount_needed );
  journal_mark_dirty (th, s, SB_BUFFER_WITH_SB (s));
  s->s_dirt = 1;

  return CARRY_ON;
}
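
For preallocation the allocator only accepts a block that directly follows the previous one; anything else ends the preallocation run with NO_MORE_UNUSED_CONTIGUOUS_BLOCKS rather than handing back a scattered block. A standalone sketch of that contiguity check (not kernel code; the block numbers are assumptions):

#include <stdio.h>

int main(void)
{
    unsigned long search_start = 5000;              /* last block handed out */
    unsigned long candidates[] = { 5001, 5002, 5007 };

    for (int i = 0; i < 3; i++) {
        unsigned long new_block = candidates[i];
        if (new_block - 1 != search_start) {        /* same test as for_prealloc above */
            printf("block %lu is not contiguous, stop preallocating\n", new_block);
            break;
        }
        printf("accept block %lu\n", new_block);
        search_start = new_block;
    }
    return 0;
}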