Example #1
/* Searches the journal structures for a given block number (bmap, off). If the
   block is found in the reiserfs journal, it suggests the next free block
   candidate to test. */
static inline int is_block_in_journal (struct super_block * s, int bmap, int off, int *next)
{
    unsigned int tmp;

    if (reiserfs_in_journal (s, s->s_dev, bmap, off, s->s_blocksize, 1, &tmp)) {
        if (tmp) {		/* hint supplied */
            *next = tmp;
            PROC_INFO_INC( s, scan_bitmap.in_journal_hint );
        } else {
            (*next) = off + 1;		/* inc offset to avoid looping. */
            PROC_INFO_INC( s, scan_bitmap.in_journal_nohint );
        }
        PROC_INFO_INC( s, scan_bitmap.retry );
        return 1;
    }
    return 0;
}
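
Below is a minimal usage sketch, not taken from the reiserfs sources, showing how a scanning loop might consume the hint that is_block_in_journal() returns through `next`; the helper name scan_window_start() is hypothetical.

/* Hypothetical helper: advance through one bitmap block, skipping blocks
 * that sit in the journal, using the hint from is_block_in_journal(). */
static int scan_window_start (struct super_block * s, int bmap, int off, int off_max)
{
    int next;

    while (off < off_max) {
        if (!is_block_in_journal (s, bmap, off, &next))
            return off;     /* not journaled: a usable window may start here */
        off = next;         /* journaled: jump to the suggested candidate */
    }
    return -1;              /* no usable offset in this bitmap block */
}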
Example #2
/* The function is NOT SCHEDULE-SAFE! */
struct buffer_head  * reiserfs_bread (struct super_block *super, int n_block, int n_size) 
{
    struct buffer_head  *result;
    PROC_EXP( unsigned int ctx_switches = kstat.context_swtch );

    result = bread (super -> s_dev, n_block, n_size);
    PROC_INFO_INC( super, breads );
    PROC_EXP( if( kstat.context_swtch != ctx_switches ) 
	      PROC_INFO_INC( super, bread_miss ) );
    return result;
}
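
A hedged usage sketch (hypothetical caller, not from the sources): the buffer_head returned by reiserfs_bread() carries a reference that must be dropped with brelse() when the caller is done.

static int read_one_block (struct super_block * super, int n_block)
{
    struct buffer_head * bh;

    bh = reiserfs_bread (super, n_block, super->s_blocksize);
    if (!bh)
        return -EIO;        /* bread() failed to bring the block up to date */
    /* ... inspect bh->b_data here ... */
    brelse (bh);            /* drop the reference taken by bread() */
    return 0;
}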
Example #3
static void _reiserfs_free_block (struct reiserfs_transaction_handle *th,
                                  struct inode *inode, b_blocknr_t block,
                                  int for_unformatted)
{
    struct super_block * s = th->t_super;
    struct reiserfs_super_block * rs;
    struct buffer_head * sbh;
    struct reiserfs_bitmap_info *apbi;
    int nr, offset;

    BUG_ON (!th->t_trans_id);

    PROC_INFO_INC( s, free_block );

    rs = SB_DISK_SUPER_BLOCK (s);
    sbh = SB_BUFFER_WITH_SB (s);
    apbi = SB_AP_BITMAP(s);

    get_bit_address (s, block, &nr, &offset);

    if (nr >= sb_bmap_nr (rs)) {
        reiserfs_warning (s, "vs-4075: reiserfs_free_block: "
                          "block %lu is out of range on %s",
                          block, reiserfs_bdevname (s));
        return;
    }

    reiserfs_prepare_for_journal(s, apbi[nr].bh, 1 ) ;

    /* clear bit for the given block in bit map */
    if (!reiserfs_test_and_clear_le_bit (offset, apbi[nr].bh->b_data)) {
        reiserfs_warning (s, "vs-4080: reiserfs_free_block: "
                          "free_block (%s:%lu)[dev:blocknr]: bit already cleared",
                          reiserfs_bdevname (s), block);
    }
    apbi[nr].free_count ++;
    journal_mark_dirty (th, s, apbi[nr].bh);

    reiserfs_prepare_for_journal(s, sbh, 1) ;
    /* update super block */
    set_sb_free_blocks( rs, sb_free_blocks(rs) + 1 );

    journal_mark_dirty (th, s, sbh);
    if (for_unformatted)
        DQUOT_FREE_BLOCK_NODIRTY(inode, 1);
}
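
get_bit_address() above splits an absolute block number into a bitmap index and a bit offset. The body below is a plausible reconstruction of that arithmetic, assuming each bitmap block covers s_blocksize << 3 blocks; it is offered as a sketch, not the verbatim kernel code.

static void get_bit_address_sketch (struct super_block * s, b_blocknr_t block,
                                    int * bmap_nr, int * offset)
{
    unsigned int bits_per_bmap = s->s_blocksize << 3;   /* 8 bits per byte */

    *bmap_nr = block / bits_per_bmap;   /* which bitmap block covers it */
    *offset  = block % bits_per_bmap;   /* which bit inside that block */
}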
Example #4
/* Tries to find a contiguous window of zero bits (of a given size) in the given
 * region of the bitmap and places new blocks there. Returns the number of
 * allocated blocks. */
static int scan_bitmap (struct reiserfs_transaction_handle *th,
                        unsigned long *start, unsigned long finish,
                        int min, int max, int unfm, unsigned long file_block)
{
    int nr_allocated=0;
    struct super_block * s = th->t_super;
    /* find every bm and bmap and bmap_nr in this file, and change them all to bitmap_blocknr
     * - Hans, it is not a block number - Zam. */

    int bm, off;
    int end_bm, end_off;
    int off_max = s->s_blocksize << 3;

    PROC_INFO_INC( s, scan_bitmap.call );
    if ( SB_FREE_BLOCKS(s) <= 0)
        return 0; // No point in looking for more free blocks

    get_bit_address (s, *start, &bm, &off);
    get_bit_address (s, finish, &end_bm, &end_off);

    // With this option set, we first try to find a bitmap that is at least 10%
    // free; if that fails, we fall back to the old whole-bitmap scanning
    if ( TEST_OPTION(skip_busy, s) && SB_FREE_BLOCKS(s) > SB_BLOCK_COUNT(s)/20 ) {
        for (; bm < end_bm; bm++, off = 0) {
            if ( ( off && (!unfm || (file_block != 0))) || SB_AP_BITMAP(s)[bm].free_count > (s->s_blocksize << 3) / 10 )
                nr_allocated = scan_bitmap_block(th, bm, &off, off_max, min, max, unfm);
            if (nr_allocated)
                goto ret;
        }
        get_bit_address (s, *start, &bm, &off);
    }

    for (; bm < end_bm; bm++, off = 0) {
        nr_allocated = scan_bitmap_block(th, bm, &off, off_max, min, max, unfm);
        if (nr_allocated)
            goto ret;
    }

    nr_allocated = scan_bitmap_block(th, bm, &off, end_off + 1, min, max, unfm);

ret:
    *start = bm * off_max + off;
    return nr_allocated;

}
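
The final `*start = bm * off_max + off` re-encodes the (bitmap, offset) pair as an absolute block number, i.e. it inverts get_bit_address(). A small illustrative check (hypothetical test helper):

static void check_start_roundtrip (struct super_block * s, unsigned long block)
{
    int bm, off;
    int off_max = s->s_blocksize << 3;

    get_bit_address (s, block, &bm, &off);
    /* decomposing and re-encoding must give the original block number back */
    BUG_ON ((unsigned long) bm * off_max + off != block);
}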
Example #5
static void _reiserfs_free_block (struct reiserfs_transaction_handle *th,
                                  b_blocknr_t block)
{
    struct super_block * s = th->t_super;
    struct reiserfs_super_block * rs;
    struct buffer_head * sbh;
    struct reiserfs_bitmap_info *apbi;
    int nr, offset;

    PROC_INFO_INC( s, free_block );

    rs = SB_DISK_SUPER_BLOCK (s);
    sbh = SB_BUFFER_WITH_SB (s);
    apbi = SB_AP_BITMAP(s);

    get_bit_address (s, block, &nr, &offset);

    if (nr >= sb_bmap_nr (rs)) {
        reiserfs_warning ("vs-4075: reiserfs_free_block: "
                          "block %lu is out of range on %s\n",
                          block, bdevname(s->s_dev));
        return;
    }

    reiserfs_prepare_for_journal(s, apbi[nr].bh, 1 ) ;

    /* clear bit for the given block in bit map */
    if (!reiserfs_test_and_clear_le_bit (offset, apbi[nr].bh->b_data)) {
        reiserfs_warning ("vs-4080: reiserfs_free_block: "
                          "free_block (%04x:%lu)[dev:blocknr]: bit already cleared\n",
                          s->s_dev, block);
    }
    if (offset < apbi[nr].first_zero_hint) {
        apbi[nr].first_zero_hint = offset;
    }
    apbi[nr].free_count ++;
    journal_mark_dirty (th, s, apbi[nr].bh);

    reiserfs_prepare_for_journal(s, sbh, 1) ;
    /* update super block */
    set_sb_free_blocks( rs, sb_free_blocks(rs) + 1 );

    journal_mark_dirty (th, s, sbh);
}
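
This version also maintains first_zero_hint, lowering it whenever a bit below the current hint is cleared. A hedged sketch of how an allocator might exploit the hint; reiserfs_find_next_zero_le_bit() is assumed to take (map, size, start), as in the scan_bitmap_block() example later in this listing.

static int find_zero_from_hint (struct super_block * s, struct reiserfs_bitmap_info * bi)
{
    int bits = s->s_blocksize << 3;
    int off = bi->first_zero_hint;      /* no zero bit exists below the hint */

    off = reiserfs_find_next_zero_le_bit ((unsigned long *) bi->bh->b_data, bits, off);
    return (off < bits) ? off : -1;     /* -1: this bitmap block is completely full */
}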
Example #6
				/* I wonder if it would be less modest
                                   now that we use journaling. -Hans */
void reiserfs_free_block (struct reiserfs_transaction_handle *th, unsigned long block)
{
    struct super_block * s = th->t_super;
    struct reiserfs_super_block * rs;
    struct buffer_head * sbh;
    struct buffer_head ** apbh;
    int nr, offset;

  RFALSE(!s, "vs-4060: trying to free block on nonexistent device");
  RFALSE(is_reusable (s, block, 1) == 0, "vs-4070: can not free such block");

  PROC_INFO_INC( s, free_block );

  rs = SB_DISK_SUPER_BLOCK (s);
  sbh = SB_BUFFER_WITH_SB (s);
  apbh = SB_AP_BITMAP (s);

  get_bit_address (s, block, &nr, &offset);

  /* mark it before we clear it, just in case */
  journal_mark_freed(th, s, block) ;

  reiserfs_prepare_for_journal(s, apbh[nr], 1 ) ;

  /* clear bit for the given block in bit map */
  if (!reiserfs_test_and_clear_le_bit (offset, apbh[nr]->b_data)) {
      reiserfs_warning ("vs-4080: reiserfs_free_block: "
			"free_block (%04x:%lu)[dev:blocknr]: bit already cleared\n", 
	    s->s_dev, block);
  }
  journal_mark_dirty (th, s, apbh[nr]);

  reiserfs_prepare_for_journal(s, sbh, 1) ;
  /* update super block */
  set_sb_free_blocks( rs, sb_free_blocks(rs) + 1 );

  journal_mark_dirty (th, s, sbh);
  s->s_dirt = 1;
}
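
A hedged usage sketch: since the bitmap and super-block updates are journaled, reiserfs_free_block() must run inside a transaction. The journal_begin()/journal_end() calls below follow the pattern of reiserfs code from this era, but the snippet is illustrative, not lifted from the sources.

static void free_one_block (struct super_block * s, unsigned long block)
{
    struct reiserfs_transaction_handle th;

    journal_begin (&th, s, JOURNAL_PER_BALANCE_CNT);   /* open a transaction */
    reiserfs_free_block (&th, block);                  /* journaled bitmap + sb update */
    journal_end (&th, s, JOURNAL_PER_BALANCE_CNT);
}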
Example #7
/* Tries to find a contiguous window of zero bits (of a given size) in the given
 * region of the bitmap and places new blocks there. Returns the number of
 * allocated blocks. */
static int scan_bitmap (struct reiserfs_transaction_handle *th,
                        b_blocknr_t *start, b_blocknr_t finish,
                        int min, int max, int unfm, unsigned long file_block)
{
    int nr_allocated=0;
    struct super_block * s = th->t_super;
    /* find every bm and bmap and bmap_nr in this file, and change them all to bitmap_blocknr
     * - Hans, it is not a block number - Zam. */

    int bm, off;
    int end_bm, end_off;
    int off_max = s->s_blocksize << 3;

    BUG_ON (!th->t_trans_id);

    PROC_INFO_INC( s, scan_bitmap.call );
    if ( SB_FREE_BLOCKS(s) <= 0)
        return 0; // No point in looking for more free blocks

    get_bit_address (s, *start, &bm, &off);
    get_bit_address (s, finish, &end_bm, &end_off);
    if (bm > SB_BMAP_NR(s))
        return 0;
    if (end_bm > SB_BMAP_NR(s))
        end_bm = SB_BMAP_NR(s);

    /* When the bitmap is more than 10% free, anyone can allocate.
     * When it's less than 10% free, only files that already use the
     * bitmap are allowed. Once we pass 80% full, this restriction
     * is lifted.
     *
     * We do this so that files that grow later still have space close to
     * their original allocation. This improves locality, and presumably
     * performance as a result.
     *
     * This is only an allocation policy and does not make up for getting a
     * bad hint. Decent hinting must be implemented for this to work well.
     */
    if ( TEST_OPTION(skip_busy, s) && SB_FREE_BLOCKS(s) > SB_BLOCK_COUNT(s)/20 ) {
        for (; bm < end_bm; bm++, off = 0) {
            if ( ( off && (!unfm || (file_block != 0))) || SB_AP_BITMAP(s)[bm].free_count > (s->s_blocksize << 3) / 10 )
                nr_allocated = scan_bitmap_block(th, bm, &off, off_max, min, max, unfm);
            if (nr_allocated)
                goto ret;
        }
        /* we know from above that start is a reasonable number */
        get_bit_address (s, *start, &bm, &off);
    }

    for (; bm < end_bm; bm++, off = 0) {
        nr_allocated = scan_bitmap_block(th, bm, &off, off_max, min, max, unfm);
        if (nr_allocated)
            goto ret;
    }

    nr_allocated = scan_bitmap_block(th, bm, &off, end_off + 1, min, max, unfm);

ret:
    *start = bm * off_max + off;
    return nr_allocated;

}
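
The eligibility condition inside the skip_busy loop is dense; factored out as a hypothetical helper, it reads: scan a bitmap block if the caller already has context in it (a non-zero offset for a formatted node, or a non-zero file position), or if more than 10% of the bits it covers are free.

static int bitmap_worth_scanning (struct super_block * s, int bm, int off,
                                  int unfm, unsigned long file_block)
{
    /* the caller already has a foothold in this bitmap block */
    if (off && (!unfm || file_block != 0))
        return 1;
    /* otherwise require at least 10% free bits */
    return SB_AP_BITMAP(s)[bm].free_count > (s->s_blocksize << 3) / 10;
}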
Example #8
/* Searches for a window of zero bits with the given minimum and maximum lengths
 * in one bitmap block. */
static int scan_bitmap_block (struct reiserfs_transaction_handle *th,
                              int bmap_n, int *beg, int boundary, int min, int max, int unfm)
{
    struct super_block *s = th->t_super;
    struct reiserfs_bitmap_info *bi=&SB_AP_BITMAP(s)[bmap_n];
    int end, next;
    int org = *beg;

    BUG_ON (!th->t_trans_id);

    RFALSE(bmap_n >= SB_BMAP_NR (s), "Bitmap %d is out of range (0..%d)",bmap_n, SB_BMAP_NR (s) - 1);
    PROC_INFO_INC( s, scan_bitmap.bmap );
    /* this is unclear and lacks comments, explain how journal bitmaps
       work here for the reader.  Convey a sense of the design here. What
       is a window? */
    /* - I mean `a window of zero bits' as in description of this function - Zam. */

    if ( !bi ) {
        reiserfs_warning (s, "NULL bitmap info pointer for bitmap %d", bmap_n);
        return 0;
    }
    if (buffer_locked (bi->bh)) {
        PROC_INFO_INC( s, scan_bitmap.wait );
        __wait_on_buffer (bi->bh);
    }

    while (1) {
cont:
        if (bi->free_count < min)
            return 0; // No free blocks in this bitmap

        /* search for the first zero bit -- the beginning of a window */
        *beg = reiserfs_find_next_zero_le_bit
               ((unsigned long*)(bi->bh->b_data), boundary, *beg);

        if (*beg + min > boundary) {
            /* the search for a zero bit failed, or the rest of the bitmap
             * block cannot contain a zero window of the minimum size */
            return 0;
        }

        if (unfm && is_block_in_journal(s,bmap_n, *beg, beg))
            continue;
        /* first zero bit found; we check next bits */
        for (end = *beg + 1;; end ++) {
            if (end >= *beg + max || end >= boundary || reiserfs_test_le_bit (end, bi->bh->b_data)) {
                next = end;
                break;
            }
            /* finding the other end of zero bit window requires looking into journal structures (in
             * case of searching for free blocks for unformatted nodes) */
            if (unfm && is_block_in_journal(s, bmap_n, end, &next))
                break;
        }

        /* now (*beg) points to beginning of zero bits window,
         * (end) points to one bit after the window end */
        if (end - *beg >= min) { /* it seems we have found a window of the proper size */
            int i;
            reiserfs_prepare_for_journal (s, bi->bh, 1);
            /* try to mark all blocks as used, checking whether they are still free */
            for (i = *beg; i < end; i++) {
                /* It seems that we should not check the journal again. */
                if (reiserfs_test_and_set_le_bit (i, bi->bh->b_data)) {
                    /* bit was set by another process
                     * while we slept in prepare_for_journal() */
                    PROC_INFO_INC( s, scan_bitmap.stolen );
                    if (i >= *beg + min) {
                        /* we can continue with a smaller set of allocated blocks,
                         * if the length of this set is greater than or equal to `min' */
                        end = i;
                        break;
                    }
                    /* otherwise we clear all bits that were set ... */
                    while (--i >= *beg)
                        reiserfs_test_and_clear_le_bit (i, bi->bh->b_data);
                    reiserfs_restore_prepared_buffer (s, bi->bh);
                    *beg = org;
                    /* ... and search again in current block from beginning */
                    goto cont;
                }
            }
            bi->free_count -= (end - *beg);
            journal_mark_dirty (th, s, bi->bh);

            /* free block count calculation */
            reiserfs_prepare_for_journal (s, SB_BUFFER_WITH_SB(s), 1);
            PUT_SB_FREE_BLOCKS(s, SB_FREE_BLOCKS(s) - (end - *beg));
            journal_mark_dirty (th, s, SB_BUFFER_WITH_SB(s));

            return end - (*beg);
        } else {
            *beg = next;
        }
    }
}
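
Stripped of journaling, locking, and proc accounting, the core of scan_bitmap_block() is a zero-window search: find the first zero bit, extend the run until a set bit, `max` bits, or the boundary, and accept it if it is at least `min` long. A standalone sketch on a plain byte array (hypothetical helper, little-endian bit order):

static int find_zero_window (const unsigned char * map, int boundary,
                             int min, int max, int * beg)
{
    int end;

    while (*beg + min <= boundary) {
        /* skip set bits to find the start of a window */
        while (*beg < boundary && (map[*beg >> 3] & (1 << (*beg & 7))))
            (*beg)++;
        if (*beg + min > boundary)
            return 0;                   /* no room left for a minimal window */
        /* extend the window up to max bits or the first set bit */
        for (end = *beg + 1; end < *beg + max && end < boundary; end++)
            if (map[end >> 3] & (1 << (end & 7)))
                break;
        if (end - *beg >= min)
            return end - *beg;          /* window of proper size found */
        *beg = end;                     /* too short: resume after it */
    }
    return 0;
}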
Example #9
int balance_internal(struct tree_balance *tb,	/* tree_balance structure               */
		     int h,	/* level of the tree                    */
		     int child_pos, struct item_head *insert_key,	/* key for insertion on higher level    */
		     struct buffer_head **insert_ptr	/* node for insertion on higher level */
    )
    /* if inserting/pasting
       {
         child_pos is the position of the node-pointer in S[h] that
         pointed to S[h-1] before balancing of the h-1 level;
         this means that new pointers and items must be inserted AFTER
         child_pos
       }
       else
       {
         it is the position of the leftmost pointer that must be deleted
         (together with its corresponding key to the left of the pointer)
         as a result of the previous level's balancing.
       }
     */
{
	struct buffer_head *tbSh = PATH_H_PBUFFER(tb->tb_path, h);
	struct buffer_info bi;
	int order;		/* we return this: it is 0 if there is no S[h], else it is tb->S[h]->b_item_order */
	int insert_num, n, k;
	struct buffer_head *S_new;
	struct item_head new_insert_key;
	struct buffer_head *new_insert_ptr = NULL;
	struct item_head *new_insert_key_addr = insert_key;

	RFALSE(h < 1, "h (%d) can not be < 1 on internal level", h);

	PROC_INFO_INC(tb->tb_sb, balance_at[h]);

	order =
	    (tbSh) ? PATH_H_POSITION(tb->tb_path,
				     h + 1) /*tb->S[h]->b_item_order */ : 0;

	/* Using insert_size[h] calculate the number insert_num of items
	   that must be inserted to or deleted from S[h]. */
	insert_num = tb->insert_size[h] / ((int)(KEY_SIZE + DC_SIZE));

	/* Check whether insert_num is proper */
	RFALSE(insert_num < -2 || insert_num > 2,
	       "incorrect number of items inserted to the internal node (%d)",
	       insert_num);
	RFALSE(h > 1 && (insert_num > 1 || insert_num < -1),
	       "incorrect number of items (%d) inserted to the internal node on a level (h=%d) higher than last internal level",
	       insert_num, h);

	/* Perform the balancing when insert_num < 0 */
	if (insert_num < 0) {
		balance_internal_when_delete(tb, h, child_pos);
		return order;
	}

	k = 0;
	if (tb->lnum[h] > 0) {
		/* shift lnum[h] items from S[h] to the left neighbor L[h].
		   check how many of new items fall into L[h] or CFL[h] after
		   shifting */
		n = B_NR_ITEMS(tb->L[h]);	/* number of items in L[h] */
		if (tb->lnum[h] <= child_pos) {
			/* new items don't fall into L[h] or CFL[h] */
			internal_shift_left(INTERNAL_SHIFT_FROM_S_TO_L, tb, h,
					    tb->lnum[h]);
			/*internal_shift_left (tb->L[h],tb->CFL[h],tb->lkey[h],tbSh,tb->lnum[h]); */
			child_pos -= tb->lnum[h];
		} else if (tb->lnum[h] > child_pos + insert_num) {
			/* all new items fall into L[h] */
			internal_shift_left(INTERNAL_SHIFT_FROM_S_TO_L, tb, h,
					    tb->lnum[h] - insert_num);
			/*                  internal_shift_left(tb->L[h],tb->CFL[h],tb->lkey[h],tbSh,
			   tb->lnum[h]-insert_num);
			 */
			/* insert insert_num keys and node-pointers into L[h] */
			bi.tb = tb;
			bi.bi_bh = tb->L[h];
			bi.bi_parent = tb->FL[h];
			bi.bi_position = get_left_neighbor_position(tb, h);
			internal_insert_childs(&bi,
					       /*tb->L[h], tb->S[h-1]->b_next */
					       n + child_pos + 1,
					       insert_num, insert_key,
					       insert_ptr);

			insert_num = 0;
		} else {
			struct disk_child *dc;

			/* some items fall into L[h] or CFL[h], but others do not */
			internal_shift1_left(tb, h, child_pos + 1);
			/* calculate number of new items that fall into L[h] */
			k = tb->lnum[h] - child_pos - 1;
			bi.tb = tb;
			bi.bi_bh = tb->L[h];
			bi.bi_parent = tb->FL[h];
			bi.bi_position = get_left_neighbor_position(tb, h);
			internal_insert_childs(&bi,
					       /*tb->L[h], tb->S[h-1]->b_next, */
					       n + child_pos + 1, k,
					       insert_key, insert_ptr);

			replace_lkey(tb, h, insert_key + k);

			/* replace the first node-ptr in S[h] by node-ptr to insert_ptr[k] */
			dc = B_N_CHILD(tbSh, 0);
			put_dc_size(dc,
				    MAX_CHILD_SIZE(insert_ptr[k]) -
				    B_FREE_SPACE(insert_ptr[k]));
			put_dc_block_number(dc, insert_ptr[k]->b_blocknr);

			do_balance_mark_internal_dirty(tb, tbSh, 0);

			k++;
			insert_key += k;
			insert_ptr += k;
			insert_num -= k;
			child_pos = 0;
		}
	}
	/* tb->lnum[h] > 0 */
	if (tb->rnum[h] > 0) {
		/*shift rnum[h] items from S[h] to the right neighbor R[h] */
		/* check how many of new items fall into R or CFR after shifting */
		n = B_NR_ITEMS(tbSh);	/* number of items in S[h] */
		if (n - tb->rnum[h] >= child_pos)
			/* new items fall into S[h] */
			/*internal_shift_right(tb,h,tbSh,tb->CFR[h],tb->rkey[h],tb->R[h],tb->rnum[h]); */
			internal_shift_right(INTERNAL_SHIFT_FROM_S_TO_R, tb, h,
					     tb->rnum[h]);
		else if (n + insert_num - tb->rnum[h] < child_pos) {
			/* all new items fall into R[h] */
			/*internal_shift_right(tb,h,tbSh,tb->CFR[h],tb->rkey[h],tb->R[h],
			   tb->rnum[h] - insert_num); */
			internal_shift_right(INTERNAL_SHIFT_FROM_S_TO_R, tb, h,
					     tb->rnum[h] - insert_num);

			/* insert insert_num keys and node-pointers into R[h] */
			bi.tb = tb;
			bi.bi_bh = tb->R[h];
			bi.bi_parent = tb->FR[h];
			bi.bi_position = get_right_neighbor_position(tb, h);
			internal_insert_childs(&bi,
					       /*tb->R[h],tb->S[h-1]->b_next */
					       child_pos - n - insert_num +
					       tb->rnum[h] - 1,
					       insert_num, insert_key,
					       insert_ptr);
			insert_num = 0;
		} else {
			struct disk_child *dc;

			/* one of the items falls into CFR[h] */
			internal_shift1_right(tb, h, n - child_pos + 1);
			/* calculate number of new items that fall into R[h] */
			k = tb->rnum[h] - n + child_pos - 1;
			bi.tb = tb;
			bi.bi_bh = tb->R[h];
			bi.bi_parent = tb->FR[h];
			bi.bi_position = get_right_neighbor_position(tb, h);
			internal_insert_childs(&bi,
					       /*tb->R[h], tb->R[h]->b_child, */
					       0, k, insert_key + 1,
					       insert_ptr + 1);

			replace_rkey(tb, h, insert_key + insert_num - k - 1);

			/* replace the first node-ptr in R[h] by node-ptr insert_ptr[insert_num-k-1] */
			dc = B_N_CHILD(tb->R[h], 0);
			put_dc_size(dc,
				    MAX_CHILD_SIZE(insert_ptr
						   [insert_num - k - 1]) -
				    B_FREE_SPACE(insert_ptr
						 [insert_num - k - 1]));
			put_dc_block_number(dc,
					    insert_ptr[insert_num - k -
						       1]->b_blocknr);

			do_balance_mark_internal_dirty(tb, tb->R[h], 0);

			insert_num -= (k + 1);
		}
	}

    /** Fill new node that appears instead of S[h] **/
	RFALSE(tb->blknum[h] > 2, "blknum can not be > 2 for internal level");
	RFALSE(tb->blknum[h] < 0, "blknum can not be < 0");

	if (!tb->blknum[h]) {	/* node S[h] is empty now */
		RFALSE(!tbSh, "S[h] is equal NULL");

		/* do what is needed for buffer thrown from tree */
		reiserfs_invalidate_buffer(tb, tbSh);
		return order;
	}

	if (!tbSh) {
		/* create new root */
		struct disk_child *dc;
		struct buffer_head *tbSh_1 = PATH_H_PBUFFER(tb->tb_path, h - 1);
		struct block_head *blkh;

		if (tb->blknum[h] != 1)
			reiserfs_panic(NULL, "ibalance-3", "One new node "
				       "required for creating the new root");
		/* S[h] = empty buffer from the list FEB. */
		tbSh = get_FEB(tb);
		blkh = B_BLK_HEAD(tbSh);
		set_blkh_level(blkh, h + 1);

		/* Put the unique node-pointer to S[h] that points to S[h-1]. */

		dc = B_N_CHILD(tbSh, 0);
		put_dc_block_number(dc, tbSh_1->b_blocknr);
		put_dc_size(dc,
			    (MAX_CHILD_SIZE(tbSh_1) - B_FREE_SPACE(tbSh_1)));

		tb->insert_size[h] -= DC_SIZE;
		set_blkh_free_space(blkh, blkh_free_space(blkh) - DC_SIZE);

		do_balance_mark_internal_dirty(tb, tbSh, 0);

		/*&&&&&&&&&&&&&&&&&&&&&&&& */
		check_internal(tbSh);
		/*&&&&&&&&&&&&&&&&&&&&&&&& */

		/* put new root into path structure */
		PATH_OFFSET_PBUFFER(tb->tb_path, ILLEGAL_PATH_ELEMENT_OFFSET) =
		    tbSh;

		/* Change root in structure super block. */
		PUT_SB_ROOT_BLOCK(tb->tb_sb, tbSh->b_blocknr);
		PUT_SB_TREE_HEIGHT(tb->tb_sb, SB_TREE_HEIGHT(tb->tb_sb) + 1);
		do_balance_mark_sb_dirty(tb, REISERFS_SB(tb->tb_sb)->s_sbh, 1);
	}

	if (tb->blknum[h] == 2) {
		int snum;
		struct buffer_info dest_bi, src_bi;

		/* S_new = free buffer from list FEB */
		S_new = get_FEB(tb);

		set_blkh_level(B_BLK_HEAD(S_new), h + 1);

		dest_bi.tb = tb;
		dest_bi.bi_bh = S_new;
		dest_bi.bi_parent = NULL;
		dest_bi.bi_position = 0;
		src_bi.tb = tb;
		src_bi.bi_bh = tbSh;
		src_bi.bi_parent = PATH_H_PPARENT(tb->tb_path, h);
		src_bi.bi_position = PATH_H_POSITION(tb->tb_path, h + 1);

		n = B_NR_ITEMS(tbSh);	/* number of items in S[h] */
		snum = (insert_num + n + 1) / 2;
		if (n - snum >= child_pos) {
			/* new items don't fall into S_new */
			/*  store the delimiting key for the next level */
			/* new_insert_key = (n - snum)'th key in S[h] */
			memcpy(&new_insert_key, B_N_PDELIM_KEY(tbSh, n - snum),
			       KEY_SIZE);
			/* last parameter is del_par */
			internal_move_pointers_items(&dest_bi, &src_bi,
						     LAST_TO_FIRST, snum, 0);
			/*            internal_move_pointers_items(S_new, tbSh, LAST_TO_FIRST, snum, 0); */
		} else if (n + insert_num - snum < child_pos) {
			/* all new items fall into S_new */
			/*  store the delimiting key for the next level */
			/* new_insert_key = (n + insert_item - snum)'th key in S[h] */
			memcpy(&new_insert_key,
			       B_N_PDELIM_KEY(tbSh, n + insert_num - snum),
			       KEY_SIZE);
			/* last parameter is del_par */
			internal_move_pointers_items(&dest_bi, &src_bi,
						     LAST_TO_FIRST,
						     snum - insert_num, 0);
			/*                  internal_move_pointers_items(S_new,tbSh,1,snum - insert_num,0); */

			/* insert insert_num keys and node-pointers into S_new */
			internal_insert_childs(&dest_bi,
					       /*S_new,tb->S[h-1]->b_next, */
					       child_pos - n - insert_num +
					       snum - 1,
					       insert_num, insert_key,
					       insert_ptr);

			insert_num = 0;
		} else {
			struct disk_child *dc;

			/* some items fall into S_new, but others do not */
			/* last parameter is del_par */
			internal_move_pointers_items(&dest_bi, &src_bi,
						     LAST_TO_FIRST,
						     n - child_pos + 1, 1);
			/*                  internal_move_pointers_items(S_new,tbSh,1,n - child_pos + 1,1); */
			/* calculate number of new items that fall into S_new */
			k = snum - n + child_pos - 1;

			internal_insert_childs(&dest_bi, /*S_new, */ 0, k,
					       insert_key + 1, insert_ptr + 1);

			/* new_insert_key = insert_key[insert_num - k - 1] */
			memcpy(&new_insert_key, insert_key + insert_num - k - 1,
			       KEY_SIZE);
			/* replace first node-ptr in S_new by node-ptr to insert_ptr[insert_num-k-1] */

			dc = B_N_CHILD(S_new, 0);
			put_dc_size(dc,
				    (MAX_CHILD_SIZE
				     (insert_ptr[insert_num - k - 1]) -
				     B_FREE_SPACE(insert_ptr
						  [insert_num - k - 1])));
			put_dc_block_number(dc,
					    insert_ptr[insert_num - k -
						       1]->b_blocknr);

			do_balance_mark_internal_dirty(tb, S_new, 0);

			insert_num -= (k + 1);
		}
		/* new_insert_ptr = node_pointer to S_new */
		new_insert_ptr = S_new;

		RFALSE(!buffer_journaled(S_new) || buffer_journal_dirty(S_new)
		       || buffer_dirty(S_new), "cm-00001: bad S_new (%b)",
		       S_new);

		// S_new is released in unfix_nodes
	}

	n = B_NR_ITEMS(tbSh);	/*number of items in S[h] */

	if (0 <= child_pos && child_pos <= n && insert_num > 0) {
		bi.tb = tb;
		bi.bi_bh = tbSh;
		bi.bi_parent = PATH_H_PPARENT(tb->tb_path, h);
		bi.bi_position = PATH_H_POSITION(tb->tb_path, h + 1);
		internal_insert_childs(&bi,	/*tbSh, */
				       /*          ( tb->S[h-1]->b_parent == tb->S[h] ) ? tb->S[h-1]->b_next :  tb->S[h]->b_child->b_next, */
				       child_pos, insert_num, insert_key,
				       insert_ptr);
	}

	memcpy(new_insert_key_addr, &new_insert_key, KEY_SIZE);
	insert_ptr[0] = new_insert_ptr;

	return order;
}
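
One detail worth pulling out of the blknum[h] == 2 branch: the split point snum = (insert_num + n + 1) / 2 sends roughly half of the combined load to S_new. A trivial illustration (hypothetical helper): with n = 5 existing items and insert_num = 2 new ones, snum = 4 pointers move to the new node.

static int items_for_S_new (int n, int insert_num)
{
    /* same formula as snum in balance_internal() above */
    return (insert_num + n + 1) / 2;
}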
Example #10
/* makes an object identifier unused */
void reiserfs_release_objectid(struct reiserfs_transaction_handle *th,
			       __u32 objectid_to_release)
{
	struct super_block *s = th->t_super;
	struct reiserfs_super_block *rs = SB_DISK_SUPER_BLOCK(s);
	__le32 *map = objectid_map(s, rs);
	int i = 0;

	BUG_ON(!th->t_trans_id);
	//return;
	check_objectid_map(s, map);

	reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
	journal_mark_dirty(th, s, SB_BUFFER_WITH_SB(s));

	/* start at the beginning of the objectid map (i = 0) and go to
	   the end of it (i = disk_sb->s_oid_cursize).  We use a linear
	   search, though a binary search might be more efficient after
	   many deletions (when the oid map is large).  We only check
	   even values of i. */
	while (i < sb_oid_cursize(rs)) {
		if (objectid_to_release == le32_to_cpu(map[i])) {
			/* This incrementation unallocates the objectid. */
			//map[i]++;
			le32_add_cpu(&map[i], 1);

			/* Did we unallocate the last member of an odd sequence, and can we shrink the oid map? */
			if (map[i] == map[i + 1]) {
				/* shrink objectid map */
				memmove(map + i, map + i + 2,
					(sb_oid_cursize(rs) - i -
					 2) * sizeof(__u32));
				//disk_sb->s_oid_cursize -= 2;
				set_sb_oid_cursize(rs, sb_oid_cursize(rs) - 2);

				RFALSE(sb_oid_cursize(rs) < 2 ||
				       sb_oid_cursize(rs) > sb_oid_maxsize(rs),
				       "vs-15005: objectid map corrupted cur_size == %d (max == %d)",
				       sb_oid_cursize(rs), sb_oid_maxsize(rs));
			}
			return;
		}

		if (objectid_to_release > le32_to_cpu(map[i]) &&
		    objectid_to_release < le32_to_cpu(map[i + 1])) {
			/* size of objectid map is not changed */
			if (objectid_to_release + 1 == le32_to_cpu(map[i + 1])) {
				//objectid_map[i+1]--;
				le32_add_cpu(&map[i + 1], -1);
				return;
			}

			/* JDM comparing two little-endian values for equality -- safe */
			if (sb_oid_cursize(rs) == sb_oid_maxsize(rs)) {
				/* objectid map must be expanded, but there is no space */
				PROC_INFO_INC(s, leaked_oid);
				return;
			}

			/* expand the objectid map */
			memmove(map + i + 3, map + i + 1,
				(sb_oid_cursize(rs) - i - 1) * sizeof(__u32));
			map[i + 1] = cpu_to_le32(objectid_to_release);
			map[i + 2] = cpu_to_le32(objectid_to_release + 1);
			set_sb_oid_cursize(rs, sb_oid_cursize(rs) + 2);
			return;
		}
		i += 2;
	}

	reiserfs_error(s, "vs-15011", "tried to free free object id (%lu)",
		       (long unsigned)objectid_to_release);
}
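
The map itself is a sorted sequence of little-endian 32-bit boundaries: even entries open allocated runs and odd entries open free runs, so [map[2k], map[2k+1]) is allocated. A userspace sketch of the same release logic on a plain uint32_t array, with endianness handling dropped (hypothetical helper, mirroring the branches above):

#include <stdint.h>
#include <string.h>

static void release_oid (uint32_t * map, int * cursize, int maxsize, uint32_t oid)
{
    int i;

    for (i = 0; i < *cursize; i += 2) {
        if (oid == map[i]) {                    /* first id of an allocated run */
            map[i]++;
            if (map[i] == map[i + 1]) {         /* run is now empty: merge */
                memmove (map + i, map + i + 2,
                         (*cursize - i - 2) * sizeof (uint32_t));
                *cursize -= 2;
            }
            return;
        }
        if (oid > map[i] && oid < map[i + 1]) { /* inside an allocated run */
            if (oid + 1 == map[i + 1]) {        /* last id of the run */
                map[i + 1]--;
                return;
            }
            if (*cursize == maxsize)            /* no room to split: leak it */
                return;
            memmove (map + i + 3, map + i + 1,
                     (*cursize - i - 1) * sizeof (uint32_t));
            map[i + 1] = oid;                   /* split the run in two */
            map[i + 2] = oid + 1;
            *cursize += 2;
            return;
        }
    }
}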
Example #11
/* The function is NOT SCHEDULE-SAFE! */
static int find_forward (struct super_block * s, int * bmap_nr, int * offset, int for_unformatted)
{
  int i, j;
  struct buffer_head * bh;
  unsigned long block_to_try = 0;
  unsigned long next_block_to_try = 0 ;

  PROC_INFO_INC( s, find_forward.call );

  for (i = *bmap_nr; i < SB_BMAP_NR (s); i ++, *offset = 0, 
	       PROC_INFO_INC( s, find_forward.bmap )) {
    /* get corresponding bitmap block */
    bh = SB_AP_BITMAP (s)[i];
    if (buffer_locked (bh)) {
	PROC_INFO_INC( s, find_forward.wait );
        __wait_on_buffer (bh);
    }
retry:
    j = reiserfs_find_next_zero_le_bit ((unsigned long *)bh->b_data, 
                                         s->s_blocksize << 3, *offset);

    /* wow, this really needs to be redone.  We can't allocate a block if
    ** it is in the journal somehow.  reiserfs_in_journal makes a suggestion
    ** for a good block if the one you ask for is in the journal.  Note,
    ** reiserfs_in_journal might reject the block it suggests.  The big
    ** gain from the suggestion is when a big file has been deleted, and
    ** many blocks show free in the real bitmap, but are all not free
    ** in the journal list bitmaps.
    **
    ** this whole system sucks.  The bitmaps should reflect exactly what
    ** can and can't be allocated, and the journal should update them as
    ** it goes.  TODO.
    */
    if (j < (s->s_blocksize << 3)) {
      block_to_try = (i * (s->s_blocksize << 3)) + j; 

      /* the block is not in the journal, we can proceed */
      if (!(reiserfs_in_journal(s, s->s_dev, block_to_try, s->s_blocksize, for_unformatted, &next_block_to_try))) {
	*bmap_nr = i;
	*offset = j;
	return 1;
      } 
      /* the block is in the journal */
      else if ((j+1) < (s->s_blocksize << 3)) { /* try again */
	/* reiserfs_in_journal suggested a new block to try */
	if (next_block_to_try > 0) {
	  int new_i ;
	  get_bit_address (s, next_block_to_try, &new_i, offset);

	  PROC_INFO_INC( s, find_forward.in_journal_hint );

	  /* block is not in this bitmap. reset i and continue
	  ** we only reset i if new_i is in a later bitmap.
	  */
	  if (new_i > i) {
	    i = (new_i - 1 ); /* i gets incremented by the for loop */
	    PROC_INFO_INC( s, find_forward.in_journal_out );
	    continue ;
	  }
	} else {
	  /* no suggestion was made, just try the next block */
	  *offset = j+1 ;
	}
	PROC_INFO_INC( s, find_forward.retry );
	goto retry ;
      }
    }
  }
  /* zero bit not found */
  return 0;
}
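
A hedged usage sketch (hypothetical caller): seed (bmap_nr, offset) from a goal block with get_bit_address(), let find_forward() walk forward, and re-encode the result as an absolute block number.

static int pick_block_forward (struct super_block * s, unsigned long goal,
                               int for_unformatted, unsigned long * result)
{
    int bmap_nr, offset;

    get_bit_address (s, goal, &bmap_nr, &offset);
    if (!find_forward (s, &bmap_nr, &offset, for_unformatted))
        return 0;           /* no journal-clean zero bit up to the last bitmap */
    *result = ((unsigned long) bmap_nr * (s->s_blocksize << 3)) + offset;
    return 1;
}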