Example #1
0
/* stolen from fs/buffer.c */
void reiserfs_unmap_buffer(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_journaled(bh) || buffer_journal_dirty(bh)) {
		BUG();
	}
	clear_buffer_dirty(bh);
	/*
	 * Remove the buffer from whatever list it belongs to. We are mostly
	 * interested in removing it from the per-sb j_dirty_buffers list, to
	 * avoid a BUG() on an attempt to write a buffer that is not mapped.
	 */
	if ((!list_empty(&bh->b_assoc_buffers) || bh->b_private) && bh->b_page) {
		struct inode *inode = bh->b_page->mapping->host;
		struct reiserfs_journal *j = SB_JOURNAL(inode->i_sb);
		spin_lock(&j->j_dirty_buffers_lock);
		list_del_init(&bh->b_assoc_buffers);
		reiserfs_free_jh(bh);
		spin_unlock(&j->j_dirty_buffers_lock);
	}
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	bh->b_bdev = NULL;
	unlock_buffer(bh);
}
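Note on the list handling above: the !list_empty(&bh->b_assoc_buffers) test is only reliable because removal goes through list_del_init(), which re-initializes the unlinked node to point at itself instead of leaving stale neighbor pointers. A minimal user-space re-implementation of that intrusive-list idiom (an illustrative sketch, not the kernel's code; the real helpers live in include/linux/list.h):

#include <stdio.h>

/* Minimal intrusive circular list in the style of the kernel's
   struct list_head (illustrative re-implementation, not kernel code). */
struct list_node {
	struct list_node *prev, *next;
};

static void list_init(struct list_node *n)
{
	n->prev = n->next = n;
}

static int list_empty_sketch(const struct list_node *n)
{
	return n->next == n;
}

static void list_add_sketch(struct list_node *n, struct list_node *head)
{
	n->next = head->next;
	n->prev = head;
	head->next->prev = n;
	head->next = n;
}

/* Unlink and re-initialize: a later list_empty_sketch() check, like the
   !list_empty(&bh->b_assoc_buffers) test above, stays meaningful. */
static void list_del_init_sketch(struct list_node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	list_init(n);
}

int main(void)
{
	struct list_node head, elem;

	list_init(&head);
	list_init(&elem);
	list_add_sketch(&elem, &head);
	printf("linked: %d\n", !list_empty_sketch(&elem));	/* 1 */
	list_del_init_sketch(&elem);
	printf("linked: %d\n", !list_empty_sketch(&elem));	/* 0 */
	return 0;
}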
/* stolen from fs/buffer.c */
void reiserfs_unmap_buffer(struct buffer_head *bh)
{
	if (buffer_mapped(bh)) {
		if (buffer_journaled(bh) || buffer_journal_dirty(bh)) {
			BUG();
		}
		mark_buffer_clean(bh);
		lock_buffer(bh);
		clear_bit(BH_Mapped, &bh->b_state);
		clear_bit(BH_Req, &bh->b_state);
		clear_bit(BH_New, &bh->b_state);
		unlock_buffer(bh);
	}
}
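Both versions clear the same three state bits (BH_Mapped, BH_Req, BH_New); the newer one goes through generated clear_buffer_*() helpers and holds the buffer lock across all state changes, while the old one open-codes clear_bit() on b_state and calls mark_buffer_clean() before taking the lock. A rough user-space sketch of how such per-flag helpers can be generated from the bit numbers (illustrative and non-atomic; the kernel's BUFFER_FNS macro in include/linux/buffer_head.h uses atomic bitops):

#include <stdio.h>

/* Illustrative buffer-state bits, mirroring the BH_* enum. */
enum { BH_Mapped_s, BH_Req_s, BH_New_s };

struct bh_sketch {
	unsigned long b_state;
};

/* Generate set/clear/test helpers for one flag, in the spirit of the
   kernel's BUFFER_FNS macro (plain bitops here, not atomic ones). */
#define BUFFER_FNS_SKETCH(bit, name)					\
static void set_buffer_##name(struct bh_sketch *bh)			\
{ bh->b_state |= 1UL << (bit); }					\
static void clear_buffer_##name(struct bh_sketch *bh)			\
{ bh->b_state &= ~(1UL << (bit)); }					\
static int buffer_##name(const struct bh_sketch *bh)			\
{ return !!(bh->b_state & (1UL << (bit))); }

BUFFER_FNS_SKETCH(BH_Mapped_s, mapped)
BUFFER_FNS_SKETCH(BH_Req_s, req)
BUFFER_FNS_SKETCH(BH_New_s, new)

int main(void)
{
	struct bh_sketch bh = { 0 };

	set_buffer_mapped(&bh);
	set_buffer_req(&bh);
	set_buffer_new(&bh);
	printf("mapped=%d req=%d new=%d\n",
	       buffer_mapped(&bh), buffer_req(&bh), buffer_new(&bh));
	/* the same three clears reiserfs_unmap_buffer() performs */
	clear_buffer_mapped(&bh);
	clear_buffer_req(&bh);
	clear_buffer_new(&bh);
	printf("state=%#lx\n", bh.b_state);	/* 0 */
	return 0;
}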
Example #3
0
/* When we allocate a new block (get_new_buffer, get_empty_nodes) and
   get a buffer for it, it is possible that it is held by someone else
   or even by this process. In this function we wait until all other
   holders release the buffer. To make sure the current process does
   not hold it, we freed all buffers either in the tree balance
   structure (get_empty_nodes and get_nodes_for_preserving) or in the
   path structure only (get_new_buffer) just before calling this. */
void wait_buffer_until_released(const struct buffer_head *bh)
{
	int repeat_counter = 0;

	while (atomic_read(&bh->b_count) > 1) {
		if (!(++repeat_counter % 30000000)) {
			reiserfs_warning(NULL,
					 "vs-3050: wait_buffer_until_released: nobody releases buffer (%b). Still waiting (%d) %cJDIRTY %cJWAIT\n",
					 bh, repeat_counter,
					 buffer_journaled(bh) ? ' ' : '!',
					 buffer_journal_dirty(bh) ? ' ' : '!');
		}
		run_task_queue(&tq_disk);
		yield();
	}
	if (repeat_counter > 30000000) {
		reiserfs_warning(NULL,
				 "vs-3051: done waiting, ignore vs-3050 messages for (%b)\n",
				 bh);
	}
}
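wait_buffer_until_released() polls b_count, kicking the disk task queue and yielding between checks, and emits the vs-3050 warning once every 30000000 iterations so a buffer that is never released becomes visible in the log. A user-space analogue of that poll-with-periodic-warning shape (an illustrative sketch; the threshold and all names here are invented for the example):

#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int holders = 2;	/* us plus one other holder */

static void *other_holder(void *arg)
{
	(void)arg;
	usleep(10000);			/* hold the resource briefly */
	atomic_fetch_sub(&holders, 1);	/* then release it */
	return NULL;
}

/* Spin until we are the sole holder, warning periodically so a stuck
   wait shows up in the output, the same shape as vs-3050 above. */
static void wait_until_sole_holder(void)
{
	long spins = 0;

	while (atomic_load(&holders) > 1) {
		if (!(++spins % 1000000))
			fprintf(stderr, "still waiting (%ld spins)\n", spins);
		sched_yield();	/* give other holders a chance to run */
	}
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, other_holder, NULL);
	wait_until_sole_holder();
	pthread_join(&t, NULL);
	puts("sole holder now");
	return 0;
}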
void reiserfs_unmap_buffer(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_journaled(bh) || buffer_journal_dirty(bh)) {
		BUG();
	}
	clear_buffer_dirty(bh);
	if ((!list_empty(&bh->b_assoc_buffers) || bh->b_private) && bh->b_page) {
		struct inode *inode = bh->b_page->mapping->host;
		struct reiserfs_journal *j = SB_JOURNAL(inode->i_sb);
		spin_lock(&j->j_dirty_buffers_lock);
		list_del_init(&bh->b_assoc_buffers);
		reiserfs_free_jh(bh);
		spin_unlock(&j->j_dirty_buffers_lock);
	}
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	bh->b_bdev = NULL;
	unlock_buffer(bh);
}
int balance_internal(struct tree_balance *tb,	/* tree_balance structure */
		     int h,	/* level of the tree */
		     int child_pos,
		     struct item_head *insert_key,	/* key for insertion on higher level */
		     struct buffer_head **insert_ptr	/* node for insertion on higher level */
    )
    /*
     * If inserting/pasting:
     *   child_pos is the position of the node-pointer in S[h] that
     *   pointed to S[h-1] before balancing of the h-1 level; this
     *   means that new pointers and items must be inserted AFTER
     *   child_pos.
     * Otherwise:
     *   it is the position of the leftmost pointer that must be
     *   deleted (together with its corresponding key to the left of
     *   the pointer) as a result of the previous level's balancing.
     */
{
	struct buffer_head *tbSh = PATH_H_PBUFFER(tb->tb_path, h);
	struct buffer_info bi;
	int order;		/* we return this: it is 0 if there is no S[h], else it is tb->S[h]->b_item_order */
	int insert_num, n, k;
	struct buffer_head *S_new;
	struct item_head new_insert_key;
	struct buffer_head *new_insert_ptr = NULL;
	struct item_head *new_insert_key_addr = insert_key;

	RFALSE(h < 1, "h (%d) can not be < 1 on internal level", h);

	PROC_INFO_INC(tb->tb_sb, balance_at[h]);

	/* tb->S[h]->b_item_order */
	order = tbSh ? PATH_H_POSITION(tb->tb_path, h + 1) : 0;

	/* Using insert_size[h], calculate the number insert_num of items
	   that must be inserted into or deleted from S[h]. */
	insert_num = tb->insert_size[h] / ((int)(KEY_SIZE + DC_SIZE));
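	/*
	 * Worked example (added for illustration): each inserted pair of
	 * a key plus a node pointer costs KEY_SIZE + DC_SIZE bytes, so an
	 * insert_size[h] of exactly one pair yields insert_num == 1, and
	 * a deletion of one pair, recorded as a negative insert_size[h],
	 * yields insert_num == -1.
	 */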

	/* Check whether insert_num is proper */
	RFALSE(insert_num < -2 || insert_num > 2,
	       "incorrect number of items inserted to the internal node (%d)",
	       insert_num);
	RFALSE(h > 1 && (insert_num > 1 || insert_num < -1),
	       "incorrect number of items (%d) inserted to the internal node on a level (h=%d) higher than last internal level",
	       insert_num, h);

	/* Make balance in case insert_num < 0 */
	if (insert_num < 0) {
		balance_internal_when_delete(tb, h, child_pos);
		return order;
	}

	k = 0;
	if (tb->lnum[h] > 0) {
		/* shift lnum[h] items from S[h] to the left neighbor L[h];
		   check how many of the new items fall into L[h] or CFL[h]
		   after shifting */
		n = B_NR_ITEMS(tb->L[h]);	/* number of items in L[h] */
		if (tb->lnum[h] <= child_pos) {
			/* new items don't fall into L[h] or CFL[h] */
			internal_shift_left(INTERNAL_SHIFT_FROM_S_TO_L, tb, h,
					    tb->lnum[h]);
			/*internal_shift_left (tb->L[h],tb->CFL[h],tb->lkey[h],tbSh,tb->lnum[h]); */
			child_pos -= tb->lnum[h];
		} else if (tb->lnum[h] > child_pos + insert_num) {
			/* all new items fall into L[h] */
			internal_shift_left(INTERNAL_SHIFT_FROM_S_TO_L, tb, h,
					    tb->lnum[h] - insert_num);
			/*                  internal_shift_left(tb->L[h],tb->CFL[h],tb->lkey[h],tbSh,
			   tb->lnum[h]-insert_num);
			 */
			/* insert insert_num keys and node-pointers into L[h] */
			bi.tb = tb;
			bi.bi_bh = tb->L[h];
			bi.bi_parent = tb->FL[h];
			bi.bi_position = get_left_neighbor_position(tb, h);
			internal_insert_childs(&bi,
					       /*tb->L[h], tb->S[h-1]->b_next */
					       n + child_pos + 1,
					       insert_num, insert_key,
					       insert_ptr);

			insert_num = 0;
		} else {
			struct disk_child *dc;

			/* some of the new items fall into L[h] or CFL[h], but others do not */
			internal_shift1_left(tb, h, child_pos + 1);
			/* calculate number of new items that fall into L[h] */
			k = tb->lnum[h] - child_pos - 1;
			bi.tb = tb;
			bi.bi_bh = tb->L[h];
			bi.bi_parent = tb->FL[h];
			bi.bi_position = get_left_neighbor_position(tb, h);
			internal_insert_childs(&bi,
					       /*tb->L[h], tb->S[h-1]->b_next, */
					       n + child_pos + 1, k,
					       insert_key, insert_ptr);

			replace_lkey(tb, h, insert_key + k);

			/* replace the first node-ptr in S[h] by node-ptr to insert_ptr[k] */
			dc = B_N_CHILD(tbSh, 0);
			put_dc_size(dc,
				    MAX_CHILD_SIZE(insert_ptr[k]) -
				    B_FREE_SPACE(insert_ptr[k]));
			put_dc_block_number(dc, insert_ptr[k]->b_blocknr);

			do_balance_mark_internal_dirty(tb, tbSh, 0);

			k++;
			insert_key += k;
			insert_ptr += k;
			insert_num -= k;
			child_pos = 0;
		}
	}
	/* tb->lnum[h] > 0 */
	if (tb->rnum[h] > 0) {
		/* shift rnum[h] items from S[h] to the right neighbor R[h];
		   check how many of the new items fall into R[h] or CFR[h]
		   after shifting */
		n = B_NR_ITEMS(tbSh);	/* number of items in S[h] */
		if (n - tb->rnum[h] >= child_pos)
			/* new items fall into S[h] */
			/*internal_shift_right(tb,h,tbSh,tb->CFR[h],tb->rkey[h],tb->R[h],tb->rnum[h]); */
			internal_shift_right(INTERNAL_SHIFT_FROM_S_TO_R, tb, h,
					     tb->rnum[h]);
		else if (n + insert_num - tb->rnum[h] < child_pos) {
			/* all new items fall into R[h] */
			/*internal_shift_right(tb,h,tbSh,tb->CFR[h],tb->rkey[h],tb->R[h],
			   tb->rnum[h] - insert_num); */
			internal_shift_right(INTERNAL_SHIFT_FROM_S_TO_R, tb, h,
					     tb->rnum[h] - insert_num);

			/* insert insert_num keys and node-pointers into R[h] */
			bi.tb = tb;
			bi.bi_bh = tb->R[h];
			bi.bi_parent = tb->FR[h];
			bi.bi_position = get_right_neighbor_position(tb, h);
			internal_insert_childs(&bi,
					       /*tb->R[h],tb->S[h-1]->b_next */
					       child_pos - n - insert_num +
					       tb->rnum[h] - 1,
					       insert_num, insert_key,
					       insert_ptr);
			insert_num = 0;
		} else {
			struct disk_child *dc;

			/* one of the items falls into CFR[h] */
			internal_shift1_right(tb, h, n - child_pos + 1);
			/* calculate number of new items that fall into R[h] */
			k = tb->rnum[h] - n + child_pos - 1;
			bi.tb = tb;
			bi.bi_bh = tb->R[h];
			bi.bi_parent = tb->FR[h];
			bi.bi_position = get_right_neighbor_position(tb, h);
			internal_insert_childs(&bi,
					       /*tb->R[h], tb->R[h]->b_child, */
					       0, k, insert_key + 1,
					       insert_ptr + 1);

			replace_rkey(tb, h, insert_key + insert_num - k - 1);

			/* replace the first node-ptr in R[h] by node-ptr insert_ptr[insert_num-k-1] */
			dc = B_N_CHILD(tb->R[h], 0);
			put_dc_size(dc,
				    MAX_CHILD_SIZE(insert_ptr[insert_num - k - 1]) -
				    B_FREE_SPACE(insert_ptr[insert_num - k - 1]));
			put_dc_block_number(dc,
					    insert_ptr[insert_num - k - 1]->b_blocknr);

			do_balance_mark_internal_dirty(tb, tb->R[h], 0);

			insert_num -= (k + 1);
		}
	}

	/* Fill the new node that appears instead of S[h] */
	RFALSE(tb->blknum[h] > 2, "blknum can not be > 2 for internal level");
	RFALSE(tb->blknum[h] < 0, "blknum can not be < 0");

	if (!tb->blknum[h]) {	/* node S[h] is empty now */
		RFALSE(!tbSh, "S[h] is equal NULL");

		/* do what is needed for a buffer thrown out of the tree */
		reiserfs_invalidate_buffer(tb, tbSh);
		return order;
	}

	if (!tbSh) {
		/* create new root */
		struct disk_child *dc;
		struct buffer_head *tbSh_1 = PATH_H_PBUFFER(tb->tb_path, h - 1);
		struct block_head *blkh;

		if (tb->blknum[h] != 1)
			reiserfs_panic(NULL, "ibalance-3", "One new node "
				       "required for creating the new root");
		/* S[h] = empty buffer from the list FEB. */
		tbSh = get_FEB(tb);
		blkh = B_BLK_HEAD(tbSh);
		set_blkh_level(blkh, h + 1);

		/* Put the unique node-pointer to S[h] that points to S[h-1]. */

		dc = B_N_CHILD(tbSh, 0);
		put_dc_block_number(dc, tbSh_1->b_blocknr);
		put_dc_size(dc,
			    (MAX_CHILD_SIZE(tbSh_1) - B_FREE_SPACE(tbSh_1)));

		tb->insert_size[h] -= DC_SIZE;
		set_blkh_free_space(blkh, blkh_free_space(blkh) - DC_SIZE);

		do_balance_mark_internal_dirty(tb, tbSh, 0);

		/* debugging consistency check of the new root */
		check_internal(tbSh);

		/* put new root into path structure */
		PATH_OFFSET_PBUFFER(tb->tb_path, ILLEGAL_PATH_ELEMENT_OFFSET) =
		    tbSh;

		/* Change the root in the super block structure. */
		PUT_SB_ROOT_BLOCK(tb->tb_sb, tbSh->b_blocknr);
		PUT_SB_TREE_HEIGHT(tb->tb_sb, SB_TREE_HEIGHT(tb->tb_sb) + 1);
		do_balance_mark_sb_dirty(tb, REISERFS_SB(tb->tb_sb)->s_sbh, 1);
	}

	if (tb->blknum[h] == 2) {
		int snum;
		struct buffer_info dest_bi, src_bi;

		/* S_new = free buffer from list FEB */
		S_new = get_FEB(tb);

		set_blkh_level(B_BLK_HEAD(S_new), h + 1);

		dest_bi.tb = tb;
		dest_bi.bi_bh = S_new;
		dest_bi.bi_parent = NULL;
		dest_bi.bi_position = 0;
		src_bi.tb = tb;
		src_bi.bi_bh = tbSh;
		src_bi.bi_parent = PATH_H_PPARENT(tb->tb_path, h);
		src_bi.bi_position = PATH_H_POSITION(tb->tb_path, h + 1);

		n = B_NR_ITEMS(tbSh);	/* number of items in S[h] */
		snum = (insert_num + n + 1) / 2;
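		/*
		 * Note (added for illustration): snum is half of the
		 * n + insert_num total entries, rounded up; this is the
		 * share that will be moved into the new node S_new.
		 */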
		if (n - snum >= child_pos) {
			/* new items don't fall into S_new */
			/*  store the delimiting key for the next level */
			/* new_insert_key = (n - snum)'th key in S[h] */
			memcpy(&new_insert_key, B_N_PDELIM_KEY(tbSh, n - snum),
			       KEY_SIZE);
			/* last parameter is del_par */
			internal_move_pointers_items(&dest_bi, &src_bi,
						     LAST_TO_FIRST, snum, 0);
			/*            internal_move_pointers_items(S_new, tbSh, LAST_TO_FIRST, snum, 0); */
		} else if (n + insert_num - snum < child_pos) {
			/* all new items fall into S_new */
			/*  store the delimiting key for the next level */
			/* new_insert_key = (n + insert_num - snum)'th key in S[h] */
			memcpy(&new_insert_key,
			       B_N_PDELIM_KEY(tbSh, n + insert_num - snum),
			       KEY_SIZE);
			/* last parameter is del_par */
			internal_move_pointers_items(&dest_bi, &src_bi,
						     LAST_TO_FIRST,
						     snum - insert_num, 0);
			/*                  internal_move_pointers_items(S_new,tbSh,1,snum - insert_num,0); */

			/* insert insert_num keys and node-pointers into S_new */
			internal_insert_childs(&dest_bi,
					       /*S_new,tb->S[h-1]->b_next, */
					       child_pos - n - insert_num +
					       snum - 1,
					       insert_num, insert_key,
					       insert_ptr);

			insert_num = 0;
		} else {
			struct disk_child *dc;

			/* some of the new items fall into S_new, but others do not */
			/* last parameter is del_par */
			internal_move_pointers_items(&dest_bi, &src_bi,
						     LAST_TO_FIRST,
						     n - child_pos + 1, 1);
			/*                  internal_move_pointers_items(S_new,tbSh,1,n - child_pos + 1,1); */
			/* calculate number of new items that fall into S_new */
			k = snum - n + child_pos - 1;

			internal_insert_childs(&dest_bi, /*S_new, */ 0, k,
					       insert_key + 1, insert_ptr + 1);

			/* new_insert_key = insert_key[insert_num - k - 1] */
			memcpy(&new_insert_key, insert_key + insert_num - k - 1,
			       KEY_SIZE);
			/* replace first node-ptr in S_new by node-ptr to insert_ptr[insert_num-k-1] */

			dc = B_N_CHILD(S_new, 0);
			put_dc_size(dc,
				    MAX_CHILD_SIZE(insert_ptr[insert_num - k - 1]) -
				    B_FREE_SPACE(insert_ptr[insert_num - k - 1]));
			put_dc_block_number(dc,
					    insert_ptr[insert_num - k - 1]->b_blocknr);

			do_balance_mark_internal_dirty(tb, S_new, 0);

			insert_num -= (k + 1);
		}
		/* new_insert_ptr = node_pointer to S_new */
		new_insert_ptr = S_new;

		RFALSE(!buffer_journaled(S_new) || buffer_journal_dirty(S_new)
		       || buffer_dirty(S_new), "cm-00001: bad S_new (%b)",
		       S_new);

		/* S_new is released in unfix_nodes */
	}

	n = B_NR_ITEMS(tbSh);	/* number of items in S[h] */

	if (0 <= child_pos && child_pos <= n && insert_num > 0) {
		bi.tb = tb;
		bi.bi_bh = tbSh;
		bi.bi_parent = PATH_H_PPARENT(tb->tb_path, h);
		bi.bi_position = PATH_H_POSITION(tb->tb_path, h + 1);
		internal_insert_childs(&bi,	/*tbSh, */
				       /*          ( tb->S[h-1]->b_parent == tb->S[h] ) ? tb->S[h-1]->b_next :  tb->S[h]->b_child->b_next, */
				       child_pos, insert_num, insert_key,
				       insert_ptr);
	}

	memcpy(new_insert_key_addr, &new_insert_key, KEY_SIZE);
	insert_ptr[0] = new_insert_ptr;

	return order;
}
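The left-shift branch of balance_internal() splits into three cases depending on where the insertion point child_pos sits relative to the lnum[h] entries shifted into L[h]. A compact restatement of just that case analysis with a worked call (an illustrative sketch that reproduces the branch conditions above, not the actual data movement):

#include <stdio.h>

/* How many of the insert_num new entries land in the left neighbor
   L[h] when lnum entries are shifted out of S[h]?  This mirrors the
   three-way case split in balance_internal (illustrative only). */
static int new_entries_into_left(int lnum, int child_pos, int insert_num)
{
	if (lnum <= child_pos)
		return 0;		/* none fall into L[h] or CFL[h] */
	if (lnum > child_pos + insert_num)
		return insert_num;	/* all fall into L[h] */
	return lnum - child_pos - 1;	/* the partial case: k new entries */
}

int main(void)
{
	/* e.g. shifting 4 entries left with the insert point at
	   child_pos = 2 and insert_num = 2 splits the new entries */
	printf("%d\n", new_entries_into_left(4, 2, 2));	/* prints 1 */
	return 0;
}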