/*
 * Insert the src_position'th key of buffer src before the
 * dest_position_before'th key of buffer dest.
 */
static void internal_insert_key(struct buffer_info *dest_bi,
                                /* insert before this key position in dest */
                                int dest_position_before,
                                struct buffer_head *src, int src_position)
{
    struct buffer_head *dest = dest_bi->bi_bh;
    int nr;
    struct block_head *blkh;
    struct reiserfs_key *key;

    RFALSE(dest == NULL || src == NULL,
           "source(%p) or dest(%p) buffer is 0", src, dest);
    RFALSE(dest_position_before < 0 || src_position < 0,
           "source(%d) or dest(%d) key number less than 0",
           src_position, dest_position_before);
    RFALSE(dest_position_before > B_NR_ITEMS(dest) ||
           src_position >= B_NR_ITEMS(src),
           "invalid position in dest (%d (key number %d)) or in src (%d (key number %d))",
           dest_position_before, B_NR_ITEMS(dest),
           src_position, B_NR_ITEMS(src));
    RFALSE(B_FREE_SPACE(dest) < KEY_SIZE,
           "not enough free space (%d) in dest buffer", B_FREE_SPACE(dest));

    blkh = B_BLK_HEAD(dest);
    nr = blkh_nr_item(blkh);

    /* prepare space for inserting the key */
    key = internal_key(dest, dest_position_before);
    memmove(key + 1, key,
            (nr - dest_position_before) * KEY_SIZE + (nr + 1) * DC_SIZE);

    /* insert the key */
    memcpy(key, internal_key(src, src_position), KEY_SIZE);

    /* update item number and free space fields; mark the buffer dirty */
    set_blkh_nr_item(blkh, blkh_nr_item(blkh) + 1);
    set_blkh_free_space(blkh, blkh_free_space(blkh) - KEY_SIZE);

    do_balance_mark_internal_dirty(dest_bi->tb, dest, 0);

    if (dest_bi->bi_parent) {
        struct disk_child *t_dc;

        t_dc = B_N_CHILD(dest_bi->bi_parent, dest_bi->bi_position);
        put_dc_size(t_dc, dc_size(t_dc) + KEY_SIZE);

        do_balance_mark_internal_dirty(dest_bi->tb, dest_bi->bi_parent, 0);
    }
}
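/*
 * The single memmove above shifts the tail of the key array and the whole
 * disk_child array in one go; that works because an internal node lays out
 * its nr keys and its nr + 1 child pointers contiguously after the block
 * head. A minimal standalone sketch of that span computation follows; the
 * DEMO_* constants are illustrative stand-ins for the real KEY_SIZE and
 * DC_SIZE macros, not the kernel definitions.
 */
#include <stdio.h>

#define DEMO_KEY_SIZE 16    /* assumed sizeof(struct reiserfs_key) */
#define DEMO_DC_SIZE   8    /* assumed sizeof(struct disk_child) */

/*
 * Bytes that must move when a key is inserted at position pos of an
 * internal node holding nr keys: the keys at pos..nr-1 plus all nr + 1
 * child pointers that follow them contiguously.
 */
static int insert_shift_bytes(int nr, int pos)
{
    return (nr - pos) * DEMO_KEY_SIZE + (nr + 1) * DEMO_DC_SIZE;
}

int main(void)
{
    /* 10 keys, insert before key 4: 6 keys + 11 pointers = 184 bytes */
    printf("%d\n", insert_shift_bytes(10, 4));
    return 0;
}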
/* Returns 1 if buf looks like an internal node, 0 otherwise */
static int
is_internal(char *buf, int blocksize, struct buf *bp)
{
    int nr, used_space;
    struct block_head *blkh;

    blkh = (struct block_head *)buf;
    nr = blkh_level(blkh);
    if (nr <= DISK_LEAF_NODE_LEVEL || nr > MAX_HEIGHT) {
        /* This level is not possible for internal nodes */
        reiserfs_log(LOG_WARNING, "this should be caught earlier\n");
        return (0);
    }

    nr = blkh_nr_item(blkh);
    if (nr > (blocksize - BLKH_SIZE - DC_SIZE) / (KEY_SIZE + DC_SIZE)) {
        /*
         * For an internal node which is not the root we might also
         * check the minimum number of keys
         */
        reiserfs_log(LOG_WARNING, "number of keys seems wrong\n");
        return (0);
    }

    used_space = BLKH_SIZE + KEY_SIZE * nr + DC_SIZE * (nr + 1);
    if (used_space != blocksize - blkh_free_space(blkh)) {
        reiserfs_log(LOG_WARNING,
            "is_internal: free space seems wrong\n");
        return (0);
    }

    /* One may imagine many more checks */
    return (1);
}
/* returns 1 if buf looks like an internal node, 0 otherwise */
static int is_internal(char *buf, int blocksize, struct buffer_head *bh)
{
    struct block_head *blkh;
    int nr;
    int used_space;

    blkh = (struct block_head *)buf;
    nr = blkh_level(blkh);
    if (nr <= DISK_LEAF_NODE_LEVEL || nr > MAX_HEIGHT) {
        /* this level is not possible for internal nodes */
        reiserfs_warning(NULL, "reiserfs-5087",
                         "this should be caught earlier");
        return 0;
    }

    nr = blkh_nr_item(blkh);
    if (nr > (blocksize - BLKH_SIZE - DC_SIZE) / (KEY_SIZE + DC_SIZE)) {
        /*
         * for an internal node which is not the root we might also
         * check the minimum number of keys
         */
        reiserfs_warning(NULL, "reiserfs-5088",
                         "number of keys seems wrong: %z", bh);
        return 0;
    }

    used_space = BLKH_SIZE + KEY_SIZE * nr + DC_SIZE * (nr + 1);
    if (used_space != blocksize - blkh_free_space(blkh)) {
        reiserfs_warning(NULL, "reiserfs-5089",
                         "free space seems wrong: %z", bh);
        return 0;
    }

    /* one may imagine many more checks */
    return 1;
}
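/*
 * Both is_internal variants above rely on the same accounting identity:
 * an internal node with nr keys consumes the block head, nr keys, and
 * nr + 1 child pointers, and everything else must show up as free space.
 * A standalone sketch of the check with synthetic numbers; the DEMO_*
 * sizes are assumptions mirroring the usual on-disk layout, not the
 * kernel macros themselves.
 */
#include <stdio.h>

#define DEMO_BLKH_SIZE 24    /* assumed sizeof(struct block_head) */
#define DEMO_KEY_SIZE  16    /* assumed sizeof(struct reiserfs_key) */
#define DEMO_DC_SIZE    8    /* assumed sizeof(struct disk_child) */

/* 1 if the reported free space is consistent with nr keys, 0 otherwise */
static int internal_free_space_ok(int blocksize, int nr, int free_space)
{
    int used_space = DEMO_BLKH_SIZE + DEMO_KEY_SIZE * nr +
        DEMO_DC_SIZE * (nr + 1);

    return used_space == blocksize - free_space;
}

int main(void)
{
    /* a 4096-byte node with 100 keys uses 24 + 1600 + 808 = 2432 bytes */
    printf("%d\n", internal_free_space_ok(4096, 100, 4096 - 2432)); /* 1 */
    printf("%d\n", internal_free_space_ok(4096, 100, 0));           /* 0 */
    return 0;
}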
static int print_leaf(struct buffer_head *bh, int print_mode, int first,
                      int last)
{
    struct block_head *blkh;
    struct item_head *ih;
    int i, nr;
    int from, to;

    if (!B_IS_ITEMS_LEVEL(bh))
        return 1;

    check_leaf(bh);

    blkh = B_BLK_HEAD(bh);
    ih = B_N_PITEM_HEAD(bh, 0);
    nr = blkh_nr_item(blkh);

    printk
        ("\n===================================================================\n");
    reiserfs_printk("LEAF NODE (%ld) contains %z\n", bh->b_blocknr, bh);

    if (!(print_mode & PRINT_LEAF_ITEMS)) {
        reiserfs_printk("FIRST ITEM KEY: %k, LAST ITEM KEY: %k\n",
                        &(ih->ih_key), &((ih + nr - 1)->ih_key));
        return 0;
    }

    if (first < 0 || first > nr - 1)
        from = 0;
    else
        from = first;

    if (last < 0 || last > nr)
        to = nr;
    else
        to = last;

    ih += from;
    printk
        ("-------------------------------------------------------------------------------\n");
    printk
        ("|##| type | key | ilen | free_space | version | loc |\n");
    for (i = from; i < to; i++, ih++) {
        printk
            ("-------------------------------------------------------------------------------\n");
        reiserfs_printk("|%2d| %h |\n", i, ih);
        if (print_mode & PRINT_LEAF_ITEMS)
            op_print_item(ih, B_I_PITEM(bh, ih));
    }

    printk
        ("===================================================================\n");

    return 0;
}
static void check_leaf_block_head(struct buffer_head *bh)
{
    struct block_head *blkh;
    int nr;

    blkh = B_BLK_HEAD(bh);
    nr = blkh_nr_item(blkh);
    if (nr > (bh->b_size - BLKH_SIZE) / IH_SIZE)
        reiserfs_panic(0, "vs-6010: check_leaf_block_head: "
                       "invalid item number %z", bh);
    if (blkh_free_space(blkh) > bh->b_size - BLKH_SIZE - IH_SIZE * nr)
        reiserfs_panic(0, "vs-6020: check_leaf_block_head: "
                       "invalid free space %z", bh);
}
/*
 * Read in the node at the current path and depth into the node cache.
 * You must set INFO->blocks[depth] before calling this.
 */
static char *read_tree_node( __u32 blockNr, __u16 depth )
{
    char *cache = CACHE(depth);
    int num_cached = INFO->cached_slots;

    errnum = 0;
    if ( depth < num_cached ) {
        /* This is the cached part of the path.
           Check if the same block is needed. */
        if ( blockNr == INFO->blocks[depth] )
            return cache;
    } else
        cache = CACHE(num_cached);

    DEBUG_F( " next read_in: block=%u (depth=%u)\n", blockNr, depth );
    if ( !block_read( blockNr, 0, INFO->blocksize, cache ) ) {
        DEBUG_F( "block_read failed\n" );
        return 0;
    }

    DEBUG_F( "FOUND: blk_level=%u, blk_nr_item=%u, blk_free_space=%u\n",
             blkh_level(BLOCKHEAD(cache)),
             blkh_nr_item(BLOCKHEAD(cache)),
             le16_to_cpu(BLOCKHEAD(cache)->blk_free_space) );

    /* Make sure it has the right node level */
    if ( blkh_level(BLOCKHEAD(cache)) != depth ) {
        DEBUG_F( "depth = %u != %u\n", blkh_level(BLOCKHEAD(cache)), depth );
        DEBUG_LEAVE(FILE_ERR_BAD_FSYS);
        errnum = FILE_ERR_BAD_FSYS;
        return 0;
    }

    INFO->blocks[depth] = blockNr;
    return cache;
}
static int
is_leaf(char *buf, int blocksize, struct buf *bp)
{
    struct item_head *ih;
    struct block_head *blkh;
    int used_space, prev_location, i, nr;

    blkh = (struct block_head *)buf;
    if (blkh_level(blkh) != DISK_LEAF_NODE_LEVEL) {
        reiserfs_log(LOG_WARNING, "this should be caught earlier\n");
        return (0);
    }

    nr = blkh_nr_item(blkh);
    if (nr < 1 ||
        nr > ((blocksize - BLKH_SIZE) / (IH_SIZE + MIN_ITEM_LEN))) {
        /* Item number is too big or too small */
        reiserfs_log(LOG_WARNING, "nr_item seems wrong\n");
        return (0);
    }

    ih = (struct item_head *)(buf + BLKH_SIZE) + nr - 1;
    used_space = BLKH_SIZE + IH_SIZE * nr + (blocksize - ih_location(ih));
    if (used_space != blocksize - blkh_free_space(blkh)) {
        /*
         * Free space does not match the calculated amount of
         * used space
         */
        reiserfs_log(LOG_WARNING, "free space seems wrong\n");
        return (0);
    }

    /*
     * FIXME: this check in is_leaf will hurt performance too much -
     * we may have to return 1 here
     */
    /* Check tables of item heads */
    ih = (struct item_head *)(buf + BLKH_SIZE);
    prev_location = blocksize;
    for (i = 0; i < nr; i++, ih++) {
        if (le_ih_k_type(ih) == TYPE_ANY) {
            reiserfs_log(LOG_WARNING, "wrong item type for item\n");
            return (0);
        }
        if (ih_location(ih) >= blocksize ||
            ih_location(ih) < IH_SIZE * nr) {
            reiserfs_log(LOG_WARNING, "item location seems wrong\n");
            return (0);
        }
        if (ih_item_len(ih) < 1 ||
            ih_item_len(ih) > MAX_ITEM_LEN(blocksize)) {
            reiserfs_log(LOG_WARNING, "item length seems wrong\n");
            return (0);
        }
        if (prev_location - ih_location(ih) != ih_item_len(ih)) {
            reiserfs_log(LOG_WARNING,
                "item location seems wrong (second one)\n");
            return (0);
        }
        prev_location = ih_location(ih);
    }

    /* One may imagine many more checks */
    return (1);
}
/*
 * Copy cpy_num node pointers and cpy_num - 1 keys from buffer src to
 * buffer dest.
 * last_first == FIRST_TO_LAST means that we copy the first items of src
 * to the tail of dest.
 * last_first == LAST_TO_FIRST means that we copy the last items of src
 * to the head of dest.
 */
static void internal_copy_pointers_items(struct buffer_info *dest_bi,
                                         struct buffer_head *src,
                                         int last_first, int cpy_num)
{
    /*
     * ATTENTION! The number of node pointers in DEST is equal to the
     * number of items in DEST, because the delimiting key has already
     * been inserted into buffer dest.
     */
    struct buffer_head *dest = dest_bi->bi_bh;
    int nr_dest, nr_src;
    int dest_order, src_order;
    struct block_head *blkh;
    struct reiserfs_key *key;
    struct disk_child *dc;

    nr_src = B_NR_ITEMS(src);

    RFALSE(dest == NULL || src == NULL,
           "src (%p) or dest (%p) buffer is 0", src, dest);
    RFALSE(last_first != FIRST_TO_LAST && last_first != LAST_TO_FIRST,
           "invalid last_first parameter (%d)", last_first);
    RFALSE(nr_src < cpy_num - 1,
           "not so many items (%d) in src (%d)", cpy_num, nr_src);
    RFALSE(cpy_num < 0, "cpy_num less than 0 (%d)", cpy_num);
    RFALSE(cpy_num - 1 + B_NR_ITEMS(dest) > (int)MAX_NR_KEY(dest),
           "cpy_num (%d) + item number in dest (%d) can not be > MAX_NR_KEY(%d)",
           cpy_num, B_NR_ITEMS(dest), MAX_NR_KEY(dest));

    if (cpy_num == 0)
        return;

    /* copying */
    blkh = B_BLK_HEAD(dest);
    nr_dest = blkh_nr_item(blkh);

    if (last_first == LAST_TO_FIRST) {
        dest_order = 0;
        src_order = nr_src - cpy_num + 1;
    } else {
        dest_order = nr_dest;
        src_order = 0;
    }

    /* prepare space for cpy_num pointers */
    dc = B_N_CHILD(dest, dest_order);
    memmove(dc + cpy_num, dc, (nr_dest - dest_order) * DC_SIZE);

    /* insert pointers */
    memcpy(dc, B_N_CHILD(src, src_order), DC_SIZE * cpy_num);

    /* prepare space for cpy_num - 1 keys */
    key = B_N_PDELIM_KEY(dest, dest_order);
    memmove(key + cpy_num - 1, key,
            KEY_SIZE * (nr_dest - dest_order) +
            DC_SIZE * (nr_dest + cpy_num));

    /* insert keys */
    memcpy(key, B_N_PDELIM_KEY(src, src_order), KEY_SIZE * (cpy_num - 1));

    /* sizes, item number */
    set_blkh_nr_item(blkh, blkh_nr_item(blkh) + (cpy_num - 1));
    set_blkh_free_space(blkh,
                        blkh_free_space(blkh) - (KEY_SIZE * (cpy_num - 1) +
                                                 DC_SIZE * cpy_num));

    do_balance_mark_internal_dirty(dest_bi->tb, dest, 0);

    check_internal(dest);

    if (dest_bi->bi_parent) {
        struct disk_child *t_dc;

        t_dc = B_N_CHILD(dest_bi->bi_parent, dest_bi->bi_position);
        put_dc_size(t_dc,
                    dc_size(t_dc) + (KEY_SIZE * (cpy_num - 1) +
                                     DC_SIZE * cpy_num));

        do_balance_mark_internal_dirty(dest_bi->tb, dest_bi->bi_parent, 0);
        check_internal(dest_bi->bi_parent);
    }
}
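/*
 * Why src_order = nr_src - cpy_num + 1 for LAST_TO_FIRST: a node with
 * nr_src keys has nr_src + 1 child pointers, so the last cpy_num
 * pointers start at index nr_src + 1 - cpy_num. A hypothetical
 * standalone helper tracing both directions (DEMO_* names are
 * illustrative, not the kernel API):
 */
#include <stdio.h>

#define DEMO_FIRST_TO_LAST 0
#define DEMO_LAST_TO_FIRST 1

static void copy_orders(int last_first, int nr_src, int nr_dest,
                        int cpy_num, int *src_order, int *dest_order)
{
    if (last_first == DEMO_LAST_TO_FIRST) {
        /* take the last cpy_num of the nr_src + 1 pointers,
           prepend at dest's head */
        *dest_order = 0;
        *src_order = nr_src - cpy_num + 1;
    } else {
        /* take the first cpy_num pointers, append at dest's tail */
        *dest_order = nr_dest;
        *src_order = 0;
    }
}

int main(void)
{
    int s, d;

    copy_orders(DEMO_FIRST_TO_LAST, 10, 4, 3, &s, &d);
    printf("FIRST_TO_LAST: src_order=%d dest_order=%d\n", s, d); /* 0, 4 */
    copy_orders(DEMO_LAST_TO_FIRST, 10, 4, 3, &s, &d);
    printf("LAST_TO_FIRST: src_order=%d dest_order=%d\n", s, d); /* 8, 0 */
    return 0;
}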
/*
 * Delete del_num items and node pointers from buffer cur starting from
 * the first_i'th item and the first_p'th pointer respectively.
 */
static void internal_delete_pointers_items(struct buffer_info *cur_bi,
                                           int first_p,
                                           int first_i, int del_num)
{
    struct buffer_head *cur = cur_bi->bi_bh;
    int nr;
    struct block_head *blkh;
    struct reiserfs_key *key;
    struct disk_child *dc;

    RFALSE(cur == NULL, "buffer is 0");
    RFALSE(del_num < 0,
           "negative number of items (%d) can not be deleted", del_num);
    RFALSE(first_p < 0 || first_p + del_num > B_NR_ITEMS(cur) + 1 ||
           first_i < 0,
           "first pointer order (%d) < 0 or "
           "not so many pointers (%d), only (%d) or "
           "first key order %d < 0",
           first_p, first_p + del_num, B_NR_ITEMS(cur) + 1, first_i);

    if (del_num == 0)
        return;

    blkh = B_BLK_HEAD(cur);
    nr = blkh_nr_item(blkh);

    if (first_p == 0 && del_num == nr + 1) {
        RFALSE(first_i != 0,
               "1st deleted key must have order 0, not %d", first_i);
        make_empty_node(cur_bi);
        return;
    }

    RFALSE(first_i + del_num > B_NR_ITEMS(cur),
           "first_i = %d del_num = %d "
           "not so many keys (%d) in the node (%b)(%z)",
           first_i, del_num, first_i + del_num, cur, cur);

    /* deleting */
    dc = B_N_CHILD(cur, first_p);
    memmove(dc, dc + del_num, (nr + 1 - first_p - del_num) * DC_SIZE);
    key = B_N_PDELIM_KEY(cur, first_i);
    memmove(key, key + del_num,
            (nr - first_i - del_num) * KEY_SIZE +
            (nr + 1 - del_num) * DC_SIZE);

    /* sizes, item number */
    set_blkh_nr_item(blkh, blkh_nr_item(blkh) - del_num);
    set_blkh_free_space(blkh,
                        blkh_free_space(blkh) +
                        (del_num * (KEY_SIZE + DC_SIZE)));

    do_balance_mark_internal_dirty(cur_bi->tb, cur, 0);

    check_internal(cur);

    if (cur_bi->bi_parent) {
        struct disk_child *t_dc;

        t_dc = B_N_CHILD(cur_bi->bi_parent, cur_bi->bi_position);
        put_dc_size(t_dc,
                    dc_size(t_dc) - (del_num * (KEY_SIZE + DC_SIZE)));

        do_balance_mark_internal_dirty(cur_bi->tb, cur_bi->bi_parent, 0);
        check_internal(cur_bi->bi_parent);
    }
}
/*
 * Insert count node pointers into buffer cur before position to + 1.
 * Insert count items into buffer cur before position to.
 * Items and node pointers are specified by inserted and bh respectively.
 */
static void internal_insert_childs(struct buffer_info *cur_bi,
                                   int to, int count,
                                   struct item_head *inserted,
                                   struct buffer_head **bh)
{
    struct buffer_head *cur = cur_bi->bi_bh;
    struct block_head *blkh;
    int nr;
    struct reiserfs_key *ih;
    struct disk_child new_dc[2];
    struct disk_child *dc;
    int i;

    if (count <= 0)
        return;

    blkh = B_BLK_HEAD(cur);
    nr = blkh_nr_item(blkh);

    RFALSE(count > 2, "too many children (%d) are to be inserted", count);
    RFALSE(B_FREE_SPACE(cur) < count * (KEY_SIZE + DC_SIZE),
           "not enough free space (%d), needed %d bytes",
           B_FREE_SPACE(cur), count * (KEY_SIZE + DC_SIZE));

    /* prepare space for count disk_child */
    dc = B_N_CHILD(cur, to + 1);
    memmove(dc + count, dc, (nr + 1 - (to + 1)) * DC_SIZE);

    /* copy the disk children to be inserted */
    for (i = 0; i < count; i++) {
        put_dc_size(&(new_dc[i]),
                    MAX_CHILD_SIZE(bh[i]) - B_FREE_SPACE(bh[i]));
        put_dc_block_number(&(new_dc[i]), bh[i]->b_blocknr);
    }
    memcpy(dc, new_dc, DC_SIZE * count);

    /* prepare space for count items */
    ih = B_N_PDELIM_KEY(cur, ((to == -1) ? 0 : to));
    memmove(ih + count, ih,
            (nr - to) * KEY_SIZE + (nr + 1 + count) * DC_SIZE);

    /* copy item headers (keys) */
    memcpy(ih, inserted, KEY_SIZE);
    if (count > 1)
        memcpy(ih + 1, inserted + 1, KEY_SIZE);

    /* sizes, item number */
    set_blkh_nr_item(blkh, blkh_nr_item(blkh) + count);
    set_blkh_free_space(blkh,
                        blkh_free_space(blkh) -
                        count * (DC_SIZE + KEY_SIZE));

    do_balance_mark_internal_dirty(cur_bi->tb, cur, 0);

    check_internal(cur);

    if (cur_bi->bi_parent) {
        struct disk_child *t_dc =
            B_N_CHILD(cur_bi->bi_parent, cur_bi->bi_position);
        put_dc_size(t_dc,
                    dc_size(t_dc) + (count * (DC_SIZE + KEY_SIZE)));

        do_balance_mark_internal_dirty(cur_bi->tb, cur_bi->bi_parent, 0);
        check_internal(cur_bi->bi_parent);
    }
}
static int is_leaf(char *buf, int blocksize, struct buffer_head *bh)
{
    struct block_head *blkh;
    struct item_head *ih;
    int used_space;
    int prev_location;
    int i;
    int nr;

    blkh = (struct block_head *)buf;
    if (blkh_level(blkh) != DISK_LEAF_NODE_LEVEL) {
        reiserfs_warning(NULL, "reiserfs-5080",
                         "this should be caught earlier");
        return 0;
    }

    nr = blkh_nr_item(blkh);
    if (nr < 1 ||
        nr > ((blocksize - BLKH_SIZE) / (IH_SIZE + MIN_ITEM_LEN))) {
        /* item number is too big or too small */
        reiserfs_warning(NULL, "reiserfs-5081",
                         "nr_item seems wrong: %z", bh);
        return 0;
    }

    ih = (struct item_head *)(buf + BLKH_SIZE) + nr - 1;
    used_space = BLKH_SIZE + IH_SIZE * nr + (blocksize - ih_location(ih));
    if (used_space != blocksize - blkh_free_space(blkh)) {
        /* free space does not match the calculated amount of used space */
        reiserfs_warning(NULL, "reiserfs-5082",
                         "free space seems wrong: %z", bh);
        return 0;
    }

    /*
     * FIXME: this check will hurt performance too much - we may have
     * to return 1 here
     */
    /* check tables of item heads */
    ih = (struct item_head *)(buf + BLKH_SIZE);
    prev_location = blocksize;
    for (i = 0; i < nr; i++, ih++) {
        if (le_ih_k_type(ih) == TYPE_ANY) {
            reiserfs_warning(NULL, "reiserfs-5083",
                             "wrong item type for item %h", ih);
            return 0;
        }
        if (ih_location(ih) >= blocksize ||
            ih_location(ih) < IH_SIZE * nr) {
            reiserfs_warning(NULL, "reiserfs-5084",
                             "item location seems wrong: %h", ih);
            return 0;
        }
        if (ih_item_len(ih) < 1 ||
            ih_item_len(ih) > MAX_ITEM_LEN(blocksize)) {
            reiserfs_warning(NULL, "reiserfs-5085",
                             "item length seems wrong: %h", ih);
            return 0;
        }
        if (prev_location - ih_location(ih) != ih_item_len(ih)) {
            reiserfs_warning(NULL, "reiserfs-5086",
                             "item location seems wrong "
                             "(second one): %h", ih);
            return 0;
        }
        prev_location = ih_location(ih);
    }

    /* one may imagine many more checks */
    return 1;
}
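/*
 * Both is_leaf variants walk the item-head table and require that item
 * bodies tile the tail of the block exactly: the first body ends at
 * blocksize, and each subsequent body ends where the previous one
 * begins. A self-contained sketch of that invariant; struct demo_ih is
 * an assumed minimal stand-in, not the on-disk struct item_head.
 */
#include <stdio.h>

/* assumed minimal stand-in for the on-disk struct item_head */
struct demo_ih {
    int location;    /* offset of the item body within the block */
    int length;      /* item body length in bytes */
};

/*
 * 1 if the nr item bodies tile the tail of the block back-to-back
 * (bodies are stored from the end of the block toward the front),
 * 0 otherwise.
 */
static int leaf_bodies_ok(const struct demo_ih *ih, int nr, int blocksize)
{
    int prev_location = blocksize;
    int i;

    for (i = 0; i < nr; i++) {
        if (prev_location - ih[i].location != ih[i].length)
            return 0;
        prev_location = ih[i].location;
    }
    return 1;
}

int main(void)
{
    /* bodies of 100 and 50 bytes packed against a 4096-byte block end */
    struct demo_ih ih[2] = { { 3996, 100 }, { 3946, 50 } };

    printf("%d\n", leaf_bodies_ok(ih, 2, 4096));    /* 1 */
    return 0;
}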
/*
 * preconditions: reiserfs_read_super already executed, therefore the
 *   INFO block is valid
 * returns: 0 if error (errnum is set),
 *   nonzero iff we were able to find the key successfully.
 * postconditions: on a nonzero return, the current_ih and
 *   current_item fields describe the key that equals the searched key.
 *   INFO->next_key contains the next key after the searched key.
 * side effects: messes around with the cache.
 */
static int search_stat( __u32 dir_id, __u32 objectid )
{
    char *cache;
    int depth;
    int nr_item;
    int i;
    struct item_head *ih;

    errnum = 0;
    DEBUG_F( "search_stat:\n key %u:%u:0:0\n",
             le32_to_cpu(dir_id), le32_to_cpu(objectid) );

    depth = INFO->tree_depth;
    cache = ROOT;

    DEBUG_F( "depth = %d\n", depth );
    while ( depth > BLKH_LEVEL_LEAF ) {
        struct key *key;

        nr_item = blkh_nr_item(BLOCKHEAD( cache ));
        key = KEY( cache );

        for ( i = 0; i < nr_item; i++ ) {
            if (le32_to_cpu(key->k_dir_id) > le32_to_cpu(dir_id)
                || (key->k_dir_id == dir_id
                    && (le32_to_cpu(key->k_objectid) >
                        le32_to_cpu(objectid)
                        || (key->k_objectid == objectid
                            && (key->u.k_offset_v1.k_offset
                                | key->u.k_offset_v1.k_uniqueness) > 0))))
                break;
            key++;
        }

        DEBUG_F( " depth=%d, i=%d/%d\n", depth, i, nr_item );
        INFO->next_key_nr[depth] = ( i == nr_item ) ? 0 : i + 1;
        cache = read_tree_node( dc_block_number(&(DC(cache)[i])), --depth );
        if ( !cache )
            return 0;
    }

    /* cache == LEAF */
    nr_item = blkh_nr_item(BLOCKHEAD(LEAF));
    ih = ITEMHEAD;
    DEBUG_F( "nr_item = %d\n", nr_item );
    for ( i = 0; i < nr_item; i++ ) {
        if ( ih->ih_key.k_dir_id == dir_id
             && ih->ih_key.k_objectid == objectid
             && ih->ih_key.u.k_offset_v1.k_offset == 0
             && ih->ih_key.u.k_offset_v1.k_uniqueness == 0 ) {
            DEBUG_F( " depth=%d, i=%d/%d\n", depth, i, nr_item );
            INFO->current_ih = ih;
            INFO->current_item = &LEAF[ih_location(ih)];
            return 1;
        }
        ih++;
    }

    DEBUG_LEAVE(FILE_ERR_BAD_FSYS);
    errnum = FILE_ERR_BAD_FSYS;
    return 0;
}
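/*
 * The descent loop above stops at the first delimiting key strictly
 * greater than the search key; keys order lexicographically by
 * (dir_id, objectid, offset, uniqueness). A sketch of that comparison
 * on host-order values; struct demo_key is a simplified stand-in for
 * the little-endian on-disk key, not the real struct key.
 */
#include <stdio.h>

/* simplified host-order stand-in for the on-disk reiserfs key */
struct demo_key {
    unsigned int dir_id;
    unsigned int objectid;
    unsigned int offset;
    unsigned int uniqueness;
};

/* 1 if a > b in (dir_id, objectid, offset, uniqueness) order */
static int key_gt(const struct demo_key *a, const struct demo_key *b)
{
    if (a->dir_id != b->dir_id)
        return a->dir_id > b->dir_id;
    if (a->objectid != b->objectid)
        return a->objectid > b->objectid;
    if (a->offset != b->offset)
        return a->offset > b->offset;
    return a->uniqueness > b->uniqueness;
}

int main(void)
{
    struct demo_key stat_key = { 2, 5, 0, 0 };    /* stat data: offset 0 */
    struct demo_key delim    = { 2, 5, 1, 1 };

    /* the delimiting key is past the stat key, so the descent stops here */
    printf("%d\n", key_gt(&delim, &stat_key));    /* 1 */
    return 0;
}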
/*
 * Get the next key, i.e. the key following the last retrieved key in
 * tree order. INFO->current_ih and INFO->current_item are adapted
 * accordingly.
 */
static int next_key( void )
{
    __u16 depth;
    struct item_head *ih = INFO->current_ih + 1;
    char *cache;

    DEBUG_F( "next_key:\n old ih: key %u:%u:%u:%u version:%u\n",
             le32_to_cpu(INFO->current_ih->ih_key.k_dir_id),
             le32_to_cpu(INFO->current_ih->ih_key.k_objectid),
             le32_to_cpu(INFO->current_ih->ih_key.u.k_offset_v1.k_offset),
             le32_to_cpu(INFO->current_ih->ih_key.u.k_offset_v1.k_uniqueness),
             ih_version(INFO->current_ih) );

    if ( ih == &ITEMHEAD[blkh_nr_item(BLOCKHEAD( LEAF ))] ) {
        depth = BLKH_LEVEL_LEAF;
        /* The last item was the last in the leaf node.
           Read in the next block. */
        do {
            if ( depth == INFO->tree_depth ) {
                /* There are no more keys at all.
                   Return a dummy item with MAX_KEY. */
                ih = ( struct item_head * )
                     &BLOCKHEAD( LEAF )->blk_right_delim_key;
                goto found;
            }
            depth++;
            DEBUG_F( " depth=%u, i=%u\n", depth, INFO->next_key_nr[depth] );
        } while ( INFO->next_key_nr[depth] == 0 );

        if ( depth == INFO->tree_depth )
            cache = ROOT;
        else if ( depth <= INFO->cached_slots )
            cache = CACHE( depth );
        else {
            cache = read_tree_node( INFO->blocks[depth], depth );
            if ( !cache )
                return 0;
        }

        do {
            __u16 nr_item = blkh_nr_item(BLOCKHEAD( cache ));
            int key_nr = INFO->next_key_nr[depth]++;

            DEBUG_F( " depth=%u, i=%u/%u\n", depth, key_nr, nr_item );
            if ( key_nr == nr_item )
                /* This is the last item in this block;
                   set next_key_nr to 0. */
                INFO->next_key_nr[depth] = 0;

            cache = read_tree_node( dc_block_number( &(DC( cache )[key_nr]) ),
                                    --depth );
            if ( !cache )
                return 0;
        } while ( depth > BLKH_LEVEL_LEAF );

        ih = ITEMHEAD;
    }
found:
    INFO->current_ih = ih;
    INFO->current_item = &LEAF[ih_location(ih)];
    DEBUG_F( " new ih: key %u:%u:%u:%u version:%u\n",
             le32_to_cpu(INFO->current_ih->ih_key.k_dir_id),
             le32_to_cpu(INFO->current_ih->ih_key.k_objectid),
             le32_to_cpu(INFO->current_ih->ih_key.u.k_offset_v1.k_offset),
             le32_to_cpu(INFO->current_ih->ih_key.u.k_offset_v1.k_uniqueness),
             ih_version(INFO->current_ih) );
    return 1;
}