/*
 * Format an item head into buf for debug output: key-format tag,
 * the little-endian key, then length/location/free-space fields.
 * A NULL ih renders as "[NULL]".  Caller supplies a large-enough buf.
 */
static void sprintf_item_head (char * buf, struct item_head * ih)
{
    if (!ih) {
        sprintf (buf, "[NULL]");
        return;
    }

    /* tag with the on-disk key format first */
    sprintf (buf, "%s",
             (ih_version (ih) == ITEM_VERSION_2) ? "*NEW* " : "*OLD*");
    /* then the key itself, appended after the tag */
    sprintf_le_key (buf + strlen (buf), &(ih->ih_key));
    /* finally the item head's bookkeeping fields */
    sprintf (buf + strlen (buf),
             ", item_len %d, item_location %d, "
             "free_space(entry_count) %d",
             ih_item_len(ih), ih_location(ih), ih_free_space (ih));
}
/*
 * Format an item head into buf for debug output: the on-disk format
 * tag (3.5 vs 3.6), the little-endian key, then the item head's
 * length/location/free-space fields.  A NULL ih renders as "[NULL]".
 */
static void sprintf_item_head (char * buf, struct item_head * ih)
{
    char *p;

    if (!ih) {
        sprintf (buf, "[NULL]");
        return;
    }

    strcpy (buf, (ih_version (ih) == KEY_FORMAT_3_6) ? "*3.6* " : "*3.5*");
    p = buf + strlen (buf);
    sprintf_le_key (p, &(ih->ih_key));
    p += strlen (p);
    sprintf (p,
             ", item_len %d, item_location %d, "
             "free_space(entry_count) %d",
             ih_item_len(ih), ih_location(ih), ih_free_space (ih));
}
static int is_leaf(char *buf, int blocksize, struct buf *bp) { struct item_head *ih; struct block_head *blkh; int used_space, prev_location, i, nr; blkh = (struct block_head *)buf; if (blkh_level(blkh) != DISK_LEAF_NODE_LEVEL) { reiserfs_log(LOG_WARNING, "this should be caught earlier"); return (0); } nr = blkh_nr_item(blkh); if (nr < 1 || nr > ((blocksize - BLKH_SIZE) / (IH_SIZE + MIN_ITEM_LEN))) { /* Item number is too big or too small */ reiserfs_log(LOG_WARNING, "nr_item seems wrong\n"); return (0); } ih = (struct item_head *)(buf + BLKH_SIZE) + nr - 1; used_space = BLKH_SIZE + IH_SIZE * nr + (blocksize - ih_location(ih)); if (used_space != blocksize - blkh_free_space(blkh)) { /* * Free space does not match to calculated amount of * use space */ reiserfs_log(LOG_WARNING, "free space seems wrong\n"); return (0); } /* FIXME: it is_leaf will hit performance too much - we may have * return 1 here */ /* Check tables of item heads */ ih = (struct item_head *)(buf + BLKH_SIZE); prev_location = blocksize; for (i = 0; i < nr; i++, ih++) { if (le_ih_k_type(ih) == TYPE_ANY) { reiserfs_log(LOG_WARNING, "wrong item type for item\n"); return (0); } if (ih_location(ih) >= blocksize || ih_location(ih) < IH_SIZE * nr) { reiserfs_log(LOG_WARNING, "item location seems wrong\n"); return (0); } if (ih_item_len(ih) < 1 || ih_item_len(ih) > MAX_ITEM_LEN(blocksize)) { reiserfs_log(LOG_WARNING, "item length seems wrong\n"); return (0); } if (prev_location - ih_location(ih) != ih_item_len(ih)) { reiserfs_log(LOG_WARNING, "item location seems wrong (second one)\n"); return (0); } prev_location = ih_location(ih); } /* One may imagine much more checks */ return 1; }
static int is_leaf(char *buf, int blocksize, struct buffer_head *bh) { struct block_head *blkh; struct item_head *ih; int used_space; int prev_location; int i; int nr; blkh = (struct block_head *)buf; if (blkh_level(blkh) != DISK_LEAF_NODE_LEVEL) { reiserfs_warning(NULL, "reiserfs-5080", "this should be caught earlier"); return 0; } nr = blkh_nr_item(blkh); if (nr < 1 || nr > ((blocksize - BLKH_SIZE) / (IH_SIZE + MIN_ITEM_LEN))) { /* item number is too big or too small */ reiserfs_warning(NULL, "reiserfs-5081", "nr_item seems wrong: %z", bh); return 0; } ih = (struct item_head *)(buf + BLKH_SIZE) + nr - 1; used_space = BLKH_SIZE + IH_SIZE * nr + (blocksize - ih_location(ih)); if (used_space != blocksize - blkh_free_space(blkh)) { /* free space does not match to calculated amount of use space */ reiserfs_warning(NULL, "reiserfs-5082", "free space seems wrong: %z", bh); return 0; } // return 1 here /* check tables of item heads */ ih = (struct item_head *)(buf + BLKH_SIZE); prev_location = blocksize; for (i = 0; i < nr; i++, ih++) { if (le_ih_k_type(ih) == TYPE_ANY) { reiserfs_warning(NULL, "reiserfs-5083", "wrong item type for item %h", ih); return 0; } if (ih_location(ih) >= blocksize || ih_location(ih) < IH_SIZE * nr) { reiserfs_warning(NULL, "reiserfs-5084", "item location seems wrong: %h", ih); return 0; } if (ih_item_len(ih) < 1 || ih_item_len(ih) > MAX_ITEM_LEN(blocksize)) { reiserfs_warning(NULL, "reiserfs-5085", "item length seems wrong: %h", ih); return 0; } if (prev_location - ih_location(ih) != ih_item_len(ih)) { reiserfs_warning(NULL, "reiserfs-5086", "item location seems wrong " "(second one): %h", ih); return 0; } prev_location = ih_location(ih); } // one may imagine much more checks return 1; }
/* preconditions: reiserfs_read_super already executed, therefore
 *   INFO block is valid
 * returns: 0 if error (errnum is set),
 *   nonzero iff we were able to find the key successfully.
 * postconditions: on a nonzero return, the current_ih and
 *   current_item fields describe the key that equals the
 *   searched key.  INFO->next_key contains the next key after
 *   the searched key.
 * side effects: messes around with the cache.
 */
static int
search_stat( __u32 dir_id, __u32 objectid )
{
     char *cache;
     int depth;
     int nr_item;
     int i;
     struct item_head *ih;

     errnum = 0;
     DEBUG_F( "search_stat:\n key %u:%u:0:0\n", le32_to_cpu(dir_id),
	      le32_to_cpu(objectid) );

     /* Start at the root and walk internal nodes down to the leaf. */
     depth = INFO->tree_depth;
     cache = ROOT;

     DEBUG_F( "depth = %d\n", depth );
     while ( depth > BLKH_LEVEL_LEAF )
     {
	  struct key *key;

	  nr_item = blkh_nr_item(BLOCKHEAD( cache ));

	  key = KEY( cache );

	  /* Find the first delimiting key strictly greater than the
	   * searched stat key (a stat key has offset 0, uniqueness 0);
	   * equality on dir_id/objectid is compared raw since equality
	   * is endian-independent, while ordering uses le32_to_cpu(). */
	  for ( i = 0; i < nr_item; i++ )
	  {
	       if (le32_to_cpu(key->k_dir_id) > le32_to_cpu(dir_id)
		   || (key->k_dir_id == dir_id
		       && (le32_to_cpu(key->k_objectid) >
			   le32_to_cpu(objectid)
			   || (key->k_objectid == objectid
			       && (key->u.k_offset_v1.k_offset
				   | key->u.k_offset_v1.k_uniqueness) > 0))))
		    break;
	       key++;
	  }

	  DEBUG_F( " depth=%d, i=%d/%d\n", depth, i, nr_item );
	  /* Remember which child comes after the one we descend into,
	   * so next_key() can resume the traversal at this level;
	   * 0 means "no further key at this level". */
	  INFO->next_key_nr[depth] = ( i == nr_item ) ? 0 : i + 1;
	  /* Descend into child i; the child lives one level lower,
	   * hence --depth passed to read_tree_node. */
	  cache = read_tree_node( dc_block_number(&(DC(cache)[i])), --depth );
	  if ( !cache )
	       return 0;
     }

     /* cache == LEAF */
     nr_item = blkh_nr_item(BLOCKHEAD(LEAF));
     ih = ITEMHEAD;
     DEBUG_F( "nr_item = %d\n", nr_item );
     /* Linear scan of the leaf for an exact stat-item key match
      * (offset and uniqueness both zero). */
     for ( i = 0; i < nr_item; i++ )
     {
	  if ( ih->ih_key.k_dir_id == dir_id
	       && ih->ih_key.k_objectid == objectid
	       && ih->ih_key.u.k_offset_v1.k_offset == 0
	       && ih->ih_key.u.k_offset_v1.k_uniqueness == 0 )
	  {
	       DEBUG_F( " depth=%d, i=%d/%d\n", depth, i, nr_item );
	       INFO->current_ih = ih;
	       INFO->current_item = &LEAF[ih_location(ih)];
	       return 1;
	  }
	  ih++;
     }

     /* Key not present in the leaf it should be in: corrupt fs. */
     DEBUG_LEAVE(FILE_ERR_BAD_FSYS);
     errnum = FILE_ERR_BAD_FSYS;
     return 0;
}
/* Get the next key, i.e. the key following the last retrieved key in * tree order. INFO->current_ih and * INFO->current_info are adapted accordingly. */ static int next_key( void ) { __u16 depth; struct item_head *ih = INFO->current_ih + 1; char *cache; DEBUG_F( "next_key:\n old ih: key %u:%u:%u:%u version:%u\n", le32_to_cpu(INFO->current_ih->ih_key.k_dir_id), le32_to_cpu(INFO->current_ih->ih_key.k_objectid), le32_to_cpu(INFO->current_ih->ih_key.u.k_offset_v1.k_offset), le32_to_cpu(INFO->current_ih->ih_key.u.k_offset_v1.k_uniqueness), ih_version(INFO->current_ih) ); if ( ih == &ITEMHEAD[blkh_nr_item(BLOCKHEAD( LEAF ))] ) { depth = BLKH_LEVEL_LEAF; /* The last item, was the last in the leaf node. * Read in the next * * block */ do { if ( depth == INFO->tree_depth ) { /* There are no more keys at all. * Return a dummy item with * * MAX_KEY */ ih = ( struct item_head * ) &BLOCKHEAD( LEAF )->blk_right_delim_key; goto found; } depth++; DEBUG_F( " depth=%u, i=%u\n", depth, INFO->next_key_nr[depth] ); } while ( INFO->next_key_nr[depth] == 0 ); if ( depth == INFO->tree_depth ) cache = ROOT; else if ( depth <= INFO->cached_slots ) cache = CACHE( depth ); else { cache = read_tree_node( INFO->blocks[depth], --depth ); if ( !cache ) return 0; } do { __u16 nr_item = blkh_nr_item(BLOCKHEAD( cache )); int key_nr = INFO->next_key_nr[depth]++; DEBUG_F( " depth=%u, i=%u/%u\n", depth, key_nr, nr_item ); if ( key_nr == nr_item ) /* This is the last item in this block, set the next_key_nr * * to 0 */ INFO->next_key_nr[depth] = 0; cache = read_tree_node( dc_block_number( &(DC( cache )[key_nr])), --depth ); if ( !cache ) return 0; } while ( depth > BLKH_LEVEL_LEAF ); ih = ITEMHEAD; } found: INFO->current_ih = ih; INFO->current_item = &LEAF[ih_location(ih)]; DEBUG_F( " new ih: key %u:%u:%u:%u version:%u\n", le32_to_cpu(INFO->current_ih->ih_key.k_dir_id), le32_to_cpu(INFO->current_ih->ih_key.k_objectid), le32_to_cpu(INFO->current_ih->ih_key.u.k_offset_v1.k_offset), 
le32_to_cpu(INFO->current_ih->ih_key.u.k_offset_v1.k_uniqueness), ih_version(INFO->current_ih) ); return 1; }