// Pop every already-freed block off the top of the stack.  deallocate()
// only marks non-top blocks as unused; this walk reclaims them lazily,
// advancing _beg past each dead block until the buffer is empty or a
// still-live block is reached.
__forceinline void stack_allocator::clean_garbage()
{
    for (;;) {
        if (empty())
            break;                                  // nothing left to reclaim
        if (!is_block_unused(_beg + overhead))
            break;                                  // top block is live; stop
        // Skip the block header plus its payload.
        _beg += overhead + block_size(_beg + overhead);
        assert(_beg <= _end);
    }
    // Postcondition: either empty, or the top block is in use.
    assert(empty() || is_block_used(_beg + overhead));
}
/* pointers to data block which get into tree are checked with this */
/* Returns non-zero when 'block' is NOT a usable unformatted-node pointer:
   either it is already marked used in the bitmap or it lies beyond the end
   of the filesystem.  Block 0 (a hole) is considered OK. */
int still_bad_unfm_ptr_2 (unsigned long block)
{
    if (!block)
	return 0;
    /* BUG FIX: validate the block number against the filesystem size BEFORE
       consulting the block bitmap -- is_block_used() on an out-of-range block
       would index past the end of the bitmap.  The function's return value is
       unchanged for every input (both failing checks return 1). */
    if (block >= stats (fs)->all_blocks)
	return 1;
    if (is_block_used (block))
	return 1;
    return 0;
}
/* Recursively verify the reiserfs tree rooted at 'block': read each node,
   confirm it is in the tree, formatted (leaf or internal), marked used in
   the block bitmap, and internally sane; on internal nodes recurse into
   every child.  Leaf nodes are chained through the globals g_left/g_right
   with the delimiting key g_dkey so that comp_func can compare each pair
   of adjacent leaves.  NOTE(review): the leaf branch intentionally returns
   WITHOUT brelse(bh) -- the buffer stays referenced as g_left and is only
   released when the next leaf arrives (the final g_left appears to leak a
   reference; confirm against the caller). */
static void reiserfsck_check_tree (int dev, int block, int size, check_function_t comp_func) { struct buffer_head * bh; int what_node; bh = bread (dev, block, size); if (bh == 0) /* NOTE(review): 'block' is int but formatted with %lu -- format/arg
     mismatch; also this reiserfs_panic call lacks the leading 0 argument
     that the call below passes -- confirm reiserfs_panic's signature. */ reiserfs_panic("reiserfsck_check_tree: unable to read %lu block on device 0x%x\n", block, dev); if (!B_IS_IN_TREE (bh)) { reiserfs_panic (0, "reiserfsck_check_tree: buffer (%b %z) not in tree", bh, bh); } what_node = who_is_this (bh->b_data, bh->b_size); if (what_node != THE_LEAF && what_node != THE_INTERNAL) die ("Not formatted node"); if (!is_block_used (bh->b_blocknr)) die ("Not marked as used"); if (is_leaf_node (bh) && is_leaf_bad_xx (bh)) die ("Bad leaf"); if (is_internal_node(bh) && is_internal_bad (bh)) die ("bad internal"); if (is_internal_node (bh)) { int i; struct disk_child * dc; /* an internal node with N items has N+1 children, hence i <= B_NR_ITEMS */ dc = B_N_CHILD (bh, 0); for (i = 0; i <= B_NR_ITEMS (bh); i ++, dc ++) { reiserfsck_check_tree (dev, dc_block_number(dc), size, comp_func); /* remember the delimiting key between this child and the next
           so the leaf-level comparison below can use it */ g_dkey = B_N_PDELIM_KEY (bh, i); } } else if (is_leaf_node (bh)) { g_right = bh; if (g_left != 0 && g_dkey != 0) { /* compare previous leaf (g_left) against this one via g_dkey */ comp_func (); brelse (g_left); } g_left = g_right; /* keep bh referenced as g_left -- do not brelse here */ return; } else { reiserfs_panic ("reiserfsck_check_tree: block %lu has bad block type (%b)", bh->b_blocknr, bh); } brelse (bh); }
// Release the block at vP.  Non-top blocks are only marked unused (lazily
// reclaimed later by clean_garbage()).  If vP is the top block it is popped
// immediately; returns true when that pop exposes an empty buffer or further
// unused blocks, signalling the caller to trigger cascade deallocation.
// Aborts the process if vP does not refer to a live block.
__forceinline bool stack_allocator::deallocate(void * vP)
{
    int* P = (int*)vP;

    if (!is_block_used(P)) {
        // BUG FIX: the original called perror(), which appends strerror(errno)
        // although no system call failed, and then flushed stdout even though
        // perror writes to stderr.  Report plainly on stderr and flush it.
        fprintf(stderr, "Deallocation failed: bad pointer\n");
        fflush(stderr);
        abort();
    }

    // mark for deletion
    mark_block_unused(P);

    if (_beg + overhead == P) { // this is the top block of this allocator
        // delete it
        _beg = _beg + overhead + block_size(_beg + overhead);
        if (empty() || is_block_unused(_beg + overhead)) {
            // buffer is empty or more unused blocks follow:
            // trigger cascade deallocation
            return true;
        }
    }
    return false;
}
/* Like reiserfsck_check_tree, but walks only nodes still present in the
   buffer cache: find_buffer() does no disk I/O, and absent blocks are
   silently skipped.  Every cached node must be uptodate, in the tree,
   formatted, marked used, and internally sane, otherwise die().  The
   buffer's reference count is bumped manually (b_count++) and dropped
   with brelse() on every exit path except the early 'not cached' return. */
static void reiserfsck_check_cached_tree (int dev, int block, int size) { struct buffer_head * bh; int what_node; bh = find_buffer(dev, block, size); if (bh == 0) /* block not in cache -- nothing to check, prune this subtree */ return; if (!buffer_uptodate (bh)) { die ("reiserfsck_check_cached_tree: found notuptodate buffer"); } /* take a reference; released via brelse() below */ bh->b_count ++; if (!B_IS_IN_TREE (bh)) { /* NOTE(review): die() is given printf-style args (%b %z) here while
     every other die() in this file takes a bare message -- confirm die()
     accepts a format string, else bh/bh are silently ignored. */ die ("reiserfsck_check_cached_tree: buffer (%b %z) not in tree", bh, bh); } what_node = who_is_this (bh->b_data, bh->b_size); if ((what_node != THE_LEAF && what_node != THE_INTERNAL) || !is_block_used (bh->b_blocknr) || (is_leaf_node (bh) && is_leaf_bad (bh)) || (is_internal_node(bh) && is_internal_bad (bh))) die ("reiserfsck_check_cached_tree: bad node in the tree"); if (is_internal_node (bh)) { int i; struct disk_child * dc; /* an internal node with N items has N+1 children, hence i <= B_NR_ITEMS */ dc = B_N_CHILD (bh, 0); for (i = 0; i <= B_NR_ITEMS (bh); i ++, dc ++) { reiserfsck_check_cached_tree (dev, dc_block_number(dc), size); g_dkey = B_N_PDELIM_KEY (bh, i); } } else if (is_leaf_node (bh)) { /* leaves need no further descent; drop our reference and stop */ brelse (bh); return; } else { reiserfs_panic ("reiserfsck_check_cached_tree: block %lu has bad block type (%b)", bh->b_blocknr, bh); } brelse (bh); }
/*
 * Shrink the filesystem 'reiserfs' down to 'blocks' blocks.
 * Moves every formatted/unformatted block that lies beyond the new end
 * inside the new boundary, shrinks the bitmap, and updates the super
 * block.  Returns 0 on success, -1 when too many blocks are already
 * allocated for the fs to fit in the requested size.
 * Side effects: sets the module-level globals fs/rs/bmp/blocks_used/
 * unused_block and rewrites on-disk metadata (marks the fs as dirty
 * with REISERFS_ERROR_FS first, so an interrupted shrink forces fsck).
 */
int shrink_fs(reiserfs_filsys_t reiserfs, unsigned long blocks)
{
	unsigned long n_root_block;
	unsigned int bmap_nr_new;
	unsigned long int i;

	fs = reiserfs;
	rs = fs->s_rs;

	/* warn about alpha version */
	{
		int c;

		printf(
			"You are running BETA version of reiserfs shrinker.\n"
			"This version is only for testing or VERY CAREFUL use.\n"
			"Backup of you data is recommended.\n\n"
			"Do you want to continue? [y/N]:" );
		c = getchar();
		if (c != 'y' && c != 'Y')
			exit(1);
	}

	/* each bitmap block maps 8 * blocksize filesystem blocks */
	bmap_nr_new = (blocks - 1) / (8 * fs->s_blocksize) + 1;

	/* is shrinking possible ? */
	if (rs_block_count(rs) - blocks > rs_free_blocks(rs) + rs_bmap_nr(rs) - bmap_nr_new) {
		fprintf(stderr, "resize_reiserfs: can\'t shrink fs; too many blocks already allocated\n");
		return -1;
	}

	/* mark the fs dirty on disk before anything moves, so a crash
	   mid-shrink forces a consistency check */
	reiserfs_reopen(fs, O_RDWR);
	set_state (fs->s_rs, REISERFS_ERROR_FS);
	mark_buffer_uptodate(SB_BUFFER_WITH_SB(fs), 1);
	mark_buffer_dirty(SB_BUFFER_WITH_SB(fs));
	bwrite(SB_BUFFER_WITH_SB(fs));

	/* calculate number of data blocks */
	blocks_used = SB_BLOCK_COUNT(fs)
		- SB_FREE_BLOCKS(fs)
		- SB_BMAP_NR(fs)
		- SB_JOURNAL_SIZE(fs)
		- REISERFS_DISK_OFFSET_IN_BYTES / fs->s_blocksize
		- 2; /* superblock itself and 1 descriptor after the journal */

	bmp = reiserfs_create_bitmap(rs_block_count(rs));
	reiserfs_fetch_disk_bitmap(bmp, fs);

	unused_block = 1;

	if (opt_verbose) {
		printf("Processing the tree: ");
		fflush(stdout);
	}

	/* relocate the tree out of the truncated region, starting at the root */
	n_root_block = move_formatted_block(rs_root_block(rs), blocks, 0);
	if (n_root_block) {
		set_root_block (rs, n_root_block);
	}

	if (opt_verbose)
		printf ("\n\nnodes processed (moved):\n"
			"int %lu (%lu),\n"
			"leaves %lu (%lu),\n"
			"unfm %lu (%lu),\n"
			"total %lu (%lu).\n\n",
			int_node_cnt, int_moved_cnt,
			leaf_node_cnt, leaf_moved_cnt,
			unfm_node_cnt, unfm_moved_cnt,
			(unsigned long)total_node_cnt, total_moved_cnt);

	if (block_count_mismatch) {
		fprintf(stderr, "resize_reiserfs: data block count %lu"
			" doesn\'t match data block count %lu from super block\n",
			(unsigned long)total_node_cnt, blocks_used);
	}

#if 0
	/* BUG FIX: the string literal below was broken across a raw newline
	   in the original source, which does not tokenize as valid C */
	printf("check for used blocks in truncated region\n");
	{
		unsigned long l;
		for (l = blocks; l < rs_block_count(rs); l++)
			if (is_block_used(bmp, l))
				printf("<%lu>", l);
		printf("\n");
	}
#endif /* 0 */

	reiserfs_free_bitmap_blocks(fs);

	set_free_blocks (rs, rs_free_blocks(rs)
		- (rs_block_count(rs) - blocks)
		+ (rs_bmap_nr(rs) - bmap_nr_new));
	set_block_count (rs, blocks);
	set_bmap_nr (rs, bmap_nr_new);

	reiserfs_read_bitmap_blocks(fs);

	/* BUG FIX: pad out the tail of the last bitmap block.  The bitmap
	   covers bmap_nr_new * blocksize * 8 bits (one bit per block); the
	   original loop bound omitted the "* 8" bits-per-byte factor, so the
	   bound was smaller than 'blocks' for any realistic geometry and the
	   loop never ran, leaving the trailing bits clear.  Cast avoids
	   unsigned-int overflow for large filesystems. */
	for (i = blocks; i < (unsigned long)bmap_nr_new * fs->s_blocksize * 8; i++)
		reiserfs_bitmap_set_bit(bmp, i);

#if 0
	PUT_SB_FREE_BLOCKS(s, SB_FREE_BLOCKS(s) - (SB_BLOCK_COUNT(s) - blocks) + (SB_BMAP_NR(s) - bmap_nr_new));
	PUT_SB_BLOCK_COUNT(s, blocks);
	PUT_SB_BMAP_NR(s, bmap_nr_new);
#endif

	reiserfs_flush_bitmap(bmp, fs);
	return 0;
}