/** * reiser4_delete_object_common - delete_object of file_plugin * @inode: inode to be deleted * * This is common implementation of delete_object method of file_plugin. It * applies to object its deletion consists of removing two items - stat data * and safe-link. */ int reiser4_delete_object_common(struct inode *inode) { int result; assert("nikita-1477", inode != NULL); /* FIXME: if file body deletion failed (i/o error, for instance), inode->i_size can be != 0 here */ assert("nikita-3420", inode->i_size == 0 || S_ISLNK(inode->i_mode)); assert("nikita-3421", inode->i_nlink == 0); if (!reiser4_inode_get_flag(inode, REISER4_NO_SD)) { reiser4_block_nr reserve; /* grab space which is needed to remove 2 items from the tree: stat data and safe-link */ reserve = 2 * estimate_one_item_removal(reiser4_tree_by_inode(inode)); if (reiser4_grab_space_force(reserve, BA_RESERVED | BA_CAN_COMMIT)) return RETERR(-ENOSPC); result = common_object_delete_no_reserve(inode); } else result = 0; return result; }
/* Allocate "real" disk blocks by calling a proper space allocation plugin
 * method. Blocks are allocated in one contiguous disk region. The plugin
 * independent part accounts blocks by subtracting allocated amount from grabbed
 * or fake block counter and add the same amount to the counter of allocated
 * blocks.
 *
 * @hint -- a reiser4 blocknr hint object which contains further block
 *          allocation hints and parameters (search start, a stage of block
 *          which will be mapped to disk, etc.),
 * @blk  -- an out parameter for the beginning of the allocated region,
 * @len  -- in/out parameter, it should contain the maximum number of allocated
 *          blocks, after block allocation completes, it contains the length of
 *          allocated disk region.
 * @flags -- see reiser4_ba_flags_t description.
 *
 * @return -- 0 if success, error code otherwise.
 */
int reiser4_alloc_blocks(reiser4_blocknr_hint * hint, reiser4_block_nr * blk,
			 reiser4_block_nr * len, reiser4_ba_flags_t flags)
{
	/* remember the requested length: on failure the grabbed-space
	   release below must undo the full original request, not the
	   (possibly shortened) *len written back by the allocator */
	__u64 needed = *len;
	reiser4_context *ctx;
	reiser4_super_info_data *sbinfo;
	int ret;

	assert("zam-986", hint != NULL);

	ctx = get_current_context();
	sbinfo = get_super_private(ctx->super);

	/* For write-optimized data we use default search start value, which is
	 * close to last write location. */
	if (flags & BA_USE_DEFAULT_SEARCH_START)
		get_blocknr_hint_default(&hint->blk);

	/* VITALY: allocator should grab this for internal/tx-lists/similar
	   only. */
	/* VS-FIXME-HANS: why is this comment above addressed to vitaly (from
	   vitaly)? */
	/* blocks not yet counted against any counter must be grabbed here
	   before the allocator is invoked */
	if (hint->block_stage == BLOCK_NOT_COUNTED) {
		ret = reiser4_grab_space_force(*len, flags);
		if (ret != 0)
			return ret;
	}

	/* delegate the actual allocation to the space allocator plugin;
	   on success *blk/*len describe the allocated contiguous region */
	ret = sa_alloc_blocks(reiser4_get_space_allocator(ctx->super),
			      hint, (int)needed, blk, len);

	if (!ret) {
		/* sanity: the allocated region must lie within the device */
		assert("zam-680", *blk < reiser4_block_count(ctx->super));
		assert("zam-681",
		       *blk + *len <= reiser4_block_count(ctx->super));

		if (flags & BA_PERMANENT) {
			/* we assume that current atom exists at this moment */
			txn_atom *atom = get_current_atom_locked();
			atom->nr_blocks_allocated += *len;
			spin_unlock_atom(atom);
		}

		/* move *len blocks from the stage-specific counter to the
		   "used" counter, depending on where they were accounted */
		switch (hint->block_stage) {
		case BLOCK_NOT_COUNTED:
			/* fallthrough: blocks were grabbed above, so they are
			   accounted the same way as BLOCK_GRABBED now */
		case BLOCK_GRABBED:
			grabbed2used(ctx, sbinfo, *len);
			break;
		case BLOCK_UNALLOCATED:
			fake_allocated2used(sbinfo, *len, flags);
			break;
		case BLOCK_FLUSH_RESERVED:
			{
				txn_atom *atom = get_current_atom_locked();
				flush_reserved2used(atom, *len);
				spin_unlock_atom(atom);
			}
			break;
		default:
			impossible("zam-531", "wrong block stage");
		}
	} else {
		/* an unconstrained forward search (no distance limit) is not
		   expected to fail with -ENOSPC */
		assert("zam-821",
		       ergo(hint->max_dist == 0
			    && !hint->backward, ret != -ENOSPC));
		/* undo the grab done above for the full requested amount */
		if (hint->block_stage == BLOCK_NOT_COUNTED)
			grabbed2free(ctx, sbinfo, needed);
	}

	return ret;
}