/*
 * Claim an extent of free bits from the single block group at @gd_blkno.
 *
 * NOTE(review): this is an older revision of ocfs2_search_one_group() —
 * the very next definition in this file (the ocfs2_read_group_descriptor
 * variant) redefines the same static symbol, so the two cannot coexist in
 * one translation unit; one of them must be dropped — confirm which
 * revision this tree is meant to carry.
 *
 * On success: *bit_off/*num_bits describe the claimed extent and
 * *bits_left reports the group's remaining free bits. Returns 0 on
 * success, -ENOSPC when the group cannot satisfy the request, or another
 * negative errno on I/O / journal failure.
 */
static int ocfs2_search_one_group(struct ocfs2_alloc_context *ac,
				  handle_t *handle,
				  u32 bits_wanted,
				  u32 min_bits,
				  u16 *bit_off,
				  unsigned int *num_bits,
				  u64 gd_blkno,
				  u16 *bits_left)
{
	int ret;
	u16 found;
	struct buffer_head *group_bh = NULL;
	struct ocfs2_group_desc *gd;
	struct inode *alloc_inode = ac->ac_inode;

	/* Read the group descriptor block through the allocator inode's cache. */
	ret = ocfs2_read_block(OCFS2_SB(alloc_inode->i_sb), gd_blkno,
			       &group_bh, OCFS2_BH_CACHED, alloc_inode);
	if (ret < 0) {
		mlog_errno(ret);
		return ret;
	}

	gd = (struct ocfs2_group_desc *) group_bh->b_data;
	/* A corrupt descriptor forces the filesystem read-only. */
	if (!OCFS2_IS_VALID_GROUP_DESC(gd)) {
		OCFS2_RO_ON_INVALID_GROUP_DESC(alloc_inode->i_sb, gd);
		ret = -EIO;
		goto out;
	}

	/* Delegate the actual bit search to the caller-selected strategy. */
	ret = ac->ac_group_search(alloc_inode, group_bh, bits_wanted, min_bits,
				  bit_off, &found);
	if (ret < 0) {
		/* -ENOSPC is an expected outcome, not an error worth logging. */
		if (ret != -ENOSPC)
			mlog_errno(ret);
		goto out;
	}

	*num_bits = found;

	/* Account the bits in the allocator dinode / chain record first. */
	ret = ocfs2_alloc_dinode_update_counts(alloc_inode, handle, ac->ac_bh,
					       *num_bits,
					       le16_to_cpu(gd->bg_chain));
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	/*
	 * NOTE(review): if set_bits fails here the dinode counts updated
	 * above are not rolled back, leaving the allocator inconsistent —
	 * upstream later added a rollback helper for exactly this window;
	 * confirm whether this tree needs the same fix.
	 */
	ret = ocfs2_block_group_set_bits(handle, alloc_inode, gd, group_bh,
					 *bit_off, *num_bits);
	if (ret < 0)
		mlog_errno(ret);

	/* Reported even on set_bits failure — matches the original flow. */
	*bits_left = le16_to_cpu(gd->bg_free_bits_count);

out:
	brelse(group_bh);

	return ret;
}
/*
 * Try to satisfy an allocation request from the one block group whose
 * descriptor lives at @gd_blkno.
 *
 * The caller-supplied search strategy (ac->ac_group_search) locates the
 * free run; on success the claimed extent is recorded in the allocator
 * dinode and then in the group bitmap, under @handle.
 *
 * Returns 0 and fills *bit_off/*num_bits/*bits_left on success,
 * -ENOSPC when this group cannot provide the bits, or a negative errno
 * on read/journal failure.
 */
static int ocfs2_search_one_group(struct ocfs2_alloc_context *ac,
				  handle_t *handle,
				  u32 bits_wanted,
				  u32 min_bits,
				  u16 *bit_off,
				  unsigned int *num_bits,
				  u64 gd_blkno,
				  u16 *bits_left)
{
	int status;
	u16 bits_found;
	struct inode *alloc_inode = ac->ac_inode;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)ac->ac_bh->b_data;
	struct buffer_head *bg_bh = NULL;
	struct ocfs2_group_desc *desc;

	/* Read + validate the descriptor against the allocator dinode. */
	status = ocfs2_read_group_descriptor(alloc_inode, di, gd_blkno,
					     &bg_bh);
	if (status < 0) {
		mlog_errno(status);
		return status;
	}

	desc = (struct ocfs2_group_desc *) bg_bh->b_data;

	/* Hand the actual bit hunt off to the configured strategy. */
	status = ac->ac_group_search(alloc_inode, bg_bh, bits_wanted,
				     min_bits, ac->ac_max_block, bit_off,
				     &bits_found);
	if (status < 0) {
		/* -ENOSPC just means "try elsewhere" — stay quiet. */
		if (status != -ENOSPC)
			mlog_errno(status);
		goto out;
	}

	*num_bits = bits_found;

	/* Dinode/chain accounting comes first... */
	status = ocfs2_alloc_dinode_update_counts(alloc_inode, handle,
						  ac->ac_bh, *num_bits,
						  le16_to_cpu(desc->bg_chain));
	if (status < 0) {
		mlog_errno(status);
		goto out;
	}

	/* ...then the bits are marked used in the group bitmap itself. */
	status = ocfs2_block_group_set_bits(handle, alloc_inode, desc, bg_bh,
					    *bit_off, *num_bits);
	if (status < 0)
		mlog_errno(status);

	/* Reported unconditionally, even if set_bits just failed. */
	*bits_left = le16_to_cpu(desc->bg_free_bits_count);

out:
	brelse(bg_bh);

	return status;
}
/*
 * Move @len clusters of @context->inode starting at logical cluster @cpos
 * from physical cluster @phys_cpos to a new location chosen near
 * *new_phys_cpos (a "goal"), allocating directly from the global bitmap.
 *
 * Lock ordering established here: global-bitmap i_mutex -> global-bitmap
 * cluster lock -> truncate-log i_mutex -> journal transaction. The unwind
 * labels release them in exact reverse order.
 *
 * On success *new_phys_cpos holds the chosen destination cluster; it is
 * zeroed by ocfs2_probe_alloc_group() when no suitable region exists, in
 * which case -ENOSPC is returned. Returns 0 on success or negative errno.
 */
static int ocfs2_move_extent(struct ocfs2_move_extents_context *context,
			     u32 cpos, u32 phys_cpos, u32 *new_phys_cpos,
			     u32 len, int ext_flags)
{
	int ret, credits = 0, extra_blocks = 0, goal_bit = 0;
	handle_t *handle;
	struct inode *inode = context->inode;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct inode *tl_inode = osb->osb_tl_inode;
	struct inode *gb_inode = NULL;
	struct buffer_head *gb_bh = NULL;
	struct buffer_head *gd_bh = NULL;
	struct ocfs2_group_desc *gd;
	struct ocfs2_refcount_tree *ref_tree = NULL;
	/* me_threshold bounds how far from the goal we will settle. */
	u32 move_max_hop = ocfs2_blocks_to_clusters(inode->i_sb,
						    context->range->me_threshold);
	u64 phys_blkno, new_phys_blkno;

	phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos);

	/*
	 * Refcounted (reflinked) extents need the refcount tree locked and
	 * extra journal credits/blocks reserved before the old clusters can
	 * be released.
	 */
	if ((ext_flags & OCFS2_EXT_REFCOUNTED) && len) {
		BUG_ON(!(OCFS2_I(inode)->ip_dyn_features &
			 OCFS2_HAS_REFCOUNT_FL));
		BUG_ON(!context->refcount_loc);

		ret = ocfs2_lock_refcount_tree(osb, context->refcount_loc, 1,
					       &ref_tree, NULL);
		if (ret) {
			mlog_errno(ret);
			return ret;
		}

		ret = ocfs2_prepare_refcount_change_for_del(inode,
						context->refcount_loc,
						phys_blkno, len, &credits,
						&extra_blocks);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	ret = ocfs2_lock_allocators_move_extents(inode, &context->et, len, 1,
						 &context->meta_ac, NULL,
						 extra_blocks, &credits);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/*
	 * need to count 2 extra credits for global_bitmap inode and
	 * group descriptor.
	 */
	credits += OCFS2_INODE_UPDATE_CREDITS + 1;

	/*
	 * ocfs2_move_extent() didn't reserve any clusters in
	 * lock_allocators() logic, while we still need to lock the
	 * global_bitmap.
	 */
	gb_inode = ocfs2_get_system_file_inode(osb, GLOBAL_BITMAP_SYSTEM_INODE,
					       OCFS2_INVALID_SLOT);
	if (!gb_inode) {
		mlog(ML_ERROR, "unable to get global_bitmap inode\n");
		ret = -EIO;
		goto out;
	}

	mutex_lock(&gb_inode->i_mutex);

	ret = ocfs2_inode_lock(gb_inode, &gb_bh, 1);
	if (ret) {
		mlog_errno(ret);
		goto out_unlock_gb_mutex;
	}

	/* Truncate log must be held across the extent release. */
	mutex_lock(&tl_inode->i_mutex);

	handle = ocfs2_start_trans(osb, credits);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out_unlock_tl_inode;
	}

	new_phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, *new_phys_cpos);
	ret = ocfs2_find_victim_alloc_group(inode, new_phys_blkno,
					    GLOBAL_BITMAP_SYSTEM_INODE,
					    OCFS2_INVALID_SLOT,
					    &goal_bit, &gd_bh);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	/*
	 * Probe the victim cluster group to find a proper region to fit the
	 * wanted movement; it will even perform a best-effort attempt by
	 * compromising to a threshold around the goal.
	 */
	ocfs2_probe_alloc_group(inode, gd_bh, &goal_bit, len, move_max_hop,
				new_phys_cpos);
	if (!*new_phys_cpos) {
		ret = -ENOSPC;
		goto out_commit;
	}

	ret = __ocfs2_move_extent(handle, context, cpos, len, phys_cpos,
				  *new_phys_cpos, ext_flags);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	/* Claim the destination clusters in the global bitmap. */
	gd = (struct ocfs2_group_desc *)gd_bh->b_data;
	ret = ocfs2_alloc_dinode_update_counts(gb_inode, handle, gb_bh, len,
					       le16_to_cpu(gd->bg_chain));
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	/*
	 * NOTE(review): a set_bits failure here is only logged — the dinode
	 * counts above are not rolled back and the writeback below still
	 * runs, overwriting ret. Confirm this matches the intended
	 * best-effort semantics.
	 */
	ret = ocfs2_block_group_set_bits(handle, gb_inode, gd, gd_bh,
					 goal_bit, len);
	if (ret)
		mlog_errno(ret);

	/*
	 * Here we should write the new page out first if we are
	 * in write-back mode.
	 */
	ret = ocfs2_cow_sync_writeback(inode->i_sb, context->inode, cpos, len);
	if (ret)
		mlog_errno(ret);

out_commit:
	ocfs2_commit_trans(osb, handle);
	brelse(gd_bh);

out_unlock_tl_inode:
	mutex_unlock(&tl_inode->i_mutex);

	ocfs2_inode_unlock(gb_inode, 1);
out_unlock_gb_mutex:
	mutex_unlock(&gb_inode->i_mutex);
	brelse(gb_bh);
	iput(gb_inode);

out:
	if (context->meta_ac) {
		ocfs2_free_alloc_context(context->meta_ac);
		context->meta_ac = NULL;
	}

	if (ref_tree)
		ocfs2_unlock_refcount_tree(osb, ref_tree, 1);

	return ret;
}
/*
 * Walk the chain ac->ac_chain of the allocator described by ac->ac_bh,
 * claiming bits from the first group that can satisfy the request.
 *
 * On success: *bit_off/*num_bits describe the claimed extent within the
 * group at *bg_blkno, and *bits_left is that group's remaining free
 * count. Returns 0, -ENOSPC when the whole chain is exhausted, or a
 * negative errno on read/validation/journal failure.
 */
static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
			      handle_t *handle,
			      u32 bits_wanted,
			      u32 min_bits,
			      u16 *bit_off,
			      unsigned int *num_bits,
			      u64 *bg_blkno,
			      u16 *bits_left)
{
	int status;
	u16 chain, tmp_bits;
	u32 tmp_used;
	u64 next_group;
	struct inode *alloc_inode = ac->ac_inode;
	struct buffer_head *group_bh = NULL;
	/* Predecessor of the current group, kept for possible relinking. */
	struct buffer_head *prev_group_bh = NULL;
	struct ocfs2_dinode *fe = (struct ocfs2_dinode *) ac->ac_bh->b_data;
	struct ocfs2_chain_list *cl =
		(struct ocfs2_chain_list *) &fe->id2.i_chain;
	struct ocfs2_group_desc *bg;

	chain = ac->ac_chain;
	mlog(0, "trying to alloc %u bits from chain %u, inode %llu\n",
	     bits_wanted, chain,
	     (unsigned long long)OCFS2_I(alloc_inode)->ip_blkno);

	/* Start from the head group recorded in the chain list. */
	status = ocfs2_read_block(OCFS2_SB(alloc_inode->i_sb),
				  le64_to_cpu(cl->cl_recs[chain].c_blkno),
				  &group_bh, OCFS2_BH_CACHED, alloc_inode);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}
	bg = (struct ocfs2_group_desc *) group_bh->b_data;
	status = ocfs2_check_group_descriptor(alloc_inode->i_sb, fe, bg);
	if (status) {
		mlog_errno(status);
		goto bail;
	}

	status = -ENOSPC;
	/* for now, the chain search is a bit simplistic. We just use
	 * the 1st group with any empty bits. */
	while ((status = ac->ac_group_search(alloc_inode, group_bh,
					     bits_wanted, min_bits, bit_off,
					     &tmp_bits)) == -ENOSPC) {
		/* End of chain: keep -ENOSPC and fall out of the loop. */
		if (!bg->bg_next_group)
			break;

		/* Keep only the immediate predecessor for relinking. */
		if (prev_group_bh) {
			brelse(prev_group_bh);
			prev_group_bh = NULL;
		}
		next_group = le64_to_cpu(bg->bg_next_group);
		prev_group_bh = group_bh;
		group_bh = NULL;
		status = ocfs2_read_block(OCFS2_SB(alloc_inode->i_sb),
					  next_group, &group_bh,
					  OCFS2_BH_CACHED, alloc_inode);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}
		bg = (struct ocfs2_group_desc *) group_bh->b_data;
		status = ocfs2_check_group_descriptor(alloc_inode->i_sb, fe,
						      bg);
		if (status) {
			mlog_errno(status);
			goto bail;
		}
	}
	if (status < 0) {
		if (status != -ENOSPC)
			mlog_errno(status);
		goto bail;
	}

	mlog(0, "alloc succeeds: we give %u bits from block group %llu\n",
	     tmp_bits, (unsigned long long)le64_to_cpu(bg->bg_blkno));

	*num_bits = tmp_bits;

	BUG_ON(*num_bits == 0);

	/*
	 * Keep track of previous block descriptor read. When
	 * we find a target, if we have read more than X
	 * number of descriptors, and the target is reasonably
	 * empty, relink him to top of his chain.
	 *
	 * We've read 0 extra blocks and only send one more to
	 * the transaction, yet the next guy to search has a
	 * much easier time.
	 *
	 * Do this *after* figuring out how many bits we're taking out
	 * of our target group.
	 */
	if (ac->ac_allow_chain_relink &&
	    (prev_group_bh) &&
	    (ocfs2_block_group_reasonably_empty(bg, *num_bits))) {
		status = ocfs2_relink_block_group(handle, alloc_inode,
						  ac->ac_bh, group_bh,
						  prev_group_bh, chain);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}
	}

	/* Ok, claim our bits now: set the info on dinode, chainlist
	 * and then the group */
	status = ocfs2_journal_access(handle,
				      alloc_inode,
				      ac->ac_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	tmp_used = le32_to_cpu(fe->id1.bitmap1.i_used);
	fe->id1.bitmap1.i_used = cpu_to_le32(*num_bits + tmp_used);
	le32_add_cpu(&cl->cl_recs[chain].c_free, -(*num_bits));

	status = ocfs2_journal_dirty(handle, ac->ac_bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	/*
	 * NOTE(review): if set_bits fails here the dinode/chain counts
	 * dirtied above are not rolled back — same inconsistency window
	 * upstream later closed with a rollback helper; confirm whether
	 * this tree needs that fix.
	 */
	status = ocfs2_block_group_set_bits(handle, alloc_inode, bg, group_bh,
					    *bit_off, *num_bits);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	mlog(0, "Allocated %u bits from suballocator %llu\n", *num_bits,
	     (unsigned long long)le64_to_cpu(fe->i_blkno));

	*bg_blkno = le64_to_cpu(bg->bg_blkno);
	*bits_left = le16_to_cpu(bg->bg_free_bits_count);
bail:
	if (group_bh)
		brelse(group_bh);
	if (prev_group_bh)
		brelse(prev_group_bh);

	mlog_exit(status);
	return status;
}