static int __commit_transaction(struct btrfs_trans_handle *trans,
				struct btrfs_root *root)
{
	u64 start;
	u64 end;
	struct extent_buffer *eb;
	struct extent_io_tree *tree = &root->fs_info->extent_cache;
	int ret;

	while (1) {
		/* Find the next range of dirty extent buffers in the cache. */
		ret = find_first_extent_bit(tree, 0, &start, &end,
					    EXTENT_DIRTY);
		if (ret)
			break;
		while (start <= end) {
			eb = find_first_extent_buffer(tree, start);
			BUG_ON(!eb || eb->start != start);
			/* Write the buffer out, then clear its dirty bit. */
			ret = write_tree_block(trans, root, eb);
			BUG_ON(ret);
			start += eb->len;
			clear_extent_buffer_dirty(eb);
			free_extent_buffer(eb);
		}
	}
	return 0;
}
static int custom_alloc_extent(struct btrfs_root *root, u64 num_bytes,
			       u64 hint_byte, struct btrfs_key *ins)
{
	u64 start;
	u64 end;
	u64 last = hint_byte;
	int ret;
	int wrapped = 0;
	struct btrfs_block_group_cache *cache;

	while (1) {
		/* Scan the free space cache for the next free range. */
		ret = find_first_extent_bit(&root->fs_info->free_space_cache,
					    last, &start, &end, EXTENT_DIRTY);
		if (ret) {
			/* Wrap around to the start of the cache once. */
			if (wrapped++ == 0) {
				last = 0;
				continue;
			} else {
				goto fail;
			}
		}

		start = max(last, start);
		last = end + 1;
		if (last - start < num_bytes)
			continue;

		last = start + num_bytes;
		/* Skip ranges that overlap pinned extents. */
		if (test_range_bit(&root->fs_info->pinned_extents,
				   start, last - 1, EXTENT_DIRTY, 0))
			continue;

		cache = btrfs_lookup_block_group(root->fs_info, start);
		BUG_ON(!cache);
		/*
		 * Skip to the next block group if this is a system group or
		 * the allocation would spill past the group's end.
		 */
		if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM ||
		    last > cache->key.objectid + cache->key.offset) {
			last = cache->key.objectid + cache->key.offset;
			continue;
		}

		/* Only allocate from data block groups. */
		if (cache->flags & (BTRFS_BLOCK_GROUP_SYSTEM |
				    BTRFS_BLOCK_GROUP_METADATA)) {
			last = cache->key.objectid + cache->key.offset;
			continue;
		}

		/* Claim the range and return it to the caller. */
		clear_extent_dirty(&root->fs_info->free_space_cache,
				   start, start + num_bytes - 1, 0);
		ins->objectid = start;
		ins->offset = num_bytes;
		ins->type = BTRFS_EXTENT_ITEM_KEY;
		return 0;
	}
fail:
	fprintf(stderr, "not enough free space\n");
	return -ENOSPC;
}
int btrfs_write_marked_extents(struct btrfs_root *root,
			       struct extent_io_tree *dirty_pages, int mark)
{
	int ret;
	int err = 0;
	int werr = 0;
	struct page *page;
	struct inode *btree_inode = root->fs_info->btree_inode;
	u64 start = 0;
	u64 end;
	unsigned long index;

	while (1) {
		/* Find the next range of btree pages carrying this mark. */
		ret = find_first_extent_bit(dirty_pages, start, &start, &end,
					    mark);
		if (ret)
			break;
		while (start <= end) {
			cond_resched();

			index = start >> PAGE_CACHE_SHIFT;
			start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
			page = find_get_page(btree_inode->i_mapping, index);
			if (!page)
				continue;

			btree_lock_page_hook(page);
			if (!page->mapping) {
				/* Page was removed from the mapping. */
				unlock_page(page);
				page_cache_release(page);
				continue;
			}

			if (PageWriteback(page)) {
				/*
				 * If the page was redirtied, wait for the
				 * writeback in flight and write it again;
				 * otherwise it is already being written.
				 */
				if (PageDirty(page))
					wait_on_page_writeback(page);
				else {
					unlock_page(page);
					page_cache_release(page);
					continue;
				}
			}
			err = write_one_page(page, 0);
			if (err)
				werr = err;
			page_cache_release(page);
		}
	}
	if (err)
		werr = err;
	return werr;
}