static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
                                   struct rb_node *node)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct extent_map *entry;

        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct extent_map, rb_node);

                WARN_ON(!entry->in_tree);

                if (offset < entry->start)
                        p = &(*p)->rb_left;
                else if (offset >= extent_map_end(entry))
                        p = &(*p)->rb_right;
                else
                        return parent;
        }

        entry = rb_entry(node, struct extent_map, rb_node);
        entry->in_tree = 1;
        rb_link_node(node, parent, p);
        rb_insert_color(node, root);
        return NULL;
}
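For reference, the range helpers this code leans on (extent_map_end(), extent_map_block_end(), range_end()) are small inline calculators. The sketch below matches how the functions here use them and mirrors the era's extent_map.h, though treat the exact definitions as paraphrased rather than authoritative:

/* end offset of an extent_map, clamped on u64 overflow */
static inline u64 extent_map_end(struct extent_map *em)
{
        if (em->start + em->len < em->start)
                return (u64)-1;
        return em->start + em->len;
}

/* end of the on-disk block range backing the mapping */
static inline u64 extent_map_block_end(struct extent_map *em)
{
        if (em->block_start + em->block_len < em->block_start)
                return (u64)-1;
        return em->block_start + em->block_len;
}

/* end of an arbitrary [start, start + len) range, same overflow clamp */
static u64 range_end(u64 start, u64 len)
{
        if (start + len < start)
                return (u64)-1;
        return start + len;
}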
/* check to see if two extent_map structs are adjacent and safe to merge */
static int mergable_maps(struct extent_map *prev, struct extent_map *next)
{
        if (test_bit(EXTENT_FLAG_PINNED, &prev->flags))
                return 0;

        /*
         * don't merge compressed extents, we need to know their
         * actual size
         */
        if (test_bit(EXTENT_FLAG_COMPRESSED, &prev->flags))
                return 0;

        if (extent_map_end(prev) == next->start &&
            prev->flags == next->flags &&
            prev->bdev == next->bdev &&
            ((next->block_start == EXTENT_MAP_HOLE &&
              prev->block_start == EXTENT_MAP_HOLE) ||
             (next->block_start == EXTENT_MAP_INLINE &&
              prev->block_start == EXTENT_MAP_INLINE) ||
             (next->block_start == EXTENT_MAP_DELALLOC &&
              prev->block_start == EXTENT_MAP_DELALLOC) ||
             (next->block_start < EXTENT_MAP_LAST_BYTE - 1 &&
              next->block_start == extent_map_block_end(prev)))) {
                return 1;
        }
        return 0;
}
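The block_start comparisons above test against sentinel values that btrfs stores for mappings with no single disk location. For orientation, these are the classic definitions from extent_map.h (worth double-checking against the exact tree this code came from):

/* sentinel block_start values for mappings without a real disk block */
#define EXTENT_MAP_LAST_BYTE    ((u64)-4)       /* anything >= this is not a real block */
#define EXTENT_MAP_HOLE         ((u64)-3)       /* a hole, no data on disk */
#define EXTENT_MAP_INLINE       ((u64)-2)       /* data stored inline in the btree leaf */
#define EXTENT_MAP_DELALLOC     ((u64)-1)       /* delayed allocation, no block yet */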
/*
 * insert @em into @root, or return -EEXIST if any part of the new
 * mapping's range overlaps an existing one
 */
static int tree_insert(struct rb_root *root, struct extent_map *em)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct extent_map *entry = NULL;
        struct rb_node *orig_parent = NULL;
        u64 end = range_end(em->start, em->len);

        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct extent_map, rb_node);

                WARN_ON(!entry->in_tree);

                if (em->start < entry->start)
                        p = &(*p)->rb_left;
                else if (em->start >= extent_map_end(entry))
                        p = &(*p)->rb_right;
                else
                        return -EEXIST;
        }

        /*
         * walk forward from the insertion point and make sure the new
         * range [em->start, end) doesn't overlap the next mapping
         */
        orig_parent = parent;
        while (parent && em->start >= extent_map_end(entry)) {
                parent = rb_next(parent);
                entry = rb_entry(parent, struct extent_map, rb_node);
        }
        if (parent)
                if (end > entry->start && em->start < extent_map_end(entry))
                        return -EEXIST;

        /* same check against the previous mapping */
        parent = orig_parent;
        entry = rb_entry(parent, struct extent_map, rb_node);
        while (parent && em->start < entry->start) {
                parent = rb_prev(parent);
                entry = rb_entry(parent, struct extent_map, rb_node);
        }
        if (parent)
                if (end > entry->start && em->start < extent_map_end(entry))
                        return -EEXIST;

        em->in_tree = 1;
        rb_link_node(&em->rb_node, orig_parent, p);
        rb_insert_color(&em->rb_node, root);
        return 0;
}
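In the kernel this insert runs under the extent map tree's write lock. Below is a minimal, hypothetical caller sketch showing that locking pattern; insert_mapping() is not a real kernel function, and the lock/map field names are taken from the era's struct extent_map_tree:

/*
 * Hypothetical caller: take the write lock, attempt the insert, and
 * let -EEXIST tell us an overlapping mapping already exists.
 */
static int insert_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
        int ret;

        write_lock(&tree->lock);
        ret = tree_insert(&tree->map, em);
        write_unlock(&tree->lock);

        /* on -EEXIST the caller typically looks up the existing map instead */
        return ret;
}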
/*
 * search through the tree for an extent_map with a given offset.  If
 * it can't be found, try to find some neighboring extents
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
                                     struct rb_node **prev_ret,
                                     struct rb_node **next_ret)
{
        struct rb_node *n = root->rb_node;
        struct rb_node *prev = NULL;
        struct rb_node *orig_prev = NULL;
        struct extent_map *entry;
        struct extent_map *prev_entry = NULL;

        while (n) {
                entry = rb_entry(n, struct extent_map, rb_node);
                prev = n;
                prev_entry = entry;

                WARN_ON(!entry->in_tree);

                if (offset < entry->start)
                        n = n->rb_left;
                else if (offset >= extent_map_end(entry))
                        n = n->rb_right;
                else
                        return n;
        }

        if (prev_ret) {
                orig_prev = prev;
                while (prev && offset >= extent_map_end(prev_entry)) {
                        prev = rb_next(prev);
                        prev_entry = rb_entry(prev, struct extent_map, rb_node);
                }
                *prev_ret = prev;
                prev = orig_prev;
        }

        if (next_ret) {
                prev_entry = rb_entry(prev, struct extent_map, rb_node);
                while (prev && offset < prev_entry->start) {
                        prev = rb_prev(prev);
                        prev_entry = rb_entry(prev, struct extent_map, rb_node);
                }
                *next_ret = prev;
        }
        return NULL;
}
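The main consumer of __tree_search() is lookup_extent_mapping(), which falls back to a neighbor when the exact offset misses and then checks that the ranges really intersect. A condensed sketch of that pattern (lookup_sketch() is illustrative; the real code also distinguishes strict from non-strict lookups):

/* sketch: find a mapping touching [start, start + len), or NULL */
static struct extent_map *lookup_sketch(struct extent_map_tree *tree,
                                        u64 start, u64 len)
{
        struct rb_node *node, *prev = NULL, *next = NULL;
        struct extent_map *em;
        u64 end = range_end(start, len);

        node = __tree_search(&tree->map, start, &prev, &next);
        if (!node)
                node = prev ? prev : next;
        if (!node)
                return NULL;

        em = rb_entry(node, struct extent_map, rb_node);

        /* a neighbor only counts if it actually overlaps the range */
        if (!(end > em->start && start < extent_map_end(em)))
                return NULL;

        atomic_inc(&em->refs);  /* caller must free_extent_map() */
        return em;
}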
/* check to see if two extent_map structs are adjacent and safe to merge */
static int mergable_maps(struct extent_map *prev, struct extent_map *next)
{
        if (test_bit(EXTENT_FLAG_PINNED, &prev->flags))
                return 0;

        /*
         * don't merge compressed extents, we need to know their
         * actual size
         */
        if (test_bit(EXTENT_FLAG_COMPRESSED, &prev->flags))
                return 0;

        if (test_bit(EXTENT_FLAG_LOGGING, &prev->flags) ||
            test_bit(EXTENT_FLAG_LOGGING, &next->flags))
                return 0;

        /*
         * We don't want to merge stuff that hasn't been written to the log yet
         * since it may not reflect exactly what is on disk, and that would be
         * bad.
         */
        if (!list_empty(&prev->list) || !list_empty(&next->list))
                return 0;

        if (extent_map_end(prev) == next->start &&
            prev->flags == next->flags &&
            prev->bdev == next->bdev &&
            ((next->block_start == EXTENT_MAP_HOLE &&
              prev->block_start == EXTENT_MAP_HOLE) ||
             (next->block_start == EXTENT_MAP_INLINE &&
              prev->block_start == EXTENT_MAP_INLINE) ||
             (next->block_start == EXTENT_MAP_DELALLOC &&
              prev->block_start == EXTENT_MAP_DELALLOC) ||
             (next->block_start < EXTENT_MAP_LAST_BYTE - 1 &&
              next->block_start == extent_map_block_end(prev)))) {
                return 1;
        }
        return 0;
}
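mergable_maps() is consulted by try_merge_map() right after an insert (and when an extent is unpinned). The sketch below condenses the previous-neighbor half of that logic; the real function also merges with the next neighbor and maintains more fields (orig_start, generation), so read it as an illustration, not the upstream code:

/* sketch: fold the previous adjacent mapping into @em if allowed */
static void try_merge_prev(struct extent_map_tree *tree, struct extent_map *em)
{
        struct rb_node *rb;
        struct extent_map *merge;

        if (em->start == 0)
                return;

        rb = rb_prev(&em->rb_node);
        if (!rb)
                return;

        merge = rb_entry(rb, struct extent_map, rb_node);
        if (!mergable_maps(merge, em))
                return;

        /* grow em backwards over the previous mapping, then drop it */
        em->start = merge->start;
        em->len += merge->len;
        em->block_start = merge->block_start;
        em->block_len += merge->block_len;
        merge->in_tree = 0;
        rb_erase(&merge->rb_node, &tree->map);
        free_extent_map(merge);
}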
static int should_defrag_range(struct inode *inode, u64 start, u64 len,
                               int thresh, u64 *last_len, u64 *skip,
                               u64 *defrag_end)
{
        struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
        struct extent_map *em = NULL;
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        int ret = 1;

        if (thresh == 0)
                thresh = 256 * 1024;

        /*
         * make sure that once we start defragging an extent, we keep on
         * defragging it
         */
        if (start < *defrag_end)
                return 1;

        *skip = 0;

        /*
         * hopefully we have this extent in the tree already, try without
         * the full extent lock
         */
        read_lock(&em_tree->lock);
        em = lookup_extent_mapping(em_tree, start, len);
        read_unlock(&em_tree->lock);

        if (!em) {
                /* get the big lock and read metadata off disk */
                lock_extent(io_tree, start, start + len - 1, GFP_NOFS);
                em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
                unlock_extent(io_tree, start, start + len - 1, GFP_NOFS);

                if (IS_ERR(em))
                        return 0;
        }

        /* this will cover holes, and inline extents */
        if (em->block_start >= EXTENT_MAP_LAST_BYTE)
                ret = 0;

        /*
         * we hit a real extent, if it is big don't bother defragging it again
         */
        if ((*last_len == 0 || *last_len >= thresh) && em->len >= thresh)
                ret = 0;

        /*
         * last_len ends up being a counter of how many bytes we've defragged.
         * every time we choose not to defrag an extent, we reset *last_len
         * so that the next tiny extent will force a defrag.
         *
         * The end result of this is that tiny extents before a single big
         * extent will force at least part of that big extent to be defragged.
         */
        if (ret) {
                *last_len += len;
                *defrag_end = extent_map_end(em);
        } else {
                *last_len = 0;
                *skip = extent_map_end(em);
                *defrag_end = 0;
        }

        free_extent_map(em);
        return ret;
}
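In btrfs_defrag_file() this helper gates each page range, and when it declines, *skip tells the loop how far to jump ahead. A condensed, illustrative sketch of that calling loop (defrag_walk_sketch() is hypothetical and elides the actual page dirtying work):

/* sketch: walk a file's pages, skipping ranges not worth defragging */
static void defrag_walk_sketch(struct inode *inode, unsigned long last_index,
                               int thresh)
{
        u64 last_len = 0, skip = 0, defrag_end = 0;
        unsigned long i = 0;

        while (i <= last_index) {
                if (!should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT,
                                         PAGE_CACHE_SIZE, thresh,
                                         &last_len, &skip, &defrag_end)) {
                        /* jump past the extent should_defrag_range examined */
                        unsigned long next = (skip + ~PAGE_CACHE_MASK) >>
                                             PAGE_CACHE_SHIFT;
                        i = max(i + 1, next);
                        continue;
                }

                /* ... mark this page range dirty for defrag here ... */
                i++;
        }
}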