/*
 * Walk every chunk mapping in the mapping tree and redirect stripes that
 * point at the source device to the replacement target device.
 */
static void btrfs_dev_replace_update_device_in_mapping_tree(
						struct btrfs_fs_info *fs_info,
						struct btrfs_device *srcdev,
						struct btrfs_device *tgtdev)
{
	struct extent_map_tree *em_tree = &fs_info->mapping_tree.map_tree;
	struct extent_map *em;
	struct map_lookup *map;
	u64 start = 0;
	int i;

	write_lock(&em_tree->lock);
	do {
		em = lookup_extent_mapping(em_tree, start, (u64)-1);
		if (!em)
			break;
		/* chunk mappings stash their map_lookup in em->bdev */
		map = (struct map_lookup *)em->bdev;
		for (i = 0; i < map->num_stripes; i++)
			if (srcdev == map->stripes[i].dev)
				map->stripes[i].dev = tgtdev;
		start = em->start + em->len;
		free_extent_map(em);
	} while (start);
	write_unlock(&em_tree->lock);
}
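/*
 * The function above visits every chunk mapping by repeatedly asking the
 * tree for the first mapping at or after 'start' and then advancing the
 * cursor past it.  A minimal user-space sketch of that cursor-style walk,
 * with a sorted array standing in for the rbtree; lookup_from() is
 * illustrative only, not kernel API.
 */
#include <stdint.h>
#include <stdio.h>

struct em { uint64_t start, len; };

/* return the first mapping whose range ends after 'start', or NULL */
static struct em *lookup_from(struct em *ems, int n, uint64_t start)
{
	for (int i = 0; i < n; i++)
		if (ems[i].start + ems[i].len > start)
			return &ems[i];
	return NULL;
}

int main(void)
{
	struct em ems[] = { { 0, 16 }, { 16, 32 }, { 64, 8 } };
	uint64_t start = 0;
	struct em *em;

	while ((em = lookup_from(ems, 3, start)) != NULL) {
		printf("visit [%llu, %llu)\n",
		       (unsigned long long)em->start,
		       (unsigned long long)(em->start + em->len));
		start = em->start + em->len;	/* advance the cursor */
	}
	return 0;
}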
int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len)
{
	int ret = 0;
	struct extent_map *merge = NULL;
	struct rb_node *rb;
	struct extent_map *em;

	write_lock(&tree->lock);
	em = lookup_extent_mapping(tree, start, len);

	/* check for NULL before dereferencing em->start */
	WARN_ON(!em || em->start != start);

	if (!em)
		goto out;

	clear_bit(EXTENT_FLAG_PINNED, &em->flags);

	/* try a backward merge with the previous mapping */
	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
			merge = rb_entry(rb, struct extent_map, rb_node);
		if (rb && mergable_maps(merge, em)) {
			em->start = merge->start;
			em->len += merge->len;
			em->block_len += merge->block_len;
			em->block_start = merge->block_start;
			merge->in_tree = 0;
			rb_erase(&merge->rb_node, &tree->map);
			free_extent_map(merge);
		}
	}

	/* try a forward merge with the next mapping */
	if (em->start + em->len < EXTENT_MAP_LAST_BYTE) {
		merge = NULL;
		rb = rb_next(&em->rb_node);
		if (rb)
			merge = rb_entry(rb, struct extent_map, rb_node);
		if (rb && mergable_maps(em, merge)) {
			em->len += merge->len;
			em->block_len += merge->block_len;
			rb_erase(&merge->rb_node, &tree->map);
			merge->in_tree = 0;
			free_extent_map(merge);
		}
	}

	/* drop the reference taken by lookup_extent_mapping() */
	free_extent_map(em);
out:
	write_unlock(&tree->lock);
	return ret;
}
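/*
 * Both merge branches above (and the same branches in add_extent_mapping()
 * below) defer the adjacency test to mergable_maps().  A minimal user-space
 * sketch of that test under simplified assumptions: the real kernel check
 * also compares flags and bdev and handles the special HOLE/INLINE/DELALLOC
 * block_start markers, which are omitted here.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

struct ext {
	uint64_t start;		/* logical file offset */
	uint64_t len;
	uint64_t block_start;	/* on-disk offset */
	bool pinned;
};

static uint64_t ext_end(const struct ext *e)
{
	return e->start + e->len;
}

/* prev and next merge only when logically and physically contiguous */
static bool mergable(const struct ext *prev, const struct ext *next)
{
	if (prev->pinned)
		return false;
	return ext_end(prev) == next->start &&
	       prev->block_start + prev->len == next->block_start;
}

int main(void)
{
	struct ext a = { 0, 16, 1024, false };
	struct ext b = { 16, 8, 1040, false };

	assert(mergable(&a, &b));	/* contiguous in file and on disk */
	return 0;
}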
/**
 * add_extent_mapping - add new extent map to the extent tree
 * @tree:	tree to insert new map in
 * @em:		map to insert
 *
 * Insert @em into @tree or perform a simple forward/backward merge with
 * existing mappings.  The extent_map struct passed in will be inserted
 * into the tree directly, with an additional reference taken, or a
 * reference dropped if the merge attempt was successful.
 */
int add_extent_mapping(struct extent_map_tree *tree,
		       struct extent_map *em)
{
	int ret = 0;
	struct extent_map *merge = NULL;
	struct rb_node *rb;
	struct extent_map *exist;

	/* the caller must already hold tree->lock for the lookup too */
	assert_spin_locked(&tree->lock);

	exist = lookup_extent_mapping(tree, em->start, em->len);
	if (exist) {
		free_extent_map(exist);
		ret = -EEXIST;
		goto out;
	}
	rb = tree_insert(&tree->map, em->start, &em->rb_node);
	if (rb) {
		ret = -EEXIST;
		goto out;
	}
	atomic_inc(&em->refs);

	/* try a backward merge with the previous mapping */
	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
			merge = rb_entry(rb, struct extent_map, rb_node);
		if (rb && mergable_maps(merge, em)) {
			em->start = merge->start;
			em->len += merge->len;
			em->block_len += merge->block_len;
			em->block_start = merge->block_start;
			merge->in_tree = 0;
			rb_erase(&merge->rb_node, &tree->map);
			free_extent_map(merge);
		}
	}

	/* try a forward merge with the next mapping */
	if (em->start + em->len < EXTENT_MAP_LAST_BYTE) {
		merge = NULL;
		rb = rb_next(&em->rb_node);
		if (rb)
			merge = rb_entry(rb, struct extent_map, rb_node);
		if (rb && mergable_maps(em, merge)) {
			em->len += merge->len;
			em->block_len += merge->block_len;
			rb_erase(&merge->rb_node, &tree->map);
			merge->in_tree = 0;
			free_extent_map(merge);
		}
	}
out:
	return ret;
}
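/*
 * Hedged usage sketch for add_extent_mapping(): callers hold the tree lock
 * across the insert and then drop their own reference, since the tree takes
 * one of its own.  insert_em() is a hypothetical helper; the pattern mirrors
 * the call sites in btrfs_drop_extent_cache() at the end of this section,
 * assuming the rwlock-protected tree used elsewhere in this section.
 */
static int insert_em(struct extent_map_tree *em_tree, struct extent_map *em)
{
	int ret;

	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em);
	write_unlock(&em_tree->lock);

	/* on success the tree holds its own reference; drop the caller's */
	free_extent_map(em);
	return ret;	/* -EEXIST if an overlapping mapping already exists */
}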
static int should_defrag_range(struct inode *inode, u64 start, u64 len,
			       int thresh, u64 *last_len, u64 *skip,
			       u64 *defrag_end)
{
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_map *em = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	int ret = 1;

	if (thresh == 0)
		thresh = 256 * 1024;

	/*
	 * make sure that once we start defragging an extent, we keep on
	 * defragging it
	 */
	if (start < *defrag_end)
		return 1;

	*skip = 0;

	/*
	 * hopefully we have this extent in the tree already, try without
	 * the full extent lock
	 */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
	read_unlock(&em_tree->lock);

	if (!em) {
		/* get the big lock and read metadata off disk */
		lock_extent(io_tree, start, start + len - 1, GFP_NOFS);
		em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
		unlock_extent(io_tree, start, start + len - 1, GFP_NOFS);

		if (IS_ERR(em))
			return 0;
	}

	/* this will cover holes, and inline extents */
	if (em->block_start >= EXTENT_MAP_LAST_BYTE)
		ret = 0;

	/*
	 * we hit a real extent, if it is big don't bother defragging it again
	 */
	if ((*last_len == 0 || *last_len >= thresh) && em->len >= thresh)
		ret = 0;

	/*
	 * last_len ends up being a counter of how many bytes we've defragged.
	 * every time we choose not to defrag an extent, we reset *last_len
	 * so that the next tiny extent will force a defrag.
	 *
	 * The end result of this is that tiny extents before a single big
	 * extent will force at least part of that big extent to be defragged.
	 */
	if (ret) {
		*last_len += len;
		*defrag_end = extent_map_end(em);
	} else {
		*last_len = 0;
		*skip = extent_map_end(em);
		*defrag_end = 0;
	}

	free_extent_map(em);
	return ret;
}
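/*
 * Sketch of how the out-parameters drive the defrag loop, modeled on the
 * caller (btrfs_defrag_file): '*skip' tells the loop how far to jump when a
 * range is rejected, and '*defrag_end' keeps later pages of the extent being
 * defragged once it has started.  This is an illustration, not the verbatim
 * caller; 'i', 'last_index', and 'extent_thresh' are assumed set up by the
 * caller, and the PAGE_CACHE_* macros are the page cache names of this era.
 */
	for (i = 0; i <= last_index; i++) {
		if (!should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT,
					 PAGE_CACHE_SIZE, extent_thresh,
					 &last_len, &skip, &defrag_end)) {
			unsigned long next;

			/* skip marks where the rejected extent ends */
			next = (skip + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
			i = max(i + 1, next) - 1;	/* loop's i++ lands on next */
			continue;
		}

		/* range accepted: queue page i for dirtying and writeback */
	}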
/*
 * this drops all the extents in the cache that intersect the range
 * [start, end].  Existing extents are split as required.
 */
int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
			    int skip_pinned)
{
	struct extent_map *em;
	struct extent_map *split = NULL;
	struct extent_map *split2 = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	u64 len = end - start + 1;
	int ret;
	int testend = 1;
	unsigned long flags;
	int compressed = 0;

	WARN_ON(end < start);
	if (end == (u64)-1) {
		len = (u64)-1;
		testend = 0;
	}
	while (1) {
		if (!split)
			split = alloc_extent_map(GFP_NOFS);
		if (!split2)
			split2 = alloc_extent_map(GFP_NOFS);

		write_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em) {
			write_unlock(&em_tree->lock);
			break;
		}
		flags = em->flags;
		if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
			if (testend && em->start + em->len >= start + len) {
				free_extent_map(em);
				write_unlock(&em_tree->lock);
				break;
			}
			start = em->start + em->len;
			if (testend)
				len = start + len - (em->start + em->len);
			free_extent_map(em);
			write_unlock(&em_tree->lock);
			continue;
		}
		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
		remove_extent_mapping(em_tree, em);

		/* front split: the piece of em before the dropped range */
		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    em->start < start) {
			split->start = em->start;
			split->len = start - em->start;
			split->orig_start = em->orig_start;
			split->block_start = em->block_start;

			if (compressed)
				split->block_len = em->block_len;
			else
				split->block_len = split->len;

			split->bdev = em->bdev;
			split->flags = flags;
			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret);
			free_extent_map(split);
			split = split2;
			split2 = NULL;
		}

		/* tail split: the piece of em after the dropped range */
		if (em->block_start < EXTENT_MAP_LAST_BYTE && testend &&
		    em->start + em->len > start + len) {
			u64 diff = start + len - em->start;

			split->start = start + len;
			split->len = em->start + em->len - (start + len);
			split->bdev = em->bdev;
			split->flags = flags;

			if (compressed) {
				split->block_len = em->block_len;
				split->block_start = em->block_start;
				split->orig_start = em->orig_start;
			} else {
				split->block_len = split->len;
				split->block_start = em->block_start + diff;
				split->orig_start = split->start;
			}

			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret);
			free_extent_map(split);
			split = NULL;
		}
		write_unlock(&em_tree->lock);

		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
	if (split)
		free_extent_map(split);
	if (split2)
		free_extent_map(split2);
	return 0;
}
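/*
 * Worked example of the two split cases above: dropping the inclusive range
 * [40, 59] (start = 40, len = 20) from a cached extent covering [0, 99]
 * leaves a front piece [0, 39] and a tail piece [60, 99].  A minimal
 * user-space check of that arithmetic for the uncompressed case, where
 * block_len follows len:
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t em_start = 0, em_len = 100;	/* cached extent [0, 99]  */
	uint64_t start = 40, len = 20;		/* range to drop [40, 59] */

	/* front split: [em_start, start) */
	uint64_t front_start = em_start;
	uint64_t front_len = start - em_start;

	/* tail split: [start + len, em_start + em_len) */
	uint64_t tail_start = start + len;
	uint64_t tail_len = em_start + em_len - (start + len);

	assert(front_start == 0 && front_len == 40);
	assert(tail_start == 60 && tail_len == 40);
	return 0;
}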