/*
 * memblock_is_region_memory - check if the whole range lies within memory
 * @base: base of the range to check
 * @size: size of the range to check
 *
 * Returns 1 if [@base, @base + @size) is entirely contained inside a
 * single region of memblock.memory, 0 otherwise.  @size is clamped by
 * memblock_cap_size() so that @base + @size cannot wrap around.
 */
int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);
	phys_addr_t end = base + memblock_cap_size(base, &size);
	struct memblock_region *rgn;

	/* no memory region contains @base at all */
	if (idx == -1)
		return 0;

	/* the found region must cover the range on both sides */
	rgn = &memblock.memory.regions[idx];
	return rgn->base <= base && rgn->base + rgn->size >= end;
}
/**
 * memblock_isolate_range - isolate given range into disjoint memblocks
 * @type: memblock type to isolate range for
 * @base: base of range to isolate
 * @size: size of range to isolate
 * @start_rgn: out parameter for the start of isolated region
 * @end_rgn: out parameter for the end of isolated region
 *
 * Walk @type and ensure that regions don't cross the boundaries defined by
 * [@base,@base+@size). Crossing regions are split at the boundaries,
 * which may create at most two more regions. The index of the first
 * region inside the range is returned in *@start_rgn and end in *@end_rgn.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size,
					int *start_rgn, int *end_rgn)
{
	/* memblock_cap_size() clamps @size so that @base + @size can't wrap */
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int i;

	*start_rgn = *end_rgn = 0;

	/* we'll create at most two more regions */
	while (type->cnt + 2 > type->max)
		if (memblock_double_array(type) < 0)
			return -ENOMEM;

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		/* regions are sorted by base; nothing past here can overlap */
		if (rbase >= end)
			break;
		/* region entirely below the range - skip it */
		if (rend <= base)
			continue;

		if (rbase < base) {
			/*
			 * @rgn intersects from below. Split and continue
			 * to process the next region - the new top half.
			 */
			rgn->base = base;
			rgn->size -= base - rbase;
			/*
			 * The lower half is re-inserted just below, so the
			 * net change to total_size is zero.
			 */
			type->total_size -= base - rbase;
			memblock_insert_region(type, i, rbase, base - rbase,
					       memblock_get_region_node(rgn));
		} else if (rend > end) {
			/*
			 * @rgn intersects from above. Split and redo the
			 * current region - the new bottom half.
			 * (i-- so the next iteration revisits the bottom
			 * half and records it as fully contained.)
			 */
			rgn->base = end;
			rgn->size -= end - rbase;
			type->total_size -= end - rbase;
			memblock_insert_region(type, i--, rbase, end - rbase,
					       memblock_get_region_node(rgn));
		} else {
			/* @rgn is fully contained, record it */
			if (!*end_rgn)
				*start_rgn = i;
			*end_rgn = i + 1;
		}
	}

	return 0;
}
/*
 * __memblock_remove - remove [@base, @base + @size) from @type
 * @type: memblock type to remove the range from
 * @base: base of the range to remove
 * @size: size of the range to remove
 *
 * Regions fully inside the range are dropped, regions straddling one
 * boundary are trimmed, and a region enclosing the whole range is split
 * in two.  @size is clamped by memblock_cap_size() so @base + @size
 * cannot wrap.
 *
 * RETURNS:
 * 0 on success, -1 if a required split could not allocate a new region
 * (the original region is restored in that case).
 */
static long __init_memblock __memblock_remove(struct memblock_type *type,
					      phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int i;

	/* Walk through the array for collisions */
	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rend = rgn->base + rgn->size;

		/*
		 * Regions are sorted by base and @end is exclusive: once a
		 * region starts at or beyond @end it cannot overlap, nor can
		 * any later one.  Using '>' here (as before) let a region
		 * starting exactly at @end fall through to the trim-top
		 * branch below and underflow its size.
		 */
		if (rgn->base >= end || rgn->size == 0)
			break;

		/* If we fully enclose the block, drop it */
		if (base <= rgn->base && end >= rend) {
			memblock_remove_region(type, i--);
			continue;
		}

		/* If we are fully enclosed within a block
		 * then we need to split it and we are done
		 */
		if (base > rgn->base && end < rend) {
			rgn->size = base - rgn->base;
			if (!memblock_add_region(type, end, rend - end))
				return 0;
			/* Failure to split is bad, we at least
			 * restore the block before erroring
			 */
			rgn->size = rend - rgn->base;
			WARN_ON(1);
			return -1;
		}

		/* Check if we need to trim the bottom of a block */
		if (rgn->base < end && rend > end) {
			rgn->size -= end - rgn->base;
			rgn->base = end;
			break;
		}

		/* And check if we need to trim the top of a block */
		if (base < rend)
			rgn->size -= rend - base;

	}
	return 0;
}
/*
 * memblock_is_region_reserved - check for overlap with reserved memory
 * @base: base of the range to check
 * @size: size of the range to check
 *
 * Returns non-zero (true) if [@base, @base + @size) overlaps any region
 * in memblock.reserved.  @size is clamped against address wrap first.
 */
int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	int idx;

	/* clamp @size so @base + @size can't wrap past the address limit */
	memblock_cap_size(base, &size);

	/* memblock_overlaps_region() yields a region index, or < 0 if none */
	idx = memblock_overlaps_region(&memblock.reserved, base, size);
	return idx >= 0;
}
/*
 * memblock_add_region - add [@base, @base + @size), coalescing neighbours
 * @type: memblock type to add the new region to
 * @base: base address of the new region
 * @size: size of the new region
 *
 * Tries to merge the new range with adjacent/overlapping regions (subject
 * to memblock_memory_can_coalesce()); otherwise inserts it into the sorted
 * region table, doubling the table when it becomes full.
 *
 * NOTE(review): this file also contains a second, nid-aware static
 * memblock_add_region() (the two-pass variant further down).  Two static
 * definitions of the same name cannot coexist in one translation unit —
 * confirm which variant this tree actually builds.
 *
 * RETURNS:
 * 0 on success, -1 if the region table is full and cannot be resized.
 */
static long __init_memblock memblock_add_region(struct memblock_type *type,
						phys_addr_t base, phys_addr_t size)
{
	/* @end is exclusive; memblock_cap_size() prevents address wrap */
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int i, slot = -1;

	/* First try and coalesce this MEMBLOCK with others */
	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rend = rgn->base + rgn->size;

		/* Exit if there's no possible hits */
		if (rgn->base > end || rgn->size == 0)
			break;

		/* Check if we are fully enclosed within an existing
		 * block
		 */
		if (rgn->base <= base && rend >= end)
			return 0;

		/* Check if we overlap or are adjacent with the bottom
		 * of a block.
		 */
		if (base < rgn->base && end >= rgn->base) {
			/* If we can't coalesce, create a new block */
			if (!memblock_memory_can_coalesce(base, size,
							  rgn->base,
							  rgn->size)) {
				/* Overlap & can't coalesce are mutually
				 * exclusive, if you do that, be prepared
				 * for trouble
				 */
				WARN_ON(end != rgn->base);
				goto new_block;
			}
			/* We extend the bottom of the block down to our
			 * base
			 */
			rgn->base = base;
			rgn->size = rend - base;

			/* Return if we have nothing else to allocate
			 * (fully coalesced)
			 */
			if (rend >= end)
				return 0;

			/* We continue processing from the end of the
			 * coalesced block.
			 */
			base = rend;
			size = end - base;
		}

		/* Now check if we overlap or are adjacent with the
		 * top of a block
		 */
		if (base <= rend && end >= rend) {
			/* If we can't coalesce, create a new block */
			if (!memblock_memory_can_coalesce(rgn->base,
							  rgn->size,
							  base, size)) {
				/* Overlap & can't coalesce are mutually
				 * exclusive, if you do that, be prepared
				 * for trouble
				 */
				WARN_ON(rend != base);
				goto new_block;
			}
			/* We adjust our base down to enclose the
			 * original block and destroy it. It will be
			 * part of our new allocation. Since we've
			 * freed an entry, we know we won't fail
			 * to allocate one later, so we won't risk
			 * losing the original block allocation.
			 */
			size += (base - rgn->base);
			base = rgn->base;
			/* i-- so the loop revisits the slot that shifted down */
			memblock_remove_region(type, i--);
		}
	}

	/* If the array is empty, special case, replace the fake
	 * filler region and return
	 */
	if ((type->cnt == 1) && (type->regions[0].size == 0)) {
		type->regions[0].base = base;
		type->regions[0].size = size;
		return 0;
	}

 new_block:
	/* If we are out of space, we fail. It's too late to resize the array
	 * but then this shouldn't have happened in the first place.
	 */
	if (WARN_ON(type->cnt >= type->max))
		return -1;

	/* Couldn't coalesce the MEMBLOCK, so add it to the sorted table. */
	for (i = type->cnt - 1; i >= 0; i--) {
		if (base < type->regions[i].base) {
			/* shift larger entries up to make room */
			type->regions[i+1].base = type->regions[i].base;
			type->regions[i+1].size = type->regions[i].size;
		} else {
			type->regions[i+1].base = base;
			type->regions[i+1].size = size;
			slot = i + 1;
			break;
		}
	}
	/* new region sorts below every existing one */
	if (base < type->regions[0].base) {
		type->regions[0].base = base;
		type->regions[0].size = size;
		slot = 0;
	}
	type->cnt++;

	/* The array is full ? Try to resize it. If that fails, we undo
	 * our allocation and return an error
	 */
	if (type->cnt == type->max && memblock_double_array(type)) {
		BUG_ON(slot < 0);
		memblock_remove_region(type, slot);
		return -1;
	}

	return 0;
}
/**
 * memblock_add_region - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 *
 * Add new memblock region [@base,@base+@size) into @type. The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions. @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_add_region(struct memblock_type *type,
				phys_addr_t base, phys_addr_t size, int nid)
{
	bool insert = false;
	phys_addr_t obase = base;	/* remember @base for the second pass */
	/* memblock_cap_size() clamps @size so @base + @size can't wrap */
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int i, nr_new;

	/* special case for empty array */
	if (type->regions[0].size == 0) {
		WARN_ON(type->cnt != 1 || type->total_size);
		type->regions[0].base = base;
		type->regions[0].size = size;
		memblock_set_region_node(&type->regions[0], nid);
		type->total_size = size;
		return 0;
	}
repeat:
	/*
	 * The following is executed twice. Once with %false @insert and
	 * then with %true. The first counts the number of regions needed
	 * to accomodate the new area. The second actually inserts them.
	 */
	base = obase;
	nr_new = 0;

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		/* regions are sorted; nothing past here can overlap */
		if (rbase >= end)
			break;
		if (rend <= base)
			continue;
		/*
		 * @rgn overlaps. If it separates the lower part of new
		 * area, insert that portion.
		 */
		if (rbase > base) {
			nr_new++;
			if (insert)
				memblock_insert_region(type, i++, base,
						       rbase - base, nid);
		}
		/* area below @rend is dealt with, forget about it */
		base = min(rend, end);
	}

	/* insert the remaining portion */
	if (base < end) {
		nr_new++;
		if (insert)
			memblock_insert_region(type, i, base, end - base, nid);
	}

	/*
	 * If this was the first round, resize array and repeat for actual
	 * insertions; otherwise, merge and return.
	 */
	if (!insert) {
		/* grow the array up front so the insert pass cannot fail */
		while (type->cnt + nr_new > type->max)
			if (memblock_double_array(type) < 0)
				return -ENOMEM;
		insert = true;
		goto repeat;
	} else {
		memblock_merge_regions(type);
		return 0;
	}
}