static long __init_memblock __memblock_remove(struct memblock_type *type,
					      phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int i;

	/* Walk through the array for collisions */
	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rend = rgn->base + rgn->size;

		/* Nothing more to do, exit */
		if (rgn->base > end || rgn->size == 0)
			break;

		/* If we fully enclose the block, drop it */
		if (base <= rgn->base && end >= rend) {
			memblock_remove_region(type, i--);
			continue;
		}

		/* If we are fully enclosed within a block
		 * then we need to split it and we are done
		 */
		if (base > rgn->base && end < rend) {
			rgn->size = base - rgn->base;
			if (!memblock_add_region(type, end, rend - end))
				return 0;
			/* Failure to split is bad, we at least
			 * restore the block before erroring
			 */
			rgn->size = rend - rgn->base;
			WARN_ON(1);
			return -1;
		}

		/* Check if we need to trim the bottom of a block */
		if (rgn->base < end && rend > end) {
			rgn->size -= end - rgn->base;
			rgn->base = end;
			break;
		}

		/* And check if we need to trim the top of a block */
		if (base < rend)
			rgn->size -= rend - base;
	}
	return 0;
}
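Below is a minimal standalone sketch (not kernel code; the names and hex ranges are invented for illustration) of the split arithmetic in the middle branch above: carving a hole strictly inside a region shrinks the region so it ends at the hole's base, and the remainder past the hole becomes a new region.

#include <stdio.h>

int main(void)
{
	unsigned long long rb = 0x1000, re = 0x9000;	/* existing region */
	unsigned long long base = 0x3000, end = 0x5000;	/* hole to carve */

	/* Shrink the region so it ends at the start of the hole ... */
	unsigned long long lo_size = base - rb;
	/* ... and the remainder after the hole becomes a new region. */
	unsigned long long hi_base = end, hi_size = re - end;

	printf("low:  [%#llx, %#llx)\n", rb, rb + lo_size);
	printf("high: [%#llx, %#llx)\n", hi_base, hi_base + hi_size);
	return 0;	/* low: [0x1000, 0x3000), high: [0x5000, 0x9000) */
}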
static int __init_memblock __memblock_remove(struct memblock_type *type,
					     phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = end_rgn - 1; i >= start_rgn; i--)
		memblock_remove_region(type, i);
	return 0;
}
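A minimal standalone sketch, assuming a toy demo_region array and a demo_remove_region helper that mimics memblock_remove_region's tail shift, of why the loop above deletes from end_rgn - 1 down to start_rgn: each removal shifts the later entries left, so walking backwards keeps the indices that are still to be removed valid.

#include <stdio.h>
#include <string.h>

struct demo_region { unsigned long base, size; };

static struct demo_region regions[] = {
	{ 0x0000, 0x1000 }, { 0x1000, 0x1000 },
	{ 0x2000, 0x1000 }, { 0x3000, 0x1000 },
};
static int cnt = 4;

/* Mirrors memblock_remove_region(): shift the tail down over slot i. */
static void demo_remove_region(int i)
{
	memmove(&regions[i], &regions[i + 1],
		(cnt - (i + 1)) * sizeof(regions[0]));
	cnt--;
}

int main(void)
{
	int start_rgn = 1, end_rgn = 3;	/* drop regions 1 and 2 */
	int i;

	for (i = end_rgn - 1; i >= start_rgn; i--)
		demo_remove_region(i);

	for (i = 0; i < cnt; i++)
		printf("region %d: base=%#lx size=%#lx\n",
		       i, regions[i].base, regions[i].size);
	return 0;	/* keeps the regions at 0x0000 and 0x3000 */
}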
static long __memblock_remove(struct memblock_region *rgn, u64 base, u64 size)
{
	u64 rgnbegin, rgnend;
	u64 end = base + size;
	int i;

	rgnbegin = rgnend = 0; /* suppress gcc warnings */

	/* Find the region where (base, size) belongs to */
	for (i = 0; i < rgn->cnt; i++) {
		rgnbegin = rgn->region[i].base;
		rgnend = rgnbegin + rgn->region[i].size;

		if ((rgnbegin <= base) && (end <= rgnend))
			break;
	}

	/* Didn't find the region */
	if (i == rgn->cnt)
		return -1;

	/* Check to see if we are removing entire region */
	if ((rgnbegin == base) && (rgnend == end)) {
		memblock_remove_region(rgn, i);
		return 0;
	}

	/* Check to see if region is matching at the front */
	if (rgnbegin == base) {
		rgn->region[i].base = end;
		rgn->region[i].size -= size;
		return 0;
	}

	/* Check to see if the region is matching at the end */
	if (rgnend == end) {
		rgn->region[i].size -= size;
		return 0;
	}

	/*
	 * We need to split the entry - adjust the current one to the
	 * beginning of the hole and add the region after hole.
	 */
	rgn->region[i].size = base - rgn->region[i].base;
	return memblock_add_region(rgn, end, rgnend - end);
}
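The front and back matches above reduce to simple interval arithmetic; here is a minimal standalone sketch of both cases (illustrative names and values, not kernel API): a removal flush with the front advances the base and shrinks the size, while one flush with the end only shrinks the size.

#include <stdio.h>

struct demo_region { unsigned long long base, size; };

int main(void)
{
	struct demo_region front = { 0x1000, 0x4000 };	/* [0x1000, 0x5000) */
	struct demo_region back  = { 0x1000, 0x4000 };	/* [0x1000, 0x5000) */
	unsigned long long size = 0x1000;

	/* Front match: removal starts at the region's base. */
	front.base += size;
	front.size -= size;

	/* Back match: removal ends at the region's end. */
	back.size -= size;

	printf("front-trimmed: [%#llx, %#llx)\n",
	       front.base, front.base + front.size);
	printf("back-trimmed:  [%#llx, %#llx)\n",
	       back.base, back.base + back.size);
	return 0;	/* [0x2000, 0x5000) and [0x1000, 0x4000) */
}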
/* You must call memblock_analyze() after this. */
void __init memblock_enforce_memory_limit(u64 memory_limit)
{
	unsigned long i;
	u64 limit;
	struct memblock_property *p;

	if (!memory_limit)
		return;

	/* Truncate the memblock regions to satisfy the memory limit. */
	limit = memory_limit;
	for (i = 0; i < memblock.memory.cnt; i++) {
		if (limit > memblock.memory.region[i].size) {
			limit -= memblock.memory.region[i].size;
			continue;
		}

		memblock.memory.region[i].size = limit;
		memblock.memory.cnt = i + 1;
		break;
	}

	if (memblock.memory.region[0].size < memblock.rmo_size)
		memblock.rmo_size = memblock.memory.region[0].size;

	memory_limit = memblock_end_of_DRAM();

	/* And truncate any reserves above the limit also. */
	for (i = 0; i < memblock.reserved.cnt; i++) {
		p = &memblock.reserved.region[i];

		if (p->base > memory_limit)
			p->size = 0;
		else if ((p->base + p->size) > memory_limit)
			p->size = memory_limit - p->base;

		if (p->size == 0) {
			memblock_remove_region(&memblock.reserved, i);
			i--;
		}
	}
}
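A minimal standalone sketch of the truncation walk above, with invented region contents (demo_region and apply_limit are illustrative, not kernel API): each region that fits whole is subtracted from the remaining limit; the first region that does not fit is clamped to what is left, and everything after it is discarded.

#include <stdio.h>

struct demo_region { unsigned long long base, size; };

static struct demo_region mem[] = {
	{ 0x00000000, 0x40000000 },	/* 1 GiB */
	{ 0x80000000, 0x40000000 },	/* 1 GiB */
	{ 0x100000000ULL, 0x40000000 },	/* 1 GiB */
};
static unsigned long cnt = 3;

static void apply_limit(unsigned long long limit)
{
	unsigned long i;

	for (i = 0; i < cnt; i++) {
		if (limit > mem[i].size) {
			limit -= mem[i].size;
			continue;
		}
		mem[i].size = limit;	/* clamp this region ... */
		cnt = i + 1;		/* ... and drop all later ones */
		break;
	}
}

int main(void)
{
	unsigned long i;

	apply_limit(0x60000000);	/* limit total memory to 1.5 GiB */
	for (i = 0; i < cnt; i++)
		printf("base=%#llx size=%#llx\n", mem[i].base, mem[i].size);
	return 0;	/* first region kept whole, second halved, third gone */
}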
static long __init_memblock memblock_add_region(struct memblock_type *type,
						phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int i, slot = -1;

	/* First try and coalesce this MEMBLOCK with others */
	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rend = rgn->base + rgn->size;

		/* Exit if there are no possible hits */
		if (rgn->base > end || rgn->size == 0)
			break;

		/* Check if we are fully enclosed within an existing
		 * block
		 */
		if (rgn->base <= base && rend >= end)
			return 0;

		/* Check if we overlap or are adjacent with the bottom
		 * of a block.
		 */
		if (base < rgn->base && end >= rgn->base) {
			/* If we can't coalesce, create a new block */
			if (!memblock_memory_can_coalesce(base, size,
							  rgn->base,
							  rgn->size)) {
				/* Overlap & can't coalesce are mutually
				 * exclusive; if you do that, be prepared
				 * for trouble
				 */
				WARN_ON(end != rgn->base);
				goto new_block;
			}
			/* We extend the bottom of the block down to our
			 * base
			 */
			rgn->base = base;
			rgn->size = rend - base;

			/* Return if we have nothing else to allocate
			 * (fully coalesced)
			 */
			if (rend >= end)
				return 0;

			/* We continue processing from the end of the
			 * coalesced block.
			 */
			base = rend;
			size = end - base;
		}

		/* Now check if we overlap or are adjacent with the
		 * top of a block
		 */
		if (base <= rend && end >= rend) {
			/* If we can't coalesce, create a new block */
			if (!memblock_memory_can_coalesce(rgn->base,
							  rgn->size,
							  base, size)) {
				/* Overlap & can't coalesce are mutually
				 * exclusive; if you do that, be prepared
				 * for trouble
				 */
				WARN_ON(rend != base);
				goto new_block;
			}
			/* We adjust our base down to enclose the
			 * original block and destroy it. It will be
			 * part of our new allocation. Since we've
			 * freed an entry, we know we won't fail
			 * to allocate one later, so we won't risk
			 * losing the original block allocation.
			 */
			size += (base - rgn->base);
			base = rgn->base;
			memblock_remove_region(type, i--);
		}
	}

	/* If the array is empty, special case: replace the fake
	 * filler region and return
	 */
	if ((type->cnt == 1) && (type->regions[0].size == 0)) {
		type->regions[0].base = base;
		type->regions[0].size = size;
		return 0;
	}

new_block:
	/* If we are out of space, we fail. It's too late to resize the array
	 * but then this shouldn't have happened in the first place.
	 */
	if (WARN_ON(type->cnt >= type->max))
		return -1;

	/* Couldn't coalesce the MEMBLOCK, so add it to the sorted table. */
	for (i = type->cnt - 1; i >= 0; i--) {
		if (base < type->regions[i].base) {
			type->regions[i+1].base = type->regions[i].base;
			type->regions[i+1].size = type->regions[i].size;
		} else {
			type->regions[i+1].base = base;
			type->regions[i+1].size = size;
			slot = i + 1;
			break;
		}
	}
	if (base < type->regions[0].base) {
		type->regions[0].base = base;
		type->regions[0].size = size;
		slot = 0;
	}
	type->cnt++;

	/* The array is full? Try to resize it. If that fails, we undo
	 * our allocation and return an error
	 */
	if (type->cnt == type->max && memblock_double_array(type)) {
		BUG_ON(slot < 0);
		memblock_remove_region(type, slot);
		return -1;
	}

	return 0;
}
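A minimal standalone sketch of the bottom-coalescing move above (illustrative names and ranges, not kernel API): a new range that overlaps or abuts a region's base pulls that base down, so a single region ends up covering the union of the two.

#include <stdio.h>

struct demo_region { unsigned long long base, size; };

int main(void)
{
	struct demo_region rgn = { 0x4000, 0x2000 };	/* [0x4000, 0x6000) */
	unsigned long long base = 0x2000, size = 0x2000;/* [0x2000, 0x4000) */
	unsigned long long end = base + size;
	unsigned long long rend = rgn.base + rgn.size;

	/* Bottom case: the new range ends where the region begins,
	 * so extend the region's bottom down to the new base. */
	if (base < rgn.base && end >= rgn.base) {
		rgn.size = rend - base;
		rgn.base = base;
	}

	printf("coalesced: [%#llx, %#llx)\n",
	       rgn.base, rgn.base + rgn.size);
	return 0;	/* [0x2000, 0x6000) */
}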
/* Assumption: base addr of region 1 < base addr of region 2 */
static void memblock_coalesce_regions(struct memblock_region *rgn,
				      unsigned long r1, unsigned long r2)
{
	rgn->region[r1].size += rgn->region[r2].size;
	memblock_remove_region(rgn, r2);
}
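A minimal standalone sketch of the merge itself (illustrative names and values): with region 1 ending exactly where region 2 begins, growing region 1 by region 2's size yields one contiguous region; the real code then removes region 2 from the array.

#include <stdio.h>

struct demo_region { unsigned long long base, size; };

int main(void)
{
	struct demo_region r[2] = {
		{ 0x1000, 0x1000 },	/* [0x1000, 0x2000) */
		{ 0x2000, 0x3000 },	/* [0x2000, 0x5000) */
	};

	r[0].size += r[1].size;	/* absorb region 2 ... */
	/* ... after which region 2 would be removed from the array. */

	printf("merged: [%#llx, %#llx)\n", r[0].base, r[0].base + r[0].size);
	return 0;	/* [0x1000, 0x5000) */
}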