static phys_addr_t __init
memblock_alloc_nid_region(struct memblock_region *mp,
			  phys_addr_t size,
			  phys_addr_t align, int nid)
{
	phys_addr_t start, end;

	start = mp->base;
	end = start + mp->size;

	start = memblock_align_up(start, align);
	while (start < end) {
		phys_addr_t this_end;
		int this_nid;

		this_end = memblock_nid_range(start, end, &this_nid);
		if (this_nid == nid) {
			phys_addr_t ret = memblock_find_region(start, this_end,
							       size, align);
			if (ret != MEMBLOCK_ERROR &&
			    !memblock_add_region(&memblock.reserved, ret, size))
				return ret;
		}
		start = this_end;
	}

	return MEMBLOCK_ERROR;
}
long __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	struct memblock_type *_rgn = &memblock.reserved;

	BUG_ON(0 == size);

	return memblock_add_region(_rgn, base, size);
}
long __init memblock_reserve(u64 base, u64 size)
{
	struct memblock_region *_rgn = &memblock.reserved;

	BUG_ON(0 == size);

	return memblock_add_region(_rgn, base, size);
}
long memblock_add(u64 base, u64 size)
{
	struct memblock_region *_rgn = &memblock.memory;

	/* On pSeries LPAR systems, the first MEMBLOCK is our RMO region. */
	if (base == 0)
		memblock.rmo_size = size;

	return memblock_add_region(_rgn, base, size);
}
int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	struct memblock_type *_rgn = &memblock.reserved;

	memblock_dbg("memblock_reserve: [%#016llx-%#016llx] %pF\n",
		     (unsigned long long)base,
		     (unsigned long long)base + size,
		     (void *)_RET_IP_);
	BUG_ON(0 == size);

	return memblock_add_region(_rgn, base, size, MAX_NUMNODES);
}
static long __init_memblock __memblock_remove(struct memblock_type *type,
					      phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int i;

	/* Walk through the array for collisions */
	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rend = rgn->base + rgn->size;

		/* Nothing more to do, exit */
		if (rgn->base > end || rgn->size == 0)
			break;

		/* If we fully enclose the block, drop it */
		if (base <= rgn->base && end >= rend) {
			memblock_remove_region(type, i--);
			continue;
		}

		/* If we are fully enclosed within a block
		 * then we need to split it and we are done
		 */
		if (base > rgn->base && end < rend) {
			rgn->size = base - rgn->base;
			if (!memblock_add_region(type, end, rend - end))
				return 0;
			/* Failure to split is bad, we at least
			 * restore the block before erroring
			 */
			rgn->size = rend - rgn->base;
			WARN_ON(1);
			return -1;
		}

		/* Check if we need to trim the bottom of a block */
		if (rgn->base < end && rend > end) {
			rgn->size -= end - rgn->base;
			rgn->base = end;
			break;
		}

		/* And check if we need to trim the top of a block */
		if (base < rend)
			rgn->size -= rend - base;
	}
	return 0;
}
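/*
 * Illustrative trace, not part of the original source (the addresses
 * are hypothetical): punching a hole in the middle of a region takes
 * the "fully enclosed within a block" branch above and splits the
 * region in two.
 *
 *   before: [0x1000 .. 0x9000)
 *   __memblock_remove(type, 0x4000, 0x1000)
 *   after:  [0x1000 .. 0x4000)  [0x5000 .. 0x9000)
 */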
phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align,
					 phys_addr_t max_addr)
{
	phys_addr_t found;

	/* We align the size to limit fragmentation. Without this, a lot of
	 * small allocs quickly eat up the whole reserve array on sparc
	 */
	size = round_up(size, align);

	found = memblock_find_in_range(0, max_addr, size, align);
	if (found && !memblock_add_region(&memblock.reserved, found, size))
		return found;

	return 0;
}
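/*
 * Illustrative caller, not part of the original source: a minimal
 * sketch of forcing an early allocation below 4 GiB, e.g. for a table
 * that a 32-bit DMA engine must reach.  The helper name, the SZ_4G
 * limit and the panic-on-failure policy are assumptions made for the
 * example.
 */
static phys_addr_t __init alloc_dma32_table(phys_addr_t size)
{
	phys_addr_t pa = __memblock_alloc_base(size, PAGE_SIZE, SZ_4G);

	if (!pa)	/* 0 means no suitable free range was found */
		panic("cannot place %llu bytes below 4 GiB\n",
		      (unsigned long long)size);
	return pa;
}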
static long __memblock_remove(struct memblock_region *rgn, u64 base, u64 size)
{
	u64 rgnbegin, rgnend;
	u64 end = base + size;
	int i;

	rgnbegin = rgnend = 0; /* suppress gcc warnings */

	/* Find the region where (base, size) belongs to */
	for (i = 0; i < rgn->cnt; i++) {
		rgnbegin = rgn->region[i].base;
		rgnend = rgnbegin + rgn->region[i].size;

		if ((rgnbegin <= base) && (end <= rgnend))
			break;
	}

	/* Didn't find the region */
	if (i == rgn->cnt)
		return -1;

	/* Check to see if we are removing entire region */
	if ((rgnbegin == base) && (rgnend == end)) {
		memblock_remove_region(rgn, i);
		return 0;
	}

	/* Check to see if region is matching at the front */
	if (rgnbegin == base) {
		rgn->region[i].base = end;
		rgn->region[i].size -= size;
		return 0;
	}

	/* Check to see if the region is matching at the end */
	if (rgnend == end) {
		rgn->region[i].size -= size;
		return 0;
	}

	/*
	 * We need to split the entry - adjust the current one to the
	 * beginning of the hole and add the region after the hole.
	 */
	rgn->region[i].size = base - rgn->region[i].base;
	return memblock_add_region(rgn, end, rgnend - end);
}
phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t found;

	/*
	 * We align the size to limit fragmentation. Without this, a lot of
	 * small allocs quickly eat up the whole reserve array on sparc
	 */
	size = round_up(size, align);

	found = memblock_find_in_range_node(0, MEMBLOCK_ALLOC_ACCESSIBLE,
					    size, align, nid);
	if (found && !memblock_add_region(&memblock.reserved, found, size))
		return found;

	return 0;
}
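/*
 * Illustrative caller, not part of the original source: a sketch of
 * allocating one early scratch buffer per node with node affinity.
 * The helper name, for_each_online_node() timing and SZ_64K size are
 * assumptions; a 0 return simply means no memory on that node fit.
 */
static void __init alloc_node_scratch(void)
{
	int nid;

	for_each_online_node(nid) {
		phys_addr_t pa = memblock_alloc_nid(SZ_64K, SMP_CACHE_BYTES, nid);

		if (!pa)
			pr_warn("node %d: no node-local scratch memory\n", nid);
	}
}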
u64 __init __memblock_alloc_base(u64 size, u64 align, u64 max_addr)
{
	long i, j;
	u64 base = 0;
	u64 res_base;

	BUG_ON(0 == size);

	size = memblock_align_up(size, align);

	/* On some platforms, make sure we allocate lowmem */
	/* Note that MEMBLOCK_REAL_LIMIT may be MEMBLOCK_ALLOC_ANYWHERE */
	if (max_addr == MEMBLOCK_ALLOC_ANYWHERE)
		max_addr = MEMBLOCK_REAL_LIMIT;

	for (i = memblock.memory.cnt - 1; i >= 0; i--) {
		u64 memblockbase = memblock.memory.region[i].base;
		u64 memblocksize = memblock.memory.region[i].size;

		if (memblocksize < size)
			continue;
		if (max_addr == MEMBLOCK_ALLOC_ANYWHERE)
			base = memblock_align_down(memblockbase +
						   memblocksize - size, align);
		else if (memblockbase < max_addr) {
			base = min(memblockbase + memblocksize, max_addr);
			base = memblock_align_down(base - size, align);
		} else
			continue;

		while (base && memblockbase <= base) {
			j = memblock_overlaps_region(&memblock.reserved,
						     base, size);
			if (j < 0) {
				/* this area isn't reserved, take it */
				if (memblock_add_region(&memblock.reserved,
							base, size) < 0)
					return 0;
				return base;
			}
			res_base = memblock.reserved.region[j].base;
			if (res_base < size)
				break;
			base = memblock_align_down(res_base - size, align);
		}
	}
	return 0;
}
static u64 __init memblock_alloc_nid_unreserved(u64 start, u64 end,
						u64 size, u64 align)
{
	u64 base, res_base;
	long j;

	base = memblock_align_down((end - size), align);
	while (start <= base) {
		j = memblock_overlaps_region(&memblock.reserved, base, size);
		if (j < 0) {
			/* this area isn't reserved, take it */
			if (memblock_add_region(&memblock.reserved,
						base, size) < 0)
				base = ~(u64)0;
			return base;
		}
		res_base = memblock.reserved.region[j].base;
		if (res_base < size)
			break;
		base = memblock_align_down(res_base - size, align);
	}

	return ~(u64)0;
}
long __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	return memblock_add_region(&memblock.memory, base, size);
}
static int __init_memblock memblock_double_array(struct memblock_type *type)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_size, new_size, addr;
	int use_slab = slab_is_available();

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;

	/* Try to find some space for it.
	 *
	 * WARNING: We assume that either slab_is_available() and we use it or
	 * we use MEMBLOCK for allocations. That means that this is unsafe to use
	 * when bootmem is currently active (unless bootmem itself is implemented
	 * on top of MEMBLOCK which isn't the case yet)
	 *
	 * This should however not be an issue for now, as we currently only
	 * call into MEMBLOCK while it's still active, or much later when slab is
	 * active for memory hotplug operations
	 */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array == NULL ? MEMBLOCK_ERROR : __pa(new_array);
	} else
		addr = memblock_find_base(new_size, sizeof(phys_addr_t), 0,
					  MEMBLOCK_ALLOC_ACCESSIBLE);
	if (addr == MEMBLOCK_ERROR) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
		       memblock_type_name(type), type->max, type->max * 2);
		return -1;
	}
	new_array = __va(addr);

	memblock_dbg("memblock: %s array is doubled to %ld at [%#010llx-%#010llx]",
		     memblock_type_name(type), type->max * 2,
		     (u64)addr, (u64)addr + new_size - 1);

	/* Found space, we now need to move the array over before
	 * we add the reserved region since it may be our reserved
	 * array itself that is full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* If we use SLAB that's it, we are done */
	if (use_slab)
		return 0;

	/* Add the new reserved region now. Should not fail ! */
	BUG_ON(memblock_add_region(&memblock.reserved, addr, new_size));

	/* If the array wasn't our static init one, then free it. We only do
	 * that before SLAB is available as later on, we don't know whether
	 * to use kfree or free_bootmem_pages(). Shouldn't be a big deal
	 * anyways
	 */
	if (old_array != memblock_memory_init_regions &&
	    old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_size);

	return 0;
}
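/*
 * Illustrative arch-side gate, not part of the original source: the
 * doubling above only runs once memblock_can_resize is set.  In
 * mainline that flag is set via memblock_allow_resize(), which an
 * architecture calls only after reserving everything that must never
 * be handed out; in the snapshot above the flag may be set elsewhere.
 * The hook name below is hypothetical.
 */
void __init hypothetical_arch_memblock_setup(void)
{
	memblock_reserve(__pa(_text), _end - _text);	/* kernel image */
	memblock_allow_resize();		/* sets memblock_can_resize */
}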
int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	return memblock_add_region(&memblock.memory, base, size, MAX_NUMNODES);
}
int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
				      int nid)
{
	return memblock_add_region(&memblock.memory, base, size, nid);
}
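/*
 * Illustrative early NUMA setup, not part of the original source: the
 * banks, addresses and firmware hole are hypothetical, and the point
 * is simply to show memblock_add_node() registering memory per node
 * alongside the node-agnostic memblock_reserve() variants above.
 */
void __init hypothetical_numa_register_banks(void)
{
	memblock_add_node(0x00000000UL, SZ_2G, 0);	/* bank on node 0 */
	memblock_add_node(0x80000000UL, SZ_2G, 1);	/* bank on node 1 */
	memblock_reserve(0x7f000000UL, SZ_16M);		/* firmware hole */
}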