static void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
					u64 goal, u64 limit)
{
	void *ptr;
	u64 addr;
	ulong flags = choose_memblock_flags();

	if (limit > memblock.current_limit)
		limit = memblock.current_limit;

again:
	addr = memblock_find_in_range_node(size, align, goal, limit, nid,
					   flags);
	if (!addr && (flags & MEMBLOCK_MIRROR)) {
		flags &= ~MEMBLOCK_MIRROR;
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		goto again;
	}
	if (!addr)
		return NULL;

	if (memblock_reserve(addr, size))
		return NULL;

	ptr = phys_to_virt(addr);
	memset(ptr, 0, size);
	/*
	 * The min_count is set to 0 so that bootmem allocated blocks
	 * are never reported as leaks.
	 */
	kmemleak_alloc(ptr, size, 0, 0);

	return ptr;
}
static void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
					u64 goal, u64 limit)
{
	void *ptr;
	u64 addr;

	if (limit > memblock.current_limit)
		limit = memblock.current_limit;

	addr = memblock_find_in_range_node(size, align, goal, limit, nid);
	if (!addr)
		return NULL;

	if (memblock_reserve(addr, size))
		return NULL;

	ptr = phys_to_virt(addr);
	memset(ptr, 0, size);
	/*
	 * The min_count is set to 0 so that bootmem allocated blocks
	 * are never reported as leaks.
	 */
	kmemleak_alloc(ptr, size, 0, 0);

	return ptr;
}
static void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
					u64 goal, u64 limit)
{
	phys_addr_t addr;
	void *ptr;
	unsigned long flags = choose_memblock_flags();

	if (WARN_ON_ONCE(limit > memblock.current_limit))
		limit = memblock.current_limit;

again:
	addr = memblock_find_in_range_node(size, align, goal, limit, nid,
					   flags);
	if (!addr && (flags & MEMBLOCK_MIRROR)) {
		flags &= ~MEMBLOCK_MIRROR;
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		goto again;
	}
	if (!addr)
		return NULL;

	if (memblock_reserve(addr, size))
		return NULL;

	ptr = phys_to_virt(addr);
	memset(ptr, 0, size);

	return ptr;
}
static void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
					u64 goal, u64 limit)
{
	void *ptr;
	u64 addr;

	if (limit > memblock.current_limit)
		limit = memblock.current_limit;

	addr = memblock_find_in_range_node(goal, limit, size, align, nid);
	if (!addr)
		return NULL;

#ifndef CONFIG_LIB
	ptr = phys_to_virt(addr);
#else
	BUG_ON(total_ram == NULL);
	ptr = total_ram + ((unsigned long)phys_to_virt(addr) - PAGE_OFFSET);
#endif
	memset(ptr, 0, size);
	memblock_reserve(addr, size);
	/*
	 * The min_count is set to 0 so that bootmem allocated blocks
	 * are never reported as leaks.
	 */
	kmemleak_alloc(ptr, size, 0, 0);

	return ptr;
}
phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align,
					int nid)
{
	phys_addr_t found;

	/*
	 * We align the size to limit fragmentation. Without this, a lot of
	 * small allocs quickly eat up the whole reserve array on sparc
	 */
	size = round_up(size, align);

	found = memblock_find_in_range_node(0, MEMBLOCK_ALLOC_ACCESSIBLE,
					    size, align, nid);
	if (found && !memblock_reserve(found, size))
		return found;

	return 0;
}