static void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
                                               u64 goal, u64 limit)
{
        void *ptr;
        u64 addr;
        ulong flags = choose_memblock_flags();

        if (limit > memblock.current_limit)
                limit = memblock.current_limit;

again:
        addr = memblock_find_in_range_node(size, align, goal, limit, nid,
                                           flags);
        if (!addr && (flags & MEMBLOCK_MIRROR)) {
                flags &= ~MEMBLOCK_MIRROR;
                pr_warn("Could not allocate %pap bytes of mirrored memory\n",
                        &size);
                goto again;
        }
        if (!addr)
                return NULL;

        if (memblock_reserve(addr, size))
                return NULL;

        ptr = phys_to_virt(addr);
        memset(ptr, 0, size);
        /*
         * The min_count is set to 0 so that bootmem allocated blocks
         * are never reported as leaks.
         */
        kmemleak_alloc(ptr, size, 0, 0);
        return ptr;
}
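For reference, a sketch of the kmemleak hook these snippets rely on, paraphrased from include/linux/kmemleak.h. The parameter comments summarize the min_count semantics the bootmem callers depend on: 0 means the block is never reported as a leak, and a negative value means kmemleak ignores the object entirely.

/*
 * Sketch of the tracking hook's declaration (paraphrased):
 *
 * @ptr:       start of the allocated block
 * @size:      size of the block in bytes
 * @min_count: minimum number of references that must be found while
 *             scanning for the block not to be reported as a leak;
 *             0 means it is never reported, a negative value means
 *             the object is ignored entirely
 * @gfp:       flags used for kmemleak's own metadata allocation
 */
void kmemleak_alloc(const void *ptr, size_t size, int min_count, gfp_t gfp);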
static void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
                                               u64 goal, u64 limit)
{
        void *ptr;
        u64 addr;

        if (limit > memblock.current_limit)
                limit = memblock.current_limit;

        addr = memblock_find_in_range_node(size, align, goal, limit, nid);
        if (!addr)
                return NULL;

        if (memblock_reserve(addr, size))
                return NULL;

        ptr = phys_to_virt(addr);
        memset(ptr, 0, size);
        /*
         * The min_count is set to 0 so that bootmem allocated blocks
         * are never reported as leaks.
         */
        kmemleak_alloc(ptr, size, 0, 0);
        return ptr;
}
/**
 * Allocate the requested size from memblock.
 *
 * @return the allocated virtual address
 */
static void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
                                               u64 goal, u64 limit)
{
        void *ptr;      /**< virtual address */
        u64 addr;       /**< physical address */

        /* Clamp limit to the current memblock limit. */
        if (limit > memblock.current_limit)
                limit = memblock.current_limit;

        addr = find_memory_core_early(nid, size, align, goal, limit);

        if (addr == MEMBLOCK_ERROR)
                return NULL;

        ptr = phys_to_virt(addr);
        memset(ptr, 0, size);
        memblock_x86_reserve_range(addr, addr + size, "BOOTMEM");
        /*
         * The min_count is set to 0 so that bootmem allocated blocks
         * are never reported as leaks.
         */
        /*
         * Registers the block with kmemleak so leaks can be reported.
         *
         * FIXME: if min_count (the third argument) is 0 the block is never
         * reported as a leak, and if it is -1 the block is ignored entirely;
         * it is unclear whether this call is really needed in this case.
         */
        kmemleak_alloc(ptr, size, 0, 0);
        return ptr;
}
static void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
                                               u64 goal, u64 limit)
{
        void *ptr;
        u64 addr;

        if (limit > memblock.current_limit)
                limit = memblock.current_limit;

        addr = memblock_find_in_range_node(goal, limit, size, align, nid);
        if (!addr)
                return NULL;

#ifndef CONFIG_LIB
        ptr = phys_to_virt(addr);
#else
        BUG_ON(total_ram == NULL);
        ptr = total_ram + ((unsigned long)phys_to_virt(addr) - PAGE_OFFSET);
#endif
        memset(ptr, 0, size);
        memblock_reserve(addr, size);
        /*
         * The min_count is set to 0 so that bootmem allocated blocks
         * are never reported as leaks.
         */
        kmemleak_alloc(ptr, size, 0, 0);
        return ptr;
}
static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
{
        if (nents == SG_MAX_SINGLE_ALLOC) {
                void *ptr = (void *) __get_free_page(gfp_mask);
                kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
                return ptr;
        } else
                return kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
}
static void *__meminit alloc_page_ext(size_t size, int nid)
{
        gfp_t flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN;
        void *addr = NULL;

        addr = alloc_pages_exact_nid(nid, size, flags);
        if (addr) {
                kmemleak_alloc(addr, size, 1, flags);
                return addr;
        }

        addr = vzalloc_node(size, nid);

        return addr;
}
static void *__meminit alloc_page_cgroup(size_t size, int nid)
{
        gfp_t flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN;
        void *addr = NULL;

        addr = alloc_pages_exact_nid(nid, size, flags);
        if (addr) {
                kmemleak_alloc(addr, size, 1, flags);
                return addr;
        }

        if (node_state(nid, N_HIGH_MEMORY))
                addr = vzalloc_node(size, nid);
        else
                addr = vzalloc(size);

        return addr;
}
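Note that the explicit kmemleak_alloc() is only needed on the page-allocator path; vzalloc()/vzalloc_node() memory is already tracked by kmemleak through the vmalloc hooks. A minimal sketch of how a matching free helper would undo both paths; the name free_page_cgroup() and the table_size computation are assumptions for illustration:

static void free_page_cgroup(void *addr)
{
        if (is_vmalloc_addr(addr)) {
                /* vmalloc'ed memory is tracked (and untracked) by kmemleak itself */
                vfree(addr);
        } else {
                struct page *page = virt_to_page(addr);
                /* assumed size of the per-section table, mirroring the allocation */
                size_t table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;

                BUG_ON(PageReserved(page));
                /* drop kmemleak's record before handing the pages back */
                kmemleak_free(addr);
                free_pages_exact(addr, table_size);
        }
}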
/*
 * The default behaviour of sg_alloc_table() is to use these kmalloc/kfree
 * helpers.
 */
static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
{
        if (nents == SG_MAX_SINGLE_ALLOC) {
                /*
                 * Kmemleak doesn't track page allocations as they are not
                 * commonly used (in a raw form) for kernel data structures.
                 * As we chain together a list of pages and then a normal
                 * kmalloc (tracked by kmemleak), in order for that last
                 * allocation not to become decoupled (and thus a
                 * false-positive) we need to inform kmemleak of all the
                 * intermediate allocations.
                 */
                void *ptr = (void *) __get_free_page(gfp_mask);
                kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
                return ptr;
        } else
                return kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
}
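For the same reason, the free-side helper has to tell kmemleak the page is gone before returning it, otherwise the tracker would keep a stale record. A minimal sketch of the matching helper, assuming the usual sg_kfree() counterpart:

static void sg_kfree(struct scatterlist *sg, unsigned int nents)
{
        if (nents == SG_MAX_SINGLE_ALLOC) {
                /* mirror the kmemleak_alloc() done in sg_kmalloc() */
                kmemleak_free(sg);
                free_page((unsigned long) sg);
        } else
                kfree(sg);
}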
static __always_inline void *
__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
{
        unsigned int *m;
        int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
        void *ret;

        gfp &= gfp_allowed_mask;

        lockdep_trace_alloc(gfp);

        if (size < PAGE_SIZE - align) {
                if (!size)
                        return ZERO_SIZE_PTR;

                m = slob_alloc(size + align, gfp, align, node);

                if (!m)
                        return NULL;
                *m = size;
                ret = (void *)m + align;

                trace_kmalloc_node(caller, ret,
                                   size, size + align, gfp, node);
        } else {
                unsigned int order = get_order(size);

                if (likely(order))
                        gfp |= __GFP_COMP;
                ret = slob_new_pages(gfp, order, node);

                trace_kmalloc_node(caller, ret,
                                   size, PAGE_SIZE << order, gfp, node);
        }

        kmemleak_alloc(ret, size, 1, gfp);
        return ret;
}
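The free path has to stay symmetric: kmemleak must forget the object before it goes back to SLOB or the page allocator. A simplified sketch (tracing omitted) of how kfree() would pair with the allocation above; the slob_free()/__free_pages() split mirrors the two branches of __do_kmalloc_node():

void kfree(const void *block)
{
        struct page *sp;

        if (unlikely(ZERO_OR_NULL_PTR(block)))
                return;
        kmemleak_free(block);           /* forget the block before freeing it */

        sp = virt_to_page(block);
        if (PageSlab(sp)) {
                /* small objects carry their size in the header slob_alloc() set up */
                int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
                unsigned int *m = (unsigned int *)(block - align);
                slob_free(m, *m + align);
        } else
                __free_pages(sp, compound_order(sp));
}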
static void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
                                               u64 goal, u64 limit)
{
        void *ptr;
        u64 addr;

        if (limit > memblock.current_limit)
                limit = memblock.current_limit;

        addr = find_memory_core_early(nid, size, align, goal, limit);

        if (addr == MEMBLOCK_ERROR)
                return NULL;

        ptr = phys_to_virt(addr);
        memset(ptr, 0, size);
        memblock_x86_reserve_range(addr, addr + size, "BOOTMEM");
        /*
         * The min_count is set to 0 so that bootmem allocated blocks
         * are never reported as leaks.
         */
        kmemleak_alloc(ptr, size, 0, 0);
        return ptr;
}