Example #1
/**
 * kvm_cma_reserve() - reserve area for kvm hash pagetable
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init kvm_cma_reserve(void)
{
	unsigned long align_size;
	struct memblock_region *reg;
	phys_addr_t selected_size = 0;
	/*
	 * We cannot use memblock_phys_mem_size() here, because
	 * memblock_analyze() has not been called yet.
	 */
	for_each_memblock(memory, reg)
		selected_size += memblock_region_memory_end_pfn(reg) -
				 memblock_region_memory_base_pfn(reg);

	selected_size = (selected_size * kvm_cma_resv_ratio / 100) << PAGE_SHIFT;
	if (selected_size) {
		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)selected_size / SZ_1M);
		/*
	 * Old CPUs require the HPT to be aligned to a multiple of its size,
	 * so for them make the alignment the maximum size we could request.
		 */
		if (!cpu_has_feature(CPU_FTR_ARCH_206))
			align_size = __rounddown_pow_of_two(selected_size);
		else
			align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;

		align_size = max(kvm_rma_pages << PAGE_SHIFT, align_size);
		kvm_cma_declare_contiguous(selected_size, align_size);
	}
}
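A quick sanity check of the sizing arithmetic above, as a standalone sketch. The 4 GiB total and the 5 percent ratio are assumptions for illustration (5 is, to the best of this note's knowledge, the in-tree default for kvm_cma_resv_ratio); the expression mirrors the kernel code, shifting pages to bytes last.

/*
 * Hedged userspace sketch of kvm_cma_reserve()'s sizing arithmetic.
 * The 4 GiB total and the 5% ratio are invented for illustration.
 */
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned long total_pfns = 4UL << (30 - PAGE_SHIFT);	/* 4 GiB of 4 KiB pages */
	unsigned long resv_ratio = 5;				/* percent, assumed default */
	unsigned long long selected_size =
		(unsigned long long)(total_pfns * resv_ratio / 100) << PAGE_SHIFT;

	printf("reserving %llu MiB\n", selected_size >> 20);	/* prints 204 */
	return 0;
}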
Example #2
static void __init request_standard_resources(void)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(__init_begin - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		res = alloc_bootmem_low(sizeof(*res));
		if (memblock_is_nomap(region)) {
			res->name  = "reserved";
			res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
		} else {
			res->name  = "System RAM";
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
		}
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}
}
Example #3
File: init.c Project: 383530895/linux
static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
	struct memblock_region *reg;
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	unsigned long max_dma = min;

	memset(zone_size, 0, sizeof(zone_size));

	/* 4GB maximum for 32-bit only capable devices */
	if (IS_ENABLED(CONFIG_ZONE_DMA)) {
		max_dma = PFN_DOWN(max_zone_dma_phys());
		zone_size[ZONE_DMA] = max_dma - min;
	}
	zone_size[ZONE_NORMAL] = max - max_dma;

	memcpy(zhole_size, zone_size, sizeof(zhole_size));

	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (start >= max)
			continue;

		if (IS_ENABLED(CONFIG_ZONE_DMA) && start < max_dma) {
			unsigned long dma_end = min(end, max_dma);
			zhole_size[ZONE_DMA] -= dma_end - start;
		}

		if (end > max_dma) {
			unsigned long normal_end = min(end, max);
			unsigned long normal_start = max(start, max_dma);
			zhole_size[ZONE_NORMAL] -= normal_end - normal_start;
		}
	}
}
Example #4
/**
 * kvm_cma_reserve() - reserve area for kvm hash pagetable
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the memblock allocator
 * has been activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init kvm_cma_reserve(void)
{
	unsigned long align_size;
	struct memblock_region *reg;
	phys_addr_t selected_size = 0;

	/*
	 * We need CMA reservation only when we are in HV mode
	 */
	if (!cpu_has_feature(CPU_FTR_HVMODE))
		return;
	/*
	 * We cannot use memblock_phys_mem_size() here, because
	 * memblock_analyze() has not been called yet.
	 */
	for_each_memblock(memory, reg)
		selected_size += memblock_region_memory_end_pfn(reg) -
				 memblock_region_memory_base_pfn(reg);

	selected_size = (selected_size * kvm_cma_resv_ratio / 100) << PAGE_SHIFT;
	if (selected_size) {
		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)selected_size / SZ_1M);
		align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
		cma_declare_contiguous(0, selected_size, 0, align_size,
			KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false, &kvm_cma);
	}
}
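For orientation: in this kernel vintage, cma_declare_contiguous() appears to take, in order, a base address (0 lets CMA pick one), the size, an upper limit (0 for none), the alignment, order_per_bit (here KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, so each CMA bitmap bit tracks a 2^KVM_CMA_CHUNK_ORDER-byte chunk), a fixed flag, and an output struct cma pointer. This reading is inferred from the call site above rather than from the header, so treat it as an assumption.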
Example #5
File: init.c Project: VizXu/linux
static void __init uc32_bootmem_free(unsigned long min, unsigned long max_low,
	unsigned long max_high)
{
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	struct memblock_region *reg;

	/*
	 * initialise the zones.
	 */
	memset(zone_size, 0, sizeof(zone_size));

	/*
	 * The memory size has already been determined.  If we need
	 * to do anything fancy with the allocation of this memory
	 * to the zones, now is the time to do it.
	 */
	zone_size[0] = max_low - min;

	/*
	 * Calculate the size of the holes.
	 *  holes = node_size - sum(bank_sizes)
	 */
	memcpy(zhole_size, zone_size, sizeof(zhole_size));
	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (start < max_low) {
			unsigned long low_end = min(end, max_low);
			zhole_size[0] -= low_end - start;
		}
	}
}
Example #6
File: init.c Project: 3null/linux
/*
 * This keeps memory configuration data used by a couple memory
 * initialization functions, as well as show_mem() for the skipping
 * of holes in the memory map.  It is populated by arm_add_memory().
 */
void show_mem(unsigned int filter)
{
	int free = 0, total = 0, reserved = 0;
	int shared = 0, cached = 0, slab = 0;
	struct memblock_region *reg;

	printk("Mem-info:\n");
	show_free_areas(filter);

	for_each_memblock(memory, reg) {
		unsigned int pfn1, pfn2;
		struct page *page, *end;

		pfn1 = memblock_region_memory_base_pfn(reg);
		pfn2 = memblock_region_memory_end_pfn(reg);

		page = pfn_to_page(pfn1);
		end  = pfn_to_page(pfn2 - 1) + 1;

		do {
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (PageSlab(page))
				slab++;
			else if (!page_count(page))
				free++;
			else
				shared += page_count(page) - 1;
			pfn1++;
			page = pfn_to_page(pfn1);
		} while (pfn1 < pfn2);
	}
}
Example #7
static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
	struct memblock_region *reg;
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	unsigned long max_dma32 = min;

	memset(zone_size, 0, sizeof(zone_size));

#ifdef CONFIG_ZONE_DMA32
	/* 4GB maximum for 32-bit only capable devices */
	max_dma32 = min(max, MAX_DMA32_PFN);
	zone_size[ZONE_DMA32] = max(min, max_dma32) - min;
#endif
	zone_size[ZONE_NORMAL] = max - max_dma32;

	memcpy(zhole_size, zone_size, sizeof(zhole_size));

	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (start >= max)
			continue;
#ifdef CONFIG_ZONE_DMA32
		if (start < max_dma32) {
			unsigned long dma_end = min(end, max_dma32);
			zhole_size[ZONE_DMA32] -= dma_end - start;
		}
#endif
		if (end > max_dma32) {
			unsigned long normal_end = min(end, max);
			unsigned long normal_start = max(start, max_dma32);
			zhole_size[ZONE_NORMAL] -= normal_end - normal_start;
		}
	}
}
Example #8
	for_each_memblock(memory, reg) {
		unsigned long start_pfn = memblock_region_memory_base_pfn(reg);
		unsigned long end_pfn = memblock_region_memory_end_pfn(reg);

		memblock_set_node(PFN_PHYS(start_pfn),
				  PFN_PHYS(end_pfn - start_pfn),
				  &memblock.memory, 0);
	}
Example #9
File: init.c Project: Holong/LinuxKernel
// start_pfn : physical small-page frame number of the start of bank 0		(0x20000)
// end_pfn   : physical small-page frame number of the last address of bank 0	(0x4f800)
static void __init arm_bootmem_init(unsigned long start_pfn,
	unsigned long end_pfn)
{
	struct memblock_region *reg;
	unsigned int boot_pages;
	phys_addr_t bitmap;
	pg_data_t *pgdat;
	// pg_data_t : struct pglist_data

	/*
	 * Allocate the bootmem bitmap page.  This must be in a region
	 * of memory which has already been mapped.
	 */
	boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
	// boot_pages : 6
	// returns the number of page frames needed to cover start_pfn ~ end_pfn
	// as a bitmap

	// boot_pages << PAGE_SHIFT : 0x6000, L1_CACHE_BYTES : 64, __pfn_to_phys(end_pfn) : 0x4F800000
	bitmap = memblock_alloc_base(boot_pages << PAGE_SHIFT, L1_CACHE_BYTES,
				__pfn_to_phys(end_pfn));
	// carves the bitmap area out of a non-reserved region;
	// the (physical) start address of that area is returned

	/*
	 * Initialise the bootmem allocator, handing the
	 * memory banks over to bootmem.
	 */
	node_set_online(0);	// an empty function on this configuration
	pgdat = NODE_DATA(0);
	// *pgdat = contig_page_data
	//
	//	.bdata = &bootmem_node_data[0]
	//		 currently still zero-filled
	//
	init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);
	// registers pgdat on bdata_list
	// initialises the bitmap to 0xFF (everything reserved)

	/* Free the lowmem regions from memblock into bootmem. */
	for_each_memblock(memory, reg) {
	// for (reg = memblock.memory.regions; reg < (memblock.memory.regions + memblock.memory.cnt); reg++)
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);
		// start : 0x20000
		// end   : 0xA0000

		if (end >= end_pfn)
			end = end_pfn;
		if (start >= end)
			break;

		// start : 0x20000000, (end - start) << PAGE_SHIFT : 0x2F800000
		free_bootmem(__pfn_to_phys(start), (end - start) << PAGE_SHIFT);
		// clears the bitmap bits for start..end,
		// i.e. marks the whole range FREE for now
	}
}
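The "boot_pages : 6" annotation can be verified with a little standalone arithmetic: one bitmap bit per page frame, rounded up to whole bytes and then whole pages. The numbers below are the ones from the comments above.

/* Hedged check of the bootmem bitmap sizing annotated above. */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long start_pfn = 0x20000, end_pfn = 0x4f800;
	unsigned long frames = end_pfn - start_pfn;		/* 0x2f800 */
	unsigned long bitmap_bytes = (frames + 7) / 8;		/* 0x5f00 */
	unsigned long boot_pages =
		(bitmap_bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;

	printf("boot_pages = %lu\n", boot_pages);		/* prints 6 */
	return 0;
}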
Example #10
File: init.c Project: AlexShiLucky/linux
static void __init do_init_bootmem(void)
{
	struct memblock_region *reg;

	/* Add active regions with valid PFNs. */
	for_each_memblock(memory, reg) {
		unsigned long start_pfn, end_pfn;
		start_pfn = memblock_region_memory_base_pfn(reg);
		end_pfn = memblock_region_memory_end_pfn(reg);
		__add_active_range(0, start_pfn, end_pfn);
	}
}
Example #11
// ARM10C 20131207
// min: 0x20000, max_low: 0x4f800
static void __init arm_bootmem_init(unsigned long start_pfn,
	unsigned long end_pfn)
{
	struct memblock_region *reg;
	unsigned int boot_pages;
	phys_addr_t bitmap;
	pg_data_t *pgdat;

	/*
	 * Allocate the bootmem bitmap page.  This must be in a region
	 * of memory which has already been mapped.
	 */
	// start_pfn: 0x20000, end_pfn: 0x4f800, end_pfn - start_pfn: 0x2f800
	// boot_pages: 0x6
	boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);

	// boot_pages << PAGE_SHIFT: 0x6000, L1_CACHE_BYTES: 64
	// __pfn_to_phys(0x4f800); 0x4f800000
	bitmap = memblock_alloc_base(boot_pages << PAGE_SHIFT, L1_CACHE_BYTES,
				__pfn_to_phys(end_pfn));

	/*
	 * Initialise the bootmem allocator, handing the
	 * memory banks over to bootmem.
	 */
	node_set_online(0);

	// set pglist_data.bdata to the address of bootmem_node_data
	pgdat = NODE_DATA(0);

	// pgdat: ?, __phys_to_pfn(bitmap): ?, start_pfn: 0x20000, end_pfn: 0x4f800
	init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);

	/* Free the lowmem regions from memblock into bootmem. */
	for_each_memblock(memory, reg) {
		// start: 0x20000
		unsigned long start = memblock_region_memory_base_pfn(reg);
		// end: 0xA0000
		unsigned long end = memblock_region_memory_end_pfn(reg);

		// end: 0xA0000, end_pfn: 0x4f800
		if (end >= end_pfn)
			// end: 0x4f800
			end = end_pfn;
		// start: 0x20000, end: 0x4f800
		if (start >= end)
			break;

		// __pfn_to_phys(0x20000): 0x20000000, (end - start) << PAGE_SHIFT: 0x2f800000
		free_bootmem(__pfn_to_phys(start), (end - start) << PAGE_SHIFT);
	}
}
Example #12
static phys_addr_t __init __maybe_unused cma_early_percent_memory(void)
{
	struct memblock_region *reg;
	unsigned long total_pages = 0;

	/*
	 * We cannot use memblock_phys_mem_size() here, because
	 * memblock_analyze() has not been called yet.
	 */
	for_each_memblock(memory, reg)
		total_pages += memblock_region_memory_end_pfn(reg) -
			       memblock_region_memory_base_pfn(reg);

	return (total_pages * CONFIG_CMA_SIZE_PERCENTAGE / 100) << PAGE_SHIFT;
}
Example #13
static void __init request_standard_resources(void)
{
	struct memblock_region *region;
	struct resource *res;
	unsigned long i = 0;
	size_t res_size;

	kernel_code.start   = __pa_symbol(_text);
	kernel_code.end     = __pa_symbol(__init_begin - 1);
	kernel_data.start   = __pa_symbol(_sdata);
	kernel_data.end     = __pa_symbol(_end - 1);

	num_standard_resources = memblock.memory.cnt;
	res_size = num_standard_resources * sizeof(*standard_resources);
	standard_resources = memblock_alloc_low(res_size, SMP_CACHE_BYTES);
	if (!standard_resources)
		panic("%s: Failed to allocate %zu bytes\n", __func__, res_size);

	for_each_memblock(memory, region) {
		res = &standard_resources[i++];
		if (memblock_is_nomap(region)) {
			res->name  = "reserved";
			res->flags = IORESOURCE_MEM;
		} else {
			res->name  = "System RAM";
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
		}
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
#ifdef CONFIG_KEXEC_CORE
		/* Userspace will find "Crash kernel" region in /proc/iomem. */
		if (crashk_res.end && crashk_res.start >= res->start &&
		    crashk_res.end <= res->end)
			request_resource(res, &crashk_res);
#endif
	}
}
Example #14
File: mem.c Project: 03199618/linux
/*
 * walk_memory_resource() needs to make sure there are no holes in a given
 * memory range.  PPC64 does not maintain the memory layout in /proc/iomem.
 * Instead it maintains it in memblock.memory structures.  Walk through the
 * memory regions, find holes and callback for contiguous regions.
 */
int
walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
		void *arg, int (*func)(unsigned long, unsigned long, void *))
{
	struct memblock_region *reg;
	unsigned long end_pfn = start_pfn + nr_pages;
	unsigned long tstart, tend;
	int ret = -1;

	for_each_memblock(memory, reg) {
		tstart = max(start_pfn, memblock_region_memory_base_pfn(reg));
		tend = min(end_pfn, memblock_region_memory_end_pfn(reg));
		if (tstart >= tend)
			continue;
		ret = (*func)(tstart, tend - tstart, arg);
		if (ret)
			break;
	}

	return ret;
}
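A minimal, hypothetical caller, to make the callback contract above concrete. count_pages and the total accumulator are invented for illustration, and max_pfn is assumed as the walk's upper bound:

/* Hypothetical callback: accumulate the number of RAM pages walked. */
static int count_pages(unsigned long start_pfn, unsigned long nr_pages,
		       void *arg)
{
	unsigned long *total = arg;

	*total += nr_pages;
	return 0;	/* a non-zero return would stop the walk early */
}

/* ... in some caller ... */
	unsigned long total = 0;

	walk_system_ram_range(0, max_pfn, &total, count_pages);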
Example #15
static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
	unsigned long max_high)
{
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	/* MAX_NR_ZONES = 3*/
	struct memblock_region *reg;

	/*
	 * initialise the zones.
	 */
	memset(zone_size, 0, sizeof(zone_size));

	/*
	 * The memory size has already been determined.  If we need
	 * to do anything fancy with the allocation of this memory
	 * to the zones, now is the time to do it.
	 */
	zone_size[0] = max_low - min;
	/*! low mem size */
#ifdef CONFIG_HIGHMEM
	zone_size[ZONE_HIGHMEM] = max_high - max_low;
	/*! high mem size*/
#endif

	/*
	 * Calculate the size of the holes.
	 *  holes = node_size - sum(bank_sizes)
	 */
	memcpy(zhole_size, zone_size, sizeof(zhole_size));
	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (start < max_low) {
			unsigned long low_end = min(end, max_low);
			zhole_size[0] -= low_end - start;
		}
#ifdef CONFIG_HIGHMEM
		if (end > max_low) {
			unsigned long high_start = max(start, max_low);
			zhole_size[ZONE_HIGHMEM] -= end - high_start;
		}
#endif
	}
}
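The "holes = node_size - sum(bank_sizes)" formula above can be traced with invented numbers: say lowmem spans PFN 0x20000..0x40000 but RAM comes in two banks, 0x20000..0x30000 and 0x38000..0x40000; the loop then leaves a 0x8000-page hole in zhole_size[0].

/* Hedged, standalone trace of the zhole arithmetic with made-up banks. */
#include <stdio.h>

int main(void)
{
	unsigned long min = 0x20000, max_low = 0x40000;
	unsigned long banks[2][2] = { { 0x20000, 0x30000 },
				      { 0x38000, 0x40000 } };
	unsigned long zhole = max_low - min;	/* start from the full span */
	int i;

	for (i = 0; i < 2; i++) {
		unsigned long start = banks[i][0], end = banks[i][1];

		if (start < max_low) {
			unsigned long low_end = end < max_low ? end : max_low;
			zhole -= low_end - start;
		}
	}
	printf("zhole_size[0] = 0x%lx pages\n", zhole);	/* prints 0x8000 */
	return 0;
}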
Example #16
File: init.c Project: CCNITSilchar/linux
static void __init free_highpages(void)
{
	unsigned long max_low = max_low_pfn;
	struct memblock_region *mem, *res;

	reset_all_zones_managed_pages();
	/* set highmem page free */
	for_each_memblock(memory, mem) {
		unsigned long start = memblock_region_memory_base_pfn(mem);
		unsigned long end = memblock_region_memory_end_pfn(mem);

		/* Ignore complete lowmem entries */
		if (end <= max_low)
			continue;

		if (memblock_is_nomap(mem))
			continue;

		/* Truncate partial highmem entries */
		if (start < max_low)
			start = max_low;

		/* Find and exclude any reserved regions */
		for_each_memblock(reserved, res) {
			unsigned long res_start, res_end;

			res_start = memblock_region_reserved_base_pfn(res);
			res_end = memblock_region_reserved_end_pfn(res);

			if (res_end < start)
				continue;
			if (res_start < start)
				res_start = start;
			if (res_start > end)
				res_start = end;
			if (res_end > end)
				res_end = end;
			if (res_start != start)
				free_area_high(start, res_start);
			start = res_end;
			if (start == end)
				break;
		}
	}
}
Example #17
static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
	struct memblock_region *reg;
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	unsigned long max_dma = min;
#ifdef CONFIG_ZONE_DMA
	unsigned long max_dma_phys, dma_end;
#endif
	memset(zone_size, 0, sizeof(zone_size));

#ifdef CONFIG_ZONE_DMA
#ifdef CONFIG_ZONE_DMA_ALLOW_CUSTOM_SIZE
	max_dma_phys = (unsigned long)dma_to_phys(NULL,
			(min << PAGE_SHIFT) + ZONE_DMA_SIZE_BYTES + 1);
#else
	max_dma_phys = (unsigned long)dma_to_phys(NULL, DMA_BIT_MASK(32) + 1);
#endif /* CONFIG_ZONE_DMA_ALLOW_CUSTOM_SIZE */
	max_dma = max(min, min(max, max_dma_phys >> PAGE_SHIFT));
	zone_size[ZONE_DMA] = max_dma - min;
#endif /* CONFIG_ZONE_DMA */
	zone_size[ZONE_NORMAL] = max - max_dma;

	memcpy(zhole_size, zone_size, sizeof(zhole_size));

	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (start >= max)
			continue;

#ifdef CONFIG_ZONE_DMA
		if (start < max_dma) {
			dma_end = min(end, max_dma);
			zhole_size[ZONE_DMA] -= dma_end - start;
		}
#endif
		if (end > max_dma) {
			unsigned long normal_end = min(end, max);
			unsigned long normal_start = max(start, max_dma);
			zhole_size[ZONE_NORMAL] -= normal_end - normal_start;
		}
	}
}
Example #18
/*
 * This keeps memory configuration data used by a couple memory
 * initialization functions, as well as show_mem() for the skipping
 * of holes in the memory map.  It is populated by arm_add_memory().
 */
void show_mem(unsigned int filter)
{
	int free = 0, total = 0, reserved = 0;
	int shared = 0, cached = 0, slab = 0;
	struct memblock_region *reg;

	printk("Mem-info:\n");
	show_free_areas(filter);

	if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
		return;

	for_each_memblock(memory, reg) {
		unsigned int pfn1, pfn2;
		struct page *page, *end;

		pfn1 = memblock_region_memory_base_pfn(reg);
		pfn2 = memblock_region_memory_end_pfn(reg);

		page = pfn_to_page(pfn1);
		end  = pfn_to_page(pfn2 - 1) + 1;

		do {
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (PageSlab(page))
				slab++;
			else if (!page_count(page))
				free++;
			else
				shared += page_count(page) - 1;
			page++;
#ifdef CONFIG_SPARSEMEM
			pfn1++;
			if (!(pfn1 % PAGES_PER_SECTION))
				page = pfn_to_page(pfn1);
		} while (pfn1 < pfn2);
#else
		} while (page < end);
#endif
	}
}
Example #19
static void __init arm_bootmem_init(unsigned long start_pfn,
	unsigned long end_pfn)
{
	struct memblock_region *reg;
	unsigned int boot_pages;
	phys_addr_t bitmap;
	pg_data_t *pgdat;

	/*
	 * Allocate the bootmem bitmap page.  This must be in a region
	 * of memory which has already been mapped.
	 */
	boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
	bitmap = memblock_alloc_base(boot_pages << PAGE_SHIFT, L1_CACHE_BYTES,
				__pfn_to_phys(end_pfn));

	/*
	 * Initialise the bootmem allocator, handing the
	 * memory banks over to bootmem.
	 */
	node_set_online(0);
	pgdat = NODE_DATA(0);
	init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);

	/* Free the lowmem regions from memblock into bootmem. */
	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (end >= end_pfn)
			end = end_pfn;
		if (start >= end)
			break;

		free_bootmem(__pfn_to_phys(start), (end - start) << PAGE_SHIFT);
	}
}
Example #20
File: fadump.c Project: VizXu/linux
/*
 * Returns 1, if there are no holes in boot memory area,
 * 0 otherwise.
 */
static int is_boot_memory_area_contiguous(void)
{
	struct memblock_region *reg;
	unsigned long tstart, tend;
	unsigned long start_pfn = PHYS_PFN(RMA_START);
	unsigned long end_pfn = PHYS_PFN(RMA_START + fw_dump.boot_memory_size);
	unsigned int ret = 0;

	for_each_memblock(memory, reg) {
		tstart = max(start_pfn, memblock_region_memory_base_pfn(reg));
		tend = min(end_pfn, memblock_region_memory_end_pfn(reg));
		if (tstart < tend) {
			/* Memory hole from start_pfn to tstart */
			if (tstart > start_pfn)
				break;

			if (tend == end_pfn) {
				ret = 1;
				break;
			}

			start_pfn = tend + 1;
		}
	}

	return ret;
}
Example #21
static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
	struct memblock_region *reg;
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	unsigned long max_dma = min;
#ifdef CONFIG_ZONE_MOVABLE_CMA
	unsigned long cma_base_pfn = get_zone_movable_cma_base() >> PAGE_SHIFT;
#endif
	memset(zone_size, 0, sizeof(zone_size));

	/* 4GB maximum for 32-bit only capable devices */
	if (IS_ENABLED(CONFIG_ZONE_DMA)) {
		max_dma = PFN_DOWN(max_zone_dma_phys());
#ifdef CONFIG_ZONE_MOVABLE_CMA
		max_dma = min(max_dma, cma_base_pfn);
#endif
		zone_size[ZONE_DMA] = max_dma - min;
	}
#ifdef CONFIG_ZONE_MOVABLE_CMA
	zone_size[ZONE_NORMAL] = cma_base_pfn - max_dma;
	zone_size[ZONE_MOVABLE] = max - cma_base_pfn;
#else
	zone_size[ZONE_NORMAL] = max - max_dma;
#endif

	memcpy(zhole_size, zone_size, sizeof(zhole_size));

	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (start >= max)
			continue;

		if (IS_ENABLED(CONFIG_ZONE_DMA) && start < max_dma) {
			unsigned long dma_end = min(end, max_dma);
			zhole_size[ZONE_DMA] -= dma_end - start;
		}

#ifdef CONFIG_ZONE_MOVABLE_CMA
		if (end > max_dma && end < cma_base_pfn) {
			unsigned long normal_end = min(end, cma_base_pfn);
			unsigned long normal_start = max(start, max_dma);

			zhole_size[ZONE_NORMAL] -= normal_end - normal_start;
		}

		if (end > cma_base_pfn) {
			unsigned long movable_end = min(end, max);
			unsigned long movable_start = max(start, cma_base_pfn);

			zhole_size[ZONE_MOVABLE] -= movable_end - movable_start;
		}
#else
		if (end > max_dma) {
			unsigned long normal_end = min(end, max);
			unsigned long normal_start = max(start, max_dma);
			zhole_size[ZONE_NORMAL] -= normal_end - normal_start;
		}
#endif
	}
}
Example #22
File: init.c Project: AlexShiLucky/linux
	for_each_memblock(memory, reg) {
		int nid = memblock_get_region_node(reg);

		memory_present(nid, memblock_region_memory_base_pfn(reg),
			memblock_region_memory_end_pfn(reg));
	}