Example 1
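/*
 * Compute the page-frame-number limits for boot-time memory setup: the
 * first usable PFN, the top of low (directly mappable) memory as capped
 * by the current memblock limit, and the top of all DRAM.
 */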
static void __init find_limits(unsigned long *min, unsigned long *max_low,
			       unsigned long *max_high)
{
	*max_low = PFN_DOWN(memblock_get_current_limit());
	*min = PFN_UP(memblock_start_of_DRAM());
	*max_high = PFN_DOWN(memblock_end_of_DRAM());
}
Example 2
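/*
 * Early boot memory initialization: reserve unusable low memory, honour
 * device-tree reservations, derive the PFN limits from the memblock DRAM
 * range, and confine memblock allocations and CMA to low memory.
 */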
void __init bootmem_init(void)
{
	/* Reserve all memory below PHYS_OFFSET, as memory
	 * accounting doesn't work for pages below that address.
	 *
	 * If PHYS_OFFSET is zero, reserve the page at address 0:
	 * successful allocations should never return NULL.
	 */
	if (PHYS_OFFSET)
		memblock_reserve(0, PHYS_OFFSET);
	else
		memblock_reserve(0, 1);

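	/* Reserve regions described by the device tree (/memreserve/ and reserved-memory) */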
	early_init_fdt_scan_reserved_mem();

	if (!memblock_phys_mem_size())
		panic("No memory found!\n");

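	/* Derive the usable PFN bounds from the memblock-reported DRAM range */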
	min_low_pfn = PFN_UP(memblock_start_of_DRAM());
	min_low_pfn = max(min_low_pfn, PFN_UP(PHYS_OFFSET));
	max_pfn = PFN_DOWN(memblock_end_of_DRAM());
	max_low_pfn = min(max_pfn, MAX_LOW_PFN);

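	/* Keep memblock allocations and the CMA area within low memory */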
	memblock_set_current_limit(PFN_PHYS(max_low_pfn));
	dma_contiguous_reserve(PFN_PHYS(max_low_pfn));

	memblock_dump_all();
}
Example 3
static long long __init keystone_pv_fixup(void)
{
	long long offset;
	phys_addr_t mem_start, mem_end;

	mem_start = memblock_start_of_DRAM();
	mem_end = memblock_end_of_DRAM();

	/* nothing to do if we are running within the low (sub-4 GiB) address space */
	if (mem_start >= KEYSTONE_LOW_PHYS_START &&
	    mem_end   <= KEYSTONE_LOW_PHYS_END)
		return 0;

	if (mem_start < KEYSTONE_HIGH_PHYS_START ||
	    mem_end   > KEYSTONE_HIGH_PHYS_END) {
		pr_crit("Invalid address space for memory (%08llx-%08llx)\n",
		        (u64)mem_start, (u64)mem_end);
		return 0;
	}

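	/* Physical distance between the low and high DRAM aliases */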
	offset = KEYSTONE_HIGH_PHYS_START - KEYSTONE_LOW_PHYS_START;

	/* Populate the arch idmap hook */
	arch_phys_to_idmap_offset = -offset;

	return offset;
}
Example 4
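/*
 * Relocate the kernel's view of RAM to the high (above 4 GiB) DRAM alias:
 * patch the ARM phys/virt translation offsets and install the idmap and
 * DMA pfn-offset hooks that map back to the low alias.
 */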
static void __init keystone_init_meminfo(void)
{
	bool lpae = IS_ENABLED(CONFIG_ARM_LPAE);
	bool pvpatch = IS_ENABLED(CONFIG_ARM_PATCH_PHYS_VIRT);
	phys_addr_t offset = PHYS_OFFSET - KEYSTONE_LOW_PHYS_START;
	phys_addr_t mem_start, mem_end;

	mem_start = memblock_start_of_DRAM();
	mem_end = memblock_end_of_DRAM();

	/* nothing to do if we are running within the low (sub-4 GiB) address space */
	if (mem_start >= KEYSTONE_LOW_PHYS_START &&
	    mem_end   <= KEYSTONE_LOW_PHYS_END)
		return;

	if (!lpae || !pvpatch) {
		pr_crit("Enable %s%s%s to run outside 32-bit space\n",
		      !lpae ? __stringify(CONFIG_ARM_LPAE) : "",
		      (!lpae && !pvpatch) ? " and " : "",
		      !pvpatch ? __stringify(CONFIG_ARM_PATCH_PHYS_VIRT) : "");
	}

	if (mem_start < KEYSTONE_HIGH_PHYS_START ||
	    mem_end   > KEYSTONE_HIGH_PHYS_END) {
		pr_crit("Invalid address space for memory (%08llx-%08llx)\n",
		      (u64)mem_start, (u64)mem_end);
	}

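	/* Rebase into the high alias and patch the phys/virt translation offsets */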
	offset += KEYSTONE_HIGH_PHYS_START;
	__pv_phys_pfn_offset = PFN_DOWN(offset);
	__pv_offset = (offset - PAGE_OFFSET);

	/* Populate the arch idmap hook */
	arch_virt_to_idmap = keystone_virt_to_idmap;
	platform_nb.notifier_call = keystone_platform_notifier;
	keystone_dma_pfn_offset = PFN_DOWN(KEYSTONE_HIGH_PHYS_START -
						KEYSTONE_LOW_PHYS_START);

	pr_info("Switching to high address space at 0x%llx\n", (u64)offset);
}
Example 5
/*
 * Return the maximum physical address for ZONE_DMA (DMA_BIT_MASK(32)). It
 * currently assumes that for memory starting above 4G, 32-bit devices will
 * use a DMA offset.
 */
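/*
 * Worked example (hypothetical addresses): for DRAM starting at
 * 0x8_8000_0000, the mask yields offset = 0x8_0000_0000, so the limit is
 * min(0x9_0000_0000, memblock_end_of_DRAM()).
 */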
static phys_addr_t max_zone_dma_phys(void)
{
	phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, 32);
	return min(offset + (1ULL << 32), memblock_end_of_DRAM());
}