Example #1
int arch_remove_memory(u64 start, u64 size)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct zone *zone;
	int ret;

	zone = page_zone(pfn_to_page(start_pfn));
	ret = __remove_pages(zone, start_pfn, nr_pages);
	if (ret)
		return ret;

	/* Remove htab bolted mappings for this section of memory */
	start = (unsigned long)__va(start);
	ret = remove_section_mapping(start, start + size);

	/* Ensure all vmalloc mappings are flushed in case they also
	 * hit that section of memory
	 */
	vm_unmap_aliases();

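	/* Resize the hash page table to match the current total memory size */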
	resize_hpt_for_hotplug(memblock_phys_mem_size());

	return ret;
}
Example #2
void __init early_init_devtree(void *params)
{
	pr_debug(" -> early_init_devtree(%p)\n", params);

	/* Setup flat device-tree pointer */
	initial_boot_params = params;

	/* Retrieve various pieces of information from the /chosen node of the
	 * device-tree, including the platform type, initrd location and
	 * size, TCE reserve, and more ...
	 */
	of_scan_flat_dt(early_init_dt_scan_chosen, cmd_line);

	/* Scan memory nodes and rebuild MEMBLOCKs */
	of_scan_flat_dt(early_init_dt_scan_root, NULL);
	of_scan_flat_dt(early_init_dt_scan_memory, NULL);

	/* Save command line for /proc/cmdline and then parse parameters */
	strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE);
	parse_early_param();

	memblock_allow_resize();

	pr_debug("Phys. mem: %lx\n", (unsigned long) memblock_phys_mem_size());

	pr_debug(" <- early_init_devtree()\n");
}
Example #3
void __init bootmem_init(void)
{
	/* Reserve all memory below PHYS_OFFSET, as memory
	 * accounting doesn't work for pages below that address.
	 *
	 * If PHYS_OFFSET is zero, reserve the page at address 0:
	 * successful allocations should never return NULL.
	 */
	if (PHYS_OFFSET)
		memblock_reserve(0, PHYS_OFFSET);
	else
		memblock_reserve(0, 1);

	early_init_fdt_scan_reserved_mem();

	if (!memblock_phys_mem_size())
		panic("No memory found!\n");

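	/* Derive the PFN limits from memblock's view of DRAM; lowmem is capped at MAX_LOW_PFN */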
	min_low_pfn = PFN_UP(memblock_start_of_DRAM());
	min_low_pfn = max(min_low_pfn, PFN_UP(PHYS_OFFSET));
	max_pfn = PFN_DOWN(memblock_end_of_DRAM());
	max_low_pfn = min(max_pfn, MAX_LOW_PFN);

	memblock_set_current_limit(PFN_PHYS(max_low_pfn));
	dma_contiguous_reserve(PFN_PHYS(max_low_pfn));

	memblock_dump_all();
}
Example #4
int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
{
	struct pglist_data *pgdata;
	struct zone *zone;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int rc;

	resize_hpt_for_hotplug(memblock_phys_mem_size());

	pgdata = NODE_DATA(nid);

	start = (unsigned long)__va(start);
	rc = create_section_mapping(start, start + size);
	if (rc) {
		pr_warning(
			"Unable to create mapping for hot added memory 0x%llx..0x%llx: %d\n",
			start, start + size, rc);
		return -EFAULT;
	}

	/* this should work for most non-highmem platforms */
	zone = pgdata->node_zones +
		zone_for_memory(nid, start, size, 0, for_device);

	return __add_pages(nid, zone, start_pfn, nr_pages);
}
Example #5
int __meminit arch_remove_memory(int nid, u64 start, u64 size,
					struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct page *page;
	int ret;

	/*
	 * If we have an altmap then we need to skip over any reserved PFNs
	 * when querying the zone.
	 */
	page = pfn_to_page(start_pfn);
	if (altmap)
		page += vmem_altmap_offset(altmap);

	ret = __remove_pages(page_zone(page), start_pfn, nr_pages, altmap);
	if (ret)
		return ret;

	/* Remove htab bolted mappings for this section of memory */
	start = (unsigned long)__va(start);
	flush_inval_dcache_range(start, start + size);
	ret = remove_section_mapping(start, start + size);

	/* Ensure all vmalloc mappings are flushed in case they also
	 * hit that section of memory
	 */
	vm_unmap_aliases();

	resize_hpt_for_hotplug(memblock_phys_mem_size());

	return ret;
}
Example #6
const struct machine_desc * __init
setup_machine_tags(phys_addr_t __atags_pointer, unsigned int machine_nr)
{
	struct tag *tags = (struct tag *)&default_tags;
	const struct machine_desc *mdesc = NULL, *p;
	char *from = default_command_line;

	default_tags.mem.start = PHYS_OFFSET;

	/*
	 * locate machine in the list of supported machines.
	 */
	for_each_machine_desc(p)
		if (machine_nr == p->nr) {
			pr_info("Machine: %s\n", p->name);
			mdesc = p;
			break;
		}

	if (!mdesc) {
		early_print("\nError: unrecognized/unsupported machine ID"
			    " (r1 = 0x%08x).\n\n", machine_nr);
		dump_machine_table(); /* does not return */
	}

	if (__atags_pointer)
		tags = phys_to_virt(__atags_pointer);
	else if (mdesc->atag_offset)
		tags = (void *)(PAGE_OFFSET + mdesc->atag_offset);

#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
	/*
	 * If we have the old style parameters, convert them to
	 * a tag list.
	 */
	if (tags->hdr.tag != ATAG_CORE)
		convert_to_tag_list(tags);
#endif
	if (tags->hdr.tag != ATAG_CORE) {
		early_print("Warning: Neither atags nor dtb found\n");
		tags = (struct tag *)&default_tags;
	}

	if (mdesc->fixup)
		mdesc->fixup(tags, &from);

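	/*
	 * If memblock already knows about memory, squash the ATAG_MEM
	 * entries first so that parse_tags() ignores them.
	 */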
	if (tags->hdr.tag == ATAG_CORE) {
		if (memblock_phys_mem_size())
			squash_mem_tags(tags);
		save_atags(tags);
		parse_tags(tags);
	}

	/* parse_early_param needs a boot_command_line */
	strlcpy(boot_command_line, from, COMMAND_LINE_SIZE);

	return mdesc;
}
Example #7
/*
 * reserve_crashkernel() - reserves memory for crash kernel
 *
 * This function reserves the memory area given by the "crashkernel=" kernel
 * command line parameter. The reserved memory is used by the dump-capture
 * kernel when the primary kernel crashes.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_base, crash_size;
	int ret;

	ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
				&crash_size, &crash_base);
	/* no crashkernel= or invalid value specified */
	if (ret || !crash_size)
		return;

	crash_size = PAGE_ALIGN(crash_size);

	if (crash_base == 0) {
		/* Current arm64 boot protocol requires 2MB alignment */
		crash_base = memblock_find_in_range(0, ARCH_LOW_ADDRESS_LIMIT,
				crash_size, SZ_2M);
		if (crash_base == 0) {
			pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
				crash_size);
			return;
		}
	} else {
		/* User specifies base address explicitly. */
		if (!memblock_is_region_memory(crash_base, crash_size)) {
			pr_warn("cannot reserve crashkernel: region is not memory\n");
			return;
		}

		if (memblock_is_region_reserved(crash_base, crash_size)) {
			pr_warn("cannot reserve crashkernel: region overlaps reserved memory\n");
			return;
		}

		if (!IS_ALIGNED(crash_base, SZ_2M)) {
			pr_warn("cannot reserve crashkernel: base address is not 2MB aligned\n");
			return;
		}
	}
	memblock_reserve(crash_base, crash_size);

	pr_info("crashkernel reserved: 0x%016llx - 0x%016llx (%lld MB)\n",
		crash_base, crash_base + crash_size, crash_size >> 20);

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
}
Example #8
int __meminit arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
		bool want_memblock)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int rc;

	resize_hpt_for_hotplug(memblock_phys_mem_size());

	start = (unsigned long)__va(start);
	rc = create_section_mapping(start, start + size, nid);
	if (rc) {
		pr_warn("Unable to create mapping for hot added memory 0x%llx..0x%llx: %d\n",
			start, start + size, rc);
		return -EFAULT;
	}
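	/* Flush and invalidate the D-cache over the linear mapping of the new range */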
	flush_inval_dcache_range(start, start + size);

	return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
}
Example #9
void __init early_init_devtree(void *params)
{
	pr_debug(" -> early_init_devtree(%p)\n", params);

	/* Setup flat device-tree pointer */
	initial_boot_params = params;

	of_scan_flat_dt(early_init_dt_scan_chosen, cmd_line);

	/* Scan memory nodes and rebuild MEMBLOCKs */
	of_scan_flat_dt(early_init_dt_scan_root, NULL);
	of_scan_flat_dt(early_init_dt_scan_memory, NULL);

	/* Save command line for /proc/cmdline and then parse parameters */
	strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE);
	parse_early_param();

	memblock_allow_resize();

	pr_debug("Phys. mem: %lx\n", (unsigned long) memblock_phys_mem_size());

	pr_debug(" <- early_init_devtree()\n");
}
Example #10
/*
 * Do some initial setup of the system.  The parameters are those which 
 * were passed in from the bootloader.
 */
void __init setup_system(void)
{
	DBG(" -> setup_system()\n");

	/* Apply the CPU-specific and firmware-specific fixups to kernel
	 * text (nop out sections not relevant to this CPU or this firmware)
	 */
	do_feature_fixups(cur_cpu_spec->cpu_features,
			  &__start___ftr_fixup, &__stop___ftr_fixup);
	do_feature_fixups(cur_cpu_spec->mmu_features,
			  &__start___mmu_ftr_fixup, &__stop___mmu_ftr_fixup);
	do_feature_fixups(powerpc_firmware_features,
			  &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
	do_lwsync_fixups(cur_cpu_spec->cpu_features,
			 &__start___lwsync_fixup, &__stop___lwsync_fixup);

	/*
	 * Unflatten the device-tree passed by prom_init or kexec
	 */
	unflatten_device_tree();

	/*
	 * Fill the ppc64_caches & systemcfg structures with information
	 * retrieved from the device-tree.
	 */
	initialize_cache_info();

#ifdef CONFIG_PPC_RTAS
	/*
	 * Initialize RTAS if available
	 */
	rtas_initialize();
#endif /* CONFIG_PPC_RTAS */

	/*
	 * Check if we have an initrd provided via the device-tree
	 */
	check_for_initrd();

	/*
	 * Do some platform-specific early initialization, which includes
	 * setting up the hash table pointers. It also sets up some
	 * interrupt-mapping related options that will be used by
	 * finish_device_tree()
	 */
	if (ppc_md.init_early)
		ppc_md.init_early();

	/*
	 * We can discover serial ports now since the above set up the hash
	 * table management for us, so ioremap works. We do this early so
	 * that further code can be debugged
	 */
	find_legacy_serial_ports();

	/*
	 * Register early console
	 */
	register_early_udbg_console();

	/*
	 * Initialize xmon
	 */
	xmon_setup();

	smp_setup_cpu_maps();
	check_smt_enabled();

#ifdef CONFIG_SMP
	/* Release secondary cpus out of their spinloops at 0x60 now that
	 * we can map physical -> logical CPU ids
	 */
	smp_release_cpus();
#endif

	printk("Starting Linux PPC64 %s\n", init_utsname()->version);

	printk("-----------------------------------------------------\n");
	printk("ppc64_pft_size                = 0x%llx\n", ppc64_pft_size);
	printk("physicalMemorySize            = 0x%llx\n", memblock_phys_mem_size());
	if (ppc64_caches.dline_size != 0x80)
		printk("ppc64_caches.dcache_line_size = 0x%x\n",
		       ppc64_caches.dline_size);
	if (ppc64_caches.iline_size != 0x80)
		printk("ppc64_caches.icache_line_size = 0x%x\n",
		       ppc64_caches.iline_size);
#ifdef CONFIG_PPC_STD_MMU_64
	if (htab_address)
		printk("htab_address                  = 0x%p\n", htab_address);
	printk("htab_hash_mask                = 0x%lx\n", htab_hash_mask);
#endif /* CONFIG_PPC_STD_MMU_64 */
	if (PHYSICAL_START > 0)
		printk("physical_start                = 0x%llx\n",
		       (unsigned long long)PHYSICAL_START);
	printk("-----------------------------------------------------\n");

	DBG(" <- setup_system()\n");
}
Example #11
void __init owl_reserve(void)
{
	phys_addr_t phy_mem_size, phy_mem_end;
	unsigned int owl_ion0_start = 0;
	unsigned int owl_ion1_start = 0;

	phy_mem_size = memblock_phys_mem_size();
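	/*
	 * If the reported size is not a power of two, round it to the
	 * nearest one: round up when the bit below the MSB is set (the
	 * size is closer to the next boundary), otherwise round down.
	 * Keep the original value if the shift would overflow the type.
	 */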
	if (phy_mem_size & (phy_mem_size - 1)) { /* != 2^n ? */
		uint _tmp = __fls(phy_mem_size);
		if (_tmp > 0 && (phy_mem_size & (1U << (_tmp - 1)))) {
			/* close to next boundary */
			_tmp++;
			phy_mem_size =
				(_tmp >= sizeof(phy_mem_size) * 8) ? phy_mem_size : (1U << _tmp);
		} else {
			phy_mem_size = 1U << _tmp;
		}
	}
	s_phy_mem_size_saved = phy_mem_size;
	phy_mem_end = arm_lowmem_limit;
	pr_info("%s: pyhsical memory size %u bytes, end @0x%x\n",
		__func__, phy_mem_size, phy_mem_end);

	memblock_reserve(0, 0x4000); /* reserve low 16K for DDR dqs training */

	of_scan_flat_dt(early_init_dt_scan_ion, (void*)phy_mem_size);

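	/* Carve the FB, kernel-info and ION regions out of the top of lowmem, working downwards */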
	phy_mem_end -= owl_fb_size;
#ifdef CONFIG_VIDEO_OWL_DSS
	owl_fb_start = phy_mem_end;
	memblock_reserve(owl_fb_start, owl_fb_size);
#endif

	phy_mem_end -= owl_kinfo_size;
	owl_kinfo_start = phy_mem_end;
	memblock_reserve(owl_kinfo_start, owl_kinfo_size);

#ifdef CONFIG_ION
	phy_mem_end -= owl_ion0_size;
	owl_ion0_start = phy_mem_end;
	owl_pdev_ion_data.heaps[0].base = owl_ion0_start;
	owl_pdev_ion_data.heaps[0].size = owl_ion0_size;

	/* ion_pmem */
#ifdef CONFIG_CMA
	phy_mem_end -= owl_ion1_size;
	owl_ion1_start = phy_mem_end; /* fake, not used. */
	owl_pdev_ion_data.heaps[1].base = 0;
	owl_pdev_ion_data.heaps[1].size = 0; /* prevent ion_reserve() from digging */
	owl_pdev_ion_data.heaps[1].priv = &(owl_pdev_ion_device.dev);
	dma_contiguous_set_global_reserve_size(owl_ion1_size); /* set size of the CMA global area */
#else /* no CMA */
	phy_mem_end -= owl_ion1_size;
	owl_ion1_start = phy_mem_end;
	owl_pdev_ion_data.heaps[1].base = owl_ion1_start;
	owl_pdev_ion_data.heaps[1].size = owl_ion1_size;
#endif
	ion_reserve(&owl_pdev_ion_data);
#endif

	printk(KERN_INFO "Reserved memory %uMB\n",
		(owl_ion0_size + owl_ion1_size) >> 20);
	printk(KERN_INFO
		"   FB:     0x%08x, %uMB\n"
		"   KINFO:  0x%08x, %uMB\n"
		"   ION0:   0x%08x, %uMB\n"
		"   ION1:   0x%08x, %uMB\n",
		owl_fb_start, owl_fb_size >> 20,
		owl_kinfo_start, owl_kinfo_size >> 20,
		owl_ion0_start, owl_ion0_size >> 20,
		owl_ion1_start, owl_ion1_size >> 20);
}