Пример #1
0
/*
 * Tear down a hot-removed physical memory range: release the pages
 * back from their zone, drop the bolted htab mappings covering the
 * range, and flush any stale vmalloc aliases.
 *
 * Returns 0 on success or a negative errno from the page removal /
 * mapping teardown.
 */
int arch_remove_memory(u64 start, u64 size)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int rc;

	/* Release the struct pages from the zone that owns the first PFN. */
	rc = __remove_pages(page_zone(pfn_to_page(start_pfn)), start_pfn,
			    nr_pages);
	if (rc)
		return rc;

	/* Remove htab bolted mappings for this section of memory. */
	start = (unsigned long)__va(start);
	rc = remove_section_mapping(start, start + size);

	/*
	 * Ensure all vmalloc mappings are flushed in case they also
	 * hit that section of memory.
	 */
	vm_unmap_aliases();

	/* Shrink the hash page table now that memory has gone away. */
	resize_hpt_for_hotplug(memblock_phys_mem_size());

	return rc;
}
Пример #2
0
/*
 * Tear down a hot-removed physical memory range (altmap-aware variant):
 * release the pages from their zone, drop the bolted htab mappings and
 * flush stale vmalloc aliases.
 *
 * Returns 0 on success or a negative errno from the page removal /
 * mapping teardown.
 */
int __meminit arch_remove_memory(int nid, u64 start, u64 size,
					struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct page *first_page;
	int rc;

	/*
	 * If we have an altmap then we need to skip over any reserved
	 * PFNs when querying the zone.
	 */
	first_page = pfn_to_page(start_pfn);
	if (altmap)
		first_page += vmem_altmap_offset(altmap);

	rc = __remove_pages(page_zone(first_page), start_pfn, nr_pages,
			    altmap);
	if (rc)
		return rc;

	/* Remove htab bolted mappings for this section of memory. */
	start = (unsigned long)__va(start);
	flush_inval_dcache_range(start, start + size);
	rc = remove_section_mapping(start, start + size);

	/*
	 * Ensure all vmalloc mappings are flushed in case they also
	 * hit that section of memory.
	 */
	vm_unmap_aliases();

	/* Shrink the hash page table now that memory has gone away. */
	resize_hpt_for_hotplug(memblock_phys_mem_size());

	return rc;
}
Пример #3
0
/*
 * Hot-add a physical memory range: grow the hash page table, create the
 * linear mapping for the new range, and hand the pages to the core MM.
 *
 * Returns 0 on success, -EFAULT if the linear mapping could not be
 * created, or the error from __add_pages().
 */
int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
{
	struct pglist_data *pgdata;
	struct zone *zone;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int rc;

	/* Grow the hash page table before the new memory is used. */
	resize_hpt_for_hotplug(memblock_phys_mem_size());

	pgdata = NODE_DATA(nid);

	start = (unsigned long)__va(start);
	rc = create_section_mapping(start, start + size);
	if (rc) {
		/* pr_warn(): pr_warning() is deprecated in the kernel */
		pr_warn("Unable to create mapping for hot added memory 0x%llx..0x%llx: %d\n",
			start, start + size, rc);
		return -EFAULT;
	}

	/* this should work for most non-highmem platforms */
	zone = pgdata->node_zones +
		zone_for_memory(nid, start, size, 0, for_device);

	return __add_pages(nid, zone, start_pfn, nr_pages);
}
Пример #4
0
/*
 * Hot-add a physical memory range (altmap-aware variant): grow the hash
 * page table, create the linear mapping for the new range, flush the
 * dcache over it, and hand the pages to the core MM.
 *
 * Returns 0 on success, -EFAULT if the linear mapping could not be
 * created, or the error from __add_pages().
 */
int __meminit arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
		bool want_memblock)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int err;

	/* Grow the hash page table before the new memory is used. */
	resize_hpt_for_hotplug(memblock_phys_mem_size());

	start = (unsigned long)__va(start);
	err = create_section_mapping(start, start + size, nid);
	if (err) {
		pr_warn("Unable to create mapping for hot added memory 0x%llx..0x%llx: %d\n",
			start, start + size, err);
		return -EFAULT;
	}
	flush_inval_dcache_range(start, start + size);

	return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
}