struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start) { /* * 'memmap_start' is the virtual address for the first "struct * page" in this range of the vmemmap array. In the case of * CONFIG_SPARSEMEM_VMEMMAP a page_to_pfn conversion is simple * pointer arithmetic, so we can perform this to_vmem_altmap() * conversion without concern for the initialization state of * the struct page fields. */ struct page *page = (struct page *) memmap_start; struct dev_pagemap *pgmap; /* * Unconditionally retrieve a dev_pagemap associated with the * given physical address, this is only for use in the * arch_{add|remove}_memory() for setting up and tearing down * the memmap. */ rcu_read_lock(); pgmap = find_dev_pagemap(__pfn_to_phys(page_to_pfn(page))); rcu_read_unlock(); return pgmap ? pgmap->altmap : NULL; }
/**
 * devm_memremap_pages - remap and provide memmap backing for the given resource
 * @dev: hosting device for @res
 * @res: "host memory" address range
 * @ref: a live per-cpu reference count
 * @altmap: optional descriptor for allocating the memmap from @res
 *
 * Notes:
 * 1/ @ref must be 'live' on entry and 'dead' before devm_memunmap_pages() time
 *    (or devm release event).
 *
 * 2/ @res is expected to be a host memory range that could feasibly be
 *    treated as a "System RAM" range, i.e. not a device mmio range, but
 *    this is not enforced.
 */
void *devm_memremap_pages(struct device *dev, struct resource *res,
		struct percpu_ref *ref, struct vmem_altmap *altmap)
{
	resource_size_t key, align_start, align_size, align_end;
	pgprot_t pgprot = PAGE_KERNEL;
	struct dev_pagemap *pgmap;
	struct page_map *page_map;
	int error, nid, is_ram;
	unsigned long pfn;

	/*
	 * Section-align the range first so that both the "System RAM"
	 * collision check and the later arch_add_memory() operate on the
	 * exact same span.
	 */
	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
		- align_start;

	is_ram = region_intersects(align_start, align_size,
		IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);

	if (is_ram == REGION_MIXED) {
		/* Partially overlapping System RAM is unsupported. */
		WARN_ONCE(1, "%s attempted on mixed region %pr\n",
				__func__, res);
		return ERR_PTR(-ENXIO);
	}

	/* Already RAM: nothing to hotplug, the linear map suffices. */
	if (is_ram == REGION_INTERSECTS)
		return __va(res->start);

	if (!ref)
		return ERR_PTR(-EINVAL);

	/* devres-managed: released via devm_memremap_pages_release() */
	page_map = devres_alloc_node(devm_memremap_pages_release,
			sizeof(*page_map), GFP_KERNEL, dev_to_node(dev));
	if (!page_map)
		return ERR_PTR(-ENOMEM);
	pgmap = &page_map->pgmap;

	memcpy(&page_map->res, res, sizeof(*res));

	pgmap->dev = dev;
	if (altmap) {
		/* Copy so the caller's descriptor need not stay live. */
		memcpy(&page_map->altmap, altmap, sizeof(*altmap));
		pgmap->altmap = &page_map->altmap;
	}
	pgmap->ref = ref;
	pgmap->res = &page_map->res;

	/*
	 * Register every section of the range in the radix tree, failing
	 * with -EBUSY if any section already has a dev_pagemap owner.
	 */
	mutex_lock(&pgmap_lock);
	error = 0;
	align_end = align_start + align_size - 1;
	for (key = align_start; key <= align_end; key += SECTION_SIZE) {
		struct dev_pagemap *dup;

		rcu_read_lock();
		dup = find_dev_pagemap(key);
		rcu_read_unlock();
		if (dup) {
			dev_err(dev, "%s: %pr collides with mapping for %s\n",
					__func__, res, dev_name(dup->dev));
			error = -EBUSY;
			break;
		}
		error = radix_tree_insert(&pgmap_radix, key >> PA_SECTION_SHIFT,
				page_map);
		if (error) {
			dev_err(dev, "%s: failed: %d\n", __func__, error);
			break;
		}
	}
	mutex_unlock(&pgmap_lock);
	if (error)
		goto err_radix;

	nid = dev_to_node(dev);
	if (nid < 0)
		nid = numa_mem_id();

	/* Reserve the PAT/pfn-tracking slot before hotplugging. */
	error = track_pfn_remap(NULL, &pgprot, PHYS_PFN(align_start), 0,
			align_size);
	if (error)
		goto err_pfn_remap;

	mem_hotplug_begin();
	error = arch_add_memory(nid, align_start, align_size, true);
	mem_hotplug_done();
	if (error)
		goto err_add_memory;

	for_each_device_pfn(pfn, page_map) {
		struct page *page = pfn_to_page(pfn);

		/*
		 * ZONE_DEVICE pages union ->lru with a ->pgmap back
		 * pointer. It is a bug if a ZONE_DEVICE page is ever
		 * freed or placed on a driver-private list. Seed the
		 * storage with LIST_POISON* values.
		 */
		list_del(&page->lru);
		page->pgmap = pgmap;
	}
	devres_add(dev, page_map);
	return __va(res->start);

 err_add_memory:
	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
 err_pfn_remap:
 err_radix:
	pgmap_radix_release(res);
	devres_free(page_map);
	return ERR_PTR(error);
}
/** * devm_memremap_pages - remap and provide memmap backing for the given resource * @dev: hosting device for @res * @res: "host memory" address range * @ref: a live per-cpu reference count * @altmap: optional descriptor for allocating the memmap from @res * * Notes: * 1/ @ref must be 'live' on entry and 'dead' before devm_memunmap_pages() time * (or devm release event). * * 2/ @res is expected to be a host memory range that could feasibly be * treated as a "System RAM" range, i.e. not a device mmio range, but * this is not enforced. */ void *devm_memremap_pages(struct device *dev, struct resource *res, struct percpu_ref *ref, struct vmem_altmap *altmap) { int is_ram = region_intersects(res->start, resource_size(res), "System RAM"); resource_size_t key, align_start, align_size, align_end; struct dev_pagemap *pgmap; struct page_map *page_map; unsigned long pfn; int error, nid; if (is_ram == REGION_MIXED) { WARN_ONCE(1, "%s attempted on mixed region %pr\n", __func__, res); return ERR_PTR(-ENXIO); } if (is_ram == REGION_INTERSECTS) return __va(res->start); if (altmap && !IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP)) { dev_err(dev, "%s: altmap requires CONFIG_SPARSEMEM_VMEMMAP=y\n", __func__); return ERR_PTR(-ENXIO); } if (!ref) return ERR_PTR(-EINVAL); page_map = devres_alloc_node(devm_memremap_pages_release, sizeof(*page_map), GFP_KERNEL, dev_to_node(dev)); if (!page_map) return ERR_PTR(-ENOMEM); pgmap = &page_map->pgmap; memcpy(&page_map->res, res, sizeof(*res)); pgmap->dev = dev; if (altmap) { memcpy(&page_map->altmap, altmap, sizeof(*altmap)); pgmap->altmap = &page_map->altmap; } pgmap->ref = ref; pgmap->res = &page_map->res; mutex_lock(&pgmap_lock); error = 0; align_start = res->start & ~(SECTION_SIZE - 1); align_size = ALIGN(resource_size(res), SECTION_SIZE); align_end = align_start + align_size - 1; for (key = align_start; key <= align_end; key += SECTION_SIZE) { struct dev_pagemap *dup; rcu_read_lock(); dup = find_dev_pagemap(key); rcu_read_unlock(); if (dup) { 
dev_err(dev, "%s: %pr collides with mapping for %s\n", __func__, res, dev_name(dup->dev)); error = -EBUSY; break; } error = radix_tree_insert(&pgmap_radix, key >> PA_SECTION_SHIFT, page_map); if (error) { dev_err(dev, "%s: failed: %d\n", __func__, error); break; } } mutex_unlock(&pgmap_lock); if (error) goto err_radix; nid = dev_to_node(dev); if (nid < 0) nid = numa_mem_id(); error = arch_add_memory(nid, align_start, align_size, true); if (error) goto err_add_memory; for_each_device_pfn(pfn, page_map) { struct page *page = pfn_to_page(pfn); /* ZONE_DEVICE pages must never appear on a slab lru */ list_force_poison(&page->lru); page->pgmap = pgmap; } devres_add(dev, page_map); return __va(res->start); err_add_memory: err_radix: pgmap_radix_release(res); devres_free(page_map); return ERR_PTR(error); }