static int change_memory_common(unsigned long addr, int numpages,
				pgprot_t set_mask, pgprot_t clear_mask)
{
	unsigned long start = addr;
	unsigned long size = PAGE_SIZE * numpages;
	unsigned long end = start + size;
	int ret;
	struct page_change_data data;

	if (!IS_ALIGNED(addr, PAGE_SIZE)) {
		start &= PAGE_MASK;
		end = start + size;
		WARN_ON_ONCE(1);
	}

#ifndef CONFIG_SENTINEL
	if (!is_module_address(start) || !is_module_address(end - 1))
		return -EINVAL;
#endif

	data.set_mask = set_mask;
	data.clear_mask = clear_mask;

	ret = apply_to_page_range(&init_mm, start, size, change_page_range,
				  &data);

	flush_tlb_kernel_range(start, end);
	return ret;
}
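/*
 * The change_memory_common() variants in this collection, and the
 * __change_memory_common() helper further below, hand change_page_range()
 * to apply_to_page_range(). That callback is not part of the excerpt; the
 * sketch below follows the arm64 pageattr.c pattern and is an assumption
 * about its shape rather than a verbatim copy: it clears clear_mask and
 * sets set_mask on each PTE in the range.
 */
struct page_change_data {
	pgprot_t set_mask;
	pgprot_t clear_mask;
};

static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
			     void *data)
{
	struct page_change_data *cdata = data;
	pte_t pte = *ptep;

	/* Drop the requested bits, then set the requested bits. */
	pte = clear_pte_bit(pte, cdata->clear_mask);
	pte = set_pte_bit(pte, cdata->set_mask);

	set_pte(ptep, pte);
	return 0;
}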
static int gnttab_suspend(void)
{
	apply_to_page_range(&init_mm, (unsigned long)shared,
			    PAGE_SIZE * nr_grant_frames,
			    unmap_pte_fn, NULL);
	return 0;
}
static int gnttab_suspend(struct sys_device *dev, pm_message_t state)
{
	apply_to_page_range(&init_mm, (unsigned long)shared,
			    PAGE_SIZE * nr_grant_frames,
			    unmap_pte_fn, NULL);
	return 0;
}
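/*
 * The gnttab_suspend() variants rely on an unmap_pte_fn() callback that is
 * not shown in this excerpt. A minimal sketch, assuming the usual Xen
 * grant-table implementation, which simply zaps each PTE so the shared
 * grant frames are no longer mapped:
 */
static int unmap_pte_fn(pte_t *pte, struct page *pmd_page,
			unsigned long addr, void *data)
{
	set_pte_at(&init_mm, addr, pte, __pte(0));
	return 0;
}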
static int change_memory_common(unsigned long addr, int numpages,
				pgprot_t set_mask, pgprot_t clear_mask)
{
	unsigned long start = addr;
	unsigned long size = PAGE_SIZE * numpages;
	unsigned long end = start + size;
	int ret;
	struct page_change_data data;

	if (!IS_ALIGNED(addr, PAGE_SIZE)) {
		start &= PAGE_MASK;
		end = start + size;
		WARN_ON_ONCE(1);
	}

	if (start < MODULES_VADDR || start >= MODULES_END)
		return -EINVAL;

	if (end < MODULES_VADDR || end >= MODULES_END)
		return -EINVAL;

	data.set_mask = set_mask;
	data.clear_mask = clear_mask;

	ret = apply_to_page_range(&init_mm, start, size, change_page_range,
				  &data);

	flush_tlb_kernel_range(start, end);
	return ret;
}
int gnttab_suspend(void)
{
#ifndef __ia64__
	apply_to_page_range(&init_mm, (unsigned long)shared,
			    PAGE_SIZE * nr_grant_frames,
			    unmap_pte_fn, NULL);
#endif
	return 0;
}
/* Taken from __dma_remap */
static void bralloc_mem_kernel_remap(struct page *page, size_t size,
				     pgprot_t prot)
{
	unsigned long start = (unsigned long)page_address(page);
	unsigned long end = start + size;	/* was "unsigned"; widened to avoid truncating a kernel vaddr */

	apply_to_page_range(&init_mm, start, size, bralloc_mem_update_pte,
			    &prot);
	dsb();
	flush_tlb_kernel_range(start, end);
}
static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
{
	unsigned long start = (unsigned long) page_address(page);
	unsigned long end = start + size;	/* was "unsigned"; widened to avoid truncating a kernel vaddr */

	/* Highmem pages have no linear-map alias to remap. */
	if (PageHighMem(page))
		return;

	apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
	dsb();
	flush_tlb_kernel_range(start, end);
}
static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
{
	unsigned long start = (unsigned long) page_address(page);
	unsigned long end = start + size;	/* was "unsigned"; widened to avoid truncating a kernel vaddr */
	int err;

	err = apply_to_page_range(&init_mm, start, size, __dma_update_pte,
				  &prot);
	if (err)
		pr_err("***%s: error=%d, pfn=%lx\n", __func__, err,
		       page_to_pfn(page));
	dsb();
	flush_tlb_kernel_range(start, end);
}
/*
 * This function assumes that the range is mapped with PAGE_SIZE pages.
 */
static int __change_memory_common(unsigned long start, unsigned long size,
				  pgprot_t set_mask, pgprot_t clear_mask)
{
	struct page_change_data data;
	int ret;

	data.set_mask = set_mask;
	data.clear_mask = clear_mask;

	ret = apply_to_page_range(&init_mm, start, size, change_page_range,
				  &data);

	flush_tlb_kernel_range(start, start + size);
	return ret;
}
static void __dma_remap(struct page *page, size_t size, pgprot_t prot,
			bool no_kernel_map)
{
	unsigned long start = (unsigned long) page_address(page);
	unsigned long end = start + size;	/* was "unsigned"; widened to avoid truncating a kernel vaddr */
	int (*func)(pte_t *pte, pgtable_t token, unsigned long addr,
		    void *data);

	if (no_kernel_map)
		func = __dma_clear_pte;
	else
		func = __dma_update_pte;

	apply_to_page_range(&init_mm, start, size, func, &prot);
	mb();
	flush_tlb_kernel_range(start, end);
}
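/*
 * Each __dma_remap() variant above hands __dma_update_pte() (or a clearing
 * twin) to apply_to_page_range(). The callback is omitted from this
 * excerpt; below is a sketch in the style of the ARM dma-mapping code,
 * rewriting each PTE with the new protection bits. This is an assumed
 * shape, not a verbatim copy.
 */
static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr,
			    void *data)
{
	struct page *page = virt_to_page(addr);
	pgprot_t prot = *(pgprot_t *)data;

	set_pte_ext(pte, mk_pte(page, prot), 0);
	return 0;
}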
int __init efi_set_mapping_permissions(struct mm_struct *mm,
				       efi_memory_desc_t *md)
{
	BUG_ON(md->type != EFI_RUNTIME_SERVICES_CODE &&
	       md->type != EFI_RUNTIME_SERVICES_DATA);

	/*
	 * Calling apply_to_page_range() is only safe on regions that are
	 * guaranteed to be mapped down to pages. Since we are only called
	 * for regions that have been mapped using efi_create_mapping() above
	 * (and this is checked by the generic Memory Attributes table parsing
	 * routines), there is no need to check that again here.
	 */
	return apply_to_page_range(mm, md->virt_addr,
				   md->num_pages << EFI_PAGE_SHIFT,
				   set_permissions, md);
}
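/*
 * efi_set_mapping_permissions() delegates the per-PTE work to a
 * set_permissions() callback that this excerpt does not include. A sketch
 * following the arm64 EFI runtime code (assumed, not verbatim): read-only
 * and no-execute attributes from the memory descriptor are translated into
 * the corresponding PTE bits.
 */
static int __init set_permissions(pte_t *ptep, pgtable_t token,
				  unsigned long addr, void *data)
{
	efi_memory_desc_t *md = data;
	pte_t pte = *ptep;

	if (md->attribute & EFI_MEMORY_RO)
		pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
	if (md->attribute & EFI_MEMORY_XP)
		pte = set_pte_bit(pte, __pgprot(PTE_PXN));
	set_pte(ptep, pte);
	return 0;
}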
static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
{
	struct gnttab_setup_table setup;
	unsigned long *frames;
	unsigned int nr_gframes = end_idx + 1;
	int rc;

	frames = kmalloc(nr_gframes * sizeof(unsigned long), GFP_ATOMIC);
	if (!frames)
		return -ENOMEM;

	setup.dom       = DOMID_SELF;
	setup.nr_frames = nr_gframes;
	set_xen_guest_handle(setup.frame_list, frames);

	rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
	if (rc == -ENOSYS) {
		kfree(frames);
		return -ENOSYS;
	}

	BUG_ON(rc || setup.status);

#ifndef __ia64__
	if (shared == NULL) {
		struct vm_struct *area;
		area = alloc_vm_area(PAGE_SIZE * max_nr_grant_frames());
		BUG_ON(area == NULL);
		shared = area->addr;
	}
	rc = apply_to_page_range(&init_mm, (unsigned long)shared,
				 PAGE_SIZE * nr_gframes,
				 map_pte_fn, &frames);
	BUG_ON(rc);
	frames -= nr_gframes; /* adjust after map_pte_fn() */
#else
	shared = __va(frames[0] << PAGE_SHIFT);
#endif

	kfree(frames);

	return 0;
}
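/*
 * gnttab_map() passes &frames (a pointer to the frame-list cursor) so that
 * map_pte_fn() can advance it by one entry per PTE; that is why the caller
 * must do "frames -= nr_gframes" afterwards before freeing. The callback
 * itself is not part of this excerpt; a sketch assuming the x86 Xen
 * grant-table version:
 */
static int map_pte_fn(pte_t *pte, struct page *pmd_page,
		      unsigned long addr, void *data)
{
	unsigned long **frames = (unsigned long **)data;

	/* Map the next grant frame and step the cursor forward. */
	set_pte_at(&init_mm, addr, pte, mfn_pte((*frames)[0], PAGE_KERNEL));
	(*frames)++;
	return 0;
}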
struct vm_struct *alloc_vm_area(unsigned long size)
{
	struct vm_struct *area;

	area = get_vm_area(size, VM_IOREMAP);
	if (area == NULL)
		return NULL;

	/*
	 * This ensures that page tables are constructed for this region
	 * of kernel virtual address space and mapped into init_mm.
	 */
	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
				area->size, f, NULL)) {
		free_vm_area(area);
		return NULL;
	}

	return area;
}
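/*
 * alloc_vm_area() uses apply_to_page_range() purely for its side effect of
 * allocating the intermediate page tables, so the callback f has nothing
 * left to do. Presumably a no-op along these lines (assumed; not shown in
 * the excerpt):
 */
static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
{
	/* apply_to_page_range() does all the hard work. */
	return 0;
}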
int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes,
			   unsigned long max_nr_gframes,
			   struct grant_entry **__shared)
{
	int rc;
	struct grant_entry *shared = *__shared;

	if (shared == NULL) {
		struct vm_struct *area =
			xen_alloc_vm_area(PAGE_SIZE * max_nr_gframes);
		BUG_ON(area == NULL);
		shared = area->addr;
		*__shared = shared;
	}

	rc = apply_to_page_range(&init_mm, (unsigned long)shared,
				 PAGE_SIZE * nr_gframes,
				 map_pte_fn, &frames);

	return rc;
}
static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
{
	struct gnttab_setup_table setup;
	unsigned long *frames;
	unsigned int nr_gframes = end_idx + 1;
	int rc;

	frames = kmalloc(nr_gframes * sizeof(unsigned long), GFP_ATOMIC);
	if (!frames)
		return -ENOMEM;

	setup.dom       = DOMID_SELF;
	setup.nr_frames = nr_gframes;
	set_xen_guest_handle(setup.frame_list, frames);

	rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
	if (rc == -ENOSYS) {
		kfree(frames);
		return -ENOSYS;
	}

	BUG_ON(rc || setup.status != GNTST_okay);

	if (shared == NULL)
		shared = arch_gnttab_alloc_shared(frames);

#ifdef CONFIG_X86
	rc = apply_to_page_range(&init_mm, (unsigned long)shared,
				 PAGE_SIZE * nr_gframes,
				 map_pte_fn, &frames);
	BUG_ON(rc);
	frames -= nr_gframes; /* adjust after map_pte_fn() */
#endif /* CONFIG_X86 */

	kfree(frames);

	return 0;
}
#ifdef CONFIG_XEN	/* opening guard restored; the excerpt had only the #endif */
static int dealloc_pte_fn(
	pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
{
	unsigned long mfn = pte_mfn(*pte);
	int ret;
	struct xen_memory_reservation reservation = {
		.nr_extents   = 1,
		.extent_order = 0,
		.domid        = DOMID_SELF
	};
	set_xen_guest_handle(reservation.extent_start, &mfn);
	set_pte_at(&init_mm, addr, pte, __pte_ma(0));
	set_phys_to_machine(__pa(addr) >> PAGE_SHIFT, INVALID_P2M_ENTRY);
	ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
	BUG_ON(ret != 1);
	return 0;
}
#endif

struct page **alloc_empty_pages_and_pagevec(int nr_pages)
{
	unsigned long vaddr, flags;
	struct page *page, **pagevec;
	int i, ret;

	pagevec = kmalloc(sizeof(page) * nr_pages, GFP_KERNEL);
	if (pagevec == NULL)
		return NULL;

	for (i = 0; i < nr_pages; i++) {
		page = pagevec[i] = alloc_page(GFP_KERNEL);
		if (page == NULL)
			goto err;

		vaddr = (unsigned long)page_address(page);
		scrub_pages(vaddr, 1);

		balloon_lock(flags);
		if (xen_feature(XENFEAT_auto_translated_physmap)) {
			unsigned long gmfn = page_to_pfn(page);
			struct xen_memory_reservation reservation = {
				.nr_extents   = 1,
				.extent_order = 0,
				.domid        = DOMID_SELF
			};
			set_xen_guest_handle(reservation.extent_start, &gmfn);
			ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
						   &reservation);
			if (ret == 1)
				ret = 0; /* success */
		} else {
#ifdef CONFIG_XEN
			ret = apply_to_page_range(&init_mm, vaddr, PAGE_SIZE,
						  dealloc_pte_fn, NULL);
#else
			/* Cannot handle non-auto translate mode. */
			ret = 1;
#endif
		}

		if (ret != 0) {
			balloon_unlock(flags);
			__free_page(page);
			goto err;
		}

		totalram_pages = --current_pages;

		balloon_unlock(flags);
	}

 out:
	schedule_work(&balloon_worker);
#ifdef CONFIG_XEN
	flush_tlb_all();
#endif
	return pagevec;

 err:
	balloon_lock(flags);
	while (--i >= 0)
		balloon_append(pagevec[i]);
	balloon_unlock(flags);
	kfree(pagevec);
	pagevec = NULL;
	goto out;
}
void arch_gnttab_unmap_shared(struct grant_entry *shared,
			      unsigned long nr_gframes)
{
	apply_to_page_range(&init_mm, (unsigned long)shared,
			    PAGE_SIZE * nr_gframes,
			    unmap_pte_fn, NULL);
}
/* map fgmfn of domid to lpfn in the current domain */
static int map_foreign_page(unsigned long lpfn, unsigned long fgmfn,
			    unsigned int domid)
{
	int rc;
	struct xen_add_to_physmap_range xatp = {
		.domid = DOMID_SELF,
		.foreign_domid = domid,
		.size = 1,
		.space = XENMAPSPACE_gmfn_foreign,
	};
	xen_ulong_t idx = fgmfn;
	xen_pfn_t gpfn = lpfn;
	int err = 0;

	set_xen_guest_handle(xatp.idxs, &idx);
	set_xen_guest_handle(xatp.gpfns, &gpfn);
	set_xen_guest_handle(xatp.errs, &err);

	rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatp);
	if (rc || err) {
		pr_warn("Failed to map pfn to mfn rc:%d:%d pfn:%lx mfn:%lx\n",
			rc, err, lpfn, fgmfn);
		return 1;
	}
	return 0;
}

struct remap_data {
	xen_pfn_t fgmfn; /* foreign domain's gmfn */
	pgprot_t prot;
	domid_t domid;
	struct vm_area_struct *vma;
	int index;
	struct page **pages;
	struct xen_remap_mfn_info *info;
};

static int remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
			void *data)
{
	struct remap_data *info = data;
	struct page *page = info->pages[info->index++];
	unsigned long pfn = page_to_pfn(page);
	pte_t pte = pfn_pte(pfn, info->prot);

	if (map_foreign_page(pfn, info->fgmfn, info->domid))
		return -EFAULT;
	set_pte_at(info->vma->vm_mm, addr, ptep, pte);

	return 0;
}

int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
			       unsigned long addr,
			       xen_pfn_t mfn, int nr,
			       pgprot_t prot, unsigned domid,
			       struct page **pages)
{
	int err;
	struct remap_data data;

	/* TBD: Batching, current sole caller only does page at a time */
	if (nr > 1)
		return -EINVAL;

	data.fgmfn = mfn;
	data.prot = prot;
	data.domid = domid;
	data.vma = vma;
	data.index = 0;
	data.pages = pages;
	err = apply_to_page_range(vma->vm_mm, addr, nr << PAGE_SHIFT,
				  remap_pte_fn, &data);
	return err;
}