static int change_memory_common(unsigned long addr, int numpages,
				pgprot_t set_mask, pgprot_t clear_mask)
{
	unsigned long start = addr;
	unsigned long size = PAGE_SIZE * numpages;
	unsigned long end = start + size;
	struct vm_struct *area;
	int i;

	if (!PAGE_ALIGNED(addr)) {
		start &= PAGE_MASK;
		end = start + size;
		WARN_ON_ONCE(1);
	}

	/*
	 * Kernel VA mappings are always live, and splitting live section
	 * mappings into page mappings may cause TLB conflicts. This means
	 * we have to ensure that changing the permission bits of the range
	 * we are operating on does not result in such splitting.
	 *
	 * Let's restrict ourselves to mappings created by vmalloc (or vmap).
	 * Those are guaranteed to consist entirely of page mappings, and
	 * splitting is never needed.
	 *
	 * So check whether the [addr, addr + size) interval is entirely
	 * covered by precisely one VM area that has the VM_ALLOC flag set.
	 */
	area = find_vm_area((void *)addr);
	if (!area ||
	    end > (unsigned long)area->addr + area->size ||
	    !(area->flags & VM_ALLOC))
		return -EINVAL;

	if (!numpages)
		return 0;

	/*
	 * If we are manipulating read-only permissions, apply the same
	 * change to the linear mapping of the pages that back this VM area.
	 */
	if (rodata_full && (pgprot_val(set_mask) == PTE_RDONLY ||
			    pgprot_val(clear_mask) == PTE_RDONLY)) {
		for (i = 0; i < area->nr_pages; i++) {
			__change_memory_common((u64)page_address(area->pages[i]),
					       PAGE_SIZE, set_mask, clear_mask);
		}
	}

	/*
	 * Get rid of potentially aliasing lazily unmapped vm areas that may
	 * have permissions set that deviate from the ones we are setting here.
	 */
	vm_unmap_aliases();

	return __change_memory_common(start, size, set_mask, clear_mask);
}
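/*
 * change_memory_common() and the functions below all delegate to
 * __change_memory_common(), which is not part of this excerpt. The sketch
 * below shows one way such a helper can be written: walk the range with
 * apply_to_page_range(), rewriting each PTE's attribute bits, then flush
 * the TLB for the whole range. The page_change_data struct and the
 * change_page_range() callback are assumptions for illustration, and the
 * callback signature expected by apply_to_page_range() has varied across
 * kernel versions; in the real file this helper would precede its callers.
 */
struct page_change_data {
	pgprot_t set_mask;
	pgprot_t clear_mask;
};

static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
{
	struct page_change_data *cdata = data;
	pte_t pte = READ_ONCE(*ptep);

	pte = clear_pte_bit(pte, cdata->clear_mask);
	pte = set_pte_bit(pte, cdata->set_mask);

	set_pte(ptep, pte);
	return 0;
}

static int __change_memory_common(unsigned long start, unsigned long size,
				  pgprot_t set_mask, pgprot_t clear_mask)
{
	struct page_change_data data = {
		.set_mask	= set_mask,
		.clear_mask	= clear_mask,
	};
	int ret;

	ret = apply_to_page_range(&init_mm, start, size, change_page_range,
				  &data);

	/* Changed kernel mappings always require TLB invalidation. */
	flush_tlb_kernel_range(start, start + size);

	return ret;
}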
int set_memory_valid(unsigned long addr, int numpages, int enable)
{
	if (enable)
		return __change_memory_common(addr, PAGE_SIZE * numpages,
					      __pgprot(PTE_VALID),
					      __pgprot(0));
	else
		return __change_memory_common(addr, PAGE_SIZE * numpages,
					      __pgprot(0),
					      __pgprot(PTE_VALID));
}
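/*
 * For comparison with set_memory_valid() above: the exported permission
 * helpers in this file are thin wrappers around change_memory_common(),
 * pairing a set mask with the complementary clear mask so the two bits
 * can never be left in a conflicting state. Sketched here for
 * illustration; the exact mask pairs may differ between kernel versions.
 */
int set_memory_ro(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
				    __pgprot(PTE_RDONLY),
				    __pgprot(PTE_WRITE));
}

int set_memory_rw(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
				    __pgprot(PTE_WRITE),
				    __pgprot(PTE_RDONLY));
}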
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long addr = (unsigned long)page_address(page);

	if (enable)
		__change_memory_common(addr, PAGE_SIZE * numpages,
				       __pgprot(PTE_VALID),
				       __pgprot(0));
	else
		__change_memory_common(addr, PAGE_SIZE * numpages,
				       __pgprot(0),
				       __pgprot(PTE_VALID));
}
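/*
 * Note that __kernel_map_pages() above duplicates the mask selection
 * already encapsulated by set_memory_valid(). An equivalent formulation,
 * and the direction later kernels took, is to forward directly, as in
 * the sketch below (assuming this path is only built for
 * CONFIG_DEBUG_PAGEALLOC kernels):
 *
 *	#ifdef CONFIG_DEBUG_PAGEALLOC
 *	void __kernel_map_pages(struct page *page, int numpages, int enable)
 *	{
 *		set_memory_valid((unsigned long)page_address(page),
 *				 numpages, enable);
 *	}
 *	#endif
 */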