static int change_memory_common(unsigned long addr, int numpages,
				pgprot_t set_mask, pgprot_t clear_mask)
{
	unsigned long start = addr;
	unsigned long size = PAGE_SIZE * numpages;
	unsigned long end = start + size;
	int ret;
	struct page_change_data data;

	if (!IS_ALIGNED(addr, PAGE_SIZE)) {
		start &= PAGE_MASK;
		end = start + size;
		WARN_ON_ONCE(1);
	}

	if (start < MODULES_VADDR || start >= MODULES_END)
		return -EINVAL;

	if (end < MODULES_VADDR || end >= MODULES_END)
		return -EINVAL;

	data.set_mask = set_mask;
	data.clear_mask = clear_mask;

	ret = apply_to_page_range(&init_mm, start, size, change_page_range,
				  &data);

	flush_tlb_kernel_range(start, end);
	return ret;
}
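/*
 * Hedged sketch (not part of the snippet above): change_page_range() and
 * struct page_change_data are referenced but never shown in this listing.
 * Following the arm64 page-attribute pattern, the callback would apply the
 * two masks to each PTE; clear_pte_bit()/set_pte_bit() are assumed helpers
 * and the exact layout of the struct is an assumption.
 */
struct page_change_data {
	pgprot_t set_mask;
	pgprot_t clear_mask;
};

static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
			     void *data)
{
	struct page_change_data *cdata = data;
	pte_t pte = *ptep;

	pte = clear_pte_bit(pte, cdata->clear_mask);	/* drop unwanted bits */
	pte = set_pte_bit(pte, cdata->set_mask);	/* add requested bits */

	set_pte(ptep, pte);
	return 0;
}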
/**
 * pcpu_post_unmap_tlb_flush - flush TLB after unmapping
 * @chunk: pcpu_chunk the regions to be flushed belong to
 * @page_start: page index of the first page to be flushed
 * @page_end: page index of the last page to be flushed + 1
 *
 * Pages [@page_start,@page_end) of @chunk have been unmapped.  Flush
 * TLB for the regions.  This can be skipped if the area is to be
 * returned to vmalloc as vmalloc will handle TLB flushing lazily.
 *
 * As with pcpu_pre_unmap_flush(), TLB flushing also is done at once
 * for the whole region.
 */
static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
				      int page_start, int page_end)
{
	flush_tlb_kernel_range(
		pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
		pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
}
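/*
 * Hedged sketch of the depopulate sequence that would end in the flush
 * above.  pcpu_pre_unmap_flush() is referenced by the kernel-doc comment;
 * pcpu_unmap_pages() and this wrapper's name and signatures are assumptions
 * for illustration, not taken from the snippet.
 */
static void pcpu_unmap_and_flush(struct pcpu_chunk *chunk, struct page **pages,
				 int page_start, int page_end)
{
	/* write back caches while the mappings still exist */
	pcpu_pre_unmap_flush(chunk, page_start, page_end);

	/* tear down the kernel page tables for every unit's copy */
	pcpu_unmap_pages(chunk, pages, page_start, page_end);

	/* invalidate the stale translations once, covering all units */
	pcpu_post_unmap_tlb_flush(chunk, page_start, page_end);
}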
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmdp;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	pud = pud_offset(pgd, addr);
	pmdp = pmd_offset(pud, addr);
	do {
		pmd_t pmd = *pmdp;

		if (!pmd_none(pmd)) {
			pmd_clear(pmdp);
			init_mm.context.kvm_seq++;

			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		addr += PMD_SIZE;
		pmdp += 2;
	} while (addr < end);

	if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq)
		__check_kvm_seq(current->active_mm);

	flush_tlb_kernel_range(virt, end);
}
/*
 * Section support is unsafe on SMP - If you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area_caller() allocates a guard 4K page, so we need to
 * mask the size back to 4MB aligned or we will overflow in the loop below.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_4M - 1));
	pgd_t *pgd;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	do {
		pmd_t pmd, *pmdp = pmd_offset((pud_t *)pgd, addr);

		pmd = *pmdp;
		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the kvm sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		addr += PGDIR_SIZE;
		pgd++;
	} while (addr < end);

	flush_tlb_kernel_range(virt, end);
}
static int change_memory_common(unsigned long addr, int numpages,
				pgprot_t set_mask, pgprot_t clear_mask)
{
	unsigned long start = addr;
	unsigned long size = PAGE_SIZE * numpages;
	unsigned long end = start + size;
	int ret;
	struct page_change_data data;

	if (!IS_ALIGNED(addr, PAGE_SIZE)) {
		start &= PAGE_MASK;
		end = start + size;
		WARN_ON_ONCE(1);
	}

#ifndef CONFIG_SENTINEL
	if (!is_module_address(start) || !is_module_address(end - 1))
		return -EINVAL;
#endif

	data.set_mask = set_mask;
	data.clear_mask = clear_mask;

	ret = apply_to_page_range(&init_mm, start, size, change_page_range,
				  &data);

	flush_tlb_kernel_range(start, end);
	return ret;
}
/*! 2016-04-02 study -ing */
static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
				      int page_start, int page_end)
{
	/*! TLB (Translation lookaside buffer) is a cache for PageTableEntry */
	flush_tlb_kernel_range(
		pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
		pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
}
/* Taken from __dma_remap */
static void bralloc_mem_kernel_remap(struct page *page, size_t size,
				     pgprot_t prot)
{
	unsigned long start = (unsigned long)page_address(page);
	unsigned long end = start + size;

	apply_to_page_range(&init_mm, start, size, bralloc_mem_update_pte,
			    &prot);
	dsb();
	flush_tlb_kernel_range(start, end);
}
static void protect_vm_page(unsigned long addr, int w, int must_succeed)
{
	int err;

	err = protect_memory(addr, PAGE_SIZE, 1, w, 1, must_succeed);
	if (err == 0)
		return;
	else if ((err == -EFAULT) || (err == -ENOMEM)) {
		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
		protect_vm_page(addr, w, 1);
	} else
		panic("protect_vm_page : protect failed, errno = %d\n", err);
}
static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
{
	unsigned long start = (unsigned long) page_address(page);
	unsigned long end = start + size;

	if (PageHighMem(page))
		return;

	apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
	dsb();
	flush_tlb_kernel_range(start, end);
}
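/*
 * Hedged sketch of the __dma_update_pte() callback used by the __dma_remap()
 * variants in this listing (referenced but not shown).  Following the ARM
 * dma-mapping pattern, it rewrites each linear-map PTE with the new pgprot;
 * set_pte_ext() is the ARM-specific setter assumed here.
 */
static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr,
			    void *data)
{
	struct page *page = virt_to_page(addr);
	pgprot_t prot = *(pgprot_t *)data;

	set_pte_ext(pte, mk_pte(page, prot), 0);	/* install new attributes */
	return 0;
}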
static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
{
	unsigned long start = (unsigned long) page_address(page);
	unsigned long end = start + size;
	int err;

	err = apply_to_page_range(&init_mm, start, size, __dma_update_pte,
				  &prot);
	if (err)
		pr_err("***%s: error=%d, pfn=%lx\n", __func__, err,
		       page_to_pfn(page));

	dsb();
	flush_tlb_kernel_range(start, end);
}
/*
 * This function assumes that the range is mapped with PAGE_SIZE pages.
 */
static int __change_memory_common(unsigned long start, unsigned long size,
				  pgprot_t set_mask, pgprot_t clear_mask)
{
	struct page_change_data data;
	int ret;

	data.set_mask = set_mask;
	data.clear_mask = clear_mask;

	ret = apply_to_page_range(&init_mm, start, size, change_page_range,
				  &data);

	flush_tlb_kernel_range(start, start + size);
	return ret;
}
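/*
 * Hedged sketch of a typical caller, modelled on the arm64 set_memory_*()
 * wrappers: make a run of kernel pages read-only by setting PTE_RDONLY and
 * clearing PTE_WRITE.  The wrapper name and the exact bit choice are
 * illustrative assumptions, not part of the snippet above.
 */
static int make_kernel_pages_ro(unsigned long addr, int numpages)
{
	return __change_memory_common(addr, numpages * PAGE_SIZE,
				      __pgprot(PTE_RDONLY),
				      __pgprot(PTE_WRITE));
}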
static void __dma_free_remap(void *cpu_addr, size_t size)
{
	struct arm_vmregion *c;
	unsigned long addr;
	pte_t *ptep;
	int idx;
	u32 off;

	c = arm_vmregion_find_remove(&consistent_head, (unsigned long)cpu_addr);
	if (!c) {
		printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
		       __func__, cpu_addr);
		dump_stack();
		return;
	}

	if ((c->vm_end - c->vm_start) != size) {
		printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %zu)\n",
		       __func__, c->vm_end - c->vm_start, size);
		dump_stack();
		size = c->vm_end - c->vm_start;
	}

	idx = CONSISTENT_PTE_INDEX(c->vm_start);
	off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE - 1);
	ptep = consistent_pte[idx] + off;
	addr = c->vm_start;
	do {
		pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);

		ptep++;
		addr += PAGE_SIZE;
		off++;
		if (off >= PTRS_PER_PTE) {
			BUG_ON(idx >= (NUM_CONSISTENT_PTES - 1));
			off = 0;
			ptep = consistent_pte[++idx];
		}

		if (pte_none(pte) || !pte_present(pte))
			printk(KERN_CRIT "%s: bad page in kernel page table\n",
			       __func__);
	} while (size -= PAGE_SIZE);

	flush_tlb_kernel_range(c->vm_start, c->vm_end);

	arm_vmregion_free(&consistent_head, c);
}
static void __dma_remap(struct page *page, size_t size, pgprot_t prot,
			bool no_kernel_map)
{
	unsigned long start = (unsigned long) page_address(page);
	unsigned long end = start + size;
	int (*func)(pte_t *pte, pgtable_t token, unsigned long addr,
		    void *data);

	if (no_kernel_map)
		func = __dma_clear_pte;
	else
		func = __dma_update_pte;

	apply_to_page_range(&init_mm, start, size, func, &prot);
	mb();
	flush_tlb_kernel_range(start, end);
}
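/*
 * Hedged sketch of the __dma_clear_pte() callback selected above for the
 * no_kernel_map case (referenced but not shown): it simply removes the
 * linear-map PTE so that any later kernel access to the page faults.
 */
static int __dma_clear_pte(pte_t *pte, pgtable_t token, unsigned long addr,
			   void *data)
{
	pte_clear(&init_mm, addr, pte);		/* drop the kernel mapping */
	return 0;
}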
static void shmedia_mapioaddr(unsigned long pa, unsigned long va,
			      unsigned long flags)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep, pte;
	pgprot_t prot;

	pr_debug("shmedia_mapiopage pa %08lx va %08lx\n", pa, va);

	if (!flags)
		flags = 1; /* 1 = CB0-1 device */

	pgdp = pgd_offset_k(va);
	if (pgd_none(*pgdp) || !pgd_present(*pgdp)) {
		pudp = (pud_t *)sh64_get_page();
		set_pgd(pgdp, __pgd((unsigned long)pudp | _KERNPG_TABLE));
	}

	pudp = pud_offset(pgdp, va);
	if (pud_none(*pudp) || !pud_present(*pudp)) {
		pmdp = (pmd_t *)sh64_get_page();
		set_pud(pudp, __pud((unsigned long)pmdp | _KERNPG_TABLE));
	}

	pmdp = pmd_offset(pudp, va);
	if (pmd_none(*pmdp) || !pmd_present(*pmdp)) {
		ptep = (pte_t *)sh64_get_page();
		set_pmd(pmdp, __pmd((unsigned long)ptep + _PAGE_TABLE));
	}

	prot = __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE |
			_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SHARED | flags);

	pte = pfn_pte(pa >> PAGE_SHIFT, prot);
	ptep = pte_offset_kernel(pmdp, va);

	if (!pte_none(*ptep) && pte_val(*ptep) != pte_val(pte))
		pte_ERROR(*ptep);

	set_pte(ptep, pte);

	/* flush the single page that was just mapped */
	flush_tlb_kernel_range(va, va + PAGE_SIZE);
}
static void omap2_mmu_shutdown(struct omap_mmu *mmu)
{
	exmap_clear_preserved_entries(mmu);

	if (dspvect_page != NULL) {
		unsigned long virt;

		down_read(&mmu->exmap_sem);

		virt = (unsigned long)omap_mmu_to_virt(mmu, DSP_INIT_PAGE);
		flush_tlb_kernel_range(virt, virt + PAGE_SIZE);
		free_page((unsigned long)dspvect_page);
		dspvect_page = NULL;

		up_read(&mmu->exmap_sem);
	}
}
/*
 * Section support is unsafe on SMP - If you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area_caller() allocates a guard 4K page, so we need to
 * mask the size back to 1MB aligned or we will overflow in the loop below.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmdp;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	pud = pud_offset(pgd, addr);
	pmdp = pmd_offset(pud, addr);
	do {
		pmd_t pmd = *pmdp;

		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the kvm sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);
			init_mm.context.kvm_seq++;

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		addr += PMD_SIZE;
		pmdp += 2;
	} while (addr < end);

	/*
	 * Ensure that the active_mm is up to date - we want to
	 * catch any use-after-iounmap cases.
	 */
	if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq)
		__check_kvm_seq(current->active_mm);

	flush_tlb_kernel_range(virt, end);
}
static void ion_clean_and_unmap(unsigned long vaddr, pte_t *ptep, size_t size,
				bool memory_zero)
{
	int i;

	flush_cache_vmap(vaddr, vaddr + size);

	if (memory_zero)
		memset((void *)vaddr, 0, size);

	dmac_flush_range((void *)vaddr, (void *)vaddr + size);

	for (i = 0; i < (size / PAGE_SIZE); i++)
		pte_clear(&init_mm, vaddr + (i * PAGE_SIZE), ptep + i);

	flush_cache_vunmap(vaddr, vaddr + size);
	flush_tlb_kernel_range(vaddr, vaddr + size);
}
/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn)
{
	enum fixed_addresses idx;
	unsigned long vaddr;
	int type;

	pagefault_disable();

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
	set_pte(kmap_pte - idx, pfn_pte(pfn, PAGE_KERNEL));
	flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);

	return (void *)vaddr;
}
void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	int idx, type;

	if (kvaddr >= (void *)FIXADDR_START) {
		type = kmap_atomic_idx();
		idx = type + KM_TYPE_NR * smp_processor_id();

		/*
		 * Force other mappings to Oops if they'll try to access this
		 * pte without first remap it.  Keeping stale mappings around
		 * is a bad idea also, in case the page changes cacheability
		 * attributes or becomes a protected page in a hypervisor.
		 */
		pte_clear(&init_mm, vaddr, kmap_pte - idx);
		flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);

		kmap_atomic_idx_pop();
	}

	pagefault_enable();
}
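/*
 * Hedged usage sketch: kmap_atomic_pfn() and __kunmap_atomic() bracket a
 * short, non-sleeping access to a page that may have no struct page.  The
 * helper name is illustrative only; len must stay within one page, and the
 * unmap path above clears the fixmap PTE and flushes its TLB entry.
 */
static void copy_from_pfn(void *dst, unsigned long pfn, size_t len)
{
	void *src = kmap_atomic_pfn(pfn);	/* temporary fixmap mapping */

	memcpy(dst, src, len);			/* no sleeping in between */
	__kunmap_atomic(src);			/* clears PTE, flushes TLB */
}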
static void flush_all_zero_pkmaps(void)
{
	int i;

	flush_cache_kmaps();

	for (i = 0; i < LAST_PKMAP; i++) {
		struct page *page;

		/*
		 * zero means we don't have anything to do,
		 * >1 means that it is still in use. Only
		 * a count of 1 means that it is free but
		 * needs to be unmapped
		 */
		if (pkmap_count[i] != 1)
			continue;
		pkmap_count[i] = 0;

		/* sanity check */
		if (pte_none(pkmap_page_table[i]))
			BUG();

		/*
		 * Don't need an atomic fetch-and-clear op here;
		 * no-one has the page mapped, and cannot get at
		 * its virtual address (and hence PTE) without first
		 * getting the kmap_lock (which is held here).
		 * So no dangers, even with speculative execution.
		 */
		page = pte_page(pkmap_page_table[i]);
		pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);

		set_page_address(page, NULL);
	}
	flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));
}
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_4M - 1));
	pgd_t *pgd;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	do {
		pmd_t pmd, *pmdp = pmd_offset((pud_t *)pgd, addr);

		pmd = *pmdp;
		if (!pmd_none(pmd)) {
			pmd_clear(pmdp);

			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		addr += PGDIR_SIZE;
		pgd++;
	} while (addr < end);

	flush_tlb_kernel_range(virt, end);
}
/* Flush a single tlb entry in the kernel */
void __flush_tlb_one(unsigned long addr)
{
	addr &= PAGE_MASK;
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
}
/*
 * Add a new chunk of uncached memory pages to the specified pool.
 *
 * @uc_pool: pool to add new chunk of uncached memory to
 * @nid: node id of node to allocate memory from, or -1
 *
 * This is accomplished by first allocating a granule of cached memory pages
 * and then converting them to uncached memory pages.
 */
static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
{
	struct page *page;
	int status, i, nchunks_added = uc_pool->nchunks_added;
	unsigned long c_addr, uc_addr;

	if (mutex_lock_interruptible(&uc_pool->add_chunk_mutex) != 0)
		return -1;	/* interrupted by a signal */

	if (uc_pool->nchunks_added > nchunks_added) {
		/* someone added a new chunk while we were waiting */
		mutex_unlock(&uc_pool->add_chunk_mutex);
		return 0;
	}

	if (uc_pool->nchunks_added >= MAX_CONVERTED_CHUNKS_PER_NODE) {
		mutex_unlock(&uc_pool->add_chunk_mutex);
		return -1;
	}

	/* attempt to allocate a granule's worth of cached memory pages */

	page = alloc_pages_exact_node(nid,
				GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
				IA64_GRANULE_SHIFT-PAGE_SHIFT);
	if (!page) {
		mutex_unlock(&uc_pool->add_chunk_mutex);
		return -1;
	}

	/* convert the memory pages from cached to uncached */

	c_addr = (unsigned long)page_address(page);
	uc_addr = c_addr - PAGE_OFFSET + __IA64_UNCACHED_OFFSET;

	/*
	 * There's a small race here where it's possible for someone to
	 * access the page through /dev/mem halfway through the conversion
	 * to uncached - not sure it's really worth bothering about
	 */
	for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++)
		SetPageUncached(&page[i]);

	flush_tlb_kernel_range(uc_addr, uc_addr + IA64_GRANULE_SIZE);

	status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
	if (status == PAL_VISIBILITY_OK_REMOTE_NEEDED) {
		atomic_set(&uc_pool->status, 0);
		status = smp_call_function(uncached_ipi_visibility, uc_pool, 1);
		if (status || atomic_read(&uc_pool->status))
			goto failed;
	} else if (status != PAL_VISIBILITY_OK)
		goto failed;

	preempt_disable();

	if (ia64_platform_is("sn2"))
		sn_flush_all_caches(uc_addr, IA64_GRANULE_SIZE);
	else
		flush_icache_range(uc_addr, uc_addr + IA64_GRANULE_SIZE);

	/* flush the just introduced uncached translation from the TLB */
	local_flush_tlb_all();

	preempt_enable();

	status = ia64_pal_mc_drain();
	if (status != PAL_STATUS_SUCCESS)
		goto failed;
	atomic_set(&uc_pool->status, 0);
	status = smp_call_function(uncached_ipi_mc_drain, uc_pool, 1);
	if (status || atomic_read(&uc_pool->status))
		goto failed;

	/*
	 * The chunk of memory pages has been converted to uncached so now we
	 * can add it to the pool.
	 */
	status = gen_pool_add(uc_pool->pool, uc_addr, IA64_GRANULE_SIZE, nid);
	if (status)
		goto failed;

	uc_pool->nchunks_added++;
	mutex_unlock(&uc_pool->add_chunk_mutex);
	return 0;

	/* failed to convert or add the chunk so give it back to the kernel */
failed:
	for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++)
		ClearPageUncached(&page[i]);

	free_pages(c_addr, IA64_GRANULE_SHIFT-PAGE_SHIFT);
	mutex_unlock(&uc_pool->add_chunk_mutex);
	return -1;
}
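/*
 * Hedged sketch of the allocation side that the pool above feeds, loosely
 * following the ia64 uncached allocator pattern: try the gen_pool first and
 * grow it with uncached_add_chunk() on failure.  The function name is
 * illustrative and not taken from the snippet above.
 */
static unsigned long uncached_alloc_one_page(struct uncached_pool *uc_pool,
					     int nid)
{
	unsigned long uc_addr;

	do {
		uc_addr = gen_pool_alloc(uc_pool->pool, PAGE_SIZE);
		if (uc_addr != 0)
			return uc_addr;		/* got an uncached page */
	} while (uncached_add_chunk(uc_pool, nid) == 0);	/* grow and retry */

	return 0;				/* pool empty and cannot grow */
}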
void plat_iounmap_ns(void __iomem *vaddr, unsigned long size)
{
	unmap_kernel_range_noflush((unsigned long __force)vaddr, size);
	flush_tlb_kernel_range((unsigned long __force)vaddr,
			       (unsigned long __force)vaddr + size);
}
static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
{
	struct page *page;
	int status, i, nchunks_added = uc_pool->nchunks_added;
	unsigned long c_addr, uc_addr;

	if (mutex_lock_interruptible(&uc_pool->add_chunk_mutex) != 0)
		return -1;

	if (uc_pool->nchunks_added > nchunks_added) {
		mutex_unlock(&uc_pool->add_chunk_mutex);
		return 0;
	}

	if (uc_pool->nchunks_added >= MAX_CONVERTED_CHUNKS_PER_NODE) {
		mutex_unlock(&uc_pool->add_chunk_mutex);
		return -1;
	}

	page = alloc_pages_exact_node(nid,
				GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
				IA64_GRANULE_SHIFT-PAGE_SHIFT);
	if (!page) {
		mutex_unlock(&uc_pool->add_chunk_mutex);
		return -1;
	}

	c_addr = (unsigned long)page_address(page);
	uc_addr = c_addr - PAGE_OFFSET + __IA64_UNCACHED_OFFSET;

	for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++)
		SetPageUncached(&page[i]);

	flush_tlb_kernel_range(uc_addr, uc_addr + IA64_GRANULE_SIZE);

	status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
	if (status == PAL_VISIBILITY_OK_REMOTE_NEEDED) {
		atomic_set(&uc_pool->status, 0);
		status = smp_call_function(uncached_ipi_visibility, uc_pool, 1);
		if (status || atomic_read(&uc_pool->status))
			goto failed;
	} else if (status != PAL_VISIBILITY_OK)
		goto failed;

	preempt_disable();

	if (ia64_platform_is("sn2"))
		sn_flush_all_caches(uc_addr, IA64_GRANULE_SIZE);
	else
		flush_icache_range(uc_addr, uc_addr + IA64_GRANULE_SIZE);

	local_flush_tlb_all();

	preempt_enable();

	status = ia64_pal_mc_drain();
	if (status != PAL_STATUS_SUCCESS)
		goto failed;
	atomic_set(&uc_pool->status, 0);
	status = smp_call_function(uncached_ipi_mc_drain, uc_pool, 1);
	if (status || atomic_read(&uc_pool->status))
		goto failed;

	status = gen_pool_add(uc_pool->pool, uc_addr, IA64_GRANULE_SIZE, nid);
	if (status)
		goto failed;

	uc_pool->nchunks_added++;
	mutex_unlock(&uc_pool->add_chunk_mutex);
	return 0;

failed:
	for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++)
		ClearPageUncached(&page[i]);

	free_pages(c_addr, IA64_GRANULE_SHIFT-PAGE_SHIFT);
	mutex_unlock(&uc_pool->add_chunk_mutex);
	return -1;
}
void __flush_tlb_one_tt(unsigned long addr)
{
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
}
void flush_tlb_kernel_vm_tt(void)
{
	flush_tlb_kernel_range(start_vm, end_vm);
}