/*
 * Set the page permissions for a particular virtual address.  If the
 * address is a vmalloc mapping (or other non-linear mapping), then
 * find the linear mapping of the page and also set its protections to
 * match.
 */
static void set_aliased_prot(void *v, pgprot_t prot)
{
        int level;
        pte_t *ptep;
        pte_t pte;
        unsigned long pfn;
        struct page *page;

        ptep = lookup_address((unsigned long)v, &level);
        BUG_ON(ptep == NULL);

        pfn = pte_pfn(*ptep);
        page = pfn_to_page(pfn);

        pte = pfn_pte(pfn, prot);

        if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
                BUG();

        if (!PageHighMem(page)) {
                void *av = __va(PFN_PHYS(pfn));

                if (av != v)
                        if (HYPERVISOR_update_va_mapping((unsigned long)av, pte, 0))
                                BUG();
        } else
                kmap_flush_unused();
}
static void _copy_pte(pte_t *dst_pte, pte_t *src_pte, unsigned long addr)
{
        pte_t pte = *src_pte;

        if (pte_valid(pte)) {
                /*
                 * Resume will overwrite areas that may be marked
                 * read only (code, rodata). Clear the RDONLY bit from
                 * the temporary mappings we use during restore.
                 */
                set_pte(dst_pte, pte_clear_rdonly(pte));
        } else if (debug_pagealloc_enabled() && !pte_none(pte)) {
                /*
                 * debug_pagealloc will have removed the PTE_VALID bit if
                 * the page isn't in use by the resume kernel. It may have
                 * been in use by the original kernel, in which case we need
                 * to put it back in our copy to do the restore.
                 *
                 * Before marking this entry valid, check that the pfn
                 * should be mapped.
                 */
                BUG_ON(!pfn_valid(pte_pfn(pte)));

                set_pte(dst_pte, pte_mkpresent(pte_clear_rdonly(pte)));
        }
}
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
                   pte_t *ptep, pte_t orig)
{
        struct tlb_batch *tb = &get_cpu_var(tlb_batch);
        unsigned long nr;

        vaddr &= PAGE_MASK;
        if (pte_exec(orig))
                vaddr |= 0x1UL;

        if (tlb_type != hypervisor &&
            pte_dirty(orig)) {
                unsigned long paddr, pfn = pte_pfn(orig);
                struct address_space *mapping;
                struct page *page;

                if (!pfn_valid(pfn))
                        goto no_cache_flush;

                page = pfn_to_page(pfn);
                if (PageReserved(page))
                        goto no_cache_flush;

                /* A real file page? */
                mapping = page_mapping(page);
                if (!mapping)
                        goto no_cache_flush;

                paddr = (unsigned long) page_address(page);
                if ((paddr ^ vaddr) & (1 << 13))
                        flush_dcache_page_all(mm, page);
        }

no_cache_flush:
        /*
         * if (tb->fullmm) {
         *         put_cpu_var(tlb_batch);
         *         return;
         * }
         */

        nr = tb->tlb_nr;

        if (unlikely(nr != 0 && mm != tb->mm)) {
                flush_tlb_pending();
                nr = 0;
        }

        if (nr == 0)
                tb->mm = mm;

        tb->vaddrs[nr] = vaddr;
        tb->tlb_nr = ++nr;
        if (nr >= TLB_BATCH_NR)
                flush_tlb_pending();

        put_cpu_var(tlb_batch);
}
/**
 * __replace_page - replace page in vma by new page.
 * based on replace_page in mm/ksm.c
 *
 * @vma:      vma that holds the pte pointing to the old page
 * @addr:     address the @old_page is mapped at
 * @old_page: the COWed page we are replacing by @new_page
 * @new_page: the modified page we replace @old_page by
 *
 * Returns 0 on success, -EFAULT on failure.
 */
static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
                          struct page *old_page, struct page *new_page)
{
        struct mm_struct *mm = vma->vm_mm;
        spinlock_t *ptl;
        pte_t *ptep;
        int err;
        /* For mmu_notifiers */
        const unsigned long mmun_start = addr;
        const unsigned long mmun_end   = addr + PAGE_SIZE;
        struct mem_cgroup *memcg;

        err = mem_cgroup_try_charge(new_page, vma->vm_mm, GFP_KERNEL, &memcg,
                                    false);
        if (err)
                return err;

        /* For try_to_free_swap() and munlock_vma_page() below */
        lock_page(old_page);

        mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
        err = -EAGAIN;
        ptep = page_check_address(old_page, mm, addr, &ptl, 0);
        if (!ptep) {
                mem_cgroup_cancel_charge(new_page, memcg, false);
                goto unlock;
        }

        get_page(new_page);
        page_add_new_anon_rmap(new_page, vma, addr, false);
        mem_cgroup_commit_charge(new_page, memcg, false, false);
        lru_cache_add_active_or_unevictable(new_page, vma);

        if (!PageAnon(old_page)) {
                dec_mm_counter(mm, mm_counter_file(old_page));
                inc_mm_counter(mm, MM_ANONPAGES);
        }

        flush_cache_page(vma, addr, pte_pfn(*ptep));
        ptep_clear_flush_notify(vma, addr, ptep);
        set_pte_at_notify(mm, addr, ptep, mk_pte(new_page, vma->vm_page_prot));

        page_remove_rmap(old_page, false);
        if (!page_mapped(old_page))
                try_to_free_swap(old_page);
        pte_unmap_unlock(ptep, ptl);

        if (vma->vm_flags & VM_LOCKED)
                munlock_vma_page(old_page);
        put_page(old_page);

        err = 0;
 unlock:
        mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
        unlock_page(old_page);
        return err;
}
static bool check_gpte(struct lg_cpu *cpu, pte_t gpte)
{
        if ((pte_flags(gpte) & _PAGE_PSE) ||
            pte_pfn(gpte) >= cpu->lg->pfn_limit) {
                kill_guest(cpu, "bad page table entry");
                return false;
        }
        return true;
}
/*
 * Set the page permissions for a particular virtual address.  If the
 * address is a vmalloc mapping (or other non-linear mapping), then
 * find the linear mapping of the page and also set its protections to
 * match.
 */
static void set_aliased_prot(void *v, pgprot_t prot)
{
        int level;
        pte_t *ptep;
        pte_t pte;
        unsigned long pfn;
        struct page *page;
        unsigned char dummy;

        ptep = lookup_address((unsigned long)v, &level);
        BUG_ON(ptep == NULL);

        pfn = pte_pfn(*ptep);
        page = pfn_to_page(pfn);

        pte = pfn_pte(pfn, prot);

        /*
         * Careful: update_va_mapping() will fail if the virtual address
         * we're poking isn't populated in the page tables.  We don't
         * need to worry about the direct map (that's always in the page
         * tables), but we need to be careful about vmap space.  In
         * particular, the top level page table can lazily propagate
         * entries between processes, so if we've switched mms since we
         * vmapped the target in the first place, we might not have the
         * top-level page table entry populated.
         *
         * We disable preemption because we want the same mm active when
         * we probe the target and when we issue the hypercall.  We'll
         * have the same nominal mm, but if we're a kernel thread, lazy
         * mm dropping could change our pgd.
         *
         * Out of an abundance of caution, this uses __get_user() to fault
         * in the target address just in case there's some obscure case
         * in which the target address isn't readable.
         */

        preempt_disable();

        pagefault_disable();    /* Avoid warnings due to being atomic. */
        __get_user(dummy, (unsigned char __user __force *)v);
        pagefault_enable();

        if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
                BUG();

        if (!PageHighMem(page)) {
                void *av = __va(PFN_PHYS(pfn));

                if (av != v)
                        if (HYPERVISOR_update_va_mapping((unsigned long)av, pte, 0))
                                BUG();
        } else
                kmap_flush_unused();

        preempt_enable();
}
unsigned long eeh_token_to_phys(unsigned long token)
{
        if (REGION_ID(token) == EEH_REGION_ID) {
                unsigned long vaddr = IO_TOKEN_TO_ADDR(token);
                pte_t *ptep = find_linux_pte(ioremap_mm.pgd, vaddr);
                unsigned long pa = pte_pfn(*ptep) << PAGE_SHIFT;

                return pa | (vaddr & (PAGE_SIZE - 1));
        } else
                return token;
}
void __meminit vmemmap_verify(pte_t *pte, int node,
                              unsigned long start, unsigned long end)
{
        unsigned long pfn = pte_pfn(*pte);
        int actual_node = early_pfn_to_nid(pfn);

        if (node_distance(actual_node, node) > LOCAL_DISTANCE)
                printk(KERN_WARNING "[%lx-%lx] potential offnode page_structs\n",
                       start, end - 1);
}
/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
                      pte_t pte)
{
#ifdef CONFIG_PPC_STD_MMU
        unsigned long access = 0, trap;
#endif
        unsigned long pfn = pte_pfn(pte);

        /* handle i-cache coherency */
        if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
            !cpu_has_feature(CPU_FTR_NOEXECUTE) &&
            pfn_valid(pfn)) {
                struct page *page = pfn_to_page(pfn);
#ifdef CONFIG_8xx
                /* On 8xx, cache control instructions (particularly
                 * "dcbst" from flush_dcache_icache) fault as write
                 * operation if there is an unpopulated TLB entry
                 * for the address in question. To work around that,
                 * we invalidate the TLB here, thus avoiding dcbst
                 * misbehaviour.
                 */
                _tlbie(address);
#endif
                if (!PageReserved(page) &&
                    !test_bit(PG_arch_1, &page->flags)) {
                        if (vma->vm_mm == current->active_mm) {
                                __flush_dcache_icache((void *) address);
                        } else
                                flush_dcache_icache_page(page);
                        set_bit(PG_arch_1, &page->flags);
                }
        }

#ifdef CONFIG_PPC_STD_MMU
        /* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
        if (!pte_young(pte) || address >= TASK_SIZE)
                return;

        /* We try to figure out if we are coming from an instruction
         * access fault and pass that down to __hash_page so we avoid
         * double-faulting on execution of fresh text. We have to test
         * for regs NULL since init will get here first thing at boot.
         *
         * We also avoid filling the hash if not coming from a fault.
         */
        if (current->thread.regs == NULL)
                return;
        trap = TRAP(current->thread.regs);
        if (trap == 0x400)
                access |= _PAGE_EXEC;
        else if (trap != 0x300)
                return;
        hash_preload(vma->vm_mm, address, access, trap);
#endif /* CONFIG_PPC_STD_MMU */
}
/**
 * eeh_token_to_phys - convert EEH address token to phys address
 * @token: I/O token; should be an address of the form 0xA....
 */
static inline unsigned long eeh_token_to_phys(unsigned long token)
{
        pte_t *ptep;
        unsigned long pa;

        ptep = find_linux_pte(init_mm.pgd, token);
        if (!ptep)
                return token;
        pa = pte_pfn(*ptep) << PAGE_SHIFT;

        return pa | (token & (PAGE_SIZE - 1));
}
struct page *maybe_pte_to_page(pte_t pte)
{
        unsigned long pfn = pte_pfn(pte);
        struct page *page;

        if (unlikely(!pfn_valid(pfn)))
                return NULL;
        page = pfn_to_page(pfn);
        if (PageReserved(page))
                return NULL;
        return page;
}
/*
 * This is a lazy way to flush the icache, provided the CPU has the NX
 * feature enabled.  This is called from set_pte.
 */
void mic_flush_icache_nx(pte_t *ptep, pte_t pte)
{
        /*
         * Do not continue if icache snooping is enabled
         * or if the NX feature doesn't exist.
         */
        if (icache_snoop || !is_nx_support)
                return;

        /*
         * Similar to the ia64 set_pte code.
         * We only flush and set the PG_arch_1 bit if the page is
         * present && page is user page && has backing page struct
         * && page is executable &&
         * (page swapin or new page or page migration ||
         *  copy_on_write with page copying)
         */
        if (pte_present(pte) && pte_user(pte) &&
            pfn_valid(pte_pfn(pte)) && !pte_no_exec(pte) &&
            (!pte_present(*ptep) || pte_pfn(*ptep) != pte_pfn(pte)))
                mic_flush_icache_lazy(pte_page(pte));
}
static long kgsl_cache_range_op(unsigned long addr, int size,
                                unsigned int flags)
{
#ifdef CONFIG_OUTER_CACHE
        unsigned long end;
#endif
        BUG_ON(addr & (KGSL_PAGESIZE - 1));
        BUG_ON(size & (KGSL_PAGESIZE - 1));

        if (flags & KGSL_CACHE_FLUSH)
                dmac_flush_range((const void *)addr,
                                 (const void *)(addr + size));
        else if (flags & KGSL_CACHE_CLEAN)
                dmac_clean_range((const void *)addr,
                                 (const void *)(addr + size));
        else
                dmac_inv_range((const void *)addr,
                               (const void *)(addr + size));

#ifdef CONFIG_OUTER_CACHE
        for (end = addr; end < (addr + size); end += KGSL_PAGESIZE) {
                pte_t *pte_ptr, pte;
                unsigned long physaddr;

                if (flags & KGSL_CACHE_VMALLOC_ADDR)
                        physaddr = vmalloc_to_pfn((void *)end);
                else if (flags & KGSL_CACHE_USER_ADDR) {
                        pte_ptr = kgsl_get_pte_from_vaddr(end);
                        if (!pte_ptr)
                                return -EINVAL;
                        pte = *pte_ptr;
                        physaddr = pte_pfn(pte);
                        pte_unmap(pte_ptr);
                } else
                        return -EINVAL;

                physaddr <<= PAGE_SHIFT;
                if (flags & KGSL_CACHE_FLUSH)
                        outer_flush_range(physaddr, physaddr + KGSL_PAGESIZE);
                else if (flags & KGSL_CACHE_CLEAN)
                        outer_clean_range(physaddr, physaddr + KGSL_PAGESIZE);
                else
                        outer_inv_range(physaddr, physaddr + KGSL_PAGESIZE);
        }
#endif
        return 0;
}
int __init wip_init(void)
{
        unsigned long va = 0xb77e5000;
        int pid = 1072;
        unsigned long long pageFN;
        unsigned long long pa;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        struct mm_struct *mm = NULL;
        int found = 0;
        struct task_struct *task;

        /* Find the mm of the target process. */
        for_each_process(task) {
                if (task->pid == pid)
                        mm = task->mm;
        }
        if (!mm)
                return -ESRCH;

        /* Walk the page tables for the given virtual address. */
        pgd = pgd_offset(mm, va);
        if (!pgd_none(*pgd) && !pgd_bad(*pgd)) {
                pud = pud_offset(pgd, va);
                if (!pud_none(*pud) && !pud_bad(*pud)) {
                        pmd = pmd_offset(pud, va);
                        if (!pmd_none(*pmd) && !pmd_bad(*pmd)) {
                                pte = pte_offset_kernel(pmd, va);
                                if (!pte_none(*pte)) {
                                        pageFN = pte_pfn(*pte);
                                        pa = ((pageFN << 12) | (va & 0x00000FFF));
                                        found = 1;
                                        printk(KERN_ALERT
                                               "Physical Address: 0x%08llx\npfn: 0x%04llx\n",
                                               pa, pageFN);
                                }
                        }
                }
        }

        if (pgd_none(*pgd) || pud_none(*pud) || pmd_none(*pmd) || pte_none(*pte)) {
                unsigned long long swapID = (pte_val(*pte) >> 32);
                found = 1;
                printk(KERN_ALERT "swap ID: 0x%08llx\n", swapID);
        }

        if (!found)
                printk(KERN_ALERT "No physical page or swap entry found\n");

        return 0;
}
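/*
 * Companion sketch (not part of the original snippet): minimal module
 * boilerplate that an init routine like wip_init() above would typically
 * be paired with.  The exit handler name and the license string are
 * assumptions for illustration only; on older kernels for_each_process()
 * comes from <linux/sched.h> rather than <linux/sched/signal.h>.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>         /* for_each_process() */

static void __exit wip_exit(void)
{
        printk(KERN_ALERT "wip module unloaded\n");
}

module_init(wip_init);
module_exit(wip_exit);
MODULE_LICENSE("GPL");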
/* Translate address of a vmalloc'd thing to a linear map address */
static void *real_vmalloc_addr(void *x)
{
        unsigned long addr = (unsigned long) x;
        pte_t *p;

        /*
         * Assume we don't have huge pages in vmalloc space, so we don't
         * need to worry about THP collapse/split.  This is called only in
         * real mode with MSR_EE = 0, hence we won't need irq_save/restore.
         */
        p = find_init_mm_pte(addr, NULL);
        if (!p || !pte_present(*p))
                return NULL;
        addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
        return __va(addr);
}
static inline bool is_crashed_pfn_valid(unsigned long pfn)
{
#ifndef CONFIG_X86_PAE
        /*
         * A non-PAE kdump kernel executed from a PAE one will crop the high
         * pte bits and poke unwanted space, counting again from address 0;
         * we don't want that.  The pte must fit into an unsigned long.  In
         * fact the test checks the high 12 bits for being zero (the pfn
         * will be shifted left by PAGE_SHIFT).
         */
        return pte_pfn(pfn_pte(pfn, __pgprot(0))) == pfn;
#else
        return true;
#endif
}
/*
 * Called with mm->page_table_lock held to protect against other
 * threads/the swapper from ripping pte's out from under us.
 */
static int filemap_sync_pte(pte_t *ptep, struct vm_area_struct *vma,
                            unsigned long address, unsigned int flags)
{
        pte_t pte = *ptep;
        unsigned long pfn = pte_pfn(pte);
        struct page *page;

        if (pte_present(pte) && pfn_valid(pfn)) {
                page = pfn_to_page(pfn);
                if (!PageReserved(page) &&
                    (ptep_clear_flush_dirty(vma, address, ptep) ||
                     page_test_and_clear_dirty(page)))
                        set_page_dirty(page);
        }
        return 0;
}
/*
 * For SH-4, we have our own implementation for ptep_get_and_clear
 */
inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
                                pte_t *ptep)
{
        pte_t pte = *ptep;

        pte_clear(mm, addr, ptep);
        if (!pte_not_present(pte)) {
                unsigned long pfn = pte_pfn(pte);
                if (pfn_valid(pfn)) {
                        struct page *page = pfn_to_page(pfn);
                        struct address_space *mapping = page_mapping(page);
                        if (!mapping || !mapping_writably_mapped(mapping))
                                __clear_bit(PG_mapped, &page->flags);
                }
        }
        return pte;
}
void __update_cache(struct vm_area_struct *vma,
                    unsigned long address, pte_t pte)
{
        struct page *page;
        unsigned long pfn = pte_pfn(pte);

        if (!boot_cpu_data.dcache.n_aliases)
                return;

        page = pfn_to_page(pfn);
        if (pfn_valid(pfn)) {
                int dirty = !test_and_set_bit(PG_dcache_clean, &page->flags);
                if (dirty)
                        __flush_purge_region(page_address(page), PAGE_SIZE);
        }
}
static void __make_page_writable(void *va)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t pte, *ptep;
        unsigned long addr = (unsigned long) va;

        pgd = pgd_offset_k(addr);
        pud = pud_offset(pgd, addr);
        pmd = pmd_offset(pud, addr);
        ptep = pte_offset_kernel(pmd, addr);

        pte.pte = ptep->pte | _PAGE_RW;
        if (HYPERVISOR_update_va_mapping(addr, pte, 0))
                xen_l1_entry_update(ptep, pte); /* fallback */

        if ((addr >= VMALLOC_START) && (addr < VMALLOC_END))
                __make_page_writable(__va(pte_pfn(pte) << PAGE_SHIFT));
}
/*
 * The performance critical leaf functions are made noinline otherwise gcc
 * inlines everything into a single function which results in too much
 * register pressure.
 */
static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
                                  unsigned long end, int write,
                                  struct page **pages, int *nr)
{
        unsigned long mask, result;
        pte_t *ptep;

        if (tlb_type == hypervisor) {
                result = _PAGE_PRESENT_4V|_PAGE_P_4V;
                if (write)
                        result |= _PAGE_WRITE_4V;
        } else {
                result = _PAGE_PRESENT_4U|_PAGE_P_4U;
                if (write)
                        result |= _PAGE_WRITE_4U;
        }
        mask = result | _PAGE_SPECIAL;

        ptep = pte_offset_kernel(&pmd, addr);
        do {
                struct page *page, *head;
                pte_t pte = *ptep;

                if ((pte_val(pte) & mask) != result)
                        return 0;

                VM_BUG_ON(!pfn_valid(pte_pfn(pte)));

                /* The hugepage case is simplified on sparc64 because
                 * we encode the sub-page pfn offsets into the
                 * hugepage PTEs.  We could optimize this in the future
                 * to use page_cache_add_speculative() for the hugepage case.
                 */
                page = pte_page(pte);
                head = compound_head(page);
                if (!page_cache_get_speculative(head))
                        return 0;
                if (unlikely(pte_val(pte) != pte_val(*ptep))) {
                        put_page(head);
                        return 0;
                }

                pages[*nr] = page;
                (*nr)++;
        } while (ptep++, addr += PAGE_SIZE, addr != end);

        return 1;
}
static int get_gate_page(struct mm_struct *mm, unsigned long address,
                unsigned int gup_flags, struct vm_area_struct **vma,
                struct page **page)
{
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        int ret = -EFAULT;

        /* user gate pages are read-only */
        if (gup_flags & FOLL_WRITE)
                return -EFAULT;
        if (address > TASK_SIZE)
                pgd = pgd_offset_k(address);
        else
                pgd = pgd_offset_gate(mm, address);
        BUG_ON(pgd_none(*pgd));
        p4d = p4d_offset(pgd, address);
        BUG_ON(p4d_none(*p4d));
        pud = pud_offset(p4d, address);
        BUG_ON(pud_none(*pud));
        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd))
                return -EFAULT;
        VM_BUG_ON(pmd_trans_huge(*pmd));
        pte = pte_offset_map(pmd, address);
        if (pte_none(*pte))
                goto unmap;
        *vma = get_gate_vma(mm);
        if (!page)
                goto out;
        *page = vm_normal_page(*vma, address, *pte);
        if (!*page) {
                if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
                        goto unmap;
                *page = pte_page(*pte);
        }
        get_page(*page);
out:
        ret = 0;
unmap:
        pte_unmap(pte);
        return ret;
}
asmlinkage long long sys_my_syscall(int pid, unsigned long long va)
{
        unsigned long long pageFN;
        unsigned long long pa;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        struct mm_struct *mm = NULL;
        int found = 0;
        struct task_struct *task;

        /* Find the mm of the target process. */
        for_each_process(task) {
                if (task->pid == pid)
                        mm = task->mm;
        }
        if (!mm)
                return -ESRCH;

        /* Walk the page tables for the given virtual address. */
        pgd = pgd_offset(mm, va);
        if (!pgd_none(*pgd) && !pgd_bad(*pgd)) {
                pud = pud_offset(pgd, va);
                if (!pud_none(*pud) && !pud_bad(*pud)) {
                        pmd = pmd_offset(pud, va);
                        if (!pmd_none(*pmd) && !pmd_bad(*pmd)) {
                                pte = pte_offset_kernel(pmd, va);
                                if (!pte_none(*pte)) {
                                        pageFN = pte_pfn(*pte);
                                        pa = ((pageFN << 12) | (va & 0x00000FFF));
                                        found = 1;
                                        return pa;
                                }
                        }
                }
        }

        if (pgd_none(*pgd) || pud_none(*pud) || pmd_none(*pmd) || pte_none(*pte)) {
                unsigned long long swapID = (pte_val(*pte) >> 32);
                found = 1;
                return swapID;
        }

        /* No translation found. */
        return -1;
}
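/*
 * Hypothetical userspace caller for sys_my_syscall() above (not part of the
 * kernel snippet).  The syscall number __NR_my_syscall is an assumption and
 * depends on which slot the syscall was wired into the syscall table.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#define __NR_my_syscall 333     /* assumed syscall number */

int main(void)
{
        unsigned long long buf = 0x1234;
        /* Translate the virtual address of 'buf' in this process. */
        long ret = syscall(__NR_my_syscall, getpid(),
                           (unsigned long long)&buf);

        if (ret == -1)
                perror("my_syscall");
        else
                printf("physical address (or swap ID): 0x%lx\n",
                       (unsigned long)ret);
        return 0;
}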
static void xen_load_gdt(const struct desc_ptr *dtr)
{
        unsigned long va = dtr->address;
        unsigned int size = dtr->size + 1;
        unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
        unsigned long frames[pages];
        int f;

        /*
         * A GDT can be up to 64k in size, which corresponds to 8192
         * 8-byte entries, or 16 4k pages.
         */

        BUG_ON(size > 65536);
        BUG_ON(va & ~PAGE_MASK);

        for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
                int level;
                pte_t *ptep;
                unsigned long pfn, mfn;
                void *virt;

                /*
                 * The GDT is per-cpu and is in the percpu data area.
                 * That can be virtually mapped, so we need to do a
                 * page-walk to get the underlying MFN for the
                 * hypercall.  The page can also be in the kernel's
                 * linear range, so we need to RO that mapping too.
                 */
                ptep = lookup_address(va, &level);
                BUG_ON(ptep == NULL);

                pfn = pte_pfn(*ptep);
                mfn = pfn_to_mfn(pfn);
                virt = __va(PFN_PHYS(pfn));

                frames[f] = mfn;

                make_lowmem_page_readonly((void *)va);
                make_lowmem_page_readonly(virt);
        }

        if (HYPERVISOR_set_gdt(frames, size / sizeof(struct desc_struct)))
                BUG();
}
static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr,
                                             size_t n, int write_user)
{
        struct mm_struct *mm = current->mm;
        unsigned long offset, pfn, done, size;
        pte_t *pte;
        void *from, *to;

        done = 0;
retry:
        spin_lock(&mm->page_table_lock);
        do {
                pte = follow_table(mm, uaddr);
                if ((unsigned long) pte < 0x1000)
                        goto fault;
                if (!pte_present(*pte)) {
                        pte = (pte_t *) 0x11;
                        goto fault;
                } else if (write_user && !pte_write(*pte)) {
                        pte = (pte_t *) 0x04;
                        goto fault;
                }

                pfn = pte_pfn(*pte);
                offset = uaddr & (PAGE_SIZE - 1);
                size = min(n - done, PAGE_SIZE - offset);
                if (write_user) {
                        to = (void *)((pfn << PAGE_SHIFT) + offset);
                        from = kptr + done;
                } else {
                        from = (void *)((pfn << PAGE_SHIFT) + offset);
                        to = kptr + done;
                }
                memcpy(to, from, size);
                done += size;
                uaddr += size;
        } while (done < n);
        spin_unlock(&mm->page_table_lock);
        return n - done;
fault:
        spin_unlock(&mm->page_table_lock);
        if (__handle_fault(uaddr, (unsigned long) pte, write_user))
                return n - done;
        goto retry;
}
static void xen_load_gdt(const struct desc_ptr *dtr)
{
        unsigned long va = dtr->address;
        unsigned int size = dtr->size + 1;
        unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
        unsigned long frames[pages];
        int f;

        BUG_ON(size > 65536);
        BUG_ON(va & ~PAGE_MASK);

        for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
                int level;
                pte_t *ptep;
                unsigned long pfn, mfn;
                void *virt;

                ptep = lookup_address(va, &level);
                BUG_ON(ptep == NULL);

                pfn = pte_pfn(*ptep);
                mfn = pfn_to_mfn(pfn);
                virt = __va(PFN_PHYS(pfn));

                frames[f] = mfn;

                make_lowmem_page_readonly((void *)va);
                make_lowmem_page_readonly(virt);
        }

        if (HYPERVISOR_set_gdt(frames, size / sizeof(struct desc_struct)))
                BUG();
}
/*
 * Handle i/d cache flushing, called from set_pte_at() or
 * ptep_set_access_flags().
 */
static pte_t do_dcache_icache_coherency(pte_t pte)
{
        unsigned long pfn = pte_pfn(pte);
        struct page *page;

        if (unlikely(!pfn_valid(pfn)))
                return pte;
        page = pfn_to_page(pfn);

        if (!PageReserved(page) && !test_bit(PG_arch_1, &page->flags)) {
                pr_debug("do_dcache_icache_coherency... flushing\n");
                flush_dcache_icache_page(page);
                set_bit(PG_arch_1, &page->flags);
        } else
                pr_debug("do_dcache_icache_coherency... already clean\n");

        return __pte(pte_val(pte) | _PAGE_HWEXEC);
}
void __update_cache(struct vm_area_struct *vma, unsigned long address,
                    pte_t pte)
{
        struct page *page;
        unsigned long pfn, addr;

        pfn = pte_pfn(pte);
        if (pfn_valid(pfn) && (page = pfn_to_page(pfn), page_mapping(page)) &&
            Page_dcache_dirty(page)) {
                if (pages_do_alias((unsigned long)page_address(page),
                                   address & PAGE_MASK)) {
                        addr = (unsigned long) page_address(page);
                        flush_data_cache_page(addr);
                }

                ClearPageDcacheDirty(page);
        }
}
static void mips_flush_dcache_from_pte(pte_t pteval, unsigned long address)
{
        struct page *page;
        unsigned long pfn = pte_pfn(pteval);

        if (unlikely(!pfn_valid(pfn)))
                return;

        page = pfn_to_page(pfn);
        if (page_mapping(page) && Page_dcache_dirty(page)) {
                unsigned long page_addr = (unsigned long) page_address(page);

                if (!cpu_has_ic_fills_f_dc ||
                    pages_do_alias(page_addr, address & PAGE_MASK))
                        flush_data_cache_page(page_addr);
                ClearPageDcacheDirty(page);
        }
}
void __update_cache(struct vm_area_struct *vma, unsigned long address,
                    pte_t pte)
{
        struct page *page;
        unsigned long pfn, addr;
        int exec = (vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc;

        pfn = pte_pfn(pte);
        if (unlikely(!pfn_valid(pfn)))
                return;
        page = pfn_to_page(pfn);
        if (page_mapping(page) && Page_dcache_dirty(page)) {
                addr = (unsigned long) page_address(page);
                if (exec || pages_do_alias(addr, address & PAGE_MASK))
                        flush_data_cache_page(addr);
                ClearPageDcacheDirty(page);
        }
}