/*
 * Update the MMU hash table to correspond with a change to
 * a Linux PTE.  If wrprot is true, it is permissible to
 * change the existing HPTE to read-only rather than removing it
 * (if we remove it we should clear the _PTE_HPTEFLAGS bits).
 */
void hpte_update(struct mm_struct *mm, unsigned long addr,
                 unsigned long pte, int wrprot)
{
        int i;
        unsigned long context = 0;
        struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);

        if (REGION_ID(addr) == USER_REGION_ID)
                context = mm->context.id;
        i = batch->index;

        /*
         * This can happen when we are in the middle of a TLB batch and
         * we encounter memory pressure (eg copy_page_range when it tries
         * to allocate a new pte).  If we have to reclaim memory and end
         * up scanning and resetting referenced bits then our batch context
         * will change mid stream.
         */
        if (i != 0 && (context != batch->context ||
                       batch->large != pte_huge(pte))) {
                flush_tlb_pending();
                i = 0;
        }
        if (i == 0) {
                batch->context = context;
                batch->mm = mm;
                batch->large = pte_huge(pte);
        }
        batch->pte[i] = __pte(pte);
        batch->addr[i] = addr;
        batch->index = ++i;
        if (i >= PPC64_TLB_BATCH_NR)
                flush_tlb_pending();
}
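/*
 * Illustrative sketch (not taken from the kernel source): the per-CPU
 * batch structure implied by the accesses above, plus a skeletal drain
 * routine.  The field layout and the __flush_batch() helper are
 * assumptions made for illustration only; the real flush_tlb_pending()
 * hands the accumulated PTEs and addresses to the hash-table flush code
 * in one go instead of invalidating them one at a time.
 */
struct ppc64_tlb_batch_sketch {
        unsigned long           index;          /* entries queued so far */
        unsigned long           context;        /* MMU context of this batch */
        struct mm_struct        *mm;
        pte_t                   pte[PPC64_TLB_BATCH_NR];
        unsigned long           addr[PPC64_TLB_BATCH_NR];
        unsigned int            large;          /* batch holds huge-page PTEs */
};

static void flush_tlb_pending_sketch(struct ppc64_tlb_batch_sketch *batch)
{
        if (batch->index == 0)
                return;
        /* hypothetical helper: invalidate every queued translation at once */
        __flush_batch(batch->pte, batch->addr, batch->index, batch->context);
        batch->index = 0;
}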
static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
                              bool exec)
{
        struct tlb_batch *tb = &get_cpu_var(tlb_batch);
        unsigned long nr;

        vaddr &= PAGE_MASK;
        if (exec)
                vaddr |= 0x1UL;

        nr = tb->tlb_nr;

        if (unlikely(nr != 0 && mm != tb->mm)) {
                flush_tlb_pending();
                nr = 0;
        }

        if (!tb->active) {
                flush_tsb_user_page(mm, vaddr);
                global_flush_tlb_page(mm, vaddr);
                goto out;
        }

        if (nr == 0)
                tb->mm = mm;

        tb->vaddrs[nr] = vaddr;
        tb->tlb_nr = ++nr;
        if (nr >= TLB_BATCH_NR)
                flush_tlb_pending();

out:
        put_cpu_var(tlb_batch);
}
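/*
 * Illustrative sketch (not from the kernel source): the per-CPU
 * struct tlb_batch implied by the accesses above.  Because vaddr is
 * page-aligned before being queued, bit 0 is free and is reused to
 * tell the flush code that the mapping was executable.  Field names
 * follow the code above; anything beyond that is an assumption.
 */
struct tlb_batch_sketch {
        unsigned long           active;                 /* inside lazy MMU batching? */
        struct mm_struct        *mm;                    /* mm all queued vaddrs belong to */
        unsigned long           tlb_nr;                 /* number of queued addresses */
        unsigned long           vaddrs[TLB_BATCH_NR];   /* page addresses, bit 0 = exec */
};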
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
                   pte_t *ptep, pte_t orig)
{
        struct tlb_batch *tb = &get_cpu_var(tlb_batch);
        unsigned long nr;

        vaddr &= PAGE_MASK;
        if (pte_exec(orig))
                vaddr |= 0x1UL;

        if (tlb_type != hypervisor &&
            pte_dirty(orig)) {
                unsigned long paddr, pfn = pte_pfn(orig);
                struct address_space *mapping;
                struct page *page;

                if (!pfn_valid(pfn))
                        goto no_cache_flush;

                page = pfn_to_page(pfn);
                if (PageReserved(page))
                        goto no_cache_flush;

                /* A real file page? */
                mapping = page_mapping(page);
                if (!mapping)
                        goto no_cache_flush;

                paddr = (unsigned long) page_address(page);
                if ((paddr ^ vaddr) & (1 << 13))
                        flush_dcache_page_all(mm, page);
        }

no_cache_flush:
        /*
         * if (tb->fullmm) {
         *         put_cpu_var(tlb_batch);
         *         return;
         * }
         */
        nr = tb->tlb_nr;

        if (unlikely(nr != 0 && mm != tb->mm)) {
                flush_tlb_pending();
                nr = 0;
        }

        if (nr == 0)
                tb->mm = mm;

        tb->vaddrs[nr] = vaddr;
        tb->tlb_nr = ++nr;
        if (nr >= TLB_BATCH_NR)
                flush_tlb_pending();

        put_cpu_var(tlb_batch);
}
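/*
 * Illustrative example (not from the kernel source) of the aliasing test
 * above.  On the older sparc64 CPUs this path cares about, the D-cache is
 * virtually indexed and larger than one 8K page, so bit 13 of an address
 * selects the cache "colour".  If the kernel's linear mapping of the page
 * (paddr) and the user mapping (vaddr) disagree in bit 13, dirty data can
 * sit at two different cache indexes, and the page must be flushed before
 * the translation is torn down.  Function and values below are made up
 * purely to demonstrate the predicate.
 */
#include <stdio.h>

static int needs_dcache_flush(unsigned long paddr, unsigned long vaddr)
{
        return ((paddr ^ vaddr) & (1UL << 13)) != 0;
}

int main(void)
{
        /* same colour: bit 13 identical in both mappings, no flush needed */
        printf("%d\n", needs_dcache_flush(0x100000UL, 0x7f0000000000UL));
        /* different colour: bit 13 differs, flush required */
        printf("%d\n", needs_dcache_flush(0x102000UL, 0x7f0000000000UL));
        return 0;
}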
/*
 * Update the MMU hash table to correspond with a change to
 * a Linux PTE.  If wrprot is true, it is permissible to
 * change the existing HPTE to read-only rather than removing it
 * (if we remove it we should clear the _PTE_HPTEFLAGS bits).
 */
void hpte_update(struct mm_struct *mm, unsigned long addr,
                 pte_t *ptep, unsigned long pte, int huge)
{
        struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
        unsigned long vsid;
        unsigned int psize;
        int i;

        i = batch->index;

        /* We mask the address for the base page size. Huge pages will
         * have applied their own masking already
         */
        addr &= PAGE_MASK;

        /* Get page size (maybe move back to caller) */
        if (huge) {
#ifdef CONFIG_HUGETLB_PAGE
                psize = mmu_huge_psize;
#else
                BUG();
                psize = pte_pagesize_index(pte); /* shutup gcc */
#endif
        } else
                psize = pte_pagesize_index(pte);

        /*
         * This can happen when we are in the middle of a TLB batch and
         * we encounter memory pressure (eg copy_page_range when it tries
         * to allocate a new pte).  If we have to reclaim memory and end
         * up scanning and resetting referenced bits then our batch context
         * will change mid stream.
         *
         * We also need to ensure only one page size is present in a given
         * batch
         */
        if (i != 0 && (mm != batch->mm || batch->psize != psize)) {
                flush_tlb_pending();
                i = 0;
        }
        if (i == 0) {
                batch->mm = mm;
                batch->psize = psize;
        }
        if (!is_kernel_addr(addr)) {
                vsid = get_vsid(mm->context.id, addr);
                WARN_ON(vsid == 0);
        } else
                vsid = get_kernel_vsid(addr);
        batch->vaddr[i] = (vsid << 28) | (addr & 0x0fffffff);
        batch->pte[i] = __real_pte(__pte(pte), ptep);
        batch->index = ++i;
        if (i >= PPC64_TLB_BATCH_NR)
                flush_tlb_pending();
}
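/*
 * Illustrative sketch (not from the kernel source): how the batch's
 * vaddr is composed above.  With the hash MMU's 256MB segments, the low
 * 28 bits of the effective address are the offset within the segment and
 * the bits above form the ESID, which get_vsid()/get_kernel_vsid()
 * translate into a VSID.  The flush code wants the VSID-based virtual
 * address, so the two parts are glued back together.  The helper name is
 * hypothetical.
 */
static inline unsigned long make_hash_vaddr(unsigned long vsid,
                                            unsigned long ea)
{
        return (vsid << 28) | (ea & 0x0fffffffUL);      /* VSID | segment offset */
}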
void arch_leave_lazy_mmu_mode(void)
{
        struct tlb_batch *tb = &__get_cpu_var(tlb_batch);

        if (tb->tlb_nr)
                flush_tlb_pending();
        tb->active = 0;
}
void arch_leave_lazy_mmu_mode(void)
{
        struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);

        if (tb->tlb_nr)
                flush_tlb_pending();
        tb->active = 0;
}
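/*
 * Illustrative sketch (not from the kernel source): the matching "enter"
 * side implied by tb->active above.  While active is set,
 * tlb_batch_add_one() queues addresses instead of flushing them one by
 * one; leaving lazy mode then drains whatever was queued.  The _sketch
 * suffix marks this as an assumption, not the kernel's implementation.
 */
void arch_enter_lazy_mmu_mode_sketch(void)
{
        struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);

        tb->active = 1;
}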
/*
 * Update the MMU hash table to correspond with a change to
 * a Linux PTE.  If wrprot is true, it is permissible to
 * change the existing HPTE to read-only rather than removing it
 * (if we remove it we should clear the _PTE_HPTEFLAGS bits).
 */
void hpte_update(pte_t *ptep, unsigned long pte, int wrprot)
{
        struct page *ptepage;
        struct mm_struct *mm;
        unsigned long addr;
        int i;
        unsigned long context = 0;
        struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);

        ptepage = virt_to_page(ptep);
        mm = (struct mm_struct *) ptepage->mapping;
        addr = ptepage->index +
                (((unsigned long)ptep & ~PAGE_MASK) * PTRS_PER_PTE);
        if (REGION_ID(addr) == USER_REGION_ID)
                context = mm->context.id;
        i = batch->index;

        /*
         * This can happen when we are in the middle of a TLB batch and
         * we encounter memory pressure (eg copy_page_range when it tries
         * to allocate a new pte).  If we have to reclaim memory and end
         * up scanning and resetting referenced bits then our batch context
         * will change mid stream.
         */
        if (unlikely(i != 0 && context != batch->context)) {
                flush_tlb_pending();
                i = 0;
        }
        if (i == 0) {
                batch->context = context;
                batch->mm = mm;
        }
        batch->pte[i] = __pte(pte);
        batch->addr[i] = addr;
        batch->index = ++i;
        if (i >= PPC64_TLB_BATCH_NR)
                flush_tlb_pending();
}
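/*
 * Worked example (illustrative, not from the kernel source) of the
 * address reconstruction above.  ptepage->index holds the first user
 * address covered by this page of PTEs, and the byte offset of ptep
 * within that page identifies which entry it is.  Since a PTE page holds
 * PTRS_PER_PTE entries of sizeof(pte_t) bytes each, and every entry maps
 * one PAGE_SIZE page:
 *
 *   byte_offset * PTRS_PER_PTE
 *     = (entry_index * sizeof(pte_t)) * PTRS_PER_PTE
 *     = entry_index * PAGE_SIZE        (when the PTE page fills a full page)
 *
 * e.g. with 4K pages and 8-byte PTEs (PTRS_PER_PTE = 512), entry 3 sits
 * at byte offset 24, and 24 * 512 = 12288 = 3 * 4096.
 */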
void generic_mach_cpu_die(void)
{
        unsigned int cpu;

        local_irq_disable();
        cpu = smp_processor_id();
        printk(KERN_DEBUG "CPU%d offline\n", cpu);
        __get_cpu_var(cpu_state) = CPU_DEAD;
        smp_wmb();
        while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
                cpu_relax();
#ifdef CONFIG_PPC64
        flush_tlb_pending();
#endif
        cpu_set(cpu, cpu_online_map);
        local_irq_enable();
}