/*==========================================================================*
 * Name:         smp_invalidate_interrupt
 *
 * Description:  This routine executes on CPU which received
 *               'INVALIDATE_TLB_IPI'.
 *               1.Flush local TLB.
 *               2.Report flush TLB process was finished.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    NONE
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_invalidate_interrupt(void)
{
	int cpu_id = smp_processor_id();
	unsigned long *mmc = &flush_mm->context[cpu_id];

	if (!cpumask_test_cpu(cpu_id, &flush_cpumask))
		return;

	if (flush_va == FLUSH_ALL) {
		*mmc = NO_CONTEXT;
		if (flush_mm == current->active_mm)
			activate_context(flush_mm);
		else
			cpumask_clear_cpu(cpu_id, mm_cpumask(flush_mm));
	} else {
		unsigned long va = flush_va;

		if (*mmc != NO_CONTEXT) {
			va &= PAGE_MASK;
			va |= (*mmc & MMU_CONTEXT_ASID_MASK);
			__flush_tlb_page(va);
		}
	}
	cpumask_clear_cpu(cpu_id, (cpumask_t*)&flush_cpumask);
}
/*
 * copy_user_page
 * @to: P1 address
 * @from: P1 address
 * @address: U0 address to be mapped
 * @page: page (virt_to_page(to))
 */
void copy_user_page(void *to, void *from, unsigned long address,
		    struct page *page)
{
	__set_bit(PG_mapped, &page->flags);
	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
		copy_page(to, from);
	else {
		unsigned long phys_addr = PHYSADDR(to);
		unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS);
		pgd_t *pgd = pgd_offset_k(p3_addr);
		pud_t *pud = pud_offset(pgd, p3_addr);
		pmd_t *pmd = pmd_offset(pud, p3_addr);
		pte_t *pte = pte_offset_kernel(pmd, p3_addr);
		pte_t entry;
		unsigned long flags;

		entry = pfn_pte(phys_addr >> PAGE_SHIFT, PAGE_KERNEL);
		mutex_lock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
		set_pte(pte, entry);
		local_irq_save(flags);
		__flush_tlb_page(get_asid(), p3_addr);
		local_irq_restore(flags);
		update_mmu_cache(NULL, p3_addr, entry);
		__copy_user_page((void *)p3_addr, from, to);
		pte_clear(&init_mm, p3_addr, pte);
		mutex_unlock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
	}
}
/*==========================================================================*
 * Name:         smp_flush_tlb_page
 *
 * Description:  This routine flushes one page.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    *vma - a pointer to the vma struct include va
 *               va - virtual address for flush TLB
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu_id;
	cpumask_t cpu_mask;
	unsigned long *mmc;
	unsigned long flags;

	preempt_disable();
	cpu_id = smp_processor_id();
	mmc = &mm->context[cpu_id];
	cpumask_copy(&cpu_mask, mm_cpumask(mm));
	cpumask_clear_cpu(cpu_id, &cpu_mask);

#ifdef DEBUG_SMP
	if (!mm)
		BUG();
#endif

	if (*mmc != NO_CONTEXT) {
		local_irq_save(flags);
		va &= PAGE_MASK;
		va |= (*mmc & MMU_CONTEXT_ASID_MASK);
		__flush_tlb_page(va);
		local_irq_restore(flags);
	}
	if (!cpumask_empty(&cpu_mask))
		flush_tlb_others(cpu_mask, mm, vma, va);

	preempt_enable();
}
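flush_tlb_others() is called above but not shown. The sketch below illustrates the sender side of the shootdown that pairs with smp_invalidate_interrupt(): publish flush_mm/flush_vma/flush_va, mark the target CPUs in flush_cpumask, raise the IPI, and spin until every receiver has cleared its own bit. This is a simplified sketch rather than the actual m32r implementation; flush_lock, send_IPI_mask() and INVALIDATE_TLB_IPI are assumed names.

/*
 * Simplified sketch of the sender side of the TLB shootdown.
 * Not the actual m32r flush_tlb_others(); the lock and IPI helper
 * names (flush_lock, send_IPI_mask, INVALIDATE_TLB_IPI) are assumptions.
 */
static void flush_tlb_others_sketch(cpumask_t cpumask, struct mm_struct *mm,
				    struct vm_area_struct *vma, unsigned long va)
{
	spin_lock(&flush_lock);		/* one shootdown in flight at a time */

	flush_mm = mm;			/* receivers read these in           */
	flush_vma = vma;		/* smp_invalidate_interrupt()        */
	flush_va = va;

	cpumask_or(&flush_cpumask, &flush_cpumask, &cpumask);
	send_IPI_mask(&cpumask, INVALIDATE_TLB_IPI, 0);

	/* Each receiver clears its own bit once its local flush is done. */
	while (!cpumask_empty(&flush_cpumask))
		cpu_relax();

	flush_mm = NULL;
	flush_vma = NULL;
	flush_va = 0;
	spin_unlock(&flush_lock);
}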
/*
 * copy_user_page
 * @to: P1 address
 * @from: P1 address
 * @address: U0 address to be mapped
 * @page: page (virt_to_page(to))
 */
void copy_user_page(void *to, void *from, unsigned long address,
		    struct page *page)
{
	__set_bit(PG_mapped, &page->flags);
	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
		copy_page(to, from);
	else {
		pgprot_t pgprot = __pgprot(_PAGE_PRESENT |
					   _PAGE_RW | _PAGE_CACHABLE |
					   _PAGE_DIRTY | _PAGE_ACCESSED |
					   _PAGE_HW_SHARED | _PAGE_FLAGS_HARD);
		unsigned long phys_addr = PHYSADDR(to);
		unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS);
		pgd_t *dir = pgd_offset_k(p3_addr);
		pmd_t *pmd = pmd_offset(dir, p3_addr);
		pte_t *pte = pte_offset_kernel(pmd, p3_addr);
		pte_t entry;
		unsigned long flags;

		entry = pfn_pte(phys_addr >> PAGE_SHIFT, pgprot);
		down(&p3map_sem[(address & CACHE_ALIAS)>>12]);
		set_pte(pte, entry);
		local_irq_save(flags);
		__flush_tlb_page(get_asid(), p3_addr);
		local_irq_restore(flags);
		update_mmu_cache(NULL, p3_addr, entry);
		__copy_user_page((void *)p3_addr, from, to);
		pte_clear(&init_mm, p3_addr, pte);
		up(&p3map_sem[(address & CACHE_ALIAS)>>12]);
	}
}
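Both copy_user_page() variants above rest on the same observation: the copy can go straight through the kernel P1 mapping only when 'to' and the user address share a cache colour; otherwise the page is temporarily mapped in the P3 window at the user colour. A minimal illustration follows, assuming a 16 KiB virtually indexed, direct-mapped D-cache with 4 KiB pages so that the alias mask works out to 0x3000; the real CACHE_ALIAS value is CPU specific and this figure is an assumption.

/* Illustration only: an assumed alias mask, not the kernel's CACHE_ALIAS. */
#define EXAMPLE_CACHE_ALIAS	0x3000UL

/* Two virtual addresses index the same cache lines iff bits 12-13 match. */
static int same_cache_colour(unsigned long kernel_va, unsigned long user_va)
{
	return ((kernel_va ^ user_va) & EXAMPLE_CACHE_ALIAS) == 0;
}

/*
 * When the colours differ, the temporary mapping is placed in the P3
 * window at the user page's colour, so the copy dirties the cache lines
 * the user mapping will actually hit.
 */
static unsigned long p3_window_address(unsigned long user_va)
{
	return P3SEG + (user_va & EXAMPLE_CACHE_ALIAS);
}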
void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	if (vma->vm_mm && vma->vm_mm->context.id != NO_CONTEXT) {
		unsigned long flags;
		unsigned long asid;
		unsigned long saved_asid = MMU_NO_ASID;

		asid = vma->vm_mm->context.id & MMU_CONTEXT_ASID_MASK;
		page &= PAGE_MASK;

		local_irq_save(flags);
		if (vma->vm_mm != current->mm) {
			saved_asid = get_asid();
			set_asid(asid);
		}
		__flush_tlb_page(asid, page);
		if (saved_asid != MMU_NO_ASID)
			set_asid(saved_asid);
		local_irq_restore(flags);
	}
}
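flush_tlb_page() above and flush_tlb_range() below share the same idiom for flushing entries that belong to an mm other than the current one: install the target ASID just long enough to issue the per-page flushes, then restore the ASID that was live. A hedged sketch of that idiom as a standalone helper (the helper name is hypothetical; it assumes the caller has already disabled interrupts):

/*
 * Hypothetical helper capturing the ASID-borrowing pattern used by
 * flush_tlb_page() and flush_tlb_range().  The caller must keep
 * interrupts disabled, since get_asid()/set_asid() touch live MMU state.
 */
static void flush_one_page_for_mm(struct mm_struct *mm, unsigned long page)
{
	unsigned long asid = mm->context.id & MMU_CONTEXT_ASID_MASK;
	unsigned long saved_asid = MMU_NO_ASID;

	if (mm != current->mm) {
		saved_asid = get_asid();	/* remember the live ASID   */
		set_asid(asid);			/* borrow the target's ASID */
	}
	__flush_tlb_page(asid, page & PAGE_MASK);
	if (saved_asid != MMU_NO_ASID)
		set_asid(saved_asid);		/* put the original back    */
}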
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context.id != NO_CONTEXT) {
		unsigned long flags;
		int size;

		local_irq_save(flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
			mm->context.id = NO_CONTEXT;
			if (mm == current->mm)
				activate_context(mm);
		} else {
			unsigned long asid;
			unsigned long saved_asid = MMU_NO_ASID;

			asid = mm->context.id & MMU_CONTEXT_ASID_MASK;
			start &= PAGE_MASK;
			end += (PAGE_SIZE - 1);
			end &= PAGE_MASK;
			if (mm != current->mm) {
				saved_asid = get_asid();
				set_asid(asid);
			}
			while (start < end) {
				__flush_tlb_page(asid, start);
				start += PAGE_SIZE;
			}
			if (saved_asid != MMU_NO_ASID)
				set_asid(saved_asid);
		}
		local_irq_restore(flags);
	}
}
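The size > (MMU_NTLB_ENTRIES/4) test above trades per-page flushes for dropping the whole context once a range would touch a quarter of the TLB. A short worked example, assuming a 64-entry unified TLB (the constant is CPU specific, so 64 is an assumption):

/*
 * Assuming MMU_NTLB_ENTRIES == 64, the threshold is 64/4 == 16 pages.
 * A 128 KiB range with 4 KiB pages is 32 pages, so flush_tlb_range()
 * would drop the ASID (letting activate_context() allocate a fresh one)
 * rather than issue 32 individual __flush_tlb_page() calls.
 */
unsigned long start = 0x40000000UL;
unsigned long end   = start + (128 << 10);
int size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;	/* == 32 */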
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	__flush_tlb_page(vma, addr);
	flush_tlb_common(vma->vm_mm, vma, addr);
}