int main(void) { unsigned int va; int asid; unsigned int stack_addr = (unsigned int) &va & ~(PAGE_SIZE - 1); // Map code & data for (va = 0; va < 0x10000; va += PAGE_SIZE) { add_itlb_mapping(va, va | TLB_GLOBAL | TLB_EXECUTABLE | TLB_PRESENT); add_dtlb_mapping(va, va | TLB_WRITABLE | TLB_GLOBAL | TLB_PRESENT); } add_dtlb_mapping(stack_addr, stack_addr | TLB_WRITABLE | TLB_GLOBAL | TLB_PRESENT); add_dtlb_mapping(IO_REGION_BASE, IO_REGION_BASE | TLB_WRITABLE | TLB_GLOBAL | TLB_PRESENT); // Map a private page into address space 1 set_asid(1); add_dtlb_mapping(VADDR1, PADDR1 | TLB_PRESENT); *((unsigned int*) PADDR1) = 0xdeadbeef; // Map a private page into address space 2 set_asid(2); add_dtlb_mapping(VADDR1, PADDR2 | TLB_PRESENT); *((unsigned int*) PADDR2) = 0xabcdefed; // Enable MMU in flags register __builtin_nyuzi_write_control_reg(CR_FAULT_HANDLER, fault_handler); __builtin_nyuzi_write_control_reg(CR_TLB_MISS_HANDLER, fault_handler); __builtin_nyuzi_write_control_reg(CR_FLAGS, FLAG_MMU_EN | FLAG_SUPERVISOR_EN); // Read value from first address space set_asid(1); printf("A1 %08x\n", *((volatile unsigned int*) VADDR1)); // CHECK: A1 deadbeef // Read value from the second address space set_asid(2); printf("A2 %08x\n", *((volatile unsigned int*) VADDR1)); // CHECK: A2 abcdefed return 0; }
void flush_tlb_page(struct vm_area_struct *vma, unsigned long page) { if (vma->vm_mm && vma->vm_mm->context.id != NO_CONTEXT) { unsigned long flags; unsigned long asid; unsigned long saved_asid = MMU_NO_ASID; asid = vma->vm_mm->context.id & MMU_CONTEXT_ASID_MASK; page &= PAGE_MASK; local_irq_save(flags); if (vma->vm_mm != current->mm) { saved_asid = get_asid(); set_asid(asid); } __flush_tlb_page(asid, page); if (saved_asid != MMU_NO_ASID) set_asid(saved_asid); local_irq_restore(flags); } }
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) { unsigned int cpu = smp_processor_id(); if (vma->vm_mm && cpu_context(cpu, vma->vm_mm) != NO_CONTEXT) { unsigned long flags; unsigned long asid; unsigned long saved_asid = MMU_NO_ASID; asid = cpu_asid(cpu, vma->vm_mm); page &= PAGE_MASK; local_irq_save(flags); if (vma->vm_mm != current->mm) { saved_asid = get_asid(); set_asid(asid); } local_flush_tlb_one(asid, page); if (saved_asid != MMU_NO_ASID) set_asid(saved_asid); local_irq_restore(flags); } }
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { struct mm_struct *mm = vma->vm_mm; unsigned int cpu = smp_processor_id(); if (cpu_context(cpu, mm) != NO_CONTEXT) { unsigned long flags; int size; local_irq_save(flags); size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */ cpu_context(cpu, mm) = NO_CONTEXT; if (mm == current->mm) activate_context(mm, cpu); } else { unsigned long asid; unsigned long saved_asid = MMU_NO_ASID; asid = cpu_asid(cpu, mm); start &= PAGE_MASK; end += (PAGE_SIZE - 1); end &= PAGE_MASK; if (mm != current->mm) { saved_asid = get_asid(); set_asid(asid); } while (start < end) { local_flush_tlb_one(asid, start); start += PAGE_SIZE; } if (saved_asid != MMU_NO_ASID) set_asid(saved_asid); } local_irq_restore(flags); }
/*
 * Write a 4 KB TLB entry pair (lo0/lo1 under tag @hi) at a
 * hardware-chosen index via mips_tlbrwr2().  The caller's ASID is
 * saved up front and reinstated afterwards so the operation leaves
 * the active address space unchanged.
 */
void tlb_overwrite_random(tlbhi_t hi, tlblo_t lo0, tlblo_t lo1)
{
	uint8_t prev_asid = get_asid();

	mips_tlbrwr2(hi, lo0, lo1, PAGE_MASK_4KB);
	set_asid(prev_asid);
}
/*
 * Probe the TLB for an entry matching @hi, returning its two lo
 * values through @lo0/@lo1.  The probe's extra output (page mask) is
 * discarded into a scratch variable.  The ASID in effect on entry is
 * restored before returning.
 */
void tlb_probe2(tlbhi_t hi, tlblo_t *lo0, tlblo_t *lo1)
{
	uint8_t prev_asid = get_asid();
	unsigned scratch;

	mips_tlbprobe2(hi, lo0, lo1, &scratch);
	set_asid(prev_asid);
}
/*
 * Write a 4 KB TLB entry pair (lo0/lo1 under tag @hi) at the explicit
 * index @i via mips_tlbwi2().  The caller's ASID is preserved across
 * the write.
 */
void tlb_write_index(tlbhi_t hi, tlblo_t lo0, tlblo_t lo1, int i)
{
	uint8_t prev_asid = get_asid();

	mips_tlbwi2(hi, lo0, lo1, PAGE_MASK_4KB, i);
	set_asid(prev_asid);
}
/*
 * Read the TLB entry at index @i, returning its tag through @hi and
 * its lo values through @lo0/@lo1.  The page-mask output of
 * mips_tlbri2() is dropped into a scratch variable.  The entry ASID
 * is restored before returning.
 */
void tlb_read_index(tlbhi_t *hi, tlblo_t *lo0, tlblo_t *lo1, int i)
{
	uint8_t prev_asid = get_asid();
	unsigned scratch;

	mips_tlbri2(hi, lo0, lo1, &scratch, i);
	set_asid(prev_asid);
}
/*
 * Invalidate TLB entries matching @hi via mips_tlbinval().  The ASID
 * active on entry is saved and reinstated so the current address
 * space is unaffected by the operation.
 */
void tlb_invalidate(tlbhi_t hi)
{
	uint8_t prev_asid = get_asid();

	mips_tlbinval(hi);
	set_asid(prev_asid);
}