void tlb_init() {
    tlb_invalidate_all();
    /* Shift C0_CONTEXT left, because we shift it right in
     * tlb_refill_handler. This is a little hack that keeps the page
     * table 4 MB in size, but it forces us to keep the PTEs in KSEG2. */
    mips32_set_c0(C0_CONTEXT, PTE_BASE << 1);
}
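None of the snippets in this collection shows the body of tlb_invalidate_all() itself. Below is a minimal sketch of what it might look like on a MIPS32 core, reusing the mips32_set_c0 accessor from tlb_init above; the register names C0_ENTRYHI/C0_ENTRYLO0/C0_ENTRYLO1/C0_INDEX and the NR_TLB_ENTRIES constant are assumptions, not names taken from the original sources.

/* Hypothetical sketch of tlb_invalidate_all() for a MIPS32 core. The
 * CP0 register names and NR_TLB_ENTRIES are assumed; real ports read
 * the TLB size from Config1 and use their own accessor macros. */
#define NR_TLB_ENTRIES 32   /* assumption; see Config1.MMUSize */

void tlb_invalidate_all(void) {
    int i;
    for (i = 0; i < NR_TLB_ENTRIES; i++) {
        /* Give every slot a distinct unmapped KSEG0 VPN2 so that no two
         * entries alias (duplicate matches can raise a machine check). */
        mips32_set_c0(C0_ENTRYHI, 0x80000000 + (i << 13));
        mips32_set_c0(C0_ENTRYLO0, 0);   /* V bit clear: invalid */
        mips32_set_c0(C0_ENTRYLO1, 0);
        mips32_set_c0(C0_INDEX, i);
        __asm__ volatile("tlbwi");       /* write indexed TLB entry */
    }
}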
/**
 * load_pgdir - use the page table specified in @proc by @cr3
 */
void load_pgdir(struct proc_struct *proc) {
    if (proc != NULL)
        current_pgdir_pa = proc->cr3;
    else
        current_pgdir_pa = boot_pgdir_pa;
    tlb_invalidate_all();
}
void mp_set_mm_pagetable(struct mm_struct *mm) {
    if (mm != NULL && mm->pgdir != NULL)
        ttbSet(PADDR(mm->pgdir));
    else
        ttbSet(boot_pgdir_pa);
    tlb_invalidate_all();
}
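ttbSet is not shown in this collection; on ARM it presumably writes the physical page-directory address into the Translation Table Base Register. A hedged sketch under that assumption follows; the body is illustrative, not the project's actual code.

/* Hypothetical ttbSet body for ARMv5/v6: write the physical page-directory
 * address into TTBR0 via "MCR p15, 0, Rd, c2, c0, 0". Cacheability bits in
 * the low bits of TTBR0 are omitted for brevity. */
static inline void ttbSet(uint32_t ttb_pa) {
    asm volatile("mcr p15, 0, %0, c2, c0, 0" :: "r" (ttb_pa) : "memory");
}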
void mp_set_mm_pagetable(struct mm_struct *mm) {
    extern uintptr_t boot_pgdir_pa;
    extern uintptr_t current_pgdir_pa;
    if (mm != NULL)
        current_pgdir_pa = PADDR(mm->pgdir);
    else
        current_pgdir_pa = boot_pgdir_pa;
    tlb_invalidate_all();
}
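load_pgdir above and both mp_set_mm_pagetable variants are typically invoked from the context-switch path. A hypothetical caller sketch; switch_mm and the mm field access follow ucore's conventions but are assumptions here.

/* Hypothetical caller: install the address space of the process being
 * scheduled. Kernel threads have mm == NULL and fall back to the boot
 * page directory inside mp_set_mm_pagetable. */
static void switch_mm(struct proc_struct *next) {
    mp_set_mm_pagetable(next->mm);
}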
/**
 * Actually we'll modify the code at 0x900 and 0xA00 so that it will
 * jump to the new handlers.
 */
static void enable_paging(void) {
    /* Modify the exception handler code */
    extern unsigned long __post_boot_dtlb_miss;
    extern unsigned long __post_boot_itlb_miss;
    uint32_t *dtlb_handler = (uint32_t *)0x908;
    uint32_t *itlb_handler = (uint32_t *)0xa08;
    /* Patch in the relative jump targets; ">> 2" converts the byte
     * offset into an instruction (word) offset. */
    *dtlb_handler = ((uint32_t)&__post_boot_dtlb_miss - (uint32_t)dtlb_handler) >> 2;
    *itlb_handler = ((uint32_t)&__post_boot_itlb_miss - (uint32_t)itlb_handler) >> 2;
    /* Flush the whole TLB */
    tlb_invalidate_all();
}
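One caveat with patching exception vectors in place: on cores with a split instruction cache, the stores above land in the data cache and must be pushed out before instruction fetch can see them. A sketch of that extra step, assuming a hypothetical icache_sync_range() helper; the real port may do this elsewhere or rely on a unified cache.

/* Hypothetical follow-up to enable_paging(): make the patched words
 * visible to instruction fetch. icache_sync_range() is an assumed
 * helper, not part of the original source. */
static void sync_patched_vectors(void) {
    icache_sync_range(0x908, sizeof(uint32_t));
    icache_sync_range(0xa08, sizeof(uint32_t));
}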
void __noreturn kern_init(void) {
    //setup_exception_vector();
    tlb_invalidate_all();

    /*
    unsigned base = 0xBE000000;
    int i, j;
    for (j = 10; j < 24; ++j) {
        kprintf("\nj=%d\n\n\n", j);
        for (i = 0; i < 10; ++i) {
            int *addr = (int *)(base + i * 4 + (1 << j));
            kprintf("0x%08x=0x%04x\n", addr, (*addr) & 0xFFFF);
        }
    }
    */

    pic_init();     // init interrupt controller
    cons_init();    // init the console
    clock_init();   // init clock interrupt
    // panic("init");
    check_initrd();

    const char *message = "(THU.CST) os is loading ...\n\n";
    kprintf(message);
    print_kerninfo();

#if 0
    kprintf("EX\n");
    __asm__ volatile("syscall");
    kprintf("EX RET\n");
#endif

    pmm_init();     // init physical memory management
    vmm_init();     // init virtual memory management
    sched_init();
    proc_init();    // init process table
    ide_init();
    fs_init();

    intr_enable();  // enable irq interrupt

    //*(int *)(0x00124) = 0x432;
    //asm volatile("divu $1, $1, $1");

    cpu_idle();
}
void __noreturn kern_init(void) {
    //setup_exception_vector();
    tlb_invalidate_all();

    pic_init();     // init interrupt controller
    vga_init();
    cons_init();    // init the console
    clock_init();   // init clock interrupt
    check_initrd();

    const char *message = "(THU.CST) os is loading ...\n\n";
    kprintf(message);
    print_kerninfo();

#if 0
    kprintf("EX\n");
    __asm__ volatile("syscall");
    kprintf("EX RET\n");
#endif

    pmm_init();     // init physical memory management
    vmm_init();     // init virtual memory management
    sched_init();
    proc_init();    // init process table
    ide_init();
    fs_init();

    intr_enable();  // enable irq interrupt

    //*(int *)(0x00124) = 0x432;
    //asm volatile("divu $1, $1, $1");

    cpu_idle();
}
/** Invalidate all entries in TLB that belong to specified address space.
 *
 * @param asid Ignored as the ARM architecture doesn't support ASIDs.
 */
void tlb_invalidate_asid(asid_t asid) {
    tlb_invalidate_all(); // TODO: why not TLBIASID_write(asid)?
}
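The doc comment is only true of pre-ARMv6 cores; ARMv6 and later do implement ASIDs, which is what the TODO is pointing at: a TLBIASID operation invalidates only the entries tagged with the given ASID, avoiding the full flush. A hedged sketch of that variant; the coprocessor encoding is from the ARM architecture manual, while the function name is invented here.

/* Hypothetical per-ASID invalidation for ARMv6+: "MCR p15, 0, Rd, c8, c7, 2"
 * invalidates unified-TLB entries whose ASID matches Rd. Pre-v6 cores have
 * no ASIDs, so the full flush above remains the only option there. */
static inline void tlb_invalidate_asid_v6(asid_t asid) {
    asm volatile("mcr p15, 0, %0, c8, c7, 2" :: "r" (asid) : "memory");
}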
// Invalidate both TLBs
// (clean and flush, meaning we write the data back).
void tlb_invalidate(pde_t *pgdir, uintptr_t la) {
    tlb_invalidate_all();
}
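Flushing the whole TLB for a single page is the simple but expensive option. On MIPS, a targeted variant could probe for the matching entry and rewrite just that slot; a sketch under the same assumed CP0 accessors as before, with mips32_get_c0 as the assumed read counterpart of mips32_set_c0.

/* Hypothetical targeted invalidation: probe the TLB for "la" and, if an
 * entry matches, overwrite only that slot. A real implementation would
 * also save and restore EntryHi (which holds the current ASID). */
void tlb_invalidate_one(uintptr_t la) {
    int index;
    mips32_set_c0(C0_ENTRYHI, la & ~0x1FFF);   /* VPN2 of the address */
    __asm__ volatile("tlbp");                  /* probe; result in Index */
    index = mips32_get_c0(C0_INDEX);
    if (index >= 0) {                          /* negative means no match */
        mips32_set_c0(C0_ENTRYHI, 0x80000000 + (index << 13));
        mips32_set_c0(C0_ENTRYLO0, 0);
        mips32_set_c0(C0_ENTRYLO1, 0);
        __asm__ volatile("tlbwi");
    }
}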
void
cpu_hatch(struct cpu_info *ci)
{
    struct pmap_tlb_info * const ti = ci->ci_tlb_info;

    /*
     * Invalidate all the TLB entries (even wired ones) and then reserve
     * space for the wired TLB entries.
     */
    mips3_cp0_wired_write(0);
    tlb_invalidate_all();
    mips3_cp0_wired_write(ti->ti_wired);

    /*
     * Setup HWRENA and USERLOCAL COP0 registers (MIPSxxR2).
     */
    cpu_hwrena_setup();

    /*
     * If we are using register zero relative addressing to access cpu_info
     * in the exception vectors, enter that mapping into TLB now.
     */
    if (ci->ci_tlb_slot >= 0) {
        const uint32_t tlb_lo = MIPS3_PG_G|MIPS3_PG_V
            | mips3_paddr_to_tlbpfn((vaddr_t)ci);
        const struct tlbmask tlbmask = {
            .tlb_hi = -PAGE_SIZE | KERNEL_PID,
#if (PGSHIFT & 1)
            .tlb_lo0 = tlb_lo,
            .tlb_lo1 = tlb_lo + MIPS3_PG_NEXT,
#else
            .tlb_lo0 = 0,
            .tlb_lo1 = tlb_lo,
#endif
            .tlb_mask = -1,
        };
        tlb_invalidate_addr(tlbmask.tlb_hi, KERNEL_PID);
        tlb_write_entry(ci->ci_tlb_slot, &tlbmask);
    }

    /*
     * Flush the icache just to be sure.
     */
    mips_icache_sync_all();

    /*
     * Let this CPU do its own initialization (for things that have to be
     * done on the local CPU).
     */
    (*mips_locoresw.lsw_cpu_init)(ci);

    // Show this CPU as present.
    atomic_or_ulong(&ci->ci_flags, CPUF_PRESENT);

    /*
     * Announce we are hatched.
     */
    kcpuset_atomic_set(cpus_hatched, cpu_index(ci));

    /*
     * Now wait to be set free!
     */
    while (!kcpuset_isset(cpus_running, cpu_index(ci))) {
        /* spin, spin, spin */
    }

    /*
     * Initialize the MIPS count/compare clock.
     */
    mips3_cp0_count_write(ci->ci_data.cpu_cc_skew);
    KASSERT(ci->ci_cycles_per_hz != 0);
    ci->ci_next_cp0_clk_intr = ci->ci_data.cpu_cc_skew + ci->ci_cycles_per_hz;
    mips3_cp0_compare_write(ci->ci_next_cp0_clk_intr);
    ci->ci_data.cpu_cc_skew = 0;

    /*
     * Let this CPU do its own post-running initialization
     * (for things that have to be done on the local CPU).
     */
    (*mips_locoresw.lsw_cpu_run)(ci);

    /*
     * Now turn on interrupts (and verify they are on).
     */
    spl0();
    KASSERTMSG(ci->ci_cpl == IPL_NONE, "cpl %d", ci->ci_cpl);
    KASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);

    kcpuset_atomic_set(pmap_kernel()->pm_onproc, cpu_index(ci));
    kcpuset_atomic_set(pmap_kernel()->pm_active, cpu_index(ci));

    /*
     * And do a tail call to idle_loop.
     */
    idle_loop(NULL);
}

void
cpu_boot_secondary_processors(void)
{
    CPU_INFO_ITERATOR cii;
    struct cpu_info *ci;

    for (CPU_INFO_FOREACH(cii, ci)) {
        if (CPU_IS_PRIMARY(ci))
            continue;
        KASSERT(ci->ci_data.cpu_idlelwp);

        /*
         * Skip this CPU if it didn't successfully hatch.
         */
        if (!kcpuset_isset(cpus_hatched, cpu_index(ci)))
            continue;

        ci->ci_data.cpu_cc_skew = mips3_cp0_count_read();
        atomic_or_ulong(&ci->ci_flags, CPUF_RUNNING);
        kcpuset_set(cpus_running, cpu_index(ci));

        // Spin until the cpu calls idle_loop.
        for (u_int i = 0; i < 100; i++) {
            if (kcpuset_isset(cpus_running, cpu_index(ci)))
                break;
            delay(1000);
        }
    }
}
void
cpu_hatch(struct cpu_info *ci)
{
    struct pmap_tlb_info * const ti = ci->ci_tlb_info;

    /*
     * Invalidate all the TLB entries (even wired ones) and then reserve
     * space for the wired TLB entries.
     */
    mips3_cp0_wired_write(0);
    tlb_invalidate_all();
    mips3_cp0_wired_write(ti->ti_wired);

    /*
     * Setup HWRENA and USERLOCAL COP0 registers (MIPSxxR2).
     */
    cpu_hwrena_setup();

    /*
     * If we are using register zero relative addressing to access cpu_info
     * in the exception vectors, enter that mapping into TLB now.
     */
    if (ci->ci_tlb_slot >= 0) {
        const uint32_t tlb_lo = MIPS3_PG_G|MIPS3_PG_V
            | mips3_paddr_to_tlbpfn((vaddr_t)ci);
        tlb_enter(ci->ci_tlb_slot, -PAGE_SIZE, tlb_lo);
    }

    /*
     * Flush the icache just to be sure.
     */
    mips_icache_sync_all();

    /*
     * Let this CPU do its own initialization (for things that have to be
     * done on the local CPU).
     */
    (*mips_locoresw.lsw_cpu_init)(ci);

    /*
     * Announce we are hatched.
     */
    CPUSET_ADD(cpus_hatched, cpu_index(ci));

    /*
     * Now wait to be set free!
     */
    while (!CPUSET_HAS_P(cpus_running, cpu_index(ci))) {
        /* spin, spin, spin */
    }

    /*
     * Initialize the MIPS count/compare clock.
     */
    mips3_cp0_count_write(ci->ci_data.cpu_cc_skew);
    KASSERT(ci->ci_cycles_per_hz != 0);
    ci->ci_next_cp0_clk_intr = ci->ci_data.cpu_cc_skew + ci->ci_cycles_per_hz;
    mips3_cp0_compare_write(ci->ci_next_cp0_clk_intr);
    ci->ci_data.cpu_cc_skew = 0;

    /*
     * Let this CPU do its own post-running initialization
     * (for things that have to be done on the local CPU).
     */
    (*mips_locoresw.lsw_cpu_run)(ci);

    /*
     * Now turn on interrupts.
     */
    spl0();

    /*
     * And do a tail call to idle_loop.
     */
    idle_loop(NULL);
}
/* ucore uses copy-on-write when forking a new process, so copy_range
 * only copies the page-directory/page-table entries and sets their
 * permission to READONLY; a write will then be handled in pgfault. */
int copy_range(pgd_t *to, pgd_t *from, uintptr_t start, uintptr_t end, bool share) {
    assert(start % PGSIZE == 0 && end % PGSIZE == 0);
    assert(USER_ACCESS(start, end));
    do {
        pte_t *ptep = get_pte(from, start, 0), *nptep;
        if (ptep == NULL) {
            /* skip the whole unmapped region at the highest missing level */
            if (get_pud(from, start, 0) == NULL) {
                start = ROUNDDOWN(start + PUSIZE, PUSIZE);
            } else if (get_pmd(from, start, 0) == NULL) {
                start = ROUNDDOWN(start + PMSIZE, PMSIZE);
            } else {
                start = ROUNDDOWN(start + PTSIZE, PTSIZE);
            }
            continue;
        }
        if (*ptep != 0) {
            if ((nptep = get_pte(to, start, 1)) == NULL) {
                return -E_NO_MEM;
            }
            int ret;
            //kprintf("%08x %08x %08x\n", nptep, *nptep, start);
            assert(*ptep != 0 && *nptep == 0);
#ifdef ARCH_ARM
            //TODO: add code to handle swap
            if (ptep_present(ptep)) {
                /* nobody should be able to write this page
                 * before a W-pgfault */
                pte_perm_t perm = PTE_P;
                if (ptep_u_read(ptep))
                    perm |= PTE_U;
                if (!share) {
                    /* The original page must also become read-only,
                     * because copy-on-write may happen after the
                     * current process modifies its page. */
                    ptep_set_perm(ptep, perm);
                } else {
                    if (ptep_u_write(ptep)) {
                        perm |= PTE_W;
                    }
                }
                struct Page *page = pte2page(*ptep);
                ret = page_insert(to, page, start, perm);
            }
#else /* ARCH_ARM */
            if (ptep_present(ptep)) {
                pte_perm_t perm = ptep_get_perm(ptep, PTE_USER);
                struct Page *page = pte2page(*ptep);
                if (!share && ptep_s_write(ptep)) {
                    ptep_unset_s_write(&perm);
                    pte_perm_t perm_with_swap_stat = ptep_get_perm(ptep, PTE_SWAP);
                    ptep_set_perm(&perm_with_swap_stat, perm);
                    page_insert(from, page, start, perm_with_swap_stat);
                }
                ret = page_insert(to, page, start, perm);
                assert(ret == 0);
            }
#endif /* ARCH_ARM */
            else {
#ifdef CONFIG_NO_SWAP
                assert(0);
#endif
                swap_entry_t entry;
                ptep_copy(&entry, ptep);
                swap_duplicate(entry);
                ptep_copy(nptep, &entry);
            }
        }
        start += PGSIZE;
    } while (start != 0 && start < end);
#ifdef ARCH_ARM
    /* we have modified the PTEs of the original process,
     * so invalidate the TLB */
    tlb_invalidate_all();
#endif
    return 0;
}
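copy_range is the fork-time workhorse. Below is a caller sketch in the shape of ucore's dup_mmap, duplicating every VMA of the parent into the child; the list walk follows ucore's conventions, but the details (and the elided vma duplication) are assumptions here.

/* Hypothetical caller sketch: copy each mapped range of "from" into "to"
 * at fork time. Creating and inserting the child's vma_struct for each
 * range is elided; share == 0 selects copy-on-write. */
int dup_mmap(struct mm_struct *to, struct mm_struct *from) {
    list_entry_t *list = &(from->mmap_list), *le = list;
    while ((le = list_prev(le)) != list) {
        struct vma_struct *vma = le2vma(le, list_link);
        bool share = 0;  /* plain fork: copy-on-write, not shared memory */
        if (copy_range(to->pgdir, from->pgdir, vma->vm_start,
                       vma->vm_end, share) != 0) {
            return -E_NO_MEM;
        }
    }
    return 0;
}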