/*
 * Flush the entire TLB, including entries for global pages.
 *
 * Toggling CR4.PGE off and back on invalidates every TLB entry,
 * global ones included.  CR4_PGE should always be set when this is
 * called; if it somehow is not, fall back to reloading CR3, which
 * flushes all non-global entries (and with PGE clear, that is all
 * of them).
 */
void
tlb_flush_global(void)
{
	uint32_t cr4_val = rcr4();

	if ((cr4_val & CR4_PGE) == 0) {
		/* PGE unexpectedly clear: a CR3 reload suffices. */
		lcr3(rcr3());
		return;
	}

	lcr4(cr4_val & ~CR4_PGE);	/* clearing PGE flushes the TLB */
	lcr4(cr4_val);			/* restore the original CR4 */
}
// Setup code for APs void mp_main(void) { // We are in high EIP now, safe to switch to kern_pgdir uint32_t cr4 = rcr4(); cr4 |= (1<<4); //Open PSE lcr4(cr4); lcr3(PADDR(kern_pgdir)); cprintf("SMP: CPU %d starting\n", cpunum()); lapic_init(); env_init_percpu(); trap_init_percpu(); xchg(&thiscpu->cpu_status, CPU_STARTED); // tell boot_aps() we're up // Now that we have finished some basic setup, call sched_yield() // to start running processes on this CPU. But make sure that // only one CPU can enter the scheduler at a time! // // Your code here: lock_kernel(); sched_yield(); // Remove this after you finish Exercise 4 //for (;;); }
/*
 * cpu_init(ci): finish configuring one CPU after identification.
 *
 * Runs the model-specific setup hook (if any), programs the PAT,
 * enables kernel write protection, and accumulates the CR4 feature
 * bits this CPU supports (PGE, SMEP, SMAP, UMIP, FXSR/SSE) before
 * committing them with a single CR4 write.  On MP kernels, marks the
 * CPU running and flushes the TLB including global pages.
 */
void
cpu_init(struct cpu_info *ci)
{
	u_int cr4 = 0;	/* CR4 bits to OR in; stays 0 on plain 486s */

	/* configure the CPU if needed */
	if (ci->cpu_setup != NULL)
		(*ci->cpu_setup)(ci);

	/*
	 * We do this here after identifycpu() because errata may affect
	 * what we do.
	 */
	patinit(ci);

	/*
	 * Enable ring 0 write protection (486 or above, but 386
	 * no longer supported).
	 */
	lcr0(rcr0() | CR0_WP);

	if (cpu_feature & CPUID_PGE)
		cr4 |= CR4_PGE;	/* enable global TLB caching */
	if (ci->ci_feature_sefflags_ebx & SEFF0EBX_SMEP)
		cr4 |= CR4_SMEP;	/* block kernel exec of user pages */
#ifndef SMALL_KERNEL
	if (ci->ci_feature_sefflags_ebx & SEFF0EBX_SMAP)
		cr4 |= CR4_SMAP;	/* block kernel access to user pages */
	if (ci->ci_feature_sefflags_ecx & SEFF0ECX_UMIP)
		cr4 |= CR4_UMIP;	/* forbid SGDT/SIDT etc. in user mode */
#endif

	/*
	 * If we have FXSAVE/FXRESTOR, use them.
	 */
	if (cpu_feature & CPUID_FXSR) {
		cr4 |= CR4_OSFXSR;

		/*
		 * If we have SSE/SSE2, enable XMM exceptions.
		 */
		if (cpu_feature & (CPUID_SSE|CPUID_SSE2))
			cr4 |= CR4_OSXMMEXCPT;
	}
	/* no cr4 on most 486s */
	if (cr4 != 0)
		lcr4(rcr4()|cr4);

#ifdef MULTIPROCESSOR
	ci->ci_flags |= CPUF_RUNNING;
	/* Flush the TLB, global pages included (PGE may just have changed). */
	tlbflushg();
#endif
}
/*
 * enable_pse: turn on 4MB-page support (PSE) if the CPU advertises it.
 *
 * Queries CPUID leaf 0x1 and, when the PSE feature bit is present in
 * EDX, sets CR4_PSE.
 *
 * Returns true if PSE was enabled, false if the CPU lacks support.
 */
bool
enable_pse(void)
{
	uint32_t edx;

	/* Leaf 0x1 feature flags; only EDX is of interest here. */
	cpuid(0x1, 0x0, 0, 0, 0, &edx);
	if (!(edx & CPUID_PSE_SUPPORT))
		return false;

	lcr4(rcr4() | CR4_PSE);
	return true;
}
/*
 * cpu_init(ci): minimal per-CPU configuration.
 *
 * Invokes the model-specific setup hook when one is registered, then
 * enables ring-0 write protection (CR0_WP) and the default CR4 feature
 * set.  On MP kernels the CPU is then marked as running.
 */
void
cpu_init(struct cpu_info *ci)
{
	/* Model/vendor-specific setup, if a hook was registered. */
	if (ci->cpu_setup)
		ci->cpu_setup(ci);

	/* Kernel write protection plus the standard CR4 features. */
	lcr0(rcr0() | CR0_WP);
	lcr4(rcr4() | CR4_DEFAULT);

#ifdef MULTIPROCESSOR
	ci->ci_flags |= CPUF_RUNNING;
#endif
}
/*
 * cpu_init(ci): per-CPU configuration with PAT setup.
 *
 * Invokes the model-specific setup hook when present, programs the PAT
 * (deliberately after identifycpu(), since errata may change what is
 * done), then enables ring-0 write protection and the default CR4
 * feature set.  On MP kernels the CPU is marked running and the TLB is
 * flushed, global pages included.
 */
void
cpu_init(struct cpu_info *ci)
{
	/* Model/vendor-specific setup, if a hook was registered. */
	if (ci->cpu_setup)
		ci->cpu_setup(ci);

	/*
	 * PAT programming happens after identifycpu() on purpose:
	 * errata may affect what we do.
	 */
	patinit(ci);

	lcr0(rcr0() | CR0_WP);
	lcr4(rcr4() | CR4_DEFAULT);

#ifdef MULTIPROCESSOR
	ci->ci_flags |= CPUF_RUNNING;
	tlbflushg();
#endif
}
void perfmon_pcpu_init(void) { int i; if (!perfmon_supported()) return; /* Enable user level access to the performance counters */ lcr4(rcr4() | CR4_PCE); /* Reset all the counters and selectors to zero. */ write_msr(MSR_CORE_PERF_GLOBAL_CTRL, 0); for (i = 0; i < (int) cpu_caps.counters_x_proc; i++) { write_msr(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0); write_msr(MSR_IA32_PERFCTR0 + i, 0); } write_msr(MSR_CORE_PERF_FIXED_CTR_CTRL, 0); for (i = 0; i < (int) cpu_caps.fix_counters_x_proc; i++) write_msr(MSR_CORE_PERF_FIXED_CTR0 + i, 0); perfmon_arm_irq(); }