// could consider having an API to allow these to dynamically change
// MTRRs are for physical, static ranges.  PAT is linear, more granular, and
// more dynamic
void setup_default_mtrrs(barrier_t* smp_barrier)
{
	// disable interrupts
	int8_t state = 0;
	disable_irqsave(&state);
	// barrier - if we're meant to do this for all cores, we'll be
	// passed a pointer to an initialized barrier
	if (smp_barrier)
		waiton_barrier(smp_barrier);
	// disable caching: set CD and clear NW in cr0
	lcr0((rcr0() | CR0_CD) & ~CR0_NW);
	// flush caches
	cache_flush();
	// flush tlb
	tlb_flush_global();
#ifndef CONFIG_NOMTRRS
	// disable MTRRs, and set the default type to WB (06)
	write_msr(IA32_MTRR_DEF_TYPE, 0x00000006);

	// Now we can actually safely adjust the MTRRs
	// MTRR for IO Holes (note these are 64 bit values we are writing)
	// 0x000a0000 - 0x000c0000 : VGA - WC 0x01
	write_msr(IA32_MTRR_PHYSBASE0, PTE_ADDR(VGAPHYSMEM) | 0x01);
	// if we need to have a full 64bit val, use the UINT64 macro
	write_msr(IA32_MTRR_PHYSMASK0, 0x0000000ffffe0800);
	// 0x000c0000 - 0x00100000 : IO devices (and ROM BIOS) - UC 0x00
	write_msr(IA32_MTRR_PHYSBASE1, PTE_ADDR(DEVPHYSMEM) | 0x00);
	write_msr(IA32_MTRR_PHYSMASK1, 0x0000000ffffc0800);

	// APIC/IOAPIC holes
	/* Going to skip them, since we set their mode using PAT when we
	 * map them in */

	// make sure all other MTRR ranges are disabled (should be unnecessary)
	write_msr(IA32_MTRR_PHYSMASK2, 0);
	write_msr(IA32_MTRR_PHYSMASK3, 0);
	write_msr(IA32_MTRR_PHYSMASK4, 0);
	write_msr(IA32_MTRR_PHYSMASK5, 0);
	write_msr(IA32_MTRR_PHYSMASK6, 0);
	write_msr(IA32_MTRR_PHYSMASK7, 0);

	// keep the default type WB (06), turn MTRRs on, and turn off fixed
	// ranges
	write_msr(IA32_MTRR_DEF_TYPE, 0x00000806);
#endif
	// reflush caches and TLB
	cache_flush();
	tlb_flush_global();
	// turn caching back on
	lcr0(rcr0() & ~(CR0_CD | CR0_NW));
	// barrier
	if (smp_barrier)
		waiton_barrier(smp_barrier);
	// enable interrupts
	enable_irqsave(&state);
}
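/* Aside: the hardcoded PHYSMASK values above encode both the range length
 * and the MTRR valid bit (bit 11, 0x800).  Below is a minimal sketch of how
 * they could be derived, assuming a 36-bit physical address width and
 * power-of-two, size-aligned ranges.  mtrr_physmask() is a hypothetical
 * helper, not part of this file; it assumes <stdint.h> types. */
static uint64_t mtrr_physmask(uint64_t size)
{
	// the mask field occupies bits 12 through 35; clearing the low bits
	// of ~(size - 1) yields a mask that matches any address in the range
	uint64_t mask_field = ~(size - 1) & 0x0000000ffffff000ULL;
	// bit 11 marks this variable-range MTRR as valid
	return mask_field | 0x800;
}
/* mtrr_physmask(0x20000) == 0x0000000ffffe0800 (128KB VGA hole)
 * mtrr_physmask(0x40000) == 0x0000000ffffc0800 (256KB IO/ROM BIOS hole) */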
void pmap_pcid_configure(void)
{
	int ccpu = cpu_number();
	uintptr_t cr4 = get_cr4();
	boolean_t pcid_present = FALSE;

	pmap_pcid_log("PCID configure invoked on CPU %d\n", ccpu);
	pmap_assert(ml_get_interrupts_enabled() == FALSE ||
	    get_preemption_level() != 0);
	pmap_assert(cpu_mode_is64bit());

	if (PE_parse_boot_argn("-pmap_pcid_disable", &pmap_pcid_disabled,
	    sizeof(pmap_pcid_disabled))) {
		pmap_pcid_log("PMAP: PCID feature disabled\n");
		printf("PMAP: PCID feature disabled, %u\n", pmap_pcid_disabled);
		kprintf("PMAP: PCID feature disabled %u\n", pmap_pcid_disabled);
	}
	/* no_shared_cr3+PCID is currently unsupported */
#if DEBUG
	if (pmap_pcid_disabled == FALSE)
		no_shared_cr3 = FALSE;
	else
		no_shared_cr3 = TRUE;
#else
	if (no_shared_cr3)
		pmap_pcid_disabled = TRUE;
#endif
	if (pmap_pcid_disabled || no_shared_cr3) {
		unsigned i;
		/* Reset PCID status, as we may have picked up
		 * strays if discovered prior to platform
		 * expert initialization.
		 */
		for (i = 0; i < real_ncpus; i++) {
			if (cpu_datap(i)) {
				cpu_datap(i)->cpu_pmap_pcid_enabled = FALSE;
			}
		}
		pmap_pcid_ncpus = 0;
		cpu_datap(ccpu)->cpu_pmap_pcid_enabled = FALSE;
		return;
	}
	/* DRKTODO: assert if features haven't been discovered yet. Redundant
	 * invocation of cpu_mode_init and descendants masks this for now.
	 */
	if ((cpuid_features() & CPUID_FEATURE_PCID))
		pcid_present = TRUE;
	else {
		cpu_datap(ccpu)->cpu_pmap_pcid_enabled = FALSE;
		pmap_pcid_log("PMAP: PCID not detected CPU %d\n", ccpu);
		return;
	}
	if ((cr4 & (CR4_PCIDE | CR4_PGE)) == (CR4_PCIDE | CR4_PGE)) {
		cpu_datap(ccpu)->cpu_pmap_pcid_enabled = TRUE;
		pmap_pcid_log("PMAP: PCID already enabled %d\n", ccpu);
		return;
	}
	if (pcid_present == TRUE) {
		pmap_pcid_log("Pre-PCID:CR0: 0x%lx, CR3: 0x%lx, CR4(CPU %d): 0x%lx\n",
		    get_cr0(), get_cr3_raw(), ccpu, cr4);

		if (cpu_number() >= PMAP_PCID_MAX_CPUS) {
			panic("PMAP_PCID_MAX_CPUS %d\n", cpu_number());
		}
		if ((get_cr4() & CR4_PGE) == 0) {
			set_cr4(get_cr4() | CR4_PGE);
			pmap_pcid_log("Toggled PGE ON (CPU: %d)\n", ccpu);
		}
		set_cr4(get_cr4() | CR4_PCIDE);
		pmap_pcid_log("Post PCID: CR0: 0x%lx, CR3: 0x%lx, CR4(CPU %d): 0x%lx\n",
		    get_cr0(), get_cr3_raw(), ccpu, get_cr4());
		tlb_flush_global();
		cpu_datap(ccpu)->cpu_pmap_pcid_enabled = TRUE;

		if (OSIncrementAtomic(&pmap_pcid_ncpus) == machine_info.max_cpus) {
			pmap_pcid_log("All PCIDs enabled: real_ncpus: %d, pmap_pcid_ncpus: %d\n",
			    real_ncpus, pmap_pcid_ncpus);
		}
		cpu_datap(ccpu)->cpu_pmap_pcid_coherentp =
		    cpu_datap(ccpu)->cpu_pmap_pcid_coherentp_kernel =
		    &(kernel_pmap->pmap_pcid_coherency_vector[ccpu]);
		cpu_datap(ccpu)->cpu_pcid_refcounts[0] = 1;
	}
}
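/* Aside: once CR4.PCIDE is set (as above), the low 12 bits of every CR3
 * load carry the PCID, and bit 63 of the loaded value requests that TLB
 * entries tagged with that PCID be preserved across the load.  Below is a
 * minimal sketch of that encoding per the Intel SDM; the names are
 * illustrative, not xnu's actual helpers, and pml4_phys is assumed to be a
 * 4KB-aligned physical address. */
#define CR3_PCID_MASK	0xFFFULL	/* PCID lives in bits 0-11 */
#define CR3_NOFLUSH	(1ULL << 63)	/* preserve this PCID's TLB entries */

static inline uint64_t cr3_with_pcid(uint64_t pml4_phys, uint16_t pcid,
				     int preserve_tlb)
{
	// top-level page table base in bits 12 and up, PCID in bits 0-11
	uint64_t cr3 = (pml4_phys & ~CR3_PCID_MASK) | (pcid & CR3_PCID_MASK);
	if (preserve_tlb)
		cr3 |= CR3_NOFLUSH;	// skip the implicit TLB flush
	return cr3;
}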