/*H:000
 * Welcome to the Host!
 *
 * By this point your brain has been tickled by the Guest code and numbed by
 * the Launcher code; prepare for it to be stretched by the Host code.  This
 * is the heart.  Let's begin at the initialization routine for the Host's lg
 * module.
 */
static int __init init(void)
{
	int err;

	/* Lguest can't run under Xen, VMI or itself.  It does Tricky Stuff. */
	if (paravirt_enabled()) {
		printk("lguest is afraid of being a guest\n");
		return -EPERM;
	}

	/* First we put the Switcher up in very high virtual memory. */
	err = map_switcher();
	if (err)
		goto out;

	/* Now we set up the pagetable implementation for the Guests. */
	err = init_pagetables(switcher_page, SHARED_SWITCHER_PAGES);
	if (err)
		goto unmap;

	/* We might need to reserve an interrupt vector. */
	err = init_interrupts();
	if (err)
		goto free_pgtables;

	/* /dev/lguest needs to be registered. */
	err = lguest_device_init();
	if (err)
		goto free_interrupts;

	/* Finally we do some architecture-specific setup. */
	lguest_arch_host_init();

	/* All good! */
	return 0;

	/* Error paths: undo, in reverse order, everything set up above. */
free_interrupts:
	free_interrupts();
free_pgtables:
	free_pagetables();
unmap:
	unmap_switcher();
out:
	return err;
}
/*
 * Host initialization for the lg module: map the Switcher into high
 * virtual memory, set up Guest pagetables, reserve an interrupt vector,
 * register /dev/lguest and run architecture-specific setup.  On any
 * failure, everything already acquired is unwound in reverse order.
 *
 * Returns 0 on success or a negative errno.
 */
static int __init init(void)
{
	int err;

	/* lguest does Tricky Stuff and cannot itself run as a guest. */
	if (paravirt_enabled()) {
		/* Fix: printk() must carry an explicit log level. */
		printk(KERN_WARNING "lguest is afraid of being a guest\n");
		return -EPERM;
	}

	/* Put the Switcher up in very high virtual memory. */
	err = map_switcher();
	if (err)
		goto out;

	/* Set up the pagetable implementation for the Guests. */
	err = init_pagetables(switcher_page, SHARED_SWITCHER_PAGES);
	if (err)
		goto unmap;

	/* We might need to reserve an interrupt vector. */
	err = init_interrupts();
	if (err)
		goto free_pgtables;

	/* /dev/lguest needs to be registered. */
	err = lguest_device_init();
	if (err)
		goto free_interrupts;

	/* Finally, architecture-specific setup. */
	lguest_arch_host_init();

	/* All good! */
	return 0;

	/* Unwind in reverse order of acquisition. */
free_interrupts:
	free_interrupts();
free_pgtables:
	free_pagetables();
unmap:
	unmap_switcher();
out:
	return err;
}
/*
 * Probe for a tboot (Intel TXT) shared page handed over by the boot
 * loader via boot_params.tboot_addr.  On success the page is mapped
 * through a fixmap slot and published via the file-scope 'tboot'
 * pointer; on any validation failure 'tboot' is left/reset to NULL.
 */
void __init tboot_probe(void)
{
	/* Look for valid page-aligned address for shared page. */
	if (!boot_params.tboot_addr)
		return;
	/*
	 * also verify that it is mapped as we expect it before calling
	 * set_fixmap(), to reduce chance of garbage value causing crash
	 */
	if (!e820_any_mapped(boot_params.tboot_addr,
			     boot_params.tboot_addr, E820_RESERVED)) {
		pr_warning("non-0 tboot_addr but it is not of type E820_RESERVED\n");
		return;
	}

	/* only a natively booted kernel should be using TXT */
	if (paravirt_enabled()) {
		pr_warning("non-0 tboot_addr but pv_ops is enabled\n");
		return;
	}

	/* Map and check for tboot UUID. */
	set_fixmap(FIX_TBOOT_BASE, boot_params.tboot_addr);
	tboot = (struct tboot *)fix_to_virt(FIX_TBOOT_BASE);
	if (memcmp(&tboot_uuid, &tboot->uuid, sizeof(tboot->uuid))) {
		pr_warning("tboot at 0x%llx is invalid\n",
			   boot_params.tboot_addr);
		tboot = NULL;
		return;
	}
	/* Reject shared pages older than the minimum supported version. */
	if (tboot->version < 5) {
		pr_warning("tboot version is invalid: %u\n", tboot->version);
		tboot = NULL;
		return;
	}

	pr_info("found shared page at phys addr 0x%llx:\n",
		boot_params.tboot_addr);
	pr_debug("version: %d\n", tboot->version);
	pr_debug("log_addr: 0x%08x\n", tboot->log_addr);
	pr_debug("shutdown_entry: 0x%x\n", tboot->shutdown_entry);
	pr_debug("tboot_base: 0x%08x\n", tboot->tboot_base);
	pr_debug("tboot_size: 0x%x\n", tboot->tboot_size);
}
/*
 * Reserve the Extended BIOS Data Area and everything between it and the
 * 1MB mark, so the kernel never allocates memory the BIOS still uses.
 */
void __init reserve_ebda_region(void)
{
	unsigned int bios_lowmem, ebda;

	/*
	 * Both the EBDA position and the end of conventional memory come
	 * from the BIOS data area, which a paravirtual environment does
	 * not have.  Trust the hypervisor's memory setup in that case.
	 */
	if (paravirt_enabled())
		return;

	/* End of low (conventional) memory, reported by the BIOS in KiB. */
	bios_lowmem = *(unsigned short *)__va(BIOS_LOWMEM_KILOBYTES);
	bios_lowmem <<= 10;

	/* Start of the EBDA, also taken from the BIOS data area. */
	ebda = get_bios_ebda();

	/*
	 * Some old Dells have a 4k EBDA without reporting it, so memory
	 * above 0x9f000 is considered off limits (bugzilla 2990).
	 * Any marker below 128K is treated as bogus and replaced by the
	 * absolute cap.
	 */
	if (ebda < INSANE_CUTOFF)
		ebda = LOWMEM_CAP;
	if (bios_lowmem < INSANE_CUTOFF)
		bios_lowmem = LOWMEM_CAP;

	/* Cut off at the lower of the two markers, never above the cap. */
	bios_lowmem = min(min(bios_lowmem, ebda), LOWMEM_CAP);

	/* Reserve all memory between the cutoff and the 1MB mark. */
	memblock_x86_reserve_range(bios_lowmem, 0x100000, "* BIOS reserved");
}
/*
 * Register the legacy CMOS RTC as a platform device, unless some other
 * mechanism already owns the RTC: a PNP RTC device, a populated device
 * tree, an Intel MID platform (no ioport RTC), an ACPI FADT flag, or a
 * paravirt environment without an RTC.
 *
 * Returns 0 on success or when registration is intentionally skipped,
 * -ENODEV when the platform has no usable CMOS RTC.
 */
static __init int add_rtc_cmos(void)
{
#ifdef CONFIG_PNP
	static const char * const ids[] __initconst =
	    { "PNP0b00", "PNP0b01", "PNP0b02", };
	struct pnp_dev *dev;
	struct pnp_id *id;
	int i;

	/*
	 * If any PNP device advertises one of the well-known RTC ids,
	 * the PNP RTC driver will claim it - skip registration (see the
	 * "no PNP device found" message below for the opposite case).
	 */
	pnp_for_each_dev(dev) {
		for (id = dev->id; id; id = id->next) {
			for (i = 0; i < ARRAY_SIZE(ids); i++) {
				/* nonzero compare_pnp_id() = id matches */
				if (compare_pnp_id(id, ids[i]) != 0)
					return 0;
			}
		}
	}
#endif
	/* On device-tree platforms the RTC is described in the DT. */
	if (of_have_populated_dt())
		return 0;

	/* Intel MID platforms don't have ioport rtc */
	if (intel_mid_identify_cpu())
		return -ENODEV;

#ifdef CONFIG_ACPI
	/* Firmware explicitly says there is no CMOS RTC. */
	if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_CMOS_RTC) {
		/* This warning can likely go away again in a year or two. */
		pr_info("ACPI: not registering RTC platform device\n");
		return -ENODEV;
	}
#endif

	/* Paravirt guests only get one if the hypervisor exposes an RTC. */
	if (paravirt_enabled() && !paravirt_has(RTC))
		return -ENODEV;

	platform_device_register(&rtc_device);
	dev_info(&rtc_device.dev,
		 "registered platform RTC device (no PNP device found)\n");

	return 0;
}
/*
 * Reserve low memory still used by the BIOS: the EBDA and everything
 * between the end of conventional memory and the 1MB mark.
 */
void __init reserve_ebda_region(void)
{
	unsigned int lowmem, ebda_addr;

	/*
	 * To determine the position of the EBDA and the
	 * end of conventional memory, we need to look at
	 * the BIOS data area. In a paravirtual environment
	 * that area is absent. We'll just have to assume
	 * that the paravirt case can handle memory setup
	 * correctly, without our help.
	 */
	if (paravirt_enabled())
		return;

	/* end of low (conventional) memory, reported by the BIOS in KiB */
	lowmem = *(unsigned short *)__va(BIOS_LOWMEM_KILOBYTES);
	lowmem <<= 10;

	/* start of EBDA area */
	ebda_addr = get_bios_ebda();

	/*
	 * Fixup: bios puts an EBDA in the top 64K segment of conventional
	 * memory, but does not adjust lowmem.  (Unsigned subtraction also
	 * catches ebda_addr > lowmem here.)
	 */
	if ((lowmem - ebda_addr) <= 0x10000)
		lowmem = ebda_addr;

	/*
	 * Fixup: bios does not report an EBDA at all.
	 * Some old Dells seem to need 4k anyhow (bugzilla 2990)
	 */
	if ((ebda_addr == 0) && (lowmem >= 0x9f000))
		lowmem = 0x9f000;

	/* Paranoia: should never happen, but... */
	if ((lowmem == 0) || (lowmem >= 0x100000))
		lowmem = 0x9f000;

	/* reserve all memory between lowmem and the 1MB mark */
	memblock_x86_reserve_range(lowmem, 0x100000, "* BIOS reserved");
}
/*
 * Reserve low memory still used by the BIOS: the EBDA and everything
 * between the end of conventional memory and the 1MB mark.
 */
void __init reserve_ebda_region(void)
{
	unsigned int lowmem, ebda_addr;

	/*
	 * The EBDA position and the end of conventional memory come from
	 * the BIOS data area.  A paravirtual environment has no BIOS data
	 * area, so trust the hypervisor's memory setup instead.
	 */
	if (paravirt_enabled())
		return;

	/* End of low (conventional) memory, reported by the BIOS in KiB. */
	lowmem = *(unsigned short *)__va(BIOS_LOWMEM_KILOBYTES);
	lowmem <<= 10;

	/* Start of the EBDA area, also from the BIOS data area. */
	ebda_addr = get_bios_ebda();

	/*
	 * Fixup: the BIOS put an EBDA in the top 64K segment of
	 * conventional memory but did not adjust lowmem to match.
	 * (Unsigned subtraction also catches ebda_addr > lowmem.)
	 */
	if ((lowmem - ebda_addr) <= 0x10000)
		lowmem = ebda_addr;

	/*
	 * Fixup: the BIOS reports no EBDA at all.  Reserve the top 4K
	 * anyway - some old Dells seem to need it (bugzilla 2990).
	 */
	if ((ebda_addr == 0) && (lowmem >= 0x9f000))
		lowmem = 0x9f000;

	/* Paranoia: lowmem of 0 or >= 1MB should never happen, but... */
	if ((lowmem == 0) || (lowmem >= 0x100000))
		lowmem = 0x9f000;

	/* Reserve all memory between lowmem and the 1MB mark. */
	memblock_reserve(lowmem, 0x100000 - lowmem);
}
/*
 * Apply model-specific errata workarounds and quirks for Intel CPUs:
 * the F00F lockup bug, the bogus SEP CPUID bit, the P4 Xeon prefetch
 * erratum, the 11AP local-APIC erratum, and movsl alignment tuning.
 */
static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
{
	unsigned long lo, hi;

#ifdef CONFIG_X86_F00F_BUG
	/*
	 * All current models of Pentium and Pentium with MMX technology CPUs
	 * have the F0 0F bug, which lets nonprivileged users lock up the
	 * system.
	 * Note that the workaround only should be initialized once...
	 */
	c->f00f_bug = 0;
	if (!paravirt_enabled() && c->x86 == 5) {
		static int f00f_workaround_enabled;

		c->f00f_bug = 1;
		if (!f00f_workaround_enabled) {
			trap_init_f00f_bug();
			printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n");
			f00f_workaround_enabled = 1;
		}
	}
#endif

	/*
	 * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
	 * model 3 mask 3
	 */
	if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
		clear_cpu_cap(c, X86_FEATURE_SEP);

	/*
	 * P4 Xeon errata 037 workaround.
	 * Hardware prefetcher may cause stale data to be loaded into the cache.
	 * Disable it via a read-modify-write of IA32_MISC_ENABLE.
	 */
	if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
		rdmsr(MSR_IA32_MISC_ENABLE, lo, hi);
		if ((lo & MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE) == 0) {
			printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n");
			printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n");
			lo |= MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE;
			wrmsr(MSR_IA32_MISC_ENABLE, lo, hi);
		}
	}

	/*
	 * See if we have a good local APIC by checking for buggy Pentia,
	 * i.e. all B steppings and the C2 stepping of P54C when using their
	 * integrated APIC (see 11AP erratum in "Pentium Processor
	 * Specification Update").
	 */
	if (cpu_has_apic && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
	    (c->x86_mask < 0x6 || c->x86_mask == 0xb))
		set_cpu_cap(c, X86_FEATURE_11AP);

#ifdef CONFIG_X86_INTEL_USERCOPY
	/*
	 * Set up the preferred alignment for movsl bulk memory moves
	 */
	switch (c->x86) {
	case 4:		/* 486: untested */
		break;
	case 5:		/* Old Pentia: untested */
		break;
	case 6:		/* PII/PIII only like movsl with 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	case 15:	/* P4 is OK down to 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	}
#endif

#ifdef CONFIG_X86_NUMAQ
	numaq_tsc_disable();
#endif

	intel_smp_check(c);
}
#ifdef CONFIG_PARAVIRT
	/*
	 * With pv_ops compiled in but running on bare metal, skip the
	 * paravirt indirection and commit the PTE protection change
	 * directly; otherwise go through the pv_ops hook.
	 * NOTE(review): fragment of a larger function - mm/addr/pte/ptent
	 * come from the enclosing scope, which is not visible here.
	 */
	if (likely(!paravirt_enabled()))
		__ptep_modify_prot_commit(mm, addr, pte, ptent);
	else
#endif
		ptep_modify_prot_commit(mm, addr, pte, ptent);
/*
 * Apply model-specific errata workarounds and quirks for Intel CPUs:
 * the F00F lockup bug, the bogus SEP CPUID bit, optional forced PAE,
 * the P4 Xeon prefetch erratum, the 11AP local-APIC erratum, and
 * movsl alignment tuning.
 */
static void intel_workarounds(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_F00F_BUG
	/*
	 * All models of Pentium and Pentium with MMX technology CPUs
	 * have the F0 0F bug, which lets nonprivileged users lock up the
	 * system. Announce that the fault handler will be checking for it.
	 * The Quark is also family 5, but does not have the same bug.
	 */
	clear_cpu_bug(c, X86_BUG_F00F);
	if (!paravirt_enabled() && c->x86 == 5 && c->x86_model < 9) {
		static int f00f_workaround_enabled;

		set_cpu_bug(c, X86_BUG_F00F);
		if (!f00f_workaround_enabled) {
			printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n");
			f00f_workaround_enabled = 1;
		}
	}
#endif

	/*
	 * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
	 * model 3 mask 3
	 */
	if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
		clear_cpu_cap(c, X86_FEATURE_SEP);

	/*
	 * PAE CPUID issue: many Pentium M report no PAE but may have a
	 * functionally usable PAE implementation.
	 * Forcefully enable PAE if kernel parameter "forcepae" is present.
	 */
	if (forcepae) {
		printk(KERN_WARNING "PAE forced!\n");
		set_cpu_cap(c, X86_FEATURE_PAE);
		add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
	}

	/*
	 * P4 Xeon errata 037 workaround.
	 * Hardware prefetcher may cause stale data to be loaded into the cache.
	 * Only print when msr_set_bit() > 0 - presumably meaning the bit was
	 * newly set; TODO confirm against the msr_set_bit() contract.
	 */
	if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
		if (msr_set_bit(MSR_IA32_MISC_ENABLE,
				MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT) > 0) {
			pr_info("CPU: C0 stepping P4 Xeon detected.\n");
			pr_info("CPU: Disabling hardware prefetching (Errata 037)\n");
		}
	}

	/*
	 * See if we have a good local APIC by checking for buggy Pentia,
	 * i.e. all B steppings and the C2 stepping of P54C when using their
	 * integrated APIC (see 11AP erratum in "Pentium Processor
	 * Specification Update").
	 */
	if (cpu_has_apic && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
	    (c->x86_mask < 0x6 || c->x86_mask == 0xb))
		set_cpu_bug(c, X86_BUG_11AP);

#ifdef CONFIG_X86_INTEL_USERCOPY
	/*
	 * Set up the preferred alignment for movsl bulk memory moves
	 */
	switch (c->x86) {
	case 4:		/* 486: untested */
		break;
	case 5:		/* Old Pentia: untested */
		break;
	case 6:		/* PII/PIII only like movsl with 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	case 15:	/* P4 is OK down to 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	}
#endif

	intel_smp_check(c);
}