Example #1
void __init kvm_guest_init(void)
{
	if (!kvm_para_available())
		return;

	paravirt_ops_setup();
	register_reboot_notifier(&kvm_pv_reboot_nb);

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		has_steal_clock = 1;
		pv_time_ops.steal_clock = kvm_steal_clock;
		paravirt_steal_enabled = true;
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		apic_set_eoi_write(kvm_guest_apic_eoi_write);

#ifdef CONFIG_SMP
	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
	register_cpu_notifier(&kvm_cpu_notifier);
#else
	kvm_guest_cpu_init();
#endif

	/*
	 * Hard lockup detection is enabled by default. Disable it, as guests
	 * can get false positives too easily, for example if the host is
	 * overcommitted.
	 */
	watchdog_enable_hardlockup_detector(false);
}
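The kvm_para_available() guard at the top reduces to a hypervisor CPUID probe. A minimal user-space sketch of the idea, assuming GCC's <cpuid.h> (the kernel's real helper also scans a range of hypervisor leaves, so this is an illustration, not its implementation):

#include <stdbool.h>
#include <string.h>
#include <cpuid.h>

/* KVM guests see the signature "KVMKVMKVM\0\0\0" in ebx/ecx/edx of
 * CPUID leaf 0x40000000 (KVM_CPUID_SIGNATURE); on bare metal the
 * signature does not match. */
static bool kvm_signature_present(void)
{
	unsigned int eax, sig[3];

	__cpuid(0x40000000, eax, sig[0], sig[1], sig[2]);
	return memcmp(sig, "KVMKVMKVM\0\0\0", 12) == 0;
}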
Example #2
static void
ram_probe(void)
{
    dprintf(3, "Find memory size\n");
    if (CONFIG_COREBOOT) {
        coreboot_setup();
    } else if (usingXen()) {
        xen_setup();
    } else {
        // On emulators, get memory size from nvram.
        u32 rs = ((inb_cmos(CMOS_MEM_EXTMEM2_LOW) << 16)
                  | (inb_cmos(CMOS_MEM_EXTMEM2_HIGH) << 24));
        if (rs)
            rs += 16 * 1024 * 1024;
        else
            rs = (((inb_cmos(CMOS_MEM_EXTMEM_LOW) << 10)
                   | (inb_cmos(CMOS_MEM_EXTMEM_HIGH) << 18))
                  + 1 * 1024 * 1024);
        RamSize = rs;
        add_e820(0, rs, E820_RAM);

        // Check for memory over 4Gig
        u64 high = ((inb_cmos(CMOS_MEM_HIGHMEM_LOW) << 16)
                    | ((u32)inb_cmos(CMOS_MEM_HIGHMEM_MID) << 24)
                    | ((u64)inb_cmos(CMOS_MEM_HIGHMEM_HIGH) << 32));
        RamSizeOver4G = high;
        add_e820(0x100000000ull, high, E820_RAM);

        /* reserve 256KB BIOS area at the end of 4 GB */
        add_e820(0xfffc0000, 256*1024, E820_RESERVED);
    }

    // Don't declare any memory between 0xa0000 and 0x100000
    add_e820(BUILD_LOWRAM_END, BUILD_BIOS_ADDR-BUILD_LOWRAM_END, E820_HOLE);

    // Mark known areas as reserved.
    add_e820(BUILD_BIOS_ADDR, BUILD_BIOS_SIZE, E820_RESERVED);

    u32 count = qemu_cfg_e820_entries();
    if (count) {
        struct e820_reservation entry;
        int i;

        for (i = 0; i < count; i++) {
            qemu_cfg_e820_load_next(&entry);
            add_e820(entry.address, entry.length, entry.type);
        }
    } else if (kvm_para_available()) {
        // Backwards compatibility - provide a hard-coded range:
        // 4 pages just below the BIOS, 3 pages for the VMX TSS
        // pages and one page for the EPT real-mode page table.
        add_e820(0xfffbc000, 4*4096, E820_RESERVED);
    }

    dprintf(1, "Ram Size=0x%08x (0x%016llx high)\n", RamSize, RamSizeOver4G);
}
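To make the CMOS arithmetic concrete: the EXTMEM2 registers hold the amount of memory above 16 MiB in 64 KiB units. For a hypothetical 128 MiB guest, the 112 MiB above the base is 0x700 units, so the registers read 0x00 (low) and 0x07 (high), giving rs = 0x07 << 24 = 0x07000000 (112 MiB); adding the 16 MiB base yields RamSize = 0x08000000, i.e. 128 MiB.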
Example #3
static inline void __cpuinit
detect_hypervisor_vendor(struct cpuinfo_x86 *c)
{
	if (vmware_platform()) {
		c->x86_hyper_vendor = X86_HYPER_VENDOR_VMWARE;
	} else if (kvm_para_available()) {
		c->x86_hyper_vendor = X86_HYPER_VENDOR_KVM;
	} else {
		c->x86_hyper_vendor = X86_HYPER_VENDOR_NONE;
	}
}
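The detected vendor is consumed by the caller in the same file; a hedged sketch of that wiring, reconstructed from the hypervisor.c of that kernel generation (not verbatim):

void __cpuinit init_hypervisor(struct cpuinfo_x86 *c)
{
	detect_hypervisor_vendor(c);

	/* Only VMware exposes extra feature bits at this point;
	 * KVM guests do their setup later in kvm_guest_init(). */
	if (c->x86_hyper_vendor == X86_HYPER_VENDOR_VMWARE)
		vmware_set_feature_bits(c);
}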
Example #4
void __cpuinit kvm_guest_cpu_init(void)
{
	if (!kvm_para_available())
		return;

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
		unsigned long pa;
		/* The variable's size already implies sufficient alignment,
		 * but assert it explicitly. */
		BUILD_BUG_ON(__alignof__(per_cpu_var(kvm_apic_eoi)) < 4);
		__get_cpu_var(kvm_apic_eoi) = 0;
		pa = __pa(&__get_cpu_var(kvm_apic_eoi)) | KVM_MSR_ENABLED;
		wrmsrl(MSR_KVM_PV_EOI_EN, pa);
	}
	if (has_steal_clock)
		kvm_register_steal_time();
}
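kvm_para_has_feature() is in turn a thin CPUID wrapper. A hedged sketch of the check it performs (kvm_feature_present is a made-up name; the real helper lives in asm/kvm_para.h):

#include <stdbool.h>
#include <cpuid.h>

/* Feature bits such as KVM_FEATURE_PV_EOI are reported in EAX of
 * CPUID leaf 0x40000001 (KVM_CPUID_FEATURES). */
static bool kvm_feature_present(unsigned int feature)
{
	unsigned int eax, ebx, ecx, edx;

	__cpuid(0x40000001, eax, ebx, ecx, edx);
	return (eax >> feature) & 1;
}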
Example #5
static int __init kvm_guest_init(void)
{
	if (!kvm_para_available())
		goto free_tmp;

	if (!epapr_paravirt_enabled)
		goto free_tmp;

	if (kvm_para_has_feature(KVM_FEATURE_MAGIC_PAGE))
		kvm_use_magic_page();

#ifdef CONFIG_PPC_BOOK3S_64
	/* Enable napping */
	powersave_nap = 1;
#endif

free_tmp:
	kvm_free_tmp();

	return 0;
}
Example #6
void __init kvm_guest_init(void)
{
	if (!kvm_para_available())
		return;

	paravirt_ops_setup();
	register_reboot_notifier(&kvm_pv_reboot_nb);

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		has_steal_clock = 1;
		pv_time_ops.steal_clock = kvm_steal_clock;
		paravirt_steal_enabled = true;
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		apic_set_eoi_write(kvm_guest_apic_eoi_write);

#ifdef CONFIG_SMP
	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
	register_cpu_notifier(&kvm_cpu_notifier);
#else
	kvm_guest_cpu_init();
#endif
}
Example #7
static void kvm_flush_tlb(void)
{
	struct kvm_mmu_op_flush_tlb ftlb = {
		.header.op = KVM_MMU_OP_FLUSH_TLB,
	};

	kvm_deferred_mmu_op(&ftlb, sizeof ftlb);
}

static void kvm_release_pt(unsigned long pfn)
{
	struct kvm_mmu_op_release_pt rpt = {
		.header.op = KVM_MMU_OP_RELEASE_PT,
		.pt_phys = (u64)pfn << PAGE_SHIFT,
	};

	kvm_mmu_op(&rpt, sizeof rpt);
}

static void kvm_enter_lazy_mmu(void)
{
	paravirt_enter_lazy_mmu();
}

static void kvm_leave_lazy_mmu(void)
{
	struct kvm_para_state *state = kvm_para_state();

	mmu_queue_flush(state);
	paravirt_leave_lazy_mmu();
}

static void __init paravirt_ops_setup(void)
{
	pv_info.name = "KVM";
	pv_info.paravirt_enabled = 1;

	if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
		pv_cpu_ops.io_delay = kvm_io_delay;

	if (kvm_para_has_feature(KVM_FEATURE_MMU_OP)) {
		pv_mmu_ops.set_pte = kvm_set_pte;
		pv_mmu_ops.set_pte_at = kvm_set_pte_at;
		pv_mmu_ops.set_pmd = kvm_set_pmd;
#if PAGETABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
		pv_mmu_ops.set_pte_atomic = kvm_set_pte_atomic;
		pv_mmu_ops.pte_clear = kvm_pte_clear;
		pv_mmu_ops.pmd_clear = kvm_pmd_clear;
#endif
		pv_mmu_ops.set_pud = kvm_set_pud;
#if PAGETABLE_LEVELS == 4
		pv_mmu_ops.set_pgd = kvm_set_pgd;
#endif
#endif
		pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
		pv_mmu_ops.release_pte = kvm_release_pt;
		pv_mmu_ops.release_pmd = kvm_release_pt;
		pv_mmu_ops.release_pud = kvm_release_pt;

		pv_mmu_ops.lazy_mode.enter = kvm_enter_lazy_mmu;
		pv_mmu_ops.lazy_mode.leave = kvm_leave_lazy_mmu;
	}
#ifdef CONFIG_X86_IO_APIC
	no_timer_check = 1;
#endif
}

void __init kvm_guest_init(void)
{
	if (!kvm_para_available())
		return;

	paravirt_ops_setup();
}
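The lazy-mode hooks are what make kvm_deferred_mmu_op() pay off: between enter and leave, PTE updates are only queued, and kvm_leave_lazy_mmu() pushes the whole batch to the host at once instead of issuing one hypercall per update. A hypothetical in-kernel caller (example_batch_updates is a made-up name, for illustration only):

static void example_batch_updates(struct mm_struct *mm, unsigned long addr,
				  pte_t *ptep, pte_t pte)
{
	arch_enter_lazy_mmu_mode();	/* dispatches to kvm_enter_lazy_mmu() */
	set_pte_at(mm, addr, ptep, pte);	/* queued, no hypercall yet */
	set_pte_at(mm, addr + PAGE_SIZE, ptep + 1, pte);	/* queued too */
	arch_leave_lazy_mmu_mode();	/* kvm_leave_lazy_mmu() flushes the queue */
}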
Example #8
static bool __init kvm_x2apic_available(void)
{
	return kvm_para_available();
}
Example #9
static bool __init kvm_detect(void)
{
	if (!kvm_para_available())
		return false;
	return true;
}
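Both of these one-liners are typically plugged into the generic hypervisor-detection table. A sketch of that descriptor, assuming the struct hypervisor_x86 layout of the same kernel generation (field set reconstructed from memory, not verbatim):

const struct hypervisor_x86 x86_hyper_kvm __refconst = {
	.name			= "KVM",
	.detect			= kvm_detect,
	.x2apic_available	= kvm_x2apic_available,
};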
Example #10
void __init enable_IR_x2apic(void)
{
    unsigned long flags;
    struct IO_APIC_route_entry **ioapic_entries = NULL;
    int ret, x2apic_enabled = 0;
    int dmar_table_init_ret = 0;

#ifdef CONFIG_INTR_REMAP
    dmar_table_init_ret = dmar_table_init();
    if (dmar_table_init_ret)
        pr_debug("dmar_table_init() failed with %d:\n",
                 dmar_table_init_ret);
#endif

    ioapic_entries = alloc_ioapic_entries();
    if (!ioapic_entries) {
        pr_err("Allocate ioapic_entries failed\n");
        goto out;
    }

    ret = save_IO_APIC_setup(ioapic_entries);
    if (ret) {
        pr_info("Saving IO-APIC state failed: %d\n", ret);
        goto out;
    }

    local_irq_save(flags);
    mask_8259A();
    mask_IO_APIC_setup(ioapic_entries);

    if (dmar_table_init_ret)
        ret = 0;
    else
        ret = enable_IR();

    if (!ret) {
        /* IR is required if there is an APIC ID above 255, even
         * when running under KVM. */
        if (max_physical_apicid > 255 || !kvm_para_available())
            goto nox2apic;

        /* Without interrupt remapping, all CPUs can be addressed
         * by IOAPIC/MSI only in physical mode. */
        x2apic_force_phys();
    }

    x2apic_enabled = 1;

    if (x2apic_supported() && !x2apic_mode) {
        x2apic_mode = 1;
        enable_x2apic();
        pr_info("Enabled x2apic\n");
    }

nox2apic:
    if (!ret)
        restore_IO_APIC_setup(ioapic_entries);
    unmask_8259A();
    local_irq_restore(flags);

out:
    if (ioapic_entries)
        free_ioapic_entries(ioapic_entries);

    if (x2apic_enabled)
        return;

    if (x2apic_preenabled)
        panic("x2apic: enabled by BIOS but kernel init failed.");
    else if (cpu_has_x2apic)
        pr_info("Not enabling x2apic, Intr-remapping init failed.\n");
}