Example #1
0
static void __init xen_smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != 0);
	native_smp_prepare_boot_cpu();

	/*
	 * We've switched to the "real" per-cpu gdt, so make sure the
	 * old memory can be recycled.
	 */
	make_lowmem_page_readwrite(&per_cpu_var(gdt_page));

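	/* Try to place vcpu_info into per-cpu memory for faster access. */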
	xen_setup_vcpu_info_placement();
}
Example #2
0
void __cpuinit kvm_guest_cpu_init(void)
{
	if (!kvm_para_available())
		return;

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
		unsigned long pa;
		/* Size-based alignment is implied, but make it explicit anyway. */
		BUILD_BUG_ON(__alignof__(per_cpu_var(kvm_apic_eoi)) < 4);
		__get_cpu_var(kvm_apic_eoi) = 0;
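		/* Tell the host the physical address of this CPU's EOI flag. */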
		pa = __pa(&__get_cpu_var(kvm_apic_eoi)) | KVM_MSR_ENABLED;
		wrmsrl(MSR_KVM_PV_EOI_EN, pa);
	}

	/* Register this CPU's steal-time area with the hypervisor. */
	if (has_steal_clock)
		kvm_register_steal_time();
}
Example #3
0
void __init adjust_boot_vcpu_info(void)
{
	unsigned long lpfn, rpfn, lmfn, rmfn;
	pte_t *lpte, *rpte;
	unsigned int level;
	mmu_update_t mmu[2];

	/*
	 * setup_vcpu_info() cannot be used more than once for a given (v)CPU,
	 * hence we must swap the underlying MFNs of the two pages holding old
	 * and new vcpu_info of the boot CPU.
	 *
	 * Do *not* use __get_cpu_var() or percpu_{write,...}() here, as the per-
	 * CPU segment didn't get reloaded yet. Using percpu_read(), as in
	 * arch_use_lazy_mmu_mode(), though undesirable, is safe except for the
	 * accesses to variables that were updated in setup_percpu_areas().
	 */
	lpte = lookup_address((unsigned long)&per_cpu_var(vcpu_info)
			      + (__per_cpu_load - __per_cpu_start),
			      &level);
	rpte = lookup_address((unsigned long)&per_cpu(vcpu_info, 0), &level);
	BUG_ON(!lpte || !(pte_flags(*lpte) & _PAGE_PRESENT));
	BUG_ON(!rpte || !(pte_flags(*rpte) & _PAGE_PRESENT));
	lmfn = __pte_mfn(*lpte);
	rmfn = __pte_mfn(*rpte);

	if (lmfn == rmfn)
		return;

	lpfn = mfn_to_local_pfn(lmfn);
	rpfn = mfn_to_local_pfn(rmfn);

	printk(KERN_INFO
	       "Swapping MFNs for PFN %lx and %lx (MFN %lx and %lx)\n",
	       lpfn, rpfn, lmfn, rmfn);

	xen_l1_entry_update(lpte, pfn_pte_ma(rmfn, pte_pgprot(*lpte)));
	xen_l1_entry_update(rpte, pfn_pte_ma(lmfn, pte_pgprot(*rpte)));
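	/* Remap the direct (1:1) mapping entries to match the MFN swap. */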
#ifdef CONFIG_X86_64
	if (HYPERVISOR_update_va_mapping((unsigned long)__va(lpfn << PAGE_SHIFT),
					 pfn_pte_ma(rmfn, PAGE_KERNEL_RO), 0))
		BUG();
#endif
	if (HYPERVISOR_update_va_mapping((unsigned long)__va(rpfn << PAGE_SHIFT),
					 pfn_pte_ma(lmfn, PAGE_KERNEL),
					 UVMF_TLB_FLUSH))
		BUG();

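	/* Swap the phys-to-machine (P2M) entries for the two PFNs. */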
	set_phys_to_machine(lpfn, rmfn);
	set_phys_to_machine(rpfn, lmfn);

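	/* Update the machine-to-phys (M2P) table to match the new P2M entries. */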
	mmu[0].ptr = ((uint64_t)lmfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
	mmu[0].val = rpfn;
	mmu[1].ptr = ((uint64_t)rmfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
	mmu[1].val = lpfn;
	if (HYPERVISOR_mmu_update(mmu, 2, NULL, DOMID_SELF))
		BUG();

	/*
	 * Copy over all contents of the page just replaced, except for the
	 * vcpu_info itself, as it may have been updated after being copied
	 * from __per_cpu_load[].
	 */
	memcpy(__va(rpfn << PAGE_SHIFT),
	       __va(lpfn << PAGE_SHIFT),
	       (unsigned long)&per_cpu_var(vcpu_info) & (PAGE_SIZE - 1));
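	/* Reuse 'level' as the in-page offset just past the boot CPU's vcpu_info. */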
	level = (unsigned long)(&per_cpu_var(vcpu_info) + 1) & (PAGE_SIZE - 1);
	if (level)
		memcpy(__va(rpfn << PAGE_SHIFT) + level,
		       __va(lpfn << PAGE_SHIFT) + level,
		       PAGE_SIZE - level);
}