void __init init_apic_mappings(void)
{
    unsigned long apic_phys;

    if ( x2apic_enabled )
        goto __next;
    /*
     * If no local APIC can be found then set up a fake all
     * zeroes page to simulate the local APIC and another
     * one for the IO-APIC.
     */
    if (!smp_found_config && detect_init_APIC()) {
        apic_phys = __pa(alloc_xenheap_page());
        clear_page(__va(apic_phys));
    } else
        apic_phys = mp_lapic_addr;

    set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
    apic_printk(APIC_VERBOSE, "mapped APIC to %08lx (%08lx)\n",
                APIC_BASE, apic_phys);

__next:
    /*
     * Fetch the APIC ID of the BSP in case we have a
     * default configuration (or the MP table is broken).
     */
    if (boot_cpu_physical_apicid == -1U)
        boot_cpu_physical_apicid = get_apic_id();
    x86_cpu_to_apicid[0] = get_apic_id();

    init_ioapic_mappings();
}
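/*
 * Illustrative sketch, not part of the source above: once FIX_APIC_BASE is
 * mapped, local APIC registers are plain MMIO reads relative to APIC_BASE,
 * which is how get_apic_id() can work even against the fake zeroed page.
 * This assumes the usual apic_read()/APIC_ID helpers and the xAPIC layout
 * (8-bit ID in bits 31:24 of the ID register); the function name is
 * hypothetical.
 */
static inline u32 sketch_get_apic_id(void)
{
    /* xAPIC ID register at offset 0x20 from the APIC base. */
    return (apic_read(APIC_ID) >> 24) & 0xff;
}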
int vmx_add_host_load_msr(struct vcpu *v, u32 msr)
{
    unsigned int i, msr_count = v->arch.hvm_vmx.host_msr_count;
    struct vmx_msr_entry *msr_area = v->arch.hvm_vmx.host_msr_area;

    /* Already on the list? Nothing to do. */
    for ( i = 0; i < msr_count; i++ )
        if ( msr_area[i].index == msr )
            return 0;

    /* The MSR-load area is a single page of fixed-size entries. */
    if ( msr_count == (PAGE_SIZE / sizeof(struct vmx_msr_entry)) )
        return -ENOSPC;

    /* Allocate the area lazily on first use. */
    if ( msr_area == NULL )
    {
        if ( (msr_area = alloc_xenheap_page()) == NULL )
            return -ENOMEM;
        v->arch.hvm_vmx.host_msr_area = msr_area;
        __vmwrite(VM_EXIT_MSR_LOAD_ADDR, virt_to_maddr(msr_area));
    }

    /* Snapshot the current host value; it is reloaded on every VM exit. */
    msr_area[msr_count].index = msr;
    msr_area[msr_count].mbz = 0;
    rdmsrl(msr, msr_area[msr_count].data);
    v->arch.hvm_vmx.host_msr_count = ++msr_count;
    __vmwrite(VM_EXIT_MSR_LOAD_COUNT, msr_count);

    return 0;
}
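/*
 * Usage sketch (hypothetical caller, not from the source above): ask that
 * the hypervisor's MSR_STAR value be reloaded on every VM exit.  Because
 * the function issues __vmwrite(), the caller must run with v's VMCS in
 * effect (e.g. inside a vmx_vmcs_enter()/vmx_vmcs_exit() pair or on the
 * currently running vCPU).  The call is idempotent, so repeating it for
 * the same MSR is harmless.
 */
static int sketch_setup_host_msrs(struct vcpu *v)
{
    int rc = vmx_add_host_load_msr(v, MSR_STAR);

    if ( rc )
        gdprintk(XENLOG_WARNING, "host MSR load list full/OOM: %d\n", rc);
    return rc;
}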
static inline int
get_maptrack_handle(
    struct grant_table *lgt)
{
    int                   i;
    grant_handle_t        handle;
    struct grant_mapping *new_mt;
    unsigned int          new_mt_limit, nr_frames;

    if ( unlikely((handle = __get_maptrack_handle(lgt)) == -1) )
    {
        spin_lock(&lgt->lock);

        /* Re-check under the lock: another vCPU may have grown the table. */
        if ( unlikely((handle = __get_maptrack_handle(lgt)) == -1) )
        {
            nr_frames = nr_maptrack_frames(lgt);
            if ( nr_frames >= max_nr_maptrack_frames() )
            {
                spin_unlock(&lgt->lock);
                return -1;
            }

            new_mt = alloc_xenheap_page();
            if ( new_mt == NULL )
            {
                spin_unlock(&lgt->lock);
                return -1;
            }
            clear_page(new_mt);

            /* Thread the new entries onto the free list via their .ref. */
            new_mt_limit = lgt->maptrack_limit + MAPTRACK_PER_PAGE;

            for ( i = lgt->maptrack_limit; i < new_mt_limit; i++ )
            {
                new_mt[i % MAPTRACK_PER_PAGE].ref = i+1;
                new_mt[i % MAPTRACK_PER_PAGE].flags = 0;
            }

            lgt->maptrack[nr_frames] = new_mt;
            lgt->maptrack_limit      = new_mt_limit;

            gdprintk(XENLOG_INFO,
                     "Increased maptrack size to %u frames.\n", nr_frames + 1);
            handle = __get_maptrack_handle(lgt);
        }

        spin_unlock(&lgt->lock);
    }
    return handle;
}
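/*
 * Sketch of the companion fast path, assuming the free entries are chained
 * through their .ref fields as initialised above and that the head of the
 * free list lives in a maptrack_head field, with the last entry acting as a
 * sentinel.  Field names and the sentinel convention are illustrative, not
 * a verbatim copy of __get_maptrack_handle().
 */
static inline int
sketch_get_maptrack_handle(struct grant_table *t)
{
    unsigned int h = t->maptrack_head;

    /* Sentinel entry (limit - 1) terminates the free list. */
    if ( unlikely(h == (t->maptrack_limit - 1)) )
        return -1;
    t->maptrack_head = maptrack_entry(t, h).ref; /* pop the free list */
    return h;
}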
struct host_save_area *alloc_host_save_area(void)
{
    struct host_save_area *hsa;

    hsa = alloc_xenheap_page();
    if ( hsa == NULL )
    {
        printk(XENLOG_WARNING "Warning: failed to allocate hsa.\n");
        return NULL;
    }

    clear_page(hsa);
    return hsa;
}
struct vmcb_struct *alloc_vmcb(void)
{
    struct vmcb_struct *vmcb;

    vmcb = alloc_xenheap_page();
    if ( vmcb == NULL )
    {
        printk(XENLOG_WARNING "Warning: failed to allocate vmcb.\n");
        return NULL;
    }

    clear_page(vmcb);
    return vmcb;
}
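/*
 * The matching teardown for the two allocators above is symmetric: a
 * minimal sketch, assuming the standard free_xenheap_page() interface
 * (which tolerates a NULL argument).
 */
void free_host_save_area(struct host_save_area *hsa)
{
    free_xenheap_page(hsa);
}

void free_vmcb(struct vmcb_struct *vmcb)
{
    free_xenheap_page(vmcb);
}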
static struct vmcs_struct *vmx_alloc_vmcs(void)
{
    struct vmcs_struct *vmcs;

    if ( (vmcs = alloc_xenheap_page()) == NULL )
    {
        gdprintk(XENLOG_WARNING, "Failed to allocate VMCS.\n");
        return NULL;
    }

    clear_page(vmcs);
    vmcs->vmcs_revision_id = vmcs_revision_id;

    return vmcs;
}
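/*
 * The revision identifier written above must match bits 30:0 of
 * IA32_VMX_BASIC or VMPTRLD will reject the region.  A sketch of how it is
 * derived at VMX setup time, assuming the SDM's MSR_IA32_VMX_BASIC constant
 * (Xen's spelling of the constant and the probing function are simplified
 * here).
 */
static u32 vmcs_revision_id;

static void sketch_read_vmcs_revision(void)
{
    u64 basic;

    rdmsrl(MSR_IA32_VMX_BASIC, basic);
    vmcs_revision_id = (u32)basic; /* bit 31 is reserved and reads as 0 */
}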
static int construct_vmcs(struct vcpu *v)
{
    uint16_t sysenter_cs;
    unsigned long sysenter_eip;

    vmx_vmcs_enter(v);

    /* VMCS controls. */
    __vmwrite(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_control);
    __vmwrite(VM_EXIT_CONTROLS, vmx_vmexit_control);
    __vmwrite(VM_ENTRY_CONTROLS, vmx_vmentry_control);
    __vmwrite(CPU_BASED_VM_EXEC_CONTROL, vmx_cpu_based_exec_control);
    v->arch.hvm_vmx.exec_control = vmx_cpu_based_exec_control;
    if ( vmx_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS )
        __vmwrite(SECONDARY_VM_EXEC_CONTROL, vmx_secondary_exec_control);

    /* MSR access bitmap. */
    if ( cpu_has_vmx_msr_bitmap )
    {
        char *msr_bitmap = alloc_xenheap_page();

        if ( msr_bitmap == NULL )
        {
            /* Must undo vmx_vmcs_enter() on the error path. */
            vmx_vmcs_exit(v);
            return -ENOMEM;
        }

        memset(msr_bitmap, ~0, PAGE_SIZE);
        v->arch.hvm_vmx.msr_bitmap = msr_bitmap;
        __vmwrite(MSR_BITMAP, virt_to_maddr(msr_bitmap));

        vmx_disable_intercept_for_msr(v, MSR_FS_BASE);
        vmx_disable_intercept_for_msr(v, MSR_GS_BASE);
        vmx_disable_intercept_for_msr(v, MSR_IA32_SYSENTER_CS);
        vmx_disable_intercept_for_msr(v, MSR_IA32_SYSENTER_ESP);
        vmx_disable_intercept_for_msr(v, MSR_IA32_SYSENTER_EIP);
    }

    /* I/O access bitmap. */
    __vmwrite(IO_BITMAP_A, virt_to_maddr(hvm_io_bitmap));
    __vmwrite(IO_BITMAP_B, virt_to_maddr(hvm_io_bitmap + PAGE_SIZE));

    /* Host GDTR base. */
    __vmwrite(HOST_GDTR_BASE, GDT_VIRT_START(v));

    /* Host data selectors. */
    __vmwrite(HOST_SS_SELECTOR, __HYPERVISOR_DS);
    __vmwrite(HOST_DS_SELECTOR, __HYPERVISOR_DS);
    __vmwrite(HOST_ES_SELECTOR, __HYPERVISOR_DS);
    __vmwrite(HOST_FS_SELECTOR, 0);
    __vmwrite(HOST_GS_SELECTOR, 0);
    __vmwrite(HOST_FS_BASE, 0);
    __vmwrite(HOST_GS_BASE, 0);

    /* Host control registers. */
    v->arch.hvm_vmx.host_cr0 = read_cr0() | X86_CR0_TS;
    __vmwrite(HOST_CR0, v->arch.hvm_vmx.host_cr0);
    __vmwrite(HOST_CR4, mmu_cr4_features);

    /* Host CS:RIP. */
    __vmwrite(HOST_CS_SELECTOR, __HYPERVISOR_CS);
    __vmwrite(HOST_RIP, (unsigned long)vmx_asm_vmexit_handler);

    /* Host SYSENTER CS:RIP. */
    rdmsrl(MSR_IA32_SYSENTER_CS, sysenter_cs);
    __vmwrite(HOST_SYSENTER_CS, sysenter_cs);
    rdmsrl(MSR_IA32_SYSENTER_EIP, sysenter_eip);
    __vmwrite(HOST_SYSENTER_EIP, sysenter_eip);

    /* MSR intercepts. */
    __vmwrite(VM_EXIT_MSR_LOAD_COUNT, 0);
    __vmwrite(VM_EXIT_MSR_STORE_COUNT, 0);
    __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, 0);

    __vmwrite(VM_ENTRY_INTR_INFO, 0);

    __vmwrite(CR0_GUEST_HOST_MASK, ~0UL);
    __vmwrite(CR4_GUEST_HOST_MASK, ~0UL);

    __vmwrite(PAGE_FAULT_ERROR_CODE_MASK, 0);
    __vmwrite(PAGE_FAULT_ERROR_CODE_MATCH, 0);

    __vmwrite(CR3_TARGET_COUNT, 0);

    __vmwrite(GUEST_ACTIVITY_STATE, 0);

    /* Guest segment bases. */
    __vmwrite(GUEST_ES_BASE, 0);
    __vmwrite(GUEST_SS_BASE, 0);
    __vmwrite(GUEST_DS_BASE, 0);
    __vmwrite(GUEST_FS_BASE, 0);
    __vmwrite(GUEST_GS_BASE, 0);
    __vmwrite(GUEST_CS_BASE, 0);

    /* Guest segment limits. */
    __vmwrite(GUEST_ES_LIMIT, ~0u);
    __vmwrite(GUEST_SS_LIMIT, ~0u);
    __vmwrite(GUEST_DS_LIMIT, ~0u);
    __vmwrite(GUEST_FS_LIMIT, ~0u);
    __vmwrite(GUEST_GS_LIMIT, ~0u);
    __vmwrite(GUEST_CS_LIMIT, ~0u);

    /* Guest segment AR bytes. */
    __vmwrite(GUEST_ES_AR_BYTES, 0xc093); /* read/write, accessed */
    __vmwrite(GUEST_SS_AR_BYTES, 0xc093);
    __vmwrite(GUEST_DS_AR_BYTES, 0xc093);
    __vmwrite(GUEST_FS_AR_BYTES, 0xc093);
    __vmwrite(GUEST_GS_AR_BYTES, 0xc093);
    __vmwrite(GUEST_CS_AR_BYTES, 0xc09b); /* exec/read, accessed */

    /* Guest IDT. */
    __vmwrite(GUEST_IDTR_BASE, 0);
    __vmwrite(GUEST_IDTR_LIMIT, 0);

    /* Guest GDT. */
    __vmwrite(GUEST_GDTR_BASE, 0);
    __vmwrite(GUEST_GDTR_LIMIT, 0);

    /* Guest LDT. */
    __vmwrite(GUEST_LDTR_AR_BYTES, 0x0082); /* LDT */
    __vmwrite(GUEST_LDTR_SELECTOR, 0);
    __vmwrite(GUEST_LDTR_BASE, 0);
    __vmwrite(GUEST_LDTR_LIMIT, 0);

    /* Guest TSS. */
    __vmwrite(GUEST_TR_AR_BYTES, 0x008b); /* 32-bit TSS (busy) */
    __vmwrite(GUEST_TR_BASE, 0);
    __vmwrite(GUEST_TR_LIMIT, 0xff);

    __vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0);
    __vmwrite(GUEST_DR7, 0);
    __vmwrite(VMCS_LINK_POINTER, ~0UL);
#if defined(__i386__)
    __vmwrite(VMCS_LINK_POINTER_HIGH, ~0UL);
#endif

    __vmwrite(EXCEPTION_BITMAP, (HVM_TRAP_MASK |
                                 (1U << TRAP_page_fault) |
                                 (1U << TRAP_no_device)));

    v->arch.hvm_vcpu.guest_cr[0] = X86_CR0_PE | X86_CR0_ET;
    hvm_update_guest_cr(v, 0);

    v->arch.hvm_vcpu.guest_cr[4] = 0;
    hvm_update_guest_cr(v, 4);

    if ( cpu_has_vmx_tpr_shadow )
    {
        __vmwrite(VIRTUAL_APIC_PAGE_ADDR,
                  page_to_maddr(vcpu_vlapic(v)->regs_page));
        __vmwrite(TPR_THRESHOLD, 0);
    }

    vmx_vmcs_exit(v);

    paging_update_paging_modes(v); /* will update HOST & GUEST_CR3 as reqd */

    vmx_vlapic_msr_changed(v);

    return 0;
}
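/*
 * Sketch of the vmx_disable_intercept_for_msr() calls used above to let the
 * guest access the segment-base and SYSENTER MSRs without a VM exit.  The
 * layout follows the VMX spec: the 4KiB bitmap holds four 1KiB regions --
 * read-low (0x000), read-high (0x400), write-low (0x800), write-high
 * (0xc00) -- and clearing a bit disables the corresponding intercept.
 * Treat this as illustrative rather than Xen's exact implementation.
 */
void sketch_disable_intercept_for_msr(struct vcpu *v, u32 msr)
{
    unsigned long *bm = (unsigned long *)v->arch.hvm_vmx.msr_bitmap;

    if ( bm == NULL )
        return;

    if ( msr <= 0x1fff )
    {
        __clear_bit(msr, bm + 0x000 / BYTES_PER_LONG); /* read-low */
        __clear_bit(msr, bm + 0x800 / BYTES_PER_LONG); /* write-low */
    }
    else if ( (msr >= 0xc0000000U) && (msr <= 0xc0001fffU) )
    {
        msr -= 0xc0000000U;
        __clear_bit(msr, bm + 0x400 / BYTES_PER_LONG); /* read-high */
        __clear_bit(msr, bm + 0xc00 / BYTES_PER_LONG); /* write-high */
    }
}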