void acrn_update_ucode(struct acrn_vcpu *vcpu, uint64_t v) { uint64_t gva, fault_addr = 0UL; struct ucode_header uhdr; size_t data_size; int32_t err; uint32_t err_code; spinlock_obtain(µ_code_lock); gva = v - sizeof(struct ucode_header); err_code = 0U; err = copy_from_gva(vcpu, &uhdr, gva, sizeof(uhdr), &err_code, &fault_addr); if (err < 0) { if (err == -EFAULT) { vcpu_inject_pf(vcpu, fault_addr, err_code); } } else { data_size = get_ucode_data_size(&uhdr) + sizeof(struct ucode_header); if (data_size > MICRO_CODE_SIZE_MAX) { pr_err("The size of microcode is greater than 0x%x", MICRO_CODE_SIZE_MAX); } else { err_code = 0U; err = copy_from_gva(vcpu, micro_code, gva, data_size, &err_code, &fault_addr); if (err < 0) { if (err == -EFAULT) { vcpu_inject_pf(vcpu, fault_addr, err_code); } } else { msr_write(MSR_IA32_BIOS_UPDT_TRIG, (uint64_t)micro_code + sizeof(struct ucode_header)); (void)get_microcode_version(); } } } spinlock_release(µ_code_lock); }
/*
 * Load the guest LDTR from the selector stored in the incoming TSS.
 *
 * A null selector only marks LDTR unusable (success). Any validation or
 * guest-memory failure forces SS to ring 3, injects #TS with the bad
 * selector, and returns -1; on success returns 0.
 */
static int set_guest_ldtr(guest_cpu_handle_t gcpu, seg_reg_t *gdtr,
			  seg_reg_t *ldtr, tss32_t *tss)
{
	desc_t ldt_desc;
	int rc;

	mon_memset(ldtr, 0, sizeof(seg_reg_t));
	ldtr->selector = (uint16_t)tss->ldtr;

	/* Null selector: mark unusable and succeed without touching the vmcs. */
	if (SELECTOR_IDX(ldtr->selector) == 0) {
		ldtr->ar.bits.null_bit = 1;
		return 0;
	}

	/* The LDT descriptor must come from the GDT. */
	if (!SELECTOR_GDT(ldtr->selector)) {
		goto fail;
	}

	rc = copy_from_gva(gcpu,
		(uint64_t)(gdtr->base + SELECTOR_IDX(ldtr->selector)),
		sizeof(ldt_desc), (uint64_t)(&ldt_desc));
	if (rc != 0) {
		goto fail;
	}

	parse_desc(&ldt_desc, ldtr);

	/* Must be a present system descriptor of type LDT. */
	if ((ldtr->ar.bits.s_bit != 0) ||
	    !LS_LDT(ldtr->ar.bits.type) ||
	    (ldtr->ar.bits.p_bit != 1)) {
		goto fail;
	}

	gcpu_set_segment_reg(gcpu, IA32_SEG_LDTR, ldtr->selector,
		ldtr->base, ldtr->limit, ldtr->ar.value);
	return 0;

fail:
	force_ring3_ss(gcpu);
	gcpu_inject_ts(gcpu, ldtr->selector);
	return -1;
}
/*
 * Emulate a hardware task switch for a 32-bit guest.
 *
 * Mirrors the sequence in the Intel SDM (task-switch section): locate and
 * validate the new TSS descriptor, save the current guest state into the
 * old TSS, maintain the busy bits and back-link, then load the new task's
 * registers and segments, injecting the appropriate fault on any failure.
 *
 * @param gcpu      guest CPU performing the switch.
 * @param vec_info  IDT-vectoring info from the VM-exit (used to classify
 *                  the switch source).
 * @return 0 on success, -1 after injecting an exception into the guest.
 */
int task_switch_for_guest(guest_cpu_handle_t gcpu,
	ia32_vmx_vmcs_vmexit_info_idt_vectoring_t vec_info)
{
	int ret;
	uint32_t inst_type;	/* CALL / JMP / IRET / IDT — drives busy-bit and NT handling */
	tss32_t tss;
	cr0_t cr0;
	dr7_t dr7;
	seg_reg_t gdtr;
	seg_reg_t old_ldtr;
	seg_reg_t new_ldtr;
	seg_reg_t new_tr;
	seg_reg_t old_tr;
	desc_t new_tss_desc;
	desc_t old_tss_desc;

	gcpu_get_gdt_reg(gcpu, (uint64_t *)&(gdtr.base), (uint32_t *)&(gdtr.limit));
	gdtr.ar.value = 0x000080;

	cr0.value = (uint32_t)gcpu_get_guest_visible_control_reg(gcpu, IA32_CTRL_CR0);

	/* Find new tr & tss. */
	get_task_info(gcpu, &inst_type, &(new_tr.selector), vec_info);

	/* Read the new TSS descriptor from the guest GDT. */
	ret = copy_from_gva(gcpu,
		(uint64_t)(gdtr.base + SELECTOR_IDX(new_tr.selector)),
		sizeof(new_tss_desc), (uint64_t)(&new_tss_desc));
	if (ret != 0) {
		gcpu_inject_ts(gcpu, new_tr.selector);
		return -1;
	}

	parse_desc(&new_tss_desc, &new_tr);

	if (!IS_TSS32(new_tr.ar.bits.type)) {
		gcpu_inject_ts(gcpu, new_tr.selector);
		return -1;
	}

	/* Find old ldtr. */
	gcpu_get_segment_reg(gcpu, IA32_SEG_LDTR,
		(uint16_t *)&(old_ldtr.selector),
		(uint64_t *)&(old_ldtr.base),
		(uint32_t *)&(old_ldtr.limit),
		(uint32_t *)&(old_ldtr.ar));

	/* Find old tr. */
	gcpu_get_segment_reg(gcpu, IA32_SEG_TR,
		(uint16_t *)&(old_tr.selector),
		(uint64_t *)&(old_tr.base),
		(uint32_t *)&(old_tr.limit),
		(uint32_t *)&(old_tr.ar));

	/* The outgoing task's TSS must be marked busy. */
	if (!IS_TSS32_BUSY(old_tr.ar.bits.type)) {
		gcpu_inject_ts(gcpu, old_tr.selector);
		return -1;
	}

	/* Save guest status to old tss. */
	/* call, jmp or iret: RIP advances past the switching instruction;
	 * an IDT-sourced switch keeps RIP at the interrupted instruction. */
	if (inst_type != TASK_SWITCH_TYPE_IDT) {
		gcpu_skip_guest_instruction(gcpu);
	}

	mon_memset(&tss, 0, sizeof(tss));
	copy_vmcs_to_tss32(gcpu, &tss);

	/* IRET clears NT in the state saved back into the old TSS. */
	if (inst_type == TASK_SWITCH_TYPE_IRET) {
		((eflags_t *)&(tss.eflags))->bits.nested_task = 0;
	}

	ret = copy_to_gva(gcpu,
		/* gva of old_tss.eip */
		(uint64_t)(old_tr.base + 32),
		/* from eip to gs: total 64 bytes */
		64,
		/* hva of old_tss.eip */
		(uint64_t)&(tss.eip));
	if (ret != 0) {
		gcpu_inject_ts(gcpu, old_tr.selector);
		return -1;
	}

	/* Read new tss from memory. */
	mon_memset(&tss, 0, sizeof(tss));

	ret = copy_from_gva(gcpu,
		(uint64_t)(new_tr.base),
		sizeof(tss), (uint64_t)&(tss));
	if (ret != 0) {
		gcpu_inject_ts(gcpu, new_tr.selector);
		return -1;
	}

	/* Clear busy bit in old tss descriptor.
	 * Only JMP and IRET leave the old task; CALL/IDT keep it busy so the
	 * nested-task chain remains valid. */
	if ((inst_type == TASK_SWITCH_TYPE_JMP) ||
	    (inst_type == TASK_SWITCH_TYPE_IRET)) {
		ret = copy_from_gva(gcpu,
			(uint64_t)(gdtr.base + SELECTOR_IDX(old_tr.selector)),
			sizeof(old_tss_desc), (uint64_t)(&old_tss_desc));
		if (ret != 0) {
			gcpu_inject_ts(gcpu, old_tr.selector);
			return -1;
		}
		/* Clear the B bit, and write it back. */
		old_tss_desc.bits.type = TSS32_AVAL;
		ret = copy_to_gva(gcpu,
			(uint64_t)(gdtr.base + SELECTOR_IDX(old_tr.selector)),
			sizeof(old_tss_desc), (uint64_t)(&old_tss_desc));
		if (ret != 0) {
			gcpu_inject_ts(gcpu, old_tr.selector);
			return -1;
		}
	}

	/* Set busy bit in new tss descriptor (IRET returns to an
	 * already-busy task, so it is skipped). */
	if (inst_type != TASK_SWITCH_TYPE_IRET) {
		new_tss_desc.bits.type = TSS32_BUSY;
		new_tr.ar.bits.type = TSS32_BUSY;
		ret = copy_to_gva(gcpu,
			(uint64_t)(gdtr.base + SELECTOR_IDX(new_tr.selector)),
			sizeof(new_tss_desc), (uint64_t)(&new_tss_desc));
		if (ret != 0) {
			gcpu_inject_ts(gcpu, new_tr.selector);
			return -1;
		}
	}

	/* Save old tr in new tss (back-link) for nesting switches. */
	if ((inst_type == TASK_SWITCH_TYPE_CALL) ||
	    (inst_type == TASK_SWITCH_TYPE_IDT)) {
		/* gva of new_tss.prev_tr */
		ret = copy_to_gva(gcpu,
			(uint64_t)(new_tr.base + 0),
			/* two bytes */
			sizeof(old_tr.selector),
			/* hva */
			(uint64_t)(&(old_tr.selector)));
		if (ret != 0) {
			/* Roll back the busy bit set above; the write-back is
			 * best effort — the guest gets #TS either way. */
			new_tss_desc.bits.type = TSS32_AVAL;
			copy_to_gva(gcpu,
				(uint64_t)(gdtr.base + SELECTOR_IDX(new_tr.selector)),
				sizeof(new_tss_desc), (uint64_t)(&new_tss_desc));
			gcpu_inject_ts(gcpu, new_tr.selector);
			return -1;
		}
	}

	/* Load new tr. */
	gcpu_set_segment_reg(gcpu, IA32_SEG_TR, new_tr.selector,
		new_tr.base, new_tr.limit, new_tr.ar.value);

	/* Load new cr3 (only meaningful when paging is enabled). */
	if (cr0.bits.pg) {
		gcpu_set_guest_visible_control_reg(gcpu, IA32_CTRL_CR3, tss.cr3);
		gcpu_set_control_reg(gcpu, IA32_CTRL_CR3, tss.cr3);
	}

	/* Load new flags: CALL/IDT enter a nested task (NT=1); bit 1 of
	 * EFLAGS is architecturally reserved-as-1. */
	if ((inst_type == TASK_SWITCH_TYPE_CALL) ||
	    (inst_type == TASK_SWITCH_TYPE_IDT)) {
		((eflags_t *)&(tss.eflags))->bits.nested_task = 1;
	}
	((eflags_t *)&(tss.eflags))->bits.rsvd_1 = 1;

	/* Load general regs. */
	gcpu_set_gp_reg(gcpu, IA32_REG_RIP, (uint64_t)tss.eip);
	gcpu_set_gp_reg(gcpu, IA32_REG_RFLAGS, (uint64_t)tss.eflags);
	gcpu_set_gp_reg(gcpu, IA32_REG_RAX, (uint64_t)tss.eax);
	gcpu_set_gp_reg(gcpu, IA32_REG_RCX, (uint64_t)tss.ecx);
	gcpu_set_gp_reg(gcpu, IA32_REG_RDX, (uint64_t)tss.edx);
	gcpu_set_gp_reg(gcpu, IA32_REG_RBX, (uint64_t)tss.ebx);
	gcpu_set_gp_reg(gcpu, IA32_REG_RBP, (uint64_t)tss.ebp);
	gcpu_set_gp_reg(gcpu, IA32_REG_RSP, (uint64_t)tss.esp);
	gcpu_set_gp_reg(gcpu, IA32_REG_RSI, (uint64_t)tss.esi);
	gcpu_set_gp_reg(gcpu, IA32_REG_RDI, (uint64_t)tss.edi);

	/* Set the TS bit in CR0 (task switched). */
	cr0.bits.ts = 1;
	gcpu_set_guest_visible_control_reg(gcpu, IA32_CTRL_CR0, cr0.value);
	gcpu_set_control_reg(gcpu, IA32_CTRL_CR0, cr0.value);

	/* Load new ldtr only when it actually changes. */
	if (tss.ldtr != old_ldtr.selector) {
		if (set_guest_ldtr(gcpu, &gdtr, &new_ldtr, &tss) != 0) {
			return -1;
		}
	}

	/* Load new seg regs. */
	if (((eflags_t *)&(tss.eflags))->bits.v86_mode == 1) {
		uint16_t es = (uint16_t)tss.es;
		uint16_t cs = (uint16_t)tss.cs;
		uint16_t ss = (uint16_t)tss.ss;
		uint16_t ds = (uint16_t)tss.ds;
		uint16_t fs = (uint16_t)tss.fs;
		uint16_t gs = (uint16_t)tss.gs;

		/* Set v86 selector, base, limit, ar, in real-mode style:
		 * base = selector << 4, 64 KB limit, ring-3 data AR. */
		gcpu_set_segment_reg(gcpu, IA32_SEG_ES, es, es << 4, 0xffff, 0xf3);
		gcpu_set_segment_reg(gcpu, IA32_SEG_CS, cs, cs << 4, 0xffff, 0xf3);
		gcpu_set_segment_reg(gcpu, IA32_SEG_SS, ss, ss << 4, 0xffff, 0xf3);
		gcpu_set_segment_reg(gcpu, IA32_SEG_DS, ds, ds << 4, 0xffff, 0xf3);
		gcpu_set_segment_reg(gcpu, IA32_SEG_FS, fs, fs << 4, 0xffff, 0xf3);
		gcpu_set_segment_reg(gcpu, IA32_SEG_GS, gs, gs << 4, 0xffff, 0xf3);
		goto all_done;
	}

	/* Load new ss. */
	if (set_guest_ss(gcpu, &gdtr, &new_ldtr, &tss) != 0) {
		return -1;
	}

	/* Load new es, ds, fs, gs. */
	if ((set_guest_seg(gcpu, &gdtr, &new_ldtr, &tss, IA32_SEG_ES) != 0) ||
	    (set_guest_seg(gcpu, &gdtr, &new_ldtr, &tss, IA32_SEG_DS) != 0) ||
	    (set_guest_seg(gcpu, &gdtr, &new_ldtr, &tss, IA32_SEG_FS) != 0) ||
	    (set_guest_seg(gcpu, &gdtr, &new_ldtr, &tss, IA32_SEG_GS) != 0)) {
		return -1;
	}

	/* Load new cs. */
	if (set_guest_cs(gcpu, &gdtr, &new_ldtr, &tss) != 0) {
		return -1;
	}

all_done:
	/* Clear the LE bits in dr7 (local breakpoints do not survive a task switch). */
	dr7.value = (uint32_t)gcpu_get_debug_reg(gcpu, IA32_REG_DR7);
	dr7.bits.l0 = 0;
	dr7.bits.l1 = 0;
	dr7.bits.l2 = 0;
	dr7.bits.l3 = 0;
	dr7.bits.le = 0;
	gcpu_set_debug_reg(gcpu, IA32_REG_DR7, (uint64_t)dr7.value);

	/* Debug trap in new task? T bit of the new TSS requests #DB on entry. */
	if ((tss.io_base_addr & 0x00000001) != 0) {
		gcpu_inject_db(gcpu);
		return -1;
	}
	return 0;
}
/* * Set guest ES, DS, FS, or GS, based on register name and new tss. */ static int set_guest_seg(guest_cpu_handle_t gcpu, seg_reg_t *gdtr, seg_reg_t *ldtr, tss32_t *tss, mon_ia32_segment_registers_t name) { desc_t desc; seg_reg_t seg; seg_reg_t *dtr; uint32_t cpl; int r; mon_memset(&seg, 0, sizeof(seg)); if (name == IA32_SEG_ES) { seg.selector = (uint16_t)tss->es; } else if (name == IA32_SEG_DS) { seg.selector = (uint16_t)tss->ds; } else if (name == IA32_SEG_FS) { seg.selector = (uint16_t)tss->fs; } else if (name == IA32_SEG_GS) { seg.selector = (uint16_t)tss->gs; } else { return -1; } cpl = SELECTOR_RPL(tss->cs); dtr = SELECTOR_GDT(seg.selector) ? gdtr : ldtr; if (SELECTOR_IDX(seg.selector) == 0) { seg.selector = 0; seg.ar.bits.null_bit = 1; goto set_seg_reg; } r = copy_from_gva(gcpu, (uint64_t)(dtr->base + SELECTOR_IDX(seg.selector)), sizeof(desc), (uint64_t)(&desc) ); if (r != 0) { force_ring3_ss(gcpu); gcpu_inject_ts(gcpu, seg.selector); return -1; } parse_desc(&desc, &seg); if ((seg.ar.bits.s_bit == 0) || /* must be non-sys desc */ (IS_CODE(seg.ar.bits.type) && !IS_CODE_R(seg.ar.bits.type))) { force_ring3_ss(gcpu); gcpu_inject_ts(gcpu, seg.selector); return -1; } if (seg.ar.bits.p_bit != 1) { /* Must be present. */ force_ring3_ss(gcpu); gcpu_inject_np(gcpu, seg.selector); return -1; } /* If g_bit is set, the unit is 4 KB. */ if (seg.ar.bits.g_bit == 1) { seg.limit = (seg.limit << 12) | 0xfff; } /* Priv checks. */ if (IS_CODE(seg.ar.bits.type) && !IS_CODE_CONFORM(seg.ar.bits.type)) { uint32_t rpl = (uint32_t)SELECTOR_RPL(seg.selector); if ((seg.ar.bits.dpl < cpl) || (seg.ar.bits.dpl < rpl)) { force_ring3_ss(gcpu); gcpu_inject_ts(gcpu, seg.selector); return -1; } } set_seg_reg: gcpu_set_segment_reg(gcpu, name, seg.selector, seg.base, seg.limit, seg.ar.value); return 0; }
/*
 * Load guest CS from the selector in the new TSS.
 *
 * Validates that the selector names a present code descriptor with
 * privilege consistent with the new CPL, sets the accessed bit in the
 * guest descriptor if needed, loads CS, and finally checks EIP against
 * the CS limit. On failure injects #TS (#NP if not present) and returns
 * -1; returns 0 on success.
 */
static int set_guest_cs(guest_cpu_handle_t gcpu, seg_reg_t *gdtr,
			seg_reg_t *ldtr, tss32_t *tss)
{
	desc_t desc;
	seg_reg_t cs;
	seg_reg_t *dtr;
	uint32_t cpl;
	int r;

	mon_memset(&cs, 0, sizeof(cs));
	cs.selector = (uint16_t)tss->cs;
	cpl = SELECTOR_RPL(tss->cs);

	/* CS must not be null. */
	if (SELECTOR_IDX(cs.selector) == 0) {
		gcpu_inject_ts(gcpu, cs.selector);
		return -1;
	}

	dtr = SELECTOR_GDT(cs.selector) ? gdtr : ldtr;

	r = copy_from_gva(gcpu,
		(uint64_t)(dtr->base + SELECTOR_IDX(cs.selector)),
		sizeof(desc), (uint64_t)(&desc));
	if (r != 0) {
		gcpu_inject_ts(gcpu, cs.selector);
		return -1;
	}

	parse_desc(&desc, &cs);

	/* Must be present. */
	if (cs.ar.bits.p_bit != 1) {
		gcpu_inject_np(gcpu, cs.selector);
		return -1;
	}

	/* Must be a non-system code descriptor. */
	if ((cs.ar.bits.s_bit == 0) || !IS_CODE(cs.ar.bits.type)) {
		gcpu_inject_ts(gcpu, cs.selector);
		return -1;
	}

	/* Priv checks: conforming code allows DPL <= CPL, otherwise DPL == CPL. */
	if (IS_CODE_CONFORM(cs.ar.bits.type)) {
		if (cs.ar.bits.dpl > cpl) {
			gcpu_inject_ts(gcpu, cs.selector);
			return -1;
		}
	} else {
		if (cs.ar.bits.dpl != cpl) {
			gcpu_inject_ts(gcpu, cs.selector);
			return -1;
		}
	}

	/* If g_bit is set, the limit unit is 4 KB. */
	if (cs.ar.bits.g_bit == 1) {
		cs.limit = (cs.limit << 12) | 0xfff;
	}

	/* Set the accessed bit in the guest's descriptor if not yet set.
	 * Uses SELECTOR_IDX() for the descriptor offset, consistent with
	 * every other descriptor access (was a literal `& 0xfff8`). */
	if (!IS_ASSESSED(cs.ar.bits.type)) {
		SET_ASSESSED(cs.ar.bits.type);
		SET_ASSESSED(desc.bits.type);
		r = copy_to_gva(gcpu,
			(uint64_t)(dtr->base + SELECTOR_IDX(cs.selector)),
			sizeof(desc), (uint64_t)(&desc));
		if (r != 0) {
			gcpu_inject_ts(gcpu, cs.selector);
			return -1;
		}
	}

	cs.ar.bits.null_bit = 0;
	gcpu_set_segment_reg(gcpu, IA32_SEG_CS, cs.selector, cs.base,
		cs.limit, cs.ar.value);

	/* The new EIP must lie within the new CS limit. */
	if (tss->eip > cs.limit) {
		gcpu_inject_ts(gcpu, cs.selector);
		return -1;
	}
	return 0;
}
/*
 * Load guest SS from the selector in the new TSS.
 *
 * SS must be a present, writable data segment whose DPL and RPL both
 * equal the new CPL. Validation failures force SS to ring 3 and inject
 * #TS (#SS for a not-present segment), returning -1; success returns 0.
 */
static int set_guest_ss(guest_cpu_handle_t gcpu, seg_reg_t *gdtr,
			seg_reg_t *ldtr, tss32_t *tss)
{
	desc_t ss_desc;
	seg_reg_t ss;
	seg_reg_t *table;
	uint32_t cpl;
	int rc;

	mon_memset(&ss, 0, sizeof(ss));
	ss.selector = (uint16_t)tss->ss;
	cpl = SELECTOR_RPL(tss->cs);

	/* SS must not be null. */
	if (SELECTOR_IDX(ss.selector) == 0) {
		goto fail_ts;
	}

	table = SELECTOR_GDT(ss.selector) ? gdtr : ldtr;

	rc = copy_from_gva(gcpu,
		(uint64_t)(table->base + SELECTOR_IDX(ss.selector)),
		sizeof(ss_desc), (uint64_t)(&ss_desc));
	if (rc != 0) {
		goto fail_ts;
	}

	parse_desc(&ss_desc, &ss);

	/* Must be present — not-present gets #SS rather than #TS. */
	if (ss.ar.bits.p_bit == 0) {
		force_ring3_ss(gcpu);
		gcpu_inject_ss(gcpu, ss.selector);
		return -1;
	}

	/* Non-system writable data segment, with DPL == RPL == CPL. */
	if ((ss.ar.bits.s_bit == 0) ||
	    IS_CODE(ss.ar.bits.type) ||
	    !IS_DATA_RW(ss.ar.bits.type) ||
	    (ss.ar.bits.dpl != cpl) ||
	    ((uint32_t)SELECTOR_RPL(ss.selector) != cpl)) {
		goto fail_ts;
	}

	/* If g_bit is set, the limit unit is 4 KB. */
	if (ss.ar.bits.g_bit == 1) {
		ss.limit = (ss.limit << 12) | 0xfff;
	}

	/* Set the accessed bit in the guest's descriptor if not yet set. */
	if (!IS_ASSESSED(ss.ar.bits.type)) {
		SET_ASSESSED(ss.ar.bits.type);
		SET_ASSESSED(ss_desc.bits.type);
		rc = copy_to_gva(gcpu,
			(uint64_t)(table->base + SELECTOR_IDX(ss.selector)),
			sizeof(ss_desc), (uint64_t)(&ss_desc));
		if (rc != 0) {
			goto fail_ts;
		}
	}

	gcpu_set_segment_reg(gcpu, IA32_SEG_SS, ss.selector, ss.base,
		ss.limit, ss.ar.value);
	return 0;

fail_ts:
	force_ring3_ss(gcpu);
	gcpu_inject_ts(gcpu, ss.selector);
	return -1;
}