// Function : vmdb_settings_apply_to_hw
// Purpose  : Update GCPU DRs from its guest's VMDB context
// Arguments: GUEST_CPU_HANDLE gcpu
// Returns  : void
void vmdb_settings_apply_to_hw(GUEST_CPU_HANDLE gcpu)
{
    VMDB_THREAD_CONTEXT *vmdb = gcpu_get_vmdb(gcpu);

    if (NULL != vmdb) {
        UINT64 rflags;
        VMCS_OBJECT *vmcs = gcpu_get_vmcs(gcpu);

        /* Apply the VMDB-tracked debug registers to the guest CPU. */
        gcpu_set_debug_reg(gcpu, IA32_REG_DR7, vmdb->dr7);
        gcpu_set_debug_reg(gcpu, IA32_REG_DR0, vmdb->dr[0]);
        gcpu_set_debug_reg(gcpu, IA32_REG_DR1, vmdb->dr[1]);
        gcpu_set_debug_reg(gcpu, IA32_REG_DR2, vmdb->dr[2]);
        gcpu_set_debug_reg(gcpu, IA32_REG_DR3, vmdb->dr[3]);

        /* Mirror the single-step setting into the guest's RFLAGS.TF bit. */
        rflags = vmcs_read(vmcs, VMCS_GUEST_RFLAGS);
        if (vmdb->sstep) {
            BIT_SET64(rflags, RFLAGS_TF_BIT);
        } else {
            BIT_CLR64(rflags, RFLAGS_TF_BIT);
        }
        vmcs_write(vmcs, VMCS_GUEST_RFLAGS, rflags);
    }
}
/*
 * This function performs a task switch for a 32-bit MON guest.
 */
int task_switch_for_guest(guest_cpu_handle_t gcpu,
                          ia32_vmx_vmcs_vmexit_info_idt_vectoring_t vec_info)
{
    int ret;
    uint32_t inst_type;
    tss32_t tss;
    cr0_t cr0;
    dr7_t dr7;

    seg_reg_t gdtr;
    seg_reg_t old_ldtr;
    seg_reg_t new_ldtr;

    seg_reg_t new_tr;
    seg_reg_t old_tr;
    desc_t new_tss_desc;
    desc_t old_tss_desc;

    gcpu_get_gdt_reg(gcpu, (uint64_t *)&(gdtr.base), (uint32_t *)&(gdtr.limit));
    gdtr.ar.value = 0x000080;

    cr0.value = (uint32_t)gcpu_get_guest_visible_control_reg(gcpu, IA32_CTRL_CR0);

    /* Find new tr & tss. */
    get_task_info(gcpu, &inst_type, &(new_tr.selector), vec_info);

    ret = copy_from_gva(gcpu,
        (uint64_t)(gdtr.base + SELECTOR_IDX(new_tr.selector)),
        sizeof(new_tss_desc),
        (uint64_t)(&new_tss_desc));
    if (ret != 0) {
        gcpu_inject_ts(gcpu, new_tr.selector);
        return -1;
    }

    parse_desc(&new_tss_desc, &new_tr);

    if (!IS_TSS32(new_tr.ar.bits.type)) {
        gcpu_inject_ts(gcpu, new_tr.selector);
        return -1;
    }

    /* Find old ldtr. */
    gcpu_get_segment_reg(gcpu, IA32_SEG_LDTR,
        (uint16_t *)&(old_ldtr.selector),
        (uint64_t *)&(old_ldtr.base),
        (uint32_t *)&(old_ldtr.limit),
        (uint32_t *)&(old_ldtr.ar));

    /* Find old tr. */
    gcpu_get_segment_reg(gcpu, IA32_SEG_TR,
        (uint16_t *)&(old_tr.selector),
        (uint64_t *)&(old_tr.base),
        (uint32_t *)&(old_tr.limit),
        (uint32_t *)&(old_tr.ar));

    if (!IS_TSS32_BUSY(old_tr.ar.bits.type)) {
        gcpu_inject_ts(gcpu, old_tr.selector);
        return -1;
    }

    /* Save guest status to old tss. */
    /* call, jmp or iret */
    if (inst_type != TASK_SWITCH_TYPE_IDT) {
        gcpu_skip_guest_instruction(gcpu);
    }

    mon_memset(&tss, 0, sizeof(tss));
    copy_vmcs_to_tss32(gcpu, &tss);

    if (inst_type == TASK_SWITCH_TYPE_IRET) {
        ((eflags_t *)&(tss.eflags))->bits.nested_task = 0;
    }

    ret = copy_to_gva(gcpu,
        /* gva of old_tss.eip */
        (uint64_t)(old_tr.base + 32),
        /* from eip to gs: total 64 bytes */
        64,
        /* hva of old_tss.eip */
        (uint64_t)&(tss.eip));
    if (ret != 0) {
        gcpu_inject_ts(gcpu, old_tr.selector);
        return -1;
    }

    /* Read new tss from memory. */
    mon_memset(&tss, 0, sizeof(tss));
    ret = copy_from_gva(gcpu,
        (uint64_t)(new_tr.base),
        sizeof(tss),
        (uint64_t)&(tss));
    if (ret != 0) {
        gcpu_inject_ts(gcpu, new_tr.selector);
        return -1;
    }

    /* Clear busy bit in old tss descriptor. */
    if ((inst_type == TASK_SWITCH_TYPE_JMP) ||
        (inst_type == TASK_SWITCH_TYPE_IRET)) {
        ret = copy_from_gva(gcpu,
            (uint64_t)(gdtr.base + SELECTOR_IDX(old_tr.selector)),
            sizeof(old_tss_desc),
            (uint64_t)(&old_tss_desc));
        if (ret != 0) {
            gcpu_inject_ts(gcpu, old_tr.selector);
            return -1;
        }

        /* Clear the B bit, and write it back. */
        old_tss_desc.bits.type = TSS32_AVAL;
        ret = copy_to_gva(gcpu,
            (uint64_t)(gdtr.base + SELECTOR_IDX(old_tr.selector)),
            sizeof(old_tss_desc),
            (uint64_t)(&old_tss_desc));
        if (ret != 0) {
            gcpu_inject_ts(gcpu, old_tr.selector);
            return -1;
        }
    }

    /* Set busy bit in new tss descriptor. */
    if (inst_type != TASK_SWITCH_TYPE_IRET) {
        new_tss_desc.bits.type = TSS32_BUSY;
        new_tr.ar.bits.type = TSS32_BUSY;

        ret = copy_to_gva(gcpu,
            (uint64_t)(gdtr.base + SELECTOR_IDX(new_tr.selector)),
            sizeof(new_tss_desc),
            (uint64_t)(&new_tss_desc));
        if (ret != 0) {
            gcpu_inject_ts(gcpu, new_tr.selector);
            return -1;
        }
    }

    /* Save old tr in new tss. */
    if ((inst_type == TASK_SWITCH_TYPE_CALL) ||
        (inst_type == TASK_SWITCH_TYPE_IDT)) {
        /* gva of new_tss.prev_tr */
        ret = copy_to_gva(gcpu,
            (uint64_t)(new_tr.base + 0),
            /* two bytes */
            sizeof(old_tr.selector),
            /* hva */
            (uint64_t)(&(old_tr.selector)));
        if (ret != 0) {
            new_tss_desc.bits.type = TSS32_AVAL;
            copy_to_gva(gcpu,
                (uint64_t)(gdtr.base + SELECTOR_IDX(new_tr.selector)),
                sizeof(new_tss_desc),
                (uint64_t)(&new_tss_desc));
            gcpu_inject_ts(gcpu, new_tr.selector);
            return -1;
        }
    }

    /* Load new tr. */
    gcpu_set_segment_reg(gcpu, IA32_SEG_TR, new_tr.selector, new_tr.base,
        new_tr.limit, new_tr.ar.value);

    /* Load new cr3. */
    if (cr0.bits.pg) {
        gcpu_set_guest_visible_control_reg(gcpu, IA32_CTRL_CR3, tss.cr3);
        gcpu_set_control_reg(gcpu, IA32_CTRL_CR3, tss.cr3);
    }

    /* Load new flags. */
    if ((inst_type == TASK_SWITCH_TYPE_CALL) ||
        (inst_type == TASK_SWITCH_TYPE_IDT)) {
        ((eflags_t *)&(tss.eflags))->bits.nested_task = 1;
    }
    ((eflags_t *)&(tss.eflags))->bits.rsvd_1 = 1;

    /* Load general regs. */
    gcpu_set_gp_reg(gcpu, IA32_REG_RIP, (uint64_t)tss.eip);
    gcpu_set_gp_reg(gcpu, IA32_REG_RFLAGS, (uint64_t)tss.eflags);
    gcpu_set_gp_reg(gcpu, IA32_REG_RAX, (uint64_t)tss.eax);
    gcpu_set_gp_reg(gcpu, IA32_REG_RCX, (uint64_t)tss.ecx);
    gcpu_set_gp_reg(gcpu, IA32_REG_RDX, (uint64_t)tss.edx);
    gcpu_set_gp_reg(gcpu, IA32_REG_RBX, (uint64_t)tss.ebx);
    gcpu_set_gp_reg(gcpu, IA32_REG_RBP, (uint64_t)tss.ebp);
    gcpu_set_gp_reg(gcpu, IA32_REG_RSP, (uint64_t)tss.esp);
    gcpu_set_gp_reg(gcpu, IA32_REG_RSI, (uint64_t)tss.esi);
    gcpu_set_gp_reg(gcpu, IA32_REG_RDI, (uint64_t)tss.edi);

    /* Set the TS bit in CR0. */
    cr0.bits.ts = 1;
    gcpu_set_guest_visible_control_reg(gcpu, IA32_CTRL_CR0, cr0.value);
    gcpu_set_control_reg(gcpu, IA32_CTRL_CR0, cr0.value);

    /* Load new ldtr. */
    if (tss.ldtr != old_ldtr.selector) {
        if (set_guest_ldtr(gcpu, &gdtr, &new_ldtr, &tss) != 0) {
            return -1;
        }
    }

    /* Load new seg regs. */
    if (((eflags_t *)&(tss.eflags))->bits.v86_mode == 1) {
        uint16_t es = (uint16_t)tss.es;
        uint16_t cs = (uint16_t)tss.cs;
        uint16_t ss = (uint16_t)tss.ss;
        uint16_t ds = (uint16_t)tss.ds;
        uint16_t fs = (uint16_t)tss.fs;
        uint16_t gs = (uint16_t)tss.gs;

        /* Set v86 selector, base, limit, ar, in real-mode style. */
        gcpu_set_segment_reg(gcpu, IA32_SEG_ES, es, es << 4, 0xffff, 0xf3);
        gcpu_set_segment_reg(gcpu, IA32_SEG_CS, cs, cs << 4, 0xffff, 0xf3);
        gcpu_set_segment_reg(gcpu, IA32_SEG_SS, ss, ss << 4, 0xffff, 0xf3);
        gcpu_set_segment_reg(gcpu, IA32_SEG_DS, ds, ds << 4, 0xffff, 0xf3);
        gcpu_set_segment_reg(gcpu, IA32_SEG_FS, fs, fs << 4, 0xffff, 0xf3);
        gcpu_set_segment_reg(gcpu, IA32_SEG_GS, gs, gs << 4, 0xffff, 0xf3);

        goto all_done;
    }

    /* Load new ss. */
    if (set_guest_ss(gcpu, &gdtr, &new_ldtr, &tss) != 0) {
        return -1;
    }

    /* Load new es, ds, fs, gs. */
    if ((set_guest_seg(gcpu, &gdtr, &new_ldtr, &tss, IA32_SEG_ES) != 0) ||
        (set_guest_seg(gcpu, &gdtr, &new_ldtr, &tss, IA32_SEG_DS) != 0) ||
        (set_guest_seg(gcpu, &gdtr, &new_ldtr, &tss, IA32_SEG_FS) != 0) ||
        (set_guest_seg(gcpu, &gdtr, &new_ldtr, &tss, IA32_SEG_GS) != 0)) {
        return -1;
    }

    /* Load new cs. */
    if (set_guest_cs(gcpu, &gdtr, &new_ldtr, &tss) != 0) {
        return -1;
    }

all_done:
    /* Clear the LE bits in dr7. */
    dr7.value = (uint32_t)gcpu_get_debug_reg(gcpu, IA32_REG_DR7);
    dr7.bits.l0 = 0;
    dr7.bits.l1 = 0;
    dr7.bits.l2 = 0;
    dr7.bits.l3 = 0;
    dr7.bits.le = 0;
    gcpu_set_debug_reg(gcpu, IA32_REG_DR7, (uint64_t)dr7.value);

    /* Debug trap in new task? */
    if ((tss.io_base_addr & 0x00000001) != 0) {
        gcpu_inject_db(gcpu);
        return -1;
    }

    return 0;
}