Example #1
/* Handle INT15 from real-mode code.
 * We use the CS:IP of the VMCALL instruction as the indication that it
 * came from the INT15 handler. Check for the E820 function and, if it
 * matches, handle it; no other INT15 function should come here. */
boolean_t handle_int15_vmcall(guest_cpu_handle_t gcpu)
{
	uint16_t selector = 0;
	uint64_t base = 0;
	uint32_t limit = 0;
	uint32_t attr = 0;
	uint32_t expected_lnr_addr;
	uint32_t vmcall_lnr_addr;
	volatile uint64_t r_rax = 0, r_rdx = 0, r_rip = 0;

	if (!(0x1 & gcpu_get_guest_visible_control_reg(gcpu, IA32_CTRL_CR0))) {
		/* PE = 0? Then the guest is in real mode.
		 * Check CS:IP to make sure this VMCALL came from the INT15
		 * handler. */
		gcpu_get_segment_reg(gcpu,
			IA32_SEG_CS,
			&selector,
			&base,
			&limit,
			&attr);
		r_rip = gcpu_get_gp_reg(gcpu, IA32_REG_RIP);

		expected_lnr_addr = SEGMENT_OFFSET_TO_LINEAR(
			g_int15_trapped_page >> 16,
			g_int15_trapped_page +
			VMCALL_OFFSET);
		vmcall_lnr_addr =
			SEGMENT_OFFSET_TO_LINEAR((uint32_t)selector,
				(uint32_t)r_rip);

		/* Check whether CS:IP matches the expected address of the
		 * VMCALL in the INT15 handler. */
		if (expected_lnr_addr == vmcall_lnr_addr) {
			r_rax = gcpu_get_gp_reg(gcpu, IA32_REG_RAX);
			r_rdx = gcpu_get_gp_reg(gcpu, IA32_REG_RDX);
			if ((0xE820 == r_rax) && (SMAP == r_rdx)) {
				if (g_emap == NULL) {
					g_emap =
						mon_malloc(sizeof(
								e820_map_state_t));
					MON_ASSERT(g_emap != NULL);
					mon_memset(g_emap, 0,
						sizeof(e820_map_state_t));
				}
				e820_save_guest_state(gcpu, g_emap);
				g_emap->guest_handle = mon_gcpu_guest_handle(
					gcpu);
				e820_int15_handler(g_emap);
				e820_restore_guest_state(gcpu, g_emap);
				gcpu_skip_guest_instruction(gcpu);
				return TRUE;
			} else {
				MON_LOG(mask_anonymous,
					level_error,
					"INT15 wasn't handled for function 0x%x\n",
					(uint32_t)r_rax);
				MON_DEADLOOP(); /* Should not get here */
				return FALSE;
			}
		}
	}

	/* Not a real-mode INT15 E820 VMCALL; let the caller handle it. */
	return FALSE;
}
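The address check above depends on SEGMENT_OFFSET_TO_LINEAR, whose definition
is not shown. A minimal sketch, assuming the conventional real-mode
translation (linear address = segment * 16 + offset):

/* Hypothetical definition, assuming the standard real-mode rule:
 * linear = (segment << 4) + offset. */
#define SEGMENT_OFFSET_TO_LINEAR(seg, off) \
	(((uint32_t)(seg) << 4) + (uint32_t)(off))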
Example #2
VMEXIT_HANDLING_STATUS vmdb_dr_access_vmexit_handler(GUEST_CPU_HANDLE gcpu)
    {
    VMCS_OBJECT                 *vmcs = gcpu_get_vmcs(gcpu);
    IA32_VMX_EXIT_QUALIFICATION qualification;
    int                         dbreg_id;
    VMM_IA32_GP_REGISTERS       gpreg_id;

    /* Decode the MOV-DR exit qualification: which GPR and which DR. */
    qualification.Uint64 = vmcs_read(vmcs, VMCS_EXIT_INFO_QUALIFICATION);
    gpreg_id = lkup_operand[qualification.DrAccess.MoveGpr];
    dbreg_id = (int) qualification.DrAccess.Number;
    if (6 == dbreg_id)      dbreg_id = IA32_REG_DR6;
    else if (7 == dbreg_id) dbreg_id = IA32_REG_DR7;

    if (0 == qualification.DrAccess.Direction) {
        // MOV to DR: the guest's write is deliberately discarded
        }
    else {
        // MOV from DR: hand the tracked debug-register value back to the guest
        UINT64 reg_value = gcpu_get_debug_reg(gcpu, (VMM_IA32_DEBUG_REGISTERS)dbreg_id);
        gcpu_set_native_gp_reg(gcpu, gpreg_id, reg_value);
        }

    gcpu_skip_guest_instruction(gcpu);
    return VMEXIT_HANDLED;
    }
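The lkup_operand table above maps the MOV-DR exit qualification's GPR field to
the VMM's register enum. Per the Intel SDM, that field encodes registers as
0=RAX, 1=RCX, 2=RDX, 3=RBX, 4=RSP, 5=RBP, 6=RSI, 7=RDI and 8-15=R8-R15, so a
plausible sketch of the table is as follows (the IA32_REG_R8..R15 member names
are assumptions, modeled on the IA32_REG_* convention used elsewhere):

/* Sketch of the lookup table, assuming the SDM encoding of the
 * MOV-DR exit-qualification GPR field. */
static const VMM_IA32_GP_REGISTERS lkup_operand[16] = {
    IA32_REG_RAX, IA32_REG_RCX, IA32_REG_RDX, IA32_REG_RBX,
    IA32_REG_RSP, IA32_REG_RBP, IA32_REG_RSI, IA32_REG_RDI,
    IA32_REG_R8,  IA32_REG_R9,  IA32_REG_R10, IA32_REG_R11,
    IA32_REG_R12, IA32_REG_R13, IA32_REG_R14, IA32_REG_R15
};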
Example #3
/*
 * Perform a task switch on behalf of a 32-bit MON guest.
 */
int task_switch_for_guest(guest_cpu_handle_t gcpu,
			  ia32_vmx_vmcs_vmexit_info_idt_vectoring_t vec_info)
{
	int ret;
	uint32_t inst_type;
	tss32_t tss;

	cr0_t cr0;
	dr7_t dr7;

	seg_reg_t gdtr;
	seg_reg_t old_ldtr;
	seg_reg_t new_ldtr;

	seg_reg_t new_tr;
	seg_reg_t old_tr;
	desc_t new_tss_desc;
	desc_t old_tss_desc;

	gcpu_get_gdt_reg(gcpu, (uint64_t *)&(gdtr.base),
		(uint32_t *)&(gdtr.limit));
	gdtr.ar.value = 0x000080;

	cr0.value =
		(uint32_t)gcpu_get_guest_visible_control_reg(gcpu,
			IA32_CTRL_CR0);

	/* Find new tr & tss. */

	get_task_info(gcpu, &inst_type, &(new_tr.selector), vec_info);

	ret = copy_from_gva(gcpu,
		(uint64_t)(gdtr.base + SELECTOR_IDX(new_tr.selector)),
		sizeof(new_tss_desc),
		(uint64_t)(&new_tss_desc));

	if (ret != 0) {
		gcpu_inject_ts(gcpu, new_tr.selector);
		return -1;
	}

	parse_desc(&new_tss_desc, &new_tr);

	if (!IS_TSS32(new_tr.ar.bits.type)) {
		gcpu_inject_ts(gcpu, new_tr.selector);
		return -1;
	}

	/* Find old ldtr. */

	gcpu_get_segment_reg(gcpu, IA32_SEG_LDTR,
		(uint16_t *)&(old_ldtr.selector),
		(uint64_t *)&(old_ldtr.base),
		(uint32_t *)&(old_ldtr.limit),
		(uint32_t *)&(old_ldtr.ar));

	/* Find old tr. */

	gcpu_get_segment_reg(gcpu, IA32_SEG_TR,
		(uint16_t *)&(old_tr.selector),
		(uint64_t *)&(old_tr.base),
		(uint32_t *)&(old_tr.limit),
		(uint32_t *)&(old_tr.ar));

	if (!IS_TSS32_BUSY(old_tr.ar.bits.type)) {
		gcpu_inject_ts(gcpu, old_tr.selector);
		return -1;
	}

	/* Save the guest state into the old TSS. For call, jmp or iret
	 * (anything but IDT vectoring), advance RIP past the task-switch
	 * instruction first, so the saved EIP points to the next one. */
	if (inst_type != TASK_SWITCH_TYPE_IDT) {
		gcpu_skip_guest_instruction(gcpu);
	}

	mon_memset(&tss, 0, sizeof(tss));
	copy_vmcs_to_tss32(gcpu, &tss);

	if (inst_type == TASK_SWITCH_TYPE_IRET) {
		((eflags_t *)&(tss.eflags))->bits.nested_task = 0;
	}

	ret = copy_to_gva(gcpu,
		/* gva of old_tss.eip */
		(uint64_t)(old_tr.base + 32),
		/* from eip to gs: total 64 bytes */
		64,
		/* hva of old_tss.eip */
		(uint64_t)&(tss.eip));

	if (ret != 0) {
		gcpu_inject_ts(gcpu, old_tr.selector);
		return -1;
	}

	/* Read new tss from memory. */

	mon_memset(&tss, 0, sizeof(tss));

	ret = copy_from_gva(gcpu,
		(uint64_t)(new_tr.base),
		sizeof(tss),
		(uint64_t)&(tss));

	if (ret != 0) {
		gcpu_inject_ts(gcpu, new_tr.selector);
		return -1;
	}

	/* Clear busy bit in old tss descriptor. */

	if ((inst_type == TASK_SWITCH_TYPE_JMP) ||
	    (inst_type == TASK_SWITCH_TYPE_IRET)) {
		ret = copy_from_gva(gcpu,
			(uint64_t)(gdtr.base + SELECTOR_IDX(old_tr.selector)),
			sizeof(old_tss_desc),
			(uint64_t)(&old_tss_desc));

		if (ret != 0) {
			gcpu_inject_ts(gcpu, old_tr.selector);
			return -1;
		}

		/* Clear the B bit, and write it back. */
		old_tss_desc.bits.type = TSS32_AVAL;

		ret = copy_to_gva(gcpu,
			(uint64_t)(gdtr.base + SELECTOR_IDX(old_tr.selector)),
			sizeof(old_tss_desc),
			(uint64_t)(&old_tss_desc));

		if (ret != 0) {
			gcpu_inject_ts(gcpu, old_tr.selector);
			return -1;
		}
	}

	/* Set busy bit in new tss descriptor. */

	if (inst_type != TASK_SWITCH_TYPE_IRET) {
		new_tss_desc.bits.type = TSS32_BUSY;
		new_tr.ar.bits.type = TSS32_BUSY;

		ret = copy_to_gva(gcpu,
			(uint64_t)(gdtr.base + SELECTOR_IDX(new_tr.selector)),
			sizeof(new_tss_desc),
			(uint64_t)(&new_tss_desc));

		if (ret != 0) {
			gcpu_inject_ts(gcpu, new_tr.selector);
			return -1;
		}
	}

	/* Save old tr in new tss. */

	if ((inst_type == TASK_SWITCH_TYPE_CALL) ||
	    (inst_type == TASK_SWITCH_TYPE_IDT)) {
		/* gva of new_tss.prev_tr */
		ret = copy_to_gva(gcpu, (uint64_t)(new_tr.base + 0),
			/* two bytes */
			sizeof(old_tr.selector),
			/* hva */
			(uint64_t)(&(old_tr.selector)));

		if (ret != 0) {
			new_tss_desc.bits.type = TSS32_AVAL;

			copy_to_gva(gcpu,
				(uint64_t)(gdtr.base +
					   SELECTOR_IDX(new_tr.selector)),
				sizeof(new_tss_desc),
				(uint64_t)(&new_tss_desc));

			gcpu_inject_ts(gcpu, new_tr.selector);
			return -1;
		}
	}

	/* Load new tr. */

	gcpu_set_segment_reg(gcpu, IA32_SEG_TR, new_tr.selector,
		new_tr.base, new_tr.limit, new_tr.ar.value);

	/* Load new cr3. */

	if (cr0.bits.pg) {
		gcpu_set_guest_visible_control_reg(gcpu, IA32_CTRL_CR3,
			tss.cr3);
		gcpu_set_control_reg(gcpu, IA32_CTRL_CR3, tss.cr3);
	}

	/* Load new flags. */

	if ((inst_type == TASK_SWITCH_TYPE_CALL) ||
	    (inst_type == TASK_SWITCH_TYPE_IDT)) {
		((eflags_t *)&(tss.eflags))->bits.nested_task = 1;
	}

	/* EFLAGS bit 1 is reserved and must always be set. */
	((eflags_t *)&(tss.eflags))->bits.rsvd_1 = 1;

	/* Load general regs. */

	gcpu_set_gp_reg(gcpu, IA32_REG_RIP, (uint64_t)tss.eip);
	gcpu_set_gp_reg(gcpu, IA32_REG_RFLAGS, (uint64_t)tss.eflags);
	gcpu_set_gp_reg(gcpu, IA32_REG_RAX, (uint64_t)tss.eax);
	gcpu_set_gp_reg(gcpu, IA32_REG_RCX, (uint64_t)tss.ecx);
	gcpu_set_gp_reg(gcpu, IA32_REG_RDX, (uint64_t)tss.edx);
	gcpu_set_gp_reg(gcpu, IA32_REG_RBX, (uint64_t)tss.ebx);
	gcpu_set_gp_reg(gcpu, IA32_REG_RBP, (uint64_t)tss.ebp);
	gcpu_set_gp_reg(gcpu, IA32_REG_RSP, (uint64_t)tss.esp);
	gcpu_set_gp_reg(gcpu, IA32_REG_RSI, (uint64_t)tss.esi);
	gcpu_set_gp_reg(gcpu, IA32_REG_RDI, (uint64_t)tss.edi);

	/* Set the TS bit in CR0. */

	cr0.bits.ts = 1;
	gcpu_set_guest_visible_control_reg(gcpu, IA32_CTRL_CR0, cr0.value);
	gcpu_set_control_reg(gcpu, IA32_CTRL_CR0, cr0.value);

	/* Load new ldtr. */

	if (tss.ldtr != old_ldtr.selector) {
		if (set_guest_ldtr(gcpu, &gdtr, &new_ldtr, &tss) != 0) {
			return -1;
		}
	}

	/* Load new seg regs. */

	if (((eflags_t *)&(tss.eflags))->bits.v86_mode == 1) {
		uint16_t es = (uint16_t)tss.es;
		uint16_t cs = (uint16_t)tss.cs;
		uint16_t ss = (uint16_t)tss.ss;
		uint16_t ds = (uint16_t)tss.ds;
		uint16_t fs = (uint16_t)tss.fs;
		uint16_t gs = (uint16_t)tss.gs;

		/* Set v86 selector, base, limit, ar, in real-mode style. */
		gcpu_set_segment_reg(gcpu,
			IA32_SEG_ES,
			es,
			es << 4,
			0xffff,
			0xf3);
		gcpu_set_segment_reg(gcpu,
			IA32_SEG_CS,
			cs,
			cs << 4,
			0xffff,
			0xf3);
		gcpu_set_segment_reg(gcpu,
			IA32_SEG_SS,
			ss,
			ss << 4,
			0xffff,
			0xf3);
		gcpu_set_segment_reg(gcpu,
			IA32_SEG_DS,
			ds,
			ds << 4,
			0xffff,
			0xf3);
		gcpu_set_segment_reg(gcpu,
			IA32_SEG_FS,
			fs,
			fs << 4,
			0xffff,
			0xf3);
		gcpu_set_segment_reg(gcpu,
			IA32_SEG_GS,
			gs,
			gs << 4,
			0xffff,
			0xf3);

		goto all_done;
	}

	/* Load new ss. */

	if (set_guest_ss(gcpu, &gdtr, &new_ldtr, &tss) != 0) {
		return -1;
	}

	/* Load new es, ds, fs, gs. */

	if ((set_guest_seg(gcpu, &gdtr, &new_ldtr, &tss, IA32_SEG_ES) != 0) ||
	    (set_guest_seg(gcpu, &gdtr, &new_ldtr, &tss, IA32_SEG_DS) != 0) ||
	    (set_guest_seg(gcpu, &gdtr, &new_ldtr, &tss, IA32_SEG_FS) != 0) ||
	    (set_guest_seg(gcpu, &gdtr, &new_ldtr, &tss, IA32_SEG_GS) != 0)) {
		return -1;
	}

	/* Load new cs. */

	if (set_guest_cs(gcpu, &gdtr, &new_ldtr, &tss) != 0) {
		return -1;
	}

all_done:

	/* Clear the local breakpoint enable bits (L0-L3, LE) in DR7. */

	dr7.value = (uint32_t)gcpu_get_debug_reg(gcpu, IA32_REG_DR7);
	dr7.bits.l0 = 0;
	dr7.bits.l1 = 0;
	dr7.bits.l2 = 0;
	dr7.bits.l3 = 0;
	dr7.bits.le = 0;
	gcpu_set_debug_reg(gcpu, IA32_REG_DR7, (uint64_t)dr7.value);

	/* T (debug trap) bit set in the new TSS? Then inject #DB. */

	if ((tss.io_base_addr & 0x00000001) != 0) {
		gcpu_inject_db(gcpu);
		return -1;
	}

	return 0;
}
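The hard-coded offsets above (prev_tr at byte 0 of the TSS, eip at byte 32,
the 64-byte eip-through-gs save window, and the T-bit test on io_base_addr)
follow the 32-bit TSS layout from the Intel SDM. A sketch of a tss32_t
consistent with that layout, using the field names seen in the code above:

/* 32-bit TSS layout per the Intel SDM; offsets in bytes.
 * Field names match their use in task_switch_for_guest(). */
typedef struct {
	uint32_t prev_tr;                /* 0: previous task link */
	uint32_t esp0, ss0;              /* 4, 8 */
	uint32_t esp1, ss1;              /* 12, 16 */
	uint32_t esp2, ss2;              /* 20, 24 */
	uint32_t cr3;                    /* 28 */
	uint32_t eip;                    /* 32: start of the saved window */
	uint32_t eflags;                 /* 36 */
	uint32_t eax, ecx, edx, ebx;     /* 40-52 */
	uint32_t esp, ebp, esi, edi;     /* 56-68 */
	uint32_t es, cs, ss, ds, fs, gs; /* 72-92: gs closes the 64-byte window */
	uint32_t ldtr;                   /* 96 */
	uint32_t io_base_addr;           /* 100: bit 0 is the T (debug trap) flag */
} tss32_t;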