Example #1
/* Function: starter_main
 * Description: Called by start() in starter.S. Jumps to xmon_loader
 *              (the xmon loader). This function never returns.
 * Input: Registers pushed right to left:
 *        eip0 - return address on stack,
 *        pushal - eax, ecx, edx, ebx, esp, ebp, esi, edi
 *        pushfl - flags
 */
void starter_main(uint32_t eflags,
		  uint32_t edi,
		  uint32_t esi,
		  uint32_t ebp,
		  uint32_t esp,
		  uint32_t ebx,
		  uint32_t edx,
		  uint32_t ecx,
		  uint32_t eax,
		  uint32_t eip0)
{
	uint32_t eip1;
	xmon_desc_t *td;
	mon_guest_cpu_startup_state_t *s;

	eip1 = (uint32_t)RETURN_ADDRESS();
	td = (xmon_desc_t *)((eip1 & 0xffffff00) - 0x400);

	mon_memset((void *)GUEST1_BASE(td), 0,
		XMON_LOADER_BASE(td) - GUEST1_BASE(td));

	s = (mon_guest_cpu_startup_state_t *)GUEST1_BASE(td);
	s->gp.reg[IA32_REG_RIP] = eip0;
	s->gp.reg[IA32_REG_RFLAGS] = eflags;
	s->gp.reg[IA32_REG_RAX] = eax;
	s->gp.reg[IA32_REG_RCX] = ecx;
	s->gp.reg[IA32_REG_RDX] = edx;
	s->gp.reg[IA32_REG_RBX] = ebx;
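	/* pushal recorded esp while eip0 was still on the stack; +4
	 * resumes the guest as if that return address had been popped */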
	s->gp.reg[IA32_REG_RSP] = esp + 4;
	s->gp.reg[IA32_REG_RBP] = ebp;
	s->gp.reg[IA32_REG_RSI] = esi;
	s->gp.reg[IA32_REG_RDI] = edi;

	save_cpu_state(s);

	if (check_vmx_support() != 0) {
		goto error;
	}

	run_xmon_loader(td);

error:

	/* clean memory */

	mon_memset((void *)((uint32_t)td + td->xmon_loader_start * 512),
		0, XMON_LOADER_HEAP_BASE(td) + XMON_LOADER_HEAP_SIZE -
		(td->xmon_loader_start) * 512);

	while (1) {
	}
}
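The parameter order mirrors the frame that start() builds: the caller's return address, then pushal, then pushfl. A struct view of that frame, inferred from the header comment rather than taken from starter.S itself:

typedef struct {
	uint32_t eflags;        /* pushfl, pushed last (lowest address) */
	uint32_t edi;           /* pushal pushes eax first, edi last */
	uint32_t esi;
	uint32_t ebp;
	uint32_t esp;           /* esp as sampled by pushal */
	uint32_t ebx;
	uint32_t edx;
	uint32_t ecx;
	uint32_t eax;
	uint32_t eip0;          /* return address, deepest on the stack */
} starter_frame_t;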
Example #2
/* Handle INT15 from real-mode code.
 * The CS:IP of the VMCALL instruction is used as the indication that an
 * INT15 occurred; check for the E820 function and, if it matches, handle it.
 * No other INT15 function should come here. */
boolean_t handle_int15_vmcall(guest_cpu_handle_t gcpu)
{
	uint16_t selector = 0;
	uint64_t base = 0;
	uint32_t limit = 0;
	uint32_t attr = 0;
	uint32_t expected_lnr_addr;
	uint32_t vmcall_lnr_addr;
	volatile uint64_t r_rax = 0, r_rdx = 0, r_rip = 0;

	if (!(0x1 & gcpu_get_guest_visible_control_reg(gcpu, IA32_CTRL_CR0))) {
		/* PE = 0, so real mode. Get CS:IP to verify that this
		 * VMCALL came from the INT15 handler. */
		gcpu_get_segment_reg(gcpu,
			IA32_SEG_CS,
			&selector,
			&base,
			&limit,
			&attr);
		r_rip = gcpu_get_gp_reg(gcpu, IA32_REG_RIP);

		expected_lnr_addr =
			SEGMENT_OFFSET_TO_LINEAR(g_int15_trapped_page >> 16,
				g_int15_trapped_page + VMCALL_OFFSET);
		vmcall_lnr_addr =
			SEGMENT_OFFSET_TO_LINEAR((uint32_t)selector,
				(uint32_t)r_rip);

		/* check whether CS:IP matches the expected address of the
		 * VMCALL in the INT15 handler */
		if (expected_lnr_addr == vmcall_lnr_addr) {
			r_rax = gcpu_get_gp_reg(gcpu, IA32_REG_RAX);
			r_rdx = gcpu_get_gp_reg(gcpu, IA32_REG_RDX);
			if ((0xE820 == r_rax) && (SMAP == r_rdx)) {
				if (g_emap == NULL) {
					g_emap = mon_malloc(
						sizeof(e820_map_state_t));
					MON_ASSERT(g_emap != NULL);
					mon_memset(g_emap, 0,
						sizeof(e820_map_state_t));
				}
				e820_save_guest_state(gcpu, g_emap);
				g_emap->guest_handle =
					mon_gcpu_guest_handle(gcpu);
				e820_int15_handler(g_emap);
				e820_restore_guest_state(gcpu, g_emap);
				gcpu_skip_guest_instruction(gcpu);
				return TRUE;
			} else {
				MON_LOG(mask_anonymous,
					level_error,
					"INT15 wasn't handled for function 0x%x\n",
					r_rax);
				MON_DEADLOOP(); /* Should not get here */
				return FALSE;
			}
		}
	}

	/* not the INT15 E820 VMCALL we trap (wrong CS:IP or not in
	 * real mode), so report it as unhandled */
	return FALSE;
}
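SEGMENT_OFFSET_TO_LINEAR is not defined in this excerpt. For real-mode addressing it presumably computes segment * 16 + offset; a minimal sketch under that assumption:

/* real-mode linear address: segment shifted left by 4 plus offset */
#define SEGMENT_OFFSET_TO_LINEAR(seg, offset) \
	((((uint32_t)(seg)) << 4) + (uint32_t)(offset))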
Example #3
boolean_t mon_stack_initialize(IN const mon_startup_struct_t *startup_struct)
{
	uint64_t mon_stack_base_address;
	uint32_t mon_stack_size_per_cpu;
	uint32_t mon_max_allowed_cpus;

	if (startup_struct == NULL) {
		return FALSE;
	}

	mon_memset(&g_stacks_infos_s, 0, sizeof(g_stacks_infos_s));

	mon_stack_base_address =
		mon_stacks_retrieve_stacks_base_addr_from_startup_struct
			(startup_struct);
	mon_stack_size_per_cpu =
		mon_stacks_retrieve_stack_size_per_cpu_from_startup_struct
			(startup_struct);
	mon_max_allowed_cpus =
		mon_stack_retrieve_max_allowed_cpus_from_startup_struct(
			startup_struct);

	mon_stacks_info_set_stacks_base(g_stacks_infos, mon_stack_base_address);
	mon_stacks_info_set_size_of_single_stack(g_stacks_infos,
		mon_stack_size_per_cpu);
	mon_stacks_info_set_max_allowed_cpus(g_stacks_infos,
		mon_max_allowed_cpus);
	mon_stacks_info_set_num_of_exception_stacks(g_stacks_infos,
		idt_get_extra_stacks_required());
	mon_stacks_set_initialized(g_stacks_infos);
	return TRUE;
}
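For context, the recorded base and per-CPU stack size are enough to locate any CPU's stack. A hedged sketch of that arithmetic (the helper name is invented; stacks are assumed to grow downward within consecutive slots):

static uint64_t stack_top_for_cpu(uint64_t stacks_base,
				  uint32_t size_of_single_stack,
				  uint32_t cpu_id)
{
	/* slots are carved consecutively from the base; the usable top of
	 * a downward-growing stack is the end of this cpu's slot */
	return stacks_base + ((uint64_t)cpu_id + 1) * size_of_single_stack;
}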
Example #4
/*
 * Set guest LDTR according to new tss.
 */
static
int set_guest_ldtr(guest_cpu_handle_t gcpu, seg_reg_t *gdtr,
		   seg_reg_t *ldtr, tss32_t *tss)
{
	desc_t desc;
	int r;

	mon_memset(ldtr, 0, sizeof(seg_reg_t));
	ldtr->selector = (uint16_t)tss->ldtr;

	if (SELECTOR_IDX(ldtr->selector) == 0) {
		ldtr->ar.bits.null_bit = 1;
		return 0;
	}

	if (!SELECTOR_GDT(ldtr->selector)) {
		/* must be in gdt */
		force_ring3_ss(gcpu);
		gcpu_inject_ts(gcpu, ldtr->selector);
		return -1;
	}

	r = copy_from_gva(gcpu,
		(uint64_t)(gdtr->base + SELECTOR_IDX(ldtr->selector)),
		sizeof(desc), (uint64_t)(&desc));

	if (r != 0) {
		force_ring3_ss(gcpu);
		gcpu_inject_ts(gcpu, ldtr->selector);
		return -1;
	}

	parse_desc(&desc, ldtr);

	if ((ldtr->ar.bits.s_bit != 0) ||       /* must be sys desc */
	    !LS_LDT(ldtr->ar.bits.type) ||      /* must be ldt */
	    (ldtr->ar.bits.p_bit != 1)) {       /* must be present */
		force_ring3_ss(gcpu);
		gcpu_inject_ts(gcpu, ldtr->selector);
		return -1;
	}

	gcpu_set_segment_reg(gcpu, IA32_SEG_LDTR, ldtr->selector, ldtr->base,
		ldtr->limit, ldtr->ar.value);

	return 0;
}
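The selector helpers are not shown in this excerpt. Sketches consistent with the IA-32 selector layout (bits 0-1 RPL, bit 2 table indicator, bits 3-15 index) and with SELECTOR_IDX's use above as a byte offset into the descriptor table:

#define SELECTOR_RPL(sel)  ((uint16_t)(sel) & 0x3)           /* requested privilege */
#define SELECTOR_GDT(sel)  ((((uint16_t)(sel)) & 0x4) == 0)  /* TI bit clear -> GDT */
#define SELECTOR_IDX(sel)  ((uint16_t)(sel) & 0xfff8)        /* descriptor byte offset */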
Example #5
/*---------------------------------------------------*/
boolean_t mtrrs_abstraction_bsp_initialize(void)
{
	uint32_t msr_addr;
	uint32_t index;

	mon_memset(&mtrrs_cached_info, 0, sizeof(mtrrs_cached_info));
	mtrrs_cached_info.ia32_mtrrcap_reg.value =
		hw_read_msr(IA32_MTRRCAP_ADDR);
	mtrrs_cached_info.ia32_mtrr_def_type.value =
		hw_read_msr(IA32_MTRR_DEF_TYPE_ADDR);

	if (mtrrs_abstraction_are_fixed_regs_supported()) {
		mtrrs_cached_info.ia32_mtrr_fix[0].value =
			hw_read_msr(IA32_MTRR_FIX64K_00000_ADDR);
		mtrrs_cached_info.ia32_mtrr_fix_range[0].start_addr = 0x0;
		mtrrs_cached_info.ia32_mtrr_fix_range[0].end_addr = 0x7ffff;

		mtrrs_cached_info.ia32_mtrr_fix[1].value =
			hw_read_msr(IA32_MTRR_FIX16K_80000_ADDR);
		mtrrs_cached_info.ia32_mtrr_fix_range[1].start_addr = 0x80000;
		mtrrs_cached_info.ia32_mtrr_fix_range[1].end_addr = 0x9ffff;

		mtrrs_cached_info.ia32_mtrr_fix[2].value =
			hw_read_msr(IA32_MTRR_FIX16K_A0000_ADDR);
		mtrrs_cached_info.ia32_mtrr_fix_range[2].start_addr = 0xa0000;
		mtrrs_cached_info.ia32_mtrr_fix_range[2].end_addr = 0xbffff;

		mtrrs_cached_info.ia32_mtrr_fix[3].value =
			hw_read_msr(IA32_MTRR_FIX4K_C0000_ADDR);
		mtrrs_cached_info.ia32_mtrr_fix_range[3].start_addr = 0xc0000;
		mtrrs_cached_info.ia32_mtrr_fix_range[3].end_addr = 0xc7fff;

		mtrrs_cached_info.ia32_mtrr_fix[4].value =
			hw_read_msr(IA32_MTRR_FIX4K_C8000_ADDR);
		mtrrs_cached_info.ia32_mtrr_fix_range[4].start_addr = 0xc8000;
		mtrrs_cached_info.ia32_mtrr_fix_range[4].end_addr = 0xcffff;

		mtrrs_cached_info.ia32_mtrr_fix[5].value =
			hw_read_msr(IA32_MTRR_FIX4K_D0000_ADDR);
		mtrrs_cached_info.ia32_mtrr_fix_range[5].start_addr = 0xd0000;
		mtrrs_cached_info.ia32_mtrr_fix_range[5].end_addr = 0xd7fff;

		mtrrs_cached_info.ia32_mtrr_fix[6].value =
			hw_read_msr(IA32_MTRR_FIX4K_D8000_ADDR);
		mtrrs_cached_info.ia32_mtrr_fix_range[6].start_addr = 0xd8000;
		mtrrs_cached_info.ia32_mtrr_fix_range[6].end_addr = 0xdffff;

		mtrrs_cached_info.ia32_mtrr_fix[7].value =
			hw_read_msr(IA32_MTRR_FIX4K_E0000_ADDR);
		mtrrs_cached_info.ia32_mtrr_fix_range[7].start_addr = 0xe0000;
		mtrrs_cached_info.ia32_mtrr_fix_range[7].end_addr = 0xe7fff;

		mtrrs_cached_info.ia32_mtrr_fix[8].value =
			hw_read_msr(IA32_MTRR_FIX4K_E8000_ADDR);
		mtrrs_cached_info.ia32_mtrr_fix_range[8].start_addr = 0xe8000;
		mtrrs_cached_info.ia32_mtrr_fix_range[8].end_addr = 0xeffff;

		mtrrs_cached_info.ia32_mtrr_fix[9].value =
			hw_read_msr(IA32_MTRR_FIX4K_F0000_ADDR);
		mtrrs_cached_info.ia32_mtrr_fix_range[9].start_addr = 0xf0000;
		mtrrs_cached_info.ia32_mtrr_fix_range[9].end_addr = 0xf7fff;

		mtrrs_cached_info.ia32_mtrr_fix[10].value =
			hw_read_msr(IA32_MTRR_FIX4K_F8000_ADDR);
		mtrrs_cached_info.ia32_mtrr_fix_range[10].start_addr = 0xf8000;
		mtrrs_cached_info.ia32_mtrr_fix_range[10].end_addr = 0xfffff;
	}

	for (msr_addr = IA32_MTRR_PHYSBASE0_ADDR, index = 0;
	     index < mtrrs_abstraction_get_num_of_variable_range_regs();
	     msr_addr += 2, index++) {
		if (msr_addr > IA32_MTRR_MAX_PHYSMASK_ADDR) {
			MON_LOG(mask_mon,
				level_error,
				"BSP: ERROR: MTRRs Abstraction: Variable MTRRs count > %d",
				MTRRS_ABS_NUM_OF_VAR_RANGE_MTRRS);
			MON_DEADLOOP();
		}

		mtrrs_cached_info.ia32_mtrr_var_phys_base[index].value =
			hw_read_msr(msr_addr);
		mtrrs_cached_info.ia32_mtrr_var_phys_mask[index].value =
			hw_read_msr(msr_addr + 1);
	}

	mtrr_msbs =
		~(((uint64_t)1 << addr_get_physical_address_size()) - 1);

	mtrrs_cached_info.is_initialized = TRUE;
	return TRUE;
}
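Each fixed-range MSR read above packs eight one-byte memory-type fields that split its range evenly (into 64 KB, 16 KB or 4 KB chunks). A hypothetical lookup over the cached table, not taken from the source, could look like:

static uint8_t fixed_mtrr_type_of(uint64_t addr)
{
	uint32_t i;

	for (i = 0; i < 11; i++) {
		uint64_t start =
			mtrrs_cached_info.ia32_mtrr_fix_range[i].start_addr;
		uint64_t end =
			mtrrs_cached_info.ia32_mtrr_fix_range[i].end_addr;

		if (addr >= start && addr <= end) {
			/* eight byte-wide type fields per fixed MSR */
			uint64_t sub_size = (end - start + 1) / 8;
			uint32_t field = (uint32_t)((addr - start) / sub_size);

			return (uint8_t)
				(mtrrs_cached_info.ia32_mtrr_fix[i].value >>
				 (field * 8));
		}
	}
	return 0xff; /* not covered by a fixed range */
}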
Example #6
/*
 * This function performs a task switch for a 32-bit MON guest.
 */
int task_switch_for_guest(guest_cpu_handle_t gcpu,
			  ia32_vmx_vmcs_vmexit_info_idt_vectoring_t vec_info)
{
	int ret;
	uint32_t inst_type;
	tss32_t tss;

	cr0_t cr0;
	dr7_t dr7;

	seg_reg_t gdtr;
	seg_reg_t old_ldtr;
	seg_reg_t new_ldtr;

	seg_reg_t new_tr;
	seg_reg_t old_tr;
	desc_t new_tss_desc;
	desc_t old_tss_desc;

	gcpu_get_gdt_reg(gcpu, (uint64_t *)&(gdtr.base),
		(uint32_t *)&(gdtr.limit));
	gdtr.ar.value = 0x000080;

	cr0.value =
		(uint32_t)gcpu_get_guest_visible_control_reg(gcpu,
			IA32_CTRL_CR0);

	/* Find new tr & tss. */

	get_task_info(gcpu, &inst_type, &(new_tr.selector), vec_info);

	ret = copy_from_gva(gcpu,
		(uint64_t)(gdtr.base + SELECTOR_IDX(new_tr.selector)),
		sizeof(new_tss_desc),
		(uint64_t)(&new_tss_desc));

	if (ret != 0) {
		gcpu_inject_ts(gcpu, new_tr.selector);
		return -1;
	}

	parse_desc(&new_tss_desc, &new_tr);

	if (!IS_TSS32(new_tr.ar.bits.type)) {
		gcpu_inject_ts(gcpu, new_tr.selector);
		return -1;
	}

	/* Find old ldtr. */

	gcpu_get_segment_reg(gcpu, IA32_SEG_LDTR,
		(uint16_t *)&(old_ldtr.selector),
		(uint64_t *)&(old_ldtr.base),
		(uint32_t *)&(old_ldtr.limit),
		(uint32_t *)&(old_ldtr.ar));

	/* Find old tr. */

	gcpu_get_segment_reg(gcpu, IA32_SEG_TR,
		(uint16_t *)&(old_tr.selector),
		(uint64_t *)&(old_tr.base),
		(uint32_t *)&(old_tr.limit),
		(uint32_t *)&(old_tr.ar));

	if (!IS_TSS32_BUSY(old_tr.ar.bits.type)) {
		gcpu_inject_ts(gcpu, old_tr.selector);
		return -1;
	}

	/* Save guest status to old tss. */
	/* call, jmp or iret */
	if (inst_type != TASK_SWITCH_TYPE_IDT) {
		gcpu_skip_guest_instruction(gcpu);
	}

	mon_memset(&tss, 0, sizeof(tss));
	copy_vmcs_to_tss32(gcpu, &tss);

	if (inst_type == TASK_SWITCH_TYPE_IRET) {
		((eflags_t *)&(tss.eflags))->bits.nested_task = 0;
	}

	ret = copy_to_gva(gcpu,
		/* gva of old_tss.eip */
		(uint64_t)(old_tr.base + 32),
		/* from eip to gs: total 64 bytes */
		64,
		/* hva of old_tss.eip */
		(uint64_t)&(tss.eip));

	if (ret != 0) {
		gcpu_inject_ts(gcpu, old_tr.selector);
		return -1;
	}

	/* Read new tss from memory. */

	mon_memset(&tss, 0, sizeof(tss));

	ret = copy_from_gva(gcpu,
		(uint64_t)(new_tr.base),
		sizeof(tss),
		(uint64_t)&(tss));

	if (ret != 0) {
		gcpu_inject_ts(gcpu, new_tr.selector);
		return -1;
	}

	/* Clear busy bit in old tss descriptor. */

	if ((inst_type == TASK_SWITCH_TYPE_JMP) ||
	    (inst_type == TASK_SWITCH_TYPE_IRET)) {
		ret = copy_from_gva(gcpu,
			(uint64_t)(gdtr.base + SELECTOR_IDX(old_tr.selector)),
			sizeof(old_tss_desc),
			(uint64_t)(&old_tss_desc));

		if (ret != 0) {
			gcpu_inject_ts(gcpu, old_tr.selector);
			return -1;
		}

		/* Clear the B bit, and write it back. */
		old_tss_desc.bits.type = TSS32_AVAL;

		ret = copy_to_gva(gcpu,
			(uint64_t)(gdtr.base + SELECTOR_IDX(old_tr.selector)),
			sizeof(old_tss_desc),
			(uint64_t)(&old_tss_desc));

		if (ret != 0) {
			gcpu_inject_ts(gcpu, old_tr.selector);
			return -1;
		}
	}

	/* Set busy bit in new tss descriptor. */

	if (inst_type != TASK_SWITCH_TYPE_IRET) {
		new_tss_desc.bits.type = TSS32_BUSY;
		new_tr.ar.bits.type = TSS32_BUSY;

		ret = copy_to_gva(gcpu,
			(uint64_t)(gdtr.base + SELECTOR_IDX(new_tr.selector)),
			sizeof(new_tss_desc),
			(uint64_t)(&new_tss_desc));

		if (ret != 0) {
			gcpu_inject_ts(gcpu, new_tr.selector);
			return -1;
		}
	}

	/* Save old tr in new tss. */

	if ((inst_type == TASK_SWITCH_TYPE_CALL) ||
	    (inst_type == TASK_SWITCH_TYPE_IDT)) {
		/* gva of new_tss.prev_tr */
		ret = copy_to_gva(gcpu, (uint64_t)(new_tr.base + 0),
			/* two bytes */
			sizeof(old_tr.selector),
			/* hva */
			(uint64_t)(&(old_tr.selector)));

		if (ret != 0) {
			new_tss_desc.bits.type = TSS32_AVAL;

			copy_to_gva(gcpu,
				(uint64_t)(gdtr.base +
					   SELECTOR_IDX(new_tr.selector)),
				sizeof(new_tss_desc),
				(uint64_t)(&new_tss_desc));

			gcpu_inject_ts(gcpu, new_tr.selector);
			return -1;
		}
	}

	/* Load new tr. */

	gcpu_set_segment_reg(gcpu, IA32_SEG_TR, new_tr.selector,
		new_tr.base, new_tr.limit, new_tr.ar.value);

	/* Load new cr3. */

	if (cr0.bits.pg) {
		gcpu_set_guest_visible_control_reg(gcpu, IA32_CTRL_CR3,
			tss.cr3);
		gcpu_set_control_reg(gcpu, IA32_CTRL_CR3, tss.cr3);
	}

	/* Load new flags. */

	if ((inst_type == TASK_SWITCH_TYPE_CALL) ||
	    (inst_type == TASK_SWITCH_TYPE_IDT)) {
		((eflags_t *)&(tss.eflags))->bits.nested_task = 1;
	}

	((eflags_t *)&(tss.eflags))->bits.rsvd_1 = 1;

	/* Load general regs. */

	gcpu_set_gp_reg(gcpu, IA32_REG_RIP, (uint64_t)tss.eip);
	gcpu_set_gp_reg(gcpu, IA32_REG_RFLAGS, (uint64_t)tss.eflags);
	gcpu_set_gp_reg(gcpu, IA32_REG_RAX, (uint64_t)tss.eax);
	gcpu_set_gp_reg(gcpu, IA32_REG_RCX, (uint64_t)tss.ecx);
	gcpu_set_gp_reg(gcpu, IA32_REG_RDX, (uint64_t)tss.edx);
	gcpu_set_gp_reg(gcpu, IA32_REG_RBX, (uint64_t)tss.ebx);
	gcpu_set_gp_reg(gcpu, IA32_REG_RBP, (uint64_t)tss.ebp);
	gcpu_set_gp_reg(gcpu, IA32_REG_RSP, (uint64_t)tss.esp);
	gcpu_set_gp_reg(gcpu, IA32_REG_RSI, (uint64_t)tss.esi);
	gcpu_set_gp_reg(gcpu, IA32_REG_RDI, (uint64_t)tss.edi);

	/* Set the TS bit in CR0. */

	cr0.bits.ts = 1;
	gcpu_set_guest_visible_control_reg(gcpu, IA32_CTRL_CR0, cr0.value);
	gcpu_set_control_reg(gcpu, IA32_CTRL_CR0, cr0.value);

	/* Load new ldtr. */

	if (tss.ldtr != old_ldtr.selector) {
		if (set_guest_ldtr(gcpu, &gdtr, &new_ldtr, &tss) != 0) {
			return -1;
		}
	}

	/* Load new seg regs. */

	if (((eflags_t *)&(tss.eflags))->bits.v86_mode == 1) {
		uint16_t es = (uint16_t)tss.es;
		uint16_t cs = (uint16_t)tss.cs;
		uint16_t ss = (uint16_t)tss.ss;
		uint16_t ds = (uint16_t)tss.ds;
		uint16_t fs = (uint16_t)tss.fs;
		uint16_t gs = (uint16_t)tss.gs;

		/* Set v86 selector, base, limit, ar, in real-mode style. */
		gcpu_set_segment_reg(gcpu, IA32_SEG_ES, es, es << 4,
			0xffff, 0xf3);
		gcpu_set_segment_reg(gcpu, IA32_SEG_CS, cs, cs << 4,
			0xffff, 0xf3);
		gcpu_set_segment_reg(gcpu, IA32_SEG_SS, ss, ss << 4,
			0xffff, 0xf3);
		gcpu_set_segment_reg(gcpu, IA32_SEG_DS, ds, ds << 4,
			0xffff, 0xf3);
		gcpu_set_segment_reg(gcpu, IA32_SEG_FS, fs, fs << 4,
			0xffff, 0xf3);
		gcpu_set_segment_reg(gcpu, IA32_SEG_GS, gs, gs << 4,
			0xffff, 0xf3);

		goto all_done;
	}

	/* Load new ss. */

	if (set_guest_ss(gcpu, &gdtr, &new_ldtr, &tss) != 0) {
		return -1;
	}

	/* Load new es, ds, fs, gs. */

	if ((set_guest_seg(gcpu, &gdtr, &new_ldtr, &tss, IA32_SEG_ES) != 0) ||
	    (set_guest_seg(gcpu, &gdtr, &new_ldtr, &tss, IA32_SEG_DS) != 0) ||
	    (set_guest_seg(gcpu, &gdtr, &new_ldtr, &tss, IA32_SEG_FS) != 0) ||
	    (set_guest_seg(gcpu, &gdtr, &new_ldtr, &tss, IA32_SEG_GS) != 0)) {
		return -1;
	}

	/* Load new cs. */

	if (set_guest_cs(gcpu, &gdtr, &new_ldtr, &tss) != 0) {
		return -1;
	}

all_done:

	/* Clear the LE bits in dr7. */

	dr7.value = (uint32_t)gcpu_get_debug_reg(gcpu, IA32_REG_DR7);
	dr7.bits.l0 = 0;
	dr7.bits.l1 = 0;
	dr7.bits.l2 = 0;
	dr7.bits.l3 = 0;
	dr7.bits.le = 0;
	gcpu_set_debug_reg(gcpu, IA32_REG_DR7, (uint64_t)dr7.value);

	/* Debug trap in new task? */

	if ((tss.io_base_addr & 0x00000001) != 0) {
		gcpu_inject_db(gcpu);
		return -1;
	}

	return 0;
}
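The hard-coded offsets above (prev_tr at byte 0, tss.eip at byte 32, and the 64-byte span from eip through gs) agree with the standard IA-32 TSS layout; a tss32_t sketch along those lines (an assumption, not the project's actual header):

typedef struct {
	uint32_t prev_tr;               /* 0: previous task link */
	uint32_t esp0, ss0, esp1, ss1, esp2, ss2;
	uint32_t cr3;                   /* 28 */
	uint32_t eip;                   /* 32: dynamic state starts here */
	uint32_t eflags;
	uint32_t eax, ecx, edx, ebx, esp, ebp, esi, edi;
	uint32_t es, cs, ss, ds, fs, gs;        /* gs ends at byte 95 */
	uint32_t ldtr;                  /* 96 */
	uint32_t io_base_addr;          /* 100: bit 0 is the T (debug trap) flag */
} tss32_t;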
Example #7
/*
 * Set guest ES, DS, FS, or GS, based on register name and new tss.
 */
static
int set_guest_seg(guest_cpu_handle_t gcpu, seg_reg_t *gdtr, seg_reg_t *ldtr,
		  tss32_t *tss, mon_ia32_segment_registers_t name)
{
	desc_t desc;
	seg_reg_t seg;
	seg_reg_t *dtr;
	uint32_t cpl;
	int r;

	mon_memset(&seg, 0, sizeof(seg));

	if (name == IA32_SEG_ES) {
		seg.selector = (uint16_t)tss->es;
	} else if (name == IA32_SEG_DS) {
		seg.selector = (uint16_t)tss->ds;
	} else if (name == IA32_SEG_FS) {
		seg.selector = (uint16_t)tss->fs;
	} else if (name == IA32_SEG_GS) {
		seg.selector = (uint16_t)tss->gs;
	} else {
		return -1;
	}

	cpl = SELECTOR_RPL(tss->cs);

	dtr = SELECTOR_GDT(seg.selector) ? gdtr : ldtr;

	if (SELECTOR_IDX(seg.selector) == 0) {
		seg.selector = 0;
		seg.ar.bits.null_bit = 1;
		goto set_seg_reg;
	}

	r = copy_from_gva(gcpu,
		(uint64_t)(dtr->base + SELECTOR_IDX(seg.selector)),
		sizeof(desc), (uint64_t)(&desc));

	if (r != 0) {
		force_ring3_ss(gcpu);
		gcpu_inject_ts(gcpu, seg.selector);
		return -1;
	}

	parse_desc(&desc, &seg);

	if ((seg.ar.bits.s_bit == 0) || /* must be non-sys desc */
	    (IS_CODE(seg.ar.bits.type) && !IS_CODE_R(seg.ar.bits.type))) {
		force_ring3_ss(gcpu);
		gcpu_inject_ts(gcpu, seg.selector);
		return -1;
	}

	if (seg.ar.bits.p_bit != 1) {
		/* Must be present. */
		force_ring3_ss(gcpu);
		gcpu_inject_np(gcpu, seg.selector);
		return -1;
	}

	/* If g_bit is set, the unit is 4 KB. */
	if (seg.ar.bits.g_bit == 1) {
		seg.limit = (seg.limit << 12) | 0xfff;
	}

	/* Priv checks. */
	if (IS_CODE(seg.ar.bits.type) && !IS_CODE_CONFORM(seg.ar.bits.type)) {
		uint32_t rpl = (uint32_t)SELECTOR_RPL(seg.selector);

		if ((seg.ar.bits.dpl < cpl) || (seg.ar.bits.dpl < rpl)) {
			force_ring3_ss(gcpu);
			gcpu_inject_ts(gcpu, seg.selector);
			return -1;
		}
	}

set_seg_reg:

	gcpu_set_segment_reg(gcpu, name, seg.selector, seg.base,
		seg.limit, seg.ar.value);

	return 0;
}
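These segment loaders rely on type-field predicates the excerpt does not define. The IA-32 descriptor type field keeps code/data in bit 3, conforming/expand-down in bit 2, readable/writable in bit 1 and accessed in bit 0, so plausible sketches (keeping the source's spelling of IS_ASSESSED) are:

#define IS_CODE(type)          (((type) & 0x8) != 0)
#define IS_CODE_R(type)        (((type) & 0x2) != 0)  /* readable code */
#define IS_CODE_CONFORM(type)  (((type) & 0x4) != 0)
#define IS_DATA_RW(type)       (((type) & 0x2) != 0)  /* writable data */
#define IS_ASSESSED(type)      (((type) & 0x1) != 0)  /* accessed bit */
#define SET_ASSESSED(type)     ((type) |= 0x1)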
Example #8
/*
 * Set guest CS according to new tss.
 */
static
int set_guest_cs(guest_cpu_handle_t gcpu, seg_reg_t *gdtr,
		 seg_reg_t *ldtr, tss32_t *tss)
{
	desc_t desc;
	seg_reg_t cs;
	seg_reg_t *dtr;
	uint32_t cpl;
	int r;

	mon_memset(&cs, 0, sizeof(cs));
	cs.selector = (uint16_t)tss->cs;
	cpl = SELECTOR_RPL(tss->cs);

	if (SELECTOR_IDX(cs.selector) == 0) {
		/* must not be null */
		gcpu_inject_ts(gcpu, cs.selector);
		return -1;
	}

	dtr = SELECTOR_GDT(cs.selector) ? gdtr : ldtr;

	r = copy_from_gva(gcpu,
		(uint64_t)(dtr->base + SELECTOR_IDX(cs.selector)),
		sizeof(desc),
		(uint64_t)(&desc));

	if (r != 0) {
		gcpu_inject_ts(gcpu, cs.selector);
		return -1;
	}

	parse_desc(&desc, &cs);

	if (cs.ar.bits.p_bit != 1) {
		/* must be present */
		gcpu_inject_np(gcpu, cs.selector);
		return -1;
	}

	if ((cs.ar.bits.s_bit == 0) ||          /* must be non-sys desc */
	    !IS_CODE(cs.ar.bits.type)) {        /* must be code */
		gcpu_inject_ts(gcpu, cs.selector);
		return -1;
	}

	/* Priv checks */
	if (IS_CODE_CONFORM(cs.ar.bits.type)) {
		if (cs.ar.bits.dpl > cpl) {
			gcpu_inject_ts(gcpu, cs.selector);
			return -1;
		}
	} else {
		if (cs.ar.bits.dpl != cpl) {
			gcpu_inject_ts(gcpu, cs.selector);
			return -1;
		}
	}

	/* If g_bit is set, the unit is 4 KB. */
	if (cs.ar.bits.g_bit == 1) {
		cs.limit = (cs.limit << 12) | 0xfff;
	}

	if (!IS_ASSESSED(cs.ar.bits.type)) {
		SET_ASSESSED(cs.ar.bits.type);
		SET_ASSESSED(desc.bits.type);

		r = copy_to_gva(gcpu,
			(uint64_t)(dtr->base + SELECTOR_IDX(cs.selector)),
			sizeof(desc),
			(uint64_t)(&desc));

		if (r != 0) {
			gcpu_inject_ts(gcpu, cs.selector);
			return -1;
		}
	}

	cs.ar.bits.null_bit = 0;

	gcpu_set_segment_reg(gcpu, IA32_SEG_CS, cs.selector, cs.base,
		cs.limit, cs.ar.value);

	if (tss->eip > cs.limit) {
		gcpu_inject_ts(gcpu, cs.selector);
		return -1;
	}

	return 0;
}
Example #9
/*
 * Set guest SS according to new tss.
 */
static
int set_guest_ss(guest_cpu_handle_t gcpu, seg_reg_t *gdtr,
		 seg_reg_t *ldtr, tss32_t *tss)
{
	desc_t desc;
	seg_reg_t ss;
	seg_reg_t *dtr;
	uint32_t cpl;
	int r;

	mon_memset(&ss, 0, sizeof(ss));
	ss.selector = (uint16_t)tss->ss;
	cpl = SELECTOR_RPL(tss->cs);

	if (SELECTOR_IDX(ss.selector) == 0) {
		/* must not be null */
		force_ring3_ss(gcpu);
		gcpu_inject_ts(gcpu, ss.selector);
		return -1;
	}

	dtr = SELECTOR_GDT(ss.selector) ? gdtr : ldtr;

	r = copy_from_gva(gcpu,
		(uint64_t)(dtr->base + SELECTOR_IDX(ss.selector)),
		sizeof(desc),
		(uint64_t)(&desc));

	if (r != 0) {
		force_ring3_ss(gcpu);
		gcpu_inject_ts(gcpu, ss.selector);
		return -1;
	}

	parse_desc(&desc, &ss);

	if (ss.ar.bits.p_bit == 0) {
		/* must be present */
		force_ring3_ss(gcpu);
		gcpu_inject_ss(gcpu, ss.selector);
		return -1;
	}

	if ((ss.ar.bits.s_bit == 0) ||          /* must be non-sys desc */
	    IS_CODE(ss.ar.bits.type) ||         /* must not be code */
	    !IS_DATA_RW(ss.ar.bits.type) ||     /* must be data with r/w */
	    (ss.ar.bits.dpl != cpl) ||
	    ((uint32_t)SELECTOR_RPL(ss.selector) != cpl)) {
		force_ring3_ss(gcpu);
		gcpu_inject_ts(gcpu, ss.selector);
		return -1;
	}

	/* If g_bit is set, the unit is 4 KB. */
	if (ss.ar.bits.g_bit == 1) {
		ss.limit = (ss.limit << 12) | 0xfff;
	}

	if (!IS_ASSESSED(ss.ar.bits.type)) {
		SET_ASSESSED(ss.ar.bits.type);
		SET_ASSESSED(desc.bits.type);

		r = copy_to_gva(gcpu,
			(uint64_t)(dtr->base + SELECTOR_IDX(ss.selector)),
			sizeof(desc),
			(uint64_t)(&desc));

		if (r != 0) {
			force_ring3_ss(gcpu);
			gcpu_inject_ts(gcpu, ss.selector);
			return -1;
		}
	}

	gcpu_set_segment_reg(gcpu, IA32_SEG_SS, ss.selector, ss.base,
		ss.limit, ss.ar.value);

	return 0;
}
Example #10
/*
 *  FUNCTION  : elf32_load_executable
 *  PURPOSE   : Load and relocate an ELF-x32 executable to memory
 *  ARGUMENTS : gen_image_access_t *image - describes the image to load
 *            : elf_load_info_t *p_info - contains load-related data
 *  RETURNS   : MON_OK on success, MON_ERROR on failure
 *  NOTES     : Load map (addresses grow from top to bottom):
 *            :        ELF header
 *            :        loadable program segments
 *            :        section headers table (optional)
 *            :        loaded sections        (optional)
 */
mon_status_t
elf32_load_executable(gen_image_access_t *image, elf_load_info_t *p_info)
{
	mon_status_t status = MON_OK;
	elf32_ehdr_t *ehdr;             /* ELF header */
	uint8_t *phdrtab;               /* Program Segment header Table */
	elf32_word_t phsize;            /* Program Segment header Table size */
	elf32_addr_t addr;
	elf32_word_t memsz;
	elf32_word_t filesz;
	int16_t i;
	elf32_phdr_t *phdr_dyn = NULL;

	ELF_CLEAR_SCREEN();

	/* map ELF header to ehdr */
	if (sizeof(elf32_ehdr_t) !=
	    mem_image_map_to_mem(image, (void **)&ehdr, (size_t)0,
		    (size_t)sizeof(elf32_ehdr_t))) {
		status = MON_ERROR;
		goto quit;
	}

	/* map Program Segment header Table to phdrtab */
	phsize = ehdr->e_phnum * ehdr->e_phentsize;
	if (mem_image_map_to_mem(image, (void **)&phdrtab,
		    (size_t)ehdr->e_phoff, (size_t)phsize) != phsize) {
		status = MON_ERROR;
		goto quit;
	}

	ELF_PRINT_STRING("p_type :p_flags :p_offset:p_vaddr :p_paddr "
		":p_filesz:p_memsz :p_align\n");
	/* now actually copy image to its target destination */
	for (i = 0; i < (int16_t)ehdr->e_phnum; ++i) {
		elf32_phdr_t *phdr = (elf32_phdr_t *)GET_PHDR(ehdr, phdrtab, i);
		if (PT_DYNAMIC == phdr->p_type) {
			phdr_dyn = phdr;
			continue;
		}

		if (PT_LOAD != phdr->p_type || 0 == phdr->p_memsz) {
			continue;
		}

		filesz = phdr->p_filesz;
		addr = phdr->p_paddr;
		memsz = phdr->p_memsz;

		/* make sure we only load what we're supposed to! */
		if (filesz > memsz) {
			filesz = memsz;
		}

		if (mem_image_read(image,
			    (void *)(size_t)(addr + p_info->relocation_offset),
			    (size_t)phdr->p_offset, (size_t)filesz) != filesz) {
			status = MON_ERROR;
			ELF_PRINT_STRING("failed to read segment from file.\n");
			goto quit;
		}

		if (filesz < memsz) { /* zero BSS if it exists */
			mon_memset((void *)(size_t)(addr + filesz +
						    p_info->relocation_offset), 0,
				(size_t)(memsz - filesz));
		}
	}

	/* Update copied segments addresses */
	/* now ehdr points to the new, copied ELF header */
	ehdr = (elf32_ehdr_t *)(size_t)p_info->start_addr;
	phdrtab = (uint8_t *)(size_t)(p_info->start_addr + ehdr->e_phoff);

	for (i = 0; i < (int16_t)ehdr->e_phnum; ++i) {
		elf32_phdr_t *phdr = (elf32_phdr_t *)GET_PHDR(ehdr, phdrtab, i);

		if (0 != phdr->p_memsz) {
			phdr->p_paddr += (elf32_addr_t)p_info->relocation_offset;
			phdr->p_vaddr += (elf32_addr_t)p_info->relocation_offset;
		}
	}

	if (NULL != phdr_dyn) {
		status = elf32_do_relocation(image, p_info, phdr_dyn);
		if (MON_OK != status) {
			goto quit;
		}
	}

	/* optionally copy sections table and sections */

	if (p_info->copy_section_headers || p_info->copy_symbol_tables) {
		status = elf32_copy_section_header_table(image, p_info);
		if (MON_OK != status) {
			goto quit;
		}
	}

	if (p_info->copy_symbol_tables) {
		status = elf32_copy_sections(image, p_info);
	}

quit:
	return status;
}
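GET_PHDR is not defined in this excerpt. Stepping through the program header table by e_phentsize rather than sizeof(elf32_phdr_t) is the standard ELF idiom, so a likely shape is:

/* i-th program header: the table stride is e_phentsize from the ELF header */
#define GET_PHDR(ehdr, phdrtab, i) \
	((void *)((uint8_t *)(phdrtab) + (size_t)(i) * (ehdr)->e_phentsize))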
Example #11
/* ---------------------------- APIs --------------------------------------- */
void guest_control_setup(guest_handle_t guest, const vmexit_control_t *request)
{
	guest_gcpu_econtext_t ctx;
	guest_cpu_handle_t gcpu;
	mon_state_t mon_state;
	cpu_id_t this_hcpu_id = hw_cpu_id();

	MON_ASSERT(guest);

	/* setup vmexit requests without applying */
	for (gcpu = mon_guest_gcpu_first(guest, &ctx); gcpu;
	     gcpu = mon_guest_gcpu_next(&ctx))
		gcpu_control_setup_only(gcpu, request);

	/* now apply */
	mon_state = mon_get_state();

	if (MON_STATE_BOOT == mon_state) {
		/* may be run on BSP only */
		MON_ASSERT(0 == this_hcpu_id);

		/* single thread mode with all APs yet not init */
		for (gcpu = mon_guest_gcpu_first(guest, &ctx); gcpu;
		     gcpu = mon_guest_gcpu_next(&ctx))
			gcpu_control_apply_only(gcpu);
	} else if (MON_STATE_RUN == mon_state) {
		ipc_comm_guest_struct_t ipc;
		uint32_t wait_for_ipc_count = 0;
		ipc_destination_t ipc_dst;

		mon_memset(&ipc, 0, sizeof(ipc));
		mon_memset(&ipc_dst, 0, sizeof(ipc_dst));

		/* multi-thread mode with all APs ready and running
		 * or in Wait-For-SIPI state on behalf of guest */

		ipc.guest = guest;

		/* first apply for gcpus allocated for this hw cpu */
		apply_vmexit_config(this_hcpu_id, &ipc);

		/* reset executed counter and flush memory */
		hw_assign_as_barrier(&(ipc.executed), 0);

		/* send for execution */
		ipc_dst.addr_shorthand = IPI_DST_ALL_EXCLUDING_SELF;
		wait_for_ipc_count =
			ipc_execute_handler(ipc_dst, apply_vmexit_config, &ipc);

		/* wait for execution finish */
		while (wait_for_ipc_count != ipc.executed) {
			/* avoid deadlock - process one IPC if exist */
			ipc_process_one_ipc();
			hw_pause();
		}
	} else {
		/* not supported mode */
		MON_LOG(mask_anonymous, level_trace,
			"Unsupported global mon_state=%d in"
			" guest_control_setup()\n",
			mon_state);
		MON_DEADLOOP();
	}
}
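apply_vmexit_config is only referenced here. Judging from its two call sites (a direct call for the local CPU, then registration as an IPC handler for the others), its shape is roughly the sketch below; the handler signature and the host_cpu_id_of_gcpu() helper are assumptions:

static void apply_vmexit_config(cpu_id_t from, void *arg)
{
	ipc_comm_guest_struct_t *ipc = (ipc_comm_guest_struct_t *)arg;
	guest_gcpu_econtext_t ctx;
	guest_cpu_handle_t gcpu;
	cpu_id_t this_hcpu_id = hw_cpu_id();

	(void)from;

	for (gcpu = mon_guest_gcpu_first(ipc->guest, &ctx); gcpu;
	     gcpu = mon_guest_gcpu_next(&ctx)) {
		/* assumed helper: the hw cpu hosting this gcpu */
		if (host_cpu_id_of_gcpu(gcpu) == this_hcpu_id)
			gcpu_control_apply_only(gcpu);
	}

	/* publish completion; the initiator spins on ipc.executed */
	hw_assign_as_barrier(&(ipc->executed), ipc->executed + 1);
}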