Example 1
/*
 * Take a 64-bit mask plus a set of flags. For each 1-bit in the mask,
 * consult the corresponding flags bit: if it is 1, increment that bit's
 * counter, else decrement it.
 * Return a bit set with a 1 for each non-zero counter. */
static uint64_t gcpu_update_control_counters(uint64_t flags,
					     uint64_t mask,
					     gcpu_vmexit_control_field_counters_t
					     *counters)
{
	uint32_t idx;

	while (mask) {
		/* Find the index of the lowest set bit remaining in mask */
		idx = (uint32_t)-1;

		hw_scan_bit_forward64(&idx, mask);

		MON_ASSERT(idx < 64);
		BIT_CLR64(mask, idx);

		if (1 == BIT_GET64(flags, idx)) {
			if (0 == counters->counters[idx]) {
				BIT_SET64(counters->bit_field, idx);
			}

			MON_ASSERT(counters->counters[idx] < 255);
			++(counters->counters[idx]);
		} else {
			MON_ASSERT(counters->counters[idx] > 0);
			--(counters->counters[idx]);

			if (0 == counters->counters[idx]) {
				BIT_CLR64(counters->bit_field, idx);
			}
		}
	}

	return counters->bit_field;
}
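
A minimal standalone sketch of the reference-counting semantics above (the structure layout, with a 64-bit summary word plus one byte counter per bit, is an assumption inferred from how the function uses its argument, not taken from the original headers): a control bit stays reported in the returned bit field until every request that set it has been balanced by a clearing request.

#include <stdint.h>
#include <stdio.h>

/* Assumed layout, mirroring the usage in gcpu_update_control_counters() */
typedef struct {
	uint64_t bit_field;    /* 1 for every bit whose counter is non-zero */
	uint8_t  counters[64]; /* per-bit reference count */
} counters_sketch_t;

/* Simplified restatement of the update logic above, without the
 * MON_/BIT_/hw_* helpers, for illustration only */
static uint64_t update_sketch(uint64_t flags, uint64_t mask, counters_sketch_t *c)
{
	uint32_t idx;

	for (idx = 0; idx < 64; ++idx) {
		if (!(mask & (1ULL << idx)))
			continue;
		if (flags & (1ULL << idx)) {
			if (c->counters[idx]++ == 0)
				c->bit_field |= 1ULL << idx;
		} else {
			if (--c->counters[idx] == 0)
				c->bit_field &= ~(1ULL << idx);
		}
	}
	return c->bit_field;
}

int main(void)
{
	counters_sketch_t c = { 0 };

	/* Two independent requests set bit 3, then one request clears it:
	 * the bit must stay reported until the second clear arrives */
	update_sketch(1ULL << 3, 1ULL << 3, &c); /* counter 0 -> 1, bit set   */
	update_sketch(1ULL << 3, 1ULL << 3, &c); /* counter 1 -> 2, bit stays */
	update_sketch(0, 1ULL << 3, &c);         /* counter 2 -> 1, bit stays */
	printf("bit 3 still requested: %u\n", (unsigned)((c.bit_field >> 3) & 1));
	return 0;
}
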
Example 2
void nmi_window_update_before_vmresume(struct _VMCS_OBJECT *vmcs)
{
    struct _VMCS_ACTUAL_OBJECT *p_vmcs = (struct _VMCS_ACTUAL_OBJECT *) vmcs;
    UINT64 value;

    /* If an NMI has occurred or is pending, request an NMI-window exit
     * before resuming the guest */
    if (nmi_is_nmi_occured() || nmi_is_pending_this()) {
        VMM_ASSERT(p_vmcs);
        value = vmcs_act_read_from_hardware(p_vmcs, VMCS_CONTROL_VECTOR_PROCESSOR_EVENTS);
        BIT_SET64(value, NMI_WINDOW_BIT);
        vmcs_act_write_to_hardware(p_vmcs, VMCS_CONTROL_VECTOR_PROCESSOR_EVENTS, value);
        nmi_window_set();
    }
}
Example 3
void vmcs_nmi_handler(struct _VMCS_OBJECT *vmcs)
{
    struct _VMCS_ACTUAL_OBJECT *p_vmcs = (struct _VMCS_ACTUAL_OBJECT *) vmcs;
    UINT64  value;
    VMM_ASSERT(p_vmcs);

    // mark that the NMI window must be set, in case SW has not yet flushed the VMCS to hardware
    nmi_remember_occured_nmi();

    // spoil the VMCS flush process in case one is in progress
    p_vmcs->update_status = UPDATE_FAILED;

    // write directly to hardware in case SW has already flushed to the CPU
    value = vmcs_act_read_from_hardware(p_vmcs, VMCS_CONTROL_VECTOR_PROCESSOR_EVENTS);
    BIT_SET64(value, NMI_WINDOW_BIT);
    vmcs_act_write_to_hardware(p_vmcs, VMCS_CONTROL_VECTOR_PROCESSOR_EVENTS, value);
}
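
The two routines above cooperate against a race between NMI delivery and the flush of the VMCS software cache to hardware. A minimal sketch of that interplay, under assumptions: the flag names and globals below are illustrative stand-ins, and bit 22 ("NMI-window exiting" in the primary processor-based VM-execution controls) is taken from the Intel SDM rather than from this project's headers.

#include <stdbool.h>
#include <stdint.h>

#define NMI_WINDOW_BIT 22   /* assumed: "NMI-window exiting" bit of the
                             * primary processor-based VM-execution controls */

/* Illustrative stand-ins for the real VMCS cache and NMI bookkeeping */
static uint64_t hw_proc_ctls;   /* hardware copy of the control field     */
static bool     nmi_occurred;   /* software note that an NMI arrived      */
static bool     flush_spoiled;  /* in-progress cache flush must be redone */

/* Mirrors vmcs_nmi_handler(): an NMI can land while the software cache is
 * being flushed, so the handler covers both orderings */
static void nmi_handler_sketch(void)
{
    nmi_occurred  = true;                    /* for the pre-resume check     */
    flush_spoiled = true;                    /* force the flush to be redone */
    hw_proc_ctls |= 1ULL << NMI_WINDOW_BIT;  /* and patch hardware directly  */
}

/* Mirrors nmi_window_update_before_vmresume(): the last chance before
 * VMRESUME to make sure the NMI-window request reached hardware */
static void before_vmresume_sketch(void)
{
    if (nmi_occurred)
        hw_proc_ctls |= 1ULL << NMI_WINDOW_BIT;
}
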
Example 4
// Function : vmdb_settings_apply_to_hw
// Purpose  : Update GCPU DRs from its guest's VMDB context
// Arguments: GUEST_CPU_HANDLE gcpu
// Returns  : void
void vmdb_settings_apply_to_hw(GUEST_CPU_HANDLE gcpu)
    {
    VMDB_THREAD_CONTEXT *vmdb = gcpu_get_vmdb(gcpu);

    if (NULL != vmdb) {
        UINT64      rflags;
        VMCS_OBJECT *vmcs = gcpu_get_vmcs(gcpu);

        gcpu_set_debug_reg(gcpu, IA32_REG_DR7, vmdb->dr7);
        gcpu_set_debug_reg(gcpu, IA32_REG_DR0, vmdb->dr[0]);
        gcpu_set_debug_reg(gcpu, IA32_REG_DR1, vmdb->dr[1]);
        gcpu_set_debug_reg(gcpu, IA32_REG_DR2, vmdb->dr[2]);
        gcpu_set_debug_reg(gcpu, IA32_REG_DR3, vmdb->dr[3]);

        rflags = vmcs_read(vmcs, VMCS_GUEST_RFLAGS);
        if (vmdb->sstep)
            BIT_SET64(rflags, RFLAGS_TF_BIT);
        else
            BIT_CLR64(rflags, RFLAGS_TF_BIT);
        vmcs_write(vmcs, VMCS_GUEST_RFLAGS, rflags);
        }
    }
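
For context, a caller-side sketch of what vmdb_settings_apply_to_hw() consumes. The structure below is only inferred from the fields used above (dr7, dr[0..3], sstep, skip_counter); the DR7 bit positions follow the standard IA-32 layout. None of it is copied from the project's headers.

#include <stdint.h>

/* Assumed shape of the per-thread VMDB context, inferred from usage above */
typedef struct {
    uint64_t dr7;              /* breakpoint enables and R/W, LEN fields    */
    uint64_t dr[4];            /* linear addresses for DR0-DR3              */
    int      sstep;            /* non-zero: request RFLAGS.TF single step   */
    uint32_t skip_counter[4];  /* hits to skip silently, one per breakpoint */
} vmdb_thread_context_sketch_t;

/* Example: arm an execution breakpoint at `addr` in slot 0 and request
 * single stepping. In DR7, L0 (bit 0) locally enables breakpoint 0, and
 * R/W0 = 00 with LEN0 = 00 (bits 19:16) selects "break on execution". */
static void arm_exec_bp0_sketch(vmdb_thread_context_sketch_t *ctx, uint64_t addr)
{
    ctx->dr[0] = addr;
    ctx->dr7  |= 0x1;              /* L0: local enable for slot 0         */
    ctx->dr7  &= ~(0xfULL << 16);  /* R/W0 = 00, LEN0 = 00: execute break */
    ctx->sstep = 1;                /* also single step via RFLAGS.TF      */
}
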
Example 5
BOOLEAN vmdb_exception_handler(GUEST_CPU_HANDLE gcpu)
    {
    VMDB_THREAD_CONTEXT    *vmdb = gcpu_get_vmdb(gcpu);
    VMCS_OBJECT            *vmcs = gcpu_get_vmcs(gcpu);
    IA32_VMX_EXIT_QUALIFICATION qualification;
    ADDRESS                 guest_rflags;
    int                     i;
#if defined DEBUG || defined ENABLE_RELEASE_VMM_LOG
    const VIRTUAL_CPU_ID   *vcpu = guest_vcpu(gcpu);
    VMM_ASSERT(vcpu);
#endif

    VMM_ASSERT(vmdb);

    qualification.Uint64 = vmcs_read(vmcs, VMCS_EXIT_INFO_QUALIFICATION);

    if (qualification.DbgException.DbgRegAccess) {
        VMDB_LOG(level_print_always,"[vmdb] Debug Registers Access is NOT supported\n");
        }

    if (qualification.DbgException.SingleStep) {
        vmdb_thread_log(gcpu, "VMDB Single Step Break occurred on thread", __FUNCTION__);

        if (FALSE == event_raise(EVENT_GUEST_CPU_BREAKPOINT, gcpu, 0)) {
            VMM_DEADLOOP();
            }
        }
    else {
        for (i = 0; i < NUMBER_OF_HW_BREAKPOINTS; ++i) {
            if (BIT_GET64(qualification.DbgException.BreakPoints, i)) {
#if defined DEBUG || defined ENABLE_RELEASE_VMM_LOG
                UINT32 db_type = (UINT32) DR7_RW_GET(vmdb->dr7, i);
#endif

                if (0 != vmdb->skip_counter[i]) {
                    (void)vmdb->skip_counter[i];
                    continue;
                    }

                (void)bp_type_name;
                (void)bp_actual_length;
                VMDB_LOG(level_print_always,"[vmdb] %s break occurred at address(%P) on thread(%d,%d)\n",
                    bp_type_name[db_type], vmdb->dr[i],
                    vcpu->guest_id, vcpu->guest_cpu_id);

                // If this is a breakpoint for the VMDB STUB, propagate it.
                if (FALSE == event_raise(EVENT_GUEST_CPU_SINGLE_STEP, gcpu, 0)) {
                    VMM_DEADLOOP();
                    }
                }
            }
        }

    // Set the Resume Flag so the same breakpoint does not immediately re-trigger
    guest_rflags = gcpu_get_native_gp_reg(gcpu, IA32_REG_RFLAGS);
    BIT_SET64(guest_rflags, RFLAGS_RF_BIT);
    gcpu_set_native_gp_reg(gcpu, IA32_REG_RFLAGS, guest_rflags);

    gcpu_vmexit_exception_resolve(gcpu);

    return TRUE;
    }
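
The handler above decodes the exit qualification through an IA32_VMX_EXIT_QUALIFICATION union defined elsewhere in this code base. For reference, here is a sketch of the debug-exception portion of that layout as described in the Intel SDM; the type and field names are assumptions chosen to match the usage above, not the project's own definitions.

#include <stdint.h>

/* Exit qualification for debug exceptions (per the Intel SDM): bits 3:0
 * report which of DR0-DR3 matched, bit 13 reports a debug-register access
 * while DR7.GD was set, and bit 14 reports a single-step or branch trap */
typedef union {
    uint64_t Uint64;
    struct {
        uint64_t BreakPoints  : 4;   /* B3-B0: hardware breakpoints hit */
        uint64_t Reserved_0   : 9;   /* bits 12:4                       */
        uint64_t DbgRegAccess : 1;   /* BD: debug-register access       */
        uint64_t SingleStep   : 1;   /* BS: single step / branch trap   */
        uint64_t Reserved_1   : 49;  /* bits 63:15                      */
    } DbgException;
} dbg_exit_qualification_sketch_t;
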