Example #1
0
/* Drop the NMI-window request for the current CPU, but keep it raised when
 * an NMI has already been recorded - the window must stay open for it. */
INLINE void nmi_window_clear(void)
{
    nmi_window[hw_cpu_id()] = nmi_is_nmi_occured() ? TRUE : FALSE;
}
Example #2
0
/* Write 'value' into the given VMCS field.  While software shadowing is
 * active the write lands in the software cache; when shadowing is disabled
 * for this CPU it goes straight to the hardware VMCS. */
void vmcs_act_write(struct _VMCS_OBJECT *vmcs, VMCS_FIELD field_id, UINT64 value)
{
    struct _VMCS_ACTUAL_OBJECT *actual = (struct _VMCS_ACTUAL_OBJECT *)vmcs;

    VMM_ASSERT(actual);

    if (vmcs_sw_shadow_disable[hw_cpu_id()]) {
        vmcs_act_write_to_hardware(actual, field_id, value);
    } else {
        cache64_write(actual->cache, value, (UINT32)field_id);
    }
}
Example #3
0
/*-------------------------------------------------------------------------
 *
 * Perform initialization of the host-CPU parts of all guest CPUs that run
 * on the specified host CPU.
 *
 * Should be called on the target host CPU
 *------------------------------------------------------------------------- */
void initialize_host_vmcs_regions(cpu_id_t current_cpu_id)
{
	scheduler_gcpu_iterator_t iter;
	guest_cpu_handle_t gcpu;

	/* must execute on the host CPU whose gcpus are being initialized */
	MON_ASSERT(current_cpu_id == hw_cpu_id());

	/* walk every guest CPU scheduled on this host CPU and set up the
	 * host-side VMCS state used on vm-exits */
	gcpu = scheduler_same_host_cpu_gcpu_first(&iter, current_cpu_id);
	while (gcpu != NULL) {
		host_cpu_vmcs_init(gcpu);
		gcpu = scheduler_same_host_cpu_gcpu_next(&iter);
	}
}
Example #4
0
/*
 * Acquire 'lock', spinning until it becomes available.
 * Silently returns when 'lock' is NULL (caller error).
 * Asserts that this CPU does not already own the lock, since re-acquiring
 * it here would spin forever.
 *
 * Fixes: removed the pointless "(void)lock;" no-op (lock IS used), and
 * moved the hw_cpu_id() call after the NULL guard so nothing is done on
 * the error path.
 */
void lock_acquire(VMM_LOCK* lock)
{
    CPU_ID this_cpu_id;

    if (! lock) {
        return; // error
    }
    this_cpu_id = hw_cpu_id();
    while (FALSE == lock_try_acquire(lock)) {
        // self-deadlock check: the spinning CPU must not be the owner
        VMM_ASSERT_NOLOCK(lock->owner_cpu_id != this_cpu_id);
        hw_pause();
    }
    lock->owner_cpu_id = this_cpu_id;
}
Example #5
0
/*
 * Acquire 'lock', servicing incoming IPCs while spinning so that a remote
 * CPU waiting for this CPU's IPC acknowledgement cannot deadlock us.
 * Silently returns when 'lock' is NULL (caller error).
 *
 * Fixes: removed the pointless "(void)lock;" no-op (lock IS used), and
 * moved the hw_cpu_id() call after the NULL guard.
 */
void interruptible_lock_acquire(VMM_LOCK* lock)
{
    CPU_ID this_cpu_id;
    BOOLEAN ipc_processed = FALSE;

    if (!lock) {
        return; // error
    }
    this_cpu_id = hw_cpu_id();
    while (FALSE == lock_try_acquire(lock)) {
        // process at most one pending IPC; pause only if there was none
        ipc_processed = ipc_process_one_ipc();
        if (FALSE == ipc_processed) {
            hw_pause();
        }
    }
    lock->owner_cpu_id = this_cpu_id;
}
Example #6
0
/*
 * CLI command: display the hardware breakpoints held in the VMDB thread
 * context attached to the guest named by args[1], on the current host CPU.
 * Returns 0 on success, -1 on bad usage or failed guest/VMDB lookup.
 */
int vmdb_cli_breakpoint_show(unsigned argc, char *args[])
    {
    GUEST_CPU_HANDLE    gcpu;
    VMDB_THREAD_CONTEXT *vmdb;
    ADDRESS             bp_address;
    int                 i;

    /* need at least the guest specifier argument */
    if (argc < 2) return -1;

    gcpu = vmdb_cli_locate_gcpu(args[1], NULL);

    if (NULL == gcpu) {
        CLI_PRINT("Invalid Guest %s\n", args[1]);
        return -1;
        }

    vmdb = gcpu_get_vmdb(gcpu);
    if (NULL == vmdb) {
        CLI_PRINT("VMDB is not attached to thread %s,%d\n", args[1], hw_cpu_id());
        return -1;
        }

    CLI_PRINT("======================================\n");
    CLI_PRINT("Single step:  %s\n", vmdb->sstep   ? "enabled" : "disabled");
    CLI_PRINT("======================================\n");
    CLI_PRINT("BP  linear address  type  len  counter\n");
    CLI_PRINT("======================================\n");

    /* a slot is printed only when it holds a non-zero address and its
     * DR7 global-enable bit is set */
    for (i = 0; i < NUMBER_OF_HW_BREAKPOINTS; ++i) {
        CLI_PRINT("%d: ", i);

        bp_address = vmdb->dr[i];

        if (0 != bp_address && DR7_GLOBAL_GET(vmdb->dr7, i)) {
#if defined DEBUG || defined ENABLE_RELEASE_VMM_LOG
            VMDB_BREAKPOINT_TYPE     bp_type = (VMDB_BREAKPOINT_TYPE)DR7_RW_GET(vmdb->dr7, i);
            VMDB_BREAK_LENGTH_TYPE   bp_len = (VMDB_BREAK_LENGTH_TYPE)DR7_LEN_GET(vmdb->dr7, i);
#endif
            /* NOTE(review): bp_type/bp_len exist only under the #if above,
             * yet are referenced below - presumably CLI_PRINT expands to a
             * no-op in other builds; confirm against the CLI_PRINT macro */
            CLI_PRINT ( "%16P %5s   %d   %d", bp_address, bp_type_name[bp_type],
                bp_actual_length[bp_len], vmdb->skip_counter[i]);
            }
        CLI_PRINT("\n");
        }
    return 0;
    }
Example #7
0
/*
 * IPC handler run on a remote CPU to carry out a VMDB debugger request
 * (attach/detach, breakpoint add/delete, single-step toggle) on the guest
 * CPU of params->guest_id that runs on this host CPU.
 *
 * Fix: corrected log-message typo "called wit NULL" -> "called with NULL".
 */
void vmdb_remote_handler (CPU_ID from UNUSED, VMDB_REMOTE_PARAMS *params)
{
    VIRTUAL_CPU_ID      vcpu;
    GUEST_CPU_HANDLE    gcpu;

    do  {
        if (NULL == params) {
            VMDB_LOG(level_error,"%s called with NULL argument\n", __FUNCTION__);
            break;
            }

        /* resolve the guest CPU of that guest on the current host CPU */
        vcpu.guest_id     = params->guest_id;
        vcpu.guest_cpu_id = hw_cpu_id();
        if (NULL == (gcpu = gcpu_state(&vcpu))) {
            VMDB_LOG(level_error,"%s GCPU(%d,%d) is not found\n",
                __FUNCTION__, vcpu.guest_id, vcpu.guest_cpu_id);
            break;
            }

        /* dispatch on the requested debugger operation */
        switch (params->function_id) {
            case VMDB_IPC_ATTACH:
                vmdb_thread_attach(gcpu);
                break;
            case VMDB_IPC_DETACH:
                vmdb_thread_detach(gcpu);
                break;
            case VMDB_IPC_ADD_BP:
                vmdb_breakpoint_add ( gcpu, params->u.add_bp.linear_address,
                    params->u.add_bp.bp_type, params->u.add_bp.bp_len,
                    params->u.add_bp.skip_counter);
                break;
            case VMDB_IPC_DEL_BP:
                vmdb_breakpoint_delete(gcpu, params->u.del_bp.linear_address);
                break;
            case VMDB_IPC_SINGLE_STEP:
                vmdb_single_step_enable(gcpu, params->u.sstep.enable);
                break;
            default:
                VMDB_LOG(level_error,"%s GCPU(%d,%d) Unknown remote function ID(%d)\n",
                    __FUNCTION__, vcpu.guest_id, vcpu.guest_cpu_id, params->function_id);
                break;
            }

        } while (0);
    }
Example #8
0
/*
 * Parse a CLI guest specifier and return the matching guest CPU handle on
 * the current host CPU, or NULL (via gcpu_state) if not found.
 *
 * A leading '*' means "apply to all guest CPUs": *apply_to_all is set to
 * TRUE and the '*' is skipped before the guest id is parsed; otherwise
 * *apply_to_all is set to FALSE.  apply_to_all may be NULL.
 */
GUEST_CPU_HANDLE vmdb_cli_locate_gcpu(char *string, BOOLEAN *apply_to_all)
    {
    VIRTUAL_CPU_ID vcpu;

    if (NULL != apply_to_all) {
        if ('*' == string[0]) {
            *apply_to_all = TRUE;
            string++;   // skip '*' symbol
            }
        else {
            /* BUG FIX: was "apply_to_all = FALSE;", which nulled the local
             * pointer instead of clearing the caller's flag */
            *apply_to_all = FALSE;
            }
        }

    vcpu.guest_id = (GUEST_ID) CLI_ATOL(string);
    vcpu.guest_cpu_id = hw_cpu_id();

    return gcpu_state(&vcpu);
    }
Example #9
0
/* Apply the pending vmexit configuration to the guest CPUs of the given
 * guest that are scheduled on the current host CPU.  Runs either directly
 * or as an IPC handler on remote CPUs; increments the shared 'executed'
 * counter when done so the issuer can wait for completion. */
static
void apply_vmexit_config(cpu_id_t from UNUSED, void *arg)
{
	ipc_comm_guest_struct_t *comm = (ipc_comm_guest_struct_t *)arg;
	guest_handle_t guest = comm->guest;
	volatile uint32_t *executed_counter = &(comm->executed);
	cpu_id_t this_hcpu_id = hw_cpu_id();
	guest_cpu_handle_t gcpu;
	guest_gcpu_econtext_t ctx;

	MON_ASSERT(guest);

	for (gcpu = mon_guest_gcpu_first(guest, &ctx); gcpu != NULL;
	     gcpu = mon_guest_gcpu_next(&ctx)) {
		/* only touch gcpus that belong to this host CPU */
		if (scheduler_get_host_cpu_id(gcpu) == this_hcpu_id) {
			gcpu_control_apply_only(gcpu);
		}
	}

	/* signal completion to the issuing CPU */
	hw_interlocked_increment((int32_t *)executed_counter);
}
Example #10
0
UINT64 vmcs_act_read_from_hardware(VMCS_ACTUAL_OBJECT *p_vmcs, VMCS_FIELD field_id)
{
    UINT64           value;
    int              ret_val;
    UINT64           previous_vmcs = 0; // 0 - not replaced
    UINT32           encoding;

    VMM_DEBUG_CODE(
        if ((p_vmcs->owning_host_cpu != CPU_NEVER_USED) && (p_vmcs->owning_host_cpu != hw_cpu_id())) {
            VMM_LOG(mask_anonymous, level_trace,
                    "Trying to access VMCS, used on another CPU\n");
            VMM_DEADLOOP();
        }
    )
Example #11
0
/* Report whether an NMI-window vmexit should be requested on this CPU:
 * TRUE when an NMI has already occurred or a window request is pending. */
INLINE BOOLEAN nmi_window_is_requested(void)
{
    if (nmi_is_nmi_occured()) {
        return TRUE;
    }
    return nmi_window[hw_cpu_id()];
}
Example #12
0
/* Record that an NMI-window vmexit is requested for the current CPU. */
INLINE void nmi_window_set(void)
{
    nmi_window[hw_cpu_id()] = TRUE;
}
Example #13
0
/*
 * Dump deadloop/assert debug state to the guest-visible buffer at
 * g_debug_gpa and to the serial log.
 *
 * file_code/line_num identify the deadloop site; gcpu is the guest CPU
 * whose physical memory receives the dump.  Every CPU records its own
 * file/line entry, but the full dump (signature, VERSION, exception info,
 * GPRs, stack, VMCS) is emitted only by the first CPU to arrive here.
 *
 * Fix: err_msg is declared char[BUFFER_SIZE] but was formatted with a
 * hard-coded size of 128 - now uses BUFFER_SIZE.
 */
void vmm_deadloop_internal(UINT32 file_code, UINT32 line_num, GUEST_CPU_HANDLE gcpu)
{
    static UINT32 dump_started = 0;
    char        buffer[BUFFER_SIZE], err_msg[BUFFER_SIZE];
    UINT64      rsp, stack_base;
    UINT32      size;
    CPU_ID      cpu_id;
    EXCEPT_INFO header;

    // skip dumping debug info if deadloop/assert happened before launch
    if (g_debug_gpa == 0)
        return;

    cpu_id = hw_cpu_id();
    if (cpu_id >= MAX_CPUS)
        return;

    // pre-build the error message reused by every copy attempt below
    vmm_sprintf_s(err_msg, BUFFER_SIZE,
            "CPU%d: %s: Error: Could not copy deadloop message back to guest\n",
            cpu_id, __FUNCTION__);

    // send cpu id, file code, line number to serial port
    vmm_printf("%02d%04d%04d\n", cpu_id, file_code, line_num);

    // must match format defined in FILE_LINE_INFO
    size = vmm_sprintf_s(buffer, BUFFER_SIZE, "%04d%04d", file_code, line_num);

    // copy file code/line number to guest buffer at offset defined in DEADLOOP_DUMP
    // strlen(signature) + sizeof(cpu_id) + file_line[cpu]
    if (!vmm_copy_to_guest_phy_addr(gcpu,
                                    (void*)(g_debug_gpa+8+8+(cpu_id*size)),
                                    size,
                                    (void*)buffer)) {
        VMM_LOG(mask_uvmm, level_error, err_msg);
    }

    // only copy signature, VERSION, cpu_id, exception info, vmcs to guest
    // buffer once - the first CPU to win the exchange does it
    if (hw_interlocked_compare_exchange((INT32*)&dump_started, 0, 1) == 0) {
        size = vmm_sprintf_s(buffer, BUFFER_SIZE, "%c%c%c%c%c%c%c%c%s%04d",
            DEADLOOP_SIGNATURE[0], DEADLOOP_SIGNATURE[1],
            DEADLOOP_SIGNATURE[2], DEADLOOP_SIGNATURE[3],
            DEADLOOP_SIGNATURE[4], DEADLOOP_SIGNATURE[5],
            DEADLOOP_SIGNATURE[6], DEADLOOP_SIGNATURE[7], VERSION, cpu_id);

        // copy signature and cpu_id to guest buffer
        if (!vmm_copy_to_guest_phy_addr(gcpu,
                                        (void*)(g_debug_gpa),
                                        size,
                                        (void*)buffer)) {
            VMM_LOG(mask_uvmm, level_error, err_msg);
        }

        // clear buffer erasing the signature or setting no exception flag
        vmm_zeromem(buffer, sizeof(UINT64));

        if (g_exception_stack != NULL) {
            // snapshot the exception frame and the image base address
            vmm_memcpy((void *)&header.exception_stack, g_exception_stack, sizeof(ISR_PARAMETERS_ON_STACK));
            header.base_address = vmm_startup_data.vmm_memory_layout[uvmm_image].base_address;

            // CR2 holds the faulting address only for page faults
            if (g_exception_stack->a.vector_id == IA32_EXCEPTION_VECTOR_PAGE_FAULT)
                header.cr2 = hw_read_cr2();

            // copy exception info to guest buffer
            if (!vmm_copy_to_guest_phy_addr(gcpu,
                                            (void*)(g_debug_gpa+OFFSET_EXCEPTION),
                                            sizeof(EXCEPT_INFO),
                                            (void*)&header)) {
                VMM_LOG(mask_uvmm, level_error, err_msg);
            }

            // copy GPRs to guest buffer
            if (!vmm_copy_to_guest_phy_addr(gcpu,
                                            (void*)(g_debug_gpa+OFFSET_GPR),
                                            sizeof(VMM_GP_REGISTERS),
                                            (void*)&g_exception_gpr)) {
                VMM_LOG(mask_uvmm, level_error, err_msg);
            }

            // copy stack to guest buffer; the saved sp location depends on
            // whether this vector pushes an error code
            rsp = isr_error_code_required((VECTOR_ID)g_exception_stack->a.vector_id) ?
                        g_exception_stack->u.errcode_exception.sp :
                        g_exception_stack->u.exception.sp;

            vmm_stack_get_stack_pointer_for_cpu(cpu_id, &stack_base);

            // clamp the dump so it never reads past the stack base
            size = sizeof(UINT64)*STACK_TRACE_SIZE;
            if ((rsp+size) > stack_base)
                size = (UINT32)(stack_base-rsp);

            if (!vmm_copy_to_guest_phy_addr(gcpu,
                                            (void*)(g_debug_gpa+OFFSET_STACK),
                                            size,
                                            (void*)rsp)) {
                VMM_LOG(mask_uvmm, level_error, err_msg);
            }
        } else {
            // Clear base image address indicating exception did not happen
            if (!vmm_copy_to_guest_phy_addr(gcpu,
                                            (void*)(g_debug_gpa+OFFSET_EXCEPTION),
                                            sizeof(UINT64),
                                            (void*)buffer)) {
                VMM_LOG(mask_uvmm, level_error, err_msg);
            }
        }

        // copy vmcs to guest buffer
        vmcs_dump_all(gcpu);
    }
}
Example #14
0
/* ---------------------------- APIs --------------------------------------- */
/*
 * Set up the requested vmexit controls on every guest CPU of 'guest' and
 * then apply them, according to the global mon state:
 *  - MON_STATE_BOOT: single-threaded; applied directly (BSP only);
 *  - MON_STATE_RUN:  applied locally for this host CPU's gcpus, then
 *    broadcast via IPC to all other CPUs, spinning until every one of
 *    them has executed the handler;
 *  - any other state is fatal (deadloop).
 */
void guest_control_setup(guest_handle_t guest, const vmexit_control_t *request)
{
	guest_gcpu_econtext_t ctx;
	guest_cpu_handle_t gcpu;
	mon_state_t mon_state;
	cpu_id_t this_hcpu_id = hw_cpu_id();

	MON_ASSERT(guest);

	/* setup vmexit requests without applying */
	for (gcpu = mon_guest_gcpu_first(guest, &ctx); gcpu;
	     gcpu = mon_guest_gcpu_next(&ctx))
		gcpu_control_setup_only(gcpu, request);

	/* now apply */
	mon_state = mon_get_state();

	if (MON_STATE_BOOT == mon_state) {
		/* may be run on BSP only */
		MON_ASSERT(0 == this_hcpu_id);

		/* single thread mode with all APs yet not init */
		for (gcpu = mon_guest_gcpu_first(guest, &ctx); gcpu;
		     gcpu = mon_guest_gcpu_next(&ctx))
			gcpu_control_apply_only(gcpu);
	} else if (MON_STATE_RUN == mon_state) {
		ipc_comm_guest_struct_t ipc;
		uint32_t wait_for_ipc_count = 0;
		ipc_destination_t ipc_dst;

		mon_memset(&ipc, 0, sizeof(ipc));
		mon_memset(&ipc_dst, 0, sizeof(ipc_dst));

		/* multi-thread mode with all APs ready and running
		 * or in Wait-For-SIPI state on behalf of guest */

		ipc.guest = guest;

		/* first apply for gcpus allocated for this hw cpu */
		apply_vmexit_config(this_hcpu_id, &ipc);

		/* reset executed counter and flush memory */
		hw_assign_as_barrier(&(ipc.executed), 0);

		/* send for execution */
		ipc_dst.addr_shorthand = IPI_DST_ALL_EXCLUDING_SELF;
		wait_for_ipc_count =
			ipc_execute_handler(ipc_dst, apply_vmexit_config, &ipc);

		/* wait for execution finish */
		while (wait_for_ipc_count != ipc.executed) {
			/* avoid deadlock - process one IPC if exist */
			ipc_process_one_ipc();
			hw_pause();
		}
	} else {
		/* not supported mode */
		MON_LOG(mask_anonymous, level_trace,
			"Unsupported global mon_state=%d in"
			" guest_request_vmexit_on()\n",
			mon_state);
		MON_DEADLOOP();
	}
}
Example #15
0
/*-------------------------------------------------------------------------
 *
 * Perform initialization of guests and guest CPUs
 *
 * Should be called on the BSP only while all APs are stopped
 *
 * Return TRUE for success
 *
 *------------------------------------------------------------------------- */
/*
 * Initialize the primary guest (secondary guests are not implemented) and
 * attach its startup GPM to every guest CPU.  Must run on the BSP.
 * Returns TRUE on success, FALSE (after deadloop) on failure.
 *
 * Fix: the result of init_memory_layout() was stored in 'ok' but never
 * checked, so the function reported success even when the memory layout
 * failed to initialize.
 */
boolean_t initialize_all_guests(uint32_t number_of_host_processors,
				const mon_memory_layout_t *mon_memory_layout,
				const mon_guest_startup_t *
				primary_guest_startup_state,
				uint32_t number_of_secondary_guests,
				const mon_guest_startup_t *
				secondary_guests_startup_state_array,
				const mon_application_params_struct_t *
				application_params)
{
	guest_handle_t primary_guest;
	gpm_handle_t primary_guest_startup_gpm;
	boolean_t ok = FALSE;
	/* guest_handle_t cur_guest; */
	guest_cpu_handle_t gcpu;
	guest_gcpu_econtext_t gcpu_context;

	MON_ASSERT(hw_cpu_id() == 0);
	MON_ASSERT(number_of_host_processors > 0);
	MON_ASSERT(mon_memory_layout);
	MON_ASSERT(primary_guest_startup_state);

	if (number_of_secondary_guests > 0) {
		MON_LOG(mask_anonymous, level_trace,
			"initialize_all_guests ASSERT: Secondary guests are"
			" yet not implemented\n");

		MON_ASSERT(secondary_guests_startup_state_array);

		/* init guests and allocate memory for them */

		/* shutdown temporary layout object */

		MON_DEADLOOP();
		return FALSE;
	}

	/* first init primary guest */
	MON_LOG(mask_anonymous, level_trace, "Init primary guest\n");

	/* BUGBUG: This is a workaround until loader will not do this!!! */
	BITMAP_SET(((mon_guest_startup_t *)primary_guest_startup_state)->flags,
		MON_GUEST_FLAG_REAL_BIOS_ACCESS_ENABLE |
		MON_GUEST_FLAG_LAUNCH_IMMEDIATELY);

	/* TODO: Uses global policym but should be part of mon_guest_startup_t
	 * structure.  */
	primary_guest = init_single_guest(number_of_host_processors,
		primary_guest_startup_state, NULL);
	if (!primary_guest) {
		MON_LOG(mask_anonymous, level_trace,
			"initialize_all_guests: Cannot init primary guest\n");
		MON_DEADLOOP();
		return FALSE;
	}

	guest_set_primary(primary_guest);
	primary_guest_startup_gpm = mon_guest_get_startup_gpm(primary_guest);

	/* init memory layout in the startup gpm */
	ok = init_memory_layout(mon_memory_layout,
		primary_guest_startup_gpm,
		number_of_secondary_guests > 0,
		application_params);
	/* BUG FIX: 'ok' was previously ignored */
	if (!ok) {
		MON_LOG(mask_anonymous, level_trace,
			"initialize_all_guests: Cannot init memory layout\n");
		MON_DEADLOOP();
		return FALSE;
	}

	/* Set active_gpm to startup gpm */
	for (gcpu = mon_guest_gcpu_first(primary_guest, &gcpu_context); gcpu;
	     gcpu = mon_guest_gcpu_next(&gcpu_context))
		mon_gcpu_set_current_gpm(gcpu, primary_guest_startup_gpm);

	MON_LOG(mask_anonymous, level_trace,
		"Primary guest initialized successfully\n");

	return TRUE;
}