/*
 * Attempt to acquire the lock without blocking.
 *
 * Atomically transitions lock->uint32_lock from 0 (free) to 1 (held)
 * via compare-exchange.
 *
 * @param lock  lock object to acquire; may be NULL
 * @return TRUE if this caller acquired the lock; FALSE if the lock was
 *         already held or if 'lock' is NULL
 *
 * Fix: dropped the stray "(void)lock;" unused-parameter suppression --
 * the parameter is validated and dereferenced right below, so the cast
 * was dead code that only misled readers.
 */
BOOLEAN lock_try_acquire(VMM_LOCK* lock)
{
    UINT32 expected_value = 0;   /* lock is free */
    UINT32 new_value = 1;        /* lock is held */
    UINT32 current_value;

    if (!lock) {
        return FALSE;  /* error: no lock object supplied */
    }

    /* CAS returns the value observed before the exchange; the acquire
     * succeeded only if the lock was free (== expected_value). */
    current_value = hw_interlocked_compare_exchange(
        (INT32 volatile *)(&(lock->uint32_lock)),
        expected_value, new_value);

    return (current_value == expected_value);
}
/*
 * Release one count of the raw lock: atomically decrement *p_lock_var.
 * Retries until the compare-exchange commits against an unmodified
 * snapshot of the lock variable.
 */
static void raw_unlock(volatile uint32_t *p_lock_var)
{
    for (;;) {
        int32_t snapshot = *p_lock_var;
        int32_t observed = hw_interlocked_compare_exchange(
            (int32_t *)p_lock_var,
            snapshot,       /* Expected */
            snapshot - 1);  /* New */

        if (observed == snapshot) {
            break;          /* decrement committed */
        }
        hw_pause();         /* lost the race - relax the CPU and retry */
    }
}
/*===================== raw_lock(), raw_unlock() ==========================
 *
 * These functions are used for doing lock/unlock
 * without CPU identification/validation
 * The reason is to have the lock facility at the stage when cpu ID is unknown
 * e.g. for LOGs at the bootstrap time
 *
 *========================================================================= */
static void raw_lock(volatile uint32_t *p_lock_var)
{
    /* Spin until this CPU is the one that flips the lock variable
     * from 0 to 1, i.e. we are the only locker. */
    while (hw_interlocked_compare_exchange((int32_t *)p_lock_var,
                                           0,    /* Expected */
                                           1)    /* New */
           != 0) {
        hw_pause();  /* lock is held elsewhere - relax the CPU and retry */
    }
}
/*
 * Dump deadloop/assert debug state to a guest-visible physical buffer.
 *
 * Emits the CPU id / file code / line number over serial, copies the
 * per-CPU file/line record into the guest buffer at g_debug_gpa, and --
 * exactly once across all CPUs (guarded by an interlocked flag) -- copies
 * the dump signature, version, exception info, GPRs, a stack snippet and
 * the VMCS contents.
 *
 * @param file_code  numeric id of the source file that hit the deadloop
 * @param line_num   line number within that file
 * @param gcpu       guest CPU handle used for guest-physical copies
 *
 * Fix: the err_msg vmm_sprintf_s call hard-coded a size of 128 while
 * err_msg is declared char[BUFFER_SIZE]; if BUFFER_SIZE < 128 that is a
 * stack buffer overflow, and it was inconsistent with every other
 * vmm_sprintf_s call here. It now uses BUFFER_SIZE.
 */
void vmm_deadloop_internal(UINT32 file_code, UINT32 line_num, GUEST_CPU_HANDLE gcpu)
{
    static UINT32 dump_started = 0;   /* interlocked once-only dump flag */
    char buffer[BUFFER_SIZE], err_msg[BUFFER_SIZE];
    UINT64 rsp, stack_base;
    UINT32 size;
    CPU_ID cpu_id;
    EXCEPT_INFO header;

    // skip dumping debug info if deadloop/assert happened before launch
    if (g_debug_gpa == 0)
        return;

    cpu_id = hw_cpu_id();
    if (cpu_id >= MAX_CPUS)
        return;

    // pre-build the error message reused by every failed guest copy below
    // (size now matches err_msg's declared capacity instead of magic 128)
    vmm_sprintf_s(err_msg, BUFFER_SIZE,
        "CPU%d: %s: Error: Could not copy deadloop message back to guest\n",
        cpu_id, __FUNCTION__);

    // send cpu id, file code, line number to serial port
    vmm_printf("%02d%04d%04d\n", cpu_id, file_code, line_num);

    // must match format defined in FILE_LINE_INFO
    size = vmm_sprintf_s(buffer, BUFFER_SIZE, "%04d%04d", file_code, line_num);

    // copy file code/line number to guest buffer at offset defined in DEADLOOP_DUMP
    // strlen(signature) + sizeof(cpu_id) + file_line[cpu]
    if (!vmm_copy_to_guest_phy_addr(gcpu,
                                    (void*)(g_debug_gpa+8+8+(cpu_id*size)),
                                    size, (void*)buffer)) {
        VMM_LOG(mask_uvmm, level_error, err_msg);
    }

    // only copy signature, VERSION, cpu_id, exception info, vmcs to guest
    // buffer once
    if (hw_interlocked_compare_exchange((INT32*)&dump_started, 0, 1) == 0) {
        size = vmm_sprintf_s(buffer, BUFFER_SIZE, "%c%c%c%c%c%c%c%c%s%04d",
            DEADLOOP_SIGNATURE[0], DEADLOOP_SIGNATURE[1],
            DEADLOOP_SIGNATURE[2], DEADLOOP_SIGNATURE[3],
            DEADLOOP_SIGNATURE[4], DEADLOOP_SIGNATURE[5],
            DEADLOOP_SIGNATURE[6], DEADLOOP_SIGNATURE[7],
            VERSION, cpu_id);

        // copy signature and cpu_id to guest buffer
        if (!vmm_copy_to_guest_phy_addr(gcpu, (void*)(g_debug_gpa), size,
                                        (void*)buffer)) {
            VMM_LOG(mask_uvmm, level_error, err_msg);
        }

        // clear buffer erasing the signature or setting no exception flag
        vmm_zeromem(buffer, sizeof(UINT64));

        if (g_exception_stack != NULL) {
            // build the exception header from the saved ISR frame
            vmm_memcpy((void *)&header.exception_stack, g_exception_stack,
                       sizeof(ISR_PARAMETERS_ON_STACK));
            header.base_address =
                vmm_startup_data.vmm_memory_layout[uvmm_image].base_address;

            // CR2 holds the faulting linear address only for page faults
            if (g_exception_stack->a.vector_id == IA32_EXCEPTION_VECTOR_PAGE_FAULT)
                header.cr2 = hw_read_cr2();

            // copy exception info to guest buffer
            if (!vmm_copy_to_guest_phy_addr(gcpu,
                    (void*)(g_debug_gpa+OFFSET_EXCEPTION),
                    sizeof(EXCEPT_INFO), (void*)&header)) {
                VMM_LOG(mask_uvmm, level_error, err_msg);
            }

            // copy GPRs to guest buffer
            if (!vmm_copy_to_guest_phy_addr(gcpu,
                    (void*)(g_debug_gpa+OFFSET_GPR),
                    sizeof(VMM_GP_REGISTERS), (void*)&g_exception_gpr)) {
                VMM_LOG(mask_uvmm, level_error, err_msg);
            }

            // copy stack to guest buffer; the saved SP location depends on
            // whether the vector pushed an error code
            rsp = isr_error_code_required((VECTOR_ID)g_exception_stack->a.vector_id) ?
                g_exception_stack->u.errcode_exception.sp :
                g_exception_stack->u.exception.sp;
            vmm_stack_get_stack_pointer_for_cpu(cpu_id, &stack_base);

            // clamp the snippet so it never reads past the stack base
            size = sizeof(UINT64)*STACK_TRACE_SIZE;
            if ((rsp+size) > stack_base)
                size = (UINT32)(stack_base-rsp);

            if (!vmm_copy_to_guest_phy_addr(gcpu,
                    (void*)(g_debug_gpa+OFFSET_STACK),
                    size, (void*)rsp)) {
                VMM_LOG(mask_uvmm, level_error, err_msg);
            }
        } else {
            // Clear base image address indicating exception did not happen
            // (buffer was zeroed above)
            if (!vmm_copy_to_guest_phy_addr(gcpu,
                    (void*)(g_debug_gpa+OFFSET_EXCEPTION),
                    sizeof(UINT64), (void*)buffer)) {
                VMM_LOG(mask_uvmm, level_error, err_msg);
            }
        }

        // copy vmcs to guest buffer
        vmcs_dump_all(gcpu);
    }
}