/*
 * Called on a vcpu thread's hot path to honor a VM-wide pause request.
 *
 * If the VM is paused, the calling vcpu freezes itself (saving its current
 * state so it can be restored afterwards), protects guest memory, and then
 * blocks on hv_pause_cnd until the pause is lifted, at which point it
 * unprotects memory and restores its original state.
 *
 * vm     - the virtual machine instance
 * vcpuid - index of the calling vcpu in vm->vcpu[]
 */
void vm_check_for_unpause(struct vm *vm, int vcpuid) {
	enum vcpu_state state;
	/* Cheap unlocked check first; re-checked under hv_pause_mtx below
	 * (classic double-checked pattern to keep the common unpaused path
	 * lock-free). */
	if (vm_is_paused(vm)) {
		if (pthread_mutex_lock(&vm->hv_pause_mtx) != 0) {
			xhyve_abort("error locking mutex");
		}
		if (vm_is_paused(vm)) { // Check that we are still paused after acq lock
			/* NOTE(review): reads the vcpu's state without taking the
			 * vcpu lock — presumably safe because only this thread
			 * transitions its own state here; confirm against
			 * vcpu_set_state's locking rules. */
			enum vcpu_state orig_state = vm->vcpu[vcpuid].state;
			state = VCPU_FROZEN;
			if (vcpu_set_state(vm, vcpuid, state, false) != 0) {
				xhyve_abort("vcpu_set_state failed\n");
			}
			/* Write-protect guest memory while paused. */
			vm_mem_protect(vm);
			// Wait for signal
			fprintf(stderr, "vcpu %d waiting for signal to resume\n", vcpuid);
			/* Loop guards against spurious condvar wakeups: only
			 * proceed once the VM is actually unpaused. */
			do {
				if (pthread_cond_wait(&vm->hv_pause_cnd, &vm->hv_pause_mtx) != 0) {
					xhyve_abort("pthread_cond_wait failed");
				}
			} while (vm_is_paused(vm));
			fprintf(stderr, "vcpu %d received signal, resuming\n", vcpuid);
			vm_mem_unprotect(vm);
			/* Restore whatever state the vcpu was in before freezing
			 * (FROZEN -> RUNNING / SLEEPING / IDLE are the allowed
			 * transitions). */
			state = orig_state;
			if (vcpu_set_state(vm, vcpuid, state, false) != 0) {
				xhyve_abort("vcpu_set_state failed\n");
			}
		}
		if (pthread_mutex_unlock(&vm->hv_pause_mtx) != 0) {
			xhyve_abort("mutex unlock failed");
		}
	}
}
static void vcpu_require_state(struct vm *vm, int vcpuid, enum vcpu_state newstate) { int error; if ((error = vcpu_set_state(vm, vcpuid, newstate, false)) != 0) panic("Error %d setting state to %d\n", error, newstate); }
/*
 * Freeze (VCPU_FROZEN) or thaw (VCPU_IDLE) a single vcpu on the global vm.
 * Aborts on a rejected state transition.
 */
static void vcpu_freeze(int vcpu, bool freeze)
{
	enum vcpu_state next = freeze ? VCPU_FROZEN : VCPU_IDLE;

	if (vcpu_set_state(vm, vcpu, next, freeze) != 0) {
		xhyve_abort("vcpu_set_state failed\n");
	}
}
/*
 * Apply vcpu_freeze semantics to every vcpu of the global vm:
 * freeze (VCPU_FROZEN) or thaw (VCPU_IDLE) all VM_MAXCPU vcpus.
 * Aborts on the first rejected state transition.
 */
static void vcpu_freeze_all(bool freeze)
{
	const enum vcpu_state next = freeze ? VCPU_FROZEN : VCPU_IDLE;
	int i;

	for (i = 0; i < VM_MAXCPU; i++) {
		if (vcpu_set_state(vm, i, next, freeze) != 0) {
			xhyve_abort("vcpu_set_state failed\n");
		}
	}
}
static int vcpu_set_state_locked(struct vcpu *vcpu, enum vcpu_state newstate, bool from_idle) { int error; const struct timespec ts = {.tv_sec = 1, .tv_nsec = 0}; /* 1 second */ /* * State transitions from the vmmdev_ioctl() must always begin from * the VCPU_IDLE state. This guarantees that there is only a single * ioctl() operating on a vcpu at any point. */ if (from_idle) { while (vcpu->state != VCPU_IDLE) { pthread_mutex_lock(&vcpu->state_sleep_mtx); vcpu_unlock(vcpu); pthread_cond_timedwait_relative_np(&vcpu->state_sleep_cnd, &vcpu->state_sleep_mtx, &ts); vcpu_lock(vcpu); pthread_mutex_unlock(&vcpu->state_sleep_mtx); //msleep_spin(&vcpu->state, &vcpu->mtx, "vmstat", hz); } } else { KASSERT(vcpu->state != VCPU_IDLE, ("invalid transition from " "vcpu idle state")); } /* * The following state transitions are allowed: * IDLE -> FROZEN -> IDLE * FROZEN -> RUNNING -> FROZEN * FROZEN -> SLEEPING -> FROZEN */ switch (vcpu->state) { case VCPU_IDLE: case VCPU_RUNNING: case VCPU_SLEEPING: error = (newstate != VCPU_FROZEN); break; case VCPU_FROZEN: error = (newstate == VCPU_FROZEN); break; } if (error) return (EBUSY); vcpu->state = newstate; if (newstate == VCPU_IDLE) pthread_cond_broadcast(&vcpu->state_sleep_cnd); //wakeup(&vcpu->state); return (0); } static void vcpu_require_state(struct vm *vm, int vcpuid, enum vcpu_state newstate) { int error; if ((error = vcpu_set_state(vm, vcpuid, newstate, false)) != 0) xhyve_abort("Error %d setting state to %d\n", error, newstate); }