/*
 * Create the HAX VM for @hax, or hand back the one already created.
 * Returns NULL when the module handle is invalid or the driver fails.
 */
struct hax_vm *hax_vm_create(struct hax_state *hax)
{
    struct hax_vm *vm;
    int id = 0;
    int err;

    if (hax_invalid_fd(hax->fd)) {
        return NULL;
    }

    /* Only one VM per hax state: reuse an existing one. */
    if (hax->vm) {
        return hax->vm;
    }

    vm = g_new0(struct hax_vm, 1);

    err = hax_host_create_vm(hax, &id);
    if (err) {
        fprintf(stderr, "Failed to create vm %x\n", err);
        goto error;
    }

    vm->id = id;
    vm->fd = hax_host_open_vm(hax, id);
    if (hax_invalid_fd(vm->fd)) {
        fprintf(stderr, "Failed to open vm %d\n", id);
        goto error;
    }

    hax->vm = vm;
    return vm;

error:
    g_free(vm);
    hax->vm = NULL;
    return NULL;
}
/*
 * Create vCPU @id in the global VM, open a handle to it, and map its
 * kernel communication channel.
 * Returns 0 on success (including when the vCPU already exists),
 * -1 on any failure.
 *
 * NOTE(review): @id is not range-checked against the vcpus[] array here;
 * callers must pass a valid index — confirm against call sites.
 */
int hax_vcpu_create(int id)
{
    struct hax_vcpu_state *vcpu = NULL;
    int ret;

    if (!hax_global.vm) {
        fprintf(stderr, "vcpu %x created failed, vm is null\n", id);
        return -1;
    }

    /* Already allocated: treated as success, not an error. */
    if (hax_global.vm->vcpus[id]) {
        fprintf(stderr, "vcpu %x allocated already\n", id);
        return 0;
    }

    vcpu = g_new0(struct hax_vcpu_state, 1);

    ret = hax_host_create_vcpu(hax_global.vm->fd, id);
    if (ret) {
        fprintf(stderr, "Failed to create vcpu %x\n", id);
        goto error;
    }

    vcpu->vcpu_id = id;
    vcpu->fd = hax_host_open_vcpu(hax_global.vm->id, id);
    if (hax_invalid_fd(vcpu->fd)) {
        fprintf(stderr, "Failed to open the vcpu\n");
        ret = -ENODEV;
        goto error;
    }

    /* Publish the vCPU before channel setup so the error path below can
     * unconditionally clear the slot. */
    hax_global.vm->vcpus[id] = vcpu;

    ret = hax_host_setup_vcpu_channel(vcpu);
    if (ret) {
        fprintf(stderr, "Invalid hax tunnel size\n");
        ret = -EINVAL;
        goto error;
    }
    return 0;

error:
    /* vcpu and tunnel will be closed automatically */
    if (vcpu && !hax_invalid_fd(vcpu->fd)) {
        hax_close_fd(vcpu->fd);
    }
    hax_global.vm->vcpus[id] = NULL;
    g_free(vcpu);
    return -1;
}
/*
 * Transfer the vCPU register state through the HAX device:
 * set != 0 writes @state to the vCPU, set == 0 reads it back into @state.
 * Returns 0 on success, -1 for an invalid fd, -EFAULT if the ioctl fails.
 */
int hax_sync_vcpu_state(CPUArchState *env, struct vcpu_state_t *state, int set)
{
    int ret;
    hax_fd fd;
    HANDLE hDeviceVCPU;
    /* Fix: was uninitialized; initialize for consistency with
     * hax_sync_fpu()/hax_sync_msr(), which both start at 0. */
    DWORD dSize = 0;

    fd = hax_vcpu_get_fd(env);
    if (hax_invalid_fd(fd)) {
        return -1;
    }

    hDeviceVCPU = fd;
    if (set) {
        ret = DeviceIoControl(hDeviceVCPU, HAX_VCPU_SET_REGS,
                              state, sizeof(*state), NULL, 0,
                              &dSize, (LPOVERLAPPED) NULL);
    } else {
        ret = DeviceIoControl(hDeviceVCPU, HAX_VCPU_GET_REGS,
                              NULL, 0, state, sizeof(*state),
                              &dSize, (LPOVERLAPPED) NULL);
    }

    if (!ret) {
        return -EFAULT;
    } else {
        return 0;
    }
}
/*
 * Transfer the vCPU FPU state (fx_layout) through the HAX device:
 * set != 0 writes @fl to the vCPU, set == 0 reads it back into @fl.
 * Returns 0 on success, -1 for an invalid fd, -EFAULT if the ioctl fails.
 */
int hax_sync_fpu(CPUArchState *env, struct fx_layout *fl, int set)
{
    HANDLE hDeviceVCPU;
    DWORD dSize = 0;
    int ok;
    hax_fd fd = hax_vcpu_get_fd(env);

    if (hax_invalid_fd(fd)) {
        return -1;
    }
    hDeviceVCPU = fd;

    /* The direction selects both the ioctl code and which side of the
     * DeviceIoControl buffer pair carries the payload. */
    if (set) {
        ok = DeviceIoControl(hDeviceVCPU, HAX_VCPU_IOCTL_SET_FPU,
                             fl, sizeof(*fl), NULL, 0,
                             &dSize, (LPOVERLAPPED) NULL);
    } else {
        ok = DeviceIoControl(hDeviceVCPU, HAX_VCPU_IOCTL_GET_FPU,
                             NULL, 0, fl, sizeof(*fl),
                             &dSize, (LPOVERLAPPED) NULL);
    }

    return ok ? 0 : -EFAULT;
}
/*
 * Transfer MSR values through the HAX device: set != 0 writes @msrs to the
 * vCPU, set == 0 fills @msrs from it.
 * Returns 0 on success, -1 for an invalid fd, -EFAULT if the ioctl fails.
 */
int hax_sync_msr(CPUArchState *env, struct hax_msr_data *msrs, int set)
{
    HANDLE hDeviceVCPU;
    DWORD dSize = 0;
    int ok;
    hax_fd fd = hax_vcpu_get_fd(env);

    if (hax_invalid_fd(fd)) {
        return -1;
    }
    hDeviceVCPU = fd;

    /* Unlike the reg/FPU wrappers, MSR transfers pass @msrs as both the
     * input and output buffer in either direction; only the ioctl code
     * differs, so a single call suffices. */
    ok = DeviceIoControl(hDeviceVCPU,
                         set ? HAX_VCPU_IOCTL_SET_MSRS
                             : HAX_VCPU_IOCTL_GET_MSRS,
                         msrs, sizeof(*msrs), msrs, sizeof(*msrs),
                         &dSize, (LPOVERLAPPED) NULL);

    return ok ? 0 : -EFAULT;
}
int hax_host_create_vm(struct hax_state *hax, int *vmid) { int ret; int vm_id = 0; DWORD dSize = 0; if (hax_invalid_fd(hax->fd)) { return -EINVAL; } if (hax->vm) { return 0; } ret = DeviceIoControl(hax->fd, HAX_IOCTL_CREATE_VM, NULL, 0, &vm_id, sizeof(vm_id), &dSize, (LPOVERLAPPED) NULL); if (!ret) { fprintf(stderr, "Failed to create VM. Error code: %lu\n", GetLastError()); return -1; } *vmid = vm_id; return 0; }
int hax_mod_version(struct hax_state *hax, struct hax_module_version *version) { int ret; HANDLE hDevice = hax->fd; /* handle to hax module */ DWORD dSize = 0; DWORD err = 0; if (hax_invalid_fd(hDevice)) { fprintf(stderr, "Invalid fd for hax device!\n"); return -ENODEV; } ret = DeviceIoControl(hDevice, HAX_IOCTL_VERSION, NULL, 0, version, sizeof(*version), &dSize, (LPOVERLAPPED) NULL); if (!ret) { err = GetLastError(); if (err == ERROR_INSUFFICIENT_BUFFER || err == ERROR_MORE_DATA) { fprintf(stderr, "hax module verion is too long to hold.\n"); } fprintf(stderr, "Failed to get Hax module version:%lu\n", err); return -EFAULT; } else { return 0; } }
int hax_capability(struct hax_state *hax, struct hax_capabilityinfo *cap) { int ret; HANDLE hDevice = hax->fd; /* handle to hax module */ DWORD dSize = 0; DWORD err = 0; if (hax_invalid_fd(hDevice)) { fprintf(stderr, "Invalid fd for hax device!\n"); return -ENODEV; } ret = DeviceIoControl(hDevice, HAX_IOCTL_CAPABILITY, NULL, 0, cap, sizeof(*cap), &dSize, (LPOVERLAPPED) NULL); if (!ret) { err = GetLastError(); if (err == ERROR_INSUFFICIENT_BUFFER || err == ERROR_MORE_DATA) { fprintf(stderr, "hax capability is too long to hold.\n"); } fprintf(stderr, "Failed to get Hax capability:%luu\n", err); return -EFAULT; } else { return 0; } }
bool VM_HaxEnabled() { // This function only creates a temporary state for the driver handle hax_state hax; memset(&hax, 0, sizeof(hax_state)); hax.fd = hax_mod_open(); // Is the returned handle valid? if (hax_invalid_fd(hax.fd)) return false; // Determine the capabilities from the driver if (!HaxIsAvailable(&hax)) { hax_mod_close(&hax); return false; } // Check if the version is supported if (!HaxIsSupported(&hax)) { printf("Incompatible HAX version\n"); hax_mod_close(&hax); return false; } hax_mod_close(&hax); return true; }
/*
 * One-time HAX accelerator initialization: open the kernel module, check
 * its capability and version, create the VM, initialize guest memory
 * tracking, and install the interrupt handler.
 * Returns 0 on success, a negative errno-style value on failure.
 */
static int hax_init(ram_addr_t ram_size)
{
    struct hax_state *hax = NULL;
    struct hax_qemu_version qversion;
    int ret;

    hax = &hax_global;
    memset(hax, 0, sizeof(struct hax_state));
    hax->mem_quota = ram_size;

    hax->fd = hax_mod_open();
    if (hax_invalid_fd(hax->fd)) {
        /* Reset to 0 so the error path below skips hax_mod_close();
         * the cleanup code uses fd != 0 as "module is open". */
        hax->fd = 0;
        ret = -ENODEV;
        goto error;
    }

    ret = hax_get_capability(hax);
    if (ret) {
        /* -ENOSPC is passed through unchanged; all other capability
         * failures are reported as -EINVAL.
         * NOTE(review): presumably -ENOSPC means the RAM quota was
         * exceeded — confirm against hax_get_capability(). */
        if (ret != -ENOSPC) {
            ret = -EINVAL;
        }
        goto error;
    }

    if (!hax_version_support(hax)) {
        ret = -EINVAL;
        goto error;
    }

    hax->vm = hax_vm_create(hax);
    if (!hax->vm) {
        fprintf(stderr, "Failed to create HAX VM\n");
        ret = -EINVAL;
        goto error;
    }

    hax_memory_init();

    /* Advertise the QEMU<->HAX API version range to the driver. */
    qversion.cur_version = hax_cur_version;
    qversion.min_version = hax_min_version;
    hax_notify_qemu_version(hax->vm->fd, &qversion);
    cpu_interrupt_handler = hax_handle_interrupt;

    return ret;

error:
    /* Tear down in reverse order of acquisition. */
    if (hax->vm) {
        hax_vm_destroy(hax->vm);
    }
    if (hax->fd) {
        hax_mod_close(hax);
    }
    return ret;
}
/*
 * Report QEMU's supported HAX API version range to the driver behind @vm_fd.
 * Returns 0 on success, -EINVAL for a bad fd, -1 on ioctl failure.
 */
int hax_notify_qemu_version(hax_fd vm_fd, struct hax_qemu_version *qversion)
{
    DWORD bytesReturned = 0;
    int ok;

    if (hax_invalid_fd(vm_fd)) {
        return -EINVAL;
    }

    /* Fire-and-forget ioctl: version data goes in, nothing comes back. */
    ok = DeviceIoControl(vm_fd, HAX_VM_IOCTL_NOTIFY_QEMU_VERSION,
                         qversion, sizeof(struct hax_qemu_version),
                         NULL, 0, &bytesReturned, (LPOVERLAPPED) NULL);
    if (!ok) {
        fprintf(stderr, "Failed to notify qemu API version\n");
        return -1;
    }
    return 0;
}
/*
 * Inject interrupt @vector into the vCPU backing @env.
 * Returns 0 on success, -1 for an invalid fd, -EFAULT if the ioctl fails.
 */
int hax_inject_interrupt(CPUArchState *env, int vector)
{
    int ret;
    hax_fd fd;
    HANDLE hDeviceVCPU;
    /* Fix: was uninitialized; initialize for consistency with the other
     * DeviceIoControl wrappers in this file. */
    DWORD dSize = 0;

    fd = hax_vcpu_get_fd(env);
    if (hax_invalid_fd(fd)) {
        return -1;
    }

    hDeviceVCPU = fd;
    ret = DeviceIoControl(hDeviceVCPU, HAX_VCPU_IOCTL_INTERRUPT,
                          &vector, sizeof(vector), NULL, 0,
                          &dSize, (LPOVERLAPPED) NULL);
    if (!ret) {
        return -EFAULT;
    } else {
        return 0;
    }
}
/*
 * Allocate and create a vCPU in @Hax's VM, using the first free slot in the
 * VM's vcpus[] table. Returns the new vCPU state, or nullptr on failure
 * (no VM, table full, allocation failure, or driver error).
 */
hax_vcpu_state *VCpu_Create(hax_state *Hax)
{
    if (!Hax->vm) {
        printf("vCPU created failed, vm is null\n");
        return nullptr;
    }

    // Find the next free vCPU index
    int cpuId = -1;
    for (int i = 0; i < ARRAYSIZE(Hax->vm->vcpus); i++) {
        if (Hax->vm->vcpus[i])
            continue;

        cpuId = i;
        break;
    }

    if (cpuId == -1) {
        printf("Maximum number of vCPUs have been allocated for this VM!\n");
        return nullptr;
    }

    // Allocate the virtual CPU instance structure.
    // Fix: calloc replaces the malloc+memset pair — it zero-initializes
    // in one step and checks the size multiplication for overflow.
    auto vCPU = (hax_vcpu_state *)calloc(1, sizeof(hax_vcpu_state));
    if (!vCPU) {
        printf("Failed to alloc vCPU state\n");
        return nullptr;
    }

    // Tell the driver to create the vCPU instance
    vCPU->vcpu_id = cpuId;
    if (hax_host_create_vcpu(Hax->vm->fd, cpuId) < 0) {
        printf("Failed to create vCPU %x\n", cpuId);
        goto error;
    }

    // Grab a handle to the driver's instance
    vCPU->fd = hax_host_open_vcpu(Hax->vm->id, cpuId);
    if (hax_invalid_fd(vCPU->fd)) {
        printf("Failed to open the vCPU handle\n");
        goto error;
    }

    // Mark the CPU index as used with a pointer
    Hax->vm->vcpus[cpuId] = vCPU;

    // Create the tunnel to kernel data
    if (hax_host_setup_vcpu_channel(vCPU) < 0) {
        printf("Invalid HAX tunnel size \n");
        goto error;
    }

    return vCPU;

error:
    // vCPU and tunnel will be closed automatically
    if (vCPU && !hax_invalid_fd(vCPU->fd))
        hax_close_fd(vCPU->fd);

    Hax->vm->vcpus[cpuId] = nullptr;
    free(vCPU);
    return nullptr;
}