/* Map a guest-physical range into the host address space, raising a
   Ruby exception if the mapping fails. */
void *
_vm_map_gpa(struct vmctx *ctx, vm_paddr_t gaddr, size_t len)
{
	void *paddr = vm_map_gpa(ctx, gaddr, len);

	if (!paddr)
		rb_raise(rb_eException, "vm_map_gpa failed");
	return paddr;
}
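/*
 * Illustrative caller (not part of the original source): a Ruby
 * extension method that reads one byte of guest-physical memory via the
 * checked wrapper above.  The method name, the Data_Get_Struct wrapping
 * of the vmctx pointer, and the argument conventions are assumptions
 * made only for this sketch.
 */
static VALUE
vm_read_byte(VALUE self, VALUE gpa)
{
	struct vmctx *ctx;
	uint8_t *hva;

	Data_Get_Struct(self, struct vmctx, ctx);	/* assumed object layout */
	hva = _vm_map_gpa(ctx, NUM2ULL(gpa), 1);	/* raises on failure */
	return INT2FIX(*hva);
}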
int
vm_copy_setup(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
    uint64_t gla, size_t len, int prot, struct iovec *iov, int iovcnt,
    int *fault)
{
	void *va;
	uint64_t gpa;
	int error, i, n, off;

	/* Clear the caller's iovec array before filling it in. */
	for (i = 0; i < iovcnt; i++) {
		iov[i].iov_base = 0;
		iov[i].iov_len = 0;
	}

	while (len) {
		assert(iovcnt > 0);

		/* Translate the guest-linear address to guest-physical. */
		error = vm_gla2gpa(ctx, vcpu, paging, gla, prot, &gpa, fault);
		if (error || *fault)
			return (error);

		/* Map at most one page per iovec entry. */
		off = gpa & PAGE_MASK;
		n = min(len, PAGE_SIZE - off);

		va = vm_map_gpa(ctx, gpa, n);
		if (va == NULL)
			return (EFAULT);

		iov->iov_base = va;
		iov->iov_len = n;
		iov++;
		iovcnt--;

		gla += n;
		len -= n;
	}
	return (0);
}
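/*
 * Sketch of how a caller might consume the iovec array that
 * vm_copy_setup() fills in: walk the entries and copy the guest data
 * (already mapped into host VA space) into a flat buffer.  This mirrors
 * what libvmmapi's vm_copyin() does, but the function below is written
 * for illustration here and is not the library routine itself.
 */
static void
copy_from_guest_iov(struct iovec *iov, void *dst, size_t len)
{
	char *d = dst;
	size_t n;

	while (len > 0) {
		assert(iov->iov_len > 0);
		n = len < iov->iov_len ? len : iov->iov_len;
		memcpy(d, iov->iov_base, n);	/* iov_base is a host VA */
		d += n;
		len -= n;
		iov++;
	}
}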
int
grub_emu_bhyve_init (const char *name, grub_uint64_t memsz)
{
  int err;
  int val;
  grub_uint64_t lomemsz;
#ifdef VMMAPI_VERSION
  int need_reinit = 0;
#endif

  err = vm_create (name);
  if (err != 0)
    {
      if (errno != EEXIST)
        {
          fprintf (stderr, "Could not create VM %s\n", name);
          return GRUB_ERR_ACCESS_DENIED;
        }
#ifdef VMMAPI_VERSION
      need_reinit = 1;
#endif
    }

  bhyve_ctx = vm_open (name);
  if (bhyve_ctx == NULL)
    {
      fprintf (stderr, "Could not open VM %s\n", name);
      return GRUB_ERR_BUG;
    }

#ifdef VMMAPI_VERSION
  if (need_reinit)
    {
      err = vm_reinit (bhyve_ctx);
      if (err != 0)
        {
          fprintf (stderr, "Could not reinit VM %s\n", name);
          return GRUB_ERR_BUG;
        }
    }
#endif

  val = 0;
  err = vm_get_capability (bhyve_ctx, 0, VM_CAP_UNRESTRICTED_GUEST, &val);
  if (err != 0)
    {
      fprintf (stderr, "VM unrestricted guest capability required\n");
      return GRUB_ERR_BAD_DEVICE;
    }

  err = vm_set_capability (bhyve_ctx, 0, VM_CAP_UNRESTRICTED_GUEST, 1);
  if (err != 0)
    {
      fprintf (stderr, "Could not enable unrestricted guest for VM\n");
      return GRUB_ERR_BUG;
    }

  err = vm_setup_memory (bhyve_ctx, memsz, VM_MMAP_ALL);
  if (err)
    {
      fprintf (stderr, "Could not setup memory for VM\n");
      return GRUB_ERR_OUT_OF_MEMORY;
    }

  lomemsz = vm_get_lowmem_limit (bhyve_ctx);

  /*
   * Extract the virtual address of the mapped guest memory.
   */
  if (memsz >= lomemsz)
    {
      bhyve_g2h.lomem = lomemsz;
      bhyve_g2h.himem = memsz - lomemsz;
      bhyve_g2h.himem_ptr = vm_map_gpa (bhyve_ctx, 4*GB, bhyve_g2h.himem);
    }
  else
    {
      bhyve_g2h.lomem = memsz;
      bhyve_g2h.himem = 0;
    }
  bhyve_g2h.lomem_ptr = vm_map_gpa (bhyve_ctx, 0, bhyve_g2h.lomem);

  /*
   * bhyve is going to return the following memory segments
   *
   *   0     - 640K  - usable
   *   640K  - 1MB   - vga hole, BIOS, not usable
   *   1MB   - lomem - usable
   *   lomem - 4G    - not usable
   *   4G    - himem - usable [optional if himem != 0]
   */
  bhyve_info.nsegs = 2;
  bhyve_info.segs = bhyve_mm;

  bhyve_mm[0].start = 0x0;
  bhyve_mm[0].end = 640*1024 - 1;	/* 640K */
  bhyve_mm[0].type = GRUB_MEMORY_AVAILABLE;

  bhyve_mm[1].start = 1024*1024;
  bhyve_mm[1].end = (memsz > lomemsz) ? lomemsz : memsz;
  bhyve_mm[1].type = GRUB_MEMORY_AVAILABLE;

  if (memsz > lomemsz)
    {
      bhyve_info.nsegs++;
      bhyve_mm[2].start = 4*GB;
      bhyve_mm[2].end = (memsz - lomemsz) + bhyve_mm[2].start;
      bhyve_mm[2].type = GRUB_MEMORY_AVAILABLE;
    }

  /* The boot-code size is just the GDT that needs to be copied */
  bhyve_info.bootsz = sizeof (bhyve_gdt);

  return 0;
}
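/*
 * Hedged sketch (the helper name is hypothetical): translate a
 * guest-physical address into a host pointer using the lomem/himem
 * split recorded in bhyve_g2h by grub_emu_bhyve_init() above, assuming
 * lomem_ptr/himem_ptr hold the host mappings returned by vm_map_gpa().
 * Addresses in the hole between bhyve_g2h.lomem and 4GB have no backing
 * guest memory and yield NULL.
 */
static void *
bhyve_gpa_to_hva (grub_uint64_t gpa)
{
  if (gpa < bhyve_g2h.lomem)
    return (char *) bhyve_g2h.lomem_ptr + gpa;
  if (gpa >= 4*GB && gpa < 4*GB + bhyve_g2h.himem)
    return (char *) bhyve_g2h.himem_ptr + (gpa - 4*GB);
  return NULL;
}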
void *
paddr_guest2host(struct vmctx *ctx, uintptr_t gaddr, size_t len)
{

	return (vm_map_gpa(ctx, gaddr, len));
}
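/*
 * Illustrative use of paddr_guest2host() above; the surrounding helper
 * name and calling convention are assumptions made for this example.
 * A device model would translate a guest-physical address to a host
 * pointer before dereferencing it, bailing out if the range is not
 * backed by mapped guest memory.
 */
static int
read_guest_u32(struct vmctx *ctx, uintptr_t gpa, uint32_t *valp)
{
	uint32_t *p;

	p = paddr_guest2host(ctx, gpa, sizeof(*p));
	if (p == NULL)
		return (-1);	/* gpa not within a mapped segment */
	*valp = *p;
	return (0);
}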