/**
 * Syscall: translate a virtual address in address space 'id' to its
 * physical address and copy the result out to user space.
 *
 * @id:    target address space identifier
 * @vaddr: virtual address to translate
 * @paddr: user-space destination for the result (may be NULL to only
 *         probe whether the translation succeeds)
 *
 * Returns 0 on success, the aspace_virt_to_phys() error code on a failed
 * translation, or -EFAULT if the copy-out to user space fails.
 */
int
sys_aspace_virt_to_phys(id_t              id,
                        vaddr_t           vaddr,
                        paddr_t __user *  paddr)
{
	paddr_t result;
	int     rc;

	rc = aspace_virt_to_phys(id, vaddr, &result);
	if (rc != 0)
		return rc;

	/* Only copy out if the caller actually supplied a destination. */
	if (paddr && copy_to_user(paddr, &result, sizeof(result)))
		return -EFAULT;

	return 0;
}
static int hypervisor_api_test(void) { volatile size_t iso_size = (size_t) &_binary_hello_world_rawdata_size; volatile vaddr_t iso_start = (vaddr_t) &_binary_hello_world_rawdata_start; volatile vaddr_t iso_end = (vaddr_t) &_binary_hello_world_rawdata_end; paddr_t iso_start_paddr; id_t my_aspace; int status; /* Make sure there is an embedded ISO image */ if (iso_size != (iso_end - iso_start)) { //printf(" Failed, no ISO image available.\n"); return -1; } printf("\n"); printf("TEST BEGIN: Hypervisor API\n"); printf(" Starting a guest OS...\n"); /* Determine the physical address of the ISO image */ aspace_get_myid(&my_aspace); aspace_virt_to_phys(my_aspace, iso_start, &iso_start_paddr); /* Fire it up! */ status = v3_start_guest(iso_start_paddr, iso_size); if (status) { printf(" Failed (status=%d).\n", status); return -1; } printf(" Success.\n"); printf("TEST END: Hypervisor API\n"); return 0; }
/* Mapping a segment is a four step process:
 *   (1) xemem_get/attach the seg into our aspace
 *   (2) invoke aspace_virt_to_phys on the attached region to generate a page
 *       frame list
 *   (3) invoke aspace_map_region on the target region in the target
 *       aspace
 *   (4) detach the xemem attachment (hold onto the apid)
 *
 * Returns 0 on success, -1 on failure (all partial mappings are undone).
 *
 * BUG FIX: the original text contained "®ion" (a mangled HTML "&reg;"
 * entity) in place of "&region" in the memset/pmem_add/pmem_free_umem/
 * pmem_del calls below, which does not compile; the intended address-of
 * expressions are restored.
 */
static int
__map_hio_segment(hio_segment_t * seg,
                  id_t            aspace_id)
{
	xemem_apid_t apid;
	void       * local_attach;
	uint32_t     nr_pages, page_size, i, j;
	int          status;

	/* MY_ID is a placeholder for "the calling process's aspace". */
	if (aspace_id == MY_ID)
		aspace_get_myid(&aspace_id);

	/* (1) xemem get/attach */
	{
		struct xemem_addr addr;

		apid = xemem_get(seg->segid, XEMEM_RDWR);
		if (apid == -1) {
			printf("Could not get xemem segid %li\n", seg->segid);
			return -1;
		}

		addr.apid   = apid;
		addr.offset = 0;

		local_attach = xemem_attach(addr, seg->size, NULL);
		if (local_attach == MAP_FAILED) {
			printf("Could not attach xemem apid %li (%s)\n",
				addr.apid, strerror(errno));
			goto out_attach;
		}
	}

	/* (2) figure out the pfns and (3) map them to the target aspace */
	{
		vaddr_t local_vaddr, target_vaddr;
		paddr_t paddr;
		struct pmem_region region;

		page_size = seg->page_size;
		nr_pages  = seg->size / seg->page_size;

		for (i = 0; i < nr_pages; i++) {
			local_vaddr  = (addr_t)local_attach + (seg->page_size * i);
			target_vaddr = (addr_t)seg->vaddr   + (seg->page_size * i);

			/* (2) */
			status = aspace_virt_to_phys(MY_ID, local_vaddr, &paddr);
			if (status != 0) {
				printf("aspace_virt_to_phys failed (%s)\n", strerror(errno));
				goto out_virt_to_phys;
			}

			/* Temporary hack: add umem so we can use aspace_map_region below.
			 * (the kernel won't let us map non-umem memory) */
			{
				memset(&region, 0, sizeof(struct pmem_region));
				region.start            = paddr;
				region.end              = paddr + seg->page_size;
				region.type_is_set      = true;
				region.type             = PMEM_TYPE_UMEM;
				region.allocated_is_set = true;
				region.allocated        = true;

				status = pmem_add(&region);
				if (status != 0) {
					printf("pmem_add failed (%s)\n", strerror(errno));
					goto out_umem;
				}
			}

			/* (3) */
			status = aspace_map_region(
				aspace_id,
				target_vaddr,
				seg->page_size,
				VM_READ | VM_WRITE | VM_USER,
				seg->page_size,
				"hio",
				paddr
			);
			if (status != 0) {
				printf("aspace_map_region failed (%d) (%s)\n",
					status, strerror(errno));
				goto out_map_pmem;
			}

			/* Remove umem now. Unclear how to do it later */
			pmem_free_umem(&region);
			pmem_del(&region);
		}
	}

	/* (4) teardown local mapping */
	xemem_detach(local_attach);

	return 0;

out_map_pmem:
out_umem:
out_virt_to_phys:
	/* Unwind the pages already mapped into the target aspace (0..i-1). */
	for (j = 0; j < i; j++) {
		aspace_unmap_region(
			aspace_id,
			(addr_t)seg->vaddr + (j * seg->page_size),
			seg->page_size
		);
	}
	xemem_detach(local_attach);

out_attach:
	xemem_release(apid);
	return -1;
}
static long cmd_ioctl(struct file * filp, unsigned int ioctl, unsigned long arg) { char __user * uptr = (char __user *)arg; int ret = 0; switch (ioctl) { case PISCES_STAT_FILE: { struct pisces_user_file_info file_info; loff_t file_size = 0; u64 file_handle = 0; char * file_path = NULL; memset(&file_info, 0, sizeof(struct pisces_user_file_info)); if (copy_from_user(&file_info, uptr, sizeof(struct pisces_user_file_info))) { printk(KERN_ERR "Unable to copy to file info from user space\n"); return -1; } file_path = kmem_alloc(file_info.path_len + 1); if (copy_from_user(file_path, uptr + sizeof(struct pisces_user_file_info), file_info.path_len)) { printk(KERN_ERR "Unable to copy file path from user space\n"); kmem_free(file_path); return -1; } file_handle = pisces_file_open(file_path, O_RDONLY); if (file_handle == 0) { printk(KERN_ERR "Could not find file (%s)\n", file_path); kmem_free(file_path); return -1; } kmem_free(file_path); file_size = pisces_file_size(file_handle); pisces_file_close(file_handle); return file_size; break; } case PISCES_LOAD_FILE: { struct pisces_user_file_info file_info; loff_t file_size = 0; u64 file_handle = 0; paddr_t addr_pa = 0; ssize_t bytes_read = 0; char * file_path = NULL; memset(&file_info, 0, sizeof(struct pisces_user_file_info)); if (copy_from_user(&file_info, uptr, sizeof(struct pisces_user_file_info))) { printk(KERN_ERR "Unable to copy to file info from user space\n"); return -1; } if (aspace_virt_to_phys(current->aspace->id, file_info.user_addr, &addr_pa) < 0) { printk(KERN_ERR "Invalid user address used for loading file\n"); return -1; } file_path = kmem_alloc(file_info.path_len + 1); if (copy_from_user(file_path, uptr + sizeof(struct pisces_user_file_info), file_info.path_len)) { printk(KERN_ERR "Unable to copy file path from user space\n"); kmem_free(file_path); return -1; } file_handle = pisces_file_open(file_path, O_RDONLY); if (file_handle == 0) { printk(KERN_ERR "Could not find file (%s)\n", file_path); 
kmem_free(file_path); return -1; } file_size = pisces_file_size(file_handle); bytes_read = pisces_file_read(file_handle, __va(addr_pa), file_size, 0); pisces_file_close(file_handle); if (bytes_read != file_size) { printk(KERN_ERR "Could not load file (%s) [only read %ld bytes]\n", file_path, bytes_read); kmem_free(file_path); return -1; } kmem_free(file_path); return 0; break; } case PISCES_WRITE_FILE: default: printk(KERN_ERR "Invalid Pisces IOCTL (%d)\n", ioctl); return -1; } return ret; }
/* * User ioctl to the HIO driver. */ static long hio_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { long ret = 0; switch (cmd) { /* Get xemem address from userspace * Enter the dispatcher loop */ case HIO_IOCTL_ENGINE_ATTACH: { void *buf = (void __user *)arg; paddr_t addr_pa = 0; int *ptr; printk("HIO engine attach...\n"); if (aspace_virt_to_phys(current->aspace->id, (vaddr_t) buf, &addr_pa) < 0) { printk(KERN_ERR "Invalid user address for hio engine attach: %p\n", buf); return -1; } ptr = __va(addr_pa); printk(" buffer uva %p, pa %p, kva %p\n", buf, (void *)addr_pa, ptr); printk(" content %x\n", *ptr); engine = (struct hio_engine *)ptr; //engine_dispachter_loop(); //engine = NULL; break; } case HIO_IOCTL_SYSCALL_RET: { struct hio_syscall_ret_t cur_ret; if (copy_from_user(&cur_ret, (void __user *)arg, sizeof(struct hio_syscall_ret_t))) { printk(KERN_ERR "Error copying hio syscall cur_ret from userspace\n"); return -EFAULT; } struct pending_ret *pending_ret = &pending_ret_array[cur_ret.stub_id]; if (pending_ret->is_pending) { printk(KERN_ERR "Another pending_ret is pending for stub_id %d???\n", cur_ret.stub_id); ret = -1; break; } else { spin_lock(&pending_ret->lock); pending_ret->ret_val = cur_ret.ret_val; //printk(KERN_INFO "Return value is %d\n", pending_ret->ret_val); pending_ret->is_pending = true; spin_unlock(&pending_ret->lock); } wake_up_interruptible(&pending_ret->waitq); break; } default: ret = -ENOIOCTLCMD; break; } return ret; }