/* Smoke test for the address space management API.
 *
 * Walks the full lifecycle: look up our own aspace ID, create a brand
 * new aspace, SMARTMAP ourselves into it, dump it to the console, undo
 * the SMARTMAP, and destroy the aspace again.
 *
 * Returns 0 if every call succeeded, -1 on the first failure.
 */
static int
aspace_api_test(void)
{
	id_t my_id, new_id;
	int status;

	printf("\n");
	printf("TEST BEGIN: Address Space Management\n");

	if ((status = aspace_get_myid(&my_id)) != 0) {
		printf("ERROR: aspace_get_myid() status=%d\n", status);
		return -1;
	}
	printf(" My address space ID is %u\n", my_id);

	printf(" Creating a new aspace: ");
	if ((status = aspace_create(ANY_ID, "TEST-ASPACE", &new_id)) != 0) {
		printf("\nERROR: aspace_create() status=%d\n", status);
		return -1;
	}
	printf("id=%u\n", new_id);

	printf(" Using SMARTMAP to map myself into aspace %u\n", new_id);
	if ((status = aspace_smartmap(my_id, new_id,
	                              SMARTMAP_ALIGN, SMARTMAP_ALIGN)) != 0) {
		printf("ERROR: aspace_smartmap() status=%d\n", status);
		return -1;
	}
	aspace_dump2console(new_id);

	if ((status = aspace_unsmartmap(my_id, new_id)) != 0) {
		printf("ERROR: aspace_unsmartmap() status=%d\n", status);
		return -1;
	}

	printf(" Destroying a aspace %u: ", new_id);
	if ((status = aspace_destroy(new_id)) != 0) {
		printf("ERROR: aspace_destroy() status=%d\n", status);
		return -1;
	}
	printf("OK\n");

	printf("TEST END: Address Space Management\n");
	return 0;
}
/* Report how long the current task has run and how much energy it used.
 *
 * Queries task_meas() for the current aspace/task and prints runtime,
 * total energy, and average power. Always returns 0.
 *
 * Fix: id_t values were printed with %d; id_t is printed as unsigned
 * (%u) everywhere else in this file, and a signed specifier for an
 * unsigned argument is a format mismatch.
 *
 * NOTE(review): the time divisor 10e9 equals 1e10. If `time` is in
 * nanoseconds the conventional divisor would be 1e9 — confirm the unit
 * reported by task_meas() before changing it; kept as-is here.
 */
static int
task_meas_api_test(void)
{
	id_t aspace_id, task_id = gettid();
	uint64_t time = 0, energy = 0, unit_energy = 0;
	double seconds, joules;

	printf("TEST BEGIN: Task Measurement\n");

	aspace_get_myid(&aspace_id);
	task_meas(aspace_id, task_id, &time, &energy, &unit_energy);

	/* Scale the raw counters: energy is multiplied by 2^-unit_energy
	 * to convert to Joules (hoisted so pow() runs once, not twice). */
	seconds = time / 10e9;
	joules  = energy * pow(0.5, unit_energy);

	printf(" aspace %u task %u ran for %g Seconds with total energy %g Joules and average power of %g Watts\n",
	       aspace_id, task_id, seconds, joules, joules / seconds);

	printf("TEST END: Task Measurement\n");
	return 0;
}
static void unmap( void* ptr, size_t extent ) { id_t my_id; int status; // Debug1( "ptr=%p extent=%#lx\n", ptr, extent ); if ( ( status = aspace_get_myid( &my_id ) ) ) { printf("ERROR: aspace_get_myid() status=%d\n", status); return; } if ( ( status = aspace_unmap_pmem( my_id, (vaddr_t) ptr, extent ) ) ) { printf("ERROR: aspace_unmap_pmem() status=%d\n", status); } if ( ( status = aspace_del_region( my_id, (vaddr_t) ptr, extent) ) ) { printf("ERROR: aspace_del_region() status=%d\n", status); } }
static int hypervisor_api_test(void) { volatile size_t iso_size = (size_t) &_binary_hello_world_rawdata_size; volatile vaddr_t iso_start = (vaddr_t) &_binary_hello_world_rawdata_start; volatile vaddr_t iso_end = (vaddr_t) &_binary_hello_world_rawdata_end; paddr_t iso_start_paddr; id_t my_aspace; int status; /* Make sure there is an embedded ISO image */ if (iso_size != (iso_end - iso_start)) { //printf(" Failed, no ISO image available.\n"); return -1; } printf("\n"); printf("TEST BEGIN: Hypervisor API\n"); printf(" Starting a guest OS...\n"); /* Determine the physical address of the ISO image */ aspace_get_myid(&my_aspace); aspace_virt_to_phys(my_aspace, iso_start, &iso_start_paddr); /* Fire it up! */ status = v3_start_guest(iso_start_paddr, iso_size); if (status) { printf(" Failed (status=%d).\n", status); return -1; } printf(" Success.\n"); printf("TEST END: Hypervisor API\n"); return 0; }
/* Map the physical range [paddr, paddr+extent) into the calling address
 * space: find a page-aligned hole, register a region there, then map
 * the physical memory behind it.
 *
 * Returns the chosen virtual address, or (void *)-1 on failure. If the
 * final mapping step fails, the region that was just added is deleted
 * again so no half-built mapping is left behind.
 */
static void *
map(paddr_t paddr, size_t extent)
{
	id_t aspace;
	vaddr_t va;
	int rc;

	rc = aspace_get_myid(&aspace);
	if (rc) {
		printf("ERROR: aspace_get_myid() status=%d\n", rc);
		return (void *) -1;
	}

	rc = aspace_find_hole(aspace, 0, extent, PAGE_SIZE, &va);
	if (rc) {
		printf("ERROR: aspace_find_hole() status=%d\n", rc);
		return (void *) -1;
	}

	rc = aspace_add_region(aspace, va, extent,
	                       VM_READ | VM_WRITE | VM_USER,
	                       PAGE_SIZE, "application");
	if (rc) {
		printf("ERROR: aspace_add_region() status=%d\n", rc);
		return (void *) -1;
	}

	rc = aspace_map_pmem(aspace, paddr, va, extent);
	if (rc) {
		printf("ERROR: aspace_map_pmem() status=%d\n", rc);
		/* Roll back the region we just added. */
		aspace_del_region(aspace, va, extent);
		return (void *) -1;
	}

	return (void *) va;
}
/* Map an HIO segment into the target address space.
 *
 * Mapping a segment is a four step process:
 *  (1) xemem_get/attach the seg into our aspace
 *  (2) invoke aspace_virt_to_phys on the attached region to generate a
 *      page frame list
 *  (3) invoke aspace_map_region on the target region in the target
 *      aspace
 *  (4) detach the xemem attachment (hold onto the apid)
 *
 * Fix: the address-of-region expressions had been corrupted by an HTML
 * entity substitution ("&reg" turned into the registered-trademark
 * sign, leaving "®ion"); restored to `&region` in all four places so
 * the code compiles again. Also dropped the unused local `page_size`.
 *
 * Returns 0 on success, -1 on failure (pages already mapped into the
 * target aspace are unwound).
 */
static int
__map_hio_segment(hio_segment_t *seg, id_t aspace_id)
{
	xemem_apid_t apid;
	void *local_attach;
	uint32_t nr_pages, i, j;
	int status;

	if (aspace_id == MY_ID)
		aspace_get_myid(&aspace_id);

	/* (1) xemem get/attach */
	{
		struct xemem_addr addr;

		apid = xemem_get(seg->segid, XEMEM_RDWR);
		if (apid == -1) {
			printf("Could not get xemem segid %li\n", seg->segid);
			return -1;
		}

		addr.apid = apid;
		addr.offset = 0;

		local_attach = xemem_attach(addr, seg->size, NULL);
		if (local_attach == MAP_FAILED) {
			printf("Could not attach xemem apid %li (%s)\n",
			       addr.apid, strerror(errno));
			goto out_attach;
		}
	}

	/* (2) figure out the pfns and (3) map them to the target aspace */
	{
		vaddr_t local_vaddr, target_vaddr;
		paddr_t paddr;
		struct pmem_region region;

		nr_pages = seg->size / seg->page_size;

		for (i = 0; i < nr_pages; i++) {
			local_vaddr = (addr_t)local_attach + (seg->page_size * i);
			target_vaddr = (addr_t)seg->vaddr + (seg->page_size * i);

			/* (2) */
			status = aspace_virt_to_phys(MY_ID, local_vaddr, &paddr);
			if (status != 0) {
				printf("aspace_virt_to_phys failed (%s)\n",
				       strerror(errno));
				goto out_virt_to_phys;
			}

			/* Temporary hack: add umem so we can use
			 * aspace_map_region below.
			 * (the kernel won't let us map non-umem memory) */
			{
				memset(&region, 0, sizeof(struct pmem_region));

				region.start = paddr;
				region.end = paddr + seg->page_size;
				region.type_is_set = true;
				region.type = PMEM_TYPE_UMEM;
				region.allocated_is_set = true;
				region.allocated = true;

				status = pmem_add(&region);
				if (status != 0) {
					printf("pmem_add failed (%s)\n",
					       strerror(errno));
					goto out_umem;
				}
			}

			/* (3) */
			status = aspace_map_region(
					aspace_id,
					target_vaddr,
					seg->page_size,
					VM_READ | VM_WRITE | VM_USER,
					seg->page_size,
					"hio",
					paddr);
			if (status != 0) {
				printf("aspace_map_region failed (%d) (%s)\n",
				       status, strerror(errno));
				goto out_map_pmem;
			}

			/* Remove umem now. Unclear how to do it later */
			pmem_free_umem(&region);
			pmem_del(&region);
		}
	}

	/* (4) teardown local mapping */
	xemem_detach(local_attach);
	return 0;

out_map_pmem:
out_umem:
out_virt_to_phys:
	/* Unwind any pages already mapped into the target aspace. */
	for (j = 0; j < i; j++) {
		aspace_unmap_region(aspace_id,
		                    (addr_t)seg->vaddr + (j * seg->page_size),
		                    seg->page_size);
	}
	xemem_detach(local_attach);
out_attach:
	xemem_release(apid);
	return -1;
}
static int start_thread(int id, id_t cpu) { int status; id_t aspace_id; vaddr_t stack_ptr; user_cpumask_t cpumask; aspace_get_myid(&aspace_id); stack_ptr = (vaddr_t)malloc(THREAD_STACK_SIZE); cpus_clear(cpumask); cpu_set(cpu, cpumask); start_state_t start_state = { .task_id = id, .user_id = 1, .group_id = 1, .aspace_id = aspace_id, .entry_point = (vaddr_t)&simple_task, .stack_ptr = stack_ptr + THREAD_STACK_SIZE, .cpu_id = cpu, .edf.slice = SLICE, .edf.period = PERIOD, }; sprintf(start_state.task_name, "cpu%u-task%d", cpu ,id); printf(" *** Creating Task %s (S: %llu, T: %llu) on cpu %d\n", start_state.task_name, (unsigned long long)start_state.edf.slice, (unsigned long long)start_state.edf.period, cpu); status = task_create(&start_state, NULL); if (status) { printf("ERROR: failed to create thread (status=%d) on cpu %d.\n", status, cpu); return -1; } return 0; } static int edf_sched_test(void) { int status; unsigned i; id_t my_id; cpu_set_t cpu_mask; pthread_attr_t attr; printf("\n EDF TEST BEGIN (Running for %d seconds)\n",TEST_TIME); status = aspace_get_myid(&my_id); if (status) { printf("ERROR: task_get_myid() status=%d\n", status); return -1; } pthread_attr_init(&attr); pthread_attr_setstacksize(&attr, 1024 * 32); printf(" My Linux process id (PID) is: %d\n", getpid()); status = pthread_getaffinity_np(pthread_self(), sizeof(cpu_mask), &cpu_mask); if (status) { printf(" ERROR: pthread_getaffinity_np() status=%d\n", status); return -1; } printf(" My task ID is %u\n", my_id); printf(" The following CPUs are in the cpumask:\n "); for (i = CPU_MIN_ID; i <= CPU_MAX_ID; i++) { if (CPU_ISSET(i, &cpu_mask)) printf("%d ", i); } printf("\n"); printf("Creating two EDF threads on each CPU:\n"); for (i = CPU_MIN_ID; i <= CPU_MAX_ID; i++) { if (CPU_ISSET(i, &cpu_mask)) { printf("CPU %d\n", i); start_thread(0x1000+i,i); start_thread(0x1100+i,i); } } printf("\n"); return 0; }
int main(int argc, char ** argv, char * envp[]) { struct pisces_cmd cmd; int pisces_fd = 0; memset(&cmd, 0, sizeof(struct pisces_cmd)); printf("Pisces Control Daemon\n"); CPU_ZERO(&enclave_cpus); /* Initialize CPU mask */ CPU_SET(0, &enclave_cpus); /* We always boot on CPU 0 */ pisces_fd = open(PISCES_CMD_PATH, O_RDWR); if (pisces_fd < 0) { printf("Error opening pisces cmd file (%s)\n", PISCES_CMD_PATH); return -1; } while (1) { int ret = 0; ret = read(pisces_fd, &cmd, sizeof(struct pisces_cmd)); if (ret != sizeof(struct pisces_cmd)) { printf("Error reading pisces CMD (ret=%d)\n", ret); break; } //printf("Command=%llu, data_len=%d\n", cmd.cmd, cmd.data_len); switch (cmd.cmd) { case ENCLAVE_CMD_ADD_MEM: { struct cmd_mem_add mem_cmd; struct pmem_region rgn; memset(&mem_cmd, 0, sizeof(struct cmd_mem_add)); memset(&rgn, 0, sizeof(struct pmem_region)); ret = read(pisces_fd, &mem_cmd, sizeof(struct cmd_mem_add)); if (ret != sizeof(struct cmd_mem_add)) { printf("Error reading pisces MEM_ADD CMD (ret=%d)\n", ret); send_resp(pisces_fd, -1); break; } rgn.start = mem_cmd.phys_addr; rgn.end = mem_cmd.phys_addr + mem_cmd.size; rgn.type_is_set = 1; rgn.type = PMEM_TYPE_UMEM; rgn.allocated_is_set = 1; rgn.allocated = 0; printf("Adding pmem (%p - %p)\n", (void *)rgn.start, (void *)rgn.end); ret = pmem_add(&rgn); printf("pmem_add returned %d\n", ret); ret = pmem_zero(&rgn); printf("pmem_zero returned %d\n", ret); send_resp(pisces_fd, 0); break; } case ENCLAVE_CMD_ADD_CPU: { struct cmd_cpu_add cpu_cmd; int logical_cpu = 0; ret = read(pisces_fd, &cpu_cmd, sizeof(struct cmd_cpu_add)); if (ret != sizeof(struct cmd_cpu_add)) { printf("Error reading pisces CPU_ADD CMD (ret=%d)\n", ret); send_resp(pisces_fd, -1); break; } printf("Adding CPU phys_id %llu, apic_id %llu\n", (unsigned long long) cpu_cmd.phys_cpu_id, (unsigned long long) cpu_cmd.apic_id); logical_cpu = phys_cpu_add(cpu_cmd.phys_cpu_id, cpu_cmd.apic_id); if (logical_cpu == -1) { printf("Error Adding CPU to Kitten\n"); 
send_resp(pisces_fd, -1); break; } /* Notify Palacios of New CPU */ if (issue_v3_cmd(V3_ADD_CPU, (uintptr_t)logical_cpu) == -1) { printf("Error: Could not add CPU to Palacios\n"); } CPU_SET(logical_cpu, &enclave_cpus); send_resp(pisces_fd, 0); break; } case ENCLAVE_CMD_REMOVE_CPU: { struct cmd_cpu_add cpu_cmd; int logical_cpu = 0; ret = read(pisces_fd, &cpu_cmd, sizeof(struct cmd_cpu_add)); if (ret != sizeof(struct cmd_cpu_add)) { printf("Error reading pisces CPU_ADD CMD (ret=%d)\n", ret); send_resp(pisces_fd, -1); break; } printf("Removing CPU phys_id %llu, apic_id %llu\n", (unsigned long long) cpu_cmd.phys_cpu_id, (unsigned long long) cpu_cmd.apic_id); logical_cpu = phys_cpu_remove(cpu_cmd.phys_cpu_id, cpu_cmd.apic_id); if (logical_cpu == -1) { printf("Error remove CPU to Kitten\n"); send_resp(pisces_fd, -1); break; } CPU_CLR(logical_cpu, &enclave_cpus); send_resp(pisces_fd, 0); break; } case ENCLAVE_CMD_LAUNCH_JOB: { struct cmd_launch_job * job_cmd = malloc(sizeof(struct cmd_launch_job)); int ret = 0; memset(job_cmd, 0, sizeof(struct cmd_launch_job)); ret = read(pisces_fd, job_cmd, sizeof(struct cmd_launch_job)); if (ret != sizeof(struct cmd_launch_job)) { printf("Error reading Job Launch CMD (ret = %d)\n", ret); free(job_cmd); send_resp(pisces_fd, -1); break; } ret = launch_job(pisces_fd, &(job_cmd->spec)); free(job_cmd); send_resp(pisces_fd, ret); break; } case ENCLAVE_CMD_LOAD_FILE: { struct cmd_load_file * load_cmd = malloc(sizeof(struct cmd_load_file)); int ret = 0; memset(load_cmd, 0, sizeof(struct cmd_load_file)); ret = read(pisces_fd, load_cmd, sizeof(struct cmd_load_file)); if (ret != sizeof(struct cmd_load_file)) { printf("Error reading LOAD FILE CMD (ret = %d)\n", ret); free(load_cmd); send_resp(pisces_fd, -1); break; } ret = load_file(pisces_fd, load_cmd->file_pair.lnx_file, load_cmd->file_pair.lwk_file); free(load_cmd); send_resp(pisces_fd, ret); break; } case ENCLAVE_CMD_STORE_FILE: { break; } case ENCLAVE_CMD_CREATE_VM: { struct 
pisces_user_file_info * file_info = NULL; struct cmd_create_vm vm_cmd; struct pmem_region rgn; struct v3_guest_img guest_img; id_t my_aspace_id; vaddr_t file_addr; size_t file_size = 0; int path_len = 0; int vm_id = -1; int status = 0; memset(&vm_cmd, 0, sizeof(struct cmd_create_vm)); memset(&rgn, 0, sizeof(struct pmem_region)); memset(&guest_img, 0, sizeof(struct v3_guest_img)); ret = read(pisces_fd, &vm_cmd, sizeof(struct cmd_create_vm)); if (ret != sizeof(struct cmd_create_vm)) { send_resp(pisces_fd, -1); printf("Error: CREATE_VM command could not be read\n"); break; } path_len = strlen((char *)vm_cmd.path.file_name) + 1; file_info = malloc(sizeof(struct pisces_user_file_info) + path_len); memset(file_info, 0, sizeof(struct pisces_user_file_info) + path_len); file_info->path_len = path_len; strncpy(file_info->path, (char *)vm_cmd.path.file_name, path_len - 1); file_size = ioctl(pisces_fd, PISCES_STAT_FILE, file_info); status = aspace_get_myid(&my_aspace_id); if (status != 0) return status; if (pmem_alloc_umem(file_size, PAGE_SIZE, &rgn)) { printf("Error: Could not allocate umem for guest image (size=%lu)\n", file_size); break; } pmem_zero(&rgn); status = aspace_map_region_anywhere( my_aspace_id, &file_addr, round_up(file_size, PAGE_SIZE), (VM_USER|VM_READ|VM_WRITE), PAGE_SIZE, "VM Image", rgn.start ); file_info->user_addr = file_addr; ioctl(pisces_fd, PISCES_LOAD_FILE, file_info); guest_img.size = file_size; guest_img.guest_data = (void *)file_info->user_addr; strncpy(guest_img.name, (char *)vm_cmd.path.vm_name, 127); /* Issue VM Create command to Palacios */ vm_id = issue_v3_cmd(V3_CREATE_GUEST, (uintptr_t)&guest_img); aspace_unmap_region(my_aspace_id, file_addr, round_up(file_size, PAGE_SIZE)); pmem_free_umem(&rgn); if (vm_id < 0) { printf("Error: Could not create VM (%s) at (%s) (err=%d)\n", vm_cmd.path.vm_name, vm_cmd.path.file_name, vm_id); send_resp(pisces_fd, vm_id); break; } printf("Created VM (%d)\n", vm_id); send_resp(pisces_fd, vm_id); break; } case 
ENCLAVE_CMD_FREE_VM: { struct cmd_vm_ctrl vm_cmd; ret = read(pisces_fd, &vm_cmd, sizeof(struct cmd_vm_ctrl)); if (ret != sizeof(struct cmd_vm_ctrl)) { send_resp(pisces_fd, -1); break; } /* Signal Palacios to Launch VM */ if (issue_v3_cmd(V3_FREE_GUEST, (uintptr_t)vm_cmd.vm_id) == -1) { send_resp(pisces_fd, -1); break; } send_resp(pisces_fd, 0); break; } case ENCLAVE_CMD_ADD_V3_PCI: { struct cmd_add_pci_dev cmd; struct v3_hw_pci_dev v3_pci_spec; int ret = 0; memset(&cmd, 0, sizeof(struct cmd_add_pci_dev)); printf("Adding V3 PCI Device\n"); ret = read(pisces_fd, &cmd, sizeof(struct cmd_add_pci_dev)); if (ret != sizeof(struct cmd_add_pci_dev)) { send_resp(pisces_fd, -1); break; } memcpy(v3_pci_spec.name, cmd.spec.name, 128); v3_pci_spec.bus = cmd.spec.bus; v3_pci_spec.dev = cmd.spec.dev; v3_pci_spec.func = cmd.spec.func; /* Issue Device Add operation to Palacios */ if (issue_v3_cmd(V3_ADD_PCI, (uintptr_t)&(v3_pci_spec)) == -1) { printf("Error: Could not add PCI device to Palacios\n"); send_resp(pisces_fd, -1); break; } send_resp(pisces_fd, 0); break; } case ENCLAVE_CMD_FREE_V3_PCI: { struct cmd_add_pci_dev cmd; struct v3_hw_pci_dev v3_pci_spec; int ret = 0; memset(&cmd, 0, sizeof(struct cmd_add_pci_dev)); printf("Removing V3 PCI Device\n"); ret = read(pisces_fd, &cmd, sizeof(struct cmd_add_pci_dev)); if (ret != sizeof(struct cmd_add_pci_dev)) { send_resp(pisces_fd, -1); break; } memcpy(v3_pci_spec.name, cmd.spec.name, 128); v3_pci_spec.bus = cmd.spec.bus; v3_pci_spec.dev = cmd.spec.dev; v3_pci_spec.func = cmd.spec.func; /* Issue Device Add operation to Palacios */ if (issue_v3_cmd(V3_REMOVE_PCI, (uintptr_t)&(v3_pci_spec)) == -1) { printf("Error: Could not remove PCI device from Palacios\n"); send_resp(pisces_fd, -1); break; } send_resp(pisces_fd, 0); break; } case ENCLAVE_CMD_LAUNCH_VM: { struct cmd_vm_ctrl vm_cmd; ret = read(pisces_fd, &vm_cmd, sizeof(struct cmd_vm_ctrl)); if (ret != sizeof(struct cmd_vm_ctrl)) { send_resp(pisces_fd, -1); break; } /* Signal Palacios 
to Launch VM */ if (issue_vm_cmd(vm_cmd.vm_id, V3_VM_LAUNCH, (uintptr_t)NULL) == -1) { send_resp(pisces_fd, -1); break; } /* if (xpmem_pisces_add_dom(palacios_fd, vm_cmd.vm_id)) { printf("ERROR: Could not add connect to Palacios VM %d XPMEM channel\n", vm_cmd.vm_id); } */ send_resp(pisces_fd, 0); break; } case ENCLAVE_CMD_STOP_VM: { struct cmd_vm_ctrl vm_cmd; ret = read(pisces_fd, &vm_cmd, sizeof(struct cmd_vm_ctrl)); if (ret != sizeof(struct cmd_vm_ctrl)) { send_resp(pisces_fd, -1); break; } /* Signal Palacios to Launch VM */ if (issue_vm_cmd(vm_cmd.vm_id, V3_VM_STOP, (uintptr_t)NULL) == -1) { send_resp(pisces_fd, -1); break; } send_resp(pisces_fd, 0); break; } case ENCLAVE_CMD_PAUSE_VM: { struct cmd_vm_ctrl vm_cmd; ret = read(pisces_fd, &vm_cmd, sizeof(struct cmd_vm_ctrl)); if (ret != sizeof(struct cmd_vm_ctrl)) { send_resp(pisces_fd, -1); break; } /* Signal Palacios to Launch VM */ if (issue_vm_cmd(vm_cmd.vm_id, V3_VM_PAUSE, (uintptr_t)NULL) == -1) { send_resp(pisces_fd, -1); break; } send_resp(pisces_fd, 0); break; } case ENCLAVE_CMD_CONTINUE_VM: { struct cmd_vm_ctrl vm_cmd; ret = read(pisces_fd, &vm_cmd, sizeof(struct cmd_vm_ctrl)); if (ret != sizeof(struct cmd_vm_ctrl)) { send_resp(pisces_fd, -1); break; } /* Signal Palacios to Launch VM */ if (issue_vm_cmd(vm_cmd.vm_id, V3_VM_CONTINUE, (uintptr_t)NULL) == -1) { send_resp(pisces_fd, -1); break; } send_resp(pisces_fd, 0); break; } case ENCLAVE_CMD_VM_CONS_CONNECT: { struct cmd_vm_ctrl vm_cmd; u64 cons_ring_buf = 0; ret = read(pisces_fd, &vm_cmd, sizeof(struct cmd_vm_ctrl)); if (ret != sizeof(struct cmd_vm_ctrl)) { printf("Error reading console command\n"); send_resp(pisces_fd, -1); break; } /* Signal Palacios to connect the console */ if (issue_vm_cmd(vm_cmd.vm_id, V3_VM_CONSOLE_CONNECT, (uintptr_t)&cons_ring_buf) == -1) { cons_ring_buf = 0; } printf("Cons Ring Buf=%p\n", (void *)cons_ring_buf); send_resp(pisces_fd, cons_ring_buf); break; } case ENCLAVE_CMD_VM_CONS_DISCONNECT: { struct cmd_vm_ctrl vm_cmd; ret 
= read(pisces_fd, &vm_cmd, sizeof(struct cmd_vm_ctrl)); if (ret != sizeof(struct cmd_vm_ctrl)) { send_resp(pisces_fd, -1); break; } /* Send Disconnect Request to Palacios */ if (issue_vm_cmd(vm_cmd.vm_id, V3_VM_CONSOLE_DISCONNECT, (uintptr_t)NULL) == -1) { send_resp(pisces_fd, -1); break; } send_resp(pisces_fd, 0); break; } case ENCLAVE_CMD_VM_CONS_KEYCODE: { struct cmd_vm_cons_keycode vm_cmd; ret = read(pisces_fd, &vm_cmd, sizeof(struct cmd_vm_cons_keycode)); if (ret != sizeof(struct cmd_vm_cons_keycode)) { send_resp(pisces_fd, -1); break; } /* Send Keycode to Palacios */ if (issue_vm_cmd(vm_cmd.vm_id, V3_VM_KEYBOARD_EVENT, vm_cmd.scan_code) == -1) { send_resp(pisces_fd, -1); break; } send_resp(pisces_fd, 0); break; } case ENCLAVE_CMD_VM_DBG: { struct cmd_vm_debug pisces_cmd; struct v3_debug_cmd v3_cmd; ret = read(pisces_fd, &pisces_cmd, sizeof(struct cmd_vm_debug)); if (ret != sizeof(struct cmd_vm_debug)) { send_resp(pisces_fd, -1); break; } v3_cmd.core = pisces_cmd.spec.core; v3_cmd.cmd = pisces_cmd.spec.cmd; if (issue_vm_cmd(pisces_cmd.spec.vm_id, V3_VM_DEBUG, (uintptr_t)&v3_cmd) == -1) { send_resp(pisces_fd, -1); break; } send_resp(pisces_fd, 0); break; } case ENCLAVE_CMD_SHUTDOWN: { if (issue_v3_cmd(V3_SHUTDOWN, 0) == -1) { printf("Error: Could not shutdown Palacios VMM\n"); send_resp(pisces_fd, -1); break; } /* Perform additional Cleanup is necessary */ send_resp(pisces_fd, 0); close(pisces_fd); exit(0); } default: { printf("Unknown Pisces Command (%llu)\n", cmd.cmd); send_resp(pisces_fd, -1); break; } } } close(pisces_fd); return 0; }
static int launch_job(int pisces_fd, struct pisces_job_spec * job_spec) { u32 page_size = (job_spec->use_large_pages ? VM_PAGE_2MB : VM_PAGE_4KB); vaddr_t file_addr = 0; cpu_set_t spec_cpus; cpu_set_t job_cpus; user_cpumask_t lwk_cpumask; int status = 0; /* Figure out which CPUs are being requested */ { int i = 0; CPU_ZERO(&spec_cpus); for (i = 0; i < 64; i++) { if ((job_spec->cpu_mask & (0x1ULL << i)) != 0) { CPU_SET(i, &spec_cpus); } } } /* Check if we can host the job on the current CPUs */ /* Create a kitten compatible cpumask */ { int i = 0; CPU_AND(&job_cpus, &spec_cpus, &enclave_cpus); if (CPU_COUNT(&job_cpus) < job_spec->num_ranks) { printf("Error: Could not find enough CPUs for job\n"); return -1; } cpus_clear(lwk_cpumask); for (i = 0; (i < CPU_MAX_ID) && (i < CPU_SETSIZE); i++) { if (CPU_ISSET(i, &job_cpus)) { cpu_set(i, lwk_cpumask); } } } /* Load exe file info memory */ { struct pisces_user_file_info * file_info = NULL; int path_len = strlen(job_spec->exe_path) + 1; size_t file_size = 0; id_t my_aspace_id; file_info = malloc(sizeof(struct pisces_user_file_info) + path_len); memset(file_info, 0, sizeof(struct pisces_user_file_info) + path_len); file_info->path_len = path_len; strncpy(file_info->path, job_spec->exe_path, path_len - 1); file_size = ioctl(pisces_fd, PISCES_STAT_FILE, file_info); status = aspace_get_myid(&my_aspace_id); if (status != 0) return status; { paddr_t pmem = elf_dflt_alloc_pmem(file_size, page_size, 0); printf("PMEM Allocated at %p (file_size=%lu) (page_size=0x%x) (pmem_size=%p)\n", (void *)pmem, file_size, page_size, (void *)round_up(file_size, page_size)); if (pmem == 0) { printf("Could not allocate space for exe file\n"); return -1; } /* Map the segment into this address space */ status = aspace_map_region_anywhere( my_aspace_id, &file_addr, round_up(file_size, page_size), (VM_USER|VM_READ|VM_WRITE), page_size, "File", pmem ); if (status) return status; file_info->user_addr = file_addr; } printf("Loading EXE into memory\n"); 
ioctl(pisces_fd, PISCES_LOAD_FILE, file_info); free(file_info); } printf("Job Launch Request (%s) [%s %s]\n", job_spec->name, job_spec->exe_path, job_spec->argv); /* Initialize start state for each rank */ { start_state_t * start_state = NULL; int rank = 0; /* Allocate start state for each rank */ start_state = malloc(job_spec->num_ranks * sizeof(start_state_t)); if (!start_state) { printf("malloc of start_state[] failed\n"); return -1; } for (rank = 0; rank < job_spec->num_ranks; rank++) { int cpu = 0; int i = 0; for (i = 0; i < CPU_SETSIZE; i++) { if (CPU_ISSET(i, &job_cpus)) { CPU_CLR(i, &job_cpus); cpu = i; break; } } printf("Loading Rank %d on CPU %d\n", rank, cpu); start_state[rank].task_id = ANY_ID; start_state[rank].cpu_id = ANY_ID; start_state[rank].user_id = 1; start_state[rank].group_id = 1; sprintf(start_state[rank].task_name, job_spec->name); status = elf_load((void *)file_addr, job_spec->name, ANY_ID, page_size, job_spec->heap_size, // heap_size job_spec->stack_size, // stack_size job_spec->argv, // argv_str job_spec->envp, // envp_str &start_state[rank], 0, &elf_dflt_alloc_pmem ); if (status) { printf("elf_load failed, status=%d\n", status); } if ( aspace_update_user_cpumask(start_state[rank].aspace_id, &lwk_cpumask) != 0) { printf("Error updating CPU mask\n"); return -1; } /* Setup Smartmap regions if enabled */ if (job_spec->use_smartmap) { int src = 0; int dst = 0; printf("Creating SMARTMAP mappings...\n"); for (dst = 0; dst < job_spec->num_ranks; dst++) { for (src = 0; src < job_spec->num_ranks; src++) { status = aspace_smartmap( start_state[src].aspace_id, start_state[dst].aspace_id, SMARTMAP_ALIGN + (SMARTMAP_ALIGN * src), SMARTMAP_ALIGN ); if (status) { printf("smartmap failed, status=%d\n", status); return -1; } } } printf(" OK\n"); } printf("Creating Task\n"); status = task_create(&start_state[rank], NULL); } } return 0; }