/*
 * Unpause domID, repeating until a fresh xc_domain_getinfo() no longer
 * reports the domain as paused (or the domain can no longer be queried).
 */
void xen_unpause(xen_interface_t *xen, domid_t domID)
{
    for (;;) {
        xc_dominfo_t info = { 0 };

        /* Stop once the domain can't be found or is no longer paused. */
        if (xc_domain_getinfo(xen->xc, domID, 1, &info) != 1)
            break;
        if (info.domid != domID || !info.paused)
            break;

        xc_domain_unpause(xen->xc, domID);
    }
}
/*
 * Create and boot the xenstore stub domain.
 *
 * Uses the file-scope configuration globals (memory, flask, kernel,
 * ramdisk, param) and stores the new domain id in the global 'domid'.
 * On any failure after domain creation the partially-built domain is
 * destroyed before returning.
 *
 * Returns 0 on success, non-zero (-1 or a libxc error) on failure.
 *
 * Fix: the result of xc_dom_allocate() was previously passed unchecked
 * to xc_dom_kernel_file(), dereferencing NULL on allocation failure.
 */
static int build(xc_interface *xch)
{
    char cmdline[512];
    uint32_t ssid;
    xen_domain_handle_t handle = { 0 };
    int rv, xs_fd;
    struct xc_dom_image *dom = NULL;
    /* +1 MiB of slack on top of the requested memory, in KiB. */
    int limit_kb = (memory + 1) * 1024;

    xs_fd = open("/dev/xen/xenbus_backend", O_RDWR);
    if ( xs_fd == -1 )
    {
        fprintf(stderr, "Could not open /dev/xen/xenbus_backend\n");
        return -1;
    }

    /* Resolve the FLASK security label, or fall back to the default DomU sid. */
    if ( flask )
    {
        rv = xc_flask_context_to_sid(xch, flask, strlen(flask), &ssid);
        if ( rv )
        {
            fprintf(stderr, "xc_flask_context_to_sid failed\n");
            goto err;
        }
    }
    else
    {
        ssid = SECINITSID_DOMU;
    }

    rv = xc_domain_create(xch, ssid, handle, XEN_DOMCTL_CDF_xs_domain,
                          &domid, NULL);
    if ( rv )
    {
        fprintf(stderr, "xc_domain_create failed\n");
        goto err;
    }
    rv = xc_domain_max_vcpus(xch, domid, 1);
    if ( rv )
    {
        fprintf(stderr, "xc_domain_max_vcpus failed\n");
        goto err;
    }
    rv = xc_domain_setmaxmem(xch, domid, limit_kb);
    if ( rv )
    {
        fprintf(stderr, "xc_domain_setmaxmem failed\n");
        goto err;
    }
    rv = xc_domain_set_memmap_limit(xch, domid, limit_kb);
    if ( rv )
    {
        fprintf(stderr, "xc_domain_set_memmap_limit failed\n");
        goto err;
    }

    /* Hook the new domain up as the xenbus backend; on success the ioctl
     * returns the event channel port to hand to the stub domain. */
    rv = ioctl(xs_fd, IOCTL_XENBUS_BACKEND_SETUP, domid);
    if ( rv < 0 )
    {
        fprintf(stderr, "Xenbus setup ioctl failed\n");
        goto err;
    }

    if ( param )
        snprintf(cmdline, 512, "--event %d --internal-db %s", rv, param);
    else
        snprintf(cmdline, 512, "--event %d --internal-db", rv);

    dom = xc_dom_allocate(xch, cmdline, NULL);
    if ( !dom )
    {
        /* Without this check a failed allocation was dereferenced below. */
        fprintf(stderr, "xc_dom_allocate failed\n");
        rv = -1;
        goto err;
    }
    rv = xc_dom_kernel_file(dom, kernel);
    if ( rv )
    {
        fprintf(stderr, "xc_dom_kernel_file failed\n");
        goto err;
    }

    if ( ramdisk )
    {
        rv = xc_dom_ramdisk_file(dom, ramdisk);
        if ( rv )
        {
            fprintf(stderr, "xc_dom_ramdisk_file failed\n");
            goto err;
        }
    }

    rv = xc_dom_boot_xen_init(dom, xch, domid);
    if ( rv )
    {
        fprintf(stderr, "xc_dom_boot_xen_init failed\n");
        goto err;
    }
    rv = xc_dom_parse_image(dom);
    if ( rv )
    {
        fprintf(stderr, "xc_dom_parse_image failed\n");
        goto err;
    }
    rv = xc_dom_mem_init(dom, memory);
    if ( rv )
    {
        fprintf(stderr, "xc_dom_mem_init failed\n");
        goto err;
    }
    rv = xc_dom_boot_mem_init(dom);
    if ( rv )
    {
        fprintf(stderr, "xc_dom_boot_mem_init failed\n");
        goto err;
    }
    rv = xc_dom_build_image(dom);
    if ( rv )
    {
        fprintf(stderr, "xc_dom_build_image failed\n");
        goto err;
    }
    rv = xc_dom_boot_image(dom);
    if ( rv )
    {
        fprintf(stderr, "xc_dom_boot_image failed\n");
        goto err;
    }

    /* Route VIRQ_DOM_EXC to the xenstore domain instead of dom0. */
    rv = xc_domain_set_virq_handler(xch, domid, VIRQ_DOM_EXC);
    if ( rv )
    {
        fprintf(stderr, "xc_domain_set_virq_handler failed\n");
        goto err;
    }
    rv = xc_domain_unpause(xch, domid);
    if ( rv )
    {
        fprintf(stderr, "xc_domain_unpause failed\n");
        goto err;
    }

    rv = 0;

err:
    if ( dom )
        xc_dom_release(dom);
    if ( xs_fd >= 0 )
        close(xs_fd);

    /* if we failed then destroy the domain */
    if ( rv && domid != ~0 )
        xc_domain_destroy(xch, domid);

    return rv;
}
/*
 * Enable a vm_event ring for the given domain.
 *
 * 'param' selects which HVM ring PFN parameter (paging / monitor / sharing)
 * to set up; on success the mapped ring page is returned and *port holds the
 * event channel port. Returns NULL (with errno set) on failure.
 *
 * The domain is paused for the duration of the setup and always unpaused
 * again before returning; on failure the mapped page (if any) is unmapped.
 */
void *xc_vm_event_enable(xc_interface *xch, domid_t domain_id, int param,
                         uint32_t *port)
{
    void *ring_page = NULL;
    uint64_t pfn;
    xen_pfn_t ring_pfn, mmap_pfn;
    unsigned int op, mode;
    int rc1, rc2, saved_errno;

    if ( !port )
    {
        errno = EINVAL;
        return NULL;
    }

    /* Pause the domain for ring page setup */
    rc1 = xc_domain_pause(xch, domain_id);
    if ( rc1 != 0 )
    {
        PERROR("Unable to pause domain\n");
        return NULL;
    }

    /* Get the pfn of the ring page */
    rc1 = xc_hvm_param_get(xch, domain_id, param, &pfn);
    if ( rc1 != 0 )
    {
        PERROR("Failed to get pfn of ring page\n");
        goto out;
    }

    ring_pfn = pfn;
    mmap_pfn = pfn;
    /* xc_get_pfn_type_batch overwrites mmap_pfn with the page's type info. */
    rc1 = xc_get_pfn_type_batch(xch, domain_id, 1, &mmap_pfn);
    if ( rc1 || mmap_pfn & XEN_DOMCTL_PFINFO_XTAB )
    {
        /* Page not in the physmap, try to populate it */
        rc1 = xc_domain_populate_physmap_exact(xch, domain_id, 1, 0, 0,
                                               &ring_pfn);
        if ( rc1 != 0 )
        {
            PERROR("Failed to populate ring pfn\n");
            goto out;
        }
    }

    mmap_pfn = ring_pfn;
    ring_page = xc_map_foreign_pages(xch, domain_id, PROT_READ | PROT_WRITE,
                                     &mmap_pfn, 1);
    if ( !ring_page )
    {
        PERROR("Could not map the ring page\n");
        goto out;
    }

    /* Map the HVM param onto the matching vm_event domctl mode. */
    switch ( param )
    {
    case HVM_PARAM_PAGING_RING_PFN:
        op = XEN_VM_EVENT_ENABLE;
        mode = XEN_DOMCTL_VM_EVENT_OP_PAGING;
        break;

    case HVM_PARAM_MONITOR_RING_PFN:
        op = XEN_VM_EVENT_ENABLE;
        mode = XEN_DOMCTL_VM_EVENT_OP_MONITOR;
        break;

    case HVM_PARAM_SHARING_RING_PFN:
        op = XEN_VM_EVENT_ENABLE;
        mode = XEN_DOMCTL_VM_EVENT_OP_SHARING;
        break;

    /*
     * This is for the outside chance that the HVM_PARAM is valid but is invalid
     * as far as vm_event goes.
     */
    default:
        errno = EINVAL;
        rc1 = -1;
        goto out;
    }

    rc1 = xc_vm_event_control(xch, domain_id, op, mode, port);
    if ( rc1 != 0 )
    {
        PERROR("Failed to enable vm_event\n");
        goto out;
    }

    /* Remove the ring_pfn from the guest's physmap */
    rc1 = xc_domain_decrease_reservation_exact(xch, domain_id, 1, 0, &ring_pfn);
    if ( rc1 != 0 )
        PERROR("Failed to remove ring page from guest physmap");

 out:
    /* Preserve the first failure's errno across the unpause call below. */
    saved_errno = errno;

    rc2 = xc_domain_unpause(xch, domain_id);
    if ( rc1 != 0 || rc2 != 0 )
    {
        if ( rc2 != 0 )
        {
            /* If setup succeeded but unpause failed, report the unpause errno. */
            if ( rc1 == 0 )
                saved_errno = errno;
            PERROR("Unable to unpause domain");
        }

        /* Any failure: tear down the mapping and hand NULL back to the caller. */
        if ( ring_page )
            xenforeignmemory_unmap(xch->fmem, ring_page, 1);
        ring_page = NULL;

        errno = saved_errno;
    }

    return ring_page;
}
/*
 * Thread-pool worker: run one sample through the two-phase analysis.
 *
 * Phase 1 runs the preconfigure script (CONFIG_CMD) against the clone,
 * phase 2 runs DRAKVUF itself (DRAKVUF_CMD) while a tcpdump thread
 * captures traffic. Each phase is raced against a watchdog: start->timer
 * is set to 180 and a timer_thread counts it down while this thread holds
 * start->timer_lock; a timer value of 0 after the join means the phase
 * timed out. On timeout the clone is torn down, a fresh one is prepared
 * via prepare(), and the whole sequence restarts.
 *
 * NOTE(review): assumes timer_thread decrements start->timer and exits
 * once the command completes (the lock is released) — confirm against
 * timer_thread's implementation, which is outside this view.
 */
void run_drakvuf(gpointer data, gpointer user_data)
{
    UNUSED(user_data);
    struct start_drakvuf *start = data;
    char *command;
    gint rc;
    GThread *timer, *tcpd;

restart:
    command = NULL;
    rc = 0;

    printf("[%i] Starting %s on domid %u\n", start->threadid, start->input, start->cloneID);

    /* Phase 1: preconfigure script, guarded by the watchdog timer. */
    start->timer = 180;
    g_mutex_lock(&start->timer_lock);
    timer = g_thread_new("timer", timer_thread, start);

    /* Size the command with a NULL snprintf first, then format into it. */
    command = g_malloc0(snprintf(NULL, 0, CONFIG_CMD, config_script, rekall_profile, start->cloneID, injection_pid, start->threadid+1, run_folder, start->input, out_folder) + 1);
    sprintf(command, CONFIG_CMD, config_script, rekall_profile, start->cloneID, injection_pid, start->threadid+1, run_folder, start->input, out_folder);

    printf("[%i] ** RUNNING COMMAND: %s\n", start->threadid, command);
    g_spawn_command_line_sync(command, NULL, NULL, &rc, NULL);
    g_free(command);
    /* Releasing the lock signals the watchdog; join to collect it. */
    g_mutex_unlock(&start->timer_lock);
    g_thread_join(timer);

    printf("[%i] ** Preconfig finished with RC %i. Timer: %i.\n", start->threadid, rc, start->timer);

    /* timer == 0 means the watchdog expired: abandon this clone. */
    if (!start->timer)
        goto end;

    /* Start capturing guest traffic for the analysis phase. */
    tcpd = g_thread_new("tcpdump", tcpdump, start);

    xc_domain_unpause(xen->xc, start->cloneID);
    // Preconfigure script takes a bit to run in the guest
    sleep(30);
    xc_domain_pause(xen->xc, start->cloneID);

    /* Phase 2: DRAKVUF itself, with a fresh watchdog. */
    start->timer = 180;
    g_mutex_lock(&start->timer_lock);
    timer = g_thread_new("timer", timer_thread, start);

    command = g_malloc0(snprintf(NULL, 0, DRAKVUF_CMD, drakvuf_script, rekall_profile, start->cloneID, injection_pid, start->threadid+1, run_folder, start->input, out_folder) + 1);
    sprintf(command, DRAKVUF_CMD, drakvuf_script, rekall_profile, start->cloneID, injection_pid, start->threadid+1, run_folder, start->input, out_folder);

    printf("[%i] ** RUNNING COMMAND: %s\n", start->threadid, command);
    g_spawn_command_line_sync(command, NULL, NULL, &rc, NULL);
    g_free(command);
    g_mutex_unlock(&start->timer_lock);
    g_thread_join(timer);
    g_thread_join(tcpd);

    printf("[%i] ** DRAKVUF finished with RC %i. Timer: %i\n", start->threadid, rc, start->timer);

    if ( start->timer )
    {
        /* Success: free this job's resources and release the worker slot. */
        printf("[%i] Finished processing %s\n", start->threadid, start->input);
        g_mutex_unlock(&locks[start->threadid]);
        g_mutex_clear(&start->timer_lock);
        g_free(start->input);
        g_free(start->clone_name);
        g_free(start);
        return;
    }
    else
        /* DRAKVUF timed out: destroy the clone before re-provisioning. */
        cleanup(start->cloneID, start->threadid+1);

end:
    printf("[%i] %s failed to execute on %u because of a timeout, creating new clone\n", start->threadid, start->input, start->cloneID);
    /* Provision a replacement clone into 'start' and retry from the top. */
    prepare(NULL, start);
    goto restart;
}