void action_exit(int status) {
    // Flush all open streams
    fflush(0);

    // Close all open file descriptors
    struct process *process = lock_process();
    int fd;
    for (fd = 1; fd < MAX_FDS; fd++)
        if (process->fds.map[fd]) {
            // Call raw close to trigger action logic
            extern int close(int fd);
            unlock_process(); // TODO: minor race condition here
            close(fd);
            process = lock_process();
        }
    unlock_process();

    process = lock_master_process();

    // Add an exit(status) node to the subgraph
    struct hash data;
    memset(&data, 0, sizeof(data));
    data.data[0] = status;
    new_node(process, SG_EXIT, &data);

    unlock_master_process();
}
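/*
 * Aside (illustrative only, not part of the source above): the fflush(0) call
 * relies on the standard behavior that a null FILE* argument flushes every
 * open output stream, so buffered data reaches the kernel before the file
 * descriptors are closed.  A tiny standalone illustration:
 */
#include <stdio.h>

int main(void)
{
    printf("buffered");   // data sits in stdio's user-space buffer
    fflush(0);            // NULL stream => flush all open output streams
    return 0;             // output is already on fd 1 before exit
}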
/* initialize kthread */
void kthread_init(struct kthread *thread, struct eprocess *process)
{
    ktrace("\n");

    INIT_DISP_HEADER(&thread->header, ThreadObject, sizeof(struct ethread), false);

    /* initialize the mutant list */
    INIT_LIST_HEAD(&thread->mutant_list_head);

    /* setup APC fields */
    INIT_LIST_HEAD(&thread->apc_state.apc_list_head[0]);
    INIT_LIST_HEAD(&thread->apc_state.apc_list_head[1]);
    INIT_LIST_HEAD(&thread->saved_apc_state.apc_list_head[0]);
    INIT_LIST_HEAD(&thread->saved_apc_state.apc_list_head[1]);
    thread->apc_state.process = (struct kprocess *)process;
    thread->apc_state_pointer[OriginalApcEnvironment] = &thread->apc_state;
    thread->apc_state_pointer[AttachedApcEnvironment] = &thread->saved_apc_state;
    thread->apc_state_index = OriginalApcEnvironment;
    thread->apc_queue_lock = SPIN_LOCK_UNLOCKED;
    thread->apc_queueable = true;

    /* FIXME: initialize the suspend APC */
    apc_init(&thread->suspend_apc,
             thread,
             OriginalApcEnvironment,
             suspend_thread_kernel_routine,
             NULL,
             suspend_thread_normal_routine,
             KernelMode,
             NULL);

    /* initialize the suspend semaphore */
    semaphore_init(&thread->suspend_semaphore, 0, 128);
    /* FIXME: sema_init(&thread->suspend_semaphore, 0); */
    /* FIXME: keinitializetimer(&thread->timer); */

    arch_init_thread(thread, context);

    thread->base_priority = process->pcb.base_priority;
    thread->quantum = process->pcb.quantum_reset;
    thread->quantum_reset = process->pcb.quantum_reset;
    thread->affinity = process->pcb.affinity;
    thread->priority = process->pcb.base_priority;
    thread->user_affinity = process->pcb.affinity;
    thread->disable_boost = process->pcb.disable_boost;
    thread->auto_alignment = process->pcb.auto_alignment;

    /* set the thread to initialized */
    thread->state = Initialized;

    lock_process(process);
    list_add_tail(&thread->thread_list_entry, &process->pcb.thread_list_head);
    unlock_process(process);

    if (!thread->win32thread)
        thread->win32thread = create_w32thread(process->win32process, (struct ethread *)thread);
} /* end kthread_init */
NTSTATUS SERVICECALL
NtTerminateProcess(IN HANDLE ProcessHandle,
                   IN NTSTATUS ExitStatus)
{
    struct eprocess *process;
    struct ethread *cur_thread;
    NTSTATUS status;

    ktrace("\n");

    if ((status = ref_object_by_handle(ProcessHandle ? ProcessHandle : NtCurrentProcess(),
                                       PROCESS_TERMINATE,
                                       process_object_type,
                                       get_pre_mode(),
                                       (PVOID *)&process,
                                       NULL)))
        return status;

    cur_thread = (struct ethread *)get_current_ethread();

    terminate_process(process->win32process, ExitStatus);
    release_object(process->win32process);

    lock_process(process);

    if (process->exit_time.quad) {
        unlock_process(process);
        deref_object(process);
        return STATUS_PROCESS_IS_TERMINATING;
    }

    query_sys_time(&process->exit_time);
    process->exit_status = (unsigned long)ExitStatus;

    unlock_process(process);
    deref_object(process);

    if (process == get_current_eprocess()) {
        cur_thread->exit_status = ExitStatus;
        do_group_exit((ExitStatus & 0xff) << 8);
    } else {
        struct ethread *first_thread = get_first_thread(process);

        first_thread->exit_status = ExitStatus;
        send_sig_info(SIGKILL, SEND_SIG_FORCED, first_thread->et_task->group_leader);
    }

    return STATUS_SUCCESS;
}
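/*
 * Aside (illustrative only): the (ExitStatus & 0xff) << 8 packing above mirrors
 * the wait()-style status encoding, so a Linux parent calling waitpid() sees
 * the low byte of the NT exit status as an ordinary exit code.  The value 0x2a
 * below is an arbitrary example.
 */
#include <stdio.h>
#include <sys/wait.h>

int main(void)
{
    int exit_status = 0x2a;                       // example exit status low byte
    int wait_code = (exit_status & 0xff) << 8;    // same packing as do_group_exit() above
    printf("WIFEXITED=%d WEXITSTATUS=%d\n",
           WIFEXITED(wait_code), WEXITSTATUS(wait_code));   // prints 1 and 42
    return 0;
}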
/* initialize ethread */
void ethread_init(struct ethread *thread, struct eprocess *process, struct task_struct *tsk)
{
    ktrace("\n");

    /* attach to the containing process */
    ref_object((PVOID)process);
    write_lock(&process->ep_lock);
    thread->threads_process = process;
    write_unlock(&process->ep_lock);

    /* FIXME: create a thread object and hook into the Linux task */
    thread->et_task = tsk;
    atomic_set(&thread->et_count, 0); /* FIXME */
    thread->et_ops = (struct ethread_operations *)&ethread_ops;

    INIT_LIST_HEAD(&thread->lpc_reply_chain);
    INIT_LIST_HEAD(&thread->irp_list);
    INIT_LIST_HEAD(&thread->active_timer_list_head);
    thread->active_timer_list_lock = SPIN_LOCK_UNLOCKED;
    thread->thread_lock = SPIN_LOCK_UNLOCKED;
    /* FIXME: semaphore_init */

    thread->cid.unique_process = process->unique_processid;
    thread->win32_start_address = 0; /* context->Eax, default is 0 */

    lock_process(process);
    list_add_tail(&thread->thread_list_entry, &process->thread_list_head);
    unlock_process(process);

    add_ethread(thread->et_task, thread);
    if (atomic_read(&thread->et_count) == 1) /* FIXME: add this to win32_thread.c */
        ref_object(thread);

    kthread_init(&thread->tcb, process);
} /* end ethread_init */
/*
 * notification of exit
 * - the exit_status is as sys_wait() would return
 * - notification includes fatal signals
 */
static void thread_exit(struct ethread *thread, int exit_status)
{
    struct eprocess *process = thread->threads_process;
    BOOLEAN last;

    /* if already terminated, do nothing */
    ktrace("thread %p, exit_status %ld\n", thread, thread->exit_status);

    thread->terminated = 1;

    /* can't terminate a thread while it is attached to another process */
    if (thread->tcb.apc_state_index)
        return;

    /* TODO: Lower to Passive Level */

    /* Lock the process before we modify its thread entries */
    lock_process(process);

    list_del(&thread->thread_list_entry);

    /* TODO: close port */
    /* TODO: Rundown Win32 structures */

    /* Set the last thread exit status */
    process->last_thread_exit_status = thread->exit_status;

    /* The last thread shuts down the process */
    if ((last = list_empty(&process->thread_list_head))) {
        /* Save the exit time if not already done by NtTerminateProcess.
           This happens when the last thread just terminates without
           explicitly terminating the process. TODO */
#if 0
        process->exit_time = thread->exit_time;
#endif
        __exit_process(process);
    }

#if 0
    if (thread->tcb.win32thread) {
        kfree(thread->tcb.win32thread);
        thread->tcb.win32thread = NULL;
    }
#endif

    /* Free the TEB; for the last thread the TEB is freed by exit() */
    if (thread->tcb.teb && !last) {
        delete_teb(thread->tcb.teb);
        thread->tcb.teb = NULL;
    }

    list_del(&thread->tcb.thread_list_entry);

    /* Unlock the process */
    unlock_process(process);

    /* Rundown mutexes */
    rundown_thread();

    /* Satisfy waits */
    local_irq_disable();
    thread->tcb.header.signal_state = true;
    if (!list_empty(&thread->tcb.header.wait_list_head))
        wait_test((struct dispatcher_header *)&thread->tcb, IO_NO_INCREMENT);
    local_irq_enable();
} /* end thread_exit() */
/*
 * action_execve adds an exec node to the subgraph and sets WAITLESS_PARENT
 * to the hash of the arguments.  The first node in the child process will
 * use WAITLESS_PARENT as its first parent node.  Note that WAITLESS_PARENT
 * is intentionally _not_ the same as the exec node; this encodes the idea
 * that child processes depend on their parent processes only through the
 * arguments to execve (and the current directory).
 *
 * In the case of shared spines (due to pipes or other IPC mechanisms) the
 * exec node encodes only the path without argv and envp, and WAITLESS_PARENT
 * has the format #id where id is a SYSV shared memory id.  This allows further
 * subgraph nodes from child and parent to be interleaved.  Since in the shared
 * case the child _does_ descend directly from the exec node, an explicit
 * record of argv and envp would be redundant.
 */
int action_execve(const char *path, const char *const argv[], const char *const envp[])
{
    fd_map_dump();

    struct process *process = lock_master_process();
    int linked = process != process_info();
    wlog("exec: linked %d", linked);

    // Pack all the arguments into a single buffer.  The format is
    //     char path[];
    //     uint32_t argc;
    //     char argv[argc][];
    //     char is_pipe;
    //     uint32_t envc;
    //     char envp[envc][];
    //     char cwd[];
    // with all strings packed together with terminating nulls.
    char data[4096];
    char *p = data;
#define ADD_STR(s) p += strlcpy(p, (s), data+sizeof(data)-p) + 1
    ADD_STR(path);
    // encode argv
    char *cp = p;
    p += sizeof(uint32_t); // skip 4 bytes for len(argv)
    uint32_t i;
    for (i = 0; argv[i]; i++)
        ADD_STR(argv[i]);
    memcpy(cp, &i, sizeof(uint32_t));
    *p++ = linked;
    // encode envp
    cp = p;
    p += sizeof(uint32_t); // skip 4 bytes for len(envp)
    uint32_t count = 0;
    for (i = 0; envp[i]; i++)
        if (!startswith(envp[i], "WAITLESS")) {
            ADD_STR(envp[i]);
            count++;
        }
    memcpy(cp, &count, sizeof(uint32_t));
    // encode cwd
    if (!real_getcwd(p, data+sizeof(data)-p))
        die("action_execve: getcwd failed: %s", strerror(errno));
    int n = p - data + strlen(p) + 1;
#undef ADD_STR

    // Store exec data and create a corresponding exec node
    struct hash data_hash;
    remember_hash_memory(&data_hash, data, n);
    new_node(process, SG_EXEC, &data_hash);

    // Add the program to the snapshot
    struct hash path_hash, program_hash;
    remember_hash_path(&path_hash, path);
    struct snapshot_entry *entry = snapshot_update(&program_hash, path, &path_hash, 1);
    if (entry->writing)
        die("can't exec '%s' while it is being written", path); // TODO: block instead of dying
    entry->read = 1;
    shared_map_unlock(&snapshot);

    // TODO: Run ldd/otool and hash all shared library dependencies as well
    // TODO: Complain if the program is statically linked
    // TODO: If we decide it's worth it, parse #! lines and hash interpreters
    // too.  That seems easy enough to probably be worth it.

    // If not linked, set parents to the new child values
    if (!linked) {
        process->parents.n = 2;
        process->parents.p[0] = data_hash;
        process->parents.p[1] = program_hash;
    }
    unlock_master_process();

    // Update process flags
    process = lock_process();
    int old_flags = process->flags;
    process->flags = 0;
    p = rindex(path, '/');
    const char *name = p ? p+1 : path;
    if (!strcmp(name, "as"))
        process->flags |= HACK_SKIP_O_STAT;
    else if (strstr(name, "-gcc-")) {
        for (i = 1; argv[i]; i++)
            if (!strcmp(argv[i], "-c")) {
                process->flags |= HACK_SKIP_O_STAT;
                break;
            }
    }
    unlock_process();

    // Do the exec
    int ret = real_execve(path, argv, envp);

    // An error must have occurred; reset flags back to old value
    process = lock_process();
    process->flags = old_flags;
    unlock_process();
    return ret;
}
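/*
 * Aside (hypothetical helper, not part of waitless): a decoder for the packed
 * buffer layout documented in action_execve() above.  take_str() and
 * dump_exec_data() are illustrative names, and the code assumes the buffer was
 * produced exactly as action_execve packs it.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static const char *take_str(const char **p)
{
    const char *s = *p;
    *p += strlen(s) + 1;            // skip the string and its terminating null
    return s;
}

static void dump_exec_data(const char *buf)
{
    const char *p = buf;
    uint32_t i, argc, envc;

    printf("path: %s\n", take_str(&p));
    memcpy(&argc, p, sizeof(argc)); p += sizeof(argc);   // len(argv), possibly unaligned
    for (i = 0; i < argc; i++)
        printf("argv[%u]: %s\n", i, take_str(&p));
    printf("linked: %d\n", *p++);                        // the is_pipe/linked byte
    memcpy(&envc, p, sizeof(envc)); p += sizeof(envc);   // len(envp)
    for (i = 0; i < envc; i++)
        printf("envp[%u]: %s\n", i, take_str(&p));
    printf("cwd: %s\n", take_str(&p));
}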
/*
 * Add a fork node to the subgraph and then add an additional parent of
 * either all zeroes or all ones depending on whether we're in child or parent,
 * respectively.
 */
pid_t action_fork(void)
{
    // Lock both parent (self) and master
    struct process *process = lock_process();
    struct process *master = process->master ? find_process_info(process->master) : process;
    if (process != master)
        spin_lock(&master->lock);

    // Save mutable information about process
    struct fd_map fds = process->fds;
    int flags = process->flags;

    // Analyze open file descriptors
    int linked = 0, fd;
    for (fd = 0; fd < MAX_FDS; fd++) {
        int slot = fds.map[fd];
        if (slot) {
            struct fd_info *info = fds.info + slot;
            if (info->flags & WO_PIPE) {
                wlog("fork: fd %d as pipe", fd);
                linked = 1;
            }
            else if (info->flags & O_WRONLY)
                // TODO: Enforce that files aren't written by more than
                // one process.  This requires tracking writes, etc.
                wlog("fork: fd %d open for write", fd);
            else
                // TODO: Link processes that share open read descriptors, or
                // possibly create duplicate read nodes for more precision.
                wlog("fork: fd %d open for read", fd);
        }
    }

    struct hash zero_hash, one_hash;
    memset(&zero_hash, 0, sizeof(struct hash));
    memset(&one_hash, -1, sizeof(struct hash));

    // Add a fork node to the subgraph
    new_node(master, SG_FORK, linked ? &zero_hash : &zero_hash);
    struct hash fork_node = master->parents.p[0];
    wlog("fork: linked %d", linked);

    // Actually fork
    pid_t pid = real_fork();
    if (pid < 0)
        die("action_fork: fork failed: %s", strerror(errno));

    if (!pid) {
        struct process *child = new_process_info();
        child->flags = flags;
        if (linked) {
            wlog("linking to %d", master->pid);
            child->master = master->pid;
        }
        else {
            wlog("child of %d (master %d)", process->pid, master->pid);
            // Inherit from fork node and zero
            add_parent(child, &fork_node);
            add_parent(child, &zero_hash);
            wlog("fresh process: master 0x%x", child->master);
        }

        // Copy fd_map information to child
        memcpy(&child->fds, &fds, sizeof(struct fd_map));

        // Drop fds with close-on-exec set
        int fd;
        for (fd = 0; fd < MAX_FDS; fd++)
            if (child->fds.cloexec[fd])
                child->fds.map[fd] = 0;

        unlock_process();
    }
    else {
        if (!linked) {
            // Add a one parent node to the parent
            add_parent(master, &one_hash);
        }

        // Unlock both parent (self) and master
        if (process->master)
            spin_unlock(&master->lock);
        unlock_process();
    }
    fd_map_dump();
    return pid;
}
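/*
 * Aside (hypothetical helper, not part of waitless): a reader of the subgraph
 * could classify the extra parent hash recorded around a fork using the
 * all-zero / all-one convention described above.  classify_fork_parent() is an
 * illustrative name, and struct hash is assumed to be a flat byte buffer as
 * used in action_fork().
 */
#include <string.h>

enum fork_side { FORK_CHILD, FORK_PARENT, FORK_NEITHER };

static enum fork_side classify_fork_parent(const struct hash *h)
{
    struct hash zero, one;
    memset(&zero, 0, sizeof(struct hash));    // marker added in the child
    memset(&one, -1, sizeof(struct hash));    // marker added in the parent
    if (!memcmp(h, &zero, sizeof(struct hash)))
        return FORK_CHILD;
    if (!memcmp(h, &one, sizeof(struct hash)))
        return FORK_PARENT;
    return FORK_NEITHER;
}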