/* fork() with atfork-handler and signal-safety support (older variant:
 * guards TLS fixup on libc.has_thread_pointer and derives the child's
 * tid from SYS_getpid).
 *
 * Ordering is deliberate and must not change:
 *  1. run prepare handlers (__fork_handler(-1)) while signals are still
 *     deliverable;
 *  2. block ALL signals so no handler runs between the kernel fork and
 *     the child's repair of its thread structure;
 *  3. perform the raw syscall (SYS_fork where it exists, otherwise
 *     SYS_clone with SIGCHLD so the parent is notified on exit);
 *  4. in the child only, refresh the cached tid/pid, clear the robust
 *     mutex list inherited from the parent, and reset the thread count
 *     (the child is single-threaded by definition);
 *  5. restore the caller's signal mask and run parent/child handlers
 *     (__fork_handler(!ret): 1 in the child, 0 in the parent or on
 *     error).
 *
 * NOTE(review): this file also contains a second, newer definition of
 * fork(); only one of the two can be linked — confirm which revision is
 * intended. */
pid_t fork(void)
{
	pid_t ret;
	sigset_t set;
	__fork_handler(-1);
	__block_all_sigs(&set);
#ifdef SYS_fork
	ret = syscall(SYS_fork);
#else
	ret = syscall(SYS_clone, SIGCHLD, 0);
#endif
	if (libc.has_thread_pointer && !ret) {
		/* Child: the inherited thread descriptor still holds the
		 * parent's ids; in the child tid == pid. */
		pthread_t self = __pthread_self();
		self->tid = self->pid = __syscall(SYS_getpid);
		/* Robust mutex list entries belong to the parent's threads
		 * and are meaningless (dangling) here. */
		memset(&self->robust_list, 0, sizeof self->robust_list);
		libc.threads_minus_1 = 0;
	}
	__restore_sigs(&set);
	__fork_handler(!ret);
	return ret;
}
pid_t fork(void) { pid_t ret; sigset_t set; __fork_handler(-1); __block_all_sigs(&set); #ifdef SYS_fork ret = syscall(SYS_fork); #else ret = syscall(SYS_clone, SIGCHLD, 0); #endif if (!ret) { pthread_t self = __pthread_self(); self->tid = __syscall(SYS_gettid); self->robust_list.off = 0; self->robust_list.pending = 0; libc.threads_minus_1 = 0; } __restore_sigs(&set); __fork_handler(!ret); return ret; }
void __synccall(void (*func)(void *), void *ctx) { sigset_t oldmask; int cs, i, r, pid, self;; DIR dir = {0}; struct dirent *de; struct sigaction sa = { .sa_flags = 0, .sa_handler = handler }; struct chain *cp, *next; struct timespec ts; /* Blocking signals in two steps, first only app-level signals * before taking the lock, then all signals after taking the lock, * is necessary to achieve AS-safety. Blocking them all first would * deadlock if multiple threads called __synccall. Waiting to block * any until after the lock would allow re-entry in the same thread * with the lock already held. */ __block_app_sigs(&oldmask); LOCK(synccall_lock); __block_all_sigs(0); pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &cs); head = 0; if (!libc.threaded) goto single_threaded; callback = func; context = ctx; /* This atomic store ensures that any signaled threads will see the * above stores, and prevents more than a bounded number of threads, * those already in pthread_create, from creating new threads until * the value is cleared to zero again. */ a_store(&__block_new_threads, 1); /* Block even implementation-internal signals, so that nothing * interrupts the SIGSYNCCALL handlers. The main possible source * of trouble is asynchronous cancellation. */ memset(&sa.sa_mask, -1, sizeof sa.sa_mask); __libc_sigaction(SIGSYNCCALL, &sa, 0); pid = __syscall(SYS_getpid); self = __syscall(SYS_gettid); /* Since opendir is not AS-safe, the DIR needs to be setup manually * in automatic storage. Thankfully this is easy. */ dir.fd = open("/proc/self/task", O_RDONLY|O_DIRECTORY|O_CLOEXEC); if (dir.fd < 0) goto out; /* Initially send one signal per counted thread. But since we can't * synchronize with thread creation/exit here, there could be too * few signals. This initial signaling is just an optimization, not * part of the logic. 
*/ for (i=libc.threads_minus_1; i; i--) __syscall(SYS_kill, pid, SIGSYNCCALL); /* Loop scanning the kernel-provided thread list until it shows no * threads that have not already replied to the signal. */ for (;;) { int miss_cnt = 0; while ((de = readdir(&dir))) { if (!isdigit(de->d_name[0])) continue; int tid = atoi(de->d_name); if (tid == self || !tid) continue; /* Set the target thread as the PI futex owner before * checking if it's in the list of caught threads. If it * adds itself to the list after we check for it, then * it will see its own tid in the PI futex and perform * the unlock operation. */ a_store(&target_tid, tid); /* Thread-already-caught is a success condition. */ for (cp = head; cp && cp->tid != tid; cp=cp->next); if (cp) continue; r = -__syscall(SYS_tgkill, pid, tid, SIGSYNCCALL); /* Target thread exit is a success condition. */ if (r == ESRCH) continue; /* The FUTEX_LOCK_PI operation is used to loan priority * to the target thread, which otherwise may be unable * to run. Timeout is necessary because there is a race * condition where the tid may be reused by a different * process. */ clock_gettime(CLOCK_REALTIME, &ts); ts.tv_nsec += 10000000; if (ts.tv_nsec >= 1000000000) { ts.tv_sec++; ts.tv_nsec -= 1000000000; } r = -__syscall(SYS_futex, &target_tid, FUTEX_LOCK_PI|FUTEX_PRIVATE, 0, &ts); /* Obtaining the lock means the thread responded. ESRCH * means the target thread exited, which is okay too. */ if (!r || r == ESRCH) continue; miss_cnt++; } if (!miss_cnt) break; rewinddir(&dir); } close(dir.fd); /* Serialize execution of callback in caught threads. */ for (cp=head; cp; cp=cp->next) { sem_post(&cp->target_sem); sem_wait(&cp->caller_sem); } sa.sa_handler = SIG_IGN; __libc_sigaction(SIGSYNCCALL, &sa, 0); single_threaded: func(ctx); /* Only release the caught threads once all threads, including the * caller, have returned from the callback function. 
*/ for (cp=head; cp; cp=next) { next = cp->next; sem_post(&cp->target_sem); } out: a_store(&__block_new_threads, 0); __wake(&__block_new_threads, -1, 1); pthread_setcancelstate(cs, 0); UNLOCK(synccall_lock); __restore_sigs(&oldmask); }
/* Core of wordexp(): expand the word string s via /bin/sh and collect
 * the NUL-separated results into we->we_wordv.
 *
 * Returns 0 on success, or WRDE_BADCHAR / WRDE_CMDSUB / WRDE_SYNTAX /
 * WRDE_NOSPACE per POSIX. The actual expansion is delegated to the
 * shell: the child evals `printf %s\0 x $1 $2` with $1 = s, so quoting,
 * parameter expansion, and field splitting follow shell rules exactly.
 * The leading "x" word is a sentinel that proves the shell parsed s
 * successfully (its absence at EOF means a syntax error). */
static int do_wordexp(const char *s, wordexp_t *we, int flags)
{
	size_t i, l;
	int sq=0, dq=0;      /* inside single-quote / double-quote state */
	size_t np=0;         /* open-paren depth inside $((...)) arithmetic */
	char *w, **tmp;
	char *redir = (flags & WRDE_SHOWERR) ? "" : "2>/dev/null";
	int err = 0;
	FILE *f;
	size_t wc = 0;
	char **wv = 0;
	int p[2];
	pid_t pid;
	sigset_t set;

	if (flags & WRDE_REUSE) wordfree(we);

	/* With WRDE_NOCMD, pre-scan s and reject anything that could make
	 * the shell run a command: unquoted metacharacters and command
	 * substitution. The switch relies on deliberate fallthroughs. */
	if (flags & WRDE_NOCMD) for (i=0; s[i]; i++) switch (s[i]) {
	case '\\':
		if (!sq) i++; /* backslash escapes next char except in '...' */
		break;
	case '\'':
		if (!dq) sq^=1;
		break;
	case '"':
		if (!sq) dq^=1;
		break;
	case '(':
		if (np) { np++; break; }
		/* fallthrough: bare '(' outside arithmetic is checked below */
	case ')':
		if (np) { np--; break; }
		/* fallthrough */
	case '\n': case '|': case '&': case ';':
	case '<': case '>': case '{': case '}':
		if (!(sq|dq|np)) return WRDE_BADCHAR;
		break;
	case '$':
		if (s[i+1]=='(' && s[i+2]=='(') {
			/* $(( opens arithmetic expansion; count both parens */
			i += 2;
			np += 2;
			break;
		} else if (s[i+1] != '(') break;
		/* fallthrough: $( is command substitution, like backquote */
	case '`':
		if (sq) break;
		return WRDE_CMDSUB;
	}

	if (flags & WRDE_APPEND) {
		/* Continue filling the caller's existing vector. */
		wc = we->we_wordc;
		wv = we->we_wordv;
	}

	i = wc;
	if (flags & WRDE_DOOFFS) {
		/* Guard against overflow when reserving we_offs leading
		 * NULL slots. */
		if (we->we_offs > SIZE_MAX/sizeof(void *)/4)
			goto nospace;
		i += we->we_offs;
	} else {
		we->we_offs = 0;
	}

	if (pipe2(p, O_CLOEXEC) < 0) goto nospace;
	/* Block signals around fork so the child starts with a known mask
	 * and no handler runs between fork and exec. */
	__block_all_sigs(&set);
	pid = fork();
	__restore_sigs(&set);
	if (pid < 0) {
		close(p[0]);
		close(p[1]);
		goto nospace;
	}
	if (!pid) {
		/* Child: make the pipe's write end fd 1. If it already is
		 * fd 1, just clear CLOEXEC; otherwise dup2 (the original is
		 * closed by CLOEXEC at exec time). */
		if (p[1] == 1) fcntl(1, F_SETFD, 0);
		else dup2(p[1], 1);
		execl("/bin/sh", "sh", "-c",
			"eval \"printf %s\\\\\\\\0 x $1 $2\"",
			"sh", s, redir, (char *)0);
		_exit(1);
	}
	close(p[1]);

	f = fdopen(p[0], "r");
	if (!f) {
		close(p[0]);
		kill(pid, SIGKILL);
		reap(pid);
		goto nospace;
	}

	/* Current allocated length of wv (0 forces first realloc). */
	l = wv ? i+1 : 0;

	/* Discard the sentinel "x" word; EOF here means the shell failed
	 * to parse s. */
	free(getword(f));
	if (feof(f)) {
		fclose(f);
		reap(pid);
		return WRDE_SYNTAX;
	}

	/* Read NUL-terminated words, growing wv geometrically. On
	 * allocation failure, stop reading; the !feof check below turns
	 * that into WRDE_NOSPACE while keeping the words read so far. */
	while ((w = getword(f))) {
		if (i+1 >= l) {
			l += l/2+10;
			tmp = realloc(wv, l*sizeof(char *));
			if (!tmp) break;
			wv = tmp;
		}
		wv[i++] = w;
		wv[i] = 0;
	}
	if (!feof(f)) err = WRDE_NOSPACE;

	fclose(f);
	reap(pid);

	/* Zero words expanded and nothing preallocated: still hand back a
	 * valid (all-NULL) vector. */
	if (!wv) wv = calloc(i+1, sizeof *wv);

	we->we_wordv = wv;
	we->we_wordc = i;

	if (flags & WRDE_DOOFFS) {
		/* NULL-fill the reserved leading slots; we_wordc counts
		 * only real words. */
		if (wv) for (i=we->we_offs; i; i--)
			we->we_wordv[i-1] = 0;
		we->we_wordc -= we->we_offs;
	}
	return err;

nospace:
	/* On failure without WRDE_APPEND, leave the wordexp_t empty so a
	 * later wordfree is safe. */
	if (!(flags & WRDE_APPEND)) {
		we->we_wordc = 0;
		we->we_wordv = 0;
	}
	return WRDE_NOSPACE;
}
/* Terminate the calling thread: store the exit result, unwind cleanup
 * handlers, run TSD destructors, then tear the thread down under the
 * exit/kill locks. Never returns; the last thread exits the process.
 * Lock and signal-mask ordering below is load-bearing — do not reorder. */
_Noreturn void pthread_exit(void *result)
{
	pthread_t self = pthread_self();
	sigset_t set;

	self->result = result;

	/* Pop and run pthread_cleanup_push handlers, LIFO. Each node is
	 * unlinked before its handler runs so a handler that exits again
	 * cannot re-run it. */
	while (self->cancelbuf) {
		void (*f)(void *) = self->cancelbuf->__f;
		void *x = self->cancelbuf->__x;
		self->cancelbuf = self->cancelbuf->__next;
		f(x);
	}

	/* Run destructors for thread-specific data (pthread_key_create). */
	__pthread_tsd_run_dtors();

	/* Taken permanently: synchronizes with pthread_join/detach. */
	__lock(self->exitlock);

	/* Mark this thread dead before decrementing count */
	__lock(self->killlock);
	self->dead = 1;

	/* Block all signals before decrementing the live thread count.
	 * This is important to ensure that dynamically allocated TLS
	 * is not under-allocated/over-committed, and possibly for other
	 * reasons as well. */
	__block_all_sigs(&set);

	/* Wait to unlock the kill lock, which governs functions like
	 * pthread_kill which target a thread id, until signals have
	 * been blocked. This precludes observation of the thread id
	 * as a live thread (with application code running in it) after
	 * the thread was reported dead by ESRCH being returned. */
	__unlock(self->killlock);

	/* It's impossible to determine whether this is "the last thread"
	 * until performing the atomic decrement, since multiple threads
	 * could exit at the same time. For the last thread, revert the
	 * decrement and unblock signals to give the atexit handlers and
	 * stdio cleanup code a consistent state. */
	if (a_fetch_add(&libc.threads_minus_1, -1)==0) {
		libc.threads_minus_1 = 0;
		__restore_sigs(&set);
		exit(0);
	}

	if (self->detached && self->map_base) {
		/* Detached threads must avoid the kernel clear_child_tid
		 * feature, since the virtual address will have been
		 * unmapped and possibly already reused by a new mapping
		 * at the time the kernel would perform the write. In
		 * the case of threads that started out detached, the
		 * initial clone flags are correct, but if the thread was
		 * detached later (== 2), we need to clear it here. */
		if (self->detached == 2)
			__syscall(SYS_set_tid_address, 0);
		/* The following call unmaps the thread's stack mapping
		 * and then exits without touching the stack. */
		__unmapself(self->map_base, self->map_size);
	}

	/* Loop in case of spurious return from SYS_exit. */
	for (;;) __syscall(SYS_exit, 0);
}