/* Terminate the whole process in response to an exit request issued from
   `issuing_thread` (runs in the thread manager).  Forcibly cancels every
   other thread, reaps them, then restarts the issuing thread so it can
   finish the exit (atexit handlers, stdio flush) before the manager
   itself calls _exit(0).  `exitcode` is stashed in __pthread_exit_code
   for the issuing thread to use. */
static void pthread_handle_exit(pthread_descr issuing_thread, int exitcode)
{
  pthread_descr th;

  /* Publish the exit request before cancelling anyone, so threads that
     wake up can see the process is going down. */
  __pthread_exit_requested = 1;
  __pthread_exit_code = exitcode;

  /* A forced asynchronous cancellation follows.  Make sure we won't
     get stuck later in the main thread with a system lock being held
     by one of the cancelled threads.  Ideally one would use the same
     code as in pthread_atfork(), but we can't distinguish system and
     user handlers there. */
  __flockfilelist();

  /* Send the CANCEL signal to all running threads, including the main
     thread, but excluding the thread from which the exit request
     originated (that thread must complete the exit, e.g. calling atexit
     functions and flushing stdio buffers).  The p_nextlive ring links
     all live threads, so walking it from issuing_thread back to itself
     visits every other thread exactly once. */
  for (th = issuing_thread->p_nextlive;
       th != issuing_thread;
       th = th->p_nextlive) {
    kill(th->p_pid, __pthread_sig_cancel);
  }

  /* Now, wait for all these threads, so that they don't become zombies
     and their times are properly added to the thread manager's times.
     __WCLONE is needed because these are clone()-created children that
     do not report via the default SIGCHLD path. */
  for (th = issuing_thread->p_nextlive;
       th != issuing_thread;
       th = th->p_nextlive) {
    waitpid(th->p_pid, NULL, __WCLONE);
  }

  /* All other threads are gone; release the stdio file locks we took
     above, then let the issuing thread resume and complete the exit. */
  __fresetlockfiles();
  restart(issuing_thread);

  /* The manager's own job is done. */
  _exit(0);
}
/* fork() for a linuxthreads process: run the pthread_atfork() prepare
   handlers, fork via __libc_fork(), then run the child or parent
   handlers as appropriate.  The malloc lock is taken across the fork
   (marked "hack alert" below) so the child does not inherit a heap
   lock held by some other thread. */
pid_t __fork(void)
{
  pid_t pid;
  struct handler_list * prepare, * child, * parent;

  /* Snapshot the atfork handler lists under the lock so a concurrent
     pthread_atfork() cannot mutate them while we walk them. */
  pthread_mutex_lock(&pthread_atfork_lock);
  prepare = pthread_atfork_prepare;
  child = pthread_atfork_child;
  parent = pthread_atfork_parent;
  pthread_mutex_unlock(&pthread_atfork_lock);

  pthread_call_handlers(prepare);

#warning hack alert
  __MALLOC_LOCK;
  pid = __libc_fork();
#warning hack alert
  /* NOTE(review): this unlock executes in BOTH parent and child (it runs
     before the pid test).  In the child the lock state inherited across
     fork is what makes this work — confirm __MALLOC_UNLOCK is safe to
     call there; later versions of this code re-initialize the lock in
     the child instead. */
  __MALLOC_UNLOCK;

  if (pid == 0) {
    /* Child: back to a single thread; rebuild the main thread's
       descriptor and drop stale stdio locks, then run child handlers. */
    __pthread_reset_main_thread();
#warning need to reconsider __fresetlockfiles!
    __fresetlockfiles();
    pthread_call_handlers(child);
  } else {
    /* Parent (also reached when fork failed, pid == -1). */
    pthread_call_handlers(parent);
  }
  return pid;
}
/* Internal fork for a linuxthreads process.  Protocol:
     1. take pthread_atfork_lock and snapshot the handler lists — note
        the lock is deliberately HELD across the fork (no unlock here),
        so the lists cannot change between prepare and child/parent
        handlers;
     2. run prepare handlers, then quiesce pthread_once and the
        allocator by taking its locks (which malloc flavor is built
        decides which locks exist);
     3. fork;
     4. child: re-INITIALIZE every lock taken pre-fork (unlocking a
        lock inherited from another thread is not reliable) and reset
        thread state;
        parent: unlock everything in reverse order of acquisition.  */
static pid_t __fork(void)
{
  pid_t pid;
  struct handler_list * prepare, * child, * parent;

  /* Held across the fork; released (or re-initialized) per-branch below. */
  __pthread_mutex_lock(&pthread_atfork_lock);
  prepare = pthread_atfork_prepare;
  child = pthread_atfork_child;
  parent = pthread_atfork_parent;

  pthread_call_handlers(prepare);
  __pthread_once_fork_prepare();

  /* Quiesce the allocator so the child's heap is in a consistent state. */
#ifdef __MALLOC__
  __pthread_mutex_lock(&__malloc_sbrk_lock);
  __pthread_mutex_lock(&__malloc_heap_lock);
#ifdef __UCLIBC_UCLINUX_BROKEN_MUNMAP__
  __pthread_mutex_lock(&__malloc_mmb_heap_lock);
#endif
#elif defined(__MALLOC_STANDARD__) || defined(__MALLOC_SIMPLE__)
  __pthread_mutex_lock(&__malloc_lock);
#endif

  pid = __libc_fork();

  if (pid == 0) {
    /* Child: single-threaded again.  Re-initialize (not unlock) the
       locks taken before the fork, since they were acquired in the
       parent's address space. */
#if defined(__MALLOC_STANDARD__) || defined(__MALLOC_SIMPLE__)
    __libc_lock_init_recursive(__malloc_lock);
#elif defined(__MALLOC__)
#ifdef __UCLIBC_UCLINUX_BROKEN_MUNMAP__
    __libc_lock_init_adaptive(__malloc_mmb_heap_lock);
#endif
    __libc_lock_init_adaptive(__malloc_heap_lock);
    __libc_lock_init(__malloc_sbrk_lock);
#endif
    __libc_lock_init_adaptive(pthread_atfork_lock);
    __pthread_reset_main_thread();
    __fresetlockfiles();
    __pthread_once_fork_child();
    pthread_call_handlers(child);
  } else {
    /* Parent (also the fork-failure path, pid == -1): release in
       reverse order of acquisition. */
#if defined(__MALLOC_STANDARD__) || defined(__MALLOC_SIMPLE__)
    __pthread_mutex_unlock(&__malloc_lock);
#elif defined(__MALLOC__)
#ifdef __UCLIBC_UCLINUX_BROKEN_MUNMAP__
    __pthread_mutex_unlock(&__malloc_mmb_heap_lock);
#endif
    __pthread_mutex_unlock(&__malloc_heap_lock);
    __pthread_mutex_unlock(&__malloc_sbrk_lock);
#endif
    __pthread_mutex_unlock(&pthread_atfork_lock);
    __pthread_once_fork_parent();
    pthread_call_handlers(parent);
  }
  return pid;
}
/* Public fork() wrapper: run the registered pthread_atfork() handlers
   around the real fork.  Prepare handlers run in the calling process
   before the fork; child handlers run in the new child; parent handlers
   run in the parent (including when the fork itself failed). */
int fork(void)
{
  struct handler_list *prepare_list, *child_list, *parent_list;
  int child_pid;

  /* Snapshot the handler lists under the lock so a concurrent
     pthread_atfork() cannot mutate them while we walk them. */
  pthread_mutex_lock(&pthread_atfork_lock);
  prepare_list = pthread_atfork_prepare;
  child_list = pthread_atfork_child;
  parent_list = pthread_atfork_parent;
  pthread_mutex_unlock(&pthread_atfork_lock);

  pthread_call_handlers(prepare_list);

  child_pid = __fork();

  if (child_pid != 0) {
    /* Parent — also reached on fork failure (child_pid == -1). */
    pthread_call_handlers(parent_list);
    return child_pid;
  }

  /* Child: rebuild the main thread's descriptor, drop stdio locks
     inherited from other threads, then run the child handlers. */
  __pthread_reset_main_thread();
  __fresetlockfiles();
  pthread_call_handlers(child_list);
  return child_pid;
}