static int
do_clone (struct pthread *pd, const struct pthread_attr *attr,
          int clone_flags, int (*fct) (void *), STACK_VARIABLES_PARMS,
          int stopped)
{
#ifdef PREPARE_CREATE
  PREPARE_CREATE;
#endif

  if (__builtin_expect (stopped != 0, 0))
    /* We make sure the thread does not run far by forcing it to get a
       lock.  We lock it here too so that the new thread cannot continue
       until we tell it to.  */
    lll_lock (pd->lock, LLL_PRIVATE);

  /* One more thread.  We cannot have the thread do this itself, since it
     might exist but not have been scheduled yet by the time we've returned
     and need to check the value to behave correctly.  We must do it before
     creating the thread, in case it does get scheduled first and then
     might mistakenly think it was the only thread.  In the failure case,
     we momentarily store a false value; this doesn't matter because there
     is no kosher thing a signal handler interrupting us right here can do
     that cares whether the thread count is correct.  */
  atomic_increment (&__nptl_nthreads);

  int rc = ARCH_CLONE (fct, STACK_VARIABLES_ARGS, clone_flags,
                       pd, &pd->tid, TLS_VALUE, &pd->tid);

  if (__builtin_expect (rc == -1, 0))
    {
      atomic_decrement (&__nptl_nthreads); /* Oops, we lied for a second.  */

      /* Perhaps a thread wants to change the IDs and is waiting for this
         stillborn thread.  */
      if (__builtin_expect (atomic_exchange_acq (&pd->setxid_futex, 0) == -2,
                            0))
        lll_futex_wake (&pd->setxid_futex, 1, LLL_PRIVATE);

      /* Free the resources.  */
      __deallocate_stack (pd);

      /* We have to translate error codes.  */
      return errno == ENOMEM ? EAGAIN : errno;
    }

  /* Now we have the possibility to set scheduling parameters etc.  */
  if (__builtin_expect (stopped != 0, 0))
    {
      INTERNAL_SYSCALL_DECL (err);
      int res = 0;

      /* Set the affinity mask if necessary.  */
      if (attr->cpuset != NULL)
        {
          res = INTERNAL_SYSCALL (sched_setaffinity, err, 3, pd->tid,
                                  attr->cpusetsize, attr->cpuset);

          if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (res, err), 0))
            {
              /* The operation failed.  We have to kill the thread.  First
                 send it the cancellation signal.  */
              INTERNAL_SYSCALL_DECL (err2);
            err_out:
              (void) INTERNAL_SYSCALL (tgkill, err2, 3,
                                       THREAD_GETMEM (THREAD_SELF, pid),
                                       pd->tid, SIGCANCEL);

              /* We do not free the stack here because the canceled thread
                 itself will do this.  */

              return (INTERNAL_SYSCALL_ERROR_P (res, err)
                      ? INTERNAL_SYSCALL_ERRNO (res, err)
                      : 0);
            }
        }

      /* Set the scheduling parameters.  */
      if ((attr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0)
        {
          res = INTERNAL_SYSCALL (sched_setscheduler, err, 3, pd->tid,
                                  pd->schedpolicy, &pd->schedparam);

          if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (res, err), 0))
            goto err_out;
        }
    }

  /* We now have for sure more than one thread.  The main thread might not
     yet have the flag set.  No need to set the global variable again if
     this is what we use.  */
  THREAD_SETMEM (THREAD_SELF, header.multiple_threads, 1);

  return 0;
}
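The ARCH_CLONE call above is the architecture-specific clone of the C library. The stand-alone program below is only a rough sketch of what that call amounts to through the public clone(2) wrapper, not glibc code: the names thread_fn, child_tid and STACK_SIZE are invented for illustration, and CLONE_SETTLS is deliberately omitted (the real code passes TLS_VALUE), so the child sticks to plain system calls. It shows the same pattern of passing one address twice, as &pd->tid is passed above: CLONE_PARENT_SETTID fills it with the new TID, and CLONE_CHILD_CLEARTID zeroes it and wakes a futex when the thread exits, which is what a join can wait on.

#define _GNU_SOURCE
#include <linux/futex.h>
#include <sched.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

#define STACK_SIZE (1024 * 1024)

/* Written by the kernel: set to the new TID before clone returns
   (CLONE_PARENT_SETTID), reset to 0 and futex-woken when the thread
   exits (CLONE_CHILD_CLEARTID).  */
static volatile pid_t child_tid;

static int
thread_fn (void *arg)
{
  /* No CLONE_SETTLS in this sketch, so avoid TLS-heavy libc facilities
     such as stdio; a bare write(2) is enough to show the thread ran.  */
  static const char msg[] = "child thread running\n";
  (void) arg;
  write (STDOUT_FILENO, msg, sizeof msg - 1);
  return 0;
}

int
main (void)
{
  void *stack = mmap (NULL, STACK_SIZE, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
  if (stack == MAP_FAILED)
    return 1;

  int flags = (CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND
               | CLONE_THREAD | CLONE_SYSVSEM
               | CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID);

  /* As in the ARCH_CLONE call, the same word serves as both the
     parent-settid and the child-cleartid location.  */
  if (clone (thread_fn, (char *) stack + STACK_SIZE, flags, NULL,
             &child_tid, NULL, &child_tid) == -1)
    return 1;

  /* "Join": wait until the kernel clears child_tid at thread exit.  */
  pid_t val;
  while ((val = child_tid) != 0)
    syscall (SYS_futex, &child_tid, FUTEX_WAIT, val, NULL, NULL, 0);

  return 0;
}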
static int
do_clone (struct pthread *pd, const struct pthread_attr *attr,
          int clone_flags, int (*fct) (void *), STACK_VARIABLES_PARMS,
          int stopped)
{
#ifdef PREPARE_CREATE
  PREPARE_CREATE;
#endif

  if (stopped)
    /* We make sure the thread does not run far by forcing it to get a
       lock.  We lock it here too so that the new thread cannot continue
       until we tell it to.  */
    lll_lock (pd->lock);

  /* One more thread.  We cannot have the thread do this itself, since it
     might exist but not have been scheduled yet by the time we've returned
     and need to check the value to behave correctly.  We must do it before
     creating the thread, in case it does get scheduled first and then
     might mistakenly think it was the only thread.  In the failure case,
     we momentarily store a false value; this doesn't matter because there
     is no kosher thing a signal handler interrupting us right here can do
     that cares whether the thread count is correct.  */
  atomic_increment (&__nptl_nthreads);

  if (ARCH_CLONE (fct, STACK_VARIABLES_ARGS, clone_flags,
                  pd, &pd->tid, TLS_VALUE, &pd->tid) == -1)
    {
      atomic_decrement (&__nptl_nthreads); /* Oops, we lied for a second.  */

      /* Failed.  If the thread is detached, remove the TCB here since
         the caller cannot do this.  The caller remembered the thread as
         detached and cannot reverify that it is not since it must not
         access the thread descriptor again.  */
      if (IS_DETACHED (pd))
        __deallocate_stack (pd);

      return errno;
    }

  /* Now we have the possibility to set scheduling parameters etc.  */
  if (__builtin_expect (stopped != 0, 0))
    {
      INTERNAL_SYSCALL_DECL (err);
      int res = 0;

      /* Set the affinity mask if necessary.  */
      if (attr->cpuset != NULL)
        {
          res = INTERNAL_SYSCALL (sched_setaffinity, err, 3, pd->tid,
                                  sizeof (cpu_set_t), attr->cpuset);

          if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (res, err), 0))
            {
              /* The operation failed.  We have to kill the thread.  First
                 send it the cancellation signal.  */
              INTERNAL_SYSCALL_DECL (err2);
            err_out:
#if __ASSUME_TGKILL
              (void) INTERNAL_SYSCALL (tgkill, err2, 3,
                                       THREAD_GETMEM (THREAD_SELF, pid),
                                       pd->tid, SIGCANCEL);
#else
              (void) INTERNAL_SYSCALL (tkill, err2, 2, pd->tid, SIGCANCEL);
#endif

              return (INTERNAL_SYSCALL_ERROR_P (res, err)
                      ? INTERNAL_SYSCALL_ERRNO (res, err)
                      : 0);
            }
        }

      /* Set the scheduling parameters.  */
      if ((attr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0)
        {
          res = INTERNAL_SYSCALL (sched_setscheduler, err, 3, pd->tid,
                                  pd->schedpolicy, &pd->schedparam);

          if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (res, err), 0))
            goto err_out;
        }
    }

  /* We now have for sure more than one thread.  The main thread might not
     yet have the flag set.  No need to set the global variable again if
     this is what we use.  */
  THREAD_SETMEM (THREAD_SELF, header.multiple_threads, 1);

  return 0;
}
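The stopped-thread path in this older variant follows the same idea as above: while the new thread is still parked on pd->lock, the creator applies the affinity mask and scheduling parameters to it by kernel TID, and signals it with tgkill (or tkill before __ASSUME_TGKILL) if that fails. The sketch below mirrors that pattern with public interfaces only; setup_new_thread is a made-up helper, and SIGRTMIN merely stands in for the glibc-internal SIGCANCEL.

#define _GNU_SOURCE
#include <errno.h>
#include <sched.h>
#include <signal.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Apply a CPU set and, optionally, explicit scheduling parameters to a
   thread identified by its kernel TID; on failure, signal that thread the
   way the error path above does.  Illustrative only.  */
static int
setup_new_thread (pid_t tid, const cpu_set_t *cpuset,
                  int policy, const struct sched_param *param)
{
  /* Affinity first, as in do_clone: the thread has not run far yet, so it
     has not executed on a disallowed CPU.  */
  if (cpuset != NULL
      && sched_setaffinity (tid, sizeof (cpu_set_t), cpuset) != 0)
    goto kill_it;

  /* Explicit scheduling parameters (the ATTR_FLAG_NOTINHERITSCHED case).  */
  if (param != NULL && sched_setscheduler (tid, policy, param) != 0)
    goto kill_it;

  return 0;

kill_it:
  {
    int saved_errno = errno;
    /* Direct a signal at exactly this thread of the current process,
       mirroring the tgkill above.  SIGRTMIN is only a placeholder for
       the internal SIGCANCEL.  */
    syscall (SYS_tgkill, getpid (), tid, SIGRTMIN);
    return saved_errno;
  }
}

int
main (void)
{
  /* Demonstrate on the calling thread itself: pin to the current CPU and
     reassert SCHED_OTHER, neither of which needs privileges.  */
  int cpu = sched_getcpu ();
  cpu_set_t one;
  CPU_ZERO (&one);
  CPU_SET (cpu >= 0 ? cpu : 0, &one);

  struct sched_param sp = { .sched_priority = 0 };
  pid_t self = (pid_t) syscall (SYS_gettid);

  return setup_new_thread (self, &one, SCHED_OTHER, &sp) != 0;
}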
static int
do_clone (struct pthread *pd, const struct pthread_attr *attr,
          int clone_flags, int (*fct) (void *), STACK_VARIABLES_PARMS,
          int stopped)
{
#if 0
  PREPARE_CREATE;
#endif

  if (__builtin_expect (stopped != 0, 0))
    /* We make sure the thread does not run far by forcing it to get a
       lock.  We lock it here too so that the new thread cannot continue
       until we tell it to.  */
    lll_lock (pd->lock, LLL_PRIVATE);

  /* One more thread.  We cannot have the thread do this itself, since it
     might exist but not have been scheduled yet by the time we've returned
     and need to check the value to behave correctly.  We must do it before
     creating the thread, in case it does get scheduled first and then
     might mistakenly think it was the only thread.  In the failure case,
     we momentarily store a false value; this doesn't matter because there
     is no kosher thing a signal handler interrupting us right here can do
     that cares whether the thread count is correct.  */
  atomic_increment (&__nptl_nthreads);

#if !defined(__native_client__) && !defined(__ZRT_HOST)
#error "This code was changed to work only in Native Client"
#endif

  /* Native Client does not have a notion of a thread ID, so we make one
     up.  It must be small enough to leave space for the number identifying
     the clock.  Use CLOCK_IDFIELD_SIZE to guarantee that.  */
  pd->tid = ((unsigned int) pd) >> CLOCK_IDFIELD_SIZE;

  /* Unlike the kernel, the Native Client thread_create syscall does not
     push a return address onto the stack.  We emulate this behavior on
     x86-64 to meet the ABI requirement ((%rsp + 8) mod 16 == 0).  On
     x86-32 the attribute 'force_align_arg_pointer' does the same for
     start_thread ().  */
#ifdef __x86_64__
  STACK_VARIABLES_ARGS -= 8;
#endif

  if (__nacl_irt_thread_create (fct, STACK_VARIABLES_ARGS, pd) != 0)
    {
      pd->tid = 0;
      atomic_decrement (&__nptl_nthreads); /* Oops, we lied for a second.  */

      /* Failed.  If the thread is detached, remove the TCB here since
         the caller cannot do this.  The caller remembered the thread as
         detached and cannot reverify that it is not since it must not
         access the thread descriptor again.  */
      if (IS_DETACHED (pd))
        __deallocate_stack (pd);

      /* We have to translate error codes.  */
      return errno == ENOMEM ? EAGAIN : errno;
    }

  /* Now we have the possibility to set scheduling parameters etc.  */
  if (__builtin_expect (stopped != 0, 0))
    {
      INTERNAL_SYSCALL_DECL (err);
      int res = 0;

      /* Set the affinity mask if necessary.  */
      if (attr->cpuset != NULL)
        {
          res = INTERNAL_SYSCALL (sched_setaffinity, err, 3, pd->tid,
                                  attr->cpusetsize, attr->cpuset);

          if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (res, err), 0))
            {
              /* The operation failed.  We have to kill the thread.  First
                 send it the cancellation signal.  */
              INTERNAL_SYSCALL_DECL (err2);
            err_out:
#if __ASSUME_TGKILL
              (void) INTERNAL_SYSCALL (tgkill, err2, 3,
                                       THREAD_GETMEM (THREAD_SELF, pid),
                                       pd->tid, SIGCANCEL);
#else
              (void) INTERNAL_SYSCALL (tkill, err2, 2, pd->tid, SIGCANCEL);
#endif

              return (INTERNAL_SYSCALL_ERROR_P (res, err)
                      ? INTERNAL_SYSCALL_ERRNO (res, err)
                      : 0);
            }
        }

      /* Set the scheduling parameters.  */
      if ((attr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0)
        {
          res = INTERNAL_SYSCALL (sched_setscheduler, err, 3, pd->tid,
                                  pd->schedpolicy, &pd->schedparam);

          if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (res, err), 0))
            goto err_out;
        }
    }

  /* We now have for sure more than one thread.  The main thread might not
     yet have the flag set.  No need to set the global variable again if
     this is what we use.  */
  THREAD_SETMEM (THREAD_SELF, header.multiple_threads, 1);

  return 0;
}
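The 8-byte adjustment in the Native Client variant exists because a normal call instruction pushes a return address, and the x86-64 psABI guarantees the callee that %rsp + 8 is 16-byte aligned on entry; since the NaCl-style thread_create entry point is reached without that push, the creator biases the stack pointer itself. Below is a tiny stand-alone sketch of just that computation; nacl_like_stack_top is a hypothetical helper, not part of the NaCl IRT.

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

/* Compute a stack top that gives a thread entry point the alignment it
   would see right after a 'call': (%rsp + 8) % 16 == 0.  Illustrative
   only.  */
static void *
nacl_like_stack_top (void *stack_base, size_t stack_size)
{
  uintptr_t top = (uintptr_t) stack_base + stack_size;

  /* Round down to 16 bytes, the alignment %rsp has at a call site...  */
  top &= ~(uintptr_t) 15;

  /* ...then reserve the 8 bytes a 'call' would have used for the return
     address, since a NaCl-style thread_create pushes nothing.  */
  top -= 8;

  return (void *) top;
}

int
main (void)
{
  size_t size = 64 * 1024;
  void *base = malloc (size);
  if (base == NULL)
    return 1;

  void *top = nacl_like_stack_top (base, size);
  assert (((uintptr_t) top + 8) % 16 == 0);

  free (base);
  return 0;
}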