int __pthread_current_priority (void) { struct pthread *self = THREAD_SELF; if ((self->flags & (ATTR_FLAG_POLICY_SET | ATTR_FLAG_SCHED_SET)) == (ATTR_FLAG_POLICY_SET | ATTR_FLAG_SCHED_SET)) return self->schedparam.sched_priority; int result = 0; #ifdef TPP_PTHREAD_SCHED int policy; struct sched_param param; #endif lll_lock (self->lock, LLL_PRIVATE); if ((self->flags & ATTR_FLAG_SCHED_SET) == 0) { #ifndef TPP_PTHREAD_SCHED if (__sched_getparam (self->tid, &self->schedparam) != 0) #else if (__pthread_getschedparam (self->tid, &policy, &self->schedparam) != 0) #endif result = -1; else self->flags |= ATTR_FLAG_SCHED_SET; } if ((self->flags & ATTR_FLAG_POLICY_SET) == 0) { #ifndef TPP_PTHREAD_SCHED self->schedpolicy = __sched_getscheduler (self->tid); #else if (__pthread_getschedparam (self->tid, &self->schedpolicy, ¶m) != 0) self->schedpolicy = -1; #endif if (self->schedpolicy == -1) result = -1; else self->flags |= ATTR_FLAG_POLICY_SET; } if (result != -1) result = self->schedparam.sched_priority; lll_unlock (self->lock, LLL_PRIVATE); return result; }
/* Retrieve the scheduling policy and parameters of thread THREADID,
   using (and refreshing) the cached copies in the thread descriptor.
   Returns 0 on success, ESRCH for a dead handle, or a non-zero value
   if the kernel queries fail.  */
int
__pthread_getschedparam (pthread_t threadid, int *policy, struct sched_param *param)
{
  struct pthread *pd = (struct pthread *) threadid;

  /* Reject handles that no longer name a live thread descriptor.  */
  if (INVALID_TD_P (pd))
    return ESRCH;

  int err = 0;

  /* See CREATE THREAD NOTES in nptl/pthread_create.c.  */
  lll_lock (pd->lock, LLL_PRIVATE);

  /* The library maintains these cached values at all times; only if the
     user bypassed pthread_setschedparam could they be stale, and that is
     not the library's problem.  Fill in whichever half has not been
     fetched from the kernel yet.  */
  if ((pd->flags & ATTR_FLAG_SCHED_SET) == 0)
    {
      if (__sched_getparam (pd->tid, &pd->schedparam) == 0)
	pd->flags |= ATTR_FLAG_SCHED_SET;
      else
	err = 1;
    }

  if ((pd->flags & ATTR_FLAG_POLICY_SET) == 0)
    {
      pd->schedpolicy = __sched_getscheduler (pd->tid);
      if (pd->schedpolicy != -1)
	pd->flags |= ATTR_FLAG_POLICY_SET;
      else
	err = 1;
    }

  /* Only publish results if both queries succeeded (or were cached).  */
  if (err == 0)
    {
      *policy = pd->schedpolicy;
      memcpy (param, &pd->schedparam, sizeof (struct sched_param));
    }

  lll_unlock (pd->lock, LLL_PRIVATE);

  return err;
}
/* LinuxThreads pthread_getschedparam: look up the kernel PID behind the
   thread handle and query the scheduler directly.  Returns 0 on success,
   ESRCH for an invalid handle, or errno from the failed syscall.

   FIX (idiom, no behavior change): the unlikely-branch hint was written as
   `__builtin_expect (pol, 0) == -1', which hints that POL itself is
   usually 0 (false for SCHED_FIFO/SCHED_RR threads) and leaves the error
   comparison outside the hint.  The hint must wrap the whole condition.  */
int pthread_getschedparam(pthread_t thread, int *policy,
                          struct sched_param *param)
{
  pthread_handle handle = thread_handle(thread);
  int pid, pol;

  /* Snapshot the PID under the handle lock; the handle may be reused
     once we drop it, but the PID stays valid for the syscalls below.  */
  __pthread_lock(&handle->h_lock, NULL);
  if (__builtin_expect (invalid_handle(handle, thread), 0))
    {
      __pthread_unlock(&handle->h_lock);
      return ESRCH;
    }
  pid = handle->h_descr->p_pid;
  __pthread_unlock(&handle->h_lock);

  pol = __sched_getscheduler(pid);
  if (__builtin_expect (pol == -1, 0))
    return errno;
  if (__sched_getparam(pid, param) == -1)
    return errno;

  *policy = pol;
  return 0;
}
/* Return the calling thread's current scheduling priority, filling the
   descriptor's cached policy/parameter values from the kernel if they
   have not been retrieved yet.  Returns -1 on kernel-query failure.  */
int
__pthread_current_priority (void)
{
  struct pthread *self = THREAD_SELF;

  /* Both halves cached already?  Then no lock is needed.  */
  if ((self->flags & (ATTR_FLAG_POLICY_SET | ATTR_FLAG_SCHED_SET))
      == (ATTR_FLAG_POLICY_SET | ATTR_FLAG_SCHED_SET))
    return self->schedparam.sched_priority;

  int prio = 0;

  /* See CREATE THREAD NOTES in nptl/pthread_create.c.  */
  lll_lock (self->lock, LLL_PRIVATE);

  if ((self->flags & ATTR_FLAG_SCHED_SET) == 0)
    {
      if (__sched_getparam (self->tid, &self->schedparam) == 0)
	self->flags |= ATTR_FLAG_SCHED_SET;
      else
	prio = -1;
    }

  if ((self->flags & ATTR_FLAG_POLICY_SET) == 0)
    {
      self->schedpolicy = __sched_getscheduler (self->tid);
      if (self->schedpolicy != -1)
	self->flags |= ATTR_FLAG_POLICY_SET;
      else
	prio = -1;
    }

  /* Report the cached priority unless a query failed above.  */
  if (prio != -1)
    prio = self->schedparam.sched_priority;

  lll_unlock (self->lock, LLL_PRIVATE);

  return prio;
}
int __pthread_tpp_change_priority (int previous_prio, int new_prio) { struct pthread *self = THREAD_SELF; struct priority_protection_data *tpp = THREAD_GETMEM (self, tpp); if (tpp == NULL) { if (__sched_fifo_min_prio == -1) __init_sched_fifo_prio (); size_t size = sizeof *tpp; size += (__sched_fifo_max_prio - __sched_fifo_min_prio + 1) * sizeof (tpp->priomap[0]); tpp = calloc (size, 1); if (tpp == NULL) return ENOMEM; tpp->priomax = __sched_fifo_min_prio - 1; THREAD_SETMEM (self, tpp, tpp); } assert (new_prio == -1 || (new_prio >= __sched_fifo_min_prio && new_prio <= __sched_fifo_max_prio)); assert (previous_prio == -1 || (previous_prio >= __sched_fifo_min_prio && previous_prio <= __sched_fifo_max_prio)); int priomax = tpp->priomax; int newpriomax = priomax; if (new_prio != -1) { if (tpp->priomap[new_prio - __sched_fifo_min_prio] + 1 == 0) return EAGAIN; ++tpp->priomap[new_prio - __sched_fifo_min_prio]; if (new_prio > priomax) newpriomax = new_prio; } if (previous_prio != -1) { if (--tpp->priomap[previous_prio - __sched_fifo_min_prio] == 0 && priomax == previous_prio && previous_prio > new_prio) { int i; for (i = previous_prio - 1; i >= __sched_fifo_min_prio; --i) if (tpp->priomap[i - __sched_fifo_min_prio]) break; newpriomax = i; } } if (priomax == newpriomax) return 0; lll_lock (self->lock, LLL_PRIVATE); tpp->priomax = newpriomax; int result = 0; #ifdef TPP_PTHREAD_SCHED int policy; struct sched_param param; #endif if ((self->flags & ATTR_FLAG_SCHED_SET) == 0) { #ifndef TPP_PTHREAD_SCHED if (__sched_getparam (self->tid, &self->schedparam) != 0) #else if (__pthread_getschedparam (self->tid, &policy, &self->schedparam) != 0) #endif result = errno; else self->flags |= ATTR_FLAG_SCHED_SET; } if ((self->flags & ATTR_FLAG_POLICY_SET) == 0) { #ifndef TPP_PTHREAD_SCHED self->schedpolicy = __sched_getscheduler (self->tid); #else if (__pthread_getschedparam (self->tid, &self->schedpolicy, ¶m) != 0) self->schedpolicy = -1; #endif if (self->schedpolicy == -1) result = errno; 
else self->flags |= ATTR_FLAG_POLICY_SET; } if (result == 0) { struct sched_param sp = self->schedparam; if (sp.sched_priority < newpriomax || sp.sched_priority < priomax) { if (sp.sched_priority < newpriomax) sp.sched_priority = newpriomax; #ifndef TPP_PTHREAD_SCHED if (__sched_setscheduler (self->tid, self->schedpolicy, &sp) < 0) #else if (__pthread_setschedparam (self->tid, self->schedpolicy, &sp) < 0) #endif result = errno; } } lll_unlock (self->lock, LLL_PRIVATE); return result; }
/* Priority-protection (PTHREAD_PRIO_PROTECT) bookkeeping: move one
   reference from ceiling PREVIOUS_PRIO to ceiling NEW_PRIO (-1 means
   "none" on either side) in the calling thread's priomap, and if the
   effective maximum ceiling changed, push the new priority to the
   kernel.  Returns 0, ENOMEM, EAGAIN (refcount overflow), or errno
   from the scheduler syscalls.  */
int
__pthread_tpp_change_priority (int previous_prio, int new_prio)
{
  struct pthread *self = THREAD_SELF;
  struct priority_protection_data *tpp = THREAD_GETMEM (self, tpp);
  int fifo_min_prio = atomic_load_relaxed (&__sched_fifo_min_prio);
  int fifo_max_prio = atomic_load_relaxed (&__sched_fifo_max_prio);

  if (tpp == NULL)
    {
      /* See __init_sched_fifo_prio.  We need both the min and max prio,
	 so need to check both, and run initialization if either one is
	 not initialized.  The memory model's write-read coherence rule
	 makes this work.  */
      if (fifo_min_prio == -1 || fifo_max_prio == -1)
	{
	  __init_sched_fifo_prio ();
	  fifo_min_prio = atomic_load_relaxed (&__sched_fifo_min_prio);
	  fifo_max_prio = atomic_load_relaxed (&__sched_fifo_max_prio);
	}

      /* One refcount slot per SCHED_FIFO priority level, zeroed.  */
      size_t size = sizeof *tpp;
      size += (fifo_max_prio - fifo_min_prio + 1) * sizeof (tpp->priomap[0]);
      tpp = calloc (size, 1);
      if (tpp == NULL)
	return ENOMEM;
      /* "No ceiling held" is encoded as one below the minimum.  */
      tpp->priomax = fifo_min_prio - 1;
      THREAD_SETMEM (self, tpp, tpp);
    }

  assert (new_prio == -1
	  || (new_prio >= fifo_min_prio && new_prio <= fifo_max_prio));
  assert (previous_prio == -1
	  || (previous_prio >= fifo_min_prio && previous_prio <= fifo_max_prio));

  int priomax = tpp->priomax;
  int newpriomax = priomax;
  if (new_prio != -1)
    {
      /* priomap entries are reference counts; refuse to wrap around.  */
      if (tpp->priomap[new_prio - fifo_min_prio] + 1 == 0)
	return EAGAIN;
      ++tpp->priomap[new_prio - fifo_min_prio];
      if (new_prio > priomax)
	newpriomax = new_prio;
    }

  if (previous_prio != -1)
    {
      if (--tpp->priomap[previous_prio - fifo_min_prio] == 0
	  && priomax == previous_prio
	  && previous_prio > new_prio)
	{
	  /* The highest held ceiling was dropped; scan downward for the
	     next level with a non-zero refcount.  If none is left, this
	     yields fifo_min_prio - 1 ("no ceiling held").  */
	  int i;
	  for (i = previous_prio - 1; i >= fifo_min_prio; --i)
	    if (tpp->priomap[i - fifo_min_prio])
	      break;
	  newpriomax = i;
	}
    }

  /* Effective ceiling unchanged: nothing to tell the kernel.  */
  if (priomax == newpriomax)
    return 0;

  /* See CREATE THREAD NOTES in nptl/pthread_create.c.  */
  lll_lock (self->lock, LLL_PRIVATE);

  tpp->priomax = newpriomax;

  int result = 0;

  /* Refresh the cached scheduling parameters/policy if not yet known.  */
  if ((self->flags & ATTR_FLAG_SCHED_SET) == 0)
    {
      if (__sched_getparam (self->tid, &self->schedparam) != 0)
	result = errno;
      else
	self->flags |= ATTR_FLAG_SCHED_SET;
    }

  if ((self->flags & ATTR_FLAG_POLICY_SET) == 0)
    {
      self->schedpolicy = __sched_getscheduler (self->tid);
      if (self->schedpolicy == -1)
	result = errno;
      else
	self->flags |= ATTR_FLAG_POLICY_SET;
    }

  if (result == 0)
    {
      /* Raise the thread's priority to the new ceiling if needed; the
	 descriptor's stored base priority is left untouched.  */
      struct sched_param sp = self->schedparam;
      if (sp.sched_priority < newpriomax || sp.sched_priority < priomax)
	{
	  if (sp.sched_priority < newpriomax)
	    sp.sched_priority = newpriomax;

	  if (__sched_setscheduler (self->tid, self->schedpolicy, &sp) < 0)
	    result = errno;
	}
    }

  lll_unlock (self->lock, LLL_PRIVATE);

  return result;
}
/* Create a new thread on behalf of pthread_create: allocate TLS and a
   stack segment, initialize the thread descriptor and handle slot,
   clone the child, optionally report a TD_CREATE event to the debugger,
   and link the thread into the live-thread list.  Returns 0 or an
   errno-style error (EAGAIN, EPERM, or the clone failure).  */
static int pthread_handle_create(pthread_t *thread, const pthread_attr_t *attr,
				 void * (*start_routine)(void *), void *arg,
				 sigset_t * mask, int father_pid,
				 int report_events,
				 td_thr_events_t *event_maskp)
{
  size_t sseg;
  int pid;
  pthread_descr new_thread;
  char *stack_addr;
  char * new_thread_bottom;
  pthread_t new_thread_id;
  char *guardaddr = NULL;
  size_t guardsize = 0, stksize = 0;
  int pagesize = __getpagesize();
  int saved_errno = 0;

#ifdef USE_TLS
  new_thread = _dl_allocate_tls (NULL);
  if (new_thread == NULL)
    return EAGAIN;
# if TLS_DTV_AT_TP
  /* pthread_descr is below TP.  */
  new_thread = (pthread_descr) ((char *) new_thread - TLS_PRE_TCB_SIZE);
# endif
#else
  /* Prevent warnings.  */
  new_thread = NULL;
#endif

  /* First check whether we have to change the policy and if yes, whether
     we can do this.  Normally this should be done by examining the
     return value of the __sched_setscheduler call in pthread_start_thread
     but this is hard to implement.  FIXME  */
  if (attr != NULL && attr->__schedpolicy != SCHED_OTHER && geteuid () != 0)
    return EPERM;
  /* Find a free segment for the thread, and allocate a stack if needed */
  for (sseg = 2; ; sseg++)
    {
      if (sseg >= PTHREAD_THREADS_MAX)
	{
	  /* Thread table full: undo the TLS allocation before failing.  */
#ifdef USE_TLS
# if TLS_DTV_AT_TP
	  new_thread = (pthread_descr) ((char *) new_thread + TLS_PRE_TCB_SIZE);
# endif
	  _dl_deallocate_tls (new_thread, true);
#endif
	  return EAGAIN;
	}
      if (__pthread_handles[sseg].h_descr != NULL)
	continue;
      if (pthread_allocate_stack(attr, thread_segment(sseg), pagesize,
				 &stack_addr, &new_thread_bottom,
				 &guardaddr, &guardsize, &stksize) == 0)
	{
#ifdef USE_TLS
	  new_thread->p_stackaddr = stack_addr;
#else
	  new_thread = (pthread_descr) stack_addr;
#endif
	  break;
#ifndef __ARCH_USE_MMU__
	}
      else
	{
	  /* When there is MMU, mmap () is used to allocate the stack. If one
	   * segment is already mapped, we should continue to see if we can
	   * use the next one. However, when there is no MMU, malloc () is used.
	   * It's waste of CPU cycles to continue to try if it fails.  */
	  return EAGAIN;
#endif
	}
    }

  __pthread_handles_num++;
  /* Allocate new thread identifier */
  pthread_threads_counter += PTHREAD_THREADS_MAX;
  new_thread_id = sseg + pthread_threads_counter;
  /* Initialize the thread descriptor.  Elements which have to be
     initialized to zero already have this value.  */
#if !defined USE_TLS || !TLS_DTV_AT_TP
  new_thread->p_header.data.tcb = new_thread;
  new_thread->p_header.data.self = new_thread;
#endif
#if TLS_MULTIPLE_THREADS_IN_TCB || !defined USE_TLS || !TLS_DTV_AT_TP
  new_thread->p_multiple_threads = 1;
#endif
  new_thread->p_tid = new_thread_id;
  new_thread->p_lock = &(__pthread_handles[sseg].h_lock);
  new_thread->p_cancelstate = PTHREAD_CANCEL_ENABLE;
  new_thread->p_canceltype = PTHREAD_CANCEL_DEFERRED;
#if !(USE_TLS && HAVE___THREAD)
  new_thread->p_errnop = &new_thread->p_errno;
  new_thread->p_h_errnop = &new_thread->p_h_errno;
  new_thread->p_resp = &new_thread->p_res;
#endif
  new_thread->p_guardaddr = guardaddr;
  new_thread->p_guardsize = guardsize;
  new_thread->p_nr = sseg;
  new_thread->p_inheritsched = attr ? attr->__inheritsched : 0;
  /* Cap alloca usage to a quarter of the stack (bounded globally).  */
  new_thread->p_alloca_cutoff = stksize / 4 > __MAX_ALLOCA_CUTOFF
				? __MAX_ALLOCA_CUTOFF : stksize / 4;
  /* Initialize the thread handle */
  __pthread_init_lock(&__pthread_handles[sseg].h_lock);
  __pthread_handles[sseg].h_descr = new_thread;
  __pthread_handles[sseg].h_bottom = new_thread_bottom;
  /* Determine scheduling parameters for the thread */
  new_thread->p_start_args.schedpolicy = -1;
  if (attr != NULL)
    {
      new_thread->p_detached = attr->__detachstate;
      new_thread->p_userstack = attr->__stackaddr_set;
      switch(attr->__inheritsched)
	{
	case PTHREAD_EXPLICIT_SCHED:
	  new_thread->p_start_args.schedpolicy = attr->__schedpolicy;
	  memcpy (&new_thread->p_start_args.schedparam, &attr->__schedparam,
		  sizeof (struct sched_param));
	  break;
	case PTHREAD_INHERIT_SCHED:
	  /* Inherit policy/parameters from the creating thread.  */
	  new_thread->p_start_args.schedpolicy = __sched_getscheduler(father_pid);
	  __sched_getparam(father_pid, &new_thread->p_start_args.schedparam);
	  break;
	}
      new_thread->p_priority = new_thread->p_start_args.schedparam.sched_priority;
    }
  /* Finish setting up arguments to pthread_start_thread */
  new_thread->p_start_args.start_routine = start_routine;
  new_thread->p_start_args.arg = arg;
  new_thread->p_start_args.mask = *mask;
  /* Make the new thread ID available already now.  If any of the later
     functions fail we return an error value and the caller must not use
     the stored thread ID.  */
  *thread = new_thread_id;
  /* Raise priority of thread manager if needed */
  __pthread_manager_adjust_prio(new_thread->p_priority);
  /* Do the cloning.  We have to use two different functions depending
     on whether we are debugging or not.  */
  pid = 0;     /* Note that the thread never can have PID zero.  */
  if (report_events)
    {
      /* See whether the TD_CREATE event bit is set in any of the masks.  */
      int idx = __td_eventword (TD_CREATE);
      /* NOTE(review): this local `mask' shadows the sigset_t parameter;
	 the parameter's last use (*mask above) precedes this point.  */
      uint32_t mask = __td_eventmask (TD_CREATE);
      if ((mask & (__pthread_threads_events.event_bits[idx]
		   | event_maskp->event_bits[idx])) != 0)
	{
	  /* Lock the mutex the child will use now so that it will stop.  */
	  __pthread_lock(new_thread->p_lock, NULL);
	  /* We have to report this event.  */
#ifdef NEED_SEPARATE_REGISTER_STACK
	  /* Perhaps this version should be used on all platforms.  But
	     this requires that __clone2 be uniformly supported
	     everywhere.  And there is some argument for changing the
	     __clone2 interface to pass sp and bsp instead, making it more
	     IA64 specific, but allowing stacks to grow outward from each
	     other, to get less paging and fewer mmaps.  */
	  pid = __clone2(pthread_start_thread_event,
			 (void **)new_thread_bottom,
			 (char *)stack_addr - new_thread_bottom,
			 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
			 CLONE_SYSVSEM | __pthread_sig_cancel, new_thread);
#elif _STACK_GROWS_UP
	  pid = __clone(pthread_start_thread_event, (void *) new_thread_bottom,
			CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
			CLONE_SYSVSEM | __pthread_sig_cancel, new_thread);
#else
	  pid = __clone(pthread_start_thread_event, stack_addr,
			CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
			CLONE_SYSVSEM | __pthread_sig_cancel, new_thread);
#endif
	  saved_errno = errno;
	  if (pid != -1)
	    {
	      /* Now fill in the information about the new thread in
		 the newly created thread's data structure.  We cannot let
		 the new thread do this since we don't know whether it was
		 already scheduled when we send the event.  */
	      new_thread->p_eventbuf.eventdata = new_thread;
	      new_thread->p_eventbuf.eventnum = TD_CREATE;
	      __pthread_last_event = new_thread;
	      /* We have to set the PID here since the callback function
		 in the debug library will need it and we cannot guarantee
		 the child got scheduled before the debugger.  */
	      new_thread->p_pid = pid;
	      /* Now call the function which signals the event.  */
	      __linuxthreads_create_event ();
	      /* Now restart the thread.  */
	      __pthread_unlock(new_thread->p_lock);
	    }
	}
    }
  if (pid == 0)
    {
      /* Non-debug path (or event bit not set): plain clone.  */
#ifdef NEED_SEPARATE_REGISTER_STACK
      pid = __clone2(pthread_start_thread,
		     (void **)new_thread_bottom,
		     (char *)stack_addr - new_thread_bottom,
		     CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
		     CLONE_SYSVSEM | __pthread_sig_cancel, new_thread);
#elif _STACK_GROWS_UP
      pid = __clone(pthread_start_thread, (void *) new_thread_bottom,
		    CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
		    CLONE_SYSVSEM | __pthread_sig_cancel, new_thread);
#else
      pid = __clone(pthread_start_thread, stack_addr,
		    CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
		    CLONE_SYSVSEM | __pthread_sig_cancel, new_thread);
#endif /* !NEED_SEPARATE_REGISTER_STACK */
      saved_errno = errno;
    }
  /* Check if cloning succeeded */
  if (pid == -1)
    {
      /* Free the stack if we allocated it */
      if (attr == NULL || !attr->__stackaddr_set)
	{
#ifdef NEED_SEPARATE_REGISTER_STACK
	  size_t stacksize = ((char *)(new_thread->p_guardaddr)
			      - new_thread_bottom);
	  munmap((caddr_t)new_thread_bottom,
		 2 * stacksize + new_thread->p_guardsize);
#elif _STACK_GROWS_UP
# ifdef USE_TLS
	  size_t stacksize = guardaddr - stack_addr;
	  munmap(stack_addr, stacksize + guardsize);
# else
	  size_t stacksize = guardaddr - (char *)new_thread;
	  munmap(new_thread, stacksize + guardsize);
# endif
#else
# ifdef USE_TLS
	  size_t stacksize = stack_addr - new_thread_bottom;
# else
	  size_t stacksize = (char *)(new_thread+1) - new_thread_bottom;
# endif
	  munmap(new_thread_bottom - guardsize, guardsize + stacksize);
#endif
	}
#ifdef USE_TLS
# if TLS_DTV_AT_TP
      new_thread = (pthread_descr) ((char *) new_thread + TLS_PRE_TCB_SIZE);
# endif
      _dl_deallocate_tls (new_thread, true);
#endif
      /* Release the handle slot and report the errno saved right after
	 the failing clone (munmap above may have clobbered errno).  */
      __pthread_handles[sseg].h_descr = NULL;
      __pthread_handles[sseg].h_bottom = NULL;
      __pthread_handles_num--;
      return saved_errno;
    }
  /* Insert new thread in doubly linked list of active threads */
  new_thread->p_prevlive = __pthread_main_thread;
  new_thread->p_nextlive = __pthread_main_thread->p_nextlive;
  __pthread_main_thread->p_nextlive->p_prevlive = new_thread;
  __pthread_main_thread->p_nextlive = new_thread;
  /* Set pid field of the new thread, in case we get there before the
     child starts. */
  new_thread->p_pid = pid;
  return 0;
}
/* Early-LinuxThreads thread creation: map a fixed-size stack segment,
   initialize the thread descriptor at its top, clone the child, and
   link it into the live-thread list.  Returns 0, EAGAIN if the thread
   table is full, or errno from a failed clone.  */
static int pthread_handle_create(pthread_t *thread, const pthread_attr_t *attr,
				 void * (*start_routine)(void *), void *arg,
				 sigset_t * mask, int father_pid)
{
  size_t sseg;
  int pid;
  pthread_descr new_thread;
  pthread_t new_thread_id;
  int i;

  /* Find a free stack segment for the current stack */
  for (sseg = 1; ; sseg++)
    {
      if (sseg >= PTHREAD_THREADS_MAX)
	return EAGAIN;
      if (__pthread_handles[sseg].h_descr != NULL)
	continue;
      new_thread = thread_segment(sseg);
      /* Allocate space for stack and thread descriptor.  The descriptor
	 sits at the top of the segment; the stack grows down below it.  */
      if (mmap((caddr_t)((char *)(new_thread+1) - INITIAL_STACK_SIZE),
	       INITIAL_STACK_SIZE,
	       PROT_READ | PROT_WRITE | PROT_EXEC,
	       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED | MAP_GROWSDOWN,
	       -1, 0) != (caddr_t) -1)
	break;
      /* It seems part of this segment is already mapped. Try the next. */
    }
  /* Allocate new thread identifier */
  pthread_threads_counter += PTHREAD_THREADS_MAX;
  new_thread_id = sseg + pthread_threads_counter;
  /* Initialize the thread descriptor */
  new_thread->p_nextwaiting = NULL;
  new_thread->p_tid = new_thread_id;
  new_thread->p_priority = 0;
  new_thread->p_spinlock = &(__pthread_handles[sseg].h_spinlock);
  new_thread->p_signal = 0;
  new_thread->p_signal_jmp = NULL;
  new_thread->p_cancel_jmp = NULL;
  new_thread->p_terminated = 0;
  new_thread->p_detached = attr == NULL ? 0 : attr->detachstate;
  new_thread->p_exited = 0;
  new_thread->p_retval = NULL;
  new_thread->p_joining = NULL;
  new_thread->p_cleanup = NULL;
  new_thread->p_cancelstate = PTHREAD_CANCEL_ENABLE;
  new_thread->p_canceltype = PTHREAD_CANCEL_DEFERRED;
  new_thread->p_canceled = 0;
  new_thread->p_errno = 0;
  new_thread->p_h_errno = 0;
  for (i = 0; i < PTHREAD_KEY_1STLEVEL_SIZE; i++)
    new_thread->p_specific[i] = NULL;
  /* Initialize the thread handle */
  __pthread_handles[sseg].h_spinlock = 0; /* should already be 0 */
  __pthread_handles[sseg].h_descr = new_thread;
  /* Determine scheduling parameters for the thread */
  new_thread->p_start_args.schedpolicy = SCHED_OTHER;
  if (attr != NULL && attr->schedpolicy != SCHED_OTHER)
    {
      switch(attr->inheritsched)
	{
	case PTHREAD_EXPLICIT_SCHED:
	  new_thread->p_start_args.schedpolicy = attr->schedpolicy;
	  new_thread->p_start_args.schedparam = attr->schedparam;
	  break;
	case PTHREAD_INHERIT_SCHED:
	  /* Inherit policy/parameters from the creating thread.  */
	  new_thread->p_start_args.schedpolicy = __sched_getscheduler(father_pid);
	  __sched_getparam(father_pid, &new_thread->p_start_args.schedparam);
	  break;
	}
      new_thread->p_priority = new_thread->p_start_args.schedparam.sched_priority;
    }
  /* Finish setting up arguments to pthread_start_thread */
  new_thread->p_start_args.start_routine = start_routine;
  new_thread->p_start_args.arg = arg;
  new_thread->p_start_args.mask = *mask;
  /* Do the cloning */
  pid = __clone(pthread_start_thread, (void **) new_thread,
		CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
		PTHREAD_SIG_RESTART, new_thread);
  /* Check if cloning succeeded */
  if (pid == -1)
    {
      /* Free the stack */
      /* NOTE(review): munmap may overwrite errno; the errno returned
	 below could be munmap's rather than clone's — confirm intent.  */
      munmap((caddr_t)((char *)(new_thread+1) - INITIAL_STACK_SIZE),
	     INITIAL_STACK_SIZE);
      __pthread_handles[sseg].h_descr = NULL;
      return errno;
    }
  /* Insert new thread in doubly linked list of active threads */
  new_thread->p_prevlive = __pthread_main_thread;
  new_thread->p_nextlive = __pthread_main_thread->p_nextlive;
  __pthread_main_thread->p_nextlive->p_prevlive = new_thread;
  __pthread_main_thread->p_nextlive = new_thread;
  /* Set pid field of the new thread, in case we get there before the
     child starts. */
  new_thread->p_pid = pid;
  /* We're all set */
  *thread = new_thread_id;
  return 0;
}
/* Fill *ATTR with the attributes of the running thread THREAD.
   Returns 0 on success or an errno-style error.

   BUG FIX: the function carefully accumulated failures from the
   /proc/self/maps path into RET (errno from fopen/getrlimit, ENOENT when
   no matching map entry was found) but then ended with `return 0;',
   silently discarding every one of those errors.  It now returns RET.  */
int pthread_getattr_np (pthread_t thread, pthread_attr_t *attr)
{
  pthread_handle handle = thread_handle (thread);
  pthread_descr descr;
  int ret = 0;

  if (handle == NULL)
    return ENOENT;

  descr = handle->h_descr;

  attr->__detachstate = (descr->p_detached
			 ? PTHREAD_CREATE_DETACHED
			 : PTHREAD_CREATE_JOINABLE);

  attr->__schedpolicy = __sched_getscheduler (descr->p_pid);
  if (attr->__schedpolicy == -1)
    return errno;

  if (__sched_getparam (descr->p_pid,
			(struct sched_param *) &attr->__schedparam) != 0)
    return errno;

  attr->__inheritsched = descr->p_inheritsched;
  attr->__scope = PTHREAD_SCOPE_SYSTEM;

  /* Stack size: distance between the descriptor/stack top and the guard
     area, depending on growth direction and TLS layout.  */
#ifdef _STACK_GROWS_DOWN
# ifdef USE_TLS
  attr->__stacksize = descr->p_stackaddr - (char *)descr->p_guardaddr
		      - descr->p_guardsize;
# else
  attr->__stacksize = (char *)(descr + 1) - (char *)descr->p_guardaddr
		      - descr->p_guardsize;
# endif
#else
# ifdef USE_TLS
  attr->__stacksize = (char *)descr->p_guardaddr - descr->p_stackaddr;
# else
  attr->__stacksize = (char *)descr->p_guardaddr - (char *)descr;
# endif
#endif
  attr->__guardsize = descr->p_guardsize;
  attr->__stackaddr_set = descr->p_userstack;
#ifdef NEED_SEPARATE_REGISTER_STACK
  if (descr->p_userstack == 0)
    attr->__stacksize *= 2;
  /* XXX This is awkward.  The guard pages are in the middle of the
     two stacks.  We must count the guard size in the stack size since
     otherwise the range of the stack area cannot be computed.  */
  attr->__stacksize += attr->__guardsize;
#endif
#ifdef USE_TLS
  attr->__stackaddr = descr->p_stackaddr;
#else
# ifndef _STACK_GROWS_UP
  attr->__stackaddr = (char *)(descr + 1);
# else
  attr->__stackaddr = (char *)descr;
# endif
#endif

#ifdef USE_TLS
  if (attr->__stackaddr == NULL)
#else
  if (descr == &__pthread_initial_thread)
#endif
    {
      /* The initial thread's stack was set up by the kernel; recover its
	 bounds from the resource limit and /proc/self/maps.  */
      /* Stack size limit.  */
      struct rlimit rl;

      /* The safest way to get the top of the stack is to read
	 /proc/self/maps and locate the line into which
	 __libc_stack_end falls.  */
      FILE *fp = fopen ("/proc/self/maps", "rc");
      if (fp == NULL)
	ret = errno;
      /* We need the limit of the stack in any case.  */
      else if (getrlimit (RLIMIT_STACK, &rl) != 0)
	ret = errno;
      else
	{
	  /* We need no locking.  */
	  __fsetlocking (fp, FSETLOCKING_BYCALLER);

	  /* Until we found an entry (which should always be the case)
	     mark the result as a failure.  */
	  ret = ENOENT;

	  char *line = NULL;
	  size_t linelen = 0;
	  uintptr_t last_to = 0;

	  while (! feof_unlocked (fp))
	    {
	      if (__getdelim (&line, &linelen, '\n', fp) <= 0)
		break;

	      uintptr_t from;
	      uintptr_t to;
	      if (sscanf (line, "%" SCNxPTR "-%" SCNxPTR, &from, &to) != 2)
		continue;
	      if (from <= (uintptr_t) __libc_stack_end
		  && (uintptr_t) __libc_stack_end < to)
		{
		  /* Found the entry.  Now we have the info we need.  */
		  attr->__stacksize = rl.rlim_cur;
#ifdef _STACK_GROWS_UP
		  /* Don't check to enforce a limit on the __stacksize */
		  attr->__stackaddr = (void *) from;
#else
		  attr->__stackaddr = (void *) to;

		  /* The limit might be too high.  Clamp to the gap below
		     the previous mapping.  */
		  if ((size_t) attr->__stacksize
		      > (size_t) attr->__stackaddr - last_to)
		    attr->__stacksize = (size_t) attr->__stackaddr - last_to;
#endif
		  /* We succeed and no need to look further.  */
		  ret = 0;
		  break;
		}
	      last_to = to;
	    }

	  fclose (fp);
	  free (line);
	}
    }

  /* Previously `return 0;', which discarded the errors recorded in RET
     by the /proc/self/maps path above.  */
  return ret;
}