int __pthread_mutex_init(pthread_mutex_t * mutex,
                         const pthread_mutexattr_t * mutex_attr)
{
  __pthread_init_lock(&mutex->__m_lock);
  mutex->__m_kind =
    mutex_attr == NULL ? PTHREAD_MUTEX_TIMED_NP : mutex_attr->__mutexkind;
  mutex->__m_count = 0;
  mutex->__m_owner = NULL;
  return 0;
}
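/* Illustrative usage sketch, not part of the original source: how an
   application would exercise the initializer above through the public
   pthread_mutex_init() name it is normally aliased to.  A recursive kind
   is used only to show __m_kind ending up different from the
   PTHREAD_MUTEX_TIMED_NP default; all names below are standard POSIX /
   LinuxThreads identifiers, not new helpers. */
#if 0	/* example only */
#include <pthread.h>

static pthread_mutex_t example_lock;

static void example_mutex_setup(void)
{
  pthread_mutexattr_t attr;

  pthread_mutexattr_init(&attr);
  pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE_NP);
  pthread_mutex_init(&example_lock, &attr);   /* copies the kind into __m_kind */
  pthread_mutexattr_destroy(&attr);
}
#endif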
int sem_init(sem_t *sem, int pshared, unsigned int value)
{
  if (value > SEM_VALUE_MAX) {
    __set_errno(EINVAL);
    return -1;
  }
  if (pshared) {
    __set_errno(ENOSYS);
    return -1;
  }
  __pthread_init_lock(&sem->__sem_lock);
  sem->__sem_value = value;
  sem->__sem_waiting = NULL;
  return 0;
}
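/* Illustrative usage sketch, not part of the original source: a
   process-private semaphore, since the implementation above rejects
   pshared != 0 with ENOSYS.  The identifier names are examples only. */
#if 0	/* example only */
#include <semaphore.h>

static sem_t example_sem;

static void example_sem_use(void)
{
  sem_init(&example_sem, /* pshared */ 0, /* value */ 1);
  sem_wait(&example_sem);       /* decrements __sem_value or blocks */
  /* ... critical section ... */
  sem_post(&example_sem);
  sem_destroy(&example_sem);
}
#endif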
int __pthread_rwlock_init (pthread_rwlock_t *rwlock,
                           const pthread_rwlockattr_t *attr)
{
  __pthread_init_lock(&rwlock->__rw_lock);
  rwlock->__rw_readers = 0;
  rwlock->__rw_writer = NULL;
  rwlock->__rw_read_waiting = NULL;
  rwlock->__rw_write_waiting = NULL;
  if (attr == NULL) {
    rwlock->__rw_kind = PTHREAD_RWLOCK_DEFAULT_NP;
    rwlock->__rw_pshared = PTHREAD_PROCESS_PRIVATE;
  } else {
    rwlock->__rw_kind = attr->__lockkind;
    rwlock->__rw_pshared = attr->__pshared;
  }
  return 0;
}
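/* Illustrative usage sketch, not part of the original source: initializing
   a read-write lock with an explicit attribute object so that __rw_kind
   and __rw_pshared come from the attributes rather than the defaults
   chosen above when attr == NULL.  pthread_rwlockattr_setkind_np() and
   PTHREAD_RWLOCK_PREFER_WRITER_NP are the non-portable LinuxThreads
   names; availability is assumed here. */
#if 0	/* example only */
#include <pthread.h>

static pthread_rwlock_t example_rwlock;

static void example_rwlock_setup(void)
{
  pthread_rwlockattr_t attr;

  pthread_rwlockattr_init(&attr);
  pthread_rwlockattr_setkind_np(&attr, PTHREAD_RWLOCK_PREFER_WRITER_NP);
  pthread_rwlock_init(&example_rwlock, &attr);
  pthread_rwlockattr_destroy(&attr);
}
#endif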
static int pthread_handle_create(pthread_t *thread, const pthread_attr_t *attr,
                                 void * (*start_routine)(void *), void *arg,
                                 sigset_t * mask, int father_pid,
                                 int report_events,
                                 td_thr_events_t *event_maskp)
{
  size_t sseg;
  int pid;
  pthread_descr new_thread;
  char * new_thread_bottom;
  char * new_thread_top;
  pthread_t new_thread_id;
  char *guardaddr = NULL;
  size_t guardsize = 0;
  int pagesize = getpagesize();
  int saved_errno = 0;

  /* First check whether we have to change the policy and if yes, whether
     we can do this.  Normally this should be done by examining the
     return value of the sched_setscheduler call in pthread_start_thread
     but this is hard to implement.  FIXME  */
  if (attr != NULL && attr->__schedpolicy != SCHED_OTHER && geteuid () != 0)
    return EPERM;

  /* Find a free segment for the thread, and allocate a stack if needed */
  for (sseg = 2; ; sseg++)
    {
      if (sseg >= PTHREAD_THREADS_MAX)
        return EAGAIN;
      if (__pthread_handles[sseg].h_descr != NULL)
        continue;
      if (pthread_allocate_stack(attr, thread_segment(sseg), pagesize,
                                 &new_thread, &new_thread_bottom,
                                 &guardaddr, &guardsize) == 0)
        break;
#ifndef __ARCH_USE_MMU__
      else
        /* When there is an MMU, mmap() is used to allocate the stack. If one
         * segment is already mapped, we should continue to see if we can
         * use the next one. However, when there is no MMU, malloc() is used.
         * It's a waste of CPU cycles to continue to try if it fails.  */
        return EAGAIN;
#endif
    }

  __pthread_handles_num++;

  /* Allocate new thread identifier */
  pthread_threads_counter += PTHREAD_THREADS_MAX;
  new_thread_id = sseg + pthread_threads_counter;

  /* Initialize the thread descriptor.  Elements which have to be
     initialized to zero already have this value. */
  new_thread->p_tid = new_thread_id;
  new_thread->p_lock = &(__pthread_handles[sseg].h_lock);
  new_thread->p_cancelstate = PTHREAD_CANCEL_ENABLE;
  new_thread->p_canceltype = PTHREAD_CANCEL_DEFERRED;
  new_thread->p_errnop = &new_thread->p_errno;
  new_thread->p_h_errnop = &new_thread->p_h_errno;
#ifdef __UCLIBC_HAS_XLOCALE__
  /* Initialize thread's locale to the global locale. */
  new_thread->locale = __global_locale;
#endif /* __UCLIBC_HAS_XLOCALE__ */
  new_thread->p_guardaddr = guardaddr;
  new_thread->p_guardsize = guardsize;
  new_thread->p_self = new_thread;
  new_thread->p_nr = sseg;

  /* Initialize the thread handle */
  __pthread_init_lock(&__pthread_handles[sseg].h_lock);
  __pthread_handles[sseg].h_descr = new_thread;
  __pthread_handles[sseg].h_bottom = new_thread_bottom;

  /* Determine scheduling parameters for the thread */
  new_thread->p_start_args.schedpolicy = -1;
  if (attr != NULL)
    {
      new_thread->p_detached = attr->__detachstate;
      new_thread->p_userstack = attr->__stackaddr_set;

      switch (attr->__inheritsched)
        {
        case PTHREAD_EXPLICIT_SCHED:
          new_thread->p_start_args.schedpolicy = attr->__schedpolicy;
          memcpy (&new_thread->p_start_args.schedparam, &attr->__schedparam,
                  sizeof (struct sched_param));
          break;
        case PTHREAD_INHERIT_SCHED:
          new_thread->p_start_args.schedpolicy = sched_getscheduler(father_pid);
          sched_getparam(father_pid, &new_thread->p_start_args.schedparam);
          break;
        }
      new_thread->p_priority =
        new_thread->p_start_args.schedparam.sched_priority;
    }

  /* Finish setting up arguments to pthread_start_thread */
  new_thread->p_start_args.start_routine = start_routine;
  new_thread->p_start_args.arg = arg;
  new_thread->p_start_args.mask = *mask;

  /* Raise priority of thread manager if needed */
  __pthread_manager_adjust_prio(new_thread->p_priority);

  /* Do the cloning.  We have to use two different functions depending
     on whether we are debugging or not. */
  pid = 0;     /* Note that the thread never can have PID zero. */

  new_thread_top = ((char *)new_thread - THREAD_STACK_OFFSET);

  /* ******************************************************** */
  /* This code was moved from below to cope with running threads
   * on uClinux systems.  See comment below...
   * Insert new thread in doubly linked list of active threads */
  new_thread->p_prevlive = __pthread_main_thread;
  new_thread->p_nextlive = __pthread_main_thread->p_nextlive;
  __pthread_main_thread->p_nextlive->p_prevlive = new_thread;
  __pthread_main_thread->p_nextlive = new_thread;
  /* ********************************************************* */

  if (report_events)
    {
      /* See whether the TD_CREATE event bit is set in any of the masks. */
      int idx = __td_eventword (TD_CREATE);
      uint32_t m = __td_eventmask (TD_CREATE);

      if ((m & (__pthread_threads_events.event_bits[idx]
                | event_maskp->event_bits[idx])) != 0)
        {
          /* Lock the mutex the child will use now so that it will stop. */
          __pthread_lock(new_thread->p_lock, NULL);

          /* We have to report this event. */
#ifdef __ia64__
          pid = __clone2(pthread_start_thread_event, new_thread_top,
                         new_thread_top - new_thread_bottom,
                         CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
                         __pthread_sig_cancel, new_thread);
#else
          pid = clone(pthread_start_thread_event, new_thread_top,
                      CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
                      __pthread_sig_cancel, new_thread);
#endif
          saved_errno = errno;
          if (pid != -1)
            {
              /* Now fill in the information about the new thread in
                 the newly created thread's data structure.  We cannot let
                 the new thread do this since we don't know whether it was
                 already scheduled when we send the event. */
              new_thread->p_eventbuf.eventdata = new_thread;
              new_thread->p_eventbuf.eventnum = TD_CREATE;
              __pthread_last_event = new_thread;

              /* We have to set the PID here since the callback function
                 in the debug library will need it and we cannot guarantee
                 the child got scheduled before the debugger. */
              new_thread->p_pid = pid;

              /* Now call the function which signals the event. */
              __linuxthreads_create_event ();

              /* Now restart the thread. */
              __pthread_unlock(new_thread->p_lock);
            }
        }
    }

  if (pid == 0)
    {
      PDEBUG("cloning new_thread = %p\n", new_thread);
#ifdef __ia64__
      pid = __clone2(pthread_start_thread, new_thread_top,
                     new_thread_top - new_thread_bottom,
                     CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
                     __pthread_sig_cancel, new_thread);
#else
      pid = clone(pthread_start_thread, new_thread_top,
                  CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
                  __pthread_sig_cancel, new_thread);
#endif
      saved_errno = errno;
    }

  /* Check if cloning succeeded */
  if (pid == -1)
    {
      /********************************************************
       * Code inserted to remove the thread from our list of active
       * threads in case of failure (needed to cope with uClinux),
       * see comment below. */
      new_thread->p_nextlive->p_prevlive = new_thread->p_prevlive;
      new_thread->p_prevlive->p_nextlive = new_thread->p_nextlive;
      /********************************************************/

      /* Free the stack if we allocated it */
      if (attr == NULL || !attr->__stackaddr_set)
        {
#ifdef __ARCH_USE_MMU__
          if (new_thread->p_guardsize != 0)
            munmap(new_thread->p_guardaddr, new_thread->p_guardsize);
          munmap((caddr_t)((char *)(new_thread+1) - INITIAL_STACK_SIZE),
                 INITIAL_STACK_SIZE);
#else
          free(new_thread_bottom);
#endif /* __ARCH_USE_MMU__ */
        }
      __pthread_handles[sseg].h_descr = NULL;
      __pthread_handles[sseg].h_bottom = NULL;
      __pthread_handles_num--;
      return saved_errno;
    }

  PDEBUG("new thread pid = %d\n", pid);

#if 0
  /* ***********************************************************
     This code has been moved before the call to clone().  On uClinux,
     waiting on a semaphore requires that the child already be in the
     active threads list, since that list is what pthread_find_self()
     uses to obtain the pthread_descr of the calling thread.  If the
     child called sem_wait before this code was executed, it would hang
     forever, and a sem_post call would instead wake initial_thread. */

  /* Insert new thread in doubly linked list of active threads */
  new_thread->p_prevlive = __pthread_main_thread;
  new_thread->p_nextlive = __pthread_main_thread->p_nextlive;
  __pthread_main_thread->p_nextlive->p_prevlive = new_thread;
  __pthread_main_thread->p_nextlive = new_thread;
  /************************************************************/
#endif

  /* Set pid field of the new thread, in case we get there before the
     child starts. */
  new_thread->p_pid = pid;

  /* We're all set */
  *thread = new_thread_id;
  return 0;
}
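/* Illustrative sketch, not part of the original source: the p_nextlive /
   p_prevlive manipulation above is an ordinary circular doubly-linked list
   anchored at the main thread.  The reduced example below shows the same
   insert-after-head and unlink operations on a stand-alone node type, which
   is why the clone() failure path can drop the half-created thread with
   exactly two pointer assignments.  The struct and function names are
   hypothetical. */
#if 0	/* example only */
struct live_node {
  struct live_node *next;
  struct live_node *prev;
};

/* Insert NODE right after HEAD (what the success path does). */
static void live_insert(struct live_node *head, struct live_node *node)
{
  node->prev = head;
  node->next = head->next;
  head->next->prev = node;
  head->next = node;
}

/* Unlink NODE again (what the clone() failure path does). */
static void live_remove(struct live_node *node)
{
  node->next->prev = node->prev;
  node->prev->next = node->next;
}
#endif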
static int pthread_handle_create(pthread_t *thread, const pthread_attr_t *attr,
                                 void * (*start_routine)(void *), void *arg,
                                 sigset_t * mask, int father_pid,
                                 int report_events,
                                 td_thr_events_t *event_maskp)
{
  size_t sseg;
  int pid;
  pthread_descr new_thread;
  char *stack_addr;
  char * new_thread_bottom;
  pthread_t new_thread_id;
  char *guardaddr = NULL;
  size_t guardsize = 0, stksize = 0;
  int pagesize = __getpagesize();
  int saved_errno = 0;

#ifdef USE_TLS
  new_thread = _dl_allocate_tls (NULL);
  if (new_thread == NULL)
    return EAGAIN;
# if TLS_DTV_AT_TP
  /* pthread_descr is below TP. */
  new_thread = (pthread_descr) ((char *) new_thread - TLS_PRE_TCB_SIZE);
# endif
#else
  /* Prevent warnings. */
  new_thread = NULL;
#endif

  /* First check whether we have to change the policy and if yes, whether
     we can do this.  Normally this should be done by examining the
     return value of the __sched_setscheduler call in pthread_start_thread
     but this is hard to implement.  FIXME  */
  if (attr != NULL && attr->__schedpolicy != SCHED_OTHER && geteuid () != 0)
    return EPERM;

  /* Find a free segment for the thread, and allocate a stack if needed */
  for (sseg = 2; ; sseg++)
    {
      if (sseg >= PTHREAD_THREADS_MAX)
        {
#ifdef USE_TLS
# if TLS_DTV_AT_TP
          new_thread = (pthread_descr) ((char *) new_thread + TLS_PRE_TCB_SIZE);
# endif
          _dl_deallocate_tls (new_thread, true);
#endif
          return EAGAIN;
        }
      if (__pthread_handles[sseg].h_descr != NULL)
        continue;
      if (pthread_allocate_stack(attr, thread_segment(sseg), pagesize,
                                 &stack_addr, &new_thread_bottom,
                                 &guardaddr, &guardsize, &stksize) == 0)
        {
#ifdef USE_TLS
          new_thread->p_stackaddr = stack_addr;
#else
          new_thread = (pthread_descr) stack_addr;
#endif
          break;
#ifndef __ARCH_USE_MMU__
        }
      else
        {
          /* When there is an MMU, mmap() is used to allocate the stack. If one
           * segment is already mapped, we should continue to see if we can
           * use the next one. However, when there is no MMU, malloc() is used.
           * It's a waste of CPU cycles to continue to try if it fails.  */
          return EAGAIN;
#endif
        }
    }

  __pthread_handles_num++;

  /* Allocate new thread identifier */
  pthread_threads_counter += PTHREAD_THREADS_MAX;
  new_thread_id = sseg + pthread_threads_counter;

  /* Initialize the thread descriptor.  Elements which have to be
     initialized to zero already have this value. */
#if !defined USE_TLS || !TLS_DTV_AT_TP
  new_thread->p_header.data.tcb = new_thread;
  new_thread->p_header.data.self = new_thread;
#endif
#if TLS_MULTIPLE_THREADS_IN_TCB || !defined USE_TLS || !TLS_DTV_AT_TP
  new_thread->p_multiple_threads = 1;
#endif
  new_thread->p_tid = new_thread_id;
  new_thread->p_lock = &(__pthread_handles[sseg].h_lock);
  new_thread->p_cancelstate = PTHREAD_CANCEL_ENABLE;
  new_thread->p_canceltype = PTHREAD_CANCEL_DEFERRED;
#if !(USE_TLS && HAVE___THREAD)
  new_thread->p_errnop = &new_thread->p_errno;
  new_thread->p_h_errnop = &new_thread->p_h_errno;
  new_thread->p_resp = &new_thread->p_res;
#endif
  new_thread->p_guardaddr = guardaddr;
  new_thread->p_guardsize = guardsize;
  new_thread->p_nr = sseg;
  new_thread->p_inheritsched = attr ? attr->__inheritsched : 0;
  new_thread->p_alloca_cutoff = stksize / 4 > __MAX_ALLOCA_CUTOFF
                                ? __MAX_ALLOCA_CUTOFF : stksize / 4;

  /* Initialize the thread handle */
  __pthread_init_lock(&__pthread_handles[sseg].h_lock);
  __pthread_handles[sseg].h_descr = new_thread;
  __pthread_handles[sseg].h_bottom = new_thread_bottom;

  /* Determine scheduling parameters for the thread */
  new_thread->p_start_args.schedpolicy = -1;
  if (attr != NULL)
    {
      new_thread->p_detached = attr->__detachstate;
      new_thread->p_userstack = attr->__stackaddr_set;

      switch (attr->__inheritsched)
        {
        case PTHREAD_EXPLICIT_SCHED:
          new_thread->p_start_args.schedpolicy = attr->__schedpolicy;
          memcpy (&new_thread->p_start_args.schedparam, &attr->__schedparam,
                  sizeof (struct sched_param));
          break;
        case PTHREAD_INHERIT_SCHED:
          new_thread->p_start_args.schedpolicy = __sched_getscheduler(father_pid);
          __sched_getparam(father_pid, &new_thread->p_start_args.schedparam);
          break;
        }
      new_thread->p_priority =
        new_thread->p_start_args.schedparam.sched_priority;
    }

  /* Finish setting up arguments to pthread_start_thread */
  new_thread->p_start_args.start_routine = start_routine;
  new_thread->p_start_args.arg = arg;
  new_thread->p_start_args.mask = *mask;

  /* Make the new thread ID available already now.  If any of the later
     functions fail we return an error value and the caller must not use
     the stored thread ID. */
  *thread = new_thread_id;

  /* Raise priority of thread manager if needed */
  __pthread_manager_adjust_prio(new_thread->p_priority);

  /* Do the cloning.  We have to use two different functions depending
     on whether we are debugging or not. */
  pid = 0;     /* Note that the thread never can have PID zero. */
  if (report_events)
    {
      /* See whether the TD_CREATE event bit is set in any of the masks. */
      int idx = __td_eventword (TD_CREATE);
      uint32_t mask = __td_eventmask (TD_CREATE);

      if ((mask & (__pthread_threads_events.event_bits[idx]
                   | event_maskp->event_bits[idx])) != 0)
        {
          /* Lock the mutex the child will use now so that it will stop. */
          __pthread_lock(new_thread->p_lock, NULL);

          /* We have to report this event. */
#ifdef NEED_SEPARATE_REGISTER_STACK
          /* Perhaps this version should be used on all platforms.  But
             this requires that __clone2 be uniformly supported
             everywhere.

             And there is some argument for changing the __clone2
             interface to pass sp and bsp instead, making it more IA64
             specific, but allowing stacks to grow outward from each
             other, to get less paging and fewer mmaps. */
          pid = __clone2(pthread_start_thread_event,
                         (void **)new_thread_bottom,
                         (char *)stack_addr - new_thread_bottom,
                         CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
                         CLONE_SYSVSEM | __pthread_sig_cancel, new_thread);
#elif _STACK_GROWS_UP
          pid = __clone(pthread_start_thread_event, (void *) new_thread_bottom,
                        CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
                        CLONE_SYSVSEM | __pthread_sig_cancel, new_thread);
#else
          pid = __clone(pthread_start_thread_event, stack_addr,
                        CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
                        CLONE_SYSVSEM | __pthread_sig_cancel, new_thread);
#endif
          saved_errno = errno;
          if (pid != -1)
            {
              /* Now fill in the information about the new thread in
                 the newly created thread's data structure.  We cannot let
                 the new thread do this since we don't know whether it was
                 already scheduled when we send the event. */
              new_thread->p_eventbuf.eventdata = new_thread;
              new_thread->p_eventbuf.eventnum = TD_CREATE;
              __pthread_last_event = new_thread;

              /* We have to set the PID here since the callback function
                 in the debug library will need it and we cannot guarantee
                 the child got scheduled before the debugger. */
              new_thread->p_pid = pid;

              /* Now call the function which signals the event. */
              __linuxthreads_create_event ();

              /* Now restart the thread. */
              __pthread_unlock(new_thread->p_lock);
            }
        }
    }

  if (pid == 0)
    {
#ifdef NEED_SEPARATE_REGISTER_STACK
      pid = __clone2(pthread_start_thread,
                     (void **)new_thread_bottom,
                     (char *)stack_addr - new_thread_bottom,
                     CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
                     CLONE_SYSVSEM | __pthread_sig_cancel, new_thread);
#elif _STACK_GROWS_UP
      pid = __clone(pthread_start_thread, (void *) new_thread_bottom,
                    CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
                    CLONE_SYSVSEM | __pthread_sig_cancel, new_thread);
#else
      pid = __clone(pthread_start_thread, stack_addr,
                    CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
                    CLONE_SYSVSEM | __pthread_sig_cancel, new_thread);
#endif /* !NEED_SEPARATE_REGISTER_STACK */
      saved_errno = errno;
    }

  /* Check if cloning succeeded */
  if (pid == -1)
    {
      /* Free the stack if we allocated it */
      if (attr == NULL || !attr->__stackaddr_set)
        {
#ifdef NEED_SEPARATE_REGISTER_STACK
          size_t stacksize = ((char *)(new_thread->p_guardaddr)
                              - new_thread_bottom);
          munmap((caddr_t)new_thread_bottom,
                 2 * stacksize + new_thread->p_guardsize);
#elif _STACK_GROWS_UP
# ifdef USE_TLS
          size_t stacksize = guardaddr - stack_addr;
          munmap(stack_addr, stacksize + guardsize);
# else
          size_t stacksize = guardaddr - (char *)new_thread;
          munmap(new_thread, stacksize + guardsize);
# endif
#else
# ifdef USE_TLS
          size_t stacksize = stack_addr - new_thread_bottom;
# else
          size_t stacksize = (char *)(new_thread+1) - new_thread_bottom;
# endif
          munmap(new_thread_bottom - guardsize, guardsize + stacksize);
#endif
        }
#ifdef USE_TLS
# if TLS_DTV_AT_TP
      new_thread = (pthread_descr) ((char *) new_thread + TLS_PRE_TCB_SIZE);
# endif
      _dl_deallocate_tls (new_thread, true);
#endif
      __pthread_handles[sseg].h_descr = NULL;
      __pthread_handles[sseg].h_bottom = NULL;
      __pthread_handles_num--;
      return saved_errno;
    }

  /* Insert new thread in doubly linked list of active threads */
  new_thread->p_prevlive = __pthread_main_thread;
  new_thread->p_nextlive = __pthread_main_thread->p_nextlive;
  __pthread_main_thread->p_nextlive->p_prevlive = new_thread;
  __pthread_main_thread->p_nextlive = new_thread;

  /* Set pid field of the new thread, in case we get there before the
     child starts. */
  new_thread->p_pid = pid;
  return 0;
}
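/* Illustrative sketch, not part of the original source: the flag set passed
   to clone() above (CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND plus
   the cancellation signal) is what makes the child a "thread": it shares
   the address space, filesystem state, file descriptors and signal handlers
   of its creator.  A minimal stand-alone use of the same pattern, assuming
   a downward-growing stack and using SIGCHLD as the exit signal instead of
   __pthread_sig_cancel, might look like this (names and the stack size are
   examples only; the example deliberately never frees the child stack). */
#if 0	/* example only */
#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdlib.h>

#define EXAMPLE_STACK_SIZE (64 * 1024)

static int example_start(void *arg)
{
  /* Runs in the new task, sharing memory with its creator. */
  return 0;
}

static int example_clone(void)
{
  char *stack = malloc(EXAMPLE_STACK_SIZE);
  if (stack == NULL)
    return -1;
  /* Pass the top of the stack; the new task begins at example_start(). */
  return clone(example_start, stack + EXAMPLE_STACK_SIZE,
               CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | SIGCHLD,
               NULL);
}
#endif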