/* Unlock RWLOCK. */ int attribute_protected __pthread_rwlock_unlock (pthread_rwlock_t *rwlock) { lll_lock (rwlock->__data.__lock, rwlock->__data.__shared); if (rwlock->__data.__writer) rwlock->__data.__writer = 0; else --rwlock->__data.__nr_readers; if (rwlock->__data.__nr_readers == 0) { if (rwlock->__data.__nr_writers_queued) { ++rwlock->__data.__writer_wakeup; lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared); lll_futex_wake (&rwlock->__data.__writer_wakeup, 1, rwlock->__data.__shared); return 0; } else if (rwlock->__data.__nr_readers_queued) { ++rwlock->__data.__readers_wakeup; lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared); lll_futex_wake (&rwlock->__data.__readers_wakeup, INT_MAX, rwlock->__data.__shared); return 0; } } lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared); return 0; }
int pthread_mutex_unlock (pthread_mutex_t *mtxp) { struct pthread *self = PTHREAD_SELF; int ret = 0, flags = mtxp->__flags & GSYNC_SHARED; switch (MTX_TYPE (mtxp)) { case PTHREAD_MUTEX_NORMAL: lll_unlock (&mtxp->__lock, flags); break; case PTHREAD_MUTEX_RECURSIVE: if (!mtx_owned_p (mtxp, self, flags)) ret = EPERM; else if (--mtxp->__cnt == 0) { mtxp->__owner_id = mtxp->__shpid = 0; lll_unlock (&mtxp->__lock, flags); } break; case PTHREAD_MUTEX_ERRORCHECK: if (!mtx_owned_p (mtxp, self, flags)) ret = EPERM; else { mtxp->__owner_id = mtxp->__shpid = 0; lll_unlock (&mtxp->__lock, flags); } break; case PTHREAD_MUTEX_NORMAL | PTHREAD_MUTEX_ROBUST: case PTHREAD_MUTEX_RECURSIVE | PTHREAD_MUTEX_ROBUST: case PTHREAD_MUTEX_ERRORCHECK | PTHREAD_MUTEX_ROBUST: if (mtxp->__owner_id == NOTRECOVERABLE_ID) ; /* Nothing to do. */ else if (mtxp->__owner_id != self->id || (int)(mtxp->__lock & LLL_OWNER_MASK) != getpid ()) ret = EPERM; else if (--mtxp->__cnt == 0) { /* Release the lock. If it's in an inconsistent state, mark it as not recoverable. */ mtxp->__owner_id = (mtxp->__lock & LLL_DEAD_OWNER) ? NOTRECOVERABLE_ID : 0; lll_robust_unlock (&mtxp->__lock, flags); } break; default: ret = EINVAL; break; } return (ret); }
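/* A hedged caller-side sketch (not part of the implementation above): it
   illustrates the semantics the unlock path enforces, namely that a
   PTHREAD_MUTEX_RECURSIVE mutex must be unlocked once per successful lock
   before the low-level lock is released, and that unlocking a mutex you do
   not own reports EPERM. Uses only standard pthread calls. */
#include <assert.h>
#include <errno.h>
#include <pthread.h>

int main (void)
{
  pthread_mutexattr_t at;
  pthread_mutex_t m;

  pthread_mutexattr_init (&at);
  pthread_mutexattr_settype (&at, PTHREAD_MUTEX_RECURSIVE);
  pthread_mutex_init (&m, &at);

  pthread_mutex_lock (&m);
  pthread_mutex_lock (&m);                      /* Recursion count is now 2.  */
  assert (pthread_mutex_unlock (&m) == 0);      /* Count drops to 1; still owned.  */
  assert (pthread_mutex_unlock (&m) == 0);      /* Count drops to 0; lock released.  */
  assert (pthread_mutex_unlock (&m) == EPERM);  /* No longer the owner.  */

  pthread_mutex_destroy (&m);
  pthread_mutexattr_destroy (&at);
  return 0;
}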
int __pthread_cond_signal (pthread_cond_t *cond) { int pshared = (cond->__data.__mutex == (void *) ~0l) ? LLL_SHARED : LLL_PRIVATE; LIBC_PROBE (cond_signal, 1, cond); /* Make sure we are alone. */ lll_lock (cond->__data.__lock, pshared); /* Are there any waiters to be woken? */ if (cond->__data.__total_seq > cond->__data.__wakeup_seq) { /* Yes. Mark one of them as woken. */ ++cond->__data.__wakeup_seq; ++cond->__data.__futex; #if (defined lll_futex_cmp_requeue_pi \ && defined __ASSUME_REQUEUE_PI) pthread_mutex_t *mut = cond->__data.__mutex; if (USE_REQUEUE_PI (mut) /* This can only really fail with ENOSYS, since nobody can modify the futex while we hold the cond_lock. */ && lll_futex_cmp_requeue_pi (&cond->__data.__futex, 1, 0, &mut->__data.__lock, cond->__data.__futex, pshared) == 0) { lll_unlock (cond->__data.__lock, pshared); return 0; } else #endif /* Wake one. */ if (! __builtin_expect (lll_futex_wake_unlock (&cond->__data.__futex, 1, 1, &cond->__data.__lock, pshared), 0)) return 0; /* Fallback if neither of them works. */ lll_futex_wake (&cond->__data.__futex, 1, pshared); } /* We are done. */ lll_unlock (cond->__data.__lock, pshared); return 0; }
int attribute_protected __pthread_cond_signal ( pthread_cond_t *cond) { int pshared = (cond->__data.__mutex == (void *) ~0l) ? LLL_SHARED : LLL_PRIVATE; /* Make sure we are alone. */ lll_lock (cond->__data.__lock, pshared); /* Are there any waiters to be woken? */ if (cond->__data.__total_seq > cond->__data.__wakeup_seq) { /* Yes. Mark one of them as woken. */ ++cond->__data.__wakeup_seq; ++cond->__data.__futex; /* Wake one. */ if (! __builtin_expect (lll_futex_wake_unlock (&cond->__data.__futex, 1, 1, &cond->__data.__lock, pshared), 0)) return 0; lll_futex_wake (&cond->__data.__futex, 1, pshared); } /* We are done. */ lll_unlock (cond->__data.__lock, pshared); return 0; }
malloc_zone_t * malloc_zone_from_ptr(const void *ptr) { OSLog("malloc_zone_from_ptr(%p): searching ... ", ptr); lll_lock(&malloc_lock); OSMemoryZone* osm = NULL; for (int i = 0; i < MAX_ZONES; i++) { OSMemoryZone* tosm = &zones[i]; if (tosm->registred && tosm->memory_space != NULL) { if (XXX_mspace_has_pointer(tosm->memory_space, (void*)ptr)) { osm = tosm; break; } } } lll_unlock(&malloc_lock); if (osm == NULL) { osm = default_zone; OSLog("malloc_zone_from_ptr(%p): no hits, returning default_zone", ptr); } else { OSLog("malloc_zone_from_ptr(%p): found zone [%p] ", ptr, osm); } return (malloc_zone_t*)osm; }
int __pthread_once ( pthread_once_t *once_control, void (*init_routine) (void)) { /* XXX Depending on whether the LOCK_IN_ONCE_T is defined use a global lock variable or one which is part of the pthread_once_t object. */ if (*once_control == PTHREAD_ONCE_INIT) { lll_lock (once_lock, LLL_PRIVATE); /* XXX This implementation is not complete. It doesn't take cancellation and fork into account. */ if (*once_control == PTHREAD_ONCE_INIT) { init_routine (); *once_control = !PTHREAD_ONCE_INIT; } lll_unlock (once_lock, LLL_PRIVATE); } return 0; }
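/* Hedged usage sketch (not from the source): pthread_once guarantees that the
   init routine runs at most once even when several threads race on the same
   pthread_once_t, which is exactly the double-checked pattern guarded by
   once_lock in the implementation above. Standard pthread API only. */
#include <pthread.h>
#include <stdio.h>

static pthread_once_t table_once = PTHREAD_ONCE_INIT;
static int table_ready;

static void init_table (void)
{
  /* Runs exactly once, no matter how many threads call ensure_table.  */
  table_ready = 1;
}

static void ensure_table (void)
{
  pthread_once (&table_once, init_table);
  /* table_ready is guaranteed to be set from here on.  */
}

int main (void)
{
  ensure_table ();
  printf ("table_ready = %d\n", table_ready);
  return 0;
}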
int __pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock) { int result = EBUSY; if (ELIDE_TRYLOCK (rwlock->__data.__rwelision, rwlock->__data.__lock == 0 && rwlock->__data.__nr_readers == 0 && rwlock->__data.__writer == 0, 0)) return 0; lll_lock (rwlock->__data.__lock, rwlock->__data.__shared); if (rwlock->__data.__writer == 0 && (rwlock->__data.__nr_writers_queued == 0 || PTHREAD_RWLOCK_PREFER_READER_P (rwlock))) { if (__glibc_unlikely (++rwlock->__data.__nr_readers == 0)) { --rwlock->__data.__nr_readers; result = EAGAIN; } else result = 0; } lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared); return result; }
/* Unlock RWLOCK. */ int __pthread_rwlock_unlock (pthread_rwlock_t *rwlock) { int futex_shared = rwlock->__data.__shared == LLL_PRIVATE ? FUTEX_PRIVATE : FUTEX_SHARED; LIBC_PROBE (rwlock_unlock, 1, rwlock); if (ELIDE_UNLOCK (rwlock->__data.__writer == 0 && rwlock->__data.__nr_readers == 0)) return 0; lll_lock (rwlock->__data.__lock, rwlock->__data.__shared); if (rwlock->__data.__writer) rwlock->__data.__writer = 0; else --rwlock->__data.__nr_readers; /* If there are still readers present, we do not yet need to wake writers nor are responsible to wake any readers. */ if (rwlock->__data.__nr_readers == 0) { /* Note that if there is a blocked writer, we effectively make it responsible for waking any readers because we don't wake readers in this case. */ if (rwlock->__data.__nr_writers_queued) { ++rwlock->__data.__writer_wakeup; lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared); futex_wake (&rwlock->__data.__writer_wakeup, 1, futex_shared); return 0; } else if (rwlock->__data.__nr_readers_queued) { ++rwlock->__data.__readers_wakeup; lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared); futex_wake (&rwlock->__data.__readers_wakeup, INT_MAX, futex_shared); return 0; } } lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared); return 0; }
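/* Caller-side sketch (illustrative only, standard pthread rwlock API): several
   readers may hold the lock concurrently, and the unlock path above wakes a
   queued writer first, waking all queued readers only when no writer is
   waiting. */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t rw = PTHREAD_RWLOCK_INITIALIZER;
static int shared_value;

static void *reader (void *arg)
{
  (void) arg;
  pthread_rwlock_rdlock (&rw);
  printf ("read %d\n", shared_value);
  pthread_rwlock_unlock (&rw);
  return NULL;
}

static void *writer (void *arg)
{
  (void) arg;
  pthread_rwlock_wrlock (&rw);
  ++shared_value;
  pthread_rwlock_unlock (&rw);
  return NULL;
}

int main (void)
{
  pthread_t r1, r2, w;
  pthread_create (&r1, NULL, reader, NULL);
  pthread_create (&w, NULL, writer, NULL);
  pthread_create (&r2, NULL, reader, NULL);
  pthread_join (r1, NULL);
  pthread_join (w, NULL);
  pthread_join (r2, NULL);
  return 0;
}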
int pthread_getattr_default_np (pthread_attr_t *out) { struct pthread_attr *real_out; real_out = (struct pthread_attr *) out; lll_lock (__default_pthread_attr_lock, LLL_PRIVATE); *real_out = __default_pthread_attr; lll_unlock (__default_pthread_attr_lock, LLL_PRIVATE); return 0; }
int attribute_protected __pthread_getschedparam ( pthread_t threadid, int *policy, struct sched_param *param) { struct pthread *pd = (struct pthread *) threadid; /* Make sure the descriptor is valid. */ if (INVALID_TD_P (pd)) /* Not a valid thread handle. */ return ESRCH; int result = 0; lll_lock (pd->lock, LLL_PRIVATE); /* The library is responsible for maintaining the values at all times. If the user uses an interface other than pthread_setschedparam to modify the scheduler setting, it is not the library's problem. In case the descriptor's values have not yet been retrieved do it now. */ if ((pd->flags & ATTR_FLAG_SCHED_SET) == 0) { if (sched_getparam (pd->tid, &pd->schedparam) != 0) result = 1; else pd->flags |= ATTR_FLAG_SCHED_SET; } if ((pd->flags & ATTR_FLAG_POLICY_SET) == 0) { pd->schedpolicy = sched_getscheduler (pd->tid); if (pd->schedpolicy == -1) result = 1; else pd->flags |= ATTR_FLAG_POLICY_SET; } if (result == 0) { *policy = pd->schedpolicy; memcpy (param, &pd->schedparam, sizeof (struct sched_param)); } lll_unlock (pd->lock, LLL_PRIVATE); return result; }
int __pthread_current_priority (void) { struct pthread *self = THREAD_SELF; if ((self->flags & (ATTR_FLAG_POLICY_SET | ATTR_FLAG_SCHED_SET)) == (ATTR_FLAG_POLICY_SET | ATTR_FLAG_SCHED_SET)) return self->schedparam.sched_priority; int result = 0; #ifdef TPP_PTHREAD_SCHED int policy; struct sched_param param; #endif lll_lock (self->lock, LLL_PRIVATE); if ((self->flags & ATTR_FLAG_SCHED_SET) == 0) { #ifndef TPP_PTHREAD_SCHED if (__sched_getparam (self->tid, &self->schedparam) != 0) #else if (__pthread_getschedparam (self->tid, &policy, &self->schedparam) != 0) #endif result = -1; else self->flags |= ATTR_FLAG_SCHED_SET; } if ((self->flags & ATTR_FLAG_POLICY_SET) == 0) { #ifndef TPP_PTHREAD_SCHED self->schedpolicy = __sched_getscheduler (self->tid); #else if (__pthread_getschedparam (self->tid, &self->schedpolicy, &param) != 0) self->schedpolicy = -1; #endif if (self->schedpolicy == -1) result = -1; else self->flags |= ATTR_FLAG_POLICY_SET; } if (result != -1) result = self->schedparam.sched_priority; lll_unlock (self->lock, LLL_PRIVATE); return result; }
int __pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock) { int result = EBUSY; bool wake = false; int futex_shared = rwlock->__data.__shared == LLL_PRIVATE ? FUTEX_PRIVATE : FUTEX_SHARED; if (ELIDE_TRYLOCK (rwlock->__data.__rwelision, rwlock->__data.__lock == 0 && rwlock->__data.__nr_readers == 0 && rwlock->__data.__writer == 0, 0)) return 0; lll_lock (rwlock->__data.__lock, rwlock->__data.__shared); if (rwlock->__data.__writer == 0 && (rwlock->__data.__nr_writers_queued == 0 || PTHREAD_RWLOCK_PREFER_READER_P (rwlock))) { if (__glibc_unlikely (++rwlock->__data.__nr_readers == 0)) { --rwlock->__data.__nr_readers; result = EAGAIN; } else { result = 0; /* See pthread_rwlock_rdlock. */ if (rwlock->__data.__nr_readers == 1 && rwlock->__data.__nr_readers_queued > 0 && rwlock->__data.__nr_writers_queued > 0) { ++rwlock->__data.__readers_wakeup; wake = true; } } } lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared); if (wake) futex_wake (&rwlock->__data.__readers_wakeup, INT_MAX, futex_shared); return result; }
int __lll_unlock_elision (int *lock, short *adapt_count, int pshared) { /* When the lock was free we're in a transaction. */ if (*lock == 0) __builtin_tend (0); else { lll_unlock ((*lock), pshared); /* Update the adapt count AFTER completing the critical section. Doing this here prevents unneeded stalling when entering a critical section. Saving about 8% runtime on P8. */ if (*adapt_count > 0) (*adapt_count)--; } return 0; }
int __pthread_rwlock_trywrlock ( pthread_rwlock_t *rwlock) { int result = EBUSY; lll_lock (rwlock->__data.__lock, rwlock->__data.__shared); if (rwlock->__data.__writer == 0 && rwlock->__data.__nr_readers == 0) { rwlock->__data.__writer = THREAD_GETMEM (THREAD_SELF, tid); result = 0; } lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared); return result; }
int __lll_unlock_elision (int *lock, short *adapt_count, int pshared) { /* When the lock was free we're in a transaction. */ if (*lock == 0) __libc_tend (0); else { /* Update adapt_count in the critical section to prevent a write-after-destroy error as mentioned in BZ 20822. The following update of adapt_count has to be contained within the critical region of the fall-back lock in order to not violate the mutex destruction requirements. */ short __tmp = atomic_load_relaxed (adapt_count); if (__tmp > 0) atomic_store_relaxed (adapt_count, __tmp - 1); lll_unlock ((*lock), pshared); } return 0; }
int __pthread_rwlock_rdlock (pthread_rwlock_t *rwlock) { int result = 0; LIBC_PROBE (rdlock_entry, 1, rwlock); if (ELIDE_LOCK (rwlock->__data.__rwelision, rwlock->__data.__lock == 0 && rwlock->__data.__writer == 0 && rwlock->__data.__nr_readers == 0)) return 0; /* Make sure we are alone. */ lll_lock (rwlock->__data.__lock, rwlock->__data.__shared); /* Get the rwlock if there is no writer... */ if (rwlock->__data.__writer == 0 /* ...and if either no writer is waiting or we prefer readers. */ && (!rwlock->__data.__nr_writers_queued || PTHREAD_RWLOCK_PREFER_READER_P (rwlock))) { /* Increment the reader counter. Avoid overflow. */ if (__glibc_unlikely (++rwlock->__data.__nr_readers == 0)) { /* Overflow on number of readers. */ --rwlock->__data.__nr_readers; result = EAGAIN; } else LIBC_PROBE (rdlock_acquire_read, 1, rwlock); /* We are done, free the lock. */ lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared); return result; } return __pthread_rwlock_rdlock_slow (rwlock); }
/* OSMemoryZone */ OSMemoryZone* _OSGetFirstAvailableMemoryZone(void) { lll_lock(&malloc_lock); OSMemoryZone* osm = NULL; for (int i = 0; i < MAX_ZONES; i++) { OSMemoryZone* tosm = &zones[i]; if (tosm->registred == FALSE) { osm = tosm; break; /* stop at the first free slot, as the name promises */ } } if (osm != NULL) { osm->registred = TRUE; } else { OSHalt("out of avail malloc zones (max: %d)", MAX_ZONES); } lll_unlock(&malloc_lock); return (OSMemoryZone*)osm; }
int pthread_setschedprio ( pthread_t threadid, int prio) { struct pthread *pd = (struct pthread *) threadid; /* Make sure the descriptor is valid. */ if (INVALID_TD_P (pd)) /* Not a valid thread handle. */ return ESRCH; int result = 0; struct sched_param param; param.sched_priority = prio; lll_lock (pd->lock, LLL_PRIVATE); /* If the thread should have higher priority because of some PTHREAD_PRIO_PROTECT mutexes it holds, adjust the priority. */ if (__builtin_expect (pd->tpp != NULL, 0) && pd->tpp->priomax > prio) param.sched_priority = pd->tpp->priomax; /* Try to set the scheduler information. */ if (__builtin_expect (sched_setparam (pd->tid, &param) == -1, 0)) result = errno; else { /* We succeeded changing the kernel information. Reflect this change in the thread descriptor. */ param.sched_priority = prio; memcpy (&pd->schedparam, &param, sizeof (struct sched_param)); pd->flags |= ATTR_FLAG_SCHED_SET; } lll_unlock (pd->lock, LLL_PRIVATE); return result; }
int sem_close (sem_t *sem) { int result = 0; /* Get the lock. */ lll_lock (__sem_mappings_lock, LLL_PRIVATE); /* Locate the entry for the mapping the caller provided. */ rec = NULL; the_sem = sem; __twalk (__sem_mappings, walker); if (rec != NULL) { /* Check the reference counter. If it is going to be zero, free all the resources. */ if (--rec->refcnt == 0) { /* Remove the record from the tree. */ (void) __tdelete (rec, &__sem_mappings, __sem_search); result = munmap (rec->sem, sizeof (sem_t)); free (rec); } } else { /* This is not a valid semaphore. */ result = -1; __set_errno (EINVAL); } /* Release the lock. */ lll_unlock (__sem_mappings_lock, LLL_PRIVATE); return result; }
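/* Illustrative caller-side sketch (POSIX named-semaphore API, not part of the
   implementation above): sem_close drops one reference on the mapping that
   sem_open created; the last close unmaps it, and sem_unlink removes the
   name. The semaphore name "/demo_sem" is an arbitrary example. */
#include <fcntl.h>
#include <semaphore.h>
#include <stdio.h>

int main (void)
{
  sem_t *s = sem_open ("/demo_sem", O_CREAT, 0600, 1);
  if (s == SEM_FAILED)
    {
      perror ("sem_open");
      return 1;
    }

  sem_wait (s);                 /* Enter the critical section.  */
  /* ... protected work ... */
  sem_post (s);                 /* Leave it again.  */

  if (sem_close (s) != 0)       /* Release this process's mapping.  */
    perror ("sem_close");
  sem_unlink ("/demo_sem");     /* Remove the name once nobody needs it.  */
  return 0;
}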
int __pthread_current_priority (void) { struct pthread *self = THREAD_SELF; if ((self->flags & (ATTR_FLAG_POLICY_SET | ATTR_FLAG_SCHED_SET)) == (ATTR_FLAG_POLICY_SET | ATTR_FLAG_SCHED_SET)) return self->schedparam.sched_priority; int result = 0; /* See CREATE THREAD NOTES in nptl/pthread_create.c. */ lll_lock (self->lock, LLL_PRIVATE); if ((self->flags & ATTR_FLAG_SCHED_SET) == 0) { if (__sched_getparam (self->tid, &self->schedparam) != 0) result = -1; else self->flags |= ATTR_FLAG_SCHED_SET; } if ((self->flags & ATTR_FLAG_POLICY_SET) == 0) { self->schedpolicy = __sched_getscheduler (self->tid); if (self->schedpolicy == -1) result = -1; else self->flags |= ATTR_FLAG_POLICY_SET; } if (result != -1) result = self->schedparam.sched_priority; lll_unlock (self->lock, LLL_PRIVATE); return result; }
int __pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock) { int result = EBUSY; lll_lock (rwlock->__data.__lock, rwlock->__data.__shared); if (rwlock->__data.__writer == 0 && (rwlock->__data.__nr_writers_queued == 0 || PTHREAD_RWLOCK_PREFER_READER_P (rwlock))) { if (__builtin_expect (++rwlock->__data.__nr_readers == 0, 0)) { --rwlock->__data.__nr_readers; result = EAGAIN; } else result = 0; } lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared); return result; }
int __pthread_tpp_change_priority (int previous_prio, int new_prio) { struct pthread *self = THREAD_SELF; struct priority_protection_data *tpp = THREAD_GETMEM (self, tpp); if (tpp == NULL) { if (__sched_fifo_min_prio == -1) __init_sched_fifo_prio (); size_t size = sizeof *tpp; size += (__sched_fifo_max_prio - __sched_fifo_min_prio + 1) * sizeof (tpp->priomap[0]); tpp = calloc (size, 1); if (tpp == NULL) return ENOMEM; tpp->priomax = __sched_fifo_min_prio - 1; THREAD_SETMEM (self, tpp, tpp); } assert (new_prio == -1 || (new_prio >= __sched_fifo_min_prio && new_prio <= __sched_fifo_max_prio)); assert (previous_prio == -1 || (previous_prio >= __sched_fifo_min_prio && previous_prio <= __sched_fifo_max_prio)); int priomax = tpp->priomax; int newpriomax = priomax; if (new_prio != -1) { if (tpp->priomap[new_prio - __sched_fifo_min_prio] + 1 == 0) return EAGAIN; ++tpp->priomap[new_prio - __sched_fifo_min_prio]; if (new_prio > priomax) newpriomax = new_prio; } if (previous_prio != -1) { if (--tpp->priomap[previous_prio - __sched_fifo_min_prio] == 0 && priomax == previous_prio && previous_prio > new_prio) { int i; for (i = previous_prio - 1; i >= __sched_fifo_min_prio; --i) if (tpp->priomap[i - __sched_fifo_min_prio]) break; newpriomax = i; } } if (priomax == newpriomax) return 0; lll_lock (self->lock, LLL_PRIVATE); tpp->priomax = newpriomax; int result = 0; #ifdef TPP_PTHREAD_SCHED int policy; struct sched_param param; #endif if ((self->flags & ATTR_FLAG_SCHED_SET) == 0) { #ifndef TPP_PTHREAD_SCHED if (__sched_getparam (self->tid, &self->schedparam) != 0) #else if (__pthread_getschedparam (self->tid, &policy, &self->schedparam) != 0) #endif result = errno; else self->flags |= ATTR_FLAG_SCHED_SET; } if ((self->flags & ATTR_FLAG_POLICY_SET) == 0) { #ifndef TPP_PTHREAD_SCHED self->schedpolicy = __sched_getscheduler (self->tid); #else if (__pthread_getschedparam (self->tid, &self->schedpolicy, &param) != 0) self->schedpolicy = -1; #endif if (self->schedpolicy == -1) result = errno; else self->flags |= ATTR_FLAG_POLICY_SET; } if (result == 0) { struct sched_param sp = self->schedparam; if (sp.sched_priority < newpriomax || sp.sched_priority < priomax) { if (sp.sched_priority < newpriomax) sp.sched_priority = newpriomax; #ifndef TPP_PTHREAD_SCHED if (__sched_setscheduler (self->tid, self->schedpolicy, &sp) < 0) #else if (__pthread_setschedparam (self->tid, self->schedpolicy, &sp) < 0) #endif result = errno; } } lll_unlock (self->lock, LLL_PRIVATE); return result; }
void __pthread_initialize_minimal_internal (void) { #ifndef SHARED /* Unlike in the dynamically linked case the dynamic linker has not taken care of initializing the TLS data structures. */ __libc_setup_tls (TLS_TCB_SIZE, TLS_TCB_ALIGN); /* We must prevent gcc from being clever and move any of the following code ahead of the __libc_setup_tls call. This function will initialize the thread register which is subsequently used. */ __asm __volatile (""); #endif /* Minimal initialization of the thread descriptor. */ struct pthread *pd = THREAD_SELF; __pthread_initialize_pids (pd); THREAD_SETMEM (pd, specific[0], &pd->specific_1stblock[0]); THREAD_SETMEM (pd, user_stack, true); if (LLL_LOCK_INITIALIZER != 0) THREAD_SETMEM (pd, lock, LLL_LOCK_INITIALIZER); #if HP_TIMING_AVAIL THREAD_SETMEM (pd, cpuclock_offset, GL(dl_cpuclock_offset)); #endif /* Initialize the robust mutex data. */ { #ifdef __PTHREAD_MUTEX_HAVE_PREV pd->robust_prev = &pd->robust_head; #endif pd->robust_head.list = &pd->robust_head; #ifdef __NR_set_robust_list pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock) - offsetof (pthread_mutex_t, __data.__list.__next)); INTERNAL_SYSCALL_DECL (err); int res = INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head, sizeof (struct robust_list_head)); if (INTERNAL_SYSCALL_ERROR_P (res, err)) #endif set_robust_list_not_avail (); } #ifdef __NR_futex # ifndef __ASSUME_PRIVATE_FUTEX /* Private futexes are always used (at least internally) so that doing the test once this early is beneficial. */ { int word = 0; INTERNAL_SYSCALL_DECL (err); word = INTERNAL_SYSCALL (futex, err, 3, &word, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, 1); if (!INTERNAL_SYSCALL_ERROR_P (word, err)) THREAD_SETMEM (pd, header.private_futex, FUTEX_PRIVATE_FLAG); } /* Private futexes have been introduced earlier than the FUTEX_CLOCK_REALTIME flag. We don't have to run the test if we know the former are not supported. This also means we know the kernel will return ENOSYS for unknown operations. */ if (THREAD_GETMEM (pd, header.private_futex) != 0) # endif # ifndef __ASSUME_FUTEX_CLOCK_REALTIME { int word = 0; /* NB: the syscall actually takes six parameters. The last is the bit mask. But since we will not actually wait at all the value is irrelevant. Given that passing six parameters is difficult on some architectures we just pass whatever random value the calling convention calls for to the kernel. It causes no harm. */ INTERNAL_SYSCALL_DECL (err); word = INTERNAL_SYSCALL (futex, err, 5, &word, FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME | FUTEX_PRIVATE_FLAG, 1, NULL, 0); assert (INTERNAL_SYSCALL_ERROR_P (word, err)); if (INTERNAL_SYSCALL_ERRNO (word, err) != ENOSYS) __set_futex_clock_realtime (); } # endif #endif /* Set initial thread's stack block from 0 up to __libc_stack_end. It will be bigger than it actually is, but for unwind.c/pt-longjmp.c purposes this is good enough. */ THREAD_SETMEM (pd, stackblock_size, (size_t) __libc_stack_end); /* Initialize the list of all running threads with the main thread. */ INIT_LIST_HEAD (&__stack_user); list_add (&pd->list, &__stack_user); /* Before initializing __stack_user, the debugger could not find us and had to set __nptl_initial_report_events. Propagate its setting. */ THREAD_SETMEM (pd, report_events, __nptl_initial_report_events); #if defined SIGCANCEL || defined SIGSETXID struct sigaction sa; __sigemptyset (&sa.sa_mask); # ifdef SIGCANCEL /* Install the cancellation signal handler. If for some reason we cannot install the handler we do not abort. 
Maybe we should, but it is only asynchronous cancellation which is affected. */ sa.sa_sigaction = sigcancel_handler; sa.sa_flags = SA_SIGINFO; (void) __libc_sigaction (SIGCANCEL, &sa, NULL); # endif # ifdef SIGSETXID /* Install the handler to change the threads' uid/gid. */ sa.sa_sigaction = sighandler_setxid; sa.sa_flags = SA_SIGINFO | SA_RESTART; (void) __libc_sigaction (SIGSETXID, &sa, NULL); # endif /* The parent process might have left the signals blocked. Just in case, unblock them. We reuse the signal mask in the sigaction structure. It is already cleared. */ # ifdef SIGCANCEL __sigaddset (&sa.sa_mask, SIGCANCEL); # endif # ifdef SIGSETXID __sigaddset (&sa.sa_mask, SIGSETXID); # endif { INTERNAL_SYSCALL_DECL (err); (void) INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_UNBLOCK, &sa.sa_mask, NULL, _NSIG / 8); } #endif /* Get the size of the static and alignment requirements for the TLS block. */ size_t static_tls_align; _dl_get_tls_static_info (&__static_tls_size, &static_tls_align); /* Make sure the size takes all the alignments into account. */ if (STACK_ALIGN > static_tls_align) static_tls_align = STACK_ALIGN; __static_tls_align_m1 = static_tls_align - 1; __static_tls_size = roundup (__static_tls_size, static_tls_align); /* Determine the default allowed stack size. This is the size used in case the user does not specify one. */ struct rlimit limit; if (__getrlimit (RLIMIT_STACK, &limit) != 0 || limit.rlim_cur == RLIM_INFINITY) /* The system limit is not usable. Use an architecture-specific default. */ limit.rlim_cur = ARCH_STACK_DEFAULT_SIZE; else if (limit.rlim_cur < PTHREAD_STACK_MIN) /* The system limit is unusably small. Use the minimal size acceptable. */ limit.rlim_cur = PTHREAD_STACK_MIN; /* Make sure it meets the minimum size that allocate_stack (allocatestack.c) will demand, which depends on the page size. */ const uintptr_t pagesz = GLRO(dl_pagesize); const size_t minstack = pagesz + __static_tls_size + MINIMAL_REST_STACK; if (limit.rlim_cur < minstack) limit.rlim_cur = minstack; /* Round the resource limit up to page size. */ limit.rlim_cur = ALIGN_UP (limit.rlim_cur, pagesz); lll_lock (__default_pthread_attr_lock, LLL_PRIVATE); __default_pthread_attr.stacksize = limit.rlim_cur; __default_pthread_attr.guardsize = GLRO (dl_pagesize); lll_unlock (__default_pthread_attr_lock, LLL_PRIVATE); #ifdef SHARED /* Transfer the old value from the dynamic linker's internal location. */ *__libc_dl_error_tsd () = *(*GL(dl_error_catch_tsd)) (); GL(dl_error_catch_tsd) = &__libc_dl_error_tsd; /* Make __rtld_lock_{,un}lock_recursive use pthread_mutex_{,un}lock, keep the lock count from the ld.so implementation. */ GL(dl_rtld_lock_recursive) = (void *) __pthread_mutex_lock; GL(dl_rtld_unlock_recursive) = (void *) __pthread_mutex_unlock; unsigned int rtld_lock_count = GL(dl_load_lock).mutex.__data.__count; GL(dl_load_lock).mutex.__data.__count = 0; while (rtld_lock_count-- > 0) __pthread_mutex_lock (&GL(dl_load_lock).mutex); GL(dl_make_stack_executable_hook) = &__make_stacks_executable; #endif GL(dl_init_static_tls) = &__pthread_init_static_tls; GL(dl_wait_lookup_done) = &__wait_lookup_done; /* Register the fork generation counter with the libc. */ #ifndef TLS_MULTIPLE_THREADS_IN_TCB __libc_multiple_threads_ptr = #endif __libc_pthread_init (&__fork_generation, __reclaim_stacks, ptr_pthread_functions); /* Determine whether the machine is SMP or not. */ __is_smp = is_smp_system (); }
void __unregister_atfork ( void *dso_handle) { /* Check whether there is any entry in the list which we have to remove. It is likely that this is not the case so don't bother getting the lock. We do not worry about other threads adding entries for this DSO right this moment. If this happens this is a race and we can do whatever we please. The program will crash anyway. */ struct fork_handler *runp = __fork_handlers; struct fork_handler *lastp = NULL; while (runp != NULL) if (runp->dso_handle == dso_handle) break; else { lastp = runp; runp = runp->next; } if (runp == NULL) /* Nothing to do. */ return; /* Get the lock to not conflict with additions or deletions. Note that there couldn't have been another thread deleting something. The __unregister_atfork function is only called from the dlclose() code which itself serializes the operations. */ lll_lock (__fork_lock, LLL_PRIVATE); /* We have to create a new list with all the entries we don't remove. */ struct deleted_handler { struct fork_handler *handler; struct deleted_handler *next; } *deleted = NULL; /* Remove the entries for the DSO which is unloaded from the list. It's a singly linked list, so readers may still be walking it. */ do { again: if (runp->dso_handle == dso_handle) { if (lastp == NULL) { /* We have to use an atomic operation here because __linkin_atfork also uses one. */ if (catomic_compare_and_exchange_bool_acq (&__fork_handlers, runp->next, runp) != 0) { runp = __fork_handlers; goto again; } } else lastp->next = runp->next; /* We cannot overwrite the ->next element now. Put the deleted entries in a separate list. */ struct deleted_handler *newp = alloca (sizeof (*newp)); newp->handler = runp; newp->next = deleted; deleted = newp; } else lastp = runp; runp = runp->next; } while (runp != NULL); /* Release the lock. */ lll_unlock (__fork_lock, LLL_PRIVATE); /* Walk the list of all entries which have to be deleted. */ while (deleted != NULL) { /* We need to be informed by possible current users. */ deleted->handler->need_signal = 1; /* Make sure this gets written out first. */ atomic_write_barrier (); /* Decrement the reference counter. If it does not reach zero wait for the last user. */ atomic_decrement (&deleted->handler->refcntr); unsigned int val; while ((val = deleted->handler->refcntr) != 0) lll_futex_wait (&deleted->handler->refcntr, val, LLL_PRIVATE); deleted = deleted->next; } }
static int create_thread (struct pthread *pd, const struct pthread_attr *attr, STACK_VARIABLES_PARMS) { #ifdef TLS_TCB_AT_TP assert (pd->header.tcb != NULL); #endif /* We rely heavily on various flags the CLONE function understands: CLONE_VM, CLONE_FS, CLONE_FILES These flags select semantics with shared address space and file descriptors according to what POSIX requires. CLONE_SIGNAL This flag selects the POSIX signal semantics. CLONE_SETTLS The sixth parameter to CLONE determines the TLS area for the new thread. CLONE_PARENT_SETTID The kernel writes the thread ID of the newly created thread into the location pointed to by the fifth parameter to CLONE. Note that it would be semantically equivalent to use CLONE_CHILD_SETTID but it would be more expensive in the kernel. CLONE_CHILD_CLEARTID The kernel clears the thread ID of a thread that has called sys_exit() in the location pointed to by the seventh parameter to CLONE. CLONE_DETACHED No signal is generated if the thread exits and it is automatically reaped. The termination signal is chosen to be zero which means no signal is sent. */ int clone_flags = (CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGNAL | CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID | CLONE_SYSVSEM #if __ASSUME_NO_CLONE_DETACHED == 0 | CLONE_DETACHED #endif | 0); if (__builtin_expect (THREAD_GETMEM (THREAD_SELF, report_events), 0)) { /* The parent thread is supposed to report events. Check whether the TD_CREATE event is needed, too. */ const int _idx = __td_eventword (TD_CREATE); const uint32_t _mask = __td_eventmask (TD_CREATE); if ((_mask & (__nptl_threads_events.event_bits[_idx] | pd->eventbuf.eventmask.event_bits[_idx])) != 0) { /* We must always have the thread start stopped. */ pd->stopped_start = true; /* Create the thread. We always create the thread stopped so that it does not get far before we tell the debugger. */ int res = do_clone (pd, attr, clone_flags, start_thread, STACK_VARIABLES_ARGS, 1); if (res == 0) { /* Now fill in the information about the new thread in the newly created thread's data structure. We cannot let the new thread do this since we don't know whether it was already scheduled when we send the event. */ pd->eventbuf.eventnum = TD_CREATE; pd->eventbuf.eventdata = pd; /* Enqueue the descriptor. */ do pd->nextevent = __nptl_last_event; while (atomic_compare_and_exchange_bool_acq (&__nptl_last_event, pd, pd->nextevent) != 0); /* Now call the function which signals the event. */ __nptl_create_event (); /* And finally restart the new thread. */ lll_unlock (pd->lock); } return res; } } #ifdef NEED_DL_SYSINFO assert (THREAD_SELF_SYSINFO == THREAD_SYSINFO (pd)); #endif /* Determine whether the newly created thread has to be started stopped since we have to set the scheduling parameters or set the affinity. */ bool stopped = false; if (attr != NULL && (attr->cpuset != NULL || (attr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0)) stopped = true; pd->stopped_start = stopped; pd->parent_cancelhandling = THREAD_GETMEM (THREAD_SELF, cancelhandling); /* Actually create the thread. */ int res = do_clone (pd, attr, clone_flags, start_thread, STACK_VARIABLES_ARGS, stopped); if (res == 0 && stopped) /* And finally restart the new thread. */ lll_unlock (pd->lock); return res; }
int __pthread_tpp_change_priority (int previous_prio, int new_prio) { struct pthread *self = THREAD_SELF; struct priority_protection_data *tpp = THREAD_GETMEM (self, tpp); int fifo_min_prio = atomic_load_relaxed (&__sched_fifo_min_prio); int fifo_max_prio = atomic_load_relaxed (&__sched_fifo_max_prio); if (tpp == NULL) { /* See __init_sched_fifo_prio. We need both the min and max prio, so need to check both, and run initialization if either one is not initialized. The memory model's write-read coherence rule makes this work. */ if (fifo_min_prio == -1 || fifo_max_prio == -1) { __init_sched_fifo_prio (); fifo_min_prio = atomic_load_relaxed (&__sched_fifo_min_prio); fifo_max_prio = atomic_load_relaxed (&__sched_fifo_max_prio); } size_t size = sizeof *tpp; size += (fifo_max_prio - fifo_min_prio + 1) * sizeof (tpp->priomap[0]); tpp = calloc (size, 1); if (tpp == NULL) return ENOMEM; tpp->priomax = fifo_min_prio - 1; THREAD_SETMEM (self, tpp, tpp); } assert (new_prio == -1 || (new_prio >= fifo_min_prio && new_prio <= fifo_max_prio)); assert (previous_prio == -1 || (previous_prio >= fifo_min_prio && previous_prio <= fifo_max_prio)); int priomax = tpp->priomax; int newpriomax = priomax; if (new_prio != -1) { if (tpp->priomap[new_prio - fifo_min_prio] + 1 == 0) return EAGAIN; ++tpp->priomap[new_prio - fifo_min_prio]; if (new_prio > priomax) newpriomax = new_prio; } if (previous_prio != -1) { if (--tpp->priomap[previous_prio - fifo_min_prio] == 0 && priomax == previous_prio && previous_prio > new_prio) { int i; for (i = previous_prio - 1; i >= fifo_min_prio; --i) if (tpp->priomap[i - fifo_min_prio]) break; newpriomax = i; } } if (priomax == newpriomax) return 0; /* See CREATE THREAD NOTES in nptl/pthread_create.c. */ lll_lock (self->lock, LLL_PRIVATE); tpp->priomax = newpriomax; int result = 0; if ((self->flags & ATTR_FLAG_SCHED_SET) == 0) { if (__sched_getparam (self->tid, &self->schedparam) != 0) result = errno; else self->flags |= ATTR_FLAG_SCHED_SET; } if ((self->flags & ATTR_FLAG_POLICY_SET) == 0) { self->schedpolicy = __sched_getscheduler (self->tid); if (self->schedpolicy == -1) result = errno; else self->flags |= ATTR_FLAG_POLICY_SET; } if (result == 0) { struct sched_param sp = self->schedparam; if (sp.sched_priority < newpriomax || sp.sched_priority < priomax) { if (sp.sched_priority < newpriomax) sp.sched_priority = newpriomax; if (__sched_setscheduler (self->tid, self->schedpolicy, &sp) < 0) result = errno; } } lll_unlock (self->lock, LLL_PRIVATE); return result; }
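/* Hedged sketch of the caller-side setup that feeds into
   __pthread_tpp_change_priority: a PTHREAD_PRIO_PROTECT mutex with a priority
   ceiling. The ceiling only has a scheduling effect for real-time policies
   such as SCHED_FIFO, and locking may fail for an unprivileged SCHED_OTHER
   thread, so the return value is checked. Standard pthread calls only. */
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

int main (void)
{
  pthread_mutexattr_t at;
  pthread_mutex_t m;
  int ceiling = 0;

  pthread_mutexattr_init (&at);
  pthread_mutexattr_setprotocol (&at, PTHREAD_PRIO_PROTECT);
  pthread_mutexattr_setprioceiling (&at, sched_get_priority_min (SCHED_FIFO) + 1);
  pthread_mutex_init (&m, &at);

  /* While the mutex is held, the owner runs at least at the ceiling
     priority; the implementation above tracks that boost in tpp->priomap.  */
  int err = pthread_mutex_lock (&m);
  if (err == 0)
    {
      pthread_mutex_getprioceiling (&m, &ceiling);
      printf ("ceiling = %d\n", ceiling);
      pthread_mutex_unlock (&m);
    }
  else
    fprintf (stderr, "lock failed: %d (needs a real-time policy/privilege)\n", err);

  pthread_mutex_destroy (&m);
  pthread_mutexattr_destroy (&at);
  return 0;
}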
pthread_mutex_timedlock ( pthread_mutex_t *mutex, const struct timespec *abstime) { int oldval; pid_t id = THREAD_GETMEM (THREAD_SELF, tid); int result = 0; /* We must not check ABSTIME here. If the thread does not block abstime must not be checked for a valid value. */ switch (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex), PTHREAD_MUTEX_TIMED_NP)) { /* Recursive mutex. */ case PTHREAD_MUTEX_RECURSIVE_NP: /* Check whether we already hold the mutex. */ if (mutex->__data.__owner == id) { /* Just bump the counter. */ if (__builtin_expect (mutex->__data.__count + 1 == 0, 0)) /* Overflow of the counter. */ return EAGAIN; ++mutex->__data.__count; goto out; } /* We have to get the mutex. */ result = lll_timedlock (mutex->__data.__lock, abstime, PTHREAD_MUTEX_PSHARED (mutex)); if (result != 0) goto out; /* Only locked once so far. */ mutex->__data.__count = 1; break; /* Error checking mutex. */ case PTHREAD_MUTEX_ERRORCHECK_NP: /* Check whether we already hold the mutex. */ if (__builtin_expect (mutex->__data.__owner == id, 0)) return EDEADLK; /* FALLTHROUGH */ case PTHREAD_MUTEX_TIMED_NP: simple: /* Normal mutex. */ result = lll_timedlock (mutex->__data.__lock, abstime, PTHREAD_MUTEX_PSHARED (mutex)); break; case PTHREAD_MUTEX_ADAPTIVE_NP: if (! __is_smp) goto simple; if (lll_trylock (mutex->__data.__lock) != 0) { int cnt = 0; int max_cnt = MIN (MAX_ADAPTIVE_COUNT, mutex->__data.__spins * 2 + 10); do { if (cnt++ >= max_cnt) { result = lll_timedlock (mutex->__data.__lock, abstime, PTHREAD_MUTEX_PSHARED (mutex)); break; } #ifdef BUSY_WAIT_NOP BUSY_WAIT_NOP; #endif } while (lll_trylock (mutex->__data.__lock) != 0); mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8; } break; case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP: case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP: case PTHREAD_MUTEX_ROBUST_NORMAL_NP: case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP: THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, &mutex->__data.__list.__next); oldval = mutex->__data.__lock; do { again: if ((oldval & FUTEX_OWNER_DIED) != 0) { /* The previous owner died. Try locking the mutex. */ int newval = id | (oldval & FUTEX_WAITERS); newval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock, newval, oldval); if (newval != oldval) { oldval = newval; goto again; } /* We got the mutex. */ mutex->__data.__count = 1; /* But it is inconsistent unless marked otherwise. */ mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT; ENQUEUE_MUTEX (mutex); THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); /* Note that we deliberately exit here. If we fall through to the end of the function __nusers would be incremented which is not correct because the old owner has to be discounted. */ return EOWNERDEAD; } /* Check whether we already hold the mutex. */ if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0)) { int kind = PTHREAD_MUTEX_TYPE (mutex); if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP) { THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); return EDEADLK; } if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP) { THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); /* Just bump the counter. */ if (__builtin_expect (mutex->__data.__count + 1 == 0, 0)) /* Overflow of the counter. */ return EAGAIN; ++mutex->__data.__count; return 0; } } result = lll_robust_timedlock (mutex->__data.__lock, abstime, id, PTHREAD_ROBUST_MUTEX_PSHARED (mutex)); if (__builtin_expect (mutex->__data.__owner == PTHREAD_MUTEX_NOTRECOVERABLE, 0)) { /* This mutex is now not recoverable. 
*/ mutex->__data.__count = 0; lll_unlock (mutex->__data.__lock, PTHREAD_ROBUST_MUTEX_PSHARED (mutex)); THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); return ENOTRECOVERABLE; } if (result == ETIMEDOUT || result == EINVAL) goto out; oldval = result; } while ((oldval & FUTEX_OWNER_DIED) != 0); mutex->__data.__count = 1; ENQUEUE_MUTEX (mutex); THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); break; case PTHREAD_MUTEX_PI_RECURSIVE_NP: case PTHREAD_MUTEX_PI_ERRORCHECK_NP: case PTHREAD_MUTEX_PI_NORMAL_NP: case PTHREAD_MUTEX_PI_ADAPTIVE_NP: case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP: case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP: case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP: case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP: { int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP; int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP; if (robust) /* Note: robust PI futexes are signaled by setting bit 0. */ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, (void *) (((uintptr_t) &mutex->__data.__list.__next) | 1)); oldval = mutex->__data.__lock; /* Check whether we already hold the mutex. */ if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0)) { if (kind == PTHREAD_MUTEX_ERRORCHECK_NP) { THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); return EDEADLK; } if (kind == PTHREAD_MUTEX_RECURSIVE_NP) { THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); /* Just bump the counter. */ if (__builtin_expect (mutex->__data.__count + 1 == 0, 0)) /* Overflow of the counter. */ return EAGAIN; ++mutex->__data.__count; return 0; } } oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock, id, 0); if (oldval != 0) { /* The mutex is locked. The kernel will now take care of everything. The timeout value must be a relative value. Convert it. */ int private = (robust ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex) : PTHREAD_MUTEX_PSHARED (mutex)); INTERNAL_SYSCALL_DECL (__err); int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock, __lll_private_flag (FUTEX_LOCK_PI, private), 1, abstime); if (INTERNAL_SYSCALL_ERROR_P (e, __err)) { if (INTERNAL_SYSCALL_ERRNO (e, __err) == ETIMEDOUT) return ETIMEDOUT; if (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK) { assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK || (kind != PTHREAD_MUTEX_ERRORCHECK_NP && kind != PTHREAD_MUTEX_RECURSIVE_NP)); /* ESRCH can happen only for non-robust PI mutexes where the owner of the lock died. */ assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH || !robust); /* Delay the thread until the timeout is reached. Then return ETIMEDOUT. */ struct timespec reltime; struct timespec now; INTERNAL_SYSCALL (clock_gettime, __err, 2, CLOCK_REALTIME, &now); reltime.tv_sec = abstime->tv_sec - now.tv_sec; reltime.tv_nsec = abstime->tv_nsec - now.tv_nsec; if (reltime.tv_nsec < 0) { reltime.tv_nsec += 1000000000; --reltime.tv_sec; } if (reltime.tv_sec >= 0) while (nanosleep_not_cancel (&reltime, &reltime) != 0) continue; return ETIMEDOUT; } return INTERNAL_SYSCALL_ERRNO (e, __err); } oldval = mutex->__data.__lock; assert (robust || (oldval & FUTEX_OWNER_DIED) == 0); } if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0)) { atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED); /* We got the mutex. */ mutex->__data.__count = 1; /* But it is inconsistent unless marked otherwise. 
*/ mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT; ENQUEUE_MUTEX_PI (mutex); THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); /* Note that we deliberately exit here. If we fall through to the end of the function __nusers would be incremented which is not correct because the old owner has to be discounted. */ return EOWNERDEAD; } if (robust && __builtin_expect (mutex->__data.__owner == PTHREAD_MUTEX_NOTRECOVERABLE, 0)) { /* This mutex is now not recoverable. */ mutex->__data.__count = 0; INTERNAL_SYSCALL_DECL (__err); INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock, __lll_private_flag (FUTEX_UNLOCK_PI, PTHREAD_ROBUST_MUTEX_PSHARED (mutex)), 0, 0); THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); return ENOTRECOVERABLE; } mutex->__data.__count = 1; if (robust) { ENQUEUE_MUTEX_PI (mutex); THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); } } break; case PTHREAD_MUTEX_PP_RECURSIVE_NP: case PTHREAD_MUTEX_PP_ERRORCHECK_NP: case PTHREAD_MUTEX_PP_NORMAL_NP: case PTHREAD_MUTEX_PP_ADAPTIVE_NP: { int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP; oldval = mutex->__data.__lock; /* Check whether we already hold the mutex. */ if (mutex->__data.__owner == id) { if (kind == PTHREAD_MUTEX_ERRORCHECK_NP) return EDEADLK; if (kind == PTHREAD_MUTEX_RECURSIVE_NP) { /* Just bump the counter. */ if (__builtin_expect (mutex->__data.__count + 1 == 0, 0)) /* Overflow of the counter. */ return EAGAIN; ++mutex->__data.__count; return 0; } } int oldprio = -1, ceilval; do { int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT; if (__pthread_current_priority () > ceiling) { result = EINVAL; failpp: if (oldprio != -1) __pthread_tpp_change_priority (oldprio, -1); return result; } result = __pthread_tpp_change_priority (oldprio, ceiling); if (result) return result; ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT; oldprio = ceiling; oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock, ceilval | 1, ceilval); if (oldval == ceilval) break; do { oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock, ceilval | 2, ceilval | 1); if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval) break; if (oldval != ceilval) { /* Reject invalid timeouts. */ if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000) { result = EINVAL; goto failpp; } struct timeval tv; struct timespec rt; /* Get the current time. */ (void) gettimeofday (&tv, NULL); /* Compute relative timeout. */ rt.tv_sec = abstime->tv_sec - tv.tv_sec; rt.tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000; if (rt.tv_nsec < 0) { rt.tv_nsec += 1000000000; --rt.tv_sec; } /* Already timed out? */ if (rt.tv_sec < 0) { result = ETIMEDOUT; goto failpp; } lll_futex_timed_wait (&mutex->__data.__lock, ceilval | 2, &rt, PTHREAD_MUTEX_PSHARED (mutex)); } } while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock, ceilval | 2, ceilval) != ceilval); } while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval); assert (mutex->__data.__owner == 0); mutex->__data.__count = 1; } break; default: /* Correct code cannot set any other type. */ return EINVAL; }
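/* Caller-side sketch (illustrative only): build an absolute CLOCK_REALTIME
   deadline and let pthread_mutex_timedlock return ETIMEDOUT if the mutex
   stays busy, which is the abstime handling the implementation above
   performs internally. The two-second timeout is an arbitrary example. */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

int lock_with_timeout (void)
{
  struct timespec deadline;

  clock_gettime (CLOCK_REALTIME, &deadline);
  deadline.tv_sec += 2;               /* Give up after roughly two seconds.  */

  int err = pthread_mutex_timedlock (&m, &deadline);
  if (err == ETIMEDOUT)
    {
      fprintf (stderr, "mutex still busy after 2s\n");
      return err;
    }
  if (err != 0)
    return err;

  /* ... critical section ... */
  pthread_mutex_unlock (&m);
  return 0;
}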
void OSSpinLockUnlock(volatile OSSpinLock *__lock) { lll_unlock((OSLowLock*)__lock); }
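/* Hedged usage sketch: this assumes the port keeps the usual OSSpinLock
   surface (OS_SPINLOCK_INIT, OSSpinLockLock, and the header path
   libkern/OSAtomic.h) alongside the OSSpinLockUnlock wrapper above; those
   names are assumptions about this codebase, not confirmed by the snippet. */
#include <libkern/OSAtomic.h>

static OSSpinLock counter_lock = OS_SPINLOCK_INIT;
static long counter;

void bump_counter (void)
{
  OSSpinLockLock (&counter_lock);   /* Spin until the lock is free.  */
  ++counter;
  OSSpinLockUnlock (&counter_lock); /* Ends up in lll_unlock above.  */
}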
int pthread_getattr_np ( pthread_t thread_id, pthread_attr_t *attr) { struct pthread *thread = (struct pthread *) thread_id; struct pthread_attr *iattr = (struct pthread_attr *) attr; int ret = 0; lll_lock (thread->lock, LLL_PRIVATE); /* The thread library is responsible for keeping the values in the thread descriptor up-to-date in case the user changes them. */ memcpy (&iattr->schedparam, &thread->schedparam, sizeof (struct sched_param)); iattr->schedpolicy = thread->schedpolicy; /* Copy the flags. */ iattr->flags = thread->flags; /* The thread might be detached by now. */ if (IS_DETACHED (thread)) iattr->flags |= ATTR_FLAG_DETACHSTATE; /* This is the guardsize after adjusting it. */ iattr->guardsize = thread->reported_guardsize; /* The sizes are subject to alignment. */ if (__builtin_expect (thread->stackblock != NULL, 1)) { iattr->stacksize = thread->stackblock_size; iattr->stackaddr = (char *) thread->stackblock + iattr->stacksize; } else { /* No stack information available. This must be for the initial thread. Get the info in some magical way. */ assert (abs (thread->pid) == thread->tid); /* Stack size limit. */ struct rlimit rl; /* The safest way to get the top of the stack is to read /proc/self/maps and locate the line into which __libc_stack_end falls. */ FILE *fp = fopen ("/proc/self/maps", "rc"); if (fp == NULL) ret = errno; /* We need the limit of the stack in any case. */ else { if (getrlimit (RLIMIT_STACK, &rl) != 0) ret = errno; else { /* We need no locking. */ __fsetlocking (fp, FSETLOCKING_BYCALLER); /* Until we find an entry (which should always be the case) mark the result as a failure. */ ret = ENOENT; char *line = NULL; size_t linelen = 0; uintptr_t last_to = 0; while (! feof_unlocked (fp)) { if (getdelim (&line, &linelen, '\n', fp) <= 0) break; uintptr_t from; uintptr_t to; if (sscanf (line, "%" SCNxPTR "-%" SCNxPTR, &from, &to) != 2) continue; if (from <= (uintptr_t) __libc_stack_end && (uintptr_t) __libc_stack_end < to) { /* Found the entry. Now we have the info we need. */ iattr->stacksize = rl.rlim_cur; iattr->stackaddr = (void *) to; /* The limit might be too high. */ if ((size_t) iattr->stacksize > (size_t) iattr->stackaddr - last_to) iattr->stacksize = (size_t) iattr->stackaddr - last_to; /* We succeeded; no need to look further. */ ret = 0; break; } last_to = to; } free (line); } fclose (fp); } } iattr->flags |= ATTR_FLAG_STACKADDR; if (ret == 0) { size_t size = 16; cpu_set_t *cpuset = NULL; do { size <<= 1; void *newp = realloc (cpuset, size); if (newp == NULL) { ret = ENOMEM; break; } cpuset = (cpu_set_t *) newp; ret = __pthread_getaffinity_np (thread_id, size, cpuset); } /* Pick some ridiculous upper limit. Is 8 million CPUs enough? */ while (ret == EINVAL && size < 1024 * 1024); if (ret == 0) { iattr->cpuset = cpuset; iattr->cpusetsize = size; } else { free (cpuset); if (ret == ENOSYS) { /* There is no such functionality. */ ret = 0; iattr->cpuset = NULL; iattr->cpusetsize = 0; } } } lll_unlock (thread->lock, LLL_PRIVATE); return ret; }
int pthread_setattr_default_np (const pthread_attr_t *in) { const struct pthread_attr *real_in; struct pthread_attr attrs; int ret; assert (sizeof (*in) >= sizeof (struct pthread_attr)); real_in = (struct pthread_attr *) in; /* Catch invalid values. */ int policy = real_in->schedpolicy; ret = check_sched_policy_attr (policy); if (ret) return ret; const struct sched_param *param = &real_in->schedparam; if (param->sched_priority > 0) { ret = check_sched_priority_attr (param->sched_priority, policy); if (ret) return ret; } ret = check_cpuset_attr (real_in->cpuset, real_in->cpusetsize); if (ret) return ret; /* stacksize == 0 is fine. It means that we don't change the current value. */ if (real_in->stacksize != 0) { ret = check_stacksize_attr (real_in->stacksize); if (ret) return ret; } /* Having a default stack address is wrong. */ if (real_in->flags & ATTR_FLAG_STACKADDR) return EINVAL; attrs = *real_in; /* Now take the lock because we start writing into __default_pthread_attr. */ lll_lock (__default_pthread_attr_lock, LLL_PRIVATE); /* Free the cpuset if the input is 0. Otherwise copy in the cpuset contents. */ size_t cpusetsize = attrs.cpusetsize; if (cpusetsize == 0) { free (__default_pthread_attr.cpuset); __default_pthread_attr.cpuset = NULL; } else if (cpusetsize == __default_pthread_attr.cpusetsize) { attrs.cpuset = __default_pthread_attr.cpuset; memcpy (attrs.cpuset, real_in->cpuset, cpusetsize); } else { /* This may look wrong at first sight, but it isn't. We're freeing __default_pthread_attr.cpuset and allocating to attrs.cpuset because we'll copy over all of attr to __default_pthread_attr later. */ cpu_set_t *newp = realloc (__default_pthread_attr.cpuset, cpusetsize); if (newp == NULL) { ret = ENOMEM; goto out; } attrs.cpuset = newp; memcpy (attrs.cpuset, real_in->cpuset, cpusetsize); } /* We don't want to accidentally set the default stacksize to zero. */ if (attrs.stacksize == 0) attrs.stacksize = __default_pthread_attr.stacksize; __default_pthread_attr = attrs; out: lll_unlock (__default_pthread_attr_lock, LLL_PRIVATE); return ret; }
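/* Illustrative sketch (GNU extension, needs _GNU_SOURCE): read the current
   process-wide default attributes, raise the default stack size, and install
   the result with pthread_setattr_default_np, which is what the function
   above validates and copies under __default_pthread_attr_lock. The 4 MiB
   value is an arbitrary example. */
#define _GNU_SOURCE
#include <pthread.h>
#include <stdio.h>

int main (void)
{
  pthread_attr_t a;
  size_t stacksize = 0;

  pthread_getattr_default_np (&a);
  pthread_attr_getstacksize (&a, &stacksize);
  printf ("old default stack size: %zu\n", stacksize);

  pthread_attr_setstacksize (&a, 4 * 1024 * 1024);   /* New 4 MiB default.  */
  int err = pthread_setattr_default_np (&a);
  if (err != 0)
    fprintf (stderr, "pthread_setattr_default_np: %d\n", err);

  pthread_attr_destroy (&a);
  return 0;
}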