Example no. 1
static pid_t __fork(void)
{
  pid_t pid;
  struct handler_list * prepare, * child, * parent;

  /* Take the atfork lock, snapshot the registered handler lists, and run
     the "prepare" handlers before forking.  */
  __pthread_mutex_lock(&pthread_atfork_lock);
  prepare = pthread_atfork_prepare;
  child = pthread_atfork_child;
  parent = pthread_atfork_parent;
  pthread_call_handlers(prepare);

  __pthread_once_fork_prepare();
#ifdef __MALLOC__
  /* Hold the allocator locks across the fork so the child inherits a
     consistent heap.  */
  __pthread_mutex_lock(&__malloc_sbrk_lock);
  __pthread_mutex_lock(&__malloc_heap_lock);
#ifdef __UCLIBC_UCLINUX_BROKEN_MUNMAP__
  __pthread_mutex_lock(&__malloc_mmb_heap_lock);
#endif
#elif defined(__MALLOC_STANDARD__) || defined(__MALLOC_SIMPLE__)
  __pthread_mutex_lock(&__malloc_lock);
#endif

  pid = __libc_fork();
  if (pid == 0) {
    /* In the child only the forking thread survives, so the locks taken
       above are re-initialized rather than unlocked.  */
#if defined(__MALLOC_STANDARD__) || defined(__MALLOC_SIMPLE__)
    __libc_lock_init_recursive(__malloc_lock);
#elif defined(__MALLOC__)
#ifdef __UCLIBC_UCLINUX_BROKEN_MUNMAP__
    __libc_lock_init_adaptive(__malloc_mmb_heap_lock);
#endif
    __libc_lock_init_adaptive(__malloc_heap_lock);
    __libc_lock_init(__malloc_sbrk_lock);
#endif
    __libc_lock_init_adaptive(pthread_atfork_lock);
    __pthread_reset_main_thread();
    __fresetlockfiles();
    __pthread_once_fork_child();
    pthread_call_handlers(child);
  } else {
    /* In the parent, release the locks in reverse order and run the
       "parent" handlers.  */
#if defined(__MALLOC_STANDARD__) || defined(__MALLOC_SIMPLE__)
    __pthread_mutex_unlock(&__malloc_lock);
#elif defined(__MALLOC__)
#ifdef __UCLIBC_UCLINUX_BROKEN_MUNMAP__
    __pthread_mutex_unlock(&__malloc_mmb_heap_lock);
#endif
    __pthread_mutex_unlock(&__malloc_heap_lock);
    __pthread_mutex_unlock(&__malloc_sbrk_lock);
#endif
    __pthread_mutex_unlock(&pthread_atfork_lock);
    __pthread_once_fork_parent();
    pthread_call_handlers(parent);
  }
  return pid;
}
Example no. 2
/* Delete a key */
int pthread_key_delete(pthread_key_t key)
{
    pthread_descr self = thread_self();

    __pthread_mutex_lock(&pthread_keys_mutex);
    if (key >= PTHREAD_KEYS_MAX || !pthread_keys[key].in_use) {
	__pthread_mutex_unlock(&pthread_keys_mutex);
	return EINVAL;
    }
    pthread_keys[key].in_use = 0;
    pthread_keys[key].destr = NULL;

    /* Set the value of the key to NULL in all running threads, so
       that if the key is reallocated later by pthread_key_create, its
       associated values will be NULL in all threads.
       Do nothing if no threads have been created yet.  */
    if (__pthread_manager_request != -1)
    {
	pthread_descr th;
	unsigned int idx1st, idx2nd;

	idx1st = key / PTHREAD_KEY_2NDLEVEL_SIZE;
	idx2nd = key % PTHREAD_KEY_2NDLEVEL_SIZE;
	th = self;
	do {
	    /* If the thread is already terminated, don't modify the memory.  */
	    if (!th->p_terminated && th->p_specific[idx1st] != NULL)
		th->p_specific[idx1st][idx2nd] = NULL;
	    th = th->p_nextlive;
	} while (th != self);
    }

    __pthread_mutex_unlock(&pthread_keys_mutex);
    return 0;
}
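A minimal, hypothetical usage sketch (not from the source above): it creates a key, stores a value, and deletes the key. Note that pthread_key_delete() does not invoke the destructor, so the caller has to release any outstanding value itself.

#include <assert.h>
#include <pthread.h>
#include <stdlib.h>

int main(void)
{
    pthread_key_t key;
    char *value = malloc(16);

    assert(pthread_key_create(&key, free) == 0);
    assert(pthread_setspecific(key, value) == 0);

    /* Deleting the key does not run the destructor, so release the
       value first.  */
    assert(pthread_setspecific(key, NULL) == 0);
    free(value);
    assert(pthread_key_delete(key) == 0);
    return 0;
}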
Example no. 3
void
__flockfile (FILE *stream)
{
#ifdef USE_IN_LIBIO
  __pthread_mutex_lock (stream->_lock);
#else
#endif
}
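flockfile()/funlockfile() are the public wrappers over this per-stream lock. A hypothetical helper, log_pair() (an illustrative name), shows the usual reason to take the lock explicitly: making a multi-call write appear atomic to other threads.

#include <stdio.h>

/* Hypothetical helper: emit a key=value line without other threads'
   output being interleaved between the individual stdio calls.  */
static void log_pair(FILE *fp, const char *key, const char *val)
{
    flockfile(fp);
    fputs(key, fp);
    fputc('=', fp);
    fputs(val, fp);
    fputc('\n', fp);
    funlockfile(fp);
}

int main(void)
{
    log_pair(stdout, "status", "ok");
    return 0;
}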
Example no. 4
void
__pthread_destroy_specific (struct __pthread *thread)
{
  int i;
  int seen_one;

  /* Check if there is any thread specific data.  */
  if (thread->thread_specifics == NULL)
    return;

  __pthread_key_lock_ready ();

  /* Iterate and call the destructors on any thread specific data.  */
  for (;;)
    {
      seen_one = 0;

      __pthread_mutex_lock (&__pthread_key_lock);

      for (i = 0; i < __pthread_key_count && i < thread->thread_specifics_size;
	   i++)
	{
	  void *value;

	  if (__pthread_key_destructors[i] == PTHREAD_KEY_INVALID)
	    continue;

	  value = thread->thread_specifics[i];
	  if (value != NULL)
	    {
	      thread->thread_specifics[i] = 0;

	      if (__pthread_key_destructors[i])
		{
		  seen_one = 1;
		  __pthread_key_destructors[i] (value);
		}
	    }
	}

      __pthread_mutex_unlock (&__pthread_key_lock);

      if (!seen_one)
	break;

      /* This may take a very long time.  Let those blocking on
         pthread_key_create or pthread_key_delete make progress.  */
      sched_yield ();
    }

  free (thread->thread_specifics);
  thread->thread_specifics = 0;
  thread->thread_specifics_size = 0;
}
Example no. 5
void
__pthread_destroy_specific (struct __pthread *thread)
{
  error_t err;
  int i;
  int seen_one;

  /* Check if there is any thread specific data.  */
  if (! thread->thread_specifics)
    return;

  __pthread_key_lock_ready ();

  /* Iterate and call the destructors on any thread specific data.  */
  for (;;)
    {
      seen_one = 0;

      __pthread_mutex_lock (&__pthread_key_lock);

      for (i = 0; i < __pthread_key_count; i ++)
	{
	  void *value;

	  if (__pthread_key_destructors[i] == PTHREAD_KEY_INVALID)
	    continue;

	  value = hurd_ihash_find (thread->thread_specifics, i);
	  if (value)
	    {
	      err = hurd_ihash_remove (thread->thread_specifics, i);
	      assert (err == 1);

	      if (__pthread_key_destructors[i])
		{
		  seen_one = 1;
		  __pthread_key_destructors[i] (value);
		}
	    }
	}

      __pthread_mutex_unlock (&__pthread_key_lock);

      if (! seen_one)
	break;

      /* This may take a very long time.  Let those blocking on
	 pthread_key_create or pthread_key_delete make progress.  */
      sched_yield ();
    }

  hurd_ihash_free (thread->thread_specifics);
  thread->thread_specifics = 0;
}
Example no. 6
/* rewinddir() just does an lseek(fd,0,0) - see close for comments */
void rewinddir(DIR * dir)
{
	if (!dir) {
		__set_errno(EBADF);
		return;
	}
#ifdef __UCLIBC_HAS_THREADS__
	__pthread_mutex_lock(&(dir->dd_lock));
#endif
	lseek(dir->dd_fd, 0, SEEK_SET);
	dir->dd_nextoff = dir->dd_nextloc = dir->dd_size = 0;
#ifdef __UCLIBC_HAS_THREADS__
	__pthread_mutex_unlock(&(dir->dd_lock));
#endif
}
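A short, hypothetical two-pass caller: count the entries, rewind the stream, then print them, relying on rewinddir() to restart the scan from the beginning.

#include <dirent.h>
#include <stdio.h>

int main(void)
{
    DIR *dir = opendir(".");
    struct dirent *de;
    int count = 0;

    if (dir == NULL)
        return 1;

    while (readdir(dir) != NULL)
        count++;

    rewinddir(dir);          /* second pass starts from the first entry again */

    while ((de = readdir(dir)) != NULL)
        printf("%s\n", de->d_name);

    printf("%d entries\n", count);
    closedir(dir);
    return 0;
}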
Example no. 7
int pthread_atfork(void (*prepare)(void),
		     void (*parent)(void),
		     void (*child)(void))
{
  struct handler_list_block * block =
    (struct handler_list_block *) malloc(sizeof(struct handler_list_block));
  if (block == NULL) return ENOMEM;
  __pthread_mutex_lock(&pthread_atfork_lock);
  /* "prepare" handlers are called in LIFO */
  pthread_insert_list(&pthread_atfork_prepare, prepare, &block->prepare, 0);
  /* "parent" handlers are called in FIFO */
  pthread_insert_list(&pthread_atfork_parent, parent, &block->parent, 1);
  /* "child" handlers are called in FIFO */
  pthread_insert_list(&pthread_atfork_child, child, &block->child, 1);
  __pthread_mutex_unlock(&pthread_atfork_lock);
  return 0;
}
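A hypothetical registration example (lib_lock and the handler names are illustrative): a library protects its own mutex across fork() so the child never inherits it in a locked state. These are the handler lists that __fork() in example no. 1 walks.

#include <pthread.h>
#include <sys/wait.h>
#include <unistd.h>

static pthread_mutex_t lib_lock = PTHREAD_MUTEX_INITIALIZER;

static void prepare(void) { pthread_mutex_lock(&lib_lock); }    /* before fork, LIFO */
static void parent(void)  { pthread_mutex_unlock(&lib_lock); }  /* in the parent, FIFO */
static void child(void)   { pthread_mutex_unlock(&lib_lock); }  /* in the child, FIFO */

int main(void)
{
    pid_t pid;

    pthread_atfork(prepare, parent, child);

    pid = fork();                        /* the handlers run around this call */
    if (pid == 0) {
        pthread_mutex_lock(&lib_lock);   /* safe: the lock is consistent here */
        pthread_mutex_unlock(&lib_lock);
        _exit(0);
    }
    waitpid(pid, NULL, 0);
    return 0;
}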
Example no. 8
int pthread_key_create(pthread_key_t * key, destr_function destr)
{
  int i;

  __pthread_mutex_lock(&pthread_keys_mutex);
  for (i = 0; i < PTHREAD_KEYS_MAX; i++) {
    if (! pthread_keys[i].in_use) {
      /* Mark key in use */
      pthread_keys[i].in_use = 1;
      pthread_keys[i].destr = destr;
      __pthread_mutex_unlock(&pthread_keys_mutex);
      *key = i;
      return 0;
    }
  }
  __pthread_mutex_unlock(&pthread_keys_mutex);
  return EAGAIN;
}
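The scan above returns EAGAIN once every slot up to PTHREAD_KEYS_MAX is in use. A hypothetical probe of that limit:

#include <errno.h>
#include <limits.h>
#include <pthread.h>
#include <stdio.h>

int main(void)
{
    pthread_key_t keys[PTHREAD_KEYS_MAX];
    pthread_key_t extra;
    int n = 0;

    while (n < PTHREAD_KEYS_MAX && pthread_key_create(&keys[n], NULL) == 0)
        n++;

    printf("allocated %d keys (PTHREAD_KEYS_MAX = %d)\n", n, PTHREAD_KEYS_MAX);

    /* With the table full, one more request fails with EAGAIN.  */
    if (n == PTHREAD_KEYS_MAX && pthread_key_create(&extra, NULL) == EAGAIN)
        printf("key table exhausted as expected\n");

    while (n-- > 0)
        pthread_key_delete(keys[n]);
    return 0;
}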
Example no. 9
struct dirent *readdir(DIR * dir)
{
    ssize_t bytes;
    struct dirent *de;

    if (!dir) {
        __set_errno(EBADF);
        return NULL;
    }

#ifdef __UCLIBC_HAS_THREADS__
    __pthread_mutex_lock(&(dir->dd_lock));
#endif

    do {
        if (dir->dd_size <= dir->dd_nextloc) {
            /* read dir->dd_max bytes of directory entries. */
            bytes = __getdents(dir->dd_fd, dir->dd_buf, dir->dd_max);
            if (bytes <= 0) {
                de = NULL;
                goto all_done;
            }
            dir->dd_size = bytes;
            dir->dd_nextloc = 0;
        }

        de = (struct dirent *) (((char *) dir->dd_buf) + dir->dd_nextloc);

        /* Am I right? H.J. */
        dir->dd_nextloc += de->d_reclen;

        /* We have to save the next offset here. */
        dir->dd_nextoff = de->d_off;

        /* Skip deleted files.  */
    } while (de->d_ino == 0);

all_done:
#ifdef __UCLIBC_HAS_THREADS__
    __pthread_mutex_unlock(&(dir->dd_lock));
#endif
    return de;
}
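Because readdir() returns NULL both at end-of-directory and on error (the bytes <= 0 path above), a careful caller distinguishes the two via errno. A hypothetical example:

#include <dirent.h>
#include <errno.h>
#include <stdio.h>

int main(void)
{
    DIR *dir = opendir("/etc");
    struct dirent *de;

    if (dir == NULL)
        return 1;

    errno = 0;
    while ((de = readdir(dir)) != NULL) {
        printf("%lu %s\n", (unsigned long) de->d_ino, de->d_name);
        errno = 0;
    }
    if (errno != 0)           /* NULL with errno set means a real error */
        perror("readdir");

    closedir(dir);
    return 0;
}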
Example no. 10
int regex_match(struct regex_data *regex, char const *subject, int partial)
{
	int rc;
	__pthread_mutex_lock(&regex->match_mutex);
	rc = pcre2_match(
	    regex->regex, (PCRE2_SPTR)subject, PCRE2_ZERO_TERMINATED, 0,
	    partial ? PCRE2_PARTIAL_SOFT : 0, regex->match_data, NULL);
	__pthread_mutex_unlock(&regex->match_mutex);
	if (rc > 0)
		return REGEX_MATCH;
	switch (rc) {
	case PCRE2_ERROR_PARTIAL:
		return REGEX_MATCH_PARTIAL;
	case PCRE2_ERROR_NOMATCH:
		return REGEX_NO_MATCH;
	default:
		return REGEX_ERROR;
	}
}
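The mutex serializes pcre2_match() because the compiled pattern's match_data block is shared between callers. The real struct regex_data lives elsewhere in libselinux; the sketch below only declares the three members used above plus an illustrative regex_data_init() helper, assuming PCRE2's 8-bit code units.

#define PCRE2_CODE_UNIT_WIDTH 8
#include <pcre2.h>
#include <pthread.h>

struct regex_data {
	pcre2_code *regex;
	pcre2_match_data *match_data;
	pthread_mutex_t match_mutex;
};

static int regex_data_init(struct regex_data *rd, const char *pattern)
{
	int errcode;
	PCRE2_SIZE erroffset;

	rd->regex = pcre2_compile((PCRE2_SPTR) pattern, PCRE2_ZERO_TERMINATED,
				  0, &errcode, &erroffset, NULL);
	if (!rd->regex)
		return -1;

	/* One match_data block is shared by every caller, hence the mutex
	   around pcre2_match() above.  */
	rd->match_data = pcre2_match_data_create_from_pattern(rd->regex, NULL);
	if (!rd->match_data) {
		pcre2_code_free(rd->regex);
		return -1;
	}
	pthread_mutex_init(&rd->match_mutex, NULL);
	return 0;
}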
Example no. 11
int
__pthread_setcanceltype (int type, int *oldtype)
{
  struct __pthread *p = _pthread_self ();

  switch (type)
    {
    default:
      return EINVAL;
    case PTHREAD_CANCEL_DEFERRED:
    case PTHREAD_CANCEL_ASYNCHRONOUS:
      break;
    }

  __pthread_mutex_lock (&p->cancel_lock);
  if (oldtype)
    *oldtype = p->cancel_type;
  p->cancel_type = type;
  __pthread_mutex_unlock (&p->cancel_lock);

  return 0;
}
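A hypothetical use of the type switch: a CPU-bound loop with no cancellation points opts into asynchronous cancellation so pthread_cancel() can still stop it (spin() is an illustrative name).

#include <pthread.h>
#include <unistd.h>

static void *spin(void *arg)
{
    int oldtype;
    (void) arg;

    pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
    for (;;)
        ;                                   /* cancellable at any point now */
    /* Not reached in this sketch; a real worker would restore oldtype.  */
}

int main(void)
{
    pthread_t tid;

    pthread_create(&tid, NULL, spin, NULL);
    sleep(1);
    pthread_cancel(tid);
    pthread_join(tid, NULL);
    return 0;
}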
Example no. 12
int
__pthread_setcancelstate (int state, int *oldstate)
{
    struct __pthread *p = _pthread_self ();

    switch (state)
    {
    default:
        return EINVAL;
    case PTHREAD_CANCEL_ENABLE:
    case PTHREAD_CANCEL_DISABLE:
        break;
    }

    __pthread_mutex_lock (&p->cancel_lock);
    if (oldstate)
        *oldstate = p->cancel_state;
    p->cancel_state = state;
    __pthread_mutex_unlock (&p->cancel_lock);

    return 0;
}
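A hypothetical pattern built on this state switch: disable cancellation around a step that must not be interrupted halfway, then restore the caller's previous state (flush_logs() is an illustrative name).

#include <pthread.h>
#include <stdio.h>

static void flush_logs(FILE *fp)
{
    int oldstate;

    pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
    fflush(fp);                 /* fflush may otherwise be a cancellation point */
    pthread_setcancelstate(oldstate, NULL);
}

int main(void)
{
    flush_logs(stdout);
    return 0;
}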
Example no. 13
void
__pthread_initialize_minimal_internal (void)
{
#ifndef SHARED
  /* Unlike in the dynamically linked case the dynamic linker has not
     taken care of initializing the TLS data structures.  */
  __libc_setup_tls (TLS_TCB_SIZE, TLS_TCB_ALIGN);

  /* We must prevent gcc from being clever and move any of the
     following code ahead of the __libc_setup_tls call.  This function
     will initialize the thread register which is subsequently
     used.  */
  __asm __volatile ("");
#endif

  /* Minimal initialization of the thread descriptor.  */
  struct pthread *pd = THREAD_SELF;
  __pthread_initialize_pids (pd);
  THREAD_SETMEM (pd, specific[0], &pd->specific_1stblock[0]);
  THREAD_SETMEM (pd, user_stack, true);
  if (LLL_LOCK_INITIALIZER != 0)
    THREAD_SETMEM (pd, lock, LLL_LOCK_INITIALIZER);
#if HP_TIMING_AVAIL
  THREAD_SETMEM (pd, cpuclock_offset, GL(dl_cpuclock_offset));
#endif

  /* Initialize the robust mutex data.  */
  {
#ifdef __PTHREAD_MUTEX_HAVE_PREV
    pd->robust_prev = &pd->robust_head;
#endif
    pd->robust_head.list = &pd->robust_head;
#ifdef __NR_set_robust_list
    pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
				    - offsetof (pthread_mutex_t,
						__data.__list.__next));
    INTERNAL_SYSCALL_DECL (err);
    int res = INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
				sizeof (struct robust_list_head));
    if (INTERNAL_SYSCALL_ERROR_P (res, err))
#endif
      set_robust_list_not_avail ();
  }

#ifdef __NR_futex
# ifndef __ASSUME_PRIVATE_FUTEX
  /* Private futexes are always used (at least internally) so that
     doing the test once this early is beneficial.  */
  {
    int word = 0;
    INTERNAL_SYSCALL_DECL (err);
    word = INTERNAL_SYSCALL (futex, err, 3, &word,
			    FUTEX_WAKE | FUTEX_PRIVATE_FLAG, 1);
    if (!INTERNAL_SYSCALL_ERROR_P (word, err))
      THREAD_SETMEM (pd, header.private_futex, FUTEX_PRIVATE_FLAG);
  }

  /* Private futexes have been introduced earlier than the
     FUTEX_CLOCK_REALTIME flag.  We don't have to run the test if we
     know the former are not supported.  This also means we know the
     kernel will return ENOSYS for unknown operations.  */
  if (THREAD_GETMEM (pd, header.private_futex) != 0)
# endif
# ifndef __ASSUME_FUTEX_CLOCK_REALTIME
    {
      int word = 0;
      /* NB: the syscall actually takes six parameters.  The last is the
	 bit mask.  But since we will not actually wait at all the value
	 is irrelevant.  Given that passing six parameters is difficult
	 on some architectures we just pass whatever random value the
	 calling convention calls for to the kernel.  It causes no harm.  */
      INTERNAL_SYSCALL_DECL (err);
      word = INTERNAL_SYSCALL (futex, err, 5, &word,
			       FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME
			       | FUTEX_PRIVATE_FLAG, 1, NULL, 0);
      assert (INTERNAL_SYSCALL_ERROR_P (word, err));
      if (INTERNAL_SYSCALL_ERRNO (word, err) != ENOSYS)
	__set_futex_clock_realtime ();
    }
# endif
#endif

  /* Set initial thread's stack block from 0 up to __libc_stack_end.
     It will be bigger than it actually is, but for unwind.c/pt-longjmp.c
     purposes this is good enough.  */
  THREAD_SETMEM (pd, stackblock_size, (size_t) __libc_stack_end);

  /* Initialize the list of all running threads with the main thread.  */
  INIT_LIST_HEAD (&__stack_user);
  list_add (&pd->list, &__stack_user);

  /* Before initializing __stack_user, the debugger could not find us and
     had to set __nptl_initial_report_events.  Propagate its setting.  */
  THREAD_SETMEM (pd, report_events, __nptl_initial_report_events);

#if defined SIGCANCEL || defined SIGSETXID
  struct sigaction sa;
  __sigemptyset (&sa.sa_mask);

# ifdef SIGCANCEL
  /* Install the cancellation signal handler.  If for some reason we
     cannot install the handler we do not abort.  Maybe we should, but
     it is only asynchronous cancellation which is affected.  */
  sa.sa_sigaction = sigcancel_handler;
  sa.sa_flags = SA_SIGINFO;
  (void) __libc_sigaction (SIGCANCEL, &sa, NULL);
# endif

# ifdef SIGSETXID
  /* Install the handle to change the threads' uid/gid.  */
  sa.sa_sigaction = sighandler_setxid;
  sa.sa_flags = SA_SIGINFO | SA_RESTART;
  (void) __libc_sigaction (SIGSETXID, &sa, NULL);
# endif

  /* The parent process might have left the signals blocked.  Just in
     case, unblock it.  We reuse the signal mask in the sigaction
     structure.  It is already cleared.  */
# ifdef SIGCANCEL
  __sigaddset (&sa.sa_mask, SIGCANCEL);
# endif
# ifdef SIGSETXID
  __sigaddset (&sa.sa_mask, SIGSETXID);
# endif
  {
    INTERNAL_SYSCALL_DECL (err);
    (void) INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_UNBLOCK, &sa.sa_mask,
			     NULL, _NSIG / 8);
  }
#endif

  /* Get the size of the static and alignment requirements for the TLS
     block.  */
  size_t static_tls_align;
  _dl_get_tls_static_info (&__static_tls_size, &static_tls_align);

  /* Make sure the size takes all the alignments into account.  */
  if (STACK_ALIGN > static_tls_align)
    static_tls_align = STACK_ALIGN;
  __static_tls_align_m1 = static_tls_align - 1;

  __static_tls_size = roundup (__static_tls_size, static_tls_align);

  /* Determine the default allowed stack size.  This is the size used
     in case the user does not specify one.  */
  struct rlimit limit;
  if (__getrlimit (RLIMIT_STACK, &limit) != 0
      || limit.rlim_cur == RLIM_INFINITY)
    /* The system limit is not usable.  Use an architecture-specific
       default.  */
    limit.rlim_cur = ARCH_STACK_DEFAULT_SIZE;
  else if (limit.rlim_cur < PTHREAD_STACK_MIN)
    /* The system limit is unusably small.
       Use the minimal size acceptable.  */
    limit.rlim_cur = PTHREAD_STACK_MIN;

  /* Make sure it meets the minimum size that allocate_stack
     (allocatestack.c) will demand, which depends on the page size.  */
  const uintptr_t pagesz = GLRO(dl_pagesize);
  const size_t minstack = pagesz + __static_tls_size + MINIMAL_REST_STACK;
  if (limit.rlim_cur < minstack)
    limit.rlim_cur = minstack;

  /* Round the resource limit up to page size.  */
  limit.rlim_cur = ALIGN_UP (limit.rlim_cur, pagesz);
  lll_lock (__default_pthread_attr_lock, LLL_PRIVATE);
  __default_pthread_attr.stacksize = limit.rlim_cur;
  __default_pthread_attr.guardsize = GLRO (dl_pagesize);
  lll_unlock (__default_pthread_attr_lock, LLL_PRIVATE);

#ifdef SHARED
  /* Transfer the old value from the dynamic linker's internal location.  */
  *__libc_dl_error_tsd () = *(*GL(dl_error_catch_tsd)) ();
  GL(dl_error_catch_tsd) = &__libc_dl_error_tsd;

  /* Make __rtld_lock_{,un}lock_recursive use pthread_mutex_{,un}lock,
     keep the lock count from the ld.so implementation.  */
  GL(dl_rtld_lock_recursive) = (void *) __pthread_mutex_lock;
  GL(dl_rtld_unlock_recursive) = (void *) __pthread_mutex_unlock;
  unsigned int rtld_lock_count = GL(dl_load_lock).mutex.__data.__count;
  GL(dl_load_lock).mutex.__data.__count = 0;
  while (rtld_lock_count-- > 0)
    __pthread_mutex_lock (&GL(dl_load_lock).mutex);

  GL(dl_make_stack_executable_hook) = &__make_stacks_executable;
#endif

  GL(dl_init_static_tls) = &__pthread_init_static_tls;

  GL(dl_wait_lookup_done) = &__wait_lookup_done;

  /* Register the fork generation counter with the libc.  */
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
  __libc_multiple_threads_ptr =
#endif
    __libc_pthread_init (&__fork_generation, __reclaim_stacks,
			 ptr_pthread_functions);

  /* Determine whether the machine is SMP or not.  */
  __is_smp = is_smp_system ();
}
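The RLIMIT_STACK-derived value stored in __default_pthread_attr above is what threads get when no explicit stack size is set. A hypothetical check using the GNU extension pthread_getattr_default_np() (glibc 2.18 or later):

#define _GNU_SOURCE
#include <pthread.h>
#include <stdio.h>

int main(void)
{
    pthread_attr_t attr;
    size_t stacksize, guardsize;

    pthread_getattr_default_np(&attr);
    pthread_attr_getstacksize(&attr, &stacksize);
    pthread_attr_getguardsize(&attr, &guardsize);
    printf("default stack: %zu bytes, guard: %zu bytes\n", stacksize, guardsize);

    pthread_attr_destroy(&attr);
    return 0;
}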
Example no. 14
File: preload.c Project: passimm/rr
/* Prevent use of lock elision; Haswell's TSX/RTM features used by
   lock elision increment the rbc perf counter for instructions which
   are later rolled back if the transaction fails. */
int pthread_mutex_lock(pthread_mutex_t* mutex)
{
	disable_elision_for_mutex(mutex);
	return __pthread_mutex_lock(mutex);
}
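rr's preload library interposes pthread_mutex_lock() and forwards to the internal __pthread_mutex_lock alias. A generic, hypothetical interposer skeleton that achieves the same shadowing with dlsym(RTLD_NEXT, ...) instead; build it as a shared object and load it via LD_PRELOAD:

#define _GNU_SOURCE
#include <dlfcn.h>
#include <pthread.h>

int pthread_mutex_lock(pthread_mutex_t *mutex)
{
	static int (*real_lock)(pthread_mutex_t *);

	if (real_lock == NULL)
		real_lock = (int (*)(pthread_mutex_t *))
			dlsym(RTLD_NEXT, "pthread_mutex_lock");

	/* ... per-call instrumentation would go here ... */

	return real_lock(mutex);
}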
Example no. 15
int
mtx_lock (mtx_t *mutex)
{
  int err_code = __pthread_mutex_lock ((pthread_mutex_t *) mutex);
  return thrd_err_map (err_code);
}
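A hypothetical C11-threads caller of this wrapper: two threads increment a shared counter under the mutex that mtx_lock()/mtx_unlock() map onto.

#include <stdio.h>
#include <threads.h>

static mtx_t lock;
static int counter;

static int worker(void *arg)
{
    (void) arg;
    for (int i = 0; i < 100000; i++) {
        mtx_lock(&lock);
        counter++;
        mtx_unlock(&lock);
    }
    return 0;
}

int main(void)
{
    thrd_t t1, t2;

    mtx_init(&lock, mtx_plain);
    thrd_create(&t1, worker, NULL);
    thrd_create(&t2, worker, NULL);
    thrd_join(t1, NULL);
    thrd_join(t2, NULL);
    mtx_destroy(&lock);

    printf("counter = %d\n", counter);
    return 0;
}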
Example no. 16
/* Block on condition variable COND until ABSTIME.  As a GNU
   extension, if ABSTIME is NULL, then wait forever.  MUTEX should be
   held by the calling thread.  On return, MUTEX will be held by the
   calling thread.  */
int
__pthread_cond_timedwait_internal (pthread_cond_t *cond,
				   pthread_mutex_t *mutex,
				   const struct timespec *abstime)
{
  error_t err;
  int cancelled, oldtype, drain;
  clockid_t clock_id = __pthread_default_condattr.__clock;

  if (abstime && (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000))
    return EINVAL;

  struct __pthread *self = _pthread_self ();
  struct cancel_ctx ctx;
  ctx.wakeup = self;
  ctx.cond = cond;

  /* Test for a pending cancellation request, switch to deferred mode for
     safer resource handling, and prepare the hook to call in case we're
     cancelled while blocking.  Once CANCEL_LOCK is released, the cancellation
     hook can be called by another thread at any time.  Whatever happens,
     this function must exit with MUTEX locked.

     This function contains inline implementations of pthread_testcancel and
     pthread_setcanceltype to reduce locking overhead.  */
  __pthread_mutex_lock (&self->cancel_lock);
  cancelled = (self->cancel_state == PTHREAD_CANCEL_ENABLE)
      && self->cancel_pending;

  if (!cancelled)
    {
      self->cancel_hook = cancel_hook;
      self->cancel_hook_arg = &ctx;
      oldtype = self->cancel_type;

      if (oldtype != PTHREAD_CANCEL_DEFERRED)
	self->cancel_type = PTHREAD_CANCEL_DEFERRED;

      /* Add ourselves to the list of waiters.  This is done while setting
         the cancellation hook to simplify the cancellation procedure, i.e.
         if the thread is queued, it can be cancelled, otherwise it is
         already unblocked, progressing on the return path.  */
      __pthread_spin_lock (&cond->__lock);
      __pthread_enqueue (&cond->__queue, self);
      if (cond->__attr != NULL)
	clock_id = cond->__attr->__clock;
      __pthread_spin_unlock (&cond->__lock);
    }
  __pthread_mutex_unlock (&self->cancel_lock);

  if (cancelled)
    __pthread_exit (PTHREAD_CANCELED);

  /* Release MUTEX before blocking.  */
  __pthread_mutex_unlock (mutex);

  /* Block the thread.  */
  if (abstime != NULL)
    err = __pthread_timedblock (self, abstime, clock_id);
  else
    {
      err = 0;
      __pthread_block (self);
    }

  __pthread_spin_lock (&cond->__lock);
  if (self->prevp == NULL)
    {
      /* Another thread removed us from the list of waiters, which means a
         wakeup message has been sent.  It was either consumed while we were
         blocking, or queued after we timed out and before we acquired the
         condition lock, in which case the message queue must be drained.  */
      if (!err)
	drain = 0;
      else
	{
	  assert (err == ETIMEDOUT);
	  drain = 1;
	}
    }
  else
    {
      /* We're still in the list of waiters.  No one attempted to wake us up,
         i.e. we timed out.  */
      assert (err == ETIMEDOUT);
      __pthread_dequeue (self);
      drain = 0;
    }
  __pthread_spin_unlock (&cond->__lock);

  if (drain)
    __pthread_block (self);

  /* We're almost done.  Remove the unblock hook, restore the previous
     cancellation type, and check for a pending cancellation request.  */
  __pthread_mutex_lock (&self->cancel_lock);
  self->cancel_hook = NULL;
  self->cancel_hook_arg = NULL;
  self->cancel_type = oldtype;
  cancelled = (self->cancel_state == PTHREAD_CANCEL_ENABLE)
      && self->cancel_pending;
  __pthread_mutex_unlock (&self->cancel_lock);

  /* Reacquire MUTEX before returning/cancelling.  */
  __pthread_mutex_lock (mutex);

  if (cancelled)
    __pthread_exit (PTHREAD_CANCELED);

  return err;
}
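A hypothetical caller matching the contract described above (MUTEX held on entry and on return, ETIMEDOUT once the absolute deadline passes); wait_ready() and the ready flag are illustrative names. The default condition-variable clock is CLOCK_REALTIME, so the deadline is computed against it.

#include <errno.h>
#include <pthread.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int ready;

int wait_ready(void)
{
    struct timespec deadline;
    int err = 0;

    clock_gettime(CLOCK_REALTIME, &deadline);
    deadline.tv_sec += 2;                       /* absolute 2-second deadline */

    pthread_mutex_lock(&lock);
    while (!ready && err != ETIMEDOUT)
        err = pthread_cond_timedwait(&cond, &lock, &deadline);
    pthread_mutex_unlock(&lock);                /* held again on return, as above */

    return ready ? 0 : err;
}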