Example #1
0
int pthread_create(pthread_t *thread, const pthread_attr_t *attr,
                   void * (*start_routine)(void *), void *arg)
{
    pthread_descr self = thread_self();
    struct pthread_request request;
    if (__pthread_manager_request < 0) {
        if (__pthread_initialize_manager() < 0) return EAGAIN;
    }
    request.req_thread = self;
    request.req_kind = REQ_CREATE;
    request.req_args.create.attr = attr;
    request.req_args.create.fn = start_routine;
    request.req_args.create.arg = arg;
    sigprocmask(SIG_SETMASK, (const sigset_t *) NULL,
                &request.req_args.create.mask);
    PDEBUG("write REQ_CREATE to manager thread\n");
    TEMP_FAILURE_RETRY(__libc_write(__pthread_manager_request,
                                    (char *) &request, sizeof(request)));
    PDEBUG("before suspend(self)\n");
    suspend(self);
    PDEBUG("after suspend(self)\n");
    if (THREAD_GETMEM(self, p_retcode) == 0)
        *thread = (pthread_t) THREAD_GETMEM(self, p_retval);
    return THREAD_GETMEM(self, p_retcode);
}
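
For context, a minimal caller-side sketch of the same create path through the public API; only standard POSIX calls are assumed, and the worker routine and its argument are hypothetical:

#include <pthread.h>
#include <stdio.h>

/* Hypothetical start routine: receives the void * passed as the last
   argument of pthread_create and returns a value for pthread_join.  */
static void *worker(void *arg)
{
    int *n = arg;
    printf("worker got %d\n", *n);
    return NULL;
}

int main(void)
{
    pthread_t tid;
    int value = 42;

    /* pthread_create returns 0 on success or an error number (e.g. EAGAIN
       when the manager thread cannot be initialized, as above).  */
    int err = pthread_create(&tid, NULL, worker, &value);
    if (err != 0)
        return 1;

    /* The creator only learns the new pthread_t once the manager has
       answered the REQ_CREATE request.  */
    pthread_join(tid, NULL);
    return 0;
}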
Example #2
0
void pthread_testcancel(void)
{
  pthread_descr self = thread_self();
  if (THREAD_GETMEM(self, p_canceled)
      && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE)
    pthread_exit(PTHREAD_CANCELED);
}
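
A hedged usage sketch: pthread_testcancel turns a compute-bound loop into a cancellation point under the default deferred cancel type; the worker below is illustrative only:

#include <pthread.h>
#include <stdio.h>

/* Hypothetical worker: with deferred cancellation (the default), a compute
   loop has no cancellation points, so one is inserted explicitly.  */
static void *crunch(void *arg)
{
    (void) arg;
    for (;;)
        pthread_testcancel();   /* thread exits here once it is canceled */
    return NULL;
}

int main(void)
{
    pthread_t t;
    void *status;

    pthread_create(&t, NULL, crunch, NULL);
    pthread_cancel(t);
    pthread_join(t, &status);
    printf("canceled: %d\n", status == PTHREAD_CANCELED);
    return 0;
}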
Example #3
0
/* For asynchronous cancellation we use a signal.  This is the handler.  */
static void
sighandler_setxid (int sig, siginfo_t *si, void *ctx)
{
  /* Safety check.  It would be possible to call this function for
     other signals and send a signal from another process.  This is not
     correct and might even be a security problem.  Try to catch as
     many incorrect invocations as possible.  */
  if (sig != SIGSETXID
#ifdef __ASSUME_CORRECT_SI_PID
      /* Kernels before 2.5.75 stored the thread ID and not the process
	 ID in si_pid so we skip this test.  */
      || si->si_pid != THREAD_GETMEM (THREAD_SELF, pid)
#endif
      || si->si_code != SI_TKILL)
    return;

  INTERNAL_SYSCALL_DECL (err);
  INTERNAL_SYSCALL_NCS (__xidcmd->syscall_no, err, 3, __xidcmd->id[0],
			__xidcmd->id[1], __xidcmd->id[2]);

  if (atomic_decrement_val (&__xidcmd->cntr) == 0)
    lll_futex_wake (&__xidcmd->cntr, 1);

  /* Reset the SETXID flag.  */
  struct pthread *self = THREAD_SELF;
  int flags = THREAD_GETMEM (self, cancelhandling);
  THREAD_SETMEM (self, cancelhandling, flags & ~SETXID_BITMASK);

  /* And release the futex.  */
  self->setxid_futex = 1;
  lll_futex_wake (&self->setxid_futex, 1);
}
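
To illustrate the three-argument handler shape and the si_code check used above, a small hedged sketch of installing an SA_SIGINFO handler for an ordinary signal; SIGUSR1 and the handler name are arbitrary choices, not part of the original code:

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

/* Three-argument handler, same shape as sighandler_setxid above.  */
static void usr1_handler(int sig, siginfo_t *si, void *ctx)
{
    (void) ctx;
    /* A defensive handler can check how the signal was generated;
       SI_TKILL above means tgkill()/tkill(), SI_USER means kill().  */
    if (sig != SIGUSR1 || si->si_code != SI_USER)
        return;
    write(STDOUT_FILENO, "got SIGUSR1\n", 12);
}

int main(void)
{
    struct sigaction sa = { 0 };
    sa.sa_sigaction = usr1_handler;
    sa.sa_flags = SA_SIGINFO;        /* request the siginfo_t argument */
    sigemptyset(&sa.sa_mask);
    sigaction(SIGUSR1, &sa, NULL);

    kill(getpid(), SIGUSR1);         /* si_code will be SI_USER */
    return 0;
}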
Example #4
0
void __pthread_perform_cleanup(char *currentframe)
{
  pthread_descr self = thread_self();
  struct _pthread_cleanup_buffer *c = THREAD_GETMEM(self, p_cleanup);
  struct _pthread_cleanup_buffer *last;

  if (c != NULL)
    while (FRAME_LEFT (currentframe, c))
      {
	last = c;
	c = c->__prev;

	if (c == NULL || FRAME_LEFT (last, c))
	  {
	    c = NULL;
	    break;
	  }
      }

  while (c != NULL)
    {
      c->__routine(c->__arg);

      last = c;
      c = c->__prev;

      if (FRAME_LEFT (last, c))
	break;
    }

  /* And the TSD which needs special help.  */
  if (THREAD_GETMEM(self, p_libc_specific[_LIBC_TSD_KEY_RPC_VARS]) != NULL)
    __rpc_thread_destroy ();
}
static void pthread_handle_sigrestart(int sig)
{
    pthread_descr self = thread_self();
    THREAD_SETMEM(self, p_signal, sig);
    if (THREAD_GETMEM(self, p_signal_jmp) != NULL)
	siglongjmp(*THREAD_GETMEM(self, p_signal_jmp), 1);
}
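
The chain walked by __pthread_perform_cleanup is built by the pthread_cleanup_push/pthread_cleanup_pop macros; a minimal hedged sketch in which pthread_exit runs the registered handler (the file-closing handler is illustrative only):

#include <pthread.h>
#include <stdio.h>

/* Cleanup handler: runs when the thread exits or is canceled while the
   handler is still registered, in LIFO order as walked above.  */
static void close_file(void *arg)
{
    fclose(arg);
}

static void *worker(void *arg)
{
    (void) arg;
    FILE *f = fopen("/dev/null", "w");
    if (f == NULL)
        return NULL;

    pthread_cleanup_push(close_file, f);
    fputs("work in progress\n", f);
    pthread_exit(NULL);              /* cleanup chain runs close_file(f) */
    pthread_cleanup_pop(0);          /* not reached; balances the push */
    return NULL;
}

int main(void)
{
    pthread_t t;
    pthread_create(&t, NULL, worker, NULL);
    pthread_join(t, NULL);
    return 0;
}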
Example #6
0
int
raise (int sig)
{
#if (defined(__ASSUME_TGKILL) && __ASSUME_TGKILL) || defined __NR_tgkill
  /* raise is an async-safe function.  It could be called while the
     fork function temporarily invalidated the PID field.  Adjust for
     that.  */
  pid_t pid = THREAD_GETMEM (THREAD_SELF, pid);
  if (__builtin_expect (pid < 0, 0))
    pid = -pid;
#endif

#if defined(__ASSUME_TGKILL) && __ASSUME_TGKILL
  return INLINE_SYSCALL (tgkill, 3, pid, THREAD_GETMEM (THREAD_SELF, tid),
			 sig);
#else
# ifdef __NR_tgkill
  int res = INLINE_SYSCALL (tgkill, 3, pid, THREAD_GETMEM (THREAD_SELF, tid),
			    sig);
  if (res != -1 || errno != ENOSYS)
    return res;
# endif
  return INLINE_SYSCALL (tkill, 2, THREAD_GETMEM (THREAD_SELF, tid), sig);
#endif
}
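
For reference, a hedged caller-side sketch: in a multithreaded program raise(sig) behaves like pthread_kill(pthread_self(), sig), which is why the implementation above targets the calling thread's tid rather than the whole process; the handler below is illustrative:

#include <signal.h>
#include <stdio.h>

static void on_term(int sig)
{
    (void) sig;                 /* runs in the thread that called raise() */
}

int main(void)
{
    signal(SIGTERM, on_term);
    raise(SIGTERM);             /* delivered to the calling thread */
    puts("handler returned, still running");
    return 0;
}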
Example #7
0
void __pthread_destroy_specifics()
{
  pthread_descr self = thread_self();
  int i, j, round, found_nonzero;
  destr_function destr;
  void * data;

  for (round = 0, found_nonzero = 1;
       found_nonzero && round < PTHREAD_DESTRUCTOR_ITERATIONS;
       round++) {
    found_nonzero = 0;
    for (i = 0; i < PTHREAD_KEY_1STLEVEL_SIZE; i++)
      if (THREAD_GETMEM_NC(self, p_specific[i]) != NULL)
        for (j = 0; j < PTHREAD_KEY_2NDLEVEL_SIZE; j++) {
          destr = pthread_keys[i * PTHREAD_KEY_2NDLEVEL_SIZE + j].destr;
          data = THREAD_GETMEM_NC(self, p_specific[i])[j];
          if (destr != NULL && data != NULL) {
            THREAD_GETMEM_NC(self, p_specific[i])[j] = NULL;
            destr(data);
            found_nonzero = 1;
          }
        }
  }
  __pthread_lock(THREAD_GETMEM(self, p_lock), self);
  for (i = 0; i < PTHREAD_KEY_1STLEVEL_SIZE; i++) {
    if (THREAD_GETMEM_NC(self, p_specific[i]) != NULL) {
      free(THREAD_GETMEM_NC(self, p_specific[i]));
      THREAD_SETMEM_NC(self, p_specific[i], NULL);
    }
  }
  __pthread_unlock(THREAD_GETMEM(self, p_lock));
}
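
The destructors iterated above are registered through pthread_key_create; a small hedged usage sketch (key, destructor and payload are hypothetical):

#include <pthread.h>
#include <stdlib.h>

static pthread_key_t key;

/* Destructor: called by the TSD teardown loop (up to
   PTHREAD_DESTRUCTOR_ITERATIONS rounds) for each non-NULL value.  */
static void free_buffer(void *p)
{
    free(p);
}

static void *worker(void *arg)
{
    (void) arg;
    pthread_setspecific(key, malloc(64));   /* value seen by the destructor */
    return NULL;                            /* thread exit runs destructors */
}

int main(void)
{
    pthread_t t;
    pthread_key_create(&key, free_buffer);
    pthread_create(&t, NULL, worker, NULL);
    pthread_join(t, NULL);
    pthread_key_delete(key);
    return 0;
}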
Example #8
0
void __pthread_cleanup_upto (__jmp_buf target, char *targetframe)
{
  pthread_descr self = thread_self();
  struct _pthread_cleanup_buffer * c;

  for (c = THREAD_GETMEM(self, p_cleanup);
       c != NULL && __JMPBUF_UNWINDS(target, c, demangle_ptr);
       c = c->__prev)
    {
#if _STACK_GROWS_DOWN
      if ((char *) c <= targetframe)
	{
	  c = NULL;
	  break;
	}
#elif _STACK_GROWS_UP
      if ((char *) c >= targetframe)
	{
	  c = NULL;
	  break;
	}
#else
# error "Define either _STACK_GROWS_DOWN or _STACK_GROWS_UP"
#endif
      c->__routine(c->__arg);
    }
  THREAD_SETMEM(self, p_cleanup, c);
#ifdef NOT_FOR_L4
  if (THREAD_GETMEM(self, p_in_sighandler)
      && __JMPBUF_UNWINDS(target, THREAD_GETMEM(self, p_in_sighandler),
			 demangle_ptr))
    THREAD_SETMEM(self, p_in_sighandler, NULL);
#endif
}
void __pthread_perform_cleanup(char *currentframe)
{
  pthread_descr self = thread_self();
  struct _pthread_cleanup_buffer * c;

  for (c = THREAD_GETMEM(self, p_cleanup); c != NULL; c = c->__prev)
    {
#ifdef _STACK_GROWS_DOWN
      if ((char *) c <= currentframe)
	break;
#elif defined _STACK_GROWS_UP
      if ((char *) c >= currentframe)
	break;
#else
# error "Define either _STACK_GROWS_DOWN or _STACK_GROWS_UP"
#endif
      c->__routine(c->__arg);
    }

#ifdef __UCLIBC_HAS_RPC__
  /* And the TSD which needs special help.  */
  if (THREAD_GETMEM(self, p_libc_specific[_LIBC_TSD_KEY_RPC_VARS]) != NULL)
      __rpc_thread_destroy ();
#endif
}
Example #10
0
void pthread_testcancel(void)
{
  pthread_descr self = thread_self();
  if (THREAD_GETMEM(self, p_canceled)
      && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE)
    __pthread_do_exit(PTHREAD_CANCELED, CURRENT_STACK_FRAME);
}
Example #11
0
static int pthread_start_thread(void *arg)
{
  pthread_descr self = (pthread_descr) arg;
  struct pthread_request request;
  void * outcome;
#if HP_TIMING_AVAIL
  hp_timing_t tmpclock;
#endif
  /* Initialize special thread_self processing, if any.  */
#ifdef INIT_THREAD_SELF
  INIT_THREAD_SELF(self, self->p_nr);
#endif
#if HP_TIMING_AVAIL
  HP_TIMING_NOW (tmpclock);
  THREAD_SETMEM (self, p_cpuclock_offset, tmpclock);
#endif
  /* Make sure our pid field is initialized, just in case we get here
     before our parent thread has initialized it. */
  THREAD_SETMEM(self, p_pid, __getpid());
  /* Initial signal mask is that of the creating thread. (Otherwise,
     we'd just inherit the mask of the thread manager.) */
  sigprocmask(SIG_SETMASK, &self->p_start_args.mask, NULL);
  /* Set the scheduling policy and priority for the new thread, if needed */
  if (THREAD_GETMEM(self, p_start_args.schedpolicy) >= 0)
    /* Explicit scheduling attributes were provided: apply them */
    __sched_setscheduler(THREAD_GETMEM(self, p_pid),
			 THREAD_GETMEM(self, p_start_args.schedpolicy),
                         &self->p_start_args.schedparam);
  else if (manager_thread->p_priority > 0)
    /* Default scheduling required, but thread manager runs in realtime
       scheduling: switch new thread to SCHED_OTHER policy */
    {
      struct sched_param default_params;
      default_params.sched_priority = 0;
      __sched_setscheduler(THREAD_GETMEM(self, p_pid),
                           SCHED_OTHER, &default_params);
    }
#if !(USE_TLS && HAVE___THREAD)
  /* Initialize the thread-local current locale to point to the global one.
     With __thread support, the variable's initializer takes care of this.  */
  __uselocale (LC_GLOBAL_LOCALE);
#else
  /* Initialize __resp.  */
  __resp = &self->p_res;
#endif
  /* Make gdb aware of new thread */
  if (__pthread_threads_debug && __pthread_sig_debug > 0) {
    request.req_thread = self;
    request.req_kind = REQ_DEBUG;
    TEMP_FAILURE_RETRY(write_not_cancel(__pthread_manager_request,
					(char *) &request, sizeof(request)));
    suspend(self);
  }
  /* Run the thread code */
  outcome = self->p_start_args.start_routine(THREAD_GETMEM(self,
							   p_start_args.arg));
  /* Exit with the given return value */
  __pthread_do_exit(outcome, CURRENT_STACK_FRAME);
}
/* For asynchronous cancellation we use a signal.  This is the handler.  */
static void
sigcancel_handler (int sig, siginfo_t *si, void *ctx)
{
#ifdef __ASSUME_CORRECT_SI_PID
  /* Determine the process ID.  It might be negative if the thread is
     in the middle of a fork() call.  */
  pid_t pid = THREAD_GETMEM (THREAD_SELF, pid);
  if (__builtin_expect (pid < 0, 0))
    pid = -pid;
#endif

  /* Safety check.  It would be possible to call this function for
     other signals and send a signal from another process.  This is not
     correct and might even be a security problem.  Try to catch as
     many incorrect invocations as possible.  */
  if (sig != SIGCANCEL
#ifdef __ASSUME_CORRECT_SI_PID
      /* Kernels before 2.5.75 stored the thread ID and not the process
	 ID in si_pid so we skip this test.  */
      || si->si_pid != pid
#endif
      || si->si_code != SI_TKILL)
    return;

  struct pthread *self = THREAD_SELF;

  int oldval = THREAD_GETMEM (self, cancelhandling);
  while (1)
    {
      /* We are canceled now.  When canceled by another thread this flag
	 is already set, but if the signal is sent directly (internally or
	 from another process) it has to be done here.  */
      int newval = oldval | CANCELING_BITMASK | CANCELED_BITMASK;

      if (oldval == newval || (oldval & EXITING_BITMASK) != 0)
	/* Already canceled or exiting.  */
	break;

      int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
					      oldval);
      if (curval == oldval)
	{
	  /* Set the return value.  */
	  THREAD_SETMEM (self, result, PTHREAD_CANCELED);

	  /* Make sure asynchronous cancellation is still enabled.  */
	  if ((newval & CANCELTYPE_BITMASK) != 0)
	    /* Run the registered destructors and terminate the thread.  */
	    __do_cancel ();

	  break;
	}

      oldval = curval;
    }
}
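
The cancellation machinery above is driven from outside by pthread_cancel; a hedged end-to-end sketch in which a blocking cancellation point is interrupted and pthread_join reports PTHREAD_CANCELED (the worker and its sleep are illustrative):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static void *worker(void *arg)
{
    (void) arg;
    for (;;)
        sleep(1);                  /* sleep() is a cancellation point */
    return NULL;
}

int main(void)
{
    pthread_t t;
    void *status;

    pthread_create(&t, NULL, worker, NULL);
    sleep(1);
    pthread_cancel(t);                 /* sets the cancel flag / sends the signal */
    pthread_join(t, &status);

    if (status == PTHREAD_CANCELED)    /* result set by the cancel path above */
        puts("worker was canceled");
    return 0;
}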
Example #13
0
int
pthread_sigqueue (pthread_t threadid, int signo, const union sigval value)
{
#ifdef __NR_rt_tgsigqueueinfo
  struct pthread *pd = (struct pthread *) threadid;

  /* Make sure the descriptor is valid.  */
  if (DEBUGGING_P && INVALID_TD_P (pd))
    /* Not a valid thread handle.  */
    return ESRCH;

  /* Force load of pd->tid into local variable or register.  Otherwise
     if a thread exits between ESRCH test and tgkill, we might return
     EINVAL, because pd->tid would be cleared by the kernel.  */
  pid_t tid = atomic_forced_read (pd->tid);
  if (__builtin_expect (tid <= 0, 0))
    /* Not a valid thread handle.  */
    return ESRCH;

  /* Disallow sending the signal we use for cancellation, timers, or
     for the setxid implementation.  */
  if (signo == SIGCANCEL || signo == SIGTIMER || signo == SIGSETXID)
    return EINVAL;

  /* Set up the siginfo_t structure.  */
  siginfo_t info;
  memset (&info, '\0', sizeof (siginfo_t));
  info.si_signo = signo;
  info.si_code = SI_QUEUE;
  info.si_pid = THREAD_GETMEM (THREAD_SELF, pid);
  info.si_uid = getuid ();
  info.si_value = value;

  /* We have a special syscall to do the work.  */
  INTERNAL_SYSCALL_DECL (err);

  /* One comment: The PID field in the TCB can temporarily be changed
     (in fork).  But this must not affect this code here.  Since this
     function would have to be called while the thread is executing
     fork, it would have to happen in a signal handler.  But this is
     not allowed; pthread_sigqueue is not guaranteed to be async-safe.  */
  int val = INTERNAL_SYSCALL (rt_tgsigqueueinfo, err, 4,
			      THREAD_GETMEM (THREAD_SELF, pid),
			      tid, signo, &info);

  return (INTERNAL_SYSCALL_ERROR_P (val, err)
	  ? INTERNAL_SYSCALL_ERRNO (val, err) : 0);
#else
  return ENOSYS;
#endif
}
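
A hedged caller-side sketch of pthread_sigqueue: an SA_SIGINFO handler observes SI_QUEUE and the queued sigval set up above; pthread_sigqueue is a GNU extension, so _GNU_SOURCE is assumed, and the signal number and payload are arbitrary:

#define _GNU_SOURCE
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t received_value;

static void handler(int sig, siginfo_t *si, void *ctx)
{
    (void) sig; (void) ctx;
    if (si->si_code == SI_QUEUE)          /* set by rt_tgsigqueueinfo above */
        received_value = si->si_value.sival_int;
}

static void *worker(void *arg)
{
    (void) arg;
    while (received_value == 0)
        sleep(1);                          /* wait for the queued signal */
    return NULL;
}

int main(void)
{
    struct sigaction sa = { 0 };
    sa.sa_sigaction = handler;
    sa.sa_flags = SA_SIGINFO;
    sigemptyset(&sa.sa_mask);
    sigaction(SIGUSR1, &sa, NULL);

    pthread_t t;
    pthread_create(&t, NULL, worker, NULL);

    union sigval v = { .sival_int = 42 };
    pthread_sigqueue(t, SIGUSR1, v);       /* queue SIGUSR1 with a payload */

    pthread_join(t, NULL);
    printf("worker saw sival_int = %d\n", (int) received_value);
    return 0;
}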
Example #14
0
void _pthread_cleanup_push_defer(struct _pthread_cleanup_buffer * buffer,
				 void (*routine)(void *), void * arg)
{
  pthread_descr self = thread_self();
  buffer->__routine = routine;
  buffer->__arg = arg;
  buffer->__canceltype = THREAD_GETMEM(self, p_canceltype);
  buffer->__prev = THREAD_GETMEM(self, p_cleanup);
  THREAD_SETMEM(self, p_canceltype, PTHREAD_CANCEL_DEFERRED);
  THREAD_SETMEM(self, p_cleanup, buffer);
}
Example #15
0
void _pthread_cleanup_pop_restore(struct _pthread_cleanup_buffer * buffer,
				  int execute)
{
  pthread_descr self = thread_self();
  if (execute) buffer->__routine(buffer->__arg);
  THREAD_SETMEM(self, p_cleanup, buffer->__prev);
  THREAD_SETMEM(self, p_canceltype, buffer->__canceltype);
  if (THREAD_GETMEM(self, p_canceled) &&
      THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE &&
      THREAD_GETMEM(self, p_canceltype) == PTHREAD_CANCEL_ASYNCHRONOUS)
    __pthread_do_exit(PTHREAD_CANCELED, CURRENT_STACK_FRAME);
}
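
These two functions correspond to the GNU pthread_cleanup_push_defer_np/pthread_cleanup_pop_restore_np macro pair, which also saves and restores the cancel type; a hedged usage sketch (_GNU_SOURCE assumed, the mutex is illustrative):

#define _GNU_SOURCE
#include <pthread.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void unlock_mutex(void *arg)
{
    pthread_mutex_unlock(arg);
}

static void *worker(void *arg)
{
    (void) arg;
    pthread_mutex_lock(&lock);

    /* Registers the handler and forces deferred cancellation, exactly as
       _pthread_cleanup_push_defer does above.  */
    pthread_cleanup_push_defer_np(unlock_mutex, &lock);

    sleep(1);                              /* cancellation point */

    /* Pops the handler (executing it) and restores the previous cancel
       type, mirroring _pthread_cleanup_pop_restore.  */
    pthread_cleanup_pop_restore_np(1);
    return NULL;
}

int main(void)
{
    pthread_t t;
    pthread_create(&t, NULL, worker, NULL);
    pthread_join(t, NULL);
    return 0;
}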
Example #16
0
int
attribute_hidden
__pthread_enable_asynccancel (void)
{
  struct pthread *self = THREAD_SELF;
  int oldval;
  if (is_recording()) {
    pthread_log_record (0, PTHREAD_CANCELHANDLING_ENTER, (u_long) &self->cancelhandling, 1); 
    oldval = THREAD_GETMEM (self, cancelhandling);
    pthread_log_record (oldval, PTHREAD_CANCELHANDLING_EXIT, (u_long) &self->cancelhandling, 0); 
  } else if (is_replaying()) {
    pthread_log_replay (PTHREAD_CANCELHANDLING_ENTER, (u_long) &self->cancelhandling); 
    oldval = pthread_log_replay (PTHREAD_CANCELHANDLING_EXIT, (u_long) &self->cancelhandling); 
  } else {
    oldval = THREAD_GETMEM (self, cancelhandling);
  }

  while (1)
    {
      int newval = oldval | CANCELTYPE_BITMASK;

      if (newval == oldval)
	break;

      int curval;
      if (is_recording()) {
	pthread_log_record (0, PTHREAD_CANCELHANDLING_ENTER, (u_long) &self->cancelhandling, 1); 
	curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval, oldval);
	pthread_log_record (curval, PTHREAD_CANCELHANDLING_EXIT, (u_long) &self->cancelhandling, 0); 
      } else if (is_replaying()) {
	pthread_log_replay (PTHREAD_CANCELHANDLING_ENTER, (u_long) &self->cancelhandling); 
	curval = pthread_log_replay (PTHREAD_CANCELHANDLING_EXIT, (u_long) &self->cancelhandling); 
      } else {
	curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval, oldval);
      }
 
      if (__builtin_expect (curval == oldval, 1))
	{
	  if (CANCEL_ENABLED_AND_CANCELED_AND_ASYNCHRONOUS (newval))
	    {
	      THREAD_SETMEM (self, result, PTHREAD_CANCELED);
	      __do_cancel ();
	    }

	  break;
	}

      /* Prepare the next round.  */
      oldval = curval;
    }

  return oldval;
}
Example #17
0
/* sigwait -- synchronously wait for a signal */
int sigwait(const sigset_t * set, int * sig)
{
  volatile pthread_descr self = thread_self();
  sigset_t mask;
  int s;
  sigjmp_buf jmpbuf;
  struct sigaction sa;

  /* Get ready to block all signals except those in set
     and the cancellation signal.
     Also check that handlers are installed on all signals in set,
     and if not, install our dummy handler.  This is conformant to
     POSIX: "The effect of sigwait() on the signal actions for the
     signals in set is unspecified." */
  sigfillset(&mask);
  sigdelset(&mask, __pthread_sig_cancel);
  for (s = 1; s < NSIG; s++) {
    if (sigismember(set, s) &&
        s != __pthread_sig_restart &&
        s != __pthread_sig_cancel &&
        s != __pthread_sig_debug) {
      sigdelset(&mask, s);
      if (sighandler[s].old == (arch_sighandler_t) SIG_ERR ||
          sighandler[s].old == (arch_sighandler_t) SIG_DFL ||
          sighandler[s].old == (arch_sighandler_t) SIG_IGN) {
        sa.sa_handler = pthread_null_sighandler;
        sigfillset(&sa.sa_mask);
        sa.sa_flags = 0;
        sigaction(s, &sa, NULL);
      }
    }
  }
  /* Test for cancellation */
  if (sigsetjmp(jmpbuf, 1) == 0) {
    THREAD_SETMEM(self, p_cancel_jmp, &jmpbuf);
    if (! (THREAD_GETMEM(self, p_canceled)
	   && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE)) {
      /* Reset the signal count */
      THREAD_SETMEM(self, p_signal, 0);
      /* Say we're in sigwait */
      THREAD_SETMEM(self, p_sigwaiting, 1);
      /* Unblock the signals and wait for them */
      sigsuspend(&mask);
    }
  }
  THREAD_SETMEM(self, p_cancel_jmp, NULL);
  /* The signals are now reblocked.  Check for cancellation */
  pthread_testcancel();
  /* We should have self->p_signal != 0 and equal to the signal received */
  *sig = THREAD_GETMEM(self, p_signal);
  return 0;
}
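
A hedged caller-side sketch of sigwait: the signal is blocked via pthread_sigmask so it stays pending until sigwait fetches it; SIGUSR1 is an arbitrary choice:

#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    sigset_t set;
    int sig;

    /* Block SIGUSR1 so it is delivered only through sigwait.  */
    sigemptyset(&set);
    sigaddset(&set, SIGUSR1);
    pthread_sigmask(SIG_BLOCK, &set, NULL);

    kill(getpid(), SIGUSR1);      /* make one pending for the demo */

    if (sigwait(&set, &sig) == 0)
        printf("sigwait returned signal %d\n", sig);
    return 0;
}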
Example #18
0
void
__cleanup_fct_attribute
__pthread_register_cancel_defer (__pthread_unwind_buf_t *buf)
{
  struct pthread_unwind_buf *ibuf = (struct pthread_unwind_buf *) buf;
  struct pthread *self = THREAD_SELF;

  /* Store old info.  */
  ibuf->priv.data.prev = THREAD_GETMEM (self, cleanup_jmp_buf);
  ibuf->priv.data.cleanup = THREAD_GETMEM (self, cleanup);

  int cancelhandling;
  if (is_recording()) {
    pthread_log_record (0, PTHREAD_CANCELHANDLING_ENTER, (u_long) &self->cancelhandling, 1); 
    cancelhandling = THREAD_GETMEM (self, cancelhandling);
    pthread_log_record (cancelhandling, PTHREAD_CANCELHANDLING_EXIT, (u_long) &self->cancelhandling, 0); 
  } else if (is_replaying()) {
    pthread_log_replay (PTHREAD_CANCELHANDLING_ENTER, (u_long) &self->cancelhandling); 
    cancelhandling = pthread_log_replay (PTHREAD_CANCELHANDLING_EXIT, (u_long) &self->cancelhandling); 
  } else {
    cancelhandling = THREAD_GETMEM (self, cancelhandling);
  }
  /* Disable asynchronous cancellation for now.  */
  if (__builtin_expect (cancelhandling & CANCELTYPE_BITMASK, 0))
    while (1)
      {
	int curval;
	if (is_recording()) {
	  pthread_log_record (0, PTHREAD_CANCELHANDLING_ENTER, (u_long) &self->cancelhandling, 1); 
	  curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, cancelhandling & ~CANCELTYPE_BITMASK, cancelhandling);
	  pthread_log_record (curval, PTHREAD_CANCELHANDLING_EXIT, (u_long) &self->cancelhandling, 0); 
	} else if (is_replaying()) {
	  pthread_log_replay (PTHREAD_CANCELHANDLING_ENTER, (u_long) &self->cancelhandling); 
	  curval = pthread_log_replay (PTHREAD_CANCELHANDLING_EXIT, (u_long) &self->cancelhandling); 
	} else {
	  curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, cancelhandling & ~CANCELTYPE_BITMASK, cancelhandling);
	}
	if (__builtin_expect (curval == cancelhandling, 1))
	  /* Successfully replaced the value.  */
	  break;

	/* Prepare for the next round.  */
	cancelhandling = curval;
      }

  ibuf->priv.data.canceltype = (cancelhandling & CANCELTYPE_BITMASK
				? PTHREAD_CANCEL_ASYNCHRONOUS
				: PTHREAD_CANCEL_DEFERRED);

  /* Store the new cleanup handler info.  */
  THREAD_SETMEM (self, cleanup_jmp_buf, (struct pthread_unwind_buf *) buf);
}
static void pthread_handle_sigcancel(int sig)
{
  pthread_descr self = thread_self();
  sigjmp_buf * jmpbuf;
  

  if (self == &__pthread_manager_thread)
    {
#ifdef THREAD_SELF
      /* A new thread might get a cancel signal before it is fully
	 initialized, so that the thread register might still point to the
	 manager thread.  Double check that this is really the manager
	 thread.  */
      pthread_descr real_self = thread_self_stack();
      if (real_self == &__pthread_manager_thread)
	{
	  __pthread_manager_sighandler(sig);
	  return;
	}
      /* Oops, thread_self() isn't working yet..  */
      self = real_self;
# ifdef INIT_THREAD_SELF
      INIT_THREAD_SELF(self, self->p_nr);
# endif
#else
      __pthread_manager_sighandler(sig);
      return;
#endif
    }
  if (__builtin_expect (__pthread_exit_requested, 0)) {
    /* Main thread should accumulate times for thread manager and its
       children, so that timings for main thread account for all threads. */
    if (self == __pthread_main_thread) {
#ifdef USE_TLS
      waitpid(__pthread_manager_thread->p_pid, NULL, __WCLONE);
#else
      waitpid(__pthread_manager_thread.p_pid, NULL, __WCLONE);
#endif
    }
    _exit(__pthread_exit_code);
  }
  if (__builtin_expect (THREAD_GETMEM(self, p_canceled), 0)
      && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE) {
    if (THREAD_GETMEM(self, p_canceltype) == PTHREAD_CANCEL_ASYNCHRONOUS)
      pthread_exit(PTHREAD_CANCELED);
    jmpbuf = THREAD_GETMEM(self, p_cancel_jmp);
    if (jmpbuf != NULL) {
      THREAD_SETMEM(self, p_cancel_jmp, NULL);
      siglongjmp(*jmpbuf, 1);
    }
  }
}
Example #20
0
int pthread_setcanceltype(int type, int * oldtype)
{
  pthread_descr self = thread_self();
  if (type < PTHREAD_CANCEL_DEFERRED || type > PTHREAD_CANCEL_ASYNCHRONOUS)
    return EINVAL;
  if (oldtype != NULL) *oldtype = THREAD_GETMEM(self, p_canceltype);
  THREAD_SETMEM(self, p_canceltype, type);
  if (THREAD_GETMEM(self, p_canceled) &&
      THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE &&
      THREAD_GETMEM(self, p_canceltype) == PTHREAD_CANCEL_ASYNCHRONOUS)
    __pthread_do_exit(PTHREAD_CANCELED, CURRENT_STACK_FRAME);
  return 0;
}
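
A hedged usage sketch: switching to asynchronous cancel type so a cancel can take effect inside a loop with no cancellation points; the spinning worker is illustrative only:

#include <pthread.h>

static void *spin(void *arg)
{
    (void) arg;
    int oldtype;

    /* Allow cancellation to interrupt the loop at any point; a pending
       cancel is acted on by pthread_setcanceltype itself, as above.  */
    pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);

    for (;;)
        ;                       /* no cancellation points needed now */

    /* Restore the previous type (not reached in this sketch).  */
    pthread_setcanceltype(oldtype, NULL);
    return NULL;
}

int main(void)
{
    pthread_t t;
    pthread_create(&t, NULL, spin, NULL);
    pthread_cancel(t);
    pthread_join(t, NULL);
    return 0;
}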
Example #21
0
int pthread_setcancelstate(int state, int * oldstate)
{
  pthread_descr self = thread_self();
  if (state < PTHREAD_CANCEL_ENABLE || state > PTHREAD_CANCEL_DISABLE)
    return EINVAL;
  if (oldstate != NULL) *oldstate = THREAD_GETMEM(self, p_cancelstate);
  THREAD_SETMEM(self, p_cancelstate, state);
  if (THREAD_GETMEM(self, p_canceled) &&
      THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE &&
      THREAD_GETMEM(self, p_canceltype) == PTHREAD_CANCEL_ASYNCHRONOUS)
    pthread_exit(PTHREAD_CANCELED);
  return 0;
}
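
A hedged sketch of the usual pattern behind pthread_setcancelstate: disable cancellation around a section that must complete, then restore the old state; the stand-in work is hypothetical:

#include <pthread.h>
#include <unistd.h>

static void *worker(void *arg)
{
    (void) arg;
    int oldstate;

    /* No cancellation can take effect inside this window.  */
    pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
    sleep(1);                    /* stand-in for must-complete work */
    pthread_setcancelstate(oldstate, NULL);

    /* Once re-enabled, a pending deferred cancel acts at the next
       cancellation point, e.g. this explicit one.  */
    pthread_testcancel();
    return NULL;
}

int main(void)
{
    pthread_t t;
    pthread_create(&t, NULL, worker, NULL);
    pthread_cancel(t);
    pthread_join(t, NULL);
    return 0;
}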
Example #22
0
void __pthread_perform_cleanup(void)
{
  pthread_descr self = thread_self();
  struct _pthread_cleanup_buffer * c;
  for (c = THREAD_GETMEM(self, p_cleanup); c != NULL; c = c->__prev)
    c->__routine(c->__arg);

#ifdef __UCLIBC_HAS_RPC__
  /* And the TSD which needs special help.  */
  if (THREAD_GETMEM(self, p_libc_specific[_LIBC_TSD_KEY_RPC_VARS]) != NULL)
      __rpc_thread_destroy ();
#endif
}
Example #23
0
void
__cleanup_fct_attribute
__pthread_register_cancel (__pthread_unwind_buf_t *buf)
{
  struct pthread_unwind_buf *ibuf = (struct pthread_unwind_buf *) buf;
  struct pthread *self = THREAD_SELF;

  /* Store old info.  */
  ibuf->priv.data.prev = THREAD_GETMEM (self, cleanup_jmp_buf);
  ibuf->priv.data.cleanup = THREAD_GETMEM (self, cleanup);

  /* Store the new cleanup handler info.  */
  THREAD_SETMEM (self, cleanup_jmp_buf, (struct pthread_unwind_buf *) buf);
}
Example #24
0
int attribute_noreturn __pthread_manager_event(void *arg)
{
  /* If we have special thread_self processing, initialize it.  */
#ifdef INIT_THREAD_SELF
  INIT_THREAD_SELF(&__pthread_manager_thread, 1);
#endif

  /* Get the lock the manager will free once all is correctly set up.  */
  __pthread_lock (THREAD_GETMEM((&__pthread_manager_thread), p_lock), NULL);
  /* Free it immediately.  */
  __pthread_unlock (THREAD_GETMEM((&__pthread_manager_thread), p_lock));

  __pthread_manager(arg);
}
Example #25
0
int
__pthread_kill (pthread_t threadid, int signo)
{
  struct pthread *pd = (struct pthread *) threadid;

  /* Make sure the descriptor is valid.  */
  if (DEBUGGING_P && INVALID_TD_P (pd))
    /* Not a valid thread handle.  */
    return ESRCH;

  /* Force load of pd->tid into local variable or register.  Otherwise
     if a thread exits between ESRCH test and tgkill, we might return
     EINVAL, because pd->tid would be cleared by the kernel.  */
  pid_t tid = atomic_forced_read (pd->tid);
  if (__builtin_expect (tid <= 0, 0))
    /* Not a valid thread handle.  */
    return ESRCH;

  /* Disallow sending the signal we use for cancellation, timers, or
     for the setxid implementation.  */
  if (signo == SIGCANCEL || signo == SIGTIMER || signo == SIGSETXID)
    return EINVAL;

  /* We have a special syscall to do the work.  */
  INTERNAL_SYSCALL_DECL (err);

  /* One comment: The PID field in the TCB can temporarily be changed
     (in fork).  But this must not affect this code here.  Since this
     function would have to be called while the thread is executing
     fork, it would have to happen in a signal handler.  But this is
     not allowed; pthread_kill is not guaranteed to be async-safe.  */
  int val;
#if defined(__ASSUME_TGKILL) && __ASSUME_TGKILL
  val = INTERNAL_SYSCALL (tgkill, err, 3, THREAD_GETMEM (THREAD_SELF, pid),
			  tid, signo);
#else
# ifdef __NR_tgkill
  val = INTERNAL_SYSCALL (tgkill, err, 3, THREAD_GETMEM (THREAD_SELF, pid),
			  tid, signo);
  if (INTERNAL_SYSCALL_ERROR_P (val, err)
      && INTERNAL_SYSCALL_ERRNO (val, err) == ENOSYS)
# endif
    val = INTERNAL_SYSCALL (tkill, err, 2, tid, signo);
#endif

  return (INTERNAL_SYSCALL_ERROR_P (val, err)
	  ? INTERNAL_SYSCALL_ERRNO (val, err) : 0);
}
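
A hedged caller-side sketch of pthread_kill directing a signal at one specific thread; the thread, handler and signal choice are illustrative:

#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t got_signal;

static void on_usr1(int sig)
{
    (void) sig;
    got_signal = 1;             /* runs in the thread targeted by pthread_kill */
}

static void *worker(void *arg)
{
    (void) arg;
    while (!got_signal)
        sleep(1);               /* wait until the signal handler has run */
    return NULL;
}

int main(void)
{
    pthread_t t;

    signal(SIGUSR1, on_usr1);
    pthread_create(&t, NULL, worker, NULL);

    /* Returns 0 on success or an error number such as ESRCH or EINVAL,
       matching the checks in __pthread_kill above.  */
    if (pthread_kill(t, SIGUSR1) == 0)
        puts("signal sent to worker");

    pthread_join(t, NULL);
    return 0;
}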
Example #26
0
void
__cleanup_fct_attribute
__pthread_unregister_cancel_restore (__pthread_unwind_buf_t *buf)
{
  struct pthread *self = THREAD_SELF;
  struct pthread_unwind_buf *ibuf = (struct pthread_unwind_buf *) buf;

  THREAD_SETMEM (self, cleanup_jmp_buf, ibuf->priv.data.prev);

  int cancelhandling;
  if (ibuf->priv.data.canceltype != PTHREAD_CANCEL_DEFERRED
      && ((cancelhandling = THREAD_GETMEM (self, cancelhandling))
	  & CANCELTYPE_BITMASK) == 0)
    {
      while (1)
	{
	  int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling,
						  cancelhandling
						  | CANCELTYPE_BITMASK,
						  cancelhandling);
	  if (__glibc_likely (curval == cancelhandling))
	    /* Successfully replaced the value.  */
	    break;

	  /* Prepare for the next round.  */
	  cancelhandling = curval;
	}

      CANCELLATION_P (self);
    }
}
Example #27
0
void
internal_function attribute_hidden
__libc_disable_asynccancel (int oldtype)
{
  /* If asynchronous cancellation was enabled before, we do not have
     anything to do.  */
  if (oldtype & CANCELTYPE_BITMASK)
    return;

  struct pthread *self = THREAD_SELF;

#ifdef THREAD_ATOMIC_AND
  THREAD_ATOMIC_AND (self, cancelhandling, ~CANCELTYPE_BITMASK);
#else
  int oldval = THREAD_GETMEM (self, cancelhandling);

  while (1)
    {
      int newval = oldval & ~CANCELTYPE_BITMASK;

      if (newval == oldval)
	break;

      int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
					      oldval);
      if (__builtin_expect (curval == oldval, 1))
	break;

      /* Prepare the next round.  */
      oldval = curval;
    }
#endif
}
Example #28
0
static _Unwind_Reason_Code
unwind_stop (int version, _Unwind_Action actions,
	     _Unwind_Exception_Class exc_class,
	     struct _Unwind_Exception *exc_obj,
	     struct _Unwind_Context *context, void *stop_parameter)
{
  struct pthread_unwind_buf *buf = stop_parameter;
  struct pthread *self = THREAD_SELF;
  struct _pthread_cleanup_buffer *curp = THREAD_GETMEM (self, cleanup);
  int do_longjump = 0;

  /* Adjust all pointers used in comparisons, so that top of thread's
     stack is at the top of address space.  Without that, things break
     if stack is allocated above the main stack.  */
  uintptr_t adj = (uintptr_t) self->stackblock + self->stackblock_size;

  /* Do longjmp if we're at "end of stack", aka "end of unwind data".
     We assume there are only C frames without unwind data in between
     here and the jmp_buf target.  Otherwise simply note that the CFA
     of a function is NOT within its stack frame; it's the SP of the
     previous frame.  */
  if ((actions & _UA_END_OF_STACK)
      || ! _JMPBUF_CFA_UNWINDS_ADJ (buf->cancel_jmp_buf[0].jmp_buf, context,
				    adj))
    do_longjump = 1;

  if (__builtin_expect (curp != NULL, 0))
    {
      /* Handle the compatibility stuff.  Execute all handlers
	 registered with the old method which would be unwound by this
	 step.  */
      struct _pthread_cleanup_buffer *oldp = buf->priv.data.cleanup;
      void *cfa = (void *) (_Unwind_Ptr) _Unwind_GetCFA (context);

      if (curp != oldp && (do_longjump || FRAME_LEFT (cfa, curp, adj)))
	{
	  do
	    {
	      /* Pointer to the next element.  */
	      struct _pthread_cleanup_buffer *nextp = curp->__prev;

	      /* Call the handler.  */
	      curp->__routine (curp->__arg);

	      /* To the next.  */
	      curp = nextp;
	    }
	  while (curp != oldp
		 && (do_longjump || FRAME_LEFT (cfa, curp, adj)));

	  /* Mark the current element as handled.  */
	  THREAD_SETMEM (self, cleanup, curp);
	}
    }

  if (do_longjump)
    __libc_unwind_longjmp ((struct __jmp_buf_tag *) buf->cancel_jmp_buf, 1);

  return _URC_NO_REASON;
}
Example #29
0
/* The next two functions are similar to pthread_setcanceltype() but
   more specialized for use in cancelable functions like write().
   They do not need to check parameters etc.  */
int
attribute_hidden
__pthread_enable_asynccancel (void)
{
  struct pthread *self = THREAD_SELF;
  int oldval = THREAD_GETMEM (self, cancelhandling);

  while (1)
    {
      int newval = oldval | CANCELTYPE_BITMASK;

      if (newval == oldval)
	break;

      int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
					      oldval);
      if (__builtin_expect (curval == oldval, 1))
	{
	  if (CANCEL_ENABLED_AND_CANCELED_AND_ASYNCHRONOUS (newval))
	    {
	      THREAD_SETMEM (self, result, PTHREAD_CANCELED);
	      __do_cancel ();
	    }

	  break;
	}

      /* Prepare the next round.  */
      oldval = curval;
    }

  return oldval;
}
Example #30
0
static int
rwlock_have_already(pthread_descr *pself, pthread_rwlock_t *rwlock,
    pthread_readlock_info **pexisting, int *pout_of_mem)
{
  pthread_readlock_info *existing = NULL;
  int out_of_mem = 0, have_lock_already = 0;
  pthread_descr self = *pself;

  if (rwlock->__rw_kind == PTHREAD_RWLOCK_PREFER_WRITER_NP)
    {
      if (!self)
	*pself = self = thread_self();

      existing = rwlock_is_in_list(self, rwlock);

      if (existing != NULL
	  || THREAD_GETMEM (self, p_untracked_readlock_count) > 0)
	have_lock_already = 1;
      else
	{
	  existing = rwlock_add_to_list(self, rwlock);
	  if (existing == NULL)
	    out_of_mem = 1;
	}
    }

  *pout_of_mem = out_of_mem;
  *pexisting = existing;

  return have_lock_already;
}
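
The reader tracking above applies only to PTHREAD_RWLOCK_PREFER_WRITER_NP locks; a hedged sketch of requesting that kind through the GNU rwlockattr extension (_GNU_SOURCE assumed):

#define _GNU_SOURCE
#include <pthread.h>

int main(void)
{
    pthread_rwlockattr_t attr;
    pthread_rwlock_t rwlock;

    /* Writer-preference kind: read locks already held by a thread are
       tracked (as in rwlock_have_already) so the thread can take another
       read lock without deadlocking behind a queued writer.  */
    pthread_rwlockattr_init(&attr);
    pthread_rwlockattr_setkind_np(&attr, PTHREAD_RWLOCK_PREFER_WRITER_NP);
    pthread_rwlock_init(&rwlock, &attr);

    pthread_rwlock_rdlock(&rwlock);   /* first read lock is recorded */
    pthread_rwlock_rdlock(&rwlock);   /* recursive read lock by same thread */
    pthread_rwlock_unlock(&rwlock);
    pthread_rwlock_unlock(&rwlock);

    pthread_rwlock_destroy(&rwlock);
    pthread_rwlockattr_destroy(&attr);
    return 0;
}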