Example no. 1
struct hurd_sigstate *
_hurd_thread_sigstate (thread_t thread)
{
  struct hurd_sigstate *ss;
  __mutex_lock (&_hurd_siglock);
  for (ss = _hurd_sigstates; ss != NULL; ss = ss->next)
    if (ss->thread == thread)
       break;
  if (ss == NULL)
    {
      ss = malloc (sizeof (*ss));
      if (ss == NULL)
	__libc_fatal ("hurd: Can't allocate sigstate\n");
      ss->thread = thread;
      __spin_lock_init (&ss->lock);

      /* Initialize default state.  */
      __sigemptyset (&ss->blocked);
      __sigemptyset (&ss->pending);
      memset (&ss->sigaltstack, 0, sizeof (ss->sigaltstack));
      ss->preemptors = NULL;
      ss->suspended = MACH_PORT_NULL;
      ss->intr_port = MACH_PORT_NULL;
      ss->context = NULL;

      if (thread == MACH_PORT_NULL)
	{
	  /* Process-wide sigstate, use the system defaults.  */
	  default_sigaction (ss->actions);

	  /* The global sigstate is not added to the _hurd_sigstates list.
	     It is created with _hurd_thread_sigstate (MACH_PORT_NULL)
	     but should be accessed through _hurd_global_sigstate.  */
	}
      else
	{
	  /* Use the global actions as a default for new threads.  */
	  struct hurd_sigstate *s = _hurd_global_sigstate;
	  if (s)
	    {
	      __spin_lock (&s->lock);
	      memcpy (ss->actions, s->actions, sizeof (s->actions));
	      __spin_unlock (&s->lock);
	    }
	  else
	    default_sigaction (ss->actions);

	  ss->next = _hurd_sigstates;
	  _hurd_sigstates = ss;
	}
    }
  __mutex_unlock (&_hurd_siglock);
  return ss;
}
Example no. 2
/* Set the disposition for SIG.  */
__sighandler_t sigset (int sig, __sighandler_t disp)
{
    struct sigaction act, oact;
    sigset_t set;

#ifdef SIG_HOLD
    /* Handle SIG_HOLD first.  */
    if (disp == SIG_HOLD)
    {
	/* Create an empty signal set.  */
	if (__sigemptyset (&set) < 0)
	    return SIG_ERR;

	/* Add the specified signal.  */
	if (__sigaddset (&set, sig) < 0)
	    return SIG_ERR;

	/* Add the signal set to the current signal mask.  */
	if (sigprocmask (SIG_BLOCK, &set, NULL) < 0)
	    return SIG_ERR;

	return SIG_HOLD;
    }
#endif	/* SIG_HOLD */

    /* Check signal extents to protect __sigismember.  */
    if (disp == SIG_ERR || sig < 1 || sig >= NSIG)
    {
	__set_errno (EINVAL);
	return SIG_ERR;
    }

    act.sa_handler = disp;
    if (__sigemptyset (&act.sa_mask) < 0)
	return SIG_ERR;
    act.sa_flags = 0;
    if (sigaction (sig, &act, &oact) < 0)
	return SIG_ERR;

    /* Create an empty signal set.  */
    if (__sigemptyset (&set) < 0)
	return SIG_ERR;

    /* Add the specified signal.  */
    if (__sigaddset (&set, sig) < 0)
	return SIG_ERR;

    /* Remove the signal set from the current signal mask.  */
    if (sigprocmask (SIG_UNBLOCK, &set, NULL) < 0)
	return SIG_ERR;

    return oact.sa_handler;
}
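A minimal caller sketch for the interface above (not part of the library source): it assumes the XSI sigset()/SIG_HOLD interface is exposed via _XOPEN_SOURCE, holds SIGINT, then installs a handler, which the implementation above also unblocks with its final SIG_UNBLOCK step.

#define _XOPEN_SOURCE 700
#include <signal.h>
#include <stdio.h>

static void on_int (int sig)
{
  /* Handler body intentionally empty; we only care about dispositions.  */
  (void) sig;
}

int main (void)
{
  void (*old) (int);

  /* Hold (block) SIGINT through the sigset() interface.  */
  if (sigset (SIGINT, SIG_HOLD) == SIG_ERR)
    perror ("sigset (SIG_HOLD)");

  /* Installing a real handler also unblocks the signal, matching the
     SIG_UNBLOCK call at the end of the implementation above.  */
  old = sigset (SIGINT, on_int);
  if (old == SIG_ERR)
    perror ("sigset (handler)");

  return 0;
}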
Example no. 3
struct hurd_sigstate *
_hurd_thread_sigstate (thread_t thread)
{
  struct hurd_sigstate *ss;
  __mutex_lock (&_hurd_siglock);
  for (ss = _hurd_sigstates; ss != NULL; ss = ss->next)
    if (ss->thread == thread)
       break;
  if (ss == NULL)
    {
      ss = malloc (sizeof (*ss));
      if (ss == NULL)
	__libc_fatal ("hurd: Can't allocate thread sigstate\n");
      ss->thread = thread;
      __spin_lock_init (&ss->lock);

      /* Initialize default state.  */
      __sigemptyset (&ss->blocked);
      __sigemptyset (&ss->pending);
      memset (&ss->sigaltstack, 0, sizeof (ss->sigaltstack));
      ss->sigaltstack.ss_flags |= SS_DISABLE;
      ss->preemptors = NULL;
      ss->suspended = MACH_PORT_NULL;
      ss->intr_port = MACH_PORT_NULL;
      ss->context = NULL;

      /* Initialize the sigaction vector from the default signal-receiving
	 thread's state; for that thread itself, use the system defaults.  */
      if (thread == _hurd_sigthread)
	default_sigaction (ss->actions);
      else
	{
	  struct hurd_sigstate *s;
	  for (s = _hurd_sigstates; s != NULL; s = s->next)
	    if (s->thread == _hurd_sigthread)
	      break;
	  if (s)
	    {
	      __spin_lock (&s->lock);
	      memcpy (ss->actions, s->actions, sizeof (s->actions));
	      __spin_unlock (&s->lock);
	    }
	  else
	    default_sigaction (ss->actions);
	}

      ss->next = _hurd_sigstates;
      _hurd_sigstates = ss;
    }
  __mutex_unlock (&_hurd_siglock);
  return ss;
}
Example no. 4
/* Cause an abnormal program termination with core-dump.  */
void abort(void)
{
    sigset_t sigset;

    /* Make sure we acquire the lock before proceeding.  */
    LOCK;

    /* Unmask SIGABRT to be sure we can get it */
    if (__sigemptyset(&sigset) == 0 && __sigaddset(&sigset, SIGABRT) == 0) {
	sigprocmask(SIG_UNBLOCK, &sigset, (sigset_t *) NULL);
    }

    /* If we are using stdio, try to shut it down.  At the very least,
     * this will attempt to commit all buffered writes.  It may also
     * unbuffer all writable files, or close them outright.
     * Check the stdio routines for details. */
    if (_stdio_term)
	_stdio_term();

    while (1) {
	/* Try to suicide with a SIGABRT.  */
	if (been_there_done_that == 0) {
	    been_there_done_that++;
	    UNLOCK;
	    raise(SIGABRT);
	    LOCK;
	}

	/* Still here?  Try to remove any signal handlers.  */
	if (been_there_done_that == 1) {
	    struct sigaction act;

	    been_there_done_that++;
	    memset (&act, '\0', sizeof (struct sigaction));
	    act.sa_handler = SIG_DFL;
	    __sigfillset (&act.sa_mask);
	    act.sa_flags = 0;
	    sigaction (SIGABRT, &act, NULL);
	}

	/* Still here?  Try to suicide with an illegal instruction */
	if (been_there_done_that == 2) {
	    been_there_done_that++;
	    ABORT_INSTRUCTION;
	}

	/* Still here?  Try to at least exit */
	if (been_there_done_that == 3) {
	    been_there_done_that++;
	    _exit (127);
	}

	/* Still here?  We're screwed.  Sleepy time.  Good night */
	while (1)
	    /* Try for ever and ever.  */
	    ABORT_INSTRUCTION;
    }
}
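For illustration, the same "make sure we really die" sequence reduced to a standalone helper that a fatal-error path might use: drop any user handler for SIGABRT, unblock it, raise it, and fall back to _exit. The helper name is made up; this is a sketch of the pattern, not library code.

#include <signal.h>
#include <string.h>
#include <unistd.h>

static void really_abort (void)
{
  struct sigaction act;
  sigset_t set;

  /* Drop any user-installed SIGABRT handler.  */
  memset (&act, 0, sizeof act);
  act.sa_handler = SIG_DFL;
  sigfillset (&act.sa_mask);
  sigaction (SIGABRT, &act, NULL);

  /* Make sure SIGABRT is deliverable, then raise it.  */
  sigemptyset (&set);
  sigaddset (&set, SIGABRT);
  sigprocmask (SIG_UNBLOCK, &set, NULL);
  raise (SIGABRT);

  /* Last resort if raise() somehow returned.  */
  _exit (127);
}

int main (void)
{
  really_abort ();   /* terminates the process with SIGABRT */
}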
Example no. 5
static void
default_sigaction (struct sigaction actions[NSIG])
{
  int signo;

  __sigemptyset (&actions[0].sa_mask);
  actions[0].sa_flags = SA_RESTART;
  actions[0].sa_handler = SIG_DFL;

  for (signo = 1; signo < NSIG; ++signo)
    actions[signo] = actions[0];
}
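As a standalone illustration of the same pattern using the public API (default_sigaction itself is internal to the library), the sketch below fills slot 0, copies it to the remaining NSIG-1 entries, and checks that every disposition is SIG_DFL with SA_RESTART.

#define _DEFAULT_SOURCE
#include <assert.h>
#include <signal.h>

int main (void)
{
  struct sigaction actions[NSIG];
  int signo;

  /* Fill the first slot, then copy it to all the others.  */
  sigemptyset (&actions[0].sa_mask);
  actions[0].sa_flags = SA_RESTART;
  actions[0].sa_handler = SIG_DFL;
  for (signo = 1; signo < NSIG; ++signo)
    actions[signo] = actions[0];

  /* Every entry now carries the default, restarting disposition.  */
  for (signo = 0; signo < NSIG; ++signo)
    assert (actions[signo].sa_handler == SIG_DFL
	    && (actions[signo].sa_flags & SA_RESTART));
  return 0;
}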
Example no. 6
/* Cause an abnormal program termination with core-dump.  */
void abort(void)
{
    sigset_t sigset;

    /* Make sure we acquire the lock before proceeding.  */
    LOCK;

    /* Unmask SIGABRT to be sure we can get it */
    if (__sigemptyset(&sigset) == 0 && __sigaddset(&sigset, SIGABRT) == 0) {
	sigprocmask(SIG_UNBLOCK, &sigset, (sigset_t *) NULL);
    }

    while (1) {
	/* Try to suicide with a SIGABRT.  */
	if (been_there_done_that == 0) {
	    been_there_done_that++;
	    UNLOCK;
	    raise(SIGABRT);
	    LOCK;
	}

	/* Still here?  Try to remove any signal handlers.  */
	if (been_there_done_that == 1) {
	    struct sigaction act;

	    been_there_done_that++;
	    memset (&act, '\0', sizeof (struct sigaction));
	    act.sa_handler = SIG_DFL;
	    __sigfillset (&act.sa_mask);
	    act.sa_flags = 0;
	    sigaction (SIGABRT, &act, NULL);
	}

	/* Still here?  Try to suicide with an illegal instruction */
	if (been_there_done_that == 2) {
	    been_there_done_that++;
	    ABORT_INSTRUCTION;
	}

	/* Still here?  Try to at least exit */
	if (been_there_done_that == 3) {
	    been_there_done_that++;
	    _exit (127);
	}

	/* Still here?  We're screwed.  Sleepy time.  Good night */
	while (1)
	    /* Try for ever and ever.  */
	    ABORT_INSTRUCTION;
    }
}
Example no. 7
/* Clear all signals from SET.  */
int sigemptyset (sigset_t *set)
{
#if 0 /* is it really required by standards?! */
  if (set == NULL)
    {
      __set_errno (EINVAL);
      return -1;
    }
#endif

  __sigemptyset (set);

  return 0;
}
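A short usage sketch of the public interface wrapped above: build an empty set, add SIGCHLD, and block it, the same three-call sequence most of the examples in this section rely on. Illustrative only.

#include <signal.h>
#include <stdio.h>

int main (void)
{
  sigset_t set;

  if (sigemptyset (&set) < 0 || sigaddset (&set, SIGCHLD) < 0)
    {
      perror ("building signal set");
      return 1;
    }
  if (sigprocmask (SIG_BLOCK, &set, NULL) < 0)
    {
      perror ("sigprocmask");
      return 1;
    }
  puts ("SIGCHLD is now blocked");
  return 0;
}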
Example no. 8
/* Cause an abnormal program termination with core-dump.  */
__NORETURN
void
DEFUN_VOID(abort)
{
  sigset_t sigs;

  if (__sigemptyset(&sigs) == 0 &&
      __sigaddset(&sigs, SIGABRT) == 0)
    (void) __sigprocmask(SIG_UNBLOCK, &sigs, (sigset_t *) NULL);

  while (1)
    if (raise (SIGABRT))
      /* If we can't signal ourselves, exit.  */
      _exit (127);
  /* If we signal ourselves and are still alive,
     or can't exit, loop forever.  */
}
Example no. 9
error_t
__pthread_sigstate (struct __pthread *thread, int how,
		    const sigset_t *set, sigset_t *oset, int clear_pending)
{
  error_t err = 0;
  struct hurd_sigstate *ss;

  ss = _hurd_thread_sigstate (thread->kernel_thread);
  assert (ss);

  __spin_lock (&ss->lock);

  if (oset != NULL)
    *oset = ss->blocked;

  if (set != NULL)
    {
      switch (how)
	{
	case SIG_BLOCK:
	  ss->blocked |= *set;
	  break;

	case SIG_SETMASK:
	  ss->blocked = *set;
	  break;

	case SIG_UNBLOCK:
	  ss->blocked &= ~*set;
	  break;

	default:
	  err = EINVAL;
	  break;
	}
      ss->blocked &= ~_SIG_CANT_MASK;
    }

  if (!err && clear_pending)
    __sigemptyset (&ss->pending);

  __spin_unlock (&ss->lock);

  return err;
}
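The public counterpart of the per-thread operation above is pthread_sigmask() with the same SIG_BLOCK / SIG_SETMASK / SIG_UNBLOCK semantics. A minimal sketch, assuming a POSIX threads environment:

#include <pthread.h>
#include <signal.h>

int main (void)
{
  sigset_t set, old;

  sigemptyset (&set);
  sigaddset (&set, SIGUSR1);

  /* Add SIGUSR1 to the calling thread's mask, then restore the old mask.  */
  pthread_sigmask (SIG_BLOCK, &set, &old);
  pthread_sigmask (SIG_SETMASK, &old, NULL);
  return 0;
}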
Example no. 10
static kern_return_t
get_int (int which, int *value)
{
  switch (which)
    {
    case INIT_UMASK:
      *value = _hurd_umask;
      return 0;
    case INIT_SIGMASK:
      {
	struct hurd_sigstate *ss = _hurd_thread_sigstate (_hurd_sigthread);
	__spin_lock (&ss->lock);
	*value = ss->blocked;
	__spin_unlock (&ss->lock);
	return 0;
      }
    case INIT_SIGPENDING:
      {
	struct hurd_sigstate *ss = _hurd_thread_sigstate (_hurd_sigthread);
	__spin_lock (&ss->lock);
	*value = ss->pending;
	__spin_unlock (&ss->lock);
	return 0;
      }
    case INIT_SIGIGN:
      {
	struct hurd_sigstate *ss = _hurd_thread_sigstate (_hurd_sigthread);
	sigset_t ign;
	int sig;
	__spin_lock (&ss->lock);
	__sigemptyset (&ign);
	for (sig = 1; sig < NSIG; ++sig)
	  if (ss->actions[sig].sa_handler == SIG_IGN)
	    __sigaddset (&ign, sig);
	__spin_unlock (&ss->lock);
	*value = ign;
	return 0;
      }
    default:
      return EINVAL;
    }
}
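The INIT_SIGIGN loop above can be expressed with the public sigaction() query interface: ask for the current disposition of each signal and collect the ones set to SIG_IGN. A sketch, for illustration only:

#define _DEFAULT_SOURCE
#include <signal.h>
#include <stdio.h>

int main (void)
{
  sigset_t ign;
  struct sigaction oact;
  int sig;

  sigemptyset (&ign);
  for (sig = 1; sig < NSIG; ++sig)
    if (sigaction (sig, NULL, &oact) == 0 && oact.sa_handler == SIG_IGN)
      sigaddset (&ign, sig);

  for (sig = 1; sig < NSIG; ++sig)
    if (sigismember (&ign, sig))
      printf ("signal %d is currently ignored\n", sig);
  return 0;
}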
Example no. 11
/* Set the handler for the signal SIG to HANDLER,
   returning the old handler, or SIG_ERR on error.  */
__sighandler_t __sysv_signal (int sig, __sighandler_t handler)
{
  struct sigaction act, oact;

  /* Check signal extents to protect __sigismember.  */
  if (handler == SIG_ERR || sig < 1 || sig >= NSIG)
    {
      __set_errno (EINVAL);
      return SIG_ERR;
    }

  act.sa_handler = handler;
  __sigemptyset (&act.sa_mask);
  act.sa_flags = (SA_ONESHOT | SA_NOMASK | SA_INTERRUPT) & ~SA_RESTART;
  /* In Linux (as of 2.6.25), fails only if sig is SIGKILL or SIGSTOP */
  if (sigaction (sig, &act, &oact) < 0)
    return SIG_ERR;

  return oact.sa_handler;
}
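The flags above request System V semantics: the handler is reset to SIG_DFL before it runs (one-shot) and the signal is not masked during delivery. The sketch below demonstrates the same behaviour with the portable sigaction() flags SA_RESETHAND and SA_NODEFER; it is illustrative and does not call __sysv_signal itself.

#include <signal.h>
#include <unistd.h>

static void once (int sig)
{
  (void) sig;
  write (STDOUT_FILENO, "handled once\n", 13);
}

int main (void)
{
  struct sigaction act;

  act.sa_handler = once;
  sigemptyset (&act.sa_mask);
  act.sa_flags = SA_RESETHAND | SA_NODEFER;   /* SysV-style one-shot */
  sigaction (SIGUSR1, &act, NULL);

  raise (SIGUSR1);   /* runs the handler; disposition resets to SIG_DFL */
  /* A second raise (SIGUSR1) here would terminate the process.  */
  return 0;
}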
Example no. 12
/* Helper, initializes a fresh uthread to be thread0. */
static void uthread_init_thread0(struct uthread *uthread)
{
	assert(uthread);
	/* Save a pointer to thread0's tls region (the glibc one) into its tcb */
	uthread->tls_desc = get_tls_desc();
	/* Save a pointer to the uthread in its own TLS */
	current_uthread = uthread;
	/* Thread is currently running (it is 'us') */
	uthread->state = UT_RUNNING;
	/* Reset the signal state */
	uthread->sigstate.mask = 0;
	__sigemptyset(&uthread->sigstate.pending);
	uthread->sigstate.data = NULL;
	/* utf/as doesn't represent the state of the uthread (we are running) */
	uthread->flags &= ~(UTHREAD_SAVED | UTHREAD_FPSAVED);
	/* need to track thread0 for TLS deallocation */
	uthread->flags |= UTHREAD_IS_THREAD0;
	uthread->notif_disabled_depth = 0;
	/* setting the uthread's TLS var.  this is idempotent for SCPs (us) */
	__vcoreid = 0;
}
Example no. 13
/* Set the handler for the signal SIG to HANDLER,
   returning the old handler, or SIG_ERR on error.  */
__sighandler_t
__sysv_signal (int sig, __sighandler_t handler)
{
  struct sigaction act, oact;

  /* Check signal extents to protect __sigismember.  */
  if (handler == SIG_ERR || sig < 1 || sig >= NSIG)
    {
      __set_errno (EINVAL);
      return SIG_ERR;
    }

  act.sa_handler = handler;
  if (__sigemptyset (&act.sa_mask) < 0)
    return SIG_ERR;
  act.sa_flags = SA_ONESHOT | SA_NOMASK | SA_INTERRUPT;
  act.sa_flags &= ~SA_RESTART;
  if (__sigaction (sig, &act, &oact) < 0)
    return SIG_ERR;

  return oact.sa_handler;
}
Example no. 14
/* This is quick and dirty, and not 100% compliant with
 * the stupid SysV SIGCHLD vs. SIG_IGN behaviour.  It is
 * fine unless you are messing with SIGCHLD...  */
unsigned int sleep (unsigned int seconds)
{
	unsigned int res;
	struct timespec ts = { .tv_sec = (long int) seconds, .tv_nsec = 0 };
	res = nanosleep(&ts, &ts);
	if (res) res = (unsigned int) ts.tv_sec + (ts.tv_nsec >= 500000000L);
	return res;
}

# else

/* We are going to use the `nanosleep' syscall of the kernel.  But the
   kernel does not implement the stupid SysV SIGCHLD vs. SIG_IGN
   behaviour for this syscall.  Therefore we have to emulate it here.  */
unsigned int sleep (unsigned int seconds)
{
    struct timespec ts = { .tv_sec = (long int) seconds, .tv_nsec = 0 };
    sigset_t set;
    struct sigaction oact;
    unsigned int result;

    /* This is not necessary but some buggy programs depend on this.  */
    if (seconds == 0) {
#  ifdef CANCELLATION_P
	int cancelhandling;
	CANCELLATION_P (THREAD_SELF);
#  endif
	return 0;
    }

    /* Linux will wake up the system call, nanosleep, when SIGCHLD
       arrives even if SIGCHLD is ignored.  We have to deal with it
       in libc.  */

    __sigemptyset (&set);
    __sigaddset (&set, SIGCHLD);

    /* Is SIGCHLD set to SIG_IGN? */
    sigaction (SIGCHLD, NULL, &oact); /* never fails */
    if (oact.sa_handler == SIG_IGN) {
	/* Yes.  Block SIGCHLD, save old mask.  */
	sigprocmask (SIG_BLOCK, &set, &set); /* never fails */
    }

    /* Run nanosleep, with SIGCHLD blocked if SIGCHLD is SIG_IGNed.  */
    result = nanosleep (&ts, &ts);
    if (result != 0) {
	/* Got EINTR. Return remaining time.  */
	result = (unsigned int) ts.tv_sec + (ts.tv_nsec >= 500000000L);
    }

    if (!__sigismember (&set, SIGCHLD)) {
	/* We did block SIGCHLD, and old mask had no SIGCHLD bit.
	   IOW: we need to unblock SIGCHLD now. Do it.  */
	/* this sigprocmask call never fails, thus never updates errno,
	   and therefore we don't need to save/restore it.  */
	sigprocmask (SIG_SETMASK, &set, NULL); /* never fails */
    }

    return result;
}

# endif

#else /* __UCLIBC_HAS_REALTIME__ */

/* no nanosleep, use signals and alarm() */
static void sleep_alarm_handler(int attribute_unused sig)
{
}
unsigned int sleep (unsigned int seconds)
{
    struct sigaction act, oact;
    sigset_t set, oset;
    unsigned int result, remaining;
    time_t before, after;
    int old_errno = errno;

    /* This is not necessary but some buggy programs depend on this.  */
    if (seconds == 0)
	return 0;

    /* block SIGALRM */
    __sigemptyset (&set);
    __sigaddset (&set, SIGALRM);
    sigprocmask (SIG_BLOCK, &set, &oset); /* can't fail */

    act.sa_handler = sleep_alarm_handler;
    act.sa_flags = 0;
    act.sa_mask = oset;
    sigaction(SIGALRM, &act, &oact); /* never fails */

    before = time(NULL);
    remaining = alarm(seconds);
    if (remaining && remaining > seconds) {
	/* restore user's alarm */
	sigaction(SIGALRM, &oact, NULL);
	alarm(remaining); /* restore old alarm */
	sigsuspend(&oset);
	after = time(NULL);
    } else {
	sigsuspend (&oset);
	after = time(NULL);
	sigaction (SIGALRM, &oact, NULL);
    }
    result = after - before;
    alarm(remaining > result ? remaining - result : 0);
    sigprocmask (SIG_SETMASK, &oset, NULL);

    __set_errno(old_errno);

    return result > seconds ? 0 : seconds - result;
}

#endif /* __UCLIBC_HAS_REALTIME__ */

libc_hidden_def(sleep)
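The core of the alarm-based fallback above is the block/arm/sigsuspend sequence: keep SIGALRM blocked while arming the timer, then atomically unblock and wait. A reduced sketch, with a hypothetical handler name:

#include <signal.h>
#include <string.h>
#include <unistd.h>

static void on_alarm (int sig) { (void) sig; }

int main (void)
{
  struct sigaction act;
  sigset_t block, old;

  memset (&act, 0, sizeof act);
  act.sa_handler = on_alarm;               /* just catch it; no SA_RESTART needed */
  sigaction (SIGALRM, &act, NULL);

  sigemptyset (&block);
  sigaddset (&block, SIGALRM);
  sigprocmask (SIG_BLOCK, &block, &old);   /* keep SIGALRM pending until we wait */

  alarm (2);                               /* wake up after roughly 2 seconds */
  sigsuspend (&old);                       /* atomically unblock and sleep */

  sigprocmask (SIG_SETMASK, &old, NULL);
  return 0;
}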
Example no. 15
/* Execute LINE as a shell command, returning its status.  */
static int
do_system (const char *line)
{
  int status, save;
  pid_t pid;
  struct sigaction sa;
#ifndef _LIBC_REENTRANT
  struct sigaction intr, quit;
#endif
  sigset_t omask;

  sa.sa_handler = SIG_IGN;
  sa.sa_flags = 0;
  __sigemptyset (&sa.sa_mask);

  DO_LOCK ();
  if (ADD_REF () == 0)
    {
      if (__sigaction (SIGINT, &sa, &intr) < 0)
	{
	  (void) SUB_REF ();
	  goto out;
	}
      if (__sigaction (SIGQUIT, &sa, &quit) < 0)
	{
	  save = errno;
	  (void) SUB_REF ();
	  goto out_restore_sigint;
	}
    }
  DO_UNLOCK ();

  /* We reuse the bitmap in the 'sa' structure.  */
  __sigaddset (&sa.sa_mask, SIGCHLD);
  save = errno;
  if (__sigprocmask (SIG_BLOCK, &sa.sa_mask, &omask) < 0)
    {
#ifndef _LIBC
      if (errno == ENOSYS)
	__set_errno (save);
      else
#endif
	{
	  DO_LOCK ();
	  if (SUB_REF () == 0)
	    {
	      save = errno;
	      (void) __sigaction (SIGQUIT, &quit, (struct sigaction *) NULL);
	    out_restore_sigint:
	      (void) __sigaction (SIGINT, &intr, (struct sigaction *) NULL);
	      __set_errno (save);
	    }
	out:
	  DO_UNLOCK ();
	  return -1;
	}
    }

#ifdef CLEANUP_HANDLER
  CLEANUP_HANDLER;
#endif

#ifdef FORK
  pid = FORK ();
#else
  pid = __fork ();
#endif
  if (pid == (pid_t) 0)
    {
      /* Child side.  */
      const char *new_argv[4];
      new_argv[0] = SHELL_NAME;
      new_argv[1] = "-c";
      new_argv[2] = line;
      new_argv[3] = NULL;

      /* Restore the signals.  */
      (void) __sigaction (SIGINT, &intr, (struct sigaction *) NULL);
      (void) __sigaction (SIGQUIT, &quit, (struct sigaction *) NULL);
      (void) __sigprocmask (SIG_SETMASK, &omask, (sigset_t *) NULL);
      INIT_LOCK ();

      /* Exec the shell.  */
      (void) __execve (SHELL_PATH, (char *const *) new_argv, __environ);
      _exit (127);
    }
  else if (pid < (pid_t) 0)
    /* The fork failed.  */
    status = -1;
  else
    /* Parent side.  */
    {
      /* Note that system() is a cancellation point.  But since we call
	 waitpid() which itself is a cancellation point we do not
	 have to do anything here.  */
      if (TEMP_FAILURE_RETRY (__waitpid (pid, &status, 0)) != pid)
	status = -1;
    }

#ifdef CLEANUP_HANDLER
  CLEANUP_RESET;
#endif

  save = errno;
  DO_LOCK ();
  if ((SUB_REF () == 0
       && (__sigaction (SIGINT, &intr, (struct sigaction *) NULL)
	   | __sigaction (SIGQUIT, &quit, (struct sigaction *) NULL)) != 0)
      || __sigprocmask (SIG_SETMASK, &omask, (sigset_t *) NULL) != 0)
    {
#ifndef _LIBC
      /* glibc cannot be used on systems without waitpid.  */
      if (errno == ENOSYS)
	__set_errno (save);
      else
#endif
	status = -1;
    }
  DO_UNLOCK ();

  return status;
}
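Stripped of the SIGINT/SIGQUIT bookkeeping and locking, the core of the function above is a fork/exec/waitpid skeleton. The helper below is a hypothetical, simplified sketch of that core, not the library implementation:

#include <errno.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

extern char **environ;

static int run_shell (const char *line)
{
  int status;
  pid_t pid = fork ();

  if (pid == 0)
    {
      /* Child: run "/bin/sh -c LINE"; report exec failure as 127.  */
      char *const argv[] = { "sh", "-c", (char *) line, NULL };
      execve ("/bin/sh", argv, environ);
      _exit (127);
    }
  if (pid < 0)
    return -1;

  /* Parent: wait for the child, retrying on EINTR.  */
  while (waitpid (pid, &status, 0) != pid)
    if (errno != EINTR)
      return -1;
  return status;
}

int main (void)
{
  return run_shell ("echo hello") == 0 ? 0 : 1;
}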
Example no. 16
void
__pthread_initialize_minimal_internal (void)
{
#ifndef SHARED
  /* Unlike in the dynamically linked case the dynamic linker has not
     taken care of initializing the TLS data structures.  */
  __libc_setup_tls (TLS_TCB_SIZE, TLS_TCB_ALIGN);

  /* We must prevent gcc from being clever and move any of the
     following code ahead of the __libc_setup_tls call.  This function
     will initialize the thread register which is subsequently
     used.  */
  __asm __volatile ("");
#endif

  /* Minimal initialization of the thread descriptor.  */
  struct pthread *pd = THREAD_SELF;
  __pthread_initialize_pids (pd);
  THREAD_SETMEM (pd, specific[0], &pd->specific_1stblock[0]);
  THREAD_SETMEM (pd, user_stack, true);
  if (LLL_LOCK_INITIALIZER != 0)
    THREAD_SETMEM (pd, lock, LLL_LOCK_INITIALIZER);
#if HP_TIMING_AVAIL
  THREAD_SETMEM (pd, cpuclock_offset, GL(dl_cpuclock_offset));
#endif

  /* Initialize the robust mutex data.  */
  {
#ifdef __PTHREAD_MUTEX_HAVE_PREV
    pd->robust_prev = &pd->robust_head;
#endif
    pd->robust_head.list = &pd->robust_head;
#ifdef __NR_set_robust_list
    pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
				    - offsetof (pthread_mutex_t,
						__data.__list.__next));
    INTERNAL_SYSCALL_DECL (err);
    int res = INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
				sizeof (struct robust_list_head));
    if (INTERNAL_SYSCALL_ERROR_P (res, err))
#endif
      set_robust_list_not_avail ();
  }

#ifdef __NR_futex
# ifndef __ASSUME_PRIVATE_FUTEX
  /* Private futexes are always used (at least internally) so that
     doing the test once this early is beneficial.  */
  {
    int word = 0;
    INTERNAL_SYSCALL_DECL (err);
    word = INTERNAL_SYSCALL (futex, err, 3, &word,
			    FUTEX_WAKE | FUTEX_PRIVATE_FLAG, 1);
    if (!INTERNAL_SYSCALL_ERROR_P (word, err))
      THREAD_SETMEM (pd, header.private_futex, FUTEX_PRIVATE_FLAG);
  }

  /* Private futexes have been introduced earlier than the
     FUTEX_CLOCK_REALTIME flag.  We don't have to run the test if we
     know the former are not supported.  This also means we know the
     kernel will return ENOSYS for unknown operations.  */
  if (THREAD_GETMEM (pd, header.private_futex) != 0)
# endif
# ifndef __ASSUME_FUTEX_CLOCK_REALTIME
    {
      int word = 0;
      /* NB: the syscall actually takes six parameters.  The last is the
	 bit mask.  But since we will not actually wait at all the value
	 is irrelevant.  Given that passing six parameters is difficult
	 on some architectures we just pass whatever random value the
	 calling convention calls for to the kernel.  It causes no harm.  */
      INTERNAL_SYSCALL_DECL (err);
      word = INTERNAL_SYSCALL (futex, err, 5, &word,
			       FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME
			       | FUTEX_PRIVATE_FLAG, 1, NULL, 0);
      assert (INTERNAL_SYSCALL_ERROR_P (word, err));
      if (INTERNAL_SYSCALL_ERRNO (word, err) != ENOSYS)
	__set_futex_clock_realtime ();
    }
# endif
#endif

  /* Set initial thread's stack block from 0 up to __libc_stack_end.
     It will be bigger than it actually is, but for unwind.c/pt-longjmp.c
     purposes this is good enough.  */
  THREAD_SETMEM (pd, stackblock_size, (size_t) __libc_stack_end);

  /* Initialize the list of all running threads with the main thread.  */
  INIT_LIST_HEAD (&__stack_user);
  list_add (&pd->list, &__stack_user);

  /* Before initializing __stack_user, the debugger could not find us and
     had to set __nptl_initial_report_events.  Propagate its setting.  */
  THREAD_SETMEM (pd, report_events, __nptl_initial_report_events);

#if defined SIGCANCEL || defined SIGSETXID
  struct sigaction sa;
  __sigemptyset (&sa.sa_mask);

# ifdef SIGCANCEL
  /* Install the cancellation signal handler.  If for some reason we
     cannot install the handler we do not abort.  Maybe we should, but
     it is only asynchronous cancellation which is affected.  */
  sa.sa_sigaction = sigcancel_handler;
  sa.sa_flags = SA_SIGINFO;
  (void) __libc_sigaction (SIGCANCEL, &sa, NULL);
# endif

# ifdef SIGSETXID
  /* Install the handler to change the threads' uid/gid.  */
  sa.sa_sigaction = sighandler_setxid;
  sa.sa_flags = SA_SIGINFO | SA_RESTART;
  (void) __libc_sigaction (SIGSETXID, &sa, NULL);
# endif

  /* The parent process might have left the signals blocked.  Just in
     case, unblock them.  We reuse the signal mask in the sigaction
     structure.  It is already cleared.  */
# ifdef SIGCANCEL
  __sigaddset (&sa.sa_mask, SIGCANCEL);
# endif
# ifdef SIGSETXID
  __sigaddset (&sa.sa_mask, SIGSETXID);
# endif
  {
    INTERNAL_SYSCALL_DECL (err);
    (void) INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_UNBLOCK, &sa.sa_mask,
			     NULL, _NSIG / 8);
  }
#endif

  /* Get the size of the static and alignment requirements for the TLS
     block.  */
  size_t static_tls_align;
  _dl_get_tls_static_info (&__static_tls_size, &static_tls_align);

  /* Make sure the size takes all the alignments into account.  */
  if (STACK_ALIGN > static_tls_align)
    static_tls_align = STACK_ALIGN;
  __static_tls_align_m1 = static_tls_align - 1;

  __static_tls_size = roundup (__static_tls_size, static_tls_align);

  /* Determine the default allowed stack size.  This is the size used
     in case the user does not specify one.  */
  struct rlimit limit;
  if (__getrlimit (RLIMIT_STACK, &limit) != 0
      || limit.rlim_cur == RLIM_INFINITY)
    /* The system limit is not usable.  Use an architecture-specific
       default.  */
    limit.rlim_cur = ARCH_STACK_DEFAULT_SIZE;
  else if (limit.rlim_cur < PTHREAD_STACK_MIN)
    /* The system limit is unusably small.
       Use the minimal size acceptable.  */
    limit.rlim_cur = PTHREAD_STACK_MIN;

  /* Make sure it meets the minimum size that allocate_stack
     (allocatestack.c) will demand, which depends on the page size.  */
  const uintptr_t pagesz = GLRO(dl_pagesize);
  const size_t minstack = pagesz + __static_tls_size + MINIMAL_REST_STACK;
  if (limit.rlim_cur < minstack)
    limit.rlim_cur = minstack;

  /* Round the resource limit up to page size.  */
  limit.rlim_cur = ALIGN_UP (limit.rlim_cur, pagesz);
  lll_lock (__default_pthread_attr_lock, LLL_PRIVATE);
  __default_pthread_attr.stacksize = limit.rlim_cur;
  __default_pthread_attr.guardsize = GLRO (dl_pagesize);
  lll_unlock (__default_pthread_attr_lock, LLL_PRIVATE);

#ifdef SHARED
  /* Transfer the old value from the dynamic linker's internal location.  */
  *__libc_dl_error_tsd () = *(*GL(dl_error_catch_tsd)) ();
  GL(dl_error_catch_tsd) = &__libc_dl_error_tsd;

  /* Make __rtld_lock_{,un}lock_recursive use pthread_mutex_{,un}lock,
     keep the lock count from the ld.so implementation.  */
  GL(dl_rtld_lock_recursive) = (void *) __pthread_mutex_lock;
  GL(dl_rtld_unlock_recursive) = (void *) __pthread_mutex_unlock;
  unsigned int rtld_lock_count = GL(dl_load_lock).mutex.__data.__count;
  GL(dl_load_lock).mutex.__data.__count = 0;
  while (rtld_lock_count-- > 0)
    __pthread_mutex_lock (&GL(dl_load_lock).mutex);

  GL(dl_make_stack_executable_hook) = &__make_stacks_executable;
#endif

  GL(dl_init_static_tls) = &__pthread_init_static_tls;

  GL(dl_wait_lookup_done) = &__wait_lookup_done;

  /* Register the fork generation counter with the libc.  */
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
  __libc_multiple_threads_ptr =
#endif
    __libc_pthread_init (&__fork_generation, __reclaim_stacks,
			 ptr_pthread_functions);

  /* Determine whether the machine is SMP or not.  */
  __is_smp = is_smp_system ();
}
Example no. 17
/* We are going to use the `nanosleep' syscall of the kernel.  But the
   kernel does not implement the stupid SysV SIGCHLD vs. SIG_IGN
   behaviour for this syscall.  Therefore we have to emulate it here.  */
unsigned int
__sleep (unsigned int seconds)
{
  const unsigned int max
    = (unsigned int) (((unsigned long int) (~((time_t) 0))) >> 1);
  struct timespec ts;
  sigset_t set, oset;
  unsigned int result;

  /* This is not necessary but some buggy programs depend on this.  */
  if (__glibc_unlikely (seconds == 0))
    {
#ifdef CANCELLATION_P
      CANCELLATION_P (THREAD_SELF);
#endif
      return 0;
    }

  ts.tv_sec = 0;
  ts.tv_nsec = 0;
 again:
  if (sizeof (ts.tv_sec) <= sizeof (seconds))
    {
      /* Since SECONDS is unsigned assigning the value to .tv_sec can
	 overflow it.  In this case we have to wait in steps.  */
      ts.tv_sec += MIN (seconds, max);
      seconds -= (unsigned int) ts.tv_sec;
    }
  else
    {
      ts.tv_sec = (time_t) seconds;
      seconds = 0;
    }

  /* Linux will wake up the system call, nanosleep, when SIGCHLD
     arrives even if SIGCHLD is ignored.  We have to deal with it
     in libc.  We block SIGCHLD first.  */
  __sigemptyset (&set);
  __sigaddset (&set, SIGCHLD);
  if (__sigprocmask (SIG_BLOCK, &set, &oset))
    return -1;

  /* If SIGCHLD is already blocked, we don't have to do anything.  */
  if (!__sigismember (&oset, SIGCHLD))
    {
      int saved_errno;
      struct sigaction oact;

      __sigemptyset (&set);
      __sigaddset (&set, SIGCHLD);

      /* We get the signal handler for SIGCHLD.  */
      if (__sigaction (SIGCHLD, (struct sigaction *) NULL, &oact) < 0)
	{
	  saved_errno = errno;
	  /* Restore the original signal mask.  */
	  (void) __sigprocmask (SIG_SETMASK, &oset, (sigset_t *) NULL);
	  __set_errno (saved_errno);
	  return -1;
	}

      /* Note that sleep() is a cancellation point.  But since we call
	 nanosleep() which itself is a cancellation point we do not
	 have to do anything here.  */
      if (oact.sa_handler == SIG_IGN)
	{
	  //__libc_cleanup_push (cl, &oset);

	  /* We should leave SIGCHLD blocked.  */
	  while (1)
	    {
	      result = __nanosleep (&ts, &ts);

	      if (result != 0 || seconds == 0)
		break;

	      if (sizeof (ts.tv_sec) <= sizeof (seconds))
		{
		  ts.tv_sec = MIN (seconds, max);
		  seconds -= (unsigned int) ts.tv_sec;
		}
	    }

	  //__libc_cleanup_pop (0);

	  saved_errno = errno;
	  /* Restore the original signal mask.  */
	  (void) __sigprocmask (SIG_SETMASK, &oset, (sigset_t *) NULL);
	  __set_errno (saved_errno);

	  goto out;
	}

      /* We should unblock SIGCHLD.  Restore the original signal mask.  */
      (void) __sigprocmask (SIG_SETMASK, &oset, (sigset_t *) NULL);
    }

  result = __nanosleep (&ts, &ts);
  if (result == 0 && seconds != 0)
    goto again;

 out:
  if (result != 0)
    /* Round remaining time.  */
    result = seconds + (unsigned int) ts.tv_sec + (ts.tv_nsec >= 500000000L);

  return result;
}
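Callers that need the full interval loop on the return value, since sleep() reports the unslept remainder when a handled signal interrupts it. A small usage sketch with a hypothetical handler name:

#include <signal.h>
#include <unistd.h>

static void wake (int sig) { (void) sig; }

int main (void)
{
  unsigned int left = 10;

  signal (SIGUSR1, wake);       /* any handled signal interrupts sleep() */
  while (left > 0)
    left = sleep (left);        /* re-sleep whatever remains */
  return 0;
}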
Example no. 18
/* Cause an abnormal program termination with core-dump.  */
void
abort (void)
{
  struct sigaction act;
  sigset_t sigs;

  /* First acquire the lock.  */
  __libc_lock_lock_recursive (lock);

  /* Now it's for sure we are alone.  But recursive calls are possible.  */

  /* Unblock SIGABRT.  */
  if (stage == 0)
    {
      ++stage;
      if (__sigemptyset (&sigs) == 0 &&
	  __sigaddset (&sigs, SIGABRT) == 0)
	__sigprocmask (SIG_UNBLOCK, &sigs, (sigset_t *) NULL);
    }

  /* Flush all streams.  We cannot close them now because the user
     might have registered a handler for SIGABRT.  */
  if (stage == 1)
    {
      ++stage;
      fflush (NULL);
    }

  /* Send signal which possibly calls a user handler.  */
  if (stage == 2)
    {
      /* This stage is special: we must allow repeated calls of
	 `abort' when a user defined handler for SIGABRT is installed.
	 This is risky since the `raise' implementation might also
	 fail but I don't see another possibility.  */
      int save_stage = stage;

      stage = 0;
      __libc_lock_unlock_recursive (lock);

      raise (SIGABRT);

      __libc_lock_lock_recursive (lock);
      stage = save_stage + 1;
    }

  /* There was a handler installed.  Now remove it.  */
  if (stage == 3)
    {
      ++stage;
      memset (&act, '\0', sizeof (struct sigaction));
      act.sa_handler = SIG_DFL;
      __sigfillset (&act.sa_mask);
      act.sa_flags = 0;
      __sigaction (SIGABRT, &act, NULL);
    }

  /* Now close the streams, which also flushes any output the user-defined
     handler might have produced.  */
  if (stage == 4)
    {
      ++stage;
      __fcloseall ();
    }

  /* Try again.  */
  if (stage == 5)
    {
      ++stage;
      raise (SIGABRT);
    }

  /* Now try to abort using the system specific command.  */
  if (stage == 6)
    {
      ++stage;
      ABORT_INSTRUCTION;
    }

  /* If we can't signal ourselves and the abort instruction failed, exit.  */
  if (stage == 7)
    {
      ++stage;
      _exit (127);
    }

  /* If even this fails try to use the provided instruction to crash
     or otherwise make sure we never return.  */
  while (1)
    /* Try for ever and ever.  */
    ABORT_INSTRUCTION;
}
Example no. 19
/* We are going to use the `nanosleep' syscall of the kernel.  But the
   kernel does not implement the stupid SysV SIGCHLD vs. SIG_IGN
   behaviour for this syscall.  Therefore we have to emulate it here.  */
unsigned int sleep (unsigned int seconds)
{
    struct timespec ts = { .tv_sec = (long int) seconds, .tv_nsec = 0 };
    sigset_t set, oset;
    unsigned int result;

    /* This is not necessary but some buggy programs depend on this.  */
    if (seconds == 0)
	return 0;

    /* Linux will wake up the system call, nanosleep, when SIGCHLD
       arrives even if SIGCHLD is ignored.  We have to deal with it
       in libc.  We block SIGCHLD first.  */
    if (__sigemptyset (&set) < 0
	    || __sigaddset (&set, SIGCHLD) < 0
	    || sigprocmask (SIG_BLOCK, &set, &oset))
	return -1;

    /* If SIGCHLD is already blocked, we don't have to do anything.  */
    if (!__sigismember (&oset, SIGCHLD))
    {
	int saved_errno;
	struct sigaction oact;

	if (__sigemptyset (&set) < 0 || __sigaddset (&set, SIGCHLD) < 0)
	    return -1;

	/* We get the signal handler for SIGCHLD.  */
	if (sigaction (SIGCHLD, (struct sigaction *) NULL, &oact) < 0)
	{
	    saved_errno = errno;
	    /* Restore the original signal mask.  */
	    (void) sigprocmask (SIG_SETMASK, &oset, (sigset_t *) NULL);
	    __set_errno (saved_errno);
	    return -1;
	}

	if (oact.sa_handler == SIG_IGN)
	{
	    /* We should leave SIGCHLD blocked.  */
	    result = nanosleep (&ts, &ts);

	    saved_errno = errno;
	    /* Restore the original signal mask.  */
	    (void) sigprocmask (SIG_SETMASK, &oset, (sigset_t *) NULL);
	    __set_errno (saved_errno);
	}
	else
	{
	    /* We should unblock SIGCHLD.  Restore the original signal mask.  */
	    (void) sigprocmask (SIG_SETMASK, &oset, (sigset_t *) NULL);
	    result = nanosleep (&ts, &ts);
	}
    }
    else
	result = nanosleep (&ts, &ts);

    if (result != 0)
	/* Round remaining time.  */
	result = (unsigned int) ts.tv_sec + (ts.tv_nsec >= 500000000L);

    return result;
}
Example no. 20
static void pthread_initialize(void)
{
  struct sigaction sa;
  sigset_t mask;
#ifdef __ARCH_USE_MMU__
  struct rlimit limit;
  rlim_t max_stack;
#endif

  /* If already done (e.g. by a constructor called earlier!), bail out */
  if (__pthread_initial_thread_bos != NULL) return;
#ifdef TEST_FOR_COMPARE_AND_SWAP
  /* Test if compare-and-swap is available */
  __pthread_has_cas = compare_and_swap_is_available();
#endif
  /* For the initial stack, reserve at least STACK_SIZE bytes of stack
     below the current stack address, and align that on a
     STACK_SIZE boundary. */
  __pthread_initial_thread_bos =
    (char *)(((long)CURRENT_STACK_FRAME - 2 * STACK_SIZE) & ~(STACK_SIZE - 1));
  /* Update the descriptor for the initial thread. */
  __pthread_initial_thread.p_pid = getpid();
  /* If we have special thread_self processing, initialize that for the
     main thread now.  */
#ifdef INIT_THREAD_SELF
  INIT_THREAD_SELF(&__pthread_initial_thread, 0);
#endif
  /* The errno/h_errno variable of the main thread are the global ones.  */
  __pthread_initial_thread.p_errnop = &_errno;
  __pthread_initial_thread.p_h_errnop = &_h_errno;

#ifdef __UCLIBC_HAS_XLOCALE__
  /* The locale of the main thread is the current locale in use. */
  __pthread_initial_thread.locale = __curlocale_var;
#endif /* __UCLIBC_HAS_XLOCALE__ */

 {			   /* uClibc-specific stdio initialization for threads. */
	 FILE *fp;

	 _stdio_user_locking = 0;	/* 2 if threading not initialized */
	 for (fp = _stdio_openlist; fp != NULL; fp = fp->__nextopen) {
		 if (fp->__user_locking != 1) {
			 fp->__user_locking = 0;
		 }
	 }
 }

  /* Play with the stack size limit to make sure that no stack ever grows
     beyond STACK_SIZE minus two pages (one page for the thread descriptor
     immediately beyond, and one page to act as a guard page). */

#ifdef __ARCH_USE_MMU__
  /* We cannot allocate a huge chunk of memory to mmap all thread stacks later
   * on a non-MMU system. Thus, we don't need the rlimit either. -StS */
  getrlimit(RLIMIT_STACK, &limit);
  max_stack = STACK_SIZE - 2 * getpagesize();
  if (limit.rlim_cur > max_stack) {
    limit.rlim_cur = max_stack;
    setrlimit(RLIMIT_STACK, &limit);
  }
#else
  /* For non-MMU, the initial thread stack can reside anywhere in memory.
   * We don't have a way of knowing where the kernel started things -- top
   * or bottom (well, that isn't exactly true, but the solution is fairly
   * complex and error prone).  All we can determine here is an address
   * that lies within that stack.  Save that address as a reference so that
   * as other thread stacks are created, we can adjust the estimated bounds
   * of the initial thread's stack appropriately.
   *
   * This checking is handled in NOMMU_INITIAL_THREAD_BOUNDS(), so see that
   * for a few more details.
   */
  __pthread_initial_thread_mid = CURRENT_STACK_FRAME;
  __pthread_initial_thread_tos = (char *) -1;
  __pthread_initial_thread_bos = (char *) 1; /* set it non-zero so we know we have been here */
  PDEBUG("initial thread stack bounds: bos=%p, tos=%p\n",
	 __pthread_initial_thread_bos, __pthread_initial_thread_tos);
#endif /* __ARCH_USE_MMU__ */

  /* Setup signal handlers for the initial thread.
     Since signal handlers are shared between threads, these settings
     will be inherited by all other threads. */
  memset(&sa, 0, sizeof(sa));
  sa.sa_handler = pthread_handle_sigrestart;
  __libc_sigaction(__pthread_sig_restart, &sa, NULL);
  sa.sa_handler = pthread_handle_sigcancel;
  sigaddset(&sa.sa_mask, __pthread_sig_restart);
  __libc_sigaction(__pthread_sig_cancel, &sa, NULL);
  if (__pthread_sig_debug > 0) {
      sa.sa_handler = pthread_handle_sigdebug;
      __sigemptyset(&sa.sa_mask);
      __libc_sigaction(__pthread_sig_debug, &sa, NULL);
  }
  /* Initially, block __pthread_sig_restart. Will be unblocked on demand. */
  __sigemptyset(&mask);
  sigaddset(&mask, __pthread_sig_restart);
  sigprocmask(SIG_BLOCK, &mask, NULL);
  /* And unblock __pthread_sig_cancel if it has been blocked. */
  sigdelset(&mask, __pthread_sig_restart);
  sigaddset(&mask, __pthread_sig_cancel);
  sigprocmask(SIG_UNBLOCK, &mask, NULL);
  /* Register an exit function to kill all other threads. */
  /* Do it early so that user-registered atexit functions are called
     before pthread_onexit_process. */
  on_exit(pthread_onexit_process, NULL);
}
Example no. 21
void
__pthread_initialize_minimal_internal (void)
{
#ifndef SHARED
  /* Unlike in the dynamically linked case the dynamic linker has not
     taken care of initializing the TLS data structures.  */
  __libc_setup_tls (TLS_TCB_SIZE, TLS_TCB_ALIGN);

  /* We must prevent gcc from being clever and move any of the
     following code ahead of the __libc_setup_tls call.  This function
     will initialize the thread register which is subsequently
     used.  */
  __asm __volatile ("");
#endif

  /* Minimal initialization of the thread descriptor.  */
  struct pthread *pd = THREAD_SELF;
  INTERNAL_SYSCALL_DECL (err);
  pd->pid = pd->tid = INTERNAL_SYSCALL (set_tid_address, err, 1, &pd->tid);
#ifdef __PTHREAD_MUTEX_HAVE_PREV
  pd->robust_list.__prev = &pd->robust_list;
#endif
  pd->robust_list.__next = &pd->robust_list;
  THREAD_SETMEM (pd, specific[0], &pd->specific_1stblock[0]);
  THREAD_SETMEM (pd, user_stack, true);
  if (LLL_LOCK_INITIALIZER != 0)
    THREAD_SETMEM (pd, lock, LLL_LOCK_INITIALIZER);
#if HP_TIMING_AVAIL
  THREAD_SETMEM (pd, cpuclock_offset, GL(dl_cpuclock_offset));
#endif

  /* Set initial thread's stack block from 0 up to __libc_stack_end.
     It will be bigger than it actually is, but for unwind.c/pt-longjmp.c
     purposes this is good enough.  */
  THREAD_SETMEM (pd, stackblock_size, (size_t) __libc_stack_end);

  /* Initialize the list of all running threads with the main thread.  */
  INIT_LIST_HEAD (&__stack_user);
  list_add (&pd->list, &__stack_user);


  /* Install the cancellation signal handler.  If for some reason we
     cannot install the handler we do not abort.  Maybe we should, but
     it is only asynchronous cancellation which is affected.  */
  struct sigaction sa;
  sa.sa_sigaction = sigcancel_handler;
  sa.sa_flags = SA_SIGINFO;
  __sigemptyset (&sa.sa_mask);

  (void) __libc_sigaction (SIGCANCEL, &sa, NULL);

  /* Install the handler to change the threads' uid/gid.  */
  sa.sa_sigaction = sighandler_setxid;
  sa.sa_flags = SA_SIGINFO | SA_RESTART;

  (void) __libc_sigaction (SIGSETXID, &sa, NULL);

  /* The parent process might have left the signals blocked.  Just in
     case, unblock them.  We reuse the signal mask in the sigaction
     structure.  It is already cleared.  */
  __sigaddset (&sa.sa_mask, SIGCANCEL);
  __sigaddset (&sa.sa_mask, SIGSETXID);
  (void) INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_UNBLOCK, &sa.sa_mask,
			   NULL, _NSIG / 8);

  /* Get the size of the static and alignment requirements for the TLS
     block.  */
  size_t static_tls_align;
  _dl_get_tls_static_info (&__static_tls_size, &static_tls_align);

  /* Make sure the size takes all the alignments into account.  */
  if (STACK_ALIGN > static_tls_align)
    static_tls_align = STACK_ALIGN;
  __static_tls_align_m1 = static_tls_align - 1;

  __static_tls_size = roundup (__static_tls_size, static_tls_align);

  /* Determine the default allowed stack size.  This is the size used
     in case the user does not specify one.  */
  struct rlimit limit;
  if (getrlimit (RLIMIT_STACK, &limit) != 0
      || limit.rlim_cur == RLIM_INFINITY)
    /* The system limit is not usable.  Use an architecture-specific
       default.  */
    limit.rlim_cur = ARCH_STACK_DEFAULT_SIZE;
  else if (limit.rlim_cur < PTHREAD_STACK_MIN)
    /* The system limit is unusably small.
       Use the minimal size acceptable.  */
    limit.rlim_cur = PTHREAD_STACK_MIN;

  /* Make sure it meets the minimum size that allocate_stack
     (allocatestack.c) will demand, which depends on the page size.  */
  const uintptr_t pagesz = __sysconf (_SC_PAGESIZE);
  const size_t minstack = pagesz + __static_tls_size + MINIMAL_REST_STACK;
  if (limit.rlim_cur < minstack)
    limit.rlim_cur = minstack;

  /* Round the resource limit up to page size.  */
  limit.rlim_cur = (limit.rlim_cur + pagesz - 1) & -pagesz;
  __default_stacksize = limit.rlim_cur;

#ifdef SHARED
  /* Transfer the old value from the dynamic linker's internal location.  */
  *__libc_dl_error_tsd () = *(*GL(dl_error_catch_tsd)) ();
  GL(dl_error_catch_tsd) = &__libc_dl_error_tsd;

  /* Make __rtld_lock_{,un}lock_recursive use pthread_mutex_{,un}lock,
     keep the lock count from the ld.so implementation.  */
  GL(dl_rtld_lock_recursive) = (void *) INTUSE (__pthread_mutex_lock);
  GL(dl_rtld_unlock_recursive) = (void *) INTUSE (__pthread_mutex_unlock);
  unsigned int rtld_lock_count = GL(dl_load_lock).mutex.__data.__count;
  GL(dl_load_lock).mutex.__data.__count = 0;
  while (rtld_lock_count-- > 0)
    INTUSE (__pthread_mutex_lock) (&GL(dl_load_lock).mutex);

  GL(dl_make_stack_executable_hook) = &__make_stacks_executable;
#endif

  GL(dl_init_static_tls) = &__pthread_init_static_tls;

  /* Register the fork generation counter with the libc.  */
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
  __libc_multiple_threads_ptr =
#endif
    __libc_pthread_init (&__fork_generation, __reclaim_stacks,
			 ptr_pthread_functions);

  /* Determine whether the machine is SMP or not.  */
  __is_smp = is_smp_system ();
}
Example no. 22
int
lckpwdf (void)
{
  sigset_t saved_set;			/* Saved set of caught signals.  */
  struct sigaction saved_act;		/* Saved signal action.  */
  sigset_t new_set;			/* New set of caught signals.  */
  struct sigaction new_act;		/* New signal action.  */
  struct flock fl;			/* Information struct for locking.  */
  int result;

  if (lock_fd != -1)
    /* Still locked by own process.  */
    return -1;

  /* Prevent problems caused by multiple threads.  */
  __UCLIBC_MUTEX_LOCK(mylock);

  lock_fd = open (_PATH_PASSWD, O_WRONLY | O_CLOEXEC);
  if (lock_fd == -1) {
    goto DONE;
  }
#ifndef __ASSUME_O_CLOEXEC
  /* Make sure file gets correctly closed when process finished.  */
  fcntl (lock_fd, F_SETFD, FD_CLOEXEC);
#endif

  /* Now we have to get exclusive write access.  Since multiple
     processes could try this we won't stop when it first fails.
     Instead we set a timeout for the system call.  Once the timer
     expires it is likely that there are some problems which cannot be
     resolved by waiting.  (sa_flags has no SA_RESTART, so SIGALRM
     will interrupt fcntl(F_SETLKW) with EINTR.)

     It is important that we don't change the signal state.  We must
     restore the old signal behaviour.  */
  memset (&new_act, '\0', sizeof (new_act));
  new_act.sa_handler = noop_handler;
  __sigfillset (&new_act.sa_mask);

  /* Install new action handler for alarm and save old.
   * This never fails in Linux.  */
  sigaction (SIGALRM, &new_act, &saved_act);

  /* Now make sure the alarm signal is not blocked.  */
  __sigemptyset (&new_set);
  __sigaddset (&new_set, SIGALRM);
  sigprocmask (SIG_UNBLOCK, &new_set, &saved_set);

  /* Start timer.  If we cannot get the lock in the specified time we
     get a signal.  */
  alarm (TIMEOUT);

  /* Try to get the lock.  The constant guards only skip stores that the
     memset above already performed when the constant happens to be zero.  */
  memset (&fl, '\0', sizeof (fl));
  if (F_WRLCK)
    fl.l_type = F_WRLCK;
  if (SEEK_SET)
    fl.l_whence = SEEK_SET;
  result = fcntl (lock_fd, F_SETLKW, &fl);

  /* Clear alarm.  */
  alarm (0);

  sigprocmask (SIG_SETMASK, &saved_set, NULL);
  sigaction (SIGALRM, &saved_act, NULL);

  if (result < 0) {
    close(lock_fd);
    lock_fd = -1;
  }

DONE:
  __UCLIBC_MUTEX_UNLOCK(mylock);
  return 0; /* TODO: return result? */
}
Example no. 23
int
__lckpwdf (void)
{
  int flags;
  sigset_t saved_set;			/* Saved set of caught signals.  */
  struct sigaction saved_act;		/* Saved signal action.  */
  sigset_t new_set;			/* New set of caught signals.  */
  struct sigaction new_act;		/* New signal action.  */
  struct flock fl;			/* Information struct for locking.  */
  int result;

  if (lock_fd != -1)
    /* Still locked by own process.  */
    return -1;

  /* Prevent problems caused by multiple threads.  */
  __libc_lock_lock (lock);

  int oflags = O_WRONLY | O_CREAT;
#ifdef O_CLOEXEC
  oflags |= O_CLOEXEC;
#endif
  lock_fd = __open (PWD_LOCKFILE, oflags, 0600);
  if (lock_fd == -1)
    /* Cannot create lock file.  */
    RETURN_CLOSE_FD (-1);

#ifndef __ASSUME_O_CLOEXEC
# ifdef O_CLOEXEC
  if (__have_o_cloexec <= 0)
# endif
    {
      /* Make sure file gets correctly closed when process finished.  */
      flags = __fcntl (lock_fd, F_GETFD, 0);
      if (flags == -1)
	/* Cannot get file flags.  */
	RETURN_CLOSE_FD (-1);
# ifdef O_CLOEXEC
      if (__have_o_cloexec == 0)
	__have_o_cloexec = (flags & FD_CLOEXEC) == 0 ? -1 : 1;
      if (__have_o_cloexec < 0)
# endif
	{
	  flags |= FD_CLOEXEC;		/* Close on exit.  */
	  if (__fcntl (lock_fd, F_SETFD, flags) < 0)
	    /* Cannot set new flags.  */
	    RETURN_CLOSE_FD (-1);
	}
    }
#endif

  /* Now we have to get exclusive write access.  Since multiple
     processes could try this we won't stop when it first fails.
     Instead we set a timeout for the system call.  Once the timer
     expires it is likely that there are some problems which cannot be
     resolved by waiting.

     It is important that we don't change the signal state.  We must
     restore the old signal behaviour.  */
  memset (&new_act, '\0', sizeof (struct sigaction));
  new_act.sa_handler = noop_handler;
  __sigfillset (&new_act.sa_mask);
  new_act.sa_flags = 0ul;

  /* Install new action handler for alarm and save old.  */
  if (__sigaction (SIGALRM, &new_act, &saved_act) < 0)
    /* Cannot install signal handler.  */
    RETURN_CLOSE_FD (-1);

  /* Now make sure the alarm signal is not blocked.  */
  __sigemptyset (&new_set);
  __sigaddset (&new_set, SIGALRM);
  if (__sigprocmask (SIG_UNBLOCK, &new_set, &saved_set) < 0)
    RETURN_RESTORE_HANDLER (-1);

  /* Start timer.  If we cannot get the lock in the specified time we
     get a signal.  */
  alarm (TIMEOUT);

  /* Try to get the lock.  */
  memset (&fl, '\0', sizeof (struct flock));
  fl.l_type = F_WRLCK;
  fl.l_whence = SEEK_SET;
  result = __fcntl (lock_fd, F_SETLKW, &fl);

  RETURN_CLEAR_ALARM (result);
}
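The timeout idiom used above, in isolation: install a no-op SIGALRM handler without SA_RESTART, arm alarm(), and let a blocking fcntl(F_SETLKW) come back with EINTR if the lock cannot be obtained in time. The file name and timeout below are made up for illustration.

#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void noop (int sig) { (void) sig; }

int main (void)
{
  struct sigaction sa;
  struct flock fl;
  int fd = open ("/tmp/example.lock", O_WRONLY | O_CREAT, 0600);

  if (fd < 0)
    return 1;

  memset (&sa, 0, sizeof sa);
  sa.sa_handler = noop;          /* no SA_RESTART: F_SETLKW may fail with EINTR */
  sigaction (SIGALRM, &sa, NULL);

  memset (&fl, 0, sizeof fl);
  fl.l_type = F_WRLCK;
  fl.l_whence = SEEK_SET;

  alarm (15);                    /* give up after 15 seconds */
  if (fcntl (fd, F_SETLKW, &fl) < 0)
    perror ("fcntl (F_SETLKW)");
  alarm (0);

  close (fd);
  return 0;
}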
Example no. 24
/* Set the disposition for SIG.  */
__sighandler_t
sigset (int sig, __sighandler_t disp)
{
  struct sigaction act;
  struct sigaction oact;
  sigset_t set;
  sigset_t oset;

#ifdef SIG_HOLD
  /* Handle SIG_HOLD first.  */
  if (disp == SIG_HOLD)
    {
      /* Create an empty signal set.  */
      if (__sigemptyset (&set) < 0)
	return SIG_ERR;

      /* Add the specified signal.  */
      if (__sigaddset (&set, sig) < 0)
	return SIG_ERR;

      /* Add the signal set to the current signal mask.  */
      if (__sigprocmask (SIG_BLOCK, &set, &oset) < 0)
	return SIG_ERR;

      /* If the signal was already blocked signal this to the caller.  */
      if (__sigismember (&oset, sig))
	return SIG_HOLD;

      /* We need to determine whether a specific handler is installed.  */
      if (__sigaction (sig, NULL, &oact) < 0)
	return SIG_ERR;

      return oact.sa_handler;
    }
#endif	/* SIG_HOLD */

  /* Check signal extents to protect __sigismember.  */
  if (disp == SIG_ERR || sig < 1 || sig >= NSIG)
    {
      __set_errno (EINVAL);
      return SIG_ERR;
    }

  act.sa_handler = disp;
  if (__sigemptyset (&act.sa_mask) < 0)
    return SIG_ERR;
  act.sa_flags = 0;
  if (__sigaction (sig, &act, &oact) < 0)
    return SIG_ERR;

  /* Create an empty signal set.  */
  if (__sigemptyset (&set) < 0)
    return SIG_ERR;

  /* Add the specified signal.  */
  if (__sigaddset (&set, sig) < 0)
    return SIG_ERR;

  /* Remove the signal set from the current signal mask.  */
  if (__sigprocmask (SIG_UNBLOCK, &set, &oset) < 0)
    return SIG_ERR;

  /* If the signal was already blocked return SIG_HOLD.  */
  return __sigismember (&oset, sig) ? SIG_HOLD : oact.sa_handler;
}