/* Helper function to support starting threads for SIGEV_THREAD.  */
static void *
timer_helper_thread (void *arg)
{
    /* Wait for the SIGTIMER signal, allowing the setXid signal, and
       nothing else.  */
    sigset_t ss;
    sigemptyset (&ss);
    __sigaddset (&ss, SIGTIMER);

    syscall (SYS_thr_self, &__helper_tid);
    sem_post (&__helper_tid_semaphore);

    /* Endless loop of waiting for signals.  The loop is only ended when
       the thread is canceled.  */
    while (1)
    {
        siginfo_t si;

        /* sigwaitinfo cannot be used here, since it deletes
        SIGCANCEL == SIGTIMER from the set.  */

        int oldtype = LIBC_CANCEL_ASYNC ();

        /* XXX The size argument hopefully will have to be changed to the
        real size of the user-level sigset_t.  */
        int result = sigtimedwait (&ss, &si, NULL);

        LIBC_CANCEL_RESET (oldtype);

        if (result > 0)
        {
            if (si.si_code == SI_TIMER)
            {
                struct timer *tk = (struct timer *) si.si_value.sival_ptr;

                /* Check the timer is still used and will not go away
                while we are reading the values here.  */
                pthread_mutex_lock (&__active_timer_sigev_thread_lock);

                struct timer *runp = __active_timer_sigev_thread;
                while (runp != NULL)
                    if (runp == tk)
                        break;
                    else
                        runp = runp->next;

                if (runp != NULL)
                {
                    struct thread_start_data *td = malloc (sizeof (*td));

                    /* There is not much we can do if the allocation fails.  */
                    if (td != NULL)
                    {
                        /* This is the signal we are waiting for.  */
                        td->thrfunc = tk->thrfunc;
                        td->sival = tk->sival;

                        pthread_t th;
                        (void) pthread_create (&th, &tk->attr,
                                               timer_sigev_thread, td);
                    }
                }

                pthread_mutex_unlock (&__active_timer_sigev_thread_lock);
            }
            else if (si.si_code == SI_LWP
                     /* Backward compatibility (see rev 211732 in -CURRENT).  */
                     || si.si_code == SI_USER)
                /* The thread is canceled.  */
                pthread_exit (NULL);
        }
    }
}
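
The helper hands each expired SIGEV_THREAD notification to a start routine named timer_sigev_thread through a heap-allocated struct thread_start_data, neither of which appears in this excerpt. A minimal sketch of what they presumably look like in the same source file (field names are taken from the copies made above; everything else is an assumption):

/* Sketch only: layout inferred from the fields the helper copies.  */
struct thread_start_data
{
  void (*thrfunc) (sigval_t);	/* User's SIGEV_THREAD notification function.  */
  sigval_t sival;		/* Value handed to that function.  */
};

/* Presumed trampoline started by pthread_create above: call the user's
   notification function once and release the packet.  */
static void *
timer_sigev_thread (void *arg)
{
  struct thread_start_data *td = (struct thread_start_data *) arg;
  void (*thrfunc) (sigval_t) = td->thrfunc;
  sigval_t sival = td->sival;

  free (td);
  thrfunc (sival);
  return NULL;
}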
Example #2
/* Execute LINE as a shell command, returning its status.  */
static int
do_system (const char *line)
{
  int status, save;
  pid_t pid;
  struct sigaction sa;
#ifndef _LIBC_REENTRANT
  struct sigaction intr, quit;
#endif
  sigset_t omask;

  sa.sa_handler = SIG_IGN;
  sa.sa_flags = 0;
  __sigemptyset (&sa.sa_mask);

  DO_LOCK ();
  if (ADD_REF () == 0)
    {
      if (__sigaction (SIGINT, &sa, &intr) < 0)
	{
	  SUB_REF ();
	  goto out;
	}
      if (__sigaction (SIGQUIT, &sa, &quit) < 0)
	{
	  save = errno;
	  SUB_REF ();
	  goto out_restore_sigint;
	}
    }
  DO_UNLOCK ();

  /* We reuse the bitmap in the 'sa' structure.  */
  __sigaddset (&sa.sa_mask, SIGCHLD);
  save = errno;
  if (__sigprocmask (SIG_BLOCK, &sa.sa_mask, &omask) < 0)
    {
#ifndef _LIBC
      if (errno == ENOSYS)
	__set_errno (save);
      else
#endif
	{
	  DO_LOCK ();
	  if (SUB_REF () == 0)
	    {
	      save = errno;
	      (void) __sigaction (SIGQUIT, &quit, (struct sigaction *) NULL);
	    out_restore_sigint:
	      (void) __sigaction (SIGINT, &intr, (struct sigaction *) NULL);
	      __set_errno (save);
	    }
	out:
	  DO_UNLOCK ();
	  return -1;
	}
    }

#ifdef CLEANUP_HANDLER
  CLEANUP_HANDLER;
#endif

#ifdef FORK
  pid = FORK ();
#else
  pid = __fork ();
#endif
  if (pid == (pid_t) 0)
    {
      /* Child side.  */
      const char *new_argv[4];
      new_argv[0] = SHELL_NAME;
      new_argv[1] = "-c";
      new_argv[2] = line;
      new_argv[3] = NULL;

      /* Restore the signals.  */
      (void) __sigaction (SIGINT, &intr, (struct sigaction *) NULL);
      (void) __sigaction (SIGQUIT, &quit, (struct sigaction *) NULL);
      (void) __sigprocmask (SIG_SETMASK, &omask, (sigset_t *) NULL);
      INIT_LOCK ();

      /* Exec the shell.  */
      SB_LOG(SB_LOGLEVEL_DEBUG, "system(%s)", line);
      (void) __execve (SHELL_PATH, (char *const *) new_argv, __environ);
      _exit (127);
    }
  else if (pid < (pid_t) 0)
    /* The fork failed.  */
    status = -1;
  else
    /* Parent side.  */
    {
      /* Note the system() is a cancellation point.  But since we call
	 waitpid() which itself is a cancellation point we do not
	 have to do anything here.  */
      if (TEMP_FAILURE_RETRY (__waitpid (pid, &status, 0)) != pid)
	status = -1;
      SB_LOG(SB_LOGLEVEL_DEBUG, "system: waitpid => status=%d", status);
    }

#ifdef CLEANUP_HANDLER
  CLEANUP_RESET;
#endif

  save = errno;
  DO_LOCK ();
  if ((SUB_REF () == 0
       && (__sigaction (SIGINT, &intr, (struct sigaction *) NULL)
	   | __sigaction (SIGQUIT, &quit, (struct sigaction *) NULL)) != 0)
      || __sigprocmask (SIG_SETMASK, &omask, (sigset_t *) NULL) != 0)
    {
#ifndef _LIBC
      /* glibc cannot be used on systems without waitpid.  */
      if (errno == ENOSYS)
	__set_errno (save);
      else
#endif
	status = -1;
    }
  DO_UNLOCK ();

  return status;
}
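
do_system is the static worker; the exported system() entry point is not shown. A plausible wrapper, modeled on the usual glibc arrangement (SINGLE_THREAD_P, LIBC_CANCEL_ASYNC and LIBC_CANCEL_RESET are assumed to be available in the surrounding file, as in the first example):

/* Sketch of the public entry point: a NULL line only asks whether a
   shell is available, and the call acts as a cancellation point.  */
int
__libc_system (const char *line)
{
  if (line == NULL)
    /* Check that we have a command processor available.  */
    return do_system ("exit 0") == 0;

  if (SINGLE_THREAD_P)
    return do_system (line);

  int oldtype = LIBC_CANCEL_ASYNC ();
  int result = do_system (line);
  LIBC_CANCEL_RESET (oldtype);

  return result;
}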
Example #3
/* We are going to use the `nanosleep' syscall of the kernel.  But the
   kernel does not implement the stupid SysV SIGCHLD vs. SIG_IGN
   behaviour for this syscall.  Therefore we have to emulate it here.  */
unsigned int
__sleep (unsigned int seconds)
{
  const unsigned int max
    = (unsigned int) (((unsigned long int) (~((time_t) 0))) >> 1);
  struct timespec ts;
  sigset_t set, oset;
  unsigned int result;

  /* This is not necessary but some buggy programs depend on this.  */
  if (__glibc_unlikely (seconds == 0))
    {
#ifdef CANCELLATION_P
      CANCELLATION_P (THREAD_SELF);
#endif
      return 0;
    }

  ts.tv_sec = 0;
  ts.tv_nsec = 0;
 again:
  if (sizeof (ts.tv_sec) <= sizeof (seconds))
    {
      /* Since SECONDS is unsigned assigning the value to .tv_sec can
	 overflow it.  In this case we have to wait in steps.  */
      ts.tv_sec += MIN (seconds, max);
      seconds -= (unsigned int) ts.tv_sec;
    }
  else
    {
      ts.tv_sec = (time_t) seconds;
      seconds = 0;
    }

  /* Linux will wake up the system call, nanosleep, when SIGCHLD
     arrives even if SIGCHLD is ignored.  We have to deal with it
     in libc.  We block SIGCHLD first.  */
  __sigemptyset (&set);
  __sigaddset (&set, SIGCHLD);
  if (__sigprocmask (SIG_BLOCK, &set, &oset))
    return -1;

  /* If SIGCHLD is already blocked, we don't have to do anything.  */
  if (!__sigismember (&oset, SIGCHLD))
    {
      int saved_errno;
      struct sigaction oact;

      __sigemptyset (&set);
      __sigaddset (&set, SIGCHLD);

      /* We get the signal handler for SIGCHLD.  */
      if (__sigaction (SIGCHLD, (struct sigaction *) NULL, &oact) < 0)
	{
	  saved_errno = errno;
	  /* Restore the original signal mask.  */
	  (void) __sigprocmask (SIG_SETMASK, &oset, (sigset_t *) NULL);
	  __set_errno (saved_errno);
	  return -1;
	}

      /* Note the sleep() is a cancellation point.  But since we call
	 nanosleep() which itself is a cancellation point we do not
	 have to do anything here.  */
      if (oact.sa_handler == SIG_IGN)
	{
	  //__libc_cleanup_push (cl, &oset);

	  /* We should leave SIGCHLD blocked.  */
	  while (1)
	    {
	      result = __nanosleep (&ts, &ts);

	      if (result != 0 || seconds == 0)
		break;

	      if (sizeof (ts.tv_sec) <= sizeof (seconds))
		{
		  ts.tv_sec = MIN (seconds, max);
		  seconds -= (unsigned int) ts.tv_sec;
		}
	    }

	  //__libc_cleanup_pop (0);

	  saved_errno = errno;
	  /* Restore the original signal mask.  */
	  (void) __sigprocmask (SIG_SETMASK, &oset, (sigset_t *) NULL);
	  __set_errno (saved_errno);

	  goto out;
	}

      /* We should unblock SIGCHLD.  Restore the original signal mask.  */
      (void) __sigprocmask (SIG_SETMASK, &oset, (sigset_t *) NULL);
    }

  result = __nanosleep (&ts, &ts);
  if (result == 0 && seconds != 0)
    goto again;

 out:
  if (result != 0)
    /* Round remaining time.  */
    result = seconds + (unsigned int) ts.tv_sec + (ts.tv_nsec >= 500000000L);

  return result;
}
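
The commented-out __libc_cleanup_push call refers to a cleanup routine cl that is not part of this excerpt. Judging from its argument (&oset), it would simply restore the saved signal mask if the thread were cancelled inside nanosleep; a sketch:

/* Presumed cancellation cleanup handler: ARG points at the saved signal
   mask, which must be restored if the thread is cancelled while sleeping
   with SIGCHLD blocked.  */
static void
cl (void *arg)
{
  (void) __sigprocmask (SIG_SETMASK, (sigset_t *) arg, NULL);
}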
Example #4
void
__pthread_initialize_minimal_internal (void)
{
#ifndef SHARED
  /* Unlike in the dynamically linked case the dynamic linker has not
     taken care of initializing the TLS data structures.  */
  __libc_setup_tls (TLS_TCB_SIZE, TLS_TCB_ALIGN);

  /* We must prevent gcc from being clever and move any of the
     following code ahead of the __libc_setup_tls call.  This function
     will initialize the thread register which is subsequently
     used.  */
  __asm __volatile ("");
#endif

  /* Minimal initialization of the thread descriptor.  */
  struct pthread *pd = THREAD_SELF;
  INTERNAL_SYSCALL_DECL (err);
  pd->pid = pd->tid = INTERNAL_SYSCALL (set_tid_address, err, 1, &pd->tid);
  THREAD_SETMEM (pd, specific[0], &pd->specific_1stblock[0]);
  THREAD_SETMEM (pd, user_stack, true);
  if (LLL_LOCK_INITIALIZER != 0)
    THREAD_SETMEM (pd, lock, LLL_LOCK_INITIALIZER);
#if HP_TIMING_AVAIL
  THREAD_SETMEM (pd, cpuclock_offset, GL(dl_cpuclock_offset));
#endif

  /* Initialize the robust mutex data.  */
#ifdef __PTHREAD_MUTEX_HAVE_PREV
  pd->robust_prev = &pd->robust_head;
#endif
  pd->robust_head.list = &pd->robust_head;
#ifdef __NR_set_robust_list
  pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
				  - offsetof (pthread_mutex_t,
					      __data.__list.__next));
  int res = INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
			      sizeof (struct robust_list_head));
  if (INTERNAL_SYSCALL_ERROR_P (res, err))
#endif
    set_robust_list_not_avail ();

#ifndef __ASSUME_PRIVATE_FUTEX
  /* Private futexes are always used (at least internally) so that
     doing the test once this early is beneficial.  */
  {
    int word = 0;
    word = INTERNAL_SYSCALL (futex, err, 3, &word,
			    FUTEX_WAKE | FUTEX_PRIVATE_FLAG, 1);
    if (!INTERNAL_SYSCALL_ERROR_P (word, err))
      THREAD_SETMEM (pd, header.private_futex, FUTEX_PRIVATE_FLAG);
  }

  /* Private futexes have been introduced earlier than the
     FUTEX_CLOCK_REALTIME flag.  We don't have to run the test if we
     know the former are not supported.  This also means we know the
     kernel will return ENOSYS for unknown operations.  */
  if (THREAD_GETMEM (pd, header.private_futex) != 0)
#endif
#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
    {
      int word = 0;
      /* NB: the syscall actually takes six parameters.  The last is the
	 bit mask.  But since we will not actually wait at all the value
	 is irrelevant.  Given that passing six parameters is difficult
	 on some architectures we just pass whatever random value the
	 calling convention calls for to the kernel.  It causes no harm.  */
      word = INTERNAL_SYSCALL (futex, err, 5, &word,
			       FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME
			       | FUTEX_PRIVATE_FLAG, 1, NULL, 0);
      assert (INTERNAL_SYSCALL_ERROR_P (word, err));
      if (INTERNAL_SYSCALL_ERRNO (word, err) != ENOSYS)
	__set_futex_clock_realtime ();
    }
#endif

  /* Set initial thread's stack block from 0 up to __libc_stack_end.
     It will be bigger than it actually is, but for unwind.c/pt-longjmp.c
     purposes this is good enough.  */
  THREAD_SETMEM (pd, stackblock_size, (size_t) __libc_stack_end);

  /* Initialize the list of all running threads with the main thread.  */
  INIT_LIST_HEAD (&__stack_user);
  list_add (&pd->list, &__stack_user);

  /* Before initializing __stack_user, the debugger could not find us and
     had to set __nptl_initial_report_events.  Propagate its setting.  */
  THREAD_SETMEM (pd, report_events, __nptl_initial_report_events);

  /* Install the cancellation signal handler.  If for some reason we
     cannot install the handler we do not abort.  Maybe we should, but
     it is only asynchronous cancellation which is affected.  */
  struct sigaction sa;
  sa.sa_sigaction = sigcancel_handler;
  sa.sa_flags = SA_SIGINFO;
  __sigemptyset (&sa.sa_mask);

  (void) __libc_sigaction (SIGCANCEL, &sa, NULL);

  /* Install the handler to change the threads' uid/gid.  */
  sa.sa_sigaction = sighandler_setxid;
  sa.sa_flags = SA_SIGINFO | SA_RESTART;

  (void) __libc_sigaction (SIGSETXID, &sa, NULL);

  /* The parent process might have left the signals blocked.  Just in
     case, unblock it.  We reuse the signal mask in the sigaction
     structure.  It is already cleared.  */
  __sigaddset (&sa.sa_mask, SIGCANCEL);
  __sigaddset (&sa.sa_mask, SIGSETXID);
  (void) INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_UNBLOCK, &sa.sa_mask,
			   NULL, _NSIG / 8);

  /* Get the size of the static and alignment requirements for the TLS
     block.  */
  size_t static_tls_align;
  _dl_get_tls_static_info (&__static_tls_size, &static_tls_align);

  /* Make sure the size takes all the alignments into account.  */
  if (STACK_ALIGN > static_tls_align)
    static_tls_align = STACK_ALIGN;
  __static_tls_align_m1 = static_tls_align - 1;

  __static_tls_size = roundup (__static_tls_size, static_tls_align);

  /* Determine the default allowed stack size.  This is the size used
     in case the user does not specify one.  */
  struct rlimit limit;
  if (getrlimit (RLIMIT_STACK, &limit) != 0
      || limit.rlim_cur == RLIM_INFINITY)
    /* The system limit is not usable.  Use an architecture-specific
       default.  */
    limit.rlim_cur = ARCH_STACK_DEFAULT_SIZE;
  else if (limit.rlim_cur < PTHREAD_STACK_MIN)
    /* The system limit is unusably small.
       Use the minimal size acceptable.  */
    limit.rlim_cur = PTHREAD_STACK_MIN;

  /* Make sure it meets the minimum size that allocate_stack
     (allocatestack.c) will demand, which depends on the page size.  */
  const uintptr_t pagesz = __sysconf (_SC_PAGESIZE);
  const size_t minstack = pagesz + __static_tls_size + MINIMAL_REST_STACK;
  if (limit.rlim_cur < minstack)
    limit.rlim_cur = minstack;

  /* Round the resource limit up to page size.  */
  limit.rlim_cur = (limit.rlim_cur + pagesz - 1) & -pagesz;
  __default_stacksize = limit.rlim_cur;

#ifdef SHARED
  /* Transfer the old value from the dynamic linker's internal location.  */
  *__libc_dl_error_tsd () = *(*GL(dl_error_catch_tsd)) ();
  GL(dl_error_catch_tsd) = &__libc_dl_error_tsd;

  /* Make __rtld_lock_{,un}lock_recursive use pthread_mutex_{,un}lock,
     keep the lock count from the ld.so implementation.  */
  GL(dl_rtld_lock_recursive) = (void *) INTUSE (__pthread_mutex_lock);
  GL(dl_rtld_unlock_recursive) = (void *) INTUSE (__pthread_mutex_unlock);
  unsigned int rtld_lock_count = GL(dl_load_lock).mutex.__data.__count;
  GL(dl_load_lock).mutex.__data.__count = 0;
  while (rtld_lock_count-- > 0)
    INTUSE (__pthread_mutex_lock) (&GL(dl_load_lock).mutex);

  GL(dl_make_stack_executable_hook) = &__make_stacks_executable;
#endif

  GL(dl_init_static_tls) = &__pthread_init_static_tls;

  GL(dl_wait_lookup_done) = &__wait_lookup_done;

  /* Register the fork generation counter with the libc.  */
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
  __libc_multiple_threads_ptr =
#endif
    __libc_pthread_init (&__fork_generation, __reclaim_stacks,
			 ptr_pthread_functions);

  /* Determine whether the machine is SMP or not.  */
  __is_smp = is_smp_system ();
}
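
SIGCANCEL, SIGSETXID and SIGTIMER are internal names, not standard signals. In NPTL they are conventionally mapped onto the reserved low real-time signals, roughly as follows (the exact numbers are an implementation detail and are shown here only as an illustration):

/* Illustrative mapping of the internal signal names used above.  */
#define SIGCANCEL	__SIGRTMIN	  /* Thread cancellation.  */
#define SIGTIMER	SIGCANCEL	  /* Shared with SIGEV_THREAD timers.  */
#define SIGSETXID	(__SIGRTMIN + 1)  /* Broadcast of setuid/setgid to all threads.  */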
Example #5
/* Execute LINE as a shell command, returning its status.  */
static int
do_system (const char *line)
{
  int status, save;
  pid_t pid;
  struct sigaction sa;
  sigset_t omask;

  memset(&sa, 0, sizeof(sa));
  sa.sa_handler = SIG_IGN;
  /*sa.sa_flags = 0; - done by memset */
  /*__sigemptyset (&sa.sa_mask); - done by memset */

  DO_LOCK ();
  if (ADD_REF () == 0)
    {
      if (sigaction (SIGINT, &sa, &intr) < 0)
	{
	  SUB_REF ();
	  goto out;
	}
      if (sigaction (SIGQUIT, &sa, &quit) < 0)
	{
	  save = errno;
	  SUB_REF ();
	  goto out_restore_sigint;
	}
    }
  DO_UNLOCK ();

  /* We reuse the bitmap in the 'sa' structure.  */
  __sigaddset (&sa.sa_mask, SIGCHLD);
  save = errno;
  if (sigprocmask (SIG_BLOCK, &sa.sa_mask, &omask) < 0)
    {
      DO_LOCK ();
      if (SUB_REF () == 0)
	{
	  save = errno;
	  (void) sigaction (SIGQUIT, &quit, (struct sigaction *) NULL);
	out_restore_sigint:
	  (void) sigaction (SIGINT, &intr, (struct sigaction *) NULL);
	  __set_errno (save);
	}
    out:
      DO_UNLOCK ();
      return -1;
    }

  CLEANUP_HANDLER;

  pid = FORK ();
  if (pid == (pid_t) 0)
    {
      /* Child side.  */
      const char *new_argv[4];
      new_argv[0] = "/bin/sh";
      new_argv[1] = "-c";
      new_argv[2] = line;
      new_argv[3] = NULL;

      /* Restore the signals.  */
      (void) sigaction (SIGINT, &intr, (struct sigaction *) NULL);
      (void) sigaction (SIGQUIT, &quit, (struct sigaction *) NULL);
      (void) sigprocmask (SIG_SETMASK, &omask, (sigset_t *) NULL);
      INIT_LOCK ();

      /* Exec the shell.  */
      (void) execve ("/bin/sh", (char *const *) new_argv, __environ);
      _exit (127);
    }
  else if (pid < (pid_t) 0)
    /* The fork failed.  */
    status = -1;
  else
    /* Parent side.  */
    {
      /* Note the system() is a cancellation point.  But since we call
	 waitpid() which itself is a cancellation point we do not
	 have to do anything here.  */
      if (TEMP_FAILURE_RETRY (waitpid (pid, &status, 0)) != pid)
	status = -1;
    }

  CLEANUP_RESET;

  save = errno;
  DO_LOCK ();
  if ((SUB_REF () == 0
       && (sigaction (SIGINT, &intr, (struct sigaction *) NULL)
	   | sigaction (SIGQUIT, &quit, (struct sigaction *) NULL)) != 0)
      || sigprocmask (SIG_SETMASK, &omask, (sigset_t *) NULL) != 0)
    {
	status = -1;
    }
  DO_UNLOCK ();

  return status;
}
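
DO_LOCK, DO_UNLOCK, INIT_LOCK, ADD_REF and SUB_REF are defined elsewhere in the file. In a reentrant build they typically reference-count the shared SIGINT/SIGQUIT dispositions so that nested or concurrent system() calls install the ignore handlers only once; a sketch of such definitions (the lock primitives are assumptions):

/* Sketch: reference-counted signal-disposition bookkeeping for system().  */
static struct sigaction intr, quit;	/* Saved dispositions, shared by all calls.  */
static int sa_refcntr;			/* Number of system() invocations in flight.  */
__libc_lock_define_initialized (static, lock);

#define DO_LOCK()   __libc_lock_lock (lock)
#define DO_UNLOCK() __libc_lock_unlock (lock)
#define INIT_LOCK() ({ __libc_lock_init (lock); sa_refcntr = 0; })
#define ADD_REF()   sa_refcntr++
#define SUB_REF()   --sa_refcntr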
Example #6
/* Cause an abnormal program termination with core-dump.  */
void
abort (void)
{
  struct sigaction act;
  sigset_t sigs;

  /* First acquire the lock.  */
  __libc_lock_lock_recursive (lock);

  /* Now it's for sure we are alone.  But recursive calls are possible.  */

  /* Unblock SIGABRT.  */
  if (stage == 0)
    {
      ++stage;
      if (__sigemptyset (&sigs) == 0 &&
	  __sigaddset (&sigs, SIGABRT) == 0)
	__sigprocmask (SIG_UNBLOCK, &sigs, (sigset_t *) NULL);
    }

  /* Flush all streams.  We cannot close them now because the user
     might have registered a handler for SIGABRT.  */
  if (stage == 1)
    {
      ++stage;
      fflush (NULL);
    }

  /* Send signal which possibly calls a user handler.  */
  if (stage == 2)
    {
      /* This stage is special: we must allow repeated calls of
	 `abort' when a user defined handler for SIGABRT is installed.
	 This is risky since the `raise' implementation might also
	 fail but I don't see another possibility.  */
      int save_stage = stage;

      stage = 0;
      __libc_lock_unlock_recursive (lock);

      raise (SIGABRT);

      __libc_lock_lock_recursive (lock);
      stage = save_stage + 1;
    }

  /* There was a handler installed.  Now remove it.  */
  if (stage == 3)
    {
      ++stage;
      memset (&act, '\0', sizeof (struct sigaction));
      act.sa_handler = SIG_DFL;
      __sigfillset (&act.sa_mask);
      act.sa_flags = 0;
      __sigaction (SIGABRT, &act, NULL);
    }

  /* Now close the streams, which also flushes any output the
     user-defined handler might have produced.  */
  if (stage == 4)
    {
      ++stage;
      __fcloseall ();
    }

  /* Try again.  */
  if (stage == 5)
    {
      ++stage;
      raise (SIGABRT);
    }

  /* Now try to abort using the system specific command.  */
  if (stage == 6)
    {
      ++stage;
      ABORT_INSTRUCTION;
    }

  /* If we can't signal ourselves and the abort instruction failed, exit.  */
  if (stage == 7)
    {
      ++stage;
      _exit (127);
    }

  /* If even this fails try to use the provided instruction to crash
     or otherwise make sure we never return.  */
  while (1)
    /* Try for ever and ever.  */
    ABORT_INSTRUCTION;
}
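
ABORT_INSTRUCTION is an architecture-specific macro. On x86, for instance, it is commonly defined as a privileged instruction that traps from user mode, so the process dies even if SIGABRT is blocked or its handler returns; other architectures may define it differently or leave it empty, in which case the final loop simply spins.

/* Example definition for x86 (illustration only): HLT is privileged, so
   executing it in user mode raises a fatal fault.  */
#define ABORT_INSTRUCTION  __asm__ ("hlt")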
Example #7
int
__lckpwdf (void)
{
  int flags;
  sigset_t saved_set;			/* Saved set of caught signals.  */
  struct sigaction saved_act;		/* Saved signal action.  */
  sigset_t new_set;			/* New set of caught signals.  */
  struct sigaction new_act;		/* New signal action.  */
  struct flock fl;			/* Information struct for locking.  */
  int result;

  if (lock_fd != -1)
    /* Still locked by own process.  */
    return -1;

  /* Prevent problems caused by multiple threads.  */
  __libc_lock_lock (lock);

  int oflags = O_WRONLY | O_CREAT;
#ifdef O_CLOEXEC
  oflags |= O_CLOEXEC;
#endif
  lock_fd = __open (PWD_LOCKFILE, oflags, 0600);
  if (lock_fd == -1)
    /* Cannot create lock file.  */
    RETURN_CLOSE_FD (-1);

#ifndef __ASSUME_O_CLOEXEC
# ifdef O_CLOEXEC
  if (__have_o_cloexec <= 0)
# endif
    {
      /* Make sure file gets correctly closed when process finished.  */
      flags = __fcntl (lock_fd, F_GETFD, 0);
      if (flags == -1)
	/* Cannot get file flags.  */
	RETURN_CLOSE_FD (-1);
# ifdef O_CLOEXEC
      if (__have_o_cloexec == 0)
	__have_o_cloexec = (flags & FD_CLOEXEC) == 0 ? -1 : 1;
      if (__have_o_cloexec < 0)
# endif
	{
	  flags |= FD_CLOEXEC;		/* Close on exit.  */
	  if (__fcntl (lock_fd, F_SETFD, flags) < 0)
	    /* Cannot set new flags.  */
	    RETURN_CLOSE_FD (-1);
	}
    }
#endif

  /* Now we have to get exclusive write access.  Since multiple
     processes could try this we won't stop when it first fails.
     Instead we set a timeout for the system call.  Once the timer
     expires it is likely that there are some problems which cannot be
     resolved by waiting.

     It is important that we don't change the signal state.  We must
     restore the old signal behaviour.  */
  memset (&new_act, '\0', sizeof (struct sigaction));
  new_act.sa_handler = noop_handler;
  __sigfillset (&new_act.sa_mask);
  new_act.sa_flags = 0ul;

  /* Install new action handler for alarm and save old.  */
  if (__sigaction (SIGALRM, &new_act, &saved_act) < 0)
    /* Cannot install signal handler.  */
    RETURN_CLOSE_FD (-1);

  /* Now make sure the alarm signal is not blocked.  */
  __sigemptyset (&new_set);
  __sigaddset (&new_set, SIGALRM);
  if (__sigprocmask (SIG_UNBLOCK, &new_set, &saved_set) < 0)
    RETURN_RESTORE_HANDLER (-1);

  /* Start timer.  If we cannot get the lock in the specified time we
     get a signal.  */
  alarm (TIMEOUT);

  /* Try to get the lock.  */
  memset (&fl, '\0', sizeof (struct flock));
  fl.l_type = F_WRLCK;
  fl.l_whence = SEEK_SET;
  result = __fcntl (lock_fd, F_SETLKW, &fl);

  RETURN_CLEAR_ALARM (result);
}
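
The surrounding context (TIMEOUT, lock_fd, noop_handler and the RETURN_* macros) is not shown. The key idea is that the SIGALRM handler does nothing at all: because sa_flags does not include SA_RESTART, the pending fcntl(F_SETLKW) returns with EINTR when the alarm fires, which bounds the wait. A sketch of the minimal pieces:

/* Sketch of the file-level context assumed by __lckpwdf.  */
#define TIMEOUT 15			/* Seconds to wait for the lock.  */

static int lock_fd = -1;		/* Descriptor of the held lock file.  */

static void
noop_handler (int sig)
{
  /* Do nothing: merely interrupting fcntl (F_SETLKW) is the point.  */
}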
Example #8
int
lckpwdf (void)
{
  sigset_t saved_set;			/* Saved set of caught signals.  */
  struct sigaction saved_act;		/* Saved signal action.  */
  sigset_t new_set;			/* New set of caught signals.  */
  struct sigaction new_act;		/* New signal action.  */
  struct flock fl;			/* Information struct for locking.  */
  int result;

  if (lock_fd != -1)
    /* Still locked by own process.  */
    return -1;

  /* Prevent problems caused by multiple threads.  */
  __UCLIBC_MUTEX_LOCK(mylock);

  lock_fd = open (_PATH_PASSWD, O_WRONLY | O_CLOEXEC);
  if (lock_fd == -1) {
    goto DONE;
  }
#ifndef __ASSUME_O_CLOEXEC
  /* Make sure file gets correctly closed when process finished.  */
  fcntl (lock_fd, F_SETFD, FD_CLOEXEC);
#endif

  /* Now we have to get exclusive write access.  Since multiple
     processes could try this we won't stop when it first fails.
     Instead we set a timeout for the system call.  Once the timer
     expires it is likely that there are some problems which cannot be
     resolved by waiting.  (sa_flags has no SA_RESTART, so SIGALRM
     will interrupt fcntl(F_SETLKW) with EINTR.)

     It is important that we don't change the signal state.  We must
     restore the old signal behaviour.  */
  memset (&new_act, '\0', sizeof (new_act));
  new_act.sa_handler = noop_handler;
  __sigfillset (&new_act.sa_mask);

  /* Install new action handler for alarm and save old.
   * This never fails in Linux.  */
  sigaction (SIGALRM, &new_act, &saved_act);

  /* Now make sure the alarm signal is not blocked.  */
  __sigemptyset (&new_set);
  __sigaddset (&new_set, SIGALRM);
  sigprocmask (SIG_UNBLOCK, &new_set, &saved_set);

  /* Start timer.  If we cannot get the lock in the specified time we
     get a signal.  */
  alarm (TIMEOUT);

  /* Try to get the lock.  The struct is already zeroed by memset, so the
     assignments can be skipped whenever the constant happens to be 0.  */
  memset (&fl, '\0', sizeof (fl));
  if (F_WRLCK)
    fl.l_type = F_WRLCK;
  if (SEEK_SET)
    fl.l_whence = SEEK_SET;
  result = fcntl (lock_fd, F_SETLKW, &fl);

  /* Clear alarm.  */
  alarm (0);

  sigprocmask (SIG_SETMASK, &saved_set, NULL);
  sigaction (SIGALRM, &saved_act, NULL);

  if (result < 0) {
    close(lock_fd);
    lock_fd = -1;
  }

DONE:
  __UCLIBC_MUTEX_UNLOCK(mylock);
  /* Report failure if we never obtained (or had to give up) the lock.  */
  return lock_fd == -1 ? -1 : 0;
}
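
For reference, the caller-side pattern for this interface: lckpwdf() serializes writers of the password database and ulckpwdf() (its counterpart, not shown in this excerpt) releases the lock.

#include <shadow.h>

/* Usage sketch: perform one locked edit of the password database.  */
static int
update_passwd_locked (void)
{
  if (lckpwdf () != 0)
    return -1;			/* Could not obtain the lock in time.  */

  /* ... rewrite /etc/passwd or /etc/shadow here ... */

  ulckpwdf ();			/* Release the lock.  */
  return 0;
}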
Example #9
/* Set the disposition for SIG.  */
__sighandler_t
sigset (int sig, __sighandler_t disp)
{
  struct sigaction act;
  struct sigaction oact;
  sigset_t set;
  sigset_t oset;

#ifdef SIG_HOLD
  /* Handle SIG_HOLD first.  */
  if (disp == SIG_HOLD)
    {
      /* Create an empty signal set.  */
      if (__sigemptyset (&set) < 0)
	return SIG_ERR;

      /* Add the specified signal.  */
      if (__sigaddset (&set, sig) < 0)
	return SIG_ERR;

      /* Add the signal set to the current signal mask.  */
      if (__sigprocmask (SIG_BLOCK, &set, &oset) < 0)
	return SIG_ERR;

      /* If the signal was already blocked signal this to the caller.  */
      if (__sigismember (&oset, sig))
	return SIG_HOLD;

      /* We need to determine whether a specific handler is installed.  */
      if (__sigaction (sig, NULL, &oact) < 0)
	return SIG_ERR;

      return oact.sa_handler;
    }
#endif	/* SIG_HOLD */

  /* Check signal extents to protect __sigismember.  */
  if (disp == SIG_ERR || sig < 1 || sig >= NSIG)
    {
      __set_errno (EINVAL);
      return SIG_ERR;
    }

  act.sa_handler = disp;
  if (__sigemptyset (&act.sa_mask) < 0)
    return SIG_ERR;
  act.sa_flags = 0;
  if (__sigaction (sig, &act, &oact) < 0)
    return SIG_ERR;

  /* Create an empty signal set.  */
  if (__sigemptyset (&set) < 0)
    return SIG_ERR;

  /* Add the specified signal.  */
  if (__sigaddset (&set, sig) < 0)
    return SIG_ERR;

  /* Remove the signal set from the current signal mask.  */
  if (__sigprocmask (SIG_UNBLOCK, &set, &oset) < 0)
    return SIG_ERR;

  /* If the signal was already blocked return SIG_HOLD.  */
  return __sigismember (&oset, sig) ? SIG_HOLD : oact.sa_handler;
}
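
A short usage sketch of the interface implemented above: sigset() installs a disposition and returns the previous one, while SIG_HOLD blocks the signal without changing its disposition (depending on the platform, _XOPEN_SOURCE may need to be defined for <signal.h> to declare sigset).

#include <signal.h>
#include <stdio.h>

static void
on_int (int sig)
{
  /* Handle SIGINT.  */
}

int
main (void)
{
  /* Install a handler; the previous disposition is returned on success.  */
  if (sigset (SIGINT, on_int) == SIG_ERR)
    perror ("sigset");

  /* Block SIGINT without touching its disposition.  */
  if (sigset (SIGINT, SIG_HOLD) == SIG_ERR)
    perror ("sigset");

  return 0;
}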
Example #10
void
__pthread_initialize_minimal_internal (void)
{
#ifndef SHARED
  /* Unlike in the dynamically linked case the dynamic linker has not
     taken care of initializing the TLS data structures.  */
  __libc_setup_tls (TLS_TCB_SIZE, TLS_TCB_ALIGN);

  /* We must prevent gcc from being clever and move any of the
     following code ahead of the __libc_setup_tls call.  This function
     will initialize the thread register which is subsequently
     used.  */
  __asm __volatile ("");
#endif

  /* Minimal initialization of the thread descriptor.  */
  struct pthread *pd = THREAD_SELF;
  INTERNAL_SYSCALL_DECL (err);
  pd->pid = pd->tid = INTERNAL_SYSCALL (set_tid_address, err, 1, &pd->tid);
#ifdef __PTHREAD_MUTEX_HAVE_PREV
  pd->robust_list.__prev = &pd->robust_list;
#endif
  pd->robust_list.__next = &pd->robust_list;
  THREAD_SETMEM (pd, specific[0], &pd->specific_1stblock[0]);
  THREAD_SETMEM (pd, user_stack, true);
  if (LLL_LOCK_INITIALIZER != 0)
    THREAD_SETMEM (pd, lock, LLL_LOCK_INITIALIZER);
#if HP_TIMING_AVAIL
  THREAD_SETMEM (pd, cpuclock_offset, GL(dl_cpuclock_offset));
#endif

  /* Set initial thread's stack block from 0 up to __libc_stack_end.
     It will be bigger than it actually is, but for unwind.c/pt-longjmp.c
     purposes this is good enough.  */
  THREAD_SETMEM (pd, stackblock_size, (size_t) __libc_stack_end);

  /* Initialize the list of all running threads with the main thread.  */
  INIT_LIST_HEAD (&__stack_user);
  list_add (&pd->list, &__stack_user);


  /* Install the cancellation signal handler.  If for some reason we
     cannot install the handler we do not abort.  Maybe we should, but
     it is only asynchronous cancellation which is affected.  */
  struct sigaction sa;
  sa.sa_sigaction = sigcancel_handler;
  sa.sa_flags = SA_SIGINFO;
  __sigemptyset (&sa.sa_mask);

  (void) __libc_sigaction (SIGCANCEL, &sa, NULL);

  /* Install the handler to change the threads' uid/gid.  */
  sa.sa_sigaction = sighandler_setxid;
  sa.sa_flags = SA_SIGINFO | SA_RESTART;

  (void) __libc_sigaction (SIGSETXID, &sa, NULL);

  /* The parent process might have left the signals blocked.  Just in
     case, unblock it.  We reuse the signal mask in the sigaction
     structure.  It is already cleared.  */
  __sigaddset (&sa.sa_mask, SIGCANCEL);
  __sigaddset (&sa.sa_mask, SIGSETXID);
  (void) INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_UNBLOCK, &sa.sa_mask,
			   NULL, _NSIG / 8);

  /* Get the size of the static and alignment requirements for the TLS
     block.  */
  size_t static_tls_align;
  _dl_get_tls_static_info (&__static_tls_size, &static_tls_align);

  /* Make sure the size takes all the alignments into account.  */
  if (STACK_ALIGN > static_tls_align)
    static_tls_align = STACK_ALIGN;
  __static_tls_align_m1 = static_tls_align - 1;

  __static_tls_size = roundup (__static_tls_size, static_tls_align);

  /* Determine the default allowed stack size.  This is the size used
     in case the user does not specify one.  */
  struct rlimit limit;
  if (getrlimit (RLIMIT_STACK, &limit) != 0
      || limit.rlim_cur == RLIM_INFINITY)
    /* The system limit is not usable.  Use an architecture-specific
       default.  */
    limit.rlim_cur = ARCH_STACK_DEFAULT_SIZE;
  else if (limit.rlim_cur < PTHREAD_STACK_MIN)
    /* The system limit is unusably small.
       Use the minimal size acceptable.  */
    limit.rlim_cur = PTHREAD_STACK_MIN;

  /* Make sure it meets the minimum size that allocate_stack
     (allocatestack.c) will demand, which depends on the page size.  */
  const uintptr_t pagesz = __sysconf (_SC_PAGESIZE);
  const size_t minstack = pagesz + __static_tls_size + MINIMAL_REST_STACK;
  if (limit.rlim_cur < minstack)
    limit.rlim_cur = minstack;

  /* Round the resource limit up to page size.  */
  limit.rlim_cur = (limit.rlim_cur + pagesz - 1) & -pagesz;
  __default_stacksize = limit.rlim_cur;

#ifdef SHARED
  /* Transfer the old value from the dynamic linker's internal location.  */
  *__libc_dl_error_tsd () = *(*GL(dl_error_catch_tsd)) ();
  GL(dl_error_catch_tsd) = &__libc_dl_error_tsd;

  /* Make __rtld_lock_{,un}lock_recursive use pthread_mutex_{,un}lock,
     keep the lock count from the ld.so implementation.  */
  GL(dl_rtld_lock_recursive) = (void *) INTUSE (__pthread_mutex_lock);
  GL(dl_rtld_unlock_recursive) = (void *) INTUSE (__pthread_mutex_unlock);
  unsigned int rtld_lock_count = GL(dl_load_lock).mutex.__data.__count;
  GL(dl_load_lock).mutex.__data.__count = 0;
  while (rtld_lock_count-- > 0)
    INTUSE (__pthread_mutex_lock) (&GL(dl_load_lock).mutex);

  GL(dl_make_stack_executable_hook) = &__make_stacks_executable;
#endif

  GL(dl_init_static_tls) = &__pthread_init_static_tls;

  /* Register the fork generation counter with the libc.  */
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
  __libc_multiple_threads_ptr =
#endif
    __libc_pthread_init (&__fork_generation, __reclaim_stacks,
			 ptr_pthread_functions);

  /* Determine whether the machine is SMP or not.  */
  __is_smp = is_smp_system ();
}
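
is_smp_system() is defined elsewhere; the real implementation usually asks the kernel directly (for example by parsing uname output or /proc). A purely hypothetical stand-in that would satisfy the call site:

#include <unistd.h>

/* Hypothetical stand-in for is_smp_system(); not the actual libc code.  */
static int
is_smp_system (void)
{
  return sysconf (_SC_NPROCESSORS_ONLN) > 1;
}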
Example #11
/*
 * Send a message to user space and wait for reply.
 */
int
xfs_message_rpc(int fd, struct xfs_message_header * message, u_int size)
{
    int ret;
    struct xfs_channel *chan = &xfs_channel[fd];
    struct xfs_link *this_message;
    struct xfs_link *this_process;
    struct xfs_message_header *msg;
#if defined(HAVE_STRUCT_PROC_P_SIGMASK)
    sigset_t oldsigmask;
#endif /* HAVE_STRUCT_PROC_P_SIGMASK */

    XFSDEB(XDEBMSG, ("xfs_message_rpc opcode = %d\n", message->opcode));

    if (!(chan->status & CHANNEL_OPENED))	/* No receiver? */
	return ENODEV;

    if (size < sizeof(struct xfs_message_wakeup)) {
	printf("XFS PANIC Error: Message to small to receive wakeup, opcode = %d\n", message->opcode);
	return ENOMEM;
    }
    this_message = xfs_alloc(sizeof(struct xfs_link));
    this_process = xfs_alloc(sizeof(struct xfs_link));
    msg = xfs_alloc(size);
    bcopy(message, msg, size);

    msg->size = size;
    msg->sequence_num = chan->nsequence++;
    this_message->error_or_size = 0;
    this_message->message = msg;
    this_process->message = msg;
    xfs_appendq(&chan->messageq, this_message);
    xfs_appendq(&chan->sleepq, this_process);
    xfs_select_wakeup(chan);
    this_process->error_or_size = 0;

    if (chan->status & CHANNEL_WAITING) {
	chan->status &= ~CHANNEL_WAITING;
	wakeup((caddr_t) chan);
    }

    /*
     * Remove SIGIO from the sigmask so no IO will
     * wake us up from tsleep()
     */

#ifdef HAVE_STRUCT_PROC_P_SIGMASK
    oldsigmask = xfs_curproc()->p_sigmask;
#ifdef __sigaddset
    __sigaddset(&xfs_curproc()->p_sigmask, SIGIO);
#else
    xfs_curproc()->p_sigmask |= sigmask(SIGIO);
#endif /* __sigaddset */
#elif defined(HAVE_STRUCT_PROC_P_SIGWAITMASK)
    oldsigmask = xfs_curproc()->p_sigwaitmask;
    sigaddset(&xfs_curproc()->p_sigwaitmask, SIGIO);
#endif
    /*
     * We have to check if we have a receiver here too because the
     * daemon could have terminated before we sleep. This seems to
     * happen sometimes when rebooting.
     */
    if (!(chan->status & CHANNEL_OPENED) ||
	tsleep((caddr_t) this_process, (PZERO + 1) | PCATCH, "xfs", 0)) {
	XFSDEB(XDEBMSG, ("caught signal\n"));
	this_process->error_or_size = EINTR;
    }

#ifdef HAVE_STRUCT_PROC_P_SIGMASK
    xfs_curproc()->p_sigmask = oldsigmask;
#elif defined(HAVE_STRUCT_PROC_P_SIGWAITMASK)
    xfs_curproc()->p_sigwaitmask = oldsigmask;
#endif

    /*
     * Caught signal, got reply message or device was closed.
     * Need to clean up both messageq and sleepq.
     */
    if (xfs_onq(this_message)) {
	xfs_outq(this_message);
    }
    if (xfs_onq(this_process)) {
	xfs_outq(this_process);
    }
    ret = this_process->error_or_size;

    XFSDEB(XDEBMSG, ("xfs_message_rpc this_process->error_or_size = %d\n",
		     this_process->error_or_size));
    XFSDEB(XDEBMSG, ("xfs_message_rpc opcode ((xfs_message_wakeup*)(this_process->message))->error = %d\n", ((struct xfs_message_wakeup *) (this_process->message))->error));

    bcopy(msg, message, size);

    xfs_free(this_message, sizeof(*this_message));
    xfs_free(this_process, sizeof(*this_process));
    xfs_free(msg, size);

    return ret;
}
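
struct xfs_link, the node queued on both messageq and sleepq, is not defined in this excerpt. From the fields accessed above it presumably looks roughly like this (layout is an inference, not the authoritative definition):

/* Sketch only: inferred from the accesses in xfs_message_rpc.  */
struct xfs_link {
    struct xfs_link *prev, *next;		/* Queue linkage (xfs_appendq/xfs_outq).  */
    struct xfs_message_header *message;		/* Message carried by this node.  */
    u_int error_or_size;			/* Error code, or size of the reply.  */
};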
Example #12
/* We are going to use the `nanosleep' syscall of the kernel.  But the
   kernel does not implement the stupid SysV SIGCHLD vs. SIG_IGN
   behaviour for this syscall.  Therefore we have to emulate it here.  */
unsigned int sleep (unsigned int seconds)
{
    struct timespec ts = { .tv_sec = (long int) seconds, .tv_nsec = 0 };
    sigset_t set, oset;
    unsigned int result;

    /* This is not necessary but some buggy programs depend on this.  */
    if (seconds == 0)
	return 0;

    /* Linux will wake up the system call, nanosleep, when SIGCHLD
       arrives even if SIGCHLD is ignored.  We have to deal with it
       in libc.  We block SIGCHLD first.  */
    if (__sigemptyset (&set) < 0
	    || __sigaddset (&set, SIGCHLD) < 0
	    || sigprocmask (SIG_BLOCK, &set, &oset))
	return -1;

    /* If SIGCHLD is already blocked, we don't have to do anything.  */
    if (!__sigismember (&oset, SIGCHLD))
    {
	int saved_errno;
	struct sigaction oact;

	if (__sigemptyset (&set) < 0 || __sigaddset (&set, SIGCHLD) < 0)
	    return -1;

	/* We get the signal handler for SIGCHLD.  */
	if (sigaction (SIGCHLD, (struct sigaction *) NULL, &oact) < 0)
	{
	    saved_errno = errno;
	    /* Restore the original signal mask.  */
	    (void) sigprocmask (SIG_SETMASK, &oset, (sigset_t *) NULL);
	    __set_errno (saved_errno);
	    return -1;
	}

	if (oact.sa_handler == SIG_IGN)
	{
	    /* We should leave SIGCHLD blocked.  */
	    result = nanosleep (&ts, &ts);

	    saved_errno = errno;
	    /* Restore the original signal mask.  */
	    (void) sigprocmask (SIG_SETMASK, &oset, (sigset_t *) NULL);
	    __set_errno (saved_errno);
	}
	else
	{
	    /* We should unblock SIGCHLD.  Restore the original signal mask.  */
	    (void) sigprocmask (SIG_SETMASK, &oset, (sigset_t *) NULL);
	    result = nanosleep (&ts, &ts);
	}
    }
    else
	result = nanosleep (&ts, &ts);

    if (result != 0)
	/* Round remaining time.  */
	result = (unsigned int) ts.tv_sec + (ts.tv_nsec >= 500000000L);

    return result;
}
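
The rounding at the end matters to callers: when the sleep is cut short by a signal, the return value approximates the unslept time, for example:

#include <stdio.h>
#include <unistd.h>

int
main (void)
{
  unsigned int left = sleep (10);	/* May be interrupted by a signal.  */
  if (left != 0)
    printf ("interrupted with about %u s left\n", left);
  return 0;
}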
Example #13
/* This is a quick and dirty version that is not 100% compliant with
 * the stupid SysV SIGCHLD vs. SIG_IGN behaviour.  It is
 * fine unless you are messing with SIGCHLD...  */
unsigned int sleep (unsigned int seconds)
{
	unsigned int res;
	struct timespec ts = { .tv_sec = (long int) seconds, .tv_nsec = 0 };
	res = nanosleep(&ts, &ts);
	if (res) res = (unsigned int) ts.tv_sec + (ts.tv_nsec >= 500000000L);
	return res;
}

# else

/* We are going to use the `nanosleep' syscall of the kernel.  But the
   kernel does not implement the stupid SysV SIGCHLD vs. SIG_IGN
   behaviour for this syscall.  Therefore we have to emulate it here.  */
unsigned int sleep (unsigned int seconds)
{
    struct timespec ts = { .tv_sec = (long int) seconds, .tv_nsec = 0 };
    sigset_t set;
    struct sigaction oact;
    unsigned int result;

    /* This is not necessary but some buggy programs depend on this.  */
    if (seconds == 0) {
#  ifdef CANCELLATION_P
	int cancelhandling;
	CANCELLATION_P (THREAD_SELF);
#  endif
	return 0;
    }

    /* Linux will wake up the system call, nanosleep, when SIGCHLD
       arrives even if SIGCHLD is ignored.  We have to deal with it
       in libc.  */

    __sigemptyset (&set);
    __sigaddset (&set, SIGCHLD);

    /* Is SIGCHLD set to SIG_IGN? */
    sigaction (SIGCHLD, NULL, &oact); /* never fails */
    if (oact.sa_handler == SIG_IGN) {
	/* Yes.  Block SIGCHLD, save old mask.  */
	sigprocmask (SIG_BLOCK, &set, &set); /* never fails */
    }

    /* Run nanosleep, with SIGCHLD blocked if SIGCHLD is SIG_IGNed.  */
    result = nanosleep (&ts, &ts);
    if (result != 0) {
	/* Got EINTR. Return remaining time.  */
	result = (unsigned int) ts.tv_sec + (ts.tv_nsec >= 500000000L);
    }

    if (!__sigismember (&set, SIGCHLD)) {
	/* We did block SIGCHLD, and old mask had no SIGCHLD bit.
	   IOW: we need to unblock SIGCHLD now. Do it.  */
	/* this sigprocmask call never fails, thus never updates errno,
	   and therefore we don't need to save/restore it.  */
	sigprocmask (SIG_SETMASK, &set, NULL); /* never fails */
    }

    return result;
}

# endif

#else /* __UCLIBC_HAS_REALTIME__ */

/* no nanosleep, use signals and alarm() */
static void sleep_alarm_handler(int attribute_unused sig)
{
}
unsigned int sleep (unsigned int seconds)
{
    struct sigaction act, oact;
    sigset_t set, oset;
    unsigned int result, remaining;
    time_t before, after;
    int old_errno = errno;

    /* This is not necessary but some buggy programs depend on this.  */
    if (seconds == 0)
	return 0;

    /* block SIGALRM */
    __sigemptyset (&set);
    __sigaddset (&set, SIGALRM);
    sigprocmask (SIG_BLOCK, &set, &oset); /* can't fail */

    act.sa_handler = sleep_alarm_handler;
    act.sa_flags = 0;
    act.sa_mask = oset;
    sigaction(SIGALRM, &act, &oact); /* never fails */

    before = time(NULL);
    remaining = alarm(seconds);
    if (remaining && remaining > seconds) {
	/* restore user's alarm */
	sigaction(SIGALRM, &oact, NULL);
	alarm(remaining); /* restore old alarm */
	sigsuspend(&oset);
	after = time(NULL);
    } else {
	sigsuspend (&oset);
	after = time(NULL);
	sigaction (SIGALRM, &oact, NULL);
    }
    result = after - before;
    alarm(remaining > result ? remaining - result : 0);
    sigprocmask (SIG_SETMASK, &oset, NULL);

    __set_errno(old_errno);

    return result > seconds ? 0 : seconds - result;
}

#endif /* __UCLIBC_HAS_REALTIME__ */

libc_hidden_def(sleep)