Example #1
0
int
pthread_sigmask (int how, const sigset_t *newmask, sigset_t *oldmask)
{
  sigset_t local_newmask;

  /* The only thing we have to make sure here is that SIGCANCEL and
     SIGSETXID are not blocked.  */
  if (newmask != NULL
      && (__builtin_expect (__sigismember (newmask, SIGCANCEL), 0)
	  || __builtin_expect (__sigismember (newmask, SIGSETXID), 0)))
    {
      local_newmask = *newmask;
      __sigdelset (&local_newmask, SIGCANCEL);
      __sigdelset (&local_newmask, SIGSETXID);
      newmask = &local_newmask;
    }

#ifdef INTERNAL_SYSCALL
  /* We know that realtime signals are available if NPTL is used.  */
  INTERNAL_SYSCALL_DECL (err);
  int result = INTERNAL_SYSCALL (rt_sigprocmask, err, 4, how, newmask,
				 oldmask, _NSIG / 8);

  return (INTERNAL_SYSCALL_ERROR_P (result, err)
	  ? INTERNAL_SYSCALL_ERRNO (result, err)
	  : 0);
#else
  return sigprocmask (how, newmask, oldmask) == -1 ? errno : 0;
#endif
}
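For reference, a minimal caller-side sketch (not part of the library source above) showing the convention this wrapper implements: pthread_sigmask reports failure through its return value and leaves errno untouched.
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>

int
main (void)
{
  sigset_t set;
  sigemptyset (&set);
  sigaddset (&set, SIGINT);

  /* The wrapper returns 0 on success or an errno value on failure;
     it never sets errno itself.  */
  int err = pthread_sigmask (SIG_BLOCK, &set, NULL);
  if (err != 0)
    fprintf (stderr, "pthread_sigmask: %s\n", strerror (err));

  return 0;
}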
Example #2
0
/* The setxid calls (setuid, setgid, etc.) use a signal to tell every
   thread to apply the ID change.  This is the handler for that signal.  */
static void
sighandler_setxid (int sig, siginfo_t *si, void *ctx)
{
  /* Safety check.  It would be possible to call this function for
     other signals and send a signal from another process.  This is not
     correct and might even be a security problem.  Try to catch as
     many incorrect invocations as possible.  */
  if (sig != SIGSETXID
#ifdef __ASSUME_CORRECT_SI_PID
      /* Kernels before 2.5.75 stored the thread ID and not the process
	 ID in si_pid so we skip this test.  */
      || si->si_pid != THREAD_GETMEM (THREAD_SELF, pid)
#endif
      || si->si_code != SI_TKILL)
    return;

  INTERNAL_SYSCALL_DECL (err);
  INTERNAL_SYSCALL_NCS (__xidcmd->syscall_no, err, 3, __xidcmd->id[0],
			__xidcmd->id[1], __xidcmd->id[2]);

  if (atomic_decrement_val (&__xidcmd->cntr) == 0)
    lll_futex_wake (&__xidcmd->cntr, 1);

  /* Reset the SETXID flag.  */
  struct pthread *self = THREAD_SELF;
  int flags = THREAD_GETMEM (self, cancelhandling);
  THREAD_SETMEM (self, cancelhandling, flags & ~SETXID_BITMASK);

  /* And release the futex.  */
  self->setxid_futex = 1;
  lll_futex_wake (&self->setxid_futex, 1);
}
Example #3
0
clock_t
__times (struct tms *buf)
{
  INTERNAL_SYSCALL_DECL (err);
  clock_t ret = INTERNAL_SYSCALL (times, err, 1, buf);
  if (INTERNAL_SYSCALL_ERROR_P (ret, err)
      && __builtin_expect (INTERNAL_SYSCALL_ERRNO (ret, err) == EFAULT, 0))
    {
      /* This might be an error or not.  For architectures which have
	 no separate return value and error indicators we cannot
	 distinguish a return value of -1 from an error.  Do it the
	 hard way.  We crash applications which pass in an invalid BUF
	 pointer.  */
#define touch(v) \
      do {								      \
	clock_t temp = v;						      \
	asm volatile ("" : "+r" (temp));				      \
	v = temp;							      \
      } while (0)
      touch (buf->tms_utime);
      touch (buf->tms_stime);
      touch (buf->tms_cutime);
      touch (buf->tms_cstime);

      /* If we come here the memory is valid and the kernel did not
	 return an EFAULT error.  Return the value given by the kernel.  */
    }

  /* Return value (clock_t) -1 signals an error, but if there wasn't any,
     return the following value.  */
  if (ret == (clock_t) -1)
    return (clock_t) 0;

  return ret;
}
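A small, self-contained usage sketch (not from the library source) for the exported times interface wrapped above; it assumes only the standard times() and sysconf() APIs.
#include <stdio.h>
#include <sys/times.h>
#include <unistd.h>

int
main (void)
{
  struct tms t;
  clock_t ticks = times (&t);
  if (ticks == (clock_t) -1)
    {
      perror ("times");
      return 1;
    }

  long hz = sysconf (_SC_CLK_TCK);   /* clock ticks per second */
  printf ("user %.2fs, system %.2fs\n",
          (double) t.tms_utime / hz, (double) t.tms_stime / hz);
  return 0;
}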
Example #4
0
/* Remove message queue named NAME.  */
int
mq_unlink (const char *name)
{
  if (name[0] != '/')
    {
      __set_errno (EINVAL);
      return -1;
    }

  INTERNAL_SYSCALL_DECL (err);
  int ret = INTERNAL_SYSCALL (mq_unlink, err, 1, name + 1);

  /* While unlink can return either EPERM or EACCES, mq_unlink should
     return just EACCES.  */
  if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (ret, err)))
    {
      ret = INTERNAL_SYSCALL_ERRNO (ret, err);
      if (ret == EPERM)
	ret = EACCES;
      __set_errno (ret);
      ret = -1;
    }

  return ret;
}
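For illustration only, a hedged caller sketch: the queue name "/example-queue" is hypothetical, and older glibc may require linking with -lrt. It shows the -1/errno convention of mq_unlink, with EPERM mapped to EACCES as in the wrapper above.
#include <mqueue.h>
#include <stdio.h>

int
main (void)
{
  /* Names must start with '/'; EPERM from the kernel is reported as EACCES.  */
  if (mq_unlink ("/example-queue") == -1)
    perror ("mq_unlink");
  return 0;
}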
Example #5
0
int
posix_fadvise (int fd, off_t offset, off_t len, int advise)
{
  INTERNAL_SYSCALL_DECL (err);
# ifdef __NR_fadvise64
  int ret = INTERNAL_SYSCALL_CALL (fadvise64, err, fd,
				   __ALIGNMENT_ARG SYSCALL_LL (offset),
				   len, advise);
# else
#  ifdef __ASSUME_FADVISE64_64_6ARG
  int ret = INTERNAL_SYSCALL_CALL (fadvise64_64, err, fd, advise,
				   __ALIGNMENT_ARG SYSCALL_LL (offset),
				   SYSCALL_LL (len));
#  else

#   ifdef __ASSUME_FADVISE64_64_NO_ALIGN
#    undef __ALIGNMENT_ARG
#    define __ALIGNMENT_ARG
#   endif

  int ret = INTERNAL_SYSCALL_CALL (fadvise64_64, err, fd,
				   __ALIGNMENT_ARG SYSCALL_LL (offset),
				   SYSCALL_LL (len), advise);
#  endif
# endif
  if (INTERNAL_SYSCALL_ERROR_P (ret, err))
    return INTERNAL_SYSCALL_ERRNO (ret, err);
  return 0;
}
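A minimal usage sketch (not part of the source above); the file name "input.dat" is a placeholder. Note that posix_fadvise returns an error number directly instead of setting errno, matching the wrapper's return convention.
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main (void)
{
  int fd = open ("input.dat", O_RDONLY);
  if (fd < 0)
    {
      perror ("open");
      return 1;
    }

  /* Advise sequential access for the whole file (len == 0).  */
  int err = posix_fadvise (fd, 0, 0, POSIX_FADV_SEQUENTIAL);
  if (err != 0)
    fprintf (stderr, "posix_fadvise: %s\n", strerror (err));

  close (fd);
  return 0;
}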
Example #6
0
/* We can simply use the syscall.  The CPU clocks are not supported
   with this function.  */
int
__clock_nanosleep (clockid_t clock_id, int flags, const struct timespec *req,
		   struct timespec *rem)
{
  INTERNAL_SYSCALL_DECL (err);
  int r;

  if (clock_id == CLOCK_THREAD_CPUTIME_ID)
    return EINVAL;
  if (clock_id == CLOCK_PROCESS_CPUTIME_ID)
    clock_id = MAKE_PROCESS_CPUCLOCK (0, CPUCLOCK_SCHED);

  if (SINGLE_THREAD_P)
    r = INTERNAL_SYSCALL (clock_nanosleep, err, 4, clock_id, flags, req, rem);
  else
    {
      int oldstate = LIBC_CANCEL_ASYNC ();

      r = INTERNAL_SYSCALL (clock_nanosleep, err, 4, clock_id, flags, req,
			    rem);

      LIBC_CANCEL_RESET (oldstate);
    }

  return (INTERNAL_SYSCALL_ERROR_P (r, err)
	  ? INTERNAL_SYSCALL_ERRNO (r, err) : 0);
}
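A caller-side sketch, assuming only the standard clock_gettime/clock_nanosleep interfaces: an absolute one-second sleep that restarts after EINTR, which the error-number return convention above makes straightforward.
#include <errno.h>
#include <time.h>

int
main (void)
{
  struct timespec deadline;
  clock_gettime (CLOCK_MONOTONIC, &deadline);
  deadline.tv_sec += 1;

  /* clock_nanosleep returns an error number (e.g. EINTR), not -1/errno.  */
  int err;
  do
    err = clock_nanosleep (CLOCK_MONOTONIC, TIMER_ABSTIME, &deadline, NULL);
  while (err == EINTR);

  return err == 0 ? 0 : 1;
}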
Example #7
0
int posix_fadvise64(int fd, off64_t offset, off64_t len, int advice)
{
	INTERNAL_SYSCALL_DECL (err);
	/* ARM has always been funky. */
#if defined (__arm__) || \
    (defined(__UCLIBC_SYSCALL_ALIGN_64BIT__) && (defined(__powerpc__) || defined(__xtensa__)))
	/* arch with 64-bit data in even reg alignment #1: [powerpc/xtensa]
	 * custom syscall handler (rearranges @advice to avoid register hole punch) */
	int ret = INTERNAL_SYSCALL (fadvise64_64, err, 6, fd, advice,
			OFF64_HI_LO (offset), OFF64_HI_LO (len));
#elif defined(__UCLIBC_SYSCALL_ALIGN_64BIT__)
	/* arch with 64-bit data in even reg alignment #2: [arcv2/others-in-future]
	 * stock syscall handler in kernel (reg hole punched) */
	int ret = INTERNAL_SYSCALL (fadvise64_64, err, 7, fd, 0,
			OFF64_HI_LO (offset), OFF64_HI_LO (len),
			advice);
#else
	int ret = INTERNAL_SYSCALL (fadvise64_64, err, 6, fd,
			OFF64_HI_LO (offset), OFF64_HI_LO (len),
			advice);
#endif
	if (INTERNAL_SYSCALL_ERROR_P (ret, err))
		return INTERNAL_SYSCALL_ERRNO (ret, err);
	return 0;
}
Example #8
0
const fenv_t *
__fe_nomask_env (void)
{
#if __ASSUME_NEW_PRCTL_SYSCALL == 0
# if defined PR_SET_FPEXC && defined PR_FP_EXC_PRECISE
  int result = INLINE_SYSCALL (prctl, 2, PR_SET_FPEXC, PR_FP_EXC_PRECISE);

  if (result == -1 && errno == EINVAL)
# endif
    {
      struct sigaction act, oact;

      act.sa_handler = (sighandler_t) fe_nomask_handler;
      sigemptyset (&act.sa_mask);
      act.sa_flags = 0;

      sigaction (SIGUSR1, &act, &oact);
      raise (SIGUSR1);
    }
#else
  INTERNAL_SYSCALL_DECL (err);
  INTERNAL_SYSCALL (prctl, err, 2, PR_SET_FPEXC, PR_FP_EXC_PRECISE);
#endif

  return FE_ENABLED_ENV;
}
Example #9
0
int
__feholdexcept (fenv_t *envp)
{
  fenv_union_t u;
  INTERNAL_SYSCALL_DECL (err);
  int r;

  /* Get the current state.  */
  r = INTERNAL_SYSCALL (prctl, err, 2, PR_GET_FPEXC, &u.l[0]);
  if (INTERNAL_SYSCALL_ERROR_P (r, err))
    return -1;

  u.l[1] = fegetenv_register ();
  *envp = u.fenv;

  /* Clear everything except for the rounding mode and trapping to the
     kernel.  */
  u.l[0] &= ~(PR_FP_EXC_DIV
	      | PR_FP_EXC_OVF
	      | PR_FP_EXC_UND
	      | PR_FP_EXC_RES
	      | PR_FP_EXC_INV);
  u.l[1] &= SPEFSCR_FRMC | (SPEFSCR_ALL_EXCEPT_ENABLE & ~SPEFSCR_FINXE);

  /* Put the new state in effect.  */
  fesetenv_register (u.l[1]);
  r = INTERNAL_SYSCALL (prctl, err, 2, PR_SET_FPEXC,
			u.l[0] | PR_FP_EXC_SW_ENABLE);
  if (INTERNAL_SYSCALL_ERROR_P (r, err))
    return -1;

  return 0;
}
Example #10
0
int
__pthread_kill (pthread_t threadid, int signo)
{
  struct pthread *pd = (struct pthread *) threadid;

  /* Make sure the descriptor is valid.  */
  if (DEBUGGING_P && INVALID_TD_P (pd))
    /* Not a valid thread handle.  */
    return ESRCH;

  /* Force load of pd->tid into local variable or register.  Otherwise
     if a thread exits between ESRCH test and tgkill, we might return
     EINVAL, because pd->tid would be cleared by the kernel.  */
  pid_t tid = atomic_forced_read (pd->tid);
  if (__glibc_unlikely (tid <= 0))
    /* Not a valid thread handle.  */
    return ESRCH;

  /* Disallow sending the signal we use for cancellation, timers, and
     the setxid implementation.  */
  if (signo == SIGCANCEL || signo == SIGTIMER || signo == SIGSETXID)
    return EINVAL;

  /* We have a special syscall to do the work.  */
  INTERNAL_SYSCALL_DECL (err);

  pid_t pid = __getpid ();

  int val = INTERNAL_SYSCALL_CALL (tgkill, err, pid, tid, signo);
  return (INTERNAL_SYSCALL_ERROR_P (val, err)
	  ? INTERNAL_SYSCALL_ERRNO (val, err) : 0);
}
Example #11
0
gid_t
__getegid (void)
{
  INTERNAL_SYSCALL_DECL (err);
#if __ASSUME_32BITUIDS > 0
  /* No error checking.  */
  return INTERNAL_SYSCALL (getegid32, err, 0);
#else
# ifdef __NR_getegid32
  if (__libc_missing_32bit_uids <= 0)
    {
      int result;

      result = INTERNAL_SYSCALL (getegid32, err, 0);
      if (! INTERNAL_SYSCALL_ERROR_P (result, err)
	  || INTERNAL_SYSCALL_ERRNO (result, err) != ENOSYS)
	return result;

      __libc_missing_32bit_uids = 1;
    }
# endif /* __NR_getegid32 */

  /* No error checking.  */
  return INTERNAL_SYSCALL (getegid, err, 0);
#endif
}
Example #12
0
int
fedisableexcept (int excepts)
{
  int result = 0, pflags, r;
  INTERNAL_SYSCALL_DECL (err);

  r = INTERNAL_SYSCALL (prctl, err, 2, PR_GET_FPEXC, &pflags);
  if (INTERNAL_SYSCALL_ERROR_P (r, err))
    return -1;

  /* Save old enable bits.  */
  result = __fexcepts_from_prctl (pflags);

  pflags &= ~__fexcepts_to_prctl (excepts);
  r = INTERNAL_SYSCALL (prctl, err, 2, PR_SET_FPEXC,
			pflags | PR_FP_EXC_SW_ENABLE);
  if (INTERNAL_SYSCALL_ERROR_P (r, err))
    return -1;

  /* If disabling signals for "inexact", also disable trapping to the
     kernel.  */
  if ((excepts & FE_INEXACT) != 0)
    {
      unsigned long fpescr;

      fpescr = fegetenv_register ();
      fpescr &= ~SPEFSCR_FINXE;
      fesetenv_register (fpescr);
    }

  return result;
}
Example #13
0
int sched_setaffinity(pid_t pid, size_t cpusetsize, const cpu_set_t *cpuset)
{
	size_t cnt;
	if (unlikely (__kernel_cpumask_size == 0)) {
		INTERNAL_SYSCALL_DECL (err);
		int res;
		size_t psize = 128;
		void *p = alloca (psize);

		while (res = INTERNAL_SYSCALL (sched_getaffinity, err, 3, getpid (),
					       psize, p),
		       INTERNAL_SYSCALL_ERROR_P (res, err)
		       && INTERNAL_SYSCALL_ERRNO (res, err) == EINVAL)
			p = extend_alloca (p, psize, 2 * psize);

		if (res == 0 || INTERNAL_SYSCALL_ERROR_P (res, err)) {
			__set_errno (INTERNAL_SYSCALL_ERRNO (res, err));
			return -1;
		}

		__kernel_cpumask_size = res;
	}

	/* We now know the size of the kernel cpumask_t.  Make sure the user
	   does not request to set a bit beyond that.  */
	for (cnt = __kernel_cpumask_size; cnt < cpusetsize; ++cnt)
		if (((char *) cpuset)[cnt] != '\0') {
			/* Found a nonzero byte.  This means the user request cannot be
			   fulfilled.  */
			__set_errno (EINVAL);
			return -1;
		}

	return INLINE_SYSCALL (sched_setaffinity, 3, pid, cpusetsize, cpuset);
}
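A short usage sketch (not part of the uClibc source): pinning the calling process to CPU 0 with the standard cpu_set_t macros. sched_setaffinity follows the usual -1/errno convention, so the EINVAL produced by the size check above is visible in errno.
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int
main (void)
{
  cpu_set_t set;
  CPU_ZERO (&set);
  CPU_SET (0, &set);   /* run only on CPU 0 */

  /* pid 0 means the calling process/thread.  */
  if (sched_setaffinity (0, sizeof (set), &set) == -1)
    perror ("sched_setaffinity");

  return 0;
}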
Example #14
0
int
setfsuid (uid_t uid)
{
  INTERNAL_SYSCALL_DECL (err);
# if  __ASSUME_32BITUIDS > 0
  /* No error checking. */
  return INTERNAL_SYSCALL (setfsuid32, err, 1, uid);
# else
#  ifdef __NR_setfsuid32
  if (__libc_missing_32bit_uids <= 0)
    {
      int result;

      result = INTERNAL_SYSCALL (setfsuid32, err, 1, uid);
      if (! INTERNAL_SYSCALL_ERROR_P (result, err)
	  || INTERNAL_SYSCALL_ERRNO (result, err) != ENOSYS)
	return result;

      __libc_missing_32bit_uids = 1;
    }
#  endif /* __NR_setfsuid32 */

  if (uid != (uid_t) ((__kernel_uid_t) uid))
    {
      __set_errno (EINVAL);
      return -1;
    }

  /* No error checking. */
  return INTERNAL_SYSCALL (setfsuid, err, 1, uid);
# endif
}
Example #15
0
/* Reserve storage for the data of the file associated with FD.  */
int
posix_fallocate (int fd, __off_t offset, __off_t len)
{
#ifdef __NR_fallocate
# ifndef __ASSUME_FALLOCATE
  if (__builtin_expect (__have_fallocate >= 0, 1))
# endif
    {
      INTERNAL_SYSCALL_DECL (err);
      int res = INTERNAL_SYSCALL (fallocate, err, 4, fd, 0, offset, len);

      if (! INTERNAL_SYSCALL_ERROR_P (res, err))
	return 0;

# ifndef __ASSUME_FALLOCATE
      if (__builtin_expect (INTERNAL_SYSCALL_ERRNO (res, err) == ENOSYS, 0))
	__have_fallocate = -1;
      else
# endif
	if (INTERNAL_SYSCALL_ERRNO (res, err) != EOPNOTSUPP)
	  return INTERNAL_SYSCALL_ERRNO (res, err);
    }
#endif

  return internal_fallocate (fd, offset, len);
}
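A hedged caller sketch for the exported posix_fallocate interface; the file name "out.dat" is a placeholder. Like the other posix_* wrappers here, it returns an error number directly and leaves errno alone.
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main (void)
{
  int fd = open ("out.dat", O_RDWR | O_CREAT, 0644);
  if (fd < 0)
    {
      perror ("open");
      return 1;
    }

  /* Reserve 1 MiB starting at offset 0.  */
  int err = posix_fallocate (fd, 0, 1 << 20);
  if (err != 0)
    fprintf (stderr, "posix_fallocate: %s\n", strerror (err));

  close (fd);
  return 0;
}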
Example #16
0
int
__pthread_setaffinity_new (pthread_t th, size_t cpusetsize,
			   const cpu_set_t *cpuset)
{
  const struct pthread *pd = (const struct pthread *) th;

  INTERNAL_SYSCALL_DECL (err);
  int res;

  if (__builtin_expect (__kernel_cpumask_size == 0, 0))
    {
      res = __determine_cpumask_size (pd->tid);
      if (res != 0)
	return res;
    }

  /* We now know the size of the kernel cpumask_t.  Make sure the user
     does not request to set a bit beyond that.  */
  for (size_t cnt = __kernel_cpumask_size; cnt < cpusetsize; ++cnt)
    if (((char *) cpuset)[cnt] != '\0')
      /* Found a nonzero byte.  This means the user request cannot be
	 fulfilled.  */
      return EINVAL;

  res = INTERNAL_SYSCALL (sched_setaffinity, err, 3, pd->tid, cpusetsize,
			  cpuset);
  return (INTERNAL_SYSCALL_ERROR_P (res, err)
	  ? INTERNAL_SYSCALL_ERRNO (res, err)
	  : 0);
}
Example #17
0
int
setup_thread (struct database_dyn *db)
{
#ifdef __NR_set_tid_address
  /* Only supported when NPTL is used.  */
  char buf[100];
  if (confstr (_CS_GNU_LIBPTHREAD_VERSION, buf, sizeof (buf)) >= sizeof (buf)
      || strncmp (buf, "NPTL", 4) != 0)
    return 0;

  /* Do not try this at home, kids.  We play with the SETTID address
     even though the process is multi-threaded.  This can only work
     since none of the threads ever terminates.  */
  INTERNAL_SYSCALL_DECL (err);
  int r = INTERNAL_SYSCALL (set_tid_address, err, 1,
			    &db->head->nscd_certainly_running);
  if (!INTERNAL_SYSCALL_ERROR_P (r, err))
    /* We know the kernel can reset this field when nscd terminates.
       So, set the field to a nonzero value which indicates that nscd
       is certainly running and clients can skip the test.  */
    return db->head->nscd_certainly_running = 1;
#endif

  return 0;
}
Example #18
0
void
__netlink_close (struct netlink_handle *h)
{
  /* Don't modify errno.  */
  INTERNAL_SYSCALL_DECL (err);
  (void) INTERNAL_SYSCALL (close, err, 1, h->fd);
}
Example #19
0
/* Reserve storage for the data of the file associated with FD.  */
int
posix_fallocate (int fd, __off_t offset, __off_t len)
{
#ifdef __NR_fallocate
# ifndef __ASSUME_FALLOCATE
  if (__glibc_likely (__have_fallocate >= 0))
# endif
    {
      INTERNAL_SYSCALL_DECL (err);
# ifdef INTERNAL_SYSCALL_TYPES
      int res = INTERNAL_SYSCALL_TYPES (fallocate, err, 4, int, fd,
					int, 0, off_t, offset,
					off_t, len);
# else
      int res = INTERNAL_SYSCALL (fallocate, err, 4, fd, 0, offset, len);
# endif

      if (! INTERNAL_SYSCALL_ERROR_P (res, err))
	return 0;

# ifndef __ASSUME_FALLOCATE
      if (__glibc_unlikely (INTERNAL_SYSCALL_ERRNO (res, err) == ENOSYS))
	__have_fallocate = -1;
      else
# endif
	if (INTERNAL_SYSCALL_ERRNO (res, err) != EOPNOTSUPP)
	  return INTERNAL_SYSCALL_ERRNO (res, err);
    }
#endif

  return internal_fallocate (fd, offset, len);
}
Example #20
0
/* Return any pending signal or wait for one for the given time.  */
static __inline__ int
do_sigwait (const sigset_t *set, int *sig)
{
  int ret;

  /* XXX The size argument hopefully will have to be changed to the
     real size of the user-level sigset_t.  */
#ifdef INTERNAL_SYSCALL
  INTERNAL_SYSCALL_DECL (err);
  ret = INTERNAL_SYSCALL (rt_sigtimedwait, err, 4, set,
			  NULL, NULL, _NSIG / 8);
  if (! INTERNAL_SYSCALL_ERROR_P (ret, err))
    {
      *sig = ret;
      ret = 0;
    }
  else
    ret = INTERNAL_SYSCALL_ERRNO (ret, err);
#else
  ret = INLINE_SYSCALL (rt_sigtimedwait, 4, set,
			NULL, NULL, _NSIG / 8);
  if (ret != -1)
    {
      *sig = ret;
      ret = 0;
    }
  else
    ret = errno;
#endif

  return ret;
}
Example #21
0
const fenv_t *
__fe_nomask_env_priv (void)
{
  INTERNAL_SYSCALL_DECL (err);
  INTERNAL_SYSCALL (prctl, err, 2, PR_SET_FPEXC, PR_FP_EXC_PRECISE);

  return FE_ENABLED_ENV;
}
Example #22
0
const fenv_t *
__fe_mask_env (void)
{
  INTERNAL_SYSCALL_DECL (err);
  INTERNAL_SYSCALL (prctl, err, 2, PR_SET_FPEXC, PR_FP_EXC_DISABLED);

  return FE_DFL_ENV;
}
Example #23
0
pid_t _fork_parent(void)
{
	INTERNAL_SYSCALL_DECL(err);
	register long ret = INTERNAL_SYSCALL(clone, err, 2, CLONE_VM, 0);
	if (ret > 0)
		/* parent needs to die now w/out touching stack */
		INTERNAL_SYSCALL(exit, err, 1, 0);
	return ret;
}
Example #24
0
#ifdef SHARED
static
#endif
void
__nptl_set_robust (struct pthread *self)
{
  INTERNAL_SYSCALL_DECL (err);
  INTERNAL_SYSCALL (set_robust_list, err, 2, &self->robust_head,
		    sizeof (struct robust_list_head));
}
Example #25
0
void
__nptl_main (void)
{
  INTERNAL_SYSCALL_DECL (err);
  INTERNAL_SYSCALL (write, err, 3, STDOUT_FILENO, (const char *) banner,
		    sizeof banner - 1);

  _exit (0);
}
Example #26
0
/* Return any pending signal or wait for one for the given time.  */
static int
do_sigwait (const sigset_t *set, int *sig)
{
  int ret;

#ifdef SIGCANCEL
  sigset_t tmpset;
  if (set != NULL
      && (__builtin_expect (__sigismember (set, SIGCANCEL), 0)
# ifdef SIGSETXID
	  || __builtin_expect (__sigismember (set, SIGSETXID), 0)
# endif
	  ))
    {
      /* Create a temporary mask without the bit for SIGCANCEL set.  */
      // We are not copying more than we have to.
      memcpy (&tmpset, set, _NSIG / 8);
      __sigdelset (&tmpset, SIGCANCEL);
# ifdef SIGSETXID
      __sigdelset (&tmpset, SIGSETXID);
# endif
      set = &tmpset;
    }
#endif

  /* XXX The size argument hopefully will have to be changed to the
     real size of the user-level sigset_t.  */
#ifdef INTERNAL_SYSCALL
  INTERNAL_SYSCALL_DECL (err);
  do
    ret = INTERNAL_SYSCALL (rt_sigtimedwait, err, 4, set,
			    NULL, NULL, _NSIG / 8);
  while (INTERNAL_SYSCALL_ERROR_P (ret, err)
	 && INTERNAL_SYSCALL_ERRNO (ret, err) == EINTR);
  if (! INTERNAL_SYSCALL_ERROR_P (ret, err))
    {
      *sig = ret;
      ret = 0;
    }
  else
    ret = INTERNAL_SYSCALL_ERRNO (ret, err);
#else
  do
    ret = INLINE_SYSCALL (rt_sigtimedwait, 4, set, NULL, NULL, _NSIG / 8);
  while (ret == -1 && errno == EINTR);
  if (ret != -1)
    {
      *sig = ret;
      ret = 0;
    }
  else
    ret = errno;
#endif

  return ret;
}
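A caller-side sketch of the exported sigwait interface built on this helper, assuming only standard signal APIs: the signal must be blocked first so it stays pending, and sigwait returns 0 with the signal number stored in *sig (EINTR is retried internally, as shown above).
#include <signal.h>
#include <stdio.h>

int
main (void)
{
  sigset_t set;
  sigemptyset (&set);
  sigaddset (&set, SIGUSR1);

  /* Block the signal so it is consumed by sigwait, not a handler.  */
  sigprocmask (SIG_BLOCK, &set, NULL);

  raise (SIGUSR1);      /* make one pending so the example terminates */

  int sig;
  int err = sigwait (&set, &sig);
  if (err == 0)
    printf ("got signal %d\n", sig);
  else
    fprintf (stderr, "sigwait failed: %d\n", err);

  return 0;
}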
Example #27
0
time_t
time (time_t *t)
{
  INTERNAL_SYSCALL_DECL (err);
  time_t res = INTERNAL_SYSCALL (time, err, 1, NULL);
  /* There cannot be any error.  */
  if (t != NULL)
    *t = res;
  return res;
}
Example #28
0
int
pthread_sigqueue (pthread_t threadid, int signo, const union sigval value)
{
#ifdef __NR_rt_tgsigqueueinfo
  struct pthread *pd = (struct pthread *) threadid;

  /* Make sure the descriptor is valid.  */
  if (DEBUGGING_P && INVALID_TD_P (pd))
    /* Not a valid thread handle.  */
    return ESRCH;

  /* Force load of pd->tid into local variable or register.  Otherwise
     if a thread exits between ESRCH test and tgkill, we might return
     EINVAL, because pd->tid would be cleared by the kernel.  */
  pid_t tid = atomic_forced_read (pd->tid);
  if (__builtin_expect (tid <= 0, 0))
    /* Not a valid thread handle.  */
    return ESRCH;

  /* Disallow sending the signal we use for cancellation, timers, and
     for the setxid implementation.  */
  if (signo == SIGCANCEL || signo == SIGTIMER || signo == SIGSETXID)
    return EINVAL;

  /* Set up the siginfo_t structure.  */
  siginfo_t info;
  memset (&info, '\0', sizeof (siginfo_t));
  info.si_signo = signo;
  info.si_code = SI_QUEUE;
  info.si_pid = THREAD_GETMEM (THREAD_SELF, pid);
  info.si_uid = getuid ();
  info.si_value = value;

  /* We have a special syscall to do the work.  */
  INTERNAL_SYSCALL_DECL (err);

  /* One comment: The PID field in the TCB can temporarily be changed
     (in fork).  But this must not affect this code here.  Since this
     function would have to be called while the thread is executing
     fork, it would have to happen in a signal handler.  But this is
     not allowed; pthread_sigqueue is not guaranteed to be async-safe.  */
  int val = INTERNAL_SYSCALL (rt_tgsigqueueinfo, err, 4,
			      THREAD_GETMEM (THREAD_SELF, pid),
			      tid, signo, &info);

  return (INTERNAL_SYSCALL_ERROR_P (val, err)
	  ? INTERNAL_SYSCALL_ERRNO (val, err) : 0);
#else
  return ENOSYS;
#endif
}
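A hedged usage sketch for the exported pthread_sigqueue call (a GNU extension): the payload value 42 and the worker thread are purely illustrative, and sigwaitinfo is used so the queued si_value can be read without a handler. The call returns an error number directly, like the wrapper above.
#define _GNU_SOURCE
#include <pthread.h>
#include <signal.h>
#include <stdio.h>

static sigset_t set;

static void *
worker (void *arg)
{
  (void) arg;
  siginfo_t si;
  /* Wait for the queued signal and read the payload from si_value.  */
  if (sigwaitinfo (&set, &si) > 0)
    printf ("payload: %d\n", si.si_value.sival_int);
  return NULL;
}

int
main (void)
{
  sigemptyset (&set);
  sigaddset (&set, SIGRTMIN);
  pthread_sigmask (SIG_BLOCK, &set, NULL);   /* mask inherited by the new thread */

  pthread_t th;
  pthread_create (&th, NULL, worker, NULL);

  union sigval v = { .sival_int = 42 };
  int err = pthread_sigqueue (th, SIGRTMIN, v);  /* error number returned directly */
  if (err != 0)
    fprintf (stderr, "pthread_sigqueue: %d\n", err);

  pthread_join (th, NULL);
  return 0;
}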
Example #29
0
const fenv_t *
__fe_mask_env (void)
{
#if defined PR_SET_FPEXC && defined PR_FP_EXC_DISABLED
  INTERNAL_SYSCALL_DECL (err);
  INTERNAL_SYSCALL (prctl, err, 2, PR_SET_FPEXC, PR_FP_EXC_DISABLED);
#else
  __set_errno (ENOSYS);
#endif
  return FE_DFL_ENV;
}
Example #30
0
static void
unwind_cleanup (_Unwind_Reason_Code reason, struct _Unwind_Exception *exc)
{
  /* When we get here a C++ catch block didn't rethrow the object.  We
     cannot handle this case and therefore abort.  */
# define STR_N_LEN(str) str, strlen (str)
  INTERNAL_SYSCALL_DECL (err);
  INTERNAL_SYSCALL (write, err, 3, STDERR_FILENO,
		    STR_N_LEN ("FATAL: exception not rethrown\n"));
  abort ();
}