Example No. 1
int
__pthread_setaffinity_new (pthread_t th, size_t cpusetsize,
                           const cpu_set_t *cpuset)
{
    const struct pthread *pd = (const struct pthread *) th;

    INTERNAL_SYSCALL_DECL (err);
    int res;

    if (__builtin_expect (__kernel_cpumask_size == 0, 0))
    {
        res = __determine_cpumask_size (pd->tid);
        if (res != 0)
            return res;
    }

    /* We now know the size of the kernel cpumask_t.  Make sure the user
       does not request to set a bit beyond that.  */
    for (size_t cnt = __kernel_cpumask_size; cnt < cpusetsize; ++cnt)
        if (((char *) cpuset)[cnt] != '\0')
            /* Found a nonzero byte.  This means the user request cannot be
            fulfilled.  */
            return EINVAL;

    res = INTERNAL_SYSCALL (sched_setaffinity, err, 3, pd->tid, cpusetsize,
                            cpuset);

#ifdef RESET_VGETCPU_CACHE
    if (!INTERNAL_SYSCALL_ERROR_P (res, err))
        RESET_VGETCPU_CACHE ();
#endif

    return (INTERNAL_SYSCALL_ERROR_P (res, err)
            ? INTERNAL_SYSCALL_ERRNO (res, err)
            : 0);
}
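
The routine above is the internal implementation behind the public pthread_setaffinity_np call.  A minimal caller-side sketch of that documented GNU API (pinning to CPU 0 is illustrative):

#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

int
main (void)
{
  cpu_set_t set;

  /* Pin the calling thread to CPU 0.  */
  CPU_ZERO (&set);
  CPU_SET (0, &set);

  int err = pthread_setaffinity_np (pthread_self (), sizeof (set), &set);
  if (err != 0)
    fprintf (stderr, "pthread_setaffinity_np: %d\n", err);
  return 0;
}
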
Example No. 2
/* Get information about the file NAME in BUF.  */
int
__fxstatat (int vers, int fd, const char *file, struct stat *st, int flag)
{
  int result;
  INTERNAL_SYSCALL_DECL (err);
#ifdef STAT_IS_KERNEL_STAT
# define kst (*st)
#else
  struct kernel_stat kst;
#endif

  result = INTERNAL_SYSCALL (newfstatat, err, 4, fd, file, &kst, flag);
  if (!__builtin_expect (INTERNAL_SYSCALL_ERROR_P (result, err), 1))
    {
#ifdef STAT_IS_KERNEL_STAT
      return 0;
#else
      return __xstat_conv (vers, &kst, st);
#endif
    }
  else
    return INLINE_SYSCALL_ERROR_RETURN_VALUE (INTERNAL_SYSCALL_ERRNO (result,
								      err));
}
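
__fxstatat is the worker behind the public fstatat call.  A small usage sketch of that documented interface (the /etc directory and file name are illustrative):

#include <fcntl.h>              /* open, AT_SYMLINK_NOFOLLOW */
#include <sys/stat.h>
#include <stdio.h>

int
main (void)
{
  struct stat st;

  /* Stat "passwd" relative to an open directory descriptor, without
     following a final symlink.  */
  int dirfd = open ("/etc", O_RDONLY);
  if (dirfd == -1
      || fstatat (dirfd, "passwd", &st, AT_SYMLINK_NOFOLLOW) == -1)
    {
      perror ("open/fstatat");
      return 1;
    }

  printf ("size: %lld bytes\n", (long long) st.st_size);
  return 0;
}
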
Example No. 3
int
pthread_mutex_timedlock (pthread_mutex_t *mutex,
                         const struct timespec *abstime)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  int result = 0;

  /* We must not check ABSTIME here.  If the thread does not block
     abstime must not be checked for a valid value.  */

  switch (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex),
			    PTHREAD_MUTEX_TIMED_NP))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
	{
	  /* Just bump the counter.  */
	  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
	    /* Overflow of the counter.  */
	    return EAGAIN;

	  ++mutex->__data.__count;

	  goto out;
	}

      /* We have to get the mutex.  */
      result = lll_timedlock (mutex->__data.__lock, abstime,
			      PTHREAD_MUTEX_PSHARED (mutex));

      if (result != 0)
	goto out;

      /* Only locked once so far.  */
      mutex->__data.__count = 1;
      break;

      /* Error checking mutex.  */
    case PTHREAD_MUTEX_ERRORCHECK_NP:
      /* Check whether we already hold the mutex.  */
      if (__builtin_expect (mutex->__data.__owner == id, 0))
	return EDEADLK;

      /* FALLTHROUGH */

    case PTHREAD_MUTEX_TIMED_NP:
    simple:
      /* Normal mutex.  */
      result = lll_timedlock (mutex->__data.__lock, abstime,
			      PTHREAD_MUTEX_PSHARED (mutex));
      break;

    case PTHREAD_MUTEX_ADAPTIVE_NP:
      if (! __is_smp)
	goto simple;

      if (lll_trylock (mutex->__data.__lock) != 0)
	{
	  int cnt = 0;
	  int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
			     mutex->__data.__spins * 2 + 10);
	  do
	    {
	      if (cnt++ >= max_cnt)
		{
		  result = lll_timedlock (mutex->__data.__lock, abstime,
					  PTHREAD_MUTEX_PSHARED (mutex));
		  break;
		}

#ifdef BUSY_WAIT_NOP
	      BUSY_WAIT_NOP;
#endif
	    }
	  while (lll_trylock (mutex->__data.__lock) != 0);

	  mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
	}
      break;

    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
		     &mutex->__data.__list.__next);

      oldval = mutex->__data.__lock;
      do
	{
	again:
	  if ((oldval & FUTEX_OWNER_DIED) != 0)
	    {
	      /* The previous owner died.  Try locking the mutex.  */
	      int newval = id | (oldval & FUTEX_WAITERS);

	      newval
		= atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
						       newval, oldval);
	      if (newval != oldval)
		{
		  oldval = newval;
		  goto again;
		}

	      /* We got the mutex.  */
	      mutex->__data.__count = 1;
	      /* But it is inconsistent unless marked otherwise.  */
	      mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

	      ENQUEUE_MUTEX (mutex);
	      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

	      /* Note that we deliberately exit here.  If we fall
		 through to the end of the function __nusers would be
		 incremented which is not correct because the old
		 owner has to be discounted.  */
	      return EOWNERDEAD;
	    }

	  /* Check whether we already hold the mutex.  */
	  if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
	    {
	      int kind = PTHREAD_MUTEX_TYPE (mutex);
	      if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
		{
		  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
				 NULL);
		  return EDEADLK;
		}

	      if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
		{
		  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
				 NULL);

		  /* Just bump the counter.  */
		  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
		    /* Overflow of the counter.  */
		    return EAGAIN;

		  ++mutex->__data.__count;

		  return 0;
		}
	    }

	  result = lll_robust_timedlock (mutex->__data.__lock, abstime, id,
					 PTHREAD_ROBUST_MUTEX_PSHARED (mutex));

	  if (__builtin_expect (mutex->__data.__owner
				== PTHREAD_MUTEX_NOTRECOVERABLE, 0))
	    {
	      /* This mutex is now not recoverable.  */
	      mutex->__data.__count = 0;
	      lll_unlock (mutex->__data.__lock,
			  PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
	      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
	      return ENOTRECOVERABLE;
	    }

	  if (result == ETIMEDOUT || result == EINVAL)
	    goto out;

	  oldval = result;
	}
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      mutex->__data.__count = 1;
      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;

    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
	int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
	int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

	if (robust)
	  /* Note: robust PI futexes are signaled by setting bit 0.  */
	  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
			 (void *) (((uintptr_t) &mutex->__data.__list.__next)
				   | 1));

	oldval = mutex->__data.__lock;

	/* Check whether we already hold the mutex.  */
	if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
	  {
	    if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
	      {
		THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
		return EDEADLK;
	      }

	    if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
	      {
		THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

		/* Just bump the counter.  */
		if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
		  /* Overflow of the counter.  */
		  return EAGAIN;

		++mutex->__data.__count;

		return 0;
	      }
	  }

	oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
						      id, 0);

	if (oldval != 0)
	  {
	    /* The mutex is locked.  The kernel will now take care of
	       everything.  The timeout value must be a relative value.
	       Convert it.  */
	    int private = (robust
			   ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
			   : PTHREAD_MUTEX_PSHARED (mutex));
	    INTERNAL_SYSCALL_DECL (__err);

	    int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
				      __lll_private_flag (FUTEX_LOCK_PI,
							  private), 1,
				      abstime);
	    if (INTERNAL_SYSCALL_ERROR_P (e, __err))
	      {
		if (INTERNAL_SYSCALL_ERRNO (e, __err) == ETIMEDOUT)
		  return ETIMEDOUT;

		if (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
		    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK)
		  {
		    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
			    || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
				&& kind != PTHREAD_MUTEX_RECURSIVE_NP));
		    /* ESRCH can happen only for non-robust PI mutexes where
		       the owner of the lock died.  */
		    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH
			    || !robust);

		    /* Delay the thread until the timeout is reached.
		       Then return ETIMEDOUT.  */
		    struct timespec reltime;
		    struct timespec now;

		    INTERNAL_SYSCALL (clock_gettime, __err, 2, CLOCK_REALTIME,
				      &now);
		    reltime.tv_sec = abstime->tv_sec - now.tv_sec;
		    reltime.tv_nsec = abstime->tv_nsec - now.tv_nsec;
		    if (reltime.tv_nsec < 0)
		      {
			reltime.tv_nsec += 1000000000;
			--reltime.tv_sec;
		      }
		    if (reltime.tv_sec >= 0)
		      while (nanosleep_not_cancel (&reltime, &reltime) != 0)
			continue;

		    return ETIMEDOUT;
		  }

		return INTERNAL_SYSCALL_ERRNO (e, __err);
	      }

	    oldval = mutex->__data.__lock;

	    assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
	  }

	if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
	  {
	    atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

	    /* We got the mutex.  */
	    mutex->__data.__count = 1;
	    /* But it is inconsistent unless marked otherwise.  */
	    mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

	    ENQUEUE_MUTEX_PI (mutex);
	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

	    /* Note that we deliberately exit here.  If we fall
	       through to the end of the function __nusers would be
	       incremented which is not correct because the old owner
	       has to be discounted.  */
	    return EOWNERDEAD;
	  }

	if (robust
	    && __builtin_expect (mutex->__data.__owner
				 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
	  {
	    /* This mutex is now not recoverable.  */
	    mutex->__data.__count = 0;

	    INTERNAL_SYSCALL_DECL (__err);
	    INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
			      __lll_private_flag (FUTEX_UNLOCK_PI,
						  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
			      0, 0);

	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
	    return ENOTRECOVERABLE;
	  }

	mutex->__data.__count = 1;
	if (robust)
	  {
	    ENQUEUE_MUTEX_PI (mutex);
	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
	  }
	}
      break;

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
	int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

	oldval = mutex->__data.__lock;

	/* Check whether we already hold the mutex.  */
	if (mutex->__data.__owner == id)
	  {
	    if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
	      return EDEADLK;

	    if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
	      {
		/* Just bump the counter.  */
		if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
		  /* Overflow of the counter.  */
		  return EAGAIN;

		++mutex->__data.__count;

		return 0;
	      }
	  }

	int oldprio = -1, ceilval;
	do
	  {
	    int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
			  >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

	    if (__pthread_current_priority () > ceiling)
	      {
		result = EINVAL;
	      failpp:
		if (oldprio != -1)
		  __pthread_tpp_change_priority (oldprio, -1);
		return result;
	      }

	    result = __pthread_tpp_change_priority (oldprio, ceiling);
	    if (result)
	      return result;

	    ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
	    oldprio = ceiling;

	    oldval
	      = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
						     ceilval | 1, ceilval);

	    if (oldval == ceilval)
	      break;

	    do
	      {
		oldval
		  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
							 ceilval | 2,
							 ceilval | 1);

		if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
		  break;

		if (oldval != ceilval)
		  {
		    /* Reject invalid timeouts.  */
		    if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
		      {
			result = EINVAL;
			goto failpp;
		      }

		    struct timeval tv;
		    struct timespec rt;

		    /* Get the current time.  */
		    (void) gettimeofday (&tv, NULL);

		    /* Compute relative timeout.  */
		    rt.tv_sec = abstime->tv_sec - tv.tv_sec;
		    rt.tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000;
		    if (rt.tv_nsec < 0)
		      {
			rt.tv_nsec += 1000000000;
			--rt.tv_sec;
		      }

		    /* Already timed out?  */
		    if (rt.tv_sec < 0)
		      {
			result = ETIMEDOUT;
			goto failpp;
		      }

		    lll_futex_timed_wait (&mutex->__data.__lock,
					  ceilval | 2, &rt,
					  PTHREAD_MUTEX_PSHARED (mutex));
		  }
	      }
	    while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
							ceilval | 2, ceilval)
		   != ceilval);
	  }
	while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

	assert (mutex->__data.__owner == 0);
	mutex->__data.__count = 1;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }
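
A minimal sketch of calling pthread_mutex_timedlock, which expects an absolute CLOCK_REALTIME deadline as shown by the conversions above (the two-second timeout and the lock name are illustrative):

#include <pthread.h>
#include <time.h>
#include <errno.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

int
lock_with_timeout (void)
{
  struct timespec abstime;

  /* Build an absolute deadline two seconds from now.  */
  clock_gettime (CLOCK_REALTIME, &abstime);
  abstime.tv_sec += 2;

  int err = pthread_mutex_timedlock (&lock, &abstime);
  if (err == ETIMEDOUT)
    {
      fprintf (stderr, "lock not acquired within 2 seconds\n");
      return err;
    }
  if (err != 0)
    return err;

  /* ... critical section ...  */
  pthread_mutex_unlock (&lock);
  return 0;
}
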
Example No. 4
/* Get information about the file NAME relative to FD in ST.  */
int
__fxstatat (int vers, int fd, const char *file, struct stat *st, int flag)
{
  if (vers != _STAT_VER_KERNEL && vers != _STAT_VER_LINUX)
    {
      __set_errno (EINVAL);
      return -1;
    }

  int res;

#ifdef __NR_newfstatat
# ifndef __ASSUME_ATFCTS
  if (__have_atfcts >= 0)
# endif
    {
      res = INLINE_SYSCALL (newfstatat, 4, fd, file, st, flag);
# ifndef __ASSUME_ATFCTS
      if (res == -1 && errno == ENOSYS)
	__have_atfcts = -1;
      else
# endif
	return res;
    }
#endif

#ifndef __ASSUME_ATFCTS
  if ((flag & ~AT_SYMLINK_NOFOLLOW) != 0)
    {
      __set_errno (EINVAL);
      return -1;
    }

  char *buf = NULL;

  if (fd != AT_FDCWD && file[0] != '/')
    {
      size_t filelen = strlen (file);
      if (__builtin_expect (filelen == 0, 0))
	{
	  __set_errno (ENOENT);
	  return -1;
	}

      static const char procfd[] = "/proc/self/fd/%d/%s";
      /* Buffer for the path name we are going to use.  It consists of
	 - the string /proc/self/fd/
	 - the file descriptor number
	 - the file name provided.
	 The final NUL is included in the sizeof.   A bit of overhead
	 due to the format elements compensates for possible negative
	 numbers.  */
      size_t buflen = sizeof (procfd) + sizeof (int) * 3 + filelen;
      buf = alloca (buflen);

      __snprintf (buf, buflen, procfd, fd, file);
      file = buf;
    }

  INTERNAL_SYSCALL_DECL (err);

  if (flag & AT_SYMLINK_NOFOLLOW)
    res = INTERNAL_SYSCALL (lstat, err, 2, file, CHECK_1 (st));
  else
    res = INTERNAL_SYSCALL (stat, err, 2, file, CHECK_1 (st));

  if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (res, err), 0))
    {
      __atfct_seterrno (INTERNAL_SYSCALL_ERRNO (res, err), fd, buf);
      res = -1;
    }

  return res;
#endif
}
Example No. 5
/* Get information about the file NAME relative to FD in ST.  */
int
__fxstatat (int vers, int fd, const char *file, struct stat *st, int flag)
{
  int result;
  INTERNAL_SYSCALL_DECL (err);
  struct stat64 st64;

#ifdef __NR_fstatat64
# ifndef __ASSUME_ATFCTS
  if (__have_atfcts >= 0)
# endif
    {
      result = INTERNAL_SYSCALL (fstatat64, err, 4, fd, file, &st64, flag);
# ifndef __ASSUME_ATFCTS
      if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (result, err), 1)
	  && INTERNAL_SYSCALL_ERRNO (result, err) == ENOSYS)
	__have_atfcts = -1;
      else
# endif
	if (!__builtin_expect (INTERNAL_SYSCALL_ERROR_P (result, err), 1))
	  return __xstat32_conv (vers, &st64, st);
	else
	  {
	    __set_errno (INTERNAL_SYSCALL_ERRNO (result, err));
	    return -1;
	  }
    }
#endif

#ifndef __ASSUME_ATFCTS
  if (__builtin_expect (flag & ~AT_SYMLINK_NOFOLLOW, 0))
    {
      __set_errno (EINVAL);
      return -1;
    }

  char *buf = NULL;

  if (fd != AT_FDCWD && file[0] != '/')
    {
      size_t filelen = strlen (file);
      static const char procfd[] = "/proc/self/fd/%d/%s";
      /* Buffer for the path name we are going to use.  It consists of
	 - the string /proc/self/fd/
	 - the file descriptor number
	 - the file name provided.
	 The final NUL is included in the sizeof.   A bit of overhead
	 due to the format elements compensates for possible negative
	 numbers.  */
      size_t buflen = sizeof (procfd) + sizeof (int) * 3 + filelen;
      buf = alloca (buflen);

      __snprintf (buf, buflen, procfd, fd, file);
      file = buf;
    }

# if __ASSUME_STAT64_SYSCALL == 0
  struct kernel_stat kst;
# endif
  if (vers == _STAT_VER_KERNEL)
    {
      if (flag & AT_SYMLINK_NOFOLLOW)
	result = INTERNAL_SYSCALL (lstat, err, 2, CHECK_STRING (file),
				   CHECK_1 ((struct kernel_stat *) st));
      else
	result = INTERNAL_SYSCALL (stat, err, 2, CHECK_STRING (file),
				   CHECK_1 ((struct kernel_stat *) st));
      goto out;
    }

# if __ASSUME_STAT64_SYSCALL > 0

  if (flag & AT_SYMLINK_NOFOLLOW)
    result = INTERNAL_SYSCALL (lstat64, err, 2, CHECK_STRING (file),
			       __ptrvalue (&st64));
  else
    result = INTERNAL_SYSCALL (stat64, err, 2, CHECK_STRING (file),
			       __ptrvalue (&st64));
  if (__builtin_expect (!INTERNAL_SYSCALL_ERROR_P (result, err), 1))
    return __xstat32_conv (vers, &st64, st);
# else
#  if defined __NR_stat64
  /* To support 32 bit UIDs, we have to use stat64.  The normal stat
     call only returns 16 bit UIDs.  */
  if (! __have_no_stat64)
    {
      if (flag & AT_SYMLINK_NOFOLLOW)
	result = INTERNAL_SYSCALL (lstat64, err, 2, CHECK_STRING (file),
				   __ptrvalue (&st64));
      else
	result = INTERNAL_SYSCALL (stat64, err, 2, CHECK_STRING (file),
				   __ptrvalue (&st64));

      if (__builtin_expect (!INTERNAL_SYSCALL_ERROR_P (result, err), 1))
	result = __xstat32_conv (vers, &st64, st);

      if (__builtin_expect (!INTERNAL_SYSCALL_ERROR_P (result, err), 1)
	  || INTERNAL_SYSCALL_ERRNO (result, err) != ENOSYS)
	goto out;

      __have_no_stat64 = 1;
    }
#  endif
  if (flag & AT_SYMLINK_NOFOLLOW)
    result = INTERNAL_SYSCALL (lstat, err, 2, CHECK_STRING (file),
			       __ptrvalue (&kst));
  else
    result = INTERNAL_SYSCALL (stat, err, 2, CHECK_STRING (file),
			       __ptrvalue (&kst));
  if (__builtin_expect (!INTERNAL_SYSCALL_ERROR_P (result, err), 1))
    return __xstat_conv (vers, &kst, st);
# endif  /* __ASSUME_STAT64_SYSCALL  */

 out:
  if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (result, err), 0))
    {
      __atfct_seterrno (INTERNAL_SYSCALL_ERRNO (result, err), fd, buf);
      result = -1;
    }

  return result;
#endif
}
Example No. 6
int
pthread_cancel (pthread_t th)
{
  volatile struct pthread *pd = (volatile struct pthread *) th;

  /* Make sure the descriptor is valid.  */
  if (INVALID_TD_P (pd))
    /* Not a valid thread handle.  */
    return ESRCH;

#ifdef SHARED
  pthread_cancel_init ();
#endif
  int result = 0;
  int oldval;
  int newval;
  do
    {
    again:
      oldval = pd->cancelhandling;
      newval = oldval | CANCELING_BITMASK | CANCELED_BITMASK;

      /* Avoid doing unnecessary work.  The atomic operation can
         potentially be expensive if the bus has to be locked and
         remote cache lines have to be invalidated.  */
      if (oldval == newval)
	break;

      /* If the cancellation is handled asynchronously just send a
	 signal.  We avoid this if possible since it's more
	 expensive.  */
      if (CANCEL_ENABLED_AND_CANCELED_AND_ASYNCHRONOUS (newval))
	{
	  /* Mark the cancellation as "in progress".  */
	  if (atomic_compare_and_exchange_bool_acq (&pd->cancelhandling,
						    oldval | CANCELING_BITMASK,
						    oldval))
	    goto again;

#ifdef SIGCANCEL
	  /* The cancellation handler will take care of marking the
	     thread as canceled.  */
	  pid_t pid = getpid ();

	  INTERNAL_SYSCALL_DECL (err);
	  int val = INTERNAL_SYSCALL_CALL (tgkill, err, pid, pd->tid,
					   SIGCANCEL);
	  if (INTERNAL_SYSCALL_ERROR_P (val, err))
	    result = INTERNAL_SYSCALL_ERRNO (val, err);
#else
          /* It should be impossible to get here at all, since
             pthread_setcanceltype should never have allowed
             PTHREAD_CANCEL_ASYNCHRONOUS to be set.  */
          abort ();
#endif

	  break;
	}

      /* A single-threaded process should be able to kill itself, since
         there is nothing in the POSIX specification that says that it
         cannot.  So we set multiple_threads to true so that cancellation
         points get executed.  */
      THREAD_SETMEM (THREAD_SELF, header.multiple_threads, 1);
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
      __pthread_multiple_threads = *__libc_multiple_threads_ptr = 1;
#endif
    }
  /* Mark the thread as canceled.  This has to be done
     atomically since other bits could be modified as well.  */
  while (atomic_compare_and_exchange_bool_acq (&pd->cancelhandling, newval,
					       oldval));

  return result;
}
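
A caller-side sketch of pthread_cancel: after cancellation the join status of the target thread is PTHREAD_CANCELED (the sleeping worker is illustrative; sleep is a cancellation point):

#include <pthread.h>
#include <unistd.h>
#include <stdio.h>

static void *
worker (void *arg)
{
  for (;;)
    sleep (1);          /* Cancellation point.  */
  return NULL;
}

int
main (void)
{
  pthread_t th;
  void *status;

  pthread_create (&th, NULL, worker, NULL);
  pthread_cancel (th);
  pthread_join (th, &status);

  if (status == PTHREAD_CANCELED)
    puts ("worker was canceled");
  return 0;
}
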
Example No. 7
int
fchownat (int fd, const char *file, uid_t owner, gid_t group, int flag)
{
  if (flag & ~AT_SYMLINK_NOFOLLOW)
    {
      __set_errno (EINVAL);
      return -1;
    }

  char *buf = NULL;

  if (fd != AT_FDCWD && file[0] != '/')
    {
      size_t filelen = strlen (file);
      static const char procfd[] = "/proc/self/fd/%d/%s";
      /* Buffer for the path name we are going to use.  It consists of
	 - the string /proc/self/fd/
	 - the file descriptor number
	 - the file name provided.
	 The final NUL is included in the sizeof.   A bit of overhead
	 due to the format elements compensates for possible negative
	 numbers.  */
      size_t buflen = sizeof (procfd) + sizeof (int) * 3 + filelen;
      buf = alloca (buflen);

      __snprintf (buf, buflen, procfd, fd, file);
      file = buf;
    }

  int result;
  INTERNAL_SYSCALL_DECL (err);

#if __ASSUME_LCHOWN_SYSCALL
  if (flag & AT_SYMLINK_NOFOLLOW)
    result = INTERNAL_SYSCALL (lchown, err, 3, file, owner, group);
  else
    result = INTERNAL_SYSCALL (chown, err, 3, file, owner, group);
#else
  char link[PATH_MAX + 2];
  char path[2 * PATH_MAX + 4];
  int loopct;
  size_t filelen;
  static int libc_old_chown = 0 /* -1=old linux, 1=new linux, 0=unknown */;

  if (libc_old_chown == 1)
    {
      if (flag & AT_SYMLINK_NOFOLLOW)
	result = INTERNAL_SYSCALL (lchown, err, 3, __ptrvalue (file), owner,
				   group);
      else
	result = INTERNAL_SYSCALL (chown, err, 3, __ptrvalue (file), owner,
				   group);
      goto out;
    }

# ifdef __NR_lchown
  if (flag & AT_SYMLINK_NOFOLLOW)
    {
      result = INTERNAL_SYSCALL (lchown, err, 3, __ptrvalue (file), owner,
				 group);
      goto out;
    }

  if (libc_old_chown == 0)
    {
      result = INTERNAL_SYSCALL (chown, err, 3, __ptrvalue (file), owner,
				 group);
      if (__builtin_expect (!INTERNAL_SYSCALL_ERROR_P (result, err), 1))
	return result;
      if (INTERNAL_SYSCALL_ERRNO (result, err) != ENOSYS)
	{
	  libc_old_chown = 1;
	  goto fail;
	}
      libc_old_chown = -1;
    }
# else
  if (flag & AT_SYMLINK_NOFOLLOW)
    {
      result = INTERNAL_SYSCALL (chown, err, 3, __ptrvalue (file), owner,
				 group);
      goto out;
    }
# endif

  result = __readlink (file, link, PATH_MAX + 1);
  if (result == -1)
    {
# ifdef __NR_lchown
      result = INTERNAL_SYSCALL (lchown, err, 3, __ptrvalue (file), owner,
				 group);
# else
      result = INTERNAL_SYSCALL (chown, err, 3, __ptrvalue (file), owner,
				 group);
# endif
      goto out;
    }

  filelen = strlen (file) + 1;
  if (filelen > sizeof (path))
    {
      errno = ENAMETOOLONG;
      return -1;
    }
  memcpy (path, file, filelen);

  /* 'The system has an arbitrary limit...'  In practice, we'll hit
     ENAMETOOLONG before this, usually.  */
  for (loopct = 0; loopct < 128; ++loopct)
    {
      size_t linklen;

      if (result >= PATH_MAX + 1)
	{
	  errno = ENAMETOOLONG;
	  return -1;
	}

      link[result] = 0;  /* Null-terminate string, just-in-case.  */

      linklen = strlen (link) + 1;

      if (link[0] == '/')
	memcpy (path, link, linklen);
      else
	{
	  filelen = strlen (path);

	  while (filelen > 1 && path[filelen - 1] == '/')
	    --filelen;
	  while (filelen > 0 && path[filelen - 1] != '/')
	    --filelen;
	  if (filelen + linklen > sizeof (path))
	    {
	      errno = ENAMETOOLONG;
	      return -1;
	    }
	  memcpy (path + filelen, link, linklen);
	}

      result = __readlink (path, link, PATH_MAX + 1);

      if (result == -1)
	{
# ifdef __NR_lchown
	  result = INTERNAL_SYSCALL (lchown, err, 3, path, owner, group);
# else
	  result = INTERNAL_SYSCALL (chown, err, 3, path, owner, group);
# endif
	  goto out;
	}
    }
  __set_errno (ELOOP);
  return -1;

 out:
#endif

  if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (result, err), 0))
    {
#if !__ASSUME_LCHOWN_SYSCALL
    fail:
#endif
      __atfct_seterrno (INTERNAL_SYSCALL_ERRNO (result, err), fd, buf);
      result = -1;
    }

  return result;
}
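
A usage sketch of the public fchownat interface implemented above.  Changing ownership normally requires privilege, so the path and the uid/gid values of 1000 are purely illustrative:

#include <fcntl.h>              /* AT_FDCWD, AT_SYMLINK_NOFOLLOW */
#include <unistd.h>
#include <stdio.h>

int
main (void)
{
  /* Change the owner of the symlink itself rather than its target.  */
  if (fchownat (AT_FDCWD, "/tmp/example-link", 1000, 1000,
                AT_SYMLINK_NOFOLLOW) == -1)
    {
      perror ("fchownat");
      return 1;
    }
  return 0;
}
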
Example No. 8
/* Get information about the file NAME in BUF.  */
int
__fxstatat (int vers, int fd, const char *file, struct stat *st, int flag)
{
    int result;
    INTERNAL_SYSCALL_DECL (err);
#ifdef STAT_IS_KERNEL_STAT
# define kst (*st)
#else
    struct kernel_stat kst;
#endif

#ifdef __NR_newfstatat
# ifndef __ASSUME_ATFCTS
    if (__have_atfcts >= 0)
# endif
    {
        result = INTERNAL_SYSCALL (newfstatat, err, 4, fd, file, &kst, flag);
# ifndef __ASSUME_ATFCTS
        if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (result, err), 1)
                && INTERNAL_SYSCALL_ERRNO (result, err) == ENOSYS)
            __have_atfcts = -1;
        else
# endif
            if (!__builtin_expect (INTERNAL_SYSCALL_ERROR_P (result, err), 1))
            {
#ifdef STAT_IS_KERNEL_STAT
                return 0;
#else
                return __xstat_conv (vers, &kst, st);
#endif
            }
            else
            {
                __set_errno (INTERNAL_SYSCALL_ERRNO (result, err));
                return -1;
            }
    }
#endif

    if (flag & ~AT_SYMLINK_NOFOLLOW)
    {
        __set_errno (EINVAL);
        return -1;
    }

    char *buf = NULL;

    if (fd != AT_FDCWD && file[0] != '/')
    {
        size_t filelen = strlen (file);
        if (__builtin_expect (filelen == 0, 0))
        {
            __set_errno (ENOENT);
            return -1;
        }

        static const char procfd[] = "/proc/self/fd/%d/%s";
        /* Buffer for the path name we are going to use.  It consists of
        - the string /proc/self/fd/
         - the file descriptor number
         - the file name provided.
         The final NUL is included in the sizeof.   A bit of overhead
         due to the format elements compensates for possible negative
         numbers.  */
        size_t buflen = sizeof (procfd) + sizeof (int) * 3 + filelen;
        buf = alloca (buflen);

        __snprintf (buf, buflen, procfd, fd, file);
        file = buf;
    }

    if (vers == _STAT_VER_KERNEL)
    {
        if (flag & AT_SYMLINK_NOFOLLOW)
            result = INTERNAL_SYSCALL (lstat, err, 2, file,
                                       (struct kernel_stat *) st);
        else
            result = INTERNAL_SYSCALL (stat, err, 2, file,
                                       (struct kernel_stat *) st);

        if (__builtin_expect (!INTERNAL_SYSCALL_ERROR_P (result, err), 1))
            return result;
    }
#ifdef STAT_IS_KERNEL_STAT
    else
    {
        __set_errno (EINVAL);
        return -1;
    }
#else
    if (flag & AT_SYMLINK_NOFOLLOW)
        result = INTERNAL_SYSCALL (lstat, err, 2, file, &kst);
    else
        result = INTERNAL_SYSCALL (stat, err, 2, file, &kst);

    if (__builtin_expect (!INTERNAL_SYSCALL_ERROR_P (result, err), 1))
        return __xstat_conv (vers, &kst, st);
#endif

    __atfct_seterrno (INTERNAL_SYSCALL_ERRNO (result, err), fd, buf);

    return -1;
}
Example No. 9
void
__pthread_initialize_minimal_internal (void)
{
#ifndef SHARED
  /* Unlike in the dynamically linked case the dynamic linker has not
     taken care of initializing the TLS data structures.  */
  __libc_setup_tls (TLS_TCB_SIZE, TLS_TCB_ALIGN);

  /* We must prevent gcc from being clever and move any of the
     following code ahead of the __libc_setup_tls call.  This function
     will initialize the thread register which is subsequently
     used.  */
  __asm __volatile ("");
#endif

  /* Minimal initialization of the thread descriptor.  */
  struct pthread *pd = THREAD_SELF;
  __pthread_initialize_pids (pd);
  THREAD_SETMEM (pd, specific[0], &pd->specific_1stblock[0]);
  THREAD_SETMEM (pd, user_stack, true);
  if (LLL_LOCK_INITIALIZER != 0)
    THREAD_SETMEM (pd, lock, LLL_LOCK_INITIALIZER);
#if HP_TIMING_AVAIL
  THREAD_SETMEM (pd, cpuclock_offset, GL(dl_cpuclock_offset));
#endif

  /* Initialize the robust mutex data.  */
  {
#ifdef __PTHREAD_MUTEX_HAVE_PREV
    pd->robust_prev = &pd->robust_head;
#endif
    pd->robust_head.list = &pd->robust_head;
#ifdef __NR_set_robust_list
    pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
				    - offsetof (pthread_mutex_t,
						__data.__list.__next));
    INTERNAL_SYSCALL_DECL (err);
    int res = INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
				sizeof (struct robust_list_head));
    if (INTERNAL_SYSCALL_ERROR_P (res, err))
#endif
      set_robust_list_not_avail ();
  }

#ifdef __NR_futex
# ifndef __ASSUME_PRIVATE_FUTEX
  /* Private futexes are always used (at least internally) so that
     doing the test once this early is beneficial.  */
  {
    int word = 0;
    INTERNAL_SYSCALL_DECL (err);
    word = INTERNAL_SYSCALL (futex, err, 3, &word,
			    FUTEX_WAKE | FUTEX_PRIVATE_FLAG, 1);
    if (!INTERNAL_SYSCALL_ERROR_P (word, err))
      THREAD_SETMEM (pd, header.private_futex, FUTEX_PRIVATE_FLAG);
  }

  /* Private futexes have been introduced earlier than the
     FUTEX_CLOCK_REALTIME flag.  We don't have to run the test if we
     know the former are not supported.  This also means we know the
     kernel will return ENOSYS for unknown operations.  */
  if (THREAD_GETMEM (pd, header.private_futex) != 0)
# endif
# ifndef __ASSUME_FUTEX_CLOCK_REALTIME
    {
      int word = 0;
      /* NB: the syscall actually takes six parameters.  The last is the
	 bit mask.  But since we will not actually wait at all the value
	 is irrelevant.  Given that passing six parameters is difficult
	 on some architectures we just pass whatever random value the
	 calling convention calls for to the kernel.  It causes no harm.  */
      INTERNAL_SYSCALL_DECL (err);
      word = INTERNAL_SYSCALL (futex, err, 5, &word,
			       FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME
			       | FUTEX_PRIVATE_FLAG, 1, NULL, 0);
      assert (INTERNAL_SYSCALL_ERROR_P (word, err));
      if (INTERNAL_SYSCALL_ERRNO (word, err) != ENOSYS)
	__set_futex_clock_realtime ();
    }
# endif
#endif

  /* Set initial thread's stack block from 0 up to __libc_stack_end.
     It will be bigger than it actually is, but for unwind.c/pt-longjmp.c
     purposes this is good enough.  */
  THREAD_SETMEM (pd, stackblock_size, (size_t) __libc_stack_end);

  /* Initialize the list of all running threads with the main thread.  */
  INIT_LIST_HEAD (&__stack_user);
  list_add (&pd->list, &__stack_user);

  /* Before initializing __stack_user, the debugger could not find us and
     had to set __nptl_initial_report_events.  Propagate its setting.  */
  THREAD_SETMEM (pd, report_events, __nptl_initial_report_events);

#if defined SIGCANCEL || defined SIGSETXID
  struct sigaction sa;
  __sigemptyset (&sa.sa_mask);

# ifdef SIGCANCEL
  /* Install the cancellation signal handler.  If for some reason we
     cannot install the handler we do not abort.  Maybe we should, but
     it is only asynchronous cancellation which is affected.  */
  sa.sa_sigaction = sigcancel_handler;
  sa.sa_flags = SA_SIGINFO;
  (void) __libc_sigaction (SIGCANCEL, &sa, NULL);
# endif

# ifdef SIGSETXID
  /* Install the handle to change the threads' uid/gid.  */
  sa.sa_sigaction = sighandler_setxid;
  sa.sa_flags = SA_SIGINFO | SA_RESTART;
  (void) __libc_sigaction (SIGSETXID, &sa, NULL);
# endif

  /* The parent process might have left the signals blocked.  Just in
     case, unblock it.  We reuse the signal mask in the sigaction
     structure.  It is already cleared.  */
# ifdef SIGCANCEL
  __sigaddset (&sa.sa_mask, SIGCANCEL);
# endif
# ifdef SIGSETXID
  __sigaddset (&sa.sa_mask, SIGSETXID);
# endif
  {
    INTERNAL_SYSCALL_DECL (err);
    (void) INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_UNBLOCK, &sa.sa_mask,
			     NULL, _NSIG / 8);
  }
#endif

  /* Get the size of the static and alignment requirements for the TLS
     block.  */
  size_t static_tls_align;
  _dl_get_tls_static_info (&__static_tls_size, &static_tls_align);

  /* Make sure the size takes all the alignments into account.  */
  if (STACK_ALIGN > static_tls_align)
    static_tls_align = STACK_ALIGN;
  __static_tls_align_m1 = static_tls_align - 1;

  __static_tls_size = roundup (__static_tls_size, static_tls_align);

  /* Determine the default allowed stack size.  This is the size used
     in case the user does not specify one.  */
  struct rlimit limit;
  if (__getrlimit (RLIMIT_STACK, &limit) != 0
      || limit.rlim_cur == RLIM_INFINITY)
    /* The system limit is not usable.  Use an architecture-specific
       default.  */
    limit.rlim_cur = ARCH_STACK_DEFAULT_SIZE;
  else if (limit.rlim_cur < PTHREAD_STACK_MIN)
    /* The system limit is unusably small.
       Use the minimal size acceptable.  */
    limit.rlim_cur = PTHREAD_STACK_MIN;

  /* Make sure it meets the minimum size that allocate_stack
     (allocatestack.c) will demand, which depends on the page size.  */
  const uintptr_t pagesz = GLRO(dl_pagesize);
  const size_t minstack = pagesz + __static_tls_size + MINIMAL_REST_STACK;
  if (limit.rlim_cur < minstack)
    limit.rlim_cur = minstack;

  /* Round the resource limit up to page size.  */
  limit.rlim_cur = ALIGN_UP (limit.rlim_cur, pagesz);
  lll_lock (__default_pthread_attr_lock, LLL_PRIVATE);
  __default_pthread_attr.stacksize = limit.rlim_cur;
  __default_pthread_attr.guardsize = GLRO (dl_pagesize);
  lll_unlock (__default_pthread_attr_lock, LLL_PRIVATE);

#ifdef SHARED
  /* Transfer the old value from the dynamic linker's internal location.  */
  *__libc_dl_error_tsd () = *(*GL(dl_error_catch_tsd)) ();
  GL(dl_error_catch_tsd) = &__libc_dl_error_tsd;

  /* Make __rtld_lock_{,un}lock_recursive use pthread_mutex_{,un}lock,
     keep the lock count from the ld.so implementation.  */
  GL(dl_rtld_lock_recursive) = (void *) __pthread_mutex_lock;
  GL(dl_rtld_unlock_recursive) = (void *) __pthread_mutex_unlock;
  unsigned int rtld_lock_count = GL(dl_load_lock).mutex.__data.__count;
  GL(dl_load_lock).mutex.__data.__count = 0;
  while (rtld_lock_count-- > 0)
    __pthread_mutex_lock (&GL(dl_load_lock).mutex);

  GL(dl_make_stack_executable_hook) = &__make_stacks_executable;
#endif

  GL(dl_init_static_tls) = &__pthread_init_static_tls;

  GL(dl_wait_lookup_done) = &__wait_lookup_done;

  /* Register the fork generation counter with the libc.  */
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
  __libc_multiple_threads_ptr =
#endif
    __libc_pthread_init (&__fork_generation, __reclaim_stacks,
			 ptr_pthread_functions);

  /* Determine whether the machine is SMP or not.  */
  __is_smp = is_smp_system ();
}
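
The initialization above derives the default thread stack size from the RLIMIT_STACK soft limit, clamped and rounded to the page size.  A minimal sketch of the same query through the public getrlimit interface:

#include <sys/resource.h>
#include <stdio.h>

int
main (void)
{
  struct rlimit limit;

  /* The same resource queried when the default stack size is computed.  */
  if (getrlimit (RLIMIT_STACK, &limit) != 0)
    {
      perror ("getrlimit");
      return 1;
    }

  if (limit.rlim_cur == RLIM_INFINITY)
    puts ("RLIMIT_STACK is unlimited; an architecture default applies");
  else
    printf ("RLIMIT_STACK soft limit: %llu bytes\n",
            (unsigned long long) limit.rlim_cur);
  return 0;
}
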
Example No. 10
int
timer_create (clockid_t clock_id, struct sigevent *evp, timer_t *timerid)
{
#undef timer_create
  {
    clockid_t syscall_clockid = (clock_id == CLOCK_PROCESS_CPUTIME_ID
				 ? MAKE_PROCESS_CPUCLOCK (0, CPUCLOCK_SCHED)
				 : clock_id == CLOCK_THREAD_CPUTIME_ID
				 ? MAKE_THREAD_CPUCLOCK (0, CPUCLOCK_SCHED)
				 : clock_id);

    /* If the user wants notification via a thread we need to handle
       this special.  */
    if (evp == NULL
	|| __builtin_expect (evp->sigev_notify != SIGEV_THREAD, 1))
      {
	struct sigevent local_evp;

	/* We avoid allocating too much memory by basically
	   using struct timer as a derived class with the
	   first two elements being in the superclass.  We only
	   need these two elements here.  */
	struct timer *newp = (struct timer *) malloc (offsetof (struct timer,
								thrfunc));
	if (newp == NULL)
	  /* No more memory.  */
	  return -1;

	if (evp == NULL)
	  {
	    /* The kernel has to pass up the timer ID which is a
	       userlevel object.  Therefore we cannot leave it up to
	       the kernel to determine it.  */
	    local_evp.sigev_notify = SIGEV_SIGNAL;
	    local_evp.sigev_signo = SIGALRM;
	    local_evp.sigev_value.sival_ptr = newp;

	    evp = &local_evp;
	  }

	kernel_timer_t ktimerid;
	int retval = INLINE_SYSCALL (timer_create, 3, syscall_clockid, evp,
				     &ktimerid);

	if (retval != -1)
	  {
	    newp->sigev_notify = (evp != NULL
				  ? evp->sigev_notify : SIGEV_SIGNAL);
	    newp->ktimerid = ktimerid;

	    *timerid = (timer_t) newp;
	  }
	else
	  {
	    /* Cannot allocate the timer, fail.  */
	    free (newp);
	    retval = -1;
	  }

	return retval;
      }
    else
      {
	/* Create the helper thread.  */
	pthread_once (&__helper_once, __start_helper_thread);
	if (__helper_tid == 0)
	  {
	    /* No resources to start the helper thread.  */
	    __set_errno (EAGAIN);
	    return -1;
	  }

	struct timer *newp;
	newp = (struct timer *) malloc (sizeof (struct timer));
	if (newp == NULL)
	  return -1;

	/* Copy the thread parameters the user provided.  */
	newp->sival = evp->sigev_value;
	newp->thrfunc = evp->sigev_notify_function;
	newp->sigev_notify = SIGEV_THREAD;

	/* We cannot simply copy the thread attributes since the
	   implementation might keep internal information for
	   each instance.  */
	(void) pthread_attr_init (&newp->attr);
	if (evp->sigev_notify_attributes != NULL)
	  {
	    struct pthread_attr *nattr;
	    struct pthread_attr *oattr;

	    nattr = (struct pthread_attr *) &newp->attr;
	    oattr = (struct pthread_attr *) evp->sigev_notify_attributes;

	    nattr->schedparam = oattr->schedparam;
	    nattr->schedpolicy = oattr->schedpolicy;
	    nattr->flags = oattr->flags;
	    nattr->guardsize = oattr->guardsize;
	    nattr->stackaddr = oattr->stackaddr;
	    nattr->stacksize = oattr->stacksize;
	  }

	/* In any case set the detach flag.  */
	(void) pthread_attr_setdetachstate (&newp->attr,
					    PTHREAD_CREATE_DETACHED);

	/* Create the event structure for the kernel timer.  */
	struct sigevent sev =
	  { .sigev_value.sival_ptr = newp,
	    .sigev_signo = SIGTIMER,
	    .sigev_notify = SIGEV_SIGNAL | SIGEV_THREAD_ID,
	    ._sigev_un = { ._pad = { [0] = __helper_tid } } };

	/* Create the timer.  */
	INTERNAL_SYSCALL_DECL (err);
	int res;
	res = INTERNAL_SYSCALL (timer_create, err, 3,
				syscall_clockid, &sev, &newp->ktimerid);
	if (! INTERNAL_SYSCALL_ERROR_P (res, err))
	  {
	    /* Add to the queue of active timers with thread
	       delivery.  */
	    pthread_mutex_lock (&__active_timer_sigev_thread_lock);
	    newp->next = __active_timer_sigev_thread;
	    __active_timer_sigev_thread = newp;
	    pthread_mutex_unlock (&__active_timer_sigev_thread_lock);

	    *timerid = (timer_t) newp;
	    return 0;
	  }

	/* Free the resources.  */
	free (newp);

	__set_errno (INTERNAL_SYSCALL_ERRNO (res, err));

	return -1;
      }
  }
}
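
A caller-side sketch of the SIGEV_THREAD path handled above: the expiration callback runs on a thread started via the helper thread.  The one-second period is illustrative; older glibc needs -lrt at link time.

#include <signal.h>
#include <time.h>
#include <stdio.h>
#include <unistd.h>

static void
notify_cb (union sigval sv)
{
  printf ("timer fired, cookie=%d\n", sv.sival_int);
}

int
main (void)
{
  timer_t timerid;
  struct sigevent sev = { 0 };
  struct itimerspec its = { 0 };

  sev.sigev_notify = SIGEV_THREAD;
  sev.sigev_notify_function = notify_cb;
  sev.sigev_value.sival_int = 42;

  if (timer_create (CLOCK_REALTIME, &sev, &timerid) == -1)
    {
      perror ("timer_create");
      return 1;
    }

  /* Fire after one second, then once per second.  */
  its.it_value.tv_sec = 1;
  its.it_interval.tv_sec = 1;
  timer_settime (timerid, 0, &its, NULL);

  sleep (5);
  timer_delete (timerid);
  return 0;
}
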
Example No. 11
int
clock_getcpuclockid (pid_t pid, clockid_t *clock_id)
{
#ifdef __NR_clock_getres
  /* The clockid_t value is a simple computation from the PID.
     But we do a clock_getres call to validate it.  */

  const clockid_t pidclock = MAKE_PROCESS_CPUCLOCK (pid, CPUCLOCK_SCHED);

# if !(__ASSUME_POSIX_CPU_TIMERS > 0)
  extern int __libc_missing_posix_cpu_timers attribute_hidden;
#  if !(__ASSUME_POSIX_TIMERS > 0)
  extern int __libc_missing_posix_timers attribute_hidden;
  if (__libc_missing_posix_timers && !__libc_missing_posix_cpu_timers)
    __libc_missing_posix_cpu_timers = 1;
#  endif
  if (!__libc_missing_posix_cpu_timers)
# endif
    {
      INTERNAL_SYSCALL_DECL (err);
      int r = INTERNAL_SYSCALL (clock_getres, err, 2, pidclock, NULL);
      if (!INTERNAL_SYSCALL_ERROR_P (r, err))
	{
	  *clock_id = pidclock;
	  return 0;
	}

# if !(__ASSUME_POSIX_TIMERS > 0)
      if (INTERNAL_SYSCALL_ERRNO (r, err) == ENOSYS)
	{
	  /* The kernel doesn't support these calls at all.  */
	  __libc_missing_posix_timers = 1;
	  __libc_missing_posix_cpu_timers = 1;
	}
      else
# endif
	if (INTERNAL_SYSCALL_ERRNO (r, err) == EINVAL)
	  {
# if !(__ASSUME_POSIX_CPU_TIMERS > 0)
	    if (pidclock == MAKE_PROCESS_CPUCLOCK (0, CPUCLOCK_SCHED)
		|| INTERNAL_SYSCALL_ERROR_P (INTERNAL_SYSCALL
					     (clock_getres, err, 2,
					      MAKE_PROCESS_CPUCLOCK
					      (0, CPUCLOCK_SCHED), NULL),
					     err))
	      /* The kernel doesn't support these clocks at all.  */
	      __libc_missing_posix_cpu_timers = 1;
	    else
# endif
	      /* The clock_getres system call checked the PID for us.  */
	      return ESRCH;
	  }
	else
	  return INTERNAL_SYSCALL_ERRNO (r, err);
    }
#endif

  /* We don't allow any process ID but our own.  */
  if (pid != 0 && pid != getpid ())
    return EPERM;

#ifdef CLOCK_PROCESS_CPUTIME_ID
  if (HAS_CPUCLOCK)
    {
      /* Store the number.  */
      *clock_id = CLOCK_PROCESS_CPUTIME_ID;

      return 0;
    }
#endif

  /* We don't have a timer for that.  */
  return ENOENT;
}
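
A sketch of using clock_getcpuclockid together with clock_gettime to read a process's CPU time; pid 0 names the calling process:

#include <time.h>
#include <string.h>
#include <stdio.h>

int
main (void)
{
  clockid_t cpuclock;
  struct timespec ts;

  int err = clock_getcpuclockid (0, &cpuclock);
  if (err != 0)
    {
      fprintf (stderr, "clock_getcpuclockid: %s\n", strerror (err));
      return 1;
    }

  clock_gettime (cpuclock, &ts);
  printf ("CPU time used: %ld.%09ld s\n", (long) ts.tv_sec, ts.tv_nsec);
  return 0;
}
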
Example No. 12
static int
__pthread_mutex_lock_full (pthread_mutex_t *mutex)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  switch (PTHREAD_MUTEX_TYPE (mutex))
    {
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
		     &mutex->__data.__list.__next);

      oldval = mutex->__data.__lock;
      do
	{
	again:
	  if ((oldval & FUTEX_OWNER_DIED) != 0)
	    {
	      /* The previous owner died.  Try locking the mutex.  */
	      int newval = id;
#ifdef NO_INCR
	      newval |= FUTEX_WAITERS;
#else
	      newval |= (oldval & FUTEX_WAITERS);
#endif

	      newval
		= atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
						       newval, oldval);

	      if (newval != oldval)
		{
		  oldval = newval;
		  goto again;
		}

	      /* We got the mutex.  */
	      mutex->__data.__count = 1;
	      /* But it is inconsistent unless marked otherwise.  */
	      mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

	      ENQUEUE_MUTEX (mutex);
	      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

	      /* Note that we deliberately exit here.  If we fall
		 through to the end of the function __nusers would be
		 incremented which is not correct because the old
		 owner has to be discounted.  If we are not supposed
		 to increment __nusers we actually have to decrement
		 it here.  */
#ifdef NO_INCR
	      --mutex->__data.__nusers;
#endif

	      return EOWNERDEAD;
	    }

	  /* Check whether we already hold the mutex.  */
	  if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
	    {
	      int kind = PTHREAD_MUTEX_TYPE (mutex);
	      if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
		{
		  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
				 NULL);
		  return EDEADLK;
		}

	      if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
		{
		  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
				 NULL);

		  /* Just bump the counter.  */
		  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
		    /* Overflow of the counter.  */
		    return EAGAIN;

		  ++mutex->__data.__count;

		  return 0;
		}
	    }

	  oldval = LLL_ROBUST_MUTEX_LOCK (mutex, id);

	  if (__builtin_expect (mutex->__data.__owner
				== PTHREAD_MUTEX_NOTRECOVERABLE, 0))
	    {
	      /* This mutex is now not recoverable.  */
	      mutex->__data.__count = 0;
	      lll_unlock (mutex->__data.__lock,
			  PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
	      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
	      return ENOTRECOVERABLE;
	    }
	}
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      mutex->__data.__count = 1;
      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;

    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
	int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
	int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

	if (robust)
	  /* Note: robust PI futexes are signaled by setting bit 0.  */
	  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
			 (void *) (((uintptr_t) &mutex->__data.__list.__next)
				   | 1));

	oldval = mutex->__data.__lock;

	/* Check whether we already hold the mutex.  */
	if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
	  {
	    if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
	      {
		THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
		return EDEADLK;
	      }

	    if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
	      {
		THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

		/* Just bump the counter.  */
		if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
		  /* Overflow of the counter.  */
		  return EAGAIN;

		++mutex->__data.__count;

		return 0;
	      }
	  }

	int newval = id;
#ifdef NO_INCR
	newval |= FUTEX_WAITERS;
#endif
	oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
						      newval, 0);

	if (oldval != 0)
	  {
	    /* The mutex is locked.  The kernel will now take care of
	       everything.  */
	    int private = (robust
			   ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
			   : PTHREAD_MUTEX_PSHARED (mutex));
	    INTERNAL_SYSCALL_DECL (__err);
	    int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
				      __lll_private_flag (FUTEX_LOCK_PI,
							  private), 1, 0);

	    if (INTERNAL_SYSCALL_ERROR_P (e, __err)
		&& (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
		    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK))
	      {
		assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
			|| (kind != PTHREAD_MUTEX_ERRORCHECK_NP
			    && kind != PTHREAD_MUTEX_RECURSIVE_NP));
		/* ESRCH can happen only for non-robust PI mutexes where
		   the owner of the lock died.  */
		assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH || !robust);

		/* Delay the thread indefinitely.  */
		while (1)
		  pause_not_cancel ();
	      }

	    oldval = mutex->__data.__lock;

	    assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
	  }

	if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
	  {
	    atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

	    /* We got the mutex.  */
	    mutex->__data.__count = 1;
	    /* But it is inconsistent unless marked otherwise.  */
	    mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

	    ENQUEUE_MUTEX_PI (mutex);
	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

	    /* Note that we deliberately exit here.  If we fall
	       through to the end of the function __nusers would be
	       incremented which is not correct because the old owner
	       has to be discounted.  If we are not supposed to
	       increment __nusers we actually have to decrement it here.  */
#ifdef NO_INCR
	    --mutex->__data.__nusers;
#endif

	    return EOWNERDEAD;
	  }

	if (robust
	    && __builtin_expect (mutex->__data.__owner
				 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
	  {
	    /* This mutex is now not recoverable.  */
	    mutex->__data.__count = 0;

	    INTERNAL_SYSCALL_DECL (__err);
	    INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
			      __lll_private_flag (FUTEX_UNLOCK_PI,
						  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
			      0, 0);

	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
	    return ENOTRECOVERABLE;
	  }

	mutex->__data.__count = 1;
	if (robust)
	  {
	    ENQUEUE_MUTEX_PI (mutex);
	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
	  }
      }
      break;

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
	int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

	oldval = mutex->__data.__lock;

	/* Check whether we already hold the mutex.  */
	if (mutex->__data.__owner == id)
	  {
	    if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
	      return EDEADLK;

	    if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
	      {
		/* Just bump the counter.  */
		if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
		  /* Overflow of the counter.  */
		  return EAGAIN;

		++mutex->__data.__count;

		return 0;
	      }
	  }

	int oldprio = -1, ceilval;
	do
	  {
	    int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
			  >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

	    if (__pthread_current_priority () > ceiling)
	      {
		if (oldprio != -1)
		  __pthread_tpp_change_priority (oldprio, -1);
		return EINVAL;
	      }

	    int retval = __pthread_tpp_change_priority (oldprio, ceiling);
	    if (retval)
	      return retval;

	    ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
	    oldprio = ceiling;

	    oldval
	      = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
#ifdef NO_INCR
						     ceilval | 2,
#else
						     ceilval | 1,
#endif
						     ceilval);

	    if (oldval == ceilval)
	      break;

	    do
	      {
		oldval
		  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
							 ceilval | 2,
							 ceilval | 1);

		if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
		  break;

		if (oldval != ceilval)
		  lll_futex_wait (&mutex->__data.__lock, ceilval | 2,
				  PTHREAD_MUTEX_PSHARED (mutex));
	      }
	    while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
							ceilval | 2, ceilval)
		   != ceilval);
	  }
	while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

	assert (mutex->__data.__owner == 0);
	mutex->__data.__count = 1;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }
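
The robust cases above hand EOWNERDEAD back to the caller when the previous owner died while holding the lock.  A caller-side sketch of the documented recovery protocol, assuming the mutex lives in memory shared with the dead owner (names are illustrative):

#include <pthread.h>
#include <errno.h>
#include <stdio.h>

static pthread_mutex_t shared_lock;

int
init_and_lock (void)
{
  pthread_mutexattr_t attr;

  pthread_mutexattr_init (&attr);
  pthread_mutexattr_setrobust (&attr, PTHREAD_MUTEX_ROBUST);
  pthread_mutex_init (&shared_lock, &attr);
  pthread_mutexattr_destroy (&attr);

  int err = pthread_mutex_lock (&shared_lock);
  if (err == EOWNERDEAD)
    {
      /* The previous owner died holding the lock.  Repair the protected
         state, then mark the mutex consistent so it stays usable.  */
      fprintf (stderr, "recovering state left by dead owner\n");
      pthread_mutex_consistent (&shared_lock);
      err = 0;
    }
  return err;
}
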
Example No. 13
hp_timing_t
__get_clockfreq (void)
{
  /* We read the information from the /proc filesystem.  /proc/cpuinfo
     contains at least one line like:
     timebase        : 33333333
     We search for this line and convert the number into an integer.  */
  static hp_timing_t timebase_freq;
  hp_timing_t result = 0L;

  /* If this function was called before, we know the result.  */
  if (timebase_freq != 0)
    return timebase_freq;

  /* If we can use the vDSO to obtain the timebase even better.  */
#ifdef SHARED
  INTERNAL_SYSCALL_DECL (err);
  timebase_freq =
    INTERNAL_VSYSCALL_NO_SYSCALL_FALLBACK (get_tbfreq, err, hp_timing_t, 0);
  if (INTERNAL_SYSCALL_ERROR_P (timebase_freq, err)
      && INTERNAL_SYSCALL_ERRNO (timebase_freq, err) == ENOSYS)
#endif
    {
      int fd = __open ("/proc/cpuinfo", O_RDONLY);

      if (__builtin_expect (fd != -1, 1))
	{
          /* The timebase will be in the first 1024 bytes for systems with
             up to 8 processors.  If the first read returns fewer than 1024
             bytes, we have the whole cpuinfo and can start the scan.
             Otherwise we will have to read more to ensure we have the
             timebase value in the scan.  */
	  char buf[1024];
	  ssize_t n;

	  n = __read (fd, buf, sizeof (buf));
	  if (n == sizeof (buf))
	    {
              /* We are here because the first read returned exactly sizeof
	         (buf) bytes.  This implies that we are not at EOF and may
	         not have read the timebase value yet.  So we need to read
	         more bytes until we know we have EOF.  We copy the lower
	         half of buf to the upper half and read sizeof (buf)/2
	         bytes into the lower half of buf and repeat until we
	         reach EOF.  We can assume that the timebase will be in
	         the last 512 bytes of cpuinfo, so two 512 byte half_bufs
	         will be sufficient to contain the timebase and will
	         handle the case where the timebase spans the half_buf
                 boundary.  */
	      const ssize_t half_buf = sizeof (buf) / 2;
	      while (n >= half_buf)
		{
		  memcpy (buf, buf + half_buf, half_buf);
		  n = __read (fd, buf + half_buf, half_buf);
		}
	      if (n >= 0)
		n += half_buf;
	    }

	  if (__builtin_expect (n, 1) > 0)
	    {
	      char *mhz = memmem (buf, n, "timebase", 7);

	      if (__builtin_expect (mhz != NULL, 1))
		{
		  char *endp = buf + n;

		  /* Search for the beginning of the string.  */
		  while (mhz < endp && (*mhz < '0' || *mhz > '9')
			 && *mhz != '\n')
		    ++mhz;

		  while (mhz < endp && *mhz != '\n')
		    {
		      if (*mhz >= '0' && *mhz <= '9')
			{
			  result *= 10;
			  result += *mhz - '0';
			}

		      ++mhz;
		    }
		}
	      timebase_freq = result;
	    }
	  __close (fd);
	}
    }

  return timebase_freq;
}
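
A plain stdio sketch of the same lookup: scan /proc/cpuinfo for a "timebase : N" line.  The field exists on PowerPC systems; elsewhere the loop simply finds nothing and 0 is returned:

#include <stdio.h>
#include <string.h>

unsigned long long
read_timebase (void)
{
  FILE *fp = fopen ("/proc/cpuinfo", "r");
  char line[256];
  unsigned long long freq = 0;

  if (fp == NULL)
    return 0;

  while (fgets (line, sizeof (line), fp) != NULL)
    if (strncmp (line, "timebase", 8) == 0)
      {
        /* Line looks like "timebase        : 33333333".  */
        sscanf (line, "timebase : %llu", &freq);
        break;
      }

  fclose (fp);
  return freq;
}
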
Example No. 14
static int
do_clone (struct pthread *pd, const struct pthread_attr *attr,
	  int clone_flags, int (*fct) (void *), STACK_VARIABLES_PARMS,
	  int stopped)
{
#ifdef PREPARE_CREATE
  PREPARE_CREATE;
#endif

  if (__builtin_expect (stopped != 0, 0))
    /* We make sure the thread does not run far by forcing it to get a
       lock.  We lock it here too so that the new thread cannot continue
       until we tell it to.  */
    lll_lock (pd->lock, LLL_PRIVATE);

  /* One more thread.  We cannot have the thread do this itself, since it
     might exist but not have been scheduled yet by the time we've returned
     and need to check the value to behave correctly.  We must do it before
     creating the thread, in case it does get scheduled first and then
     might mistakenly think it was the only thread.  In the failure case,
     we momentarily store a false value; this doesn't matter because there
     is no kosher thing a signal handler interrupting us right here can do
     that cares whether the thread count is correct.  */
  atomic_increment (&__nptl_nthreads);

  int rc = ARCH_CLONE (fct, STACK_VARIABLES_ARGS, clone_flags,
		       pd, &pd->tid, TLS_VALUE, &pd->tid);

  if (__builtin_expect (rc == -1, 0))
    {
      atomic_decrement (&__nptl_nthreads); /* Oops, we lied for a second.  */

      /* Perhaps a thread wants to change the IDs and is waiting for this
         stillborn thread.  */
      if (__builtin_expect (atomic_exchange_acq (&pd->setxid_futex, 0)
			    == -2, 0))
	lll_futex_wake (&pd->setxid_futex, 1, LLL_PRIVATE);

      /* Free the resources.  */
      __deallocate_stack (pd);

      /* We have to translate error codes.  */
      return errno == ENOMEM ? EAGAIN : errno;
    }

  /* Now we have the possibility to set scheduling parameters etc.  */
  if (__builtin_expect (stopped != 0, 0))
    {
      INTERNAL_SYSCALL_DECL (err);
      int res = 0;

      /* Set the affinity mask if necessary.  */
      if (attr->cpuset != NULL)
	{
	  res = INTERNAL_SYSCALL (sched_setaffinity, err, 3, pd->tid,
				  attr->cpusetsize, attr->cpuset);

	  if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (res, err), 0))
	    {
	      /* The operation failed.  We have to kill the thread.  First
		 send it the cancellation signal.  */
	      INTERNAL_SYSCALL_DECL (err2);
	    err_out:
	      (void) INTERNAL_SYSCALL (tgkill, err2, 3,
				       THREAD_GETMEM (THREAD_SELF, pid),
				       pd->tid, SIGCANCEL);

	      /* We do not free the stack here because the canceled thread
		 itself will do this.  */

	      return (INTERNAL_SYSCALL_ERROR_P (res, err)
		      ? INTERNAL_SYSCALL_ERRNO (res, err)
		      : 0);
	    }
	}

      /* Set the scheduling parameters.  */
      if ((attr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0)
	{
	  res = INTERNAL_SYSCALL (sched_setscheduler, err, 3, pd->tid,
				  pd->schedpolicy, &pd->schedparam);

	  if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (res, err), 0))
	    goto err_out;
	}
    }

  /* We now have for sure more than one thread.  The main thread might
     not yet have the flag set.  No need to set the global variable
     again if this is what we use.  */
  THREAD_SETMEM (THREAD_SELF, header.multiple_threads, 1);

  return 0;
}
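
do_clone applies attr->cpuset before the new thread is released.  A caller-side sketch of requesting that through the GNU extension pthread_attr_setaffinity_np (CPU 1 and the worker are illustrative):

#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static void *
worker (void *arg)
{
  printf ("running on CPU %d\n", sched_getcpu ());
  return NULL;
}

int
main (void)
{
  pthread_t th;
  pthread_attr_t attr;
  cpu_set_t set;

  CPU_ZERO (&set);
  CPU_SET (1, &set);

  pthread_attr_init (&attr);
  /* Applied by the implementation before the new thread runs user code.  */
  pthread_attr_setaffinity_np (&attr, sizeof (set), &set);

  pthread_create (&th, &attr, worker, NULL);
  pthread_join (th, NULL);
  pthread_attr_destroy (&attr);
  return 0;
}
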
Example No. 15
/* Get information about the file NAME in BUF.  */
int
__fxstatat (int vers, int fd, const char *file, struct stat *st, int flag)
{
  INTERNAL_SYSCALL_DECL (err);
  int result, errno_out;

  /* ??? The __fxstatat entry point is new enough that it must be using
     vers == _STAT_VER_KERNEL64.  For the benefit of dl-fxstatat64.c, we
     cannot actually check this, lest the compiler not optimize the rest
     of the function away.  */

#ifdef __NR_fstatat64
  if (__have_atfcts >= 0)
    {
      result = INTERNAL_SYSCALL (fstatat64, err, 4, fd, file, st, flag);
      if (__builtin_expect (!INTERNAL_SYSCALL_ERROR_P (result, err), 1))
	return result;
      errno_out = INTERNAL_SYSCALL_ERRNO (result, err);
#ifndef __ASSUME_ATFCTS
      if (errno_out == ENOSYS)
	__have_atfcts = -1;
      else
#endif
	{
	  __set_errno (errno_out);
	  return -1;
	}
    }
#endif /* __NR_fstatat64 */

  if (flag & ~AT_SYMLINK_NOFOLLOW)
    {
      __set_errno (EINVAL);
      return -1;
    }

  char *buf = NULL;

  if (fd != AT_FDCWD && file[0] != '/')
    {
      size_t filelen = strlen (file);
      if (__builtin_expect (filelen == 0, 0))
        {
          __set_errno (ENOENT);
          return -1;
        }

      static const char procfd[] = "/proc/self/fd/%d/%s";
      /* Buffer for the path name we are going to use.  It consists of
	 - the string /proc/self/fd/
	 - the file descriptor number
	 - the file name provided.
	 The final NUL is included in the sizeof.   A bit of overhead
	 due to the format elements compensates for possible negative
	 numbers.  */
      size_t buflen = sizeof (procfd) + sizeof (int) * 3 + filelen;
      buf = alloca (buflen);

      __snprintf (buf, buflen, procfd, fd, file);
      file = buf;
    }

#ifdef __NR_stat64
  if (!__libc_missing_axp_stat64)
    {
      if (flag & AT_SYMLINK_NOFOLLOW)
	result = INTERNAL_SYSCALL (lstat64, err, 2, file, st);
      else
	result = INTERNAL_SYSCALL (stat64, err, 2, file, st);

      if (__builtin_expect (!INTERNAL_SYSCALL_ERROR_P (result, err), 1))
	return result;
      errno_out = INTERNAL_SYSCALL_ERRNO (result, err);
# if __ASSUME_STAT64_SYSCALL == 0
      if (errno_out == ENOSYS)
	__libc_missing_axp_stat64 = 1;
      else
# endif
	goto fail;
    }
#endif /* __NR_stat64 */

  struct kernel_stat kst;

  if (flag & AT_SYMLINK_NOFOLLOW)
    result = INTERNAL_SYSCALL (lstat, err, 2, file, &kst);
  else
    result = INTERNAL_SYSCALL (stat, err, 2, file, &kst);

  if (__builtin_expect (!INTERNAL_SYSCALL_ERROR_P (result, err), 1))
    return __xstat_conv (vers, &kst, st);
  errno_out = INTERNAL_SYSCALL_ERRNO (result, err);

 fail:
  __atfct_seterrno (errno_out, fd, buf);

  return -1;
}
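
The /proc/self/fd/%d/%s path built above (and again in the fchownat examples below) lets a plain path-based syscall stand in for its *at variant on kernels that lack it.  A hedged sketch of that buffer sizing and formatting, using the public snprintf and malloc rather than the internal __snprintf and alloca (at_fallback_path is a hypothetical name):

/* Build "/proc/self/fd/<fd>/<file>" so a path-based call can emulate
   the *at variant.  The caller frees the returned buffer.  */
#include <stdio.h>
#include <string.h>
#include <stdlib.h>

char *
at_fallback_path (int fd, const char *file)
{
  static const char procfd[] = "/proc/self/fd/%d/%s";
  /* sizeof (int) * 3 over-estimates the digits of the descriptor; the
     format characters absorb the terminating NUL and a possible sign.  */
  size_t buflen = sizeof (procfd) + sizeof (int) * 3 + strlen (file);
  char *buf = malloc (buflen);
  if (buf != NULL)
    snprintf (buf, buflen, procfd, fd, file);
  return buf;
}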
Example no. 16
int
fchownat (int fd, const char *file, uid_t owner, gid_t group, int flag)
{
  int result;

#ifdef __NR_fchownat
# ifndef __ASSUME_ATFCTS
  if (__have_atfcts >= 0)
# endif
    {
      result = INLINE_SYSCALL (fchownat, 5, fd, file, owner, group, flag);
# ifndef __ASSUME_ATFCTS
      if (result == -1 && errno == ENOSYS)
	__have_atfcts = -1;
      else
# endif
	return result;
    }
#endif

#ifndef __ASSUME_ATFCTS
  if (flag & ~AT_SYMLINK_NOFOLLOW)
    {
      __set_errno (EINVAL);
      return -1;
    }

  char *buf = NULL;

  if (fd != AT_FDCWD && file[0] != '/')
    {
      size_t filelen = strlen (file);
      if (__builtin_expect (filelen == 0, 0))
	{
	  __set_errno (ENOENT);
	  return -1;
	}

      static const char procfd[] = "/proc/self/fd/%d/%s";
      /* Buffer for the path name we are going to use.  It consists of
	 - the string /proc/self/fd/
	 - the file descriptor number
	 - the file name provided.
	 The final NUL is included in the sizeof.   A bit of overhead
	 due to the format elements compensates for possible negative
	 numbers.  */
      size_t buflen = sizeof (procfd) + sizeof (int) * 3 + filelen;
      buf = alloca (buflen);

      __snprintf (buf, buflen, procfd, fd, file);
      file = buf;
    }

  INTERNAL_SYSCALL_DECL (err);

  if (flag & AT_SYMLINK_NOFOLLOW)
    result = INTERNAL_SYSCALL (lchown32, err, 3, CHECK_STRING (file), owner,
			       group);
  else
    result = INTERNAL_SYSCALL (chown32, err, 3, CHECK_STRING (file), owner,
			       group);

  if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (result, err), 0))
    {
      __atfct_seterrno (INTERNAL_SYSCALL_ERRNO (result, err), fd, buf);
      return -1;
    }

  return result;

#endif
}
Example no. 17
int
__fxstatat64 (int vers, int fd, const char *file, struct stat64 *st, int flag)
{
  if (__builtin_expect (vers != _STAT_VER_LINUX, 0))
    {
      __set_errno (EINVAL);
      return -1;
    }

  int result;
  INTERNAL_SYSCALL_DECL (err);

#ifdef __NR_fstatat64
# ifndef __ASSUME_ATFCTS
  if (__have_atfcts >= 0)
# endif
    {
      result = INTERNAL_SYSCALL (fstatat64, err, 4, fd, file, st, flag);
# ifndef __ASSUME_ATFCTS
      if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (result, err), 1)
	  && INTERNAL_SYSCALL_ERRNO (result, err) == ENOSYS)
	__have_atfcts = -1;
      else
# endif
	if (!__builtin_expect (INTERNAL_SYSCALL_ERROR_P (result, err), 1))
	  return 0;
	else
	  {
	    __set_errno (INTERNAL_SYSCALL_ERRNO (result, err));
	    return -1;
	  }
    }
#endif

#ifndef __ASSUME_ATFCTS
  if (flag & ~AT_SYMLINK_NOFOLLOW)
    {
      __set_errno (EINVAL);
      return -1;
    }

  char *buf = NULL;

  if (fd != AT_FDCWD && file[0] != '/')
    {
      size_t filelen = strlen (file);
      static const char procfd[] = "/proc/self/fd/%d/%s";
      /* Buffer for the path name we are going to use.  It consists of
	 - the string /proc/self/fd/
	 - the file descriptor number
	 - the file name provided.
	 The final NUL is included in the sizeof.   A bit of overhead
	 due to the format elements compensates for possible negative
	 numbers.  */
      size_t buflen = sizeof (procfd) + sizeof (int) * 3 + filelen;
      buf = alloca (buflen);

      __snprintf (buf, buflen, procfd, fd, file);
      file = buf;
    }

# if __ASSUME_STAT64_SYSCALL > 0
  if (flag & AT_SYMLINK_NOFOLLOW)
    result = INTERNAL_SYSCALL (lstat64, err, 2, CHECK_STRING (file),
			       CHECK_1 (st));
  else
    result = INTERNAL_SYSCALL (stat64, err, 2, CHECK_STRING (file),
			       CHECK_1 (st));
  if (__builtin_expect (!INTERNAL_SYSCALL_ERROR_P (result, err), 1))
    {
#  if defined _HAVE_STAT64___ST_INO && __ASSUME_ST_INO_64_BIT == 0
      if (st->__st_ino != (__ino_t) st->st_ino)
	st->st_ino = st->__st_ino;
#  endif
      return result;
    }
# else
  struct kernel_stat kst;
#  ifdef __NR_stat64
  if (! __have_no_stat64)
    {
      if (flag & AT_SYMLINK_NOFOLLOW)
	result = INTERNAL_SYSCALL (lstat64, err, 2, CHECK_STRING (file),
				   CHECK_1 (st));
      else
	result = INTERNAL_SYSCALL (stat64, err, 2, CHECK_STRING (file),
				   CHECK_1 (st));

      if (__builtin_expect (!INTERNAL_SYSCALL_ERROR_P (result, err), 1))
	{
#   if defined _HAVE_STAT64___ST_INO && __ASSUME_ST_INO_64_BIT == 0
	  if (st->__st_ino != (__ino_t) st->st_ino)
	    st->st_ino = st->__st_ino;
#   endif
	  return result;
	}
      if (INTERNAL_SYSCALL_ERRNO (result, err) != ENOSYS)
	goto fail;

      __have_no_stat64 = 1;
    }
#  endif

  if (flag & AT_SYMLINK_NOFOLLOW)
    result = INTERNAL_SYSCALL (lstat, err, 2, CHECK_STRING (file),
			       __ptrvalue (&kst));
  else
    result = INTERNAL_SYSCALL (stat, err, 2, CHECK_STRING (file),
			       __ptrvalue (&kst));

  if (__builtin_expect (!INTERNAL_SYSCALL_ERROR_P (result, err), 1))
    return __xstat64_conv (vers, &kst, st);

 fail:
# endif
  __atfct_seterrno (INTERNAL_SYSCALL_ERRNO (result, err), fd, buf);

  return -1;
#endif
}
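
Examples 15 through 17 all use the same probe-and-remember idiom: attempt the newer *at syscall, and if the kernel reports ENOSYS, record that in __have_atfcts so only the legacy fallback is tried from then on.  A self-contained sketch of the idiom with hypothetical try_new/do_old stand-ins for the real syscalls:

#include <errno.h>

static int have_new = 1;            /* >= 0: not yet known to be missing */

static int
try_new (void)                      /* stub: pretend the kernel is old */
{
  errno = ENOSYS;
  return -1;
}

static int
do_old (void)                       /* stub: the legacy path succeeds */
{
  return 0;
}

int
dispatch (void)
{
  if (have_new >= 0)
    {
      int r = try_new ();
      if (r != -1 || errno != ENOSYS)
        return r;                   /* worked, or failed for a real reason */
      have_new = -1;                /* remember: kernel is too old */
    }
  return do_old ();
}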
Example no. 18
int
pthread_cancel (
    pthread_t th)
{
    volatile struct pthread *pd = (volatile struct pthread *) th;

    /* Make sure the descriptor is valid.  */
    if (INVALID_TD_P (pd))
        /* Not a valid thread handle.  */
        return ESRCH;

#ifdef SHARED
    pthread_cancel_init ();
#endif
    int result = 0;
    int oldval;
    int newval;
    do
    {
again:
        oldval = pd->cancelhandling;
        newval = oldval | CANCELING_BITMASK | CANCELED_BITMASK;

        /* Avoid doing unnecessary work.  The atomic operation can
           potentially be expensive if the bus has to be locked and
           remote cache lines have to be invalidated.  */
        if (oldval == newval)
            break;

        /* If the cancellation is handled asynchronously just send a
           signal.  We avoid this if possible since it's more
           expensive.  */
        if (CANCEL_ENABLED_AND_CANCELED_AND_ASYNCHRONOUS (newval))
        {
            /* Mark the cancellation as "in progress".  */
            if (atomic_compare_and_exchange_bool_acq (&pd->cancelhandling,
                    oldval | CANCELING_BITMASK,
                    oldval))
                goto again;

            /* The cancellation handler will take care of marking the
               thread as canceled.  */
            INTERNAL_SYSCALL_DECL (err);

            /* One comment: The PID field in the TCB can temporarily be
               changed (in fork).  But this must not affect this code
               here.  Since this function would have to be called while
               the thread is executing fork, it would have to happen in
               a signal handler.  But this is not allowed, pthread_cancel
               is not guaranteed to be async-safe.  */
            int val;
#if defined(__ASSUME_TGKILL) && __ASSUME_TGKILL
            val = INTERNAL_SYSCALL (tgkill, err, 3,
                                    THREAD_GETMEM (THREAD_SELF, pid), pd->tid,
                                    SIGCANCEL);
#else
# ifdef __NR_tgkill
            val = INTERNAL_SYSCALL (tgkill, err, 3,
                                    THREAD_GETMEM (THREAD_SELF, pid), pd->tid,
                                    SIGCANCEL);
            if (INTERNAL_SYSCALL_ERROR_P (val, err)
                    && INTERNAL_SYSCALL_ERRNO (val, err) == ENOSYS)
# endif
                val = INTERNAL_SYSCALL (tkill, err, 2, pd->tid, SIGCANCEL);
#endif

            if (INTERNAL_SYSCALL_ERROR_P (val, err))
                result = INTERNAL_SYSCALL_ERRNO (val, err);

            break;
        }
    }
    /* Mark the thread as canceled.  This has to be done
       atomically since other bits could be modified as well.  */
    while (atomic_compare_and_exchange_bool_acq (&pd->cancelhandling, newval,
            oldval));

    return result;
}
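
The do/while above is a classic compare-and-swap retry loop: compute the new flag word from a snapshot of the old one, and retry if another thread changed the word in the meantime.  A sketch of the same loop written with C11 atomics; the bit values here are hypothetical:

#include <stdatomic.h>

#define CANCELING_BITMASK  (1 << 1)
#define CANCELED_BITMASK   (1 << 2)

void
mark_canceled (atomic_int *cancelhandling)
{
  int oldval = atomic_load (cancelhandling);
  int newval;
  do
    {
      newval = oldval | CANCELING_BITMASK | CANCELED_BITMASK;
      if (newval == oldval)
        break;                      /* bits already set, nothing to do */
    }
  /* On failure the current value is reloaded into oldval and the new
     word is recomputed on the next iteration.  */
  while (!atomic_compare_exchange_weak (cancelhandling, &oldval, newval));
}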
Example no. 19
int
fchownat (int fd, const char *file, uid_t owner, gid_t group, int flag)
{
  if (flag & ~AT_SYMLINK_NOFOLLOW)
    {
      __set_errno (EINVAL);
      return -1;
    }

  char *buf = NULL;

  if (fd != AT_FDCWD && file[0] != '/')
    {
      size_t filelen = strlen (file);
      if (__builtin_expect (filelen == 0, 0))
	{
	  __set_errno (ENOENT);
	  return -1;
	}

      static const char procfd[] = "/proc/self/fd/%d/%s";
      /* Buffer for the path name we are going to use.  It consists of
	 - the string /proc/self/fd/
	 - the file descriptor number
	 - the file name provided.
	 The final NUL is included in the sizeof.   A bit of overhead
	 due to the format elements compensates for possible negative
	 numbers.  */
      size_t buflen = sizeof (procfd) + sizeof (int) * 3 + filelen;
      buf = alloca (buflen);

      __snprintf (buf, buflen, procfd, fd, file);
      file = buf;
    }

  int result;
  INTERNAL_SYSCALL_DECL (err);

#if __ASSUME_32BITUIDS > 0
  if (flag & AT_SYMLINK_NOFOLLOW)
    result = INTERNAL_SYSCALL (lchown32, err, 3, CHECK_STRING (file), owner,
			       group);
  else
    result = INTERNAL_SYSCALL (chown32, err, 3, CHECK_STRING (file), owner,
			       group);
#else
# ifdef __NR_chown32
  if (__libc_missing_32bit_uids <= 0)
    {
      if (flag & AT_SYMLINK_NOFOLLOW)
	result = INTERNAL_SYSCALL (lchown32, err, 3, CHECK_STRING (file),
				   owner, group);
      else
	result = INTERNAL_SYSCALL (chown32, err, 3, CHECK_STRING (file), owner,
				   group);

      if (__builtin_expect (!INTERNAL_SYSCALL_ERROR_P (result, err), 1))
	return result;
      if (INTERNAL_SYSCALL_ERRNO (result, err) != ENOSYS)
	goto fail;

      __libc_missing_32bit_uids = 1;
    }
# endif /* __NR_chown32 */

  if (((owner + 1) > (uid_t) ((__kernel_uid_t) -1U))
      || ((group + 1) > (gid_t) ((__kernel_gid_t) -1U)))
    {
      __set_errno (EINVAL);
      return -1;
    }

  if (flag & AT_SYMLINK_NOFOLLOW)
    result = INTERNAL_SYSCALL (lchown, err, 3, CHECK_STRING (file), owner,
			       group);
  else
    result = INTERNAL_SYSCALL (chown, err, 3, CHECK_STRING (file), owner,
			       group);
#endif

  if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (result, err), 0))
    {
    fail:
      __atfct_seterrno (INTERNAL_SYSCALL_ERRNO (result, err), fd, buf);
      result = -1;
    }

  return result;
}
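
The (owner + 1) comparison above rejects IDs that do not fit the kernel's legacy 16-bit ID type while still accepting (uid_t) -1, which means "leave the owner unchanged" and wraps to 0 when 1 is added.  A small worked example under the assumption of a 16-bit kernel type; the type names below are stand-ins:

#include <stdio.h>
#include <stdint.h>

typedef uint32_t uid_type;          /* stands in for uid_t */
typedef uint16_t kernel_uid_type;   /* stands in for __kernel_uid_t */

static int
fits_legacy_uid (uid_type owner)
{
  return (owner + 1) <= (uid_type) ((kernel_uid_type) -1U);
}

int
main (void)
{
  printf ("%d %d %d\n",
          fits_legacy_uid (1000),            /* 1: fits in 16 bits */
          fits_legacy_uid (70000),           /* 0: needs chown32 */
          fits_legacy_uid ((uid_type) -1));  /* 1: -1 + 1 wraps to 0 */
  return 0;
}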
Example no. 20
static int
create_thread (struct pthread *pd, const struct pthread_attr *attr,
	       bool stopped_start, STACK_VARIABLES_PARMS, bool *thread_ran)
{
  /* Determine whether the newly created thread has to be started
     stopped, since we have to set the scheduling parameters or set
     the affinity first.  */
  if (attr != NULL
      && (__glibc_unlikely (attr->cpuset != NULL)
	  || __glibc_unlikely ((attr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0)))
    stopped_start = true;

  pd->stopped_start = stopped_start;
  if (__glibc_unlikely (stopped_start))
    /* We make sure the thread does not run far by forcing it to get a
       lock.  We lock it here too so that the new thread cannot continue
       until we tell it to.  */
    lll_lock (pd->lock, LLL_PRIVATE);

  /* We rely heavily on various flags the CLONE function understands:

     CLONE_VM, CLONE_FS, CLONE_FILES
	These flags select semantics with shared address space and
	file descriptors according to what POSIX requires.

     CLONE_SIGHAND, CLONE_THREAD
	These flags select the POSIX signal semantics and various
	other kinds of sharing (itimers, POSIX timers, etc.).

     CLONE_SETTLS
	The sixth parameter to CLONE determines the TLS area for the
	new thread.

     CLONE_PARENT_SETTID
	The kernel writes the thread ID of the newly created thread
	into the location pointed to by the fifth parameter to CLONE.

	Note that it would be semantically equivalent to use
	CLONE_CHILD_SETTID, but it would be more expensive in the kernel.

     CLONE_CHILD_CLEARTID
	The kernel clears the thread ID of a thread that has called
	sys_exit() in the location pointed to by the seventh parameter
	to CLONE.

     The termination signal is chosen to be zero which means no signal
     is sent.  */
  const int clone_flags = (CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SYSVSEM
			   | CLONE_SIGHAND | CLONE_THREAD
			   | CLONE_SETTLS | CLONE_PARENT_SETTID
			   | CLONE_CHILD_CLEARTID
			   | 0);

  TLS_DEFINE_INIT_TP (tp, pd);

  if (__glibc_unlikely (ARCH_CLONE (&start_thread, STACK_VARIABLES_ARGS,
				    clone_flags, pd, &pd->tid, tp, &pd->tid)
			== -1))
    return errno;

  /* It's started now, so if we fail below, we'll have to cancel it
     and let it clean itself up.  */
  *thread_ran = true;

  /* Now we have the possibility to set scheduling parameters etc.  */
  if (attr != NULL)
    {
      INTERNAL_SYSCALL_DECL (err);
      int res;

      /* Set the affinity mask if necessary.  */
      if (attr->cpuset != NULL)
	{
	  assert (stopped_start);

	  res = INTERNAL_SYSCALL (sched_setaffinity, err, 3, pd->tid,
				  attr->cpusetsize, attr->cpuset);

	  if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (res, err)))
	  err_out:
	    {
	      /* The operation failed.  We have to kill the thread.
		 We let the normal cancellation mechanism do the work.  */

	      INTERNAL_SYSCALL_DECL (err2);
	      (void) INTERNAL_SYSCALL (tgkill, err2, 3,
				       THREAD_GETMEM (THREAD_SELF, pid),
				       pd->tid, SIGCANCEL);

	      return INTERNAL_SYSCALL_ERRNO (res, err);
	    }
	}

      /* Set the scheduling parameters.  */
      if ((attr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0)
	{
	  assert (stopped_start);

	  res = INTERNAL_SYSCALL (sched_setscheduler, err, 3, pd->tid,
				  pd->schedpolicy, &pd->schedparam);

	  if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (res, err)))
	    goto err_out;
	}
    }

  return 0;
}
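
The stopped_start handshake above relies on the parent taking pd->lock before the clone, so the new thread blocks on its first lll_lock until the parent has finished setting affinity and scheduling parameters.  A minimal sketch of the same handshake using an ordinary pthread mutex instead of the internal lll_lock (all names here are made up for illustration):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t gate = PTHREAD_MUTEX_INITIALIZER;

static void *
worker (void *arg)
{
  pthread_mutex_lock (&gate);       /* blocks until the parent releases */
  pthread_mutex_unlock (&gate);
  puts ("worker: parameters are set, running");
  return arg;
}

int
main (void)
{
  pthread_t th;
  pthread_mutex_lock (&gate);               /* hold the gate */
  pthread_create (&th, NULL, worker, NULL);
  puts ("parent: setting scheduling parameters");
  pthread_mutex_unlock (&gate);             /* let the worker proceed */
  pthread_join (th, NULL);
  return 0;
}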
Example no. 21
static int
do_clone (struct pthread *pd, const struct pthread_attr *attr,
	  int clone_flags, int (*fct) (void *), STACK_VARIABLES_PARMS,
	  int stopped)
{
#ifdef PREPARE_CREATE
  PREPARE_CREATE;
#endif

  if (stopped)
    /* We make sure the thread does not run far by forcing it to get a
       lock.  We lock it here too so that the new thread cannot continue
       until we tell it to.  */
    lll_lock (pd->lock);

  /* One more thread.  We cannot have the thread do this itself, since it
     might exist but not have been scheduled yet by the time we've returned
     and need to check the value to behave correctly.  We must do it before
     creating the thread, in case it does get scheduled first and then
     might mistakenly think it was the only thread.  In the failure case,
     we momentarily store a false value; this doesn't matter because there
     is no kosher thing a signal handler interrupting us right here can do
     that cares whether the thread count is correct.  */
  atomic_increment (&__nptl_nthreads);

  if (ARCH_CLONE (fct, STACK_VARIABLES_ARGS, clone_flags,
		  pd, &pd->tid, TLS_VALUE, &pd->tid) == -1)
    {
      atomic_decrement (&__nptl_nthreads); /* Oops, we lied for a second.  */

      /* Failed.  If the thread is detached, remove the TCB here since
	 the caller cannot do this.  The caller remembered the thread
	 as detached and cannot reverify that it is not since it must
	 not access the thread descriptor again.  */
      if (IS_DETACHED (pd))
	__deallocate_stack (pd);

      return errno;
    }

  /* Now we have the possibility to set scheduling parameters etc.  */
  if (__builtin_expect (stopped != 0, 0))
    {
      INTERNAL_SYSCALL_DECL (err);
      int res = 0;

      /* Set the affinity mask if necessary.  */
      if (attr->cpuset != NULL)
	{
	  res = INTERNAL_SYSCALL (sched_setaffinity, err, 3, pd->tid,
				  sizeof (cpu_set_t), attr->cpuset);

	  if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (res, err), 0))
	    {
	      /* The operation failed.  We have to kill the thread.  First
		 send it the cancellation signal.  */
	      INTERNAL_SYSCALL_DECL (err2);
	    err_out:
#if __ASSUME_TGKILL
	      (void) INTERNAL_SYSCALL (tgkill, err2, 3,
				       THREAD_GETMEM (THREAD_SELF, pid),
				       pd->tid, SIGCANCEL);
#else
	      (void) INTERNAL_SYSCALL (tkill, err2, 2, pd->tid, SIGCANCEL);
#endif

	      return (INTERNAL_SYSCALL_ERROR_P (res, err)
		      ? INTERNAL_SYSCALL_ERRNO (res, err)
		      : 0);
	    }
	}

      /* Set the scheduling parameters.  */
      if ((attr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0)
	{
	  res = INTERNAL_SYSCALL (sched_setscheduler, err, 3, pd->tid,
				  pd->schedpolicy, &pd->schedparam);

	  if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (res, err), 0))
	    goto err_out;
	}
    }

  /* We now have for sure more than one thread.  The main thread might
     not yet have the flag set.  No need to set the global variable
     again if this is what we use.  */
  THREAD_SETMEM (THREAD_SELF, header.multiple_threads, 1);

  return 0;
}
Example no. 22
static int
do_clone (struct pthread *pd, const struct pthread_attr *attr,
	  int clone_flags, int (*fct) (void *), STACK_VARIABLES_PARMS,
	  int stopped)
{
#if 0
  PREPARE_CREATE;
#endif

  if (__builtin_expect (stopped != 0, 0))
    /* We make sure the thread does not run far by forcing it to get a
       lock.  We lock it here too so that the new thread cannot continue
       until we tell it to.  */
    lll_lock (pd->lock, LLL_PRIVATE);

  /* One more thread.  We cannot have the thread do this itself, since it
     might exist but not have been scheduled yet by the time we've returned
     and need to check the value to behave correctly.  We must do it before
     creating the thread, in case it does get scheduled first and then
     might mistakenly think it was the only thread.  In the failure case,
     we momentarily store a false value; this doesn't matter because there
     is no kosher thing a signal handler interrupting us right here can do
     that cares whether the thread count is correct.  */
  atomic_increment (&__nptl_nthreads);

#if !defined(__native_client__) && !defined(__ZRT_HOST)
#error "This code was changed to work only in Native Client"
#endif

  /* Native Client does not have a notion of a thread ID, so we make
     one up.  This must be small enough to leave space for the number
     identifying the clock.  Use CLOCK_IDFIELD_SIZE to guarantee that.  */
  pd->tid = ((unsigned int) pd) >> CLOCK_IDFIELD_SIZE;

  /* Unlike the kernel, the Native Client thread_create syscall does not
     push a return address onto the stack.  We compensate for this on
     x86-64 to meet the ABI requirement ((%rsp + 8) mod 16 == 0).  On
     x86-32 the 'force_align_arg_pointer' attribute does the same for
     start_thread ().  */
#ifdef __x86_64__
  STACK_VARIABLES_ARGS -= 8;
#endif

  if (__nacl_irt_thread_create (fct, STACK_VARIABLES_ARGS, pd) != 0)
    {
      pd->tid = 0;
      atomic_decrement (&__nptl_nthreads); /* Oops, we lied for a second.  */

      /* Failed.  If the thread is detached, remove the TCB here since
	 the caller cannot do this.  The caller remembered the thread
	 as detached and cannot reverify that it is not since it must
	 not access the thread descriptor again.  */
      if (IS_DETACHED (pd))
	__deallocate_stack (pd);

      /* We have to translate error codes.  */
      return errno == ENOMEM ? EAGAIN : errno;
    }

  /* Now we have the possibility to set scheduling parameters etc.  */
  if (__builtin_expect (stopped != 0, 0))
    {
      INTERNAL_SYSCALL_DECL (err);
      int res = 0;

      /* Set the affinity mask if necessary.  */
      if (attr->cpuset != NULL)
	{
	  res = INTERNAL_SYSCALL (sched_setaffinity, err, 3, pd->tid,
				  attr->cpusetsize, attr->cpuset);

	  if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (res, err), 0))
	    {
	      /* The operation failed.  We have to kill the thread.  First
		 send it the cancellation signal.  */
	      INTERNAL_SYSCALL_DECL (err2);
	    err_out:
#if __ASSUME_TGKILL
	      (void) INTERNAL_SYSCALL (tgkill, err2, 3,
				       THREAD_GETMEM (THREAD_SELF, pid),
				       pd->tid, SIGCANCEL);
#else
	      (void) INTERNAL_SYSCALL (tkill, err2, 2, pd->tid, SIGCANCEL);
#endif

	      return (INTERNAL_SYSCALL_ERROR_P (res, err)
		      ? INTERNAL_SYSCALL_ERRNO (res, err)
		      : 0);
	    }
	}

      /* Set the scheduling parameters.  */
      if ((attr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0)
	{
	  res = INTERNAL_SYSCALL (sched_setscheduler, err, 3, pd->tid,
				  pd->schedpolicy, &pd->schedparam);

	  if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (res, err), 0))
	    goto err_out;
	}
    }

  /* We now have for sure more than one thread.  The main thread might
     not yet have the flag set.  No need to set the global variable
     again if this is what we use.  */
  THREAD_SETMEM (THREAD_SELF, header.multiple_threads, 1);

  return 0;
}
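
The STACK_VARIABLES_ARGS -= 8 adjustment above exists because the x86-64 psABI expects the stack pointer at function entry to look as if a CALL had just pushed an 8-byte return address.  A tiny, purely illustrative check of that arithmetic (the value of sp is made up):

#include <stdio.h>
#include <stdint.h>

int
main (void)
{
  uintptr_t sp = 0x7ffd1000;                 /* 16-byte aligned stack top,
                                                no return address pushed */
  printf ("%u %u\n",
          (unsigned) ((sp + 8) % 16),        /* 8: entry ABI violated */
          (unsigned) ((sp - 8 + 8) % 16));   /* 0: satisfied after -= 8 */
  return 0;
}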