Example 1
int
setfsgid (gid_t gid)
{
  INTERNAL_SYSCALL_DECL (err);
  return INTERNAL_SYSCALL (setfsgid32, err, 1, gid);
}
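
A minimal caller of the wrapper above might look like the following. This is a sketch, assuming a Linux system where <sys/fsuid.h> declares setfsgid and the call returns the previous filesystem GID.

#include <stdio.h>
#include <unistd.h>
#include <sys/fsuid.h>

int
main (void)
{
  /* Passing the real GID is a harmless way to read the current value,
     since setfsgid returns the previous filesystem GID.  */
  gid_t old = setfsgid (getgid ());
  printf ("previous fsgid: %ld\n", (long) old);
  return 0;
}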
Example 2

int
clock_getcpuclockid (pid_t pid, clockid_t *clock_id)
{
#ifdef __NR_clock_getres
  /* The clockid_t value is a simple computation from the PID.
     But we do a clock_getres call to validate it.  */

  const clockid_t pidclock = MAKE_PROCESS_CPUCLOCK (pid, CPUCLOCK_SCHED);

# if !(__ASSUME_POSIX_CPU_TIMERS > 0)
  extern int __libc_missing_posix_cpu_timers attribute_hidden;
#  if !(__ASSUME_POSIX_TIMERS > 0)
  extern int __libc_missing_posix_timers attribute_hidden;
  if (__libc_missing_posix_timers && !__libc_missing_posix_cpu_timers)
    __libc_missing_posix_cpu_timers = 1;
#  endif
  if (!__libc_missing_posix_cpu_timers)
# endif
    {
      INTERNAL_SYSCALL_DECL (err);
      int r = INTERNAL_SYSCALL (clock_getres, err, 2, pidclock, NULL);
      if (!INTERNAL_SYSCALL_ERROR_P (r, err))
	{
	  *clock_id = pidclock;
	  return 0;
	}

# if !(__ASSUME_POSIX_TIMERS > 0)
      if (INTERNAL_SYSCALL_ERRNO (r, err) == ENOSYS)
	{
	  /* The kernel doesn't support these calls at all.  */
	  __libc_missing_posix_timers = 1;
	  __libc_missing_posix_cpu_timers = 1;
	}
      else
# endif
	if (INTERNAL_SYSCALL_ERRNO (r, err) == EINVAL)
	  {
# if !(__ASSUME_POSIX_CPU_TIMERS > 0)
	    if (pidclock == MAKE_PROCESS_CPUCLOCK (0, CPUCLOCK_SCHED)
		|| INTERNAL_SYSCALL_ERROR_P (INTERNAL_SYSCALL
					     (clock_getres, err, 2,
					      MAKE_PROCESS_CPUCLOCK
					      (0, CPUCLOCK_SCHED), NULL),
					     err))
	      /* The kernel doesn't support these clocks at all.  */
	      __libc_missing_posix_cpu_timers = 1;
	    else
# endif
	      /* The clock_getres system call checked the PID for us.  */
	      return ESRCH;
	  }
	else
	  return INTERNAL_SYSCALL_ERRNO (r, err);
    }
#endif

  /* We don't allow any process ID but our own.  */
  if (pid != 0 && pid != getpid ())
    return EPERM;

#ifdef CLOCK_PROCESS_CPUTIME_ID
  if (HAS_CPUCLOCK)
    {
      /* Store the number.  */
      *clock_id = CLOCK_PROCESS_CPUTIME_ID;

      return 0;
    }
#endif

  /* We don't have a timer for that.  */
  return ENOENT;
}
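
From the caller's side, the validated clock ID is then used with clock_gettime. A minimal sketch of the public interface (PID 0 names the calling process; older systems may need -lrt):

#include <stdio.h>
#include <time.h>

int
main (void)
{
  clockid_t clk;
  struct timespec ts;

  /* Obtain the CPU-time clock of the calling process.  */
  if (clock_getcpuclockid (0, &clk) != 0)
    return 1;

  if (clock_gettime (clk, &ts) == 0)
    printf ("CPU time: %ld.%09ld s\n", (long) ts.tv_sec, (long) ts.tv_nsec);
  return 0;
}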
Example 3
/* Get the value of the system variable NAME.  */
long int
__sysconf (int name)
{
  const char *procfname = NULL;

  switch (name)
    {
      struct rlimit rlimit;
#ifdef __NR_clock_getres
    case _SC_MONOTONIC_CLOCK:
      /* Check using the clock_getres system call.  */
      {
	struct timespec ts;
	INTERNAL_SYSCALL_DECL (err);
	int r;
	r = INTERNAL_SYSCALL (clock_getres, err, 2, CLOCK_MONOTONIC, &ts);
	return INTERNAL_SYSCALL_ERROR_P (r, err) ? -1 : _POSIX_VERSION;
      }
#endif

    case _SC_CPUTIME:
    case _SC_THREAD_CPUTIME:
      return HAS_CPUCLOCK (name);

    case _SC_ARG_MAX:
#if !__ASSUME_ARG_MAX_STACK_BASED
      /* Determine whether this is a kernel with an argument limit
	 determined by the stack size.  */
      if (GLRO(dl_discover_osversion) ()
	  >= __LINUX_ARG_MAX_STACK_BASED_MIN_KERNEL)
#endif
	/* Use getrlimit to get the stack limit.  */
	if (__getrlimit (RLIMIT_STACK, &rlimit) == 0)
	  return MAX (legacy_ARG_MAX, rlimit.rlim_cur / 4);

      return legacy_ARG_MAX;

    case _SC_NGROUPS_MAX:
      /* Try to read the information from the /proc/sys/kernel/ngroups_max
	 file.  */
      procfname = "/proc/sys/kernel/ngroups_max";
      break;

    case _SC_SIGQUEUE_MAX:
      if (__getrlimit (RLIMIT_SIGPENDING, &rlimit) == 0)
	return rlimit.rlim_cur;

      /* The /proc/sys/kernel/rtsig-max file contains the answer.  */
      procfname = "/proc/sys/kernel/rtsig-max";
      break;

    default:
      break;
    }

  if (procfname != NULL)
    {
      int fd = open_not_cancel_2 (procfname, O_RDONLY);
      if (fd != -1)
	{
	  /* This is more than enough, the file contains a single integer.  */
	  char buf[32];
	  ssize_t n;
	  n = TEMP_FAILURE_RETRY (read_not_cancel (fd, buf, sizeof (buf) - 1));
	  close_not_cancel_no_status (fd);

	  if (n > 0)
	    {
	      /* Terminate the string.  */
	      buf[n] = '\0';

	      char *endp;
	      long int res = strtol (buf, &endp, 10);
	      if (endp != buf && (*endp == '\0' || *endp == '\n'))
		return res;
	    }
	}
    }

  return posix_sysconf (name);
}
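
A short usage sketch of the public sysconf interface implemented above; _SC_NGROUPS_MAX exercises the /proc/sys/kernel/ngroups_max fallback path:

#include <stdio.h>
#include <unistd.h>

int
main (void)
{
  long ngroups = sysconf (_SC_NGROUPS_MAX);
  long arg_max = sysconf (_SC_ARG_MAX);

  /* -1 can mean "unsupported" or "no limit"; distinguishing the two
     requires checking errno, which sysconf leaves untouched on success.  */
  printf ("NGROUPS_MAX=%ld ARG_MAX=%ld\n", ngroups, arg_max);
  return 0;
}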
Example 4
static int
do_clone (struct pthread *pd, const struct pthread_attr *attr,
	  int clone_flags, int (*fct) (void *), STACK_VARIABLES_PARMS,
	  int stopped)
{
#ifdef PREPARE_CREATE
  PREPARE_CREATE;
#endif

  if (__builtin_expect (stopped != 0, 0))
    /* We make sure the thread does not run far by forcing it to get a
       lock.  We lock it here too so that the new thread cannot continue
       until we tell it to.  */
    lll_lock (pd->lock, LLL_PRIVATE);

  /* One more thread.  We cannot have the thread do this itself, since it
     might exist but not have been scheduled yet by the time we've returned
     and need to check the value to behave correctly.  We must do it before
     creating the thread, in case it does get scheduled first and then
     might mistakenly think it was the only thread.  In the failure case,
     we momentarily store a false value; this doesn't matter because there
     is no kosher thing a signal handler interrupting us right here can do
     that cares whether the thread count is correct.  */
  atomic_increment (&__nptl_nthreads);

  if (ARCH_CLONE (fct, STACK_VARIABLES_ARGS, clone_flags,
		  pd, &pd->tid, TLS_VALUE, &pd->tid) == -1)
    {
      atomic_decrement (&__nptl_nthreads); /* Oops, we lied for a second.  */

      /* Failed.  If the thread is detached, remove the TCB here since
	 the caller cannot do this.  The caller remembered the thread
	 as detached and cannot reverify that it is not since it must
	 not access the thread descriptor again.  */
      if (IS_DETACHED (pd))
	__deallocate_stack (pd);

      /* We have to translate error codes.  */
      return errno == ENOMEM ? EAGAIN : errno;
    }

  /* Now we have the possibility to set scheduling parameters etc.  */
  if (__builtin_expect (stopped != 0, 0))
    {
      INTERNAL_SYSCALL_DECL (err);
      pid_t pid = getpid ();
      int res = 0;

      /* Set the affinity mask if necessary.  */
      if (attr->cpuset != NULL)
	{
	  res = INTERNAL_SYSCALL (sched_setaffinity, err, 3, pd->tid,
				  attr->cpusetsize, attr->cpuset);

	  if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (res, err), 0))
	    {
	      /* The operation failed.  We have to kill the thread.  First
		 send it the cancellation signal.  */
	      INTERNAL_SYSCALL_DECL (err2);
	    err_out:
	      (void) INTERNAL_SYSCALL (tgkill, err2, 3, pid, pd->tid, SIGCANCEL);

	      return (INTERNAL_SYSCALL_ERROR_P (res, err)
		      ? INTERNAL_SYSCALL_ERRNO (res, err)
		      : 0);
	    }
	}

      /* Set the scheduling parameters.  */
      if ((attr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0)
	{
	  res = INTERNAL_SYSCALL (sched_setscheduler, err, 3, pd->tid,
				  pd->schedpolicy, &pd->schedparam);

	  if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (res, err), 0))
	    goto err_out;
	}
    }

  /* We now have for sure more than one thread.  The main thread might
     not yet have the flag set.  No need to set the global variable
     again if this is what we use.  */
  THREAD_SETMEM (THREAD_SELF, header.multiple_threads, 1);

  return 0;
}
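
The stopped-thread path above is what backs explicit scheduling attributes in pthread_create. A sketch using the standard GNU affinity attribute (pthread_attr_setaffinity_np, a glibc extension) rather than this internal function:

#define _GNU_SOURCE
#include <sched.h>
#include <pthread.h>

static void *
worker (void *arg)
{
  (void) arg;
  return NULL;
}

int
main (void)
{
  pthread_t thr;
  pthread_attr_t attr;
  cpu_set_t cpus;

  pthread_attr_init (&attr);

  /* Pin the new thread to CPU 0.  With explicit attributes, NPTL
     creates the thread stopped, applies the mask, then releases it.  */
  CPU_ZERO (&cpus);
  CPU_SET (0, &cpus);
  pthread_attr_setaffinity_np (&attr, sizeof (cpus), &cpus);

  if (pthread_create (&thr, &attr, worker, NULL) != 0)
    return 1;
  pthread_join (thr, NULL);
  pthread_attr_destroy (&attr);
  return 0;
}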
Example 5
int
fchownat (int fd, const char *file, uid_t owner, gid_t group, int flag)
{
  int result;

#ifdef __NR_fchownat
# ifndef __ASSUME_ATFCTS
  if (__have_atfcts >= 0)
# endif
    {
      result = INLINE_SYSCALL (fchownat, 5, fd, file, owner, group, flag);
# ifndef __ASSUME_ATFCTS
      if (result == -1 && errno == ENOSYS)
	__have_atfcts = -1;
      else
# endif
	return result;
    }
#endif

#ifndef __ASSUME_ATFCTS
  if (flag & ~AT_SYMLINK_NOFOLLOW)
    {
      __set_errno (EINVAL);
      return -1;
    }

  char *buf = NULL;

  if (fd != AT_FDCWD && file[0] != '/')
    {
      size_t filelen = strlen (file);
      static const char procfd[] = "/proc/self/fd/%d/%s";
      /* Buffer for the path name we are going to use.  It consists of
	 - the string /proc/self/fd/
	 - the file descriptor number
	 - the file name provided.
	 The final NUL is included in the sizeof.   A bit of overhead
	 due to the format elements compensates for possible negative
	 numbers.  */
      size_t buflen = sizeof (procfd) + sizeof (int) * 3 + filelen;
      buf = alloca (buflen);

      __snprintf (buf, buflen, procfd, fd, file);
      file = buf;
    }

# if __ASSUME_LCHOWN_SYSCALL
  INTERNAL_SYSCALL_DECL (err);

  if (flag & AT_SYMLINK_NOFOLLOW)
    result = INTERNAL_SYSCALL (lchown, err, 3, file, owner, group);
  else
    result = INTERNAL_SYSCALL (chown, err, 3, file, owner, group);

  if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (result, err), 0))
    {
      __atfct_seterrno (INTERNAL_SYSCALL_ERRNO (result, err), fd, buf);
      return -1;
    }
# else
  /* Don't inline the rest to avoid unnecessary code duplication.  */
  if (flag & AT_SYMLINK_NOFOLLOW)
    result = __lchown (file, owner, group);
  else
    result = __chown (file, owner, group);
  if (result < 0)
    __atfct_seterrno (errno, fd, buf);
# endif

  return result;

#endif
}
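
Typical use of the public fchownat interface; the file name below is hypothetical, and changing ownership will normally require privilege:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main (void)
{
  int dirfd = open (".", O_RDONLY | O_DIRECTORY);
  if (dirfd == -1)
    return 1;

  /* Operate on the symlink itself rather than its target; (uid_t) -1
     leaves the owner unchanged.  */
  if (fchownat (dirfd, "some-symlink", (uid_t) -1, (gid_t) 0,
		AT_SYMLINK_NOFOLLOW) == -1)
    perror ("fchownat");

  close (dirfd);
  return 0;
}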
Example 6
/* Get the value of the system variable NAME.  */
long int sysconf(int name)
{
#ifdef __UCLIBC_HAS_THREADS_NATIVE__
    struct rlimit rlimit;
#endif
    switch (name)
    {
    default:
        __set_errno(EINVAL);
        return -1;

    case _SC_ARG_MAX:
#ifdef __UCLIBC_HAS_THREADS_NATIVE__
        /* Use getrlimit to get the stack limit.  */
        if (getrlimit (RLIMIT_STACK, &rlimit) == 0)
            return MAX (legacy_ARG_MAX, rlimit.rlim_cur / 4);

        /* If getrlimit fails, fall back to the legacy constant rather
           than falling through to the next case.  */
        return legacy_ARG_MAX;
#elif defined ARG_MAX
        return ARG_MAX;
#else
        RETURN_NEG_1;
#endif

    case _SC_CHILD_MAX:
#ifdef	CHILD_MAX
        return CHILD_MAX;
#else
        RETURN_NEG_1;
#endif

    case _SC_CLK_TCK:
        /* Can't use CLK_TCK here since that calls __sysconf(_SC_CLK_TCK) */
        return __UCLIBC_CLK_TCK_CONST;

    case _SC_NGROUPS_MAX:
#ifdef	NGROUPS_MAX
        return NGROUPS_MAX;
#else
        RETURN_NEG_1;
#endif

    case _SC_OPEN_MAX:
        RETURN_FUNCTION(getdtablesize());

    case _SC_STREAM_MAX:
#ifdef	STREAM_MAX
        return STREAM_MAX;
#else
        return FOPEN_MAX;
#endif

    case _SC_TZNAME_MAX:
        return _POSIX_TZNAME_MAX;

    case _SC_JOB_CONTROL:
#ifdef	_POSIX_JOB_CONTROL
        return 1;
#else
        RETURN_NEG_1;
#endif

    case _SC_SAVED_IDS:
#ifdef	_POSIX_SAVED_IDS
        return 1;
#else
        RETURN_NEG_1;
#endif

    case _SC_REALTIME_SIGNALS:
#ifdef	_POSIX_REALTIME_SIGNALS
        return 1;
#else
        RETURN_NEG_1;
#endif

    case _SC_PRIORITY_SCHEDULING:
#ifdef	_POSIX_PRIORITY_SCHEDULING
        return 1;
#else
        RETURN_NEG_1;
#endif

    case _SC_TIMERS:
#ifdef	_POSIX_TIMERS
        return 1;
#else
        RETURN_NEG_1;
#endif

    case _SC_ASYNCHRONOUS_IO:
#ifdef	_POSIX_ASYNCHRONOUS_IO
        return 1;
#else
        RETURN_NEG_1;
#endif

    case _SC_PRIORITIZED_IO:
#ifdef	_POSIX_PRIORITIZED_IO
        return 1;
#else
        RETURN_NEG_1;
#endif

    case _SC_SYNCHRONIZED_IO:
#ifdef	_POSIX_SYNCHRONIZED_IO
        return 1;
#else
        RETURN_NEG_1;
#endif

    case _SC_FSYNC:
#ifdef	_POSIX_FSYNC
        return 1;
#else
        RETURN_NEG_1;
#endif

    case _SC_MAPPED_FILES:
#ifdef	_POSIX_MAPPED_FILES
        return 1;
#else
        RETURN_NEG_1;
#endif

    case _SC_MEMLOCK:
#ifdef	_POSIX_MEMLOCK
        return 1;
#else
        RETURN_NEG_1;
#endif

    case _SC_MEMLOCK_RANGE:
#ifdef	_POSIX_MEMLOCK_RANGE
        return 1;
#else
        RETURN_NEG_1;
#endif

    case _SC_MEMORY_PROTECTION:
#ifdef	_POSIX_MEMORY_PROTECTION
        return 1;
#else
        RETURN_NEG_1;
#endif

    case _SC_MESSAGE_PASSING:
#ifdef	_POSIX_MESSAGE_PASSING
        return 1;
#else
        RETURN_NEG_1;
#endif

    case _SC_SEMAPHORES:
#ifdef	_POSIX_SEMAPHORES
        return 1;
#else
        RETURN_NEG_1;
#endif

    case _SC_SHARED_MEMORY_OBJECTS:
#ifdef	_POSIX_SHARED_MEMORY_OBJECTS
        return 1;
#else
        RETURN_NEG_1;
#endif

    case _SC_VERSION:
        return _POSIX_VERSION;

    case _SC_PAGESIZE:
#if defined(GETPAGESIZE_IS_DYNAMIC) && (GETPAGESIZE_IS_DYNAMIC == 1)
        RETURN_FUNCTION(getpagesize());
#else
        return getpagesize();		/* note: currently this is not dynamic */
#endif

    case _SC_AIO_LISTIO_MAX:
#ifdef	AIO_LISTIO_MAX
        return AIO_LISTIO_MAX;
#else
        RETURN_NEG_1;
#endif

    case _SC_AIO_MAX:
#ifdef	AIO_MAX
        return AIO_MAX;
#else
        RETURN_NEG_1;
#endif

    case _SC_AIO_PRIO_DELTA_MAX:
#ifdef	AIO_PRIO_DELTA_MAX
        return AIO_PRIO_DELTA_MAX;
#else
        RETURN_NEG_1;
#endif

    case _SC_DELAYTIMER_MAX:
#ifdef	DELAYTIMER_MAX
        return DELAYTIMER_MAX;
#else
        RETURN_NEG_1;
#endif

    case _SC_MQ_OPEN_MAX:
#ifdef	MQ_OPEN_MAX
        return MQ_OPEN_MAX;
#else
        RETURN_NEG_1;
#endif

    case _SC_MQ_PRIO_MAX:
#ifdef	MQ_PRIO_MAX
        return MQ_PRIO_MAX;
#else
        RETURN_NEG_1;
#endif

    case _SC_RTSIG_MAX:
#ifdef	RTSIG_MAX
        return RTSIG_MAX;
#else
        RETURN_NEG_1;
#endif

    case _SC_SEM_NSEMS_MAX:
#ifdef	SEM_NSEMS_MAX
        return SEM_NSEMS_MAX;
#else
        RETURN_NEG_1;
#endif

    case _SC_SEM_VALUE_MAX:
#ifdef	SEM_VALUE_MAX
        return SEM_VALUE_MAX;
#else
        RETURN_NEG_1;
#endif

    case _SC_SIGQUEUE_MAX:
#ifdef	SIGQUEUE_MAX
        return SIGQUEUE_MAX;
#else
        RETURN_NEG_1;
#endif

    case _SC_TIMER_MAX:
#ifdef	TIMER_MAX
        return TIMER_MAX;
#else
        RETURN_NEG_1;
#endif

    case _SC_BC_BASE_MAX:
#ifdef	BC_BASE_MAX
        return BC_BASE_MAX;
#else
        RETURN_NEG_1;
#endif

    case _SC_BC_DIM_MAX:
#ifdef	BC_DIM_MAX
        return BC_DIM_MAX;
#else
        RETURN_NEG_1;
#endif

    case _SC_BC_SCALE_MAX:
#ifdef	BC_SCALE_MAX
        return BC_SCALE_MAX;
#else
        RETURN_NEG_1;
#endif

    case _SC_BC_STRING_MAX:
#ifdef	BC_STRING_MAX
        return BC_STRING_MAX;
#else
        RETURN_NEG_1;
#endif

    case _SC_COLL_WEIGHTS_MAX:
#ifdef	COLL_WEIGHTS_MAX
        return COLL_WEIGHTS_MAX;
#else
        RETURN_NEG_1;
#endif

    case _SC_EQUIV_CLASS_MAX:
#ifdef	EQUIV_CLASS_MAX
        return EQUIV_CLASS_MAX;
#else
        RETURN_NEG_1;
#endif

    case _SC_EXPR_NEST_MAX:
#ifdef	EXPR_NEST_MAX
        return EXPR_NEST_MAX;
#else
        RETURN_NEG_1;
#endif

    case _SC_LINE_MAX:
#ifdef	LINE_MAX
        return LINE_MAX;
#else
        RETURN_NEG_1;
#endif

    case _SC_RE_DUP_MAX:
#ifdef	RE_DUP_MAX
        return RE_DUP_MAX;
#else
        RETURN_NEG_1;
#endif

    case _SC_CHARCLASS_NAME_MAX:
#ifdef	CHARCLASS_NAME_MAX
        return CHARCLASS_NAME_MAX;
#else
        RETURN_NEG_1;
#endif

    case _SC_PII:
#ifdef	_POSIX_PII
        return 1;
#else
        RETURN_NEG_1;
#endif

    case _SC_PII_XTI:
#ifdef	_POSIX_PII_XTI
        return 1;
#else
        RETURN_NEG_1;
#endif

    case _SC_PII_SOCKET:
#ifdef	_POSIX_PII_SOCKET
        return 1;
#else
        RETURN_NEG_1;
#endif

    case _SC_PII_INTERNET:
#ifdef	_POSIX_PII_INTERNET
        return 1;
#else
        RETURN_NEG_1;
#endif

    case _SC_PII_OSI:
#ifdef	_POSIX_PII_OSI
        return 1;
#else
        RETURN_NEG_1;
#endif

    case _SC_POLL:
#ifdef	_POSIX_POLL
        return 1;
#else
        RETURN_NEG_1;
#endif

    case _SC_SELECT:
#ifdef	_POSIX_SELECT
        return 1;
#else
        RETURN_NEG_1;
#endif

    case _SC_UIO_MAXIOV:
#ifdef	UIO_MAXIOV
        return UIO_MAXIOV;
#else
        RETURN_NEG_1;
#endif

    case _SC_PII_INTERNET_STREAM:
#ifdef	_POSIX_PII_INTERNET_STREAM
        return 1;
#else
        RETURN_NEG_1;
#endif

    case _SC_PII_INTERNET_DGRAM:
#ifdef	_POSIX_PII_INTERNET_DGRAM
        return 1;
#else
        RETURN_NEG_1;
#endif

    case _SC_PII_OSI_COTS:
#ifdef	_POSIX_PII_OSI_COTS
        return 1;
#else
        RETURN_NEG_1;
#endif

    case _SC_PII_OSI_CLTS:
#ifdef	_POSIX_PII_OSI_CLTS
        return 1;
#else
        RETURN_NEG_1;
#endif

    case _SC_PII_OSI_M:
#ifdef	_POSIX_PII_OSI_M
        return 1;
#else
        RETURN_NEG_1;
#endif

    case _SC_T_IOV_MAX:
#ifdef	_T_IOV_MAX
        return _T_IOV_MAX;
#else
        RETURN_NEG_1;
#endif

    case _SC_2_VERSION:
        return _POSIX2_VERSION;

    case _SC_2_C_BIND:
#ifdef	_POSIX2_C_BIND
        return _POSIX2_C_BIND;
#else
        RETURN_NEG_1;
#endif

    case _SC_2_C_DEV:
#ifdef	_POSIX2_C_DEV
        return _POSIX2_C_DEV;
#else
        RETURN_NEG_1;
#endif

    case _SC_2_C_VERSION:
#ifdef	_POSIX2_C_VERSION
        return _POSIX2_C_VERSION;
#else
        RETURN_NEG_1;
#endif

    case _SC_2_FORT_DEV:
#ifdef	_POSIX2_FORT_DEV
        return _POSIX2_FORT_DEV;
#else
        RETURN_NEG_1;
#endif

    case _SC_2_FORT_RUN:
#ifdef	_POSIX2_FORT_RUN
        return _POSIX2_FORT_RUN;
#else
        RETURN_NEG_1;
#endif

    case _SC_2_LOCALEDEF:
#ifdef	_POSIX2_LOCALEDEF
        return _POSIX2_LOCALEDEF;
#else
        RETURN_NEG_1;
#endif

    case _SC_2_SW_DEV:
#ifdef	_POSIX2_SW_DEV
        return _POSIX2_SW_DEV;
#else
        RETURN_NEG_1;
#endif

    case _SC_2_CHAR_TERM:
#ifdef	_POSIX2_CHAR_TERM
        return _POSIX2_CHAR_TERM;
#else
        RETURN_NEG_1;
#endif

    case _SC_2_UPE:
#ifdef	_POSIX2_UPE
        return _POSIX2_UPE;
#else
        RETURN_NEG_1;
#endif

    /* POSIX 1003.1c (POSIX Threads).  */
    case _SC_THREADS:
#ifdef __UCLIBC_HAS_THREADS__
        return 1;
#else
        RETURN_NEG_1;
#endif

    case _SC_THREAD_SAFE_FUNCTIONS:
#ifdef __UCLIBC_HAS_THREADS__
        return 1;
#else
        RETURN_NEG_1;
#endif

    /* If you change these, also change libc/pwd_grp/pwd_grp.c to match */
    case _SC_GETGR_R_SIZE_MAX:
        return __UCLIBC_GRP_BUFFER_SIZE__;

    case _SC_GETPW_R_SIZE_MAX:
        return __UCLIBC_PWD_BUFFER_SIZE__;

    /* getlogin() is a worthless interface.  In uClibc we let the user specify
     * whatever they want via the LOGNAME environment variable, or we return NULL
     * if getenv() fails to find anything.  So this is merely how large an
     * environment variable can be.  Let's use 256.  */
    case _SC_LOGIN_NAME_MAX:
        return 256;

        /* If you change this, also change _SC_TTY_NAME_MAX in libc/unistd/sysconf.c */
#define TTYNAME_BUFLEN		32
    case _SC_TTY_NAME_MAX:
        return TTYNAME_BUFLEN;

    case _SC_THREAD_DESTRUCTOR_ITERATIONS:
#ifdef	_POSIX_THREAD_DESTRUCTOR_ITERATIONS
        return _POSIX_THREAD_DESTRUCTOR_ITERATIONS;
#else
        RETURN_NEG_1;
#endif

    case _SC_THREAD_KEYS_MAX:
#ifdef	PTHREAD_KEYS_MAX
        return PTHREAD_KEYS_MAX;
#else
        RETURN_NEG_1;
#endif

    case _SC_THREAD_STACK_MIN:
#ifdef	PTHREAD_STACK_MIN
        return PTHREAD_STACK_MIN;
#else
        RETURN_NEG_1;
#endif

    case _SC_THREAD_THREADS_MAX:
#ifdef	PTHREAD_THREADS_MAX
        return PTHREAD_THREADS_MAX;
#else
        RETURN_NEG_1;
#endif

    case _SC_THREAD_ATTR_STACKADDR:
#ifdef	_POSIX_THREAD_ATTR_STACKADDR
        return 1;
#else
        RETURN_NEG_1;
#endif

    case _SC_THREAD_ATTR_STACKSIZE:
#ifdef	_POSIX_THREAD_ATTR_STACKSIZE
        return 1;
#else
        RETURN_NEG_1;
#endif

    case _SC_THREAD_PRIORITY_SCHEDULING:
#ifdef	_POSIX_THREAD_PRIORITY_SCHEDULING
        return 1;
#else
        RETURN_NEG_1;
#endif

    case _SC_THREAD_PRIO_INHERIT:
#ifdef	_POSIX_THREAD_PRIO_INHERIT
        return 1;
#else
        RETURN_NEG_1;
#endif

    case _SC_THREAD_PRIO_PROTECT:
#ifdef	_POSIX_THREAD_PRIO_PROTECT
        return 1;
#else
        RETURN_NEG_1;
#endif

    case _SC_THREAD_PROCESS_SHARED:
#ifdef	_POSIX_THREAD_PROCESS_SHARED
        return 1;
#else
        RETURN_NEG_1;
#endif

    case _SC_NPROCESSORS_CONF:
        RETURN_FUNCTION(nprocessors_conf());

    case _SC_NPROCESSORS_ONLN:
        RETURN_FUNCTION(nprocessors_onln());

    case _SC_PHYS_PAGES:
#if 0
        RETURN_FUNCTION(get_phys_pages());
#else
        RETURN_NEG_1;
#endif

    case _SC_AVPHYS_PAGES:
#if 0
        RETURN_FUNCTION(get_avphys_pages());
#else
        RETURN_NEG_1;
#endif

    case _SC_ATEXIT_MAX:
        return __UCLIBC_MAX_ATEXIT;

    case _SC_PASS_MAX:
        /* We have no limit but since the return value might be used to
        allocate a buffer we restrict the value.  */
        return BUFSIZ;

    case _SC_XOPEN_VERSION:
        return _XOPEN_VERSION;

    case _SC_XOPEN_XCU_VERSION:
        return _XOPEN_XCU_VERSION;

    case _SC_XOPEN_UNIX:
        return _XOPEN_UNIX;

    case _SC_XOPEN_CRYPT:
#ifdef	_XOPEN_CRYPT
        return _XOPEN_CRYPT;
#else
        RETURN_NEG_1;
#endif

    case _SC_XOPEN_ENH_I18N:
#ifdef	_XOPEN_ENH_I18N
        return _XOPEN_ENH_I18N;
#else
        RETURN_NEG_1;
#endif

    case _SC_XOPEN_SHM:
#ifdef	_XOPEN_SHM
        return _XOPEN_SHM;
#else
        RETURN_NEG_1;
#endif

    case _SC_XOPEN_XPG2:
#ifdef	_XOPEN_XPG2
        return _XOPEN_XPG2;
#else
        RETURN_NEG_1;
#endif

    case _SC_XOPEN_XPG3:
#ifdef	_XOPEN_XPG3
        return _XOPEN_XPG3;
#else
        RETURN_NEG_1;
#endif

    case _SC_XOPEN_XPG4:
#ifdef	_XOPEN_XPG4
        return _XOPEN_XPG4;
#else
        RETURN_NEG_1;
#endif

    case _SC_CHAR_BIT:
        return CHAR_BIT;

    case _SC_CHAR_MAX:
        return CHAR_MAX;

    case _SC_CHAR_MIN:
        return CHAR_MIN;

    case _SC_INT_MAX:
        return INT_MAX;

    case _SC_INT_MIN:
        return INT_MIN;

    case _SC_LONG_BIT:
        return sizeof (long int) * CHAR_BIT;

    case _SC_WORD_BIT:
        return sizeof (int) * CHAR_BIT;

    case _SC_MB_LEN_MAX:
        return MB_LEN_MAX;

    case _SC_NZERO:
        return NZERO;

    case _SC_SSIZE_MAX:
        return _POSIX_SSIZE_MAX;

    case _SC_SCHAR_MAX:
        return SCHAR_MAX;

    case _SC_SCHAR_MIN:
        return SCHAR_MIN;

    case _SC_SHRT_MAX:
        return SHRT_MAX;

    case _SC_SHRT_MIN:
        return SHRT_MIN;

    case _SC_UCHAR_MAX:
        return UCHAR_MAX;

    case _SC_UINT_MAX:
        return UINT_MAX;

    case _SC_ULONG_MAX:
        return ULONG_MAX;

    case _SC_USHRT_MAX:
        return USHRT_MAX;

    case _SC_NL_ARGMAX:
#ifdef	NL_ARGMAX
        return NL_ARGMAX;
#else
        RETURN_NEG_1;
#endif

    case _SC_NL_LANGMAX:
#ifdef	NL_LANGMAX
        return NL_LANGMAX;
#else
        RETURN_NEG_1;
#endif

    case _SC_NL_MSGMAX:
#ifdef	NL_MSGMAX
        return NL_MSGMAX;
#else
        RETURN_NEG_1;
#endif

    case _SC_NL_NMAX:
#ifdef	NL_NMAX
        return NL_NMAX;
#else
        RETURN_NEG_1;
#endif

    case _SC_NL_SETMAX:
#ifdef	NL_SETMAX
        return NL_SETMAX;
#else
        RETURN_NEG_1;
#endif

    case _SC_NL_TEXTMAX:
#ifdef	NL_TEXTMAX
        return NL_TEXTMAX;
#else
        RETURN_NEG_1;
#endif

    case _SC_XBS5_ILP32_OFF32:
#ifdef _XBS5_ILP32_OFF32
        return _XBS5_ILP32_OFF32;
#else
        RETURN_NEG_1;
#endif
    case _SC_XBS5_ILP32_OFFBIG:
#ifdef _XBS5_ILP32_OFFBIG
        return _XBS5_ILP32_OFFBIG;
#else
        RETURN_NEG_1;
#endif
    case _SC_XBS5_LP64_OFF64:
#ifdef _XBS5_LP64_OFF64
        return _XBS5_LP64_OFF64;
#else
        RETURN_NEG_1;
#endif
    case _SC_XBS5_LPBIG_OFFBIG:
#ifdef _XBS5_LPBIG_OFFBIG
        return _XBS5_LPBIG_OFFBIG;
#else
        RETURN_NEG_1;
#endif
    case _SC_V7_ILP32_OFF32:
#ifdef _POSIX_V7_ILP32_OFF32
        return _POSIX_V7_ILP32_OFF32;
#else
        RETURN_NEG_1;
#endif
    case _SC_V7_ILP32_OFFBIG:
#ifdef _POSIX_V7_ILP32_OFFBIG
        return _POSIX_V7_ILP32_OFFBIG;
#else
        RETURN_NEG_1;
#endif
    case _SC_V7_LP64_OFF64:
#ifdef _POSIX_V7_LP64_OFF64
        return _POSIX_V7_LP64_OFF64;
#else
        RETURN_NEG_1;
#endif
    case _SC_V7_LPBIG_OFFBIG:
#ifdef _POSIX_V7_LPBIG_OFFBIG
        return _POSIX_V7_LPBIG_OFFBIG;
#else
        RETURN_NEG_1;
#endif

    case _SC_XOPEN_LEGACY:
        return _XOPEN_LEGACY;

    case _SC_XOPEN_REALTIME:
#ifdef _XOPEN_REALTIME
        return _XOPEN_REALTIME;
#else
        RETURN_NEG_1;
#endif
    case _SC_XOPEN_REALTIME_THREADS:
#ifdef _XOPEN_REALTIME_THREADS
        return _XOPEN_REALTIME_THREADS;
#else
        RETURN_NEG_1;
#endif

    case _SC_MONOTONIC_CLOCK:
#ifdef __NR_clock_getres
        /* Check using the clock_getres system call.  */
# ifdef __UCLIBC_HAS_THREADS_NATIVE__
    {
        struct timespec ts;
        INTERNAL_SYSCALL_DECL (err);
        int r;
        r = INTERNAL_SYSCALL (clock_getres, err, 2, CLOCK_MONOTONIC, &ts);
        return INTERNAL_SYSCALL_ERROR_P (r, err) ? -1 : _POSIX_VERSION;
    }
# elif defined __UCLIBC_HAS_REALTIME__
    if (clock_getres(CLOCK_MONOTONIC, NULL) >= 0)
        return _POSIX_VERSION;
# endif
#endif
    RETURN_NEG_1;

#ifdef __UCLIBC_HAS_THREADS_NATIVE__
    case _SC_THREAD_CPUTIME:
# if _POSIX_THREAD_CPUTIME > 0
        return _POSIX_THREAD_CPUTIME;
# else
        RETURN_NEG_1;
# endif
#endif
    }
}
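
Since the default case sets EINVAL, callers can distinguish an unrecognized option from an unsupported feature. A minimal sketch:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

int
main (void)
{
  /* Clear errno first: sysconf leaves it untouched on success.  */
  errno = 0;
  long r = sysconf (_SC_MONOTONIC_CLOCK);
  if (r == -1 && errno == EINVAL)
    printf ("option not recognized\n");
  else if (r == -1)
    printf ("monotonic clock unsupported\n");
  else
    printf ("monotonic clock supported (%ld)\n", r);
  return 0;
}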
Example 7
time_t
time (time_t *t)
{
  INTERNAL_SYSCALL_DECL (err);
  return INTERNAL_SYSCALL (time, err, 1, t);
}
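
Callers may pass either a pointer to receive the result or NULL; a trivial sketch:

#include <stdio.h>
#include <time.h>

int
main (void)
{
  /* The current time is both returned and, if T is non-NULL, stored.  */
  time_t now = time (NULL);
  printf ("seconds since the Epoch: %lld\n", (long long) now);
  return 0;
}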
Example 8
static int
__pthread_mutex_lock_full (pthread_mutex_t *mutex)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  switch (PTHREAD_MUTEX_TYPE (mutex))
    {
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
		     &mutex->__data.__list.__next);

      oldval = mutex->__data.__lock;
      do
	{
	again:
	  if ((oldval & FUTEX_OWNER_DIED) != 0)
	    {
	      /* The previous owner died.  Try locking the mutex.  */
	      int newval = id;
#ifdef NO_INCR
	      newval |= FUTEX_WAITERS;
#else
	      newval |= (oldval & FUTEX_WAITERS);
#endif

	      newval
		= atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
						       newval, oldval);

	      if (newval != oldval)
		{
		  oldval = newval;
		  goto again;
		}

	      /* We got the mutex.  */
	      mutex->__data.__count = 1;
	      /* But it is inconsistent unless marked otherwise.  */
	      mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

	      ENQUEUE_MUTEX (mutex);
	      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

	      /* Note that we deliberately exit here.  If we fall
		 through to the end of the function __nusers would be
		 incremented which is not correct because the old
		 owner has to be discounted.  If we are not supposed
		 to increment __nusers we actually have to decrement
		 it here.  */
#ifdef NO_INCR
	      --mutex->__data.__nusers;
#endif

	      return EOWNERDEAD;
	    }

	  /* Check whether we already hold the mutex.  */
	  if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
	    {
	      int kind = PTHREAD_MUTEX_TYPE (mutex);
	      if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
		{
		  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
				 NULL);
		  return EDEADLK;
		}

	      if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
		{
		  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
				 NULL);

		  /* Just bump the counter.  */
		  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
		    /* Overflow of the counter.  */
		    return EAGAIN;

		  ++mutex->__data.__count;

		  return 0;
		}
	    }

	  oldval = LLL_ROBUST_MUTEX_LOCK (mutex, id);

	  if (__builtin_expect (mutex->__data.__owner
				== PTHREAD_MUTEX_NOTRECOVERABLE, 0))
	    {
	      /* This mutex is now not recoverable.  */
	      mutex->__data.__count = 0;
	      lll_unlock (mutex->__data.__lock,
			  PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
	      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
	      return ENOTRECOVERABLE;
	    }
	}
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      mutex->__data.__count = 1;
      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;

    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
	int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
	int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

	if (robust)
	  /* Note: robust PI futexes are signaled by setting bit 0.  */
	  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
			 (void *) (((uintptr_t) &mutex->__data.__list.__next)
				   | 1));

	oldval = mutex->__data.__lock;

	/* Check whether we already hold the mutex.  */
	if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
	  {
	    if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
	      {
		THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
		return EDEADLK;
	      }

	    if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
	      {
		THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

		/* Just bump the counter.  */
		if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
		  /* Overflow of the counter.  */
		  return EAGAIN;

		++mutex->__data.__count;

		return 0;
	      }
	  }

	int newval = id;
#ifdef NO_INCR
	newval |= FUTEX_WAITERS;
#endif
	oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
						      newval, 0);

	if (oldval != 0)
	  {
	    /* The mutex is locked.  The kernel will now take care of
	       everything.  */
	    int private = (robust
			   ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
			   : PTHREAD_MUTEX_PSHARED (mutex));
	    INTERNAL_SYSCALL_DECL (__err);
	    int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
				      __lll_private_flag (FUTEX_LOCK_PI,
							  private), 1, 0);

	    if (INTERNAL_SYSCALL_ERROR_P (e, __err)
		&& (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
		    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK))
	      {
		assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
			|| (kind != PTHREAD_MUTEX_ERRORCHECK_NP
			    && kind != PTHREAD_MUTEX_RECURSIVE_NP));
		/* ESRCH can happen only for non-robust PI mutexes where
		   the owner of the lock died.  */
		assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH || !robust);

		/* Delay the thread indefinitely.  */
		while (1)
		  pause_not_cancel ();
	      }

	    oldval = mutex->__data.__lock;

	    assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
	  }

	if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
	  {
	    atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

	    /* We got the mutex.  */
	    mutex->__data.__count = 1;
	    /* But it is inconsistent unless marked otherwise.  */
	    mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

	    ENQUEUE_MUTEX_PI (mutex);
	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

	    /* Note that we deliberately exit here.  If we fall
	       through to the end of the function __nusers would be
	       incremented which is not correct because the old owner
	       has to be discounted.  If we are not supposed to
	       increment __nusers we actually have to decrement it here.  */
#ifdef NO_INCR
	    --mutex->__data.__nusers;
#endif

	    return EOWNERDEAD;
	  }

	if (robust
	    && __builtin_expect (mutex->__data.__owner
				 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
	  {
	    /* This mutex is now not recoverable.  */
	    mutex->__data.__count = 0;

	    INTERNAL_SYSCALL_DECL (__err);
	    INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
			      __lll_private_flag (FUTEX_UNLOCK_PI,
						  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
			      0, 0);

	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
	    return ENOTRECOVERABLE;
	  }

	mutex->__data.__count = 1;
	if (robust)
	  {
	    ENQUEUE_MUTEX_PI (mutex);
	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
	  }
      }
      break;

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
	int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

	oldval = mutex->__data.__lock;

	/* Check whether we already hold the mutex.  */
	if (mutex->__data.__owner == id)
	  {
	    if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
	      return EDEADLK;

	    if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
	      {
		/* Just bump the counter.  */
		if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
		  /* Overflow of the counter.  */
		  return EAGAIN;

		++mutex->__data.__count;

		return 0;
	      }
	  }

	int oldprio = -1, ceilval;
	do
	  {
	    int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
			  >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

	    if (__pthread_current_priority () > ceiling)
	      {
		if (oldprio != -1)
		  __pthread_tpp_change_priority (oldprio, -1);
		return EINVAL;
	      }

	    int retval = __pthread_tpp_change_priority (oldprio, ceiling);
	    if (retval)
	      return retval;

	    ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
	    oldprio = ceiling;

	    oldval
	      = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
#ifdef NO_INCR
						     ceilval | 2,
#else
						     ceilval | 1,
#endif
						     ceilval);

	    if (oldval == ceilval)
	      break;

	    do
	      {
		oldval
		  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
							 ceilval | 2,
							 ceilval | 1);

		if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
		  break;

		if (oldval != ceilval)
		  lll_futex_wait (&mutex->__data.__lock, ceilval | 2,
				  PTHREAD_MUTEX_PSHARED (mutex));
	      }
	    while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
							ceilval | 2, ceilval)
		   != ceilval);
	  }
	while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

	assert (mutex->__data.__owner == 0);
	mutex->__data.__count = 1;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  /* Record the ownership.  */
  mutex->__data.__owner = id;
#ifndef NO_INCR
  ++mutex->__data.__nusers;
#endif

  return 0;
}
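
From user code, the EOWNERDEAD/ENOTRECOVERABLE protocol implemented above is driven through the standard robust-mutex API. A minimal sketch:

#include <errno.h>
#include <pthread.h>

static pthread_mutex_t m;

int
main (void)
{
  pthread_mutexattr_t ma;
  pthread_mutexattr_init (&ma);
  pthread_mutexattr_setrobust (&ma, PTHREAD_MUTEX_ROBUST);
  pthread_mutex_init (&m, &ma);

  int r = pthread_mutex_lock (&m);
  if (r == EOWNERDEAD)
    {
      /* The previous owner died holding the lock: repair the protected
	 state, then mark the mutex consistent again.  */
      pthread_mutex_consistent (&m);
      r = 0;
    }
  if (r == 0)
    pthread_mutex_unlock (&m);
  return r;
}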
Example 9
void
__pthread_initialize_minimal_internal (void)
{
#ifndef SHARED
  /* Unlike in the dynamically linked case the dynamic linker has not
     taken care of initializing the TLS data structures.  */
  __libc_setup_tls (TLS_TCB_SIZE, TLS_TCB_ALIGN);

  /* We must prevent gcc from being clever and move any of the
     following code ahead of the __libc_setup_tls call.  This function
     will initialize the thread register which is subsequently
     used.  */
  __asm __volatile ("");
#endif

  /* Minimal initialization of the thread descriptor.  */
  struct pthread *pd = THREAD_SELF;
  INTERNAL_SYSCALL_DECL (err);
  pd->pid = pd->tid = INTERNAL_SYSCALL (set_tid_address, err, 1, &pd->tid);
  THREAD_SETMEM (pd, specific[0], &pd->specific_1stblock[0]);
  THREAD_SETMEM (pd, user_stack, true);
  if (LLL_LOCK_INITIALIZER != 0)
    THREAD_SETMEM (pd, lock, LLL_LOCK_INITIALIZER);
#if HP_TIMING_AVAIL
  THREAD_SETMEM (pd, cpuclock_offset, GL(dl_cpuclock_offset));
#endif

  /* Initialize the robust mutex data.  */
#ifdef __PTHREAD_MUTEX_HAVE_PREV
  pd->robust_prev = &pd->robust_head;
#endif
  pd->robust_head.list = &pd->robust_head;
#ifdef __NR_set_robust_list
  pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
				  - offsetof (pthread_mutex_t,
					      __data.__list.__next));
  int res = INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
			      sizeof (struct robust_list_head));
  if (INTERNAL_SYSCALL_ERROR_P (res, err))
#endif
    set_robust_list_not_avail ();

#ifndef __ASSUME_PRIVATE_FUTEX
  /* Private futexes are always used (at least internally) so that
     doing the test once this early is beneficial.  */
  {
    int word = 0;
    word = INTERNAL_SYSCALL (futex, err, 3, &word,
			    FUTEX_WAKE | FUTEX_PRIVATE_FLAG, 1);
    if (!INTERNAL_SYSCALL_ERROR_P (word, err))
      THREAD_SETMEM (pd, header.private_futex, FUTEX_PRIVATE_FLAG);
  }

  /* Private futexes were introduced before the FUTEX_CLOCK_REALTIME
     flag.  We don't have to run the test if we know the former are
     not supported.  This also means we know the kernel will return
     ENOSYS for unknown operations.  */
  if (THREAD_GETMEM (pd, header.private_futex) != 0)
#endif
#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
    {
      int word = 0;
      /* NB: the syscall actually takes six parameters.  The last is the
	 bit mask.  But since we will not actually wait at all the value
	 is irrelevant.  Given that passing six parameters is difficult
	 on some architectures we just pass whatever random value the
	 calling convention calls for to the kernel.  It causes no harm.  */
      word = INTERNAL_SYSCALL (futex, err, 5, &word,
			       FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME
			       | FUTEX_PRIVATE_FLAG, 1, NULL, 0);
      assert (INTERNAL_SYSCALL_ERROR_P (word, err));
      if (INTERNAL_SYSCALL_ERRNO (word, err) != ENOSYS)
	__set_futex_clock_realtime ();
    }
#endif

  /* Set initial thread's stack block from 0 up to __libc_stack_end.
     It will be bigger than it actually is, but for unwind.c/pt-longjmp.c
     purposes this is good enough.  */
  THREAD_SETMEM (pd, stackblock_size, (size_t) __libc_stack_end);

  /* Initialize the list of all running threads with the main thread.  */
  INIT_LIST_HEAD (&__stack_user);
  list_add (&pd->list, &__stack_user);

  /* Before initializing __stack_user, the debugger could not find us and
     had to set __nptl_initial_report_events.  Propagate its setting.  */
  THREAD_SETMEM (pd, report_events, __nptl_initial_report_events);

  /* Install the cancellation signal handler.  If for some reason we
     cannot install the handler we do not abort.  Maybe we should, but
     it is only asynchronous cancellation which is affected.  */
  struct sigaction sa;
  sa.sa_sigaction = sigcancel_handler;
  sa.sa_flags = SA_SIGINFO;
  __sigemptyset (&sa.sa_mask);

  (void) __libc_sigaction (SIGCANCEL, &sa, NULL);

  /* Install the handler used to change the threads' uid/gid.  */
  sa.sa_sigaction = sighandler_setxid;
  sa.sa_flags = SA_SIGINFO | SA_RESTART;

  (void) __libc_sigaction (SIGSETXID, &sa, NULL);

  /* The parent process might have left these signals blocked.  Just in
     case, unblock them.  We reuse the signal mask in the sigaction
     structure.  It is already cleared.  */
  __sigaddset (&sa.sa_mask, SIGCANCEL);
  __sigaddset (&sa.sa_mask, SIGSETXID);
  (void) INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_UNBLOCK, &sa.sa_mask,
			   NULL, _NSIG / 8);

  /* Get the size and alignment requirements for the static TLS
     block.  */
  size_t static_tls_align;
  _dl_get_tls_static_info (&__static_tls_size, &static_tls_align);

  /* Make sure the size takes all the alignments into account.  */
  if (STACK_ALIGN > static_tls_align)
    static_tls_align = STACK_ALIGN;
  __static_tls_align_m1 = static_tls_align - 1;

  __static_tls_size = roundup (__static_tls_size, static_tls_align);

  /* Determine the default allowed stack size.  This is the size used
     in case the user does not specify one.  */
  struct rlimit limit;
  if (getrlimit (RLIMIT_STACK, &limit) != 0
      || limit.rlim_cur == RLIM_INFINITY)
    /* The system limit is not usable.  Use an architecture-specific
       default.  */
    limit.rlim_cur = ARCH_STACK_DEFAULT_SIZE;
  else if (limit.rlim_cur < PTHREAD_STACK_MIN)
    /* The system limit is unusably small.
       Use the minimal size acceptable.  */
    limit.rlim_cur = PTHREAD_STACK_MIN;

  /* Make sure it meets the minimum size that allocate_stack
     (allocatestack.c) will demand, which depends on the page size.  */
  const uintptr_t pagesz = GLRO(dl_pagesize);
  const size_t minstack = pagesz + __static_tls_size + MINIMAL_REST_STACK;
  if (limit.rlim_cur < minstack)
    limit.rlim_cur = minstack;

  /* Round the resource limit up to page size.  */
  limit.rlim_cur = (limit.rlim_cur + pagesz - 1) & -pagesz;
  lll_lock (__default_pthread_attr_lock, LLL_PRIVATE);
  __default_pthread_attr.stacksize = limit.rlim_cur;
  __default_pthread_attr.guardsize = GLRO (dl_pagesize);
  lll_unlock (__default_pthread_attr_lock, LLL_PRIVATE);

#ifdef SHARED
  /* Transfer the old value from the dynamic linker's internal location.  */
  *__libc_dl_error_tsd () = *(*GL(dl_error_catch_tsd)) ();
  GL(dl_error_catch_tsd) = &__libc_dl_error_tsd;

  /* Make __rtld_lock_{,un}lock_recursive use pthread_mutex_{,un}lock,
     keep the lock count from the ld.so implementation.  */
  GL(dl_rtld_lock_recursive) = (void *) __pthread_mutex_lock;
  GL(dl_rtld_unlock_recursive) = (void *) __pthread_mutex_unlock;
  unsigned int rtld_lock_count = GL(dl_load_lock).mutex.__data.__count;
  GL(dl_load_lock).mutex.__data.__count = 0;
  while (rtld_lock_count-- > 0)
    __pthread_mutex_lock (&GL(dl_load_lock).mutex);

  GL(dl_make_stack_executable_hook) = &__make_stacks_executable;
#endif

  GL(dl_init_static_tls) = &__pthread_init_static_tls;

  GL(dl_wait_lookup_done) = &__wait_lookup_done;

  /* Register the fork generation counter with the libc.  */
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
  __libc_multiple_threads_ptr =
#endif
    __libc_pthread_init (&__fork_generation, __reclaim_stacks,
			 ptr_pthread_functions);

  /* Determine whether the machine is SMP or not.  */
  __is_smp = is_smp_system ();
}
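
The default stack-size computation at the end can be reproduced in isolation. A sketch with hypothetical stand-ins for the internal constants (the *_GUESS macros are illustrative, not glibc's actual values):

#include <stdio.h>
#include <unistd.h>
#include <sys/resource.h>

#define ARCH_STACK_DEFAULT_SIZE_GUESS (8UL * 1024 * 1024)
#define PTHREAD_STACK_MIN_GUESS 16384UL

int
main (void)
{
  struct rlimit limit;
  unsigned long pagesz = (unsigned long) sysconf (_SC_PAGESIZE);

  if (getrlimit (RLIMIT_STACK, &limit) != 0
      || limit.rlim_cur == RLIM_INFINITY)
    limit.rlim_cur = ARCH_STACK_DEFAULT_SIZE_GUESS;
  else if (limit.rlim_cur < PTHREAD_STACK_MIN_GUESS)
    limit.rlim_cur = PTHREAD_STACK_MIN_GUESS;

  /* Round up to a page multiple, as the initialization code does.  */
  limit.rlim_cur = (limit.rlim_cur + pagesz - 1) & ~(rlim_t) (pagesz - 1);

  printf ("default thread stack: %llu bytes\n",
	  (unsigned long long) limit.rlim_cur);
  return 0;
}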
Example 10
/* Get information about the file NAME in BUF.  */
int
__fxstatat (int vers, int fd, const char *file, struct stat *st, int flag)
{
  INTERNAL_SYSCALL_DECL (err);
  int result, errno_out;

  /* ??? The __fxstatat entry point is new enough that it must be using
     vers == _STAT_VER_KERNEL64.  For the benefit of dl-fxstatat64.c, we
     cannot actually check this, lest the compiler not optimize the rest
     of the function away.  */

#ifdef __NR_fstatat64
  if (__have_atfcts >= 0)
    {
      result = INTERNAL_SYSCALL (fstatat64, err, 4, fd, file, st, flag);
      if (__builtin_expect (!INTERNAL_SYSCALL_ERROR_P (result, err), 1))
	return result;
      errno_out = INTERNAL_SYSCALL_ERRNO (result, err);
#ifndef __ASSUME_ATFCTS
      if (errno_out == ENOSYS)
	__have_atfcts = -1;
      else
#endif
	{
	  __set_errno (errno_out);
	  return -1;
	}
    }
#endif /* __NR_fstatat64 */

  if (flag & ~AT_SYMLINK_NOFOLLOW)
    {
      __set_errno (EINVAL);
      return -1;
    }

  char *buf = NULL;

  if (fd != AT_FDCWD && file[0] != '/')
    {
      size_t filelen = strlen (file);
      if (__builtin_expect (filelen == 0, 0))
        {
          __set_errno (ENOENT);
          return -1;
        }

      static const char procfd[] = "/proc/self/fd/%d/%s";
      /* Buffer for the path name we are going to use.  It consists of
	 - the string /proc/self/fd/
	 - the file descriptor number
	 - the file name provided.
	 The final NUL is included in the sizeof.   A bit of overhead
	 due to the format elements compensates for possible negative
	 numbers.  */
      size_t buflen = sizeof (procfd) + sizeof (int) * 3 + filelen;
      buf = alloca (buflen);

      __snprintf (buf, buflen, procfd, fd, file);
      file = buf;
    }

#ifdef __NR_stat64
  if (!__libc_missing_axp_stat64)
    {
      if (flag & AT_SYMLINK_NOFOLLOW)
	result = INTERNAL_SYSCALL (lstat64, err, 2, file, st);
      else
	result = INTERNAL_SYSCALL (stat64, err, 2, file, st);

      if (__builtin_expect (!INTERNAL_SYSCALL_ERROR_P (result, err), 1))
	return result;
      errno_out = INTERNAL_SYSCALL_ERRNO (result, err);
# if __ASSUME_STAT64_SYSCALL == 0
      if (errno_out == ENOSYS)
	__libc_missing_axp_stat64 = 1;
      else
# endif
	goto fail;
    }
#endif /* __NR_stat64 */

  struct kernel_stat kst;

  if (flag & AT_SYMLINK_NOFOLLOW)
    result = INTERNAL_SYSCALL (lstat, err, 2, file, &kst);
  else
    result = INTERNAL_SYSCALL (stat, err, 2, file, &kst);

  if (__builtin_expect (!INTERNAL_SYSCALL_ERROR_P (result, err), 1))
    return __xstat_conv (vers, &kst, st);
  errno_out = INTERNAL_SYSCALL_ERRNO (result, err);

 fail:
  __atfct_seterrno (errno_out, fd, buf);

  return -1;
}
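
The public entry point backed by this wrapper is fstatat. A short usage sketch:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/stat.h>

int
main (void)
{
  struct stat st;
  int dirfd = open ("/etc", O_RDONLY | O_DIRECTORY);
  if (dirfd == -1)
    return 1;

  /* Stat "passwd" relative to DIRFD without following a final symlink.  */
  if (fstatat (dirfd, "passwd", &st, AT_SYMLINK_NOFOLLOW) == 0)
    printf ("size: %lld\n", (long long) st.st_size);

  close (dirfd);
  return 0;
}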
Example 11
int
fchownat (int fd, const char *file, uid_t owner, gid_t group, int flag)
{
  if (flag & ~AT_SYMLINK_NOFOLLOW)
    {
      __set_errno (EINVAL);
      return -1;
    }

  char *buf = NULL;

  if (fd != AT_FDCWD && file[0] != '/')
    {
      size_t filelen = strlen (file);
      static const char procfd[] = "/proc/self/fd/%d/%s";
      /* Buffer for the path name we are going to use.  It consists of
	 - the string /proc/self/fd/
	 - the file descriptor number
	 - the file name provided.
	 The final NUL is included in the sizeof.   A bit of overhead
	 due to the format elements compensates for possible negative
	 numbers.  */
      size_t buflen = sizeof (procfd) + sizeof (int) * 3 + filelen;
      buf = alloca (buflen);

      __snprintf (buf, buflen, procfd, fd, file);
      file = buf;
    }

  int result;
  INTERNAL_SYSCALL_DECL (err);

#if __ASSUME_LCHOWN_SYSCALL
  if (flag & AT_SYMLINK_NOFOLLOW)
    result = INTERNAL_SYSCALL (lchown, err, 3, file, owner, group);
  else
    result = INTERNAL_SYSCALL (chown, err, 3, file, owner, group);
#else
  char link[PATH_MAX + 2];
  char path[2 * PATH_MAX + 4];
  int loopct;
  size_t filelen;
  static int libc_old_chown = 0 /* -1=old linux, 1=new linux, 0=unknown */;

  if (libc_old_chown == 1)
    {
      if (flag & AT_SYMLINK_NOFOLLOW)
	result = INTERNAL_SYSCALL (lchown, err, 3, __ptrvalue (file), owner,
				   group);
      else
	result = INTERNAL_SYSCALL (chown, err, 3, __ptrvalue (file), owner,
				   group);
      goto out;
    }

# ifdef __NR_lchown
  if (flag & AT_SYMLINK_NOFOLLOW)
    {
      result = INTERNAL_SYSCALL (lchown, err, 3, __ptrvalue (file), owner,
				 group);
      goto out;
    }

  if (libc_old_chown == 0)
    {
      result = INTERNAL_SYSCALL (chown, err, 3, __ptrvalue (file), owner,
				 group);
      if (__builtin_expect (!INTERNAL_SYSCALL_ERROR_P (result, err), 1))
	return result;
      if (INTERNAL_SYSCALL_ERRNO (result, err) != ENOSYS)
	{
	  libc_old_chown = 1;
	  goto fail;
	}
      libc_old_chown = -1;
    }
# else
  if (flag & AT_SYMLINK_NOFOLLOW)
    {
      result = INTERNAL_SYSCALL (chown, err, 3, __ptrvalue (file), owner,
				 group);
      goto out;
    }
# endif

  result = __readlink (file, link, PATH_MAX + 1);
  if (result == -1)
    {
# ifdef __NR_lchown
      result = INTERNAL_SYSCALL (lchown, err, 3, __ptrvalue (file), owner,
				 group);
# else
      result = INTERNAL_SYSCALL (chown, err, 3, __ptrvalue (file), owner,
				 group);
# endif
      goto out;
    }

  filelen = strlen (file) + 1;
  if (filelen > sizeof (path))
    {
      errno = ENAMETOOLONG;
      return -1;
    }
  memcpy (path, file, filelen);

  /* 'The system has an arbitrary limit...'  In practice, we'll usually
     hit ENAMETOOLONG before this.  */
  for (loopct = 0; loopct < 128; ++loopct)
    {
      size_t linklen;

      if (result >= PATH_MAX + 1)
	{
	  errno = ENAMETOOLONG;
	  return -1;
	}

      link[result] = 0;  /* Null-terminate string, just-in-case.  */

      linklen = strlen (link) + 1;

      if (link[0] == '/')
	memcpy (path, link, linklen);
      else
	{
	  filelen = strlen (path);

	  while (filelen > 1 && path[filelen - 1] == '/')
	    --filelen;
	  while (filelen > 0 && path[filelen - 1] != '/')
	    --filelen;
	  if (filelen + linklen > sizeof (path))
	    {
	      errno = ENAMETOOLONG;
	      return -1;
	    }
	  memcpy (path + filelen, link, linklen);
	}

      result = __readlink (path, link, PATH_MAX + 1);

      if (result == -1)
	{
# ifdef __NR_lchown
	  result = INTERNAL_SYSCALL (lchown, err, 3, path, owner, group);
# else
	  result = INTERNAL_SYSCALL (chown, err, 3, path, owner, group);
# endif
	  goto out;
	}
    }
  __set_errno (ELOOP);
  return -1;

 out:
#endif

  if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (result, err), 0))
    {
#if !__ASSUME_LCHOWN_SYSCALL
    fail:
#endif
      __atfct_seterrno (INTERNAL_SYSCALL_ERRNO (result, err), fd, buf);
      result = -1;
    }

  return result;
}
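
The /proc/self/fd/%d/%s trick used by these *at emulations can be shown in isolation; the descriptor number and relative path below are hypothetical:

#include <stdio.h>
#include <string.h>

int
main (void)
{
  int fd = 3;
  const char *file = "subdir/data";

  static const char procfd[] = "/proc/self/fd/%d/%s";
  /* Same sizing as above: the format overhead pays for up to three
     decimal digits per byte of the int plus the terminating NUL.  */
  size_t buflen = sizeof (procfd) + sizeof (int) * 3 + strlen (file);
  char buf[64];

  if (buflen <= sizeof (buf))
    {
      snprintf (buf, buflen, procfd, fd, file);
      printf ("%s -> %s\n", file, buf);
    }
  return 0;
}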
Example 12
static int
do_clone (struct pthread *pd, const struct pthread_attr *attr,
	  int clone_flags, int (*fct) (void *), STACK_VARIABLES_PARMS,
	  int stopped)
{
#if 0
  PREPARE_CREATE;
#endif

  if (__builtin_expect (stopped != 0, 0))
    /* We make sure the thread does not run far by forcing it to get a
       lock.  We lock it here too so that the new thread cannot continue
       until we tell it to.  */
    lll_lock (pd->lock, LLL_PRIVATE);

  /* One more thread.  We cannot have the thread do this itself, since it
     might exist but not have been scheduled yet by the time we've returned
     and need to check the value to behave correctly.  We must do it before
     creating the thread, in case it does get scheduled first and then
     might mistakenly think it was the only thread.  In the failure case,
     we momentarily store a false value; this doesn't matter because there
     is no kosher thing a signal handler interrupting us right here can do
     that cares whether the thread count is correct.  */
  atomic_increment (&__nptl_nthreads);

#if !defined(__native_client__) && !defined(__ZRT_HOST)
#error "This code was changed to work only in Native Client"
#endif

  /* Native Client does not have a notion of a thread ID, so we make
     one up.  This must be small enough to leave space for the number
     identifying the clock.  Use CLOCK_IDFIELD_SIZE to guarantee that.  */
  pd->tid = ((unsigned int) pd) >> CLOCK_IDFIELD_SIZE;

  /* Unlike the kernel, the Native Client thread_create syscall does not
     push a return address onto the stack.  We emulate this behavior on
     x86-64 to meet the ABI requirement ((%rsp + 8) mod 16 == 0).  On
     x86-32 the attribute 'force_align_arg_pointer' does the same for
     start_thread ().  */
#ifdef __x86_64__
  STACK_VARIABLES_ARGS -= 8;
#endif

  if (__nacl_irt_thread_create (fct, STACK_VARIABLES_ARGS, pd) != 0)
    {
      pd->tid = 0;
      atomic_decrement (&__nptl_nthreads); /* Oops, we lied for a second.  */

      /* Failed.  If the thread is detached, remove the TCB here since
	 the caller cannot do this.  The caller remembered the thread
	 as detached and cannot reverify that it is not since it must
	 not access the thread descriptor again.  */
      if (IS_DETACHED (pd))
	__deallocate_stack (pd);

      /* We have to translate error codes.  */
      return errno == ENOMEM ? EAGAIN : errno;
    }

  /* Now we have the possibility to set scheduling parameters etc.  */
  if (__builtin_expect (stopped != 0, 0))
    {
      INTERNAL_SYSCALL_DECL (err);
      int res = 0;

      /* Set the affinity mask if necessary.  */
      if (attr->cpuset != NULL)
	{
	  res = INTERNAL_SYSCALL (sched_setaffinity, err, 3, pd->tid,
				  attr->cpusetsize, attr->cpuset);

	  if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (res, err), 0))
	    {
	      /* The operation failed.  We have to kill the thread.  First
		 send it the cancellation signal.  */
	      INTERNAL_SYSCALL_DECL (err2);
	    err_out:
#if __ASSUME_TGKILL
	      (void) INTERNAL_SYSCALL (tgkill, err2, 3,
				       THREAD_GETMEM (THREAD_SELF, pid),
				       pd->tid, SIGCANCEL);
#else
	      (void) INTERNAL_SYSCALL (tkill, err2, 2, pd->tid, SIGCANCEL);
#endif

	      return (INTERNAL_SYSCALL_ERROR_P (res, err)
		      ? INTERNAL_SYSCALL_ERRNO (res, err)
		      : 0);
	    }
	}

      /* Set the scheduling parameters.  */
      if ((attr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0)
	{
	  res = INTERNAL_SYSCALL (sched_setscheduler, err, 3, pd->tid,
				  pd->schedpolicy, &pd->schedparam);

	  if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (res, err), 0))
	    goto err_out;
	}
    }

  /* We now have for sure more than one thread.  The main thread might
     not yet have the flag set.  No need to set the global variable
     again if this is what we use.  */
  THREAD_SETMEM (THREAD_SELF, header.multiple_threads, 1);

  return 0;
}
Example 13
/* Get information about the file NAME relative to FD in ST.  */
int
__fxstatat (int vers, int fd, const char *file, struct stat *st, int flag)
{
  int result;
  INTERNAL_SYSCALL_DECL (err);
  struct stat64 st64;

#ifdef __NR_fstatat64
# ifndef __ASSUME_ATFCTS
  if (__have_atfcts >= 0)
# endif
    {
      result = INTERNAL_SYSCALL (fstatat64, err, 4, fd, file, &st64, flag);
# ifndef __ASSUME_ATFCTS
      if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (result, err), 1)
	  && INTERNAL_SYSCALL_ERRNO (result, err) == ENOSYS)
	__have_atfcts = -1;
      else
# endif
	if (!__builtin_expect (INTERNAL_SYSCALL_ERROR_P (result, err), 1))
	  return __xstat32_conv (vers, &st64, st);
	else
	  {
	    __set_errno (INTERNAL_SYSCALL_ERRNO (result, err));
	    return -1;
	  }
    }
#endif

#ifndef __ASSUME_ATFCTS
  if (__builtin_expect (flag & ~AT_SYMLINK_NOFOLLOW, 0))
    {
      __set_errno (EINVAL);
      return -1;
    }

  char *buf = NULL;

  if (fd != AT_FDCWD && file[0] != '/')
    {
      size_t filelen = strlen (file);
      if (__builtin_expect (filelen == 0, 0))
	{
	  __set_errno (ENOENT);
	  return -1;
	}

      static const char procfd[] = "/proc/self/fd/%d/%s";
      /* Buffer for the path name we are going to use.  It consists of
	 - the string /proc/self/fd/
	 - the file descriptor number
	 - the file name provided.
	 The final NUL is included in the sizeof.   A bit of overhead
	 due to the format elements compensates for possible negative
	 numbers.  */
      size_t buflen = sizeof (procfd) + sizeof (int) * 3 + filelen;
      buf = alloca (buflen);

      __snprintf (buf, buflen, procfd, fd, file);
      file = buf;
    }

  if (vers == _STAT_VER_KERNEL)
    {
      if (flag & AT_SYMLINK_NOFOLLOW)
	result = INTERNAL_SYSCALL (lstat, err, 2, CHECK_STRING (file),
				   CHECK_1 ((struct kernel_stat *) st));
      else
	result = INTERNAL_SYSCALL (stat, err, 2, CHECK_STRING (file),
				   CHECK_1 ((struct kernel_stat *) st));
      goto out;
    }

  if (flag & AT_SYMLINK_NOFOLLOW)
    result = INTERNAL_SYSCALL (lstat64, err, 2, CHECK_STRING (file),
			       __ptrvalue (&st64));
  else
    result = INTERNAL_SYSCALL (stat64, err, 2, CHECK_STRING (file),
			       __ptrvalue (&st64));
  if (__builtin_expect (!INTERNAL_SYSCALL_ERROR_P (result, err), 1))
    return __xstat32_conv (vers, &st64, st);

 out:
  if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (result, err), 0))
    {
      __atfct_seterrno (INTERNAL_SYSCALL_ERRNO (result, err), fd, buf);
      result = -1;
    }

  return result;
#endif
}