Example #1
void
__pthread_init_max_stacksize(void)
{
  struct rlimit limit;
  size_t max_stack;

  getrlimit(RLIMIT_STACK, &limit);
#ifdef FLOATING_STACKS
  if (limit.rlim_cur == RLIM_INFINITY)
    limit.rlim_cur = ARCH_STACK_MAX_SIZE;
# ifdef NEED_SEPARATE_REGISTER_STACK
  max_stack = limit.rlim_cur / 2;
# else
  max_stack = limit.rlim_cur;
# endif
#else
  /* Play with the stack size limit to make sure that no stack ever grows
     beyond STACK_SIZE minus one page (to act as a guard page). */
# ifdef NEED_SEPARATE_REGISTER_STACK
  /* STACK_SIZE bytes hold both the main stack and register backing
     store. The rlimit value applies to each individually.  */
  max_stack = STACK_SIZE/2 - __getpagesize ();
# else
  max_stack = STACK_SIZE - __getpagesize();
# endif
  if (limit.rlim_cur > max_stack) {
    limit.rlim_cur = max_stack;
    __libc_setrlimit(RLIMIT_STACK, &limit);
  }
#endif
  __pthread_max_stacksize = max_stack;
}
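A note on the computation above: it derives the per-thread stack ceiling from RLIMIT_STACK, halving it when a separate register backing store is needed and subtracting a page so a guard page fits. The sketch below only inspects those inputs with the public getrlimit interface (a standalone illustration, not part of the library):

#include <stdio.h>
#include <sys/resource.h>
#include <unistd.h>

int
main (void)
{
  struct rlimit limit;
  if (getrlimit (RLIMIT_STACK, &limit) != 0)
    {
      perror ("getrlimit");
      return 1;
    }
  printf ("RLIMIT_STACK cur: %llu\n", (unsigned long long) limit.rlim_cur);
  printf ("RLIMIT_STACK max: %llu\n", (unsigned long long) limit.rlim_max);
  printf ("page size:        %d\n", getpagesize ());
  return 0;
}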
Example #2
/* General function to get information about memory status from proc
   filesystem.  */
static long int
internal_function
phys_pages_info (const char *format)
{
  char buffer[8192];
  long int result = -1;

  /* If we cannot open the file, -1 is returned below with errno set;
     if no matching entry is found, the result stays 0.  */
  FILE *fp = fopen ("/proc/meminfo", "rc");
  if (fp != NULL)
    {
      /* No threads use this stream.  */
      __fsetlocking (fp, FSETLOCKING_BYCALLER);

      result = 0;
      /* Read lines until one matches FORMAT.  We don't have to fear
         extremely long lines since the kernel will not generate them.
         8192 bytes are really enough.  */
      while (fgets_unlocked (buffer, sizeof buffer, fp) != NULL)
	if (sscanf (buffer, format, &result) == 1)
	  {
	    result /= (__getpagesize () / 1024);
	    break;
	  }

      fclose (fp);
    }

  if (result == -1)
    /* We cannot get the needed value: signal an error.  */
    __set_errno (ENOSYS);

  return result;
}
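For context, glibc-style callers pass a scanf format that selects a single /proc/meminfo line, e.g. "MemTotal: %ld kB" for physical pages or "MemFree: %ld kB" for available ones, and the kB value is converted to pages. The standalone sketch below mirrors that with public APIs only; the helper name and format strings are illustrative assumptions, not the internal entry points:

/* Hypothetical standalone helper: select one /proc/meminfo line with a
   scanf format and convert its kB value to pages.  */
#include <stdio.h>
#include <unistd.h>

static long int
meminfo_in_pages (const char *format)
{
  char buffer[8192];
  long int result = -1;
  FILE *fp = fopen ("/proc/meminfo", "r");
  if (fp == NULL)
    return -1;
  while (fgets (buffer, sizeof buffer, fp) != NULL)
    if (sscanf (buffer, format, &result) == 1)
      {
        result /= (getpagesize () / 1024);   /* kB -> pages */
        break;
      }
  fclose (fp);
  return result;
}

int
main (void)
{
  printf ("MemTotal: %ld pages\n", meminfo_in_pages ("MemTotal: %ld kB"));
  printf ("MemFree:  %ld pages\n", meminfo_in_pages ("MemFree: %ld kB"));
  return 0;
}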
Example #3
__ptr_t valloc(__malloc_size_t size)
{
	if (pagesize == 0)
		pagesize = __getpagesize();

	return memalign(pagesize, size);
}
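Since valloc here just forwards to memalign with the page size, the returned block should start on a page boundary. A quick standalone check using only public APIs (an illustrative sketch, not library code):

#include <malloc.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int
main (void)
{
  long ps = sysconf (_SC_PAGESIZE);
  void *p = valloc (100);            /* behaves like memalign (ps, 100) */
  printf ("page size %ld, page-aligned: %s\n", ps,
          (p != NULL && ((uintptr_t) p % (uintptr_t) ps) == 0) ? "yes" : "no");
  free (p);
  return 0;
}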
Example #4
int
__munmap (void *addr, size_t len)
{
  int pagesize = __getpagesize ();
  if ((unsigned long) addr & (pagesize - 1))
    {
      __set_errno (EINVAL);
      return -1;
    }
  return INLINE_SYSCALL (munmap, 2, addr, len);
}
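The test (unsigned long) addr & (pagesize - 1) works because the page size is a power of two, so a page-aligned address has all of those low bits clear. A small standalone illustration with the public mmap/munmap interface (a sketch, not the wrapper above):

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int
main (void)
{
  unsigned long ps = (unsigned long) sysconf (_SC_PAGESIZE);
  char *base = mmap (NULL, 2 * ps, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (base == MAP_FAILED)
    return 1;
  /* mmap returns a page-aligned address; base + 1 is deliberately not.  */
  printf ("base     aligned: %d\n", ((unsigned long) base & (ps - 1)) == 0);
  printf ("base + 1 aligned: %d\n", ((unsigned long) (base + 1) & (ps - 1)) == 0);
  munmap (base, 2 * ps);
  return 0;
}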
Example #5
int __pthread_attr_setguardsize(pthread_attr_t *attr, size_t guardsize)
{
  size_t ps = __getpagesize ();

  /* First round up the guard size.  */
  guardsize = roundup (guardsize, ps);

  /* The guard size must be smaller than the stack size.  */
  if (guardsize >= attr->__stacksize) return EINVAL;

  attr->__guardsize = guardsize;

  return 0;
}
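Here roundup (the <sys/param.h> macro, or an equivalent) rounds the requested guard size up to a whole number of pages before it is compared with the stack size. A tiny standalone demonstration of that rounding (the commented values assume 4096-byte pages; the real size comes from getpagesize):

#include <stdio.h>
#include <sys/param.h>
#include <unistd.h>

int
main (void)
{
  size_t ps = (size_t) getpagesize ();
  /* Typical results with 4096-byte pages: 1 -> 4096, 4096 -> 4096,
     4097 -> 8192.  */
  printf ("%zu -> %zu\n", (size_t) 1, (size_t) roundup ((size_t) 1, ps));
  printf ("%zu -> %zu\n", ps, (size_t) roundup (ps, ps));
  printf ("%zu -> %zu\n", ps + 1, (size_t) roundup (ps + 1, ps));
  return 0;
}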
Example #6
//int __pthread_attr_init_2_1(pthread_attr_t *attr)
int pthread_attr_init(pthread_attr_t *attr)
{
  size_t ps = __getpagesize ();

  attr->__detachstate = PTHREAD_CREATE_JOINABLE;
  attr->__schedpolicy = SCHED_OTHER;
  attr->__schedparam.sched_priority = 0;
  attr->__inheritsched = PTHREAD_EXPLICIT_SCHED;
  attr->__scope = PTHREAD_SCOPE_SYSTEM;
  attr->__guardsize = ps;
  attr->__stackaddr = NULL;
  attr->__stackaddr_set = 0;
  attr->__stacksize = STACK_SIZE - ps;
  return 0;
}
Example #7
int
do_test (void)
{
  fd = open ("/dev/null", O_WRONLY);
  if (fd < 0)
    {
      printf ("couldn't open /dev/null, %m\n");
      return 1;
    }

  count = sysconf (_SC_NPROCESSORS_ONLN);
  if (count <= 0)
    count = 1;
  count *= 8;

  pthread_t th[count + 1];
  pthread_attr_t attr;
  int i, ret, sz;
  pthread_attr_init (&attr);
  sz = __getpagesize ();
  if (sz < PTHREAD_STACK_MIN)
	  sz = PTHREAD_STACK_MIN;
  pthread_attr_setstacksize (&attr, sz);

  for (i = 0; i <= count; ++i)
    if ((ret = pthread_create (&th[i], &attr, tf, (void *) (long) i)) != 0)
      {
	errno = ret;
	printf ("pthread_create %d failed: %m\n", i);
	return 1;
      }

  struct timespec ts = { .tv_sec = 20, .tv_nsec = 0 };
  while (nanosleep (&ts, &ts) != 0);

  pthread_mutex_lock (&lock);
  exiting = true;
  pthread_mutex_unlock (&lock);

  for (i = 0; i < count; ++i)
    pthread_join (th[i], NULL);

  close (fd);
  return 0;
}
Example #8
/* Compute (num*mem_unit)/pagesize, but avoid overflowing long int.
   In practice, mem_unit is never bigger than the page size, so after
   the first loop it is 1.  [In the kernel, it is initialized to
   PAGE_SIZE in mm/page_alloc.c:si_meminfo(), and then in
   kernel.sys.c:do_sysinfo() it is set to 1 if unsigned long can
   represent all the sizes measured in bytes].  */
static long int
sysinfo_mempages (unsigned long int num, unsigned int mem_unit)
{
  unsigned long int ps = __getpagesize ();

  while (mem_unit > 1 && ps > 1)
    {
      mem_unit >>= 1;
      ps >>= 1;
    }
  num *= mem_unit;
  while (ps > 1)
    {
      ps >>= 1;
      num >>= 1;
    }
  return num;
}
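The shifting works because both mem_unit and the page size are powers of two: dividing both by two until one of them reaches 1 preserves the ratio, so num is multiplied only by the reduced mem_unit (typically 1) and the remaining right shifts complete the division by the page size without overflowing long int. A standalone check against the direct computation (the sample values are assumptions):

#include <stdio.h>
#include <unistd.h>

/* Same algorithm as above, with the page size passed in explicitly.  */
static unsigned long
scale_to_pages (unsigned long num, unsigned int mem_unit, unsigned long ps)
{
  while (mem_unit > 1 && ps > 1)
    {
      mem_unit >>= 1;
      ps >>= 1;
    }
  num *= mem_unit;
  while (ps > 1)
    {
      ps >>= 1;
      num >>= 1;
    }
  return num;
}

int
main (void)
{
  unsigned long ps = (unsigned long) getpagesize ();
  unsigned long num = 123456789;   /* e.g. a byte count reported by sysinfo */
  unsigned int mem_unit = 1;       /* common case: sizes already in bytes */
  printf ("loop: %lu  direct: %lu\n",
          scale_to_pages (num, mem_unit, ps), num * mem_unit / ps);
  return 0;
}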
Example #9
int __pthread_attr_init_2_1(pthread_attr_t *attr)
{
  size_t ps = __getpagesize ();

  attr->__detachstate = PTHREAD_CREATE_JOINABLE;
  attr->__schedpolicy = SCHED_OTHER;
  attr->__schedparam.sched_priority = 0;
  attr->__inheritsched = PTHREAD_EXPLICIT_SCHED;
  attr->__scope = PTHREAD_SCOPE_SYSTEM;
#ifdef NEED_SEPARATE_REGISTER_STACK
  attr->__guardsize = ps + ps;
#else
  attr->__guardsize = ps;
#endif
  attr->__stackaddr = NULL;
  attr->__stackaddr_set = 0;
  attr->__stacksize = STACK_SIZE - ps;
  return 0;
}
Example #10
static TACommandVerdict __getpagesize_cmd(TAThread thread,TAInputStream stream)
{
    int res;
    /* [ Set errno ] */
    errno = readInt(&stream);

    START_TARGET_OPERATION(thread);

    res = __getpagesize();

    END_TARGET_OPERATION(thread);

    /* Response */
    writeInt(thread, res);
    writeInt(thread, errno);

    sendResponse(thread);

    return taDefaultVerdict;
}
Example #11
int
__pthread_attr_init_2_1 (pthread_attr_t *attr)
{
  struct pthread_attr *iattr;

  ASSERT_TYPE_SIZE (pthread_attr_t, __SIZEOF_PTHREAD_ATTR_T);
  ASSERT_PTHREAD_INTERNAL_SIZE (pthread_attr_t, struct pthread_attr);

  /* Many elements are initialized to zero so let us do it all at
     once.  This also takes care of clearing the bytes which are not
     internally used.  */
  memset (attr, '\0', __SIZEOF_PTHREAD_ATTR_T);

  iattr = (struct pthread_attr *) attr;

  /* Default guard size specified by the standard.  */
  iattr->guardsize = __getpagesize ();

  return 0;
}
Example #12
int __pthread_create_2_0(pthread_t *thread, const pthread_attr_t *attr,
			 void * (*start_routine)(void *), void *arg)
{
  /* The ATTR attribute is not really of type `pthread_attr_t *'.  It has
     the old size and access to the new members might crash the program.
     We convert the struct now.  */
  pthread_attr_t new_attr;

  if (attr != NULL)
    {
      size_t ps = __getpagesize ();

      memcpy (&new_attr, attr,
	      (size_t) &(((pthread_attr_t*)NULL)->__guardsize));
      new_attr.__guardsize = ps;
      new_attr.__stackaddr_set = 0;
      new_attr.__stackaddr = NULL;
      new_attr.__stacksize = STACK_SIZE - ps;
      attr = &new_attr;
    }
  return __pthread_create_2_1 (thread, attr, start_routine, arg);
}
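The expression (size_t) &(((pthread_attr_t*)NULL)->__guardsize) is a hand-rolled offsetof: only the leading fields that existed in the old, shorter attribute layout are copied, and the newer members are filled in explicitly. A generic illustration of the same idiom using hypothetical struct names (not the pthread types):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct old_attr { int a; int b; };                  /* hypothetical old layout */
struct new_attr { int a; int b; long c; long d; };  /* hypothetical new layout */

int
main (void)
{
  struct old_attr old = { 1, 2 };
  struct new_attr cur = { 0 };
  /* Copy only the prefix shared with the old layout, then set the rest.  */
  memcpy (&cur, &old, offsetof (struct new_attr, c));
  cur.c = 3;
  cur.d = 4;
  printf ("%d %d %ld %ld\n", cur.a, cur.b, cur.c, cur.d);
  return 0;
}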
Example #13
/* Get the value of the system variable NAME.  */
long int
__sysconf (int name)
{
  switch (name)
    {
      /* Also add obsolete or unnecessarily added constants here.  */
    case _SC_EQUIV_CLASS_MAX:
    default:
      __set_errno (EINVAL);
      return -1;

    case _SC_ARG_MAX:
#ifdef	ARG_MAX
      return ARG_MAX;
#else
      return -1;
#endif

    case _SC_CHILD_MAX:
#ifdef	CHILD_MAX
      return CHILD_MAX;
#else
      return __get_child_max ();
#endif

    case _SC_CLK_TCK:
      return __getclktck ();

    case _SC_NGROUPS_MAX:
#ifdef	NGROUPS_MAX
      return NGROUPS_MAX;
#else
      return -1;
#endif

    case _SC_OPEN_MAX:
      return __getdtablesize ();

    case _SC_STREAM_MAX:
#ifdef	STREAM_MAX
      return STREAM_MAX;
#else
      return FOPEN_MAX;
#endif

    case _SC_TZNAME_MAX:
      return MAX (__tzname_max (), _POSIX_TZNAME_MAX);

    case _SC_JOB_CONTROL:
#if CONF_IS_DEFINED_SET (_POSIX_JOB_CONTROL)
      return _POSIX_JOB_CONTROL;
#else
      return -1;
#endif

    case _SC_SAVED_IDS:
#if CONF_IS_DEFINED_SET (_POSIX_SAVED_IDS)
      return 1;
#else
      return -1;
#endif

    case _SC_REALTIME_SIGNALS:
#if CONF_IS_DEFINED_SET (_POSIX_REALTIME_SIGNALS)
      return _POSIX_REALTIME_SIGNALS;
#else
      return -1;
#endif

    case _SC_PRIORITY_SCHEDULING:
#if CONF_IS_DEFINED_SET (_POSIX_PRIORITY_SCHEDULING)
      return _POSIX_PRIORITY_SCHEDULING;
#else
      return -1;
#endif

    case _SC_TIMERS:
#if CONF_IS_DEFINED_SET (_POSIX_TIMERS)
      return _POSIX_TIMERS;
#else
      return -1;
#endif

    case _SC_ASYNCHRONOUS_IO:
#if CONF_IS_DEFINED_SET (_POSIX_ASYNCHRONOUS_IO)
      return _POSIX_ASYNCHRONOUS_IO;
#else
      return -1;
#endif

    case _SC_PRIORITIZED_IO:
#if CONF_IS_DEFINED_SET (_POSIX_PRIORITIZED_IO)
      return _POSIX_PRIORITIZED_IO;
#else
      return -1;
#endif

    case _SC_SYNCHRONIZED_IO:
#if CONF_IS_DEFINED_SET (_POSIX_SYNCHRONIZED_IO)
      return _POSIX_SYNCHRONIZED_IO;
#else
      return -1;
#endif

    case _SC_FSYNC:
#if CONF_IS_DEFINED_SET (_POSIX_FSYNC)
      return _POSIX_FSYNC;
#else
      return -1;
#endif

    case _SC_MAPPED_FILES:
#if CONF_IS_DEFINED_SET (_POSIX_MAPPED_FILES)
      return _POSIX_MAPPED_FILES;
#else
      return -1;
#endif

    case _SC_MEMLOCK:
#if CONF_IS_DEFINED_SET (_POSIX_MEMLOCK)
      return _POSIX_MEMLOCK;
#else
      return -1;
#endif

    case _SC_MEMLOCK_RANGE:
#if CONF_IS_DEFINED_SET (_POSIX_MEMLOCK_RANGE)
      return _POSIX_MEMLOCK_RANGE;
#else
      return -1;
#endif

    case _SC_MEMORY_PROTECTION:
#if CONF_IS_DEFINED_SET (_POSIX_MEMORY_PROTECTION)
      return _POSIX_MEMORY_PROTECTION;
#else
      return -1;
#endif

    case _SC_MESSAGE_PASSING:
#if CONF_IS_DEFINED_SET (_POSIX_MESSAGE_PASSING)
      return _POSIX_MESSAGE_PASSING;
#else
      return -1;
#endif

    case _SC_SEMAPHORES:
#if CONF_IS_DEFINED_SET (_POSIX_SEMAPHORES)
      return _POSIX_SEMAPHORES;
#else
      return -1;
#endif

    case _SC_SHARED_MEMORY_OBJECTS:
#if CONF_IS_DEFINED_SET (_POSIX_SHARED_MEMORY_OBJECTS)
      return _POSIX_SHARED_MEMORY_OBJECTS;
#else
      return -1;
#endif

    case _SC_VERSION:
      return _POSIX_VERSION;

    case _SC_PAGESIZE:
      return __getpagesize ();

    case _SC_AIO_LISTIO_MAX:
#ifdef	AIO_LISTIO_MAX
      return AIO_LISTIO_MAX;
#else
      return -1;
#endif

    case _SC_AIO_MAX:
#ifdef	AIO_MAX
      return AIO_MAX;
#else
      return -1;
#endif

    case _SC_AIO_PRIO_DELTA_MAX:
#ifdef	AIO_PRIO_DELTA_MAX
      return AIO_PRIO_DELTA_MAX;
#else
      return -1;
#endif

    case _SC_DELAYTIMER_MAX:
#ifdef	DELAYTIMER_MAX
      return DELAYTIMER_MAX;
#else
      return -1;
#endif

    case _SC_MQ_OPEN_MAX:
#ifdef	MQ_OPEN_MAX
      return MQ_OPEN_MAX;
#else
      return -1;
#endif

    case _SC_MQ_PRIO_MAX:
#ifdef	MQ_PRIO_MAX
      return MQ_PRIO_MAX;
#else
      return -1;
#endif

    case _SC_RTSIG_MAX:
#ifdef	RTSIG_MAX
      return RTSIG_MAX;
#else
      return -1;
#endif

    case _SC_SEM_NSEMS_MAX:
#ifdef	SEM_NSEMS_MAX
      return SEM_NSEMS_MAX;
#else
      return -1;
#endif

    case _SC_SEM_VALUE_MAX:
#ifdef	SEM_VALUE_MAX
      return SEM_VALUE_MAX;
#else
      return -1;
#endif

    case _SC_SIGQUEUE_MAX:
#ifdef	SIGQUEUE_MAX
      return SIGQUEUE_MAX;
#else
      return -1;
#endif

    case _SC_TIMER_MAX:
#ifdef	TIMER_MAX
      return TIMER_MAX;
#else
      return -1;
#endif

    case _SC_BC_BASE_MAX:
#ifdef	BC_BASE_MAX
      return BC_BASE_MAX;
#else
      return -1;
#endif

    case _SC_BC_DIM_MAX:
#ifdef	BC_DIM_MAX
      return BC_DIM_MAX;
#else
      return -1;
#endif

    case _SC_BC_SCALE_MAX:
#ifdef	BC_SCALE_MAX
      return BC_SCALE_MAX;
#else
      return -1;
#endif

    case _SC_BC_STRING_MAX:
#ifdef	BC_STRING_MAX
      return BC_STRING_MAX;
#else
      return -1;
#endif

    case _SC_COLL_WEIGHTS_MAX:
#ifdef	COLL_WEIGHTS_MAX
      return COLL_WEIGHTS_MAX;
#else
      return -1;
#endif

    case _SC_EXPR_NEST_MAX:
#ifdef	EXPR_NEST_MAX
      return EXPR_NEST_MAX;
#else
      return -1;
#endif

    case _SC_LINE_MAX:
#ifdef	LINE_MAX
      return LINE_MAX;
#else
      return -1;
#endif

    case _SC_RE_DUP_MAX:
#ifdef	RE_DUP_MAX
      return RE_DUP_MAX;
#else
      return -1;
#endif

    case _SC_CHARCLASS_NAME_MAX:
#ifdef	CHARCLASS_NAME_MAX
      return CHARCLASS_NAME_MAX;
#else
      return -1;
#endif

    case _SC_PII:
#if CONF_IS_DEFINED_SET (_POSIX_PII)
      return 1;
#else
      return -1;
#endif

    case _SC_PII_XTI:
#if CONF_IS_DEFINED_SET (_POSIX_PII_XTI)
      return 1;
#else
      return -1;
#endif

    case _SC_PII_SOCKET:
#if CONF_IS_DEFINED_SET (_POSIX_PII_SOCKET)
      return 1;
#else
      return -1;
#endif

    case _SC_PII_INTERNET:
#if CONF_IS_DEFINED_SET (_POSIX_PII_INTERNET)
      return 1;
#else
      return -1;
#endif

    case _SC_PII_OSI:
#if CONF_IS_DEFINED_SET (_POSIX_PII_OSI)
      return 1;
#else
      return -1;
#endif

    case _SC_POLL:
#if CONF_IS_DEFINED_SET (_POSIX_POLL)
      return 1;
#else
      return -1;
#endif

    case _SC_SELECT:
#if CONF_IS_DEFINED_SET (_POSIX_SELECT)
      return 1;
#else
      return -1;
#endif

      /* The same as _SC_IOV_MAX.  */
    case _SC_UIO_MAXIOV:
#ifdef	UIO_MAXIOV
      return UIO_MAXIOV;
#else
      return -1;
#endif

    case _SC_PII_INTERNET_STREAM:
#if CONF_IS_DEFINED_SET (_POSIX_PII_INTERNET_STREAM)
      return 1;
#else
      return -1;
#endif

    case _SC_PII_INTERNET_DGRAM:
#if CONF_IS_DEFINED_SET (_POSIX_PII_INTERNET_DGRAM)
      return 1;
#else
      return -1;
#endif

    case _SC_PII_OSI_COTS:
#if CONF_IS_DEFINED_SET (_POSIX_PII_OSI_COTS)
      return 1;
#else
      return -1;
#endif

    case _SC_PII_OSI_CLTS:
#if CONF_IS_DEFINED_SET (_POSIX_PII_OSI_CLTS)
      return 1;
#else
      return -1;
#endif

    case _SC_PII_OSI_M:
#if CONF_IS_DEFINED_SET (_POSIX_PII_OSI_M)
      return 1;
#else
      return -1;
#endif

    case _SC_T_IOV_MAX:
#ifdef	_T_IOV_MAX
      return _T_IOV_MAX;
#else
      return -1;
#endif

    case _SC_2_VERSION:
      return _POSIX2_VERSION;

    case _SC_2_C_BIND:
#ifdef	_POSIX2_C_BIND
      return _POSIX2_C_BIND;
#else
      return -1;
#endif

    case _SC_2_C_DEV:
#ifdef	_POSIX2_C_DEV
      return _POSIX2_C_DEV;
#else
      return -1;
#endif

    case _SC_2_C_VERSION:
#ifdef	_POSIX2_C_VERSION
      return _POSIX2_C_VERSION;
#else
      return -1;
#endif

    case _SC_2_FORT_DEV:
#ifdef	_POSIX2_FORT_DEV
      return _POSIX2_FORT_DEV;
#else
      return -1;
#endif

    case _SC_2_FORT_RUN:
#ifdef	_POSIX2_FORT_RUN
      return _POSIX2_FORT_RUN;
#else
      return -1;
#endif

    case _SC_2_LOCALEDEF:
#ifdef	_POSIX2_LOCALEDEF
      return _POSIX2_LOCALEDEF;
#else
      return -1;
#endif

    case _SC_2_SW_DEV:
#ifdef	_POSIX2_SW_DEV
      return _POSIX2_SW_DEV;
#else
      return -1;
#endif

    case _SC_2_CHAR_TERM:
#ifdef	_POSIX2_CHAR_TERM
      return _POSIX2_CHAR_TERM;
#else
      return -1;
#endif

    case _SC_2_UPE:
#ifdef	_POSIX2_UPE
      return _POSIX2_UPE;
#else
      return -1;
#endif

      /* POSIX 1003.1c (POSIX Threads).  */
    case _SC_THREADS:
#if CONF_IS_DEFINED_SET (_POSIX_THREADS)
      return _POSIX_THREADS;
#else
      return -1;
#endif

    case _SC_THREAD_SAFE_FUNCTIONS:
#if CONF_IS_DEFINED_SET (_POSIX_THREAD_SAFE_FUNCTIONS)
      return _POSIX_THREAD_SAFE_FUNCTIONS;
#else
      return -1;
#endif

    case _SC_GETGR_R_SIZE_MAX:
      return NSS_BUFLEN_GROUP;

    case _SC_GETPW_R_SIZE_MAX:
      return NSS_BUFLEN_PASSWD;

    case _SC_LOGIN_NAME_MAX:
#ifdef	LOGIN_NAME_MAX
      return LOGIN_NAME_MAX;
#else
      return -1;
#endif

    case _SC_TTY_NAME_MAX:
#ifdef	TTY_NAME_MAX
      return TTY_NAME_MAX;
#else
      return -1;
#endif

    case _SC_THREAD_DESTRUCTOR_ITERATIONS:
#if CONF_IS_DEFINED_SET (_POSIX_THREAD_DESTRUCTOR_ITERATIONS)
      return _POSIX_THREAD_DESTRUCTOR_ITERATIONS;
#else
      return -1;
#endif

    case _SC_THREAD_KEYS_MAX:
#ifdef	PTHREAD_KEYS_MAX
      return PTHREAD_KEYS_MAX;
#else
      return -1;
#endif

    case _SC_THREAD_STACK_MIN:
#ifdef	PTHREAD_STACK_MIN
      return PTHREAD_STACK_MIN;
#else
      return -1;
#endif

    case _SC_THREAD_THREADS_MAX:
#ifdef	PTHREAD_THREADS_MAX
      return PTHREAD_THREADS_MAX;
#else
      return -1;
#endif

    case _SC_THREAD_ATTR_STACKADDR:
#if CONF_IS_DEFINED_SET (_POSIX_THREAD_ATTR_STACKADDR)
      return _POSIX_THREAD_ATTR_STACKADDR;
#else
      return -1;
#endif

    case _SC_THREAD_ATTR_STACKSIZE:
#if CONF_IS_DEFINED_SET (_POSIX_THREAD_ATTR_STACKSIZE)
      return _POSIX_THREAD_ATTR_STACKSIZE;
#else
      return -1;
#endif

    case _SC_THREAD_PRIORITY_SCHEDULING:
#if CONF_IS_DEFINED_SET (_POSIX_THREAD_PRIORITY_SCHEDULING)
      return _POSIX_THREAD_PRIORITY_SCHEDULING;
#else
      return -1;
#endif

    case _SC_THREAD_PRIO_INHERIT:
#if CONF_IS_DEFINED_SET (_POSIX_THREAD_PRIO_INHERIT)
      return _POSIX_THREAD_PRIO_INHERIT;
#else
      return -1;
#endif

    case _SC_THREAD_PRIO_PROTECT:
#if CONF_IS_DEFINED_SET (_POSIX_THREAD_PRIO_PROTECT)
      return _POSIX_THREAD_PRIO_PROTECT;
#else
      return -1;
#endif

    case _SC_THREAD_PROCESS_SHARED:
#if CONF_IS_DEFINED_SET (_POSIX_THREAD_PROCESS_SHARED)
      return _POSIX_THREAD_PROCESS_SHARED;
#else
      return -1;
#endif

    case _SC_NPROCESSORS_CONF:
      return __get_nprocs_conf ();

    case _SC_NPROCESSORS_ONLN:
      return __get_nprocs ();

    case _SC_PHYS_PAGES:
      return __get_phys_pages ();

    case _SC_AVPHYS_PAGES:
      return __get_avphys_pages ();

    case _SC_ATEXIT_MAX:
      /* We have no limit since we use lists.  */
      return INT_MAX;

    case _SC_PASS_MAX:
      /* We have no limit but since the return value might be used to
	 allocate a buffer we restrict the value.  */
      return BUFSIZ;

    case _SC_XOPEN_VERSION:
      return _XOPEN_VERSION;

    case _SC_XOPEN_XCU_VERSION:
      return _XOPEN_XCU_VERSION;

    case _SC_XOPEN_UNIX:
      return _XOPEN_UNIX;

    case _SC_XOPEN_CRYPT:
#ifdef	_XOPEN_CRYPT
      return _XOPEN_CRYPT;
#else
      return -1;
#endif

    case _SC_XOPEN_ENH_I18N:
#ifdef	_XOPEN_ENH_I18N
      return _XOPEN_ENH_I18N;
#else
      return -1;
#endif

    case _SC_XOPEN_SHM:
#ifdef	_XOPEN_SHM
      return _XOPEN_SHM;
#else
      return -1;
#endif

    case _SC_XOPEN_XPG2:
#ifdef	_XOPEN_XPG2
      return _XOPEN_XPG2;
#else
      return -1;
#endif

    case _SC_XOPEN_XPG3:
#ifdef	_XOPEN_XPG3
      return _XOPEN_XPG3;
#else
      return -1;
#endif

    case _SC_XOPEN_XPG4:
#ifdef	_XOPEN_XPG4
      return _XOPEN_XPG4;
#else
      return -1;
#endif

    case _SC_CHAR_BIT:
      return CHAR_BIT;

    case _SC_CHAR_MAX:
      return CHAR_MAX;

    case _SC_CHAR_MIN:
      return CHAR_MIN;

    case _SC_INT_MAX:
      return INT_MAX;

    case _SC_INT_MIN:
      return INT_MIN;

    case _SC_LONG_BIT:
      return sizeof (long int) * CHAR_BIT;

    case _SC_WORD_BIT:
      return sizeof (int) * CHAR_BIT;

    case _SC_MB_LEN_MAX:
      return MB_LEN_MAX;

    case _SC_NZERO:
      return NZERO;

    case _SC_SSIZE_MAX:
      return _POSIX_SSIZE_MAX;

    case _SC_SCHAR_MAX:
      return SCHAR_MAX;

    case _SC_SCHAR_MIN:
      return SCHAR_MIN;

    case _SC_SHRT_MAX:
      return SHRT_MAX;

    case _SC_SHRT_MIN:
      return SHRT_MIN;

    case _SC_UCHAR_MAX:
      return UCHAR_MAX;

    case _SC_UINT_MAX:
      return UINT_MAX;

    case _SC_ULONG_MAX:
      return ULONG_MAX;

    case _SC_USHRT_MAX:
      return USHRT_MAX;

    case _SC_NL_ARGMAX:
#ifdef	NL_ARGMAX
      return NL_ARGMAX;
#else
      return -1;
#endif

    case _SC_NL_LANGMAX:
#ifdef	NL_LANGMAX
      return NL_LANGMAX;
#else
      return -1;
#endif

    case _SC_NL_MSGMAX:
#ifdef	NL_MSGMAX
      return NL_MSGMAX;
#else
      return -1;
#endif

    case _SC_NL_NMAX:
#ifdef	NL_NMAX
      return NL_NMAX;
#else
      return -1;
#endif

    case _SC_NL_SETMAX:
#ifdef	NL_SETMAX
      return NL_SETMAX;
#else
      return -1;
#endif

    case _SC_NL_TEXTMAX:
#ifdef	NL_TEXTMAX
      return NL_TEXTMAX;
#else
      return -1;
#endif

#define START_ENV_GROUP(VERSION)		\
      /* Empty.  */

#define END_ENV_GROUP(VERSION)			\
      /* Empty.  */

#define KNOWN_ABSENT_ENVIRONMENT(SC_PREFIX, ENV_PREFIX, SUFFIX)	\
    case _SC_##SC_PREFIX##_##SUFFIX:				\
      return _##ENV_PREFIX##_##SUFFIX;

#define KNOWN_PRESENT_ENVIRONMENT(SC_PREFIX, ENV_PREFIX, SUFFIX)	\
    case _SC_##SC_PREFIX##_##SUFFIX:					\
      return _##ENV_PREFIX##_##SUFFIX;

#define UNKNOWN_ENVIRONMENT(SC_PREFIX, ENV_PREFIX, SUFFIX)	\
    case _SC_##SC_PREFIX##_##SUFFIX:				\
      return __sysconf_check_spec (#SUFFIX);

#include <posix/posix-envs.def>

#undef START_ENV_GROUP
#undef END_ENV_GROUP
#undef KNOWN_ABSENT_ENVIRONMENT
#undef KNOWN_PRESENT_ENVIRONMENT
#undef UNKNOWN_ENVIRONMENT

    case _SC_XOPEN_LEGACY:
      return _XOPEN_LEGACY;

    case _SC_XOPEN_REALTIME:
#ifdef _XOPEN_REALTIME
      return _XOPEN_REALTIME;
#else
      return -1;
#endif
    case _SC_XOPEN_REALTIME_THREADS:
#ifdef _XOPEN_REALTIME_THREADS
      return _XOPEN_REALTIME_THREADS;
#else
      return -1;
#endif

    case _SC_ADVISORY_INFO:
#if CONF_IS_DEFINED_SET (_POSIX_ADVISORY_INFO)
      return _POSIX_ADVISORY_INFO;
#else
      return -1;
#endif

    case _SC_BARRIERS:
#if CONF_IS_DEFINED_SET (_POSIX_BARRIERS)
      return _POSIX_BARRIERS;
#else
      return -1;
#endif

    case _SC_BASE:
#if CONF_IS_DEFINED_SET (_POSIX_BASE)
      return _POSIX_BASE;
#else
      return -1;
#endif
    case _SC_C_LANG_SUPPORT:
#if CONF_IS_DEFINED_SET (_POSIX_C_LANG_SUPPORT)
      return _POSIX_C_LANG_SUPPORT;
#else
      return -1;
#endif
    case _SC_C_LANG_SUPPORT_R:
#if CONF_IS_DEFINED_SET (_POSIX_C_LANG_SUPPORT_R)
      return _POSIX_C_LANG_SUPPORT_R;
#else
      return -1;
#endif

    case _SC_CLOCK_SELECTION:
#if CONF_IS_DEFINED_SET (_POSIX_CLOCK_SELECTION)
      return _POSIX_CLOCK_SELECTION;
#else
      return -1;
#endif

    case _SC_CPUTIME:
#if CONF_IS_DEFINED_SET (_POSIX_CPUTIME)
      return _POSIX_CPUTIME;
#else
      return -1;
#endif

    case _SC_DEVICE_IO:
#if CONF_IS_DEFINED_SET (_POSIX_DEVICE_IO)
      return _POSIX_DEVICE_IO;
#else
      return -1;
#endif
    case _SC_DEVICE_SPECIFIC:
#if CONF_IS_DEFINED_SET (_POSIX_DEVICE_SPECIFIC)
      return _POSIX_DEVICE_SPECIFIC;
#else
      return -1;
#endif
    case _SC_DEVICE_SPECIFIC_R:
#if CONF_IS_DEFINED_SET (_POSIX_DEVICE_SPECIFIC_R)
      return _POSIX_DEVICE_SPECIFIC_R;
#else
      return -1;
#endif

    case _SC_FD_MGMT:
#if CONF_IS_DEFINED_SET (_POSIX_FD_MGMT)
      return _POSIX_FD_MGMT;
#else
      return -1;
#endif

    case _SC_FIFO:
#if CONF_IS_DEFINED_SET (_POSIX_FIFO)
      return _POSIX_FIFO;
#else
      return -1;
#endif
    case _SC_PIPE:
#if CONF_IS_DEFINED_SET (_POSIX_PIPE)
      return _POSIX_PIPE;
#else
      return -1;
#endif

    case _SC_FILE_ATTRIBUTES:
#if CONF_IS_DEFINED_SET (_POSIX_FILE_ATTRIBUTES)
      return _POSIX_FILE_ATTRIBUTES;
#else
      return -1;
#endif
    case _SC_FILE_LOCKING:
#if CONF_IS_DEFINED_SET (_POSIX_FILE_LOCKING)
      return _POSIX_FILE_LOCKING;
#else
      return -1;
#endif
    case _SC_FILE_SYSTEM:
#if CONF_IS_DEFINED_SET (_POSIX_FILE_SYSTEM)
      return _POSIX_FILE_SYSTEM;
#else
      return -1;
#endif

    case _SC_MONOTONIC_CLOCK:
#if CONF_IS_DEFINED_SET (_POSIX_MONOTONIC_CLOCK)
      return _POSIX_MONOTONIC_CLOCK;
#else
      return -1;
#endif

    case _SC_MULTI_PROCESS:
#if CONF_IS_DEFINED_SET (_POSIX_MULTI_PROCESS)
      return _POSIX_MULTI_PROCESS;
#else
      return -1;
#endif
    case _SC_SINGLE_PROCESS:
#if CONF_IS_DEFINED_SET (_POSIX_SINGLE_PROCESS)
      return _POSIX_SINGLE_PROCESS;
#else
      return -1;
#endif

    case _SC_NETWORKING:
#if CONF_IS_DEFINED_SET (_POSIX_NETWORKING)
      return _POSIX_NETWORKING;
#else
      return -1;
#endif

    case _SC_READER_WRITER_LOCKS:
#if CONF_IS_DEFINED_SET (_POSIX_READER_WRITER_LOCKS)
      return _POSIX_READER_WRITER_LOCKS;
#else
      return -1;
#endif
    case _SC_SPIN_LOCKS:
#if CONF_IS_DEFINED_SET (_POSIX_SPIN_LOCKS)
      return _POSIX_SPIN_LOCKS;
#else
      return -1;
#endif

    case _SC_REGEXP:
#if CONF_IS_DEFINED_SET (_POSIX_REGEXP)
      return _POSIX_REGEXP;
#else
      return -1;
#endif
    /* _REGEX_VERSION has been removed with IEEE Std 1003.1-2001/Cor 2-2004,
       item XSH/TC2/D6/137.  */
    case _SC_REGEX_VERSION:
      return -1;

    case _SC_SHELL:
#if CONF_IS_DEFINED_SET (_POSIX_SHELL)
      return _POSIX_SHELL;
#else
      return -1;
#endif

    case _SC_SIGNALS:
#if CONF_IS_DEFINED (_POSIX_SIGNALS)
      return _POSIX_SIGNALS;
#else
      return -1;
#endif

    case _SC_SPAWN:
#if CONF_IS_DEFINED_SET (_POSIX_SPAWN)
      return _POSIX_SPAWN;
#else
      return -1;
#endif

    case _SC_SPORADIC_SERVER:
#if CONF_IS_DEFINED_SET (_POSIX_SPORADIC_SERVER)
      return _POSIX_SPORADIC_SERVER;
#else
      return -1;
#endif
    case _SC_THREAD_SPORADIC_SERVER:
#if CONF_IS_DEFINED_SET (_POSIX_THREAD_SPORADIC_SERVER)
      return _POSIX_THREAD_SPORADIC_SERVER;
#else
      return -1;
#endif

    case _SC_SYSTEM_DATABASE:
#if CONF_IS_DEFINED_SET (_POSIX_SYSTEM_DATABASE)
      return _POSIX_SYSTEM_DATABASE;
#else
      return -1;
#endif
    case _SC_SYSTEM_DATABASE_R:
#if CONF_IS_DEFINED_SET (_POSIX_SYSTEM_DATABASE_R)
      return _POSIX_SYSTEM_DATABASE_R;
#else
      return -1;
#endif

    case _SC_THREAD_CPUTIME:
#if CONF_IS_DEFINED_SET (_POSIX_THREAD_CPUTIME)
      return _POSIX_THREAD_CPUTIME;
#else
      return -1;
#endif

    case _SC_TIMEOUTS:
#if CONF_IS_DEFINED_SET (_POSIX_TIMEOUTS)
      return _POSIX_TIMEOUTS;
#else
      return -1;
#endif

    case _SC_TYPED_MEMORY_OBJECTS:
#if CONF_IS_DEFINED_SET (_POSIX_TYPED_MEMORY_OBJECTS)
      return _POSIX_TYPED_MEMORY_OBJECTS;
#else
      return -1;
#endif

    case _SC_USER_GROUPS:
#if CONF_IS_DEFINED_SET (_POSIX_USER_GROUPS)
      return _POSIX_USER_GROUPS;
#else
      return -1;
#endif
    case _SC_USER_GROUPS_R:
#if CONF_IS_DEFINED_SET (_POSIX_USER_GROUPS_R)
      return _POSIX_USER_GROUPS_R;
#else
      return -1;
#endif

    case _SC_2_PBS:
#ifdef _POSIX2_PBS
      return _POSIX2_PBS;
#else
      return -1;
#endif
    case _SC_2_PBS_ACCOUNTING:
#ifdef _POSIX2_PBS_ACCOUNTING
      return _POSIX2_PBS_ACCOUNTING;
#else
      return -1;
#endif
    case _SC_2_PBS_CHECKPOINT:
#ifdef _POSIX2_PBS_CHECKPOINT
      return _POSIX2_PBS_CHECKPOINT;
#else
      return -1;
#endif
    case _SC_2_PBS_LOCATE:
#ifdef _POSIX2_PBS_LOCATE
      return _POSIX2_PBS_LOCATE;
#else
      return -1;
#endif
    case _SC_2_PBS_MESSAGE:
#ifdef _POSIX2_PBS_MESSAGE
      return _POSIX2_PBS_MESSAGE;
#else
      return -1;
#endif
    case _SC_2_PBS_TRACK:
#ifdef _POSIX2_PBS_TRACK
      return _POSIX2_PBS_TRACK;
#else
      return -1;
#endif

    case _SC_SYMLOOP_MAX:
#ifdef SYMLOOP_MAX
      return SYMLOOP_MAX;
#else
      return -1;
#endif

    case _SC_STREAMS:
#ifdef _XOPEN_STREAMS
      return _XOPEN_STREAMS;
#else
      return -1;
#endif

    case _SC_HOST_NAME_MAX:
#ifdef HOST_NAME_MAX
      return HOST_NAME_MAX;
#else
      return -1;
#endif

    case _SC_TRACE:
#if CONF_IS_DEFINED_SET (_POSIX_TRACE)
      return _POSIX_TRACE;
#else
      return -1;
#endif
    case _SC_TRACE_EVENT_FILTER:
#if CONF_IS_DEFINED_SET (_POSIX_TRACE_EVENT_FILTER)
      return _POSIX_TRACE_EVENT_FILTER;
#else
      return -1;
#endif
    case _SC_TRACE_INHERIT:
#if CONF_IS_DEFINED_SET (_POSIX_TRACE_INHERIT)
      return _POSIX_TRACE_INHERIT;
#else
      return -1;
#endif
    case _SC_TRACE_LOG:
#if CONF_IS_DEFINED_SET (_POSIX_TRACE_LOG)
      return _POSIX_TRACE_LOG;
#else
      return -1;
#endif

    case _SC_TRACE_EVENT_NAME_MAX:
    case _SC_TRACE_NAME_MAX:
    case _SC_TRACE_SYS_MAX:
    case _SC_TRACE_USER_EVENT_MAX:
      /* No support for tracing.  */

    case _SC_XOPEN_STREAMS:
      /* No support for STREAMS.  */
      return -1;

    case _SC_LEVEL1_ICACHE_SIZE:
    case _SC_LEVEL1_ICACHE_ASSOC:
    case _SC_LEVEL1_ICACHE_LINESIZE:
    case _SC_LEVEL1_DCACHE_SIZE:
    case _SC_LEVEL1_DCACHE_ASSOC:
    case _SC_LEVEL1_DCACHE_LINESIZE:
    case _SC_LEVEL2_CACHE_SIZE:
    case _SC_LEVEL2_CACHE_ASSOC:
    case _SC_LEVEL2_CACHE_LINESIZE:
    case _SC_LEVEL3_CACHE_SIZE:
    case _SC_LEVEL3_CACHE_ASSOC:
    case _SC_LEVEL3_CACHE_LINESIZE:
    case _SC_LEVEL4_CACHE_SIZE:
    case _SC_LEVEL4_CACHE_ASSOC:
    case _SC_LEVEL4_CACHE_LINESIZE:
      /* In general we cannot determine these values.  Therefore we
	 return zero which indicates that no information is
	 available.  */
      return 0;

    case _SC_IPV6:
#if CONF_IS_DEFINED_SET (_POSIX_IPV6)
      return _POSIX_IPV6;
#else
      return -1;
#endif

    case _SC_RAW_SOCKETS:
#if CONF_IS_DEFINED_SET (_POSIX_RAW_SOCKETS)
      return _POSIX_RAW_SOCKETS;
#else
      return -1;
#endif
    }
}
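The _SC_PAGESIZE branch above is what the public sysconf interface ends up calling, so sysconf (_SC_PAGESIZE) and getpagesize () should report the same value. A minimal usage illustration:

#include <stdio.h>
#include <unistd.h>

int
main (void)
{
  printf ("sysconf (_SC_PAGESIZE) = %ld\n", sysconf (_SC_PAGESIZE));
  printf ("getpagesize ()         = %d\n", getpagesize ());
  printf ("sysconf (_SC_OPEN_MAX) = %ld\n", sysconf (_SC_OPEN_MAX));
  return 0;
}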
Example #14
char *
__getcwd (char *buf, size_t size)
{
  char *path;
  char *result;

#ifndef NO_ALLOCATION
  size_t alloc_size = size;
  if (size == 0)
    {
      if (buf != NULL)
	{
	  __set_errno (EINVAL);
	  return NULL;
	}

      alloc_size = MAX (PATH_MAX, __getpagesize ());
    }

  if (buf == NULL)
    {
      path = malloc (alloc_size);
      if (path == NULL)
	return NULL;
    }
  else
#else
# define alloc_size size
#endif
    path = buf;

  int retval;

  retval = INLINE_SYSCALL (getcwd, 2, path, alloc_size);
  if (retval >= 0)
    {
#ifndef NO_ALLOCATION
      if (buf == NULL && size == 0)
	/* Ensure that the buffer is only as large as necessary.  */
	buf = realloc (path, (size_t) retval);

      if (buf == NULL)
	/* Either buf was NULL all along, or `realloc' failed but
	   we still have the original string.  */
	buf = path;
#endif

      return buf;
    }

  /* The system call cannot handle paths longer than a page.
     Neither can the magic symlink in /proc/self.  Just use the
     generic implementation right away.  */
  if (errno == ENAMETOOLONG)
    {
#ifndef NO_ALLOCATION
      if (buf == NULL && size == 0)
	{
	  free (path);
	  path = NULL;
	}
#endif

      result = generic_getcwd (path, size);

#ifndef NO_ALLOCATION
      if (result == NULL && buf == NULL && size != 0)
	free (path);
#endif

      return result;
    }

  /* It should never happen that the `getcwd' syscall failed because
     the buffer is too small if we allocated the buffer ourselves
     large enough.  */
  assert (errno != ERANGE || buf != NULL || size != 0);

#ifndef NO_ALLOCATION
  if (buf == NULL)
    free (path);
#endif

  return NULL;
}
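The allocation path above (buf == NULL and size == 0) is what backs the glibc extension where getcwd allocates a buffer of exactly the needed length, starting from a MAX (PATH_MAX, page size) guess and shrinking it with realloc. A short usage sketch of that mode:

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int
main (void)
{
  /* NULL buffer with size 0: getcwd allocates the result (a glibc
     extension); the caller frees it.  */
  char *cwd = getcwd (NULL, 0);
  if (cwd == NULL)
    {
      perror ("getcwd");
      return 1;
    }
  printf ("cwd: %s\n", cwd);
  free (cwd);
  return 0;
}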
Example #15
static int pthread_handle_create(pthread_t *thread, const pthread_attr_t *attr,
				 void * (*start_routine)(void *), void *arg,
				 sigset_t * mask, int father_pid,
				 int report_events,
				 td_thr_events_t *event_maskp)
{
  size_t sseg;
  int pid;
  pthread_descr new_thread;
  char * new_thread_bottom;
  pthread_t new_thread_id;
  char *guardaddr = NULL;
  size_t guardsize = 0;
  int pagesize = __getpagesize();
  int saved_errno = 0;

  /* First check whether we have to change the policy and if yes, whether
     we can  do this.  Normally this should be done by examining the
     return value of the sched_setscheduler call in pthread_start_thread
     but this is hard to implement.  FIXME  */
  if (attr != NULL && attr->__schedpolicy != SCHED_OTHER && geteuid () != 0)
    return EPERM;
  /* Find a free segment for the thread, and allocate a stack if needed */
  for (sseg = 2; ; sseg++)
    {
      if (sseg >= PTHREAD_THREADS_MAX)
	return EAGAIN;
      if (__pthread_handles[sseg].h_descr != NULL)
	continue;
      if (pthread_allocate_stack(attr, thread_segment(sseg), pagesize,
                                 &new_thread, &new_thread_bottom,
                                 &guardaddr, &guardsize) == 0)
        break;
    }
  __pthread_handles_num++;
  /* Allocate new thread identifier */
  pthread_threads_counter += PTHREAD_THREADS_MAX;
  new_thread_id = sseg + pthread_threads_counter;
  /* Initialize the thread descriptor.  Elements which have to be
     initialized to zero already have this value.  */
  new_thread->p_tid = new_thread_id;
  new_thread->p_lock = &(__pthread_handles[sseg].h_lock);
  new_thread->p_cancelstate = PTHREAD_CANCEL_ENABLE;
  new_thread->p_canceltype = PTHREAD_CANCEL_DEFERRED;
  new_thread->p_errnop = &new_thread->p_errno;
  new_thread->p_h_errnop = &new_thread->p_h_errno;
  new_thread->p_guardaddr = guardaddr;
  new_thread->p_guardsize = guardsize;
  new_thread->p_self = new_thread;
  new_thread->p_nr = sseg;
  /* Initialize the thread handle */
  __pthread_init_lock(&__pthread_handles[sseg].h_lock);
  __pthread_handles[sseg].h_descr = new_thread;
  __pthread_handles[sseg].h_bottom = new_thread_bottom;
  /* Determine scheduling parameters for the thread */
  new_thread->p_start_args.schedpolicy = -1;
  if (attr != NULL) {
    new_thread->p_detached = attr->__detachstate;
    new_thread->p_userstack = attr->__stackaddr_set;

    switch(attr->__inheritsched) {
    case PTHREAD_EXPLICIT_SCHED:
      new_thread->p_start_args.schedpolicy = attr->__schedpolicy;
      memcpy (&new_thread->p_start_args.schedparam, &attr->__schedparam,
	      sizeof (struct sched_param));
      break;
    case PTHREAD_INHERIT_SCHED:
      new_thread->p_start_args.schedpolicy = sched_getscheduler(father_pid);
      sched_getparam(father_pid, &new_thread->p_start_args.schedparam);
      break;
    }
    new_thread->p_priority =
      new_thread->p_start_args.schedparam.sched_priority;
  }
  /* Finish setting up arguments to pthread_start_thread */
  new_thread->p_start_args.start_routine = start_routine;
  new_thread->p_start_args.arg = arg;
  new_thread->p_start_args.mask = *mask;
  /* Raise priority of thread manager if needed */
  __pthread_manager_adjust_prio(new_thread->p_priority);
  /* Do the cloning.  We have to use two different functions depending
     on whether we are debugging or not.  */
  pid = 0;     /* Note that the thread never can have PID zero.  */


  /* ******************************************************** */
  /*  This code was moved from below to cope with running threads
   *  on uClinux systems.  See comment below...
   * Insert new thread in doubly linked list of active threads */ 
  new_thread->p_prevlive = __pthread_main_thread;
  new_thread->p_nextlive = __pthread_main_thread->p_nextlive;
  __pthread_main_thread->p_nextlive->p_prevlive = new_thread;
  __pthread_main_thread->p_nextlive = new_thread;
  /* ********************************************************* */

  if (report_events)
    {
      /* See whether the TD_CREATE event bit is set in any of the
         masks.  */
      int idx = __td_eventword (TD_CREATE);
      uint32_t mask = __td_eventmask (TD_CREATE);

      if ((mask & (__pthread_threads_events.event_bits[idx]
		   | event_maskp->event_bits[idx])) != 0)
	{
	  /* Lock the mutex the child will use now so that it will stop.  */
	  __pthread_lock(new_thread->p_lock, NULL);

	  /* We have to report this event.  */
	  pid = clone(pthread_start_thread_event, (void **) new_thread,
			CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
			__pthread_sig_cancel, new_thread);

	  saved_errno = errno;
	  if (pid != -1)
	    {
	      /* Now fill in the information about the new thread in
	         the newly created thread's data structure.  We cannot let
	         the new thread do this since we don't know whether it was
	         already scheduled when we send the event.  */
	      new_thread->p_eventbuf.eventdata = new_thread;
	      new_thread->p_eventbuf.eventnum = TD_CREATE;
	      __pthread_last_event = new_thread;

	      /* We have to set the PID here since the callback function
		 in the debug library will need it and we cannot guarantee
		 the child got scheduled before the debugger.  */
	      new_thread->p_pid = pid;

	      /* Now call the function which signals the event.  */
	      __linuxthreads_create_event ();

	      /* Now restart the thread.  */
	      __pthread_unlock(new_thread->p_lock);
	    }
	}
    }
  if (pid == 0)
    {
PDEBUG("cloning new_thread = %p\n", new_thread);
      pid = clone(pthread_start_thread, (void **) new_thread,
		    CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
		    __pthread_sig_cancel, new_thread);
      saved_errno = errno;
    }
  /* Check if cloning succeeded */
  if (pid == -1) {
    /********************************************************
     * Code inserted to remove the thread from our list of active
     * threads in case of failure (needed to cope with uClinux).
     * See comment below. */
    new_thread->p_nextlive->p_prevlive = new_thread->p_prevlive;
    new_thread->p_prevlive->p_nextlive = new_thread->p_nextlive;
    /********************************************************/

    /* Free the stack if we allocated it */
    if (attr == NULL || !attr->__stackaddr_set)
      {
#ifdef __UCLIBC_HAS_MMU__
	if (new_thread->p_guardsize != 0)
	  munmap(new_thread->p_guardaddr, new_thread->p_guardsize);
	munmap((caddr_t)((char *)(new_thread+1) - INITIAL_STACK_SIZE),
	       INITIAL_STACK_SIZE);
#else
	free(new_thread_bottom);
#endif /* __UCLIBC_HAS_MMU__ */
      }
    __pthread_handles[sseg].h_descr = NULL;
    __pthread_handles[sseg].h_bottom = NULL;
    __pthread_handles_num--;
    return saved_errno;  /* errno saved right after clone, before the cleanup
                            above could clobber it.  */
  }
PDEBUG("new thread pid = %d\n", pid);

#if 0
  /* ***********************************************************
   This code has been moved before the call to clone().  In uClinux,
   waiting on a semaphore requires that the child already be in the
   active threads list: pthread_find_self() walks this list to get the
   pthread_descr of self.  So, if the child calls sem_wait before this
   code is executed, it will hang forever, and initial_thread will
   instead be posted by a sem_post call. */

  /* Insert new thread in doubly linked list of active threads */
  new_thread->p_prevlive = __pthread_main_thread;
  new_thread->p_nextlive = __pthread_main_thread->p_nextlive;
  __pthread_main_thread->p_nextlive->p_prevlive = new_thread;
  __pthread_main_thread->p_nextlive = new_thread;
  /************************************************************/
#endif

  /* Set pid field of the new thread, in case we get there before the
     child starts. */
  new_thread->p_pid = pid;
  /* We're all set */
  *thread = new_thread_id;
  return 0;
}
Example #16
static int pthread_handle_create(pthread_t *thread, const pthread_attr_t *attr,
				 void * (*start_routine)(void *), void *arg,
				 sigset_t * mask, int father_pid,
				 int report_events,
				 td_thr_events_t *event_maskp)
{
  size_t sseg;
  int pid;
  pthread_descr new_thread;
  char *stack_addr;
  char * new_thread_bottom;
  pthread_t new_thread_id;
  char *guardaddr = NULL;
  size_t guardsize = 0, stksize = 0;
  int pagesize = __getpagesize();
  int saved_errno = 0;

#ifdef USE_TLS
  new_thread = _dl_allocate_tls (NULL);
  if (new_thread == NULL)
    return EAGAIN;
# if TLS_DTV_AT_TP
  /* pthread_descr is below TP.  */
  new_thread = (pthread_descr) ((char *) new_thread - TLS_PRE_TCB_SIZE);
# endif
#else
  /* Prevent warnings.  */
  new_thread = NULL;
#endif

  /* First check whether we have to change the policy and if yes, whether
     we can  do this.  Normally this should be done by examining the
     return value of the __sched_setscheduler call in pthread_start_thread
     but this is hard to implement.  FIXME  */
  if (attr != NULL && attr->__schedpolicy != SCHED_OTHER && geteuid () != 0)
    return EPERM;
  /* Find a free segment for the thread, and allocate a stack if needed */
  for (sseg = 2; ; sseg++)
    {
      if (sseg >= PTHREAD_THREADS_MAX)
	{
#ifdef USE_TLS
# if TLS_DTV_AT_TP
	  new_thread = (pthread_descr) ((char *) new_thread + TLS_PRE_TCB_SIZE);
# endif
	  _dl_deallocate_tls (new_thread, true);
#endif
	  return EAGAIN;
	}
      if (__pthread_handles[sseg].h_descr != NULL)
	continue;
      if (pthread_allocate_stack(attr, thread_segment(sseg),
				 pagesize, &stack_addr, &new_thread_bottom,
                                 &guardaddr, &guardsize, &stksize) == 0)
	{
#ifdef USE_TLS
	  new_thread->p_stackaddr = stack_addr;
#else
	  new_thread = (pthread_descr) stack_addr;
#endif
	  break;
#ifndef __ARCH_USE_MMU__
	} else {
	  /* When there is an MMU, mmap () is used to allocate the stack. If one
	   * segment is already mapped, we should continue to see if we can
	   * use the next one. However, when there is no MMU, malloc () is used.
	   * It's a waste of CPU cycles to continue to try if it fails.  */
	  return EAGAIN;
#endif
	}
    }
  __pthread_handles_num++;
  /* Allocate new thread identifier */
  pthread_threads_counter += PTHREAD_THREADS_MAX;
  new_thread_id = sseg + pthread_threads_counter;
  /* Initialize the thread descriptor.  Elements which have to be
     initialized to zero already have this value.  */
#if !defined USE_TLS || !TLS_DTV_AT_TP
  new_thread->p_header.data.tcb = new_thread;
  new_thread->p_header.data.self = new_thread;
#endif
#if TLS_MULTIPLE_THREADS_IN_TCB || !defined USE_TLS || !TLS_DTV_AT_TP
  new_thread->p_multiple_threads = 1;
#endif
  new_thread->p_tid = new_thread_id;
  new_thread->p_lock = &(__pthread_handles[sseg].h_lock);
  new_thread->p_cancelstate = PTHREAD_CANCEL_ENABLE;
  new_thread->p_canceltype = PTHREAD_CANCEL_DEFERRED;
#if !(USE_TLS && HAVE___THREAD)
  new_thread->p_errnop = &new_thread->p_errno;
  new_thread->p_h_errnop = &new_thread->p_h_errno;
  new_thread->p_resp = &new_thread->p_res;
#endif
  new_thread->p_guardaddr = guardaddr;
  new_thread->p_guardsize = guardsize;
  new_thread->p_nr = sseg;
  new_thread->p_inheritsched = attr ? attr->__inheritsched : 0;
  new_thread->p_alloca_cutoff = stksize / 4 > __MAX_ALLOCA_CUTOFF
				 ? __MAX_ALLOCA_CUTOFF : stksize / 4;
  /* Initialize the thread handle */
  __pthread_init_lock(&__pthread_handles[sseg].h_lock);
  __pthread_handles[sseg].h_descr = new_thread;
  __pthread_handles[sseg].h_bottom = new_thread_bottom;
  /* Determine scheduling parameters for the thread */
  new_thread->p_start_args.schedpolicy = -1;
  if (attr != NULL) {
    new_thread->p_detached = attr->__detachstate;
    new_thread->p_userstack = attr->__stackaddr_set;

    switch(attr->__inheritsched) {
    case PTHREAD_EXPLICIT_SCHED:
      new_thread->p_start_args.schedpolicy = attr->__schedpolicy;
      memcpy (&new_thread->p_start_args.schedparam, &attr->__schedparam,
	      sizeof (struct sched_param));
      break;
    case PTHREAD_INHERIT_SCHED:
      new_thread->p_start_args.schedpolicy = __sched_getscheduler(father_pid);
      __sched_getparam(father_pid, &new_thread->p_start_args.schedparam);
      break;
    }
    new_thread->p_priority =
      new_thread->p_start_args.schedparam.sched_priority;
  }
  /* Finish setting up arguments to pthread_start_thread */
  new_thread->p_start_args.start_routine = start_routine;
  new_thread->p_start_args.arg = arg;
  new_thread->p_start_args.mask = *mask;
  /* Make the new thread ID available already now.  If any of the later
     functions fail we return an error value and the caller must not use
     the stored thread ID.  */
  *thread = new_thread_id;
  /* Raise priority of thread manager if needed */
  __pthread_manager_adjust_prio(new_thread->p_priority);
  /* Do the cloning.  We have to use two different functions depending
     on whether we are debugging or not.  */
  pid = 0;	/* Note that the thread never can have PID zero.  */
  if (report_events)
    {
      /* See whether the TD_CREATE event bit is set in any of the
         masks.  */
      int idx = __td_eventword (TD_CREATE);
      uint32_t mask = __td_eventmask (TD_CREATE);

      if ((mask & (__pthread_threads_events.event_bits[idx]
		   | event_maskp->event_bits[idx])) != 0)
	{
	  /* Lock the mutex the child will use now so that it will stop.  */
	  __pthread_lock(new_thread->p_lock, NULL);

	  /* We have to report this event.  */
#ifdef NEED_SEPARATE_REGISTER_STACK
	  /* Perhaps this version should be used on all platforms. But
	   this requires that __clone2 be uniformly supported
	   everywhere.

	   And there is some argument for changing the __clone2
	   interface to pass sp and bsp instead, making it more IA64
	   specific, but allowing stacks to grow outward from each
	   other, to get less paging and fewer mmaps.  */
	  pid = __clone2(pthread_start_thread_event,
  		 (void **)new_thread_bottom,
			 (char *)stack_addr - new_thread_bottom,
			 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_SYSVSEM |
			 __pthread_sig_cancel, new_thread);
#elif _STACK_GROWS_UP
	  pid = __clone(pthread_start_thread_event, (void *) new_thread_bottom,
			CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_SYSVSEM |
			__pthread_sig_cancel, new_thread);
#else
	  pid = __clone(pthread_start_thread_event, stack_addr,
			CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_SYSVSEM |
			__pthread_sig_cancel, new_thread);
#endif
	  saved_errno = errno;
	  if (pid != -1)
	    {
	      /* Now fill in the information about the new thread in
		 the newly created thread's data structure.  We cannot let
		 the new thread do this since we don't know whether it was
		 already scheduled when we send the event.  */
	      new_thread->p_eventbuf.eventdata = new_thread;
	      new_thread->p_eventbuf.eventnum = TD_CREATE;
	      __pthread_last_event = new_thread;

	      /* We have to set the PID here since the callback function
		 in the debug library will need it and we cannot guarantee
		 the child got scheduled before the debugger.  */
	      new_thread->p_pid = pid;

	      /* Now call the function which signals the event.  */
	      __linuxthreads_create_event ();

	      /* Now restart the thread.  */
	      __pthread_unlock(new_thread->p_lock);
	    }
	}
    }
  if (pid == 0)
    {
#ifdef NEED_SEPARATE_REGISTER_STACK
      pid = __clone2(pthread_start_thread,
		     (void **)new_thread_bottom,
                     (char *)stack_addr - new_thread_bottom,
		     CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_SYSVSEM |
		     __pthread_sig_cancel, new_thread);
#elif _STACK_GROWS_UP
      pid = __clone(pthread_start_thread, (void *) new_thread_bottom,
		    CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_SYSVSEM |
		    __pthread_sig_cancel, new_thread);
#else
      pid = __clone(pthread_start_thread, stack_addr,
		    CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_SYSVSEM |
		    __pthread_sig_cancel, new_thread);
#endif /* !NEED_SEPARATE_REGISTER_STACK */
      saved_errno = errno;
    }
  /* Check if cloning succeeded */
  if (pid == -1) {
    /* Free the stack if we allocated it */
    if (attr == NULL || !attr->__stackaddr_set)
      {
#ifdef NEED_SEPARATE_REGISTER_STACK
	size_t stacksize = ((char *)(new_thread->p_guardaddr)
			    - new_thread_bottom);
	munmap((caddr_t)new_thread_bottom,
	       2 * stacksize + new_thread->p_guardsize);
#elif _STACK_GROWS_UP
# ifdef USE_TLS
	size_t stacksize = guardaddr - stack_addr;
	munmap(stack_addr, stacksize + guardsize);
# else
	size_t stacksize = guardaddr - (char *)new_thread;
	munmap(new_thread, stacksize + guardsize);
# endif
#else
# ifdef USE_TLS
	size_t stacksize = stack_addr - new_thread_bottom;
# else
	size_t stacksize = (char *)(new_thread+1) - new_thread_bottom;
# endif
	munmap(new_thread_bottom - guardsize, guardsize + stacksize);
#endif
      }
#ifdef USE_TLS
# if TLS_DTV_AT_TP
    new_thread = (pthread_descr) ((char *) new_thread + TLS_PRE_TCB_SIZE);
# endif
    _dl_deallocate_tls (new_thread, true);
#endif
    __pthread_handles[sseg].h_descr = NULL;
    __pthread_handles[sseg].h_bottom = NULL;
    __pthread_handles_num--;
    return saved_errno;
  }
  /* Insert new thread in doubly linked list of active threads */
  new_thread->p_prevlive = __pthread_main_thread;
  new_thread->p_nextlive = __pthread_main_thread->p_nextlive;
  __pthread_main_thread->p_nextlive->p_prevlive = new_thread;
  __pthread_main_thread->p_nextlive = new_thread;
  /* Set pid field of the new thread, in case we get there before the
     child starts. */
  new_thread->p_pid = pid;
  return 0;
}
Example #17
/* Guts of underflow callback if we mmap the file.  This stats the file and
   updates the stream state to match.  In the normal case we return zero.
   If the file is no longer eligible for mmap, its jump tables are reset to
   the vanilla ones and we return nonzero.  */
static int
mmap_remap_check (_IO_FILE *fp)
{
  struct stat64 st;

  if (_IO_SYSSTAT (fp, &st) == 0
      && S_ISREG (st.st_mode) && st.st_size != 0
      /* Limit the file size to 1MB for 32-bit machines.  */
      && (sizeof (ptrdiff_t) > 4 || st.st_size < 1*1024*1024))
    {
      const size_t pagesize = __getpagesize ();
# define ROUNDED(x)	(((x) + pagesize - 1) & ~(pagesize - 1))
      if (ROUNDED (st.st_size) < ROUNDED (fp->_IO_buf_end
					  - fp->_IO_buf_base))
	{
	  /* We can trim off some pages past the end of the file.  */
	  (void) __munmap (fp->_IO_buf_base + ROUNDED (st.st_size),
			   ROUNDED (fp->_IO_buf_end - fp->_IO_buf_base)
			   - ROUNDED (st.st_size));
	  fp->_IO_buf_end = fp->_IO_buf_base + st.st_size;
	}
      else if (ROUNDED (st.st_size) > ROUNDED (fp->_IO_buf_end
					       - fp->_IO_buf_base))
	{
	  /* The file added some pages.  We need to remap it.  */
	  void *p;
#ifdef _G_HAVE_MREMAP
	  p = __mremap (fp->_IO_buf_base, ROUNDED (fp->_IO_buf_end
						   - fp->_IO_buf_base),
			ROUNDED (st.st_size), MREMAP_MAYMOVE);
	  if (p == MAP_FAILED)
	    {
	      (void) __munmap (fp->_IO_buf_base,
			       fp->_IO_buf_end - fp->_IO_buf_base);
	      goto punt;
	    }
#else
	  (void) __munmap (fp->_IO_buf_base,
			   fp->_IO_buf_end - fp->_IO_buf_base);
	  p = __mmap64 (NULL, st.st_size, PROT_READ, MAP_SHARED,
			fp->_fileno, 0);
	  if (p == MAP_FAILED)
	    goto punt;
#endif
	  fp->_IO_buf_base = p;
	  fp->_IO_buf_end = fp->_IO_buf_base + st.st_size;
	}
      else
	{
	  /* The number of pages didn't change.  */
	  fp->_IO_buf_end = fp->_IO_buf_base + st.st_size;
	}
# undef ROUNDED

      fp->_offset -= fp->_IO_read_end - fp->_IO_read_ptr;
      _IO_setg (fp, fp->_IO_buf_base,
		fp->_offset < fp->_IO_buf_end - fp->_IO_buf_base
		? fp->_IO_buf_base + fp->_offset : fp->_IO_buf_end,
		fp->_IO_buf_end);

      /* If we are already positioned at or past the end of the file, don't
	 change the current offset.  If not, seek past what we have mapped,
	 mimicking the position left by a normal underflow reading into its
	 buffer until EOF.  */

      if (fp->_offset < fp->_IO_buf_end - fp->_IO_buf_base)
	{
	  if (__lseek64 (fp->_fileno, fp->_IO_buf_end - fp->_IO_buf_base,
			 SEEK_SET)
	      != fp->_IO_buf_end - fp->_IO_buf_base)
	    fp->_flags |= _IO_ERR_SEEN;
	  else
	    fp->_offset = fp->_IO_buf_end - fp->_IO_buf_base;
	}

      return 0;
    }
  else
    {
      /* Life is no longer good for mmap.  Punt it.  */
      (void) __munmap (fp->_IO_buf_base,
		       fp->_IO_buf_end - fp->_IO_buf_base);
    punt:
      fp->_IO_buf_base = fp->_IO_buf_end = NULL;
      _IO_setg (fp, NULL, NULL, NULL);
      if (fp->_mode <= 0)
	_IO_JUMPS_FILE_plus (fp) = &_IO_file_jumps;
      else
	_IO_JUMPS_FILE_plus (fp) = &_IO_wfile_jumps;
      fp->_wide_data->_wide_vtable = &_IO_wfile_jumps;

      return 1;
    }
}
Example #18
static void pthread_initialize(void)
{
  struct sigaction sa;
  sigset_t mask;
  struct rlimit limit;
  int max_stack;

  /* If already done (e.g. by a constructor called earlier!), bail out */
  if (__pthread_initial_thread_bos != NULL) return;
#ifdef TEST_FOR_COMPARE_AND_SWAP
  /* Test if compare-and-swap is available */
  __pthread_has_cas = compare_and_swap_is_available();
#endif
  /* For the initial stack, reserve at least STACK_SIZE bytes of stack
     below the current stack address, and align that on a
     STACK_SIZE boundary. */
  __pthread_initial_thread_bos =
    (char *)(((long)CURRENT_STACK_FRAME - 2 * STACK_SIZE) & ~(STACK_SIZE - 1));
  /* Update the descriptor for the initial thread. */
  __pthread_initial_thread.p_pid = __getpid();
  /* If we have special thread_self processing, initialize that for the
     main thread now.  */
#ifdef INIT_THREAD_SELF
  INIT_THREAD_SELF(&__pthread_initial_thread, 0);
#endif
  /* The errno/h_errno variable of the main thread are the global ones.  */
  __pthread_initial_thread.p_errnop = &_errno;
  __pthread_initial_thread.p_h_errnop = &_h_errno;
  /* Play with the stack size limit to make sure that no stack ever grows
     beyond STACK_SIZE minus two pages (one page for the thread descriptor
     immediately beyond, and one page to act as a guard page). */

#ifdef __UCLIBC_HAS_MMU__
  /* We cannot allocate a huge chunk of memory to mmap all thread stacks later
   * on a non-MMU system. Thus, we don't need the rlimit either. -StS */
  getrlimit(RLIMIT_STACK, &limit);
  max_stack = STACK_SIZE - 2 * __getpagesize();
  if (limit.rlim_cur > max_stack) {
    limit.rlim_cur = max_stack;
    setrlimit(RLIMIT_STACK, &limit);
  }
#else
  /* For non-MMU assume __pthread_initial_thread_tos at upper page boundary, and
   * __pthread_initial_thread_bos at address 0. These bounds are refined as we 
   * malloc other stack frames such that they don't overlap. -StS
   */
  __pthread_initial_thread_tos =
    (char *)(((long)CURRENT_STACK_FRAME + __getpagesize()) & ~(__getpagesize() - 1));
  __pthread_initial_thread_bos = (char *) 1; /* set it non-zero so we know we have been here */
  PDEBUG("initial thread stack bounds: bos=%p, tos=%p\n",
	 __pthread_initial_thread_bos, __pthread_initial_thread_tos);
#endif /* __UCLIBC_HAS_MMU__ */

  /* Setup signal handlers for the initial thread.
     Since signal handlers are shared between threads, these settings
     will be inherited by all other threads. */
  sa.sa_handler = pthread_handle_sigrestart;
  sigemptyset(&sa.sa_mask);
  sa.sa_flags = 0;
  __libc_sigaction(__pthread_sig_restart, &sa, NULL);
  sa.sa_handler = pthread_handle_sigcancel;
  // sa.sa_flags = 0;
  __libc_sigaction(__pthread_sig_cancel, &sa, NULL);
  if (__pthread_sig_debug > 0) {
      sa.sa_handler = pthread_handle_sigdebug;
      sigemptyset(&sa.sa_mask);
      // sa.sa_flags = 0;
      __libc_sigaction(__pthread_sig_debug, &sa, NULL);
  }
  /* Initially, block __pthread_sig_restart. Will be unblocked on demand. */
  sigemptyset(&mask);
  sigaddset(&mask, __pthread_sig_restart);
  sigprocmask(SIG_BLOCK, &mask, NULL);
  /* Register an exit function to kill all other threads. */
  /* Do it early so that user-registered atexit functions are called
     before pthread_onexit_process. */
  on_exit(pthread_onexit_process, NULL);
}