int
ttyslot (void)
{
  register struct ttyent *ttyp;
  register int slot;
  register char *p;
  int cnt;
  size_t buflen = __sysconf (_SC_TTY_NAME_MAX) + 1;
  char *name;

  if (buflen == 0)
    /* This should be enough if no fixed value is given.  */
    buflen = 32;

  name = __alloca (buflen);

  setttyent ();
  for (cnt = 0; cnt < 3; ++cnt)
    if (__ttyname_r (cnt, name, buflen) == 0)
      {
        if ((p = rindex (name, '/')))
          ++p;
        else
          p = name;
        for (slot = 1; (ttyp = getttyent ()); ++slot)
          if (!strcmp (ttyp->ty_name, p))
            {
              endttyent ();
              return (slot);
            }
        break;
      }
  endttyent ();
  return (0);
}
/* Add an action to FILE-ACTIONS which tells the implementation to call
   `open' for the given file during the `spawn' call.  */
int
posix_spawn_file_actions_addopen (posix_spawn_file_actions_t *file_actions,
                                  int fd, const char *path, int oflag,
                                  mode_t mode)
{
  int maxfd = __sysconf (_SC_OPEN_MAX);
  struct __spawn_action *rec;

  /* Test for the validity of the file descriptor.  */
  if (fd < 0 || fd >= maxfd)
    return EBADF;

  /* Allocate more memory if needed.  */
  if (file_actions->__used == file_actions->__allocated
      && __posix_spawn_file_actions_realloc (file_actions) != 0)
    /* This can only mean we ran out of memory.  */
    return ENOMEM;

  /* Add the new value.  */
  rec = &file_actions->__actions[file_actions->__used];
  rec->tag = spawn_do_open;
  rec->action.open_action.fd = fd;
  rec->action.open_action.path = path;
  rec->action.open_action.oflag = oflag;
  rec->action.open_action.mode = mode;

  /* Account for the new entry.  */
  ++file_actions->__used;

  return 0;
}
static TACommandVerdict
__sysconf_cmd(TAThread thread, TAInputStream stream)
{
    int name;
    long res;

    /* Prepare */
    name = readInt(&stream);

    /* [ Set errno ] */
    errno = readInt(&stream);

    START_TARGET_OPERATION(thread);

    /* Execute */
    res = __sysconf(name);

    END_TARGET_OPERATION(thread);

    /* Response */
    writeLong(thread, res);
    writeInt(thread, errno);
    sendResponse(thread);

    return taDefaultVerdict;
}
/* Add an action to FILE-ACTIONS which tells the implementation to call
   `dup2' for the given file descriptors during the `spawn' call.  */
int
posix_spawn_file_actions_adddup2 (posix_spawn_file_actions_t *file_actions,
                                  int fd, int newfd)
{
  int maxfd = __sysconf (_SC_OPEN_MAX);
  struct __spawn_action *rec;

  /* Test for the validity of the file descriptor.  */
  if (fd < 0 || newfd < 0 || fd >= maxfd || newfd >= maxfd)
    return EBADF;

  /* Allocate more memory if needed.  */
  if (file_actions->__used == file_actions->__allocated
      && __posix_spawn_file_actions_realloc (file_actions) != 0)
    /* This can only mean we ran out of memory.  */
    return ENOMEM;

  /* Add the new value.  */
  rec = &file_actions->__actions[file_actions->__used];
  rec->tag = spawn_do_dup2;
  rec->action.dup2_action.fd = fd;
  rec->action.dup2_action.newfd = newfd;

  /* Account for the new entry.  */
  ++file_actions->__used;

  return 0;
}
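/* Usage sketch (not part of the library source): spawn `ls' with its
   standard output redirected to a log file, using the two file actions
   defined above.  The helper name and the spare descriptor 10 are
   illustrative assumptions, not glibc API.  */
#include <fcntl.h>
#include <spawn.h>
#include <sys/wait.h>
#include <unistd.h>

extern char **environ;

int
spawn_ls_logged (void)
{
  posix_spawn_file_actions_t fa;
  pid_t pid;
  int status;
  char *argv[] = { (char *) "ls", NULL };

  posix_spawn_file_actions_init (&fa);
  /* Open the log file on a spare descriptor, then dup it onto stdout.  */
  posix_spawn_file_actions_addopen (&fa, 10, "/tmp/ls.log",
                                    O_WRONLY | O_CREAT | O_TRUNC, 0644);
  posix_spawn_file_actions_adddup2 (&fa, 10, STDOUT_FILENO);

  if (posix_spawn (&pid, "/bin/ls", &fa, NULL, argv, environ) != 0)
    {
      posix_spawn_file_actions_destroy (&fa);
      return -1;
    }
  posix_spawn_file_actions_destroy (&fa);
  return waitpid (pid, &status, 0) == pid ? 0 : -1;
}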
int
__getpw (__uid_t uid, char *buf)
{
  size_t buflen;
  char *tmpbuf;
  struct passwd resbuf, *p;

  if (buf == NULL)
    {
      __set_errno (EINVAL);
      return -1;
    }

  buflen = __sysconf (_SC_GETPW_R_SIZE_MAX);
  tmpbuf = alloca (buflen);

  if (__getpwuid_r (uid, &resbuf, tmpbuf, buflen, &p) != 0)
    return -1;

  if (p == NULL)
    return -1;

  if (sprintf (buf, "%s:%s:%lu:%lu:%s:%s:%s", p->pw_name, p->pw_passwd,
               (unsigned long int) p->pw_uid, (unsigned long int) p->pw_gid,
               p->pw_gecos, p->pw_dir, p->pw_shell) < 0)
    return -1;

  return 0;
}
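/* A minimal sketch of the buffer-sizing pattern __getpw relies on: seed
   the scratch buffer size from _SC_GETPW_R_SIZE_MAX (which may
   legitimately be -1) and fall back to a fixed size.  The function name
   is hypothetical, not glibc code.  */
#include <pwd.h>
#include <stdio.h>
#include <unistd.h>

int
print_shell (uid_t uid)
{
  struct passwd pwd, *res;
  char buf[4096];
  long hint = sysconf (_SC_GETPW_R_SIZE_MAX);
  size_t len = (hint > 0 && (size_t) hint <= sizeof buf)
               ? (size_t) hint : sizeof buf;

  if (getpwuid_r (uid, &pwd, buf, len, &res) != 0 || res == NULL)
    return -1;
  printf ("%s\n", pwd.pw_shell);
  return 0;
}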
/**
 * Pretend that only 1 processor is configured/online, because rr
 * binds all tracees to one logical CPU.
 */
long sysconf(int name) {
  switch (name) {
    case _SC_NPROCESSORS_ONLN:
    case _SC_NPROCESSORS_CONF:
      return 1;
  }
  return __sysconf(name);
}
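/* Illustration, assuming the interposed sysconf above is preloaded into
   the tracee: both queries below then report exactly one CPU, no matter
   how many the host has.  */
#include <stdio.h>
#include <unistd.h>

int
main (void)
{
  printf ("online CPUs: %ld\n", sysconf (_SC_NPROCESSORS_ONLN));
  printf ("configured CPUs: %ld\n", sysconf (_SC_NPROCESSORS_CONF));
  return 0;
}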
/* Function depends on CMD:
   1 = Return the limit on the size of a file, in units of 512 bytes.
   2 = Set the limit on the size of a file to NEWLIMIT.  Only the
       super-user can increase the limit.
   3 = illegal due to shared libraries; normally is
       (Return the maximum possible address of the data segment.)
   4 = Return the maximum number of files that the calling process
       can open.
   Returns -1 on errors.  */
long int
__ulimit (int cmd, ...)
{
  struct rlimit limit;
  va_list va;
  long int result = -1;

  va_start (va, cmd);

  switch (cmd)
    {
    case UL_GETFSIZE:
      /* Get limit on file size.  */
      if (__getrlimit (RLIMIT_FSIZE, &limit) == 0)
        /* Convert from bytes to 512 byte units.  */
        result = (limit.rlim_cur == RLIM_INFINITY
                  ? LONG_MAX : limit.rlim_cur / 512);
      break;

    case UL_SETFSIZE:
      /* Set limit on file size.  */
      {
        long int newlimit = va_arg (va, long int);
        long int newlen;

        if ((rlim_t) newlimit > RLIM_INFINITY / 512)
          {
            limit.rlim_cur = RLIM_INFINITY;
            limit.rlim_max = RLIM_INFINITY;
            newlen = LONG_MAX;
          }
        else
          {
            limit.rlim_cur = newlimit * 512;
            limit.rlim_max = newlimit * 512;
            newlen = newlimit;
          }

        result = __setrlimit (RLIMIT_FSIZE, &limit);
        if (result != -1)
          result = newlen;
      }
      break;

    case __UL_GETOPENMAX:
      result = __sysconf (_SC_OPEN_MAX);
      break;

    default:
      __set_errno (EINVAL);
    }

  va_end (va);

  return result;
}
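/* Usage sketch for the interface above, under the assumption that a
   process may always lower its own limit: read the current file-size
   limit (in 512-byte units) and halve it.  The helper name is
   illustrative.  Note the argument to UL_SETFSIZE must be a long int,
   since the implementation fetches it with va_arg (va, long int).  */
#include <ulimit.h>

long
halve_fsize_limit (void)
{
  long cur = ulimit (UL_GETFSIZE);
  if (cur == -1)
    return -1;
  /* Only the super-user may raise the limit; lowering is allowed.  */
  return ulimit (UL_SETFSIZE, cur / 2);
}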
long
__sysconf_beos(int name)
{
    switch (name) {
        case _SC_CLK_TCK:
            return CLK_TCK_BEOS;
    }

    return __sysconf(name);
}
/* Set the group set for the current user to GROUPS (N of them).  For
   Linux we must convert the array of groups into the format that the
   kernel expects.  */
int
setgroups (size_t n, const gid_t *groups)
{
#if __ASSUME_32BITUIDS > 0
  return INLINE_SETXID_SYSCALL (setgroups32, 2, n, CHECK_N (groups, n));
#else
  if (n > (size_t) __sysconf (_SC_NGROUPS_MAX))
    {
      __set_errno (EINVAL);
      return -1;
    }
  else
    {
      size_t i;
      __kernel_gid_t kernel_groups[n];

# ifdef __NR_setgroups32
      if (__libc_missing_32bit_uids <= 0)
        {
          int result;
          int saved_errno = errno;

          result = INLINE_SETXID_SYSCALL (setgroups32, 2, n,
                                          CHECK_N (groups, n));
          if (result == 0 || errno != ENOSYS)
            return result;

          __set_errno (saved_errno);
          __libc_missing_32bit_uids = 1;
        }
# endif /* __NR_setgroups32 */

      for (i = 0; i < n; i++)
        {
          kernel_groups[i] = (__ptrvalue (groups))[i];
          if (groups[i] != (gid_t) ((__kernel_gid_t) groups[i]))
            {
              __set_errno (EINVAL);
              return -1;
            }
        }

      return INLINE_SETXID_SYSCALL (setgroups, 2, n,
                                    CHECK_N (kernel_groups, n));
    }
#endif
}
/* Initialize the group set for the current user by reading the group
   database and using all groups of which USER is a member.  Also
   include GROUP.  */
int
initgroups (const char *user, gid_t group)
{
#if defined NGROUPS_MAX && NGROUPS_MAX == 0
  /* No extra groups allowed.  */
  return 0;
#else
  long int size;
  gid_t *groups;
  int ngroups;
  int result;

  /* We always use sysconf even if NGROUPS_MAX is defined.  That way,
     the limit can be raised in the kernel configuration without having
     to recompile libc.  */
  long int limit = __sysconf (_SC_NGROUPS_MAX);

  if (limit > 0)
    /* We limit the size of the initially allocated array.  */
    size = MIN (limit, 64);
  else
    /* No fixed limit on groups.  Pick a starting buffer size.  */
    size = 16;

  groups = (gid_t *) malloc (size * sizeof (gid_t));
  if (__glibc_unlikely (groups == NULL))
    /* No more memory.  */
    return -1;

  ngroups = internal_getgrouplist (user, group, &size, &groups, limit);

  /* Try to set the maximum number of groups the kernel can handle.  */
  do
    result = setgroups (ngroups, groups);
  while (result == -1 && errno == EINVAL && --ngroups > 0);

  free (groups);
  return result;
#endif
}
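/* Sketch of the canonical initgroups call site: a privileged daemon
   dropping to an unprivileged user.  The helper name is hypothetical;
   the ordering (supplementary groups before setgid/setuid, while we
   still have the privilege) is the part that matters.  */
#include <grp.h>
#include <pwd.h>
#include <unistd.h>

int
drop_to_user (const char *name)
{
  struct passwd *pw = getpwnam (name);
  if (pw == NULL)
    return -1;
  /* Set supplementary groups first; after setuid it is too late.  */
  if (initgroups (name, pw->pw_gid) != 0
      || setgid (pw->pw_gid) != 0
      || setuid (pw->pw_uid) != 0)
    return -1;
  return 0;
}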
void
qsort_r (void *b, size_t n, size_t s, __compar_d_fn_t cmp, void *arg)
{
  size_t size = n * s;
  char *tmp = NULL;
  struct msort_param p;

  /* For large object sizes use indirect sorting.  */
  if (s > 32)
    size = 2 * n * sizeof (void *) + s;

  if (size < 1024)
    /* The temporary array is small, so put it on the stack.  */
    p.t = __alloca (size);
  else
    {
      /* We should avoid allocating too much memory since this might
         have to be backed up by swap space.  */
      static long int phys_pages;
      static int pagesize;

      if (pagesize == 0)
        {
          phys_pages = __sysconf (_SC_PHYS_PAGES);

          if (phys_pages == -1)
            /* Error while determining the memory size.  So let's
               assume there is enough memory.  Otherwise the
               implementer should provide a complete implementation of
               the `sysconf' function.  */
            phys_pages = (long int) (~0ul >> 1);

          /* The following determines that we will never use more than
             a quarter of the physical memory.  */
          phys_pages /= 4;

          /* Make sure phys_pages is written to memory.  */
          atomic_write_barrier ();

          pagesize = __sysconf (_SC_PAGESIZE);
        }

      /* Just a comment here.  We cannot compute phys_pages * pagesize
         and compare the needed amount of memory against this value.
         The problem is that some systems might have more physical
         memory than can be represented with a `size_t' value (when
         measured in bytes).  */

      /* If the memory requirements are too high don't allocate memory.  */
      if (size / pagesize > (size_t) phys_pages)
        {
          _quicksort (b, n, s, cmp, arg);
          return;
        }

      /* It's somewhat large, so malloc it.  */
      int save = errno;
      tmp = malloc (size);
      __set_errno (save);
      if (tmp == NULL)
        {
          /* Couldn't get space, so use the slower algorithm
             that doesn't need a temporary array.  */
          _quicksort (b, n, s, cmp, arg);
          return;
        }
      p.t = tmp;
    }

  /* [The excerpt ends here; the full source goes on to fill in the
     remaining msort_param fields, run the merge sort, and free TMP.]  */
}
/* Change the ownership and access permission of the slave pseudo
   terminal associated with the master pseudo terminal specified
   by FD.  */
int
grantpt (int fd)
{
  int retval = -1;
#ifdef PATH_MAX
  char _buf[PATH_MAX];
#else
  char _buf[512];
#endif
  char *buf = _buf;
  struct stat64 st;

  if (__glibc_unlikely (pts_name (fd, &buf, sizeof (_buf), &st)))
    {
      int save_errno = errno;

      /* Check if the file descriptor is valid.  pts_name returns the
         wrong errno number, so we cannot use that.  */
      if (__libc_fcntl (fd, F_GETFD) == -1 && errno == EBADF)
        return -1;

      /* If the file descriptor is not a TTY, grantpt has to set errno
         to EINVAL.  */
      if (save_errno == ENOTTY)
        __set_errno (EINVAL);
      else
        __set_errno (save_errno);

      return -1;
    }

  /* Make sure that we own the device.  */
  uid_t uid = __getuid ();
  if (st.st_uid != uid)
    {
      if (__chown (buf, uid, st.st_gid) < 0)
        goto helper;
    }

  static int tty_gid = -1;
  if (__glibc_unlikely (tty_gid == -1))
    {
      char *grtmpbuf;
      struct group grbuf;
      size_t grbuflen = __sysconf (_SC_GETGR_R_SIZE_MAX);
      struct group *p;

      /* Get the group ID of the special `tty' group.  */
      if (grbuflen == (size_t) -1L)
        /* `sysconf' does not support _SC_GETGR_R_SIZE_MAX.
           Try a moderate value.  */
        grbuflen = 1024;
      grtmpbuf = (char *) __alloca (grbuflen);
      __getgrnam_r (TTY_GROUP, &grbuf, grtmpbuf, grbuflen, &p);
      if (p != NULL)
        tty_gid = p->gr_gid;
    }
  gid_t gid = tty_gid == -1 ? __getgid () : tty_gid;

  /* Make sure the group of the device is that special group.  */
  if (st.st_gid != gid)
    {
      if (__chown (buf, uid, gid) < 0)
        goto helper;
    }

  /* Make sure the permission mode is set to readable and writable by
     the owner, and writable by the group.  */
  if ((st.st_mode & ACCESSPERMS) != (S_IRUSR|S_IWUSR|S_IWGRP))
    {
      if (__chmod (buf, S_IRUSR|S_IWUSR|S_IWGRP) < 0)
        goto helper;
    }

  retval = 0;
  goto cleanup;

  /* We have to use the helper program if it is available.  */
 helper:;

#ifdef HAVE_PT_CHOWN
  pid_t pid = __fork ();
  if (pid == -1)
    goto cleanup;
  else if (pid == 0)
    {
      /* Disable core dumps.  */
      struct rlimit rl = { 0, 0 };
      __setrlimit (RLIMIT_CORE, &rl);

      /* We pass the master pseudo terminal as file descriptor
         PTY_FILENO.  */
      if (fd != PTY_FILENO)
        if (__dup2 (fd, PTY_FILENO) < 0)
          _exit (FAIL_EBADF);

# ifdef CLOSE_ALL_FDS
      CLOSE_ALL_FDS ();
# endif

      execle (_PATH_PT_CHOWN, basename (_PATH_PT_CHOWN), NULL, NULL);
      _exit (FAIL_EXEC);
    }
  else
    {
      int w;

      if (__waitpid (pid, &w, 0) == -1)
        goto cleanup;
      if (!WIFEXITED (w))
        __set_errno (ENOEXEC);
      else
        switch (WEXITSTATUS (w))
          {
          case 0:
            retval = 0;
            break;
          case FAIL_EBADF:
            __set_errno (EBADF);
            break;
          case FAIL_EINVAL:
            __set_errno (EINVAL);
            break;
          case FAIL_EACCES:
            __set_errno (EACCES);
            break;
          case FAIL_EXEC:
            __set_errno (ENOEXEC);
            break;
          case FAIL_ENOMEM:
            __set_errno (ENOMEM);
            break;

          default:
            assert (! "getpt: internal error: invalid exit code from pt_chown");
          }
    }
#endif

 cleanup:
  if (buf != _buf)
    free (buf);

  return retval;
}
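/* Sketch of the standard call sequence around grantpt: open a
   pseudo-terminal master, fix up the slave's ownership and mode, and
   unlock it.  The function name is illustrative.  */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int
open_pty_master (void)
{
  int mfd = posix_openpt (O_RDWR | O_NOCTTY);
  if (mfd == -1)
    return -1;
  if (grantpt (mfd) == -1 || unlockpt (mfd) == -1)
    {
      close (mfd);
      return -1;
    }
  printf ("slave: %s\n", ptsname (mfd));
  return mfd;
}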
void
__pthread_initialize_minimal_internal (void)
{
#ifndef SHARED
  /* Unlike in the dynamically linked case the dynamic linker has not
     taken care of initializing the TLS data structures.  */
  __libc_setup_tls (TLS_TCB_SIZE, TLS_TCB_ALIGN);

  /* We must prevent gcc from being clever and move any of the
     following code ahead of the __libc_setup_tls call.  This function
     will initialize the thread register which is subsequently
     used.  */
  __asm __volatile ("");
#endif

  /* Minimal initialization of the thread descriptor.  */
  struct pthread *pd = THREAD_SELF;
  INTERNAL_SYSCALL_DECL (err);
  pd->pid = pd->tid = INTERNAL_SYSCALL (set_tid_address, err, 1, &pd->tid);
  THREAD_SETMEM (pd, specific[0], &pd->specific_1stblock[0]);
  THREAD_SETMEM (pd, user_stack, true);
  if (LLL_LOCK_INITIALIZER != 0)
    THREAD_SETMEM (pd, lock, LLL_LOCK_INITIALIZER);
#if HP_TIMING_AVAIL
  THREAD_SETMEM (pd, cpuclock_offset, GL(dl_cpuclock_offset));
#endif

  /* Initialize the robust mutex data.  */
#ifdef __PTHREAD_MUTEX_HAVE_PREV
  pd->robust_prev = &pd->robust_head;
#endif
  pd->robust_head.list = &pd->robust_head;
#ifdef __NR_set_robust_list
  pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
                                  - offsetof (pthread_mutex_t,
                                              __data.__list.__next));
  int res = INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
                              sizeof (struct robust_list_head));
  if (INTERNAL_SYSCALL_ERROR_P (res, err))
#endif
    set_robust_list_not_avail ();

#ifndef __ASSUME_PRIVATE_FUTEX
  /* Private futexes are always used (at least internally) so that
     doing the test once this early is beneficial.  */
  {
    int word = 0;
    word = INTERNAL_SYSCALL (futex, err, 3, &word,
                             FUTEX_WAKE | FUTEX_PRIVATE_FLAG, 1);
    if (!INTERNAL_SYSCALL_ERROR_P (word, err))
      THREAD_SETMEM (pd, header.private_futex, FUTEX_PRIVATE_FLAG);
  }

  /* Private futexes have been introduced earlier than the
     FUTEX_CLOCK_REALTIME flag.  We don't have to run the test if we
     know the former are not supported.  This also means we know the
     kernel will return ENOSYS for unknown operations.  */
  if (THREAD_GETMEM (pd, header.private_futex) != 0)
#endif
#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
    {
      int word = 0;
      /* NB: the syscall actually takes six parameters.  The last is
         the bit mask.  But since we will not actually wait at all the
         value is irrelevant.  Given that passing six parameters is
         difficult on some architectures we just pass whatever random
         value the calling convention calls for to the kernel.  It
         causes no harm.  */
      word = INTERNAL_SYSCALL (futex, err, 5, &word,
                               FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME
                               | FUTEX_PRIVATE_FLAG, 1, NULL, 0);
      assert (INTERNAL_SYSCALL_ERROR_P (word, err));
      if (INTERNAL_SYSCALL_ERRNO (word, err) != ENOSYS)
        __set_futex_clock_realtime ();
    }
#endif

  /* Set initial thread's stack block from 0 up to __libc_stack_end.
     It will be bigger than it actually is, but for unwind.c/pt-longjmp.c
     purposes this is good enough.  */
  THREAD_SETMEM (pd, stackblock_size, (size_t) __libc_stack_end);

  /* Initialize the list of all running threads with the main thread.  */
  INIT_LIST_HEAD (&__stack_user);
  list_add (&pd->list, &__stack_user);

  /* Before initializing __stack_user, the debugger could not find us
     and had to set __nptl_initial_report_events.  Propagate its
     setting.  */
  THREAD_SETMEM (pd, report_events, __nptl_initial_report_events);

  /* Install the cancellation signal handler.  If for some reason we
     cannot install the handler we do not abort.  Maybe we should, but
     it is only asynchronous cancellation which is affected.  */
  struct sigaction sa;
  sa.sa_sigaction = sigcancel_handler;
  sa.sa_flags = SA_SIGINFO;
  __sigemptyset (&sa.sa_mask);

  (void) __libc_sigaction (SIGCANCEL, &sa, NULL);

  /* Install the handler to change the threads' uid/gid.  */
  sa.sa_sigaction = sighandler_setxid;
  sa.sa_flags = SA_SIGINFO | SA_RESTART;

  (void) __libc_sigaction (SIGSETXID, &sa, NULL);

  /* The parent process might have left the signals blocked.  Just in
     case, unblock it.  We reuse the signal mask in the sigaction
     structure.  It is already cleared.  */
  __sigaddset (&sa.sa_mask, SIGCANCEL);
  __sigaddset (&sa.sa_mask, SIGSETXID);
  (void) INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_UNBLOCK,
                           &sa.sa_mask, NULL, _NSIG / 8);

  /* Get the size of the static and alignment requirements for the TLS
     block.  */
  size_t static_tls_align;
  _dl_get_tls_static_info (&__static_tls_size, &static_tls_align);

  /* Make sure the size takes all the alignments into account.  */
  if (STACK_ALIGN > static_tls_align)
    static_tls_align = STACK_ALIGN;
  __static_tls_align_m1 = static_tls_align - 1;

  __static_tls_size = roundup (__static_tls_size, static_tls_align);

  /* Determine the default allowed stack size.  This is the size used
     in case the user does not specify one.  */
  struct rlimit limit;
  if (getrlimit (RLIMIT_STACK, &limit) != 0
      || limit.rlim_cur == RLIM_INFINITY)
    /* The system limit is not usable.  Use an architecture-specific
       default.  */
    limit.rlim_cur = ARCH_STACK_DEFAULT_SIZE;
  else if (limit.rlim_cur < PTHREAD_STACK_MIN)
    /* The system limit is unusably small.
       Use the minimal size acceptable.  */
    limit.rlim_cur = PTHREAD_STACK_MIN;

  /* Make sure it meets the minimum size that allocate_stack
     (allocatestack.c) will demand, which depends on the page size.  */
  const uintptr_t pagesz = __sysconf (_SC_PAGESIZE);
  const size_t minstack = pagesz + __static_tls_size + MINIMAL_REST_STACK;
  if (limit.rlim_cur < minstack)
    limit.rlim_cur = minstack;

  /* Round the resource limit up to page size.  */
  limit.rlim_cur = (limit.rlim_cur + pagesz - 1) & -pagesz;
  __default_stacksize = limit.rlim_cur;

#ifdef SHARED
  /* Transfer the old value from the dynamic linker's internal
     location.  */
  *__libc_dl_error_tsd () = *(*GL(dl_error_catch_tsd)) ();
  GL(dl_error_catch_tsd) = &__libc_dl_error_tsd;

  /* Make __rtld_lock_{,un}lock_recursive use pthread_mutex_{,un}lock,
     keep the lock count from the ld.so implementation.  */
  GL(dl_rtld_lock_recursive) = (void *) INTUSE (__pthread_mutex_lock);
  GL(dl_rtld_unlock_recursive) = (void *) INTUSE (__pthread_mutex_unlock);
  unsigned int rtld_lock_count = GL(dl_load_lock).mutex.__data.__count;
  GL(dl_load_lock).mutex.__data.__count = 0;
  while (rtld_lock_count-- > 0)
    INTUSE (__pthread_mutex_lock) (&GL(dl_load_lock).mutex);

  GL(dl_make_stack_executable_hook) = &__make_stacks_executable;
#endif

  GL(dl_init_static_tls) = &__pthread_init_static_tls;

  GL(dl_wait_lookup_done) = &__wait_lookup_done;

  /* Register the fork generation counter with the libc.  */
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
  __libc_multiple_threads_ptr =
#endif
    __libc_pthread_init (&__fork_generation, __reclaim_stacks,
                         ptr_pthread_functions);

  /* Determine whether the machine is SMP or not.  */
  __is_smp = is_smp_system ();
}
/* Return the system page size.  */
int
__getpagesize (void)
{
  return __sysconf (_SC_PAGESIZE);
}
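/* A small usage sketch: the page size reported above is the unit that
   mmap works in.  Map one anonymous page; MAP_ANONYMOUS is assumed to
   be available on the target system.  */
#include <sys/mman.h>
#include <unistd.h>

void *
map_one_page (void)
{
  long pagesz = sysconf (_SC_PAGESIZE);
  if (pagesz <= 0)
    return NULL;
  void *p = mmap (NULL, (size_t) pagesz, PROT_READ | PROT_WRITE,
                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  return p == MAP_FAILED ? NULL : p;
}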
void
qsort (void *b, size_t n, size_t s, __compar_fn_t cmp)
{
  const size_t size = n * s;

  if (size < 1024)
    {
      void *buf = __alloca (size);

      /* The temporary array is small, so put it on the stack.  */
      msort_with_tmp (b, n, s, cmp, buf);
    }
  else
    {
      /* We should avoid allocating too much memory since this might
         have to be backed up by swap space.  */
      static long int phys_pages;
      static int pagesize;

      if (phys_pages == 0)
        {
          phys_pages = __sysconf (_SC_PHYS_PAGES);

          if (phys_pages == -1)
            /* Error while determining the memory size.  So let's
               assume there is enough memory.  Otherwise the
               implementer should provide a complete implementation of
               the `sysconf' function.  */
            phys_pages = (long int) (~0ul >> 1);

          /* The following determines that we will never use more than
             a quarter of the physical memory.  */
          phys_pages /= 4;

          pagesize = __sysconf (_SC_PAGESIZE);
        }

      /* Just a comment here.  We cannot compute phys_pages * pagesize
         and compare the needed amount of memory against this value.
         The problem is that some systems might have more physical
         memory than can be represented with a `size_t' value (when
         measured in bytes).  */

      /* If the memory requirements are too high don't allocate memory.  */
      if (size / pagesize > phys_pages)
        _quicksort (b, n, s, cmp);
      else
        {
          /* It's somewhat large, so malloc it.  */
          int save = errno;
          char *tmp = malloc (size);
          if (tmp == NULL)
            {
              /* Couldn't get space, so use the slower algorithm
                 that doesn't need a temporary array.  */
              __set_errno (save);
              _quicksort (b, n, s, cmp);
            }
          else
            {
              __set_errno (save);
              msort_with_tmp (b, n, s, cmp, tmp);
              free (tmp);
            }
        }
    }
}
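/* Usage sketch for the merge sort above, through the public qsort
   entry point: sort an int array with a standard overflow-safe
   three-way comparator.  */
#include <stdlib.h>

static int
cmp_int (const void *a, const void *b)
{
  int x = *(const int *) a;
  int y = *(const int *) b;
  return (x > y) - (x < y);
}

void
sort_ints (int *v, size_t n)
{
  qsort (v, n, sizeof *v, cmp_int);
}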
void
__pthread_initialize_minimal_internal (void)
{
#ifndef SHARED
  /* Unlike in the dynamically linked case the dynamic linker has not
     taken care of initializing the TLS data structures.  */
  __libc_setup_tls (TLS_TCB_SIZE, TLS_TCB_ALIGN);

  /* We must prevent gcc from being clever and move any of the
     following code ahead of the __libc_setup_tls call.  This function
     will initialize the thread register which is subsequently
     used.  */
  __asm __volatile ("");
#endif

  /* Minimal initialization of the thread descriptor.  */
  struct pthread *pd = THREAD_SELF;
  INTERNAL_SYSCALL_DECL (err);
  pd->pid = pd->tid = INTERNAL_SYSCALL (set_tid_address, err, 1, &pd->tid);
#ifdef __PTHREAD_MUTEX_HAVE_PREV
  pd->robust_list.__prev = &pd->robust_list;
#endif
  pd->robust_list.__next = &pd->robust_list;
  THREAD_SETMEM (pd, specific[0], &pd->specific_1stblock[0]);
  THREAD_SETMEM (pd, user_stack, true);
  if (LLL_LOCK_INITIALIZER != 0)
    THREAD_SETMEM (pd, lock, LLL_LOCK_INITIALIZER);
#if HP_TIMING_AVAIL
  THREAD_SETMEM (pd, cpuclock_offset, GL(dl_cpuclock_offset));
#endif

  /* Set initial thread's stack block from 0 up to __libc_stack_end.
     It will be bigger than it actually is, but for unwind.c/pt-longjmp.c
     purposes this is good enough.  */
  THREAD_SETMEM (pd, stackblock_size, (size_t) __libc_stack_end);

  /* Initialize the list of all running threads with the main thread.  */
  INIT_LIST_HEAD (&__stack_user);
  list_add (&pd->list, &__stack_user);

  /* Install the cancellation signal handler.  If for some reason we
     cannot install the handler we do not abort.  Maybe we should, but
     it is only asynchronous cancellation which is affected.  */
  struct sigaction sa;
  sa.sa_sigaction = sigcancel_handler;
  sa.sa_flags = SA_SIGINFO;
  __sigemptyset (&sa.sa_mask);

  (void) __libc_sigaction (SIGCANCEL, &sa, NULL);

  /* Install the handler to change the threads' uid/gid.  */
  sa.sa_sigaction = sighandler_setxid;
  sa.sa_flags = SA_SIGINFO | SA_RESTART;

  (void) __libc_sigaction (SIGSETXID, &sa, NULL);

  /* The parent process might have left the signals blocked.  Just in
     case, unblock it.  We reuse the signal mask in the sigaction
     structure.  It is already cleared.  */
  __sigaddset (&sa.sa_mask, SIGCANCEL);
  __sigaddset (&sa.sa_mask, SIGSETXID);
  (void) INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_UNBLOCK,
                           &sa.sa_mask, NULL, _NSIG / 8);

  /* Get the size of the static and alignment requirements for the TLS
     block.  */
  size_t static_tls_align;
  _dl_get_tls_static_info (&__static_tls_size, &static_tls_align);

  /* Make sure the size takes all the alignments into account.  */
  if (STACK_ALIGN > static_tls_align)
    static_tls_align = STACK_ALIGN;
  __static_tls_align_m1 = static_tls_align - 1;

  __static_tls_size = roundup (__static_tls_size, static_tls_align);

  /* Determine the default allowed stack size.  This is the size used
     in case the user does not specify one.  */
  struct rlimit limit;
  if (getrlimit (RLIMIT_STACK, &limit) != 0
      || limit.rlim_cur == RLIM_INFINITY)
    /* The system limit is not usable.  Use an architecture-specific
       default.  */
    limit.rlim_cur = ARCH_STACK_DEFAULT_SIZE;
  else if (limit.rlim_cur < PTHREAD_STACK_MIN)
    /* The system limit is unusably small.
       Use the minimal size acceptable.  */
    limit.rlim_cur = PTHREAD_STACK_MIN;

  /* Make sure it meets the minimum size that allocate_stack
     (allocatestack.c) will demand, which depends on the page size.  */
  const uintptr_t pagesz = __sysconf (_SC_PAGESIZE);
  const size_t minstack = pagesz + __static_tls_size + MINIMAL_REST_STACK;
  if (limit.rlim_cur < minstack)
    limit.rlim_cur = minstack;

  /* Round the resource limit up to page size.  */
  limit.rlim_cur = (limit.rlim_cur + pagesz - 1) & -pagesz;
  __default_stacksize = limit.rlim_cur;

#ifdef SHARED
  /* Transfer the old value from the dynamic linker's internal
     location.  */
  *__libc_dl_error_tsd () = *(*GL(dl_error_catch_tsd)) ();
  GL(dl_error_catch_tsd) = &__libc_dl_error_tsd;

  /* Make __rtld_lock_{,un}lock_recursive use pthread_mutex_{,un}lock,
     keep the lock count from the ld.so implementation.  */
  GL(dl_rtld_lock_recursive) = (void *) INTUSE (__pthread_mutex_lock);
  GL(dl_rtld_unlock_recursive) = (void *) INTUSE (__pthread_mutex_unlock);
  unsigned int rtld_lock_count = GL(dl_load_lock).mutex.__data.__count;
  GL(dl_load_lock).mutex.__data.__count = 0;
  while (rtld_lock_count-- > 0)
    INTUSE (__pthread_mutex_lock) (&GL(dl_load_lock).mutex);

  GL(dl_make_stack_executable_hook) = &__make_stacks_executable;
#endif

  GL(dl_init_static_tls) = &__pthread_init_static_tls;

  /* Register the fork generation counter with the libc.  */
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
  __libc_multiple_threads_ptr =
#endif
    __libc_pthread_init (&__fork_generation, __reclaim_stacks,
                         ptr_pthread_functions);

  /* Determine whether the machine is SMP or not.  */
  __is_smp = is_smp_system ();
}