/* Set the CPU affinity mask of process PID to *CPUSET (CPUSETSIZE bytes).
   Returns 0 on success, -1 with errno set on failure.  */
int
sched_setaffinity (pid_t pid, size_t cpusetsize, const cpu_set_t *cpuset)
{
  size_t cnt;

  /* Lazily determine the size of the kernel's internal cpumask_t the
     first time through.  */
  if (unlikely (__kernel_cpumask_size == 0))
    {
      INTERNAL_SYSCALL_DECL (err);
      int res;
      size_t psize = 128;
      void *p = alloca (psize);

      /* sched_getaffinity fails with EINVAL while the buffer is smaller
         than the kernel mask; keep doubling until it fits.  On success
         RES is the number of bytes the kernel copied out.  */
      while (res = INTERNAL_SYSCALL (sched_getaffinity, err, 3, getpid (),
                                     psize, p),
             INTERNAL_SYSCALL_ERROR_P (res, err)
             && INTERNAL_SYSCALL_ERRNO (res, err) == EINVAL)
        p = extend_alloca (p, psize, 2 * psize);

      if (res == 0 || INTERNAL_SYSCALL_ERROR_P (res, err))
        {
          __set_errno (INTERNAL_SYSCALL_ERRNO (res, err));
          return -1;
        }

      __kernel_cpumask_size = res;
    }

  /* We now know the size of the kernel cpumask_t.  Make sure the user
     does not request to set a bit beyond that.  */
  for (cnt = __kernel_cpumask_size; cnt < cpusetsize; ++cnt)
    if (((char *) cpuset)[cnt] != '\0')
      {
        /* Found a nonzero byte.  This means the user request cannot be
           fulfilled.  */
        __set_errno (EINVAL);
        return -1;
      }

  return INLINE_SYSCALL (sched_setaffinity, 3, pid, cpusetsize, cpuset);
}
/* Save the current floating-point environment in *ENVP, then disable all
   exception enables (except trapping kept by the kernel for rounding
   purposes).  Returns 0 on success, -1 if the prctl calls fail.  */
int
__feholdexcept (fenv_t *envp)
{
  fenv_union_t u;
  INTERNAL_SYSCALL_DECL (err);
  int r;

  /* Get the current state.  */
  r = INTERNAL_SYSCALL (prctl, err, 2, PR_GET_FPEXC, &u.l[0]);
  if (INTERNAL_SYSCALL_ERROR_P (r, err))
    return -1;
  u.l[1] = fegetenv_register ();
  *envp = u.fenv;

  /* Clear everything except for the rounding mode and trapping to the
     kernel.  */
  u.l[0] &= ~(PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND
              | PR_FP_EXC_RES | PR_FP_EXC_INV);
  u.l[1] &= SPEFSCR_FRMC | (SPEFSCR_ALL_EXCEPT_ENABLE & ~SPEFSCR_FINXE);

  /* Put the new state in effect.  */
  fesetenv_register (u.l[1]);
  r = INTERNAL_SYSCALL (prctl, err, 2, PR_SET_FPEXC,
                        u.l[0] | PR_FP_EXC_SW_ENABLE);
  if (INTERNAL_SYSCALL_ERROR_P (r, err))
    return -1;

  return 0;
}
int fedisableexcept (int excepts) { int result = 0, pflags, r; INTERNAL_SYSCALL_DECL (err); r = INTERNAL_SYSCALL (prctl, err, 2, PR_GET_FPEXC, &pflags); if (INTERNAL_SYSCALL_ERROR_P (r, err)) return -1; /* Save old enable bits. */ result = __fexcepts_from_prctl (pflags); pflags &= ~__fexcepts_to_prctl (excepts); r = INTERNAL_SYSCALL (prctl, err, 2, PR_SET_FPEXC, pflags | PR_FP_EXC_SW_ENABLE); if (INTERNAL_SYSCALL_ERROR_P (r, err)) return -1; /* If disabling signals for "inexact", also disable trapping to the kernel. */ if ((excepts & FE_INEXACT) != 0) { unsigned long fpescr; fpescr = fegetenv_register (); fpescr &= ~SPEFSCR_FINXE; fesetenv_register (fpescr); } return result; }
/* Return any pending signal or wait for one for the given time. */
static int
do_sigwait (const sigset_t *set, int *sig)
{
  int ret;

#ifdef SIGCANCEL
  sigset_t tmpset;

  /* The internal SIGCANCEL/SIGSETXID signals must never be delivered to
     the caller; strip them from the waited-for set.  */
  if (set != NULL
      && (__builtin_expect (__sigismember (set, SIGCANCEL), 0)
# ifdef SIGSETXID
          || __builtin_expect (__sigismember (set, SIGSETXID), 0)
# endif
          ))
    {
      /* Create a temporary mask without the bit for SIGCANCEL set.  */
      /* We are not copying more than we have to.  */
      memcpy (&tmpset, set, _NSIG / 8);
      __sigdelset (&tmpset, SIGCANCEL);
# ifdef SIGSETXID
      __sigdelset (&tmpset, SIGSETXID);
# endif
      set = &tmpset;
    }
#endif

  /* XXX The size argument hopefully will have to be changed to the
     real size of the user-level sigset_t.  */
#ifdef INTERNAL_SYSCALL
  INTERNAL_SYSCALL_DECL (err);
  /* Retry when the syscall is interrupted by an unrelated signal;
     sigwait must not report EINTR.  */
  do
    ret = INTERNAL_SYSCALL (rt_sigtimedwait, err, 4, set, NULL, NULL,
                            _NSIG / 8);
  while (INTERNAL_SYSCALL_ERROR_P (ret, err)
         && INTERNAL_SYSCALL_ERRNO (ret, err) == EINTR);
  if (! INTERNAL_SYSCALL_ERROR_P (ret, err))
    {
      /* On success the syscall returns the signal number.  */
      *sig = ret;
      ret = 0;
    }
  else
    ret = INTERNAL_SYSCALL_ERRNO (ret, err);
#else
  do
    ret = INLINE_SYSCALL (rt_sigtimedwait, 4, set, NULL, NULL, _NSIG / 8);
  while (ret == -1 && errno == EINTR);
  if (ret != -1)
    {
      *sig = ret;
      ret = 0;
    }
  else
    ret = errno;
#endif

  return ret;
}
/* Send signal SIGNO to thread THREADID.  Returns 0 or an errno value.  */
int
__pthread_kill ( pthread_t threadid, int signo)
{
  struct pthread *pd = (struct pthread *) threadid;

  /* Make sure the descriptor is valid.  */
  if (DEBUGGING_P && INVALID_TD_P (pd))
    /* Not a valid thread handle.  */
    return ESRCH;

  /* Force load of pd->tid into local variable or register.  Otherwise
     if a thread exits between ESRCH test and tgkill, we might return
     EINVAL, because pd->tid would be cleared by the kernel.  */
  pid_t tid = atomic_forced_read (pd->tid);
  if (__builtin_expect (tid <= 0, 0))
    /* Not a valid thread handle.  */
    return ESRCH;

  /* Disallow sending the signal we use for cancellation, timers, and
     for the setxid implementation.  */
  if (signo == SIGCANCEL || signo == SIGTIMER || signo == SIGSETXID)
    return EINVAL;

  /* We have a special syscall to do the work.  */
  INTERNAL_SYSCALL_DECL (err);

  /* One comment: The PID field in the TCB can temporarily be changed
     (in fork).  But this must not affect this code here.  Since this
     function would have to be called while the thread is executing
     fork, it would have to happen in a signal handler.  But this is
     not allowed, pthread_kill is not guaranteed to be async-safe.  */
  int val;
#if defined(__ASSUME_TGKILL) && __ASSUME_TGKILL
  val = INTERNAL_SYSCALL (tgkill, err, 3, THREAD_GETMEM (THREAD_SELF, pid),
                          tid, signo);
#else
# ifdef __NR_tgkill
  val = INTERNAL_SYSCALL (tgkill, err, 3, THREAD_GETMEM (THREAD_SELF, pid),
                          tid, signo);
  /* Fall back to tkill if the kernel lacks tgkill.  */
  if (INTERNAL_SYSCALL_ERROR_P (val, err)
      && INTERNAL_SYSCALL_ERRNO (val, err) == ENOSYS)
# endif
    val = INTERNAL_SYSCALL (tkill, err, 2, tid, signo);
#endif

  return (INTERNAL_SYSCALL_ERROR_P (val, err)
          ? INTERNAL_SYSCALL_ERRNO (val, err) : 0);
}
/* Reserve storage for the data of the file associated with FD. */
/* NOTE(review): this excerpt appears truncated — the matching `#endif`
   and the fallback path (e.g. internal_fallocate) are not visible here;
   verify against the full file.  */
int
posix_fallocate (int fd, __off_t offset, __off_t len)
{
#ifdef __NR_fallocate
# ifndef __ASSUME_FALLOCATE
  /* Skip the syscall once it is known to be missing.  */
  if (__glibc_likely (__have_fallocate >= 0))
# endif
    {
      INTERNAL_SYSCALL_DECL (err);
# ifdef INTERNAL_SYSCALL_TYPES
      int res = INTERNAL_SYSCALL_TYPES (fallocate, err, 4, int, fd,
                                        int, 0, off_t, offset, off_t, len);
# else
      int res = INTERNAL_SYSCALL (fallocate, err, 4, fd, 0, offset, len);
# endif
      if (! INTERNAL_SYSCALL_ERROR_P (res, err))
        return 0;

# ifndef __ASSUME_FALLOCATE
      /* Remember that the kernel has no fallocate syscall.  */
      if (__glibc_unlikely (INTERNAL_SYSCALL_ERRNO (res, err) == ENOSYS))
        __have_fallocate = -1;
      else
# endif
        if (INTERNAL_SYSCALL_ERRNO (res, err) != EOPNOTSUPP)
          return INTERNAL_SYSCALL_ERRNO (res, err);
    }
/* Return any pending signal or wait for one for the given time.
   Stores the delivered signal number in *SIG and returns 0, or
   returns an errno value on failure.  */
static __inline__ int
do_sigwait (const sigset_t *set, int *sig)
{
  int ret;

  /* XXX The size argument hopefully will have to be changed to the
     real size of the user-level sigset_t.  */
#ifdef INTERNAL_SYSCALL
  INTERNAL_SYSCALL_DECL (err);
  /* Retry if interrupted by an unrelated signal handler: POSIX does not
     allow sigwait to fail with EINTR (matches the sibling implementation
     that already loops on EINTR).  */
  do
    ret = INTERNAL_SYSCALL (rt_sigtimedwait, err, 4, set, NULL, NULL,
                            _NSIG / 8);
  while (INTERNAL_SYSCALL_ERROR_P (ret, err)
         && INTERNAL_SYSCALL_ERRNO (ret, err) == EINTR);
  if (! INTERNAL_SYSCALL_ERROR_P (ret, err))
    {
      /* On success the syscall returns the signal number.  */
      *sig = ret;
      ret = 0;
    }
  else
    ret = INTERNAL_SYSCALL_ERRNO (ret, err);
#else
  do
    ret = INLINE_SYSCALL (rt_sigtimedwait, 4, set, NULL, NULL, _NSIG / 8);
  while (ret == -1 && errno == EINTR);
  if (ret != -1)
    {
      *sig = ret;
      ret = 0;
    }
  else
    ret = errno;
#endif

  return ret;
}
/* Advise the kernel about access patterns for the byte range
   [OFFSET, OFFSET+LEN) of FD.  Returns 0 or an errno value.  */
int posix_fadvise64(int fd, off64_t offset, off64_t len, int advice)
{
  INTERNAL_SYSCALL_DECL (err);
  /* ARM has always been funky. */
#if defined (__arm__) || \
    (defined(__UCLIBC_SYSCALL_ALIGN_64BIT__) && (defined(__powerpc__) || defined(__xtensa__)))
  /* arch with 64-bit data in even reg alignment #1: [powerpc/xtensa]
   * custom syscall handler (rearranges @advice to avoid register hole punch) */
  int ret = INTERNAL_SYSCALL (fadvise64_64, err, 6, fd, advice,
                              OFF64_HI_LO (offset), OFF64_HI_LO (len));
#elif defined(__UCLIBC_SYSCALL_ALIGN_64BIT__)
  /* arch with 64-bit data in even reg alignment #2: [arcv2/others-in-future]
   * stock syscall handler in kernel (reg hole punched) */
  int ret = INTERNAL_SYSCALL (fadvise64_64, err, 7, fd, 0,
                              OFF64_HI_LO (offset), OFF64_HI_LO (len),
                              advice);
# else
  /* Default argument order: fd, offset pair, len pair, advice.  */
  int ret = INTERNAL_SYSCALL (fadvise64_64, err, 6, fd,
                              OFF64_HI_LO (offset), OFF64_HI_LO (len),
                              advice);
# endif
  if (INTERNAL_SYSCALL_ERROR_P (ret, err))
    return INTERNAL_SYSCALL_ERRNO (ret, err);
  return 0;
}
/* Change the calling thread's signal mask per HOW/NEWMASK; the previous
   mask is stored in *OLDMASK when non-NULL.  Returns 0 or an errno
   value.  The internal SIGCANCEL and SIGSETXID signals are never
   allowed to become blocked.  */
int
pthread_sigmask (int how, const sigset_t *newmask, sigset_t *oldmask)
{
  sigset_t filtered;

  if (newmask != NULL
      && (__builtin_expect (__sigismember (newmask, SIGCANCEL), 0)
          || __builtin_expect (__sigismember (newmask, SIGSETXID), 0)))
    {
      /* Work on a copy with the reserved signals removed.  */
      filtered = *newmask;
      __sigdelset (&filtered, SIGCANCEL);
      __sigdelset (&filtered, SIGSETXID);
      newmask = &filtered;
    }

#ifdef INTERNAL_SYSCALL
  /* We know that realtime signals are available if NPTL is used.  */
  INTERNAL_SYSCALL_DECL (err);
  int res = INTERNAL_SYSCALL (rt_sigprocmask, err, 4, how, newmask,
                              oldmask, _NSIG / 8);
  if (INTERNAL_SYSCALL_ERROR_P (res, err))
    return INTERNAL_SYSCALL_ERRNO (res, err);
  return 0;
#else
  if (sigprocmask (how, newmask, oldmask) == -1)
    return errno;
  return 0;
#endif
}
/* Store process and child CPU times in *BUF; return elapsed clock ticks
   (or (clock_t) 0 when the kernel legitimately returned -1).  */
clock_t
__times (struct tms *buf)
{
  INTERNAL_SYSCALL_DECL (err);
  clock_t ret = INTERNAL_SYSCALL (times, err, 1, buf);
  if (INTERNAL_SYSCALL_ERROR_P (ret, err)
      && __builtin_expect (INTERNAL_SYSCALL_ERRNO (ret, err) == EFAULT, 0))
    {
      /* This might be an error or not.  For architectures which have no
         separate return value and error indicators we cannot distinguish a
         return value of -1 from an error.  Do it the hard way.  We crash
         applications which pass in an invalid BUF pointer.  */
      /* The touch macro forces a real load+store of each field through the
         asm barrier, so a bad pointer faults here rather than being
         optimized away.  */
#define touch(v) \
  do {								\
    clock_t temp = v;						\
    asm volatile ("" : "+r" (temp));				\
    v = temp;							\
  } while (0)
      touch (buf->tms_utime);
      touch (buf->tms_stime);
      touch (buf->tms_cutime);
      touch (buf->tms_cstime);
      /* If we come here the memory is valid and the kernel did not
         return an EFAULT error.  Return the value given by the
         kernel.  */
    }

  /* Return value (clock_t) -1 signals an error, but if there wasn't any,
     return the following value.  */
  if (ret == (clock_t) -1)
    return (clock_t) 0;

  return ret;
}
/* Remove message queue named NAME.  Returns 0 on success, -1 with
   errno set on failure.  */
int
mq_unlink (const char *name)
{
  /* Queue names must begin with a slash, which the kernel does not see.  */
  if (name[0] != '/')
    {
      __set_errno (EINVAL);
      return -1;
    }

  INTERNAL_SYSCALL_DECL (err);
  int res = INTERNAL_SYSCALL (mq_unlink, err, 1, name + 1);

  if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (res, err)))
    {
      int errval = INTERNAL_SYSCALL_ERRNO (res, err);
      /* While unlink can return either EPERM or EACCES, mq_unlink should
         return just EACCES.  */
      __set_errno (errval == EPERM ? EACCES : errval);
      return -1;
    }

  return res;
}
/* Advise the kernel about access patterns for FD's byte range
   [OFFSET, OFFSET+LEN).  Returns 0 or an errno value.  The syscall
   variant and argument order depend on the architecture.  */
int
posix_fadvise (int fd, off_t offset, off_t len, int advise)
{
  INTERNAL_SYSCALL_DECL (err);
# ifdef __NR_fadvise64
  /* fadvise64 takes LEN as a plain (possibly 32-bit) value.  */
  int ret = INTERNAL_SYSCALL_CALL (fadvise64, err, fd,
                                   __ALIGNMENT_ARG SYSCALL_LL (offset),
                                   len, advise);
# else
#  ifdef __ASSUME_FADVISE64_64_6ARG
  /* 6-argument form: ADVISE comes right after FD to avoid a register
     alignment hole.  */
  int ret = INTERNAL_SYSCALL_CALL (fadvise64_64, err, fd, advise,
                                   __ALIGNMENT_ARG SYSCALL_LL (offset),
                                   SYSCALL_LL (len));
#  else
#   ifdef __ASSUME_FADVISE64_64_NO_ALIGN
#    undef __ALIGNMENT_ARG
#    define __ALIGNMENT_ARG
#   endif
  int ret = INTERNAL_SYSCALL_CALL (fadvise64_64, err, fd,
                                   __ALIGNMENT_ARG SYSCALL_LL (offset),
                                   SYSCALL_LL (len), advise);
#  endif
# endif
  if (INTERNAL_SYSCALL_ERROR_P (ret, err))
    return INTERNAL_SYSCALL_ERRNO (ret, err);
  return 0;
}
/* Arrange for DB's nscd_certainly_running flag to be cleared by the
   kernel when nscd exits.  Returns 1 when the flag was armed, else 0.  */
int
setup_thread (struct database_dyn *db)
{
#ifdef __NR_set_tid_address
  /* Only supported when NPTL is used.  */
  char buf[100];
  if (confstr (_CS_GNU_LIBPTHREAD_VERSION, buf, sizeof (buf)) >= sizeof (buf)
      || strncmp (buf, "NPTL", 4) != 0)
    return 0;

  /* Do not try this at home, kids.  We play with the SETTID address
     even though the process is multi-threaded.  This can only work
     since none of the threads ever terminates.  */
  INTERNAL_SYSCALL_DECL (err);
  int r = INTERNAL_SYSCALL (set_tid_address, err, 1,
                            &db->head->nscd_certainly_running);
  if (!INTERNAL_SYSCALL_ERROR_P (r, err))
    /* We know the kernel can reset this field when nscd terminates.
       So, set the field to a nonzero value which indicates that nscd
       is certainly running and clients can skip the test.  */
    return db->head->nscd_certainly_running = 1;
#endif

  return 0;
}
/* Reserve storage for the data of the file associated with FD. */
/* Returns 0 on success or an errno value; falls back to the userspace
   emulation when the kernel/filesystem lacks fallocate support.  */
int
posix_fallocate (int fd, __off_t offset, __off_t len)
{
#ifdef __NR_fallocate
# ifndef __ASSUME_FALLOCATE
  /* Skip the syscall once it is known to be unavailable.  */
  if (__builtin_expect (__have_fallocate >= 0, 1))
# endif
    {
      INTERNAL_SYSCALL_DECL (err);
      int res = INTERNAL_SYSCALL (fallocate, err, 4, fd, 0, offset, len);

      if (! INTERNAL_SYSCALL_ERROR_P (res, err))
        return 0;

# ifndef __ASSUME_FALLOCATE
      /* Remember that the kernel has no fallocate syscall.  */
      if (__builtin_expect (INTERNAL_SYSCALL_ERRNO (res, err) == ENOSYS, 0))
        __have_fallocate = -1;
      else
# endif
        /* EOPNOTSUPP means the filesystem cannot do it; emulate below.
           Any other error is final.  */
        if (INTERNAL_SYSCALL_ERRNO (res, err) != EOPNOTSUPP)
          return INTERNAL_SYSCALL_ERRNO (res, err);
    }
#endif

  return internal_fallocate (fd, offset, len);
}
/* Set the CPU affinity of thread TH to *CPUSET (CPUSETSIZE bytes).
   Returns 0 on success or an errno value.  */
int
__pthread_setaffinity_new (pthread_t th, size_t cpusetsize,
                           const cpu_set_t *cpuset)
{
  const struct pthread *pd = (const struct pthread *) th;
  INTERNAL_SYSCALL_DECL (err);
  int ret;

  /* Lazily determine the kernel's cpumask size on first use.  */
  if (__builtin_expect (__kernel_cpumask_size == 0, 0))
    {
      ret = __determine_cpumask_size (pd->tid);
      if (ret != 0)
        return ret;
    }

  /* We now know the size of the kernel cpumask_t.  Reject any request
     that sets a bit the kernel cannot represent.  */
  const char *bytes = (const char *) cpuset;
  for (size_t i = __kernel_cpumask_size; i < cpusetsize; ++i)
    if (bytes[i] != '\0')
      /* Found a nonzero byte; the request cannot be fulfilled.  */
      return EINVAL;

  ret = INTERNAL_SYSCALL (sched_setaffinity, err, 3, pd->tid, cpusetsize,
                          cpuset);

  if (INTERNAL_SYSCALL_ERROR_P (ret, err))
    return INTERNAL_SYSCALL_ERRNO (ret, err);
  return 0;
}
/* We can simply use the syscall.  The CPU clocks are not supported
   with this function.  Sleeps per REQ on CLOCK_ID (FLAGS selects
   relative/absolute); stores remaining time in *REM when interrupted.
   Returns 0 or an errno value.  */
int
__clock_nanosleep (clockid_t clock_id, int flags, const struct timespec *req,
                   struct timespec *rem)
{
  INTERNAL_SYSCALL_DECL (err);
  int ret;

  /* The per-thread CPU clock cannot be slept on.  */
  if (clock_id == CLOCK_THREAD_CPUTIME_ID)
    return EINVAL;
  /* Translate the process CPU clock into the kernel's encoding.  */
  if (clock_id == CLOCK_PROCESS_CPUTIME_ID)
    clock_id = MAKE_PROCESS_CPUCLOCK (0, CPUCLOCK_SCHED);

  if (SINGLE_THREAD_P)
    ret = INTERNAL_SYSCALL (clock_nanosleep, err, 4, clock_id, flags,
                            req, rem);
  else
    {
      /* Blocking call: enable async cancellation around it.  */
      int oldstate = LIBC_CANCEL_ASYNC ();

      ret = INTERNAL_SYSCALL (clock_nanosleep, err, 4, clock_id, flags,
                              req, rem);

      LIBC_CANCEL_RESET (oldstate);
    }

  if (INTERNAL_SYSCALL_ERROR_P (ret, err))
    return INTERNAL_SYSCALL_ERRNO (ret, err);
  return 0;
}
/* Return the effective group ID of the calling process, preferring the
   32-bit-uid syscall where available.  */
gid_t
__getegid (void)
{
  INTERNAL_SYSCALL_DECL (err);
#if __ASSUME_32BITUIDS > 0
  /* No error checking. */
  return INTERNAL_SYSCALL (getegid32, err, 0);
#else
# ifdef __NR_getegid32
  /* Probe getegid32 until it is known to be missing.  */
  if (__libc_missing_32bit_uids <= 0)
    {
      int result;
      result = INTERNAL_SYSCALL (getegid32, err, 0);
      if (! INTERNAL_SYSCALL_ERROR_P (result, err)
          || INTERNAL_SYSCALL_ERRNO (result, err) != ENOSYS)
        return result;
      /* Remember the kernel has no 32-bit uid syscalls.  */
      __libc_missing_32bit_uids = 1;
    }
# endif /* __NR_getegid32 */
  /* No error checking. */
  return INTERNAL_SYSCALL (getegid, err, 0);
#endif
}
/* Set the filesystem user ID of the calling process to UID, preferring
   the 32-bit-uid syscall where available.  */
int
setfsuid (uid_t uid)
{
  INTERNAL_SYSCALL_DECL (err);
# if __ASSUME_32BITUIDS > 0
  /* No error checking. */
  return INTERNAL_SYSCALL (setfsuid32, err, 1, uid);
# else
#  ifdef __NR_setfsuid32
  /* Probe setfsuid32 until it is known to be missing.  */
  if (__libc_missing_32bit_uids <= 0)
    {
      int result;
      result = INTERNAL_SYSCALL (setfsuid32, err, 1, uid);
      if (! INTERNAL_SYSCALL_ERROR_P (result, err)
          || INTERNAL_SYSCALL_ERRNO (result, err) != ENOSYS)
        return result;
      /* Remember the kernel has no 32-bit uid syscalls.  */
      __libc_missing_32bit_uids = 1;
    }
#  endif /* __NR_setfsuid32 */
  /* The legacy syscall takes a narrower uid type; reject values that
     would be truncated.  */
  if (uid != (uid_t) ((__kernel_uid_t) uid))
    {
      __set_errno (EINVAL);
      return -1;
    }
  /* No error checking. */
  return INTERNAL_SYSCALL (setfsuid, err, 1, uid);
# endif
}
/* Send signal SIGNO to thread THREADID.  Returns 0 or an errno value.  */
int
__pthread_kill (pthread_t threadid, int signo)
{
  struct pthread *pd = (struct pthread *) threadid;

  /* Make sure the descriptor is valid.  */
  if (DEBUGGING_P && INVALID_TD_P (pd))
    /* Not a valid thread handle.  */
    return ESRCH;

  /* Force load of pd->tid into a local.  Otherwise, if the thread exits
     between this check and the tgkill below, we could wrongly report
     EINVAL because the kernel cleared pd->tid.  */
  pid_t tid = atomic_forced_read (pd->tid);
  if (__glibc_unlikely (tid <= 0))
    /* Not a valid thread handle.  */
    return ESRCH;

  /* The signals used internally for cancellation, timers and the setxid
     implementation may not be sent by users.  */
  if (signo == SIGCANCEL || signo == SIGTIMER || signo == SIGSETXID)
    return EINVAL;

  /* We have a special syscall to do the work.  */
  INTERNAL_SYSCALL_DECL (err);

  pid_t pid = __getpid ();

  int res = INTERNAL_SYSCALL_CALL (tgkill, err, pid, tid, signo);
  if (INTERNAL_SYSCALL_ERROR_P (res, err))
    return INTERNAL_SYSCALL_ERRNO (res, err);
  return 0;
}
/* Set the CPU affinity of process PID to *CPUSET (CPUSETSIZE bytes).
   Returns 0 on success, -1 with errno set on failure.  */
int
__sched_setaffinity_new (pid_t pid, size_t cpusetsize, const cpu_set_t *cpuset)
{
  /* Lazily determine the kernel's internal cpumask size.  */
  if (__builtin_expect (__kernel_cpumask_size == 0, 0))
    {
      INTERNAL_SYSCALL_DECL (err);
      int res;

      size_t psize = 128;
      void *p = alloca (psize);

      /* sched_getaffinity fails with EINVAL while the buffer is too
         small; double it until the call succeeds.  RES is then the
         number of bytes the kernel copied out.  */
      while (res = INTERNAL_SYSCALL (sched_getaffinity, err, 3, getpid (),
                                     psize, p),
             INTERNAL_SYSCALL_ERROR_P (res, err)
             && INTERNAL_SYSCALL_ERRNO (res, err) == EINVAL)
        p = extend_alloca (p, psize, 2 * psize);

      if (res == 0 || INTERNAL_SYSCALL_ERROR_P (res, err))
        {
          __set_errno (INTERNAL_SYSCALL_ERRNO (res, err));
          return -1;
        }

      __kernel_cpumask_size = res;
    }

  /* We now know the size of the kernel cpumask_t.  Make sure the user
     does not request to set a bit beyond that.  */
  for (size_t cnt = __kernel_cpumask_size; cnt < cpusetsize; ++cnt)
    if (((char *) cpuset)[cnt] != '\0')
      {
        /* Found a nonzero byte.  This means the user request cannot be
           fulfilled.  */
        __set_errno (EINVAL);
        return -1;
      }

  int result = INLINE_SYSCALL (sched_setaffinity, 3, pid, cpusetsize, cpuset);

#ifdef RESET_VGETCPU_CACHE
  /* Invalidate any cached vgetcpu result, since the affinity changed.  */
  if (result != -1)
    RESET_VGETCPU_CACHE ();
#endif

  return result;
}
/* Try to read CPU clock CLOCK_ID into *TP via the kernel.  Returns 0 on
   success, otherwise an errno value; records in globals whether the
   kernel lacks POSIX (CPU) timers so later calls can skip the probe.  */
static int
maybe_syscall_gettime_cpu (clockid_t clock_id, struct timespec *tp)
{
  int e = EINVAL;

  if (!__libc_missing_posix_cpu_timers)
    {
      INTERNAL_SYSCALL_DECL (err);
      int r = INTERNAL_GETTIME (clock_id, tp);
      if (!INTERNAL_SYSCALL_ERROR_P (r, err))
        return 0;

      e = INTERNAL_SYSCALL_ERRNO (r, err);
# ifndef __ASSUME_POSIX_TIMERS
      if (e == ENOSYS)
        {
          /* The kernel has no POSIX timer support at all.  */
          __libc_missing_posix_timers = 1;
          __libc_missing_posix_cpu_timers = 1;
          e = EINVAL;
        }
      else
# endif
        {
          if (e == EINVAL)
            {
# ifdef HAVE_CLOCK_GETRES_VSYSCALL
              /* Check whether the kernel supports CPU clocks at all.
                 If not, record it for the future.  */
              r = INTERNAL_VSYSCALL (clock_getres, err, 2,
                                     MAKE_PROCESS_CPUCLOCK (0, CPUCLOCK_SCHED),
                                     NULL);
# else
              /* Check whether the kernel supports CPU clocks at all.
                 If not, record it for the future.  */
              r = INTERNAL_SYSCALL (clock_getres, err, 2,
                                    MAKE_PROCESS_CPUCLOCK (0, CPUCLOCK_SCHED),
                                    NULL);
# endif
              if (INTERNAL_SYSCALL_ERROR_P (r, err))
                __libc_missing_posix_cpu_timers = 1;
            }
        }
    }

  return e;
}
/* Queue signal SIGNO with VALUE for thread THREADID.  Returns 0 or an
   errno value.  */
int
pthread_sigqueue ( pthread_t threadid, int signo, const union sigval value)
{
#ifdef __NR_rt_tgsigqueueinfo
  struct pthread *pd = (struct pthread *) threadid;

  /* Make sure the descriptor is valid.  */
  if (DEBUGGING_P && INVALID_TD_P (pd))
    /* Not a valid thread handle.  */
    return ESRCH;

  /* Force load of pd->tid into local variable or register.  Otherwise
     if a thread exits between ESRCH test and tgkill, we might return
     EINVAL, because pd->tid would be cleared by the kernel.  */
  pid_t tid = atomic_forced_read (pd->tid);
  if (__builtin_expect (tid <= 0, 0))
    /* Not a valid thread handle.  */
    return ESRCH;

  /* Disallow sending the signal we use for cancellation, timers, and
     for the setxid implementation.  */
  if (signo == SIGCANCEL || signo == SIGTIMER || signo == SIGSETXID)
    return EINVAL;

  /* Set up the siginfo_t structure.  */
  siginfo_t info;
  memset (&info, '\0', sizeof (siginfo_t));
  info.si_signo = signo;
  info.si_code = SI_QUEUE;
  info.si_pid = THREAD_GETMEM (THREAD_SELF, pid);
  info.si_uid = getuid ();
  info.si_value = value;

  /* We have a special syscall to do the work.  */
  INTERNAL_SYSCALL_DECL (err);

  /* One comment: The PID field in the TCB can temporarily be changed
     (in fork).  But this must not affect this code here.  Since this
     function would have to be called while the thread is executing
     fork, it would have to happen in a signal handler.  But this is
     not allowed, pthread_sigqueue is not guaranteed to be
     async-safe.  */
  int val = INTERNAL_SYSCALL (rt_tgsigqueueinfo, err, 4,
                              THREAD_GETMEM (THREAD_SELF, pid),
                              tid, signo, &info);

  return (INTERNAL_SYSCALL_ERROR_P (val, err)
          ? INTERNAL_SYSCALL_ERRNO (val, err) : 0);
#else
  return ENOSYS;
#endif
}
/* Determine the size in bytes of the kernel's internal cpumask_t by
   probing sched_getaffinity with growing buffers for thread TID.
   On success stores the size in __kernel_cpumask_size and returns 0;
   otherwise returns an errno value.  */
int
__determine_cpumask_size (pid_t tid)
{
  INTERNAL_SYSCALL_DECL (err);
  int res;

  size_t psize = 128;
  void *p = alloca (psize);

  /* sched_getaffinity fails with EINVAL while the buffer is smaller
     than the kernel mask; keep doubling until it fits.  */
  while (res = INTERNAL_SYSCALL (sched_getaffinity, err, 3, tid, psize, p),
         INTERNAL_SYSCALL_ERROR_P (res, err)
         && INTERNAL_SYSCALL_ERRNO (res, err) == EINVAL)
    p = extend_alloca (p, psize, 2 * psize);

  if (res == 0 || INTERNAL_SYSCALL_ERROR_P (res, err))
    return INTERNAL_SYSCALL_ERRNO (res, err);

  /* RES is the number of bytes the kernel copied out.  */
  __kernel_cpumask_size = res;

  return 0;
}
/* Advise the kernel about access patterns for FD's byte range
   [OFFSET, OFFSET+LEN) using the ARM-specific fadvise64_64 entry point
   (advice is passed second to avoid a register-pair alignment hole).
   Returns 0 or an errno value.  */
int posix_fadvise(int fd, off_t offset, off_t len, int advise)
{
  INTERNAL_SYSCALL_DECL (err);
  int ret = INTERNAL_SYSCALL (arm_fadvise64_64, err, 6, fd, advise,
                              __LONG_LONG_PAIR (HIGH_BITS(offset),
                                                (long)offset),
                              __LONG_LONG_PAIR (HIGH_BITS(len),
                                                (long)len));
  if (INTERNAL_SYSCALL_ERROR_P (ret, err))
    return INTERNAL_SYSCALL_ERRNO (ret, err);
  return 0;
}
/* Write the NIOV buffers in IOV to FD with writev, retrying on EINTR.
   Returns true only when exactly TOTAL bytes were written.  */
static bool
writev_for_fatal (int fd, const struct iovec *iov, size_t niov, size_t total)
{
  INTERNAL_SYSCALL_DECL (err);
  ssize_t written;

  /* Restart the write if a signal interrupts it.  */
  do
    written = INTERNAL_SYSCALL (writev, err, 3, fd, iov, niov);
  while (INTERNAL_SYSCALL_ERROR_P (written, err)
         && INTERNAL_SYSCALL_ERRNO (written, err) == EINTR);

  /* Success only if everything went out in one call.  */
  return written == total;
}
/* Reserve storage for the data of the file associated with FD.
   Returns 0 on success or an errno value; falls back to the userspace
   emulation when the filesystem lacks fallocate support.  */
int
posix_fallocate (int fd, __off_t offset, __off_t len)
{
  INTERNAL_SYSCALL_DECL (err);
  int ret = INTERNAL_SYSCALL (fallocate, err, 4, fd, 0, offset, len);

  if (! INTERNAL_SYSCALL_ERROR_P (ret, err))
    return 0;

  int errval = INTERNAL_SYSCALL_ERRNO (ret, err);
  /* Only EOPNOTSUPP means "emulate"; anything else is final.  */
  if (errval != EOPNOTSUPP)
    return errval;

  return internal_fallocate (fd, offset, len);
}
/* Advise the kernel about access patterns for FD's byte range
   [OFFSET, OFFSET+LEN).  Returns 0 or an errno value; ENOSYS when the
   kernel has no fadvise64 syscall.  */
int
posix_fadvise (int fd, off_t offset, off_t len, int advise)
{
#ifdef __NR_fadvise64
  INTERNAL_SYSCALL_DECL (err);
  int res = INTERNAL_SYSCALL (fadvise64, err, 4, fd, offset, len, advise);
  if (INTERNAL_SYSCALL_ERROR_P (res, err))
    return INTERNAL_SYSCALL_ERRNO (res, err);
  return 0;
#else
  return ENOSYS;
#endif
}
/* Get information about the file NAME in BUF. */ int __xstat (int vers, const char *name, struct stat *buf) { INTERNAL_SYSCALL_DECL (err); int result; struct kernel_stat kbuf; if (vers == _STAT_VER_KERNEL64) { result = INTERNAL_SYSCALL (stat64, err, 2, name, buf); if (__builtin_expect (!INTERNAL_SYSCALL_ERROR_P (result, err), 1)) return result; __set_errno (INTERNAL_SYSCALL_ERRNO (result, err)); return -1; } result = INTERNAL_SYSCALL (stat, err, 2, name, &kbuf); if (__builtin_expect (!INTERNAL_SYSCALL_ERROR_P (result, err), 1)) return __xstat_conv (vers, &kbuf, buf); __set_errno (INTERNAL_SYSCALL_ERRNO (result, err)); return -1; }
/* Get information about the file NAME in BUF. */
/* VERS selects the stat structure layout; returns 0 on success, -1 with
   errno set on failure.  Probes stat64 at runtime when its presence is
   not guaranteed.  */
int
__xstat (int vers, const char *name, struct stat *buf)
{
  INTERNAL_SYSCALL_DECL (err);
  int result;
  struct kernel_stat kbuf;

#if __ASSUME_STAT64_SYSCALL > 0
  if (vers == _STAT_VER_KERNEL64)
    {
      result = INTERNAL_SYSCALL (stat64, err, 2, name, buf);
      if (__builtin_expect (!INTERNAL_SYSCALL_ERROR_P (result, err), 1))
        return result;
      __set_errno (INTERNAL_SYSCALL_ERRNO (result, err));
      return -1;
    }
#elif defined __NR_stat64
  /* Try stat64 once; if the kernel reports ENOSYS remember that and
     fall through to the legacy path.  */
  if (vers == _STAT_VER_KERNEL64 && !__libc_missing_axp_stat64)
    {
      int errno_out;
      result = INTERNAL_SYSCALL (stat64, err, 2, name, buf);
      if (__builtin_expect (!INTERNAL_SYSCALL_ERROR_P (result, err), 1))
        return result;
      errno_out = INTERNAL_SYSCALL_ERRNO (result, err);
      if (errno_out != ENOSYS)
        {
          __set_errno (errno_out);
          return -1;
        }
      __libc_missing_axp_stat64 = 1;
    }
#endif

  /* Legacy path: fill the kernel stat structure and convert to the
     layout requested by VERS.  */
  result = INTERNAL_SYSCALL (stat, err, 2, name, &kbuf);
  if (__builtin_expect (!INTERNAL_SYSCALL_ERROR_P (result, err), 1))
    return __xstat_conv (vers, &kbuf, buf);
  __set_errno (INTERNAL_SYSCALL_ERRNO (result, err));
  return -1;
}
/* Get information about the file NAME relative to FD in ST. */
/* Uses fstatat64 and converts the result to the 32-bit layout selected
   by VERS.  */
int
__fxstatat (int vers, int fd, const char *file, struct stat *st, int flag)
{
  int result;
  INTERNAL_SYSCALL_DECL (err);
  struct stat64 st64;

  result = INTERNAL_SYSCALL (fstatat64, err, 4, fd, file, &st64, flag);
  if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (result, err)))
    /* NOTE(review): passing -RESULT assumes the error is encoded as a
       negative errno in RESULT on this architecture; other files here
       use INTERNAL_SYSCALL_ERRNO instead — verify against this port's
       syscall conventions.  */
    return INLINE_SYSCALL_ERROR_RETURN_VALUE (-result);
  else
    return __xstat32_conv (vers, &st64, st);
}