/* Advise the kernel about the expected access pattern for the byte range
   [OFFSET, OFFSET+LEN) of the file open on FD.  Per POSIX, returns 0 on
   success or a positive error number on failure; errno is not set.
   Which syscall is used depends on what the kernel/ABI provides.  */
int posix_fadvise (int fd, off_t offset, off_t len, int advise)
{
  INTERNAL_SYSCALL_DECL (err);
# ifdef __NR_fadvise64
  /* fadvise64 is available: LEN is a plain off_t and the advice comes
     last.  __ALIGNMENT_ARG pads the register list on ABIs that require
     64-bit syscall arguments in an even/odd register pair.  */
  int ret = INTERNAL_SYSCALL_CALL (fadvise64, err, fd,
                                   __ALIGNMENT_ARG SYSCALL_LL (offset),
                                   len, advise);
# else
# ifdef __ASSUME_FADVISE64_64_6ARG
  /* fadvise64_64 variant that takes the advice as the second argument,
     keeping the call within six argument registers despite two 64-bit
     values being split into register pairs.  */
  int ret = INTERNAL_SYSCALL_CALL (fadvise64_64, err, fd, advise,
                                   __ALIGNMENT_ARG SYSCALL_LL (offset),
                                   SYSCALL_LL (len));
# else
# ifdef __ASSUME_FADVISE64_64_NO_ALIGN
  /* This ABI does not require register-pair alignment for 64-bit
     arguments, so neutralize the padding macro for the call below.  */
# undef __ALIGNMENT_ARG
# define __ALIGNMENT_ARG
# endif
  /* Standard fadvise64_64 layout: both OFFSET and LEN as 64-bit values,
     advice last.  */
  int ret = INTERNAL_SYSCALL_CALL (fadvise64_64, err, fd,
                                   __ALIGNMENT_ARG SYSCALL_LL (offset),
                                   SYSCALL_LL (len), advise);
# endif
# endif
  if (INTERNAL_SYSCALL_ERROR_P (ret, err))
    return INTERNAL_SYSCALL_ERRNO (ret, err);
  return 0;
}
int __pthread_kill (pthread_t threadid, int signo) { struct pthread *pd = (struct pthread *) threadid; /* Make sure the descriptor is valid. */ if (DEBUGGING_P && INVALID_TD_P (pd)) /* Not a valid thread handle. */ return ESRCH; /* Force load of pd->tid into local variable or register. Otherwise if a thread exits between ESRCH test and tgkill, we might return EINVAL, because pd->tid would be cleared by the kernel. */ pid_t tid = atomic_forced_read (pd->tid); if (__glibc_unlikely (tid <= 0)) /* Not a valid thread handle. */ return ESRCH; /* Disallow sending the signal we use for cancellation, timers, for the setxid implementation. */ if (signo == SIGCANCEL || signo == SIGTIMER || signo == SIGSETXID) return EINVAL; /* We have a special syscall to do the work. */ INTERNAL_SYSCALL_DECL (err); pid_t pid = __getpid (); int val = INTERNAL_SYSCALL_CALL (tgkill, err, pid, tid, signo); return (INTERNAL_SYSCALL_ERROR_P (val, err) ? INTERNAL_SYSCALL_ERRNO (val, err) : 0); }
/* Reserve storage for the data of the file associated with FD, covering
   the range [OFFSET, OFFSET+LEN).  Returns 0 on success or a positive
   error number.  Falls back to the write-based emulation only when the
   filesystem does not support the fallocate syscall.  */
int __posix_fallocate64_l64 (int fd, __off64_t offset, __off64_t len)
{
  INTERNAL_SYSCALL_DECL (err);
#ifdef INTERNAL_SYSCALL_TYPES
  /* Some ABIs need explicit per-argument type information for the
     64-bit offset/length pair.  Mode 0 = allocate (no FALLOC_FL_* flags).  */
  int rc = INTERNAL_SYSCALL_TYPES (fallocate, err, 4, int, fd, int, 0,
                                   off_t, offset, off_t, len);
#else
  int rc = INTERNAL_SYSCALL_CALL (fallocate, err, fd, 0,
                                  SYSCALL_LL64 (offset), SYSCALL_LL64 (len));
#endif
  if (INTERNAL_SYSCALL_ERROR_P (rc, err))
    {
      int errcode = INTERNAL_SYSCALL_ERRNO (rc, err);
      /* Any failure other than "not supported" is reported as-is.  */
      if (errcode != EOPNOTSUPP)
        return errcode;
      /* Emulate the allocation for filesystems without fallocate.  */
      return internal_fallocate64 (fd, offset, len);
    }
  return 0;
}
/* Request cancellation of thread TH.  Returns 0 on success or a positive
   error number.  The target's cancelhandling word is updated with a CAS
   retry loop; when the target runs in asynchronous-cancel mode it is
   additionally interrupted with SIGCANCEL.  */
int pthread_cancel (pthread_t th)
{
  volatile struct pthread *pd = (volatile struct pthread *) th;

  /* Make sure the descriptor is valid.  */
  if (INVALID_TD_P (pd))
    /* Not a valid thread handle.  */
    return ESRCH;

#ifdef SHARED
  /* In shared builds the cancellation machinery is initialized lazily.  */
  pthread_cancel_init ();
#endif

  int result = 0;
  int oldval;
  int newval;
  do
    {
    again:
      oldval = pd->cancelhandling;
      newval = oldval | CANCELING_BITMASK | CANCELED_BITMASK;

      /* Avoid doing unnecessary work.  The atomic operation can
         potentially be expensive if the bus has to be locked and remote
         cache lines have to be invalidated.  */
      if (oldval == newval)
        break;

      /* If the cancellation is handled asynchronously just send a
         signal.  We avoid this if possible since it's more expensive.  */
      if (CANCEL_ENABLED_AND_CANCELED_AND_ASYNCHRONOUS (newval))
        {
          /* Mark the cancellation as "in progress".  Only CANCELING is
             set here; the signal handler in the target sets CANCELED.  */
          if (atomic_compare_and_exchange_bool_acq (&pd->cancelhandling,
                                                    oldval | CANCELING_BITMASK,
                                                    oldval))
            /* Lost the race against a concurrent update; re-read.  */
            goto again;

#ifdef SIGCANCEL
          /* The cancellation handler will take care of marking the
             thread as canceled.  */
          pid_t pid = getpid ();
          INTERNAL_SYSCALL_DECL (err);
          int val = INTERNAL_SYSCALL_CALL (tgkill, err, pid, pd->tid,
                                           SIGCANCEL);
          if (INTERNAL_SYSCALL_ERROR_P (val, err))
            result = INTERNAL_SYSCALL_ERRNO (val, err);
#else
          /* It should be impossible to get here at all, since
             pthread_setcanceltype should never have allowed
             PTHREAD_CANCEL_ASYNCHRONOUS to be set.  */
          abort ();
#endif
          break;
        }

      /* A single-threaded process should be able to kill itself, since
         there is nothing in the POSIX specification that says that it
         cannot.  So we set multiple_threads to true so that cancellation
         points get executed.  */
      THREAD_SETMEM (THREAD_SELF, header.multiple_threads, 1);
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
      __pthread_multiple_threads = *__libc_multiple_threads_ptr = 1;
#endif
    }
  /* Mark the thread as canceled.  This has to be done atomically since
     other bits could be modified as well.  */
  while (atomic_compare_and_exchange_bool_acq (&pd->cancelhandling, newval,
                                               oldval));

  return result;
}
/* C11 thrd_yield: hint to the scheduler that other threads may run.
   sched_yield always succeeds on Linux, so the result is ignored.  */
void thrd_yield (void)
{
  INTERNAL_SYSCALL_DECL (err);
  INTERNAL_SYSCALL_CALL (sched_yield, err);
}