/* clock_gettime backend: try the vDSO fast path when available, then
 * the real syscall, then emulate CLOCK_REALTIME via gettimeofday on
 * ancient kernels lacking SYS_clock_gettime. Returns 0 or -1/errno via
 * __syscall_ret. */
int __clock_gettime(clockid_t clk, struct timespec *ts)
{
	int r;
#ifdef VDSO_CGT_SYM
	int (*f)(clockid_t, struct timespec *) =
		(int (*)(clockid_t, struct timespec *))vdso_func;
	if (f) {
		r = f(clk, ts);
		if (!r) return r;
		if (r == -EINVAL) return __syscall_ret(r);
		/* Fall through on errors other than EINVAL. Some buggy
		 * vdso implementations return ENOSYS for clocks they
		 * can't handle, rather than making the syscall. This
		 * also handles the case where cgt_init fails to find
		 * a vdso function to use. */
	}
#endif
	r = __syscall(SYS_clock_gettime, clk, ts);
	if (r == -ENOSYS) {
		if (clk == CLOCK_REALTIME) {
			/* gettimeofday stores microseconds where tv_nsec
			 * lives; scale them up to nanoseconds. */
			__syscall(SYS_gettimeofday, ts, 0);
			ts->tv_nsec = (int)ts->tv_nsec * 1000;
			return 0;
		}
		r = -EINVAL;
	}
	return __syscall_ret(r);
}
/* fchmodat with AT_SYMLINK_NOFOLLOW emulation: the kernel syscall
 * ignores the flag argument, so the no-follow case is implemented by
 * opening an O_PATH fd and chmod'ing it through /proc/self/fd. */
int fchmodat(int fd, const char *path, mode_t mode, int flag)
{
	if (!flag) return syscall(SYS_fchmodat, fd, path, mode, flag);
	if (flag != AT_SYMLINK_NOFOLLOW)
		return __syscall_ret(-EINVAL);
	struct stat st;
	int ret, fd2;
	char proc[15+3*sizeof(int)];
	/* The mode of a symlink itself cannot be changed. */
	if ((ret = __syscall(SYS_fstatat, fd, path, &st, flag)))
		return __syscall_ret(ret);
	if (S_ISLNK(st.st_mode))
		return __syscall_ret(-EOPNOTSUPP);
	/* O_NOFOLLOW makes open fail with ELOOP on a symlink; map that
	 * to the same unsupported-operation error as above. */
	if ((fd2 = __syscall(SYS_openat, fd, path, O_RDONLY|O_PATH|O_NOFOLLOW|O_NOCTTY)) < 0) {
		if (fd2 == -ELOOP) return __syscall_ret(-EOPNOTSUPP);
		return __syscall_ret(fd2);
	}
	__procfdname(proc, fd2);
	/* Re-check through /proc that the opened object is still not a
	 * symlink (guards against a racing replacement), then chmod. */
	if (!(ret = __syscall(SYS_stat, proc, &st)) && !S_ISLNK(st.st_mode))
		ret = __syscall(SYS_chmod, proc, mode);
	__syscall(SYS_close, fd2);
	return __syscall_ret(ret);
}
/* Body of the child process created by posix_spawn: applies the
 * requested signal mask, attempts the exec, and on failure reports
 * errno back to the parent through the status pipe, then exits 127. */
static int child(void *args_vp)
{
	int ret;
	struct args *args = args_vp;
	int p = args->p[1];
	const posix_spawnattr_t *restrict attr = args->attr;
	close(args->p[0]);
	/* Close-on-exec flag may have been lost if we moved the pipe
	 * to a different fd. We don't use F_DUPFD_CLOEXEC above because
	 * it would fail on older kernels and atomicity is not needed --
	 * in this process there are no threads or signal handlers. */
	__syscall(SYS_fcntl, p, F_SETFD, FD_CLOEXEC);
	pthread_sigmask(SIG_SETMASK, (attr->__flags & POSIX_SPAWN_SETSIGMASK)
		? &attr->__ss : &args->oldmask, 0);
	/* exec only returns on failure; capture the error code. */
	args->exec(args->path, args->argv, args->envp);
	ret = -errno;
	/* Since sizeof errno < PIPE_BUF, the write is atomic. */
	ret = -ret;
	if (ret) while (__syscall(SYS_write, p, &ret, sizeof ret) < 0);
	_exit(127);
}
/* futimens: set timestamps on an open fd. Prefers utimensat with a
 * null path; on kernels lacking it, falls back to the legacy
 * futimesat/utimes interfaces at microsecond precision. */
int futimens(int fd, const struct timespec times[2])
{
	int r = __syscall(SYS_utimensat, fd, 0, times, 0);
#ifdef SYS_futimesat
	if (r != -ENOSYS) return __syscall_ret(r);
	struct timeval *tv = 0, tmp[2];
	if (times) {
		int i;
		tv = tmp;
		for (i=0; i<2; i++) {
			/* tv_nsec >= 1e9 is either one of the special
			 * tokens UTIME_NOW/UTIME_OMIT or invalid. */
			if (times[i].tv_nsec >= 1000000000ULL) {
				/* Both UTIME_NOW is equivalent to passing
				 * a null times pointer ("current time"). */
				if (times[i].tv_nsec == UTIME_NOW &&
				    times[1-i].tv_nsec == UTIME_NOW) {
					tv = 0;
					break;
				}
				/* UTIME_OMIT is inexpressible via the
				 * legacy interfaces. */
				if (times[i].tv_nsec == UTIME_OMIT)
					return __syscall_ret(-ENOSYS);
				return __syscall_ret(-EINVAL);
			}
			tmp[i].tv_sec = times[i].tv_sec;
			tmp[i].tv_usec = times[i].tv_nsec / 1000;
		}
	}
	r = __syscall(SYS_futimesat, fd, 0, tv);
	if (r != -ENOSYS || fd != AT_FDCWD) return __syscall_ret(r);
	/* Last resort for AT_FDCWD on kernels without futimesat.
	 * NOTE(review): passes a null pathname to SYS_utimes — confirm
	 * this degenerate case is intended. */
	r = __syscall(SYS_utimes, 0, tv);
#endif
	return __syscall_ret(r);
}
/* Create an epoll instance. Falls back to the old epoll_create syscall
 * on kernels lacking epoll_create1, emulating EPOLL_CLOEXEC via fcntl.
 * Previously the fallback rejected any nonzero flags, so EPOLL_CLOEXEC
 * failed with ENOSYS on old kernels even though it is emulable. */
int epoll_create1(int flags)
{
	int r = __syscall(SYS_epoll_create1, flags);
#ifdef SYS_epoll_create
	/* Only EPOLL_CLOEXEC can be emulated; any other flag bit must
	 * still fail with the original ENOSYS. */
	if (r==-ENOSYS && !(flags & ~EPOLL_CLOEXEC)) {
		r = __syscall(SYS_epoll_create, 1);
		if (r >= 0 && (flags & EPOLL_CLOEXEC))
			__syscall(SYS_fcntl, r, F_SETFD, FD_CLOEXEC);
	}
#endif
	return __syscall_ret(r);
}
/* Wait for events on an epoll fd, with an optional signal mask applied
 * atomically for the duration of the wait. */
int epoll_pwait(int fd, struct epoll_event *ev, int cnt, int to, const sigset_t *sigs)
{
	int ret = __syscall(SYS_epoll_pwait, fd, ev, cnt, to, sigs, _NSIG/8);
#ifdef SYS_epoll_wait
	/* Kernels without epoll_pwait can still service calls that did
	 * not request a mask via the plain epoll_wait syscall. */
	if (ret == -ENOSYS && sigs == 0)
		ret = __syscall(SYS_epoll_wait, fd, ev, cnt, to);
#endif
	return __syscall_ret(ret);
}
/* Internal setrlimit backend: prefer the 64-bit prlimit64 syscall on
 * the current process, falling back to legacy setrlimit on old
 * kernels. Returns 0 or a negative errno (callers apply __syscall_ret). */
int __setrlimit(int resource, const struct rlimit *rlim)
{
	int ret = __syscall(SYS_prlimit64, 0, resource, rlim, 0);
	if (ret != -ENOSYS) return ret;
	/* The legacy syscall takes unsigned long limits; saturate any
	 * 64-bit value at the all-ones "infinity" representation. */
	unsigned long lim[2];
	lim[0] = MIN(rlim->rlim_cur, -1UL);
	lim[1] = MIN(rlim->rlim_max, -1UL);
	return __syscall(SYS_setrlimit, resource, lim);
}
/* Change working directory to the one referenced by fd. If the kernel
 * rejects the fd with EBADF yet fcntl shows it is open (e.g. an O_PATH
 * descriptor on old kernels), retry through the /proc/self/fd link. */
int fchdir(int fd)
{
	int ret = __syscall(SYS_fchdir, fd);
	if (ret != -EBADF) return __syscall_ret(ret);
	if (__syscall(SYS_fcntl, fd, F_GETFD) < 0) return __syscall_ret(ret);
	char proc[15+3*sizeof(int)];
	__procfdname(proc, fd);
	return syscall(SYS_chdir, proc);
}
/* fstat for the MCFI port: the stat buffer pointer goes through
 * mcfi_sandbox_mask before reaching the kernel (presumably to confine
 * it to the sandbox address range — TODO confirm its contract). If the
 * kernel says EBADF but fcntl shows the fd is open (e.g. O_PATH fds on
 * old kernels), retry as a path stat through /proc/self/fd. */
int fstat(int fd, struct stat *st)
{
	int ret = __syscall(SYS_fstat, fd, mcfi_sandbox_mask(st));
	if (ret != -EBADF || __syscall(SYS_fcntl, fd, F_GETFD) < 0)
		return __syscall_ret(ret);
	char buf[15+3*sizeof(int)];
	__procfdname(buf, fd);
	return syscall(SYS_stat, buf, mcfi_sandbox_mask(st));
}
/* Create a FILE for an already-open file descriptor. The FILE struct
 * and its I/O buffer share a single allocation. Returns 0 with errno
 * set (EINVAL for a bad mode, or malloc's error) on failure; the fd is
 * never closed here. */
FILE *__fdopen(int fd, const char *mode)
{
	FILE *f;
	struct termios tio;

	/* Check for valid initial mode character */
	if (!strchr("rwa", *mode)) {
		errno = EINVAL;
		return 0;
	}

	/* Allocate FILE+buffer or fail */
	if (!(f=malloc(sizeof *f + UNGET + BUFSIZ))) return 0;

	/* Zero-fill only the struct, not the buffer */
	memset(f, 0, sizeof *f);

	/* Impose mode restrictions */
	if (!strchr(mode, '+')) f->flags = (*mode == 'r') ? F_NOWR : F_NORD;

	/* Apply close-on-exec flag */
	if (strchr(mode, 'e')) __syscall(SYS_fcntl, fd, F_SETFD, FD_CLOEXEC);

	/* Set append mode on fd if opened for append */
	if (*mode == 'a') {
		int flags = __syscall(SYS_fcntl, fd, F_GETFL);
		__syscall(SYS_fcntl, fd, F_SETFL, flags | O_APPEND);
	}

	f->fd = fd;
	/* Buffer sits after the struct, leaving UNGET bytes of pushback
	 * headroom in front of it. */
	f->buf = (unsigned char *)f + sizeof *f + UNGET;
	f->buf_size = BUFSIZ;

	/* Activate line buffered mode for terminals */
	f->lbf = EOF;
	if (!(f->flags & F_NOWR) && !__syscall(SYS_ioctl, fd, TCGETS, &tio))
		f->lbf = '\n';

	/* Initialize op ptrs. No problem if some are unneeded. */
	f->read = __stdio_read;
	f->write = __stdio_write;
	f->seek = __stdio_seek;
	f->close = __stdio_close;

	/* Single-threaded processes skip stdio locking entirely. */
	if (!libc.threaded) f->lock = -1;

	/* Add new FILE to open file list */
	OFLLOCK();
	f->next = libc.ofl_head;
	if (libc.ofl_head) libc.ofl_head->prev = f;
	libc.ofl_head = f;
	OFLUNLOCK();

	return f;
}
/* Deliver signal sig to the calling thread. All signals are blocked
 * around the pid/tid lookup and the tgkill itself so the identifiers
 * stay consistent for the duration of the operation. */
int raise(int sig)
{
	sigset_t saved;
	__syscall(SYS_rt_sigprocmask, SIG_BLOCK, SIGALL_SET, &saved, __SYSCALL_SSLEN);
	int tid = syscall(SYS_gettid);
	int pid = syscall(SYS_getpid);
	int ret = syscall(SYS_tgkill, pid, tid, sig);
	__syscall(SYS_rt_sigprocmask, SIG_SETMASK, &saved, 0, __SYSCALL_SSLEN);
	return ret;
}
int fstat(int fd, struct stat* st) { int ret = __syscall(SYS_fstat, fd, st); if (ret != -EBADF || __syscall(SYS_fcntl, fd, F_GETFD) < 0) return __syscall_ret(ret); char buf[15 + 3 * sizeof(int)]; __procfdname(buf, fd); #ifdef SYS_stat return syscall(SYS_stat, buf, st); #else return syscall(SYS_fstatat, AT_FDCWD, buf, st, 0); #endif }
/* Map a file read-only for internal consumption (locale/tz data etc.).
 * On success returns the mapping and stores its length in *size; on
 * any failure returns 0. The fd is always closed before returning —
 * the mapping, if any, survives independently of it. */
const char unsigned* __map_file(const char* pathname, size_t* size)
{
	struct stat st;
	const unsigned char* map = MAP_FAILED;
	/* O_NONBLOCK avoids hanging if the path is a FIFO; O_CLOEXEC
	 * keeps the short-lived fd from leaking across exec. */
	int fd = __sys_open(pathname, O_RDONLY | O_CLOEXEC | O_NONBLOCK);
	if (fd < 0) return 0;
	if (!__syscall(SYS_fstat, fd, &st)) {
		map = __mmap(0, st.st_size, PROT_READ, MAP_SHARED, fd, 0);
		/* *size is set even if mmap failed; harmless since the
		 * caller sees a null return in that case. */
		*size = st.st_size;
	}
	__syscall(SYS_close, fd);
	return map == MAP_FAILED ? 0 : map;
}
/* posix_fadvise returns 0 or a positive errno value directly (it never
 * sets errno), hence the negation of the raw syscall result. */
int posix_fadvise(int fd, off_t base, off_t len, int advice)
{
#if defined(SYSCALL_FADVISE_6_ARG)
	/* Some archs, at least arm and powerpc, have the syscall
	 * arguments reordered to avoid needing 7 argument registers
	 * due to 64-bit argument alignment. */
	return -__syscall(SYS_fadvise, fd, advice, __SYSCALL_LL_E(base),
		__SYSCALL_LL_E(len));
#else
	return -__syscall(SYS_fadvise, fd, __SYSCALL_LL_O(base),
		__SYSCALL_LL_E(len), advice);
#endif
}
int signalfd(int fd, const sigset_t *sigs, int flags) { int ret = __syscall(SYS_signalfd4, fd, sigs, _NSIG/8, flags); if (ret != -ENOSYS) return __syscall_ret(ret); ret = __syscall(SYS_signalfd, fd, sigs, _NSIG/8); if (ret >= 0) { if (flags & SFD_CLOEXEC) __syscall(SYS_fcntl, ret, F_SETFD, FD_CLOEXEC); if (flags & SFD_NONBLOCK) __syscall(SYS_fcntl, ret, F_SETFL, O_NONBLOCK); } return __syscall_ret(ret); }
/* clock_gettime via syscall only, emulating CLOCK_REALTIME through
 * gettimeofday on kernels lacking SYS_clock_gettime. Returns 0 on
 * success, or -1 with errno set.
 * BUG FIX: the fallback previously called
 *   __syscall(SYS_gettimeofday, clk, ts, 0)
 * passing the clock id where the kernel expects the timeval pointer
 * (and ts as the timezone), so the result was never stored in *ts.
 * The correct argument order is (tv, tz) — see the identical fallback
 * in __clock_gettime. */
static int sc_clock_gettime(clockid_t clk, struct timespec *ts)
{
	int r = __syscall(SYS_clock_gettime, clk, ts);
	if (!r) return r;
	if (r == -ENOSYS) {
		if (clk == CLOCK_REALTIME) {
			__syscall(SYS_gettimeofday, ts, 0);
			/* tv_usec landed in tv_nsec's slot; scale to ns. */
			ts->tv_nsec = (int)ts->tv_nsec * 1000;
			return 0;
		}
		r = -EINVAL;
	}
	errno = -r;
	return -1;
}
/* Per-thread callback (run via __synccall) applying a uid/gid change
 * process-wide. The first failure is latched in c->err and suppresses
 * the syscall in the remaining threads. */
static void do_setxid(void *p)
{
	struct ctx *c = p;
	if (c->err) return;
	/* Changing to a different uid with an RLIMIT_NPROC limit set can
	 * make the kernel's setuid fail with EAGAIN; temporarily lift
	 * the limit and restore it after the id change. */
	if (c->rlim && c->id >= 0 && c->id != getuid()) {
		struct rlimit inf = { RLIM_INFINITY, RLIM_INFINITY }, old;
		getrlimit(RLIMIT_NPROC, &old);
		/* If raising the limit failed while other threads exist,
		 * bail out rather than apply the change inconsistently. */
		if ((c->err = -__setrlimit(RLIMIT_NPROC, &inf))
		    && libc.threads_minus_1) return;
		c->err = -__syscall(c->nr, c->id, c->eid, c->sid);
		__setrlimit(RLIMIT_NPROC, &old);
		return;
	}
	c->err = -__syscall(c->nr, c->id, c->eid, c->sid);
}
/* C11 quick_exit: run the at_quick_exit callbacks exactly once, then
 * terminate via _Exit without flushing stdio. */
_Noreturn void quick_exit(int code)
{
	static int lock;
	/* The first caller takes the lock; any racing caller simply
	 * sleeps forever, since the process is about to terminate. */
	for (;;) {
		if (!a_swap(&lock, 1)) break;
		__syscall(SYS_pause);
	}
	__funcs_on_quick_exit();
	_Exit(code);
}
/* sem_post for a port whose semaphore encodes "waiters present" as a
 * __value of -1 (not standard musl). Lock-free CAS loop:
 *   value == -1: waiters are blocked — set the count to 1 and unblock
 *                them via the port's __SYS_unblock syscall;
 *   otherwise:   just increment the count.
 * NOTE(review): the -1 sentinel and __SYS_unblock semantics come from
 * this port's wait-side code — confirm against it. */
int sem_post(sem_t *sem)
{
	while (1) {
		int64_t value = sem->__value;
		if (value == -1) {
			// set to 1, and wake up any waiters if successful
			if (__sync_val_compare_and_swap(&sem->__value, value, 1UL) == value) {
				if (__syscall(__SYS_unblock, &sem->__value) != 0) {
					errno = EINVAL;
					return -1;
				};
				return 0;
			};
		} else {
			/* No waiters: plain increment; retry on CAS failure. */
			if (__sync_val_compare_and_swap(&sem->__value, value, value+1) == value) {
				return 0;
			};
		};
	};
};
/* One-time setup of the main thread's descriptor: unblocks internal
 * signals, installs the thread pointer, and registers the thread with
 * the kernel and libc. Returns 0 on success, -1 if the thread pointer
 * could not be installed. */
static int init_main_thread()
{
	/* Internal pthread signals may have been inherited blocked
	 * across exec; unblock them. */
	__syscall(SYS_rt_sigprocmask, SIG_UNBLOCK, SIGPT_SET, 0, _NSIG/8);
	if (__set_thread_area(TP_ADJ(main_thread)) < 0) return -1;
	main_thread->canceldisable = libc.canceldisable;
	main_thread->tsd = (void **)__pthread_tsd_main;
	main_thread->errno_ptr = __errno_location();
	main_thread->self = main_thread;
	/* set_tid_address returns the caller's tid and arranges for the
	 * kernel to clear-and-wake &main_thread->tid on thread exit. */
	main_thread->tid = main_thread->pid =
		__syscall(SYS_set_tid_address, &main_thread->tid);
	if (!main_thread->dtv) main_thread->dtv = (void *)dummy;
	libc.main_thread = main_thread;
	return 0;
}
/* Block until *addr no longer equals val. Spins briefly first, then
 * sleeps via futex (or the emscripten equivalent), maintaining an
 * optional waiter count so wakers know when a wake syscall is needed. */
void __wait(volatile int *addr, volatile int *waiters, int val, int priv)
{
	int spins=10000;
	if (priv) priv = 128;
	/* NOTE(review): this unconditionally discards the private-futex
	 * flag computed on the previous line, so FUTEX_WAIT below never
	 * uses FUTEX_PRIVATE. It may be deliberate for this port (the
	 * emscripten path ignores priv entirely) — confirm. */
	priv=0;
	/* Cheap spin phase before committing to a kernel sleep. */
	while (spins--) {
		if (*addr==val) a_spin();
		else return;
	}
	if (waiters) a_inc(waiters);
	while (*addr==val) {
#ifdef __EMSCRIPTEN__
		if (pthread_self()->cancelasync == PTHREAD_CANCEL_ASYNCHRONOUS) {
			// Must wait in slices in case this thread is cancelled in between.
			int e;
			do {
				if (_pthread_isduecanceled(pthread_self())) {
					if (waiters) a_dec(waiters);
					return;
				}
				e = emscripten_futex_wait((void*)addr, val, 100);
			} while(e == -ETIMEDOUT);
		} else {
			// Can wait in one go.
			emscripten_futex_wait((void*)addr, val, INFINITY);
		}
#else
		__syscall(SYS_futex, addr, FUTEX_WAIT|priv, val, 0);
#endif
	}
	if (waiters) a_dec(waiters);
}
int socket(int domain, int type, int protocol) { int s = socketcall(socket, domain, type, protocol, 0, 0, 0); if (s<0 && (errno==EINVAL || errno==EPROTONOSUPPORT) && (type&(SOCK_CLOEXEC|SOCK_NONBLOCK))) { s = socketcall(socket, domain, type & ~(SOCK_CLOEXEC|SOCK_NONBLOCK), protocol, 0, 0, 0); if (s < 0) return s; if (type & SOCK_CLOEXEC) __syscall(SYS_fcntl, s, F_SETFD, FD_CLOEXEC); if (type & SOCK_NONBLOCK) __syscall(SYS_fcntl, s, F_SETFL, O_NONBLOCK); } return s; }
FILE *fopen(const char *filename, const char *mode) { FILE *f; int fd; int flags; int plus = !!strchr(mode, '+'); /* Check for valid initial mode character */ if (!strchr("rwa", *mode)) { errno = EINVAL; return 0; } /* Compute the flags to pass to open() */ if (plus) flags = O_RDWR; else if (*mode == 'r') flags = O_RDONLY; else flags = O_WRONLY; if (*mode != 'r') flags |= O_CREAT; if (*mode == 'w') flags |= O_TRUNC; if (*mode == 'a') flags |= O_APPEND; fd = syscall_cp(SYS_open, filename, flags|O_LARGEFILE, 0666); if (fd < 0) return 0; f = __fdopen(fd, mode); if (f) return f; __syscall(SYS_close, fd); return 0; }
/* Change only the scheduling priority of thread t, keeping its policy.
 * Returns 0 or an errno value directly (does not set errno). The
 * killlock guards against the thread exiting mid-operation. */
int pthread_setschedprio(pthread_t t, int prio)
{
	int r;
	LOCK(t->killlock);
	if (!t->tid) r = ESRCH;
	else r = -__syscall(SYS_sched_setparam, t->tid, &prio);
	UNLOCK(t->killlock);
	return r;
}
/* Release lock word l and migrate its sleepers to r: either wake one
 * waiter directly (w nonzero) or futex-requeue everyone from l to r,
 * preferring the private-futex form. */
static inline void unlock_requeue(volatile int *l, volatile int *r, int w)
{
	a_store(l, 0);
#ifdef __EMSCRIPTEN__
	// Here the intent is to wake one waiter, and requeue all other waiters from waiting on address 'l'
	// to wait on address 'r' instead. This is not possible at the moment with SharedArrayBuffer Atomics,
	// as it does not have a "wake X waiters and requeue the rest" primitive. However this kind of
	// primitive is strictly not needed, since it is more like an optimization to avoid spuriously waking
	// all waiters, just to make them wait on another location immediately afterwards. Here we do exactly
	// that: wake every waiter.
	emscripten_futex_wake(l, 0x7FFFFFFF);
#else
	/* 128 is the FUTEX_PRIVATE flag; retry with the shared form if
	 * the kernel rejects it with ENOSYS. */
	if (w) __wake(l, 1, 1);
	else __syscall(SYS_futex, l, FUTEX_REQUEUE|128, 0, 1, r) != -ENOSYS
		|| __syscall(SYS_futex, l, FUTEX_REQUEUE, 0, 1, r);
#endif
}
/* Change only the scheduling priority of thread t (older variant using
 * the t->dead flag). Returns 0 or an errno value; never sets errno. */
int pthread_setschedprio(pthread_t t, int prio)
{
	int r;
	__lock(t->killlock);
	if (t->dead) r = ESRCH;
	else r = -__syscall(SYS_sched_setparam, t->tid, &prio);
	__unlock(t->killlock);
	return r;
}
/* Set both scheduling policy and parameters of thread t. Returns 0 or
 * an errno value directly (does not set errno); the killlock prevents
 * the thread from exiting mid-operation. */
int pthread_setschedparam(pthread_t t, int policy, const struct sched_param *param)
{
	int r;
	LOCK(t->killlock);
	if (!t->tid) r = ESRCH;
	else r = -__syscall(SYS_sched_setscheduler, t->tid, policy, param);
	UNLOCK(t->killlock);
	return r;
}
/* Sleep on the given clock. Returns 0 or a positive errno value (it
 * does not set errno), and is a cancellation point.
 * Improvement: POSIX requires EINVAL when clk is the caller's own
 * CPU-time clock (CLOCK_THREAD_CPUTIME_ID); reject it explicitly
 * rather than relying on the kernel's behavior for that clock. */
int clock_nanosleep(clockid_t clk, int flags, const struct timespec *req, struct timespec *rem)
{
	if (clk == CLOCK_THREAD_CPUTIME_ID) return EINVAL;
	int ret;
	CANCELPT_BEGIN;
	ret = -__syscall(SYS_clock_nanosleep, clk, flags, req, rem);
	CANCELPT_END;
	return ret;
}
/* First write through stdout's write pointer: permanently switch the
 * stream to the ordinary __stdio_write, probe once whether the fd is a
 * terminal, and disable line-buffering when it is not. */
size_t __stdout_write(FILE *f, const unsigned char *buf, size_t len)
{
	struct termios tio;
	f->write = __stdio_write;
	/* TCGETS failing means not a tty; unless line buffering was
	 * explicitly requested (F_SVB), clear the trigger byte. */
	if (!(f->flags & F_SVB) && __syscall(SYS_ioctl, f->fd, TCGETS, &tio))
		f->lbf = -1;
	return __stdio_write(f, buf, len);
}
/* Set scheduling policy and parameters of thread t (older variant
 * using the t->dead flag). Returns 0 or an errno value; never sets
 * errno.
 * BUG FIX: the syscall argument had been corrupted to the mojibake
 * "¶m" (an HTML-entity mangling), which is not valid C. param is
 * already a pointer to the caller's sched_param and is passed as-is,
 * exactly as in the sibling tid-based pthread_setschedparam. */
int pthread_setschedparam(pthread_t t, int policy, const struct sched_param *param)
{
	int r;
	__lock(t->killlock);
	r = t->dead ? ESRCH : -__syscall(SYS_sched_setscheduler, t->tid, policy, param);
	__unlock(t->killlock);
	return r;
}