/* Type-generic log1p wrapper: log1p is defined only for x > -1, so set
   errno to describe the failure mode before deferring to the real
   implementation (which produces the matching result/exception).
   x == -1 is a pole error (ERANGE); x < -1 is a domain error (EDOM).
   NaN compares false with islessequal and passes straight through.  */
FLOAT
M_DECL_FUNC (__w_log1p) (FLOAT x)
{
  if (__glibc_unlikely (islessequal (x, M_LIT (-1.0))))
    __set_errno (x == -1 ? ERANGE : EDOM);
  return M_SUF (__log1p) (x);
}
/* Finish opening a directory stream on the already-opened descriptor FD.
   Returns the new DIR stream, or NULL (with errno set and FD closed) on
   failure.  A negative FD is passed through as failure without touching
   errno.  */
static DIR *
opendir_tail (int fd)
{
  if (__glibc_unlikely (fd < 0))
    return NULL;

  /* Now make sure this really is a directory and nothing changed since
     the `stat' call.  The S_ISDIR check is superfluous if O_DIRECTORY
     works, but it's cheap and we need the stat call for st_blksize
     anyway.  */
  struct stat64 statbuf;
  if (__glibc_likely (__fxstat64 (_STAT_VER, fd, &statbuf) == 0))
    {
      if (__glibc_likely (S_ISDIR (statbuf.st_mode)))
	return __alloc_dir (fd, true, 0, &statbuf);
      __set_errno (ENOTDIR);
    }

  /* Either the stat call failed (errno already set by it) or FD is not
     a directory (ENOTDIR set above).  */
  close_not_cancel_no_status (fd);
  return NULL;
}
/* long double log1p wrapper: the function is defined only for x > -1.
   x == -1.0L yields a pole error (ERANGE); x < -1.0L a domain error
   (EDOM).  NaN fails islessequal and is forwarded untouched.  */
long double
__w_log1pl (long double x)
{
  if (__glibc_unlikely (islessequal (x, -1.0L)))
    __set_errno (x == -1.0L ? ERANGE : EDOM);
  return __log1pl (x);
}
/* Compute the inverse hyperbolic sine of X, dispatching on the magnitude
   of X (read from its high word) to numerically appropriate formulas.  */
double
__asinh (double x)
{
  double w;
  int32_t hx, ix;
  GET_HIGH_WORD (hx, x);
  ix = hx & 0x7fffffff;		/* high word with the sign bit cleared */
  if (__glibc_unlikely (ix < 0x3e300000))	/* |x|<2**-28 */
    {
      /* asinh(x) ~= x here.  For tiny x, deliberately raise the
	 underflow exception via an evaluated x*x.  */
      if (fabs (x) < DBL_MIN)
	{
	  double force_underflow = x * x;
	  math_force_eval (force_underflow);
	}
      /* The comparison with the evaluated huge+x raises inexact for
	 nonzero x; exact for x == 0.  */
      if (huge + x > one)
	return x;		/* return x inexact except 0 */
    }
  if (__glibc_unlikely (ix > 0x41b00000))	/* |x| > 2**28 */
    {
      if (ix >= 0x7ff00000)
	return x + x;		/* x is inf or NaN */
      /* For large |x|, asinh(x) ~= log(2|x|) = log(|x|) + log(2).  */
      w = __ieee754_log (fabs (x)) + ln2;
    }
  else
    {
      double xa = fabs (x);
      if (ix > 0x40000000)	/* 2**28 > |x| > 2.0 */
	{
	  /* asinh(x) = log(2x + 1/(sqrt(x^2+1)+x)).  */
	  w = __ieee754_log (2.0 * xa
			     + one / (__ieee754_sqrt (xa * xa + one) + xa));
	}
      else			/* 2.0 > |x| > 2**-28 */
	{
	  /* asinh(x) = log1p(x + x^2/(1+sqrt(1+x^2))) — accurate for
	     small arguments where log would lose precision.  */
	  double t = xa * xa;
	  w = __log1p (xa + t / (one + __ieee754_sqrt (one + t)));
	}
    }
  /* asinh is an odd function: restore the sign of X.  */
  return __copysign (w, x);
}
/* Wrapper for atan2f: SVID error handling for atan2(+-0, +-0), and
   errno reporting when the result underflows to zero.  */
float
__atan2f (float y, float x)
{
  float ret;

  /* In SVID mode atan2(+-0, +-0) is reported through the matherr
     mechanism (code 103).  */
  if (__builtin_expect (x == 0.0f && y == 0.0f, 0)
      && _LIB_VERSION == _SVID_)
    return __kernel_standard_f (y, x, 103); /* atan2(+-0,+-0) */

  ret = __ieee754_atan2f (y, x);

  /* A zero result from a nonzero Y and finite X means underflow.  */
  if (__glibc_unlikely (ret == 0.0f && y != 0.0f && isfinite (x)))
    __set_errno (ERANGE);

  return ret;
}
/* Wrapper for atan2: SVID error handling for atan2(+-0, +-0), and
   errno reporting when the result underflows to zero.  */
double
__atan2 (double y, double x)
{
  double ret;

  /* In SVID mode atan2(+-0, +-0) is reported through the matherr
     mechanism (code 3).  */
  if (__builtin_expect (x == 0.0 && y == 0.0, 0)
      && _LIB_VERSION == _SVID_)
    return __kernel_standard (y, x, 3); /* atan2(+-0,+-0) */

  ret = __ieee754_atan2 (y, x);

  /* A zero result from a nonzero Y and finite X means underflow.  */
  if (__glibc_unlikely (ret == 0.0 && y != 0.0 && isfinite (x)))
    __set_errno (ERANGE);

  return ret;
}
/* Fortified wcstombs: convert SRC to at most LEN bytes in DST, whose
   real size is DSTLEN.  Aborts via __chk_fail if the caller-supplied
   LEN exceeds the actual buffer size.  */
size_t
__wcstombs_chk (char *dst, const wchar_t *src, size_t len, size_t dstlen)
{
  mbstate_t state;

  if (__glibc_unlikely (dstlen < len))
    __chk_fail ();

  /* Start from the initial conversion state.  */
  memset (&state, '\0', sizeof state);

  /* Return how many we wrote (or maybe an error).  */
  return __wcsrtombs (dst, &src, len, &state);
}
/* Open a directory stream on NAME. */ DIR * __opendir (const char *name) { if (__glibc_unlikely (invalid_name (name))) return NULL; if (need_isdir_precheck ()) { /* We first have to check whether the name is for a directory. We cannot do this after the open() call since the open/close operation performed on, say, a tape device might have undesirable effects. */ struct stat64 statbuf; if (__glibc_unlikely (__xstat64 (_STAT_VER, name, &statbuf) < 0)) return NULL; if (__glibc_unlikely (! S_ISDIR (statbuf.st_mode))) { __set_errno (ENOTDIR); return NULL; } } return opendir_tail (open_not_cancel_2 (name, opendir_oflags ())); }
/* Get information about the file NAME relative to FD in ST. */
int
__fxstatat (int vers, int fd, const char *file, struct stat *st, int flag)
{
  int result;
  INTERNAL_SYSCALL_DECL (err);
  struct stat64 st64;

  /* Always use the 64-bit kernel interface and convert the result down
     to the caller's 32-bit struct stat according to VERS.  */
  result = INTERNAL_SYSCALL (fstatat64, err, 4, fd, file, &st64, flag);
  if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (result, err)))
    return INLINE_SYSCALL_ERROR_RETURN_VALUE (INTERNAL_SYSCALL_ERRNO (result,
								      err));
  else
    return __xstat32_conv (vers, &st64, st);
}
/* For asynchronous cancellation we use a signal.  This is the handler.  */
static void
sigcancel_handler (int sig, siginfo_t *si, void *ctx)
{
  /* Determine the process ID.  It might be negative if the thread is
     in the middle of a fork() call.  */
  pid_t pid = THREAD_GETMEM (THREAD_SELF, pid);
  if (__glibc_unlikely (pid < 0))
    pid = -pid;

  /* Safety check.  It would be possible to call this function for
     other signals and send a signal from another process.  This is not
     correct and might even be a security problem.  Try to catch as
     many incorrect invocations as possible.  */
  if (sig != SIGCANCEL
      || si->si_pid != pid
      || si->si_code != SI_TKILL)
    return;

  struct pthread *self = THREAD_SELF;

  int oldval = THREAD_GETMEM (self, cancelhandling);
  while (1)
    {
      /* We are canceled now.  When canceled by another thread this flag
	 is already set, but if the signal was sent directly (internally
	 or from another process) it has to be done here.  */
      int newval = oldval | CANCELING_BITMASK | CANCELED_BITMASK;

      if (oldval == newval || (oldval & EXITING_BITMASK) != 0)
	/* Already canceled or exiting.  */
	break;

      /* Publish the cancellation bits with a CAS; on contention retry
	 the loop with whatever another thread installed.  */
      int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
					      oldval);
      if (curval == oldval)
	{
	  /* Set the return value.  */
	  THREAD_SETMEM (self, result, PTHREAD_CANCELED);

	  /* Make sure asynchronous cancellation is still enabled.  */
	  if ((newval & CANCELTYPE_BITMASK) != 0)
	    /* Run the registered destructors and terminate the thread.  */
	    __do_cancel ();

	  break;
	}

      oldval = curval;
    }
}
/* wrapper powl: map exceptional results of __ieee754_powl onto the
   SVID matherr reporting mechanism unless _LIB_VERSION is _IEEE_.  */
long double
__powl (long double x, long double y)
{
  long double z = __ieee754_powl (x, y);

  /* Non-finite result: classify which error case produced it.  */
  if (__glibc_unlikely (!isfinite (z)))
    {
      if (_LIB_VERSION != _IEEE_)
	{
	  if (isnan (x))
	    {
	      if (y == 0.0L)
		/* pow(NaN,0.0) */
		return __kernel_standard_l (x, y, 242);
	    }
	  else if (isfinite (x) && isfinite (y))
	    {
	      if (isnan (z))
		/* pow neg**non-int */
		return __kernel_standard_l (x, y, 224);
	      else if (x == 0.0L && y < 0.0L)
		{
		  if (signbit (x) && signbit (z))
		    /* pow(-0.0,negative) */
		    return __kernel_standard_l (x, y, 223);
		  else
		    /* pow(+0.0,negative) */
		    return __kernel_standard_l (x, y, 243);
		}
	      else
		/* pow overflow */
		return __kernel_standard_l (x, y, 221);
	    }
	}
    }
  /* Exact zero from finite arguments: underflow (or 0**0 in SVID).  */
  else if (__builtin_expect (z == 0.0L, 0) && isfinite (x) && isfinite (y)
	   && _LIB_VERSION != _IEEE_)
    {
      if (x == 0.0L)
	{
	  if (y == 0.0L)
	    /* pow(0.0,0.0) */
	    return __kernel_standard_l (x, y, 220);
	}
      else
	/* pow underflow */
	return __kernel_standard_l (x, y, 222);
    }

  return z;
}
/* wrapper powf: map exceptional results of __ieee754_powf onto the
   SVID matherr reporting mechanism unless _LIB_VERSION is _IEEE_.  */
float
__powf (float x, float y)
{
  float z = __ieee754_powf (x, y);

  /* Non-finite result: classify which error case produced it.  */
  if (__glibc_unlikely (!isfinite (z)))
    {
      if (_LIB_VERSION != _IEEE_)
	{
	  if (isnan (x))
	    {
	      if (y == 0.0f)
		/* pow(NaN,0.0) */
		return __kernel_standard_f (x, y, 142);
	    }
	  else if (isfinite (x) && isfinite (y))
	    {
	      if (isnan (z))
		/* pow neg**non-int */
		return __kernel_standard_f (x, y, 124);
	      else if (x == 0.0f && y < 0.0f)
		{
		  if (signbit (x) && signbit (z))
		    /* pow(-0.0,negative) */
		    return __kernel_standard_f (x, y, 123);
		  else
		    /* pow(+0.0,negative) */
		    return __kernel_standard_f (x, y, 143);
		}
	      else
		/* pow overflow */
		return __kernel_standard_f (x, y, 121);
	    }
	}
    }
  /* Exact zero from finite arguments: underflow (or 0**0 in SVID).  */
  else if (__builtin_expect (z == 0.0f, 0) && isfinite (x) && isfinite (y)
	   && _LIB_VERSION != _IEEE_)
    {
      if (x == 0.0f)
	{
	  if (y == 0.0f)
	    /* pow(0.0,0.0) */
	    return __kernel_standard_f (x, y, 120);
	}
      else
	/* pow underflow */
	return __kernel_standard_f (x, y, 122);
    }

  return z;
}
/* Wait until *TIDP becomes zero (i.e. the thread it names has exited)
   or ABSTIME passes.  Returns 0 on thread exit, EINVAL for a malformed
   timeout, or ETIMEDOUT.  */
int
__lll_timedwait_tid (int *tidp, const struct timespec *abstime)
{
  /* Reject invalid timeouts.  */
  if (__glibc_unlikely (abstime->tv_nsec < 0)
      || __glibc_unlikely (abstime->tv_nsec >= 1000000000))
    return EINVAL;

  /* Repeat until thread terminated.  */
  int tid;
  while ((tid = atomic_load_relaxed (tidp)) != 0)
    {
      /* See exit-thread.h for details.  */
      if (tid == NACL_EXITING_TID)
	/* The thread should now be in the process of exiting, so it
	   will finish quick enough that the timeout doesn't matter.
	   If any thread ever stays in this state for long, there is
	   something catastrophically wrong.  */
	atomic_spin_nop ();
      else
	{
	  assert (tid > 0);

	  /* If *FUTEX == TID, wait until woken or timeout.  */
	  int err = __nacl_irt_futex.futex_wait_abs ((volatile int *) tidp,
						     tid, abstime);
	  if (err != 0)
	    {
	      /* Timeout is reported to the caller; EAGAIN means *TIDP
		 changed under us, so just recheck the loop condition.  */
	      if (__glibc_likely (err == ETIMEDOUT))
		return err;
	      assert (err == EAGAIN);
	    }
	}
    }

  return 0;
}
/* Try to acquire MTXP without blocking.  Returns 0 on success, or an
   error code: EBUSY (from lll_trylock), EDEADLK, EAGAIN on recursion
   counter overflow, or EINVAL for an unknown mutex type.  */
int
pthread_mutex_trylock (pthread_mutex_t *mtxp)
{
  struct pthread *self = PTHREAD_SELF;
  int ret;

  switch (MTX_TYPE (mtxp))
    {
    case PTHREAD_MUTEX_NORMAL:
      ret = lll_trylock (&mtxp->__lock);
      break;

    case PTHREAD_MUTEX_RECURSIVE:
      if (mtx_owned_p (mtxp, self, mtxp->__flags))
	{
	  /* The recursion counter would wrap around.  */
	  if (__glibc_unlikely (mtxp->__cnt + 1 == 0))
	    return (EAGAIN);

	  ++mtxp->__cnt;
	  ret = 0;
	}
      else if ((ret = lll_trylock (&mtxp->__lock)) == 0)
	{
	  /* First acquisition: record ownership and start the count.  */
	  mtx_set_owner (mtxp, self, mtxp->__flags);
	  mtxp->__cnt = 1;
	}
      break;

    case PTHREAD_MUTEX_ERRORCHECK:
      if (mtx_owned_p (mtxp, self, mtxp->__flags))
	ret = EDEADLK;
      else if ((ret = lll_trylock (&mtxp->__lock)) == 0)
	mtx_set_owner (mtxp, self, mtxp->__flags);
      break;

    /* All robust variants share one implementation via ROBUST_LOCK.  */
    case PTHREAD_MUTEX_NORMAL | PTHREAD_MUTEX_ROBUST:
    case PTHREAD_MUTEX_RECURSIVE | PTHREAD_MUTEX_ROBUST:
    case PTHREAD_MUTEX_ERRORCHECK | PTHREAD_MUTEX_ROBUST:
      ROBUST_LOCK (self, mtxp, lll_robust_trylock);
      break;

    default:
      ret = EINVAL;
      break;
    }

  return (ret);
}
/* Fetch the entry after COOKIE (the first entry when COOKIE is NULL)
   from the NIS+ table NAME.  Returns a freshly allocated nis_result
   (owned by the caller), or NULL only if the allocation itself fails;
   other errors are reported through NIS_RES_STATUS.  */
nis_result *
nis_next_entry (const_nis_name name, const netobj *cookie)
{
  nis_result *res;
  ib_request *ibreq;
  nis_error status;

  res = calloc (1, sizeof (nis_result));
  if (res == NULL)
    return NULL;

  if (name == NULL)
    {
      NIS_RES_STATUS (res) = NIS_BADNAME;
      return res;
    }

  ibreq = __create_ib_request (name, 0);
  if (ibreq == NULL)
    {
      NIS_RES_STATUS (res) = NIS_BADNAME;
      return res;
    }

  if (cookie != NULL)
    {
      /* Borrow the caller's cookie buffer for the request; it is
	 detached again below before the request is freed.  */
      ibreq->ibr_cookie.n_bytes = cookie->n_bytes;
      ibreq->ibr_cookie.n_len = cookie->n_len;
    }

  status = __do_niscall (ibreq->ibr_name, NIS_IBNEXT,
			 (xdrproc_t) _xdr_ib_request, (caddr_t) ibreq,
			 (xdrproc_t) _xdr_nis_result, (caddr_t) res,
			 0, NULL);
  if (__glibc_unlikely (status != NIS_SUCCESS))
    NIS_RES_STATUS (res) = status;

  if (cookie != NULL)
    {
      /* Don't give cookie free, it is not from us */
      ibreq->ibr_cookie.n_bytes = NULL;
      ibreq->ibr_cookie.n_len = 0;
    }

  nis_free_request (ibreq);

  return res;
}
/* Read a CPU-time clock using the high-precision timing counter.
   Returns 0 with the time in *TP, or -1 if the CPU frequency cannot be
   determined.  */
static int
hp_timing_gettime (clockid_t clock_id, struct timespec *tp)
{
  hp_timing_t tsc;

  if (__glibc_unlikely (freq == 0))
    {
      /* This can only happen if we haven't initialized the `freq'
	 variable yet.  Do this now.  We don't have to protect this
	 code against multiple execution since all of them should
	 lead to the same result.  */
      freq = __get_clockfreq ();
      if (__glibc_unlikely (freq == 0))
	/* Something went wrong.  */
	return -1;
    }

  /* Clocks other than the process CPU clock (i.e. per-thread clocks)
     are delegated to the thread library when it is loaded.  */
  if (clock_id != CLOCK_PROCESS_CPUTIME_ID
      && __pthread_clock_gettime != NULL)
    return __pthread_clock_gettime (clock_id, freq, tp);

  /* Get the current counter.  */
  HP_TIMING_NOW (tsc);

  /* Compute the offset since the start time of the process.  */
  tsc -= GL(dl_cpuclock_offset);

  /* Compute the seconds.  */
  tp->tv_sec = tsc / freq;

  /* And the nanoseconds.  This computation should be stable until
     we get machines with about 16GHz frequency.  */
  tp->tv_nsec = ((tsc % freq) * UINT64_C (1000000000)) / freq;

  return 0;
}
/* Forward a forced-unwind request to libgcc_s, loading it on first use.  */
_Unwind_Reason_Code
_Unwind_ForcedUnwind (struct _Unwind_Exception *exc, _Unwind_Stop_Fn stop,
		      void *stop_argument)
{
  /* Load libgcc_s and resolve its unwinder entry points on first use;
     otherwise make sure we observe the stores made by the thread that
     did the initialization.  */
  if (__glibc_unlikely (libgcc_s_handle == NULL))
    pthread_cancel_init ();
  else
    atomic_read_barrier ();

  _Unwind_Reason_Code (*forcedunwind) (struct _Unwind_Exception *,
				       _Unwind_Stop_Fn, void *)
    = libgcc_s_forcedunwind;
  /* The function pointer is stored mangled; demangle before calling.  */
  PTR_DEMANGLE (forcedunwind);
  return forcedunwind (exc, stop, stop_argument);
}
/* Acquire MTXP, blocking as appropriate for its type.  Returns 0 on
   success, or an error code: EAGAIN on recursion counter overflow,
   EDEADLK for error-checking mutexes, or EINVAL for an unknown type.  */
int
pthread_mutex_lock (pthread_mutex_t *mtxp)
{
  struct pthread *self = PTHREAD_SELF;
  /* Only the process-shared flag is relevant for the low-level lock.  */
  int flags = mtxp->__flags & GSYNC_SHARED;
  int ret = 0;

  switch (MTX_TYPE (mtxp))
    {
    case PTHREAD_MUTEX_NORMAL:
      lll_lock (&mtxp->__lock, flags);
      break;

    case PTHREAD_MUTEX_RECURSIVE:
      if (mtx_owned_p (mtxp, self, flags))
	{
	  /* The recursion counter would wrap around.  */
	  if (__glibc_unlikely (mtxp->__cnt + 1 == 0))
	    return (EAGAIN);

	  ++mtxp->__cnt;
	  return (ret);
	}

      lll_lock (&mtxp->__lock, flags);
      /* First acquisition: record ownership and start the count.  */
      mtx_set_owner (mtxp, self, flags);
      mtxp->__cnt = 1;
      break;

    case PTHREAD_MUTEX_ERRORCHECK:
      if (mtx_owned_p (mtxp, self, flags))
	return (EDEADLK);

      lll_lock (&mtxp->__lock, flags);
      mtx_set_owner (mtxp, self, flags);
      break;

    /* All robust variants share one implementation via ROBUST_LOCK.  */
    case PTHREAD_MUTEX_NORMAL | PTHREAD_MUTEX_ROBUST:
    case PTHREAD_MUTEX_RECURSIVE | PTHREAD_MUTEX_ROBUST:
    case PTHREAD_MUTEX_ERRORCHECK | PTHREAD_MUTEX_ROBUST:
      ROBUST_LOCK (self, mtxp, lll_robust_lock, flags);
      break;

    default:
      ret = EINVAL;
      break;
    }

  return (ret);
}
/* Open shared memory object.  This implementation assumes the shmfs
   implementation introduced in the late 2.3.x kernel series to be
   available.  Normally the filesystem will be mounted at /dev/shm but
   we fall back on searching for the actual mount point should opening
   such a file fail.  */
int
shm_open (const char *name, int oflag, mode_t mode)
{
  size_t namelen;
  char *fname;
  int fd;

  /* Determine where the shmfs is mounted.  */
  __libc_once (once, where_is_shmfs);

  /* If we don't know the mount points there is nothing we can do.  Ever.  */
  if (mountpoint.dir == NULL)
    {
      __set_errno (ENOSYS);
      return -1;
    }

  /* Construct the filename.  Leading slashes are ignored so that the
     object name is always interpreted relative to the mount point.  */
  while (name[0] == '/')
    ++name;

  namelen = strlen (name);

  /* Validate the filename.  */
  if (name[0] == '\0' || namelen > NAME_MAX || strchr (name, '/') != NULL)
    {
      __set_errno (EINVAL);
      return -1;
    }

  /* NAMELEN is bounded by NAME_MAX above, so this alloca is of limited
     size.  */
  fname = (char *) alloca (mountpoint.dirlen + namelen + 1);
  __mempcpy (__mempcpy (fname, mountpoint.dir, mountpoint.dirlen),
	     name, namelen + 1);

  /* And get the file descriptor.
     XXX Maybe we should test each descriptor whether it really is for a
     file on the shmfs.  If this is what should be done the whole function
     should be revamped since we can determine whether shmfs is available
     while trying to open the file, all in one turn.  */
  fd = open (fname, oflag | O_CLOEXEC | O_NOFOLLOW, mode);
  if (fd == -1 && __glibc_unlikely (errno == EISDIR))
    /* It might be better to fold this error with EINVAL since
       directory names are just another example for unsuitable shared
       object names and the standard does not mention EISDIR.  */
    __set_errno (EINVAL);

  return fd;
}
/* SVID wrapper for scalbf: translate an out-of-range result of
   __ieee754_scalbf into a matherr report or errno setting.
   Fix: the definition was missing its return type ("implicit int" is
   invalid since C99 and would truncate the float result); restore the
   file-local `static float' signature used by its siblings.  */
static float
sysv_scalbf (float x, float fn)
{
  float z = __ieee754_scalbf (x, fn);

  if (__glibc_unlikely (isinf (z)))
    {
      if (isfinite (x))
	return __kernel_standard_f (x, fn, 132); /* scalb overflow */
      else
	/* X was already infinite; just flag the range error.  */
	__set_errno (ERANGE);
    }
  else if (__builtin_expect (z == 0.0f, 0) && z != x)
    return __kernel_standard_f (x, fn, 133); /* scalb underflow */

  return z;
}
/* Get information about the file NAME relative to FD in ST (64-bit
   version).  Only the current Linux stat ABI version is accepted.
   Fix: the original branch hint was `!__builtin_expect
   (INTERNAL_SYSCALL_ERROR_P (...), 1)', which predicts the *error*
   path as the likely one.  Syscall success is the common case; use
   __glibc_unlikely on the error predicate, matching __fxstatat.  */
int
__fxstatat64 (int vers, int fd, const char *file, struct stat64 *st, int flag)
{
  int result;
  INTERNAL_SYSCALL_DECL (err);

  if (__glibc_unlikely (vers != _STAT_VER_LINUX))
    return INLINE_SYSCALL_ERROR_RETURN_VALUE (EINVAL);

  result = INTERNAL_SYSCALL (fstatat64, err, 4, fd, file, st, flag);
  if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (result, err)))
    return INLINE_SYSCALL_ERROR_RETURN_VALUE (INTERNAL_SYSCALL_ERRNO (result,
								      err));
  return 0;
}
/* SVID wrapper for scalbl: translate an out-of-range result of
   __ieee754_scalbl into a matherr report or errno setting.
   Fix: the definition was missing its return type ("implicit int" is
   invalid since C99 and would truncate the long double result);
   restore the file-local `static long double' signature used by its
   siblings.  */
static long double
sysv_scalbl (long double x, long double fn)
{
  long double z = __ieee754_scalbl (x, fn);

  if (__glibc_unlikely (__isinfl (z)))
    {
      if (__finitel (x))
	return __kernel_standard_l (x, fn, 232); /* scalb overflow */
      else
	/* X was already infinite; just flag the range error.  */
	__set_errno (ERANGE);
    }
  else if (__builtin_expect (z == 0.0L, 0) && z != x)
    return __kernel_standard_l (x, fn, 233); /* scalb underflow */

  return z;
}
/* Return the directory (and its length in *LEN) where shared memory
   objects live, or NULL with errno set to ENOSYS if no shmfs mount
   point could be found.  */
const char *
__shm_directory (size_t *len)
{
  /* Determine where the shmfs is mounted — done only once.  */
  __libc_once (once, where_is_shmfs);

  const char *dir = mountpoint.dir;

  /* If we don't know the mount point there is nothing we can do.  Ever.  */
  if (__glibc_unlikely (dir == NULL))
    {
      __set_errno (ENOSYS);
      return NULL;
    }

  *len = mountpoint.dirlen;
  return dir;
}
/* Special callback replacing the underflow callbacks if we mmap the
   file.  Returns the next input byte, or EOF at end of the mapping.  */
int
_IO_file_underflow_mmap (_IO_FILE *fp)
{
  /* Serve directly from the mapped buffer when data remains.  */
  if (fp->_IO_read_ptr < fp->_IO_read_end)
    return *(unsigned char *) fp->_IO_read_ptr;

  /* Buffer exhausted: check whether the mapping is still valid.  If it
     had to be abandoned, we have switched back to the regular file
     callbacks — punt to them.  */
  if (__glibc_unlikely (mmap_remap_check (fp)))
    return _IO_UNDERFLOW (fp);

  /* The remap may have extended the readable area.  */
  if (fp->_IO_read_ptr < fp->_IO_read_end)
    return *(unsigned char *) fp->_IO_read_ptr;

  /* Nothing more to read.  */
  fp->_flags |= _IO_EOF_SEEN;
  return EOF;
}
/* Return the length of the maximum initial segment
   of S which contains only characters in ACCEPT.  */
size_t
STRSPN (const char *str, const char *accept)
{
  if (accept[0] == '\0')
    return 0;
  if (__glibc_unlikely (accept[1] == '\0'))
    {
      /* Single-character ACCEPT: a plain scan is fastest.  */
      const char *a = str;
      for (; *str == *accept; str++);
      return str - a;
    }

  /* Use multiple small memsets to enable inlining on most targets.  */
  unsigned char table[256];
  unsigned char *p = memset (table, 0, 64);
  memset (p + 64, 0, 64);
  memset (p + 128, 0, 64);
  memset (p + 192, 0, 64);

  unsigned char *s = (unsigned char*) accept;
  /* Different from strcspn it does not add the NULL on the table
     so can avoid check if str[i] is NULL, since table['\0'] will
     be 0 and thus stopping the loop check.  */
  do
    p[*s++] = 1;
  while (*s);

  s = (unsigned char*) str;
  /* Handle the first four bytes explicitly so the main loop can start
     from a 4-byte-aligned position.  */
  if (!p[s[0]])
    return 0;
  if (!p[s[1]])
    return 1;
  if (!p[s[2]])
    return 2;
  if (!p[s[3]])
    return 3;

  /* Align down; bytes before STR belong to the already-accepted prefix
     and re-testing them is harmless.  */
  s = (unsigned char *) PTR_ALIGN_DOWN (s, 4);

  /* Check four table entries per iteration; the loop exits when at
     least one of the four bytes is not in ACCEPT (its entry is 0).  */
  unsigned int c0, c1, c2, c3;
  do
    {
      s += 4;
      c0 = p[s[0]];
      c1 = p[s[1]];
      c2 = p[s[2]];
      c3 = p[s[3]];
    }
  while ((c0 & c1 & c2 & c3) != 0);

  /* Pinpoint the first rejected byte among the final four using the
     saved 0/1 table values.  */
  size_t count = s - (unsigned char *) str;
  return (c0 & c1) == 0 ? count + c0 : count + c2 + 2;
}
/* Set the CPU affinity mask of PID to CPUSET (CPUSETSIZE bytes).
   Returns 0 on success or -1 with errno set.  */
int
__sched_setaffinity_new (pid_t pid, size_t cpusetsize, const cpu_set_t *cpuset)
{
  if (__glibc_unlikely (__kernel_cpumask_size == 0))
    {
      /* Probe the kernel's cpumask_t size once: keep doubling the
	 buffer until sched_getaffinity stops failing with EINVAL; on
	 success the return value is the mask size in bytes.  */
      INTERNAL_SYSCALL_DECL (err);
      int res;

      size_t psize = 128;
      void *p = alloca (psize);

      while (res = INTERNAL_SYSCALL (sched_getaffinity, err, 3, getpid (),
				     psize, p),
	     INTERNAL_SYSCALL_ERROR_P (res, err)
	     && INTERNAL_SYSCALL_ERRNO (res, err) == EINVAL)
	p = extend_alloca (p, psize, 2 * psize);

      if (res == 0 || INTERNAL_SYSCALL_ERROR_P (res, err))
	{
	  __set_errno (INTERNAL_SYSCALL_ERRNO (res, err));
	  return -1;
	}

      __kernel_cpumask_size = res;
    }

  /* We now know the size of the kernel cpumask_t.  Make sure the user
     does not request to set a bit beyond that.  */
  for (size_t cnt = __kernel_cpumask_size; cnt < cpusetsize; ++cnt)
    if (((char *) cpuset)[cnt] != '\0')
      {
	/* Found a nonzero byte.  This means the user request cannot be
	   fulfilled.  */
	__set_errno (EINVAL);
	return -1;
      }

  int result = INLINE_SYSCALL (sched_setaffinity, 3, pid, cpusetsize,
			       cpuset);

#ifdef RESET_VGETCPU_CACHE
  /* The affinity change may invalidate the cached CPU number used by
     the vgetcpu fast path.  */
  if (result != -1)
    RESET_VGETCPU_CACHE ();
#endif

  return result;
}
/* Try to acquire RWLOCK for reading without blocking.  Returns 0 on
   success, EBUSY if a writer holds or is preferred for the lock, or
   EAGAIN if the reader count would overflow.  */
int
__pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
{
  int result = EBUSY;
  bool wake = false;
  int futex_shared =
      rwlock->__data.__shared == LLL_PRIVATE ? FUTEX_PRIVATE : FUTEX_SHARED;

  /* Elision may only be attempted when the lock is completely free.
     Fix: the original predicate ended in `&& rwlock->__data.__writer',
     which requires a writer to be PRESENT — the inverted condition.
     The lock is free only when __writer == 0.  */
  if (ELIDE_TRYLOCK (rwlock->__data.__rwelision,
		     rwlock->__data.__lock == 0
		     && rwlock->__data.__nr_readers == 0
		     && rwlock->__data.__writer == 0, 0))
    return 0;

  lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);

  /* Readers may enter when no writer holds the lock and either no
     writers are queued or readers are preferred.  */
  if (rwlock->__data.__writer == 0
      && (rwlock->__data.__nr_writers_queued == 0
	  || PTHREAD_RWLOCK_PREFER_READER_P (rwlock)))
    {
      if (__glibc_unlikely (++rwlock->__data.__nr_readers == 0))
	{
	  /* Reader count overflowed; undo and report.  */
	  --rwlock->__data.__nr_readers;
	  result = EAGAIN;
	}
      else
	{
	  result = 0;
	  /* See pthread_rwlock_rdlock.  */
	  if (rwlock->__data.__nr_readers == 1
	      && rwlock->__data.__nr_readers_queued > 0
	      && rwlock->__data.__nr_writers_queued > 0)
	    {
	      ++rwlock->__data.__readers_wakeup;
	      wake = true;
	    }
	}
    }

  lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);

  if (wake)
    futex_wake (&rwlock->__data.__readers_wakeup, INT_MAX, futex_shared);

  return result;
}
/* Return the elapsed processor time, expressed in CLOCKS_PER_SEC
   units, or (clock_t) -1 if it cannot be obtained.  */
clock_t
clock (void)
{
  struct timespec now;

  _Static_assert (CLOCKS_PER_SEC == 1000000,
		  "CLOCKS_PER_SEC should be 1000000");

  /* clock_gettime shouldn't fail here since CLOCK_PROCESS_CPUTIME_ID
     is supported since 2.6.12.  Check the return value anyway in case
     the kernel barfs on us for some reason.  */
  if (__glibc_unlikely (__clock_gettime (CLOCK_PROCESS_CPUTIME_ID, &now)
			!= 0))
    return (clock_t) -1;

  return (now.tv_sec * CLOCKS_PER_SEC
	  + now.tv_nsec / (1000000000 / CLOCKS_PER_SEC));
}
/* Copy SRC to DEST, returning the address of the terminating L'\0' in
   DEST.  Check for overflows.  */
wchar_t *
__wcpcpy_chk (wchar_t *dest, const wchar_t *src, size_t destlen)
{
  wchar_t *dp = dest;
  const wchar_t *sp = src;

  /* Copy one wide character per iteration, aborting via __chk_fail the
     moment a write would exceed the real size of the destination
     (DESTLEN counts the remaining room, including the terminator).  */
  for (;;)
    {
      if (__glibc_unlikely (destlen-- == 0))
	__chk_fail ();
      *dp = *sp;
      if (*sp == L'\0')
	break;
      ++dp;
      ++sp;
    }

  /* DP now points at the L'\0' just stored.  */
  return dp;
}
/* POSIX lockf with 64-bit offsets: apply, test, or remove an advisory
   lock of LEN64 bytes starting at the current file position of FD,
   implemented on top of fcntl F_GETLK64/F_SETLK64/F_SETLKW64.  */
int
lockf64 (int fd, int cmd, off64_t len64)
{
  struct flock64 fl64;
  int cmd64;
  int result;

  memset ((char *) &fl64, '\0', sizeof (fl64));
  /* lockf locks are always relative to the current file offset.  */
  fl64.l_whence = SEEK_CUR;
  fl64.l_start = 0;
  fl64.l_len = len64;

  switch (cmd)
    {
    case F_TEST:
      /* Test the lock: return 0 if FD is unlocked or locked by this
	 process; return -1, set errno to EACCES, if another process
	 holds the lock.  */
      fl64.l_type = F_RDLCK;
      INTERNAL_SYSCALL_DECL (err);
      result = INTERNAL_SYSCALL (fcntl64, err, 3, fd, F_GETLK64, &fl64);
      if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (result, err)))
	return INLINE_SYSCALL_ERROR_RETURN_VALUE (INTERNAL_SYSCALL_ERRNO
						  (result, err));
      if (fl64.l_type == F_UNLCK || fl64.l_pid == __getpid ())
	return 0;
      return INLINE_SYSCALL_ERROR_RETURN_VALUE (EACCES);

    case F_ULOCK:
      fl64.l_type = F_UNLCK;
      cmd64 = F_SETLK64;
      break;
    case F_LOCK:
      /* Blocking exclusive lock.  */
      fl64.l_type = F_WRLCK;
      cmd64 = F_SETLKW64;
      break;
    case F_TLOCK:
      /* Non-blocking exclusive lock.  */
      fl64.l_type = F_WRLCK;
      cmd64 = F_SETLK64;
      break;

    default:
      return INLINE_SYSCALL_ERROR_RETURN_VALUE (EINVAL);
    }
  return INLINE_SYSCALL (fcntl64, 3, fd, cmd64, &fl64);
}