/*
 * truncate64: truncate the file at @path to @length bytes using the
 * dedicated truncate64 syscall (64-bit length on a 32-bit ABI).
 *
 * OFF64_HI_LO(length) expands to TWO syscall arguments (the high and low
 * 32-bit halves of @length) — hence the differing argument counts below.
 */
int truncate64(const char * path, __off64_t length)
{
# if defined(__UCLIBC_SYSCALL_ALIGN_64BIT__)
	/* ABI wants 64-bit data in an aligned (even) register pair: a 0 is
	 * inserted after @path to pad out to that alignment, giving 4 args. */
	return INLINE_SYSCALL(truncate64, 4, path, 0, OFF64_HI_LO(length));
# else
	return INLINE_SYSCALL(truncate64, 3, path, OFF64_HI_LO(length));
# endif
}
/*
 * posix_fadvise64: announce an expected access pattern (@advice) for the
 * byte range [@offset, @offset + @len) of @fd via the fadvise64_64 syscall.
 *
 * Returns 0 on success; on failure it returns the syscall's error number
 * directly (it does NOT set errno — note the INTERNAL_SYSCALL_ERRNO return).
 * OFF64_HI_LO(x) expands to two arguments: the hi and lo 32-bit halves of x.
 */
int posix_fadvise64(int fd, off64_t offset, off64_t len, int advice)
{
	INTERNAL_SYSCALL_DECL (err);
	/* ARM has always been funky. */
# if defined(__UCLIBC_SYSCALL_ALIGN_64BIT__) || defined(__arm__)
	/* 64-bit args must land in aligned register pairs; passing @advice
	 * right after @fd packs the two ints together instead of leaving a
	 * register hole before the first hi/lo pair. */
	int ret = INTERNAL_SYSCALL (fadvise64_64, err, 6, fd, advice, OFF64_HI_LO (offset), OFF64_HI_LO (len));
# else
	/* Straight kernel argument order: fd, offset, len, advice. */
	int ret = INTERNAL_SYSCALL (fadvise64_64, err, 6, fd, OFF64_HI_LO (offset), OFF64_HI_LO (len), advice);
# endif
	if (INTERNAL_SYSCALL_ERROR_P (ret, err))
		return INTERNAL_SYSCALL_ERRNO (ret, err);
	return 0;
}
/*
 * posix_fadvise64: announce an expected access pattern (@advice) for the
 * byte range [@offset, @offset + @len) of @fd via the fadvise64_64 syscall.
 *
 * Returns 0 on success; on failure it returns the syscall's error number
 * directly (it does NOT set errno — note the INTERNAL_SYSCALL_ERRNO return).
 * OFF64_HI_LO(x) expands to two arguments: the hi and lo 32-bit halves of x.
 * Three marshalling variants below, selected by how the target ABI aligns
 * 64-bit values in registers and whether the kernel rearranges for us.
 */
int posix_fadvise64(int fd, off64_t offset, off64_t len, int advice)
{
	INTERNAL_SYSCALL_DECL (err);
	/* ARM has always been funky. */
#if defined (__arm__) || \
    (defined(__UCLIBC_SYSCALL_ALIGN_64BIT__) && (defined(__powerpc__) || defined(__xtensa__)))
	/* arch with 64-bit data in even reg alignment #1: [powerpc/xtensa]
	 * custom syscall handler (rearranges @advice to avoid register hole punch) */
	int ret = INTERNAL_SYSCALL (fadvise64_64, err, 6, fd, advice, OFF64_HI_LO (offset), OFF64_HI_LO (len));
#elif defined(__UCLIBC_SYSCALL_ALIGN_64BIT__)
	/* arch with 64-bit data in even reg alignment #2: [arcv2/others-in-future]
	 * stock syscall handler in kernel (reg hole punched) — an explicit 0 pad
	 * is passed after @fd, so 7 args total */
	int ret = INTERNAL_SYSCALL (fadvise64_64, err, 7, fd, 0, OFF64_HI_LO (offset), OFF64_HI_LO (len), advice);
# else
	/* Straight kernel argument order: fd, offset, len, advice. */
	int ret = INTERNAL_SYSCALL (fadvise64_64, err, 6, fd, OFF64_HI_LO (offset), OFF64_HI_LO (len), advice);
# endif
	if (INTERNAL_SYSCALL_ERROR_P (ret, err))
		return INTERNAL_SYSCALL_ERRNO (ret, err);
	return 0;
}
/*
 * __NC(sync_file_range): internal (no-cancellation) wrapper that flushes the
 * byte range [@offset, @offset + @nbytes) of @fd per @flags via the
 * sync_file_range syscall.  Returns INLINE_SYSCALL's result directly.
 *
 * OFF64_HI_LO(x) expands to two arguments (hi/lo 32-bit halves of x), so the
 * argument counts below differ per ABI.  Five marshalling variants, selected
 * by how the target passes 64-bit values in registers.
 */
static int __NC(sync_file_range)(int fd, off64_t offset, off64_t nbytes, unsigned int flags)
{
# if defined __powerpc__ && __WORDSIZE == 64
	/* 64-bit registers: no hi/lo splitting needed, but flags come second. */
	return INLINE_SYSCALL(sync_file_range, 4, fd, flags, offset, nbytes);
# elif defined __arm__ && defined __thumb__
	/* NOTE(review): this Thumb branch uses the plain fd/offset/nbytes/flags
	 * order rather than the sync_file_range2 (flags-second) layout used in
	 * the __NR_sync_file_range2 branch below — presumably relies on a prior
	 * syscall-number remap; confirm against the file's #define context. */
	return INLINE_SYSCALL(sync_file_range, 6, fd, OFF64_HI_LO(offset), OFF64_HI_LO(nbytes), flags);
# elif (defined __mips__ && _MIPS_SIM == _ABIO32) || \
    (defined(__UCLIBC_SYSCALL_ALIGN_64BIT__) && !(defined(__powerpc__) || defined(__xtensa__) || defined(__nds32__) || defined(__csky__)))
	/* arch with 64-bit data in even reg alignment #2: [arcv2/others-in-future]
	 * stock syscall handler in kernel (reg hole punched)
	 * see libc/sysdeps/linux/common/posix_fadvise.c for more details
	 * — an explicit 0 pad follows @fd, giving 7 args. */
	return INLINE_SYSCALL(sync_file_range, 7, fd, 0, OFF64_HI_LO(offset), OFF64_HI_LO(nbytes), flags);
# elif defined __NR_sync_file_range2
	/* sync_file_range2 ABI: flags moved up to the second argument so the
	 * two 64-bit values sit back-to-back with no register hole. */
	return INLINE_SYSCALL(sync_file_range, 6, fd, flags, OFF64_HI_LO(offset), OFF64_HI_LO(nbytes));
# else
	/* Straight kernel argument order: fd, offset, nbytes, flags. */
	return INLINE_SYSCALL(sync_file_range, 6, fd, OFF64_HI_LO(offset), OFF64_HI_LO(nbytes), flags);
# endif
}
/*
 * __NC(sync_file_range): internal (no-cancellation) wrapper that flushes the
 * byte range [@offset, @offset + @nbytes) of @fd per @flags via the
 * sync_file_range syscall.  Returns INLINE_SYSCALL's result directly.
 *
 * OFF64_HI_LO(x) expands to two arguments (hi/lo 32-bit halves of x), so the
 * argument counts below differ per ABI.
 */
static int __NC(sync_file_range)(int fd, off64_t offset, off64_t nbytes, unsigned int flags)
{
# if defined __powerpc__ && __WORDSIZE == 64
	/* 64-bit registers: no hi/lo splitting needed, but flags come second. */
	return INLINE_SYSCALL(sync_file_range, 4, fd, flags, offset, nbytes);
# elif defined __mips__ && _MIPS_SIM == _ABIO32
	/* o32 MIPS aligns 64-bit args to even register pairs: an explicit 0
	 * pad follows @fd, giving 7 args. */
	return INLINE_SYSCALL(sync_file_range, 7, fd, 0, OFF64_HI_LO(offset), OFF64_HI_LO(nbytes), flags);
# elif defined __NR_sync_file_range2
	/* sync_file_range2 ABI: flags moved up to the second argument so the
	 * two 64-bit values sit back-to-back with no register hole. */
	return INLINE_SYSCALL(sync_file_range, 6, fd, flags, OFF64_HI_LO(offset), OFF64_HI_LO(nbytes));
# else
	/* Straight kernel argument order: fd, offset, nbytes, flags. */
	return INLINE_SYSCALL(sync_file_range, 6, fd, OFF64_HI_LO(offset), OFF64_HI_LO(nbytes), flags);
# endif
}