/* Allocate the stream's file buffer, or fall back appropriately.
   Streams attached to character devices that look like a TTY default
   to line buffering; the buffer size is taken from st_blksize when the
   kernel reports a useful one.  Returns 1 on success, EOF if the
   buffer could not be allocated.  */
int
_IO_file_doallocate (_IO_FILE *fp)
{
  struct stat64 st;
  _IO_size_t size = _IO_BUFSIZ;
  char *buf;

  /* Only consult the file's metadata when we have a real descriptor
     and the (unlikely-predicted) stat succeeds.  */
  if (fp->_fileno >= 0 && __builtin_expect (_IO_SYSSTAT (fp, &st), 0) >= 0)
    {
      if (S_ISCHR (st.st_mode))
        {
          /* Possibly a tty: switch to line buffering.  */
          int looks_like_tty =
#ifdef DEV_TTY_P
            DEV_TTY_P (&st) ||
#endif
            local_isatty (fp->_fileno);
          if (looks_like_tty)
            fp->_flags |= _IO_LINE_BUF;
        }
#if _IO_HAVE_ST_BLKSIZE
      /* Prefer the filesystem's preferred block size when it is
         smaller than our default.  */
      if (st.st_blksize > 0 && st.st_blksize < _IO_BUFSIZ)
        size = st.st_blksize;
#endif
    }

  buf = malloc (size);
  if (__glibc_unlikely (buf == NULL))
    return EOF;
  /* Install BUF as the stream buffer; the final 1 marks it as owned
     by the library (freed on close).  */
  _IO_setb (fp, buf, buf + size, 1);
  return 1;
}
/* Compat (pre-2.1) implementation of the seekoff jump-table slot.
   Seek FP to OFFSET relative to DIR (_IO_seek_set/_IO_seek_cur/_IO_seek_end).
   MODE == 0 means "just report the current position" (ftell).  Returns the
   resulting absolute offset, or EOF on failure.  Uses the legacy
   fp->_old_offset field rather than fp->_offset.  */
_IO_off64_t
attribute_compat_text_section
_IO_old_file_seekoff (_IO_FILE *fp, _IO_off64_t offset, int dir, int mode)
{
  _IO_off_t result;
  _IO_off64_t delta, new_offset;
  long count;
  /* POSIX.1 8.2.3.7 says that after a call to fflush() the file
     offset of the underlying file must be exact.  If the buffer holds
     no pending read-ahead or unwritten output, the buffered position
     matches the kernel position and a partial refill is acceptable.  */
  int must_be_exact = (fp->_IO_read_base == fp->_IO_read_end
                       && fp->_IO_write_base == fp->_IO_write_ptr);

  if (mode == 0)
    dir = _IO_seek_cur, offset = 0; /* Don't move any pointers. */

  /* Flush unwritten characters.
     (This may do an unneeded write if we seek within the buffer.
     But to be able to switch to reading, we would need to set
     egptr to pptr.  That can't be done in the current design,
     which assumes file_ptr() is eGptr.  Anyway, since we probably
     end up flushing when we close(), it doesn't make much difference.)
     FIXME: simulate mem-mapped files. */
  if (fp->_IO_write_ptr > fp->_IO_write_base || _IO_in_put_mode (fp))
    if (_IO_switch_to_get_mode (fp))
      return EOF;

  if (fp->_IO_buf_base == NULL)
    {
      /* It could be that we already have a pushback buffer.  */
      if (fp->_IO_read_base != NULL)
        {
          free (fp->_IO_read_base);
          fp->_flags &= ~_IO_IN_BACKUP;
        }
      /* Allocate a fresh buffer and reset both put and get areas to
         empty at its start.  */
      _IO_doallocbuf (fp);
      _IO_setp (fp, fp->_IO_buf_base, fp->_IO_buf_base);
      _IO_setg (fp, fp->_IO_buf_base, fp->_IO_buf_base, fp->_IO_buf_base);
    }

  /* Normalize the request to an absolute offset (dir == _IO_seek_set)
     where possible; otherwise fall through to the "dumb" path which
     lets the kernel do the relative seek.  */
  switch (dir)
    {
    case _IO_seek_cur:
      /* Adjust for read-ahead (bytes is buffer). */
      offset -= fp->_IO_read_end - fp->_IO_read_ptr;
      if (fp->_old_offset == _IO_pos_BAD)
        goto dumb;
      /* Make offset absolute, assuming current pointer is file_ptr(). */
      offset += fp->_old_offset;

      dir = _IO_seek_set;
      break;
    case _IO_seek_set:
      break;
    case _IO_seek_end:
      {
        struct stat64 st;
        /* Only regular files have a trustworthy st_size to seek
           relative to; anything else goes to the kernel directly.  */
        if (_IO_SYSSTAT (fp, &st) == 0 && S_ISREG (st.st_mode))
          {
            offset += st.st_size;
            dir = _IO_seek_set;
          }
        else
          goto dumb;
      }
    }
  /* At this point, dir==_IO_seek_set. */

  /* If we are only interested in the current position we've found it now.  */
  if (mode == 0)
    return offset;

  /* If destination is within current buffer, optimize: */
  if (fp->_old_offset != _IO_pos_BAD && fp->_IO_read_base != NULL
      && !_IO_in_backup (fp))
    {
      /* Offset relative to start of main get area. */
      _IO_off_t rel_offset = (offset - fp->_old_offset
                              + (fp->_IO_read_end - fp->_IO_read_base));
      if (rel_offset >= 0)
        {
#if 0
          if (_IO_in_backup (fp))
            _IO_switch_to_main_get_area (fp);
#endif
          if (rel_offset <= fp->_IO_read_end - fp->_IO_read_base)
            {
              /* Target is inside the buffered data: just move the get
                 pointer, no system call needed (but see resync).  */
              _IO_setg (fp, fp->_IO_buf_base, fp->_IO_buf_base + rel_offset,
                        fp->_IO_read_end);
              _IO_setp (fp, fp->_IO_buf_base, fp->_IO_buf_base);
              {
                _IO_mask_flags (fp, 0, _IO_EOF_SEEN);
                goto resync;
              }
            }
#ifdef TODO
          /* If we have streammarkers, seek forward by reading ahead. */
          if (_IO_have_markers (fp))
            {
              int to_skip = rel_offset
                - (fp->_IO_read_ptr - fp->_IO_read_base);
              if (ignore (to_skip) != to_skip)
                goto dumb;
              _IO_mask_flags (fp, 0, _IO_EOF_SEEN);
              goto resync;
            }
#endif
        }
#ifdef TODO
      if (rel_offset < 0 && rel_offset >= Bbase () - Bptr ())
        {
          if (!_IO_in_backup (fp))
            _IO_switch_to_backup_area (fp);
          gbump (fp->_IO_read_end + rel_offset - fp->_IO_read_ptr);
          _IO_mask_flags (fp, 0, _IO_EOF_SEEN);
          goto resync;
        }
#endif
    }

#ifdef TODO
  _IO_unsave_markers (fp);
#endif

  if (fp->_flags & _IO_NO_READS)
    goto dumb;

  /* Try to seek to a block boundary, to improve kernel page management.
     NOTE(review): this mask assumes the buffer size is a power of two —
     confirm against _IO_doallocbuf's sizing.  */
  new_offset = offset & ~(fp->_IO_buf_end - fp->_IO_buf_base - 1);
  delta = offset - new_offset;
  if (delta > fp->_IO_buf_end - fp->_IO_buf_base)
    {
      new_offset = offset;
      delta = 0;
    }
  result = _IO_SYSSEEK (fp, new_offset, 0);
  if (result < 0)
    return EOF;
  if (delta == 0)
    count = 0;
  else
    {
      /* Refill the buffer from the block boundary up to (at least) the
         requested position; when exactness is required read only DELTA
         bytes so the kernel offset lands exactly on OFFSET.  */
      count = _IO_SYSREAD (fp, fp->_IO_buf_base,
                           (must_be_exact
                            ? delta : fp->_IO_buf_end - fp->_IO_buf_base));
      if (count < delta)
        {
          /* We weren't allowed to read, but try to seek the remainder. */
          offset = count == EOF ? delta : delta-count;
          dir = _IO_seek_cur;
          goto dumb;
        }
    }
  /* Position the get pointer DELTA bytes into the freshly read data.  */
  _IO_setg (fp, fp->_IO_buf_base, fp->_IO_buf_base + delta,
            fp->_IO_buf_base + count);
  _IO_setp (fp, fp->_IO_buf_base, fp->_IO_buf_base);
  fp->_old_offset = result + count;
  _IO_mask_flags (fp, 0, _IO_EOF_SEEN);
  return offset;

 dumb:
  /* Fallback: discard buffered state and let the kernel perform the
     seek with the original (possibly relative) DIR.  */
  _IO_unsave_markers (fp);
  result = _IO_SYSSEEK (fp, offset, dir);
  if (result != EOF)
    {
      _IO_mask_flags (fp, 0, _IO_EOF_SEEN);
      fp->_old_offset = result;
      _IO_setg (fp, fp->_IO_buf_base, fp->_IO_buf_base, fp->_IO_buf_base);
      _IO_setp (fp, fp->_IO_buf_base, fp->_IO_buf_base);
    }
  return result;

resync:
  /* We need to do it since it is possible that the file offset in
     the kernel may be changed behind our back.  It may happen when
     we fopen a file and then do a fork.  One process may access the
     file and the kernel file offset will be changed. */
  if (fp->_old_offset >= 0)
    _IO_SYSSEEK (fp, fp->_old_offset, 0);

  return offset;
}
/* Current implementation of the seekoff jump-table slot.  Seek FP to
   OFFSET relative to DIR (_IO_seek_set/_IO_seek_cur/_IO_seek_end);
   MODE == 0 is a pure position query delegated to do_ftell.  Returns
   the resulting absolute offset, or EOF (with errno set for invalid
   negative targets) on failure.  */
_IO_off64_t
_IO_new_file_seekoff (_IO_FILE *fp, _IO_off64_t offset, int dir, int mode)
{
  _IO_off64_t result;
  _IO_off64_t delta, new_offset;
  long count;

  /* Short-circuit into a separate function.  We don't want to mix any
     functionality and we don't want to touch anything inside the FILE
     object. */
  if (mode == 0)
    return do_ftell (fp);

  /* POSIX.1 8.2.3.7 says that after a call to fflush() the file
     offset of the underlying file must be exact.  If the buffer holds
     no pending read-ahead or unwritten output, the buffered position
     matches the kernel position and a partial refill is acceptable.  */
  int must_be_exact = (fp->_IO_read_base == fp->_IO_read_end
                       && fp->_IO_write_base == fp->_IO_write_ptr);

  bool was_writing = (fp->_IO_write_ptr > fp->_IO_write_base
                      || _IO_in_put_mode (fp));

  /* Flush unwritten characters.
     (This may do an unneeded write if we seek within the buffer.
     But to be able to switch to reading, we would need to set
     egptr to pptr.  That can't be done in the current design,
     which assumes file_ptr() is eGptr.  Anyway, since we probably
     end up flushing when we close(), it doesn't make much difference.)
     FIXME: simulate mem-mapped files. */
  if (was_writing && _IO_switch_to_get_mode (fp))
    return EOF;

  if (fp->_IO_buf_base == NULL)
    {
      /* It could be that we already have a pushback buffer.  */
      if (fp->_IO_read_base != NULL)
        {
          free (fp->_IO_read_base);
          fp->_flags &= ~_IO_IN_BACKUP;
        }
      /* Allocate a fresh buffer and reset both put and get areas to
         empty at its start.  */
      _IO_doallocbuf (fp);
      _IO_setp (fp, fp->_IO_buf_base, fp->_IO_buf_base);
      _IO_setg (fp, fp->_IO_buf_base, fp->_IO_buf_base, fp->_IO_buf_base);
    }

  /* Normalize the request to an absolute offset (dir == _IO_seek_set)
     where possible; otherwise fall through to the "dumb" path which
     lets the kernel do the relative seek.  */
  switch (dir)
    {
    case _IO_seek_cur:
      /* Adjust for read-ahead (bytes is buffer). */
      offset -= fp->_IO_read_end - fp->_IO_read_ptr;
      if (fp->_offset == _IO_pos_BAD)
        goto dumb;
      /* Make offset absolute, assuming current pointer is file_ptr(). */
      offset += fp->_offset;
      /* A relative seek may not produce a negative absolute offset
         (unlike the old implementation, this is rejected here).  */
      if (offset < 0)
        {
          __set_errno (EINVAL);
          return EOF;
        }

      dir = _IO_seek_set;
      break;
    case _IO_seek_set:
      break;
    case _IO_seek_end:
      {
        struct stat64 st;
        /* Only regular files have a trustworthy st_size to seek
           relative to; anything else goes to the kernel directly.  */
        if (_IO_SYSSTAT (fp, &st) == 0 && S_ISREG (st.st_mode))
          {
            offset += st.st_size;
            dir = _IO_seek_set;
          }
        else
          goto dumb;
      }
    }

  /* At this point, dir==_IO_seek_set.  */

  /* If destination is within current buffer, optimize: */
  if (fp->_offset != _IO_pos_BAD && fp->_IO_read_base != NULL
      && !_IO_in_backup (fp))
    {
      /* Absolute file offset corresponding to the start of the buffer;
         fp->_offset corresponds to _IO_read_end.  */
      _IO_off64_t start_offset = (fp->_offset
                                  - (fp->_IO_read_end - fp->_IO_buf_base));
      if (offset >= start_offset && offset < fp->_offset)
        {
          /* Target is inside the buffered data: just move the get
             pointer, no read needed (but see resync).  */
          _IO_setg (fp, fp->_IO_buf_base,
                    fp->_IO_buf_base + (offset - start_offset),
                    fp->_IO_read_end);
          _IO_setp (fp, fp->_IO_buf_base, fp->_IO_buf_base);

          _IO_mask_flags (fp, 0, _IO_EOF_SEEN);
          goto resync;
        }
    }

  if (fp->_flags & _IO_NO_READS)
    goto dumb;

  /* Try to seek to a block boundary, to improve kernel page management.
     NOTE(review): this mask assumes the buffer size is a power of two —
     confirm against _IO_doallocbuf's sizing.  */
  new_offset = offset & ~(fp->_IO_buf_end - fp->_IO_buf_base - 1);
  delta = offset - new_offset;
  if (delta > fp->_IO_buf_end - fp->_IO_buf_base)
    {
      new_offset = offset;
      delta = 0;
    }
  result = _IO_SYSSEEK (fp, new_offset, 0);
  if (result < 0)
    return EOF;
  if (delta == 0)
    count = 0;
  else
    {
      /* Refill the buffer from the block boundary up to (at least) the
         requested position; when exactness is required read only DELTA
         bytes so the kernel offset lands exactly on OFFSET.  */
      count = _IO_SYSREAD (fp, fp->_IO_buf_base,
                           (must_be_exact
                            ? delta : fp->_IO_buf_end - fp->_IO_buf_base));
      if (count < delta)
        {
          /* We weren't allowed to read, but try to seek the remainder. */
          offset = count == EOF ? delta : delta-count;
          dir = _IO_seek_cur;
          goto dumb;
        }
    }
  /* Position the get pointer DELTA bytes into the freshly read data.  */
  _IO_setg (fp, fp->_IO_buf_base, fp->_IO_buf_base + delta,
            fp->_IO_buf_base + count);
  _IO_setp (fp, fp->_IO_buf_base, fp->_IO_buf_base);
  fp->_offset = result + count;
  _IO_mask_flags (fp, 0, _IO_EOF_SEEN);
  return offset;

 dumb:
  /* Fallback: discard buffered state and let the kernel perform the
     seek with the original (possibly relative) DIR.  */
  _IO_unsave_markers (fp);
  result = _IO_SYSSEEK (fp, offset, dir);
  if (result != EOF)
    {
      _IO_mask_flags (fp, 0, _IO_EOF_SEEN);
      fp->_offset = result;
      _IO_setg (fp, fp->_IO_buf_base, fp->_IO_buf_base, fp->_IO_buf_base);
      _IO_setp (fp, fp->_IO_buf_base, fp->_IO_buf_base);
    }
  return result;

resync:
  /* We need to do it since it is possible that the file offset in
     the kernel may be changed behind our back.  It may happen when
     we fopen a file and then do a fork.  One process may access the
     file and the kernel file offset will be changed. */
  if (fp->_offset >= 0)
    _IO_SYSSEEK (fp, fp->_offset, 0);

  return offset;
}
/* Decide whether a read-only stream can be backed by an mmap of the
   whole file instead of a malloc'd buffer.  On success the stream's
   jump table is switched to the mmap variants; on any failure the
   vanilla file operations are (re)installed.  */
static void
decide_maybe_mmap (_IO_FILE *fp)
{
  /* We use the file in read-only mode.  This could mean we can
     mmap the file and use it without any copying.  But not all file
     descriptors are for mmap-able objects and on 32-bit machines we
     don't want to map files which are too large since this would
     require too much virtual memory.  */
  struct stat64 st;

  if (_IO_SYSSTAT (fp, &st) == 0
      && S_ISREG (st.st_mode) && st.st_size != 0
      /* Limit the file size to 1MB for 32-bit machines.  */
      && (sizeof (ptrdiff_t) > 4 || st.st_size < 1*1024*1024)
      /* Sanity check.  */
      && (fp->_offset == _IO_pos_BAD || fp->_offset <= st.st_size))
    {
      /* Try to map the file.  */
      void *p;

      p = __mmap64 (NULL, st.st_size, PROT_READ, MAP_SHARED, fp->_fileno, 0);
      if (p != MAP_FAILED)
        {
          /* OK, we managed to map the file.  Set the buffer up and use a
             special jump table with simplified underflow functions which
             never tries to read anything from the file.  */

          /* Advance the kernel offset to EOF so the descriptor's state is
             consistent with "everything already read"; if that fails we
             cannot trust the mapping and must back out.  */
          if (__lseek64 (fp->_fileno, st.st_size, SEEK_SET) != st.st_size)
            {
              (void) __munmap (p, st.st_size);
              fp->_offset = _IO_pos_BAD;
            }
          else
            {
              /* Buffer is the mapping itself; the final 0 means the
                 library must not free() it.  */
              _IO_setb (fp, p, (char *) p + st.st_size, 0);

              if (fp->_offset == _IO_pos_BAD)
                fp->_offset = 0;

              /* Get area starts at the previous logical position and
                 covers the whole file; the recorded kernel offset is
                 now EOF (matching the lseek above).  */
              _IO_setg (fp, p, p + fp->_offset, p + st.st_size);
              fp->_offset = st.st_size;

              if (fp->_mode <= 0)
                _IO_JUMPS_FILE_plus (fp) = &_IO_file_jumps_mmap;
              else
                _IO_JUMPS_FILE_plus (fp) = &_IO_wfile_jumps_mmap;
              fp->_wide_data->_wide_vtable = &_IO_wfile_jumps_mmap;

              return;
            }
        }
    }

  /* We couldn't use mmap, so revert to the vanilla file operations.  */

  if (fp->_mode <= 0)
    _IO_JUMPS_FILE_plus (fp) = &_IO_file_jumps;
  else
    _IO_JUMPS_FILE_plus (fp) = &_IO_wfile_jumps;
  fp->_wide_data->_wide_vtable = &_IO_wfile_jumps;
}
/* Guts of underflow callback if we mmap the file.  This stats the file
   and updates the stream state to match.  In the normal case we return zero.
   If the file is no longer eligible for mmap, its jump tables are reset to
   the vanilla ones and we return nonzero.  */
static int
mmap_remap_check (_IO_FILE *fp)
{
  struct stat64 st;

  /* Same eligibility test as decide_maybe_mmap: regular, non-empty,
     and within the 32-bit size cap.  */
  if (_IO_SYSSTAT (fp, &st) == 0
      && S_ISREG (st.st_mode) && st.st_size != 0
      /* Limit the file size to 1MB for 32-bit machines.  */
      && (sizeof (ptrdiff_t) > 4 || st.st_size < 1*1024*1024))
    {
      const size_t pagesize = __getpagesize ();
# define ROUNDED(x)	(((x) + pagesize - 1) & ~(pagesize - 1))
      if (ROUNDED (st.st_size) < ROUNDED (fp->_IO_buf_end
					  - fp->_IO_buf_base))
	{
	  /* We can trim off some pages past the end of the file.  */
	  (void) __munmap (fp->_IO_buf_base + ROUNDED (st.st_size),
			   ROUNDED (fp->_IO_buf_end - fp->_IO_buf_base)
			   - ROUNDED (st.st_size));
	  fp->_IO_buf_end = fp->_IO_buf_base + st.st_size;
	}
      else if (ROUNDED (st.st_size) > ROUNDED (fp->_IO_buf_end
					       - fp->_IO_buf_base))
	{
	  /* The file added some pages.  We need to remap it.  */
	  void *p;
#ifdef _G_HAVE_MREMAP
	  /* Grow the existing mapping in place if possible; the kernel
	     may move it (MREMAP_MAYMOVE).  */
	  p = __mremap (fp->_IO_buf_base, ROUNDED (fp->_IO_buf_end
						   - fp->_IO_buf_base),
			ROUNDED (st.st_size), MREMAP_MAYMOVE);
	  if (p == MAP_FAILED)
	    {
	      (void) __munmap (fp->_IO_buf_base,
			       fp->_IO_buf_end - fp->_IO_buf_base);
	      goto punt;
	    }
#else
	  /* No mremap: drop the old mapping and map the file afresh.  */
	  (void) __munmap (fp->_IO_buf_base,
			   fp->_IO_buf_end - fp->_IO_buf_base);
	  p = __mmap64 (NULL, st.st_size, PROT_READ, MAP_SHARED,
			fp->_fileno, 0);
	  if (p == MAP_FAILED)
	    goto punt;
#endif
	  fp->_IO_buf_base = p;
	  fp->_IO_buf_end = fp->_IO_buf_base + st.st_size;
	}
      else
	{
	  /* The number of pages didn't change.  */
	  fp->_IO_buf_end = fp->_IO_buf_base + st.st_size;
	}
# undef ROUNDED

      /* Convert the recorded offset from "kernel position" back to the
	 logical read position by subtracting the unread remainder, then
	 rebuild the get area over the (possibly resized) mapping,
	 clamping the read pointer to the buffer end.  */
      fp->_offset -= fp->_IO_read_end - fp->_IO_read_ptr;
      _IO_setg (fp, fp->_IO_buf_base,
		fp->_offset < fp->_IO_buf_end - fp->_IO_buf_base
		? fp->_IO_buf_base + fp->_offset : fp->_IO_buf_end,
		fp->_IO_buf_end);

      /* If we are already positioned at or past the end of the file, don't
	 change the current offset.  If not, seek past what we have mapped,
	 mimicking the position left by a normal underflow reading into its
	 buffer until EOF.  */

      if (fp->_offset < fp->_IO_buf_end - fp->_IO_buf_base)
	{
	  if (__lseek64 (fp->_fileno, fp->_IO_buf_end - fp->_IO_buf_base,
			 SEEK_SET)
	      != fp->_IO_buf_end - fp->_IO_buf_base)
	    fp->_flags |= _IO_ERR_SEEN;
	  else
	    fp->_offset = fp->_IO_buf_end - fp->_IO_buf_base;
	}

      return 0;
    }
  else
    {
      /* Life is no longer good for mmap.  Punt it.  */
      (void) __munmap (fp->_IO_buf_base,
		       fp->_IO_buf_end - fp->_IO_buf_base);

    punt:
      /* Clear the buffer/get area and reinstall the vanilla (read(2)
	 based) jump tables.  */
      fp->_IO_buf_base = fp->_IO_buf_end = NULL;
      _IO_setg (fp, NULL, NULL, NULL);
      if (fp->_mode <= 0)
	_IO_JUMPS_FILE_plus (fp) = &_IO_file_jumps;
      else
	_IO_JUMPS_FILE_plus (fp) = &_IO_wfile_jumps;
      fp->_wide_data->_wide_vtable = &_IO_wfile_jumps;

      return 1;
    }
}