/* Push the wide character C back onto FP's input stream (default
   pbackfail implementation for wide streams).  Fast path: if the
   character just read from the main get area equals C, simply step the
   wide read pointer back.  Otherwise switch to the backup area
   (allocating or growing it as necessary) and store C in front of the
   current read position.  Returns C on success, WEOF on allocation
   failure.  */
wint_t
_IO_wdefault_pbackfail (FILE *fp, wint_t c)
{
  if (fp->_wide_data->_IO_read_ptr > fp->_wide_data->_IO_read_base
      && !_IO_in_backup (fp)
      /* Fix: this fast path previously inspected and decremented the
	 narrow (byte) read pointer fp->_IO_read_ptr even though this is
	 a wide-oriented stream; every other access in this function goes
	 through fp->_wide_data.  Use the wide read pointer consistently,
	 matching the slow path below which stores through
	 fp->_wide_data->_IO_read_ptr.  */
      && (wint_t) fp->_wide_data->_IO_read_ptr[-1] == c)
    --fp->_wide_data->_IO_read_ptr;
  else
    {
      /* Need to handle a filebuf in write mode (switch to read mode). FIXME!*/
      if (!_IO_in_backup (fp))
	{
	  /* We need to keep the invariant that the main get area
	     logically follows the backup area.  */
	  if (fp->_wide_data->_IO_read_ptr > fp->_wide_data->_IO_read_base
	      && _IO_have_wbackup (fp))
	    {
	      if (save_for_wbackup (fp, fp->_wide_data->_IO_read_ptr))
		return WEOF;
	    }
	  else if (!_IO_have_wbackup (fp))
	    {
	      /* No backup buffer: allocate one. */
	      /* Use nshort buffer, if unused? (probably not)  FIXME */
	      int backup_size = 128;
	      wchar_t *bbuf = (wchar_t *) malloc (backup_size
						  * sizeof (wchar_t));
	      if (bbuf == NULL)
		return WEOF;
	      fp->_wide_data->_IO_save_base = bbuf;
	      fp->_wide_data->_IO_save_end = (fp->_wide_data->_IO_save_base
					      + backup_size);
	      fp->_wide_data->_IO_backup_base = fp->_wide_data->_IO_save_end;
	    }
	  fp->_wide_data->_IO_read_base = fp->_wide_data->_IO_read_ptr;
	  _IO_switch_to_wbackup_area (fp);
	}
      else if (fp->_wide_data->_IO_read_ptr <= fp->_wide_data->_IO_read_base)
	{
	  /* Increase size of existing backup buffer.  The old contents are
	     copied to the TOP of the new buffer so pushback can continue
	     growing downwards.  */
	  size_t new_size;
	  size_t old_size = (fp->_wide_data->_IO_read_end
			     - fp->_wide_data->_IO_read_base);
	  wchar_t *new_buf;
	  new_size = 2 * old_size;
	  new_buf = (wchar_t *) malloc (new_size * sizeof (wchar_t));
	  if (new_buf == NULL)
	    return WEOF;
	  __wmemcpy (new_buf + (new_size - old_size),
		     fp->_wide_data->_IO_read_base, old_size);
	  free (fp->_wide_data->_IO_read_base);
	  _IO_wsetg (fp, new_buf, new_buf + (new_size - old_size),
		     new_buf + new_size);
	  fp->_wide_data->_IO_backup_base = fp->_wide_data->_IO_read_ptr;
	}

      *--fp->_wide_data->_IO_read_ptr = c;
    }
  return c;
}
/* Refill FP's wide get area and return the next wide character WITHOUT
   consuming it (the wide analogue of __underflow).  Returns WEOF on
   orientation or input failure.  */
wint_t
__wunderflow (FILE *fp)
{
  /* Fail if the stream is byte oriented or cannot be made wide oriented.  */
  if (fp->_mode < 0 || (fp->_mode == 0 && _IO_fwide (fp, 1) != 1))
    return WEOF;

  if (fp->_mode == 0)
    _IO_fwide (fp, 1);

  /* Flush pending output and switch the buffer to reading.  */
  if (_IO_in_put_mode (fp))
    if (_IO_switch_to_wget_mode (fp) == EOF)
      return WEOF;

  if (fp->_wide_data->_IO_read_ptr < fp->_wide_data->_IO_read_end)
    return *fp->_wide_data->_IO_read_ptr;

  /* Backup area exhausted: return to the main get area and retry.  */
  if (_IO_in_backup (fp))
    {
      _IO_switch_to_main_wget_area (fp);
      if (fp->_wide_data->_IO_read_ptr < fp->_wide_data->_IO_read_end)
	return *fp->_wide_data->_IO_read_ptr;
    }

  /* Active markers require the current get area to be preserved before
     the buffer is refilled; otherwise any leftover backup area can be
     discarded.  */
  if (_IO_have_markers (fp))
    {
      if (save_for_wbackup (fp, fp->_wide_data->_IO_read_end))
	return WEOF;
    }
  else if (_IO_have_backup (fp))
    _IO_free_wbackup_area (fp);
  return _IO_UNDERFLOW (fp);
}
/* Read and CONSUME one byte from FP (the slow path of getc).  Returns the
   byte as an unsigned char value, or EOF.  */
int
__uflow (_IO_FILE *fp)
{
#if defined _LIBC || defined _GLIBCPP_USE_WCHAR_T
  /* Fail if the stream is wide oriented or cannot be made byte oriented.  */
  if (_IO_vtable_offset (fp) == 0 && _IO_fwide (fp, -1) != -1)
    return EOF;
#endif

  if (fp->_mode == 0)
    _IO_fwide (fp, -1);

  /* Flush pending output and switch the buffer to reading.  */
  if (_IO_in_put_mode (fp))
    if (_IO_switch_to_get_mode (fp) == EOF)
      return EOF;

  if (fp->_IO_read_ptr < fp->_IO_read_end)
    return *(unsigned char *) fp->_IO_read_ptr++;

  /* Backup area exhausted: return to the main get area and retry.  */
  if (_IO_in_backup (fp))
    {
      _IO_switch_to_main_get_area (fp);
      if (fp->_IO_read_ptr < fp->_IO_read_end)
	return *(unsigned char *) fp->_IO_read_ptr++;
    }

  /* Active markers require the get area to be saved before refilling;
     otherwise drop any stale backup area.  */
  if (_IO_have_markers (fp))
    {
      if (save_for_backup (fp, fp->_IO_read_end))
	return EOF;
    }
  else if (_IO_have_backup (fp))
    _IO_free_backup_area (fp);
  return _IO_UFLOW (fp);
}
/* fgetpos64: store FP's current position (and, for wide streams with a
   stateful encoding, the conversion state) in *POSP.  Returns 0 on
   success, EOF on failure with errno set.  */
int
_IO_new_fgetpos64 (FILE *fp, __fpos64_t *posp)
{
  off64_t pos;
  int result = 0;
  CHECK_FILE (fp, EOF);
  _IO_acquire_lock (fp);
  pos = _IO_seekoff_unlocked (fp, 0, _IO_seek_cur, 0);
  if (_IO_in_backup (fp) && pos != _IO_pos_BAD)
    {
      /* Byte stream reading from the backup area: the seek reflected the
	 saved main get area, so subtract the backup area's length.  */
      if (fp->_mode <= 0)
	pos -= fp->_IO_save_end - fp->_IO_save_base;
    }
  if (pos == _IO_pos_BAD)
    {
      /* ANSI explicitly requires setting errno to a positive value on
	 failure. */
      if (errno == 0)
	__set_errno (EIO);
      result = EOF;
    }
  else
    {
      posp->__pos = pos;
      if (fp->_mode > 0 && __libio_codecvt_encoding (fp->_codecvt) < 0)
	/* This is a stateful encoding, save the state. */
	posp->__state = fp->_wide_data->_IO_state;
    }
  _IO_release_lock (fp);
  return result;
}
/* ftell: return FP's current file position as a long int, or -1L on
   error (EIO if the position is unknown, EOVERFLOW if it does not fit in
   a long).  */
long int
_IO_ftell (_IO_FILE *fp)
{
  _IO_off64_t pos;
  CHECK_FILE (fp, -1L);
  _IO_acquire_lock (fp);
  pos = _IO_seekoff_unlocked (fp, 0, _IO_seek_cur, 0);
  if (_IO_in_backup (fp) && pos != _IO_pos_BAD)
    {
      /* While reading from the backup area, subtract its length (for
	 old-style streams or byte-oriented streams).  */
      if (_IO_vtable_offset (fp) != 0 || fp->_mode <= 0)
	pos -= fp->_IO_save_end - fp->_IO_save_base;
    }
  _IO_release_lock (fp);
  if (pos == _IO_pos_BAD)
    {
#ifdef EIO
      if (errno == 0)
	__set_errno (EIO);
#endif
      return -1L;
    }
  /* The position must round-trip through long int.  */
  if ((_IO_off64_t) (long int) pos != pos)
    {
#ifdef EOVERFLOW
      __set_errno (EOVERFLOW);
#endif
      return -1L;
    }
  return pos;
}
/* Old-ABI sync for file streams: flush buffered output and, if there is
   unconsumed read-ahead, seek the descriptor backwards so the kernel
   offset matches the logical stream position.  Returns 0 or EOF.  */
int
attribute_compat_text_section
_IO_old_file_sync (_IO_FILE *fp)
{
  _IO_ssize_t delta;
  int retval = 0;

  /* char* ptr = cur_ptr(); */
  if (fp->_IO_write_ptr > fp->_IO_write_base)
    if (_IO_old_do_flush(fp))
      return EOF;
  /* Bytes read ahead but not consumed; DELTA is negative or zero, so the
     relative seek below moves the kernel offset backwards.  */
  delta = fp->_IO_read_ptr - fp->_IO_read_end;
  if (delta != 0)
    {
#ifdef TODO
      if (_IO_in_backup (fp))
	delta -= eGptr () - Gbase ();
#endif
      _IO_off_t new_pos = _IO_SYSSEEK (fp, delta, 1);
      if (new_pos != (_IO_off_t) EOF)
	fp->_IO_read_end = fp->_IO_read_ptr;
#ifdef ESPIPE
      else if (errno == ESPIPE)
	; /* Ignore error from unseekable devices. */
#endif
      else
	retval = EOF;
    }
  if (retval != EOF)
    fp->_old_offset = _IO_pos_BAD;
  /* FIXME: Cleanup - can this be shared? */
  /* setg(base(), ptr, ptr); */
  return retval;
}
/* Overflow handler for file streams: make FP writable (allocating a
   buffer and/or switching from read to write mode as needed), then write
   CH.  CH == EOF only flushes.  Returns (unsigned char) CH, or EOF on
   error.  */
int
_IO_new_file_overflow (_IO_FILE *f, int ch)
{
  if (f->_flags & _IO_NO_WRITES) /* SET ERROR */
    {
      f->_flags |= _IO_ERR_SEEN;
      __set_errno (EBADF);
      return EOF;
    }
  /* If currently reading or no buffer allocated. */
  if ((f->_flags & _IO_CURRENTLY_PUTTING) == 0 || f->_IO_write_base == NULL)
    {
      /* Allocate a buffer if needed. */
      if (f->_IO_write_base == NULL)
	{
	  _IO_doallocbuf (f);
	  _IO_setg (f, f->_IO_buf_base, f->_IO_buf_base, f->_IO_buf_base);
	}
      /* Otherwise must be currently reading.  If _IO_read_ptr (and hence
	 also _IO_read_end) is at the buffer end, logically slide the
	 buffer forwards one block (by setting the read pointers to all
	 point at the beginning of the block).  This makes room for
	 subsequent output.  Otherwise, set the read pointers to
	 _IO_read_end (leaving that alone, so it can continue to
	 correspond to the external position). */
      if (__glibc_unlikely (_IO_in_backup (f)))
	{
	  size_t nbackup = f->_IO_read_end - f->_IO_read_ptr;
	  _IO_free_backup_area (f);
	  f->_IO_read_base -= MIN (nbackup,
				   f->_IO_read_base - f->_IO_buf_base);
	  f->_IO_read_ptr = f->_IO_read_base;
	}

      if (f->_IO_read_ptr == f->_IO_buf_end)
	f->_IO_read_end = f->_IO_read_ptr = f->_IO_buf_base;
      f->_IO_write_ptr = f->_IO_read_ptr;
      f->_IO_write_base = f->_IO_write_ptr;
      f->_IO_write_end = f->_IO_buf_end;
      f->_IO_read_base = f->_IO_read_ptr = f->_IO_read_end;

      f->_flags |= _IO_CURRENTLY_PUTTING;
      /* Line-buffered/unbuffered byte streams get an empty put area so
	 every character re-enters overflow and is flushed promptly.  */
      if (f->_mode <= 0 && f->_flags & (_IO_LINE_BUF | _IO_UNBUFFERED))
	f->_IO_write_end = f->_IO_write_ptr;
    }
  if (ch == EOF)
    return _IO_do_write (f, f->_IO_write_base,
			 f->_IO_write_ptr - f->_IO_write_base);
  if (f->_IO_write_ptr == f->_IO_buf_end ) /* Buffer is really full */
    if (_IO_do_flush (f) == EOF)
      return EOF;
  *f->_IO_write_ptr++ = ch;
  if ((f->_flags & _IO_UNBUFFERED)
      || ((f->_flags & _IO_LINE_BUF) && ch == '\n'))
    if (_IO_do_write (f, f->_IO_write_base,
		      f->_IO_write_ptr - f->_IO_write_base) == EOF)
      return EOF;
  return (unsigned char) ch;
}
/* Release FP's wide backup (pushback) area, first returning the stream
   to its main get area if it is currently reading from the backup.  */
void
_IO_free_wbackup_area (FILE *fp)
{
  struct _IO_wide_data *wd = fp->_wide_data;

  if (_IO_in_backup (fp))
    /* Just in case.  */
    _IO_switch_to_main_wget_area (fp);

  free (wd->_IO_save_base);
  wd->_IO_save_base = NULL;
  wd->_IO_save_end = NULL;
  wd->_IO_backup_base = NULL;
}
/* Reposition FP's read pointer to the position recorded in MARK.  DELTA
   is unused.  Returns 0 on success, EOF if MARK belongs to a different
   stream.  */
int
_IO_seekmark (_IO_FILE *fp, struct _IO_marker *mark, int delta)
{
  if (mark->_sbuf != fp)
    return EOF;

  if (mark->_pos < 0)
    {
      /* Negative positions are measured back from the end of the backup
	 area.  */
      if (!_IO_in_backup (fp))
	_IO_switch_to_backup_area (fp);
      fp->_IO_read_ptr = fp->_IO_read_end + mark->_pos;
    }
  else
    {
      /* Non-negative positions are offsets into the main get area.  */
      if (_IO_in_backup (fp))
	_IO_switch_to_main_get_area (fp);
      fp->_IO_read_ptr = fp->_IO_read_base + mark->_pos;
    }
  return 0;
}
/* xsgetn for mmap-backed streams: copy up to N bytes from the mapped
   buffer into DATA without issuing reads, remapping first if the file
   may have grown.  Returns the number of bytes stored in DATA.  */
static _IO_size_t
_IO_file_xsgetn_mmap (_IO_FILE *fp, void *data, _IO_size_t n)
{
  _IO_size_t have;
  char *read_ptr = fp->_IO_read_ptr;
  char *s = (char *) data;

  have = fp->_IO_read_end - fp->_IO_read_ptr;

  if (have < n)
    {
      /* Drain any pushed-back bytes first, then continue from the main
	 get area (the mapping itself).  S and N track the remaining
	 destination and count.  */
      if (__glibc_unlikely (_IO_in_backup (fp)))
	{
#ifdef _LIBC
	  s = __mempcpy (s, read_ptr, have);
#else
	  memcpy (s, read_ptr, have);
	  s += have;
#endif
	  n -= have;
	  _IO_switch_to_main_get_area (fp);
	  read_ptr = fp->_IO_read_ptr;
	  have = fp->_IO_read_end - fp->_IO_read_ptr;
	}

      if (have < n)
	{
	  /* Check that we are mapping all of the file, in case it grew.  */
	  if (__glibc_unlikely (mmap_remap_check (fp)))
	    /* We punted mmap, so complete with the vanilla code.
	       Fix: continue filling at S with the REMAINING count N; the
	       original passed DATA here, which would overwrite (and
	       double-count) any bytes already drained from the backup
	       area above.  When no backup bytes were drained, S == DATA
	       and the behavior is unchanged.  */
	    return s - (char *) data + _IO_XSGETN (fp, s, n);

	  read_ptr = fp->_IO_read_ptr;
	  have = fp->_IO_read_end - read_ptr;
	}
    }

  if (have < n)
    fp->_flags |= _IO_EOF_SEEN;

  if (have != 0)
    {
      have = MIN (have, n);
#ifdef _LIBC
      s = __mempcpy (s, read_ptr, have);
#else
      memcpy (s, read_ptr, have);
      s += have;
#endif
      fp->_IO_read_ptr = read_ptr + have;
    }

  return s - (char *) data;
}
/* Discard everything buffered on FP: unread input is dropped and
   unwritten output is thrown away, for whichever orientation (byte or
   wide) the stream currently has.  */
void
__fpurge (FILE *fp)
{
  if (fp->_mode <= 0)
    {
      /* Byte stream.  */
      if (_IO_in_backup (fp))
	_IO_free_backup_area (fp);

      fp->_IO_read_end = fp->_IO_read_ptr;
      fp->_IO_write_ptr = fp->_IO_write_base;
    }
  else
    {
      /* Wide-char stream.  */
      if (_IO_in_backup (fp))
	_IO_free_wbackup_area (fp);

      fp->_wide_data->_IO_read_end = fp->_wide_data->_IO_read_ptr;
      fp->_wide_data->_IO_write_ptr = fp->_wide_data->_IO_write_base;
    }
}
/* Return difference between MARK and current position of MARK's stream. */ int _IO_wmarker_delta (struct _IO_marker *mark) { int cur_pos; if (mark->_sbuf == NULL) return BAD_DELTA; if (_IO_in_backup (mark->_sbuf)) cur_pos = (mark->_sbuf->_wide_data->_IO_read_ptr - mark->_sbuf->_wide_data->_IO_read_end); else cur_pos = (mark->_sbuf->_wide_data->_IO_read_ptr - mark->_sbuf->_wide_data->_IO_read_base); return mark->_pos - cur_pos; }
/* Attach MARKER to FP, recording the current read position so the
   stream can later be rewound to it with _IO_seekmark.  */
void
_IO_init_marker (struct _IO_marker *marker, _IO_FILE *fp)
{
  marker->_sbuf = fp;

  /* Marks only make sense while reading; flush any pending output
     first.  */
  if (_IO_in_put_mode (fp))
    _IO_switch_to_get_mode (fp);

  marker->_pos = _IO_in_backup (fp)
		 ? fp->_IO_read_ptr - fp->_IO_read_end
		 : fp->_IO_read_ptr - fp->_IO_read_base;

  /* Should perhaps sort the chain? */
  marker->_next = fp->_markers;
  fp->_markers = marker;
}
/* Switch a wide string stream (wstrops) from writing to reading.  */
static void
_IO_wstr_switch_to_get_mode (_IO_FILE *fp)
{
  if (_IO_in_backup (fp))
    fp->_wide_data->_IO_read_base = fp->_wide_data->_IO_backup_base;
  else
    {
      fp->_wide_data->_IO_read_base = fp->_wide_data->_IO_buf_base;
      /* Extend the readable region over anything written past it.  */
      if (fp->_wide_data->_IO_write_ptr > fp->_wide_data->_IO_read_end)
	fp->_wide_data->_IO_read_end = fp->_wide_data->_IO_write_ptr;
    }
  /* Reading resumes at the current write position.  */
  fp->_wide_data->_IO_read_ptr = fp->_wide_data->_IO_write_ptr;
  fp->_wide_data->_IO_read_end = fp->_wide_data->_IO_write_ptr;

  fp->_flags &= ~_IO_CURRENTLY_PUTTING;
}
/* Detach MARKER from its stream by unlinking it from the stream's marker
   chain.  Silently does nothing if the marker is not on the chain.  */
void
_IO_remove_marker (struct _IO_marker *marker)
{
  /* Unlink from sb's chain. */
  struct _IO_marker **ptr = &marker->_sbuf->_markers;
  for (; ; ptr = &(*ptr)->_next)
    {
      if (*ptr == NULL)
	break;
      else if (*ptr == marker)
	{
	  *ptr = marker->_next;
	  return;
	}
    }
#if 0
    if _sbuf has a backup area that is no longer needed, should we delete
    it now, or wait until the next underflow?
#endif
}

/* Sentinel returned when a marker delta cannot be computed.  */
#define BAD_DELTA EOF

/* Distance between two markers on the same stream, in characters.  */
int
_IO_marker_difference (struct _IO_marker *mark1, struct _IO_marker *mark2)
{
  return mark1->_pos - mark2->_pos;
}

/* Return difference between MARK and current position of MARK's stream. */
int
_IO_marker_delta (struct _IO_marker *mark)
{
  int cur_pos;
  if (mark->_sbuf == NULL)
    /* Marker is no longer attached to a stream.  */
    return BAD_DELTA;
  if (_IO_in_backup (mark->_sbuf))
    /* In the backup area positions are measured from the area's end.  */
    cur_pos = mark->_sbuf->_IO_read_ptr - mark->_sbuf->_IO_read_end;
  else
    cur_pos = mark->_sbuf->_IO_read_ptr - mark->_sbuf->_IO_read_base;
  return mark->_pos - cur_pos;
}
/* fgetpos: store FP's current position in *POSP, verifying it fits the
   (possibly narrower) __pos field; save the conversion state for wide
   streams with a stateful encoding.  Returns 0 or EOF.  */
int
_IO_new_fgetpos (_IO_FILE *fp, _IO_fpos_t *posp)
{
  _IO_off64_t pos;
  int result = 0;
  CHECK_FILE (fp, EOF);
  _IO_acquire_lock (fp);
  pos = _IO_seekoff_unlocked (fp, 0, _IO_seek_cur, 0);
  if (_IO_in_backup (fp) && pos != _IO_pos_BAD)
    {
      /* Byte stream reading from the backup area: subtract its length.  */
      if (fp->_mode <= 0)
	pos -= fp->_IO_save_end - fp->_IO_save_base;
    }
  if (pos == _IO_pos_BAD)
    {
      /* ANSI explicitly requires setting errno to a positive value on
	 failure. */
#ifdef EIO
      if (errno == 0)
	__set_errno (EIO);
#endif
      result = EOF;
    }
  else if ((_IO_off64_t) (__typeof (posp->__pos)) pos != pos)
    {
      /* The position does not fit in the fpos_t representation.  */
#ifdef EOVERFLOW
      __set_errno (EOVERFLOW);
#endif
      result = EOF;
    }
  else
    {
      posp->__pos = pos;
      if (fp->_mode > 0
	  && (*fp->_codecvt->__codecvt_do_encoding) (fp->_codecvt) < 0)
	/* This is a stateful encoding, save the state. */
	posp->__state = fp->_wide_data->_IO_state;
    }
  _IO_release_lock (fp);
  return result;
}
/* Sync for mmap-backed read streams: there is no output to flush, but
   move the kernel file offset back over unconsumed read-ahead so it
   matches the logical stream position, then reset the get area.  */
static int
_IO_file_sync_mmap (_IO_FILE *fp)
{
  if (fp->_IO_read_ptr != fp->_IO_read_end)
    {
#ifdef TODO
      if (_IO_in_backup (fp))
	delta -= eGptr () - Gbase ();
#endif
      /* The mapping starts at file offset 0, so the consumed prefix of
	 the buffer IS the absolute file offset.  */
      if (__lseek64 (fp->_fileno, fp->_IO_read_ptr - fp->_IO_buf_base,
		     SEEK_SET)
	  != fp->_IO_read_ptr - fp->_IO_buf_base)
	{
	  fp->_flags |= _IO_ERR_SEEN;
	  return EOF;
	}
    }
  fp->_offset = fp->_IO_read_ptr - fp->_IO_buf_base;
  fp->_IO_read_end = fp->_IO_read_ptr = fp->_IO_read_base;
  return 0;
}
/* Switch FP from writing to reading.  Flushes pending output through the
   overflow callback, then rebuilds the get area so reading resumes at
   the current write position.  Returns 0, or EOF if the flush fails.  */
int
_IO_switch_to_get_mode (_IO_FILE *fp)
{
  if (fp->_IO_write_ptr > fp->_IO_write_base)
    if (_IO_OVERFLOW (fp, EOF) == EOF)
      return EOF;
  if (_IO_in_backup (fp))
    fp->_IO_read_base = fp->_IO_backup_base;
  else
    {
      fp->_IO_read_base = fp->_IO_buf_base;
      /* Extend the readable region over anything written past it.  */
      if (fp->_IO_write_ptr > fp->_IO_read_end)
	fp->_IO_read_end = fp->_IO_write_ptr;
    }
  fp->_IO_read_ptr = fp->_IO_write_ptr;

  /* Collapse the put area to empty at the current read position.  */
  fp->_IO_write_base = fp->_IO_write_ptr = fp->_IO_write_end = fp->_IO_read_ptr;

  fp->_flags &= ~_IO_CURRENTLY_PUTTING;
  return 0;
}
/* Wide analogue of _IO_switch_to_get_mode: flush pending wide output via
   the woverflow callback, then rebuild the wide get area so reading
   resumes at the current write position.  Returns 0 or EOF.  */
int
_IO_switch_to_wget_mode (FILE *fp)
{
  if (fp->_wide_data->_IO_write_ptr > fp->_wide_data->_IO_write_base)
    if ((wint_t)_IO_WOVERFLOW (fp, WEOF) == WEOF)
      return EOF;
  if (_IO_in_backup (fp))
    fp->_wide_data->_IO_read_base = fp->_wide_data->_IO_backup_base;
  else
    {
      fp->_wide_data->_IO_read_base = fp->_wide_data->_IO_buf_base;
      /* Extend the readable region over anything written past it.  */
      if (fp->_wide_data->_IO_write_ptr > fp->_wide_data->_IO_read_end)
	fp->_wide_data->_IO_read_end = fp->_wide_data->_IO_write_ptr;
    }
  fp->_wide_data->_IO_read_ptr = fp->_wide_data->_IO_write_ptr;

  /* Collapse the put area to empty at the current read position.  */
  fp->_wide_data->_IO_write_base = fp->_wide_data->_IO_write_ptr
    = fp->_wide_data->_IO_write_end = fp->_wide_data->_IO_read_ptr;

  fp->_flags &= ~_IO_CURRENTLY_PUTTING;
  return 0;
}
/* ftello64: return FP's current file position as an off64_t, or -1L with
   errno set on failure.  */
off64_t
ftello64 (_IO_FILE *fp)
{
  _IO_off64_t pos;
  CHECK_FILE (fp, -1L);
  _IO_acquire_lock (fp);
  pos = _IO_seekoff_unlocked (fp, 0, _IO_seek_cur, 0);
  if (_IO_in_backup (fp) && pos != _IO_pos_BAD)
    {
      /* Byte stream reading from the backup area: subtract its length.  */
      if (fp->_mode <= 0)
	pos -= fp->_IO_save_end - fp->_IO_save_base;
    }
  _IO_release_lock (fp);
  if (pos == _IO_pos_BAD)
    {
#ifdef EIO
      if (errno == 0)
	__set_errno (EIO);
#endif
      return -1L;
    }
  return pos;
}
/* Old-ABI fgetpos64: store FP's current position in *POSP.  Returns 0 on
   success, EOF on failure with errno set.  */
int
attribute_compat_text_section
_IO_old_fgetpos64 (_IO_FILE *fp, _IO_fpos64_t *posp)
{
  _IO_off64_t pos;
  CHECK_FILE (fp, EOF);
  _IO_acquire_lock (fp);
  pos = _IO_seekoff_unlocked (fp, 0, _IO_seek_cur, 0);
  if (_IO_in_backup (fp) && pos != _IO_pos_BAD)
    /* Subtract the backup area so the reported position matches the
       logical read position.  */
    pos -= fp->_IO_save_end - fp->_IO_save_base;
  _IO_release_lock (fp);
  if (pos == _IO_pos_BAD)
    {
      /* ANSI explicitly requires setting errno to a positive value on
	 failure. */
#ifdef EIO
      if (errno == 0)
	__set_errno (EIO);
#endif
      return EOF;
    }
  posp->__pos = pos;
  return 0;
}
/* Bulk read for file streams: copy N bytes into DATA, draining the
   buffer (and any backup area) first, refilling via __underflow for
   small residues and reading directly into the caller's buffer for large
   ones.  Returns the number of bytes actually stored.  */
_IO_size_t
_IO_file_xsgetn (_IO_FILE *fp, void *data, _IO_size_t n)
{
  _IO_size_t want, have;
  _IO_ssize_t count;
  char *s = data;

  want = n;

  if (fp->_IO_buf_base == NULL)
    {
      /* Maybe we already have a push back pointer.  */
      if (fp->_IO_save_base != NULL)
	{
	  free (fp->_IO_save_base);
	  fp->_flags &= ~_IO_IN_BACKUP;
	}
      _IO_doallocbuf (fp);
    }

  while (want > 0)
    {
      have = fp->_IO_read_end - fp->_IO_read_ptr;
      if (want <= have)
	{
	  memcpy (s, fp->_IO_read_ptr, want);
	  fp->_IO_read_ptr += want;
	  want = 0;
	}
      else
	{
	  /* Take whatever the buffer still holds.  */
	  if (have > 0)
	    {
#ifdef _LIBC
	      s = __mempcpy (s, fp->_IO_read_ptr, have);
#else
	      memcpy (s, fp->_IO_read_ptr, have);
	      s += have;
#endif
	      want -= have;
	      fp->_IO_read_ptr += have;
	    }

	  /* Check for backup and repeat */
	  if (_IO_in_backup (fp))
	    {
	      _IO_switch_to_main_get_area (fp);
	      continue;
	    }

	  /* If we now want less than a buffer, underflow and repeat
	     the copy.  Otherwise, _IO_SYSREAD directly to
	     the user buffer. */
	  if (fp->_IO_buf_base
	      && want < (size_t) (fp->_IO_buf_end - fp->_IO_buf_base))
	    {
	      if (__underflow (fp) == EOF)
		break;

	      continue;
	    }

	  /* These must be set before the sysread as we might longjmp out
	     waiting for input. */
	  _IO_setg (fp, fp->_IO_buf_base, fp->_IO_buf_base, fp->_IO_buf_base);
	  _IO_setp (fp, fp->_IO_buf_base, fp->_IO_buf_base);

	  /* Try to maintain alignment: read a whole number of blocks.  */
	  count = want;
	  if (fp->_IO_buf_base)
	    {
	      _IO_size_t block_size = fp->_IO_buf_end - fp->_IO_buf_base;
	      if (block_size >= 128)
		count -= want % block_size;
	    }

	  count = _IO_SYSREAD (fp, s, count);
	  if (count <= 0)
	    {
	      if (count == 0)
		fp->_flags |= _IO_EOF_SEEN;
	      else
		fp->_flags |= _IO_ERR_SEEN;

	      break;
	    }

	  s += count;
	  want -= count;
	  if (fp->_offset != _IO_pos_BAD)
	    _IO_pos_adjust (fp->_offset, count);
	}
    }

  return n - want;
}
/* ftell{,o} implementation for wide mode.  Don't modify any state of the file
   pointer while we try to get the current state of the stream except in one
   case, which is when we have unflushed writes in append mode.  Returns the
   current logical offset, or EOF/-1 with errno set on failure.  */
static _IO_off64_t
do_ftell_wide (_IO_FILE *fp)
{
  _IO_off64_t result, offset = 0;

  /* No point looking for offsets in the buffer if it hasn't even been
     allocated.  */
  if (fp->_wide_data->_IO_buf_base != NULL)
    {
      const wchar_t *wide_read_base;
      const wchar_t *wide_read_ptr;
      const wchar_t *wide_read_end;
      bool unflushed_writes = (fp->_wide_data->_IO_write_ptr
			       > fp->_wide_data->_IO_write_base);

      bool append_mode = (fp->_flags & _IO_IS_APPENDING) == _IO_IS_APPENDING;

      /* When we have unflushed writes in append mode, seek to the end of the
	 file and record that offset.  This is the only time we change the
	 file stream state and it is safe since the file handle is active.  */
      if (unflushed_writes && append_mode)
	{
	  result = _IO_SYSSEEK (fp, 0, _IO_seek_end);
	  if (result == _IO_pos_BAD)
	    return EOF;
	  else
	    fp->_offset = result;
	}

      /* XXX For wide stream with backup store it is not very reasonable to
	 determine the offset.  The pushed-back character might require a
	 state change and we need not be able to compute the initial state by
	 reverse transformation since there is no guarantee of symmetry.  So
	 we don't even try and return an error.  */
      if (_IO_in_backup (fp))
	{
	  if (fp->_wide_data->_IO_read_ptr < fp->_wide_data->_IO_read_end)
	    {
	      __set_errno (EINVAL);
	      return -1;
	    }

	  /* Nothing in the backup store, so note the backed up pointers
	     without changing the state.  */
	  wide_read_base = fp->_wide_data->_IO_save_base;
	  wide_read_ptr = wide_read_base;
	  wide_read_end = fp->_wide_data->_IO_save_end;
	}
      else
	{
	  wide_read_base = fp->_wide_data->_IO_read_base;
	  wide_read_ptr = fp->_wide_data->_IO_read_ptr;
	  wide_read_end = fp->_wide_data->_IO_read_end;
	}

      struct _IO_codecvt *cv = fp->_codecvt;
      /* CLEN > 0: fixed-width encoding; CLEN <= 0: variable/stateful.  */
      int clen = (*cv->__codecvt_do_encoding) (cv);

      if (!unflushed_writes)
	{
	  if (clen > 0)
	    {
	      /* Subtract the unread wide chars and unread external bytes.  */
	      offset -= (wide_read_end - wide_read_ptr) * clen;
	      offset -= fp->_IO_read_end - fp->_IO_read_ptr;
	    }
	  else
	    {
	      int nread;

	      /* Measure how many external bytes the consumed wide chars
		 correspond to, then back out the rest of the byte buffer.  */
	      size_t delta = wide_read_ptr - wide_read_base;
	      __mbstate_t state = fp->_wide_data->_IO_last_state;
	      nread = (*cv->__codecvt_do_length) (cv, &state,
						  fp->_IO_read_base,
						  fp->_IO_read_end, delta);
	      offset -= fp->_IO_read_end - fp->_IO_read_base - nread;
	    }
	}
      else
	{
	  if (clen > 0)
	    offset += (fp->_wide_data->_IO_write_ptr
		       - fp->_wide_data->_IO_write_base) * clen;
	  else
	    {
	      /* Variable-width encoding: convert the pending wide output to
		 find its external length.  */
	      size_t delta = (fp->_wide_data->_IO_write_ptr
			      - fp->_wide_data->_IO_write_base);

	      /* Allocate enough space for the conversion.  */
	      size_t outsize = delta * sizeof (wchar_t);
	      char *out = malloc (outsize);

	      /* Fix: the original passed OUT to the conversion without
		 checking for allocation failure.  DELTA >= 1 here (we are in
		 the unflushed-writes branch), so a NULL return is a genuine
		 out-of-memory condition, not a zero-size malloc quirk.  */
	      if (out == NULL)
		return WEOF;

	      char *outstop = out;
	      const wchar_t *in = fp->_wide_data->_IO_write_base;

	      enum __codecvt_result status;

	      __mbstate_t state = fp->_wide_data->_IO_last_state;
	      status = (*cv->__codecvt_do_out) (cv, &state, in, in + delta,
						&in, out, out + outsize,
						&outstop);

	      /* We don't check for __codecvt_partial because it can be
		 returned on one of two conditions: either the output buffer
		 is full or the input sequence is incomplete.  We take care to
		 allocate enough buffer and our input sequences must be
		 complete since they are accepted as wchar_t; if not, then
		 that is an error.  */
	      if (__glibc_unlikely (status != __codecvt_ok))
		{
		  free (out);
		  return WEOF;
		}

	      offset += outstop - out;
	      free (out);
	    }

	  /* We don't trust _IO_read_end to represent the current file offset
	     when writing in append mode because the value would have to be
	     shifted to the end of the file during a flush.  Use the write
	     base instead, along with the new offset we got above when we did
	     a seek to the end of the file.  */
	  if (append_mode)
	    offset += fp->_IO_write_ptr - fp->_IO_write_base;
	  /* For all other modes, _IO_read_end represents the file offset.  */
	  else
	    offset += fp->_IO_write_ptr - fp->_IO_read_end;
	}
    }

  if (fp->_offset != _IO_pos_BAD)
    result = fp->_offset;
  else
    result = _IO_SYSSEEK (fp, 0, _IO_seek_cur);

  if (result == EOF)
    return result;

  result += offset;

  if (result < 0)
    {
      __set_errno (EINVAL);
      return EOF;
    }

  return result;
}
/* Old-ABI seek for file streams.  Flushes pending output, tries to
   satisfy the seek inside the current buffer, otherwise seeks the
   descriptor (to a block boundary when possible) and refills the buffer.
   Returns the resulting offset, or EOF on failure.  */
_IO_off64_t
attribute_compat_text_section
_IO_old_file_seekoff (_IO_FILE *fp, _IO_off64_t offset, int dir, int mode)
{
  _IO_off_t result;
  _IO_off64_t delta, new_offset;
  long count;
  /* POSIX.1 8.2.3.7 says that after a call the fflush() the file
     offset of the underlying file must be exact.  */
  int must_be_exact = (fp->_IO_read_base == fp->_IO_read_end
		       && fp->_IO_write_base == fp->_IO_write_ptr);

  if (mode == 0)
    dir = _IO_seek_cur, offset = 0; /* Don't move any pointers. */

  /* Flush unwritten characters.
     (This may do an unneeded write if we seek within the buffer.
     But to be able to switch to reading, we would need to set
     egptr to pptr.  That can't be done in the current design,
     which assumes file_ptr() is eGptr.  Anyway, since we probably
     end up flushing when we close(), it doesn't make much difference.)
     FIXME: simulate mem-mapped files. */
  if (fp->_IO_write_ptr > fp->_IO_write_base || _IO_in_put_mode (fp))
    if (_IO_switch_to_get_mode (fp))
      return EOF;

  if (fp->_IO_buf_base == NULL)
    {
      /* It could be that we already have a pushback buffer.  */
      if (fp->_IO_read_base != NULL)
	{
	  free (fp->_IO_read_base);
	  fp->_flags &= ~_IO_IN_BACKUP;
	}
      _IO_doallocbuf (fp);
      _IO_setp (fp, fp->_IO_buf_base, fp->_IO_buf_base);
      _IO_setg (fp, fp->_IO_buf_base, fp->_IO_buf_base, fp->_IO_buf_base);
    }

  switch (dir)
    {
    case _IO_seek_cur:
      /* Adjust for read-ahead (bytes is buffer). */
      offset -= fp->_IO_read_end - fp->_IO_read_ptr;
      if (fp->_old_offset == _IO_pos_BAD)
	goto dumb;
      /* Make offset absolute, assuming current pointer is file_ptr(). */
      offset += fp->_old_offset;

      dir = _IO_seek_set;
      break;
    case _IO_seek_set:
      break;
    case _IO_seek_end:
      {
	/* Resolve end-relative seeks from the file size when possible.  */
	struct stat64 st;
	if (_IO_SYSSTAT (fp, &st) == 0 && S_ISREG (st.st_mode))
	  {
	    offset += st.st_size;
	    dir = _IO_seek_set;
	  }
	else
	  goto dumb;
      }
    }
  /* At this point, dir==_IO_seek_set. */

  /* If we are only interested in the current position we've found it now.  */
  if (mode == 0)
    return offset;

  /* If destination is within current buffer, optimize: */
  if (fp->_old_offset != _IO_pos_BAD && fp->_IO_read_base != NULL
      && !_IO_in_backup (fp))
    {
      /* Offset relative to start of main get area. */
      _IO_off_t rel_offset = (offset - fp->_old_offset
			      + (fp->_IO_read_end - fp->_IO_read_base));
      if (rel_offset >= 0)
	{
#if 0
	  if (_IO_in_backup (fp))
	    _IO_switch_to_main_get_area (fp);
#endif
	  if (rel_offset <= fp->_IO_read_end - fp->_IO_read_base)
	    {
	      /* Target is inside the buffer: just move the read pointer.  */
	      _IO_setg (fp, fp->_IO_buf_base, fp->_IO_buf_base + rel_offset,
			fp->_IO_read_end);
	      _IO_setp (fp, fp->_IO_buf_base, fp->_IO_buf_base);
	      {
		_IO_mask_flags (fp, 0, _IO_EOF_SEEN);
		goto resync;
	      }
	    }
#ifdef TODO
	  /* If we have streammarkers, seek forward by reading ahead. */
	  if (_IO_have_markers (fp))
	    {
	      int to_skip = rel_offset
		- (fp->_IO_read_ptr - fp->_IO_read_base);
	      if (ignore (to_skip) != to_skip)
		goto dumb;
	      _IO_mask_flags (fp, 0, _IO_EOF_SEEN);
	      goto resync;
	    }
#endif
	}
#ifdef TODO
      if (rel_offset < 0 && rel_offset >= Bbase () - Bptr ())
	{
	  if (!_IO_in_backup (fp))
	    _IO_switch_to_backup_area (fp);
	  gbump (fp->_IO_read_end + rel_offset - fp->_IO_read_ptr);
	  _IO_mask_flags (fp, 0, _IO_EOF_SEEN);
	  goto resync;
	}
#endif
    }

#ifdef TODO
  _IO_unsave_markers (fp);
#endif

  if (fp->_flags & _IO_NO_READS)
    goto dumb;

  /* Try to seek to a block boundary, to improve kernel page management. */
  new_offset = offset & ~(fp->_IO_buf_end - fp->_IO_buf_base - 1);
  delta = offset - new_offset;
  if (delta > fp->_IO_buf_end - fp->_IO_buf_base)
    {
      new_offset = offset;
      delta = 0;
    }
  result = _IO_SYSSEEK (fp, new_offset, 0);
  if (result < 0)
    return EOF;
  if (delta == 0)
    count = 0;
  else
    {
      count = _IO_SYSREAD (fp, fp->_IO_buf_base,
			   (must_be_exact
			    ? delta : fp->_IO_buf_end - fp->_IO_buf_base));
      if (count < delta)
	{
	  /* We weren't allowed to read, but try to seek the remainder. */
	  offset = count == EOF ? delta : delta-count;
	  dir = _IO_seek_cur;
	  goto dumb;
	}
    }
  _IO_setg (fp, fp->_IO_buf_base, fp->_IO_buf_base + delta,
	    fp->_IO_buf_base + count);
  _IO_setp (fp, fp->_IO_buf_base, fp->_IO_buf_base);
  fp->_old_offset = result + count;
  _IO_mask_flags (fp, 0, _IO_EOF_SEEN);
  return offset;

dumb:
  /* Fallback: seek the descriptor directly and reset all buffering.  */
  _IO_unsave_markers (fp);
  result = _IO_SYSSEEK (fp, offset, dir);
  if (result != EOF)
    {
      _IO_mask_flags (fp, 0, _IO_EOF_SEEN);
      fp->_old_offset = result;
      _IO_setg (fp, fp->_IO_buf_base, fp->_IO_buf_base, fp->_IO_buf_base);
      _IO_setp (fp, fp->_IO_buf_base, fp->_IO_buf_base);
    }
  return result;

resync:
  /* We need to do it since it is possible that the file offset in
     the kernel may be changed behind our back. It may happen when
     we fopen a file and then do a fork. One process may access the
     file and the kernel file offset will be changed. */
  if (fp->_old_offset >= 0)
    _IO_SYSSEEK (fp, fp->_old_offset, 0);

  return offset;
}
/* Seek implementation for file streams (new ABI).  mode==0 is a pure
   position query and is delegated to do_ftell without touching the FILE
   object.  Otherwise: flush pending output, try to satisfy the seek
   inside the current buffer, else seek the descriptor (block-aligned
   when possible) and refill.  Returns the resulting offset, or EOF.  */
_IO_off64_t
_IO_new_file_seekoff (_IO_FILE *fp, _IO_off64_t offset, int dir, int mode)
{
  _IO_off64_t result;
  _IO_off64_t delta, new_offset;
  long count;

  /* Short-circuit into a separate function.  We don't want to mix any
     functionality and we don't want to touch anything inside the FILE
     object. */
  if (mode == 0)
    return do_ftell (fp);

  /* POSIX.1 8.2.3.7 says that after a call the fflush() the file
     offset of the underlying file must be exact.  */
  int must_be_exact = (fp->_IO_read_base == fp->_IO_read_end
		       && fp->_IO_write_base == fp->_IO_write_ptr);

  bool was_writing = (fp->_IO_write_ptr > fp->_IO_write_base
		      || _IO_in_put_mode (fp));

  /* Flush unwritten characters.
     (This may do an unneeded write if we seek within the buffer.
     But to be able to switch to reading, we would need to set
     egptr to pptr.  That can't be done in the current design,
     which assumes file_ptr() is eGptr.  Anyway, since we probably
     end up flushing when we close(), it doesn't make much difference.)
     FIXME: simulate mem-mapped files. */
  if (was_writing && _IO_switch_to_get_mode (fp))
    return EOF;

  if (fp->_IO_buf_base == NULL)
    {
      /* It could be that we already have a pushback buffer.  */
      if (fp->_IO_read_base != NULL)
	{
	  free (fp->_IO_read_base);
	  fp->_flags &= ~_IO_IN_BACKUP;
	}
      _IO_doallocbuf (fp);
      _IO_setp (fp, fp->_IO_buf_base, fp->_IO_buf_base);
      _IO_setg (fp, fp->_IO_buf_base, fp->_IO_buf_base, fp->_IO_buf_base);
    }

  switch (dir)
    {
    case _IO_seek_cur:
      /* Adjust for read-ahead (bytes is buffer). */
      offset -= fp->_IO_read_end - fp->_IO_read_ptr;
      if (fp->_offset == _IO_pos_BAD)
	goto dumb;
      /* Make offset absolute, assuming current pointer is file_ptr(). */
      offset += fp->_offset;
      if (offset < 0)
	{
	  __set_errno (EINVAL);
	  return EOF;
	}

      dir = _IO_seek_set;
      break;
    case _IO_seek_set:
      break;
    case _IO_seek_end:
      {
	/* Resolve end-relative seeks from the file size when possible.  */
	struct stat64 st;
	if (_IO_SYSSTAT (fp, &st) == 0 && S_ISREG (st.st_mode))
	  {
	    offset += st.st_size;
	    dir = _IO_seek_set;
	  }
	else
	  goto dumb;
      }
    }

  /* At this point, dir==_IO_seek_set. */

  /* If destination is within current buffer, optimize: */
  if (fp->_offset != _IO_pos_BAD && fp->_IO_read_base != NULL
      && !_IO_in_backup (fp))
    {
      /* fp->_offset corresponds to _IO_read_end, so the buffer covers
	 [start_offset, fp->_offset).  */
      _IO_off64_t start_offset = (fp->_offset
				  - (fp->_IO_read_end - fp->_IO_buf_base));
      if (offset >= start_offset && offset < fp->_offset)
	{
	  _IO_setg (fp, fp->_IO_buf_base,
		    fp->_IO_buf_base + (offset - start_offset),
		    fp->_IO_read_end);
	  _IO_setp (fp, fp->_IO_buf_base, fp->_IO_buf_base);

	  _IO_mask_flags (fp, 0, _IO_EOF_SEEN);
	  goto resync;
	}
    }

  if (fp->_flags & _IO_NO_READS)
    goto dumb;

  /* Try to seek to a block boundary, to improve kernel page management. */
  new_offset = offset & ~(fp->_IO_buf_end - fp->_IO_buf_base - 1);
  delta = offset - new_offset;
  if (delta > fp->_IO_buf_end - fp->_IO_buf_base)
    {
      new_offset = offset;
      delta = 0;
    }
  result = _IO_SYSSEEK (fp, new_offset, 0);
  if (result < 0)
    return EOF;
  if (delta == 0)
    count = 0;
  else
    {
      count = _IO_SYSREAD (fp, fp->_IO_buf_base,
			   (must_be_exact
			    ? delta : fp->_IO_buf_end - fp->_IO_buf_base));
      if (count < delta)
	{
	  /* We weren't allowed to read, but try to seek the remainder. */
	  offset = count == EOF ? delta : delta-count;
	  dir = _IO_seek_cur;
	  goto dumb;
	}
    }
  _IO_setg (fp, fp->_IO_buf_base, fp->_IO_buf_base + delta,
	    fp->_IO_buf_base + count);
  _IO_setp (fp, fp->_IO_buf_base, fp->_IO_buf_base);
  fp->_offset = result + count;
  _IO_mask_flags (fp, 0, _IO_EOF_SEEN);
  return offset;

dumb:
  /* Fallback: seek the descriptor directly and reset all buffering.  */
  _IO_unsave_markers (fp);
  result = _IO_SYSSEEK (fp, offset, dir);
  if (result != EOF)
    {
      _IO_mask_flags (fp, 0, _IO_EOF_SEEN);
      fp->_offset = result;
      _IO_setg (fp, fp->_IO_buf_base, fp->_IO_buf_base, fp->_IO_buf_base);
      _IO_setp (fp, fp->_IO_buf_base, fp->_IO_buf_base);
    }
  return result;

resync:
  /* We need to do it since it is possible that the file offset in
     the kernel may be changed behind our back. It may happen when
     we fopen a file and then do a fork. One process may access the
     file and the kernel file offset will be changed. */
  if (fp->_offset >= 0)
    _IO_SYSSEEK (fp, fp->_offset, 0);

  return offset;
}