/*
 * fstream_read
 *
 * Read 'size' bytes of data from the filestream into 'buffer'.
 * If 'read_whole_lines' is specified then read up to the last logical row
 * in the source buffer. 'fo' keeps the state (name, offset, etc) of the current
 * filestream file we are reading from.
 *
 * Parameters:
 *   fs                - filestream state: glob of source files, current file
 *                       index (fidx), stash buffer, offsets, error string.
 *   dest              - destination buffer the caller wants filled.
 *   size              - number of bytes requested into 'dest'.
 *   fo                - out-param updated (via updateCurFileState) with the
 *                       current file name/offset before data is handed back.
 *   read_whole_lines  - non-zero: only return data ending on a complete
 *                       logical row; the tail partial row is stashed in
 *                       fs->buffer for the next call.
 *   line_delim_str/line_delim_length - custom row delimiter for TEXT format;
 *                       length <= 0 means default '\n'.
 *
 * Returns: number of bytes placed in 'dest', 0 when all files are exhausted
 * (or size==0), or -1 on error with fs->ferror set.
 */
int fstream_read(fstream_t *fs,
				 void *dest,
				 int size,
				 struct fstream_filename_and_offset *fo,
				 const int read_whole_lines,
				 const char *line_delim_str,
				 const int line_delim_length)
{
	int buffer_capacity = fs->options.bufsize;
	/* static: fs->ferror may point into this after we return (single-threaded
	 * use assumed — NOTE(review): not safe if called from multiple threads). */
	static char err_buf[FILE_ERROR_SZ] = {0};

	/* a previous call already failed; stay failed */
	if (fs->ferror)
		return -1;

	/* loop so we can 'continue' into the next file when one is exhausted */
	for (;;)
	{
		ssize_t bytesread;		/* num bytes read from filestream */
		ssize_t bytesread2;		/* same, but when reading a second round */

		/* nothing requested, or no more files in the glob: done */
		if (!size || fs->fidx == fs->glob.gl_pathc)
			return 0;

		/*
		 * If data source has a header, we consume it now and in order to
		 * move on to real data that follows it.
		 */
		if (fs->skip_header_line)
		{
			char* p = fs->buffer;
			char* q = p + fs->buffer_cur_size;
			size_t len = 0;

			assert(fs->buffer_cur_size < buffer_capacity);

			/*
			 * read data from the source file and fill up the file stream buffer
			 */
			len = buffer_capacity - fs->buffer_cur_size;
			bytesread = gfile_read(&fs->fd, q, len);

			if (bytesread < 0)
			{
				fs->ferror = format_error("cannot read file - ", fs->glob.gl_pathv[fs->fidx]);
				return -1;
			}

			/* update the buffer size according to new byte count we just read */
			fs->buffer_cur_size += bytesread;
			q += bytesread;

			if (fs->options.is_csv)
			{
				/* csv header: scan_csv_records with num_fields=1 advances past
				 * one record; returns 0 when no complete record is present */
				p = scan_csv_records(p, q, 1, fs);
			}
			else
			{
				if (line_delim_length > 0)
				{
					/* text header with defined EOL */
					p = find_first_eol_delim (p, q, line_delim_str, line_delim_length);
				}
				else
				{
					/* text header with \n as delimiter (by default) */
					for (; p < q && *p != '\n'; p++)
						;
				}
				/* step past the delimiter; p==0 signals "no full header yet" */
				p = (p < q) ? p + 1 : 0;
				/* NOTE(review): incremented even when no delimiter was found
				 * (p==0) — presumably harmless since we either error out or
				 * retry below; confirm against line_number consumers. */
				fs->line_number++;
			}

			if (!p)
			{
				/* header incomplete. If the buffer is already full we can never
				 * fit it — give up; otherwise treat everything read as header
				 * bytes and keep skipping on the next iteration/read. */
				if (fs->buffer_cur_size == buffer_capacity)
				{
					gfile_printf_then_putc_newline(
							"fstream ERROR: header too long in file %s",
							fs->glob.gl_pathv[fs->fidx]);
					fs->ferror = "line too long in file";
					return -1;
				}
				p = q;
			}

			/*
			 * update the filestream buffer offset to past last line read and
			 * copy the end of the buffer (past header data) to the beginning.
			 * we now bypassed the header data and can continue to real data.
			 */
			fs->foff += p - fs->buffer;
			fs->buffer_cur_size = q - p;
			memmove(fs->buffer, p, fs->buffer_cur_size);
			fs->skip_header_line = 0;
		}

		/*
		 * If we need to read all the data up to the last *complete* logical
		 * line in the data buffer (like gpfdist for example) - we choose this
		 * path. We grab the bigger chunk we can get that includes whole lines.
		 * Otherwise, if we just want the whole buffer we skip.
		 */
		if (read_whole_lines)
		{
			char	*p;
			ssize_t total_bytes = fs->buffer_cur_size;

			/* caller's buffer must be able to hold a stashed partial row plus
			 * at least one full buffer's worth of fresh data */
			assert(size >= buffer_capacity);

			if (total_bytes > 0)
			{
				/*
				 * source buffer is not empty. copy the data from the beginning
				 * up to the current length before moving on to reading more
				 */
				fs->buffer_cur_size = 0;
				updateCurFileState(fs, fo);
				memcpy(dest, fs->buffer, total_bytes);
			}

			/* read more data from source file into destination buffer */
			bytesread2 = gfile_read(&fs->fd, (char*) dest + total_bytes, size - total_bytes);

			if (bytesread2 < 0)
			{
				fs->ferror = format_error("cannot read file - ", fs->glob.gl_pathv[fs->fidx]);
				return -1;
			}

			if (bytesread2 < size - total_bytes)
			{
				/*
				 * We didn't read as much as we asked for. Check why.
				 * We could be done reading data, we may need to move
				 * on the reading the next data file (if any).
				 */
				if (total_bytes == 0)
				{
					if (bytesread2 == 0)
					{
						/* current file fully consumed and nothing stashed */
						if (nextFile(fs))
							return -1;		/* found next file but failed to open */
						continue;
					}
					updateCurFileState(fs, fo);
				}

				/*
				 * try to open the next file if any, and return the number of
				 * bytes read to buffer earlier, if next file was found but
				 * could not open return -1
				 */
				return nextFile(fs) ? -1 : total_bytes + bytesread2;
			}

			updateCurFileState(fs, fo);

			/*
			 * Now that we have enough data in our filestream buffer, get a
			 * chunk of whole rows and copy it into our dest buffer to be sent
			 * out later.
			 */
			if (fs->options.is_csv)
			{
				/* CSV: go slow, scan byte-by-byte for record boundary */
				p = scan_csv_records(dest, (char*)dest + size, 0, fs);
			}
			else
			{
				/*
				 * TEXT: go fast, scan for end of line delimiter (\n by default) for
				 * record boundary.
				 * find the last end of line delimiter from the back
				 */
				if (line_delim_length > 0)
				{
					p = find_last_eol_delim((char*)dest, size, line_delim_str, line_delim_length);
				}
				else
				{
					for (p = (char*)dest + size; (char*)dest <= --p && *p != '\n';)
						;
				}
				/* p now points one past the last delimiter, or 0 if none found */
				p = (char*)dest <= p ? p + 1 : 0;
				fs->line_number = 0;
			}

			/*
			 * could we not find even one complete row in this buffer? error.
			 */
			if (!p || (char*)dest + size >= p + buffer_capacity)
			{
				snprintf(err_buf, sizeof(err_buf),
						 "line too long in file %s near (%lld bytes)",
						 fs->glob.gl_pathv[fs->fidx], (long long) fs->foff);
				fs->ferror = err_buf;
				gfile_printf_then_putc_newline("%s", err_buf);
				return -1;
			}

			/* copy the result chunk of data into our buffer and we're done */
			/* the tail partial row [p, dest+size) is stashed for the next call */
			fs->buffer_cur_size = (char*)dest + size - p;
			memcpy(fs->buffer, p, fs->buffer_cur_size);
			fs->foff += p - (char*)dest;
			return p - (char*)dest;
		}

		/*
		 * if we're here it means that we just want chunks of data and don't
		 * care if it includes whole rows or not (for example, backend url_read
		 * code - a segdb that reads all the data that gpfdist sent to it,
		 * buffer by buffer and then parses the lines internally).
		 */
		if (fs->buffer_cur_size)
		{
			/* drain previously stashed bytes first, up to 'size' of them */
			ssize_t total_bytes = fs->buffer_cur_size;

			updateCurFileState(fs, fo);

			if (total_bytes > size)
				total_bytes = size;

			memcpy(dest, fs->buffer, total_bytes);
			fs->buffer_cur_size -= total_bytes;
			memmove(fs->buffer, fs->buffer + total_bytes, fs->buffer_cur_size);

			fs->foff += total_bytes;
			fs->line_number = 0;

			return total_bytes;
		}

		/* stash empty: read straight from the file into the caller's buffer */
		bytesread = gfile_read(&fs->fd, dest, size);

		if (bytesread < 0)
		{
			fs->ferror = format_error("cannot read file - ", fs->glob.gl_pathv[fs->fidx]);
			return -1;
		}

		if (bytesread)
		{
			updateCurFileState(fs, fo);
			fs->foff += bytesread;
			fs->line_number = 0;
			return bytesread;
		}

		/* EOF on this file: advance to the next one and loop */
		if (nextFile(fs))
			return -1;
	}
}
// Background part as separate thread // Don't do any GUI things on this thread. void print_gdi_thread(void *pdummy) { PGDI_THREAD *pth= (PGDI_THREAD *)pdummy; CPrintDIB printdib; char buf[MAXSTR]; int length; LPBYTE pLine; LPBYTE p; DWORD dwRead; int i; GFile *pFile = gfile_open_handle((int)pth->hPipeRd); int page = 0; BOOL print_it; printdib.debug = (debug & DEBUG_GDI); while (printdib.ReadHeader(pFile)) { page++; print_it = TRUE; if ((pth->oddeven == EVEN_PAGES) && ((page & 1) == 1)) print_it = FALSE; else if ((pth->oddeven == ODD_PAGES) && ((page & 1) == 0)) print_it = FALSE; if ((pth->from > 0) && (page < pth->from)) print_it = FALSE; if ((pth->to > 0) && (page > pth->to)) print_it = FALSE; sprintf(buf, "Page %d, %s\n", page, print_it ? "PRINT" : "ignore"); gs_addmess(buf); if (print_it) StartPage(pth->hdc); length = printdib.m_bytewidth; pLine = new BYTE[length]; for (i=0; i < printdib.m_PageBmp.bmp2.biHeight; i++) { // read a scan line length = printdib.m_bytewidth; p = pLine; while (length && (dwRead = gfile_read(pFile, p, length)) != 0) { length -= dwRead; p += dwRead; } if (print_it) printdib.AddPrintLine(pth->hdc, i, pLine); } if (print_it) { printdib.FlushPrintBitmap(pth->hdc); EndPage(pth->hdc); } delete pLine; } EndDoc(pth->hdc); DeleteDC(pth->hdc); pth->hdc = NULL; gfile_close(pFile); free(pth); print_count--; /* if printing from command line, close GSview */ if (print_exit && (print_count==0) && !(debug & DEBUG_GDI)) gsview_command(IDM_EXIT); gs_addmess("\nPrint GDI finished\n"); }