/*
 * Form an all-NULL tuple: clear every value, mark every attribute NULL,
 * and let TupleFormerTuple() build the HeapTuple.
 */
static HeapTuple
TupleFormerNullTuple(TupleFormer *former)
{
	memset(former->values, 0, former->desc->natts * sizeof(Datum));
	memset(former->isnull, true, former->desc->natts * sizeof(bool));

	return TupleFormerTuple(former);
}
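/*
 * A minimal sketch (not pg_bulkload code) of the same idea expressed against
 * PostgreSQL's public API, assuming TupleFormerTuple() builds its result with
 * heap_form_tuple() from the former's values/isnull arrays.  The helper name
 * form_all_null_tuple is hypothetical.
 */
static HeapTuple
form_all_null_tuple(TupleDesc desc)
{
	/* palloc0 zeroes the Datum array, mirroring the memset above. */
	Datum  *values = (Datum *) palloc0(desc->natts * sizeof(Datum));
	bool   *isnull = (bool *) palloc(desc->natts * sizeof(bool));

	/* Mark every attribute NULL, as TupleFormerNullTuple() does. */
	memset(isnull, true, desc->natts * sizeof(bool));

	return heap_form_tuple(desc, values, isnull);
}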
/**
 * @brief Read one record from the input file and convert its literal string
 * form into PostgreSQL internal format.
 *
 * Process flow
 *	- If the record buffer is empty
 *	  + Read records, up to READ_LINE_NUM at a time, with read(2)
 *	    * Return NULL if we reach EOF.
 *	    * If an error occurs, report it to the caller with ereport().
 *	  + Count the number of records in the record buffer.
 *	  + Initialize the number of used records to 0.
 *	  + Store the head byte of the next record.
 *	- If a trailing fragment too short to form a complete record remains in
 *	  the record buffer, report it to the caller with a WARNING.
 *	- Restore the saved head byte, and save the head byte of the next record.
 *	- Update the number of records used.
 * @param self [in/out] Parser control information
 * @param checker [in] Checker used to convert character fields to the
 * server encoding
 * @return The formed HeapTuple, or NULL at EOF.
 */
static HeapTuple
BinaryParserRead(BinaryParser *self, Checker *checker)
{
	HeapTuple	tuple;
	char	   *record;
	int			i;

	/* Skip the first need_offset lines of the input file. */
	if (unlikely(self->need_offset > 0))
	{
		for (i = 0; i < self->need_offset; i++)
		{
			int			len;

			len = SourceRead(self->source, self->buffer, self->rec_len);

			if (len != self->rec_len)
			{
				if (errno == 0)
					errno = EINVAL;
				ereport(ERROR,
						(errcode_for_file_access(),
						 errmsg("could not skip " int64_FMT " lines ("
								int64_FMT " bytes) in the input file: %m",
								self->need_offset,
								self->rec_len * self->need_offset)));
			}
		}
		self->need_offset = 0;
	}

	/*
	 * If the record buffer is exhausted, read the next records from the
	 * file, up to READ_LINE_NUM rows at once.
	 */
	if (self->used_rec_cnt >= self->total_rec_cnt)
	{
		int			len;
		div_t		v;

		BULKLOAD_PROFILE(&prof_reader_parser);
		while ((len = SourceRead(self->source, self->buffer,
								 self->rec_len * READ_LINE_NUM)) < 0)
		{
			if (errno != EAGAIN && errno != EINTR)
				ereport(ERROR,
						(errcode_for_file_access(),
						 errmsg("could not read input file: %m")));
		}
		BULKLOAD_PROFILE(&prof_reader_source);

		/*
		 * Calculate the actual number of rows.  Trailing remainder bytes
		 * at the end of the input file are ignored with a WARNING.
		 */
		v = div(len, self->rec_len);
		if (v.rem != 0)
			elog(WARNING, "ignoring %d bytes at the end of the file", v.rem);

		self->total_rec_cnt = v.quot;
		self->used_rec_cnt = 0;

		if (self->total_rec_cnt <= 0)
			return NULL;	/* EOF */

		record = self->buffer;
	}
	else
	{
		record = self->buffer + (self->rec_len * self->used_rec_cnt);
	}

	/*
	 * Increment the position *before* parsing the record so that we can
	 * skip it if any errors occur while parsing it.
	 */
	self->used_rec_cnt++;
	self->base.count++;

	for (i = 0; i < self->nfield; i++)
	{
		/* Convert character fields to the server encoding. */
		if (self->fields[i].character)
		{
			char	   *str = record + self->fields[i].offset;
			int			next_head = self->fields[i].offset +
									self->fields[i].len;

			/*
			 * Temporarily NUL-terminate the field in place, saving the
			 * overwritten head byte of the next record so it can be
			 * restored after the conversion.
			 */
			self->next_head = record[next_head];
			record[next_head] = '\0';
			self->base.parsing_field = i + 1;
			self->fields[i].in = CheckerConversion(checker, str);
			record[next_head] = self->next_head;
		}
		else
		{
			self->fields[i].in = record + self->fields[i].offset;
		}
	}

	ExtractValuesFromFixed(self, record);
	self->next_head = '\0';
	self->base.parsing_field = -1;

	if (self->filter.funcstr)
		tuple = FilterTuple(&self->filter, &self->former,
							&self->base.parsing_field);
	else
		tuple = TupleFormerTuple(&self->former);

	return tuple;
}
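/*
 * A minimal sketch (not pg_bulkload code) of the buffered fixed-length
 * record pattern used above, reduced to plain read(2).  It assumes
 * <unistd.h> and <errno.h>; the RecordBuffer type and read_next_record()
 * are hypothetical names.  Like BinaryParserRead(), it refills the buffer
 * only once all buffered records are consumed, retries on EAGAIN/EINTR,
 * and drops a trailing partial record.
 */
typedef struct RecordBuffer
{
	char	   *buffer;			/* room for nrecs_max records */
	int			rec_len;		/* fixed length of one record */
	int			nrecs_max;		/* buffer capacity, like READ_LINE_NUM */
	int			total_rec_cnt;	/* records currently in the buffer */
	int			used_rec_cnt;	/* records already handed out */
} RecordBuffer;

static char *
read_next_record(RecordBuffer *rb, int fd)
{
	char	   *record;

	if (rb->used_rec_cnt >= rb->total_rec_cnt)
	{
		ssize_t		len;

		/* Refill the buffer, retrying transient failures. */
		while ((len = read(fd, rb->buffer,
						   (size_t) rb->rec_len * rb->nrecs_max)) < 0)
		{
			if (errno != EAGAIN && errno != EINTR)
				return NULL;	/* genuine I/O error; errno is set */
		}

		/* Integer division drops any trailing partial record. */
		rb->total_rec_cnt = (int) (len / rb->rec_len);
		rb->used_rec_cnt = 0;

		if (rb->total_rec_cnt == 0)
			return NULL;		/* EOF */
	}

	record = rb->buffer + (size_t) rb->rec_len * rb->used_rec_cnt;
	rb->used_rec_cnt++;
	return record;
}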