////////////////////////////////////////////////////////////////////// // increment the totals by the batch counts // also sets the status to indicate that processing should be completed. void usa_InputFile::BatchCountsAdd( size_t batch_size) { _processingStarted = true; _processed += batch_size; // add the batch totals to the overall totals CountIter biter( _type_batch.begin()); CountIter titer( _type_totals.begin()); for (;titer != _type_totals.end(); ++biter, ++titer) { *titer += *biter; } biter = _recs_batch.begin(); titer = _recs_totals.begin(); for (;titer != _recs_totals.end(); ++biter, ++titer) { *titer += *biter; } _total_DUC += _recs_DUC; _total_DUP += _recs_DUP; _total_TAX += _recs_TAX; _total_WTAX += _recs_WTAX; BatchCountsReset(); // set admin start count back to the customer start count _start_group_admin = _start_group_cust; if ( Processed_Total() == file_groups()) { _status = COMPLETED; } GFRPOSTCONDITION2( Processed_Total() <= file_groups(), GFR_GENERIC_DEBUG, "Processed Total %1 is larger than File Groups %2", Processed_Total(), file_groups()) }
bool Table::serializeTo(int32_t offset, int32_t limit, SerializeOutput &serialize_io) { // The table is serialized as: // [(int) total size] // [(int) header size] [num columns] [column types] [column names] // [(int) num tuples] [tuple data] /* NOTE: VoltDBEngine uses a binary template to create tables of single integers. It's called m_templateSingleLongTable and if you are seeing a serialization bug in tables of single integers, make sure that's correct. */ // a placeholder for the total table size std::size_t pos = serialize_io.position(); serialize_io.writeInt(-1); if (!serializeColumnHeaderTo(serialize_io)) return false; // active tuple counts uint32_t output_size = m_tupleCount; if (limit != -1 || offset != -1) { if (offset == -1) { output_size = (limit < m_tupleCount ? limit : m_tupleCount); } else if (offset > m_tupleCount) { output_size = 0; } else { output_size = m_tupleCount - offset; if (limit != -1 && limit < output_size) output_size = limit; } } serialize_io.writeInt(static_cast<int32_t>(output_size)); // fprintf(stderr, "SERIALIZE(output=%d, offset=%d, limit=%d, total=%d)\n", output_size, offset, limit, m_tupleCount); int64_t written_count = 0; int64_t read_count = 0; TableIterator titer(this); TableTuple tuple(m_schema); while (titer.next(tuple)) { if (offset == -1 || read_count >= offset) { tuple.serializeTo(serialize_io); if (limit != -1 && ++written_count == limit) break; } read_count++; } // assert(written_count == m_tupleCount); // length prefix is non-inclusive int32_t sz = static_cast<int32_t>(serialize_io.position() - pos - sizeof(int32_t)); assert(sz > 0); serialize_io.writeIntAt(pos, sz); return true; }
// Returns a reference to the last element of section secIndex.
// secIndex == -1 addresses the container as a whole, i.e. the last
// element of the underlying container is returned.
typename SectionContainer<TValue, TContainer>::value_type&
SectionContainer<TValue, TContainer>::
back(int secIndex)
{
    assert((secIndex >= -1) && (secIndex < num_sections()) &&
           "Bad section index");

    // whole-container request: just hand back the underlying back()
    if(secIndex == -1)
        return m_container.back();

    // If the section's end coincides with the underlying container's end,
    // the section's last element IS the container's last element.
    if(m_vSections[secIndex].m_elemsEnd == m_container.end())
        return m_container.back();

    // Otherwise m_elemsEnd points at the element of m_container that
    // directly follows the last element of this section, so stepping the
    // iterator back once lands on the section's last element.
    iterator last(m_vSections[secIndex].m_elemsEnd);
    --last;
    return *last;
}
// Serialize the entire table into serialize_io.
//
// Wire format:
//  [(int) total size]
//  [(int) header size] [num columns] [column types] [column names]
//  [(int) num tuples] [tuple data]
//
// NOTE: VoltDBEngine uses a binary template to create tables of single
// integers. It's called m_templateSingleLongTable and if you are seeing a
// serialization bug in tables of single integers, make sure that's correct.
//
// Returns false if the column header could not be serialized.
bool Table::serializeTo(SerializeOutput &serialize_io) {
    // Remember where the length prefix belongs and reserve room for it;
    // the real value is patched in once the full size is known.
    const std::size_t size_position = serialize_io.position();
    serialize_io.writeInt(-1);

    if (!serializeColumnHeaderTo(serialize_io))
        return false;

    // active tuple count, then every tuple in iteration order
    serialize_io.writeInt(static_cast<int32_t>(m_tupleCount));

    TableIterator iter(this);
    TableTuple cur(m_schema);
    int64_t emitted = 0;
    while (iter.next(cur)) {
        cur.serializeTo(serialize_io);
        ++emitted;
    }
    assert(emitted == m_tupleCount);

    // patch the length prefix (it does not count its own 4 bytes)
    const int32_t total_bytes = static_cast<int32_t>(
        serialize_io.position() - size_position - sizeof(int32_t));
    assert(total_bytes > 0);
    serialize_io.writeIntAt(size_position, total_bytes);
    return true;
}