void BuildIndex(std::shared_ptr<index::Index> index, storage::DataTable *table) { auto &txn_manager = concurrency::TransactionManagerFactory::GetInstance(); auto txn = txn_manager.BeginTransaction(); oid_t start_tile_group_count = START_OID; oid_t table_tile_group_count = table->GetTileGroupCount(); while (start_tile_group_count < table_tile_group_count) { auto tile_group = table->GetTileGroup(start_tile_group_count++); auto column_count = table->GetSchema()->GetColumnCount(); oid_t active_tuple_count = tile_group->GetNextTupleSlot(); for (oid_t tuple_id = 0; tuple_id < active_tuple_count; tuple_id++) { std::unique_ptr<storage::Tuple> tuple_ptr( new storage::Tuple(table->GetSchema(), true)); CopyTuple(tuple_id, tuple_ptr.get(), tile_group.get(), column_count); ItemPointer location(tile_group->GetTileGroupId(), tuple_id); ItemPointer *index_entry_ptr = nullptr; table->InsertInIndexes(tuple_ptr.get(), location, txn, &index_entry_ptr); } index->IncrementIndexedTileGroupOffset(); } txn_manager.CommitTransaction(txn); }
/**
 * Claim the next free slot in this tile group (thread-safe) and, when a
 * tuple is supplied, copy its contents into that slot.
 *
 * @param tuple  Tuple to materialize into the slot, or nullptr if the
 *               caller will fill the slot itself afterwards.
 * @return the claimed slot id, or INVALID_OID when no slot is available.
 */
oid_t TileGroup::InsertTuple(const Tuple *tuple) {
  const oid_t slot_id = tile_group_header->GetNextEmptyTupleSlot();

  LOG_TRACE("Tile Group Id :: %u status :: %u out of %u slots ", tile_group_id,
            slot_id, num_tuple_slots);

  // Tile group is full — nothing could be claimed.
  if (slot_id == INVALID_OID) {
    LOG_TRACE("Failed to get next empty tuple slot within tile group.");
    return INVALID_OID;
  }

  // A null tuple means the caller populates the slot outside this function;
  // hand back the reserved (still empty) slot directly.
  if (tuple == nullptr) {
    return slot_id;
  }

  // Materialize the tuple into the reserved slot.
  CopyTuple(tuple, slot_id);

  // A freshly claimed slot must not carry any MVCC state yet.
  PL_ASSERT(tile_group_header->GetTransactionId(slot_id) == INVALID_TXN_ID);
  PL_ASSERT(tile_group_header->GetBeginCommitId(slot_id) == MAX_CID);
  PL_ASSERT(tile_group_header->GetEndCommitId(slot_id) == MAX_CID);

  return slot_id;
}
/**
 * Incrementally build the given index over the table, processing at most
 * max_tile_groups_indexed tile groups per invocation, resuming from the
 * index's recorded tile-group offset.
 *
 * NOTE(review): the actual index insertion (index->InsertEntry) is commented
 * out below, so as written this routine constructs keys but never inserts
 * them — it only advances the index's tile-group offset. Confirm whether
 * that is intentional (e.g. measurement-only tuning) before relying on it.
 *
 * @param table  Table to scan for tuples.
 * @param index  Index being built; its tile-group offset is advanced.
 */
void IndexTuner::BuildIndex(storage::DataTable* table,
                            std::shared_ptr<index::Index> index) {
  auto table_schema = table->GetSchema();
  // Resume where the previous invocation left off.
  auto index_tile_group_offset = index->GetIndexedTileGroupOff();
  auto table_tile_group_count = table->GetTileGroupCount();
  oid_t tile_groups_indexed = 0;

  auto index_schema = index->GetKeySchema();
  auto indexed_columns = index_schema->GetIndexedColumns();
  // Reusable key buffer; refilled from each scanned tuple below.
  std::unique_ptr<storage::Tuple> key(new storage::Tuple(index_schema, true));

  // Process tile groups until the table is exhausted or the per-call budget
  // (max_tile_groups_indexed, a member of IndexTuner) is spent.
  while (index_tile_group_offset < table_tile_group_count &&
         (tile_groups_indexed < max_tile_groups_indexed)) {
    // Reusable tuple buffer for this tile group.
    std::unique_ptr<storage::Tuple> tuple_ptr(
        new storage::Tuple(table_schema, true));

    auto tile_group = table->GetTileGroup(index_tile_group_offset);
    auto tile_group_id = tile_group->GetTileGroupId();
    // Number of occupied slots in this tile group.
    oid_t active_tuple_count = tile_group->GetNextTupleSlot();

    for (oid_t tuple_id = 0; tuple_id < active_tuple_count; tuple_id++) {
      // Copy over the tuple
      tile_group->CopyTuple(tuple_id, tuple_ptr.get());

      // Set the location
      // NOTE(review): location is unused while InsertEntry is disabled.
      ItemPointer location(tile_group_id, tuple_id);

      // Set the key
      key->SetFromTuple(tuple_ptr.get(), indexed_columns, index->GetPool());

      // Insert in specific index
      // index->InsertEntry(key.get(), location);
    }

    // Update indexed tile group offset (set of tgs indexed)
    index->IncrementIndexedTileGroupOffset();

    // Sleep a bit
    // std::this_thread::sleep_for(std::chrono::microseconds(sleep_duration));

    index_tile_group_offset++;
    tile_groups_indexed++;
  }
}