bool TableMetricsCatalog::InsertTableMetrics(
    oid_t database_oid, oid_t table_oid, int64_t reads, int64_t updates,
    int64_t deletes, int64_t inserts, int64_t time_stamp,
    type::AbstractPool *pool, concurrency::Transaction *txn) {
  std::unique_ptr<storage::Tuple> tuple(
      new storage::Tuple(catalog_table_->GetSchema(), true));

  auto val0 = type::ValueFactory::GetIntegerValue(database_oid);
  auto val1 = type::ValueFactory::GetIntegerValue(table_oid);
  auto val2 = type::ValueFactory::GetIntegerValue(reads);
  auto val3 = type::ValueFactory::GetIntegerValue(updates);
  auto val4 = type::ValueFactory::GetIntegerValue(deletes);
  auto val5 = type::ValueFactory::GetIntegerValue(inserts);
  auto val6 = type::ValueFactory::GetIntegerValue(time_stamp);

  tuple->SetValue(ColumnId::DATABASE_OID, val0, pool);
  tuple->SetValue(ColumnId::TABLE_OID, val1, pool);
  tuple->SetValue(ColumnId::READS, val2, pool);
  tuple->SetValue(ColumnId::UPDATES, val3, pool);
  tuple->SetValue(ColumnId::DELETES, val4, pool);
  tuple->SetValue(ColumnId::INSERTS, val5, pool);
  tuple->SetValue(ColumnId::TIME_STAMP, val6, pool);

  // Insert the tuple
  return InsertTuple(std::move(tuple), txn);
}
bool IndexCatalog::InsertIndex(oid_t index_oid, const std::string &index_name,
                               oid_t table_oid, IndexType index_type,
                               IndexConstraintType index_constraint,
                               bool unique_keys, std::vector<oid_t> index_keys,
                               type::AbstractPool *pool,
                               concurrency::Transaction *txn) {
  // Create the tuple first
  std::unique_ptr<storage::Tuple> tuple(
      new storage::Tuple(catalog_table_->GetSchema(), true));

  auto val0 = type::ValueFactory::GetIntegerValue(index_oid);
  auto val1 = type::ValueFactory::GetVarcharValue(index_name, nullptr);
  auto val2 = type::ValueFactory::GetIntegerValue(table_oid);
  auto val3 = type::ValueFactory::GetIntegerValue(static_cast<int>(index_type));
  auto val4 =
      type::ValueFactory::GetIntegerValue(static_cast<int>(index_constraint));
  auto val5 = type::ValueFactory::GetBooleanValue(unique_keys);

  // Serialize the indexed key oids as a space-separated varchar
  std::stringstream os;
  for (oid_t indkey : index_keys) os << std::to_string(indkey) << " ";
  auto val6 = type::ValueFactory::GetVarcharValue(os.str(), nullptr);

  tuple->SetValue(0, val0, pool);
  tuple->SetValue(1, val1, pool);
  tuple->SetValue(2, val2, pool);
  tuple->SetValue(3, val3, pool);
  tuple->SetValue(4, val4, pool);
  tuple->SetValue(5, val5, pool);
  tuple->SetValue(6, val6, pool);

  // Insert the tuple
  return InsertTuple(std::move(tuple), txn);
}
bool TriggerCatalog::InsertTrigger(oid_t table_oid, std::string trigger_name,
                                   int16_t trigger_type, std::string proc_oid,
                                   std::string function_arguments,
                                   type::Value fire_condition,
                                   type::Value timestamp,
                                   type::AbstractPool *pool,
                                   concurrency::TransactionContext *txn) {
  std::unique_ptr<storage::Tuple> tuple(
      new storage::Tuple(catalog_table_->GetSchema(), true));

  LOG_INFO("type of trigger inserted:%d", trigger_type);

  auto val0 = type::ValueFactory::GetIntegerValue(GetNextOid());
  auto val1 = type::ValueFactory::GetIntegerValue(table_oid);
  auto val2 = type::ValueFactory::GetVarcharValue(trigger_name);
  auto val3 = type::ValueFactory::GetVarcharValue(proc_oid);
  auto val4 = type::ValueFactory::GetIntegerValue(trigger_type);
  auto val5 = type::ValueFactory::GetVarcharValue(function_arguments);
  auto val6 = fire_condition;
  auto val7 = timestamp;

  tuple->SetValue(ColumnId::TRIGGER_OID, val0, pool);
  tuple->SetValue(ColumnId::TABLE_OID, val1, pool);
  tuple->SetValue(ColumnId::TRIGGER_NAME, val2, pool);
  tuple->SetValue(ColumnId::FUNCTION_OID, val3, pool);
  tuple->SetValue(ColumnId::TRIGGER_TYPE, val4, pool);
  tuple->SetValue(ColumnId::FUNCTION_ARGS, val5, pool);
  tuple->SetValue(ColumnId::FIRE_CONDITION, val6, pool);
  tuple->SetValue(ColumnId::TIMESTAMP, val7, pool);

  // Insert the tuple
  return InsertTuple(std::move(tuple), txn);
}
bool QueryMetricsCatalog::InsertQueryMetrics(
    concurrency::TransactionContext *txn, const std::string &name,
    oid_t database_oid, int64_t num_params,
    const stats::QueryMetric::QueryParamBuf &type_buf,
    const stats::QueryMetric::QueryParamBuf &format_buf,
    const stats::QueryMetric::QueryParamBuf &value_buf, int64_t reads,
    int64_t updates, int64_t deletes, int64_t inserts, int64_t latency,
    int64_t cpu_time, int64_t time_stamp, type::AbstractPool *pool) {
  std::unique_ptr<storage::Tuple> tuple(
      new storage::Tuple(catalog_table_->GetSchema(), true));

  auto val0 = type::ValueFactory::GetVarcharValue(name, pool);
  auto val1 = type::ValueFactory::GetIntegerValue(database_oid);
  auto val2 = type::ValueFactory::GetIntegerValue(num_params);

  // The parameter columns stay NULL when the query has no parameters
  auto val3 = type::ValueFactory::GetNullValueByType(type::TypeId::VARBINARY);
  auto val4 = type::ValueFactory::GetNullValueByType(type::TypeId::VARBINARY);
  auto val5 = type::ValueFactory::GetNullValueByType(type::TypeId::VARBINARY);

  if (num_params != 0) {
    val3 =
        type::ValueFactory::GetVarbinaryValue(type_buf.buf, type_buf.len, true);
    val4 = type::ValueFactory::GetVarbinaryValue(format_buf.buf,
                                                 format_buf.len, true);
    val5 = type::ValueFactory::GetVarbinaryValue(value_buf.buf, value_buf.len,
                                                 true);
  }

  auto val6 = type::ValueFactory::GetIntegerValue(reads);
  auto val7 = type::ValueFactory::GetIntegerValue(updates);
  auto val8 = type::ValueFactory::GetIntegerValue(deletes);
  auto val9 = type::ValueFactory::GetIntegerValue(inserts);
  auto val10 = type::ValueFactory::GetIntegerValue(latency);
  auto val11 = type::ValueFactory::GetIntegerValue(cpu_time);
  auto val12 = type::ValueFactory::GetIntegerValue(time_stamp);

  tuple->SetValue(ColumnId::NAME, val0, pool);
  tuple->SetValue(ColumnId::DATABASE_OID, val1, pool);
  tuple->SetValue(ColumnId::NUM_PARAMS, val2, pool);
  tuple->SetValue(ColumnId::PARAM_TYPES, val3, pool);
  tuple->SetValue(ColumnId::PARAM_FORMATS, val4, pool);
  tuple->SetValue(ColumnId::PARAM_VALUES, val5, pool);
  tuple->SetValue(ColumnId::READS, val6, pool);
  tuple->SetValue(ColumnId::UPDATES, val7, pool);
  tuple->SetValue(ColumnId::DELETES, val8, pool);
  tuple->SetValue(ColumnId::INSERTS, val9, pool);
  tuple->SetValue(ColumnId::LATENCY, val10, pool);
  tuple->SetValue(ColumnId::CPU_TIME, val11, pool);
  tuple->SetValue(ColumnId::TIME_STAMP, val12, pool);

  // Insert the tuple
  return InsertTuple(txn, std::move(tuple));
}
/**
 * @brief Read a tuple insert record from the log file and redo the insert
 *        within the recovery txn
 * @param recovery_txn the recovery transaction
 */
void AriesFrontendLogger::InsertTuple(concurrency::Transaction *recovery_txn) {
  TupleRecord tuple_record(LOGRECORD_TYPE_ARIES_TUPLE_INSERT);

  // Check for torn log write
  if (ReadTupleRecordHeader(tuple_record, log_file, log_file_size) == false) {
    LOG_ERROR("Could not read tuple record header.");
    return;
  }

  auto txn_id = tuple_record.GetTransactionId();
  if (recovery_txn_table.find(txn_id) == recovery_txn_table.end()) {
    LOG_ERROR("Insert txn id %d not found in recovery txn table", (int)txn_id);
    return;
  }

  auto table = GetTable(tuple_record);

  // Read off the tuple record body from the log
  auto tuple = ReadTupleRecordBody(table->GetSchema(), recovery_pool, log_file,
                                   log_file_size);

  // Check for torn log write
  if (tuple == nullptr) {
    return;
  }

  auto target_location = tuple_record.GetInsertLocation();
  auto tile_group_id = target_location.block;
  auto tuple_slot = target_location.offset;

  auto &manager = catalog::Manager::GetInstance();
  auto tile_group = manager.GetTileGroup(tile_group_id);
  auto txn = recovery_txn_table.at(txn_id);

  // Create new tile group if table doesn't already have that tile group
  if (tile_group == nullptr) {
    table->AddTileGroupWithOid(tile_group_id);
    tile_group = manager.GetTileGroup(tile_group_id);
    if (max_oid < tile_group_id) {
      max_oid = tile_group_id;
    }
  }

  // Do the insert !
  auto inserted_tuple_slot = tile_group->InsertTuple(
      recovery_txn->GetTransactionId(), tuple_slot, tuple);

  if (inserted_tuple_slot == INVALID_OID) {
    // TODO: We need to abort on failure !
    recovery_txn->SetResult(Result::RESULT_FAILURE);
  } else {
    txn->RecordInsert(target_location);
    table->IncreaseNumberOfTuplesBy(1);
  }

  delete tuple;
}
bool DatabaseCatalog::InsertDatabase(oid_t database_oid,
                                     const std::string &database_name,
                                     type::AbstractPool *pool,
                                     concurrency::TransactionContext *txn) {
  std::unique_ptr<storage::Tuple> tuple(
      new storage::Tuple(catalog_table_->GetSchema(), true));

  auto val0 = type::ValueFactory::GetIntegerValue(database_oid);
  auto val1 = type::ValueFactory::GetVarcharValue(database_name, nullptr);

  tuple->SetValue(ColumnId::DATABASE_OID, val0, pool);
  tuple->SetValue(ColumnId::DATABASE_NAME, val1, pool);

  // Insert the tuple
  return InsertTuple(std::move(tuple), txn);
}
bool DatabaseMetricsCatalog::InsertDatabaseMetrics(
    oid_t database_oid, oid_t txn_committed, oid_t txn_aborted,
    oid_t time_stamp, type::AbstractPool *pool,
    concurrency::TransactionContext *txn) {
  std::unique_ptr<storage::Tuple> tuple(
      new storage::Tuple(catalog_table_->GetSchema(), true));

  auto val0 = type::ValueFactory::GetIntegerValue(database_oid);
  auto val1 = type::ValueFactory::GetIntegerValue(txn_committed);
  auto val2 = type::ValueFactory::GetIntegerValue(txn_aborted);
  auto val3 = type::ValueFactory::GetIntegerValue(time_stamp);

  tuple->SetValue(ColumnId::DATABASE_OID, val0, pool);
  tuple->SetValue(ColumnId::TXN_COMMITTED, val1, pool);
  tuple->SetValue(ColumnId::TXN_ABORTED, val2, pool);
  tuple->SetValue(ColumnId::TIME_STAMP, val3, pool);

  // Insert the tuple into catalog table
  return InsertTuple(std::move(tuple), txn);
}
bool IndexCatalog::InsertIndex(concurrency::TransactionContext *txn,
                               const std::string &schema_name, oid_t table_oid,
                               oid_t index_oid, const std::string &index_name,
                               IndexType index_type,
                               IndexConstraintType index_constraint,
                               bool unique_keys, std::vector<oid_t> index_keys,
                               type::AbstractPool *pool) {
  // Create the tuple first
  std::unique_ptr<storage::Tuple> tuple(
      new storage::Tuple(catalog_table_->GetSchema(), true));

  auto val0 = type::ValueFactory::GetIntegerValue(index_oid);
  auto val1 = type::ValueFactory::GetVarcharValue(index_name, nullptr);
  auto val2 = type::ValueFactory::GetIntegerValue(table_oid);
  auto val3 = type::ValueFactory::GetVarcharValue(schema_name, nullptr);
  auto val4 = type::ValueFactory::GetIntegerValue(static_cast<int>(index_type));
  auto val5 =
      type::ValueFactory::GetIntegerValue(static_cast<int>(index_constraint));
  auto val6 = type::ValueFactory::GetBooleanValue(unique_keys);

  // Serialize the indexed key oids as a space-separated varchar
  std::stringstream os;
  for (oid_t indkey : index_keys) os << std::to_string(indkey) << " ";
  auto val7 = type::ValueFactory::GetVarcharValue(os.str(), nullptr);

  tuple->SetValue(IndexCatalog::ColumnId::INDEX_OID, val0, pool);
  tuple->SetValue(IndexCatalog::ColumnId::INDEX_NAME, val1, pool);
  tuple->SetValue(IndexCatalog::ColumnId::TABLE_OID, val2, pool);
  tuple->SetValue(IndexCatalog::ColumnId::SCHEMA_NAME, val3, pool);
  tuple->SetValue(IndexCatalog::ColumnId::INDEX_TYPE, val4, pool);
  tuple->SetValue(IndexCatalog::ColumnId::INDEX_CONSTRAINT, val5, pool);
  tuple->SetValue(IndexCatalog::ColumnId::UNIQUE_KEYS, val6, pool);
  tuple->SetValue(IndexCatalog::ColumnId::INDEXED_ATTRIBUTES, val7, pool);

  // Insert the tuple
  return InsertTuple(txn, std::move(tuple));
}
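// Both InsertIndex variants above serialize the indexed key oids into a
// single space-separated varchar (e.g. "1 3 4 "). A minimal sketch of the
// reverse direction follows; ParseIndexKeys is a hypothetical helper, not
// part of the catalog API, and it assumes exactly the "oid oid ... "
// encoding produced by the loops above (requires <sstream>, <string>,
// <vector>).
static std::vector<oid_t> ParseIndexKeys(const std::string &encoded) {
  std::vector<oid_t> index_keys;
  std::istringstream is(encoded);
  oid_t indkey;
  // operator>> skips the separating (and trailing) whitespace
  while (is >> indkey) {
    index_keys.push_back(indkey);
  }
  return index_keys;
}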
/**
 * @brief Read a tuple update record from the log file and redo the update
 *        (delete the old version, then re-insert the new one) within the
 *        recovery txn
 * @param recovery_txn the recovery transaction
 */
void AriesFrontendLogger::UpdateTuple(concurrency::Transaction *recovery_txn) {
  TupleRecord tuple_record(LOGRECORD_TYPE_ARIES_TUPLE_UPDATE);

  // Check for torn log write
  if (ReadTupleRecordHeader(tuple_record, log_file, log_file_size) == false) {
    return;
  }

  auto txn_id = tuple_record.GetTransactionId();
  if (recovery_txn_table.find(txn_id) == recovery_txn_table.end()) {
    LOG_TRACE("Update txn id %d not found in recovery txn table", (int)txn_id);
    return;
  }

  auto txn = recovery_txn_table.at(txn_id);
  auto table = GetTable(tuple_record);
  auto tuple = ReadTupleRecordBody(table->GetSchema(), recovery_pool, log_file,
                                   log_file_size);

  // Check for torn log write
  if (tuple == nullptr) {
    return;
  }

  // First, redo the delete
  ItemPointer delete_location = tuple_record.GetDeleteLocation();
  bool status = table->DeleteTuple(recovery_txn, delete_location);
  if (status == false) {
    recovery_txn->SetResult(Result::RESULT_FAILURE);
  } else {
    txn->RecordDelete(delete_location);

    // Then, redo the insert at the logged location
    auto target_location = tuple_record.GetInsertLocation();
    auto tile_group_id = target_location.block;
    auto tuple_slot = target_location.offset;

    auto &manager = catalog::Manager::GetInstance();
    auto tile_group = manager.GetTileGroup(tile_group_id);

    // Create new tile group if table doesn't already have that tile group
    if (tile_group == nullptr) {
      table->AddTileGroupWithOid(tile_group_id);
      tile_group = manager.GetTileGroup(tile_group_id);
      if (max_oid < tile_group_id) {
        max_oid = tile_group_id;
      }
    }

    // Do the insert !
    auto inserted_tuple_slot = tile_group->InsertTuple(
        recovery_txn->GetTransactionId(), tuple_slot, tuple);

    if (inserted_tuple_slot == INVALID_OID) {
      recovery_txn->SetResult(Result::RESULT_FAILURE);
    } else {
      txn->RecordInsert(target_location);
    }
  }

  delete tuple;
}
/**
 * @brief Recover the database state by replaying the log file
 */
void AriesFrontendLogger::DoRecovery() {
  // Set log file size
  log_file_size = GetLogFileSize(log_file_fd);

  // Replay the log file only if it has content
  if (log_file_size > 0) {
    bool reached_end_of_file = false;

    // Start the recovery transaction
    auto &txn_manager = concurrency::TransactionManager::GetInstance();

    // Although we call BeginTransaction here, the recovery txn will not be
    // recorded in the log file since we are in recovery mode
    auto recovery_txn = txn_manager.BeginTransaction();

    // Go over each log record in the log file
    while (reached_end_of_file == false) {
      // Read the first byte to identify the log record type
      // If that is not possible, then wrap up recovery
      auto record_type = GetNextLogRecordType(log_file, log_file_size);

      switch (record_type) {
        case LOGRECORD_TYPE_TRANSACTION_BEGIN:
          AddTransactionToRecoveryTable();
          break;

        case LOGRECORD_TYPE_TRANSACTION_END:
          RemoveTransactionFromRecoveryTable();
          break;

        case LOGRECORD_TYPE_TRANSACTION_COMMIT:
          MoveCommittedTuplesToRecoveryTxn(recovery_txn);
          break;

        case LOGRECORD_TYPE_TRANSACTION_ABORT:
          AbortTuplesFromRecoveryTable();
          break;

        case LOGRECORD_TYPE_ARIES_TUPLE_INSERT:
          InsertTuple(recovery_txn);
          break;

        case LOGRECORD_TYPE_ARIES_TUPLE_DELETE:
          DeleteTuple(recovery_txn);
          break;

        case LOGRECORD_TYPE_ARIES_TUPLE_UPDATE:
          UpdateTuple(recovery_txn);
          break;

        default:
          reached_end_of_file = true;
          break;
      }
    }

    // Commit the recovery transaction
    txn_manager.CommitTransaction();

    // Finally, abort ACTIVE transactions in recovery_txn_table
    AbortActiveTransactions();

    // After finishing recovery, set the next oid with the maximum oid
    // observed during the recovery
    auto &manager = catalog::Manager::GetInstance();
    manager.SetNextOid(max_oid);
  }
}
cid_t SimpleCheckpoint::DoRecovery() {
  // No checkpoint to recover from
  if (checkpoint_version < 0) {
    return 0;
  }

  // Open the checkpoint file in read + binary mode
  std::string file_name = ConcatFileName(checkpoint_dir, checkpoint_version);
  bool success =
      LoggingUtil::InitFileHandle(file_name.c_str(), file_handle_, "rb");
  if (!success) {
    return 0;
  }

  auto size = LoggingUtil::GetLogFileSize(file_handle_);
  PL_ASSERT(size > 0);
  file_handle_.size = size;

  bool should_stop = false;
  cid_t commit_id = 0;
  while (!should_stop) {
    auto record_type = LoggingUtil::GetNextLogRecordType(file_handle_);
    switch (record_type) {
      case LOGRECORD_TYPE_WAL_TUPLE_INSERT: {
        LOG_TRACE("Read checkpoint insert entry");
        InsertTuple(commit_id);
        break;
      }
      case LOGRECORD_TYPE_TRANSACTION_COMMIT: {
        should_stop = true;
        break;
      }
      case LOGRECORD_TYPE_TRANSACTION_BEGIN: {
        LOG_TRACE("Read checkpoint begin entry");
        TransactionRecord txn_rec(record_type);
        if (LoggingUtil::ReadTransactionRecordHeader(txn_rec, file_handle_) ==
            false) {
          LOG_ERROR("Failed to read checkpoint begin entry");
          return 0;
        }
        commit_id = txn_rec.GetTransactionId();
        break;
      }
      default: {
        LOG_ERROR("Invalid checkpoint entry");
        should_stop = true;
        break;
      }
    }
  }

  // After finishing recovery, set the next oid with the maximum oid
  // observed during the recovery
  auto &manager = catalog::Manager::GetInstance();
  if (max_oid_ > manager.GetNextOid()) {
    manager.SetNextOid(max_oid_);
  }

  // FIXME this is not thread safe for concurrent checkpoint recovery
  concurrency::TransactionManagerFactory::GetInstance().SetNextCid(commit_id);
  CheckpointManager::GetInstance().SetRecoveredCid(commit_id);
  return commit_id;
}