bool PessimisticTxnManager::PerformUpdate(const oid_t &tile_group_id,
                                          const oid_t &tuple_id,
                                          const ItemPointer &new_location) {
  LOG_INFO("Performing Write %lu %lu", tile_group_id, tuple_id);

  auto transaction_id = current_txn->GetTransactionId();

  auto tile_group_header =
      catalog::Manager::GetInstance().GetTileGroup(tile_group_id)->GetHeader();
  auto new_tile_group_header = catalog::Manager::GetInstance()
                                   .GetTileGroup(new_location.block)
                                   ->GetHeader();

  // if we can perform the update, then we must have already locked the older
  // version.
  assert(tile_group_header->GetTransactionId(tuple_id) == transaction_id);
  assert(new_tile_group_header->GetTransactionId(new_location.offset) ==
         INVALID_TXN_ID);
  assert(new_tile_group_header->GetBeginCommitId(new_location.offset) ==
         MAX_CID);
  assert(new_tile_group_header->GetEndCommitId(new_location.offset) ==
         MAX_CID);

  // The write lock must have been acquired.
  // Notice: if the executor doesn't call PerformUpdate after AcquireOwnership,
  // no one will possibly release the write lock acquired by this txn.
  tile_group_header->SetTransactionId(tuple_id, transaction_id);

  // Set double linked list
  tile_group_header->SetNextItemPointer(tuple_id, new_location);
  new_tile_group_header->SetPrevItemPointer(
      new_location.offset, ItemPointer(tile_group_id, tuple_id));

  new_tile_group_header->SetTransactionId(new_location.offset, transaction_id);

  // Add the old tuple into the update set
  current_txn->RecordUpdate(tile_group_id, tuple_id);
  return true;
}
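// A minimal sketch of the lock encoding the asserts above rely on: the
// per-tuple transaction-id field doubles as the pessimistic write lock.
// The helper names below are hypothetical and lean on the surrounding
// definitions of txn_id_t, INVALID_TXN_ID, and INITIAL_TXN_ID.
//   INVALID_TXN_ID  -> the slot is empty (no tuple installed yet)
//   INITIAL_TXN_ID  -> the tuple is committed and unlocked
//   any other value -> that transaction currently holds the write lock
inline bool IsTupleLockFree(txn_id_t tuple_txn_id) {
  return tuple_txn_id == INITIAL_TXN_ID || tuple_txn_id == INVALID_TXN_ID;
}

inline bool IsTupleOwnedBy(txn_id_t tuple_txn_id, txn_id_t my_txn_id) {
  return tuple_txn_id == my_txn_id;
}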
ECode RetrieveConf::constructor()
{
    MultimediaMessagePdu::constructor();
    SetMessageType(IPduHeaders::MESSAGE_TYPE_RETRIEVE_CONF);
    SetTransactionId(GenerateTransactionId());
    return NOERROR;
}
ECode RetrieveConf::constructor(
    /* [in] */ IPduHeaders* headers)
{
    MultimediaMessagePdu::constructor(headers);
    SetTransactionId(GenerateTransactionId());
    return NOERROR;
}
bool PessimisticTxnManager::PerformDelete(const oid_t &tile_group_id,
                                          const oid_t &tuple_id,
                                          const ItemPointer &new_location) {
  LOG_TRACE("Performing Delete");

  auto transaction_id = current_txn->GetTransactionId();

  auto tile_group_header =
      catalog::Manager::GetInstance().GetTileGroup(tile_group_id)->GetHeader();
  auto new_tile_group_header = catalog::Manager::GetInstance()
                                   .GetTileGroup(new_location.block)
                                   ->GetHeader();

  assert(tile_group_header->GetTransactionId(tuple_id) == transaction_id);
  assert(new_tile_group_header->GetTransactionId(new_location.offset) ==
         INVALID_TXN_ID);
  assert(new_tile_group_header->GetBeginCommitId(new_location.offset) ==
         MAX_CID);
  assert(new_tile_group_header->GetEndCommitId(new_location.offset) ==
         MAX_CID);

  // Set up double linked list
  tile_group_header->SetNextItemPointer(tuple_id, new_location);
  new_tile_group_header->SetPrevItemPointer(
      new_location.offset, ItemPointer(tile_group_id, tuple_id));

  new_tile_group_header->SetTransactionId(new_location.offset, transaction_id);
  new_tile_group_header->SetEndCommitId(new_location.offset, INVALID_CID);

  current_txn->RecordDelete(tile_group_id, tuple_id);
  return true;
}
// Return false if the tuple's table (tile group) has been dropped.
// In that case, this recycled tuple cannot be added to the recycled_list.
// Since no one will use it any more, keeping track of it is useless.
// Note that if we drop a single tile group without dropping the whole table,
// this assumption is problematic.
bool GCManager::ResetTuple(const TupleMetadata &tuple_metadata) {
  auto &manager = catalog::Manager::GetInstance();
  auto tile_group = manager.GetTileGroup(tuple_metadata.tile_group_id);

  // During the reset, the table may be deconstructed because of a DROP TABLE
  // request.
  if (tile_group == nullptr) {
    LOG_TRACE("Garbage tuple(%u, %u) in table %u no longer exists",
              tuple_metadata.tile_group_id, tuple_metadata.tuple_slot_id,
              tuple_metadata.table_id);
    return false;
  }

  // From now on, the tile group shared pointer is held by us,
  // so it's safe to set the headers.
  auto tile_group_header = tile_group->GetHeader();

  // Reset the header
  tile_group_header->SetTransactionId(tuple_metadata.tuple_slot_id,
                                      INVALID_TXN_ID);
  tile_group_header->SetBeginCommitId(tuple_metadata.tuple_slot_id, MAX_CID);
  tile_group_header->SetEndCommitId(tuple_metadata.tuple_slot_id, MAX_CID);
  tile_group_header->SetPrevItemPointer(tuple_metadata.tuple_slot_id,
                                        INVALID_ITEMPOINTER);
  tile_group_header->SetNextItemPointer(tuple_metadata.tuple_slot_id,
                                        INVALID_ITEMPOINTER);
  PL_MEMSET(
      tile_group_header->GetReservedFieldRef(tuple_metadata.tuple_slot_id), 0,
      storage::TileGroupHeader::GetReservedSize());

  LOG_TRACE("Garbage tuple(%u, %u) in table %u is reset",
            tuple_metadata.tile_group_id, tuple_metadata.tuple_slot_id,
            tuple_metadata.table_id);
  return true;
}
TileGroupHeader::TileGroupHeader(const BackendType &backend_type,
                                 const int &tuple_count)
    : backend_type(backend_type),
      data(nullptr),
      num_tuple_slots(tuple_count),
      next_tuple_slot(0),
      tile_header_lock() {
  header_size = num_tuple_slots * header_entry_size;

  // allocate storage space for header
  auto &storage_manager = storage::StorageManager::GetInstance();
  data = reinterpret_cast<char *>(
      storage_manager.Allocate(backend_type, header_size));
  PL_ASSERT(data != nullptr);

  // zero out the data
  PL_MEMSET(data, 0, header_size);

  // Set MVCC initial values
  for (oid_t tuple_slot_id = START_OID; tuple_slot_id < num_tuple_slots;
       tuple_slot_id++) {
    SetTransactionId(tuple_slot_id, INVALID_TXN_ID);
    SetBeginCommitId(tuple_slot_id, MAX_CID);
    SetEndCommitId(tuple_slot_id, MAX_CID);
    SetNextItemPointer(tuple_slot_id, INVALID_ITEMPOINTER);
    SetPrevItemPointer(tuple_slot_id, INVALID_ITEMPOINTER);
    SetInsertCommit(tuple_slot_id, false);  // unused
    SetDeleteCommit(tuple_slot_id, false);  // unused
  }
}
// -----------------------------------------------------------------------------
// CRefreshItem::SendL
// From CTransactionItemBase:
// -----------------------------------------------------------------------------
//
void CRefreshItem::SendL (const TSIPTransportParams& /*aTransportParams*/,
                          TTransactionId& aTransactionId,
                          TRegistrationId aRegistrationId,
                          CSIPRequest* aRequest,
                          CURIContainer& aRemoteTarget)
    {
    if (!iFirstRequestSent)
        {
        iRefreshMgr.RefreshL(aTransactionId,iRefreshId,aRegistrationId,aRequest,
                             iRefreshOwner,iSIPSecUser,aRemoteTarget,
                             ETrue,ETrue);
        iFirstRequestSent = ETrue;
        }
    else
        {
        TBool terminate = EFalse;
        CSIPExpiresHeader* expires =
            static_cast<CSIPExpiresHeader*>(aRequest->Header(
                SIPStrings::StringF(SipStrConsts::EExpiresHeader),0));
        if (expires && expires->Value() == 0)
            {
            terminate = ETrue;
            }
        iRefreshMgr.UpdateRefreshL(aTransactionId,iRefreshId,aRequest,
                                   iRefreshOwner,ETrue);
        if (terminate)
            {
            SetTerminated();
            }
        // After a refresh update, pass the next response from Refreshes to the
        // dialog owner
        iPassResponseToOwner = ETrue;
        }
    SetTransactionId (aTransactionId);
    }
TileGroupHeader::TileGroupHeader(BackendType backend_type, int tuple_count)
    : backend_type(backend_type),
      data(nullptr),
      num_tuple_slots(tuple_count),
      next_tuple_slot(0) {
  header_size = num_tuple_slots * header_entry_size;

  // allocate storage space for header
  auto &storage_manager = storage::StorageManager::GetInstance();
  data = reinterpret_cast<char *>(
      storage_manager.Allocate(backend_type, header_size));
  assert(data != nullptr);

  // zero out the data
  std::memset(data, 0, header_size);

  // Set MVCC initial values
  for (oid_t tuple_slot_id = START_OID; tuple_slot_id < num_tuple_slots;
       tuple_slot_id++) {
    SetTransactionId(tuple_slot_id, INVALID_TXN_ID);
    SetBeginCommitId(tuple_slot_id, MAX_CID);
    SetEndCommitId(tuple_slot_id, MAX_CID);
    SetInsertCommit(tuple_slot_id, false);
    SetDeleteCommit(tuple_slot_id, false);
  }
}
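// Both TileGroupHeader constructors size the header as
// num_tuple_slots * header_entry_size, i.e. one fixed-width record per tuple
// slot. A sketch of how per-slot accessors such as SetTransactionId could
// address the raw buffer; the record width and field offset below are
// assumptions for illustration, not Peloton's actual layout.
#include <cstddef>
#include <cstdint>
#include <cstring>

class SketchSlotHeader {
 public:
  using oid_t = uint32_t;
  using txn_id_t = uint64_t;

  static constexpr size_t kEntrySize = 64;   // assumed per-slot record width
  static constexpr size_t kTxnIdOffset = 0;  // assumed field offset

  explicit SketchSlotHeader(char *data) : data_(data) {}

  // A field lives at: slot base (slot * kEntrySize) + field offset.
  void SetTransactionId(oid_t slot, txn_id_t txn_id) {
    std::memcpy(data_ + slot * kEntrySize + kTxnIdOffset, &txn_id,
                sizeof(txn_id));
  }

  txn_id_t GetTransactionId(oid_t slot) const {
    txn_id_t txn_id;
    std::memcpy(&txn_id, data_ + slot * kEntrySize + kTxnIdOffset,
                sizeof(txn_id));
    return txn_id;
  }

 private:
  char *data_;
};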
void TimestampOrderingTransactionManager::PerformInsert(
    TransactionContext *const current_txn, const ItemPointer &location,
    ItemPointer *index_entry_ptr) {
  PELOTON_ASSERT(!current_txn->IsReadOnly());

  oid_t tile_group_id = location.block;
  oid_t tuple_id = location.offset;

  auto storage_manager = storage::StorageManager::GetInstance();
  auto tile_group_header =
      storage_manager->GetTileGroup(tile_group_id)->GetHeader();

  auto transaction_id = current_txn->GetTransactionId();

  // check MVCC info
  // the tuple slot must be empty.
  PELOTON_ASSERT(tile_group_header->GetTransactionId(tuple_id) ==
                 INVALID_TXN_ID);
  PELOTON_ASSERT(tile_group_header->GetBeginCommitId(tuple_id) == MAX_CID);
  PELOTON_ASSERT(tile_group_header->GetEndCommitId(tuple_id) == MAX_CID);

  tile_group_header->SetTransactionId(tuple_id, transaction_id);
  tile_group_header->SetLastReaderCommitId(tuple_id,
                                           current_txn->GetCommitId());

  // no need to set the next item pointer.

  // Add the new tuple into the insert set
  current_txn->RecordInsert(location);

  // Write down the head pointer's address in the tile group header
  tile_group_header->SetIndirection(tuple_id, index_entry_ptr);
}
// -----------------------------------------------------------------------------
// CTransactionItem::SendL
// From CTransactionItemBase:
// -----------------------------------------------------------------------------
//
void CTransactionItem::SendL (const TSIPTransportParams& aTransportParams,
                              TTransactionId& aTransactionId,
                              TRegistrationId aRegistrationId,
                              CSIPRequest* aRequest,
                              CURIContainer& aRemoteTarget)
    {
    iTU.SendL (aTransactionId,aRegistrationId,aRequest,iTransactionOwner,
               aRemoteTarget,aTransportParams,ETrue);
    SetTransactionId (aTransactionId);
    }
/*
 * Helper method responsible for inserting the results of the aggregation
 * into a new tuple in the output tile group as well as passing through any
 * additional columns from the input tile group.
 *
 * The output tuple is projected from two tuples:
 * Left is the 'delegate' tuple, which is usually the first tuple in the group,
 * used to retrieve pass-through values;
 * Right is the tuple holding all aggregated values.
 */
bool Helper(const planner::AggregatePlan *node, Agg **aggregates,
            storage::DataTable *output_table,
            const AbstractTuple *delegate_tuple,
            executor::ExecutorContext *econtext) {
  auto schema = output_table->GetSchema();
  std::unique_ptr<storage::Tuple> tuple(new storage::Tuple(schema, true));

  /*
   * 1) Construct a vector of aggregated values
   */
  std::vector<common::Value> aggregate_values;
  auto &aggregate_terms = node->GetUniqueAggTerms();
  for (oid_t column_itr = 0; column_itr < aggregate_terms.size();
       column_itr++) {
    if (aggregates[column_itr] != nullptr) {
      common::Value final_val = aggregates[column_itr]->Finalize();
      aggregate_values.push_back(final_val);
    }
  }

  /*
   * 2) Evaluate the filter predicate; if it fails, just return
   */
  std::unique_ptr<expression::ContainerTuple<std::vector<common::Value>>>
      aggref_tuple(new expression::ContainerTuple<std::vector<common::Value>>(
          &aggregate_values));

  auto predicate = node->GetPredicate();
  if (nullptr != predicate &&
      (predicate->Evaluate(delegate_tuple, aggref_tuple.get(), econtext))
          .IsFalse()) {
    return true;  // Qual fails, do nothing
  }

  /*
   * 3) Construct the tuple to insert using the project info
   */
  node->GetProjectInfo()->Evaluate(tuple.get(), delegate_tuple,
                                   aggref_tuple.get(), econtext);

  LOG_TRACE("Tuple to Output :");
  LOG_TRACE("GROUP TUPLE :: %s", tuple->GetInfo().c_str());

  auto location = output_table->InsertTuple(tuple.get());
  if (location.block == INVALID_OID) {
    LOG_ERROR("Failed to insert tuple");
    return false;
  } else {
    auto &manager = catalog::Manager::GetInstance();
    auto tile_group_header = manager.GetTileGroup(location.block)->GetHeader();
    tile_group_header->SetTransactionId(location.offset, INITIAL_TXN_ID);
  }
  return true;
}
void PessimisticTxnManager::SetOwnership(const oid_t &tile_group_id,
                                         const oid_t &tuple_id) {
  auto &manager = catalog::Manager::GetInstance();
  auto tile_group_header = manager.GetTileGroup(tile_group_id)->GetHeader();
  auto transaction_id = current_txn->GetTransactionId();

  // Set MVCC info
  assert(tile_group_header->GetTransactionId(tuple_id) == INVALID_TXN_ID);
  assert(tile_group_header->GetBeginCommitId(tuple_id) == MAX_CID);
  assert(tile_group_header->GetEndCommitId(tuple_id) == MAX_CID);

  tile_group_header->SetTransactionId(tuple_id, transaction_id);
}
// -----------------------------------------------------------------------------
// CTransactionItem::SendAndGetHeadersL
// From CTransactionItemBase:
// -----------------------------------------------------------------------------
//
MTransactionHeaders* CTransactionItem::SendAndGetHeadersL (
    const TSIPTransportParams& aTransportParams,
    TTransactionId& aTransactionId,
    TRegistrationId aRegistrationId,
    CSIPRequest* aRequest,
    CURIContainer& aRemoteTarget)
    {
    MTransactionHeaders* headers =
        iTU.SendAndGetHeadersL (aTransactionId,aRegistrationId,aRequest,
                                iTransactionOwner,aRemoteTarget,
                                aTransportParams,ETrue);
    SetTransactionId (aTransactionId);
    return headers;
    }
ECode SendReq::constructor(
    /* [in] */ ArrayOf<Byte>* contentType,
    /* [in] */ IEncodedStringValue* from,
    /* [in] */ Int32 mmsVersion,
    /* [in] */ ArrayOf<Byte>* transactionId)
{
    MultimediaMessagePdu::constructor();
    SetMessageType(IPduHeaders::MESSAGE_TYPE_SEND_REQ);
    SetContentType(contentType);
    SetFrom(from);
    SetMmsVersion(mmsVersion);
    SetTransactionId(transactionId);
    return NOERROR;
}
int DecodeError(Message *message, BNode *dict)
{
    assert(message != NULL && "NULL Message pointer");
    assert(dict != NULL && "NULL BNode pointer");
    assert(dict->type == BDictionary && "Not a dictionary");

    message->type = RError;

    int rc = SetTransactionId(message, dict);
    check(rc == 0, "SetTransactionId failed");

    BNode *eVal = BNode_GetValue(dict, "e", 1);
    check(eVal != NULL, "No 'e' value");
    check(eVal->type == BList, "Value not a BList");

    /* The checks above already guarantee eVal is a non-NULL BList, so only
     * the element count still needs validating here. */
    if (eVal->count != 2) {
        message->errors |= MERROR_INVALID_DATA;
        return 0;
    }

    BNode *code = eVal->value.nodes[0],
          *error_msg = eVal->value.nodes[1];

    if (code->type == BInteger) {
        message->data.rerror.code = code->value.integer;
    } else {
        message->errors |= MERROR_INVALID_DATA;
    }

    if (error_msg->type == BString) {
        message->data.rerror.message = BNode_bstring(error_msg);
        check(message->data.rerror.message != NULL, "Failed to create bstring");
    } else {
        message->errors |= MERROR_INVALID_DATA;
    }

    return 0;
error:
    return -1;
}
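/* The decoders here rely on a check/goto-error pattern. A minimal sketch of
 * what such a macro could look like, in the style of Zed Shaw's dbg.h; the
 * log_err helper is part of the sketch, and this project's real definition
 * may differ (e.g. it may also reset errno). ##__VA_ARGS__ is a GCC/Clang
 * extension. */
#include <stdio.h>

#define log_err(M, ...) \
    fprintf(stderr, "[ERROR] (%s:%d) " M "\n", __FILE__, __LINE__, ##__VA_ARGS__)

#define check(A, M, ...)               \
    if (!(A)) {                        \
        log_err(M, ##__VA_ARGS__);     \
        goto error;                    \
    }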
// -----------------------------------------------------------------------------
// CRefreshItem::SendAndGetHeadersL
// From CTransactionItemBase:
// -----------------------------------------------------------------------------
//
MTransactionHeaders* CRefreshItem::SendAndGetHeadersL (
    const TSIPTransportParams& /*aTransportParams*/,
    TTransactionId& aTransactionId,
    TRegistrationId aRegistrationId,
    CSIPRequest* aRequest,
    CURIContainer& aRemoteTarget)
    {
    MTransactionHeaders* headers =
        iRefreshMgr.RefreshAndGetHeadersL(aTransactionId,iRefreshId,
                                          aRegistrationId,aRequest,
                                          iRefreshOwner,iSIPSecUser,
                                          aRemoteTarget);
    iFirstRequestSent = ETrue;
    SetTransactionId (aTransactionId);
    return headers;
    }
int DecodeQuery(Message *message, BNode *dict)
{
    assert(message != NULL && "NULL Message pointer");
    assert(dict != NULL && "NULL BNode pointer");
    assert(dict->type == BDictionary && "Not a dictionary");

    SetQueryType(message, dict);
    SetQueryId(message, dict);

    int rc = SetTransactionId(message, dict);
    check(rc == 0, "SetTransactionId failed");

    rc = SetQueryData(message, dict);
    check(rc == 0, "SetQueryData failed");

    return 0;
error:
    return -1;
}
int DecodeResponse(Message *message, BNode *dict,
                   struct PendingResponses *pending)
{
    assert(message != NULL && "NULL Message pointer");
    assert(dict != NULL && "NULL BNode pointer");
    assert(dict->type == BDictionary && "Not a dictionary");
    assert(pending != NULL && "NULL struct PendingResponses pointer");

    int rc = SetTransactionId(message, dict);
    check(rc == 0, "SetTransactionId failed");

    if (message->t_len != sizeof(tid_t)) {
        message->errors |= MERROR_INVALID_TID;
    }

    SetResponseId(message, dict);

    PendingResponse entry = pending->getPendingResponse(pending,
                                                        message->t,
                                                        &rc);
    if (rc == 0) {
        if (!Hash_Equals(&message->id, &entry.id) && !entry.is_new) {
            message->errors |= MERROR_INVALID_NODE_ID;
        }

        message->type = entry.type;
        message->context = entry.context;
    } else {
        message->errors |= MERROR_INVALID_TID;
    }

    rc = SetResponseData(message, dict);
    check(rc == 0, "SetResponseData failed");

    return 0;
error:
    return -1;
}
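/* DecodeQuery, DecodeResponse, and DecodeError map onto the three KRPC
 * message classes, which a bencoded message distinguishes by its "y" key
 * ("q", "r", or "e"). A sketch of that dispatch step, reusing the decoder
 * signatures above; BNode_StringEquals is a hypothetical one-key string
 * comparison helper, not necessarily this project's API. */
int DecodeMessage(Message *message, BNode *dict,
                  struct PendingResponses *pending)
{
    BNode *yVal = BNode_GetValue(dict, "y", 1);
    check(yVal != NULL, "No 'y' value");

    if (BNode_StringEquals(yVal, "q"))
        return DecodeQuery(message, dict);
    if (BNode_StringEquals(yVal, "r"))
        return DecodeResponse(message, dict, pending);
    if (BNode_StringEquals(yVal, "e"))
        return DecodeError(message, dict);

    message->errors |= MERROR_INVALID_DATA;
    return 0;
error:
    return -1;
}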
ECode SendReq::constructor()
{
    MultimediaMessagePdu::constructor();

    // try {
    SetMessageType(IPduHeaders::MESSAGE_TYPE_SEND_REQ);
    SetMmsVersion(IPduHeaders::CURRENT_MMS_VERSION);
    // FIXME: The content type must be decided according to whether a
    // SMIL part is present.
    SetContentType(String("application/vnd.wap.multipart.related").GetBytes());
    AutoPtr<IEncodedStringValue> p;
    CEncodedStringValue::New(
        IPduHeaders::FROM_INSERT_ADDRESS_TOKEN_STR.GetBytes(),
        (IEncodedStringValue**)&p);
    SetFrom(p);
    SetTransactionId(GenerateTransactionId());
    // } catch (InvalidHeaderValueException e) {
    //     // Impossible to reach here since all headers we set above are valid.
    //     Log.e(TAG, "Unexpected InvalidHeaderValueException.", e);
    //     throw new RuntimeException(e);
    // }

    return NOERROR;
}
bool TransactionLevelGCManager::ResetTuple(const ItemPointer &location) {
  auto &manager = catalog::Manager::GetInstance();
  auto tile_group = manager.GetTileGroup(location.block).get();
  auto tile_group_header = tile_group->GetHeader();

  // Reset the header
  tile_group_header->SetTransactionId(location.offset, INVALID_TXN_ID);
  tile_group_header->SetBeginCommitId(location.offset, MAX_CID);
  tile_group_header->SetEndCommitId(location.offset, MAX_CID);
  tile_group_header->SetPrevItemPointer(location.offset, INVALID_ITEMPOINTER);
  tile_group_header->SetNextItemPointer(location.offset, INVALID_ITEMPOINTER);
  PL_MEMSET(tile_group_header->GetReservedFieldRef(location.offset), 0,
            storage::TileGroupHeader::GetReservedSize());

  // Reclaim the varlen pool
  CheckAndReclaimVarlenColumns(tile_group, location.offset);

  LOG_TRACE("Garbage tuple(%u, %u) is reset", location.block, location.offset);
  return true;
}
void TimestampOrderingTransactionManager::PerformInsert(
    TransactionContext *const current_txn, const ItemPointer &location,
    ItemPointer *index_entry_ptr) {
  PL_ASSERT(current_txn->GetIsolationLevel() != IsolationLevelType::READ_ONLY);

  oid_t tile_group_id = location.block;
  oid_t tuple_id = location.offset;

  auto &manager = catalog::Manager::GetInstance();
  auto tile_group_header = manager.GetTileGroup(tile_group_id)->GetHeader();
  auto transaction_id = current_txn->GetTransactionId();

  // check MVCC info
  // the tuple slot must be empty.
  PL_ASSERT(tile_group_header->GetTransactionId(tuple_id) == INVALID_TXN_ID);
  PL_ASSERT(tile_group_header->GetBeginCommitId(tuple_id) == MAX_CID);
  PL_ASSERT(tile_group_header->GetEndCommitId(tuple_id) == MAX_CID);

  tile_group_header->SetTransactionId(tuple_id, transaction_id);

  // no need to set the next item pointer.

  // Add the new tuple into the insert set
  current_txn->RecordInsert(location);

  InitTupleReserved(tile_group_header, tuple_id);

  // Write down the head pointer's address in the tile group header
  tile_group_header->SetIndirection(tuple_id, index_entry_ptr);

  // Increment table insert op stats
  if (static_cast<StatsType>(settings::SettingsManager::GetInt(
          settings::SettingId::stats_mode)) != StatsType::INVALID) {
    stats::BackendStatsContext::GetInstance()->IncrementTableInserts(
        location.block);
  }
}
/**
 * @brief Creates logical tile(s) wrapping the results of aggregation.
 * @return true on success, false otherwise.
 */
bool AggregateExecutor::DExecute() {
  // Already performed the aggregation
  if (done) {
    if (result_itr == INVALID_OID || result_itr == result.size()) {
      return false;
    } else {
      // Return the appropriate tile and go to the next tile
      SetOutput(result[result_itr]);
      result_itr++;
      return true;
    }
  }

  // Grab info from the plan node
  const planner::AggregatePlan &node = GetPlanNode<planner::AggregatePlan>();

  // Get an aggregator
  std::unique_ptr<AbstractAggregator> aggregator(nullptr);

  // Get input tiles and aggregate them
  while (children_[0]->Execute() == true) {
    std::unique_ptr<LogicalTile> tile(children_[0]->GetOutput());

    if (nullptr == aggregator.get()) {
      // Initialize the aggregator
      switch (node.GetAggregateStrategy()) {
        case AGGREGATE_TYPE_HASH:
          LOG_TRACE("Use HashAggregator");
          aggregator.reset(new HashAggregator(
              &node, output_table, executor_context_, tile->GetColumnCount()));
          break;
        case AGGREGATE_TYPE_SORTED:
          LOG_TRACE("Use SortedAggregator");
          aggregator.reset(new SortedAggregator(
              &node, output_table, executor_context_, tile->GetColumnCount()));
          break;
        case AGGREGATE_TYPE_PLAIN:
          LOG_TRACE("Use PlainAggregator");
          aggregator.reset(
              new PlainAggregator(&node, output_table, executor_context_));
          break;
        default:
          LOG_ERROR("Invalid aggregate type. Return.");
          return false;
      }
    }

    LOG_TRACE("Looping over tile..");
    for (oid_t tuple_id : *tile) {
      std::unique_ptr<expression::ContainerTuple<LogicalTile>> cur_tuple(
          new expression::ContainerTuple<LogicalTile>(tile.get(), tuple_id));
      if (aggregator->Advance(cur_tuple.get()) == false) {
        return false;
      }
    }
    LOG_TRACE("Finished processing logical tile");
  }

  LOG_TRACE("Finalizing..");
  if (!aggregator.get() || !aggregator->Finalize()) {
    // If there are no tuples and no group-by, count() aggregations should
    // return 0, matching MySQL's behavior.
    // TODO: We only check whether all AggTerms are counts here. If there are
    // mixed terms, we should return 0 for counts and NULL for the others.
    bool all_count_aggs = true;
    for (oid_t aggno = 0; aggno < node.GetUniqueAggTerms().size(); aggno++) {
      auto agg_type = node.GetUniqueAggTerms()[aggno].aggtype;
      if (agg_type != EXPRESSION_TYPE_AGGREGATE_COUNT &&
          agg_type != EXPRESSION_TYPE_AGGREGATE_COUNT_STAR)
        all_count_aggs = false;
    }

    // If there are no tuples in the table and no group-by in the query,
    // we should return a NULL tuple; this is required by SQL.
    if (!aggregator.get() && node.GetGroupbyColIds().empty()) {
      LOG_TRACE(
          "No tuples received and no group-by. Should insert a NULL tuple "
          "here.");
      std::unique_ptr<storage::Tuple> tuple(
          new storage::Tuple(output_table->GetSchema(), true));
      if (all_count_aggs == true) {
        tuple->SetAllZeros();
      } else {
        tuple->SetAllNulls();
      }
      auto location = output_table->InsertTuple(tuple.get());
      PL_ASSERT(location.block != INVALID_OID);

      auto &manager = catalog::Manager::GetInstance();
      auto tile_group_header =
          manager.GetTileGroup(location.block)->GetHeader();
      tile_group_header->SetTransactionId(location.offset, INITIAL_TXN_ID);
    } else {
      done = true;
      return false;
    }
  }

  // Transform the output table into the result
  auto tile_group_count = output_table->GetTileGroupCount();
  if (tile_group_count == 0) return false;

  for (oid_t tile_group_itr = 0; tile_group_itr < tile_group_count;
       tile_group_itr++) {
    auto tile_group = output_table->GetTileGroup(tile_group_itr);

    // Get the logical tiles corresponding to the given tile group
    auto logical_tile = LogicalTileFactory::WrapTileGroup(tile_group);
    result.push_back(logical_tile);
  }

  done = true;
  LOG_TRACE("Result tiles : %lu ", result.size());

  SetOutput(result[result_itr]);
  result_itr++;
  return true;
}
void TimestampOrderingTransactionManager::PerformUpdate(
    TransactionContext *const current_txn, const ItemPointer &location,
    const ItemPointer &new_location) {
  PL_ASSERT(current_txn->GetIsolationLevel() != IsolationLevelType::READ_ONLY);

  ItemPointer old_location = location;

  LOG_TRACE("Performing Update old tuple %u %u", old_location.block,
            old_location.offset);
  LOG_TRACE("Performing Update new tuple %u %u", new_location.block,
            new_location.offset);

  auto &manager = catalog::Manager::GetInstance();

  auto tile_group_header =
      manager.GetTileGroup(old_location.block)->GetHeader();
  auto new_tile_group_header =
      manager.GetTileGroup(new_location.block)->GetHeader();

  auto transaction_id = current_txn->GetTransactionId();
  // if we can perform the update, then we must have already locked the older
  // version.
  PL_ASSERT(tile_group_header->GetTransactionId(old_location.offset) ==
            transaction_id);
  PL_ASSERT(
      tile_group_header->GetPrevItemPointer(old_location.offset).IsNull() ==
      true);

  // check whether the new version is empty.
  PL_ASSERT(new_tile_group_header->GetTransactionId(new_location.offset) ==
            INVALID_TXN_ID);
  PL_ASSERT(new_tile_group_header->GetBeginCommitId(new_location.offset) ==
            MAX_CID);
  PL_ASSERT(new_tile_group_header->GetEndCommitId(new_location.offset) ==
            MAX_CID);

  // if the executor doesn't call PerformUpdate after AcquireOwnership,
  // no one will possibly release the write lock acquired by this txn.

  // Set double linked list
  tile_group_header->SetPrevItemPointer(old_location.offset, new_location);
  new_tile_group_header->SetNextItemPointer(new_location.offset, old_location);

  new_tile_group_header->SetTransactionId(new_location.offset, transaction_id);

  // we should guarantee that the newer version is all set before linking the
  // newer version to the older version.
  COMPILER_MEMORY_FENCE;

  InitTupleReserved(new_tile_group_header, new_location.offset);

  // we must be updating the latest version.
  // Set the header information for the new version.
  ItemPointer *index_entry_ptr =
      tile_group_header->GetIndirection(old_location.offset);

  // if there's no primary index on the table, then index_entry_ptr == nullptr.
  if (index_entry_ptr != nullptr) {
    new_tile_group_header->SetIndirection(new_location.offset,
                                          index_entry_ptr);

    // Set the index header in an atomic way.
    // We do it atomically because we don't want anyone to see a half-done
    // pointer. In case of contention, no one can update this pointer while we
    // are updating it, because we are holding the write lock. This update
    // should succeed on its first trial.
    UNUSED_ATTRIBUTE auto res =
        AtomicUpdateItemPointer(index_entry_ptr, new_location);
    PL_ASSERT(res == true);
  }

  // Add the old tuple into the update set
  current_txn->RecordUpdate(old_location);

  // Increment table update op stats
  if (static_cast<StatsType>(settings::SettingsManager::GetInt(
          settings::SettingId::stats_mode)) != StatsType::INVALID) {
    stats::BackendStatsContext::GetInstance()->IncrementTableUpdates(
        new_location.block);
  }
}
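// The atomic index-pointer swing above amounts to updating one word-wide
// value in a single indivisible step. A minimal sketch of the idea, assuming
// an ItemPointer packs two 32-bit fields into one 64-bit word; this
// illustrates the technique, not Peloton's actual AtomicUpdateItemPointer.
#include <atomic>
#include <cstdint>
#include <cstring>

struct PackedItemPointer {
  uint32_t block;
  uint32_t offset;
};
static_assert(sizeof(PackedItemPointer) == sizeof(uint64_t),
              "must fit in a single CAS-able word");

bool AtomicUpdateItemPointerSketch(PackedItemPointer *target,
                                   const PackedItemPointer &new_value) {
  uint64_t desired;
  std::memcpy(&desired, &new_value, sizeof(desired));
  auto *word = reinterpret_cast<std::atomic<uint64_t> *>(target);
  uint64_t expected = word->load(std::memory_order_relaxed);
  // The caller holds the tuple's write lock, so there is no competing
  // writer; the CAS is expected to succeed on the first attempt.
  return word->compare_exchange_strong(expected, desired,
                                       std::memory_order_release);
}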
Result PessimisticTxnManager::AbortTransaction() {
  LOG_TRACE("Aborting peloton txn : %lu ", current_txn->GetTransactionId());
  auto &manager = catalog::Manager::GetInstance();

  auto &rw_set = current_txn->GetRWSet();

  for (auto &tile_group_entry : rw_set) {
    oid_t tile_group_id = tile_group_entry.first;
    auto tile_group = manager.GetTileGroup(tile_group_id);
    auto tile_group_header = tile_group->GetHeader();

    for (auto &tuple_entry : tile_group_entry.second) {
      auto tuple_slot = tuple_entry.first;
      if (tuple_entry.second == RW_TYPE_READ) {
        if (pessimistic_released_rdlock.find(tile_group_id) ==
                pessimistic_released_rdlock.end() ||
            pessimistic_released_rdlock[tile_group_id].find(tuple_slot) ==
                pessimistic_released_rdlock[tile_group_id].end()) {
          ReleaseReadLock(tile_group_header, tuple_slot);
          pessimistic_released_rdlock[tile_group_id].insert(tuple_slot);
        }
      } else if (tuple_entry.second == RW_TYPE_UPDATE) {
        ItemPointer new_version =
            tile_group_header->GetNextItemPointer(tuple_slot);
        auto new_tile_group_header =
            manager.GetTileGroup(new_version.block)->GetHeader();
        new_tile_group_header->SetBeginCommitId(new_version.offset, MAX_CID);
        new_tile_group_header->SetEndCommitId(new_version.offset, MAX_CID);

        COMPILER_MEMORY_FENCE;

        tile_group_header->SetEndCommitId(tuple_slot, MAX_CID);

        COMPILER_MEMORY_FENCE;

        new_tile_group_header->SetTransactionId(new_version.offset,
                                                INVALID_TXN_ID);
        tile_group_header->SetTransactionId(tuple_slot, INITIAL_TXN_ID);
      } else if (tuple_entry.second == RW_TYPE_DELETE) {
        ItemPointer new_version =
            tile_group_header->GetNextItemPointer(tuple_slot);
        auto new_tile_group_header =
            manager.GetTileGroup(new_version.block)->GetHeader();
        new_tile_group_header->SetBeginCommitId(new_version.offset, MAX_CID);
        new_tile_group_header->SetEndCommitId(new_version.offset, MAX_CID);

        COMPILER_MEMORY_FENCE;

        tile_group_header->SetEndCommitId(tuple_slot, MAX_CID);

        COMPILER_MEMORY_FENCE;

        new_tile_group_header->SetTransactionId(new_version.offset,
                                                INVALID_TXN_ID);
        tile_group_header->SetTransactionId(tuple_slot, INITIAL_TXN_ID);
      } else if (tuple_entry.second == RW_TYPE_INSERT) {
        tile_group_header->SetEndCommitId(tuple_slot, MAX_CID);
        tile_group_header->SetBeginCommitId(tuple_slot, MAX_CID);

        COMPILER_MEMORY_FENCE;

        tile_group_header->SetTransactionId(tuple_slot, INVALID_TXN_ID);
      } else if (tuple_entry.second == RW_TYPE_INS_DEL) {
        tile_group_header->SetEndCommitId(tuple_slot, MAX_CID);
        tile_group_header->SetBeginCommitId(tuple_slot, MAX_CID);

        COMPILER_MEMORY_FENCE;

        tile_group_header->SetTransactionId(tuple_slot, INVALID_TXN_ID);
      }
    }
  }

  EndTransaction();
  pessimistic_released_rdlock.clear();
  return Result::RESULT_ABORTED;
}
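// Both AbortTransaction and CommitTransaction order their header writes with
// COMPILER_MEMORY_FENCE so that a version's commit ids are in place before
// its transaction id makes it visible. A plausible definition of such a
// compiler-only barrier (an assumption; the macro's body is not shown here):
//
//   // GCC/Clang: forbid compile-time reordering across this point without
//   // emitting a hardware fence instruction.
//   #define COMPILER_MEMORY_FENCE asm volatile("" ::: "memory")
//
//   // A portable C++11 equivalent in intent:
//   // #include <atomic>
//   // #define COMPILER_MEMORY_FENCE \
//   //     std::atomic_signal_fence(std::memory_order_seq_cst)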
Result PessimisticTxnManager::CommitTransaction() {
  LOG_TRACE("Committing peloton txn : %lu ", current_txn->GetTransactionId());

  auto &manager = catalog::Manager::GetInstance();

  auto &rw_set = current_txn->GetRWSet();

  //*****************************************************
  // we can optimize read-only transactions.
  if (current_txn->IsReadOnly() == true) {
    // validate the read set.
    for (auto &tile_group_entry : rw_set) {
      oid_t tile_group_id = tile_group_entry.first;
      auto tile_group = manager.GetTileGroup(tile_group_id);
      auto tile_group_header = tile_group->GetHeader();
      for (auto &tuple_entry : tile_group_entry.second) {
        auto tuple_slot = tuple_entry.first;
        // if this tuple is not newly inserted.
        if (tuple_entry.second == RW_TYPE_READ) {
          // Release read locks
          if (pessimistic_released_rdlock.find(tile_group_id) ==
                  pessimistic_released_rdlock.end() ||
              pessimistic_released_rdlock[tile_group_id].find(tuple_slot) ==
                  pessimistic_released_rdlock[tile_group_id].end()) {
            ReleaseReadLock(tile_group_header, tuple_slot);
            pessimistic_released_rdlock[tile_group_id].insert(tuple_slot);
          }
        } else {
          assert(tuple_entry.second == RW_TYPE_INS_DEL);
        }
      }
    }
    // is it always true???
    Result ret = current_txn->GetResult();
    EndTransaction();
    return ret;
  }
  //*****************************************************

  // generate the end commit id.
  cid_t end_commit_id = GetNextCommitId();

  auto &log_manager = logging::LogManager::GetInstance();
  log_manager.LogBeginTransaction(end_commit_id);

  // install everything.
  for (auto &tile_group_entry : rw_set) {
    oid_t tile_group_id = tile_group_entry.first;
    auto tile_group = manager.GetTileGroup(tile_group_id);
    auto tile_group_header = tile_group->GetHeader();
    for (auto &tuple_entry : tile_group_entry.second) {
      auto tuple_slot = tuple_entry.first;
      if (tuple_entry.second == RW_TYPE_READ) {
        // Release read locks
        if (pessimistic_released_rdlock.find(tile_group_id) ==
                pessimistic_released_rdlock.end() ||
            pessimistic_released_rdlock[tile_group_id].find(tuple_slot) ==
                pessimistic_released_rdlock[tile_group_id].end()) {
          ReleaseReadLock(tile_group_header, tuple_slot);
          pessimistic_released_rdlock[tile_group_id].insert(tuple_slot);
        }
      } else if (tuple_entry.second == RW_TYPE_UPDATE) {
        // we must guarantee that, at any point in time, only one version is
        // visible.
        ItemPointer new_version =
            tile_group_header->GetNextItemPointer(tuple_slot);
        ItemPointer old_version(tile_group_id, tuple_slot);

        // logging.
        log_manager.LogUpdate(current_txn, end_commit_id, old_version,
                              new_version);

        auto new_tile_group_header =
            manager.GetTileGroup(new_version.block)->GetHeader();
        new_tile_group_header->SetEndCommitId(new_version.offset, MAX_CID);
        new_tile_group_header->SetBeginCommitId(new_version.offset,
                                                end_commit_id);

        COMPILER_MEMORY_FENCE;

        tile_group_header->SetEndCommitId(tuple_slot, end_commit_id);

        COMPILER_MEMORY_FENCE;

        new_tile_group_header->SetTransactionId(new_version.offset,
                                                INITIAL_TXN_ID);
        tile_group_header->SetTransactionId(tuple_slot, INITIAL_TXN_ID);
      } else if (tuple_entry.second == RW_TYPE_DELETE) {
        ItemPointer new_version =
            tile_group_header->GetNextItemPointer(tuple_slot);
        ItemPointer delete_location(tile_group_id, tuple_slot);

        // logging.
        log_manager.LogDelete(end_commit_id, delete_location);

        // we do not change the begin cid for the old tuple.
        auto new_tile_group_header =
            manager.GetTileGroup(new_version.block)->GetHeader();
        new_tile_group_header->SetEndCommitId(new_version.offset, MAX_CID);
        new_tile_group_header->SetBeginCommitId(new_version.offset,
                                                end_commit_id);

        COMPILER_MEMORY_FENCE;

        tile_group_header->SetEndCommitId(tuple_slot, end_commit_id);

        COMPILER_MEMORY_FENCE;

        new_tile_group_header->SetTransactionId(new_version.offset,
                                                INVALID_TXN_ID);
        tile_group_header->SetTransactionId(tuple_slot, INITIAL_TXN_ID);
      } else if (tuple_entry.second == RW_TYPE_INSERT) {
        assert(tile_group_header->GetTransactionId(tuple_slot) ==
               current_txn->GetTransactionId());
        // set the begin commit id to persist the insert
        ItemPointer insert_location(tile_group_id, tuple_slot);
        log_manager.LogInsert(current_txn, end_commit_id, insert_location);

        tile_group_header->SetEndCommitId(tuple_slot, MAX_CID);
        tile_group_header->SetBeginCommitId(tuple_slot, end_commit_id);

        COMPILER_MEMORY_FENCE;

        tile_group_header->SetTransactionId(tuple_slot, INITIAL_TXN_ID);
      } else if (tuple_entry.second == RW_TYPE_INS_DEL) {
        assert(tile_group_header->GetTransactionId(tuple_slot) ==
               current_txn->GetTransactionId());

        tile_group_header->SetEndCommitId(tuple_slot, MAX_CID);
        tile_group_header->SetBeginCommitId(tuple_slot, MAX_CID);

        COMPILER_MEMORY_FENCE;

        // reset the transaction id to mark the slot as free
        tile_group_header->SetTransactionId(tuple_slot, INVALID_TXN_ID);
      }
    }
  }

  log_manager.LogCommitTransaction(end_commit_id);

  EndTransaction();

  pessimistic_released_rdlock.clear();
  return Result::RESULT_SUCCESS;
}