/*! @brief Outputs a log of starting a Checkpoint. */ void CheckpointOperationHandler::writeCheckpointStartLog( util::StackAllocator &alloc, int32_t mode, PartitionGroupId pgId, PartitionId pId, CheckpointId cpId) { try { util::XArray<uint64_t> dirtyChunkList(alloc); util::XArray<ClientId> activeClientIds(alloc); util::XArray<TransactionId> activeTxnIds(alloc); util::XArray<ContainerId> activeRefContainerIds(alloc); util::XArray<StatementId> activeLastExecStmtIds(alloc); util::XArray<int32_t> activeTimeoutIntervalSec(alloc); TransactionId maxAssignedTxnId = 0; transactionManager_->backupTransactionActiveContext(pId, maxAssignedTxnId, activeClientIds, activeTxnIds, activeRefContainerIds, activeLastExecStmtIds, activeTimeoutIntervalSec); util::XArray<uint8_t> logBuffer(alloc); logManager_->putCheckpointStartLog(logBuffer, pId, maxAssignedTxnId, logManager_->getLSN(pId), activeClientIds, activeTxnIds, activeRefContainerIds, activeLastExecStmtIds, activeTimeoutIntervalSec); } catch (std::exception &e) { GS_RETHROW_USER_ERROR( e, "Write checkpoint start log failed. (pgId=" << pgId << ", pId=" << pId << ", mode=" << mode << ", cpId=" << cpId << ", reason=" << GS_EXCEPTION_MESSAGE(e) << ")"); } }
/*!
	@brief Constructs a FullContainerKey that refers to an already-serialized
	key image.

	Reads the leading variable-length size field from the serialized image,
	records the decoded size, and points the body at the bytes following the
	size prefix. The data is referenced, not copied.
*/
FullContainerKey::FullContainerKey(const KeyConstraint &constraint,
		const void *data) :
		constraint_(constraint), body_(NULL), size_(0) {
	try {
		if (data == NULL) {
			GS_THROW_USER_ERROR(GS_ERROR_DS_DS_CONTAINER_NAME_INVALID,
					"container/table name is empty");
		}

		const uint8_t *bytes = static_cast<const uint8_t*>(data);

		/* Only the size prefix is parsed here; restrict the stream window
		   to the maximum prefix width. */
		ContainerKeyInStream sizeReader(
				util::ArrayInStream(data, sizeof(uint32_t)));

		const bool sizeDecodable =
				ValueProcessor::varSizeIs1Byte(bytes[0]) ||
				ValueProcessor::varSizeIs4Byte(bytes[0]);
		if (!sizeDecodable) {
			GS_THROW_USER_ERROR(GS_ERROR_DS_DS_CONTAINER_NAME_INVALID,
					"failed to decode container/table name size");
		}
		size_ = static_cast<size_t>(decodeVarInt(sizeReader));

		/* The body starts right after however many prefix bytes were read. */
		body_ = bytes + sizeReader.base().position();
	}
	catch (std::exception &e) {
		GS_RETHROW_USER_ERROR(e, GS_EXCEPTION_MERGE_MESSAGE(e,
				"failed to construct container/table name"));
	}
}
void CheckpointService::PIdLsnInfo::writeFile() { util::LockGuard<util::Mutex> guard(mutex_); std::string lsnInfoFileName; util::NamedFile file; try { picojson::object jsonNodeInfo; NodeAddress &address = checkpointService_->partitionTable_->getNodeInfo(0) .getNodeAddress(); jsonNodeInfo["address"] = picojson::value(address.toString(false)); jsonNodeInfo["port"] = picojson::value(static_cast<double>(address.port_)); picojson::object jsonLsnInfo; picojson::array lsnList; for (PartitionId pId = 0; pId < partitionNum_; ++pId) { lsnList.push_back( picojson::value(static_cast<double>(lsnList_[pId]))); } picojson::object jsonObject; jsonObject["nodeInfo"] = picojson::value(jsonNodeInfo); jsonObject["partitionNum"] = picojson::value(static_cast<double>(partitionNum_)); jsonObject["groupNum"] = picojson::value(static_cast<double>(partitionGroupNum_)); jsonObject["lsnInfo"] = picojson::value(lsnList); std::string jsonString(picojson::value(jsonObject).serialize()); util::FileSystem::createPath( path_.c_str(), PID_LSN_INFO_FILE_NAME.c_str(), lsnInfoFileName); file.open(lsnInfoFileName.c_str(), util::FileFlag::TYPE_READ_WRITE | util::FileFlag::TYPE_CREATE | util::FileFlag::TYPE_TRUNCATE); file.lock(); file.write(jsonString.c_str(), jsonString.length()); file.close(); } catch (std::exception &e) { GS_RETHROW_USER_ERROR(e, "Write lsn info file failed. (fileName=" << lsnInfoFileName.c_str() << ", reason=" << GS_EXCEPTION_MESSAGE(e) << ")"); } }
/*!
	@brief Constructs a FullContainerKey by parsing a container/table name
	string.

	Parses and validates the given name against the key constraint, records
	the upper-case positions in a bit array, and serializes the result into
	allocator-owned storage.
*/
FullContainerKey::FullContainerKey(util::StackAllocator &alloc,
		const KeyConstraint &constraint, DatabaseId dbId,
		const char8_t *str, uint32_t length) :
		constraint_(constraint), body_(NULL), size_(0) {
	try {
		FullContainerKeyComponents parsedComponents;
		/* Tracks which characters were upper-case in the original name. */
		BitArray caseBits(DEFAULT_UPPER_CASE_BIT_LENGTH);

		parseAndValidate(dbId, str, length, parsedComponents, caseBits);
		serialize(alloc, parsedComponents, caseBits);
	}
	catch (std::exception &e) {
		GS_RETHROW_USER_ERROR(e, GS_EXCEPTION_MERGE_MESSAGE(e,
				"failed to construct container/table name"));
	}
}
/*!
	@brief Constructs a FullContainerKey from pre-split key components.

	Validates the components, derives the upper-case bit array from them,
	and serializes the key into allocator-owned storage.
*/
FullContainerKey::FullContainerKey(util::StackAllocator &alloc,
		const KeyConstraint &constraint,
		const FullContainerKeyComponents &components) :
		constraint_(constraint), body_(NULL), size_(0) {
	try {
		validate(components);

		/* Bit array sized to the total string length of the components. */
		BitArray caseBits(components.getStringSize());
		setUpperCaseBit(components, caseBits);

		serialize(alloc, components, caseBits);
	}
	catch (std::exception &e) {
		GS_RETHROW_USER_ERROR(e, GS_EXCEPTION_MERGE_MESSAGE(e,
				"failed to construct container/table name"));
	}
}
/*!
	@brief Constructs a FullContainerKey from a serialized key body and
	validates it.

	The body pointer and size are stored as-is (no copy); the image is then
	deserialized into components purely to validate them.
*/
FullContainerKey::FullContainerKey(util::StackAllocator &alloc,
		const KeyConstraint &constraint, const void *body, size_t size) :
		constraint_(constraint),
		body_(static_cast<const uint8_t*>(body)),
		size_(size) {
	try {
		FullContainerKeyComponents decodedComponents;
		BitArray caseBits(DEFAULT_UPPER_CASE_BIT_LENGTH);

		/* Decode without case restoration (last arg false), then check the
		   components against the constraint. */
		deserialize(alloc, decodedComponents, caseBits, false);
		validate(decodedComponents);
	}
	catch (std::exception &e) {
		GS_RETHROW_USER_ERROR(e, GS_EXCEPTION_MERGE_MESSAGE(e,
				"failed to construct container/table name"));
	}
}
/*! @brief Outputs a log of metadata of Chunk. */
/*
 * Scans every chunk of the partition and writes its metadata (unoccupied
 * size, checkpoint file position, and — in batch-free categories — the chunk
 * key) as variable-length-encoded records, flushed in batches of
 * CHUNK_META_DATA_LOG_MAX_NUM or at each category boundary. A terminating
 * sentinel record is written at the end. When the partition is not restored,
 * only the sentinel is written.
 */
void CheckpointOperationHandler::writeChunkMetaDataLog(
	util::StackAllocator &alloc, int32_t mode, PartitionGroupId pgId,
	PartitionId pId, CheckpointId cpId, bool isRestored) {
	try {
		if (isRestored) {
			int32_t count = 0;  /* records accumulated in the current batch */
			util::XArray<uint8_t> logBuffer(alloc);
			util::XArray<uint8_t> metaDataBinary(alloc);
			ChunkCategoryId categoryId;  /* set by begin()/next() below */
			ChunkId chunkId;             /* set by begin()/next() below */
			ChunkId startChunkId = 0;    /* first chunk ID of current batch */
			int64_t scanSize = chunkManager_->getScanSize(pId);
			ChunkManager::MetaChunk *metaChunk =
				chunkManager_->begin(pId, categoryId, chunkId);
			GS_TRACE_INFO(CHECKPOINT_SERVICE_DETAIL, GS_TRACE_CP_STATUS,
				"writeChunkMetaDataLog: pId=" << pId << ",chunkId" << chunkId);
			for (int64_t index = 0; index < scanSize; index++) {
				/* Scratch space for one encoded record: 1 status byte plus
				   up to two varints. */
				uint8_t tmp[LogManager::LOGMGR_VARINT_MAX_LEN * 2 + 1];
				uint8_t *addr = tmp;
				if (!metaChunk) {
					/* No chunk at this slot: 0xff marker plus zero-valued
					   placeholder fields, keeping the record layout uniform. */
					tmp[0] = 0xff;
					++addr;
					uint32_t dummyData = 0;
					addr += util::varIntEncode64(addr, dummyData);
					if (chunkManager_->isBatchFreeMode(categoryId)) {
						addr += util::varIntEncode32(addr, dummyData);
					}
				}
				else {
					/* Real chunk: unoccupied size, checkpoint file position,
					   and (batch-free categories only) the chunk key. */
					tmp[0] = metaChunk->getUnoccupiedSize();
					++addr;
					int64_t filePos = metaChunk->getCheckpointPos();
					assert(filePos != -1);
					addr += util::varIntEncode64(addr, filePos);
					if (chunkManager_->isBatchFreeMode(categoryId)) {
						addr += util::varIntEncode32(
							addr, metaChunk->getChunkKey());
					}
					GS_TRACE_DEBUG(CHECKPOINT_SERVICE_DETAIL,
						GS_TRACE_CP_STATUS,
						"chunkMetaData: (chunkId," << chunkId << ",freeInfo,"
							<< (int32_t)metaChunk->getUnoccupiedSize()
							<< ",pos," << metaChunk->getCheckpointPos()
							<< ",chunkKey," << metaChunk->getChunkKey());
				}
				metaDataBinary.push_back(tmp, (addr - tmp));
				++count;
				/* Flush a full batch. */
				if (count == CHUNK_META_DATA_LOG_MAX_NUM) {
					logManager_->putChunkMetaDataLog(logBuffer, pId,
						categoryId, startChunkId, count, &metaDataBinary,
						false);
					GS_TRACE_INFO(CHECKPOINT_SERVICE_DETAIL,
						GS_TRACE_CP_STATUS,
						"writeChunkMetaDaLog,pgId," << pgId << ",pId," << pId
							<< ",chunkCategoryId," << (int32_t)categoryId
							<< ",startChunkId," << startChunkId
							<< ",chunkNum," << count);
					startChunkId += count;
					count = 0;
					metaDataBinary.clear();
					logBuffer.clear();
				}
				ChunkCategoryId prevCategoryId = categoryId;
				metaChunk = chunkManager_->next(pId, categoryId, chunkId);
				/* Category boundary: flush any partial batch for the previous
				   category and restart chunk numbering at 0. */
				if (categoryId != prevCategoryId) {
					if (count > 0) {
						logManager_->putChunkMetaDataLog(logBuffer, pId,
							prevCategoryId, startChunkId, count,
							&metaDataBinary, false);
						GS_TRACE_INFO(CHECKPOINT_SERVICE_DETAIL,
							GS_TRACE_CP_STATUS,
							"writeChunkMetaDaLog,pgId," << pgId << ",pId,"
								<< pId << ",chunkCategoryId,"
								<< (int32_t)prevCategoryId << ",startChunkId,"
								<< startChunkId << ",chunkNum," << count);
						startChunkId = 0;
						count = 0;
						metaDataBinary.clear();
						logBuffer.clear();
					}
				}
			}
			/* Terminating sentinel record (last arg true). */
			logBuffer.clear();
			logManager_->putChunkMetaDataLog(
				logBuffer, pId, UNDEF_CHUNK_CATEGORY_ID, 0, 1, NULL, true);
		}
		else {
			/* Partition not restored: write only the sentinel. */
			util::XArray<uint8_t> logBuffer(alloc);
			logManager_->putChunkMetaDataLog(
				logBuffer, pId, UNDEF_CHUNK_CATEGORY_ID, 0, 0, NULL, true);
		}
	}
	catch (std::exception &e) {
		GS_RETHROW_USER_ERROR(
			e, "Write chunk meta data log failed. (pgId="
					<< pgId << ", pId=" << pId << ", mode=" << mode
					<< ", cpId=" << cpId
					<< ", reason=" << GS_EXCEPTION_MESSAGE(e) << ")");
	}
}