Tx BlockDataViewer::getSpenderTxForTxOut(
   uint32_t height, uint32_t txindex, uint16_t txoutid) const
{
   StoredTxOut stxo;
   db_->getStoredTxOut(stxo, height, txindex, txoutid);

   if (!stxo.isSpent())
      return Tx();

   TxRef txref(stxo.spentByTxInKey_.getSliceCopy(0, 6));
   return txref.attached(db_).getTxCopy();
}
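//Illustration only, not part of the original source: a minimal caller sketch
//for getSpenderTxForTxOut. The helper name and the surrounding call site are
//assumptions for the example; the method itself returns a default-constructed
//(empty) Tx when the output at these coordinates has no recorded spender.
static Tx demoGetSpenderTx(const BlockDataViewer& bdv,
   uint32_t height, uint32_t txIndex, uint16_t txOutId)
{
   //an empty Tx() return means the StoredTxOut is still unspent
   return bdv.getSpenderTxForTxOut(height, txIndex, txOutId);
}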
void BlockchainScanner::preloadUtxos()
{
   //walk the entire STXO DB and cache unspent outputs in RAM
   LMDBEnv::Transaction tx;
   db_->beginDBTransaction(&tx, STXO, LMDB::ReadOnly);
   auto dbIter = db_->getIterator(STXO);
   dbIter.seekToFirst();

   while (dbIter.advanceAndRead())
   {
      StoredTxOut stxo;
      stxo.unserializeDBKey(dbIter.getKeyRef());
      stxo.unserializeDBValue(dbIter.getValueRef());

      //skip spent outputs, only utxos are preloaded
      if (stxo.spentness_ == TXOUT_SPENT)
         continue;

      //resolve the parent tx hash so utxos can be looked up by outpoint
      stxo.parentHash_ = move(db_->getTxHashForLdbKey(
         stxo.getDBKeyOfParentTx(false)));

      auto& idMap = utxoMap_[stxo.parentHash_];
      idMap.insert(make_pair(stxo.txOutIndex_, move(stxo)));
   }
}
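//Illustration only, not part of the original source: preloadUtxos() keys the
//cache by parent tx hash, then by txout index, which is how txinParser in
//scanBlockData resolves outpoints without touching the DB. A minimal lookup
//sketch, templated so it does not assume the exact typedef of utxoMap_
//(the helper name is hypothetical):
template<typename UtxoMap, typename HashType>
static const StoredTxOut* lookupPreloadedUtxo(
   const UtxoMap& utxoMap, const HashType& parentTxHash, unsigned txOutId)
{
   auto hashIter = utxoMap.find(parentTxHash);
   if (hashIter == utxoMap.end())
      return nullptr;

   auto idIter = hashIter->second.find(txOutId);
   if (idIter == hashIter->second.end())
      return nullptr;

   //points at one of our cached unspent outputs
   return &idIter->second;
}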
map<BinaryData, map<BinaryData, TxIOPair> > ZeroConfContainer::ZCisMineBulkFilter(
   const Tx & tx, const BinaryData & ZCkey, uint32_t txtime,
   function<bool(const BinaryData&)> filter,
   bool withSecondOrderMultisig)
{
   // Since 99.999%+ of all transactions are not ours, let's do the
   // fastest bulk filter possible, even though it will add
   // redundant computation to the txs that are ours. In fact,
   // we will skip the TxIn/TxOut convenience methods and follow the
   // pointers directly to the data we want

   /***filter is a function object that takes in a scrAddr (21 bytes,
   including the prefix) and returns a bool. For supernode, it should return
   true all the time.
   ***/

   map<BinaryData, map<BinaryData, TxIOPair> > processedTxIO;

   BinaryData txHash = tx.getThisHash();
   TxRef txref = db_->getTxRef(txHash);

   if (txref.isInitialized())
   {
      //Found this tx in the db. It is already part of a block thus
      //is invalid as a ZC
      return processedTxIO;
   }

   uint8_t const * txStartPtr = tx.getPtr();
   for (uint32_t iin = 0; iin < tx.getNumTxIn(); iin++)
   {
      // We have the txin, now check if it contains one of our TxOuts
      OutPoint op;
      op.unserialize(txStartPtr + tx.getTxInOffset(iin), 36);

      //check ZC txhash first, always cheaper than grabbing a stxo from DB,
      //and will always be checked if the tx doesn't hit in DB outpoints.
      {
         BinaryData opZcKey;
         if (getKeyForTxHash(op.getTxHash(), opZcKey))
         {
            TxRef outPointRef(opZcKey);
            uint16_t outPointId = op.getTxOutIndex();
            TxIOPair txio(outPointRef, outPointId,
               TxRef(ZCkey), iin);

            Tx chainedZC = getTxByHash(op.getTxHash());

            const TxOut& chainedTxOut = chainedZC.getTxOutCopy(outPointId);

            txio.setTxHashOfOutput(op.getTxHash());
            txio.setTxHashOfInput(txHash);
            txio.setValue(chainedTxOut.getValue());
            txio.setTxTime(txtime);

            BinaryData spentSA = chainedTxOut.getScrAddressStr();
            auto& key_txioPair = processedTxIO[spentSA];
            key_txioPair[txio.getDBKeyOfOutput()] = txio;

            auto& wltIdVec = keyToSpentScrAddr_[ZCkey];
            wltIdVec.push_back(spentSA);

            txOutsSpentByZC_.insert(txio.getDBKeyOfOutput());
            continue;
         }
      }

      //fetch the TxOut from DB
      BinaryData opKey = op.getDBkey(db_);
      if (opKey.getSize() == 8)
      {
         //found outPoint DBKey, grab the StoredTxOut
         StoredTxOut stxOut;
         if (db_->getStoredTxOut(stxOut, opKey))
         {
            BinaryData sa = stxOut.getScrAddress();
            if (filter(sa))
            {
               TxIOPair txio(TxRef(opKey.getSliceRef(0, 6)), op.getTxOutIndex(),
                  TxRef(ZCkey), iin);

               txio.setTxHashOfOutput(op.getTxHash());
               txio.setTxHashOfInput(txHash);
               txio.setValue(stxOut.getValue());
               txio.setTxTime(txtime);

               auto& key_txioPair = processedTxIO[sa];
               key_txioPair[opKey] = txio;

               auto& wltIdVec = keyToSpentScrAddr_[ZCkey];
               wltIdVec.push_back(sa);

               txOutsSpentByZC_.insert(opKey);
            }
         }
      }
   }

   // Simply convert the TxOut scripts to scrAddrs and check if registered
   for (uint32_t iout = 0; iout < tx.getNumTxOut(); iout++)
   {
      TxOut txout = tx.getTxOutCopy(iout);
      BinaryData scrAddr = txout.getScrAddressStr();
      if (filter(scrAddr))
      {
         TxIOPair txio(TxRef(ZCkey), iout);

         txio.setValue(txout.getValue());
         txio.setTxHashOfOutput(txHash);
         txio.setTxTime(txtime);
         txio.setUTXO(true);

         auto& key_txioPair = processedTxIO[scrAddr];
         key_txioPair[txio.getDBKeyOfOutput()] = txio;

         continue;
      }

      // It's still possible this is a multisig addr involving one of our
      // existing scrAddrs, even if we aren't explicitly looking for this multisig
      if (withSecondOrderMultisig &&
         txout.getScriptType() == TXOUT_SCRIPT_MULTISIG)
      {
         BinaryRefReader brrmsig(scrAddr);
         uint8_t PREFIX = brrmsig.get_uint8_t();
         (void)PREFIX;
         uint8_t M = brrmsig.get_uint8_t();
         (void)M;
         uint8_t N = brrmsig.get_uint8_t();

         for (uint8_t a = 0; a < N; a++)
            if (filter(HASH160PREFIX + brrmsig.get_BinaryDataRef(20)))
            {
               TxIOPair txio(TxRef(ZCkey), iout);

               txio.setTxHashOfOutput(txHash);
               txio.setValue(txout.getValue());
               txio.setTxTime(txtime);
               txio.setUTXO(true);
               txio.setMultisig(true);

               auto& key_txioPair = processedTxIO[scrAddr];
               key_txioPair[txio.getDBKeyOfOutput()] = txio;
            }
      }
   }

   // If we got here, it's either non std or not ours
   return processedTxIO;
}
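//Illustration only, not part of the original source: the multisig branch above
//implies a serialized msig scrAddr layout of [prefix][M][N][N x 20-byte
//hash160]. Under that assumption, this hypothetical helper collects the member
//script addresses the filter is tested against:
static vector<BinaryData> getMsigMemberScrAddrs(const BinaryData& msigScrAddr)
{
   vector<BinaryData> members;

   BinaryRefReader brr(msigScrAddr);
   brr.get_uint8_t();             //msig prefix byte
   brr.get_uint8_t();             //M, required signers (unused here)
   uint8_t N = brr.get_uint8_t(); //total number of keys

   for (uint8_t i = 0; i < N; i++)
      members.push_back(HASH160PREFIX + brr.get_BinaryDataRef(20));

   return members;
}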
void BlockchainScanner::undo(Blockchain::ReorganizationState& reorgState)
{
   //don't undo subssh, these are skipped by dupID when loading history

   BlockHeader* blockPtr = reorgState.prevTopBlock;
   map<uint32_t, BlockFileMapPointer> fileMaps_;

   map<DB_SELECT, set<BinaryData>> keysToDelete;
   map<BinaryData, StoredScriptHistory> sshMap;
   set<BinaryData> undoSpentness; //TODO: add spentness DB

   //TODO: sanity checks on header ptrs from reorgState
   if (reorgState.prevTopBlock->getBlockHeight() <=
      reorgState.reorgBranchPoint->getBlockHeight())
      throw runtime_error("invalid reorg state");

   while (blockPtr != reorgState.reorgBranchPoint)
   {
      auto currentHeight = blockPtr->getBlockHeight();
      auto currentDupId = blockPtr->getDuplicateID();

      //create tx to pull subssh data
      LMDBEnv::Transaction sshTx;
      db_->beginDBTransaction(&sshTx, SUBSSH, LMDB::ReadOnly);

      //grab blocks from previous top until branch point
      if (blockPtr == nullptr)
         throw runtime_error("reorg failed while tracing back to "
            "branch point");

      auto filenum = blockPtr->getBlockFileNum();
      auto fileIter = fileMaps_.find(filenum);
      if (fileIter == fileMaps_.end())
      {
         fileIter = fileMaps_.insert(make_pair(
            filenum,
            move(blockDataLoader_.get(filenum, false)))).first;
      }

      auto& filemap = fileIter->second;

      BlockData bdata;
      bdata.deserialize(filemap.get()->getPtr() + blockPtr->getOffset(),
         blockPtr->getBlockSize(), blockPtr);

      auto& txns = bdata.getTxns();
      for (unsigned i = 0; i < txns.size(); i++)
      {
         auto& txn = txns[i];

         //undo tx outs added by this block
         for (unsigned y = 0; y < txn->txouts_.size(); y++)
         {
            auto& txout = txn->txouts_[y];

            BinaryRefReader brr(
               txn->data_ + txout.first, txout.second);
            brr.advance(8);
            unsigned scriptSize = (unsigned)brr.get_var_int();
            auto&& scrAddr = BtcUtils::getTxOutScrAddr(
               brr.get_BinaryDataRef(scriptSize));

            if (!scrAddrFilter_->hasScrAddress(scrAddr))
               continue;

            //update ssh value and txio count
            auto& ssh = sshMap[scrAddr];
            if (!ssh.isInitialized())
               db_->getStoredScriptHistorySummary(ssh, scrAddr);

            if (ssh.alreadyScannedUpToBlk_ < currentHeight)
               continue;

            brr.resetPosition();
            uint64_t value = brr.get_uint64_t();
            ssh.totalUnspent_ -= value;
            ssh.totalTxioCount_--;

            //mark stxo key for deletion
            auto&& txoutKey = DBUtils::getBlkDataKey(
               currentHeight, currentDupId, i, y);
            keysToDelete[STXO].insert(txoutKey);

            //decrement summary count at height, remove entry if necessary
            auto& sum = ssh.subsshSummary_[currentHeight];
            sum--;
            if (sum <= 0)
               ssh.subsshSummary_.erase(currentHeight);
         }

         //undo spends from this block
         for (unsigned y = 0; y < txn->txins_.size(); y++)
         {
            auto& txin = txn->txins_[y];
            BinaryDataRef outHash(
               txn->data_ + txin.first, 32);

            auto&& txKey = db_->getDBKeyForHash(outHash, currentDupId);
            if (txKey.getSize() != 6)
               continue;

            uint16_t txOutId = (uint16_t)READ_UINT32_LE(
               txn->data_ + txin.first + 32);
            txKey.append(WRITE_UINT16_BE(txOutId));

            StoredTxOut stxo;
            if (!db_->getStoredTxOut(stxo, txKey))
               continue;

            //update ssh value and txio count
            auto& scrAddr = stxo.getScrAddress();
            auto& ssh = sshMap[scrAddr];
            if (!ssh.isInitialized())
               db_->getStoredScriptHistorySummary(ssh, scrAddr);

            if (ssh.alreadyScannedUpToBlk_ < currentHeight)
               continue;

            ssh.totalUnspent_ += stxo.getValue();
            ssh.totalTxioCount_--;

            //mark txout key for undoing spentness
            undoSpentness.insert(txKey);

            //decrement summary count at height, remove entry if necessary
            auto& sum = ssh.subsshSummary_[currentHeight];
            sum--;
            if (sum <= 0)
               ssh.subsshSummary_.erase(currentHeight);
         }
      }

      //set blockPtr to prev block
      blockPtr = &blockchain_->getHeaderByHash(blockPtr->getPrevHashRef());
   }

   //at this point we have a map of updated ssh, as well as a
   //set of keys to delete from the DB and spentness to undo by stxo key

   //stxo
   {
      LMDBEnv::Transaction tx;
      db_->beginDBTransaction(&tx, STXO, LMDB::ReadWrite);

      //grab stxos and revert spentness
      map<BinaryData, StoredTxOut> stxos;
      for (auto& stxoKey : undoSpentness)
      {
         auto& stxo = stxos[stxoKey];

         if (!db_->getStoredTxOut(stxo, stxoKey))
            continue;

         stxo.spentByTxInKey_.clear();
         stxo.spentness_ = TXOUT_UNSPENT;
      }

      //put updated stxos
      for (auto& stxo : stxos)
      {
         if (stxo.second.isInitialized())
            db_->putStoredTxOut(stxo.second);
      }

      //delete invalidated stxos
      auto& stxoKeysToDelete = keysToDelete[STXO];
      for (auto& key : stxoKeysToDelete)
         db_->deleteValue(STXO, key);
   }

   auto branchPointHeight =
      reorgState.reorgBranchPoint->getBlockHeight();

   //ssh
   {
      LMDBEnv::Transaction tx;
      db_->beginDBTransaction(&tx, SSH, LMDB::ReadWrite);

      //go through all ssh in scrAddrFilter
      auto& scrAddrMap = scrAddrFilter_->getScrAddrMap();
      for (auto& scrAddr : scrAddrMap)
      {
         auto& ssh = sshMap[scrAddr.first];

         //if the ssh isn't in our map, pull it from DB
         if (!ssh.isInitialized())
         {
            db_->getStoredScriptHistorySummary(ssh, scrAddr.first);
            if (ssh.uniqueKey_.getSize() == 0)
            {
               sshMap.erase(scrAddr.first);
               continue;
            }
         }

         //update alreadyScannedUpToBlk_ to branch point height
         if (ssh.alreadyScannedUpToBlk_ > branchPointHeight)
            ssh.alreadyScannedUpToBlk_ = branchPointHeight;
      }

      //write it all out
      for (auto& ssh : sshMap)
      {
         if (!scrAddrFilter_->hasScrAddress(ssh.second.uniqueKey_))
         {
            LOGWARN << "invalid scrAddr during undo";
            continue;
         }

         BinaryWriter bw;
         ssh.second.serializeDBValue(bw, ARMORY_DB_BARE, DB_PRUNE_NONE);

         db_->putValue(SSH,
            ssh.second.getDBKey().getRef(),
            bw.getDataRef());
      }

      //update SSH sdbi
      StoredDBInfo sdbi;
      db_->getStoredDBInfo(SSH, sdbi);
      sdbi.topScannedBlkHash_ = reorgState.reorgBranchPoint->getThisHash();
      sdbi.topBlkHgt_ = branchPointHeight;
      db_->putStoredDBInfo(SSH, sdbi);
   }
}
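//Illustration only, not part of the original source: the txin undo loop above
//builds the spentness key by appending the big-endian txout index to the
//6-byte tx key from getDBKeyForHash, yielding the same 8-byte outpoint key
//format ZCisMineBulkFilter checks for (opKey.getSize() == 8). A minimal
//sketch with a hypothetical helper name:
static BinaryData makeStxoKeyFromTxKey(const BinaryData& txKey6, uint16_t txOutId)
{
   BinaryData stxoKey = txKey6;              //height | dup | txIndex (6 bytes)
   stxoKey.append(WRITE_UINT16_BE(txOutId)); //txout index (2 bytes)
   return stxoKey;
}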
void BlockchainScanner::scanBlockData(shared_ptr<BlockDataBatch> batch)
{
   //getBlock lambda
   auto getBlock = [&](unsigned height)->BlockData
   {
      auto iter = batch->blocks_.find(height);
      if (iter == batch->blocks_.end())
      {
         //TODO: encapsulate in try block to catch deser errors and signal pull
         //thread termination before exiting scope. Can't have the scan thread
         //hanging if this one fails. Also update batch->end_ if we didn't go
         //as far as that block height

         //grab block file map
         BlockHeader* blockheader = nullptr;
         blockheader = &blockchain_->getHeaderByHeight(height);

         auto filenum = blockheader->getBlockFileNum();
         auto mapIter = batch->fileMaps_.find(filenum);
         if (mapIter == batch->fileMaps_.end())
         {
            //we haven't grabbed that file map yet
            auto insertPair = batch->fileMaps_.insert(
               make_pair(filenum, move(blockDataLoader_.get(filenum, true))));

            mapIter = insertPair.first;
         }

         auto filemap = mapIter->second.get();

         //find block and deserialize it
         try
         {
            BlockData bdata;
            bdata.deserialize(
               filemap->getPtr() + blockheader->getOffset(),
               blockheader->getBlockSize(),
               blockheader, false);

            auto insertPair = batch->blocks_.insert(make_pair(height, move(bdata)));
            iter = insertPair.first;
         }
         catch (...)
         {
            LOGERR << "unknown block deser error during scan at height #" << height;
            batch->exceptionPtr_ = current_exception();
            return BlockData();
         }
      }

      return iter->second;
   };

   //parser lambda
   auto blockDataLoop = [&](function<void(const BlockData&)> callback)
   {
      auto currentBlock = batch->start_;

      while (currentBlock <= batch->end_)
      {
         BlockData&& bdata = getBlock(currentBlock);
         if (!bdata.isInitialized())
            return;

         callback(bdata);

         currentBlock += totalThreadCount_;
      }
   };

   //txout lambda
   auto txoutParser = [&](const BlockData& blockdata)->void
   {
      //TODO: flag isMultisig
      const BlockHeader* header = blockdata.header();

      //update processed height
      auto topHeight = header->getBlockHeight();
      batch->highestProcessedHeight_ = topHeight;

      auto& txns = blockdata.getTxns();
      for (unsigned i = 0; i < txns.size(); i++)
      {
         const BCTX& txn = *(txns[i].get());

         for (unsigned y = 0; y < txn.txouts_.size(); y++)
         {
            auto& txout = txn.txouts_[y];

            BinaryRefReader brr(
               txn.data_ + txout.first, txout.second);
            brr.advance(8);
            unsigned scriptSize = (unsigned)brr.get_var_int();
            auto&& scrAddr = BtcUtils::getTxOutScrAddr(
               brr.get_BinaryDataRef(scriptSize));

            if (!scrAddrFilter_->hasScrAddress(scrAddr))
               continue;

            //if we got this far, this txout is ours
            //get tx hash
            auto& txHash = txn.getHash();

            //construct StoredTxOut
            StoredTxOut stxo;
            stxo.dataCopy_ = BinaryData(
               txn.data_ + txout.first, txout.second);
            stxo.parentHash_ = txHash;
            stxo.blockHeight_ = header->getBlockHeight();
            stxo.duplicateID_ = header->getDuplicateID();
            stxo.txIndex_ = i;
            stxo.txOutIndex_ = y;
            stxo.scrAddr_ = scrAddr;
            stxo.spentness_ = TXOUT_UNSPENT;
            stxo.parentTxOutCount_ = txn.txouts_.size();
            stxo.isCoinbase_ = txn.isCoinbase_;
            auto value = stxo.getValue();

            auto&& hgtx = DBUtils::heightAndDupToHgtx(
               stxo.blockHeight_, stxo.duplicateID_);
            auto&& txioKey = DBUtils::getBlkDataKeyNoPrefix(
               stxo.blockHeight_, stxo.duplicateID_, i, y);

            //update utxos_
            auto& stxoHashMap = batch->utxos_[txHash];
            stxoHashMap.insert(make_pair(y, move(stxo)));

            //update ssh_
            auto& ssh = batch->ssh_[scrAddr];
            auto& subssh = ssh.subHistMap_[hgtx];

            //deal with txio count in subssh at serialization
            TxIOPair txio;
            txio.setValue(value);
            txio.setTxOut(txioKey);
            txio.setFromCoinbase(txn.isCoinbase_);
            subssh.txioMap_.insert(make_pair(txioKey, move(txio)));
         }
      }
   };

   //txin lambda
   auto txinParser = [&](const BlockData& blockdata)->void
   {
      const BlockHeader* header = blockdata.header();
      auto& txns = blockdata.getTxns();

      for (unsigned i = 0; i < txns.size(); i++)
      {
         const BCTX& txn = *(txns[i].get());

         for (unsigned y = 0; y < txn.txins_.size(); y++)
         {
            auto& txin = txn.txins_[y];
            BinaryDataRef outHash(
               txn.data_ + txin.first, 32);

            auto utxoIter = utxoMap_.find(outHash);
            if (utxoIter == utxoMap_.end())
               continue;

            unsigned txOutId = READ_UINT32_LE(
               txn.data_ + txin.first + 32);

            auto idIter = utxoIter->second.find(txOutId);
            if (idIter == utxoIter->second.end())
               continue;

            //if we got this far, this txin consumes one of our utxos

            //create spent txout
            auto&& hgtx = DBUtils::getBlkDataKeyNoPrefix(
               header->getBlockHeight(), header->getDuplicateID());

            auto&& txinkey = DBUtils::getBlkDataKeyNoPrefix(
               header->getBlockHeight(), header->getDuplicateID(),
               i, y);

            StoredTxOut stxo = idIter->second;
            stxo.spentness_ = TXOUT_SPENT;
            stxo.spentByTxInKey_ = txinkey;

            //if this tx's hash was never pulled, let's add it to the stxo's
            //parent hash, in order to keep track of this tx in the hint db
            if (txn.txHash_.getSize() == 0)
               stxo.parentHash_ = move(txn.getHash());

            //add to ssh_
            auto& ssh = batch->ssh_[stxo.getScrAddress()];
            auto& subssh = ssh.subHistMap_[hgtx];

            //deal with txio count in subssh at serialization
            TxIOPair txio;
            auto&& txoutkey = stxo.getDBKey(false);
            txio.setTxOut(txoutkey);
            txio.setTxIn(txinkey);
            txio.setValue(stxo.getValue());
            subssh.txioMap_[txoutkey] = move(txio);

            //add to spentTxOuts_
            batch->spentTxOuts_.push_back(move(stxo));
         }
      }
   };

   //txout loop
   blockDataLoop(txoutParser);

   //done with txouts, fill the future flag and wait on the mutex
   //to move to txins processing
   batch->flagUtxoScanDone();
   unique_lock<mutex> txinLock(batch->parseTxinMutex_);

   //txins loop
   blockDataLoop(txinParser);
}
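//Illustration only, not part of the original source: blockDataLoop strides
//through the batch by totalThreadCount_, so a scan thread touches every Nth
//block starting at its batch's start_. A minimal sketch of the heights one
//thread visits, assuming each thread's batch carries its own start offset and
//threadCount > 0 (helper name is hypothetical):
static vector<unsigned> heightsForScanThread(
   unsigned batchStart, unsigned batchEnd, unsigned threadCount)
{
   vector<unsigned> heights;
   for (unsigned height = batchStart; height <= batchEnd; height += threadCount)
      heights.push_back(height);

   return heights;
}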