int stateTest()
{
    KeyPair me = sha3("Gav Wood");
    KeyPair myMiner = sha3("Gav's Miner");
    // KeyPair you = sha3("123");

    Defaults::setDBPath("/tmp");

    Overlay stateDB = State::openDB();
    BlockChain bc;
    State s(myMiner.address(), stateDB);

    cout << bc;

    // Sync up - this won't do much until we use the last state.
    s.sync(bc);

    cout << s;

    // Mine to get some ether!
    s.commitToMine(bc);
    while (!s.mine(100).completed) {}
    bc.attemptImport(s.blockData(), stateDB);

    cout << bc;

    s.sync(bc);

    cout << s;

    // Inject a transaction to transfer funds from miner to me.
    bytes tx;
    {
        Transaction t;
        t.nonce = s.transactionsFrom(myMiner.address());
        t.value = 1000;    // 1e3 wei.
        t.receiveAddress = me.address();
        t.sign(myMiner.secret());
        assert(t.sender() == myMiner.address());
        tx = t.rlp();
    }
    s.execute(tx);

    cout << s;

    // Mine to get some ether and set in stone.
    s.commitToMine(bc);
    while (!s.mine(100).completed) {}
    bc.attemptImport(s.blockData(), stateDB);

    cout << bc;

    s.sync(bc);

    cout << s;

    return 0;
}
bool State::sync(BlockChain const& _bc, h256 _block)
{
    bool ret = false;
    // BLOCK
    BlockInfo bi;
    try
    {
        auto b = _bc.block(_block);
        bi.populate(b);
        bi.verifyInternals(_bc.block(_block));
    }
    catch (...)
    {
        // TODO: Slightly nicer handling? :-)
        cerr << "ERROR: Corrupt block-chain! Delete your block-chain DB and restart." << endl;
        exit(1);
    }

    if (bi == m_currentBlock)
    {
        // We mined the last block.
        // Our state is good - we just need to move on to next.
        m_previousBlock = m_currentBlock;
        resetCurrent();
        m_currentNumber++;
        ret = true;
    }
    else if (bi == m_previousBlock)
    {
        // No change since last sync.
        // Carry on as we were.
    }
    else
    {
        // New blocks available, or we've switched to a different branch. All change.
        // Find most recent state dump and replay what's left.
        // (Most recent state dump might end up being genesis.)
        std::vector<h256> chain;
        while (bi.stateRoot != BlockInfo::genesis().hash && m_db.lookup(bi.stateRoot).empty())    // while we don't have the state root of the latest block...
        {
            chain.push_back(bi.hash);                  // push back for later replay.
            bi.populate(_bc.block(bi.parentHash));     // move to parent.
        }

        m_previousBlock = bi;
        resetCurrent();

        // Iterate through in reverse, playing back each of the blocks.
        for (auto it = chain.rbegin(); it != chain.rend(); ++it)
            playback(_bc.block(*it), true);

        m_currentNumber = _bc.details(_block).number + 1;
        resetCurrent();
        ret = true;
    }
    return ret;
}
// @returns the block that represents the difference between m_previousBlock and m_currentBlock.
// (i.e. all the transactions we executed).
void State::commitToMine(BlockChain const& _bc)
{
    if (m_currentBlock.sha3Transactions != h256() || m_currentBlock.sha3Uncles != h256())
    {
        Addresses uncleAddresses;
        for (auto i: RLP(m_currentUncles))
            uncleAddresses.push_back(i[2].toHash<Address>());
        unapplyRewards(uncleAddresses);
    }

    cnote << "Committing to mine on" << m_previousBlock.hash;

    RLPStream uncles;
    Addresses uncleAddresses;

    if (m_previousBlock != BlockInfo::genesis())
    {
        // Find uncles if we're not a direct child of the genesis.
        // cout << "Checking " << m_previousBlock.hash << ", parent=" << m_previousBlock.parentHash << endl;
        auto us = _bc.details(m_previousBlock.parentHash).children;
        assert(us.size() >= 1);    // must be at least 1 child of our grandparent - it's our own parent!
        uncles.appendList(us.size() - 1);    // one fewer - uncles precludes our parent from the list of grandparent's children.
        for (auto const& u: us)
            if (u != m_previousBlock.hash)    // ignore our own parent - it's not an uncle.
            {
                BlockInfo ubi(_bc.block(u));
                ubi.fillStream(uncles, true);
                uncleAddresses.push_back(ubi.coinbaseAddress);
            }
    }
    else
        uncles.appendList(0);

    applyRewards(uncleAddresses);

    RLPStream txs(m_transactions.size());
    for (auto const& i: m_transactions)
        i.fillStream(txs);

    txs.swapOut(m_currentTxs);
    uncles.swapOut(m_currentUncles);
    m_currentBlock.sha3Transactions = sha3(m_currentTxs);
    m_currentBlock.sha3Uncles = sha3(m_currentUncles);

    // Commit any and all changes to the trie that are in the cache, then update the state root accordingly.
    commit();

    cnote << "stateRoot:" << m_state.root();
    // cnote << m_state;
    // cnote << *this;

    m_currentBlock.stateRoot = m_state.root();
    m_currentBlock.parentHash = m_previousBlock.hash;
}
LastHashes State::getLastHashes(BlockChain const& _bc, unsigned _n) const
{
    LastHashes ret;
    ret.resize(256);
    if (c_protocolVersion > 49)
    {
        ret[0] = _bc.numberHash(_n);
        for (unsigned i = 1; i < 256; ++i)
            ret[i] = ret[i - 1] ? _bc.details(ret[i - 1]).parent : h256();
    }
    return ret;
}
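// The routine above fills the 256-entry BLOCKHASH window by walking parent links
// backwards from block number _n. Below is a minimal, self-contained sketch of the
// same walk; ChainIndex, parentOf and the stand-in string hashes are illustrative
// assumptions for this sketch, not the library's own types or API.
#include <string>
#include <unordered_map>
#include <vector>

// Hypothetical stand-in for the chain's hash -> parent-hash index.
using ChainIndex = std::unordered_map<std::string, std::string>;

// Collect up to `window` most recent ancestor hashes, newest first.
// An empty string plays the role of the null hash h256().
std::vector<std::string> lastHashes(ChainIndex const& parentOf, std::string const& head, size_t window)
{
    std::vector<std::string> ret(window);
    ret[0] = head;
    for (size_t i = 1; i < window; ++i)
    {
        if (ret[i - 1].empty())
            break;                              // ran out of ancestors (hit genesis).
        auto it = parentOf.find(ret[i - 1]);
        ret[i] = it == parentOf.end() ? std::string() : it->second;
    }
    return ret;
}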
u256 State::enactOn(bytesConstRef _block, BlockInfo const& _bi, BlockChain const& _bc)
{
    // Check family:
    BlockInfo biParent(_bc.block(_bi.parentHash));
    _bi.verifyParent(biParent);
    BlockInfo biGrandParent;
    if (biParent.number)
        biGrandParent.populate(_bc.block(biParent.parentHash));

    sync(_bc, _bi.parentHash);
    resetCurrent();
    m_previousBlock = biParent;
    return enact(_block, _bc);
}
int main()
{
    // Our address.
    h256 privkey = sha3("123");
    Address us = toPublic(privkey);    // TODO: should be loaded from config file/set at command-line.

    BlockChain bc;          // Maintains block database.
    TransactionQueue tq;    // Maintains list of incoming transactions not yet on the block chain.
    State s(us);

    // Synchronise the state according to the block chain - i.e. replay all transactions in block chain, in order.
    // In practice this won't need to be done since the State DB will contain the keys for the tries for most recent (and many old) blocks.
    // TODO: currently it contains keys for *all* blocks. Make it remove old ones.
    s.sync(bc);
    s.sync(tq);

    PeerNetwork net;    // TODO: Implement - should run in background and send us events when blocks found and allow us to send blocks as required.
    while (true)
    {
        // Process network events.
        net.process();

        // Synchronise block chain with network.
        // Will broadcast any of our (new) transactions and blocks, and collect & add any of their (new) transactions and blocks.
        net.sync(bc, tq);

        // Synchronise state to block chain.
        // This should remove any transactions on our queue that are included within our state.
        // It also guarantees that the state reflects the longest (valid!) chain on the block chain.
        // This might mean reverting to an earlier state and replaying some blocks, or, (worst-case:
        // if there are no checkpoints before our fork) reverting to the genesis block and replaying
        // all blocks.
        s.sync(bc);    // Resynchronise state with block chain & trans
        s.sync(tq);

        // Mine for a while.
        if (s.mine(100))
        {
            // Mined block
            bytes b = s.blockData();

            // Import block.
            bc.import(b);
        }
    }

    return 0;
}
Executive::Executive(State& _s, BlockChain const& _bc, EnvInfo const& _envInfo, unsigned _level):
    m_s(_s),
    m_envInfo(_envInfo),
    m_depth(_level)
{
    m_envInfo.setLastHashes(_bc.lastHashes((unsigned)m_envInfo.number() - 1));
}
int main(int argc, const char **argv)
{
    uint8_t publicKey[65] =
    {
        0x04,0x50,0x86,0x3A,0xD6,0x4A,0x87,0xAE,0x8A,0x2F,0xE8,0x3C,0x1A,
        0xF1,0xA8,0x40,0x3C,0xB5,0x3F,0x53,0xE4,0x86,0xD8,0x51,0x1D,0xAD,
        0x8A,0x04,0x88,0x7E,0x5B,0x23,0x52,0x2C,0xD4,0x70,0x24,0x34,0x53,
        0xA2,0x99,0xFA,0x9E,0x77,0x23,0x77,0x16,0x10,0x3A,0xBC,0x11,0xA1,
        0xDF,0x38,0x85,0x5E,0xD6,0xF2,0xEE,0x18,0x7E,0x9C,0x58,0x2B,0xA6
    };

    char scratch[256];
    uint8_t address1[25];
    uint8_t address2[25];

    bitcoinPublicKeyToAddress(0, publicKey, address1);
    bitcoinPublicKeyToAscii(0, publicKey, scratch, 256);
    bitcoinAsciiToAddress(scratch, address2);

    const char *dataPath = "..\\..\\";

    if ( argc < 2 )
    {
        printf("Using local test FILE of the first 4310 blocks in the block chain.\r\n");
    }
    else
    {
        dataPath = argv[1];
    }

    BlockChain *b = createBlockChain(dataPath);    // Create the block-chain parser using this root path
    if ( b )
    {
        const BlockChain::Block *block = b->readBlock();    // read the first block
        while ( block )
        {
            if ( block->blockIndex > 171 )    // only process the first 171 blocks for the moment
            {
                break;
            }
            b->printBlock(block);
            block = b->readBlock();    // keep reading blocks until we are done.
        }
        b->release();    // release the block-chain parser
    }

    return 0;
}
TransactionReceipts State::sync(BlockChain const& _bc, TransactionQueue& _tq, bool* o_transactionQueueChanged)
{
    // TRANSACTIONS
    TransactionReceipts ret;
    auto ts = _tq.transactions();
    auto lh = getLastHashes(_bc, _bc.number());

    for (int goodTxs = 1; goodTxs;)
    {
        goodTxs = 0;
        for (auto const& i: ts)
            if (!m_transactionSet.count(i.first))
            {
                // don't have it yet! Execute it now.
                try
                {
                    uncommitToMine();
                    // boost::timer t;
                    execute(lh, i.second);
                    ret.push_back(m_receipts.back());
                    _tq.noteGood(i);
                    ++goodTxs;
                    // cnote << "TX took:" << t.elapsed() * 1000;
                }
                catch (InvalidNonce const& in)
                {
                    if (in.required > in.candidate)
                    {
                        // too old
                        _tq.drop(i.first);
                        if (o_transactionQueueChanged)
                            *o_transactionQueueChanged = true;
                    }
                    else
                        _tq.setFuture(i);
                }
                catch (Exception const& _e)
                {
                    // Something else went wrong - drop it.
                    _tq.drop(i.first);
                    if (o_transactionQueueChanged)
                        *o_transactionQueueChanged = true;
                    cwarn << "Sync went wrong\n" << diagnostic_information(_e);
                }
                catch (std::exception const&)
                {
                    // Something else went wrong - drop it.
                    _tq.drop(i.first);
                    if (o_transactionQueueChanged)
                        *o_transactionQueueChanged = true;
                }
            }
    }
    return ret;
}
void MachineBlockPlacement::placeChainsTopologically(MachineFunction &F) {
  MachineBasicBlock *EntryB = &F.front();
  assert(BlockToChain[EntryB] && "Missing chain for entry block");
  assert(*BlockToChain[EntryB]->begin() == EntryB &&
         "Entry block is not the head of the entry block chain");

  // Walk the blocks in RPO, and insert each block for a chain in order the
  // first time we see that chain.
  MachineFunction::iterator InsertPos = F.begin();
  SmallPtrSet<BlockChain *, 16> VisitedChains;
  ReversePostOrderTraversal<MachineBasicBlock *> RPOT(EntryB);
  typedef ReversePostOrderTraversal<MachineBasicBlock *>::rpo_iterator
    rpo_iterator;
  for (rpo_iterator I = RPOT.begin(), E = RPOT.end(); I != E; ++I) {
    BlockChain *Chain = BlockToChain[*I];
    assert(Chain);
    if (!VisitedChains.insert(Chain))
      continue;
    for (BlockChain::iterator BI = Chain->begin(), BE = Chain->end();
         BI != BE; ++BI) {
      DEBUG(dbgs() << (BI == Chain->begin() ? "Placing chain " : " ... ")
                   << getBlockName(*BI) << "\n");
      if (InsertPos != MachineFunction::iterator(*BI))
        F.splice(InsertPos, *BI);
      else
        ++InsertPos;
    }
  }

  // Now that every block is in its final position, update all of the
  // terminators.
  SmallVector<MachineOperand, 4> Cond; // For AnalyzeBranch.
  for (MachineFunction::iterator FI = F.begin(), FE = F.end(); FI != FE; ++FI) {
    // FIXME: It would be awesome if updateTerminator would just return rather
    // than assert when the branch cannot be analyzed in order to remove this
    // boiler plate.
    Cond.clear();
    MachineBasicBlock *TBB = 0, *FBB = 0; // For AnalyzeBranch.
    if (!TII->AnalyzeBranch(*FI, TBB, FBB, Cond))
      FI->updateTerminator();
  }
}
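// The pass above visits basic blocks in reverse post-order (RPO) and lays out each
// block's whole chain the first time any member of that chain is seen. The sketch
// below reproduces just that ordering idea on a toy graph; the adjacency lists,
// chain assignment and output are made-up illustrations, not LLVM data structures.
#include <algorithm>
#include <cstdio>
#include <set>
#include <vector>

// Depth-first post-order over a small CFG given as adjacency lists.
static void postOrder(int n, std::vector<std::vector<int>> const& succ,
                      std::vector<bool>& seen, std::vector<int>& out)
{
    seen[n] = true;
    for (int s: succ[n])
        if (!seen[s])
            postOrder(s, succ, seen, out);
    out.push_back(n);
}

int main()
{
    // Blocks 0..4; 0 is the entry. chainOf[b] assigns each block to a chain.
    std::vector<std::vector<int>> succ = {{1, 2}, {3}, {3}, {4}, {}};
    std::vector<int> chainOf = {0, 1, 2, 1, 1};    // chain 1 = {1, 3, 4}
    std::vector<std::vector<int>> chainBlocks = {{0}, {1, 3, 4}, {2}};

    std::vector<bool> seen(succ.size(), false);
    std::vector<int> po;
    postOrder(0, succ, seen, po);
    std::reverse(po.begin(), po.end());    // reverse post-order for this graph: 0 2 1 3 4

    // Emit each chain, whole, the first time any of its blocks appears in RPO.
    std::set<int> emitted;
    for (int b: po)
        if (emitted.insert(chainOf[b]).second)
            for (int placed: chainBlocks[chainOf[b]])
                std::printf("place block %d (chain %d)\n", placed, chainOf[b]);
    return 0;
}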
EthereumHost::EthereumHost(BlockChain const& _ch, TransactionQueue& _tq, BlockQueue& _bq, u256 _networkId):
    HostCapability<EthereumPeer>(),
    Worker("ethsync"),
    m_chain(_ch),
    m_tq(_tq),
    m_bq(_bq),
    m_networkId(_networkId)
{
    m_latestBlockSent = _ch.currentHash();
}
PopulationStatistics State::populateFromChain(BlockChain const& _bc, h256 const& _h, ImportRequirements::value _ir)
{
    PopulationStatistics ret { 0.0, 0.0 };

    if (!_bc.isKnown(_h))
    {
        // Might be worth throwing here.
        cwarn << "Invalid block given for state population: " << _h;
        return ret;
    }

    auto b = _bc.block(_h);
    BlockInfo bi(b);
    if (bi.number)
    {
        // Non-genesis:

        // 1. Start at parent's end state (state root).
        BlockInfo bip;
        bip.populate(_bc.block(bi.parentHash));
        sync(_bc, bi.parentHash, bip, _ir);

        // 2. Enact the block's transactions onto this state.
        m_ourAddress = bi.coinbaseAddress;
        Timer t;
        auto vb = BlockChain::verifyBlock(b);
        ret.verify = t.elapsed();
        t.restart();
        enact(vb, _bc, _ir);
        ret.enact = t.elapsed();
    }
    else
    {
        // Genesis required:
        // We know there are no transactions, so just populate directly.
        m_state.init();
        sync(_bc, _h, bi, _ir);
    }

    return ret;
}
PopulationStatistics Block::populateFromChain(BlockChain const& _bc, h256 const& _h, ImportRequirements::value _ir)
{
    PopulationStatistics ret { 0.0, 0.0 };

    if (!_bc.isKnown(_h))
    {
        // Might be worth throwing here.
        cwarn << "Invalid block given for state population: " << _h;
        BOOST_THROW_EXCEPTION(BlockNotFound() << errinfo_target(_h));
    }

    auto b = _bc.block(_h);
    BlockInfo bi(b);
    if (bi.number())
    {
        // Non-genesis:

        // 1. Start at parent's end state (state root).
        BlockInfo bip(_bc.block(bi.parentHash()));
        sync(_bc, bi.parentHash(), bip);

        // 2. Enact the block's transactions onto this state.
        m_beneficiary = bi.beneficiary();
        Timer t;
        auto vb = _bc.verifyBlock(&b, function<void(Exception&)>(), _ir | ImportRequirements::TransactionBasic);
        ret.verify = t.elapsed();
        t.restart();
        enact(vb, _bc);
        ret.enact = t.elapsed();
    }
    else
    {
        // Genesis required:
        // We know there are no transactions, so just populate directly.
        m_state = State(m_state.db(), BaseState::Empty);    // TODO: try with PreExisting.
        sync(_bc, _h, bi);
    }

    return ret;
}
State::State(OverlayDB const& _db, BlockChain const& _bc, h256 _h):
    m_db(_db),
    m_state(&m_db),
    m_blockReward(c_blockReward)
{
    // TODO THINK: is this necessary?
    m_state.init();

    auto b = _bc.block(_h);
    BlockInfo bi;
    BlockInfo bip;
    if (_h)
        bi.populate(b);
    if (bi && bi.number)
        bip.populate(_bc.block(bi.parentHash));
    if (!_h || !bip)
        return;

    m_ourAddress = bi.coinbaseAddress;
    sync(_bc, bi.parentHash, bip);
    enact(&b, _bc);
}
bool PeerServer::ensureInitialised(BlockChain& _bc, TransactionQueue& _tq)
{
    if (m_latestBlockSent == h256())
    {
        // First time - just initialise.
        m_latestBlockSent = _bc.currentHash();
        clog(NetNote) << "Initialising: latest=" << m_latestBlockSent;

        for (auto const& i: _tq.transactions())
            m_transactionsSent.insert(i.first);
        m_lastPeersRequest = chrono::steady_clock::time_point::min();
        return true;
    }
    return false;
}
EthereumCapability::EthereumCapability(shared_ptr<p2p::CapabilityHostFace> _host, BlockChain const& _ch,
    OverlayDB const& _db, TransactionQueue& _tq, BlockQueue& _bq, u256 _networkId)
  : m_host(move(_host)),
    m_chain(_ch),
    m_db(_db),
    m_tq(_tq),
    m_bq(_bq),
    m_networkId(_networkId),
    m_hostData(new EthereumHostData(m_chain, m_db))
{
    // TODO: Composition would be better. Left like that to avoid initialization
    // issues as BlockChainSync accesses other EthereumHost members.
    m_sync.reset(new BlockChainSync(*this));
    m_peerObserver.reset(new EthereumPeerObserver(m_sync, m_tq));
    m_latestBlockSent = _ch.currentHash();
    m_tq.onImport([this](ImportResult _ir, h256 const& _h, h512 const& _nodeId) {
        onTransactionImported(_ir, _h, _nodeId);
    });
    std::random_device seed;
    m_urng = std::mt19937_64(seed());
}
u256 State::execute(BlockChain const& _bc, bytesConstRef _rlp, bytes* o_output, bool _commit)
{
    return execute(getLastHashes(_bc, _bc.number()), _rlp, o_output, _commit);
}
Executive::Executive(Block& _s, BlockChain const& _bc, unsigned _level):
    Executive(_s, _bc.lastHashes(unsigned(_s.info().number() - 1)), _level)
{}
bool State::sync(BlockChain const& _bc, h256 _block, BlockInfo const& _bi, ImportRequirements::value _ir)
{
    (void)_ir;

    bool ret = false;
    // BLOCK
    BlockInfo bi = _bi ? _bi : _bc.info(_block);
/*    if (!bi)
        while (1)
        {
            try
            {
                auto b = _bc.block(_block);
                bi.populate(b);
//                bi.verifyInternals(_bc.block(_block));    // Unneeded - we already verify on import into the blockchain.
                break;
            }
            catch (Exception const& _e)
            {
                // TODO: Slightly nicer handling? :-)
                cerr << "ERROR: Corrupt block-chain! Delete your block-chain DB and restart." << endl;
                cerr << diagnostic_information(_e) << endl;
            }
            catch (std::exception const& _e)
            {
                // TODO: Slightly nicer handling? :-)
                cerr << "ERROR: Corrupt block-chain! Delete your block-chain DB and restart." << endl;
                cerr << _e.what() << endl;
            }
        }*/
    if (bi == m_currentBlock)
    {
        // We mined the last block.
        // Our state is good - we just need to move on to next.
        m_previousBlock = m_currentBlock;
        resetCurrent();
        ret = true;
    }
    else if (bi == m_previousBlock)
    {
        // No change since last sync.
        // Carry on as we were.
    }
    else
    {
        // New blocks available, or we've switched to a different branch. All change.
        // Find most recent state dump and replay what's left.
        // (Most recent state dump might end up being genesis.)
        if (m_db.lookup(bi.stateRoot).empty())
        {
            cwarn << "Unable to sync to" << bi.hash() << "; state root" << bi.stateRoot << "not found in database.";
            cwarn << "Database corrupt: contains block without stateRoot:" << bi;
            cwarn << "Bailing.";
            exit(-1);
        }
        m_previousBlock = bi;
        resetCurrent();
        ret = true;
    }
#if ALLOW_REBUILD
    else
    {
int main(int argc, const char **argv)
{
    bool analyze = false;
    uint32_t maxBlocks = 10000000;
    uint32_t searchForTextLength = 0;
    const char *dataPath = ".";
    searchForTextLength = 0;
    bool rebuildPublicKeyDatabase = false;

    int i = 1;
    while ( i < argc )
    {
        const char *option = argv[i];
        if ( *option == '-' )
        {
            if ( strcmp(option,"-max_blocks") == 0 )
            {
                i++;
                if ( i < argc )
                {
                    maxBlocks = atoi(argv[i]);
                    if ( maxBlocks < 1 )
                    {
                        printf("Invalid max_blocks value '%s'\n", argv[i] );
                        maxBlocks = 1000000;
                    }
                    else
                    {
                        printf("Maximum blocks set to %d\r\n", maxBlocks);
                    }
                }
                else
                {
                    printf("Error parsing option '-max_blocks', missing block number.\n");
                }
            }
            else if (strcmp(option, "-analyze") == 0)
            {
                analyze = true;
            }
            else if (strcmp(option, "-rebuild") == 0)
            {
                rebuildPublicKeyDatabase = true;
            }
            else if (strcmp(option, "-text") == 0)
            {
                i++;
                if (i < argc)
                {
                    searchForTextLength = atoi(argv[i]);
                    if (searchForTextLength < 8)
                    {
                        printf("Invalid search-for-text value '%s'\n", argv[i]);
                        searchForTextLength = 0;
                    }
                    else
                    {
                        printf("Search for text length %d\r\n", searchForTextLength);
                    }
                }
                else
                {
                    printf("Error parsing option '-text', missing text length.\n");
                }
            }
            else
            {
                printf("Unknown option '%s'\n", option );
            }
        }
        else
        {
            if ( (i+1) == argc )
            {
                printf("Using directory: %s to locate bitcoin data blocks.\n", option );
                dataPath = option;
            }
            else
            {
                printf("%s not a valid option.\n", option );
            }
        }
        i++;
    }

    PublicKeyDatabase *p = PublicKeyDatabase::create(analyze);
    if (p)
    {
        if (analyze)
        {
            if (rebuildPublicKeyDatabase)
            {
                printf("Rebuilding the public-key database.\r\n");
                p->buildPublicKeyDatabase();
            }
            else
            {
                p->reportDailyTransactions("Transactions.csv");
                p->reportTopBalances("TopBalances.csv", 50000, 0xFFFFFFFF);
            }
        }
        else
        {
            BlockChain *b = BlockChain::createBlockChain(dataPath, maxBlocks);
            if (b)
            {
                b->setSearchTextLength(searchForTextLength);
                printf("Scanning the blockchain for blocks.\r\n");
                for (;;)
                {
                    uint32_t lastBlockRead;
                    bool finished = b->scanBlockChain(lastBlockRead);
                    if (finished)
                    {
                        break;
                    }
                    else
                    {
                        if ((lastBlockRead % 1000) == 0)
                        {
                            printf("Scanned block header: %d\r\n", lastBlockRead);
                        }
                    }
                }
                printf("Finished scanning the available blocks.\r\n");
                printf("Now building the blockchain\r\n");
                uint32_t ret = b->buildBlockChain();
                printf("Found %d blocks.\r\n", ret);
                for (uint32_t i = 0; i < ret; i++)
                {
                    if (((i + 1) % 100) == 0)
                    {
                        printf("Processing block: %d\r\n", i);
                    }
                    const BlockChain::Block *block = b->readBlock(i);
                    if (block == nullptr)
                    {
                        printf("Finished reading blocks.\r\n");
                        break;
                    }
                    else
                    {
                        p->addBlock(block);
                        if (kbhit())
                        {
                            int c = getch();
                            if (c == 27)
                            {
                                break;
                            }
                        }
                    }
                }
                printf("Completed parsing the blockchain.\r\n");
                printf("Now building the public-key records database.\r\n");
                p->buildPublicKeyDatabase();
                b->release();    // release the blockchain parser
            }
        }
        p->release();
    }

#ifdef WIN32
//    getch();
#endif

    return 0;
}
void BasicGasPricer::update(BlockChain const& _bc)
{
    unsigned c = 0;
    h256 p = _bc.currentHash();
    m_gasPerBlock = _bc.info(p).gasLimit();

    map<u256, u256> dist;
    u256 total = 0;

    // make gasPrice versus gasUsed distribution for the last 1000 blocks
    while (c < 1000 && p)
    {
        BlockHeader bi = _bc.info(p);
        if (bi.transactionsRoot() != EmptyTrie)
        {
            auto bb = _bc.block(p);
            RLP r(bb);
            BlockReceipts brs(_bc.receipts(bi.hash()));
            size_t i = 0;
            for (auto const& tr: r[1])
            {
                Transaction tx(tr.data(), CheckTransaction::None);
                u256 gu = brs.receipts[i].gasUsed();
                dist[tx.gasPrice()] += gu;
                total += gu;
                i++;
            }
        }
        p = bi.parentHash();
        ++c;
    }

    // fill m_octiles with weighted gasPrices
    if (total > 0)
    {
        m_octiles[0] = dist.begin()->first;

        // calc mean
        u256 mean = 0;
        for (auto const& i: dist)
            mean += i.first * i.second;
        mean /= total;

        // calc standard deviation
        u256 sdSquared = 0;
        for (auto const& i: dist)
            sdSquared += i.second * (i.first - mean) * (i.first - mean);
        sdSquared /= total;

        if (sdSquared)
        {
            long double sd = sqrt(sdSquared.convert_to<long double>());
            long double normalizedSd = sd / mean.convert_to<long double>();

            // calc octiles normalized to gaussian distribution
            boost::math::normal gauss(1.0, (normalizedSd > 0.01) ? normalizedSd : 0.01);
            for (size_t i = 1; i < 8; i++)
                m_octiles[i] = u256(mean.convert_to<long double>() * boost::math::quantile(gauss, i / 8.0));
            m_octiles[8] = dist.rbegin()->first;
        }
        else
        {
            for (size_t i = 0; i < 9; i++)
                m_octiles[i] = (i + 1) * mean / 5;
        }
    }
}
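// The statistics above reduce to a gas-used-weighted mean and standard deviation of
// the observed gas prices, from which the octiles are then derived via a normal
// quantile. Below is a minimal, self-contained sketch of just that weighting step,
// on plain doubles with made-up numbers - not the pricer's real data or API.
#include <cmath>
#include <cstdio>
#include <map>

int main()
{
    // Toy gasPrice -> total gasUsed distribution (values are invented).
    std::map<double, double> dist = {{10e9, 400000}, {12e9, 250000}, {20e9, 100000}};

    double total = 0;
    for (auto const& e: dist)
        total += e.second;

    // Gas-used-weighted mean price.
    double mean = 0;
    for (auto const& e: dist)
        mean += e.first * e.second;
    mean /= total;

    // Gas-used-weighted standard deviation.
    double var = 0;
    for (auto const& e: dist)
        var += e.second * (e.first - mean) * (e.first - mean);
    var /= total;
    double sd = std::sqrt(var);

    std::printf("weighted mean price: %.0f wei\n", mean);
    std::printf("weighted std dev:    %.0f wei\n", sd);
    return 0;
}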
void State::commitToMine(BlockChain const& _bc)
{
    uncommitToMine();

//    cnote << "Committing to mine on block" << m_previousBlock.hash.abridged();
#ifdef ETH_PARANOIA
    commit();
    cnote << "Pre-reward stateRoot:" << m_state.root();
#endif

    m_lastTx = m_db;

    Addresses uncleAddresses;
    RLPStream unclesData;
    unsigned unclesCount = 0;
    if (m_previousBlock.number != 0)
    {
        // Find great-uncles (or second-cousins or whatever they are) - children of great-grandparents, great-great-grandparents... that were not already uncles in previous generations.
//        cout << "Checking " << m_previousBlock.hash << ", parent=" << m_previousBlock.parentHash << endl;
        set<h256> knownUncles = _bc.allUnclesFrom(m_currentBlock.parentHash);
        auto p = m_previousBlock.parentHash;
        for (unsigned gen = 0; gen < 6 && p != _bc.genesisHash(); ++gen, p = _bc.details(p).parent)
        {
            auto us = _bc.details(p).children;
            assert(us.size() >= 1);    // must be at least 1 child of our grandparent - it's our own parent!
            for (auto const& u: us)
                if (!knownUncles.count(u))    // ignore any uncles/mainline blocks that we know about.
                {
                    BlockInfo ubi(_bc.block(u));
                    ubi.streamRLP(unclesData, WithNonce);
                    ++unclesCount;
                    uncleAddresses.push_back(ubi.coinbaseAddress);
                }
        }
    }

    MemoryDB tm;
    GenericTrieDB<MemoryDB> transactionsTrie(&tm);
    transactionsTrie.init();

    MemoryDB rm;
    GenericTrieDB<MemoryDB> receiptsTrie(&rm);
    receiptsTrie.init();

    RLPStream txs;
    txs.appendList(m_transactions.size());

    for (unsigned i = 0; i < m_transactions.size(); ++i)
    {
        RLPStream k;
        k << i;

        RLPStream receiptrlp;
        m_receipts[i].streamRLP(receiptrlp);
        receiptsTrie.insert(&k.out(), &receiptrlp.out());

        RLPStream txrlp;
        m_transactions[i].streamRLP(txrlp);
        transactionsTrie.insert(&k.out(), &txrlp.out());

        txs.appendRaw(txrlp.out());
    }

    txs.swapOut(m_currentTxs);

    RLPStream(unclesCount).appendRaw(unclesData.out(), unclesCount).swapOut(m_currentUncles);

    m_currentBlock.transactionsRoot = transactionsTrie.root();
    m_currentBlock.receiptsRoot = receiptsTrie.root();
    m_currentBlock.logBloom = logBloom();
    m_currentBlock.sha3Uncles = sha3(m_currentUncles);

    // Apply rewards last of all.
    applyRewards(uncleAddresses);

    // Commit any and all changes to the trie that are in the cache, then update the state root accordingly.
    commit();

//    cnote << "Post-reward stateRoot:" << m_state.root().abridged();
//    cnote << m_state;
//    cnote << *this;

    m_currentBlock.gasUsed = gasUsed();
    m_currentBlock.stateRoot = m_state.root();
    m_currentBlock.parentHash = m_previousBlock.hash;
}
bool PeerServer::sync(BlockChain& _bc, TransactionQueue& _tq, Overlay& _o)
{
    bool ret = ensureInitialised(_bc, _tq);

    if (sync())
        ret = true;

    if (m_mode == NodeMode::Full)
    {
        for (auto it = m_incomingTransactions.begin(); it != m_incomingTransactions.end(); ++it)
            if (_tq.import(*it))
                {}//ret = true;        // just putting a transaction in the queue isn't enough to change the state - it might have an invalid nonce...
            else
                m_transactionsSent.insert(sha3(*it));    // if we already had the transaction, then don't bother sending it on.
        m_incomingTransactions.clear();

        auto h = _bc.currentHash();
        bool resendAll = (h != m_latestBlockSent);

        // Send any new transactions.
        for (auto j: m_peers)
            if (auto p = j.second.lock())
            {
                bytes b;
                uint n = 0;
                for (auto const& i: _tq.transactions())
                    if ((!m_transactionsSent.count(i.first) && !p->m_knownTransactions.count(i.first)) || p->m_requireTransactions || resendAll)
                    {
                        b += i.second;
                        ++n;
                        m_transactionsSent.insert(i.first);
                    }
                if (n)
                {
                    RLPStream ts;
                    PeerSession::prep(ts);
                    ts.appendList(n + 1) << TransactionsPacket;
                    ts.appendRaw(b, n).swapOut(b);
                    seal(b);
                    p->send(&b);
                }
                p->m_knownTransactions.clear();
                p->m_requireTransactions = false;
            }

        // Send any new blocks.
        if (h != m_latestBlockSent)
        {
            // TODO: find where they diverge and send complete new branch.
            RLPStream ts;
            PeerSession::prep(ts);
            ts.appendList(2) << BlocksPacket;
            bytes b;
            ts.appendRaw(_bc.block(_bc.currentHash())).swapOut(b);
            seal(b);
            for (auto j: m_peers)
                if (auto p = j.second.lock())
                {
                    if (!p->m_knownBlocks.count(_bc.currentHash()))
                        p->send(&b);
                    p->m_knownBlocks.clear();
                }
        }
        m_latestBlockSent = h;

        for (int accepted = 1, n = 0; accepted; ++n)
        {
            accepted = 0;

            if (m_incomingBlocks.size())
                for (auto it = prev(m_incomingBlocks.end());; --it)
                {
                    try
                    {
                        _bc.import(*it, _o);
                        it = m_incomingBlocks.erase(it);
                        ++accepted;
                        ret = true;
                    }
                    catch (UnknownParent)
                    {
                        // Don't (yet) know its parent. Leave it for later.
                        m_unknownParentBlocks.push_back(*it);
                        it = m_incomingBlocks.erase(it);
                    }
                    catch (...)
                    {
                        // Some other error - erase it.
                        it = m_incomingBlocks.erase(it);
                    }

                    if (it == m_incomingBlocks.begin())
                        break;
                }

            if (!n && accepted)
            {
                for (auto i: m_unknownParentBlocks)
                    m_incomingBlocks.push_back(i);
                m_unknownParentBlocks.clear();
            }
        }

        // Connect to additional peers
        while (m_peers.size() < m_idealPeerCount)
        {
            if (m_freePeers.empty())
            {
                if (chrono::steady_clock::now() > m_lastPeersRequest + chrono::seconds(10))
                {
                    RLPStream s;
                    bytes b;
                    (PeerSession::prep(s).appendList(1) << GetPeersPacket).swapOut(b);
                    seal(b);
                    for (auto const& i: m_peers)
                        if (auto p = i.second.lock())
                            if (p->isOpen())
                                p->send(&b);
                    m_lastPeersRequest = chrono::steady_clock::now();
                }

                if (!m_accepting)
                    ensureAccepting();

                break;
            }

            auto x = time(0) % m_freePeers.size();
            m_incomingPeers[m_freePeers[x]].second++;
            connect(m_incomingPeers[m_freePeers[x]].first);
            m_freePeers.erase(m_freePeers.begin() + x);
        }
    }

    // platform for consensus of social contract.
    // restricts your freedom but does so fairly. and that's the value proposition.
    // guarantees that everyone else respect the rules of the system. (i.e. obeys laws).

    // We'll keep at most twice as many as is ideal, halving what counts as "too young to kill" until we get there.
    for (uint old = 15000; m_peers.size() > m_idealPeerCount * 2 && old > 100; old /= 2)
        while (m_peers.size() > m_idealPeerCount)
        {
            // look for worst peer to kick off
            // first work out how many are old enough to kick off.
            shared_ptr<PeerSession> worst;
            unsigned agedPeers = 0;
            for (auto i: m_peers)
                if (auto p = i.second.lock())
                    if ((m_mode != NodeMode::PeerServer || p->m_caps != 0x01) && chrono::steady_clock::now() > p->m_connect + chrono::milliseconds(old))    // don't throw off new peers; peer-servers should never kick off other peer-servers.
                    {
                        ++agedPeers;
                        if ((!worst || p->m_rating < worst->m_rating || (p->m_rating == worst->m_rating && p->m_connect > worst->m_connect)))    // kill older ones
                            worst = p;
                    }
            if (!worst || agedPeers <= m_idealPeerCount)
                break;
            worst->disconnect(TooManyPeers);
        }
    return ret;
}
ImportResult BlockQueue::import(bytesConstRef _block, BlockChain const& _bc, bool _isOurs)
{
    clog(BlockQueueTraceChannel) << std::this_thread::get_id();
    // Check if we already know this block.
    h256 h = BlockInfo::headerHash(_block);

    clog(BlockQueueTraceChannel) << "Queuing block" << h << "for import...";

    UpgradableGuard l(m_lock);

    if (m_readySet.count(h) || m_drainingSet.count(h) || m_unknownSet.count(h) || m_knownBad.count(h))
    {
        // Already know about this one.
        clog(BlockQueueTraceChannel) << "Already known.";
        return ImportResult::AlreadyKnown;
    }

    // VERIFY: populates from the block and checks the block is internally coherent.
    BlockInfo bi;
    try
    {
        // TODO: quick verify
        bi.populate(_block);
        bi.verifyInternals(_block);
    }
    catch (Exception const& _e)
    {
        cwarn << "Ignoring malformed block: " << diagnostic_information(_e);
        return ImportResult::Malformed;
    }

    clog(BlockQueueTraceChannel) << "Block" << h << "is" << bi.number << "parent is" << bi.parentHash;

    // Check block doesn't already exist first!
    if (_bc.isKnown(h))
    {
        cblockq << "Already known in chain.";
        return ImportResult::AlreadyInChain;
    }

    UpgradeGuard ul(l);
    DEV_INVARIANT_CHECK;

    // Check it's not in the future
    (void)_isOurs;
    if (bi.timestamp > (u256)time(0)/* && !_isOurs*/)
    {
        m_future.insert(make_pair((unsigned)bi.timestamp, make_pair(h, _block.toBytes())));
        char buf[24];
        time_t bit = (unsigned)bi.timestamp;
        if (strftime(buf, 24, "%X", localtime(&bit)) == 0)
            buf[0] = '\0';    // empty in case strftime fails
        clog(BlockQueueTraceChannel) << "OK - queued for future [" << bi.timestamp << "vs" << time(0) << "] - will wait until" << buf;
        m_unknownSize += _block.size();
        m_unknownCount++;
        m_difficulty += bi.difficulty;
        bool unknown = !m_readySet.count(bi.parentHash) && !m_drainingSet.count(bi.parentHash) && !_bc.isKnown(bi.parentHash);
        return unknown ? ImportResult::FutureTimeUnknown : ImportResult::FutureTimeKnown;
    }
    else
    {
        // We now know it.
        if (m_knownBad.count(bi.parentHash))
        {
            m_knownBad.insert(bi.hash());
            updateBad_WITH_LOCK(bi.hash());    // bad parent; this is bad too, note it as such
            return ImportResult::BadChain;
        }
        else if (!m_readySet.count(bi.parentHash) && !m_drainingSet.count(bi.parentHash) && !_bc.isKnown(bi.parentHash))
        {
            // We don't know the parent (yet) - queue it up for later. It'll get resent to us if we find out about its ancestry later on.
            clog(BlockQueueTraceChannel) << "OK - queued as unknown parent:" << bi.parentHash;
            m_unknown.insert(make_pair(bi.parentHash, make_pair(h, _block.toBytes())));
            m_unknownSet.insert(h);
            m_unknownSize += _block.size();
            m_difficulty += bi.difficulty;
            m_unknownCount++;
            return ImportResult::UnknownParent;
        }
        else
        {
            // If valid, append to blocks.
            clog(BlockQueueTraceChannel) << "OK - ready for chain insertion.";
            DEV_GUARDED(m_verification)
                m_unverified.push_back(UnverifiedBlock { h, bi.parentHash, _block.toBytes() });
            m_moreToVerify.notify_one();
            m_readySet.insert(h);
            m_knownSize += _block.size();
            m_difficulty += bi.difficulty;
            m_knownCount++;

            noteReady_WITH_LOCK(h);

            return ImportResult::Success;
        }
    }
}
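// Stripped of locking, logging and RLP handling, the import above is a classification
// ladder: already known, already in chain, future-timestamped, bad parent, unknown
// parent, or ready. The sketch below mirrors that ladder on simplified inputs; the
// QueueView struct, classify() and the string hashes are assumptions made for this
// illustration, not the real BlockQueue interface.
#include <ctime>
#include <set>
#include <string>

enum class Verdict { AlreadyKnown, AlreadyInChain, FutureTime, BadChain, UnknownParent, Ready };

struct QueueView
{
    std::set<std::string> known;      // ready + draining + unknown, i.e. anything already queued
    std::set<std::string> inChain;    // blocks already imported into the chain
    std::set<std::string> knownBad;   // blocks (and descendants) marked invalid
};

// Mirrors the decision ladder of BlockQueue::import on simplified inputs.
Verdict classify(QueueView const& q, std::string const& hash, std::string const& parent, std::time_t timestamp)
{
    if (q.known.count(hash) || q.knownBad.count(hash))
        return Verdict::AlreadyKnown;
    if (q.inChain.count(hash))
        return Verdict::AlreadyInChain;
    if (timestamp > std::time(nullptr))
        return Verdict::FutureTime;              // park it until its timestamp is reachable.
    if (q.knownBad.count(parent))
        return Verdict::BadChain;                // a bad parent taints the child.
    if (!q.known.count(parent) && !q.inChain.count(parent))
        return Verdict::UnknownParent;           // wait until the parent shows up.
    return Verdict::Ready;                       // hand over for verification and import.
}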
bool Block::sync(BlockChain const& _bc) { return sync(_bc, _bc.currentHash()); }
Executive::Executive(State& _s, BlockChain const& _bc, unsigned _number, unsigned _level):
    m_s(_s),
    m_envInfo(_bc.info(_bc.numberHash(_number)), _bc.lastHashes(_number - 1)),
    m_depth(_level)
{}
bool State::sync(BlockChain const& _bc, h256 _block, BlockInfo const& _bi)
{
    bool ret = false;
    // BLOCK
    BlockInfo bi = _bi;
    if (!bi)
        while (1)
        {
            try
            {
                auto b = _bc.block(_block);
                bi.populate(b);
//                bi.verifyInternals(_bc.block(_block));    // Unneeded - we already verify on import into the blockchain.
                break;
            }
            catch (Exception const& _e)
            {
                // TODO: Slightly nicer handling? :-)
                cerr << "ERROR: Corrupt block-chain! Delete your block-chain DB and restart." << endl;
                cerr << diagnostic_information(_e) << endl;
            }
            catch (std::exception const& _e)
            {
                // TODO: Slightly nicer handling? :-)
                cerr << "ERROR: Corrupt block-chain! Delete your block-chain DB and restart." << endl;
                cerr << _e.what() << endl;
            }
        }
    if (bi == m_currentBlock)
    {
        // We mined the last block.
        // Our state is good - we just need to move on to next.
        m_previousBlock = m_currentBlock;
        resetCurrent();
        ret = true;
    }
    else if (bi == m_previousBlock)
    {
        // No change since last sync.
        // Carry on as we were.
    }
    else
    {
        // New blocks available, or we've switched to a different branch. All change.
        // Find most recent state dump and replay what's left.
        // (Most recent state dump might end up being genesis.)
        std::vector<h256> chain;
        while (bi.number != 0 && m_db.lookup(bi.stateRoot).empty())    // while we don't have the state root of the latest block...
        {
            chain.push_back(bi.hash);                  // push back for later replay.
            bi.populate(_bc.block(bi.parentHash));     // move to parent.
        }

        m_previousBlock = bi;
        resetCurrent();

        // Iterate through in reverse, playing back each of the blocks.
        try
        {
            for (auto it = chain.rbegin(); it != chain.rend(); ++it)
            {
                auto b = _bc.block(*it);
                enact(&b, _bc);
                cleanup(true);
            }
        }
        catch (...)
        {
            // TODO: Slightly nicer handling? :-)
            cerr << "ERROR: Corrupt block-chain! Delete your block-chain DB and restart." << endl;
            cerr << boost::current_exception_diagnostic_information() << endl;
            exit(1);
        }

        resetCurrent();
        ret = true;
    }
    return ret;
}
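// The branch-switch case above follows a general pattern: walk parent links back
// until a state we already hold (or genesis), collecting block hashes, then replay
// them in reverse so the oldest block is enacted first. A minimal sketch of that
// pattern follows; ParentIndex, KnownStates and keying the known set by block hash
// (the real code keys it by state root) are simplifying assumptions for this sketch.
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <vector>

using ParentIndex = std::unordered_map<std::string, std::string>;   // block hash -> parent hash
using KnownStates = std::unordered_set<std::string>;                // blocks whose state we already have

// Walk back from `head` until a block whose state we already hold (or genesis,
// signalled by an empty parent), returning the blocks that must be replayed.
std::vector<std::string> blocksToReplay(ParentIndex const& parentOf,
                                        KnownStates const& haveStateFor,
                                        std::string head)
{
    std::vector<std::string> chain;
    while (!head.empty() && !haveStateFor.count(head))
    {
        chain.push_back(head);    // remember for later replay.
        auto it = parentOf.find(head);
        head = it == parentOf.end() ? std::string() : it->second;    // move to parent.
    }
    // The caller then replays in reverse, i.e. oldest block first:
    //   for (auto it = chain.rbegin(); it != chain.rend(); ++it) enact(*it);
    return chain;
}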
u256 State::enact(bytesConstRef _block, BlockChain const& _bc, bool _checkNonce)
{
    // m_currentBlock is assumed to be prepopulated and reset.

#if !ETH_RELEASE
    BlockInfo bi(_block, _checkNonce);
    assert(m_previousBlock.hash == bi.parentHash);
    assert(m_currentBlock.parentHash == bi.parentHash);
    assert(rootHash() == m_previousBlock.stateRoot);
#endif

    if (m_currentBlock.parentHash != m_previousBlock.hash)
        BOOST_THROW_EXCEPTION(InvalidParentHash());

    // Populate m_currentBlock with the correct values.
    m_currentBlock.populate(_block, _checkNonce);
    m_currentBlock.verifyInternals(_block);

//    cnote << "playback begins:" << m_state.root();
//    cnote << m_state;

    MemoryDB tm;
    GenericTrieDB<MemoryDB> transactionsTrie(&tm);
    transactionsTrie.init();

    MemoryDB rm;
    GenericTrieDB<MemoryDB> receiptsTrie(&rm);
    receiptsTrie.init();

    LastHashes lh = getLastHashes(_bc, (unsigned)m_previousBlock.number);

    // All ok with the block generally. Play back the transactions now...
    unsigned i = 0;
    for (auto const& tr: RLP(_block)[1])
    {
        RLPStream k;
        k << i;

        transactionsTrie.insert(&k.out(), tr.data());
        execute(lh, tr.data());

        RLPStream receiptrlp;
        m_receipts.back().streamRLP(receiptrlp);
        receiptsTrie.insert(&k.out(), &receiptrlp.out());
        ++i;
    }

    if (transactionsTrie.root() != m_currentBlock.transactionsRoot)
    {
        cwarn << "Bad transactions state root!";
        BOOST_THROW_EXCEPTION(InvalidTransactionsStateRoot());
    }

    if (receiptsTrie.root() != m_currentBlock.receiptsRoot)
    {
        cwarn << "Bad receipts state root.";
        cwarn << "Block:" << toHex(_block);
        cwarn << "Block RLP:" << RLP(_block);
        cwarn << "Calculated: " << receiptsTrie.root();
        for (unsigned j = 0; j < i; ++j)
        {
            RLPStream k;
            k << j;
            auto b = asBytes(receiptsTrie.at(&k.out()));
            cwarn << j << ": ";
            cwarn << "RLP: " << RLP(b);
            cwarn << "Hex: " << toHex(b);
            cwarn << TransactionReceipt(&b);
        }
        cwarn << "Recorded: " << m_currentBlock.receiptsRoot;
        auto rs = _bc.receipts(m_currentBlock.hash);
        for (unsigned j = 0; j < rs.receipts.size(); ++j)
        {
            auto b = rs.receipts[j].rlp();
            cwarn << j << ": ";
            cwarn << "RLP: " << RLP(b);
            cwarn << "Hex: " << toHex(b);
            cwarn << rs.receipts[j];
        }
        BOOST_THROW_EXCEPTION(InvalidReceiptsStateRoot());
    }

    if (m_currentBlock.logBloom != logBloom())
    {
        cwarn << "Bad log bloom!";
        BOOST_THROW_EXCEPTION(InvalidLogBloom());
    }

    // Initialise total difficulty calculation.
    u256 tdIncrease = m_currentBlock.difficulty;

    // Check uncles & apply their rewards to state.
    set<h256> nonces = { m_currentBlock.nonce };
    Addresses rewarded;
    set<h256> knownUncles = _bc.allUnclesFrom(m_currentBlock.parentHash);
    for (auto const& i: RLP(_block)[2])
    {
        if (knownUncles.count(sha3(i.data())))
            BOOST_THROW_EXCEPTION(UncleInChain(knownUncles, sha3(i.data())));

        BlockInfo uncle = BlockInfo::fromHeader(i.data());
        if (nonces.count(uncle.nonce))
            BOOST_THROW_EXCEPTION(DuplicateUncleNonce());

        BlockInfo uncleParent(_bc.block(uncle.parentHash));
        if ((bigint)uncleParent.number < (bigint)m_currentBlock.number - 7)
            BOOST_THROW_EXCEPTION(UncleTooOld());

        uncle.verifyParent(uncleParent);

        nonces.insert(uncle.nonce);
        tdIncrease += uncle.difficulty;
        rewarded.push_back(uncle.coinbaseAddress);
    }
    applyRewards(rewarded);

    // Commit all cached state changes to the state trie.
    commit();

    // Hash the state trie and check against the state_root hash in m_currentBlock.
    if (m_currentBlock.stateRoot != m_previousBlock.stateRoot && m_currentBlock.stateRoot != rootHash())
    {
        cwarn << "Bad state root!";
        cnote << "Given to be:" << m_currentBlock.stateRoot;
        cnote << TrieDB<Address, OverlayDB>(&m_db, m_currentBlock.stateRoot);
        cnote << "Calculated to be:" << rootHash();
        cnote << m_state;
        cnote << *this;
        // Rollback the trie.
        m_db.rollback();
        BOOST_THROW_EXCEPTION(InvalidStateRoot());
    }

    if (m_currentBlock.gasUsed != gasUsed())
    {
        // Rollback the trie.
        m_db.rollback();
        BOOST_THROW_EXCEPTION(InvalidGasUsed() << RequirementError(bigint(gasUsed()), bigint(m_currentBlock.gasUsed)));
    }

    return tdIncrease;
}
bool Block::sync(BlockChain const& _bc, h256 const& _block, BlockInfo const& _bi)
{
    bool ret = false;
    // BLOCK
    BlockInfo bi = _bi ? _bi : _bc.info(_block);
#if ETH_PARANOIA
    if (!bi)
        while (1)
        {
            try
            {
                auto b = _bc.block(_block);
                bi.populate(b);
                break;
            }
            catch (Exception const& _e)
            {
                // TODO: Slightly nicer handling? :-)
                cerr << "ERROR: Corrupt block-chain! Delete your block-chain DB and restart." << endl;
                cerr << diagnostic_information(_e) << endl;
            }
            catch (std::exception const& _e)
            {
                // TODO: Slightly nicer handling? :-)
                cerr << "ERROR: Corrupt block-chain! Delete your block-chain DB and restart." << endl;
                cerr << _e.what() << endl;
            }
        }
#endif
    if (bi == m_currentBlock)
    {
        // We mined the last block.
        // Our state is good - we just need to move on to next.
        m_previousBlock = m_currentBlock;
        resetCurrent();
        ret = true;
    }
    else if (bi == m_previousBlock)
    {
        // No change since last sync.
        // Carry on as we were.
    }
    else
    {
        // New blocks available, or we've switched to a different branch. All change.
        // Find most recent state dump and replay what's left.
        // (Most recent state dump might end up being genesis.)
        if (m_state.db().lookup(bi.stateRoot()).empty())    // TODO: API in State for this?
        {
            cwarn << "Unable to sync to" << bi.hash() << "; state root" << bi.stateRoot() << "not found in database.";
            cwarn << "Database corrupt: contains block without stateRoot:" << bi;
            cwarn << "Try rescuing the database by running: eth --rescue";
            BOOST_THROW_EXCEPTION(InvalidStateRoot() << errinfo_target(bi.stateRoot()));
        }
        m_previousBlock = bi;
        resetCurrent();
        ret = true;
    }
#if ALLOW_REBUILD
    else
    {
ImportResult BlockQueue::import(bytesConstRef _block, BlockChain const& _bc)
{
    // Check if we already know this block.
    h256 h = BlockInfo::headerHash(_block);

    cblockq << "Queuing block" << h.abridged() << "for import...";

    UpgradableGuard l(m_lock);

    if (m_readySet.count(h) || m_drainingSet.count(h) || m_unknownSet.count(h))
    {
        // Already know about this one.
        cblockq << "Already known.";
        return ImportResult::AlreadyKnown;
    }

    // VERIFY: populates from the block and checks the block is internally coherent.
    BlockInfo bi;

#if ETH_CATCH
    try
#endif
    {
        bi.populate(_block);
        bi.verifyInternals(_block);
    }
#if ETH_CATCH
    catch (Exception const& _e)
    {
        cwarn << "Ignoring malformed block: " << diagnostic_information(_e);
        return ImportResult::Malformed;
    }
#endif

    // Check block doesn't already exist first!
    if (_bc.details(h))
    {
        cblockq << "Already known in chain.";
        return ImportResult::AlreadyInChain;
    }

    UpgradeGuard ul(l);

    // Check it's not in the future
    if (bi.timestamp > (u256)time(0))
    {
        m_future.insert(make_pair((unsigned)bi.timestamp, _block.toBytes()));
        cblockq << "OK - queued for future.";
        return ImportResult::FutureTime;
    }
    else
    {
        // We now know it.
        if (!m_readySet.count(bi.parentHash) && !m_drainingSet.count(bi.parentHash) && !_bc.isKnown(bi.parentHash))
        {
            // We don't know the parent (yet) - queue it up for later. It'll get resent to us if we find out about its ancestry later on.
            cblockq << "OK - queued as unknown parent:" << bi.parentHash.abridged();
            m_unknown.insert(make_pair(bi.parentHash, make_pair(h, _block.toBytes())));
            m_unknownSet.insert(h);
            return ImportResult::UnknownParent;
        }
        else
        {
            // If valid, append to blocks.
            cblockq << "OK - ready for chain insertion.";
            m_ready.push_back(_block.toBytes());
            m_readySet.insert(h);
            noteReadyWithoutWriteGuard(h);
            return ImportResult::Success;
        }
    }
}