tuple<ImportRoute, bool, unsigned> BlockChain::sync(BlockQueue& _bq, OverlayDB const& _stateDB, unsigned _max)
{
	/// Drain up to _max verified blocks from the queue and import each one.
	/// Returns the combined import route (dead/live blocks + good transactions),
	/// the result of _bq.doneDrain() (queue idle flag), and the count of blocks
	/// successfully imported.
//	_bq.tick(*this);

	VerifiedBlocks blocks;
	_bq.drain(blocks, _max);

	h256s fresh;
	h256s dead;
	h256s badBlocks;
	Transactions goodTransactions;
	unsigned count = 0;
	for (VerifiedBlock const& block: blocks)
	{
		do {
			try
			{
				// Nonce & uncle nonces already verified in verification thread at this point.
				ImportRoute r;
				DEV_TIMED_ABOVE("Block import " + toString(block.verified.info.number()), 500)
					// The previous call site passed an ImportRequirements bitmask
					// compared against zero into this bool parameter; the mask was
					// always non-zero, so the effective argument has always been
					// _mustBeNew == true. Say so directly.
					r = import(block.verified, _stateDB, true);
				fresh += r.liveBlocks;
				dead += r.deadBlocks;
				goodTransactions.reserve(goodTransactions.size() + r.goodTranactions.size());
				std::move(std::begin(r.goodTranactions), std::end(r.goodTranactions), std::back_inserter(goodTransactions));
				++count;
			}
			catch (dev::eth::UnknownParent const&)
			{
				cwarn << "ODD: Import queue contains block with unknown parent.";// << LogTag::Error << boost::current_exception_diagnostic_information();
				// NOTE: don't reimport since the queue should guarantee everything in the right order.
				// Can't continue - chain bad.
				badBlocks.push_back(block.verified.info.hash());
			}
			catch (dev::eth::FutureTime const&)
			{
				cwarn << "ODD: Import queue contains a block with future time.";
				this_thread::sleep_for(chrono::seconds(1));
				// NOTE(review): 'continue' inside do{...}while(false) falls through to
				// the (false) condition and exits the loop, i.e. the block is NOT
				// retried — confirm this is the intended semantics.
				continue;
			}
			catch (dev::eth::TransientError const&)
			{
				this_thread::sleep_for(chrono::milliseconds(100));
				// NOTE(review): see FutureTime above — this does not retry either.
				continue;
			}
			catch (Exception& ex)
			{
//				cnote << "Exception while importing block. Someone (Jeff? That you?) seems to be giving us dodgy blocks!";// << LogTag::Error << diagnostic_information(ex);
				if (m_onBad)
					m_onBad(ex);
				// NOTE: don't reimport since the queue should guarantee everything in the right order.
				// Can't continue - chain bad.
				badBlocks.push_back(block.verified.info.hash());
			}
		} while (false);
	}
	return make_tuple(ImportRoute{dead, fresh, goodTransactions}, _bq.doneDrain(badBlocks), count);
}
Transactions ClientBase::transactions(h256 _blockHash) const
{
	/// Decode every transaction contained in the block with the given hash.
	/// Returns an empty list if the block RLP holds no transactions.
	auto bl = bc().block(_blockHash);
	RLP b(bl);
	Transactions res;
	// Item 1 of the block RLP is the transaction list. Hoist the count out of
	// the loop (it is loop-invariant) and reserve up front to avoid reallocations.
	unsigned const count = b[1].itemCount();
	res.reserve(count);
	for (unsigned i = 0; i < count; ++i)
		// Cheap check: signature/sender recovery is deferred until needed.
		res.emplace_back(b[1][i].data(), CheckTransaction::Cheap);
	return res;
}
Json::Value Eth::eth_pendingTransactions()
{
	/// Return the list of pending transactions that were sent by one of the
	/// locally-managed accounts.
	// Hoist the account list out of the loops: allAccounts() is loop-invariant
	// and may rebuild its result on every call.
	auto const accounts = m_ethAccounts.allAccounts();
	Transactions ours;
	for (Transaction const& pending: client()->pending())
	{
		// sender() performs signature recovery (expensive) — compute it once
		// per transaction rather than once per (transaction, account) pair.
		auto const sender = pending.sender();
		for (Address const& account: accounts)
			if (sender == account)
			{
				ours.push_back(pending);
				break;
			}
	}
	return toJson(ours);
}
Json::Value toJson(dev::eth::BlockInfo const& _bi, BlockDetails const& _bd, UncleHashes const& _us, Transactions const& _ts)
{
	/// Serialise a block header together with its details, uncle hashes and
	/// transactions into the JSON object shape used by the RPC layer.
	Json::Value res = toJson(_bi);
	if (!_bi)
		return res;

	res["totalDifficulty"] = toJS(_bd.totalDifficulty);

	// Uncle hashes as a flat JSON array of hex strings.
	Json::Value uncleArray(Json::arrayValue);
	for (h256 const& uncleHash: _us)
		uncleArray.append(toJS(uncleHash));
	res["uncles"] = uncleArray;

	// Each transaction is serialised with its (blockHash, index) location.
	Json::Value txArray(Json::arrayValue);
	for (unsigned idx = 0; idx < _ts.size(); ++idx)
		txArray.append(toJson(_ts[idx], std::make_pair(_bi.hash(), idx), (BlockNumber)_bi.number));
	res["transactions"] = txArray;

	return res;
}
ImportRoute BlockChain::import(VerifiedBlockRef const& _block, OverlayDB const& _db, bool _mustBeNew) { //@tidy This is a behemoth of a method - could do to be split into a few smaller ones. #if ETH_TIMED_IMPORTS Timer total; double preliminaryChecks; double enactment; double collation; double writing; double checkBest; Timer t; #endif // Check block doesn't already exist first! if (isKnown(_block.info.hash()) && _mustBeNew) { clog(BlockChainNote) << _block.info.hash() << ": Not new."; BOOST_THROW_EXCEPTION(AlreadyHaveBlock()); } // Work out its number as the parent's number + 1 if (!isKnown(_block.info.parentHash())) { clog(BlockChainNote) << _block.info.hash() << ": Unknown parent " << _block.info.parentHash(); // We don't know the parent (yet) - discard for now. It'll get resent to us if we find out about its ancestry later on. BOOST_THROW_EXCEPTION(UnknownParent()); } auto pd = details(_block.info.parentHash()); if (!pd) { auto pdata = pd.rlp(); clog(BlockChainDebug) << "Details is returning false despite block known:" << RLP(pdata); auto parentBlock = block(_block.info.parentHash()); clog(BlockChainDebug) << "isKnown:" << isKnown(_block.info.parentHash()); clog(BlockChainDebug) << "last/number:" << m_lastBlockNumber << m_lastBlockHash << _block.info.number(); clog(BlockChainDebug) << "Block:" << BlockInfo(&parentBlock); clog(BlockChainDebug) << "RLP:" << RLP(parentBlock); clog(BlockChainDebug) << "DATABASE CORRUPTION: CRITICAL FAILURE"; exit(-1); } // Check it's not crazy if (_block.info.timestamp() > (u256)time(0)) { clog(BlockChainChat) << _block.info.hash() << ": Future time " << _block.info.timestamp() << " (now at " << time(0) << ")"; // Block has a timestamp in the future. This is no good. 
BOOST_THROW_EXCEPTION(FutureTime()); } // Verify parent-critical parts verifyBlock(_block.block, m_onBad, ImportRequirements::InOrderChecks); clog(BlockChainChat) << "Attempting import of " << _block.info.hash() << "..."; #if ETH_TIMED_IMPORTS preliminaryChecks = t.elapsed(); t.restart(); #endif ldb::WriteBatch blocksBatch; ldb::WriteBatch extrasBatch; h256 newLastBlockHash = currentHash(); unsigned newLastBlockNumber = number(); BlockLogBlooms blb; BlockReceipts br; u256 td; Transactions goodTransactions; #if ETH_CATCH try #endif { // Check transactions are valid and that they result in a state equivalent to our state_root. // Get total difficulty increase and update state, checking it. Block s(_db); auto tdIncrease = s.enactOn(_block, *this); for (unsigned i = 0; i < s.pending().size(); ++i) { blb.blooms.push_back(s.receipt(i).bloom()); br.receipts.push_back(s.receipt(i)); goodTransactions.push_back(s.pending()[i]); } s.cleanup(true); td = pd.totalDifficulty + tdIncrease; #if ETH_TIMED_IMPORTS enactment = t.elapsed(); t.restart(); #endif #if ETH_PARANOIA || !ETH_TRUE checkConsistency(); #endif // All ok - insert into DB // ensure parent is cached for later addition. // TODO: this is a bit horrible would be better refactored into an enveloping UpgradableGuard // together with an "ensureCachedWithUpdatableLock(l)" method. // This is safe in practice since the caches don't get flushed nearly often enough to be // done here. 
details(_block.info.parentHash()); DEV_WRITE_GUARDED(x_details) m_details[_block.info.parentHash()].children.push_back(_block.info.hash()); #if ETH_TIMED_IMPORTS || !ETH_TRUE collation = t.elapsed(); t.restart(); #endif blocksBatch.Put(toSlice(_block.info.hash()), ldb::Slice(_block.block)); DEV_READ_GUARDED(x_details) extrasBatch.Put(toSlice(_block.info.parentHash(), ExtraDetails), (ldb::Slice)dev::ref(m_details[_block.info.parentHash()].rlp())); extrasBatch.Put(toSlice(_block.info.hash(), ExtraDetails), (ldb::Slice)dev::ref(BlockDetails((unsigned)pd.number + 1, td, _block.info.parentHash(), {}).rlp())); extrasBatch.Put(toSlice(_block.info.hash(), ExtraLogBlooms), (ldb::Slice)dev::ref(blb.rlp())); extrasBatch.Put(toSlice(_block.info.hash(), ExtraReceipts), (ldb::Slice)dev::ref(br.rlp())); #if ETH_TIMED_IMPORTS || !ETH_TRUE writing = t.elapsed(); t.restart(); #endif } #if ETH_CATCH catch (BadRoot& ex) { cwarn << "*** BadRoot error! Trying to import" << _block.info.hash() << "needed root" << ex.root; cwarn << _block.info; // Attempt in import later. BOOST_THROW_EXCEPTION(TransientError()); } catch (Exception& ex) { ex << errinfo_now(time(0)); ex << errinfo_block(_block.block.toBytes()); // only populate extraData if we actually managed to extract it. otherwise, // we might be clobbering the existing one. if (!_block.info.extraData().empty()) ex << errinfo_extraData(_block.info.extraData()); throw; } #endif StructuredLogger::chainReceivedNewBlock( _block.info.hashWithout().abridged(), "",//_block.info.proof.nonce.abridged(), currentHash().abridged(), "", // TODO: remote id ?? _block.info.parentHash().abridged() ); // cnote << "Parent " << bi.parentHash() << " has " << details(bi.parentHash()).children.size() << " children."; h256s route; h256 common; // This might be the new best block... h256 last = currentHash(); if (td > details(last).totalDifficulty) { // don't include bi.hash() in treeRoute, since it's not yet in details DB... // just tack it on afterwards. 
unsigned commonIndex; tie(route, common, commonIndex) = treeRoute(last, _block.info.parentHash()); route.push_back(_block.info.hash()); // Most of the time these two will be equal - only when we're doing a chain revert will they not be if (common != last) { // Erase the number-lookup cache for the segment of the chain that we're reverting (if any). unsigned n = number(route.front()); DEV_WRITE_GUARDED(x_blockHashes) for (auto i = route.begin(); i != route.end() && *i != common; ++i, --n) m_blockHashes.erase(n); DEV_WRITE_GUARDED(x_transactionAddresses) m_transactionAddresses.clear(); // TODO: could perhaps delete them individually? // If we are reverting previous blocks, we need to clear their blooms (in particular, to // rebuild any higher level blooms that they contributed to). clearBlockBlooms(number(common) + 1, number(last) + 1); }