/// Construct an iterator over @a _parent's items.
/// A begin-iterator over a list positions on the first encoded item; an
/// end-iterator (or any iterator over a non-list) is an empty slice at the
/// end of the parent's data with nothing remaining.
RLP::iterator::iterator(RLP const& _parent, bool _begin)
{
    if (!_begin || !_parent.isList())
    {
        // End/empty iterator: zero-length window positioned past the data.
        m_lastItem = _parent.data().cropped(_parent.data().size());
        m_remaining = 0;
        return;
    }
    auto listPayload = _parent.payload();
    // The first item occupies the leading actualSize() bytes of the payload.
    m_lastItem = listPayload.cropped(0, RLP(listPayload).actualSize());
    m_remaining = listPayload.size() - m_lastItem.size();
}
/// Build an iterator over @a _parent.
/// When @a _begin is true and the parent is a list, the iterator starts on the
/// first item (its encoded size taken via sizeAsEncoded); otherwise it is the
/// canonical end iterator: an empty window at the end of the parent's data.
RLP::iterator::iterator(RLP const& _parent, bool _begin)
{
    bool const hasItems = _begin && _parent.isList();
    if (hasItems)
    {
        auto payloadBytes = _parent.payload();
        auto firstItemSize = sizeAsEncoded(payloadBytes);
        m_currentItem = payloadBytes.cropped(0, firstItemSize);
        m_remaining = payloadBytes.size() - m_currentItem.size();
    }
    else
    {
        m_currentItem = _parent.data().cropped(_parent.data().size());
        m_remaining = 0;
    }
}
/// Validate the outer structure of an RLP-encoded block and return its header.
/// A block must be a list [header, transactions, uncles] where each of the
/// three sections is itself a list; any violation throws InvalidBlockFormat
/// tagged with a BadFieldError naming the offending field index.
RLP BlockHeader::extractHeader(bytesConstRef _block)
{
    RLP root(_block);
    if (!root.isList())
        BOOST_THROW_EXCEPTION(InvalidBlockFormat()
            << errinfo_comment("block needs to be a list")
            << BadFieldError(0, _block.toString()));
    RLP header = root[0];
    if (!header.isList())
        BOOST_THROW_EXCEPTION(InvalidBlockFormat()
            << errinfo_comment("block header needs to be a list")
            << BadFieldError(0, header.data().toString()));
    // Sections 1 (transactions) and 2 (uncles) share the same check, so drive
    // them from a small message table.
    static char const* c_sectionError[] = {
        "block transactions need to be a list",
        "block uncles need to be a list"};
    for (unsigned i = 1; i <= 2; ++i)
        if (!root[i].isList())
            BOOST_THROW_EXCEPTION(InvalidBlockFormat()
                << errinfo_comment(c_sectionError[i - 1])
                << BadFieldError(i, root[i].data().toString()));
    return header;
}
// Assemble a Dapp from an RLP package and announce it via dappReady().
// Item 0 of _rlp is the manifest (decoded by loadManifest); items 1..n-1 are
// content blobs, each matched to a manifest entry by its sha3 hash.
// Throws dev::Exception if a blob's hash is not referenced by the manifest.
void DappLoader::loadDapp(RLP const& _rlp)
{
    Dapp dapp;
    unsigned len = _rlp.itemCountStrict();
    dapp.manifest = loadManifest(_rlp[0].toString());
    for (unsigned c = 1; c < len; ++c)
    {
        bytesConstRef content = _rlp[c].toBytesConstRef();
        h256 hash = sha3(content);
        // Locate the manifest entry that references this blob by hash.
        auto entry = std::find_if(dapp.manifest.entries.cbegin(), dapp.manifest.entries.cend(), [=](ManifestEntry const& _e) { return _e.hash == hash; });
        if (entry != dapp.manifest.entries.cend())
        {
            if (entry->path == "/deployment.js")
            {
                //inject web3 code: prepend the bundled web3 script to the blob
                bytes b(web3Content().data(), web3Content().data() + web3Content().size());
                b.insert(b.end(), content.begin(), content.end());
                dapp.content[hash] = b;
            }
            else
                dapp.content[hash] = content.toBytes();
        }
        else
            // Blob not referenced by the manifest — reject the whole package.
            throw dev::Exception() << errinfo_comment("Dapp content hash does not match");
    }
    emit dappReady(dapp);
}
// Decode this header's fields from a header RLP list of at least 13 items.
// `field` is updated inline (field = N) before each access so that, on a
// decoding failure, the catch block can tag the exception with the exact
// offending field index and its raw hex payload before rethrowing.
void BlockHeader::populate(RLP const &_header)
{
    int field = 0;
    try
    {
        m_parentHash = _header[field = 0].toHash<h256>(RLP::VeryStrict);
        m_sha3Uncles = _header[field = 1].toHash<h256>(RLP::VeryStrict);
        m_author = _header[field = 2].toHash<Address>(RLP::VeryStrict);
        m_stateRoot = _header[field = 3].toHash<h256>(RLP::VeryStrict);
        m_transactionsRoot = _header[field = 4].toHash<h256>(RLP::VeryStrict);
        m_receiptsRoot = _header[field = 5].toHash<h256>(RLP::VeryStrict);
        m_logBloom = _header[field = 6].toHash<LogBloom>(RLP::VeryStrict);
        m_difficulty = _header[field = 7].toInt<u256>();
        m_number = _header[field = 8].toInt<u256>();
        m_gasLimit = _header[field = 9].toInt<u256>();
        m_gasUsed = _header[field = 10].toInt<u256>();
        m_timestamp = _header[field = 11].toInt<u256>();
        m_extraData = _header[field = 12].toBytes();
        // Items beyond the 13 fixed fields are stored verbatim (raw RLP bytes)
        // as seal fields.
        m_seal.clear();
        for (unsigned i = 13; i < _header.itemCount(); ++i)
            m_seal.push_back(_header[i].data().toBytes());
    }
    catch (Exception const &_e)
    {
        // Annotate with the failing field, then propagate.
        _e << errinfo_name("invalid block header format") << BadFieldError(field, toHex(_header[field].data().toBytes()));
        throw;
    }
}
/// Reconstruct block details from their stored RLP form:
/// [number, totalDifficulty, parentHash, childHashes].
BlockDetails::BlockDetails(RLP const &_r)
{
    // Byte size of the source encoding (independent of the field decodes,
    // so it may be captured first).
    size = _r.size();
    number = _r[0].toInt<unsigned>();
    totalDifficulty = _r[1].toInt<u256>();
    parent = _r[2].toHash<h256>();
    children = _r[3].toVector<h256>();
}
void BlockChainSync::onPeerNewBlock(std::shared_ptr<EthereumPeer> _peer, RLP const& _r) { DEV_INVARIANT_CHECK; RecursiveGuard l(x_sync); auto h = BlockInfo::headerHashFromBlock(_r[0].data()); if (_r.itemCount() != 2) _peer->disable("NewBlock without 2 data fields."); else { switch (host().bq().import(_r[0].data())) { case ImportResult::Success: _peer->addRating(100); logNewBlock(h); break; case ImportResult::FutureTimeKnown: //TODO: Rating dependent on how far in future it is. break; case ImportResult::Malformed: case ImportResult::BadChain: logNewBlock(h); _peer->disable("Malformed block received."); return; case ImportResult::AlreadyInChain: case ImportResult::AlreadyKnown: break; case ImportResult::FutureTimeUnknown: case ImportResult::UnknownParent: { logNewBlock(h); u256 totalDifficulty = _r[1].toInt<u256>(); if (totalDifficulty > _peer->m_totalDifficulty) { clog(NetMessageDetail) << "Received block with no known parent. Peer needs syncing..."; resetSyncFor(_peer, h, totalDifficulty); } break; } default:; } DEV_GUARDED(_peer->x_knownBlocks) _peer->m_knownBlocks.insert(h); } }
// Handle a BlockBodies response: each body is matched back to a previously
// downloaded header via its (transactionsRoot, unclesHash) pair, stored in
// m_bodies, and block assembly/sync is resumed.
void BlockChainSync::onPeerBlockBodies(std::shared_ptr<ElementremPeer> _peer, RLP const& _r)
{
    RecursiveGuard l(x_sync);
    DEV_INVARIANT_CHECK;
    size_t itemCount = _r.itemCount();
    clog(NetMessageSummary) << "BlocksBodies (" << dec << itemCount << "entries)" << (itemCount ? "" : ": NoMoreBodies");
    clearPeerDownload(_peer);
    // Bodies are only useful in the downloading states.
    if (m_state != SyncState::Blocks && m_state != SyncState::NewBlocks && m_state != SyncState::Waiting)
    {
        clog(NetMessageSummary) << "Ignoring unexpected blocks";
        return;
    }
    if (m_state == SyncState::Waiting)
    {
        clog(NetAllDetail) << "Ignored blocks while waiting";
        return;
    }
    if (itemCount == 0)
    {
        // Empty reply: peer couldn't serve the request; penalize lightly.
        clog(NetAllDetail) << "Peer does not have the blocks requested";
        _peer->addRating(-1);
    }
    for (unsigned i = 0; i < itemCount; i++)
    {
        RLP body(_r[i]);
        auto txList = body[0];
        // Recompute the transactions trie root from the body's transaction
        // list to identify which requested header this body belongs to.
        h256 transactionRoot = trieRootOver(txList.itemCount(), [&](unsigned i){ return rlp(i); }, [&](unsigned i){ return txList[i].data().toBytes(); });
        h256 uncles = sha3(body[1].data());
        HeaderId id { transactionRoot, uncles };
        auto iter = m_headerIdToNumber.find(id);
        if (iter == m_headerIdToNumber.end() || !haveItem(m_headers, iter->second))
        {
            // Body doesn't correspond to any header we are waiting for.
            clog(NetAllDetail) << "Ignored unknown block body";
            continue;
        }
        unsigned blockNumber = iter->second;
        // A body can be matched at most once; drop the mapping.
        m_headerIdToNumber.erase(id);
        mergeInto(m_bodies, blockNumber, body.data().toBytes());
    }
    collectBlocks();
    continueSync();
}
/// Construct an iterator over @a _parent's items: a begin-iterator over a list
/// points at the first encoded item; otherwise an empty end-iterator.
/// Fix: removed leftover debug instrumentation — a per-construction loop that
/// re-parsed every item (making full iteration O(n^2)), a raw `cout` hex dump
/// on stdout, and a hard assert() — none of which belongs in the production
/// path. The construction logic itself is unchanged.
RLP::iterator::iterator(RLP const& _parent, bool _begin)
{
    if (_begin && _parent.isList())
    {
        auto pl = _parent.payload();
        // First item spans the leading actualSize() bytes of the payload.
        m_lastItem = pl.cropped(0, RLP(pl).actualSize());
        m_remaining = pl.size() - m_lastItem.size();
    }
    else
    {
        // End iterator: empty window positioned past the parent's data.
        m_lastItem = _parent.data().cropped(_parent.data().size());
        m_remaining = 0;
    }
}
bool EthereumPeer::interpret(unsigned _id, RLP const& _r) { m_lastAsk = std::chrono::system_clock::to_time_t(chrono::system_clock::now()); try { switch (_id) { case StatusPacket: { m_protocolVersion = _r[0].toInt<unsigned>(); m_networkId = _r[1].toInt<u256>(); m_totalDifficulty = _r[2].toInt<u256>(); m_latestHash = _r[3].toHash<h256>(); m_genesisHash = _r[4].toHash<h256>(); if (m_peerCapabilityVersion == host()->protocolVersion()) m_protocolVersion = host()->protocolVersion(); clog(NetMessageSummary) << "Status:" << m_protocolVersion << "/" << m_networkId << "/" << m_genesisHash << ", TD:" << m_totalDifficulty << "=" << m_latestHash; setIdle(); host()->onPeerStatus(dynamic_pointer_cast<EthereumPeer>(dynamic_pointer_cast<EthereumPeer>(shared_from_this()))); break; } case TransactionsPacket: { host()->onPeerTransactions(dynamic_pointer_cast<EthereumPeer>(dynamic_pointer_cast<EthereumPeer>(shared_from_this())), _r); break; } case GetBlockHashesPacket: { h256 later = _r[0].toHash<h256>(); unsigned limit = _r[1].toInt<unsigned>(); clog(NetMessageSummary) << "GetBlockHashes (" << limit << "entries," << later << ")"; unsigned c = min<unsigned>(host()->chain().number(later), min(limit, c_maxHashesToSend)); RLPStream s; prep(s, BlockHashesPacket, c); h256 p = host()->chain().details(later).parent; for (unsigned i = 0; i < c && p; ++i, p = host()->chain().details(p).parent) s << p; sealAndSend(s); addRating(0); break; } case GetBlockHashesByNumberPacket: { u256 number256 = _r[0].toInt<u256>(); unsigned number = (unsigned) number256; unsigned limit = _r[1].toInt<unsigned>(); clog(NetMessageSummary) << "GetBlockHashesByNumber (" << number << "-" << number + limit - 1 << ")"; RLPStream s; if (number <= host()->chain().number()) { unsigned c = min<unsigned>(host()->chain().number() - number + 1, min(limit, c_maxHashesToSend)); prep(s, BlockHashesPacket, c); for (unsigned n = number; n < number + c; n++) { h256 p = host()->chain().numberHash(n); s << p; } } else prep(s, 
BlockHashesPacket, 0); sealAndSend(s); addRating(0); break; } case BlockHashesPacket: { unsigned itemCount = _r.itemCount(); clog(NetMessageSummary) << "BlockHashes (" << dec << itemCount << "entries)" << (itemCount ? "" : ": NoMoreHashes"); if (m_asking != Asking::Hashes) { clog(NetAllDetail) << "Peer giving us hashes when we didn't ask for them."; break; } setIdle(); if (itemCount > m_lastAskedHashes) { disable("Too many hashes"); break; } h256s hashes(itemCount); for (unsigned i = 0; i < itemCount; ++i) hashes[i] = _r[i].toHash<h256>(); host()->onPeerHashes(dynamic_pointer_cast<EthereumPeer>(shared_from_this()), hashes); break; } case GetBlocksPacket: { unsigned count = _r.itemCount(); clog(NetMessageSummary) << "GetBlocks (" << dec << count << "entries)"; if (!count) { clog(NetImpolite) << "Zero-entry GetBlocks: Not replying."; addRating(-10); break; } // return the requested blocks. bytes rlp; unsigned n = 0; for (unsigned i = 0; i < min(count, c_maxBlocks) && rlp.size() < c_maxPayload; ++i) { auto h = _r[i].toHash<h256>(); if (host()->chain().isKnown(h)) { rlp += host()->chain().block(_r[i].toHash<h256>()); ++n; } } if (count > 20 && n == 0) clog(NetWarn) << "all" << count << "unknown blocks requested; peer on different chain?"; else clog(NetMessageSummary) << n << "blocks known and returned;" << (min(count, c_maxBlocks) - n) << "blocks unknown;" << (count > c_maxBlocks ? 
count - c_maxBlocks : 0) << "blocks ignored"; addRating(0); RLPStream s; prep(s, BlocksPacket, n).appendRaw(rlp, n); sealAndSend(s); break; } case BlocksPacket: { if (m_asking != Asking::Blocks) clog(NetImpolite) << "Peer giving us blocks when we didn't ask for them."; else { setIdle(); host()->onPeerBlocks(dynamic_pointer_cast<EthereumPeer>(shared_from_this()), _r); } break; } case NewBlockPacket: { host()->onPeerNewBlock(dynamic_pointer_cast<EthereumPeer>(shared_from_this()), _r); break; } case NewBlockHashesPacket: { unsigned itemCount = _r.itemCount(); clog(NetMessageSummary) << "BlockHashes (" << dec << itemCount << "entries)" << (itemCount ? "" : ": NoMoreHashes"); if (itemCount > c_maxIncomingNewHashes) { disable("Too many new hashes"); break; } h256s hashes(itemCount); for (unsigned i = 0; i < itemCount; ++i) hashes[i] = _r[i].toHash<h256>(); host()->onPeerNewHashes(dynamic_pointer_cast<EthereumPeer>(shared_from_this()), hashes); break; } default: return false; } } catch (Exception const&) { clog(NetWarn) << "Peer causing an Exception:" << boost::current_exception_diagnostic_information() << _r; } catch (std::exception const& _e) { clog(NetWarn) << "Peer causing an exception:" << _e.what() << _r; } return true; }
// Handle a Blocks response: import every block into the block queue, keep
// per-result statistics, and decide whether to continue, complete, or restart
// the sync. An empty reply means the peer has no more blocks for us.
void BlockChainSync::onPeerBlocks(std::shared_ptr<EthereumPeer> _peer, RLP const& _r)
{
    RecursiveGuard l(x_sync);
    unsigned itemCount = _r.itemCount();
    clog(NetMessageSummary) << "Blocks (" << dec << itemCount << "entries)" << (itemCount ? "" : ": NoMoreBlocks");
    if (m_state != SyncState::Blocks && m_state != SyncState::NewBlocks && m_state != SyncState::Waiting)
    {
        clog(NetMessageSummary) << "Ignoring unexpected blocks";
        return;
    }
    if (m_state == SyncState::Waiting)
    {
        clog(NetAllDetail) << "Ignored blocks while waiting";
        return;
    }
    if (itemCount == 0)
    {
        // Got to this peer's latest block - just give up.
        peerDoneBlocks(_peer);
        if (downloadMan().isComplete())
            completeSync();
        return;
    }
    // Import statistics for the summary log and the post-loop decisions.
    unsigned success = 0;
    unsigned future = 0;
    unsigned unknown = 0;
    unsigned got = 0;
    unsigned repeated = 0;
    u256 maxUnknownNumber = 0;
    h256 maxUnknown;
    for (unsigned i = 0; i < itemCount; ++i)
    {
        auto h = BlockInfo::headerHashFromBlock(_r[i].data());
        // noteBlock() is false when this block was already delivered (repeat).
        if (_peer->m_sub.noteBlock(h))
        {
            _peer->addRating(10);
            switch (host().bq().import(_r[i].data()))
            {
            case ImportResult::Success:
                success++;
                logNewBlock(h);
                break;
            case ImportResult::Malformed:
            case ImportResult::BadChain:
                logNewBlock(h);
                _peer->disable("Malformed block received.");
                restartSync();
                return;
            case ImportResult::FutureTimeKnown:
                logNewBlock(h);
                future++;
                break;
            case ImportResult::AlreadyInChain:
            case ImportResult::AlreadyKnown:
                got++;
                break;
            case ImportResult::FutureTimeUnknown:
                future++;
                //Fall through
            case ImportResult::UnknownParent:
            {
                unknown++;
                logNewBlock(h);
                if (m_state == SyncState::NewBlocks)
                {
                    // Track the highest-numbered unknown block so sync can be
                    // re-targeted at it below.
                    BlockInfo bi(_r[i].data());
                    if (bi.number() > maxUnknownNumber)
                    {
                        maxUnknownNumber = bi.number();
                        maxUnknown = h;
                    }
                }
                break;
            }
            default:;
            }
        }
        else
        {
            _peer->addRating(0); // -1?
            repeated++;
        }
    }
    clog(NetMessageSummary) << dec << success << "imported OK," << unknown << "with unknown parents," << future << "with future timestamps," << got << " already known," << repeated << " repeats received.";
    if (host().bq().unknownFull())
    {
        clog(NetWarn) << "Too many unknown blocks, restarting sync";
        restartSync();
        return;
    }
    if (m_state == SyncState::NewBlocks && unknown > 0)
    {
        completeSync();
        resetSyncFor(_peer, maxUnknown, std::numeric_limits<u256>::max()); //TODO: proper total difficulty
    }
    if (m_state == SyncState::Blocks || m_state == SyncState::NewBlocks)
    {
        if (downloadMan().isComplete())
            completeSync();
        else
            requestBlocks(_peer); // Some of the blocks might have been downloaded by helping peers, proceed anyway
    }
    DEV_INVARIANT_CHECK;
}
/// Construct a header from either a whole block (BlockData: the header is
/// extracted and the outer structure validated) or from bare header bytes.
/// The cached hash is taken from @a _hashWith when supplied, otherwise it is
/// the sha3 of the header's own RLP data.
BlockHeader::BlockHeader(bytesConstRef _block, BlockDataType _bdt, h256 const &_hashWith)
{
    RLP header = _bdt == BlockData ? extractHeader(_block) : RLP(_block);
    if (_hashWith)
        m_hash = _hashWith;
    else
        m_hash = sha3(header.data());
    populate(header);
}
void BlockChainSync::onPeerBlockHeaders(std::shared_ptr<ElementremPeer> _peer, RLP const& _r) { RecursiveGuard l(x_sync); DEV_INVARIANT_CHECK; size_t itemCount = _r.itemCount(); clog(NetMessageSummary) << "BlocksHeaders (" << dec << itemCount << "entries)" << (itemCount ? "" : ": NoMoreHeaders"); clearPeerDownload(_peer); if (m_state != SyncState::Blocks && m_state != SyncState::NewBlocks && m_state != SyncState::Waiting) { clog(NetMessageSummary) << "Ignoring unexpected blocks"; return; } if (m_state == SyncState::Waiting) { clog(NetAllDetail) << "Ignored blocks while waiting"; return; } if (itemCount == 0) { clog(NetAllDetail) << "Peer does not have the blocks requested"; _peer->addRating(-1); } for (unsigned i = 0; i < itemCount; i++) { BlockHeader info(_r[i].data(), HeaderData); unsigned blockNumber = static_cast<unsigned>(info.number()); if (haveItem(m_headers, blockNumber)) { clog(NetMessageSummary) << "Skipping header " << blockNumber; continue; } if (blockNumber <= m_lastImportedBlock && m_haveCommonHeader) { clog(NetMessageSummary) << "Skipping header " << blockNumber; continue; } if (blockNumber > m_highestBlock) m_highestBlock = blockNumber; auto status = host().bq().blockStatus(info.hash()); if (status == QueueStatus::Importing || status == QueueStatus::Ready || host().chain().isKnown(info.hash())) { m_haveCommonHeader = true; m_lastImportedBlock = (unsigned)info.number(); m_lastImportedBlockHash = info.hash(); } else { Header hdr { _r[i].data().toBytes(), info.hash(), info.parentHash() }; // validate chain HeaderId headerId { info.transactionsRoot(), info.sha3Uncles() }; if (m_haveCommonHeader) { Header const* prevBlock = findItem(m_headers, blockNumber - 1); if ((prevBlock && prevBlock->hash != info.parentHash()) || (blockNumber == m_lastImportedBlock + 1 && info.parentHash() != m_lastImportedBlockHash)) { // mismatching parent id, delete the previous block and don't add this one clog(NetImpolite) << "Unknown block header " << blockNumber << " " << 
info.hash() << " (Restart syncing)"; _peer->addRating(-1); restartSync(); return ; } Header const* nextBlock = findItem(m_headers, blockNumber + 1); if (nextBlock && nextBlock->parent != info.hash()) { clog(NetImpolite) << "Unknown block header " << blockNumber + 1 << " " << nextBlock->hash; // clear following headers unsigned n = blockNumber + 1; auto headers = m_headers.at(n); for (auto const& h : headers) { BlockHeader deletingInfo(h.data, HeaderData); m_headerIdToNumber.erase(headerId); m_downloadingBodies.erase(n); m_downloadingHeaders.erase(n); ++n; } removeAllStartingWith(m_headers, blockNumber + 1); removeAllStartingWith(m_bodies, blockNumber + 1); } } mergeInto(m_headers, blockNumber, std::move(hdr)); if (headerId.transactionsRoot == EmptyTrie && headerId.uncles == EmptyListSHA3) { //empty body, just mark as downloaded RLPStream r(2); r.appendRaw(RLPEmptyList); r.appendRaw(RLPEmptyList); bytes body; r.swapOut(body); mergeInto(m_bodies, blockNumber, std::move(body)); } else m_headerIdToNumber[headerId] = blockNumber; } } collectBlocks(); continueSync(); }
// Dispatch one incoming eth-subprotocol packet for peer _peerID.
// Returns false for unknown packet ids so the caller can try other handlers;
// returns true otherwise — exceptions from malformed packets are logged and
// swallowed rather than propagated.
bool EthereumCapability::interpretCapabilityPacket(
    NodeID const& _peerID, unsigned _id, RLP const& _r)
{
    // operator[] default-constructs a peer record on first contact.
    auto& peer = m_peers[_peerID];
    peer.setLastAsk(std::chrono::system_clock::to_time_t(chrono::system_clock::now()));
    try
    {
        switch (_id)
        {
        case StatusPacket:
        {
            auto const peerProtocolVersion = _r[0].toInt<unsigned>();
            auto const networkId = _r[1].toInt<u256>();
            auto const totalDifficulty = _r[2].toInt<u256>();
            auto const latestHash = _r[3].toHash<h256>();
            auto const genesisHash = _r[4].toHash<h256>();
            LOG(m_logger) << "Status (from " << _peerID << "): " << peerProtocolVersion << " / " << networkId << " / " << genesisHash << ", TD: " << totalDifficulty << " = " << latestHash;
            peer.setStatus(
                peerProtocolVersion, networkId, totalDifficulty, latestHash, genesisHash);
            setIdle(_peerID);
            m_peerObserver->onPeerStatus(peer);
            break;
        }
        case TransactionsPacket:
        {
            m_peerObserver->onPeerTransactions(_peerID, _r);
            break;
        }
        case GetBlockHeadersPacket:
        {
            /// Packet layout:
            /// [ block: { P , B_32 }, maxHeaders: P, skip: P, reverse: P in { 0 , 1 } ]
            const auto blockId = _r[0];
            const auto maxHeaders = _r[1].toInt<u256>();
            const auto skip = _r[2].toInt<u256>();
            const auto reverse = _r[3].toInt<bool>();
            // Clamp the requested header count to our send limit.
            auto numHeadersToSend = maxHeaders <= c_maxHeadersToSend ? static_cast<unsigned>(maxHeaders) : c_maxHeadersToSend;
            if (skip > std::numeric_limits<unsigned>::max() - 1)
            {
                LOG(m_loggerDetail) << "Requested block skip is too big: " << skip << " (peer: " << _peerID << ")";
                break;
            }
            pair<bytes, unsigned> const rlpAndItemCount = m_hostData->blockHeaders(blockId, numHeadersToSend, skip, reverse);
            RLPStream s;
            m_host->prep(_peerID, name(), s, BlockHeadersPacket, rlpAndItemCount.second)
                .appendRaw(rlpAndItemCount.first, rlpAndItemCount.second);
            m_host->sealAndSend(_peerID, s);
            m_host->updateRating(_peerID, 0);
            break;
        }
        case BlockHeadersPacket:
        {
            if (peer.asking() != Asking::BlockHeaders)
                LOG(m_loggerImpolite) << "Peer " << _peerID << " giving us block headers when we didn't ask for them.";
            else
            {
                setIdle(_peerID);
                m_peerObserver->onPeerBlockHeaders(_peerID, _r);
            }
            break;
        }
        case GetBlockBodiesPacket:
        {
            unsigned count = static_cast<unsigned>(_r.itemCount());
            LOG(m_logger) << "GetBlockBodies (" << dec << count << " entries) from " << _peerID;
            if (!count)
            {
                LOG(m_loggerImpolite) << "Zero-entry GetBlockBodies: Not replying to " << _peerID;
                m_host->updateRating(_peerID, -10);
                break;
            }
            pair<bytes, unsigned> const rlpAndItemCount = m_hostData->blockBodies(_r);
            m_host->updateRating(_peerID, 0);
            RLPStream s;
            m_host->prep(_peerID, name(), s, BlockBodiesPacket, rlpAndItemCount.second)
                .appendRaw(rlpAndItemCount.first, rlpAndItemCount.second);
            m_host->sealAndSend(_peerID, s);
            break;
        }
        case BlockBodiesPacket:
        {
            if (peer.asking() != Asking::BlockBodies)
                LOG(m_loggerImpolite) << "Peer " << _peerID << " giving us block bodies when we didn't ask for them.";
            else
            {
                setIdle(_peerID);
                m_peerObserver->onPeerBlockBodies(_peerID, _r);
            }
            break;
        }
        case NewBlockPacket:
        {
            m_peerObserver->onPeerNewBlock(_peerID, _r);
            break;
        }
        case NewBlockHashesPacket:
        {
            unsigned itemCount = _r.itemCount();
            LOG(m_logger) << "BlockHashes (" << dec << itemCount << " entries) " << (itemCount ? "" : " : NoMoreHashes") << " from " << _peerID;
            if (itemCount > c_maxIncomingNewHashes)
            {
                disablePeer(_peerID, "Too many new hashes");
                break;
            }
            // Each announcement is a (hash, blockNumber) pair.
            vector<pair<h256, u256>> hashes(itemCount);
            for (unsigned i = 0; i < itemCount; ++i)
                hashes[i] = std::make_pair(_r[i][0].toHash<h256>(), _r[i][1].toInt<u256>());
            m_peerObserver->onPeerNewHashes(_peerID, hashes);
            break;
        }
        case GetNodeDataPacket:
        {
            unsigned count = static_cast<unsigned>(_r.itemCount());
            if (!count)
            {
                LOG(m_loggerImpolite) << "Zero-entry GetNodeData: Not replying to " << _peerID;
                m_host->updateRating(_peerID, -10);
                break;
            }
            LOG(m_logger) << "GetNodeData (" << dec << count << " entries) from " << _peerID;
            strings const data = m_hostData->nodeData(_r);
            m_host->updateRating(_peerID, 0);
            RLPStream s;
            m_host->prep(_peerID, name(), s, NodeDataPacket, data.size());
            for (auto const& element : data)
                s.append(element);
            m_host->sealAndSend(_peerID, s);
            break;
        }
        case GetReceiptsPacket:
        {
            unsigned count = static_cast<unsigned>(_r.itemCount());
            if (!count)
            {
                LOG(m_loggerImpolite) << "Zero-entry GetReceipts: Not replying to " << _peerID;
                m_host->updateRating(_peerID, -10);
                break;
            }
            LOG(m_logger) << "GetReceipts (" << dec << count << " entries) from " << _peerID;
            pair<bytes, unsigned> const rlpAndItemCount = m_hostData->receipts(_r);
            m_host->updateRating(_peerID, 0);
            RLPStream s;
            m_host->prep(_peerID, name(), s, ReceiptsPacket, rlpAndItemCount.second)
                .appendRaw(rlpAndItemCount.first, rlpAndItemCount.second);
            m_host->sealAndSend(_peerID, s);
            break;
        }
        case NodeDataPacket:
        {
            if (peer.asking() != Asking::NodeData)
                LOG(m_loggerImpolite) << "Peer " << _peerID << " giving us node data when we didn't ask for them.";
            else
            {
                setIdle(_peerID);
                m_peerObserver->onPeerNodeData(_peerID, _r);
            }
            break;
        }
        case ReceiptsPacket:
        {
            if (peer.asking() != Asking::Receipts)
                LOG(m_loggerImpolite) << "Peer " << _peerID << " giving us receipts when we didn't ask for them.";
            else
            {
                setIdle(_peerID);
                m_peerObserver->onPeerReceipts(_peerID, _r);
            }
            break;
        }
        default:
            return false;
        }
    }
    catch (Exception const&)
    {
        LOG(m_loggerWarn) << "Peer " << _peerID << " causing an exception: " << boost::current_exception_diagnostic_information() << " " << _r;
    }
    catch (std::exception const& _e)
    {
        LOG(m_loggerWarn) << "Peer " << _peerID << " causing an exception: " << _e.what() << " " << _r;
    }
    return true;
}
void BlockChainSync::onPeerNewBlock(std::shared_ptr<ElementremPeer> _peer, RLP const& _r) { RecursiveGuard l(x_sync); DEV_INVARIANT_CHECK; if (_r.itemCount() != 2) { _peer->disable("NewBlock without 2 data fields."); return; } BlockHeader info(_r[0][0].data(), HeaderData); auto h = info.hash(); DEV_GUARDED(_peer->x_knownBlocks) _peer->m_knownBlocks.insert(h); unsigned blockNumber = static_cast<unsigned>(info.number()); if (blockNumber > (m_lastImportedBlock + 1)) { clog(NetAllDetail) << "Received unknown new block"; syncPeer(_peer, true); return; } switch (host().bq().import(_r[0].data())) { case ImportResult::Success: _peer->addRating(100); logNewBlock(h); if (blockNumber > m_lastImportedBlock) { m_lastImportedBlock = max(m_lastImportedBlock, blockNumber); m_lastImportedBlockHash = h; } m_highestBlock = max(m_lastImportedBlock, m_highestBlock); m_downloadingBodies.erase(blockNumber); m_downloadingHeaders.erase(blockNumber); removeItem(m_headers, blockNumber); removeItem(m_bodies, blockNumber); if (m_headers.empty()) { assert(m_bodies.empty()); completeSync(); } break; case ImportResult::FutureTimeKnown: //TODO: Rating dependent on how far in future it is. break; case ImportResult::Malformed: case ImportResult::BadChain: logNewBlock(h); _peer->disable("Malformed block received."); return; case ImportResult::AlreadyInChain: case ImportResult::AlreadyKnown: break; case ImportResult::FutureTimeUnknown: case ImportResult::UnknownParent: { _peer->m_unknownNewBlocks++; if (_peer->m_unknownNewBlocks > c_maxPeerUknownNewBlocks) { _peer->disable("Too many uknown new blocks"); restartSync(); } logNewBlock(h); u256 totalDifficulty = _r[1].toInt<u256>(); if (totalDifficulty > _peer->m_totalDifficulty) { clog(NetMessageDetail) << "Received block with no known parent. Peer needs syncing..."; syncPeer(_peer, true); } break; } default:; } }
/// Trivial packet handler: claims every packet whose id is non-zero, or whose
/// payload is non-empty (equivalent to `_id > 0 || _r.size() > 0`).
bool interpretCapabilityPacket(NodeID const&, unsigned _id, RLP const& _r) override
{
    if (_id > 0)
        return true;
    return _r.size() > 0;
}
/// Construct block info from either a whole block (BlockData: header extracted
/// and outer structure validated) or bare header bytes, then populate fields
/// under strictness @a _s. The cached hash is @a _hashWith when supplied,
/// otherwise the sha3 of the header's RLP data.
BlockInfo::BlockInfo(bytesConstRef _block, Strictness _s, h256 const& _hashWith, BlockDataType _bdt)
{
    RLP header = _bdt == BlockData ? extractHeader(_block) : RLP(_block);
    if (_hashWith)
        m_hash = _hashWith;
    else
        m_hash = sha3(header.data());
    populateFromHeader(header, _s);
}