Example #1: PV60Sync::onPeerHashes. Scans a batch of block hashes received from the syncing peer, queueing unknown hashes for download and switching to block download once a known hash is reached.
void PV60Sync::onPeerHashes(std::shared_ptr<EthereumPeer> _peer, h256s const& _hashes)
{
	RecursiveGuard l(x_sync);
	DEV_INVARIANT_CHECK;
	if (!isSyncing(_peer))
	{
		clog(NetMessageSummary) << "Ignoring hashes since not syncing";
		return;
	}
	if (_peer->m_syncHash != (m_syncingLastReceivedHash ? m_syncingLastReceivedHash : m_syncingLatestHash))
	{
		clog(NetMessageSummary) << "Ignoring unexpected hashes";
		return;
	}
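	// An empty reply means the peer has no more hashes to offer; move on to downloading blocks.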
	if (_hashes.size() == 0)
	{
		transition(_peer, SyncState::Blocks);
		return;
	}
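	// Scan the received hashes: a known hash means block download can start; unknown ones are queued as needed blocks.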
	unsigned knowns = 0;
	unsigned unknowns = 0;
	for (unsigned i = 0; i < _hashes.size(); ++i)
	{
		auto h = _hashes[i];
		auto status = host().bq().blockStatus(h);
		if (status == QueueStatus::Importing || status == QueueStatus::Ready || host().chain().isKnown(h))
		{
			clog(NetMessageSummary) << "block hash ready:" << h << ". Start blocks download...";
			assert (isSyncing(_peer));
			transition(_peer, SyncState::Blocks);
			return;
		}
		else if (status == QueueStatus::Bad)
		{
			cwarn << "block hash bad!" << h << ". Bailing...";
			_peer->disable("Bad blocks");
			restartSync();
			return;
		}
		else if (status == QueueStatus::Unknown)
		{
			unknowns++;
			m_syncingNeededBlocks.push_back(h);
		}
		else
			knowns++;
		m_syncingLastReceivedHash = h;
	}
	clog(NetMessageSummary) << knowns << "knowns," << unknowns << "unknowns; now at" << m_syncingLastReceivedHash;
	if (m_syncingNeededBlocks.size() > _peer->m_expectedHashes)
	{
		_peer->disable("Too many hashes");
		restartSync();
		return;
	}
	// Ran through the whole batch without reaching a known hash; ask for more hashes.
	transition(_peer, SyncState::Hashes);
}
Example #2: EthereumPeer::requestBlocks. Sends a GetBlocks packet for the given block hashes, or returns the peer to idle if there is nothing to request.
void EthereumPeer::requestBlocks(h256s const& _blocks)
{
	setAsking(Asking::Blocks);
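	// Build and send a GetBlocks packet listing the requested hashes; with nothing to request, drop back to idle.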
	if (_blocks.size())
	{
		RLPStream s;
		prep(s, GetBlocksPacket, _blocks.size());
		for (auto const& i: _blocks)
			s << i;
		sealAndSend(s);
	}
	else
		setIdle();
}
Example #3: BlockQueue::doneDrain. Clears the draining set and, if the importer reported bad blocks, records them and updates the bad-block bookkeeping.
bool BlockQueue::doneDrain(h256s const& _bad)
{
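	// Finish a drain cycle; _bad carries the hashes of any drained blocks that turned out to be bad.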
	WriteGuard l(m_lock);
	DEV_INVARIANT_CHECK;
	m_drainingSet.clear();
	m_difficulty -= m_drainingDifficulty;
	m_drainingDifficulty = 0;
	if (_bad.size())
	{
		// at least one of them was bad.
		m_knownBad += _bad;
		for (h256 const& b : _bad)
			updateBad_WITH_LOCK(b);
	}
	return !m_readySet.empty();
}
Example #4: BlockQueue::doneDrain (a second version). Clears the draining set and re-filters the ready queue, marking blocks whose parent is known bad as bad themselves.
bool BlockQueue::doneDrain(h256s const& _bad)
{
	WriteGuard l(m_lock);
	m_drainingSet.clear();
	if (_bad.size())
	{
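		// Re-filter the ready queue: blocks whose parent is now known bad become known bad themselves; the rest are kept.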
		vector<bytes> old;
		swap(m_ready, old);
		for (auto& b: old)
		{
			BlockInfo bi(b);
			if (m_knownBad.count(bi.parentHash))
				m_knownBad.insert(bi.hash());
			else
				m_ready.push_back(std::move(b));
		}
	}
	m_knownBad += _bad;
	return !m_readySet.empty();
}
Example #5: PV61Sync::onPeerHashes. Handles hash responses during PV61 subchain sync: it falls back to the PV60 handler when needed, records new subchain markers from the lead peer, and extends subchains being downloaded by other peers.
void PV61Sync::onPeerHashes(std::shared_ptr<EthereumPeer> _peer, h256s const& _hashes)
{
	RecursiveGuard l(x_sync);
	if (m_syncingBlockNumber == 0 || (_peer == m_syncer.lock() && _peer->m_protocolVersion != host().protocolVersion()))
	{
		// Syncing in pv60 mode
		PV60Sync::onPeerHashes(_peer, _hashes);
		return;
	}
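	// An empty reply either marks the end of the hash chain (from the lead peer) or means the peer had nothing for its assigned subchain.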
	if (_hashes.size() == 0)
	{
		if (isSyncing(_peer) && _peer->m_syncHashNumber == m_syncingBlockNumber)
		{
			// End of hash chain, add last chunk to download
			m_readyChainMap.insert(make_pair(m_syncingBlockNumber, SubChain{ h256s{ _peer->m_latestHash }, _peer->m_latestHash }));
			m_knownHashes.insert(_peer->m_latestHash);
			m_hashScanComplete = true;
			_peer->m_syncHashNumber = 0;
			requestSubchain(_peer);
		}
		else
		{
			auto syncPeer = m_chainSyncPeers.find(_peer);
			if (syncPeer == m_chainSyncPeers.end())
				clog(NetMessageDetail) << "Hashes response from unexpected peer";
			else
			{
				// Peer does not have the requested hashes; move the subchain back from downloading to ready
				unsigned number = syncPeer->second;
				m_chainSyncPeers.erase(_peer);
				m_readyChainMap[number] = move(m_downloadingChainMap.at(number));
				m_downloadingChainMap.erase(number);
				resetNeedsSyncing(_peer);
				requestSubchains();
			}
		}
		return;
	}
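	// Non-empty reply from the lead peer: expect exactly one hash, the next subchain marker.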
	if (isSyncing(_peer) && _peer->m_syncHashNumber == m_syncingBlockNumber)
	{
		// Got new subchain marker
		if (_hashes.size() != 1)
		{
			clog(NetWarn) << "Peer sent too many hashes";
			_peer->disable("Too many hashes");
			restartSync();
			return;
		}
		m_knownHashes.insert(_hashes[0]);
		m_readyChainMap.insert(make_pair(m_syncingBlockNumber, SubChain{ h256s{ _hashes[0] }, _hashes[0] }));
		if ((m_readyChainMap.size() + m_downloadingChainMap.size() + m_completeChainMap.size()) * c_hashSubchainSize > _peer->m_expectedHashes)
		{
			_peer->disable("Too many hashes from lead peer");
			restartSync();
			return;
		}
		transition(_peer, SyncState::Hashes);
		requestSubchains();
	}
	else
	{
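		// Reply carrying subchain hashes: work out which subchain this peer was downloading.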
		auto syncPeer = m_chainSyncPeers.find(_peer);
		unsigned number = 0;
		if (syncPeer == m_chainSyncPeers.end())
		{
			// Check the peers currently downloading subchains
			for (auto const& downloader: m_downloadingChainMap)
				if (downloader.second.lastHash == _peer->m_syncHash)
				{
					number = downloader.first;
					break;
				}
		}
		else
			number = syncPeer->second;
		if (number == 0)
		{
			clog(NetAllDetail) << "Hashes response from unexpected/expired peer";
			return;
		}

		auto downloadingPeer = m_downloadingChainMap.find(number);
		if (downloadingPeer == m_downloadingChainMap.end() || downloadingPeer->second.lastHash != _peer->m_syncHash)
		{
			// Too late; another peer has already downloaded these hashes
			m_chainSyncPeers.erase(_peer);
			requestSubchain(_peer);
			return;
		}

		SubChain& subChain = downloadingPeer->second;
		unsigned knowns = 0;
		unsigned unknowns = 0;
		for (unsigned i = 0; i < _hashes.size(); ++i)
		{
			auto h = _hashes[i];
			auto status = host().bq().blockStatus(h);
			if (status == QueueStatus::Importing || status == QueueStatus::Ready || host().chain().isKnown(h) || !!m_knownHashes.count(h))
			{
				clog(NetMessageSummary) << "Subchain download complete";
				m_chainSyncPeers.erase(_peer);
				completeSubchain(_peer, number);
				return;
			}
			else if (status == QueueStatus::Bad)
			{
				cwarn << "block hash bad!" << h << ". Bailing...";
				_peer->disable("Bad hashes");
				if (isSyncing(_peer))
					restartSync();
				else
				{
					// Try again with another peer
					m_readyChainMap[number] = move(m_downloadingChainMap.at(number));
					m_downloadingChainMap.erase(number);
					m_chainSyncPeers.erase(_peer);
				}
				return;
			}
			else if (status == QueueStatus::Unknown)
			{
				unknowns++;
				subChain.hashes.push_back(h);
			}
			else
				knowns++;
			subChain.lastHash = h;
		}
		clog(NetMessageSummary) << knowns << "knowns," << unknowns << "unknowns; now at" << subChain.lastHash;
		if (subChain.hashes.size() > c_hashSubchainSize)
		{
			_peer->disable("Too many subchain hashes");
			restartSync();
			return;
		}
		requestSubchain(_peer);
	}
	DEV_INVARIANT_CHECK;
}