Code example #1
File: Eth.cpp  Project: ethereum/cpp-ethereum
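This handler implements cpp-ethereum's eth_pendingTransactions JSON-RPC method: it walks the client's pending transaction queue and returns, serialised to JSON, only those transactions whose sender address belongs to one of the locally managed accounts (m_ethAccounts).
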
Json::Value Eth::eth_pendingTransactions()
{
	// Return the list of pending transactions sent by local accounts
	Transactions ours;
	for (Transaction const& pending:client()->pending())
	{
		for (Address const& account:m_ethAccounts.allAccounts())
		{
			if (pending.sender() == account)
			{
				ours.push_back(pending);
				break;
			}
		}
	}

	return toJson(ours);
}
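
The check above scans all local accounts for every pending transaction. Below is a minimal, self-contained sketch of the same filtering idea, using a std::unordered_set of local addresses so each transaction is tested with a single lookup; Tx and the string addresses are simplified stand-ins for illustration, not the real dev::eth::Transaction and dev::Address types:

#include <algorithm>
#include <iostream>
#include <iterator>
#include <string>
#include <unordered_set>
#include <vector>

// Simplified stand-in for a pending transaction.
struct Tx
{
	std::string sender;
	unsigned nonce;
};

// Keep only the pending transactions whose sender is a locally managed account.
std::vector<Tx> ourPending(std::vector<Tx> const& pending,
                           std::unordered_set<std::string> const& localAccounts)
{
	std::vector<Tx> ours;
	std::copy_if(pending.begin(), pending.end(), std::back_inserter(ours),
	             [&](Tx const& t) { return localAccounts.count(t.sender) != 0; });
	return ours;
}

int main()
{
	std::vector<Tx> pending = {{"0xaaaa", 0}, {"0xbbbb", 3}, {"0xaaaa", 1}};
	std::unordered_set<std::string> local = {"0xaaaa"};

	for (Tx const& t : ourPending(pending, local))
		std::cout << t.sender << " nonce " << t.nonce << "\n";	// prints the two 0xaaaa entries
	return 0;
}
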
Code example #2
File: BlockChain.cpp  Project: dan-da/cpp-ethereum
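This is the first half of BlockChain::import, which takes an already-verified block and tries to add it to the chain. It rejects blocks that are already known, have an unknown parent, or carry a timestamp in the future; enacts the block's transactions on top of the parent state to obtain the total-difficulty increase; stages the block body and its derived "extras" (parent/child details, log blooms, receipts) in LevelDB WriteBatch objects; and then computes the tree route from the current head to decide whether the new block becomes the best block and whether a chain reorganisation is needed.

Before the listing itself, here is a minimal standalone sketch of that LevelDB write-batch pattern; the database path and keys are made-up placeholders, not the real cpp-ethereum column layout:

#include <cassert>
#include <string>
#include <leveldb/db.h>
#include <leveldb/write_batch.h>

int main()
{
	leveldb::Options options;
	options.create_if_missing = true;

	leveldb::DB* db = nullptr;
	leveldb::Status status = leveldb::DB::Open(options, "/tmp/example-extras", &db);
	assert(status.ok());

	// Stage several key/value writes; nothing reaches the database until
	// db->Write() is called, and the whole batch is then applied atomically.
	leveldb::WriteBatch batch;
	batch.Put("details/0xabc", "<rlp-encoded block details>");
	batch.Put("blooms/0xabc", "<rlp-encoded log blooms>");
	batch.Put("receipts/0xabc", "<rlp-encoded receipts>");

	status = db->Write(leveldb::WriteOptions(), &batch);
	assert(status.ok());

	delete db;
	return 0;
}
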
ImportRoute BlockChain::import(VerifiedBlockRef const& _block, OverlayDB const& _db, bool _mustBeNew)
{
	//@tidy This is a behemoth of a method - could do with being split into a few smaller ones.

#if ETH_TIMED_IMPORTS
	Timer total;
	double preliminaryChecks;
	double enactment;
	double collation;
	double writing;
	double checkBest;
	Timer t;
#endif

	// Check block doesn't already exist first!
	if (isKnown(_block.info.hash()) && _mustBeNew)
	{
		clog(BlockChainNote) << _block.info.hash() << ": Not new.";
		BOOST_THROW_EXCEPTION(AlreadyHaveBlock());
	}

	// Work out its number as the parent's number + 1
	if (!isKnown(_block.info.parentHash()))
	{
		clog(BlockChainNote) << _block.info.hash() << ": Unknown parent " << _block.info.parentHash();
		// We don't know the parent (yet) - discard for now. It'll get resent to us if we find out about its ancestry later on.
		BOOST_THROW_EXCEPTION(UnknownParent());
	}

	auto pd = details(_block.info.parentHash());
	if (!pd)
	{
		auto pdata = pd.rlp();
		clog(BlockChainDebug) << "Details is returning false despite block known:" << RLP(pdata);
		auto parentBlock = block(_block.info.parentHash());
		clog(BlockChainDebug) << "isKnown:" << isKnown(_block.info.parentHash());
		clog(BlockChainDebug) << "last/number:" << m_lastBlockNumber << m_lastBlockHash << _block.info.number();
		clog(BlockChainDebug) << "Block:" << BlockInfo(&parentBlock);
		clog(BlockChainDebug) << "RLP:" << RLP(parentBlock);
		clog(BlockChainDebug) << "DATABASE CORRUPTION: CRITICAL FAILURE";
		exit(-1);
	}

	// Check it's not crazy
	if (_block.info.timestamp() > (u256)time(0))
	{
		clog(BlockChainChat) << _block.info.hash() << ": Future time " << _block.info.timestamp() << " (now at " << time(0) << ")";
		// Block has a timestamp in the future. This is no good.
		BOOST_THROW_EXCEPTION(FutureTime());
	}

	// Verify parent-critical parts
	verifyBlock(_block.block, m_onBad, ImportRequirements::InOrderChecks);

	clog(BlockChainChat) << "Attempting import of " << _block.info.hash() << "...";

#if ETH_TIMED_IMPORTS
	preliminaryChecks = t.elapsed();
	t.restart();
#endif

	ldb::WriteBatch blocksBatch;
	ldb::WriteBatch extrasBatch;
	h256 newLastBlockHash = currentHash();
	unsigned newLastBlockNumber = number();

	BlockLogBlooms blb;
	BlockReceipts br;

	u256 td;
	Transactions goodTransactions;
#if ETH_CATCH
	try
#endif
	{
		// Check transactions are valid and that they result in a state equivalent to our state_root.
		// Get total difficulty increase and update state, checking it.
		Block s(_db);
		auto tdIncrease = s.enactOn(_block, *this);

		for (unsigned i = 0; i < s.pending().size(); ++i)
		{
			blb.blooms.push_back(s.receipt(i).bloom());
			br.receipts.push_back(s.receipt(i));
			goodTransactions.push_back(s.pending()[i]);
		}

		s.cleanup(true);

		td = pd.totalDifficulty + tdIncrease;

#if ETH_TIMED_IMPORTS
		enactment = t.elapsed();
		t.restart();
#endif

#if ETH_PARANOIA || !ETH_TRUE
		checkConsistency();
#endif

		// All ok - insert into DB

		// ensure parent is cached for later addition.
		// TODO: this is a bit horrible would be better refactored into an enveloping UpgradableGuard
		// together with an "ensureCachedWithUpdatableLock(l)" method.
		// This is safe in practice since the caches don't get flushed nearly often enough to be
		// done here.
		details(_block.info.parentHash());
		DEV_WRITE_GUARDED(x_details)
			m_details[_block.info.parentHash()].children.push_back(_block.info.hash());

#if ETH_TIMED_IMPORTS || !ETH_TRUE
		collation = t.elapsed();
		t.restart();
#endif

		blocksBatch.Put(toSlice(_block.info.hash()), ldb::Slice(_block.block));
		DEV_READ_GUARDED(x_details)
			extrasBatch.Put(toSlice(_block.info.parentHash(), ExtraDetails), (ldb::Slice)dev::ref(m_details[_block.info.parentHash()].rlp()));

		extrasBatch.Put(toSlice(_block.info.hash(), ExtraDetails), (ldb::Slice)dev::ref(BlockDetails((unsigned)pd.number + 1, td, _block.info.parentHash(), {}).rlp()));
		extrasBatch.Put(toSlice(_block.info.hash(), ExtraLogBlooms), (ldb::Slice)dev::ref(blb.rlp()));
		extrasBatch.Put(toSlice(_block.info.hash(), ExtraReceipts), (ldb::Slice)dev::ref(br.rlp()));

#if ETH_TIMED_IMPORTS || !ETH_TRUE
		writing = t.elapsed();
		t.restart();
#endif
	}
#if ETH_CATCH
	catch (BadRoot& ex)
	{
		cwarn << "*** BadRoot error! Trying to import" << _block.info.hash() << "needed root" << ex.root;
		cwarn << _block.info;
		// Attempt to import it again later.
		BOOST_THROW_EXCEPTION(TransientError());
	}
	catch (Exception& ex)
	{
		ex << errinfo_now(time(0));
		ex << errinfo_block(_block.block.toBytes());
		// only populate extraData if we actually managed to extract it. otherwise,
		// we might be clobbering the existing one.
		if (!_block.info.extraData().empty())
			ex << errinfo_extraData(_block.info.extraData());
		throw;
	}
#endif

	StructuredLogger::chainReceivedNewBlock(
		_block.info.hashWithout().abridged(),
		"",//_block.info.proof.nonce.abridged(),
		currentHash().abridged(),
		"", // TODO: remote id ??
		_block.info.parentHash().abridged()
	);
	//	cnote << "Parent " << bi.parentHash() << " has " << details(bi.parentHash()).children.size() << " children.";

	h256s route;
	h256 common;
	// This might be the new best block...
	h256 last = currentHash();
	if (td > details(last).totalDifficulty)
	{
		// don't include bi.hash() in treeRoute, since it's not yet in details DB...
		// just tack it on afterwards.
		unsigned commonIndex;
		tie(route, common, commonIndex) = treeRoute(last, _block.info.parentHash());
		route.push_back(_block.info.hash());

		// Most of the time these two will be equal - only when we're doing a chain revert will they not be
		if (common != last)
		{
			// Erase the number-lookup cache for the segment of the chain that we're reverting (if any).
			unsigned n = number(route.front());
			DEV_WRITE_GUARDED(x_blockHashes)
				for (auto i = route.begin(); i != route.end() && *i != common; ++i, --n)
					m_blockHashes.erase(n);
			DEV_WRITE_GUARDED(x_transactionAddresses)
				m_transactionAddresses.clear();	// TODO: could perhaps delete them individually?

			// If we are reverting previous blocks, we need to clear their blooms (in particular, to
			// rebuild any higher level blooms that they contributed to).
			clearBlockBlooms(number(common) + 1, number(last) + 1);
		}