void database::create_block_summary(const signed_block& next_block)
{
   block_summary_id_type sid( next_block.block_num() & 0xffff );
   modify( sid(*this), [&](block_summary_object& p) {
      p.block_id = next_block.id();
   });
}
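// A minimal standalone sketch of the structure the function above maintains:
// block summaries live in a ring buffer of 2^16 slots, so a transaction's
// 16-bit ref_block_num selects the slot whose stored block id it expects to
// find there (the TaPoS check). The helper names below (summary_slot,
// ref_prefix_from_id) are hypothetical, not Graphene API, and the
// little-endian word extraction is an assumption for illustration.
#include <array>
#include <cstdint>

// The slot a block's summary occupies; block N and block N + 65536 share a slot.
inline uint16_t summary_slot( uint32_t block_num ) { return block_num & 0xffff; }

// TaPoS also checks a 32-bit prefix taken from the referenced block's id
// (Graphene uses block_id._hash[1], i.e. the second 32-bit word of the id).
inline uint32_t ref_prefix_from_id( const std::array<uint8_t,20>& block_id )
{
   return  uint32_t(block_id[4])        | (uint32_t(block_id[5]) << 8)
        | (uint32_t(block_id[6]) << 16) | (uint32_t(block_id[7]) << 24);
}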
void database::_apply_block( const signed_block& next_block )
{ try {
   uint32_t next_block_num = next_block.block_num();
   uint32_t skip = get_node_properties().skip_flags;
   _applied_ops.clear();

   FC_ASSERT( (skip & skip_merkle_check)
              || next_block.transaction_merkle_root == next_block.calculate_merkle_root(),
              "",
              ("next_block.transaction_merkle_root",next_block.transaction_merkle_root)
              ("calc",next_block.calculate_merkle_root())
              ("next_block",next_block)
              ("id",next_block.id()) );

   const witness_object& signing_witness = validate_block_header(skip, next_block);
   const auto& global_props = get_global_properties();
   const auto& dynamic_global_props = get<dynamic_global_property_object>(dynamic_global_property_id_type());
   bool maint_needed = (dynamic_global_props.next_maintenance_time <= next_block.timestamp);

   _current_block_num    = next_block_num;
   _current_trx_in_block = 0;

   for( const auto& trx : next_block.transactions )
   {
      /* We do not need to push the undo state for each transaction
       * because they either all apply and are valid or the
       * entire block fails to apply. We only need an "undo" state
       * for transactions when validating broadcast transactions or
       * when building a block.
       */
      apply_transaction( trx, skip );
      ++_current_trx_in_block;
   }

   update_global_dynamic_data(next_block);
   update_signing_witness(signing_witness, next_block);
   update_last_irreversible_block();

   // Are we at the maintenance interval?
   if( maint_needed )
      perform_chain_maintenance(next_block, global_props);

   create_block_summary(next_block);
   clear_expired_transactions();
   clear_expired_proposals();
   clear_expired_orders();
   update_expired_feeds();
   update_withdraw_permissions();

   // n.b., update_maintenance_flag() happens this late
   // because get_slot_time() / get_slot_at_time() is needed above
   // TODO: figure out if we could collapse this function into
   // update_global_dynamic_data() as perhaps these methods only need
   // to be called for header validation?
   update_maintenance_flag( maint_needed );
   update_witness_schedule();
   if( !_node_property_object.debug_updates.empty() )
      apply_debug_updates();

   // notify observers that the block has been applied
   applied_block( next_block ); //emit
   _applied_ops.clear();

   notify_changed_objects();
} FC_CAPTURE_AND_RETHROW( (next_block.block_num()) ) }
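// Sketch of the merkle-root recomputation guarded by skip_merkle_check above.
// Graphene pairs transaction digests level by level and carries an odd
// trailing digest up unchanged; `hash_pair` below is a placeholder for the
// real digest function (this sketch is NOT cryptographic and the names are
// hypothetical).
#include <cstddef>
#include <functional>
#include <string>
#include <vector>

static std::string hash_pair( const std::string& a, const std::string& b )
{
   // stand-in for digest_type::hash( make_pair(a, b) )
   return std::to_string( std::hash<std::string>{}( a + b ) );
}

std::string calculate_merkle_root_sketch( std::vector<std::string> ids )
{
   if( ids.empty() ) return std::string();
   while( ids.size() > 1 )
   {
      std::vector<std::string> next;
      std::size_t i = 0;
      for( ; i + 1 < ids.size(); i += 2 )
         next.push_back( hash_pair( ids[i], ids[i+1] ) );
      if( i < ids.size() )           // odd leaf: promoted to the next level as-is
         next.push_back( ids[i] );
      ids.swap( next );
   }
   return ids[0];
}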
optional<index_entry> block_database::last_index_entry()const
{
   try
   {
      index_entry e;
      _block_num_to_pos.seekg( 0, _block_num_to_pos.end );
      std::streampos pos = _block_num_to_pos.tellg();
      if( pos < long(sizeof(index_entry)) )
         return optional<index_entry>();

      pos -= pos % sizeof(index_entry);

      _blocks.seekg( 0, _blocks.end ); // seek to the end of the block file (not the index file)
      const std::streampos blocks_size = _blocks.tellg();
      while( pos > 0 )
      {
         pos -= sizeof(index_entry);
         _block_num_to_pos.seekg( pos );
         _block_num_to_pos.read( (char*)&e, sizeof(e) );
         if( _block_num_to_pos.gcount() == sizeof(e) && e.block_size > 0
             && int64_t(e.block_pos + e.block_size) <= blocks_size )
            try
            {
               vector<char> data( e.block_size );
               _blocks.seekg( e.block_pos );
               _blocks.read( data.data(), e.block_size );
               if( _blocks.gcount() == long(e.block_size) )
               {
                  const signed_block block = fc::raw::unpack<signed_block>(data);
                  if( block.id() == e.block_id )
                     return e;
               }
            }
            catch (const fc::exception&) {}
            catch (const std::exception&) {}
         // the entry (or the block it points to) is damaged, so truncate it off the index
         fc::resize_file( _index_filename, pos );
      }
   }
   catch (const fc::exception&) {}
   catch (const std::exception&) {}
   return optional<index_entry>();
}
void database::apply_block( const signed_block& next_block, uint32_t skip )
{
   auto block_num = next_block.block_num();
   if( _checkpoints.size() && _checkpoints.rbegin()->second != block_id_type() )
   {
      auto itr = _checkpoints.find( block_num );
      if( itr != _checkpoints.end() )
         FC_ASSERT( next_block.id() == itr->second,
                    "Block did not match checkpoint",
                    ("checkpoint",*itr)("block_id",next_block.id()) );

      if( _checkpoints.rbegin()->first >= block_num )
         skip = ~0; // WE CAN SKIP ALMOST EVERYTHING
   }

   detail::with_skip_flags( *this, skip, [&]()
   {
      _apply_block( next_block );
   });
}
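// Minimal sketch of the checkpoint short-circuit above: any block at or below
// the highest checkpointed height has its id pinned, directly at a checkpoint
// or transitively through the `previous` links leading to one, so validation
// work can be skipped for the whole prefix. Reduced stand-in types; the helper
// name is hypothetical.
#include <cstdint>
#include <map>
#include <string>

using block_id = std::string; // stand-in for block_id_type

bool can_skip_validation( const std::map<uint32_t, block_id>& checkpoints,
                          uint32_t block_num )
{
   return !checkpoints.empty() && checkpoints.rbegin()->first >= block_num;
}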
void database::update_global_dynamic_data( const signed_block& b )
{
   const dynamic_global_property_object& _dgp = dynamic_global_property_id_type(0)(*this);

   uint32_t missed_blocks = get_slot_at_time( b.timestamp );
   assert( missed_blocks != 0 );
   missed_blocks--;

   // dynamic global properties updating
   modify( _dgp, [&]( dynamic_global_property_object& dgp )
   {
      if( BOOST_UNLIKELY( b.block_num() == 1 ) )
         dgp.recently_missed_count = 0;
      else if( _checkpoints.size() && _checkpoints.rbegin()->first >= b.block_num() )
         dgp.recently_missed_count = 0;
      else if( missed_blocks )
         dgp.recently_missed_count += GRAPHENE_RECENTLY_MISSED_COUNT_INCREMENT*missed_blocks;
      else if( dgp.recently_missed_count > GRAPHENE_RECENTLY_MISSED_COUNT_INCREMENT )
         dgp.recently_missed_count -= GRAPHENE_RECENTLY_MISSED_COUNT_DECREMENT;
      else if( dgp.recently_missed_count > 0 )
         dgp.recently_missed_count--;

      dgp.head_block_number = b.block_num();
      dgp.head_block_id = b.id();
      dgp.time = b.timestamp;
      dgp.current_witness = b.witness;
      dgp.recent_slots_filled = ( (dgp.recent_slots_filled << 1) + 1 ) << missed_blocks;
      dgp.current_aslot += missed_blocks+1;
   });

   if( !(get_node_properties().skip_flags & skip_undo_history_check) )
   {
      GRAPHENE_ASSERT( _dgp.recently_missed_count < GRAPHENE_MAX_UNDO_HISTORY, undo_database_exception,
                       "The database does not have enough undo history to support a blockchain with so many missed blocks. "
                       "Please add a checkpoint if you would like to continue applying blocks beyond this point.",
                       ("recently_missed",_dgp.recently_missed_count)("max_undo",GRAPHENE_MAX_UNDO_HISTORY) );
   }

   _undo_db.set_max_size( _dgp.recently_missed_count + GRAPHENE_MIN_UNDO_HISTORY );
   _fork_db.set_max_size( _dgp.recently_missed_count + GRAPHENE_MIN_UNDO_HISTORY );
}
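// Sketch of how recent_slots_filled is maintained and typically consumed:
// each bit records whether a witness slot was filled, shifting in a 1 for the
// produced block and `missed_blocks` zeros for the gap; participation is then
// the popcount over the window. Graphene uses a 128-bit field; a 64-bit window
// is used here to stay self-contained, and `participation_percent` is a
// hypothetical helper name.
#include <bitset>
#include <cstdint>

struct slots_window
{
   uint64_t recent_slots_filled = ~uint64_t(0);

   void record_block( uint32_t missed_blocks )
   {
      // same update as the modify() above: one filled slot, then the missed gap
      recent_slots_filled = ( (recent_slots_filled << 1) + 1 ) << missed_blocks;
   }

   unsigned participation_percent() const
   {
      return 100u * unsigned(std::bitset<64>( recent_slots_filled ).count()) / 64u;
   }
};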
uint64_t block_log::append( const signed_block& b )
{ try {
   my->check_block_write();
   my->check_index_write();

   uint64_t pos = my->block_stream.tellp();
   FC_ASSERT( my->index_stream.tellp() == sizeof( uint64_t ) * ( b.block_num() - 1 ),
              "Append to index file occurring at wrong position.",
              ("position", (uint64_t) my->index_stream.tellp())
              ("expected", ( b.block_num() - 1 ) * sizeof( uint64_t )) );

   auto data = fc::raw::pack( b );
   my->block_stream.write( data.data(), data.size() );
   my->block_stream.write( (char*)&pos, sizeof( pos ) );
   my->index_stream.write( (char*)&pos, sizeof( pos ) );
   my->head = b;
   my->head_id = b.id();

   return pos;
} FC_LOG_AND_RETHROW() }
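// Sketch of the on-disk layout append() maintains: the block file stores each
// packed block followed by the 64-bit offset it was written at, and the index
// file stores that same offset at slot (block_num - 1). A consequence worth
// showing: the head block's position can be recovered from the log alone by
// reading the trailing position word. The helper name is hypothetical.
#include <cstdint>
#include <fstream>

// Returns the file offset of the head block, or UINT64_MAX if the log is empty.
uint64_t head_block_pos( const char* block_file )
{
   std::ifstream in( block_file, std::ios::binary | std::ios::ate );
   std::streamoff size = in.tellg();
   if( size < std::streamoff(sizeof(uint64_t)) )
      return ~uint64_t(0);
   uint64_t pos = 0;
   in.seekg( size - std::streamoff(sizeof(uint64_t)) );
   in.read( reinterpret_cast<char*>(&pos), sizeof(pos) );
   return pos;
}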
void apply(database &db, const signed_block &b, const options_type &opts)
{
   auto undo_session = db.start_undo_session(!(opts & skip_undo_block));
   db.pre_apply_block(b);

   if (!(opts & skip_validation)) {
      FC_ASSERT(b.timestamp.sec_since_epoch() % 3 == 0);
      if (b.block_num() > 1) {
         const auto &head = db.head_block();
         FC_ASSERT(b.block_num() == head.block_num + 1);
         FC_ASSERT(b.timestamp >= head.timestamp + fc::seconds(3));
      }
   }

   db.create<block_object>([&](block_object &obj) {
      obj.block_num = b.block_num();
      obj.block_id = b.id();
      obj.ref_prefix = obj.block_id._hash[1];
      obj.previous = b.previous;
      obj.timestamp = b.timestamp;
      obj.witness = b.witness;
      obj.transaction_merkle_root = b.transaction_merkle_root;
      obj.witness_signature = b.witness_signature;
      obj.transactions.reserve(b.transactions.size());
      for (const auto &t : b.transactions) {
         obj.transactions.emplace_back(t.id());
      }
   });

   for (const auto &trx : b.transactions) {
      apply(db, trx, opts);
   }

   db.post_apply_block(b);
   undo_session.push();
}
void network_api::on_applied_block( const signed_block& b )
{
   if( _callbacks.size() )
   {
      for( uint32_t trx_num = 0; trx_num < b.transactions.size(); ++trx_num )
      {
         const auto& trx = b.transactions[trx_num];
         auto id = trx.id();
         auto itr = _callbacks.find(id);
         auto block_num = b.block_num();
         if( itr != _callbacks.end() )
         {
            // copy the callback out of the map so the async task does not
            // dereference an iterator that may have been invalidated by then
            auto callback = itr->second;
            fc::async( [=](){ callback( fc::variant(transaction_confirmation{ id, block_num, trx_num, trx}) ); } );
         }
      }
   }
}
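// Sketch of the registration side this notification assumes: callers register
// a callback keyed by transaction id before broadcasting, and on_applied_block
// fires it once the transaction appears in a block. The map and helper below
// are hypothetical stand-ins for the _callbacks member used above.
#include <cstdint>
#include <functional>
#include <map>
#include <string>
#include <utility>

using confirmation_cb = std::function<void(uint32_t block_num, uint32_t trx_num)>;
std::map<std::string, confirmation_cb> callbacks; // trx id -> callback

void broadcast_with_callback( const std::string& trx_id, confirmation_cb cb )
{
   callbacks.emplace( trx_id, std::move(cb) );
   // ... broadcast the transaction to the network here ...
}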
void block_database::store( const block_id_type& _id, const signed_block& b )
{
   block_id_type id = _id;
   if( id == block_id_type() )
   {
      id = b.id();
      elog( "id argument of block_database::store() was not initialized for block ${id}", ("id", id) );
   }
   _block_num_to_pos.seekp( sizeof( index_entry ) * int64_t(block_header::num_from_id(id)) );

   index_entry e;
   _blocks.seekp( 0, _blocks.end );
   auto vec = fc::raw::pack( b );
   e.block_pos  = _blocks.tellp();
   e.block_size = vec.size();
   e.block_id   = id;
   _blocks.write( vec.data(), vec.size() );
   _block_num_to_pos.write( (char*)&e, sizeof(e) );
}
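// Counterpart sketch to store(): because the index file is an array of
// fixed-size index_entry records addressed by block number, a lookup costs one
// seek into each file. Reduced stand-in types; `fetch_by_number` is a
// hypothetical helper, and decoding the packed block is elided.
#include <cstdint>
#include <fstream>
#include <vector>

struct index_entry_sketch
{
   uint64_t block_pos  = 0;
   uint64_t block_size = 0;
   // the real index_entry also carries the block_id for verification
};

std::vector<char> fetch_by_number( std::ifstream& index, std::ifstream& blocks,
                                   uint32_t block_num )
{
   index_entry_sketch e;
   index.seekg( std::streamoff(sizeof(e)) * block_num );
   index.read( reinterpret_cast<char*>(&e), sizeof(e) );
   if( !index || e.block_size == 0 )
      return {};
   std::vector<char> data( e.block_size );
   blocks.seekg( e.block_pos );
   blocks.read( data.data(), e.block_size );
   return data; // raw packed block; fc::raw::unpack would decode it
}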
void block_generation_loop()
{
   while( !_block_gen_loop_complete.canceled() )
   {
      if( _pending.size() && (fc::time_point::now() - _current_block.timestamp) > fc::seconds(60) )
      {
         signed_block next_block;
         next_block.number = _current_block.number + 1;

         auto next_diff = _current_block.difficulty * 500 / _pending.size();
         next_diff = (_current_block.difficulty * 99 + next_diff) / 100;
         next_block.difficulty = std::max<uint64_t>( next_diff, 1000 );
         next_block.timestamp  = fc::time_point::now();

         for( const auto& rec : _pending )
         {
            next_block.records.push_back( rec.second );
         }
         next_block.sign( _trustee_key );

         _block_database.store( next_block.number, next_block );

         for( uint32_t rec = 0; rec < next_block.records.size(); ++rec )
         {
            auto new_rec = next_block.records[rec];
            auto hist = _self->fetch_history( new_rec.name );
            hist.updates.push_back( name_index( next_block.number, rec ) );
            _name_index.store( new_rec.name, hist );
            _key_to_name.store( new_rec.active_key.to_base58(), new_rec.name );
         }

         _current_block    = next_block;
         _current_block_id = _current_block.id();

         fc::path block_file = _data_dir / "block" / fc::to_string( uint64_t(_current_block.number) );
         std::ofstream out( block_file.generic_string().c_str() );
         auto block_str = fc::json::to_pretty_string( _current_block );
         out.write( block_str.c_str(), block_str.size() );
      }
      fc::usleep( fc::seconds( 1 ) );
   }
}
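// Worked sketch of the difficulty update above: the raw target scales
// inversely with the pending-record count (the 500/_pending.size() factor),
// then is blended 99:1 with the previous difficulty so it moves roughly 1%
// per block, floored at 1000. Extracted as a pure function for clarity.
#include <algorithm>
#include <cstddef>
#include <cstdint>

uint64_t next_difficulty( uint64_t current, std::size_t pending_count )
{
   uint64_t raw      = current * 500 / pending_count;   // inverse load scaling
   uint64_t smoothed = ( current * 99 + raw ) / 100;    // ~1% exponential step
   return std::max<uint64_t>( smoothed, 1000 );         // hard floor
}
// e.g. current = 100000, pending_count = 400  ->  raw = 125000, result = 100250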
void network_broadcast_api::on_applied_block( const signed_block& b )
{
   int32_t block_num = int32_t(b.block_num());
   if( _callbacks.size() )
   {
      /// we need to ensure the database_api is not deleted for the life of the async operation
      auto capture_this = shared_from_this();

      for( size_t trx_num = 0; trx_num < b.transactions.size(); ++trx_num )
      {
         const auto& trx = b.transactions[trx_num];
         auto id = trx.id();
         auto itr = _callbacks.find(id);
         if( itr != _callbacks.end() )
         {
            auto callback = itr->second;
            fc::async( [capture_this,this,id,block_num,trx_num,callback](){
               callback( fc::variant(transaction_confirmation{ id, block_num, int32_t(trx_num), false}) );
            } );
            itr->second = []( const variant& ){};
         }
      }
   }

   /// clear all expirations
   if( _callbacks_expirations.size() )
   {
      auto end = _callbacks_expirations.upper_bound( b.timestamp );
      auto itr = _callbacks_expirations.begin();
      while( itr != end )
      {
         for( const auto& trx_id : itr->second )
         {
            auto cb_itr = _callbacks.find( trx_id );
            if( cb_itr != _callbacks.end() )
            {
               auto capture_this = shared_from_this();
               auto callback = cb_itr->second;
               fc::async( [capture_this,this,block_num,trx_id,callback](){
                  callback( fc::variant(transaction_confirmation{ trx_id, block_num, -1, true}) );
               } );
               _callbacks.erase(cb_itr);
            }
         }
         _callbacks_expirations.erase( itr );
         itr = _callbacks_expirations.begin();
      }
   }
}
bool database::_push_block(const signed_block& new_block)
{ try {
   uint32_t skip = get_node_properties().skip_flags;

   if( !(skip&skip_fork_db) )
   {
      /// TODO: if the block is greater than the head block and before the next maintenance interval
      // verify that the block signer is in the current set of active witnesses.
      shared_ptr<fork_item> new_head = _fork_db.push_block(new_block);
      //If the head block from the longest chain does not build off of the current head, we need to switch forks.
      if( new_head->data.previous != head_block_id() )
      {
         //If the newly pushed block is the same height as head, we get head back in new_head
         //Only switch forks if new_head is actually higher than head
         if( new_head->data.block_num() > head_block_num() )
         {
            wlog( "Switching to fork: ${id}", ("id",new_head->data.id()) );
            auto branches = _fork_db.fetch_branch_from(new_head->data.id(), head_block_id());

            // pop blocks until we hit the forked block
            while( head_block_id() != branches.second.back()->data.previous )
               pop_block();

            // push all blocks on the new fork
            for( auto ritr = branches.first.rbegin(); ritr != branches.first.rend(); ++ritr )
            {
               ilog( "pushing blocks from fork ${n} ${id}", ("n",(*ritr)->data.block_num())("id",(*ritr)->data.id()) );
               optional<fc::exception> except;
               try {
                  undo_database::session session = _undo_db.start_undo_session();
                  apply_block( (*ritr)->data, skip );
                  _block_id_to_block.store( (*ritr)->id, (*ritr)->data );
                  session.commit();
               } catch ( const fc::exception& e ) { except = e; }
               if( except )
               {
                  wlog( "exception thrown while switching forks ${e}", ("e",except->to_detail_string() ) );
                  // remove the rest of branches.first from the fork_db, those blocks are invalid
                  while( ritr != branches.first.rend() )
                  {
                     _fork_db.remove( (*ritr)->data.id() );
                     ++ritr;
                  }
                  _fork_db.set_head( branches.second.front() );

                  // pop all blocks from the bad fork
                  while( head_block_id() != branches.second.back()->data.previous )
                     pop_block();

                  // restore all blocks from the good fork
                  for( auto ritr2 = branches.second.rbegin(); ritr2 != branches.second.rend(); ++ritr2 )
                  {
                     auto session = _undo_db.start_undo_session();
                     apply_block( (*ritr2)->data, skip );
                     _block_id_to_block.store( (*ritr2)->id, (*ritr2)->data ); // store under its own id, not new_block's
                     session.commit();
                  }
                  throw *except;
               }
            }
            return true;
         }
         else
            return false;
      }
   }

   try {
      auto session = _undo_db.start_undo_session();
      apply_block(new_block, skip);
      _block_id_to_block.store(new_block.id(), new_block);
      session.commit();
   } catch ( const fc::exception& e ) {
      elog("Failed to push new block:\n${e}", ("e", e.to_detail_string()));
      _fork_db.remove(new_block.id());
      throw;
   }

   return false;
} FC_CAPTURE_AND_RETHROW( (new_block) ) }
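// Sketch of the fetch_branch_from contract the fork switch above relies on:
// given two head ids, it returns the two branches walked back until both sides
// meet a common ancestor. branches.first belongs to the new head and
// branches.second to the old one, each ordered head-first and excluding the
// ancestor, so back()'s `previous` equals the shared ancestor (the condition
// the pop_block() loops test). A toy id -> (previous, height) map stands in
// for the fork database; all names here are illustrative.
#include <cstdint>
#include <map>
#include <string>
#include <utility>
#include <vector>

using id_map = std::map<std::string, std::pair<std::string, uint32_t>>; // id -> (previous, height)

std::pair<std::vector<std::string>, std::vector<std::string>>
fetch_branch_from_sketch( const id_map& chain, std::string a, std::string b )
{
   std::vector<std::string> first, second;
   auto height = [&]( const std::string& id ){ return chain.at(id).second; };
   while( a != b )
   {
      // walk back whichever side is higher; alternate when heights are equal
      if( height(a) >= height(b) ) { first.push_back(a);  a = chain.at(a).first; }
      else                         { second.push_back(b); b = chain.at(b).first; }
   }
   return { first, second };
}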
fork_item( signed_block d ) :num(d.block_num()),id(d.id()),data( std::move(d) ){}
/**
 * Pushes the block into the fork database and caches it if it doesn't link
 */
shared_ptr<fork_item> fork_database::push_block(const signed_block& b)
{
   auto item = std::make_shared<fork_item>(b);
   try {
      _push_block(item);
   }
   catch ( const unlinkable_block_exception& e )
   {
      wlog( "Pushing block to fork database that failed to link: ${id}, ${num}", ("id",b.id())("num",b.block_num()) );
      wlog( "Head: ${num}, ${id}", ("num",_head->data.block_num())("id",_head->data.id()) );
      throw;
      _unlinked_index.insert( item ); // unreachable after the rethrow above, so the caching promised by the doc comment never happens
   }
   return _head;
}
block_message(const signed_block &blk) : block(blk), block_id(blk.id()) { }
void elasticsearch_plugin_impl::add_elasticsearch( const account_id_type account_id,
                                                   const optional<operation_history_object>& oho,
                                                   const signed_block& b )
{
   graphene::chain::database& db = database();
   const auto& stats_obj = account_id(db).statistics(db);

   // add new entry
   const auto& ath = db.create<account_transaction_history_object>([&](account_transaction_history_object& obj) {
      obj.operation_id = oho->id;
      obj.account = account_id;
      obj.sequence = stats_obj.total_ops + 1;
      obj.next = stats_obj.most_recent_op;
   });

   // keep stats growing as no op will be removed
   db.modify(stats_obj, [&](account_statistics_object& obj) {
      obj.most_recent_op = ath.id;
      obj.total_ops = ath.sequence;
   });

   // operation_type
   int op_type = -1;
   if (!oho->id.is_null())
      op_type = oho->op.which();

   // operation history data
   operation_history_struct os;
   os.trx_in_block = oho->trx_in_block;
   os.op_in_trx = oho->op_in_trx;
   os.operation_result = fc::json::to_string(oho->result);
   os.virtual_op = oho->virtual_op;
   os.op = fc::json::to_string(oho->op);

   // visitor data
   visitor_struct vs;
   if(_elasticsearch_visitor) {
      operation_visitor o_v;
      oho->op.visit(o_v);

      vs.fee_data.asset = o_v.fee_asset;
      vs.fee_data.amount = o_v.fee_amount;

      vs.transfer_data.asset = o_v.transfer_asset_id;
      vs.transfer_data.amount = o_v.transfer_amount;
      vs.transfer_data.from = o_v.transfer_from;
      vs.transfer_data.to = o_v.transfer_to;
   }

   // block data
   std::string trx_id = "";
   if(!b.transactions.empty() && oho->trx_in_block < b.transactions.size()) {
      trx_id = b.transactions[oho->trx_in_block].id().str();
   }
   block_struct bs;
   bs.block_num = b.block_num();
   bs.block_time = b.timestamp;
   bs.trx_id = trx_id;

   // check if we are in replay or in sync and change number of bulk documents accordingly
   uint32_t limit_documents = 0;
   if((fc::time_point::now() - b.timestamp) < fc::seconds(30))
      limit_documents = _elasticsearch_bulk_sync;
   else
      limit_documents = _elasticsearch_bulk_replay;

   createBulkLine(ath, os, op_type, bs, vs); // we have everything, creating bulk line
   if (curl && bulk.size() >= limit_documents) { // we are in bulk time, ready to add data to elasticsearch
      sendBulk(_elasticsearch_node_url, _elasticsearch_logs);
   }

   // remove everything except current object from ath
   const auto& his_idx = db.get_index_type<account_transaction_history_index>();
   const auto& by_seq_idx = his_idx.indices().get<by_seq>();
   auto itr = by_seq_idx.lower_bound(boost::make_tuple(account_id, 0));
   if (itr != by_seq_idx.end() && itr->account == account_id && itr->id != ath.id) {
      // if found, remove the entry
      const auto remove_op_id = itr->operation_id;
      const auto itr_remove = itr;
      ++itr;
      db.remove( *itr_remove );
      // modify previous node's next pointer
      // this should be always true, but just have a check here
      if( itr != by_seq_idx.end() && itr->account == account_id )
      {
         db.modify( *itr, [&]( account_transaction_history_object& obj ){
            obj.next = account_transaction_history_id_type();
         });
      }
      // do the same on oho
      const auto& by_opid_idx = his_idx.indices().get<by_opid>();
      if (by_opid_idx.find(remove_op_id) == by_opid_idx.end())
      {
         db.remove(remove_op_id(db));
      }
   }
}
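// Sketch of the bulk-size selection above: while replaying (block timestamp
// far behind wall clock) a large bulk amortizes HTTP overhead; once within
// ~30 seconds of real time, a smaller bulk keeps the index fresh. Extracted
// as a pure function with std::chrono standing in for fc::time_point; the
// helper name is hypothetical.
#include <chrono>
#include <cstdint>

uint32_t bulk_limit( std::chrono::system_clock::time_point block_time,
                     uint32_t sync_limit, uint32_t replay_limit )
{
   auto lag = std::chrono::system_clock::now() - block_time;
   return lag < std::chrono::seconds(30) ? sync_limit : replay_limit;
}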