void debug_node_plugin::on_applied_block( const chain::signed_block& b )
{
   if( !_debug_updates.empty() )
      apply_debug_updates();

   // emit one newline-delimited JSON object per applied block, e.g. {"bn":12345}
   if( _json_object_stream )
   {
      (*_json_object_stream) << "{\"bn\":" << fc::to_string( b.block_num() ) << "}\n";
   }
}
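Each applied block thus appends one newline-delimited JSON object to the configured stream. Below is a minimal sketch of a consumer for that output; the capture file name and the substring-based parsing are illustrative assumptions, not part of the plugin:

#include <cstdint>
#include <fstream>
#include <iostream>
#include <string>

int main()
{
   std::ifstream in( "debug_node.json" ); // hypothetical capture of the stream
   std::string line;
   while( std::getline( in, line ) )
   {
      // Each line is a self-contained object such as {"bn":12345}; the format
      // is fixed by the plugin, so a simple substring scan is enough here.
      auto pos = line.find( "\"bn\":" );
      if( pos == std::string::npos ) continue;
      uint64_t block_num = std::stoull( line.substr( pos + 5 ) );
      std::cout << "applied block " << block_num << "\n";
   }
   return 0;
}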
Example #2
/**
 * Every time a block is produced, this method is called. This method will iterate through all
 * mining accounts specified by commandline and for which the private key is known. The first
 * account that isn't already scheduled in the mining queue is selected to mine for the
 * BLOCK_INTERVAL minus 1 second. If a POW is solved or a new block comes in then the
 * worker will stop early.
 *
 * Work is farmed out to N threads in parallel based upon the value specified on the command line.
 *
 * The miner assumes that the next block will be produced on time and that network propagation
 * will take at least 1 second. This 1 second consists of the time it takes to receive the block
 * and the time it takes to broadcast the work. In other words, we assume 0.5s broadcast times
 * and therefore do not even attempt work that cannot be delivered on time.
 */
void witness_plugin::on_applied_block( const chain::signed_block& b )
{ try {
   if( !_mining_threads || _miners.empty() ) return;
   chain::database& db = database();

   const auto& dgp = db.get_dynamic_global_properties();
   // elapsed time is in microseconds, so this is the measured rate in hashes/sec
   double hps = (_total_hashes*1000000)/(fc::time_point::now()-_hash_start_time).count();
   // difficulty doubles for every 4 witnesses already in the POW queue
   int64_t bits = (dgp.num_pow_witnesses/4) + 4;
   fc::uint128 hashes = fc::uint128(1) << bits;
   // estimated seconds to produce = 2^bits / hps, computed in fixed point
   hashes *= 1000000;
   hps += 1; // guard against division by zero
   hashes /= int64_t(hps*1000000);
   auto seconds = hashes.to_uint64();
   auto minutes = uint64_t(seconds / 60.0);

   if( _total_hashes > 0 )
      ilog( "hash rate: ${x} hps  target: ${t} queue: ${l} estimated time to produce: ${m} minutes",
              ("x",uint64_t(hps)) ("t",bits) ("m", minutes ) ("l",dgp.num_pow_witnesses)
         );

   /// save this so it can be captured by the worker lambda
   _head_block_num = b.block_num();

   for( const auto& miner : _miners ) {
      const auto* w = db.find_witness( miner.first );
      if( !w || w->pow_worker == 0 ) {
         auto miner_pub_key = miner.second;
         auto priv_key_itr = _private_keys.find( miner_pub_key );
         if( priv_key_itr == _private_keys.end() ) {
            continue; /// skip this miner for lack of a private key
         }

         auto miner_priv_key = priv_key_itr->second;
         start_mining( miner_pub_key, miner_priv_key, miner.first, b );
         break;
      }
      // otherwise the miner is already scheduled to produce a block; skip it
   } // for miner in miners

} catch ( const fc::exception& e ) { ilog( "exception thrown while attempting to mine: ${e}", ("e", e.to_detail_string()) ); }
}
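The logged estimate reduces to: the target work is 2^(num_pow_witnesses/4 + 4) hashes, and the expected time to produce is that target divided by the measured hash rate. A standalone sketch of the same arithmetic, with made-up inputs that are not taken from any real chain state:

#include <cstdint>
#include <cstdio>

int main()
{
   // Illustrative inputs only.
   uint32_t num_pow_witnesses = 80;       // hypothetical POW queue length
   double   hps               = 100000.0; // hypothetical measured hash rate

   // Same formula as the plugin: difficulty doubles every 4 queued workers.
   int64_t bits    = num_pow_witnesses / 4 + 4;           // 24
   double  hashes  = static_cast<double>( 1ull << bits ); // 2^24 = 16777216
   double  minutes = hashes / hps / 60.0;                 // ~2.8 minutes
   std::printf( "estimated time to produce: %.1f minutes\n", minutes );
   return 0;
}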
Example #3
void block_producer::apply_pending_transactions(
        const chain::account_name_type& witness_owner,
        fc::time_point_sec when,
        chain::signed_block& pending_block)
{
   // The 4 is for the max size of the transaction vector length
   size_t total_block_size = fc::raw::pack_size( pending_block ) + 4;
   const auto& gpo = _db.get_dynamic_global_properties();
   uint64_t maximum_block_size = gpo.maximum_block_size; //STEEM_MAX_BLOCK_SIZE;
   uint64_t maximum_transaction_partition_size = maximum_block_size - ( maximum_block_size * gpo.required_actions_partition_percent ) / STEEM_100_PERCENT;
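   // Worked example with illustrative numbers only: if maximum_block_size is
   // 65536 bytes and required_actions_partition_percent is 10% of
   // STEEM_100_PERCENT, then 65536 * 10% = 6553 bytes (integer division) are
   // reserved for required actions, leaving a transaction partition of
   // 65536 - 6553 = 58983 bytes.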

   //
   // The following code throws away existing pending_tx_session and
   // rebuilds it by re-applying pending transactions.
   //
   // This rebuild is necessary because pending transactions' validity
   // and semantics may have changed since they were received, because
   // time-based semantics are evaluated based on the current block
   // time.  These changes can only be reflected in the database when
   // the value of the "when" variable is known, which means we need to
   // re-apply pending transactions in this method.
   //
   _db.pending_transaction_session().reset();
   _db.pending_transaction_session() = _db.start_undo_session();

   FC_TODO( "Safe to remove after HF20 occurs because no more pre HF20 blocks will be generated" );
   if( _db.has_hardfork( STEEM_HARDFORK_0_20 ) )
   {
      /// modify current witness so transaction evaluators can know who included the transaction
      _db.modify(
             _db.get_dynamic_global_properties(),
             [&]( chain::dynamic_global_property_object& dgp )
             {
                dgp.current_witness = witness_owner;
             });
   }

   uint64_t postponed_tx_count = 0;
   // re-apply each pending transaction on top of the freshly reset pending state
   for( const chain::signed_transaction& tx : _db._pending_tx )
   {
      // Only include transactions that have not yet expired for the block being
      // generated; this clears out problem transactions and lets block production continue.

      if( postponed_tx_count > STEEM_BLOCK_GENERATION_POSTPONED_TX_LIMIT )
         break;

      if( tx.expiration < when )
         continue;

      uint64_t new_total_size = total_block_size + fc::raw::pack_size( tx );

      // postpone transaction if it would make block too big
      if( new_total_size >= maximum_transaction_partition_size )
      {
         postponed_tx_count++;
         continue;
      }

      try
      {
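         // Apply inside a nested undo session: squash() folds the changes into
         // the enclosing pending session on success, while an exception unwinds
         // only this transaction's changes.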
         auto temp_session = _db.start_undo_session();
         _db.apply_transaction( tx, _db.get_node_properties().skip_flags );
         temp_session.squash();

         total_block_size = new_total_size;
         pending_block.transactions.push_back( tx );
      }
      catch ( const fc::exception& e )
      {
         // Do nothing, transaction will not be re-applied
         //wlog( "Transaction was not processed while generating block due to ${e}", ("e", e) );
         //wlog( "The transaction was ${t}", ("t", tx) );
      }
   }
   if( postponed_tx_count > 0 )
   {
      wlog( "Postponed ${n} transactions due to block size limit", ("n", _db._pending_tx.size() - pending_block.transactions.size()) );
   }

   const auto& pending_required_action_idx = _db.get_index< chain::pending_required_action_index, chain::by_execution >();
   auto pending_required_itr = pending_required_action_idx.begin();
   chain::required_automated_actions required_actions;

   while( pending_required_itr != pending_required_action_idx.end() && pending_required_itr->execution_time <= when )
   {
      uint64_t new_total_size = total_block_size + fc::raw::pack_size( pending_required_itr->action );

      // The partition reserved for required actions is only a lower bound; if the
      // block has extra space left over, actions may use it too, hence the
      // comparison against maximum_block_size rather than the partition size.
      if( new_total_size > maximum_block_size )
         break;

      try
      {
         auto temp_session = _db.start_undo_session();
         _db.apply_required_action( pending_required_itr->action );
         temp_session.squash();
         total_block_size = new_total_size;
         required_actions.push_back( pending_required_itr->action );
         ++pending_required_itr;
      }
      catch( fc::exception& e )
      {
         FC_RETHROW_EXCEPTION( e, warn, "A required automatic action was rejected. ${a} ${e}", ("a", pending_required_itr->action)("e", e.to_detail_string()) );
      }
   }

FC_TODO( "Remove ifdef when required actions are added" )
#ifdef IS_TEST_NET
   if( !required_actions.empty() )
   {
      pending_block.extensions.insert( required_actions );
   }
#endif

   const auto& pending_optional_action_idx = _db.get_index< chain::pending_optional_action_index, chain::by_execution >();
   auto pending_optional_itr = pending_optional_action_idx.begin();
   chain::optional_automated_actions optional_actions;

   while( pending_optional_itr != pending_optional_action_idx.end() && pending_optional_itr->execution_time <= when )
   {
      uint64_t new_total_size = total_block_size + fc::raw::pack_size( pending_optional_itr->action );

      if( new_total_size > maximum_block_size )
         break;

      try
      {
         auto temp_session = _db.start_undo_session();
         _db.apply_optional_action( pending_optional_itr->action );
         temp_session.squash();
         total_block_size = new_total_size;
         optional_actions.push_back( pending_optional_itr->action );
      }
      catch( fc::exception& ) {} // a failed optional action is simply dropped
      ++pending_optional_itr;
   }

FC_TODO( "Remove ifdef when optional actions are added" )
#ifdef IS_TEST_NET
   if( !optional_actions.empty() )
   {
      pending_block.extensions.insert( optional_actions );
   }
#endif

   _db.pending_transaction_session().reset();

   pending_block.transaction_merkle_root = pending_block.calculate_merkle_root();
}
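All three loops above share one pattern: open a nested undo session, attempt the apply, and call squash() on success so the change is kept, while any exception rolls the change back automatically when the session is destroyed. Below is a minimal self-contained sketch of that idiom, with a hypothetical State/Session pair standing in for the chainbase database (an illustration of the pattern, not the real chainbase API):

#include <cstddef>
#include <iostream>
#include <stdexcept>
#include <vector>

// Hypothetical stand-ins for the database and its undo sessions.
struct State
{
   std::vector<int> applied;

   struct Session
   {
      State&      db;
      std::size_t checkpoint;
      bool        kept = false;
      explicit Session( State& s ) : db( s ), checkpoint( s.applied.size() ) {}
      void squash() { kept = true; }  // success: keep the changes
      ~Session() { if( !kept ) db.applied.resize( checkpoint ); } // else undo
   };

   Session start_undo_session() { return Session( *this ); }

   void apply( int tx )
   {
      applied.push_back( tx );
      if( tx < 0 ) throw std::runtime_error( "invalid tx" ); // simulated failure
   }
};

int main()
{
   State db;
   for( int tx : { 1, -2, 3 } )
   {
      try
      {
         auto temp_session = db.start_undo_session();
         db.apply( tx );          // may throw
         temp_session.squash();   // only reached on success
      }
      catch( const std::exception& ) { /* skipped; state already rolled back */ }
   }
   for( int tx : db.applied ) std::cout << tx << ' ';  // prints: 1 3
   std::cout << '\n';
   return 0;
}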