Example #1
void  fork_database::_push_block(const item_ptr& item)
{
    if( _head ) // make sure the block is within the range that we are caching
    {
        FC_ASSERT( item->num > std::max<int64_t>( 0, int64_t(_head->num) - (_max_size) ),
                   "attempting to push a block that is too old",
                   ("item->num",item->num)("head",_head->num)("max_size",_max_size));
        FC_ASSERT( item->num < _head->num + MAX_BLOCK_REORDERING );
    }

    if( _head && item->previous_id() != block_id_type() )
    {
        auto& index = _index.get<block_id>();
        auto itr = index.find(item->previous_id());
        GRAPHENE_ASSERT(itr != index.end(), unlinkable_block_exception, "block does not link to known chain");
        FC_ASSERT(!(*itr)->invalid);
        item->prev = *itr;
    }

    _index.insert(item);
    if( !_head ) _head = item;
    else if( item->num > _head->num )
    {
        _head = item;
        _index.get<block_num>().erase(_head->num - _max_size);
        _unlinked_index.get<block_num>().erase(_head->num - _max_size);
    }

    _push_next( item );
}
Example #2
void  fork_database::_push_block(const item_ptr& item)
{
   if( _head ) // make sure the block is within the range that we are caching
   {
      FC_ASSERT( item->num > std::max<int64_t>( 0, int64_t(_head->num) - (_max_size) ),
                 "attempting to push a block that is too old", 
                 ("item->num",item->num)("head",_head->num)("max_size",_max_size));
   }

   if( _head && item->previous_id() != block_id_type() )
   {
      auto& index = _index.get<block_id>();
      auto itr = index.find(item->previous_id());
      GRAPHENE_ASSERT(itr != index.end(), unlinkable_block_exception, "block does not link to known chain");
      item->prev = *itr;
   }

   _index.insert(item);
   if( !_head ) _head = item;
   else if( item->num > _head->num )
   {
      _head = item;
      uint32_t min_num = _head->num - std::min( _max_size, _head->num );
//      ilog( "min block in fork DB ${n}, max_size: ${m}", ("n",min_num)("m",_max_size) );
      auto& num_idx = _index.get<block_num>();
      while( num_idx.size() && (*num_idx.begin())->num < min_num )
         num_idx.erase( num_idx.begin() );
      
      _unlinked_index.get<block_num>().erase(_head->num - _max_size);
   }
   //_push_next( item );
}
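Both variants of _push_block above rely on the same convention: a default-constructed block_id_type() compares equal to an all-zero hash, so item->previous_id() != block_id_type() means "this block claims a parent" and the fork index must contain it. A minimal, self-contained sketch of that convention (the struct below is a stand-in for the real fc::ripemd160-based id, not the actual type):

#include <array>
#include <cassert>
#include <cstdint>

// Stand-in for the real block_id_type; the only property the examples depend on
// is that default construction yields all zeros.
struct block_id_type {
    std::array<uint8_t, 20> hash{};   // value-initialized to zero
    bool operator==(const block_id_type& o) const { return hash == o.hash; }
    bool operator!=(const block_id_type& o) const { return !(*this == o); }
};

// A block with a zero previous id is treated as having no parent (genesis-like),
// so the fork database skips the "does it link to a known block?" check.
bool claims_a_parent(const block_id_type& previous_id)
{
    return previous_id != block_id_type();
}

int main()
{
    block_id_type genesis_prev;                 // all zeros
    assert(!claims_a_parent(genesis_prev));
    block_id_type linked_prev; linked_prev.hash[0] = 0x2a;
    assert(claims_a_parent(linked_prev));
    return 0;
}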
Example #3
      /**
       * Assuming all data elements are ordered in some way, this method should
       * return up to limit ids that occur *after* the last ID in synopsis that
       * we recognize.
       *
       * On return, remaining_item_count will be set to the number of items
       * in our blockchain after the last item returned in the result,
       * or 0 if the result contains the last item in the blockchain
       */
      virtual std::vector<item_hash_t> get_block_ids(const std::vector<item_hash_t>& blockchain_synopsis,
                                                     uint32_t& remaining_item_count,
                                                     uint32_t limit) override
      { try {
         vector<block_id_type> result;
         remaining_item_count = 0;
         if( _chain_db->head_block_num() == 0 )
            return result;

         result.reserve(limit);
         block_id_type last_known_block_id;

         if (blockchain_synopsis.empty() ||
             (blockchain_synopsis.size() == 1 && blockchain_synopsis[0] == block_id_type()))
         {
           // peer has sent us an empty synopsis meaning they have no blocks.
           // A bug in old versions would cause them to send a synopsis containing block 000000000
           // when they had an empty blockchain, so pretend they sent the right thing here.

           // do nothing, leave last_known_block_id set to zero
         }
         else
         {
           bool found_a_block_in_synopsis = false;
           for (const item_hash_t& block_id_in_synopsis : boost::adaptors::reverse(blockchain_synopsis))
             if (block_id_in_synopsis == block_id_type() ||
                 (_chain_db->is_known_block(block_id_in_synopsis) && is_included_block(block_id_in_synopsis)))
             {
               last_known_block_id = block_id_in_synopsis;
               found_a_block_in_synopsis = true;
               break;
             }
           if (!found_a_block_in_synopsis)
             FC_THROW_EXCEPTION(graphene::net::peer_is_on_an_unreachable_fork, "Unable to provide a list of blocks starting at any of the blocks in peer's synopsis");
         }
         for( uint32_t num = block_header::num_from_id(last_known_block_id);
              num <= _chain_db->head_block_num() && result.size() < limit;
              ++num )
            if( num > 0 )
               result.push_back(_chain_db->get_block_id_for_num(num));

         if( !result.empty() && block_header::num_from_id(result.back()) < _chain_db->head_block_num() )
            remaining_item_count = _chain_db->head_block_num() - block_header::num_from_id(result.back());

         return result;
      } FC_CAPTURE_AND_RETHROW( (blockchain_synopsis)(remaining_item_count)(limit) ) }
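A hedged sketch of the contract described in the comment above, using plain block numbers in place of hashes: the result starts at the last block the peer recognizes, is capped at limit entries, and remaining_item_count reports how many blocks of our chain lie beyond the last id returned. The helper below is illustrative only and not part of the real API.

#include <cstdint>
#include <iostream>
#include <vector>

std::vector<uint32_t> get_block_nums(uint32_t last_known, uint32_t head,
                                     uint32_t& remaining_item_count, uint32_t limit)
{
    std::vector<uint32_t> result;
    remaining_item_count = 0;
    for (uint32_t num = last_known; num <= head && result.size() < limit; ++num)
        if (num > 0)                       // block 0 (the zero id) is never returned
            result.push_back(num);
    if (!result.empty() && result.back() < head)
        remaining_item_count = head - result.back();
    return result;
}

int main()
{
    uint32_t remaining = 0;
    auto ids = get_block_nums(/*last_known=*/100, /*head=*/5000, remaining, /*limit=*/2000);
    std::cout << ids.size() << " ids, " << remaining << " remaining\n";  // 2000 ids, 2901 remaining
    return 0;
}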
Example #4
block_id_type block_database::fetch_block_id( uint32_t block_num )const
{
   assert( block_num != 0 );
   index_entry e;
   int64_t index_pos = sizeof(e) * int64_t(block_num);
   _block_num_to_pos.seekg( 0, _block_num_to_pos.end );
   if ( _block_num_to_pos.tellg() <= index_pos )
      FC_THROW_EXCEPTION(fc::key_not_found_exception, "Block number ${block_num} not contained in block database", ("block_num", block_num));

   _block_num_to_pos.seekg( index_pos );
   _block_num_to_pos.read( (char*)&e, sizeof(e) );

   FC_ASSERT( e.block_id != block_id_type(), "Empty block_id in block_database (maybe corrupt on disk?)" );
   return e.block_id;
}
Example #5
bool block_database::contains( const block_id_type& id )const
{
   if( id == block_id_type() )
      return false;

   index_entry e;
   int64_t index_pos = sizeof(e) * int64_t(block_header::num_from_id(id));
   _block_num_to_pos.seekg( 0, _block_num_to_pos.end );
   if ( _block_num_to_pos.tellg() < int64_t(index_pos + sizeof(e)) )
      return false;
   _block_num_to_pos.seekg( index_pos );
   _block_num_to_pos.read( (char*)&e, sizeof(e) );

   return e.block_id == id && e.block_size > 0;
}
Example #6
      void init_chain()
      {
           // build genesis block if there is no chain data 
           if( chain.head_block_num() == INVALID_BLOCK_NUM )
           {
               bts::blockchain::trx_block b;
               b.version      = 0;
               b.prev         = block_id_type();
               b.block_num    = 0;
               b.timestamp    = fc::time_point::from_iso_string("20131201T054434");
               b.trxs.emplace_back( load_genesis( "genesis.csv", b.total_shares) );
               b.trx_mroot   = b.calculate_merkle_root();

               chain.push_block(b);
           }
      }
Example #7
void block_database::store( const block_id_type& _id, const signed_block& b )
{
   block_id_type id = _id;
   if( id == block_id_type() )
   {
      id = b.id();
      elog( "id argument of block_database::store() was not initialized for block ${id}", ("id", id) );
   }
   _block_num_to_pos.seekp( sizeof( index_entry ) * int64_t(block_header::num_from_id(id)) );
   index_entry e;
   _blocks.seekp( 0, _blocks.end );
   auto vec = fc::raw::pack( b );
   e.block_pos  = _blocks.tellp();
   e.block_size = vec.size();
   e.block_id   = id;
   _blocks.write( vec.data(), vec.size() );
   _block_num_to_pos.write( (char*)&e, sizeof(e) );
}
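Examples #4, #5, and #7 all read and write the same on-disk index: the entry for block N lives at byte offset sizeof(index_entry) * N, so a lookup is one seek plus a fixed-size read. A hedged sketch of that layout (field names mirror the excerpts; the real struct layout and sizes may differ):

#include <cstdint>

// Stand-in for the 20-byte block id used by the real code.
struct block_id_stub { uint8_t hash[20] = {}; };

// One fixed-size record per block number in the index file.
struct index_entry {
    uint64_t      block_pos  = 0;   // where the packed block starts in the blocks file
    uint32_t      block_size = 0;   // packed size in bytes; 0 marks an unused slot
    block_id_stub block_id;         // stored id, so reads can detect gaps or corruption
};

// Byte offset of block `num`'s entry; int64_t matches the stream positions
// used by seekg/seekp in the examples above.
inline int64_t index_offset(uint32_t num)
{
    return static_cast<int64_t>(sizeof(index_entry)) * static_cast<int64_t>(num);
}

int main()
{
    return index_offset(1) == static_cast<int64_t>(sizeof(index_entry)) ? 0 : 1;
}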
Example #8
void database::apply_block( const signed_block& next_block, uint32_t skip )
{
   auto block_num = next_block.block_num();
   if( _checkpoints.size() && _checkpoints.rbegin()->second != block_id_type() )
   {
      auto itr = _checkpoints.find( block_num );
      if( itr != _checkpoints.end() )
         FC_ASSERT( next_block.id() == itr->second, "Block did not match checkpoint", ("checkpoint",*itr)("block_id",next_block.id()) );

      if( _checkpoints.rbegin()->first >= block_num )
         skip = ~0;// WE CAN SKIP ALMOST EVERYTHING
   }

   detail::with_skip_flags( *this, skip, [&]()
   {
      _apply_block( next_block );
   } );
   return;
}
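The checkpoint handling above keys a map from block number to required block id: if the incoming block's number has a checkpoint it must match exactly, and any block at or below the highest checkpointed number may skip most validation. A minimal sketch of that logic, with a string standing in for block_id_type:

#include <cassert>
#include <cstdint>
#include <map>
#include <string>

int main()
{
    // block number -> required id (stand-in for the real _checkpoints map)
    std::map<uint32_t, std::string> checkpoints = { {100, "id-100"}, {500, "id-500"} };

    uint32_t block_num = 350;
    std::string block_id = "id-350";

    auto itr = checkpoints.find(block_num);
    if (itr != checkpoints.end())
        assert(block_id == itr->second);         // a checkpointed block must match its id

    // the highest checkpoint still lies ahead, so validation can be skipped
    bool skip_everything = checkpoints.rbegin()->first >= block_num;
    assert(skip_everything);
    return 0;
}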
Example #9
shared_ptr<fork_item>  fork_database::push_block( const signed_block& b )
{
   auto item = std::make_shared<fork_item>( b );

   if( _head && b.previous != block_id_type() )
   {
      auto itr = _index.get<block_id>().find( b.previous );
      FC_ASSERT( itr != _index.get<block_id>().end() );
      FC_ASSERT( !(*itr)->invalid );
      item->prev = *itr;
   }

   _index.insert( item );
   if( !_head ) _head = item;
   else if( item->num > _head->num )
   {
      _head = item;
      _index.get<block_num>().erase( _head->num - 1024 );
   }
   return _head;
}
Example #10
void pending_chain_state::get_undo_state( const chain_interface_ptr& undo_state_arg )const
{
    auto undo_state = std::dynamic_pointer_cast<pending_chain_state>(undo_state_arg);
    chain_interface_ptr prev_state = _prev_state.lock();
    FC_ASSERT( prev_state );
    for( const auto& item : properties )
    {
        auto prev_value = prev_state->get_property( (chain_property_enum)item.first );
        undo_state->set_property( (chain_property_enum)item.first, prev_value );
    }
    for( const auto& item : assets )
    {
        auto prev_value = prev_state->get_asset_record( item.first );
        if( !!prev_value ) undo_state->store_asset_record( *prev_value );
        else undo_state->store_asset_record( item.second.make_null() );
    }
    for( const auto& item : slates )
    {
        auto prev_value = prev_state->get_delegate_slate( item.first );
        if( prev_value ) undo_state->store_delegate_slate( item.first, *prev_value );
        else undo_state->store_delegate_slate( item.first, delegate_slate() );
    }
    for( const auto& item : accounts )
    {
        auto prev_value = prev_state->get_account_record( item.first );
        if( !!prev_value ) undo_state->store_account_record( *prev_value );
        else undo_state->store_account_record( item.second.make_null() );
    }
    for( const auto& item : proposals )
    {
        auto prev_value = prev_state->get_proposal_record( item.first );
        if( !!prev_value ) undo_state->store_proposal_record( *prev_value );
        else undo_state->store_proposal_record( item.second.make_null() );
    }
    for( const auto& item : proposal_votes )
    {
        auto prev_value = prev_state->get_proposal_vote( item.first );
        if( !!prev_value ) undo_state->store_proposal_vote( *prev_value );
        else {
            undo_state->store_proposal_vote( item.second.make_null() );
        }
    }
    for( const auto& item : balances )
    {
        auto prev_value = prev_state->get_balance_record( item.first );
        if( !!prev_value ) undo_state->store_balance_record( *prev_value );
        else undo_state->store_balance_record( item.second.make_null() );
    }
    for( const auto& item : transactions )
    {
        auto prev_value = prev_state->get_transaction( item.first );
        if( !!prev_value ) undo_state->store_transaction( item.first, *prev_value );
        else undo_state->store_transaction( item.first, transaction_record() );
    }
    for( const auto& item : bids )
    {
        auto prev_value = prev_state->get_bid_record( item.first );
        if( prev_value.valid() ) undo_state->store_bid_record( item.first, *prev_value );
        else  undo_state->store_bid_record( item.first, order_record() );
    }
    for( const auto& item : asks )
    {
        auto prev_value = prev_state->get_ask_record( item.first );
        if( prev_value.valid() ) undo_state->store_ask_record( item.first, *prev_value );
        else  undo_state->store_ask_record( item.first, order_record() );
    }
    for( const auto& item : shorts )
    {
        auto prev_value = prev_state->get_short_record( item.first );
        if( prev_value.valid() ) undo_state->store_short_record( item.first, *prev_value );
        else  undo_state->store_short_record( item.first, order_record() );
    }
    for( const auto& item : collateral )
    {
        auto prev_value = prev_state->get_collateral_record( item.first );
        if( prev_value.valid() ) undo_state->store_collateral_record( item.first, *prev_value );
        else  undo_state->store_collateral_record( item.first, collateral_record() );
    }
    for( const auto& item : slots )
    {
        auto prev_value = prev_state->get_slot_record( item.first );
        if( prev_value ) undo_state->store_slot_record( *prev_value );
        else
        {
            slot_record invalid_slot_record;
            invalid_slot_record.start_time = item.first;
            invalid_slot_record.block_produced = true;
            invalid_slot_record.block_id = block_id_type();
            undo_state->store_slot_record( invalid_slot_record );
        }
    }
}
Example #11
      /**
       * Returns a synopsis of the blockchain used for syncing.  This consists of a list of
       * block hashes at intervals exponentially increasing towards the genesis block.
       * When syncing to a peer, the peer uses this data to determine if we're on the same
       * fork as they are, and if not, what blocks they need to send us to get us on their
       * fork.
       *
       * In the over-simplified case, this is a straightforward synopsis of our current
       * preferred blockchain; when we first connect up to a peer, this is what we will be sending.
       * It looks like this:
       *   If the blockchain is empty, it will return the empty list.
       *   If the blockchain has one block, it will return a list containing just that block.
       *   If it contains more than one block:
       *     the first element in the list will be the hash of the highest numbered block that
       *         we cannot undo
       *     the second element will be the hash of an item at the half way point in the undoable
       *         segment of the blockchain
       *     the third will be ~3/4 of the way through the undoable segment of the block chain
       *     the fourth will be at ~7/8...
       *       &c.
       *     the last item in the list will be the hash of the most recent block on our preferred chain
       * so if the blockchain had 26 blocks labeled a - z, the synopsis would be:
       *    a n u x z
       * the idea being that by sending a small (<30) number of block ids, we can summarize a huge
       * blockchain.  The block ids are more dense near the end of the chain because we are
       * more likely to be almost in sync when we first connect, and forks are likely to be short.
       * If the peer we're syncing with in our example is on a fork that started at block 'v',
       * then they will reply to our synopsis with a list of all blocks starting from block 'u',
       * the last block they know that we had in common.
       *
       * In the real code, there are several complications.
       *
       * First, as an optimization, we don't usually send a synopsis of the entire blockchain, we
       * send a synopsis of only the segment of the blockchain that we have undo data for.  If their
       * fork doesn't build off of something in our undo history, we would be unable to switch, so there's
       * no reason to fetch the blocks.
       *
       * Second, when a peer replies to our initial synopsis and gives us a list of the blocks they think
       * we are missing, they only send a chunk of a few thousand blocks at once.  After we get those
       * block ids, we need to request more blocks by sending another synopsis (we can't just say "send me
       * the next 2000 ids" because they may have switched forks themselves and they don't track what
       * they've sent us).  For faster performance, we want to get a fairly long list of block ids first,
       * then start downloading the blocks.
       * The peer doesn't handle these follow-up block id requests any differently from the initial request;
       * it treats the synopsis we send as our blockchain and bases its response entirely off that.  So to
       * get the response we want (the next chunk of block ids following the last one they sent us, or,
       * failing that, the shortest fork off of the last list of block ids they sent), we need to construct
       * a synopsis as if our blockchain was made up of:
       *    1. the blocks in our block chain up to the fork point (if there is a fork) or the head block (if no fork)
       *    2. the blocks we've already pushed from their fork (if there's a fork)
       *    3. the block ids they've previously sent us
       * Segment 3 is handled in the p2p code, it just tells us the number of blocks it has (in
       * number_of_blocks_after_reference_point) so we can leave space in the synopsis for them.
       * We're responsible for constructing the synopsis of Segments 1 and 2 from our active blockchain and
       * fork database.  The reference_point parameter is the last block from that peer that has been
       * successfully pushed to the blockchain, so that tells us whether the peer is on a fork or on
       * the main chain.
       */
      virtual std::vector<item_hash_t> get_blockchain_synopsis(const item_hash_t& reference_point,
                                                               uint32_t number_of_blocks_after_reference_point) override
      { try {
          std::vector<item_hash_t> synopsis;
          synopsis.reserve(30);
          uint32_t high_block_num;
          uint32_t non_fork_high_block_num;
          uint32_t low_block_num = _chain_db->last_non_undoable_block_num();
          std::vector<block_id_type> fork_history;

          if (reference_point != item_hash_t())
          {
            // the node is asking for a summary of the block chain up to a specified
            // block, which may or may not be on a fork
            // for now, assume it's not on a fork
            if (is_included_block(reference_point))
            {
              // reference_point is a block we know about and is on the main chain
              uint32_t reference_point_block_num = block_header::num_from_id(reference_point);
              assert(reference_point_block_num > 0);
              high_block_num = reference_point_block_num;
              non_fork_high_block_num = high_block_num;

              if (reference_point_block_num < low_block_num)
              {
                // we're on the same fork (at least as far as reference_point) but we've passed
                // reference point and could no longer undo that far if we diverged after that
                // block.  This should probably only happen due to a race condition where
                // the network thread calls this function, and then immediately pushes a bunch of blocks,
                // then the main thread finally processes this function.
                // with the current framework, there's not much we can do to tell the network
                // thread what our current head block is, so we'll just pretend that
                // our head is actually the reference point.
                // this *may* enable us to fetch blocks that we're unable to push, but that should
                // be a rare case (and correctly handled)
                low_block_num = reference_point_block_num;
              }
            }
            else
            {
              // block is a block we know about, but it is on a fork
              try
              {
                fork_history = _chain_db->get_block_ids_on_fork(reference_point);
                // returns a vector where the last element is the common ancestor with the preferred chain,
                // and the first element is the reference point you passed in
                assert(fork_history.size() >= 2);

                if( fork_history.front() != reference_point )
                {
                   edump( (fork_history)(reference_point) );
                   assert(fork_history.front() == reference_point);
                }
                block_id_type last_non_fork_block = fork_history.back();
                fork_history.pop_back();  // remove the common ancestor
                boost::reverse(fork_history);

                if (last_non_fork_block == block_id_type()) // if the fork goes all the way back to genesis (does graphene's fork db allow this?)
                  non_fork_high_block_num = 0;
                else
                  non_fork_high_block_num = block_header::num_from_id(last_non_fork_block);

                high_block_num = non_fork_high_block_num + fork_history.size();
                assert(high_block_num == block_header::num_from_id(fork_history.back()));
              }
              catch (const fc::exception& e)
              {
                // unable to get fork history for some reason.  maybe not linked?
                // we can't return a synopsis of its chain
                elog("Unable to construct a blockchain synopsis for reference hash ${hash}: ${exception}", ("hash", reference_point)("exception", e));
                throw;
              }
              if (non_fork_high_block_num < low_block_num)
              {
                wlog("Unable to generate a usable synopsis because the peer we're generating it for forked too long ago "
                     "(our chains diverge after block #${non_fork_high_block_num} but only undoable to block #${low_block_num})",
                     ("low_block_num", low_block_num)
                     ("non_fork_high_block_num", non_fork_high_block_num));
                FC_THROW_EXCEPTION(graphene::net::block_older_than_undo_history, "Peer is on a fork I'm unable to switch to");
              }
            }
          }
          else
          {
            // no reference point specified, summarize the whole block chain
            high_block_num = _chain_db->head_block_num();
            non_fork_high_block_num = high_block_num;
            if (high_block_num == 0)
              return synopsis; // we have no blocks
          }
          
          if( low_block_num == 0)
             low_block_num = 1;

          // at this point:
          // low_block_num is the block before the first block we can undo,
          // non_fork_high_block_num is the block before the fork (if the peer is on a fork, or otherwise it is the same as high_block_num)
          // high_block_num is the block number of the reference block, or the end of the chain if no reference provided

          // true_high_block_num is the ending block number after the network code appends any item ids it
          // knows about that we don't
          uint32_t true_high_block_num = high_block_num + number_of_blocks_after_reference_point;
          do
          {
            // for each block in the synopsis, figure out where to pull the block id from.
            // if it's <= non_fork_high_block_num, we grab it from the main blockchain;
            // if it's not, we pull it from the fork history
            if (low_block_num <= non_fork_high_block_num)
              synopsis.push_back(_chain_db->get_block_id_for_num(low_block_num));
            else
              synopsis.push_back(fork_history[low_block_num - non_fork_high_block_num - 1]);
            low_block_num += (true_high_block_num - low_block_num + 2) / 2;
          }
          while (low_block_num <= high_block_num);

          //idump((synopsis));
          return synopsis;
      } FC_CAPTURE_AND_RETHROW() }
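The spacing rule in the loop above can be checked against the 26-block example from the comment (blocks "a" through "z"): starting at block 1 and repeatedly adding half of the remaining distance yields blocks 1, 14, 21, 24, 26, i.e. "a n u x z". A self-contained sketch of just that index progression:

#include <cstdint>
#include <iostream>
#include <vector>

int main()
{
    uint32_t low_block_num = 1;                            // earliest block in the summary
    const uint32_t high_block_num = 26;                    // head of a 26-block chain (a..z)
    const uint32_t true_high_block_num = high_block_num;   // peer reported no extra ids

    std::vector<uint32_t> synopsis;
    do
    {
        synopsis.push_back(low_block_num);
        low_block_num += (true_high_block_num - low_block_num + 2) / 2;
    }
    while (low_block_num <= high_block_num);

    for (uint32_t n : synopsis)
        std::cout << n << ' ';                             // prints: 1 14 21 24 26
    std::cout << '\n';
    return 0;
}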
Example #12
 slot_record( const time_point_sec& t, const account_id_type& d, bool p = false, const block_id_type& b = block_id_type() )
 :start_time(t),block_producer_id(d),block_produced(p),block_id(b){}
Example #13
 void mining_loop()
 {
    while( !_mining_loop_complete.canceled() )
    {
      try {
          if( _current_block.prev == block_id_type() || !_callback || _effort < 0.01 || _prev_header.next_difficulty == 0 )
          {
             ilog( "${current.prev}  _effort ${effort}  prev_header: ${prev_header}", 
                   ("current.prev",_current_block.prev)("effort",_effort)("prev_header",_prev_header) );
             fc::usleep( fc::microseconds( 1000*1000 ) );
             continue;
          }
          auto start = fc::time_point::now();
         
          block_header tmp = _current_block;
          tmp.timestamp = fc::time_point::now();
          auto next_diff = _prev_header.next_difficulty * 300*1000000ll / (tmp.timestamp - _prev_header.timestamp).count();
          tmp.next_difficulty = (_prev_header.next_difficulty * 24 + next_diff ) / 25;
         
          tmp.noncea = 0;
          tmp.nonceb = 0;
          auto tmp_id = tmp.id();
          auto seed = fc::sha256::hash( (char*)&tmp_id, sizeof(tmp_id) );
          auto pairs = momentum_search( seed );
          for( auto collision : pairs )
          {
             tmp.noncea = collision.first;
             tmp.nonceb = collision.second;
             FC_ASSERT( _min_votes > 0 );
             FC_ASSERT( _prev_header.next_difficulty > 0 );
             ilog( "difficulty ${d}  target ${t}  tmp.get_difficulty ${dd}  mv ${mv} min: ${min}  block:\n${block}", ("min",_min_votes)("mv",_miner_votes)("dd",tmp.get_difficulty())
                                                                                   ("d",(tmp.get_difficulty() * _miner_votes)/_min_votes)("t",_prev_header.next_difficulty)("block",_current_block) );
             if( (tmp.get_difficulty() * _miner_votes)/_min_votes  >= _prev_header.next_difficulty )
             {
                if( _callback )
                {
                   auto cb = _callback; 
                   _main_thread->async( [cb,tmp](){cb( tmp );} );
                }
                _effort = 0;
                break;
             }
          }
         
          // search space...
          
          auto end   = fc::time_point::now();
         
          // wait while checking for cancel...
          if( _effort < 1.0 )
          {
             auto calc_time = (end-start).count();
             auto wait_time = ((1-_effort)/_effort) * calc_time;
         
             auto wait_until = end + fc::microseconds(wait_time);
             if( wait_until > fc::time_point::now() && !_mining_loop_complete.canceled() )
             {
                ilog( "." );
                fc::usleep( fc::microseconds( 1000*100 ) );
             }
          }
          else
          {
             ilog( "." );
             fc::usleep( fc::microseconds(1000*10) );
          }
      }
      catch ( const fc::exception& e )
      {
         wlog( "${e}", ("e",e.to_detail_string() ));
      }
    } // while 
 } /// mining_loop
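Two small formulas in mining_loop above are easy to miss: the difficulty update is a 25-sample exponential moving average pulled toward the difficulty implied by the last block interval (with a 300-second target), and the effort throttle sleeps long enough that hashing occupies roughly _effort of wall-clock time. A worked sketch with made-up numbers:

#include <cstdint>
#include <iostream>

int main()
{
    // (1) difficulty smoothing: the block arrived in 150 s against a 300 s target,
    //     so the implied difficulty doubles and the average moves 1/25 of the way.
    uint64_t prev_difficulty    = 1000;
    int64_t  elapsed_us         = 150 * 1000000ll;
    uint64_t implied_difficulty = prev_difficulty * 300 * 1000000ll / elapsed_us;    // 2000
    uint64_t next_difficulty    = (prev_difficulty * 24 + implied_difficulty) / 25;  // 1040
    std::cout << "next difficulty: " << next_difficulty << '\n';

    // (2) effort throttling: at 25% effort, sleep three times as long as we hashed.
    double  effort       = 0.25;
    int64_t calc_time_us = 200000;
    int64_t wait_us      = static_cast<int64_t>(((1 - effort) / effort) * calc_time_us);  // 600000
    std::cout << "wait: " << wait_us << " us\n";
    return 0;
}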
Example #14
   void pending_chain_state::get_undo_state( const chain_interface_ptr& undo_state_arg )const
   {
      auto undo_state = std::dynamic_pointer_cast<pending_chain_state>( undo_state_arg );
      chain_interface_ptr prev_state = _prev_state.lock();
      FC_ASSERT( prev_state );
      for( const auto& item : properties )
      {
         auto prev_value = prev_state->get_property( (chain_property_enum)item.first );
         undo_state->set_property( (chain_property_enum)item.first, prev_value );
      }
      for( const auto& item : assets )
      {
         auto prev_value = prev_state->get_asset_record( item.first );
         if( !!prev_value ) undo_state->store_asset_record( *prev_value );
         else undo_state->store_asset_record( item.second.make_null() );
      }
      for( const auto& item : slates )
      {
         auto prev_value = prev_state->get_delegate_slate( item.first );
         if( prev_value ) undo_state->store_delegate_slate( item.first, *prev_value );
         else undo_state->store_delegate_slate( item.first, delegate_slate() );
      }
      for( const auto& item : accounts )
      {
         auto prev_value = prev_state->get_account_record( item.first );
         if( !!prev_value ) undo_state->store_account_record( *prev_value );
         else undo_state->store_account_record( item.second.make_null() );
      }
#if 0
      for( const auto& item : proposals )
      {
         auto prev_value = prev_state->get_proposal_record( item.first );
         if( !!prev_value ) undo_state->store_proposal_record( *prev_value );
         else undo_state->store_proposal_record( item.second.make_null() );
      }
      for( const auto& item : proposal_votes )
      {
         auto prev_value = prev_state->get_proposal_vote( item.first );
         if( !!prev_value ) undo_state->store_proposal_vote( *prev_value );
         else { undo_state->store_proposal_vote( item.second.make_null() ); }
      }
#endif
      for( const auto& item : balances )
      {
         auto prev_value = prev_state->get_balance_record( item.first );
         if( !!prev_value ) undo_state->store_balance_record( *prev_value );
         else undo_state->store_balance_record( item.second.make_null() );
      }
      for( const auto& item : transactions )
      {
         auto prev_value = prev_state->get_transaction( item.first );
         if( !!prev_value ) undo_state->store_transaction( item.first, *prev_value );
         else undo_state->store_transaction( item.first, transaction_record() );
      }
      for( const auto& item : bids )
      {
         auto prev_value = prev_state->get_bid_record( item.first );
         if( prev_value.valid() ) undo_state->store_bid_record( item.first, *prev_value );
         else  undo_state->store_bid_record( item.first, order_record() );
      }
      for( const auto& item : asks )
      {
         auto prev_value = prev_state->get_ask_record( item.first );
         if( prev_value.valid() ) undo_state->store_ask_record( item.first, *prev_value );
         else  undo_state->store_ask_record( item.first, order_record() );
      }
      for( const auto& item : shorts )
      {
         auto prev_value = prev_state->get_short_record( item.first );
         if( prev_value.valid() ) undo_state->store_short_record( item.first, *prev_value );
         else  undo_state->store_short_record( item.first, order_record() );
      }
      for( const auto& item : collateral )
      {
         auto prev_value = prev_state->get_collateral_record( item.first );
         if( prev_value.valid() ) undo_state->store_collateral_record( item.first, *prev_value );
         else  undo_state->store_collateral_record( item.first, collateral_record() );
      }
      for( const auto& item : slots )
      {
         auto prev_value = prev_state->get_slot_record( item.first );
         if( prev_value ) undo_state->store_slot_record( *prev_value );
         else
         {
             slot_record invalid_slot_record;
             invalid_slot_record.start_time = item.first;
             invalid_slot_record.block_produced = true;
             invalid_slot_record.block_id = block_id_type();
             undo_state->store_slot_record( invalid_slot_record );
         }
      }
      for( const auto& item : market_statuses )
      {
         auto prev_value = prev_state->get_market_status( item.first.first, item.first.second );
         if( prev_value ) undo_state->store_market_status( *prev_value );
         else
         {
            undo_state->store_market_status( market_status() );
         }
      }
      for( const auto& item : feeds )
      {
         auto prev_value = prev_state->get_feed( item.first );
         if( prev_value ) undo_state->set_feed( *prev_value );
         else undo_state->set_feed( feed_record{item.first} );
      }
      for( const auto& item : burns )
      {
         undo_state->store_burn_record( burn_record( item.first ) );
      }

      const auto dirty_markets = prev_state->get_dirty_markets();
      undo_state->set_dirty_markets(dirty_markets);

      /* NOTE: Recent operations are currently not rewound on undo */
   }