Example #1
    void peer_database_impl::open(const fc::path& peer_database_filename)
    {
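      // Remember where the peer database lives on disk and, if a previously
      // saved peer list exists there, load it into the in-memory peer set.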
      _peer_database_filename = peer_database_filename;
      if (fc::exists(_peer_database_filename))
      {
        try
        {
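          // Parse the JSON file into peer records and merge them into the set
          // of potential peers.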
          std::vector<potential_peer_record> peer_records = fc::json::from_file(_peer_database_filename).as<std::vector<potential_peer_record> >();
          std::copy(peer_records.begin(), peer_records.end(), std::inserter(_potential_peer_set, _potential_peer_set.end()));
#define MAXIMUM_PEERDB_SIZE 1000
          if (_potential_peer_set.size() > MAXIMUM_PEERDB_SIZE)
          {
            // prune the in-memory peer set to a reasonable size
            auto iter = _potential_peer_set.begin();
            std::advance(iter, MAXIMUM_PEERDB_SIZE);
            _potential_peer_set.erase(iter, _potential_peer_set.end());
          }
        }
        catch (const fc::exception& e)
        {
          elog("error opening peer database file ${peer_database_filename}, starting with a clean database", 
               ("peer_database_filename", _peer_database_filename));
        }
      }
    }
Example #2
    void peer_database_impl::open(const fc::path& databaseFilename)
    {
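      // Open the on-disk peer database (a leveldb key/value store) and load
      // its contents into the in-memory peer set.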
      try
      {
        _leveldb.open(databaseFilename);
      }
      catch (const graphene::db::level_pod_map_open_failure&) 
      {
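        // The database could not be opened (e.g. it is corrupt), so delete it
        // and create a fresh, empty one in its place.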
        fc::remove_all(databaseFilename);
        _leveldb.open(databaseFilename);
      }

      _potential_peer_set.clear();

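      // Rebuild the in-memory peer set from the records stored on disk.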
      for (auto iter = _leveldb.begin(); iter.valid(); ++iter)
        _potential_peer_set.insert(potential_peer_database_entry(iter.key(), iter.value()));
#define MAXIMUM_PEERDB_SIZE 1000
      if (_potential_peer_set.size() > MAXIMUM_PEERDB_SIZE)
      {
        // prune both the in-memory set and the on-disk database to a reasonable size
        auto iter = _potential_peer_set.begin();
        std::advance(iter, MAXIMUM_PEERDB_SIZE);
        while (iter != _potential_peer_set.end())
        {
          _leveldb.remove(iter->database_key);
          iter = _potential_peer_set.erase(iter);
        }
      }
    }