Example 1: client_view::advance_leader
void
client_view::advance_leader (
   std::vector <boost::asio::ip::tcp::endpoint> const & live_servers)
{
   if (live_servers.empty () == true)
   {
      next_leader_ = boost::none;
      return;
   }

   for (auto const & i : live_servers)
   {
      PAXOS_DEBUG (i);
   }

   /*!
     If the server is found, std::find below returns an iterator to that server;
     otherwise it returns the end iterator (one past the last server).
    */
   std::vector <boost::asio::ip::tcp::endpoint>::const_iterator pos;

   if (next_leader_.is_initialized () == false)
   {
      pos = live_servers.begin ();
   }
   else
   {
      pos = std::find (live_servers.begin (),
                       live_servers.end (),
                       *next_leader_);

      if (pos == live_servers.end ())
      {
         /*!
           The server we previously selected as the next leader is no longer in
           the live_servers array; the best thing to do is to start over at the
           beginning of the array.
          */
         pos = live_servers.begin ();
      }
   }

   PAXOS_ASSERT (pos != live_servers.end ());

   ++pos;
   if (pos == live_servers.end ())
   {
      /*!
        Uh-oh, we're at the end of the vector. Let's start at the beginning again.
       */
      pos = live_servers.begin ();
   }

   PAXOS_ASSERT (pos != live_servers.end ());

   /*!
     Remember this selection: the next call to this function will continue the
     rotation from the server stored here.
    */
   next_leader_ = *pos;
}
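
Seen in isolation, the rotation logic above is easy to follow: pick the entry
after the current leader and wrap around at the end of the vector. The sketch
below re-implements the same behaviour using only standard-library types
(std::optional and std::string in place of boost::optional and tcp::endpoint);
advance_round_robin is a hypothetical stand-in for this method, not part of the
library.

#include <algorithm>
#include <cassert>
#include <iostream>
#include <optional>
#include <string>
#include <vector>

/*!
  Hypothetical stand-in for client_view::advance_leader: given the servers that
  are currently alive, advance `next' to the server after the current one,
  wrapping around at the end of the vector.
 */
void
advance_round_robin (
   std::vector <std::string> const &    live_servers,
   std::optional <std::string> &        next)
{
   if (live_servers.empty () == true)
   {
      next = std::nullopt;
      return;
   }

   auto pos = live_servers.begin ();

   if (next.has_value () == true)
   {
      pos = std::find (live_servers.begin (), live_servers.end (), *next);

      if (pos == live_servers.end ())
      {
         //! The previously selected server disappeared; start over.
         pos = live_servers.begin ();
      }
   }

   ++pos;
   if (pos == live_servers.end ())
   {
      pos = live_servers.begin ();
   }

   next = *pos;
}

int main ()
{
   std::vector <std::string> servers = {"a", "b", "c"};
   std::optional <std::string> next;

   /*!
     Like the original, the very first call starts at begin () and then
     advances, so it selects the *second* live server.
    */
   advance_round_robin (servers, next);
   assert (*next == "b");

   advance_round_robin (servers, next);   // "c"
   advance_round_robin (servers, next);   // wraps around to "a"
   assert (*next == "a");

   std::cout << "round-robin rotation wraps correctly" << std::endl;
}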
Example 2: heap::remove
void
heap::remove (
   int64_t              proposal_id)
{
   PAXOS_ASSERT (proposal_id >= 0);

   if (data_.find (proposal_id) == data_.end ())
   {
      /*!
        This is weird, and likely caused by manual copying of / fiddling with the
        data backend. The only sensible thing to do is to ignore this remove
        command.
       */
      PAXOS_WARN ("proposal_id " << proposal_id << " not found in history, ignoring remove!");
      return;
   }

   PAXOS_DEBUG (this << " deleting all data since " << proposal_id);

   data_.erase (data_.begin (),
                data_.find (proposal_id));
   data_.erase (proposal_id);

   PAXOS_ASSERT_EQ (lowest_proposal_id (), proposal_id + 1);
}
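
The two-step erase above relies on data_ being an ordered map (as the find ()
and erase () calls suggest): erasing the half-open range [begin, find
(proposal_id)) drops every entry older than proposal_id, and the second erase
drops proposal_id itself, which is why the final assertion holds whenever the
proposal ids are contiguous. A minimal standalone sketch of that pattern on a
plain std::map, with hypothetical values:

#include <cassert>
#include <cstdint>
#include <map>
#include <string>

int main ()
{
   std::map <int64_t, std::string> data = {
      {1, "a"}, {2, "b"}, {3, "c"}, {4, "d"}, {5, "e"}};

   int64_t proposal_id = 3;

   //! Erase the half-open range [begin, find (3)): removes keys 1 and 2
   data.erase (data.begin (), data.find (proposal_id));

   //! Then erase key 3 itself
   data.erase (proposal_id);

   //! With contiguous proposal ids, the lowest remaining key is id + 1
   assert (data.begin ()->first == proposal_id + 1);
}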
Example 3: durable storage failover test
int main ()
{
   std::map <int64_t, uint16_t> responses;

   /*!
     Synchronizes access to responses
    */
   boost::mutex mutex;

   paxos::configuration configuration1;
   paxos::configuration configuration2;
   paxos::configuration configuration3;

   /*!
     Note that the configuration objects above outlive the server objects declared
     later in the test, thus providing semi-durable storage that survives the
     server restarts below.
    */
   configuration1.set_durable_storage (
      new paxos::durable::heap ());
   configuration2.set_durable_storage (
      new paxos::durable::heap ());
   configuration3.set_durable_storage (
      new paxos::durable::heap ());

   paxos::server::callback_type callback = 
      [& responses,
       & mutex](
         int64_t                promise_id,
         std::string const &    workload) -> std::string
      {
         boost::mutex::scoped_lock lock (mutex);

         if (responses.find (promise_id) == responses.end ())
         {
            responses[promise_id] = 1;
         }
         else
         {
            responses[promise_id]++;
         }

         PAXOS_ASSERT (responses[promise_id] <= 3);

         return "bar";
      };

   paxos::client client;
   client.add  ({{"127.0.0.1", 1337}, {"127.0.0.1", 1338}, {"127.0.0.1", 1339}});

   {
      paxos::server server1 ("127.0.0.1", 1337, callback, configuration1);

      server1.add ({{"127.0.0.1", 1337}, {"127.0.0.1", 1338}, {"127.0.0.1", 1339}});

      {
         paxos::server server2 ("127.0.0.1", 1338, callback, configuration2);

         server2.add ({{"127.0.0.1", 1337}, {"127.0.0.1", 1338}, {"127.0.0.1", 1339}});

         {
            paxos::server server3 ("127.0.0.1", 1339, callback, configuration3);

            server3.add ({{"127.0.0.1", 1337}, {"127.0.0.1", 1338}, {"127.0.0.1", 1339}});

            PAXOS_ASSERT_EQ (client.send ("foo").get (), "bar");
            PAXOS_ASSERT_EQ (client.send ("foo").get (), "bar");
            PAXOS_ASSERT_EQ (all_responses_equal (responses, 3), true);
         }

         /*!
           With server3 gone, the remaining two servers still form a majority, so
           the calls succeed; server3 no longer receives the workloads, however,
           so not all responses are equal anymore.
          */
         PAXOS_ASSERT_EQ (client.send ("foo").get (), "bar");
         PAXOS_ASSERT_EQ (client.send ("foo").get (), "bar");
         PAXOS_ASSERT_EQ (all_responses_equal (responses, 3), false);
      }

      /*!
        With server2 also gone, server1 alone can no longer form a majority.
       */
      PAXOS_ASSERT_THROW (client.send ("foo").get (), paxos::exception::no_majority);
   }

   /*!
     Note that we're re-adding servers in reverse order here; this is to ensure that
     server3 doesn't become our leader while it's lagging behind.
    */
   paxos::server server3 ("127.0.0.1", 1339, callback, configuration3);
   server3.add ({{"127.0.0.1", 1337}, {"127.0.0.1", 1338}, {"127.0.0.1", 1339}});
   
   PAXOS_ASSERT_THROW (client.send ("foo").get (), paxos::exception::no_majority);

   paxos::server server2 ("127.0.0.1", 1338, callback, configuration2);
   server2.add ({{"127.0.0.1", 1337}, {"127.0.0.1", 1338}, {"127.0.0.1", 1339}});
   boost::this_thread::sleep (
      boost::posix_time::milliseconds (
         paxos::configuration ().timeout ()));

   PAXOS_ASSERT_EQ (client.send ("foo").get (), "bar");
   PAXOS_ASSERT_EQ (client.send ("foo").get (), "bar");

   paxos::server server1 ("127.0.0.1", 1337, callback, configuration1);
   server1.add ({{"127.0.0.1", 1337}, {"127.0.0.1", 1338}, {"127.0.0.1", 1339}});

   boost::this_thread::sleep (
      boost::posix_time::milliseconds (
         paxos::configuration ().timeout ()));

   /*!
     Keep issuing workloads until the restarted servers have caught up and every
     workload has been processed by all three servers.
    */
   do
   {
      PAXOS_ASSERT_EQ (client.send ("foo").get (), "bar");
   } while (all_responses_equal (responses, 3) == false);

   PAXOS_INFO ("test succeeded");
}
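
The helper all_responses_equal is used throughout this test but is not part of
the listing. From its call sites it presumably checks that every workload in
the responses map has been processed by the given number of servers; the
following is a plausible reconstruction (an assumption, not the library's
actual helper):

#include <cassert>
#include <cstdint>
#include <map>

/*!
  Hypothetical reconstruction: returns true only if every proposal in
  `responses' has been processed exactly `count' times.
 */
bool
all_responses_equal (
   std::map <int64_t, uint16_t> const & responses,
   uint16_t                             count)
{
   for (auto const & i : responses)
   {
      if (i.second != count)
      {
         return false;
      }
   }

   return true;
}

int main ()
{
   std::map <int64_t, uint16_t> responses = {{0, 3}, {1, 3}};
   assert (all_responses_equal (responses, 3) == true);

   responses[2] = 1;   // a lagging server has not yet processed workload 2
   assert (all_responses_equal (responses, 3) == false);
}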