Code Example #1
File: rpc_call_perf_test.cpp Project: JLtW/graphlab
 void run_threaded_string_sends_0(size_t length, size_t numthreads) {
   // process 1 is the receiving side; it only waits for the barrier
   if (rmi.procid() == 1) {
     rmi.full_barrier();
     return;
   }
   timer ti;
   std::cout << numthreads << " threaded " << SEND_LIMIT_PRINT <<" sends, "
                                           << length << " bytes\n";
   ti.start();
   size_t numsends = SEND_LIMIT / (length * numthreads);
   size_t rd = rdtsc();
   thread_group thrgrp;
   for (size_t i = 0; i < numthreads; ++i) {
     thrgrp.launch(boost::bind(&teststruct::perform_string_sends_0, this, length, numsends));
   }
   thrgrp.join();
   size_t rd2 = rdtsc();
   std::cout << (rd2 - rd) / (numthreads * numsends)  << " cycles per call\n";
   double t1 = ti.current_time();
   rmi.dc().flush();
   double t2 = ti.current_time();
   rmi.full_barrier();
   double t3 = ti.current_time();
   print_res(t1,t2,t3);
 }
Code Example #2
 // Counts down from i, printing only the last few steps, and forwards
 // the rest of the countdown to the next process in the ring.
 void task(size_t i) {
   if (i < 5) std::cout << "Task " << i << std::endl;
   if (i > 0) {
     if (rmi.numprocs() == 1) {
       add_task_local(i - 1);
     }
     else {
       rmi.remote_call((procid_t)((rmi.procid() + 1) % rmi.numprocs()),
                   &simple_engine_test::add_task_local,
                   i - 1);
     }
   }
 }
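The countdown above hops from process to process around the ring via remote_call. As a hedged sketch, a driver that seeds the chain might look like the following; it assumes add_task_local(i) simply schedules task(i) on the calling process, as the single-process branch suggests:

 // Hypothetical driver, not part of the original example.
 void start_countdown(size_t initial) {
   if (rmi.procid() == 0) add_task_local(initial);
   // detecting when the whole chain has finished would need separate
   // termination detection; this sketch only seeds it
 }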
Code Example #3
 std::vector<std::vector<size_t> > 
     get_procs_with_keys(const std::vector<size_t>& local_key_list, Graph& g) {
    // after the all_to_all below, this machine receives, from every
    // processor, all keys k such that k % numprocs == procid
   std::vector<std::vector<size_t> > procs_with_keys(rmi.numprocs());
   for (size_t i = 0; i < local_key_list.size(); ++i) {
     if (g.l_vertex(i).owned() && local_key_list[i] != (size_t)(-1)) {
       procid_t target_procid = local_key_list[i] % rmi.numprocs();
       procs_with_keys[target_procid].push_back(local_key_list[i]);
     }
   }
   rmi.all_to_all(procs_with_keys);
   return procs_with_keys;
 }
Code Example #4
File: dht.hpp Project: jerrylam/GraphLab
    /**
     * Sets the value associated with the key to newval.
     */
    void set(const KeyType &key, const ValueType &newval) {
        // who owns the data?
        const size_t hashvalue = hasher(key);
        const size_t owningmachine = hashvalue % rpc.numprocs();

        // if it is me, set it
        if (owningmachine == rpc.dc().procid()) {
            lock.lock();
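            // note: storage is keyed by the hash value, so distinct keys
            // whose hashes collide will share a single slot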
            storage[hashvalue] = newval;
            lock.unlock();
        } else {
            rpc.remote_call(owningmachine,
                            &dht<KeyType,ValueType>::set,
                            key, newval);
        }
    }
Code Example #5
    void prepare_injective_join(LeftEmitKey left_emit_key, 
                                RightEmitKey right_emit_key) {
      typedef std::pair<size_t, vertex_id_type> key_vertex_pair;
      // Basically, what we are trying to do is to figure out, for each vertex
      // on one side of the graph, which vertices for the other graph
      // (and on which machines) emitted the same key.
      //
      // The target datastructure is:
      // vtx_to_key[vtx]: The key for each vertex
      // opposing_join_proc[vtx]: Machines which hold a vertex on the opposing
      //                          graph which emitted the same key
      // key_to_vtx[key]: Mapping of keys to vertices.
      
      // reset and fill the left and right injective indices

      reset_and_fill_injective_index(left_inj_index, 
                                     left_graph, 
                                     left_emit_key, "left graph");    

      reset_and_fill_injective_index(right_inj_index, 
                                     right_graph, 
                                     right_emit_key, "right graph");    
      rmi.barrier(); 
      // now we need a cross join across all machines to figure out the
      // opposing join proc; this has to be done twice, once for the left
      // and once for the right
      compute_injective_join();
    }
Code Example #6
File: rpc_call_perf_test.cpp Project: JLtW/graphlab
 void run_short_sends_0() {
   if (rmi.procid() == 1) {
     rmi.full_barrier();
     return;
   }
   timer ti;
   std::cout << "Single Threaded " << SEND_LIMIT_PRINT << " sends, 4 integer blocks\n";
   ti.start();
   size_t numsends = SEND_LIMIT / (sizeof(size_t) * 4);
   perform_short_sends_0(numsends);
   double t1 = ti.current_time();
   rmi.dc().flush();
   double t2 = ti.current_time();
   rmi.full_barrier();
   double t3 = ti.current_time();
   print_res(t1,t2,t3);
 }
Code Example #7
    void injective_join(injective_join_index& target,
                        TargetGraph& target_graph,
                        injective_join_index& source,
                        SourceGraph& source_graph,
                        JoinOp joinop) {
      // build up the exchange structure.
      // move right vertex data to left
      std::vector<
          std::vector<
              std::pair<size_t, typename SourceGraph::vertex_data_type> > > 
            source_data(rmi.numprocs());

      for (size_t i = 0; i < source.opposing_join_proc.size(); ++i) {
        if (source_graph.l_vertex(i).owned()) {
          procid_t target_proc = source.opposing_join_proc[i];
          // procid_t is unsigned, so unmatched vertices, marked with
          // (procid_t)(-1), fail this range check
          if (target_proc < rmi.numprocs()) {
            source_data[target_proc].push_back(
                std::make_pair(source.vtx_to_key[i],
                               source_graph.l_vertex(i).data()));
          }
        }
      }
      // exchange
      rmi.all_to_all(source_data);
      // ok. now join against left
#ifdef _OPENMP
#pragma omp parallel for
#endif
      for (size_t p = 0; p < source_data.size(); ++p) {
        for (size_t i = 0; i < source_data[p].size(); ++i) {
          // find the target vertex with the matching key
          hopscotch_map<size_t, vertex_id_type>::const_iterator iter = 
              target.key_to_vtx.find(source_data[p][i].first);
          ASSERT_TRUE(iter != target.key_to_vtx.end());
          // found it!
          typename TargetGraph::local_vertex_type 
              lvtx = target_graph.l_vertex(iter->second);
          typename TargetGraph::vertex_type vtx(lvtx);
          joinop(vtx, source_data[p][i].second);
        }
      }
      target_graph.synchronize();
    }
Code Example #8
File: dht.hpp Project: jerrylam/GraphLab
    /**
     * Gets the value associated with a key.
     * Returns (true, Value) if the entry is available.
     * Returns (false, undefined) otherwise.
     */
    std::pair<bool, ValueType> get(const KeyType &key) const {
        // who owns the data?
        const size_t hashvalue = hasher(key);
        const size_t owningmachine = hashvalue % rpc.numprocs();
        std::pair<bool, ValueType> retval;
        // if it is me, we can return it
        if (owningmachine == rpc.dc().procid()) {
            lock.lock();
            typename storage_type::const_iterator iter = storage.find(hashvalue);
            retval.first = iter != storage.end();
            if (retval.first) retval.second = iter->second;
            lock.unlock();
        } else {
            retval = rpc.remote_request(owningmachine,
                                        &dht<KeyType,ValueType>::get,
                                        key);
        }
        return retval;
    }
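Together with set() from Code Example #4, this completes the DHT's read/write interface. Below is a minimal, hypothetical usage sketch; the setup (mpi_tools::init, a distributed_control, and a dht built identically on every process) follows the usual GraphLab RPC pattern, but the include path and constructor signature are assumptions, not taken from the examples above.

 // Hypothetical usage sketch; include path and setup are assumptions.
 #include <iostream>
 #include <string>
 #include <graphlab.hpp>

 int main(int argc, char** argv) {
   graphlab::mpi_tools::init(argc, argv);
   graphlab::distributed_control dc;
   graphlab::dht<std::string, int> table(dc);   // same on every process
   if (dc.procid() == 0) table.set("answer", 42);
   dc.full_barrier();                           // ensure the set() arrived
   std::pair<bool, int> r = table.get("answer");
   if (r.first) std::cout << "answer = " << r.second << std::endl;
   graphlab::mpi_tools::finalize();
   return 0;
 }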
Code Example #9
File: rpc_call_perf_test.cpp Project: JLtW/graphlab
 void run_threaded_short_pod_sends_0(size_t numthreads) {
   if (rmi.procid() == 1) {
     rmi.full_barrier();
     return;
   }
   timer ti;
   std::cout << numthreads << " threaded "<< SEND_LIMIT_PRINT <<" POD sends, 4 integers\n";
   size_t numsends = SEND_LIMIT / (sizeof(size_t) * 4 * numthreads);
   ti.start();
   thread_group thrgrp;
   for (size_t i = 0; i < numthreads; ++i) {
     thrgrp.launch(boost::bind(&teststruct::perform_short_pod_sends_0, this, numsends));
   }
   thrgrp.join();
   double t1 = ti.current_time();
   rmi.dc().flush();
   double t2 = ti.current_time();
   rmi.full_barrier();
   double t3 = ti.current_time();
   print_res(t1,t2,t3);
 }
Code Example #10
File: rpc_call_perf_test.cpp Project: JLtW/graphlab
 void run_string_sends_0(size_t length) {
   if (rmi.procid() == 1) {
     rmi.full_barrier();
     return;
   }
   timer ti;
   size_t numsends = SEND_LIMIT / (length);
   std::cout << "Single Threaded " << SEND_LIMIT_PRINT <<" sends, " << length << " bytes * "<< numsends <<  "\n";
   ti.start();
   size_t rd = rdtsc();
   perform_string_sends_0(length, numsends);
   size_t rd2 = rdtsc();
   std::cout << "Completed in: " << ti.current_time() << " seconds\n";
   std::cout << (rd2 - rd) / numsends << " cycles per call\n";
   double t1 = ti.current_time();
   rmi.dc().flush();
   std::cout << "Flush in: " << ti.current_time() << " seconds\n";
   double t2 = ti.current_time();
   rmi.full_barrier();
   std::cout << "Receive Complete in: " << ti.current_time() << " seconds\n";
   double t3 = ti.current_time();
   print_res(t1,t2,t3);
 }
Code Example #11
 void run_synchronous(MemberFunction member_fun, const vertex_set& vset) {
   shared_lvid_counter = 0;
   if (threads.size() <= 1) {
     (this->*(member_fun))(0, vset);
   }
   else {
     // launch the initialization threads
     for(size_t i = 0; i < threads.size(); ++i) {
       boost::function<void(void)> invoke = boost::bind(member_fun, this, i, vset);
       threads.launch(invoke, i);
     }
   }
   // Wait for all threads to finish
   threads.join();
   rmi.barrier();
 } // end of run_synchronous
Code Example #12
File: dc_services.hpp Project: Hannah1999/Dato-Core
 inline void all_reduce(U& data, bool control = false) {
   rmi.all_reduce(data, control);
 }
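A hedged usage sketch: every process passes in its local value and, assuming the reduction combines contributions with operator+= as the name suggests, every process ends up with the global total.

 // Hypothetical usage: local_count is a stand-in for some per-process value.
 size_t count = local_count;
 rmi.all_reduce(count);
 // count now holds the sum over all processes, on every process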
Code Example #13
    void compute_injective_join() {
      std::vector<std::vector<size_t> > left_keys = 
          get_procs_with_keys(left_inj_index.vtx_to_key, left_graph);
      std::vector<std::vector<size_t> > right_keys = 
          get_procs_with_keys(right_inj_index.vtx_to_key, right_graph);
      // now, for each key on the right, figure out which proc it belongs
      // to, and vice versa; since the join is one-to-one, a hash map of
      // one side suffices
      hopscotch_map<size_t, procid_t> left_key_to_procs;

      // construct a hash table of keys to procs, freeing each input
      // vector as soon as it has been consumed to save memory
      for (size_t p = 0; p < left_keys.size(); ++p) {
        for (size_t i = 0; i < left_keys[p].size(); ++i) {
          ASSERT_MSG(left_key_to_procs.count(left_keys[p][i]) == 0,
                     "Duplicate keys not permitted for left graph keys in injective join");
          left_key_to_procs.insert(std::make_pair(left_keys[p][i], p));
        }
        std::vector<size_t>().swap(left_keys[p]);
      }
      left_keys.clear();
     
      std::vector<
          std::vector<
              std::pair<size_t, procid_t> > > left_match(rmi.numprocs());
      std::vector<
          std::vector<
              std::pair<size_t, procid_t> > > right_match(rmi.numprocs());

      // now for each key on the right, find the matching key on the left
      for (size_t p = 0; p < right_keys.size(); ++p) {
        for (size_t i = 0; i < right_keys[p].size(); ++i) {
          size_t key = right_keys[p][i];
          hopscotch_map<size_t, procid_t>::iterator iter =
              left_key_to_procs.find(key);
          if (iter != left_key_to_procs.end()) {
            ASSERT_MSG(iter->second != (procid_t)(-1),
                       "Duplicate keys not permitted for right graph keys in injective join");
            // we have a match
            procid_t left_proc = iter->second;
            procid_t right_proc = p;
            // the left proc has to be told about the right proc,
            // and vice versa
            left_match[left_proc].push_back(std::make_pair(key, right_proc));
            right_match[right_proc].push_back(std::make_pair(key, left_proc));
            // set the map entry to -1 
            // so we know if it is ever reused
            iter->second = (procid_t)(-1); 
          }
        }
        std::vector<size_t>().swap(right_keys[p]);
      }
      right_keys.clear();

      rmi.all_to_all(left_match);
      rmi.all_to_all(right_match);
      // fill in the index: go through the left match and set up the
      // opposing index based on the match result
#ifdef _OPENMP
#pragma omp parallel for
#endif
      for (size_t p = 0; p < left_match.size(); ++p) {
        for (size_t i = 0; i < left_match[p].size(); ++i) {
          // search for the key in the left index
          hopscotch_map<size_t, vertex_id_type>::const_iterator iter = 
              left_inj_index.key_to_vtx.find(left_match[p][i].first);
          ASSERT_TRUE(iter != left_inj_index.key_to_vtx.end());
          // fill in the match
          left_inj_index.opposing_join_proc[iter->second] = left_match[p][i].second;
        }
      }
      left_match.clear();
      // repeat for the right match
#ifdef _OPENMP
#pragma omp parallel for
#endif
      for (size_t p = 0; p < right_match.size(); ++p) {
        for (size_t i = 0; i < right_match[p].size(); ++i) {
          // search for the key in the right index
          hopscotch_map<size_t, vertex_id_type>::const_iterator iter = 
              right_inj_index.key_to_vtx.find(right_match[p][i].first);
          ASSERT_TRUE(iter != right_inj_index.key_to_vtx.end());
          // fill in the match
          right_inj_index.opposing_join_proc[iter->second] = right_match[p][i].second;
        }
      }
      right_match.clear();
      // ok done.
    }
Code Example #14
File: dc_services.hpp Project: Hannah1999/Dato-Core
 /// \copydoc distributed_control::full_barrier()
 inline void full_barrier() {
   rmi.full_barrier();
 }
Code Example #15
File: dht.hpp Project: jerrylam/GraphLab
 /**
    Must be called by all machines simultaneously
 */
 void clear() {
     rpc.barrier();
     storage.clear();
 }
Code Example #16
File: dc_services.hpp Project: Hannah1999/Dato-Core
 void all_reduce2(U& data, PlusEqual plusequal, bool control = false) {
   rmi.all_reduce2(data, plusequal, control);
 }
Code Example #17
File: dc_services.hpp Project: Hannah1999/Dato-Core
 inline void broadcast(U& data, bool originator, bool control = false) { 
   rmi.broadcast(data, originator, control);
 }
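A hedged usage sketch: exactly one process passes originator = true, and afterwards every process holds that originator's value.

 // Hypothetical usage: process 0 is the originator.
 size_t value = (rmi.procid() == 0) ? 123 : 0;
 rmi.broadcast(value, rmi.procid() == 0);
 // value == 123 on every process here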
Code Example #18
File: dc_services.hpp Project: Hannah1999/Dato-Core
 inline void recv_from(procid_t source, U& t, bool control = false) {
   rmi.recv_from(source, t, control);
 }
Code Example #19
File: dc_services.hpp Project: Hannah1999/Dato-Core
 inline void send_to(procid_t target, U& t, bool control = false) {
   rmi.send_to(target, t, control);
 }
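send_to here and recv_from in Code Example #18 are matched point-to-point operations. A hedged pairing sketch, assuming both processes reach their half of the exchange:

 // Hypothetical pairing: the send on process 0 is matched by the
 // receive on process 1.
 if (rmi.procid() == 0) {
   std::string msg = "hello";
   rmi.send_to(1, msg);
 } else if (rmi.procid() == 1) {
   std::string msg;
   rmi.recv_from(0, msg);
 }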
Code Example #20
File: dht.hpp Project: jerrylam/GraphLab
 void print_stats() const {
     std::cerr << rpc.calls_sent() << " calls sent\n";
     std::cerr << rpc.calls_received() << " calls received\n";
 }
Code Example #21
File: dc_services.hpp Project: Hannah1999/Dato-Core
 inline void all_gather(std::vector<U>& data, bool control = false) {
   rmi.all_gather(data, control);
 }
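A hedged usage sketch, assuming the common convention that the vector has one slot per process and each process fills its own slot before the call:

 // Hypothetical usage: slot procid carries this process's contribution;
 // afterwards (under the assumed convention) the full vector is
 // identical on every process.
 std::vector<size_t> data(rmi.numprocs());
 data[rmi.procid()] = local_value;  // local_value is a stand-in
 rmi.all_gather(data);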
Code Example #22
File: dht.hpp Project: jerrylam/GraphLab
 /**
  * Get the owner of the key
  */
 procid_t owner(const KeyType& key) const {
     return hasher(key) % rpc.dc().numprocs();
 }
Code Example #23
File: rpc_call_perf_test.cpp Project: JLtW/graphlab
 void perform_string_sends_0(size_t length, size_t number) {
   std::string s(length, 1);  // a length-byte payload, each byte set to 0x01
   for (size_t i = 0; i < number; ++i) {
     rmi.remote_call(1, &teststruct::receive_string, s);
   }
 }
Code Example #24
File: rpc_call_perf_test.cpp Project: JLtW/graphlab
 void perform_short_pod_sends_0(size_t number) {
   for (size_t i = 0; i < number; ++i) {
     rmi.pod_call(1, &teststruct::receive_ints, 100, 100, 1000, 5000000);
   }
 }
Code Example #25
File: dc_services.hpp Project: Hannah1999/Dato-Core
 /// \copydoc distributed_control::barrier()
 inline void barrier() {
   rmi.barrier();
 }
Code Example #26
File: rpc_call_perf_test.cpp Project: JLtW/graphlab
 void perform_long_sends_0(size_t length, size_t number) {
   std::vector<size_t> v(length, 5000000);
   for (size_t i = 0; i < number; ++i) {
     rmi.remote_call(1, &teststruct::receive_vector, v);
   }
 }
Code Example #27
File: dc_services.hpp Project: Hannah1999/Dato-Core
 inline void gather(std::vector<U>& data, procid_t sendto, bool control = false) {
   rmi.gather(data, sendto, control);
 }