// Example #1
// 0
distributed_control::distributed_control() {
  // Default construction: probe configuration sources in priority order
  // (environment, then Zookeeper, then MPI) and fall back to a
  // single-process shared-memory setup when none is available.
  dc_init_param params;
  if (init_param_from_env(params)) {
    logstream(LOG_INFO) << "Distributed Control Initialized from Environment" << std::endl;
  } else if (init_param_from_zookeeper(params)) {
    logstream(LOG_INFO) << "Distributed Control Initialized from Zookeeper" << std::endl;
  } else if (mpi_tools::initialized() && init_param_from_mpi(params)) {
    logstream(LOG_INFO) << "Distributed Control Initialized from MPI" << std::endl;
  } else {
    logstream(LOG_INFO) << "Shared Memory Execution" << std::endl;
    // Acquire a free TCP port together with the already-bound socket so
    // the port cannot be stolen between selection and use.
    const std::pair<size_t, int> bound = get_free_tcp_port();
    const size_t listen_port = bound.first;
    const int listen_sock = bound.second;

    params.machines.push_back(std::string("localhost:") + tostr(listen_port));
    params.curmachineid = 0;
    // Pass the pre-bound socket handle down through the init string.
    params.initstring = std::string(" __sockhandle__=") + tostr(listen_sock) + " ";
    params.numhandlerthreads = RPC_DEFAULT_NUMHANDLERTHREADS;
    params.commtype = RPC_DEFAULT_COMMTYPE;
  }
  init(params.machines, params.initstring, params.curmachineid,
       params.numhandlerthreads, params.commtype);
  INITIALIZE_TRACER(dc_receive_queuing, "dc: time spent on enqueue");
  INITIALIZE_TRACER(dc_receive_multiplexing, "dc: time spent exploding a chunk");
  INITIALIZE_TRACER(dc_call_dispatch, "dc: time spent issuing RPC calls");
}
// Example #2
// 0
/**
 * Construct a distributed_control directly from a caller-supplied
 * parameter set (machine list, init string, local machine id, number
 * of handler threads, and communication type).
 */
distributed_control::distributed_control(dc_init_param initparam) {
  init(initparam.machines, initparam.initstring, initparam.curmachineid,
       initparam.numhandlerthreads, initparam.commtype);
  // Register the standard RPC timing tracers.
  INITIALIZE_TRACER(dc_receive_queuing, "dc: time spent on enqueue");
  INITIALIZE_TRACER(dc_receive_multiplexing, "dc: time spent exploding a chunk");
  INITIALIZE_TRACER(dc_call_dispatch, "dc: time spent issuing RPC calls");
}
    /// Constructs the batch-ingress object and synchronizes all processes.
    ///
    /// \param dc        RPC/communication layer; also used to size the
    ///                  per-process bookkeeping vectors via dc.numprocs().
    /// \param graph     Target graph handed to the base ingress type.
    /// \param bufsize   Buffer capacity (presumably edges buffered before a
    ///                  flush — TODO confirm against the add_edge path).
    /// \param usehash   Flag forwarded into a member of the same name;
    ///                  semantics defined elsewhere in the class.
    /// \param userecent Flag forwarded into a member of the same name;
    ///                  semantics defined elsewhere in the class.
    distributed_batch_ingress(distributed_control& dc, graph_type& graph, 
        size_t bufsize = 50000, bool usehash = false, bool userecent = false) :
      base_type(dc, graph), rpc(dc, this), 
      num_edges(0), bufsize(bufsize), query_set(dc.numprocs()),
      proc_num_edges(dc.numprocs()), usehash(usehash), userecent(userecent) { 
       // Barrier: all processes must have registered their RPC handlers
       // before any of them starts issuing ingress calls.
       rpc.barrier(); 

      INITIALIZE_TRACER(batch_ingress_add_edge, "Time spent in add edge");
      INITIALIZE_TRACER(batch_ingress_add_edges, "Time spent in add block edges" );
      INITIALIZE_TRACER(batch_ingress_compute_assignments, "Time spent in compute assignment");
      INITIALIZE_TRACER(batch_ingress_request_degree_table, "Time spent in requesting assignment");
      INITIALIZE_TRACER(batch_ingress_get_degree_table, "Time spent in retrieve degree table");
      INITIALIZE_TRACER(batch_ingress_update_degree_table, "Time spent in update degree table");
     }
// Example #4
// 0
// Orthogonalizes column `curoffset` of `mat` against all previous columns
// [0, curoffset) by repeated classical Gram-Schmidt: subtract from the
// current vector its projection onto each earlier vector. The sweep is
// repeated mi.ortho_repeats times (1..3); re-orthogonalization improves
// numerical stability of classical Gram-Schmidt.
void orthogonalize_vs_all(DistSlicedMat & mat, int curoffset){
  assert(mi.ortho_repeats >=1 && mi.ortho_repeats <= 3);
  INITIALIZE_TRACER(orthogonalize_vs_alltrace, "orthogonalization step");
  BEGIN_TRACEPOINT(orthogonalize_vs_alltrace);
  // Temporarily suppress debug output while computing the inner products;
  // restored before the final debug_print below.
  bool old_debug = debug;
  debug = false;
  DistVec current = mat[curoffset];
  for (int j=0; j < mi.ortho_repeats; j++){
    for (int i=0; i< curoffset; i++){
      // Projection coefficient of `current` onto mat[i].
      DistDouble alpha = mat[i]*current;
      // BUG FIX: the projection must be removed whenever its *magnitude*
      // exceeds the tolerance. The original test `alpha > 1e-10` silently
      // skipped negative coefficients, so vectors with negative correlation
      // were never orthogonalized. (Written without fabs to avoid a new
      // header dependency.)
      const double a = alpha.toDouble();
      if (a > 1e-10 || a < -1e-10)
        current = current - mat[i]*alpha;
    }
  }
  END_TRACEPOINT(orthogonalize_vs_alltrace);
  debug = old_debug;
  current.debug_print(current.name);
}
    /// Constructs the oblivious-ingress object.
    ///
    /// \param dc        RPC/communication layer; dc.numprocs() sizes the
    ///                  per-process edge-count vector.
    /// \param graph     Target graph handed to the base ingress type.
    /// \param usehash   Flag stored into the member of the same name;
    ///                  semantics defined elsewhere in the class.
    /// \param userecent Flag stored into the member of the same name;
    ///                  semantics defined elsewhere in the class.
    // NOTE(review): dht is initialized with -1 — presumably a sentinel
    // understood by the dht member's constructor; confirm against its type.
    distributed_oblivious_ingress(distributed_control& dc, graph_type& graph, bool usehash = false, bool userecent = false) :
      base_type(dc, graph),
      dht(-1),proc_num_edges(dc.numprocs()), usehash(usehash), userecent(userecent) { 

      INITIALIZE_TRACER(ob_ingress_compute_assignments, "Time spent in compute assignment");
     }