Code Example #1
File: unity_global.cpp Project: Bhushan1002/SFrame
 void unity_global::save_model(std::shared_ptr<model_base> model,
                               const std::string& model_wrapper,
                               const std::string& url) {
   logstream(LOG_INFO) << "Save model to " << sanitize_url(url) << std::endl;
   logstream(LOG_INFO) << "Model name: " << model->name() << std::endl;
   try {
     dir_archive dir;
     dir.open_directory_for_write(url);
     dir.set_metadata("contents", "model");
     oarchive oarc(dir);
     oarc.write(CLASS_MAGIC_HEADER, strlen(CLASS_MAGIC_HEADER));
     oarc << model->name();
     oarc << model_wrapper;
     oarc << *model;
     if (dir.get_output_stream()->fail()) {
       std::string message = "Fail to write.";
       log_and_throw_io_failure(message);
     }
     dir.close();
   } catch (std::ios_base::failure& e) {
     std::string message = "Unable to save model to " + sanitize_url(url) + ": " + e.what();
     log_and_throw_io_failure(message);
   } catch (std::string& e) {
     log_and_throw(std::string("Unable to save model to ") + sanitize_url(url) + ": " + e);
   } catch (...) {
     log_and_throw(std::string("Unknown Error: Unable to save model to ") + sanitize_url(url));
   }
 }
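The snippet above only covers the write path. As rough orientation, here is a minimal load-side sketch (the function name is hypothetical) that reads the header fields back in the order save_model wrote them; it assumes dir_archive::open_directory_for_read and iarchive behave as in Code Example #13, and that iarchive exposes a read() symmetric to the oarchive::write() call above.
 // Hypothetical counterpart to save_model: read the metadata back in the order
 // it was written. Assumes iarchive::read() mirrors oarchive::write().
 void load_model_header_sketch(const std::string& url,
                               std::string& model_name,
                               std::string& model_wrapper) {
   dir_archive dir;
   dir.open_directory_for_read(url);          // read-side API as in Code Example #13
   iarchive iarc(dir);
   std::vector<char> header(strlen(CLASS_MAGIC_HEADER));
   iarc.read(header.data(), header.size());   // magic header was written first
   iarc >> model_name;                        // then model->name()
   iarc >> model_wrapper;                     // then the wrapper string
   // the model itself would follow, e.g. iarc >> *model; once its type is known
 }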
Code Example #2
 template <typename T>
 inline std::string serialize_to_string(const T &t) {
   std::stringstream strm;
   oarchive oarc(strm);
   oarc << t;
   strm.flush();
   return strm.str();
 }
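A read-side counterpart (sketch only; the name deserialize_from_string_sketch is assumed for illustration) would wrap the serialized string in a stringstream and use iarchive, matching the iarchive usage in Code Examples #5 and #14:
 template <typename T>
 inline void deserialize_from_string_sketch(const std::string& s, T& t) {
   std::stringstream strm(s);   // wrap the serialized bytes in an input stream
   iarchive iarc(strm);         // iarchive reads from a std::istream
   iarc >> t;
 }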
Code Example #3
File: unity_sgraph.cpp Project: GYGit/SFrame
bool unity_sgraph::save_graph(std::string target, std::string format) {
  log_func_entry();
  try {
    if (format == "binary") {
      dir_archive dir;
      dir.open_directory_for_write(target);
      dir.set_metadata("contents", "graph");
      oarchive oarc(dir);
      if (dir.get_output_stream()->fail()) {
        log_and_throw_io_failure("Fail to write");
      }
      save(oarc);
      dir.close();
    } else if (format == "json") {
      save_sgraph_to_json(get_graph(), target);
    } else if (format == "csv") {
      save_sgraph_to_csv(get_graph(), target);
    } else {
      log_and_throw("Unable to save to format : " + format);
    }
  } catch (std::ios_base::failure& e) {
    std::string message =
        "Unable to save graph to " + sanitize_url(target) + ": " + e.what();
    log_and_throw_io_failure(message);
  } catch (std::string& e) {
    std::string message =
        "Unable to save graph to " + sanitize_url(target) + ": " + e;
    log_and_throw(message);
  } catch (...) {
    std::string message =
        "Unable to save graph to " + sanitize_url(target) + ": Unknown Error.";
    log_and_throw(message);
  }
  return true;
}
Code Example #4
File: mpi_tools.hpp Project: Bhushan1002/SFrame
    void send(const T& elem, const size_t id, const int tag = 0) {
#ifdef HAS_MPI
      // Get the mpi rank and size
      assert(id < size());
      // Serialize the local map
      graphlab::charstream cstrm(128);
      graphlab::oarchive oarc(cstrm);
      oarc << elem;
      cstrm.flush();
      char* send_buffer = cstrm->c_str();
      int send_buffer_size = cstrm->size();
      assert(send_buffer_size >= 0);

      int dest(id);
      // send the size
      int error = MPI_Send(&send_buffer_size,  // Send buffer
                           1,                  // send count
                           MPI_INT,            // send type
                           dest,               // destination
                           tag,                  // tag
                           MPI_COMM_WORLD);
      assert(error == MPI_SUCCESS);

      // send the actual content
      error = MPI_Send(send_buffer,         // send buffer
                       send_buffer_size,    // how much to send
                       MPI_BYTE,            // send type
                       dest,
                       tag,
                       MPI_COMM_WORLD);
      assert(error == MPI_SUCCESS);
#else
      logstream(LOG_FATAL) << "MPI not installed!" << std::endl;
#endif
    } // end of send
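The function above only covers the sending rank. A plausible receive-side counterpart (sketch only; the name recv_sketch is assumed) mirrors the non-root branch of Code Example #5: receive the size, receive the bytes, then deserialize with iarchive.
    template <typename T>
    void recv_sketch(T& elem, const size_t id, const int tag = 0) {
#ifdef HAS_MPI
      // receive the size of the serialized buffer first
      MPI_Status status;
      int recv_buffer_size(-1);
      int error = MPI_Recv(&recv_buffer_size, 1, MPI_INT,
                           int(id), tag, MPI_COMM_WORLD, &status);
      assert(error == MPI_SUCCESS);
      assert(recv_buffer_size >= 0);

      // receive the serialized bytes
      std::vector<char> recv_buffer(recv_buffer_size);
      error = MPI_Recv(&(recv_buffer[0]), recv_buffer_size, MPI_BYTE,
                       int(id), tag, MPI_COMM_WORLD, &status);
      assert(error == MPI_SUCCESS);

      // deserialize, as in the non-root branch of Code Example #5
      namespace bio = boost::iostreams;
      typedef bio::stream<bio::array_source> icharstream;
      icharstream strm(&(recv_buffer[0]), recv_buffer.size());
      graphlab::iarchive iarc(strm);
      iarc >> elem;
#else
      logstream(LOG_FATAL) << "MPI not installed!" << std::endl;
#endif
    } // end of recv_sketch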
Code Example #5
File: mpi_tools.hpp Project: Bhushan1002/SFrame
    void bcast(const size_t& root, T& elem) {
#ifdef HAS_MPI
      // Get the mpi rank and size
      if(mpi_tools::rank() == root) {
        // serialize the object
        graphlab::charstream cstrm(128);
        graphlab::oarchive oarc(cstrm);
        oarc << elem;
        cstrm.flush();
        char* send_buffer = cstrm->c_str();
        int send_buffer_size = cstrm->size();
        assert(send_buffer_size >= 0);

        // send the size of the data that follows
        int error = MPI_Bcast(&send_buffer_size,  // Send buffer
                              1,                  // send count
                              MPI_INT,            // send type
                              root,               // root rank
                              MPI_COMM_WORLD);
        assert(error == MPI_SUCCESS);

        // send the actual data
        error = MPI_Bcast(send_buffer,  // Send buffer
                          send_buffer_size,    // send count
                          MPI_BYTE,            // send type
                          root,               // root rank
                          MPI_COMM_WORLD);
        assert(error == MPI_SUCCESS);

      } else {
        int recv_buffer_size(-1);
        // recv the required buffer size
        int error = MPI_Bcast(&recv_buffer_size,  // recvbuffer
                              1,                  // recvcount
                              MPI_INT,            // recvtype
                              root,               // root rank
                              MPI_COMM_WORLD);
        assert(error == MPI_SUCCESS);
        assert(recv_buffer_size >= 0);

        std::vector<char> recv_buffer(recv_buffer_size);
        error = MPI_Bcast(&(recv_buffer[0]),  // recvbuffer
                          recv_buffer_size,                  // recvcount
                          MPI_BYTE,            // recvtype
                          root,               // root rank
                          MPI_COMM_WORLD);
        assert(error == MPI_SUCCESS);
        // construct the local element
        namespace bio = boost::iostreams;
        typedef bio::stream<bio::array_source> icharstream;
        icharstream strm(&(recv_buffer[0]), recv_buffer.size());
        graphlab::iarchive iarc(strm);
        iarc >> elem;

      }
#else
      logstream(LOG_FATAL) << "MPI not installed!" << std::endl;
#endif
    } // end of bcast
Code Example #6
size_t block_writer::write_typed_block(size_t segment_id,
                                       size_t column_id, 
                                       const std::vector<flexible_type>& data,
                                       block_info block) {
  auto serialization_buffer = m_buffer_pool.get_new_buffer();
  oarchive oarc(*serialization_buffer);
  typed_encode(data, block, oarc);
  size_t ret = write_block(segment_id, column_id, serialization_buffer->data(), block);
  m_buffer_pool.release_buffer(std::move(serialization_buffer));
  return ret;
}
Code Example #7
File: unity_sgraph.cpp Project: GYGit/SFrame
void unity_sgraph::save_reference(std::string target_dir) const {
  dir_archive dir;
  dir.open_directory_for_write(target_dir);
  dir.set_metadata("contents", "graph");
  oarchive oarc(dir);
  if (dir.get_output_stream()->fail()) {
    log_and_throw_io_failure("Fail to write");
  }
  save_reference(oarc);
  dir.close();
}
Code Example #8
/**
Synchronize variable with index i.
Call
*/
void distributed_glshared_manager::write_synchronize(size_t entry, bool async) {
//  logstream(LOG_DEBUG) << rmi.procid() << ": " << "write synchronize on " << entry << " async = " << async << std::endl;
  std::stringstream strm;
  oarchive oarc(strm);
  glsharedobjs[entry]->save(oarc);
  glsharedobjs[entry]->invalidated = false;
  if (async) {
    dht.set(entry, strm.str());
  }
  else {
    dht.set_synchronous(entry, strm.str());
  }
}
Code Example #9
File: mpi_tools.hpp Project: Bhushan1002/SFrame
    void gather(size_t root, const T& elem) {
#ifdef HAS_MPI
       // Get the mpi rank and size
      assert(root < size_t(std::numeric_limits<int>::max()));
      int mpi_root(root);

      // Serialize the local map
      graphlab::charstream cstrm(128);
      graphlab::oarchive oarc(cstrm);
      oarc << elem;
      cstrm.flush();
      char* send_buffer = cstrm->c_str();
      int send_buffer_size = cstrm->size();
      assert(send_buffer_size >= 0);

      // Compute the sizes
      int error = MPI_Gather(&send_buffer_size,  // Send buffer
                             1,                  // send count
                             MPI_INT,            // send type
                             NULL,               // recvbuffer
                             1,                  // recvcount
                             MPI_INT,           // recvtype
                             mpi_root,          // root rank
                             MPI_COMM_WORLD);
      assert(error == MPI_SUCCESS);


      // recv all the maps
      error = MPI_Gatherv(send_buffer,         // send buffer
                          send_buffer_size,    // how much to send
                          MPI_BYTE,            // send type
                          NULL,                // recv buffer
                          NULL,                // amount to recv
                                               // for each process
                          NULL,                // where to place data
                          MPI_BYTE,
                          mpi_root,            // root rank
                          MPI_COMM_WORLD);
      assert(error == MPI_SUCCESS);
#else
      logstream(LOG_FATAL) << "MPI not installed!" << std::endl;
#endif
    } // end of gather
Code Example #10
File: graphio.hpp Project: Hannah1999/Dato-Core
    bool save_binary(const GraphType& g, const std::string& prefix) {
      g.dc().full_barrier();
      ASSERT_TRUE (g.is_finalized());
      timer savetime;  savetime.start();
      std::string fname = prefix + tostr(g.procid()) + ".bin";
      logstream(LOG_INFO) << "Save graph to " << fname << std::endl;

      general_ofstream fout(fname, true);
      if (!fout.good()) {
        logstream(LOG_ERROR) << "\n\tError opening file: " << fname << std::endl;
        return false;
      }
      oarchive oarc(fout);
      oarc << g;
      logstream(LOG_INFO) << "Finish saving graph to " << fname << std::endl
                          << "Finished saving binary graph: "
                          << savetime.current_time() << std::endl;
      g.dc().full_barrier();
      fout.close();
      return true;
    } // end of save
Code Example #11
File: sarray_source.hpp Project: pauldevos/SFrame
    static std::shared_ptr<planner_node> make_planner_node(
        std::shared_ptr<sarray<flexible_type> > source, size_t begin_index = 0, size_t _end_index = -1) {
        std::stringstream strm;
        oarchive oarc(strm);
        oarc << source->get_index_info();
        auto type = source->get_type();

        size_t end_index = (_end_index == size_t(-1)) ? source->size() : _end_index;

        DASSERT_LE(begin_index, end_index);
        DASSERT_LE(end_index, source->size());

        // we need to keep a copy of the source in the node for reference counting
        // reasons.
        return planner_node::make_shared(planner_node_type::SARRAY_SOURCE_NODE,
        {   {"index", strm.str()},
            {"type", (flex_int)type},
            {"begin_index", begin_index},
            {"end_index", end_index}
        },
        {{"sarray", any(source)}});
    }
Code Example #12
distributed_glshared_manager::distributed_glshared_manager(distributed_control &dc):
                  rmi(dc, this),
                  glsharedobjs(distgl_impl::get_global_dist_glshared_registry()),
                  dht(dc){
  dht.attach_modification_trigger(boost::bind(&distributed_glshared_manager::invalidate,
                                              this, _1, _2, _3));
  for (size_t i = 0; i < glsharedobjs.size(); ++i) {
    logstream(LOG_INFO) << "registered entry " << i << " with type " 
                        << glsharedobjs[i]->type_name() << std::endl;
    if (glsharedobjs[i]->manager != NULL) {
      logger(LOG_WARNING, "glshared objects are still attached to a previous manager!");
    }
    glsharedobjs[i]->manager = this;
    glsharedobjs[i]->id = i;
    objrevmap[glsharedobjs[i]] = i;
    if (dht.owning_machine(i) == rmi.procid()) {
      std::stringstream strm;
      oarchive oarc(strm);
      glsharedobjs[i]->save(oarc);
      dht.set(i, strm.str());
    }
  }
  // perform the sets
}
Code Example #13
template <typename T, typename U>
void _save_and_load_object(T& dest, const U& src, std::string dir) {

  // Create the directory
  boost::filesystem::create_directory(dir);
  _add_directory_to_deleter(dir); 
  
  std::string arc_name = dir + "/test_archive";

  uint64_t random_number = hash64(random::fast_uniform<size_t>(0,size_t(-1)));

  // Save it
  dir_archive archive_write;
  archive_write.open_directory_for_write(arc_name);

  graphlab::oarchive oarc(archive_write);

  oarc << src << random_number;

  archive_write.close();
  
  // Load it
  dir_archive archive_read;
  archive_read.open_directory_for_read(arc_name);

  graphlab::iarchive iarc(archive_read);

  iarc >> dest;
  
  uint64_t test_number;

  iarc >> test_number;

  archive_read.close();

  ASSERT_EQ(test_number, random_number);
}
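For context, a hedged usage sketch of the round-trip helper above. The container type, values, and directory name are chosen purely for illustration, and it assumes the archive provides serializers for std::map, as GraphLab's STL serialization headers typically do:
// Round-trip a simple serializable value through a dir_archive on disk.
std::map<std::string, size_t> original{{"vertices", 7}, {"edges", 42}};
std::map<std::string, size_t> restored;
_save_and_load_object(restored, original, "oarc_roundtrip_test_dir");  // path is illustrative
ASSERT_EQ(restored.size(), original.size());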
Code Example #14
File: mpi_tools.hpp Project: Bhushan1002/SFrame
    void gather(const T& elem, std::vector<T>& results) {
#ifdef HAS_MPI
      // Get the mpi rank and size
      size_t mpi_size(size());
      int mpi_rank(rank());
      if(results.size() != mpi_size) results.resize(mpi_size);

      // Serialize the local map
      graphlab::charstream cstrm(128);
      graphlab::oarchive oarc(cstrm);
      oarc << elem;
      cstrm.flush();
      char* send_buffer = cstrm->c_str();
      int send_buffer_size = cstrm->size();
      assert(send_buffer_size >= 0);

      // Compute the sizes
      std::vector<int> recv_sizes(mpi_size, -1);
      int error = MPI_Gather(&send_buffer_size,  // Send buffer
                             1,                  // send count
                             MPI_INT,            // send type
                             &(recv_sizes[0]),  // recvbuffer
                             1,                  // recvcount
                             MPI_INT,           // recvtype
                             mpi_rank,          // root rank
                             MPI_COMM_WORLD);
      assert(error == MPI_SUCCESS);
      for(size_t i = 0; i < recv_sizes.size(); ++i)
        assert(recv_sizes[i] >= 0);


      // Construct offsets
      std::vector<int> recv_offsets(recv_sizes);
      int sum = 0, tmp = 0;
      for(size_t i = 0; i < recv_offsets.size(); ++i) {
        tmp = recv_offsets[i]; recv_offsets[i] = sum; sum += tmp;
      }

      // if necessary, reallocate recv_buffer
      std::vector<char> recv_buffer(sum);

      // recv all the maps
      error = MPI_Gatherv(send_buffer,         // send buffer
                          send_buffer_size,    // how much to send
                          MPI_BYTE,            // send type
                          &(recv_buffer[0]),   // recv buffer
                          &(recv_sizes[0]),    // amount to recv
                                               // for each process
                          &(recv_offsets[0]),  // where to place data
                          MPI_BYTE,
                          mpi_rank,            // root rank
                          MPI_COMM_WORLD);
      assert(error == MPI_SUCCESS);
      // Update the local map
      namespace bio = boost::iostreams;
      typedef bio::stream<bio::array_source> icharstream;
      icharstream strm(&(recv_buffer[0]), recv_buffer.size());
      graphlab::iarchive iarc(strm);
      for(size_t i = 0; i < results.size(); ++i) {
        iarc >> results[i];
      }
#else
      logstream(LOG_FATAL) << "MPI not installed!" << std::endl;
#endif
    } // end of gather
Code Example #15
File: mpi_tools.hpp Project: Bhushan1002/SFrame
    void all2all(const std::vector<T>& send_data,
                 std::vector<T>& recv_data) {
#ifdef HAS_MPI
      // Get the mpi rank and size
      size_t mpi_size(size());
      ASSERT_EQ(send_data.size(), mpi_size);
      if(recv_data.size() != mpi_size) recv_data.resize(mpi_size);

      // Serialize the output data and compute buffer sizes
      graphlab::charstream cstrm(128);
      graphlab::oarchive oarc(cstrm);
      std::vector<int> send_buffer_sizes(mpi_size);
      for(size_t i = 0; i < mpi_size; ++i) {
        const size_t OLD_SIZE(cstrm->size());
        oarc << send_data[i];
        cstrm.flush();
        const size_t ELEM_SIZE(cstrm->size() - OLD_SIZE);
        send_buffer_sizes[i] = ELEM_SIZE;
      }
      cstrm.flush();
      char* send_buffer = cstrm->c_str();
      std::vector<int> send_offsets(send_buffer_sizes);
      int total_send = 0;
      for(size_t i = 0; i < send_offsets.size(); ++i) {
        const int tmp = send_offsets[i];
        send_offsets[i] = total_send;
        total_send += tmp;
      }

      // AlltoAll scatter the buffer sizes
      std::vector<int> recv_buffer_sizes(mpi_size);
      int error = MPI_Alltoall(&(send_buffer_sizes[0]),
                               1,
                               MPI_INT,
                               &(recv_buffer_sizes[0]),
                               1,
                               MPI_INT,
                               MPI_COMM_WORLD);
      ASSERT_EQ(error, MPI_SUCCESS);

      // Construct offsets
      std::vector<int> recv_offsets(recv_buffer_sizes);
      int total_recv = 0;
      for(size_t i = 0; i < recv_offsets.size(); ++i){
        const int tmp = recv_offsets[i];
        recv_offsets[i] = total_recv;
        total_recv += tmp;
      }
      // Do the massive send
      std::vector<char> recv_buffer(total_recv);
      error = MPI_Alltoallv(send_buffer,
                            &(send_buffer_sizes[0]),
                            &(send_offsets[0]),
                            MPI_BYTE,
                            &(recv_buffer[0]),
                            &(recv_buffer_sizes[0]),
                            &(recv_offsets[0]),
                            MPI_BYTE,
                            MPI_COMM_WORLD);
      ASSERT_EQ(error, MPI_SUCCESS);

      // Deserialize the result
      namespace bio = boost::iostreams;
      typedef bio::stream<bio::array_source> icharstream;
      icharstream strm(&(recv_buffer[0]), recv_buffer.size());
      graphlab::iarchive iarc(strm);
      for(size_t i = 0; i < recv_data.size(); ++i) {
        iarc >> recv_data[i];
      }
#else
      logstream(LOG_FATAL) << "MPI not installed!" << std::endl;
#endif
    } // end of mpi all to all
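Finally, a hypothetical call site for the all-to-all exchange above. The mpi_tools:: qualification and the use of size(), rank(), and tostr() here are assumptions based on the other examples in this section:
      // Each rank prepares one element per destination rank, then exchanges them.
      std::vector<std::string> outgoing(mpi_tools::size()), incoming;
      for (size_t r = 0; r < outgoing.size(); ++r)
        outgoing[r] = "greetings from rank " + tostr(mpi_tools::rank());
      mpi_tools::all2all(outgoing, incoming);
      // incoming[r] now holds the element that rank r sent to this rank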