Example #1
int main(int argc, const char ** argv) {
  print_copyright();

  /* CE_Graph initialization will read the command line arguments and the configuration file. */
  CE_Graph_init(argc, argv);

  /* Metrics object for keeping track of performance counters
     and other information. Currently required. */
  metrics m("biassgd-inmemory-factors");

  biassgd_lambda    = get_option_float("biassgd_lambda", 1e-3);
  biassgd_gamma     = get_option_float("biassgd_gamma", 1e-3);
  biassgd_step_dec  = get_option_float("biassgd_step_dec", 0.9);


  parse_command_line_args();
  parse_implicit_command_line();


  /* Preprocess data if needed, or discover preprocess files */
  int nshards = convert_matrixmarket<EdgeDataType>(training, 0, 0, 3, TRAINING, false);
  init_feature_vectors<std::vector<vertex_data> >(M+N, latent_factors_inmem, !load_factors_from_file);
  if (validation != ""){
    int vshards = convert_matrixmarket<EdgeDataType>(validation, 0, 0, 3, VALIDATION, false);
    init_validation_rmse_engine<VertexDataType, EdgeDataType>(pvalidation_engine, vshards, &bias_sgd_predict);
  }

  /* load initial state from disk (optional) */
  if (load_factors_from_file){
    load_matrix_market_matrix(training + "_U.mm", 0, D);
    load_matrix_market_matrix(training + "_V.mm", M, D);
    vec user_bias = load_matrix_market_vector(training +"_U_bias.mm", false, true);
    assert(user_bias.size() == M);
    vec item_bias = load_matrix_market_vector(training +"_V_bias.mm", false, true);
    assert(item_bias.size() == N);
    for (uint i=0; i<M+N; i++){
      latent_factors_inmem[i].bias = ((i<M)?user_bias[i] : item_bias[i-M]);
    }
    vec gm = load_matrix_market_vector(training + "_global_mean.mm", false, true);
    globalMean = gm[0];
  }


  /* Run */
  BIASSGDVerticesInMemProgram program;
  CE_Graph_engine<VertexDataType, EdgeDataType> engine(training, nshards, false, m); 
  set_engine_flags(engine);
  pengine = &engine;
  engine.run(program, niters);

  /* Output latent factor matrices in matrix-market format */
  output_biassgd_result(training);
  test_predictions(&bias_sgd_predict);    


  /* Report execution metrics */
  if (!quiet)
    metrics_report(m);
  return 0;
}
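The loaders in the block above all index one flat in-memory vertex array. A minimal sketch of the convention they rely on (inferred from the bias-loading loop; the helper itself is hypothetical and not part of the toolkit, and it assumes the surrounding globals M, N and the vec type):

/* Sketch only: users occupy vertex ids 0..M-1 and items occupy M..M+N-1,
   which is why "_V.mm" and the item biases are loaded starting at offset M. */
inline double bias_for_vertex(uint i, const vec &user_bias, const vec &item_bias) {
  return (i < M) ? user_bias[i]        // user vertex
                 : item_bias[i - M];   // item vertex, shifted past the M users
}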
Example #2
int main(int argc, const char ** argv) {

  print_copyright();
 
  /* GraphChi initialization will read the command line 
     arguments and the configuration file. */
  graphchi_init(argc, argv);

  /* Metrics object for keeping track of performance counters
     and other information. Currently required. */
  metrics m("als-inmemory-factors");

  lambda        = get_option_float("lambda", 0.065);

  parse_command_line_args();
  parse_implicit_command_line();
  if (unittest == 1){
    if (training == "") training = "test_wals"; 
    niters = 100;
  }

  /* Preprocess data if needed, or discover preprocess files */
  int nshards = convert_matrixmarket4<edge_data>(training);
  init_feature_vectors<std::vector<vertex_data> >(M+N, latent_factors_inmem, !load_factors_from_file);
  if (validation != ""){
    int vshards = convert_matrixmarket4<EdgeDataType>(validation, false, M==N, VALIDATION, 0);
    init_validation_rmse_engine<VertexDataType, EdgeDataType>(pvalidation_engine, vshards, &wals_predict, true, false, 0);
  }

  if (load_factors_from_file){
    load_matrix_market_matrix(training + "_U.mm", 0, D);
    load_matrix_market_matrix(training + "_V.mm", M, D);
  }


  /* Run */
  WALSVerticesInMemProgram program;
  graphchi_engine<VertexDataType, EdgeDataType> engine(training, nshards, false, m); 
  set_engine_flags(engine);
  pengine = &engine;
  engine.run(program, niters);

  /* Output latent factor matrices in matrix-market format */
  output_als_result(training);
  test_predictions(&wals_predict);    

  if (unittest == 1){
    if (dtraining_rmse > 0.03)
      logstream(LOG_FATAL)<<"Unit test 1 failed. Training RMSE is: " << training_rmse << std::endl;
    if (dvalidation_rmse > 0.61)
      logstream(LOG_FATAL)<<"Unit test 1 failed. Validation RMSE is: " << validation_rmse << std::endl;

  }
 
  /* Report execution metrics */
  if (!quiet)
    metrics_report(m);
  return 0;
}
Example #3
int main(int argc, const char ** argv) {

  print_copyright();
 
  /* GraphChi initialization will read the command line 
     arguments and the configuration file. */
  graphchi_init(argc, argv);

  /* Metrics object for keeping track of performance counters
     and other information. Currently required. */
  metrics m("als-inmemory-factors");

  lambda        = get_option_float("lambda", 0.065);
  user_sparsity = get_option_float("user_sparsity", 0.9);
  movie_sparsity = get_option_float("movie_sparsity", 0.9);
  algorithm      = get_option_int("algorithm", SPARSE_USR_FACTOR);

  parse_command_line_args();
  parse_implicit_command_line(); 

  if (user_sparsity < 0.5 || user_sparsity >= 1)
    logstream(LOG_FATAL)<<"Sparsity level should be [0.5,1). Please run again using --user_sparsity=XX in this range" << std::endl;

  if (movie_sparsity < 0.5 || movie_sparsity >= 1)
    logstream(LOG_FATAL)<<"Sparsity level should be [0.5,1). Please run again using --movie_sparsity=XX in this range" << std::endl;

  if (algorithm != SPARSE_USR_FACTOR && algorithm != SPARSE_BOTH_FACTORS && algorithm != SPARSE_ITM_FACTOR)
    logstream(LOG_FATAL)<<"Algorithm should be 1 for SPARSE_USR_FACTOR, 2 for SPARSE_ITM_FACTOR and 3 for SPARSE_BOTH_FACTORS" << std::endl;

  /* Preprocess data if needed, or discover preprocess files */
  int nshards = convert_matrixmarket<EdgeDataType>(training);
  init_feature_vectors<std::vector<vertex_data> >(M+N, latent_factors_inmem, !load_factors_from_file);
  if (validation != ""){
    int vshards = convert_matrixmarket<EdgeDataType>(validation, 0, 0, 3, VALIDATION);
    init_validation_rmse_engine<VertexDataType, EdgeDataType>(pvalidation_engine, vshards, &sparse_als_predict);
  }
 

  if (load_factors_from_file){
    load_matrix_market_matrix(training + "_U.mm", 0, D);
    load_matrix_market_matrix(training + "_V.mm", M, D);
  }

  /* Run */
  ALSVerticesInMemProgram program;
  graphchi_engine<VertexDataType, EdgeDataType> engine(training, nshards, false, m); 
  set_engine_flags(engine);
  pengine = &engine;
  engine.run(program, niters);

  /* Output latent factor matrices in matrix-market format */
  output_als_result(training);
  test_predictions(&sparse_als_predict);    

  /* Report execution metrics */
  if (!quiet)
    metrics_report(m);
  return 0;
}
Example #4
int main(int argc, const char ** argv) {

    print_copyright();

    /* GraphChi initialization will read the command line arguments and the configuration file. */
    graphchi_init(argc, argv);

    /* Metrics object for keeping track of performance counters
       and other information. Currently required. */
    metrics m("rbm-inmemory-factors");

    /* Basic arguments for RBM algorithm */
    rbm_bins      = get_option_int("rbm_bins", rbm_bins);
    rbm_alpha     = get_option_float("rbm_alpha", rbm_alpha);
    rbm_beta      = get_option_float("rbm_beta", rbm_beta);
    rbm_mult_step_dec  = get_option_float("rbm_mult_step_dec", rbm_mult_step_dec);
    rbm_scaling   = get_option_float("rbm_scaling", rbm_scaling);

    parse_command_line_args();
    parse_implicit_command_line();

    mytimer.start();

    /* Preprocess data if needed, or discover preprocess files */
    int nshards = convert_matrixmarket<float>(training);

    rbm_init();

    if (validation != "") {
        int vshards = convert_matrixmarket<EdgeDataType>(validation, 0, 0, 3, VALIDATION);
        init_validation_rmse_engine<VertexDataType, EdgeDataType>(pvalidation_engine, vshards, &rbm_predict);
    }

    /* load initial state from disk (optional) */
    if (load_factors_from_file) {
        load_matrix_market_matrix(training + "_U.mm", 0, 3*D);
        load_matrix_market_matrix(training + "_V.mm", M, rbm_bins*(D+1));
    }

    print_config();

    /* Run */
    RBMVerticesInMemProgram program;
    graphchi_engine<VertexDataType, EdgeDataType> engine(training, nshards, false, m);
    set_engine_flags(engine);
    pengine = &engine;
    engine.run(program, niters);

    /* Output latent factor matrices in matrix-market format */
    output_rbm_result(training);
    test_predictions(&rbm_predict);


    /* Report execution metrics */
    if (!quiet)
        metrics_report(m);
    return 0;
}
Example #5
int main(int argc, const char ** argv) {
  /* GraphChi initialization will read the command line arguments and the configuration file. */
  graphchi_init(argc, argv);

  /* Metrics object for keeping track of performance counters
     and other information. Currently required. */
  metrics m("climf-inmemory-factors");

  /* Basic arguments for application. NOTE: File will be automatically 'sharded'. */
  sgd_lambda    = get_option_float("sgd_lambda", 1e-3);
  sgd_gamma     = get_option_float("sgd_gamma", 1e-4);
  sgd_step_dec  = get_option_float("sgd_step_dec", 1.0);
  binary_relevance_thresh = get_option_float("binary_relevance_thresh", 0);
  halt_on_mrr_decrease = get_option_int("halt_on_mrr_decrease", 0);
  num_ratings = get_option_int("num_ratings", 10000); //number of top predictions over which we compute actual MRR
  verbose     = get_option_int("verbose", 0);
  debug       = get_option_int("debug", 0);

  parse_command_line_args();
  parse_implicit_command_line();

  /* Preprocess data if needed, or discover preprocess files */
  bool allow_square = false;
  int nshards = convert_matrixmarket<EdgeDataType>(training, 0, 0, 3, TRAINING, allow_square);
  init_feature_vectors<std::vector<vertex_data> >(M+N, latent_factors_inmem, !load_factors_from_file, 0.01);

  if (validation != ""){
    int vshards = convert_matrixmarket<EdgeDataType>(validation, 0, 0, 3, VALIDATION);
    init_mrr_engine<VertexDataType, EdgeDataType>(pvalidation_engine, vshards);
  }

  if (load_factors_from_file)
  {
    load_matrix_market_matrix(training + "_U.mm", 0, D);
    load_matrix_market_matrix(training + "_V.mm", M, D);
  }

  print_config();

  /* Run */
  SGDVerticesInMemProgram program;
  graphchi_engine<VertexDataType, EdgeDataType> engine(training, nshards, false, m);
  set_engine_flags(engine);
  pengine = &engine;
  engine.run(program, niters);

  /* Output latent factor matrices in matrix-market format */
  output_sgd_result(training);
  test_predictions(&climf_predict);

  /* Report execution metrics */
  if (!quiet)
    metrics_report(m);
  
  return 0;
}
Example #6
int main(int argc, const char ** argv) {

  print_copyright();

  /* GraphChi initialization will read the command line arguments and the configuration file. */
  graphchi_init(argc, argv);

  /* Metrics object for keeping track of performance counters
     and other information. Currently required. */
  metrics m("sgd-inmemory-factors");

  /* Basic arguments for application. NOTE: File will be automatically 'sharded'. */
  sgd_lambda    = get_option_float("sgd_lambda", 1e-3);
  sgd_gamma     = get_option_float("sgd_gamma", 1e-3);
  sgd_step_dec  = get_option_float("sgd_step_dec", 0.9);

  parse_command_line_args();
  parse_implicit_command_line();

  /* Preprocess data if needed, or discover preprocess files */
  int nshards = convert_matrixmarket<EdgeDataType>(training, NULL, 0, 0, 3, TRAINING, false);
  init_feature_vectors<std::vector<vertex_data> >(M+N, latent_factors_inmem, !load_factors_from_file);
  if (validation != ""){
    int vshards = convert_matrixmarket<EdgeDataType>(validation, NULL, 0, 0, 3, VALIDATION, false);
    init_validation_rmse_engine<VertexDataType, EdgeDataType>(pvalidation_engine, vshards, &sgd_predict);
  }

  /* load initial state from disk (optional) */
  if (load_factors_from_file){
    load_matrix_market_matrix(training + "_U.mm", 0, D);
    load_matrix_market_matrix(training + "_V.mm", M, D);
  }

  print_config();

  /* Run */
  SGDVerticesInMemProgram program;
  graphchi_engine<VertexDataType, EdgeDataType> engine(training, nshards, false, m); 
  set_engine_flags(engine);
  pengine = &engine;
  engine.run(program, niters);

  /* Output latent factor matrices in matrix-market format */
  output_sgd_result(training);
  test_predictions(&sgd_predict);    

  /* Report execution metrics */
  if (!quiet)
    metrics_report(m);
  
  return 0;
}
Example #7
int main(int argc, const char ** argv) {


  print_copyright(); 
 
  /* GraphChi initialization will read the command line 
     arguments and the configuration file. */
  graphchi_init(argc, argv);
  metrics m("nmf-inmemory-factors");

  parse_command_line_args();
  parse_implicit_command_line();

  niters *= 2; //each NMF iteration is composed of two sub iters

  /* Preprocess data if needed, or discover preprocess files */
  int nshards = convert_matrixmarket<float>(training, 0, 0, 3, TRAINING, false);
  init_feature_vectors<std::vector<vertex_data> >(M+N, latent_factors_inmem, !load_factors_from_file);
  if (validation != ""){
    int vshards = convert_matrixmarket<EdgeDataType>(validation, 0, 0, 3, VALIDATION, false);
    if (vshards != -1)
       init_validation_rmse_engine<VertexDataType, EdgeDataType>(pvalidation_engine, vshards, &nmf_predict);
  }
 
  if (load_factors_from_file){
    load_matrix_market_matrix(training + "_U.mm", 0, D);
    load_matrix_market_matrix(training + "_V.mm", M, D);
  }

  sum_of_item_latent_features = zeros(D);
  sum_of_user_latent_feautres = zeros(D);

  /* Run */
  NMFVerticesInMemProgram program;
  graphchi_engine<VertexDataType, EdgeDataType> engine(training, nshards, false, m); 
  set_engine_flags(engine);
  pengine = &engine;
  engine.run(program, niters);

  /* Output latent factor matrices in matrix-market format */
  output_nmf_result(training);
  test_predictions(&nmf_predict);    

  /* Report execution metrics */
  if (!quiet)
    metrics_report(m);

  return 0;
}
Example #8
int main(int argc, const char ** argv) {

  print_copyright();

  /* GraphChi initialization will read the command line arguments and the configuration file. */
  graphchi_init(argc, argv);

  /* Metrics object for keeping track of performance counters
     and other information. Currently required. */
  metrics m("sgd-inmemory-factors");

  algorithm     = get_option_string("algorithm", "global_mean");
  if (algorithm == "global_mean")
    algo = GLOBAL_MEAN;
  else if (algorithm == "user_mean")
    algo = USER_MEAN;
  else if (algorithm == "item_mean")
    algo = ITEM_MEAN;
  else logstream(LOG_FATAL)<<"Unsupported algorithm name. Should be --algorithm=XX where XX is one of [global_mean,user_mean,item_mean] for example --algorithm=global_mean" << std::endl;


  parse_command_line_args();
  mytimer.start();

  /* Preprocess data if needed, or discover preprocess files */
  int nshards = convert_matrixmarket<float>(training, NULL, 0, 0, 3, TRAINING, false);
  init_feature_vectors<std::vector<vertex_data> >(M+N, latent_factors_inmem, false);
  rmse_vec = zeros(number_of_omp_threads());
  print_config();

  /* Run */
  BaselineVerticesInMemProgram program;
  graphchi_engine<VertexDataType, EdgeDataType> engine(training, nshards, false, m); 
  set_engine_flags(engine); 
  pengine = &engine;
  engine.run(program, 1);

  if (algo == USER_MEAN || algo == ITEM_MEAN)
    output_baseline_result(training);
  test_predictions(&baseline_predict);    

  /* Report execution metrics */
  if (!quiet)
    metrics_report(m);
  return 0;
}
Example #9
  /**
   * Called after an iteration has finished.
   */
  void after_iteration(int iteration, graphchi_context &gcontext) {
    if (iteration == pmf_burn_in){
      printf("Finished burn-in period. starting to aggregate samples\n");
    }
    if (pmf_additional_output && iiter >= pmf_burn_in){
        char buf[256];
        sprintf(buf, "%s-%d", training.c_str(), iiter-pmf_burn_in);
        output_pmf_result(buf);
    }
 
    double res = training_rmse(iteration, gcontext);
    sample_hyperpriors(res);
    rmse_index = 0;
    rmse_type = VALIDATION;
    validation_rmse(&pmf_predict, gcontext, 3, &validation_avgprod, pmf_burn_in);
    if (iteration >= pmf_burn_in){
      rmse_index = 0;
      rmse_type = TEST;
      test_predictions(&pmf_predict, &gcontext, iiter == niters-1, &test_avgprod);
    }
    iiter++;
  }
int main(int argc, const char ** argv) {

 // print_copyright();
  write_copyright();
  /* GraphChi initialization will read the command line arguments and the configuration file. */
  graphchi_init(argc, argv);

  /* Metrics object for keeping track of performance counters
     and other information. Currently required. */
  metrics m("sgd-inmemory-factors");

  /* Basic arguments for application. NOTE: File will be automatically 'sharded'. */
  sgd_lambda    = get_option_float("sgd_lambda", 1e-3);
  sgd_gamma     = get_option_float("sgd_gamma", 1e-3);
  sgd_step_dec  = get_option_float("sgd_step_dec", 0.9);

  int file_format   = get_option_int("ff", 3);


  parse_command_line_args();
  parse_implicit_command_line();

  /* Preprocess data if needed, or discover preprocess files */
  int nshards = convert_matrixmarket<EdgeDataType>(training, 0, 0, file_format, TRAINING, false);
  init_feature_vectors<std::vector<vertex_data> >(M+N, latent_factors_inmem, !load_factors_from_file);
  if (validation != ""){
    int vshards = convert_matrixmarket<EdgeDataType>(validation, 0, 0, 3, VALIDATION, false);
    init_validation_rmse_engine<VertexDataType, EdgeDataType>(pvalidation_engine, vshards, &sgd_predict);
  }

  /* load initial state from disk (optional) */
  if (load_factors_from_file){
    load_matrix_market_matrix(training + "_U.mm", 0, D);
    load_matrix_market_matrix(training + "_V.mm", M, D);
  }

  print_config();

  /* Run */
  SGDVerticesInMemProgram program;
  graphchi_engine<VertexDataType, EdgeDataType> engine(training, nshards, false, m); 
  set_engine_flags(engine);
  pengine = &engine;

  timer train_timer;
  engine.run(program, niters);
 // std::cout << "Trn Time for file test: " << std::setw(10) << train_timer.current_time() / niters << std::endl;

  std::ofstream ofs(result.c_str(), std::ofstream::out | std::ofstream::app);
  ofs << D << " " << train_timer.current_time() << " ";


  /* Run TopN program */
  n_top = get_option_int("n_int", 10);
  /*timer test_timer1;

  ofs << test_timer1.current_time() << " ";*/
  //run_general_topn_program(pengine, &latent_factors_inmem, &sgd_predict);
  timer index_timer;
  kd_Node* mroot = init_kdtree(&latent_factors_inmem);
  ofs << index_timer.current_time() << " ";
  timer test_timer;
  /* construct kd tree index */ 
//  ofs << "constructing index: " << test_timer.current_time() << " ";
  run_kd_topn_program(pengine, &latent_factors_inmem, mroot);

 // std::coua << "Tst Time: " << std::setw(10) << test_timer.current_time() << std::endl;
  ofs << test_timer.current_time() << std::endl;
  ofs.close();
  /* Output latent factor matrices in matrix-market format */
  output_sgd_result(training);
  test_predictions(&sgd_predict);    
  /* Report execution metrics */
  if (!quiet)
    metrics_report(m);
  
  return 0;
}
Example #11
int main(int argc, const char ** argv) {

  print_copyright();

  /* GraphChi initialization will read the command line arguments and the configuration file. */
  graphchi_init(argc, argv);

  /* Metrics object for keeping track of performance counters
     and other information. Currently required. */
  metrics m("svdpp-inmemory-factors");

  svdpp.step_dec  =   get_option_float("svdpp_step_dec", 0.9);
  svdpp.itmBiasStep  =   get_option_float("svdpp_item_bias_step", 1e-3);
  svdpp.itmBiasReg =   get_option_float("svdpp_item_bias_reg", 1e-3);
  svdpp.usrBiasStep  =   get_option_float("svdpp_user_bias_step", 1e-3);
  svdpp.usrBiasReg  =   get_option_float("svdpp_user_bias_reg", 1e-3);
  svdpp.usrFctrStep  =   get_option_float("svdpp_user_factor_step", 1e-3);
  svdpp.usrFctrReg  =   get_option_float("svdpp_user_factor_reg", 1e-3);
  svdpp.itmFctrReg =   get_option_float("svdpp_item_factor_reg", 1e-3);
  svdpp.itmFctrStep =   get_option_float("svdpp_item_factor_step", 1e-3);
  svdpp.itmFctr2Reg =   get_option_float("svdpp_item_factor2_reg", 1e-3);
  svdpp.itmFctr2Step =   get_option_float("svdpp_item_factor2_step", 1e-3);

  parse_command_line_args();
  parse_implicit_command_line();

  /* Preprocess data if needed, or discover preprocess files */
  int nshards = convert_matrixmarket<EdgeDataType>(training, 0, 0, 3, TRAINING, false);
  if (validation != ""){
    int vshards = convert_matrixmarket<EdgeDataType>(validation, 0, 0, 3, VALIDATION, false);
    init_validation_rmse_engine<VertexDataType, EdgeDataType>(pvalidation_engine, vshards, &svdpp_predict);
  }

  svdpp_init();

  if (load_factors_from_file){
    load_matrix_market_matrix(training + "_U.mm", 0, 2*D);
    load_matrix_market_matrix(training + "_V.mm", M, D);
    vec user_bias = load_matrix_market_vector(training +"_U_bias.mm", false, true);
    assert(user_bias.size() == M);
    vec item_bias = load_matrix_market_vector(training +"_V_bias.mm", false, true);
    assert(item_bias.size() == N);
    for (uint i=0; i<M+N; i++){
      latent_factors_inmem[i].bias = ((i<M)?user_bias[i] : item_bias[i-M]);
    }
    vec gm = load_matrix_market_vector(training + "_global_mean.mm", false, true);
    globalMean = gm[0];
  }

  /* Run */
  SVDPPVerticesInMemProgram program;
  graphchi_engine<VertexDataType, EdgeDataType> engine(training, nshards, false, m); 
  set_engine_flags(engine);
  pengine = &engine;
  engine.run(program, niters);

  /* Output latent factor matrices in matrix-market format */
  output_svdpp_result(training);
  test_predictions(&svdpp_predict);    


  /* Report execution metrics */
  if (!quiet)
    metrics_report(m);
  return 0;
}
Example #12
File: bsvd_coor.cpp  Project: banglh/my_CF
int main(int argc, const char ** argv) {

	print_copyright();

	/* GraphChi initialization will read the command line
	 arguments and the configuration file. */
	graphchi_init(argc, argv);

	/* Metrics object for keeping track of performance counters
	 and other information. Currently required. */
	metrics m("bsvd_coor-inmemory-factors");

	/* Basic arguments for application. NOTE: File will be automatically 'sharded'. */
	alpha = get_option_float("alpha", 1.0);
	lambda = get_option_float("lambda", 1.0);

	parse_command_line_args();
	parse_implicit_command_line();

	/* Preprocess data if needed, or discover preprocess files */
	int nshards = convert_matrixmarket<EdgeDataType>(training, 0, 0, 3, TRAINING, false);

	// initialize features vectors
	std::string initType;
	switch (init_features_type) {
	case 1: // bounded random
		// randomly initialize feature vectors so that rmin < rate < rmax
		initType = "bounded-random";
		init_random_bounded<std::vector<vertex_data> >(latent_factors_inmem, !load_factors_from_file);
		break;
	case 2: // baseline
		initType = "baseline";
		init_baseline<std::vector<vertex_data> >(latent_factors_inmem);
		load_matrix_market_matrix(training + "-baseline_P.mm", 0, D);
		load_matrix_market_matrix(training + "-baseline_Q.mm", M, D);
		break;
	case 3: // random
		initType = "random";
		init_feature_vectors<std::vector<vertex_data> >(M + N, latent_factors_inmem, !load_factors_from_file);
		break;
	default: // random
		initType = "random";
		init_feature_vectors<std::vector<vertex_data> >(M + N, latent_factors_inmem, !load_factors_from_file);
	}

	if (validation != "") {
		int vshards = convert_matrixmarket<EdgeDataType>(validation, 0, 0, 3, VALIDATION, false);
		init_validation_rmse_engine<VertexDataType, EdgeDataType>(pvalidation_engine, vshards, &bsvd_predict);
	}

	/* load initial state from disk (optional) */
	if (load_factors_from_file) {
		load_matrix_market_matrix(training + "_U.mm", 0, D);
		load_matrix_market_matrix(training + "_V.mm", M, D);
	}

	/* Run */
	ALSVerticesInMemProgram program;
	graphchi_engine<VertexDataType, EdgeDataType> engine(training, nshards, false, m);
	set_engine_flags(engine);
	pengine = &engine;
	engine.run(program, niters);

	/* Output latent factor matrices in matrix-market format */
	output_als_result(training);
	test_predictions(&bsvd_predict);

	/* Report execution metrics */
	if (!quiet)
		metrics_report(m);

	return 0;
}
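Every listing above follows the same GraphChi collaborative-filtering skeleton. A condensed sketch for reference (function names are taken from the examples; VertexDataType, EdgeDataType, MyProgram, my_predict and output_result are placeholders for whatever the concrete algorithm defines, so this is an outline rather than a compilable program):

int main(int argc, const char ** argv) {
  print_copyright();
  graphchi_init(argc, argv);                 /* read command line arguments + config file */
  metrics m("my-algorithm");                 /* required performance-counter object */

  /* read algorithm parameters, e.g. lambda = get_option_float("lambda", 0.065); */
  parse_command_line_args();
  parse_implicit_command_line();

  /* shard the matrix-market training file and allocate latent factor vectors */
  int nshards = convert_matrixmarket<EdgeDataType>(training);
  init_feature_vectors<std::vector<vertex_data> >(M+N, latent_factors_inmem,
                                                  !load_factors_from_file);
  if (validation != ""){                     /* optional validation RMSE engine */
    int vshards = convert_matrixmarket<EdgeDataType>(validation, 0, 0, 3, VALIDATION);
    init_validation_rmse_engine<VertexDataType, EdgeDataType>(pvalidation_engine, vshards, &my_predict);
  }
  if (load_factors_from_file){               /* optionally resume factors from disk */
    load_matrix_market_matrix(training + "_U.mm", 0, D);
    load_matrix_market_matrix(training + "_V.mm", M, D);
  }

  /* run the per-vertex update program for niters iterations */
  MyProgram program;
  graphchi_engine<VertexDataType, EdgeDataType> engine(training, nshards, false, m);
  set_engine_flags(engine);
  pengine = &engine;
  engine.run(program, niters);

  /* write factors in matrix-market format, score the test set, report metrics */
  output_result(training);                   /* e.g. output_als_result / output_sgd_result */
  test_predictions(&my_predict);
  if (!quiet)
    metrics_report(m);
  return 0;
}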