TYPED_TEST_P(recommender_random_test, random) {
  pfi::math::random::mtrand rand(0);
  TypeParam r;

  // Generate random data from two normal distributions, N1 and N2.
  vector<float> mu1;
  mu1.push_back(1.0);
  mu1.push_back(1.0);
  mu1.push_back(1.0);
  for (size_t i = 0; i < 100; ++i) {
    vector<double> v;
    make_random(rand, mu1, 1.0, 3, v);
    string row_name = "r1_" + lexical_cast<string>(i);
    r.update_row(row_name, make_vec(v[0], v[1], v[2]));
  }

  vector<float> mu2;
  mu2.push_back(-1.0);
  mu2.push_back(-1.0);
  mu2.push_back(-1.0);
  for (size_t i = 0; i < 100; ++i) {
    vector<double> v;
    make_random(rand, mu2, 1.0, 3, v);
    string row_name = "r2_" + lexical_cast<string>(i);
    r.update_row(row_name, make_vec(v[0], v[1], v[2]));
  }

  // Then, query for rows similar to the mean of N1
  vector<pair<string, float> > ids;
  r.similar_row(make_vec(1.0, 1.0, 1.0), ids, 10);
  ASSERT_EQ(10u, ids.size());
  size_t correct = 0;
  for (size_t i = 0; i < ids.size(); ++i) {
    if (ids[i].first[1] == '1')  // rows drawn from N1 are named "r1_*"
      ++correct;
  }
  EXPECT_GT(correct, 5u);

  // Save the recommender and load it into a new instance
  stringstream oss;
  r.save(oss);
  TypeParam r2;
  r2.load(oss);

  // Run the same query on the restored recommender
  ids.clear();
  r2.similar_row(make_vec(1.0, 1.0, 1.0), ids, 10);
  ASSERT_EQ(10u, ids.size());
  correct = 0;
  for (size_t i = 0; i < ids.size(); ++i) {
    if (ids[i].first[1] == '1')
      ++correct;
  }
  EXPECT_GT(correct, 5u);
}

TYPED_TEST_P(recommender_random_test, trivial) {
  TypeParam r;

  r.update_row("r1", make_vec("c1", "c2", "c3"));
  r.update_row("r2", make_vec("c4", "c5", "c6"));

  vector<pair<string, float> > ids;
  r.similar_row(make_vec("c1", "c2", "c3"), ids, 1);
  ASSERT_EQ(1u, ids.size());
  EXPECT_EQ("r1", ids[0].first);
}

TYPED_TEST_P(recommender_random_test, mix) {
  pfi::math::random::mtrand rand(0);
  TypeParam r1, r2, expect;
  vector<float> mu(10);
  for (size_t i = 0; i < 100; ++i) {
    vector<double> v;
    make_random(rand, mu, 1.0, 3, v);
    common::sfv_t vec = make_vec(v[0], v[1], v[2]);

    string row = "r_" + lexical_cast<string>(i);
    (i < 50 ? r1 : r2).update_row(row, vec);
    expect.update_row(row, vec);
  }

  string diff1, diff2;
  r1.get_storage()->get_diff(diff1);
  r2.get_storage()->get_diff(diff2);

  r1.get_storage()->mix(diff1, diff2);

  TypeParam mixed;
  mixed.get_storage()->set_mixed_and_clear_diff(diff2);

  compare_recommenders(expect, mixed, false);
}

void compare_recommenders(recommender_base& r1, recommender_base& r2,
                          bool compare_complete_row = true) {
  // make a query vector
  common::sfv_t q = make_vec(0.5, 0.3, 1.0);

  // Get result before saving
  vector<pair<string, float> > ids1;
  r1.similar_row(q, ids1, 10);
  common::sfv_t comp1;
  r1.complete_row("r1_0", comp1);

  // Get result from loaded data
  vector<pair<string, float> > ids2;
  r2.similar_row(q, ids2, 10);
  common::sfv_t comp2;
  r2.complete_row("r1_0", comp2);

  // Compare two results
  // The ID order may differ when there are score ties.
  // EXPECT_TRUE(ids1 == ids2);
  ASSERT_EQ(ids1.size(), ids2.size());
  for (size_t i = 0; i < ids1.size(); ++i) {
    EXPECT_FLOAT_EQ(ids1[i].second, ids2[i].second);
  }

  if (compare_complete_row)
    EXPECT_TRUE(comp1 == comp2);
}

void update_random(recommender_base& r) {
  pfi::math::random::mtrand rand(0);
  vector<float> mu(3);
  for (size_t i = 0; i < 100; ++i) {
    vector<double> v;
    make_random(rand, mu, 1.0, 3, v);
    string row_name = "r1_" + lexical_cast<string>(i);
    r.update_row(row_name, make_vec(v[0], v[1], v[2]));
  }
}
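
For reference, the recommender tests above rely on two make_vec overloads: one that builds a weighted vector from three floats and one that builds a unit-weight vector from three column names. A minimal sketch, assuming common::sfv_t is a std::vector<std::pair<std::string, float> > and that the numeric overload uses the fixed feature names "c1", "c2", "c3" (both points are assumptions, not taken from the listing):

#include <string>
#include <utility>
#include <vector>

namespace common {
typedef std::vector<std::pair<std::string, float> > sfv_t;
}

// Numeric overload: three weights under the assumed feature names "c1".."c3".
common::sfv_t make_vec(float v1, float v2, float v3) {
  common::sfv_t v;
  v.push_back(std::make_pair("c1", v1));
  v.push_back(std::make_pair("c2", v2));
  v.push_back(std::make_pair("c3", v3));
  return v;
}

// String overload: three feature names, each with weight 1.0.
common::sfv_t make_vec(const std::string& c1, const std::string& c2,
                       const std::string& c3) {
  common::sfv_t v;
  v.push_back(std::make_pair(c1, 1.0f));
  v.push_back(std::make_pair(c2, 1.0f));
  v.push_back(std::make_pair(c3, 1.0f));
  return v;
}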
Example #6
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

// v2df, make_vec and sum_vec are two-lane SIMD helpers (sketched after this example).
int main(int argc, char* argv[])
{
  double  twoThrd = 0, sqrts = 0, Flint = 0, Cookson = 0;
  v2df    Harmonic, zeta, poly, alt, Gregory;
  v2df    zero, one, two, init, m_one, kv, av;

  double  k, k3, s, c;
  int n = atoi(argv[1]);

  zero  = make_vec( 0.0,  0.0);  one   = make_vec( 1.0,  1.0);
  two   = make_vec( 2.0,  2.0);  m_one = make_vec(-1.0, -1.0);
  init  = make_vec( 1.0,  2.0);  av    = make_vec( 1.0, -1.0);

  Harmonic = zeta = poly = alt = Gregory = zero;

  for (k=1; k<=n; k++)
    {
      twoThrd += pow(2.0/3.0, k-1);
      sqrts   += 1.0/sqrt(k);
      k3 = k*k*k;
      s  = sin(k); c = cos(k);
      Flint   += 1.0/(k3 * s*s);
      Cookson += 1.0/(k3 * c*c);
    }

  // Two scalar indices per iteration: kv holds {k, k+1}, av alternates {+1, -1}.
  for (kv=init; *(double *)(&kv)<=n; kv+=two)
    {
      poly    += one /(kv*(kv+one));
      Harmonic+= one / kv;
      zeta    += one /(kv*kv);
      alt     +=  av / kv;
      Gregory +=  av /(two*kv - one);
    }

#define psum(name,num) printf("%.9f\t%s\n",num,name)
  psum("(2/3)^k",           twoThrd); psum("k^-0.5",      sqrts);
  psum("1/k(k+1)",    sum_vec(poly)); psum("Flint Hills", Flint);
  psum("Cookson Hills",     Cookson); psum("Harmonic", sum_vec(Harmonic));
  psum("Riemann Zeta",sum_vec(zeta)); psum("Alternating Harmonic",sum_vec(alt));
  psum("Gregory",  sum_vec(Gregory));

  return 0;
}
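
Example #6 uses a two-lane packed-double type v2df together with make_vec and sum_vec, which are not shown in the listing. A minimal sketch of what those helpers likely look like with the GCC vector extension (the actual definitions in the original source may differ):

typedef double v2df __attribute__ ((vector_size(16)));  // two packed doubles

// Pack two scalars into one SIMD value.
static inline v2df make_vec(double a, double b) {
  v2df v = { a, b };
  return v;
}

// Add the two lanes back into a single scalar.
static inline double sum_vec(v2df v) {
  return ((double *)&v)[0] + ((double *)&v)[1];
}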
Example #7
void bidiag_gkl_restart(
    int locked, int l, int n,
    CAX && Ax, CATX && Atx, CD && D, CE && E, CRho && rho, CP && P, CQ && Q, int s_indx, int t_s_indx) {
  // enhanced version based on SLEPc
  const double eta = 1.e-10;
  double t_start = 0.0, t_end = 0.0;
  double local_start = 0.0, local_end = 0.0; 
  double t_total3 = 0.0, t_total4 = 0.0, t_total5 = 0.0, t_total6 = 0.0, t_total7 = 0.0;
  
  int rank, nprocs;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
  
  // Step 1
  int recv_len = (int)P.dim0() * nprocs;
  vec_container<double> tmp(Ax.dim0());
  vec_container<double> recv_tmp(recv_len);
  
  auto m_Ax = make_gemv_ax(&Ax);
  auto m_Atx = make_gemv_ax(&Atx);
  
  m_Ax(Q.col(l), tmp, P.dim0() > 1000);

  vec_container<double> send_data(P.dim0(),0);
  for(size_t i = s_indx; i < s_indx + Ax.dim0(); ++i)
    send_data[i] = tmp.get(i-s_indx);
  MPI_Gather(&send_data[0], P.dim0(), MPI_DOUBLE, &recv_tmp[0], P.dim0(), MPI_DOUBLE, 0, MPI_COMM_WORLD);

  P.col(l) = 0;
  // Assemble the true P.col(l) from the gathered pieces
  if(rank == 0) {
    local_union(P, recv_tmp, l, nprocs);
    // Step 2 & also in rank 0
    for (int j = locked; j < l; ++j) {
      P.col(l) += -rho(j) * P.col(j);
    }
  }
  
  MPI_Bcast(&(P.col(0)[0]), P.size(), MPI_DOUBLE, 0, MPI_COMM_WORLD); 
  //MPI_Bcast(&(P.col(l)[0]), P.size(), MPI_DOUBLE, 0, MPI_COMM_WORLD); 
  // Main loop
  vec_container<double> T(n);
  int recv_l = Q.dim0() * nprocs;
  vec_container<double> recv_t(recv_l);
  for (int j = l; j < n; ++j) {
    // Step 3   
    vec_container<double> tmp2(Atx.dim0());
    
    /* for print */
    if(rank == 0)
      t_start = currenttime();
   
    local_start = currenttime();
    m_Atx(P.col(j), tmp2, Q.dim0() > 1000);
    local_end = currenttime();
    std::cout << "parallel mv time cost is " << (local_end - local_start) / 1.0e6 << std::endl;
     
    vec_container<double> s_data(Q.dim0(), 0); 
    for(size_t i = t_s_indx; i < t_s_indx + Atx.dim0(); ++i)
      s_data[i] = tmp2[i-t_s_indx];
    MPI_Gather(&s_data[0], Q.dim0(), MPI_DOUBLE, &recv_t[0], Q.dim0(), MPI_DOUBLE, 0, MPI_COMM_WORLD);
    local_start = currenttime();
    std::cout << "parallel mv time cost2 is " << (local_start - local_end) / 1.0e6 << std::endl;
    
    //Q.col(j+1) = 0;
    if(rank == 0) {
      // Assemble the true Q.col(j + 1) from the gathered pieces
      local_union(Q, recv_t, j + 1, nprocs);
      local_end = currenttime();
      t_end = currenttime();
      std::cout << "parallel mv time cost3 is " << (local_end - local_start) / 1.0e6 << std::endl;
      std::cout << "time of step 3 is : " << (t_end - t_start) / 1.0e6 << std::endl;
      t_total3 += (t_end - t_start) / 1.0e6;
    }
      
    // Step 4
    for(size_t aa = 0; aa < Q.dim0(); ++aa) // row
      MPI_Bcast(&(Q.row(aa)[0]), j + 2, MPI_DOUBLE, 0, MPI_COMM_WORLD); 
    // MPI_Bcast(&(Q.col(0)[0]), Q.size(), MPI_DOUBLE, 0, MPI_COMM_WORLD); 
    
    if(rank == 0)
      t_end = currenttime();
    auto Qj = mat_cols(Q, 0, j + 1);
    auto Tj = make_vec(&T, j + 1);
    
    //Tj.assign(gemv(Qj.trans(), Q.col(j + 1)), j >= 3);
    parallel_gemv_task(Qj.trans(), Q.col(j+1), Tj);
    
    if(rank == 0) {
      t_start = currenttime();
      t_total4 += (t_start - t_end) / 1.0e6;
      std::cout << "time of step 4 is : " << (t_start - t_end) / 1.0e6 << std::endl;
    }

    // Step 5
    if(rank == 0) {
      double r = Q.col(j + 1).norm2();
      D[j] = vec_unit(P.col(j));
      Q.col(j + 1).scale(1. / D[j]);
      Tj = Tj / D[j];
      r /= D[j];
      Q.col(j + 1).plus_assign(- gemv(Qj, Tj), Q.dim0() > 1000);
      
      t_end = currenttime();
      t_total5 += (t_end - t_start) / 1.0e6;
      std::cout << "time of step 5 is : " << (t_end - t_start) / 1.0e6 << std::endl;

      // Step 6
      double beta = r * r - Tj.square_sum();
      if (beta < eta * r * r) {
        Tj.assign(gemv(Qj.trans(), Q.col(j + 1)), Q.dim0() > 1000);
        r = Q.col(j + 1).square_sum();
        Q.col(j + 1).plus_assign(-gemv(Qj, Tj), Q.dim0() > 1000);
        beta = r * r - Tj.square_sum();
      }
      beta = std::sqrt(beta);
      E[j] = beta;
      Q.col(j + 1).scale(1. / E[j]);
      
      t_start = currenttime();
      t_total6 += (t_start - t_end) / 1.0e6;
      std::cout << "time of step 6 is : " << (t_start - t_end) / 1.0e6 << std::endl;
    } 
    
    // Step 7
    // MPI_Bcast(&(Q.col(j+1)[0]), Q.size(), MPI_DOUBLE, 0, MPI_COMM_WORLD);
    // MPI_Bcast(&(Q.col(0)[0]), Q.size(), MPI_DOUBLE, 0, MPI_COMM_WORLD);
    for(size_t aa = 0; aa < Q.dim0(); ++aa)
      MPI_Bcast(&(Q.col(j+1)[aa]), 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);

    if (j + 1 < n) {
      if(rank == 0) 
        t_start = currenttime();
      vec_container<double> tmp3(Ax.dim0());
      vec_container<double> se_data(P.dim0(), 0);
      
      m_Ax(Q.col(j + 1), tmp3, P.dim0() > 1000);
      
      for(size_t k1 = s_indx; k1 < s_indx + Ax.dim0(); ++k1)
        se_data[k1] = tmp3[k1-s_indx];
      MPI_Gather(&se_data[0], P.dim0(), MPI_DOUBLE, &recv_tmp[0], P.dim0(), MPI_DOUBLE, 0, MPI_COMM_WORLD);
      
      // P.col(j+1) = 0;
      if(rank == 0) {
        local_union(P, recv_tmp, j + 1, nprocs);
        P.col(j + 1).plus_assign(- E[j] * P.col(j), P.dim0() > 1000);
      }
      
      /* for print */
      if(rank == 0) {
        t_end = currenttime();
        t_total7 += (t_end - t_start) / 1.0e6;
	std::cout << "time of step 7 is : " << (t_end - t_start) / 1.0e6 << std::endl;
      }

      // MPI_Bcast(&(P.col(l)[0]), P.size(), MPI_DOUBLE, 0, MPI_COMM_WORLD);
      // MPI_Bcast(&(P.col(0)[0]), P.size(), MPI_DOUBLE, 0, MPI_COMM_WORLD);
      for(size_t aa = 0; aa < P.dim0(); ++aa)
        MPI_Bcast(&(P.col(j+1)[aa]), 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);

    }  // end if
  }    // end for (j)
  
  /* Print the total time spent in each step. */
  if(rank == 0) {
    std::cout << "total step 3 time is : " << t_total3 << std::endl; 
    std::cout << "total step 4 time is : " << t_total4 << std::endl; 
    std::cout << "total step 5 time is : " << t_total5 << std::endl; 
    std::cout << "total step 6 time is : " << t_total6 << std::endl; 
    std::cout << "total step 7 time is : " << t_total7 << std::endl; 
  }
  
  return;
}
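
In this example make_vec(&T, j + 1) plays a different role: it appears to wrap the first j + 1 entries of the vec_container T in a non-owning view, so that Tj aliases T instead of copying it. A hypothetical sketch of that idea (the view type, its name, and its members are assumptions; the real type also supplies the assign, square_sum, and arithmetic operations used above):

// Hypothetical non-owning view over the first `len` entries of a container.
struct vec_view {
  double* data;
  int len;
  double& operator[](int i) { return data[i]; }
  int size() const { return len; }
};

// Wrap an existing container without copying its storage.
template <typename Container>
vec_view make_vec(Container* c, int len) {
  return vec_view{ &(*c)[0], len };
}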