double computeLoss() {
  // Compute loss w.r.t. the oracle hypothesis and the current weights w.
  assert(oracle);
  if (hope_select == 1)
    loss = (features.dot(w) + cost) - (oracle->features.dot(w) - oracle->cost);
  else
    loss = (features.dot(w) + cost) - (oracle->features.dot(w));
  if (loss < 0) {
    cerr << "Warning! Loss < 0! this_score=" << features.dot(w)
         << " oracle_score=" << oracle->features.dot(w)
         << " this_cost=" << cost
         << " oracle_cost=" << oracle->cost << endl;
    loss = 0;
  }
  return loss;
}
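// A minimal standalone sketch of the same cost-augmented hinge loss,
// mirroring the hope_select != 1 branch above. The Hypothesis struct and
// the names hingeLoss/score are illustrative assumptions, not part of the
// original code.
#include <algorithm>
#include <cassert>
#include <vector>

struct Hypothesis {
  std::vector<double> features;
  double cost;  // task loss of this hypothesis (e.g., 1 - BLEU)
  double score(const std::vector<double>& w) const {
    assert(features.size() == w.size());
    double s = 0.0;
    for (size_t i = 0; i < w.size(); ++i) s += features[i] * w[i];
    return s;
  }
};

// loss = (score(h) + cost(h)) - score(oracle), clipped at zero.
double hingeLoss(const Hypothesis& h, const Hypothesis& oracle,
                 const std::vector<double>& w) {
  return std::max(0.0, (h.score(w) + h.cost) - oracle.score(w));
}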
Vec RQR_Multiply(const VECTOR &v,
                 const SparseKalmanMatrix &RQR,
                 const SparseVector &Z,
                 double H) {
  int state_dim = Z.size();
  if (v.size() != state_dim + 2) {
    report_error("wrong sizes in RQR_Multiply");
  }
  // Partition v = [eta, epsilon, 0].
  ConstVectorView eta(v, 0, state_dim);
  double epsilon = v[state_dim];
  // Partition this matrix.
  Vec RQRZ = RQR * Z.dense();
  double ZRQRZ_plus_H = Z.dot(RQRZ) + H;
  Vec ans(v.size());
  VectorView(ans, 0, state_dim) = (RQR * eta).axpy(RQRZ, epsilon);
  ans[state_dim] = RQRZ.dot(eta) + ZRQRZ_plus_H * epsilon;
  return ans;
}
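// A minimal dense Eigen sketch of the same block multiply, for clarity.
// The implied augmented matrix is
//   [ RQR      RQR*Z       ]
//   [ Z'RQR    Z'RQR*Z + H ]
// acting on the partitioned vector [eta; epsilon]; the trailing zero
// component of v is left untouched. All names here are illustrative
// assumptions, not the library's API.
#include <Eigen/Dense>

Eigen::VectorXd rqrMultiplyDense(const Eigen::VectorXd& v,
                                 const Eigen::MatrixXd& RQR,
                                 const Eigen::VectorXd& Z,
                                 double H) {
  const int state_dim = Z.size();
  const Eigen::VectorXd eta = v.head(state_dim);
  const double epsilon = v[state_dim];
  const Eigen::VectorXd RQRZ = RQR * Z;
  Eigen::VectorXd ans = Eigen::VectorXd::Zero(v.size());
  ans.head(state_dim) = RQR * eta + epsilon * RQRZ;
  ans[state_dim] = RQRZ.dot(eta) + (Z.dot(RQRZ) + H) * epsilon;
  return ans;  // ans[state_dim + 1] stays zero by construction
}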
// Lanczos iteration with partial reorthogonalization.
double EigenTriangle::solve(int maxIter, double tol) {
  int n = graph->VertexCount();
  srand(time(0));
  if (maxIter > n) maxIter = n;
  double na = adjMatrix.norm();
  double phi = tol * na;
  double delta = tol * sqrt(na);

  // omega tracks the estimated loss of orthogonality between Lanczos vectors.
  vector<Triplet<double> > omega_vector;
  for (int i = 0; i < n + 2; i++) {
    if (i > 0) omega_vector.push_back(Triplet<double>(i, i - 1, phi));
    omega_vector.push_back(Triplet<double>(i, i, 1));
  }
  omega.resize(n + 2, n + 2);
  omega.setFromTriplets(omega_vector.begin(), omega_vector.end());

  // Deterministic starting vector, normalized to unit length.
  vector<SparseVector<double> > v;
  SparseVector<double> firstV(n);
  for (int i = 0; i < 100; i++) firstV.coeffRef(i % n) = i;
  v.push_back(firstV);
  v[0] /= v[0].norm();

  VectorXd alpha(n);
  VectorXd beta(n + 1);
  beta[0] = 0;
  SparseVector<double> w;
  bool flag = false;  // reorthogonalize again on the next step
  int num = 0;        // number of reorthogonalizations performed
  int last = -1;
  for (int i = 0; i < maxIter; i++) {
    if ((i + 1) * 100 / maxIter > last) {
      last = (i + 1) * 100 / maxIter;
      std::cout << "\r" << "[EigenTriangle] Processing " << last << "% ..."
                << std::flush;
    }
    // Standard three-term Lanczos recurrence.
    w = adjMatrix * v[i];
    alpha.coeffRef(i) = w.dot(v[i]);
    if (i == maxIter - 1) break;
    w -= alpha[i] * v[i];
    if (i > 0) w -= beta[i] * v[i - 1];
    beta[i + 1] = w.norm();
    v.push_back(w / beta[i + 1]);
    if (flag) {
      // Second reorthogonalization pass after a violation was detected.
      flag = false;
      for (int j = 0; j <= i; j++)
        v[i + 1] -= v[j].dot(v[i + 1]) * v[j];
      for (int j = 0; j <= i; j++)
        omega.coeffRef(i + 1, j) = phi;
    } else {
      // Recurrence estimating the inner products |v[i+1] . v[j]|.
      omega.coeffRef(i + 1, 0) = 0.0;
      if (i > 0) {
        omega.coeffRef(i + 1, 0) =
            1.0 / beta(i) * ((alpha(0) - alpha(i)) * omega.coeffRef(i, 0) -
                             beta(i - 1) * omega.coeffRef(i - 1, 0)) + delta;
      }
      for (int j = 1; j <= i; j++) {
        omega.coeffRef(i + 1, j) =
            1.0 / beta(i) * (beta(j) * omega.coeffRef(i, j + 1) +
                             (alpha(j) - alpha(i)) * omega.coeffRef(i, j) -
                             beta(j - 1) * omega.coeffRef(i, j - 1) -
                             beta(i - 1) * omega.coeffRef(i - 1, j)) + delta;
      }
    }
    // If the estimated orthogonality loss exceeds sqrt(tol), reorthogonalize.
    double mx = 0.0;
    for (int j = 0; j <= i; j++)
      if (mx < fabs(omega.coeffRef(i + 1, j)))
        mx = fabs(omega.coeffRef(i + 1, j));
    if (mx > sqrt(tol)) {
      for (int j = 0; j <= i; j++)
        omega.coeffRef(i + 1, j) = phi;
      num++;
      for (int j = 0; j <= i; j++)
        v[i + 1] -= v[i + 1].dot(v[j]) * v[j];
      flag = true;
    }
  }
  printf("\n");

  // Build the k x k tridiagonal Lanczos matrix T.
  int k = maxIter;
  MatrixXd T = MatrixXd::Zero(k, k);
  for (int i = 0; i < k; i++) {
    T(i, i) = alpha[i];
    if (i < k - 1) T(i, i + 1) = beta[i + 1];
    if (i > 0) T(i, i - 1) = beta[i];
  }

  // Approximate triangle count: sum of lambda_i^3 / 6 over the eigenvalues of T.
  Eigen::EigenSolver<MatrixXd> eigenSolver;
  eigenSolver.compute(T, false);
  Eigen::VectorXcd eigens = eigenSolver.eigenvalues();
  double res = 0;
  for (int i = 0; i < eigens.size(); i++) {
    std::complex<double> tmp = eigens[i];
    res += pow(tmp.real(), 3) / 6;
    std::cout << i << ": " << tmp << std::endl;
  }
  return res;
}
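// A minimal sketch of the identity this routine approximates: the number of
// triangles in an undirected graph equals trace(A^3)/6 = sum_i lambda_i^3 / 6
// for the adjacency matrix A. Here the exact dense computation on a toy
// graph (K3, a single triangle) for comparison; main() is illustrative only.
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::MatrixXd A(3, 3);
  A << 0, 1, 1,
       1, 0, 1,
       1, 1, 0;  // K3: exactly one triangle
  double triangles = (A * A * A).trace() / 6.0;
  std::cout << "triangles = " << triangles << std::endl;  // prints 1
  return 0;
}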