Example #1: fixed-point one-vs-one SVM prediction
Word16 svm_predict(const Sample sample) {
	Word16 i;

	// Kernel value between the input sample and every support vector.
	for(i = 0; i < NR_L; i++) {
		kvalue[i] = rbf_kernel(sample.data, model.SV[i]);
	}

	//printf("%hd\n", kvalue[0]);

	start[0] = 0;
	for(i = 1; i < NR_CLASS; i++) {
		start[i] = start[i - 1] + model.nSV[i - 1];
		//printf("%d\n", start[i]);
	}

	// Clear the per-class vote counters.
	for(i = 0; i < NR_CLASS; i++)
		vote[i] = 0;

	Word16 p=0, j = 0;

	// One-vs-one voting: each pair of classes (i, j) contributes one binary decision.
	for(i=0;i<NR_CLASS;i++){
		for(j=i+1;j<NR_CLASS;j++) {
			Word16 sum = 0;
			Word16 si = start[i];
			Word16 sj = start[j];
			Word16 ci = model.nSV[i];
			Word16 cj = model.nSV[j];
			
			Word16 k;
			// Dual coefficients of the (i, j) binary classifier.
			Word16 *coef1 = model.sv_coef[j-1];
			Word16 *coef2 = model.sv_coef[i];

			// Accumulate the decision value; operands are fixed-point,
			// so each product is rescaled by 1/1000.
			for(k=0;k<ci;k++)
				sum += coef1[si+k] * kvalue[si+k] / 1000;

			for(k=0;k<cj;k++)
				sum += coef2[sj+k] * kvalue[sj+k] / 1000;
			
			// Subtract the classifier offset rho and record the decision value.
			sum -= model.rho[p];
			dec_values[p] = sum;

			if(dec_values[p] > 0)
				++vote[i];
			else
				++vote[j];
			p++;
		}
	}

	// The predicted class is the one with the most pairwise votes.
	Word16 vote_max_idx = 0;
	for(i=1; i < NR_CLASS; i++)
		if(vote[i] > vote[vote_max_idx])
			vote_max_idx = i;

	return model.label[vote_max_idx];
}
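
The function relies on types and globals declared elsewhere (Word16, Node, Sample, model, kvalue, start, vote, dec_values, NR_L, NR_CLASS). A minimal sketch of what those declarations could look like is given below; all names, sizes, and the rbf_kernel prototype are guesses inferred from how the snippet uses them, not the original headers.

/* Hypothetical support declarations for svm_predict(); sizes and layout are
   assumptions made for illustration only. */
typedef short Word16;

#define NR_CLASS 3                         /* number of classes (assumed) */
#define NR_L     64                        /* total number of support vectors (assumed) */
#define NR_FEAT  16                        /* feature-vector length (assumed) */
#define NR_PAIRS (NR_CLASS * (NR_CLASS - 1) / 2)

typedef struct { Word16 index; Word16 value; } Node;
typedef struct { Node data[NR_FEAT]; } Sample;

typedef struct {
	Node   SV[NR_L][NR_FEAT];              /* support vectors */
	Word16 nSV[NR_CLASS];                  /* support-vector count per class */
	Word16 sv_coef[NR_CLASS - 1][NR_L];    /* dual coefficients (fixed-point) */
	Word16 rho[NR_PAIRS];                  /* per-pair offsets */
	Word16 label[NR_CLASS];                /* class labels */
} Model;

static Model  model;
static Word16 kvalue[NR_L];
static Word16 start[NR_CLASS];
static Word16 vote[NR_CLASS];
static Word16 dec_values[NR_PAIRS];

/* Fixed-point RBF kernel, defined elsewhere. */
Word16 rbf_kernel(const Node *x, const Node *sv);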
Example #2: randomized PCA with optional RBF kernel (RandomPCA::pca)
void RandomPCA::pca(MatrixXd &X, int method, bool transpose,
   unsigned int ndim, unsigned int nextra, unsigned int maxiter, double tol,
   long seed, int kernel, double sigma, bool rbf_center,
   unsigned int rbf_sample, bool save_kernel, bool do_orth, bool do_loadings)
{
   unsigned int N;

   if(kernel != KERNEL_LINEAR)
   {
      transpose = false;
      verbose && std::cout << timestamp()
	 << " Kernel not linear, can't transpose" << std::endl;
   }

   verbose && std::cout << timestamp() << " Transpose: " 
      << (transpose ? "yes" : "no") << std::endl;

   if(transpose)
   {
      if(stand_method != STANDARDIZE_NONE)
	  X_meansd = standardize_transpose(X, stand_method, verbose);
      N = X.cols();
   }
   else
   {
      if(stand_method != STANDARDIZE_NONE)
	 X_meansd = standardize(X, stand_method, verbose);
      N = X.rows();
   }

   // Randomised sketch: project X onto total_dim = ndim + nextra Gaussian directions.
   unsigned int total_dim = ndim + nextra;
   MatrixXd R = make_gaussian(X.cols(), total_dim, seed);
   MatrixXd Y = X * R;
   verbose && std::cout << timestamp() << " dim(Y): " << dim(Y) << std::endl;
   normalize(Y);
   MatrixXd Yn;

   verbose && std::cout << timestamp() << " dim(X): " << dim(X) << std::endl;
   MatrixXd K; 
   if(kernel == KERNEL_RBF)
   {
      if(sigma == 0)
      {
	 // Median heuristic: derive sigma from the median distance over a subsample.
	 unsigned int med_samples = fminl(rbf_sample, N);
	 double med = median_dist(X, med_samples, seed, verbose);
	 sigma = sqrt(med);
      }
      verbose && std::cout << timestamp() << " Using RBF kernel with sigma="
	 << sigma << std::endl;
      K.noalias() = rbf_kernel(X, sigma, rbf_center, verbose);
   }
   else
   {
      verbose && std::cout << timestamp() << " Using linear kernel" << std::endl;
      K.noalias() = X * X.transpose() / (N - 1);
   }

   //trace = K.diagonal().array().sum() / (N - 1);
   trace = K.diagonal().array().sum();
   verbose && std::cout << timestamp() << " Trace(K): " << trace 
      << " (N: " << N << ")" << std::endl;

   verbose && std::cout << timestamp() << " dim(K): " << dim(K) << std::endl;
   if(save_kernel)
   {
      verbose && std::cout << timestamp() << " saving K" << std::endl;
      save_text("kernel.txt", K);
   }

   // Subspace iteration: repeatedly apply K to Y, re-normalising (or orthogonalising)
   // each pass, until successive iterates differ by less than tol or maxiter is hit.
   for(unsigned int iter = 0 ; iter < maxiter ; iter++)
   {
      verbose && std::cout << timestamp() << " iter " << iter;
      Yn.noalias() = K * Y;
      if(do_orth)
      {
	 verbose && std::cout << " (orthogonalising)";
	 ColPivHouseholderQR<MatrixXd> qr(Yn);
	 MatrixXd I = MatrixXd::Identity(Yn.rows(), Yn.cols());
	 Yn = qr.householderQ() * I;
	 Yn.conservativeResize(NoChange, Yn.cols());
      }
      else
	 normalize(Yn);

      double diff = (Y - Yn).array().square().sum() / Y.size();
      verbose && std::cout << " " << diff << std::endl;
      Y.noalias() = Yn;
      if(diff < tol)
	 break;
   }

   verbose && std::cout << timestamp() << " QR begin" << std::endl;
   // Extract an orthonormal basis Q for the converged subspace.
   ColPivHouseholderQR<MatrixXd> qr(Y);
   MatrixXd Q = MatrixXd::Identity(Y.rows(), Y.cols());
   Q = qr.householderQ() * Q;
   Q.conservativeResize(NoChange, Y.cols());
   verbose && std::cout << timestamp() << " dim(Q): " << dim(Q) << std::endl;
   verbose && std::cout << timestamp() << " QR done" << std::endl;

   // Project the data onto Q: B is only total_dim x p, so its PCA is cheap.
   MatrixXd B = Q.transpose() * X;
   verbose && std::cout << timestamp() << " dim(B): " << dim(B) << std::endl;

   MatrixXd Et;
   pca_small(B, method, Et, d, verbose);
   verbose && std::cout << timestamp() << " dim(Et): " << dim(Et) << std::endl;

   // Apply the usual 1/(N - 1) covariance normalisation to the eigenvalues.
   d = d.array() / (N - 1);

   if(transpose)
   {
      V.noalias() = Q * Et;
      // We divide P by sqrt(N - 1) since X has not been divided
      // by it (but B has)
      P.noalias() = X.transpose() * V;
      VectorXd s = 1 / (d.array().sqrt() * sqrt(N - 1));
      MatrixXd Dinv = s.asDiagonal();
      U = P * Dinv;
   }
   else
   {
      // P = U D = X V
      U.noalias() = Q * Et;
      P.noalias() = U * d.asDiagonal();
      if(do_loadings)
      {
	 VectorXd s = 1 / (d.array().sqrt() * sqrt(N - 1));
	 MatrixXd Dinv = s.asDiagonal();
	 V = X.transpose() * U * Dinv;
      }
   }

   P.conservativeResize(NoChange, ndim);
   U.conservativeResize(NoChange, ndim);
   V.conservativeResize(NoChange, ndim);
   d.conservativeResize(ndim);
   pve = d.array() / trace;
}
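
For reference, the core of the routine (linear-kernel case) condenses to: sketch with a random block, run subspace iteration against K, orthonormalise, and solve the small projected eigenproblem exactly. The sketch below is a self-contained illustration of that scheme using Eigen; the function name, its defaults, and the use of SelfAdjointEigenSolver in place of pca_small() are illustrative assumptions, not part of the original code.

// Illustrative sketch only: approximate the top-ndim eigenpairs of
// K = X * X.transpose() / (N - 1) by randomised subspace iteration.
#include <Eigen/Dense>

using namespace Eigen;

MatrixXd randomized_eigs(const MatrixXd& X, int ndim, VectorXd& evals,
                         int nextra = 5, int maxiter = 50, double tol = 1e-6)
{
   const int N = X.rows();
   const int total_dim = ndim + nextra;     // oversample to improve accuracy

   MatrixXd K = X * X.transpose() / double(N - 1);

   // Random starting block, then subspace iteration with QR re-orthogonalisation.
   MatrixXd Y = MatrixXd::Random(N, total_dim);
   for(int iter = 0; iter < maxiter; iter++)
   {
      MatrixXd Yn = K * Y;
      HouseholderQR<MatrixXd> qr(Yn);
      Yn = qr.householderQ() * MatrixXd::Identity(N, total_dim);
      double diff = (Y - Yn).array().square().sum() / Y.size();
      Y = Yn;
      if(diff < tol)
         break;
   }

   // Orthonormal basis Q of the converged subspace.
   HouseholderQR<MatrixXd> qr(Y);
   MatrixXd Q = qr.householderQ() * MatrixXd::Identity(N, total_dim);

   // Small exact eigenproblem: Q^T K Q = (Q^T X)(Q^T X)^T / (N - 1).
   MatrixXd B = Q.transpose() * X;
   SelfAdjointEigenSolver<MatrixXd> es(B * B.transpose() / double(N - 1));

   // Eigen returns eigenvalues in increasing order; keep the top ndim, descending.
   evals = es.eigenvalues().tail(ndim).reverse();
   MatrixXd W = es.eigenvectors().rightCols(ndim).rowwise().reverse();

   // Lift the small eigenvectors back to the N-dimensional sample space.
   return Q * W;
}

From these approximate eigenpairs the original routine goes on to form the scores P, the optional loadings V, and the proportion of variance explained, pve = d / trace(K).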