예제 #1
0
VectorXd CurvatureError::operator()(const VectorXd& a) const {
    // The stacked variable vector packs the per-segment curvatures first,
    // with the shared step length Delta in the final slot.
    const DblVec kappa = toDblVec(a.head(pi->curvature_vars.size()));
    const double step = a(a.size() - 1);

    // Integrate |curvature| over the path and subtract the allowed total,
    // so the constraint value is <= 0 when the limit is satisfied.
    double violation = -this->total_curvature_limit;
    for (size_t i = 0; i < kappa.size(); ++i) {
        violation += fabs(kappa[i]) * step;
    }

    VectorXd ret(1);
    ret(0) = violation;
    return ret;
}
예제 #2
0
double CurvatureCost::operator()(const VectorXd& a) const {
    // Unpack the stacked variable vector: leading entries are per-segment
    // curvatures, the trailing entry is the shared step length Delta.
    const DblVec kappa = toDblVec(a.head(pi->curvature_vars.size()));
    const double step = a(a.size() - 1);

    // Cost is the weighted sum of squared (curvature * Delta) terms.
    double sumsq = 0.0;
    for (size_t i = 0; i < kappa.size(); ++i) {
        const double term = kappa[i] * step;
        sumsq += term * term;
    }
    return sumsq * coeff;
}
예제 #3
0
 void populateSimpleDataFrame(DataFrame& df)
 {
   // Build a minimal data frame where the first factor separates the data
   // perfectly and the remaining factors are garbage.
   df.addDataVector("a", DblVec(0, 1, 3));
   df.addDataVector("a", DblVec(0, 2, 2));
   df.addDataVector("a", DblVec(0, 3, 1));
   df.addDataVector("b", DblVec(1, 1, 3));
   df.addDataVector("b", DblVec(1, 2, 2));
   df.addDataVector("b", DblVec(1, 3, 1));

   // Attach the three factor labels.
   const char* names[] = {"f1", "f2", "f3"};
   std::vector<std::string> labels(names, names + 3);
   df.setFactorLabels(labels);
 }
예제 #4
0
파일: logreg.cpp 프로젝트: SUYONGJIAN/Luna
double LogisticRegressionObjective::EvalLocalMultiThread(const DblVec& input, DblVec& gradient){
	
	/*
	create 24 thread;
	each thread calculate a loss and gradient
	*/
	int threadNum = 24;
	double lossList[threadNum];
	DblVec *gradList = new DblVec[threadNum];
	for(int i = 0; i < threadNum; i++){
		gradList[i] = DblVec(gradient.size());
	}
	threadList = new pthread_t[threadNum];
	
	for(int i = 0; i < threadNum; i++){
		Parameter*p = new Parameter(*this, input, gradList[i], lossList[i], i, threadNum);
		pthread_create(&threadList[i], NULL, ThreadEvalLocal, p);
	}
	
	for(int i = 0; i < threadNum; i++){
		pthread_join(threadList[i], NULL);
	}
	
	double loss = 0.0;
	for(int j = 0; j < gradient.size(); j++){
		gradient[j] = 0.0;
	}
	for(int i = 0; i < threadNum; i++){
		loss += lossList[i];
		for(int j = 0; j < gradient.size(); j++){
			gradient[j] += gradList[i][j];
		}
	}
	delete []gradList;
	delete []threadList;
	return loss;
	
}
예제 #5
0
/*! 
 *  A const function that calculates the matrix exponential and returns a copy.
 *  The exponential is calculated by finding the eigenvalues and eigenvectors
 *  of the matrix and exponentiating the eigenvalues. The exponentiated
 *  eigenvalues are stored in a matrix V, the eigenvectors are stored in a
 *  matrix A, and inv(A) is calculated.
 *  The product A*V*inv(A) is returned.
 *  @return A copy of the exponentiated matrix
 */
Matrix Matrix::expm() const{
  // Delegate to the vector overload with a single unit time value and
  // return the one resulting matrix.
  const DblVec unitTime(1, 1);
  return expm(unitTime)[0];
}