/**
   Solve AX = B for multiple right-hand sides using LAPACK DGESV.
   a is passed by value because DGESV overwrites it with its LU
   factors; the solution is computed in place over a copy of b.
*/
int solveLinearEquationLU(dmatrix a, const dmatrix &b, dmatrix &out_x)
		{
				assert(a.rows() == a.cols() && a.cols() == b.rows() );

				out_x = b;

				const int n = (int)a.rows();
				const int nrhs = (int)b.cols();
				int info;
				std::vector<int> ipiv(n);

#ifndef USE_CLAPACK_INTERFACE

				int lda = n;
				int ldb = n;
				dgesv_(&n, &nrhs, &(a(0,0)), &lda, &(ipiv[0]), &(out_x(0,0)), &ldb, &info);
#else
				info = clapack_dgesv(CblasColMajor,
									 n, nrhs, &(a(0,0)), n, 
									 &(ipiv[0]),
									 &(out_x(0,0)),
									 n);
#endif
				assert(info == 0);
				
				return info;
		}
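// --- Minimal standalone sketch of the same DGESV call ---
// Reference only: plain column-major arrays instead of dmatrix, with the
// Fortran-style prototype declared by hand (exact integer types may differ
// between LAPACK bindings). DGESV overwrites both the coefficient matrix
// and the right-hand side.
#include <cassert>
#include <vector>

extern "C" void dgesv_(int *n, int *nrhs, double *a, int *lda,
                       int *ipiv, double *b, int *ldb, int *info);

int main()
{
    // solve A x = b with A = [[3,1],[1,2]] and b = (9,8); x = (2,3)
    int n = 2, nrhs = 1, info;
    double a[4] = {3.0, 1.0,   // first column of A
                   1.0, 2.0};  // second column of A
    double b[2] = {9.0, 8.0};
    std::vector<int> ipiv(n);

    dgesv_(&n, &nrhs, a, &n, &(ipiv[0]), b, &n, &info);
    assert(info == 0);
    // b now holds the solution x = (2, 3)
    return 0;
}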
  bool MLP::calcGradient(const dmatrix& inputs,
                         const ivector& ids,
                         dvector& grad) {

    if (inputs.rows() != ids.size()) {
      setStatusString("Number of vectors not consistent with number of ids");
      return false;
    }

    dvector tmp;
    int i;
    double tmpError;

    totalError = 0;
    calcGradient(inputs.getRow(0),ids.at(0),grad);
    computeActualError(ids.at(0),totalError);

    for (i=1;i<inputs.rows();++i) {
      calcGradient(inputs.getRow(i),ids.at(i),tmp);
      computeActualError(ids.at(i),tmpError);
      grad.add(tmp);
      totalError+=tmpError;
    }

    return true;
  }
 double clusteringValidity::getMaximumDistance(const dmatrix& m1,
                                               const dmatrix& m2) const {
   int i,j;
   dmatrix distances(m1.rows(),m2.rows());
   l2Distance<double> dist;
   for (i=0; i<m1.rows(); i++) {
     for (j=0; j<m2.rows(); j++) {
       distances[i][j]=dist.apply(m1.getRow(i),m2.getRow(j));
     }
   }
   return distances.maximum();
 }
 double clusteringValidity::getStandardDiameter(const dmatrix& m1) const {
   // initialize to 0: only the upper triangle is filled below
   dmatrix distances(m1.rows(),m1.rows(),0.0);
   int j,k;
   l2Distance<double> dist;
   for (j=0; j<m1.rows(); j++) {
     for (k=j+1; k<m1.rows(); k++) {
       distances[j][k]=dist.apply(m1.getRow(j),
                                  m1.getRow(k));
     }
   }
   return distances.maximum();  
 }
 double clusteringValidity::getAverageDistance(const dmatrix& m1,
                                               const dmatrix& m2) const {
   double distance=0.0;
   int i,j;
   l2Distance<double> dist;
   for (i=0; i<m1.rows(); i++) {
     for (j=0; j<m2.rows(); j++) {
       distance+=dist.apply(m1.getRow(i),m2.getRow(j));
     }
   }
   distance=distance/((double)m1.rows()*(double)m2.rows());
   return distance;
 }
  /**
   * Adds an object to this classifier. The id is determined automatically
   * and returned in the parameter.
   */
  bool shClassifier::trainObject(const dmatrix& input, int& id) {
    id=0;
    for (std::map<int,int>::const_iterator i=rIdMap.begin(); i != rIdMap.end(); i++) {
      if (i->second >= id) {
        id=i->second+1;
      }
    }
    idMap[id]=nClasses;
    rIdMap[nClasses]=id;
    nClasses++;

    const parameters& par=getParameters();

    // do not touch min and max
    if (par.binVector.size() > 0) {
      models.push_back(new sparseHistogram(par.binVector,
                                           par.minimum,par.maximum));
    } else {
      models.push_back(new sparseHistogram(par.numberOfBins,
                                           par.minimum,par.maximum));
    }
    // fill histograms
    int sum=0;
    for (int j=0; j<input.rows(); j++) {
      models[nClasses-1]->add(input.getRow(j));
      sum++;
    }
    models[nClasses-1]->divide(static_cast<float>(sum));

    defineOutputTemplate();

    return true;
  }
		/**
		   Unified interface to solve a linear equation
		*/
		int solveLinearEquation(const dmatrix &_a, const dvector &_b, dvector &_x, double _sv_ratio)
		{
				if(_a.cols() == _a.rows())
						return solveLinearEquationLU(_a, _b, _x);
				else
						return solveLinearEquationSVD(_a, _b, _x,  _sv_ratio);
		}
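// --- Usage sketch for the unified interface ---
// Hypothetical example: it assumes dmatrix/dvector are Eigen typedefs (as
// the use of dmatrix::Identity() elsewhere suggests) and that the wrappers
// live in namespace hrp; the header name below is a guess.
#include <hrpUtil/MatrixSolvers.h>  // hypothetical header name
#include <iostream>

int main()
{
    hrp::dmatrix A(2, 2);
    A << 3.0, 1.0,
         1.0, 2.0;
    hrp::dvector b(2);
    b << 9.0, 8.0;

    hrp::dvector x;
    // square system: dispatches to the LU solver; a rectangular A would
    // take the SVD path, with 1.0e-3 as the singular-value cutoff ratio
    int info = hrp::solveLinearEquation(A, b, x, 1.0e-3);
    std::cout << "info=" << info << " x=" << x.transpose() << std::endl;
    return 0;
}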
 double clusteringValidity::getCentroidDistance(const dmatrix& m1,
                                                const dmatrix& m2) const {
   l2Distance<double> dist;
   int i;
   dvector a(m1.columns());
   dvector b(m2.columns());
   for (i=0; i<m1.rows();i++) {
     a.add(m1.getRow(i));
   }
   a.divide(m1.rows());
   for (i=0; i<m2.rows();i++) {
     b.add(m2.getRow(i));
   }
   b.divide(m2.rows());
   return dist.apply(a,b);
 }
		//----- Calculation of eigenvectors and eigenvalues -----
		int calcEigenVectors(const dmatrix &_a, dmatrix  &_evec, dvector &_eval)
		{
				assert( _a.cols() == _a.rows() );

				typedef dmatrix mlapack;
				typedef dvector vlapack;
				
				mlapack a    = _a; // working copy: dgeev_ destroys its input
				mlapack evec = _evec;
				vlapack eval = _eval;
				
				int n = (int)_a.cols();
				
				evec.resize(n, n); // make sure the output buffers are big enough
				eval.resize(n);
				
				double *wi = new double[n];   // imaginary parts of the eigenvalues (discarded)
				double *vl = new double[n*n]; // left eigenvectors (not referenced: jobvl is "N")
				double *work = new double[4*n];

				int lwork = 4*n;
				int info;
				
				dgeev_("N","V", &n, &(a(0,0)), &n, &(eval(0)), wi, vl, &n, &(evec(0,0)), &n, work, &lwork, &info);
				
				_evec = evec.transpose();
				_eval = eval;
				
				delete [] wi;
				delete [] vl;
				delete [] work;
				
				return info;
		}
		//--- Calculation of determinant ---
		double det(const dmatrix &_a)
		{
				assert( _a.cols() == _a.rows() );

				typedef dmatrix mlapack;
				mlapack a = _a;	// working copy: dgetrf_ overwrites it with the LU factors

				int info;
				int n = (int)a.cols();
				int lda = n;
				std::vector<int> ipiv(n);

#ifdef USE_CLAPACK_INTERFACE
				info = clapack_dgetrf(CblasColMajor,
									  n, n, &(a(0,0)), lda, &(ipiv[0]));
#else
				dgetrf_(&n, &n, &a(0,0), &lda, &(ipiv[0]), &info);
#endif

				double det=1.0;
	
				// each row interchange recorded in ipiv flips the sign
				for(int i=0; i < n-1; i++)
						if(ipiv[i] != i+1)  det = -det;
				
				// det(A) = sign * product of the diagonal of U
				for(int i=0; i < n; i++)  det *= a(i,i);

				assert(info == 0);
				
				return det;
		}
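// --- Sanity check for det() ---
// Short sketch under the same Eigen-typedef assumption (hypothetical
// header name): LU pivoting of this matrix records one row interchange,
// so the sign flips exactly once.
#include <hrpUtil/MatrixSolvers.h>  // hypothetical header name
#include <iostream>

int main()
{
    hrp::dmatrix A(2, 2);
    A << 0.0, 1.0,
         1.0, 0.0;
    std::cout << hrp::det(A) << std::endl;  // expect -1
    return 0;
}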
 double clusteringValidity::getAverageDiameter(const dmatrix& m1) const {
   double distance=0.0;
   int j,k;
   l2Distance<double> dist;
   for (j=0; j<m1.rows(); j++) {
     for (k=0; k<m1.rows(); k++) {
       distance+=dist.apply(m1.getRow(j),
                            m1.getRow(k));
     }
   }
   if (m1.rows()>1) {
     return (distance/((double)m1.rows()*
                       (double)(m1.rows()-1)));
   } else {
     return distance;
   }
 }
  bool regionMerge::apply(const imatrix& srcmask,
                          const dmatrix& simMat,
                          const dvector& thresholds,
                          imatrix& destmask) const {
    int i,j;
    const dvector& thrs = thresholds;

    ivector eLab(simMat.rows());
    for (i=0;i<eLab.size();++i)
      eLab.at(i) = i;

    double a;
    for (j=0;j<simMat.rows();++j)
      for (i=0;i<simMat.columns();++i) {
        if (simMat.at(j,i) > (a=max(thrs.at(j),thrs.at(i))) ) {
//          cout<<j<<" "<<i<<" "<<simMat.at(j,i)<<" "<<a<<endl;
          if (eLab.at(j)>eLab.at(i)) {
            eLab.at(j)=eLab.at(i);
          } else {
            eLab.at(i)=eLab.at(j);
          }

        }
      }


    // now correct the labels
    for (j=eLab.size()-1;j>=0;--j) {
      i = j;
      while (eLab.at(i) != i) {
        i=eLab.at(i);
      }
      eLab.at(j) = i;
    }

    destmask.resize(srcmask.size(),0,false,false);
    for (j=0;j<srcmask.rows();++j)
      for (i=0;i<srcmask.columns();++i) {
        destmask.at(j,i) = eLab.at(srcmask.at(j,i));
      }

    return true;
 }
 double clusteringValidity::getAverageToCentroidDiameter(const dmatrix& m1) const {
   dvector a(m1.columns());
   int i,j;
   l2Distance<double> dist;
   double distance=0.0;
   for (i=0; i<m1.rows(); i++) {
     a.add(m1.getRow(i));
   }
   a.divide(m1.rows());
   for (j=0; j< m1.rows(); j++) {
     distance+=dist.apply(a,m1.getRow(j));
   }
   if (m1.rows()>0) {
     return (2*distance/(double)m1.rows());
   } else {
     return 2*distance;
   }
   
 }
  /*
   * operates on a copy of the given %parameters.
   * @param srcmask source mask.  Each object must be represented by one
   *                              label.
   * @param simMat The similarity matrix.  The size of the matrix must
   *               be at least equal to the number of labels plus one.
   * @param destmask resulting mask with merged objects.
   * @return true if apply successful or false otherwise.
   */
  bool regionMerge::apply(const imatrix& srcmask,
                          const dmatrix& simMat,
                          imatrix& destmask) const {
    int i,j;
    ivector eLab(simMat.rows());
    const double thr = getParameters().threshold;

    for (i=0;i<eLab.size();++i) {
      eLab.at(i) = i;
    }

    for (j=0;j<simMat.rows();++j) {
      for (i=0;i<simMat.columns();++i) {
        if (simMat.at(j,i) > thr) {
          if (eLab.at(j)>eLab.at(i)) {
            eLab.at(j)=eLab.at(i);
          } else {
            eLab.at(i)=eLab.at(j);
          }

        }
      }
    }

    // now correct the labels
    for (j=eLab.size()-1;j>=0;--j) {
      i = j;
      while (eLab.at(i) != i) {
        i=eLab.at(i);
      }
      eLab.at(j) = i;
    }

    destmask.resize(srcmask.size(),0,false,false);

    for (j=0;j<srcmask.rows();++j)
      for (i=0;i<srcmask.columns();++i) {
        destmask.at(j,i) = eLab.at(srcmask.at(j,i));
      }

    return true;
  }
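// --- Toy illustration of the label-merging logic above ---
// A minimal self-contained sketch with hypothetical data: plain arrays
// instead of the lti types, three labels, threshold 0.5. Labels are joined
// whenever similarity exceeds the threshold, then each label is chased
// down to its representative, exactly as in regionMerge::apply().
#include <iostream>
#include <vector>

int main()
{
  const double sim[3][3] = {{1.0, 0.8, 0.1},
                            {0.8, 1.0, 0.2},
                            {0.1, 0.2, 1.0}};
  const double thr = 0.5;

  std::vector<int> eLab(3);
  for (int i = 0; i < 3; ++i) eLab[i] = i;

  // merge: point the larger label at the smaller one
  for (int j = 0; j < 3; ++j)
    for (int i = 0; i < 3; ++i)
      if (sim[j][i] > thr) {
        if (eLab[j] > eLab[i]) eLab[j] = eLab[i];
        else                   eLab[i] = eLab[j];
      }

  // correct the labels: follow each chain to its representative
  for (int j = 2; j >= 0; --j) {
    int i = j;
    while (eLab[i] != i) i = eLab[i];
    eLab[j] = i;
  }

  for (int j = 0; j < 3; ++j)
    std::cout << j << " -> " << eLab[j] << "\n";  // 0->0, 1->0, 2->2
  return 0;
}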
  // Trains the classifier: builds one sparse histogram model per class.
  bool shClassifier::train(const dmatrix& input, const ivector& ids) {

    buildIdMaps(ids);

    boundsFunctor<double> bounds;
    const parameters& par=getParameters();

    dvector min,max;

    if (par.autoBounds) {
      bounds.boundsOfRows(input,min,max);
    } else {
      min=par.minimum;
      max=par.maximum;
    }

    _lti_debug("Binvector.size = " << par.binVector.size() << "\n");

    int i;

    // build one histogram per object
    models.resize(nClasses);
    for (i=0; i<nClasses; i++) {
      if (par.binVector.size() == min.size()) {
        models[i]=new sparseHistogram(par.binVector,min,max);
      } else {
        models[i]=new sparseHistogram(par.numberOfBins,min,max);
      }
    }

    ivector sum(nClasses);

    // fill histograms
    for (i=0; i<input.rows(); i++) {
      int id=idMap[ids.at(i)];
      models[id]->add(input.getRow(i));
      sum[id]++;
    }

    // normalize histograms
    for (i=0; i<nClasses; i++) {
      _lti_debug("Sum of " << i << " is " << sum.at(i) << "\n");
      if (sum.at(i) == 0) {
        delete models[i];
        models[i]=0;
      } else {
        models[i]->divide(static_cast<float>(sum.at(i)));
      }
    }
    defineOutputTemplate();
    return true;
  }
int hrp::calcSRInverse(const dmatrix& _a, dmatrix &_a_sr, double _sr_ratio, dmatrix _w) {
    // J# = W Jt(J W Jt + kI)-1 (Weighted SR-Inverse)
    // SR-inverse :
    // Y. Nakamura and H. Hanafusa : "Inverse Kinematic Solutions With
    // Singularity Robustness for Robot Manipulator Control"
    // J. Dyn. Sys., Meas., Control  1986. vol 108, Issue 3, pp. 163--172.

    const int c = _a.rows(); // task-space dimension (typically 6)
    const int n = _a.cols(); // joint-space dimension

    if ( _w.cols() != n || _w.rows() != n ) {
        _w = dmatrix::Identity(n, n);
    }

    dmatrix at = _a.transpose();
    dmatrix a1(c, c);
    a1 = (_a * _w * at +  _sr_ratio * dmatrix::Identity(c,c)).inverse();

    //if (DEBUG) { dmatrix aat = _a * at; std::cerr << " a*at :" << std::endl << aat; }

    _a_sr  = _w * at * a1;
    //if (DEBUG) { dmatrix ii = _a * _a_sr; std::cerr << "    i :" << std::endl << ii; }
    return 0;
}
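// --- What the SR damping buys ---
// Self-contained sketch, assuming dmatrix is Eigen::MatrixXd as the use of
// dmatrix::Identity() above suggests: near a singular J the plain
// pseudo-inverse blows up, while the SR-inverse stays bounded by ~1/k.
#include <Eigen/Dense>
#include <iostream>

int main()
{
    typedef Eigen::MatrixXd dmatrix;

    // nearly singular 2x3 Jacobian: second row almost parallel to the first
    dmatrix J(2, 3);
    J << 1.0, 0.0,  0.0,
         1.0, 1e-6, 0.0;

    const double k = 1e-3;                    // _sr_ratio
    dmatrix W = dmatrix::Identity(3, 3);      // joint weighting

    // J# = W J^T (J W J^T + k I)^-1
    dmatrix Jt  = J.transpose();
    dmatrix Jsr = W * Jt * (J * W * Jt + k * dmatrix::Identity(2, 2)).inverse();

    std::cout << Jsr << std::endl;            // entries stay O(1/k)
    return 0;
}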
  /*
   * compute mat*vct' where vct' is a vector with one additional element
   * (1.0) at the beginning of vct.
   */
  bool MLP::biasMultiply(const dmatrix& mat,
                         const dvector& vct,
                         dvector& res) const {
    int j;
    dmatrix::const_iterator it,eit;
    dvector::iterator rit;
    dvector::const_iterator vit,evit;

    res.resize(mat.rows(),0.0,false,false);
    it = mat.begin();
    eit = mat.end();
    rit = res.begin();
    evit = vct.end();

    for (j=0;j<mat.rows();++j,++rit) {
      *rit = *it;
      ++it;
      for (vit=vct.begin();vit!=evit;++it,++vit) {
        *rit += (*vit)*(*it);
      }
    }

    return true;
  }
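// --- The same product in matrix form ---
// biasMultiply() computes res = mat * [1; vct] with the loop written out.
// A hypothetical Eigen sketch of the equivalence (the sizes are made up):
#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::MatrixXd mat(2, 4);        // rows() x (vct.size()+1)
  mat <<  0.5, 1.0, 2.0, 3.0,
         -1.0, 0.0, 1.0, 2.0;
  Eigen::VectorXd vct(3);
  vct << 1.0, 2.0, 3.0;

  // prepend the bias input 1.0, then multiply; the first column of mat
  // therefore acts as the bias weights
  Eigen::VectorXd vctPrime(4);
  vctPrime << 1.0, vct;
  Eigen::VectorXd res = mat * vctPrime;

  std::cout << res.transpose() << std::endl;  // 14.5  7
  return 0;
}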
 double clusteringValidity::getAverageInterpointDistance(const dmatrix& m1,
                                                         const dmatrix& m2) const {
   l2Distance<double> dist;
   int i;
   dvector a(m1.columns());
   dvector b(m2.columns());
   for (i=0; i<m1.rows();i++) {
     a.add(m1.getRow(i));
   }
   a.divide(m1.rows()); // centroid 1
   for (i=0; i<m2.rows();i++) {
     b.add(m2.getRow(i));
   }
   b.divide(m2.rows()); // centroid 2
   double distance=0.0;
   for (i=0; i<m1.rows(); i++) {
     distance+=dist.apply(m1.getRow(i),a);
   }
   for (i=0; i<m2.rows(); i++) {
     distance+=dist.apply(m2.getRow(i),b);
   }
   return (distance/(m1.rows()+m2.rows()));
 }
  /*
   * compute the error of the given weights for the whole training set.
   */
  bool MLP::computeTotalError(const std::vector<dmatrix>& mWeights,
                              const dmatrix& inputs,
                              const ivector& ids,
                              double& totalError) const {

    if (ids.size() != inputs.rows()) {
      return false;
    }

    const parameters& param = getParameters();
    const int layers = param.hiddenUnits.size()+1;
    std::vector<dvector> uNet(layers),uOut(layers);
    int i;
    double tmp;
    totalError=0.0;
    for (i=0;i<ids.size();++i) {
      propagate(inputs.getRow(i),mWeights,uNet,uOut);
      computePatternError(ids.at(i),uOut.back(),tmp);
      totalError+=tmp;
    }

    return true;
  }
  bool SOFM2D::trainDist(const dmatrix& data) {

    bool b=true;
    int i,j,k,maxN;
    int startx, starty, stopx, stopy;
    kernel2D<double> facN;

    const parameters& param=getParameters();

    distanceFunctor<double>* dist = 0;
    if (param.metricType == parameters::L1) {
      dist = new l1Distance<double>();
    } else {
      dist = new l2Distance<double>();
    }
    distanceFunctor<double>::parameters dfp;
    dfp.rowWise=true;
    dist->setParameters(dfp);

    int step=0;
    int epoch;
    scramble<int> mix;
    ivector idx(data.rows());
    for (i=0; i<data.rows(); i++) {
      idx[i]=i;
    }

    dvector distances;
    dvector delta;
    int winner;

    char buffer[256];
    bool abort=false;

    // temp value needed for kernel init
    const double tfac=sqrt(-2*log(param.orderNeighborThresh));

    //ordering
    for (epoch=0; epoch<param.stepsOrdering; epoch++) {

      if (validProgressObject()) {
        sprintf(buffer,"ordering step %i",epoch);
        getProgressObject().step(buffer);
        abort = getProgressObject().breakRequested();
      }
      if (abort) return b;

      mix.apply(idx);
      for (i=0; i<idx.size(); i++, step++) {
        const dvector& curr = data.getRow(idx[i]);
        dist->apply(grid, curr, distances);
        winner=distances.getIndexOfMinimum();

        maxN=static_cast<int>(sigma*tfac);
        getNeighborhoodKernel(maxN, facN);

        //find bounds
        if (winner%sizeX-maxN < 0) {
          startx=-winner%sizeX;
        } else {
          startx=-maxN;
        }
        if (winner%sizeX+maxN > sizeX) {
          stopx=sizeX-winner%sizeX;
        } else {
          stopx=maxN;
        }
        if (winner/sizeX-maxN < 0) {
          starty=-winner/sizeX;
        } else {
          starty=-maxN;
        }
        if (winner/sizeX+maxN > sizeY) {
          stopy=sizeY-winner/sizeX;
        } else {
          stopy=maxN;
        }
        for (j=starty; j<stopy; j++) {
          for (k=startx; k<stopx; k++) {
            if (facN.at(j,k)==0.) {
              continue;
            }
            delta.subtract(curr, grid[winner+j*sizeX+k]);
            grid[winner+j*sizeX+k].addScaled(lrOrder*facN.at(j,k),delta);
          }
        }


        lrOrder-=lrOrderDelta;
        sigma-=sigmaDelta;
      }
    }

    // convergence training

    // neighborhood is fixed: calc matrix of factors.
    maxN=static_cast<int>(sigma*tfac);
    getNeighborhoodKernel(maxN, facN);

    double lrC=lrConvergeA/lrConvergeB;
    step=0;
    for (epoch=0; epoch<param.stepsConvergence; epoch++) {

      if (validProgressObject()) {
        sprintf(buffer,"convergence step %i",epoch);
        getProgressObject().step(buffer);
        abort = getProgressObject().breakRequested();
      }
      if (abort) return b;

      mix.apply(idx);
      for (i=0; i<idx.size(); i++, step++) {
        const dvector& curr = data.getRow(idx[i]);
        //find winner
        dist->apply(grid, curr, distances);
        winner=distances.getIndexOfMinimum();
        //find bounds
        if (winner%sizeX-maxN < 0) {
          startx=-winner%sizeX;
        } else {
          startx=-maxN;
        }
        if (winner%sizeX+maxN > sizeX) {
          stopx=sizeX-winner%sizeX;
        } else {
          stopx=maxN;
        }
        if (winner/sizeX-maxN < 0) {
          starty=-winner/sizeX;
        } else {
          starty=-maxN;
        }
        if (winner/sizeX+maxN > sizeY) {
          stopy=sizeY-winner/sizeX;
        } else {
          stopy=maxN;
        }
        for (j=starty; j<stopy; j++) {
          for (k=startx; k<stopx; k++) {
            if (facN.at(j,k)==0.) {
              continue;
            }
            delta.subtract(curr, grid[winner+j*sizeX+k]);
            grid[winner+j*sizeX+k].addScaled(lrC*facN.at(j,k),delta);
          }
        }
        lrC=lrConvergeA/(step+lrConvergeB);
      }
    }
    delete dist;
    return b;
  }
  bool SOFM2D::trainDot(const dmatrix& data) {

    bool b=true;

    int i,j,k,maxN;
    int startx, starty, stopx, stopy;
    kernel2D<double> facN;

    l2Distance<double> dist;
    l2Distance<double>::parameters dfp;
    dfp.rowWise=true;
    dist.setParameters(dfp);

    const parameters& param=getParameters();

    int step=0;
    int epoch;
    scramble<int> mix;
    ivector idx(data.rows());
    for (i=0; i<data.rows(); i++) {
      idx[i]=i;
    }

    dvector prod;
    dvector sum;
    int winner;

    //normalize grid
    dvector norms;
    b = b && dist.apply(grid,norms);
    for (i=0; i<grid.rows(); i++) {
      grid.getRow(i).divide(norms[i]);
    }

    // temp value needed for kernel init
    const double tfac=sqrt(-2*log(param.orderNeighborThresh));

    char buffer[256];
    bool abort=false;
    //ordering
    for (epoch=0; epoch<param.stepsOrdering; epoch++) {

      if (validProgressObject()) {
        sprintf(buffer,"ordering step %i",epoch);
        getProgressObject().step(buffer);
        abort = getProgressObject().breakRequested();
      }
      if (abort) return b;

      mix.apply(idx);
      for (i=0; i<idx.size(); i++, step++) {
        const dvector& curr = data.getRow(idx[i]);
        //find winner
        grid.multiply(curr, prod);
        winner=prod.getIndexOfMaximum();

        //find size and init neighborhood function
        maxN=static_cast<int>(sigma*tfac);
        getNeighborhoodKernel(maxN, facN);

        //find bounds
        if (winner%sizeX-maxN < 0) {
          startx=-winner%sizeX;
        } else {
          startx=-maxN;
        }
        if (winner%sizeX+maxN > sizeX) {
          stopx=sizeX-winner%sizeX;
        } else {
          stopx=maxN;
        }
        if (winner/sizeX-maxN < 0) {
          starty=-winner/sizeX;
        } else {
          starty=-maxN;
        }
        if (winner/sizeX+maxN > sizeY) {
          stopy=sizeY-winner/sizeX;
        } else {
          stopy=maxN;
        }
        for (j=starty; j<stopy; j++) {
          for (k=startx; k<stopx; k++) {
            if (facN.at(j,k)==0.) {
              continue;
            }
            dvector& winnerRow=grid[winner+j*sizeX+k];
            winnerRow.addScaled(lrOrder*facN.at(j,k), curr);
            winnerRow.divide(dist.apply(winnerRow));
          }
        }


        lrOrder-=lrOrderDelta;
        sigma-=sigmaDelta;
      }
    }

    // convergence training

    // neighborhood is fixed: calc matrix of factors.
    maxN=static_cast<int>(sigma*tfac);
    getNeighborhoodKernel(maxN, facN);

    double lrC=lrConvergeA/lrConvergeB;
    step=0;
    for (epoch=0; epoch<param.stepsConvergence; epoch++) {

      if (validProgressObject()) {
        sprintf(buffer,"convergence step %i",epoch);
        getProgressObject().step(buffer);
        abort = getProgressObject().breakRequested();
      }
      if (abort) return b;

      mix.apply(idx);
      for (i=0; i<idx.size(); i++, step++) {
        const dvector& curr = data.getRow(idx[i]);
        //find winner
        grid.multiply(curr, prod);
        winner=prod.getIndexOfMaximum();
        //find bounds
        if (winner%sizeX-maxN < 0) {
          startx=-winner%sizeX;
        } else {
          startx=-maxN;
        }
        if (winner%sizeX+maxN > sizeX) {
          stopx=sizeX-winner%sizeX;
        } else {
          stopx=maxN;
        }
        if (winner/sizeX-maxN < 0) {
          starty=-winner/sizeX;
        } else {
          starty=-maxN;
        }
        if (winner/sizeX+maxN > sizeY) {
          stopy=sizeY-winner/sizeX;
        } else {
          stopy=maxN;
        }
        for (j=starty; j<stopy; j++) {
          for (k=startx; k<stopx; k++) {
            if (facN.at(j,k)==0.) {
              continue;
            }
            dvector& winnerRow=grid[winner+j*sizeX+k];
            winnerRow.addScaled(lrC*facN.at(j,k), curr);
            winnerRow.divide(dist.apply(winnerRow));
          }
        }
        lrC=lrConvergeA/(step+lrConvergeB);
      }
    }

    return b;

  }
		/**
		   Calculate the pseudo-inverse using SVD (singular value decomposition)
		   via the LAPACK routine DGESVD (_a can be a non-square matrix).
		*/
		int calcPseudoInverse(const dmatrix &_a, dmatrix &_a_pseu, double _sv_ratio)
		{
				int i, j, k;
				char jobu  = 'A';
				char jobvt = 'A';
				int m = (int)_a.rows();
				int n = (int)_a.cols();
				int max_mn = max(m,n);
				int min_mn = min(m,n);

				dmatrix a(m,n);
				a = _a;

				int lda = m;
				double *s = new double[max_mn];
				int ldu = m;
				double *u = new double[ldu*m];
				int ldvt = n;
				double *vt = new double[ldvt*n];
				int lwork = max(3*min_mn+max_mn, 5*min_mn);     // for CLAPACK ver.2 & ver.3
				double *work = new double[lwork];
				int info;

				for(i = 0; i < max_mn; i++) s[i] = 0.0;
		   
				dgesvd_(&jobu, &jobvt, &m, &n, &(a(0,0)), &lda, s, u, &ldu, vt, &ldvt, work,
						&lwork, &info);


				double smin, smax=0.0;
				for (j = 0; j < min_mn; j++) if (s[j] > smax) smax = s[j];
				smin = smax*_sv_ratio; 			// default _sv_ratio is 1.0e-3
				for (j = 0; j < min_mn; j++) if (s[j] < smin) s[j] = 0.0;

				//------------ calculate pseudo inverse   pinv(A) = V*S^(-1)*U^(T)
				// S^(-1)*U^(T)
				for (j = 0; j < m; j++){
						if (s[j]){
								for (i = 0; i < m; i++) u[j*m+i] /= s[j];
						}
						else {
								for (i = 0; i < m; i++) u[j*m+i] = 0.0;
						}
				}

				// V * (S^(-1)*U^(T)) 
				_a_pseu.resize(n,m);
				for(j = 0; j < n; j++){
						for(i = 0; i < m; i++){
								_a_pseu(j,i) = 0.0;
								for(k = 0; k < min_mn; k++){
										if(s[k]) _a_pseu(j,i) += vt[j*n+k] * u[k*m+i];
								}
						}
				}

				delete [] work;
				delete [] vt;
				delete [] s;
				delete [] u;

				return info;
		}
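// --- Usage sketch for calcPseudoInverse ---
// Same assumptions as above (Eigen typedefs, hypothetical header name);
// checks the defining Moore-Penrose property A * A+ * A == A.
#include <hrpUtil/MatrixSolvers.h>  // hypothetical header name
#include <iostream>

int main()
{
    hrp::dmatrix A(3, 2);
    A << 1.0, 2.0,
         3.0, 4.0,
         5.0, 6.0;

    hrp::dmatrix Apinv;
    int info = hrp::calcPseudoInverse(A, Apinv, 1.0e-3);  // Apinv is 2x3

    std::cout << "info=" << info
              << " residual=" << (A * Apinv * A - A).norm() << std::endl;
    return 0;
}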
		/**
		   Solve a linear equation using SVD (singular value decomposition)
		   via the LAPACK routine DGESVD (_a can be a non-square matrix).
		*/
		int solveLinearEquationSVD(const dmatrix &_a, const dvector &_b, dvector &_x, double _sv_ratio)
		{
				const int m = _a.rows();
				const int n = _a.cols();
				assert( m == static_cast<int>(_b.size()) );
				_x.resize(n);

				int i, j;
				char jobu  = 'A';
				char jobvt = 'A';
        
				int max_mn = max(m,n);
				int min_mn = min(m,n);

				dmatrix a(m,n);
				a = _a;

				int lda = m;
				double *s = new double[max_mn];		// singular values
				int ldu = m;
				double *u = new double[ldu*m];
				int ldvt = n;
				double *vt = new double[ldvt*n];

				int lwork = max(3*min_mn+max_mn, 5*min_mn);     // for CLAPACK ver.2 & ver.3
				double *work = new double[lwork];
				int info;

				for(i = 0; i < max_mn; i++) s[i] = 0.0;

				dgesvd_(&jobu, &jobvt, &m, &n, &(a(0,0)), &lda, s, u, &ldu, vt, &ldvt, work,
						&lwork, &info);

				double tmp;

				double smin, smax=0.0;
				for (j = 0; j < min_mn; j++) if (s[j] > smax) smax = s[j];
				smin = smax*_sv_ratio;			// default _sv_ratio is 1.0e-3
				for (j = 0; j < min_mn; j++) if (s[j] < smin) s[j] = 0.0;
	
				double *utb = new double[m];		// U^T*b

				for (j = 0; j < m; j++){
						tmp = 0;
						if (s[j]){
								for (i = 0; i < m; i++) tmp += u[j*m+i] * _b(i);
								tmp /= s[j];
						}
						utb[j] = tmp;
				}

				// v*utb
				for (j = 0; j < n; j++){
						tmp = 0;
						for (i = 0; i < n; i++){
								if(s[i]) tmp += utb[i] * vt[j*n+i];
						}
						_x(j) = tmp;
				}

				delete [] utb;
				delete [] work;
				delete [] vt;
				delete [] s;
				delete [] u;
	
				return info;
		}
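// --- Cross-check against Eigen's own SVD solver ---
// A sketch under the same assumptions: for a well-conditioned system the
// wrapper and Eigen::JacobiSVD should agree on the least-squares solution.
#include <Eigen/Dense>
#include <hrpUtil/MatrixSolvers.h>  // hypothetical header name
#include <iostream>

int main()
{
    hrp::dmatrix A(3, 2);           // overdetermined system
    A << 1.0, 1.0,
         1.0, 2.0,
         1.0, 3.0;
    hrp::dvector b(3);
    b << 6.0, 0.0, 0.0;

    hrp::dvector x;
    hrp::solveLinearEquationSVD(A, b, x, 1.0e-3);

    Eigen::JacobiSVD<Eigen::MatrixXd>
        svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV);
    std::cout << "wrapper: " << x.transpose() << "\n"
              << "eigen:   " << svd.solve(b).transpose() << std::endl;
    return 0;
}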
		/**
		   Solve a linear equation using the LU-based LAPACK expert driver
		   DGESVX (_a must be a square matrix).
		*/
		int solveLinearEquationLU(const dmatrix &_a, const dvector &_b, dvector &_x)
		{
				assert(_a.cols() == _a.rows() && _a.cols() == _b.size() );

				int n = (int)_a.cols();
				int nrhs = 1;

				int lda = n;

				std::vector<int> ipiv(n);

				int ldb = n;

				int info;

				// compute the solution
#ifndef USE_CLAPACK_INTERFACE
  				char fact      = 'N';
				char transpose = 'N';

				double *af = new double[n*n];

				int ldaf = n;

				char equed = 'N';

				double *r = new double[n];
				double *c = new double[n];

				int ldx = n;

				double rcond;

				double *ferr = new double[nrhs];
				double *berr = new double[nrhs];
				double *work = new double[4*n];

				int *iwork = new int[n];

			    _x.resize(n);			// memory allocation for the return vector
				dgesvx_(&fact, &transpose, &n, &nrhs, const_cast<double *>(&(_a(0,0))), &lda, af, &ldaf, &(ipiv[0]),
						&equed, r, c, const_cast<double *>(&(_b(0))), &ldb, &(_x(0)), &ldx, &rcond,
						ferr, berr, work, iwork, &info);

				delete [] iwork;
				delete [] work;
				delete [] berr;
				delete [] ferr;
				delete [] c;
				delete [] r;

				delete [] af;
#else
				dmatrix a = _a;	// clapack_dgesv overwrites the coefficient matrix
				_x = _b;
				info = clapack_dgesv(CblasColMajor,
									 n, nrhs, &(a(0,0)), lda, &(ipiv[0]),
									 &(_x(0)), ldb);
#endif

				return info;
		}
  // implements the Fuzzy C Means algorithm
  bool fuzzyCMeans::train(const dmatrix& data) {

    bool ok=true;
    int t=0;
    // create the distance functor according to the parameter norm
    distanceFunctor<double>* distFunc = 0;
    switch (getParameters().norm)  {
      case parameters::L1:
        distFunc = new l1Distance<double>;
        break;
      case parameters::L2:
        distFunc = new l2Distance<double>;
        break;
      default:
        // fall back to L2 so that distFunc is never null
        distFunc = new l2Distance<double>;
        break;
    int nbOfClusters=getParameters().nbOfClusters;
    int nbOfPoints=data.rows();
    if(nbOfClusters>nbOfPoints) {
      setStatusString("more clusters than points");
      delete distFunc;
      return false;
    }
    double q=getParameters().fuzzifier;
    if (q<=1) {
      setStatusString("the fuzzifier q has to be bigger than 1");
      delete distFunc;
      return false;
    }
    // select some points of the given data to initialise the centroids
    selectRandomPoints(data,nbOfClusters,centroids);
    // initialize variables
    centroids.resize(nbOfClusters,data.columns(),0.0);
    dmatrix memberships(nbOfPoints, nbOfClusters, 0.0);
    double terminationCriterion=0;
    double newDistance;
    dvector newCenter(data.columns());
    dvector currentPoint(data.columns());
    dmatrix newCentroids(nbOfClusters,data.columns(),0.0);
    double sumOfMemberships=0;
    double membership=0;
    double dist1;
    double dist2;
    int i,j,k,m;
    do {
        // calculate new memberships
      memberships.fill(0.0);  //  clear old memberships
      for (i=0; i<nbOfPoints; i++) {
        for (j=0; j<nbOfClusters; j++) {
          newDistance=0;
          dist1=distFunc->apply(data.getRow(i),
                                centroids.getRow(j));
          for (k=0; k<nbOfClusters; k++) {
            dist2=distFunc->apply(data.getRow(i),
                                  centroids.getRow(k));
       // if distance is 0, normal calculation of membership is not possible.
            if (dist2!=0) {
              newDistance+=pow((dist1/dist2),(1/(q-1)));
            }
          }
      // if point and centroid are equal
          if (newDistance!=0)
            memberships.at(i,j)=1/newDistance;
          else {
            dvector row(memberships.columns(),0.0);
            memberships.setRow(i,row);
            memberships.at(i,j)=1;
            break;
          }
        }
      }
      t++;  // counts the iterations

     // calculate new centroids based on modified memberships
      for (m=0; m<nbOfClusters; m++) {
        newCenter.fill(0.0);
        sumOfMemberships=0;
        for (i=0; i<nbOfPoints; i++) {
          currentPoint=data.getRow(i);
          membership=pow(memberships.at(i,m),q);
          sumOfMemberships+=membership;
          currentPoint.multiply(membership);
          newCenter.add(currentPoint);
        }
        newCenter.divide(sumOfMemberships);
        newCentroids.setRow(m,newCenter);
      }
      terminationCriterion=distFunc->apply(centroids,newCentroids);
      centroids=newCentroids;
    }
    // the termination criterions
    while ( (terminationCriterion>getParameters().epsilon)
            && (t<getParameters().maxIterations));

    int nbClusters = nbOfClusters;
    //Put the id information into the result object
    //Each cluster has the id of its position in the matrix
    ivector tids(nbClusters);
    for (i=0; i<nbClusters; i++) {
      tids.at(i)=i;
    }
    outTemplate=outputTemplate(tids);
    delete distFunc; // release the functor allocated above
    return ok;
  }
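// --- The membership update as a formula ---
// The loop above implements u_ij = 1 / sum_k (d_ij / d_ik)^(1/(q-1)).
// A tiny self-contained numeric sketch for one point and two centroids:
#include <cmath>
#include <iostream>

int main()
{
  const double q = 2.0;            // fuzzifier
  const double d[2] = {1.0, 3.0};  // distances to the two centroids

  for (int j = 0; j < 2; ++j) {
    double denom = 0.0;
    for (int k = 0; k < 2; ++k)
      denom += std::pow(d[j] / d[k], 1.0 / (q - 1.0));
    std::cout << "u_" << j << " = " << 1.0 / denom << "\n";
  }
  // u_0 = 0.75, u_1 = 0.25: memberships sum to 1, the closer centroid wins
  return 0;
}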
  bool SOFM2D::train(const dmatrix& data) {

    // tracks the status of the training process.
    // if an error occurs set to false and use setStatusString()
    // however, training should continue, fixing the error as well as possible
    bool b=true;

    int i;

    const parameters& param=getParameters();

    // find the actual size of the grid
    if (param.calculateSize) {
      b = calcSize(data);
    } else {
      sizeX=param.sizeX;
      sizeY=param.sizeY;
    }

    // check whether one of the dimensions has negative or zero size
    // and try to fix by using alternate way of setting sizes.
    if (sizeX<=0 || sizeY<=0) {
      b=false;
      std::string err="Negative or zero size of one dimension";
      if (param.calculateSize) {
        if (param.area<=0) {
          err += "\narea is <= 0";
          if (param.sizeX>0 && param.sizeY>0) {
            sizeX=param.sizeX;
            sizeY=param.sizeY;
            err += "\nusing sizeX and sizeY instead";
          }
        }
      } else {
        if (param.sizeX<=0) {
          err += "\nsizeX <= 0";
        }
        if (param.sizeY<=0) {
          err += "\nsizeY <= 0";
        }
        if (param.area>0) {
          err += "\ncalculating size from area instead";
          calcSize(data);
          err += getStatusString();
        }
      }
      setStatusString(err.c_str());
    }

    // set grid to size
    grid.resize(sizeY*sizeX, data.columns());

    //set learn rates
    setLearnRates(data.rows());

    if (validProgressObject()) {
      getProgressObject().reset();
      std::string str("SOFM2D: Training using ");
      switch(param.metricType) {
        case parameters::L1:
          str += "L1 distance";
          break;
        case parameters::L2:
          str += "L2 distance";
          break;
        case parameters::Dot:
          str += "dot product";
          break;
        default:
          str += "unnamed method";
      }
      char buffer[256];
      sprintf(buffer," size of map %i x %i", sizeY, sizeX);
      str += std::string(buffer);
      getProgressObject().setTitle(str);
      getProgressObject().setMaxSteps(param.stepsOrdering+param.stepsConvergence+2);
    }



    //initialize grid
    if (validProgressObject()) {
      getProgressObject().step("initializing map");
    }
    b = initGrid(data);

    //training
    if (param.metricType == parameters::Dot) {
      trainDot(data);
    } else {
      trainDist(data);
    }

    if (validProgressObject()) {
      getProgressObject().step("training finished");
    }


    int nbOutputs = sizeX*sizeY;

    //Put the id information into the result object
    //Each output value has the id of its position in the matrix
    ivector tids(nbOutputs);
    for (i=0; i<nbOutputs; i++) {
      tids.at(i)=i;
    }
    outTemplate=outputTemplate(tids);

    return b;
  }
  // Trains the network starting from the given weights.
  bool MLP::train(const dvector& theWeights,
                  const dmatrix& data,
                  const ivector& ids) {

    if (data.empty()) {
      setStatusString("Train data empty");
      return false;
    }


    if (ids.size()!=data.rows()) {
      std::string str;
      str = "The size of the IDs vector must match the number of rows ";
      str+= "of the input matrix.";
      setStatusString(str.c_str());
      return false;
    }


    // tracks the status of the training process.
    // if an error occurs set to false and use setStatusString()
    // however, training should continue, fixing the error as well as possible
    bool b=true;

    // vector with internal ids
    ivector newIds,idsLUT;
    newIds.resize(ids.size(),0,false,false);

    // map to get the internal Id to an external Id;
    std::map<int,int> extToInt;
    std::map<int,int>::iterator it;

    int i,k;
    for (i=0,k=0;i<ids.size();++i) {
      it = extToInt.find(ids.at(i));
      if (it != extToInt.end()) {
        newIds.at(i) = (*it).second;
      } else {
        extToInt[ids.at(i)] = k;
        newIds.at(i) = k;
        ++k;
      }
    }

    idsLUT.resize(extToInt.size());
    for (it=extToInt.begin();it!=extToInt.end();++it) {
      idsLUT.at((*it).second) = (*it).first;
    }

    // initialize the inputs and output units from the given data
    outputs = idsLUT.size();
    inputs  = data.columns();

    const parameters& param = getParameters();

    // display which kind of algorithm is to be used
    if (validProgressObject()) {
      getProgressObject().reset();
      std::string str("MLP: Training using ");
      switch(param.trainingMode) {
        case parameters::ConjugateGradients:
          str += "conjugate gradients";
          break;
        case parameters::SteepestDescent:
          str += "steepest descent";
          break;
        default:
          str += "unnamed method";
      }
      getProgressObject().setTitle(str);
      getProgressObject().setMaxSteps(param.maxNumberOfEpochs+1);
    }

    dvector grad;
    if (&theWeights != &weights) {
      weights.copy(theWeights);
    }

    if (!initWeights(true)) { // keep the weights
      setStatusString("Wrong weights!");
      return false;
    }

    computeErrorNorm(newIds);

    if (param.trainingMode == parameters::ConjugateGradients) {
      b = trainConjugateGradients(data,newIds);
    } else {
      if (param.batchMode) { // batch training mode:
        b = trainSteepestBatch(data,newIds);
      } else { // sequential training mode:
        b = trainSteepestSequential(data,newIds);
      }
    }

    if (validProgressObject()) {
      getProgressObject().step("Training ready.");
    }

    outputTemplate tmpOutTemp(idsLUT);
    setOutputTemplate(tmpOutTemp);

    // create the appropriate outputTemplate
    makeOutputTemplate(outputs,data,ids);

    return b;
  }
  bool MLP::trainSteepestSequential(const dmatrix& data,
                                    const ivector& internalIds) {

    const parameters& param = getParameters();
    char buffer[256];
    bool abort = false;
    scramble<int> scrambler;
    int i,j,k;
    double tmpError;
    ivector idx;
    idx.resize(data.rows(),0,false,false);
    for (i=0;i<idx.size();++i) {
      idx.at(i)=i;
    }

    if (param.momentum > 0) {
      // with momentum
      dvector grad,delta(weights.size(),0.0);

      for (i=0; !abort && (i<param.maxNumberOfEpochs); ++i) {
        scrambler.apply(idx); // present the pattern in a random sequence
        totalError = 0;
        for (j=0;j<idx.size();++j) {
          k=idx.at(j);
          calcGradient(data.getRow(k),internalIds.at(k),grad);
          computeActualError(internalIds.at(k),tmpError);
          totalError+=tmpError;
          delta.addScaled(param.learnrate,grad,param.momentum,delta);
          weights.add(delta);
        }

        // update progress info object
        if (validProgressObject()) {
          sprintf(buffer,"Error=%f",totalError/errorNorm);
          getProgressObject().step(buffer);
          abort = abort || (totalError/errorNorm <= param.stopError);
          abort = abort || getProgressObject().breakRequested();
        }
      }
    } else {
      // without momentum; idx, i, j, k and tmpError from above are reused
      dvector grad;
      for (i=0; !abort && (i<param.maxNumberOfEpochs); ++i) {
        scrambler.apply(idx); // present the pattern in a random sequence
        totalError = 0;
        for (j=0;j<idx.size();++j) {
          k=idx.at(j);
          calcGradient(data.getRow(k),internalIds.at(k),grad);
          computeActualError(internalIds.at(k),tmpError);
          totalError+=tmpError;
          weights.addScaled(param.learnrate,grad);
        }

        // update progress info object
        if (validProgressObject()) {
          sprintf(buffer,"Error=%f",totalError/errorNorm);
          getProgressObject().step(buffer);
          abort = abort || (totalError/errorNorm <= param.stopError);
          abort = abort || getProgressObject().breakRequested();
        }
      }
    }
    return true;
  }
  // Generic SMO-style training: one SVM is trained per class.
  bool svm::genericTrain(const dmatrix& input, const ivector& ids) {

    char buffer[80];

    if (validProgressObject()) {
      getProgressObject().reset();
      getProgressObject().setTitle("SVM: Training");
      getProgressObject().setMaxSteps(nClasses);
    }

    bias.resize(nClasses,getParameters().bias,false,true);
    trainData=new dmatrix(input);
    alpha.resize(nClasses,input.rows(),0,false,true);
    makeTargets(ids);
    errorCache.resize(input.rows());

    const parameters& param=getParameters();

    C=param.C;
    tolerance=param.tolerance;
    epsilon=param.epsilon;
    bool abort=false;

    // train one SVM for each class
    for (int cid=0; cid<nClasses && !abort; cid++) {
      int numChanged=0;
      bool examineAll=true;

      currentTarget=&target->getRow(cid);
      currentClass=cid;
      currentAlpha=&alpha.getRow(cid);

      _lti_debug("Training class " << cid << "\n");

      fillErrorCache();

      while ((numChanged > 0 || examineAll) && !abort) {
        numChanged=0;
        if (examineAll) {
          // iterate over all alphas
          for (int i=0; i<trainData->rows(); i++) {
            if (examineExample(i)) {
              numChanged++;
            }
          }
          // next turn, look only at non-bound alphas
          examineAll=false;
        } else {
          // iterate over all non-0 and non-C alphas
          int *tmpAlpha=new int[alpha.getRow(cid).size()];
          int j=0,i=0;
          for (i=0; i<alpha.getRow(cid).size(); i++) {
            if (alpha.getRow(cid).at(i) != 0.0 &&
                alpha.getRow(cid).at(i) != C) {
              tmpAlpha[j++]=i;
            }
          }
          for (i=0; i<j; i++) {
            if (examineExample(tmpAlpha[i])) {
              numChanged++;
            }
          }
          delete[] tmpAlpha;
          // next turn, examine all if we did not succeed this time
          if (numChanged == 0) {
            examineAll=true;
          }
        }
      }
      // update progress info object
      if (validProgressObject()) {
        sprintf(buffer,"numChanged=%d, error=%f",numChanged,errorSum);
        getProgressObject().step(buffer);
        abort=abort || getProgressObject().breakRequested();
      }

     // now limit the number of support vectors
      // does not work yet, so disable it
      if (0) {
        int supnum=0;
        ivector index(currentAlpha->size());
        ivector newindex(currentAlpha->size());
        dvector newkey(currentAlpha->size());
        for (int i=0; i<currentAlpha->size(); i++) {
          if (currentAlpha->at(i) > 0) {
            supnum++;
          }
          index[i]=i;
        }
        if (supnum > param.nSupport && param.nSupport > 0) {
          lti::sort2<double> sorter;
          sorter.apply(*currentAlpha,index,newkey,newindex);

          int i;
          for (i=0; i<newkey.size() &&
                 lti::abs(newkey[i]) > std::numeric_limits<double>::epsilon(); i++) {
          }
          for (int j=i; j<currentAlpha->size()-param.nSupport; j++) {
            currentAlpha->at(newindex[j])=0;
          }
          _lti_debug("Final alpha: " << *currentAlpha << std::endl);
        }
      }
    }

    defineOutputTemplate();

    _lti_debug("alpha:\n" << alpha << "\n");

    // make sure that all lagrange multipliers are larger than
    // zero, otherwise we might get into trouble later
    alpha.apply(rectify);

    if (abort) {
      setStatusString("Training aborted by user!");
    }
    return !abort;
  }
  bool sffs::apply(const dmatrix& src,const ivector& srcIds, 
                   dmatrix& dest) const {
    bool ok=true;
    dest.clear();
    parameters param=getParameters();
    // initialize cross validator

    costFunction *cF;
    cF = param.usedCostFunction;
    cF->setSrc(src,srcIds);

    int featureToInsert(0),featureToDelete(0),i;
    double oldRate,newRate;
    bool doInclude=true;
    bool terminate=false;
    int nbFeatures=src.columns();
    std::list<int> in,out;
    std::list<int>::iterator it;
    std::map<double,int> values;
    double value;
    for (i=0; i<nbFeatures; i++) {
      out.push_back(i);
    }
    if (param.nbFeatures<2) {
      setStatusString("You will have to choose at least two features. Set nbFeatures=2");
      return false;
    }

    // add the first best two features; do 2 steps sfs
    for (i=0; i<2; i++ ) {
      if (dest.columns()<src.columns() && !terminate) {
        // add space for one extra feature
        for (it=out.begin(); it!=out.end(); it++) {
          in.push_back(*it);
          cF->apply(in,value);
          values[value]=*it;
          in.pop_back();
        }
        // pick the feature whose inclusion gives the best cost-function value
        in.push_back((--values.end())->second);
        out.remove((--values.end())->second);
      }
    }
    cF->apply(in,oldRate);
    while (!terminate) {
      // STEP 1: include the best possible feature
      if (static_cast<int>(in.size())<src.columns() && 
          !terminate && doInclude) {
        values.clear();
        for (it=out.begin(); it!=out.end(); it++) {
          in.push_back(*it);
          cF->apply(in,value);
          values[value]=*it;
          in.pop_back();
        }
        featureToInsert=(--values.end())->second;
        in.push_back(featureToInsert);
        out.remove(featureToInsert);
      }
      // STEP 2: conditional exclusion
      if (in.size()>0 && !terminate) {
        values.clear();
        for (it=in.begin(); it!=in.end(); it++) {
          int tmp=*it;
          it=in.erase(it);
          cF->apply(in,value);
          values[value]=tmp;
          in.insert(it,tmp);
          it--;
        }
        featureToDelete=(--values.end())->second;

        // if the least significant feature is equal to the most significant
        // feature that was included in step 1, leave feature and 
        // include the next one
        if (featureToDelete==featureToInsert) {
          doInclude=true;
        } else {    // delete this feature and compute new recognition rate

          // the best value found above is the recognition rate without
          // the least significant feature
          newRate=(--values.end())->first;
          // if recognition rate without least significant feature is better 
          // than with this feature delete it
          if (newRate>oldRate) { 

            in.remove(featureToDelete);
            out.push_back(featureToDelete);
            // search for another least significant feature before 
            // including the next one
            doInclude=false;
            oldRate=newRate;
          } else {
            doInclude=true;
          }
          // if only two features left, include the next one
          if (dest.columns()<=2) {
            doInclude=true;    
          }
        }          
      } // end of exclusion
      // test if the predetermined number of features is reached
      terminate=(param.nbFeatures==static_cast<int>(in.size()));
    } // while (!terminate)

    // Now fill dest
    const int sz = static_cast<int>(in.size());
    dest.resize(src.rows(), sz, 0., false, false);
    ivector idvec(false, sz);
    std::list<int>::const_iterator lit = in.begin();
    for (i=0; i < sz; ++i) {
      idvec.at(i)=*lit;
      ++lit;
    }
    for (i=0; i < src.rows(); ++i) {
      const dvector& svec = src.getRow(i);
      dvector& dvec = dest.getRow(i);
      for (int j=0; j < sz; ++j) {
        dvec.at(j) = svec.at(idvec.at(j));
      }
    }

    return ok;
  }