Example no. 1
 // -------------------------------------------------------------------
 // The train-methods!
 // -------------------------------------------------------------------
 // Normal training method
 bool MLP::train(const dmatrix& data,
                 const ivector& ids) {
   checkHowManyOutputs(ids);
   inputs=data.columns();
   initWeights(false,-1,1);
   return train(weights,data,ids);
 }
Example no. 2
  bool svm::genericNormTrain(const dmatrix& input, const ivector& ids) {
    if (getParameters().normalizeData) {
      // center the data on the column means...
      meansFunctor<double> mf;
      mf.meanOfRows(input,offset);
      // ...and derive one global scale from the largest column variance
      varianceFunctor<double> vf;
      dvector vscale;
      vf.varianceOfRows(input,vscale);
      scale=sqrt(max(abs(vscale.maximum()),abs(vscale.minimum())));
      _lti_debug("Scale = " << scale << "\n");
      // normalize a local copy of the data
      dmatrix data(input);
      for (int i=0; i<data.rows(); i++) {
        dvector& item=data.getRow(i);
        item.subtract(offset);
        item.edivide(scale);
      }
      return genericTrain(data,ids);
    } else {
      // no normalization requested: zero offset, unit scale
      offset.resize(input.columns(),0,false,false);
      offset.fill(0.0);
      scale=1.0;
      return genericTrain(input,ids);
    }
  }
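Conceptually, the normalization branch above centers every sample on the per-column mean and divides by a single global scale derived from the largest per-column variance. A standalone sketch of the same idea in plain standard C++ (std::vector instead of dmatrix/dvector; illustrative only):

  #include <algorithm>
  #include <cmath>
  #include <vector>

  // center rows on the column means, divide by a single global scale
  void normalizeRows(std::vector<std::vector<double> >& data) {
    if (data.empty()) return;
    const std::size_t cols = data.front().size();
    std::vector<double> mean(cols, 0.0), var(cols, 0.0);
    for (const auto& row : data)
      for (std::size_t c = 0; c < cols; ++c) mean[c] += row[c];
    for (std::size_t c = 0; c < cols; ++c) mean[c] /= data.size();
    for (const auto& row : data)
      for (std::size_t c = 0; c < cols; ++c)
        var[c] += (row[c]-mean[c])*(row[c]-mean[c]);
    double maxVar = 0.0;
    for (std::size_t c = 0; c < cols; ++c)
      maxVar = std::max(maxVar, var[c]/data.size());
    const double scale = (maxVar > 0.0) ? std::sqrt(maxVar) : 1.0;
    for (auto& row : data)
      for (std::size_t c = 0; c < cols; ++c)
        row[c] = (row[c]-mean[c])/scale;
  }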
  double clusteringValidity::getCentroidDistance(const dmatrix& m1,
                                                 const dmatrix& m2) const {
    l2Distance<double> dist;
    int i;
    dvector a(m1.columns());
    dvector b(m2.columns());
    // centroid of the first cluster
    for (i=0; i<m1.rows();i++) {
      a.add(m1.getRow(i));
    }
    a.divide(m1.rows());
    // centroid of the second cluster
    for (i=0; i<m2.rows();i++) {
      b.add(m2.getRow(i));
    }
    b.divide(m2.rows());
    // distance between the two centroids
    return dist.apply(a,b);
  }
 double clusteringValidity::getAverageInterpointDistance(const dmatrix& m1,
                                                         const dmatrix& m2) const {
   l2Distance<double> dist;
   int i;
   dvector a(m1.columns());
   dvector b(m2.columns());
   for (i=0; i<m1.rows();i++) {
     a.add(m1.getRow(i));
   }
   a.divide(m1.rows()); // centroid 1
   for (i=0; i<m2.rows();i++) {
     b.add(m2.getRow(i));
   }
   b.divide(m2.rows()); // centroid 2
   double distance=0.0;
   for (i=0; i<m1.rows(); i++) {
     distance+=dist.apply(m1.getRow(i),a);
   }
   for (i=0; i<m2.rows(); i++) {
     distance+=dist.apply(m2.getRow(i),b);
   }
   return (distance/(m1.rows()+m2.rows()));
 }
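Both validity measures start from the same building blocks: averaging a cluster's rows into a centroid and taking L2 distances. The same helpers in isolation (plain standard C++, illustrative types):

  #include <cmath>
  #include <vector>

  // centroid of a cluster given as a list of points
  std::vector<double> centroid(const std::vector<std::vector<double> >& m) {
    std::vector<double> c(m.empty() ? 0 : m.front().size(), 0.0);
    for (const auto& p : m)
      for (std::size_t i = 0; i < c.size(); ++i) c[i] += p[i];
    for (std::size_t i = 0; i < c.size(); ++i) c[i] /= m.size();
    return c;
  }

  // Euclidean (L2) distance between two points of equal dimension
  double l2(const std::vector<double>& a, const std::vector<double>& b) {
    double s = 0.0;
    for (std::size_t i = 0; i < a.size(); ++i) s += (a[i]-b[i])*(a[i]-b[i]);
    return std::sqrt(s);
  }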
  bool regionMerge::apply(const imatrix& srcmask,
                          const dmatrix& simMat,
                          const dvector& thresholds,
                          imatrix& destmask) const {
    int i,j;
    const dvector& thrs = thresholds;

    ivector eLab(simMat.rows());
    for (i=0;i<eLab.size();++i)
      eLab.at(i) = i;

    double a;
    for (j=0;j<simMat.rows();++j)
      for (i=0;i<simMat.columns();++i) {
        if (simMat.at(j,i) > (a=max(thrs.at(j),thrs.at(i))) ) {
          if (eLab.at(j)>eLab.at(i)) {
            eLab.at(j)=eLab.at(i);
          } else {
            eLab.at(i)=eLab.at(j);
          }

        }
      }


     // now correct the labels: flatten each label chain onto its root
     for (j=eLab.size()-1;j>=0;--j) {
      i = j;
      while (eLab.at(i) != i) {
        i=eLab.at(i);
      }
      eLab.at(j) = i;
    }

    destmask.resize(srcmask.size(),0,false,false);
    for (j=0;j<srcmask.rows();++j)
      for (i=0;i<srcmask.columns();++i) {
        destmask.at(j,i) = eLab.at(srcmask.at(j,i));
      }

    return true;
  }
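The two loop nests above form a light-weight union-find: merging always assigns the smaller label to both regions, and the backward pass then flattens every label chain onto its root. The flattening step in isolation (plain standard C++, illustrative); for example, labels {0,0,1,2} flatten to {0,0,0,0}:

  #include <vector>

  // flatten label chains so that every entry points directly at its root
  void flattenLabels(std::vector<int>& lab) {
    for (int j = static_cast<int>(lab.size())-1; j >= 0; --j) {
      int i = j;
      while (lab[i] != i) {   // follow the chain to its root
        i = lab[i];
      }
      lab[j] = i;             // point directly at the root
    }
  }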
  double clusteringValidity::getAverageToCentroidDiameter(const dmatrix& m1) const {
    // an empty cluster has no meaningful diameter
    if (m1.rows()==0) {
      return 0.0;
    }
    dvector a(m1.columns());
    int i,j;
    l2Distance<double> dist;
    double distance=0.0;
    // centroid of the cluster
    for (i=0; i<m1.rows(); i++) {
      a.add(m1.getRow(i));
    }
    a.divide(m1.rows());
    // accumulated distance of all points to the centroid
    for (j=0; j<m1.rows(); j++) {
      distance+=dist.apply(a,m1.getRow(j));
    }
    // twice the average point-to-centroid distance
    return (2*distance/static_cast<double>(m1.rows()));
  }
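In other words, the measure returned above is twice the average point-to-centroid distance, i.e. 2/|S| * sum over x in S of d(x,c) for a cluster S with centroid c. Using the centroid() and l2() helpers sketched earlier (illustrative):

  // average-to-centroid "diameter": twice the mean distance to the centroid
  double avgToCentroidDiameter(const std::vector<std::vector<double> >& m) {
    if (m.empty()) return 0.0;               // empty cluster: define as 0
    const std::vector<double> c = centroid(m);
    double sum = 0.0;
    for (const auto& p : m) sum += l2(p, c);
    return 2.0*sum/m.size();
  }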
   /*
    * Operates on a copy of the given %parameters.
    * @param srcmask source mask; each object must be represented by one
    *                label.
    * @param simMat the similarity matrix; its size must be at least the
    *               number of labels plus one.
    * @param destmask resulting mask with the merged objects.
    * @return true if apply was successful, false otherwise.
    */
  bool regionMerge::apply(const imatrix& srcmask,
                          const dmatrix& simMat,
                          imatrix& destmask) const {
    int i,j;
    ivector eLab(simMat.rows());
    const double thr = getParameters().threshold;

    for (i=0;i<eLab.size();++i) {
      eLab.at(i) = i;
    }

    for (j=0;j<simMat.rows();++j) {
      for (i=0;i<simMat.columns();++i) {
        if (simMat.at(j,i) > thr) {
          if (eLab.at(j)>eLab.at(i)) {
            eLab.at(j)=eLab.at(i);
          } else {
            eLab.at(i)=eLab.at(j);
          }

        }
      }
    }

    // now correct the labels
    for (j=eLab.size()-1;j>=0;--j) {
      i = j;
      while (eLab.at(i) != i) {
        i=eLab.at(i);
      }
      eLab.at(j) = i;
    }

    destmask.resize(srcmask.size(),0,false,false);

    for (j=0;j<srcmask.rows();++j)
      for (i=0;i<srcmask.columns();++i) {
        destmask.at(j,i) = eLab.at(srcmask.at(j,i));
      }

    return true;
   }
  // implements the Fuzzy C Means algorithm
  bool fuzzyCMeans::train(const dmatrix& data) {

    int t=0;
    // create the distance functor according to the parameter norm
    distanceFunctor<double>* distFunc = 0;
    switch (getParameters().norm)  {
      case parameters::L1:
        distFunc = new l1Distance<double>;
        break;
      case parameters::L2:
        distFunc = new l2Distance<double>;
        break;
      default:
        // fall back to L2 so that distFunc is never left null
        distFunc = new l2Distance<double>;
        break;
    }
    const int nbOfClusters=getParameters().nbOfClusters;
    const int nbOfPoints=data.rows();
    if (nbOfClusters>nbOfPoints) {
      setStatusString("more clusters than points");
      delete distFunc;
      return false;
    }
    const double q=getParameters().fuzzifier;
    if (q<=1) {
      setStatusString("the fuzzifier q must be greater than 1");
      delete distFunc;
      return false;
    }
    // select random points of the given data to initialize the centroids
    selectRandomPoints(data,nbOfClusters,centroids);
    // ensure the centroid matrix has the expected dimensions
    centroids.resize(nbOfClusters,data.columns(),0.0);
    dmatrix memberships(nbOfPoints, nbOfClusters, 0.0);
    double terminationCriterion=0;
    double newDistance;
    dvector newCenter(data.columns());
    dvector currentPoint(data.columns());
    dmatrix newCentroids(nbOfClusters,data.columns(),0.0);
    double sumOfMemberships=0;
    double membership=0;
    double dist1;
    double dist2;
    int i,j,k,m;
    do {
      // calculate new memberships
      memberships.fill(0.0);  // clear the old memberships
      for (i=0; i<nbOfPoints; i++) {
        for (j=0; j<nbOfClusters; j++) {
          newDistance=0;
          dist1=distFunc->apply(data.getRow(i),
                                centroids.getRow(j));
          for (k=0; k<nbOfClusters; k++) {
            dist2=distFunc->apply(data.getRow(i),
                                  centroids.getRow(k));
            // if a distance is 0, the normal membership update is undefined
            if (dist2!=0) {
              newDistance+=pow((dist1/dist2),(1/(q-1)));
            }
          }
          // if the point coincides with a centroid
          if (newDistance!=0)
            memberships.at(i,j)=1/newDistance;
          else {
            dvector row(memberships.columns(),0.0);
            memberships.setRow(i,row);
            memberships.at(i,j)=1;
            break;
          }
        }
      }
      t++;  // counts the iterations

     // calculate new centroids based on modified memberships
      for (m=0; m<nbOfClusters; m++) {
        newCenter.fill(0.0);
        sumOfMemberships=0;
        for (i=0; i<nbOfPoints; i++) {
          currentPoint=data.getRow(i);
          membership=pow(memberships.at(i,m),q);
          sumOfMemberships+=membership;
          currentPoint.multiply(membership);
          newCenter.add(currentPoint);
        }
        newCenter.divide(sumOfMemberships);
        newCentroids.setRow(m,newCenter);
      }
      terminationCriterion=distFunc->apply(centroids,newCentroids);
      centroids=newCentroids;
    }
    // terminate on convergence or when the maximum number of iterations
    // is reached
    while ( (terminationCriterion>getParameters().epsilon)
            && (t<getParameters().maxIterations));

    delete distFunc;  // release the distance functor allocated above

    // put the id information into the result object;
    // each cluster gets the id of its position in the matrix
    ivector tids(nbOfClusters);
    for (i=0; i<nbOfClusters; i++) {
      tids.at(i)=i;
    }
    outTemplate=outputTemplate(tids);
    return true;
  }
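The membership update in the inner loop is the usual fuzzy-c-means rule with exponent 1/(q-1) applied to the distance ratios, i.e. u_j = 1 / sum over k of (d_j/d_k)^(1/(q-1)). Pulled out as a standalone helper (plain standard C++, illustrative; the coinciding-centroid special case is simplified):

  #include <cmath>
  #include <vector>

  // membership of one point in cluster j, given its distances to all centroids
  double fcmMembership(const std::vector<double>& dist, std::size_t j, double q) {
    double sum = 0.0;
    for (std::size_t k = 0; k < dist.size(); ++k) {
      if (dist[k] != 0.0) {
        sum += std::pow(dist[j]/dist[k], 1.0/(q-1.0));
      }
    }
    // point coincides with a centroid: give it full membership there
    return (sum != 0.0) ? 1.0/sum : 1.0;
  }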
Example no. 9
  // train the network, starting from the given initial weights
  bool MLP::train(const dvector& theWeights,
                  const dmatrix& data,
                  const ivector& ids) {

    if (data.empty()) {
      setStatusString("Train data empty");
      return false;
    }


    if (ids.size()!=data.rows()) {
      std::string str;
      str = "the number of IDs must match the number of rows ";
      str+= "of the input data matrix.";
      setStatusString(str.c_str());
      return false;
    }


    // tracks the status of the training process.
    // if an error occurs set to false and use setStatusString()
    // however, training should continue, fixing the error as well as possible
    bool b=true;

    // vector with internal ids
    ivector newIds,idsLUT;
    newIds.resize(ids.size(),0,false,false);

    // map to get the internal Id to an external Id;
    std::map<int,int> extToInt;
    std::map<int,int>::iterator it;

    int i,k;
    for (i=0,k=0;i<ids.size();++i) {
      it = extToInt.find(ids.at(i));
      if (it != extToInt.end()) {
        newIds.at(i) = (*it).second;
      } else {
        extToInt[ids.at(i)] = k;
        newIds.at(i) = k;
        ++k;
      }
    }

    idsLUT.resize(extToInt.size());
    for (it=extToInt.begin();it!=extToInt.end();++it) {
      idsLUT.at((*it).second) = (*it).first;
    }

    // initialize the inputs and output units from the given data
    outputs = idsLUT.size();
    inputs  = data.columns();

    const parameters& param = getParameters();

    // display which kind of algorithm is to be used
    if (validProgressObject()) {
      getProgressObject().reset();
      std::string str("MLP: Training using ");
      switch(param.trainingMode) {
        case parameters::ConjugateGradients:
          str += "conjugate gradients";
          break;
        case parameters::SteepestDescent:
          str += "steepest descent";
          break;
        default:
          str += "unnamed method";
      }
      getProgressObject().setTitle(str);
      getProgressObject().setMaxSteps(param.maxNumberOfEpochs+1);
    }

    dvector grad;
    if (&theWeights != &weights) {
      weights.copy(theWeights);
    }

    if (!initWeights(true)) { // keep the weights
      setStatusString("Wrong weights!");
      return false;
    }

    computeErrorNorm(newIds);

    if (param.trainingMode == parameters::ConjugateGradients) {
      b = trainConjugateGradients(data,newIds);
    } else {
      if (param.batchMode) { // batch training mode:
        b = trainSteepestBatch(data,newIds);
      } else { // sequential training mode:
        b = trainSteepestSequential(data,newIds);
      }
    }

    if (validProgressObject()) {
      getProgressObject().step("Training ready.");
    }

    outputTemplate tmpOutTemp(idsLUT);
    setOutputTemplate(tmpOutTemp);

    // create the appropriate outputTemplate
    makeOutputTemplate(outputs,data,ids);

    return b;
  }
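The first half of train() compresses arbitrary external class ids into a dense 0..n-1 range and keeps a lookup table to map back. The same idiom in isolation (plain standard C++): ids {7,3,7,9} become {0,1,0,2} with the LUT {7,3,9}.

  #include <map>
  #include <vector>

  // map arbitrary ids to dense internal ids 0..n-1 and build the reverse LUT
  void compressIds(const std::vector<int>& ids,
                   std::vector<int>& internal,
                   std::vector<int>& lut) {
    std::map<int,int> extToInt;
    internal.resize(ids.size());
    for (std::size_t i = 0; i < ids.size(); ++i) {
      std::map<int,int>::iterator it = extToInt.find(ids[i]);
      if (it == extToInt.end()) {
        const int k = static_cast<int>(extToInt.size());
        it = extToInt.insert(std::make_pair(ids[i], k)).first;
      }
      internal[i] = it->second;
    }
    lut.resize(extToInt.size());
    for (std::map<int,int>::const_iterator it = extToInt.begin();
         it != extToInt.end(); ++it) {
      lut[it->second] = it->first;
    }
  }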
Example no. 10
  bool SOFM2D::train(const dmatrix& data) {

    // tracks the status of the training process.
    // if an error occurs set to false and use setStatusString()
    // however, training should continue, fixing the error as well as possible
    bool b=true;

    int i;

    const parameters& param=getParameters();

    // find the actual size of the grid
    if (param.calculateSize) {
      b = calcSize(data);
    } else {
      sizeX=param.sizeX;
      sizeY=param.sizeY;
    }

    // check whether one of the dimensions has negative or zero size
    // and try to fix by using alternate way of setting sizes.
    if (sizeX<=0 || sizeY<=0) {
      b=false;
      std::string err="Negative or zero size of one dimension";
      if (param.calculateSize) {
        if (param.area<=0) {
          err += "\narea is <= 0";
          if (param.sizeX>0 && param.sizeY>0) {
            sizeX=param.sizeX;
            sizeY=param.sizeY;
            err += "\nusing sizeX and sizeY instead";
          }
        }
      } else {
        if (param.sizeX<=0) {
          err += "\nsizeX <= 0";
        }
        if (param.sizeY<=0) {
          err += "\nsizeY <= 0";
        }
        if (param.area>0) {
          err += "\ncalculating size from area instead";
          calcSize(data);
          err += getStatusString();
        }
      }
      setStatusString(err.c_str());
    }

    // set grid to size
    grid.resize(sizeY*sizeX, data.columns());

    //set learn rates
    setLearnRates(data.rows());

    if (validProgressObject()) {
      getProgressObject().reset();
      std::string str("SOFM2D: Training using ");
      switch(param.metricType) {
        case parameters::L1:
          str += "L1 distance";
          break;
        case parameters::L2:
          str += "L2 distance";
          break;
        case parameters::Dot:
          str += "dot product";
          break;
        default:
          str += "unnamed method";
      }
      char buffer[256];
      sprintf(buffer," size of map %i x %i", sizeY, sizeX);
      str += std::string(buffer);
      getProgressObject().setTitle(str);
      getProgressObject().setMaxSteps(param.stepsOrdering+param.stepsConvergence+2);
    }

    // initialize the grid
    if (validProgressObject()) {
      getProgressObject().step("initializing map");
    }
    b = initGrid(data) && b;  // preserve any earlier error status

    //training
    if (param.metricType == parameters::Dot) {
      trainDot(data);
    } else {
      trainDist(data);
    }

    if (validProgressObject()) {
      getProgressObject().step("training finished");
    }


    int nbOutputs = sizeX*sizeY;

    //Put the id information into the result object
    //Each output value has the id of its position in the matrix
    ivector tids(nbOutputs);
    for (i=0; i<nbOutputs; i++) {
      tids.at(i)=i;
    }
    outTemplate=outputTemplate(tids);

    return b;
  }
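The trained map is stored as a flat matrix with one row per neuron; the neuron at grid position (y,x) lives at row y*sizeX + x, which is also the id written into the output template. Two tiny helpers make the convention explicit (illustrative, not part of the LTI-lib API):

  // flat row index of the neuron at grid position (y,x) in a sizeX-wide map
  inline int neuronIndex(int y, int x, int sizeX) {
    return y*sizeX + x;
  }

  // inverse mapping: recover the grid position from a flat row index
  inline void neuronPos(int idx, int sizeX, int& y, int& x) {
    y = idx / sizeX;
    x = idx % sizeX;
  }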
Example no. 11
  bool sffs::apply(const dmatrix& src,const ivector& srcIds, 
                   dmatrix& dest) const {
    bool ok=true;
    dest.clear();
    parameters param=getParameters();
    // set up the cost function used to evaluate feature subsets
    costFunction* cF = param.usedCostFunction;
    cF->setSrc(src,srcIds);

    int featureToInsert(0),featureToDelete(0),i;
    double oldRate,newRate;
    bool doInclude=true;
    bool terminate=false;
    int nbFeatures=src.columns();
    std::list<int> in,out;       // selected / not yet selected features
    std::list<int>::iterator it;
    std::map<double,int> values; // cost value -> feature, sorted by value
    double value;
    for (i=0; i<nbFeatures; i++) {
      out.push_back(i);
    }
    if (param.nbFeatures<2) {
      setStatusString("At least two features must be selected; set nbFeatures >= 2.");
      return false;
    }

    // add the best two features first, i.e. perform two steps of plain SFS
    for (i=0; i<2; i++ ) {
      if (static_cast<int>(in.size())<src.columns() && !terminate) {
        values.clear();
        // try each remaining feature and record the resulting cost value
        for (it=out.begin(); it!=out.end(); it++) {
          in.push_back(*it);
          cF->apply(in,value);
          values[value]=*it;
          in.pop_back();
        }
        // the map is sorted by value, so its last entry is the best feature
        in.push_back((--values.end())->second);
        out.remove((--values.end())->second);
      }
    }
    cF->apply(in,oldRate);
    while (!terminate) {
      // STEP 1: include the best possible feature
      if (static_cast<int>(in.size())<src.columns() && 
          !terminate && doInclude) {
        values.clear();
        for (it=out.begin(); it!=out.end(); it++) {
          in.push_back(*it);
          cF->apply(in,value);
          values[value]=*it;
          in.pop_back();
        }
        featureToInsert=(--values.end())->second;
        in.push_back(featureToInsert);
        out.remove(featureToInsert);
      }
      // STEP 2: conditional exclusion
      if (in.size()>0 && !terminate) {
        values.clear();
        // temporarily remove each feature and record the cost without it
        for (it=in.begin(); it!=in.end(); it++) {
          int tmp=*it;
          it=in.erase(it);       // erase returns the next position
          cF->apply(in,value);
          values[value]=tmp;
          in.insert(it,tmp);     // re-insert before that position...
          it--;                  // ...and step back onto the element
        }
        featureToDelete=(--values.end())->second;

        // if the least significant feature is the feature that was just
        // included in step 1, keep it and include the next one
        if (featureToDelete==featureToInsert) {
          doInclude=true;
        } else {   // check whether deleting this feature pays off

          // recognition rate without the least significant feature; the
          // map is sorted by value, so its last entry already holds it
          newRate=(--values.end())->first;
          // if the recognition rate without the least significant feature
          // is better than the previous rate, delete the feature
          if (newRate>oldRate) { 

            in.remove(featureToDelete);
            out.push_back(featureToDelete);
            // search for another least significant feature before 
            // including the next one
            doInclude=false;
            oldRate=newRate;
          } else {
            doInclude=true;
          }
          // if only two features are left, include the next one
          if (static_cast<int>(in.size())<=2) {
            doInclude=true;
          }
        }          
      } // end of exclusion
      // test if the predetermined number of features is reached
      terminate=(param.nbFeatures==static_cast<int>(in.size()));
    } // while (!terminate)

    // Now fill dest
    const int sz = static_cast<int>(in.size());
    dest.resize(src.rows(), sz, 0., false, false);
    ivector idvec(false, sz);
    std::list<int>::const_iterator lit = in.begin();
    for (i=0; i < sz; ++i) {
      idvec.at(i)=*lit;
      ++lit;
    }
    for (i=0; i < src.rows(); ++i) {
      const dvector& svec = src.getRow(i);
      dvector& dvec = dest.getRow(i);
      for (int j=0; j < sz; ++j) {
        dvec.at(j) = svec.at(idvec.at(j));
      }
    }

    return ok;
  }
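The final block gathers the selected feature columns of src into dest. The same column-gather step in isolation (plain standard C++, illustrative types):

  #include <list>
  #include <vector>

  // copy the columns listed in 'selected' from src into a new matrix
  std::vector<std::vector<double> >
  selectColumns(const std::vector<std::vector<double> >& src,
                const std::list<int>& selected) {
    std::vector<std::vector<double> > dest(src.size());
    for (std::size_t r = 0; r < src.size(); ++r) {
      dest[r].reserve(selected.size());
      for (std::list<int>::const_iterator it = selected.begin();
           it != selected.end(); ++it) {
        dest[r].push_back(src[r][*it]);
      }
    }
    return dest;
  }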