bool MLP::computeErrorNorm(const ivector& ids) { dvector vct(outputs,off*ids.size()); const double fix=on-off; int i; double tmp,tmp2,v; // compute first the average outputs for the training set for (i=0;i<ids.size();++i) { vct.at(ids.at(i))+=fix; } vct.divide(ids.size()); double offError(0.0); // now compute the error for (i=0;i<vct.size();++i) { tmp = off - vct.at(i); offError += (tmp*tmp); } errorNorm = 0.0; for (i=0;i<ids.size();++i) { v = vct.at(ids.at(i)); tmp = off - v; tmp2 = on - v; errorNorm += (offError - tmp*tmp + tmp2*tmp2); } errorNorm *= 0.5; return true; }
bool MLP::calcGradient(const dmatrix& inputs, const ivector& ids, dvector& grad) { if (inputs.rows() != ids.size()) { setStatusString("Number of vectors not consistent with number of ids"); return false; } dvector tmp; int i; double tmpError; totalError = 0; calcGradient(inputs.getRow(0),ids.at(0),grad); computeActualError(ids.at(0),totalError); for (i=1;i<inputs.rows();++i) { calcGradient(inputs.getRow(i),ids.at(i),tmp); computeActualError(ids.at(i),tmpError); grad.add(tmp); totalError+=tmpError; } return true; }
// return probability value of an rgb pixel float probabilityMap2D::apply(const ubyte &value1, const ubyte &value2, ivector& theBin) const { assert((probabilityHistogram.dimensions() == 2) && (theBin.size() == 2)); theBin[0] = lookupTable[0][static_cast<int>(value1)]; theBin[1] = lookupTable[1][static_cast<int>(value2)]; return static_cast<float>(probabilityHistogram.at(theBin)); }
/* * compute the error of the given weights for the whole training set. */ bool MLP::computeTotalError(const std::vector<dmatrix>& mWeights, const dmatrix& inputs, const ivector& ids, double& totalError) const { if (ids.size() != inputs.rows()) { return false; } const parameters& param = getParameters(); const int layers = param.hiddenUnits.size()+1; std::vector<dvector> uNet(layers),uOut(layers); int i; double tmp; totalError=0.0; for (i=0;i<ids.size();++i) { propagate(inputs.getRow(i),mWeights,uNet,uOut); computePatternError(ids.at(i),uOut.back(),tmp); totalError+=tmp; } return true; }
void svm::makeTargets(const ivector& ids) { // expand each class label i to a vector v with v[j]=1 if j == i, // and j[j]=-1 if j != i srcIds=ids; dmatrix* t=new dmatrix(nClasses,ids.size(),-1.0); // iterate over training labels for (int i=0; i<t->columns(); i++) { t->at(idMap[ids.at(i)],i)=1; } if (target != 0) { delete target; } target=t; }
bool homography8DofEstimator::apply(const matrix<dpoint>& src, dvector& dest, dvector& error, const ivector& indices, int numCorrespondences) const { if ( numCorrespondences < minNumberCorrespondences() || indices.size() < numCorrespondences ) { setStatusString("number of correspondences to small or too few indices"); return false; } const parameters& par = getParameters(); hom8DofHelper<double,double> help ( par.computeSqError ); return help.apply(src,dest,error,indices,numCorrespondences); }
void svm::buildIdMaps(const ivector& ids) { int j=0; // create reverse id map idMap.clear(); for (int i=0; i<ids.size(); i++) { if (idMap.find(ids.at(i)) == idMap.end()) { _lti_debug("Mapping external id " << ids.at(i) << " to " << j << std::endl); rIdMap[j]=ids.at(i); idMap[ids.at(i)]=j++; } } nClasses=j; }
int kNearestNeighFilter::getMedian(const ivector& histogram, const int max, const int numOfMax) const { ivector vect(numOfMax,0); int i,z=0; const int size=histogram.size(); for(i=0;i<size;++i) { if (histogram.at(i) == max) { vect.at(z++) = i; } } return vect.at(z/2); }
// Write an int vector in the simple binary matrix format:
// header {fmt, nrows, ncols, nnz} followed by the raw element data.
// Returns 0 on completion.
int writeIntVec(ivector & im, string fname, int buffsize) {
  int fmt, nrows, ncols, nnz;
  ostream *ofstr = open_out_buf(fname.c_str(), buffsize);
  fmt = 110;          // format tag: dense int column vector
  nrows = im.size();
  ncols = 1;
  nnz = nrows;
  // Fix: use sizeof(int) instead of the hard-coded 4.
  ofstr->write((const char *)&fmt, sizeof(int));
  ofstr->write((const char *)&nrows, sizeof(int));
  ofstr->write((const char *)&ncols, sizeof(int));
  ofstr->write((const char *)&nnz, sizeof(int));
  // Fix: &im[0] is undefined for an empty vector; skip the data write then.
  if (nrows > 0) {
    ofstr->write((const char *)&im[0], sizeof(int) * nrows);
  }
  closeos(ofstr);
  return 0;
}
void MLP::checkHowManyOutputs(const ivector& ids) { // count how many different ids are present in the training set std::map<int,int> extToInt; std::map<int,int>::iterator it; int i,k; for (i=0,k=0;i<ids.size();++i) { it = extToInt.find(ids.at(i)); if (it == extToInt.end()) { extToInt[ids.at(i)] = k; ++k; } } outputs = extToInt.size(); }
// Build the membership functions: one triangular function spanning the y
// range, plus one triangular function per (pattern, x-dimension) pair that
// peaks at the pattern's value in that dimension.
MyuFunctions make_myu_functions(const Concrete::CData& cdata,
                                const ivector& numbers,
                                const MinMax& minmax) {
  const int x_length = minmax.max_x.size();
  Function* y_function = function3(coord(minmax.min_y, 0),
                                   coord((minmax.max_y + minmax.min_y) / 2.0, 1.0),
                                   coord(minmax.max_y, 0));
  MyuFunctions result = {fvector(0), y_function};
  for (int i = 0; i < numbers.size(); ++i) {
    for (int j = 0; j < x_length; ++j) {
      // Bug fix: the triangle's support is the min/max of x-dimension j, not
      // of pattern i -- min_x/max_x have length x_length, so indexing them
      // with i read out of bounds whenever numbers.size() > x_length.
      Function* max_f = function3(coord(minmax.min_x[j], 0),
                                  coord(cdata[numbers[i]]->at(j), 1.0),
                                  coord(minmax.max_x[j], 0));
      result.x_funcs.push_back(max_f);
    }
  }
  return result;
}
// @todo: функция должна уменьшать количество правил. fuzzy::rule_vector make_rules(const Concrete::CData&cdata,const MyuFunctions&mf,const ivector&numbers) { const int x_length=cdata.x_count(); fuzzy::rule_vector result; for(int i=0;i<numbers.size();++i){ fuzzy::Rule rule; for(int x=0;x<x_length;++x){ int num=-1; // номер функции для текущего x с максимальной принадлженостью double max_value=-1; // Маскимальная принадлежность x к правилу num for(int f=0;f<mf.x_funcs.size();++f){ double cur_value=(*mf.x_funcs[f])(cdata[numbers[i]]->at(x)); if(cur_value>max_value){ max_value=cur_value; num=f; } } rule.push_back(mf.x_funcs[num]); } rule.set_y(cdata.y_for_xp(i)); result.push_back(rule); } return result; }
// Per-dimension minima/maxima of x and global min/max of y over the patterns
// selected by 'numbers'.
MinMax find_min_max(const Concrete::CData& cdata, const ivector& numbers) {
  const int x_length = cdata.x_count();
  MinMax result = {dvector(x_length, 0), dvector(x_length, 0), -1, -1};
  if (numbers.size() == 0) {
    return result; // nothing to scan; keep the neutral initialization
  }
  // Bug fix: seed the extrema from the first selected pattern. The old code
  // started from 0 (x) resp. -1 (y), so e.g. min_x stayed 0 whenever all
  // values were positive, and min_y stayed -1 for any data above -1.
  for (int j = 0; j < x_length; ++j) {
    result.max_x[j] = result.min_x[j] = cdata[numbers[0]]->at(j);
  }
  result.max_y = result.min_y = cdata.y_for_xp(numbers[0]);

  for (int i = 1; i < numbers.size(); ++i) {
    // Minimum and maximum values of X per dimension.
    for (int j = 0; j < x_length; ++j) {
      const double v = cdata[numbers[i]]->at(j);
      if (result.max_x[j] < v)
        result.max_x[j] = v;
      else if (result.min_x[j] > v)
        result.min_x[j] = v;
    }
    // Minimum and maximum values of Y.
    const double y = cdata.y_for_xp(numbers[i]);
    if (result.max_y < y)
      result.max_y = y;
    else if (result.min_y > y)
      result.min_y = y;
  }
  return result;
}
int parseFormat(string ffname, ivector & tvec, svector & dnames, svector & delims, int *grpsize) { ifstream ifstr(ffname.c_str(), ios::in); if (ifstr.fail() || ifstr.eof()) { cerr << "couldnt open format file" << endl; throw; } char *next, *third, *newstr, *linebuf = new char[80]; while (!ifstr.bad() && !ifstr.eof()) { ifstr.getline(linebuf, 80); if (strlen(linebuf) > 1) { next = strchr(linebuf, ' '); *next++ = 0; third = strchr(next, ' '); if (third) *third++ = 0; dnames.push_back(next); if (strncmp(linebuf, "int", 3) == 0) { tvec.push_back(ftype_int); delims.push_back(""); } else if (strncmp(linebuf, "dint", 4) == 0) { tvec.push_back(ftype_dint); delims.push_back(""); } else if (strncmp(linebuf, "qhex", 4) == 0) { tvec.push_back(ftype_qhex); delims.push_back(""); } else if (strncmp(linebuf, "float", 5) == 0) { tvec.push_back(ftype_float); delims.push_back(""); } else if (strncmp(linebuf, "double", 6) == 0) { tvec.push_back(ftype_double); delims.push_back(""); } else if (strncmp(linebuf, "word", 4) == 0) { tvec.push_back(ftype_word); delims.push_back(""); } else if (strncmp(linebuf, "string", 6) == 0) { tvec.push_back(ftype_string); ifstr.getline(linebuf, 80); newstr = new char[strlen(linebuf)+1]; strcpy(newstr, linebuf); delims.push_back(newstr); } else if (strncmp(linebuf, "date", 4) == 0) { tvec.push_back(ftype_date); delims.push_back(""); } else if (strncmp(linebuf, "mdate", 5) == 0) { tvec.push_back(ftype_mdate); delims.push_back(""); } else if (strncmp(linebuf, "cmdate", 6) == 0) { tvec.push_back(ftype_cmdate); delims.push_back(""); } else if (strncmp(linebuf, "dt", 2) == 0) { tvec.push_back(ftype_dt); delims.push_back(""); } else if (strncmp(linebuf, "mdt", 3) == 0) { tvec.push_back(ftype_mdt); delims.push_back(""); } else if (strncmp(linebuf, "group", 5) == 0) { sscanf(third, "%d", grpsize); tvec.push_back(ftype_group); delims.push_back(""); } else if (strncmp(linebuf, "igroup", 6) == 0) { sscanf(third, "%d", grpsize); tvec.push_back(ftype_igroup); 
ifstr.getline(linebuf, 80); delims.push_back(linebuf); } else if (strncmp(linebuf, "digroup", 7) == 0) { sscanf(third, "%d", grpsize); tvec.push_back(ftype_digroup); ifstr.getline(linebuf, 80); delims.push_back(linebuf); } else { cerr << "couldnt parse format file line " << tvec.size()+1 << endl; throw; } } } return tvec.size(); delete [] linebuf; }
int parseLine(char * line, int membuf, int lineno, const char * delim1, ivector & tvec, svector & delims, srivector & srv, ftvector & out, int grpsize) { int i, ival; int64 dival; qint qval; float fval; double dval; char * here, * next; here = line; for (i = 0; i < tvec.size(); i++) { next = strpbrk(here, delim1); if (!next && i < tvec.size()-1) { cerr << "parseLine: format error line " << lineno << endl; cerr << " contents: " << line << " ... " << here << endl; throw 10; } if (next && *next) *(next++) = 0; switch (tvec[i]) { case ftype_int: sscanf(here, "%d", &ival); out[i].iv.push_back(ival); break; case ftype_dint: sscanf(here, "%lld", &dival); out[i].div.push_back(dival); break; case ftype_qhex: sscanf(here, "%16llx%16llx", &qval.top, &qval.bottom); out[i].qv.push_back(qval); break; case ftype_float: sscanf(here, "%f", &fval); out[i].fv.push_back(fval); break; case ftype_double: sscanf(here, "%lf", &dval); out[i].dv.push_back(dval); break; case ftype_word: here += strspn(here, " "); out[i].iv.push_back(srv[i].checkword(here)); break; case ftype_string: out[i].im.push_back(srv[i].checkstring(here, delims[i].c_str())); break; case ftype_dt: ival = parsedt(here); if (ival < 0) printf("\nWarning: bad dt on line %d\n", lineno); out[i].iv.push_back(ival); break; case ftype_mdt: ival = parsemdt(here); if (ival < 0) printf("\nWarning: bad mdt on line %d\n", lineno); out[i].iv.push_back(ival); break; case ftype_date: ival = parsedate(here); if (ival < 0) printf("\nWarning: bad date on line %d\n", lineno); out[i].iv.push_back(ival); break; case ftype_mdate: ival = parsemdate(here); if (ival < 0) printf("\nWarning: bad mdate on line %d\n", lineno, here); out[i].iv.push_back(ival); break; case ftype_cmdate: ival = parsecmdate(here); if (ival < 0) printf("\nWarning: bad cmdate on line %d\n", lineno, here); out[i].iv.push_back(ival); break; case ftype_group: *(next-1) = *delim1; out[i].im.push_back(srv[i].checkstrings(&here, delim1, grpsize)); next = here; break; case 
ftype_igroup: // *(next-1) = *delim1; out[i].im.push_back(srv[i].checkgroup(&here, delims[i].c_str(), grpsize)); next = here; break; case ftype_digroup: // *(next-1) = *delim1; out[i].dim.push_back(srv[i].checkdgroup(&here, delims[i].c_str(), grpsize)); next = here; break; default: break; } here = next; } return 0; }
/*
 * The chromosome length is ((number of x) * number of experiments) * 3,
 * where 3 is the number of parameters of the exponential activation function.
 *
 * Stores non-owning pointers/references to the training data (cdata) and the
 * selected pattern indices (numbers); the base fitness object is initialized
 * with the chromosome length and the tag "sugeno".
 */
bin_sugeno::bin_sugeno(Concrete::CData*cdata,const ivector&numbers):fitness(cdata->x_count()*numbers.size()*3,"sugeno"), m_cdata(cdata), m_numbers(numbers) {}
// Train the MLP on the given data set.
//
// theWeights: initial network weights (copied into the member 'weights'
//             unless the caller passed that very member).
// data:       one training pattern per row.
// ids:        external class id of each row; must match data.rows().
//
// Returns false on inconsistent/empty input or invalid weights; otherwise
// returns the success flag of the selected training algorithm.
bool MLP::train(const dvector& theWeights, const dmatrix& data, const ivector& ids) {
  if (data.empty()) {
    setStatusString("Train data empty");
    return false;
  }
  if (ids.size()!=data.rows()) {
    std::string str;
    str = "dimensionality of IDs vector and the number of rows ";
    str+= "of the input matrix must have the same size.";
    setStatusString(str.c_str());
    return false;
  }
  // tracks the status of the training process.
  // if an error occurs set to false and use setStatusString()
  // however, training should continue, fixing the error as well as possible
  bool b=true;
  // vector with internal ids: external class labels are remapped to the
  // contiguous range 0..k-1 in order of first appearance.
  ivector newIds,idsLUT;
  newIds.resize(ids.size(),0,false,false);
  // map to get the internal Id to an external Id;
  std::map<int,int> extToInt;
  std::map<int,int>::iterator it;
  int i,k;
  for (i=0,k=0;i<ids.size();++i) {
    it = extToInt.find(ids.at(i));
    if (it != extToInt.end()) {
      newIds.at(i) = (*it).second;
    } else {
      extToInt[ids.at(i)] = k;
      newIds.at(i) = k;
      ++k;
    }
  }
  // idsLUT is the inverse mapping: internal id -> external id.
  idsLUT.resize(extToInt.size());
  for (it=extToInt.begin();it!=extToInt.end();++it) {
    idsLUT.at((*it).second) = (*it).first;
  }
  // initialize the inputs and output units from the given data
  outputs = idsLUT.size();
  inputs = data.columns();
  const parameters& param = getParameters();
  // display which kind of algorithm is to be used
  if (validProgressObject()) {
    getProgressObject().reset();
    std::string str("MLP: Training using ");
    switch(param.trainingMode) {
      case parameters::ConjugateGradients:
        str += "conjugate gradients";
        break;
      case parameters::SteepestDescent:
        str += "steepest descent";
        break;
      default:
        str += "unnamed method";
    }
    getProgressObject().setTitle(str);
    getProgressObject().setMaxSteps(param.maxNumberOfEpochs+1);
  }
  dvector grad;
  // avoid a self-copy when the caller passed our own weight vector.
  if (&theWeights != &weights) {
    weights.copy(theWeights);
  }
  if (!initWeights(true)) { // keep the weights
    setStatusString("Wrong weights!");
    return false;
  };
  // precompute the error normalization used during training.
  computeErrorNorm(newIds);
  if (param.trainingMode == parameters::ConjugateGradients) {
    b = trainConjugateGradients(data,newIds);
  } else {
    if (param.batchMode) {
      // batch training mode:
      b = trainSteepestBatch(data,newIds);
    } else {
      // sequential training mode:
      b = trainSteepestSequential(data,newIds);
    }
  }
  if (validProgressObject()) {
    getProgressObject().step("Training ready.");
  }
  outputTemplate tmpOutTemp(idsLUT);
  setOutputTemplate(tmpOutTemp);
  // create the appropriate outputTemplate
  makeOutputTemplate(outputs,data,ids);
  return b;
}