/* Interface to numerical recipes: svbksb ---------------------------------- */
// Back-substitution step of an SVD solve: given the decomposition A = U*W*V^t
// (as produced by svdcmp) and a right-hand side b, computes the solution of
// A*x = b.
//   u, w, v : SVD factors (U, vector of singular values W, and V)
//   b       : right-hand side vector
//   x       : output, the solution vector
void svbksb(Matrix2D<double> &u, Matrix1D<double> &w, Matrix2D<double> &v,
            Matrix1D<double> &b, Matrix1D<double> &x)
{
    // Call to the numerical recipes routine. Results will be stored in X
    svbksb(u.adaptForNumericalRecipes2(),
           w.adaptForNumericalRecipes(),
           v.adaptForNumericalRecipes2(),
           u.mdimy, u.mdimx,
           b.adaptForNumericalRecipes(),
           x.adaptForNumericalRecipes());
}
/* Euler direction --------------------------------------------------------- */
// Builds the 3D direction vector defined by the Euler angles alpha and beta
// (both given in degrees) and returns it in v (resized to 3 elements).
// This is the standard spherical-to-Cartesian conversion with beta as the
// tilt (polar) angle and alpha as the rotation (azimuthal) angle.
void Euler_angles2direction(DOUBLE alpha, DOUBLE beta, Matrix1D<DOUBLE> &v)
{
    v.resize(3);

    // Work in radians from here on
    const DOUBLE alphaRad = DEG2RAD(alpha);
    const DOUBLE betaRad  = DEG2RAD(beta);

    const DOUBLE sinBeta = sin(betaRad);

    v(0) = sinBeta * cos(alphaRad);
    v(1) = sinBeta * sin(alphaRad);
    v(2) = cos(betaRad);
}
/* Do inference ------------------------------------------------------------ */
// Naive-Bayes inference for one sample: accumulates per-class log10
// probabilities (prior + weighted per-feature likelihoods), converts them to
// normalized posteriors, weighs them by the misclassification cost matrix and
// returns the class of minimum cost.
//   newFeatures  : feature vector of the sample (Nfeatures entries)
//   cost         : output, log10 cost of the chosen class
//   classesProbs : output, normalized posterior probability of each class
//   allCosts     : output, log10 cost of deciding each class
// Returns the index of the best (minimum cost) class.
int NaiveBayes::doInference(const MultidimArray<double> &newFeatures, double &cost,
                            Matrix1D<double> &classesProbs, Matrix1D<double> &allCosts)
{
    // Start from the log10 of the class priors
    classesProbs=__priorProbsLog10;
    for(int f=0; f<Nfeatures; f++)
    {
        const LeafNode &leaf_f=*(__leafs[f]);
        double newFeatures_f=DIRECT_A1D_ELEM(newFeatures,f);
        for (int k=0; k<K; k++)
        {
            double p = leaf_f.assignProbability(newFeatures_f, k);

            // Near-zero probabilities would drive log10(p) to -infinity;
            // apply a fixed -2 penalty (scaled by the feature weight) instead
            if (fabs(p) < 1e-2)
                VEC_ELEM(classesProbs,k) += -2*DIRECT_A1D_ELEM(__weights,f);
            else
                VEC_ELEM(classesProbs,k) += DIRECT_A1D_ELEM(__weights,f)*std::log10(p);

#ifdef DEBUG_FINE_CLASSIFICATION
            if(debugging == true)
            {
                std::cout << "Feature " << f
                          << " Probability for class " << k << " = "
                          << classesProbs(k) << " increase= " << p
                          << std::endl;
                char c; // COSS
                std::cin >> c; //
                if (c=='q') debugging = false;
            }
#endif
        }
    }

    // Shift so the maximum log-probability is 0 (avoids underflow in pow)
    classesProbs-=classesProbs.computeMax();
    // std::cout << "classesProbs " << classesProbs.transpose() << std::endl;

    // Leave log10 space and normalize the posteriors to sum 1
    for (int k=0; k<K; k++)
        VEC_ELEM(classesProbs,k)=pow(10.0,VEC_ELEM(classesProbs,k));
    classesProbs*=1.0/classesProbs.sum();
    // std::cout << "classesProbs norm " << classesProbs.transpose() << std::endl;

    // Expected cost of deciding each class
    allCosts=__cost*classesProbs;
    // std::cout << "allCosts " << allCosts.transpose() << std::endl;

    // Pick the class with minimum log10 cost
    int bestk=0;
    cost=VEC_ELEM(allCosts,0)=std::log10(VEC_ELEM(allCosts,0));
    for (int k=1; k<K; k++)
    {
        VEC_ELEM(allCosts,k)=std::log10(VEC_ELEM(allCosts,k));
        if (VEC_ELEM(allCosts,k)<cost)
        {
            cost=VEC_ELEM(allCosts,k);
            bestk=k;
        }
    }

#ifdef DEBUG_CLASSIFICATION
    if(debugging == true)
    {
        for (int k=0; k<K; k++)
            classesProbs(k)=log10(classesProbs(k));
        std::cout << "Class probababilities=" << classesProbs.transpose()
                  << "\n costs=" << allCosts.transpose()
                  << " best class=" << bestk << " cost=" << cost << std::endl;
        char c; // COSS
        std::cin >> c; //
        if (c=='q') debugging = false;
    }
#endif
    return bestk;
}
//gamma is useless but I keep it for simmetry //with Euler_direction void Euler_direction2angles(Matrix1D<DOUBLE> &v0, DOUBLE &alpha, DOUBLE &beta) { DOUBLE abs_ca, sb, cb; DOUBLE aux_alpha; DOUBLE aux_beta; DOUBLE error, newerror; Matrix1D<DOUBLE> v_aux; Matrix1D<DOUBLE> v; //if not normalized do it so v.resize(3); v = v0; v.selfNormalize(); v_aux.resize(3); cb = v(2); if (fabs((cb)) > 0.999847695)/*one degree */ { std::cerr << "\nWARNING: Routine Euler_direction2angles is not reliable\n" "for small tilt angles. Up to 0.001 deg it should be OK\n" "for most applications but you never know"; } if (fabs((cb - 1.)) < FLT_EPSILON) { alpha = 0.; beta = 0.; } else {/*1*/ aux_beta = acos(cb); /* beta between 0 and PI */ sb = sin(aux_beta); abs_ca = fabs(v(0)) / sb; if (fabs((abs_ca - 1.)) < FLT_EPSILON) aux_alpha = 0.; else aux_alpha = acos(abs_ca); v_aux(0) = sin(aux_beta) * cos(aux_alpha); v_aux(1) = sin(aux_beta) * sin(aux_alpha); v_aux(2) = cos(aux_beta); error = fabs(dotProduct(v, v_aux) - 1.); alpha = aux_alpha; beta = aux_beta; v_aux(0) = sin(aux_beta) * cos(-1. * aux_alpha); v_aux(1) = sin(aux_beta) * sin(-1. * aux_alpha); v_aux(2) = cos(aux_beta); newerror = fabs(dotProduct(v, v_aux) - 1.); if (error > newerror) { alpha = -1. * aux_alpha; beta = aux_beta; error = newerror; } v_aux(0) = sin(-aux_beta) * cos(-1. * aux_alpha); v_aux(1) = sin(-aux_beta) * sin(-1. * aux_alpha); v_aux(2) = cos(-aux_beta); newerror = fabs(dotProduct(v, v_aux) - 1.); if (error > newerror) { alpha = -1. * aux_alpha; beta = -1. * aux_beta; error = newerror; } v_aux(0) = sin(-aux_beta) * cos(aux_alpha); v_aux(1) = sin(-aux_beta) * sin(aux_alpha); v_aux(2) = cos(-aux_beta); newerror = fabs(dotProduct(v, v_aux) - 1.); if (error > newerror) { alpha = aux_alpha; beta = -1. * aux_beta; error = newerror; } }/*else 1 end*/ beta = RAD2DEG(beta); alpha = RAD2DEG(alpha); }/*Eulerdirection2angles end*/
//The following 2 functions (GetTeachersTimetable & GetSubgroupsTimetable)
//are very similar to the above 2 ones (GetTeachersMatrix & GetSubgroupsMatrix)
//void Solution::getTeachersTimetable(Rules& r, qint16 a[MAX_TEACHERS][MAX_DAYS_PER_WEEK][MAX_HOURS_PER_DAY], QList<qint16> b[TEACHERS_FREE_PERIODS_N_CATEGORIES][MAX_DAYS_PER_WEEK][MAX_HOURS_PER_DAY]){
//void Solution::getTeachersTimetable(Rules& r, Matrix3D<qint16>& a, QList<qint16> b[TEACHERS_FREE_PERIODS_N_CATEGORIES][MAX_DAYS_PER_WEEK][MAX_HOURS_PER_DAY]){
// Builds the per-teacher timetable of this solution:
//   a[teacher][day][hour]   = activity placed there (or UNALLOCATED_ACTIVITY)
//   b[category][day][hour]  = list of teachers whose free period at that slot
//                             falls into that category (free day, gap, must
//                             come earlier/stay longer, not available, ...)
void Solution::getTeachersTimetable(Rules& r, Matrix3D<qint16>& a, Matrix3D<QList<qint16> >& b){
	//assert(HFitness()==0); //This is only for perfect solutions, that do not have any non-satisfied hard constrains

	assert(r.initialized);
	assert(r.internalStructureComputed);

	a.resize(r.nInternalTeachers, r.nDaysPerWeek, r.nHoursPerDay);
	b.resize(TEACHERS_FREE_PERIODS_N_CATEGORIES, r.nDaysPerWeek, r.nHoursPerDay);

	int i, j, k;
	// Start with a completely empty timetable
	for(i=0; i<r.nInternalTeachers; i++)
		for(j=0; j<r.nDaysPerWeek; j++)
			for(k=0; k<r.nHoursPerDay; k++)
				//a1[i][j][k]=a2[i][j][k]=UNALLOCATED_ACTIVITY;
				a[i][j][k]=UNALLOCATED_ACTIVITY;

	Activity *act;
	// Place every scheduled activity in the timetable of each of its teachers
	for(i=0; i<r.nInternalActivities; i++)
		if(this->times[i]!=UNALLOCATED_TIME) {
			act=&r.internalActivitiesList[i];
			// times[i] encodes the slot as hour*nDaysPerWeek + day
			int hour=this->times[i]/r.nDaysPerWeek;
			int day=this->times[i]%r.nDaysPerWeek;
			for(int dd=0; dd < act->duration; dd++){
				assert(hour+dd<r.nHoursPerDay);
				for(int ti=0; ti<act->iTeachersList.count(); ti++){
					int tch = act->iTeachersList.at(ti); //teacher index
					/*if(a1[tch][day][hour+dd]==UNALLOCATED_ACTIVITY)
						a1[tch][day][hour+dd]=i;
					else
						a2[tch][day][hour+dd]=i;*/
					// A teacher can hold at most one activity per slot
					assert(a[tch][day][hour+dd]==UNALLOCATED_ACTIVITY);
					a[tch][day][hour+dd]=i;
				}
			}
		}

	//Prepare teachers free periods timetable.
	//Code contributed by Volker Dirr (http://timetabling.de/) BEGIN
	int d,h,tch;
	for(d=0; d<r.nDaysPerWeek; d++){
		for(h=0; h<r.nHoursPerDay; h++){
			for(int tfp=0; tfp<TEACHERS_FREE_PERIODS_N_CATEGORIES; tfp++){
				b[tfp][d][h].clear();
			}
		}
	}
	// Classify every free slot of every teacher
	for(tch=0; tch<r.nInternalTeachers; tch++){
		for(d=0; d<r.nDaysPerWeek; d++){
			// First and last busy hours of this teacher on day d
			int firstPeriod=-1;
			int lastPeriod=-1;
			for(h=0; h<r.nHoursPerDay; h++){
				if(a[tch][d][h]!=UNALLOCATED_ACTIVITY){
					if(firstPeriod==-1)
						firstPeriod=h;
					lastPeriod=h;
				}
			}
			if(firstPeriod==-1){
				// No activity at all: the whole day is free
				for(h=0; h<r.nHoursPerDay; h++){
					b[TEACHER_HAS_A_FREE_DAY][d][h]<<tch;
				}
			} else {
				// Hours before the first activity of the day
				for(h=0; h<firstPeriod; h++){
					if(firstPeriod-h==1){
						b[TEACHER_MUST_COME_EARLIER][d][h]<<tch;
					}
					else {
						b[TEACHER_MUST_COME_MUCH_EARLIER][d][h]<<tch;
					}
				}
				// Gaps between the first and last activity; classify by
				// whether the neighboring slots are also free
				for(; h<lastPeriod+1; h++){
					if(a[tch][d][h]==UNALLOCATED_ACTIVITY){
						if(a[tch][d][h+1]==UNALLOCATED_ACTIVITY){
							if(a[tch][d][h-1]==UNALLOCATED_ACTIVITY){
								b[TEACHER_HAS_BIG_GAP][d][h]<<tch;
							} else {
								b[TEACHER_HAS_BORDER_GAP][d][h]<<tch;
							}
						} else {
							if(a[tch][d][h-1]==UNALLOCATED_ACTIVITY){
								b[TEACHER_HAS_BORDER_GAP][d][h]<<tch;
							} else {
								b[TEACHER_HAS_SINGLE_GAP][d][h]<<tch;
							}
						}
					}
				}
				// Hours after the last activity of the day
				for(; h<r.nHoursPerDay; h++){
					if(lastPeriod-h==-1){
						b[TEACHER_MUST_STAY_LONGER][d][h]<<tch;
					}
					else {
						b[TEACHER_MUST_STAY_MUCH_LONGER][d][h]<<tch;
					}
				}
			}
		}
	}
	//care about not available teacher and breaks
	for(tch=0; tch<r.nInternalTeachers; tch++){
		for(d=0; d<r.nDaysPerWeek; d++){
			for(h=0; h<r.nHoursPerDay; h++){
				if(teacherNotAvailableDayHour[tch][d][h]==true || breakDayHour[d][h]==true){
					int removed=0;
					// Reclassify slots the teacher could not attend anyway
					for(int tfp=0; tfp<TEACHER_IS_NOT_AVAILABLE; tfp++){
						if(b[tfp][d][h].contains(tch)){
							removed+=b[tfp][d][h].removeAll(tch);
							if(breakDayHour[d][h]==false)
								b[TEACHER_IS_NOT_AVAILABLE][d][h]<<tch;
						}
					}
					// Each teacher must have been in exactly one category
					assert(removed==1);
				}
			}
		}
	}
	//END of Code contributed by Volker Dirr (http://timetabling.de/) END

	//bool visited[MAX_TEACHERS];
	// Sanity check: no teacher may appear in more than one category per slot
	Matrix1D<bool> visited;
	visited.resize(r.nInternalTeachers);
	for(d=0; d<r.nDaysPerWeek; d++){
		for(h=0; h<r.nHoursPerDay; h++){
			for(tch=0; tch<r.nInternalTeachers; tch++)
				visited[tch]=false;
			for(int tfp=0; tfp<TEACHERS_FREE_PERIODS_N_CATEGORIES; tfp++){
				foreach(int tch, b[tfp][d][h]){
					assert(!visited[tch]);
					visited[tch]=true;
				}
			}
		}
	}
//#define DEBUG
// Line integral of the Bspline03 kernel along a ray.
//   r : a point on the ray
//   u : the ray direction
// The spline support is the cube [-2,2]^3. The ray is clipped against the
// cube (parametric alpha range) and then walked cell boundary by cell
// boundary, accumulating spatial_Bspline03_integral over each segment.
// Returns 0 if the ray does not intersect the support.
double spatial_Bspline03_proj(
    const Matrix1D<double> &r, const Matrix1D<double> &u)
{
    // Avoids divisions by zero and allows orthogonal rays computation
    static Matrix1D<double> ur(3);
    if (XX(u) == 0) XX(ur) = XMIPP_EQUAL_ACCURACY;
    else XX(ur) = XX(u);
    if (YY(u) == 0) YY(ur) = XMIPP_EQUAL_ACCURACY;
    else YY(ur) = YY(u);
    if (ZZ(u) == 0) ZZ(ur) = XMIPP_EQUAL_ACCURACY;
    else ZZ(ur) = ZZ(u);

    // Some precalculated variables
    double x_sign = SGN(XX(ur));
    double y_sign = SGN(YY(ur));
    double z_sign = SGN(ZZ(ur));

    // Compute the minimum and maximum alpha for the ray: parametric
    // intersection with each pair of opposite cube faces
    double alpha_xmin = (-2 - XX(r)) / XX(ur);
    double alpha_xmax = (2 - XX(r)) / XX(ur);
    double alpha_ymin = (-2 - YY(r)) / YY(ur);
    double alpha_ymax = (2 - YY(r)) / YY(ur);
    double alpha_zmin = (-2 - ZZ(r)) / ZZ(ur);
    double alpha_zmax = (2 - ZZ(r)) / ZZ(ur);

    // Entry and exit parameters of the ray w.r.t. the cube
    double alpha_min = XMIPP_MAX(XMIPP_MIN(alpha_xmin, alpha_xmax),
                                 XMIPP_MIN(alpha_ymin, alpha_ymax));
    alpha_min = XMIPP_MAX(alpha_min, XMIPP_MIN(alpha_zmin, alpha_zmax));
    double alpha_max = XMIPP_MIN(XMIPP_MAX(alpha_xmin, alpha_xmax),
                                 XMIPP_MAX(alpha_ymin, alpha_ymax));
    alpha_max = XMIPP_MIN(alpha_max, XMIPP_MAX(alpha_zmin, alpha_zmax));
    // Empty (or degenerate) intersection: the ray misses the support
    if (alpha_max - alpha_min < XMIPP_EQUAL_ACCURACY)
        return 0.0;

#ifdef DEBUG
    std::cout << "Pixel: " << r.transpose() << std::endl
    << "Dir: " << ur.transpose() << std::endl
    << "Alpha x:" << alpha_xmin << " " << alpha_xmax << std::endl
    << " " << (r + alpha_xmin*ur).transpose() << std::endl
    << " " << (r + alpha_xmax*ur).transpose() << std::endl
    << "Alpha y:" << alpha_ymin << " " << alpha_ymax << std::endl
    << " " << (r + alpha_ymin*ur).transpose() << std::endl
    << " " << (r + alpha_ymax*ur).transpose() << std::endl
    << "Alpha z:" << alpha_zmin << " " << alpha_zmax << std::endl
    << " " << (r + alpha_zmin*ur).transpose() << std::endl
    << " " << (r + alpha_zmax*ur).transpose() << std::endl
    << "alpha :" << alpha_min << " " << alpha_max << std::endl
    << std::endl;
#endif

    // Compute the first point in the volume intersecting the ray
    static Matrix1D<double> v(3);
    V3_BY_CT(v, ur, alpha_min);
    V3_PLUS_V3(v, r, v);

#ifdef DEBUG
    std::cout << "First entry point: " << v.transpose() << std::endl;
    std::cout << " Alpha_min: " << alpha_min << std::endl;
#endif

    // Follow the ray
    double alpha = alpha_min;
    double ray_sum = 0;
    do
    {
        // Parameter at which the ray crosses the next x/y/z cell boundary
        double alpha_x = (XX(v) + x_sign - XX(r)) / XX(ur);
        double alpha_y = (YY(v) + y_sign - YY(r)) / YY(ur);
        double alpha_z = (ZZ(v) + z_sign - ZZ(r)) / ZZ(ur);

        // Which dimension will ray move next step into?, it isn't neccesary to be only
        // one.
        double diffx = ABS(alpha - alpha_x);
        double diffy = ABS(alpha - alpha_y);
        double diffz = ABS(alpha - alpha_z);
        double diff_alpha = XMIPP_MIN(XMIPP_MIN(diffx, diffy), diffz);

        // Accumulate the spline integral over this segment
        ray_sum += spatial_Bspline03_integral(r, ur, alpha, alpha + diff_alpha);

        // Update alpha and the next entry point
        if (ABS(diff_alpha - diffx) <= XMIPP_EQUAL_ACCURACY) alpha = alpha_x;
        if (ABS(diff_alpha - diffy) <= XMIPP_EQUAL_ACCURACY) alpha = alpha_y;
        if (ABS(diff_alpha - diffz) <= XMIPP_EQUAL_ACCURACY) alpha = alpha_z;
        XX(v) += diff_alpha * XX(ur);
        YY(v) += diff_alpha * YY(ur);
        ZZ(v) += diff_alpha * ZZ(ur);

#ifdef DEBUG
        std::cout << "Alpha x,y,z: " << alpha_x << " " << alpha_y
        << " " << alpha_z << " ---> " << alpha << std::endl;

        std::cout << " Next entry point: " << v.transpose() << std::endl
        << " diff_alpha: " << diff_alpha << std::endl
        << " ray_sum: " << ray_sum << std::endl
        << " Alfa tot: " << alpha << "alpha_max: " << alpha_max << std::endl;
#endif
    }
    while ((alpha_max - alpha) > XMIPP_EQUAL_ACCURACY);
    return ray_sum;
}
// Outliers =============================================================== void ProgClassifyCL2DCore::computeStableCores() { if (verbose && node->rank==0) std::cerr << "Computing stable cores ...\n"; MetaData thisClass, anotherClass, commonImages, thisClassCore; MDRow row; size_t first, last; Matrix2D<unsigned char> coocurrence; Matrix1D<unsigned char> maximalCoocurrence; int Nblocks=blocks.size(); taskDistributor->reset(); std::vector<size_t> commonIdx; std::map<String,size_t> thisClassOrder; String fnImg; while (taskDistributor->getTasks(first, last)) for (size_t idx=first; idx<=last; ++idx) { // Read block CL2DBlock &thisBlock=blocks[idx]; if (thisBlock.level<=tolerance) continue; if (!existsBlockInMetaDataFile(thisBlock.fnLevelCore, thisBlock.block)) continue; thisClass.read(thisBlock.block+"@"+thisBlock.fnLevelCore); thisClassCore.clear(); // Add MDL_ORDER if (thisClass.size()>0) { size_t order=0; thisClassOrder.clear(); FOR_ALL_OBJECTS_IN_METADATA(thisClass) { thisClass.getValue(MDL_IMAGE,fnImg,__iter.objId); thisClassOrder[fnImg]=order++; } // Calculate coocurrence within all blocks whose level is inferior to this size_t NthisClass=thisClass.size(); if (NthisClass>0) { try { coocurrence.initZeros(NthisClass,NthisClass); } catch (XmippError e) { std::cerr << e << std::endl; std::cerr << "There is a memory allocation error. Most likely there are too many images in this class (" << NthisClass << " images). 
Consider increasing the number of initial and final classes\n"; REPORT_ERROR(ERR_MEM_NOTENOUGH,"While computing stable class"); } for (int n=0; n<Nblocks; n++) { CL2DBlock &anotherBlock=blocks[n]; if (anotherBlock.level>=thisBlock.level) break; if (!existsBlockInMetaDataFile(anotherBlock.fnLevelCore, anotherBlock.block)) continue; anotherClass.read(anotherBlock.block+"@"+anotherBlock.fnLevelCore); anotherClass.intersection(thisClass,MDL_IMAGE); commonImages.join1(anotherClass, thisClass, MDL_IMAGE,LEFT); commonIdx.resize(commonImages.size()); size_t idx=0; FOR_ALL_OBJECTS_IN_METADATA(commonImages) { commonImages.getValue(MDL_IMAGE,fnImg,__iter.objId); commonIdx[idx++]=thisClassOrder[fnImg]; } size_t Ncommon=commonIdx.size(); for (size_t i=0; i<Ncommon; i++) { size_t idx_i=commonIdx[i]; for (size_t j=i+1; j<Ncommon; j++) { size_t idx_j=commonIdx[j]; MAT_ELEM(coocurrence,idx_i,idx_j)+=1; } } } } // Take only those elements whose coocurrence is maximal maximalCoocurrence.initZeros(NthisClass); int aimedCoocurrence=thisBlock.level-tolerance; FOR_ALL_ELEMENTS_IN_MATRIX2D(coocurrence) if (MAT_ELEM(coocurrence,i,j)==aimedCoocurrence) VEC_ELEM(maximalCoocurrence,i)=VEC_ELEM(maximalCoocurrence,j)=1; // Now compute core FOR_ALL_OBJECTS_IN_METADATA(thisClass) { thisClass.getValue(MDL_IMAGE,fnImg,__iter.objId); size_t idx=thisClassOrder[fnImg]; if (VEC_ELEM(maximalCoocurrence,idx)) { thisClass.getRow(row,__iter.objId); thisClassCore.addRow(row); } } } thisClassCore.write(thisBlock.fnLevel.insertBeforeExtension((String)"_stable_core_"+thisBlock.block),MD_APPEND); }
void PolyZernikes::fit(const Matrix1D<int> & coef, MultidimArray<double> & im, MultidimArray<double> &weight, MultidimArray<bool> & ROI, int verbose) { this->create(coef); size_t xdim = XSIZE(im); size_t ydim = YSIZE(im); //int numZer = (size_t)coef.sum(); int numZer = (size_t)coef.sum(); //Actually polOrder corresponds to the polynomial order +1 int polOrder=(int)ZERNIKE_ORDER(coef.size()); im.setXmippOrigin(); Matrix2D<double> polValue(polOrder,polOrder); //First argument means number of images //Second argument means number of pixels WeightedLeastSquaresHelper weightedLeastSquaresHelper; Matrix2D<double>& zerMat=weightedLeastSquaresHelper.A; zerMat.resizeNoCopy((size_t)ROI.sum(), numZer); double iMaxDim2 = 2./std::max(xdim,ydim); size_t pixel_idx=0; weightedLeastSquaresHelper.b.resizeNoCopy((size_t)ROI.sum()); weightedLeastSquaresHelper.w.resizeNoCopy(weightedLeastSquaresHelper.b); FOR_ALL_ELEMENTS_IN_ARRAY2D(im) { if ( (A2D_ELEM(ROI,i,j))) { //For one i we swap the different j double y=i*iMaxDim2; double x=j*iMaxDim2; //polValue = [ 0 y y2 y3 ... // x xy xy2 xy3 ... 
// x2 x2y x2y2 x2y3 ] //dMij(polValue,py,px) py es fila, px es columna for (int py = 0; py < polOrder; ++py) { double ypy=std::pow(y,py); for (int px = 0; px < polOrder; ++px) dMij(polValue,px,py) = ypy*std::pow(x,px); } Matrix2D<int> *fMat; //We generate the representation of the Zernike polynomials for (int k=0; k < numZer; ++k) { fMat = &fMatV[k]; if (fMat == NULL) continue; double temp = 0; for (size_t px = 0; px < (*fMat).Xdim(); ++px) for (size_t py = 0; py < (*fMat).Ydim(); ++py) temp += dMij(*fMat,py,px)*dMij(polValue,py,px); dMij(zerMat,pixel_idx,k) = temp; } VEC_ELEM(weightedLeastSquaresHelper.b,pixel_idx)=A2D_ELEM(im,i,j); VEC_ELEM(weightedLeastSquaresHelper.w,pixel_idx)=std::abs(A2D_ELEM(weight,i,j)); ++pixel_idx; } } Matrix1D<double> zernikeCoefficients; weightedLeastSquares(weightedLeastSquaresHelper, zernikeCoefficients); fittedCoeffs = zernikeCoefficients; // Pointer to the image to be fitted MultidimArray<double> reconstructed; reconstructed.resizeNoCopy(im); pixel_idx=0; FOR_ALL_ELEMENTS_IN_ARRAY2D(im) if (A2D_ELEM(ROI,i,j)) { double temp=0; for (int k=0; k < numZer; ++k) temp+=dMij(zerMat,pixel_idx,k)*VEC_ELEM(fittedCoeffs,k); A2D_ELEM(reconstructed,i,j)=temp; if ( fabs(A2D_ELEM(reconstructed,i,j)-A2D_ELEM(im,i,j)) > PI) A2D_ELEM(ROI,i,j) = false; ++pixel_idx; } pixel_idx=0; if (verbose > 0) { Image<double> save; save()=reconstructed; save.write("reconstructedZernikes.xmp"); ROI.write("ROI.txt"); } }
// Evaluates the Zernike expansion with coefficients `coef` at every pixel of
// the ROI and writes the value directly into `im` (the input pixel values
// inside the ROI are overwritten). Coordinates are normalized by
// 2/max(xdim,ydim), as in fit(). If verbose==1 the result is saved as PPP1.xmp.
void PolyZernikes::zernikePols(const Matrix1D<int> coef, MultidimArray<double> & im, MultidimArray<bool> & ROI,
                               int verbose)
{
    this->create(coef);

    int polOrder=(int)ZERNIKE_ORDER(coef.size());
    int numZer = coef.size();

    int xdim = XSIZE(im);
    int ydim = YSIZE(im);

    im.setXmippOrigin();

    Matrix2D<double> polValue(polOrder,polOrder);

    double iMaxDim2 = 2./std::max(xdim,ydim);

    // Accumulates the expansion value of the current pixel; reset to 0 after
    // each pixel is written
    double temp = 0;
    FOR_ALL_ELEMENTS_IN_ARRAY2D(im)
    {
        if (A2D_ELEM(ROI,i,j))
        {
            //For one i we swap the different j
            double y=i*iMaxDim2;
            double x=j*iMaxDim2;

            //polValue = [ 0    y   y2    y3 ...
            //             x   xy  xy2   xy3 ...
            //             x2  x2y x2y2 x2y3 ]
            //dMij(polValue,py,px) py es fila, px es columna
            for (int py = 0; py < polOrder; ++py)
            {
                double ypy=std::pow(y,py);
                for (int px = 0; px < polOrder; ++px)
                    dMij(polValue,px,py) = ypy*std::pow(x,px);
            }

            Matrix2D<int> *fMat;

            //We generate the representation of the Zernike polynomials
            for (int k=0; k < numZer; ++k)
            {
                fMat = &fMatV[k];

                // Empty polynomial (single zero coefficient): contributes nothing
                if ( (dMij(*fMat,0,0) == 0) && MAT_SIZE(*fMat) == 1 )
                    continue;

                for (size_t px = 0; px < (*fMat).Xdim(); ++px)
                    for (size_t py = 0; py < (*fMat).Ydim(); ++py)
                        temp += dMij(*fMat,py,px)*dMij(polValue,py,px)*VEC_ELEM(coef,k);
            }

            A2D_ELEM(im,i,j) = temp;
            temp = 0;
        }
    }

    // Restore a zero-based origin for the caller
    STARTINGX(im)=STARTINGY(im)=0;

    if (verbose == 1)
    {
        Image<double> save;
        save()=im;
        save.write("PPP1.xmp");
    }
}
int main2() { MultidimArray<double> preImg, avgCurr, mappedImg; MultidimArray<double> outputMovie; Matrix1D<double> meanStdev; ImageGeneric movieStack; Image<double> II; MetaData MD; // To save plot information FileName motionInfFile, flowFileName, flowXFileName, flowYFileName; ArrayDim aDim; // For measuring times (both for whole process and for each level of the pyramid) clock_t tStart, tStart2; #ifdef GPU // Matrix that we required in GPU part GpuMat d_flowx, d_flowy, d_dest; GpuMat d_avgcurr, d_preimg; #endif // Matrix required by Opencv cv::Mat flow, dest, flowx, flowy; cv::Mat flowxPre, flowyPre; cv::Mat avgcurr, avgstep, preimg, preimg8, avgcurr8; cv::Mat planes[]={flowxPre, flowyPre}; int imagenum, cnt=2, div=0, flowCounter; int h, w, levelNum, levelCounter=1; motionInfFile=foname.replaceExtension("xmd"); std::string extension=fname.getExtension(); if (extension=="mrc") fname+=":mrcs"; movieStack.read(fname,HEADER); movieStack.getDimensions(aDim); imagenum = aDim.ndim; h = aDim.ydim; w = aDim.xdim; if (darkImageCorr) { II.read(darkRefFilename); darkImage=II(); } if (gainImageCorr) { II.read(gianRefFilename); gainImage=II(); } meanStdev.initZeros(4); //avgcurr=cv::Mat::zeros(h, w,CV_32FC1); avgCurr.initZeros(h, w); flowxPre=cv::Mat::zeros(h, w,CV_32FC1); flowyPre=cv::Mat::zeros(h, w,CV_32FC1); #ifdef GPU // Object for optical flow FarnebackOpticalFlow d_calc; setDevice(gpuDevice); // Initialize the parameters for optical flow structure d_calc.numLevels=6; d_calc.pyrScale=0.5; d_calc.fastPyramids=true; d_calc.winSize=winSize; d_calc.numIters=1; d_calc.polyN=5; d_calc.polySigma=1.1; d_calc.flags=0; #endif // Initialize the stack for the output movie if (saveCorrMovie) outputMovie.initZeros(imagenum, 1, h, w); // Correct for global motion from a cross-correlation based algorithms if (globalShiftCorr) { Matrix1D<double> shiftMatrix(2); shiftVector.reserve(imagenum); shiftMD.read(globalShiftFilename); FOR_ALL_OBJECTS_IN_METADATA(shiftMD) { 
shiftMD.getValue(MDL_SHIFT_X, XX(shiftMatrix), __iter.objId); shiftMD.getValue(MDL_SHIFT_Y, YY(shiftMatrix), __iter.objId); shiftVector.push_back(shiftMatrix); } } tStart2=clock(); // Compute the average of the whole stack fstFrame++; // Just to adapt to Li algorithm lstFrame++; // Just to adapt to Li algorithm if (lstFrame>=imagenum || lstFrame==1) lstFrame=imagenum; imagenum=lstFrame-fstFrame+1; levelNum=sqrt(double(imagenum)); computeAvg(fname, fstFrame, lstFrame, avgCurr); // if the user want to save the PSD if (doAverage) { II()=avgCurr; II.write(foname); return 0; } xmipp2Opencv(avgCurr, avgcurr); cout<<"Frames "<<fstFrame<<" to "<<lstFrame<<" under processing ..."<<std::endl; while (div!=groupSize) { div=int(imagenum/cnt); // avgStep to hold the sum of aligned frames of each group at each step avgstep=cv::Mat::zeros(h, w,CV_32FC1); cout<<"Level "<<levelCounter<<"/"<<levelNum<<" of the pyramid is under processing"<<std::endl; // Compute time for each level tStart = clock(); // Check if we are in the final step if (div==1) cnt=imagenum; flowCounter=1; for (int i=0;i<cnt;i++) { //Just compute the average in the last step if (div==1) { if (globalShiftCorr) { Matrix1D<double> shiftMatrix(2); MultidimArray<double> frameImage; movieStack.readMapped(fname,i+1); movieStack().getImage(frameImage); if (darkImageCorr) frameImage-=darkImage; if (gainImageCorr) frameImage/=gainImage; XX(shiftMatrix)=XX(shiftVector[i]); YY(shiftMatrix)=YY(shiftVector[i]); translate(BSPLINE3, preImg, frameImage, shiftMatrix, WRAP); } else { movieStack.readMapped(fname,fstFrame+i); movieStack().getImage(preImg); if (darkImageCorr) preImg-=darkImage; if (gainImageCorr) preImg/=gainImage; } xmipp2Opencv(preImg, preimg); } else { if (i==cnt-1) computeAvg(fname, i*div+fstFrame, lstFrame, preImg); else computeAvg(fname, i*div+fstFrame, (i+1)*div+fstFrame-1, preImg); } xmipp2Opencv(preImg, preimg); // Note: we should use the OpenCV conversion to use it in optical flow 
convert2Uint8(avgcurr,avgcurr8); convert2Uint8(preimg,preimg8); #ifdef GPU d_avgcurr.upload(avgcurr8); d_preimg.upload(preimg8); if (cnt==2) d_calc(d_avgcurr, d_preimg, d_flowx, d_flowy); else { flowXFileName=foname.removeLastExtension()+formatString("flowx%d%d.txt",div*2,flowCounter); flowYFileName=foname.removeLastExtension()+formatString("flowy%d%d.txt",div*2,flowCounter); readMat(flowXFileName.c_str(), flowx); readMat(flowYFileName.c_str(), flowy); d_flowx.upload(flowx); d_flowy.upload(flowy); d_calc.flags=cv::OPTFLOW_USE_INITIAL_FLOW; d_calc(d_avgcurr, d_preimg, d_flowx, d_flowy); } d_flowx.download(planes[0]); d_flowy.download(planes[1]); d_avgcurr.release(); d_preimg.release(); d_flowx.release(); d_flowy.release(); #else if (cnt==2) calcOpticalFlowFarneback(avgcurr8, preimg8, flow, 0.5, 6, winSize, 1, 5, 1.1, 0); else { flowFileName=foname.removeLastExtension()+formatString("flow%d%d.txt",div*2,flowCounter); readMat(flowFileName.c_str(), flow); calcOpticalFlowFarneback(avgcurr8, preimg8, flow, 0.5, 6, winSize, 1, 5, 1.1, cv::OPTFLOW_USE_INITIAL_FLOW); } split(flow, planes); #endif // Save the flows if we are in the last step if (div==groupSize) { if (i > 0) { std_dev2(planes,flowxPre,flowyPre,meanStdev); size_t id=MD.addObject(); MD.setValue(MDL_OPTICALFLOW_MEANX, double(meanStdev(0)), id); MD.setValue(MDL_OPTICALFLOW_MEANY, double(meanStdev(2)), id); MD.setValue(MDL_OPTICALFLOW_STDX, double(meanStdev(1)), id); MD.setValue(MDL_OPTICALFLOW_STDY, double(meanStdev(3)), id); MD.write(motionInfFile, MD_APPEND); } planes[0].copyTo(flowxPre); planes[1].copyTo(flowyPre); } else { #ifdef GPU flowXFileName=foname.removeLastExtension()+formatString("flowx%d%d.txt",div,i+1); flowYFileName=foname.removeLastExtension()+formatString("flowy%d%d.txt",div,i+1); saveMat(flowXFileName.c_str(), planes[0]); saveMat(flowYFileName.c_str(), planes[1]); #else flowFileName=foname.removeLastExtension()+formatString("flow%d%d.txt",div,i+1); saveMat(flowFileName.c_str(), flow); #endif if 
((i+1)%2==0) flowCounter++; } for( int row = 0; row < planes[0].rows; row++ ) for( int col = 0; col < planes[0].cols; col++ ) { planes[0].at<float>(row,col) += (float)col; planes[1].at<float>(row,col) += (float)row; } cv::remap(preimg, dest, planes[0], planes[1], cv::INTER_CUBIC); if (div==1 && saveCorrMovie) { mappedImg.aliasImageInStack(outputMovie, i); opencv2Xmipp(dest, mappedImg); } avgstep+=dest; } avgcurr=avgstep/cnt; cout<<"Processing level "<<levelCounter<<"/"<<levelNum<<" has been finished"<<std::endl; printf("Processing time: %.2fs\n", (double)(clock() - tStart)/CLOCKS_PER_SEC); cnt*=2; levelCounter++; } opencv2Xmipp(avgcurr, avgCurr); II() = avgCurr; II.write(foname); printf("Total Processing time: %.2fs\n", (double)(clock() - tStart2)/CLOCKS_PER_SEC); if (saveCorrMovie) { II()=outputMovie; II.write(foname.replaceExtension("mrcs")); } // Release the memory avgstep.release(); preimg.release(); avgcurr8.release(); preimg8.release(); flow.release(); planes[0].release(); planes[1].release(); flowxPre.release(); flowyPre.release(); movieStack.clear(); preImg.clear(); avgCurr.clear(); II.clear(); return 0; }
void ProgVolumePCA::run() { show(); produce_side_info(); const MultidimArray<int> &imask=mask.imask; size_t Nvoxels=imask.sum(); MultidimArray<float> v; v.initZeros(Nvoxels); // Add all volumes to the analyzer FileName fnVol; FOR_ALL_OBJECTS_IN_METADATA(mdVols) { mdVols.getValue(MDL_IMAGE,fnVol,__iter.objId); V.read(fnVol); // Construct vector const MultidimArray<double> &mV=V(); size_t idx=0; FOR_ALL_DIRECT_ELEMENTS_IN_MULTIDIMARRAY(mV) { if (DIRECT_MULTIDIM_ELEM(imask,n)) DIRECT_MULTIDIM_ELEM(v,idx++)=DIRECT_MULTIDIM_ELEM(mV,n); } analyzer.addVector(v); } // Construct PCA basis analyzer.subtractAvg(); analyzer.learnPCABasis(NPCA,100); // Project onto the PCA basis Matrix2D<double> proj; analyzer.projectOnPCABasis(proj); std::vector<double> dimredProj; dimredProj.resize(NPCA); int i=0; FOR_ALL_OBJECTS_IN_METADATA(mdVols) { memcpy(&dimredProj[0],&MAT_ELEM(proj,i,0),NPCA*sizeof(double)); mdVols.setValue(MDL_DIMRED,dimredProj,__iter.objId); i++; } if (fnVolsOut!="") mdVols.write(fnVolsOut); else mdVols.write(fnVols); // Save the basis const MultidimArray<double> &mV=V(); for (int i=NPCA-1; i>=0; --i) { V().initZeros(); size_t idx=0; const MultidimArray<double> &mPCA=analyzer.PCAbasis[i]; FOR_ALL_DIRECT_ELEMENTS_IN_MULTIDIMARRAY(mV) { if (DIRECT_MULTIDIM_ELEM(imask,n)) DIRECT_MULTIDIM_ELEM(mV,n)=DIRECT_MULTIDIM_ELEM(mPCA,idx++); } if (fnBasis!="") V.write(fnBasis,i+1,true,WRITE_OVERWRITE); } // Generate the PCA volumes if (listOfPercentiles.size()>0 && fnOutStack!="" && fnAvgVol!="") { Image<double> Vavg; if (fnAvgVol!="") Vavg.read(fnAvgVol); else Vavg().initZeros(V()); Matrix1D<double> p; proj.toVector(p); Matrix1D<double> psorted=p.sort(); Image<double> Vpca; Vpca()=Vavg(); createEmptyFile(fnOutStack,(int)XSIZE(Vavg()),(int)YSIZE(Vavg()),(int)ZSIZE(Vavg()),listOfPercentiles.size()); std::cout << "listOfPercentiles.size()=" << listOfPercentiles.size() << std::endl; for (size_t i=0; i<listOfPercentiles.size(); i++) { int 
idx=(int)round(textToFloat(listOfPercentiles[i].c_str())/100.0*VEC_XSIZE(p)); std::cout << "Percentile " << listOfPercentiles[i] << " -> idx=" << idx << " p(idx)=" << psorted(idx) << std::endl; Vpca()+=psorted(idx)*V(); Vpca.write(fnOutStack,i+1,true,WRITE_REPLACE); } } }