VImage VImage::composite( VImage other, VipsBlendMode mode, VOption *options )
{
    VImage v[2] = { *this, other };
    std::vector<VImage> ivec( v, v + VIPS_NUMBER( v ) );
    int m[1] = { static_cast<int>( mode ) };
    std::vector<int> mvec( m, m + VIPS_NUMBER( m ) );

    return( composite( ivec, mvec, options ) );
}
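// --- Usage sketch (not part of the original source): a minimal program calling the
// two-image composite() overload above, assuming the standard libvips C++ binding
// (vips/vips8 header, VImage::new_from_file, VIPS_BLEND_MODE_OVER; libvips >= 8.6).
// The file names are placeholders.
#include <vips/vips8>

int main( int argc, char **argv )
{
    if( VIPS_INIT( argv[0] ) )
        vips_error_exit( NULL );

    // composite "overlay.png" over "base.png" using the OVER blend mode
    vips::VImage base = vips::VImage::new_from_file( "base.png" );
    vips::VImage overlay = vips::VImage::new_from_file( "overlay.png" );
    vips::VImage out = base.composite( overlay, VIPS_BLEND_MODE_OVER );
    out.write_to_file( "out.png" );

    vips_shutdown();
    return( 0 );
}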
int main(void)
{
    const int n = 6;    // 4 I, 2 lambda
    int info;

    // system params
    Coil coil;
    Magnet magnet;
    coil.d = .0575;
    coil.R = 0.075;
    magnet.x = 0.01;
    magnet.y = 0.0;
    magnet.Fx = 1 * pow(10, -8);
    magnet.Fy = 1. * pow(10, -8);
    magnet.gamma = 35.8 * pow(10, -6);

    double th = -19 * PI / 180;    // convert angle in degrees to radians
    double Bmag = 10. * pow(10, -6);
    double Bx = Bmag * cos(th);
    double By = Bmag * sin(th);

    Vector2d mvec(Bx, By);
    mvec = magnet.gamma / sqrt(Bx * Bx + By * By) * mvec;
    cout << "mvec" << endl << mvec << endl;

    MatrixXd Bmat = computeBmat(magnet.x, magnet.y, coil.R, coil.d);
    cout << "Bmat" << endl << Bmat << endl;

    //MatrixXd
    Matrix4d A;
    cout << "A" << endl << A << endl;
    A.block(0, 0, 2, 4) = Bmat;
    cout << "A row 1-2" << endl << A << endl;
    A.block(2, 0, 1, 4) = mvec.transpose() * Dx(magnet.x, magnet.y, coil.R, coil.d);
    A.block(3, 0, 1, 4) = mvec.transpose() * Dy(magnet.x, magnet.y, coil.R, coil.d);
    cout << "A " << endl << A << endl;
    cout << "A inverse is:" << endl << A.inverse() << endl;

    Vector4d BF;
    BF << Bx, By, magnet.Fx, magnet.Fy;
    cout << "BF: " << endl << BF << endl;

    Vector4d Isolve = A.inverse() * BF;
    cout << "Isolve: " << endl << Isolve << endl;

    return 0;
}
void SymmetricSolver::circulantMul(const BlockCMat& M, mvec& v, unsigned int nPhi) {
    assert(!(v.size()%nPhi));
    assert(M.nCols()*nPhi == v.size());

    // stuff vector into vector-of-vectors for circulant blocks
    VarVec<mvec> vv;
    for(unsigned int i=0; i<v.size()/nPhi; i++)
        vv.push_back(mvec(&v[i*nPhi], &v[i*nPhi]+nPhi));

    vv = M.lMultiply<mvec,mvec>(vv);

    // pull data back out
    v.getData().resize(M.nRows()*nPhi);
    for(unsigned int i=0; i<M.nRows(); i++)
        for(unsigned int j=0; j<nPhi; j++)
            v[i*nPhi+j] = vv[i][j];
}
void GenericSolver::calculateResult(ReactiveSet& R) {
#ifdef WITH_LAPACKE
    assert(my_SVD);
    mvec finalState = my_SVD->calc_pseudo_inverse(singular_epsilon) * R.incidentState;
#else
    assert(the_GF);
    gsl_vector* inc = gsl_vector_alloc(R.nDF());
    gsl_vector* fin = gsl_vector_alloc(R.nDF());
    for(unsigned int i=0; i<R.nDF(); i++)
        gsl_vector_set(inc,i,R.incidentState[i]);
    assert(!gsl_blas_dgemv(CblasNoTrans, 1., the_GF, inc, 0., fin));
    mvec finalState = mvec(R.nDF());
    for(unsigned int i=0; i<R.nDF(); i++)
        finalState[i] = gsl_vector_get(fin,i);
    gsl_vector_free(inc);
    gsl_vector_free(fin);
#endif
    R.setFinalState(finalState);
}
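// --- Standalone sketch (not part of the solver): the gsl_blas_dgemv call pattern used in the
// #else branch above, i.e. fin = 1.0 * the_GF * inc + 0.0 * fin. A 3x3 identity matrix stands
// in for the solver's precomputed Green's-function matrix; everything else is illustrative.
#include <cstdio>
#include <gsl/gsl_blas.h>
#include <gsl/gsl_matrix.h>
#include <gsl/gsl_vector.h>

int main() {
    const size_t n = 3;
    gsl_matrix* A = gsl_matrix_alloc(n, n);
    gsl_vector* x = gsl_vector_alloc(n);
    gsl_vector* y = gsl_vector_alloc(n);

    gsl_matrix_set_identity(A);                      // stand-in for the_GF
    for (size_t i = 0; i < n; i++) gsl_vector_set(x, i, i + 1.0);

    gsl_blas_dgemv(CblasNoTrans, 1., A, x, 0., y);   // y = A * x

    for (size_t i = 0; i < n; i++) printf("%g\n", gsl_vector_get(y, i));

    gsl_matrix_free(A);
    gsl_vector_free(x);
    gsl_vector_free(y);
    return 0;
}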
vector<vector<int>> combinationSum(vector<int>& candidates, int target) {
    vector<vector<int>> res;
    function<bool(int, int, const vector<vector<int>>&)> ff =
        [&res, &candidates, &ff](int t, int start, const vector<vector<int>>& vec) {
            if (t < 0)
                return false;
            else if (t == 0) {
                copy(vec.begin(), vec.end(), back_inserter(res));
                return true;
            }
            else if (start >= candidates.size())
                return false;
            else if (candidates[start] > t)
                return false;

            bool bret = false;
            for (int i = start; i < candidates.size() && candidates[i] <= t; ++i) {
                if (candidates[i] <= t) {
                    // extend every partial combination with candidates[i], reusing i so the
                    // same candidate may appear more than once
                    vector<vector<int>> mvec(vec.begin(), vec.end());
                    for_each(mvec.begin(), mvec.end(),
                             [i, &candidates](vector<int>& item) { item.push_back(candidates[i]); });
                    if (ff(t - candidates[i], i, mvec)) {
                        bret = true;
                    }
                }
            }
            return bret;
        };

    vector<vector<int>> vv{vector<int>{}};
    sort(candidates.begin(), candidates.end());
    if (ff(target, 0, vv)) {
        return res;
    }
    else {
        return vector<vector<int>>{};
    }
}
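// --- Usage sketch (not part of the original snippet): a minimal driver for the recursive-lambda
// combinationSum above. The includes, using-directive, and sample input are assumptions for
// illustration; the function itself is declared here and defined above.
#include <algorithm>
#include <functional>
#include <iostream>
#include <iterator>
#include <vector>
using namespace std;

vector<vector<int>> combinationSum(vector<int>& candidates, int target); // defined above

int main() {
    vector<int> candidates{2, 3, 6, 7};
    // expected combinations summing to 7: {2, 2, 3} and {7}
    for (const auto& combo : combinationSum(candidates, 7)) {
        for (int x : combo) cout << x << ' ';
        cout << '\n';
    }
    return 0;
}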
static void Diagonalizer_and_CrossCorrelationTable_qdiag (Diagonalizer me, CrossCorrelationTables thee, double *cweights, long maxNumberOfIterations, double delta) {
    try {
        CrossCorrelationTable c0 = (CrossCorrelationTable) thy item[1];
        double **w = my data;
        long dimension = c0 -> numberOfColumns;

        autoEigen eigen = Thing_new (Eigen);
        autoCrossCorrelationTables ccts = Data_copy (thee);
        autoNUMmatrix<double> pinv (1, dimension, 1, dimension);
        autoNUMmatrix<double> d (1, dimension, 1, dimension);
        autoNUMmatrix<double> p (1, dimension, 1, dimension);
        autoNUMmatrix<double> m1 (1, dimension, 1, dimension);
        autoNUMmatrix<double> wc (1, dimension, 1, dimension);
        autoNUMvector<double> wvec (1, dimension);
        autoNUMvector<double> wnew (1, dimension);
        autoNUMvector<double> mvec (1, dimension);

        for (long i = 1; i <= dimension; i++) // Transpose W
            for (long j = 1; j <= dimension; j++) {
                wc[i][j] = w[j][i];
            }

        // d = diag(diag(W'*C0*W));
        // W = W*d^(-1/2);
        NUMdmatrix_normalizeColumnVectors (wc.peek(), dimension, dimension, c0 -> data);

        // scale eigenvectors for sphering
        // [vb,db] = eig(C0);
        // P = db^(-1/2)*vb';
        Eigen_initFromSymmetricMatrix (eigen.peek(), c0 -> data, dimension);
        for (long i = 1; i <= dimension; i++) {
            if (eigen -> eigenvalues[i] < 0) {
                Melder_throw (U"Covariance matrix not positive definite, eigenvalue[", i, U"] is negative.");
            }
            double scalef = 1 / sqrt (eigen -> eigenvalues[i]);
            for (long j = 1; j <= dimension; j++) {
                p[dimension - i + 1][j] = scalef * eigen -> eigenvectors[i][j];
            }
        }

        // P*C[i]*P'
        for (long ic = 1; ic <= thy size; ic++) {
            CrossCorrelationTable cov1 = (CrossCorrelationTable) thy item[ic];
            CrossCorrelationTable cov2 = (CrossCorrelationTable) ccts -> item[ic];
            NUMdmatrices_multiply_VCVp (cov2 -> data, p.peek(), dimension, dimension, cov1 -> data, 1);
        }

        // W = P'\W == inv(P') * W
        NUMpseudoInverse (p.peek(), dimension, dimension, pinv.peek(), 0);
        NUMdmatrices_multiply_VpC (w, pinv.peek(), dimension, dimension, wc.peek(), dimension);

        // initialisation for order KN^3
        for (long ic = 2; ic <= thy size; ic++) {
            CrossCorrelationTable cov = (CrossCorrelationTable) ccts -> item[ic];
            // C * W
            NUMdmatrices_multiply_VC (m1.peek(), cov -> data, dimension, dimension, w, dimension);
            // D += scalef * M1*M1'
            NUMdmatrices_multiplyScaleAdd (d.peek(), m1.peek(), dimension, dimension, 2 * cweights[ic]);
        }

        long iter = 0;
        double delta_w;

        autoMelderProgress progress (U"Simultaneous diagonalization of many CrossCorrelationTables...");
        try {
            do {
                // the standard diagonality measure is rather expensive to calculate, so we compare
                // the norms of differences of eigenvectors.
                delta_w = 0;
                for (long kol = 1; kol <= dimension; kol++) {
                    for (long i = 1; i <= dimension; i++) {
                        wvec[i] = w[i][kol];
                    }

                    update_one_column (ccts.peek(), d.peek(), cweights, wvec.peek(), -1, mvec.peek());

                    Eigen_initFromSymmetricMatrix (eigen.peek(), d.peek(), dimension);

                    // Eigenvalues already sorted; get eigenvector of smallest !
                    for (long i = 1; i <= dimension; i++) {
                        wnew[i] = eigen -> eigenvectors[dimension][i];
                    }

                    update_one_column (ccts.peek(), d.peek(), cweights, wnew.peek(), 1, mvec.peek());
                    for (long i = 1; i <= dimension; i++) {
                        w[i][kol] = wnew[i];
                    }

                    // compare norms of eigenvectors. We have to compare ||wvec +/- w_new|| because
                    // eigenvectors may change sign.
                    double normp = 0, normm = 0;
                    for (long j = 1; j <= dimension; j++) {
                        double dm = wvec[j] - wnew[j], dp = wvec[j] + wnew[j];
                        normp += dm * dm;
                        normm += dp * dp;
                    }

                    normp = normp < normm ? normp : normm;
                    normp = sqrt (normp);
                    delta_w = normp > delta_w ? normp : delta_w;
                }
                iter++;

                Melder_progress ((double) iter / (double) (maxNumberOfIterations + 1), U"Iteration: ", iter, U", norm: ", delta_w);
            } while (delta_w > delta && iter < maxNumberOfIterations);
        } catch (MelderError) {
            Melder_clearError ();
        }

        // Revert the sphering W = P'*W;
        // Take transpose to make W*C[i]W' diagonal instead of W'*C[i]*W => (P'*W)'=W'*P
        NUMmatrix_copyElements (w, wc.peek(), 1, dimension, 1, dimension);
        NUMdmatrices_multiply_VpC (w, wc.peek(), dimension, dimension, p.peek(), dimension); // W = W'*P: final result

        // Calculate the "real" diagonality measure
        // double dm = CrossCorrelationTables_and_Diagonalizer_getDiagonalityMeasure (thee, me, cweights, 1, thy size);

    } catch (MelderError) {
        Melder_throw (me, U" & ", thee, U": no joint diagonalization (qdiag).");
    }
}
//M+
void mp(
    int MinCoreSize,
    int MaxCoreSize,
    int SamplingFreq,
    int NumReplicates,
    char* OutFilePath,
    std::string Kernel,
    vector<int> KernelAccessionIndex,
    vector<int> AccessionNameList,
    vector<vector<vector<int> > > ActiveAlleleByPopList,
    vector<vector<vector<int> > > TargetAlleleByPopList,
    vector<int> ActiveMaxAllelesList,
    vector<int> TargetMaxAllelesList,
    vector<std::string> FullAccessionNameList
)
{
  //PERFORM INITIAL MPI STUFF
  MPI_Status status; //this struct contains three fields with info about the sender of a received message:
                     //MPI_SOURCE, MPI_TAG, MPI_ERROR
  //MPI::Init (); //Initialize MPI.
  int nproc = MPI::COMM_WORLD.Get_size ( ); //Get the number of processes.
  int procid = MPI::COMM_WORLD.Get_rank ( ); //Get the individual process ID.

  //set up vectors to fill with results
  //below is a simple way to calculate the number of rows in the output file, value l (which = V1),
  //used to monitor progress and as the maximum vector index for shared output vectors
  int l=0;
  for (int i=MinCoreSize;i<MaxCoreSize+1;i=i+SamplingFreq)
  {
    for (int j=0;j<NumReplicates;j++)
    {
      l++;
    }
  }
  double V1 = (double)l; //(MaxCoreSize - MinCoreSize + 1)*NumReplicates; //number of rows in output vectors
  vector<vector<double> > Results(V1, vector<double>(9)); //will contain numerical results
  vector<vector<string> > Members(V1); //will contain core set members

  //***MPI: RECEIVE RESULTS AT MASTER 0
  //receive values from any slave, in any order, exiting when the number of 'receives' = the top vector size
  if ( procid == 0 )
  {
    //set up variables for monitoring progress
    int percent; //percent of analysis completed
    int progindex = 0; //index to monitor progress, percent = 100*(progindex/l)

    //receive and process results from slave processors
    unsigned int i = 0;
    while (i<2*(Results.size())) //two receives per row
    {
      //probe the incoming message to determine its tag
      int nchar; //will contain the length of the char array passed with tag=1
      int vchar; //will contain the length of the vector passed with tag=0
      int tag; //tag of message from sender
      int source; //procid of sender
      MPI_Probe(MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
      //MPI_Get_count(&status, MPI_CHAR, &nchar); //probes the length of the message, saves it in nchar
      tag = status.MPI_TAG; //the tag defines which kind of comm it is: a vector of stats (0=resvec())
                            //or a char array describing the members of the core (1=cc)
      source = status.MPI_SOURCE; //determine the source of the message so that you can define which sender
                                  //to Recv from. This avoids an intervening message coming in after the
                                  //MPI_Probe with a different length, causing a message-truncated error.

      if (tag == 0)
      {
        //determine the length of the message tagged 0
        MPI_Get_count(&status, MPI_DOUBLE, &vchar);
        //cout <<" vchar="<<vchar<<" tag="<<tag<<" MPI_SOURCE="<<status.MPI_SOURCE<<" MPI_ERROR="<<status.MPI_ERROR<<"\n";

        //receive the vector of results, tagged 0, from:
        //MPI_Send(&resvec[0], resvec.size(), MPI_DOUBLE, 0, 0, MPI_COMM_WORLD);
        vector<double> t(10);
        MPI_Recv(&t[0], vchar, MPI_DOUBLE, source, 0, MPI_COMM_WORLD, &status);

        //load data from vector received onto Results, row number is last item t[9]
        for (int j=0;j<9;++j)
        {
          Results[ t[9] ][j] = t[j];
        }
        t.clear();
      }
      else if (tag == 1)
      {
        //determine the length of the message tagged 1
        MPI_Get_count(&status, MPI_CHAR, &nchar); //probes the length of the message, saves it in nchar
        //cout <<" nchar="<<nchar<<" tag="<<tag<<" MPI_SOURCE="<<status.MPI_SOURCE<<" MPI_ERROR="<<status.MPI_ERROR<<"\n";

        //receive the vector<string> of the core set, tagged 1, from:
        //MPI_Send(&m[0], nchar, MPI_CHAR, 0, 1, MPI_COMM_WORLD);
        //vector<string> m(nchar);
        char m[nchar];
        MPI_Recv(&m[0], nchar, MPI_CHAR, source, 1, MPI_COMM_WORLD, &status);

        //load core set onto Members
        //1. convert char array into a string
        string mstr(m);
        //2. split string on delimiter ',<!>,'
        string delim = ",<!>,";
        vector<string> mvec( countSubstring(mstr, delim) );
        unsigned int st = 0;
        std::size_t en = mstr.find(delim);
        int k = 0;
        while (en != std::string::npos)
        {
          mvec[k] = mstr.substr(st, en-st);
          st = en + delim.length();
          en = mstr.find(delim,st);
          ++k;
        }
        string z = mstr.substr(st); //get row number as last item in mstr
        int zz = atoi(z.c_str()); //convert string to c-string then to int
        //3. load onto Members
        Members[zz] = mvec;
        //4. clean up
        memset(m, 0, nchar);
        mstr="";
        mvec.clear();
      }
      ++i;

      //display progress
      progindex = progindex + 1;
      percent = 100*( progindex/(V1*2) ); //number of rows X 2 receives needed to complete search
      printProgBar(percent);
    }
  } //***MPI: END MASTER RECEIVE***/

  /***MPI: SEND RESULTS FROM SLAVE PROCESSES***/
  else if ( procid != 0 )
  {
    unsigned int r; //r = core size
    //int nr, RandAcc, b, bsc, plateau; //nr = controller to repeat NumReplicates times
    int RandAcc, b, bsc, plateau;
    //bsc = holds best sub core member, and other indexed accessions
    //plateau = index of the number of reps in optimization loop with same diversity value
    double RandomActiveDiversity;
    double AltRandomActiveDiversity;
    double StartingRandomActiveDiversity;
    double StartingAltRandomActiveDiversity;
    double RandomTargetDiversity;
    double AltRandomTargetDiversity;
    double StartingDiversity;
    double TempAltOptimizedActiveDiversity;
    double AltOptimizedActiveDiversity;
    double OptimizedTargetDiversity;
    double AltOptimizedTargetDiversity;
    double best;
    double nnew;
    vector<vector<vector<int> > > AlleleList;
    vector<vector<vector<int> > > CoreAlleles;
    vector<vector<vector<int> > > TdTempList;
    vector<vector<vector<int> > > BestSubCoreAlleles;
    std::string Standardize = "yes"; //a run that mimics the MSTRAT approach can be accomplished by setting
                                     //Standardize="no", and setting up the var file so that each column in the
                                     //.dat file is treated as a single locus, rather than two (or more) adjacent
                                     //columns being treated as a single codominant locus.
    vector<int> AccessionsInCore;
    vector<int> AccessionsInSubCore;
    vector<int> BestSubCore;
    vector<int> BestSubCoreRevSorted;
    vector<int> TempList;
    vector<int> TempList2;
    vector<int> bestcore;
    vector<std::string> TempListStr;

    //seed the random number generator for each processor
    int tt;
    tt = (time(NULL));
    srand ( abs(((tt*181)*((procid-83)*359))%104729) );

    //parallelize so that each rep-by-core-size combo can be handled by a distinct process.
    //this involves figuring out the total number of reps*coresizes, taking into account the SamplingFreq
    int rsteps = 1 + floor( (MaxCoreSize - MinCoreSize) / SamplingFreq ); //number of steps from MinCoreSize to MaxCoreSize

    //***MPI: figure out where to start and stop loop for each processor
    int nreps = rsteps*NumReplicates;
    int count = nreps/(nproc-1); //nproc-1 assumes a master, i.e. one less processor than total
    int start = (procid-1) * count; //procid-1 makes you start at 0, assumes master is p0
    int stop;
    if (nreps % (nproc-1) > (procid-1))
    {
      start += procid - 1;
      stop = start + (count + 1);
    }
    else
    {
      start += nreps % (nproc-1);
      stop = start + count;
    }

    //iterate thru the relevant rows
    for (int rnr=start;rnr<stop;++rnr)
    {
      r = MinCoreSize + ((rnr / NumReplicates) * SamplingFreq); //int division rounds to floor

      //develop random starting core set
      //clear AccessionsInCore and set size
      AccessionsInCore.clear();
      AccessionsInCore.resize(r);

      //add kernel accessions to core, if necessary
      if (Kernel == "yes")
      {
        for (unsigned int i=0;i<KernelAccessionIndex.size();i++)
        {
          AccessionsInCore[i] = KernelAccessionIndex[i];
        }
      }

      //clear TempList and set size
      TempList.clear();
      TempList.resize( AccessionNameList.size() );
      //set list of available accessions in TempList, by erasing those already in the core
      TempList = AccessionNameList;
      //expunge the kernel accessions, so they are not available for random addition below.
      //KernelAccessionIndex has been reverse sorted so you don't go outside range after automatic resize by .erase
      for (unsigned int i=0;i<KernelAccessionIndex.size();i++)
      {
        b = KernelAccessionIndex[i];
        TempList.erase(TempList.begin()+b);
      }

      //randomly add accessions until r accessions are in the core. if there is a kernel, include those (done above)
      //plus additional, randomly selected accessions, until you get r accessions
      for (unsigned int i=KernelAccessionIndex.size();i<r;i++)
      {
        //choose an accession randomly from those available
        RandAcc = rand() % TempList.size();
        //add it to the list
        AccessionsInCore[i] = TempList[RandAcc];
        //remove it from the list of available accessions
        TempList.erase(TempList.begin()+RandAcc);
      }

      //assemble genotypes for random core and calculate diversity
      //1. put together initial list of active alleles
      CoreAlleles.clear();
      CoreAlleles.resize( AccessionsInCore.size() );
      for (unsigned int i=0;i<AccessionsInCore.size();i++)
      {
        b = AccessionsInCore[i];
        CoreAlleles[i] = ActiveAlleleByPopList[b];
      }

      //2. calculate diversity from random selection at active loci
      AlleleList.clear();
      AlleleList = CoreAlleles;
      MyCalculateDiversity(AlleleList, ActiveMaxAllelesList, Standardize, RandomActiveDiversity, AltRandomActiveDiversity);
      //in MyCalculateDiversity, the latter two variables are updated as references;
      //save them away in non-updated variables
      StartingRandomActiveDiversity = RandomActiveDiversity;
      StartingAltRandomActiveDiversity = AltRandomActiveDiversity;

      //3. calculate diversity from random selection at target loci
      AlleleList.clear();
      AlleleList.resize( AccessionsInCore.size() );
      for (unsigned int j=0;j<AccessionsInCore.size();j++)
      {
        b = AccessionsInCore[j];
        AlleleList[j] = TargetAlleleByPopList[b];
      }
      MyCalculateDiversity(AlleleList, TargetMaxAllelesList, Standardize, RandomTargetDiversity, AltRandomTargetDiversity);

      //BEGIN OPTIMIZATION
      StartingDiversity = 0; //this is the diversity recovered during the prior iteration.
      plateau = 0; //count of the number of times you have found the best value; evaluates when you are
                   //stuck on a plateau, assuming acceptance criterion allows downhill steps

      //this is the iteration step, now an indefinite loop that is broken when
      //no improvement is made during the course of the optimization algorithm.
      //If r = kernel size = MinCoreSize then do no optimization but still calculate all variables.
      if (KernelAccessionIndex.size() == r)
      {
        //assemble genotypes for core
        //1. put together initial list
        CoreAlleles.clear();
        CoreAlleles.resize(r);
        for (unsigned int i=0;i<r;i++)
        {
          b = AccessionsInCore[i];
          CoreAlleles[i] = ActiveAlleleByPopList[b];
        }
        AlleleList = CoreAlleles;
        MyCalculateDiversity(AlleleList, ActiveMaxAllelesList, Standardize, RandomActiveDiversity, AltRandomActiveDiversity);
        best = RandomActiveDiversity; //best is equivalent to OptimizedActiveDiversity
        AltOptimizedActiveDiversity = AltRandomActiveDiversity;
      }
      else
      {
        //do optimization
        while ( true )
        {
          //assemble genotypes for core
          //1. put together initial list
          CoreAlleles.clear();
          CoreAlleles.resize(r);
          for (unsigned int i=0;i<r;i++)
          {
            b = AccessionsInCore[i];
            CoreAlleles[i] = ActiveAlleleByPopList[b];
          }

          //2. go through all possible subsets of size r-1, one at a time, noting which is best.
          //If there is a kernel, do not swap out any of those accessions (they are retained as the
          //first KernelAccessionIndex.size() items in CoreAlleles). Accomplished by starting the for loop
          //at KernelAccessionIndex.size().
          best=0;
          for (unsigned int i=KernelAccessionIndex.size();i<CoreAlleles.size();i++)
          {
            //remove each item consecutively from the list of all populations in the core
            AlleleList.clear();
            TdTempList.clear();

            TdTempList = CoreAlleles; //swap to temporary vector
            TdTempList.erase( TdTempList.begin() + i);
            AlleleList = TdTempList;

            TempList2.clear();
            TempList2 = AccessionsInCore;
            TempList2.erase(TempList2.begin() + i);
            AccessionsInSubCore = TempList2;

            /*Data structure for SubCoreAlleles:
              SubCore 1..r
                Population 1..(r-1)
                  AlleleArray 1..NumLoci

            --3. fuse alleles from the same locus into a single array, for all accessions, for the current subcore
            --4. assemble a list of diversity (M) for each locus separately
            --5. standardize the M values to the maximum possible number of alleles at that locus, and add them up
                 to get the final estimate of standardized allelic diversity in the core. then divide by the number
                 of loci to get a number that is comparable across data sets.
            --5.5. simultaneous to the calculation, keep track of which subcore is best
            */
            MyCalculateDiversity(AlleleList, ActiveMaxAllelesList, Standardize, RandomActiveDiversity, AltRandomActiveDiversity);
            nnew = RandomActiveDiversity;

            if (nnew >= best) // >= allows sideways movement during hill climbing
            {
              best = nnew;
              BestSubCore.clear();
              BestSubCore = AccessionsInSubCore;
              BestSubCoreAlleles.clear();
              BestSubCoreAlleles = AlleleList;
            }
          } //for loop cycles thru all subcores

          //reverse sort BestSubCore to support easy assembly of pared TempList below
          BestSubCoreRevSorted = BestSubCore;
          std::sort(BestSubCoreRevSorted.begin(), BestSubCoreRevSorted.end(), std::greater<int>());

          /*
          6. take the subcore with greatest diversity and consecutively add each possible additional accession
          from the base collection. find the core of size r (not r-1 subcore) that has the greatest diversity.
          suppress the IDs of those accessions found in the BestSubCore from the list of all accessions
          to get a list of remaining accessions.
          */
          TempList = AccessionNameList;
          for (unsigned int k=0;k<BestSubCoreRevSorted.size();k++)
          {
            bsc = BestSubCoreRevSorted[k];
            TempList.erase( TempList.begin() + bsc );
          }

          //shuffle the list of remaining accessions, so addition order is not predictable
          std::random_shuffle (TempList.begin(), TempList.end());

          //add each remaining accession consecutively, calculate diversity, test
          //whether it is better than the prior one
          best = 0;
          for (unsigned int k=0;k<TempList.size();k++)
          {
            bsc = TempList[k];

            //define the core
            TempList2 = BestSubCore;
            TempList2.resize( TempList2.size() + 1 );
            //TempList2.push_back(i);
            TempList2[TempList2.size()-1] = bsc; //add new accession to last vector element
            AccessionsInCore = TempList2;

            //assemble the allelelist for the core
            TdTempList = BestSubCoreAlleles;
            TdTempList.resize( TdTempList.size() + 1 );
            //TdTempList.push_back( ActiveAlleleByPopList[i] );
            TdTempList[TdTempList.size()-1] = ActiveAlleleByPopList[bsc];
            AlleleList = TdTempList;

            //calculate diversity
            MyCalculateDiversity(AlleleList, ActiveMaxAllelesList, Standardize, nnew, TempAltOptimizedActiveDiversity);

            //test whether current diversity is higher than the best diversity found so far
            if (nnew >= best) // >= allows sideways movement during hill climbing
            {
              best = nnew;
              bestcore = AccessionsInCore;
              //save the alternative diversity value for the best core
              AltOptimizedActiveDiversity = TempAltOptimizedActiveDiversity;
            }
          }

          AccessionsInCore = bestcore; //define starting variable for next MSTRAT iteration

          //if there has been no improvement from the prior iteration, you have reached
          //the plateau and should exit the repeat
          if (best == StartingDiversity)
          {
            plateau++;
            if (plateau > 0) break;
          }
          //update starting value and repeat
          else if (best > StartingDiversity) StartingDiversity = best;

        } //while(true) endless loop
      }

      //7. Calculate diversity at target loci
      //assemble the target loci allelelist for the accessions in the best core
      AlleleList.clear();
      AlleleList.resize( AccessionsInCore.size() );
      for (unsigned int j=0;j<AccessionsInCore.size();j++)
      {
        b = AccessionsInCore[j];
        AlleleList[j] = TargetAlleleByPopList[b];
      }

      //calculate diversity at target loci based upon the optimized core selection
      MyCalculateDiversity(AlleleList, TargetMaxAllelesList, Standardize, OptimizedTargetDiversity, AltOptimizedTargetDiversity);

      //8. Assemble stats for optimized core and add to output vectors
      //create a list of accession names from the list of accession IDs in AccessionsInCore
      sort( AccessionsInCore.begin(), AccessionsInCore.end() );
      TempListStr.clear();
      TempListStr.resize(r);
      for (unsigned int i=0;i<AccessionsInCore.size();i++)
      {
        b = AccessionsInCore[i];
        TempListStr[i] = FullAccessionNameList[b];
      }

      /***MPI: BUILD & SEND RESULTS VECTOR***/
      //load the variables onto the results vectors
      //no need to calculate row number, it is the same as rnr; formula saved because it might be useful later:
      //row = ((r - MinCoreSize)*NumReplicates) + nr - ( (NumReplicates*(SamplingFreq-1))*( (r-MinCoreSize)/SamplingFreq ) );
      // (r - MinCoreSize)*NumReplicates) + nr specifies row number if SamplingFreq=1
      // (NumReplicates*(SamplingFreq-1)) specifies a step value to correct when SamplingFreq>1
      // ( (r-MinCoreSize)/SamplingFreq ) specifies the replicate on core size, accounting for SamplingFreq
      // see file Calculation of row value.xlsx for development of the 'row' index

      //put results 0-8 into a vector, resvec, return row as last item
      vector<double> resvec(10);
      resvec[0] = double(r);
      resvec[1] = StartingRandomActiveDiversity; //RandomActiveDiversity;
      resvec[2] = best; //equivalent to OptimizedActiveDiversity
      resvec[3] = RandomTargetDiversity;
      resvec[4] = OptimizedTargetDiversity;
      resvec[5] = StartingAltRandomActiveDiversity; //AltRandomActiveDiversity;
      resvec[6] = AltOptimizedActiveDiversity;
      resvec[7] = AltRandomTargetDiversity;
      resvec[8] = AltOptimizedTargetDiversity;
      resvec[9] = double(rnr);
      //cout<<"MPI_Rank="<<MPI_Rank<<"

      //send result vector to master 0, send row number, rnr, as last element.
      //message is tagged as 0.
      //here you are pointing to the first element, then returning resvec.size() doubles-
      //worth of memory from that starting location.
      MPI_Send(&resvec[0], resvec.size(), MPI_DOUBLE, 0, 0, MPI_COMM_WORLD);
      /***MPI: END BUILD & SEND RESULTS VECTOR***/

      /***MPI: BUILD & SEND MEMBERS VECTOR***/
      //add row number as last item in TempListStr
      TempListStr.resize(TempListStr.size()+1);
      stringstream ss;
      ss << rnr; //convert int to stringstream to string
      TempListStr[ TempListStr.size() - 1 ] = ss.str();

      //convert vector<string> to a single, ',<!>,' delimited, string
      string concat;
      for (unsigned int i=0;i<TempListStr.size();++i)
      {
        concat += TempListStr[i]; //add vector element
        if (i<TempListStr.size()-1) concat += ",<!>,"; //add delimiter, except for last item
      }

      //convert the string to a char array
      char cc[concat.size()+1];
      strcpy(cc, concat.c_str());

      //send the char array to master 0, tagged as 1 to distinguish it from the result vector send
      MPI_Send(&cc, sizeof(cc), MPI_CHAR, 0, 1, MPI_COMM_WORLD);

    } //end for loop over rows
  } //***MPI: END SEND

  /*MPI: MASTER 0 WRITES OUTPUT*/
  if ( procid == 0 )
  {
    //set up file stream for output file
    ofstream output;
    output.open(OutFilePath);
    output.close(); //quick open/close done to clear any existing file each time the program is run
    output.open(OutFilePath, ios::out | ios::app); //open file in append mode
    output << "core size random reference diversity optimized reference diversity random target diversity optimized target diversity alt random reference diversity alt optimized reference diversity alt random target diversity alt optimized target diversity core members" << "\n";

    //write out results row by row
    for (int i=0;i<V1;i++)
    {
      //write variables
      output << Results[i][0] << " " << Results[i][1] << " " << Results[i][2] << " "
             << Results[i][3] << " " << Results[i][4] << " " << Results[i][5] << " "
             << Results[i][6] << " " << Results[i][7] << " " << Results[i][8] << " " << "(";
      //write Accessions retained
      for (unsigned int j=0;j<Members[i].size();j++)
      {
        if ( j==(Members[i].size() - 1) )
        {
          //add trailing parenthesis and move to next row
          output << Members[i][j] << ")\n";
        }
        else
        {
          output << Members[i][j] << ",";
        }
      }
    }

    //wrap up write step
    output.close();
  } /***MPI: END MASTER WRITE***/

  //Terminate MPI.
  //MPI::Finalize ( );
}
// Testing the I/O of the important classes of the library
// (context, keys, ciphertexts).
int main(int argc, char *argv[])
{
  ArgMapping amap;

  long r=1;
  long p=2;
  long c=2;
  long w=64;
  long L=5;
  long mm=0;

  amap.arg("p", p, "plaintext base");
  amap.arg("r", r, "lifting");
  amap.arg("c", c, "number of columns in the key-switching matrices");
  amap.arg("m", mm, "cyclotomic index","{31,127,1023}");
  amap.parse(argc, argv);

  bool useTable = (mm==0 && p==2);
  long ptxtSpace = power_long(p,r);
  long numTests = useTable? N_TESTS : 1;

  std::unique_ptr<FHEcontext> contexts[numTests];
  std::unique_ptr<FHESecKey> sKeys[numTests];
  std::unique_ptr<Ctxt> ctxts[numTests];
  std::unique_ptr<EncryptedArray> eas[numTests];
  vector<ZZX> ptxts[numTests];

  // first loop: generate stuff and write it to file

  // open file for writing
  {fstream keyFile("iotest.txt", fstream::out|fstream::trunc);
   assert(keyFile.is_open());
   for (long i=0; i<numTests; i++) {
     long m = (mm==0)? ms[i][1] : mm;

     cout << "Testing IO: m="<<m<<", p^r="<<p<<"^"<<r<<endl;

     Vec<long> mvec(INIT_SIZE,2);
     mvec[0] = ms[i][4];
     mvec[1] = ms[i][5];
     vector<long> gens(2);
     gens[0] = ms[i][6];
     gens[1] = ms[i][7];
     vector<long> ords(2);
     ords[0] = ms[i][8];
     ords[1] = ms[i][9];

     if (useTable && gens[0]>0)
       contexts[i].reset(new FHEcontext(m, p, r, gens, ords));
     else
       contexts[i].reset(new FHEcontext(m, p, r));
     contexts[i]->zMStar.printout();

     buildModChain(*contexts[i], L, c);  // Set the modulus chain
     if (mm==0 && m==1023) contexts[i]->makeBootstrappable(mvec);

     // Output the FHEcontext to file
     writeContextBase(keyFile, *contexts[i]);
     writeContextBase(cout, *contexts[i]);
     keyFile << *contexts[i] << endl;

     sKeys[i].reset(new FHESecKey(*contexts[i]));
     const FHEPubKey& publicKey = *sKeys[i];
     sKeys[i]->GenSecKey(w,ptxtSpace); // A Hamming-weight-w secret key
     addSome1DMatrices(*sKeys[i]); // compute key-switching matrices that we need
     eas[i].reset(new EncryptedArray(*contexts[i]));

     long nslots = eas[i]->size();

     // Output the secret key to file, twice. Below we will have two copies
     // of most things.
     keyFile << *sKeys[i] << endl;
     keyFile << *sKeys[i] << endl;

     vector<ZZX> b;
     long p2r = eas[i]->getContext().alMod.getPPowR();
     ZZX poly = RandPoly(0,to_ZZ(p2r)); // choose a random constant polynomial
     eas[i]->decode(ptxts[i], poly);

     ctxts[i].reset(new Ctxt(publicKey));
     eas[i]->encrypt(*ctxts[i], publicKey, ptxts[i]);
     eas[i]->decrypt(*ctxts[i], *sKeys[i], b);
     assert(ptxts[i].size() == b.size());
     for (long j = 0; j < nslots; j++) assert(ptxts[i][j] == b[j]);

     // output the plaintext
     keyFile << "[ ";
     for (long j = 0; j < nslots; j++) keyFile << ptxts[i][j] << " ";
     keyFile << "]\n";

     eas[i]->encode(poly,ptxts[i]);
     keyFile << poly << endl;

     // Output the ciphertext to file
     keyFile << *ctxts[i] << endl;
     keyFile << *ctxts[i] << endl;
     cerr << "okay " << i << endl << endl;
   }
   keyFile.close();}
  cerr << "so far, so good\n\n";

  // second loop: read from input and repeat the computation

  // open file for read
  {fstream keyFile("iotest.txt", fstream::in);
   for (long i=0; i<numTests; i++) {

     // Read context from file
     unsigned long m1, p1, r1;
     vector<long> gens, ords;
     readContextBase(keyFile, m1, p1, r1, gens, ords);
     FHEcontext tmpContext(m1, p1, r1, gens, ords);
     keyFile >> tmpContext;
     assert(*contexts[i] == tmpContext);
     cerr << i << ": context matches input\n";

     // We define some things below wrt *contexts[i], not tmpContext.
     // This is because the various operator== methods check equality of
     // references, not equality of the referenced FHEcontext objects.
     FHEcontext& context = *contexts[i];
     FHESecKey secretKey(context);
     FHESecKey secretKey2(tmpContext);
     const FHEPubKey& publicKey = secretKey;
     const FHEPubKey& publicKey2 = secretKey2;

     keyFile >> secretKey;
     keyFile >> secretKey2;
     assert(secretKey == *sKeys[i]);
     cerr << "   secret key matches input\n";

     EncryptedArray ea(context);
     EncryptedArray ea2(tmpContext);
     long nslots = ea.size();

     // Read the plaintext from file
     vector<ZZX> a;
     a.resize(nslots);
     assert(nslots == (long)ptxts[i].size());
     seekPastChar(keyFile, '['); // defined in NumbTh.cpp
     for (long j = 0; j < nslots; j++) {
       keyFile >> a[j];
       assert(a[j] == ptxts[i][j]);
     }
     seekPastChar(keyFile, ']');
     cerr << "   ptxt matches input\n";

     // Read the encoded plaintext from file
     ZZX poly1, poly2;
     keyFile >> poly1;
     eas[i]->encode(poly2,a);
     assert(poly1 == poly2);
     cerr << "   eas[i].encode(a)==poly1 okay\n";

     ea.encode(poly2,a);
     assert(poly1 == poly2);
     cerr << "   ea.encode(a)==poly1 okay\n";

     ea2.encode(poly2,a);
     assert(poly1 == poly2);
     cerr << "   ea2.encode(a)==poly1 okay\n";

     eas[i]->decode(a,poly1);
     assert(nslots == (long)a.size());
     for (long j = 0; j < nslots; j++) assert(a[j] == ptxts[i][j]);
     cerr << "   eas[i].decode(poly1)==ptxts[i] okay\n";

     ea.decode(a,poly1);
     assert(nslots == (long)a.size());
     for (long j = 0; j < nslots; j++) assert(a[j] == ptxts[i][j]);
     cerr << "   ea.decode(poly1)==ptxts[i] okay\n";

     ea2.decode(a,poly1);
     assert(nslots == (long)a.size());
     for (long j = 0; j < nslots; j++) assert(a[j] == ptxts[i][j]);
     cerr << "   ea2.decode(poly1)==ptxts[i] okay\n";

     // Read ciphertext from file
     Ctxt ctxt(publicKey);
     Ctxt ctxt2(publicKey2);
     keyFile >> ctxt;
     keyFile >> ctxt2;
     assert(ctxts[i]->equalsTo(ctxt,/*comparePkeys=*/false));
     cerr << "   ctxt matches input\n";

     sKeys[i]->Decrypt(poly2,*ctxts[i]);
     assert(poly1 == poly2);
     cerr << "   sKeys[i]->decrypt(*ctxts[i]) == poly1 okay\n";

     secretKey.Decrypt(poly2,*ctxts[i]);
     assert(poly1 == poly2);
     cerr << "   secretKey.decrypt(*ctxts[i]) == poly1 okay\n";

     secretKey.Decrypt(poly2,ctxt);
     assert(poly1 == poly2);
     cerr << "   secretKey.decrypt(ctxt) == poly1 okay\n";

     secretKey2.Decrypt(poly2,ctxt2);
     assert(poly1 == poly2);
     cerr << "   secretKey2.decrypt(ctxt2) == poly1 okay\n";

     eas[i]->decrypt(ctxt, *sKeys[i], a);
     assert(nslots == (long)a.size());
     for (long j = 0; j < nslots; j++) assert(a[j] == ptxts[i][j]);
     cerr << "   eas[i].decrypt(ctxt, *sKeys[i])==ptxts[i] okay\n";

     ea.decrypt(ctxt, secretKey, a);
     assert(nslots == (long)a.size());
     for (long j = 0; j < nslots; j++) assert(a[j] == ptxts[i][j]);
     cerr << "   ea.decrypt(ctxt, secretKey)==ptxts[i] okay\n";

     ea2.decrypt(ctxt2, secretKey2, a);
     assert(nslots == (long)a.size());
     for (long j = 0; j < nslots; j++) assert(a[j] == ptxts[i][j]);
     cerr << "   ea2.decrypt(ctxt2, secretKey2)==ptxts[i] okay\n";

     cerr << "test "<<i<<" okay\n\n";
   }}
  unlink("iotest.txt"); // clean up before exiting
}
void MooseEnumTest::multiTestOne()
{
  MultiMooseEnum mme("one two three four", "two");

  CPPUNIT_ASSERT( mme.contains("one") == false );
  CPPUNIT_ASSERT( mme.contains("two") == true );
  CPPUNIT_ASSERT( mme.contains("three") == false );
  CPPUNIT_ASSERT( mme.contains("four") == false );

  mme.push_back("four");
  CPPUNIT_ASSERT( mme.contains("one") == false );
  CPPUNIT_ASSERT( mme.contains("two") == true );
  CPPUNIT_ASSERT( mme.contains("three") == false );
  CPPUNIT_ASSERT( mme.contains("four") == true );

  // isValid
  CPPUNIT_ASSERT( mme.isValid() == true );
  mme.clear();
  CPPUNIT_ASSERT( mme.isValid() == false );

  mme.push_back("one three");
  CPPUNIT_ASSERT( mme.contains("one") == true );
  CPPUNIT_ASSERT( mme.contains("two") == false );
  CPPUNIT_ASSERT( mme.contains("three") == true );
  CPPUNIT_ASSERT( mme.contains("four") == false );

  std::vector<std::string> mvec(2);
  mvec[0] = "one";
  mvec[1] = "two";

  std::set<std::string> mset;
  mset.insert("two");
  mset.insert("three");

  // Assign
  mme = mvec;
  CPPUNIT_ASSERT( mme.contains("one") == true );
  CPPUNIT_ASSERT( mme.contains("two") == true );
  CPPUNIT_ASSERT( mme.contains("three") == false );
  CPPUNIT_ASSERT( mme.contains("four") == false );

  mme = mset;
  CPPUNIT_ASSERT( mme.contains("one") == false );
  CPPUNIT_ASSERT( mme.contains("two") == true );
  CPPUNIT_ASSERT( mme.contains("three") == true );
  CPPUNIT_ASSERT( mme.contains("four") == false );

  // Insert
  mme.push_back(mvec);
  CPPUNIT_ASSERT( mme.contains("one") == true );
  CPPUNIT_ASSERT( mme.contains("two") == true );
  CPPUNIT_ASSERT( mme.contains("three") == true );
  CPPUNIT_ASSERT( mme.contains("four") == false );

  mme.clear();
  mme = "one four";
  CPPUNIT_ASSERT( mme.contains("one") == true );
  CPPUNIT_ASSERT( mme.contains("two") == false );
  CPPUNIT_ASSERT( mme.contains("three") == false );
  CPPUNIT_ASSERT( mme.contains("four") == true );

  mme.push_back("three four");
  CPPUNIT_ASSERT( mme.contains("one") == true );
  CPPUNIT_ASSERT( mme.contains("two") == false );
  CPPUNIT_ASSERT( mme.contains("three") == true );
  CPPUNIT_ASSERT( mme.contains("four") == true );

  // Size
  CPPUNIT_ASSERT( mme.size() == 4 );
  CPPUNIT_ASSERT( mme.unique_items_size() == 3 );

  // All but "two" should be in the Enum
  std::set<std::string> compare_set, return_set, difference;
  for (MooseEnumIterator it = mme.begin(); it != mme.end(); ++it)
    return_set.insert(*it);

  compare_set.insert("ONE");
  compare_set.insert("THREE");
  compare_set.insert("FOUR");

  std::set_symmetric_difference(return_set.begin(), return_set.end(),
                                compare_set.begin(), compare_set.end(),
                                std::inserter(difference, difference.end()));

  CPPUNIT_ASSERT( difference.size() == 0 );

  // Order and indexing
  mme.clear();
  mme = "one two four";
  CPPUNIT_ASSERT( mme.contains("three") == false );

  CPPUNIT_ASSERT( mme[0] == "one" );
  CPPUNIT_ASSERT( mme[1] == "two" );
  CPPUNIT_ASSERT( mme[2] == "four" );
}