/* Copy data to and from RAM at different offsets.
   Assume memory alignments between 0 and sizeof(double). */
static int
ram_copy(int argc, char **argv, struct Result *res)
{
    int n, i64;

    if (argc != 1) {
        fprintf(stderr, "ram_copy(): needs 1 argument!\n");
        return (-1);
    }
    if (sscanf(*argv, "%d", &i64) < 1) {
        fprintf(stderr, "ram_copy(): needs 1 argument!\n");
        return (-1);
    }
    for (n = 0; n < i64; n++) {
        void                /* vary offsets with each call */
            *src = (void *) &b[n % sizeof(double)],                     /* try all src & dest double word offsets */
            *dst = (void *) &a[(n / sizeof(double)) % sizeof(double)];  /* ensure offset always from 0 to sizeof(double) */
        int count = sizes[n % Members(sizes)];                          /* vary copy sizes by table above */

        memcpy(dst, src, count);    /* destination comes first; memcpy() can never fail */
    }
    res->c = b[i64];
    return (0);
}
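/*
 * Illustrative sketch only (not part of the original source): ram_copy() above
 * assumes two buffers a[] and b[], a table of copy sizes, a Members()
 * element-count macro, and a struct Result with a field c.  One plausible set
 * of definitions, with buffer and table sizes chosen purely for illustration:
 */
#include <stdio.h>
#include <string.h>

#define Members(arr)    (sizeof(arr) / sizeof((arr)[0]))    /* assumed: element count of an array */

struct Result { double c; };                    /* assumed: only the field used above */

static double a[8192];                          /* copy destination (size assumed) */
static double b[8192];                          /* copy source (size assumed) */
static int sizes[] = { 1, 2, 4, 8, 16, 32 };    /* copy sizes cycled through per iteration (values assumed) */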
void dumpInternalState( a_state *x )
{
    a_parent *parent;
    a_shift_action *tx;
    a_reduce_action *rx;
    size_t col, new_col;
    set_size *mp;
    an_item **item;

    printf( "state %d: %p (%u)\n", x->sidx, x, x->kersize );
    printf( " parent states:" );
    col = 4;
    for( parent = x->parents; parent != NULL; parent = parent->next ) {
        printf( " %d(%p)", parent->state->sidx, parent->state );
        --col;
        if( col == 0 ) {
            printf( "\n" );
            col = 5;
        }
    }
    printf( "\n" );
    for( item = x->items; *item != NULL; ++item ) {
        showitem( *item, " ." );
    }
    printf( "actions:" );
    col = 8;
    for( tx = x->trans; tx->sym != NULL; ++tx ) {
        new_col = col + 1 + strlen( tx->sym->name ) + 1 + 1 + 3;
        if( new_col > 79 ) {
            putchar('\n');
            new_col -= col;
        }
        col = new_col;
        printf( " %s:s%03d", tx->sym->name, tx->state->sidx );
    }
    putchar( '\n' );
    col = 0;
    for( rx = x->redun; rx->pro != NULL; ++rx ) {
        for( mp = Members( rx->follow ); mp != setmembers; ) {
            --mp;
            new_col = col + 1 + strlen( symtab[*mp]->name );
            if( new_col > 79 ) {
                putchar('\n');
                new_col -= col;
            }
            col = new_col;
            printf( " %s", symtab[*mp]->name );
        }
        new_col = col + 1 + 5;
        if( new_col > 79 ) {
            putchar('\n');
            new_col -= col;
        }
        col = new_col;
        printf( ":r%03d", rx->pro->pidx );
    }
    putchar( '\n' );
}
void CLSComplianceChecker::VerifyThatMembersAreNotMarkedCLSCompliant(BCSYM_Container * Container)
{
    VSASSERT( !Container->IsCLSCompliant(),
                    "CLS compliant container unexpected!!!");

    // Members nested inside Non CLS Compliant Types cannot be
    // marked as CLS Compliant.
    //
    // For Top Level types, it is okay to be marked as CLS Compliant
    // even though inside a Non-CLS Compliant assembly.
    //
    if (Container->IsNamespace())
    {
        return;
    }

    BCITER_CHILD Members(Container);
    while (BCSYM_NamedRoot *Member = Members.GetNext())
    {
        if (Member->IsBad())
        {
            continue;
        }

        // If the member is not accessible outside the assembly, then
        // don't bother checking for CLS compliance
        //
        if (!IsAccessibleOutsideAssembly(Member))
        {
            continue;
        }

        // Need to do this so that we handle the operators. Operators
        // do not by themselves show up in this (bindable hash) hash
        // we are looking in, so we will find them through their functions
        // instead.
        //
        if (Member->IsProc() && Member->PProc()->IsUserDefinedOperatorMethod())
        {
            Member = Member->PProc()->GetAssociatedOperatorDef();
        }

        // We don't want to report errors for synthetic members.
        // Eg: withevents property, the withevents variable _prop, etc.
        // If we don't skip errors for these, we would end up reporting
        // the same errors on both these and the original symbols from
        // which they were synthesized resulting in duplicate errors.
        //
        if (IsSyntheticMember(Member))
        {
            continue;
        }

        VerifyMemberNotMarkedCLSCompliant(Member);
    }
}
void dump_reduction( a_reduce_action *rx, unsigned *base )
{
    a_pro *pro;
    short *p;

    pro = rx->pro;
    p = Members( rx->follow, setmembers );
    while( --p >= setmembers ) {
        add_table( *p, ACTION_REDUCE | pro->pidx );
        ++(*base);
    }
}
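/*
 * Illustrative sketch only (not from the original sources): in these parser
 * generator routines, Members() appears to expand a follow bit-set into the
 * shared scratch array setmembers[] and return a pointer one past the last
 * index written, which is why callers walk the result backwards
 * (while( --p >= setmembers )).  Some snippets call it as Members( set ) and
 * others as Members( set, setmembers ); the one-argument form is sketched
 * here, with the word width and scratch size chosen purely for illustration.
 */
typedef unsigned a_word;
typedef unsigned short set_size;

#define SET_WORD_BITS   ( sizeof( a_word ) * 8 )
#define MAX_SET_MEMBERS 512

static set_size setmembers[ MAX_SET_MEMBERS ];  /* shared scratch area (size assumed) */
static unsigned setwords = 4;                   /* words per bit-set (value assumed) */

static set_size *Members( a_word *set )
{
    set_size *mp = setmembers;
    unsigned w, b;

    for( w = 0; w < setwords; ++w ) {
        for( b = 0; b < SET_WORD_BITS; ++b ) {
            if( set[ w ] & ( (a_word)1 << b ) ) {
                *mp++ = (set_size)( w * SET_WORD_BITS + b );
            }
        }
    }
    return( mp );
}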
bool Package::SetGlobal(const Value& key, Value& val, u4 attr)
{
    if (!CanSetGlobal(key))
    {
        if (!(attr & Slot::ATTR_forcewrite))
        {
            return false;
        }
        else
        {
            attr &= ~Slot::ATTR_forcewrite;
        }
    }

    if (key.IsCollectible()) WriteBarrier(key);
    if (val.IsCollectible()) WriteBarrier(val);

    if (!Members().Set(key, val, attr))
    {
        return false;
    }
    return true;
}
void Bindable::ValidateWithEventsVarsInHandlesListsAndSynthesizePropertiesIfRequired() { BCSYM_Container *ContainerOfHandlingMethods = CurrentContainer(); if (!IsClass(ContainerOfHandlingMethods) && !IsStructure(ContainerOfHandlingMethods) && !IsStdModule(ContainerOfHandlingMethods)) { return; } Symbols SymbolFactory( CurrentCompilerInstance(), CurrentAllocator(), NULL, CurrentGenericBindingCache()); BCITER_CHILD Members(ContainerOfHandlingMethods); while(BCSYM_NamedRoot *Member = Members.GetNext()) { // only method implementations can have handles clauses if (!Member->IsMethodImpl()) { continue; } BCSYM_Proc *Proc = Member->PProc(); BCITER_Handles iterHandles(Member->PMethodImpl()); BCSYM_HandlesList *Handles = iterHandles.GetNext(); if (!Handles) { continue; } ErrorTable *ErrorLog = CurrentErrorLog(Proc); for(; Handles; Handles = iterHandles.GetNext()) { if (Handles->IsMyBase() || Handles->IsEventFromMeOrMyClass()) { continue; } bool FoundInBase; BCSYM_Variable *WithEventsVar = GetWithEventsVarReferredToInHandlesClause( Handles, FoundInBase); if (!WithEventsVar) { // "Handles clause requires a WithEvents variable." ReportErrorAtLocation( ERRID_NoWithEventsVarOnHandlesList, ErrorLog, Handles->GetLocationOfWithEventsVar()); Handles->SetIsBad(); } else if (WithEventsVar->IsBad() || WithEventsVar->IsBadVariableType() || // the type of the variable is good, but is not a class or interface WithEventsVar->GetType()->IsBad()) // the type of the variable is bad { // Any metadata errors on a symbol should be reported at the location // the symbol is used in source code // if (DefinedInMetaData(WithEventsVar->GetContainer())) { VSASSERT( !DefinedInMetaData(CurrentContainer()), "How can Current Context for handles clauses not be in VB Source Code ?!"); WithEventsVar->ReportError( CurrentCompilerInstance(), ErrorLog, Handles->GetLocationOfWithEventsVar()); } Handles->SetIsBad(); } else { // get the withevents property if possible BCSYM_Property *WithEventsProperty = GetWithEventsPropertyForWithEventsVariable(WithEventsVar->PVariable()); // Create it if it doesn't exist (for handling events defined on // WithEvent vars that exist on a base class). // if (!WithEventsProperty) { VSASSERT(FoundInBase, "Why do we have to synthesize a property for a withevents variable in the current class ? It should already have been synthesized in declared!!"); WithEventsProperty = SynthesizeWithEventsProperty(WithEventsVar->PVariable(), SymbolFactory); WithEventsProperty->SetCreatedByHandlesClause(Handles); } Handles->SetWithEventsProperty(WithEventsProperty); } } } }
void Bindable::ValidateWithEventsVarsAndHookUpHandlersInContainer()
{
    VSASSERT( GetStatusOfValidateWithEventsVarsAndHookUpHandlers() != InProgress,
                    "How can we have event validation cycles ?");

    if (GetStatusOfValidateWithEventsVarsAndHookUpHandlers() == Done)
    {
        return;
    }

    BCSYM_Container *Container = CurrentContainer();

    if (!Container->IsClass() ||
        TypeHelpers::IsEnumType(Container->PClass()))
    {
        SetStatusOfValidateWithEventsVarsAndHookUpHandlers(Done);
        return;
    }

    SetStatusOfValidateWithEventsVarsAndHookUpHandlers(InProgress);

    // Make sure validation of withevents and shadowing, etc. of the types
    // of the withevents variables in bases is done.
    //
    BCSYM_Class *Base = Container->PClass()->GetCompilerBaseClass();
    if (Base)
    {
        ValidateWithEventsVarsAndHookUpHandlersInContainer(Base, m_CompilationCaches);
    }

    // Need to use the SAFE iterator because more members might
    // be added to the Container during hook-up of Handles.
    // This adding of members will happen while the iterator is
    // still in use.
    //
    BCITER_CHILD_SAFE Members(Container);

    // Validate all the withevent vars in this container
    while (BCSYM_NamedRoot *Member = Members.GetNext())
    {
        if (IsWithEventsVariable(Member))
        {
            ValidateWithEventsVar(Member->PVariable());
        }
    }

    // Validate and hookup all the handles clauses specified in this container
    Members.Reset();
    while (BCSYM_NamedRoot *Member = Members.GetNext())
    {
        // Only methods with implementation can have handles clauses
        //
        if (!Member->IsMethodImpl())
        {
            continue;
        }

        BCITER_Handles iterHandles( Member->PMethodImpl() );

        for(BCSYM_HandlesList *HandlesEntry = iterHandles.GetNext();
            HandlesEntry;
            HandlesEntry = iterHandles.GetNext())
        {
            if (HandlesEntry->IsBad())
            {
                continue;
            }

            ValidateAndHookUpHandles( HandlesEntry, Member->PMethodImpl());
        }
    }

    SetStatusOfValidateWithEventsVarsAndHookUpHandlers(Done);
}
//M+ void mp( int MinCoreSize, int MaxCoreSize, int SamplingFreq, int NumReplicates, char* OutFilePath, std::string Kernel, vector<int> KernelAccessionIndex, vector<int> AccessionNameList, vector<vector<vector<int> > > ActiveAlleleByPopList, vector<vector<vector<int> > > TargetAlleleByPopList, vector<int> ActiveMaxAllelesList, vector<int> TargetMaxAllelesList, vector<std::string> FullAccessionNameList ) { //PERFORM INITIAL MPI STUFF MPI_Status status; //this struct contains three fields which will contain info about the sender of a received message // MPI_SOURCE, MPI_TAG, MPI_ERROR //MPI::Init (); //Initialize MPI. int nproc = MPI::COMM_WORLD.Get_size ( ); //Get the number of processes. int procid = MPI::COMM_WORLD.Get_rank ( ); //Get the individual process ID. //set up vectors to fill with results //below is a stupid way to calculate the number of rows in the output file, value l (which = V1) //used to monitor progress and as the maximum vector index for shared output vectors int l=0; for (int i=MinCoreSize;i<MaxCoreSize+1;i=i+SamplingFreq) { for (int j=0;j<NumReplicates;j++) { l++; } } double V1 = (double)l; //(MaxCoreSize - MinCoreSize + 1)*NumReplicates; //number of rows in output vectors vector<vector<double> > Results(V1, vector<double>(9)); //will contain numerical results vector<vector<string> > Members(V1); //will contain core set members //***MPI: RECEIVE RESULTS AT MASTER 0 //receive values from any slave, in any order, exiting when the number of 'receives' = the top vector size if ( procid == 0 ) { //set up variables for monitoring progress int percent; //percent of analysis completed int progindex = 0; //index to monitor progress, percent = 100*(progindex/l) //receive and process results from slave processors unsigned int i = 0; while (i<2*(Results.size())) //two receives per row { //probe the incoming message to determine its tag int nchar; //will contain the length of the char array passed with tag=1 int vchar; //will contain the length of the vector passed with tag=0 int tag; //tag of message from sender int source; //procid of sender MPI_Probe(MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status); //MPI_Get_count(&status, MPI_CHAR, &nchar); //probes the length of the message, saves it in nchar tag = status.MPI_TAG; //the tag defines which kind of comm it is, a vector of stats (0=resvec()) //or a char array describing the members of the core (1=cc) source = status.MPI_SOURCE; //determine the source of the message so that you can define which sender to Recv from. This will avoid an intervening message coming in after the MPI_Probe with a different length, causing a message truncated error. 
if (tag == 0) { //determine the length of the message tagged 0 MPI_Get_count(&status, MPI_DOUBLE, &vchar); //cout <<" vchar="<<vchar<<" tag="<<tag<<" MPI_SOURCE="<<status.MPI_SOURCE<<" MPI_ERROR="<<status.MPI_ERROR<<"\n"; //receive the vector of results, tagged 0, from: //MPI_Send(&resvec[0], resvec.size(), MPI_DOUBLE, 0, 0, MPI_COMM_WORLD); vector<double> t(10); MPI_Recv(&t[0], vchar, MPI_DOUBLE, source, 0, MPI_COMM_WORLD, &status); //load data from vector received onto Results, row number is last item t[9] for (int j=0;j<9;++j) { Results[ t[9] ][j] = t[j]; } t.clear(); } else if (tag == 1) { //determine the length of the message tagged 1 MPI_Get_count(&status, MPI_CHAR, &nchar); //probes the length of the message, saves it in nchar //cout <<" nchar="<<nchar<<" tag="<<tag<<" MPI_SOURCE="<<status.MPI_SOURCE<<" MPI_ERROR="<<status.MPI_ERROR<<"\n"; //receive the vector<string> of the core set, tagged 1, from: //MPI_Send(&m[0], nchar, MPI_CHAR, 0, 1, MPI_COMM_WORLD); //vector<string> m(nchar); char m[nchar]; MPI_Recv(&m[0], nchar, MPI_CHAR, source, 1, MPI_COMM_WORLD, &status); //load core set onto Members //1. convert char array into a string string mstr(m); //2. split string on delimiter ',<!>,' string delim = ",<!>,"; vector<string> mvec( countSubstring(mstr, delim) ); unsigned int st = 0; std::size_t en = mstr.find(delim); int k = 0; while (en != std::string::npos) { mvec[k] = mstr.substr(st, en-st); st = en + delim.length(); en = mstr.find(delim,st); ++k; } string z = mstr.substr(st); //get row number as last item in mstr int zz = atoi(z.c_str()); //convert string to c-string then to int //3. load onto Members Members[zz] = mvec; //4. clean up memset(m, 0, nchar);; mstr=""; mvec.clear(); } ++i; //display progress progindex = progindex + 1; percent = 100*( progindex/(V1*2) ); //number of rows X 2 repeats needed to complete search printProgBar(percent); } }//***MPI: END MASTER RECEIVE***/ /***MPI: SEND RESULTS FROM SLAVE PROCESSES***/ else if ( procid != 0 ) { unsigned int r; //r = core size, //int nr, RandAcc, b, bsc, plateau; //nr = controller to repeat NumReplicates times int RandAcc, b, bsc, plateau; //nr = controller to repeat NumReplicates times //row = result vector row number, bsc = holds best sub core member, and other indexed accessions //plateau = index of the number of reps in optimization loop with same diversity value double RandomActiveDiversity; double AltRandomActiveDiversity; double StartingRandomActiveDiversity; double StartingAltRandomActiveDiversity; double RandomTargetDiversity; double AltRandomTargetDiversity; double StartingDiversity; double TempAltOptimizedActiveDiversity; double AltOptimizedActiveDiversity; double OptimizedTargetDiversity; double AltOptimizedTargetDiversity; double best; double nnew; vector<vector<vector<int> > > AlleleList; vector<vector<vector<int> > > CoreAlleles; vector<vector<vector<int> > > TdTempList; vector<vector<vector<int> > > BestSubCoreAlleles; std::string Standardize = "yes"; //a run that mimics the MSTRAT approach can be accomplished by setting Standardize="no", and setting up the var file so that each column in the .dat file is treated as a single locus, rather than two (or more) adjacent columns being treated as a single codominant locus. 
vector<int> AccessionsInCore; vector<int> AccessionsInSubCore; vector<int> BestSubCore; vector<int> BestSubCoreRevSorted; vector<int> TempList; vector<int> TempList2; vector<int> bestcore; vector<std::string> TempListStr; //seed the random number generator for each processor int tt; tt = (time(NULL)); srand ( abs(((tt*181)*((procid-83)*359))%104729) ); //do parallelization so that each rep by core size combo can be //handled by a distinct thread. this involves figuring out the total //number of reps*coresizes taking into account the SamplingFreq int rsteps = 1 + floor( (MaxCoreSize - MinCoreSize) / SamplingFreq ); //number of steps from MinCoreSize to MaxCoreSize //***MPI: figure out where to start and stop loop for each processor int nreps = rsteps*NumReplicates; int count = nreps/(nproc-1); //p-1 assumes a master, i.e. one less processor than total int start = (procid-1) * count; //procid-1 makes you start at 0, assumes master is p0 int stop; if (nreps % (nproc-1) > (procid-1)) { start += procid - 1; stop = start + (count + 1); } else { start += nreps % (nproc-1); stop = start + count; } //iterate thru the relevant rows for (int rnr=start;rnr<stop;++rnr) { r = MinCoreSize + ((rnr / NumReplicates) * SamplingFreq); //int rounds to floor //develop random starting core set //clear AccessionsInCore and set size AccessionsInCore.clear(); AccessionsInCore.resize(r); //add kernel accessions to core, if necessary if (Kernel == "yes") { for (unsigned int i=0;i<KernelAccessionIndex.size();i++) { AccessionsInCore[i] = KernelAccessionIndex[i]; } } //clear TempList and set size TempList.clear(); TempList.resize( AccessionNameList.size() ); //set list of available accessions in TempList, by erasing those already in the core TempList = AccessionNameList; //expunge the kernel accessions, so they are not available for random addition below //KernelAccessionIndex has been reverse sorted so you don't go outside range after automatic resize by .erase for (unsigned int i=0;i<KernelAccessionIndex.size();i++) { b = KernelAccessionIndex[i]; TempList.erase(TempList.begin()+b); } //randomly add accessions until r accessions are in the core. if there is a kernel, include those (done above) //plus additional, randomly selected accessions, until you get r accessions for (unsigned int i=KernelAccessionIndex.size();i<r;i++) { //choose an accession randomly from those available RandAcc = rand() % TempList.size(); //add it to the list AccessionsInCore[i] = TempList[RandAcc]; //remove it from the list of available accessions TempList.erase(TempList.begin()+RandAcc); } //assemble genotypes for random core and calculate diversity //1. put together initial list of active alleles CoreAlleles.clear(); CoreAlleles.resize( AccessionsInCore.size() ); for (unsigned int i=0;i<AccessionsInCore.size();i++) { b = AccessionsInCore[i]; CoreAlleles[i] = ActiveAlleleByPopList[b]; } //2. calculate diversity from random selection at active loci AlleleList.clear(); AlleleList = CoreAlleles; MyCalculateDiversity(AlleleList, ActiveMaxAllelesList, Standardize, RandomActiveDiversity, AltRandomActiveDiversity); //in MyCalculateDiversity, latter two variables are updated as references //save them away in non-updated variables StartingRandomActiveDiversity = RandomActiveDiversity; StartingAltRandomActiveDiversity = AltRandomActiveDiversity; //3. 
calculate diversity from random selection at target loci AlleleList.clear(); AlleleList.resize( AccessionsInCore.size() ); for (unsigned int j=0;j<AccessionsInCore.size();j++) { b = AccessionsInCore[j]; AlleleList[j] = TargetAlleleByPopList[b]; } MyCalculateDiversity(AlleleList, TargetMaxAllelesList, Standardize, RandomTargetDiversity, AltRandomTargetDiversity); //BEGIN OPTIMIZATION StartingDiversity = 0; //this is the diversity recovered during the prior iteration. plateau = 0; //count of the number of times you have found the best value, evaluates when you are //stuck on a plateau, assuming acceptance criterion allows downhill steps //this is the iterations step, now an indefinite loop that is broken when //no improvement is made during the course of the optimization algorithm //If r = kernel size = MinCoreSize then do no optimization but still calculate all variables. if (KernelAccessionIndex.size() == r) { //assemble genotypes for core //1. put together initial list CoreAlleles.clear(); CoreAlleles.resize(r); for (unsigned int i=0;i<r;i++) { b = AccessionsInCore[i]; CoreAlleles[i] = ActiveAlleleByPopList[b]; } AlleleList = CoreAlleles; MyCalculateDiversity(AlleleList, ActiveMaxAllelesList, Standardize, RandomActiveDiversity, AltRandomActiveDiversity); best = RandomActiveDiversity; //best is equivalent to OptimizedActiveDiversity AltOptimizedActiveDiversity = AltRandomActiveDiversity; } else { //do optimization while ( true ) { //assemble genotypes for core //1. put together initial list CoreAlleles.clear(); CoreAlleles.resize(r); for (unsigned int i=0;i<r;i++) { b = AccessionsInCore[i]; CoreAlleles[i] = ActiveAlleleByPopList[b]; } //2. go through all possible subsets of size r-1, one at a time, noting which is best. //If there is a kernel, do not swap out any of those accessions (they are retained as the //first KernelAccessionIndex.size() items in CoreAlleles). Accomplished by starting for loop //at KernelAccessionIndex.size(). best=0; for (unsigned int i=KernelAccessionIndex.size();i<CoreAlleles.size();i++) { //remove each item consecutively from the list of all populations in the core AlleleList.clear(); TdTempList.clear(); TdTempList = CoreAlleles; //swap to temporary vector TdTempList.erase( TdTempList.begin() + i); AlleleList = TdTempList; TempList2.clear(); TempList2 = AccessionsInCore; TempList2.erase(TempList2.begin() + i); AccessionsInSubCore = TempList2; /*Data structure for SubCoreAlleles: SubCore 1..r Population 1..(r-1) AlleleArray 1..NumLoci --3. fuse alleles from the same locus into a single array, for all accessions, for the current subcore --4. assemble a list of diversity (M) for each locus separately --5. standardize the M values to the maximum possible number of alleles at that locus, and add them up to get final estimate of standardized allelic diversity in the core. then divide by the number of loci to get a number that is comparable across data sets. --5.5. 
simultaneous to the calculation, keep track of which subcore is best */ MyCalculateDiversity(AlleleList, ActiveMaxAllelesList, Standardize, RandomActiveDiversity, AltRandomActiveDiversity); nnew = RandomActiveDiversity; if (nnew >= best) // >= allows sideways movement during hill climbing { best = nnew; BestSubCore.clear(); BestSubCore = AccessionsInSubCore; BestSubCoreAlleles.clear(); BestSubCoreAlleles = AlleleList; } } //for loop cycles thru all subcores //reverse sort BestSubCore to support easy assembly of pared TempList below BestSubCoreRevSorted = BestSubCore; std::sort(BestSubCoreRevSorted.begin(), BestSubCoreRevSorted.end(), std::greater<int>()); /* 6. take the subcore with greatest diversity and consecutively add each possible additional accession from the base collection. find the core of size r (not r-1 subcore) that has the greatest diversity. suppress the IDs of those accessions found in the BestSubCore from the list of all accessions to get a list of remaining accessions.*/ TempList = AccessionNameList; for (unsigned int k=0;k<BestSubCoreRevSorted.size();k++) { bsc = BestSubCoreRevSorted[k]; TempList.erase( TempList.begin() + bsc ); } //shuffle the list of remaining accessions, so addition order is not predictable std::random_shuffle (TempList.begin(), TempList.end()); //add each remaining accession consecutively, calculate diversity, test //whether it is better than the prior one best = 0; for (unsigned int k=0;k<TempList.size();k++) { bsc = TempList[k]; //define the core TempList2 = BestSubCore; TempList2.resize( TempList2.size() + 1 ); //TempList2.push_back(i); TempList2[TempList2.size()-1] = bsc; //add new accession to last vector element AccessionsInCore = TempList2; //assemble the allelelist for the core TdTempList = BestSubCoreAlleles; TdTempList.resize( TdTempList.size() + 1 ); //TdTempList.push_back( ActiveAlleleByPopList[i] ); TdTempList[TdTempList.size()-1] = ActiveAlleleByPopList[bsc]; AlleleList = TdTempList; //calculate diversity MyCalculateDiversity(AlleleList, ActiveMaxAllelesList, Standardize, nnew, TempAltOptimizedActiveDiversity); //test whether current diversity is higher than the best diversity found so far if (nnew >= best) // >= allows sideways movement during hill climbing { best = nnew; bestcore = AccessionsInCore; //save the alternative diversity value for the best core AltOptimizedActiveDiversity = TempAltOptimizedActiveDiversity; } } AccessionsInCore = bestcore; //define starting variable for next MSTRAT iteration //if there has been no improvement from the prior iteration, you have reached // the plateau and should exit the repeat if (best == StartingDiversity) { plateau++; if (plateau > 0) break; } //update starting value and repeat else if (best > StartingDiversity) StartingDiversity = best; } //while(true) endless loop } //7. Calculate diversity at target loci //assemble the target loci allelelist for the accessions in the best core AlleleList.clear(); AlleleList.resize( AccessionsInCore.size() ); for (unsigned int j=0;j<AccessionsInCore.size();j++) { b = AccessionsInCore[j]; AlleleList[j] = TargetAlleleByPopList[b]; } //calculate diversity at target loci based upon the optimized core selection MyCalculateDiversity(AlleleList, TargetMaxAllelesList, Standardize, OptimizedTargetDiversity, AltOptimizedTargetDiversity); //8. 
Assemble stats for optimized core and add to output vectors //create a list of accession names from the list of accession ID's in AccessionsInCore sort( AccessionsInCore.begin(), AccessionsInCore.end() ); TempListStr.clear(); TempListStr.resize(r); for (unsigned int i=0;i<AccessionsInCore.size();i++) { b = AccessionsInCore[i]; TempListStr[i] = FullAccessionNameList[b]; } /***MPI: BUILD & SEND RESULTS VECTOR***/ //load the variables onto the results vectors //no need to calculate row number, it is the same as rnr, formula saved because it might be useful later //row = ((r - MinCoreSize)*NumReplicates) + nr - ( (NumReplicates*(SamplingFreq-1))*( (r-MinCoreSize)/SamplingFreq ) ); // (r - MinCoreSize)*NumReplicates) + nr specifies row number if SamplingFreq=1 // (NumReplicates*(SamplingFreq-1)) specifies a step value to correct when SamplingFreq>1 // ( (r-MinCoreSize)/SamplingFreq ) specifies the replicate on core size, accounting for SamplingFreq // see file Calculation of row value.xlsx for development of the 'row' index //put results 0-8 into a vector, resvec, return row as last item vector<double> resvec(10); resvec[0] = double(r); resvec[1] = StartingRandomActiveDiversity;//RandomActiveDiversity; resvec[2] = best; //equivalent to OptimizedActiveDiversity resvec[3] = RandomTargetDiversity; resvec[4] = OptimizedTargetDiversity; resvec[5] = StartingAltRandomActiveDiversity;//AltRandomActiveDiversity; resvec[6] = AltOptimizedActiveDiversity; resvec[7] = AltRandomTargetDiversity; resvec[8] = AltOptimizedTargetDiversity; resvec[9] = double(rnr); //cout<<"MPI_Rank="<<MPI_Rank<<" //send result vector to master 0, send row number, rnr, as last element. //message is tagged as 0 //here you are pointing to the first element, then returning resvec.size() doubles- //worth of memory from that starting location. 
MPI_Send(&resvec[0], resvec.size(), MPI_DOUBLE, 0, 0, MPI_COMM_WORLD); /***MPI: END BUILD & SEND RESULTS VECTOR***/ /***MPI: BUILD & SEND MEMBERS VECTOR***/ //add row number as last item in TempListStr TempListStr.resize(TempListStr.size()+1); stringstream ss; ss << rnr; //convert int to stringstream to string TempListStr[ TempListStr.size() - 1 ] = ss.str(); //convert vector<string> to a single, ',<!>,' delimited, string string concat; for (unsigned int i=0;i<TempListStr.size();++i) { concat += TempListStr[i]; //add vector element if (i<TempListStr.size()-1) concat += ",<!>,"; //add delimiter, except for last item } //convert the string to a char array char cc[concat.size()+1]; strcpy(cc, concat.c_str()); //send the char array to master0 tagged as 1 //tagged as 1 to distinguish from result vector send MPI_Send(&cc, sizeof(cc), MPI_CHAR, 0, 1, MPI_COMM_WORLD); } //end for loop over rows } //***MPI: END SEND /*MPI: MASTER 0 WRITES OUTPUT*/ if ( procid == 0 ) { //set up file stream for output file ofstream output; output.open(OutFilePath); output.close(); //quick open close done to clear any existing file each time program is run output.open(OutFilePath, ios::out | ios::app); //open file in append mode output << "core size random reference diversity optimized reference diversity random target diversity optimized target diversity alt random reference diversity alt optimized reference diversity alt random target diversity alt optimized target diversity core members" << "\n"; //write out results row by row for (int i=0;i<V1;i++) { //write variables output << Results[i][0] << " " << Results[i][1] << " " << Results[i][2] << " " << Results[i][3] << " " << Results[i][4] << " " << Results[i][5] << " " << Results[i][6] << " " << Results[i][7] << " " << Results[i][8] << " " << "("; //write Accessions retained for (unsigned int j=0;j<Members[i].size();j++) { if ( j==(Members[i].size() - 1) ) { //add trailing parentheses and move to next row output << Members[i][j] << ")\n"; } else { output << Members[i][j] << ","; } } } //wrap up write step output.close(); } /***MPI: END MASTER WRITE***/ //Terminate MPI. //MPI::Finalize ( ); }
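/*
 * Worked example (added for clarity; the numbers are hypothetical) of the
 * start/stop arithmetic mp() uses above to split the rnr loop across the
 * slave processes.  With nreps = 10 rows and nproc = 4 (one master + three
 * slaves), count = 10/3 = 3 and the remainder 10 % 3 = 1 extra row goes to
 * procid 1:
 *
 *      procid 1: start = 0, stop = 4   -> rows 0..3
 *      procid 2: start = 4, stop = 7   -> rows 4..6
 *      procid 3: start = 7, stop = 10  -> rows 7..9
 */
#include <iostream>

int main()
{
    const int nreps = 10, nproc = 4;                 // hypothetical values
    for (int procid = 1; procid < nproc; ++procid)
    {
        int count = nreps / (nproc - 1);
        int start = (procid - 1) * count;
        int stop;
        if (nreps % (nproc - 1) > (procid - 1)) { start += procid - 1; stop = start + (count + 1); }
        else { start += nreps % (nproc - 1); stop = start + count; }
        std::cout << "procid " << procid << ": rows " << start << ".." << stop - 1 << "\n";
    }
    return 0;
}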
void CLSComplianceChecker::VerifyMembersForCLSCompliance(BCSYM_Container * Container) { VSASSERT( Container->IsCLSCompliant(), "Non-CLS compliant container unexpected!!!"); BCITER_CHILD Members(Container); while (BCSYM_NamedRoot *Member = Members.GetNext()) { // Need to do this so that we handle the operators. Operators // do not by themselves show up in this (bindable hash) hash // we are looking in, so we will find them through their functions // instead. // if (Member->IsProc() && Member->PProc()->IsUserDefinedOperatorMethod()) { Member = Member->PProc()->GetAssociatedOperatorDef(); } if (Member->IsBad()) { continue; } // Containers are checked later when checking the nested types // if (Member->IsContainer()) { continue; } // If the member is not accessible outside the assembly, then // don't bother checking for CLS compliance // if (!IsAccessibleOutsideAssembly(Member)) { continue; } // We don't want to report errors for synthetic members. // Eg: withevents property, the withevents variable _prop, etc. // If we don't skip errors for these, we would end up reporting // the same errors on both these and the original symbols from // which they were synthesized resulting in duplicate errors. // if (IsSyntheticMember(Member)) { continue; } // If explicitly marked as CLSCompliant(false), then no need to // verify // if (IsExplicitlyMarkedNonCLSCompliant(Member)) { ValidateNonCLSCompliantMemberInCLSCompliantContainer( Member, Container); continue; } VerifyNameIsCLSCompliant(Member); // Nothing more to do for enum members // if (Container->IsEnum()) { continue; } if (Member->IsProc()) { VerifyProcForCLSCompliance( Member->PProc(), Container); } else { VSASSERT( Member->IsVariable(), "What else can get here ?"); if (!IsTypeCLSCompliant(Member->PVariable()->GetRawType(), Member)) { ReportErrorOnSymbol( WRNID_FieldNotCLSCompliant1, Member, Member->GetName()); } } } VerifyConstraintsAreCLSCompliant(Container); }
static void resolve( a_state *x, set_size *work, a_reduce_action **reduce ) { a_shift_action *tx, *ux; a_reduce_action *rx; set_size *w; set_size *mp; index_n i; a_prec symprec, proprec, prevprec; w = work; for( rx = x->redun; rx->pro != NULL; ++rx ) { for( mp = Members( rx->follow ); mp != setmembers; ) { --mp; if( reduce[*mp] != NULL ) { prevprec = reduce[*mp]->pro->prec; proprec = rx->pro->prec; if( prevprec.prec == 0 || proprec.prec == 0 || prevprec.prec == proprec.prec ) { *w++ = *mp; /* resolve to the earliest production */ if( rx->pro->pidx >= reduce[*mp]->pro->pidx ) { continue; } } else if( prevprec.prec > proprec.prec ) { /* previous rule had higher precedence so leave it alone */ continue; } } reduce[*mp] = rx; } } while( w != work ) { --w; if( symtab[*w]->token == errsym->token ) continue; printf( "r/r conflict in state %d on %s:\n", x->sidx, symtab[*w]->name); ++RR_conflicts; for( rx = x->redun; rx->pro != NULL; ++rx ) { if( IsBitSet( rx->follow, *w ) ) { showitem( rx->pro->items, "" ); } } printf( "\n" ); for( rx = x->redun; rx->pro != NULL; ++rx ) { if( IsBitSet( rx->follow, *w ) ) { ShowSentence( x, symtab[*w], rx->pro, NULL ); } } printf( "---\n\n" ); } ux = x->trans; for( tx = ux; tx->sym != NULL; ++tx ) { i = tx->sym->idx; if( i >= nterm || reduce[i] == NULL ) { *ux++ = *tx; } else { /* shift/reduce conflict detected */ check_for_user_hooks( x, tx, reduce[i]->pro->pidx ); symprec = tx->sym->prec; proprec = reduce[i]->pro->prec; if( symprec.prec == 0 || proprec.prec == 0 ) { if( tx->sym != errsym ) { printf( "s/r conflict in state %d on %s:\n", x->sidx, tx->sym->name ); ++SR_conflicts; printf( "\tshift to %d\n", tx->state->sidx ); showitem( reduce[i]->pro->items, "" ); printf( "\n" ); ShowSentence( x, tx->sym, reduce[i]->pro, NULL ); ShowSentence( x, tx->sym, NULL, tx->state ); printf( "---\n\n" ); } *ux++ = *tx; reduce[i] = NULL; } else { if( symprec.prec > proprec.prec ) { *ux++ = *tx; reduce[i] = NULL; } else if( symprec.prec == proprec.prec ) { if( symprec.assoc == R_ASSOC ) { *ux++ = *tx; reduce[i] = NULL; } else if( symprec.assoc == NON_ASSOC ) { ux->sym = tx->sym; ux->state = errstate; ++ux; reduce[i] = NULL; } } } } } ux->sym = NULL; for( rx = x->redun; rx->pro != NULL; ++rx ) { Clear( rx->follow ); } for( i = 0; i < nterm; ++i ) { if( reduce[i] != NULL ) { SetBit( reduce[i]->follow, i ); reduce[i] = NULL; } } }
//M+ void mp( int MinCoreSize, int MaxCoreSize, int SamplingFreq, int NumReplicates, char* OutFilePath, std::string Kernel, vector<int> KernelAccessionIndex, vector<int> AccessionNameList, vector<vector<vector<std::string> > > ActiveAlleleByPopList, vector<vector<vector<std::string> > > TargetAlleleByPopList, vector<int> ActiveMaxAllelesList, vector<int> TargetMaxAllelesList, vector<std::string> FullAccessionNameList, int parallelism_enabled, int ncpu ) { //make recovery files (0) or not (1) int MakeRecovery=1; //set up variables for monitoring progress int percent; //percent of analysis completed int progindex = 0; //index to monitor progress, percent = 100*(progindex/l) //below is a stupid way to calculate the number of rows in the output file, value l (which = V1) //used to monitor progress and as the maximum vector index for shared output vectors int l=0; for (int i=MinCoreSize;i<MaxCoreSize+1;i=i+SamplingFreq) { for (int j=0;j<NumReplicates;j++) { l++; } } //set up vectors to fill with results unsigned long long V1 = l; //(MaxCoreSize - MinCoreSize + 1)*NumReplicates; //number of rows in output vectors vector<vector<double> > Results(V1, vector<double>(9)); //will contain numerical results vector<vector<string> > Members(V1); //will contain core set members #pragma omp parallel if(parallelism_enabled) num_threads(ncpu) { unsigned int r; //r = core size, int nr, RandAcc, b, row, bsc, plateau; //nr = controller to repeat NumReplicates times //row = result vector row number, bsc = holds best sub core member, and other indexed accessions //plateau = index of the number of reps in optimization loop with same diversity value double RandomActiveDiversity; double AltRandomActiveDiversity; double StartingRandomActiveDiversity; double StartingAltRandomActiveDiversity; double RandomTargetDiversity; double AltRandomTargetDiversity; double StartingDiversity; double TempAltOptimizedActiveDiversity; double AltOptimizedActiveDiversity; double OptimizedTargetDiversity; double AltOptimizedTargetDiversity; double best; double nnew; vector<vector<vector<std::string> > > AlleleList; vector<vector<vector<std::string> > > CoreAlleles; vector<vector<vector<std::string> > > TdTempList; vector<vector<vector<std::string> > > BestSubCoreAlleles; std::string Standardize = "yes"; //a run that mimics the MSTRAT approach can be accomplished by setting Standardize="no", and setting up the var file so that each column in the .dat file is treated as a single locus, rather than two (or more) adjacent columns being treated as a single codominant locus. 
vector<int> AccessionsInCore; vector<int> AccessionsInSubCore; vector<int> BestSubCore; vector<int> BestSubCoreRevSorted; vector<int> TempList; vector<int> TempList2; vector<int> bestcore; vector<std::string> TempListStr; //seed the random number generator for each thread int tt; tt = (time(NULL)); srand ( tt ^ omp_get_thread_num() ); //initialize //set up a recovery file for each thread that saves progress as program runs const char* RecoveryFilePath; if ( MakeRecovery == 0 ) // only write the recovery file to disk when option = 0 { stringstream ss; ss << OutFilePath << ".t" << omp_get_thread_num() << ".tmp"; string rfp = ss.str(); RecoveryFilePath = rfp.c_str(); ofstream RecoveryFile; RecoveryFile.open(RecoveryFilePath); RecoveryFile.close(); //quick open close done to clear any existing file each time program is run RecoveryFile.open(RecoveryFilePath, ios::out | ios::app); //open file in append mode RecoveryFile << "core size random reference diversity optimized reference diversity random target diversity optimized target diversity alt random reference diversity alt optimized reference diversity alt random target diversity alt optimized target diversity core members" << "\n"; RecoveryFile.close(); } //do parallelization so that each rep by core size combo can be //handled by a distinct thread. this involves figuring out the total //number of reps*coresizes taking into account the SamplingFreq int rsteps = 1 + (int)floor( (MaxCoreSize - MinCoreSize) / SamplingFreq ); //number of steps from MinCoreSize to MaxCoreSize #pragma omp for for (int rnr = 0; rnr<rsteps*NumReplicates;++rnr) { r = MinCoreSize + ((rnr / NumReplicates) * SamplingFreq); //int rounds to floor nr = rnr % NumReplicates; // modulo //develop random starting core set //clear AccessionsInCore and set size AccessionsInCore.clear(); AccessionsInCore.resize(r); //add kernel accessions to core, if necessary if (Kernel == "yes") { for (unsigned int i=0;i<KernelAccessionIndex.size();i++) { AccessionsInCore[i] = KernelAccessionIndex[i]; } } //clear TempList and set size TempList.clear(); TempList.resize( AccessionNameList.size() ); //set list of available accessions in TempList, by erasing those already in the core TempList = AccessionNameList; //expunge the kernel accessions, so they are not available for random addition below //KernelAccessionIndex has been reverse sorted so you don't go outside range after automatic resize by .erase for (unsigned int i=0;i<KernelAccessionIndex.size();i++) { b = KernelAccessionIndex[i]; TempList.erase(TempList.begin()+b); } //randomly add accessions until r accessions are in the core. if there is a kernel, include those (done above) //plus additional, randomly selected accessions, until you get r accessions //for (int i=0;i<r;i++) for (unsigned int i=KernelAccessionIndex.size();i<r;i++) { //choose an accession randomly from those available RandAcc = rand() % TempList.size(); //add it to the list AccessionsInCore[i] = TempList[RandAcc]; //remove it from the list of available accessions TempList.erase(TempList.begin()+RandAcc); } //assemble genotypes for random core and calculate diversity //1. put together initial list of active alleles CoreAlleles.clear(); CoreAlleles.resize( AccessionsInCore.size() ); for (unsigned int i=0;i<AccessionsInCore.size();i++) { b = AccessionsInCore[i]; CoreAlleles[i] = ActiveAlleleByPopList[b]; } //2. 
calculate diversity from random selection at active loci //vector<vector<vector<std::string> > >().swap(AlleleList); //clear AlleleList AlleleList.clear(); AlleleList = CoreAlleles; MyCalculateDiversity(AlleleList, ActiveMaxAllelesList, Standardize, RandomActiveDiversity, AltRandomActiveDiversity); //in MyCalculateDiversity, latter two variables are updated as references //save them away in non-updated variables StartingRandomActiveDiversity = RandomActiveDiversity; StartingAltRandomActiveDiversity = AltRandomActiveDiversity; //3. calculate diversity from random selection at target loci AlleleList.clear(); AlleleList.resize( AccessionsInCore.size() ); for (unsigned int j=0;j<AccessionsInCore.size();j++) { b = AccessionsInCore[j]; AlleleList[j] = TargetAlleleByPopList[b]; } MyCalculateDiversity(AlleleList, TargetMaxAllelesList, Standardize, RandomTargetDiversity, AltRandomTargetDiversity); //BEGIN OPTIMIZATION StartingDiversity = 0; //this is the diversity recovered during the prior iteration. plateau = 0; //count of the number of times you have found the best value, evaluates when you are //stuck on a plateau, assuming acceptance criterion allows downhill steps //this is the iterations step, now an indefinite loop that is broken when //no improvement is made during the course of the optimization algorithm //If r = kernel size = MinCoreSize then do no optimization but still calculate all variables. if (KernelAccessionIndex.size() == r) { //assemble genotypes for core //1. put together initial list CoreAlleles.clear(); CoreAlleles.resize(r); for (unsigned int i=0;i<r;i++) { b = AccessionsInCore[i]; CoreAlleles[i] = ActiveAlleleByPopList[b]; } AlleleList = CoreAlleles; MyCalculateDiversity(AlleleList, ActiveMaxAllelesList, Standardize, RandomActiveDiversity, AltRandomActiveDiversity); best = RandomActiveDiversity; //best is equivalent to OptimizedActiveDiversity AltOptimizedActiveDiversity = AltRandomActiveDiversity; } else { //do optimization while ( true ) { //assemble genotypes for core //1. put together initial list CoreAlleles.clear(); CoreAlleles.resize(r); for (unsigned int i=0;i<r;i++) { b = AccessionsInCore[i]; CoreAlleles[i] = ActiveAlleleByPopList[b]; } //2. go through all possible subsets of size r-1, one at a time, noting which is best. //If there is a kernel, do not swap out any of those accessions (they are retained as the //first KernelAccessionIndex.size() items in CoreAlleles). Accomplished by starting for loop //at KernelAccessionIndex.size(). best=0; for (unsigned int i=KernelAccessionIndex.size();i<CoreAlleles.size();i++) { //remove each item consecutively from the list of all populations in the core AlleleList.clear(); TdTempList.clear(); TdTempList = CoreAlleles; //swap to temporary vector TdTempList.erase( TdTempList.begin() + i); AlleleList = TdTempList; TempList2.clear(); TempList2 = AccessionsInCore; TempList2.erase(TempList2.begin() + i); AccessionsInSubCore = TempList2; /*Data structure for SubCoreAlleles: SubCore 1..r Population 1..(r-1) AlleleArray 1..NumLoci --3. fuse alleles from the same locus into a single array, for all accessions, for the current subcore --4. assemble a list of diversity (M) for each locus separately --5. standardize the M values to the maximum possible number of alleles at that locus, and add them up to get final estimate of standardized allelic diversity in the core. then divide by the number of loci to get a number that is comparable across data sets. --5.5. 
simultaneous to the calculation, keep track of which subcore is best */ MyCalculateDiversity(AlleleList, ActiveMaxAllelesList, Standardize, RandomActiveDiversity, AltRandomActiveDiversity); nnew = RandomActiveDiversity; if (nnew >= best) // >= allows sideways movement during hill climbing { best = nnew; BestSubCore.clear(); BestSubCore = AccessionsInSubCore; BestSubCoreAlleles.clear(); BestSubCoreAlleles = AlleleList; } } //for loop cycles thru all subcores //reverse sort BestSubCore to support easy assembly of pared TempList below BestSubCoreRevSorted = BestSubCore; std::sort(BestSubCoreRevSorted.begin(), BestSubCoreRevSorted.end(), std::greater<int>()); /* 6. take the subcore with greatest diversity and consecutively add each possible additional accession from the base collection. find the core of size r (not r-1 subcore) that has the greatest diversity. suppress the IDs of those accessions found in the BestSubCore from the list of all accessions to get a list of remaining accessions.*/ TempList = AccessionNameList; for (unsigned int k=0;k<BestSubCoreRevSorted.size();k++) { bsc = BestSubCoreRevSorted[k]; TempList.erase( TempList.begin() + bsc ); } //shuffle the list of remaining accessions, so addition order is not predictable std::random_shuffle (TempList.begin(), TempList.end()); //add each remaining accession consecutively, calculate diversity, test //whether it is better than the prior one best = 0; for (unsigned int k=0;k<TempList.size();k++) { bsc = TempList[k]; //define the core TempList2 = BestSubCore; TempList2.resize( TempList2.size() + 1 ); //TempList2.push_back(i); TempList2[TempList2.size()-1] = bsc; //add new accession to last vector element AccessionsInCore = TempList2; //assemble the allelelist for the core TdTempList = BestSubCoreAlleles; TdTempList.resize( TdTempList.size() + 1 ); //TdTempList.push_back( ActiveAlleleByPopList[i] ); TdTempList[TdTempList.size()-1] = ActiveAlleleByPopList[bsc]; AlleleList = TdTempList; //calculate diversity MyCalculateDiversity(AlleleList, ActiveMaxAllelesList, Standardize, nnew, TempAltOptimizedActiveDiversity); //test whether current diversity is higher than the best diversity found so far if (nnew >= best) // >= allows sideways movement during hill climbing { best = nnew; bestcore = AccessionsInCore; //save the alternative diversity value for the best core AltOptimizedActiveDiversity = TempAltOptimizedActiveDiversity; } } AccessionsInCore = bestcore; //define starting variable for next MSTRAT iteration //if there has been no improvement from the prior iteration, you have reached // the plateau and should exit the repeat if (best == StartingDiversity) { plateau++; if (plateau > 0) break; } //update starting value and repeat else if (best > StartingDiversity) StartingDiversity = best; } //while(true) endless loop } //7. Calculate diversity at target loci //assemble the target loci allelelist for the accessions in the best core AlleleList.clear(); AlleleList.resize( AccessionsInCore.size() ); for (unsigned int j=0;j<AccessionsInCore.size();j++) { b = AccessionsInCore[j]; AlleleList[j] = TargetAlleleByPopList[b]; } //calculate diversity at target loci based upon the optimized core selection MyCalculateDiversity(AlleleList, TargetMaxAllelesList, Standardize, OptimizedTargetDiversity, AltOptimizedTargetDiversity); //8. 
Assemble stats for optimized core and add to output vectors //create a list of accession names from the list of accession ID's in AccessionsInCore sort( AccessionsInCore.begin(), AccessionsInCore.end() ); TempListStr.clear(); TempListStr.resize(r); for (unsigned int i=0;i<AccessionsInCore.size();i++) { b = AccessionsInCore[i]; TempListStr[i] = FullAccessionNameList[b]; } //load the variables onto the results vectors //numerical results row = ((r - MinCoreSize)*NumReplicates) + nr - ( (NumReplicates*(SamplingFreq-1))*( (r-MinCoreSize)/SamplingFreq ) ); // (r - MinCoreSize)*NumReplicates) + nr specifies row number if SamplingFreq=1 // (NumReplicates*(SamplingFreq-1)) specifies a step value to correct when SamplingFreq>1 // ( (r-MinCoreSize)/SamplingFreq ) specifies the replicate on core size, accounting for SamplingFreq // see file Calculation of row value.xlsx for development of the 'row' index Results[row][0] = r; Results[row][1] = StartingRandomActiveDiversity;//RandomActiveDiversity; Results[row][2] = best; //equivalent to OptimizedActiveDiversity Results[row][3] = RandomTargetDiversity; Results[row][4] = OptimizedTargetDiversity; Results[row][5] = StartingAltRandomActiveDiversity;//AltRandomActiveDiversity; Results[row][6] = AltOptimizedActiveDiversity; Results[row][7] = AltRandomTargetDiversity; Results[row][8] = AltOptimizedTargetDiversity; //core set members Members[row] = TempListStr; //write the results onto the recovery files if ( MakeRecovery == 0) { WriteRecoveryFile(RecoveryFilePath, r, StartingRandomActiveDiversity, best, RandomTargetDiversity, OptimizedTargetDiversity, StartingAltRandomActiveDiversity, AltOptimizedActiveDiversity, AltRandomTargetDiversity, AltOptimizedTargetDiversity, TempListStr); } //display progress progindex = progindex + 1; percent = 100*(progindex/(double)V1); printProgBar(percent); } //end #pragma omp for loop } //end #pragma omp parallel //set up file stream for output file ofstream output; output.open(OutFilePath); output.close(); //quick open close done to clear any existing file each time program is run output.open(OutFilePath, ios::out | ios::app); //open file in append mode output << "core size random reference diversity optimized reference diversity random target diversity optimized target diversity alt random reference diversity alt optimized reference diversity alt random target diversity alt optimized target diversity core members" << "\n"; //write out results row by row for (unsigned int i=0;i<V1;i++) { //write variables output << Results[i][0] << " " << Results[i][1] << " " << Results[i][2] << " " << Results[i][3] << " " << Results[i][4] << " " << Results[i][5] << " " << Results[i][6] << " " << Results[i][7] << " " << Results[i][8] << " " << "("; //write Accessions retained for (unsigned int j=0;j<Members[i].size();j++) { if ( j==(Members[i].size() - 1) ) { //add trailing parentheses and move to next row output << Members[i][j] << ")\n"; } else { output << Members[i][j] << ","; } } } //wrap up write step output.close(); //delete all recovery files if ( MakeRecovery == 0 ) { cout << "\n\nDeleting recovery files...\n"; #pragma omp parallel if(parallelism_enabled) num_threads(ncpu) { stringstream ss; ss << OutFilePath << ".t" << omp_get_thread_num() << ".tmp"; string rfp = ss.str(); const char* RecoveryFilePath = rfp.c_str(); if (remove(RecoveryFilePath)) cout << "Failed to delete " << RecoveryFilePath << ": " << strerror(errno) << "\n"; //else cout << RecoveryFilePath << " successfully deleted.\n"; } } }
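/*
 * Worked example (added for clarity; parameter values are hypothetical) of the
 * 'row' formula used above.  It flattens the (core size r, replicate nr) pair
 * into a result-vector row index and reproduces the flattened loop counter rnr
 * of the #pragma omp for loop.  E.g. with MinCoreSize = 2, SamplingFreq = 2,
 * NumReplicates = 3 and r = 6, nr = 1:
 *
 *      row = (6-2)*3 + 1 - (3*(2-1)) * ((6-2)/2) = 12 + 1 - 6 = 7
 *
 * and indeed rnr = 7 maps back to r = 2 + (7/3)*2 = 6, nr = 7 % 3 = 1.
 */
#include <cassert>

int main()
{
    const int MinCoreSize = 2, SamplingFreq = 2, NumReplicates = 3;   // hypothetical
    const int r = 6, nr = 1, rnr = 7;
    int row = ((r - MinCoreSize) * NumReplicates) + nr
              - ((NumReplicates * (SamplingFreq - 1)) * ((r - MinCoreSize) / SamplingFreq));
    assert(row == rnr);   // the formula reproduces the flattened loop index
    return 0;
}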
void genobj( void ) { int i; int ntoken; int this_token; int any_token; int action; short *p; a_pro *pro; a_state *x; a_reduce_action *rx; a_reduce_action *default_reduction; a_shift_action *tx; a_sym *sym; an_item *item; unsigned max; unsigned sum; unsigned savings; unsigned base; unsigned rule_base; short *state_base; ntoken = 0; for( i = 0; i < nterm; ++i ) { this_token = symtab[i]->token; if( this_token > ntoken ) { ntoken = this_token; } } for( i = nterm; i < nsym; ++i ) { symtab[i]->token = ++ntoken; } any_token = ++ntoken; state_base = CALLOC( nstate, short ); base = 0; max = 0; sum = 0; for( i = 0; i < nstate; ++i ){ state_base[i] = base; x = statetab[i]; for( tx = x->trans; sym = tx->sym; ++tx ) { add_table( sym->idx, ACTION_SHIFT | tx->state->sidx ); ++base; } default_reduction = NULL; savings = 0; for( rx = x->redun; rx->pro != NULL; ++rx ){ p = Members( rx->follow, setmembers ); if( p != setmembers ) { if( p - setmembers > savings ) { savings = p - setmembers; if( default_reduction != NULL ) { dump_reduction( default_reduction, &base ); } default_reduction = rx; } else { dump_reduction( rx, &base ); } } } if( default_reduction != NULL ) { pro = default_reduction->pro; action = ACTION_REDUCE | pro->pidx; } else { action = ACTION_SHIFT | 0; } add_table( any_token, action ); ++base; sum += base - state_base[i]; if( base - state_base[i] > max ) { max = base - state_base[i]; } } printf( "avg: %u max: %u\n", sum / nstate, max ); dump_define( "YYANYTOKEN", any_token ); dump_define( "YYEOFTOKEN", eofsym->token ); dump_define( "YYSTART", startstate->sidx ); begin_table( "YYACTTYPE", "yybasetab" ); for( i = 0; i < nstate; ++i ) { puttab( FITS_A_WORD, state_base[i] ); } end_table(); begin_table( "YYCHKTYPE", "yychktab" ); for( i = 0; i < used; ++i ) { puttab( FITS_A_BYTE, table[i].token ); } end_table(); begin_table( "YYACTTYPE", "yyacttab" ); for( i = 0; i < used; ++i ) { puttab( FITS_A_WORD, table[i].action ); } end_table(); begin_table( "YYPLENTYPE", "yyplentab" ); for( i = 0; i < npro; ++i ) { for( item = protab[i]->item; item->p.sym; ++item ) /* do nothing */; puttab( FITS_A_BYTE, item - protab[i]->item ); } end_table(); begin_table( "YYPLHSTYPE", "yyplhstab" ); for( i = 0; i < npro; ++i ) { puttab( FITS_A_BYTE, protab[i]->sym->token ); } end_table(); fprintf( actout, "#ifdef YYDEBUG\n" ); rule_base = 0; begin_table( "unsigned short", "yyrulebase" ); for( i = 0; i < npro; ++i ) { for( item = protab[i]->item; item->p.sym; ++item ) /* do nothing */; puttab( FITS_A_WORD, rule_base ); rule_base += item - protab[i]->item; } end_table(); begin_table( "YYCHKTYPE", "yyrhstoks" ); for( i = 0; i < npro; ++i ) { for( item = protab[i]->item; item->p.sym; ++item ) { puttab( FITS_A_BYTE, item->p.sym->token ); } } end_table(); begin_table( "char YYFAR *", "yytoknames" ); fputc( '\n', actout ); for( i = 0; i < nsym; ++i ) { fprintf( actout, "\"%s\",\n", symtab[ i ]->name ); } fprintf( actout, "\"\"" ); end_table(); fprintf( actout, "#endif\n" ); }
Bindable::MemberInfoIter::MemberInfoIter
(
    BCSYM_Container *Container,
    Compiler *CompilerInstance
)
{
    // Resolve overloading in current container

    BCITER_CHILD_ALL Members(Container);
    BCSYM_NamedRoot *Member;
    unsigned MemberCount = Members.GetCount();

    m_CompilerInstance = CompilerInstance;

    BindableMemberInfo *MemberInfos = CreateMemberInfosForMembers(MemberCount);
    ArrayBuffer<BCSYM_NamedRoot*> MembersArray(MemberCount);

    // Create a MemberInfo array and store the info about whether
    // shadowing/overloading/overriding were specified on each member.
    // Why do we need this info although it is present on each of the
    // Members? Because we are going to change this information based
    // on the errors that occur and we don't want these changes reflected
    // in the Members themselves. Also this makes it easy to avoid redundant
    // computing of other info that is needed commonly for overriding,
    // overloading and shadowing. E.g. of common information - is a member synthetic
    // and if so what is its source symbol?
    //
    // The sorting might seem slow here, but since this is used across several
    // tasks in shadowing, overloading, overriding, by keeping them sorted
    // and also reusing this information we recoup the extra time spent sorting here.
    //
    unsigned CurrentMemberIndex = 0;
    for(Member = Members.GetNext(); Member; Member = Members.GetNext())
    {
#pragma prefast(suppress: 26017, "Number of loop iterations is constrained by number of elements")
        MembersArray[CurrentMemberIndex++] = Member;

        VSASSERT( CurrentMemberIndex <= MemberCount,
                    "How did more members show up than allocated space ?!!");
    }

    MemberCount = CurrentMemberIndex;

    if (Bindable::DefinedInMetaData(Container) &&
        !TypeHelpers::IsEmbeddableInteropType(Container))
    {
        // For metadata container, sort by access too. This simplifies the solution for Bug VSWhidbey 401153.
        //
        // The reason this simplified the solution for Bug VSWhidbey 401153 is as follows:
        //
        // When 2 different kinds of members (method vs. field, etc.) have the same name (different casing -
        // possible in c# - Non CLS compliant), then VB will only allow the most accessible members to be
        // accessed. If no one kind of member is more accessible than the other, then they are marked bad
        // along with ambiguity errors which will be reported upon their use. Also note that we will allow
        // access to all the overloads of the most accessible member, even if the overloads are less accessible
        // or equal in accessibility to some of the ruled out members. So for this purpose, it is easier
        // if we can start with the most accessible member first and see what other members the most accessible
        // member makes inaccessible.

        // Sort by name and then external access
        qsort(
            MembersArray.GetData(),
            MemberCount,
            sizeof(BCSYM_NamedRoot *),
            Container->HasUserDefinedOperators() ?
                // Perf tweak: consider operator kind in the sort only if the container has operators.
                SortSymbolsByNameAndOperatorAndExternalAccess :
                SortSymbolsByNameAndExternalAccess);
    }
    else
    {
        // Sort by name and then location
        qsort(
            MembersArray.GetData(),
            MemberCount,
            sizeof(BCSYM_NamedRoot *),
            Container->HasUserDefinedOperators() ?
                // Perf tweak: consider operator kind in the sort only if the container has operators.
                SortSymbolsByNameAndOperatorAndLocation :
                SortSymbolsByNameAndLocation);
    }

    for(CurrentMemberIndex = 0; CurrentMemberIndex < MemberCount; CurrentMemberIndex++)
    {
        BindableMemberInfo *MemberInfo =
            new((void*)(&MemberInfos[CurrentMemberIndex])) BindableMemberInfo(MembersArray[CurrentMemberIndex]);
    }

    m_MemberInfos = MemberInfos;
    m_NumberOfMembers = MemberCount;

    Reset();
}
void genobj( void ) { short int *symbol, *target; short int *p, *q, *r; short int action; set_size *mp; a_sym *sym; a_pro *pro; an_item *item; a_state *x; a_shift_action *tx; a_reduce_action *rx; int i, j, savings; for( i = nterm; i < nsym; ++i ) symtab[i]->token = i - nterm; label = OPTENTRY( nstate - 1 ); emitins( JMP, TOKENTRY( startstate->sidx ) ); target = CALLOC( nsym, short int ); for( i = 0; i < nsym; ++i ) target[i] = DEFAULT; symbol = CALLOC( nsym, short int ); for( i = 0; i < nstate; ++i ) { x = statetab[i]; q = symbol; for( tx = x->trans; (sym = tx->sym) != NULL; ++tx ) { if( sym == eofsym ) { action = ACCEPT; } else if( sym->idx < nterm ) { action = TOKENTRY(tx->state->sidx); } else { action = OPTENTRY(tx->state->sidx); } *q++ = sym->idx; target[sym->idx] = action; } savings = 0; for( rx = x->redun; (pro = rx->pro) != NULL; ++rx ) { action = PROENTRY( pro->pidx ); mp = Members( rx->follow ); if( mp - setmembers > savings ) { savings = mp - setmembers; r = q; } while( --mp >= setmembers ) { *q++ = *mp; target[*mp] = action; } } action = DEFAULT; if( savings ) { action = target[*r]; p = r; while( --savings >= 0 ) { target[*p++] = DEFAULT; } } emitins( LBL, TOKENTRY( x->sidx ) ); emitins( SCAN, 0 ); emitins( LBL, OPTENTRY( x->sidx ) ); emitins( CALL, VBLENTRY( x->sidx ) ); q = symbol; for( j = nterm; j < nsym; ++j ) { if( target[j] != DEFAULT ) { *q++ = j; } } if( q != symbol ) { emitv( symbol, target, q - symbol ); for( p = symbol; p < q; ++p ) { target[*p] = DEFAULT; } } emitins( LBL, VBLENTRY( x->sidx ) ); q = symbol; for( j = 0; j < nterm; ++j ) { if( target[j] != DEFAULT ) { *q++ = j; } } emitt( symbol, target, q - symbol, action ); for( p = symbol; p < q; ++p ) { target[*p] = DEFAULT; } } FREE( target ); FREE( symbol ); for( i = 0; i < npro; ++i ) { pro = protab[i]; if( pro != startpro ) { for( item = pro->item; item->p.sym != NULL; ) { ++item; } emitins( LBL, PROENTRY( pro->pidx ) ); emitins( ACTION, PROPACK( item - pro->item, i ) ); emitins( REDUCE, PROPACK( item - pro->item, pro->sym->token ) ); } } writeobj( label + 1 ); }
static int immediateShift( a_state *state, a_reduce_action *raction, a_pro *pro )
{
    a_sym *unit_lhs;
    a_sym *term_sym;
    a_state *after_lhs_state;
    a_state *final_state;
    a_state *check_state;
    a_parent *parent;
    a_word *follow;
    set_size *mp;
    int change_occurred;

    /*
        requirements:
            (1) state must have a reduction by a unit production (L1 <- r1)
                on a set of tokens (s)
            (2) all parents must shift to a state where a shift on a terminal
                in s ends up in a new state that is the same for all parents
        action:
            add shift on terminal to common parent shift state
    */
    // dumpInternalState( state );
    follow = raction->follow;
    unit_lhs = pro->sym;
    change_occurred = 0;
    for( mp = Members( follow ); mp != setmembers; ) {
        --mp;
        term_sym = symtab[*mp];
        check_state = NULL;
        for( parent = state->parents; parent != NULL; parent = parent->next ) {
            after_lhs_state = findNewShiftState( parent->state, unit_lhs );
            after_lhs_state = onlyShiftsOnTerminals( after_lhs_state );
            if( after_lhs_state == NULL ) {
                check_state = NULL;
                break;
            }
            final_state = findNewShiftState( after_lhs_state, term_sym );
            if( final_state == NULL ) {
                check_state = NULL;
                break;
            }
            if( check_state != NULL && check_state != final_state ) {
                check_state = NULL;
                break;
            }
            check_state = final_state;
        }
        if( check_state != NULL ) {
            /* all parent shifts on this terminal ended up in the same state! */
            state->trans = addShiftAction( term_sym, check_state, state->trans );
            ClearBit( follow, *mp );
            change_occurred = 1;
            ++changeOccurred;
        }
    }
    if( Empty( follow ) ) {
        state->redun = removeReduceAction( raction, state->redun );
        change_occurred = 1;
    }
    return( change_occurred );
}
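/*
 * immediateShift() leans on findNewShiftState(), which is not shown in this
 * listing. The sketch below is an assumed reconstruction of that helper's
 * contract, written against the types already used above (a_state, a_sym,
 * a_shift_action): scan the state's shift actions for the given symbol and
 * return the destination state, or NULL if the state has no shift on it.
 * The name suffix marks it as a sketch, not the tool's actual code.
 */
static a_state *findNewShiftStateSketch( a_state *state, a_sym *sym )
{
    a_shift_action *tx;

    for( tx = state->trans; tx->sym != NULL; ++tx ) {
        if( tx->sym == sym ) {
            return( tx->state );    /* a shift on sym goes to this state */
        }
    }
    return( NULL );                 /* no shift on sym from this state */
}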
void genobj( void )
{
    value_size token_size;
    action_n *actions, *parent, *other;
    base_n *base;
    token_n *p, *q, *r, *s;
    token_n *tokens, *same, *diff, *test, *best;
    set_size *mp;
    token_n tokval, dtoken, ptoken, ntoken;
    action_n actval, error, redun, new_action;
    a_sym *sym;
    a_pro *pro;
    an_item *item;
    a_state *x;
    a_shift_action *tx;
    a_reduce_action *rx;
    index_n i, j;
    set_size savings, min, *size;
    set_size shift;
    token_n parent_base;
    unsigned num_default, num_parent;

    if( fastflag ) {
        GenFastTables();
        return;
    }
    if( bigflag || compactflag ) {
        token_size = FITS_A_WORD;
    } else {
        token_size = FITS_A_BYTE;
    }
    num_default = 0;
    num_parent = 0;
    /* reserve token values for the special default and parent tokens,
       then number the non-terminals */
    ntoken = FirstNonTerminalTokenValue();
    dtoken = ntoken++;
    ptoken = ntoken++;
    for( i = nterm; i < nsym; ++i ) {
        symtab[i]->token = ntoken++;
    }
    actions = CALLOC( ntoken, action_n );
    error = nstate + npro;
    for( i = 0; i < ntoken; ++i ) {
        actions[i] = error;
    }
    tokens = CALLOC( ntoken, token_n );
    test = CALLOC( ntoken, token_n );
    best = CALLOC( ntoken, token_n );
    other = CALLOC( nstate, action_n );
    parent = CALLOC( nstate, action_n );
    size = CALLOC( nstate, set_size );
    base = CALLOC( nstate, base_n );
    same = NULL;
    r = NULL;
    diff = NULL;
    used = 0;
    avail = 0;
    table = NULL;
    shift = 0;
    parent_base = 0;
    for( i = nstate; i > 0; ) {
        --i;
        x = statetab[i];
        /* collect the shift actions of this state */
        q = tokens;
        for( tx = x->trans; (sym = tx->sym) != NULL; ++tx ) {
            *q++ = sym->token;
            actions[sym->token] = tx->state->sidx;
        }
        /* collect the reduce actions over their follow sets; remember the
           reduction with the largest follow set */
        savings = 0;
        for( rx = x->redun; (pro = rx->pro) != NULL; ++rx ) {
            redun = pro->pidx + nstate;
            mp = Members( rx->follow );
            if( (set_size)( mp - setmembers ) > savings ) {
                savings = (set_size)( mp - setmembers );
                r = q;
            }
            while( mp != setmembers ) {
                --mp;
                tokval = symtab[*mp]->token;
                *q++ = tokval;
                actions[tokval] = redun;
            }
        }
        /* turn the largest reduction into the state's default action */
        if( savings ) {
            actval = actions[*r];
            other[i] = actval;
            *q++ = dtoken;
            actions[dtoken] = actval;
            p = r;
            while( savings-- > 0 )
                actions[*p++] = error;
            while( p < q )
                *r++ = *p++;
            q = r;
            ++num_default;
        } else {
            other[i] = error;
        }
        r = q;
        min = (set_size)( q - tokens );
        size[i] = min;
        parent[i] = nstate;
        /* look among the states already processed for the one whose action
           list differs least from this state's; it becomes the parent state */
        for( j = nstate; --j > i; ) {
            if( abs( size[j] - size[i] ) < min ) {
                x = statetab[j];
                p = test;
                q = test + ntoken;
                for( tx = x->trans; (sym = tx->sym) != NULL; ++tx ) {
                    if( actions[sym->token] == tx->state->sidx ) {
                        *p++ = sym->token;
                    } else {
                        *--q = sym->token;
                    }
                }
                for( rx = x->redun; (pro = rx->pro) != NULL; ++rx ) {
                    redun = pro->pidx + nstate;
                    if( redun == other[j] )
                        redun = error;
                    for( mp = Members( rx->follow ); mp != setmembers; ) {
                        --mp;
                        tokval = symtab[*mp]->token;
                        if( actions[tokval] == redun ) {
                            *p++ = tokval;
                        } else {
                            *--q = tokval;
                        }
                    }
                }
                if( other[j] != error ) {
                    if( other[j] == other[i] ) {
                        *p++ = dtoken;
                    } else {
                        *--q = dtoken;
                    }
                }
                savings = (set_size)( size[i] + size[j] - 2 * ( p - test ) );
                if( savings < min ) {
                    min = savings;
                    same = p;
                    diff = q;
                    s = test;
                    test = best;
                    best = s;
                    parent[i] = j;
                }
            }
        }
        if( min >= size[i] ) {
            /* no parent saves space: emit the full action list */
            s = r;
        } else {
            /* keep only the entries that differ from the parent and chain to
               the parent state through the special parent token */
            ++num_parent;
            s = tokens;
            p = same;
            while( --p >= best )
                actions[*p] = error;
            for( q = tokens; q < r; ++q ) {
                if( actions[*q] != error ) {
                    *s++ = *q;
                }
            }
            p = best + ntoken;
            while( --p >= diff ) {
                if( actions[*p] == error ) {
                    *s++ = *p;
                }
            }
            actval = parent[i];
            *s++ = ptoken;
            actions[ptoken] = actval;
        }
        base[i] = addtotable( tokens, s, actions, dtoken, ptoken );
        while( --s >= tokens ) {
            actions[*s] = error;
        }
    }
    FREE( actions );
    FREE( tokens );
    FREE( test );
    FREE( best );
    FREE( other );
    FREE( size );
    putambigs( base );
    putnum( "YYNOACTION", error - nstate + used );
    putnum( "YYEOFTOKEN", eofsym->token );
    putnum( "YYERRTOKEN", errsym->token );
    putnum( "YYETOKEN", errsym->token );
    if( compactflag ) {
        parent_base = used + npro;
        putnum( "YYPARENT", parent_base );
        shift = 8;
        for( i = 256; i < used; i <<= 1 ) {
            ++shift;
        }
        putnum( "YYPRODSIZE", shift );
    } else {
        putnum( "YYPTOKEN", ptoken );
        putnum( "YYDTOKEN", dtoken );
    }
    putnum( "YYSTART", base[startstate->sidx] );
    putnum( "YYSTOP", base[eofsym->enter->sidx] );
    putnum( "YYERR", base[errstate->sidx] );
    putnum( "YYUSED", used );
    if( compactflag ) {
        begtab( "YYPACKTYPE", "yyacttab" );
        j = nstate;
        for( i = 0; i < used; ++i ) {
            new_action = table[i].action;
            if( i == base[j - 1] ) {
                --j;
                // First element in each state is default/parent
                if( parent[j] == nstate ) {
                    // No parent state
                    tokval = used + parent_base;
                } else {
                    tokval = base[parent[j]] + parent_base;
                }
                // 0 indicates no default
                if( new_action != 0 ) {
                    if( new_action < nstate ) {
                        // Shift
                        new_action = base[new_action];
                    } else {
                        // Reduce
                        new_action -= nstate;   // convert to 0 based
                        new_action += used;     // now convert to 'used' base
                    }
                }
            } else {
                tokval = table[i].token;
                if( new_action < nstate ) {
                    // Shift
                    new_action = base[new_action];
                } else {
                    // Reduce
                    new_action -= nstate;       // convert to 0 based
                    new_action += used;         // now convert to 'used' base
                }
            }
            putcompact( tokval, new_action );
        }
        endtab();
        // Combine lengths & lhs into a single table
        begtab( "YYPRODTYPE", "yyprodtab" );
        for( i = 0; i < npro; ++i ) {
            j = 0;
            for( item = protab[i]->items; item->p.sym != NULL; ++item ) {
                ++j;
            }
            puttab( FITS_A_WORD, (j << shift) + protab[i]->sym->token );
        }
        endtab();
    } else {
        begtab( "YYCHKTYPE", "yychktab" );
        for( i = 0; i < used; ++i ) {
            puttab( token_size, Token( table + i ) );
        }
        endtab();
        begtab( "YYACTTYPE", "yyacttab" );
        for( i = 0; i < used; ++i ) {
            j = Action( table + i );
            if( j < nstate ) {
                puttab( FITS_A_WORD, base[j] );
            } else {
                puttab( FITS_A_WORD, j - nstate + used );
            }
        }
        endtab();
        begtab( "YYPLENTYPE", "yyplentab" );
        for( i = 0; i < npro; ++i ) {
            for( item = protab[i]->items; item->p.sym != NULL; ) {
                ++item;
            }
            puttab( FITS_A_BYTE, (unsigned)( item - protab[i]->items ) );
        }
        endtab();
        begtab( "YYPLHSTYPE", "yyplhstab" );
        for( i = 0; i < npro; ++i ) {
            puttab( token_size, protab[i]->sym->token );
        }
        endtab();
    }
    FREE( table );
    FREE( base );
    FREE( parent );
    dumpstatistic( "bytes used in tables", bytesused );
    dumpstatistic( "states with defaults", num_default );
    dumpstatistic( "states with parents", num_parent );
    puttokennames( dtoken, token_size );
    FREE( protab );
    FREE( symtab );
}
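/*
 * The parser driver that consumes these tables is not part of this listing, so
 * the following is only a minimal sketch of how the action values written in
 * the non-compact (yychktab/yyacttab) branch above decode, derived from the
 * emission code itself: entries below YYUSED are the table base of the state
 * to shift or go to, entries from YYUSED up to (but not including) YYNOACTION
 * select the production to reduce by, and YYNOACTION (npro + used) means
 * "no action", i.e. a syntax error. The numeric constants here are hypothetical
 * placeholders for the values putnum() emits for a real grammar.
 */
#include <stdio.h>

enum { YYUSED = 500, YYNOACTION = 500 + 120 };   /* hypothetical table sizes */

static void decode_action( unsigned action )
{
    if( action < YYUSED ) {
        printf( "shift/goto: next state's entries start at table index %u\n", action );
    } else if( action < YYNOACTION ) {
        printf( "reduce by production %u\n", action - YYUSED );
    } else {
        printf( "no action: syntax error\n" );
    }
}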