#include <stdio.h>

int main() {
    char s[1024];
    int wins = 0;
    FILE *f = fopen("poker.txt", "rb");
    if (!f) {
        perror("poker.txt");
        return 1;
    }
    while (fgets(s, sizeof s, f) != NULL) {
        hand h1, h2;
        int n = read(h1, s);        /* parse player 1's hand; returns chars consumed */
        read(h2, s + n + 1);        /* player 2's hand follows the separator */
        if (p1_wins(h1, h2)) {
            printf("p1 wins ");
            ++wins;
        } else {
            printf("        ");     /* pad to keep the columns aligned */
        }
        printf("p1=(");
        print(h1);
        score s1 = getscore(h1);
        printf(") %s (%d) -- ", rankstr(s1.rank), s1.topcard.value);
        printf("p2=(");
        print(h2);
        score s2 = getscore(h2);
        printf(") %s (%d)\n", rankstr(s2.rank), s2.topcard.value);
    }
    fclose(f);
    printf("p1 wins: %d\n", wins);
}
int p1_wins(hand p1, hand p2) {
    score l = getscore(p1);
    score r = getscore(p2);
    if (l.rank == r.rank)
        return l.topcard.value > r.topcard.value;
    return l.rank > r.rank;
}
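/*
** The two snippets above rely on types and helpers that are not shown
** (hand, card, score, read, print, getscore, rankstr). The following
** declarations are a sketch of what they might look like, inferred
** from the call sites; everything here is an assumption, not the
** original code. Since read(h1, s) fills the caller's hand, hand is
** most plausibly an array typedef (arrays decay to pointers when
** passed by value).
*/
typedef struct {
    int value;                  /* assumed: 2..14, with 14 = ace */
    int suit;                   /* assumed suit encoding */
} card;

typedef card hand[5];           /* assumed: five cards, array typedef */

typedef struct {
    int rank;                   /* assumed: 0 = high card .. 8 = straight flush */
    card topcard;               /* highest card, used for tie-breaking */
} score;

int read(hand h, const char *s);    /* assumed: parse a hand, return chars consumed;
                                       note the name shadows POSIX read(2) if
                                       <unistd.h> is included */
void print(hand h);                 /* assumed: print the five cards */
score getscore(hand h);             /* assumed: evaluate the hand */
const char *rankstr(int rank);      /* assumed: rank name for display */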
void WinScreen::draw() {
    background();
    ofSetColor(250, 200);
    font.drawString("Congratulations, you beat all the levels",
                    ofGetWidth()*0.2, ofGetHeight()*0.3);
    font.drawString("Total Score: " + ofToString(getscore()),
                    ofGetWidth()*0.4, ofGetWindowHeight()*0.5);
    if (button(0.5, 0.8, "Main Menu")) {
        State::setState(MM);    // switch back to the main-menu state
        //State::gameStates[LVL1].get()->resetLevel();
    }
}
/**************************
** bench_with_confidence **
***************************
** Given a benchmark id that indicates a function, this routine
** repeatedly calls that benchmark, seeking to collect and replace
** scores to get 5 that meet the confidence criteria.
**
** The above is mathematically questionable, as the statistical theory
** depends on independent observations, and if we exchange data points
** depending on what we already have then this certainly violates
** independence of the observations. Hence I changed this so that at
** most 30 observations are done, but none are deleted as we go
** along. We simply do more runs and hope to get a big enough sample
** size so that things stabilize. Uwe F. Mayer
**
** Return 0 if ok, -1 if failure. Returns mean
** and std. deviation of results if successful.
*/
static int bench_with_confidence(int fid,   /* Function id */
        double *mean,                       /* Mean of scores */
        double *stdev,                      /* Standard deviation */
        ulong *numtries)                    /* # of attempts */
{
    double myscores[30];        /* Need at least 5 scores, use at most 30 */
    double c_half_interval;     /* Confidence half interval */
    int i;                      /* Index */
    /* double newscore; */      /* For improving confidence interval */

    /*
    ** Get first 5 scores. Then begin confidence testing.
    */
    for (i = 0; i < 5; i++) {
        (*funcpointer[fid])();
        myscores[i] = getscore(fid);
#ifdef DEBUG
        printf("score # %d = %g\n", i, myscores[i]);
#endif
    }
    *numtries = 5;              /* Show 5 attempts */

    /*
    ** The system allows a maximum of 30 tries before it gives
    ** up. Since we've done 5 already, we'll allow 25 more.
    */

    /*
    ** Enter loop to test for confidence criteria.
    */
    while (1) {
        /*
        ** Calculate confidence. Should always return 0.
        */
        if (0 != calc_confidence(myscores, *numtries,
                &c_half_interval, mean, stdev))
            return (-1);

        /*
        ** Is the length of the half interval 5% or less of mean?
        ** If so, we can go home. Otherwise, we have to continue.
        */
        if (c_half_interval / (*mean) <= (double)0.05)
            break;

#ifdef OLDCODE
#undef OLDCODE
#endif
#ifdef OLDCODE
        /* This code is no longer valid; we now do not replace
           but add new scores. Uwe F. Mayer */
        /*
        ** Go get a new score and see if it
        ** improves existing scores.
        */
        do {
            if (*numtries == 10)
                return (-1);
            (*funcpointer[fid])();
            *numtries += 1;
            newscore = getscore(fid);
        } while (seek_confidence(myscores, &newscore,
                &c_half_interval, mean, stdev) == 0);
#endif
        /* We now simply add a new test run and hope that the
           runs finally stabilize. Uwe F. Mayer */
        if (*numtries == 30)
            return (-1);
        (*funcpointer[fid])();
        myscores[*numtries] = getscore(fid);
#ifdef DEBUG
        printf("score # %lu = %g\n", *numtries, myscores[*numtries]);
#endif
        *numtries += 1;
    }
    return (0);
}
/**************************
** bench_with_confidence **
***************************
** Given a benchmark id that indicates a function, this
** routine repeatedly calls that benchmark, seeking
** to collect enough scores to get 5 that meet the confidence
** criteria. Return 0 if ok, -1 if failure.
** Returns mean and std. deviation of results if successful.
*/
static int bench_with_confidence(int fid,   /* Function id */
        double *mean,                       /* Mean of scores */
        double *stdev,                      /* Standard deviation */
        ulong *numtries)                    /* # of attempts */
{
    double myscores[5];         /* Need at least 5 scores */
    double c_half_interval;     /* Confidence half interval */
    int i;                      /* Index */
    double newscore;            /* For improving confidence interval */

    /*
    ** Get first 5 scores. Then begin confidence testing.
    */
    TotalTime = (double)0.0;
    for (i = 0; i < 5; i++) {
        (*funcpointer[fid])();
        myscores[i] = getscore(fid);
        TotalTime += CurrTime;
    }
    *numtries = 5;              /* Show 5 attempts */

    /*
    ** The system allows a maximum of 10 tries before it gives
    ** up. Since we've done 5 already, we'll allow 5 more.
    */

    /*
    ** Enter loop to test for confidence criteria.
    */
    while (1) {
        /*
        ** Calculate confidence.
        */
        calc_confidence(myscores, &c_half_interval, mean, stdev);

        /*
        ** Is half interval 5% or less of mean?
        ** If so, we can go home. Otherwise,
        ** we have to continue.
        */
        if (c_half_interval / (*mean) <= (double)0.05)
            break;

        /*
        ** Go get a new score and see if it
        ** improves existing scores.
        */
        do {
            if (*numtries == 10)
                return (-1);
            (*funcpointer[fid])();
            *numtries += 1;
            newscore = getscore(fid);
        } while (seek_confidence(myscores, &newscore,
                &c_half_interval, mean, stdev) == 0);
    }
    return (0);
}
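/*
** calc_confidence itself is not shown in either version above. What
** follows is a minimal sketch, not the original: it assumes a
** two-sided 95% confidence interval based on Student's t
** distribution, computed from the sample mean and unbiased standard
** deviation of the n scores, with half interval t(n-1)*sdev/sqrt(n).
** The signature matches the newer variant's call site; the t-value
** table is standard, but the original may differ in detail.
*/
#include <math.h>

static int calc_confidence(double scores[],     /* Array of scores */
        ulong num_scores,                       /* Number of scores, 5..30 */
        double *c_half_interval,                /* Confidence half interval */
        double *smean,                          /* Sample mean */
        double *sdev)                           /* Sample std. deviation */
{
    /* Two-sided 95% Student's t for 4..29 degrees of freedom. */
    static const double t95[] = {
        2.776, 2.571, 2.447, 2.365, 2.306, 2.262, 2.228, 2.201, 2.179,
        2.160, 2.145, 2.131, 2.120, 2.110, 2.101, 2.093, 2.086, 2.080,
        2.074, 2.069, 2.064, 2.060, 2.056, 2.052, 2.048, 2.045
    };
    double sum = 0.0, sumsq = 0.0;
    ulong i;

    if (num_scores < 5 || num_scores > 30)
        return -1;
    for (i = 0; i < num_scores; i++) {
        sum += scores[i];
        sumsq += scores[i] * scores[i];
    }
    *smean = sum / (double)num_scores;
    /* Unbiased sample standard deviation. */
    *sdev = sqrt((sumsq - sum * sum / (double)num_scores)
            / (double)(num_scores - 1));
    *c_half_interval = t95[num_scores - 5] * (*sdev)
            / sqrt((double)num_scores);
    return 0;
}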
void tkd_exec() {
    int i, j, k, s, t, tau;

    miss = 0;
    /*
     * Count missing values O(N*D) and initialize T.
     */
    for (i = 0; i < N; ++i) {
        for (j = 0; j < D; ++j) {
            dataset[i].T[j] = N-1;      /* initialization */
            miss += dataset[i].missing[j];
        }
    }

    /*
     * Calculate the number of objects a certain object dominates
     * on a certain dimension, O(D*N*logN).
     */
    for (i = 0; i < D; ++i) {
        missd[i] = 0;
        for (j = 0; j < N; ++j) {
            arr[j] = j;
            if (dataset[j].missing[i])
                ++missd[i];
        }
        quicksort(arr, i, 0, N-1);
        j = N-1;
        while (j >= missd[i]) {
            k = j;
            /* Scan backwards over objects tied with arr[j] on dimension i. */
            while (k > 0 && dataset[arr[j]].value[i] == dataset[arr[--k]].value[i])
                ;
            if (k == 0) --k;
            for (s = k+1; s <= j; ++s)
                dataset[arr[s]].T[i] = missd[i] + N - k - 2;
            j = k;
        }
    }

    /*
     * Calculate maxscore O(N*D): the minimum of the per-dimension
     * counts, an upper bound on an object's dominating score.
     */
    for (i = 0; i < N; ++i) {
        maxscore[i] = dataset[i].T[0];
        for (j = 1; j < D; ++j)
            if (dataset[i].T[j] < maxscore[i])
                maxscore[i] = dataset[i].T[j];
    }

    /*
     * Maintain a priority queue O(N*logN).
     */
    queue[0] = N;
    for (i = 1; i <= N; ++i)
        queue[i] = i-1;
    for (i = 1; i <= N/2; ++i)
        perculateUp(maxscore, queue, i);

    /*
     * Maintain a candidate set with max scores, using pruning.
     */
    tau = -1;
    candidateset[0] = 0;
    while (queue[0]) {
        t = popqueue(queue, maxscore);
        if (maxscore[t] < tau)          /* maxscore pruning */
            break;
        score[t] = getscore(t, tau, miss, candidateset[0]);     /* bitscore pruning */
        if (score[t] > tau || tau < 0) {
            if (candidateset[0] == K) {
                /* Set full: replace a candidate whose score equals the threshold. */
                for (i = 1; i <= candidateset[0]; ++i) {
                    if (score[candidateset[i]] == tau) {
                        candidateset[i] = t;
                        break;
                    }
                }
            } else {
                candidateset[++candidateset[0]] = t;
            }
            if (candidateset[0] == K) {         /* candidate set full: recompute tau */
                tau = score[candidateset[1]];
                for (i = 2; i <= K; ++i)
                    if (score[candidateset[i]] < tau)
                        tau = score[candidateset[i]];
            }
        }
    }
}
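/*
** perculateUp and popqueue are not defined above. The sketch below is
** an assumption, not the original code: it takes queue to be a
** 1-based binary max-heap of object ids keyed by maxscore, with
** queue[0] holding the element count, which matches how tkd_exec uses
** it. Note that a textbook build-heap sifts down from floor(N/2) to
** 1, so the original perculateUp (called for i = 1..N/2 in ascending
** order) may follow a different internal convention.
*/
static void siftdown(int *prio, int *q, int i)      /* hypothetical helper */
{
    int n = q[0], child, tmp;
    while (2 * i <= n) {
        child = 2 * i;
        /* Pick the larger of the two children. */
        if (child < n && prio[q[child + 1]] > prio[q[child]])
            ++child;
        if (prio[q[i]] >= prio[q[child]])
            break;
        tmp = q[i]; q[i] = q[child]; q[child] = tmp;
        i = child;
    }
}

/* Sketch of popqueue: remove and return the id with the largest maxscore. */
static int popqueue_sketch(int *q, int *prio)
{
    int top = q[1];
    q[1] = q[q[0]];             /* move the last element to the root */
    --q[0];
    siftdown(prio, q, 1);       /* restore the heap property */
    return top;
}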