void ribi::SimpleLinearRegression::Test() noexcept
{
  {
    static bool is_tested { false };
    if (is_tested) return;
    is_tested = true;
  }
  const TestTimer test_timer(__func__,__FILE__,1.0);
  {
    const std::vector<double> v { 75.0, 83.0, 96.0, 100.0, 121.0, 125.0 };
    const double variance { CalculateVariance(v) };
    const double expected { 332.666667 };
    assert(std::abs(variance - expected) < 0.0001);
  }
  {
    const std::vector<double> v { 0.23, 0.37, 0.45, 0.49, 0.56, 0.63, 0.63, 0.70, 0.72, 0.82 };
    const double variance { CalculateVariance(v) };
    const double expected { 0.02846 };
    assert(std::abs(variance - expected) < 0.0001);
  }
  for (int i=1; i!=5; ++i) //Human-based counting, following Anscombe's Quartet indices
  {
    const std::vector<double> xs { GetAnscombesQuartetX(i) };
    const std::vector<double> ys { GetAnscombesQuartetY(i) };
    const std::pair<double,double> p { CalculateBestFit(xs,ys) };
    const double mean_x { CalculateMean(xs) };
    const double mean_y { CalculateMean(ys) };
    const double slope { p.first };
    const double intercept { p.second };
    //const double variance_x = CalculateVariance(xs);
    //const double variance_y = CalculateVariance(ys);
    const double expected_mean_x { 9.0 };
    const double expected_mean_y { 7.5 }; //to 2 decimal places
    const double expected_slope { 0.500 }; //to 3 decimal places
    const double expected_intercept { 3.00 }; //to 2 decimal places
    const double e { 0.01 };
    assert(std::abs(expected_mean_x - mean_x) < e);
    assert(std::abs(expected_mean_y - mean_y) < e);
    //const double expected_variance_x = 11.0;
    //const double expected_variance_y = 4.125; //4.122 or 4.127 (to 3 decimal places)
    //const double expected_correlation = 0.816; //to 3 decimal places
    //assert(std::abs(expected_variance_x - variance_x) < e);
    //assert(std::abs(expected_variance_y - variance_y) < e);
    assert(std::abs(expected_slope - slope) < e);
    assert(std::abs(expected_intercept - intercept) < e);
  }
}
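// A minimal sketch of the helpers this test exercises, assuming the
// conventional least-squares definitions; these free functions are
// illustrative stand-ins, not ribi::SimpleLinearRegression's actual
// implementations.
#include <cassert>
#include <cmath>
#include <cstddef>
#include <numeric>
#include <utility>
#include <vector>

double CalculateMean(const std::vector<double>& v)
{
  assert(!v.empty());
  return std::accumulate(std::begin(v),std::end(v),0.0)
    / static_cast<double>(v.size());
}

//Population variance (divides by n), which is what the expected values
//asserted above imply: for the first test vector the squared deviations
//sum to 1996.0 and 1996.0 / 6 = 332.666667
double CalculateVariance(const std::vector<double>& v)
{
  assert(!v.empty());
  const double mean { CalculateMean(v) };
  double sum { 0.0 };
  for (const double x: v) { sum += (x - mean) * (x - mean); }
  return sum / static_cast<double>(v.size());
}

//Ordinary least squares: slope = cov(x,y) / var(x),
//intercept = mean_y - slope * mean_x; returns { slope, intercept },
//matching how Test() unpacks the pair
std::pair<double,double> CalculateBestFit(
  const std::vector<double>& xs, const std::vector<double>& ys)
{
  assert(xs.size() == ys.size());
  assert(xs.size() >= 2);
  const double mean_x { CalculateMean(xs) };
  const double mean_y { CalculateMean(ys) };
  double sxy { 0.0 };
  double sxx { 0.0 };
  for (std::size_t i = 0; i != xs.size(); ++i)
  {
    sxy += (xs[i] - mean_x) * (ys[i] - mean_y);
    sxx += (xs[i] - mean_x) * (xs[i] - mean_x);
  }
  const double slope { sxy / sxx };
  return std::make_pair(slope, mean_y - slope * mean_x);
}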
void Glicko2::calculateNewRatings(Player &player, const std::vector<Match> &matches, const float factor)
{
    const Player old = player;

    // Step 1: done by Player initialization
    // Step 2: done by setRating and setRd

    // Step 3: estimated variance of the player's rating based on game outcomes
    const float v = CalculateVariance(player, matches);

    // Steps 4 & 5: update the volatility
    player.volatility = newVolatility(player, matches, v);

    // Step 6: pre-rating-period deviation
    player.deviation = preRatingRD(player.deviation, player.volatility);

    // Step 7: new deviation and rating
    player.deviation = 1 / sqrt((1 / pow(player.deviation, 2)) + (1 / v));

    float rd_sum = 0;
    const Player * opponent;
    for (auto it = matches.begin(); it != matches.end(); ++it)
    {
        opponent = it->opponent;
        rd_sum += g(opponent->deviation)
                * (it->result - E(player.rating, opponent->rating, opponent->deviation));
    }
    // mu' = mu + phi'^2 * sum, per step 7 of Glickman's Glicko-2 paper:
    // the update is added onto the pre-period rating
    player.rating += pow(player.deviation, 2) * rd_sum;

    // Step 8: done by getRating and getRd

    // Blend the old and new values according to the factor used
    player.volatility = (player.volatility - old.volatility) * factor + old.volatility;
    player.deviation = (player.deviation - old.deviation) * factor + old.deviation;
    player.rating = (player.rating - old.rating) * factor + old.rating;
}
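// Sketches of the helpers steps 3 and 7 rely on, following the formulas in
// Glickman's Glicko-2 paper; the class's real g(), E() and
// CalculateVariance() are defined elsewhere, so these bodies (and their
// exact signatures) are illustrative assumptions.
#include <cmath>

// g() dampens an opponent's influence by their rating deviation
float Glicko2::g(const float deviation)
{
    const float pi = 3.14159265f;
    return 1 / sqrt(1 + 3 * pow(deviation, 2) / pow(pi, 2));
}

// E() is the expected score against an opponent with the given rating
// and deviation
float Glicko2::E(const float rating, const float opponentRating, const float opponentDeviation)
{
    return 1 / (1 + exp(-g(opponentDeviation) * (rating - opponentRating)));
}

// Step 3: the estimated variance of the player's rating based only on
// game outcomes, v = 1 / sum( g(phi_j)^2 * E * (1 - E) )
float Glicko2::CalculateVariance(const Player &player, const std::vector<Match> &matches)
{
    float sum = 0;
    for (auto it = matches.begin(); it != matches.end(); ++it)
    {
        const float e = E(player.rating, it->opponent->rating, it->opponent->deviation);
        sum += pow(g(it->opponent->deviation), 2) * e * (1 - e);
    }
    return 1 / sum;
}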
double FindCoverageProbability( const double* gapest, const double* gbar,
                                const int numCIs, const double numNonOverBatches,
                                const int numBatches, const double za,
                                const double OptGap, const int degreesFreedom,
                                ofstream& debugFile, double* var, double* gapbar )
{
	// gapbar is currently unused: the per-replication average gaps are
	// passed in precomputed through gbar
	int repCounter;
	double svar = 0,
	       coverage = 0;
	double * CI;
	int * check;
	bool varPassed;  // true if var was passed in as an argument

	if( !var )
	{
		varPassed = false;
		var = new double [numCIs];
	}
	else
		varPassed = true;
	CI = new double [numCIs];
	check = new int [numCIs];

	coverage = 0;
	for( repCounter = 0; repCounter < numCIs; repCounter++ )
	{
		// Note: gbar is the average of the overlapping gaps, which doesn't put
		// enough emphasis on samples near either end of the distribution.
		// This needs to be changed somehow... Maybe an average of
		// non-overlapping batches? Enforce n = km?

		// Calculate the variance of the gap over the numBatches replications
		svar = CalculateVariance( gapest, gbar[repCounter], numBatches,
		                          repCounter, degreesFreedom );
		var[repCounter] = svar;

		// Calculate the CI upper bound, check whether it covers the
		// optimality gap, and update coverage
		CI[repCounter] = gbar[repCounter] + za * sqrt(svar) / sqrt(numNonOverBatches);
		if( CI[repCounter] < OptGap )
			check[repCounter] = 0;
		else
			check[repCounter] = 1;
		coverage += check[repCounter];
	}
	coverage /= numCIs;

#if DEBUG_COVER
	int totssize = numBatches * numCIs;

	debugFile<<endl<<"Gap Estimate"<<endl;
	debugFile<<"------------"<<endl;
	for(repCounter=0; repCounter<totssize; repCounter++)
		debugFile<<gapest[repCounter]<<endl;
	debugFile<<endl;

	debugFile<<endl<<"Var estimate"<<endl;
	debugFile<<"---------------"<<endl;
	for(repCounter=0; repCounter<numCIs; repCounter++)
		debugFile<<var[repCounter]<<endl;

	debugFile<<endl<<"CI Estimate"<<endl;
	debugFile<<"------------------"<<endl;
	for(repCounter=0; repCounter<numCIs; repCounter++)
		debugFile<<CI[repCounter]<<endl;

	debugFile<<endl<<"Checks of CI"<<endl;
	debugFile<<"------------------"<<endl;
	for(repCounter=0; repCounter<numCIs; repCounter++)
		debugFile<<check[repCounter]<<endl;

	debugFile.close();
#endif

	// Only free var if it was allocated here
	if( !varPassed )
	{
		delete [] var;
		var = NULL;
	}
	delete [] CI;
	delete [] check;
	CI = NULL;
	check = NULL;

	return coverage;
} // end of FindCoverageProbability
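// A sketch of CalculateVariance() as called above, under the assumption that
// gapest stores the batch gap estimates replication-major and contiguously
// (batch b of replication r at gapest[r * numBatches + b], consistent with
// the totssize = numBatches * numCIs entries the debug block prints): the
// squared deviations around the precomputed mean gbar, divided by the
// supplied degrees of freedom. The actual implementation lives elsewhere;
// this body is illustrative only.
double CalculateVariance( const double* gapest, const double gbar,
                          const int numBatches, const int repCounter,
                          const int degreesFreedom )
{
	double sum = 0.0;
	for( int ii = repCounter * numBatches; ii < (repCounter * numBatches) + numBatches; ii++ )
		sum += ( gapest[ii] - gbar ) * ( gapest[ii] - gbar );
	return sum / (double) degreesFreedom;
}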
effVOID EFF3DTerrainROAMImproveTileData::GenerateGeometryDataFromElevationMap(effUINT16 * pEM, effUINT16 * pError, effINT nLevel)
{
	effBOOL * pVerticesUsed = new effBOOL[TERRAIN_TILE_PIXEL_SIZE * TERRAIN_TILE_PIXEL_SIZE];

	memset(m_nIndicesNum, 0, sizeof(effINT) * 4);
	memset(m_nVerticesNum, 0, sizeof(effINT) * 4);

	for ( effINT i = 0; i < nLevel; i++ )
	{
		// Four root triangles covering the tile, linked to each other as neighbours
		TriTreeNode * pLeftNode = new TriTreeNode();
		TriTreeNode * pRightNode = new TriTreeNode();
		TriTreeNode * pTopNode = new TriTreeNode();
		TriTreeNode * pBottomNode = new TriTreeNode();

		effINT nGridNum = TERRAIN_TILE_PIXEL_SIZE - 1;
		effINT nCenterX = nGridNum / 2;
		effINT nCenterZ = nGridNum / 2;

		pLeftNode->Set(AL, nCenterX, nCenterZ, 0, 0, 0, nGridNum,
			pTopNode, NULL, NULL, NULL, pTopNode, pBottomNode, NULL);
		pBottomNode->Set(AL, nCenterX, nCenterZ, 0, nGridNum, nGridNum, nGridNum,
			NULL, pRightNode, NULL, NULL, pLeftNode, pRightNode, NULL);
		pRightNode->Set(AL, nCenterX, nCenterZ, nGridNum, nGridNum, nGridNum, 0,
			pBottomNode, pTopNode, NULL, NULL, pBottomNode, pTopNode, NULL);
		pTopNode->Set(AL, nCenterX, nCenterZ, nGridNum, 0, 0, 0,
			pRightNode, pLeftNode, NULL, NULL, pRightNode, pLeftNode, NULL);

		// Per-root variance trees that drive the tessellation
		effINT varianceCount = TERRAIN_TILE_VARIANCE_NODE_NUM;

		effUINT16 * pLeftVariance = EFFNEW effUINT16[varianceCount];
		memset(pLeftVariance, 0, sizeof(effUINT16) * varianceCount);
		CalculateVariance(pEM, pLeftVariance, pLeftNode->nApexX, pLeftNode->nApexZ,
			pLeftNode->nLeftX, pLeftNode->nLeftZ, pLeftNode->nRightX, pLeftNode->nRightZ, 1);

		effUINT16 * pBottomVariance = EFFNEW effUINT16[varianceCount];
		memset(pBottomVariance, 0, sizeof(effUINT16) * varianceCount);
		CalculateVariance(pEM, pBottomVariance, pBottomNode->nApexX, pBottomNode->nApexZ,
			pBottomNode->nLeftX, pBottomNode->nLeftZ, pBottomNode->nRightX, pBottomNode->nRightZ, 1);

		effUINT16 * pRightVariance = EFFNEW effUINT16[varianceCount];
		memset(pRightVariance, 0, sizeof(effUINT16) * varianceCount);
		CalculateVariance(pEM, pRightVariance, pRightNode->nApexX, pRightNode->nApexZ,
			pRightNode->nLeftX, pRightNode->nLeftZ, pRightNode->nRightX, pRightNode->nRightZ, 1);

		effUINT16 * pTopVariance = EFFNEW effUINT16[varianceCount];
		memset(pTopVariance, 0, sizeof(effUINT16) * varianceCount);
		CalculateVariance(pEM, pTopVariance, pTopNode->nApexX, pTopNode->nApexZ,
			pTopNode->nLeftX, pTopNode->nLeftZ, pTopNode->nRightX, pTopNode->nRightZ, 1);

		pLeftNode->pVariance = pLeftVariance;
		pBottomNode->pVariance = pBottomVariance;
		pRightNode->pVariance = pRightVariance;
		pTopNode->pVariance = pTopVariance;

		// Split triangles until the per-level error bound pError[i] is met
		TessellateTriTree(pBottomNode, pEM, pError[i]);

		// Rewind to the head of the linked triangle list
		TriTreeNode * pFirstNode = pBottomNode;
		while (pFirstNode->pPre != NULL)
		{
			pFirstNode = pFirstNode->pPre;
		}

		memset(pVerticesUsed, 0, sizeof(effBOOL) * TERRAIN_TILE_PIXEL_SIZE * TERRAIN_TILE_PIXEL_SIZE);

		// Walk the triangle list and emit one triangle strip, inserting
		// degenerate edges where consecutive triangles don't share an edge
		std::vector<effUINT16> aryIndices;
		TriTreeNode * pPreNode = NULL;
		TriTreeNode * pNode = pFirstNode;
		while ( pNode != NULL )
		{
			effINT nIndex[3];
			nIndex[0] = pNode->nLeftZ * TERRAIN_TILE_PIXEL_SIZE + pNode->nLeftX;
			nIndex[1] = pNode->nApexZ * TERRAIN_TILE_PIXEL_SIZE + pNode->nApexX;
			nIndex[2] = pNode->nRightZ * TERRAIN_TILE_PIXEL_SIZE + pNode->nRightX;

			// Add the first triangle's three vertices
			if ( pPreNode == NULL )
			{
				for ( effINT j = 0; j < 3; j++ )
				{
					aryIndices.push_back((effUINT16)nIndex[j]);
					pVerticesUsed[nIndex[j]] = effTRUE;
					m_nIndicesNum[i]++;
				}
			}
			else
			{
				effINT nPreIndex[3];
				nPreIndex[0] = pPreNode->nLeftZ * TERRAIN_TILE_PIXEL_SIZE + pPreNode->nLeftX;
				nPreIndex[1] = pPreNode->nApexZ * TERRAIN_TILE_PIXEL_SIZE + pPreNode->nApexX;
				nPreIndex[2] = pPreNode->nRightZ * TERRAIN_TILE_PIXEL_SIZE + pPreNode->nRightX;

				for ( effINT j = 0; j < 3; j++ )
				{
					if ( nIndex[j] != nPreIndex[0] && nIndex[j] != nPreIndex[1] && nIndex[j] != nPreIndex[2] )
					{
						// From the second triangle on, check whether a degenerate edge is needed
						static effINT nSharedEdgeVertexIndex[3][2] = { {1,2}, {0,2}, {0,1} };

						effINT nSize = (effINT)aryIndices.size();
						effINT nSharedEdgeVertexIndex0 = nIndex[nSharedEdgeVertexIndex[j][0]];
						effINT nSharedEdgeVertexIndex1 = nIndex[nSharedEdgeVertexIndex[j][1]];

						// If the last edge (aryIndices[nSize-1], aryIndices[nSize-2]) of the
						// triangle strip is not the shared edge
						// (nIndex[nSharedEdgeVertexIndex[j][0]], nIndex[nSharedEdgeVertexIndex[j][1]]),
						// we need to add a degenerate edge
						if ( !(((nSharedEdgeVertexIndex0 == aryIndices[nSize-1]) && (nSharedEdgeVertexIndex1 == aryIndices[nSize-2]))
							|| ((nSharedEdgeVertexIndex1 == aryIndices[nSize-1]) && (nSharedEdgeVertexIndex0 == aryIndices[nSize-2]))) )
						{
							TriTreeNode * pNextNode = pNode->pNext;
							if ( pNextNode != NULL )
							{
								effINT nNextIndex[3];
								nNextIndex[0] = pNextNode->nLeftZ * TERRAIN_TILE_PIXEL_SIZE + pNextNode->nLeftX;
								nNextIndex[1] = pNextNode->nApexZ * TERRAIN_TILE_PIXEL_SIZE + pNextNode->nApexX;
								nNextIndex[2] = pNextNode->nRightZ * TERRAIN_TILE_PIXEL_SIZE + pNextNode->nRightX;

								// Pick the winding that keeps the next triangle's shared vertex last
								effBOOL bVertex0First = effTRUE;
								for ( effINT k = 0; k < 3; k++ )
								{
									if ( nNextIndex[k] == nSharedEdgeVertexIndex0 )
									{
										bVertex0First = effFALSE;
										break;
									}
								}

								if ( bVertex0First )
								{
									aryIndices.push_back(nSharedEdgeVertexIndex0);
									aryIndices.push_back(nSharedEdgeVertexIndex1);
									m_nIndicesNum[i] += 2;
								}
								else
								{
									aryIndices.push_back(nSharedEdgeVertexIndex1);
									aryIndices.push_back(nSharedEdgeVertexIndex0);
									m_nIndicesNum[i] += 2;
								}
							}
							// Last triangle: the vertex order doesn't matter
							else
							{
								aryIndices.push_back(nSharedEdgeVertexIndex0);
								aryIndices.push_back(nSharedEdgeVertexIndex1);
								m_nIndicesNum[i] += 2;
							}
						}

						aryIndices.push_back((effUINT16)nIndex[j]);
						//pVerticesUsed[nIndex[j]] = effTRUE;
						m_nIndicesNum[i]++;
					}
				}
			}

			pPreNode = pNode;
			pNode = pNode->pNext;
		}

		m_pIndices[i] = EFFNEW effUINT16[m_nIndicesNum[i]];
		memcpy(m_pIndices[i], &aryIndices[0], sizeof(effUINT16) * m_nIndicesNum[i]);

		// Free the triangle list for this level
		pNode = pFirstNode;
		while ( pNode != NULL )
		{
			TriTreeNode * pNext = pNode->pNext;
			SF_DELETE(pNode);
			pNode = pNext;
		}
	}

	// Remap tile-local pixel indices to a compact vertex buffer shared by all levels
	effINT * pGeneratedVerticesId = EFFNEW effINT[TERRAIN_TILE_PIXEL_SIZE * TERRAIN_TILE_PIXEL_SIZE];
	memset(pGeneratedVerticesId, -1, sizeof(effINT) * TERRAIN_TILE_PIXEL_SIZE * TERRAIN_TILE_PIXEL_SIZE);
	effINT nGeneratedVerticesNum = 0;

	std::vector<effFLOAT> aryVertices;
	for ( effINT i = 0; i < nLevel; i++ )
	{
		for ( effINT j = 0; j < m_nIndicesNum[i]; j++ )
		{
			effINT nIndex = (effINT)m_pIndices[i][j];
			if ( pGeneratedVerticesId[nIndex] == -1 )
			{
				effINT nX = nIndex % TERRAIN_TILE_PIXEL_SIZE;
				effINT nZ = -nIndex / TERRAIN_TILE_PIXEL_SIZE; // Z runs in the negative direction
				aryVertices.push_back((effFLOAT)nX);
				aryVertices.push_back(((effFLOAT)pEM[nIndex]) * 0.01f); // scaled elevation
				aryVertices.push_back((effFLOAT)nZ);

				m_pIndices[i][j] = (effUINT16)nGeneratedVerticesNum;
				pGeneratedVerticesId[nIndex] = nGeneratedVerticesNum;
				nGeneratedVerticesNum++;
			}
			else
			{
				m_pIndices[i][j] = (effUINT16)pGeneratedVerticesId[nIndex];
			}
		}
		m_nVerticesNum[i] = nGeneratedVerticesNum;
	}

	// Three floats (x, height, z) per vertex; new[] already counts in
	// elements, so the element count must not be scaled by sizeof(effFLOAT)
	m_pVertices = EFFNEW effFLOAT[3 * nGeneratedVerticesNum];
	memcpy(m_pVertices, &aryVertices[0], sizeof(effFLOAT) * 3 * nGeneratedVerticesNum);

	SFT_DELETE(pVerticesUsed);
	SFT_DELETE(pGeneratedVerticesId);
}
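// A sketch of the variance-tree computation called above, following the
// classic ROAM formulation: each node records how far the real height of
// its hypotenuse midpoint deviates from the linearly interpolated one,
// propagated upward as a max over the two child triangles. The signature
// mirrors the call sites above (pEM, output array, apex/left/right
// coordinates, node index 1 at the root), but the body, return type and
// recursion threshold are illustrative assumptions, not the engine's code.
effUINT16 EFF3DTerrainROAMImproveTileData::CalculateVariance(
	effUINT16 * pEM, effUINT16 * pVariance,
	effINT nApexX, effINT nApexZ, effINT nLeftX, effINT nLeftZ,
	effINT nRightX, effINT nRightZ, effINT nNode)
{
	// Midpoint of the hypotenuse (the left-right edge)
	effINT nCenterX = (nLeftX + nRightX) / 2;
	effINT nCenterZ = (nLeftZ + nRightZ) / 2;

	// Deviation of the real midpoint height from the interpolated edge height
	effINT nRealHeight = pEM[nCenterZ * TERRAIN_TILE_PIXEL_SIZE + nCenterX];
	effINT nAvgHeight = (pEM[nLeftZ * TERRAIN_TILE_PIXEL_SIZE + nLeftX]
		+ pEM[nRightZ * TERRAIN_TILE_PIXEL_SIZE + nRightX]) / 2;
	effINT nDelta = nRealHeight - nAvgHeight;
	if ( nDelta < 0 )
		nDelta = -nDelta;
	effUINT16 nVariance = (effUINT16)nDelta;

	// Recurse while the hypotenuse spans more than one grid cell,
	// keeping the maximum child variance
	effINT nDX = nLeftX - nRightX;
	if ( nDX < 0 ) nDX = -nDX;
	effINT nDZ = nLeftZ - nRightZ;
	if ( nDZ < 0 ) nDZ = -nDZ;
	if ( nDX >= 2 || nDZ >= 2 )
	{
		effUINT16 nLeftChild = CalculateVariance(pEM, pVariance,
			nCenterX, nCenterZ, nApexX, nApexZ, nLeftX, nLeftZ, nNode * 2);
		effUINT16 nRightChild = CalculateVariance(pEM, pVariance,
			nCenterX, nCenterZ, nRightX, nRightZ, nApexX, nApexZ, nNode * 2 + 1);
		if ( nLeftChild > nVariance )
			nVariance = nLeftChild;
		if ( nRightChild > nVariance )
			nVariance = nRightChild;
	}

	// Store this node's variance in the implicit binary tree
	if ( nNode < TERRAIN_TILE_VARIANCE_NODE_NUM )
	{
		pVariance[nNode] = nVariance;
	}
	return nVariance;
}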