// Estimate the query sample with the Auto-Associative Kernel Regression
// model: normalize the query with the training mean/std, compute kernel
// distances and weights, combine the normalized memory data with those
// weights, and de-normalize the result back to the original scale.
//
// query    - sample vector to estimate.
// variance - kernel bandwidth used when computing the weights.
// Returns the regressed estimate (or the query itself if all weights are zero).
Math::Matrix AAKR::estimate(Math::Matrix query, double variance)
{
  Math::Matrix mean;
  Math::Matrix stdev;
  normalize(mean, stdev);

  // Work in normalized space.
  computeDistance((query - mean) / stdev);
  computeWeights(variance);

  double weight_sum = sum(m_weights);

  // No usable weights: avoid dividing by zero and echo the query back.
  if (!weight_sum)
    return query;

  // Weighted combination of the normalized memory matrix.
  Math::Matrix combined = (transpose(m_weights) * m_norm) / weight_sum;

  // Map each component back to the original (de-normalized) scale.
  for (unsigned i = 0; i < sampleSize(); ++i)
    combined(i) = combined(i) * stdev(i) + mean(i);

  return combined;
}
// Run the iterative kernel-tracking loop: starting from the previous
// position (m_y0), repeatedly crop/resize the candidate region, compute its
// probability distribution, derive weights and shift to the next location,
// until checkCondition() reports convergence. Finally stores the similarity
// between the candidate and the target model in *rel.
//
// nextpos   - [out] converged position estimate.
// prob_cand - [out] candidate probability distribution (updated each pass).
// rel       - [out] similarity of the candidate vs. m_target_model.
// hval      - kernel scale/bandwidth parameter passed to the helpers.
// NOTE(review): loop termination relies entirely on checkCondition(&oldpos,
// nextpos, count) — presumably it enforces an iteration cap via 'count';
// confirm, since nothing else here bounds the loop.
void CueContrastKernel::runkernel(CvPoint2D32f* nextpos, CSCVector* prob_cand, float* rel, float hval)
{
    if(debug) std::cout << getName() << "::runkernel()\n";

    bool result = false;
    int count = 0;

    // Iteration starts from the previously known object position.
    CvPoint2D32f oldpos = m_y0;
    //CvSize objsize = m_track.winnerSize;

    if(debug) std::cout << getName() << "::runkernel()::oldpos = [" << oldpos.x << " " << oldpos.y << "]\n";

    while(!result){
        switch(m_opmode)
        {
            case SIMPLE:
                // Step 1: extract the candidate region at the current position
                // and compute its probability distribution.
                //cropNResize(&oldpos, &objsize, hval);
                cropNResize(&oldpos, &m_objsizenorm, hval);
                calculateProbability(prob_cand, hval);
                break;
            default:
                // Only SIMPLE mode is implemented; bail out otherwise.
                std::cerr << getName() << "::runkernel()::Currently only SIMPLE mode is supported!\n";
                return;
        }
        computeWeights(prob_cand); // Step2
        findNextLocation(nextpos, hval); // Step 3
        //if(debug) std::cout << getName() << "::runkernel()::nextpos = [" << nextpos->x << " " << nextpos->y << "]\n";
        result = checkCondition(&oldpos, nextpos, count); // Step 6
        if(debug) std::cout << getName() << "::runkernel()::count = " << count << ", nextpos = [" << nextpos->x << ", " << nextpos->y << "]\n";
        count++;
    }

    // Reliability of the converged candidate against the stored target model.
    (*rel) = computeSimilarity(prob_cand, &m_target_model);

    if(debug) std::cout << getName() << "::runkernel()::while() loop complete::count = " << count << ", rel = " << (*rel) << ", nextpos = [" << nextpos->x << ", " << nextpos->y << "]\n";
}
// Construct a convertor for the given illuminant/observer pair: look up the
// CIE tables, derive the illuminant white point and precompute the spectral
// weighting factors used by later conversions.
ScSpectralValuesConvertor::ScSpectralValuesConvertor(eIlluminant illuminant, eObserver observer)
{
    const ScCIEIlluminant& cieIlluminant = ScCIEData::instance().cieIlluminant(illuminant);
    const ScCIEObserver&   cieObserver   = ScCIEData::instance().cieObserver(observer);

    // White point first, then the per-wavelength weights.
    m_illuminantWhite = computeIlluminantWhite(cieIlluminant, cieObserver);
    computeWeights(cieIlluminant, cieObserver);
}
/*
 * Compute a variable ordering for graph 'g'.
 * The graph is first made acyclic and re-weighted, successors are ordered
 * according to 'choice' and the vertex list is sorted by weight; a DFS visit
 * over every not-yet-visited vertex then accumulates the final ordering.
 * Returns a newly allocated linked list (caller owns it).
 */
linkedList* variableOrdering(graph *g, int choice)
{
    item *node;
    linkedList *ordered;

    /* Prepare the graph: break cycles and assign fresh weights. */
    breakCycles(g);
    resetWeights(g);
    computeWeights(g);

    /* Order successors per the chosen heuristic, then sort the vertices. */
    genericSuccessorsOrdering(g, choice);
    orderByWeights(g->vertices);

    /* DFS from every unvisited vertex, appending to the ordering. */
    resetVisited(g);
    ordered = newLinkedList();
    for (node = g->vertices->head; node != NULL; node = node->next) {
        if (node->vert->visited == 0)
            visit(node->vert, ordered);
    }

    return ordered;
}
virtual void buildKernel(PtexSeparableKernel& k, float u, float v, float uw, float vw, Res faceRes) { // clamp filter width to no larger than 1.0 uw = PtexUtils::min(uw, 1.0f); vw = PtexUtils::min(vw, 1.0f); // clamp filter width to no smaller than a texel uw = PtexUtils::max(uw, PtexUtils::reciprocalPow2(faceRes.ulog2)); vw = PtexUtils::max(vw, PtexUtils::reciprocalPow2(faceRes.vlog2)); // compute desired texture res based on filter width uint8_t ureslog2 = (uint8_t)PtexUtils::calcResFromWidth(uw); uint8_t vreslog2 = (uint8_t)PtexUtils::calcResFromWidth(vw); Res res(ureslog2, vreslog2); k.res = res; // convert from normalized coords to pixel coords u = u * (float)k.res.u(); v = v * (float)k.res.v(); uw *= (float)k.res.u(); vw *= (float)k.res.v(); // find integer pixel extent: [u,v] +/- [uw/2,vw/2] // (box is 1 unit wide for a 1 unit filter period) float u1 = u - 0.5f*uw, u2 = u + 0.5f*uw; float v1 = v - 0.5f*vw, v2 = v + 0.5f*vw; float u1floor = PtexUtils::floor(u1), u2ceil = PtexUtils::ceil(u2); float v1floor = PtexUtils::floor(v1), v2ceil = PtexUtils::ceil(v2); k.u = int(u1floor); k.v = int(v1floor); k.uw = int(u2ceil)-k.u; k.vw = int(v2ceil)-k.v; // compute kernel weights along u and v directions computeWeights(k.ku, k.uw, 1.0f-(u1-u1floor), 1.0f-(u2ceil-u2)); computeWeights(k.kv, k.vw, 1.0f-(v1-v1floor), 1.0f-(v2ceil-v2)); }
const void SMC::resample( vector< StateProgression > * particles, \ vector < vector<double> > * cloudData, \ Params params, \ int currTime,\ int numOfParticles){ vector<double> weights(particles->size()); for(int i =0 ; i< numOfParticles; i ++ ) weights[i] = computeWeights(& particles->at(i) , currTime , cloudData , params); double sum = 0; for_each( weights.begin(), weights.end(), [&sum] (double y) mutable { sum+=y; }); for(unsigned int i =0 ; i < weights.size();i++) weights[i] = weights[i]/sum; vector< StateProgression > tempParts; for(unsigned int i = 0 ; i< particles->size() ; i ++){ int index = ut.randcat( & weights); tempParts.push_back(particles->at(index)); } *particles = tempParts; }
// Build a one-direction (horizontal or vertical) Gaussian blur filter.
// Validates the blur size, then precomputes the linear-sampling weights and
// offsets handed to the shader.
// See http://rastergrid.com/blog/2010/09/efficient-gaussian-blur-with-linear-sampling/
GaussianBlurFilter::GaussianBlurFilter(const TextureFilterBuilder *textureFilterBuilder, GaussianDirection gaussianDirection):
    TextureFilter(textureFilterBuilder),
    gaussianDirection(gaussianDirection),
    blurSize(textureFilterBuilder->getBlurSize()),
    nbTextureFetch(std::ceil(blurSize / 2.0f)),
    textureSize((GaussianDirection::VERTICAL==gaussianDirection) ? getTextureHeight() : getTextureWidth())
{
    // Reject invalid blur sizes up front: must be an odd number > 1.
    if(blurSize <= 1)
    {
        throw std::invalid_argument("Blur size must be greater than one. Value: " + std::to_string(blurSize));
    }
    if(blurSize % 2 == 0)
    {
        throw std::invalid_argument("Blur size must be an odd number. Value: " + std::to_string(blurSize));
    }

    // Discrete Gaussian weights, folded pairwise into linear-sampling
    // weights so the shader needs roughly half the texture fetches.
    std::vector<float> discreteWeights = computeWeights();
    std::vector<float> linearWeights = computeWeightsLinearSampling(discreteWeights);
    weightsTab = toShaderVectorValues(linearWeights);

    // Matching sample offsets for the linear-sampling weights.
    std::vector<float> linearOffsets = computeOffsetsLinearSampling(discreteWeights, linearWeights);
    offsetsTab = toShaderVectorValues(linearOffsets);
}