void PatchTracking::setPatchRegion(RealImage::RegionType region)
{
    // Assign the same region to both patches, extract each, and map the
    // region into the other coordinate frame.
    _patchRegion[0] = region;
    _patchRegion[1] = region;
    extractPatch(0);
    extractPatch(1);
    transformPatchRegion();
}
cv::Point2f pyramidIteration(char& status, const cv::Point2f& pointI, const cv::Point2f& pointJ,
                             const cv::Mat& I, const cv::Mat& J, const int patchSize = 5)
{
    try
    {
        cv::Point2f result;

        // Extract a patch around the point (patchSize + 2 so that spatial
        // gradients can be taken inside the patch)
        std::vector<std::vector<float>> patch(patchSize + 2, std::vector<float>(patchSize + 2));
        std::vector<std::vector<float>> patchIt(patchSize, std::vector<float>(patchSize));

        status = extractPatch(patch, (int)pointI.x, (int)pointI.y, patchSize + 2, I);
        // if (status)
        //     return result;

        status = extractPatchIt(patchIt, pointI.x, pointI.y, pointJ.x, pointJ.y, I, J, patchSize);
        // if (status)
        //     return result;

        // Get the Ix, Iy and It vectors
        std::vector<float> ix, iy, it;
        getVectors(ix, iy, it, patch, patchIt, patchSize);

        // Calculate optical flow
        cv::Point2f delta;
        status = computeLK(delta, ix, iy, it);
        // if (status)
        //     return result;

        result = pointJ + delta;

        return result;
    }
    catch (const std::exception& e)
    {
        error(e.what(), __LINE__, __FUNCTION__, __FILE__);
        return cv::Point2f{};
    }
}
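pyramidIteration leans on helpers that are not part of this listing (extractPatch, extractPatchIt, getVectors, computeLK). For reference, here is a minimal sketch of what computeLK plausibly does, assuming it solves the standard 2x2 Lucas-Kanade normal equations built from the gradient and temporal-difference vectors; this is an illustration, not the actual implementation:

#include <cmath>
#include <vector>
#include <opencv2/core.hpp>

// Sketch of computeLK: solve the 2x2 Lucas-Kanade system
//   [ sum(Ix*Ix)  sum(Ix*Iy) ] [ dx ]     [ sum(Ix*It) ]
//   [ sum(Ix*Iy)  sum(Iy*Iy) ] [ dy ] = - [ sum(Iy*It) ]
// Returns a nonzero status when the structure tensor is near-singular
// (flat or aperture-limited patch), mirroring the status convention above.
char computeLK(cv::Point2f& delta, const std::vector<float>& ix,
               const std::vector<float>& iy, const std::vector<float>& it)
{
    float sxx = 0.f, sxy = 0.f, syy = 0.f, sxt = 0.f, syt = 0.f;

    // Accumulate the structure tensor and the gradient/temporal products.
    for(std::size_t k = 0; k < ix.size(); ++k)
    {
        sxx += ix[k] * ix[k];
        sxy += ix[k] * iy[k];
        syy += iy[k] * iy[k];
        sxt += ix[k] * it[k];
        syt += iy[k] * it[k];
    }

    const float det = sxx * syy - sxy * sxy;

    if(std::fabs(det) < 1e-6f)
        return 1; // no reliable flow for this patch

    // Cramer's rule on the 2x2 system.
    delta.x = (-syy * sxt + sxy * syt) / det;
    delta.y = ( sxy * sxt - sxx * syt) / det;
    return 0;
}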
//Do this when current trajectory is valid
void TLD::learn()
{
    if(!learningEnabled || !valid || !detectorEnabled)
    {
        learning = false;
        return;
    }

    learning = true;

    DetectionResult *detectionResult = detectorCascade->detectionResult;

    if(!detectionResult->containsValidData)
    {
#ifndef USE_HTLD
        detectorCascade->detect(currImg);
#else
        detectorCascade->detect(currImg, hTLDMaster->getTLDObject(tldObjId)->getFastDetStr(), false);
#endif
    }

    //This is the positive patch
    NormalizedPatch patch;
    tldExtractNormalizedPatchRect(currImg, currBB, patch.values);

    float *overlap = new float[detectorCascade->numWindows];
    tldOverlapRect(detectorCascade->windows, detectorCascade->numWindows, currBB, overlap);

    //Add all bounding boxes with high overlap
    vector<std::pair<int, float> > positiveIndices;
    vector<int> negativeIndices;
    vector<int> negativeIndicesForNN;

    //First: Find overlapping positive and negative patches
    for(int i = 0; i < detectorCascade->numWindows; i++)
    {
        if(overlap[i] > 0.6)
        {
            positiveIndices.push_back(std::pair<int, float>(i, overlap[i]));
        }

        if(overlap[i] < 0.2)
        {
            if(!detectorCascade->ensembleClassifier->enabled || detectionResult->posteriors[i] > 0.5) //Should be 0.5 according to the paper
            {
                negativeIndices.push_back(i);
                negativeIndicesForNN.push_back(i);
            }
        }
    }

    sort(positiveIndices.begin(), positiveIndices.end(), tldSortByOverlapDesc);

    vector<NormalizedPatch> patches;

    patch.positive = 1;
    patches.push_back(patch);
    //TODO: Flip

    int numIterations = std::min<size_t>(positiveIndices.size(), 10); //Take at most 10 bounding boxes (sorted by overlap)

    for(size_t i = 0; i < negativeIndices.size(); i++)
    {
        int idx = negativeIndices.at(i);
        //TODO: Somewhere here image warping might be possible
        detectorCascade->ensembleClassifier->learn(&detectorCascade->windows[TLD_WINDOW_SIZE * idx], false, &detectionResult->featureVectors[detectorCascade->numTrees * idx]);
    }

    //***********************************************************************************
    //**************Warping Positive Patches & Training the Classifier...***************
    //***********************************************************************************
    int bbW;
    int bbH;
    std::vector<int> indices(numIterations);

    for(int i = 0; i < numIterations; i++)
        indices[i] = positiveIndices.at(i).first;

    bbHull(detectorCascade->windowOffsets, indices, currImg.cols, hull);
    bbW = hull[2] - hull[0] + 1;
    bbH = hull[3] - hull[1] + 1;
    cv::Rect roi(hull[0], hull[1], bbW, bbH);

#ifdef USE_HTLD
    //Move Blurred Image to CPU...
    cudaMemcpy((void*)imBlurred->data, (void*)memMgr->getDevBlurredCurFrame(), sizeof(Npp8u) * currImg.rows * currImg.cols, cudaMemcpyDeviceToHost);
#else
    gaussianFilter->apply(currImg, *imBlurred);
#endif

    cv::Mat noise(bbH, bbW, CV_64FC1);
    cv::Mat result(bbH, bbW, CV_8UC1);

    //TODO: Make All Patch Related Params Configurable...
    for(int i = 0; i < 20; i++) //Here 20 is equal to # of Warped-Images...
    {
        if(i > 0)
        {
            //TODO: GPU is a Better Prospect for This Sort of Operations (Next Step for Parallelization)???!!!!
            extractPatch(*imBlurred, hull, bbW, bbH, (unsigned char)0, rng, 5.0, 20.0, 0.02, 0.02, noise, result);
            result.copyTo((*ppHolder)(roi));
        } //End of if-Block...

        for(int j = 0; j < numIterations; j++)
        {
            int idx = positiveIndices.at(j).first;
            detectorCascade->ensembleClassifier->calcFeatureVectorPatch(ppHolder->data, idx, &detectionResult->featureVectors[detectorCascade->numTrees * idx]);
            detectorCascade->ensembleClassifier->learn(&detectorCascade->windows[TLD_WINDOW_SIZE * idx], true, &detectionResult->featureVectors[detectorCascade->numTrees * idx]);
        }
    }

    for(size_t i = 0; i < negativeIndicesForNN.size(); i++)
    {
        int idx = negativeIndicesForNN.at(i);

        NormalizedPatch patch;
        tldExtractNormalizedPatchBB(currImg, &detectorCascade->windows[TLD_WINDOW_SIZE * idx], patch.values);
        patch.positive = 0;
        patches.push_back(patch);
    }

    detectorCascade->nnClassifier->learn(patches);

    //cout << "NN has now " << detectorCascade->nnClassifier->truePositives->size() << " positives and " << detectorCascade->nnClassifier->falsePositives->size() << " negatives.\n";

    delete[] overlap;
}
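The 0.6/0.2 thresholds above are applied to values produced by tldOverlapRect, which is not part of this listing. In OpenTLD the overlap measure is the intersection-over-union of two rectangles; a minimal per-pair sketch, assuming cv::Rect inputs rather than the raw int* window layout used by the cascade:

#include <opencv2/core.hpp>

// Sketch of the overlap measure: intersection over union of two boxes.
// The real tldOverlapRect fills the whole `overlap` array for all scanning
// windows; this shows only the computation for one window/box pair.
float rectOverlap(const cv::Rect& a, const cv::Rect& b)
{
    const cv::Rect inter = a & b; // intersection rectangle (empty if disjoint)

    if(inter.area() <= 0)
        return 0.f;

    const float unionArea = static_cast<float>(a.area() + b.area() - inter.area());
    return static_cast<float>(inter.area()) / unionArea;
}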
void PatchTracking::translatePatchRegion(RealImage::RegionType region)
{
    _patchRegion[0] = region;
    transformPatchRegion();
    extractPatch(0, false);
    resamplePatch();
}
void NNUtilities::extractPatch(const Vector2i& center, const Vector2i& inSize, const Vector2i& outSize,
                               const GrayscaledImage& src, GrayscaledImage& dest, const ExtractionMode mode)
{
    // Size the destination to the requested output resolution (width, height),
    // then forward to the channel-level overload.
    dest.setResolution(static_cast<unsigned int>(outSize(0)), static_cast<unsigned int>(outSize(1)));
    extractPatch(center, inSize, outSize, src, dest[0], mode);
}
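A hedged usage sketch of the wrapper above; cameraImage and the enumerator ExtractionMode::fast are placeholders for whatever source image and extraction modes the surrounding module actually provides:

// Hypothetical call site: extract a 32x32 patch centered at (160, 120),
// sampled from a 64x64 window of the source image. The enumerator name
// NNUtilities::ExtractionMode::fast is assumed, not taken from this listing.
GrayscaledImage patch;
const Vector2i center(160, 120); // patch center in source coordinates
const Vector2i inSize(64, 64);   // window sampled from the source image
const Vector2i outSize(32, 32);  // resolution of the extracted patch

NNUtilities::extractPatch(center, inSize, outSize, cameraImage, patch, NNUtilities::ExtractionMode::fast);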