// Builds an evaluation result directly from confusion-matrix counts and
// derives the aggregate metrics (precision, recall, accuracy) from them
// once, at construction time.
DetectorEvaluationResult::DetectorEvaluationResult(size_t truePositives, size_t trueNegatives, size_t falsePositives, size_t falseNegatives) :
    _truePositives(truePositives),
    _trueNegatives(trueNegatives),
    _falsePositives(falsePositives),
    _falseNegatives(falseNegatives) {
    // The members were just initialized to the ctor arguments, so the
    // metrics can be computed from them directly.
    _precision = computePrecision(_truePositives, _falsePositives);
    _recall = computeRecall(_truePositives, _falseNegatives);
    _accuracy = computeAccuracy(_truePositives, _trueNegatives, _falsePositives, _falseNegatives);
}
// Samples a (noisy) camera observation for state s1 under action a.
// Returns the observed camera field, or 0 when nothing is seen.
// NOTE(review): the sampling order matters — the precision draw happens
// before the direction draw, so both consume rand_ in a fixed sequence.
size_t CameraPathModel::sampleObservation(size_t s1, size_t a) const {
    // dist1 picks one of four neighbouring directions (0-3) or 4 ("stay");
    // prob draws the success threshold for the precision check below.
    static std::uniform_int_distribution<unsigned> dist1(0, 4);
    static std::uniform_real_distribution<double> prob(0, 1); // Modified this line from Basic
    s1 = convertToNormalState(s1);
    size_t positionUnderCamera = checkCameraField(a, s1);
    // If the person is under the camera, we see it more correctly
    // towards the center, and worse towards the edges (not exactly
    // like this, since we compute imprecision from center as if the
    // camera sees a line rather than an area, but close enough.
    if ( positionUnderCamera != 0 ) {
        double precision = computePrecision(positionUnderCamera, cameraData[a][0]);
        // Camera worked correctly
        if ( precision > prob(rand_) )
            return positionUnderCamera;
        // We return a state close to the one we saw (kind of noise..) or nothing
        // (5 equally likely noise outcomes: 4 = keep the reading, 0-3 = the
        // camera field of the corresponding neighbouring state, which may be
        // 0 when that neighbour falls outside the camera's view).
        int cameraCheck = dist1(rand_);
        if ( cameraCheck == 4 )
            return positionUnderCamera;
        return checkCameraField( a, getNextDirState(s1, cameraCheck) );
    }
    // Otherwise we don't see the target.
    else {
        return 0;
    }
}
// Builds an evaluation result by comparing a voting mask against the merged
// ground-truth target masks.
// @param votingMask          per-pixel detector votes to evaluate
// @param targetMasks         per-target ground-truth masks, merged before comparison
// @param votingMaskThreshold minimum vote value for a pixel to count as a positive
DetectorEvaluationResult::DetectorEvaluationResult(Mat& votingMask, vector<Mat>& targetMasks, unsigned short votingMaskThreshold) :
    _truePositives(0), _trueNegatives(0), _falsePositives(0), _falseNegatives(0) {
    // Fix: give the metrics safe defaults up front. Previously, when
    // mergeTargetMasks failed, _precision/_recall/_accuracy were left
    // uninitialized (indeterminate doubles), unlike the count-based ctor
    // which always sets them.
    _precision = 0.0;
    _recall = 0.0;
    _accuracy = 0.0;
    Mat mergedTargetsMask;
    if (ImageUtils::mergeTargetMasks(targetMasks, mergedTargetsMask)) {
        computeMasksSimilarity(votingMask, mergedTargetsMask, votingMaskThreshold,
                               &_truePositives, &_trueNegatives, &_falsePositives, &_falseNegatives);
        _precision = computePrecision(_truePositives, _falsePositives);
        _recall = computeRecall(_truePositives, _falseNegatives);
        _accuracy = computeAccuracy(_truePositives, _trueNegatives, _falsePositives, _falseNegatives);
    }
}
// Probability of observing o when the target is in state s1 and action
// (camera) a is used. Mirrors the noise model used by sampleObservation:
// the correct field is seen with probability `precision`, and the remaining
// mass is split into five equal shares (stay + four neighbouring directions).
double CameraPathModel::getObservationProbability(size_t s1, size_t a, size_t o) const {
    // Changed this line from basic
    s1 = convertToNormalState(s1);
    const size_t seenField = checkCameraField(a, s1);
    // Target outside the camera's field: the only possible observation is 0.
    if ( seenField == 0 )
        return ( o == 0 ) ? 1.0 : 0.0;
    // Precision with the camera in use; each noise outcome gets an equal share.
    const double precision = computePrecision(seenField, cameraData[a][0]);
    const double noiseShare = (1.0 - precision) / 5.0;
    // Correct reading: camera worked, or noise happened to pick "stay".
    if ( o == seenField )
        return precision + noiseShare;
    // Otherwise accumulate one share per direction whose neighbouring state
    // maps to observation o (edge states can collapse onto the same field,
    // including 0 for neighbours outside the camera's view).
    double total = 0.0;
    for ( unsigned dir = 0; dir < 4; ++dir ) {
        if ( checkCameraField(a, getNextDirState(s1, dir)) == o )
            total += noiseShare;
    }
    return total;
}