void FacialFeatureRecognizerTest::compareSmileFrownSimilarFacesTest() { Ptr<FaceRecognizer> model = createFisherFaceRecognizer(); FacialFeatureRecognizer recognizer(model, 0, NULL, NULL, NULL); QString trainingFile("/home/zane/Documents/COS301/training.xml"); recognizer.loadTrainingFromXML(trainingFile); Mat face1 = imread("../../testFiles/FaceRec/barack_smile.jpg", CV_LOAD_IMAGE_UNCHANGED); Mat face2 = imread("../../testFiles/FaceRec/barack_frown.jpg", CV_LOAD_IMAGE_UNCHANGED); string faceCascade = "/home/zane/Documents/COS301/MainProject/testFiles/haarcascade_frontalface_alt2.xml"; Filter* faceDetect = new FaceDetectFilter(faceCascade); Filter* preProc = new PreProcessingFilter(140, 150); ImageData* data1 = new ImageData(face1, 0); data1 = faceDetect->filter(data1); data1 = preProc->filter(data1); ImageData* data2 = new ImageData(face2, 0); data2 = faceDetect->filter(data2); data2 = preProc->filter(data2); double expected = 5800; double actual = recognizer.compareFaces(data1->faces[0], data2->faces[0]); QVERIFY(actual <= expected); }
bool Type::make_one_normal_field_by_enum(Enumerator<string> &enumerator, vector<const Type*> &all_types, vector<CVQualifiers> &all_quals, vector<const Type*> &fields, vector<CVQualifiers> &quals, vector<int> &fields_length, int i) { int types_size = all_types.size(); int quals_size = all_quals.size(); Filter *filter = SIMPLE_TYPES_PROB_FILTER; std::ostringstream ss1, ss2; ss1 << "field" << i; int typ_index = enumerator.get_elem(ss1.str()); assert(typ_index >= 0 && typ_index < types_size); Type *typ = const_cast<Type*>(all_types[typ_index]); if (typ->eType == eSimple) { assert(typ->simple_type != eVoid); if (filter->filter(typ->simple_type)) return false; } assert(typ != NULL); fields.push_back(typ); ss2 << "qualifier" << i; int qual_index = enumerator.get_elem(ss2.str()); assert(qual_index >= 0 && qual_index < quals_size); CVQualifiers qual = all_quals[qual_index]; quals.push_back(qual); fields_length.push_back(-1); return true; }
bool NonVoidNonVolatileTypeFilter::filter(int v) const { assert(static_cast<unsigned int>(v) < AllTypes.size()); Type *type = AllTypes[v]; if (type->eType == eSimple && type->simple_type == eVoid) return true; if (type->is_aggregate() && type->is_volatile_struct_union()) return true; if ((type->eType == eStruct) && (!CGOptions::arg_structs())) { return true; } if ((type->eType == eUnion) && (!CGOptions::arg_unions())) { return true; } if (!type->used) { Bookkeeper::record_type_with_bitfields(type); type->used = true; } typ_ = type; if (type->eType == eSimple) { Filter *filter = SIMPLE_TYPES_PROB_FILTER; return filter->filter(typ_->simple_type); } return false; }
/*
 * Runs every filter in the list over *image, replacing the image with each
 * filter's output (the previous image is freed). A filter with a null area
 * is given the whole current image as its area. If a filter's area does not
 * fit the image, fatal() is invoked with a diagnostic.
 *
 * Fix: the diagnostic's geometry placeholder "%3x%4+%5+%6" reads as
 * WIDTHxHEIGHT+X+Y, but the arguments were supplied as x, y, width, height;
 * they are now passed in the order the format string expects.
 */
void processFilters(QImage *& image, QListIterator<Filter *> i)
{
	while (i.hasNext()) {
		Filter * f = i.next();
		uint w = image->width();
		uint h = image->height();
		// Default a null area to the full image.
		if (f->getArea().isNull()) {
			f->setArea(QRect(QPoint(0, 0), QSize(w, h)));
		}
		if (! f->isApplicable(w, h)) {
			QRect area = f->getArea();
			fatal(QObject::tr("Too small image (%1x%2), pointed filter"
			                  " with pointed area (%3x%4+%5+%6) not applicable")
			      .arg(w).arg(h)
			      .arg(area.width()).arg(area.height())
			      .arg(area.x()).arg(area.y()));
		}
		// Replace the image with the filtered result; free the old one.
		QImage * newImage = f->filter(*image);
		delete image;
		image = newImage;
	}
}
// >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> bool Import::filterObject(Core::BaseObject* obj) { Filter* filter = Filter::GetFilter(obj->className()); if ( filter ) { if ( !filter->filter(obj) ) return true; } else { SEISCOMP_DEBUG("Filter for class: %s not available", obj->className()); return true; } return false; }
bool ChooseRandomTypeFilter::filter(int v) const { assert((v >= 0) && (static_cast<unsigned int>(v) < AllTypes.size())); typ_ = AllTypes[v]; assert(typ_); if (typ_->eType == eSimple) { Filter *filter = SIMPLE_TYPES_PROB_FILTER; return filter->filter(typ_->simple_type); } else if ((typ_->eType == eStruct) && (!CGOptions::return_structs())) { return true; } if (for_field_var_ && typ_->get_struct_depth() >= CGOptions::max_nested_struct_level()) { return true; } return false; }
bool NonVoidTypeFilter::filter(int v) const { assert(static_cast<unsigned int>(v) < AllTypes.size()); Type *type = AllTypes[v]; if (type->eType == eSimple && type->simple_type == eVoid) return true; if (!type->used) { Bookkeeper::record_type_with_bitfields(type); type->used = true; } typ_ = type; if (type->eType == eSimple) { Filter *filter = SIMPLE_TYPES_PROB_FILTER; return filter->filter(typ_->simple_type); } return false; }
int main(int argc, char const *argv[]) { Filter filter; auto f = [=](int & a) { std::cout << "a: " << a << std::endl; }; filter.push(f); auto sqrF = [=](int & a) { int sqr = a * a; std::cout << "a2: " << sqr << std::endl; }; filter.push(sqrF); filter.run(5); for (auto e : filter.filter([](int a){return a % 2 == 0;})) { std::cout << e << " "; } std::cout << std::endl; return 0; }
// Thin file-local dispatch helper: applies the given filter to the point
// view in place by forwarding to Filter::filter().
static void filter(Filter& f, PointView& view) { f.filter(view); }
int main(int argc, char* argv[]) { signal(SIGINT, interrupted); signal(SIGTERM, interrupted); // Evalutate/check command line arguments Options opts(argc, argv); if (opts.isOk() != true) { return 1; } Filter filter; filter.setRedEnabled(opts.isRedEnabled()); filter.setYellowEnabled(opts.isYellowEnabled()); // If processing a single file (-f FILE) if (opts.isFileMode()) { Mat orig = imread(opts.getImageFile()); // Create base name for output files string baseName(opts.getImageFile()); int pos = baseName.rfind('.'); if (pos != string::npos) { baseName.erase(pos); } avc::Timer timer; Found found = filter.filter(orig); filter.printFrameRate(cout, timer.secsElapsed()); // Write out individual image files filter.writeImages(baseName, orig, false); // Return 0 to parent process if we found image (for scripting) return (found == Found::None ? 1 : 0); } // Video processing VideoCapture videoFeed(0); { int attempts = 0; while (!videoFeed.isOpened()) { float waitSecs = 3; attempts++; cerr << "Failed to open camera on attempt " << attempts << ", trying again in " << waitSecs << " seconds.\n"; avc::Timer::sleep(waitSecs); if (isInterrupted) { return 1; } videoFeed.release(); videoFeed.open(0); } } videoFeed.set(CV_CAP_PROP_FRAME_WIDTH, 320); videoFeed.set(CV_CAP_PROP_FRAME_HEIGHT, 240); Mat origFrame; // Where to write out information about what we see ofstream stanchionsFile(opts.getStanchionsFile()); // Get initial frame and toss (incase first one is bad) videoFeed >> origFrame; avc::Timer timer; int foundLast = -1; while (!isInterrupted) { videoFeed >> origFrame; int found = filter.filter(origFrame); if ((found != foundLast) || opts.verbose()) { opts.writeToChangeDir(origFrame, filter.getFileData().frameCount); filter.printFrameRate(cout, timer.secsElapsed()); foundLast = found; } else { // No change in detection state, however, go write out image // if user enabled the periodic feature (-p PERIODIC) and we've // reached the periodic count opts.writePeriodic(origFrame, 
filter.getFileData().frameCount); } stanchionsFile.seekp(0); stanchionsFile.write((const char*) &filter.getFileData(), sizeof(FileData)); stanchionsFile.flush(); } if (filter.getFileData().frameCount > 0) { filter.printFrameRate(cout, timer.secsElapsed()); filter.writeImages(opts.getOutputDir() + "/avc-vision", origFrame, true); } else { cout << "***ERROR*** Failed to read/process any video frames from camera\n"; } return 0; }