bool FacemarkKazemiImpl::getFaces(InputArray image, OutputArray faces) {
    CV_Assert(faceDetector);
    return faceDetector(image, faces, faceDetectorData);
}
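/*
 * For context: getFaces() simply forwards to whatever callback was registered
 * through setFaceDetector(). Below is a minimal sketch of wiring up a custom
 * detector, modeled on the opencv_contrib facemark samples; the cascade path
 * and the myDetector helper are illustrative assumptions, not part of the
 * code above.
 */
#include "opencv2/face.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/objdetect.hpp"

// Callback matching cv::face::FN_FaceDetector: bool(InputArray, OutputArray, void*).
static bool myDetector(cv::InputArray image, cv::OutputArray faces, void *userData) {
    cv::CascadeClassifier *cascade = static_cast<cv::CascadeClassifier *>(userData);
    cv::Mat gray;
    if (image.channels() > 1)
        cv::cvtColor(image, gray, cv::COLOR_BGR2GRAY);
    else
        gray = image.getMat().clone();
    cv::equalizeHist(gray, gray);
    std::vector<cv::Rect> found;
    cascade->detectMultiScale(gray, found, 1.4, 2, cv::CASCADE_SCALE_IMAGE, cv::Size(30, 30));
    cv::Mat(found).copyTo(faces);
    return true;
}

// Usage: once registered, landmark fitting calls getFaces() -> myDetector().
//   cv::CascadeClassifier cascade("haarcascade_frontalface_alt2.xml"); // path is an assumption
//   cv::Ptr<cv::face::FacemarkKazemi> facemark = cv::face::FacemarkKazemi::create();
//   facemark->setFaceDetector(myDetector, &cascade);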
// This method is the main workhorse, and is run by the camera thread.
static void *FCamAppThread(void *ptr) {
    FCAM_INTERFACE_DATA *tdata = (FCAM_INTERFACE_DATA *)ptr;
    Timer timer;
    JNIEnv *env;
    tdata->javaVM->AttachCurrentThread(&env, 0);
    writer = 0; // Initialized on the first PARAM_OUTPUT_DIRECTORY set request.

    // Initialize FCam devices.
    FCam::Tegra::Sensor sensor;
    FCam::Tegra::Lens lens;
    FCam::Tegra::Flash flash;
    sensor.attach(&lens);
    sensor.attach(&flash);
    MyAutoFocus autofocus(&lens);
    MyFaceDetector faceDetector("/data/fcam/face.xml");

    FCam::Image previewImage(PREVIEW_IMAGE_WIDTH, PREVIEW_IMAGE_HEIGHT, FCam::YUV420p);
    FCam::Tegra::Shot shot;

    // Initialize FPS stat calculation.
    tdata->captureFps = 30; // Assume 30 Hz until measured.
    double fpsUpdateTime = timer.get();
    int frameCount = 0;

    // Local task queue that processes messages from the Android application.
    std::queue<ParamSetRequest> taskQueue;
    ParamSetRequest task;

    for (;;) {
        FCAM_SHOT_PARAMS *currentShot = &tdata->currentShot;
        FCAM_SHOT_PARAMS *previousShot = &tdata->previousShot;

        // Copy tasks to the local queue.
        sAppData->requestQueue.consumeAll(taskQueue);

        // Parse all tasks from the Android application.
        while (!taskQueue.empty()) {
            task = taskQueue.front();
            taskQueue.pop();

            bool prevValue;
            int taskId = task.getId() & 0xffff;
            int *taskData = (int *)task.getData();
            int pictureId = task.getId() >> 16;

            switch (taskId) {
            case PARAM_SHOT:
                // Note: Exposure is bounded below at 1/1000 s (FCam bug?).
                currentShot->captureSet[pictureId].exposure =
                    taskData[SHOT_PARAM_EXPOSURE] < 1000 ? 1000 : taskData[SHOT_PARAM_EXPOSURE];
                currentShot->captureSet[pictureId].focus = taskData[SHOT_PARAM_FOCUS];
                currentShot->captureSet[pictureId].gain = taskData[SHOT_PARAM_GAIN];
                currentShot->captureSet[pictureId].wb = taskData[SHOT_PARAM_WB];
                currentShot->captureSet[pictureId].flashOn = taskData[SHOT_PARAM_FLASH];
                break;
            case PARAM_PREVIEW_EXPOSURE:
                currentShot->preview.user.exposure = taskData[0];
                break;
            case PARAM_PREVIEW_FOCUS:
                currentShot->preview.user.focus = taskData[0];
                break;
            case PARAM_PREVIEW_GAIN:
                currentShot->preview.user.gain = taskData[0];
                break;
            case PARAM_PREVIEW_WB:
                currentShot->preview.user.wb = taskData[0];
                break;
            case PARAM_PREVIEW_AUTO_EXPOSURE_ON:
                prevValue = currentShot->preview.autoExposure;
                currentShot->preview.autoExposure = taskData[0] != 0;
                if (!prevValue && currentShot->preview.autoExposure) {
                    // Auto-exposure just switched on: seed it with the manual value.
                    previousShot->preview.evaluated.exposure = currentShot->preview.user.exposure;
                } else {
                    // Switched off (or unchanged): carry the evaluated value back to manual.
                    currentShot->preview.user.exposure = previousShot->preview.evaluated.exposure;
                }
                break;
            case PARAM_PREVIEW_AUTO_FOCUS_ON:
                prevValue = currentShot->preview.autoFocus;
                currentShot->preview.autoFocus = taskData[0] != 0;
                if (!prevValue && currentShot->preview.autoFocus) {
                    previousShot->preview.evaluated.focus = currentShot->preview.user.focus;
                } else {
                    currentShot->preview.user.focus = previousShot->preview.evaluated.focus;
                }
                break;
            case PARAM_PREVIEW_AUTO_GAIN_ON:
                prevValue = currentShot->preview.autoGain;
                currentShot->preview.autoGain = taskData[0] != 0;
                if (!prevValue && currentShot->preview.autoGain) {
                    previousShot->preview.evaluated.gain = currentShot->preview.user.gain;
                } else {
                    currentShot->preview.user.gain = previousShot->preview.evaluated.gain;
                }
                break;
            case PARAM_PREVIEW_AUTO_WB_ON:
                prevValue = currentShot->preview.autoWB;
                currentShot->preview.autoWB = taskData[0] != 0;
                if (!prevValue && currentShot->preview.autoWB) {
                    previousShot->preview.evaluated.wb = currentShot->preview.user.wb;
                } else {
                    currentShot->preview.user.wb = previousShot->preview.evaluated.wb;
                }
                break;
            case PARAM_RESOLUTION:
                break;
            case PARAM_BURST_SIZE:
                currentShot->burstSize = taskData[0];
                break;
            case PARAM_OUTPUT_FORMAT:
                break;
            case PARAM_VIEWER_ACTIVE:
                tdata->isViewerActive = taskData[0] != 0;
                break;
            case PARAM_OUTPUT_DIRECTORY:
                if (writer == 0) {
                    writer = new AsyncImageWriter((char *)task.getData());
                    writer->setOnFileSystemChangedCallback(OnFileSystemChanged);
                }
                break;
            case PARAM_OUTPUT_FILE_ID:
                AsyncImageWriter::SetFreeFileId(taskData[0]);
                break;
            case PARAM_TAKE_PICTURE:
                // Don't take a picture if we can't write it out.
                if (writer != 0 && task.getDataAsInt() != 0) {
                    // Capture begins: notify the Java side.
                    tdata->isCapturing = true;
                    env->CallVoidMethod(tdata->fcamInstanceRef, tdata->notifyCaptureStart);
                    OnCapture(tdata, writer, sensor, flash, lens);
                    // Capture done: notify completion.
                    tdata->isCapturing = false;
                    env->CallVoidMethod(tdata->fcamInstanceRef, tdata->notifyCaptureComplete);
                }
                break;
            case PARAM_PRIV_FS_CHANGED:
                if (taskData[0] != 0) {
                    // Notify the Java side of the file system change.
                    env->CallVoidMethod(tdata->fcamInstanceRef, tdata->notifyFileSystemChange);
                }
                break;
            /* [CS478]
             * You will probably want extra cases here, to handle messages
             * that request autofocus to be activated. Define any new
             * message types in ParamSetRequest.h.
             */
            case PARAM_AUTO_FOCUS_LOCAL_REG:
                // Center a fixed-size focus rectangle on the touch location.
                autofocus.state = AUTO_FOCUS_FOCUS;
                autofocus.setRect(taskData[0] - RECT_EDGE_LEN / 2,
                                  taskData[1] - RECT_EDGE_LEN / 2); // hack, TODO
                autofocus.startSweep();
                break;
            case PARAM_AUTO_FOCUS_GLOBAL:
                // Focus using the whole preview frame.
                autofocus.state = AUTO_FOCUS_FOCUS;
                autofocus.setRect(0, 0, PREVIEW_IMAGE_WIDTH, PREVIEW_IMAGE_HEIGHT);
                autofocus.startSweep();
                break;
            /* [CS478] Assignment #2
             * You will probably want yet another extra case here to handle
             * face-based autofocus. Recall that it might be useful to add a
             * new message type in ParamSetRequest.h.
             */
            case PARAM_AUTO_FOCUS_FACE:
                LOG("MYFOCUS face focus switch\n");
                autofocus.state = AUTO_FOCUS_FACE_DETECT;
                autofocus.fdWait();
                break;
            default:
                ERROR("TaskDispatch(): received unsupported task id (%i)!", taskId);
            }
        }

        if (!tdata->isViewerActive) continue; // Viewer is inactive, so skip capture.

        // Set up preview shot parameters.
        shot.exposure = currentShot->preview.autoExposure ?
            previousShot->preview.evaluated.exposure : currentShot->preview.user.exposure;
        shot.gain = currentShot->preview.autoGain ?
            previousShot->preview.evaluated.gain : currentShot->preview.user.gain;
        shot.whiteBalance = currentShot->preview.autoWB ?
            previousShot->preview.evaluated.wb : currentShot->preview.user.wb;
        shot.image = previewImage;
        shot.histogram.enabled = true;
        shot.histogram.region = FCam::Rect(0, 0, PREVIEW_IMAGE_WIDTH, PREVIEW_IMAGE_HEIGHT);
        shot.sharpness.enabled = currentShot->preview.autoFocus;
        shot.sharpness.size = FCam::Size(16, 12);
        shot.fastMode = true;
        shot.clearActions();

        // If in manual focus mode and the lens is not at the right place, add an action to move it.
        if (!currentShot->preview.autoFocus &&
            previousShot->preview.user.focus != currentShot->preview.user.focus) {
            shot.clearActions();
            FCam::Lens::FocusAction focusAction(&lens);
            focusAction.time = 0;
            focusAction.focus = currentShot->preview.user.focus;
            shot.addAction(focusAction);
        }

        // Send the shot request to FCam.
        sensor.stream(shot);

        // Fetch the incoming frame from FCam.
        FCam::Frame frame = sensor.getFrame();

        // Process the incoming frame.
        // If autoExposure or autoGain is enabled, update parameters based on the frame.
        if (currentShot->preview.autoExposure || currentShot->preview.autoGain) {
            FCam::autoExpose(&shot, frame, sensor.maxGain(), sensor.maxExposure(),
                             sensor.minExposure(), 0.3);
            currentShot->preview.evaluated.exposure = shot.exposure;
            currentShot->preview.evaluated.gain = shot.gain;
        }

        // If autoWB is enabled, update parameters based on the frame.
        if (currentShot->preview.autoWB) {
            FCam::autoWhiteBalance(&shot, frame);
            currentShot->preview.evaluated.wb = shot.whiteBalance;
        }

        if (autofocus.state == AUTO_FOCUS_FACE_DETECT) {
            std::vector<cv::Rect> facesFound = faceDetector.detectFace(frame.image());
            // Draw a bright box around each detected face in the luma plane.
            for (unsigned int i = 0; i < facesFound.size(); i++) {
                cv::Rect r = facesFound[i];
                for (int x = 0; x < r.width; x++) {
                    frame.image()(r.x + x, r.y)[0] = 254u;
                    frame.image()(r.x + x, r.y + r.height)[0] = 254u;
                }
                for (int y = 0; y < r.height; y++) {
                    frame.image()(r.x, r.y + y)[0] = 254u;
                    frame.image()(r.x + r.width, r.y + y)[0] = 254u;
                }
            }
            if (!facesFound.empty()) autofocus.setRects(facesFound);
            autofocus.fdWait();
        }
        /* [CS478] Assignment #2
         * Above, facesFound contains the list of detected faces for the given
         * frame. If applicable, you may pass these values to the MyAutoFocus
         * instance, e.g. autofocus.setTarget(facesFound). Note that MyAutoFocus
         * currently has no setTarget method; you'd have to write the
         * appropriate interface. You should also only run
         * faceDetector.detectFace(...) when necessary, to save compute.
         */

        /* [CS478] Assignment #1
         * You should process the incoming frame for autofocus, if necessary.
         * Your autofocus (MyAutoFocus.h) has a function called update(...).
         */
        if (autofocus.state == AUTO_FOCUS_FOCUS) {
            autofocus.update(frame);
        }
        if (currentShot->preview.autoFocus) {
            currentShot->preview.evaluated.focus = (float)frame["lens.focus"];
        }

        // Update histogram data.
        const FCam::Histogram &histogram = frame.histogram();
        int maxBinValue = 1;
        for (int i = 0; i < 64; i++) {
            int currBinValue = histogram(i);
            maxBinValue = (currBinValue > maxBinValue) ? currBinValue : maxBinValue;
            currentShot->histogramData[i * 4] = currBinValue;
        }
        float norm = 1.0f / maxBinValue;
        for (int i = 0; i < 64; i++) {
            currentShot->histogramData[i * 4] *= norm;
            currentShot->histogramData[i * 4 + 1] = 0.0f;
            currentShot->histogramData[i * 4 + 2] = 0.0f;
            currentShot->histogramData[i * 4 + 3] = 0.0f;
        }

        // Update the frame buffer.
        uchar *src = (uchar *)frame.image()(0, 0);
        FCam::Tegra::Hal::SharedBuffer *captureBuffer = tdata->tripleBuffer->getBackBuffer();
        uchar *dest = (uchar *)captureBuffer->lock();
        // The U and V planes are swapped while copying, most likely because the
        // display side expects the opposite plane order (YV12 stores V before U,
        // while I420/YUV420p stores U before V), rather than because of a bug.
        memcpy(dest, src, PI_PLANE_SIZE);
        memcpy(dest + PI_U_OFFSET, src + PI_V_OFFSET, PI_PLANE_SIZE >> 2);
        memcpy(dest + PI_V_OFFSET, src + PI_U_OFFSET, PI_PLANE_SIZE >> 2);
        captureBuffer->unlock();
        tdata->tripleBuffer->swapBackBuffer();

        // Frame capture complete; copy current shot data to the previous one.
        pthread_mutex_lock(&tdata->currentShotLock);
        memcpy(&tdata->previousShot, &tdata->currentShot, sizeof(FCAM_SHOT_PARAMS));
        pthread_mutex_unlock(&tdata->currentShotLock);
        frameCount++;

        // Update FPS.
        double time = timer.get();
        double dt = time - fpsUpdateTime;
        if (dt > FPS_UPDATE_PERIOD) {
            float fps = frameCount * (1000.0 / dt);
            fpsUpdateTime = time;
            frameCount = 0;
            tdata->captureFps = fps;
        }
    }

    // Release the Java instance ref while the thread is still attached; JNI
    // calls through env are invalid after DetachCurrentThread().
    env->DeleteGlobalRef(tdata->fcamInstanceRef);
    tdata->javaVM->DetachCurrentThread();
    return 0;
}
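/*
 * For reference, the MyAutoFocus calls used above (setRect, startSweep,
 * update) follow the shape of FCam's stock FCam::AutoFocus helper. Below is a
 * minimal sketch of a sweep-based contrast autofocus under that assumption.
 * It is illustrative, not the assignment's actual implementation: the member
 * names not used above, the 11-step sweep, and the whole-map sharpness sum
 * are assumptions, and the FCam calls (Lens::nearFocus/farFocus/setFocus,
 * Frame::sharpness) should be checked against your FCam headers.
 */
#include <FCam/Tegra.h>

enum AutoFocusState { AUTO_FOCUS_IDLE, AUTO_FOCUS_FOCUS, AUTO_FOCUS_FACE_DETECT };

class MyAutoFocusSketch {
public:
    AutoFocusState state;

    MyAutoFocusSketch(FCam::Tegra::Lens *l)
        : state(AUTO_FOCUS_IDLE), lens(l), step(0), bestSharpness(-1), bestFocus(0.0f) {}

    // Focus on a rectangle given in preview coordinates.
    void setRect(int x, int y, int w = RECT_EDGE_LEN, int h = RECT_EDGE_LEN) {
        rect = FCam::Rect(x, y, w, h);
    }

    // Begin a sweep from the far end of the focus range.
    void startSweep() {
        step = 0;
        bestSharpness = -1;
        bestFocus = lens->farFocus();
        lens->setFocus(bestFocus);
    }

    // Called once per streamed frame while state == AUTO_FOCUS_FOCUS.
    void update(const FCam::Frame &frame) {
        // Sum the sharpness map (16x12, matching shot.sharpness.size above).
        // A real implementation would weight only the cells under `rect`.
        long total = 0;
        for (int y = 0; y < 12; y++)
            for (int x = 0; x < 16; x++)
                total += frame.sharpness()(x, y);

        float focus = (float)frame["lens.focus"]; // lens position at capture time
        if (total > bestSharpness) {
            bestSharpness = total;
            bestFocus = focus;
        }

        if (++step < NUM_STEPS) {
            // Step the lens linearly toward the near end.
            float t = (float)step / (NUM_STEPS - 1);
            lens->setFocus(lens->farFocus() + t * (lens->nearFocus() - lens->farFocus()));
        } else {
            // Sweep done: snap back to the sharpest position found.
            lens->setFocus(bestFocus);
            state = AUTO_FOCUS_IDLE;
        }
    }

private:
    static const int NUM_STEPS = 11; // illustrative sweep resolution
    FCam::Tegra::Lens *lens;
    FCam::Rect rect;
    int step;
    long bestSharpness;
    float bestFocus;
};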
int main() {
    // Initialize the input video source.
    // cv::VideoCapture captureInput("/Users/andriybas/Downloads/elephant_wild_life.m4v");
    // cv::VideoCapture captureInput("/Users/andriybas/Documents/test.mov");
    cv::VideoCapture captureInput(0);
    if (!captureInput.isOpened()) {
        std::cout << "Could not open input source" << std::endl;
        return -1;
    }

    double fps = captureInput.get(CV_CAP_PROP_FPS); // Frames per second of the video.
    std::cout << "Frames per second: " << fps << std::endl;

    cv::namedWindow("Video classifier", CV_WINDOW_AUTOSIZE);

    int frameCount = 0;

    // Load the classifiers.
    cv::CascadeClassifier face_classifier(FACE_DETECT_CLASSIFIER_PATH);
    cv::CascadeClassifier profile_face_classifier(PROFILE_FACE_DETECT_CLASSIFIER_PATH);
    cv::CascadeClassifier elephant_classifier(ELEPHANT_DETECT_CLASSIFIER_PATH);
    cv::CascadeClassifier banana_classifier(BANANA_DETECT_CLASSIFIER_PATH);

    // Create the detectors.
    Detector faceDetector(face_classifier, "face");
    faceDetector.setScaleFactor(2);
    // Detector faceProfileDetector(profile_face_classifier, "face_profile");
    Detector elephantDetector(elephant_classifier, "elephant");
    elephantDetector.setScaleFactor(3);
    elephantDetector.setMinNeighbours(4);
    Detector bananaDetector(banana_classifier, "banana");
    bananaDetector.setScaleFactor(2);
    bananaDetector.setMinNeighbours(6);

    // Initialize the detection cascade.
    DetectCascade detectCascade;
    detectCascade.addDetector(faceDetector);
    // detectCascade.addDetector(faceProfileDetector);
    detectCascade.addDetector(elephantDetector);
    detectCascade.addDetector(bananaDetector);

    VideoClassifier videoClassifier;
    DetectedResults detectedObjects;

    cv::Mat frame;
    long totalTime = 0;
    int detectedFrames = 0;

    while (true) {
        captureInput >> frame;
        if (frame.empty()) break; // End of stream.

        // Only run detection on every (SKIP_COUNT + 1)-th frame to save compute.
        if (frameCount < SKIP_COUNT) {
            frameCount++;
        } else {
            frameCount = 0;
            std::chrono::high_resolution_clock::time_point t1 =
                std::chrono::high_resolution_clock::now();
            detectedObjects = detectCascade.detect(frame);
            std::chrono::high_resolution_clock::time_point t2 =
                std::chrono::high_resolution_clock::now();
            auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(t2 - t1).count();
            totalTime += duration;
            detectedFrames++;
            videoClassifier.addFrame(detectedObjects);
        }

        drawDetectedFrames(frame, detectedObjects);
        drawTags(frame, detectedObjects);

        std::string videoClass = videoClassifier.getVideoClass();
        drawClass(frame, videoClass);

        cv::imshow("Video classifier", frame);

        if (detectedFrames > 100) {
            std::cout << "Average frame detect: " << 1.0 * totalTime / detectedFrames << " ms\n";
            detectedFrames = 0;
            totalTime = 0;
        }

        // Press 'c' to escape; waitKey also lets HighGUI render the frame.
        if (cv::waitKey(1) == 'c') break;
    }

    // Hold the last frame until a key is pressed.
    cv::waitKey(0);
    return 0;
}
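/*
 * The Detector wrapper used in main() isn't shown here. Below is a minimal
 * sketch of how it could map setScaleFactor()/setMinNeighbours() onto
 * cv::CascadeClassifier::detectMultiScale(); the class layout and the
 * grayscale/equalizeHist preprocessing are assumptions for illustration.
 */
#include <opencv2/imgproc.hpp>
#include <opencv2/objdetect.hpp>
#include <string>
#include <vector>

class DetectorSketch {
public:
    DetectorSketch(const cv::CascadeClassifier &classifier, const std::string &tag)
        : classifier_(classifier), tag_(tag), scaleFactor_(1.1), minNeighbours_(3) {}

    // Mirror the tuning calls made in main().
    void setScaleFactor(double f) { scaleFactor_ = f; }
    void setMinNeighbours(int n) { minNeighbours_ = n; }

    // Run the cascade on a grayscale, histogram-equalized copy of the frame.
    std::vector<cv::Rect> detect(const cv::Mat &frame) {
        cv::Mat gray;
        cv::cvtColor(frame, gray, cv::COLOR_BGR2GRAY);
        cv::equalizeHist(gray, gray);
        std::vector<cv::Rect> objects;
        classifier_.detectMultiScale(gray, objects, scaleFactor_, minNeighbours_);
        return objects;
    }

    const std::string &tag() const { return tag_; }

private:
    cv::CascadeClassifier classifier_; // copied so each detector owns its cascade
    std::string tag_;                  // label drawn next to detections, e.g. "banana"
    double scaleFactor_;               // image pyramid step; larger = faster but coarser
    int minNeighbours_;                // higher = fewer false positives
};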