// This method is the main workhorse, and is run by the camera thread.
static void *FCamAppThread(void *ptr) {
    FCAM_INTERFACE_DATA *tdata = (FCAM_INTERFACE_DATA *)ptr;
    Timer timer;
    JNIEnv *env;
    tdata->javaVM->AttachCurrentThread(&env, 0);

    writer = 0; // Initialized on the first PARAM_OUTPUT_DIRECTORY set request.

    // Initialize FCam devices.
    FCam::Tegra::Sensor sensor;
    FCam::Tegra::Lens lens;
    FCam::Tegra::Flash flash;
    sensor.attach(&lens);
    sensor.attach(&flash);
    MyAutoFocus autofocus(&lens);
    MyFaceDetector faceDetector("/data/fcam/face.xml");
    FCam::Image previewImage(PREVIEW_IMAGE_WIDTH, PREVIEW_IMAGE_HEIGHT, FCam::YUV420p);
    FCam::Tegra::Shot shot;

    // Initialize FPS stat calculation; assume 30 Hz until measured.
    tdata->captureFps = 30;
    double fpsUpdateTime = timer.get();
    int frameCount = 0;

    // Local task queue that processes messages from the Android application.
    std::queue<ParamSetRequest> taskQueue;
    ParamSetRequest task;

    for (;;) {
        FCAM_SHOT_PARAMS *currentShot = &tdata->currentShot;
        FCAM_SHOT_PARAMS *previousShot = &tdata->previousShot;

        // Copy tasks to the local queue.
        sAppData->requestQueue.consumeAll(taskQueue);

        // Parse all tasks from the Android application.
        while (!taskQueue.empty()) {
            task = taskQueue.front();
            taskQueue.pop();

            bool prevValue;
            int taskId = task.getId() & 0xffff;
            int *taskData = (int *)task.getData();
            int pictureId = task.getId() >> 16;

            switch (taskId) {
            case PARAM_SHOT:
                // Note: Exposure is bounded below at 1000 us, i.e. 1/1000 s (FCam bug?).
                currentShot->captureSet[pictureId].exposure =
                    taskData[SHOT_PARAM_EXPOSURE] < 1000 ? 1000 : taskData[SHOT_PARAM_EXPOSURE];
                currentShot->captureSet[pictureId].focus = taskData[SHOT_PARAM_FOCUS];
                currentShot->captureSet[pictureId].gain = taskData[SHOT_PARAM_GAIN];
                currentShot->captureSet[pictureId].wb = taskData[SHOT_PARAM_WB];
                currentShot->captureSet[pictureId].flashOn = taskData[SHOT_PARAM_FLASH];
                break;
            case PARAM_PREVIEW_EXPOSURE:
                currentShot->preview.user.exposure = taskData[0];
                break;
            case PARAM_PREVIEW_FOCUS:
                currentShot->preview.user.focus = taskData[0];
                break;
            case PARAM_PREVIEW_GAIN:
                currentShot->preview.user.gain = taskData[0];
                break;
            case PARAM_PREVIEW_WB:
                currentShot->preview.user.wb = taskData[0];
                break;
            // The four PARAM_PREVIEW_AUTO_*_ON cases share one handoff pattern:
            // when an auto mode is switched on, seed the evaluator with the
            // current user value; otherwise carry the last evaluated value back
            // into the user setting so the UI picks up where auto left off.
            case PARAM_PREVIEW_AUTO_EXPOSURE_ON:
                prevValue = currentShot->preview.autoExposure;
                currentShot->preview.autoExposure = taskData[0] != 0;
                if (!prevValue && currentShot->preview.autoExposure) {
                    previousShot->preview.evaluated.exposure = currentShot->preview.user.exposure;
                } else {
                    currentShot->preview.user.exposure = previousShot->preview.evaluated.exposure;
                }
                break;
            case PARAM_PREVIEW_AUTO_FOCUS_ON:
                prevValue = currentShot->preview.autoFocus;
                currentShot->preview.autoFocus = taskData[0] != 0;
                if (!prevValue && currentShot->preview.autoFocus) {
                    previousShot->preview.evaluated.focus = currentShot->preview.user.focus;
                } else {
                    currentShot->preview.user.focus = previousShot->preview.evaluated.focus;
                }
                break;
            case PARAM_PREVIEW_AUTO_GAIN_ON:
                prevValue = currentShot->preview.autoGain;
                currentShot->preview.autoGain = taskData[0] != 0;
                if (!prevValue && currentShot->preview.autoGain) {
                    previousShot->preview.evaluated.gain = currentShot->preview.user.gain;
                } else {
                    currentShot->preview.user.gain = previousShot->preview.evaluated.gain;
                }
                break;
            case PARAM_PREVIEW_AUTO_WB_ON:
                prevValue = currentShot->preview.autoWB;
                currentShot->preview.autoWB = taskData[0] != 0;
                if (!prevValue && currentShot->preview.autoWB) {
                    previousShot->preview.evaluated.wb = currentShot->preview.user.wb;
                } else {
                    currentShot->preview.user.wb = previousShot->preview.evaluated.wb;
                }
                break;
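            // The next two requests are accepted but currently ignored: the
            // preview stream always runs at PREVIEW_IMAGE_WIDTH x
            // PREVIEW_IMAGE_HEIGHT in YUV420p (see previewImage above).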
            case PARAM_RESOLUTION:
                break;
            case PARAM_BURST_SIZE:
                currentShot->burstSize = taskData[0];
                break;
            case PARAM_OUTPUT_FORMAT:
                break;
            case PARAM_VIEWER_ACTIVE:
                tdata->isViewerActive = taskData[0] != 0;
                break;
            case PARAM_OUTPUT_DIRECTORY:
                if (writer == 0) {
                    writer = new AsyncImageWriter((char *)task.getData());
                    writer->setOnFileSystemChangedCallback(OnFileSystemChanged);
                }
                break;
            case PARAM_OUTPUT_FILE_ID:
                AsyncImageWriter::SetFreeFileId(taskData[0]);
                break;
            case PARAM_TAKE_PICTURE:
                // Don't take a picture if we have nowhere to write it out.
                if (writer != 0 && task.getDataAsInt() != 0) {
                    // Capture begins: notify the Java side.
                    tdata->isCapturing = true;
                    env->CallVoidMethod(tdata->fcamInstanceRef, tdata->notifyCaptureStart);
                    OnCapture(tdata, writer, sensor, flash, lens);
                    // Capture done: notify completion.
                    tdata->isCapturing = false;
                    env->CallVoidMethod(tdata->fcamInstanceRef, tdata->notifyCaptureComplete);
                }
                break;
            case PARAM_PRIV_FS_CHANGED:
                if (taskData[0] != 0) {
                    // Notify the Java side of the file system change.
                    env->CallVoidMethod(tdata->fcamInstanceRef, tdata->notifyFileSystemChange);
                }
                break;
            /* [CS478]
             * You will probably want extra cases here, to handle messages
             * that request autofocus to be activated. Define any new
             * message types in ParamSetRequest.h.
             */
            case PARAM_AUTO_FOCUS_LOCAL_REG:
                autofocus.state = AUTO_FOCUS_FOCUS;
                // Center a RECT_EDGE_LEN square on the tapped point (hack,
                // TODO: clamp to the image bounds).
                autofocus.setRect(taskData[0] - RECT_EDGE_LEN / 2,
                                  taskData[1] - RECT_EDGE_LEN / 2);
                autofocus.startSweep();
                break;
            case PARAM_AUTO_FOCUS_GLOBAL:
                autofocus.state = AUTO_FOCUS_FOCUS;
                autofocus.setRect(0, 0, PREVIEW_IMAGE_WIDTH, PREVIEW_IMAGE_HEIGHT);
                autofocus.startSweep();
                break;
            /* [CS478] Assignment #2
             * You will probably want yet another case here to handle face-
             * based autofocus. Recall that it might be useful to add a new
             * message type in ParamSetRequest.h.
             */
            case PARAM_AUTO_FOCUS_FACE:
                LOG("MYFOCUS face focus switch\n");
                autofocus.state = AUTO_FOCUS_FACE_DETECT;
                autofocus.fdWait();
                break;
            default:
                ERROR("TaskDispatch(): received unsupported task id (%i)!", taskId);
            }
        }

        if (!tdata->isViewerActive) continue; // Viewer is inactive, so skip capture.

        // Set up the preview shot parameters.
        shot.exposure = currentShot->preview.autoExposure ?
            previousShot->preview.evaluated.exposure : currentShot->preview.user.exposure;
        shot.gain = currentShot->preview.autoGain ?
            previousShot->preview.evaluated.gain : currentShot->preview.user.gain;
        shot.whiteBalance = currentShot->preview.autoWB ?
            previousShot->preview.evaluated.wb : currentShot->preview.user.wb;
        shot.image = previewImage;
        shot.histogram.enabled = true;
        shot.histogram.region = FCam::Rect(0, 0, PREVIEW_IMAGE_WIDTH, PREVIEW_IMAGE_HEIGHT);
        shot.sharpness.enabled = currentShot->preview.autoFocus;
        shot.sharpness.size = FCam::Size(16, 12);
        shot.fastMode = true;
        shot.clearActions();

        // If in manual focus mode and the lens is not at the right place,
        // add an action to move it.
        if (!currentShot->preview.autoFocus &&
            previousShot->preview.user.focus != currentShot->preview.user.focus) {
            FCam::Lens::FocusAction focusAction(&lens);
            focusAction.time = 0;
            focusAction.focus = currentShot->preview.user.focus;
            shot.addAction(focusAction);
        }

        // Send the shot request to FCam.
        sensor.stream(shot);

        // Fetch the incoming frame from FCam.
        FCam::Frame frame = sensor.getFrame();
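        // Note: stream() keeps the sensor capturing this shot repeatedly, and
        // getFrame() blocks until the next streamed frame is ready, so each
        // loop iteration is paced by the sensor's frame rate.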
        // Process the incoming frame. If autoExposure or autoGain is enabled,
        // update the exposure parameters based on the frame.
        if (currentShot->preview.autoExposure || currentShot->preview.autoGain) {
            FCam::autoExpose(&shot, frame, sensor.maxGain(), sensor.maxExposure(), sensor.minExposure(), 0.3);
            currentShot->preview.evaluated.exposure = shot.exposure;
            currentShot->preview.evaluated.gain = shot.gain;
        }

        // If autoWB is enabled, update the white balance based on the frame.
        if (currentShot->preview.autoWB) {
            FCam::autoWhiteBalance(&shot, frame);
            currentShot->preview.evaluated.wb = shot.whiteBalance;
        }

        if (autofocus.state == AUTO_FOCUS_FACE_DETECT) {
            std::vector<cv::Rect> facesFound = faceDetector.detectFace(frame.image());
            // Draw a bright border around each detected face in the luma plane.
            // (This assumes each rect lies strictly inside the image bounds.)
            for (unsigned int i = 0; i < facesFound.size(); i++) {
                cv::Rect r = facesFound[i];
                for (int x = 0; x < r.width; x++) {
                    frame.image()(r.x + x, r.y)[0] = 254u;
                    frame.image()(r.x + x, r.y + r.height)[0] = 254u;
                }
                for (int y = 0; y < r.height; y++) {
                    frame.image()(r.x, r.y + y)[0] = 254u;
                    frame.image()(r.x + r.width, r.y + y)[0] = 254u;
                }
            }
            if (facesFound.size() != 0)
                autofocus.setRects(facesFound);
            autofocus.fdWait();
        }
        /* [CS478] Assignment #2
         * Above, facesFound contains the list of detected faces, for the given frame.
         * If applicable, you may pass these values to the MyAutoFocus instance.
         *
         * e.g. autofocus.setTarget(facesFound);
         * Note that MyAutoFocus currently has no setTarget method. You'd have
         * to write the appropriate interface.
         *
         * You should also only run faceDetector.detectFace(...) if it
         * is necessary (to save compute), so change "true" above to something
         * else appropriate.
         */

        /* [CS478] Assignment #1
         * You should process the incoming frame for autofocus, if necessary.
         * Your autofocus (MyAutoFocus.h) has a function called update(...).
         */
        if (autofocus.state == AUTO_FOCUS_FOCUS) {
            autofocus.update(frame);
        }
        if (currentShot->preview.autoFocus) {
            currentShot->preview.evaluated.focus = (float)frame["lens.focus"];
        }

        // Update the histogram data: find the largest bin, then normalize.
        const FCam::Histogram &histogram = frame.histogram();
        int maxBinValue = 1;
        for (int i = 0; i < 64; i++) {
            int currBinValue = histogram(i);
            maxBinValue = (currBinValue > maxBinValue) ? currBinValue : maxBinValue;
            currentShot->histogramData[i * 4] = currBinValue;
        }
        float norm = 1.0f / maxBinValue;
        for (int i = 0; i < 64; i++) {
            currentShot->histogramData[i * 4] *= norm;
            // The remaining three components of each four-float entry are cleared.
            currentShot->histogramData[i * 4 + 1] = 0.0f;
            currentShot->histogramData[i * 4 + 2] = 0.0f;
            currentShot->histogramData[i * 4 + 3] = 0.0f;
        }
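        // Layout note: in YUV420p the U and V planes are subsampled 2x2, so
        // each chroma plane is one quarter of the luma plane size; that is why
        // the chroma copies below move PI_PLANE_SIZE >> 2 bytes each.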
        // Update the frame buffer.
        uchar *src = (uchar *)frame.image()(0, 0);
        FCam::Tegra::Hal::SharedBuffer *captureBuffer = tdata->tripleBuffer->getBackBuffer();
        uchar *dest = (uchar *)captureBuffer->lock();
        // Note: why do we need to shuffle the U and V channels? It seems to be a bug.
        memcpy(dest, src, PI_PLANE_SIZE);
        memcpy(dest + PI_U_OFFSET, src + PI_V_OFFSET, PI_PLANE_SIZE >> 2);
        memcpy(dest + PI_V_OFFSET, src + PI_U_OFFSET, PI_PLANE_SIZE >> 2);
        captureBuffer->unlock();
        tdata->tripleBuffer->swapBackBuffer();

        // Frame capture complete; copy the current shot data to the previous one.
        pthread_mutex_lock(&tdata->currentShotLock);
        memcpy(&tdata->previousShot, &tdata->currentShot, sizeof(FCAM_SHOT_PARAMS));
        pthread_mutex_unlock(&tdata->currentShotLock);

        // Update the FPS estimate.
        frameCount++;
        double time = timer.get();
        double dt = time - fpsUpdateTime;
        if (dt > FPS_UPDATE_PERIOD) {
            float fps = frameCount * (1000.0 / dt); // dt is in milliseconds
            fpsUpdateTime = time;
            frameCount = 0;
            tdata->captureFps = fps;
        }
    }

    // Delete the global instance ref before detaching; env is no longer valid
    // once the thread has detached from the VM.
    env->DeleteGlobalRef(tdata->fcamInstanceRef);
    tdata->javaVM->DetachCurrentThread();
    return 0;
}
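/* A minimal sketch of the sweep-based autofocus that the [CS478] comments
 * above ask for in MyAutoFocus.h. Only setRect(), setRects(), startSweep(),
 * update(), fdWait(), and the public `state` member are visible from the code
 * above; everything else here (member names, the step size, the gradient
 * sharpness metric) is an illustrative assumption, not the actual class.
 */
class SweepAutoFocusSketch {
public:
    SweepAutoFocusSketch(FCam::Tegra::Lens *l)
        : lens(l), rect(0, 0, PREVIEW_IMAGE_WIDTH, PREVIEW_IMAGE_HEIGHT),
          bestSharpness(-1.0f), bestFocus(0.0f), position(0.0f), sweeping(false) {}

    void setRect(int x, int y, int w = RECT_EDGE_LEN, int h = RECT_EDGE_LEN) {
        rect = FCam::Rect(x, y, w, h);
    }

    void startSweep() {
        if (!lens) return;
        position = lens->farFocus(); // start at the far end (smallest diopter value)
        lens->setFocus(position);
        bestSharpness = -1.0f;
        bestFocus = position;
        sweeping = true;
    }

    // Called once per preview frame while the sweep is active. A real
    // implementation should read the focus position from the frame's
    // "lens.focus" tag rather than trusting the last commanded position,
    // to account for lens latency.
    void update(const FCam::Frame &f) {
        if (!sweeping) return;
        float s = sharpnessInRect(f.image());
        if (s > bestSharpness) {
            bestSharpness = s;
            bestFocus = position;
        }
        if (position < lens->nearFocus()) {
            position += 1.0f;          // advance one (illustrative) diopter per frame
            lens->setFocus(position);
        } else {
            lens->setFocus(bestFocus); // sweep finished: return to the sharpest spot
            sweeping = false;
        }
    }

private:
    // Sum of absolute horizontal luma gradients inside the focus rectangle.
    float sharpnessInRect(FCam::Image im) const {
        float total = 0.0f;
        for (int y = rect.y; y < rect.y + rect.height; y++) {
            for (int x = rect.x; x + 1 < rect.x + rect.width; x++) {
                int d = (int)im(x + 1, y)[0] - (int)im(x, y)[0];
                total += (d < 0) ? -d : d;
            }
        }
        return total;
    }

    FCam::Tegra::Lens *lens;
    FCam::Rect rect;
    float bestSharpness, bestFocus, position;
    bool sweeping;
};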
int main() {
    initialise_termination_handler();

    int deviceDescriptor = open("/dev/video0", O_RDWR /* required */ | O_NONBLOCK, 0);
    if (deviceDescriptor == -1) {
        std::cout << "Unable to open device\n";
        return 1;
    }
    bool isAutofocusAvailable = isLogitechAutofocusModeSupported(deviceDescriptor);
    (void)isAutofocusAvailable; // Queried but currently unused.

    // Create the socket on which ready buffers are announced.
    int announce_socket = socket(AF_INET, SOCK_DGRAM, 0);
    if (announce_socket < 0) {
        perror("Error creating socket");
        return 1;
    }
    int one = 1;
    int ret = setsockopt(announce_socket, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
    if (ret < 0) {
        perror("setsockopt");
        return 1;
    }

    sockaddr_in saddr;
    memset(&saddr, 0, sizeof(struct sockaddr_in));
    saddr.sin_family = AF_INET;
    saddr.sin_port = htons(CAMERA_ANNOUNCE_PORT);
    saddr.sin_addr.s_addr = INADDR_ANY; // bind socket to any interface
    int status = bind(announce_socket, (struct sockaddr *)&saddr, sizeof(sockaddr_in));
    if (status < 0) {
        perror("Error binding socket to interface");
        return 1;
    }

    // Join the camera announce multicast group.
    ip_mreq mreq;
    memset(&mreq, 0, sizeof(mreq));
    mreq.imr_multiaddr.s_addr = inet_addr(CAMERA_ANNOUNCE_GROUP);
    mreq.imr_interface.s_addr = INADDR_ANY;
    if (setsockopt(announce_socket, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq)) < 0) {
        perror("setsockopt");
        return 1;
    }

    Autofocus autofocus(deviceDescriptor);

    while (running == 1 && !autofocus.isFocusComplete()) {
        // Wait for a buffer announcement.
        std::vector<char> data(1024);
        int result = recvfrom(announce_socket, data.data(), data.size(), 0, NULL, NULL);
        if (result <= 0) {
            perror("recvfrom");
            break;
        }

        // Decode the announcement into a BufferReference.
        BufferReference readyBuffer;
        memset(&readyBuffer, 0, sizeof(readyBuffer));
        void *input_pointer = &readyBuffer;
        asn_dec_rval_t rval = ber_decode(0, &asn_DEF_BufferReference, &input_pointer, data.data(), result);
        if (rval.code != RC_OK) {
            std::cout << "Can not decode buffer announcement\n";
            continue;
        }

        // Map the announced shared-memory buffer.
        std::string buffer_name = "/" + get_name_of_buffer(readyBuffer.sequence);
        int buffer_descriptor = shm_open(buffer_name.c_str(), O_RDONLY, 0);
        if (buffer_descriptor == -1) {
            std::cout << "Can not open shared buffer file " << buffer_name << std::endl;
            continue;
        }
        size_t buffer_length = readyBuffer.width * readyBuffer.height * 2; // YUY2: 2 bytes per pixel
        void *pointer = mmap(NULL, buffer_length, PROT_READ, MAP_PRIVATE, buffer_descriptor, 0);
        close(buffer_descriptor);
        if (pointer == MAP_FAILED) {
            std::cout << "Can not map shared buffer " << buffer_name << std::endl;
            continue;
        }

        // Hand the frame to the autofocus routine, then release the mapping.
        timeval frame_timestamp{readyBuffer.timestamp_seconds, readyBuffer.timestamp_microseconds};
        yuy2::view_t frame = boost::gil::interleaved_view(
            readyBuffer.width, readyBuffer.height,
            static_cast<yuy2::ptr_t>(pointer), readyBuffer.width * 2);
        autofocus.submitFrame(frame_timestamp, frame);
        munmap(pointer, buffer_length);
    }

    close(announce_socket);
    close(deviceDescriptor);
    return 0;
}
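/* isLogitechAutofocusModeSupported() is referenced above but not defined in
 * this file. A plausible implementation (an assumption, not the project's
 * actual code) probes the standard V4L2 focus controls with VIDIOC_QUERYCTRL;
 * the real check may instead use Logitech's vendor-specific UVC extension
 * controls.
 */
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static bool isFocusControlSupported(int fd, uint32_t controlId) {
    v4l2_queryctrl query;
    memset(&query, 0, sizeof(query));
    query.id = controlId;
    if (ioctl(fd, VIDIOC_QUERYCTRL, &query) == -1) return false; // control unknown
    return !(query.flags & V4L2_CTRL_FLAG_DISABLED);
}

// Example: a device can be driven through a manual focus sweep if it exposes
// V4L2_CID_FOCUS_ABSOLUTE and lets us toggle V4L2_CID_FOCUS_AUTO off.
static bool isAutofocusModeSupportedSketch(int fd) {
    return isFocusControlSupported(fd, V4L2_CID_FOCUS_ABSOLUTE) &&
           isFocusControlSupported(fd, V4L2_CID_FOCUS_AUTO);
}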