void SlytherinApp::update() {
    uint32_t elapsedFrames = getElapsedFrames();
    bool needsFrame = mLastUpdateFrame == UINT32_MAX || (elapsedFrames - mLastUpdateFrame) * mLinesPerFrame >= 1.0f;
    if (mCapture && needsFrame && mCapture->checkNewFrame()) {
        uint32_t lineCount = (uint32_t)floorf((elapsedFrames - mLastUpdateFrame) * mLinesPerFrame);
        Surface8u surface = mCapture->getSurface();
        Texture texture = Texture(surface);
        if (mLineIndex + lineCount < mFBO.getHeight()) {
            // single segment: mLineIndex to mLineIndex + lineCount
            console() << "process (" << mLineIndex << "-" << mLineIndex + lineCount << ") on frame " << elapsedFrames << endl;
            mLineIndex += lineCount;
        } else {
            // two segments: mLineIndex to mFBO.getHeight() - 1, then 0 to overflowLineCount
            uint32_t overflowLineCount = mLineIndex + lineCount - mFBO.getHeight() + 1;
            console() << "process (" << mLineIndex << "-" << mFBO.getHeight() - 1 << ") and (0-" << overflowLineCount << ") on frame " << elapsedFrames << endl;
            mLineIndex = overflowLineCount;
        }
        mLastUpdateFrame = elapsedFrames;
    }
}
void CinderVideoStreamServerApp::update()
{
    if( mCapture && mCapture->checkNewFrame() ) {
        Surface8uRef surf = mCapture->getSurface();
#ifdef USE_JPEG_COMPRESSION
        // Compress the frame to an in-memory JPEG, then decode it back so the
        // previewed texture matches exactly what gets queued for the server.
        OStreamMemRef os = OStreamMem::create();
        DataTargetRef target = DataTargetStream::createRef( os );
        writeImage( target, *surf, ImageTarget::Options().quality( mQuality ), "jpeg" );

        const void *data = os->getBuffer();
        size_t dataSize = os->tell();
        totalStreamSize += dataSize;

        BufferRef bufRef = Buffer::create( dataSize );
        memcpy( bufRef->getData(), data, dataSize );
        SurfaceRef jpeg = Surface::create( loadImage( DataSourceBuffer::create( bufRef ) ), SurfaceConstraintsDefault(), false );

        queueToServer->push( jpeg->getData() );
        mTexture = gl::Texture::create( *jpeg );
        mStatus.assign("Streaming JPG (")
            .append(std::to_string((int)(mQuality * 100.0f)))
            .append("%) ")
            .append(std::to_string((int)(totalStreamSize * 0.001 / getElapsedSeconds())))
            .append(" kB/sec ")
            .append(std::to_string((int)getFrameRate()))
            .append(" fps ");
#else
        queueToServer->push( surf->getData() );
        mTexture = gl::Texture::create( *surf );
        mStatus.assign("Streaming ").append(std::to_string((int)getFrameRate())).append(" fps");
#endif
    }
}
void projections_balletApp::update()
{
    if( mCapture && mCapture->checkNewFrame() ) {
        mTexture = gl::Texture::create( mCapture->getSurface() );
    }
}
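Examples like the ones above assume a capture device that was created and started during setup(). A minimal sketch of that setup, reusing the mCapture/mTexture member names from the example (the 640x480 resolution and the member declarations are assumptions, not part of the original app):

// Assumed members in the app class:
//     CaptureRef      mCapture;
//     gl::TextureRef  mTexture;
void projections_balletApp::setup()
{
    try {
        mCapture = Capture::create( 640, 480 );   // hypothetical resolution; a specific Capture::Device can also be passed
        mCapture->start();
    }
    catch( ci::Exception &exc ) {
        console() << "Failed to initialize the capture device: " << exc.what() << endl;
    }
}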
void HexagonMirrorApp::update()
{
    // update webcam image
    if( mCapture && mCapture->checkNewFrame() )
        mCaptureTexture = gl::Texture( mCapture->getSurface() );
    else
        mCaptureTexture = mDummyTexture;
}
void CinderCalibrationApp::update()
{
    if ( mCapture->checkNewFrame() ) {
        mCaptureSurf = mCapture->getSurface();
        mCaptureTex  = gl::Texture::create( mCaptureSurf );
        mCaptureMat  = toOcv( mCaptureSurf );
        collectImages();

        switch ( mState ) {
            case STATE_CALIBRATING:
                if ( callibrate() ) {
                    mState = STATE_CALIBRATED;
                } else {
                    console() << "Calibration failed." << endl;
                    exit( 1 );
                }
                // falls through so the freshly calibrated frame is undistorted immediately
            case STATE_CALIBRATED:
                undistort( mCaptureMat, mUndistortedMat, intrinsic, distortion );
        }
    }
}
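collectImages() and callibrate() are app-specific helpers that the snippet does not show. A minimal sketch of what a callibrate() implementation might look like on top of OpenCV, assuming collectImages() accumulates detected chessboard corners in mImagePoints and the matching board coordinates in mObjectPoints (both member names and the minimum-view count are hypothetical):

bool CinderCalibrationApp::callibrate()
{
    if ( mImagePoints.size() < 10 )   // hypothetical minimum number of captured views
        return false;

    std::vector<cv::Mat> rvecs, tvecs;
    double rms = cv::calibrateCamera( mObjectPoints, mImagePoints, mCaptureMat.size(),
                                      intrinsic, distortion, rvecs, tvecs );
    console() << "RMS reprojection error: " << rms << endl;
    return cv::checkRange( intrinsic ) && cv::checkRange( distortion );
}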
void camerasApp::update()
{
    if( mCapture && mCapture->checkNewFrame() ) {
        mTexture = gl::Texture::create( mCapture->getSurface() );
    }
}
void WayFinderApp::update()
{
    if(getElapsedFrames() % FRAME_COUNT_THRESHOLD == 0) {
        detected = false;

        // TODO: Consider converting capture to grayscale or blurring then thresholding to improve performance.
        if(capture && capture->checkNewFrame()) {
            frame = toOcv(capture->getSurface());

            //cv::Mat frameGray, frameBlurred, frameThresh, foreGray, backGray;
            //cvtColor(frame, frameGray, CV_BGR2GRAY);
            int blurAmount = 10;
            //cv::blur(frame, frameBlurred, cv::Size(blurAmount, blurAmount));
            //threshold(frameBlurred, frameThresh, 100, 255, CV_THRESH_BINARY);

            // Update the background model and extract foreground contours.
            //bg.operator()(frameThresh, fore);
            bg.operator()(frame, fore);
            bg.getBackgroundImage(back);
            cv::erode(fore, fore, cv::Mat());
            cv::dilate(fore, fore, cv::Mat());
            cv::findContours(fore, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);

            // Get largest contour: http://stackoverflow.com/questions/15012073/opencv-draw-draw-contours-of-2-largest-objects
            unsigned largestIndex = 0;
            unsigned largestContour = 0;
            for(unsigned i = 0; i < contours.size(); i++) {
                if(contours[i].size() > largestContour) {
                    largestContour = contours[i].size();
                    largestIndex = i;
                }
            }

            vector<std::vector<cv::Point>> hack;
            cv::Rect rect;
            cv::Point center;

            if(contours.size() > 0) {
                hack.push_back(contours[largestIndex]);

                // Find bounding rectangle for the largest contour.
                rect = boundingRect(contours[largestIndex]);

                // Make sure the blob is large enough to be worth tracking.
                println("Rect area = " + boost::lexical_cast<std::string>(rect.area()));
                if(rect.area() >= 5000) { // TODO: Tweak this value.
                    // Get center of rectangle.
                    center = cv::Point( rect.x + (rect.width / 2), rect.y + (rect.height / 2) );

                    // Show guide.
                    spotlightCenter2D.x = (float)center.x;
                    spotlightCenter2D.y = (float)center.y;
                    spotlightCenter3D.x = (float)center.x;
                    spotlightCenter3D.y = (float)center.y;
                    //spotlightRadius = (rect.width + rect.y) / 2;
                    detected = true;
                }
            }

            // When debug mode is off, the background should be black.
            if(debugView) {
                if(contours.size() > 0) {
                    cv::drawContours(frame, contours, -1, cv::Scalar(0, 0, 255), 2);
                    cv::drawContours(frame, hack, -1, cv::Scalar(255, 0, 0), 2);
                    rectangle(frame, rect, cv::Scalar(0, 255, 0), 3);
                    circle(frame, center, 10, cv::Scalar(0, 255, 0), 3);
                }
                mTexture = gl::Texture(fromOcv(frame));
            }
        }
        // TODO: Create control panel for all inputs.
    }
}
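The WayFinderApp example leans on members declared in its header rather than shown here. A sketch of what those declarations likely look like against the OpenCV 2.x API implied by the bg.operator()(frame, fore) functor call (the FRAME_COUNT_THRESHOLD value and the exact types are assumptions):

#define FRAME_COUNT_THRESHOLD 5                     // hypothetical: run detection every Nth frame

CaptureRef                          capture;        // webcam input
gl::Texture                         mTexture;       // debug-view output texture
cv::BackgroundSubtractorMOG2        bg;             // OpenCV 2.x functor-style background subtractor
cv::Mat                             frame, fore, back;
std::vector<std::vector<cv::Point>> contours;
Vec2f                               spotlightCenter2D;
Vec3f                               spotlightCenter3D;
float                               spotlightRadius;
bool                                detected, debugView;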