void CinderVideoStreamServerApp::update() {
    // Grab the newest webcam frame (if any), queue its pixels for the server
    // thread, and refresh the preview texture and the status line.
    if( mCapture && mCapture->checkNewFrame() ) {
        Surface8uRef surf = mCapture->getSurface();
#ifdef USE_JPEG_COMPRESSION
        // Encode the frame as an in-memory JPEG at the current quality setting.
        OStreamMemRef os = OStreamMem::create();
        DataTargetRef target = DataTargetStream::createRef( os );
        writeImage( target, *surf, ImageTarget::Options().quality(mQuality), "jpeg" );
        const void *data = os->getBuffer();
        size_t dataSize = os->tell(); // stream position == encoded JPEG size
        totalStreamSize += dataSize;  // running byte total for the kB/sec readout below

        // Round-trip: copy the JPEG bytes into a Buffer and decode them back
        // to a Surface, so the queued pixels reflect the lossy-compressed image.
        BufferRef bufRef = Buffer::create(dataSize);
        memcpy(bufRef->getData(), data, dataSize);
        SurfaceRef jpeg = Surface::create(loadImage( DataSourceBuffer::create(bufRef)), SurfaceConstraintsDefault(), false );

        // NOTE(review): a raw pixel pointer is pushed while `jpeg` is a local
        // ref-counted Surface — if the server thread reads it after this scope
        // ends the pointer may dangle. Verify the consumer copies the data
        // before the Surface is released.
        queueToServer->push(jpeg->getData());
        mTexture = gl::Texture::create( *jpeg );
        mStatus.assign("Streaming JPG (")
            .append(std::to_string((int)(mQuality*100.0f)))
            .append("%) ")
            .append(std::to_string((int)(totalStreamSize*0.001/getElapsedSeconds())))
            .append(" kB/sec ")
            .append(std::to_string((int)getFrameRate()))
            .append(" fps ");
#else
        // Uncompressed path: queue the raw capture pixels directly.
        // NOTE(review): same lifetime concern as above — `surf` may be
        // recycled by the capture; confirm the consumer finishes first.
        queueToServer->push(surf->getData());
        mTexture = gl::Texture::create( *surf );
        mStatus.assign("Streaming ").append(std::to_string((int)getFrameRate())).append(" fps");
#endif
    }
}
void SlytherinApp::setup() { // setup webcam try { mCapture = Capture::create(640, 480); mCapture->start(); } catch(...) { console() << "ERROR - failed to initialize capture" << endl; quit(); } // setup webcam FBO gl::Fbo::Format format; format.enableColorBuffer(true); format.enableDepthBuffer(false); format.setWrap(GL_CLAMP, GL_CLAMP); mFBO = gl::Fbo(mCapture->getWidth(), mCapture->getHeight(), format); mFBO.bindFramebuffer(); gl::setViewport(mFBO.getBounds()); gl::clear(); mFBO.unbindFramebuffer(); setFrameRate(60.0f); mLastUpdateFrame = UINT32_MAX; mLinesPerFrame = 2.0f; // 1 line every 2 frames (at getFrameRate()) mLineIndex = 0; }
void SlytherinApp::update() {
    uint32_t elapsedFrames = getElapsedFrames();
    // First run (mLastUpdateFrame == UINT32_MAX sentinel) always wants a
    // frame; afterwards, process once enough frames have elapsed to cover at
    // least one scanline.
    bool needsFrame = mLastUpdateFrame == UINT32_MAX || (elapsedFrames - mLastUpdateFrame) * mLinesPerFrame >= 1.0f;
    if (mCapture && needsFrame && mCapture->checkNewFrame()) {
        // Number of scanlines to consume this update.
        // NOTE(review): on the first run, elapsedFrames - UINT32_MAX wraps to
        // elapsedFrames + 1 (unsigned arithmetic) — confirm that is intended.
        // NOTE(review): mLinesPerFrame multiplies a frame delta, so 2.0f means
        // 2 lines *per frame*, not "1 line every 2 frames" as the setup()
        // comment says — verify which is intended.
        uint32_t lineCount = (uint32_t)floorf((elapsedFrames - mLastUpdateFrame) * mLinesPerFrame);
        Surface8u surface = mCapture->getSurface();
        Texture texture = Texture(surface);
        if (mLineIndex + lineCount < mFBO.getHeight()) {
            // single segment: mLineIndex to mLineIndex + lineCount
            console() << "process (" << mLineIndex << "-" << mLineIndex+lineCount << ") on frame " << elapsedFrames << endl;
            mLineIndex += lineCount;
        } else {
            // two segments: mLineIndex to mFBO.getHeight() - 1, then wrap to
            // the top of the FBO.
            // NOTE(review): the +1 makes the two segments total lineCount + 1
            // lines when mLineIndex + lineCount == getHeight() — possible
            // off-by-one; confirm against the drawing code.
            uint32_t overflowLineCount = mLineIndex + lineCount - mFBO.getHeight() + 1;
            // 0 to overflowLineCount
            console() << "process (" << mLineIndex << "-" << mFBO.getHeight() - 1 << ") and (0-" << overflowLineCount << ") on frame " << elapsedFrames << endl;
            mLineIndex = overflowLineCount;
        }
        mLastUpdateFrame = elapsedFrames;
    }
}
void projections_balletApp::update() {
    // Refresh the preview texture whenever the webcam delivers a new frame.
    if( ! mCapture || ! mCapture->checkNewFrame() )
        return;
    mTexture = gl::Texture::create( mCapture->getSurface() );
}
void HexagonMirrorApp::update() { // update webcam image if( mCapture && mCapture->checkNewFrame() ) mCaptureTexture = gl::Texture( mCapture->getSurface() ); else mCaptureTexture = mDummyTexture; }
void projections_balletApp::keyDown( KeyEvent event ) {
    const char key = event.getChar();
    if( key == 'f' ) {
        // Toggle fullscreen mode.
        setFullScreen( ! isFullScreen() );
    } else if( key == ' ' ) {
        // Space toggles the webcam between capturing and stopped.
        if( mCapture && mCapture->isCapturing() ) {
            mCapture->stop();
        } else {
            mCapture->start();
        }
    } else if( key == 'c' ) {
        // Enter configuration mode and discard any stored window coordinates.
        mode = CONFIG;
        window_coords.clear();
    }
}
void HexagonMirrorApp::setup() { // initialize camera CameraPersp cam; cam.setEyePoint( Vec3f(90, 70, 90) ); cam.setCenterOfInterestPoint( Vec3f(90, 70, 0) ); cam.setFov( 60.0f ); mCamera.setCurrentCam( cam ); // load shader try { mShaderInstanced = gl::GlslProg( loadAsset("phong_vert.glsl"), loadAsset("phong_frag.glsl") ); } catch( const std::exception &e ) { console() << "Could not load and compile shader: " << e.what() << std::endl; } // create a vertex array object, which allows us to efficiently position each instance initializeBuffer(); // load hexagon mesh loadMesh(); // connect to a webcam try { mCapture = Capture::create( 160, 120 ); mCapture->start(); } catch( const std::exception &e ) { console() << "Could not connect to webcam: " << e.what() << std::endl; try { mCaptureTexture = loadImage( loadAsset("placeholder.png") ); } catch( const std::exception &e ) { } } }
void WayFinderApp::setup() { println("WayFinderApp started."); // Load destinations from config file. destinations = Destination::getDestinations(); if(destinations.size() == 0) { println("No destinations found, check the config file."); exit(EXIT_FAILURE); } println("Destinations loaded."); // Initialized state. spotlightRadius = (float)getWindowWidth() / 16.0f; arrowLength = (float)min(getWindowWidth(), getWindowHeight()) / 2.0f; spotlightCenter2D = Vec2f((float)getWindowWidth() / 2.0f, (float)getWindowHeight() / 2.0f); spotlightCenter3D = Vec3f((float)getWindowWidth() / 2.0f, (float)getWindowHeight() / 2.0f, 0.0f); detected = false; //capture = Capture::create(WayFinderApp::WIDTH, WayFinderApp::HEIGHT); capture = Capture::create(getWindowWidth(), getWindowHeight()); capture->start(); //bg.set("bShadowDetection", false); bg.set("nmixtures", 3); bg.setBool("detectShadows", true); debugView = false; }
void CinderCalibrationApp::setup() { mState = STATE_DETECT; mImages = 0; showDistorted = true; mAvgError = 0; try { mCapture = Capture::create( CAPTURE_WIDTH, CAPTURE_HEIGHT ); mCapture->start(); console() << mCapture->getSize() << endl;; console() << getWindowSize() << endl; } catch( ... ) { console() << "Failed to initialize capture" << std::endl; } int numSquares = BOARD_CORNERS_X * BOARD_CORNERS_Y; for( int j = 0;j < numSquares; j++ ) { obj.push_back( Point3f( j / BOARD_CORNERS_X, j % BOARD_CORNERS_X, 0.0f ) ); } }
void CinderCalibrationApp::update() { if ( mCapture->checkNewFrame() ) { mCaptureSurf = mCapture->getSurface(); mCaptureTex = gl::Texture::create( mCaptureSurf ); mCaptureMat = toOcv( mCaptureSurf ); collectImages(); switch ( mState ) { case STATE_CALIBRATING: if ( callibrate() ) { mState = STATE_CALIBRATED; } else { console() << "Calibration failed." << endl; exit(1); } case STATE_CALIBRATED: undistort( mCaptureMat, mUndistortedMat, intrinsic, distortion ); } } }
void camerasApp::setup() { glEnable( GL_CULL_FACE ); glFrontFace( GL_CW ); // the default camera inverts to a clockwise front-facing direction mDrawVerbose = true; mUseConstraintAxis = false; mCurrentMouseDown = mInitialMouseDown = Vec2i( 200, 200 ); mCapture = Capture::create( 320,240 ); mCapture->start(); }
void projections_balletApp::setup(){ // print the devices for( auto device = Capture::getDevices().begin(); device != Capture::getDevices().end(); ++device ) { console() << "Device: " << (*device)->getName() << " " << std::endl; try { mCapture = Capture::create( 640, 480 ); mCapture->start(); } catch( ... ) { console() << "Failed to initialize capture" << std::endl; } } }
void CinderVideoStreamServerApp::setup() {
    // list out the devices
    //setFrameRate(30);

    // Start the webcam; capture failures are logged and the app continues
    // (update() checks mCapture before use).
    try {
        mCapture = Capture::create( WIDTH, HEIGHT );
        mCapture->start();
    }
    catch( ci::Exception &exc ) {
        console() << "Failed to initialize capture, what: " << exc.what() << std::endl;
    }

    // NOTE(review): raw `new` with no visible matching delete — the queue
    // leaks at shutdown; consider a smart pointer (requires changing the
    // member's declared type).
    queueToServer = new ph::ConcurrentQueue<uint8_t*>();

    // Spawn the streaming-server thread.
    // NOTE(review): the thread is detached, so nothing joins it at shutdown —
    // verify threadLoop() exits cleanly when `running` is cleared.
    mServerThreadRef = std::shared_ptr<std::thread>(new std::thread(std::bind(&CinderVideoStreamServerApp::threadLoop, this)));
    mServerThreadRef->detach();
    if (!running) running = true;

    totalStreamSize = 0.0;
    mQuality = 0.1f; // initial JPEG compression quality (10%)
}
void camerasApp::update() {
    // Upload the newest webcam frame to the preview texture, if one arrived.
    if( ! ( mCapture && mCapture->checkNewFrame() ) )
        return;
    mTexture = gl::Texture::create( mCapture->getSurface() );
}
void WayFinderApp::update() {
    // Run the (relatively expensive) detection pipeline only every
    // FRAME_COUNT_THRESHOLD frames.
    if(getElapsedFrames() % FRAME_COUNT_THRESHOLD == 0) {
        detected = false;
        // TODO: Consider converting capture to grayscale or blurring then thresholding to improve performance.
        if(capture && capture->checkNewFrame()) {
            frame = toOcv(capture->getSurface());
            //cv::Mat frameGray, frameBlurred, frameThresh, foreGray, backGray;
            //cvtColor(frame, frameGray, CV_BGR2GRAY);
            int blurAmount = 10; // only referenced by the commented-out blur below
            //cv::blur(frame, frameBlurred, cv::Size(blurAmount, blurAmount));
            //threshold(frameBlurred, frameThresh, 100, 255, CV_THRESH_BINARY);

            // Background subtraction, then clean the foreground mask
            // (erode/dilate) and extract the outer contours of moving blobs.
            //bg.operator()(frameThresh,fore);
            bg.operator()(frame, fore);
            bg.getBackgroundImage(back);
            cv::erode(fore, fore, cv::Mat());
            cv::dilate(fore, fore, cv::Mat());
            cv::findContours(fore, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);

            // Get largest contour: http://stackoverflow.com/questions/15012073/opencv-draw-draw-contours-of-2-largest-objects
            // ("largest" here means most vertices, not largest area).
            unsigned largestIndex = 0;
            unsigned largestContour = 0;
            for(unsigned i = 0; i < contours.size(); i++) {
                if(contours[i].size() > largestContour) {
                    largestContour = contours[i].size();
                    largestIndex = i;
                }
            }

            vector<std::vector<cv::Point>> hack;
            cv::Rect rect;
            cv::Point center;
            if(contours.size() > 0) {
                hack.push_back(contours[largestIndex]);
                // Find bounding rectangle for largest countour.
                rect = boundingRect(contours[largestIndex]);
                // Make sure the blog is large enough to be a track-worthy.
                println("Rext area = " + boost::lexical_cast<std::string>(rect.area()));
                if(rect.area() >= 5000) { // TODO: Tweak this value.
                    // Get center of rectangle.
                    center = cv::Point(
                        rect.x + (rect.width / 2),
                        rect.y + (rect.height / 2)
                    );
                    // Show guide: move the spotlight to the detected blob.
                    spotlightCenter2D.x = (float)center.x;
                    spotlightCenter2D.y = (float)center.y;
                    spotlightCenter3D.x = (float)center.x;
                    spotlightCenter3D.y = (float)center.y;
                    //spotlightRadius = (rect.width + rect.y) / 2;
                    detected = true;
                }
            }

            // When debug mode is off, the background should be black.
            if(debugView) {
                // Overlay all contours (red), the largest contour (blue), and
                // its bounding box / center (green) for visual inspection.
                if(contours.size() > 0) {
                    cv::drawContours(frame, contours, -1, cv::Scalar(0, 0, 255), 2);
                    cv::drawContours(frame, hack, -1, cv::Scalar(255, 0, 0), 2);
                    rectangle(frame, rect, cv::Scalar(0, 255, 0), 3);
                    circle(frame, center, 10, cv::Scalar(0, 255, 0), 3);
                }
                mTexture = gl::Texture(fromOcv(frame));
            }
        }
        // TODO: Create control panel for all inputs.
    }
}