Example #1
File: ffmpeg_impl.hpp  Project: 119/vdc
inline void CvCapture_FFMPEG::seek(s64 _frame_number)
{
    _frame_number = std::min(_frame_number, get_total_frames());
    int delta = 16;

    // if we have not grabbed a single frame before first seek, let's read the first frame
    // and get some valuable information during the process
    if( first_frame_number < 0 && get_total_frames() > 1 )
        grabFrame();

    for(;;)
    {
        s64 _frame_number_temp = std::max(_frame_number-delta, (s64)0);
        double sec = (double)_frame_number_temp / get_fps();
        s64 time_stamp = ic->streams[video_stream]->start_time;
        double  time_base  = r2d(ic->streams[video_stream]->time_base);
        time_stamp += (s64)(sec / time_base + 0.5);
        if (get_total_frames() > 1) av_seek_frame(ic, video_stream, time_stamp, AVSEEK_FLAG_BACKWARD);
        avcodec_flush_buffers(ic->streams[video_stream]->codec);
        if( _frame_number > 0 )
        {
            grabFrame();

            if( _frame_number > 1 )
            {
                frame_number = dts_to_frame_number(picture_pts) - first_frame_number;
                //printf("_frame_number = %d, frame_number = %d, delta = %d\n",
                //       (int)_frame_number, (int)frame_number, delta);

                if( frame_number < 0 || frame_number > _frame_number-1 )
                {
                    if( _frame_number_temp == 0 || delta >= INT_MAX/4 )
                        break;
                    delta = delta < 16 ? delta*2 : delta*3/2;
                    continue;
                }
                while( frame_number < _frame_number-1 )
                {
                    if(!grabFrame())
                        break;
                }
                frame_number++;
                break;
            }
            else
            {
                frame_number = 1;
                break;
            }
        }
        else
        {
            frame_number = 0;
            break;
        }
    }
}
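At the application level this seek routine is not called directly; the FFmpeg backend reaches it when cv::VideoCapture::set() is given the frame-position property. A minimal usage sketch, assuming an ordinary video file (the file name is a placeholder):

#include <opencv2/opencv.hpp>

int main()
{
    cv::VideoCapture cap("video.avi");          // placeholder file name
    if (!cap.isOpened())
        return 1;

    // Setting CAP_PROP_POS_FRAMES makes the backend seek backward to a
    // keyframe and then decode forward to the requested frame.
    cap.set(cv::CAP_PROP_POS_FRAMES, 500);
    cv::Mat frame;
    if (cap.read(frame)) {
        // frame now holds (approximately) the 500th frame of the video
    }
    return 0;
}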
Example #2
// this is a VERY slow fallback function, ONLY used if ffmpeg's av_seek_frame delivers no correct result!
bool CvCapture_FFMPEG::slowSeek( int framenumber )
{
    if ( framenumber>picture_pts )
    {
        while ( picture_pts<framenumber )
            if ( !grabFrame() ) return false;
    }
    else if ( framenumber<picture_pts )
    {
        reopen();
        while ( picture_pts<framenumber )
            if ( !grabFrame() ) return false;
    }
    return true;
}
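The same decode-forward idea can be written against the public cv::VideoCapture API when a container's timestamps cannot be trusted. A hedged sketch; the helper name and the rewind-to-zero restart are illustrative, not part of the original capture class:

#include <opencv2/opencv.hpp>

// Illustrative helper: reach a target frame by grabbing and discarding frames,
// rewinding first if the target lies behind the current position (compare
// reopen() in the fallback above).
static bool sequentialSeek(cv::VideoCapture& cap, int target)
{
    int current = static_cast<int>(cap.get(cv::CAP_PROP_POS_FRAMES));
    if (target < current) {
        cap.set(cv::CAP_PROP_POS_FRAMES, 0);    // rewind to the first frame
        current = 0;
    }
    while (current < target) {
        if (!cap.grab())                        // decode one frame and discard it
            return false;
        ++current;
    }
    return true;
}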
Example #3
bool ofxLibdc::grabVideo(ofImage& img, bool dropFrames) {
	setTransmit(true);
	img.allocate(width, height, imageType);
	if(dropFrames) {
		bool remaining;
		int i = 0;
		do {
			remaining = grabFrame(img);
			if(!remaining && i == 0)
				return false;
			i++;
		} while (remaining);
		return true;
	} else {
		return grabFrame(img);
	}
}
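A typical call site for the drop-frames path looks like the following minimal sketch, assuming an ofxLibdc grabber object named camera and an ofImage named img (both names are placeholders):

	// Inside an openFrameworks update() handler:
	if (camera.grabVideo(img, true)) {    // true: drain buffered frames, keep only the newest
		img.update();                     // upload the fresh pixels to the image's texture
	}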
Example #4
void highSpeed::update()
{
	if(frameIsNew()&&!bFetching){
		hsText.loadData(uPixels(),width,height,GL_BGR);
	}
	else if(!bFetching) grabFrame();
	else if(bFetching){ 
		fetchFrames();
	}
}
Example #5
void QVideoInputDevice::stop()
{
	if(_timer != NULL)
	{
		QObject::disconnect(_timer,SIGNAL(timeout()),this,SLOT(grabFrame())) ;
		_timer->stop() ;
		delete _timer ;
		_timer = NULL ;
	}
	if(_capture_device != NULL)
	{
		// the camera will be deinitialized automatically in VideoCapture destructor
		_capture_device->release();
		delete _capture_device ;
		_capture_device = NULL ;
	}
}
Example #6
bool CvCapture_Images::open(const char * _filename)
{
    unsigned offset = 0;
    close();

    filename = icvExtractPattern(_filename, &offset);
    if(!filename)
        return false;

    // determine the length of the sequence
    length = 0;
    char str[_MAX_PATH];
    for(;;)
    {
        sprintf(str, filename, offset + length);
        struct stat s;
        if(stat(str, &s))
        {
            if(length == 0 && offset == 0) // allow starting with 0 or 1
            {
                offset++;
                continue;
            }
        }

        if(!cvHaveImageReader(str))
            break;

        length++;
    }

    if(length == 0)
    {
        close();
        return false;
    }

    firstframe = offset;

    // grab frame to enable properties retrieval
    bool grabRes = grabFrame();
    grabbedInOpen = true;
    currentframe = 0;

    return grabRes;
}
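The pattern expansion above is what lets OpenCV treat a numbered image sequence like a video. In the C++ API the same behavior is reached by handing cv::VideoCapture a printf-style file pattern; a short sketch (the pattern is a placeholder):

#include <opencv2/opencv.hpp>

int main()
{
    // frame_0001.png, frame_0002.png, ... are delivered as consecutive frames.
    cv::VideoCapture seq("frame_%04d.png", cv::CAP_IMAGES);   // placeholder pattern
    cv::Mat img;
    while (seq.read(img)) {
        // process img; read() returns false once the sequence is exhausted
    }
    return 0;
}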
Example #7
void QVideoInputDevice::start()
{
	// make sure everything is re-initialised
	//
	stop() ;

	// Initialize the capture device
	static const int cam_id = 0 ;
	_capture_device = new cv::VideoCapture(cam_id);

	if(!_capture_device->isOpened())
	{
		std::cerr << "Cannot initialise camera. Something's wrong." << std::endl;
		return ;
	}

	_timer = new QTimer ;
	QObject::connect(_timer,SIGNAL(timeout()),this,SLOT(grabFrame())) ;

	_timer->start(50) ;	// 50 ms interval, i.e. 20 frames per second.
}
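The timer connects to a grabFrame() slot that is not shown in this example. A hedged sketch of what such a slot might do, assuming the _capture_device member shown above; the frameReady signal is hypothetical:

void QVideoInputDevice::grabFrame()
{
	if(_capture_device == NULL)
		return ;

	cv::Mat frame ;
	if(_capture_device->read(frame) && !frame.empty())
		emit frameReady(frame) ;	// hypothetical signal carrying the captured frame
}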
Example #8
bool MovieMaker::Snap()
{
	bool	result = false;
	
	if (movie && movieResRef && media && track)
	{
		OSStatus error = noErr;
		
		error = addFrame();
		
		if (error == noErr)
		{
			error = grabFrame();
		}

		if (error == noErr)
		{
			result = true;
		}
	}
	
	return result;
}
Example #9
IplImage* CWebcam::getFrame() {
	IplImage* image = (IplImage*)0;
	grabFrame();
	retrieveFrame();

	// color image
	if(frame) {
		if (frame->nChannels == 3) {
			int origin = frame->origin;
			CvMat* mat, stub;
			mat = cvGetMat( frame, &stub );
			cvConvertImage( mat, frame, CV_CVTIMG_SWAP_RB );
			image = cvCreateImage( cvSize(frame->width, frame->height), IPL_DEPTH_8U, 3 );
			image->widthStep = image->width * 3;

			if (origin == IPL_ORIGIN_TL)
				cvCopy( frame, image, 0);
			else
				cvFlip( frame, image, 0);
		}
	}

	return image;
}
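In the C++ API the channel swap and origin flip above collapse to two calls. A minimal sketch; the function and parameter names are placeholders:

#include <opencv2/opencv.hpp>

// Sketch: convert a BGR capture to RGB and, if the rows are stored
// bottom-to-top (IPL_ORIGIN_BL), flip it upright.
static cv::Mat toDisplayRGB(const cv::Mat& frame, bool bottomUp)
{
    cv::Mat rgb;
    cv::cvtColor(frame, rgb, cv::COLOR_BGR2RGB);   // swap the R and B channels
    if (bottomUp)
        cv::flip(rgb, rgb, 0);                     // flip around the x-axis
    return rgb;
}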
Example #10
void AVIReader::grabReadFrame(int Frame, unsigned char* imgdata) {
	skip(Frame);
	grabFrame();
	readCurFrameRGB(imgdata);
}
Example #11
void CameraHandle::grabFrameRight(sensor_msgs::Image& img, const ros::Time& triggerTime) {
  grabFrame(conf.portRight, img, triggerTime);
}
Example #12
void Grabber::update() {
	grabFrame();
}
Example #13
bool native_gl_render(float elapsed_)
{
    if(!init) {
        return false;
    }

    glImage image;
    image.width=800;
    image.height=480;
    image.textureID=textureId;

    spriteBatchDraw(0,0,GL2D_NO_SCALE|GL2D_NO_SRC,&image);

    elapsedTime+=elapsed_;
    totalTime+=elapsed_;

    bool notFinish=true;
#ifndef WIN32
    if(elapsedTime/1000000.0f>=movie_fps) {
#else
    if(elapsedTime>=movie_fps) {
#endif
        elapsedTime=0;
        notFinish=grabFrame(0);
    }
    return notFinish;
}



bool grabFrame(float elapsed_) {
    while(true) {
        if(av_read_frame(pFormatCtx, &packet) >= 0) {

            // Is this a packet from the video stream?
            if(packet.stream_index == videoStream) {
                // Decode video frame
                //clock_t time1 = clock();
                avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
                //clock_t time2 = clock();
                //__android_log_print(ANDROID_LOG_DEBUG,"avcodec_decode_video2", "%f", ((double)(time2-time1))/1000);

                // Did we get a video frame?
                if(frameFinished) {
                    struct SwsContext *pSWSContext = sws_getContext(pCodecCtx->width, pCodecCtx->height,
                                                     pCodecCtx->pix_fmt, FRAME_X, FRAME_Y, PIX_FMT_RGB24, SWS_BILINEAR, 0, 0, 0);

                    sws_scale(pSWSContext, (const uint8_t * const*)pFrame->data, pFrame->linesize, 0,
                              pCodecCtx->height, pFrameRGB->data, pFrameRGB->linesize);

                    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, FRAME_X, FRAME_Y, 0, GL_RGB, GL_UNSIGNED_BYTE, pFrameRGB->data[0]);
                    //printf("Frame Number: %d\n", i);
                    //__android_log_print(ANDROID_LOG_DEBUG,"Frame Number: ", "%d", i);
                    //i++;
                    frameNumber++;
                }
                av_free_packet(&packet);
                break;
            }
            else {
                av_free_packet(&packet);
                continue;
            }
        } else {
            return false;
        }
    }
    return true;
}

void native_init(const char * path)
{
    if(init)
        destroy_();

    pFormatCtx=NULL;
    pCodecCtx=NULL;
    pCodec=NULL;
    pFrame=NULL;
    pFrameRGB=NULL;
    buffer=NULL;
    m_video_stream=NULL;

    frameFinished=0;
    numBytes=0;
    i=0;
    height=0;
    width=0;

    textures[0]=0;
    textureId=0;

    elapsedTime=0;
    totalTime=0;
    frameNumber=0;


    init=false;
    elapsedTime=0;
    totalTime=0;
    frameNumber=0;

    // TURN ON 2D TEXTURE
    glEnable(GL_TEXTURE_2D);
    glDisable(GL_BLEND);

    // allocate a texture name
    glGenTextures(1, &textureId);

    // BIND THE TEXTURE
    glBindTexture(GL_TEXTURE_2D, textureId);

    // SET TEXTURE PARAMS
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);

    // REGISTER ALL THE CODECS AVAILABLE IN FFmpeg FOR USE
    av_register_all();

    bool isCopy;

    // LOAD FILE HEADERS
    int open = avformat_open_input(&pFormatCtx, path, NULL, 0);

#ifndef WIN32
    __android_log_print(ANDROID_LOG_DEBUG,"LOGGGGGG","START");
#endif

    if(open != 0) {
        // IO ERROR
#ifndef WIN32
        __android_log_print(ANDROID_LOG_DEBUG,"LOGGGGGG","could not open file. %d", open);
#endif
        printf( "could not open file.");
        return;
    }

    if(avformat_find_stream_info(pFormatCtx,NULL) < 0) {
        // STREAM INFO ERROR
#ifndef WIN32
        __android_log_print(ANDROID_LOG_DEBUG,"LOGGGGGG","could not find stream info.");
#endif
        printf( "could not find stream info.");
    }

    // FIND THE FIRST VIDEO STREAM
    videoStream = -1;
    for(i=0; i<pFormatCtx->nb_streams; i++) {
        if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            m_video_stream=pFormatCtx->streams[i];
            videoStream = i;
            break;
        }
    }
    if(videoStream == -1) {
#ifndef WIN32
        __android_log_print(ANDROID_LOG_DEBUG,"LOGGGGGG","didn't find a video stream.");
#endif
        printf( "didn't find a video stream.");
    }

    // POINTER TO CODEC FOR VIDEO STREAM
    pCodecCtx = pFormatCtx->streams[videoStream]->codec;

    // FIND VIDEO STREAM DECODER
    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
    if(pCodec == NULL) {
        // CODEC NOT FOUND
#ifndef WIN32
        __android_log_print(ANDROID_LOG_DEBUG,"LOGGGGGG","could not find codec.");
#endif
        printf("could not find codec.");
    }

    // OPEN CODEC
    if(avcodec_open(pCodecCtx, pCodec) < 0) {
        // OPEN CODEC ERROR
#ifndef WIN32
        __android_log_print(ANDROID_LOG_DEBUG,"LOGGGGGG","could not open codec.");
#endif
        printf( "could not open codec.");
    }

    // Allocate video frame
    pFrame = avcodec_alloc_frame();

    // Allocate an AVFrame structure
    pFrameRGB = avcodec_alloc_frame();
    if(pFrameRGB == NULL) {
#ifndef WIN32
        __android_log_print(ANDROID_LOG_DEBUG,"LOGGGGGG","null pFrameRGB.");
#endif
        printf("null pFrameRGB.");
    }

    // Determine required buffer size and allocate buffer
    numBytes = avpicture_get_size(PIX_FMT_RGB24, FRAME_X, FRAME_Y);
    buffer = (uint8_t *)av_malloc(numBytes*sizeof(uint8_t));

    // Assign appropriate parts of buffer to image planes in pFrameRGB
    // Note that pFrameRGB is an AVFrame, but AVFrame is a superset
    // of AVPicture
    avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24, FRAME_X, FRAME_Y);
#ifndef WIN32
    __android_log_print(ANDROID_LOG_DEBUG,"LOGGGGGG","LOADING SUCCESSFUL");
#endif
    init=true;
}

void destroy_()
{
    if(init) {

        if(textureId!=0)
        {
            glDeleteTextures(1,&textureId);
            textureId=0;
        }

        // Free the RGB image
        av_free(buffer);
        av_free(pFrameRGB);

        // Free the YUV frame
        av_free(pFrame);

        // Close the codec
        avcodec_close(pCodecCtx);

        // Close the video file
        avformat_close_input(&pFormatCtx);
        init=false;
    }
}
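Several FFmpeg calls in this example (av_register_all, avcodec_alloc_frame, avcodec_decode_video2, avpicture_get_size/avpicture_fill, av_free_packet, avcodec_open) are deprecated or removed in current FFmpeg releases. A hedged sketch of the equivalent decode step against the send/receive API introduced in FFmpeg 3.1, assuming a format context, an opened decoder context and the video stream index are set up much as above:

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
}

// Sketch only, not a drop-in replacement for grabFrame() above.
static int decodeOneFrame(AVFormatContext *fmt, AVCodecContext *dec,
                          int videoStream, AVFrame *frame)
{
    AVPacket pkt;
    while (av_read_frame(fmt, &pkt) >= 0) {
        if (pkt.stream_index != videoStream) {
            av_packet_unref(&pkt);              // not the video stream, drop it
            continue;
        }
        int ret = avcodec_send_packet(dec, &pkt);
        av_packet_unref(&pkt);
        if (ret < 0)
            return ret;
        ret = avcodec_receive_frame(dec, frame);
        if (ret == AVERROR(EAGAIN))
            continue;                           // decoder needs more packets
        return ret;                             // 0 on success, <0 on error/EOF
    }
    return AVERROR_EOF;
}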
Example #14
/******************************************************************************
 * The update function runs continuously. Use it to update states and variables
 *****************************************************************************/
void testApp::update()
{	
    bNewFrame = false;
		
	if(activeInput){

		if(bcamera){
		   vidGrabber.grabFrame();
		   bNewFrame = vidGrabber.isFrameNew();
		}
		else{
			vidPlayer.idleMovie();
			bNewFrame = vidPlayer.isFrameNew();
		}

		
		if (bNewFrame)
		{
			ofBackground(110, 110, 110);
			
				
			//Calculate FPS of Camera
			frames++;
			float time = ofGetElapsedTimeMillis();
			if(time > (lastFPSlog + 1000)){		
				fps = frames;
				frames = 0;
				lastFPSlog = time;			
			}//End calculation

		
			if(bGPUMode){
				grabFrameToGPU(gpuSourceTex);
				applyGPUImageFilters();
				contourFinder.findContours(gpuReadBackImageGS, 1, (camWidth*camHeight)/25, 50, false);
			}
			else{
				grabFrame();
				applyImageFilters();
				contourFinder.findContours(processedImg, 1, (camWidth*camHeight)/25, 50, false);
				if(bFlowing){
					//FLOW
					grayImg.threshold(100);
					opticalFlowLK.calc(grayImg,processedImg,11);
					grayImg.blurHeavily();
					opticalFlowBM.calc(grayImg,processedImg,5);
				}
			}
			
			//Track found contours/blobs
			tracker.track(&contourFinder);
					
			/**************************************************
			* Background subtraction learn rate
			* If there are no blobs, add the background faster.
			* If there ARE blobs, add the background slower.
			***************************************************/
			if(bDynamicBG){
				fLearnRate = 0.01f;			
				if(contourFinder.nBlobs > 0){
					fLearnRate = 0.0003f;
				}
			}//End Background Learning rate


			if(bTUIOMode){
				//We're not using frameseq right now with OSC
				//myTUIO.update();

				//Start sending OSC
				myTUIO.sendOSC();
			}
		}
	} 
}
Example #15
//--------------------------------------------------------------------
void ofVideoGrabber::update(){
	grabFrame();
}
Example #16
//--------------------------------------------------------------------
void ofxVideoGrabber::update()
{
	grabFrame();
	settings->update();
}
Example #17
//--------------------------------------------------------------------
void ofxVideoStreamer::update(){
	grabFrame();
}
Example #18
OSStatus MovieMaker::setupMovie()
{
	OSStatus	error = noErr;
	FSRef		fileRef;
	FSSpec		fileSpec;
	
	rowBytes = width * 4;
	bufferSize = height * rowBytes;
	buffer = (char*)malloc(bufferSize);
	invertedBuffer = (char*)malloc(bufferSize);

	rect.left = 0;
	rect.top = 0;
	rect.right = width;
	rect.bottom = height;
	
	error = NewGWorldFromPtr(&gworld, k32ARGBPixelFormat, &rect, 0, 0, 0, buffer, rowBytes);

	if (error == noErr)
	{
		LockPixels(GetGWorldPixMap(gworld));
	}

// MBW -- I think this needs to happen after all the dialogs, etc.	
//	if (error == noErr)
//	{
//		Microseconds(&lastFrameTime);
//		error = grabFrame();
//	}

	if (error == noErr)
	{
		error = EnterMovies();
	}
	
	if (error == noErr)
	{
		ci = OpenDefaultComponent(StandardCompressionType,StandardCompressionSubType);
		if(ci == NULL)
			error = paramErr;
	}
		
	if (error == noErr)
	{
		long flags;
		
		SCGetInfo(ci,scPreferenceFlagsType,&flags);
		flags &= ~scShowBestDepth;
		flags |= scAllowZeroFrameRate;
		SCSetInfo(ci,scPreferenceFlagsType,&flags);
	}

	if (error == noErr)
	{
		send_agent_pause();
		gViewerWindow->mWindow->beforeDialog();

		error = SCRequestSequenceSettings(ci);

		gViewerWindow->mWindow->afterDialog();
		send_agent_resume();

		if (error == scUserCancelled) 
		{
			// deal with user cancelling.
			EndCapture();
		}
	}
	
	if (error == noErr)
	{
		// This is stoopid. I have to take the passed full path, create the file so I can get an FSRef, and Get Info to get the FSSpec for QuickTime. Could Apple make this any more difficult...
		FILE* file = LLFile::fopen(fname, "w");		/* Flawfinder: ignore */
		if (file)
		{
			fclose(file);
			
			error = FSPathMakeRef((UInt8*)fname, &fileRef, NULL);
			if (error == noErr)
				error = FSGetCatalogInfo(&fileRef, 0, NULL, NULL, &fileSpec, NULL);
		}
		else
		{
			error = paramErr;
		}
	}
	
	if (error == noErr)
	{
		error = CreateMovieFile(&fileSpec, 'TVOD', smCurrentScript, createMovieFileDeleteCurFile | createMovieFileDontCreateResFile, &movieResRef, &movie);
	}
	
	if (error == noErr)
	{
		track = NewMovieTrack(movie, FixRatio(width, 1), FixRatio(height, 1), kNoVolume);
		error = GetMoviesError();
	}
	
	if (error == noErr)
	{
		media = NewTrackMedia(track, VideoMediaType, 600, NULL, 0);
		error = GetMoviesError();
	}
	
	if (error == noErr)
	{
		Microseconds(&lastFrameTime);
		error = grabFrame();
	}

	if (error == noErr)
	{
		error = SCCompressSequenceBegin(ci,GetPortPixMap(gworld),nil,&idh);
	}

	if (error == noErr)
	{
		error = BeginMediaEdits(media);
	}
	
	if (error != noErr)
	{
		media = NULL;
	}
	
	return error;
}
Example #19
/**************************************************************************************
** Main device loop. We check for exposure and temperature progress here
***************************************************************************************/
void SimpleDetector::TimerHit()
{
    double timeleft;

    if (!isConnected())
        return; //  No need to reset timer if we are not connected anymore

    if (InCapture)
    {
        timeleft = CalcTimeLeft();

        // Less than a 0.1 second away from exposure completion
        // This is an over simplified timing method, check DetectorSimulator and simpleDetector for better timing checks
        if (timeleft < 0.1)
        {
            /* We're done exposing */
            IDMessage(getDeviceName(), "Capture done, downloading image...");

            // Set exposure left to zero
            PrimaryDetector.setCaptureLeft(0);

            // We're no longer exposing...
            InCapture = false;

            /* grab and save image */
            grabFrame();
        }
        else
            // Just update time left in client
            PrimaryDetector.setCaptureLeft(timeleft);
    }

    // TemperatureNP is defined in INDI::Detector
    switch (TemperatureNP.s)
    {
        case IPS_IDLE:
        case IPS_OK:
            break;

        case IPS_BUSY:
            /* If target temperature is higher, then increase current Detector temperature */
            if (currentDetectorTemperature < TemperatureRequest)
                currentDetectorTemperature++;
            /* If target temperature is lower, then decrease current Detector temperature */
            else if (currentDetectorTemperature > TemperatureRequest)
                currentDetectorTemperature--;
            /* If they're equal, stop updating */
            else
            {
                TemperatureNP.s = IPS_OK;
                IDSetNumber(&TemperatureNP, "Target temperature reached.");

                break;
            }

            IDSetNumber(&TemperatureNP, nullptr);

            break;

        case IPS_ALERT:
            break;
    }

    SetTimer(POLLMS);
}
Example #20
File: precomp.hpp  Project: 353/viewercv
 virtual IplImage* queryFrame() { return grabFrame() ? retrieveFrame(0) : 0; }
Example #21
//--------------------------------------------------------------------
void ofxFireFlyMv::update(){
	grabFrame();
}
Example #22
// image calculation
// load frame from video
void VideoFFmpeg::calcImage (unsigned int texId, double ts)
{
	if (m_status == SourcePlaying)
	{
		// get actual time
		double startTime = PIL_check_seconds_timer();
		double actTime;
		// timestamp passed from audio actuators can sometimes be slightly negative
		if (m_isFile && ts >= -0.5)
		{
			// allow setting timestamp only when not streaming
			actTime = ts;
			if (actTime * actFrameRate() < m_lastFrame)
			{
				// user is asking to rewind, force a cache clear to make sure we will do a seek
				// note that this does not decrement m_repeat if ts didn't reach m_range[1]
				stopCache();
			}
		}
		else
		{
			if (m_lastFrame == -1 && !m_isFile)
				m_startTime = startTime;
			actTime = startTime - m_startTime;
		}
		// if video has ended
		if (m_isFile && actTime * m_frameRate >= m_range[1])
		{
			// in any case, this resets the cache
			stopCache();
			// if repeats are set, decrease them
			if (m_repeat > 0)
				--m_repeat;
			// if video has to be replayed
			if (m_repeat != 0)
			{
				// reset its position
				actTime -= (m_range[1] - m_range[0]) / m_frameRate;
				m_startTime += (m_range[1] - m_range[0]) / m_frameRate;
			}
			// if video has to be stopped, stop it
			else
			{
				m_status = SourceStopped;
				return;
			}
		}
		// actual frame
		long actFrame = (m_isImage) ? m_lastFrame+1 : long(actTime * actFrameRate());
		// if actual frame differs from last frame
		if (actFrame != m_lastFrame)
		{
			AVFrame* frame;
			// get image
			if ((frame = grabFrame(actFrame)) != NULL)
			{
				if (!m_isFile && !m_cacheStarted)
				{
					// streaming without cache: detect synchronization problem
					double execTime = PIL_check_seconds_timer() - startTime;
					if (execTime > 0.005)
					{
						// exec time is too long, it means that the function was blocking
						// resynchronize the stream from this time
						m_startTime += execTime;
					}
				}
				// save actual frame
				m_lastFrame = actFrame;
				// init image, if needed
				init(short(m_codecCtx->width), short(m_codecCtx->height));
				// process image
				process((BYTE*)(frame->data[0]));
				// finished with the frame, release it so that cache can reuse it
				releaseFrame(frame);
				// in case it is an image, automatically stop reading it
				if (m_isImage)
				{
					m_status = SourceStopped;
					// close the file as we don't need it anymore
					release();
				}
			} else if (m_isStreaming)
			{
				// we didn't get a frame and we are streaming, this may be due to
				// a delay in the network or because we are getting the frame too fast.
				// In the latter case, shift time by a small amount to compensate for a drift
				m_startTime += 0.001;
			}
		}
	}
}
Example #23
File: V4L.cpp  Project: krejmano/DisCODe
/*!
 * Method gets one frame from frame grabber.
 */
IplImage * V4L::getOneFrame() {
    if (grabFrame() && retFrame()) {
        return &frame;
    } else
        return NULL;
}
Example #24
void ofxLibdcGrabber::update() {
	grabFrame();
}
Example #25
//--------------------------------------------------------------------
void ofxVideoGrabberPtgrey::update(){
	grabFrame();
}
Example #26
void renderScene(void)
{

    float t_flow = 0.0f;
    t_flow = grabFrame();

    // display the pixel buffer
    glBindTexture(GL_TEXTURE_RECTANGLE_NV, tex);
    // when the pbo is active, the source for this copy is the pbo
    if( capture) glTexSubImage2D(GL_TEXTURE_RECTANGLE_NV, 0, 0, 0, gw, gh, GL_BGR, GL_UNSIGNED_BYTE, c_image->imageData );
    assert( glGetError() == GL_NO_ERROR );

    //Set the clear color (black)
    glClearColor(0.0,0.0,0.0,1.0);
    //Clear the color buffer
    glClear(GL_COLOR_BUFFER_BIT);

    //stretch to screen
    glViewport(0,0,displayW,displayH);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    gluOrtho2D(0,gw,gh,0);
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();

	if( true ) {
		glEnable(GL_TEXTURE_RECTANGLE_NV);
		glBindTexture(GL_TEXTURE_RECTANGLE_NV, tex) ;

		glBegin(GL_QUADS);

		glTexCoord2f(0, 0 );
		glVertex2f(0.0, 0 );

		glTexCoord2f(0, gh);
		glVertex2f(0.0, gh);

		glTexCoord2f(gw, gh);
		glVertex2f(gw,gh);

		glTexCoord2f( gw, 0 );
		glVertex2f(gw, 0);
		glEnd();
		glDisable(GL_TEXTURE_RECTANGLE_NV);

	}

    renderQuiver(gw,gh);
    //swap buffers (double buffering)
    int vertPos = 20;
    char str[256] ;
    glColor3f(.8f, .8f, .2f);
    sprintf(str, "Lucas Kanade Pyramidal Optical Flow,   Dense (%dx%d points)", gw, gh );
    renderBitmapString( 10, vertPos, GLUT_BITMAP_HELVETICA_18,str); vertPos += 20;
    if( state.use_IPP ) {
#ifdef USE_IPP
        sprintf(str, "Hardware: CPU");
#else
		sprintf(str, "IPP Not enabled.");
#endif
	} else {
        sprintf(str, "Hardware: %s", device_string);
    }
    renderBitmapString( 10, vertPos, GLUT_BITMAP_HELVETICA_18,str); vertPos += 20;
    sprintf(str, "Processing Time/frame: %f ms", t_flow );
    renderBitmapString( 10, vertPos, GLUT_BITMAP_HELVETICA_18,str); vertPos +=20;

    glutSwapBuffers();
	if(state.bqatest) exit(0);
}
Example #27
//--------------------------------------------------------------------
void ofxVideoGrabberPvAPI::update(){
	grabFrame();
}