Frame* V8Proxy::retrieveFrameForCallingContext()
{
    // Map the calling V8 context back to its WebCore frame; null if there
    // is no calling context.
    v8::Handle<v8::Context> callingContext = v8::Context::GetCalling();
    if (!callingContext.IsEmpty())
        return retrieveFrame(callingContext);
    return 0;
}
void AMRDeinterleaver::doGetNextFrame() {
  // Try to satisfy the request from the deinterleaving buffer first.
  bool haveBufferedFrame =
      fDeinterleavingBuffer->retrieveFrame(fTo, fMaxSize,
                                           fFrameSize, fNumTruncatedBytes,
                                           fLastFrameHeader, fPresentationTime,
                                           fInputSource->isSynchronized());
  if (haveBufferedFrame) {
    // A frame was available; deliver it.
    fNeedAFrame = False;
    fDurationInMicroseconds = uSecsPerFrame;

    // We're not a 'leaf' source, so calling the 'after getting' function
    // directly cannot cause infinite recursion.
    afterGetting(this);
  } else {
    // The buffer is empty; request more data from the upstream source
    // (unless a request is already pending).
    fNeedAFrame = True;
    if (!fInputSource->isCurrentlyAwaitingData()) {
      fInputSource->getNextFrame(fDeinterleavingBuffer->inputBuffer(),
                                 fDeinterleavingBuffer->inputBufferSize(),
                                 afterGettingFrame, this,
                                 FramedSource::handleClosure, this);
    }
  }
}
Frame* V8Proxy::retrieveFrameForEnteredContext()
{
    // Map the innermost entered V8 context back to its WebCore frame;
    // null if no context has been entered.
    v8::Handle<v8::Context> enteredContext = v8::Context::GetEntered();
    if (!enteredContext.IsEmpty())
        return retrieveFrame(enteredContext);
    return 0;
}
Exemple #4
0
ScriptDebugListener* PageScriptDebugServer::getDebugListenerForContext(v8::Handle<v8::Context> context)
{
    v8::HandleScope scope;
    // A context without an associated frame has no registered listener.
    if (Frame* frame = retrieveFrame(context))
        return m_listenersMap.get(frame->page());
    return 0;
}
Exemple #5
0
void PageScriptDebugServer::runMessageLoopOnPause(v8::Handle<v8::Context> context)
{
    v8::HandleScope scope;
    Frame* frame = retrieveFrame(context);
    // retrieveFrame() can return 0 for a context with no associated frame
    // (the other callers in this file all check for that); bail out instead
    // of dereferencing a null pointer.
    if (!frame)
        return;
    m_pausedPage = frame->page();

    // Run a nested message loop; blocks until a continue or step command
    // resumes execution.
    m_clientMessageLoop->run(m_pausedPage);

    // The listener may have been removed in the nested loop.
    if (ScriptDebugListener* listener = m_listenersMap.get(m_pausedPage))
        listener->didContinue();

    m_pausedPage = 0;
}
Exemple #6
0
// Grabs and decodes one frame from the webcam, returning it as a freshly
// allocated 3-channel 8-bit image with top-left origin and the R/B channels
// swapped. Returns NULL if no frame is available or the captured frame is
// not 3-channel. NOTE(review): the result comes from cvCreateImage, so the
// caller is responsible for releasing it with cvReleaseImage.
IplImage* CWebcam::getFrame() {
	IplImage* image = (IplImage*)0;
	grabFrame();
	retrieveFrame();	// presumably populates the 'frame' member -- confirm

	// color image
	if(frame) {
		if (frame->nChannels == 3) {
			int origin = frame->origin;
			CvMat* mat, stub;
			// Matrix header over 'frame' -- no pixel data is copied.
			mat = cvGetMat( frame, &stub );
			// In-place R<->B channel swap on the captured frame.
			cvConvertImage( mat, frame, CV_CVTIMG_SWAP_RB );
			image = cvCreateImage( cvSize(frame->width, frame->height), IPL_DEPTH_8U, 3 );
			// NOTE(review): this overwrites the row stride chosen by
			// cvCreateImage, assuming width*3 needs no alignment padding --
			// confirm downstream consumers expect a tightly packed layout.
			image->widthStep = image->width * 3;

			// Normalize to top-left origin: plain copy if already TL,
			// otherwise flip vertically.
			if (origin == IPL_ORIGIN_TL)
				cvCopy( frame, image, 0);
			else
				cvFlip( frame, image, 0);
		}
	}

	return image;
}
Exemple #7
0
// Demuxes and decodes the next video frame.
// On success the decoded image is exposed via retrieveFrame() through
// dataRGB/step/width/height/cn, and a copy of the raw compressed packet is
// returned through *data/*size (copied into the pData member buffer).
// Returns false when no frame could be obtained: missing stream, declared
// frame count exhausted, or too many consecutive non-video/undecodable
// packets.
inline bool CvCapture_FFMPEG::grabFrame(unsigned char** dataRGB, int* step, int* width, int* height, int* cn,
        unsigned char** data, int *size)
{
    bool valid = false;
    int got_picture;

    // Give up after this many packets that fail to yield a video frame,
    // so a stream with no decodable video can't spin forever.
    int count_errs = 0;
    const int max_number_of_attempts = 1 << 16;

    if( !ic || !video_st )  return false;

    // Stop once every frame the container declares has been delivered.
    if( ic->streams[video_stream]->nb_frames > 0 &&
        frame_number > ic->streams[video_stream]->nb_frames )
        return false;

    // Release whatever packet was left over from the previous call.
    av_free_packet (&packet);

    picture_pts = AV_NOPTS_VALUE_;

    unsigned long start;
    unsigned long end;

    // get the next frame
    while (!valid)
    {
        start = GetTickCount();
		//while (1){
        int ret = av_read_frame(ic, &packet);

        //printf("(%s,%d) Size %d ret %d %d\n", __FUNCTION__, __LINE__, packet.size, ret, 
	//		GetTickCount() - start);
		
        //DumpHex((char *)(packet.data), 20);
        // NOTE(review): AVERROR(-1) matches only one specific error code;
        // other failures (including AVERROR_EOF) fall through to the decode
        // path below with a possibly empty packet -- confirm this is the
        // intended end-of-stream handling.
        if (ret == AVERROR(-1)) continue;

	//	}

        /* else if (ret < 0) break; */

        // Skip packets that belong to other (e.g. audio) streams, bounded
        // by the retry budget above.
        if( packet.stream_index != video_stream )
        {
	     //printf("(%s,%d)\n", __FUNCTION__, __LINE__);
            av_free_packet (&packet);
            count_errs++;
            if (count_errs > max_number_of_attempts)
                break;
            continue;
        }

        start = GetTickCount();
        // Decode video frame (the decode API differs across FFmpeg versions).
        #if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(53, 2, 0)
            avcodec_decode_video2(video_st->codec, picture, &got_picture, &packet);
        #elif LIBAVFORMAT_BUILD > 4628
                avcodec_decode_video(video_st->codec,
                                     picture, &got_picture,
                                     packet.data, packet.size);
        #else
                avcodec_decode_video(&video_st->codec,
                                     picture, &got_picture,
                                     packet.data, packet.size);
        #endif
        //printf("(%s,%d) avcodec_decode_video2  %d\n", __FUNCTION__, __LINE__, 
	//		GetTickCount() - start);
        // Did we get a video frame?
        if(got_picture)
        {
         //printf("(%s,%d) got_picture  %d\n", __FUNCTION__, __LINE__, 
	//		GetTickCount() - start);
            //picture_pts = picture->best_effort_timestamp;
            // Record the first usable timestamp (prefer pts, fall back to dts).
            if( picture_pts == AV_NOPTS_VALUE_ )
                picture_pts = packet.pts != AV_NOPTS_VALUE_ && packet.pts != 0 ? packet.pts : packet.dts;
            frame_number++;
            valid = true;

            // Deliberately do NOT free the packet here: its raw bytes are
            // copied out below before it is released.
            break;
        }
        else
        {
            // Decoder needs more data; count it toward the retry budget.
            count_errs++;
            if (count_errs > max_number_of_attempts)
                break;
        }

        av_free_packet (&packet);
    }

    if( valid && first_frame_number < 0 )
        first_frame_number = dts_to_frame_number(picture_pts);
    *size = 0;
    *data = NULL;
    if (valid == true)
    {
        // Convert the decoded picture into the caller-visible RGB buffers.
        retrieveFrame(0, dataRGB, step, width, height, cn);
        *size = packet.size;
        *data = pData;
        // NOTE(review): assumes pData holds at least packet.size bytes --
        // the allocation isn't visible here; confirm against the class.
        memcpy(pData, packet.data, packet.size);

        av_free_packet (&packet);
    }

    // return if we have a new picture or not
    return valid;
}
Exemple #8
0
 virtual IplImage* queryFrame() {
   // Grab, then decode; a failed grab yields a null image.
   if (!grabFrame())
     return 0;
   return retrieveFrame(0);
 }