Example #1
0
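// Decode the next frame and copy its RGB plane into mImage.
// Returns false if no frame is available or the copy fails.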
bool VideoDecoder::UpdateFrame()
{
  if ( !GetNextFrame() )
    return false;

  return mImage.CopyRGBBuffer(mCodecContext->width, mCodecContext->height, mFrameRGB->data[0], mFrameRGB->linesize[0]);
}
Example #2
0
// -----------------------------------------------------------------------------
// CG711PayloadFormatRead::FillSinkBufferL
// Fills the sink buffer with the next decoded frame.
// -----------------------------------------------------------------------------
//
void CG711PayloadFormatRead::FillSinkBufferL()
{
    DP_G711_READ( "CG711PayloadFormatRead::FillSinkBuffer()" );

    CMMFDataBuffer* curFrameBuffer = NULL;
    if ( EBufferOne == iCurrentBuffer )
    {
        curFrameBuffer = iFrameBufferOne;
    }
    else
    {
        curFrameBuffer = iFrameBufferTwo;
    }

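    // Leave room for the VoIP header at the start of the buffer; the decoded
    // payload is appended after it.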
    TDes8& curFrameData( curFrameBuffer->Data() );
    curFrameData.SetLength( KVoIPHeaderLength );

    // Put next frame decoded from RTP payload to the framebuffer
    iBufferToReadExists = GetNextFrame( curFrameData );

    // G.711 Gwc expects 2 bytes for the DTX decision
    if ( iCnFrame )
    {
        DP_G711_READ( "CG711PayloadFormatRead::FillSinkBufferL add dtx-header" );

        curFrameData[0] = KVoIPCNFrame;
        curFrameData[1] = 0;
    }
    else
    {
        DP_G711_READ( "CG711PayloadFormatRead::FillSinkBufferL add voice-header" );

        curFrameData[0] = KVoIPAudioFrame;
        curFrameData[1] = 0;
    }

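    // Frame number: RTP timestamp of the packet plus this frame's offset
    // within the payload, expressed in samples.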
    curFrameBuffer->SetFrameNumber( iRecvHeader.iTimestamp + ( ( iFrameIndex - 1 )
                                    * TUint( iCInfo.iHwFrameTime * KDefaultSampleRateInkHz ) ) );

    DP_G711_READ2( "CG711PayloadFormatRead::FillSinkBufferL - FRAMENUM: %u",
                   curFrameBuffer->FrameNumber() );

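    // Payload size in bytes: G.711 at 8 kHz produces one byte per sample,
    // i.e. 8 bytes per millisecond of frame time, plus the header.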
    const TInt dataLen( ( iCInfo.iHwFrameTime * KBitsPerByte )
                        + KVoIPHeaderLength );
    curFrameData.SetLength( dataLen );
    curFrameBuffer->SetStatus( EFull );

    DP_G711_READ2( "CG711PayloadFormatRead: Filled framebuffer with buf. size: %d",
                   curFrameBuffer->BufferSize() );

    iStateMachine->ChangeState( EEmptyDataToSink );

    DP_G711_READ( "CG711PayloadFormatRead::FillSinkBufferL - DONE" );
}
Example #3
0
/* static */ unsigned
ES_NativeStackFrame::GetDepth(ES_NativeStackFrame *stack_frame)
{
    unsigned depth = 0;
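    // Walk toward the base of the stack; a frame with no associated code
    // marks the end.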
    do
    {
        stack_frame = GetNextFrame(stack_frame);
        ++depth;
    }
    while (GetCode(stack_frame));
    return depth;
}
Example #4
0
int main (int argc, char *argv[])
{
	OLRenderParams params;
	AVFrame *frame;
	int i;

	// Register all formats and codecs
	av_register_all();

	memset(&params, 0, sizeof params);
	params.rate = 48000;
	params.on_speed = 2.0/100.0;
	params.off_speed = 2.0/15.0;
	params.start_wait = 8;
	params.end_wait = 3;
	params.snap = 1/120.0;
	params.render_flags = RENDER_GRAYSCALE;
	params.min_length = 4;
	params.start_dwell = 2;
	params.end_dwell = 2;

	float snap_pix = 3;
	float aspect = 0;
	float framerate = 0;
	float overscan = 0;
	int thresh_dark = 60;
	int thresh_light = 160;
	int sw_dark = 100;
	int sw_light = 256;
	int decimate = 2;
	int edge_off = 0;

	int optchar;

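	// Tracer defaults: plain threshold mode (the -c flag switches to Canny)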
	OLTraceParams tparams = {
		.mode = OL_TRACE_THRESHOLD,
		.sigma = 0,
		.threshold2 = 50
	};

	while ((optchar = getopt(argc, argv, "hct:T:b:w:B:W:O:d:m:S:E:D:g:s:p:a:r:R:o:v:")) != -1) {
		switch (optchar) {
			case 'h':
			case '?':
				usage(argv[0]);
				return 0;
			case 'c':
				tparams.mode = OL_TRACE_CANNY;
				tparams.sigma = 1;
				break;
			case 't':
				thresh_dark = thresh_light = atoi(optarg);
				break;
			case 'T':
				tparams.threshold2 = atoi(optarg);
				break;
			case 'b':
				thresh_dark = atoi(optarg);
				break;
			case 'w':
				thresh_light = atoi(optarg);
				break;
			case 'B':
				sw_dark = atoi(optarg);
				break;
			case 'W':
				sw_light = atoi(optarg);
				break;
			case 'O':
				edge_off = atoi(optarg);
				break;
			case 'd':
				decimate = atoi(optarg);
				break;
			case 'm':
				params.min_length = atoi(optarg);
				break;
			case 'S':
				params.start_wait = atoi(optarg);
				break;
			case 'E':
				params.end_wait = atoi(optarg);
				break;
			case 'D':
				params.start_dwell = atoi(optarg);
				params.end_dwell = atoi(optarg);
				break;
			case 'g':
				tparams.sigma = atof(optarg);
				break;
			case 's':
				params.off_speed = 2.0f/atof(optarg);
				break;
			case 'p':
				snap_pix = atof(optarg);
				break;
			case 'a':
				aspect = atof(optarg);
				break;
			case 'r':
				framerate = atof(optarg);
				break;
			case 'R':
				params.max_framelen = params.rate/atof(optarg);
				break;
			case 'o':
				overscan = atof(optarg);
				break;
			case 'v':
				volume = atof(optarg);
				break;
		}
	}

	if (optind == argc) {
		usage(argv[0]);
		return 1;
	}

	if (av_vid_init(argv[optind]) != 0) {
		printf("Video open/init failed\n");
		return 1;
	}
	if (av_aud_init(argv[optind]) != 0) {
		printf("Audio open/init failed\n");
		return 1;
	}

	if(olInit(FRAMES_BUF, 300000) < 0) {
		printf("OpenLase init failed\n");
		return 1;
	}

	if (aspect == 0)
		aspect = pCodecCtx->width / (float)pCodecCtx->height;

	if (framerate == 0)
		framerate = (float)pFormatCtx->streams[videoStream]->r_frame_rate.num / (float)pFormatCtx->streams[videoStream]->r_frame_rate.den;

	float iaspect = 1/aspect;

	if (aspect > 1) {
		olSetScissor(-1, -iaspect, 1, iaspect);
		olScale(1, iaspect);
	} else {
		olSetScissor(-aspect, -1, aspect, 1);
		olScale(aspect, 1);
	}

	printf("Aspect is %f %f\n", aspect, iaspect);
	printf("Overscan is %f\n", overscan);

	olScale(1+overscan, 1+overscan);
	olTranslate(-1.0f, 1.0f);
	olScale(2.0f/pCodecCtx->width, -2.0f/pCodecCtx->height);

	int maxd = pCodecCtx->width > pCodecCtx->height ? pCodecCtx->width : pCodecCtx->height;
	params.snap = (snap_pix*2.0)/(float)maxd;

	float frametime = 1.0f/framerate;
	printf("Framerate: %f (%fs per frame)\n", framerate, frametime);

	olSetAudioCallback(moreaudio);
	olSetRenderParams(&params);

	float vidtime = 0;
	int inf=0;
	int bg_white = -1;
	float time = 0;
	float ftime;
	int frames = 0;

	OLFrameInfo info;

	OLTraceCtx *trace_ctx;

	OLTraceResult result;

	memset(&result, 0, sizeof(result));

	tparams.width = pCodecCtx->width;
	tparams.height = pCodecCtx->height;
	olTraceInit(&trace_ctx, &tparams);

	while(GetNextFrame(pFormatCtx, pCodecCtx, videoStream, &frame)) {
		if (inf == 0)
			printf("Frame stride: %d\n", frame->linesize[0]);
		inf+=1;
		if (vidtime < time) {
			vidtime += frametime;
			printf("Frame skip!\n");
			continue;
		}
		vidtime += frametime;

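		// Estimate the background brightness by averaging the frame border,
		// then pick the dark or light threshold with hysteresis (sw_dark/sw_light).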
		int thresh;
		int obj;
		int bsum = 0;
		int c;
		for (c=edge_off; c<(pCodecCtx->width-edge_off); c++) {
			bsum += frame->data[0][c+edge_off*frame->linesize[0]];
			bsum += frame->data[0][c+(pCodecCtx->height-edge_off-1)*frame->linesize[0]];
		}
		for (c=edge_off; c<(pCodecCtx->height-edge_off); c++) {
			bsum += frame->data[0][edge_off+c*frame->linesize[0]];
			bsum += frame->data[0][(c+1)*frame->linesize[0]-1-edge_off];
		}
		bsum /= (2*(pCodecCtx->width+pCodecCtx->height));
		if (bg_white == -1)
			bg_white = bsum > 128;
		if (bg_white && bsum < sw_dark)
			bg_white = 0;
		if (!bg_white && bsum > sw_light)
			bg_white = 1;

		if (bg_white)
			thresh = thresh_light;
		else
			thresh = thresh_dark;

		tparams.threshold = thresh;
		olTraceReInit(trace_ctx, &tparams);
		olTraceFree(&result);
		obj = olTrace(trace_ctx, frame->data[0], frame->linesize[0], &result);

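		// Draw the traced outlines and keep rendering laser frames until the
		// laser clock catches up with the video clock.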
		do {
			int i, j;
			for (i = 0; i < result.count; i++) {
				OLTraceObject *o = &result.objects[i];
				olBegin(OL_POINTS);
				OLTracePoint *p = o->points;
				for (j = 0; j < o->count; j++) {
					if (j % decimate == 0)
						olVertex(p->x, p->y, C_WHITE);
					p++;
				}
				olEnd();
			}

			ftime = olRenderFrame(200);
			olGetFrameInfo(&info);
			frames++;
			time += ftime;
			printf("Frame time: %.04f, Cur FPS:%6.02f, Avg FPS:%6.02f, Drift: %7.4f, "
				   "In %4d, Out %4d Thr %3d Bg %3d Pts %4d",
				   ftime, 1/ftime, frames/time, time-vidtime,
				   inf, frames, thresh, bsum, info.points);
			if (info.resampled_points)
				printf(" Rp %4d Bp %4d", info.resampled_points, info.resampled_blacks);
			if (info.padding_points)
				printf(" Pad %4d", info.padding_points);
			printf("\n");
		} while ((time+frametime) < vidtime);
	}

	olTraceDeinit(trace_ctx);

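	// Render empty frames to drain the FRAMES_BUF-deep output pipeline.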
	for(i=0;i<FRAMES_BUF;i++)
		olRenderFrame(200);

	olShutdown();
	av_deinit();
	exit (0);
}
Example #5
0
/*
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
If you call Render and nothing shows up on screen, then:

static VECTOR4 vConst( 1.0f, 1.0f, 1.0f, 100.0f );
m_pd3dDevice->SetVertexShaderConstantF( 95, (float*)&vConst, 1 );
SetTransformView( matView );
SetTransformProj( matProj );
SetDiffuse( 1.0, 1.0, 1.0 );
SetAmbient( 1.0, 1.0, 1.0 );

Check whether you called all of these before rendering! Argh!
This cost me no end of grief!

!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
 */
BOOL		CModelObject::Render( const MATRIX *mWorld )
{
	CObject3D	*pObject3D;
	O3D_ELEMENT	*pElem;
	int		i;
	int		nNextFrame;
	MATRIX m1;

#ifdef	_DEBUG
	if( m_nLoop == 0 )
		XERROR( "경고 : %s : CModelObject::m_nLoop가 지정되지 않았다.", m_pMotion->m_szName );
#endif

	if( m_pMotion )		// if m_pMotion is NULL, m_mUpdateBone is NULL as well
		m_pMotion->AnimateBone( m_mUpdateBone, m_pMotionOld, m_fFrameCurrent, GetNextFrame(), m_nFrameOld, m_bMotionBlending, m_fBlendWeight );		// if a skeleton exists, animate its bones
//		AnimateBone( NULL );		// if a skeleton exists, animate its bones

	if( m_pBone )		// if m_pBone exists there is a skeleton, so the VS must be used
	{
		MATRIX *pmBones;
		MATRIX m1;
		MATRIX *pmBonesInv = m_pBaseBoneInv ;
		pmBones = m_mUpdateBone;

		if( m_pBone->m_bSendVS )	// the bone count is at most MAX_VS_BONE, so send them all at once
		{
			int		nMaxBone = m_pBone->m_nMaxBone;
			const MATRIX *pmView = GetTransformView();
#ifdef	__XDEBUG
			if( nMaxBone > MAX_VS_BONE )	
				XERROR( "CModelObject::Render : 뼈대개수가 최대치를 초과했다. %d", nMaxBone );
#endif
			for( i = 0; i < nMaxBone; i ++ )	// at most MAX_VS_BONE bones
			{
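				// Compose the inverse bind pose with the animated bone, append
				// the world and view transforms, and load the result into the
				// OpenGL ES matrix palette.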
				MatrixMultiply( m1, pmBonesInv[i], pmBones[i] );
				
				glCurrentPaletteMatrixOES( i );
				MatrixMultiply( m1, m1, *mWorld );
				MatrixMultiply( m1, m1, *pmView );
				glLoadMatrixf( m1.f );
				
			}
		}
/*		
		MATRIX	mView, mProj;
		MATRIX	mViewProj, mViewProjTranspose, mInvWorld;

		VECTOR4 vLight = s_vLight;
		VECTOR4 vLightPos = s_vLightPos;

		mViewProj = *mWorld * s_mView * s_mProj;
		
		MATRIXTranspose( &mViewProjTranspose, &mViewProj );
		MATRIXTranspose( &mWorldTranspose, mWorld );

		MATRIXInverse( &mInvWorld, NULL, mWorld );
		Vec4Transform( &vLight, &vLight, &mInvWorld );
		Vec4Normalize( &vLight, &vLight );
		Vec4Transform( &vLightPos, &vLightPos, &mInvWorld );
//		Vec4Transform( &vEyePos, &vEyePos, &mInvWorld );

		m_pd3dDevice->SetVertexShaderConstantF( 84, (float*)&mViewProjTranspose, 4 );
//		m_pd3dDevice->SetVertexShaderConstantF( 88, (float*)&mWorldTranspose, 4 );
//		m_pd3dDevice->SetVertexShaderConstantF( 88, (float*)&vEyePos,  1 );		// specular use
//		m_pd3dDevice->SetVertexShaderConstantF( 89, (float*)&fSpecular, 1 );	// specular use
//		m_pd3dDevice->SetVertexShaderConstantF( 90, (float*)&fLightCol, 1 );	// specular use
		m_pd3dDevice->SetVertexShaderConstantF( 91, (float*)&vLightPos, 1 );
		m_pd3dDevice->SetVertexShaderConstantF( 92, (float*)&vLight,   1 );
		m_pd3dDevice->SetVertexShaderConstantF( 93, (float*)&s_fDiffuse, 1 );
		m_pd3dDevice->SetVertexShaderConstantF( 94, (float*)&s_fAmbient, 1 );
//		m_pd3dDevice->SetVertexShaderConstant( 95, &vConst, 1 );
 */
	}
/*
*/
	if( m_nNoEffect == 0 )
	{
		// default setup
//		pd3dDevice->SetTextureStageState( 0, D3DTSS_COLORARG1, D3DTA_TEXTURE );
//		pd3dDevice->SetTextureStageState( 0, D3DTSS_COLOROP,   D3DTOP_MODULATE );
//		pd3dDevice->SetTextureStageState( 0, D3DTSS_COLORARG2, D3DTA_DIFFUSE );
		
//		pd3dDevice->SetTextureStageState( 0, D3DTSS_ALPHAOP,   D3DTOP_DISABLE );
		glAlphaFunc( GL_GEQUAL, 0 );
		glBlendFunc( GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA );
//		pd3dDevice->SetRenderState( D3DRS_ALPHAFUNC, D3DCMP_GREATEREQUAL   );		
//		pd3dDevice->SetRenderState( D3DRS_SRCBLEND, D3DBLEND_SRCALPHA  );
//		pd3dDevice->SetRenderState( D3DRS_DESTBLEND, D3DBLEND_INVSRCALPHA );
		
	}
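	// Pack the 8-bit blend factor into the top byte above the 24-bit color.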
	DWORD dwBlendFactor = m_dwColor | ( m_dwBlendFactor << 24 );
	//DWORD dwBlendFactor = 0xffff0000 ;//| ( m_dwBlendFactor << 24 );
	// set the object's translucency effect
	if( m_dwBlendFactor < 255 || m_dwColor )
	{
		const GLfloat fAmbient[] = { 0, 0, 0, 1.0f };
		const GLfloat fDiffuse[] = { 1.0f, 1.0f, 1.0f, (float)((dwBlendFactor>>24)&0xff)/255.0f };
		glMaterialfv( GL_FRONT_AND_BACK, GL_AMBIENT, fAmbient );
		glMaterialfv( GL_FRONT_AND_BACK, GL_DIFFUSE, fDiffuse );
		glEnable( GL_BLEND );
		glEnable( GL_ALPHA_TEST );
	} else 
	{
Example #6
0
/////////////
// Get frame
wxBitmap LAVCVideoProvider::GetFrame(int n) {
	// Return stored frame
	n = MID(0,n,GetFrameCount()-1);
	if (n == frameNumber) {
		if (!validFrame) {
			curFrame = AVFrameToWX(frame, n);
			validFrame = true;
		}
		return curFrame;
	}

	// Following frame, just get it
	if (n == frameNumber+1) {
		GetNextFrame();
		//wxLogMessage(wxString::Format(_T("%i"),lastDecodeTime));
	}

	// Needs to seek
	else {
		// Prepare seek
		__int64 seekTo;
		int result = 0;

#if 0
		// Get time to seek to
		if (isMkv) {
			//__int64 base = AV_TIME_BASE;
			//__int64 time = VFR_Output.GetTimeAtFrame(n,true) * base / 1000000;
			//seekTo = av_rescale(time,stream->time_base.den,AV_TIME_BASE * __int64(stream->time_base.num));
			//seekTo = __int64(n) * 1000 * stream->r_frame_rate.den / stream->r_frame_rate.num;
			//seekTo = bytePos[n];

			//result = av_seek_frame(formatContext,vidStream,seekTo,AVSEEK_FLAG_BACKWARD | AVSEEK_FLAG_BYTE);

			// Prepare mkv seek
			ulonglong startTime, endTime, filePos;
			unsigned int rt, frameSize, frameFlags;
			ulonglong targetTime = __int64(VFR_Output.GetTimeAtFrame(n,true,true))*1000000;
			//ulonglong targetTime = __int64(n) * 1000 * stream->r_frame_rate.den / stream->r_frame_rate.num;
			//ulonglong targetTime = mkv.rawFrames[n].time * 1000000;
			mkv_Seek(mkv.file,targetTime,MKVF_SEEK_TO_PREV_KEYFRAME);

			// Seek
			if (mkv_ReadFrame(mkv.file,0,&rt,&startTime,&endTime,&filePos,&frameSize,&frameFlags) == 0) {
				result = av_seek_frame(formatContext,vidStream,filePos,AVSEEK_FLAG_BYTE | AVSEEK_FLAG_BACKWARD);
				int curpos = 0;
				for (unsigned int i=0;i<mkv.rawFrames.size();i++) {
					if (mkv.rawFrames[i].time == startTime / 1000000.0) curpos = i;
				}
				int seek = n - curpos;
				for (int i=0;i<seek;i++) {
					GetNextFrame();
				}
			}
		}

		// Constant frame rate
		else {
#endif
			seekTo = n;
			result = av_seek_frame(lavcfile->fctx,vidStream,seekTo,AVSEEK_FLAG_BACKWARD);

			// Seek to keyframe
			if (result == 0) {
				avcodec_flush_buffers(codecContext);

				// Seek until final frame
				bool ok = true;
				do {
					ok = GetNextFrame();
				} while (lastDecodeTime <= n && ok);
			}

			// Failed seeking
			else {
				GetNextFrame();
			}
#if 0
		}
#endif
	}

	// Bitmap
	wxBitmap bmp;
	if (frame) bmp = AVFrameToWX(frame, n);
	else bmp = wxBitmap(GetWidth(),GetHeight());

	// Set current frame
	validFrame = true;
	curFrame = bmp;
	frameNumber = n;

	// Return
	return curFrame;
}
Example #7
0
SkeletonFrame::SkeletonFrame( std::shared_ptr< INuiInstance >& instance, DWORD dwMillisecondsToWait /*= 0*/ )
	: instance_( instance )
{
	GetNextFrame( dwMillisecondsToWait );
}
Example #8
0
		/**
			@brief	Constructor
		*/
		SkeletonFrame::SkeletonFrame( INuiSensor* sensor, DWORD dwMillisecondsToWait /*= 0*/ )
			: sensor_( sensor )
		{
			GetNextFrame( dwMillisecondsToWait );
		}