Code example #1
File: BaseDecoderTest.cpp Project: BO45/openh264
void BaseDecoderTest::DecodeFile(const char* fileName, Callback* cbk) {
  std::ifstream file(fileName, std::ios::in | std::ios::binary);
  ASSERT_TRUE(file.is_open());

  BufferedData buf;
  while (true) {
    ReadFrame(&file, &buf);
    if (::testing::Test::HasFatalFailure()) {
      return;
    }
    if (buf.Length() == 0) {
      break;
    }
    DecodeFrame(buf.data(), buf.Length(), cbk);
    if (::testing::Test::HasFatalFailure()) {
      return;
    }
  }

  int32_t iEndOfStreamFlag = 1;
  decoder_->SetOption(DECODER_OPTION_END_OF_STREAM, &iEndOfStreamFlag);

  // Get pending last frame
  DecodeFrame(NULL, 0, cbk);
}
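
The HasFatalFailure() checks above follow a standard googletest idiom: an ASSERT_* macro aborts only the function it appears in, so any caller of an asserting helper has to re-check and bail out itself. A minimal sketch of the pattern (Precondition() is a hypothetical helper):

void HelperThatAsserts() {
  ASSERT_TRUE(Precondition());  // hypothetical check; aborts this function only
  // ... work that must not run after a failed ASSERT ...
}

TEST(DecoderSuite, UsesAssertingHelper) {
  HelperThatAsserts();
  if (::testing::Test::HasFatalFailure()) return;  // propagate the abort
  // safe to continue only if the helper passed
}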
Code example #2
File: BaseDecoderTest.cpp Project: cisco/openh264
bool BaseDecoderTest::DecodeFile (const char* fileName, Callback* cbk) {
  std::ifstream file (fileName, std::ios::in | std::ios::binary);
  if (!file.is_open())
    return false;

  BufferedData buf;
  while (true) {
    if (false == ReadFrame(&file, &buf))
      return false;
    if (::testing::Test::HasFatalFailure()) {
      return false;
    }
    if (buf.Length() == 0) {
      break;
    }
    DecodeFrame (buf.data(), buf.Length(), cbk);
    if (::testing::Test::HasFatalFailure()) {
      return false;
    }
  }

  int32_t iEndOfStreamFlag = 1;
  decoder_->SetOption (DECODER_OPTION_END_OF_STREAM, &iEndOfStreamFlag);

  // Get pending last frame
  DecodeFrame (NULL, 0, cbk);
  // Flush out last frames in decoder buffer
  int32_t num_of_frames_in_buffer = 0;
  decoder_->GetOption (DECODER_OPTION_NUM_OF_FRAMES_REMAINING_IN_BUFFER, &num_of_frames_in_buffer);
  for (int32_t i = 0; i < num_of_frames_in_buffer; ++i) {
    FlushFrame (cbk);
  }
  return true;
}
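
A call site for this bool-returning variant might look like the sketch below; the Callback interface with a single onDecodeFrame() hook is inferred from the shape of the openh264 tests, and the fixture and file name are placeholders:

struct CountingCallback : BaseDecoderTest::Callback {
  int frames = 0;
  void onDecodeFrame(const Frame& frame) override { ++frames; }  // assumed hook
};

TEST_F(DecoderFileTest, DecodesWholeFile) {  // hypothetical fixture
  CountingCallback cbk;
  ASSERT_TRUE(DecodeFile("res/test.264", &cbk));  // placeholder input file
  EXPECT_GT(cbk.frames, 0);
}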
Code example #3
File: BaseDecoderTest.cpp Project: BO45/openh264
bool BaseDecoderTest::DecodeNextFrame(Callback* cbk) {
  switch (decodeStatus_) {
  case Decoding:
    ReadFrame(&file_, &buf_);
    if (::testing::Test::HasFatalFailure()) {
      return false;
    }
    if (buf_.Length() == 0) {
      decodeStatus_ = EndOfStream;
      return true;
    }
    DecodeFrame(buf_.data(), buf_.Length(), cbk);
    if (::testing::Test::HasFatalFailure()) {
      return false;
    }
    return true;
  case EndOfStream: {
    int32_t iEndOfStreamFlag = 1;
    decoder_->SetOption(DECODER_OPTION_END_OF_STREAM, &iEndOfStreamFlag);
    DecodeFrame(NULL, 0, cbk);
    decodeStatus_ = End;
    break;
  }
  case OpenFile:
  case End:
    break;
  }
  return false;
}
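
DecodeNextFrame() returns true while there is more work to do and false once the End state is reached or a fatal test failure fires; note that the end-of-stream flush happens on the call that finally returns false. A driver loop, as a sketch (test is a hypothetical BaseDecoderTest instance):

while (test.DecodeNextFrame(&cbk)) {
  // each true return is one decoded frame or the EndOfStream transition
}
if (::testing::Test::HasFatalFailure()) {
  // false can also mean an assertion fired inside ReadFrame()/DecodeFrame()
}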
Code example #4
        bool ImageSequenceAsset::Open(  )
        {
            m_MaxMemUsed = 0;
            if ( !m_IsOpen ) {
                if ( m_CacheFrames ) {
                    std::vector<std::string>::iterator it = m_FileNames.begin();
                    while ( it != m_FileNames.end() ) {
                        std::string& fname = *it++;
                        // might not be an image file
                        pei::SurfacePtr s = ReadFrame( fname.c_str() );
                        if ( s ) {
                            m_FrameCache.push_back( s );
                            double w = s->GetWidth();
                            double h = s->GetHeight();

                            m_MaxMemUsed = (int)(m_MaxMemUsed + s->GetPitch() * h);
                            if ( m_Format.m_Width  < w ) m_Format.m_Width  = w;
                            if ( m_Format.m_Height < h ) m_Format.m_Height = h;
                            if ( m_Format.m_CanvasWidth  < w ) m_Format.m_CanvasWidth  = w;
                            if ( m_Format.m_CanvasHeight < h ) m_Format.m_CanvasHeight = h;
                        }
                    }
                    m_IsOpen = m_FrameCache.size() > 0;
                } else {
                    m_IsOpen = (m_FileNames.size() > 0 && DecodeFrame ( 0 ).get() != NULL);
                }
            }
            return m_IsOpen;
        }
Code example #5
File: welsDecoderExt.cpp Project: DB2060/openh264
DECODING_STATE CWelsDecoder::DecodeFrame (const unsigned char* kpSrc,
    const int kiSrcLen,
    unsigned char** ppDst,
    int* pStride,
    int& iWidth,
    int& iHeight) {
  DECODING_STATE eDecState = dsErrorFree;
  SBufferInfo    DstInfo;

  memset (&DstInfo, 0, sizeof (SBufferInfo));
  DstInfo.UsrData.sSystemBuffer.iStride[0] = pStride[0];
  DstInfo.UsrData.sSystemBuffer.iStride[1] = pStride[1];
  DstInfo.UsrData.sSystemBuffer.iWidth = iWidth;
  DstInfo.UsrData.sSystemBuffer.iHeight = iHeight;
  DstInfo.eBufferProperty = BUFFER_HOST;

  eDecState = DecodeFrame (kpSrc, kiSrcLen, (void_t**)ppDst, &DstInfo);
  if (eDecState == dsErrorFree) {
    pStride[0] = DstInfo.UsrData.sSystemBuffer.iStride[0];
    pStride[1] = DstInfo.UsrData.sSystemBuffer.iStride[1];
    iWidth     = DstInfo.UsrData.sSystemBuffer.iWidth;
    iHeight    = DstInfo.UsrData.sSystemBuffer.iHeight;
  }

  return eDecState;
}
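
This overload treats pStride, iWidth and iHeight as in-out parameters: they seed the SBufferInfo before the call and are overwritten with the actual output geometry when the decoder reports dsErrorFree. A hedged call-site sketch (buffer names are placeholders):

unsigned char* pDst[3] = { nullptr, nullptr, nullptr };
int stride[2] = { 0, 0 };    // luma/chroma strides, filled in on success
int width = 0, height = 0;   // decoded picture size, filled in on success
DECODING_STATE st = decoder->DecodeFrame(bitstream, bitstreamLen,
                                         pDst, stride, width, height);
if (st == dsErrorFree) {
  // pDst[0..2] now point at the Y/U/V planes, with stride[0]/stride[1]
}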
Code example #6
/* Decode data. */
void MovieTexture_Generic::DecodeSeconds( float fSeconds )
{
	m_fClock += fSeconds * m_fRate;

	/* We might need to decode more than one frame per update.  However, there
	 * have been bugs in ffmpeg that cause it to not handle EOF properly, which
	 * could make this never return, so let's play it safe. */
	int iMax = 4;
	while( --iMax )
	{
		/* If we don't have a frame decoded, decode one. */
		if( m_ImageWaiting == FRAME_NONE )
		{
			if( !DecodeFrame() )
				break;

			m_ImageWaiting = FRAME_DECODED;
		}

		/* If we have a frame decoded, see if it's time to display it. */
		float fTime = CheckFrameTime();
		if( fTime > 0 )
			return;	/* not due yet; check again on the next update */

		UpdateFrame();
		m_ImageWaiting = FRAME_NONE;
	}

	LOG->MapLog( "movie_looping", "MovieTexture_Generic::Update looping" );
}
Code example #7
File: VideoDecoder.cpp Project: wangsitan/Mesen
void VideoDecoder::StopThread()
{
#ifndef LIBRETRO
	_stopFlag = true;
	if(_decodeThread) {
		_waitForFrame.Signal();
		_decodeThread->join();

		_decodeThread.reset();

		_hud.reset();
		_hdScreenInfo = nullptr;
		EmulationSettings::SetPpuModel(PpuModel::Ppu2C02);
		UpdateVideoFilter();
		if(_ppuOutputBuffer != nullptr) {
			//Clear whole screen
			for(uint32_t i = 0; i < PPU::PixelCount; i++) {
				_ppuOutputBuffer[i] = 14; //Black
			}
			DecodeFrame();
		}
		_ppuOutputBuffer = nullptr;
	}
#endif
}
Code example #8
File: VideoDecoder.cpp Project: wangsitan/Mesen
void VideoDecoder::UpdateFrameSync(void *ppuOutputBuffer, HdScreenInfo *hdScreenInfo)
{
	_isOddFrame = (PPU::GetFrameCount() & 0x01) == 0x01;
	_hdScreenInfo = hdScreenInfo;
	_ppuOutputBuffer = (uint16_t*)ppuOutputBuffer;
	DecodeFrame(true);
	_frameCount++;
}
Code example #9
void MovieTexture_FFMpeg::DecoderThread()
{
#if defined(_WINDOWS)
	/* Windows likes to boost priority when processes come out of a wait state.  We don't
	 * want that, since it'll result in us having a small priority boost after each movie
	 * frame, resulting in skips in the gameplay thread. */
	if( !SetThreadPriorityBoost(GetCurrentThread(), TRUE) && GetLastError() != ERROR_CALL_NOT_IMPLEMENTED )
		LOG->Warn( werr_ssprintf(GetLastError(), "SetThreadPriorityBoost failed") );
#endif

	CHECKPOINT;

	while( m_State != DECODER_QUIT )
	{
		if( m_ImageWaiting == FRAME_NONE )
			DecodeFrame();

		/* If we still have no frame, we're at EOF and we didn't loop. */
		if( m_ImageWaiting != FRAME_DECODED )
		{
			usleep( 10000 );
			continue;
		}

		const float fTime = CheckFrameTime();
		if( fTime == -1 )	// skip frame
		{
			DiscardFrame();
		}
		else if( fTime > 0 )		// not time to decode a new frame yet
		{
			/* This needs to be relatively short so that we wake up quickly 
			 * from being paused or for changes in m_Rate. */
			usleep( 10000 );
		}
		else // fTime == 0
		{
			{
				/* The only reason m_BufferFinished might be non-zero right now (before
				 * ConvertFrame()) is if we're quitting. */
				int n = m_BufferFinished.GetValue();
				ASSERT_M( n == 0 || m_State == DECODER_QUIT, ssprintf("%i, %i", n, m_State) );
			}
			ConvertFrame();

			/* We just went into FRAME_WAITING.  Don't actually check; the main thread
			 * will change us back to FRAME_NONE without locking, and poke m_BufferFinished.
			 * Don't time out on this; if a new screen has started loading, this might not
			 * return for a while. */
			m_BufferFinished.Wait( false );

			/* If the frame wasn't used, then we must be shutting down. */
			ASSERT_M( m_ImageWaiting == FRAME_NONE || m_State == DECODER_QUIT, ssprintf("%i, %i", m_ImageWaiting, m_State) );
		}
	}
	CHECKPOINT;
}
Code example #10
/****************************************************************************
 * DecodeBlock: the whole thing
 ****************************************************************************
 * This function must be fed with complete frames.
 ****************************************************************************/
static void *DecodeBlock( decoder_t *p_dec, block_t **pp_block )
{
    decoder_sys_t *p_sys = p_dec->p_sys;
    block_t *p_block;
    void *p_buf;

    if( !pp_block || !*pp_block ) return NULL;

    p_block = *pp_block;

    if( p_block->i_pts <= VLC_TS_INVALID && p_block->i_dts <= VLC_TS_INVALID &&
        !date_Get( &p_sys->pts ) )
    {
        /* We've just started the stream, wait for the first PTS. */
        block_Release( p_block );
        return NULL;
    }

    /* Date management: If there is a pts available, use that. */
    if( p_block->i_pts > VLC_TS_INVALID )
    {
        date_Set( &p_sys->pts, p_block->i_pts );
    }
    else if( p_block->i_dts > VLC_TS_INVALID )
    {
        /* NB, davidf doesn't quite agree with this in general, it is ok
         * for rawvideo since it is in order (ie pts=dts), however, it
         * may not be ok for an out-of-order codec, so don't copy this
         * without thinking */
        date_Set( &p_sys->pts, p_block->i_dts );
    }

    if( p_block->i_buffer < p_sys->i_raw_size )
    {
        msg_Warn( p_dec, "invalid frame size (%zu < %zu)",
                  p_block->i_buffer, p_sys->i_raw_size );

        block_Release( p_block );
        return NULL;
    }

    if( p_sys->b_packetizer )
    {
        p_buf = SendFrame( p_dec, p_block );
    }
    else
    {
        p_buf = DecodeFrame( p_dec, p_block );
    }

    /* Date management: 1 frame per packet */
    date_Increment( &p_sys->pts, 1 );
    *pp_block = NULL;

    return p_buf;
}
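
The p_sys->pts field used here is one of VLC's date_t helpers, which track a timestamp at a fixed rate; a sketch of how such a date is typically initialised and advanced, assuming a 25 fps raw stream (the real rate comes from the ES format at open time):

date_t pts;
date_Init( &pts, 25, 1 );                 /* 25 ticks (frames) per second */
date_Set( &pts, p_block->i_pts );         /* anchor on the first valid PTS */
mtime_t next = date_Increment( &pts, 1 ); /* returns the date after one frame */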
Code example #11
File: xvid_dec_wce.cpp Project: bigbensk/gpac
static GF_Err XVID_ProcessData(GF_MediaDecoder *ifcg, 
		char *inBuffer, u32 inBufferLength,
		u16 ES_ID,
		char *outBuffer, u32 *outBufferLength,
		u8 PaddingBits, u32 mmlevel)
{
	unsigned char *pY, *pU, *pV;
	u32 i, uv_w, half_h;
	unsigned long pitch;
	XVIDCTX();

	/*check not using scalability*/
	if (ES_ID != ctx->ES_ID) return GF_BAD_PARAM;

	if (*outBufferLength < ctx->out_size) {
		*outBufferLength = ctx->out_size;
		return GF_BUFFER_TOO_SMALL;
	}

	if (!DecodeFrame(ctx->codec, inBuffer, inBufferLength, pY, pU, pV, pitch)) {
		*outBufferLength = 0;
		return GF_NON_COMPLIANT_BITSTREAM;
	}

	/*dispatch nothing if seeking or dropping*/
	switch (mmlevel) {
	case GF_CODEC_LEVEL_SEEK:
	case GF_CODEC_LEVEL_DROP:
		*outBufferLength = 0;
		return GF_OK;
	default:
		break;
	}
	*outBufferLength = ctx->out_size;
	for (i=0; i<ctx->height; i++) {
		unsigned char *src = pY + pitch*i;
		char *dst = outBuffer + ctx->width*i;
		memcpy(dst, src, sizeof(char) * ctx->width);
	}
	outBuffer += ctx->width * ctx->height;
	half_h = ctx->height/2;
	uv_w = ctx->width/2;
	for (i=0; i<half_h; i++) {
		unsigned char *src = pU + pitch/2*i;
		char *dst = outBuffer + i*uv_w;
		memcpy(dst, src, sizeof(char) * uv_w);
	}
	outBuffer += ctx->width * ctx->height / 4;
	for (i=0; i<half_h; i++) {
		unsigned char *src = pV + pitch/2*i;
		char *dst = outBuffer + i*uv_w;
		memcpy(dst, src, sizeof(char) * uv_w);
	}

	return GF_OK;
}
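
The three copy loops above all do the same job: move one strided plane into a tightly packed YV12 buffer (full-resolution Y, then the quarter-size U and V planes at half the pitch). Factored out as a generic sketch:

#include <cstring>

// Copy an image plane row by row, dropping the source's row padding
// (srcPitch >= width); dst is assumed tightly packed.
static void CopyPlane(unsigned char* dst, const unsigned char* src,
                      unsigned width, unsigned height, unsigned srcPitch)
{
	for (unsigned y = 0; y < height; ++y)
		memcpy(dst + y * width, src + y * srcPitch, width);
}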
Code example #12
 /** decode a frame to the given format */
 pei::SurfacePtr ImageSequenceAsset::Decode( pei::Format& dest_format, double time /*= 0*/ )
 {
     if ( m_FileNames.size() > 0 ) {
         int frame = m_Format.m_FrameTime > 0 ? (int)(time / m_Format.m_FrameTime) : 0;
         dest_format = m_Format;
         // cannot decode x/y offsets from single frames
         dest_format.m_XOffset = 0;
         dest_format.m_YOffset = 0;
         return DecodeFrame( frame % m_FileNames.size() );
     } else if ( m_FrameCache.size() > 0 ) {
         int frame = m_Format.m_FrameTime > 0 ? (int)(time / m_Format.m_FrameTime) : 0;
         dest_format = m_Format;
         // cannot decode x/y offsets from single frames
         dest_format.m_XOffset = 0;
         dest_format.m_YOffset = 0;
         return DecodeFrame( frame % m_FrameCache.size() );
     }
     return pei::SurfacePtr();
 }
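
The index math here is linear time divided by the per-frame duration, wrapped to the sequence length; as a standalone sketch:

// Mirrors the lookup above; frameTime is seconds per frame.
static int FrameIndex( double time, double frameTime, int frameCount )
{
    int frame = frameTime > 0 ? (int)(time / frameTime) : 0;
    return frame % frameCount;
}
// e.g. FrameIndex(1.30, 0.04, 20) -> 32 % 20 == 12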
Code example #13
File: mfx_h265_dec_decode.cpp Project: ph0b/MediaSDK
// Wait until a frame is ready to be output and set necessary surface flags
mfxStatus VideoDECODEH265::DecodeFrame(mfxBitstream *, mfxFrameSurface1 *, mfxFrameSurface1 *surface_out)
{
    if (!m_isInit)
        return MFX_ERR_NOT_INITIALIZED;

    MFX_CHECK_NULL_PTR1(surface_out);
    mfxStatus sts = DecodeFrame(surface_out);

    return sts;
}
Code example #14
File: FFmpegImage.cpp Project: 69thelememt/xbmc
std::shared_ptr<Frame> CFFmpegImage::ReadFrame()
{
  AVFrame* avframe = ExtractFrame();
  if (avframe == nullptr)
    return nullptr;
  std::shared_ptr<Frame> frame(new Frame());
  frame->m_delay = (unsigned int)av_frame_get_pkt_duration(avframe);
  frame->m_pitch = avframe->width * 4;
  frame->m_pImage = new unsigned char[avframe->height * frame->m_pitch];
  DecodeFrame(avframe, avframe->width, avframe->height, frame->m_pitch, frame->m_pImage);
  av_frame_free(&avframe);
  return frame;
}
Code example #15
//--------------------------------------------------------------
//  Read list of actions
//---------------------------------------------------------------
void CGaugeAction::ReadActions(SStream *str)
{ char txt[128];
  int na = 0;
  ReadInt(&na,str);
  while (na--)
  { ReadString(txt,128,str);
    BASE_ACT *a = new BASE_ACT();
    vact.push_back(a);
    if (DecodeFrame  (txt,a))   continue;
    if (DecodeMessage(txt,a))   continue;
    gtfo("Bad Gauge action");
  }
  return;
}
Code example #16
File: FFmpegImage.cpp Project: 69thelememt/xbmc
bool CFFmpegImage::Decode(unsigned char * const pixels, unsigned int width, unsigned int height,
                          unsigned int pitch, unsigned int format)
{
  if (m_width == 0 || m_height == 0 || format != XB_FMT_A8R8G8B8)
    return false;

  if (!m_pFrame || !m_pFrame->data[0])
  {
    CLog::LogFunction(LOGERROR, __FUNCTION__, "AVFrame member not allocated");
    return false;
  }

  return DecodeFrame(m_pFrame, width, height, pitch, pixels);
}
Code example #17
File: lpdecode.c Project: LynxInnovation/031
/*
  main routine

  Main program entry point.
*/
int main(void)
{
  Uint8           RxByte;
  struct timeval  CurrentTime;
  struct timeval  LastTime;
  struct timeval  LapsedTime;
  Uint8           Pos;
  Uint8           Frame[6];


  printf("Lynx Packet Decoder (%s)\n", __DATE__);
  gettimeofday(&LastTime, NULL);
  Pos = 0;

  // Loop reading characters

  while (fread(&RxByte,1,1,stdin)) {  // Wait for next character

    // Find out how much time has lapsed and show it

    gettimeofday(&CurrentTime, NULL);
    TimevalSubtract(&LapsedTime, &CurrentTime, &LastTime);

    // See if interframe time

    if ((LapsedTime.tv_sec) || (LapsedTime.tv_usec > 1000)) {
      if (Pos >= 6)
        DecodeFrame(Frame);
      printf("\n%03ld.%06ld", LapsedTime.tv_sec, LapsedTime.tv_usec);
      Pos = 0;
    }

    // Show the byte

    printf(" %02X", RxByte);

    // Save in a frame

    if (Pos < 6)
      Frame[Pos] = RxByte;

    if ((Pos) || (RxByte == 1))
      Pos++;

    // Set up ready for the next byte

    LastTime.tv_sec = CurrentTime.tv_sec;
    LastTime.tv_usec = CurrentTime.tv_usec;
  }

  return 0;
}
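
TimevalSubtract() is referenced above but not shown; a conventional implementation computes result = x - y while normalising tv_usec into [0, 1000000), along these lines:

/* result = x - y; returns non-zero if the difference is negative. */
static int TimevalSubtract(struct timeval *result,
                           const struct timeval *x,
                           const struct timeval *y)
{
  long sec  = x->tv_sec  - y->tv_sec;
  long usec = x->tv_usec - y->tv_usec;
  if (usec < 0) {
    usec += 1000000;
    sec  -= 1;
  }
  result->tv_sec  = sec;
  result->tv_usec = usec;
  return sec < 0;
}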
Code example #18
File: MpegAudioDecoder.cpp Project: DBCTRADO/TVTest
bool CMpegAudioDecoder::Decode(const BYTE *pData, DWORD *pDataSize, DecodeFrameInfo *pInfo)
{
	if (!m_bInitialized)
		return false;

	if (!DecodeFrame(pData, pDataSize, pInfo)) {
		m_bDecodeError = true;
		return false;
	}

	m_bDecodeError = false;

	return true;
}
Code example #19
Buffer<QImage>* StreamingManager::Start()
{
  AVPacket  avpkt;
  AVFrame  *frame = avcodec_alloc_frame();
  av_init_packet(&avpkt);
  InitStreams();
  while (frameBuffer->Length() < FRAMEBUFFER_SIZE)
  {
    StreamConfig *decConfig = GetNextPacket(avFormatContextPtr, &avpkt);
    DecodeFrame(frame, &avpkt, decConfig);
  }
  av_free(frame);  // release the scratch frame; decoded data now lives in frameBuffer

  return frameBuffer;
}
Code example #20
File: mfx_h265_dec_decode.cpp Project: ph0b/MediaSDK
// Decoder instance threads entry point. Do async tasks here
mfxStatus VideoDECODEH265::RunThread(void * params, mfxU32 threadNumber)
{
    MFX_AUTO_LTRACE(MFX_TRACE_LEVEL_API, "VideoDECODEH265::RunThread");
    ThreadTaskInfo * info = (ThreadTaskInfo *)params;

    mfxStatus sts = MFX_TASK_WORKING;

    bool isDecoded;
    {
        UMC::AutomaticUMCMutex guard(m_mGuardRunThread);

        if (!info->surface_work)
            return MFX_TASK_DONE;

        isDecoded = m_pH265VideoDecoder->CheckDecoding(true, info->pFrame);
    }

    if (!isDecoded)
    {
        sts = m_pH265VideoDecoder->RunThread(threadNumber);
    }

    {
        UMC::AutomaticUMCMutex guard(m_mGuardRunThread);
        if (!info->surface_work)
            return MFX_TASK_DONE;

        isDecoded = m_pH265VideoDecoder->CheckDecoding(true, info->pFrame);
        if (isDecoded)
        {
            info->surface_work = 0;
        }
    }

    if (isDecoded)
    {
        if (!info->pFrame->wasDisplayed() && info->surface_out)
        {
            mfxStatus status = DecodeFrame(info->surface_out, info->pFrame);

            if (status != MFX_ERR_NONE && status != MFX_ERR_NOT_FOUND)
                return status;
        }

        return MFX_TASK_DONE;
    }

    return sts;
}
Code example #21
File: VideoDecoder.cpp Project: wangsitan/Mesen
void VideoDecoder::DecodeThread()
{
	//This thread will decode the PPU's output (color ID to RGB, intensify r/g/b and produce a HD version of the frame if needed)
	while(!_stopFlag.load()) {
		//DecodeFrame returns the final ARGB frame we want to display in the emulator window
		while(!_frameChanged) {
			_waitForFrame.Wait();
			if(_stopFlag.load()) {
				return;
			}
		}

		DecodeFrame();
	}
}
Code example #22
File: rawvideo.c Project: forthyen/SDesk
/****************************************************************************
 * DecodeBlock: the whole thing
 ****************************************************************************
 * This function must be fed with complete frames.
 ****************************************************************************/
static void *DecodeBlock( decoder_t *p_dec, block_t **pp_block )
{
    decoder_sys_t *p_sys = p_dec->p_sys;
    block_t *p_block;
    void *p_buf;

    if( !pp_block || !*pp_block ) return NULL;

    p_block = *pp_block;

    if( !p_sys->i_pts && !p_block->i_pts && !p_block->i_dts )
    {
        /* We've just started the stream, wait for the first PTS. */
        block_Release( p_block );
        return NULL;
    }

    /* Date management */
    if( p_block->i_pts > 0 || p_block->i_dts > 0 )
    {
        if( p_block->i_pts > 0 ) p_sys->i_pts = p_block->i_pts;
        else if( p_block->i_dts > 0 ) p_sys->i_pts = p_block->i_dts;
    }

    if( p_block->i_buffer < p_sys->i_raw_size )
    {
        msg_Warn( p_dec, "invalid frame size (%d < %d)",
                  p_block->i_buffer, p_sys->i_raw_size );

        block_Release( p_block );
        return NULL;
    }

    if( p_sys->b_packetizer )
    {
        p_buf = SendFrame( p_dec, p_block );
    }
    else
    {
        p_buf = DecodeFrame( p_dec, p_block );
    }

    /* Date management: 1 frame per packet */
    p_sys->i_pts += ( I64C(1000000) * 1.0 / 25 /*FIXME*/ );
    *pp_block = NULL;

    return p_buf;
}
Code example #23
mpeg2_state_t CMpeg2DecoderDXVA2::Parse()
{
	mpeg2_state_t State = CMpeg2Decoder::Parse();

	switch (State) {
	case STATE_PICTURE:
		m_SliceDataSize = 0;
		m_SliceCount = 0;
		break;

	case STATE_SLICE_1ST:
		DecodeFrame(nullptr);
		m_SliceDataSize = 0;
		m_SliceCount = 0;
		break;
	}

	return State;
}
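
For reference, a sketch of the usual driver loop around a libmpeg2-style Parse(): STATE_BUFFER means the parser has consumed its input and needs the next chunk, while the picture/slice states are acted on inside the override above:

mpeg2_state_t s;
while ((s = decoder.Parse()) != STATE_BUFFER) {
	if (s == STATE_INVALID)
		break;	// unrecoverable stream error; resynchronise or abort
	// STATE_PICTURE / STATE_SLICE_1ST etc. were already handled in Parse()
}
// feed more bitstream data, then call Parse() again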
Code example #24
// return 0 if not decoded
int CDecoderVideo::Decode(JNIEnv*  env, jbyteArray iData, int iSize){
	int iRet = 0;	// stays 0 unless a frame is decoded and converted
	jbyte *pbydatain  = env->GetByteArrayElements(iData, 0);
	/*FILE *_fp;
	char sfile[130];
	sprintf(sfile, "/sdcard/video.h263", _icnt);
	_icnt++;
	_fp = fopen(sfile, "a");
	fwrite(pbydatain, sizeof(uint8), iSize, _fp);
	fclose(_fp);*/
		
	if (_pvDec){
		uint32 TimeStamp =0xFFFFFFFF;
		uint UseExtTimestamp = 0;
		//bool bRes = _pvDec->DecodeVideoFrame((uint8**)&pbydatain, (uint32*)&TimeStamp, (int32*)&iSize, &UseExtTimestamp, (uint8 *)m_pbyDecoded);		
		bool bRes = DecodeFrame((uint8**)&pbydatain, (uint32*)&TimeStamp, (int32*)&iSize, &UseExtTimestamp, (uint8 *)m_pbyDecoded);		
		if (!bRes)
			__android_log_write(ANDROID_LOG_DEBUG, "CDecoderVideo::Decode", "Failed");
		else{		
			//__android_log_write(ANDROID_LOG_DEBUG, "CDecoderVideo::Decode", "OK");			
			//ConvertYUVtoRGB32(_py, _pu, _pv, (unsigned int *)m_piDecoded, 176, 144);			
		    if (_pCC->Convert((uint8 *)m_pbyDecoded, (uint8 *)m_pbyRGB) == 0)
		    {
				__android_log_write(ANDROID_LOG_DEBUG, "CDecoderVideo::ColorConversion", "Failed");
			}
			int is = 176 * 144;
			int *p = (int*)m_pbyRGB;
			int i;
			for(i=0;i<is;i++){
				m_piRGB[i] = *p;
				m_piRGB[i]|= 0xFF000000;
				p++;
			}
			iRet = 1;	// frame decoded and converted to RGB
		}
	}
	else
		__android_log_write(ANDROID_LOG_DEBUG, "CDecoderVideo::Decode", "Decoder doesn't init");
		
	
	env->ReleaseByteArrayElements(iData, pbydatain, 0);
	
	return iRet;
}
Code example #25
void MovieTexture_FFMpeg::Update(float fDeltaTime)
{
	/* We might need to decode more than one frame per update.  However, there
	 * have been bugs in ffmpeg that cause it to not handle EOF properly, which
	 * could make this never return, so let's play it safe. */
	int iMax = 4;
	while( --iMax )
	{
		if( !m_bThreaded )
		{
			/* If we don't have a frame decoded, decode one. */
			if( m_ImageWaiting == FRAME_NONE )
				DecodeFrame();

			/* If we have a frame decoded, see if it's time to display it. */
			if( m_ImageWaiting == FRAME_DECODED )
			{
				float fTime = CheckFrameTime();
				if( fTime > 0 )
					return;
				else if( fTime == -1 )
					DiscardFrame();
				else
					ConvertFrame();
			}
		}

	/* Note that if there's an image waiting, we *must* signal m_BufferFinished, or
	 * the decoder thread may sit around waiting for it, even through Pause and Play
	 * calls, causing the clock to keep running. */
		if( m_ImageWaiting != FRAME_WAITING )
			return;
		CHECKPOINT;

		UpdateFrame();
		
		if( m_bThreaded )
			m_BufferFinished.Post();
	}

	LOG->MapLog( "ffmpeg_looping", "MovieTexture_FFMpeg::Update looping" );
}
Code example #26
File: FFmpegImage.cpp Project: 68foxboris/xbmc
bool CFFmpegImage::Decode(unsigned char * const pixels, unsigned int width, unsigned int height,
                          unsigned int pitch, unsigned int format)
{
  if (m_width == 0 || m_height == 0 || format != XB_FMT_A8R8G8B8)
    return false;

  if (pixels == nullptr)
  {
    CLog::Log(LOGERROR, "%s - No valid buffer pointer (nullptr) passed", __FUNCTION__);
    return false;
  }

  if (!m_pFrame || !m_pFrame->data[0])
  {
    CLog::LogF(LOGERROR, "AVFrame member not allocated");
    return false;
  }

  return DecodeFrame(m_pFrame, width, height, pitch, pixels);
}
Code example #27
bool MADDecoder::Open(FileSpecifier &File)
{
	if (!File.Open(file)) return false;

	file_done = false;

	if (DecodeFrame())
	{
		stereo = (MAD_NCHANNELS(&Frame.header) == 2);
		bytes_per_frame = 2 * (stereo ? 2 : 1);
		rate = Frame.header.samplerate;

		sample = 0;
		return true;
	}
	else
	{
		return false;
	}
}
Code example #28
File: xvid_dec_wce.cpp Project: bigbensk/gpac
static GF_Err XVID_AttachStream(GF_BaseDecoder *ifcg, GF_ESD *esd)
{
	GF_M4VDecSpecInfo dsi;
	GF_Err e;
	unsigned char *ptr;
	unsigned long pitch;

	XVIDCTX();

	if (ctx->ES_ID && ctx->ES_ID!=esd->ESID) return GF_NOT_SUPPORTED;
	if (!esd->decoderConfig->decoderSpecificInfo || !esd->decoderConfig->decoderSpecificInfo->data) return GF_NON_COMPLIANT_BITSTREAM;

	/*decode DSI*/
	e = gf_m4v_get_config((char *) esd->decoderConfig->decoderSpecificInfo->data, esd->decoderConfig->decoderSpecificInfo->dataLength, &dsi);
	if (e) return e;
	if (!dsi.width || !dsi.height) return GF_NON_COMPLIANT_BITSTREAM;

	GF_LOG(GF_LOG_DEBUG, GF_LOG_CODEC, ("[XviD] Attaching Stream %d - framesize %d x %d\n", esd->ESID, dsi.width, dsi.height ));

	ctx->codec =  InitCodec(dsi.width, dsi.height, GF_4CC('x', 'v', 'i', 'd'));
	if (!ctx->codec) return GF_OUT_OF_MEM;

	GF_LOG(GF_LOG_DEBUG, GF_LOG_CODEC, ("[XviD] Decoding DecoderSpecificInfo\n"));

	DecodeFrame(ctx->codec, esd->decoderConfig->decoderSpecificInfo->data, esd->decoderConfig->decoderSpecificInfo->dataLength, ptr, ptr, ptr, pitch);

	/*note that this may be irrelevant when used through systems (FPS is driven by systems CTS)*/
	ctx->FPS = dsi.clock_rate;
	ctx->FPS /= 1000;
	if (!ctx->FPS) ctx->FPS = 30.0f;
	ctx->width = dsi.width;
	ctx->height = dsi.height;
	ctx->pixel_ar = (dsi.par_num<<16) | dsi.par_den;
	ctx->pixel_ar = 0;
	ctx->ES_ID = esd->ESID;
	ctx->first_frame = 1;
	/*output in YV12 only - let the player handle conversion*/
	ctx->out_size = 3 * ctx->width * ctx->height / 2;
	GF_LOG(GF_LOG_DEBUG, GF_LOG_CODEC, ("[XviD] Decoder setup - output size %d\n", ctx->out_size ));
	return GF_OK;
}
Code example #29
File: AacDecoder.cpp Project: kento1218/TVTest
bool CAacDecoder::Decode(const BYTE *pData, DWORD *pDataSize, DecodeFrameInfo *pInfo)
{
	if (m_hDecoder == NULL)
		return false;

	m_pAdtsFrame = NULL;

	CAdtsFrame *pFrame;
	if (!m_AdtsParser.StoreEs(pData, pDataSize, &pFrame))
		return false;

	if (!DecodeFrame(pFrame, pInfo)) {
		m_bDecodeError = true;
		return false;
	}

	m_pAdtsFrame = pFrame;
	m_bDecodeError = false;

	return true;
}
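
pDataSize is an in/out parameter here: the ADTS parser reports back how much of the input it consumed. A call-site sketch (buffer names are placeholders, and the DecodeFrameInfo layout is assumed):

DWORD size = inputBytes;   // in: bytes available at pData
DecodeFrameInfo info;
if (aacDecoder.Decode(pData, &size, &info)) {
	// size now reflects the bytes consumed; info describes the PCM output
} else {
	// no complete ADTS frame stored yet, or the frame failed to decode
}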
Code example #30
int ODFFmpegDecoder::Decode(SampleBuffer & data, sampleFormat & format, sampleCount start, sampleCount len, unsigned int channel)
{
   format = mScs[mStreamIndex]->m_osamplefmt;

   data.Allocate(len, format);
   samplePtr bufStart = data.ptr();
   streamContext* sc = NULL;

   // printf("start %llu len %llu\n", start, len);
   //TODO update this to work with seek - this only works linearly now.
   if(mCurrentPos > start && mCurrentPos  <= start+len + kDecodeSampleAllowance)
   {
      //this next call takes data, start and len as reference variables and updates them to reflect the NEW area that is needed.
      FillDataFromCache(bufStart, format, start,len,channel);
   }

   bool seeking = false;
   //look at the decoding timestamp and see if the next sample that will be decoded is not the next sample we need.
   if(len && (mCurrentPos > start + len  || mCurrentPos + kDecodeSampleAllowance < start ) && SeekingAllowed()) {
      sc = mScs[mStreamIndex];
      AVStream* st = sc->m_stream;
      int stindex = -1;
      int64_t targetts;   // signed, so the < 0 clamp below actually works

      //printf("attempting seek to %llu\n", start);
      //we have to find the index for this stream.
      for (unsigned int i = 0; i < mFormatContext->nb_streams; i++) {
         if (mFormatContext->streams[i] == sc->m_stream )
            stindex =i;
      }

      if(stindex >=0) {
         int numAttempts = 0;
         //reset mCurrentPos to a bogus value
         mCurrentPos = start+len +1;
         while(numAttempts++ < kMaxSeekRewindAttempts && mCurrentPos > start) {
            //we want to move slightly before the start of the block file, but not too far ahead
            targetts = (start-kDecodeSampleAllowance*numAttempts/kMaxSeekRewindAttempts)  * ((double)st->time_base.den/(st->time_base.num * st->codec->sample_rate ));
            if(targetts<0)
               targetts=0;

            //printf("attempting seek to %llu, attempts %d\n", targetts, numAttempts);
            if(av_seek_frame(mFormatContext,stindex,targetts,0) >= 0){
               //find out the dts we've seeked to.
               sampleCount actualDecodeStart = 0.5 + st->codec->sample_rate * st->cur_dts  * ((double)st->time_base.num/st->time_base.den);      //this is mostly safe because den is usually 1 or low number but check for high values.

               mCurrentPos = actualDecodeStart;
               seeking = true;

               //if the seek was past our desired position, rewind a bit.
               //printf("seek ok to %llu samps, float: %f\n",actualDecodeStart,actualDecodeStartDouble);
            } else {
               printf("seek failed");
               break;
            }
         }
         if(mCurrentPos>start){
            mSeekingAllowedStatus = (bool)ODFFMPEG_SEEKING_TEST_FAILED;
            //               url_fseek(mFormatContext->pb,sc->m_pkt.pos,SEEK_SET);
            printf("seek fail, reverting to previous pos\n");
            return -1;
         }
      }
   }
   bool firstpass = true;

   //we decode up to the end of the blockfile
   while (len>0 && (mCurrentPos < start+len) && (sc = ReadNextFrame()) != NULL)
   {
      // ReadNextFrame returns 1 if stream is not to be imported
      if (sc != (streamContext*)1)
      {
         //find out the dts we've seeked to.  can't use the stream->cur_dts because it is faulty.  also note that until we do the first seek, pkt.dts can be false and will change for the same samples after the initial seek.
         sampleCount actualDecodeStart = mCurrentPos;

         // we need adjacent samples, so don't use dts most of the time which will leave gaps between frames
         // for some formats
         // The only other case for inserting silence is for initial offset and ImportFFmpeg.cpp does this for us
         if (seeking) {
            actualDecodeStart = 0.52 + (sc->m_stream->codec->sample_rate * sc->m_pkt.dts
                                        * ((double)sc->m_stream->time_base.num / sc->m_stream->time_base.den));
            //this is mostly safe because den is usually 1 or low number but check for high values.

            //hack to get rounding to work to the nearest frame size since dts isn't exact
            if (sc->m_stream->codec->frame_size) {
               actualDecodeStart = ((actualDecodeStart + sc->m_stream->codec->frame_size/2) / sc->m_stream->codec->frame_size) * sc->m_stream->codec->frame_size;
            }
            // reset for the next one
            seeking = false;
         }
         if(actualDecodeStart != mCurrentPos)
            printf("ts not matching - now:%llu , last:%llu, lastlen:%llu, start %llu, len %llu\n",actualDecodeStart, mCurrentPos, mCurrentLen, start, len);
            //if we've skipped over some samples, fill the gap with silence.  This could happen often in the beginning of the file.
         if(actualDecodeStart>start && firstpass) {
            // find the number of samples for the leading silence
            int amt = actualDecodeStart - start;
            FFMpegDecodeCache* cache = new FFMpegDecodeCache;

            //printf("skipping/zeroing %i samples. - now:%llu (%f), last:%llu, lastlen:%llu, start %llu, len %llu\n",amt,actualDecodeStart, actualDecodeStartdouble, mCurrentPos, mCurrentLen, start, len);

            //put it in the cache so the other channels can use it.
            cache->numChannels = sc->m_stream->codec->channels;
            cache->len = amt;
            cache->start=start;
            // 8 bit and 16 bit audio output from ffmpeg means
            // 16 bit int out.
            // 32 bit int, float, double mean float out.
            if (format == int16Sample)
               cache->samplefmt = SAMPLE_FMT_S16;
            else
               cache->samplefmt = SAMPLE_FMT_FLT;

            cache->samplePtr = (uint8_t*) malloc(amt * cache->numChannels * SAMPLE_SIZE(format));

            memset(cache->samplePtr, 0, amt * cache->numChannels * SAMPLE_SIZE(format));

            InsertCache(cache);
         }
         firstpass=false;
         mCurrentPos = actualDecodeStart;
         //decode the entire packet (unused bits get saved in the cache, so as long as the cache
         //size limit is bigger than the largest packet size, we're ok)
         while (sc->m_pktRemainingSiz > 0)
            //Fill the cache with decoded samples
               if (DecodeFrame(sc,false) < 0)
                  break;

         // Cleanup after frame decoding
         if (sc->m_pktValid)
         {
            av_free_packet(&sc->m_pkt);
            sc->m_pktValid = 0;
         }
      }
   }

   // Flush the decoders if we're done.
   if((!sc || sc == (streamContext*) 1)&& len>0)
   {
      for (int i = 0; i < mNumStreams; i++)
      {
         if (DecodeFrame(mScs[i], true) == 0)
         {
            if (mScs[i]->m_pktValid)
            {
               av_free_packet(&mScs[i]->m_pkt);
               mScs[i]->m_pktValid = 0;
            }
         }
      }
   }

   //this next call takes data, start and len as reference variables and updates them to reflect the NEW area that is needed.
   FillDataFromCache(bufStart, format, start, len, channel);

   // CHECK: not sure if we need this.  In any case it has to be updated for the NEW float case (not just int16)
   //if for some reason we couldn't get the samples, fill them with silence
   /*
   int16_t* outBuf = (int16_t*) bufStart;
   for(int i=0;i<len;i++)
      outBuf[i]=0;
   */
   return 1;
}
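
Both the seek target (targetts) and actualDecodeStart above are conversions between sample counts and the stream's time base; factored out, the two directions look like this sketch (AVRational as in libavutil):

#include <stdint.h>
extern "C" {
#include <libavutil/rational.h>
}

// samples -> stream time-base ticks, as when building targetts above
static int64_t SamplesToStreamTs(int64_t samples, int sampleRate, AVRational tb)
{
   return (int64_t)(samples * ((double)tb.den / (tb.num * (double)sampleRate)));
}

// stream time-base ticks -> samples, as for actualDecodeStart
// (0.5 added for rounding, as in the original)
static int64_t StreamTsToSamples(int64_t ts, int sampleRate, AVRational tb)
{
   return (int64_t)(0.5 + (double)sampleRate * ts * ((double)tb.num / tb.den));
}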