Example #1
File: theora.c  Project: cobr123/qtVlc
/*****************************************************************************
 * DecodePacket: decodes a Theora packet.
 *****************************************************************************/
static picture_t *DecodePacket( decoder_t *p_dec, ogg_packet *p_oggpacket )
{
    decoder_sys_t *p_sys = p_dec->p_sys;
    picture_t *p_pic;
    yuv_buffer yuv;

    theora_decode_packetin( &p_sys->td, p_oggpacket );

    /* Check for keyframe */
    if( !(p_oggpacket->packet[0] & 0x80) /* data packet */ &&
        !(p_oggpacket->packet[0] & 0x40) /* intra frame */ )
        p_sys->b_decoded_first_keyframe = true;

    /* If we haven't seen a single keyframe yet, don't let Theora decode
     * anything, otherwise we'll get display artifacts.  (This is impossible
     * in the general case, but can happen if e.g. we play a network stream
     * using a timed URL, such that the server doesn't start the video with a
     * keyframe). */
    if( p_sys->b_decoded_first_keyframe )
        theora_decode_YUVout( &p_sys->td, &yuv );
    else
        return NULL;

    /* Get a new picture */
    p_pic = decoder_NewPicture( p_dec );
    if( !p_pic ) return NULL;

    theora_CopyPicture( p_pic, &yuv );

    p_pic->date = p_sys->i_pts;

    return p_pic;
}
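All of the examples in this collection follow the same two-step call sequence from the legacy libtheora alpha API (theora/theora.h): submit one compressed packet with theora_decode_packetin(), then fetch the decoded frame with theora_decode_YUVout() and copy the planes out row by row, honouring y_stride and uv_stride (which may be larger than y_width/uv_width). Below is a minimal sketch of that pattern, assuming a theora_state td already initialised with theora_decode_init(); the helper name decode_one_frame is hypothetical, error handling is trimmed, and the row-by-row dump is modelled on Example #4 further down.

#include <stdio.h>
#include <theora/theora.h>

/* Sketch only: decode one data packet with the legacy theora_* API and
 * dump the strided planes to a file.  `td` and `op` are assumed to be
 * set up by the surrounding Ogg demuxing code, as in the examples. */
static int decode_one_frame(theora_state *td, ogg_packet *op, FILE *outfile)
{
    yuv_buffer yuv;
    int i;

    if (theora_decode_packetin(td, op) != 0)
        return -1;                      /* not a valid theora data packet */
    if (theora_decode_YUVout(td, &yuv) != 0)
        return -1;                      /* no decoded frame available */

    /* Planes are strided: write one row at a time. */
    for (i = 0; i < yuv.y_height; i++)
        fwrite(yuv.y + yuv.y_stride * i, 1, yuv.y_width, outfile);
    for (i = 0; i < yuv.uv_height; i++)
        fwrite(yuv.u + yuv.uv_stride * i, 1, yuv.uv_width, outfile);
    for (i = 0; i < yuv.uv_height; i++)
        fwrite(yuv.v + yuv.uv_stride * i, 1, yuv.uv_width, outfile);
    return 0;
}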
Example #2
static void dec_process_frame(MSFilter *f, DecState *s, ogg_packet *op){
	yuv_buffer yuv;
	if (theora_decode_packetin(&s->tstate,op)==0){
		if (theora_decode_YUVout(&s->tstate,&yuv)==0){
			mblk_t *om;
			int i;
			int ylen=yuv.y_width*yuv.y_height;
			int uvlen=yuv.uv_width*yuv.uv_height;
			ms_debug("Got yuv buffer from theora decoder");
			if (s->yuv==NULL){
				int len=(ylen)+(2*uvlen);
				s->yuv=allocb(len,0);
			}
			om=dupb(s->yuv);
			for(i=0;i<yuv.y_height;++i){
				memcpy(om->b_wptr,yuv.y+yuv.y_stride*i,yuv.y_width);
				om->b_wptr+=yuv.y_width;
			}
			for(i=0;i<yuv.uv_height;++i){
				memcpy(om->b_wptr,yuv.u+yuv.uv_stride*i,yuv.uv_width);
				om->b_wptr+=yuv.uv_width;
			}
			for(i=0;i<yuv.uv_height;++i){
				memcpy(om->b_wptr,yuv.v+yuv.uv_stride*i,yuv.uv_width);
				om->b_wptr+=yuv.uv_width;
			}
			ms_queue_put(f->outputs[0],om);
		}
	}else{
		ms_warning("theora decoding error");
	}
}
Example #3
static GF_Err THEO_ProcessData(GF_MediaDecoder *ifcg, 
		char *inBuffer, u32 inBufferLength,
		u16 ES_ID,
		char *outBuffer, u32 *outBufferLength,
		u8 PaddingBits, u32 mmlevel)
{
	ogg_packet op;
	yuv_buffer yuv;
	u32 i;
	char *pYO, *pUO, *pVO;
	unsigned char *pYD, *pUD, *pVD;

	THEORACTX();
	/*not using scalability*/
	assert(ctx->ES_ID == ES_ID);

	op.granulepos = -1;
	op.b_o_s = 0;
	op.e_o_s = 0;
	op.packetno = 0;
	op.packet = inBuffer;
	op.bytes = inBufferLength;

	*outBufferLength = 0;

	if (theora_decode_packetin(&ctx->td, &op)<0) return GF_NON_COMPLIANT_BITSTREAM;
	if (mmlevel == GF_CODEC_LEVEL_SEEK) return GF_OK;
	if (theora_decode_YUVout(&ctx->td, &yuv)<0) return GF_OK;

	pYO = yuv.y;
	pUO = yuv.u;
	pVO = yuv.v;
	pYD = outBuffer;
	pUD = outBuffer + ctx->ti.width * ctx->ti.height;
	pVD = outBuffer + 5 * ctx->ti.width * ctx->ti.height / 4;
	
	for (i=0; i<(u32)yuv.y_height; i++) {
		memcpy(pYD, pYO, sizeof(char) * yuv.y_width);
		pYD += ctx->ti.width;
		pYO += yuv.y_stride;
		if (i%2) continue;

		memcpy(pUD, pUO, sizeof(char) * yuv.uv_width);
		memcpy(pVD, pVO, sizeof(char) * yuv.uv_width);
		pUD += ctx->ti.width/2;
		pVD += ctx->ti.width/2;
		pUO += yuv.uv_stride;
		pVO += yuv.uv_stride;
	}
	*outBufferLength = 3*ctx->ti.width*ctx->ti.height/2;
	return GF_OK;
}
Example #4
/* write out the planar YUV frame, uncropped */
static void video_write(void){
  int i;

  yuv_buffer yuv;
  theora_decode_YUVout(&td,&yuv);

  for(i=0;i<yuv.y_height;i++)
    fwrite(yuv.y+yuv.y_stride*i, 1, yuv.y_width, outfile);
  for(i=0;i<yuv.uv_height;i++)
    fwrite(yuv.u+yuv.uv_stride*i, 1, yuv.uv_width, outfile);
  for(i=0;i<yuv.uv_height;i++)
    fwrite(yuv.v+yuv.uv_stride*i, 1, yuv.uv_width, outfile);

}
Example #5
uint8_t decoderTheora::uncompress(uint8_t *in, uint8_t *out, uint32_t len, uint32_t *flagz)
{
	int got_picture = 0;

	if (len == 0) // Null frame, silently skip
	{
		if (flagz) *flagz = 0;
		return 1;
	}

	ogg_packet ogg;
	memset(&ogg, 0, sizeof(ogg));
	ogg.packet = in;
	ogg.bytes  = len;
	/*
	typedef struct {
		unsigned char *packet;
		long  bytes;
		long  b_o_s;
		long  e_o_s;

		ogg_int64_t  granulepos;

		ogg_int64_t  packetno;      sequence number for decode; the framing
					knows where there's a hole in the data,
					but we need coupling so that the codec
					(which is in a seperate abstraction
					layer) also knows about the gap
	} ogg_packet; */

	if (theora_decode_packetin(&_tstate, &ogg))
	{
		printf("\n error decoding theora ..\n");
		return 0;
	}

	yuv_buffer yuv;
	theora_decode_YUVout(&_tstate, &yuv);
	// Only the luma plane is copied (assuming y_stride == _w, i.e. a packed
	// plane); both chroma planes are filled with neutral grey (128), so the
	// output picture is effectively greyscale.
	memcpy(out, yuv.y, _w * _h);
	memset(out + _w * _h, 128, (_w * _h) >> 1);

	return 1;
}
Example #6
static void video_write(void){
  int i;
  yuv_buffer yuv;
  int crop_offset;
  theora_decode_YUVout(&td,&yuv);

  /* Lock SDL_yuv_overlay */
  if ( SDL_MUSTLOCK(screen) ) {
    if ( SDL_LockSurface(screen) < 0 ) return;
  }
  if (SDL_LockYUVOverlay(yuv_overlay) < 0) return;

  /* let's draw the data on a SDL screen (*screen) */
  /* deal with border stride */
  /* reverse u and v for SDL */
  /* and crop input properly, respecting the encoded frame rect */
  /* problems may exist for odd frame rect for some encodings */
  crop_offset=ti.offset_x+yuv.y_stride*ti.offset_y;
  for(i=0;i<yuv_overlay->h;i++)
    memcpy(yuv_overlay->pixels[0]+yuv_overlay->pitches[0]*i,
           yuv.y+crop_offset+yuv.y_stride*i,
           yuv_overlay->w);
  crop_offset=(ti.offset_x/2)+(yuv.uv_stride)*(ti.offset_y/2);
  for(i=0;i<yuv_overlay->h/2;i++){
    memcpy(yuv_overlay->pixels[1]+yuv_overlay->pitches[1]*i,
           yuv.v+crop_offset+yuv.uv_stride*i,
           yuv_overlay->w/2);
    memcpy(yuv_overlay->pixels[2]+yuv_overlay->pitches[2]*i,
           yuv.u+crop_offset+yuv.uv_stride*i,
           yuv_overlay->w/2);
  }

  /* Unlock SDL_yuv_overlay */
  if ( SDL_MUSTLOCK(screen) ) {
    SDL_UnlockSurface(screen);
  }
  SDL_UnlockYUVOverlay(yuv_overlay);


  /* Show, baby, show! */
  SDL_DisplayYUVOverlay(yuv_overlay, &rect);

}
Example #7
bool Oggeyman::process_packet(unsigned char *BGRAbuffer) {
	// if we got to here, it should be that
	// a) we have theora stream
	// b) this particular packet belongs to theora
	double this_packet_time = get_packet_time();
#ifdef ANDR
	struct timespec time1,time2;
	clock_gettime(CLOCK_MONOTONIC, &time1);
#endif
	int retvalue = theora_decode_packetin(&theo_state, &ogg_theora_packet);
#ifdef ANDR
	clock_gettime(CLOCK_MONOTONIC, &time2);
	__android_log_print(ANDROID_LOG_INFO, "foo",
			"\tin process_packet:  theora_decode_packetin  %6.2lf ms\n", (time2.tv_sec - time1.tv_sec)*1.e3 + (time2.tv_nsec - time1.tv_nsec)/1.e6 );
#endif
	if (retvalue != 0)
		return false;
	// decoding ok, give us the YUV buffer pls
#ifdef ANDR
	clock_gettime(CLOCK_MONOTONIC, &time1);
#endif

	theora_decode_YUVout(&theo_state, &YUVbuffer);
#ifdef ANDR
	clock_gettime(CLOCK_MONOTONIC, &time2);
	__android_log_print(ANDROID_LOG_INFO, "foo",
			"\tin process_packet:  theora_decode_YUVout  %6.2lf ms\n", (time2.tv_sec - time1.tv_sec)*1.e3 + (time2.tv_nsec - time1.tv_nsec)/1.e6 );
#endif
	// translate the buffer to BGRA, OpenGL's favorite format
#ifdef ANDR
	clock_gettime(CLOCK_MONOTONIC, &time1);
#endif
	yuv2bgra(BGRAbuffer);
#ifdef ANDR
	clock_gettime(CLOCK_MONOTONIC, &time2);
	__android_log_print(ANDROID_LOG_INFO, "foo",
			"\tin process_packet:  yuv2bgra  %6.2lf ms\n",
			(time2.tv_sec - time1.tv_sec)*1.e3 + (time2.tv_nsec - time1.tv_nsec)/1.e6 );
#endif
	last_packet_time = this_packet_time;
	return true;

}
Example #8
/**
**  Draw Ogg data to the overlay
*/
static int OutputTheora(OggData *data, SDL_Overlay *yuv_overlay, SDL_Rect *rect)
{
    int i;
    yuv_buffer yuv;
    int crop_offset;

    theora_decode_YUVout(&data->tstate, &yuv);

    if (SDL_MUSTLOCK(TheScreen)) {
        if (SDL_LockSurface(TheScreen) < 0) {
            return -1;
        }
    }

    if (SDL_LockYUVOverlay(yuv_overlay) < 0) {
        return -1;
    }

    crop_offset = data->tinfo.offset_x + yuv.y_stride * data->tinfo.offset_y;
    for (i = 0; i < yuv_overlay->h; ++i) {
        memcpy(yuv_overlay->pixels[0] + yuv_overlay->pitches[0] * i,
               yuv.y + crop_offset + yuv.y_stride * i, yuv_overlay->w);
    }

    crop_offset = (data->tinfo.offset_x / 2) + (yuv.uv_stride) *
                  (data->tinfo.offset_y / 2);
    for (i = 0; i < yuv_overlay->h / 2; ++i) {
        memcpy(yuv_overlay->pixels[1] + yuv_overlay->pitches[1] * i,
               yuv.v + crop_offset + yuv.uv_stride * i, yuv_overlay->w / 2);
        memcpy(yuv_overlay->pixels[2] + yuv_overlay->pitches[2] * i,
               yuv.u + crop_offset + yuv.uv_stride * i, yuv_overlay->w / 2);
    }

    if (SDL_MUSTLOCK(TheScreen)) {
        SDL_UnlockSurface(TheScreen);
    }
    SDL_UnlockYUVOverlay(yuv_overlay);

    SDL_DisplayYUVOverlay(yuv_overlay, rect);

    return 0;
}
Example #9
int
mm_decode_video(mm_file *mf, SDL_Overlay *ovl)
{
    int rv = 0;
    ogg_packet pkt;
    yuv_buffer yuv;

    assert(mf);

    if (!mf->video) {
        return -1;
    }

    if (mf->drop_packets & MEDIA_VIDEO) {
        WARNING1("requested decode but MEDIA_VIDEO is set to ignore");
        return -1;
    }

    for (;;) {
        rv = get_packet(mf, &pkt, MEDIA_VIDEO);

        if (rv <= 0) {
            return rv;
        }

        /* we got packet, decode */
        if (theora_decode_packetin(mf->video_ctx, &pkt) == 0) {
            break;
        } else {
            WARNING1("packet does not contain theora frame");
            /* get next packet */
        }
    }

    theora_decode_YUVout(mf->video_ctx, &yuv);

    if (yuv_to_overlay(mf, &yuv, ovl) < 0) {
        return -1;
    }

    return 1;
}
Example #10
/*
 * decode frame
 */
static mp_image_t* decode(sh_video_t *sh,void* data,int len,int flags) 
{
   theora_struct_t *context = (theora_struct_t *)sh->context;
   int errorCode = 0;
   ogg_packet op;
   yuv_buffer yuv;
   mp_image_t* mpi;

   memset (&op, 0, sizeof (op));
   op.bytes = len;
   op.packet = data;
   op.granulepos = -1;

   errorCode = theora_decode_packetin (&context->st, &op);
   if (errorCode)
   {
      mp_msg(MSGT_DECVIDEO,MSGL_ERR,"Theora decode packetin failed: %i \n",
	     errorCode);
      return NULL;
   }

   errorCode = theora_decode_YUVout (&context->st, &yuv);
   if (errorCode)
   {
      mp_msg(MSGT_DEMUX,MSGL_ERR,"Theora decode YUVout failed: %i \n",
	     errorCode);
      return 0;
   }

    mpi = mpcodecs_get_image(sh, MP_IMGTYPE_EXPORT, 0, yuv.y_width, yuv.y_height);
    if(!mpi) return NULL;

    mpi->planes[0]=yuv.y;
    mpi->stride[0]=yuv.y_stride;
    mpi->planes[1]=yuv.u;
    mpi->stride[1]=yuv.uv_stride;
    mpi->planes[2]=yuv.v;
    mpi->stride[2]=yuv.uv_stride;
   
    return mpi;
}
Example #11
void DGVideo::update() {
    if (_state == DGVideoPlaying) {
        time_t currentTime = DGSystem::getInstance().wallTime();
        double duration = (double)(currentTime - _lastTime) / CLOCKS_PER_SEC;
        
        if (duration >= _frameDuration) {
            yuv_buffer yuv;
            
            // TODO: Skip frames if required here?
            //int frames = (int)floor(duration / _frameDuration);
            //for (int i = 1; i <= frames; i++)
                _prepareFrame();
            
            theora_decode_YUVout(&_theoraInfo->td, &yuv);
            _convertToRGB(yuv.y, yuv.y_stride,
                       yuv.u, yuv.v, yuv.uv_stride,
                       _currentFrame.data, _theoraInfo->ti.width, _theoraInfo->ti.height, _theoraInfo->ti.width);
            
            _lastTime = currentTime;
            
            _hasNewFrame = true;
        }
	}
}
Example #12
bool video_write(SDL_Surface *screen, SDL_Overlay *yuv_overlay, const int x, const int y)
//Writes a frame of video to screen+overlay.
{
	yuv_buffer yuv;
	theora_decode_YUVout(&td,&yuv);

	// Lock SDL_yuv_overlay
	if (SDL_MUSTLOCK(screen))
		if (SDL_LockSurface(screen) < 0)
			return false;
	if (SDL_LockYUVOverlay(yuv_overlay) < 0)
		return false;

	// Draw the data (*yuv[3]) on an SDL screen (*screen)
	// deal with border stride
	// and crop input properly, respecting the encoded frame rect
	UINT i,j;
	int crop_offset=ti.offset_x+yuv.y_stride*ti.offset_y;
	Uint8 *pY = (Uint8*)yuv.y+crop_offset;

	//Luminance range is normalized to [16,235].  Denormalize to the [0,255] range.
	//Similarly, chrominance is in the range [16,239].  Denormalize to [0,255].
	//Prepare lookup tables of pre-converted and clamped results to minimize conversion overhead.
	static BYTE lumConversion[256], chrConversion[256];
	static bool bFirst = true;
	if (bFirst)
	{
		static const UINT MIN_LUM = 13, MAX_LUM = 235; //[13,235] works better for some reason
		static const UINT MIN_CHR = 16, MAX_CHR = 239;
		for (i=0; i<256; ++i)
		{
			lumConversion[i] = i <= MIN_LUM ? 0 : i >= MAX_LUM ? 255 : (i-MIN_LUM)*255/(MAX_LUM-MIN_LUM);
			chrConversion[i] = i <= MIN_CHR ? 0 : i >= MAX_CHR ? 255 : (i-MIN_CHR)*255/(MAX_CHR-MIN_CHR);
		}
		bFirst = false;
	}

	//Translate values instead of doing a straight copy.
	Uint8 *src, *dest;
	for (i=0; i<(UINT)yuv_overlay->h; ++i)
	{
		src = pY+yuv.y_stride*i;
		dest = yuv_overlay->pixels[0]+yuv_overlay->pitches[0]*i;
		for (j=0; j<(UINT)yuv_overlay->w; ++j)
			*(dest++) = lumConversion[*(src++)];
		//memcpy(dest, src, yuv_overlay->w);
	}

	crop_offset=(ti.offset_x/2)+(yuv.uv_stride)*(ti.offset_y/2);
	Uint8 *pU = (Uint8*) yuv.u+crop_offset, *pV = (Uint8*) yuv.v+crop_offset;
	const UINT hOver2 = yuv_overlay->h/2;
	const UINT wOver2 = yuv_overlay->w/2;
	Uint8 *src2, *dest2;
	for (i=0; i<hOver2; ++i)
	{
		src = pU+yuv.uv_stride*i;
		dest = yuv_overlay->pixels[1]+yuv_overlay->pitches[1]*i;
		src2 = pV+yuv.uv_stride*i;
		dest2 = yuv_overlay->pixels[2]+yuv_overlay->pitches[2]*i;
		for (j=0; j<wOver2; ++j)
		{
			*(dest++) = chrConversion[*(src++)];
			*(dest2++) = chrConversion[*(src2++)];
		}
		//memcpy(dest, src, wOver2);
		//memcpy(dest2, src2, wOver2);
	}

	// Unlock SDL_yuv_overlay
	SDL_UnlockYUVOverlay(yuv_overlay);
	if (SDL_MUSTLOCK(screen))
		SDL_UnlockSurface(screen);

	// Show frame to surface!
	SDL_Rect rect = {x, y, (Uint16)ti.frame_width, (Uint16)ti.frame_height};
	if (SDL_DisplayYUVOverlay(yuv_overlay, &rect) == 0)
		return true; //successful display

	//Attempt to display to the origin if a different position doesn't work.
	rect.x = rect.y = 0;
	if (SDL_DisplayYUVOverlay(yuv_overlay, &rect) == 0)
		return true; //successful display

	//Error displaying.
	char text[512];
	sprintf(text, "SDL: Couldn't display SDL_yuv_overlay (%i,%i): %s\n", rect.w, rect.h, SDL_GetError());
	LOGERR(text);
	return false;
}
Example #13
static void *ucil_theora_worker_thread( ucil_theora_input_file_object_t *vobj )
{
   unicap_data_buffer_t new_frame_buffer;

   struct timeval ltime;
   int eos = 0;

   unicap_copy_format( &new_frame_buffer.format, &vobj->format );
   new_frame_buffer.type = UNICAP_BUFFER_TYPE_SYSTEM;
   new_frame_buffer.buffer_size = new_frame_buffer.format.buffer_size;
   new_frame_buffer.data = malloc( new_frame_buffer.format.buffer_size );

   gettimeofday( &ltime, NULL );
   
   while( !vobj->quit_capture_thread )
   {
      struct timespec abs_timeout;
      struct timeval  ctime;
      GList *entry;
      ogg_page og;
      ogg_packet op;
      size_t bytes;

      int buffer_ready = 0;
      


      if( !eos && ( ogg_stream_packetout( &vobj->os, &op ) > 0 ) )
      {
	 yuv_buffer yuv;

	 theora_decode_packetin( &vobj->th, &op );
	 theora_decode_YUVout( &vobj->th, &yuv );
	 copy_yuv( new_frame_buffer.data, &yuv, &vobj->ti );

	 buffer_ready = 1;
      } 
      else if( !eos )
      {
	 bytes = buffer_data( vobj->f, &vobj->oy );      
	 if( !bytes )
	 {
	    TRACE( "End of stream\n" );
	    eos = 1;
	    
	 }
	 
	 while( ogg_sync_pageout( &vobj->oy, &og ) > 0 )
	 {
	    ogg_stream_pagein( &vobj->os, &og );
	 }
	 continue;
      }
      else
      {
	 buffer_ready = 1;
      }

      gettimeofday( &ctime, NULL );
      abs_timeout.tv_sec = ctime.tv_sec + 1;
      abs_timeout.tv_nsec = ctime.tv_usec * 1000;      
      if( sem_timedwait( &vobj->sema, &abs_timeout ) )
      {
	 TRACE( "SEM_WAIT FAILED\n" );
	 continue;
      }

      if( buffer_ready && vobj->event_callback )
      {
	 vobj->event_callback( vobj->event_unicap_handle, UNICAP_EVENT_NEW_FRAME, &new_frame_buffer );
	 TRACE( "New frame\n" );
      }
      
      unicap_data_buffer_t *data_buffer = g_queue_pop_head( vobj->in_queue );
      if( data_buffer )
      {
	 unicap_copy_format( &data_buffer->format, &vobj->format );
	 memcpy( data_buffer->data, new_frame_buffer.data, vobj->format.buffer_size );
	 
	 g_queue_push_tail( vobj->out_queue, data_buffer );
      }

      sem_post( &vobj->sema );
      
      if( buffer_ready )
      {
	 gettimeofday( &ctime, NULL );
	 if( ctime.tv_usec < ltime.tv_usec )
	 {
	    ctime.tv_usec += 1000000;
	    ctime.tv_sec -= 1;
	 }
	 
	 ctime.tv_usec -= ltime.tv_usec;
	 ctime.tv_sec -= ltime.tv_sec;
	 
	 if( ( ctime.tv_sec == 0 ) &&
	     ( ctime.tv_usec < vobj->frame_intervall ) )
	 {
	    usleep( vobj->frame_intervall - ctime.tv_usec );
	 }
      
	 gettimeofday( &ltime, NULL );
      }
   }

   free( new_frame_buffer.data );
   return NULL;
}
Example #14
void JVideoServer::Init()
{return;
    memset( &m_StreamState, 0, sizeof( m_StreamState    ) );
    memset( &m_SyncState,   0, sizeof( m_SyncState      ) );
    memset( &m_Page,        0, sizeof( m_Page           ) );
    memset( &m_Packet,      0, sizeof( m_Packet         ) );
    memset( &m_Comment,     0, sizeof( m_Comment        ) );
    memset( &m_Info,        0, sizeof( m_Info           ) );
    memset( &m_State,       0, sizeof( m_State          ) );
    memset( &m_YUVBuffer,   0, sizeof( m_YUVBuffer      ) );

    ogg_stream_clear( &m_StreamState );
    ogg_sync_init( &m_SyncState );
  
    theora_comment_init( &m_Comment );
    theora_info_init( &m_Info );

    // now look for the start of the theora logical stream
    bool bStartHeader = true;
    int nHeaderPackets = 0;  // number of theora header packets processed so far

    do
    {
      if (LoadChunk( m_File, &m_SyncState ) ==0)
      {
        // end of file; at this point that is an error
        assert( "!eof searched, terminate...");
      }

      // ogg_sync_pageout assembles a page from the sync buffer
      while (ogg_sync_pageout( &m_SyncState, &m_Page ) > 0)
        // 1 - a page was produced, no more data needed
        // 0 - more data is needed to build a page
      {

        // the page was assembled successfully

        // is this a header page? if not, stop looking for headers
        if (ogg_page_bos( &m_Page ) == false)
        {
          // no, this is not a header page
          // so the header pages of all logical streams are over
          // and the data of those streams has begun
          // therefore we must switch to reading data pages

          // push this page into the logical video stream
          PushPage( &m_Page );
          // PushPage feeds the page into the theora logical stream
          // if the logical stream serial number matches;
          // otherwise the page is ignored

          // break out of the loops
          bStartHeader = false;
          break;
        }
        else
        {
          // yes, this is a header page

          // a test logical stream
          ogg_stream_state m_StreamStateTest;
          memset(&m_StreamStateTest, 0x00, sizeof(ogg_stream_state));

          // initialize the test stream with the same serial number
          // as the current page
          if(0!= ogg_stream_init(&m_StreamStateTest,ogg_page_serialno(&m_Page)) )
            assert( "!error during ogg_stream_init");

          // add the page to the test stream
          if(0!= ogg_stream_pagein(&m_StreamStateTest,&m_Page) )
            assert( "!error during ogg_stream_pagein");

          // decode a packet out of this test stream
          if( ogg_stream_packetout(&m_StreamStateTest,&m_Packet) ==-1)
            assert( "!error during ogg_stream_packetout");

          // nHeaderPackets is the number of theora header PACKETS
          // (not pages) read so far;
          // per the theora specification there must be three of them
          if(nHeaderPackets==0)
          {
            int dhr = theora_decode_header (&m_Info, &m_Comment, &m_Packet);
            // decode the theora header

            if(dhr<0)
            {
              // this is not a theora header

              // clear the test stream structure
              ogg_stream_clear(&m_StreamStateTest);
              // and continue the loop, looking for theora headers
            }
            else
            {
              // this is a theora header!

              // this is how we "initialize" the theora logical stream:
              memcpy(&m_StreamState, &m_StreamStateTest,
                              sizeof(m_StreamStateTest));
              // from now on this stream will always yield theora packets

              nHeaderPackets++;

              // after finding the header page of the theora logical stream,
              // we still have to read the remaining header pages of the
              // other streams and discard them (if there are any)
            }
          }
        }
      }

    }
    while (bStartHeader);
 
    // now we need to get two more theora header packets (see its documentation),
    // after which we can move on to streaming playback

    while(nHeaderPackets<3)
    {
        int result=ogg_stream_packetout(&m_StreamState,&m_Packet);
        // if the function returns zero, there is not enough data to decode;
        // for some reason this is NOT in the libogg spec, or I did not look hard enough

        if (result < 0)
        {
          // decoding error, corrupt stream
          assert( "!error during ogg_stream_packetout");
        }

        if (result > 0)
        {
          // successfully extracted a theora packet

          int result2 = theora_decode_header( &m_Info, &m_Comment, &m_Packet );

          if(result2<0)
          {
            // decoding error, corrupt stream
            rlog.err("VIDEO: error during theora_decode_header (corrupt stream)");
          }

          ++nHeaderPackets;
        }

      // this page has been handled, so fetch a new one; first check the read
      // buffer in case something page-like is still sitting there, and if not,
      // simply read more data from the file:

      if (ogg_sync_pageout( &m_SyncState, &m_Page ) > 0)
        // ogg_sync_pageout takes data from the ogg receive buffer
        // and writes it into an ogg_page
      {
        // we found a page in the buffer and...
        PushPage( &m_Page );
        // ...push it into the matching stream
      }
      else
      {
        // we found nothing in the buffer
        int ret = LoadChunk( m_File, &m_SyncState );
        // we need more data! read it from the file
        
        if (ret == 0)
        {
          // the file ended again!
          rlog.err("VIDEO: eof searched. terminate...");
        }
      }
    }

    // init videostream
    theora_decode_init( &m_State, &m_Info );

    switch(m_Info.colorspace)
    {
      case OC_CS_UNSPECIFIED:
        // nothing to report
        break;
      case OC_CS_ITU_REC_470M:
        rlog.msg("Encoder specified ITU Rec 470M (NTSC) color.");
        // log the reported colorspace
        break;
      case OC_CS_ITU_REC_470BG:
        rlog.msg("Encoder specified ITU Rec 470BG (PAL) color.");
        break;
      default:
        rlog.msg("Warning: encoder specified unknown colorspace.");
        break;
    }

  // theora processing...
  while (ogg_stream_packetout( &m_StreamState, &m_Packet ) <= 0)
  {
    // not enough data in the theora logical stream;
    // we have to pull data from the physical stream and stuff it into the logical stream

    // read data from the file
    int ret = LoadChunk( m_File, &m_SyncState );
    if (ret == 0)
    {
      // the file has ended; perform the shutdown steps
      // and exit the application
      TheoraClose();
      return;
    }

    while (ogg_sync_pageout( &m_SyncState, &m_Page ) > 0)
      // decode the buffered data into pages (ogg_page)
      // until the buffer runs dry
    {
      // push these pages into the corresponding logical streams
      PushPage( &m_Page );
    }
  }


   
  // decoded successfully: the packet contains decoded ogg data
  // (that is, still-encoded theora data)

  // feed the packet into the theora decoder
  if (theora_decode_packetin(&m_State,&m_Packet) == OC_BADPACKET)
  {
    // decoding error
      rlog.err( "error during theora_decode_packetin..." );
  }

  // all data received, prepare the frame

  // decode the frame in YUV form into the yuv_buffer structure
  if (theora_decode_YUVout( &m_State, &m_YUVBuffer ) != 0)
  {
    // decoding error
    rlog.err( "error during theora_decode_YUVout...");
  }
    
    // if this is the first frame, create the frame buffer
    BYTE* frame = new BYTE[m_YUVBuffer.y_height*m_YUVBuffer.y_width*4];

  // yuv to rgb
  for (int cy = 0; cy < m_YUVBuffer.y_height; cy++)
  {
    int nYShift  = m_YUVBuffer.y_stride*cy;
    int nUVShift = m_YUVBuffer.uv_stride*(cy >> 1);
    
    for (int cx = 0; cx < m_YUVBuffer.y_width; cx++)
    {
      int nHX = (cx >> 1);

      BYTE nY = *(BYTE*)(m_YUVBuffer.y + nYShift  + cx  );
      BYTE nU = *(BYTE*)(m_YUVBuffer.u + nUVShift + nHX );
      BYTE nV = *(BYTE*)(m_YUVBuffer.v + nUVShift + nHX );

      int index = (cy*m_YUVBuffer.y_width + cx)*4;

      float r = nY + 1.371f*(nV - 128);
      float g = nY - 0.698f*(nV - 128) - 0.336f*(nU - 128);
      float b = nY + 1.732f*(nU - 128);

      frame[index + 0] = (BYTE)clamp( r, 0.0f, 255.0f );
      frame[index + 1] = (BYTE)clamp( g, 0.0f, 255.0f );
      frame[index + 2] = (BYTE)clamp( b, 0.0f, 255.0f );
      frame[index + 3] = 255;
    }
  }
} // JVideoServer::Init
Example #15
static GstFlowReturn
theora_handle_data_packet (GstTheoraDec * dec, ogg_packet * packet,
    GstClockTime outtime)
{
  /* normal data packet */
  yuv_buffer yuv;
  GstBuffer *out;
  guint i;
  gboolean keyframe;
  gint out_size;
  gint stride_y, stride_uv;
  gint width, height;
  gint cwidth, cheight;
  GstFlowReturn result;

  if (G_UNLIKELY (!dec->have_header))
    goto not_initialized;

  /* the second most significant bit of the first data byte is cleared 
   * for keyframes. We can only check it if it's not a zero-length packet. */
  keyframe = packet->bytes && ((packet->packet[0] & 0x40) == 0);
  if (G_UNLIKELY (keyframe)) {
    GST_DEBUG_OBJECT (dec, "we have a keyframe");
    dec->need_keyframe = FALSE;
  } else if (G_UNLIKELY (dec->need_keyframe)) {
    goto dropping;
  }

  GST_DEBUG_OBJECT (dec, "parsing data packet");

  /* this does the decoding */
  if (G_UNLIKELY (theora_decode_packetin (&dec->state, packet)))
    goto decode_error;

  if (outtime != -1) {
    gboolean need_skip;
    GstClockTime qostime;

    /* qos needs to be done on running time */
    qostime = gst_segment_to_running_time (&dec->segment, GST_FORMAT_TIME,
        outtime);

    GST_OBJECT_LOCK (dec);
    /* check for QoS, don't perform the last steps of getting and
     * pushing the buffers that are known to be late. */
    /* FIXME, we can also entirely skip decoding if the next valid buffer is 
     * known to be after a keyframe (using the granule_shift) */
    need_skip = dec->earliest_time != -1 && qostime <= dec->earliest_time;
    GST_OBJECT_UNLOCK (dec);

    if (need_skip)
      goto dropping_qos;
  }

  /* this does postprocessing and set up the decoded frame
   * pointers in our yuv variable */
  if (G_UNLIKELY (theora_decode_YUVout (&dec->state, &yuv) < 0))
    goto no_yuv;

  if (G_UNLIKELY ((yuv.y_width != dec->info.width)
          || (yuv.y_height != dec->info.height)))
    goto wrong_dimensions;

  width = dec->width;
  height = dec->height;
  cwidth = width / 2;
  cheight = height / 2;

  /* should get the stride from the caps, for now we round up to the nearest
   * multiple of 4 because some element needs it. chroma needs special 
   * treatment, see videotestsrc. */
  stride_y = GST_ROUND_UP_4 (width);
  stride_uv = GST_ROUND_UP_8 (width) / 2;

  out_size = stride_y * height + stride_uv * cheight * 2;

  /* now copy over the area contained in offset_x,offset_y,
   * frame_width, frame_height */
  result =
      gst_pad_alloc_buffer_and_set_caps (dec->srcpad, GST_BUFFER_OFFSET_NONE,
      out_size, GST_PAD_CAPS (dec->srcpad), &out);
  if (G_UNLIKELY (result != GST_FLOW_OK))
    goto no_buffer;

  /* copy the visible region to the destination. This is actually pretty
   * complicated and gstreamer doesn't support all the needed caps to do this
   * correctly. For example, when we have an odd offset, we should only combine
   * 1 row/column of luma samples with one chroma sample in colorspace conversion. 
   * We compensate for this by adding a black border around the image when the
   * offset or size is odd (see above).
   */
  {
    guchar *dest_y, *src_y;
    guchar *dest_u, *src_u;
    guchar *dest_v, *src_v;
    gint offset;

    dest_y = GST_BUFFER_DATA (out);
    dest_u = dest_y + stride_y * height;
    dest_v = dest_u + stride_uv * cheight;

    src_y = yuv.y + dec->offset_x + dec->offset_y * yuv.y_stride;

    for (i = 0; i < height; i++) {
      memcpy (dest_y, src_y, width);

      dest_y += stride_y;
      src_y += yuv.y_stride;
    }

    offset = dec->offset_x / 2 + dec->offset_y / 2 * yuv.uv_stride;

    src_u = yuv.u + offset;
    src_v = yuv.v + offset;

    for (i = 0; i < cheight; i++) {
      memcpy (dest_u, src_u, cwidth);
      memcpy (dest_v, src_v, cwidth);

      dest_u += stride_uv;
      src_u += yuv.uv_stride;
      dest_v += stride_uv;
      src_v += yuv.uv_stride;
    }
  }

  GST_BUFFER_OFFSET (out) = dec->frame_nr;
  if (dec->frame_nr != -1)
    dec->frame_nr++;
  GST_BUFFER_OFFSET_END (out) = dec->frame_nr;
  if (dec->granulepos != -1) {
    gint64 cf = _theora_granule_frame (dec, dec->granulepos) + 1;

    GST_BUFFER_DURATION (out) = gst_util_uint64_scale_int (cf * GST_SECOND,
        dec->info.fps_denominator, dec->info.fps_numerator) - outtime;
  } else {
    GST_BUFFER_DURATION (out) =
        gst_util_uint64_scale_int (GST_SECOND, dec->info.fps_denominator,
        dec->info.fps_numerator);
  }
  GST_BUFFER_TIMESTAMP (out) = outtime;

  if (dec->segment.rate >= 0.0)
    result = theora_dec_push_forward (dec, out);
  else
    result = theora_dec_push_reverse (dec, out);

  return result;

  /* ERRORS */
not_initialized:
  {
    GST_ELEMENT_ERROR (GST_ELEMENT (dec), STREAM, DECODE,
        (NULL), ("no header sent yet"));
    return GST_FLOW_ERROR;
  }
dropping:
  {
    GST_WARNING_OBJECT (dec, "dropping frame because we need a keyframe");
    dec->discont = TRUE;
    return GST_FLOW_OK;
  }
dropping_qos:
  {
    if (dec->frame_nr != -1)
      dec->frame_nr++;
    dec->discont = TRUE;
    GST_WARNING_OBJECT (dec, "dropping frame because of QoS");
    return GST_FLOW_OK;
  }
decode_error:
  {
    GST_ELEMENT_ERROR (GST_ELEMENT (dec), STREAM, DECODE,
        (NULL), ("theora decoder did not decode data packet"));
    return GST_FLOW_ERROR;
  }
no_yuv:
  {
    GST_ELEMENT_ERROR (GST_ELEMENT (dec), STREAM, DECODE,
        (NULL), ("couldn't read out YUV image"));
    return GST_FLOW_ERROR;
  }
wrong_dimensions:
  {
    GST_ELEMENT_ERROR (GST_ELEMENT (dec), STREAM, FORMAT,
        (NULL), ("dimensions of image do not match header"));
    return GST_FLOW_ERROR;
  }
no_buffer:
  {
    GST_DEBUG_OBJECT (dec, "could not get buffer, reason: %s",
        gst_flow_get_name (result));
    return result;
  }
}
Example #16
static int OGV_LoadVideoFrame(cinematic_t *cin)
{
	int        r = 0;
	ogg_packet op;

	memset(&op, 0, sizeof(op));

	while (!r && (ogg_stream_packetout(&g_ogm->os_video, &op)))
	{
		ogg_int64_t th_frame;

		theora_decode_packetin(&g_ogm->th_state, &op);

		th_frame = theora_granule_frame(&g_ogm->th_state, g_ogm->th_state.granulepos);

		if ((g_ogm->VFrameCount < th_frame && th_frame >= OGV_NextNeededVFrame(cin)) || !cin->frameBuffer[0])
		{
			if (theora_decode_YUVout(&g_ogm->th_state, &g_ogm->th_yuvbuffer))
			{
				continue;
			}

			if (cin->frameWidth != g_ogm->th_info.width || cin->frameHeight != g_ogm->th_info.height)
			{
				cin->frameWidth  = g_ogm->th_info.width;
				cin->frameHeight = g_ogm->th_info.height;
				Com_DPrintf("Theora new resolution %dx%d\n", cin->frameWidth, cin->frameHeight);
			}

			if (cin->frameBufferSize < g_ogm->th_info.width * g_ogm->th_info.height)
			{

				cin->frameBufferSize = g_ogm->th_info.width * g_ogm->th_info.height;

				/* Free old output buffer */
				if (cin->frameBuffer[0])
				{
					Com_Dealloc(cin->frameBuffer[0]);
					cin->frameBuffer[0] = NULL;
				}

				/* Allocate the new buffer */
				cin->frameBuffer[0] = (unsigned char *)Com_Allocate(cin->frameBufferSize * 4);
				if (cin->frameBuffer[0] == NULL)
				{
					cin->frameBufferSize = 0;
					r                    = -2;
					break;
				}
			}

			if (OGV_yuv_to_rgb24(&g_ogm->th_yuvbuffer, &g_ogm->th_info, (unsigned int *) cin->frameBuffer[0]))
			{
				r                  = 1;
				g_ogm->VFrameCount = th_frame;
			}
			else
			{
				r = -1;
			}
		}
	}

	return r;
}
Example #17
static int loadVideoFrameTheora(void) {
	int r = 0;
	ogg_packet	op;

	memset(&op,0,sizeof(op));

	while( !r && (ogg_stream_packetout(&g_ogm.os_video,&op)) ) {
		ogg_int64_t th_frame;
		theora_decode_packetin(&g_ogm.th_state, &op);

		th_frame = theora_granule_frame(&g_ogm.th_state,g_ogm.th_state.granulepos);

		if((g_ogm.VFrameCount<th_frame && th_frame>=nextNeededVFrame()) || !g_ogm.outputBuffer) {
//			int i,j;
			int yWShift, uvWShift;
			int yHShift, uvHShift;

			if( theora_decode_YUVout(&g_ogm.th_state, &g_ogm.th_yuvbuffer) )
				continue;

			if(g_ogm.outputWidht != g_ogm.th_info.width || g_ogm.outputHeight != g_ogm.th_info.height) {
				g_ogm.outputWidht = g_ogm.th_info.width;
				g_ogm.outputHeight = g_ogm.th_info.height;
				Com_DPrintf("[Theora(ogg)]new resolution %dx%d\n",g_ogm.outputWidht,g_ogm.outputHeight);
			}

			if(g_ogm.outputBufferSize < g_ogm.th_info.width*g_ogm.th_info.height) {

				g_ogm.outputBufferSize = g_ogm.th_info.width*g_ogm.th_info.height;

				/* Free old output buffer*/
				if(g_ogm.outputBuffer) free(g_ogm.outputBuffer);

				/* Allocate the new buffer */
				g_ogm.outputBuffer = (unsigned char*)malloc(g_ogm.outputBufferSize*4);
				if(g_ogm.outputBuffer == NULL) {
					g_ogm.outputBufferSize = 0;
					r = -2;
					break;
				}
			}

			yWShift  = findSizeShift(g_ogm.th_yuvbuffer.y_width,  g_ogm.th_info.width);
			uvWShift = findSizeShift(g_ogm.th_yuvbuffer.uv_width, g_ogm.th_info.width);
			yHShift  = findSizeShift(g_ogm.th_yuvbuffer.y_height, g_ogm.th_info.height);
			uvHShift = findSizeShift(g_ogm.th_yuvbuffer.uv_height,g_ogm.th_info.height);

			if(yWShift<0 || uvWShift<0 || yHShift<0 || uvHShift<0) {
				Com_Printf("[Theora] unexpected resolution in a yuv-Frame\n");
				r = -1;
			}
			else {

				Frame_yuv_to_rgb24(g_ogm.th_yuvbuffer.y,g_ogm.th_yuvbuffer.u,g_ogm.th_yuvbuffer.v,
						g_ogm.th_info.width, g_ogm.th_info.height, g_ogm.th_yuvbuffer.y_stride, g_ogm.th_yuvbuffer.uv_stride,
						yWShift, uvWShift, yHShift, uvHShift, (unsigned int*)g_ogm.outputBuffer );

/*				unsigned char*	pixelPtr = g_ogm.outputBuffer;
				unsigned int*	pixPtr;
				pixPtr = (unsigned int*)g_ogm.outputBuffer;

				//TODO: use one yuv->rgb funktion for the hole frame (the big amout of stack movement(yuv->rgb calls) couldn't be good ;) )
				for(j=0;j<g_ogm.th_info.height;++j) {
					for(i=0;i<g_ogm.th_info.width;++i) {
#if 1
						// simple grayscale-output ^^
						pixelPtr[0] =
							pixelPtr[1] =
							pixelPtr[2] = g_ogm.th_yuvbuffer.y[i+j*g_ogm.th_yuvbuffer.y_stride];
						pixelPtr+=4;

#else
						// using RoQ yuv->rgb code
						*pixPtr++ = yuv_to_rgb24( g_ogm.th_yuvbuffer.y[(i>>yWShift)+(j>>yHShift)*g_ogm.th_yuvbuffer.y_stride],
												g_ogm.th_yuvbuffer.u[(i>>uvWShift)+(j>>uvHShift)*g_ogm.th_yuvbuffer.uv_stride],
												g_ogm.th_yuvbuffer.v[(i>>uvWShift)+(j>>uvHShift)*g_ogm.th_yuvbuffer.uv_stride]);
#endif
					}
				}
*/

				r = 1;
				g_ogm.VFrameCount=th_frame;
			}
		}


	}

	return r;
}