bool TheoraState::Init() { if (!mActive) return false; int64_t n = mInfo.aspect_numerator; int64_t d = mInfo.aspect_denominator; mPixelAspectRatio = (n == 0 || d == 0) ? 1.0f : static_cast<float>(n) / static_cast<float>(d); // Ensure the frame and picture regions aren't larger than our prescribed // maximum, or zero sized. nsIntSize frame(mInfo.frame_width, mInfo.frame_height); nsIntRect picture(mInfo.pic_x, mInfo.pic_y, mInfo.pic_width, mInfo.pic_height); if (!VideoInfo::ValidateVideoRegion(frame, picture, frame)) { return mActive = false; } mCtx = th_decode_alloc(&mInfo, mSetup); if (mCtx == nullptr) { return mActive = false; } return true; }
// Reads and parses the Theora stream headers, allocates the decoder, and
// initializes the two frame buffers (front/back) to black.  Safe to call
// repeatedly: subsequent calls are no-ops once headerParsed is set.
// Throws love::Exception if the first packet is not a Theora header.
void TheoraVideoStream::parseHeader()
{
	if (headerParsed)
		return;

	th_comment comment;
	th_setup_info *setupInfo = nullptr;
	th_comment_init(&comment);
	int ret;

	// First packet must begin the Theora header sequence.
	demuxer.readPacket(packet);
	ret = th_decode_headerin(&videoInfo, &comment, &setupInfo, &packet);

	if (ret < 0)
	{
		th_comment_clear(&comment);
		throw love::Exception("Could not find header");
	}

	// th_decode_headerin() returns > 0 while it is still consuming header
	// packets.  This loop deliberately overshoots by one read: when ret
	// becomes 0, `packet` holds the first *video* packet, which is fed to
	// the decoder at the bottom of this function.
	while (ret > 0)
	{
		demuxer.readPacket(packet);
		ret = th_decode_headerin(&videoInfo, &comment, &setupInfo, &packet);
	}

	th_comment_clear(&comment);

	// NOTE(review): th_decode_alloc()'s result is not checked for NULL here.
	decoder = th_decode_alloc(&videoInfo, setupInfo);
	th_setup_free(setupInfo);

	Frame *buffers[2] = {backBuffer, frontBuffer};

	// Both luma and chroma planes start at the picture offset; scaleFormat()
	// then adjusts the chroma offsets for the stream's subsampling.
	yPlaneXOffset = cPlaneXOffset = videoInfo.pic_x;
	yPlaneYOffset = cPlaneYOffset = videoInfo.pic_y;

	scaleFormat(videoInfo.pixel_fmt, cPlaneXOffset, cPlaneYOffset);

	for (int i = 0; i < 2; i++)
	{
		// Start with full picture dimensions for all planes, then shrink the
		// chroma dimensions according to the pixel format's subsampling.
		buffers[i]->cw = buffers[i]->yw = videoInfo.pic_width;
		buffers[i]->ch = buffers[i]->yh = videoInfo.pic_height;
		scaleFormat(videoInfo.pixel_fmt, buffers[i]->cw, buffers[i]->ch);
		buffers[i]->yplane = new unsigned char[buffers[i]->yw * buffers[i]->yh];
		buffers[i]->cbplane = new unsigned char[buffers[i]->cw * buffers[i]->ch];
		buffers[i]->crplane = new unsigned char[buffers[i]->cw * buffers[i]->ch];
		// Y=16, Cb=Cr=128 is black in video-range YCbCr.
		memset(buffers[i]->yplane, 16, buffers[i]->yw * buffers[i]->yh);
		memset(buffers[i]->cbplane, 128, buffers[i]->cw * buffers[i]->ch);
		memset(buffers[i]->crplane, 128, buffers[i]->cw * buffers[i]->ch);
	}

	headerParsed = true;

	// Feed the leftover first video packet (see loop comment above).
	th_decode_packetin(decoder, &packet, nullptr);
}
/* Exercises th_decode_alloc()'s argument validation: it must refuse a
   NULL info pointer, and it must refuse an initialized-but-unpopulated
   th_info struct with no setup data.  Returns 0 on success; FAIL() is
   expected to abort/report otherwise. */
static int noop_test_decode () {
  th_info ti;
  th_dec_ctx *td;

  INFO ("+ Testing decoder context with null info and setup");
  td = th_decode_alloc(NULL, NULL);
  /* The messages previously named a nonexistent "td_decode_alloc". */
  if (td != NULL)
    FAIL("th_decode_alloc accepted null info pointers");

  INFO ("+ Initializing th_info struct");
  th_info_init (&ti);

  INFO ("+ Testing decoder context with empty info and null setup");
  td = th_decode_alloc(&ti, NULL);
  if (td != NULL)
    FAIL("th_decode_alloc accepted an empty info struct and null setup");

  INFO ("+ Clearing th_info struct");
  th_info_clear (&ti);

  return 0;
}
// Io-language binding around th_decode_alloc().
IoObject *IoTheoraDecodeContext_setup(IoTheoraDecodeContext *self, IoObject *locals, IoMessage *m)
{
	/*doc IoTheoraDecodeContext setup(info, setup)
	Initialize for decoding using the information obtained from reading the Theora headers.
	*/
	// Arguments: a TheoraInfo (arg 0) and a TheoraSetupInfo (arg 1),
	// both previously populated by header parsing.
	IoTheoraInfo *info = IoMessage_locals_theoraInfoArgAt_(m, locals, 0);
	IoTheoraSetupInfo *setup = IoMessage_locals_theoraSetupInfoArgAt_(m, locals, 1);

	// The setup object stores a th_setup_info*, so its data pointer is
	// dereferenced once before being handed to libtheora.
	th_dec_ctx* ctx = th_decode_alloc((th_info*)(IoObject_dataPointer(info)),
	                                  *((th_setup_info**)(IoObject_dataPointer(setup))));
	// Raises an Io exception when the decoder could not be allocated.
	IOASSERT(ctx, "th_decode_alloc failed");

	DATA(self)->ctx = ctx;
	return self;
}
// Validates the parsed Theora headers and allocates the decoder context.
// On any invalid or overflowing header field, deactivates the state
// (mActive = PR_FALSE) and returns PR_FALSE; returns PR_TRUE on success.
PRBool nsTheoraState::Init() {
  if (!mActive)
    return PR_FALSE;

  PRInt64 n = mInfo.fps_numerator;
  PRInt64 d = mInfo.fps_denominator;

  // A corrupt header can carry a zero frame-rate term; bail out here rather
  // than dividing by zero below (the original code had no such guard).
  if (n == 0 || d == 0) {
    return mActive = PR_FALSE;
  }

  // Frame duration in milliseconds is (1000 * d) / n.
  PRInt64 f;
  if (!MulOverflow(1000, d, f)) {
    return mActive = PR_FALSE;
  }
  f /= n;
  if (f > PR_UINT32_MAX) {
    return mActive = PR_FALSE;
  }
  mFrameDuration = static_cast<PRUint32>(f);

  n = mInfo.aspect_numerator;
  d = mInfo.aspect_denominator;
  // A zero aspect term means "unspecified"; default to square pixels.
  mPixelAspectRatio = (n == 0 || d == 0) ?
    1.0f : static_cast<float>(n) / static_cast<float>(d);

  // Ensure the frame region isn't larger than our prescribed maximum.
  PRUint32 pixels;
  if (!MulOverflow32(mInfo.frame_width, mInfo.frame_height, pixels) ||
      pixels > MAX_VIDEO_WIDTH * MAX_VIDEO_HEIGHT ||
      pixels == 0) {
    return mActive = PR_FALSE;
  }

  // Ensure the picture region isn't larger than our prescribed maximum.
  if (!MulOverflow32(mInfo.pic_width, mInfo.pic_height, pixels) ||
      pixels > MAX_VIDEO_WIDTH * MAX_VIDEO_HEIGHT ||
      pixels == 0) {
    return mActive = PR_FALSE;
  }

  mCtx = th_decode_alloc(&mInfo, mSetup);
  if (mCtx == NULL) {
    return mActive = PR_FALSE;
  }
  return PR_TRUE;
}
krad_theora_decoder_t *krad_theora_decoder_create(unsigned char *header1, int header1len, unsigned char *header2, int header2len, unsigned char *header3, int header3len) { krad_theora_decoder_t *krad_theora; krad_theora = calloc(1, sizeof(krad_theora_decoder_t)); krad_theora->granulepos = -1; th_comment_init(&krad_theora->comment); th_info_init(&krad_theora->info); krad_theora->packet.packet = header1; krad_theora->packet.bytes = header1len; krad_theora->packet.b_o_s = 1; krad_theora->packet.packetno = 1; th_decode_headerin(&krad_theora->info, &krad_theora->comment, &krad_theora->setup_info, &krad_theora->packet); //printf("x is %d len is %d\n", x, header1len); krad_theora->packet.packet = header2; krad_theora->packet.bytes = header2len; krad_theora->packet.b_o_s = 0; krad_theora->packet.packetno = 2; th_decode_headerin(&krad_theora->info, &krad_theora->comment, &krad_theora->setup_info, &krad_theora->packet); //printf("x is %d len is %d\n", x, header2len); krad_theora->packet.packet = header3; krad_theora->packet.bytes = header3len; krad_theora->packet.packetno = 3; th_decode_headerin(&krad_theora->info, &krad_theora->comment, &krad_theora->setup_info, &krad_theora->packet); printf("Theora %dx%d %.02f fps video\n Encoded frame content is %dx%d with %dx%d offset\n", krad_theora->info.frame_width, krad_theora->info.frame_height, (double)krad_theora->info.fps_numerator/krad_theora->info.fps_denominator, krad_theora->info.pic_width, krad_theora->info.pic_height, krad_theora->info.pic_x, krad_theora->info.pic_y); krad_theora->decoder = th_decode_alloc(&krad_theora->info, krad_theora->setup_info); th_setup_free(krad_theora->setup_info); return krad_theora; }
// Validates the parsed Theora headers and allocates the decoder context.
// On any invalid or overflowing header field, deactivates the state
// (mActive = PR_FALSE) and returns PR_FALSE; returns PR_TRUE on success.
PRBool nsTheoraState::Init() {
  if (!mActive)
    return PR_FALSE;

  PRInt64 n = mInfo.fps_numerator;
  PRInt64 d = mInfo.fps_denominator;

  // A corrupt header can carry a zero frame-rate term; bail out here rather
  // than dividing by zero below (the original code had no such guard).
  if (n == 0 || d == 0) {
    return mActive = PR_FALSE;
  }

  // Frame duration in milliseconds is (1000 * d) / n.
  PRInt64 f;
  if (!MulOverflow(1000, d, f)) {
    return mActive = PR_FALSE;
  }
  f /= n;
  if (f > PR_UINT32_MAX) {
    return mActive = PR_FALSE;
  }
  mFrameDuration = static_cast<PRUint32>(f);

  n = mInfo.aspect_numerator;
  d = mInfo.aspect_denominator;
  // A zero aspect term means "unspecified"; default to square pixels.
  mPixelAspectRatio = (n == 0 || d == 0) ?
    1.0f : static_cast<float>(n) / static_cast<float>(d);

  // Ensure the frame and picture regions aren't larger than our prescribed
  // maximum, or zero sized.
  nsIntSize frame(mInfo.frame_width, mInfo.frame_height);
  nsIntRect picture(mInfo.pic_x, mInfo.pic_y, mInfo.pic_width, mInfo.pic_height);
  if (!nsVideoInfo::ValidateVideoRegion(frame, picture, frame)) {
    return mActive = PR_FALSE;
  }

  mCtx = th_decode_alloc(&mInfo, mSetup);
  if (mCtx == NULL) {
    return mActive = PR_FALSE;
  }
  return PR_TRUE;
}
// Constructs a video track from parsed Theora headers: allocates the
// decoder, enables maximum post-processing, and creates a full-frame
// backing surface plus a display view cropped to the picture region.
TheoraDecoder::TheoraVideoTrack::TheoraVideoTrack(const Graphics::PixelFormat &format, th_info &theoraInfo, th_setup_info *theoraSetup) {
	_theoraDecode = th_decode_alloc(&theoraInfo, theoraSetup);

	// Only 4:2:0 chroma subsampling is handled by this decoder.
	if (theoraInfo.pixel_fmt != TH_PF_420)
		error("Only theora YUV420 is supported");

	// Query the decoder's maximum post-processing level and enable it.
	int postProcessingMax;
	th_decode_ctl(_theoraDecode, TH_DECCTL_GET_PPLEVEL_MAX, &postProcessingMax, sizeof(postProcessingMax));
	th_decode_ctl(_theoraDecode, TH_DECCTL_SET_PPLEVEL, &postProcessingMax, sizeof(postProcessingMax));

	// The backing surface covers the full encoded frame...
	_surface.create(theoraInfo.frame_width, theoraInfo.frame_height, format);

	// ...while the display surface is a non-owning view into it, offset by
	// (pic_x, pic_y) and sized to the visible picture region.
	_displaySurface.init(theoraInfo.pic_width, theoraInfo.pic_height, _surface.pitch, _surface.getBasePtr(theoraInfo.pic_x, theoraInfo.pic_y), format);

	// Frame rate comes from the stream's fps fraction.
	_frameRate = Common::Rational(theoraInfo.fps_numerator, theoraInfo.fps_denominator);

	_endOfVideo = false;
	_nextFrameStartTime = 0.0;
	_curFrame = -1;
}
/*Implements the legacy theora_decode_init() entry point on top of the new
   th_decode_alloc() API.
  _td: the theora_state to initialize.
  _ci: stream parameters whose codec_setup holds the th_api_wrapper
       produced during header decoding.
  Return: 0 on success, or OC_EINVAL if the decoder cannot be allocated.*/
int theora_decode_init(theora_state *_td,theora_info *_ci){
  th_api_info *apiinfo;
  th_api_wrapper *api;
  th_info info;
  api=(th_api_wrapper *)_ci->codec_setup;
  /*Allocate our own combined API wrapper/theora_info struct.
    We put them both in one malloc'd block so that when the API wrapper is
     freed, the info struct goes with it.
    This avoids having to figure out whether or not we need to free the info
     struct in either theora_info_clear() or theora_clear().
    NOTE(review): _ogg_calloc()'s result is not checked for NULL here.*/
  apiinfo=(th_api_info *)_ogg_calloc(1,sizeof(*apiinfo));
  /*Make our own copy of the info struct, since its lifetime should be
     independent of the one we were passed in.
    (The `*&` is just a plain struct assignment.)*/
  *&apiinfo->info=*_ci;
  /*Convert the info struct now instead of saving the one we decoded with
     theora_decode_header(), since the user might have modified values (i.e.,
     color space, aspect ratio, etc. can be specified from a higher level).
    The user also might be doing something "clever" with the header packets if
     they are not using an Ogg encapsulation.*/
  oc_theora_info2th_info(&info,_ci);
  /*Don't bother to copy the setup info; th_decode_alloc() makes its own copy
     of the stuff it needs.*/
  apiinfo->api.decode=th_decode_alloc(&info,api->setup);
  if(apiinfo->api.decode==NULL){
    _ogg_free(apiinfo);
    return OC_EINVAL;
  }
  apiinfo->api.clear=(oc_setup_clear_func)th_dec_api_clear;
  _td->internal_encode=NULL;
  /*Provide entry points for ABI compatibility with old decoder shared libs.*/
  _td->internal_decode=(void *)&OC_DEC_DISPATCH_VTBL;
  _td->granulepos=0;
  _td->i=&apiinfo->info;
  _td->i->codec_setup=&apiinfo->api;
  return 0;
}
void VideoClip_Theora::_executeRestart() { bool paused = this->timer->isPaused(); if (!paused) { this->timer->pause(); } long initialGranule = 0; th_decode_ctl(this->info.TheoraDecoder, TH_DECCTL_SET_GRANPOS, &initialGranule, sizeof(initialGranule)); th_decode_free(this->info.TheoraDecoder); this->info.TheoraDecoder = th_decode_alloc(&this->info.TheoraInfo, this->info.TheoraSetup); ogg_stream_reset(&this->info.TheoraStreamState); if (this->audioInterface != NULL) { // empty the DSP buffer ogg_packet opVorbis; this->readAudioSamples = 0; while (ogg_stream_packetout(&this->info.VorbisStreamState, &opVorbis) > 0) { if (vorbis_synthesis(&this->info.VorbisBlock, &opVorbis) == 0) { vorbis_synthesis_blockin(&this->info.VorbisDSPState, &this->info.VorbisBlock); } } ogg_stream_reset(&this->info.VorbisStreamState); } ogg_sync_reset(&this->info.OggSyncState); this->stream->seek(0); ogg_int64_t granulePos = 0; th_decode_ctl(this->info.TheoraDecoder, TH_DECCTL_SET_GRANPOS, &granulePos, sizeof(granulePos)); this->endOfFile = false; this->restarted = true; if (!paused) { this->timer->play(); } }
/*****************************************************************************
 * ProcessHeaders: process Theora headers.
 *****************************************************************************
 * Splits the codec extradata into the three Theora header packets (info,
 * comment, setup), feeds them to libtheora, fills in the output format,
 * and -- in decoder mode -- allocates the decoder context and configures
 * post-processing.  In packetizer mode the extradata is copied through
 * instead.  Returns VLC_SUCCESS or VLC_EGENERIC.
 *****************************************************************************/
static int ProcessHeaders( decoder_t *p_dec )
{
    decoder_sys_t *p_sys = p_dec->p_sys;
    ogg_packet oggpacket;
    th_setup_info *ts = NULL; /* theora setup information */
    int i_max_pp, i_pp;

    unsigned pi_size[XIPH_MAX_HEADER_COUNT];
    void     *pp_data[XIPH_MAX_HEADER_COUNT];
    unsigned i_count;

    if( xiph_SplitHeaders( pi_size, pp_data, &i_count,
                           p_dec->fmt_in.i_extra, p_dec->fmt_in.p_extra) )
        return VLC_EGENERIC;
    if( i_count < 3 )
        goto error;

    oggpacket.granulepos = -1;
    oggpacket.e_o_s = 0;
    oggpacket.packetno = 0;

    /* Take care of the initial Theora (identification) header */
    oggpacket.b_o_s = 1; /* yes this actually is a b_o_s packet :) */
    oggpacket.bytes = pi_size[0];
    oggpacket.packet = pp_data[0];
    if( th_decode_headerin( &p_sys->ti, &p_sys->tc, &ts, &oggpacket ) < 0 )
    {
        msg_Err( p_dec, "this bitstream does not contain Theora video data" );
        goto error;
    }

    /* Set output properties (chroma only matters when actually decoding) */
    if( !p_sys->b_packetizer )

    switch( p_sys->ti.pixel_fmt )
    {
      case TH_PF_420:
        p_dec->fmt_out.i_codec = VLC_CODEC_I420;
        break;
      case TH_PF_422:
        p_dec->fmt_out.i_codec = VLC_CODEC_I422;
        break;
      case TH_PF_444:
        p_dec->fmt_out.i_codec = VLC_CODEC_I444;
        break;
      case TH_PF_RSVD:
      default:
        msg_Err( p_dec, "unknown chroma in theora sample" );
        break;
    }

    /* Full encoded frame vs. visible picture region with offset. */
    p_dec->fmt_out.video.i_width = p_sys->ti.frame_width;
    p_dec->fmt_out.video.i_height = p_sys->ti.frame_height;
    if( p_sys->ti.pic_width && p_sys->ti.pic_height )
    {
        p_dec->fmt_out.video.i_visible_width = p_sys->ti.pic_width;
        p_dec->fmt_out.video.i_visible_height = p_sys->ti.pic_height;

        if( p_sys->ti.pic_x || p_sys->ti.pic_y )
        {
            p_dec->fmt_out.video.i_x_offset = p_sys->ti.pic_x;
            p_dec->fmt_out.video.i_y_offset = p_sys->ti.pic_y;
        }
    }

    /* Sample aspect ratio: default to square pixels when unspecified. */
    if( p_sys->ti.aspect_denominator && p_sys->ti.aspect_numerator )
    {
        p_dec->fmt_out.video.i_sar_num = p_sys->ti.aspect_numerator;
        p_dec->fmt_out.video.i_sar_den = p_sys->ti.aspect_denominator;
    }
    else
    {
        p_dec->fmt_out.video.i_sar_num = 1;
        p_dec->fmt_out.video.i_sar_den = 1;
    }

    if( p_sys->ti.fps_numerator > 0 && p_sys->ti.fps_denominator > 0 )
    {
        p_dec->fmt_out.video.i_frame_rate = p_sys->ti.fps_numerator;
        p_dec->fmt_out.video.i_frame_rate_base = p_sys->ti.fps_denominator;
    }

    msg_Dbg( p_dec, "%dx%d %.02f fps video, frame content "
             "is %dx%d with offset (%d,%d)",
             p_sys->ti.frame_width, p_sys->ti.frame_height,
             (double)p_sys->ti.fps_numerator/p_sys->ti.fps_denominator,
             p_sys->ti.pic_width, p_sys->ti.pic_height,
             p_sys->ti.pic_x, p_sys->ti.pic_y );

    /* Some assertions based on the documentation.  These are mandatory
     * restrictions. */
    assert( p_sys->ti.frame_height % 16 == 0 && p_sys->ti.frame_height < 1048576 );
    assert( p_sys->ti.frame_width % 16 == 0 && p_sys->ti.frame_width < 1048576 );
    assert( p_sys->ti.keyframe_granule_shift >= 0 && p_sys->ti.keyframe_granule_shift <= 31 );
    assert( p_sys->ti.pic_x <= __MIN( p_sys->ti.frame_width - p_sys->ti.pic_width, 255 ) );
    assert( p_sys->ti.pic_y <= p_sys->ti.frame_height - p_sys->ti.pic_height);
    assert( p_sys->ti.frame_height - p_sys->ti.pic_height - p_sys->ti.pic_y <= 255 );

    /* Sanity check that seems necessary for some corrupted files: grow the
     * frame size to at least cover the declared picture region. */
    if( p_sys->ti.frame_width < p_sys->ti.pic_width ||
        p_sys->ti.frame_height < p_sys->ti.pic_height )
    {
        msg_Warn( p_dec, "trying to correct invalid theora header "
                  "(frame size (%dx%d) is smaller than frame content (%d,%d))",
                  p_sys->ti.frame_width, p_sys->ti.frame_height,
                  p_sys->ti.pic_width, p_sys->ti.pic_height );

        if( p_sys->ti.frame_width < p_sys->ti.pic_width )
            p_sys->ti.frame_width = p_sys->ti.pic_width;
        if( p_sys->ti.frame_height < p_sys->ti.pic_height )
            p_sys->ti.frame_height = p_sys->ti.pic_height;
    }

    /* The next packet in order is the comments header */
    oggpacket.b_o_s = 0;
    oggpacket.bytes = pi_size[1];
    oggpacket.packet = pp_data[1];
    if( th_decode_headerin( &p_sys->ti, &p_sys->tc, &ts, &oggpacket ) < 0 )
    {
        msg_Err( p_dec, "2nd Theora header is corrupted" );
        goto error;
    }

    ParseTheoraComments( p_dec );

    /* The next packet in order is the codebooks header
     * We need to watch out that this packet is not missing as a
     * missing or corrupted header is fatal. */
    oggpacket.b_o_s = 0;
    oggpacket.bytes = pi_size[2];
    oggpacket.packet = pp_data[2];
    if( th_decode_headerin( &p_sys->ti, &p_sys->tc, &ts, &oggpacket ) < 0 )
    {
        msg_Err( p_dec, "3rd Theora header is corrupted" );
        goto error;
    }

    if( !p_sys->b_packetizer )
    {
        /* We have all the headers, initialize decoder */
        if ( ( p_sys->tcx = th_decode_alloc( &p_sys->ti, ts ) ) == NULL )
        {
            msg_Err( p_dec, "Could not allocate Theora decoder" );
            goto error;
        }

        /* Apply the configured post-processing level, clamped to what the
         * decoder supports. */
        i_pp = var_InheritInteger( p_dec, DEC_CFG_PREFIX "postproc" );
        if ( i_pp >= 0 && !th_decode_ctl( p_sys->tcx,
                    TH_DECCTL_GET_PPLEVEL_MAX, &i_max_pp, sizeof(int) ) )
        {
            i_pp = __MIN( i_pp, i_max_pp );
            if ( th_decode_ctl( p_sys->tcx, TH_DECCTL_SET_PPLEVEL,
                                &i_pp, sizeof(int) ) )
                msg_Err( p_dec, "Failed to set post processing level to %d",
                         i_pp );
            else
                msg_Dbg( p_dec, "Set post processing level to %d / %d",
                         i_pp, i_max_pp );
        }
    }
    else
    {
        /* Packetizer mode: pass the extradata through unchanged. */
        p_dec->fmt_out.i_extra = p_dec->fmt_in.i_extra;
        p_dec->fmt_out.p_extra = xrealloc( p_dec->fmt_out.p_extra,
                                           p_dec->fmt_out.i_extra );
        memcpy( p_dec->fmt_out.p_extra, p_dec->fmt_in.p_extra,
                p_dec->fmt_out.i_extra );
    }

    for( unsigned i = 0; i < i_count; i++ )
        free( pp_data[i] );

    /* Clean up the decoder setup info... we're done with it */
    th_setup_free( ts );
    return VLC_SUCCESS;

error:
    for( unsigned i = 0; i < i_count; i++ )
        free( pp_data[i] );

    /* Clean up the decoder setup info... we're done with it */
    th_setup_free( ts );
    return VLC_EGENERIC;
}
// Decodes one Theora packet into a legacy yuv_buffer.  Takes ownership of
// inPacket and deletes it (directly or via the helpers it is passed to).
// Returns NULL while the three header packets are still being consumed or
// when a stray header packet arrives; otherwise returns a pointer to the
// internal mYUVBuffer, valid until the next call.
yuv_buffer* TheoraDecoder::decodeTheora(StampedOggPacket* inPacket)
{
	//Accepts packet and deletes it.
	LOG(logDEBUG3) << __FUNCTIONW__;

	if (mPacketCount < 3)
	{
		// Still inside the three Theora header packets.
		decodeHeader(inPacket); //Accepts header and deletes it.
		if (mPacketCount == 3)
		{
			// All headers seen: the decoder can now be allocated.
			mTheoraState = th_decode_alloc(&mTheoraInfo, mTheoraSetup);
			//TODO::: Post processing http://people.xiph.org/~tterribe/doc/libtheora-exp/theoradec_8h.html#a1
		}
		LOG(logDEBUG3) << __FUNCTIONW__ << " PacketCount under 3: " << mPacketCount;
		return NULL;
	}
	else
	{
		//if (mFirstPacket)
		//{
		//	theora_decode_init(&mTheoraState, &mTheoraInfo);
		//	mFirstPacket = false;
		//}

		// Header packets have the top bit of the first byte set; skip any
		// stray ones that arrive after the three expected headers.
		if (inPacket->packetSize() > 0 && (inPacket->packetData()[0] & 128) != 0)
		{
			//Ignore header packets
			delete inPacket;
			LOG(logDEBUG3) << __FUNCTIONW__ << " Ignoring header packets";
			return NULL;
		}
		ogg_packet* locOldPack = simulateOldOggPacket(inPacket); //Accepts the packet and deletes it.
		th_decode_packetin(mTheoraState, locOldPack, NULL);
		// NOTE(review): the packet buffer is freed with scalar delete; if
		// simulateOldOggPacket() allocates it with new[], this should be
		// delete[] -- confirm against that helper.
		delete locOldPack->packet;
		delete locOldPack;
		th_decode_ycbcr_out(mTheoraState, mYCbCrBuffer);

		//TODO:::
		//This is slightly nasty for now... since changing the return type
		// will screw with other stuff
		//
		//Need to probably use the theora-exp buffer type and change all the
		// uses of yuv_buffer to handle this, and avoid assumptions about
		// the relative size of the Y and U and V buffer

		// Repackage the th_ycbcr_buffer planes into the legacy yuv_buffer
		// layout; the Cb plane's dimensions/stride are reused for Cr.
		mYUVBuffer.y_width = mYCbCrBuffer[0].width;
		mYUVBuffer.y_height = mYCbCrBuffer[0].height;
		mYUVBuffer.y_stride = mYCbCrBuffer[0].stride;
		mYUVBuffer.y = mYCbCrBuffer[0].data;

		mYUVBuffer.uv_width = mYCbCrBuffer[1].width;
		mYUVBuffer.uv_height = mYCbCrBuffer[1].height;
		mYUVBuffer.uv_stride = mYCbCrBuffer[1].stride;
		mYUVBuffer.u = mYCbCrBuffer[1].data;
		mYUVBuffer.v = mYCbCrBuffer[2].data;

		return &mYUVBuffer;
	}
}
/* Example Ogg Theora/Vorbis player: demuxes one Theora video stream and
   one Vorbis audio stream, decodes both, and plays them back in rough
   sync via SDL/audio.  Reads from argv[1] if given, otherwise stdin.
   Returns 0 on normal completion; exits non-zero on usage/open/header
   errors. */
int main(int argc,char *const *argv){

  int pp_level_max;
  int pp_level;
  int pp_inc;
  int i,j;
  ogg_packet op;

  FILE *infile = stdin;

  int frames = 0;
  int dropped = 0;

#ifdef _WIN32
  /* We need to set stdin/stdout to binary mode. Damn windows. */
  /* Beware the evil ifdef. We avoid these where we can, but this one we
     cannot. Don't add any more, you'll probably go to hell if you do. */
  _setmode( _fileno( stdin ), _O_BINARY );
#endif

  /* open the input file if any */
  if(argc==2){
    infile=fopen(argv[1],"rb");
    if(infile==NULL){
      fprintf(stderr,"Unable to open '%s' for playback.\n", argv[1]);
      exit(1);
    }
  }
  if(argc>2){
    usage();
    exit(1);
  }

  /* start up Ogg stream synchronization layer */
  ogg_sync_init(&oy);

  /* init supporting Vorbis structures needed in header parsing */
  vorbis_info_init(&vi);
  vorbis_comment_init(&vc);

  /* init supporting Theora structures needed in header parsing */
  th_comment_init(&tc);
  th_info_init(&ti);

  /* Ogg file open; parse the headers */
  /* Only interested in Vorbis/Theora streams */
  while(!stateflag){
    int ret=buffer_data(infile,&oy);
    if(ret==0)break;
    while(ogg_sync_pageout(&oy,&og)>0){
      ogg_stream_state test;

      /* is this a mandated initial header? If not, stop parsing */
      if(!ogg_page_bos(&og)){
        /* don't leak the page; get it into the appropriate stream */
        queue_page(&og);
        stateflag=1;
        break;
      }

      ogg_stream_init(&test,ogg_page_serialno(&og));
      ogg_stream_pagein(&test,&og);
      ogg_stream_packetout(&test,&op);

      /* identify the codec: try theora */
      if(!theora_p && th_decode_headerin(&ti,&tc,&ts,&op)>=0){
        /* it is theora */
        memcpy(&to,&test,sizeof(test));
        theora_p=1;
      }else if(!vorbis_p && vorbis_synthesis_headerin(&vi,&vc,&op)>=0){
        /* it is vorbis */
        memcpy(&vo,&test,sizeof(test));
        vorbis_p=1;
      }else{
        /* whatever it is, we don't care about it */
        ogg_stream_clear(&test);
      }
    }
    /* fall through to non-bos page parsing */
  }

  /* we're expecting more header packets; theora_p/vorbis_p count the
     headers consumed so far (each codec has exactly three). */
  while((theora_p && theora_p<3) || (vorbis_p && vorbis_p<3)){
    int ret;

    /* look for further theora headers */
    while(theora_p && (theora_p<3) && (ret=ogg_stream_packetout(&to,&op))){
      if(ret<0){
        fprintf(stderr,"Error parsing Theora stream headers; "
         "corrupt stream?\n");
        exit(1);
      }
      if(!th_decode_headerin(&ti,&tc,&ts,&op)){
        fprintf(stderr,"Error parsing Theora stream headers; "
         "corrupt stream?\n");
        exit(1);
      }
      theora_p++;
    }

    /* look for more vorbis header packets */
    while(vorbis_p && (vorbis_p<3) && (ret=ogg_stream_packetout(&vo,&op))){
      if(ret<0){
        fprintf(stderr,"Error parsing Vorbis stream headers; corrupt stream?\n");
        exit(1);
      }
      if(vorbis_synthesis_headerin(&vi,&vc,&op)){
        fprintf(stderr,"Error parsing Vorbis stream headers; corrupt stream?\n");
        exit(1);
      }
      vorbis_p++;
      if(vorbis_p==3)break;
    }

    /* The header pages/packets will arrive before anything else we
       care about, or the stream is not obeying spec */
    if(ogg_sync_pageout(&oy,&og)>0){
      queue_page(&og); /* demux into the appropriate stream */
    }else{
      int ret=buffer_data(infile,&oy); /* someone needs more data */
      if(ret==0){
        fprintf(stderr,"End of file while searching for codec headers.\n");
        exit(1);
      }
    }
  }

  /* and now we have it all.  initialize decoders */
  if(theora_p){
    td=th_decode_alloc(&ti,ts);
    printf("Ogg logical stream %lx is Theora %dx%d %.02f fps",
           to.serialno,ti.pic_width,ti.pic_height,
           (double)ti.fps_numerator/ti.fps_denominator);
    px_fmt=ti.pixel_fmt;
    switch(ti.pixel_fmt){
      case TH_PF_420: printf(" 4:2:0 video\n"); break;
      case TH_PF_422: printf(" 4:2:2 video\n"); break;
      case TH_PF_444: printf(" 4:4:4 video\n"); break;
      case TH_PF_RSVD:
      default:
        printf(" video\n (UNKNOWN Chroma sampling!)\n");
        break;
    }
    if(ti.pic_width!=ti.frame_width || ti.pic_height!=ti.frame_height)
      printf(" Frame content is %dx%d with offset (%d,%d).\n",
             ti.frame_width, ti.frame_height, ti.pic_x, ti.pic_y);
    report_colorspace(&ti);
    dump_comments(&tc);
    /* Start at the decoder's maximum post-processing level; the playback
       loop below adjusts it up/down based on how much spare time we have. */
    th_decode_ctl(td,TH_DECCTL_GET_PPLEVEL_MAX,&pp_level_max,
     sizeof(pp_level_max));
    pp_level=pp_level_max;
    th_decode_ctl(td,TH_DECCTL_SET_PPLEVEL,&pp_level,sizeof(pp_level));
    pp_inc=0;

    /*{
      int arg = 0xffff;
      th_decode_ctl(td,TH_DECCTL_SET_TELEMETRY_MBMODE,&arg,sizeof(arg));
      th_decode_ctl(td,TH_DECCTL_SET_TELEMETRY_MV,&arg,sizeof(arg));
      th_decode_ctl(td,TH_DECCTL_SET_TELEMETRY_QI,&arg,sizeof(arg));
      arg=10;
      th_decode_ctl(td,TH_DECCTL_SET_TELEMETRY_BITS,&arg,sizeof(arg));
    }*/
  }else{
    /* tear down the partial theora setup */
    th_info_clear(&ti);
    th_comment_clear(&tc);
  }

  th_setup_free(ts);

  if(vorbis_p){
    vorbis_synthesis_init(&vd,&vi);
    vorbis_block_init(&vd,&vb);
    fprintf(stderr,"Ogg logical stream %lx is Vorbis %d channel %ld Hz audio.\n",
            vo.serialno,vi.channels,vi.rate);
  }else{
    /* tear down the partial vorbis setup */
    vorbis_info_clear(&vi);
    vorbis_comment_clear(&vc);
  }

  /* open audio */
  if(vorbis_p)open_audio();

  /* open video */
  if(theora_p)open_video();

  /* install signal handler as SDL clobbered the default */
  signal (SIGINT, sigint_handler);

  /* on to the main decode loop.  We assume in this example that audio
     and video start roughly together, and don't begin playback until
     we have a start frame for both.  This is not necessarily a valid
     assumption in Ogg A/V streams!  It will always be true of the
     example_encoder (and most streams) though. */

  stateflag=0; /* playback has not begun */
  while(!got_sigint){

    /* we want a video and audio frame ready to go at all times.  If
       we have to buffer incoming, buffer the compressed data (ie, let
       ogg do the buffering) */
    while(vorbis_p && !audiobuf_ready){
      int ret;
      float **pcm;

      /* if there's pending, decoded audio, grab it */
      if((ret=vorbis_synthesis_pcmout(&vd,&pcm))>0){
        /* convert floats to clamped 16-bit signed samples, interleaving
           the channels */
        int count=audiobuf_fill/2;
        int maxsamples=(audiofd_fragsize-audiobuf_fill)/2/vi.channels;
        for(i=0;i<ret && i<maxsamples;i++)
          for(j=0;j<vi.channels;j++){
            int val=rint(pcm[j][i]*32767.f);
            if(val>32767)val=32767;
            if(val<-32768)val=-32768;
            audiobuf[count++]=val;
          }
        vorbis_synthesis_read(&vd,i);
        audiobuf_fill+=i*vi.channels*2;
        if(audiobuf_fill==audiofd_fragsize)audiobuf_ready=1;
        if(vd.granulepos>=0)
          audiobuf_granulepos=vd.granulepos-ret+i;
        else
          audiobuf_granulepos+=i;
      }else{

        /* no pending audio; is there a pending packet to decode? */
        if(ogg_stream_packetout(&vo,&op)>0){
          if(vorbis_synthesis(&vb,&op)==0) /* test for success! */
            vorbis_synthesis_blockin(&vd,&vb);
        }else   /* we need more data; break out to suck in another page */
          break;
      }
    }

    while(theora_p && !videobuf_ready){
      /* theora is one in, one out... */
      if(ogg_stream_packetout(&to,&op)>0){

        /* apply any post-processing level change requested by the timing
           logic below */
        if(pp_inc){
          pp_level+=pp_inc;
          th_decode_ctl(td,TH_DECCTL_SET_PPLEVEL,&pp_level,
           sizeof(pp_level));
          pp_inc=0;
        }
        /*HACK: This should be set after a seek or a gap, but we might not have
           a granulepos for the first packet (we only have them for the last
           packet on a page), so we just set it as often as we get it.
          To do this right, we should back-track from the last packet on the
           page and compute the correct granulepos for the first packet after
           a seek or a gap.*/
        if(op.granulepos>=0){
          th_decode_ctl(td,TH_DECCTL_SET_GRANPOS,&op.granulepos,
           sizeof(op.granulepos));
        }
        if(th_decode_packetin(td,&op,&videobuf_granulepos)==0){
          videobuf_time=th_granule_time(td,videobuf_granulepos);
          frames++;

          /* is it already too old to be useful?  This is only actually
             useful cosmetically after a SIGSTOP.  Note that we have to
             decode the frame even if we don't show it (for now) due to
             keyframing.  Soon enough libtheora will be able to deal
             with non-keyframe seeks. */
          if(videobuf_time>=get_time())
            videobuf_ready=1;
          else{
            /*If we are too slow, reduce the pp level.*/
            pp_inc=pp_level>0?-1:0;
            dropped++;
          }
        }

      }else
        break;
    }

    if(!videobuf_ready && !audiobuf_ready && feof(infile))break;

    if(!videobuf_ready || !audiobuf_ready){
      /* no data yet for somebody.  Grab another page */
      buffer_data(infile,&oy);
      while(ogg_sync_pageout(&oy,&og)>0){
        queue_page(&og);
      }
    }

    /* If playback has begun, top audio buffer off immediately. */
    if(stateflag) audio_write_nonblocking();

    /* are we at or past time for this video frame? */
    if(stateflag && videobuf_ready && videobuf_time<=get_time()){
      video_write();
      videobuf_ready=0;
    }

    if(stateflag &&
       (audiobuf_ready || !vorbis_p) &&
       (videobuf_ready || !theora_p) &&
       !got_sigint){
      /* we have an audio frame ready (which means the audio buffer is
         full), it's not time to play video, so wait until one of the
         audio buffer is ready or it's near time to play video */

      /* set up select wait on the audiobuffer and a timeout for video */
      struct timeval timeout;
      fd_set writefs;
      fd_set empty;
      int n=0;

      FD_ZERO(&writefs);
      FD_ZERO(&empty);
      if(audiofd>=0){
        FD_SET(audiofd,&writefs);
        n=audiofd+1;
      }

      if(theora_p){
        double tdiff;
        long milliseconds;
        tdiff=videobuf_time-get_time();
        /*If we have lots of extra time, increase the post-processing level.*/
        if(tdiff>ti.fps_denominator*0.25/ti.fps_numerator){
          pp_inc=pp_level<pp_level_max?1:0;
        }
        else if(tdiff<ti.fps_denominator*0.05/ti.fps_numerator){
          pp_inc=pp_level>0?-1:0;
        }
        milliseconds=tdiff*1000-5;
        if(milliseconds>500)milliseconds=500;
        if(milliseconds>0){
          timeout.tv_sec=milliseconds/1000;
          timeout.tv_usec=(milliseconds%1000)*1000;

          n=select(n,&empty,&writefs,&empty,&timeout);
          if(n)audio_calibrate_timer(0);
        }
      }else{
        select(n,&empty,&writefs,&empty,NULL);
      }
    }

    /* if our buffers either don't exist or are ready to go,
       we can begin playback */
    if((!theora_p || videobuf_ready) &&
       (!vorbis_p || audiobuf_ready))stateflag=1;
    /* same if we've run out of input */
    if(feof(infile))stateflag=1;

  }

  /* tear it all down */

  audio_close();
  SDL_Quit();

  if(vorbis_p){
    ogg_stream_clear(&vo);
    vorbis_block_clear(&vb);
    vorbis_dsp_clear(&vd);
    vorbis_comment_clear(&vc);
    vorbis_info_clear(&vi);
  }
  if(theora_p){
    ogg_stream_clear(&to);
    th_decode_free(td);
    th_comment_clear(&tc);
    th_info_clear(&ti);
  }
  ogg_sync_clear(&oy);

  if(infile && infile!=stdin)fclose(infile);

  fprintf(stderr, "\r \r");
  fprintf(stderr, "%d frames", frames);
  if (dropped) fprintf(stderr, " (%d dropped)", dropped);
  fprintf(stderr, "\n");
  fprintf(stderr, "\nDone.\n");

  return(0);
}
/* dump_video: decodes the first Theora stream found in an Ogg file and
   writes the raw frames to the output, either as a YUV4MPEG2 stream
   (default) or raw planes (--raw), optionally cropped to the picture
   region (--crop), or just timing statistics (--fps).  Reads argv file or
   stdin; writes -o file or stdout.  Returns 0 on success; exits non-zero
   on usage/open/header errors. */
int main(int argc,char *argv[]){
  ogg_packet op;

  int long_option_index;
  int c;

  struct timeb start;
  struct timeb after;
  struct timeb last;
  int fps_only=0;
  int frames = 0;

  FILE *infile = stdin;
  outfile = stdout;

#ifdef _WIN32
  /* We need to set stdin/stdout to binary mode on windows. */
  /* Beware the evil ifdef. We avoid these where we can, but this one we
     cannot. Don't add any more, you'll probably go to hell if you do. */
  _setmode( _fileno( stdin ), _O_BINARY );
  _setmode( _fileno( stdout ), _O_BINARY );
#endif

  /* Process option arguments. */
  while((c=getopt_long(argc,argv,optstring,options,&long_option_index))!=EOF){
    switch(c){
      case 'o':
        if(strcmp(optarg,"-")!=0){
          outfile=fopen(optarg,"wb");
          if(outfile==NULL){
            fprintf(stderr,"Unable to open output file '%s'\n", optarg);
            exit(1);
          }
        }else{
          outfile=stdout;
        }
        break;

      case 'c':
        crop=1;
        break;

      case 'r':
        raw=1;
        break;

      case 'f':
        fps_only = 1;
        outfile = NULL;
        break;

      default:
        usage();
    }
  }
  if(optind<argc){
    infile=fopen(argv[optind],"rb");
    if(infile==NULL){
      fprintf(stderr,"Unable to open '%s' for extraction.\n", argv[optind]);
      exit(1);
    }
    if(++optind<argc){
      usage();
      exit(1);
    }
  }

  /*Ok, Ogg parsing.
    The idea here is we have a bitstream that is made up of Ogg pages.
    The libogg sync layer will find them for us.
    There may be pages from several logical streams interleaved; we find the
     first theora stream and ignore any others.
    Then we pass the pages for our stream to the libogg stream layer which
     assembles our original set of packets out of them.
    It's the packets that libtheora actually knows how to handle.*/

  /* start up Ogg stream synchronization layer */
  ogg_sync_init(&oy);

  /* init supporting Theora structures needed in header parsing */
  th_comment_init(&tc);
  th_info_init(&ti);

  /*Ogg file open; parse the headers.
    Theora (like Vorbis) depends on some initial header packets for decoder
     setup and initialization.
    We retrieve these first before entering the main decode loop.*/

  /* Only interested in Theora streams */
  while(!stateflag){
    int ret=buffer_data(infile,&oy);
    if(ret==0)break;
    while(ogg_sync_pageout(&oy,&og)>0){
      int got_packet;
      ogg_stream_state test;

      /* is this a mandated initial header? If not, stop parsing */
      if(!ogg_page_bos(&og)){
        /* don't leak the page; get it into the appropriate stream */
        queue_page(&og);
        stateflag=1;
        break;
      }

      ogg_stream_init(&test,ogg_page_serialno(&og));
      ogg_stream_pagein(&test,&og);
      got_packet = ogg_stream_packetpeek(&test,&op);

      /* identify the codec: try theora */
      if((got_packet==1) && !theora_p && (theora_processing_headers=
       th_decode_headerin(&ti,&tc,&ts,&op))>=0){
        /* it is theora -- save this stream state */
        memcpy(&to,&test,sizeof(test));
        theora_p=1;
        /*Advance past the successfully processed header.*/
        if(theora_processing_headers)ogg_stream_packetout(&to,NULL);
      }else{
        /* whatever it is, we don't care about it */
        ogg_stream_clear(&test);
      }
    }
    /* fall through to non-bos page parsing */
  }

  /* we're expecting more header packets; theora_processing_headers stays
     non-zero until th_decode_headerin() reports the headers are done. */
  while(theora_p && theora_processing_headers){
    int ret;

    /* look for further theora headers */
    while(theora_processing_headers&&(ret=ogg_stream_packetpeek(&to,&op))){
      if(ret<0)continue;
      theora_processing_headers=th_decode_headerin(&ti,&tc,&ts,&op);
      if(theora_processing_headers<0){
        fprintf(stderr,"Error parsing Theora stream headers; "
         "corrupt stream?\n");
        exit(1);
      }
      else if(theora_processing_headers>0){
        /*Advance past the successfully processed header.*/
        ogg_stream_packetout(&to,NULL);
      }
      theora_p++;
    }

    /*Stop now so we don't fail if there aren't enough pages in a short
       stream.*/
    if(!(theora_p && theora_processing_headers))break;

    /* The header pages/packets will arrive before anything else we
       care about, or the stream is not obeying spec */
    if(ogg_sync_pageout(&oy,&og)>0){
      queue_page(&og); /* demux into the appropriate stream */
    }else{
      int ret=buffer_data(infile,&oy); /* someone needs more data */
      if(ret==0){
        fprintf(stderr,"End of file while searching for codec headers.\n");
        exit(1);
      }
    }
  }

  /* and now we have it all.  initialize decoders */
  if(theora_p){
    dump_comments(&tc);
    td=th_decode_alloc(&ti,ts);
    fprintf(stderr,"Ogg logical stream %lx is Theora %dx%d %.02f fps video\n"
     "Encoded frame content is %dx%d with %dx%d offset\n",
     to.serialno,ti.frame_width,ti.frame_height,
     (double)ti.fps_numerator/ti.fps_denominator,
     ti.pic_width,ti.pic_height,ti.pic_x,ti.pic_y);
    /*{
      int arg = 0xffff;
      th_decode_ctl(td,TH_DECCTL_SET_TELEMETRY_MBMODE,&arg,sizeof(arg));
      th_decode_ctl(td,TH_DECCTL_SET_TELEMETRY_MV,&arg,sizeof(arg));
      th_decode_ctl(td,TH_DECCTL_SET_TELEMETRY_QI,&arg,sizeof(arg));
      arg=10;
      th_decode_ctl(td,TH_DECCTL_SET_TELEMETRY_BITS,&arg,sizeof(arg));
    }*/
  }else{
    /* tear down the partial theora setup */
    th_info_clear(&ti);
    th_comment_clear(&tc);
  }
  /*Either way, we're done with the codec setup data.*/
  th_setup_free(ts);

  /* open video */
  if(theora_p)open_video();

  /* Emit the YUV4MPEG2 stream header unless raw output or --fps mode. */
  if(!raw && outfile){
    static const char *CHROMA_TYPES[4]={"420jpeg",NULL,"422jpeg","444"};
    int width;
    int height;
    if(ti.pixel_fmt>=4||ti.pixel_fmt==TH_PF_RSVD){
      fprintf(stderr,"Unknown pixel format: %i\n",ti.pixel_fmt);
      exit(1);
    }
    if(crop){
      int hdec;
      int vdec;
      /* hdec/vdec are 1 when the chroma planes are decimated in that
         direction; odd crop offsets/sizes can't be represented then. */
      hdec=!(ti.pixel_fmt&1);
      vdec=!(ti.pixel_fmt&2);
      if((ti.pic_x&hdec)||(ti.pic_width&hdec)
       ||(ti.pic_y&vdec)||(ti.pic_height&vdec)){
        fprintf(stderr,
         "Error: Cropped images with odd offsets/sizes and chroma subsampling\n"
         "cannot be output to YUV4MPEG2. Remove the --crop flag or add the\n"
         "--raw flag.\n");
        exit(1);
      }
      width=ti.pic_width;
      height=ti.pic_height;
    }
    else{
      width=ti.frame_width;
      height=ti.frame_height;
    }
    fprintf(outfile,"YUV4MPEG2 C%s W%d H%d F%d:%d I%c A%d:%d\n",
     CHROMA_TYPES[ti.pixel_fmt],width,height,
     ti.fps_numerator,ti.fps_denominator,'p',
     ti.aspect_numerator,ti.aspect_denominator);
  }

  /* install signal handler */
  signal (SIGINT, sigint_handler);

  /*Finally the main decode loop.

    It's one Theora packet per frame, so this is pretty straightforward if
     we're not trying to maintain sync with other multiplexed streams.

    The videobuf_ready flag is used to maintain the input buffer in the libogg
     stream state.
    If there's no output frame available at the end of the decode step, we
     must need more input data.
    We could simplify this by just using the return code on
     ogg_page_packetout(), but the flag system extends easily to the case
     where you care about more than one multiplexed stream (like with audio
     playback).
    In that case, just maintain a flag for each decoder you care about, and
     pull data when any one of them stalls.

    videobuf_time holds the presentation time of the currently buffered video
     frame.
    We ignore this value.*/

  stateflag=0; /* playback has not begun */

  /* queue any remaining pages from data we buffered but that did not
     contain headers */
  while(ogg_sync_pageout(&oy,&og)>0){
    queue_page(&og);
  }

  if(fps_only){
    ftime(&start);
    ftime(&last);
  }

  while(!got_sigint){

    while(theora_p && !videobuf_ready){
      /* theora is one in, one out... */
      if(ogg_stream_packetout(&to,&op)>0){

        if(th_decode_packetin(td,&op,&videobuf_granulepos)>=0){
          videobuf_time=th_granule_time(td,videobuf_granulepos);
          videobuf_ready=1;
          frames++;
          if(fps_only)
            ftime(&after);
        }

      }else
        break;
    }

    /* --fps mode: print the decode rate relative to the stream's nominal
       frame rate, at most twice a second (fps_only==2 once reporting has
       started). */
    if(fps_only && (videobuf_ready || fps_only==2)){
      long ms =
       after.time*1000.+after.millitm-
       (last.time*1000.+last.millitm);

      if(ms>500 || fps_only==1 ||
         (feof(infile) && !videobuf_ready)){
        float file_fps = (float)ti.fps_numerator/ti.fps_denominator;
        fps_only=2;

        ms = after.time*1000.+after.millitm-
         (start.time*1000.+start.millitm);

        fprintf(stderr,"\rframe:%d rate:%.2fx ",
         frames, frames*1000./(ms*file_fps));
        memcpy(&last,&after,sizeof(last));
      }
    }

    if(!videobuf_ready && feof(infile))break;

    if(!videobuf_ready){
      /* no data yet for somebody.  Grab another page */
      buffer_data(infile,&oy);
      while(ogg_sync_pageout(&oy,&og)>0){
        queue_page(&og);
      }
    }
    /* dumpvideo frame, and get new one */
    else if(outfile)video_write();

    videobuf_ready=0;
  }

  /* end of decoder loop -- close everything */

  if(theora_p){
    ogg_stream_clear(&to);
    th_decode_free(td);
    th_comment_clear(&tc);
    th_info_clear(&ti);
  }
  ogg_sync_clear(&oy);

  if(infile && infile!=stdin)fclose(infile);
  if(outfile && outfile!=stdout)fclose(outfile);

  fprintf(stderr, "\n\n%d frames\n", frames);
  fprintf(stderr, "\nDone.\n");

  return(0);
}
// Opens an Ogg file and parses its headers, preparing the Theora video
// decoder (and, when present, the Vorbis audio decoder) for playback.
// Follows the canonical libtheora player sequence: probe beginning-of-stream
// pages to identify the codecs, pull the remaining two header packets per
// codec, then allocate the decoder contexts.
// `audio_track` selects which Vorbis logical stream to use when the file
// multiplexes several audio tracks.
// On corrupt headers this calls clear() and returns early.
// NOTE(review): the early-return error paths do not call th_setup_free(ts);
// confirm clear() (or a later teardown) releases the setup info, otherwise
// it leaks on corrupt streams.
void VideoStreamPlaybackTheora::set_file(const String &p_file) {
	ERR_FAIL_COND(playing);
	ogg_packet op;
	th_setup_info *ts = NULL;

	file_name = p_file;
	if (file) {
		memdelete(file);
	}
	file = FileAccess::open(p_file, FileAccess::READ);
	ERR_FAIL_COND(!file);

#ifdef THEORA_USE_THREAD_STREAMING
	thread_exit = false;
	thread_eof = false;
	//pre-fill buffer
	int to_read = ring_buffer.space_left();
	int read = file->get_buffer(read_buffer.ptr(), to_read);
	ring_buffer.write(read_buffer.ptr(), read);

	thread = Thread::create(_streaming_thread, this);
#endif

	// Start up the Ogg stream synchronization layer.
	ogg_sync_init(&oy);

	/* init supporting Vorbis structures needed in header parsing */
	vorbis_info_init(&vi);
	vorbis_comment_init(&vc);

	/* init supporting Theora structures needed in header parsing */
	th_comment_init(&tc);
	th_info_init(&ti);

	theora_eos = false;
	vorbis_eos = false;

	/* Ogg file open; parse the headers */
	/* Only interested in Vorbis/Theora streams */
	int stateflag = 0;

	// Number of Vorbis BOS streams to skip before accepting the selected track.
	int audio_track_skip = audio_track;

	while (!stateflag) {
		int ret = buffer_data();
		if (ret == 0)
			break;
		while (ogg_sync_pageout(&oy, &og) > 0) {
			ogg_stream_state test;

			/* is this a mandated initial header? If not, stop parsing */
			if (!ogg_page_bos(&og)) {
				/* don't leak the page; get it into the appropriate stream */
				queue_page(&og);
				stateflag = 1;
				break;
			}

			ogg_stream_init(&test, ogg_page_serialno(&og));
			ogg_stream_pagein(&test, &og);
			ogg_stream_packetout(&test, &op);

			/* identify the codec: try theora */
			if (!theora_p && th_decode_headerin(&ti, &tc, &ts, &op) >= 0) {
				/* it is theora */
				copymem(&to, &test, sizeof(test));
				theora_p = 1;
			} else if (!vorbis_p && vorbis_synthesis_headerin(&vi, &vc, &op) >= 0) {
				/* it is vorbis */
				if (audio_track_skip) {
					// Not the requested audio track: tear this stream down
					// and re-init the structs so the next candidate can parse.
					vorbis_info_clear(&vi);
					vorbis_comment_clear(&vc);
					ogg_stream_clear(&test);
					vorbis_info_init(&vi);
					vorbis_comment_init(&vc);

					audio_track_skip--;
				} else {
					copymem(&vo, &test, sizeof(test));
					vorbis_p = 1;
				}
			} else {
				/* whatever it is, we don't care about it */
				ogg_stream_clear(&test);
			}
		}
		/* fall through to non-bos page parsing */
	}

	/* we're expecting more header packets. */
	// Each codec delivers exactly 3 header packets; *_p counts how many
	// have been consumed so far (1 after BOS identification above).
	while ((theora_p && theora_p < 3) || (vorbis_p && vorbis_p < 3)) {
		int ret;

		/* look for further theora headers */
		while (theora_p && (theora_p < 3) && (ret = ogg_stream_packetout(&to, &op))) {
			if (ret < 0) {
				fprintf(stderr, "Error parsing Theora stream headers; "
								"corrupt stream?\n");
				clear();
				return;
			}
			if (!th_decode_headerin(&ti, &tc, &ts, &op)) {
				fprintf(stderr, "Error parsing Theora stream headers; "
								"corrupt stream?\n");
				clear();
				return;
			}
			theora_p++;
		}

		/* look for more vorbis header packets */
		while (vorbis_p && (vorbis_p < 3) && (ret = ogg_stream_packetout(&vo, &op))) {
			if (ret < 0) {
				fprintf(stderr, "Error parsing Vorbis stream headers; corrupt stream?\n");
				clear();
				return;
			}
			ret = vorbis_synthesis_headerin(&vi, &vc, &op);
			if (ret) {
				fprintf(stderr, "Error parsing Vorbis stream headers; corrupt stream?\n");
				clear();
				return;
			}
			vorbis_p++;
			if (vorbis_p == 3)
				break;
		}

		/* The header pages/packets will arrive before anything else we
		   care about, or the stream is not obeying spec */
		if (ogg_sync_pageout(&oy, &og) > 0) {
			queue_page(&og); /* demux into the appropriate stream */
		} else {
			int ret = buffer_data(); /* someone needs more data */
			if (ret == 0) {
				fprintf(stderr, "End of file while searching for codec headers.\n");
				clear();
				return;
			}
		}
	}

	/* and now we have it all. initialize decoders */
	if (theora_p) {
		td = th_decode_alloc(&ti, ts);
		printf("Ogg logical stream %lx is Theora %dx%d %.02f fps", to.serialno, ti.pic_width, ti.pic_height, (double)ti.fps_numerator / ti.fps_denominator);
		px_fmt = ti.pixel_fmt;
		switch (ti.pixel_fmt) {
			case TH_PF_420:
				printf(" 4:2:0 video\n");
				break;
			case TH_PF_422:
				printf(" 4:2:2 video\n");
				break;
			case TH_PF_444:
				printf(" 4:4:4 video\n");
				break;
			case TH_PF_RSVD:
			default:
				printf(" video\n (UNKNOWN Chroma sampling!)\n");
				break;
		}
		if (ti.pic_width != ti.frame_width || ti.pic_height != ti.frame_height)
			printf(" Frame content is %dx%d with offset (%d,%d).\n", ti.frame_width, ti.frame_height, ti.pic_x, ti.pic_y);
		th_decode_ctl(td, TH_DECCTL_GET_PPLEVEL_MAX, &pp_level_max, sizeof(pp_level_max));
		// Post-processing disabled by default.
		pp_level = 0;
		th_decode_ctl(td, TH_DECCTL_SET_PPLEVEL, &pp_level, sizeof(pp_level));
		pp_inc = 0;

		int w;
		int h;
		// Round the visible region out to even pixel boundaries
		// ('&' binds looser than '+', so each operand is (sum) & ~1).
		w = (ti.pic_x + ti.frame_width + 1 & ~1) - (ti.pic_x & ~1);
		h = (ti.pic_y + ti.frame_height + 1 & ~1) - (ti.pic_y & ~1);
		size.x = w;
		size.y = h;

		texture->create(w, h, Image::FORMAT_RGBA8, Texture::FLAG_FILTER | Texture::FLAG_VIDEO_SURFACE);
	} else {
		/* tear down the partial theora setup */
		th_info_clear(&ti);
		th_comment_clear(&tc);
	}

	th_setup_free(ts);

	if (vorbis_p) {
		vorbis_synthesis_init(&vd, &vi);
		vorbis_block_init(&vd, &vb);
		fprintf(stderr, "Ogg logical stream %lx is Vorbis %d channel %ld Hz audio.\n", vo.serialno, vi.channels, vi.rate);
		//_setup(vi.channels, vi.rate);
	} else {
		/* tear down the partial vorbis setup */
		vorbis_info_clear(&vi);
		vorbis_comment_clear(&vc);
	}

	playing = false;
	buffering = true;
	time = 0;
	audio_frames_wrote = 0;
};
// Loads a Theora clip from `source`: parses the Theora/Vorbis headers,
// allocates the video decoder, records the frame/picture geometry, then
// estimates the clip duration by scanning backwards from the end of the
// stream for the last Theora page's granule position. Finally rewinds the
// stream and, when Vorbis streams are present, sets up the audio decoder
// and an audio interface (if a factory is registered).
// NOTE(review): `source` ownership appears to transfer to this->stream —
// confirm who releases it.
void VideoClip_Theora::_load(DataSource* source)
{
#ifdef _DEBUG
	log("-----");
#endif
	this->stream = source;
	this->_readTheoraVorbisHeaders();
	this->info.TheoraDecoder = th_decode_alloc(&this->info.TheoraInfo, this->info.TheoraSetup);
	// frame_* is the full encoded frame; pic_* is the visible sub-region.
	this->width = this->info.TheoraInfo.frame_width;
	this->height = this->info.TheoraInfo.frame_height;
	this->subFrameWidth = this->info.TheoraInfo.pic_width;
	this->subFrameHeight = this->info.TheoraInfo.pic_height;
	this->subFrameX = this->info.TheoraInfo.pic_x;
	this->subFrameY = this->info.TheoraInfo.pic_y;
	this->stride = this->getWidth();
	if (this->useStride)
	{
		// Round the stride up to a power of two when requested.
		this->stride = potCeil(this->stride);
	}
	this->fps = this->info.TheoraInfo.fps_numerator / (float)this->info.TheoraInfo.fps_denominator;
#ifdef _DEBUG
	log("width: " + str(this->width) + ", height: " + str(this->height) + ", fps: " + str((int)this->getFps()));
#endif
	this->frameQueue = new FrameQueue(this);
	this->frameQueue->setSize(this->precachedFramesCount);
	// find out the duration of the file by seeking to the end
	// having ogg decode pages, extract the granule pos from
	// the last theora page and seek back to beginning of the file
	char* buffer = NULL;
	int bytesRead = 0;
	uint64_t streamSize = this->stream->getSize();
	uint64_t seekPos = 0;
	int result = 0;
	ogg_int64_t granule = 0;
	// Read progressively larger tail windows (up to 50 * BUFFER_SIZE) until
	// a Theora page with a usable granule position is found.
	for (unsigned int i = 1; i <= 50; ++i)
	{
		ogg_sync_reset(&this->info.OggSyncState);
		seekPos = (BUFFER_SIZE * i > streamSize ? 0 : streamSize - BUFFER_SIZE * i);
		this->stream->seek(seekPos);
		buffer = ogg_sync_buffer(&this->info.OggSyncState, BUFFER_SIZE * i);
		bytesRead = this->stream->read(buffer, BUFFER_SIZE * i);
		ogg_sync_wrote(&this->info.OggSyncState, bytesRead);
		// Skip to the first page boundary inside the window.
		ogg_sync_pageseek(&this->info.OggSyncState, &this->info.OggPage);
		while (true)
		{
			result = ogg_sync_pageout(&this->info.OggSyncState, &this->info.OggPage);
			if (result == 0)
			{
				break;
			}
			// if page is not a theora page or page is unsynced(-1), skip it
			if (result == -1 || ogg_page_serialno(&this->info.OggPage) != this->info.TheoraStreamState.serialno)
			{
				continue;
			}
			granule = ogg_page_granulepos(&this->info.OggPage);
			if (granule >= 0)
			{
				this->framesCount = (int)th_granule_frame(this->info.TheoraDecoder, granule) + 1;
			}
			else if (this->framesCount > 0)
			{
				++this->framesCount; // append delta frames at the end to get the exact number
			}
		}
		if (this->framesCount > 0 || streamSize < BUFFER_SIZE * i)
		{
			break;
		}
	}
	if (this->framesCount < 0)
	{
		log("unable to determine file duration!");
	}
	else
	{
		this->duration = this->framesCount / this->fps;
#ifdef _DEBUG
		log("duration: " + strf(this->duration) + " seconds");
#endif
	}
	// restore to beginning of stream.
	ogg_sync_reset(&this->info.OggSyncState);
	this->stream->seek(0);
	if (this->vorbisStreams > 0) // if there is no audio interface factory defined, even though the video clip might have audio, it will be ignored
	{
		vorbis_synthesis_init(&this->info.VorbisDSPState, &this->info.VorbisInfo);
		vorbis_block_init(&this->info.VorbisDSPState, &this->info.VorbisBlock);
		this->audioChannelsCount = this->info.VorbisInfo.channels;
		this->audioFrequency = (int) this->info.VorbisInfo.rate;
		// create an audio interface instance if available
		AudioInterfaceFactory* audioInterfaceFactory = theoraplayer::manager->getAudioInterfaceFactory();
		if (audioInterfaceFactory != NULL)
		{
			this->setAudioInterface(audioInterfaceFactory->createInstance(this, this->audioChannelsCount, this->audioFrequency));
		}
	}
	this->frameDuration = 1.0f / this->getFps();
#ifdef _DEBUG
	log("-----");
#endif
}
void VideoClip_Theora::_executeSeek() { #if _DEBUG log(this->name + " [seek]: seeking to frame " + str(this->seekFrame)); #endif int frame = 0; float time = this->seekFrame / getFps(); this->timer->seek(time); bool paused = this->timer->isPaused(); if (!paused) { this->timer->pause(); // pause until seeking is done } this->endOfFile = false; this->restarted = false; this->_resetFrameQueue(); // reset the video decoder. ogg_stream_reset(&this->info.TheoraStreamState); th_decode_free(this->info.TheoraDecoder); this->info.TheoraDecoder = th_decode_alloc(&this->info.TheoraInfo, this->info.TheoraSetup); Mutex::ScopeLock audioMutexLock; if (this->audioInterface != NULL) { audioMutexLock.acquire(this->audioMutex); ogg_stream_reset(&this->info.VorbisStreamState); vorbis_synthesis_restart(&this->info.VorbisDSPState); this->destroyAllAudioPackets(); } // first seek to desired frame, then figure out the location of the // previous key frame and seek to it. // then by setting the correct time, the decoder will skip N frames untill // we get the frame we want. frame = (int)this->_seekPage(this->seekFrame, 1); // find the key frame nearest to the target frame #ifdef _DEBUG // log(mName + " [seek]: nearest key frame for frame " + str(mSeekFrame) + " is frame: " + str(frame)); #endif this->_seekPage(std::max(0, frame - 1), 0); ogg_packet opTheora; ogg_int64_t granulePos; bool granuleSet = false; if (frame <= 1) { if (this->info.TheoraInfo.version_major == 3 && this->info.TheoraInfo.version_minor == 2 && this->info.TheoraInfo.version_subminor == 0) { granulePos = 0; } else { granulePos = 1; // because of difference in granule interpretation in theora streams 3.2.0 and newer ones } th_decode_ctl(this->info.TheoraDecoder, TH_DECCTL_SET_GRANPOS, &granulePos, sizeof(granulePos)); granuleSet = true; } // now that we've found the key frame that preceeds our desired frame, lets keep on decoding frames until we // reach our target frame. 
int status = 0; while (this->seekFrame != 0) { if (ogg_stream_packetout(&this->info.TheoraStreamState, &opTheora) > 0) { if (!granuleSet) { // theora decoder requires to set the granule pos after seek to be able to determine the current frame if (opTheora.granulepos < 0) { continue; // ignore prev delta frames until we hit a key frame } th_decode_ctl(this->info.TheoraDecoder, TH_DECCTL_SET_GRANPOS, &opTheora.granulepos, sizeof(opTheora.granulepos)); granuleSet = true; } status = th_decode_packetin(this->info.TheoraDecoder, &opTheora, &granulePos); if (status != 0 && status != TH_DUPFRAME) { continue; } frame = (int)th_granule_frame(this->info.TheoraDecoder, granulePos); if (frame >= this->seekFrame - 1) { break; } } else if (!this->_readData()) { log(this->name + " [seek]: fineseeking failed, _readData failed!"); return; } } #ifdef _DEBUG // log(mName + " [seek]: fineseeked to frame " + str(frame + 1) + ", requested: " + str(mSeekFrame)); #endif if (this->audioInterface != NULL) { // read audio data until we reach a timeStamp. this usually takes only one iteration, but just in case let's // wrap it in a loop float timeStamp = 0.0f; while (true) { timeStamp = this->_decodeAudio(); if (timeStamp >= 0) { break; } this->_readData(); } float rate = (float)this->audioFrequency * this->audioChannelsCount; float queuedTime = this->getAudioPacketQueueLength(); int trimmedCount = 0; // at this point there are only 2 possibilities: either we have too much packets and we have to delete // the first N ones, or we don't have enough, so let's fill the gap with silence. 
if (time > timeStamp - queuedTime) { while (this->audioPacketQueue != NULL) { if (time <= timeStamp - queuedTime + this->audioPacketQueue->samplesCount / rate) { trimmedCount = (int)((timeStamp - queuedTime + this->audioPacketQueue->samplesCount / rate - time) * rate); if (this->audioPacketQueue->samplesCount - trimmedCount <= 0) { this->destroyAudioPacket(this->popAudioPacket()); // if there's no data to be left, just destroy it } else { for (int i = trimmedCount, j = 0; i < this->audioPacketQueue->samplesCount; ++i, ++j) { this->audioPacketQueue->pcmData[j] = this->audioPacketQueue->pcmData[i]; } this->audioPacketQueue->samplesCount -= trimmedCount; } break; } queuedTime -= this->audioPacketQueue->samplesCount / rate; this->destroyAudioPacket(this->popAudioPacket()); } } // expand the first packet with silence. else if (this->audioPacketQueue != NULL) { int i = 0; int j = 0; int missingCount = (int)((timeStamp - queuedTime - time) * rate); if (missingCount > 0) { float* samples = new float[missingCount + this->audioPacketQueue->samplesCount]; if (missingCount > 0) { memset(samples, 0, missingCount * sizeof(float)); } for (j = 0; i < missingCount + this->audioPacketQueue->samplesCount; ++i, ++j) { samples[i] = this->audioPacketQueue->pcmData[j]; } delete[] this->audioPacketQueue->pcmData; this->audioPacketQueue->pcmData = samples; } } this->lastDecodedFrameNumber = this->seekFrame; this->readAudioSamples = (unsigned int)(timeStamp * this->audioFrequency); audioMutexLock.release(); } if (!paused) { this->timer->play(); } this->seekFrame = -1; }
// Opens a Theora file for playback: releases any previous state, parses the
// Ogg/Theora headers (video only — Vorbis streams are simply skipped),
// allocates the decoder, configures post-processing to the maximum level and
// allocates an RGBA frame buffer sized to power-of-two dimensions.
// Sets isPlaying on success; returns early (leaving partial state) on
// corrupt headers.
// NOTE(review): the early-return error paths below do not clear thInfo /
// thComment or free thSetup — confirm ReleaseData() on the next call is
// what reclaims them.
void TheoraPlayer::OpenFile(const String &path)
{
	ReleaseData();

	if(path == "")
		return;

	filePath = path;
	file = File::Create(path, File::OPEN | File::READ);

	ogg_sync_init(&theoraData->syncState);
	th_info_init(&theoraData->thInfo);
	th_comment_init(&theoraData->thComment);

	int32 stateflag = 0;
	while(!stateflag)
	{
		if(!BufferData())
			break;
		while(ogg_sync_pageout(&theoraData->syncState, &theoraData->page) > 0)
		{
			ogg_stream_state test;

			/* is this a mandated initial header? If not, stop parsing */
			if(!ogg_page_bos(&theoraData->page))
			{
				/* don't leak the page; get it into the appropriate stream */
				// NOTE(review): if no Theora BOS page was seen yet,
				// theoraData->state has not been ogg_stream_init'd here —
				// confirm it is zero-initialized elsewhere.
				ogg_stream_pagein(&theoraData->state, &theoraData->page);
				stateflag = 1;
				break;
			}

			ogg_stream_init(&test, ogg_page_serialno(&theoraData->page));
			ogg_stream_pagein(&test, &theoraData->page);
			ogg_stream_packetout(&test, &theoraData->packet);

			/* identify the codec: try theora */
			if(!theora_p && th_decode_headerin(&theoraData->thInfo, &theoraData->thComment, &theoraData->thSetup, &theoraData->packet) >= 0)
			{
				/* it is theora */
				memcpy(&theoraData->state, &test, sizeof(test));
				theora_p = 1;
			}
			else
			{
				/* whatever it is, we don't care about it */
				ogg_stream_clear(&test);
			}
		}
		/* fall through to non-bos page parsing */
	}

	// Theora mandates 3 header packets; theora_p counts how many were read.
	while(theora_p && theora_p < 3)
	{
		int ret;
		/* look for further theora headers */
		while(theora_p && (theora_p < 3) && (ret = ogg_stream_packetout(&theoraData->state, &theoraData->packet)))
		{
			if(ret < 0)
			{
				Logger::Error("TheoraPlayer: Error parsing Theora stream headers; corrupt stream?\n");
				return;
			}
			if(!th_decode_headerin(&theoraData->thInfo, &theoraData->thComment, &theoraData->thSetup, &theoraData->packet))
			{
				Logger::Error("TheoraPlayer: Error parsing Theora stream headers; corrupt stream?\n");
				return;
			}
			theora_p++;
		}

		/* The header pages/packets will arrive before anything else we care about, or the stream is not obeying spec */
		if(ogg_sync_pageout(&theoraData->syncState, &theoraData->page) > 0)
		{
			ogg_stream_pagein(&theoraData->state, &theoraData->page); /* demux into the appropriate stream */
		}
		else
		{
			int ret = BufferData(); /* someone needs more data */
			if(ret == 0)
			{
				Logger::Error("TheoraPlayer: End of file while searching for codec headers.\n");
				return;
			}
		}
	}

	if(theora_p)
	{
		theoraData->thCtx = th_decode_alloc(&theoraData->thInfo, theoraData->thSetup);
		// Enable post-processing at the decoder's maximum supported level.
		th_decode_ctl(theoraData->thCtx, TH_DECCTL_GET_PPLEVEL_MAX, &pp_level_max, sizeof(pp_level_max));
		pp_level=pp_level_max;
		th_decode_ctl(theoraData->thCtx, TH_DECCTL_SET_PPLEVEL, &pp_level, sizeof(pp_level));
		pp_inc=0;
	}
	else
	{
		/* tear down the partial theora setup */
		th_info_clear(&theoraData->thInfo);
		th_comment_clear(&theoraData->thComment);
	}

	if(theoraData->thSetup)
		th_setup_free(theoraData->thSetup);
	theoraData->thSetup = 0;

	// NOTE(review): on the !theora_p path thInfo was just cleared, so these
	// dimensions are 0 and frameBuffer is zero-sized — confirm callers guard
	// against a non-video file.
	frameBufferW = binCeil(theoraData->thInfo.pic_width);
	frameBufferH = binCeil(theoraData->thInfo.pic_height);

	frameBuffer = new unsigned char[frameBufferW * frameBufferH * 4];

	// Remember where the data pages start so playback can loop the file.
	repeatFilePos = file->GetPos();

	frameTime = (float32)(theoraData->thInfo.fps_denominator)/(float32)(theoraData->thInfo.fps_numerator);
	isPlaying = true;
}
/* Allocates and initializes a krad Theora decoder from a codec header.
 *
 * NOTE(review): the entire th_decode_headerin() sequence below is commented
 * out, so `info` keeps its th_info_init() defaults and `setup_info` stays
 * NULL (calloc). That means the printk reports zeroed geometry and
 * th_decode_alloc() is handed a NULL setup — confirm this is intentional
 * (e.g. headers parsed elsewhere) and that callers handle a NULL `decoder`.
 * NOTE(review): the calloc() result is not checked for NULL. */
krad_theora_decoder_t * krad_theora_decoder_create (krad_codec_header_t *header) {

  krad_theora_decoder_t *krad_theora;

  krad_theora = calloc(1, sizeof(krad_theora_decoder_t));

  /* -1 marks "no granule position seen yet". */
  krad_theora->granulepos = -1;

  th_comment_init(&krad_theora->comment);
  th_info_init(&krad_theora->info);
/*
  krad_theora->packet.packet = header->header[0];
  krad_theora->packet.bytes = header->header_size[0];
  krad_theora->packet.b_o_s = 1;
  krad_theora->packet.packetno = 1;
  th_decode_headerin (&krad_theora->info, &krad_theora->comment, &krad_theora->setup_info, &krad_theora->packet);
  krad_theora->packet.packet = header->header[1];
  krad_theora->packet.bytes = header->header_size[1];
  krad_theora->packet.b_o_s = 0;
  krad_theora->packet.packetno = 2;
  th_decode_headerin (&krad_theora->info, &krad_theora->comment, &krad_theora->setup_info, &krad_theora->packet);
  krad_theora->packet.packet = header->header[2];
  krad_theora->packet.bytes = header->header_size[2];
  krad_theora->packet.packetno = 3;
  th_decode_headerin (&krad_theora->info, &krad_theora->comment, &krad_theora->setup_info, &krad_theora->packet);
  krad_theora->color_depth = PIX_FMT_YUV420P;
  if (krad_theora->info.pixel_fmt == TH_PF_422) {
    krad_theora->color_depth = PIX_FMT_YUV422P;
    printk ("Theora color depth 422");
  }
  if (krad_theora->info.pixel_fmt == TH_PF_444) {
    krad_theora->color_depth = PIX_FMT_YUV444P;
    printk ("Theora color depth 444");
  }
*/
  printk ("Theora %dx%d %.02f fps video\n Encoded frame content is %dx%d with %dx%d offset",
          krad_theora->info.frame_width, krad_theora->info.frame_height,
          (double)krad_theora->info.fps_numerator/krad_theora->info.fps_denominator,
          krad_theora->info.pic_width, krad_theora->info.pic_height,
          krad_theora->info.pic_x, krad_theora->info.pic_y);

  /* Visible picture region geometry (offset into the encoded frame). */
  krad_theora->offset_y = krad_theora->info.pic_y;
  krad_theora->offset_x = krad_theora->info.pic_x;
  krad_theora->width = krad_theora->info.pic_width;
  krad_theora->height = krad_theora->info.pic_height;

  krad_theora->decoder = th_decode_alloc (&krad_theora->info, krad_theora->setup_info);

  th_setup_free(krad_theora->setup_info);

  return krad_theora;
}
/*
 * init driver
 *
 * Initializes the Theora decoder for a video stream: reads the three
 * mandatory Theora header packets (either from the BITMAPINFOHEADER
 * extradata, 16-bit-big-endian length-prefixed, or from the demuxer),
 * allocates the decode context, derives the display aspect ratio when the
 * container did not provide one, and configures the video output.
 * Returns the result of mpcodecs_config_vo() on success, 0 on failure.
 */
static int init(sh_video_t *sh){
    theora_struct_t *context = NULL;
    uint8_t *extradata = (uint8_t *)(sh->bih + 1);
    int extradata_size = sh->bih->biSize - sizeof(*sh->bih);
    int errorCode = 0;
    ogg_packet op;
    int i;

    context = calloc (sizeof (theora_struct_t), 1);
    sh->context = context;
    if (!context)
       goto err_out;

    th_info_init(&context->ti);
    th_comment_init(&context->tc);
    context->tsi = NULL;

    /* Read all header packets, pass them to theora_decode_header. */
    for (i = 0; i < THEORA_NUM_HEADER_PACKETS; i++)
    {
       if (extradata_size > 2) {
          op.bytes = AV_RB16(extradata);
          op.packet = extradata + 2;
          op.b_o_s  = 1;
          if (extradata_size < op.bytes + 2) {
             mp_msg(MSGT_DECVIDEO, MSGL_ERR, "Theora header too small\n");
             goto err_out;
          }
          extradata      += op.bytes + 2;
          extradata_size -= op.bytes + 2;
       } else {
          /* No (more) extradata: pull the header packet from the demuxer. */
          op.bytes = ds_get_packet (sh->ds, &op.packet);
          op.b_o_s = 1;
       }

       if ( (errorCode = th_decode_headerin (&context->ti, &context->tc, &context->tsi, &op)) < 0)
       {
          mp_msg(MSGT_DECVIDEO, MSGL_ERR, "Broken Theora header; errorCode=%i!\n", errorCode);
          goto err_out;
       }
    }

    /* now init codec */
    /* BUGFIX: th_decode_alloc() takes the th_setup_info pointer itself, not
     * its address. The previous code passed &context->tsi (a
     * th_setup_info **), handing the decoder the address of the field as if
     * it were setup data. */
    context->tctx = th_decode_alloc (&context->ti, context->tsi);
    if (!context->tctx) {
        mp_msg(MSGT_DECVIDEO,MSGL_ERR,"Theora decode init failed\n");
        goto err_out;
    }
    /* free memory used for decoder setup information */
    th_setup_free(context->tsi);
    context->tsi = NULL; /* don't keep a dangling pointer around */

    /* Derive the display aspect from the stream's PAR if the container
     * didn't specify one (denominator 0 means "unspecified"). */
    if(sh->aspect==0.0 && context->ti.aspect_denominator!=0)
    {
       sh->aspect = ((double)context->ti.aspect_numerator * context->ti.frame_width)/
          ((double)context->ti.aspect_denominator * context->ti.frame_height);
    }

    mp_msg(MSGT_DECVIDEO,MSGL_V,"INFO: Theora video init ok!\n");
    mp_msg(MSGT_DECVIDEO,MSGL_INFO,"Frame: %dx%d, Picture %dx%d, Offset [%d,%d]\n",
           context->ti.frame_width, context->ti.frame_height,
           context->ti.pic_width, context->ti.pic_height,
           context->ti.pic_x, context->ti.pic_y);

    return mpcodecs_config_vo (sh,context->ti.frame_width,context->ti.frame_height,theora_pixelformat2imgfmt(context->ti.pixel_fmt));

 err_out:
    /* Release everything acquired so far. ti/tc are initialized immediately
     * after a successful calloc, so clearing them is safe on every error
     * path that has a non-NULL context. (Previously tsi/ti/tc leaked.) */
    if (context) {
        th_setup_free(context->tsi); /* NULL-safe */
        th_info_clear(&context->ti);
        th_comment_clear(&context->tc);
    }
    free(context);
    sh->context = NULL;
    return 0;
}
// Loads a Theora (optionally with Vorbis audio) stream for playback.
//
// Parses the Ogg container headers using the canonical libtheora player
// sequence: identify codecs from beginning-of-stream pages, pull the
// remaining header packets per codec, then allocate the decoders. When
// audio is present it also primes the queuing audio stream and starts it
// on the mixer; when video is present it creates the decode surface and a
// display sub-surface cropped to the visible picture region.
// Returns true on success; raises error() on corrupt headers.
// NOTE(review): takes ownership of `stream` via _fileStream — confirm
// close() is what disposes of it.
bool TheoraDecoder::loadStream(Common::SeekableReadStream *stream) {
	close();

	_endOfAudio = false;
	_endOfVideo = false;

	_fileStream = stream;

	// start up Ogg stream synchronization layer
	ogg_sync_init(&_oggSync);

	// init supporting Vorbis structures needed in header parsing
	vorbis_info_init(&_vorbisInfo);
	vorbis_comment_init(&_vorbisComment);

	// init supporting Theora structures needed in header parsing
	th_comment_init(&_theoraComment);
	th_info_init(&_theoraInfo);

	// Ogg file open; parse the headers
	// Only interested in Vorbis/Theora streams
	bool foundHeader = false;
	while (!foundHeader) {
		int ret = bufferData();

		if (ret == 0)
			break;

		while (ogg_sync_pageout(&_oggSync, &_oggPage) > 0) {
			ogg_stream_state test;

			// is this a mandated initial header? If not, stop parsing
			if (!ogg_page_bos(&_oggPage)) {
				// don't leak the page; get it into the appropriate stream
				queuePage(&_oggPage);
				foundHeader = true;
				break;
			}

			ogg_stream_init(&test, ogg_page_serialno(&_oggPage));
			ogg_stream_pagein(&test, &_oggPage);
			ogg_stream_packetout(&test, &_oggPacket);

			// identify the codec: try theora
			if (!_theoraPacket && th_decode_headerin(&_theoraInfo, &_theoraComment, &_theoraSetup, &_oggPacket) >= 0) {
				// it is theora
				memcpy(&_theoraOut, &test, sizeof(test));
				_theoraPacket = 1;
			} else if (!_vorbisPacket && vorbis_synthesis_headerin(&_vorbisInfo, &_vorbisComment, &_oggPacket) >= 0) {
				// it is vorbis
				memcpy(&_vorbisOut, &test, sizeof(test));
				_vorbisPacket = 1;
			} else {
				// whatever it is, we don't care about it
				ogg_stream_clear(&test);
			}
		}
		// fall through to non-bos page parsing
	}

	// we're expecting more header packets.
	// Each codec mandates 3 header packets; *_Packet counts those consumed.
	while ((_theoraPacket && _theoraPacket < 3) || (_vorbisPacket && _vorbisPacket < 3)) {
		int ret;

		// look for further theora headers
		while (_theoraPacket && (_theoraPacket < 3) && (ret = ogg_stream_packetout(&_theoraOut, &_oggPacket))) {
			if (ret < 0)
				error("Error parsing Theora stream headers; corrupt stream?");

			if (!th_decode_headerin(&_theoraInfo, &_theoraComment, &_theoraSetup, &_oggPacket))
				error("Error parsing Theora stream headers; corrupt stream?");

			_theoraPacket++;
		}

		// look for more vorbis header packets
		while (_vorbisPacket && (_vorbisPacket < 3) && (ret = ogg_stream_packetout(&_vorbisOut, &_oggPacket))) {
			if (ret < 0)
				error("Error parsing Vorbis stream headers; corrupt stream?");

			if (vorbis_synthesis_headerin(&_vorbisInfo, &_vorbisComment, &_oggPacket))
				error("Error parsing Vorbis stream headers; corrupt stream?");

			_vorbisPacket++;

			if (_vorbisPacket == 3)
				break;
		}

		// The header pages/packets will arrive before anything else we
		// care about, or the stream is not obeying spec
		if (ogg_sync_pageout(&_oggSync, &_oggPage) > 0) {
			queuePage(&_oggPage); // demux into the appropriate stream
		} else {
			ret = bufferData(); // someone needs more data

			if (ret == 0)
				error("End of file while searching for codec headers.");
		}
	}

	// and now we have it all. initialize decoders
	if (_theoraPacket) {
		_theoraDecode = th_decode_alloc(&_theoraInfo, _theoraSetup);
		debugN(1, "Ogg logical stream %lx is Theora %dx%d %.02f fps",
		       _theoraOut.serialno, _theoraInfo.pic_width, _theoraInfo.pic_height,
		       (double)_theoraInfo.fps_numerator / _theoraInfo.fps_denominator);

		switch (_theoraInfo.pixel_fmt) {
		case TH_PF_420:
			debug(1, " 4:2:0 video");
			break;
		case TH_PF_422:
			debug(1, " 4:2:2 video");
			break;
		case TH_PF_444:
			debug(1, " 4:4:4 video");
			break;
		case TH_PF_RSVD:
		default:
			debug(1, " video\n  (UNKNOWN Chroma sampling!)");
			break;
		}

		if (_theoraInfo.pic_width != _theoraInfo.frame_width || _theoraInfo.pic_height != _theoraInfo.frame_height)
			debug(1, "  Frame content is %dx%d with offset (%d,%d).",
			      _theoraInfo.frame_width, _theoraInfo.frame_height, _theoraInfo.pic_x, _theoraInfo.pic_y);

		switch (_theoraInfo.colorspace){
		case TH_CS_UNSPECIFIED:
			/* nothing to report */
			break;
		case TH_CS_ITU_REC_470M:
			debug(1, "  encoder specified ITU Rec 470M (NTSC) color.");
			break;
		case TH_CS_ITU_REC_470BG:
			debug(1, "  encoder specified ITU Rec 470BG (PAL) color.");
			break;
		default:
			debug(1, "warning: encoder specified unknown colorspace (%d).", _theoraInfo.colorspace);
			break;
		}

		debug(1, "Encoded by %s", _theoraComment.vendor);
		if (_theoraComment.comments) {
			debug(1, "theora comment header:");
			for (int i = 0; i < _theoraComment.comments; i++) {
				if (_theoraComment.user_comments[i]) {
					int len = _theoraComment.comment_lengths[i];
					char *value = (char *)malloc(len + 1);
					if (value) {
						memcpy(value, _theoraComment.user_comments[i], len);
						value[len] = '\0';
						debug(1, "\t%s", value);
						free(value);
					}
				}
			}
		}

		// Enable post-processing at the decoder's maximum supported level.
		th_decode_ctl(_theoraDecode, TH_DECCTL_GET_PPLEVEL_MAX, &_ppLevelMax, sizeof(_ppLevelMax));
		_ppLevel = _ppLevelMax;
		th_decode_ctl(_theoraDecode, TH_DECCTL_SET_PPLEVEL, &_ppLevel, sizeof(_ppLevel));
		_ppInc = 0;
	} else {
		// tear down the partial theora setup
		th_info_clear(&_theoraInfo);
		th_comment_clear(&_theoraComment);
	}

	th_setup_free(_theoraSetup);
	_theoraSetup = 0;

	if (_vorbisPacket) {
		vorbis_synthesis_init(&_vorbisDSP, &_vorbisInfo);
		vorbis_block_init(&_vorbisDSP, &_vorbisBlock);
		debug(3, "Ogg logical stream %lx is Vorbis %d channel %ld Hz audio.",
		      _vorbisOut.serialno, _vorbisInfo.channels, _vorbisInfo.rate);

		_audStream = Audio::makeQueuingAudioStream(_vorbisInfo.rate, _vorbisInfo.channels);

		// Get enough audio data to start us off
		while (_audStream->numQueuedStreams() == 0) {
			// Queue more data
			bufferData();
			while (ogg_sync_pageout(&_oggSync, &_oggPage) > 0)
				queuePage(&_oggPage);

			queueAudio();
		}

		if (_audStream)
			g_system->getMixer()->playStream(Audio::Mixer::kPlainSoundType, _audHandle, _audStream, -1, getVolume(), getBalance());
	} else {
		// tear down the partial vorbis setup
		vorbis_info_clear(&_vorbisInfo);
		vorbis_comment_clear(&_vorbisComment);
		_endOfAudio = true;
	}

	// NOTE(review): this runs even when no Theora stream was found, with
	// _theoraInfo freshly cleared — confirm callers never reach here for
	// audio-only files.
	_surface.create(_theoraInfo.frame_width, _theoraInfo.frame_height, g_system->getScreenFormat());

	// Set up a display surface: a cropped view into _surface starting at the
	// visible picture offset.
	_displaySurface.pixels = _surface.getBasePtr(_theoraInfo.pic_x, _theoraInfo.pic_y);
	_displaySurface.w = _theoraInfo.pic_width;
	_displaySurface.h = _theoraInfo.pic_height;
	_displaySurface.format = _surface.format;
	_displaySurface.pitch = _surface.pitch;

	// Set the frame rate
	_frameRate = Common::Rational(_theoraInfo.fps_numerator, _theoraInfo.fps_denominator);

	return true;
}
/* Handles the Theora identification (type) header: derives the pixel
 * aspect ratio, maps the stream's pixel format to a GstVideoFormat,
 * fixes up odd picture offsets for subsampled chroma, allocates the
 * libtheora decode context (with optional telemetry overlays), and
 * negotiates the decoder's output state. Returns GST_FLOW_ERROR for
 * unsupported pixel formats. */
static GstFlowReturn
theora_handle_type_packet (GstTheoraDec * dec)
{
  gint par_num, par_den;
  GstFlowReturn ret = GST_FLOW_OK;
  GstVideoCodecState *state;
  GstVideoFormat fmt;
  GstVideoInfo *info = &dec->input_state->info;

  GST_DEBUG_OBJECT (dec, "fps %d/%d, PAR %d/%d",
      dec->info.fps_numerator, dec->info.fps_denominator,
      dec->info.aspect_numerator, dec->info.aspect_denominator);

  /* calculate par
   * the info.aspect_* values reflect PAR;
   * 0:x and x:0 are allowed and can be interpreted as 1:1.
   */
  par_num = GST_VIDEO_INFO_PAR_N (info);
  par_den = GST_VIDEO_INFO_PAR_D (info);

  /* If we have a default PAR, see if the decoder specified a different one */
  if (par_num == 1 && par_den == 1 &&
      (dec->info.aspect_numerator != 0 && dec->info.aspect_denominator != 0)) {
    par_num = dec->info.aspect_numerator;
    par_den = dec->info.aspect_denominator;
  }
  /* theora has:
   *
   *  width/height : dimension of the encoded frame
   *  pic_width/pic_height : dimension of the visible part
   *  pic_x/pic_y : offset in encoded frame where visible part starts
   */
  GST_DEBUG_OBJECT (dec, "dimension %dx%d, PAR %d/%d", dec->info.pic_width,
      dec->info.pic_height, par_num, par_den);
  GST_DEBUG_OBJECT (dec, "frame dimension %dx%d, offset %d:%d",
      dec->info.pic_width, dec->info.pic_height,
      dec->info.pic_x, dec->info.pic_y);

  switch (dec->info.pixel_fmt) {
    case TH_PF_420:
      fmt = GST_VIDEO_FORMAT_I420;
      break;
    case TH_PF_422:
      fmt = GST_VIDEO_FORMAT_Y42B;
      break;
    case TH_PF_444:
      fmt = GST_VIDEO_FORMAT_Y444;
      break;
    default:
      goto unsupported_format;
  }

  GST_VIDEO_INFO_WIDTH (info) = dec->info.pic_width;
  GST_VIDEO_INFO_HEIGHT (info) = dec->info.pic_height;

  /* Ensure correct offsets in chroma for formats that need it
   * by rounding the offset. libtheora will add proper pixels,
   * so no need to handle them ourselves. */
  if (dec->info.pic_x & 1 && dec->info.pixel_fmt != TH_PF_444) {
    GST_VIDEO_INFO_WIDTH (info)++;
  }
  if (dec->info.pic_y & 1 && dec->info.pixel_fmt == TH_PF_420) {
    GST_VIDEO_INFO_HEIGHT (info)++;
  }

  GST_DEBUG_OBJECT (dec, "after fixup frame dimension %dx%d, offset %d:%d",
      info->width, info->height, dec->info.pic_x, dec->info.pic_y);

  /* done */
  dec->decoder = th_decode_alloc (&dec->info, dec->setup);

  /* NOTE(review): th_decode_ctl() returns 0 on success and TH_EIMPL when the
   * telemetry controls are unavailable (non-debug libtheora builds). These
   * warnings fire when the result is NOT TH_EIMPL, i.e. also on success —
   * the condition looks inverted; confirm the intended behaviour. */
  if (th_decode_ctl (dec->decoder, TH_DECCTL_SET_TELEMETRY_MV,
          &dec->telemetry_mv, sizeof (dec->telemetry_mv)) != TH_EIMPL) {
    GST_WARNING_OBJECT (dec, "Could not enable MV visualisation");
  }
  if (th_decode_ctl (dec->decoder, TH_DECCTL_SET_TELEMETRY_MBMODE,
          &dec->telemetry_mbmode, sizeof (dec->telemetry_mbmode)) != TH_EIMPL) {
    GST_WARNING_OBJECT (dec, "Could not enable MB mode visualisation");
  }
  if (th_decode_ctl (dec->decoder, TH_DECCTL_SET_TELEMETRY_QI,
          &dec->telemetry_qi, sizeof (dec->telemetry_qi)) != TH_EIMPL) {
    GST_WARNING_OBJECT (dec, "Could not enable QI mode visualisation");
  }
  if (th_decode_ctl (dec->decoder, TH_DECCTL_SET_TELEMETRY_BITS,
          &dec->telemetry_bits, sizeof (dec->telemetry_bits)) != TH_EIMPL) {
    GST_WARNING_OBJECT (dec, "Could not enable BITS mode visualisation");
  }

  /* Create the output state */
  dec->output_state = state =
      gst_video_decoder_set_output_state (GST_VIDEO_DECODER (dec), fmt,
      info->width, info->height, dec->input_state);

  /* FIXME : Do we still need to set fps/par now that we pass the reference input stream ? */
  state->info.fps_n = dec->info.fps_numerator;
  state->info.fps_d = dec->info.fps_denominator;
  state->info.par_n = par_num;
  state->info.par_d = par_den;

  /* these values are for all versions of the colorspace specified in the
   * theora info */
  state->info.chroma_site = GST_VIDEO_CHROMA_SITE_JPEG;
  state->info.colorimetry.range = GST_VIDEO_COLOR_RANGE_16_235;
  state->info.colorimetry.matrix = GST_VIDEO_COLOR_MATRIX_BT601;
  state->info.colorimetry.transfer = GST_VIDEO_TRANSFER_BT709;
  switch (dec->info.colorspace) {
    case TH_CS_ITU_REC_470M:
      state->info.colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_BT470M;
      break;
    case TH_CS_ITU_REC_470BG:
      state->info.colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_BT470BG;
      break;
    default:
      state->info.colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_UNKNOWN;
      break;
  }

  gst_video_decoder_negotiate (GST_VIDEO_DECODER (dec));

  dec->have_header = TRUE;

  return ret;

  /* ERRORS */
unsupported_format:
  {
    GST_ERROR_OBJECT (dec, "Invalid pixel format %d", dec->info.pixel_fmt);
    return GST_FLOW_ERROR;
  }
}
/* theora_handle_type_packet:
 * @dec: the decoder instance; all theora stream headers have been parsed
 *   into dec->info at this point.
 * @packet: the type (info) header packet.  Unused here — the header was
 *   already consumed by th_decode_headerin() before this is called.
 *
 * Final step of header handling: derive the output pixel-aspect-ratio and
 * format, allocate the libtheora decoder, negotiate downstream caps and
 * flush any events/tags that were queued while waiting for the headers.
 *
 * Returns: GST_FLOW_OK on success, GST_FLOW_ERROR for an unsupported
 *   pixel format.
 */
static GstFlowReturn
theora_handle_type_packet (GstTheoraDec * dec, ogg_packet * packet)
{
  GstCaps *caps;
  gint par_num, par_den;
  GstFlowReturn ret = GST_FLOW_OK;
  GList *walk;
  guint32 fourcc;

  GST_DEBUG_OBJECT (dec, "fps %d/%d, PAR %d/%d",
      dec->info.fps_numerator, dec->info.fps_denominator,
      dec->info.aspect_numerator, dec->info.aspect_denominator);

  /* calculate par
   * the info.aspect_* values reflect PAR;
   * 0:x and x:0 are allowed and can be interpreted as 1:1. */
  if (dec->have_par) {
    /* we had a par on the sink caps, override the encoded par */
    GST_DEBUG_OBJECT (dec, "overriding with input PAR");
    par_num = dec->par_num;
    par_den = dec->par_den;
  } else {
    /* take encoded par */
    par_num = dec->info.aspect_numerator;
    par_den = dec->info.aspect_denominator;
  }
  if (par_num == 0 || par_den == 0) {
    par_num = par_den = 1;
  }

  /* theora has:
   *
   *  width/height : dimension of the encoded frame
   *  pic_width/pic_height : dimension of the visible part
   *  pic_x/pic_y : offset in encoded frame where visible part starts */
  GST_DEBUG_OBJECT (dec, "dimension %dx%d, PAR %d/%d", dec->info.pic_width,
      dec->info.pic_height, par_num, par_den);
  GST_DEBUG_OBJECT (dec, "frame dimension %dx%d, offset %d:%d",
      dec->info.pic_width, dec->info.pic_height,
      dec->info.pic_x, dec->info.pic_y);

  /* map the theora chroma subsampling to a raw-YUV fourcc and remember the
   * average bits per pixel for that layout */
  if (dec->info.pixel_fmt == TH_PF_420) {
    dec->output_bpp = 12;       /* Average bits per pixel. */
    fourcc = GST_MAKE_FOURCC ('I', '4', '2', '0');
  } else if (dec->info.pixel_fmt == TH_PF_422) {
    dec->output_bpp = 16;
    fourcc = GST_MAKE_FOURCC ('Y', '4', '2', 'B');
  } else if (dec->info.pixel_fmt == TH_PF_444) {
    dec->output_bpp = 24;
    fourcc = GST_MAKE_FOURCC ('Y', '4', '4', '4');
  } else {
    GST_ERROR_OBJECT (dec, "Invalid pixel format %d", dec->info.pixel_fmt);
    return GST_FLOW_ERROR;
  }

  if (dec->crop) {
    /* output only the visible region */
    dec->width = dec->info.pic_width;
    dec->height = dec->info.pic_height;
    dec->offset_x = dec->info.pic_x;
    dec->offset_y = dec->info.pic_y;
    /* Ensure correct offsets in chroma for formats that need it
     * by rounding the offset. libtheora will add proper pixels,
     * so no need to handle them ourselves. */
    if (dec->offset_x & 1 && dec->info.pixel_fmt != TH_PF_444) {
      dec->offset_x--;
      dec->width++;
    }
    if (dec->offset_y & 1 && dec->info.pixel_fmt == TH_PF_420) {
      dec->offset_y--;
      dec->height++;
    }
  } else {
    /* no cropping, use the encoded dimensions */
    dec->width = dec->info.frame_width;
    dec->height = dec->info.frame_height;
    dec->offset_x = 0;
    dec->offset_y = 0;
  }

  GST_DEBUG_OBJECT (dec, "after fixup frame dimension %dx%d, offset %d:%d",
      dec->width, dec->height, dec->offset_x, dec->offset_y);

  /* done */
  dec->decoder = th_decode_alloc (&dec->info, dec->setup);

  /* Telemetry (debug visualisation) options.  For the SET_TELEMETRY_*
   * requests th_decode_ctl() returns 0 on success and TH_EIMPL when
   * libtheora was built without telemetry support, which is the failure
   * case we want to warn about.  (The former "!= TH_EIMPL" tests were
   * inverted: they warned on success and were silent on failure.) */
  if (th_decode_ctl (dec->decoder, TH_DECCTL_SET_TELEMETRY_MV,
          &dec->telemetry_mv, sizeof (dec->telemetry_mv)) == TH_EIMPL) {
    GST_WARNING_OBJECT (dec, "Could not enable MV visualisation");
  }
  if (th_decode_ctl (dec->decoder, TH_DECCTL_SET_TELEMETRY_MBMODE,
          &dec->telemetry_mbmode, sizeof (dec->telemetry_mbmode)) == TH_EIMPL) {
    GST_WARNING_OBJECT (dec, "Could not enable MB mode visualisation");
  }
  if (th_decode_ctl (dec->decoder, TH_DECCTL_SET_TELEMETRY_QI,
          &dec->telemetry_qi, sizeof (dec->telemetry_qi)) == TH_EIMPL) {
    GST_WARNING_OBJECT (dec, "Could not enable QI mode visualisation");
  }
  if (th_decode_ctl (dec->decoder, TH_DECCTL_SET_TELEMETRY_BITS,
          &dec->telemetry_bits, sizeof (dec->telemetry_bits)) == TH_EIMPL) {
    GST_WARNING_OBJECT (dec, "Could not enable BITS mode visualisation");
  }

  /* negotiate the output format downstream */
  caps = gst_caps_new_simple ("video/x-raw-yuv",
      "format", GST_TYPE_FOURCC, fourcc,
      "framerate", GST_TYPE_FRACTION,
      dec->info.fps_numerator, dec->info.fps_denominator,
      "pixel-aspect-ratio", GST_TYPE_FRACTION, par_num, par_den,
      "width", G_TYPE_INT, dec->width, "height", G_TYPE_INT, dec->height,
      "color-matrix", G_TYPE_STRING, "sdtv",
      "chroma-site", G_TYPE_STRING, "jpeg", NULL);
  gst_pad_set_caps (dec->srcpad, caps);
  gst_caps_unref (caps);

  dec->have_header = TRUE;

  /* caps are set now; push the serialized events that were queued while we
   * were still waiting for the stream headers */
  if (dec->pendingevents) {
    for (walk = dec->pendingevents; walk; walk = g_list_next (walk))
      gst_pad_push_event (dec->srcpad, GST_EVENT_CAST (walk->data));
    g_list_free (dec->pendingevents);
    dec->pendingevents = NULL;
  }

  /* post any tags collected from the comment header */
  if (dec->tags) {
    gst_element_found_tags_for_pad (GST_ELEMENT_CAST (dec),
        dec->srcpad, dec->tags);
    dec->tags = NULL;
  }

  return ret;
}