Example #1
	void VideoClip_Theora::_executeSeek()
	{
#ifdef _DEBUG
		log(this->name + " [seek]: seeking to frame " + str(this->seekFrame));
#endif
		int frame = 0;
		float time = this->seekFrame / getFps();
		this->timer->seek(time);
		bool paused = this->timer->isPaused();
		if (!paused)
		{
			this->timer->pause(); // pause until seeking is done
		}
		this->endOfFile = false;
		this->restarted = false;
		this->_resetFrameQueue();
		// reset the video decoder.
		ogg_stream_reset(&this->info.TheoraStreamState);
		th_decode_free(this->info.TheoraDecoder);
		this->info.TheoraDecoder = th_decode_alloc(&this->info.TheoraInfo, this->info.TheoraSetup);
		Mutex::ScopeLock audioMutexLock;
		if (this->audioInterface != NULL)
		{
			audioMutexLock.acquire(this->audioMutex);
			ogg_stream_reset(&this->info.VorbisStreamState);
			vorbis_synthesis_restart(&this->info.VorbisDSPState);
			this->destroyAllAudioPackets();
		}
		// first seek to the desired frame, then figure out the location of the
		// previous key frame and seek to it.
		// then, by setting the correct time, the decoder will skip N frames until
		// we get the frame we want.
		frame = (int)this->_seekPage(this->seekFrame, 1); // find the key frame nearest to the target frame
#ifdef _DEBUG
	//		log(mName + " [seek]: nearest key frame for frame " + str(mSeekFrame) + " is frame: " + str(frame));
#endif
		this->_seekPage(std::max(0, frame - 1), 0);

		ogg_packet opTheora;
		ogg_int64_t granulePos;
		bool granuleSet = false;
		if (frame <= 1)
		{
			if (this->info.TheoraInfo.version_major == 3 && this->info.TheoraInfo.version_minor == 2 && this->info.TheoraInfo.version_subminor == 0)
			{
				granulePos = 0;
			}
			else
			{
				granulePos = 1; // because of difference in granule interpretation in theora streams 3.2.0 and newer ones
			}
			th_decode_ctl(this->info.TheoraDecoder, TH_DECCTL_SET_GRANPOS, &granulePos, sizeof(granulePos));
			granuleSet = true;
		}
		// now that we've found the key frame that precedes our desired frame, let's keep on decoding frames until we
		// reach our target frame.
		int status = 0;
		while (this->seekFrame != 0)
		{
			if (ogg_stream_packetout(&this->info.TheoraStreamState, &opTheora) > 0)
			{
				if (!granuleSet)
				{
					// the theora decoder requires the granule pos to be set after a seek in order to determine the current frame
					if (opTheora.granulepos < 0)
					{
						continue; // ignore preceding delta frames until we hit a key frame
					}
					th_decode_ctl(this->info.TheoraDecoder, TH_DECCTL_SET_GRANPOS, &opTheora.granulepos, sizeof(opTheora.granulepos));
					granuleSet = true;
				}
				status = th_decode_packetin(this->info.TheoraDecoder, &opTheora, &granulePos);
				if (status != 0 && status != TH_DUPFRAME)
				{
					continue;
				}
				frame = (int)th_granule_frame(this->info.TheoraDecoder, granulePos);
				if (frame >= this->seekFrame - 1)
				{
					break;
				}
			}
			else if (!this->_readData())
			{
				log(this->name + " [seek]: fineseeking failed, _readData failed!");
				return;
			}
		}
#ifdef _DEBUG
		//	log(mName + " [seek]: fineseeked to frame " + str(frame + 1) + ", requested: " + str(mSeekFrame));
#endif
		if (this->audioInterface != NULL)
		{
			// read audio data until we reach a timeStamp. this usually takes only one iteration, but just in case let's
			// wrap it in a loop
			float timeStamp = 0.0f;
			while (true)
			{
				timeStamp = this->_decodeAudio();
				if (timeStamp >= 0)
				{
					break;
				}
				this->_readData();
			}
			float rate = (float)this->audioFrequency * this->audioChannelsCount;
			float queuedTime = this->getAudioPacketQueueLength();
			int trimmedCount = 0;
			// at this point there are only 2 possibilities: either we have too many packets and have to delete
			// the first N, or we don't have enough, so let's fill the gap with silence.
			if (time > timeStamp - queuedTime)
			{
				while (this->audioPacketQueue != NULL)
				{
					if (time <= timeStamp - queuedTime + this->audioPacketQueue->samplesCount / rate)
					{
						trimmedCount = (int)((timeStamp - queuedTime + this->audioPacketQueue->samplesCount / rate - time) * rate);
						if (this->audioPacketQueue->samplesCount - trimmedCount <= 0)
						{
							this->destroyAudioPacket(this->popAudioPacket()); // if there's no data to be left, just destroy it
						}
						else
						{
							for (int i = trimmedCount, j = 0; i < this->audioPacketQueue->samplesCount; ++i, ++j)
							{
								this->audioPacketQueue->pcmData[j] = this->audioPacketQueue->pcmData[i];
							}
							this->audioPacketQueue->samplesCount -= trimmedCount;
						}
						break;
					}
					queuedTime -= this->audioPacketQueue->samplesCount / rate;
					this->destroyAudioPacket(this->popAudioPacket());
				}
			}
			// expand the first packet with silence.
			else if (this->audioPacketQueue != NULL)
			{
				int i = 0;
				int j = 0;
				int missingCount = (int)((timeStamp - queuedTime - time) * rate);
				if (missingCount > 0)
				{
					float* samples = new float[missingCount + this->audioPacketQueue->samplesCount];
					memset(samples, 0, missingCount * sizeof(float)); // fill the gap with silence
					// copy the existing samples in after the leading silence
					for (i = missingCount, j = 0; i < missingCount + this->audioPacketQueue->samplesCount; ++i, ++j)
					{
						samples[i] = this->audioPacketQueue->pcmData[j];
					}
					delete[] this->audioPacketQueue->pcmData;
					this->audioPacketQueue->pcmData = samples;
				}
			}
			this->lastDecodedFrameNumber = this->seekFrame;
			this->readAudioSamples = (unsigned int)(timeStamp * this->audioFrequency);
			audioMutexLock.release();
		}
		if (!paused)
		{
			this->timer->play();
		}
		this->seekFrame = -1;
	}
Example #2
static int theora_decode_control(theora_state *_td,int _req,
                                 void *_buf,size_t _buf_sz) {
    return th_decode_ctl(((th_api_wrapper *)_td->i->codec_setup)->decode,
                         _req,_buf,_buf_sz);
}
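
For reference, a minimal stand-alone sketch (not taken from any of the examples here; the helper name is hypothetical) of the th_decode_ctl() round-trip that the wrapper above forwards to: query the maximum post-processing level on an allocated decoder and enable it. th_decode_ctl() returns 0 on success.

#include <theora/theoradec.h>

/* Hypothetical helper: query the maximum post-processing level and,
 * if the query succeeded, apply it to the decoder. */
static void enable_max_postproc(th_dec_ctx *dec)
{
    int pp_max = 0;
    if (th_decode_ctl(dec, TH_DECCTL_GET_PPLEVEL_MAX, &pp_max, sizeof(pp_max)) == 0)
    {
        th_decode_ctl(dec, TH_DECCTL_SET_PPLEVEL, &pp_max, sizeof(pp_max));
    }
}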
Example #3
void TheoraPlayer::OpenFile(const String &path)
{
    ReleaseData();
    
    if(path == "")
        return;
    
    filePath = path;
    
    file = File::Create(path, File::OPEN | File::READ);
    ogg_sync_init(&theoraData->syncState);
    th_info_init(&theoraData->thInfo);
    th_comment_init(&theoraData->thComment);
    
    int32 stateflag = 0;
    while(!stateflag)
    {
        if(!BufferData())
            break;
        
        while(ogg_sync_pageout(&theoraData->syncState, &theoraData->page) > 0)
        {
            ogg_stream_state test;
            
            /* is this a mandated initial header? If not, stop parsing */
            if(!ogg_page_bos(&theoraData->page))
            {
                /* don't leak the page; get it into the appropriate stream */
                ogg_stream_pagein(&theoraData->state, &theoraData->page);
                stateflag = 1;
                break;
            }
            
            ogg_stream_init(&test, ogg_page_serialno(&theoraData->page));
            ogg_stream_pagein(&test, &theoraData->page);
            ogg_stream_packetout(&test, &theoraData->packet);
            
            /* identify the codec: try theora */
            if(!theora_p && th_decode_headerin(&theoraData->thInfo, &theoraData->thComment, &theoraData->thSetup, &theoraData->packet) >= 0)
            {
                /* it is theora */
                memcpy(&theoraData->state, &test, sizeof(test));
                theora_p = 1;
            }
            else
            {
                /* whatever it is, we don't care about it */
                ogg_stream_clear(&test);
            }
        }
        /* fall through to non-bos page parsing */
    }
    
    while(theora_p && theora_p < 3)
    {
        int ret;
        
        /* look for further theora headers */
        while(theora_p && (theora_p < 3) && (ret = ogg_stream_packetout(&theoraData->state, &theoraData->packet)))
        {
            if(ret < 0)
            {
                Logger::Error("TheoraPlayer: Error parsing Theora stream headers; corrupt stream?\n");
                return;
            }
            if(!th_decode_headerin(&theoraData->thInfo, &theoraData->thComment, &theoraData->thSetup, &theoraData->packet))
            {
                Logger::Error("TheoraPlayer: Error parsing Theora stream headers; corrupt stream?\n");
                return;
            }
            theora_p++;
        }
        
        /* The header pages/packets will arrive before anything else we
         care about, or the stream is not obeying spec */
        
        if(ogg_sync_pageout(&theoraData->syncState, &theoraData->page) > 0)
        {
            ogg_stream_pagein(&theoraData->state, &theoraData->page); /* demux into the appropriate stream */
        }
        else
        {
            int ret = BufferData(); /* someone needs more data */
            if(ret == 0)
            {
                Logger::Error("TheoraPlayer: End of file while searching for codec headers.\n");
                return;
            }
        }
    }
    if(theora_p)
    {
        theoraData->thCtx = th_decode_alloc(&theoraData->thInfo, theoraData->thSetup);
        
        th_decode_ctl(theoraData->thCtx, TH_DECCTL_GET_PPLEVEL_MAX, &pp_level_max, sizeof(pp_level_max));
        pp_level=pp_level_max;
        th_decode_ctl(theoraData->thCtx, TH_DECCTL_SET_PPLEVEL, &pp_level, sizeof(pp_level));
        pp_inc=0;
    }
    else
    {
        /* tear down the partial theora setup */
        th_info_clear(&theoraData->thInfo);
        th_comment_clear(&theoraData->thComment);
    }
    
    if(theoraData->thSetup)
        th_setup_free(theoraData->thSetup);
    theoraData->thSetup = 0;

    frameBufferW = binCeil(theoraData->thInfo.pic_width);
    frameBufferH = binCeil(theoraData->thInfo.pic_height);
    
    frameBuffer = new unsigned char[frameBufferW * frameBufferH * 4];
    
    repeatFilePos = file->GetPos();
    
    frameTime = (float32)(theoraData->thInfo.fps_denominator)/(float32)(theoraData->thInfo.fps_numerator);
    
    isPlaying = true;
}
Example #4
void TheoraPlayer::Update(float32 timeElapsed)
{
    if(!isPlaying)
        return;
        
    videoTime += timeElapsed;
    
    currFrameTime += timeElapsed;
    if(currFrameTime < frameTime)
    {
        return;
    }
    else
    {
        currFrameTime -= frameTime;
    }
    
    int ret;
    
    while(theora_p && !isVideoBufReady)
    {
        ret = ogg_stream_packetout(&theoraData->state, &theoraData->packet);
        if(ret > 0)
        {
            if(pp_inc)
            {
                pp_level += pp_inc;
                th_decode_ctl(theoraData->thCtx, TH_DECCTL_SET_PPLEVEL, &pp_level, sizeof(pp_level));
                pp_inc = 0;
            }
            if(theoraData->packet.granulepos >= 0)
                th_decode_ctl(theoraData->thCtx, TH_DECCTL_SET_GRANPOS, &theoraData->packet.granulepos, sizeof(theoraData->packet.granulepos));

            if(th_decode_packetin(theoraData->thCtx, &theoraData->packet, &theoraData->videoBufGranulePos) == 0)
            {
                if((videoBufTime = th_granule_time(theoraData->thCtx, theoraData->videoBufGranulePos)) >= videoTime)
                    isVideoBufReady = true;
                else
                    pp_inc = (pp_level > 0)? -1 : 0;
            }
        }
        else
        {
            isVideoBufReady = false;
            break;
        }
    }
    
    if(!isVideoBufReady)
    {
        BufferData();
        while(ogg_sync_pageout(&theoraData->syncState, &theoraData->page) > 0)
            ogg_stream_pagein(&theoraData->state, &theoraData->page);
    }
    
    if(isVideoBufReady)
    {
        isVideoBufReady = false;
        ret = th_decode_ycbcr_out(theoraData->thCtx, theoraData->yuvBuffer);
    
        for(int i = 0; i < frameBufferH; i++) //Y
        {
            int yShift = 0, uShift = 0, vShift = 0;
            const bool inBuffer = (i < theoraData->yuvBuffer[0].height);
            if(inBuffer)
            {
                yShift = theoraData->yuvBuffer[0].stride * i;
                uShift = theoraData->yuvBuffer[1].stride * (i / 2);
                vShift = theoraData->yuvBuffer[2].stride * (i / 2);
            }
            
            for(int j = 0; j < frameBufferW; j++) //X
            {
                const int index = (i * frameBufferW + j) * 4;
                
                if(inBuffer && j < theoraData->yuvBuffer[0].width)
                {
                    const unsigned char Y = *(theoraData->yuvBuffer[0].data + yShift + j);
                    const unsigned char U = *(theoraData->yuvBuffer[1].data + uShift + j / 2);
                    const unsigned char V = *(theoraData->yuvBuffer[2].data + vShift + j / 2);
                
                    frameBuffer[index]   = ClampFloatToByte(Y + 1.371f * (V - 128));
                    frameBuffer[index+1] = ClampFloatToByte(Y - 0.698f * (V - 128) - 0.336f * (U - 128));
                    frameBuffer[index+2] = ClampFloatToByte(Y + 1.732f * (U - 128));
                    frameBuffer[index+3] = 255;
                }
                else
                {
                    memset(&frameBuffer[index], 0, 4 * sizeof(unsigned char));
                }
            }
        }
    
        if(!ret)
        {
            Texture * tex = Texture::CreateFromData(FORMAT_RGBA8888, frameBuffer, frameBufferW, frameBufferH, false);
            Sprite * spr = Sprite::CreateFromTexture(tex, 0, 0, tex->width, tex->height);
            spr->ConvertToVirtualSize();

            SafeRelease(tex);
            SetSprite(spr, 0);
            SafeRelease(spr);
        }
    }
    
    if(theora_p)
    {
        double tdiff = videoBufTime - videoTime;
        /*If we have lots of extra time, increase the post-processing level.*/
        if(tdiff > theoraData->thInfo.fps_denominator * 0.25f / theoraData->thInfo.fps_numerator)
        {
            pp_inc = (pp_level < pp_level_max) ? 1 : 0;
        }
        else if(tdiff < theoraData->thInfo.fps_denominator * 0.05 / theoraData->thInfo.fps_numerator)
        {
            pp_inc = (pp_level > 0)? -1 : 0;
        }
    }
    if(isRepeat && file->GetPos() == file->GetSize())
    {
        ReleaseData();
        OpenFile(filePath);
    }
}
Example #5
void VideoStreamPlaybackTheora::set_file(const String &p_file) {

	ERR_FAIL_COND(playing);
	ogg_packet op;
	th_setup_info *ts = NULL;

	file_name = p_file;
	if (file) {
		memdelete(file);
	}
	file = FileAccess::open(p_file, FileAccess::READ);
	ERR_FAIL_COND(!file);

#ifdef THEORA_USE_THREAD_STREAMING
	thread_exit = false;
	thread_eof = false;
	//pre-fill buffer
	int to_read = ring_buffer.space_left();
	int read = file->get_buffer(read_buffer.ptr(), to_read);
	ring_buffer.write(read_buffer.ptr(), read);

	thread = Thread::create(_streaming_thread, this);

#endif

	ogg_sync_init(&oy);

	/* init supporting Vorbis structures needed in header parsing */
	vorbis_info_init(&vi);
	vorbis_comment_init(&vc);

	/* init supporting Theora structures needed in header parsing */
	th_comment_init(&tc);
	th_info_init(&ti);

	theora_eos = false;
	vorbis_eos = false;

	/* Ogg file open; parse the headers */
	/* Only interested in Vorbis/Theora streams */
	int stateflag = 0;

	int audio_track_skip = audio_track;

	while (!stateflag) {
		int ret = buffer_data();
		if (ret == 0) break;
		while (ogg_sync_pageout(&oy, &og) > 0) {
			ogg_stream_state test;

			/* is this a mandated initial header? If not, stop parsing */
			if (!ogg_page_bos(&og)) {
				/* don't leak the page; get it into the appropriate stream */
				queue_page(&og);
				stateflag = 1;
				break;
			}

			ogg_stream_init(&test, ogg_page_serialno(&og));
			ogg_stream_pagein(&test, &og);
			ogg_stream_packetout(&test, &op);

			/* identify the codec: try theora */
			if (!theora_p && th_decode_headerin(&ti, &tc, &ts, &op) >= 0) {
				/* it is theora */
				copymem(&to, &test, sizeof(test));
				theora_p = 1;
			} else if (!vorbis_p && vorbis_synthesis_headerin(&vi, &vc, &op) >= 0) {

				/* it is vorbis */
				if (audio_track_skip) {
					vorbis_info_clear(&vi);
					vorbis_comment_clear(&vc);
					ogg_stream_clear(&test);
					vorbis_info_init(&vi);
					vorbis_comment_init(&vc);

					audio_track_skip--;
				} else {
					copymem(&vo, &test, sizeof(test));
					vorbis_p = 1;
				}
			} else {
				/* whatever it is, we don't care about it */
				ogg_stream_clear(&test);
			}
		}
		/* fall through to non-bos page parsing */
	}

	/* we're expecting more header packets. */
	while ((theora_p && theora_p < 3) || (vorbis_p && vorbis_p < 3)) {
		int ret;

		/* look for further theora headers */
		while (theora_p && (theora_p < 3) && (ret = ogg_stream_packetout(&to, &op))) {
			if (ret < 0) {
				fprintf(stderr, "Error parsing Theora stream headers; "
								"corrupt stream?\n");
				clear();
				return;
			}
			if (!th_decode_headerin(&ti, &tc, &ts, &op)) {
				fprintf(stderr, "Error parsing Theora stream headers; "
								"corrupt stream?\n");
				clear();
				return;
			}
			theora_p++;
		}

		/* look for more vorbis header packets */
		while (vorbis_p && (vorbis_p < 3) && (ret = ogg_stream_packetout(&vo, &op))) {
			if (ret < 0) {
				fprintf(stderr, "Error parsing Vorbis stream headers; corrupt stream?\n");
				clear();
				return;
			}
			ret = vorbis_synthesis_headerin(&vi, &vc, &op);
			if (ret) {
				fprintf(stderr, "Error parsing Vorbis stream headers; corrupt stream?\n");
				clear();
				return;
			}
			vorbis_p++;
			if (vorbis_p == 3) break;
		}

		/* The header pages/packets will arrive before anything else we
		care about, or the stream is not obeying spec */

		if (ogg_sync_pageout(&oy, &og) > 0) {
			queue_page(&og); /* demux into the appropriate stream */
		} else {
			int ret = buffer_data(); /* someone needs more data */
			if (ret == 0) {
				fprintf(stderr, "End of file while searching for codec headers.\n");
				clear();
				return;
			}
		}
	}

	/* and now we have it all.  initialize decoders */
	if (theora_p) {
		td = th_decode_alloc(&ti, ts);
		printf("Ogg logical stream %lx is Theora %dx%d %.02f fps",
				to.serialno, ti.pic_width, ti.pic_height,
				(double)ti.fps_numerator / ti.fps_denominator);
		px_fmt = ti.pixel_fmt;
		switch (ti.pixel_fmt) {
			case TH_PF_420: printf(" 4:2:0 video\n"); break;
			case TH_PF_422: printf(" 4:2:2 video\n"); break;
			case TH_PF_444: printf(" 4:4:4 video\n"); break;
			case TH_PF_RSVD:
			default:
				printf(" video\n  (UNKNOWN Chroma sampling!)\n");
				break;
		}
		if (ti.pic_width != ti.frame_width || ti.pic_height != ti.frame_height)
			printf("  Frame content is %dx%d with offset (%d,%d).\n",
					ti.frame_width, ti.frame_height, ti.pic_x, ti.pic_y);
		th_decode_ctl(td, TH_DECCTL_GET_PPLEVEL_MAX, &pp_level_max,
				sizeof(pp_level_max));
		pp_level = 0; // post-processing disabled by default
		th_decode_ctl(td, TH_DECCTL_SET_PPLEVEL, &pp_level, sizeof(pp_level));
		pp_inc = 0;

		/*{
		int arg = 0xffff;
		th_decode_ctl(td,TH_DECCTL_SET_TELEMETRY_MBMODE,&arg,sizeof(arg));
		th_decode_ctl(td,TH_DECCTL_SET_TELEMETRY_MV,&arg,sizeof(arg));
		th_decode_ctl(td,TH_DECCTL_SET_TELEMETRY_QI,&arg,sizeof(arg));
		arg=10;
		th_decode_ctl(td,TH_DECCTL_SET_TELEMETRY_BITS,&arg,sizeof(arg));
		}*/

		int w;
		int h;
		w = (ti.pic_x + ti.frame_width + 1 & ~1) - (ti.pic_x & ~1);
		h = (ti.pic_y + ti.frame_height + 1 & ~1) - (ti.pic_y & ~1);
		size.x = w;
		size.y = h;

		texture->create(w, h, Image::FORMAT_RGBA, Texture::FLAG_FILTER | Texture::FLAG_VIDEO_SURFACE);

	} else {
		/* tear down the partial theora setup */
		th_info_clear(&ti);
		th_comment_clear(&tc);
	}

	th_setup_free(ts);

	if (vorbis_p) {
		vorbis_synthesis_init(&vd, &vi);
		vorbis_block_init(&vd, &vb);
		fprintf(stderr, "Ogg logical stream %lx is Vorbis %d channel %ld Hz audio.\n",
				vo.serialno, vi.channels, vi.rate);
		//_setup(vi.channels, vi.rate);

	} else {
		/* tear down the partial vorbis setup */
		vorbis_info_clear(&vi);
		vorbis_comment_clear(&vc);
	}

	playing = false;
	buffering = true;
	time = 0;
	audio_frames_wrote = 0;
};
Example #6
void VideoStreamPlaybackTheora::update(float p_delta) {

	if (!file)
		return;

	if (!playing || paused) {
		//printf("not playing\n");
		return;
	};

#ifdef THEORA_USE_THREAD_STREAMING
	thread_sem->post();
#endif

	//double ctime =AudioServer::get_singleton()->get_mix_time();

	//print_line("play "+rtos(p_delta));
	time += p_delta;

	if (videobuf_time > get_time()) {
		return; //no new frames need to be produced
	}

	bool frame_done = false;
	bool audio_done = !vorbis_p;

	while (!frame_done || (!audio_done && !vorbis_eos)) {
		//a frame needs to be produced

		ogg_packet op;
		bool no_theora = false;

		while (vorbis_p) {
			int ret;
			float **pcm;

			bool buffer_full = false;

			/* if there's pending, decoded audio, grab it */
			if ((ret = vorbis_synthesis_pcmout(&vd, &pcm)) > 0) {

				const int AUXBUF_LEN = 4096;
				int to_read = ret;
				int16_t aux_buffer[AUXBUF_LEN];

				while (to_read) {

					int m = MIN(AUXBUF_LEN / vi.channels, to_read);

					int count = 0;

					for (int j = 0; j < m; j++) {
						for (int i = 0; i < vi.channels; i++) {

							int val = Math::fast_ftoi(pcm[i][j] * 32767.f);
							if (val > 32767) val = 32767;
							if (val < -32768) val = -32768;
							aux_buffer[count++] = val;
						}
					}

					if (mix_callback) {
						int mixed = mix_callback(mix_udata, aux_buffer, m);
						to_read -= mixed;
						if (mixed != m) { //could mix no more
							buffer_full = true;
							break;
						}
					} else {
						to_read -= m; //just pretend we sent the audio
					}
				}

				int tr = vorbis_synthesis_read(&vd, ret - to_read);

				if (vd.granulepos >= 0) {
					//	print_line("wrote: "+itos(audio_frames_wrote)+" gpos: "+itos(vd.granulepos));
				}

				//print_line("mix audio!");

				audio_frames_wrote += ret - to_read;

				//print_line("AGP: "+itos(vd.granulepos)+" added "+itos(ret-to_read));

			} else {

				/* no pending audio; is there a pending packet to decode? */
				if (ogg_stream_packetout(&vo, &op) > 0) {
					if (vorbis_synthesis(&vb, &op) == 0) { /* test for success! */
						vorbis_synthesis_blockin(&vd, &vb);
					}
				} else { /* we need more data; break out to suck in another page */
					//printf("need moar data\n");
					break;
				};
			}

			audio_done = videobuf_time < (audio_frames_wrote / float(vi.rate));

			if (buffer_full)
				break;
		}

		while (theora_p && !frame_done) {
			/* theora is one in, one out... */
			if (ogg_stream_packetout(&to, &op) > 0) {

				if (false && pp_inc) {
					pp_level += pp_inc;
					th_decode_ctl(td, TH_DECCTL_SET_PPLEVEL, &pp_level,
							sizeof(pp_level));
					pp_inc = 0;
				}
				/*HACK: This should be set after a seek or a gap, but we might not have
				a granulepos for the first packet (we only have them for the last
				packet on a page), so we just set it as often as we get it.
				To do this right, we should back-track from the last packet on the
				page and compute the correct granulepos for the first packet after
				a seek or a gap.*/
				if (op.granulepos >= 0) {
					th_decode_ctl(td, TH_DECCTL_SET_GRANPOS, &op.granulepos,
							sizeof(op.granulepos));
				}
				ogg_int64_t videobuf_granulepos;
				if (th_decode_packetin(td, &op, &videobuf_granulepos) == 0) {
					videobuf_time = th_granule_time(td, videobuf_granulepos);

					//printf("frame time %f, play time %f, ready %i\n", (float)videobuf_time, get_time(), videobuf_ready);

					/* is it already too old to be useful?  This is only actually
					 useful cosmetically after a SIGSTOP.  Note that we have to
					 decode the frame even if we don't show it (for now) due to
					 keyframing.  Soon enough libtheora will be able to deal
					 with non-keyframe seeks.  */

					if (videobuf_time >= get_time()) {
						frame_done = true;
					} else {
						/*If we are too slow, reduce the pp level.*/
						pp_inc = pp_level > 0 ? -1 : 0;
					}
				} else {
				}

			} else {
				no_theora = true;
				break;
			}
		}

			//print_line("no theora: "+itos(no_theora)+" theora eos: "+itos(theora_eos)+" frame done "+itos(frame_done));

#ifdef THEORA_USE_THREAD_STREAMING
		if (file && thread_eof && no_theora && theora_eos && ring_buffer.data_left() == 0) {
#else
		if (file && /*!videobuf_ready && */ no_theora && theora_eos) {
#endif
			printf("video done, stopping\n");
			stop();
			return;
		};
#if 0
		if (!videobuf_ready || audio_todo > 0){
			/* no data yet for somebody.  Grab another page */

			buffer_data();
			while(ogg_sync_pageout(&oy,&og)>0){
				queue_page(&og);
			}
		}
#else

		if (!frame_done || !audio_done) {
			//what's the point of waiting for audio to grab a page?

			buffer_data();
			while (ogg_sync_pageout(&oy, &og) > 0) {
				queue_page(&og);
			}
		}
#endif
		/* If playback has begun, top audio buffer off immediately. */
		//if(stateflag) audio_write_nonblocking();

		/* are we at or past time for this video frame? */
		if (videobuf_ready && videobuf_time <= get_time()) {

			//video_write();
			//videobuf_ready=0;
		} else {
			//printf("frame at %f not ready (time %f), ready %i\n", (float)videobuf_time, get_time(), videobuf_ready);
		}

		float tdiff = videobuf_time - get_time();
		/*If we have lots of extra time, increase the post-processing level.*/
		if (tdiff > ti.fps_denominator * 0.25 / ti.fps_numerator) {
			pp_inc = pp_level < pp_level_max ? 1 : 0;
		} else if (tdiff < ti.fps_denominator * 0.05 / ti.fps_numerator) {
			pp_inc = pp_level > 0 ? -1 : 0;
		}
	}

	video_write();
};

void VideoStreamPlaybackTheora::play() {

	if (!playing)
		time = 0;
	else {
		stop();
	}

	playing = true;
	delay_compensation = Globals::get_singleton()->get("audio/video_delay_compensation_ms");
	delay_compensation /= 1000.0;
};

void VideoStreamPlaybackTheora::stop() {

	if (playing) {

		clear();
		set_file(file_name); //reset
	}
	playing = false;
	time = 0;
};

bool VideoStreamPlaybackTheora::is_playing() const {

	return playing;
};

void VideoStreamPlaybackTheora::set_paused(bool p_paused) {

	paused = p_paused;
	//pau = !p_paused;
};

bool VideoStreamPlaybackTheora::is_paused(bool p_paused) const {

	return paused;
};

void VideoStreamPlaybackTheora::set_loop(bool p_enable){

};

bool VideoStreamPlaybackTheora::has_loop() const {

	return false;
};

float VideoStreamPlaybackTheora::get_length() const {

	return 0;
};

String VideoStreamPlaybackTheora::get_stream_name() const {

	return "";
};

int VideoStreamPlaybackTheora::get_loop_count() const {

	return 0;
};

float VideoStreamPlaybackTheora::get_pos() const {

	return get_time();
};

void VideoStreamPlaybackTheora::seek_pos(float p_time){

	// no
};

void VideoStreamPlaybackTheora::set_mix_callback(AudioMixCallback p_callback, void *p_userdata) {

	mix_callback = p_callback;
	mix_udata = p_userdata;
}
Example #7
bool TheoraDecoder::loadStream(Common::SeekableReadStream *stream) {
	close();

	_endOfAudio = false;
	_endOfVideo = false;
	_fileStream = stream;

	// start up Ogg stream synchronization layer
	ogg_sync_init(&_oggSync);

	// init supporting Vorbis structures needed in header parsing
	vorbis_info_init(&_vorbisInfo);
	vorbis_comment_init(&_vorbisComment);

	// init supporting Theora structures needed in header parsing
	th_comment_init(&_theoraComment);
	th_info_init(&_theoraInfo);

	// Ogg file open; parse the headers
	// Only interested in Vorbis/Theora streams
	bool foundHeader = false;
	while (!foundHeader) {
		int ret = bufferData();

		if (ret == 0)
			break;

		while (ogg_sync_pageout(&_oggSync, &_oggPage) > 0) {
			ogg_stream_state test;

			// is this a mandated initial header? If not, stop parsing
			if (!ogg_page_bos(&_oggPage)) {
				// don't leak the page; get it into the appropriate stream
				queuePage(&_oggPage);
				foundHeader = true;
				break;
			}

			ogg_stream_init(&test, ogg_page_serialno(&_oggPage));
			ogg_stream_pagein(&test, &_oggPage);
			ogg_stream_packetout(&test, &_oggPacket);

			// identify the codec: try theora
			if (!_theoraPacket && th_decode_headerin(&_theoraInfo, &_theoraComment, &_theoraSetup, &_oggPacket) >= 0) {
				// it is theora
				memcpy(&_theoraOut, &test, sizeof(test));
				_theoraPacket = 1;
			} else if (!_vorbisPacket && vorbis_synthesis_headerin(&_vorbisInfo, &_vorbisComment, &_oggPacket) >= 0) {
				// it is vorbis
				memcpy(&_vorbisOut, &test, sizeof(test));
				_vorbisPacket = 1;
			} else {
				// whatever it is, we don't care about it
				ogg_stream_clear(&test);
			}
		}
		// fall through to non-bos page parsing
	}

	// we're expecting more header packets.
	while ((_theoraPacket && _theoraPacket < 3) || (_vorbisPacket && _vorbisPacket < 3)) {
		int ret;

		// look for further theora headers
		while (_theoraPacket && (_theoraPacket < 3) && (ret = ogg_stream_packetout(&_theoraOut, &_oggPacket))) {
			if (ret < 0)
				error("Error parsing Theora stream headers; corrupt stream?");

			if (!th_decode_headerin(&_theoraInfo, &_theoraComment, &_theoraSetup, &_oggPacket))
				error("Error parsing Theora stream headers; corrupt stream?");

			_theoraPacket++;
		}

		// look for more vorbis header packets
		while (_vorbisPacket && (_vorbisPacket < 3) && (ret = ogg_stream_packetout(&_vorbisOut, &_oggPacket))) {
			if (ret < 0)
				error("Error parsing Vorbis stream headers; corrupt stream?");

			if (vorbis_synthesis_headerin(&_vorbisInfo, &_vorbisComment, &_oggPacket))
				error("Error parsing Vorbis stream headers; corrupt stream?");

			_vorbisPacket++;

			if (_vorbisPacket == 3)
				break;
		}

		// The header pages/packets will arrive before anything else we
		// care about, or the stream is not obeying spec

		if (ogg_sync_pageout(&_oggSync, &_oggPage) > 0) {
			queuePage(&_oggPage); // demux into the appropriate stream
		} else {
			ret = bufferData(); // someone needs more data

			if (ret == 0)
				error("End of file while searching for codec headers.");
		}
	}

	// and now we have it all.  initialize decoders
	if (_theoraPacket) {
		_theoraDecode = th_decode_alloc(&_theoraInfo, _theoraSetup);
		debugN(1, "Ogg logical stream %lx is Theora %dx%d %.02f fps",
		       _theoraOut.serialno, _theoraInfo.pic_width, _theoraInfo.pic_height,
		       (double)_theoraInfo.fps_numerator / _theoraInfo.fps_denominator);

		switch (_theoraInfo.pixel_fmt) {
		case TH_PF_420:
			debug(1, " 4:2:0 video");
			break;
		case TH_PF_422:
			debug(1, " 4:2:2 video");
			break;
		case TH_PF_444:
			debug(1, " 4:4:4 video");
			break;
		case TH_PF_RSVD:
		default:
			debug(1, " video\n  (UNKNOWN Chroma sampling!)");
			break;
		}

		if (_theoraInfo.pic_width != _theoraInfo.frame_width || _theoraInfo.pic_height != _theoraInfo.frame_height)
			debug(1, "  Frame content is %dx%d with offset (%d,%d).",
			      _theoraInfo.frame_width, _theoraInfo.frame_height, _theoraInfo.pic_x, _theoraInfo.pic_y);

		switch (_theoraInfo.colorspace){
		case TH_CS_UNSPECIFIED:
			/* nothing to report */
			break;
		case TH_CS_ITU_REC_470M:
			debug(1, "  encoder specified ITU Rec 470M (NTSC) color.");
			break;
		case TH_CS_ITU_REC_470BG:
			debug(1, "  encoder specified ITU Rec 470BG (PAL) color.");
			break;
		default:
			debug(1, "warning: encoder specified unknown colorspace (%d).", _theoraInfo.colorspace);
			break;
		}

		debug(1, "Encoded by %s", _theoraComment.vendor);
		if (_theoraComment.comments) {
			debug(1, "theora comment header:");
			for (int i = 0; i < _theoraComment.comments; i++) {
				if (_theoraComment.user_comments[i]) {
					int len = _theoraComment.comment_lengths[i];
					char *value = (char *)malloc(len + 1);
					if (value) {
						memcpy(value, _theoraComment.user_comments[i], len);
						value[len] = '\0';
						debug(1, "\t%s", value);
						free(value);
					}
				}
			}
		}

		th_decode_ctl(_theoraDecode, TH_DECCTL_GET_PPLEVEL_MAX, &_ppLevelMax, sizeof(_ppLevelMax));
		_ppLevel = _ppLevelMax;
		th_decode_ctl(_theoraDecode, TH_DECCTL_SET_PPLEVEL, &_ppLevel, sizeof(_ppLevel));
		_ppInc = 0;
	} else {
		// tear down the partial theora setup
		th_info_clear(&_theoraInfo);
		th_comment_clear(&_theoraComment);
	}

	th_setup_free(_theoraSetup);
	_theoraSetup = 0;

	if (_vorbisPacket) {
		vorbis_synthesis_init(&_vorbisDSP, &_vorbisInfo);
		vorbis_block_init(&_vorbisDSP, &_vorbisBlock);
		debug(3, "Ogg logical stream %lx is Vorbis %d channel %ld Hz audio.",
		      _vorbisOut.serialno, _vorbisInfo.channels, _vorbisInfo.rate);

		_audStream = Audio::makeQueuingAudioStream(_vorbisInfo.rate, _vorbisInfo.channels);

		// Get enough audio data to start us off
		while (_audStream->numQueuedStreams() == 0) {
			// Queue more data
			bufferData();
			while (ogg_sync_pageout(&_oggSync, &_oggPage) > 0)
				queuePage(&_oggPage);

			queueAudio();
		}

		if (_audStream)
			g_system->getMixer()->playStream(Audio::Mixer::kPlainSoundType, _audHandle, _audStream, -1, getVolume(), getBalance());
	} else {
		// tear down the partial vorbis setup
		vorbis_info_clear(&_vorbisInfo);
		vorbis_comment_clear(&_vorbisComment);
		_endOfAudio = true;
	}

	_surface.create(_theoraInfo.frame_width, _theoraInfo.frame_height, g_system->getScreenFormat());

	// Set up a display surface
	_displaySurface.pixels = _surface.getBasePtr(_theoraInfo.pic_x, _theoraInfo.pic_y);
	_displaySurface.w = _theoraInfo.pic_width;
	_displaySurface.h = _theoraInfo.pic_height;
	_displaySurface.format = _surface.format;
	_displaySurface.pitch = _surface.pitch;

	// Set the frame rate
	_frameRate = Common::Rational(_theoraInfo.fps_numerator, _theoraInfo.fps_denominator);

	return true;
}
Example #8
int main(int argc,char *const *argv){

  int pp_level_max;
  int pp_level;
  int pp_inc;
  int i,j;
  ogg_packet op;

  FILE *infile = stdin;

  int frames = 0;
  int dropped = 0;

#ifdef _WIN32 /* We need to set stdin/stdout to binary mode. Damn windows. */
  /* Beware the evil ifdef. We avoid these where we can, but this one we
     cannot. Don't add any more, you'll probably go to hell if you do. */
  _setmode( _fileno( stdin ), _O_BINARY );
#endif

  /* open the input file if any */
  if(argc==2){
    infile=fopen(argv[1],"rb");
    if(infile==NULL){
      fprintf(stderr,"Unable to open '%s' for playback.\n", argv[1]);
      exit(1);
    }
  }
  if(argc>2){
      usage();
      exit(1);
  }

  /* start up Ogg stream synchronization layer */
  ogg_sync_init(&oy);

  /* init supporting Vorbis structures needed in header parsing */
  vorbis_info_init(&vi);
  vorbis_comment_init(&vc);

  /* init supporting Theora structures needed in header parsing */
  th_comment_init(&tc);
  th_info_init(&ti);

  /* Ogg file open; parse the headers */
  /* Only interested in Vorbis/Theora streams */
  while(!stateflag){
    int ret=buffer_data(infile,&oy);
    if(ret==0)break;
    while(ogg_sync_pageout(&oy,&og)>0){
      ogg_stream_state test;

      /* is this a mandated initial header? If not, stop parsing */
      if(!ogg_page_bos(&og)){
        /* don't leak the page; get it into the appropriate stream */
        queue_page(&og);
        stateflag=1;
        break;
      }

      ogg_stream_init(&test,ogg_page_serialno(&og));
      ogg_stream_pagein(&test,&og);
      ogg_stream_packetout(&test,&op);


      /* identify the codec: try theora */
      if(!theora_p && th_decode_headerin(&ti,&tc,&ts,&op)>=0){
        /* it is theora */
        memcpy(&to,&test,sizeof(test));
        theora_p=1;
      }else if(!vorbis_p && vorbis_synthesis_headerin(&vi,&vc,&op)>=0){
        /* it is vorbis */
        memcpy(&vo,&test,sizeof(test));
        vorbis_p=1;
      }else{
        /* whatever it is, we don't care about it */
        ogg_stream_clear(&test);
      }
    }
    /* fall through to non-bos page parsing */
  }

  /* we're expecting more header packets. */
  while((theora_p && theora_p<3) || (vorbis_p && vorbis_p<3)){
    int ret;

    /* look for further theora headers */
    while(theora_p && (theora_p<3) && (ret=ogg_stream_packetout(&to,&op))){
      if(ret<0){
        fprintf(stderr,"Error parsing Theora stream headers; "
         "corrupt stream?\n");
        exit(1);
      }
      if(!th_decode_headerin(&ti,&tc,&ts,&op)){
        fprintf(stderr,"Error parsing Theora stream headers; "
         "corrupt stream?\n");
        exit(1);
      }
      theora_p++;
    }

    /* look for more vorbis header packets */
    while(vorbis_p && (vorbis_p<3) && (ret=ogg_stream_packetout(&vo,&op))){
      if(ret<0){
        fprintf(stderr,"Error parsing Vorbis stream headers; corrupt stream?\n");
        exit(1);
      }
      if(vorbis_synthesis_headerin(&vi,&vc,&op)){
        fprintf(stderr,"Error parsing Vorbis stream headers; corrupt stream?\n");
        exit(1);
      }
      vorbis_p++;
      if(vorbis_p==3)break;
    }

    /* The header pages/packets will arrive before anything else we
       care about, or the stream is not obeying spec */

    if(ogg_sync_pageout(&oy,&og)>0){
      queue_page(&og); /* demux into the appropriate stream */
    }else{
      int ret=buffer_data(infile,&oy); /* someone needs more data */
      if(ret==0){
        fprintf(stderr,"End of file while searching for codec headers.\n");
        exit(1);
      }
    }
  }

  /* and now we have it all.  initialize decoders */
  if(theora_p){
    td=th_decode_alloc(&ti,ts);
    printf("Ogg logical stream %lx is Theora %dx%d %.02f fps",
           to.serialno,ti.pic_width,ti.pic_height,
           (double)ti.fps_numerator/ti.fps_denominator);
    px_fmt=ti.pixel_fmt;
    switch(ti.pixel_fmt){
      case TH_PF_420: printf(" 4:2:0 video\n"); break;
      case TH_PF_422: printf(" 4:2:2 video\n"); break;
      case TH_PF_444: printf(" 4:4:4 video\n"); break;
      case TH_PF_RSVD:
      default:
       printf(" video\n  (UNKNOWN Chroma sampling!)\n");
       break;
    }
    if(ti.pic_width!=ti.frame_width || ti.pic_height!=ti.frame_height)
      printf("  Frame content is %dx%d with offset (%d,%d).\n",
           ti.frame_width, ti.frame_height, ti.pic_x, ti.pic_y);
    report_colorspace(&ti);
    dump_comments(&tc);
    th_decode_ctl(td,TH_DECCTL_GET_PPLEVEL_MAX,&pp_level_max,
     sizeof(pp_level_max));
    pp_level=pp_level_max;
    th_decode_ctl(td,TH_DECCTL_SET_PPLEVEL,&pp_level,sizeof(pp_level));
    pp_inc=0;

    /*{
      int arg = 0xffff;
      th_decode_ctl(td,TH_DECCTL_SET_TELEMETRY_MBMODE,&arg,sizeof(arg));
      th_decode_ctl(td,TH_DECCTL_SET_TELEMETRY_MV,&arg,sizeof(arg));
      th_decode_ctl(td,TH_DECCTL_SET_TELEMETRY_QI,&arg,sizeof(arg));
      arg=10;
      th_decode_ctl(td,TH_DECCTL_SET_TELEMETRY_BITS,&arg,sizeof(arg));
    }*/
  }else{
    /* tear down the partial theora setup */
    th_info_clear(&ti);
    th_comment_clear(&tc);
  }
  
  th_setup_free(ts);
  
  if(vorbis_p){
    vorbis_synthesis_init(&vd,&vi);
    vorbis_block_init(&vd,&vb);
    fprintf(stderr,"Ogg logical stream %lx is Vorbis %d channel %ld Hz audio.\n",
            vo.serialno,vi.channels,vi.rate);
  }else{
    /* tear down the partial vorbis setup */
    vorbis_info_clear(&vi);
    vorbis_comment_clear(&vc);
  }

  /* open audio */
  if(vorbis_p)open_audio();

  /* open video */
  if(theora_p)open_video();

  /* install signal handler as SDL clobbered the default */
  signal (SIGINT, sigint_handler);

  /* on to the main decode loop.  We assume in this example that audio
     and video start roughly together, and don't begin playback until
     we have a start frame for both.  This is not necessarily a valid
     assumption in Ogg A/V streams! It will always be true of the
     example_encoder (and most streams) though. */

  stateflag=0; /* playback has not begun */
  while(!got_sigint){

    /* we want a video and audio frame ready to go at all times.  If
       we have to buffer incoming, buffer the compressed data (ie, let
       ogg do the buffering) */
    while(vorbis_p && !audiobuf_ready){
      int ret;
      float **pcm;

      /* if there's pending, decoded audio, grab it */
      if((ret=vorbis_synthesis_pcmout(&vd,&pcm))>0){
        int count=audiobuf_fill/2;
        int maxsamples=(audiofd_fragsize-audiobuf_fill)/2/vi.channels;
        for(i=0;i<ret && i<maxsamples;i++)
          for(j=0;j<vi.channels;j++){
            int val=rint(pcm[j][i]*32767.f);
            if(val>32767)val=32767;
            if(val<-32768)val=-32768;
            audiobuf[count++]=val;
          }
        vorbis_synthesis_read(&vd,i);
        audiobuf_fill+=i*vi.channels*2;
        if(audiobuf_fill==audiofd_fragsize)audiobuf_ready=1;
        if(vd.granulepos>=0)
          audiobuf_granulepos=vd.granulepos-ret+i;
        else
          audiobuf_granulepos+=i;

      }else{

        /* no pending audio; is there a pending packet to decode? */
        if(ogg_stream_packetout(&vo,&op)>0){
          if(vorbis_synthesis(&vb,&op)==0) /* test for success! */
            vorbis_synthesis_blockin(&vd,&vb);
        }else   /* we need more data; break out to suck in another page */
          break;
      }
    }

    while(theora_p && !videobuf_ready){
      /* theora is one in, one out... */
      if(ogg_stream_packetout(&to,&op)>0){

        if(pp_inc){
          pp_level+=pp_inc;
          th_decode_ctl(td,TH_DECCTL_SET_PPLEVEL,&pp_level,
           sizeof(pp_level));
          pp_inc=0;
        }
        /*HACK: This should be set after a seek or a gap, but we might not have
           a granulepos for the first packet (we only have them for the last
           packet on a page), so we just set it as often as we get it.
          To do this right, we should back-track from the last packet on the
           page and compute the correct granulepos for the first packet after
           a seek or a gap.*/
        if(op.granulepos>=0){
          th_decode_ctl(td,TH_DECCTL_SET_GRANPOS,&op.granulepos,
           sizeof(op.granulepos));
        }
        if(th_decode_packetin(td,&op,&videobuf_granulepos)==0){
          videobuf_time=th_granule_time(td,videobuf_granulepos);
          frames++;

          /* is it already too old to be useful?  This is only actually
             useful cosmetically after a SIGSTOP.  Note that we have to
             decode the frame even if we don't show it (for now) due to
             keyframing.  Soon enough libtheora will be able to deal
             with non-keyframe seeks.  */

          if(videobuf_time>=get_time())
            videobuf_ready=1;
          else{
            /*If we are too slow, reduce the pp level.*/
            pp_inc=pp_level>0?-1:0;
            dropped++;
          }
        }

      }else
        break;
    }

    if(!videobuf_ready && !audiobuf_ready && feof(infile))break;

    if(!videobuf_ready || !audiobuf_ready){
      /* no data yet for somebody.  Grab another page */
      buffer_data(infile,&oy);
      while(ogg_sync_pageout(&oy,&og)>0){
        queue_page(&og);
      }
    }

    /* If playback has begun, top audio buffer off immediately. */
    if(stateflag) audio_write_nonblocking();

    /* are we at or past time for this video frame? */
    if(stateflag && videobuf_ready && videobuf_time<=get_time()){
      video_write();
      videobuf_ready=0;
    }

    if(stateflag &&
       (audiobuf_ready || !vorbis_p) &&
       (videobuf_ready || !theora_p) &&
       !got_sigint){
      /* we have an audio frame ready (which means the audio buffer is
         full), and it's not time to play video, so wait until the
         audio buffer is ready or it's near time to play video */

      /* set up select wait on the audiobuffer and a timeout for video */
      struct timeval timeout;
      fd_set writefs;
      fd_set empty;
      int n=0;

      FD_ZERO(&writefs);
      FD_ZERO(&empty);
      if(audiofd>=0){
        FD_SET(audiofd,&writefs);
        n=audiofd+1;
      }

      if(theora_p){
        double tdiff;
        long milliseconds;
        tdiff=videobuf_time-get_time();
        /*If we have lots of extra time, increase the post-processing level.*/
        if(tdiff>ti.fps_denominator*0.25/ti.fps_numerator){
          pp_inc=pp_level<pp_level_max?1:0;
        }
        else if(tdiff<ti.fps_denominator*0.05/ti.fps_numerator){
          pp_inc=pp_level>0?-1:0;
        }
        milliseconds=tdiff*1000-5;
        if(milliseconds>500)milliseconds=500;
        if(milliseconds>0){
          timeout.tv_sec=milliseconds/1000;
          timeout.tv_usec=(milliseconds%1000)*1000;

          n=select(n,&empty,&writefs,&empty,&timeout);
          if(n)audio_calibrate_timer(0);
        }
      }else{
        select(n,&empty,&writefs,&empty,NULL);
      }
    }

    /* if our buffers either don't exist or are ready to go,
       we can begin playback */
    if((!theora_p || videobuf_ready) &&
       (!vorbis_p || audiobuf_ready))stateflag=1;
    /* same if we've run out of input */
    if(feof(infile))stateflag=1;

  }

  /* tear it all down */

  audio_close();
  SDL_Quit();

  if(vorbis_p){
    ogg_stream_clear(&vo);
    vorbis_block_clear(&vb);
    vorbis_dsp_clear(&vd);
    vorbis_comment_clear(&vc);
    vorbis_info_clear(&vi);
  }
  if(theora_p){
    ogg_stream_clear(&to);
    th_decode_free(td);
    th_comment_clear(&tc);
    th_info_clear(&ti);
  }
  ogg_sync_clear(&oy);

  if(infile && infile!=stdin)fclose(infile);

  fprintf(stderr,
          "\r                                                             \r");
  fprintf(stderr, "%d frames", frames);
  if (dropped) fprintf(stderr, " (%d dropped)", dropped);
  fprintf(stderr, "\n");
  fprintf(stderr, "\nDone.\n");

  return(0);

}
Example #9
const Graphics::Surface *TheoraDecoder::decodeNextFrame() {
	// First, let's get our frame
	while (_theoraPacket) {
		// theora is one in, one out...
		if (ogg_stream_packetout(&_theoraOut, &_oggPacket) > 0) {

			if (_ppInc) {
				_ppLevel += _ppInc;
				th_decode_ctl(_theoraDecode, TH_DECCTL_SET_PPLEVEL, &_ppLevel, sizeof(_ppLevel));
				_ppInc = 0;
			}

			if (th_decode_packetin(_theoraDecode, &_oggPacket, NULL) == 0) {
				_curFrame++;

				// Convert YUV data to RGB data
				th_ycbcr_buffer yuv;
				th_decode_ycbcr_out(_theoraDecode, yuv);
				translateYUVtoRGBA(yuv);

				if (_curFrame == 0)
					_startTime = g_system->getMillis();

				double time = th_granule_time(_theoraDecode, _oggPacket.granulepos);

				// We need to calculate when the next frame should be shown
				// This is all in floating point because that's what the Ogg code gives us
				// Ogg doesn't attach a granulepos (timestamp) to every packet, so it doesn't
				// always list the time to the next frame. In such cases, we need to calculate it ourselves.
				if (time == -1.0)
					_nextFrameStartTime += _frameRate.getInverse().toDouble();
				else
					_nextFrameStartTime = time;

				// break out
				break;
			}
		} else {
			// If we can't get any more frames, we're done.
			if (_theoraOut.e_o_s || _fileStream->eos()) {
				_endOfVideo = true;
				break;
			}

			// Queue more data
			bufferData();
			while (ogg_sync_pageout(&_oggSync, &_oggPage) > 0)
				queuePage(&_oggPage);
		}

		// Update audio if we can
		queueAudio();
	}

	// Force at least some audio to be buffered
	// TODO: 5 is very arbitrary. We probably should do something like QuickTime does.
	while (!_endOfAudio && _audStream->numQueuedStreams() < 5) {
		bufferData();
		while (ogg_sync_pageout(&_oggSync, &_oggPage) > 0)
			queuePage(&_oggPage);

		bool queuedAudio = queueAudio();
		if ((_vorbisOut.e_o_s  || _fileStream->eos()) && !queuedAudio) {
			_endOfAudio = true;
			break;
		}
	}

	return &_displaySurface;
}
Example #10
static GstFlowReturn
theora_handle_type_packet (GstTheoraDec * dec, ogg_packet * packet)
{
  GstCaps *caps;
  gint par_num, par_den;
  GstFlowReturn ret = GST_FLOW_OK;
  GList *walk;
  guint32 fourcc;

  GST_DEBUG_OBJECT (dec, "fps %d/%d, PAR %d/%d",
      dec->info.fps_numerator, dec->info.fps_denominator,
      dec->info.aspect_numerator, dec->info.aspect_denominator);

  /* calculate par
   * the info.aspect_* values reflect PAR;
   * 0:x and x:0 are allowed and can be interpreted as 1:1.
   */
  if (dec->have_par) {
    /* we had a par on the sink caps, override the encoded par */
    GST_DEBUG_OBJECT (dec, "overriding with input PAR");
    par_num = dec->par_num;
    par_den = dec->par_den;
  } else {
    /* take encoded par */
    par_num = dec->info.aspect_numerator;
    par_den = dec->info.aspect_denominator;
  }
  if (par_num == 0 || par_den == 0) {
    par_num = par_den = 1;
  }
  /* theora has:
   *
   *  width/height : dimension of the encoded frame 
   *  pic_width/pic_height : dimension of the visible part
   *  pic_x/pic_y : offset in encoded frame where visible part starts
   */
  GST_DEBUG_OBJECT (dec, "dimension %dx%d, PAR %d/%d", dec->info.pic_width,
      dec->info.pic_height, par_num, par_den);
  GST_DEBUG_OBJECT (dec, "frame dimension %dx%d, offset %d:%d",
      dec->info.pic_width, dec->info.pic_height,
      dec->info.pic_x, dec->info.pic_y);

  if (dec->info.pixel_fmt == TH_PF_420) {
    dec->output_bpp = 12;       /* Average bits per pixel. */
    fourcc = GST_MAKE_FOURCC ('I', '4', '2', '0');
  } else if (dec->info.pixel_fmt == TH_PF_422) {
    dec->output_bpp = 16;
    fourcc = GST_MAKE_FOURCC ('Y', '4', '2', 'B');
  } else if (dec->info.pixel_fmt == TH_PF_444) {
    dec->output_bpp = 24;
    fourcc = GST_MAKE_FOURCC ('Y', '4', '4', '4');
  } else {
    GST_ERROR_OBJECT (dec, "Invalid pixel format %d", dec->info.pixel_fmt);
    return GST_FLOW_ERROR;
  }

  if (dec->crop) {
    dec->width = dec->info.pic_width;
    dec->height = dec->info.pic_height;
    dec->offset_x = dec->info.pic_x;
    dec->offset_y = dec->info.pic_y;
    /* Ensure correct offsets in chroma for formats that need it
     * by rounding the offset. libtheora will add proper pixels,
     * so no need to handle them ourselves. */
    if (dec->offset_x & 1 && dec->info.pixel_fmt != TH_PF_444) {
      dec->offset_x--;
      dec->width++;
    }
    if (dec->offset_y & 1 && dec->info.pixel_fmt == TH_PF_420) {
      dec->offset_y--;
      dec->height++;
    }
  } else {
    /* no cropping, use the encoded dimensions */
    dec->width = dec->info.frame_width;
    dec->height = dec->info.frame_height;
    dec->offset_x = 0;
    dec->offset_y = 0;
  }

  GST_DEBUG_OBJECT (dec, "after fixup frame dimension %dx%d, offset %d:%d",
      dec->width, dec->height, dec->offset_x, dec->offset_y);

  /* done */
  dec->decoder = th_decode_alloc (&dec->info, dec->setup);

  if (th_decode_ctl (dec->decoder, TH_DECCTL_SET_TELEMETRY_MV,
          &dec->telemetry_mv, sizeof (dec->telemetry_mv)) != TH_EIMPL) {
    GST_WARNING_OBJECT (dec, "Could not enable MV visualisation");
  }
  if (th_decode_ctl (dec->decoder, TH_DECCTL_SET_TELEMETRY_MBMODE,
          &dec->telemetry_mbmode, sizeof (dec->telemetry_mbmode)) != TH_EIMPL) {
    GST_WARNING_OBJECT (dec, "Could not enable MB mode visualisation");
  }
  if (th_decode_ctl (dec->decoder, TH_DECCTL_SET_TELEMETRY_QI,
          &dec->telemetry_qi, sizeof (dec->telemetry_qi)) != TH_EIMPL) {
    GST_WARNING_OBJECT (dec, "Could not enable QI mode visualisation");
  }
  if (th_decode_ctl (dec->decoder, TH_DECCTL_SET_TELEMETRY_BITS,
          &dec->telemetry_bits, sizeof (dec->telemetry_bits)) != TH_EIMPL) {
    GST_WARNING_OBJECT (dec, "Could not enable BITS mode visualisation");
  }

  caps = gst_caps_new_simple ("video/x-raw-yuv",
      "format", GST_TYPE_FOURCC, fourcc,
      "framerate", GST_TYPE_FRACTION,
      dec->info.fps_numerator, dec->info.fps_denominator,
      "pixel-aspect-ratio", GST_TYPE_FRACTION, par_num, par_den,
      "width", G_TYPE_INT, dec->width, "height", G_TYPE_INT, dec->height,
      "color-matrix", G_TYPE_STRING, "sdtv",
      "chroma-site", G_TYPE_STRING, "jpeg", NULL);
  gst_pad_set_caps (dec->srcpad, caps);
  gst_caps_unref (caps);

  dec->have_header = TRUE;

  if (dec->pendingevents) {
    for (walk = dec->pendingevents; walk; walk = g_list_next (walk))
      gst_pad_push_event (dec->srcpad, GST_EVENT_CAST (walk->data));
    g_list_free (dec->pendingevents);
    dec->pendingevents = NULL;
  }

  if (dec->tags) {
    gst_element_found_tags_for_pad (GST_ELEMENT_CAST (dec), dec->srcpad,
        dec->tags);
    dec->tags = NULL;
  }

  return ret;
}
Example #11
static gboolean
gst_theora_dec_ctl_is_supported (int req)
{
  /* should return TH_EFAULT or TH_EINVAL if supported, and TH_EIMPL if not */
  return (th_decode_ctl (NULL, req, NULL, 0) != TH_EIMPL);
}
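
A minimal usage sketch for the probe above (hypothetical caller, not taken from the GStreamer sources; it assumes a GstTheoraDec *dec as in the surrounding examples). It relies only on the documented behaviour that th_decode_ctl() returns TH_EIMPL for requests that are not implemented:

  /* Hypothetical caller: only touch the post-processing level if the
   * control is compiled into this libtheora build. */
  if (gst_theora_dec_ctl_is_supported (TH_DECCTL_SET_PPLEVEL)) {
    int level = 0;      /* 0 = no post-processing */
    th_decode_ctl (dec->decoder, TH_DECCTL_SET_PPLEVEL, &level, sizeof (level));
  }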
Example #12
/*****************************************************************************
 * ProcessHeaders: process Theora headers.
 *****************************************************************************/
static int ProcessHeaders( decoder_t *p_dec )
{
    decoder_sys_t *p_sys = p_dec->p_sys;
    ogg_packet oggpacket;
    th_setup_info *ts = NULL; /* theora setup information */
    int i_max_pp, i_pp;

    unsigned pi_size[XIPH_MAX_HEADER_COUNT];
    void     *pp_data[XIPH_MAX_HEADER_COUNT];
    unsigned i_count;
    if( xiph_SplitHeaders( pi_size, pp_data, &i_count,
                           p_dec->fmt_in.i_extra, p_dec->fmt_in.p_extra) )
        return VLC_EGENERIC;
    if( i_count < 3 )
        return VLC_EGENERIC;

    oggpacket.granulepos = -1;
    oggpacket.e_o_s = 0;
    oggpacket.packetno = 0;

    /* Take care of the initial Theora header */
    oggpacket.b_o_s  = 1; /* yes this actually is a b_o_s packet :) */
    oggpacket.bytes  = pi_size[0];
    oggpacket.packet = pp_data[0];
    if( th_decode_headerin( &p_sys->ti, &p_sys->tc, &ts, &oggpacket ) < 0 )
    {
        msg_Err( p_dec, "this bitstream does not contain Theora video data" );
        goto error;
    }

    /* Set output properties */
    if( !p_sys->b_packetizer )
    {
        switch( p_sys->ti.pixel_fmt )
        {
          case TH_PF_420:
            p_dec->fmt_out.i_codec = VLC_CODEC_I420;
            break;
          case TH_PF_422:
            p_dec->fmt_out.i_codec = VLC_CODEC_I422;
            break;
          case TH_PF_444:
            p_dec->fmt_out.i_codec = VLC_CODEC_I444;
            break;
          case TH_PF_RSVD:
          default:
            msg_Err( p_dec, "unknown chroma in theora sample" );
            break;
        }
    }

    p_dec->fmt_out.video.i_width = p_sys->ti.frame_width;
    p_dec->fmt_out.video.i_height = p_sys->ti.frame_height;
    if( p_sys->ti.pic_width && p_sys->ti.pic_height )
    {
        p_dec->fmt_out.video.i_visible_width = p_sys->ti.pic_width;
        p_dec->fmt_out.video.i_visible_height = p_sys->ti.pic_height;

        p_dec->fmt_out.video.i_x_offset = p_sys->ti.pic_x;
        p_dec->fmt_out.video.i_y_offset = p_sys->ti.pic_y;
    }

    if( p_sys->ti.aspect_denominator && p_sys->ti.aspect_numerator )
    {
        p_dec->fmt_out.video.i_sar_num = p_sys->ti.aspect_numerator;
        p_dec->fmt_out.video.i_sar_den = p_sys->ti.aspect_denominator;
    }
    else
    {
        p_dec->fmt_out.video.i_sar_num = 1;
        p_dec->fmt_out.video.i_sar_den = 1;
    }

    if( p_sys->ti.fps_numerator > 0 && p_sys->ti.fps_denominator > 0 )
    {
        p_dec->fmt_out.video.i_frame_rate = p_sys->ti.fps_numerator;
        p_dec->fmt_out.video.i_frame_rate_base = p_sys->ti.fps_denominator;
    }

    msg_Dbg( p_dec, "%dx%d %u/%u fps video, frame content "
             "is %dx%d with offset (%d,%d)",
             p_sys->ti.frame_width, p_sys->ti.frame_height,
             p_sys->ti.fps_numerator, p_sys->ti.fps_denominator,
             p_sys->ti.pic_width, p_sys->ti.pic_height,
             p_sys->ti.pic_x, p_sys->ti.pic_y );

    /* Some assertions based on the documentation.  These are mandatory restrictions. */
    assert( p_sys->ti.frame_height % 16 == 0 && p_sys->ti.frame_height < 1048576 );
    assert( p_sys->ti.frame_width % 16 == 0 && p_sys->ti.frame_width < 1048576 );
    assert( p_sys->ti.keyframe_granule_shift >= 0 && p_sys->ti.keyframe_granule_shift <= 31 );
    assert( p_sys->ti.pic_x <= __MIN( p_sys->ti.frame_width - p_sys->ti.pic_width, 255 ) );
    assert( p_sys->ti.pic_y <= p_sys->ti.frame_height - p_sys->ti.pic_height);
    assert( p_sys->ti.frame_height - p_sys->ti.pic_height - p_sys->ti.pic_y <= 255 );

    /* Sanity check that seems necessary for some corrupted files */
    if( p_sys->ti.frame_width < p_sys->ti.pic_width ||
        p_sys->ti.frame_height < p_sys->ti.pic_height )
    {
        msg_Warn( p_dec, "trying to correct invalid theora header "
                  "(frame size (%dx%d) is smaller than frame content (%d,%d))",
                  p_sys->ti.frame_width, p_sys->ti.frame_height,
                  p_sys->ti.pic_width, p_sys->ti.pic_height );

        if( p_sys->ti.frame_width < p_sys->ti.pic_width )
          p_sys->ti.frame_width = p_sys->ti.pic_width;
        if( p_sys->ti.frame_height < p_sys->ti.pic_height )
            p_sys->ti.frame_height = p_sys->ti.pic_height;
    }

    /* The next packet in order is the comments header */
    oggpacket.b_o_s  = 0;
    oggpacket.bytes  = pi_size[1];
    oggpacket.packet = pp_data[1];

    if( th_decode_headerin( &p_sys->ti, &p_sys->tc, &ts, &oggpacket ) < 0 )
    {
        msg_Err( p_dec, "2nd Theora header is corrupted" );
        goto error;
    }

    ParseTheoraComments( p_dec );

    /* The next packet in order is the codebooks header
     * We need to watch out that this packet is not missing as a
     * missing or corrupted header is fatal. */
    oggpacket.b_o_s  = 0;
    oggpacket.bytes  = pi_size[2];
    oggpacket.packet = pp_data[2];
    if( th_decode_headerin( &p_sys->ti, &p_sys->tc, &ts, &oggpacket ) < 0 )
    {
        msg_Err( p_dec, "3rd Theora header is corrupted" );
        goto error;
    }

    if( !p_sys->b_packetizer )
    {
        /* We have all the headers, initialize decoder */
        if ( ( p_sys->tcx = th_decode_alloc( &p_sys->ti, ts ) ) == NULL )
        {
            msg_Err( p_dec, "Could not allocate Theora decoder" );
            goto error;
        }

        i_pp = var_InheritInteger( p_dec, DEC_CFG_PREFIX "postproc" );
        if ( i_pp >= 0 && !th_decode_ctl( p_sys->tcx,
                    TH_DECCTL_GET_PPLEVEL_MAX, &i_max_pp, sizeof(int) ) )
        {
            i_pp = __MIN( i_pp, i_max_pp );
            if ( th_decode_ctl( p_sys->tcx, TH_DECCTL_SET_PPLEVEL,
                                &i_pp, sizeof(int) ) )
                msg_Err( p_dec, "Failed to set post processing level to %d",
                         i_pp );
            else
                msg_Dbg( p_dec, "Set post processing level to %d / %d",
                         i_pp, i_max_pp );
        }

    }
    else
    {
        void* p_extra = realloc( p_dec->fmt_out.p_extra,
                                 p_dec->fmt_in.i_extra );
        if( unlikely( p_extra == NULL ) )
        {
            /* Clean up the decoder setup info... we're done with it */
            th_setup_free( ts );
            return VLC_ENOMEM;
        }
        p_dec->fmt_out.p_extra = p_extra;
        p_dec->fmt_out.i_extra = p_dec->fmt_in.i_extra;
        memcpy( p_dec->fmt_out.p_extra,
                p_dec->fmt_in.p_extra, p_dec->fmt_out.i_extra );
    }

    /* Clean up the decoder setup info... we're done with it */
    th_setup_free( ts );
    return VLC_SUCCESS;

error:
    /* Clean up the decoder setup info... we're done with it */
    th_setup_free( ts );
    return VLC_EGENERIC;
}
Example #13
static GstFlowReturn
theora_handle_type_packet (GstTheoraDec * dec)
{
  gint par_num, par_den;
  GstFlowReturn ret = GST_FLOW_OK;
  GstVideoCodecState *state;
  GstVideoFormat fmt;
  GstVideoInfo *info;

  if (!dec->input_state)
    return GST_FLOW_NOT_NEGOTIATED;

  info = &dec->input_state->info;

  GST_DEBUG_OBJECT (dec, "fps %d/%d, PAR %d/%d",
      dec->info.fps_numerator, dec->info.fps_denominator,
      dec->info.aspect_numerator, dec->info.aspect_denominator);

  /* calculate par
   * the info.aspect_* values reflect PAR;
   * 0:x and x:0 are allowed and can be interpreted as 1:1.
   */
  par_num = GST_VIDEO_INFO_PAR_N (info);
  par_den = GST_VIDEO_INFO_PAR_D (info);

  /* If we have a default PAR, see if the decoder specified a different one */
  if (par_num == 1 && par_den == 1 &&
      (dec->info.aspect_numerator != 0 && dec->info.aspect_denominator != 0)) {
    par_num = dec->info.aspect_numerator;
    par_den = dec->info.aspect_denominator;
  }
  /* theora has:
   *
   *  width/height : dimension of the encoded frame 
   *  pic_width/pic_height : dimension of the visible part
   *  pic_x/pic_y : offset in encoded frame where visible part starts
   */
  GST_DEBUG_OBJECT (dec, "dimension %dx%d, PAR %d/%d", dec->info.pic_width,
      dec->info.pic_height, par_num, par_den);
  GST_DEBUG_OBJECT (dec, "frame dimension %dx%d, offset %d:%d",
      dec->info.pic_width, dec->info.pic_height,
      dec->info.pic_x, dec->info.pic_y);

  switch (dec->info.pixel_fmt) {
    case TH_PF_420:
      fmt = GST_VIDEO_FORMAT_I420;
      break;
    case TH_PF_422:
      fmt = GST_VIDEO_FORMAT_Y42B;
      break;
    case TH_PF_444:
      fmt = GST_VIDEO_FORMAT_Y444;
      break;
    default:
      goto unsupported_format;
  }

  GST_VIDEO_INFO_WIDTH (info) = dec->info.pic_width;
  GST_VIDEO_INFO_HEIGHT (info) = dec->info.pic_height;

  /* Ensure correct offsets in chroma for formats that need it
   * by rounding the offset. libtheora will add proper pixels,
   * so no need to handle them ourselves. */
  if (dec->info.pic_x & 1 && dec->info.pixel_fmt != TH_PF_444) {
    GST_VIDEO_INFO_WIDTH (info)++;
  }
  if (dec->info.pic_y & 1 && dec->info.pixel_fmt == TH_PF_420) {
    GST_VIDEO_INFO_HEIGHT (info)++;
  }

  GST_DEBUG_OBJECT (dec, "after fixup frame dimension %dx%d, offset %d:%d",
      info->width, info->height, dec->info.pic_x, dec->info.pic_y);

  /* done */
  dec->decoder = th_decode_alloc (&dec->info, dec->setup);

  if (th_decode_ctl (dec->decoder, TH_DECCTL_SET_TELEMETRY_MV,
          &dec->telemetry_mv, sizeof (dec->telemetry_mv)) != TH_EIMPL) {
    GST_WARNING_OBJECT (dec, "Could not enable MV visualisation");
  }
  if (th_decode_ctl (dec->decoder, TH_DECCTL_SET_TELEMETRY_MBMODE,
          &dec->telemetry_mbmode, sizeof (dec->telemetry_mbmode)) != TH_EIMPL) {
    GST_WARNING_OBJECT (dec, "Could not enable MB mode visualisation");
  }
  if (th_decode_ctl (dec->decoder, TH_DECCTL_SET_TELEMETRY_QI,
          &dec->telemetry_qi, sizeof (dec->telemetry_qi)) != TH_EIMPL) {
    GST_WARNING_OBJECT (dec, "Could not enable QI mode visualisation");
  }
  if (th_decode_ctl (dec->decoder, TH_DECCTL_SET_TELEMETRY_BITS,
          &dec->telemetry_bits, sizeof (dec->telemetry_bits)) != TH_EIMPL) {
    GST_WARNING_OBJECT (dec, "Could not enable BITS mode visualisation");
  }

  /* Create the output state */
  dec->output_state = state =
      gst_video_decoder_set_output_state (GST_VIDEO_DECODER (dec), fmt,
      info->width, info->height, dec->input_state);

  /* FIXME : Do we still need to set fps/par now that we pass the reference input stream ? */
  state->info.fps_n = dec->info.fps_numerator;
  state->info.fps_d = dec->info.fps_denominator;
  state->info.par_n = par_num;
  state->info.par_d = par_den;

  /* these values are for all versions of the colorspace specified in the
   * theora info */
  state->info.chroma_site = GST_VIDEO_CHROMA_SITE_JPEG;
  state->info.colorimetry.range = GST_VIDEO_COLOR_RANGE_16_235;
  state->info.colorimetry.matrix = GST_VIDEO_COLOR_MATRIX_BT601;
  state->info.colorimetry.transfer = GST_VIDEO_TRANSFER_BT709;
  switch (dec->info.colorspace) {
    case TH_CS_ITU_REC_470M:
      state->info.colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_BT470M;
      break;
    case TH_CS_ITU_REC_470BG:
      state->info.colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_BT470BG;
      break;
    default:
      state->info.colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_UNKNOWN;
      break;
  }

  gst_video_decoder_negotiate (GST_VIDEO_DECODER (dec));

  dec->have_header = TRUE;

  return ret;

  /* ERRORS */
unsupported_format:
  {
    GST_ERROR_OBJECT (dec, "Invalid pixel format %d", dec->info.pixel_fmt);
    return GST_FLOW_ERROR;
  }
}