Example #1
void VideoStreamPlaybackGDNative::update(float p_delta) {
	if (!playing || paused) {
		return;
	}
	if (!file) {
		return;
	}
	time += p_delta;
	ERR_FAIL_COND(interface == NULL);
	interface->update(data_struct, p_delta);

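	// Push decoded audio to the mixer, flushing any samples left over from the previous decode first.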
	if (mix_callback) {
		if (pcm_write_idx >= 0) {
			// Previous remains
			int mixed = mix_callback(mix_udata, pcm, samples_decoded);
			if (mixed == samples_decoded) {
				pcm_write_idx = -1;
			} else {
				samples_decoded -= mixed;
				pcm_write_idx += mixed;
			}
		}
		if (pcm_write_idx < 0) {
			samples_decoded = interface->get_audioframe(data_struct, pcm, AUX_BUFFER_SIZE);
			pcm_write_idx = mix_callback(mix_udata, pcm, samples_decoded);
			if (pcm_write_idx == samples_decoded) {
				pcm_write_idx = -1;
			} else {
				samples_decoded -= pcm_write_idx;
			}
		}
	}

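	// Keep updating the texture until the stream's playback position catches up with the elapsed time.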
	while (interface->get_playback_position(data_struct) < time && playing) {

		update_texture();
	}
}
Example #2
void VideoStreamPlaybackWebm::update(float p_delta) {

	if ((!playing || paused) || !video)
		return;

	time += p_delta;

	if (time < video_pos) {
		return;
	}

	bool audio_buffer_full = false;

	if (samples_offset > -1) {

		//Mix remaining samples
		const int to_read = num_decoded_samples - samples_offset;
		const int mixed = mix_callback(mix_udata, pcm + samples_offset * webm->getChannels(), to_read);
		if (mixed != to_read) {

			samples_offset += mixed;
			audio_buffer_full = true;
		} else {

			samples_offset = -1;
		}
	}

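	// Demux packets until enough video frames are queued or the audio buffer fills
	// (without audio, until at least one video frame is queued).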
	const bool hasAudio = (audio && mix_callback);
	while ((hasAudio && !audio_buffer_full && !has_enough_video_frames()) ||
			(!hasAudio && video_frames_pos == 0)) {

		if (hasAudio && !audio_buffer_full && audio_frame->isValid() &&
				audio->getPCMF(*audio_frame, pcm, num_decoded_samples) && num_decoded_samples > 0) {

			const int mixed = mix_callback(mix_udata, pcm, num_decoded_samples);

			if (mixed != num_decoded_samples) {
				samples_offset = mixed;
				audio_buffer_full = true;
			}
		}

		WebMFrame *video_frame;
		if (video_frames_pos >= video_frames_capacity) {

			WebMFrame **video_frames_new = (WebMFrame **)memrealloc(video_frames, ++video_frames_capacity * sizeof(void *));
			ERR_FAIL_COND(!video_frames_new); //Out of memory
			video_frames = video_frames_new;
			video_frames[video_frames_capacity - 1] = memnew(WebMFrame);
		}
		video_frame = video_frames[video_frames_pos];

		if (!webm->readFrame(video_frame, audio_frame)) //This will invalidate frames
			break; //Can't demux, EOS?

		if (video_frame->isValid())
			++video_frames_pos;
	};

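	// Decode queued video frames in order until one is converted and uploaded to the texture.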
	bool video_frame_done = false;
	while (video_frames_pos > 0 && !video_frame_done) {

		WebMFrame *video_frame = video_frames[0];

		// It seems VPXDecoder::decode has to be executed even though we might skip this frame
		if (video->decode(*video_frame)) {

			VPXDecoder::IMAGE_ERROR err;
			VPXDecoder::Image image;

			if (should_process(*video_frame)) {

				if ((err = video->getImage(image)) != VPXDecoder::NO_FRAME) {

					if (err == VPXDecoder::NO_ERROR && image.w == webm->getWidth() && image.h == webm->getHeight()) {

						PoolVector<uint8_t>::Write w = frame_data.write();
						bool converted = false;

						if (image.chromaShiftW == 1 && image.chromaShiftH == 1) {

							yuv420_2_rgb8888(w.ptr(), image.planes[0], image.planes[2], image.planes[1], image.w, image.h, image.linesize[0], image.linesize[1], image.w << 2, 0);
							// 								libyuv::I420ToARGB(image.planes[0], image.linesize[0], image.planes[2], image.linesize[2], image.planes[1], image.linesize[1], w.ptr(), image.w << 2, image.w, image.h);
							converted = true;
						} else if (image.chromaShiftW == 1 && image.chromaShiftH == 0) {

							yuv422_2_rgb8888(w.ptr(), image.planes[0], image.planes[2], image.planes[1], image.w, image.h, image.linesize[0], image.linesize[1], image.w << 2, 0);
							// 								libyuv::I422ToARGB(image.planes[0], image.linesize[0], image.planes[2], image.linesize[2], image.planes[1], image.linesize[1], w.ptr(), image.w << 2, image.w, image.h);
							converted = true;
						} else if (image.chromaShiftW == 0 && image.chromaShiftH == 0) {

							yuv444_2_rgb8888(w.ptr(), image.planes[0], image.planes[2], image.planes[1], image.w, image.h, image.linesize[0], image.linesize[1], image.w << 2, 0);
							// 								libyuv::I444ToARGB(image.planes[0], image.linesize[0], image.planes[2], image.linesize[2], image.planes[1], image.linesize[1], w.ptr(), image.w << 2, image.w, image.h);
							converted = true;
						} else if (image.chromaShiftW == 2 && image.chromaShiftH == 0) {

							// 								libyuv::I411ToARGB(image.planes[0], image.linesize[0], image.planes[2], image.linesize[2], image.planes[1], image.linesize[1], w.ptr(), image.w << 2, image.w, image.h);
							// 								converted = true;
						}

						if (converted) {
							Ref<Image> img = memnew(Image(image.w, image.h, 0, Image::FORMAT_RGBA8, frame_data));
							texture->set_data(img); //Zero copy send to visual server
							video_frame_done = true;
						}
					}
				}
			}
		}

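		// Advance the playback position and rotate the consumed frame to the back of the queue for reuse.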
		video_pos = video_frame->time;
		memmove(video_frames, video_frames + 1, (--video_frames_pos) * sizeof(void *));
		video_frames[video_frames_pos] = video_frame;
	}

	if (video_frames_pos == 0 && webm->isEOS())
		stop();
}
Example #3
void VideoStreamPlaybackTheora::update(float p_delta) {

	if (!file)
		return;

	if (!playing || paused) {
		//printf("not playing\n");
		return;
	};

#ifdef THEORA_USE_THREAD_STREAMING
	thread_sem->post();
#endif

	//print_line("play "+rtos(p_delta));
	time += p_delta;

	if (videobuf_time > get_time()) {
		return; //no new frames need to be produced
	}

	bool frame_done = false;
	bool audio_done = !vorbis_p;

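	// Keep decoding until a video frame is ready and the audio (if any) has caught up.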
	while (!frame_done || (!audio_done && !vorbis_eos)) {
		//a frame needs to be produced

		ogg_packet op;
		bool no_theora = false;

		while (vorbis_p) {
			int ret;
			float **pcm;

			bool buffer_full = false;

			/* if there's pending, decoded audio, grab it */
			ret = vorbis_synthesis_pcmout(&vd, &pcm);
			if (ret > 0) {

				const int AUXBUF_LEN = 4096;
				int to_read = ret;
				int16_t aux_buffer[AUXBUF_LEN];

				while (to_read) {

					int m = MIN(AUXBUF_LEN / vi.channels, to_read);

					int count = 0;

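					// Interleave the planar float PCM into the aux buffer, clamped to signed 16-bit.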
					for (int j = 0; j < m; j++) {
						for (int i = 0; i < vi.channels; i++) {

							int val = Math::fast_ftoi(pcm[i][j] * 32767.f);
							if (val > 32767) val = 32767;
							if (val < -32768) val = -32768;
							aux_buffer[count++] = val;
						}
					}

					if (mix_callback) {
						int mixed = mix_callback(mix_udata, aux_buffer, m);
						to_read -= mixed;
						if (mixed != m) { //could mix no more
							buffer_full = true;
							break;
						}
					} else {
						to_read -= m; //just pretend we sent the audio
					}
				}

				vorbis_synthesis_read(&vd, ret - to_read);

				if (vd.granulepos >= 0) {
					//print_line("wrote: "+itos(audio_frames_wrote)+" gpos: "+itos(vd.granulepos));
				}

				//print_line("mix audio!");

				audio_frames_wrote += ret - to_read;

				//print_line("AGP: "+itos(vd.granulepos)+" added "+itos(ret-to_read));

			} else {

				/* no pending audio; is there a pending packet to decode? */
				if (ogg_stream_packetout(&vo, &op) > 0) {
					if (vorbis_synthesis(&vb, &op) == 0) { /* test for success! */
						vorbis_synthesis_blockin(&vd, &vb);
					}
				} else { /* we need more data; break out to suck in another page */
					//printf("need moar data\n");
					break;
				};
			}

			audio_done = videobuf_time < (audio_frames_wrote / float(vi.rate));

			if (buffer_full)
				break;
		}

		while (theora_p && !frame_done) {
			/* theora is one in, one out... */
			if (ogg_stream_packetout(&to, &op) > 0) {

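				// NOTE: the "false &&" makes this block unreachable, so the post-processing level is never changed here.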
				if (false && pp_inc) {
					pp_level += pp_inc;
					th_decode_ctl(td, TH_DECCTL_SET_PPLEVEL, &pp_level,
							sizeof(pp_level));
					pp_inc = 0;
				}
				/*HACK: This should be set after a seek or a gap, but we might not have
				a granulepos for the first packet (we only have them for the last
				packet on a page), so we just set it as often as we get it.
				To do this right, we should back-track from the last packet on the
				page and compute the correct granulepos for the first packet after
				a seek or a gap.*/
				if (op.granulepos >= 0) {
					th_decode_ctl(td, TH_DECCTL_SET_GRANPOS, &op.granulepos,
							sizeof(op.granulepos));
				}
				ogg_int64_t videobuf_granulepos;
				if (th_decode_packetin(td, &op, &videobuf_granulepos) == 0) {
					videobuf_time = th_granule_time(td, videobuf_granulepos);

					//printf("frame time %f, play time %f, ready %i\n", (float)videobuf_time, get_time(), videobuf_ready);

					/* is it already too old to be useful?  This is only actually
					 useful cosmetically after a SIGSTOP.  Note that we have to
					 decode the frame even if we don't show it (for now) due to
					 keyframing.  Soon enough libtheora will be able to deal
					 with non-keyframe seeks.  */

					if (videobuf_time >= get_time()) {
						frame_done = true;
					} else {
						/*If we are too slow, reduce the pp level.*/
						pp_inc = pp_level > 0 ? -1 : 0;
					}
				}

			} else {
				no_theora = true;
				break;
			}
		}

#ifdef THEORA_USE_THREAD_STREAMING
		if (file && thread_eof && no_theora && theora_eos && ring_buffer.data_left() == 0) {
#else
		if (file && /*!videobuf_ready && */ no_theora && theora_eos) {
#endif
			printf("video done, stopping\n");
			stop();
			return;
		};

		if (!frame_done || !audio_done) {
			//what's the point of waiting for audio to grab a page?

			buffer_data();
			while (ogg_sync_pageout(&oy, &og) > 0) {
				queue_page(&og);
			}
		}

		/* If playback has begun, top audio buffer off immediately. */
		//if(stateflag) audio_write_nonblocking();

		/* are we at or past time for this video frame? */
		if (videobuf_ready && videobuf_time <= get_time()) {

			//video_write();
			//videobuf_ready=0;
		} else {
			//printf("frame at %f not ready (time %f), ready %i\n", (float)videobuf_time, get_time(), videobuf_ready);
		}

		float tdiff = videobuf_time - get_time();
		/*If we have lots of extra time, increase the post-processing level.*/
		if (tdiff > ti.fps_denominator * 0.25 / ti.fps_numerator) {
			pp_inc = pp_level < pp_level_max ? 1 : 0;
		} else if (tdiff < ti.fps_denominator * 0.05 / ti.fps_numerator) {
			pp_inc = pp_level > 0 ? -1 : 0;
		}
	}

	video_write();
}

void VideoStreamPlaybackTheora::play() {

	if (!playing) {
		time = 0;
	} else {
		stop();
	}

	playing = true;
	delay_compensation = ProjectSettings::get_singleton()->get("audio/video_delay_compensation_ms");
	delay_compensation /= 1000.0;
}

void VideoStreamPlaybackTheora::stop() {

	if (playing) {

		clear();
		set_file(file_name); //reset
	}
	playing = false;
	time = 0;
}

bool VideoStreamPlaybackTheora::is_playing() const {

	return playing;
}

void VideoStreamPlaybackTheora::set_paused(bool p_paused) {

	paused = p_paused;
}

bool VideoStreamPlaybackTheora::is_paused(bool p_paused) const {

	return paused;
}

void VideoStreamPlaybackTheora::set_loop(bool p_enable) {

	// Looping is not supported.
}

bool VideoStreamPlaybackTheora::has_loop() const {

	return false;
}

float VideoStreamPlaybackTheora::get_length() const {

	return 0;
}

String VideoStreamPlaybackTheora::get_stream_name() const {

	return "";
}

int VideoStreamPlaybackTheora::get_loop_count() const {

	return 0;
}

float VideoStreamPlaybackTheora::get_playback_position() const {

	return get_time();
}

void VideoStreamPlaybackTheora::seek(float p_time) {

	// Seeking is not supported.
}

void VideoStreamPlaybackTheora::set_mix_callback(AudioMixCallback p_callback, void *p_userdata) {

	mix_callback = p_callback;
	mix_udata = p_userdata;
}