void update() {

		int todo = get_todo();
		int16_t* buffer = get_write_buffer();
		int samples = rb.data_left();
		const int to_write = MIN(todo, samples);

		for (int i=0; i<to_write; i++) {

			// convert the float sample to signed 16-bit PCM; clamp so
			// out-of-range input cannot wrap around
			int v = rb.read() * 32767;
			int16_t sample = CLAMP(v,-32768,32767);
			buffer[i] = sample;
		};
		write(to_write/channels);
		total_wrote += to_write;
	};
	void update() {

		_THREAD_SAFE_METHOD_;
		int todo = get_todo();
		int16_t* buffer = get_write_buffer();
		int frames = rb.data_left()/channels;
		const int to_write = MIN(todo, frames);

		for (int i=0; i<to_write*channels; i++) {

			int v = rb.read() * 32767;
			int16_t sample = CLAMP(v,-32768,32767);
			buffer[i] = sample;
		};
		write(to_write);
		total_wrote += to_write;
	};
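Both update() variants above do the same core conversion: pull interleaved float samples from the ring buffer, scale them to the 16-bit range, and clamp before storing. A minimal standalone sketch of that step, assuming plain arrays; the function names and the float* input are illustrative only, the real code reads from rb:

#include <cstdint>

static inline int16_t sample_to_s16(float s) {

	// scale [-1, 1] to the int16 range, then clamp so out-of-range
	// input cannot wrap around
	int v = int(s * 32767.0f);
	if (v > 32767) v = 32767;
	if (v < -32768) v = -32768;
	return int16_t(v);
}

static void convert_block(const float *src, int16_t *dst, int frames, int channels) {

	// interleaved layout: frames * channels samples in total
	for (int i = 0; i < frames * channels; i++)
		dst[i] = sample_to_s16(src[i]);
}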
Example #3
void AudioStreamMPC::update() {

	if (!active || paused)
		return;

	int todo=get_todo();

	// keep decoding while at least one full decoder buffer still fits
	while(todo>MPC_DECODER_BUFFER_LENGTH/si.channels) {

		mpc_frame_info frame;

		frame.buffer=sample_buffer;

		mpc_status err = mpc_demux_decode(demux, &frame);
		if (frame.bits!=-1) { // bits == -1 means no frame was decoded (end of stream)

			int16_t *dst_buff = get_write_buffer();

#ifdef MPC_FIXED_POINT
			// fixed-point decoder output: shift out the fractional bits and clamp to 16-bit

			for( int i = 0; i < frame.samples * si.channels; i++) {
				int tmp = sample_buffer[i] >> MPC_FIXED_POINT_FRACTPART;
				if (tmp > ((1 << 15) - 1)) tmp = ((1 << 15) - 1);
				if (tmp < -(1 << 15)) tmp = -(1 << 15);
				dst_buff[i] = tmp;
			}
#else
			// floating-point decoder output: scale to the 16-bit range and clamp
			for( int i = 0; i < frame.samples * si.channels; i++) {

				int tmp = Math::fast_ftoi(sample_buffer[i]*32767.0);
				if (tmp > ((1 << 15) - 1)) tmp = ((1 << 15) - 1);
				if (tmp < -(1 << 15)) tmp = -(1 << 15);
				dst_buff[i] = tmp;

			}

#endif

			int frames = frame.samples;
			write(frames);
			todo-=frames;
		} else {

			if (err != MPC_STATUS_OK) {
Example #4
void VideoStreamTheora::update() {

	if (!playing) {
		//printf("not playing\n");
		return;
	};

	double ctime = AudioServer::get_singleton()->get_mix_time();

	if (last_update_time) {
		double delta = (ctime-last_update_time);
		time+=delta;
		//print_line("delta: "+rtos(delta));
	}
	last_update_time=ctime;


	int audio_todo = get_todo();
	ogg_packet op;
	int audio_pending = 0;


	while (vorbis_p && audio_todo) {
		int ret;
		float **pcm;

		/* if there's pending, decoded audio, grab it */
		if ((ret=vorbis_synthesis_pcmout(&vd,&pcm))>0) {

			audio_pending = ret;
			int16_t* out = get_write_buffer();
			int count = 0;
			int to_read = MIN(ret, audio_todo);
			for (int i=0; i<to_read; i++) {

				for(int j=0;j<vi.channels;j++){
					int val=Math::fast_ftoi(pcm[j][i]*32767.f);
					if(val>32767)val=32767;
					if(val<-32768)val=-32768;
					out[count++] = val;
				};
			};
			vorbis_synthesis_read(&vd, to_read); // tell libvorbis how many samples were consumed
			audio_todo -= to_read;
			audio_frames_wrote += to_read;
			write(to_read);
			audio_pending -= to_read;
			if (audio_todo==0)
				buffering=false;


		} else {

			/* no pending audio; is there a pending packet to decode? */
			if (ogg_stream_packetout(&vo,&op)>0){
				if(vorbis_synthesis(&vb,&op)==0) { /* test for success! */
					vorbis_synthesis_blockin(&vd,&vb);
				}
			} else {  /* we need more data; break out to suck in another page */
				//printf("need moar data\n");
				break;
			};
		}
	}

	while(theora_p && !videobuf_ready){
		/* theora is one in, one out... */
		if(ogg_stream_packetout(&to,&op)>0){


			if(pp_inc){
				pp_level+=pp_inc;
				th_decode_ctl(td,TH_DECCTL_SET_PPLEVEL,&pp_level,
							  sizeof(pp_level));
				pp_inc=0;
			}
			/*HACK: This should be set after a seek or a gap, but we might not have
			a granulepos for the first packet (we only have them for the last
			packet on a page), so we just set it as often as we get it.
			To do this right, we should back-track from the last packet on the
			page and compute the correct granulepos for the first packet after
			a seek or a gap.*/
			if(op.granulepos>=0){
				th_decode_ctl(td,TH_DECCTL_SET_GRANPOS,&op.granulepos,
							  sizeof(op.granulepos));
			}
			ogg_int64_t videobuf_granulepos;
			if(th_decode_packetin(td,&op,&videobuf_granulepos)==0){
				videobuf_time=th_granule_time(td,videobuf_granulepos);
				//printf("frame time %f, play time %f, ready %i\n", (float)videobuf_time, get_time(), videobuf_ready);

				/* is it already too old to be useful?  This is only actually
				 useful cosmetically after a SIGSTOP.  Note that we have to
				 decode the frame even if we don't show it (for now) due to
				 keyframing.  Soon enough libtheora will be able to deal
				 with non-keyframe seeks.  */

				if(videobuf_time>=get_time())
					videobuf_ready=1;
				else{
					/*If we are too slow, reduce the pp level.*/
					pp_inc=pp_level>0?-1:0;
				}
			}

		} else
			break;
	}

	if (/*!videobuf_ready && */ audio_pending == 0 && file->eof_reached()) {
		printf("video done, stopping\n");
		stop();
		return;
	};

	if (!videobuf_ready || audio_todo > 0){
		/* no data yet for somebody.  Grab another page */

		buffer_data();
		while(ogg_sync_pageout(&oy,&og)>0){
			queue_page(&og);
		}
	}

	/* If playback has begun, top audio buffer off immediately. */
	//if(stateflag) audio_write_nonblocking();

	/* are we at or past time for this video frame? */
	if(videobuf_ready && videobuf_time<=get_time()){

		video_write();
		videobuf_ready=0;
	} else {
		//printf("frame at %f not ready (time %f), ready %i\n", (float)videobuf_time, get_time(), videobuf_ready);
	}

	float tdiff=videobuf_time-get_time();
	/*If we have lots of extra time, increase the post-processing level.*/
	if(tdiff>ti.fps_denominator*0.25/ti.fps_numerator){
		pp_inc=pp_level<pp_level_max?1:0;
	}
	else if(tdiff<ti.fps_denominator*0.05/ti.fps_numerator){
		pp_inc=pp_level>0?-1:0;
	}
};
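The tail of VideoStreamTheora::update() adapts the Theora post-processing level to how much slack remains between the decoded frame's timestamp and the playback clock. A minimal sketch of that heuristic in isolation; the function and parameter names are illustrative, not part of the original class:

// Returns the adjustment for the post-processing level: +1 when there is
// plenty of slack, -1 when decoding is running late, 0 otherwise.
// frame_period is fps_denominator / fps_numerator, in seconds.
static int pp_adjustment(double video_time, double play_time,
		double frame_period, int pp_level, int pp_level_max) {

	double slack = video_time - play_time;
	if (slack > frame_period * 0.25)
		return (pp_level < pp_level_max) ? 1 : 0; // headroom: raise quality
	if (slack < frame_period * 0.05)
		return (pp_level > 0) ? -1 : 0; // running late: drop quality
	return 0;
}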
	int get_total_wrote() {

		// frames handed to write() minus what is still pending in the
		// output buffer (total capacity minus free space)
		return total_wrote - (get_total() - get_todo());
	};
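get_total_wrote() estimates how many of the written frames have actually been consumed: everything handed to write(), minus what is still pending in the output buffer (capacity minus free space). A tiny worked example with assumed numbers:

#include <cstdio>

int main() {

	// assumed values, purely illustrative
	int total_wrote = 44100; // frames handed to write() so far
	int total = 4096;        // internal buffer capacity, in frames
	int todo = 1024;         // free space currently left in the buffer

	int pending = total - todo;               // 3072 frames not yet played
	int total_played = total_wrote - pending; // 41028 frames consumed

	printf("played %d of %d written frames\n", total_played, total_wrote);
	return 0;
}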