Example #1
static void process_vorbis_packets(VORBIS_FEED *feed) {
	int res, samples, n, stat;
	time_t now;
	float **pcm;
	while (1) {
		res = ogg_stream_packetout(&(feed->os), &(feed->op));
		if (res == 0) return;
		if (res < 0) {
			now = time(NULL);
			logmsg("%s: out of sync\n", ctime(&now));
			continue;
			}
		stat = vorbis_synthesis(&(feed->vb), &(feed->op));
		if (stat == 0)
			vorbis_synthesis_blockin(&(feed->vd), &(feed->vb));
		else if (stat == OV_ENOTAUDIO) {
			logmsg("non-audio packet ignored\n");
			continue;
			}
		else {
			logmsg("bad vorbis packet\n");
			continue;
			}
		while ((samples = vorbis_synthesis_pcmout(&(feed->vd),
				&pcm)) > 0) {
			n = write_ring(feed, pcm, samples);
			vorbis_synthesis_read(&(feed->vd), n);
			}
		}
	return;
	}
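write_ring() is not shown above; the important detail is that it returns how many frames it actually stored, and that count (not `samples`) is what gets passed to vorbis_synthesis_read(), so anything the ring buffer could not take stays queued inside libvorbis. A minimal sketch of such a helper, using hypothetical VORBIS_FEED fields and ring helpers (ring_space(), ring_put()) that are not part of the example:
static int write_ring(VORBIS_FEED *feed, float **pcm, int samples)
{
	/* hypothetical: number of whole frames the ring buffer can still hold */
	int frames = ring_space(feed) / feed->channels;
	int i, ch;

	if (frames > samples)
		frames = samples;
	for (i = 0; i < frames; i++) {
		for (ch = 0; ch < feed->channels; ch++) {
			float s = pcm[ch][i];
			/* clamp to [-1,1] and convert to 16-bit, as the other examples do */
			if (s > 1.0f) s = 1.0f;
			if (s < -1.0f) s = -1.0f;
			ring_put(feed, (short)(s * 32767.f));	/* hypothetical helper */
		}
	}
	return frames;	/* may be less than samples; the rest stays in the decoder */
}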
Example #2
/*
 * Class:     org_tritonus_lowlevel_pvorbis_DspState
 * Method:    pcmOut_native
 * Signature: ([[F)I
 */
JNIEXPORT jint JNICALL
Java_org_tritonus_lowlevel_pvorbis_DspState_pcmOut_1native
(JNIEnv* env, jobject obj, jobjectArray afPcm)
{
	vorbis_dsp_state*	handle;
	float**			pcm;
	int			nSamples;
	int			nChannels;
	int			nChannel;
	jfloatArray		floatArray;

	if (debug_flag) { fprintf(debug_file, "Java_org_tritonus_lowlevel_pvorbis_DspState_pcmOut(): begin\n"); }
	handle = getHandle(env, obj);
	nSamples = vorbis_synthesis_pcmout(handle, &pcm);
	if (debug_flag) { fprintf(debug_file, "Java_org_tritonus_lowlevel_pvorbis_DspState_pcmOut(): samples: %d\n", nSamples); }
	nChannels = handle->vi->channels;
	if (debug_flag) { fprintf(debug_file, "Java_org_tritonus_lowlevel_pvorbis_DspState_pcmOut(): channels: %d\n", nChannels); }
	for (nChannel = 0; nChannel < nChannels; nChannel++)
	{
		floatArray = (*env)->NewFloatArray(env, nSamples);
		if (debug_flag) { fprintf(debug_file, "Java_org_tritonus_lowlevel_pvorbis_DspState_pcmOut(): float array: %p\n", floatArray); }
		if (nSamples > 0)
		{
			(*env)->SetFloatArrayRegion(env, floatArray, 0,
						    nSamples, pcm[nChannel]);
		}
		(*env)->SetObjectArrayElement(env, afPcm, nChannel, floatArray);
	}
	if (debug_flag) { fprintf(debug_file, "Java_org_tritonus_lowlevel_pvorbis_DspState_pcmOut(): end\n"); }
	return nSamples;
}
Example #3
int VorbisDecode(Vorbis *vorbis, void *buffer, int buflen, AVCallback callback, long streamend, long granulepos) {
  // setup ogg packet
  vorbis->ogg.packet = buffer;
  vorbis->ogg.bytes = buflen;
  
  int status = 0;
  if (vorbis->headers < 3) {
    status = vorbis_synthesis_headerin(&vorbis->info, &vorbis->comment, &vorbis->ogg);
    
    vorbis->headers++;
    if (status == 0 && vorbis->headers == 3) {
      vorbis->outlen /= vorbis->info.channels;
      
      status = vorbis_synthesis_init(&vorbis->dsp, &vorbis->info);
      if (status == 0)
        status = vorbis_block_init(&vorbis->dsp, &vorbis->block);
    }
  } else {
    // decode
    status = vorbis_synthesis(&vorbis->block, &vorbis->ogg);
    if (status == 0)
      status = vorbis_synthesis_blockin(&vorbis->dsp, &vorbis->block);
    
    int samples = 0;
    float **pcm;
    int cutoff = granulepos - vorbis->lastgranule;
    while ((samples = vorbis_synthesis_pcmout(&vorbis->dsp, &pcm)) > 0) {      
      // interleave
      int channels = vorbis->info.channels;
      int len = samples < vorbis->outlen ? samples : vorbis->outlen;
      if(streamend) {
        len = len > cutoff ? cutoff : len;
        cutoff -= len;
      }
      
      for (int i = 0; i < channels; i++) {
        float *buf = &vorbis->outbuf[i];
        
        for (int j = 0; j < len; j++) {
          *buf = pcm[i][j];
          buf += channels;
        }
      }
      
      status = vorbis_synthesis_read(&vorbis->dsp, len);
      callback(len * channels);
      if(streamend && cutoff <= 0)
        break;
    }
  }
  
  if (vorbis->ogg.b_o_s)
    vorbis->ogg.b_o_s = 0;

  if(granulepos > 0)
    vorbis->lastgranule = granulepos;
  
  return status;
}
Example #4
/*
  return: qtrue if the audio stream wants more packets
*/
static qboolean loadAudio(void) {
	qboolean anyDataTransferred	= qtrue;
	float	**pcm;
	float	*right,*left;
	int		samples, samplesNeeded;
	int		i;
	short*	ptr;
	ogg_packet		op;
	vorbis_block	vb;

	memset(&op,0,sizeof(op));
	memset(&vb,0,sizeof(vb));
	vorbis_block_init(&g_ogm.vd,&vb);

	while(anyDataTransferred && g_ogm.currentTime+MAX_AUDIO_PRELOAD>(int)(g_ogm.vd.granulepos*1000/g_ogm.vi.rate)) {
		anyDataTransferred =qfalse;

		if((samples=vorbis_synthesis_pcmout(&g_ogm.vd,&pcm))>0) {
			// vorbis -> raw
			ptr = (short*)rawBuffer;
			samplesNeeded = (SIZEOF_RAWBUFF) / (2*2);// (width*channel)
			if(samples<samplesNeeded)
				samplesNeeded = samples;

			left=pcm[0];
			right=(g_ogm.vi.channels>1)?pcm[1]:pcm[0];
			for(i=0;i<samplesNeeded;++i) {
				ptr[0]=(left[i]>=-1.0f && left[i]<=1.0f)?left[i]*32767.f:32767*((left[i]>0.0f)-(left[i]<0.0f));
				ptr[1]=(right[i]>=-1.0f && right[i]<=1.0f)?right[i]*32767.f:32767*((right[i]>0.0f)-(right[i]<0.0f));
				ptr+=2;//numChans;
			}

			if(i>0) {
				// tell libvorbis how many samples we actually consumed
				vorbis_synthesis_read(&g_ogm.vd,i);

				S_RawSamples( 0, i, g_ogm.vi.rate, 2, 2, rawBuffer, 1.0f, -1 );

				anyDataTransferred = qtrue;
			}
		}

		if(!anyDataTransferred)	{
			// op -> vorbis
			if(ogg_stream_packetout(&g_ogm.os_audio,&op)) {
				if(vorbis_synthesis(&vb,&op)==0)
					vorbis_synthesis_blockin(&g_ogm.vd,&vb);
				anyDataTransferred = qtrue;
			}
		}
	}

	vorbis_block_clear(&vb);

	if(g_ogm.currentTime+MIN_AUDIO_PRELOAD>(int)(g_ogm.vd.granulepos*1000/g_ogm.vi.rate))
		return qtrue;
	else
		return qfalse;
}
Example #5
	float VideoClip_Theora::_decodeAudio()
	{
		if (this->restarted)
		{
			return -1.0f;
		}
		ogg_packet opVorbis;
		float** pcm;
		int length = 0;
		float timeStamp = -1.0f;
		bool readPastTimestamp = false;
		float factor = 1.0f / this->audioFrequency;
		float videoTime = (float)this->lastDecodedFrameNumber / this->fps;
		float min = this->frameQueue->getSize() / this->fps + 1.0f;
		float audioTime = 0.0f;
		while (true)
		{
			length = vorbis_synthesis_pcmout(&this->info.VorbisDSPState, &pcm);
			if (length == 0)
			{
				if (ogg_stream_packetout(&this->info.VorbisStreamState, &opVorbis) > 0)
				{
					if (vorbis_synthesis(&this->info.VorbisBlock, &opVorbis) == 0)
					{
						if (timeStamp < 0 && opVorbis.granulepos >= 0)
						{
							timeStamp = (float)vorbis_granule_time(&this->info.VorbisDSPState, opVorbis.granulepos);
						}
						else if (timeStamp >= 0)
						{
							readPastTimestamp = true;
						}
						vorbis_synthesis_blockin(&this->info.VorbisDSPState, &this->info.VorbisBlock);
					}
					continue;
				}
				audioTime = this->readAudioSamples * factor;
				// always keep some audio buffered ahead of the video frames
				if (audioTime - videoTime >= min || !this->_readData())
				{
					break;
				}
			}
			if (length > 0)
			{
				this->addAudioPacket(pcm, length, this->audioGain);
				this->readAudioSamples += length;
				if (readPastTimestamp)
				{
					timeStamp += (float)length / this->info.VorbisInfo.rate;
				}
				vorbis_synthesis_read(&this->info.VorbisDSPState, length); // tell vorbis we read a number of samples
			}
		}
		return timeStamp;
	}
Example #6
void Inpin::Decode(IMediaSample* pInSample)
{
    BYTE* buf_in;

    HRESULT hr = pInSample->GetPointer(&buf_in);
    assert(SUCCEEDED(hr));
    assert(buf_in);

    const long len_in = pInSample->GetActualDataLength();
    assert(len_in >= 0);

    ogg_packet& pkt = m_packet;

    pkt.packet = buf_in;
    pkt.bytes = len_in;
    ++pkt.packetno;

    int status = vorbis_synthesis(&m_block, &pkt);
    assert(status == 0);  //TODO

    status = vorbis_synthesis_blockin(&m_dsp_state, &m_block);
    assert(status == 0);  //TODO

    typedef VorbisTypes::VORBISFORMAT2 FMT;

    const AM_MEDIA_TYPE& mt = m_connection_mtv[0];
    assert(mt.cbFormat > sizeof(FMT));
    assert(mt.pbFormat);

    const FMT& fmt = (const FMT&)(*mt.pbFormat);
    assert(fmt.channels > 0);
    assert(fmt.samplesPerSec > 0);

    float** sv;

    const int pcmout_count = vorbis_synthesis_pcmout(&m_dsp_state, &sv);

    if (pcmout_count <= 0)
        return;

    assert(sv);

    for (DWORD i = 0; i < fmt.channels; ++i)
    {
        const float* const first = sv[i];
        const float* const last = first + pcmout_count;

        samples_t& ss = m_channels[i];
        ss.insert(ss.end(), first, last);
    }

    sv = 0;

    status = vorbis_synthesis_read(&m_dsp_state, pcmout_count);
    assert(status == 0);
}
Example #7
// Try to flush the buffer
// unsuccessfully :(
  uint8_t ADM_vorbis::endDecompress( void ) 
  {
  float **sample_pcm;
  ogg_packet packet;
  
  	//vorbis_synthesis_blockin(&STRUCT->vdsp,&STRUCT->vblock);
  	vorbis_synthesis_pcmout(&STRUCT->vdsp,&sample_pcm);
        vorbis_synthesis_restart(&STRUCT->vdsp);
  	return 1;
  }
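The comment above admits the flush does not work: vorbis_synthesis_pcmout() is called but its return value is ignored, so nothing is marked consumed before the restart. A hedged sketch of what a draining variant could look like, using the same STRUCT fields as the surrounding methods; since endDecompress() has no output buffer parameter, delivering the drained frames is left as a comment:
  uint8_t ADM_vorbis::endDecompress( void )
  {
  float **sample_pcm;
  int nb_synth;

  	// drain whatever libvorbis still has buffered
  	while((nb_synth=vorbis_synthesis_pcmout(&STRUCT->vdsp,&sample_pcm))>0)
  	{
  		// hypothetical: hand nb_synth frames of sample_pcm to the caller here
  		vorbis_synthesis_read(&STRUCT->vdsp,nb_synth);	// mark them consumed
  	}
  	vorbis_synthesis_restart(&STRUCT->vdsp);
  	return 1;
  }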
Example #8
 uint8_t ADM_vorbis::run( uint8_t * ptr, uint32_t nbIn, uint8_t * outptr,   uint32_t * nbOut)
{
ogg_packet packet;
float **sample_pcm;
int	nb_synth;

	*nbOut=0;
	if(!_init) return 0;
	packet.b_o_s=0;
	packet.e_o_s=0;
	packet.bytes=nbIn;
	packet.packet=ptr;
	if(!vorbis_synthesis(&STRUCT->vblock,&packet))
	{
	      vorbis_synthesis_blockin(&STRUCT->vdsp,&STRUCT->vblock);
	 }
	 nb_synth=vorbis_synthesis_pcmout(&STRUCT->vdsp,&sample_pcm);
	 if(nb_synth<0)
	 {
	 	printf("error decoding vorbis %d\n",nb_synth);
		return 0;
	 }
	 
	 // Now convert the float / per channel samples to interleaved 16 bits pcm audio
 
 	 float scale =   32767.f * STRUCT->ampscale;
	 int16_t *out;
	 int channels,val;	 

	 
	 channels=STRUCT->vinfo.channels;
	 out=(int16_t *)outptr;
	 *nbOut=channels*2*nb_synth;
	 
	 for(uint32_t samp=0;samp<nb_synth;samp++)
	 {
	 for(uint32_t chan=0;chan<channels;chan++)
	 {
	 	     
	        val=(int)floor(sample_pcm[chan][samp]*scale);
		
		if(val>32767)	
		  	val=32767;
		if(val<-32768)
		  	val=-32768;
		*out++=val;      
	  }
	  }
	// Purge them
	 vorbis_synthesis_read(&STRUCT->vdsp,nb_synth); 
	 aprintf("This round : in %d bytes, out %d bytes\n",nbIn,*nbOut);
	return 1;
	
}								
Example #9
bool TheoraDecoder::VorbisAudioTrack::decodeSamples() {
#ifdef USE_TREMOR
	ogg_int32_t **pcm;
#else
	float **pcm;
#endif

	// if there's pending, decoded audio, grab it
	int ret = vorbis_synthesis_pcmout(&_vorbisDSP, &pcm);

	if (ret > 0) {
		if (!_audioBuffer) {
			_audioBuffer = (ogg_int16_t *)malloc(AUDIOFD_FRAGSIZE * sizeof(ogg_int16_t));
			assert(_audioBuffer);
		}

		int channels = _audStream->isStereo() ? 2 : 1;
		int count = _audioBufferFill / 2;
		int maxsamples = ((AUDIOFD_FRAGSIZE - _audioBufferFill) / channels) >> 1;
		int i;

		for (i = 0; i < ret && i < maxsamples; i++) {
			for (int j = 0; j < channels; j++) {
				int val = CLIP((int)rint(pcm[j][i] * 32767.f), -32768, 32767);
				_audioBuffer[count++] = val;
			}
		}

		vorbis_synthesis_read(&_vorbisDSP, i);
		_audioBufferFill += (i * channels) << 1;

		if (_audioBufferFill == AUDIOFD_FRAGSIZE) {
			byte flags = Audio::FLAG_16BITS;

			if (_audStream->isStereo())
				flags |= Audio::FLAG_STEREO;

#ifdef SCUMM_LITTLE_ENDIAN
			flags |= Audio::FLAG_LITTLE_ENDIAN;
#endif

			_audStream->queueBuffer((byte *)_audioBuffer, AUDIOFD_FRAGSIZE, DisposeAfterUse::YES, flags);

			// The audio mixer is now responsible for the old audio buffer.
			// We need to create a new one.
			_audioBuffer = 0;
			_audioBufferFill = 0;
		}

		return true;
	}

	return false;
}
Example #10
int VorbisProcessData(OggData *data, char *buffer)
{
	int num_samples;
	float **pcm;
	float *chan;
	int i, j;
	int val;
	ogg_packet packet;
	int len;

	len = 0;
	num_samples = 0;

	while (!len) {
		if (ogg_stream_packetout(&data->astream, &packet) != 1) {
			if (OggGetNextPage(&data->page, &data->sync, data->File)) {
				// EOF
				return -1;
			}

			ogg_stream_pagein(&data->astream, &data->page);
		} else {
			if (vorbis_synthesis(&data->vblock, &packet) == 0) {
				vorbis_synthesis_blockin(&data->vdsp, &data->vblock);
			}

			while ((num_samples = vorbis_synthesis_pcmout(&data->vdsp, &pcm)) > 0) {
				j = 0;
				for (i = 0; i < data->vinfo.channels; ++i) {
					chan = pcm[i];
					for (j = 0; j < num_samples; ++j) {
						val = static_cast<int>(chan[j] * 32767.f);
						if (val > 32767) {
							val = 32767;
						} else if (val < -32768) {
							val = -32768;
						}

						*(Sint16 *)(buffer + len
						  + (j * 2 * data->vinfo.channels) + i * 2) = (Sint16)val;
					}
				}
				len += j * 2 * data->vinfo.channels;

				// we consumed num_samples
				vorbis_synthesis_read(&data->vdsp, num_samples);
			}
		}
	}

	return len;
}
Example #11
int vorbis_decode(vorbis_ctx *v, ogg_packet *pkt, const float ***pcm)
{
	int r;

	if (0 != (r = vorbis_synthesis(&v->blk, pkt)))
		return r;

	vorbis_synthesis_blockin(&v->ds, &v->blk);

	r = vorbis_synthesis_pcmout(&v->ds, (float***)pcm);
	vorbis_synthesis_read(&v->ds, r);
	return r;
}
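A hypothetical caller for the wrapper above; since vorbis_decode() has already called vorbis_synthesis_read(), the returned channel pointers should be copied out before the next call into the decoder. The interleave/clip pattern matches the other examples; the helper name and the int16 output are assumptions:
static int decode_packet_to_s16(vorbis_ctx *v, ogg_packet *pkt,
				int16_t *out, int channels)
{
	const float **pcm;
	int n = vorbis_decode(v, pkt, &pcm);
	int i, ch;

	if (n <= 0)
		return n;	/* synthesis error, or no samples produced yet */
	for (i = 0; i < n; i++) {
		for (ch = 0; ch < channels; ch++) {
			float s = pcm[ch][i] * 32767.f;
			if (s > 32767.f) s = 32767.f;
			if (s < -32768.f) s = -32768.f;
			*out++ = (int16_t)s;
		}
	}
	return n;	/* frames written per channel */
}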
Example #12
bool TheoraDecoder::queueAudio() {
	if (!_audStream)
		return false;

	// An audio buffer should have been allocated (either in the constructor or after queuing the current buffer)
	if (!_audiobuf) {
		warning("[TheoraDecoder::queueAudio] Invalid audio buffer");
		return false;
	}

	bool queuedAudio = false;

	for (;;) {
		float **pcm;

		// if there's pending, decoded audio, grab it
		int ret = vorbis_synthesis_pcmout(&_vorbisDSP, &pcm);
		if (ret > 0) {
			int count = _audiobufFill / 2;
			int maxsamples = ((AUDIOFD_FRAGSIZE - _audiobufFill) / _vorbisInfo.channels) >> 1;
			int i;
			for (i = 0; i < ret && i < maxsamples; i++)
				for (int j = 0; j < _vorbisInfo.channels; j++) {
					int val = CLIP((int)rint(pcm[j][i] * 32767.f), -32768, 32767);
					_audiobuf[count++] = val;
				}

			vorbis_synthesis_read(&_vorbisDSP, i);
			_audiobufFill += (i * _vorbisInfo.channels) << 1;

			if (_audiobufFill == AUDIOFD_FRAGSIZE) {
				byte flags = Audio::FLAG_16BITS | Audio::FLAG_STEREO;
#ifdef SCUMM_LITTLE_ENDIAN
				flags |= Audio::FLAG_LITTLE_ENDIAN;
#endif
				_audStream->queueBuffer((byte *)_audiobuf, AUDIOFD_FRAGSIZE, DisposeAfterUse::NO, flags);

				// The audio mixer is now responsible for the old audio buffer.
				// We need to create a new one.
				_audiobuf = (ogg_int16_t *)malloc(AUDIOFD_FRAGSIZE * sizeof(ogg_int16_t));
				if (!_audiobuf) {
					warning("[TheoraDecoder::queueAudio] Cannot allocate memory for audio buffer");
					return false;
				}

				_audiobufFill = 0;
				queuedAudio = true;
			}
		} else {
			// no pending audio; is there a pending packet to decode?
			if (ogg_stream_packetout(&_vorbisOut, &_oggPacket) > 0) {
Example #13
static int oggvorbis_decode_frame(AVCodecContext *avccontext, void *data,
                        int *got_frame_ptr, AVPacket *avpkt)
{
    OggVorbisDecContext *context = avccontext->priv_data ;
    AVFrame *frame = data;
    float **pcm ;
    ogg_packet *op= &context->op;
    int samples, total_samples, total_bytes;
    int ret;
    int16_t *output;

    if(!avpkt->size){
    //FIXME flush
        return 0;
    }

    frame->nb_samples = 8192*4;
    if ((ret = ff_get_buffer(avccontext, frame)) < 0) {
        av_log(avccontext, AV_LOG_ERROR, "get_buffer() failed\n");
        return ret;
    }
    output = (int16_t *)frame->data[0];


    op->packet = avpkt->data;
    op->bytes  = avpkt->size;

//    av_log(avccontext, AV_LOG_DEBUG, "%d %d %d %"PRId64" %"PRId64" %d %d\n", op->bytes, op->b_o_s, op->e_o_s, op->granulepos, op->packetno, buf_size, context->vi.rate);

/*    for(i=0; i<op->bytes; i++)
      av_log(avccontext, AV_LOG_DEBUG, "%02X ", op->packet[i]);
    av_log(avccontext, AV_LOG_DEBUG, "\n");*/

    if(vorbis_synthesis(&context->vb, op) == 0)
        vorbis_synthesis_blockin(&context->vd, &context->vb) ;

    total_samples = 0 ;
    total_bytes = 0 ;

    while((samples = vorbis_synthesis_pcmout(&context->vd, &pcm)) > 0) {
        conv(samples, pcm, (char*)output + total_bytes, context->vi.channels) ;
        total_bytes += samples * 2 * context->vi.channels ;
        total_samples += samples ;
        vorbis_synthesis_read(&context->vd, samples) ;
    }

    frame->nb_samples = total_samples;
    *got_frame_ptr   = 1;
    return avpkt->size;
}
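The conv() helper used by this example (and by Example #16 below) is not shown. A sketch of what such a planar-float to interleaved-int16 converter typically looks like, with the same clipping guard the other examples use; this is not the actual FFmpeg helper:
static void conv(int samples, float **pcm, char *buf, int channels)
{
    int16_t *out = (int16_t *)buf;
    int i, j;

    for (i = 0; i < channels; i++) {
        int16_t *ptr = out + i;     /* channel i starts at offset i, stride = channels */
        float *mono = pcm[i];
        for (j = 0; j < samples; j++) {
            int val = (int)(mono[j] * 32767.f);
            if (val > 32767)  val = 32767;
            if (val < -32768) val = -32768;
            *ptr = (int16_t)val;
            ptr += channels;
        }
    }
}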
Example #14
/**
 * @return true if audio wants more packets
 */
static bool CIN_OGM_LoadAudioFrame (cinematic_t* cin)
{
    vorbis_block vb;

    OBJZERO(vb);
    vorbis_block_init(&OGMCIN.vd, &vb);

    while (OGMCIN.currentTime > (int) (OGMCIN.vd.granulepos * 1000 / OGMCIN.vi.rate)) {
        float** pcm;
        const int samples = vorbis_synthesis_pcmout(&OGMCIN.vd, &pcm);

        if (samples > 0) {
            /* vorbis -> raw */
            const int width = 2;
            const int channel = 2;
            int samplesNeeded = sizeof(rawBuffer) / (width * channel);
            const float* left = pcm[0];
            const float* right = (OGMCIN.vi.channels > 1) ? pcm[1] : pcm[0];
            short* ptr = (short*)rawBuffer;
            int i;

            if (samples < samplesNeeded)
                samplesNeeded = samples;

            for (i = 0; i < samplesNeeded; ++i, ptr += channel) {
                ptr[0] = (left[i] >= -1.0f && left[i] <= 1.0f) ? left[i] * 32767.f : 32767 * ((left[i] > 0.0f) - (left[i] < 0.0f));
                ptr[1] = (right[i] >= -1.0f && right[i] <= 1.0f) ? right[i] * 32767.f : 32767 * ((right[i] > 0.0f) - (right[i] < 0.0f));
            }

            /* tell libvorbis how many samples we actually consumed */
            vorbis_synthesis_read(&OGMCIN.vd, i);

            if (!cin->noSound)
                M_AddToSampleBuffer(&OGMCIN.musicStream, OGMCIN.vi.rate, i, rawBuffer);
        } else {
            ogg_packet op;
            /* op -> vorbis */
            if (ogg_stream_packetout(&OGMCIN.os_audio, &op)) {
                if (vorbis_synthesis(&vb, &op) == 0)
                    vorbis_synthesis_blockin(&OGMCIN.vd, &vb);
            } else
                break;
        }
    }

    vorbis_block_clear(&vb);

    return OGMCIN.currentTime > (int)(OGMCIN.vd.granulepos * 1000 / OGMCIN.vi.rate);
}
Example #15
File: vorbis.c Project: IAPark/vlc
/*****************************************************************************
 * DecodePacket: decodes a Vorbis packet.
 *****************************************************************************/
static block_t *DecodePacket( decoder_t *p_dec, ogg_packet *p_oggpacket )
{
    decoder_sys_t *p_sys = p_dec->p_sys;
    int           i_samples;

    INTERLEAVE_TYPE **pp_pcm;

    if( p_oggpacket->bytes &&
        vorbis_synthesis( &p_sys->vb, p_oggpacket ) == 0 )
        vorbis_synthesis_blockin( &p_sys->vd, &p_sys->vb );

    /* **pp_pcm is a multichannel float vector. In stereo, for
     * example, pp_pcm[0] is left, and pp_pcm[1] is right. i_samples is
     * the size of each channel. Convert the float values
     * (-1.<=range<=1.) to whatever PCM format and write it out */

    if( ( i_samples = vorbis_synthesis_pcmout( &p_sys->vd, &pp_pcm ) ) > 0 )
    {

        block_t *p_aout_buffer;

        if( decoder_UpdateAudioFormat( p_dec ) ) return NULL;
        p_aout_buffer =
            decoder_NewAudioBuffer( p_dec, i_samples );

        if( p_aout_buffer == NULL ) return NULL;

        /* Interleave the samples */
        Interleave( (INTERLEAVE_TYPE*)p_aout_buffer->p_buffer,
                    (const INTERLEAVE_TYPE**)pp_pcm, p_sys->vi.channels, i_samples,
                    p_sys->pi_chan_table);

        /* Tell libvorbis how many samples we actually consumed */
        vorbis_synthesis_read( &p_sys->vd, i_samples );

        /* Date management */
        p_aout_buffer->i_pts = date_Get( &p_sys->end_date );
        p_aout_buffer->i_length = date_Increment( &p_sys->end_date,
                                           i_samples ) - p_aout_buffer->i_pts;
        return p_aout_buffer;
    }
    else
    {
        return NULL;
    }
}
Example #16
static int oggvorbis_decode_frame(AVCodecContext *avccontext,
                        void *data, int *data_size,
                        uint8_t *buf, int buf_size)
{
    OggVorbisContext *context = avccontext->priv_data ;
    float **pcm ;
    ogg_packet *op= &context->op;
    int samples, total_samples, total_bytes;

    if(!buf_size){
    //FIXME flush
        return 0;
    }

    op->packet = buf;
    op->bytes  = buf_size;

//    av_log(avccontext, AV_LOG_DEBUG, "%d %d %d %"PRId64" %"PRId64" %d %d\n", op->bytes, op->b_o_s, op->e_o_s, op->granulepos, op->packetno, buf_size, context->vi.rate);

/*    for(i=0; i<op->bytes; i++)
      av_log(avccontext, AV_LOG_DEBUG, "%02X ", op->packet[i]);
    av_log(avccontext, AV_LOG_DEBUG, "\n");*/

    if(vorbis_synthesis(&context->vb, op) == 0)
        vorbis_synthesis_blockin(&context->vd, &context->vb) ;

    total_samples = 0 ;
    total_bytes = 0 ;

    while((samples = vorbis_synthesis_pcmout(&context->vd, &pcm)) > 0) {
        conv(samples, pcm, (char*)data + total_bytes, context->vi.channels) ;
        total_bytes += samples * 2 * context->vi.channels ;
        total_samples += samples ;
        vorbis_synthesis_read(&context->vd, samples) ;
    }

    *data_size = total_bytes ;
    return buf_size ;
}
Example #17
static GF_Err VORB_ProcessData(GF_MediaDecoder *ifcg, 
		char *inBuffer, u32 inBufferLength,
		u16 ES_ID,
		char *outBuffer, u32 *outBufferLength,
		u8 PaddingBits, u32 mmlevel)
{
	ogg_packet op;
    Float **pcm;
    u32 samples, total_samples, total_bytes;

	VORBISCTX();
	/*not using scalabilty*/
	assert(ctx->ES_ID == ES_ID);

	op.granulepos = -1;
	op.b_o_s = 0;
	op.e_o_s = 0;
	op.packetno = 0;
    op.packet = inBuffer;
    op.bytes = inBufferLength;


	*outBufferLength = 0;

	if (vorbis_synthesis(&ctx->vb, &op) == 0) 
		vorbis_synthesis_blockin(&ctx->vd, &ctx->vb) ;

	/*trust vorbis max block info*/
	total_samples = 0;
	total_bytes = 0;
	while ((samples = vorbis_synthesis_pcmout(&ctx->vd, &pcm)) > 0) {
		vorbis_to_intern(samples, pcm, (char*) outBuffer + total_bytes, ctx->vi.channels);
		total_bytes += samples * 2 * ctx->vi.channels;
		total_samples += samples;
		vorbis_synthesis_read(&ctx->vd, samples);
	}
	*outBufferLength = total_bytes;   
	return GF_OK;
}
Example #18
 uint8_t ADM_vorbis::run(uint8_t *inptr, uint32_t nbIn, float *outptr, uint32_t *nbOut)
{
ogg_packet packet;
float **sample_pcm;
int	nb_synth;

	*nbOut=0;
	if(!_init) return 0;
	packet.b_o_s=0;
	packet.e_o_s=0;
	packet.bytes=nbIn;
	packet.packet=inptr;
        packet.granulepos=-1;
	if(!vorbis_synthesis(&STRUCT->vblock,&packet))
	{
	      vorbis_synthesis_blockin(&STRUCT->vdsp,&STRUCT->vblock);
	 }
	 nb_synth=vorbis_synthesis_pcmout(&STRUCT->vdsp,&sample_pcm);
	 if(nb_synth<0)
	 {
	 	printf("error decoding vorbis %d\n",nb_synth);
		return 0;
	 }
	 
	 for (uint32_t samp = 0; samp < nb_synth; samp++)
	 	for (uint8_t chan = 0; chan < STRUCT->vinfo.channels; chan++)
			*outptr++ = sample_pcm[chan][samp] * STRUCT->ampscale;

	 *nbOut = STRUCT->vinfo.channels * nb_synth;

	// Purge them
	 vorbis_synthesis_read(&STRUCT->vdsp,nb_synth); 
	 aprintf("This round : in %d bytes, out %d bytes synthetized:%d\n",nbIn,*nbOut,nb_synth);
	return 1;
	
}	
Example #19
static int decode_audio(sh_audio_t *sh,unsigned char *buf,int minlen,int maxlen)
{
        int len = 0;
        int samples;
#ifdef CONFIG_TREMOR
        ogg_int32_t **pcm;
#else
        float scale;
        float **pcm;
#endif
        struct ov_struct_st *ov = sh->context;
	while(len < minlen) {
	  while((samples=vorbis_synthesis_pcmout(&ov->vd,&pcm))<=0){
	    ogg_packet op;
	    double pts;
	    memset(&op,0,sizeof(op)); //op.b_o_s = op.e_o_s = 0;
	    op.bytes = ds_get_packet_pts(sh->ds,&op.packet, &pts);
	    if(op.bytes<=0) break;
	    if (pts != MP_NOPTS_VALUE) {
		sh->pts = pts;
		sh->pts_bytes = 0;
	    }
	    if(vorbis_synthesis(&ov->vb,&op)==0) /* test for success! */
	      vorbis_synthesis_blockin(&ov->vd,&ov->vb);
	  }
	  if(samples<=0) break; // error/EOF
	  while(samples>0){
	    int i,j;
	    int clipflag=0;
	    int convsize=(maxlen-len)/(2*ov->vi.channels); // max size!
	    int bout=((samples<convsize)?samples:convsize);

	    if(bout<=0) break; // no buffer space

	    /* convert floats to 16 bit signed ints (host order) and
	       interleave */
#ifdef CONFIG_TREMOR
           if (ov->rg_scale_int == 64) {
	    for(i=0;i<ov->vi.channels;i++){
	      ogg_int16_t *convbuffer=(ogg_int16_t *)(&buf[len]);
	      ogg_int16_t *ptr=convbuffer+i;
	      ogg_int32_t  *mono=pcm[i];
	      for(j=0;j<bout;j++){
		int val=mono[j]>>9;
		/* might as well guard against clipping */
		if(val>32767){
		  val=32767;
		  clipflag=1;
		}
		if(val<-32768){
		  val=-32768;
		  clipflag=1;
		}
		*ptr=val;
		ptr+=ov->vi.channels;
	      }
	    }
	   } else
#endif /* CONFIG_TREMOR */
	   {
#ifndef CONFIG_TREMOR
            scale = 32767.f * ov->rg_scale;
#endif
	    for(i=0;i<ov->vi.channels;i++){
	      ogg_int16_t *convbuffer=(ogg_int16_t *)(&buf[len]);
	      ogg_int16_t *ptr=convbuffer+i;
#ifdef CONFIG_TREMOR
	      ogg_int32_t  *mono=pcm[i];
	      for(j=0;j<bout;j++){
		int val=(mono[j]*ov->rg_scale_int)>>(9+6);
#else
	      float  *mono=pcm[i];
	      for(j=0;j<bout;j++){
		int val=mono[j]*scale;
		/* might as well guard against clipping */
		if(val>32767){
		  val=32767;
		  clipflag=1;
		}
		if(val<-32768){
		  val=-32768;
		  clipflag=1;
		}
#endif /* CONFIG_TREMOR */
		*ptr=val;
		ptr+=ov->vi.channels;
	      }
	    }
	   }

	    if(clipflag)
	      mp_msg(MSGT_DECAUDIO,MSGL_DBG2,"Clipping in frame %ld\n",(long)(ov->vd.sequence));
	    len+=2*ov->vi.channels*bout;
	    sh->pts_bytes += 2*ov->vi.channels*bout;
	    mp_msg(MSGT_DECAUDIO,MSGL_DBG2,"\n[decoded: %d / %d ]\n",bout,samples);
	    samples-=bout;
	    vorbis_synthesis_read(&ov->vd,bout); /* tell libvorbis how
						    many samples we
						    actually consumed */
	  } //while(samples>0)
//          if (!samples) break; // why? how?
	}

	if (len > 0 && ov->vi.channels >= 5) {
	  reorder_channel_nch(buf, AF_CHANNEL_LAYOUT_VORBIS_DEFAULT,
	                      AF_CHANNEL_LAYOUT_MPLAYER_DEFAULT,
	                      ov->vi.channels, len / sh->samplesize,
	                      sh->samplesize);
	}


  return len;
}
Example #20
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <vorbis/codec.h>

ogg_int16_t convbuffer[4096]; /* interleaved 16-bit output buffer */
int convsize=4096;            /* samples per channel that fit in convbuffer */

int main(){
  ogg_sync_state   oy; /* sync and verify incoming physical bitstream */
  ogg_stream_state os; /* take physical pages, weld into a logical
                          stream of packets */
  ogg_page         og; /* one Ogg bitstream page. Vorbis packets are inside */
  ogg_packet       op; /* one raw packet of data for decode */

  vorbis_info      vi; /* struct that stores all the static vorbis bitstream
                          settings */
  vorbis_comment   vc; /* struct that stores all the bitstream user comments */
  vorbis_dsp_state vd; /* central working state for the packet->PCM decoder */
  vorbis_block     vb; /* local working space for packet->PCM decode */

  char *buffer;
  int  bytes;

  FILE *instream;
  FILE *outstream;
  char *inname = "01.ogg";
  char *outname = "esmith2000-09-28d1t15.raw";

//#ifdef _WIN32 /* We need to set stdin/stdout to binary mode. Damn windows. */
//  /* Beware the evil ifdef. We avoid these where we can, but this one we
//     cannot. Don't add any more, you'll probably go to hell if you do. */
//  //_setmode( _fileno( stdin ), _O_BINARY );
//  //_setmode( _fileno( stdout ), _O_BINARY );
//#endif

#if defined(macintosh) && defined(__MWERKS__)
  {
    int argc;
    char **argv;
    argc=ccommand(&argv); /* get a "command line" from the Mac user */
                     /* this also lets the user set stdin and stdout */
  }
#endif

  /********** Decode setup ************/

  //opening the file
  
  
  if( fopen_s( &instream, inname, "rb" ) != 0 )
  {
	  fprintf(stderr,"Can not open file %s\n", inname);
      exit(1);
  };
  
  if( fopen_s( &outstream, outname, "wb" ) != 0 )
  {
	  fprintf(stderr,"Can not open file %s\n", outname);
      exit(1);
  }


  ogg_sync_init(&oy); /* Now we can read pages */
  
  while(1){ /* we repeat if the bitstream is chained */
    int eos=0;
    int i;

    /* grab some data at the head of the stream. We want the first page
       (which is guaranteed to be small and only contain the Vorbis
       stream initial header) We need the first page to get the stream
       serialno. */

    /* submit a 4k block to libvorbis' Ogg layer */
    buffer=ogg_sync_buffer(&oy,4096);
    //bytes=fread(buffer,1,4096,stdin);
	bytes=fread(buffer,1,4096,instream);
    ogg_sync_wrote(&oy,bytes);
    
    /* Get the first page. */
    if(ogg_sync_pageout(&oy,&og)!=1){
      /* have we simply run out of data?  If so, we're done. */
      if(bytes<4096)break;
      
      /* error case.  Must not be Vorbis data */
      fprintf(stderr,"Input does not appear to be an Ogg bitstream.\n");
      exit(1);
    }
  
    /* Get the serial number and set up the rest of decode. */
    /* serialno first; use it to set up a logical stream */
    ogg_stream_init(&os,ogg_page_serialno(&og));
    
    /* extract the initial header from the first page and verify that the
       Ogg bitstream is in fact Vorbis data */
    
    /* I handle the initial header first instead of just having the code
       read all three Vorbis headers at once because reading the initial
       header is an easy way to identify a Vorbis bitstream and it's
       useful to see that functionality separated out. */
    
    vorbis_info_init(&vi);
    vorbis_comment_init(&vc);
    if(ogg_stream_pagein(&os,&og)<0){ 
      /* error; stream version mismatch perhaps */
      fprintf(stderr,"Error reading first page of Ogg bitstream data.\n");
      exit(1);
    }
    
    if(ogg_stream_packetout(&os,&op)!=1){ 
      /* no page? must not be vorbis */
      fprintf(stderr,"Error reading initial header packet.\n");
      exit(1);
    }
    
    if(vorbis_synthesis_headerin(&vi,&vc,&op)<0){ 
      /* error case; not a vorbis header */
      fprintf(stderr,"This Ogg bitstream does not contain Vorbis "
              "audio data.\n");
      exit(1);
    }
    
    /* At this point, we're sure we're Vorbis. We've set up the logical
       (Ogg) bitstream decoder. Get the comment and codebook headers and
       set up the Vorbis decoder */
    
    /* The next two packets in order are the comment and codebook headers.
       They're likely large and may span multiple pages. Thus we read
       and submit data until we get our two packets, watching that no
       pages are missing. If a page is missing, error out; losing a
       header page is the only place where missing data is fatal. */
    
    i=0;
    while(i<2){
      while(i<2){
        int result=ogg_sync_pageout(&oy,&og);
        if(result==0)break; /* Need more data */
        /* Don't complain about missing or corrupt data yet. We'll
           catch it at the packet output phase */
        if(result==1){
          ogg_stream_pagein(&os,&og); /* we can ignore any errors here
                                         as they'll also become apparent
                                         at packetout */
          while(i<2){
            result=ogg_stream_packetout(&os,&op);
            if(result==0)break;
            if(result<0){
              /* Uh oh; data at some point was corrupted or missing!
                 We can't tolerate that in a header.  Die. */
              fprintf(stderr,"Corrupt secondary header.  Exiting.\n");
              exit(1);
            }
            result=vorbis_synthesis_headerin(&vi,&vc,&op);
            if(result<0){
              fprintf(stderr,"Corrupt secondary header.  Exiting.\n");
              exit(1);
            }
            i++;
          }
        }
      }
      /* no harm in not checking before adding more */
      buffer=ogg_sync_buffer(&oy,4096);
      //bytes=fread(buffer,1,4096,stdin);
	  bytes=fread(buffer,1,4096,instream);
      if(bytes==0 && i<2){
        fprintf(stderr,"End of file before finding all Vorbis headers!\n");
        exit(1);
      }
      ogg_sync_wrote(&oy,bytes);
    }
    
    /* Throw the comments plus a few lines about the bitstream we're
       decoding */
    {
      char **ptr=vc.user_comments;
      while(*ptr){
        fprintf(stderr,"%s\n",*ptr);
        ++ptr;
      }
      fprintf(stderr,"\nBitstream is %d channel, %ldHz\n",vi.channels,vi.rate);
      fprintf(stderr,"Encoded by: %s\n\n",vc.vendor);
    }
    
    convsize=4096/vi.channels;

    /* OK, got and parsed all three headers. Initialize the Vorbis
       packet->PCM decoder. */
    if(vorbis_synthesis_init(&vd,&vi)==0){ /* central decode state */
      vorbis_block_init(&vd,&vb);          /* local state for most of the decode
                                              so multiple block decodes can
                                              proceed in parallel. We could init
                                              multiple vorbis_block structures
                                              for vd here */
      
      /* The rest is just a straight decode loop until end of stream */
      while(!eos){
        while(!eos){
          int result=ogg_sync_pageout(&oy,&og);
          if(result==0)break; /* need more data */
          if(result<0){ /* missing or corrupt data at this page position */
            fprintf(stderr,"Corrupt or missing data in bitstream; "
                    "continuing...\n");
          }else{
            ogg_stream_pagein(&os,&og); /* can safely ignore errors at
                                           this point */
            while(1){
              result=ogg_stream_packetout(&os,&op);
              
              if(result==0)break; /* need more data */
              if(result<0){ /* missing or corrupt data at this page position */
                /* no reason to complain; already complained above */
              }else{
                /* we have a packet.  Decode it */
                float **pcm;
                int samples;
                
                if(vorbis_synthesis(&vb,&op)==0) /* test for success! */
                  vorbis_synthesis_blockin(&vd,&vb);
                /* 
                   
                **pcm is a multichannel float vector.  In stereo, for
                example, pcm[0] is left, and pcm[1] is right.  samples is
                the size of each channel.  Convert the float values
                (-1.<=range<=1.) to whatever PCM format and write it out */
                
                while((samples=vorbis_synthesis_pcmout(&vd,&pcm))>0){
                  int j;
                  int clipflag=0;
                  int bout=(samples<convsize?samples:convsize);
                  
                  /* convert floats to 16 bit signed ints (host order) and
                     interleave */
                  for(i=0;i<vi.channels;i++){
                    ogg_int16_t *ptr=convbuffer+i;
                    float  *mono=pcm[i];
                    for(j=0;j<bout;j++){
#if 1
                      int val=floor(mono[j]*32767.f+.5f);
#else /* optional dither */
                      int val=mono[j]*32767.f+drand48()-0.5f;
#endif
                      /* might as well guard against clipping */
                      if(val>32767){
                        val=32767;
                        clipflag=1;
                      }
                      if(val<-32768){
                        val=-32768;
                        clipflag=1;
                      }
                      *ptr=val;
                      ptr+=vi.channels;
                    }
                  }
                  
                  if(clipflag)
                    fprintf(stderr,"Clipping in frame %ld\n",(long)(vd.sequence));
                  
                  
                  //fwrite(convbuffer,2*vi.channels,bout,stdout);
				  fwrite(convbuffer,2*vi.channels,bout,outstream);
                  
                  vorbis_synthesis_read(&vd,bout); /* tell libvorbis how
                                                      many samples we
                                                      actually consumed */
                }            
              }
            }
            if(ogg_page_eos(&og))eos=1;
          }
        }
        if(!eos){
          buffer=ogg_sync_buffer(&oy,4096);
          //bytes=fread(buffer,1,4096,stdin);
		  bytes=fread(buffer,1,4096,instream);
          ogg_sync_wrote(&oy,bytes);
          if(bytes==0)eos=1;
        }
      }
      
      /* ogg_page and ogg_packet structs always point to storage in
         libvorbis.  They're never freed or manipulated directly */
      
      vorbis_block_clear(&vb);
      vorbis_dsp_clear(&vd);
    }else{
      fprintf(stderr,"Error: Corrupt header during playback initialization.\n");
    }

    /* clean up this logical bitstream; before exit we see if we're
       followed by another [chained] */
    
    ogg_stream_clear(&os);
    vorbis_comment_clear(&vc);
    vorbis_info_clear(&vi);  /* must be called last */
  }

  /* OK, clean up the framer */
  ogg_sync_clear(&oy);
  
  fprintf(stderr,"Done.\n");
  return(0);
}
Example #21
RefPtr<MediaDataDecoder::DecodePromise>
VorbisDataDecoder::ProcessDecode(MediaRawData* aSample)
{
  MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());

  const unsigned char* aData = aSample->Data();
  size_t aLength = aSample->Size();
  int64_t aOffset = aSample->mOffset;

  MOZ_ASSERT(mPacketCount >= 3);

  if (!mLastFrameTime ||
      mLastFrameTime.ref() != aSample->mTime.ToMicroseconds()) {
    // We are starting a new block.
    mFrames = 0;
    mLastFrameTime = Some(aSample->mTime.ToMicroseconds());
  }

  ogg_packet pkt = InitVorbisPacket(
    aData, aLength, false, aSample->mEOS,
    aSample->mTimecode.ToMicroseconds(), mPacketCount++);

  int err = vorbis_synthesis(&mVorbisBlock, &pkt);
  if (err) {
    return DecodePromise::CreateAndReject(
      MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
                  RESULT_DETAIL("vorbis_synthesis:%d", err)),
      __func__);
  }

  err = vorbis_synthesis_blockin(&mVorbisDsp, &mVorbisBlock);
  if (err) {
    return DecodePromise::CreateAndReject(
      MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
                  RESULT_DETAIL("vorbis_synthesis_blockin:%d", err)),
      __func__);
  }

  VorbisPCMValue** pcm = 0;
  int32_t frames = vorbis_synthesis_pcmout(&mVorbisDsp, &pcm);
  if (frames == 0) {
    return DecodePromise::CreateAndResolve(DecodedData(), __func__);
  }

  DecodedData results;
  while (frames > 0) {
    uint32_t channels = mVorbisDsp.vi->channels;
    uint32_t rate = mVorbisDsp.vi->rate;
    AlignedAudioBuffer buffer(frames*channels);
    if (!buffer) {
      return DecodePromise::CreateAndReject(
        MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__), __func__);
    }
    for (uint32_t j = 0; j < channels; ++j) {
      VorbisPCMValue* channel = pcm[j];
      for (uint32_t i = 0; i < uint32_t(frames); ++i) {
        buffer[i*channels + j] = MOZ_CONVERT_VORBIS_SAMPLE(channel[i]);
      }
    }

    auto duration = FramesToTimeUnit(frames, rate);
    if (!duration.IsValid()) {
      return DecodePromise::CreateAndReject(
        MediaResult(NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
                    RESULT_DETAIL("Overflow converting audio duration")),
        __func__);
    }
    auto total_duration = FramesToTimeUnit(mFrames, rate);
    if (!total_duration.IsValid()) {
      return DecodePromise::CreateAndReject(
        MediaResult(NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
                    RESULT_DETAIL("Overflow converting audio total_duration")),
        __func__);
    }

    auto time = total_duration + aSample->mTime;
    if (!time.IsValid()) {
      return DecodePromise::CreateAndReject(
        MediaResult(
          NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
          RESULT_DETAIL("Overflow adding total_duration and aSample->mTime")),
        __func__);
    };

    if (!mAudioConverter) {
      AudioConfig in(
        AudioConfig::ChannelLayout(channels, VorbisLayout(channels)), rate);
      AudioConfig out(channels, rate);
      if (!in.IsValid() || !out.IsValid()) {
        return DecodePromise::CreateAndReject(
          MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                      RESULT_DETAIL("Invalid channel layout:%u", channels)),
          __func__);
      }
      mAudioConverter = MakeUnique<AudioConverter>(in, out);
    }
    MOZ_ASSERT(mAudioConverter->CanWorkInPlace());
    AudioSampleBuffer data(Move(buffer));
    data = mAudioConverter->Process(Move(data));

    results.AppendElement(new AudioData(aOffset, time, duration,
                                        frames, data.Forget(), channels, rate));
    mFrames += frames;
    err = vorbis_synthesis_read(&mVorbisDsp, frames);
    if (err) {
      return DecodePromise::CreateAndReject(
        MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
                    RESULT_DETAIL("vorbis_synthesis_read:%d", err)),
        __func__);
    }

    frames = vorbis_synthesis_pcmout(&mVorbisDsp, &pcm);
  }
  return DecodePromise::CreateAndResolve(Move(results), __func__);
}
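InitVorbisPacket() is not shown here (Example #24 uses it as well). A hedged sketch of what such a helper presumably does, simply filling the standard ogg_packet fields the same way Example #17 does; the actual Mozilla helper may differ:
static ogg_packet InitVorbisPacket(const unsigned char* aData, size_t aLength,
                                   bool aBOS, bool aEOS,
                                   int64_t aGranulepos, int64_t aPacketNo)
{
  ogg_packet packet;
  packet.packet = const_cast<unsigned char*>(aData);
  packet.bytes = static_cast<long>(aLength);
  packet.b_o_s = aBOS ? 1 : 0;
  packet.e_o_s = aEOS ? 1 : 0;
  packet.granulepos = aGranulepos;
  packet.packetno = aPacketNo;
  return packet;
}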
Example #22
/*
  return: qtrue if the audio stream wants more packets
*/
static qboolean OGV_LoadAudio(cinematic_t *cin)
{
	qboolean     anyDataTransferred = qtrue;
	float        **pcm;
	int          frames, frameNeeded;
	int          i, j;
	short        *ptr;
	ogg_packet   op;
	vorbis_block vb;

	memset(&op, 0, sizeof(op));
	memset(&vb, 0, sizeof(vb));
	vorbis_block_init(&g_ogm->vd, &vb);

	while (anyDataTransferred && g_ogm->currentTime + MAX_AUDIO_PRELOAD > (int)(g_ogm->vd.granulepos * 1000 / g_ogm->vi.rate))
	{
		anyDataTransferred = qfalse;

		if ((frames = vorbis_synthesis_pcmout(&g_ogm->vd, &pcm)) > 0)
		{
			// vorbis -> raw
			ptr = (short *)g_ogm->audioBuffer;

			frameNeeded = (SIZEOF_RAWBUFF) / (OGG_SAMPLEWIDTH * g_ogm->vi.channels);

			if (frames < frameNeeded)
			{
				frameNeeded = frames;
			}

			for (i = 0; i < frameNeeded; i++)
			{
				for (j = 0; j < g_ogm->vi.channels; j++)
				{
					*(ptr++) = (short)((pcm[j][i] >= -1.0f && pcm[j][i] <= 1.0f) ? pcm[j][i] * 32767.f : 32767 * ((pcm[j][i] > 0.0f) - (pcm[j][i] < 0.0f)));
				}
			}

			// tell libvorbis how many samples we actually consumed (we ate them all!)
			vorbis_synthesis_read(&g_ogm->vd, frameNeeded);

			if (!(cin->flags & CIN_silent))
			{
				S_RawSamples(0, frameNeeded, g_ogm->vi.rate, OGG_SAMPLEWIDTH, g_ogm->vi.channels, g_ogm->audioBuffer, 1.0f, 1.0f);
			}

			anyDataTransferred = qtrue;
		}

		if (!anyDataTransferred)
		{
			// op -> vorbis
			if (ogg_stream_packetout(&g_ogm->os_audio, &op))
			{
				if (vorbis_synthesis(&vb, &op) == 0)
				{
					vorbis_synthesis_blockin(&g_ogm->vd, &vb);
				}
				anyDataTransferred = qtrue;
			}
		}
	}

	vorbis_block_clear(&vb);

	return (qboolean)(g_ogm->currentTime + MIN_AUDIO_PRELOAD > (int)(g_ogm->vd.granulepos * 1000 / g_ogm->vi.rate));
}
Example #23
static GstFlowReturn
vorbis_handle_data_packet (GstVorbisDec * vd, ogg_packet * packet,
    GstClockTime timestamp, GstClockTime duration)
{
#ifdef USE_TREMOLO
  vorbis_sample_t *pcm;
#else
  vorbis_sample_t **pcm;
#endif
  guint sample_count;
  GstBuffer *out = NULL;
  GstFlowReturn result;
  GstMapInfo map;
  gsize size;

  if (G_UNLIKELY (!vd->initialized)) {
    result = vorbis_dec_handle_header_caps (vd);
    if (result != GST_FLOW_OK)
      goto not_initialized;
  }

  /* normal data packet */
  /* FIXME, we can skip decoding if the packet is outside of the
   * segment, this is however not very trivial as we need a previous
   * packet to decode the current one so we must be careful not to
   * throw away too much. For now we decode everything and clip right
   * before pushing data. */

#ifdef USE_TREMOLO
  if (G_UNLIKELY (vorbis_dsp_synthesis (&vd->vd, packet, 1)))
    goto could_not_read;
#else
  if (G_UNLIKELY (vorbis_synthesis (&vd->vb, packet)))
    goto could_not_read;

  if (G_UNLIKELY (vorbis_synthesis_blockin (&vd->vd, &vd->vb) < 0))
    goto not_accepted;
#endif

  /* assume all goes well here */
  result = GST_FLOW_OK;

  /* count samples ready for reading */
#ifdef USE_TREMOLO
  if ((sample_count = vorbis_dsp_pcmout (&vd->vd, NULL, 0)) == 0)
#else
  if ((sample_count = vorbis_synthesis_pcmout (&vd->vd, NULL)) == 0)
#endif
    goto done;

  size = sample_count * vd->info.bpf;
  GST_LOG_OBJECT (vd, "%d samples ready for reading, size %" G_GSIZE_FORMAT,
      sample_count, size);

  /* alloc buffer for it */
  out = gst_audio_decoder_allocate_output_buffer (GST_AUDIO_DECODER (vd), size);

  gst_buffer_map (out, &map, GST_MAP_WRITE);
  /* get samples ready for reading now, should be sample_count */
#ifdef USE_TREMOLO
  if (G_UNLIKELY (vorbis_dsp_pcmout (&vd->vd, map.data, sample_count) !=
          sample_count))
#else
  if (G_UNLIKELY (vorbis_synthesis_pcmout (&vd->vd, &pcm) != sample_count))
#endif
    goto wrong_samples;

#ifdef USE_TREMOLO
  if (vd->info.channels < 9)
    gst_audio_reorder_channels (map.data, map.size, GST_VORBIS_AUDIO_FORMAT,
        vd->info.channels, gst_vorbis_channel_positions[vd->info.channels - 1],
        gst_vorbis_default_channel_positions[vd->info.channels - 1]);
#else
  /* copy samples in buffer */
  vd->copy_samples ((vorbis_sample_t *) map.data, pcm,
      sample_count, vd->info.channels);
#endif

  GST_LOG_OBJECT (vd, "have output size of %" G_GSIZE_FORMAT, size);
  gst_buffer_unmap (out, &map);

done:
  /* whether or not data produced, consume one frame and advance time */
  result = gst_audio_decoder_finish_frame (GST_AUDIO_DECODER (vd), out, 1);

#ifdef USE_TREMOLO
  vorbis_dsp_read (&vd->vd, sample_count);
#else
  vorbis_synthesis_read (&vd->vd, sample_count);
#endif

  return result;

  /* ERRORS */
not_initialized:
  {
    GST_ELEMENT_ERROR (GST_ELEMENT (vd), STREAM, DECODE,
        (NULL), ("no header sent yet"));
    return GST_FLOW_NOT_NEGOTIATED;
  }
could_not_read:
  {
    GST_ELEMENT_ERROR (GST_ELEMENT (vd), STREAM, DECODE,
        (NULL), ("couldn't read data packet"));
    return GST_FLOW_ERROR;
  }
not_accepted:
  {
    GST_ELEMENT_ERROR (GST_ELEMENT (vd), STREAM, DECODE,
        (NULL), ("vorbis decoder did not accept data packet"));
    return GST_FLOW_ERROR;
  }
wrong_samples:
  {
    gst_buffer_unref (out);
    GST_ELEMENT_ERROR (GST_ELEMENT (vd), STREAM, DECODE,
        (NULL), ("vorbis decoder reported wrong number of samples"));
    return GST_FLOW_ERROR;
  }
}
Example #24
int
VorbisDataDecoder::DoDecode(MediaRawData* aSample)
{
  const unsigned char* aData = aSample->Data();
  size_t aLength = aSample->Size();
  int64_t aOffset = aSample->mOffset;
  uint64_t aTstampUsecs = aSample->mTime;
  int64_t aTotalFrames = 0;

  MOZ_ASSERT(mPacketCount >= 3);

  if (!mLastFrameTime || mLastFrameTime.ref() != aSample->mTime) {
    // We are starting a new block.
    mFrames = 0;
    mLastFrameTime = Some(aSample->mTime);
  }

  ogg_packet pkt = InitVorbisPacket(aData, aLength, false, false, -1, mPacketCount++);
  bool first_packet = mPacketCount == 4;

  if (vorbis_synthesis(&mVorbisBlock, &pkt) != 0) {
    return -1;
  }

  if (vorbis_synthesis_blockin(&mVorbisDsp,
                               &mVorbisBlock) != 0) {
    return -1;
  }

  VorbisPCMValue** pcm = 0;
  int32_t frames = vorbis_synthesis_pcmout(&mVorbisDsp, &pcm);
  // If the first packet of audio in the media produces no data, we
  // still need to produce an AudioData for it so that the correct media
  // start time is calculated.  Otherwise we'd end up with a media start
  // time derived from the timecode of the first packet that produced
  // data.
  if (frames == 0 && first_packet) {
    mCallback->Output(new AudioData(aOffset,
                                    aTstampUsecs,
                                    0,
                                    0,
                                    nullptr,
                                    mVorbisDsp.vi->channels,
                                    mVorbisDsp.vi->rate));
  }
  while (frames > 0) {
    uint32_t channels = mVorbisDsp.vi->channels;
    auto buffer = MakeUnique<AudioDataValue[]>(frames*channels);
    for (uint32_t j = 0; j < channels; ++j) {
      VorbisPCMValue* channel = pcm[j];
      for (uint32_t i = 0; i < uint32_t(frames); ++i) {
        buffer[i*channels + j] = MOZ_CONVERT_VORBIS_SAMPLE(channel[i]);
      }
    }

    CheckedInt64 duration = FramesToUsecs(frames, mVorbisDsp.vi->rate);
    if (!duration.isValid()) {
      NS_WARNING("Int overflow converting WebM audio duration");
      return -1;
    }
    CheckedInt64 total_duration = FramesToUsecs(mFrames,
                                                mVorbisDsp.vi->rate);
    if (!total_duration.isValid()) {
      NS_WARNING("Int overflow converting WebM audio total_duration");
      return -1;
    }

    CheckedInt64 time = total_duration + aTstampUsecs;
    if (!time.isValid()) {
      NS_WARNING("Int overflow adding total_duration and aTstampUsecs");
      return -1;
    };

    aTotalFrames += frames;
    mCallback->Output(new AudioData(aOffset,
                                    time.value(),
                                    duration.value(),
                                    frames,
                                    Move(buffer),
                                    mVorbisDsp.vi->channels,
                                    mVorbisDsp.vi->rate));
    mFrames += frames;
    if (vorbis_synthesis_read(&mVorbisDsp, frames) != 0) {
      return -1;
    }

    frames = vorbis_synthesis_pcmout(&mVorbisDsp, &pcm);
  }

  return aTotalFrames > 0 ? 1 : 0;
}
Example #25
/*
  For lame_decode_fromfile:  return code
  -1     error, or eof
  0     ok, but need more data before outputting any samples
  n     number of samples output.  
*/
int lame_decode_ogg_fromfile( lame_global_flags*  gfp,
                              FILE*               fd,
                              short int           pcm_l[],
                              short int           pcm_r[],
                              mp3data_struct*     mp3data )
{
  lame_internal_flags *gfc = gfp->internal_flags;
  int samples,result,i,j,eof=0,eos=0,bout=0;
  double **pcm;

  while(1){

    /* 
    **pcm is a multichannel double vector.  In stereo, for
    example, pcm[0] is left, and pcm[1] is right.  samples is
    the size of each channel.  Convert the float values
    (-1.<=range<=1.) to whatever PCM format and write it out */
    /* unpack the buffer, if it has at least 1024 samples */
    convsize=1024;
    samples=vorbis_synthesis_pcmout(&vd,&pcm);
    if (samples >= convsize || eos || eof) {
      /* read 1024 samples, or if eos, read what ever is in buffer */
      int clipflag=0;
      bout=(samples<convsize?samples:convsize);
      
      /* convert doubles to 16 bit signed ints (host order) and
	 interleave */
      for(i=0;i<vi.channels;i++){
	double  *mono=pcm[i];
	for(j=0;j<bout;j++){
	  int val=mono[j]*32767.;
	  /* might as well guard against clipping */
	  if(val>32767){
	    val=32767;
	    clipflag=1;
	  }
	  if(val<-32768){
	    val=-32768;
	    clipflag=1;
	  }
	  if (i==0) pcm_l[j]=val;
	  if (i==1) pcm_r[j]=val;
	}
      }
      
      /*
      if(clipflag)
	MSGF( gfc, "Clipping in frame %ld\n", vd.sequence );
      */
      
      /* tell libvorbis how many samples we actually consumed */
      vorbis_synthesis_read(&vd,bout); 
      
      break;
    }    

    result=ogg_sync_pageout(&oy,&og);
      
    if(result==0) {
      /* need more data */
    }else if (result==-1){ /* missing or corrupt data at this page position */
      ERRORF( gfc, "Corrupt or missing data in bitstream; "
	      "continuing...\n");
    }else{
      /* decode this page */
      ogg_stream_pagein(&os,&og); /* can safely ignore errors at
				       this point */
      do {
	result=ogg_stream_packetout(&os,&op);
	if(result==0) {
	  /* need more data */
	} else if(result==-1){ /* missing or corrupt data at this page position */
	  /* no reason to complain; already complained above */
	}else{
	  /* we have a packet.  Decode it */
	  vorbis_synthesis(&vb,&op);
	  vorbis_synthesis_blockin(&vd,&vb);
	}
      } while (result!=0);
    }

    /* is this the last page? */    
    if(ogg_page_eos(&og))eos=1;
    
    if(!eos){
      char *buffer;
      int bytes;
      buffer=ogg_sync_buffer(&oy,4096);
      bytes=fread(buffer,1,4096,fd);
      ogg_sync_wrote(&oy,bytes);
      if(bytes==0)eof=1;
    }
  }

  mp3data->stereo = vi.channels;
  mp3data->samplerate = vi.rate;
  mp3data->bitrate = 0; //ov_bitrate_instant(&vf);
  /*  mp3data->nsamp=MAX_U_32_NUM;*/

  
  if (bout==0) {
    /* clean up this logical bitstream; before exit we see if we're
       followed by another [chained] */
    ogg_stream_clear(&os);
    
    /* ogg_page and ogg_packet structs always point to storage in
       libvorbis.  They're never freed or manipulated directly */
    
    vorbis_block_clear(&vb);
    vorbis_dsp_clear(&vd);
    vorbis_info_clear(&vi);  /* must be called last */
    
    /* OK, clean up the framer */
    ogg_sync_clear(&oy);
    return -1;
  }
  return bout;
}
Example #26
void VideoStreamPlaybackTheora::update(float p_delta) {

	if (!file)
		return;

	if (!playing || paused) {
		//printf("not playing\n");
		return;
	};

#ifdef THEORA_USE_THREAD_STREAMING
	thread_sem->post();
#endif

	//print_line("play "+rtos(p_delta));
	time += p_delta;

	if (videobuf_time > get_time()) {
		return; //no new frames need to be produced
	}

	bool frame_done = false;
	bool audio_done = !vorbis_p;

	while (!frame_done || (!audio_done && !vorbis_eos)) {
		//a frame needs to be produced

		ogg_packet op;
		bool no_theora = false;

		while (vorbis_p) {
			int ret;
			float **pcm;

			bool buffer_full = false;

			/* if there's pending, decoded audio, grab it */
			ret = vorbis_synthesis_pcmout(&vd, &pcm);
			if (ret > 0) {

				const int AUXBUF_LEN = 4096;
				int to_read = ret;
				int16_t aux_buffer[AUXBUF_LEN];

				while (to_read) {

					int m = MIN(AUXBUF_LEN / vi.channels, to_read);

					int count = 0;

					for (int j = 0; j < m; j++) {
						for (int i = 0; i < vi.channels; i++) {

							int val = Math::fast_ftoi(pcm[i][j] * 32767.f);
							if (val > 32767) val = 32767;
							if (val < -32768) val = -32768;
							aux_buffer[count++] = val;
						}
					}

					if (mix_callback) {
						int mixed = mix_callback(mix_udata, aux_buffer, m);
						to_read -= mixed;
						if (mixed != m) { //could mix no more
							buffer_full = true;
							break;
						}
					} else {
						to_read -= m; //just pretend we sent the audio
					}
				}

				int tr = vorbis_synthesis_read(&vd, ret - to_read);

				if (vd.granulepos >= 0) {
					//print_line("wrote: "+itos(audio_frames_wrote)+" gpos: "+itos(vd.granulepos));
				}

				//print_line("mix audio!");

				audio_frames_wrote += ret - to_read;

				//print_line("AGP: "+itos(vd.granulepos)+" added "+itos(ret-to_read));

			} else {

				/* no pending audio; is there a pending packet to decode? */
				if (ogg_stream_packetout(&vo, &op) > 0) {
					if (vorbis_synthesis(&vb, &op) == 0) { /* test for success! */
						vorbis_synthesis_blockin(&vd, &vb);
					}
				} else { /* we need more data; break out to suck in another page */
					//printf("need moar data\n");
					break;
				};
			}

			audio_done = videobuf_time < (audio_frames_wrote / float(vi.rate));

			if (buffer_full)
				break;
		}

		while (theora_p && !frame_done) {
			/* theora is one in, one out... */
			if (ogg_stream_packetout(&to, &op) > 0) {

				if (false && pp_inc) {
					pp_level += pp_inc;
					th_decode_ctl(td, TH_DECCTL_SET_PPLEVEL, &pp_level,
							sizeof(pp_level));
					pp_inc = 0;
				}
				/*HACK: This should be set after a seek or a gap, but we might not have
				a granulepos for the first packet (we only have them for the last
				packet on a page), so we just set it as often as we get it.
				To do this right, we should back-track from the last packet on the
				page and compute the correct granulepos for the first packet after
				a seek or a gap.*/
				if (op.granulepos >= 0) {
					th_decode_ctl(td, TH_DECCTL_SET_GRANPOS, &op.granulepos,
							sizeof(op.granulepos));
				}
				ogg_int64_t videobuf_granulepos;
				if (th_decode_packetin(td, &op, &videobuf_granulepos) == 0) {
					videobuf_time = th_granule_time(td, videobuf_granulepos);

					//printf("frame time %f, play time %f, ready %i\n", (float)videobuf_time, get_time(), videobuf_ready);

					/* is it already too old to be useful?  This is only actually
					 useful cosmetically after a SIGSTOP.  Note that we have to
					 decode the frame even if we don't show it (for now) due to
					 keyframing.  Soon enough libtheora will be able to deal
					 with non-keyframe seeks.  */

					if (videobuf_time >= get_time()) {
						frame_done = true;
					} else {
						/*If we are too slow, reduce the pp level.*/
						pp_inc = pp_level > 0 ? -1 : 0;
					}
				} else {
				}

			} else {
				no_theora = true;
				break;
			}
		}

#ifdef THEORA_USE_THREAD_STREAMING
		if (file && thread_eof && no_theora && theora_eos && ring_buffer.data_left() == 0) {
#else
		if (file && /*!videobuf_ready && */ no_theora && theora_eos) {
#endif
			printf("video done, stopping\n");
			stop();
			return;
		};

		if (!frame_done || !audio_done) {
			//what's the point of waiting for audio to grab a page?

			buffer_data();
			while (ogg_sync_pageout(&oy, &og) > 0) {
				queue_page(&og);
			}
		}

		/* If playback has begun, top audio buffer off immediately. */
		//if(stateflag) audio_write_nonblocking();

		/* are we at or past time for this video frame? */
		if (videobuf_ready && videobuf_time <= get_time()) {

			//video_write();
			//videobuf_ready=0;
		} else {
			//printf("frame at %f not ready (time %f), ready %i\n", (float)videobuf_time, get_time(), videobuf_ready);
		}

		float tdiff = videobuf_time - get_time();
		/*If we have lots of extra time, increase the post-processing level.*/
		if (tdiff > ti.fps_denominator * 0.25 / ti.fps_numerator) {
			pp_inc = pp_level < pp_level_max ? 1 : 0;
		} else if (tdiff < ti.fps_denominator * 0.05 / ti.fps_numerator) {
			pp_inc = pp_level > 0 ? -1 : 0;
		}
	}

	video_write();
};

void VideoStreamPlaybackTheora::play() {

	if (!playing)
		time = 0;
	else {
		stop();
	}

	playing = true;
	delay_compensation = ProjectSettings::get_singleton()->get("audio/video_delay_compensation_ms");
	delay_compensation /= 1000.0;
};

void VideoStreamPlaybackTheora::stop() {

	if (playing) {

		clear();
		set_file(file_name); //reset
	}
	playing = false;
	time = 0;
};

bool VideoStreamPlaybackTheora::is_playing() const {

	return playing;
};

void VideoStreamPlaybackTheora::set_paused(bool p_paused) {

	paused = p_paused;
	//pau = !p_paused;
};

bool VideoStreamPlaybackTheora::is_paused(bool p_paused) const {

	return paused;
};

void VideoStreamPlaybackTheora::set_loop(bool p_enable){

};

bool VideoStreamPlaybackTheora::has_loop() const {

	return false;
};

float VideoStreamPlaybackTheora::get_length() const {

	return 0;
};

String VideoStreamPlaybackTheora::get_stream_name() const {

	return "";
};

int VideoStreamPlaybackTheora::get_loop_count() const {

	return 0;
};

float VideoStreamPlaybackTheora::get_playback_position() const {

	return get_time();
};

void VideoStreamPlaybackTheora::seek(float p_time){

	// no
};

void VideoStreamPlaybackTheora::set_mix_callback(AudioMixCallback p_callback, void *p_userdata) {

	mix_callback = p_callback;
	mix_udata = p_userdata;
}
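
The Theora playback code above shows the usual libvorbis drain pattern: hand each packet to vorbis_synthesis()/vorbis_synthesis_blockin(), pull decoded frames with vorbis_synthesis_pcmout(), and acknowledge with vorbis_synthesis_read() only the frames that were actually consumed, so the remainder stays queued inside the vorbis_dsp_state. A minimal sketch of that pattern in isolation; write_frames() is a hypothetical sink standing in for whatever mixer or device takes interleaved 16-bit PCM:

#include <stdint.h>
#include <vorbis/codec.h>

/* Hypothetical sink: consumes interleaved 16-bit frames, returns how many it accepted. */
extern int write_frames(const int16_t *buf, int frames, int channels);

/* Decode one audio packet and push as much PCM as the sink will take.
   Assumes vd/vb were prepared with vorbis_synthesis_init()/vorbis_block_init(). */
static void drain_vorbis(vorbis_dsp_state *vd, vorbis_block *vb, ogg_packet *op, int channels) {

	float **pcm;
	int frames;

	if (channels > 8)
		return; /* this sketch only sizes its buffer for up to 8 channels */

	if (vorbis_synthesis(vb, op) == 0)
		vorbis_synthesis_blockin(vd, vb);

	while ((frames = vorbis_synthesis_pcmout(vd, &pcm)) > 0) {

		enum { CHUNK = 1024 };
		int16_t out[CHUNK * 8];
		int todo = frames < CHUNK ? frames : CHUNK;
		int n = 0;

		for (int j = 0; j < todo; j++) {
			for (int c = 0; c < channels; c++) {
				float v = pcm[c][j] * 32767.f;
				if (v > 32767.f) v = 32767.f;
				if (v < -32768.f) v = -32768.f;
				out[n++] = (int16_t)v;
			}
		}

		int accepted = write_frames(out, todo, channels);
		/* consume only what was written; the rest is returned by the next pcmout() */
		vorbis_synthesis_read(vd, accepted);
		if (accepted < todo)
			break; /* sink is full, try again later */
	}
}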
Example #27
0
DecoderStatus VorbisDecoder::process(Packet * packet) {
  FXASSERT(packet);

#ifdef HAVE_VORBIS_PLUGIN
  FXfloat ** pcm=NULL;
  FXfloat * buf32=NULL;
#else // HAVE_TREMOR_PLUGIN
  FXint ** pcm=NULL;
  FXshort * buf32=NULL;
#endif

  FXint p,navail=0;

  FXint ngiven,ntotalsamples,nsamples,sample,c,s;

  FXbool  eos=packet->flags&FLAG_EOS;
  FXuint   id=packet->stream;
  FXlong  len=packet->stream_length;


  OggDecoder::process(packet);

  /// Init Decoder
  if (!has_dsp) {
    if (!init_decoder())
      return DecoderError;
    if (!has_dsp)
      return DecoderOk;
    }

  /// Find Stream Position
  if (stream_position==-1 && !find_stream_position())
    return DecoderOk;


  if (out) {
    navail = out->availableFrames();
    }

  while(get_next_packet()) {

    if (__unlikely(is_vorbis_header())) {
      GM_DEBUG_PRINT("[vorbis] unexpected vorbis header found. Resetting decoder\n");
      push_back_packet();
      reset_decoder();
      return DecoderOk;
      }

    /// Synthesize the packet; a non-zero return (not audio / bad packet) simply skips it
    if (vorbis_synthesis(&block,&op)==0)
      vorbis_synthesis_blockin(&dsp,&block);

    while((ngiven=vorbis_synthesis_pcmout(&dsp,&pcm))>0) {
      if (len>0) FXASSERT(stream_position+ngiven<=len);

      if (__unlikely(stream_position<stream_decode_offset)) {
        FXlong offset = FXMIN(ngiven,stream_decode_offset - stream_position);
        GM_DEBUG_PRINT("[vorbis] stream decode offset %ld. Skipping %ld of %ld \n",stream_decode_offset,offset,stream_decode_offset-stream_position);
        ngiven-=offset;
        stream_position+=offset;
        sample=offset;
        vorbis_synthesis_read(&dsp,offset);
        if (ngiven==0) continue;
        }
      else {
        sample=0;
        }

      for (ntotalsamples=ngiven;ntotalsamples>0;) {

        /// Get new buffer
        if (out==NULL) {
          out = engine->decoder->get_output_packet();
          if (out==NULL) return DecoderInterrupted;
          out->stream_position=stream_position;
          out->stream_length=len;
          out->af=af;
          navail = out->availableFrames();
          }

#ifdef HAVE_VORBIS_PLUGIN
        buf32 = out->flt();
#else // HAVE_TREMOR_PLUGIN
        buf32 = out->s16();
#endif
        /// Copy Samples
        nsamples = FXMIN(ntotalsamples,navail);
        for (p=0,s=sample;s<(nsamples+sample);s++){
          for (c=0;c<info.channels;c++,p++) {
#ifdef HAVE_VORBIS_PLUGIN
            buf32[p]=pcm[c][s];
#else
            buf32[p]=CLIP_TO_15(pcm[c][s]>>9);
#endif
            }
          }

        /// Update sample counts
        out->wroteFrames(nsamples);

        sample+=nsamples;
        navail-=nsamples;
        ntotalsamples-=nsamples;
        stream_position+=nsamples;

        /// Send out packet if full
        ///FIXME handle EOS.
        if (navail==0) {
          engine->output->post(out);
          out=NULL;
          }
        }
      vorbis_synthesis_read(&dsp,ngiven);
      }
    }

  if (eos) {
    if (out && out->numFrames())  {
      engine->output->post(out);
      out=NULL;
      }
    engine->output->post(new ControlEvent(End,id));
    }
  return DecoderOk;
  }
int main(int argc,char *argv[]){

  int i,j;
  ogg_packet op;

  FILE *infile = stdin;

#ifdef _WIN32 /* We need to set stdin/stdout to binary mode. Damn windows. */
  /* Beware the evil ifdef. We avoid these where we can, but this one we
     cannot. Don't add any more, you'll probably go to hell if you do. */
  _setmode( _fileno( stdin ), _O_BINARY );
#endif

  /* open the input file if any */
  if(argc==2){
    infile=fopen(argv[1],"rb");
    if(infile==NULL){
      fprintf(stderr,"Unable to open '%s' for playback.\n", argv[1]);
      exit(1);
    }
  }
  if(argc>2){
      usage();
      exit(1);
  }

  /* start up Ogg stream synchronization layer */
  ogg_sync_init(&oy);

  /* init supporting Vorbis structures needed in header parsing */
  vorbis_info_init(&vi);
  vorbis_comment_init(&vc);

  /* init supporting Theora structures needed in header parsing */
  theora_comment_init(&tc);
  theora_info_init(&ti);

  /* Ogg file open; parse the headers */
  /* Only interested in Vorbis/Theora streams */
  while(!stateflag){
    int ret=buffer_data(infile,&oy);
    if(ret==0)break;
    while(ogg_sync_pageout(&oy,&og)>0){
      ogg_stream_state test;

      /* is this a mandated initial header? If not, stop parsing */
      if(!ogg_page_bos(&og)){
        /* don't leak the page; get it into the appropriate stream */
        queue_page(&og);
        stateflag=1;
        break;
      }

      ogg_stream_init(&test,ogg_page_serialno(&og));
      ogg_stream_pagein(&test,&og);
      ogg_stream_packetout(&test,&op);

      /* identify the codec: try theora */
      if(!theora_p && theora_decode_header(&ti,&tc,&op)>=0){
        /* it is theora */
        memcpy(&to,&test,sizeof(test));
        theora_p=1;
      }else if(!vorbis_p && vorbis_synthesis_headerin(&vi,&vc,&op)>=0){
        /* it is vorbis */
        memcpy(&vo,&test,sizeof(test));
        vorbis_p=1;
      }else{
        /* whatever it is, we don't care about it */
        ogg_stream_clear(&test);
      }
    }
    /* fall through to non-bos page parsing */
  }

  /* we're expecting more header packets. */
  while((theora_p && theora_p<3) || (vorbis_p && vorbis_p<3)){
    int ret;

    /* look for further theora headers */
    while(theora_p && (theora_p<3) && (ret=ogg_stream_packetout(&to,&op))){
      if(ret<0){
        fprintf(stderr,"Error parsing Theora stream headers; corrupt stream?\n");
        exit(1);
      }
      if(theora_decode_header(&ti,&tc,&op)){
        printf("Error parsing Theora stream headers; corrupt stream?\n");
        exit(1);
      }
      theora_p++;
      if(theora_p==3)break;
    }

    /* look for more vorbis header packets */
    while(vorbis_p && (vorbis_p<3) && (ret=ogg_stream_packetout(&vo,&op))){
      if(ret<0){
        fprintf(stderr,"Error parsing Vorbis stream headers; corrupt stream?\n");
        exit(1);
      }
      if(vorbis_synthesis_headerin(&vi,&vc,&op)){
        fprintf(stderr,"Error parsing Vorbis stream headers; corrupt stream?\n");
        exit(1);
      }
      vorbis_p++;
      if(vorbis_p==3)break;
    }

    /* The header pages/packets will arrive before anything else we
       care about, or the stream is not obeying spec */

    if(ogg_sync_pageout(&oy,&og)>0){
      queue_page(&og); /* demux into the appropriate stream */
    }else{
      int ret=buffer_data(infile,&oy); /* someone needs more data */
      if(ret==0){
        fprintf(stderr,"End of file while searching for codec headers.\n");
        exit(1);
      }
    }
  }

  /* and now we have it all.  initialize decoders */
  if(theora_p){
    theora_decode_init(&td,&ti);
    printf("Ogg logical stream %x is Theora %dx%d %.02f fps video\n",
           (unsigned int)to.serialno,ti.width,ti.height, 
           (double)ti.fps_numerator/ti.fps_denominator);
    if(ti.width!=ti.frame_width || ti.height!=ti.frame_height)
      printf("  Frame content is %dx%d with offset (%d,%d).\n",
           ti.frame_width, ti.frame_height, ti.offset_x, ti.offset_y);
    report_colorspace(&ti);
    dump_comments(&tc);
  }else{
    /* tear down the partial theora setup */
    theora_info_clear(&ti);
    theora_comment_clear(&tc);
  }
  if(vorbis_p){
    vorbis_synthesis_init(&vd,&vi);
    vorbis_block_init(&vd,&vb);
    fprintf(stderr,"Ogg logical stream %x is Vorbis %d channel %d Hz audio.\n",
            (unsigned int)vo.serialno,vi.channels,(int)vi.rate);
  }else{
    /* tear down the partial vorbis setup */
    vorbis_info_clear(&vi);
    vorbis_comment_clear(&vc);
  }

  /* open audio */
  if(vorbis_p)open_audio();

  /* open video */
  if(theora_p)open_video();

  /* install signal handler as SDL clobbered the default */
  signal (SIGINT, sigint_handler);

  /* on to the main decode loop.  We assume in this example that audio
     and video start roughly together, and don't begin playback until
     we have a start frame for both.  This is not necessarily a valid
     assumption in Ogg A/V streams! It will always be true of the
     example_encoder (and most streams) though. */

  stateflag=0; /* playback has not begun */
  while(!got_sigint){

    /* we want a video and audio frame ready to go at all times.  If
       we have to buffer incoming, buffer the compressed data (ie, let
       ogg do the buffering) */
    while(vorbis_p && !audiobuf_ready){
      int ret;
      float **pcm;

      /* if there's pending, decoded audio, grab it */
      if((ret=vorbis_synthesis_pcmout(&vd,&pcm))>0){
        int count=audiobuf_fill/2; /* audiobuf holds 16-bit samples, so bytes/2 is the next index */
        int maxsamples=(audiofd_fragsize-audiobuf_fill)/2/vi.channels; /* frames of room left in this fragment */
        for(i=0;i<ret && i<maxsamples;i++)
          for(j=0;j<vi.channels;j++){
            int val=rint(pcm[j][i]*32767.f);
            if(val>32767)val=32767;
            if(val<-32768)val=-32768;
            audiobuf[count++]=val;
          }
        vorbis_synthesis_read(&vd,i);
        audiobuf_fill+=i*vi.channels*2;
        if(audiobuf_fill==audiofd_fragsize)audiobuf_ready=1;
        if(vd.granulepos>=0)
          audiobuf_granulepos=vd.granulepos-ret+i;
        else
          audiobuf_granulepos+=i;
        
      }else{
        
        /* no pending audio; is there a pending packet to decode? */
        if(ogg_stream_packetout(&vo,&op)>0){
          if(vorbis_synthesis(&vb,&op)==0) /* test for success! */
            vorbis_synthesis_blockin(&vd,&vb);
        }else   /* we need more data; break out to suck in another page */
          break;
      }
    }

    while(theora_p && !videobuf_ready){
      /* theora is one in, one out... */
      if(ogg_stream_packetout(&to,&op)>0){

        theora_decode_packetin(&td,&op);
        videobuf_granulepos=td.granulepos;
        
        videobuf_time=theora_granule_time(&td,videobuf_granulepos);

        /* is it already too old to be useful?  This is only actually
           useful cosmetically after a SIGSTOP.  Note that we have to
           decode the frame even if we don't show it (for now) due to
           keyframing.  Soon enough libtheora will be able to deal
           with non-keyframe seeks.  */

        if(videobuf_time>=get_time())
        videobuf_ready=1;
                
      }else
        break;
    }

    if(!videobuf_ready && !audiobuf_ready && feof(infile))break;

    if(!videobuf_ready || !audiobuf_ready){
      /* no data yet for somebody.  Grab another page */
      int bytes=buffer_data(infile,&oy);
      while(ogg_sync_pageout(&oy,&og)>0){
        queue_page(&og);
      }
    }

    /* If playback has begun, top audio buffer off immediately. */
    if(stateflag) audio_write_nonblocking();

    /* are we at or past time for this video frame? */
    if(stateflag && videobuf_ready && videobuf_time<=get_time()){
      video_write();
      videobuf_ready=0;
    }

    if(stateflag &&
       (audiobuf_ready || !vorbis_p) &&
       (videobuf_ready || !theora_p) &&
       !got_sigint){
      /* we have an audio frame ready (which means the audio buffer is
         full), it's not time to play video, so wait until one of the
         audio buffer is ready or it's near time to play video */
        
      /* set up select wait on the audiobuffer and a timeout for video */
      struct timeval timeout;
      fd_set writefs;
      fd_set empty;
      int n=0;

      FD_ZERO(&writefs);
      FD_ZERO(&empty);
      if(audiofd>=0){
        FD_SET(audiofd,&writefs);
        n=audiofd+1;
      }

      if(theora_p){
        long milliseconds=(videobuf_time-get_time())*1000-5;
        if(milliseconds>500)milliseconds=500;
        if(milliseconds>0){
          timeout.tv_sec=milliseconds/1000;
          timeout.tv_usec=(milliseconds%1000)*1000;

          n=select(n,&empty,&writefs,&empty,&timeout);
          if(n)audio_calibrate_timer(0);
        }
      }else{
        select(n,&empty,&writefs,&empty,NULL);
      }
    }

    /* if our buffers either don't exist or are ready to go,
       we can begin playback */
    if((!theora_p || videobuf_ready) &&
       (!vorbis_p || audiobuf_ready))stateflag=1;
    /* same if we've run out of input */
    if(feof(infile))stateflag=1;

  }

  /* tear it all down */

  audio_close();
  SDL_Quit();

  if(vorbis_p){
    ogg_stream_clear(&vo);
    vorbis_block_clear(&vb);
    vorbis_dsp_clear(&vd);
    vorbis_comment_clear(&vc);
    vorbis_info_clear(&vi);
  }
  if(theora_p){
    ogg_stream_clear(&to);
    theora_clear(&td);
    theora_comment_clear(&tc);
    theora_info_clear(&ti);
  }
  ogg_sync_clear(&oy);

  if(infile && infile!=stdin)fclose(infile);

  fprintf(stderr,
          "\r                                                              "
          "\nDone.\n");
  return(0);

}
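
In the audio loop above, the granule position is the playback clock: vd.granulepos names the last sample the decoder has produced, so subtracting the frames still pending gives the position of the fragment just filled, and dividing by vi.rate converts it to seconds. A small hedged helper restating that bookkeeping (it assumes the same vorbis/codec.h context as the example; names are illustrative):

/* Position (in samples) and time (in seconds) of the end of the audio fragment
   just filled.  'ret' is what vorbis_synthesis_pcmout() reported, 'consumed' is
   how many of those frames were copied out and acknowledged with
   vorbis_synthesis_read(). */
static double fragment_end_time(vorbis_dsp_state *vd, vorbis_info *vi,
                                int ret, int consumed, ogg_int64_t *granulepos){
  if(vd->granulepos>=0)
    *granulepos=vd->granulepos-ret+consumed;   /* decoder knows where we are */
  else
    *granulepos+=consumed;                     /* no granulepos yet; keep counting */
  return (double)*granulepos/vi->rate;
}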
Example #29
0
static sf_count_t
vorbis_read_sample (SF_PRIVATE *psf, void *ptr, sf_count_t lens, convert_func *transfn)
{	VORBIS_PRIVATE *vdata = psf->codec_data ;
	OGG_PRIVATE *odata = psf->container_data ;
	int len, samples, i = 0 ;
	float **pcm ;

	len = lens / psf->sf.channels ;

	while ((samples = vorbis_synthesis_pcmout (&vdata->vdsp, &pcm)) > 0)
	{	if (samples > len) samples = len ;
		i += transfn (psf, samples, ptr, i, psf->sf.channels, pcm) ;
		len -= samples ;
		/* tell libvorbis how many samples we actually consumed */
		vorbis_synthesis_read (&vdata->vdsp, samples) ;
		vdata->loc += samples ;
		if (len == 0)
			return i ; /* Is this necessary */
	}
	goto start0 ;		/* Jump into the nasty nest: drain packets already queued on the current page before pulling new data */
	while (len > 0 && !odata->eos)
	{	while (len > 0 && !odata->eos)
		{	int result = ogg_sync_pageout (&odata->osync, &odata->opage) ;
			if (result == 0) break ; /* need more data */
			if (result < 0)
			{	/* missing or corrupt data at this page position */
				psf_log_printf (psf, "Corrupt or missing data in bitstream ; continuing...\n") ;
			}
			else
			{	/* can safely ignore errors at this point */
				ogg_stream_pagein (&odata->ostream, &odata->opage) ;
start0:
				while (1)
				{	result = ogg_stream_packetout (&odata->ostream, &odata->opacket) ;
					if (result == 0)
						break ; /* need more data */
					if (result < 0)
					{	/* missing or corrupt data at this page position */
						/* no reason to complain ; already complained above */
					}
					else
					{	/* we have a packet.	Decode it */
						if (vorbis_synthesis (&vdata->vblock, &odata->opacket) == 0) /* test for success! */
							vorbis_synthesis_blockin (&vdata->vdsp, &vdata->vblock) ;
						/*
						** pcm is a multichannel float vector.	 In stereo, for
						** example, pcm [0] is left, and pcm [1] is right.	 samples is
						** the size of each channel.	 Convert the float values
						** (-1.<=range<=1.) to whatever PCM format and write it out.
						*/

						while ((samples = vorbis_synthesis_pcmout (&vdata->vdsp, &pcm)) > 0)
						{	if (samples > len) samples = len ;
							i += transfn (psf, samples, ptr, i, psf->sf.channels, pcm) ;
							len -= samples ;
							/* tell libvorbis how many samples we actually consumed */
							vorbis_synthesis_read (&vdata->vdsp, samples) ;
							vdata->loc += samples ;
							if (len == 0)
								return i ; /* Is this necessary */
						} ;
					}
				}
				if (ogg_page_eos (&odata->opage)) odata->eos = 1 ;
			}
		}
		if (!odata->eos)
		{	char *buffer ;
			int bytes ;
			buffer = ogg_sync_buffer (&odata->osync, 4096) ;
			bytes = psf_fread (buffer, 1, 4096, psf) ;
			ogg_sync_wrote (&odata->osync, bytes) ;
			if (bytes == 0) odata->eos = 1 ;
		}
	}
	return i ;
} /* vorbis_read_sample */
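
vorbis_read_sample() above leaves the actual sample conversion to the transfn callback: it copies from the planar float buffers into the caller's interleaved output at the given offset and returns how many values it wrote. A hedged sketch of such a callback for 16-bit output, assuming the argument order implied by the call sites above (the real libsndfile converters differ in detail):

static int
vorbis_to_short (SF_PRIVATE *psf, int samples, void *vptr, int off, int channels, float **pcm)
{	short *ptr = (short *) vptr + off ;
	int i, j, n = 0 ;

	(void) psf ;	/* unused in this sketch */

	/* interleave: frames outer, channels inner, clamping to the 16-bit range */
	for (j = 0 ; j < samples ; j++)
		for (i = 0 ; i < channels ; i++)
		{	float value = pcm [i][j] * 32767.0f ;
			if (value > 32767.0f) value = 32767.0f ;
			if (value < -32768.0f) value = -32768.0f ;
			ptr [n++] = (short) value ;
			} ;

	return n ;
} /* vorbis_to_short */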