Code Example #1
File: freedv_rx.c  Project: Enny1991/FreeDV-Android
int resample_48k_to_8k(
        short      output_short[],
        short      input_short[],
        int        length_output_short, // maximum output array length in samples 
        int        length_input_short
        )
{
    SRC_DATA src_data;
    float    input[N48*2];
    float    output[N48*2];

    const int input_sample_rate = 48000;
    const int output_sample_rate = 8000;

    assert(length_input_short <= N48*2);
    assert(length_output_short <= N48*2);

    src_short_to_float_array(input_short, input, length_input_short);

    src_data.data_in = input;
    src_data.data_out = output;
    src_data.input_frames = length_input_short;
    src_data.output_frames = length_output_short;
    src_data.end_of_input = 0;
    src_data.src_ratio = (float)output_sample_rate/input_sample_rate;

    src_process(insrc1, &src_data);

    assert(src_data.output_frames_gen <= length_output_short);
    src_float_to_short_array(output, output_short, src_data.output_frames_gen);

    return src_data.output_frames_gen;
}
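
Note: the example above feeds a persistent converter (insrc1) through src_process(). For reference, the same mono 48 kHz -> 8 kHz conversion can also be done as a one-shot with src_simple(), which needs no converter state. The sketch below is illustrative only and is not taken from the project above; the function name and the 10 ms buffer sizes (IN_LEN, OUT_LEN) are assumptions.

#include <samplerate.h>

#define IN_LEN  480   /* 10 ms of mono audio at 48 kHz (assumed block size) */
#define OUT_LEN 80    /* 10 ms of mono audio at 8 kHz */

static int resample_once(const short in_short[IN_LEN], short out_short[OUT_LEN])
{
    float    in[IN_LEN], out[OUT_LEN];
    SRC_DATA d;

    /* 16-bit PCM -> float, as in the examples on this page */
    src_short_to_float_array(in_short, in, IN_LEN);

    d.data_in       = in;
    d.data_out      = out;
    d.input_frames  = IN_LEN;            /* mono: frames == samples */
    d.output_frames = OUT_LEN;
    d.src_ratio     = 8000.0 / 48000.0;

    /* one-shot conversion; src_simple() creates and destroys the state itself */
    if (src_simple(&d, SRC_SINC_FASTEST, 1) != 0)
        return -1;

    /* float -> 16-bit PCM for only the frames actually generated */
    src_float_to_short_array(out, out_short, (int)d.output_frames_gen);
    return (int)d.output_frames_gen;
}
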
Code Example #2
File: dsp.cpp  Project: melchor629/butt
void DSPEffects::processSamples(short *samples) {
    if(dsp_buff == NULL) return;
    
    if(cfg.dsp.equalizer) {
        band1->setPeakGain(cfg.dsp.gain1);
        band2->setPeakGain(cfg.dsp.gain2);
        band3->setPeakGain(cfg.dsp.gain3);
        band4->setPeakGain(cfg.dsp.gain4);
        band5->setPeakGain(cfg.dsp.gain5);
    }

    src_short_to_float_array(samples, dsp_buff, dsp_size);
    for(uint32_t sample = 0; sample < dsp_size; sample++) {
        if(cfg.main.gain != 1)
            dsp_buff[sample] *= cfg.main.gain;
        if(cfg.dsp.equalizer) {
            float s = band5->process(dsp_buff[sample]);
            s = band4->process(s);
            s = band3->process(s);
            s = band2->process(s);
            dsp_buff[sample] = band1->process(s);
        }
        if(cfg.dsp.compressor)
            dsp_buff[sample] = logdrc(dsp_buff[sample], cfg.dsp.compQuantity);
    }
    src_float_to_short_array(dsp_buff, samples, dsp_size);
}
Code Example #3
const int16_t *
pcm_resample_lsr_16(struct pcm_resample_state *state,
		    uint8_t channels,
		    unsigned src_rate,
		    const int16_t *src_buffer, size_t src_size,
		    unsigned dest_rate, size_t *dest_size_r,
		    GError **error_r)
{
	bool success;
	SRC_DATA *data = &state->data;
	size_t data_in_size;
	size_t data_out_size;
	int error;
	int16_t *dest_buffer;

	assert((src_size % (sizeof(*src_buffer) * channels)) == 0);

	success = pcm_resample_set(state, channels, src_rate, dest_rate,
				   error_r);
	if (!success)
		return NULL;

	/* there was an error previously, and nothing has changed */
	if (state->error) {
		g_set_error(error_r, libsamplerate_quark(), state->error,
			    "libsamplerate has failed: %s",
			    src_strerror(state->error));
		return NULL;
	}

	data->input_frames = src_size / sizeof(*src_buffer) / channels;
	data_in_size = data->input_frames * sizeof(float) * channels;
	data->data_in = pcm_buffer_get(&state->in, data_in_size);

	data->output_frames = (src_size * dest_rate + src_rate - 1) / src_rate;
	data_out_size = data->output_frames * sizeof(float) * channels;
	data->data_out = pcm_buffer_get(&state->out, data_out_size);

	src_short_to_float_array(src_buffer, data->data_in,
				 data->input_frames * channels);

	error = src_process(state->state, data);
	if (error) {
		g_set_error(error_r, libsamplerate_quark(), error,
			    "libsamplerate has failed: %s",
			    src_strerror(error));
		state->error = error;
		return NULL;
	}

	*dest_size_r = data->output_frames_gen *
		sizeof(*dest_buffer) * channels;
	dest_buffer = pcm_buffer_get(&state->buffer, *dest_size_r);
	src_float_to_short_array(data->data_out, dest_buffer,
				 data->output_frames_gen * channels);

	return dest_buffer;
}
Code Example #4
File: AmAudio.cpp  Project: BackupTheBerlios/sems-svn
unsigned int AmAudio::downMix(unsigned int size)
{
  unsigned int s = size;
  if(fmt->channels == 2){
    stereo2mono(samples.back_buffer(),(unsigned char*)samples,s);
    samples.swap();
  }

#ifdef USE_LIBSAMPLERATE 
  if (fmt->rate != SYSTEM_SAMPLERATE) {
    if (!resample_state) {
      int src_error;
      // for better quality but more CPU usage, use SRC_SINC_ converters
      resample_state = src_new(SRC_LINEAR, 1, &src_error);
      if (!resample_state) {
	ERROR("samplerate initialization error: ");
      }
    }

    if (resample_state) {
      if (resample_buf_samples + PCM16_B2S(s) > PCM16_B2S(AUDIO_BUFFER_SIZE) * 2) {
	WARN("resample input buffer overflow! (%d)\n",
	     resample_buf_samples + PCM16_B2S(s));
      } else {
	signed short* samples_s = (signed short*)(unsigned char*)samples;
	src_short_to_float_array(samples_s, &resample_in[resample_buf_samples], PCM16_B2S(s));
	resample_buf_samples += PCM16_B2S(s);
      }
      
      SRC_DATA src_data;
      src_data.data_in = resample_in;
      src_data.input_frames = resample_buf_samples;
      src_data.data_out = resample_out;
      src_data.output_frames = PCM16_B2S(AUDIO_BUFFER_SIZE);
      src_data.src_ratio = (double)SYSTEM_SAMPLERATE / (double)fmt->rate;
      src_data.end_of_input = 0;

      int src_err = src_process(resample_state, &src_data);
      if (src_err) {
	DBG("resample error: '%s'\n", src_strerror(src_err));
      }else {
	signed short* samples_s = (signed short*)(unsigned char*)samples;
	src_float_to_short_array(resample_out, samples_s, src_data.output_frames_gen);
	s = PCM16_S2B(src_data.output_frames_gen);

	if (resample_buf_samples !=  (unsigned int)src_data.input_frames_used) {
	  memmove(resample_in, &resample_in[src_data.input_frames_used], 
		  (resample_buf_samples - src_data.input_frames_used) * sizeof(float));
	}
	resample_buf_samples = resample_buf_samples - src_data.input_frames_used;
      }
    }
  }
#endif
 

  return s;
}
Code Example #5
File: AmAudio.cpp  Project: FihlaTV/sems-amr
unsigned int AmLibSamplerateResamplingState::resample(unsigned char* samples, unsigned int s, double ratio)
{
  DBG("resampling packet of size %d with ratio %f", s, ratio);
  if (!resample_state) {
    int src_error;
    // for better quality but more CPU usage, use SRC_SINC_ converters
    resample_state = src_new(SRC_LINEAR, 1, &src_error);
    if (!resample_state) {
      ERROR("samplerate initialization error: ");
    }
  }

  if (resample_state) {
    if (resample_buf_samples + PCM16_B2S(s) > PCM16_B2S(AUDIO_BUFFER_SIZE) * 2) {
      WARN("resample input buffer overflow! (%lu)\n", resample_buf_samples + PCM16_B2S(s));
    } else if (resample_out_buf_samples + (PCM16_B2S(s) * ratio) + 20 > PCM16_B2S(AUDIO_BUFFER_SIZE)) {
      WARN("resample: possible output buffer overflow! (%lu)\n", (resample_out_buf_samples + (size_t) ((PCM16_B2S(s) * ratio)) + 20));
    } else {
      signed short* samples_s = (signed short*)samples;
      src_short_to_float_array(samples_s, &resample_in[resample_buf_samples], PCM16_B2S(s));
      resample_buf_samples += PCM16_B2S(s);
    }

    SRC_DATA src_data;
    src_data.data_in = resample_in;
    src_data.input_frames = resample_buf_samples;
    src_data.data_out = &resample_out[resample_out_buf_samples];
    src_data.output_frames = PCM16_B2S(AUDIO_BUFFER_SIZE);
    src_data.src_ratio = ratio;
    src_data.end_of_input = 0;

    int src_err = src_process(resample_state, &src_data);
    if (src_err) {
      DBG("resample error: '%s'\n", src_strerror(src_err));
    }else {
      signed short* samples_s = (signed short*)(unsigned char*)samples;
      resample_out_buf_samples += src_data.output_frames_gen;
      s *= ratio;
      src_float_to_short_array(resample_out, samples_s, PCM16_B2S(s));
      DBG("resample: output_frames_gen = %ld", src_data.output_frames_gen);

      if (resample_buf_samples !=  (unsigned int)src_data.input_frames_used) {
	memmove(resample_in, &resample_in[src_data.input_frames_used],
		(resample_buf_samples - src_data.input_frames_used) * sizeof(float));
      }
      resample_buf_samples = resample_buf_samples - src_data.input_frames_used;

      if (resample_out_buf_samples != s) {
	memmove(resample_out, &resample_out[PCM16_B2S(s)], (resample_out_buf_samples - PCM16_B2S(s)) * sizeof(float));
      }
      resample_out_buf_samples -= PCM16_B2S(s);
    }
  }

  DBG("resample: output size is %d", s);
  return s;
}
Code Example #6
File: resample.cpp  Project: lenalebt/libmusic
 bool Resampler22kHzMono::resample(uint32_t fromSampleRate, int16_t** samplePtr, unsigned int& sampleCount, unsigned int channelCount) const
 {
     //first convert to mono - we do not need stereo or more channels.
     unsigned int frameCount = sampleCount/channelCount;
     int16_t* monoSamples = new int16_t[frameCount];
     if (!monoSamples)   //failed to get memory
         return false;
     
     int16_t* samples = *samplePtr;
     for (unsigned int i = 0; i < frameCount; i++)
     {
         int32_t tmpVal = 0;
         int offset = channelCount * i;
         for (unsigned int j = 0; j < channelCount; j++)
         {
             tmpVal += samples[offset + j];
         }
         monoSamples[i] = tmpVal / channelCount;
     }
     //should have mono samples now. we can discard the old samples.
     delete[] samples;
     samples = NULL;
     sampleCount = frameCount;
     *samplePtr = monoSamples;
     
     SRC_DATA srcdata;
     
     srcdata.data_in = new float[sampleCount];
     src_short_to_float_array(*samplePtr, srcdata.data_in, sampleCount);
     delete[] *samplePtr;
     
     srcdata.input_frames = sampleCount;
     
     srcdata.output_frames = int(sampleCount * 22050.0 / double(fromSampleRate)) + 1;
     srcdata.data_out = new float[srcdata.output_frames];
     
     srcdata.src_ratio = 22050.0 / double(fromSampleRate);
     
     //do some fast conversion at reasonable quality
     if (src_simple(&srcdata, SRC_SINC_FASTEST, 1) != 0)
         return false;
     
     delete[] srcdata.data_in;
     
     *samplePtr = new int16_t[srcdata.output_frames];
     
     src_float_to_short_array(srcdata.data_out, *samplePtr, srcdata.output_frames);
     delete[] srcdata.data_out;
     
     sampleCount = srcdata.output_frames;
     
     return true;
 }
Code Example #7
File: resample_jni.cpp  Project: EQ4/sipdroid-pro
JNIEXPORT jint JNICALL Java_org_sipdroid_media_file_AudioFile_nresample(JNIEnv* env, jobject obj, jdouble ratio, jshortArray inBuffer, jshortArray outBuffer){
	if(ready){
		pthread_mutex_lock(&lock);
		// initialize converter
		if(converter == NULL){
			int error;
			converter = src_new(SRC_SINC_MEDIUM_QUALITY, 1, &error);
			if(converter == NULL){
				__android_log_print(ANDROID_LOG_DEBUG, DEBUG_TAG, "unable to initialize sample rate converter: %s", src_strerror(error));
				pthread_mutex_unlock(&lock);
				return -1;
			}
		}
		// prepare buffers
		jint input_len = env->GetArrayLength(inBuffer);
		float* fl_inBuffer = (float*) malloc(input_len * sizeof(float));
		short* sh_inBuffer = env->GetShortArrayElements(inBuffer, NULL);
		src_short_to_float_array(sh_inBuffer, fl_inBuffer, input_len);
		env->ReleaseShortArrayElements(inBuffer, sh_inBuffer, 0);

		jint output_len = env->GetArrayLength(outBuffer);
		float* fl_outBuffer = (float*) malloc(sizeof(float) * output_len);

		SRC_DATA src_data;
		src_data.data_in = fl_inBuffer;
		src_data.input_frames = (long) input_len;
		src_data.data_out = fl_outBuffer;
		src_data.output_frames = (long) output_len;
		src_data.src_ratio = (double) 1/ratio;
		src_data.end_of_input = 0;

		// resample
		int error;
		if ((error = src_process(converter, &src_data)) == 0){
			// convert output back to short and write to outBuffer
			short* sh_outBuffer = env->GetShortArrayElements(outBuffer, NULL);
			src_float_to_short_array(src_data.data_out, sh_outBuffer, src_data.output_frames_gen);
			env->ReleaseShortArrayElements(outBuffer, sh_outBuffer, 0);
		}
		else{
			__android_log_print(ANDROID_LOG_DEBUG, DEBUG_TAG, "resampling error: %s", src_strerror(error));
		}

		free(fl_outBuffer);
		free(fl_inBuffer);

		pthread_mutex_unlock(&lock);
		return src_data.output_frames_gen;
	}
	return 0;
}
Code Example #8
PJ_DEF(void) pjmedia_resample_run( pjmedia_resample *resample,
				   const pj_int16_t *input,
				   pj_int16_t *output )
{
    SRC_DATA src_data;

    /* Convert samples to float */
    src_short_to_float_array(input, resample->frame_in, 
			     resample->in_samples);

    if (resample->in_extra) {
	unsigned i;

	for (i=0; i<resample->in_extra; ++i)
	    resample->frame_in[resample->in_samples+i] =
		resample->frame_in[resample->in_samples-1];
    }

    /* Prepare SRC_DATA */
    pj_bzero(&src_data, sizeof(src_data));
    src_data.data_in = resample->frame_in;
    src_data.data_out = resample->frame_out;
    src_data.input_frames = resample->in_samples + resample->in_extra;
    src_data.output_frames = resample->out_samples + resample->out_extra;
    src_data.src_ratio = resample->ratio;

    /* Process! */
    src_process(resample->state, &src_data);

    /* Convert output back to short */
    src_float_to_short_array(resample->frame_out, output,
			     src_data.output_frames_gen);

    /* Replay last sample if conversion couldn't fill up the whole 
     * frame. This could happen for example with 22050 to 16000 conversion.
     */
    if (src_data.output_frames_gen < (int)resample->out_samples) {
	unsigned i;

	if (resample->in_extra < 4)
	    resample->in_extra++;

	for (i=src_data.output_frames_gen; 
	     i<resample->out_samples; ++i)
	{
	    output[i] = output[src_data.output_frames_gen-1];
	}
    }
}
Code Example #9
bool
Deck_playback_process::play_main_track(QVector<float*> &io_playback_bufs, const unsigned short int &buf_size)
{
    // Prevent sample table overflow if going forward.
    if ((this->param->get_speed() >= 0.0) &&
       ((this->current_sample + 1) > (this->at->get_end_of_samples() - buf_size)))
    {
        qCDebug(DS_PLAYBACK) << "audio track sample table overflow";
        this->play_silence(io_playback_bufs, buf_size);

        // Update remaining time to 0.
        this->update_remaining_time();

        return false;
    }

#if 0
    // Fake implementation (just play track), do not use playback parameters.
    short signed int *sample_pointer = &this->at->get_samples()[this->current_sample];
    std::copy(sample_pointer, sample_pointer + (nb_samples * 2), this->src_int_input_data);
    src_short_to_float_array(this->src_int_input_data, this->src_float_input_data, nb_samples * 2);

    float *ptr = this->src_float_input_data;
    for (int i = 0; i < nb_samples; i++)
    {
        io_samples[0][i] = *ptr;
        ptr++;
        io_samples[1][i] = *ptr;
        ptr++;
    }

    this->current_sample += nb_samples*2;
#else
    if (this->paused == true)
    {
        this->play_silence(io_playback_bufs, buf_size);
    }
    else if (this->play_data_with_playback_parameters(io_playback_bufs, buf_size) == false)
    {
        qCWarning(DS_PLAYBACK) << "can not prepare data using playback parameters";
        this->play_silence(io_playback_bufs, buf_size);
        return false;
    }
#endif

    return true;
}
Code Example #10
File: tsrc.c  Project: fl04t/codec2
int main(int argc, char *argv[]) {
    FILE       *f8k, *fout;
    short       in8k_short[N8];
    float       in8k[N8];
    float       out[N48];
    short       out_short[N48];
    SRC_STATE  *src;
    SRC_DATA    data;
    int         error;

    if (argc != 4) {
	printf("usage %s inputRawFile OutputRawFile OutputSamplerate\n", argv[0]);
	exit(0);
    }

    f8k = fopen(argv[1], "rb");
    assert(f8k != NULL);

    fout = fopen(argv[2], "wb");
    assert(fout != NULL);

    src = src_new(SRC_SINC_FASTEST, 1, &error);
    assert(src != NULL);

    data.data_in = in8k;
    data.data_out = out;
    data.input_frames = N8;
    data.output_frames = N48;
    data.end_of_input = 0;
    data.src_ratio = atof(argv[3])/8000;
    printf("%f\n", data.src_ratio);

    while(fread(in8k_short, sizeof(short), N8, f8k) == N8) {
	src_short_to_float_array(in8k_short, in8k, N8);
	src_process(src, &data);
	printf("%d %d\n", (int)data.output_frames , (int)data.output_frames_gen);
	assert(data.output_frames_gen <= N48);
	src_float_to_short_array(out, out_short, data.output_frames_gen);
	fwrite(out_short, sizeof(short), data.output_frames_gen, fout);
    }

    fclose(fout);
    fclose(f8k);

    return 0;
}
Code Example #11
static void pcm_src_convert_s16(void *obj, int16_t *dst, unsigned int dst_frames,
				const int16_t *src, unsigned int src_frames)
{
	struct rate_src *rate = obj;
	unsigned int ofs;

	rate->data.input_frames = src_frames;
	rate->data.output_frames = dst_frames;
	rate->data.end_of_input = 0;
	
	src_short_to_float_array(src, rate->src_buf, src_frames * rate->channels);
	src_process(rate->state, &rate->data);
	if (rate->data.output_frames_gen < dst_frames)
		ofs = dst_frames - rate->data.output_frames_gen;
	else
		ofs = 0;
	src_float_to_short_array(rate->dst_buf, dst + ofs * rate->channels,
				 rate->data.output_frames_gen * rate->channels);
}
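
The snippet above assumes that rate->data.data_in, rate->data.data_out and rate->data.src_ratio, along with the float scratch buffers rate->src_buf and rate->dst_buf, were filled in elsewhere in the surrounding code. A minimal sketch of what that setup might look like follows; the struct layout and the setup function here are assumptions for illustration, not the project's actual code.

#include <errno.h>
#include <stdlib.h>
#include <samplerate.h>

/* minimal assumed layout, mirroring only the fields used above */
struct rate_src {
	SRC_STATE *state;
	SRC_DATA data;
	unsigned int channels;
	float *src_buf;
	float *dst_buf;
};

static int pcm_src_setup(struct rate_src *rate, unsigned int channels,
			 unsigned int in_rate, unsigned int out_rate,
			 unsigned int max_in_frames, unsigned int max_out_frames)
{
	int err;

	rate->channels = channels;
	rate->state = src_new(SRC_SINC_FASTEST, channels, &err);
	if (rate->state == NULL)
		return -err;

	/* persistent float scratch buffers reused on every convert call */
	rate->src_buf = malloc(max_in_frames * channels * sizeof(float));
	rate->dst_buf = malloc(max_out_frames * channels * sizeof(float));
	if (rate->src_buf == NULL || rate->dst_buf == NULL)
		return -ENOMEM;

	rate->data.data_in = rate->src_buf;
	rate->data.data_out = rate->dst_buf;
	rate->data.src_ratio = (double) out_rate / (double) in_rate;
	rate->data.end_of_input = 0;
	return 0;
}
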
Code Example #12
const int16_t *
pcm_resample_lsr_16(struct pcm_resample_state *state,
		    unsigned channels,
		    unsigned src_rate,
		    const int16_t *src_buffer, size_t src_size,
		    unsigned dest_rate, size_t *dest_size_r,
		    GError **error_r)
{
	bool success;
	SRC_DATA *data = &state->data;
	size_t data_in_size;
	size_t data_out_size;
	int16_t *dest_buffer;

	assert((src_size % (sizeof(*src_buffer) * channels)) == 0);

	success = pcm_resample_set(state, channels, src_rate, dest_rate,
				   error_r);
	if (!success)
		return NULL;

	data->input_frames = src_size / sizeof(*src_buffer) / channels;
	data_in_size = data->input_frames * sizeof(float) * channels;
	data->data_in = pcm_buffer_get(&state->in, data_in_size);

	data->output_frames = (src_size * dest_rate + src_rate - 1) / src_rate;
	data_out_size = data->output_frames * sizeof(float) * channels;
	data->data_out = pcm_buffer_get(&state->out, data_out_size);

	src_short_to_float_array(src_buffer, data->data_in,
				 data->input_frames * channels);

	if (!lsr_process(state, error_r))
		return NULL;

	*dest_size_r = data->output_frames_gen *
		sizeof(*dest_buffer) * channels;
	dest_buffer = pcm_buffer_get(&state->buffer, *dest_size_r);
	src_float_to_short_array(data->data_out, dest_buffer,
				 data->output_frames_gen * channels);

	return dest_buffer;
}
Code Example #13
File: Sample.cpp  Project: dcsch/asiotest
void Sample::Load(Reader &reader)
{
	UInt8 *rawBuffer;
	Length rawBufferLength;
	WaveAudioLoader loader;
	loader.Load(reader, &rawBuffer, &rawBufferLength);

	_rate = loader.getFormatChunk()->nSamplesPerSec;
	_channels = loader.getFormatChunk()->nChannels;

	Length rawBufferLength2;
	UInt8 *rawBuffer2;
	if (_channels == 1)
	{
		// Convert to 2-channel
		rawBufferLength2 = 2 * rawBufferLength;
		rawBuffer2 = new UInt8[(UInt32)rawBufferLength2];
		Int16 *src = reinterpret_cast<Int16 *>(rawBuffer);
		Int16 *dst = reinterpret_cast<Int16 *>(rawBuffer2);
		for (UInt32 i = 0; i < rawBufferLength / 2; ++i)
		{
			Int16 sample = *src++;
			*dst++ = sample;
			*dst++ = sample;
		}
		_channels = 2;
	}
	else
	{
		rawBufferLength2 = rawBufferLength;
		rawBuffer2 = rawBuffer;
	}

	// Convert to a float buffer
	_frameCount = rawBufferLength2 / 4;
	_buffer = new float[(UInt32)rawBufferLength2 / 2];
	src_short_to_float_array((const short *)rawBuffer2, _buffer, (int)rawBufferLength2 / 2);

	delete [] rawBuffer;
	if (rawBuffer2 != rawBuffer)
		delete [] rawBuffer2;
}
Code Example #14
File: AY8910.c  Project: jvernet/apple2
static void
sound_resample( void )
{
  int error;
  SRC_DATA data;

  data.data_in = convert_input_buffer;
  data.input_frames = sound_generator_framesiz;
  data.data_out = convert_output_buffer;
  data.output_frames = sound_framesiz;
  data.src_ratio =
    ( double ) settings_current.sound_freq / sound_generator_freq;
  data.end_of_input = 0;

  src_short_to_float_array( ( const short * ) sound_buf, convert_input_buffer,
			    sound_generator_framesiz * sound_channels );

  while( data.input_frames ) {
    error = src_process( src_state, &data );
    if( error ) {
      ui_error( UI_ERROR_ERROR, "hifi sound downsample error %s",
		src_strerror( error ) );
      sound_end(_this);
      return;
    }

    src_float_to_short_array( convert_output_buffer, ( short * ) sound_buf,
			      data.output_frames_gen * sound_channels );

    sound_lowlevel_frame( sound_buf,
			  data.output_frames_gen * sound_channels );

    data.data_in += data.input_frames_used * sound_channels;
    data.input_frames -= data.input_frames_used;
  }
}
Code Example #15
File: aacPlusEncoder.cpp  Project: bryangrim/darkice
/*------------------------------------------------------------------------------
 *  Write data to the encoder
 *----------------------------------------------------------------------------*/
unsigned int
aacPlusEncoder :: write (  const void    * buf,
                        unsigned int    len )           throw ( Exception )
{
    if ( !isOpen() || len == 0) {
        return 0;
    }
    
    unsigned int    channels         = getInChannel();
    unsigned int    bitsPerSample    = getInBitsPerSample();
    unsigned int    sampleSize       = (bitsPerSample / 8) * channels;
    unsigned int    processed        = len - (len % sampleSize);
    unsigned int    nSamples         = processed / sampleSize;
    unsigned int    samples          = (unsigned int) nSamples * channels;
    int processedSamples = 0;
    
    

    if ( converter ) {
        unsigned int         converted;
#ifdef HAVE_SRC_LIB
        src_short_to_float_array ((short *) buf, converterData.data_in, samples);
        converterData.input_frames   = nSamples;
        converterData.data_out = resampledOffset + (resampledOffsetSize * channels);
        int srcError = src_process (converter, &converterData);
        if (srcError)
             throw Exception (__FILE__, __LINE__, "libsamplerate error: ", src_strerror (srcError));
        converted = converterData.output_frames_gen;
#else
        int         inCount  = nSamples;
        short int     * shortBuffer  = new short int[samples];
        int         outCount = (int) (inCount * resampleRatio);
        unsigned char * b = (unsigned char*) buf;
        Util::conv( bitsPerSample, b, processed, shortBuffer, isInBigEndian());
        converted = converter->resample( inCount,
                                         outCount+1,
                                         shortBuffer,
                                         &resampledOffset[resampledOffsetSize*channels]);
        delete[] shortBuffer;
#endif
        resampledOffsetSize += converted;

        // encode samples (if enough)
        while(resampledOffsetSize - processedSamples >= inSamples/channels) {
#ifdef HAVE_SRC_LIB
            short *shortData = new short[inSamples];
            src_float_to_short_array(resampledOffset + (processedSamples * channels),
                                     shortData, inSamples) ;

            encodeAacSamples (shortData, inSamples, channels);
            delete [] shortData;
#else
            encodeAacSamples (&resampledOffset[processedSamples*channels], inSamples, channels);
#endif
            processedSamples+=inSamples/channels;
        }

        if (processedSamples && (int) resampledOffsetSize >= processedSamples) {
            resampledOffsetSize -= processedSamples;
            //move least part of resampled data to beginning
            if(resampledOffsetSize)
#ifdef HAVE_SRC_LIB
                resampledOffset = (float *) memmove(resampledOffset, &resampledOffset[processedSamples*channels],
                                                    resampledOffsetSize*channels*sizeof(float));
#else
                resampledOffset = (short *) memmove(resampledOffset, &resampledOffset[processedSamples*channels],
                                                    resampledOffsetSize*sampleSize);
#endif
        }
    } else {
        encodeAacSamples ((short *) buf, samples, channels);
    }

    return samples;
}
Code Example #16
/* Sound data resampling */
static int raw_resample(char *fname, double ratio)
{
	int res = 0;
	FILE *fl;
	struct stat st;
	int in_size;
	short *in_buff, *out_buff;
	long in_frames, out_frames;
	float *inp, *outp;
	SRC_DATA rate_change;

	if ((fl = fopen(fname, "r")) == NULL) {
		ast_log(LOG_ERROR, "eSpeak: Failed to open file for resampling.\n");
		return -1;
	}
	if ((stat(fname, &st) == -1)) {
		ast_log(LOG_ERROR, "eSpeak: Failed to stat file for resampling.\n");
		fclose(fl);
		return -1;
	}
	in_size = st.st_size;
	if ((in_buff = ast_malloc(in_size)) == NULL) {
		fclose(fl);
		return -1;
	}
	if ((fread(in_buff, 1, in_size, fl) != (size_t)in_size)) {
		ast_log(LOG_ERROR, "eSpeak: Failed to read file for resampling.\n");
		fclose(fl);
		res = -1;
		goto CLEAN1;
	}
	fclose(fl);
	in_frames = in_size / 2;

	if ((inp = (float *)(ast_malloc(in_frames * sizeof(float)))) == NULL) {
		res = -1;
		goto CLEAN1;
	}
	src_short_to_float_array(in_buff, inp, in_size/sizeof(short));
	out_frames = (long)((double)in_frames * ratio);
	if ((outp = (float *)(ast_malloc(out_frames * sizeof(float)))) == NULL) {
		res = -1;
		goto CLEAN2;
	}
	rate_change.data_in = inp;
	rate_change.data_out = outp;
	rate_change.input_frames = in_frames;
	rate_change.output_frames = out_frames;
	rate_change.src_ratio = ratio;

	if ((res = src_simple(&rate_change, SRC_SINC_FASTEST, 1)) != 0) {
		ast_log(LOG_ERROR, "eSpeak: Failed to resample sound file '%s': '%s'\n",
				fname, src_strerror(res));
		res = -1;
		goto CLEAN3;
	}

	if ((out_buff = ast_malloc(out_frames*sizeof(float))) == NULL) {
		res = -1;
		goto CLEAN3;
	}
	src_float_to_short_array(rate_change.data_out, out_buff, out_frames);
	if ((fl = fopen(fname, "w+")) != NULL) {
		if ((fwrite(out_buff, 1, 2*out_frames, fl)) != (size_t)(2*out_frames)) {
			ast_log(LOG_ERROR, "eSpeak: Failed to write resampled output file.\n");
			res = -1;
		}
		fclose(fl);
	} else {
		ast_log(LOG_ERROR, "eSpeak: Failed to open output file for resampling.\n");
		res = -1;
	}
	ast_free(out_buff);
CLEAN3:
	ast_free(outp);
CLEAN2:
	ast_free(inp);
CLEAN1:
	ast_free(in_buff);
	return res;
}
Code Example #17
File: aacPlusEncoder.cpp  Project: bryangrim/darkice
/*------------------------------------------------------------------------------
 *  Write data to the encoder
 *----------------------------------------------------------------------------*/
unsigned int
aacPlusEncoder :: write (  const void    * buf,
                        unsigned int    len )           throw ( Exception )
{
    if ( !isOpen() || len == 0) {
        return 0;
    }

    unsigned int    channels         = getInChannel();
    unsigned int    bitsPerSample    = getInBitsPerSample();
    unsigned int    sampleSize       = (bitsPerSample / 8) * channels;
    unsigned char * b                = (unsigned char*) buf;
    unsigned int    processed        = len - (len % sampleSize);
    unsigned int    nSamples         = processed / sampleSize;
    unsigned char * aacplusBuf          = new unsigned char[maxOutputBytes];
    int             samples          = (int) nSamples * channels;
    int             processedSamples = 0;


    if ( converter ) {
        unsigned int         converted;
#ifdef HAVE_SRC_LIB
        src_short_to_float_array ((short *) b, converterData.data_in, samples);
        converterData.input_frames   = nSamples;
        converterData.data_out = resampledOffset + (resampledOffsetSize * channels);
        int srcError = src_process (converter, &converterData);
        if (srcError)
             throw Exception (__FILE__, __LINE__, "libsamplerate error: ", src_strerror (srcError));
        converted = converterData.output_frames_gen;
#else
        int         inCount  = nSamples;
        short int     * shortBuffer  = new short int[samples];
        int         outCount = (int) (inCount * resampleRatio);
        Util::conv( bitsPerSample, b, processed, shortBuffer, isInBigEndian());
        converted = converter->resample( inCount,
                                         outCount+1,
                                         shortBuffer,
                                         &resampledOffset[resampledOffsetSize*channels]);
        delete[] shortBuffer;
#endif
        resampledOffsetSize += converted;

        // encode samples (if enough)
        while(resampledOffsetSize - processedSamples >= inputSamples/channels) {
#ifdef HAVE_SRC_LIB
            short *shortData = new short[inputSamples];
            src_float_to_short_array(resampledOffset + (processedSamples * channels),
                                     shortData, inputSamples) ;
            int outputBytes = aacplusEncEncode(encoderHandle,
                                       (int32_t*) shortData,
                                        inputSamples,
                                        aacplusBuf,
                                        maxOutputBytes);
            delete [] shortData;
#else
            int outputBytes = aacplusEncEncode(encoderHandle,
                                       (int32_t*) &resampledOffset[processedSamples*channels],
                                        inputSamples,
                                        aacplusBuf,
                                        maxOutputBytes);
#endif
            unsigned int wrote = getSink()->write(aacplusBuf, outputBytes);
            
            if (wrote < outputBytes) {
                reportEvent(3, "aacPlusEncoder :: write, couldn't write full data to underlying sink");
            }

            processedSamples+=inputSamples/channels;
        }

        if (processedSamples && (int) resampledOffsetSize >= processedSamples) {
            resampledOffsetSize -= processedSamples;
            //move least part of resampled data to beginning
            if(resampledOffsetSize)
#ifdef HAVE_SRC_LIB
                resampledOffset = (float *) memmove(resampledOffset, &resampledOffset[processedSamples*channels],
                                                    resampledOffsetSize*channels*sizeof(float));
#else
                resampledOffset = (short *) memmove(resampledOffset, &resampledOffset[processedSamples*channels],
                                                    resampledOffsetSize*sampleSize);
#endif
        }
    } else {
        while (processedSamples < samples) {
            int     inSamples = samples - processedSamples < (int) inputSamples
                              ? samples - processedSamples
                              : inputSamples;

            int outputBytes = aacplusEncEncode(encoderHandle,
                                       (int32_t*) (b + processedSamples/sampleSize),
                                        inSamples,
                                        aacplusBuf,
                                        maxOutputBytes);
            
            unsigned int wrote = getSink()->write(aacplusBuf, outputBytes);
            
            if (wrote < outputBytes) {
                reportEvent(3, "aacPlusEncoder :: write, couldn't write full data to underlying sink");
            }
            
            processedSamples += inSamples;
        }
    }

    delete[] aacplusBuf;

//    return processedSamples;
    return samples * sampleSize;
}
Code Example #18
int main (int argc, char ** argv) {

// vars for networking
int udpport;
struct sockaddr_in localsa4;
struct sockaddr_in6 localsa6;
struct sockaddr * receivefromsa = NULL; // structure used for sendto
socklen_t receivefromsa_size=0; // size of structed used for sendto
unsigned char *udpbuffer;
int udpsock;
int udpsize;


// vars for audio
int16_t *inaudiobuffer; // temporary audio buffer used when samplerate conversion is needed
float *inaudiobuffer_f = NULL; // used for audio samplerate conversion
float *outaudiobuffer_f = NULL; // used for audio samplerate conversion

// for SAMPLE RATE CONVERSION
SRC_STATE *src=NULL;
SRC_DATA src_data;
int src_error;

// vars for codec2
void *codec2;
int mode, nc2byte;

// other vars
int ret;
int new_ptr_audio_write;
int state;

// structure for c2encap data
c2encap c2_voice;
c2encap c2_begin, c2_end;

uint8_t *c2encap_type;
uint8_t *c2encap_begindata;

// "audio out" posix thread
pthread_t thr_audioout;

// init data
global.stereo=-1;
global.ptr_audio_write=1;
global.ptr_audio_read=0;
global.exact=0;

// We need at least 2 arguments: udpport and samplerate
if (argc < 3) {
	fprintf(stderr,"Error: at least 2 arguments needed. \n");
	fprintf(stderr,"Usage: %s <udp port> <samplerate> [ <audiodevice> [exact] ] \n",argv[0]);
	fprintf(stderr,"Note: allowed audio samplerate are 8000, 44100 or 48000 samples/second.\n");
	fprintf(stderr,"Note: use device \"\" to get list of devices.\n");
	exit(-1);
}; // end if

udpport=atoi(argv[1]);
global.rate=atoi(argv[2]);


// if a 3rd argument is given, use it as the audio device
if (argc >= 4) {
	global.pa_device = argv[3];

	// is there the "exact" statement?
	if (argc >= 5) {
		if (!strcmp(argv[4],"exact")) {
			global.exact=1;
		} else {
			fprintf(stderr,"Error: parameter \"exact\" expected. Got %s. Ignoring! \n",argv[4]);
		}; // end else - if
	}; // end if
} else {
	// no argument given; use default
	global.pa_device = NULL;
}; // end else - if


// sample rates below 8Ksamples/sec or above 48Ksamples/sec do not make sense
if ((global.rate != 8000) &&  (global.rate != 44100) && (global.rate != 48000)) {
	fprintf(stderr,"Error: audio samplerate should be 8000, 44100 or 48000 samples/sec! \n");
	exit(-1);
}; // end if



// create network structure
if ((udpport < 0) || (udpport > 65535)) {
	fprintf(stderr,"Error: UDPport number must be between 0 and 65535! \n");
	exit(-1);
}; // end if


if ((IPV4ONLY) && (IPV6ONLY)) {
	fprintf(stderr,"Error: internal configuration error: ipv4only and ipv6only are mutually exclusive! \n");
	exit(-1);
}; // end if


// initialise UDP buffer
udpbuffer=malloc(1500); // we can receive up to 1500 octets

if (!udpbuffer) {
	fprintf(stderr,"Error: could not allocate memory for udpbuffer!\n");
	exit(-1);
}; // end if

// set pointers for c2encap type and c2encap begin-of-data
c2encap_type = (uint8_t*) &udpbuffer[3];
c2encap_begindata = (uint8_t*) &udpbuffer[4]; 

// open inbound UDP socket and bind to socket
if (IPV4ONLY) {
	udpsock=socket(AF_INET,SOCK_DGRAM,0);

	localsa4.sin_family=AF_INET;
	localsa4.sin_port=htons(udpport);
	memset(&localsa4.sin_addr,0,sizeof(struct in_addr)); // address = 0.0.0.0 (all 0) -> we listen on all interfaces

	receivefromsa=(struct sockaddr *) &localsa4;

	ret=bind(udpsock, receivefromsa, sizeof(localsa4)); 

} else {
	// IPV6 socket can handle both ipv4 and ipv6
	udpsock=socket(AF_INET6,SOCK_DGRAM,0);

	// if ipv6 only, set option
	if (IPV6ONLY) {
		int yes=1;

		// make socket ipv6-only.
		ret=setsockopt(udpsock,IPPROTO_IPV6, IPV6_V6ONLY, (char *)&yes,sizeof(int));
		if (ret == -1) {
			fprintf(stderr,"Error: IPV6ONLY option set but could not make socket ipv6 only: %d (%s)! \n",errno,strerror(errno));
			return(-1);
		}; // end if
	}; // end if

	localsa6.sin6_family=AF_INET6;
	localsa6.sin6_port=htons(udpport);
	localsa6.sin6_flowinfo=0; // flows not used
	localsa6.sin6_scope_id=0; // we listen on all interfaces
	memset(&localsa6.sin6_addr,0,sizeof(struct in6_addr)); // address = "::" (all 0) -> we listen

	receivefromsa=(struct sockaddr *)&localsa6;

	ret=bind(udpsock, receivefromsa, sizeof(localsa6)); 

}; // end else - elsif - if

if (ret < 0) {
	fprintf(stderr,"Error: could not bind network-address to socket: %d (%s) \n",errno,strerror(errno));
	exit(-1);
}; // end if

// start audio out thread
pthread_create (&thr_audioout, NULL, funct_audioout, (void *) &global);


// init c2encap structures
memcpy(c2_begin.header,C2ENCAP_HEAD,sizeof(C2ENCAP_HEAD));
c2_begin.header[3]=C2ENCAP_MARK_BEGIN;
memcpy(c2_begin.c2data.c2data_text3,"BEG",3);

memcpy(c2_end.header,C2ENCAP_HEAD,sizeof(C2ENCAP_HEAD));
c2_end.header[3]=C2ENCAP_MARK_END;
memcpy(c2_end.c2data.c2data_text3,"END",3);

memcpy(c2_voice.header,C2ENCAP_HEAD,sizeof(C2ENCAP_HEAD));
c2_voice.header[3]=C2ENCAP_DATA_VOICE1400;

// in the mean time, do some other things while the audio-process initialises
// init codec2
mode = CODEC2_MODE_1400;
codec2 = codec2_create (mode);

nc2byte = (codec2_bits_per_frame(codec2) + 7) >> 3; // ">>3" is same as "/8"

if (nc2byte != 7) {
	fprintf(stderr,"Error: number of bytes for codec2 frames should be 7. We got %d \n",nc2byte);
}; // end if

if (codec2_samples_per_frame(codec2) != 320) {
	fprintf(stderr,"Error: number of samples for codec2 frames should be 320. We got %d \n",codec2_samples_per_frame(codec2));
}; // end if

// wait for thread audio to initialise 
while (!global.audioready) {
	// sleep 5 ms
	usleep(5000);
}; // end while

// done. Just to be sure, check "stereo" setting, should be "0" or "1"
if ((global.stereo != 0) && (global.stereo != 1)) {
	fprintf(stderr,"Internal error: stereo flag not set correctly by audioout subthread! Should not happen! Exiting. \n");
	exit(-1);
}; // end if


if (global.rate != 8000) {
// allocate memory for audio sample buffers (only needed when audio rate conversion is used)
	inaudiobuffer=malloc(320 * sizeof(int16_t));
	if (!inaudiobuffer) {
		fprintf(stderr,"Error in malloc for inaudiobuffer! \n");
		exit(-1);
	}; // end if

	inaudiobuffer_f=malloc(320 * sizeof(float));
	if (!inaudiobuffer_f) {
		fprintf(stderr,"Error in malloc for inaudiobuffer_f! \n");
		exit(-1);
	}; // end if

	outaudiobuffer_f=malloc(global.numSample * sizeof(float));
	if (!outaudiobuffer_f) {
		fprintf(stderr,"Error in malloc for outaudiobuffer_f! \n");
		exit(-1);
	}; // end if

	// init samplerate conversion
	src = src_new(SRC_SINC_FASTEST, 1, &src_error);

	if (!src) {
		fprintf(stderr,"src_new failed! \n");
		exit(-1);
	}; // end if

	src_data.data_in = inaudiobuffer_f;
	src_data.data_out = outaudiobuffer_f;
	src_data.input_frames = 320; // 40ms @ 8000 samples/sec
	src_data.output_frames = global.numSample;
	src_data.end_of_input = 0; // no further data, every 40 ms is processed on itself

	if (global.rate == 48000) {
		src_data.src_ratio = (float) 48000/8000;
	} else {
		src_data.src_ratio = (float) 44100/8000;
	}; // end if
}; // end if



// init state
state=0; // state 0 = wait for start

while (FOREVER ) {
	// wait for UDP packets

	// read until read or error, but ignore "EINTR" errors
	while (FOREVER) {
		udpsize = recvfrom(udpsock, udpbuffer, 1500, 0, receivefromsa, &receivefromsa_size);

		if (udpsize > 0) {
			// break out if really packet received;
			break;
		}; // end if

		// break out when not error EINTR
		if (errno != EINTR) {
			break;
		}; // end if
	}; // end while (read valid UDP packet)

	if (udpsize < 0) {
		// error: print message, wait 1/4 of a second and retry
		fprintf(stderr,"Error receiving UDP packets: %d (%s) \n",errno, strerror(errno));
		usleep(250000);
		continue;
	}; // end if


	if (udpsize < 4) {
		// should be at least 4 octets: too small, ignore it
		fprintf(stderr,"Error: received UDP packet too small (size = %d). Ignoring! \n",udpsize);
		continue;
	}; // end if


	// check beginning of frame, it should contain the c2enc signature
	if (memcmp(udpbuffer,C2ENCAP_HEAD,3)) {
		// signature does not match, ignore packet
		continue;
	}; // end  if
	
	// check size + content
	// we know the udp packet is at least 4 octets, so check 4th char for type
	if (*c2encap_type == C2ENCAP_MARK_BEGIN) {
		if (udpsize < C2ENCAP_SIZE_MARK ) {
			fprintf(stderr,"Error: received C2ENCAP BEGIN MARKER with to small size: %d octets! Ignoring\n",udpsize);
			continue;
		} else if (udpsize > C2ENCAP_SIZE_MARK) {
			fprintf(stderr,"Warning: received C2ENCAP BEGIN MARKER with to large size: %d octets! Ignoring extra data\n",udpsize);
		};

		// check content
		if (memcmp(c2encap_begindata,"BEG",3)) {
			fprintf(stderr,"Error: RECEIVED C2ENCAP BEGIN MARKER WITH INCORRECT TEXT: 0X%02X 0X%02X 0X%02X. Ignoring frame!\n",udpbuffer[4],udpbuffer[5],udpbuffer[6]);
			continue;
		}; // end if
	} else if (*c2encap_type == C2ENCAP_MARK_END) {
		if (udpsize < C2ENCAP_SIZE_MARK ) {
			fprintf(stderr,"Error: received C2ENCAP END MARKER with to small size: %d octets! Ignoring\n",udpsize);
			continue;
		} else if (udpsize > C2ENCAP_SIZE_MARK) {
			fprintf(stderr,"Warning: received C2ENCAP END MARKER with to large size: %d octets! Ignoring extra data\n",udpsize);
		};

		// check content
		if (memcmp(c2encap_begindata,"END",3)) {
			fprintf(stderr,"Error: RECEIVED C2ENCAP BEGIN MARKER WITH INCORRECT TEXT: 0X%02X 0X%02X 0X%02X. Ignoring frame!\n",udpbuffer[4],udpbuffer[5],udpbuffer[6]);
			continue;
		}; // end if
	} else if (*c2encap_type == C2ENCAP_DATA_VOICE1200) {
		if (udpsize < C2ENCAP_SIZE_VOICE1200 ) {
			fprintf(stderr,"Warning: received C2ENCAP VOICE1200 with to small size: %d octets! Ignoring\n",udpsize);
			continue;
		} else if (udpsize > C2ENCAP_SIZE_VOICE1200) {
			fprintf(stderr,"Warning: received C2ENCAP VOICE1200 with to large size: %d octets! Ignoring extra data\n",udpsize);
		};

	} else if (*c2encap_type == C2ENCAP_DATA_VOICE1400) {
		if (udpsize < C2ENCAP_SIZE_VOICE1400 ) {
			fprintf(stderr,"Warning: received C2ENCAP VOICE1400 with to small size: %d octets! Ignoring\n",udpsize);
			continue;
		} else if (udpsize > C2ENCAP_SIZE_VOICE1400) {
			fprintf(stderr,"Warning: received C2ENCAP VOICE1400 with to large size: %d octets! Ignoring extra data\n",udpsize);
		};
	} else if (*c2encap_type == C2ENCAP_DATA_VOICE2400) {
		if (udpsize < C2ENCAP_SIZE_VOICE2400 ) {
			fprintf(stderr,"Warning: received C2ENCAP VOICE2400 with to small size: %d octets! Ignoring\n",udpsize);
			continue;
		} else if (udpsize > C2ENCAP_SIZE_VOICE2400) {
			fprintf(stderr,"Warning: received C2ENCAP VOICE2400 with to large size: %d octets! Ignoring extra data\n",udpsize);
		};
	} else {
		fprintf(stderr,"Warning: received packet with unknown type of C2ENCAP type: 0X%02X. Ignoring!\n",*c2encap_type);
		continue;
	}; // end if


	// processing from here on depends on state
	if (state == 0) {
		// state 0, waiting for start data

		if (*c2encap_type == C2ENCAP_MARK_BEGIN) {
			// received start, go to state 1
			state=1;
			continue;
		} else {
			fprintf(stderr,"Warning: received packet of type 0X%02X in state 0. Ignoring packet! \n",*c2encap_type);
			continue;
		}; // end if
	} else if (state == 1) {
		// state 1: receiving voice data, until we receive a "end" marker
		if (*c2encap_type == C2ENCAP_MARK_END) {
			// end received. Go back to state 0
			state=0;
			continue;
		} else if (*c2encap_type != C2ENCAP_DATA_VOICE1400) {
			fprintf(stderr,"Warning: received packet of type 0X%02X in state 1. Ignoring packet! \n",*c2encap_type);
			continue;
		} else {
			// voice 1400 data packet. Decode and play out

			// first check if there is place to store the result
			new_ptr_audio_write = global.ptr_audio_write+1;
			if (new_ptr_audio_write >= NUMBUFF) {
				// wrap around at NUMBUFF
				new_ptr_audio_write=0;
			}; // end if

			if (new_ptr_audio_write == global.ptr_audio_read) {
				// oops. No buffers available to write data
				fputc('B',stderr);
			} else {
//				fputc('Q',stderr);


				// decode codec2 frame
  				codec2_decode(codec2, global.audiobuffer[new_ptr_audio_write],c2encap_begindata);
				

				// if not samplerate 8000, do rate conversion
				if (global.rate != 8000) {
					// convert int16 to float
					if (!inaudiobuffer_f) {
						fprintf(stderr,"Internal error: inaudiobuffer_f not initialised! \n");
						exit(-1);
					}; // "end if

					if (!outaudiobuffer_f) {
						fprintf(stderr,"Internal error: outaudiobuffer_f not initialised! \n");
						exit(-1);
					}; // "end if


					src_short_to_float_array(global.audiobuffer[new_ptr_audio_write],inaudiobuffer_f,320);

					// do conversion
					ret=src_process(src,&src_data);

					if (ret) {
						fprintf(stderr,"Warning: samplerate conversion error %d (%s)\n",ret,src_strerror(ret));
					}; // end if

					// some error checking
					if (src_data.output_frames_gen != global.numSample) {
						fprintf(stderr,"Warning: number of frames generated by samplerateconvert should be %d, got %ld. \n",global.numSample,src_data.output_frames_gen);
					}; // end if

					// convert back from float to int, and store immediately in ringbuffer
					src_float_to_short_array(outaudiobuffer_f,global.audiobuffer[new_ptr_audio_write],global.numSample );
				}; // end if (samplerate != 8000)


				// make stereo (duplicate channels) if needed
				if (global.stereo) {
					int loop;
					int16_t *p1, *p2;

					int lastsample_m, lastsample_s;

					lastsample_m = global.numSample - 1;
					lastsample_s = global.numSample*2 - 1;

					// codec2_decode returns a buffer of 16-bit samples, MONO
					// so duplicate all samples, start with last sample, move down to sample "1" (not 0);
					p1=&global.audiobuffer[new_ptr_audio_write][lastsample_s]; // last sample of buffer (320 samples stereo = 640 samples mono)
					p2=&global.audiobuffer[new_ptr_audio_write][lastsample_m]; // last sample of buffer (mono)

					for (loop=0; loop < lastsample_m; loop++) {
						*p1 = *p2; p1--; // copy 1st octet, move down "write" pointer
						*p1 = *p2; p1--; p2--; // copy 2nd time, move down both pointers
					}; // end if

					// last sample, just copy (no need anymore to move pointers)
					*p1 = *p2;
				}; // end if

				// move up pointer in global vars
				global.ptr_audio_write=new_ptr_audio_write;
			}; // end if

		}; // end if
	} else {
		fprintf(stderr,"Internal Error: unknow state %d in audioplay main loop. Should not happen. Exiting!!!\n",state);
		exit(-1);
	}; // end if

}; // end while

fprintf(stderr,"Internal Error: audioplay main application drops out of endless loop. Should not happen! \n");
exit(-1);

}; // end main application
Code Example #19
bool
Deck_playback_process::play_data_with_playback_parameters(QVector<float*> &io_playback_bufs, const unsigned short int &buf_size)
{
    float speed = this->param->get_speed();

    // If speed is zero, play silence.
    if (speed == 0.0)
    {
        this->play_silence(io_playback_bufs, buf_size);
        return true;
    }

    // Determine the approximate number of input samples needed for time stretching.
    unsigned short int nb_input_data = (unsigned short int)((float)buf_size * fabs(speed));
    if (fabs(speed) >= 1.0)
    {
        // Add a few extra samples if speed is more than 1.0 (improves the quality of the stretched sound).
        nb_input_data += SOUND_STRETCH_POND_MIN;
    }
    else
    {
        // Add more extra samples if speed is less than 1.0 (improves the quality of the stretched sound).
        nb_input_data += (SOUND_STRETCH_POND_MIN - SOUND_STRETCH_POND_MAX) * fabs(speed) + SOUND_STRETCH_POND_MAX;
    }

    // Do we have enough samples to play ?
    unsigned int remaining_samples = 0;
    if (speed > 0.0)
    {
        remaining_samples = (this->at->get_end_of_samples() - this->current_sample) / 2.0;
    }
    else
    {
        remaining_samples = this->current_sample / 2.0;
    }
    if (nb_input_data > remaining_samples)
    {
#ifdef ENABLE_TEST_MODE
        if (speed > 0.0)
        {
            this->current_sample = 0.0;
        }
        else
        {
            this->current_sample = this->at->get_end_of_samples();
        }
#else
        nb_input_data = remaining_samples;
#endif
    }
    if (nb_input_data == 0) // No data to play.
    {
        this->play_silence(io_playback_bufs, buf_size);
        return true;
    }
    if ((nb_input_data * 2) > SOUND_STRETCH_MAX_BUFFER)
    {
        this->play_silence(io_playback_bufs, buf_size);
        qCWarning(DS_PLAYBACK) << "too many data to stretch";
        return false;
    }

    // Prepare samples to play.
    short signed int *start_sample = &this->at->get_samples()[this->current_sample];
    std::fill(this->src_int_input_data, this->src_int_input_data + (nb_input_data * 2), 0);
    if (speed > 0.0)
    {
        std::copy(start_sample, start_sample + (nb_input_data * 2), this->src_int_input_data);
    }
    else
    {
        std::reverse_copy(start_sample - (nb_input_data * 2), start_sample, this->src_int_input_data);
    }

    // Since libsamplerate only works on floats, we need to convert the input data.
    src_short_to_float_array(this->src_int_input_data, this->src_float_input_data, nb_input_data * 2);

    // Do time stretching.
    int err = 0;
    std::fill(this->src_float_output_data, this->src_float_output_data + (buf_size * 2), float(0.0));
    this->src_data->data_in           = this->src_float_input_data;
    this->src_data->data_out          = this->src_float_output_data;
    this->src_data->end_of_input      = 0;
    this->src_data->input_frames      = nb_input_data;
    this->src_data->output_frames     = buf_size;
    this->src_data->src_ratio         = fabs(1.0 / speed);
    err = src_process(this->src_state, this->src_data);
    if (err != 0)
    {
        // FIXME: when speed is very slow, src_process fails, it is not really an issue, but investigate that.
        //qCWarning(DS_PLAYBACK) << "libsamplerate fails: " << src_strerror(err);
        this->play_silence(io_playback_bufs, buf_size);
        return true;
    }
    else
    {
        // Change current pointer on sample to play
        if (speed > 0.0)
        {
            this->current_sample += this->src_data->input_frames_used * 2;
        }
        else
        {
            this->current_sample -= this->src_data->input_frames_used * 2;
        }

        // Change volume.
        this->change_volume(this->src_float_output_data, buf_size * 2);

        // Put result in sound card interleaved output stream.
        float *ptr = this->src_float_output_data;
        for (int i = 0; i < buf_size; i++)
        {
            io_playback_bufs[0][i] = *ptr;
            ptr++;
            io_playback_bufs[1][i] = *ptr;
            ptr++;
        }
    }

    return true;
}
Code Example #20
File: Main.cpp  Project: 173210/project64
static int resample(unsigned char *input, int input_avail, int oldsamplerate, unsigned char *output, int output_needed, int newsamplerate)
{
    int *psrc = (int*)input;
    int *pdest = (int*)output;
    int i = 0, j = 0;

#ifdef USE_SPEEX
    spx_uint32_t in_len, out_len;
    if(Resample == RESAMPLER_SPEEX)
    {
        if(spx_state == NULL)
        {
            spx_state = speex_resampler_init(2, oldsamplerate, newsamplerate, ResampleQuality,  &error);
            if(spx_state == NULL)
            {
                memset(output, 0, output_needed);
                return 0;
            }
        }
        speex_resampler_set_rate(spx_state, oldsamplerate, newsamplerate);
        in_len = input_avail / 4;
        out_len = output_needed / 4;

        if ((error = speex_resampler_process_interleaved_int(spx_state, (const spx_int16_t *)input, &in_len, (spx_int16_t *)output, &out_len)))
        {
            memset(output, 0, output_needed);
            return input_avail;  // number of bytes consumed
        }
        return in_len * 4;
    }
#endif
#ifdef USE_SRC
    if(Resample == RESAMPLER_SRC)
    {
        // the high quality resampler needs more input than the samplerate ratio would indicate to work properly
        if (input_avail > output_needed * 3 / 2)
            input_avail = output_needed * 3 / 2; // just to avoid too much short-float-short conversion time
        if (_src_len < input_avail*2 && input_avail > 0)
        {
            if(_src) free(_src);
            _src_len = input_avail*2;
            _src = malloc(_src_len);
        }
        if (_dest_len < output_needed*2 && output_needed > 0)
        {
            if(_dest) free(_dest);
            _dest_len = output_needed*2;
            _dest = malloc(_dest_len);
        }
        memset(_src,0,_src_len);
        memset(_dest,0,_dest_len);
        if(src_state == NULL)
        {
            src_state = src_new (ResampleQuality, 2, &error);
            if(src_state == NULL)
            {
                memset(output, 0, output_needed);
                return 0;
            }
        }
        src_short_to_float_array ((short *) input, _src, input_avail/2);
        src_data.end_of_input = 0;
        src_data.data_in = _src;
        src_data.input_frames = input_avail/4;
        src_data.src_ratio = (float) newsamplerate / oldsamplerate;
        src_data.data_out = _dest;
        src_data.output_frames = output_needed/4;
        if ((error = src_process (src_state, &src_data)))
        {
            memset(output, 0, output_needed);
            return input_avail;  // number of bytes consumed
        }
        src_float_to_short_array (_dest, (short *) output, output_needed/2);
        return src_data.input_frames_used * 4;
    }
#endif
    // RESAMPLE == TRIVIAL
    if (newsamplerate >= oldsamplerate)
    {
        int sldf = oldsamplerate;
        int const2 = 2*sldf;
        int dldf = newsamplerate;
        int const1 = const2 - 2*dldf;
        int criteria = const2 - dldf;
        for (i = 0; i < output_needed/4; i++)
        {
            pdest[i] = psrc[j];
            if(criteria >= 0)
            {
                ++j;
                criteria += const1;
            }
            else criteria += const2;
        }
        return j * 4; //number of bytes consumed
    }
    // newsamplerate < oldsamplerate, this only happens when speed_factor > 1
    for (i = 0; i < output_needed/4; i++)
    {
        j = i * oldsamplerate / newsamplerate;
        pdest[i] = psrc[j];
    }
    return j * 4; //number of bytes consumed
}
Code Example #21
File: port_audio.cpp  Project: melchor629/butt
//this function is called by PortAudio when new audio data arrives
int snd_callback(const void *input,
                 void *output,
                 unsigned long frameCount,
                 const PaStreamCallbackTimeInfo* timeInfo,
                 PaStreamCallbackFlags statusFlags,
                 void *userData)
{
    int samplerate_out;
    bool convert_stream = false;
    bool convert_record = false;


    memcpy(pa_pcm_buf, input, frameCount*cfg.audio.channel*sizeof(short));
    samplerate_out = cfg.audio.samplerate;

    if(dsp->hasToProcessSamples()) {
        dsp->processSamples(pa_pcm_buf);
    }
	
	if (streaming)
	{
        if ((!strcmp(cfg.audio.codec, "opus")) && (cfg.audio.samplerate != 48000))
        {
            convert_stream = true;
            samplerate_out = 48000;
        }

        if (convert_stream == true)
        {
            srconv_stream.end_of_input = 0;
            srconv_stream.src_ratio = (float)samplerate_out/cfg.audio.samplerate;
            srconv_stream.input_frames = frameCount;
            srconv_stream.output_frames = frameCount*cfg.audio.channel * (srconv_stream.src_ratio+1) * sizeof(float);

            src_short_to_float_array((short*)pa_pcm_buf, (float*) srconv_stream.data_in, (int) frameCount*cfg.audio.channel);

            //The actual resample process
            src_process(srconv_state_stream, &srconv_stream);

            src_float_to_short_array(srconv_stream.data_out, (short*)stream_buf, (int) srconv_stream.output_frames_gen*cfg.audio.channel);

            rb_write(&stream_rb, (char*)stream_buf, (int) srconv_stream.output_frames_gen*sizeof(short)*cfg.audio.channel);
        }
        else
            rb_write(&stream_rb, (char*)pa_pcm_buf, (int) frameCount*sizeof(short)*cfg.audio.channel);

		pthread_cond_signal(&stream_cond);
	}

	if(recording)
	{

        if ((!strcmp(cfg.rec.codec, "opus")) && (cfg.audio.samplerate != 48000))
        {
            convert_record = true;
            samplerate_out = 48000;
        }

        if (convert_record == true)
        {
            srconv_record.end_of_input = 0;
            srconv_record.src_ratio = (float)samplerate_out/cfg.audio.samplerate;
            srconv_record.input_frames = frameCount;
            srconv_record.output_frames = frameCount*cfg.audio.channel * (srconv_record.src_ratio+1) * sizeof(float);

            src_short_to_float_array((short*)pa_pcm_buf, (float*) srconv_record.data_in, (int) frameCount*cfg.audio.channel);

            //The actual resample process
            src_process(srconv_state_record, &srconv_record);

            src_float_to_short_array(srconv_record.data_out, (short*)record_buf, (int) srconv_record.output_frames_gen*cfg.audio.channel);

            rb_write(&rec_rb, (char*)record_buf, (int) srconv_record.output_frames_gen*sizeof(short)*cfg.audio.channel);

        }
        else
            rb_write(&rec_rb, (char*)pa_pcm_buf, (int) frameCount*sizeof(short)*cfg.audio.channel);

		pthread_cond_signal(&rec_cond);
	}
    
    //tell vu_update() that there is new audio data
    pa_new_frames = 1;

    return 0;
}
Code Example #22
File: video_layer.cpp  Project: dewn49/FreeJ
void *VideoLayer::feed() {
  int got_picture=0;
  int len1=0 ;
  int ret=0;
  bool got_it=false;

  double now = get_master_clock();


  if(paused)
    return rgba_picture->data[0];

  /**
   * follow user video loop
   */
  if(mark_in!=NO_MARK && mark_out!=NO_MARK && seekable) {
    if (now >= mark_out)
      seek((int64_t)mark_in * AV_TIME_BASE);
  }
  
  // operate seek if was requested
  if(to_seek>=0) {
    seek(to_seek);
    to_seek = -1;
  }
    
  got_it=false;
  
  while (!got_it) {
    
    
    if(packet_len<=0) {
      /**
       * Read one packet from the media and put it in pkt
       */
      while(1) {
#ifdef DEBUG
	func("av_read_frame ...");
#endif
	ret = av_read_frame(avformat_context, &pkt);

#ifdef DEBUG
	if(pkt.stream_index == video_index)
	  std::cout << "video read packet";
	else if(pkt.stream_index == audio_index)
	  std::cout << "audio read packet";
	std::cout << " pkt.data=" << pkt.data;
	std::cout << " pkt.size=" << pkt.size;
	std::cout << " pkt.pts/dts=" << pkt.pts << "/" << pkt.dts << std::endl;
	std::cout << "pkt.duration=" << pkt.duration;
	std::cout << " avformat_context->start_time=" << avformat_context->start_time;
	std::cout << " avformat_context->duration=" << avformat_context->duration/AV_TIME_BASE << std::endl;
	std::cout << "avformat_context->duration=" << avformat_context->duration << std::endl;
#endif
	
	/* TODO(shammash): this may be good for streams but breaks
	 * looping in files, needs fixing. */
	// 	      if(!pkt.duration) continue;
	
	// 	      if(!pkt.size || !pkt.data) {
	// 		return NULL;
	// 	      }
	
	
	/**
	 * check eof and loop
	 */
	if(ret!= 0) {	//does not enter if data are available
	  eos->notify();
	  //	  eos->dispatcher->do_jobs(); /// XXX hack hack hack
	  ret = seek(avformat_context->start_time);
	  if (ret < 0) {
	    error("VideoLayer::could not loop file");
	    return rgba_picture->data[0];
	  }
	  continue;
	} else if( (pkt.stream_index == video_index) 
		   || (pkt.stream_index == audio_index) )
	  break; /* exit loop */
      }
    } // loop break after a known index is found
    
    
    frame_number++;
	//std::cout << "frame_number :" << frame_number << std::endl;
    
    /**
     * Decode video
     */
    if(pkt.stream_index == video_index) {
      
      len1 = decode_video_packet(&got_picture);
      
      AVFrame *yuv_picture=&av_frame;
      if(len1<0) {
	//	  error("VideoLayer::Error while decoding frame");
	func("one frame only?");
	return NULL;
      }
      else if (len1 == 0) {
	packet_len=0;
	return NULL;
      }
      
      /**
       * We've found a picture
       */
      ptr += len1;
      packet_len -= len1;
      if (got_picture!=0) {
	got_it=true;
	avformat_stream=avformat_context->streams[video_index];
	
	/** Deinterlace input if requested */
	if(deinterlaced)
	  deinterlace((AVPicture *)yuv_picture);
	
#ifdef WITH_SWSCALE
	sws_scale(img_convert_ctx, yuv_picture->data, yuv_picture->linesize,
		  0, video_codec_ctx->height,
		  rgba_picture->data, rgba_picture->linesize);	  
#else
	/**
	 * yuv2rgb
	 */
	img_convert(rgba_picture, PIX_FMT_RGB32, (AVPicture *)yuv_picture,
		    video_codec_ctx->pix_fmt, 
		    //avformat_stream.codec->pix_fmt,
		    video_codec_ctx->width,
		    video_codec_ctx->height);
#endif
	// memcpy(frame_fifo.picture[fifo_position % FIFO_SIZE]->data[0],rgba_picture->data[0],geo.size);
	/* TODO move */
	if(fifo_position == FIFO_SIZE)
	  fifo_position=0;
	
	/* workaround since sws_scale conversion from YUV
	   returns an RGBA buffer with alpha set to 0x0  */
	{
	  register int bufsize = ( rgba_picture->linesize[0] * video_codec_ctx->height ) /4;
	  int32_t *pbuf =  (int32_t*)rgba_picture->data[0];
	  
	  for(; bufsize>0; bufsize--) {
	    *pbuf = (*pbuf | alpha_bitmask);
	    pbuf++;
	  }
	} 
	
	jmemcpy(frame_fifo.picture[fifo_position]->data[0],
		rgba_picture->data[0],
		rgba_picture->linesize[0] * video_codec_ctx->height);
	
	//			    avpicture_get_size(PIX_FMT_RGBA32, enc->width, enc->height));
	fifo_position++;
      }
    } // end video packet decoding
    

    ////////////////////////
    // audio packet decoding
    else if(pkt.stream_index == audio_index) {
      // XXX(shammash): audio decoding seems to depend on screen properties, so
      //                we skip decoding audio frames if there's no screen
      //  long unsigned int m_SampleRate = screen->m_SampleRate?*(screen->m_SampleRate):48000;
      //  ringbuffer_write(screen->audio, (const char*)audio_float_buf,  samples*sizeof(float));
      //  ... and so on ...
      if(use_audio && screen) {
	int data_size;
	len1 = decode_audio_packet(&data_size);
	if (len1 > 0)  {
	  int samples = data_size/sizeof(uint16_t);
	  long unsigned int m_SampleRate = screen->m_SampleRate?*(screen->m_SampleRate):48000;
	  double m_ResampleRatio = (double)(m_SampleRate)/(double)audio_samplerate; 
	  long unsigned max_buf = ceil(AVCODEC_MAX_AUDIO_FRAME_SIZE * m_ResampleRatio * audio_channels);

	  if (audio_resampled_buf_len < max_buf) {
		if (audio_resampled_buf) free (audio_resampled_buf);
		audio_resampled_buf = (float*) malloc(max_buf * sizeof(float));
		audio_resampled_buf_len = max_buf;
	  }

	  src_short_to_float_array ((const short*) audio_buf, audio_float_buf, samples);
	  if (m_ResampleRatio == 1.0) 
	  {
	    ringbuffer_write(screen->audio, (const char*)audio_float_buf,  samples*sizeof(float));
	    time_t *tm = (time_t *)malloc(sizeof(time_t));
	    time (tm);
// 	    std::cerr << "-- VL:" << asctime(localtime(tm));
	  } 
	  else 
	  {
	    src_short_to_float_array ((const short*) audio_buf, audio_float_buf, samples);

	    SRC_DATA src_data;
	    int offset = 0;

            do {
	      src_data.input_frames  = samples/audio_channels;
	      src_data.output_frames = audio_resampled_buf_len/audio_channels - offset;
	      src_data.end_of_input  = 0;
	      src_data.src_ratio     =  m_ResampleRatio;
	      src_data.input_frames_used = 0;
	      src_data.output_frames_gen = 0;
	      src_data.data_in       = audio_float_buf + offset; 
	      src_data.data_out      = audio_resampled_buf + offset;

	      src_simple (&src_data, SRC_SINC_MEDIUM_QUALITY, audio_channels) ;
	      ringbuffer_write(screen->audio,
			       (const char*)audio_resampled_buf,
			       src_data.output_frames_gen * audio_channels *sizeof(float));

	      offset += src_data.input_frames_used * audio_channels;
	      samples -= src_data.input_frames_used * audio_channels;

	      if (samples>0)
		warning("resampling left: %i < %i",
			src_data.input_frames_used, samples/audio_channels);

	    } while (samples > audio_channels);
	  }
	}
      }
    }
    
    av_free_packet(&pkt); /* sun's good. love's bad */
    
  } // end of while(!got_it)
  
  return frame_fifo.picture[fifo_position-1]->data[0];
}
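The audio branch above calls src_simple() in a do/while loop, advancing an offset by input_frames_used until the decoded block is consumed. A hedged sketch of that chunked pattern follows, with illustrative names. Note that src_simple() creates and destroys a converter on every call, so for a continuous stream a persistent src_new()/src_process() state avoids artifacts at chunk boundaries.

#include <samplerate.h>

/* Resample an interleaved float buffer in chunks until all input frames are
 * consumed. Returns 0 on success or a libsamplerate error code. */
int resample_all(float *in, long in_frames,
                 float *out, long out_frames_max,
                 int channels, double ratio)
{
    long in_done = 0, out_done = 0;

    while (in_done < in_frames) {
        SRC_DATA d;
        d.data_in       = in  + in_done  * channels;
        d.data_out      = out + out_done * channels;
        d.input_frames  = in_frames      - in_done;
        d.output_frames = out_frames_max - out_done;
        d.src_ratio     = ratio;
        d.end_of_input  = 1;                   /* each chunk is treated as complete */

        int err = src_simple(&d, SRC_SINC_MEDIUM_QUALITY, channels);
        if (err != 0)
            return err;                        /* describe with src_strerror(err) */

        in_done  += d.input_frames_used;
        out_done += d.output_frames_gen;

        if (d.input_frames_used == 0 && d.output_frames_gen == 0)
            break;                             /* nothing advanced; avoid spinning */
    }
    return 0;
}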
コード例 #23
0
int main (int argc, char ** argv) {
int buffersize;
int stereo;
int samplerate;
int numBytes;
int numSample;
int maxnumchannel_input;

// state of keypress
int state, oldstate;

// vars for portaudio
char * portaudiodevice;
int exact=0;
PaStreamParameters inputParameters;
PaStream * stream;
PaError pa_ret;

const PaDeviceInfo *devinfo;

// vars for networking
char * ipaddrtxtin;
int udpport;
struct sockaddr_in * udp_aiaddr_in = NULL;
struct sockaddr_in6 * udp_aiaddr_in6 = NULL;
struct sockaddr * sendto_aiaddr = NULL; // structure used for sendto
int sendto_sizeaiaddr=0; // size of structed used for sendto
int udpsd;
int udp_family;

char ipaddrtxt[INET6_ADDRSTRLEN];

// vars for getaddrinfo
struct addrinfo * hint;
struct addrinfo * info;

// for SAMPLE RATE CONVERSION
SRC_STATE *src=NULL;
SRC_DATA src_data;
int src_error;

// vars for codec2
void *codec2;
unsigned char *c2_buff;
int mode, nc2byte;

// vars for audio
int16_t * audiobuffer;
float * inaudiobuffer_f = NULL;
float * outaudiobuffer_f = NULL;

// other vars
int ret;

// structure for c2encap data
c2encap c2_voice;
c2encap c2_begin, c2_end;

// "audio in" posix thread
pthread_t thr_keypress;

// init data
stereo=-1;
global.transmit=0;

// We need at least 3 arguments: IP-address, udpport and samplerate
if (argc < 4) {
	fprintf(stderr,"Error: at least 3 arguments needed. \n");
	fprintf(stderr,"Usage: %s <ip-addr> <udp port> <samplerate> [ <audiodevice> [exact] ] \n",argv[0]);
	fprintf(stderr,"Note: allowed audio samplerates are 8000, 44100 or 48000 samples/second.\n");
	fprintf(stderr,"Note: use device \"\" to get list of devices.\n");
	exit(-1);
}; // end if

ipaddrtxtin=argv[1];
udpport=atoi(argv[2]);
samplerate=atoi(argv[3]);


// if a 4th argument is given, use it as the capture device
if (argc >= 5) {
	portaudiodevice = argv[4];

	// is there the "exact" statement?
	if (argc >= 6) {
		if (!strcmp(argv[5],"exact")) {
			exact=1;
		} else {
			fprintf(stderr,"Error: parameter \"exact\" expected. Got %s. Ignoring! \n",argv[5]);
		}; // end else - if
	}; // end if
} else {
	// no argument given
	portaudiodevice = NULL;
}; // end else - if



// create network structure
if ((udpport < 0) || (udpport > 65535)) {
	fprintf(stderr,"Error: UDPport number must be between 0 and 65535! \n");
	exit(-1);
}; // end if


if ((IPV4ONLY) && (IPV6ONLY)) {
	fprintf(stderr,"Error: internal configuration error: ipv4only and ipv6only are mutually exclusive! \n");
	exit(-1);
}; // end if


// sample rates below 8Ksamples/sec or above 48Ksamples/sec do not make sense
if (samplerate == 8000) {
	numSample = 320;
} else if (samplerate == 44100) {
	numSample = 1764;
} else if (samplerate == 48000) {
	numSample = 1920;
} else {
	fprintf(stderr,"Error: audio samplerate should be 8000, 44100 or 48000 samples/sec! \n");
	exit(-1);
}; // end if


// DO DNS query for ipaddress
hint=malloc(sizeof(struct addrinfo));

if (!hint) {
	fprintf(stderr,"Error: could not allocate memory for hint!\n");
	exit(-1);
}; // end if

// clear hint
memset(hint,0,sizeof(struct addrinfo));

hint->ai_socktype = SOCK_DGRAM;

// resolve hostname, use function "getaddrinfo"
// set address family of hint if ipv4only or ipv6only
if (IPV4ONLY) {
	hint->ai_family = AF_INET;
} else if (IPV6ONLY) {
	hint->ai_family = AF_INET6;
} else {
	hint->ai_family = AF_UNSPEC;
}; // end else - elsif - if

// do DNS-query, use getaddrinfo for both ipv4 and ipv6 support
ret=getaddrinfo(ipaddrtxtin, NULL, hint, &info);

if (ret < 0) {
	fprintf(stderr,"Error: resolving hostname %s failed: (%s)\n",ipaddrtxtin,gai_strerror(ret));
	exit(-1);
}; // end if


udp_family=info->ai_family;

// open UDP socket + set udp port
if (udp_family == AF_INET) {
	udpsd=socket(AF_INET,SOCK_DGRAM,0);
	
	// getaddrinfo returns pointer to generic "struct sockaddr" structure.
	// 		Cast to "struct sockaddr_in" to be able to fill in destination port
	udp_aiaddr_in=(struct sockaddr_in *)info->ai_addr;
	udp_aiaddr_in->sin_port=htons((unsigned short int) udpport);

	// set pointer to be used for "sendto" ipv4 structure
	// sendto uses generic "struct sockaddr" just like the information
	// 		returned from getaddrinfo, so no casting needed here
	sendto_aiaddr=info->ai_addr;
	sendto_sizeaiaddr=sizeof(struct sockaddr);

	// get textual version of returned ip-address
	inet_ntop(AF_INET,&udp_aiaddr_in->sin_addr,ipaddrtxt,INET6_ADDRSTRLEN);
	
} else if (udp_family == AF_INET6) {
	udpsd=socket(AF_INET6,SOCK_DGRAM,0);

	// getaddrinfo returns pointer to generic "struct sockaddr" structure.
	// 		Cast to "struct sockaddr_in6" to be able to fill in destination port
	udp_aiaddr_in6=(struct sockaddr_in6 *)info->ai_addr;
	udp_aiaddr_in6->sin6_port=htons((unsigned short int) udpport);

	// set pointer to be used for "sendto" ipv6 structure
	// sendto uses generic "struct sockaddr" just like the information
	// 		returned from getaddrinfo, so no casting needed here
	sendto_aiaddr=info->ai_addr;
	sendto_sizeaiaddr=sizeof(struct sockaddr_in6);

	// get textual version of returned ip-address
	inet_ntop(AF_INET6,&udp_aiaddr_in6->sin6_addr,ipaddrtxt,INET6_ADDRSTRLEN);
	
} else {
	fprintf(stderr,"Error: DNS query for %s returned an unknown network-family: %d \n",ipaddrtxtin,udp_family);
	exit(-1);
}; // end if



// getaddrinfo can return multiple results, we only use the first one
// give a warning if more than one result is found.
// Data is returned in info as a linked list
// If the "next" pointer is not NULL, there is more than one
// element in the chain

if (info->ai_next != NULL) {
	fprintf(stderr,"Warning. getaddrinfo returned multiple entries. Using %s\n",ipaddrtxt);
}; // end if


if (udpsd < 0) {
	fprintf(stderr,"Error: could not create socket for UDP! \n");
	exit(-1);
}; // end if

// init c2encap structures
memcpy(c2_begin.header,C2ENCAP_HEAD,sizeof(C2ENCAP_HEAD));
c2_begin.header[3]=C2ENCAP_MARK_BEGIN;
memcpy(c2_begin.c2data.c2data_text3,"BEG",3);

memcpy(c2_end.header,C2ENCAP_HEAD,sizeof(C2ENCAP_HEAD));
c2_end.header[3]=C2ENCAP_MARK_END;
memcpy(c2_end.c2data.c2data_text3,"END",3);

memcpy(c2_voice.header,C2ENCAP_HEAD,sizeof(C2ENCAP_HEAD));
c2_voice.header[3]=C2ENCAP_DATA_VOICE1400;



// PORTAUDIO STUFF

fprintf(stderr,"INITIALISING PORTAUDIO    (this can take some time, please ignore any errors below) .... \n");
// open portaudio device
pa_ret=Pa_Initialize();
fprintf(stderr,".... DONE\n");

if (pa_ret != paNoError) {
	Pa_Terminate();
	fprintf(stderr,"Error: Could not initialise Portaudio: %s(%d) \n",Pa_GetErrorText(pa_ret),pa_ret);
	exit(-1);
}; // end if

if (portaudiodevice == NULL) {
	// portaudio device = NULL -> use portaudio "get default input device"
	inputParameters.device = Pa_GetDefaultInputDevice();

	if (inputParameters.device == paNoDevice) {
		fprintf(stderr,"Error: no portaudio default input device!\n");
		exit(-1);
	}; // end if

	if (inputParameters.device >= Pa_GetDeviceCount()) {
		fprintf(stderr,"Internal Error: portaudio \"GetDefaultInputDevice\" returns device number %d while possible devices go from 0 to %d \n",inputParameters.device, (Pa_GetDeviceCount() -1) );
		exit(-1);
	}; // end if


	// check if device supports samplerate:
	inputParameters.sampleFormat = paInt16;
	inputParameters.suggestedLatency = 0; // not used in Pa_IsFormatSupported
	inputParameters.hostApiSpecificStreamInfo = NULL;

	devinfo = Pa_GetDeviceInfo (inputParameters.device);

	maxnumchannel_input = devinfo->maxInputChannels;
	printf("Audio device = %d (%s %s)\n",inputParameters.device,Pa_GetHostApiInfo(devinfo->hostApi)->name,devinfo->name);

	if (maxnumchannel_input >= 1) {
		// first check if samplerate is supported in mono
		inputParameters.channelCount = 1;
		pa_ret = Pa_IsFormatSupported(NULL,&inputParameters,(double) samplerate);

		if (pa_ret == paFormatIsSupported) {
			printf("Samplerate %d supported in mono.\n",samplerate);
			stereo=0;
		} else {
			// try again using stereo
			inputParameters.channelCount = 2;

			if (maxnumchannel_input >= 2) {
				pa_ret = Pa_IsFormatSupported(NULL,&inputParameters,(double) samplerate);

				if (pa_ret == paFormatIsSupported) {
					printf("Samplerate %d supported in stereo.\n",samplerate);
					stereo=1;
				} else {
					printf("Error: Samplerate %d not supported in mono or stereo!\n",samplerate);
					exit(-1);
				}; // end if
			} else {
				// stereo not supported on this device
				printf("Error: Samplerate %d not supported in mono. Stereo not supported on this device!\n",samplerate);
				exit(-1);
			}; // end if
		}; // end else - if
	} else {
		printf("Error: input not supported on this device!\n");
		exit(-1);
	}; // end if
	
	printf("\n");
	fflush(stdout);


} else {
	// CLI option "device" contains text; look through the list of all devices for
	// devices that match that name and support the requested sampling rate
	int loop;
	int numdevice;

	int numdevicefound=0;
	int devicenr=0;
	int devicestereo=0;



	// init some vars
	numdevice=Pa_GetDeviceCount();

	inputParameters.sampleFormat = paInt16;
	inputParameters.suggestedLatency = 0; // not used in Pa_IsFormatSupported
	inputParameters.hostApiSpecificStreamInfo = NULL;

	for (loop=0; loop<numdevice;loop++) {
		int devnamematch=0;

		// get name of device
		devinfo = Pa_GetDeviceInfo (loop);

		// only do the check if the search string is not longer than the device name
		if (strlen(devinfo->name) >= strlen(portaudiodevice)) {
			int numcheck;
			int devnamesize;
			int loop;
			char *p;

			// init pointer to beginning of string
			p=(char *)devinfo->name;
			devnamesize = strlen(portaudiodevice);

			if (exact) {
				// exact match, only check once: at the beginning
				numcheck=1;
			} else {
				numcheck=strlen(p) - strlen(portaudiodevice) +1;
			}; // end if

			// loop until text found or end-of-string
			for (loop=0; (loop<numcheck && devnamematch == 0); loop++) {
				if (strncmp(portaudiodevice,p,devnamesize) ==0) {
					devnamematch=1;
				}; // end if

				// move up pointer
				p++;
			};
		}; // end if

		if (devnamematch) {
			printf("Audio device: %d (API: %s ,NAME: %s)\n",loop,Pa_GetHostApiInfo(devinfo->hostApi)->name,devinfo->name);

			maxnumchannel_input = devinfo->maxInputChannels;

			if (maxnumchannel_input >= 1) {
				// next step: check if this device supports the particular requested samplerate
				inputParameters.device = loop;

				inputParameters.channelCount = 1;
				pa_ret = Pa_IsFormatSupported(NULL,&inputParameters,(double) samplerate);

				if (pa_ret == paFormatIsSupported) {
					printf("Samplerate %d supported in mono.\n",samplerate);
					numdevicefound++;
					devicenr=loop;
					devicestereo=0;
				} else {
					if (maxnumchannel_input >= 2) {
						inputParameters.channelCount = 2;
						pa_ret = Pa_IsFormatSupported(NULL,&inputParameters,(double) samplerate);

						if (pa_ret == paFormatIsSupported) {
							printf("Samplerate %d supported in stereo.\n",samplerate);
							numdevicefound++;
							devicenr=loop;
							devicestereo=1;
						} else {
							printf("Error: Samplerate %d not supported in mono or stereo.\n",samplerate);
						}; // end else - if
					} else {
						// stereo not supported on this device
						printf("Error: Samplerate %d not supported in mono. Stereo not supported on this device!\n",samplerate);
					}; // end if
				}; // end else - if
			} else {
				printf("Error: Input not supported on device.\n");
			}; // end if

			printf("\n");
			fflush(stdout);
		};// end if
	}; // end for

	// did we find any device
	if (numdevicefound == 0) {
		fprintf(stderr,"Error: did not find any audio-device supporting that audio samplerate\n");
		fprintf(stderr,"       Try again with another samplerate or devicename \"\" to get a list of all devices\n");
		exit(-1);
	} else if (numdevicefound > 1) {
		fprintf(stderr,"Error: Found multiple devices matching devicename supporting that audio samplerate\n");
		fprintf(stderr,"       Try again with a more strict devicename or use \"exact\" clause!\n");
		exit(-1);
	} else {
		// OK, we have exactly one device: copy its parameters
		inputParameters.device=devicenr;
		stereo=devicestereo;

		if (devicestereo) {
			inputParameters.channelCount = 2;
		} else {
			inputParameters.channelCount = 1;
		}; // end else - if

		// get name info from device
		devinfo = Pa_GetDeviceInfo (inputParameters.device);

		fprintf(stderr,"Selected Audio device = (API: %s ,NAME: %s)\n",Pa_GetHostApiInfo(devinfo->hostApi)->name,devinfo->name);
	};
}; // end else - if

// set other parameters of inputParameters structure
inputParameters.suggestedLatency = Pa_GetDeviceInfo(inputParameters.device)->defaultLowInputLatency;

// configure portaudio global data
if (samplerate == 8000) {
	numSample = 320;
} else if (samplerate == 44100) {
	numSample = 1764;
} else if (samplerate == 48000) {
	numSample = 1920;
} else {
	fprintf(stderr,"Error: invalid value for samplerate in funct_audioout: %d !\n",samplerate);
	exit(-1);
}; // end if


// configure portaudio global data
if (stereo) {
	numBytes = (numSample << 2);
} else {
	numBytes = (numSample << 1); 
}; // end if

// create memory for audiobuffer
audiobuffer = malloc(numBytes); // allocate memory for buffer 0
if (!audiobuffer) {
	// memory could not be allocated
	fprintf(stderr,"Error: could not allocate memory for portaudio buffer 0!\n");
	exit(-1);
}; // end if


// some network debug info
fprintf(stderr,"Sending CODEC2 DV stream to ip-address %s udp port %d\n",ipaddrtxt,udpport);


// open PortAudio stream
// do not start stream yet, will be done further down
pa_ret = Pa_OpenStream (
	&stream,
	&inputParameters,
	NULL, // output Parameters, not used here 
	samplerate, // sample rate
	numSample, // frames per buffer: 40 ms @ 8000 samples/sec
	paClipOff, // we won't output out of range samples,
					// so don't bother clipping them
	NULL, // no callback function, synchronous read
	&global // parameters passed to callback function (not used here)
);

if (pa_ret != paNoError) {
	Pa_Terminate();
	fprintf(stderr,"Error in Pa_OpenStream: %s(%d) \n",Pa_GetErrorText(pa_ret),pa_ret);
	exit(-1);
}; // end if

// init codec2
mode = CODEC2_MODE_1400;
codec2 = codec2_create (mode);

nc2byte = (codec2_bits_per_frame(codec2) + 7) >> 3; // ">>3" is same as "/8"

if (nc2byte != 7) {
	fprintf(stderr,"Error: number of bytes for codec2 frames should be 7. We got %d \n",nc2byte);
}; // end if

if (codec2_samples_per_frame(codec2) != 320) {
	fprintf(stderr,"Error: number of samples for codec2 frames should be 320. We got %d \n",codec2_samples_per_frame(codec2));
}; // end if

c2_buff = (unsigned char *)&c2_voice.c2data.c2data_data7;


// allocate audiobuffer
if (stereo) {
	buffersize= numSample << 2; // = number of samples  * 4 (stereo and 16 bit/sample)
} else {
	// mono
	buffersize= numSample << 1; // = number of samples * 2 (16 bit/sample)
}; // end else - if
audiobuffer=malloc(buffersize);

if (!audiobuffer) {
	fprintf(stderr,"Error: malloc audiobuffer: %s",strerror(errno));
	exit(-1);
}; // end if


// init samplerate conversion
if (samplerate != 8000) {

// allocate memory for audio sample buffers (only needed when audio rate conversion is used)
	inaudiobuffer_f=malloc(numSample * sizeof(float));
	if (!inaudiobuffer_f) {
		fprintf(stderr,"Error in malloc for inaudiobuffer_f! \n");
		exit(-1);
	}; // end if

	outaudiobuffer_f=malloc(320 * sizeof(float)); // output buffer is 320 samples (40 ms @ 8000 samples/sec)
	if (!outaudiobuffer_f) {
		fprintf(stderr,"Error in malloc for outaudiobuffer_f! \n");
		exit(-1);
	}; // end if

	src = src_new(SRC_SINC_FASTEST,1,&src_error);

	if (!src) {
		fprintf(stderr,"src_new failed! \n");
		exit(-1);
	}; // end if

	src_data.data_in = inaudiobuffer_f;
	src_data.data_out = outaudiobuffer_f;
	src_data.input_frames = numSample;
	src_data.output_frames = 320; // 40 ms @ 8000 samples / sec
	src_data.end_of_input = 0; // no further data, every 40 ms frame is considered to be a separate unit

	if (samplerate == 48000) {
		src_data.src_ratio = (float) 8000/48000;
	} else {
		src_data.src_ratio = (float) 8000/44100;
	}; // end else - if
}; // end if


// start thread to detect keypresses (used to toggle transmitting)
pthread_create (&thr_keypress, NULL, funct_keypress, (void *) &global);


// Start stream
pa_ret=Pa_StartStream(stream);

if (pa_ret != paNoError) {
	Pa_Terminate();
	fprintf(stderr,"Error in Pa_StartStream: %s(%d) \n",Pa_GetErrorText(pa_ret),pa_ret);
	exit(-1);
}; // end if


// init some vars;
oldstate=0;


while (( pa_ret = Pa_IsStreamActive (stream)) == 1) {
// get audio

	pa_ret = Pa_ReadStream(stream, audiobuffer, numSample);

	if (pa_ret != paNoError) {
		Pa_Terminate();
		fprintf(stderr,"Error in Pa_ReadStream: %s(%d) \n",Pa_GetErrorText(pa_ret),pa_ret);
		exit(-1);
	}; // end if


	// get state from subthread
	state=global.transmit;

	if (state) {
		// State = 1: write audio

		// first check the old state; if we go from oldstate=0 to state=1, this is
		// the beginning of a new stream, so send a start packet
		if (oldstate == 0) {
			// send "start" marker
			// fwrite((void *) &c2_begin,C2ENCAP_SIZE_MARK,1,stdout);
			// fflush(stdout);

			// send start 3 times, just to be sure
			sendto(udpsd,&c2_begin,C2ENCAP_SIZE_MARK,0,sendto_aiaddr, sendto_sizeaiaddr);
			sendto(udpsd,&c2_begin,C2ENCAP_SIZE_MARK,0,sendto_aiaddr, sendto_sizeaiaddr);
			sendto(udpsd,&c2_begin,C2ENCAP_SIZE_MARK,0,sendto_aiaddr, sendto_sizeaiaddr);

//			putc('B',stderr);
		}


		// if stereo, only use left channel
		if (stereo) {
			int loop;

			int16_t *p1, *p2;

			// start at the 2nd mono sample, taken from the 3rd int16 (left sample of the 2nd stereo frame)
			p1=&audiobuffer[1];
			p2=&audiobuffer[2];
	
			for (loop=1; loop < numSample; loop++) {
				*p1=*p2;
				p1++; p2 += 2;
			}; // end for
		}; // end if


		// if not 8000 samples / second: convert
		if (samplerate != 8000) {
fprintf(stderr,"2!!! \n");
			if (!inaudiobuffer_f) {
				fprintf(stderr,"Internal Error: inaudiobuffer_f not initialised \n");
				exit(-1);
			}; // end if

			if (!outaudiobuffer_f) {
				fprintf(stderr,"Internal Error: outaudiobuffer_f not initialised \n");
				exit(-1);
			}; // end if

			// convert int16 to float
			src_short_to_float_array(audiobuffer,inaudiobuffer_f,numSample);

			// convert
			ret=src_process(src,&src_data);

			if (ret) {
				fprintf(stderr,"Warning: samplerate conversion error %d (%s)\n",ret,src_strerror(ret));
			}; // end if

			// some error checking
			if (src_data.output_frames_gen != 320) {
				fprintf(stderr,"Warning: number of frames generated by samplerate conversion should be 320, got %ld. \n",src_data.output_frames_gen);
			}; // end if

			// convert back from float to int
			src_float_to_short_array(outaudiobuffer_f,audiobuffer,320); // 40 ms @ 8000 samples/sec = 320 samples

fprintf(stderr,"3!!! \n");
		}; // end if

		// do codec2 encoding
		codec2_encode(codec2, c2_buff, audiobuffer);

		//fwrite((void *)&c2_voice,C2ENCAP_SIZE_VOICE1400,1,stdout);
		//fflush(stdout);

		sendto(udpsd,&c2_voice,C2ENCAP_SIZE_VOICE1400,0,sendto_aiaddr, sendto_sizeaiaddr);

//		putc('T',stderr);
	} else {
		// state = 0, do not send
		// however, if we go from "oldstate = 1 -> state = 0", this is
		// the end of a stream

		if (oldstate) {
			// send "end" marker
			//fwrite((void *)&c2_end,C2ENCAP_SIZE_MARK,1,stdout);
			//fflush(stdout);

			// send end 3 times, just to be sure
			sendto(udpsd,&c2_end,C2ENCAP_SIZE_MARK,0,sendto_aiaddr, sendto_sizeaiaddr);
			sendto(udpsd,&c2_end,C2ENCAP_SIZE_MARK,0,sendto_aiaddr, sendto_sizeaiaddr);
			sendto(udpsd,&c2_end,C2ENCAP_SIZE_MARK,0,sendto_aiaddr, sendto_sizeaiaddr);

//			putc('E',stderr);
		}; // end if 
	}; // end else - if

	oldstate=state;

	
}; // end while
// dropped out of endless loop. Should not happen

if (pa_ret < 0) {
	Pa_Terminate();
	fprintf(stderr,"Error in Pa_isStreamActive: %s(%d) \n",Pa_GetErrorText(pa_ret),pa_ret);
	exit(-1);
}; // end if

fprintf(stderr,"Error: audiocap dropped out of audiocapturing loop. Should not happen!\n");

pa_ret=Pa_CloseStream(stream);

if (pa_ret != paNoError) {
	Pa_Terminate();
	fprintf(stderr,"Error in Pa_CloseStream: %s(%d) \n",Pa_GetErrorText(pa_ret),pa_ret);
	exit(-1);
}; // end if

// Done!!!

Pa_Terminate();

exit(0);


}; // end main application
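The capture loop above converts each 40 ms block from the sound-card rate down to the 8000 samples/s that codec2_encode() expects. A minimal hedged sketch of just that per-block conversion, with illustrative names, assuming a mono block and a long-lived SRC_STATE created once with src_new(SRC_SINC_FASTEST, 1, &err):

#include <samplerate.h>
#include <stdio.h>

/* Convert one 40 ms capture block to 8000 samples/s (320 samples out).
 * Returns the number of output samples, or -1 on error. */
int block_to_8k(SRC_STATE *state, const short *in, int in_samples,
                short out[320], int in_rate)
{
    float fin[1920], fout[320];     /* 1920 = 40 ms at 48000 samples/s */
    SRC_DATA d;
    int err;

    if (in_samples > 1920)
        return -1;

    src_short_to_float_array(in, fin, in_samples);

    d.data_in       = fin;
    d.data_out      = fout;
    d.input_frames  = in_samples;   /* mono, so samples == frames */
    d.output_frames = 320;          /* 40 ms at 8000 samples/s */
    d.src_ratio     = 8000.0 / in_rate;
    d.end_of_input  = 0;            /* blocks keep arriving from PortAudio */

    if ((err = src_process(state, &d)) != 0) {
        fprintf(stderr, "src_process: %s\n", src_strerror(err));
        return -1;
    }

    src_float_to_short_array(fout, out, (int)d.output_frames_gen);
    return (int)d.output_frames_gen;
}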
コード例 #24
0
// Returns 0 at EOF (the caller should still check *data_size for data).
// Otherwise returns non-zero.
int decGetData(
	void* data, 
	char* ch_buffer, // Expecting AV_BUFFSIZE bytes.
	int*  data_size)
{
	dec_data* dec = (dec_data*)data;
	int ret;
	int to_read = AV_BUFFSIZE * dec->samplerate / audio_samplerate;
	char* read_buffer;
	int actually_read;

	// Make sure the number of bytes to read is a multiple
	// of channels * 16 bits.
	while(to_read % (audio_channels * sizeof(short)))
		to_read--;

	// Are we changing the samplerate?
	if(to_read == AV_BUFFSIZE)
		read_buffer = ch_buffer;
	else
 		read_buffer = (char*)alloca(to_read);

	if(dec->channels == audio_channels)
	{
		if((actually_read = dec->decoder->read(
			dec->data,
			read_buffer,
			to_read)) == 0)
		{
			*data_size = 0;
			return 0;
		}

		ret = actually_read;
	}
	else if(dec->channels == 1)
	{
		// If we have mono, repeat the data
		// for all channels.
		short* buff = (short*)alloca(to_read / audio_channels);
		int i;
		int m = to_read / sizeof(short) / audio_channels;

 		short* sbuff = (short*)read_buffer;

		if((actually_read = dec->decoder->read(
			dec->data,
			(char*)buff,
			to_read / audio_channels)) == 0)
		{
			*data_size = 0;
			return 0;
		}

		for(i = 0 ; i < m ; i++)
		{
			int j;

			for(j = 0 ; j < audio_channels ; j++)
				sbuff[i * audio_channels + j] = buff[i];
		}

		ret = actually_read * audio_channels;
	}
	else
	{
		LOG(L"I only support from mono to multiple channels!\n");
		*data_size = 0;
		return 0;
	}

	// At this point, we know that the input has audio_channels channels.

	if(dec->samplerate != audio_samplerate)
	{
		SRC_DATA src;
		float* buffer_in = (float*)alloca(ret / sizeof(short) * sizeof(float));
		float buffer_out[AV_BUFFSIZE / sizeof(short)];
		int err;
		
		short* buffer = (short*)read_buffer;

		src_short_to_float_array(
			buffer,
			buffer_in,
			ret / sizeof(short));

		src.data_in  = buffer_in;
		src.data_out = buffer_out;

		src.input_frames  = ret         / sizeof(short) / audio_channels;
		src.output_frames = AV_BUFFSIZE / sizeof(short) / audio_channels;

		src.src_ratio = dec->ratio;
		src.end_of_input = (actually_read != to_read);

		src.input_frames_used = 0;
		src.output_frames_gen = 0;

		if((err = src_process((SRC_STATE*)dec->converter, &src)) != 0)
		{
			LOG(L"libsamplerate: %s\n", src_strerror(err));
			*data_size = 0;
			return 0;
		}

		src_float_to_short_array(
			buffer_out,
			(short*)ch_buffer,
			src.output_frames_gen * audio_channels);

		*data_size = src.output_frames_gen * sizeof(short) * audio_channels;
		return src.output_frames_gen != 0 || src.input_frames_used != 0;
	}
	else
	{
		*data_size = ret;
		return ret;
	}

/*	return dec->decoder->read(
		dec->data,
		ch_buffer,
		AV_BUFFSIZE);*/
}
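decGetData() above sets end_of_input only when the decoder returns a short read, which tells libsamplerate to flush the frames it still holds internally. A hedged sketch of that one detail in isolation, with illustrative names:

#include <samplerate.h>

/* Push one decoded chunk through a persistent converter. Pass last_chunk = 1
 * for the final (short) decoder read so the converter flushes its tail. */
long resample_chunk(SRC_STATE *state, float *in, long in_frames,
                    float *out, long out_frames_max,
                    double ratio, int last_chunk)
{
    SRC_DATA d;

    d.data_in       = in;
    d.data_out      = out;
    d.input_frames  = in_frames;
    d.output_frames = out_frames_max;
    d.src_ratio     = ratio;
    d.end_of_input  = last_chunk;   /* 0 while more decoder data is expected */

    if (src_process(state, &d) != 0)
        return -1;

    /* After the last chunk, the caller may keep calling with input_frames = 0
     * until output_frames_gen stays 0, if every remaining frame is needed. */
    return d.output_frames_gen;
}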
コード例 #25
0
ファイル: main.c プロジェクト: z00t/n64iphone
static int resample(Uint8 *input, int input_avail, int oldsamplerate, Uint8 *output, int output_needed, int newsamplerate)
{

#ifdef USE_SRC
    if(Resample == 2)
    {
        // the high quality resampler needs more input than the samplerate ratio would indicate to work properly
        if (input_avail > output_needed * 3 / 2)
            input_avail = output_needed * 3 / 2; // just to avoid too much short-float-short conversion time
        if (_src_len < input_avail*2 && input_avail > 0)
        {
            if(_src) free(_src);
            _src_len = input_avail*2;
            _src = malloc(_src_len);
        }
        if (_dest_len < output_needed*2 && output_needed > 0)
        {
            if(_dest) free(_dest);
            _dest_len = output_needed*2;
            _dest = malloc(_dest_len);
        }
        memset(_src,0,_src_len);
        memset(_dest,0,_dest_len);
        if(src_state == NULL)
        {
            src_state = src_new (SRC_SINC_BEST_QUALITY, 2, &error);
            if(src_state == NULL)
            {
                memset(output, 0, output_needed);
                return 0;
            }
        }
        src_short_to_float_array ((short *) input, _src, input_avail/2);
        src_data.end_of_input = 0;
        src_data.data_in = _src;
        src_data.input_frames = input_avail/4;
        src_data.src_ratio = (float) newsamplerate / oldsamplerate;
        src_data.data_out = _dest;
        src_data.output_frames = output_needed/4;
        if ((error = src_process (src_state, &src_data)))
        {
            memset(output, 0, output_needed);
            return input_avail;  // number of bytes consumed
        }
        src_float_to_short_array (_dest, (short *) output, output_needed/2);
        return src_data.input_frames_used * 4;
    }
#endif
    // RESAMPLE == 1
    int *psrc = (int*)input;
    int *pdest = (int*)output;
    int i;
    int j=0;
    int sldf = oldsamplerate;
    int const2 = 2*sldf;
    int dldf = newsamplerate;
    int const1 = const2 - 2*dldf;
    int criteria = const2 - dldf;
    for(i = 0; i < output_needed/4; i++)
    {
        pdest[i] = psrc[j];
        if(criteria >= 0)
        {
            ++j;
            criteria += const1;
        }
        else criteria += const2;
    }
    return j * 4; //number of bytes consumed
}
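When USE_SRC is not active, the function above falls back to a Bresenham-style zero-order hold over 32-bit stereo frames. A hedged sketch of that fallback as a standalone routine (illustrative names; a bounds check on the input is added):

#include <stdint.h>
#include <stddef.h>

/* Zero-order-hold resampler over 32-bit stereo frames: walk the output at the
 * new rate and advance the input index with an integer error term. Returns
 * the number of input frames consumed. */
static size_t resample_zoh(const int32_t *in, size_t in_frames,
                           int32_t *out, size_t out_frames,
                           int in_rate, int out_rate)
{
    size_t i, j = 0;
    int err = 2 * in_rate - out_rate;         /* decision variable */

    for (i = 0; i < out_frames && j < in_frames; i++) {
        out[i] = in[j];                       /* repeat the current input frame */
        if (err >= 0) {
            j++;                              /* step to the next input frame */
            err += 2 * in_rate - 2 * out_rate;
        } else {
            err += 2 * in_rate;
        }
    }
    return j;
}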
コード例 #26
0
ファイル: audiofile.cpp プロジェクト: dyfet/sflphone
RawFile::RawFile(const std::string& name, sfl::AudioCodec *codec, unsigned int sampleRate)
    : AudioFile(name), audioCodec_(codec)
{
    if (filepath_.empty())
        throw AudioFileException("Unable to open audio file: filename is empty");

    std::fstream file;
    file.open(filepath_.c_str(), std::fstream::in);

    if (!file.is_open())
        throw AudioFileException("Unable to open audio file");

    file.seekg(0, std::ios::end);
    size_t length = file.tellg();
    file.seekg(0, std::ios::beg);

    std::vector<char> fileBuffer(length);
    file.read(&fileBuffer[0], length);
    file.close();

    const unsigned int frameSize = audioCodec_->getFrameSize();
    const unsigned int bitrate   = audioCodec_->getBitRate() * 1000 / 8;
    const unsigned int audioRate = audioCodec_->getClockRate();
    const unsigned int encFrameSize = frameSize * bitrate / audioRate;
    const unsigned int decodedSize = length * (frameSize / encFrameSize);

    SFLDataFormat *monoBuffer = new SFLDataFormat[decodedSize];
    SFLDataFormat *bufpos = monoBuffer;
    unsigned char *filepos = reinterpret_cast<unsigned char *>(&fileBuffer[0]);
    size_ = decodedSize;

    while (length >= encFrameSize) {
        bufpos += audioCodec_->decode(bufpos, filepos, encFrameSize);
        filepos += encFrameSize;
        length -= encFrameSize;
    }

    if (sampleRate == audioRate)
        buffer_ = monoBuffer;
    else {
        double factord = (double) sampleRate / audioRate;
        float* floatBufferIn = new float[size_];
        int    sizeOut  = ceil(factord * size_);
        src_short_to_float_array(monoBuffer, floatBufferIn, size_);
        delete [] monoBuffer;
        delete [] buffer_;
        buffer_ = new SFLDataFormat[sizeOut];

        SRC_DATA src_data;
        src_data.data_in = floatBufferIn;
        src_data.input_frames = size_;
        src_data.output_frames = sizeOut;
        src_data.src_ratio = factord;

        float* floatBufferOut = new float[sizeOut];
        src_data.data_out = floatBufferOut;

        src_simple(&src_data, SRC_SINC_BEST_QUALITY, 1);
        src_float_to_short_array(floatBufferOut, buffer_, src_data.output_frames_gen);

        delete [] floatBufferOut;
        delete [] floatBufferIn;
        size_ = src_data.output_frames_gen;
    }
}
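The constructor above resamples the whole decoded file in one call with the simple API. A minimal hedged sketch of that one-shot pattern for a mono int16 buffer (illustrative names; allocation error checks are omitted):

#include <samplerate.h>
#include <math.h>
#include <stdlib.h>

/* One-shot conversion of a complete mono int16 buffer to a new rate.
 * On success *out points to a malloc'd buffer and the frame count is
 * returned; on failure -1 is returned. */
long resample_whole_buffer(const short *in, long in_frames,
                           short **out, int in_rate, int out_rate)
{
    double ratio    = (double)out_rate / in_rate;
    long   out_max  = (long)ceil(in_frames * ratio);
    long   produced = -1;

    float *fin  = malloc(in_frames * sizeof(float));
    float *fout = malloc(out_max   * sizeof(float));
    *out        = malloc(out_max   * sizeof(short));

    src_short_to_float_array(in, fin, (int)in_frames);

    SRC_DATA d;
    d.data_in       = fin;
    d.data_out      = fout;
    d.input_frames  = in_frames;
    d.output_frames = out_max;
    d.src_ratio     = ratio;

    if (src_simple(&d, SRC_SINC_BEST_QUALITY, 1) == 0) {
        src_float_to_short_array(fout, *out, (int)d.output_frames_gen);
        produced = d.output_frames_gen;
    }

    free(fin);
    free(fout);
    return produced;
}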