void Resample::out(float *inl, float *inr, float *outl, float *outr, int frames, double ratio) { long int o_frames = lrint((double)frames*ratio); srcinfol.data_in = inl; srcinfol.input_frames = frames; srcinfol.data_out = outl; srcinfol.output_frames = o_frames; srcinfol.src_ratio = ratio; srcinfol.end_of_input = 0; srcinfor.data_in = inr; srcinfor.input_frames = frames; srcinfor.data_out = outr; srcinfor.output_frames = o_frames; srcinfor.src_ratio = ratio; srcinfor.end_of_input = 0; errorl = src_process(statel, &srcinfol); errorr = src_process(stater, &srcinfor); }
static void zero_input_test (int converter) { SRC_DATA data ; SRC_STATE *state ; float out [100] ; int error ; printf (" %s (%-26s) ........ ", __func__, src_get_name (converter)) ; fflush (stdout) ; if ((state = src_new (converter, 1, &error)) == NULL) { printf ("\n\nLine %d : src_new failed : %s.\n\n", __LINE__, src_strerror (error)) ; exit (1) ; } ; data.data_in = (float *) 0xdeadbeef ; data.input_frames = 0 ; data.data_out = out ; data.output_frames = ARRAY_LEN (out) ; data.end_of_input = 0 ; data.src_ratio = 1.0 ; if ((error = src_process (state, &data))) { printf ("\n\nLine %d : src_new failed : %s.\n\n", __LINE__, src_strerror (error)) ; exit (1) ; } ; state = src_delete (state) ; puts ("ok") ; } /* zero_input_test */
/* Read up to `size` bytes of 16-bit mono PCM from the JACK capture
 * ringbuffer into `buf`, resampling by the preconfigured src_ratio and
 * applying the read/master gain levels with saturation to 16-bit range.
 * Returns the number of bytes written into `buf`, or -1 on error. */
int jack_card_read(JackCard *obj,char *buf,int size)
{
	size_t bytes, can_read, i;
	int error;
	float norm, value;
	g_return_val_if_fail((obj->read.buffer!=NULL)&&(obj->read.src_state!=NULL),-1);
	if (jack_init(obj) != 0) return -1;
	size /= 2; /* caller's byte count -> 16-bit sample count */
	can_read = MIN(size, obj->read.frames);
	//	can_read = MIN(((long)((double)can_read / obj->read.data.src_ratio))*sizeof(sample_t), jack_ringbuffer_read_space(obj->read.buffer));
	/* Input bytes needed to produce `can_read` output frames at the current
	 * ratio; the ringbuffer read below may return fewer (handled via `bytes`). */
	can_read = ((long)((double)can_read / obj->read.data.src_ratio))*sizeof(sample_t);
	/* can_process gates the JACK process callback while we drain the buffer. */
	obj->read.can_process = FALSE;
	bytes = jack_ringbuffer_read (obj->read.buffer, (void *)obj->read.data.data_in, can_read);
	obj->read.can_process = TRUE;
	obj->read.data.input_frames = bytes / sizeof(sample_t);
	can_read = MIN(size, obj->read.frames);
	obj->read.data.output_frames = can_read;
	if ((error = src_process(obj->read.src_state, &(obj->read.data))) != 0)
		g_warning("error while samplerate conversion. error: %s", src_strerror(error));
	/* Scale floats to 16-bit with combined channel and master level. */
	norm = obj->read.level*obj->level*(float)0x8000;
	for (i=0; i < obj->read.data.output_frames_gen; i++) {
		value = obj->read.data.data_out[i]*norm;
		/* clamp to the representable 16-bit range */
		if (value >= 32767.0)
			((short*)buf)[i] = 32767;
		else if (value <= -32768.0)
			((short*)buf)[i] = -32768;
		else
			((short*)buf)[i] = (short)value;
	}
	bytes = obj->read.data.output_frames_gen * 2; /* frames -> bytes (16-bit mono) */
	return bytes;
}
/* Append `len` input frames to buffer `b`, resampled through the global
 * `srcstate` converter at `ratio`.  The buffer grows by the worst-case
 * output size (len * ratio plus slack); b->len advances by the number of
 * frames actually generated. */
static void bufadd (Buffer * b, float * data, int len, double ratio)
{
	int oldlen = b->len;
	int max = len * ratio + 100; /* worst-case output frames + slack */
	bufgrow (b, oldlen + max);
	/* Designated initializer: unnamed fields (incl. end_of_input) are zero. */
	SRC_DATA d = {
		.data_in = data,
		.input_frames = len,
		.data_out = OFFSET (b->mem, oldlen),
		.output_frames = max,
		.src_ratio = ratio};
	src_process (srcstate, & d);
	b->len = oldlen + d.output_frames_gen;
}

/* Reset the effect between runs: clear converter history and buffers,
 * and prime the input with half a window of leading silence. */
static void speed_flush (void)
{
	src_reset (srcstate);
	in.len = 0;
	out.len = 0;

	/* Add silence to the beginning of the input signal. */
	bufgrow (& in, width / 2);
	trim = width / 2;
	written = 0;
	ending = FALSE;
}
//TODO Add ifdef for int16 or float32 type void SamplerateConverter::resample (SFLDataFormat* dataIn , SFLDataFormat* dataOut , int inputFreq , int outputFreq , int nbSamples) { double sampleFactor = (double) outputFreq / inputFreq; if (sampleFactor == 1.0) return; unsigned int outSamples = nbSamples * sampleFactor; unsigned int maxSamples = outSamples; if (maxSamples < (unsigned int)nbSamples) maxSamples = nbSamples; if (maxSamples > _samples) { /* grow buffer if needed */ _samples = maxSamples; delete [] _floatBufferIn; delete [] _floatBufferOut; _floatBufferIn = new float32[_samples]; _floatBufferOut = new float32[_samples]; } SRC_DATA src_data; src_data.data_in = _floatBufferIn; src_data.data_out = _floatBufferOut; src_data.input_frames = nbSamples; src_data.output_frames = outSamples; src_data.src_ratio = sampleFactor; src_data.end_of_input = 0; // More data will come Short2FloatArray (dataIn , _floatBufferIn, nbSamples); src_process (_src_state, &src_data); src_float_to_short_array (_floatBufferOut, dataOut , outSamples); }
int Resample::Process(double factor, float *inBuffer, int inBufferLen, bool lastFlag, int *inBufferUsed, float *outBuffer, int outBufferLen) { if (mInitial) { src_set_ratio((SRC_STATE *)mHandle, factor); mInitial = false; } SRC_DATA data; data.data_in = inBuffer; data.data_out = outBuffer; data.input_frames = inBufferLen; data.output_frames = outBufferLen; data.input_frames_used = 0; data.output_frames_gen = 0; data.end_of_input = (int)lastFlag; data.src_ratio = factor; int err = src_process((SRC_STATE *)mHandle, &data); if (err) { wxFprintf(stderr, _("Libsamplerate error: %d\n"), err); return 0; } *inBufferUsed = (int)data.input_frames_used; return (int)data.output_frames_gen; }
/* Render `nframes` frames of the stream into per-channel `buffers`,
 * resampling by the current playback speed.  Advances playpos by the
 * input frames consumed; when none were consumed, advances the floating
 * position estimate instead so playback position keeps moving. */
void phat_audiostream_render( PhatAudiostream *stream, gint nframes, float **buffers )
{
	gint num_channels = phat_audiostream_get_num_channels( stream );
	SRC_DATA *src = & (stream->src_data);

	src->data_out = stream->src_buffer;
	/* input starts at the current play position (interleaved frames) */
	src->data_in = stream->data + (stream->playpos * num_channels);
	src->input_frames = stream->len - stream->playpos;
	src->output_frames = nframes;
	src->end_of_input = 0; // FIXME: real check;
	src->src_ratio = stream->speed + stream->tmp_speed;

	src_process( stream->src_state, src );

	/* de-interleave the resampled buffer into per-channel outputs */
	gint i,c;
	for( i=0; i<nframes; i++ ) {
		for( c=0; c<num_channels; c++ ) {
			buffers[c][i] = stream->src_buffer[ i * num_channels + c ];
		}
	}

	if( src->input_frames_used != 0 ) {
		stream->playpos += src->input_frames_used;
		stream->pos_est = (gdouble) stream->playpos;
	} else {
		/* converter consumed nothing (internal buffering): keep an
		 * estimated position moving at the current speed */
		stream->pos_est += ( ((gdouble)nframes) / (stream->speed + stream->tmp_speed) );
	}
}
int resample_48k_to_8k( short output_short[], short input_short[], int length_output_short, // maximum output array length in samples int length_input_short ) { SRC_DATA src_data; float input[N48*2]; float output[N48*2]; const int input_sample_rate = 48000; const int output_sample_rate = 8000; assert(length_input_short <= N48*2); assert(length_output_short <= N48*2); src_short_to_float_array(input_short, input, length_input_short); src_data.data_in = input; src_data.data_out = output; src_data.input_frames = length_input_short; src_data.output_frames = length_output_short; src_data.end_of_input = 0; src_data.src_ratio = (float)output_sample_rate/input_sample_rate; src_process(insrc1, &src_data); assert(src_data.output_frames_gen <= length_output_short); src_float_to_short_array(output, output_short, src_data.output_frames_gen); return src_data.output_frames_gen; }
// Could we get iph-based instruments support sample-exact models by using a // frame-length of 1 while rendering? void sf2Instrument::play( sampleFrame * _working_buffer ) { const fpp_t frames = engine::mixer()->framesPerPeriod(); m_synthMutex.lock(); const int currentMidiPitch = instrumentTrack()->midiPitch(); if( m_lastMidiPitch != currentMidiPitch ) { m_lastMidiPitch = currentMidiPitch; fluid_synth_pitch_bend( m_synth, m_channel, m_lastMidiPitch ); } const int currentMidiPitchRange = instrumentTrack()->midiPitchRange(); if( m_lastMidiPitchRange != currentMidiPitchRange ) { m_lastMidiPitchRange = currentMidiPitchRange; fluid_synth_pitch_wheel_sens( m_synth, m_channel, m_lastMidiPitchRange ); } if( m_internalSampleRate < engine::mixer()->processingSampleRate() && m_srcState != NULL ) { const fpp_t f = frames * m_internalSampleRate / engine::mixer()->processingSampleRate(); #ifdef __GNUC__ sampleFrame tmp[f]; #else sampleFrame * tmp = new sampleFrame[f]; #endif fluid_synth_write_float( m_synth, f, tmp, 0, 2, tmp, 1, 2 ); SRC_DATA src_data; src_data.data_in = tmp[0]; src_data.data_out = _working_buffer[0]; src_data.input_frames = f; src_data.output_frames = frames; src_data.src_ratio = (double) frames / f; src_data.end_of_input = 0; int error = src_process( m_srcState, &src_data ); #ifndef __GNUC__ delete[] tmp; #endif if( error ) { qCritical( "sf2Instrument: error while resampling: %s", src_strerror( error ) ); } if( src_data.output_frames_gen > frames ) { qCritical( "sf2Instrument: not enough frames: %ld / %d", src_data.output_frames_gen, frames ); } } else { fluid_synth_write_float( m_synth, frames, _working_buffer, 0, 2, _working_buffer, 1, 2 ); } m_synthMutex.unlock(); instrumentTrack()->processAudioBuffer( _working_buffer, frames, NULL ); }
/* Resample a block of interleaved 32-bit PCM from src_rate to dest_rate
 * using libsamplerate.  On success, returns a buffer owned by `state`
 * (valid until the next call) and stores its byte size in *dest_size_r;
 * on failure, sets *error_r and returns NULL.  A previous converter
 * error is sticky until the configuration changes. */
const int32_t *
pcm_resample_lsr_32(struct pcm_resample_state *state,
		    uint8_t channels,
		    unsigned src_rate,
		    const int32_t *src_buffer, size_t src_size,
		    unsigned dest_rate, size_t *dest_size_r,
		    GError **error_r)
{
	bool success;
	SRC_DATA *data = &state->data;
	size_t data_in_size;
	size_t data_out_size;
	int error;
	int32_t *dest_buffer;

	assert((src_size % (sizeof(*src_buffer) * channels)) == 0);

	/* (re)configure the converter for this channel/rate combination */
	success = pcm_resample_set(state, channels, src_rate, dest_rate,
				   error_r);
	if (!success)
		return NULL;

	/* there was an error previously, and nothing has changed */
	if (state->error) {
		g_set_error(error_r, libsamplerate_quark(), state->error,
			    "libsamplerate has failed: %s",
			    src_strerror(state->error));
		return NULL;
	}

	data->input_frames = src_size / sizeof(*src_buffer) / channels;
	data_in_size = data->input_frames * sizeof(float) * channels;
	data->data_in = pcm_buffer_get(&state->in, data_in_size);

	/* ceiling division: reserve enough frames for the rate ratio */
	data->output_frames = (src_size * dest_rate + src_rate - 1) / src_rate;
	data_out_size = data->output_frames * sizeof(float) * channels;
	data->data_out = pcm_buffer_get(&state->out, data_out_size);

	src_int_to_float_array(src_buffer, data->data_in,
			       data->input_frames * channels);

	error = src_process(state->state, data);
	if (error) {
		g_set_error(error_r, libsamplerate_quark(), error,
			    "libsamplerate has failed: %s",
			    src_strerror(error));
		state->error = error; /* remember the failure (sticky) */
		return NULL;
	}

	*dest_size_r = data->output_frames_gen *
		sizeof(*dest_buffer) * channels;
	dest_buffer = pcm_buffer_get(&state->buffer, *dest_size_r);
	src_float_to_int_array(data->data_out, dest_buffer,
			       data->output_frames_gen * channels);

	return dest_buffer;
}
/* Down-mix the current sample buffer to mono and, if compiled with
 * libsamplerate, resample it to SYSTEM_SAMPLERATE.  `size` is the input
 * size in bytes; returns the (possibly changed) size in bytes after
 * conversion.  Unconsumed input samples are carried over in resample_in
 * for the next call. */
unsigned int AmAudio::downMix(unsigned int size)
{
  unsigned int s = size;
  if(fmt->channels == 2){
    /* interleaved stereo -> mono, written into the back buffer */
    stereo2mono(samples.back_buffer(),(unsigned char*)samples,s);
    samples.swap();
  }

#ifdef USE_LIBSAMPLERATE
  if (fmt->rate != SYSTEM_SAMPLERATE) {
    if (!resample_state) {
      int src_error;
      // for better quality but more CPU usage, use SRC_SINC_ converters
      resample_state = src_new(SRC_LINEAR, 1, &src_error);
      if (!resample_state) {
	ERROR("samplerate initialization error: ");
      }
    }

    if (resample_state) {
      /* append this packet (as floats) to the carry-over input buffer */
      if (resample_buf_samples + PCM16_B2S(s) > PCM16_B2S(AUDIO_BUFFER_SIZE) * 2) {
	WARN("resample input buffer overflow! (%d)\n",
	     resample_buf_samples + PCM16_B2S(s));
      } else {
	signed short* samples_s = (signed short*)(unsigned char*)samples;
	src_short_to_float_array(samples_s, &resample_in[resample_buf_samples], PCM16_B2S(s));
	resample_buf_samples += PCM16_B2S(s);
      }

      SRC_DATA src_data;
      src_data.data_in = resample_in;
      src_data.input_frames = resample_buf_samples;
      src_data.data_out = resample_out;
      src_data.output_frames = PCM16_B2S(AUDIO_BUFFER_SIZE);
      src_data.src_ratio = (double)SYSTEM_SAMPLERATE / (double)fmt->rate;
      src_data.end_of_input = 0;

      int src_err = src_process(resample_state, &src_data);
      if (src_err) {
	DBG("resample error: '%s'\n", src_strerror(src_err));
      }else {
	/* write converted shorts back in place over the sample buffer */
	signed short* samples_s = (signed short*)(unsigned char*)samples;
	src_float_to_short_array(resample_out, samples_s, src_data.output_frames_gen);
	s = PCM16_S2B(src_data.output_frames_gen);

	/* shift unconsumed input floats to the front for the next call */
	if (resample_buf_samples != (unsigned int)src_data.input_frames_used) {
	  memmove(resample_in, &resample_in[src_data.input_frames_used],
		  (resample_buf_samples - src_data.input_frames_used) * sizeof(float));
	}
	resample_buf_samples = resample_buf_samples - src_data.input_frames_used;
      }
    }
  }
#endif
  return s;
}
/* Resample `s` bytes of 16-bit mono PCM held in `samples` (in place) by
 * `ratio`, carrying unconsumed input and surplus output across calls in
 * resample_in / resample_out.  Returns the new payload size in bytes.
 * NOTE(review): resample_out_buf_samples (a sample count) is compared
 * against `s` (a byte count) below, and `s *= ratio` truncates — looks
 * unit-inconsistent; verify against the original project before relying
 * on exact carry-over behavior. */
unsigned int AmLibSamplerateResamplingState::resample(unsigned char* samples, unsigned int s, double ratio)
{
  DBG("resampling packet of size %d with ratio %f", s, ratio);
  if (!resample_state) {
    int src_error;
    // for better quality but more CPU usage, use SRC_SINC_ converters
    resample_state = src_new(SRC_LINEAR, 1, &src_error);
    if (!resample_state) {
      ERROR("samplerate initialization error: ");
    }
  }

  if (resample_state) {
    /* append this packet (as floats) unless either buffer would overflow */
    if (resample_buf_samples + PCM16_B2S(s) > PCM16_B2S(AUDIO_BUFFER_SIZE) * 2) {
      WARN("resample input buffer overflow! (%lu)\n",
	   resample_buf_samples + PCM16_B2S(s));
    } else if (resample_out_buf_samples + (PCM16_B2S(s) * ratio) + 20 >
	       PCM16_B2S(AUDIO_BUFFER_SIZE)) {
      WARN("resample: possible output buffer overflow! (%lu)\n",
	   (resample_out_buf_samples + (size_t) ((PCM16_B2S(s) * ratio)) + 20));
    } else {
      signed short* samples_s = (signed short*)samples;
      src_short_to_float_array(samples_s, &resample_in[resample_buf_samples], PCM16_B2S(s));
      resample_buf_samples += PCM16_B2S(s);
    }

    SRC_DATA src_data;
    src_data.data_in = resample_in;
    src_data.input_frames = resample_buf_samples;
    src_data.data_out = &resample_out[resample_out_buf_samples];
    src_data.output_frames = PCM16_B2S(AUDIO_BUFFER_SIZE);
    src_data.src_ratio = ratio;
    src_data.end_of_input = 0;

    int src_err = src_process(resample_state, &src_data);
    if (src_err) {
      DBG("resample error: '%s'\n", src_strerror(src_err));
    }else {
      /* write the converted output back over the caller's buffer */
      signed short* samples_s = (signed short*)(unsigned char*)samples;
      resample_out_buf_samples += src_data.output_frames_gen;
      s *= ratio;
      src_float_to_short_array(resample_out, samples_s, PCM16_B2S(s));
      DBG("resample: output_frames_gen = %ld", src_data.output_frames_gen);

      /* shift unconsumed input floats to the front for the next call */
      if (resample_buf_samples != (unsigned int)src_data.input_frames_used) {
	memmove(resample_in, &resample_in[src_data.input_frames_used],
		(resample_buf_samples - src_data.input_frames_used) * sizeof(float));
      }
      resample_buf_samples = resample_buf_samples - src_data.input_frames_used;

      /* shift surplus output floats to the front as well */
      if (resample_out_buf_samples != s) {
	memmove(resample_out, &resample_out[PCM16_B2S(s)],
		(resample_out_buf_samples - PCM16_B2S(s)) * sizeof(float));
      }
      resample_out_buf_samples -= PCM16_B2S(s);
    }
  }
  DBG("resample: output size is %d", s);
  return s;
}
int LV2convolv::resample_read_presets (const float *in, unsigned int in_frames, const int sample_rate, float **buf, unsigned int *n_ch, unsigned int *n_sp) { float resample_ratio = 1.0; if (n_ch) *n_ch = PRESETS_CH; if (n_sp) *n_sp = in_frames; if (sample_rate != PRESETS_SAMPLERATE) { fprintf(stderr, "convolution: samplerate mismatch preset:%d host:%d\n", PRESETS_SAMPLERATE, sample_rate); resample_ratio = (float) sample_rate / (float) PRESETS_SAMPLERATE; } if (buf) { const size_t frames_in = PRESETS_CH * in_frames; const size_t frames_out = PRESETS_CH * ceil(in_frames * resample_ratio); *buf = (float*) malloc(frames_out*sizeof(float)); float *iin; if (resample_ratio != 1.0) { iin = (float*)malloc(frames_in * sizeof(float)); memcpy(iin, in, frames_in * sizeof(float)); } else { memcpy(*buf, in, frames_in * sizeof(float)); } if (!*buf) { fprintf (stderr, "convolution: memory allocation failed for IR audio-file buffer.\n"); return (-2); } if (resample_ratio != 1.0) { VERBOSE_printf("convolution: resampling IR %ld -> %ld [frames * channels].\n", (long int) frames_in, (long int) frames_out); SRC_STATE* src_state = src_new(SRC_QUALITY, PRESETS_CH, NULL); SRC_DATA src_data; src_data.input_frames = in_frames; src_data.output_frames = in_frames * resample_ratio; src_data.end_of_input = 1; src_data.src_ratio = resample_ratio; src_data.input_frames_used = 0; src_data.output_frames_gen = 0; src_data.data_in = iin; src_data.data_out = *buf; src_process(src_state, &src_data); VERBOSE_printf("convolution: resampled IR %ld -> %ld [frames * channels].\n", src_data.input_frames_used * PRESETS_CH, src_data.output_frames_gen * PRESETS_CH); if (n_sp) *n_sp = (unsigned int) src_data.output_frames_gen; free(iin); } } return (0); }
/* Read `buffer_length` bytes of resampled float audio for `channel`
 * (1-based).  Serves from the resampled ringbuffer when enough data is
 * buffered; otherwise pulls 512-frame chunks from the decoded ringbuffer
 * through the per-channel SRC converter until the request can be
 * satisfied.  Returns bytes delivered, or 0 when the decoded ringbuffer
 * runs dry.  The factor 4 below is sizeof(float) per mono frame. */
int krad_opus_decoder_read (krad_opus_t *krad_opus, int channel, char *buffer, int buffer_length) {

	int resample_process_size = 512;

	krad_opus->ret = krad_ringbuffer_peek (krad_opus->resampled_ringbuf[channel - 1], (char *)buffer, buffer_length );
	if (krad_opus->ret >= buffer_length) {
		/* fast path: enough resampled audio already buffered */
		krad_ringbuffer_read_advance (krad_opus->resampled_ringbuf[channel - 1], buffer_length );
		return krad_opus->ret;
	} else {
		while (krad_ringbuffer_read_space (krad_opus->resampled_ringbuf[channel - 1]) < buffer_length) {
			if (krad_ringbuffer_read_space (krad_opus->ringbuf[channel - 1]) >= resample_process_size * 4 ) {

				krad_opus->ret = krad_ringbuffer_peek (krad_opus->ringbuf[channel - 1],
									(char *)krad_opus->read_samples[channel - 1],
									(resample_process_size * 4) );

				krad_opus->src_data[channel - 1].data_in = krad_opus->read_samples[channel - 1];
				krad_opus->src_data[channel - 1].input_frames = resample_process_size;
				krad_opus->src_data[channel - 1].data_out = krad_opus->resampled_samples[channel - 1];
				krad_opus->src_data[channel - 1].output_frames = 2048;

				krad_opus->src_error[channel - 1] = src_process (krad_opus->src_resampler[channel - 1],
										 &krad_opus->src_data[channel - 1]);
				if (krad_opus->src_error[channel - 1] != 0) {
					failfast ("krad_opus_read_audio src resampler error: %s\n",
						  src_strerror(krad_opus->src_error[channel - 1]));
				}

				/* consume exactly what the converter used */
				krad_ringbuffer_read_advance (krad_opus->ringbuf[channel - 1],
							      (krad_opus->src_data[channel - 1].input_frames_used * 4) );

				krad_opus->ret = krad_ringbuffer_write (krad_opus->resampled_ringbuf[channel - 1],
									(char *)krad_opus->resampled_samples[channel - 1],
									(krad_opus->src_data[channel - 1].output_frames_gen * 4) );

				if (krad_ringbuffer_read_space (krad_opus->resampled_ringbuf[channel - 1]) >= buffer_length ) {
					return krad_ringbuffer_read (krad_opus->resampled_ringbuf[channel - 1], buffer, buffer_length );
				}
			} else {
				/* not enough decoded input available yet */
				return 0;
			}
		}
	}
	return 0;
}
/* Resample `input` into `output` using the resampler's stored ratio.
 * Buffer lengths define the frame counts; results land in output->data. */
void
aubio_resampler_do (aubio_resampler_t * s, fvec_t * input, fvec_t * output)
{
  SRC_DATA *proc = s->proc;

  /* point the converter at the caller's buffers */
  proc->data_in = (float *) input->data;
  proc->data_out = (float *) output->data;
  proc->input_frames = input->length;
  proc->output_frames = output->length;
  proc->src_ratio = (double) s->ratio;

  /* run the conversion */
  src_process (s->stat, proc);
}
/* VLC filter callback: resample block `in` from fmt_in to fmt_out rate.
 * Returns a newly allocated output block, or NULL on failure.
 * Note: on success, control intentionally falls through into the
 * `error:` label, which only releases the input block. */
static block_t *Resample (filter_t *filter, block_t *in)
{
    block_t *out = NULL;
    const size_t framesize = filter->fmt_out.audio.i_bytes_per_frame;
    SRC_STATE *s = (SRC_STATE *)filter->p_sys;
    SRC_DATA src;

    src.src_ratio = (double)filter->fmt_out.audio.i_rate
                  / (double)filter->fmt_in.audio.i_rate;

    /* the ratio can change between blocks (e.g. format update) */
    int err = src_set_ratio (s, src.src_ratio);
    if (err != 0)
    {
        msg_Err (filter, "cannot update resampling ratio: %s",
                 src_strerror (err));
        goto error;
    }

    src.input_frames = in->i_nb_samples;
    src.output_frames = ceil (src.src_ratio * src.input_frames);
    src.end_of_input = 0;

    out = block_Alloc (src.output_frames * framesize);
    if (unlikely(out == NULL))
        goto error;

    src.data_in = (float *)in->p_buffer;
    src.data_out = (float *)out->p_buffer;

    err = src_process (s, &src);
    if (err != 0)
    {
        msg_Err (filter, "cannot resample: %s", src_strerror (err));
        block_Release (out);
        out = NULL;
        goto error;
    }

    if (src.input_frames_used < src.input_frames)
        msg_Err (filter, "lost %ld of %ld input frames",
                 src.input_frames - src.input_frames_used, src.input_frames);

    /* shrink the block to what was actually generated and set timing */
    out->i_buffer = src.output_frames_gen * framesize;
    out->i_nb_samples = src.output_frames_gen;
    out->i_pts = in->i_pts;
    out->i_length = src.output_frames_gen * CLOCK_FREQ
                  / filter->fmt_out.audio.i_rate;
error:
    block_Release (in);
    return out;
}
void Resampler::resample(const AudioBuffer &dataIn, AudioBuffer &dataOut) { const double inputFreq = dataIn.getSampleRate(); const double outputFreq = dataOut.getSampleRate(); const double sampleFactor = outputFreq / inputFreq; if (sampleFactor == 1.0) return; const size_t nbFrames = dataIn.frames(); const size_t nbChans = dataIn.channels(); if (nbChans != format_.nb_channels) { // change channel num if needed int err; src_delete(src_state_); src_state_ = src_new(SRC_LINEAR, nbChans, &err); format_.nb_channels = nbChans; DEBUG("SRC channel number changed."); } if (nbChans != dataOut.channels()) { DEBUG("Output buffer had the wrong number of channels (in: %d, out: %d).", nbChans, dataOut.channels()); dataOut.setChannelNum(nbChans); } size_t inSamples = nbChans * nbFrames; size_t outSamples = inSamples * sampleFactor; // grow buffer if needed floatBufferIn_.resize(inSamples); floatBufferOut_.resize(outSamples); scratchBuffer_.resize(outSamples); SRC_DATA src_data; src_data.data_in = floatBufferIn_.data(); src_data.data_out = floatBufferOut_.data(); src_data.input_frames = nbFrames; src_data.output_frames = nbFrames * sampleFactor; src_data.src_ratio = sampleFactor; src_data.end_of_input = 0; // More data will come dataIn.interleaveFloat(floatBufferIn_.data()); src_process(src_state_, &src_data); /* TODO: one-shot deinterleave and float-to-short conversion */ src_float_to_short_array(floatBufferOut_.data(), scratchBuffer_.data(), outSamples); dataOut.deinterleave(scratchBuffer_.data(), src_data.output_frames, nbChans); }
/*
 * JNI entry point: resample inBuffer (16-bit PCM) into outBuffer at
 * 1/ratio, lazily creating a shared SINC converter guarded by `lock`.
 * Returns the number of output frames generated, -1 if the converter
 * cannot be created, or 0 when not ready.
 */
JNIEXPORT jint JNICALL Java_org_sipdroid_media_file_AudioFile_nresample(JNIEnv* env, jobject obj, jdouble ratio, jshortArray inBuffer, jshortArray outBuffer){
	if(ready){
		pthread_mutex_lock(&lock);
		// initialize converter
		if(converter == NULL){
			int error;
			converter = src_new(SRC_SINC_MEDIUM_QUALITY, 1, &error);
			if(converter == NULL){
				__android_log_print(ANDROID_LOG_DEBUG, DEBUG_TAG, "unable to initialize sample rate converter: %s", src_strerror(error));
				pthread_mutex_unlock(&lock);
				return -1;
			}
		}
		// prepare buffers: shorts -> floats for the converter
		jint input_len = env->GetArrayLength(inBuffer);
		float* fl_inBuffer = (float*) malloc(input_len * sizeof(float));
		short* sh_inBuffer = env->GetShortArrayElements(inBuffer, NULL);
		src_short_to_float_array(sh_inBuffer, fl_inBuffer, input_len);
		env->ReleaseShortArrayElements(inBuffer, sh_inBuffer, 0);

		jint output_len = env->GetArrayLength(outBuffer);
		float* fl_outBuffer = (float*) malloc(sizeof(float) * output_len);

		SRC_DATA src_data;
		src_data.data_in = fl_inBuffer;
		src_data.input_frames = (long) input_len;
		src_data.data_out = fl_outBuffer;
		src_data.output_frames = (long) output_len;
		src_data.src_ratio = (double) 1/ratio;
		src_data.end_of_input = 0;

		// resample
		int error;
		/* Fixed: src_process() returns 0 on success and a POSITIVE error
		 * code on failure, so the old `>= 0` test could never detect an
		 * error.  Compare against 0 instead. */
		if ((error = src_process(converter, &src_data)) == 0){
			// convert output back to shorts and write to outBuffer
			short* sh_outBuffer = env->GetShortArrayElements(outBuffer, NULL);
			src_float_to_short_array(src_data.data_out, sh_outBuffer, src_data.output_frames_gen);
			env->ReleaseShortArrayElements(outBuffer, sh_outBuffer, 0);
		} else{
			__android_log_print(ANDROID_LOG_DEBUG, DEBUG_TAG, "resampling error: %s", src_strerror(error));
		}
		free(fl_outBuffer);
		free(fl_inBuffer);
		pthread_mutex_unlock(&lock);
		return src_data.output_frames_gen;
	}
	return 0;
}
/* Queue one 16-bit sample into the input buffer (normalized to [-1,1)).
 * Once enough samples accumulate, a detached worker thread resamples the
 * backlog into the SDL output buffer and unpauses the device when it has
 * buffered far enough ahead.  _dumping prevents overlapping workers; all
 * buffer access is serialized through _mutex.
 * NOTE(review): the ratio uses 44800.0 — looks like it may be intended
 * as 44100 (CD rate) over the NTSC NES CPU clock; confirm upstream. */
void SDLAudioDevice::put_sample(int16_t sample)
{
    lock_guard lock (_mutex);

    _in.push_back(sample / 32768.f);
    if (_in.size() > FLUSH_SIZE && _out.available_size() && !_dumping) {
        _dumping = true;
        std::thread t0 { [this] {
            lock_guard lock (_mutex);
            /* re-check under the lock: space may have vanished */
            if (!_out.available_size()) {
                std::cerr << "No more room in output buffer!\n";
                _dumping = false;
                return;
            }

            SRC_DATA data;
            data.data_in = _in.data();
            data.data_out = _out.data() + _out.size();
            data.input_frames = _in.size();
            data.output_frames = _out.available_size();
            data.src_ratio = 44800.0 / 1789773.0;
            data.end_of_input = 0;
            int error = src_process(_state, &data);
            if (error) {
                std::cerr << "SRC error: " << src_strerror(error) << "\n";
            }
            /* drop consumed input, account for produced output */
            _in.flush(data.input_frames_used);
            _out.add(data.output_frames_gen);

            /* start playback only once a safety margin is buffered */
            bool ready = _out.size() > AUDIO_BUFFER_SIZE * 2;
            if (ready && !_unpaused) {
                std::cout << "Unpausing audio...\n";
                SDL_PauseAudioDevice(_device, 0);
                _unpaused = true;
            }
            _dumping = false;
        }};
        t0.detach();
    }
}
void Resample::mono_out(float *inl, float *outl, int frames, double ratio, int o_frames) { srcinfol.data_in = inl; srcinfol.input_frames = frames; srcinfol.data_out = outl; srcinfol.output_frames = o_frames; srcinfol.src_ratio = ratio; srcinfol.end_of_input = 0; errorl = src_process(statel, &srcinfol); }
/* Run the converter over state->data unless a previous error is latched.
 * On failure, latch the error, report it via error_r and return false. */
static bool
lsr_process(struct pcm_resample_state *state, GError **error_r)
{
	/* skip the conversion entirely when a prior error is still latched */
	if (state->error == 0)
		state->error = src_process(state->state, &state->data);

	if (state->error == 0)
		return true;

	g_set_error(error_r, libsamplerate_quark(), state->error,
		    "libsamplerate has failed: %s",
		    src_strerror(state->error));
	return false;
}
/* Convert one fixed-size frame from `input` to `output` at the
 * resampler's configured ratio.  Shortfalls in either direction are
 * papered over by repeating the last sample: extra input samples are
 * appended before conversion (in_extra) and missing output samples are
 * filled after conversion. */
PJ_DEF(void) pjmedia_resample_run( pjmedia_resample *resample,
				   const pj_int16_t *input,
				   pj_int16_t *output )
{
    SRC_DATA src_data;

    /* Convert samples to float */
    src_short_to_float_array(input, resample->frame_in,
			     resample->in_samples);

    /* replicate the last sample to pad the input when a previous run
     * came up short on output */
    if (resample->in_extra) {
	unsigned i;

	for (i=0; i<resample->in_extra; ++i)
	    resample->frame_in[resample->in_samples+i] =
		resample->frame_in[resample->in_samples-1];
    }

    /* Prepare SRC_DATA */
    pj_bzero(&src_data, sizeof(src_data)); /* zeroes end_of_input too */
    src_data.data_in = resample->frame_in;
    src_data.data_out = resample->frame_out;
    src_data.input_frames = resample->in_samples + resample->in_extra;
    src_data.output_frames = resample->out_samples + resample->out_extra;
    src_data.src_ratio = resample->ratio;

    /* Process! */
    src_process(resample->state, &src_data);

    /* Convert output back to short */
    src_float_to_short_array(resample->frame_out, output,
			     src_data.output_frames_gen);

    /* Replay last sample if conversion couldn't fill up the whole
     * frame. This could happen for example with 22050 to 16000 conversion.
     */
    if (src_data.output_frames_gen < (int)resample->out_samples) {
	unsigned i;

	/* grow the input padding (capped at 4) for subsequent runs */
	if (resample->in_extra < 4)
	    resample->in_extra++;

	for (i=src_data.output_frames_gen;
	     i<resample->out_samples; ++i)
	{
	    output[i] = output[src_data.output_frames_gen-1];
	}
    }
}
/* Append up to _frames_count frames of this buffer to _dest, converted
 * to _dest's sample rate.  Both buffers must be float32 with matching
 * channel counts (throws std::logic_error otherwise).  With a _SRC state
 * the conversion is streaming; without one a one-shot src_simple() is
 * used.  Without libsamplerate the destination is zero-filled. */
void AudioBuffer::convert_rate(AudioBuffer &_dest, unsigned _frames_count, SRC_STATE *_SRC)
{
	AudioSpec destspec{AUDIO_FORMAT_F32, m_spec.channels, _dest.rate()};
	if(m_spec.format != AUDIO_FORMAT_F32 || _dest.spec() != destspec) {
		throw std::logic_error("unsupported format");
	}

	_frames_count = std::min(frames(),_frames_count);
	double rate_ratio = double(destspec.rate)/double(m_spec.rate);
	unsigned out_frames = unsigned(ceil(double(_frames_count) * rate_ratio));
	if(out_frames==0) {
		return;
	}

	/* grow the destination to the worst case; trimmed after conversion */
	unsigned destpos = _dest.samples();
	unsigned destframes = _dest.frames();
	_dest.resize_frames(_dest.frames()+out_frames);

#if HAVE_LIBSAMPLERATE
	SRC_DATA srcdata;
	srcdata.data_in = &at<float>(0);
	srcdata.data_out = &_dest.at<float>(destpos);
	srcdata.input_frames = _frames_count;
	srcdata.output_frames = out_frames;
	srcdata.src_ratio = rate_ratio;
	int srcresult;
	if(_SRC != nullptr) {
		/* streaming conversion: keep converter history across calls */
		srcdata.end_of_input = 0;
		srcresult = src_process(_SRC, &srcdata);
	} else {
		/* one-shot conversion of this block only */
		srcdata.end_of_input = 1;
		srcresult = src_simple(&srcdata, SRC_SINC_BEST_QUALITY, destspec.channels) ;
	}
	if(srcresult != 0) {
		throw std::runtime_error(std::string("error resampling: ") + src_strerror(srcresult));
	}
	assert(srcdata.output_frames_gen>=0 && srcdata.output_frames_gen<=out_frames);
	/* trim the destination down to what was actually generated */
	if(srcdata.output_frames_gen != out_frames) {
		_dest.resize_frames(destframes + srcdata.output_frames_gen);
	}
	PDEBUGF(LOG_V2, LOG_MIXER, "convert rate: f-in: %d, f-out: %d, gen: %d\n",
			_frames_count, out_frames, srcdata.output_frames_gen);
#else
	/* no resampler available: emit silence of the expected length */
	for(unsigned i=destpos; i<_dest.samples(); ++i) {
		_dest.operator[]<float>(i) = 0.f;
	}
#endif
}
int ResampleSRC::Do(int chns,Fifo<float> *input,float *const *output,int need,double ratio) { if(ratio == 1) return Resample::Do(chns,input,output,frames,1); assert(chns <= channels); SRC_DATA src_data; src_data.src_ratio = ratio; src_data.end_of_input = 0; int count = -1; // hopefully all channel fifos advance uniformly..... for(int i = 0; i < chns; ++i) { src_set_ratio(state[i],ratio); for(int got = 0; got < frames; ) { src_data.data_out = output[i]+got; src_data.output_frames = frames-got; if(decoded[i].Have()) { src_data.data_in = input[i].ReadPtr(); src_data.input_frames = input[i].ReadSamples(); int err = src_process(state[i],&src_data); if(err) post("src_process error %i",err); // advance buffer decoded[i].Read(src_data.input_frames_used,NULL); } else { schedWait(); if(debug) post("fifo underrun"); // Buffer underrun!! -> zero output buffer memset(src_data.data_out,0,src_data.output_frames*sizeof(*src_data.data_out)); src_data.output_frames_gen = src_data.output_frames; } got += src_data.output_frames_gen; } assert(count < 0 || got == count); count = got; } return count; }
/* Fill `dest` (interleaved stereo, bufferSize samples) from the wave
 * data starting at sample index `start`, writing from `offset` onward.
 * With pitch == 1 data is memcpy'd; otherwise it is run through the
 * persistent SRC converter.  The /2 and *2 below convert between
 * interleaved stereo samples and frames.  Returns the new read position;
 * `rewind` controls updating of the frameRewind marker. */
int SampleChannel::fillChan(float *dest, int start, int offset, bool rewind)
{
	int position;  // return value: the new position

	if (pitch == 1.0f) {

		/* case 1: 'dest' lies within the original sample boundaries (start-
		 * end) */

		if (start+bufferSize-offset <= end) {
			memcpy(dest+offset, wave->data+start, (bufferSize-offset)*sizeof(float));
			position = start+bufferSize-offset;
			if (rewind)
				frameRewind = -1;
		}

		/* case2: 'dest' lies outside the end of the sample, OR the sample
		 * is smaller than 'dest' */

		else {
			memcpy(dest+offset, wave->data+start, (end-start)*sizeof(float));
			position = end;
			if (rewind)
				frameRewind = end-start+offset;
		}
	}
	else {
		rsmp_data.data_in       = wave->data+start;     // source data
		rsmp_data.input_frames  = (end-start)/2;        // how many readable bytes
		rsmp_data.data_out      = dest+offset;          // destination (processed data)
		rsmp_data.output_frames = (bufferSize-offset)/2;// how many bytes to process
		rsmp_data.end_of_input  = false;

		src_process(rsmp_state, &rsmp_data);
		int gen = rsmp_data.output_frames_gen*2;        // frames generated by this call

		position = start + rsmp_data.input_frames_used*2; // position goes forward of frames_used (i.e. read from wave)

		if (rewind) {
			if (gen == bufferSize-offset)
				frameRewind = -1;
			else
				frameRewind = gen+offset;
		}
	}
	return position;
}
/* FLAC decoder write callback: convert each decoded block to float and
 * hand it to the player, optionally resampling when xlplayer has an SRC
 * state.  end_of_input is raised on the file's final block (when it can
 * be determined) so the converter flushes its tail.  Returns ABORT on
 * resampler or allocation failure, CONTINUE otherwise. */
static FLAC__StreamDecoderWriteStatus flac_writer_callback(const FLAC__StreamDecoder *decoder, const FLAC__Frame *frame, const FLAC__int32 * const inputbuffer[], void *client_data)
    {
    struct xlplayer *xlplayer = client_data;
    struct flacdecode_vars *self = xlplayer->dec_data;
    SRC_DATA *src_data = &(xlplayer->src_data);
    int src_error;

    if (self->suppress_audio_output == FALSE)
        {
        if (xlplayer->src_state)
            {
            /* detect the last block so the resampler can be flushed */
            if (frame->header.number_type == FLAC__FRAME_NUMBER_TYPE_FRAME_NUMBER && frame->header.number.frame_number == 0)
                {
                fprintf(stderr, "flac_writer_callback: performance warning -- can't determine if a block is the last one or not for this file\n");
                }
            else
                {
                if (frame->header.number.sample_number + frame->header.blocksize == self->totalsamples)
                    src_data->end_of_input = TRUE;
                }
            src_data->input_frames = frame->header.blocksize;
            src_data->data_in = realloc(src_data->data_in, src_data->input_frames * frame->header.channels * sizeof (float));
            /* +2 slack, plus extra room for the converter's flush tail */
            src_data->output_frames = (int)(src_data->input_frames * src_data->src_ratio) + 2 + (512 * src_data->end_of_input);
            src_data->data_out = realloc(src_data->data_out, src_data->output_frames * frame->header.channels * sizeof (float));
            make_flac_audio_to_float(xlplayer, src_data->data_in, inputbuffer, frame->header.blocksize, frame->header.bits_per_sample, frame->header.channels);
            if ((src_error = src_process(xlplayer->src_state, src_data)))
                {
                fprintf(stderr, "flac_writer_callback: src_process reports %s\n", src_strerror(src_error));
                xlplayer->playmode = PM_EJECTING;
                return FLAC__STREAM_DECODER_WRITE_STATUS_ABORT;
                }
            xlplayer_demux_channel_data(xlplayer, src_data->data_out, src_data->output_frames_gen, frame->header.channels, 1.f);
            }
        else
            {
            /* no resampling: convert to float and pass through */
            if ((self->flbuf = realloc(self->flbuf, sizeof (float) * frame->header.blocksize * frame->header.channels)) == NULL)
                {
                fprintf(stderr, "flac_writer_callback: malloc failure\n");
                xlplayer->playmode = PM_EJECTING;
                return FLAC__STREAM_DECODER_WRITE_STATUS_ABORT;
                }
            make_flac_audio_to_float(xlplayer, self->flbuf, inputbuffer, frame->header.blocksize, frame->header.bits_per_sample, frame->header.channels);
            xlplayer_demux_channel_data(xlplayer, self->flbuf, frame->header.blocksize, frame->header.channels, 1.f);
            }
        xlplayer_write_channel_data(xlplayer);
        }
    return FLAC__STREAM_DECODER_WRITE_STATUS_CONTINUE;
    }
/* One-shot sample-rate conversion: build a converter, run the caller's single
** buffer through it with end_of_input set, then destroy the converter.
** Returns 0 on success or the libsamplerate error code that occurred.
*/
int src_simple (SRC_DATA *src_data, int converter, int channels)
{	int error = 0 ;
	SRC_STATE *state = src_new (converter, channels, &error) ;

	if (state == NULL)
		return error ;

	/* The caller provides everything up front, so the converter may flush. */
	src_data->end_of_input = 1 ;

	error = src_process (state, src_data) ;
	src_delete (state) ;

	return error ;
} /* src_simple */
/* Resample the incoming block with libsamplerate and pass it down the filter
 * chain. Rebuilds the SRC_STATE whenever the settings or the input format
 * change. The drain loop feeds the converter until all input is consumed or
 * the output buffer is full. */
HRESULT TaudioFilterResampleSRC::process(TfilterQueue::iterator it, TsampleFormat &fmt, void *samples, size_t numsamples, const TfilterSettingsAudio *cfg0)
{
    const TresampleSettings *cfg = (const TresampleSettings *)cfg0;
    if (is(fmt, cfg)) {
        // (Re)create the converter when settings or input format changed.
        if (!cfg->equal(old) || oldfreq != fmt.freq || oldnchannels != fmt.nchannels) {
            old = *cfg;
            oldfreq = fmt.freq;
            oldnchannels = fmt.nchannels;
            done();
            state = src_new(cfg->mode - TresampleSettings::RESAMPLE_SRC_SINC_BEST_QUALITY, fmt.nchannels, NULL);
        }
        if (state) {
            unsigned int srcfreq = fmt.freq;
            // Output capacity in frames: expected ratio with 2x headroom.
            size_t lenout = numsamples * cfg->freq / fmt.freq * 2;
            int16_t *samples1 = (int16_t *)init(cfg, fmt, samples, numsamples);
            fmt.freq = cfg->freq;
            int16_t *samples2 = (int16_t *)alloc_buffer(fmt, lenout, buf);
            if (numsamples > 0 && state) {
                SRC_DATA src;
                src.data_in = (float *)samples1;
                src.input_frames = (long)numsamples;
                src.data_out = (float *)samples2;
                src.output_frames = (long)lenout;
                src.src_ratio = (double)cfg->freq / srcfreq;
                src.end_of_input = 0;
                numsamples = 0;
                do {
                    int error = src_process(state, &src);
                    if (error) {
                        break;
                    }
                    numsamples += src.output_frames_gen;
                    if (src.end_of_input) {
                        break;
                    }
                    src.data_in += src.input_frames_used * fmt.nchannels;
                    src.input_frames -= src.input_frames_used;
                    src.data_out += src.output_frames_gen * fmt.nchannels;
                    // BUGFIX: shrink the remaining output capacity in step
                    // with data_out; leaving output_frames at lenout lets
                    // src_process write past the end of samples2.
                    src.output_frames -= src.output_frames_gen;
                } while (src.input_frames > 0 && src.output_frames > 0);
            }
            samples = samples2;
        }
    }
    return parent->deliverSamples(++it, fmt, samples, numsamples);
}
int main(int argc, char *argv[]) { FILE *f8k, *fout; short in8k_short[N8]; float in8k[N8]; float out[N48]; short out_short[N48]; SRC_STATE *src; SRC_DATA data; int error; if (argc != 4) { printf("usage %s inputRawFile OutputRawFile OutputSamplerate\n", argv[0]); exit(0); } f8k = fopen(argv[1], "rb"); assert(f8k != NULL); fout = fopen(argv[2], "wb"); assert(fout != NULL); src = src_new(SRC_SINC_FASTEST, 1, &error); assert(src != NULL); data.data_in = in8k; data.data_out = out; data.input_frames = N8; data.output_frames = N48; data.end_of_input = 0; data.src_ratio = atof(argv[3])/8000; printf("%f\n", data.src_ratio); while(fread(in8k_short, sizeof(short), N8, f8k) == N8) { src_short_to_float_array(in8k_short, in8k, N8); src_process(src, &data); printf("%d %d\n", (int)data.output_frames , (int)data.output_frames_gen); assert(data.output_frames_gen <= N48); src_float_to_short_array(out, out_short, data.output_frames_gen); fwrite(out_short, sizeof(short), data.output_frames_gen, fout); } fclose(fout); fclose(f8k); return 0; }
static FLAC__StreamDecoderWriteStatus flac_write( const FLAC__StreamDecoder *decoder, const FLAC__Frame *frame, const FLAC__int32 *const buf[], void *arg) { FLAC_FEED *feed; float *pt; int i, j, samples; feed = (FLAC_FEED *)arg; if (feed->base.cbuf == NULL) init_flac(feed, &(frame->header)); if (feed->base.converter == NULL) { for (i = 0; i < frame->header.blocksize; i++) for (j = 0; j < frame->header.channels; j++) push_ringbuf(feed->base.cbuf, buf[j][i] * feed->scale); return FLAC__STREAM_DECODER_WRITE_STATUS_CONTINUE; } samples = frame->header.blocksize * feed->base.channels; if (samples > feed->base.src_data_size) { feed->base.src_data_size = samples; free(feed->base.src_data_in); free(feed->base.src_data_out); feed->base.src_data_in = (float *)malloc(samples * sizeof(float)); feed->base.src_data_out = (float *)malloc(ceil(samples * sizeof(float) * feed->base.src_data.src_ratio)); } pt = feed->base.src_data_in; for (i = 0; i < frame->header.blocksize; i++) for (j = 0; j < frame->header.channels; j++) *pt++ = buf[j][i] * feed->scale; feed->base.src_data.data_in = feed->base.src_data_in; feed->base.src_data.input_frames = frame->header.blocksize; feed->base.src_data.data_out = feed->base.src_data_out; feed->base.src_data.output_frames = (int)ceil(feed->base.src_data.input_frames * feed->base.src_data.src_ratio); feed->base.src_data.end_of_input = 0; src_process(feed->base.converter, &(feed->base.src_data)); pt = feed->base.src_data_out; for (i = 0; i < feed->base.src_data.output_frames_gen; i++) for (j = 0; j < feed->base.channels; j++) push_ringbuf(feed->base.cbuf, *pt++); return FLAC__STREAM_DECODER_WRITE_STATUS_CONTINUE; }