Code example #1
static void resample_process_ms2(MSFilter *obj) {
    ResampleData *dt=(ResampleData*)obj->data;
    mblk_t *m;

    if (dt->output_rate==dt->input_rate) {
        while((m=ms_queue_get(obj->inputs[0]))!=NULL) {
            ms_queue_put(obj->outputs[0],m);
        }
        return;
    }
    ms_filter_lock(obj);
    if (dt->handle!=NULL) {
        unsigned int inrate=0, outrate=0;
        speex_resampler_get_rate(dt->handle,&inrate,&outrate);
        if (inrate!=dt->input_rate || outrate!=dt->output_rate) {
            speex_resampler_destroy(dt->handle);
            dt->handle=0;
        }
    }
    if (dt->handle==NULL) {
        int err=0;
        dt->handle=speex_resampler_init(dt->nchannels, dt->input_rate, dt->output_rate, SPEEX_RESAMPLER_QUALITY_VOIP, &err);
    }


    while((m=ms_queue_get(obj->inputs[0]))!=NULL) {
        unsigned int inlen=(m->b_wptr-m->b_rptr)/(2*dt->nchannels);
        unsigned int outlen=((inlen*dt->output_rate)/dt->input_rate)+1;
        unsigned int inlen_orig=inlen;
        mblk_t *om=allocb(outlen*2*dt->nchannels,0);
        if (dt->nchannels==1) {
            speex_resampler_process_int(dt->handle,
                                        0,
                                        (int16_t*)m->b_rptr,
                                        &inlen,
                                        (int16_t*)om->b_wptr,
                                        &outlen);
        } else {
            speex_resampler_process_interleaved_int(dt->handle,
                                                    (int16_t*)m->b_rptr,
                                                    &inlen,
                                                    (int16_t*)om->b_wptr,
                                                    &outlen);
        }
        if (inlen_orig!=inlen) {
            ms_error("Bug in resampler ! only %u samples consumed instead of %u, out=%u",
                     inlen,inlen_orig,outlen);
        }
        om->b_wptr+=outlen*2*dt->nchannels;
        mblk_set_timestamp_info(om,dt->ts);
        dt->ts+=outlen;
        ms_queue_put(obj->outputs[0],om);
        freemsg(m);
    }
    ms_filter_unlock(obj);
}
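Every listing on this page follows the same basic pattern that code example #1 shows in full: create a SpeexResamplerState for a given channel count and rate pair, size the output buffer for the rate ratio plus a little headroom, call one of the process functions with the in/out lengths passed by pointer, and read back how many frames were actually consumed and produced. The standalone sketch below condenses that pattern into a minimal program; it is not taken from any of the projects here, and the rates, block size and quality setting are arbitrary assumptions.

/* Minimal sketch (not from any project on this page): resample one block of
 * interleaved 16-bit stereo PCM from 44100 Hz to 48000 Hz with libspeexdsp. */
#include <speex/speex_resampler.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    int err = 0;
    const spx_uint32_t channels = 2, in_rate = 44100, out_rate = 48000;
    SpeexResamplerState *st = speex_resampler_init(channels, in_rate, out_rate,
                                                   SPEEX_RESAMPLER_QUALITY_DEFAULT, &err);
    if (st == NULL || err != RESAMPLER_ERR_SUCCESS) {
        fprintf(stderr, "init failed: %s\n", speex_resampler_strerror(err));
        return 1;
    }

    spx_uint32_t in_frames = 441;                                   /* 10 ms of input */
    spx_uint32_t out_frames = (in_frames * out_rate) / in_rate + 1; /* headroom, like outlen above */
    spx_int16_t *in  = calloc((size_t)in_frames  * channels, sizeof(*in));  /* silence, for illustration */
    spx_int16_t *out = calloc((size_t)out_frames * channels, sizeof(*out));

    /* On return, in_frames holds the frames actually consumed and
     * out_frames the frames actually produced. */
    err = speex_resampler_process_interleaved_int(st, in, &in_frames, out, &out_frames);
    if (err != RESAMPLER_ERR_SUCCESS)
        fprintf(stderr, "resample failed: %s\n", speex_resampler_strerror(err));
    else
        printf("consumed %u frames, produced %u frames\n",
               (unsigned)in_frames, (unsigned)out_frames);

    free(in);
    free(out);
    speex_resampler_destroy(st);
    return err == RESAMPLER_ERR_SUCCESS ? 0 : 1;
}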
Code example #2
File: coreaudio.c Project: k4rtik/ultragrid
static OSStatus InputProc(void *inRefCon,
                AudioUnitRenderActionFlags *ioActionFlags,
                const AudioTimeStamp *inTimeStamp,
                UInt32 inBusNumber,
                UInt32 inNumberFrames,
                AudioBufferList * ioData)
{
        UNUSED(ioData);
        struct state_ca_capture * s = (struct state_ca_capture *) inRefCon;

        OSStatus err = noErr;

        err = AudioUnitRender(s->auHALComponentInstance, ioActionFlags, inTimeStamp, inBusNumber,     //will be '1' for input data
                inNumberFrames, //# of frames requested
                s->theBufferList);

        if(err == noErr) {
                int i;
                int len = inNumberFrames * s->audio_packet_size;
                for(i = 0; i < s->frame.ch_count; ++i)
                        mux_channel(s->tmp, s->theBufferList->mBuffers[i].mData, s->frame.bps, len, s->frame.ch_count, i, 1.0);
                uint32_t write_bytes = len * s->frame.ch_count;
#ifdef HAVE_SPEEX
                if(s->nominal_sample_rate != s->frame.sample_rate) {
                        int err;
                        uint32_t in_frames = inNumberFrames;
                        err = speex_resampler_process_interleaved_int(s->resampler, (spx_int16_t *) s->tmp, &in_frames, (spx_int16_t *) s->resampled, &write_bytes);
                        //speex_resampler_process_int(resampler, channelID, in, &in_length, out, &out_length); 
                        write_bytes *= s->frame.bps * s->frame.ch_count;
                        if(err) {
                                fprintf(stderr, "Resampling data error.\n");
                                return err;
                        }
                }
#endif

                pthread_mutex_lock(&s->lock);
#ifdef HAVE_SPEEX
                if(s->nominal_sample_rate != s->frame.sample_rate) 
                        ring_buffer_write(s->buffer, s->resampled, write_bytes);
                else
#endif
                        ring_buffer_write(s->buffer, s->tmp, write_bytes);
                s->data_ready = TRUE;
                if(s->boss_waiting)
                        pthread_cond_signal(&s->cv);
                pthread_mutex_unlock(&s->lock);
        } else {
                fprintf(stderr, "[CoreAudio] writing buffer caused error %i.\n", (int) err);
        }

        return err;
}
Code example #3
long
cubeb_resampler_speex::fill(void * input_buffer, void * output_buffer, long frames_needed)
{
  // Use more input frames than strictly necessary, so in the worst case,
  // we have leftover unresampled frames at the end, that we can use
  // during the next iteration.
  assert(frames_needed <= buffer_frame_count);
  long before_resampling = frame_count_at_rate(frames_needed, resampling_ratio);
  long frames_requested = before_resampling - leftover_frame_count;

  // Copy the previous leftover frames to the front of the buffer.
  size_t leftover_bytes = frames_to_bytes(stream_params, leftover_frame_count);
  memcpy(resampling_src_buffer.get(), leftover_frames_buffer.get(), leftover_bytes);
  uint8_t * buffer_start = resampling_src_buffer.get() + leftover_bytes;

  long got = data_callback(stream, user_ptr, NULL, buffer_start, frames_requested);
  assert(got <= frames_requested);

  if (got < 0) {
    return CUBEB_ERROR;
  }

  uint32_t in_frames = leftover_frame_count + got;
  uint32_t out_frames = frames_needed;
  uint32_t old_in_frames = in_frames;

  if (stream_params.format == CUBEB_SAMPLE_FLOAT32NE) {
    float * in_buffer = reinterpret_cast<float *>(resampling_src_buffer.get());
    float * out_buffer = reinterpret_cast<float *>(output_buffer);
    speex_resampler_process_interleaved_float(speex_resampler, in_buffer, &in_frames,
                                              out_buffer, &out_frames);
  } else {
    short * in_buffer = reinterpret_cast<short *>(resampling_src_buffer.get());
    short * out_buffer = reinterpret_cast<short *>(output_buffer);
    speex_resampler_process_interleaved_int(speex_resampler, in_buffer, &in_frames,
                                            out_buffer, &out_frames);
  }

  // Copy the leftover frames to buffer for the next time.
  leftover_frame_count = old_in_frames - in_frames;
  assert(leftover_frame_count <= leftover_frame_size);

  size_t unresampled_bytes = frames_to_bytes(stream_params, leftover_frame_count);
  uint8_t * leftover_frames_start = resampling_src_buffer.get();
  leftover_frames_start += frames_to_bytes(stream_params, in_frames);
  memcpy(leftover_frames_buffer.get(), leftover_frames_start, unresampled_bytes);

  return out_frames;
}
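The comments in cubeb_resampler_speex::fill() above spell out the one detail the simpler listings gloss over: a speex_resampler_process_* call may consume fewer input frames than offered, so the unconsumed tail has to be saved and prepended to the next call's input. The hypothetical mono helper below sketches only that carry-over bookkeeping; the names, fixed buffer sizes and the CARRY_MAX bound are assumptions, not cubeb code.

#include <assert.h>
#include <string.h>
#include <speex/speex_resampler.h>

#define CARRY_MAX 64   /* assumed upper bound on unconsumed frames per call */

typedef struct {
    SpeexResamplerState *st;       /* initialized elsewhere, 1 channel */
    spx_int16_t carry[CARRY_MAX];  /* unconsumed frames from the last call */
    spx_uint32_t carry_frames;
} carry_resampler;

/* Resample `in_frames` mono frames plus last call's leftovers; returns the
 * number of frames written to `out` and stores the new leftovers. */
static spx_uint32_t carry_process(carry_resampler *cr,
                                  const spx_int16_t *in, spx_uint32_t in_frames,
                                  spx_int16_t *out, spx_uint32_t out_capacity)
{
    spx_int16_t src[4096];
    spx_uint32_t total_in = cr->carry_frames + in_frames;
    assert(total_in <= sizeof(src) / sizeof(src[0]));

    /* leftovers from the previous call go in front of the new input */
    memcpy(src, cr->carry, cr->carry_frames * sizeof(spx_int16_t));
    memcpy(src + cr->carry_frames, in, in_frames * sizeof(spx_int16_t));

    spx_uint32_t consumed = total_in;
    spx_uint32_t produced = out_capacity;
    speex_resampler_process_interleaved_int(cr->st, src, &consumed, out, &produced);

    /* whatever was not consumed becomes the leftover for the next call */
    cr->carry_frames = total_in - consumed;
    assert(cr->carry_frames <= CARRY_MAX);
    memcpy(cr->carry, src + consumed, cr->carry_frames * sizeof(spx_int16_t));
    return produced;
}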
Code example #4
File: Resampler.cpp Project: taqu/opus
    s32 Resampler::read(LSshort* pcm, u32 numSamples, Stream* stream)
    {
        if(dstSamplesPerSec_ == srcSamplesPerSec_){

            if(NULL != convertTypeFunc_){
                LSshort* buffer = reinterpret_cast<LSshort*>(sharedBuffer0_);
                s32 readSamples = stream->read(buffer, numSamples);
                if(readSamples<0){
                    return readSamples;
                }
                convertTypeFunc_(pcm, buffer, readSamples);
                return readSamples;

            } else{
                return stream->read(pcm, numSamples);
            }

        }else{
            LSshort* buffer0 = reinterpret_cast<LSshort*>(sharedBuffer0_);
            s32 readSamples = stream->read(buffer0, numSamples);
            if(readSamples<0){
                return readSamples;
            }

            u32 inN = numSamples;
            u32 outN = static_cast<u32>(resampleRate_*numSamples);
            if(NULL != convertTypeFunc_){
                LSshort* buffer1 = reinterpret_cast<LSshort*>(sharedBuffer1_);
                speex_resampler_process_interleaved_int(resampler_, buffer0, &inN, buffer1, &outN);
                convertTypeFunc_(pcm, buffer1, outN);
            }else{
                speex_resampler_process_interleaved_int(resampler_, buffer0, &inN, pcm, &outN);
            }
            return static_cast<s32>(outN);
        }
    }
Code example #5
File: speex.c Project: 0xheart0/vlc
static block_t *Resample (filter_t *filter, block_t *in)
{
    SpeexResamplerState *st = (SpeexResamplerState *)filter->p_sys;

    const size_t framesize = filter->fmt_out.audio.i_bytes_per_frame;
    const unsigned irate = filter->fmt_in.audio.i_rate;
    const unsigned orate = filter->fmt_out.audio.i_rate;

    spx_uint32_t ilen = in->i_nb_samples;
    spx_uint32_t olen = ((ilen + 2) * orate * UINT64_C(11))
                      / (irate * UINT64_C(10));

    block_t *out = block_Alloc (olen * framesize);
    if (unlikely(out == NULL))
        goto error;

    speex_resampler_set_rate (st, irate, orate);

    int err;
    if (filter->fmt_in.audio.i_format == VLC_CODEC_FL32)
        err = speex_resampler_process_interleaved_float (st,
            (float *)in->p_buffer, &ilen, (float *)out->p_buffer, &olen);
    else
        err = speex_resampler_process_interleaved_int (st,
            (int16_t *)in->p_buffer, &ilen, (int16_t *)out->p_buffer, &olen);
    if (err != 0)
    {
        msg_Err (filter, "cannot resample: %s",
                 speex_resampler_strerror (err));
        block_Release (out);
        out = NULL;
        goto error;
    }

    if (ilen < in->i_nb_samples)
        msg_Err (filter, "lost %"PRIu32" of %u input frames",
                 in->i_nb_samples - ilen, in->i_nb_samples);

    out->i_buffer = olen * framesize;
    out->i_nb_samples = olen;
    out->i_pts = in->i_pts;
    out->i_length = olen * CLOCK_FREQ / filter->fmt_out.audio.i_rate;
error:
    block_Release (in);
    return out;
}
Code example #6
	bool ResamplePCM(uint32 NumChannels, const TArray<uint8>& InBuffer, uint32 InSampleRate, TArray<uint8>& OutBuffer, uint32 OutSampleRate) const
	{
		// Initialize resampler to convert to desired rate for Opus
		int32 err = 0;
		SpeexResamplerState* resampler = speex_resampler_init(NumChannels, InSampleRate, OutSampleRate, SPEEX_RESAMPLER_QUALITY_DESKTOP, &err);
		if (err != RESAMPLER_ERR_SUCCESS)
		{
			speex_resampler_destroy(resampler);
			return false;
		}

		// Calculate extra space required for sample rate
		const uint32 SampleStride = SAMPLE_SIZE * NumChannels;
		const float Duration = (float)InBuffer.Num() / (InSampleRate * SampleStride);
		const int32 SafeCopySize = (Duration + 1) * OutSampleRate * SampleStride;
		OutBuffer.Empty(SafeCopySize);
		OutBuffer.AddUninitialized(SafeCopySize);
		uint32 InSamples = InBuffer.Num() / SampleStride;
		uint32 OutSamples = OutBuffer.Num() / SampleStride;

		// Do resampling and check results
		if (NumChannels == 1)
		{
			err = speex_resampler_process_int(resampler, 0, (const short*)(InBuffer.GetData()), &InSamples, (short*)(OutBuffer.GetData()), &OutSamples);
		}
		else
		{
			err = speex_resampler_process_interleaved_int(resampler, (const short*)(InBuffer.GetData()), &InSamples, (short*)(OutBuffer.GetData()), &OutSamples);
		}

		speex_resampler_destroy(resampler);
		if (err != RESAMPLER_ERR_SUCCESS)
		{
			return false;
		}

		// reduce the size of Out Buffer if more space than necessary was allocated
		const int32 WrittenBytes = (int32)(OutSamples * SampleStride);
		if (WrittenBytes < OutBuffer.Num())
		{
			OutBuffer.SetNum(WrittenBytes, true);
		}

		return true;
	}
Code example #7
File: resample_speex.c Project: deveck/Deveck.TAM
PJ_DEF(void) pjmedia_resample_run( pjmedia_resample *resample,
				   const pj_int16_t *input,
				   pj_int16_t *output )
{
    spx_uint32_t in_length, out_length;

    PJ_ASSERT_ON_FAIL(resample, return);

    in_length = resample->in_samples_per_frame;
    out_length = resample->out_samples_per_frame;

    speex_resampler_process_interleaved_int(resample->state,
					    (const __int16 *)input, &in_length,
					    (__int16 *)output, &out_length);

    pj_assert(in_length == resample->in_samples_per_frame);
    pj_assert(out_length == resample->out_samples_per_frame);
}
Code example #8
int resampler_resample_from_input(struct resampler_itfe *resampler,
                                  int16_t *in,
                                  size_t *inFrameCount,
                                  int16_t *out,
                                  size_t *outFrameCount)
{
    struct resampler *rsmp = (struct resampler *)resampler;

    if (rsmp == NULL || in == NULL || inFrameCount == NULL ||
            out == NULL || outFrameCount == NULL) {
        return -EINVAL;
    }
    if (rsmp->provider != NULL) {
        *outFrameCount = 0;
        return -ENOSYS;
    }

    if (rsmp->channel_count == 1) {
        speex_resampler_process_int(rsmp->speex_resampler,
                                    0,
                                    in,
                                    (spx_uint32_t *)inFrameCount,
                                    out,
                                    (spx_uint32_t *)outFrameCount);
    } else {
        speex_resampler_process_interleaved_int(rsmp->speex_resampler,
                                                in,
                                                (spx_uint32_t *)inFrameCount,
                                                out,
                                                (spx_uint32_t *)outFrameCount);
    }

    ALOGV("resampler_resample_from_input() DONE in %zu out %zu", *inFrameCount, *outFrameCount);

    return 0;
}
Code example #9
File: audio_resampler.cpp Project: EasyRPG/Player
int AudioResampler::FillBufferDifferentRate(uint8_t* buffer, int length) {
	const int input_samplesize = GetSamplesizeForFormat(input_format);
	const int output_samplesize = GetSamplesizeForFormat(output_format);
	//The buffer size has to be a multiple of a frame
	const int buffer_size=sizeof(internal_buffer) - sizeof(internal_buffer)%(nr_of_channels*((input_samplesize>output_samplesize) ? input_samplesize : output_samplesize));
	
	int total_output_frames = length / (output_samplesize*nr_of_channels);
	int amount_of_samples_to_read = 0;
	int amount_of_samples_read = 0;
	
	uint8_t * advanced_input_buffer = internal_buffer;
	int unused_frames = 0;
	int empty_buffer_space = 0;
	int error = 0;
	
	#ifdef HAVE_LIBSPEEXDSP
		spx_uint32_t numerator = 0;
		spx_uint32_t denominator = 0;
	#endif

	while (total_output_frames > 0) {
		//Calculate how many frames of the last cycle are unused - so they can be reused
		unused_frames = conversion_data.input_frames - conversion_data.input_frames_used;
		empty_buffer_space = buffer_size / output_samplesize - unused_frames*nr_of_channels;
		
		advanced_input_buffer = internal_buffer;

		//If there is still unused data in the input_buffer, move it to the front
		for (int i = 0; i < unused_frames*nr_of_channels*output_samplesize; i++) {
			*advanced_input_buffer = *(advanced_input_buffer + empty_buffer_space*output_samplesize);
			advanced_input_buffer++;
		}
		//advanced_input_buffer is now offset to the first frame of new data!

		//ensure that the input buffer is not able to overrun
		amount_of_samples_to_read = (input_samplesize > output_samplesize) ? (empty_buffer_space*output_samplesize) / input_samplesize : empty_buffer_space;

		//Read as many frames as needed to refill the buffer (filled after the conversion to float)
		if (amount_of_samples_to_read != 0) {
			switch (output_format) {
				case AudioDecoder::Format::F32: amount_of_samples_read = DecodeAndConvertFloat(wrapped_decoder.get(), advanced_input_buffer, amount_of_samples_to_read, input_samplesize, input_format); break;
			#ifdef HAVE_LIBSPEEXDSP
				case AudioDecoder::Format::S16:  amount_of_samples_read = DecodeAndConvertInt16(wrapped_decoder.get(), advanced_input_buffer, amount_of_samples_to_read, input_samplesize, input_format); break;
			#endif
				default: error_message = "internal error: output_format is not convertable"; return ERROR;
			}
			if (amount_of_samples_read < 0) {
				error_message = wrapped_decoder->GetError();
				return amount_of_samples_read; //error occurred
			}
		}
		//Now we have a prepared full buffer of converted values

		//Prepare the source data
		conversion_data.input_frames = amount_of_samples_read / nr_of_channels + unused_frames;
		conversion_data.output_frames = total_output_frames;

		#if defined(HAVE_LIBSPEEXDSP)
			conversion_data.input_frames_used = conversion_data.input_frames;
			conversion_data.output_frames_gen = conversion_data.output_frames;

			//libspeexdsp defines a sample rate conversion with a fraction (input/output)
			numerator = input_rate*pitch;
			denominator = output_rate * STANDARD_PITCH;
			if (pitch_handled_by_decoder) {
				numerator = input_rate;
				denominator = output_rate;
			}
			if (conversion_data.ratio_num != numerator || conversion_data.ratio_denom != denominator) {
				speex_resampler_set_rate_frac(conversion_state, numerator, denominator, input_rate, output_rate);
				conversion_data.ratio_num = numerator;
				conversion_data.ratio_denom = denominator;
			}
			
			//A pitfall of libspeexdsp: if the output buffer is too big, everything stutters - good results were achieved by capping it at the input buffer size
			conversion_data.output_frames_gen=(conversion_data.input_frames<conversion_data.output_frames_gen) ? conversion_data.input_frames :conversion_data.output_frames_gen;
			
			switch (output_format) {
			case Format::F32:
				error = speex_resampler_process_interleaved_float(conversion_state, (float*)internal_buffer, &conversion_data.input_frames_used, (float*)buffer, &conversion_data.output_frames_gen);
				break;
			case Format::S16:
				error = speex_resampler_process_interleaved_int(conversion_state, (spx_int16_t*)internal_buffer, &conversion_data.input_frames_used, (spx_int16_t*)buffer, &conversion_data.output_frames_gen);
				break;
			default: error_message = "internal error: output_format is not convertable"; return ERROR;
			}
			
			if (error != 0) {
				error_message = speex_resampler_strerror(error);
				return ERROR;
			}
		#elif defined(HAVE_LIBSAMPLERATE)
			conversion_data.data_in = (float*)internal_buffer;
			conversion_data.data_out = (float*)buffer;
			if (pitch_handled_by_decoder) {
				conversion_data.src_ratio = (output_rate*1.0) / input_rate;
			}
			else {
				conversion_data.src_ratio = (output_rate*STANDARD_PITCH *1.0) / (input_rate*pitch*1.0);
			}
			conversion_data.end_of_input = (wrapped_decoder->IsFinished()) ? 1 : 0;

			//Now let libsamplerate filter the data
			error = src_process(conversion_state, &conversion_data);

			if (error != 0) {
				error_message = src_strerror(error);
				return ERROR;
			}
		#endif

		total_output_frames -= conversion_data.output_frames_gen;
		buffer += conversion_data.output_frames_gen*nr_of_channels*output_samplesize;

		if ((conversion_data.input_frames == 0 && conversion_data.output_frames_gen <= conversion_data.output_frames) || conversion_data.output_frames_gen == 0) {
			finished = true;
			//There is nothing left to convert - return how many samples (in bytes) were converted!
			return length - total_output_frames*(output_samplesize*nr_of_channels);
		}
	}
	return length;
}
Code example #10
File: Main.cpp Project: 173210/project64
static int resample(unsigned char *input, int input_avail, int oldsamplerate, unsigned char *output, int output_needed, int newsamplerate)
{
    int *psrc = (int*)input;
    int *pdest = (int*)output;
    int i = 0, j = 0;

#ifdef USE_SPEEX
    spx_uint32_t in_len, out_len;
    if(Resample == RESAMPLER_SPEEX)
    {
        if(spx_state == NULL)
        {
            spx_state = speex_resampler_init(2, oldsamplerate, newsamplerate, ResampleQuality,  &error);
            if(spx_state == NULL)
            {
                memset(output, 0, output_needed);
                return 0;
            }
        }
        speex_resampler_set_rate(spx_state, oldsamplerate, newsamplerate);
        in_len = input_avail / 4;
        out_len = output_needed / 4;

        if ((error = speex_resampler_process_interleaved_int(spx_state, (const spx_int16_t *)input, &in_len, (spx_int16_t *)output, &out_len)))
        {
            memset(output, 0, output_needed);
            return input_avail;  // number of bytes consumed
        }
        return in_len * 4;
    }
#endif
#ifdef USE_SRC
    if(Resample == RESAMPLER_SRC)
    {
        // the high quality resampler needs more input than the samplerate ratio would indicate to work properly
        if (input_avail > output_needed * 3 / 2)
            input_avail = output_needed * 3 / 2; // just to avoid too much short-float-short conversion time
        if (_src_len < input_avail*2 && input_avail > 0)
        {
            if(_src) free(_src);
            _src_len = input_avail*2;
            _src = malloc(_src_len);
        }
        if (_dest_len < output_needed*2 && output_needed > 0)
        {
            if(_dest) free(_dest);
            _dest_len = output_needed*2;
            _dest = malloc(_dest_len);
        }
        memset(_src,0,_src_len);
        memset(_dest,0,_dest_len);
        if(src_state == NULL)
        {
            src_state = src_new (ResampleQuality, 2, &error);
            if(src_state == NULL)
            {
                memset(output, 0, output_needed);
                return 0;
            }
        }
        src_short_to_float_array ((short *) input, _src, input_avail/2);
        src_data.end_of_input = 0;
        src_data.data_in = _src;
        src_data.input_frames = input_avail/4;
        src_data.src_ratio = (float) newsamplerate / oldsamplerate;
        src_data.data_out = _dest;
        src_data.output_frames = output_needed/4;
        if ((error = src_process (src_state, &src_data)))
        {
            memset(output, 0, output_needed);
            return input_avail;  // number of bytes consumed
        }
        src_float_to_short_array (_dest, (short *) output, output_needed/2);
        return src_data.input_frames_used * 4;
    }
#endif
    // RESAMPLE == TRIVIAL
    if (newsamplerate >= oldsamplerate)
    {
        int sldf = oldsamplerate;
        int const2 = 2*sldf;
        int dldf = newsamplerate;
        int const1 = const2 - 2*dldf;
        int criteria = const2 - dldf;
        for (i = 0; i < output_needed/4; i++)
        {
            pdest[i] = psrc[j];
            if(criteria >= 0)
            {
                ++j;
                criteria += const1;
            }
            else criteria += const2;
        }
        return j * 4; //number of bytes consumed
    }
    // newsamplerate < oldsamplerate, this only happens when speed_factor > 1
    for (i = 0; i < output_needed/4; i++)
    {
        j = i * oldsamplerate / newsamplerate;
        pdest[i] = psrc[j];
    }
    return j * 4; //number of bytes consumed
}
Code example #11
/** @internal @This handles data.
 *
 * @param upipe description structure of the pipe
 * @param uref uref structure
 * @param upump_p reference to pump that generated the buffer
 * @return false if the input must be blocked
 */
static bool upipe_speexdsp_handle(struct upipe *upipe, struct uref *uref,
                             struct upump **upump_p)
{
    struct upipe_speexdsp *upipe_speexdsp = upipe_speexdsp_from_upipe(upipe);

    struct urational drift_rate;
    if (!ubase_check(uref_clock_get_rate(uref, &drift_rate)))
        drift_rate = (struct urational){ 1, 1 };

    /* reinitialize resampler when drift rate changes */
    if (urational_cmp(&drift_rate, &upipe_speexdsp->drift_rate)) {
        upipe_speexdsp->drift_rate = drift_rate;
        spx_uint32_t ratio_num = drift_rate.den;
        spx_uint32_t ratio_den = drift_rate.num;
        spx_uint32_t in_rate = upipe_speexdsp->rate * ratio_num / ratio_den;
        spx_uint32_t out_rate = upipe_speexdsp->rate;
        int err = speex_resampler_set_rate_frac(upipe_speexdsp->ctx,
                ratio_num, ratio_den, in_rate, out_rate);
        if (err) {
            upipe_err_va(upipe, "Couldn't resample from %u to %u: %s",
                in_rate, out_rate, speex_resampler_strerror(err));
        } else {
            upipe_dbg_va(upipe, "Resampling from %u to %u",
                in_rate, out_rate);
        }
    }

    size_t size;
    if (!ubase_check(uref_sound_size(uref, &size, NULL /* sample_size */))) {
        uref_free(uref);
        return true;
    }

    struct ubuf *ubuf = ubuf_sound_alloc(upipe_speexdsp->ubuf_mgr, size + 10);
    if (!ubuf)
        return false;

    const void *in;
    uref_sound_read_void(uref, 0, -1, &in, 1);

    void *out;
    ubuf_sound_write_void(ubuf, 0, -1, &out, 1);

    spx_uint32_t in_len = size;         /* input size */
    spx_uint32_t out_len = size + 10;   /* available output size */

    int err;

    if (upipe_speexdsp->f32)
        err = speex_resampler_process_interleaved_float(upipe_speexdsp->ctx,
                in, &in_len, out, &out_len);
    else
        err = speex_resampler_process_interleaved_int(upipe_speexdsp->ctx,
                in, &in_len, out, &out_len);

    if (err) {
        upipe_err_va(upipe, "Could not resample: %s",
                speex_resampler_strerror(err));
    }

    uref_sound_unmap(uref, 0, -1, 1);
    ubuf_sound_unmap(ubuf, 0, -1, 1);

    if (err) {
        ubuf_free(ubuf);
    } else {
        ubuf_sound_resize(ubuf, 0, out_len);
        uref_attach_ubuf(uref, ubuf);
    }

    upipe_speexdsp_output(upipe, uref, upump_p);
    return true;
}

/** @internal @This receives incoming uref.
 *
 * @param upipe description structure of the pipe
 * @param uref uref structure describing the picture
 * @param upump_p reference to pump that generated the buffer
 */
static void upipe_speexdsp_input(struct upipe *upipe, struct uref *uref,
                            struct upump **upump_p)
{
    if (!upipe_speexdsp_check_input(upipe)) {
        upipe_speexdsp_hold_input(upipe, uref);
        upipe_speexdsp_block_input(upipe, upump_p);
    } else if (!upipe_speexdsp_handle(upipe, uref, upump_p)) {
        upipe_speexdsp_hold_input(upipe, uref);
        upipe_speexdsp_block_input(upipe, upump_p);
        /* Increment upipe refcount to avoid disappearing before all packets
         * have been sent. */
        upipe_use(upipe);
    }
}

/** @internal @This receives a provided ubuf manager.
 *
 * @param upipe description structure of the pipe
 * @param flow_format amended flow format
 * @return an error code
 */
static int upipe_speexdsp_check(struct upipe *upipe, struct uref *flow_format)
{
    struct upipe_speexdsp *upipe_speexdsp = upipe_speexdsp_from_upipe(upipe);
    if (flow_format != NULL)
        upipe_speexdsp_store_flow_def(upipe, flow_format);

    if (upipe_speexdsp->flow_def == NULL)
        return UBASE_ERR_NONE;

    bool was_buffered = !upipe_speexdsp_check_input(upipe);
    upipe_speexdsp_output_input(upipe);
    upipe_speexdsp_unblock_input(upipe);
    if (was_buffered && upipe_speexdsp_check_input(upipe)) {
        /* All packets have been output, release again the pipe that has been
         * used in @ref upipe_speexdsp_input. */
        upipe_release(upipe);
    }
    return UBASE_ERR_NONE;
}

/** @internal @This sets the input flow definition.
 *
 * @param upipe description structure of the pipe
 * @param flow_def flow definition packet
 * @return an error code
 */
static int upipe_speexdsp_set_flow_def(struct upipe *upipe, struct uref *flow_def)
{
    struct upipe_speexdsp *upipe_speexdsp = upipe_speexdsp_from_upipe(upipe);

    if (flow_def == NULL)
        return UBASE_ERR_INVALID;

    const char *def;
    UBASE_RETURN(uref_flow_get_def(flow_def, &def))

    if (unlikely(ubase_ncmp(def, "sound.f32.") &&
                ubase_ncmp(def, "sound.s16.")))
        return UBASE_ERR_INVALID;

    uint8_t in_planes;
    if (unlikely(!ubase_check(uref_sound_flow_get_planes(flow_def,
                                                         &in_planes))))
        return UBASE_ERR_INVALID;

    if (in_planes != 1) {
        upipe_err(upipe, "only interleaved audio is supported");
        return UBASE_ERR_INVALID;
    }

    if (!ubase_check(uref_sound_flow_get_rate(flow_def,
                    &upipe_speexdsp->rate))) {
        upipe_err(upipe, "no sound rate defined");
        uref_dump(flow_def, upipe->uprobe);
        return UBASE_ERR_INVALID;
    }

    uint8_t channels;
    if (unlikely(!ubase_check(uref_sound_flow_get_channels(flow_def,
                        &channels))))
        return UBASE_ERR_INVALID;

    flow_def = uref_dup(flow_def);
    if (unlikely(flow_def == NULL)) {
        upipe_throw_fatal(upipe, UBASE_ERR_ALLOC);
        return UBASE_ERR_ALLOC;
    }

    upipe_speexdsp_require_ubuf_mgr(upipe, flow_def);

    if (upipe_speexdsp->ctx)
        speex_resampler_destroy(upipe_speexdsp->ctx);

    upipe_speexdsp->f32 = !ubase_ncmp(def, "sound.f32.");

    int err;
    upipe_speexdsp->ctx = speex_resampler_init(channels,
                upipe_speexdsp->rate, upipe_speexdsp->rate,
                upipe_speexdsp->quality, &err);
    if (!upipe_speexdsp->ctx) {
        upipe_err_va(upipe, "Could not create resampler: %s",
                speex_resampler_strerror(err));
        return UBASE_ERR_INVALID;
    }

    return UBASE_ERR_NONE;
}

/** @internal @This provides a flow format suggestion.
 *
 * @param upipe description structure of the pipe
 * @param request description structure of the request
 * @return an error code
 */
static int upipe_speexdsp_provide_flow_format(struct upipe *upipe,
                                          struct urequest *request)
{
    const char *def;
    UBASE_RETURN(uref_flow_get_def(request->uref, &def))
    uint8_t channels;
    UBASE_RETURN(uref_sound_flow_get_channels(request->uref, &channels))
    uint8_t planes;
    UBASE_RETURN(uref_sound_flow_get_planes(request->uref, &planes))
    uint8_t sample_size;
    UBASE_RETURN(uref_sound_flow_get_sample_size(request->uref, &sample_size))

    struct uref *flow = uref_dup(request->uref);
    UBASE_ALLOC_RETURN(flow);

    uref_sound_flow_clear_format(flow);
    uref_sound_flow_set_planes(flow, 0);
    uref_sound_flow_set_channels(flow, channels);
    uref_sound_flow_add_plane(flow, "all");
    if (ubase_ncmp(def, "sound.s16.")) {
        uref_flow_set_def(flow, "sound.f32."); /* prefer f32 over s16 */
        uref_sound_flow_set_sample_size(flow, 4 * channels);
    } else {
        uref_flow_set_def(flow, def);
        uref_sound_flow_set_sample_size(flow, (planes > 1) ? sample_size :
                sample_size / channels);
    }

    return urequest_provide_flow_format(request, flow);
}

/** @internal @This processes control commands on a speexdsp pipe.
 *
 * @param upipe description structure of the pipe
 * @param command type of command to process
 * @param args arguments of the command
 * @return an error code
 */
static int upipe_speexdsp_control(struct upipe *upipe, int command, va_list args)
{
    struct upipe_speexdsp *upipe_speexdsp = upipe_speexdsp_from_upipe(upipe);

    switch (command) {
        /* generic commands */
        case UPIPE_REGISTER_REQUEST: {
            struct urequest *request = va_arg(args, struct urequest *);
            if (request->type == UREQUEST_FLOW_FORMAT)
                return upipe_speexdsp_provide_flow_format(upipe, request);
            if (request->type == UREQUEST_UBUF_MGR)
                return upipe_throw_provide_request(upipe, request);
            return upipe_speexdsp_alloc_output_proxy(upipe, request);
        }
        case UPIPE_UNREGISTER_REQUEST: {
            struct urequest *request = va_arg(args, struct urequest *);
            if (request->type == UREQUEST_FLOW_FORMAT ||
                request->type == UREQUEST_UBUF_MGR)
                return UBASE_ERR_NONE;
            return upipe_speexdsp_free_output_proxy(upipe, request);
        }

        case UPIPE_GET_OUTPUT: {
            struct upipe **p = va_arg(args, struct upipe **);
            return upipe_speexdsp_get_output(upipe, p);
        }
        case UPIPE_SET_OUTPUT: {
            struct upipe *output = va_arg(args, struct upipe *);
            return upipe_speexdsp_set_output(upipe, output);
        }
        case UPIPE_GET_FLOW_DEF: {
            struct uref **p = va_arg(args, struct uref **);
            return upipe_speexdsp_get_flow_def(upipe, p);
        }
        case UPIPE_SET_FLOW_DEF: {
            struct uref *flow = va_arg(args, struct uref *);
            return upipe_speexdsp_set_flow_def(upipe, flow);
        }
        case UPIPE_SET_OPTION: {
            const char *option = va_arg(args, const char *);
            const char *value  = va_arg(args, const char *);
            if (strcmp(option, "quality"))
                return UBASE_ERR_INVALID;
            if (upipe_speexdsp->ctx)
                return UBASE_ERR_BUSY;
            int quality = atoi(value);
            if (quality > SPEEX_RESAMPLER_QUALITY_MAX) {
                quality = SPEEX_RESAMPLER_QUALITY_MAX;
                upipe_err_va(upipe, "Clamping quality to %d",
                        SPEEX_RESAMPLER_QUALITY_MAX);
            } else if (quality < SPEEX_RESAMPLER_QUALITY_MIN) {
                quality = SPEEX_RESAMPLER_QUALITY_MIN;
                upipe_err_va(upipe, "Clamping quality to %d",
                        SPEEX_RESAMPLER_QUALITY_MIN);
            }
            upipe_speexdsp->quality = quality;
            return UBASE_ERR_NONE;
        }

        default:
            return UBASE_ERR_UNHANDLED;
    }
}

/** @internal @This allocates a speexdsp pipe.
 *
 * @param mgr common management structure
 * @param uprobe structure used to raise events
 * @param signature signature of the pipe allocator
 * @param args optional arguments
 * @return pointer to upipe or NULL in case of allocation error
 */
static struct upipe *upipe_speexdsp_alloc(struct upipe_mgr *mgr,
                                     struct uprobe *uprobe,
                                     uint32_t signature, va_list args)
{
    struct upipe *upipe = upipe_speexdsp_alloc_void(mgr, uprobe, signature,
                                               args);
    if (unlikely(upipe == NULL))
        return NULL;

    struct upipe_speexdsp *upipe_speexdsp = upipe_speexdsp_from_upipe(upipe);

    upipe_speexdsp->ctx = NULL;
    upipe_speexdsp->drift_rate = (struct urational){ 0, 0 };
    upipe_speexdsp->quality = SPEEX_RESAMPLER_QUALITY_MAX;

    upipe_speexdsp_init_urefcount(upipe);
    upipe_speexdsp_init_ubuf_mgr(upipe);
    upipe_speexdsp_init_output(upipe);
    upipe_speexdsp_init_flow_def(upipe);
    upipe_speexdsp_init_input(upipe);

    upipe_throw_ready(upipe);
    return upipe;
}

/** @This frees a upipe.
 *
 * @param upipe description structure of the pipe
 */
static void upipe_speexdsp_free(struct upipe *upipe)
{
    struct upipe_speexdsp *upipe_speexdsp = upipe_speexdsp_from_upipe(upipe);
    if (likely(upipe_speexdsp->ctx))
        speex_resampler_destroy(upipe_speexdsp->ctx);

    upipe_throw_dead(upipe);
    upipe_speexdsp_clean_input(upipe);
    upipe_speexdsp_clean_output(upipe);
    upipe_speexdsp_clean_flow_def(upipe);
    upipe_speexdsp_clean_ubuf_mgr(upipe);
    upipe_speexdsp_clean_urefcount(upipe);
    upipe_speexdsp_free_void(upipe);
}

/** module manager static descriptor */
static struct upipe_mgr upipe_speexdsp_mgr = {
    .refcount = NULL,
    .signature = UPIPE_SPEEXDSP_SIGNATURE,

    .upipe_alloc = upipe_speexdsp_alloc,
    .upipe_input = upipe_speexdsp_input,
    .upipe_control = upipe_speexdsp_control,

    .upipe_mgr_control = NULL
};

/** @This returns the management structure for speexdsp pipes
 *
 * @return pointer to manager
 */
struct upipe_mgr *upipe_speexdsp_mgr_alloc(void)
{
    return &upipe_speexdsp_mgr;
}
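upipe_speexdsp_handle() above compensates clock drift by handing speex_resampler_set_rate_frac() the drift ratio inverted: a drift rate of num/den becomes a resampling fraction of den/num, with the nominal rate scaled to match. The minimal sketch below isolates just that call; the helper name and parameters are assumptions, not part of Upipe.

#include <speex/speex_resampler.h>

/* Sketch only: apply a measured drift ratio (drift_num/drift_den) to a
 * resampler running at `rate` Hz, in the spirit of the handler above. */
static int apply_drift(SpeexResamplerState *st, spx_uint32_t rate,
                       spx_uint32_t drift_num, spx_uint32_t drift_den)
{
    spx_uint32_t ratio_num = drift_den;                /* inverted, as above */
    spx_uint32_t ratio_den = drift_num;
    spx_uint32_t in_rate  = rate * ratio_num / ratio_den;  /* effective input rate */
    spx_uint32_t out_rate = rate;
    return speex_resampler_set_rate_frac(st, ratio_num, ratio_den, in_rate, out_rate);
}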
Code example #12
File: cras_fmt_conv.c Project: dgreid/adhd
size_t cras_fmt_conv_convert_frames(struct cras_fmt_conv *conv,
                                    const uint8_t *in_buf,
                                    uint8_t *out_buf,
                                    unsigned int *in_frames,
                                    size_t out_frames)
{
    uint32_t fr_in, fr_out;
    uint8_t *buffers[MAX_NUM_CONVERTERS + 1]; /* converters + out buffer. */
    size_t buf_idx = 0;
    static int logged_frames_dont_fit;
    unsigned int used_converters = conv->num_converters;
    unsigned int post_linear_resample = 0;
    unsigned int pre_linear_resample = 0;
    unsigned int linear_resample_fr = 0;

    assert(conv);

    if (linear_resampler_needed(conv->resampler)) {
        post_linear_resample = !conv->pre_linear_resample;
        pre_linear_resample = conv->pre_linear_resample;
    }

    /* If no SRC, then in_frames should = out_frames. */
    if (conv->speex_state == NULL) {
        fr_in = MIN(*in_frames, out_frames);
        if (out_frames < *in_frames && !logged_frames_dont_fit) {
            syslog(LOG_INFO,
                   "fmt_conv: %u to %zu no SRC.",
                   *in_frames,
                   out_frames);
            logged_frames_dont_fit = 1;
        }
    } else {
        fr_in = *in_frames;
    }
    fr_out = fr_in;

    /* Set up a chain of buffers.  The output buffer of the first conversion
     * is used as input to the second and so forth, ending in the output
     * buffer. */
    if (!linear_resampler_needed(conv->resampler))
        used_converters--;

    buffers[4] = (uint8_t *)conv->tmp_bufs[3];
    buffers[3] = (uint8_t *)conv->tmp_bufs[2];
    buffers[2] = (uint8_t *)conv->tmp_bufs[1];
    buffers[1] = (uint8_t *)conv->tmp_bufs[0];
    buffers[0] = (uint8_t *)in_buf;
    buffers[used_converters] = out_buf;

    if (pre_linear_resample) {
        linear_resample_fr = fr_in;
        unsigned resample_limit = out_frames;

        /* If there is a 2nd fmt conversion we should convert the
         * resample limit and round it to the lower bound in order
         * not to convert too many frames in the pre linear resampler.
         */
        if (conv->speex_state != NULL)
            resample_limit = resample_limit * conv->in_fmt.frame_rate /
                             conv->out_fmt.frame_rate;

        resample_limit = MIN(resample_limit, conv->tmp_buf_frames);
        fr_in = linear_resampler_resample(
                    conv->resampler,
                    buffers[buf_idx],
                    &linear_resample_fr,
                    buffers[buf_idx + 1],
                    resample_limit);
        buf_idx++;
    }

    /* If the input format isn't S16_LE convert to it. */
    if (conv->in_fmt.format != SND_PCM_FORMAT_S16_LE) {
        conv->in_format_converter(buffers[buf_idx],
                                  fr_in * conv->in_fmt.num_channels,
                                  (uint8_t *)buffers[buf_idx + 1]);
        buf_idx++;
    }

    /* Then channel conversion. */
    if (conv->channel_converter != NULL) {
        conv->channel_converter(conv,
                                (int16_t *)buffers[buf_idx],
                                fr_in,
                                (int16_t *)buffers[buf_idx + 1]);
        buf_idx++;
    }

    /* Then SRC. */
    if (conv->speex_state != NULL) {
        unsigned int out_limit = out_frames;

        if (post_linear_resample)
            out_limit = linear_resampler_out_frames_to_in(
                            conv->resampler, out_limit);
        fr_out = cras_frames_at_rate(conv->in_fmt.frame_rate,
                                     fr_in,
                                     conv->out_fmt.frame_rate);
        if (fr_out > out_frames + 1 && !logged_frames_dont_fit) {
            syslog(LOG_INFO,
                   "fmt_conv: put %u frames in %zu sized buffer",
                   fr_out,
                   out_frames);
            logged_frames_dont_fit = 1;
        }
        /* limit frames to the output size. */
        fr_out = MIN(fr_out, out_limit);
        speex_resampler_process_interleaved_int(
            conv->speex_state,
            (int16_t *)buffers[buf_idx],
            &fr_in,
            (int16_t *)buffers[buf_idx + 1],
            &fr_out);
        buf_idx++;
    }

    if (post_linear_resample) {
        linear_resample_fr = fr_out;
        unsigned resample_limit = MIN(conv->tmp_buf_frames, out_frames);
        fr_out = linear_resampler_resample(
                     conv->resampler,
                     buffers[buf_idx],
                     &linear_resample_fr,
                     buffers[buf_idx + 1],
                     resample_limit);
        buf_idx++;
    }

    /* If the output format isn't S16_LE convert to it. */
    if (conv->out_fmt.format != SND_PCM_FORMAT_S16_LE) {
        conv->out_format_converter(buffers[buf_idx],
                                   fr_out * conv->out_fmt.num_channels,
                                   (uint8_t *)buffers[buf_idx + 1]);
        buf_idx++;
    }

    if (pre_linear_resample)
        *in_frames = linear_resample_fr;
    else
        *in_frames = fr_in;
    return fr_out;
}
Code example #13
nsresult
OpusTrackEncoder::GetEncodedTrack(EncodedFrameContainer& aData)
{
  PROFILER_LABEL("OpusTrackEncoder", "GetEncodedTrack",
    js::ProfileEntry::Category::OTHER);
  {
    ReentrantMonitorAutoEnter mon(mReentrantMonitor);
    // Wait until initialized or cancelled.
    while (!mCanceled && !mInitialized) {
      mReentrantMonitor.Wait();
    }
    if (mCanceled || mEncodingComplete) {
      return NS_ERROR_FAILURE;
    }
  }

  // calculation below depends on the truth that mInitialized is true.
  MOZ_ASSERT(mInitialized);

  // re-sampled frames left last time which didn't fit into an Opus packet duration.
  const int framesLeft = mResampledLeftover.Length() / mChannels;
  // When framesLeft is 0, (GetPacketDuration() - framesLeft) is a multiple
  // of kOpusSamplingRate. There is no precision loss in the integer division
  // in computing framesToFetch. If framesLeft > 0, we need to add 1 to
  // framesToFetch to ensure there will be at least n frames after re-sampling.
  const int frameRoundUp = framesLeft ? 1 : 0;

  MOZ_ASSERT(GetPacketDuration() >= framesLeft);
  // Try to fetch m frames such that there will be n frames
  // where (n + frameLeft) >= GetPacketDuration() after re-sampling.
  const int framesToFetch = !mResampler ? GetPacketDuration()
    : (GetPacketDuration() - framesLeft) * mSamplingRate / kOpusSamplingRate
      + frameRoundUp;
  {
    // Move all the samples from mRawSegment to mSourceSegment. We only hold
    // the monitor in this block.
    ReentrantMonitorAutoEnter mon(mReentrantMonitor);

    // Wait until enough raw data, end of stream or cancelled.
    while (!mCanceled && mRawSegment.GetDuration() +
        mSourceSegment.GetDuration() < framesToFetch &&
        !mEndOfStream) {
      mReentrantMonitor.Wait();
    }

    if (mCanceled || mEncodingComplete) {
      return NS_ERROR_FAILURE;
    }

    mSourceSegment.AppendFrom(&mRawSegment);

    // Pad |mLookahead| samples to the end of source stream to prevent loss of
    // original data, the pcm duration will be calculated at rate 48K later.
    if (mEndOfStream && !mEosSetInEncoder) {
      mEosSetInEncoder = true;
      mSourceSegment.AppendNullData(mLookahead);
    }
  }

  // Start encoding data.
  nsAutoTArray<AudioDataValue, 9600> pcm;
  pcm.SetLength(GetPacketDuration() * mChannels);
  AudioSegment::ChunkIterator iter(mSourceSegment);
  int frameCopied = 0;

  while (!iter.IsEnded() && frameCopied < framesToFetch) {
    AudioChunk chunk = *iter;

    // Chunk to the required frame size.
    int frameToCopy = chunk.GetDuration();
    if (frameCopied + frameToCopy > framesToFetch) {
      frameToCopy = framesToFetch - frameCopied;
    }

    if (!chunk.IsNull()) {
      // Append the interleaved data to the end of pcm buffer.
      AudioTrackEncoder::InterleaveTrackData(chunk, frameToCopy, mChannels,
        pcm.Elements() + frameCopied * mChannels);
    } else {
      memset(pcm.Elements() + frameCopied * mChannels, 0,
             frameToCopy * mChannels * sizeof(AudioDataValue));
    }

    frameCopied += frameToCopy;
    iter.Next();
  }

  RefPtr<EncodedFrame> audiodata = new EncodedFrame();
  audiodata->SetFrameType(EncodedFrame::OPUS_AUDIO_FRAME);
  int framesInPCM = frameCopied;
  if (mResampler) {
    nsAutoTArray<AudioDataValue, 9600> resamplingDest;
    // We want to consume all the input data, so we slightly oversize the
    // resampled data buffer so we can fit the output data in. We cannot really
    // predict the output frame count at each call.
    uint32_t outframes = frameCopied * kOpusSamplingRate / mSamplingRate + 1;
    uint32_t inframes = frameCopied;

    resamplingDest.SetLength(outframes * mChannels);

#if MOZ_SAMPLE_TYPE_S16
    short* in = reinterpret_cast<short*>(pcm.Elements());
    short* out = reinterpret_cast<short*>(resamplingDest.Elements());
    speex_resampler_process_interleaved_int(mResampler, in, &inframes,
                                                        out, &outframes);
#else
    float* in = reinterpret_cast<float*>(pcm.Elements());
    float* out = reinterpret_cast<float*>(resamplingDest.Elements());
    speex_resampler_process_interleaved_float(mResampler, in, &inframes,
                                                          out, &outframes);
#endif

    MOZ_ASSERT(pcm.Length() >= mResampledLeftover.Length());
    PodCopy(pcm.Elements(), mResampledLeftover.Elements(),
        mResampledLeftover.Length());

    uint32_t outframesToCopy = std::min(outframes,
        static_cast<uint32_t>(GetPacketDuration() - framesLeft));

    MOZ_ASSERT(pcm.Length() - mResampledLeftover.Length() >=
        outframesToCopy * mChannels);
    PodCopy(pcm.Elements() + mResampledLeftover.Length(),
        resamplingDest.Elements(), outframesToCopy * mChannels);
    int frameLeftover = outframes - outframesToCopy;
    mResampledLeftover.SetLength(frameLeftover * mChannels);
    PodCopy(mResampledLeftover.Elements(),
        resamplingDest.Elements() + outframesToCopy * mChannels,
        mResampledLeftover.Length());
    // This is always at 48000Hz.
    framesInPCM = framesLeft + outframesToCopy;
    audiodata->SetDuration(framesInPCM);
  } else {
    // The ogg time stamping and pre-skip is always timed at 48000.
    audiodata->SetDuration(frameCopied * (kOpusSamplingRate / mSamplingRate));
  }

  // Remove the raw data which has been pulled to pcm buffer.
  // The value of frameCopied should equal to (or smaller than, if eos)
  // GetPacketDuration().
  mSourceSegment.RemoveLeading(frameCopied);

  // Has reached the end of input stream and all queued data has pulled for
  // encoding.
  if (mSourceSegment.GetDuration() == 0 && mEndOfStream) {
    mEncodingComplete = true;
    LOG("[Opus] Done encoding.");
  }

  MOZ_ASSERT(mEndOfStream || framesInPCM == GetPacketDuration());

  // Append null data to pcm buffer if the leftover data is not enough for
  // opus encoder.
  if (framesInPCM < GetPacketDuration() && mEndOfStream) {
    PodZero(pcm.Elements() + framesInPCM * mChannels,
        (GetPacketDuration() - framesInPCM) * mChannels);
  }
  nsTArray<uint8_t> frameData;
  // Encode the data with Opus Encoder.
  frameData.SetLength(MAX_DATA_BYTES);
  // result is returned as opus error code if it is negative.
  int result = 0;
#ifdef MOZ_SAMPLE_TYPE_S16
  const opus_int16* pcmBuf = static_cast<opus_int16*>(pcm.Elements());
  result = opus_encode(mEncoder, pcmBuf, GetPacketDuration(),
                       frameData.Elements(), MAX_DATA_BYTES);
#else
  const float* pcmBuf = static_cast<float*>(pcm.Elements());
  result = opus_encode_float(mEncoder, pcmBuf, GetPacketDuration(),
                             frameData.Elements(), MAX_DATA_BYTES);
#endif
  frameData.SetLength(result >= 0 ? result : 0);

  if (result < 0) {
    LOG("[Opus] Fail to encode data! Result: %s.", opus_strerror(result));
  }
  if (mEncodingComplete) {
    if (mResampler) {
      speex_resampler_destroy(mResampler);
      mResampler = nullptr;
    }
    mResampledLeftover.SetLength(0);
  }

  audiodata->SwapInFrameData(frameData);
  aData.AppendEncodedFrame(audiodata);
  return result >= 0 ? NS_OK : NS_ERROR_FAILURE;
}
Code example #14
File: dsp.c Project: upswimsdn/libventrilo3
int
_v3_audio_send(v3_handle v3h, uint32_t rate, uint8_t channels, const void *pcm, uint32_t pcmlen) {
    _v3_connection *v3c;
    const v3_codec *codec;
    uint8_t pcmbuf[65536];
    uint32_t pcmbuflen = 0;
    uint8_t databuf[2048];
    uint32_t databuflen = 0;
    uint32_t framesize;
    uint8_t *ptr;
    uint32_t *len;
    uint32_t rd;
    float *volume[1];
    int ret = V3_OK;

    _v3_enter(v3h, __func__);

    channels = (channels == 2) ? 2 : 1;
    v3c = _v3_handles[v3h];
    codec = v3_codec_channel_get(v3h, v3c->luser.channel);
    framesize = codec->framesize * channels;

    if (!v3_codec_valid(codec)) {
        _v3_error(v3h, "invalid or unsupported codec");
        _v3_leave(v3h, __func__);
        return V3_FAILURE;
    }
    if (rate != codec->rate) {
#ifdef HAVE_SPEEXDSP
        int err = 0;
        uint32_t in_len = pcmlen;
        uint32_t out_len = sizeof(pcmbuf);

        if (!v3c->resampler.state ||
            v3c->resampler.in_rate != rate ||
            v3c->resampler.out_rate != codec->rate ||
            v3c->resampler.channels != channels) {
            if (v3c->resampler.state) {
                speex_resampler_destroy(v3c->resampler.state);
            }
            v3c->resampler.state = speex_resampler_init(
                    (v3c->resampler.channels = channels),
                    (v3c->resampler.in_rate = rate),
                    (v3c->resampler.out_rate = codec->rate),
                    SPEEX_RESAMPLER_QUALITY_VOIP,
                    &err);
        }
        in_len  /= sizeof(int16_t) * channels;
        out_len /= sizeof(int16_t) * channels;
        if (err || (err = speex_resampler_process_interleaved_int(
                v3c->resampler.state,
                pcm,
                &in_len,
                (void *)pcmbuf,
                &out_len))) {
            _v3_error(v3h, "resampler error: %i: %s", err, speex_resampler_strerror(err));
            _v3_leave(v3h, __func__);
            return V3_FAILURE;
        }
        pcmbuflen = out_len * sizeof(int16_t) * channels;
#else
        _v3_error(v3h, "resampler needed for output rate (in: %uHz != out: %uHz)", rate, codec->rate);
        _v3_leave(v3h, __func__);
        return V3_FAILURE;
#endif
    } else {
        if (pcmlen > sizeof(pcmbuf)) {
            _v3_error(v3h, "pcm length larger than buffer size (%u > %lu)", pcmlen, sizeof(pcmbuf));
            _v3_leave(v3h, __func__);
            return V3_FAILURE;
        }
        memcpy(pcmbuf, pcm, pcmlen);
        pcmbuflen = pcmlen;
    }
    if (channels == 2) {
        switch (codec->index) {
#ifdef HAVE_OPUS
          case 1:
          case 2:
            break;
#endif
          default:
            {
                int16_t *sample = (int16_t *)pcmbuf;
                uint32_t ctr;

                pcmbuflen /= 2;
                for (ctr = 0; ctr < pcmbuflen; ++ctr) {
                    sample[ctr] = sample[ctr*2] / 2 + sample[ctr*2+1] / 2;
                }
                channels = 1;
            }
            break;
        }
    }
    while (*(len = (v3c->pcmqueued) ? &v3c->pcmqueued : &pcmbuflen) / framesize >= codec->frames) {
        ptr = (v3c->pcmqueued) ? v3c->pcmq : pcmbuf;
        rd = framesize * codec->frames;
        *volume = &v3c->luser.volume;
        _v3_audio_amplify(v3h, (void *)ptr, rd, volume, sizeof(volume) / sizeof(*volume));
        databuflen = sizeof(databuf);
        if ((ret = _v3_audio_encode(
                v3h,
                ptr,
                rd,
                codec->index,
                codec->format,
                &v3c->encoder,
                databuf,
                &databuflen,
                channels)) == V3_OK) {
            ret = _v3_msg_audio_put(
                    v3h,
                    V3_AUDIO_DATA,
                    codec->index,
                    codec->format,
                    pcmlen,
                    databuf,
                    databuflen);
        }
        memmove(ptr, ptr + rd, (v3c->pcmqueued ? sizeof(v3c->pcmq) : sizeof(pcmbuf)) - rd);
        *len -= rd;
    }
    if (pcmbuflen) {
        rd = sizeof(v3c->pcmq) - v3c->pcmqueued;
        rd = (pcmbuflen > rd) ? rd : pcmbuflen;
        memcpy(v3c->pcmq + v3c->pcmqueued, pcmbuf, rd);
        v3c->pcmqueued += rd;
    }

    _v3_leave(v3h, __func__);
    return ret;
}
Code example #15
static void resample_process_ms2(MSFilter *obj){
	ResampleData *dt=(ResampleData*)obj->data;
	mblk_t *im, *om = NULL, *om_chan = NULL;
	
	if (dt->output_rate==dt->input_rate){
		while((im=ms_queue_get(obj->inputs[0]))!=NULL){
			if (resample_channel_adapt(dt->in_nchannels, dt->out_nchannels, im, &om) == 0) {
				ms_queue_put(obj->outputs[0], im);
			} else {
				ms_queue_put(obj->outputs[0], om);
				freemsg(im);
			}
		}
		return;
	}
	ms_filter_lock(obj);
	if (dt->handle!=NULL){
		unsigned int inrate=0, outrate=0;
		speex_resampler_get_rate(dt->handle,&inrate,&outrate);
		if (inrate!=dt->input_rate || outrate!=dt->output_rate){
			speex_resampler_destroy(dt->handle);
			dt->handle=0;
		}
	}
	if (dt->handle==NULL){
		resample_init_speex(dt);
	}

	
	while((im=ms_queue_get(obj->inputs[0]))!=NULL){
		unsigned int inlen=(im->b_wptr-im->b_rptr)/(2*dt->in_nchannels);
		unsigned int outlen=((inlen*dt->output_rate)/dt->input_rate)+1;
		unsigned int inlen_orig=inlen;
		om=allocb(outlen*2*dt->in_nchannels,0);
		mblk_meta_copy(im, om);
		if (dt->in_nchannels==1){
			speex_resampler_process_int(dt->handle, 
					0, 
					(int16_t*)im->b_rptr, 
					&inlen, 
					(int16_t*)om->b_wptr, 
					&outlen);
		}else{
			speex_resampler_process_interleaved_int(dt->handle, 
					(int16_t*)im->b_rptr, 
					&inlen, 
					(int16_t*)om->b_wptr, 
					&outlen);
		}
		if (inlen_orig!=inlen){
			ms_error("Bug in resampler ! only %u samples consumed instead of %u, out=%u",
				inlen,inlen_orig,outlen);
		}
		om->b_wptr+=outlen*2*dt->in_nchannels;
		mblk_set_timestamp_info(om,dt->ts);
		dt->ts+=outlen;
		if (resample_channel_adapt(dt->in_nchannels, dt->out_nchannels, om, &om_chan) == 0) {
			ms_queue_put(obj->outputs[0], om);
		} else {
			ms_queue_put(obj->outputs[0], om_chan);
			freemsg(om);
		}
		freemsg(im);
	}
	ms_filter_unlock(obj);
}
Code example #16
// outputs a number of frames less than or equal to *outFrameCount and updates *outFrameCount
// with the actual number of frames produced.
int resampler_resample_from_provider(struct resampler_itfe *resampler,
                       int16_t *out,
                       size_t *outFrameCount)
{
    struct resampler *rsmp = (struct resampler *)resampler;

    if (rsmp == NULL || out == NULL || outFrameCount == NULL) {
        return -EINVAL;
    }
    if (rsmp->provider == NULL) {
        *outFrameCount = 0;
        return -ENOSYS;
    }

    size_t framesRq = *outFrameCount;
    // update and cache the number of frames needed at the input sampling rate to produce
    // the number of frames requested at the output sampling rate
    if (framesRq != rsmp->frames_rq) {
        rsmp->frames_needed = (framesRq * rsmp->in_sample_rate) / rsmp->out_sample_rate + 1;
        rsmp->frames_rq = framesRq;
    }

    size_t framesWr = 0;
    spx_uint32_t inFrames = 0;
    while (framesWr < framesRq) {
        if (rsmp->frames_in < rsmp->frames_needed) {
            // make sure that the number of frames present in rsmp->in_buf (rsmp->frames_in) is at
            // least the number of frames needed to produce the number of frames requested at
            // the output sampling rate
            if (rsmp->in_buf_size < rsmp->frames_needed) {
                rsmp->in_buf_size = rsmp->frames_needed;
                rsmp->in_buf = (int16_t *)realloc(rsmp->in_buf,
                                        rsmp->in_buf_size * rsmp->channel_count * sizeof(int16_t));
            }
            struct resampler_buffer buf;
            buf.frame_count = rsmp->frames_needed - rsmp->frames_in;
            rsmp->provider->get_next_buffer(rsmp->provider, &buf);
            if (buf.raw == NULL) {
                break;
            }
            memcpy(rsmp->in_buf + rsmp->frames_in * rsmp->channel_count,
                    buf.raw,
                    buf.frame_count * rsmp->channel_count * sizeof(int16_t));
            rsmp->frames_in += buf.frame_count;
            rsmp->provider->release_buffer(rsmp->provider, &buf);
        }

        spx_uint32_t outFrames = framesRq - framesWr;
        inFrames = rsmp->frames_in;
        if (rsmp->channel_count == 1) {
            speex_resampler_process_int(rsmp->speex_resampler,
                                        0,
                                        rsmp->in_buf,
                                        &inFrames,
                                        out + framesWr,
                                        &outFrames);
        } else {
            speex_resampler_process_interleaved_int(rsmp->speex_resampler,
                                        rsmp->in_buf,
                                        &inFrames,
                                        out + framesWr * rsmp->channel_count,
                                        &outFrames);
        }
        framesWr += outFrames;
        rsmp->frames_in -= inFrames;
        ALOGW_IF((framesWr != framesRq) && (rsmp->frames_in != 0),
                "ReSampler::resample() remaining %zu frames in and %zu frames out",
                rsmp->frames_in, (framesRq - framesWr));
    }
    if (rsmp->frames_in) {
        memmove(rsmp->in_buf,
                rsmp->in_buf + inFrames * rsmp->channel_count,
                rsmp->frames_in * rsmp->channel_count * sizeof(int16_t));
    }
    *outFrameCount = framesWr;

    return 0;
}
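All of the examples in this collection share the same speexdsp calling convention: the length arguments count frames per channel and are overwritten by the call with the number of frames actually consumed and produced. A minimal, self-contained sketch of that convention (an assumption for illustration, not taken from any of the projects above), resampling 10 ms of interleaved stereo from 48 kHz to 16 kHz:

/* Minimal sketch: basic speexdsp usage with documented calls only. */
#include <speex/speex_resampler.h>
#include <stdio.h>

int main(void)
{
    int err = 0;
    SpeexResamplerState *st =
        speex_resampler_init(2, 48000, 16000, SPEEX_RESAMPLER_QUALITY_VOIP, &err);
    if (!st || err != RESAMPLER_ERR_SUCCESS)
        return 1;

    spx_int16_t in[480 * 2] = {0};   /* 10 ms of interleaved stereo silence at 48 kHz */
    spx_int16_t out[(160 + 1) * 2];  /* 10 ms at 16 kHz plus one frame of headroom */
    spx_uint32_t in_len = 480;       /* frames per channel available on input */
    spx_uint32_t out_len = 160 + 1;  /* frames per channel of output capacity */

    err = speex_resampler_process_interleaved_int(st, in, &in_len, out, &out_len);
    if (err == RESAMPLER_ERR_SUCCESS)
        printf("consumed %u frames, produced %u frames\n",
               (unsigned)in_len, (unsigned)out_len);

    speex_resampler_destroy(st);
    return 0;
}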
Code example #17
File: mixer.cpp Project: 1337Noob1337/OpenRCT2
void Mixer::MixChannel(Channel& channel, uint8* data, int length)
{
	// Do not mix channel if channel is a sound and sound is disabled
	if (channel.group == MIXER_GROUP_SOUND && !gConfigSound.sound_enabled) {
		return;
	}

	if (channel.source && channel.source->Length() > 0 && !channel.done) {
		AudioFormat streamformat = channel.source->Format();
		int loaded = 0;
		SDL_AudioCVT cvt;
		cvt.len_ratio = 1;
		do {
			int samplesize = format.channels * format.BytesPerSample();
			int samples = length / samplesize;
			int samplesloaded = loaded / samplesize;
			double rate = 1;
			if (format.format == AUDIO_S16SYS) {
				rate = channel.rate;
			}
			int samplestoread = (int)((samples - samplesloaded) * rate);
			int lengthloaded = 0;
			if (channel.offset < channel.source->Length()) {
				bool mustconvert = false;
				if (MustConvert(*channel.source)) {
					if (SDL_BuildAudioCVT(&cvt, streamformat.format, streamformat.channels, streamformat.freq, Mixer::format.format, Mixer::format.channels, Mixer::format.freq) == -1) {
						break;
					}
					mustconvert = true;
				}

				const uint8* datastream = 0;
				int toread = (int)(samplestoread / cvt.len_ratio) * samplesize;
				int readfromstream = (channel.source->GetSome(channel.offset, &datastream, toread));
				if (readfromstream == 0) {
					break;
				}

				uint8* dataconverted = 0;
				const uint8* tomix = 0;

				if (mustconvert) {
					// tofix: there seems to be an issue with converting audio using SDL_ConvertAudio in the callback vs preconverted, can cause pops and static depending on sample rate and channels
					if (Convert(cvt, datastream, readfromstream, &dataconverted)) {
						tomix = dataconverted;
						lengthloaded = cvt.len_cvt;
					} else {
						break;
					}
				} else {
					tomix = datastream;
					lengthloaded = readfromstream;
				}

				bool effectbufferloaded = false;
				if (rate != 1 && format.format == AUDIO_S16SYS) {
					int in_len = (int)((double)lengthloaded / samplesize);
					int out_len = samples;
					if (!channel.resampler) {
						channel.resampler = speex_resampler_init(format.channels, format.freq, format.freq, 0, 0);
					}
					if (readfromstream == toread) {
						// use buffer lengths for conversion ratio so that it fits exactly
						speex_resampler_set_rate(channel.resampler, in_len, samples - samplesloaded);
					} else {
						// reached end of stream, so we can't use the buffer length as the resampling ratio
						speex_resampler_set_rate(channel.resampler, format.freq, (int)(format.freq * (1 / rate)));
					}
					speex_resampler_process_interleaved_int(channel.resampler, (const spx_int16_t*)tomix, (spx_uint32_t*)&in_len, (spx_int16_t*)effectbuffer, (spx_uint32_t*)&out_len);
					effectbufferloaded = true;
					tomix = effectbuffer;
					lengthloaded = (out_len * samplesize);
				}

				if (channel.pan != 0.5f && format.channels == 2) {
					if (!effectbufferloaded) {
						memcpy(effectbuffer, tomix, lengthloaded);
						effectbufferloaded = true;
						tomix = effectbuffer;
					}
					switch (format.format) {
						case AUDIO_S16SYS:
							EffectPanS16(channel, (sint16*)effectbuffer, lengthloaded / samplesize);
							break;
						case AUDIO_U8:
							EffectPanU8(channel, (uint8*)effectbuffer, lengthloaded / samplesize);
							break;
					}
				}

				int mixlength = lengthloaded;
				if (loaded + mixlength > length) {
					mixlength = length - loaded;
				}

				float volumeadjust = volume;
				volumeadjust *= (gConfigSound.master_volume / 100.0f);
				switch (channel.group) {
				case MIXER_GROUP_SOUND:
					volumeadjust *= (gConfigSound.sound_volume / 100.0f);

					// Cap sound volume on title screen so music is more audible
					if (RCT2_GLOBAL(RCT2_ADDRESS_SCREEN_FLAGS, uint8) & SCREEN_FLAGS_TITLE_DEMO) {
						volumeadjust = Math::Min(volumeadjust, 0.75f);
					}
					break;
				case MIXER_GROUP_RIDE_MUSIC:
					volumeadjust *= (gConfigSound.ride_music_volume / 100.0f);
					break;
				}
				int startvolume = (int)(channel.oldvolume * volumeadjust);
				int endvolume = (int)(channel.volume * volumeadjust);
				if (channel.stopping) {
					endvolume = 0;
				}
				int mixvolume = (int)(channel.volume * volumeadjust);
				if (startvolume != endvolume) {
					// fade between volume levels to smooth out sound and minimize clicks from sudden volume changes
					if (!effectbufferloaded) {
						memcpy(effectbuffer, tomix, lengthloaded);
						effectbufferloaded = true;
						tomix = effectbuffer;
					}
					mixvolume = SDL_MIX_MAXVOLUME; // set to max since we are adjusting the volume ourselves
					int fadelength = mixlength / format.BytesPerSample();
					switch (format.format) {
						case AUDIO_S16SYS:
							EffectFadeS16((sint16*)effectbuffer, fadelength, startvolume, endvolume);
							break;
						case AUDIO_U8:
							EffectFadeU8((uint8*)effectbuffer, fadelength, startvolume, endvolume);
							break;
					}
				}

				SDL_MixAudioFormat(&data[loaded], tomix, format.format, mixlength, mixvolume);

				if (dataconverted) {
					delete[] dataconverted;
				}

				channel.offset += readfromstream;
			}

			loaded += lengthloaded;

			if (channel.loop != 0 && channel.offset >= channel.source->Length()) {
				if (channel.loop != -1) {
					channel.loop--;
				}
				channel.offset = 0;
			}
		} while(loaded < length && channel.loop != 0 && !channel.stopping);

		channel.oldvolume = channel.volume;
		channel.oldvolume_l = channel.volume_l;
		channel.oldvolume_r = channel.volume_r;
		if (channel.loop == 0 && channel.offset >= channel.source->Length()) {
			channel.done = true;
		}
	}
}
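Both OpenRCT2 mixers keep one resampler per channel alive and retune it with speex_resampler_set_rate() before each block instead of recreating it. A minimal sketch of that pattern with a hypothetical resample_block() helper (not OpenRCT2 code); playback_rate is assumed to be positive:

/* Hypothetical helper (assumption): retune a persistent resampler, then
 * process one interleaved 16-bit block. */
#include <speex/speex_resampler.h>

static void resample_block(SpeexResamplerState **st,
                           unsigned channels, unsigned device_rate, double playback_rate,
                           const spx_int16_t *in, spx_uint32_t *in_frames,
                           spx_int16_t *out, spx_uint32_t *out_frames)
{
    if (*st == NULL)
        *st = speex_resampler_init(channels, device_rate, device_rate, 0, NULL);
    /* Playing at 2.0x should consume two input frames per output frame,
     * hence the effective output rate device_rate / playback_rate. */
    speex_resampler_set_rate(*st, device_rate,
                             (spx_uint32_t)(device_rate / playback_rate));
    speex_resampler_process_interleaved_int(*st, in, in_frames, out, out_frames);
}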
Code example #18
File: mixer.cpp Project: Xaon60/OpenRCT2
void Mixer::MixChannel(Channel& channel, uint8* data, int length)
{
	if (channel.stream) {
		if (!channel.resampler) {
			channel.resampler = speex_resampler_init(format.channels, format.freq, format.freq, 0, 0);
		}
		AudioFormat channelformat = *channel.stream->Format();
		int loaded = 0;
		SDL_AudioCVT cvt;
		cvt.len_ratio = 1;
		do {
			int samplesize = format.channels * format.BytesPerSample();
			int samples = length / samplesize;
			int samplesloaded = loaded / samplesize;
			int samplestoread = (int)ceil((samples - samplesloaded) * channel.rate);
			int lengthloaded = 0;
			if (channel.offset < channel.stream->Length()) {
				bool mustconvert = false;
				if (MustConvert(*channel.stream)) {
					if (SDL_BuildAudioCVT(&cvt, channelformat.format, channelformat.channels, channelformat.freq, Mixer::format.format, Mixer::format.channels, Mixer::format.freq) == -1) {
						break;
					}
					mustconvert = true;
				}

				const uint8* datastream = 0;
				int readfromstream = (channel.stream->GetSome(channel.offset, &datastream, (int)(((samplestoread) * samplesize) / cvt.len_ratio)) / channelformat.BytesPerSample()) * channelformat.BytesPerSample();
				if (readfromstream == 0) {
					break;
				}

				int volume = channel.volume;
				uint8* dataconverted = 0;
				const uint8* tomix = 0;

				if (mustconvert) {
					if (Convert(cvt, datastream, readfromstream, &dataconverted)) {
						tomix = dataconverted;
						lengthloaded = (cvt.len_cvt / samplesize) * samplesize;
					} else {
						break;
					}
				} else {
					tomix = datastream;
					lengthloaded = readfromstream;
				}

				bool effectbufferloaded = false;

				if (channel.rate != 1 && format.format == AUDIO_S16SYS) {
					int in_len = (int)(ceil((double)lengthloaded / samplesize));
					int out_len = samples + 20; // needs some extra, otherwise resampler sometimes doesn't process all the input samples
					speex_resampler_set_rate(channel.resampler, format.freq, (int)(format.freq * (1 / channel.rate)));
					speex_resampler_process_interleaved_int(channel.resampler, (const spx_int16_t*)tomix, (spx_uint32_t*)&in_len, (spx_int16_t*)effectbuffer, (spx_uint32_t*)&out_len);
					effectbufferloaded = true;
					tomix = effectbuffer;
					lengthloaded = (out_len * samplesize);
				}

				if (channel.pan != 0.5f && format.channels == 2) {
					if (!effectbufferloaded) {
						memcpy(effectbuffer, tomix, lengthloaded);
						effectbufferloaded = true;
						tomix = effectbuffer;
					}
					switch (format.format) {
						case AUDIO_S16SYS:
							EffectPanS16(channel, (sint16*)effectbuffer, lengthloaded / samplesize);
							break;
						case AUDIO_U8:
							EffectPanU8(channel, (uint8*)effectbuffer, lengthloaded / samplesize);
						break;
					}
				}

				int mixlength = lengthloaded;
				if (loaded + mixlength > length) {
					mixlength = length - loaded;
				}

				SDL_MixAudioFormat(&data[loaded], tomix, format.format, mixlength, volume);

				if (dataconverted) {
					delete[] dataconverted;
				}

				channel.offset += readfromstream;

			}

			loaded += lengthloaded;

			if (channel.loop != 0 && channel.offset >= channel.stream->Length()) {
				if (channel.loop != -1) {
					channel.loop--;
				}
				channel.offset = 0;
			}
		} while(loaded < length && channel.loop != 0);
	}
}
Code example #19
File: src.c Project: SteveMcNeill/QB64-SteveRepo
int32 func__sndopen(qbs* filename,qbs* requirements,int32 passed){
sndsetup();
if (new_error) return 0;

static qbs *s1=NULL;
if (!s1) s1=qbs_new(0,0);
static qbs *req=NULL;
if (!req) req=qbs_new(0,0);
static qbs *s3=NULL;
if (!s3) s3=qbs_new(0,0);

static uint8 r[32];
static int32 i,i2,i3;
//check requirements
memset(r,0,32);
if (passed){
if (requirements->len){
i=1;
qbs_set(req,qbs_ucase(requirements));//convert tmp str to perm str
nextrequirement:
i2=func_instr(i,req,qbs_new_txt(","),1);
if (i2){
qbs_set(s1,func_mid(req,i,i2-i,1));
}else{
qbs_set(s1,func_mid(req,i,req->len-i+1,1));
}
qbs_set(s1,qbs_rtrim(qbs_ltrim(s1)));
if (qbs_equal(s1,qbs_new_txt("SYNC"))){r[0]++; goto valid;}
if (qbs_equal(s1,qbs_new_txt("VOL"))){r[1]++; goto valid;}
if (qbs_equal(s1,qbs_new_txt("PAUSE"))){r[2]++; goto valid;}
if (qbs_equal(s1,qbs_new_txt("LEN"))){r[3]++; goto valid;}
if (qbs_equal(s1,qbs_new_txt("SETPOS"))){r[4]++; goto valid;}
error(5); return 0;//invalid requirements
valid:
if (i2){i=i2+1; goto nextrequirement;}
for (i=0;i<32;i++) if (r[i]>1){error(5); return 0;}//cannot define requirements twice
}//->len
}//passed
qbs_set(s1,qbs_add(filename,qbs_new_txt_len("\0",1)));//s1=filename+CHR$(0)

if (!r[0]){//NOT SYNC
 if (snd_stream_handle){error(5); return 0;}//stream in use
}

//load file
if (s1->len==1) return 0;//return invalid handle if null length string
static int32 fh,result;
static int64 lof;
fh=gfs_open(s1,1,0,0);
if (fh<0) return 0;
lof=gfs_lof(fh);
static uint8* content;
content=(uint8*)malloc(lof); if (!content){gfs_close(fh); return 0;}
result=gfs_read(fh,-1,content,lof);
gfs_close(fh);
if (result<0){free(content); return 0;}

//identify file format
static snd_sequence_struct *seq;

//OGG?
#ifdef DEPENDENCY_AUDIO_DECODE_OGG
if (lof>=3){
if (content[0]==79){ if (content[1]==103){ if (content[2]==103){//"Ogg"
   seq=snd_decode_ogg(content,lof);
   goto got_seq;
}}}
}//3
#endif

//WAV?
#ifdef DEPENDENCY_AUDIO_DECODE_WAV
if (lof>=12){
  if ((*(uint32*)&content[8])==0x45564157){//WAVE
   seq=snd_decode_wav(content,lof);
   goto got_seq;
  }//WAVE
}
#endif

//assume mp3!
//MP3?
#ifdef DEPENDENCY_AUDIO_DECODE_MP3
seq=snd_decode_mp3(content,lof);
#endif

got_seq:
free(content);
if (seq==NULL) return 0;

//convert sequence (includes sample rate conversion etc etc)

//just perform sample_rate fix for now...

//1. 8->16 bit conversion and/or endian conversion
static int32 incorrect_format;
incorrect_format=0;
if (seq->bits_per_sample!=16) incorrect_format=1;
if (seq->is_unsigned) incorrect_format=1;
//todo... if (seq->endian==???)

//this section does not fix the frequency, only the bits per sample
//and signed-ness of the data
if (incorrect_format){
 static int32 bps; bps=seq->bits_per_sample/8;
 static int32 samples; samples=seq->data_size/bps;
 static uint8 *new_data;
 if (bps!=2){
  new_data=(uint8*)malloc(samples*2);
 }else{
  new_data=seq->data;
 }
 static int32 i,v;
 for (i=0;i<samples;i++){
  //read original value
  v=0;
  if (bps==1){
   if (seq->is_unsigned){
    v=*(uint8*)(seq->data+i*1);
    v=(v-128)*256;
   }else{
    v=*(int8*)(seq->data+i*1);
    v=v*128;
   }
  }
  if (bps==2){
   if (seq->is_unsigned){
    v=*(uint16*)(seq->data+i*2);
    v=v-32768;
   }else{
    v=*(int16*)(seq->data+i*2);
   }
  }
  //place new value into array
  ((int16*)new_data)[i]=v;
 }//i
 if (bps!=2){free(seq->data); seq->data=new_data; seq->data_size=samples*2;}
 //update seq info
 seq->bits_per_sample=16;
 seq->is_unsigned=0;
}//incorrect format


//2. samplerate conversion
if (seq->sample_rate != snd_frequency) { //need to resample seq->data
  //create new resampler
  SpeexResamplerState *state;
  state = speex_resampler_init(seq->channels, seq->sample_rate, snd_frequency, SPEEX_RESAMPLER_QUALITY_MIN, NULL);
  if (!state) { //NULL means failure
    free(seq->data);
    return 0;
  }
  
  //allocate new memory for output
  int32 out_samples_max = ((double)seq->data_size / seq->channels / 2) * ((((double)snd_frequency) / ((double)seq->sample_rate)) + 0.1) + 100;//10%+100 extra samples as a buffer-zone
  int16 *resampled = (int16 *)malloc(out_samples_max * seq->channels * sizeof(int16));
  if (!resampled) {
    free(seq->data);
    return 0;
  }
  
  //establish data sizes
  //in_len will be set by the resampler to number of samples processed
  spx_uint32_t in_len = seq->data_size / seq->channels / 2; // divide by 2 because samples are 2 bytes, divide by #channels because the function wants a per-channel count
  //out_len will be set to the number of samples written
  spx_uint32_t out_len;
  
  //resample!
  if (speex_resampler_process_interleaved_int(state, (spx_int16_t *)seq->data, &in_len, (spx_int16_t *)resampled, &out_len) != RESAMPLER_ERR_SUCCESS) {
    //Error
    free(resampled);
    free(seq->data);
    speex_resampler_destroy(state);
    return 0;
  }
  
  //destroy the resampler anyway
  speex_resampler_destroy(state);
  
  //establish real size of new data and update seq
  free(seq->data); //That was the old data
  seq->data_size = out_len * seq->channels * 2; //remember out_len is per channel, and each sample is 2 bytes
  seq->data = (uint8_t *)realloc(resampled, seq->data_size); //we overestimated the array size before, so make it the correct size now
  if (!seq->data) { //realloc could fail
    free(resampled);
    return 0;
  }
  seq->sample_rate = snd_frequency;
}

if (seq->channels==1){
seq->data_mono=seq->data;
seq->data_mono_size=seq->data_size;
}
if (seq->channels==2){
seq->data_stereo=seq->data;
seq->data_stereo_size=seq->data_size;
}
if (seq->channels>2) return 0;

//attach sequence to handle (& inc. refs)
//create snd handle
static int32 handle; handle=list_add(snd_handles);
static snd_struct *snd; snd=(snd_struct*)list_get(snd_handles,handle);

snd->internal=0;
snd->type=2;
snd->seq=seq;
snd->volume=1.0;
snd->capability=r[0]*SND_CAPABILITY_SYNC+r[1]*SND_CAPABILITY_VOL+r[2]*SND_CAPABILITY_PAUSE+r[3]*SND_CAPABILITY_LEN+r[4]*SND_CAPABILITY_SETPOS;
if (!r[0]){
 snd->streamed=1;//NOT SYNC
 snd_stream_handle=handle;
}

return handle;
}
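The 8-bit branch of the format fix-up above widens unsigned samples to signed 16-bit before the resampler ever sees them. A minimal sketch of that conversion with a hypothetical helper name (not QB64 code):

/* Hypothetical helper (assumption): centre unsigned 8-bit PCM on zero and
 * scale to signed 16-bit, as the snippet above does with (v - 128) * 256.
 * out must hold at least `samples` int16_t values. */
#include <stdint.h>
#include <stddef.h>

static void pcm_u8_to_s16(const uint8_t *in, int16_t *out, size_t samples)
{
    for (size_t i = 0; i < samples; i++)
        out[i] = (int16_t)(((int)in[i] - 128) * 256); /* 0..255 -> -32768..32512 */
}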
Code example #20
File: decode.c Project: Studio-Link/baresip
int mpa_decode_frm(struct audec_state *ads,
		   int fmt, void *sampv_void, size_t *sampc,
		    const uint8_t *buf, size_t len)
{
	int result, channels, encoding, i;
	long samplerate;
	size_t n;
	spx_uint32_t intermediate_len;
	spx_uint32_t out_len;
	int16_t *sampv = sampv_void;

#ifdef DEBUG
	debug("MPA dec start %d %ld\n",len, *sampc);
#endif

	if (!ads || !sampv || !sampc || !buf || len<=4)
		return EINVAL;

	if (*(uint32_t*)(void *)buf != 0) {
		warning("MPA dec header is not zero %08X, not supported yet\n",
			*(uint32_t*)(void *)buf);
		return EPROTO;
	}

	if (fmt != AUFMT_S16LE)
		return ENOTSUP;

	n = 0;
	result = mpg123_decode(ads->dec, buf+4, len-4,
				(unsigned char*)ads->intermediate_buffer,
				sizeof(ads->intermediate_buffer), &n);
				/* n counts bytes */
#ifdef DEBUG
	debug("MPA dec %d %d %d %d\n",result, len-4, n, ads->channels);
#endif

	if (result == MPG123_NEW_FORMAT) {
		mpg123_getformat(ads->dec, &samplerate, &channels, &encoding);
		info("MPA dec format change %d %d %04X\n",samplerate
			,channels,encoding);

		ads->channels = channels;
		ads->start = 0;
		if (ads->resampler)
			speex_resampler_destroy(ads->resampler);
		if (samplerate != MPA_IORATE) {
			ads->resampler = speex_resampler_init(channels,
				      (uint32_t)samplerate, MPA_IORATE,
				      3, &result);
			if (result!=RESAMPLER_ERR_SUCCESS
				|| ads->resampler==NULL) {
				warning("MPA dec upsampler failed %d\n",
					result);
				return EINVAL;
			}
		}
		else
			ads->resampler = NULL;
	}
	else if (result == MPG123_NEED_MORE)
		;			/* workaround: do nothing */
	else if (result != MPG123_OK) {
		warning("MPA dec feed error %d %s\n", result,
			mpg123_plain_strerror(result));
		return EPROTO;
	}

	if (ads->resampler)  {
		intermediate_len = (uint32_t)(n / 2 / ads->channels);
			/* intermediate_len counts samples per channel */
		out_len = (uint32_t)(*sampc / 2);

		result=speex_resampler_process_interleaved_int(
			ads->resampler, ads->intermediate_buffer,
			&intermediate_len, sampv, &out_len);
		if (result!=RESAMPLER_ERR_SUCCESS) {
			warning("MPA dec upsample error: %s %d %d\n",
				strerror(result), out_len, *sampc/2);
			return EPROTO;
		}
		if (ads->channels==1) {
			for (i=out_len-1;i>=0;i--)
				sampv[i+i+1]=sampv[i+i]=sampv[i];
			*sampc = out_len * 2;
		}
		else
			*sampc = out_len * ads->channels;
	}
	else {
		n /= 2;
		if (ads->channels!=1) {
			for (i=0;(unsigned)i<n;i++)
				sampv[i]=ads->intermediate_buffer[i];
			*sampc = n;
		}
		else {
			for (i=0;(unsigned)i<n;i++)
				sampv[i*2]=sampv[i*2+1]=
					ads->intermediate_buffer[i];
			*sampc = n * 2;
		}
	}

#ifdef DEBUG
	debug("MPA dec done %zu\n",*sampc);
#endif

	return 0;
}
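The mono path above duplicates each resampled sample into both stereo slots of the same buffer, iterating backwards so nothing is overwritten before it has been copied. A minimal sketch of that in-place upmix with a hypothetical helper name (not baresip code); the buffer must have room for 2*n samples:

/* Hypothetical helper (assumption): expand n mono samples at the start of
 * buf into 2*n interleaved stereo samples, walking backwards so the mono
 * data is never clobbered before it is read. */
#include <stdint.h>
#include <stddef.h>

static void mono_to_stereo_inplace(int16_t *buf, size_t n /* mono samples */)
{
    for (size_t i = n; i-- > 0; ) {
        buf[2 * i]     = buf[i];
        buf[2 * i + 1] = buf[i];
    }
}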
Code example #21
File: speexdspResampler.c Project: taktod/ttLibC
/*
 * sample_rate resample.
 * @param resampler  resampler object.
 * @param prev_frame reuse frame.
 * @param src_pcms16 source pcms16 data.
 * @return resampled pcms16 data.
 */
ttLibC_PcmS16 *ttLibC_SpeexdspResampler_resample(ttLibC_SpeexdspResampler *resampler, ttLibC_PcmS16 *prev_frame, ttLibC_PcmS16 *src_pcms16) {
	ttLibC_SpeexdspResampler_ *resampler_ = (ttLibC_SpeexdspResampler_ *)resampler;
	if(resampler_ == NULL) {
		return NULL;
	}
	if(src_pcms16 == NULL) {
		return NULL;
	}
	switch(src_pcms16->type) {
	case PcmS16Type_bigEndian:
	case PcmS16Type_bigEndian_planar:
		resampler_->inherit_super.error = ttLibC_updateError(Target_On_Resampler, Error_InvalidOperation);
		return NULL;
	default:
		return NULL;
	case PcmS16Type_littleEndian:
	case PcmS16Type_littleEndian_planar:
		break;
	}
	ttLibC_PcmS16 *pcms16 = prev_frame;
	uint32_t out_sample_num = (src_pcms16->inherit_super.sample_num
			* resampler_->inherit_super.output_sample_rate
			/ resampler_->inherit_super.input_sample_rate + 1);
	uint32_t in_sample_num  = src_pcms16->inherit_super.sample_num;
	// estimate result data size.
	size_t data_size = out_sample_num * sizeof(int16_t) * resampler_->inherit_super.channel_num;
	uint8_t *data = NULL;
	bool alloc_flag = false;
	if(pcms16 != NULL) {
		if(!pcms16->inherit_super.inherit_super.is_non_copy) {
			if(pcms16->inherit_super.inherit_super.data_size >= data_size) {
				// reuse frame have enough buffer.
				data = pcms16->inherit_super.inherit_super.data;
				data_size = pcms16->inherit_super.inherit_super.data_size;
			}
			else {
				ttLibC_free(pcms16->inherit_super.inherit_super.data);
			}
		}
		pcms16->inherit_super.inherit_super.is_non_copy = true;
	}
	if(data == NULL) {
		data = ttLibC_malloc(data_size);
		if(data == NULL) {
			resampler_->inherit_super.error = ttLibC_updateError(Target_On_Resampler, Error_MemoryAllocate);
			return NULL;
		}
		alloc_flag = true;
	}
	int res;
	switch(src_pcms16->type) {
	default:
		ERR_PRINT("no way to be here..");
		if(alloc_flag) {
			ttLibC_free(data);
		}
		return NULL;
	case PcmS16Type_littleEndian:
		res = speex_resampler_process_interleaved_int(resampler_->resampler, (const int16_t *)src_pcms16->l_data, &in_sample_num, (int16_t *)data, &out_sample_num);
		break;
	case PcmS16Type_littleEndian_planar:
		res = speex_resampler_process_int(resampler_->resampler, resampler_->inherit_super.channel_num, (const int16_t *)src_pcms16->l_data, &in_sample_num, (int16_t *)data, &out_sample_num);
		break;
	}
	if(res != 0) {
		ERR_PRINT("failed to resampler: %s", speex_resampler_strerror(res));
		if(alloc_flag) {
			ttLibC_free(data);
		}
		resampler_->inherit_super.error = ttLibC_updateError(Target_On_Resampler, Error_LibraryError);
		return NULL;
	}
	uint64_t pts = src_pcms16->inherit_super.inherit_super.pts * resampler_->inherit_super.output_sample_rate / resampler_->inherit_super.input_sample_rate;
	uint8_t *l_data = NULL;
	uint32_t l_stride = 0;
	uint8_t *r_data = NULL;
	uint32_t r_stride = 0;
	switch(src_pcms16->type) {
	case PcmS16Type_bigEndian:
	case PcmS16Type_littleEndian:
		l_data = data;
		l_stride = out_sample_num * 2 * resampler_->inherit_super.channel_num;
		break;
	case PcmS16Type_bigEndian_planar:
	case PcmS16Type_littleEndian_planar:
		l_data = data;
		l_stride = out_sample_num * 2;
		if(resampler_->inherit_super.channel_num == 2) {
			r_data = data + l_stride;
			r_stride = l_stride;
		}
		break;
	}
	pcms16 = ttLibC_PcmS16_make(
			pcms16,
			src_pcms16->type,
			resampler_->inherit_super.output_sample_rate,
			out_sample_num,
			resampler_->inherit_super.channel_num,
			data,
			data_size,
			l_data,
			l_stride,
			r_data,
			r_stride,
			true,
			pts,
			resampler_->inherit_super.output_sample_rate);
	if(pcms16 == NULL) {
		if(alloc_flag) {
			ttLibC_free(data);
		}
		resampler_->inherit_super.error = ttLibC_updateError(Target_On_Resampler, Error_MemoryAllocate);
		return NULL;
	}
	pcms16->inherit_super.inherit_super.is_non_copy = false;
	pcms16->inherit_super.inherit_super.buffer_size = pcms16->inherit_super.sample_num * pcms16->inherit_super.channel_num * sizeof(int16_t);
	return pcms16;
}
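For planar input, ttLibC switches to the per-channel entry point, while the next example uses two independent mono resamplers for the left and right planes. A minimal sketch of a third option, assumed for illustration rather than taken from either project: run both planes of a single two-channel resampler through speex_resampler_process_int(), one channel index at a time, so the per-channel filter state stays inside one object.

/* Hypothetical helper (assumption): resample planar 16-bit stereo with one
 * stereo SpeexResamplerState, processing channel index 0 and 1 separately. */
#include <speex/speex_resampler.h>

static int resample_planar_stereo(SpeexResamplerState *st,
                                  const spx_int16_t *in_l, const spx_int16_t *in_r,
                                  spx_uint32_t in_frames,
                                  spx_int16_t *out_l, spx_int16_t *out_r,
                                  spx_uint32_t out_capacity,
                                  spx_uint32_t *out_frames)
{
    spx_uint32_t in_l_len = in_frames, in_r_len = in_frames;       /* frames per channel */
    spx_uint32_t out_l_len = out_capacity, out_r_len = out_capacity;
    int err = speex_resampler_process_int(st, 0, in_l, &in_l_len, out_l, &out_l_len);
    if (err != RESAMPLER_ERR_SUCCESS)
        return err;
    err = speex_resampler_process_int(st, 1, in_r, &in_r_len, out_r, &out_r_len);
    if (err != RESAMPLER_ERR_SUCCESS)
        return err;
    /* Both channels use the same ratio, so the counts normally match; take
     * the smaller one to stay safe. */
    *out_frames = out_l_len < out_r_len ? out_l_len : out_r_len;
    return RESAMPLER_ERR_SUCCESS;
}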
Code example #22
int COggVorbisFileHelper::decode()
{
#if defined(HAVE_PTHREAD)
	pthread_mutex_lock(&mutex1);
#endif
	int cb_len = sizeof(convbuffer) -rcb_len-16;
	char* p = convbuffer + 16; //pre buffer
	memcpy(p,remaing_convbuffer,rcb_len);
	p = p+rcb_len;
	long ret=ov_read_func(&vf,p,cb_len,0,2,1,&current_section);
#if defined(HAVE_PTHREAD)
	pthread_mutex_unlock(&mutex1);
#endif

    if (ret == 0)
	{
      /* EOF */
      return EOF;
    } 
	else if (ret < 0) 
	{
		if(ret==OV_EBADLINK)
		{
			m_strLastError = "Corrupt bitstream section! Exiting.";
			s3eDebugTracePrintf("%s\n",m_strLastError.c_str());
			return ERR;
		}

    }
	else 
	{
		if(bStopDecoding) return EOS;

		int nr_samples = (ret + rcb_len)/sizeof(ogg_int16_t);
		
		if(!bEnableResampling) //store samples in the circular buffer
		{
			for(int k=0;k<nr_samples;k++)
			{
				//ogg_int16_t val = *(ogg_int16_t*)(convbuffer+k*sizeof(ogg_int16_t));
				if(bStopDecoding) return EOS;
				if(nStatus == OH_BUFFERING)
				{
					if(mDecBuffer->GetBusy() >= mDecBuffer->GetCapacity() * m_dBufferingMaxCapacity || k == nr_samples - 1)
					{
						s3eDebugTracePrintf("buffering complete. Playing now..\n");
						nStatus = OH_PLAYING;
						Wait_counter(0);
					}
				}
				while(!mDecBuffer->Enqueue(*(ogg_int16_t*)(convbuffer+k*sizeof(ogg_int16_t))))
				{
					s3eDeviceYieldUntilEvent(10);
					if(bStopDecoding) return EOS;/*fprintf(stderr,"Buffer full\n")*/;
					if(nStatus == OH_BUFFERING) 
					{
						s3eDebugTracePrintf("buffering complete. Playing now..\n");
						nStatus = OH_PLAYING;
						Wait_counter(0);
					}
					return BFF;
				}

			}

			return EOK;
		}

		if (get_nChannels() == 2) nr_samples /= 2;

		for(int k=0;k<nr_samples;k++)
		{
			if (get_nChannels() == 2) 
			{
				m_tmpbufL[k] = *(ogg_int16_t*)(convbuffer+k*2*sizeof(ogg_int16_t));
				m_tmpbufR[k] = *(ogg_int16_t*)(convbuffer+(k*2+1)*sizeof(ogg_int16_t));
			} 
			else 
			{
				m_tmpbufL[k] = *(ogg_int16_t*)(convbuffer+k*sizeof(ogg_int16_t));
			}
		}
		
		unsigned int inlengthL = nr_samples;
		unsigned int inlengthR = nr_samples;

		unsigned int inused = 0;   // output
		unsigned int outputL,outputR;
		outputL= m_outbufsizeL;
		outputR= m_outbufsizeR;
		if (get_nChannels() == 2)  // stereo input
		{		

			speex_resampler_process_int(res_contL,0,m_tmpbufL,&inlengthL,m_outL,&outputL);
			speex_resampler_process_int(res_contR,0,m_tmpbufR,&inlengthR,m_outR,&outputR);


			if(outputL != outputR)
			{
				s3eDebugTracePrintf("Left and Right channels out of sync\n");
			}

			if(inlengthL != inlengthR)
			{
				s3eDebugTracePrintf("Left and Right channels out of sync\n");
			}

			inused = inlengthL*2;

		}
		else
		{
			speex_resampler_process_interleaved_int(res_contL,m_tmpbufL,&inlengthL,m_outL,&outputL);
			
			inused = inlengthL;

		}


		p = convbuffer + inused * sizeof(ogg_int16_t);
		rcb_len = ret- inused*sizeof(ogg_int16_t);

		memcpy(remaing_convbuffer,p,rcb_len);


		for(unsigned int k = 0;k< outputL;k++)
		{
			if(bStopDecoding) return EOS;
			if(k%50 == 0) s3eDeviceYield(1);


			if(nStatus == OH_BUFFERING)
			{
				if(mDecBuffer->GetBusy() >= mDecBuffer->GetCapacity() * m_dBufferingMaxCapacity || k == nr_samples - 1)
				{
					s3eDebugTracePrintf("buffering complete. Playing now..\n");
					nStatus = OH_PLAYING;
					Wait_counter(0);
				}
			}
			while(!mDecBuffer->Enqueue((ogg_int16_t)m_outL[k]))
			{
				s3eDeviceYieldUntilEvent(10);
				if(bStopDecoding) return EOS;/*fprintf(stderr,"Buffer full\n")*/;
				if(nStatus == OH_BUFFERING) 
				{
					s3eDebugTracePrintf("buffering complete. Playing now..\n");
					nStatus = OH_PLAYING;
					Wait_counter(0);
				}
				return BFF;
			}
			if(get_nChannels() == 2)
			{
				while(!mDecBuffer->Enqueue((ogg_int16_t)m_outR[k]))
				{
					s3eDeviceYieldUntilEvent(10);
					if(bStopDecoding) return EOS;/*fprintf(stderr,"Buffer full\n")*/;
					if(nStatus == OH_BUFFERING) 
					{
						s3eDebugTracePrintf("buffering complete. Playing now..\n");
						nStatus = OH_PLAYING;
						Wait_counter(0);
					}
					return BFF;
				}
			}
		}

    }

	return EOK;
}
Code example #23
/**
 *
 * @param aqi
 * @param sampleRate
 * @param sampleSizeInBits
 * @param channels
 * @param buffer
 * @param length the length of <tt>buffer</tt> in bytes
 */
static void
AudioQualityImprovement_resampleInPlay
    (AudioQualityImprovement *aqi,
    double sampleRate, unsigned long sampleSizeInBits, int channels,
    void *buffer, unsigned long length)
{
    spx_uint32_t playSize;
    spx_uint32_t playCapacity;
    spx_uint32_t playLength;
    spx_int16_t *play;

    if (sampleRate == aqi->sampleRate)
        playSize = length;
    else if (length * aqi->sampleRate == aqi->frameSize * sampleRate)
    {
        if (aqi->resampler)
        {
            speex_resampler_set_rate(
                aqi->resampler,
                (spx_uint32_t) sampleRate, (spx_uint32_t) (aqi->sampleRate));
            playSize = aqi->frameSize;
        }
        else
        {
            aqi->resampler
                = speex_resampler_init(
                    channels,
                    (spx_uint32_t) sampleRate, (spx_uint32_t) (aqi->sampleRate),
                    SPEEX_RESAMPLER_QUALITY_VOIP,
                    NULL);
            if (aqi->resampler)
                playSize = aqi->frameSize;
            else
            {
                aqi->playIsDelaying = JNI_TRUE;
                aqi->playLength = 0;
                return;
            }
        }
    }
    else
    {
        /*
         * The specified buffer neither is in the format of the audio capture
         * nor can be resampled to it.
         */
        aqi->playIsDelaying = JNI_TRUE;
        aqi->playLength = 0;
        return;
    }

    /* Ensure that play exists and is large enough. */
    playCapacity
        = ((1 + aqi->playDelay) + 1) * (aqi->frameSize / sizeof(spx_int16_t));
    playLength = playSize / sizeof(spx_int16_t);
    if (playCapacity < playLength)
        playCapacity = playLength;
    if (!(aqi->play) || (aqi->playCapacity < playCapacity))
    {
        spx_int16_t *newPlay;

        newPlay = realloc(aqi->play, playCapacity * sizeof(spx_int16_t));
        if (newPlay)
        {
            if (!(aqi->play))
            {
                aqi->playIsDelaying = JNI_TRUE;
                aqi->playLength = 0;
            }

            aqi->play = newPlay;
            aqi->playCapacity = playCapacity;
        }
        else
        {
            aqi->playIsDelaying = JNI_TRUE;
            aqi->playLength = 0;
            return;
        }
    }

    /* Ensure that there is room for buffer in play. */
    if (aqi->playLength + playLength > aqi->playCapacity)
    {
        aqi->playIsDelaying = JNI_TRUE;
        aqi->playLength = 0;
        /*
         * We don't have enough room in play for buffer which means that we'll
         * have to throw some samples away. But it'll effectively mean that
         * we'll enlarge the drift which will disrupt the echo cancellation. So
         * it seems the least of two evils to just reset the echo cancellation.
         */
        speex_echo_state_reset(aqi->echo);
    }

    /* Place buffer in play. */
    play = aqi->play + aqi->playLength;
    if (length == aqi->frameSize)
        memcpy(play, buffer, playSize);
    else
    {
        unsigned long sampleSizeInBytes = sampleSizeInBits / 8;
        spx_uint32_t bufferSampleCount = length / sampleSizeInBytes;

        speex_resampler_process_interleaved_int(
            aqi->resampler,
            buffer, &bufferSampleCount, play, &playLength);
    }
    aqi->playLength += playLength;

    /* Take into account the latency. */
    if (aqi->playIsDelaying == JNI_TRUE)
        AudioQualityImprovement_updatePlayIsDelaying(aqi);
}