// create a callback for generating a block of samples void audioCB(AudioIOData& io){ UserData& user = *(UserData *)io.user(); float ampL = user.ampL; float ampR = user.ampR; // loop through the number of samples in the block for(int i=0; i<io.framesPerBuffer(); ++i){ float s = io.in(0,i); // get the line-in or microphone sample io.out(0,i) = s * ampL; // set left output sample io.out(1,i) = s * ampR; // set right output sample } // an alternative way to loop through the sample buffers while(io()){ io.out(0) *= 0.5; // scale left output sample io.out(1) *= 0.5; // scale right output sample } // if looping again, you must reset the frame iterator io.frame(0); while(io()){ io.out(0) *= 2; // scale left output sample io.out(1) *= 2; // scale right output sample } }
// create a callback for generating a block of samples void audioCB(AudioIOData& io){ UserData& user = *(UserData *)io.user(); float ampL = user.ampL; float ampR = user.ampR; // loop through the number of samples in the block for(int i=0; i<io.framesPerBuffer(); ++i){ float s = io.in(0,i); // get the line-in or microphone sample io.out(0,i) = s * ampL; // set left and right output channel samples io.out(1,i) = s * ampR; } }
// Audio callback hook: copies this block's output samples into the
// interleaved recording ring buffer (mAudioRing), interleaving up to
// mAudioChans channels per frame. Drops the whole block if the writer
// would lap the reader.
inline void VCR::onAudioCB(AudioIOData& io) {
	// cached, because it may be being used in a different thread
	sampletime r = mAudioReadIndex;
	sampletime w = mAudioWriteIndex;

	// Record no more channels than the ring buffer was laid out for.
	unsigned channels = io.channelsOut();
	if (channels > mAudioChans) channels = mAudioChans;
	unsigned numAudioFrames = io.framesPerBuffer();
	unsigned numSamples = numAudioFrames * channels;
	unsigned length = mAudioRing.size();

	sampletime ahead = w - r;	// how much writer is ahead of reader
	if (ahead > (length - numSamples)) {
		// not enough space left in ringbuffer!
		// NOTE(review): this is the writer overrunning the reader, which is
		// usually called an overflow; the "underflow" wording matches the
		// mAudioUnderflows counter, so confirm the intended terminology
		// before renaming either.
		fprintf(stderr, "audio underflow\n");
		mAudioUnderflows++;
		return;
	}

	sampletime wnext = w + numSamples;
	// Wrap the absolute write position into a ring index.
	unsigned wu = (unsigned)(w % length);
	//printf("writing %u samples to %llu\n", numSamples, wnext);

	// Interleave: channel c of frame f lands at ring slot
	// (wu + c + f*mAudioChans) mod length.
	for (unsigned c=0; c < channels; c++) {
		float * src = io.outBuffer(c);	// non-interleaved per-channel output
		float * dst = &mAudioRing[0];
		unsigned wc = wu + c;
		unsigned srcIdx = 0;
		while (srcIdx < numAudioFrames) {
			dst[wc] = src[srcIdx];
			srcIdx++;
			// Step one full interleaved frame; wrap at the ring's end.
			wc += mAudioChans;
			if (wc >= length) wc -= length;
		}
	}
	// update write head position
	// (published last so the reader never sees samples before they are written)
	mAudioWriteIndex = wnext;
}
// Render one audio block: for every listener, accumulate the spatialized
// contribution of every sound source. Supports per-sample or per-buffer
// processing, and three doppler modes (none, symmetrical, physical).
// Sources are delay lines read at a distance-dependent offset.
void AudioScene::render(AudioIOData& io) {
	const int numFrames = io.framesPerBuffer();
	double sampleRate = io.framesPerSecond();

	// Output buffers are accumulated into below, so clear them first.
	io.zeroOut();

	// iterate through all listeners adding contribution from all sources
	for(unsigned il=0; il<mListeners.size(); ++il){
		Listener& l = *mListeners[il];
		Spatializer* spatializer = l.mSpatializer;
		spatializer->prepare();

		// update listener history data:
		l.updateHistory(numFrames);

		// iterate through all sound sources
		for(Sources::iterator it = mSources.begin(); it != mSources.end(); ++it){
			SoundSource& src = *(*it);

			// scalar factor to convert distances into delayline indices
			// (0 when doppler is off, so distance adds no delay)
			double distanceToSample = 0;
			if(src.dopplerType() == DOPPLER_SYMMETRICAL)
				distanceToSample = sampleRate / mSpeedOfSound;

			if(!src.usePerSampleProcessing()) //if our src is using per sample processing we will update this in the frame loop instead
				src.updateHistory();

			if(mPerSampleProcessing) //audioscene per sample processing
			{
				// iterate time samples
				for(int i=0; i < numFrames; ++i){

					Vec3d relpos;	// source position relative to listener

					if(src.usePerSampleProcessing() && il == 0) //if src is using per sample processing, we can only do this for the first listener (TODO: better design for this)
					{
						src.updateHistory();
						src.onProcessSample(i);
						relpos = src.posHistory()[0] - l.posHistory()[0];

						if(src.dopplerType() == DOPPLER_PHYSICAL)
						{
							// Estimate radial velocity from the last two
							// source positions to shift the read rate.
							// NOTE(review): prevDistance mixes posHistory()[1]
							// of the source with posHistory()[0] of the
							// listener — confirm the index asymmetry is
							// intentional.
							double currentDist = relpos.mag();
							double prevDistance = (src.posHistory()[1] - l.posHistory()[0]).mag();
							double sourceVel = (currentDist - prevDistance)*sampleRate; //positive when moving away, negative moving toward
							if(sourceVel == -mSpeedOfSound) sourceVel -= 0.001; //prevent divide by 0 / inf freq
							distanceToSample = fabs(sampleRate / (mSpeedOfSound + sourceVel));
						}
					}
					else {
						// compute interpolated source position relative to listener
						// TODO: this tends to warble when moving fast
						double alpha = double(i)/numFrames;

						// moving average:
						// cheaper & slightly less warbly than cubic,
						// less glitchy than linear
						// (4 history taps weighted (1-alpha), 1, 1, alpha;
						// total weight 3, hence the /3.0)
						relpos = (
							(src.posHistory()[3]-l.posHistory()[3])*(1.-alpha)
							+ (src.posHistory()[2]-l.posHistory()[2])
							+ (src.posHistory()[1]-l.posHistory()[1])
							+ (src.posHistory()[0]-l.posHistory()[0])*(alpha)
						)/3.0;
					}

					//Compute distance in world-space units
					double dist = relpos.mag();

					// Compute how many samples ago to read from buffer
					// Start with time delay due to speed of sound
					double samplesAgo = dist * distanceToSample;

					// Add on time delay (in samples) - only needed if the source is rendered per buffer
					if(!src.usePerSampleProcessing())
						samplesAgo += (numFrames-i);

					// Is our delay line big enough?
					if(samplesAgo <= src.maxIndex()){
						double gain = src.attenuation(dist);
						//This seemed to get the same sample per block
						// float s = src.readSample(samplesAgo) * gain;
						//reading samplesAgo-i causes a discontinuity
						float s = src.readSample(samplesAgo-i-1) * gain;
						// s = src.presenceFilter(s); //TODO: causing stopband ripple here, why?
						spatializer->perform(io, src,relpos, numFrames, i, s);
					}

				} //end for each frame
			} //end per sample processing
			else //more efficient, per buffer processing for audioscene (does not work well with doppler)
			{
				// One relative position, distance and gain for the whole
				// block, then a per-frame delay-line read into mBuffer.
				Vec3d relpos = src.pose().pos() - l.pose().pos();
				double distance = relpos.mag();
				double gain = src.attenuation(distance);

				for(int i = 0; i < numFrames; i++)
				{
					double readIndex = distance * distanceToSample;
					readIndex += (numFrames - i - 1);
					mBuffer[i] = gain * src.readSample(readIndex);
				}
				spatializer->perform(io, src, relpos, numFrames, &mBuffer[0]);
			}

		} //end for each source
		spatializer->finalize(io);
	} // end for each listener
}