/// Render one buffer of audio: mix every SoundSource into every Listener.
///
/// For each listener, each source's signal is read from its delay line at an
/// offset derived from source-listener distance (optionally Doppler-shifted),
/// attenuated by distance, then handed to the listener's Spatializer for
/// panning/decoding. Output channels of `io` are zeroed first, then summed into.
///
/// @param io  Audio I/O buffers for this callback; provides frame count and
///            sample rate, and receives the spatialized output.
void AudioScene::render(AudioIOData& io) {
	const int numFrames = io.framesPerBuffer();
	double sampleRate = io.framesPerSecond();

	io.zeroOut();

	// iterate through all listeners adding contribution from all sources
	for(unsigned il=0; il<mListeners.size(); ++il){
		Listener& l = *mListeners[il];
		Spatializer* spatializer = l.mSpatializer;
		spatializer->prepare();

		// update listener history data:
		l.updateHistory(numFrames);

		// iterate through all sound sources
		for(Sources::iterator it = mSources.begin(); it != mSources.end(); ++it){
			SoundSource& src = *(*it);

			// scalar factor to convert distances into delayline indices
			double distanceToSample = 0;
			if(src.dopplerType() == DOPPLER_SYMMETRICAL)
				distanceToSample = sampleRate / mSpeedOfSound;

			// if our src is using per-sample processing we will update this
			// in the frame loop instead
			if(!src.usePerSampleProcessing())
				src.updateHistory();

			if(mPerSampleProcessing){ // audioscene per-sample processing
				// iterate time samples
				for(int i=0; i < numFrames; ++i){
					Vec3d relpos;

					// if src is using per-sample processing, we can only do this
					// for the first listener (TODO: better design for this)
					if(src.usePerSampleProcessing() && il == 0){
						src.updateHistory();
						src.onProcessSample(i);
						relpos = src.posHistory()[0] - l.posHistory()[0];

						if(src.dopplerType() == DOPPLER_PHYSICAL){
							double currentDist = relpos.mag();
							double prevDistance = (src.posHistory()[1] - l.posHistory()[0]).mag();

							// radial velocity: positive when moving away,
							// negative when moving toward the listener
							double sourceVel = (currentDist - prevDistance)*sampleRate;

							// FIX: the original guarded against divide-by-zero with an
							// exact floating-point equality (sourceVel == -mSpeedOfSound),
							// which almost never fires; a velocity merely *near*
							// -mSpeedOfSound still yields a near-zero denominator and an
							// inf/huge delay factor. Clamp the denominator's magnitude
							// instead. (Sign is irrelevant: fabs() is applied below, so
							// the old exact-equality case produces the identical value.)
							double denom = mSpeedOfSound + sourceVel;
							if(fabs(denom) < 0.001) denom = (denom < 0.) ? -0.001 : 0.001;
							distanceToSample = fabs(sampleRate / denom);
						}
					}
					else {
						// compute interpolated source position relative to listener
						// TODO: this tends to warble when moving fast
						double alpha = double(i)/numFrames;

						// moving average over the 4-entry position history:
						// cheaper & slightly less warbly than cubic,
						// less glitchy than linear
						// (weights (1-alpha) + 1 + 1 + alpha sum to 3, hence /3.0)
						relpos = (
							  (src.posHistory()[3]-l.posHistory()[3])*(1.-alpha)
							+ (src.posHistory()[2]-l.posHistory()[2])
							+ (src.posHistory()[1]-l.posHistory()[1])
							+ (src.posHistory()[0]-l.posHistory()[0])*(alpha)
						)/3.0;
					}

					// Compute distance in world-space units
					double dist = relpos.mag();

					// Compute how many samples ago to read from the delay line,
					// starting with the time delay due to speed of sound
					double samplesAgo = dist * distanceToSample;

					// Add on block latency (in samples) — only needed if the
					// source was written to its delay line per buffer
					if(!src.usePerSampleProcessing())
						samplesAgo += (numFrames-i);

					// Is our delay line big enough?
					if(samplesAgo <= src.maxIndex()){
						double gain = src.attenuation(dist);

						// This seemed to get the same sample per block:
						//   float s = src.readSample(samplesAgo) * gain;
						// and reading samplesAgo-i causes a discontinuity
						float s = src.readSample(samplesAgo-i-1) * gain;

						// s = src.presenceFilter(s); // TODO: causing stopband ripple here, why?
						spatializer->perform(io, src, relpos, numFrames, i, s);
					}
				} // end for each frame
			} // end per-sample processing
			else {
				// more efficient, per-buffer processing for audioscene
				// (does not work well with doppler)
				Vec3d relpos = src.pose().pos() - l.pose().pos();
				double distance = relpos.mag();
				double gain = src.attenuation(distance);

				// NOTE(review): unlike the per-sample path, this path does not
				// bounds-check readIndex against src.maxIndex() — it appears to
				// rely on readSample() clamping internally; confirm.
				for(int i = 0; i < numFrames; i++){
					double readIndex = distance * distanceToSample;
					readIndex += (numFrames - i - 1);
					mBuffer[i] = gain * src.readSample(readIndex);
				}
				spatializer->perform(io, src, relpos, numFrames, &mBuffer[0]);
			}
		} // end for each source

		spatializer->finalize(io);
	} // end for each listener
}