Example #1
0
	// Audio callback: synthesizes two sine partials — the 5th and 4th
	// harmonics of a 55 Hz fundamental — publishes each stereo frame to the
	// ring buffer, and writes attenuated copies to the hardware outputs.
	virtual void onSound(AudioIOData& io){
	
		// Per-sample phase increment for a 55 Hz fundamental
		// (cycles per sample = frequency / sample rate)
		double phaseInc = 55/io.framesPerSecond();
	
		while(io()){
		
			// Advance the shared oscillator phase and wrap into [0, 1)
			phase += phaseInc;
			if(phase > 1) phase -= 1;

			// Two sinusoids: 5th harmonic (cosine) and 4th harmonic (sine)
			float s1 = cos(5*phase * 2*M_PI);
			float s2 = sin(4*phase * 2*M_PI);

			// Publish the frame to the ring buffer.
			// This call must never block or wait on a lock held by another
			// thread: the audio thread has to keep writing regardless of
			// which readers are consuming the buffer.
			ring.write(Vec2f(s1, s2));

			// Scale down and send to the two output channels
			io.out(0) = s1*0.2;
			io.out(1) = s2*0.2;
		}
	}
// Render one buffer of spatialized audio: every SoundSource's contribution
// is mixed into every Listener's output via that listener's Spatializer.
// Two render paths exist per source: an accurate per-sample path (position
// interpolation, optional physical Doppler) and a cheaper per-buffer path.
// NOTE(review): sources using per-sample processing are only rendered for
// the first listener (il == 0) — see the TODO below.
void AudioScene::render(AudioIOData& io) {
	const int numFrames = io.framesPerBuffer();
	double sampleRate = io.framesPerSecond();
	// Clear the output channels; spatializer calls below add contributions.
	io.zeroOut();

	// iterate through all listeners adding contribution from all sources
	for(unsigned il=0; il<mListeners.size(); ++il){
		Listener& l = *mListeners[il];

		Spatializer* spatializer = l.mSpatializer;
		spatializer->prepare();

		// update listener history data:
		l.updateHistory(numFrames);

		// iterate through all sound sources
		for(Sources::iterator it = mSources.begin(); it != mSources.end(); ++it){
			SoundSource& src = *(*it);

			// scalar factor to convert distances into delayline indices
			// (samples per world-space unit). Stays 0 — i.e. no propagation
			// delay — unless a Doppler mode enables it; DOPPLER_PHYSICAL
			// recomputes it per sample inside the frame loop below.
			double distanceToSample = 0;
			if(src.dopplerType() == DOPPLER_SYMMETRICAL)
				distanceToSample = sampleRate / mSpeedOfSound;

			if(!src.usePerSampleProcessing()) //if our src is using per sample processing we will update this in the frame loop instead
				src.updateHistory();

			if(mPerSampleProcessing) //audioscene per sample processing
			{
				// iterate time samples
				for(int i=0; i < numFrames; ++i){

					// Source position relative to the listener for this sample
					Vec3d relpos;

					if(src.usePerSampleProcessing() && il == 0) //if src is using per sample processing, we can only do this for the first listener (TODO: better design for this)
					{
						src.updateHistory();
						src.onProcessSample(i);

						// Most recent source/listener positions from history
						relpos = src.posHistory()[0] - l.posHistory()[0];

						if(src.dopplerType() == DOPPLER_PHYSICAL)
						{
							// Radial velocity estimated from the change in
							// distance over one sample period.
							double currentDist = relpos.mag();
							double prevDistance = (src.posHistory()[1] - l.posHistory()[0]).mag();
							double sourceVel = (currentDist - prevDistance)*sampleRate; //positive when moving away, negative moving toward

							// NOTE(review): exact == on doubles will almost
							// never trigger; presumably intended to guard the
							// division below — confirm whether a tolerance
							// check was meant.
							if(sourceVel == -mSpeedOfSound) sourceVel -= 0.001; //prevent divide by 0 / inf freq

							// Doppler: approaching sources compress the
							// effective samples-per-unit-distance factor.
							distanceToSample = fabs(sampleRate / (mSpeedOfSound + sourceVel));
						}
					}
					else
					{
						// compute interpolated source position relative to listener
						// TODO: this tends to warble when moving fast
						double alpha = double(i)/numFrames;

						// moving average:
						// cheaper & slightly less warbly than cubic,
						// less glitchy than linear
						// Blends the 4 most recent history entries; the two
						// endpoints are crossfaded by alpha, the middle two
						// are fixed, and the sum is normalized by 3.
						relpos = (
									(src.posHistory()[3]-l.posHistory()[3])*(1.-alpha) +
								(src.posHistory()[2]-l.posHistory()[2]) +
								(src.posHistory()[1]-l.posHistory()[1]) +
								(src.posHistory()[0]-l.posHistory()[0])*(alpha)
								)/3.0;
					}

					//Compute distance in world-space units
					double dist = relpos.mag();

					// Compute how many samples ago to read from buffer
					// Start with time delay due to speed of sound
					double samplesAgo = dist * distanceToSample;

					// Add on time delay (in samples) - only needed if the source is rendered per buffer
					if(!src.usePerSampleProcessing())
						samplesAgo += (numFrames-i);

					// Is our delay line big enough? If not, the source is
					// simply skipped for this sample (too far away to hear).
					if(samplesAgo <= src.maxIndex()){
						// Distance-based amplitude attenuation
						double gain = src.attenuation(dist);

						//This seemed to get the same sample per block
						//   float s = src.readSample(samplesAgo) * gain;

						//reading samplesAgo-i causes a discontinuity
						float s = src.readSample(samplesAgo-i-1) * gain;

						// s = src.presenceFilter(s); //TODO: causing stopband ripple here, why?
						spatializer->perform(io, src,relpos, numFrames, i, s);
					}

				} //end for each frame
			} //end per sample processing

			else //more efficient, per buffer processing for audioscene (does not work well with doppler)
			{
				// Per-buffer path: one position/gain for the whole buffer.
				Vec3d relpos = src.pose().pos() - l.pose().pos();
				double distance = relpos.mag();
				double gain = src.attenuation(distance);

				// Fill the scratch buffer with delayed, attenuated samples.
				// NOTE(review): assumes mBuffer holds at least numFrames
				// samples — confirm it is sized against framesPerBuffer().
				for(int i = 0; i < numFrames; i++)
				{
					double readIndex = distance * distanceToSample;
					readIndex += (numFrames - i - 1);
					mBuffer[i] = gain * src.readSample(readIndex);
				}

				// Spatialize the whole buffer in one call
				spatializer->perform(io, src, relpos, numFrames, &mBuffer[0]);
			}

		} //end for each source

		// Let the spatializer flush/mix its accumulated output
		spatializer->finalize(io);

	} // end for each listener
}