Example #1
0
// Audio callback: applies independent left/right gains to the line input,
// then demonstrates two additional passes over the same block of frames.
void audioCB(AudioIOData& io){

	UserData& user = *(UserData *)io.user();
	const float gainL = user.ampL;	// left-channel gain from user data
	const float gainR = user.ampR;	// right-channel gain from user data

	// First pass: address each frame with an explicit index.
	const int numFrames = io.framesPerBuffer();
	for(int frame = 0; frame < numFrames; ++frame){

		float input = io.in(0, frame);	// line-in or microphone sample

		io.out(0, frame) = input * gainL;	// left  output sample
		io.out(1, frame) = input * gainR;	// right output sample
	}

	// Second pass: use the built-in frame iterator instead of an index.
	while(io()){
		io.out(0) *= 0.5;	// halve left  output sample
		io.out(1) *= 0.5;	// halve right output sample
	}

	// The frame iterator must be rewound before it can be reused.
	io.frame(0);
	while(io()){
		io.out(0) *= 2;		// double left  output sample
		io.out(1) *= 2;		// double right output sample
	}
}
Example #2
0
// Audio callback: copies the mono input to both output channels,
// scaled by per-channel gains, using explicit frame indexing.
void audioCB(AudioIOData& io){

	UserData& user = *(UserData *)io.user();
	const float gainL = user.ampL;	// left-channel gain from user data
	const float gainR = user.ampR;	// right-channel gain from user data

	// Walk every frame in this block by index.
	const int numFrames = io.framesPerBuffer();
	for(int frame = 0; frame < numFrames; ++frame){

		float input = io.in(0, frame);	// line-in or microphone sample

		io.out(0, frame) = input * gainL;	// left  output sample
		io.out(1, frame) = input * gainR;	// right output sample
	}
}
// Our main audio callback: same per-channel gain processing, but driven
// by the AudioIOData frame iterator rather than an explicit index.
void audioCB(AudioIOData& io){

	UserData& user = *(UserData *)io.user();
	const float gainL = user.ampL;	// left-channel gain from user data
	const float gainR = user.ampR;	// right-channel gain from user data

	while(io()){	// advance through every frame in the block

		float input = io.in(0);		// line-in or microphone sample

		io.out(0) = input * gainL;	// left  output sample
		io.out(1) = input * gainR;	// right output sample
	}
}
  // Per-block audio handler: positions the sound source at the poked
  // vertex, synthesizes smoothed pink noise whose level tracks how far
  // that vertex is stretched from rest, and raises the poke flag when
  // the input signal peaks above a threshold.
  virtual void onSound(AudioIOData& io) {
    static cuttlebone::Stats fps("onSound()");  // block-rate statistics
    static float currentNoiseAmplitude = 0;     // smoothed level; persists across calls

    fps(io.secondsPerBuffer());

    // Move our audio source to the currently poked vertex.
    tap.pose(Pose(state->p[state->pokedVertex], Quatf()));

    // Target noise amplitude: displacement of the poked vertex from its
    // rest position, offset by 0.45 and clamped into [0, 0.99].
    float target =
      (state->p[state->pokedVertex] - state->pokedVertexRest).mag() - 0.45;
    if (target > 0.99)
      target = 0.99;
    else if (target < 0)
      target = 0;

    float peakInput = 0.0f;  // largest |input| seen in this block

    while (io()) {
      // Track the peak input amplitude over the block.
      float sample = fabs(io.in(0));
      if (sample > peakInput) peakInput = sample;

      // One-pole smoothing toward the target level
      // (per-sample coefficient 0.0001 chosen so it sounds good).
      currentNoiseAmplitude += (target - currentNoiseAmplitude) * 0.0001f;

      // Write the noise through our spatialized audio source.
      // (Direct-output alternative:
      //  io.out(0) = io.out(1) = pinkNoise() * target * state->audioGain;)
      tap.writeSample(pinkNoise() * currentNoiseAmplitude * state->audioGain);
    }

    // Poke the blob when the input peak crosses the threshold (~ -3 dB).
    if (peakInput > 0.707f) shouldPoke = true;

    // Update the listener pose, then render all sources into the output.
    listener()->pose(state->pose);
    scene()->render(io);
  }