Code example #1
File: testApp.cpp Project: SFPC/Audio-Unit-Workshop
//--------------------------------------------------------------
void testApp::setup(){
    player.setFile(ofToDataPath("Willow Beats - TAWK TAKEOUT (Tawk Tomahawk Remixed) - 08 Ocelot (Willow Beats Remix).mp3"));
    
    sampler = ofxAudioUnitSampler('aumu', 'dls ', 'appl');
    
    
    delay = ofxAudioUnit('aufx', 'dely', 'appl');
    delay.printParameterList();
//    as printed by printParameterList() above (likely [min : max : default]):
//    [1] delay time [0.0001 : 2 : 1]
//    [2] feedback [-99.9 : 99.9 : 50]
    delay.setParameter(1, kAudioUnitScope_Global, 2);    // delay time: 2 seconds
    delay.setParameter(2, kAudioUnitScope_Global, 99.9); // feedback: 99.9%
    
    filter = ofxAudioUnit('aufx', 'filt', 'appl');
    
    player.connectTo(filter);
    input.connectTo(delay);
    
    mixer.setInputBusCount(3);
    sampler.connectTo(mixer, 0);
    filter.connectTo(mixer, 1);
    delay.connectTo(mixer, 2);
    
    mixer.connectTo(output);
    
    input.start();
    output.start();
    player.loop();
}
Code example #2
void ofApp::setup(){
    unit = ofxAudioUnit('aumu', 'ncut', 'TOGU');
    unit.showUI();
    unit.connectTo(output);
    output.start();

    midiIn.createMidiDestination("ofxAudioUnitAutomation");
    midiIn.routeMidiTo(unit);
    midiOut.openPort("ofxAudioUnitAutomation");
}
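
The snippet opens a virtual MIDI port but doesn't show it being used. Below is a minimal sketch of driving the unit through that port, assuming ofxMidi's sendControlChange() and a hypothetical update() in the same app (not part of the original example):

void ofApp::update(){
    // sweep CC 1 (mod wheel) on MIDI channel 1; the unit receives it via
    // the "ofxAudioUnitAutomation" virtual destination opened in setup()
    int value = (int)ofMap(sin(ofGetElapsedTimef()), -1, 1, 0, 127);
    midiOut.sendControlChange(1, 1, value);
}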
Code example #3
void ofxManagedAudioUnit::setup(string _unitName, OSType type, OSType subType, OSType manufacturer,
                                string _className) {
    unitName = _unitName;
    unitSlug = _unitName;
    ofStringReplace(unitSlug, " ", "_");
    unit = ofxAudioUnit(type, subType, manufacturer);
    this->type = stringify(type) == "aumu" ? AU_TYPE_SYNTH : AU_TYPE_UNIT;
    className = _className;
    if(className == "") {
        className = stringify(type, subType, manufacturer);
    }
}
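
stringify() is not defined in this snippet. A sketch of OSType-to-string helpers consistent with the calls above (an assumption about the real helpers, not the project's actual code):

// unpack a FourCharCode such as 'aumu' into a readable 4-character string
static string stringify(OSType code) {
    char chars[5] = {
        char((code >> 24) & 0xFF),
        char((code >> 16) & 0xFF),
        char((code >> 8) & 0xFF),
        char(code & 0xFF),
        '\0'
    };
    return string(chars);
}

// the three-argument overload used above presumably joins all three codes
static string stringify(OSType type, OSType subType, OSType manufacturer) {
    return stringify(type) + "_" + stringify(subType) + "_" + stringify(manufacturer);
}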
Code example #4
//--------------------------------------------------------------
void testApp::setup(){
    frameRate = 30;
    ofSetFrameRate(frameRate);
    ofSetVerticalSync(true);
    
    _attention.x = ofRandomf();
    _meditation.x = ofRandomf();
    
    audioLevelsPreview.allocate(320, 240);
    frame.allocate(640, 480, OF_IMAGE_COLOR);
    
    spread = 2.0;
    bRecording = false;
    cam.initGrabber(320, 240);
    
    //
    //  Set up audio
    //
    masterMixer.setInputBusCount(2);
    
    reverb = ofxAudioUnit(kAudioUnitType_Effect, kAudioUnitSubType_MatrixReverb);
    varispeed = ofxAudioUnit(kAudioUnitType_FormatConverter, kAudioUnitSubType_Varispeed);
    lowpass = ofxAudioUnit(kAudioUnitType_Effect, kAudioUnitSubType_LowPassFilter);
    delay = ofxAudioUnit(kAudioUnitType_Effect, kAudioUnitSubType_Delay);
    distortion = ofxAudioUnit(kAudioUnitType_Effect, kAudioUnitSubType_Distortion);
    
    
//    -BrainWave01-Both.wav -- do not use
//    -BrainWave02-Med.wav - this sound can't be loud 
//    -BrainWave03-Attn.wav - to use with delay. conflicts with 07/09
//    -BrainWave04-Both.wav - this can be relatively constant
//    -BrainWave05-Attn.wav - not sure this one works. DNU for now
//    -BrainWave06-Attn.wav - sort of conflicts with sound 04. BETTER AT LOWER ATT
//    -BrainWave07-Attn.wav - sort of conflicts with sound 03. delay can be used on this as well BETTER AT HIGHER ATT
//    -BrainWave08-Med.wav  - sort of conflicts with 15, 12, 13
//    -BrainWave09-Attn.wav - sort of conflicts with 07, 03. 
//    -BrainWave10-Attn.wav - MID ATTN
//    -BrainWave11-Med.wav - very calm moments 
//    -BrainWave12-Med.wav - sort of conflicts with 15
//    -BrainWave13-Med.wav - sort of conflicts with 12
//    -BrainWave14-Both.wav - sort of conflicts with 9. HIGH MED
//    -BrainWave15-Med.wav  - conflicts with 12, 13, 8
//    -BRainWave16-Attn.wav - MID ATTN
//    BrainWave17-Attn.wav - this sound can't play with many other sounds (blink?)
    
    
    attention_sounds.push_back("5minsilence.wav");
    attention_sounds.push_back("sounds2/BrainWave03-Attn.wav");
    attention_sounds.push_back("sounds2/BrainWave06-Attn.wav"); // DON'T USE WITH 4
    attention_sounds.push_back("5minsilence.wav");
    attention_sounds.push_back("sounds2/BRainWave16-Attn.wav");
    attention_sounds.push_back("sounds2/BrainWave07-Attn.wav");
    attention_sounds.push_back("sounds2/BrainWave10-Attn.wav");
    attention_sounds.push_back("5minsilence.wav");
    attention_sounds.push_back("sounds2/BrainWave09-Attn.wav");
    attention_sounds.push_back("sounds2/BrainWave17-Attn.wav");
    attention_sounds.push_back("5minsilence.wav");
    
    meditation_sounds.push_back("sounds2/5minsilence.wav");
    meditation_sounds.push_back("sounds2/BrainWave15-Med.wav");
    meditation_sounds.push_back("sounds2/BrainWave11-Med.wav");
    meditation_sounds.push_back("sounds2/5minsilence.wav"); //BrainWave08-Med.wav"); // lower level
    meditation_sounds.push_back("sounds2/BrainWave04-Both.wav");
    meditation_sounds.push_back("sounds2/BrainWave12-Med.wav");
    meditation_sounds.push_back("sounds2/BrainWave14-Both.wav");
    meditation_sounds.push_back("sounds2/BrainWave13-Med.wav"); 
    meditation_sounds.push_back("sounds2/5minsilence.wav");
    
    
    attentionVolume = new float[ attention_sounds.size() ];
    attentionLoops = new ofxAudioUnitFilePlayer[ attention_sounds.size() ];
    attentionMixer.setInputBusCount(attention_sounds.size());
    attentionMixer.connectTo(masterMixer, 0);
    for(int i=0; i<attention_sounds.size(); i++) {
        string fname = attention_sounds[i];
        attentionLoops[i].setFile(ofFilePath::getAbsolutePath(fname));
        attentionLoops[i].connectTo(attentionMixer, i);
        attentionLoops[i].loop();
        attentionMixer.setInputVolume(0, i); // start each bus muted
    }
    
    meditationVolume = new float[ meditation_sounds.size() ];
    meditationLoops = new ofxAudioUnitFilePlayer[ meditation_sounds.size() ];
    meditationMixer.setInputBusCount(meditation_sounds.size());
    meditationMixer.connectTo(masterMixer, 1);
    for(int i=0; i<meditation_sounds.size(); i++) {
        string fname = meditation_sounds[i];
        meditationLoops[i].setFile(ofFilePath::getAbsolutePath(fname));
        meditationLoops[i].connectTo(meditationMixer, i);
        meditationLoops[i].loop();
        meditationMixer.setInputVolume(0, i); // start each bus muted
    }
    
    reverb.printParameterList();
    varispeed.printParameterList();
    lowpass.printParameterList();
    delay.printParameterList();
    distortion.printParameterList();
        
    masterMixer
//        .connectTo(reverb)
//        .connectTo(lowpass)
//        .connectTo(delay)
//        .connectTo(distortion)
//        .connectTo(varispeed)
        .connectTo(output);
    output.start();
    
    
    debugMessage    << "Press 1 for reverb settings\n"
                    << "Press 2 for varispeed settings\n"
                    << "Press 3 for lowpass settings\n"
                    << "Press 4 for delay settings\n"
                    << "Press 5 for distortion settings\n";
    
    
    AudioUnitSetParameter(reverb.getUnit(), kReverbParam_DryWetMix, kAudioUnitScope_Global, 0, 0, 0);
    AudioUnitSetParameter(varispeed.getUnit(), kVarispeedParam_PlaybackRate, kAudioUnitScope_Global, 0, 1, 0);
    AudioUnitSetParameter(lowpass.getUnit(), kLowPassParam_CutoffFrequency, kAudioUnitScope_Global, 0, 6900, 0);
    AudioUnitSetParameter(delay.getUnit(), kDelayParam_WetDryMix, kAudioUnitScope_Global, 0, 0, 0);
    AudioUnitSetParameter(distortion.getUnit(), kDistortionParam_FinalMix, kAudioUnitScope_Global, 0, 0, 0);
    
    //
    // Set up recorder
    //
    sampleRate = 44100;
    channels = 2;
    vidRecorder.setVideoCodec("mpeg4");
    vidRecorder.setVideoBitrate("800k");
    vidRecorder.setAudioCodec("mp3");
    vidRecorder.setAudioBitrate("192k");
    
    
    
    //
    //  SoundStream setup
    //
    soundStream.listDevices();
    //soundStream.setDeviceID(5);
    soundStream.setup(this, 0, channels, sampleRate, 256, 4);

    
    
    ofAddListener(httpUtils.newResponseEvent,this,&testApp::newResponse);
	httpUtils.start();
    
    
    setupUI();
}
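
The sound stream above opens two input channels with this app as the listener, presumably so incoming audio can be handed to the recorder. A sketch of the matching callback, assuming ofxVideoRecorder's addAudioSamples(); the original's audioIn() isn't shown here:

void testApp::audioIn(float * input, int bufferSize, int nChannels){
    // hand incoming microphone samples to the recorder while recording
    if(bRecording){
        vidRecorder.addAudioSamples(input, bufferSize, nChannels);
    }
}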
Code example #5
//--------------------------------------------------------------
void testApp::setup(){
    setupComplete = false;
    
    //graphics
    ofSetFrameRate(60);
    ofSetVerticalSync(true);
    ofEnableAlphaBlending();
    
    bUseFbo = false;
    bFlip = true;
    
    //leap setup
	leap.open();
    
    
	cam.setOrientation(ofPoint(-20, 0, 0));
	l1.setPosition(200, 300, 50);
	l2.setPosition(-200, -200, 50);
	glEnable(GL_DEPTH_TEST);
    glEnable(GL_NORMALIZE);
    
    ofSetFullscreen(true);
    fadesFbo.allocate(ofGetWidth(), ofGetHeight(), GL_RGBA32F_ARB);
    fadesFbo.begin();
	ofClear(255,255,255, 20);
    fadesFbo.end();
    
    //audio
    synth = ofxAudioUnitSampler('aumu', 'CaC2', 'CamA');
    synth.getParameters(false, false);
    synth.setBank(0, 0);
    prog = 0;
    synth.setProgram(prog);
    
    limiter = ofxAudioUnit('aufx', 'mcmp', 'appl');
    limiter.setParameter(kMultibandCompressorParam_Threshold1, kAudioUnitScope_Global, -10.0);
    limiter.setParameter(kMultibandCompressorParam_Headroom1, kAudioUnitScope_Global, 0.1);
    limiter.setParameter(kMultibandCompressorParam_Threshold2, kAudioUnitScope_Global, -10.0);
    limiter.setParameter(kMultibandCompressorParam_Headroom2, kAudioUnitScope_Global, 0.1);
    limiter.setParameter(kMultibandCompressorParam_Threshold3, kAudioUnitScope_Global, -10.0);
    limiter.setParameter(kMultibandCompressorParam_Headroom3, kAudioUnitScope_Global, 0.1);
    limiter.setParameter(kMultibandCompressorParam_Threshold4, kAudioUnitScope_Global, -10.0);
    limiter.setParameter(kMultibandCompressorParam_Headroom4, kAudioUnitScope_Global, 0.1);
    
    synth >> tap >> limiter >> output;
    output.start();
    
    tap.getSamples(samples);
    bufferSize = samples.size();
    buffer = new float[bufferSize];
    spectrum = new float[bufferSize/2];
    
    ofSoundStreamSetup(2, 0, 44100, bufferSize, 4);
    fftCalc.setup(bufferSize, bufferSize/8, 44100);
    
    int DorianScaleDegrees[] = {0, 2, 3, 5, 7, 9, 10};
    dorian.assign(DorianScaleDegrees, DorianScaleDegrees+7);
    
    
    //postproc
    post.init(1920, 1080);
    post.createPass<FxaaPass>()->setEnabled(true);
    post.createPass<BloomPass>()->setEnabled(true);
    post.createPass<KaleidoscopePass>()->setEnabled(true);
    
    //gui
    
    gui = new ofxUICanvas(0,0,320,640);
    gui->addWidgetDown(new ofxUILabel("LEAP HACK DAY", OFX_UI_FONT_LARGE));
    gui->addSpacer(304, 2);
    gui->addWidgetDown(new ofxUISlider(304,16,0.0,255.0,100.0,"BACKGROUND VALUE"));
    gui->addWidgetDown(new ofxUIToggle(32, 32, false, "FULLSCREEN"));
    gui->addSpacer(304, 2);
    gui->addWidgetDown(new ofxUIButton("LAST PATCH", false, 16, 16));
    gui->addWidgetDown(new ofxUIButton("NEXT PATCH", false, 16, 16));
    gui->addSlider("PROGRAM", 0, 127, &prog, 95, 16);
    gui->addSlider("XyPad1x", 0.0,1.0, &XyPad1x, 304, 16);
    gui->addSlider("XyPad1y", 0.0,1.0, &XyPad1y, 304, 16);
    gui->addSlider("XyPad2x", 0.0,1.0, &XyPad2x, 304, 16);
    gui->addSlider("XyPad2y", 0.0,1.0, &XyPad2y, 304, 16);
    gui->addSpacer(304, 2);
    gui->addWidgetDown(new ofxUILabel("WAVEFORM DISPLAY", OFX_UI_FONT_MEDIUM));
	gui->addWidgetDown(new ofxUIWaveform(304, 64, buffer, bufferSize, -1.0, 1.0, "WAVEFORM"));
    gui->addWidgetDown(new ofxUILabel("SPECTRUM DISPLAY", OFX_UI_FONT_MEDIUM));
    gui->addWidgetDown(new ofxUISpectrum(304, 64, spectrum, bufferSize/2, 0.0, 1.0, "SPECTRUM"));
    gui->addSpacer(304, 2);
    gui->addSlider("fade amnt", 1.0, 200.0, &fadeAmnt, 304, 16);
    gui->addToggle("kaleidascope", bKaleidoscope);
    
    ofAddListener(gui->newGUIEvent, this, &testApp::guiEvent);
    gui->loadSettings("GUI/guiSettings.xml");

    setupComplete = true;
}
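
The tap is read once during setup only to size the buffers, but the GUI's waveform widget points at buffer, which has to be refreshed each frame. A sketch of an update() that does this (the original's update() isn't shown; the spectrum/FFT step is omitted since fftCalc's API isn't visible in this snippet):

void testApp::update(){
    // refresh the array the waveform widget points at
    tap.getSamples(samples);
    for(int i = 0; i < bufferSize && i < (int)samples.size(); i++){
        buffer[i] = samples[i];
    }
}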
Code example #6
File: testApp.cpp Project: jasonlevine/ofxAudioUnit
//--------------------------------------------------------------
void testApp::setup(){
	
//	This example will show you how to generate your own sound
//	programmatically. To do this, you'll be using render callbacks.
	
//	A render callback works like this : when the output unit begins
//	to pull audio through an audio unit chain, each unit pulls from
//	the audio source before it. That source can be either another
//	unit or a render callback.

//	A render callback is a function in your program that provides
//	samples to an Audio Unit. You can create samples however you
//	like. For instance, you can use sin() to create sine waves, or
//	write uncompressed samples you have retrieved from an audio file.
//	You could even do something weird like using video or image data 
//	as your samples. 
	
//	Note that if you're being experimental, it's worth keeping your
//	samples in a sensible range (i.e. -1 to 1). Audio Units will let
//	you seriously overload your computer's audio hardware, which could
//	damage your speakers if you have your volume set high.
	
//	There are a few important catches, however. Render callbacks
//	work on what's called a realtime thread. This means that your
//	render callback must finish fast. Very fast. You won't have time
//	to read data off the hard drive or do any heavy processing.
//	If you want to do anything complicated, you should have a buffer
//	ready. If your render callback takes too long, you will hear silence
//	or glitches in the audio.
	
//	Another catch is that the render callback must be a static function,
//	not a member of your testApp. What this means is that you'll have to
//	pass a reference to your app (e.g. "this") if you want to access its variables.
//	Your callback gets called by an Audio Unit (you don't call it yourself).
	
//	One more catch is that (since the callback is being called by the
//	Audio Unit on a realtime thread) you'll have to deal with things like
//	mutexes if you want to share variables between the callback and the rest
//	of your app. See ofxAudioUnitTap.cpp for an example of this.

//	In this example, we'll just be rendering simple waveforms. If you want
//	to do more complicated things, search for more info on AURenderCallback.
//	I recommend "Learning Core Audio" by Chris Adamson if you want a
//	book on the subject (and all manner of other Core Audio / Audio Unit
//	things).
	
//	First, let's set up an Audio Unit chain
	
	distortion = ofxAudioUnit(kAudioUnitType_Effect,
							  kAudioUnitSubType_Distortion);
	
	reverb = ofxAudioUnit(kAudioUnitType_Effect,
						  kAudioUnitSubType_MatrixReverb);
	
	distortion >> reverb >> tap >> output;
	
//	Now, we'll tell the distortion unit to get its source samples from
//	our render callback. You need to give the Audio Unit two bits of
//	information: the name of your callback (inputProc) and a pointer
//	to any information you'll want to access inside the callback
//	(inputProcRefCon). I'm passing "this" to the callback for demonstration
//	purposes (we're not actually going to use it).
	
	AURenderCallbackStruct callbackInfo;
	callbackInfo.inputProc = renderPulseSineChord;
	callbackInfo.inputProcRefCon = this;

	distortion.setRenderCallback(callbackInfo);
	
//	Once we tell the output unit to start pulling audio, our callback
//	function will start getting called. Typically, this will be for
//	batches of 512 samples. At a sample rate of 44,100 Hz, this means
//	that our callback will be called about 86 times a second.
	
	output.start();
	
	ofSetVerticalSync(true);
}
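
The callback registered above, renderPulseSineChord, isn't included in this snippet. As a sketch of the required shape, here is a minimal AURenderCallback that fills the buffers with a single 440 Hz sine at an assumed 44.1 kHz sample rate (the real callback presumably renders a pulsing chord, as its name suggests):

static OSStatus renderPulseSineChord(void * inRefCon,
									 AudioUnitRenderActionFlags * ioActionFlags,
									 const AudioTimeStamp * inTimeStamp,
									 UInt32 inBusNumber,
									 UInt32 inNumberFrames,
									 AudioBufferList * ioData)
{
	static double phase = 0;
	const double twoPi = M_PI * 2;
	const double phaseStep = twoPi * 440.0 / 44100.0; // 440 Hz at an assumed 44.1 kHz
	
	// write the same mono sine to each (non-interleaved) channel buffer
	for(UInt32 i = 0; i < ioData->mNumberBuffers; i++) {
		Float32 * samples = (Float32 *)ioData->mBuffers[i].mData;
		double p = phase;
		for(UInt32 frame = 0; frame < inNumberFrames; frame++) {
			samples[frame] = sin(p) * 0.5; // stay well inside -1 to 1
			p = fmod(p + phaseStep, twoPi);
		}
	}
	
	// advance the phase once for the whole batch of frames
	phase = fmod(phase + phaseStep * inNumberFrames, twoPi);
	return noErr;
}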
Code example #7
File: testApp.cpp Project: ruxrux/ofxAudioUnit
//--------------------------------------------------------------
void testApp::setup(){
	
//	This example demonstrates what Audio Unit parameters
//	are and how to change them
	
//	Parameters are user-controllable variables on an Audio
//	Unit which you can change in real time. For example, a
//	Mixer audio unit typically has parameters for volume,
//	panning, etc. These are the parameters that you are
//	changing with ofxAudioUnitMixer's setPan() and
//	setInputVolume() functions.
	
//	However, there are a huge number of parameters that Audio
//	Units make available, and ofxAudioUnit can't make convenient
//	functions for all of them. This example shows you how to
//	programmatically set Audio Unit parameters on the fly.
	
//	These are the audio units we'll use:
	
//	AUVarispeed - lets you change the playback speed of audio
//	going through it, while simultaneously changing the pitch
//	(like slowing down and speeding up a record)
	
	varispeed = ofxAudioUnit(kAudioUnitType_FormatConverter,
							 kAudioUnitSubType_Varispeed);
	
//	AULowPass - a lowpass filter which cuts off high frequencies
//	at a user-controllable cutoff point
	
	lowpass = ofxAudioUnit(kAudioUnitType_Effect,
						   kAudioUnitSubType_LowPassFilter);
	
//	We'll also use an ofxAudioUnitFilePlayer to generate audio,
//	an ofxAudioUnitTap to extract the waveform, and an output
//	to send the resulting audio to your speakers
	
	filePlayer.connectTo(varispeed).connectTo(lowpass).connectTo(tap).connectTo(output);
	
//	First, we'll set the lowpass's resonance setting. All of the
//	Apple-manufactured Audio Units have parameter constants defined
//	which follow a particular naming pattern. The two lowpass
//	parameters are kLowPassParam_Resonance and
//	kLowPassParam_CutoffFrequency.
	
//	The easiest way to find the parameters for your Audio Unit
//	is to type kNameOfYourUnit and let Xcode's auto-complete
//	show you what's available. You can also see all of the parameters
//	defined in <AudioUnit/AudioUnitParameters.h>. It is a good idea
//	to read this header file anyway, as it'll tell you what values 
//	these parameters are expecting. One way to get information
//	on the parameter you want to change is to type it, then Cmd-click
//	on it to take you to its entry in AudioUnitParameters.h
	
//	You set parameters by using the function AudioUnitSetParameter().
//	This function expects a few arguments :

//	inUnit - An AudioUnit struct. You can get this from an ofxAudioUnit
//	by calling getUnit() on it
//	inID - The parameter you want to change
//	inScope - The scope the parameter applies to. This is usually
//	the Global scope. Try the Input or Output scope if Global doesn't
//	work.
//	inElement - The bus you're changing the parameter on. For Global
//	params, it's always 0
//	inValue - The value you're setting the parameter to.
//	inBufferOffsetFrames - If you want to set the parameter in the
//	future, give this an offset in terms of samples (eg. 44100 for
//	1 second). Usually you want this to be 0.
	
//	Here, we're setting the lowpass's resonance to 10
	
	AudioUnitSetParameter(lowpass.getUnit(), kLowPassParam_Resonance,
						  kAudioUnitScope_Global, 0, 10, 0);
	
//	You can also save the state of an Audio Unit's parameters as a
//	preset file. Saving / Loading a preset file is done like this:

//	varispeed.saveCustomPreset("MyPreset");
//	varispeed.loadCustomPreset("MyPreset");
	
//	These functions will look for or create a preset file with the extension
//	".aupreset" in your app's data folder.
	
//	You can also create .aupreset files in Garageband and other DAWs.
//	Usually, these are stored in ~/Library/Audio/Presets/
	
	output.start();
	
	filePlayer.setFile(ofFilePath::getAbsolutePath("chopin.mp3"));
	filePlayer.loop();
	
	ofSetVerticalSync(true);
}
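
The same AudioUnitSetParameter() pattern also works continuously at runtime. A hypothetical usage sketch (not part of the original example), mapping the mouse to the varispeed rate and the lowpass cutoff in update():

void testApp::update(){
	// mouse x (0..width) -> playback rate (0.25x to 4x)
	float rate = ofMap(ofGetMouseX(), 0, ofGetWidth(), 0.25, 4.0);
	AudioUnitSetParameter(varispeed.getUnit(), kVarispeedParam_PlaybackRate,
						  kAudioUnitScope_Global, 0, rate, 0);
	
	// mouse y (0..height) -> cutoff frequency (10 kHz down to 100 Hz)
	float cutoff = ofMap(ofGetMouseY(), 0, ofGetHeight(), 10000, 100);
	AudioUnitSetParameter(lowpass.getUnit(), kLowPassParam_CutoffFrequency,
						  kAudioUnitScope_Global, 0, cutoff, 0);
}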
Code example #8
File: testApp.cpp Project: nightshining/orbs
//--------------------------------------------------------------
void testApp::setup(){
    
    ///   SPHERES SETUP   ///
    
    ofSetVerticalSync(true);
    ofSetFullscreen(true);
    ofSetWindowTitle("M.Romeo");
    ofSetFrameRate(60);
    ofBackground(255);
    ofSetCircleResolution(60);
    
    int objectAmount = 100;
    
    sphere temp; //make temp class!
    
    for (int i = 0; i < objectAmount; i++) {
        
        redObjects.push_back(temp);   
    }
    
    for (int i = 0; i < redObjects.size(); i++) {
        
        position.set(ofRandom(ofGetWidth()), ofRandom(ofGetHeight()), ofRandom(-200, 200));
        
        redObjects[i].setup(position.x, position.y, position.z, ofRandom(5, 30));
        
        //individual noise
        noiseObjects.push_back(ofVec3f(ofRandom(0, 10000), ofRandom(0, 10000), ofRandom(0, 10000)));
    }
    
    ///   AUDIO SETUP   ///
    
    delay = ofxAudioUnit(kAudioUnitType_Effect,
                          kAudioUnitSubType_Delay);
        
    
    //Load a bunch of aif files using ofDirectory. This is very useful when using multiple sound files.
    
    string path = ofToDataPath("");
    
    ofDirectory dir(path);
    dir.allowExt("aif");
    
    //cout << dir.listDir() << endl; //total amount of files with .aif ext
    
    int fileAmount = dir.listDir();
    fileExt.resize(fileAmount);
    
    for (int i = 0; i < fileExt.size(); i++ ) {
        fileExt[i] = dir.getPath(i);
    }
    
    // note: setFile() replaces the loaded file, so after this loop only the
    // last .aif is actually loaded; a different file can be set per trigger later
    for (int i = 0; i < fileExt.size(); i++) {
        file.setFile(fileExt[i]);
    }
    
    constantAmbience.setFile(ofFilePath::getAbsolutePath("pads.aif"));
    
    mixer.setInputBusCount(2);
    
    file.connectTo(delay).connectTo(mixer, 0);
    constantAmbience.connectTo(mixer, 1);
    
    mixer.connectTo(output);
    
    introVolume = 0.0;
    mixer.setOutputVolume(introVolume);

    output.start();

    
    constantAmbience.loop(); // loop() begins playback, so a separate play() call is redundant
    
    randomSample = 0;
    counterObject = 0;
    
    ///   INTERACTION SETUP   ///
    
    moveHand.setup();
    
    font.loadFont("Junction-light.otf", 12);
    title.loadFont("Junction-bold.otf", 20);
    
    ///    GENERATIVE MODE    ///
    
    generativeMode = false;
    radius = 25;
    modeColor = ofColor::black;
    indicator = "off";
}
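
randomSample is initialized above but never used in this snippet. A hypothetical sketch of how the preloaded file list might be used to trigger a random sample later (the function name and behavior are assumptions, not the project's actual code):

void testApp::playRandomSample(){
    if(fileExt.empty()) return;
    randomSample = (int)ofRandom(fileExt.size());
    file.setFile(fileExt[randomSample]); // swap a new file into the same player
    file.play();
}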
Code example #9
File: testApp.cpp Project: rickerbh/govhack2013
//--------------------------------------------------------------
void testApp::setup(){
  
  titleWidth = 250;
  roboto.loadFont("Roboto/Roboto-Thin.ttf", 20);
  roboto.setLineHeight(24);
  
  jsonF.open("Female.json");
  jsonM.open("Male.json");
  jsonA.open("merged.json");
  year = 2013;
  age = 0;
  
  s1.setFile(ofFilePath::getAbsolutePath("81804__bennstir__violin-loop1.wav"));
  s2.setFile(ofFilePath::getAbsolutePath("153610__carlos-vaquero__violin-g-4-tenuto-vibrato.wav"));
  s3.setFile(ofFilePath::getAbsolutePath("23580__loofa__gong1.aif"));
  s4.setFile(ofFilePath::getAbsolutePath("174725__archeos__bell-sound-b.wav"));
  s5.setFile(ofFilePath::getAbsolutePath("85579__drriquet__electro-beat.wav"));
  s6.setFile(ofFilePath::getAbsolutePath("154230__carlos-vaquero__transverse-flute-g-4-tenuto-non-vibrato.wav"));

  lp1 = ofxAudioUnit(kAudioUnitType_Effect,
                     kAudioUnitSubType_LowPassFilter);
  lp2 = ofxAudioUnit(kAudioUnitType_Effect,
                     kAudioUnitSubType_LowPassFilter);
  lp3 = ofxAudioUnit(kAudioUnitType_Effect,
                     kAudioUnitSubType_LowPassFilter);
  lp4 = ofxAudioUnit(kAudioUnitType_Effect,
                     kAudioUnitSubType_LowPassFilter);
  lp5 = ofxAudioUnit(kAudioUnitType_Effect,
                     kAudioUnitSubType_LowPassFilter);
  lp6 = ofxAudioUnit(kAudioUnitType_Effect,
                     kAudioUnitSubType_LowPassFilter);
  
  s1.connectTo(lp1).connectTo(tap1);
  s2.connectTo(lp2).connectTo(tap2);
  s3.connectTo(lp3).connectTo(tap3);
  s4.connectTo(lp4).connectTo(tap4);
  s5.connectTo(lp5).connectTo(tap5);
  s6.connectTo(lp6).connectTo(tap6);
  
  mixer.setInputBusCount(6);
  tap1.connectTo(mixer, 0);
  tap2.connectTo(mixer, 1);
  tap3.connectTo(mixer, 2);
  tap4.connectTo(mixer, 3);
  tap5.connectTo(mixer, 4);
  tap6.connectTo(mixer, 5);
  
  compressor = ofxAudioUnit(kAudioUnitType_Effect,
                            kAudioUnitSubType_DynamicsProcessor);
  
  mixer.connectTo(compressor).connectTo(output);
  
  output.start();
  
  s1.loop();
  s2.loop();
  s3.loop();
  s4.loop();
  s5.loop();
  s6.loop();
  
  ofSetVerticalSync(true);
  
  timer = ofxTimer();
  timer.setup(1000, true);
  
  ofAddListener(timer.TIMER_REACHED, this, &testApp::timerFired);
}
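
timerFired() isn't included in this snippet; the listener registered above implies a handler with this shape (a sketch, body assumed):

void testApp::timerFired(ofEventArgs & args){
  // called roughly once a second by the ofxTimer set up above
}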