void NessBox2DApp::setup()
{
    gl::enableAlphaBlending();
    //gl::enableWireframe();
    //_fmt.setWrap( GL_REPEAT, GL_REPEAT );
    _fmt.setMinFilter( GL_NEAREST );
    _fmt.setMagFilter( GL_NEAREST );
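    // nearest-neighbour filtering keeps the scaled-up pixel-art sprites crisp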
    int scale = 3;
    Surface walkStuff = Surface( loadImage( loadResource( "NESS_allWalk.png" ) ) );
    ness = Character( &walkStuff, _fmt, 16, 24, 16 * scale, 24 * scale );
    walkStuff = Surface( loadImage( loadResource( "JEFF_allWalk.png" ) ) );
    jeff = NPC( &walkStuff, _fmt, 16, 24, 16 * scale, 24 * scale );
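    // note: each frame in the sheet is 16x24 pixels and is drawn at 3x scale (48x72);
    // walkStuff is a stack local, so Character/NPC are assumed to copy the surface
    // data (or create their own textures) rather than keep the pointer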
    
    for (int i = 0; i < 5; i++) {
        keys[i] = false;
    }
    
    definedKeys[0] = KeyEvent::KEY_LEFT;
    definedKeys[1] = KeyEvent::KEY_UP;
    definedKeys[2] = KeyEvent::KEY_RIGHT;
    definedKeys[3] = KeyEvent::KEY_DOWN;
    definedKeys[4] = KeyEvent::KEY_LSHIFT;
}

void AudioVisualizerApp::setup()
{
	// initialize signals
	signalChannelEnd = false;

	// make a list of valid audio file extensions and initialize audio variables
	const char* extensions[] = {"mp3", "wav", "ogg"};
	mAudioExtensions = vector<string>(extensions, extensions+3);
	mAudioPath = getAssetPath("");
	mIsAudioPlaying = false;

	// setup camera
	mCamera.setPerspective(50.0f, 1.0f, 1.0f, 10000.0f);
	mCamera.setEyePoint( Vec3f(-kWidth/4, kHeight/2, -kWidth/8) );
	mCamera.setCenterOfInterestPoint( Vec3f(kWidth/4, -kHeight/8, kWidth/4) );

	// create channels from which we can construct our textures
	mChannelLeft = Channel32f(kBands, kHistory);
	mChannelRight = Channel32f(kBands, kHistory);
	memset(	mChannelLeft.getData(), 0, mChannelLeft.getRowBytes() * kHistory );
	memset(	mChannelRight.getData(), 0, mChannelRight.getRowBytes() * kHistory );
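	// clear both channels so the spectrum history starts out silent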

	// create texture format (wrap the y-axis, clamp the x-axis)
	mTextureFormat.setWrapS( GL_CLAMP );
	mTextureFormat.setWrapT( GL_REPEAT );
	mTextureFormat.setMinFilter( GL_LINEAR );
	mTextureFormat.setMagFilter( GL_LINEAR );
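	// repeating along T lets the shader scroll through the history by simply
	// offsetting the t-coordinate (see mOffset below), while clamping S keeps
	// the band axis from wrapping around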

	// compile shader
	try {
		mShader = gl::GlslProg( loadAsset("shaders/spectrum.vert"), loadAsset("shaders/spectrum.frag") );
	}
	catch( const std::exception& e ) {
		console() << e.what() << std::endl;
		quit();
		return;
	}

	// create static mesh (all animation is done in the vertex shader)
	std::vector<Vec3f>	vertices;
	std::vector<Colorf>	colors;
	std::vector<Vec2f>	coords;
	std::vector<uint32_t>	indices;
	
	for(size_t h=0;h<kHeight;++h)
	{
		for(size_t w=0;w<kWidth;++w)
		{
			// add polygon indices
			if(h < kHeight-1 && w < kWidth-1)
			{
				size_t offset = vertices.size();

				indices.push_back(offset);
				indices.push_back(offset+kWidth);
				indices.push_back(offset+kWidth+1);
				indices.push_back(offset);
				indices.push_back(offset+kWidth+1);
				indices.push_back(offset+1);
			}
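			// (the six indices above form the two triangles of the grid cell whose
			//  corners are vertices (w,h), (w+1,h), (w,h+1) and (w+1,h+1))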

			// add vertex
			vertices.push_back( Vec3f(float(w), 0, float(h)) );

			// add texture coordinates
			// note: we only want to draw the lower half of the frequency bands,
			//  so we scale the s-coordinate to the [0, 0.5] range (mirrored across the mesh)
			const float part = 0.5f;
			float s = w / float(kWidth-1);
			float t = h / float(kHeight-1);
			coords.push_back( Vec2f(part - part * s, t) );

			// add vertex colors
			colors.push_back( Color(CM_HSV, s, 0.5f, 0.75f) );
		}
	}

	gl::VboMesh::Layout layout;
	layout.setStaticPositions();
	layout.setStaticColorsRGB();
	layout.setStaticIndices();
	layout.setStaticTexCoords2d();

	mMesh = gl::VboMesh(vertices.size(), indices.size(), layout, GL_TRIANGLES);
	mMesh.bufferPositions(vertices);
	mMesh.bufferColorsRGB(colors);
	mMesh.bufferIndices(indices);
	mMesh.bufferTexCoords2d(0, coords);
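	// the mesh holds kWidth * kHeight vertices and (kWidth-1) * (kHeight-1) * 2
	// triangles; since everything is static, it is uploaded to the GPU only once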

	// play audio using the Cinder FMOD block
	FMOD::System_Create( &mFMODSystem );
	mFMODSystem->init( 32, FMOD_INIT_NORMAL | FMOD_INIT_ENABLE_PROFILE, NULL );
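	// 32 is the maximum number of (virtual) channels FMOD will mix at once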
	mFMODSound = nullptr;
	mFMODChannel = nullptr;

	playAudio( findAudio( mAudioPath ) );
	
	mIsMouseDown = false;
	mMouseUpDelay = 30.0;
	mMouseUpTime = getElapsedSeconds() - mMouseUpDelay;
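	// pretend the mouse was released mMouseUpDelay seconds ago, presumably so any
	// behaviour that waits for that delay (e.g. automatic camera motion) starts right away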

	// the texture offset has two purposes:
	//  1) it tells us where to upload the next spectrum data
	//  2) we use it to offset the texture coordinates in the shader for the scrolling effect
	mOffset = 0;
}