// Scans every pixel of a 32-bit float channel and reports the smallest and
// largest values found through the two out-parameters.
// Note: assumes the channel has at least one pixel (the top-left pixel seeds
// both extremes).
void getMinMax( const Channel32f &channel, float *resultMin, float *resultMax )
{
	// Seed both extremes with the top-left pixel so the scan below only
	// ever has to tighten them.
	const float seed = *( channel.getData( Vec2i::zero() ) );
	float lo = seed;
	float hi = seed;

	Channel32f::ConstIter iter = channel.getIter();
	while( iter.line() ) {
		while( iter.pixel() ) {
			const float v = iter.v();
			if( v < lo )
				lo = v;
			if( v > hi )
				hi = v;
		}
	}

	*resultMin = lo;
	*resultMax = hi;
}
void AudioVisualizerApp::update() { // update FMOD so it can notify us of events mFMODSystem->update(); // handle signal: if audio has ended, play next file if(mIsAudioPlaying && signalChannelEnd) playAudio( nextAudio( mAudioPath ) ); // reset FMOD signals signalChannelEnd= false; // get spectrum for left and right channels and copy it into our channels float* pDataLeft = mChannelLeft.getData() + kBands * mOffset; float* pDataRight = mChannelRight.getData() + kBands * mOffset; mFMODSystem->getSpectrum( pDataLeft, kBands, 0, FMOD_DSP_FFT_WINDOW_HANNING ); mFMODSystem->getSpectrum( pDataRight, kBands, 1, FMOD_DSP_FFT_WINDOW_HANNING ); // increment texture offset mOffset = (mOffset+1) % kHistory; // clear the spectrum for this row to avoid old data from showing up pDataLeft = mChannelLeft.getData() + kBands * mOffset; pDataRight = mChannelRight.getData() + kBands * mOffset; memset( pDataLeft, 0, kBands * sizeof(float) ); memset( pDataRight, 0, kBands * sizeof(float) ); // animate camera if mouse has not been down for more than 30 seconds if(!mIsMouseDown && (getElapsedSeconds() - mMouseUpTime) > mMouseUpDelay) { float t = float( getElapsedSeconds() ); float x = 0.5f + 0.5f * math<float>::cos( t * 0.07f ); float y = 0.1f - 0.2f * math<float>::sin( t * 0.09f ); float z = 0.25f * math<float>::sin( t * 0.05f ) - 0.25f; Vec3f eye = Vec3f(kWidth * x, kHeight * y, kHeight * z); x = 1.0f - x; y = -0.3f; z = 0.6f + 0.2f * math<float>::sin( t * 0.12f ); Vec3f interest = Vec3f(kWidth * x, kHeight * y, kHeight * z); // gradually move to eye position and center of interest mCamera.setEyePoint( eye.lerp(0.995f, mCamera.getEyePoint()) ); mCamera.setCenterOfInterestPoint( interest.lerp(0.990f, mCamera.getCenterOfInterestPoint()) ); } }
Texture::Texture( const Channel32f &channel, Format format ) : mObj( shared_ptr<Obj>( new Obj( channel.getWidth(), channel.getHeight() ) ) ) { #if defined( CINDER_MAC ) bool supportsTextureFloat = gl::isExtensionAvailable( "GL_ARB_texture_float" ); #elif defined( CINDER_MSW ) bool supportsTextureFloat = GLEE_ARB_texture_float != 0; #endif if( format.mInternalFormat < 0 ) { #if ! defined( CINDER_GLES ) if( supportsTextureFloat ) format.mInternalFormat = GL_LUMINANCE32F_ARB; else format.mInternalFormat = GL_LUMINANCE; #else format.mInternalFormat = GL_LUMINANCE; #endif } mObj->mInternalFormat = format.mInternalFormat; mObj->mTarget = format.mTarget; // if the data is not already contiguous, we'll need to create a block of memory that is if( ( channel.getIncrement() != 1 ) || ( channel.getRowBytes() != channel.getWidth() * sizeof(float) ) ) { shared_ptr<float> data( new float[channel.getWidth() * channel.getHeight()], checked_array_deleter<float>() ); float *dest = data.get(); const int8_t inc = channel.getIncrement(); const int32_t width = channel.getWidth(); for( int y = 0; y < channel.getHeight(); ++y ) { const float *src = channel.getData( 0, y ); for( int x = 0; x < width; ++x ) { *dest++ = *src; src += inc; } } init( data.get(), GL_LUMINANCE, format ); } else init( channel.getData(), GL_LUMINANCE, format ); }
void AudioVisualizerApp::setup() { // initialize signals signalChannelEnd = false; // make a list of valid audio file extensions and initialize audio variables const char* extensions[] = { "mp3", "wav", "ogg" }; mAudioExtensions = vector<string>( extensions, extensions + 2 ); mAudioPath = getAssetPath( "" ); mIsAudioPlaying = false; // setup camera mCamera.setPerspective( 50.0f, 1.0f, 1.0f, 10000.0f ); mCamera.lookAt( vec3( -kWidth / 4, kHeight / 2, -kWidth / 8 ), vec3( kWidth / 4, -kHeight / 8, kWidth / 4 ) ); mCameraUi.setCamera( &mCamera ); // create channels from which we can construct our textures mChannelLeft = Channel32f( kBands, kHistory ); mChannelRight = Channel32f( kBands, kHistory ); memset( mChannelLeft.getData(), 0, mChannelLeft.getRowBytes() * kHistory ); memset( mChannelRight.getData(), 0, mChannelRight.getRowBytes() * kHistory ); // create texture format (wrap the y-axis, clamp the x-axis) mTextureFormat.setWrapS( GL_CLAMP_TO_BORDER ); mTextureFormat.setWrapT( GL_REPEAT ); mTextureFormat.setMinFilter( GL_LINEAR ); mTextureFormat.setMagFilter( GL_LINEAR ); mTextureFormat.loadTopDown( true ); // compile shader try { mShader = gl::GlslProg::create( loadAsset( "shaders/spectrum.vert" ), loadAsset( "shaders/spectrum.frag" ) ); } catch( const std::exception& e ) { console() << e.what() << std::endl; quit(); return; } // create static mesh (all animation is done in the vertex shader) std::vector<vec3> positions; std::vector<Colorf> colors; std::vector<vec2> coords; std::vector<uint32_t> indices; for( size_t h = 0; h < kHeight; ++h ) { for( size_t w = 0; w < kWidth; ++w ) { // add polygon indices if( h < kHeight - 1 && w < kWidth - 1 ) { size_t offset = positions.size(); indices.emplace_back( offset ); indices.emplace_back( offset + kWidth ); indices.emplace_back( offset + kWidth + 1 ); indices.emplace_back( offset ); indices.emplace_back( offset + kWidth + 1 ); indices.emplace_back( offset + 1 ); } // add vertex positions.emplace_back( vec3( float( w ), 
0, float( h ) ) ); // add texture coordinates // note: we only want to draw the lower part of the frequency bands, // so we scale the coordinates a bit const float part = 0.5f; float s = w / float( kWidth - 1 ); float t = h / float( kHeight - 1 ); coords.emplace_back( vec2( part - part * s, t ) ); // add vertex colors colors.emplace_back( Color( CM_HSV, s, 0.5f, 0.75f ) ); } } gl::VboMesh::Layout layout; layout.usage( GL_STATIC_DRAW ); layout.attrib( geom::Attrib::POSITION, 3 ); layout.attrib( geom::Attrib::COLOR, 3 ); layout.attrib( geom::Attrib::TEX_COORD_0, 2 ); mMesh = gl::VboMesh::create( positions.size(), GL_TRIANGLES, { layout }, indices.size(), GL_UNSIGNED_INT ); mMesh->bufferAttrib( geom::POSITION, positions.size() * sizeof( vec3 ), positions.data() ); mMesh->bufferAttrib( geom::COLOR, colors.size() * sizeof( vec3 ), colors.data() ); mMesh->bufferAttrib( geom::TEX_COORD_0, coords.size() * sizeof( vec2 ), coords.data() ); mMesh->bufferIndices( indices.size() * sizeof( uint32_t ), indices.data() ); // play audio using the Cinder FMOD block FMOD::System_Create( &mFMODSystem ); mFMODSystem->init( 32, FMOD_INIT_NORMAL | FMOD_INIT_ENABLE_PROFILE, NULL ); mFMODSound = nullptr; mFMODChannel = nullptr; playAudio( findAudio( mAudioPath ) ); mIsMouseDown = false; mMouseUpDelay = 30.0; mMouseUpTime = getElapsedSeconds() - mMouseUpDelay; // the texture offset has two purposes: // 1) it tells us where to upload the next spectrum data // 2) we use it to offset the texture coordinates in the shader for the scrolling effect mOffset = 0; }