Example #1
  void OutputDeviceNodeWebAudio::initialize()
  {
    CI_LOG_I( "OutputDevicenode :: Initialize called" );

    const size_t sampleRate = getOutputSampleRate();
    const size_t framesPerBlock = getOutputFramesPerBlock();
    const size_t numChannels = getNumChannels();

    mInterleavedBuffer = BufferInterleaved( framesPerBlock, numChannels );

    auto functor = std::bind( &OutputDeviceNodeWebAudio::renderInputs, this, std::placeholders::_1 );
    mImpl->setRenderFunction( functor );

  }
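
For context, the std::bind call above stores a member-function callback on the impl object. Below is a minimal, self-contained sketch of that pattern and its equivalent lambda form; Impl and Node are illustrative stand-ins, not Cinder's classes.

  #include <functional>
  #include <iostream>
  #include <utility>

  // Hypothetical stand-in for the impl object's render-callback slot.
  struct Impl {
    std::function<void( int )> renderFn;
    void setRenderFunction( std::function<void( int )> fn ) { renderFn = std::move( fn ); }
  };

  struct Node {
    Impl impl;
    void renderInputs( int numFrames ) { std::cout << "render " << numFrames << " frames\n"; }

    void initialize()
    {
      // bind form, as in the example above...
      impl.setRenderFunction( std::bind( &Node::renderInputs, this, std::placeholders::_1 ) );
      // ...or the equivalent lambda form:
      // impl.setRenderFunction( [this]( int numFrames ) { renderInputs( numFrames ); } );
    }
  };

  int main()
  {
    Node node;
    node.initialize();
    node.impl.renderFn( 512 ); // invoke the stored callback, as the audio backend would
  }
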
Example #2
  void OutputDeviceNodeWebAudio::renderInputs( emscripten::val e )
  {
    auto outputBuffer = e["outputBuffer"];
    auto inputBuffer = e["inputBuffer"];

    auto ctx = getContext();
    if( ! ctx )
    {
       CI_LOG_I( "can't get context" );
       return;
    }

    // taking the context lock technically doesn't do anything here yet, but leave it in place for now.
    lock_guard<mutex> lock( ctx->getMutex() );

    ctx->preProcess();

      
    auto internalBuffer = getInternalBuffer();
    internalBuffer->zero();
    pullInputs( internalBuffer );

    if( checkNotClipping() )
    {
      internalBuffer->zero();
    }

    const size_t numFrames = internalBuffer->getNumFrames();
    const size_t numChannels = internalBuffer->getNumChannels();

    //dsp::interleave( internalBuffer->getData(), outputBuffer, numFrames, numChannels, numFrames );

    ctx->postProcess();

    // =========================================== //

    // make sure that the ScriptNodeProcessor's bufferSize prop is set properly.
    mImpl->setOutputBufferSize( numFrames );

    // output array is what gets directed towards speakers
    val output = outputBuffer.call<val>( "getChannelData", 0 );

    // input is only going to be populated when using native WebAudio nodes.
    val input = inputBuffer.call<val>( "getChannelData", 0 );

    // check the first 100 samples to see whether the input buffer holds any
    // data (i.e. any non-zero value). If it's empty, we skip the mixing step below.
    bool inputIsEmpty = true;
    for( int a = 0; a < 100; ++a )
    {
      if( input[a].as<float>() != 0.0f )
      {
        inputIsEmpty = false;
        break;
      }
    }

    // get internal cinder data
    float *data = static_cast<float*>( internalBuffer->getData() );

    // finalData points at whatever ends up on the speakers: either the
    // mixed buffer below or the internal cinder data passed through as-is.
    const float *finalData = data;

    // holds the mixed result when there is input data
    std::vector<float> mixed;

    if( ! inputIsEmpty )
    {
      // copy the Float32Array (aka "input") into a vector so we can pass
      // a float* to the dsp methods
      std::vector<float> inputData;
      ci::em::helpers::copyToVector( input, inputData );

      // mix the input data with the cinder data; dsp::add writes the
      // element-wise sum of its first two arguments into its third
      mixed.resize( numFrames );
      dsp::add( inputData.data(), data, mixed.data(), numFrames );
      finalData = mixed.data();
    }
    // if there is no input data, finalData already points at the internal
    // buffer and we simply pass it through

    // loop through and copy finalData onto the output buffer
    for( size_t i = 0; i < numFrames; ++i )
    {
      output.set<float>( i, finalData[i] );
    }

    // no explicit free() is needed here: data belongs to the node's internal
    // Buffer, and the vectors above manage their own storage (freeing these
    // pointers is what caused the crashes noted previously)

  }
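
The mixing step above uses dsp::add to sum two float arrays element-wise into a result array. Below is a minimal, stand-alone sketch of the mix-or-passthrough logic with no Cinder or Emscripten dependencies; addBuffers is a hypothetical stand-in with dsp::add's assumed behavior.

  #include <cassert>
  #include <cstddef>
  #include <vector>

  // Stand-in for cinder::audio::dsp::add(): result[i] = a[i] + b[i].
  static void addBuffers( const float *a, const float *b, float *result, size_t n )
  {
    for( size_t i = 0; i < n; ++i )
      result[i] = a[i] + b[i];
  }

  int main()
  {
    const size_t numFrames = 4;
    std::vector<float> internalData = { 0.5f, 0.5f, 0.5f, 0.5f }; // cinder-rendered audio
    std::vector<float> inputData    = { 0.1f, 0.0f, 0.1f, 0.0f }; // native WebAudio input

    // passthrough by default
    const float *finalData = internalData.data();
    std::vector<float> mixed( numFrames );

    // skip mixing when the input block is silent
    bool inputIsEmpty = true;
    for( float s : inputData ) {
      if( s != 0.0f ) { inputIsEmpty = false; break; }
    }

    if( ! inputIsEmpty ) {
      addBuffers( inputData.data(), internalData.data(), mixed.data(), numFrames );
      finalData = mixed.data();
    }

    assert( finalData[0] > 0.59f && finalData[0] < 0.61f ); // 0.5 + 0.1
    return 0;
  }
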
Example #3
	void MovieGlHap::Obj::newFrame( CVImageBufferRef cvImage )
	{
		::CVPixelBufferLockBaseAddress( cvImage, kCVPixelBufferLock_ReadOnly );
		// Load HAP frame
		if( ::CFGetTypeID( cvImage ) == ::CVPixelBufferGetTypeID() ) {
			CI_ASSERT( cvImage != NULL );
			
			GLuint width = ::CVPixelBufferGetWidth( cvImage );
			GLuint height = ::CVPixelBufferGetHeight( cvImage );
			
			// Check the buffer padding
			size_t extraRight, extraBottom;
			::CVPixelBufferGetExtendedPixels( cvImage, NULL, &extraRight, NULL, &extraBottom );
			GLuint roundedWidth = width + extraRight;
			GLuint roundedHeight = height + extraBottom;
			
			// Valid DXT will be a multiple of 4 wide and high
			CI_ASSERT( roundedWidth % 4 == 0 && roundedHeight % 4 == 0 );
			OSType newPixelFormat = ::CVPixelBufferGetPixelFormatType( cvImage );
			GLenum internalFormat;
			unsigned int bitsPerPixel;
			switch (newPixelFormat) {
				case kHapPixelFormatTypeRGB_DXT1:
					internalFormat = GL_COMPRESSED_RGB_S3TC_DXT1_EXT;
					bitsPerPixel = 4;
					break;
				case kHapPixelFormatTypeRGBA_DXT5:
				case kHapPixelFormatTypeYCoCg_DXT5:
					internalFormat = GL_COMPRESSED_RGBA_S3TC_DXT5_EXT;
					bitsPerPixel = 8;
					break;
				default:
					CI_ASSERT_MSG( false, "We don't support non-DXT pixel buffers." );
					return;
			}
			
			// Ignore the value for CVPixelBufferGetBytesPerRow()
			size_t	bytesPerRow = (roundedWidth * bitsPerPixel) / 8;
			GLsizei	dataLength = bytesPerRow * roundedHeight; // usually not the full length of the buffer
			size_t	actualBufferSize = ::CVPixelBufferGetDataSize( cvImage );
			
			// Check the buffer is at least as large as we expect it to be
			CI_ASSERT( dataLength <= actualBufferSize );
			
			GLvoid *baseAddress = ::CVPixelBufferGetBaseAddress( cvImage );
						
			if ( !mTexture ) {
				// On NVIDIA hardware there is a massive slowdown if DXT textures aren't POT-dimensioned, so we use POT-dimensioned backing
				GLuint backingWidth = 1;
				while (backingWidth < roundedWidth) backingWidth <<= 1;
				
				GLuint backingHeight = 1;
				while (backingHeight < roundedHeight) backingHeight <<= 1;
				
				// We allocate the texture with no pixel data, then use CompressedTexSubImage to update the content region
				gl::Texture2d::Format format;
				format.wrap( GL_CLAMP_TO_EDGE ).magFilter( GL_LINEAR ).minFilter( GL_LINEAR ).internalFormat( internalFormat ).dataType( GL_UNSIGNED_INT_8_8_8_8_REV ).immutableStorage();// .pixelDataFormat( GL_BGRA );
				mTexture = gl::Texture2d::create( backingWidth, backingHeight, format );
				mTexture->setCleanSize( width, height );
				
				CI_LOG_I( "Created texture." );
				
#if defined( CINDER_MAC )
				// There is no Texture2d::Format param for GL_TEXTURE_STORAGE_HINT_APPLE, so we set it manually
				gl::ScopedTextureBind bind( mTexture->getTarget(), mTexture->getId() );
				glTexParameteri( mTexture->getTarget(), GL_TEXTURE_STORAGE_HINT_APPLE, GL_STORAGE_SHARED_APPLE );
#endif
			}
			gl::ScopedTextureBind bind( mTexture );
#if defined( CINDER_MAC )
			glTextureRangeAPPLE( mTexture->getTarget(), dataLength, baseAddress );
			/* WARNING: Even though it is present here:
			 * https://github.com/Vidvox/hap-quicktime-playback-demo/blob/master/HapQuickTimePlayback/HapPixelBufferTexture.m#L186
			 * the following call does not appear necessary. Furthermore, it corrupts display
			 * when movies are loaded more than once
			 */
//			glPixelStorei( GL_UNPACK_CLIENT_STORAGE_APPLE, 1 );
#endif
			glCompressedTexSubImage2D(mTexture->getTarget(),
									  0,
									  0,
									  0,
									  roundedWidth,
									  roundedHeight,
									  mTexture->getInternalFormat(),
									  dataLength,
									  baseAddress);
		}
		
		::CVPixelBufferUnlockBaseAddress( cvImage, kCVPixelBufferLock_ReadOnly );
		::CVPixelBufferRelease(cvImage);
	}
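
For reference, the power-of-two backing dimensions and the DXT size arithmetic used above (bytesPerRow = roundedWidth * bitsPerPixel / 8, dataLength = bytesPerRow * roundedHeight) can be exercised in isolation. A minimal sketch with no Cinder, CoreVideo, or OpenGL dependencies; nextPowerOfTwo and dxtDataLength are illustrative helpers, not part of the code above.

  #include <cassert>
  #include <cstddef>

  // Round a dimension up to the next power of two, as done for the
  // NVIDIA-friendly texture backing above.
  static unsigned nextPowerOfTwo( unsigned v )
  {
    unsigned p = 1;
    while( p < v )
      p <<= 1;
    return p;
  }

  // Expected size of one DXT frame given its rounded dimensions and
  // bits per pixel (4 for DXT1, 8 for DXT5).
  static size_t dxtDataLength( unsigned roundedWidth, unsigned roundedHeight, unsigned bitsPerPixel )
  {
    const size_t bytesPerRow = ( roundedWidth * bitsPerPixel ) / 8;
    return bytesPerRow * roundedHeight;
  }

  int main()
  {
    assert( nextPowerOfTwo( 1280 ) == 2048 );
    assert( nextPowerOfTwo( 720 ) == 1024 );
    // 1280x720 DXT1 frame: 640 bytes per row, 460800 bytes total.
    assert( dxtDataLength( 1280, 720, 4 ) == 460800 );
    return 0;
  }
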
Example #4
size_t DeviceManagerOpenSl::getSampleRate( const DeviceRef &device )
{
	size_t result = mJniImpl->getSampleRate();
	CI_LOG_I( "result: " << result );
	return result;
}
Example #5
	MovieGlHap::~MovieGlHap()
	{
		CI_LOG_I( "Detroying movie hap." );
	}