Example #1
int main()
{
  // Set the global sample rate before creating class instances.
  Stk::setSampleRate( 44100.0 );

  SineWave sine;
  RtAudio dac;

  // Figure out how many bytes are in an StkFloat and set up the RtAudio stream.
  RtAudio::StreamParameters parameters;
  parameters.deviceId = dac.getDefaultOutputDevice();
  parameters.nChannels = 1;
  RtAudioFormat format = ( sizeof(StkFloat) == 8 ) ? RTAUDIO_FLOAT64 : RTAUDIO_FLOAT32;
  unsigned int bufferFrames = RT_BUFFER_SIZE;
  // Declare these before the first "goto cleanup" below so that the jump
  // does not skip over their initialization (which would not compile).
  double f = 440.0;
  const double twelveRoot2 = 1.0594630943592952645618252949463; // 2^(1/12): one equal-tempered semitone
  int keyhit = 0;

  try {
    dac.openStream( &parameters, NULL, format, (unsigned int)Stk::sampleRate(), &bufferFrames, &tick, (void *)&sine );
  }
  catch ( RtError &error ) {
    error.printMessage();
    goto cleanup;
  }

  sine.setFrequency(f);
  try {
    dac.startStream();
  }
  catch ( RtError &error ) {
    error.printMessage();
    goto cleanup;
  }

  // Block here, reading keys: 'a' resets to 220 Hz, while s/d/f/g step the
  // frequency down and h/j/k/l step it up by one to four semitones.
  std::cout << "\nPlaying ... press <esc> or <space> to quit.\n";
  while (keyhit != 32 && keyhit != 27)
  {
	  keyhit = _getch();
	  if (tolower(keyhit) == 'a')
	  {
		  f = 220.0;
		  sine.setFrequency(f);
	  }
	  else if (tolower(keyhit) == 'g')
	  {
		  f /= twelveRoot2;
		  sine.setFrequency(f);
	  }
	  else if (tolower(keyhit) == 'h')
	  {
		  f *= twelveRoot2;
		  sine.setFrequency(f);
	  }
	  else if (tolower(keyhit) == 'f')
	  {
		  for (int i = 0; i < 2; ++i)
			f /= twelveRoot2;
		  sine.setFrequency(f);
	  }
	  else if (tolower(keyhit) == 'j')
	  {
		  for (int i = 0; i < 2; ++i)
			  f *= twelveRoot2;
		  sine.setFrequency(f);
	  }
	  else if (tolower(keyhit) == 'd')
	  {
		  for (int i = 0; i < 3; ++i)
			  f /= twelveRoot2;
		  sine.setFrequency(f);
	  }
	  else if (tolower(keyhit) == 'k')
	  {
		  for (int i = 0; i < 3; ++i)
			  f *= twelveRoot2;
		  sine.setFrequency(f);
	  }
	  else if (tolower(keyhit) == 's')
	  {
		  for (int i = 0; i < 4; ++i)
			  f /= twelveRoot2;
		  sine.setFrequency(f);
	  }
	  else if (tolower(keyhit) == 'l')
	  {
		  for (int i = 0; i < 4; ++i)
			  f *= twelveRoot2;
		  sine.setFrequency(f);
	  }
	  else
	  {
		  std::cout << "Freq: " << f << std::endl;
	  }
  }

  // Shut down the output stream.
  try {
    dac.closeStream();
  }
  catch ( RtError &error ) {
    error.printMessage();
  }

 cleanup:

  return 0;
}
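The tick() callback that the stream is opened with is not part of this listing. A minimal sketch of what it could look like, modeled on STK's real-time sine oscillator example (an assumption, not the author's actual callback):

// Hypothetical tick(): pull one sample per frame from the SineWave passed in
// as user data and write it to the single output channel.
int tick( void *outputBuffer, void *inputBuffer, unsigned int nBufferFrames,
          double streamTime, RtAudioStreamStatus status, void *dataPointer )
{
  SineWave *sine = (SineWave *) dataPointer;
  StkFloat *samples = (StkFloat *) outputBuffer;
  for ( unsigned int i = 0; i < nBufferFrames; i++ )
    *samples++ = sine->tick();
  return 0;
}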
Example #2
//-----------------------------------------------------------------------------
// Name: main( )
// Desc: starting point
//-----------------------------------------------------------------------------
int main( int argc, char ** argv )
{
	// Get RtAudio Instance with default API
	RtAudio *audio = new RtAudio();
    // buffer size
    unsigned int buffer_size = 512;
	// Output Stream Parameters
	RtAudio::StreamParameters outputStreamParams;
	outputStreamParams.deviceId = audio->getDefaultOutputDevice();
	outputStreamParams.nChannels = 1;
	// Input Stream Parameters
	RtAudio::StreamParameters inputStreamParams;
	inputStreamParams.deviceId = audio->getDefaultInputDevice();
	inputStreamParams.nChannels = 1;
	
	// Open an input-only RtAudio stream (outputStreamParams above is set up but not passed here)
	try {
		audio->openStream(
			NULL,
			&inputStreamParams,
			RTAUDIO_FLOAT32,
			MY_FREQ,
			&buffer_size,
			callback_func,
			NULL
			);
	}
	catch(RtError &err) {
		err.printMessage();
		exit(1);
	}
	g_bufferSize = buffer_size;
	// Samples for Feature Extraction in a Buffer
	g_samples = (SAMPLE *)malloc(sizeof(SAMPLE)*g_bufferSize*g_numMaxBuffersToUse);
	g_audio_buffer = (SAMPLE *)malloc(sizeof(SAMPLE)*g_bufferSize*g_numMaxBuffersToUse);
	g_another_buffer = (SAMPLE *)malloc(sizeof(SAMPLE)*g_bufferSize*g_numMaxBuffersToUse);
	g_buffest = (SAMPLE *)malloc(sizeof(SAMPLE)*g_bufferSize*g_numMaxBuffersToUse);
	g_residue = (SAMPLE *)malloc(sizeof(SAMPLE)*g_bufferSize*g_numMaxBuffersToUse);
	g_coeff = (SAMPLE *)malloc(sizeof(SAMPLE)*g_order);
    g_dwt = (SAMPLE *)malloc(sizeof(SAMPLE)*g_bufferSize*g_numMaxBuffersToUse);
	
    // initialize GLUT
    glutInit( &argc, argv );
    // double buffer, use rgb color, enable depth buffer
    glutInitDisplayMode( GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH );
    // initialize the window size
    glutInitWindowSize( g_width, g_height );
    // set the window position
    glutInitWindowPosition( 100, 100 );
    // create the window
    glutCreateWindow( "The New File" );
    
    // set the idle function - called when idle
    glutIdleFunc( idleFunc );
    // set the display function - called when redrawing
    glutDisplayFunc( displayFunc );
    // set the reshape function - called when client area changes
    glutReshapeFunc( reshapeFunc );
    // set the keyboard function - called on keyboard events
    glutKeyboardFunc( keyboardFunc );
    // set the mouse function - called on mouse stuff
    glutMouseFunc( mouseFunc );
    
    // do our own initialization
    initialize();

	// initialize mfcc
	initMFCC();
	
	//init lpc
	initialize_lpc();
	
	// initialize osc
	// Initialize a socket to get a port
	g_transmitSocket = new UdpTransmitSocket( IpEndpointName( g_ADDRESS.c_str(), SERVERPORT ) );
	
//    // Set the global sample rate before creating class instances.
//    Stk::setSampleRate( 44100.0 );
//	// Read In File
//	try 
//    {
//        // read the file
//        g_fin.openFile( "TomVega.wav" );
//        // change the rate
//        g_fin.setRate( 1 );
//		// normalize the peak
//		g_fin.normalize();
//    } catch( stk::StkError & e )
//    {
//        cerr << "baaaaaaaaad..." << endl;
//        return 1;
//    }
	
	// Start Stream
	try {
        audio->startStream();
    } catch( RtError & err ) {
        // do stuff
        err.printMessage();
        goto cleanup;
    }

    // let GLUT handle the current thread from here
    glutMainLoop();
    
 	// if we get here, then stop!
	try{
		audio->stopStream();
	} 
	catch( RtError & err ) {
		// do stuff
		err.printMessage();
	}

	cleanup:
	    if ( audio->isStreamOpen() ) audio->closeStream();
	    delete audio;

    return 0;
}
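callback_func is not shown above. Because the stream is opened input-only (the output parameters argument is NULL) with a NULL user-data pointer, a hypothetical callback would simply read the incoming block and stash it in one of the global buffers, for example (assuming SAMPLE is typedef'd to float to match RTAUDIO_FLOAT32):

// Hypothetical callback_func: copy the incoming mono block into the global
// g_audio_buffer so the feature-extraction code can read it later.
int callback_func( void *outputBuffer, void *inputBuffer, unsigned int numFrames,
                   double streamTime, RtAudioStreamStatus status, void *userData )
{
    SAMPLE *input = (SAMPLE *) inputBuffer;
    memcpy( g_audio_buffer, input, numFrames * sizeof(SAMPLE) );
    return 0;
}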
Example #3
int main( int argc, char *argv[] )
{
  TickData data;
  int i;

#if defined(__STK_REALTIME__)
  RtAudio dac;
#endif

  // If you want to change the default sample rate (set in Stk.h), do
  // it before instantiating any objects!  If the sample rate is
  // specified in the command line, it will override this setting.
  Stk::setSampleRate( 44100.0 );

  // By default, warning messages are not printed.  If we want to see
  // them, we need to specify that here.
  Stk::showWarnings( true );

  // Check the command-line arguments for errors and to determine
  // the number of WvOut objects to be instantiated (in utilities.cpp).
  data.nWvOuts = checkArgs( argc, argv );
  data.wvout = (WvOut **) calloc( data.nWvOuts, sizeof(WvOut *) );

  // Parse the command-line flags, instantiate WvOut objects, and
  // instantiate the input message controller (in utilities.cpp).
  try {
    data.realtime = parseArgs( argc, argv, data.wvout, data.messager );
  }
  catch (StkError &) {
    goto cleanup;
  }

  // If realtime output, allocate the dac here.
#if defined(__STK_REALTIME__)
  if ( data.realtime ) {
    RtAudioFormat format = ( sizeof(StkFloat) == 8 ) ? RTAUDIO_FLOAT64 : RTAUDIO_FLOAT32;
    RtAudio::StreamParameters parameters;
    parameters.deviceId = dac.getDefaultOutputDevice();
    parameters.nChannels = data.channels;
    unsigned int bufferFrames = RT_BUFFER_SIZE;
    try {
      dac.openStream( &parameters, NULL, format, (unsigned int)Stk::sampleRate(), &bufferFrames, &tick, (void *)&data );
    }
    catch ( RtAudioError& error ) {
      error.printMessage();
      goto cleanup;
    }
  }
#endif

  // Set the reverb parameters.
  data.reverb.setT60( data.t60 );
  data.reverb.setEffectMix( 0.2 );

  // Allocate guitar
  data.guitar = new Guitar( nStrings );

  // Configure distortion and feedback.
  data.distortion.setThreshold( 2.0 / 3.0 );
  data.distortion.setA1( 1.0 );
  data.distortion.setA2( 0.0 );
  data.distortion.setA3( -1.0 / 3.0 );
  data.distortionMix = 0.9;
  data.distortionGain = 1.0;
  data.feedbackDelay.setMaximumDelay( (unsigned long int)( 1.1 * Stk::sampleRate() ) );
  data.feedbackDelay.setDelay( 20000 );
  data.feedbackGain = 0.001;
  data.oldFeedbackGain = 0.001;


  // Install an interrupt handler function.
  (void) signal(SIGINT, finish);

  // If realtime output, set our callback function and start the dac.
#if defined(__STK_REALTIME__)
  if ( data.realtime ) {
    try {
      dac.startStream();
    }
    catch ( RtAudioError &error ) {
      error.printMessage();
      goto cleanup;
    }
  }
#endif

  // Setup finished.
  while ( !done ) {
#if defined(__STK_REALTIME__)
    if ( data.realtime )
      // Periodically check "done" status.
      Stk::sleep( 200 );
    else
#endif
      // Call the "tick" function to process data.
      tick( NULL, NULL, 256, 0, 0, (void *)&data );
  }

  // Shut down the output stream.
#if defined(__STK_REALTIME__)
  if ( data.realtime ) {
    try {
      dac.closeStream();
    }
    catch ( RtAudioError& error ) {
      error.printMessage();
    }
  }
#endif

 cleanup:

  for ( i=0; i<(int)data.nWvOuts; i++ ) delete data.wvout[i];
  free( data.wvout );
  delete data.guitar;

	std::cout << "\nStk eguitar finished ... goodbye.\n\n";
  return 0;
}
Example #4
void test_nodeui_base(){
  
  RtAudio dac;
  RtAudio::StreamParameters parm;
  parm.deviceId = 0;//dac.getDefaultOutputDevice();
  parm.nChannels = 2;
  parm.firstChannel = 0;

  RtAudio::StreamOptions so;
  
  unsigned int bufferFrames = BUFFERSIZE;
  AudioScheduler a_sched;
  ControlScheduler c_sched;

  dac.openStream(&parm,NULL,RTAUDIO_SINT16, 44100, &bufferFrames, & saw, (void *) &a_sched);
  dac.startStream();
  
  
  init_ogl(1024,1024);
  

  GLProgram shader = make_program("shaders/naive.vert","shaders/naive.frag");
  UiBox ub1(Vec2f(-400.0,-300.0),Vec2f(800.0,600.0),{0.0,0.0,0.0,1.0},0.0,{0.0,0.0,0.0,0.0});
  
  square_vbo  = make_buffer<float>( {0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0}, 2,ARRAY_BUFFER, STATIC_DRAW);
  line_vbo = make_buffer<float>({0.0,0.0,1.0,1.0}, 2,ARRAY_BUFFER, STATIC_DRAW);
  float it = 0.0;
  scale = 0.5;
  camera_x = 200.0;
  camera_y = -50.0;

  bool running = true;
  bool pause_proc = false;
  bool pause_ready = false;
  NodeGridInputHandler ngrid(&pause_proc, &pause_ready);
  ngrid.backend.register_scheduler(&a_sched);
  ngrid.backend.register_scheduler(&c_sched);
  ngrid.shader = shader;
  ngrid.node_creators["osc"] = [](){return new Osc(440);};
  ngrid.node_creators["add-audio"] = [](){return new AddAudio();};
  ngrid.node_creators["audio-out"] = [](){return new AudioOut3();};
  ngrid.node_creators["line-play"] = [](){return new line_play(1.0,std::vector<float>({0.0})) ;};
  ngrid.node_creators["phasor"] = [](){return new Phasor(440) ;};
  ngrid.node_creators["multiply"] = []() {return new MultNode();};
  ngrid.node_creators["sub-audio"] = [](){return new SubAudio();};
  ngrid.node_creators["divide"] = []() {return new DivNode();};
  ngrid.node_creators["clip"] = []() {return new ClipNode();};
  ngrid.node_creators["quit-program"] = [&running](){return new QuitProgramNode(&running);};
  ngrid.node_creators["save-patch"] = [&ngrid](){return new SavePatchNode(&ngrid);};
  ngrid.node_creators["load-patch"] = [&ngrid](){return new LoadPatchNode(&ngrid);};
  Node * n1 = ngrid.create_and_insert_node("osc", Vec2f(0.0,0.0));
  Node *n2 = ngrid.create_and_insert_node("audio-out", Vec2f(0.0,50.0));
  Node *n3 = ngrid.create_and_insert_node("line-play 0.01 10 12 10 8",Vec2f(0.0,-100));
  ngrid.connect(n1,0,n2,0);
  ngrid.connect(n3,0,n1,0);
  mouse_move_spawner.register_listener(&ngrid);
  mouse_click_handler.register_listener(&ngrid);
  char_event_spawner.register_listener(&ngrid);
  key_event_handler.register_listener(&ngrid);
  mouse_wheel_event_spawner.register_listener(&ngrid);
  
  std::thread ngrid_thread([&](){
      while(running){
        if(pause_proc){
          pause_ready = true;
        }else{
          pause_ready = false;
          ngrid.backend.update();
        }
      }
    });

  while(running){
    float t = get_time();
    if(ngrid.change){
      shader.uniformf("camera_scale",1.0,1.0);
      shader.uniformf("camera_translate",0.0,0.0);
      bind_buffer_object(square_vbo,0);
      shader.uniformf("size",2.0,2.0);
      shader.uniformf("pos",-1.0,-1.0);
      shader.uniformf("color",0.0,0.0,0.5,1.0);
      draw_buffers_triangle_fan(4);


      shader.uniformf("camera_scale",2.0/1024.0*scale,2.0/1024.0*scale);
      shader.uniformf("camera_translate",camera_x,camera_y);
      ngrid.draw();
      ngrid.draw2(shader);
      ngrid.change = false;
    }else{
      std::cout << "Graphics sleeping.. \n";
    }
    swapbuffers();
    std::cout << "DT: " << get_time() - t << "\n";
    float t_left = 1.0/30.0 - (get_time() - t);
    std::cout << t_left << "\n";
    if(t_left > 0){
      sleep_sec(t_left);
    }
    
  }

  ngrid_thread.join();

}
Example #5
int main( int argc, char *argv[] )
{
  unsigned int bufferFrames, fs, oDevice = 0, iDevice = 0, iOffset = 0, oOffset = 0;
  char input;

  // minimal command-line checking
  if (argc < 3 || argc > 7 ) usage();

  RtAudio dac;
  if ( dac.getDeviceCount() < 1 ) {
    std::cout << "\nNo audio devices found!\n";
    exit( 1 );
  }

  channels = (unsigned int) atoi( argv[1] );
  fs = (unsigned int) atoi( argv[2] );
  if ( argc > 3 )
    iDevice = (unsigned int) atoi( argv[3] );
  if ( argc > 4 )
    oDevice = (unsigned int) atoi(argv[4]);
  if ( argc > 5 )
    iOffset = (unsigned int) atoi(argv[5]);
  if ( argc > 6 )
    oOffset = (unsigned int) atoi(argv[6]);

  double *data = (double *) calloc( channels, sizeof( double ) );

  // Let RtAudio print messages to stderr.
  dac.showWarnings( true );

  // Set our stream parameters for output only.
  bufferFrames = 256;
  RtAudio::StreamParameters oParams, iParams;
  oParams.deviceId = oDevice;
  oParams.nChannels = channels;
  oParams.firstChannel = oOffset;

  RtAudio::StreamOptions options;
  options.flags = RTAUDIO_HOG_DEVICE;
  try {
    dac.openStream( &oParams, NULL, RTAUDIO_FLOAT64, fs, &bufferFrames, &sawi, (void *)data, &options );
    std::cout << "\nStream latency = " << dac.getStreamLatency() << std::endl;

    // Start the stream
    dac.startStream();
    std::cout << "\nPlaying ... press <enter> to stop.\n";
    std::cin.get( input );

    // Stop the stream
    dac.stopStream();

    // Restart again
    std::cout << "Press <enter> to restart.\n";
    std::cin.get( input );
    dac.startStream();

    // Test abort function
    std::cout << "Playing again ... press <enter> to abort.\n";
    std::cin.get( input );
    dac.abortStream();

    // Restart another time
    std::cout << "Press <enter> to restart again.\n";
    std::cin.get( input );
    dac.startStream();

    std::cout << "Playing again ... press <enter> to close the stream.\n";
    std::cin.get( input );
  }
  catch ( RtError& e ) {
    e.printMessage();
    goto cleanup;
  }

  if ( dac.isStreamOpen() ) dac.closeStream();

  // Test non-interleaved functionality
  options.flags = RTAUDIO_NONINTERLEAVED;
  try {
    dac.openStream( &oParams, NULL, RTAUDIO_FLOAT64, fs, &bufferFrames, &sawni, (void *)data, &options );

    std::cout << "Press <enter> to start non-interleaved playback.\n";
    std::cin.get( input );

    // Start the stream
    dac.startStream();
    std::cout << "\nPlaying ... press <enter> to stop.\n";
    std::cin.get( input );
  }
  catch ( RtError& e ) {
    e.printMessage();
    goto cleanup;
  }

  if ( dac.isStreamOpen() ) dac.closeStream();

  // Now open a duplex stream.
  unsigned int bufferBytes;
  iParams.deviceId = iDevice;
  iParams.nChannels = channels;
  iParams.firstChannel = iOffset;
  options.flags = RTAUDIO_NONINTERLEAVED;
  try {
    dac.openStream( &oParams, &iParams, RTAUDIO_SINT32, fs, &bufferFrames, &inout, (void *)&bufferBytes, &options );

    bufferBytes = bufferFrames * channels * 4;

    std::cout << "Press <enter> to start duplex operation.\n";
    std::cin.get( input );

    // Start the stream
    dac.startStream();
    std::cout << "\nRunning ... press <enter> to stop.\n";
    std::cin.get( input );

    // Stop the stream
    dac.stopStream();
    std::cout << "\nStopped ... press <enter> to restart.\n";
    std::cin.get( input );

    // Restart the stream
    dac.startStream();
    std::cout << "\nRunning ... press <enter> to stop.\n";
    std::cin.get( input );
  }
  catch ( RtError& e ) {
    e.printMessage();
  }

 cleanup:
  if ( dac.isStreamOpen() ) dac.closeStream();
  free( data );

  return 0;
}
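The sawi, sawni, and inout callbacks come from elsewhere in the test program and are not reproduced here. As an illustration, a sketch of a non-interleaved sawtooth callback in the same style (channels is the global set from argv[1]; the 0.005 step size is an arbitrary choice for the sketch):

// Non-interleaved sawtooth sketch: with RTAUDIO_NONINTERLEAVED, each channel
// occupies a contiguous block of nBufferFrames doubles in the output buffer.
int sawni( void *outputBuffer, void *inputBuffer, unsigned int nBufferFrames,
           double streamTime, RtAudioStreamStatus status, void *data )
{
  double *buffer = (double *) outputBuffer;
  double *lastValues = (double *) data;   // per-channel phase state from main()
  if ( status ) std::cout << "Stream underflow detected!" << std::endl;
  for ( unsigned int j = 0; j < channels; j++ ) {
    for ( unsigned int i = 0; i < nBufferFrames; i++ ) {
      *buffer++ = lastValues[j];
      lastValues[j] += 0.005 * ( j + 1 + ( j * 0.1 ) );
      if ( lastValues[j] >= 1.0 ) lastValues[j] -= 2.0;
    }
  }
  return 0;
}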
Example #6
File: play.cpp  Project: johnty/stk
int main(int argc, char *argv[])
{
  // Minimal command-line checking.
  if ( argc < 3 || argc > 4 ) usage();

  // Set the global sample rate before creating class instances.
  Stk::setSampleRate( (StkFloat) atof( argv[2] ) );

  // Initialize our WvIn and RtAudio pointers.
  RtAudio dac;
  FileWvIn input;
  FileLoop inputLoop;

  // Try to load the soundfile.
  try {
    input.openFile( argv[1] );
    inputLoop.openFile( argv[1] );
  }
  catch ( StkError & ) {
    exit( 1 );
  }

  // Set the input read rate based on the default STK sample rate.  The
  // stream callback below plays inputLoop, so apply the rate to it as well.
  double rate = input.getFileRate() / Stk::sampleRate();
  if ( argc == 4 ) rate *= atof( argv[3] );
  input.setRate( rate );
  inputLoop.setRate( rate );

  input.ignoreSampleRateChange();

  // Find out how many channels we have.
  int channels = input.channelsOut();

  // Figure out how many bytes in an StkFloat and setup the RtAudio stream.
  RtAudio::StreamParameters parameters;
  parameters.deviceId = dac.getDefaultOutputDevice();
  parameters.nChannels = channels;
  RtAudioFormat format = ( sizeof(StkFloat) == 8 ) ? RTAUDIO_FLOAT64 : RTAUDIO_FLOAT32;
  unsigned int bufferFrames = RT_BUFFER_SIZE;
  try {
    dac.openStream( &parameters, NULL, format, (unsigned int)Stk::sampleRate(), &bufferFrames, &tick, (void *)&inputLoop );
  }
  catch ( RtAudioError &error ) {
    error.printMessage();
    goto cleanup;
  }

  // Install an interrupt handler function.
  (void) signal(SIGINT, finish);

  // Resize the StkFrames object appropriately.
  frames.resize( bufferFrames, channels );

  try {
    dac.startStream();
  }
  catch ( RtAudioError &error ) {
    error.printMessage();
    goto cleanup;
  }

  // Block waiting until callback signals done.
  while ( !done )
    Stk::sleep( 100 );
  
  // By returning a non-zero value in the callback above, the stream
  // is automatically stopped.  But we should still close it.
  try {
    dac.closeStream();
  }
  catch ( RtAudioError &error ) {
    error.printMessage();
  }

 cleanup:
  return 0;
}
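The tick() callback and the frames/done globals are defined elsewhere in play.cpp. A sketch consistent with the comments above (a non-zero return stops the stream); treat it as an assumption rather than the project's exact code:

// Hypothetical tick(): fill the global StkFrames buffer from the FileLoop
// passed as user data and copy it, interleaved, into the output buffer.
int tick( void *outputBuffer, void *inputBuffer, unsigned int nBufferFrames,
          double streamTime, RtAudioStreamStatus status, void *dataPointer )
{
  FileLoop *input = (FileLoop *) dataPointer;
  StkFloat *samples = (StkFloat *) outputBuffer;

  input->tick( frames );
  for ( unsigned int i = 0; i < frames.size(); i++ )
    *samples++ = frames[i];

  if ( input->isFinished() ) {
    done = true;
    return 1;   // non-zero return tells RtAudio to stop the stream
  }
  return 0;
}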
Example #7
//-----------------------------------------------------------------------------
// name: main()
// desc: entry point
//-----------------------------------------------------------------------------
int main( int argc, char ** argv )
{

  RtMidiIn *midiin = new RtMidiIn();

  // Check available ports.
  unsigned int nPorts = midiin->getPortCount();
  if ( nPorts == 0 ) {
    std::cout << "No ports available!\n";
    //goto cleanup;
  }

  midiin->openPort( 0 );

  // Set our callback function.  This should be done immediately after
  // opening the port to avoid having incoming messages written to the
  // queue.
  midiin->setCallback( &mycallback );

  // Don't ignore sysex, timing, or active sensing messages.
  midiin->ignoreTypes( false, false, false );

  std::cout << "\nReading MIDI input ... press <enter> to quit.\n";
  char input;
  std::cin.get(input);


    // instantiate RtAudio object
    RtAudio audio;
    // variables
    unsigned int bufferBytes = 0;
    // frame size
    unsigned int numFrames = 512;
    
    // check for audio devices
    if( audio.getDeviceCount() < 1 )
    {
        // nopes
        cout << "no audio devices found!" << endl;
        exit( 1 );
    }
    
    // let RtAudio print messages to stderr.
    audio.showWarnings( true );
    
    // set input and output parameters
    RtAudio::StreamParameters iParams, oParams;
    iParams.deviceId = audio.getDefaultInputDevice();
    iParams.nChannels = MY_CHANNELS;
    iParams.firstChannel = 0;
    oParams.deviceId = audio.getDefaultOutputDevice();
    oParams.nChannels = MY_CHANNELS;
    oParams.firstChannel = 0;
    
    // create stream options
    RtAudio::StreamOptions options;
    
    // go for it
    try {
        // open a stream
        audio.openStream( &oParams, &iParams, MY_FORMAT, MY_SRATE, &numFrames, &callme, NULL, &options );
    }
    catch( RtError& e )
    {
        // error!
        cout << e.getMessage() << endl;
        exit( 1 );
    }
    
    // compute
    bufferBytes = numFrames * MY_CHANNELS * sizeof(SAMPLE);
    
    // test RtAudio functionality for reporting latency.
    cout << "stream latency: " << audio.getStreamLatency() << " frames" << endl;
    
    for( int i = 0; i < MY_NUMSTRINGS; i++ )
    {
        // initialize each string
        g_ks[i].init( MY_SRATE*2, 440, MY_SRATE );
    }
    
    // go for it
    try {
        // start stream
        audio.startStream();
        char input;
        std::cout << "Press <enter> to quit: ";
        std::cin.get( input );
        
        // stop the stream.
        audio.stopStream();
    }
    catch( RtError& e )
    {
        // print error message
        cout << e.getMessage() << endl;
        goto cleanup;
    }
    
cleanup:
    // close if open
    if( audio.isStreamOpen() )
        audio.closeStream();
    delete midiin;
    
    // done
    return 0;
}
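mycallback, the RtMidi input handler installed above, is not listed. A minimal sketch with the RtMidiIn callback signature (the byte-dump body is only an illustration):

// Hypothetical MIDI callback: print every byte of each incoming message and
// its timestamp delta.
void mycallback( double deltatime, std::vector<unsigned char> *message, void *userData )
{
  unsigned int nBytes = message->size();
  for ( unsigned int i = 0; i < nBytes; i++ )
    std::cout << "Byte " << i << " = " << (int)message->at(i) << ", ";
  if ( nBytes > 0 )
    std::cout << "stamp = " << deltatime << std::endl;
}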
Example #8
//-----------------------------------------------------------------------------
// name: main()
// desc: entry point
//-----------------------------------------------------------------------------
int main( int argc, char ** argv )
{
    // instantiate RtAudio object
    RtAudio audio;
    // variables
    unsigned int bufferBytes = 0;
    // frame size
    unsigned int bufferFrames = 512;
    
    // check for audio devices
    if( audio.getDeviceCount() < 1 )
    {
        // nopes
        cout << "no audio devices found!" << endl;
        exit( 1 );
    }

    // initialize GLUT
    glutInit( &argc, argv );
    // init gfx
    initGfx();

    // let RtAudio print messages to stderr.
    audio.showWarnings( true );

    // set input and output parameters
    RtAudio::StreamParameters iParams, oParams;
    iParams.deviceId = audio.getDefaultInputDevice();
    iParams.nChannels = MY_CHANNELS;
    iParams.firstChannel = 0;
    oParams.deviceId = audio.getDefaultOutputDevice();
    oParams.nChannels = MY_CHANNELS;
    oParams.firstChannel = 0;
    
    // create stream options
    RtAudio::StreamOptions options;

    // go for it
    try {
        // open a stream
        audio.openStream( &oParams, &iParams, MY_FORMAT, MY_SRATE, &bufferFrames, &callme, (void *)&bufferBytes, &options );
    }
    catch( RtError& e )
    {
        // error!
        cout << e.getMessage() << endl;
        exit( 1 );
    }

    // compute
    bufferBytes = bufferFrames * MY_CHANNELS * sizeof(SAMPLE);
    // allocate global buffer
    g_bufferSize = bufferFrames;
    g_buffer = new SAMPLE[g_bufferSize];
    memset( g_buffer, 0, sizeof(SAMPLE)*g_bufferSize );

    // go for it
    try {
        // start stream
        audio.startStream();

        // let GLUT handle the current thread from here
        glutMainLoop();
        
        // stop the stream.
        audio.stopStream();
    }
    catch( RtError& e )
    {
        // print error message
        cout << e.getMessage() << endl;
        goto cleanup;
    }
    
cleanup:
    // close if open
    if( audio.isStreamOpen() )
        audio.closeStream();
    
    // done
    return 0;
}
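callme, the audio callback, is defined elsewhere in this program. Given that bufferBytes is passed as the user-data pointer and g_buffer feeds the GLUT display, a plausible sketch (an assumption, not the original) is a pass-through that also keeps a copy of one channel for visualization:

// Hypothetical callme(): copy input straight through to output and keep the
// first channel in g_buffer for the OpenGL waveform display.
int callme( void *outputBuffer, void *inputBuffer, unsigned int numFrames,
            double streamTime, RtAudioStreamStatus status, void *userData )
{
    SAMPLE *output = (SAMPLE *) outputBuffer;
    SAMPLE *input  = (SAMPLE *) inputBuffer;
    unsigned int bytes = *(unsigned int *) userData;   // bufferBytes from main()

    memcpy( output, input, bytes );
    for ( unsigned int i = 0; i < numFrames; i++ )
        g_buffer[i] = input[i * MY_CHANNELS];           // first channel only
    return 0;
}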
Example #9
File: demo.cpp  Project: Ahbee/stk
int main( int argc, char *argv[] )
{
  TickData data;
  int i;

#if defined(__STK_REALTIME__)
  RtAudio dac;
#endif

  // If you want to change the default sample rate (set in Stk.h), do
  // it before instantiating any objects!  If the sample rate is
  // specified in the command line, it will override this setting.
  Stk::setSampleRate( 44100.0 );

  // Depending on how you compile STK, you may need to explicitly set
  // the path to the rawwave directory.
  Stk::setRawwavePath( "../../rawwaves/" );

  // By default, warning messages are not printed.  If we want to see
  // them, we need to specify that here.
  Stk::showWarnings( true );

  // Check the command-line arguments for errors and to determine
  // the number of WvOut objects to be instantiated (in utilities.cpp).
  data.nWvOuts = checkArgs( argc, argv );
  data.wvout = (WvOut **) calloc( data.nWvOuts, sizeof(WvOut *) );

  // Instantiate the instrument(s) type from the command-line argument
  // (in utilities.cpp).
  data.nVoices = countVoices( argc, argv );
  data.instrument = (Instrmnt **) calloc( data.nVoices, sizeof(Instrmnt *) );
  data.currentVoice = voiceByName( argv[1], &data.instrument[0] );
  if ( data.currentVoice < 0 ) {
    free( data.wvout );
    free( data.instrument );
    usage(argv[0]);
  }
  // If there was no error allocating the first voice, we should be fine for more.
  for ( i=1; i<data.nVoices; i++ )
    voiceByName( argv[1], &data.instrument[i] );

  data.voicer = (Voicer *) new Voicer( 0.0 );
  for ( i=0; i<data.nVoices; i++ )
    data.voicer->addInstrument( data.instrument[i] );

  // Parse the command-line flags, instantiate WvOut objects, and
  // instantiate the input message controller (in utilities.cpp).
  try {
    data.realtime = parseArgs( argc, argv, data.wvout, data.messager );
  }
  catch (StkError &) {
    goto cleanup;
  }

  // If realtime output, allocate the dac here.
#if defined(__STK_REALTIME__)
  if ( data.realtime ) {
    RtAudioFormat format = ( sizeof(StkFloat) == 8 ) ? RTAUDIO_FLOAT64 : RTAUDIO_FLOAT32;
    RtAudio::StreamParameters parameters;
    parameters.deviceId = dac.getDefaultOutputDevice();
    parameters.nChannels = data.channels;
    unsigned int bufferFrames = RT_BUFFER_SIZE;
    try {
      dac.openStream( &parameters, NULL, format, (unsigned int)Stk::sampleRate(), &bufferFrames, &tick, (void *)&data );
    }
    catch ( RtAudioError& error ) {
      error.printMessage();
      goto cleanup;
    }
  }
#endif

  // Set the reverb parameters.
  data.reverb.setT60( data.t60 );
  data.reverb.setEffectMix(0.2);

  // Install an interrupt handler function.
  (void) signal(SIGINT, finish);

  // If realtime output, set our callback function and start the dac.
#if defined(__STK_REALTIME__)
  if ( data.realtime ) {
    try {
      dac.startStream();
    }
    catch ( RtAudioError &error ) {
      error.printMessage();
      goto cleanup;
    }
  }
#endif

  // Setup finished.
  while ( !done ) {
#if defined(__STK_REALTIME__)
    if ( data.realtime )
      // Periodically check "done" status.
      Stk::sleep( 200 );
    else
#endif
      // Call the "tick" function to process data.
      tick( NULL, NULL, 256, 0, 0, (void *)&data );
  }

  // Shut down the output stream.
#if defined(__STK_REALTIME__)
  if ( data.realtime ) {
    try {
      dac.closeStream();
    }
    catch ( RtAudioError& error ) {
      error.printMessage();
    }
  }
#endif

 cleanup:

  for ( i=0; i<(int)data.nWvOuts; i++ ) delete data.wvout[i];
  free( data.wvout );

  delete data.voicer;

  for ( i=0; i<data.nVoices; i++ ) delete data.instrument[i];
  free( data.instrument );

	std::cout << "\nStk demo finished ... goodbye.\n\n";
  return 0;
}
Example #10
int main(const int argc, const char *argv[]) {
	RtAudio adc;
	unsigned int deviceCount = adc.getDeviceCount();
	if (deviceCount < 1) {
		cout << endl << "No audio devices found!" << endl;
		exit(0);
	}

	unsigned int inputDevice = adc.getDefaultInputDevice();
	unsigned int outputDevice = adc.getDefaultOutputDevice();
	for (int i=0; i<argc; i++) {
		if (strcmp(argv[i], "-devices") == 0) {
			// Scan through devices for various capabilities
			showDevices(deviceCount, adc);
			exit(0);
		}
		if (strcmp(argv[i], "-input") == 0) {
			if (i == argc-1) {
				usage();
				exit(0);
			}
			inputDevice=atoi(argv[++i]);
			validateDevice(inputDevice, deviceCount, adc, true);
		}
		if (strcmp(argv[i], "-output") == 0) {
			if (i == argc-1) {
				usage();
				exit(0);
			}
			outputDevice=atoi(argv[++i]);
			validateDevice(outputDevice, deviceCount, adc, false);
		}
	}

	// Initialise DSP thread
	// Initialise GUI

	unsigned int sampleRate = 44100;
	unsigned int bufferFrames = 512;
	unsigned int bufferBytes = 0;
	RtAudio::StreamParameters inputParameters;
	inputParameters.deviceId = inputDevice;
	inputParameters.nChannels = 2;
	inputParameters.firstChannel = 0;

	RtAudio::StreamParameters outputParameters;
	outputParameters.deviceId = outputDevice;
	outputParameters.nChannels = 2;
	outputParameters.firstChannel = 0;

	try {
		adc.openStream(&outputParameters, &inputParameters, RTAUDIO_SINT16, sampleRate,
				&bufferFrames, &inout, &bufferBytes);
		adc.startStream();
	} catch (RtAudioError& e) {
		e.printMessage();
		exit(0);
	}
	// adc.openStream could have adjusted bufferFrames.
	// Set the user data value to the buffer size in bytes so that the inout
	// callback function knows how much data to copy: 2 channels * 2 bytes per
	// RTAUDIO_SINT16 sample.  (Multiplying by 4 would overrun the SINT16 buffers.)
	bufferBytes = bufferFrames * 2 * 2;

	// Buffer management can now be initialised.  The inout callback may already
	// be requesting buffers, but none are handed out until buffer management
	// has been initialised.
	cout << "buffer size in bytes is " << bufferBytes << endl;
	// TODO protect with mutex
	bufferManager = new BufferManager(bufferBytes, maxBuffers);


	char input;
	cout << endl << "Recording ... press <enter> to quit." << endl;
	cin.get(input);
	cout << "Terminating" << endl;

	try {
		// Stop the stream
		adc.stopStream();
	} catch (RtAudioError& e) {
		e.printMessage();
	}
	if (adc.isStreamOpen())
		adc.closeStream();

	// TODO shut down DSP chain, release all buffers
	// TODO shut down Display chain, release all buffers

	delete bufferManager;

	cout << "Terminated" << endl;
	return 0;
}
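The inout callback is not listed; the comments above imply it behaves like RtAudio's duplex example, copying each input block to the output using the byte count passed as user data (and, in this program, presumably also handing data to the BufferManager). A minimal pass-through sketch:

// Pass input straight through to output; *data holds the block size in bytes,
// which is why main() sets bufferBytes right after openStream.
int inout( void *outputBuffer, void *inputBuffer, unsigned int nBufferFrames,
		double streamTime, RtAudioStreamStatus status, void *data )
{
	unsigned int *bytes = (unsigned int *) data;
	memcpy( outputBuffer, inputBuffer, *bytes );
	return 0;
}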
Example #11
int main( int argc, char *argv[] )
{
  unsigned int channels, fs, bufferFrames, device = 0, offset = 0;
  double time = 2.0;
  FILE *fd;

  // minimal command-line checking
  if ( argc < 3 || argc > 6 ) usage();

  RtAudio adc;
  if ( adc.getDeviceCount() < 1 ) {
    std::cout << "\nNo audio devices found!\n";
    exit( 1 );
  }

  channels = (unsigned int) atoi( argv[1] );
  fs = (unsigned int) atoi( argv[2] );
  if ( argc > 3 )
    time = (double) atof( argv[3] );
  if ( argc > 4 )
    device = (unsigned int) atoi( argv[4] );
  if ( argc > 5 )
    offset = (unsigned int) atoi( argv[5] );

  // Let RtAudio print messages to stderr.
  adc.showWarnings( true );

  // Set our stream parameters for input only.
  bufferFrames = 512;
  RtAudio::StreamParameters iParams;
  iParams.deviceId = device;
  iParams.nChannels = channels;
  iParams.firstChannel = offset;

  InputData data;
  data.buffer = 0;
  try {
    adc.openStream( NULL, &iParams, FORMAT, fs, &bufferFrames, &input, (void *)&data );
  }
  catch ( RtError& e ) {
    std::cout << '\n' << e.getMessage() << '\n' << std::endl;
    goto cleanup;
  }

  data.bufferBytes = bufferFrames * channels * sizeof( MY_TYPE );
  data.totalFrames = (unsigned long) (fs * time);
  data.frameCounter = 0;
  data.channels = channels;
  unsigned long totalBytes;
  totalBytes = data.totalFrames * channels * sizeof( MY_TYPE );

  // Allocate the entire data buffer before starting stream.
  data.buffer = (MY_TYPE*) malloc( totalBytes );
  if ( data.buffer == 0 ) {
    std::cout << "Memory allocation error ... quitting!\n";
    goto cleanup;
  }

  try {
    adc.startStream();
  }
  catch ( RtError& e ) {
    std::cout << '\n' << e.getMessage() << '\n' << std::endl;
    goto cleanup;
  }

  std::cout << "\nRecording for " << time << " seconds ... writing file 'record.raw' (buffer frames = " << bufferFrames << ")." << std::endl;
  while ( adc.isStreamRunning() ) {
    SLEEP( 100 ); // wake every 100 ms to check if we're done
  }

  // Now write the entire data to the file.
  fd = fopen( "record.raw", "wb" );
  fwrite( data.buffer, sizeof( MY_TYPE ), data.totalFrames * channels, fd );
  fclose( fd );

 cleanup:
  if ( adc.isStreamOpen() ) adc.closeStream();
  if ( data.buffer ) free( data.buffer );

  return 0;
}
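The input callback and the InputData struct are defined elsewhere; they follow RtAudio's record example, where each incoming block is appended to the preallocated buffer and the callback returns 2 (stop and drain) once totalFrames have been captured. A sketch under that assumption:

// Hypothetical input(): copy each incoming block into data.buffer and stop the
// stream once the requested number of frames has been recorded.
int input( void *outputBuffer, void *inputBuffer, unsigned int nBufferFrames,
           double streamTime, RtAudioStreamStatus status, void *data )
{
  InputData *iData = (InputData *) data;

  unsigned int frames = nBufferFrames;
  if ( iData->frameCounter + nBufferFrames > iData->totalFrames ) {
    frames = iData->totalFrames - iData->frameCounter;
    iData->bufferBytes = frames * iData->channels * sizeof( MY_TYPE );
  }

  unsigned long offset = iData->frameCounter * iData->channels;
  memcpy( iData->buffer + offset, inputBuffer, iData->bufferBytes );
  iData->frameCounter += frames;

  if ( iData->frameCounter >= iData->totalFrames ) return 2;
  return 0;
}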
Example #12
// ========
// = Main =
// ========
// Entry point
int main (int argc, char *argv[])
{
    
    // RtAudio config + init

    // pointer to RtAudio object
    RtAudio *  audio = NULL;

    // create the object
    try
    {
        audio = new RtAudio();
    }
    catch( RtError & err ) {
        err.printMessage();
        exit(1);
    }

    if( audio->getDeviceCount() < 1 )
    {
        // nopes
        cout << "no audio devices found!" << endl;
        exit( 1 );
    }
        
    // let RtAudio print messages to stderr.
    audio->showWarnings( true );

    // set input and output parameters
    RtAudio::StreamParameters iParams, oParams;
    iParams.deviceId = audio->getDefaultInputDevice();
    iParams.nChannels = 1;
    iParams.firstChannel = 0;
    oParams.deviceId = audio->getDefaultOutputDevice();
    oParams.nChannels = 1;
    oParams.firstChannel = 0;
        
    // create stream options
    RtAudio::StreamOptions options;

    // set the callback and start stream
    try
    {
        audio->openStream( &oParams, &iParams, RTAUDIO_FLOAT64, MY_SRATE, &g_buffSize, &audioCallback, NULL, &options);
        
        cerr << "Buffer size defined by RtAudio: " << g_buffSize << endl;
        
        // allocate the buffer for the fft
        g_fftBuff = new float[g_buffSize * ZPF];
        if ( g_fftBuff == NULL ) {
            cerr << "Something went wrong when creating the fft buffers" << endl;
            exit (1);
        }
        
        // allocate the buffer for the time domain window
        g_window = new float[g_buffSize];
        if ( g_window == NULL ) {
            cerr << "Something went wrong when creating the window" << endl;
            exit (1);
        }

        // create a hanning window
        make_window( g_window, g_buffSize );
        
        // start the audio stream
        audio->startStream();
        
        // test RtAudio functionality for reporting latency.
        cout << "stream latency: " << audio->getStreamLatency() << " frames" << endl;
    }
    catch( RtError & err )
    {
        err.printMessage();
        goto cleanup;
    }


    // ============
    // = GL stuff =
    // ============

    // initialize GLUT
    glutInit( &argc, argv );
    // double buffer, use rgb color, enable depth buffer
    glutInitDisplayMode( GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH );
    // initialize the window size
    glutInitWindowSize( g_width, g_height );
    // set the window position
    glutInitWindowPosition( 100, 100 );
    // create the window
    glutCreateWindow( "Hello GL" );
    //glutEnterGameMode();

    // set the idle function - called when idle
    glutIdleFunc( idleFunc );
    // set the display function - called when redrawing
    glutDisplayFunc( displayFunc );
    // set the reshape function - called when client area changes
    glutReshapeFunc( reshapeFunc );
    // set the keyboard function - called on keyboard events
    glutKeyboardFunc( keyboardFunc );
    // set the mouse function - called on mouse stuff
    glutMouseFunc( mouseFunc );
    // set the special function - called on special keys events (fn, arrows, pgDown, etc)
    glutSpecialFunc( specialFunc );

    // do our own initialization
    initialize();

    // let GLUT handle the current thread from here
    glutMainLoop();

        
    // if we get here, stop!
    try
    {
        audio->stopStream();
    }
    catch( RtError & err )
    {
        err.printMessage();
    }

    // Clean up
    cleanup:
    if(audio)
    {
        audio->closeStream();
        delete audio;
    }

    
    return 0;
}
Example #13
int main (int argc, char ** argv)
{
    
    //parse tempo 
    if (argc>2)
    {
        cerr<<"Error in arguments\n";
        printHelp();
        exit(1);
    }
    else if (argc==2) 
    {
        g_tempo = atoi(argv[1]);
        if (g_tempo < 40 || g_tempo > 200)
        {
            cerr<<"Tempo out of bounds!\n";
            printHelp();
            exit(1);
        }
        tempoChange();
    }
    
    // set up fluid synth stuff
    // TODO: error checking!!!!
    g_settings = new_fluid_settings(); 
    g_synth = new_fluid_synth( g_settings );
    g_metronome = new_fluid_synth( g_settings );  
    
    
    //fluid_player_t* player;
    //player = new_fluid_player(g_synth);
    //fluid_player_add(player, "backing.mid");
    //fluid_player_play(player);

    
    
    if (fluid_synth_sfload(g_synth, "piano.sf2", 1) == -1)
    {
        cerr << "Error loading sound font" << endl;
        exit(1);
    }
    
    if (fluid_synth_sfload(g_metronome, "drum.sf2", 1) == -1)
    {
        cerr << "Error loading sound font" << endl;
        exit(1);
    }
    
    
    // RtMidi + RtAudio config + init

    // pointers to the RtMidiIn and RtAudio objects
    RtMidiIn * midiin = NULL;
    RtAudio *  audio = NULL;
    unsigned int bufferSize = 512; //g_sixteenth/100;

    // MIDI config + init
    try 
    {
        midiin = new RtMidiIn();
    }
    catch( RtError & err ) {
        err.printMessage();
       // goto cleanup;
    }
    
    // Check available ports.
    if ( midiin->getPortCount() == 0 )
    {
        std::cout << "No ports available!\n";
       // goto cleanup;
    }
    // open a port: prefer port 1 when more than two ports are present,
    // otherwise fall back to the first available port
    if ( midiin->getPortCount() > 2 )
        midiin->openPort( 1 );
    else 
        midiin->openPort( 0 );

    // set midi callback
    midiin->setCallback( &midi_callback );

    // Don't ignore sysex, timing, or active sensing messages.
    midiin->ignoreTypes( false, false, false );

    // create the object
    try
    {
        audio = new RtAudio();
        cerr << "buffer size: " << bufferSize << endl;
    }
    catch( RtError & err ) {
        err.printMessage();
        exit(1);
    }

    if( audio->getDeviceCount() < 1 )
    {
        // nopes
        cout << "no audio devices found!" << endl;
        exit( 1 );
    }
        
    // let RtAudio print messages to stderr.
    audio->showWarnings( true );

    // set input and output parameters
    RtAudio::StreamParameters iParams, oParams;
    iParams.deviceId = audio->getDefaultInputDevice();
    iParams.nChannels = 1;
    iParams.firstChannel = 0;
    oParams.deviceId = audio->getDefaultOutputDevice();
    oParams.nChannels = 2;
    oParams.firstChannel = 0;
        
    // create stream options
    RtAudio::StreamOptions options;

    // set the callback and start stream
    try
    {
        audio->openStream( &oParams, &iParams, RTAUDIO_FLOAT32, MY_SRATE, &bufferSize, &audioCallback, NULL, &options);
        audio->startStream();
        
        // test RtAudio functionality for reporting latency.
        cout << "stream latency: " << audio->getStreamLatency() << " frames" << endl;
    }
    catch( RtError & err )
    {
        err.printMessage();
        goto cleanup;
    }

    // wait for user input
    cout << "Type CTRL+C to quit:";
    
    //initialize graphics
    gfxInit(&argc,argv);
    
    // if we get here, stop!
    try
    {
        audio->stopStream();
    }
    catch( RtError & err )
    {
        err.printMessage();
    }

    // Clean up
    cleanup:
    if(audio)
    {
        audio->closeStream();
        delete audio;
    }

    
    return 0;
}
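audioCallback is not shown. Since the output stream is stereo RTAUDIO_FLOAT32 and g_synth is a FluidSynth synthesizer, a minimal sketch could render the synth into the interleaved output buffer with fluid_synth_write_float(); this ignores the metronome synth and the input channel, so it is only an illustration:

// Hypothetical audioCallback: render g_synth into the interleaved stereo float
// output.  Left samples start at offset 0, right at offset 1, both advancing
// by 2 (one stereo frame).
int audioCallback( void *outputBuffer, void *inputBuffer, unsigned int numFrames,
                   double streamTime, RtAudioStreamStatus status, void *userData )
{
    float *out = (float *) outputBuffer;
    fluid_synth_write_float( g_synth, numFrames, out, 0, 2, out, 1, 2 );
    return 0;
}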
Example #14
int main( int argc, char *argv[] )
{
  unsigned int channels, fs, bufferFrames, device = 0, offset = 0;
  char *file;

  // minimal command-line checking
  if ( argc < 4 || argc > 6 ) usage();

  RtAudio dac;
  if ( dac.getDeviceCount() < 1 ) {
    std::cout << "\nNo audio devices found!\n";
    exit( 0 );
  }

  channels = (unsigned int) atoi( argv[1] );
  fs = (unsigned int) atoi( argv[2] );
  file = argv[3];
  if ( argc > 4 )
    device = (unsigned int) atoi( argv[4] );
  if ( argc > 5 )
    offset = (unsigned int) atoi( argv[5] );

  OutputData data;
  data.fd = fopen( file, "rb" );
  if ( !data.fd ) {
    std::cout << "Unable to find or open file!\n";
    exit( 1 );
  }

  // Set our stream parameters for output only.
  bufferFrames = 512;
  RtAudio::StreamParameters oParams;
  oParams.deviceId = device;
  oParams.nChannels = channels;
  oParams.firstChannel = offset;

  if ( device == 0 )
    oParams.deviceId = dac.getDefaultOutputDevice();

  data.channels = channels;
  try {
    dac.openStream( &oParams, NULL, FORMAT, fs, &bufferFrames, &output, (void *)&data );
    dac.startStream();
  }
  catch ( RtAudioError& e ) {
    std::cout << '\n' << e.getMessage() << '\n' << std::endl;
    goto cleanup;
  }

  std::cout << "\nPlaying raw file " << file << " (buffer frames = " << bufferFrames << ")." << std::endl;
  while ( 1 ) {
    SLEEP( 100 ); // wake every 100 ms to check if we're done
    if ( dac.isStreamRunning() == false ) break;
  }

 cleanup:
  fclose( data.fd );
  dac.closeStream();

  return 0;
}
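The output callback and OutputData struct are not listed; they follow RtAudio's playraw example, reading interleaved frames straight from the open FILE* and zero-padding (then stopping) when the file runs out. A sketch under that assumption, where MY_TYPE stands for the C sample type matching the FORMAT macro:

// Hypothetical output(): read the next block of interleaved samples from the
// raw file; when fewer frames than requested remain, zero the tail and return
// 1 so RtAudio drains and stops the stream.
int output( void *outputBuffer, void *inputBuffer, unsigned int nBufferFrames,
            double streamTime, RtAudioStreamStatus status, void *data )
{
  OutputData *oData = (OutputData *) data;

  unsigned int count = fread( outputBuffer, oData->channels * sizeof( MY_TYPE ),
                              nBufferFrames, oData->fd );
  if ( count < nBufferFrames ) {
    unsigned int bytes = ( nBufferFrames - count ) * oData->channels * sizeof( MY_TYPE );
    unsigned int startByte = count * oData->channels * sizeof( MY_TYPE );
    memset( (char *)outputBuffer + startByte, 0, bytes );
    return 1;
  }
  return 0;
}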
Example #15
int main( int argc, char *argv[] )
{
	//Dekrispator init
	randomGen_init();
	Synth_Init();
	//end Dekrispator init
	
//	FILE* f = fopen("bla.txt","wb");
//	fclose(f);
	
  TickData data;
  RtAudio dac;
  int i;

  //if ( argc < 2 || argc > 6 ) usage();

  // If you want to change the default sample rate (set in Stk.h), do
  // it before instantiating any objects!  If the sample rate is
  // specified in the command line, it will override this setting.
  Stk::setSampleRate( 44100.0 );

	{
	// List the available MIDI input ports, then open a messager on each one.
	RtMidiIn *midiin = new RtMidiIn();
	unsigned int i = 0, nPorts = midiin->getPortCount();
	if ( nPorts == 0 ) {
		std::cout << "No input Midi ports available, just running demo mode." << std::endl;
		delete midiin;
		midiin = 0;
	} else {
		for ( i=0; i<nPorts; i++ ) {
			std::string portName = midiin->getPortName(i);
			std::cout << "  Input port #" << i << ": " << portName << '\n';
		}
		delete midiin;
		midiin = 0;

		for ( i=0; i<nPorts && i<MAX_MIDI_DEVICES; i++ ) {
			data.messagers[data.numMessagers++].startMidiInput(i);
		}
	}
	}
	
	
  // Parse the command-line arguments.
  unsigned int port = 2001;
  for ( i=1; i<argc; i++ ) {
    if ( !strcmp( argv[i], "-is" ) ) {
      if ( i+1 < argc && argv[i+1][0] != '-' ) port = atoi(argv[++i]);
      if ( data.numMessagers < MAX_MIDI_DEVICES )
        data.messagers[data.numMessagers++].startSocketInput( port );
    }
    else if ( !strcmp( argv[i], "-ip" ) )
    {
      if ( data.numMessagers < MAX_MIDI_DEVICES )
        data.messagers[data.numMessagers++].startStdInput();
    }
    else if ( !strcmp( argv[i], "-s" ) && ( i+1 < argc ) && argv[i+1][0] != '-')
      Stk::setSampleRate( atoi(argv[++i]) );
    else
      usage();
  }

  // Allocate the dac here.
  RtAudioFormat format = ( sizeof(StkFloat) == 8 ) ? RTAUDIO_FLOAT64 : RTAUDIO_FLOAT32;
  RtAudio::StreamParameters parameters;
  parameters.deviceId = dac.getDefaultOutputDevice();
  parameters.nChannels = 2;
  unsigned int bufferFrames = RT_BUFFER_SIZE;
  try {
    dac.openStream( &parameters, NULL, format, (unsigned int)Stk::sampleRate(), &bufferFrames, &tick, (void *)&data );
  }
  catch ( RtAudioError& error ) {
    error.printMessage();
    goto cleanup;
  }

  data.reverbs[0].setT60( data.t60 );
  data.reverbs[0].setEffectMix( 0.5 );
  data.reverbs[1].setT60( 2.0 );
  data.reverbs[1].setEffectMix( 0.2 );

 
  data.rateScaler = 22050.0 / Stk::sampleRate();

  // Install an interrupt handler function.
  (void) signal( SIGINT, finish );

  // Set our callback function and start the dac.
  try {
    dac.startStream();
  }
  catch ( RtAudioError &error ) {
    error.printMessage();
    goto cleanup;
  }

  // Setup finished.
  while ( !done ) {
    // Periodically check "done" status.
    Stk::sleep( 50 );
  }

  // Shut down the output stream.
  try {
    dac.closeStream();
  }
  catch ( RtAudioError& error ) {
    error.printMessage();
  }

 cleanup:

  return 0;

}
Example #16
int main( int argc, char *argv[] )
{
  unsigned int bufferFrames, fs, device = 0, offset = 0;

  // minimal command-line checking
  if (argc < 3 || argc > 5 ) usage();

  RtAudio dac;
  if ( dac.getDeviceCount() < 1 ) {
    std::cout << "\nNo audio devices found!\n";
    exit( 1 );
  }

  channels = (unsigned int) atoi( argv[1] );
  fs = (unsigned int) atoi( argv[2] );
  if ( argc > 3 )
    device = (unsigned int) atoi( argv[3] );
  if ( argc > 4 )
    offset = (unsigned int) atoi( argv[4] );

  double *data = (double *) calloc( channels, sizeof( double ) );

  // Let RtAudio print messages to stderr.
  dac.showWarnings( true );

  // Set our stream parameters for output only.
  bufferFrames = 256;
  RtAudio::StreamParameters oParams;
  oParams.deviceId = device;
  oParams.nChannels = channels;
  oParams.firstChannel = offset;

  options.flags |= RTAUDIO_HOG_DEVICE;
  options.flags |= RTAUDIO_SCHEDULE_REALTIME;
#if !defined( USE_INTERLEAVED )
  options.flags |= RTAUDIO_NONINTERLEAVED;
#endif
  try {
    dac.openStream( &oParams, NULL, FORMAT, fs, &bufferFrames, &saw, (void *)data, &options );
    dac.startStream();
  }
  catch ( RtError& e ) {
    e.printMessage();
    goto cleanup;
  }

  char input;
  //std::cout << "Stream latency = " << dac.getStreamLatency() << "\n" << std::endl;
  std::cout << "\nPlaying ... press <enter> to quit (buffer size = " << bufferFrames << ").\n";
  std::cin.get( input );

  try {
    // Stop the stream
    dac.stopStream();
  }
  catch ( RtError& e ) {
    e.printMessage();
  }

 cleanup:
  if ( dac.isStreamOpen() ) dac.closeStream();
  free( data );

  return 0;
}
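saw, channels, and options are declared outside this main(). A sketch of an interleaved sawtooth callback consistent with the per-channel double array allocated above (the file-scope declarations and the 0.005 increment are assumptions for the illustration, the output cast presumes FORMAT is RTAUDIO_FLOAT64, and with RTAUDIO_NONINTERLEAVED the two loops would be swapped):

// Presumed file-scope declarations referenced by main() above.
unsigned int channels;
RtAudio::StreamOptions options;

// Interleaved sawtooth sketch: one sample per channel per frame, with the
// per-channel phase kept in the double array passed as user data.
int saw( void *outputBuffer, void *inputBuffer, unsigned int nBufferFrames,
         double streamTime, RtAudioStreamStatus status, void *data )
{
  double *buffer = (double *) outputBuffer;
  double *lastValues = (double *) data;
  if ( status ) std::cout << "Stream underflow detected!" << std::endl;
  for ( unsigned int i = 0; i < nBufferFrames; i++ ) {
    for ( unsigned int j = 0; j < channels; j++ ) {
      *buffer++ = lastValues[j];
      lastValues[j] += 0.005 * ( j + 1 + ( j * 0.1 ) );
      if ( lastValues[j] >= 1.0 ) lastValues[j] -= 2.0;
    }
  }
  return 0;
}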
Example #17
void init(int argc, char **argv) {

  /////
  theString = new StringModel ( 1000, 0.5, 0.99999, 8 );

  // *** test the rtaudio callback
  if ( dac.getDeviceCount() < 1 ) {
    std::cout << "\nNo audio devices found!\n";
    exit( 0 );
  }
    
  parameters.deviceId = dac.getDefaultOutputDevice();
  parameters.nChannels = 2;
  parameters.firstChannel = 0;
  sampleRate = 44100;
  bufferFrames = 256; // 256 sample frames

  try { 
    dac.openStream ( &parameters, 
		     NULL, 
		     RTAUDIO_FLOAT32,
		     sampleRate, 
		     &bufferFrames, 
		     StringModel::audioCallback,
		     (void *)theString );
    dac.startStream();
  }
  catch ( RtError& e ) {
    std::cout << "\nexception on dac:\n";
    e.printMessage();
    exit(0);
  }

  //////
    
  GLfloat pos[] = {5.0, 5.0, 10.0, 0.0};
  glLightfv(GL_LIGHT0, GL_POSITION, pos);
  glEnable(GL_CULL_FACE);
  glEnable(GL_LIGHTING);
  glEnable(GL_LIGHT0);
  glEnable(GL_DEPTH_TEST);
  glEnable(GL_NORMALIZE);
  glEnable(GL_COLOR_MATERIAL);
  glEnable(GL_BLEND);
  glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
  glEnable(GL_FOG);
  float FogCol[3]={0.0,0.0,0.0};
  glFogfv(GL_FOG_COLOR,FogCol); 
  glFogi(GL_FOG_MODE, GL_LINEAR);
  glFogf(GL_FOG_START, 10.0f);
  glFogf(GL_FOG_END, 40.f);

  glClearColor (0.0, 0.0, 0.0, 0.0);

  set_to_ident(g_trackball_transform);

  std::cout << "\nPlaying ... press \n";
  std::cout << "t to tighten\n";
  std::cout << "l to loosen\n";
  std::cout << "p to pluck\n";
  std::cout << "r to reset\n";
  std::cout << "d to dump velocities\n";
  std::cout << "f/F to change vibrator freq\n";
  std::cout << "ESC to quit.\n";
}