Example #1

Lazy, one-time initialization of a shared audio state: RtAudio-style defaults (sample rate, block size, channels, devices) plus a ring buffer sized to hold about one second of output blocks.
av_Audio * av_audio_get() {
	static bool initialized = false;
	if (!initialized) {
		initialized = true;
		
		rta.showWarnings( true );		
		
		// defaults:
		audio.samplerate = 44100;
		audio.blocksize = 256;
		audio.inchannels = 2;
		audio.outchannels = 2;
		audio.time = 0;
		audio.lag = 0.04;
		audio.indevice = rta.getDefaultInputDevice();
		audio.outdevice = rta.getDefaultOutputDevice();
		/*
		audio.msgbuffer.size = AV_AUDIO_MSGBUFFER_SIZE_DEFAULT;
		audio.msgbuffer.read = 0;
		audio.msgbuffer.write = 0;
		audio.msgbuffer.data = (unsigned char *)malloc(audio.msgbuffer.size);
		*/
		audio.onframes = 0;
		
		// one second of ringbuffer:
		int blockspersecond = audio.samplerate / audio.blocksize;
		audio.blocks = blockspersecond + 1;
		audio.blockstep = audio.blocksize * audio.outchannels;
		int len = audio.blockstep * audio.blocks;
		audio.buffer = (float *)calloc(len, sizeof(float));
		audio.blockread = 0;
		audio.blockwrite = 0;
		
		printf("audio initialized\n");
		
		//AL = lua_open(); //av_init_lua();
		
		// unique to audio thread:
		//if (luaL_dostring(AL, "require 'audioprocess'")) {
		//	printf("error: %s\n", lua_tostring(AL, -1));
		//	initialized = false;
		//}
		 
	}
	return &audio;
}
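For reference, here is a minimal sketch of how one block might be consumed from the ring buffer set up above. The field names (buffer, blockstep, blocks, blockread, blockwrite) come from av_audio_get(); the function itself and the single-reader/single-writer protocol are assumptions, not part of the original source.

// Hypothetical consumer: return a pointer to the next readable block of
// audio->blockstep floats, or NULL if the writer has not produced one yet.
float * av_audio_read_block(av_Audio * audio) {
	if (audio->blockread == audio->blockwrite) {
		return NULL; // ring buffer empty
	}
	float * block = audio->buffer + audio->blockread * audio->blockstep;
	audio->blockread = (audio->blockread + 1) % audio->blocks;
	return block;
}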
Example #2

Command-line oscillator player: maps flags to unit generators, validates frequency, width and optional FM/input-modulation arguments, then opens a duplex RtAudio stream on the default devices at a preferred sample rate.
int main( int argc, char *argv[])
{
  // COMMAND LINE ARG HANDLING
  map<string, ugen> ugens;
  ugens["--sine"] = &sine;
  ugens["--saw"] = &saw;
  ugens["--pulse"] = &pulse;
  ugens["--noise"] = &noise;
  ugens["--impulse"] = &impulse;

  if (argc < 4 || argc > 10 ) usage();

  string type_arg = argv[1];
  g_active_ugen = ugens[type_arg];
  if (g_active_ugen == NULL)
    usage();

  double freq_arg = atof(argv[2]);
  if (freq_arg <= 0)
    usage();
  g_frequency = freq_arg;

  double width_arg = atof(argv[3]);
  if (width_arg < 0 || width_arg > 1)
    usage();
  g_width = width_arg;

  if (argc > 4) { // modulation parameters present
    for (int i = 4; i < argc;) {
      if (string(argv[i]).compare("--input") == 0) {
        g_modulate_input = true;
        i++;
      }
      else if (string(argv[i]).compare("--fm") == 0) {
        g_fm_on = true;
        if (i + 4 >= argc) // --fm needs type, freq, width and index arguments
          usage();

        string fm_type_arg = argv[++i];
        g_active_fm_ugen = ugens[fm_type_arg];
        if (g_active_fm_ugen == NULL)
          usage();

        double fm_freq_arg = atof(argv[++i]);
        if (fm_freq_arg <= 0)
          usage();
        g_fm_frequency = fm_freq_arg;

        double fm_width_arg = atof(argv[++i]);
        if (fm_width_arg < 0 || fm_width_arg > 1)
          usage();
        g_fm_width = fm_width_arg;

        double fm_index_arg = atof(argv[++i]);
        g_fm_index = fm_index_arg;

        i++;
      }
      else
        usage();
    }
  }

  // AUDIO SETUP
  RtAudio audio;
  audio.showWarnings( true );

  RtAudio::StreamParameters output_params;
  RtAudio::StreamParameters input_params;

  // Choose an audio device and a sample rate
  unsigned int sample_rate;
  unsigned int devices = audio.getDeviceCount();
  if ( devices < 1 ) {
    cerr << "No audio device found!" << endl;
    exit(1);
  }
  RtAudio::DeviceInfo info;
  for (unsigned int i = 0; i < devices; i++ ) {
    info = audio.getDeviceInfo(i);
    if ( info.isDefaultOutput ) {
      output_params.deviceId = i;
      output_params.nChannels = 2;

      if (info.sampleRates.size() < 1) {
        cerr << "No supported sample rates found!" << endl;
        exit(1);
      }
      // Pick a supported rate, preferring 44100 or 48000 when available
      for (unsigned int j = 0; j < info.sampleRates.size(); j++) {
        sample_rate = info.sampleRates[j];
        if (sample_rate == 44100 || sample_rate == 48000) {
          // Found a nice sample rate, stop looking
          break;
        }
      }
      cout << "Using sample rate: " << sample_rate << endl;

    }
    if ( info.isDefaultInput ) {
      input_params.deviceId = i;
      input_params.nChannels = 1;
    }
  }

  cout << "Using output device ID " << output_params.deviceId << " which has " << 
    output_params.nChannels << " output channels." << endl;
  cout << "Using input device ID " << input_params.deviceId << " which has " << 
    input_params.nChannels << " input channels." << endl;

  RtAudio::StreamOptions options;
  options.flags |= RTAUDIO_HOG_DEVICE;
  options.flags |= RTAUDIO_SCHEDULE_REALTIME;

  unsigned int buffer_frames = 256;

  try {
    audio.openStream( &output_params,     // output params
		      &input_params,      // input params
		      RTAUDIO_FLOAT64,    // audio format 
		      sample_rate,        // sample rate
		      &buffer_frames,     // num frames per buffer (mutable by rtaudio)
		      &callback,          // audio callback
		      &audio,             // user data pointer HACK HACK :D
		      &options);          // stream options
    audio.startStream();
  } catch ( RtError &e ) {
    e.printMessage();
    goto cleanup;
  }

  char input;
  cout << "Playing, press enter to quit (buffer frames = " << buffer_frames << ")." << endl;
  cin.get( input );

  try {
    audio.stopStream();
  }
  catch ( RtError &e ) {
    e.printMessage();
  }

 cleanup:
  if ( audio.isStreamOpen() ) {
    audio.closeStream();
  }

  return 0;
}
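The callback handed to openStream() is not shown in this example. Below is a minimal sketch of a function matching the RtAudio callback signature it must have (RTAUDIO_FLOAT64, two interleaved output channels); the real program dispatches through g_active_ugen, but here a plain sine at g_frequency is rendered instead, and the 44100 Hz rate is an assumption.

// Sketch only: signature required by RtAudio; body is illustrative. Needs <cmath>.
int callback( void *outputBuffer, void *inputBuffer, unsigned int nBufferFrames,
              double streamTime, RtAudioStreamStatus status, void *userData )
{
  static double phase = 0.0;                 // assumed phase state
  const double TWO_PI = 6.283185307179586;
  double *out = (double *) outputBuffer;
  for ( unsigned int n = 0; n < nBufferFrames; n++ ) {
    double sample = sin( phase );            // real code would call g_active_ugen
    out[2*n]     = sample;                   // left channel
    out[2*n + 1] = sample;                   // right channel
    phase += TWO_PI * g_frequency / 44100.0; // sample rate assumed, see above
  }
  return 0;                                  // keep the stream running
}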
Example #3

RtAudio stream control test: opens an output stream and exercises stop, abort and restart, then repeats with non-interleaved buffers and finally with a duplex (input + output) stream.
int main( int argc, char *argv[] )
{
  unsigned int bufferFrames, fs, oDevice = 0, iDevice = 0, iOffset = 0, oOffset = 0;
  char input;

  // minimal command-line checking
  if (argc < 3 || argc > 7 ) usage();

  RtAudio dac;
  if ( dac.getDeviceCount() < 1 ) {
    std::cout << "\nNo audio devices found!\n";
    exit( 1 );
  }

  channels = (unsigned int) atoi( argv[1] );
  fs = (unsigned int) atoi( argv[2] );
  if ( argc > 3 )
    iDevice = (unsigned int) atoi( argv[3] );
  if ( argc > 4 )
    oDevice = (unsigned int) atoi(argv[4]);
  if ( argc > 5 )
    iOffset = (unsigned int) atoi(argv[5]);
  if ( argc > 6 )
    oOffset = (unsigned int) atoi(argv[6]);

  double *data = (double *) calloc( channels, sizeof( double ) );

  // Let RtAudio print messages to stderr.
  dac.showWarnings( true );

  // Set our stream parameters for output only.
  bufferFrames = 256;
  RtAudio::StreamParameters oParams, iParams;
  oParams.deviceId = oDevice;
  oParams.nChannels = channels;
  oParams.firstChannel = oOffset;

  RtAudio::StreamOptions options;
  options.flags = RTAUDIO_HOG_DEVICE;
  try {
    dac.openStream( &oParams, NULL, RTAUDIO_FLOAT64, fs, &bufferFrames, &sawi, (void *)data, &options );
    std::cout << "\nStream latency = " << dac.getStreamLatency() << std::endl;

    // Start the stream
    dac.startStream();
    std::cout << "\nPlaying ... press <enter> to stop.\n";
    std::cin.get( input );

    // Stop the stream
    dac.stopStream();

    // Restart again
    std::cout << "Press <enter> to restart.\n";
    std::cin.get( input );
    dac.startStream();

    // Test abort function
    std::cout << "Playing again ... press <enter> to abort.\n";
    std::cin.get( input );
    dac.abortStream();

    // Restart another time
    std::cout << "Press <enter> to restart again.\n";
    std::cin.get( input );
    dac.startStream();

    std::cout << "Playing again ... press <enter> to close the stream.\n";
    std::cin.get( input );
  }
  catch ( RtError& e ) {
    e.printMessage();
    goto cleanup;
  }

  if ( dac.isStreamOpen() ) dac.closeStream();

  // Test non-interleaved functionality
  options.flags = RTAUDIO_NONINTERLEAVED;
  try {
    dac.openStream( &oParams, NULL, RTAUDIO_FLOAT64, fs, &bufferFrames, &sawni, (void *)data, &options );

    std::cout << "Press <enter> to start non-interleaved playback.\n";
    std::cin.get( input );

    // Start the stream
    dac.startStream();
    std::cout << "\nPlaying ... press <enter> to stop.\n";
    std::cin.get( input );
  }
  catch ( RtError& e ) {
    e.printMessage();
    goto cleanup;
  }

  if ( dac.isStreamOpen() ) dac.closeStream();

  // Now open a duplex stream.
  unsigned int bufferBytes;
  iParams.deviceId = iDevice;
  iParams.nChannels = channels;
  iParams.firstChannel = iOffset;
  options.flags = RTAUDIO_NONINTERLEAVED;
  try {
    dac.openStream( &oParams, &iParams, RTAUDIO_SINT32, fs, &bufferFrames, &inout, (void *)&bufferBytes, &options );

    bufferBytes = bufferFrames * channels * 4;

    std::cout << "Press <enter> to start duplex operation.\n";
    std::cin.get( input );

    // Start the stream
    dac.startStream();
    std::cout << "\nRunning ... press <enter> to stop.\n";
    std::cin.get( input );

    // Stop the stream
    dac.stopStream();
    std::cout << "\nStopped ... press <enter> to restart.\n";
    std::cin.get( input );

    // Restart the stream
    dac.startStream();
    std::cout << "\nRunning ... press <enter> to stop.\n";
    std::cin.get( input );
  }
  catch ( RtError& e ) {
    e.printMessage();
  }

 cleanup:
  if ( dac.isStreamOpen() ) dac.closeStream();
  free( data );

  return 0;
}
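The duplex section above passes &bufferBytes as the user-data pointer, which suggests the inout callback simply copies that many bytes from the input buffer to the output buffer each period. A minimal pass-through sketch of that pattern follows; the status check and exact behaviour are assumptions.

// Sketch of a duplex pass-through callback; needs <cstring> for memcpy.
int inout( void *outputBuffer, void *inputBuffer, unsigned int nBufferFrames,
           double streamTime, RtAudioStreamStatus status, void *data )
{
  if ( status ) std::cout << "Stream over/underflow detected." << std::endl;
  unsigned int bytes = *(unsigned int *) data;  // bufferBytes from main()
  memcpy( outputBuffer, inputBuffer, bytes );
  return 0;
}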
Example #4

FFT/audio display front end: opens a duplex RtAudio stream, allocates FFT, audio and window buffers, builds a Hanning window, and hands control to a GLUT display loop.
// ========
// = Main =
// ========
// Entry point
int main (int argc, char *argv[])
{
	cout << argc << "  " << argv[0];
	if (argc > 3) {
		cerr << "\nERROR - wrong number of arguments\n";
		exit(1);
	}
	if (argc == 3)
		g_audio_history = atoi(argv[2]);
	else
		g_audio_history = 30;
	if (argc > 1)
		g_fft_history = atoi(argv[1]);
	else
		g_fft_history = 100;
	help();
    // RtAudio config + init

    // pointer to RtAudio object
    RtAudio *  audio = NULL;

    // create the object
    try
    {
        audio = new RtAudio();
    }
    catch( RtError & err ) {
        err.printMessage();
        exit(1);
    }

    if( audio->getDeviceCount() < 1 )
    {
        // nopes
        cout << "no audio devices found!" << endl;
        exit( 1 );
    }
        
    // let RtAudio print messages to stderr.
    audio->showWarnings( true );

    // set input and output parameters
    RtAudio::StreamParameters iParams, oParams;
    iParams.deviceId = audio->getDefaultInputDevice();
    iParams.nChannels = 1;
    iParams.firstChannel = 0;
    oParams.deviceId = audio->getDefaultOutputDevice();
    oParams.nChannels = 1;
    oParams.firstChannel = 0;
        
    // create stream options
    RtAudio::StreamOptions options;

    // set the callback and start stream
    try
    {
        audio->openStream( &oParams, &iParams, RTAUDIO_FLOAT64, MY_SRATE, &g_buffSize, &audioCallback, NULL, &options);
        
        cerr << "Buffer size defined by RtAudio: " << g_buffSize << endl;
        
        // allocate the buffer for the fft
        g_fftBuff = new float[g_buffSize * ZPF];
        g_audioBuff = new float[g_buffSize * ZPF];
        if ( g_fftBuff == NULL || g_audioBuff == NULL ) {
            cerr << "Something went wrong when creating the fft and audio buffers" << endl;
            exit (1);
        }
        
        // allocate the buffer for the time domain window
        g_window = new float[g_buffSize];
        if ( g_window == NULL ) {
            cerr << "Something went wrong when creating the window" << endl;
            exit (1);
        }

        // create a hanning window
        make_window( g_window, g_buffSize );
        
        // start the audio stream
        audio->startStream();
        
        // test RtAudio functionality for reporting latency.
        cout << "stream latency: " << audio->getStreamLatency() << " frames" << endl;
    }
    catch( RtError & err )
    {
        err.printMessage();
        goto cleanup;
    }


    // ============
    // = GL stuff =
    // ============

    // initialize GLUT
    glutInit( &argc, argv );
    // double buffer, use rgb color, enable depth buffer
    glutInitDisplayMode( GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH );
    // initialize the window size
    glutInitWindowSize( g_width, g_height );
    // set the window postion
    glutInitWindowPosition( 100, 100 );
    // create the window
    glutCreateWindow( "Hello GL" );
    //glutEnterGameMode();

    // set the idle function - called when idle
    glutIdleFunc( idleFunc );
    // set the display function - called when redrawing
    glutDisplayFunc( displayFunc );
    // set the reshape function - called when client area changes
    glutReshapeFunc( reshapeFunc );
    // set the keyboard function - called on keyboard events
    glutKeyboardFunc( keyboardFunc );
    // set the mouse function - called on mouse stuff
    glutMouseFunc( mouseFunc );
    // set the special function - called on special keys events (fn, arrows, pgDown, etc)
    glutSpecialFunc( specialFunc );

    // do our own initialization
    initialize();

    // let GLUT handle the current thread from here
    glutMainLoop();

        
    // if we get here, stop!
    try
    {
        audio->stopStream();
    }
    catch( RtError & err )
    {
        err.printMessage();
    }

    // Clean up
    cleanup:
    if(audio)
    {
        audio->closeStream();
        delete audio;
    }

    
    return 0;
}
Example #5

Sawtooth playback test: output-only stream that runs either for a requested duration or until the user presses <enter>.
int main( int argc, char *argv[] )
{
  unsigned int bufferFrames, fs, device = 0, offset = 0;

  // minimal command-line checking
  if (argc < 3 || argc > 6 ) usage();

  RtAudio dac;
  if ( dac.getDeviceCount() < 1 ) {
    std::cout << "\nNo audio devices found!\n";
    exit( 1 );
  }

  channels = (unsigned int) atoi( argv[1] );
  fs = (unsigned int) atoi( argv[2] );
  if ( argc > 3 )
    device = (unsigned int) atoi( argv[3] );
  if ( argc > 4 )
    offset = (unsigned int) atoi( argv[4] );
  if ( argc > 5 )
    nFrames = (unsigned int) (fs * atof( argv[5] ));
  if ( nFrames > 0 ) checkCount = true;

  double *data = (double *) calloc( channels, sizeof( double ) );

  // Let RtAudio print messages to stderr.
  dac.showWarnings( true );

  // Set our stream parameters for output only.
  bufferFrames = 256;
  RtAudio::StreamParameters oParams;
  oParams.deviceId = device;
  oParams.nChannels = channels;
  oParams.firstChannel = offset;

  RtAudio::StreamOptions options;
  options.flags |= RTAUDIO_HOG_DEVICE;
  options.flags |= RTAUDIO_SCHEDULE_REALTIME;
#if !defined( USE_INTERLEAVED )
  options.flags |= RTAUDIO_NONINTERLEAVED;
#endif
  try {
    dac.openStream( &oParams, NULL, FORMAT, fs, &bufferFrames, &saw, (void *)data, &options );
    dac.startStream();
  }
  catch ( RtError& e ) {
    e.printMessage();
    goto cleanup;
  }

  if ( checkCount ) {
    while ( dac.isStreamRunning() == true ) SLEEP( 100 );
  }
  else {
    char input;
    //std::cout << "Stream latency = " << dac.getStreamLatency() << "\n" << std::endl;
    std::cout << "\nPlaying ... press <enter> to quit (buffer size = " << bufferFrames << ").\n";
    std::cin.get( input );

    try {
      // Stop the stream
      dac.stopStream();
    }
    catch ( RtError& e ) {
      e.printMessage();
    }
  }

 cleanup:
  if ( dac.isStreamOpen() ) dac.closeStream();
  free( data );

  return 0;
}
Example #6

Duplex stream driven by a callbackData struct (frequency, sample counter, width, input flag) parsed from the command line; output is also logged to a MATLAB-style plot file.

//-----------------------------------------------------------------------------
// name: main()
// desc: entry point
//-----------------------------------------------------------------------------
int main( int argc, char ** argv )
{

	
	
	// state passed to the audio callback
	callbackData data;
	// frequency
	data.g_freq = 440;
	// sample number
	data.g_t = 0;
	// width
	data.g_width = 0;
	// input flag
	data.g_input = 0;

	// check parameters and parse input
	if (!parse(argc, argv, data))
	{
		exit(0);
	}

    // instantiate RtAudio object
    RtAudio adac;
    // variables
    unsigned int bufferBytes = 0;
    // frame size
    unsigned int bufferFrames = 512;
    
    // check for audio devices
    if( adac.getDeviceCount() < 1 )
    {
        // nopes
        cout << "no audio devices found!" << endl;
        exit( 1 );
    }

    // let RtAudio print messages to stderr.
    adac.showWarnings( true );

    // set input and output parameters
    RtAudio::StreamParameters iParams, oParams;
    iParams.deviceId = adac.getDefaultInputDevice();
    iParams.nChannels = MY_CHANNELS;
    iParams.firstChannel = 0;
    oParams.deviceId = adac.getDefaultOutputDevice();
    oParams.nChannels = MY_CHANNELS;
    oParams.firstChannel = 0;
    
    // create stream options
    RtAudio::StreamOptions options;

    // go for it
    try {
        // open a stream
        adac.openStream( &oParams, &iParams, MY_FORMAT, MY_SRATE, &bufferFrames, &callme, (void *)&data, &options );
    }
    catch( RtError& e )
    {
        // error!
        cout << e.getMessage() << endl;
        exit( 1 );
    }

    // compute
    bufferBytes = bufferFrames * MY_CHANNELS * sizeof(SAMPLE);
    
    // test RtAudio functionality for reporting latency.
    cout << "stream latency: " << adac.getStreamLatency() << " frames" << endl;

    // go for it
    try {
        // start stream
        adac.startStream();

        // get input
        char input;
        std::cout << "running... press <enter> to quit (buffer frames: " << bufferFrames << ")" << endl;
        std::cin.get(input);
        
        // stop the stream.
        adac.stopStream();
    }
    catch( RtError& e )
    {
        // print error message
        cout << e.getMessage() << endl;
        goto cleanup;
    }
    
cleanup:
    // close if open
    if( adac.isStreamOpen() )
        adac.closeStream();
    
    // done
    outfile<<"];\nplot(x)";
    return 0;
}
Example #7

Combines callback-based MIDI input (RtMidiIn) with a duplex RtAudio stream that plays a bank of MY_NUMSTRINGS string voices (g_ks).
//-----------------------------------------------------------------------------
// name: main()
// desc: entry point
//-----------------------------------------------------------------------------
int main( int argc, char ** argv )
{

  RtMidiIn *midiin = new RtMidiIn();

  // Check available ports.
  unsigned int nPorts = midiin->getPortCount();
  if ( nPorts == 0 ) {
    std::cout << "No ports available!\n";
    //goto cleanup;
  }

  midiin->openPort( 0 );

  // Set our callback function.  This should be done immediately after
  // opening the port to avoid having incoming messages written to the
  // queue.
  midiin->setCallback( &mycallback );

  // Don't ignore sysex, timing, or active sensing messages.
  midiin->ignoreTypes( false, false, false );

  std::cout << "\nReading MIDI input ... press <enter> to quit.\n";
  char input;
  std::cin.get(input);


    // instantiate RtAudio object
    RtAudio audio;
    // variables
    unsigned int bufferBytes = 0;
    // frame size
    unsigned int numFrames = 512;
    
    // check for audio devices
    if( audio.getDeviceCount() < 1 )
    {
        // nopes
        cout << "no audio devices found!" << endl;
        exit( 1 );
    }
    
    // let RtAudio print messages to stderr.
    audio.showWarnings( true );
    
    // set input and output parameters
    RtAudio::StreamParameters iParams, oParams;
    iParams.deviceId = audio.getDefaultInputDevice();
    iParams.nChannels = MY_CHANNELS;
    iParams.firstChannel = 0;
    oParams.deviceId = audio.getDefaultOutputDevice();
    oParams.nChannels = MY_CHANNELS;
    oParams.firstChannel = 0;
    
    // create stream options
    RtAudio::StreamOptions options;
    
    // go for it
    try {
        // open a stream
        audio.openStream( &oParams, &iParams, MY_FORMAT, MY_SRATE, &numFrames, &callme, NULL, &options );
    }
    catch( RtError& e )
    {
        // error!
        cout << e.getMessage() << endl;
        exit( 1 );
    }
    
    // compute
    bufferBytes = numFrames * MY_CHANNELS * sizeof(SAMPLE);
    
    // test RtAudio functionality for reporting latency.
    cout << "stream latency: " << audio.getStreamLatency() << " frames" << endl;
    
    for( int i = 0; i < MY_NUMSTRINGS; i++ )
    {
        // initialize each string voice
        g_ks[i].init( MY_SRATE*2, 440, MY_SRATE );
    }
    
    // go for it
    try {
        // start stream
        audio.startStream();
        char input;
        std::cout << "Playing ... press <enter> to quit." << std::endl;
        std::cin.get(input);
        
        // stop the stream.
        audio.stopStream();
    }
    catch( RtError& e )
    {
        // print error message
        cout << e.getMessage() << endl;
        goto cleanup;
    }
    
cleanup:
    // close if open
    if( audio.isStreamOpen() )
        audio.closeStream();
    delete midiin;
    
    // done
    return 0;
}
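The mycallback handler registered with setCallback() is not shown here. Below is a minimal sketch compatible with RtMidi's callback signature that just prints each incoming message; the body is illustrative only.

// Sketch only: prints raw MIDI bytes and the delta time of each message.
void mycallback( double deltatime, std::vector< unsigned char > *message, void *userData )
{
  unsigned int nBytes = message->size();
  for ( unsigned int i = 0; i < nBytes; i++ )
    std::cout << "Byte " << i << " = " << (int) message->at(i) << ", ";
  if ( nBytes > 0 )
    std::cout << "stamp = " << deltatime << std::endl;
}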
Example #8

Records input for a fixed number of seconds into a single preallocated buffer, then writes the raw samples to 'record.raw'.
int main( int argc, char *argv[] )
{
  unsigned int channels, fs, bufferFrames, device = 0, offset = 0;
  double time = 2.0;
  FILE *fd;

  // minimal command-line checking
  if ( argc < 3 || argc > 6 ) usage();

  RtAudio adc;
  if ( adc.getDeviceCount() < 1 ) {
    std::cout << "\nNo audio devices found!\n";
    exit( 1 );
  }

  channels = (unsigned int) atoi( argv[1] );
  fs = (unsigned int) atoi( argv[2] );
  if ( argc > 3 )
    time = (double) atof( argv[3] );
  if ( argc > 4 )
    device = (unsigned int) atoi( argv[4] );
  if ( argc > 5 )
    offset = (unsigned int) atoi( argv[5] );

  // Let RtAudio print messages to stderr.
  adc.showWarnings( true );

  // Set our stream parameters for input only.
  bufferFrames = 512;
  RtAudio::StreamParameters iParams;
  iParams.deviceId = device;
  iParams.nChannels = channels;
  iParams.firstChannel = offset;

  InputData data;
  data.buffer = 0;
  try {
    adc.openStream( NULL, &iParams, FORMAT, fs, &bufferFrames, &input, (void *)&data );
  }
  catch ( RtError& e ) {
    std::cout << '\n' << e.getMessage() << '\n' << std::endl;
    goto cleanup;
  }

  data.bufferBytes = bufferFrames * channels * sizeof( MY_TYPE );
  data.totalFrames = (unsigned long) (fs * time);
  data.frameCounter = 0;
  data.channels = channels;
  unsigned long totalBytes;
  totalBytes = data.totalFrames * channels * sizeof( MY_TYPE );

  // Allocate the entire data buffer before starting stream.
  data.buffer = (MY_TYPE*) malloc( totalBytes );
  if ( data.buffer == 0 ) {
    std::cout << "Memory allocation error ... quitting!\n";
    goto cleanup;
  }

  try {
    adc.startStream();
  }
  catch ( RtError& e ) {
    std::cout << '\n' << e.getMessage() << '\n' << std::endl;
    goto cleanup;
  }

  std::cout << "\nRecording for " << time << " seconds ... writing file 'record.raw' (buffer frames = " << bufferFrames << ")." << std::endl;
  while ( adc.isStreamRunning() ) {
    SLEEP( 100 ); // wake every 100 ms to check if we're done
  }

  // Now write the entire data to the file.
  fd = fopen( "record.raw", "wb" );
  if ( fd == NULL ) std::cout << "Could not open 'record.raw' for writing!\n";
  else {
    fwrite( data.buffer, sizeof( MY_TYPE ), data.totalFrames * channels, fd );
    fclose( fd );
  }

 cleanup:
  if ( adc.isStreamOpen() ) adc.closeStream();
  if ( data.buffer ) free( data.buffer );

  return 0;
}
Example #9

FluidSynth-based instrument: loads piano and drum SoundFonts, reads MIDI through RtMidiIn, and renders audio via an RtAudio duplex stream with a graphics front end (gfxInit).
int main (int argc, char ** argv)
{
    
    //parse tempo 
    if (argc>2)
    {
        cerr<<"Error in arguments\n";
        printHelp();
        exit(1);
    }
    else if (argc==2) 
    {
        g_tempo = atoi(argv[1]);
        if (g_tempo < 40 || g_tempo > 200)
        {
            cerr<<"Tempo out of bounds!\n";
            printHelp();
            exit(1);
        }
        tempoChange();
    }
    
    // set up fluid synth stuff
    // TODO: error checking!!!!
    g_settings = new_fluid_settings(); 
    g_synth = new_fluid_synth( g_settings );
    g_metronome = new_fluid_synth( g_settings );  
    
    
    //fluid_player_t* player;
    //player = new_fluid_player(g_synth);
    //fluid_player_add(player, "backing.mid");
    //fluid_player_play(player);

    
    
    if (fluid_synth_sfload(g_synth, "piano.sf2", 1) == -1)
    {
        cerr << "Error loading sound font" << endl;
        exit(1);
    }
    
    if (fluid_synth_sfload(g_metronome, "drum.sf2", 1) == -1)
    {
        cerr << "Error loading sound font" << endl;
        exit(1);
    }
    
    
    // RtAudio config + init

    // pointers to the RtMidi and RtAudio objects
    RtMidiIn * midiin = NULL;
    RtAudio *  audio = NULL;
    unsigned int bufferSize = 512; //g_sixteenth/100;

    // MIDI config + init
    try 
    {
        midiin = new RtMidiIn();
    }
    catch( RtError & err ) {
        err.printMessage();
       // goto cleanup;
    }
    
    // Check available ports.
    if ( midiin->getPortCount() == 0 )
    {
        std::cout << "No ports available!\n";
       // goto cleanup;
    }
    // open port 1 when several ports are available, otherwise port 0
    if ( midiin->getPortCount() > 2)
        midiin->openPort( 1 );
    else
        midiin->openPort( 0 );

    // set midi callback
    midiin->setCallback( &midi_callback );

    // Don't ignore sysex, timing, or active sensing messages.
    midiin->ignoreTypes( false, false, false );

    // create the object
    try
    {
        audio = new RtAudio();
        cerr << "buffer size: " << bufferSize << endl;
    }
    catch( RtError & err ) {
        err.printMessage();
        exit(1);
    }

    if( audio->getDeviceCount() < 1 )
    {
        // nopes
        cout << "no audio devices found!" << endl;
        exit( 1 );
    }
        
    // let RtAudio print messages to stderr.
    audio->showWarnings( true );

    // set input and output parameters
    RtAudio::StreamParameters iParams, oParams;
    iParams.deviceId = audio->getDefaultInputDevice();
    iParams.nChannels = 1;
    iParams.firstChannel = 0;
    oParams.deviceId = audio->getDefaultOutputDevice();
    oParams.nChannels = 2;
    oParams.firstChannel = 0;
        
    // create stream options
    RtAudio::StreamOptions options;

    // set the callback and start stream
    try
    {
        audio->openStream( &oParams, &iParams, RTAUDIO_FLOAT32, MY_SRATE, &bufferSize, &audioCallback, NULL, &options);
        audio->startStream();
        
        // test RtAudio functionality for reporting latency.
        cout << "stream latency: " << audio->getStreamLatency() << " frames" << endl;
    }
    catch( RtError & err )
    {
        err.printMessage();
        goto cleanup;
    }

    // wait for user input
    cout << "Type CTRL+C to quit:";
    
    //initialize graphics
    gfxInit(&argc,argv);
    
    // if we get here, stop!
    try
    {
        audio->stopStream();
    }
    catch( RtError & err )
    {
        err.printMessage();
    }

    // Clean up
    cleanup:
    if(audio)
    {
        audio->closeStream();
        delete audio;
    }

    
    return 0;
}
Example #10

Audio + OpenGL skeleton: opens a duplex stream, allocates a shared sample buffer for the audio callback, and runs everything from the GLUT main loop.
//-----------------------------------------------------------------------------
// name: main()
// desc: entry point
//-----------------------------------------------------------------------------
int main( int argc, char ** argv )
{
    // instantiate RtAudio object
    RtAudio audio;
    // variables
    unsigned int bufferBytes = 0;
    // frame size
    unsigned int bufferFrames = 512;
    
    // check for audio devices
    if( audio.getDeviceCount() < 1 )
    {
        // nopes
        cout << "no audio devices found!" << endl;
        exit( 1 );
    }

    // initialize GLUT
    glutInit( &argc, argv );
    // init gfx
    initGfx();

    // let RtAudio print messages to stderr.
    audio.showWarnings( true );

    // set input and output parameters
    RtAudio::StreamParameters iParams, oParams;
    iParams.deviceId = audio.getDefaultInputDevice();
    iParams.nChannels = MY_CHANNELS;
    iParams.firstChannel = 0;
    oParams.deviceId = audio.getDefaultOutputDevice();
    oParams.nChannels = MY_CHANNELS;
    oParams.firstChannel = 0;
    
    // create stream options
    RtAudio::StreamOptions options;

    // go for it
    try {
        // open a stream
        audio.openStream( &oParams, &iParams, MY_FORMAT, MY_SRATE, &bufferFrames, &callme, (void *)&bufferBytes, &options );
    }
    catch( RtError& e )
    {
        // error!
        cout << e.getMessage() << endl;
        exit( 1 );
    }

    // compute
    bufferBytes = bufferFrames * MY_CHANNELS * sizeof(SAMPLE);
    // allocate global buffer
    g_bufferSize = bufferFrames;
    g_buffer = new SAMPLE[g_bufferSize];
    memset( g_buffer, 0, sizeof(SAMPLE)*g_bufferSize );

    // go for it
    try {
        // start stream
        audio.startStream();

        // let GLUT handle the current thread from here
        glutMainLoop();
        
        // stop the stream.
        audio.stopStream();
    }
    catch( RtError& e )
    {
        // print error message
        cout << e.getMessage() << endl;
        goto cleanup;
    }
    
cleanup:
    // close if open
    if( audio.isStreamOpen() )
        audio.closeStream();
    
    // done
    return 0;
}