Example #1
void Audio::closeInputDevice()
{
    if (!adc.isStreamOpen()) return;
    adc.stopStream();
    while (adc.isStreamRunning());
    adc.closeStream();
}
Example #2
int main(int argc, const char * argv[])
{
    RtAudio dac;
    RtAudio::StreamParameters rtParams;
    rtParams.deviceId = dac.getDefaultOutputDevice();
    rtParams.nChannels = nChannels;
#if RASPI
    unsigned int sampleRate = 22000;
#else
    unsigned int sampleRate = 44100;
#endif
    unsigned int bufferFrames = 512; // 512 sample frames
    
    Tonic::setSampleRate(sampleRate);
    
    std::vector<Synth> synths;
    // Tonic synths are lightweight handle objects, so store them by value;
    // the original "*new X()" pattern copied the handle and leaked the allocation.
    synths.push_back(BassDrum());
    synths.push_back(Snare());
    synths.push_back(HiHat());
    synths.push_back(Funky());
    
    // Test write pattern
    
    DrumMachine *drumMachine = new DrumMachine(synths);
    
    drumMachine->loadPattern(0);
    
    ControlMetro metro = ControlMetro().bpm(480);
    
    ControlCallback drumMachineTick = ControlCallback(&mixer, [&](ControlGeneratorOutput output){
        drumMachine->tick();
    }).input(metro);
    
    Generator mixedSignal;
    for(int i = 0; i < NUM_TRACKS; i++)
    {
        mixedSignal = mixedSignal + synths[i];
    }
    mixer.setOutputGen(mixedSignal);
    
    try
    {
        dac.openStream( &rtParams, NULL, RTAUDIO_FLOAT32, sampleRate, &bufferFrames, &renderCallback, NULL, NULL );
        dac.startStream();
        
        // Send a pointer to our global drumMachine instance
        // to the serial communications layer.
        listenForMessages( drumMachine );
        
        dac.stopStream();
    }
    catch ( RtError& e )
    {
        std::cout << '\n' << e.getMessage() << '\n' << std::endl;
        exit( 0 );
    }
    
    return 0;
}
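Example #2 registers a renderCallback with RtAudio that is not part of this listing. A minimal sketch of what it might look like, assuming mixer is the global Tonic::Synth used above and that Tonic's fillBufferOfFloats pulls interleaved float output (the body is an assumption, not the project's actual callback):

int renderCallback( void *outputBuffer, void *inputBuffer, unsigned int nBufferFrames,
                    double streamTime, RtAudioStreamStatus status, void *userData )
{
    // Pull nBufferFrames of interleaved floats from the Tonic mixer.
    mixer.fillBufferOfFloats( (float*)outputBuffer, nBufferFrames, nChannels );
    return 0;
}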
Example #3
	int
		playsin(void)
	{
		RtAudio *audio;
		unsigned int bufsize = 4096;
		CallbackData data;

		try {
			audio = new RtAudio(RtAudio::WINDOWS_WASAPI);
		}
		catch (...) {
			
			return 1;
		}
		if (!audio) {
			fprintf(stderr, "fail to allocate RtAudio¥n");
			return 1;
		}
		/* probe audio devices */
		unsigned int devId = audio->getDefaultOutputDevice();

		/* Setup output stream parameters */
		RtAudio::StreamParameters *outParam = new RtAudio::StreamParameters();

		outParam->deviceId = devId;
		outParam->nChannels = 2;

		audio->openStream(outParam, NULL, RTAUDIO_FLOAT32, 44100,
			&bufsize, rtaudio_callback, &data);

		/* Create Wave Form Table */
		data.nRate = 44100;
		/* Frame Number is based on Freq(440Hz) and Sampling Rate(44100) */
		/* hmm... nFrame = 44100 is enough approximation, maybe... */
		data.nFrame = 44100;
		data.nChannel = outParam->nChannels;
		data.cur = 0;
		data.wftable = (float *)calloc(data.nChannel * data.nFrame, sizeof(float));
		if (!data.wftable)
		{
			delete audio;
			fprintf(stderr, "fail to allocate memory¥n");
			return 1;
		}
		for (unsigned int i = 0; i < data.nFrame; i++) {
			float v = sin(i * 3.1416 * 2 * 440 / data.nRate);
			for (unsigned int j = 0; j < data.nChannel; j++) {
				data.wftable[i*data.nChannel + j] = v;
			}
		}

		audio->startStream();
		// let the tone play for 10 seconds; requires <thread> and <chrono>
		std::this_thread::sleep_for(std::chrono::seconds(10));
		audio->stopStream();
		audio->closeStream();
		free(data.wftable);
		delete outParam;
		delete audio;

		return 0;
	}
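Neither CallbackData nor rtaudio_callback appears in Example #3. A minimal sketch, assuming the struct simply holds the fields used above and the callback copies the precomputed wavetable into the interleaved float output, wrapping cur at nFrame:

// Assumed layout of the user data used above.
struct CallbackData {
    unsigned int nRate;     // sample rate
    unsigned int nFrame;    // frames in the wavetable
    unsigned int nChannel;  // channels per frame
    unsigned int cur;       // current read position, in frames
    float *wftable;         // interleaved table, nFrame * nChannel floats
};

static int rtaudio_callback( void *outbuf, void *inbuf, unsigned int nFrames,
                             double streamTime, RtAudioStreamStatus status, void *userData )
{
    CallbackData *data = (CallbackData *)userData;
    float *out = (float *)outbuf;
    // Copy one table frame per output frame, wrapping at the table end.
    for (unsigned int i = 0; i < nFrames; i++) {
        for (unsigned int j = 0; j < data->nChannel; j++)
            *out++ = data->wftable[data->cur * data->nChannel + j];
        data->cur = (data->cur + 1) % data->nFrame;
    }
    return 0;
}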
Example #4
void av_audio_start() {
	av_audio_get();
	
	if (rta.isStreamRunning()) {
		rta.stopStream();
	}
	if (rta.isStreamOpen()) {
		// close it:
		rta.closeStream();
	}	
	
	unsigned int devices = rta.getDeviceCount();
	if (devices < 1) {
		printf("No audio devices found\n");
		return;
	}
	
	RtAudio::DeviceInfo info;
	RtAudio::StreamParameters iParams, oParams;
	
	printf("Available audio devices (%d):\n", devices);
	for (unsigned int i=0; i<devices; i++) {
		info = rta.getDeviceInfo(i);
		printf("Device %d: %dx%d (%d) %s\n", i, info.inputChannels, info.outputChannels, info.duplexChannels, info.name.c_str());
	}
	
	printf("device %d\n", audio.indevice);
	
	info = rta.getDeviceInfo(audio.indevice);
	printf("Using audio input %d: %dx%d (%d) %s\n", audio.indevice, info.inputChannels, info.outputChannels, info.duplexChannels, info.name.c_str());
	
	audio.inchannels = info.inputChannels;
	
	iParams.deviceId = audio.indevice;
	iParams.nChannels = audio.inchannels;
	iParams.firstChannel = 0;
	
	info = rta.getDeviceInfo(audio.outdevice);
	printf("Using audio output %d: %dx%d (%d) %s\n", audio.outdevice, info.inputChannels, info.outputChannels, info.duplexChannels, info.name.c_str());
	
	audio.outchannels = info.outputChannels;
	
	oParams.deviceId = audio.outdevice;
	oParams.nChannels = audio.outchannels;
	oParams.firstChannel = 0;

	RtAudio::StreamOptions options;
	//options.flags |= RTAUDIO_NONINTERLEAVED;
	options.streamName = "av";
	
	try {
		rta.openStream( &oParams, &iParams, RTAUDIO_FLOAT32, audio.samplerate, &audio.blocksize, &av_rtaudio_callback, NULL, &options );
		rta.startStream();
		printf("Audio started\n");
	}
	catch ( RtError& e ) {
		fprintf(stderr, "%s\n", e.getMessage().c_str());
	}
}
Example #5
void Audio::closeOutputDevice()
{
    if (!dac.isStreamOpen()) return;
    dac.stopStream();
    while (dac.isStreamRunning());
    dac.closeStream();
    outIsOpened = false;
}
Example #6
 void stop() {
   try {
     // Stop the stream
     dac.stopStream();
   } catch (RtAudioError &e) {
     e.printMessage();
   }
   if (dac.isStreamOpen()) dac.closeStream();
 }
Example #7
int main(int argc, char** argv)
{

  if (argc != 2) {
    printf("Usage: synth file.sf2\n");
    exit(0);
  }

  LightFluidSynth *usynth;

  usynth = new LightFluidSynth();

  usynth->loadSF2(argv[1]);
//  usynth->loadSF2("tim.sf2");

  RtMidiIn *midiIn = new RtMidiIn();
  if (midiIn->getPortCount() == 0) {
    std::cout << "No MIDI ports available!\n";
  } else {
    midiIn->openPort(0);
    midiIn->setCallback( &midiCallback, (void *)usynth );
    midiIn->ignoreTypes( false, false, false );
  }

//   RtAudio dac(RtAudio::LINUX_PULSE);
  RtAudio dac;
  RtAudio::StreamParameters rtParams;

  // Determine the number of devices available
  unsigned int devices = dac.getDeviceCount();
  // Scan through devices for various capabilities
  RtAudio::DeviceInfo info;
  for ( unsigned int i = 0; i < devices; i++ ) {
    info = dac.getDeviceInfo( i );
    if ( info.probed == true ) {
      std::cout << "device " << " = " << info.name;
      std::cout << ": maximum output channels = " << info.outputChannels << "\n";
    }
  }
//  rtParams.deviceId = 3;
  rtParams.deviceId = dac.getDefaultOutputDevice();
  rtParams.nChannels = 2;
  unsigned int bufferFrames = FRAME_SIZE;

  RtAudio::StreamOptions options;
  options.flags = RTAUDIO_SCHEDULE_REALTIME;

  dac.openStream( &rtParams, NULL, AUDIO_FORMAT, SAMPLE_RATE, &bufferFrames, &audioCallback, (void *)usynth, &options );
  dac.startStream();

  printf("\n\nPress Enter to stop\n\n");
  cin.get();
  dac.stopStream();
  if (dac.isStreamOpen()) dac.closeStream();

  delete midiIn;
  delete usynth;
  return 0;
}
Example #8
int main(int argc, char *argv[])
{
  int buffer_size, fs, device = 0;
  RtAudio *audio;
  double *data;
  char input;

  // minimal command-line checking
  if (argc != 3 && argc != 4 ) usage();

  chans = (int) atoi(argv[1]);
  fs = (int) atoi(argv[2]);
  if ( argc == 4 )
    device = (int) atoi(argv[3]);

  // Open the realtime output device
  buffer_size = 1024;
  try {
    audio = new RtAudio(device, chans, 0, 0,
                        FORMAT, fs, &buffer_size, 4);
  }
  catch (RtError &error) {
    error.printMessage();
    exit(EXIT_FAILURE);
  }

  data = (double *) calloc(chans, sizeof(double));

  try {
    audio->setStreamCallback(&saw, (void *)data);
    audio->startStream();
  }
  catch (RtError &error) {
    error.printMessage();
    goto cleanup;
  }

  std::cout << "\nPlaying ... press <enter> to quit (buffer size = " << buffer_size << ").\n";
  std::cin.get(input);

  // Stop the stream.
  try {
    audio->stopStream();
  }
  catch (RtError &error) {
    error.printMessage();
  }

 cleanup:
  audio->closeStream();
  delete audio;
  if (data) free(data);

  return 0;
}
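Example #8 uses the legacy RtAudio 3.x API (device/format constructor plus setStreamCallback), whose callbacks receive a raw char* buffer and a frame count. The saw callback it registers is not shown; a rough sketch in that style, assuming FORMAT is RTAUDIO_FLOAT64 and chans is the global set above (the ramp increment is arbitrary):

// Legacy RtAudio 3.x callback: fills buffer_size frames of a rising ramp per channel.
int saw( char *buffer, int buffer_size, void *data )
{
  double *lastValues = (double *)data;   // one running phase per channel
  double *out = (double *)buffer;        // assuming FORMAT == RTAUDIO_FLOAT64

  for (int i = 0; i < buffer_size; i++) {
    for (int j = 0; j < chans; j++) {
      *out++ = lastValues[j];
      lastValues[j] += 0.01 * (j + 1);
      if (lastValues[j] >= 1.0) lastValues[j] -= 2.0;
    }
  }
  return 0;
}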
Example #9
void
stop_audio(void)
{
	try {
		audio.stopStream();
	} catch(RtError& e) {
		e.printMessage();
	}
	
	if(audio.isStreamOpen())
		audio.closeStream();
}
Example #10
int main(int argc, char *argv[])
{
  int chans, fs, device = 0;
  RtAudio *audio;
  char input;

  // minimal command-line checking
  if (argc != 3 && argc != 4 ) usage();

  chans = (int) atoi(argv[1]);
  fs = (int) atoi(argv[2]);
  if ( argc == 4 )
    device = (int) atoi(argv[3]);

  // Open the realtime output device
  int buffer_size = 512;
  try {
    audio = new RtAudio(device, chans, device, chans,
                        FORMAT, fs, &buffer_size, 8);
  }
  catch (RtError &error) {
    error.printMessage();
    exit(EXIT_FAILURE);
  }

  try {
    audio->setStreamCallback(&inout, NULL);
    audio->startStream();
  }
  catch (RtError &error) {
    error.printMessage();
    goto cleanup;
  }

  std::cout << "\nRunning ... press <enter> to quit (buffer size = " << buffer_size << ").\n";
  std::cin.get(input);

  try {
    audio->stopStream();
  }
  catch (RtError &error) {
    error.printMessage();
  }

 cleanup:
  audio->closeStream();
  delete audio;

  return 0;
}
Example #11
int main()
{
	RtAudio dac;

	//std::cout << dac.getDeviceCount() << std::endl;   //2
	if (dac.getDeviceCount() < 1) {
		std::cout << "\nNo audio devices found!\n";
		exit(0);
	}
	RtAudio::StreamParameters parameters;
	//std::cout << dac.getDefaultOutputDevice() << std::endl;
	parameters.deviceId = dac.getDefaultOutputDevice();    //0
	parameters.nChannels = 2;
	parameters.firstChannel = 0;
	unsigned int sampleRate = 44100;
	unsigned int bufferFrames = 256; // 256 sample frames

	RtAudio::StreamParameters input;
	input.deviceId = dac.getDefaultInputDevice();
	input.nChannels = 2;
	input.firstChannel = 0;

	double data[2];
	try {
		dac.openStream(&parameters, &input, RTAUDIO_SINT16,
			sampleRate, &bufferFrames, &saw, (void *)&data);
		dac.startStream();
	}
	catch (RtAudioError& e) {
		e.printMessage();
		exit(0);
	}

	char input1;
	std::cout << "\nPlaying ... press <enter> to quit.\n";
	std::cin.get(input1);
	
	try {
		// Stop the stream
		dac.stopStream();
	}
	catch (RtAudioError& e) {
		e.printMessage();
	}
	if (dac.isStreamOpen()) dac.closeStream();
	system("pause");
	
	return 0;
}
Example #12
File: vessl.cpp (project: ddf/vessl)
static void close_output(va_list args)
{
    try
    {
        // Stop the stream
        vessl_out.stopStream();
        
        if ( vessl_out.isStreamOpen() )
        {
            vessl_out.closeStream();
            
            printf("[vessl] output was closed.\n");
        }
    }
    catch (RtAudioError& e)
    {
        e.printMessage();
    }
}
Example #13
void keyboard (unsigned char key, int x, int y)
{
  switch (key) {
  case 'a':
    autorotate = ! autorotate;
    break;

  case 't' : theString->Ktension *= 1.05946; 
    theString->Ktension = fmin ( 1.0, theString->Ktension );
    break;
  case 'l' : theString->Ktension *= 0.943876; break;
  case 'P' : theString->pluck(); break;
  case 'p' : theString->pluckvel(); break;
  case 'r' : theString->reset(); break;
  case 'd' : theString->print(); break;
  case 'v' : theString->toggleVibrator(); break;
  case 'f' : theString->vibratorFreq *= 0.943875; 
    std::cout << "vib freq = " << theString->vibratorFreq << std::endl;
    break;
  case 'F' : theString->vibratorFreq /= 0.943875; 
    std::cout << "vib freq = " << theString->vibratorFreq << std::endl;
    break;
  case 27: /* ESC */
    try {
      // Stop the stream
      dac.stopStream();
    }
    catch (RtError& e) {
      e.printMessage();
    }
	  
    if (dac.isStreamOpen()) dac.closeStream();
    exit(0);
    break;
  default: break;
  }
}
Example #14
int main(const int argc, const char *argv[]) {
	RtAudio adc;
	unsigned int deviceCount = adc.getDeviceCount();
	if (deviceCount < 1) {
		cout << endl << "No audio devices found!" << endl;
		exit(0);
	}

	unsigned int inputDevice = adc.getDefaultInputDevice();
	unsigned int outputDevice = adc.getDefaultOutputDevice();
	for (int i=0; i<argc; i++) {
		if (strcmp(argv[i], "-devices") == 0) {
			// Scan through devices for various capabilities
			showDevices(deviceCount, adc);
			exit(0);
		}
		if (strcmp(argv[i], "-input") == 0) {
			if (i == argc-1) {
				usage();
				exit(0);
			}
			inputDevice=atoi(argv[++i]);
			validateDevice(inputDevice, deviceCount, adc, true);
		}
		if (strcmp(argv[i], "-output") == 0) {
			if (i == argc-1) {
				usage();
				exit(0);
			}
			outputDevice=atoi(argv[++i]);
			validateDevice(outputDevice, deviceCount, adc, false);
		}
	}

	// Initialise DSP thread
	// Initialise GUI

	unsigned int sampleRate = 44100;
	unsigned int bufferFrames = 512;
	unsigned int bufferBytes = 0;
	RtAudio::StreamParameters inputParameters;
	inputParameters.deviceId = inputDevice;
	inputParameters.nChannels = 2;
	inputParameters.firstChannel = 0;

	RtAudio::StreamParameters outputParameters;
	outputParameters.deviceId = outputDevice;
	outputParameters.nChannels = 2;
	outputParameters.firstChannel = 0;

	try {
		adc.openStream(&outputParameters, &inputParameters, RTAUDIO_SINT16, sampleRate,
				&bufferFrames, &inout, &bufferBytes);
		adc.startStream();
	} catch (RtAudioError& e) {
		e.printMessage();
		exit(0);
	}
	// adc.openStream could have adjusted bufferFrames.
	// Set the user data to the buffer size in bytes, so that the inout
	// callback knows how much data to copy:
	// 2 channels * 2 bytes per RTAUDIO_SINT16 sample.
	bufferBytes = bufferFrames * 2 * 2;

	// Can now initialise buffer management. inout could have been asking for
	// buffers but buffer management won't give them until it has been
	// initialised.
	cout << "buffer size in bytes is " << bufferBytes << endl;
	// TODO protect with mutex
	bufferManager = new BufferManager(bufferBytes, maxBuffers);


	char input;
	cout << endl << "Recording ... press <enter> to quit." << endl;
	cin.get(input);
	cout << "Terminating" << endl;

	try {
		// Stop the stream
		adc.stopStream();
	} catch (RtAudioError& e) {
		e.printMessage();
	}
	if (adc.isStreamOpen())
		adc.closeStream();

	// TODO shut down DSP chain, release all buffers
	// TODO shut down Display chain, release all buffers

	delete bufferManager;

	cout << "Terminated" << endl;
	return 0;
}
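The inout callback in Example #14 is not included in the listing. Since the user data is the buffer size in bytes, it is presumably a plain input-to-output copy in the style of RtAudio's duplex example; a sketch (needs <cstring> and <iostream>):

// Pass-through callback: copy the captured input block straight to the output block.
int inout( void *outputBuffer, void *inputBuffer, unsigned int nBufferFrames,
           double streamTime, RtAudioStreamStatus status, void *data )
{
  if ( status ) std::cout << "Stream over/underflow detected." << std::endl;
  // Copy exactly bufferBytes bytes of interleaved SINT16 samples.
  unsigned int *bytes = (unsigned int *)data;
  memcpy( outputBuffer, inputBuffer, *bytes );
  return 0;
}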
Example #15
// entry point
int main( int argc, char ** argv )
{
    // initialize GLUT
    glutInit( &argc, argv );
    // double buffer, use rgb color, enable depth buffer
    glutInitDisplayMode( GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH );
    // initialize the window size
    glutInitWindowSize( g_width, g_height );
    // set the window position
    glutInitWindowPosition( 100, 100 );
    // create the window
    glutCreateWindow( "VisualSine" );
    
    // set the idle function - called when idle
    glutIdleFunc( idleFunc );
    // set the display function - called when redrawing
    glutDisplayFunc( displayFunc );
    // set the reshape function - called when client area changes
    glutReshapeFunc( reshapeFunc );
    // set the keyboard function - called on keyboard events
    glutKeyboardFunc( keyboardFunc );
    // set the mouse function - called on mouse stuff
    glutMouseFunc( mouseFunc );
    
    // RtAudio pointer
    RtAudio * audio = NULL;
    // buffer size
    int buffer_size = 512;
    
    // create the RtAudio
    try {
        audio = new RtAudio(
            0, // device number of output
            1, // number of output channels
            1, // device number for input
            1, // number of input channels
            RTAUDIO_FLOAT64, // format
            MY_SRATE, // sample rate
            &buffer_size, // buffer size
            8 // number of buffers
        );
    } catch( RtError & err ) {
        err.printMessage();
        exit(1);
    }
    
    // allocate global buffer
    g_buffer = new SAMPLE[buffer_size];
    g_bufferSize = buffer_size;

    // set the callback
    try {
        audio->setStreamCallback( &callme, NULL );
        audio->startStream();
    } catch( RtError & err ) {
        // do stuff
        err.printMessage();
        goto cleanup;
    }

    // let GLUT handle the current thread from here
    glutMainLoop();

    // if we get here, then stop!
    try {
        audio->stopStream();
    } catch( RtError & err ) {
        // do stuff
        err.printMessage();
    }

cleanup:
    audio->closeStream();
    delete audio;
    
    return 0;
}
Example #16
int main () {
  // For this program to work, a 16-bit PCM wave file must be
  // present in the same folder!
	const char * fname = "test.flac" ;

  // Sound file handle from the libsndfile library
	SndfileHandle file = SndfileHandle (fname) ;

  // Print all available information about the audio file
  std::cout << "Reading file: " << fname << std::endl;
  std::cout << "File format: " << file.format() << std::endl;
  std::cout << "PCM 16 BIT: " << (SF_FORMAT_WAV | SF_FORMAT_PCM_16) << std::endl;
  std::cout << "Samples in file: " << file.frames() << std::endl;
  std::cout << "Samplerate " << file.samplerate() << std::endl;
  std::cout << "Channels: " << file.channels() << std::endl;

  // The RtAudio class is both dac and adc, but is used here only as a dac
	RtAudio dac;
  if ( dac.getDeviceCount() < 1 ) {
    std::cout << "\nNo audio devices found!\n";
    return 0;
  }

  // Output params ...
  RtAudio::StreamParameters parameters;
  parameters.deviceId = dac.getDefaultOutputDevice();
  parameters.nChannels = 2;
  parameters.firstChannel = 0;
  unsigned int sampleRate = 44100;

  // NOTE: frames != samples
  // one frame = one sample for every channel,
  // i.e. #samples = channels * frames
  unsigned int bufferFrames = 1024;

  // Since we are reading 16-bit PCM data, RTAUDIO_SINT16 should be used as the
  // data format.
  // The sound file handle is passed to the callback as its user data here.
  // A "serious" solution should do this differently!
  // Incompatible formats can produce "interesting" effects, by the way!
  try {
    dac.openStream( &parameters, NULL, RTAUDIO_SINT16,
                    sampleRate, &bufferFrames, &fplay, (void *)&file);
    dac.startStream();
  }
  catch ( RtAudioError& e ) {
    e.printMessage();
    return 0;
  }

  char input;
  std::cout << "\nPlaying ... press <enter> to quit.\n";
  std::cin.get( input );
  try {
    // Stop the stream
    dac.stopStream();
  }
  catch (RtAudioError& e) {
    e.printMessage();
  }
  if ( dac.isStreamOpen() ) dac.closeStream();

  return 0 ;

}
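The fplay callback is not part of this listing either. A minimal sketch, assuming it reads interleaved 16-bit frames directly from the SndfileHandle passed as user data (libsndfile's readf), zero-pads the final block, and asks RtAudio to stop when the file is exhausted:

// Hypothetical playback callback for the libsndfile example above.
int fplay( void *outputBuffer, void *inputBuffer, unsigned int nBufferFrames,
           double streamTime, RtAudioStreamStatus status, void *userData )
{
  SndfileHandle *file = (SndfileHandle *)userData;
  short *out = (short *)outputBuffer;                  // RTAUDIO_SINT16
  sf_count_t got = file->readf( out, nBufferFrames );  // readf counts frames, not samples
  if ( got < (sf_count_t)nBufferFrames ) {
    // Zero the remainder and return 1 so RtAudio drains and stops the stream.
    memset( out + got * file->channels(), 0,
            (nBufferFrames - got) * file->channels() * sizeof(short) );
    return 1;
  }
  return 0;
}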
Example #17
//-----------------------------------------------------------------------------
// name: main()
// desc: entry point
//-----------------------------------------------------------------------------
int main( int argc, char ** argv )
{

	
	
	callbackData data;
		// global for frequency
		data.g_freq=440;
		// global sample number variable
		data.g_t = 0;
		// global for width;
		data.g_width = 0;
		//global for input
		data.g_input=0;
		
	
	//check parameters and parse input
	if (!parse(argc,argv,data))
	{
		exit(0);
	}

    // instantiate RtAudio object
    RtAudio adac;
    // variables
    unsigned int bufferBytes = 0;
    // frame size
    unsigned int bufferFrames = 512;
    
    // check for audio devices
    if( adac.getDeviceCount() < 1 )
    {
        // nopes
        cout << "no audio devices found!" << endl;
        exit( 1 );
    }

    // let RtAudio print messages to stderr.
    adac.showWarnings( true );

    // set input and output parameters
    RtAudio::StreamParameters iParams, oParams;
    iParams.deviceId = adac.getDefaultInputDevice();
    iParams.nChannels = MY_CHANNELS;
    iParams.firstChannel = 0;
    oParams.deviceId = adac.getDefaultOutputDevice();
    oParams.nChannels = MY_CHANNELS;
    oParams.firstChannel = 0;
    
    // create stream options
    RtAudio::StreamOptions options;

    // go for it
    try {
        // open a stream
        adac.openStream( &oParams, &iParams, MY_FORMAT, MY_SRATE, &bufferFrames, &callme, (void *)&data, &options );
    }
    catch( RtError& e )
    {
        // error!
        cout << e.getMessage() << endl;
        exit( 1 );
    }

    // compute
    bufferBytes = bufferFrames * MY_CHANNELS * sizeof(SAMPLE);
    
    // test RtAudio functionality for reporting latency.
    cout << "stream latency: " << adac.getStreamLatency() << " frames" << endl;

    // go for it
    try {
        // start stream
        adac.startStream();

        // get input
        char input;
        std::cout << "running... press <enter> to quit (buffer frames: " << bufferFrames << ")" << endl;
        std::cin.get(input);
        
        // stop the stream.
        adac.stopStream();
    }
    catch( RtError& e )
    {
        // print error message
        cout << e.getMessage() << endl;
        goto cleanup;
    }
    
cleanup:
    // close if open
    if( adac.isStreamOpen() )
        adac.closeStream();
    
    // done
    outfile<<"];\nplot(x)";
    return 0;
}
Example #18
//-----------------------------------------------------------------------------
// Name: main( )
// Desc: starting point
//-----------------------------------------------------------------------------
int main( int argc, char ** argv )
{
	// Get RtAudio Instance with default API
	RtAudio *audio = new RtAudio();
    // buffer size
    unsigned int buffer_size = 512;
	// Output Stream Parameters
	RtAudio::StreamParameters outputStreamParams;
	outputStreamParams.deviceId = audio->getDefaultOutputDevice();
	outputStreamParams.nChannels = 1;
	// Input Stream Parameters
	RtAudio::StreamParameters inputStreamParams;
	inputStreamParams.deviceId = audio->getDefaultInputDevice();
	inputStreamParams.nChannels = 1;
	
	// Get RtAudio Stream
	try {
		audio->openStream(
			NULL,
			&inputStreamParams,
			RTAUDIO_FLOAT32,
			MY_FREQ,
			&buffer_size,
			callback_func,
			NULL
			);
	}
	catch(RtError &err) {
		err.printMessage();
		exit(1);
	}
	g_bufferSize = buffer_size;
	// Samples for Feature Extraction in a Buffer
	g_samples = (SAMPLE *)malloc(sizeof(SAMPLE)*g_bufferSize*g_numMaxBuffersToUse);
	g_audio_buffer = (SAMPLE *)malloc(sizeof(SAMPLE)*g_bufferSize*g_numMaxBuffersToUse);
	g_another_buffer = (SAMPLE *)malloc(sizeof(SAMPLE)*g_bufferSize*g_numMaxBuffersToUse);
	g_buffest = (SAMPLE *)malloc(sizeof(SAMPLE)*g_bufferSize*g_numMaxBuffersToUse);
	g_residue = (SAMPLE *)malloc(sizeof(SAMPLE)*g_bufferSize*g_numMaxBuffersToUse);
	g_coeff = (SAMPLE *)malloc(sizeof(SAMPLE)*g_order);
    g_dwt = (SAMPLE *)malloc(sizeof(SAMPLE)*g_bufferSize*g_numMaxBuffersToUse);
	
    // initialize GLUT
    glutInit( &argc, argv );
    // double buffer, use rgb color, enable depth buffer
    glutInitDisplayMode( GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH );
    // initialize the window size
    glutInitWindowSize( g_width, g_height );
    // set the window position
    glutInitWindowPosition( 100, 100 );
    // create the window
    glutCreateWindow( "The New File" );
    
    // set the idle function - called when idle
    glutIdleFunc( idleFunc );
    // set the display function - called when redrawing
    glutDisplayFunc( displayFunc );
    // set the reshape function - called when client area changes
    glutReshapeFunc( reshapeFunc );
    // set the keyboard function - called on keyboard events
    glutKeyboardFunc( keyboardFunc );
    // set the mouse function - called on mouse stuff
    glutMouseFunc( mouseFunc );
    
    // do our own initialization
    initialize();

	// initialize mfcc
	initMFCC();
	
	//init lpc
	initialize_lpc();
	
	// initialize osc
	// Initialize a socket to get a port
	g_transmitSocket = new UdpTransmitSocket( IpEndpointName( g_ADDRESS.c_str(), SERVERPORT ) );
	
//    // Set the global sample rate before creating class instances.
//    Stk::setSampleRate( 44100.0 );
//	// Read In File
//	try 
//    {
//        // read the file
//        g_fin.openFile( "TomVega.wav" );
//        // change the rate
//        g_fin.setRate( 1 );
//		// normalize the peak
//		g_fin.normalize();
//    } catch( stk::StkError & e )
//    {
//        cerr << "baaaaaaaaad..." << endl;
//        return 1;
//    }
	
	// Start Stream
	try {
        audio->startStream();
    } catch( RtError & err ) {
        // do stuff
        err.printMessage();
        goto cleanup;
    }

    // let GLUT handle the current thread from here
    glutMainLoop();
    
 	// if we get here, then stop!
	try{
		audio->stopStream();
	} 
	catch( RtError & err ) {
		// do stuff
		err.printMessage();
	}

	cleanup:
	    audio->closeStream();
	    delete audio;

    return 0;
}
Example #19
//-----------------------------------------------------------------------------
// name: main()
// desc: entry point
//-----------------------------------------------------------------------------
int main( int argc, char ** argv )
{
    // instantiate RtAudio object
    RtAudio audio;
    // variables
    unsigned int bufferBytes = 0;
    // frame size
    unsigned int bufferFrames = 512;
    
    // check for audio devices
    if( audio.getDeviceCount() < 1 )
    {
        // nopes
        cout << "no audio devices found!" << endl;
        exit( 1 );
    }

    // initialize GLUT
    glutInit( &argc, argv );
    // init gfx
    initGfx();

    // let RtAudio print messages to stderr.
    audio.showWarnings( true );

    // set input and output parameters
    RtAudio::StreamParameters iParams, oParams;
    iParams.deviceId = audio.getDefaultInputDevice();
    iParams.nChannels = MY_CHANNELS;
    iParams.firstChannel = 0;
    oParams.deviceId = audio.getDefaultOutputDevice();
    oParams.nChannels = MY_CHANNELS;
    oParams.firstChannel = 0;
    
    // create stream options
    RtAudio::StreamOptions options;

    // go for it
    try {
        // open a stream
        audio.openStream( &oParams, &iParams, MY_FORMAT, MY_SRATE, &bufferFrames, &callme, (void *)&bufferBytes, &options );
    }
    catch( RtError& e )
    {
        // error!
        cout << e.getMessage() << endl;
        exit( 1 );
    }

    // compute
    bufferBytes = bufferFrames * MY_CHANNELS * sizeof(SAMPLE);
    // allocate global buffer
    g_bufferSize = bufferFrames;
    g_buffer = new SAMPLE[g_bufferSize];
    memset( g_buffer, 0, sizeof(SAMPLE)*g_bufferSize );

    // go for it
    try {
        // start stream
        audio.startStream();

        // let GLUT handle the current thread from here
        glutMainLoop();
        
        // stop the stream.
        audio.stopStream();
    }
    catch( RtError& e )
    {
        // print error message
        cout << e.getMessage() << endl;
        goto cleanup;
    }
    
cleanup:
    // close if open
    if( audio.isStreamOpen() )
        audio.closeStream();
    
    // done
    return 0;
}
Example #20
int main (int argc, char ** argv)
{
    
    //parse tempo 
    if (argc>2)
    {
        cerr<<"Error in arguments\n";
        printHelp();
        exit(1);
    }
    else if (argc==2) 
    {
        g_tempo = atoi(argv[1]);
        if (g_tempo<40 || g_tempo>200)
        {
            cerr<<"Tempo out of bounds!\n";
            printHelp();
            exit(1);
        }
        tempoChange();
    }
    
    // set up fluid synth stuff
    // TODO: error checking!!!!
    g_settings = new_fluid_settings(); 
    g_synth = new_fluid_synth( g_settings );
    g_metronome = new_fluid_synth( g_settings );  
    
    
    //fluid_player_t* player;
    //player = new_fluid_player(g_synth);
    //fluid_player_add(player, "backing.mid");
    //fluid_player_play(player);

    
    
    if (fluid_synth_sfload(g_synth, "piano.sf2", 1) == -1)
    {
        cerr << "Error loading sound font" << endl;
        exit(1);
    }
    
    if (fluid_synth_sfload(g_metronome, "drum.sf2", 1) == -1)
    {
        cerr << "Error loading sound font" << endl;
        exit(1);
    }
    
    
    // RtAudio config + init

    // pointer to RtAudio object
    RtMidiIn * midiin = NULL;    
	RtAudio *  audio = NULL;
    unsigned int bufferSize = 512;//g_sixteenth/100;

    // MIDI config + init
    try 
    {
        midiin = new RtMidiIn();
    }
    catch( RtError & err ) {
        err.printMessage();
       // goto cleanup;
    }
    
    // Check available ports.
    if ( midiin->getPortCount() == 0 )
    {
        std::cout << "No ports available!\n";
       // goto cleanup;
    }
    // use the first available port
    if ( midiin->getPortCount() > 2)
        midiin->openPort( 1 );
    else 
        midiin->openPort( 0 );

    // set midi callback
    midiin->setCallback( &midi_callback );

    // Don't ignore sysex, timing, or active sensing messages.
    midiin->ignoreTypes( false, false, false );

    // create the object
    try
    {
        audio = new RtAudio();
        cerr << "buffer size: " << bufferSize << endl;
    }
        catch( RtError & err ) {
        err.printMessage();
        exit(1);
    }

    if( audio->getDeviceCount() < 1 )
    {
        // nopes
        cout << "no audio devices found!" << endl;
        exit( 1 );
    }
        
    // let RtAudio print messages to stderr.
    audio->showWarnings( true );

    // set input and output parameters
    RtAudio::StreamParameters iParams, oParams;
    iParams.deviceId = audio->getDefaultInputDevice();
    iParams.nChannels = 1;
    iParams.firstChannel = 0;
    oParams.deviceId = audio->getDefaultOutputDevice();
    oParams.nChannels = 2;
    oParams.firstChannel = 0;
        
    // create stream options
    RtAudio::StreamOptions options;

    // set the callback and start stream
    try
    {
        audio->openStream( &oParams, &iParams, RTAUDIO_FLOAT32, MY_SRATE, &bufferSize, &audioCallback, NULL, &options);
        audio->startStream();
        
        // test RtAudio functionality for reporting latency.
        cout << "stream latency: " << audio->getStreamLatency() << " frames" << endl;
    }
    catch( RtError & err )
    {
        err.printMessage();
        goto cleanup;
    }

    // wait for user input
    cout << "Type CTRL+C to quit:";
    
    //initialize graphics
    gfxInit(&argc,argv);
    
    // if we get here, stop!
    try
    {
        audio->stopStream();
    }
    catch( RtError & err )
    {
        err.printMessage();
    }

    // Clean up
    cleanup:
    if(audio)
    {
        audio->closeStream();
        delete audio;
    }

    
    return 0;
}
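The audioCallback used above is not shown. Since the stream is opened with two RTAUDIO_FLOAT32 output channels, it presumably asks FluidSynth to render interleaved stereo into the output buffer; a sketch using fluid_synth_write_float, rendering only g_synth (mixing g_metronome in would likely need a temporary buffer and a sum, since write_float fills its destination rather than accumulating):

int audioCallback( void *outputBuffer, void *inputBuffer, unsigned int nBufferFrames,
                   double streamTime, RtAudioStreamStatus status, void *userData )
{
    float *out = (float *)outputBuffer;
    // Interleaved stereo: left samples at even indices, right at odd (stride 2).
    fluid_synth_write_float( g_synth, nBufferFrames, out, 0, 2, out, 1, 2 );
    return 0;
}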
Example #21
int rec_raw (int margin_dB, int chans, int fs, char *nameFile, bool*flagParar, int *clipping, int device)
{
 
  int i = 0, j = 0;
  int buffer_size;
  int dummy;
  
  int clipping_threshold = (int)((double)INT_MAX/pow(10, ((double)margin_dB/(double)20)) );
      
  MY_TYPE *buffer;

  RtAudio *audio = 0;

  char nomFichEGG[MAXPATH];
  strcpy(nomFichEGG,nameFile);
  flhPonerExtension(nomFichEGG,".egg.wav");

  char nomFichMicro[MAXPATH];
  strcpy(nomFichMicro,nameFile);
  flhPonerExtension(nomFichMicro,".ch1.wav");

  /* Implementation using libsndfile */

  SNDFILE *fdTOT;
  SNDFILE *fdEGG;
  SNDFILE *fdMicro;

  SF_INFO SfInfoTot, SfInfoEGG, SfInfoMicro;

  SfInfoTot.samplerate = fs;
  SfInfoTot.channels   = chans;
  SfInfoTot.format     = SF_FORMAT_WAV | SF_FORMAT_PCM_24 | SF_ENDIAN_LITTLE;

  SfInfoEGG.samplerate = fs;
  SfInfoEGG.channels   = 1;
  SfInfoEGG.format     = SF_FORMAT_WAV | SF_FORMAT_PCM_16 | SF_ENDIAN_LITTLE;

  SfInfoMicro.samplerate = fs;
  SfInfoMicro.channels   = 1;
  SfInfoMicro.format     = SF_FORMAT_WAV | SF_FORMAT_PCM_16 | SF_ENDIAN_LITTLE;

  fdTOT   = sf_open (nameFile, SFM_WRITE, &SfInfoTot);
  fdEGG   = sf_open (nomFichEGG, SFM_WRITE, &SfInfoEGG);
  fdMicro = sf_open (nomFichMicro, SFM_WRITE, &SfInfoMicro);

  /* Implementation using libsndfile */
 (*clipping) = 0;

  buffer_size = TAM_BUFF_AUDIO;
  try {
    audio = new RtAudio(0, 0, device, chans,
		FORMAT, fs, &buffer_size, MAX_NUM_CHANNELS, 
#ifdef __WINDOWS_DS__
		RtAudio::WINDOWS_DS
#elif defined(__WINDOWS_ASIO__)
		RtAudio::WINDOWS_ASIO
#endif
		);

  }
  catch (RtError &error) {
    error.printMessage();
    exit(EXIT_FAILURE);
  }

  try {
    buffer = (MY_TYPE *) audio->getStreamBuffer();
    audio->startStream();
  }
  catch (RtError &error) {
    error.printMessage();
    goto cleanup;
  }

/********* RECORDING ***********/
/*******************************/

  while (1) {
 
    if (*flagParar==false) 
	{
      try {
        audio->tickStream();
      }
      catch (RtError &error) {
        error.printMessage();
        goto cleanup;
      }


	  // Record the eight channels
	  sf_count_t counts = buffer_size;
	  counts = sf_writef_int  (fdTOT, buffer, counts);

	  for (int i=0; i < buffer_size; i++)
	  {
		  dummy = INT_MAX;

		  if ((dummy=abs(buffer[i*chans+canalMicro])) > clipping_threshold)
		  {
			  (*clipping)++;
			//dummy = CLIPPING_THRESHOLD;
		  }
	
		  counts = sf_writef_int  (fdEGG, &buffer[i*chans+canalEGG], 1);
		  counts = sf_writef_int  (fdMicro, &buffer[i*chans+canalMicro], 1);
	  }
	
/*	  EnterCriticalSection(&CriticalSection); 

	  for (i = 0, j = 0; i < buffer_size; i += chans, j++)
	  {
		  muestraOSC_GLOTO[j]   = buffer[i*chans + canalEGG];
		  muestraOSC_CHANNEL[j] = buffer[i*chans + canalMicro];
	  }

	  LeaveCriticalSection (&CriticalSection);
*/
	}
    else
	{

      try {
        audio->tickStream();
      }
      catch (RtError &error) {
        error.printMessage();
        goto cleanup;
      }

      break;
	}

  }//end while
  
  
  try {
    audio->stopStream();
  }
  catch (RtError &error) {
    error.printMessage();
  }

 cleanup:
  audio->closeStream();
  delete audio;

  sf_close (fdTOT);
  sf_close (fdEGG);
  sf_close (fdMicro);

  return 0;
}
Example #22
int main( int argc, char *argv[])
{
  // COMMAND LINE ARG HANDLING
  map<string, ugen> ugens;
  ugens["--sine"] = &sine;
  ugens["--saw"] = &saw;
  ugens["--pulse"] = &pulse;
  ugens["--noise"] = &noise;
  ugens["--impulse"] = &impulse;

  if (argc < 4 || argc > 10 ) usage();

  string type_arg = argv[1];
  g_active_ugen = ugens[type_arg];
  if (g_active_ugen == NULL)
    usage();

  double freq_arg = atof(argv[2]);
  if (freq_arg <= 0)
    usage();
  g_frequency = freq_arg;

  double width_arg = atof(argv[3]);
  if (width_arg < 0 || width_arg > 1)
    usage();
  g_width = width_arg;

  if (argc > 4) { // modulation parameters present
    for (int i = 4; i < argc;) {
      if (string(argv[i]).compare("--input") == 0) {
	g_modulate_input = true;
	i++;
      } 
      else if (string(argv[i]).compare("--fm") == 0) {
	g_fm_on = true;

	string fm_type_arg = argv[++i];
	g_active_fm_ugen = ugens[fm_type_arg];
	if (g_active_fm_ugen == NULL)
	  usage();
	
	double fm_freq_arg = atof(argv[++i]);
	if (fm_freq_arg <= 0)
	  usage();
	g_fm_frequency = fm_freq_arg;
	
	double fm_width_arg = atof(argv[++i]);
	if (fm_width_arg < 0 || fm_width_arg > 1)
	  usage();
	g_fm_width = fm_width_arg;
	
	double fm_index_arg = atoi(argv[++i]);
	g_fm_index = fm_index_arg;

	i++;
      }
      else
	usage();
    }
  }

  // AUDIO SETUP
  RtAudio audio;
  audio.showWarnings( true );

  RtAudio::StreamParameters output_params;
  RtAudio::StreamParameters input_params;

  // Choose an audio device and a sample rate
  unsigned int sample_rate;
  unsigned int devices = audio.getDeviceCount();
  if ( devices < 1 ) {
    cerr << "No audio device found!" << endl;
    exit(1);
  }
  RtAudio::DeviceInfo info;
  for (unsigned int i = 0; i < devices; i++ ) {
    info = audio.getDeviceInfo(i);
    if ( info.isDefaultOutput ) {
      output_params.deviceId = i;
      output_params.nChannels = 2;

      if (info.sampleRates.size() < 1) {
	cerr << "No supported sample rates found!" << endl;
	exit(1);
      }
      for (int i = 0; i < info.sampleRates.size(); i++) {
	sample_rate = info.sampleRates[i];
	if (sample_rate == 44100 || sample_rate == 48000) {
	  // Found a nice sample rate, stop looking
	  break;
	}
      }
      cout << "Using sample rate: " << sample_rate << endl;

    }
    if ( info.isDefaultInput ) {
      input_params.deviceId = i;
      input_params.nChannels = 1;
    }
  }

  cout << "Using output device ID " << output_params.deviceId << " which has " << 
    output_params.nChannels << " output channels." << endl;
  cout << "Using input device ID " << input_params.deviceId << " which has " << 
    input_params.nChannels << " input channels." << endl;

  RtAudio::StreamOptions options;
  options.flags |= RTAUDIO_HOG_DEVICE;
  options.flags |= RTAUDIO_SCHEDULE_REALTIME;

  unsigned int buffer_frames = 256;

  try {
    audio.openStream( &output_params,     // output params
		      &input_params,      // input params
		      RTAUDIO_FLOAT64,    // audio format 
		      sample_rate,        // sample rate
		      &buffer_frames,     // num frames per buffer (mutable by rtaudio)
		      &callback,          // audio callback
		      &audio,             // user data pointer HACK HACK :D
		      &options);          // stream options
    audio.startStream();
  } catch ( RtError &e ) {
    e.printMessage();
    goto cleanup;
  }

  char input;
  cout << "Playing, press enter to quit (buffer frames = " << buffer_frames << ")." << endl;
  cin.get( input );

  try {
    audio.stopStream();
  }
  catch ( RtError &e ) {
    e.printMessage();
  }

 cleanup:
  if ( audio.isStreamOpen() ) {
    audio.closeStream();
  }

  return 0;
}
Example #23
// ========
// = Main =
// ========
// Entry point
int main (int argc, char *argv[])
{
	cout << argc << "  " << argv[0];
	if (argc > 3) {
		cerr << "\nERROR - wrong number of arguments\n";
		exit(1);
	}
	if (argc == 3)
		g_audio_history = atoi(argv[2]);
	else
		g_audio_history = 30;
	if (argc > 1)
		g_fft_history = atoi(argv[1]);
	else
		g_fft_history = 100;
	help();
    // RtAudio config + init

    // pointer to RtAudio object
    RtAudio *  audio = NULL;

    // create the object
    try
    {
        audio = new RtAudio();
    }
        catch( RtError & err ) {
        err.printMessage();
        exit(1);
    }

    if( audio->getDeviceCount() < 1 )
    {
        // nopes
        cout << "no audio devices found!" << endl;
        exit( 1 );
    }
        
    // let RtAudio print messages to stderr.
    audio->showWarnings( true );

    // set input and output parameters
    RtAudio::StreamParameters iParams, oParams;
    iParams.deviceId = audio->getDefaultInputDevice();
    iParams.nChannels = 1;
    iParams.firstChannel = 0;
    oParams.deviceId = audio->getDefaultOutputDevice();
    oParams.nChannels = 1;
    oParams.firstChannel = 0;
        
    // create stream options
    RtAudio::StreamOptions options;

    // set the callback and start stream
    try
    {
        audio->openStream( &oParams, &iParams, RTAUDIO_FLOAT64, MY_SRATE, &g_buffSize, &audioCallback, NULL, &options);
        
        cerr << "Buffer size defined by RtAudio: " << g_buffSize << endl;
        
        // allocate the buffer for the fft
        g_fftBuff = new float[g_buffSize * ZPF];
		g_audioBuff = new float[g_buffSize * ZPF];
        if ( g_fftBuff == NULL ) {
            cerr << "Something went wrong when creating the fft and audio buffers" << endl;
            exit (1);
        }
        
        // allocate the buffer for the time domain window
        g_window = new float[g_buffSize];
        if ( g_window == NULL ) {
            cerr << "Something went wrong when creating the window" << endl;
            exit (1);
        }

        // create a hanning window
        make_window( g_window, g_buffSize );
        
        // start the audio stream
        audio->startStream();
        
        // test RtAudio functionality for reporting latency.
        cout << "stream latency: " << audio->getStreamLatency() << " frames" << endl;
    }
    catch( RtError & err )
    {
        err.printMessage();
        goto cleanup;
    }


    // ============
    // = GL stuff =
    // ============

    // initialize GLUT
    glutInit( &argc, argv );
    // double buffer, use rgb color, enable depth buffer
    glutInitDisplayMode( GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH );
    // initialize the window size
    glutInitWindowSize( g_width, g_height );
    // set the window position
    glutInitWindowPosition( 100, 100 );
    // create the window
    glutCreateWindow( "Hello GL" );
    //glutEnterGameMode();

    // set the idle function - called when idle
    glutIdleFunc( idleFunc );
    // set the display function - called when redrawing
    glutDisplayFunc( displayFunc );
    // set the reshape function - called when client area changes
    glutReshapeFunc( reshapeFunc );
    // set the keyboard function - called on keyboard events
    glutKeyboardFunc( keyboardFunc );
    // set the mouse function - called on mouse stuff
    glutMouseFunc( mouseFunc );
    // set the special function - called on special keys events (fn, arrows, pgDown, etc)
    glutSpecialFunc( specialFunc );

    // do our own initialization
    initialize();

    // let GLUT handle the current thread from here
    glutMainLoop();

        
    // if we get here, stop!
    try
    {
        audio->stopStream();
    }
    catch( RtError & err )
    {
        err.printMessage();
    }

    // Clean up
    cleanup:
    if(audio)
    {
        audio->closeStream();
        delete audio;
    }

    
    return 0;
}
Example #24
int main( int argc, char *argv[] )
{
  unsigned int bufferFrames, fs, device = 0, offset = 0;

  // minimal command-line checking
  if (argc < 3 || argc > 6 ) usage();

  RtAudio dac;
  if ( dac.getDeviceCount() < 1 ) {
    std::cout << "\nNo audio devices found!\n";
    exit( 1 );
  }

  channels = (unsigned int) atoi( argv[1] );
  fs = (unsigned int) atoi( argv[2] );
  if ( argc > 3 )
    device = (unsigned int) atoi( argv[3] );
  if ( argc > 4 )
    offset = (unsigned int) atoi( argv[4] );
  if ( argc > 5 )
    nFrames = (unsigned int) (fs * atof( argv[5] ));
  if ( nFrames > 0 ) checkCount = true;

  double *data = (double *) calloc( channels, sizeof( double ) );

  // Let RtAudio print messages to stderr.
  dac.showWarnings( true );

  // Set our stream parameters for output only.
  bufferFrames = 256;
  RtAudio::StreamParameters oParams;
  oParams.deviceId = device;
  oParams.nChannels = channels;
  oParams.firstChannel = offset;

  RtAudio::StreamOptions options; // not declared anywhere in this listing; assumed local
  options.flags |= RTAUDIO_HOG_DEVICE;
  options.flags |= RTAUDIO_SCHEDULE_REALTIME;
#if !defined( USE_INTERLEAVED )
  options.flags |= RTAUDIO_NONINTERLEAVED;
#endif
  try {
    dac.openStream( &oParams, NULL, FORMAT, fs, &bufferFrames, &saw, (void *)data, &options );
    dac.startStream();
  }
  catch ( RtError& e ) {
    e.printMessage();
    goto cleanup;
  }

  if ( checkCount ) {
    while ( dac.isStreamRunning() == true ) SLEEP( 100 );
  }
  else {
    char input;
    //std::cout << "Stream latency = " << dac.getStreamLatency() << "\n" << std::endl;
    std::cout << "\nPlaying ... press <enter> to quit (buffer size = " << bufferFrames << ").\n";
    std::cin.get( input );

    try {
      // Stop the stream
      dac.stopStream();
    }
    catch ( RtError& e ) {
      e.printMessage();
    }
  }

 cleanup:
  if ( dac.isStreamOpen() ) dac.closeStream();
  free( data );

  return 0;
}
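The saw callback in Example #24 is not listed. A sketch of the interleaved case, assuming FORMAT is RTAUDIO_FLOAT64, that channels, nFrames and checkCount are the globals used above, and that a static counter tracks how many frames have been rendered (the per-channel ramp rates are arbitrary):

int saw( void *outputBuffer, void *inputBuffer, unsigned int nBufferFrames,
         double streamTime, RtAudioStreamStatus status, void *data )
{
  double *buffer = (double *)outputBuffer;
  double *lastValues = (double *)data;          // one running phase per channel
  static unsigned int frameCounter = 0;

  if ( status ) std::cout << "Stream underflow detected!" << std::endl;

  for ( unsigned int i = 0; i < nBufferFrames; i++ ) {
    for ( unsigned int j = 0; j < channels; j++ ) {
      *buffer++ = lastValues[j] * 0.5;
      lastValues[j] += 0.005 * ( j + 1 + ( j * 0.1 ) );
      if ( lastValues[j] >= 1.0 ) lastValues[j] -= 2.0;
    }
  }

  frameCounter += nBufferFrames;
  if ( checkCount && frameCounter >= nFrames ) return 1;  // stop after the requested duration
  return 0;
}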
Example #25
//-----------------------------------------------------------------------------
// name: main()
// desc: entry point
//-----------------------------------------------------------------------------
int main( int argc, char ** argv )
{

 RtMidiIn *midiin = new RtMidiIn();

  // Check available ports.
  unsigned int nPorts = midiin->getPortCount();
  if ( nPorts == 0 ) {
    std::cout << "No ports available!\n";
    //goto cleanup;
  }

  midiin->openPort( 0 );

  // Set our callback function.  This should be done immediately after
  // opening the port to avoid having incoming messages written to the
  // queue.
  midiin->setCallback( &mycallback );

  // Don't ignore sysex, timing, or active sensing messages.
  midiin->ignoreTypes( false, false, false );

  std::cout << "\nReading MIDI input ... press <enter> to quit.\n";
  char input;
  std::cin.get(input);


    // instantiate RtAudio object
    RtAudio audio;
    // variables
    unsigned int bufferBytes = 0;
    // frame size
    unsigned int numFrames = 512;
    
    // check for audio devices
    if( audio.getDeviceCount() < 1 )
    {
        // nopes
        cout << "no audio devices found!" << endl;
        exit( 1 );
    }
    
    // let RtAudio print messages to stderr.
    audio.showWarnings( true );
    
    // set input and output parameters
    RtAudio::StreamParameters iParams, oParams;
    iParams.deviceId = audio.getDefaultInputDevice();
    iParams.nChannels = MY_CHANNELS;
    iParams.firstChannel = 0;
    oParams.deviceId = audio.getDefaultOutputDevice();
    oParams.nChannels = MY_CHANNELS;
    oParams.firstChannel = 0;
    
    // create stream options
    RtAudio::StreamOptions options;
    
    // go for it
    try {
        // open a stream
        audio.openStream( &oParams, &iParams, MY_FORMAT, MY_SRATE, &numFrames, &callme, NULL, &options );
    }
    catch( RtError& e )
    {
        // error!
        cout << e.getMessage() << endl;
        exit( 1 );
    }
    
    // compute
    bufferBytes = numFrames * MY_CHANNELS * sizeof(SAMPLE);
    
    // test RtAudio functionality for reporting latency.
    cout << "stream latency: " << audio.getStreamLatency() << " frames" << endl;
    
    for( int i = 0; i < MY_NUMSTRINGS; i++ )
    {
        // initialize
        g_ks[i].init( MY_SRATE*2, 440, MY_SRATE );
    }
    
    // go for it
    try {
        // start stream
        audio.startStream();
	char input;
        std::cout << "Press any key to quit ";
	std::cin.get(input);
        
        // stop the stream.
        audio.stopStream();
    }
    catch( RtError& e )
    {
        // print error message
        cout << e.getMessage() << endl;
        goto cleanup;
    }
    
cleanup:
    // close if open
    if( audio.isStreamOpen() )
        audio.closeStream();
    delete midiin;
    
    // done
    return 0;
}
Example #26
int main( int argc, char *argv[] )
{
  unsigned int bufferFrames, fs, oDevice = 0, iDevice = 0, iOffset = 0, oOffset = 0;
  char input;

  // minimal command-line checking
  if (argc < 3 || argc > 7 ) usage();

  RtAudio dac;
  if ( dac.getDeviceCount() < 1 ) {
    std::cout << "\nNo audio devices found!\n";
    exit( 1 );
  }

  channels = (unsigned int) atoi( argv[1] );
  fs = (unsigned int) atoi( argv[2] );
  if ( argc > 3 )
    iDevice = (unsigned int) atoi( argv[3] );
  if ( argc > 4 )
    oDevice = (unsigned int) atoi(argv[4]);
  if ( argc > 5 )
    iOffset = (unsigned int) atoi(argv[5]);
  if ( argc > 6 )
    oOffset = (unsigned int) atoi(argv[6]);

  double *data = (double *) calloc( channels, sizeof( double ) );

  // Let RtAudio print messages to stderr.
  dac.showWarnings( true );

  // Set our stream parameters for output only.
  bufferFrames = 256;
  RtAudio::StreamParameters oParams, iParams;
  oParams.deviceId = oDevice;
  oParams.nChannels = channels;
  oParams.firstChannel = oOffset;

  RtAudio::StreamOptions options;
  options.flags = RTAUDIO_HOG_DEVICE;
  try {
    dac.openStream( &oParams, NULL, RTAUDIO_FLOAT64, fs, &bufferFrames, &sawi, (void *)data, &options );
    std::cout << "\nStream latency = " << dac.getStreamLatency() << std::endl;

    // Start the stream
    dac.startStream();
    std::cout << "\nPlaying ... press <enter> to stop.\n";
    std::cin.get( input );

    // Stop the stream
    dac.stopStream();

    // Restart again
    std::cout << "Press <enter> to restart.\n";
    std::cin.get( input );
    dac.startStream();

    // Test abort function
    std::cout << "Playing again ... press <enter> to abort.\n";
    std::cin.get( input );
    dac.abortStream();

    // Restart another time
    std::cout << "Press <enter> to restart again.\n";
    std::cin.get( input );
    dac.startStream();

    std::cout << "Playing again ... press <enter> to close the stream.\n";
    std::cin.get( input );
  }
  catch ( RtError& e ) {
    e.printMessage();
    goto cleanup;
  }

  if ( dac.isStreamOpen() ) dac.closeStream();

  // Test non-interleaved functionality
  options.flags = RTAUDIO_NONINTERLEAVED;
  try {
    dac.openStream( &oParams, NULL, RTAUDIO_FLOAT64, fs, &bufferFrames, &sawni, (void *)data, &options );

    std::cout << "Press <enter> to start non-interleaved playback.\n";
    std::cin.get( input );

    // Start the stream
    dac.startStream();
    std::cout << "\nPlaying ... press <enter> to stop.\n";
    std::cin.get( input );
  }
  catch ( RtError& e ) {
    e.printMessage();
    goto cleanup;
  }

  if ( dac.isStreamOpen() ) dac.closeStream();

  // Now open a duplex stream.
  unsigned int bufferBytes;
  iParams.deviceId = iDevice;
  iParams.nChannels = channels;
  iParams.firstChannel = iOffset;
  options.flags = RTAUDIO_NONINTERLEAVED;
  try {
    dac.openStream( &oParams, &iParams, RTAUDIO_SINT32, fs, &bufferFrames, &inout, (void *)&bufferBytes, &options );

    bufferBytes = bufferFrames * channels * 4;

    std::cout << "Press <enter> to start duplex operation.\n";
    std::cin.get( input );

    // Start the stream
    dac.startStream();
    std::cout << "\nRunning ... press <enter> to stop.\n";
    std::cin.get( input );

    // Stop the stream
    dac.stopStream();
    std::cout << "\nStopped ... press <enter> to restart.\n";
    std::cin.get( input );

    // Restart the stream
    dac.startStream();
    std::cout << "\nRunning ... press <enter> to stop.\n";
    std::cin.get( input );
  }
  catch ( RtError& e ) {
    e.printMessage();
  }

 cleanup:
  if ( dac.isStreamOpen() ) dac.closeStream();
  free( data );

  return 0;
}
Example #27
int main(int argc, char *argv[])
{
  int chans, fs, buffer_size, count, device = 0;
  long counter = 0;
  MY_TYPE *buffer;
  char *file;
  FILE *fd;
  RtAudio *audio;

  // minimal command-line checking
  if (argc != 4 && argc != 5 ) usage();

  chans = (int) atoi(argv[1]);
  fs = (int) atoi(argv[2]);
  file = argv[3];
  if ( argc == 5 )
    device = (int) atoi(argv[4]);

  fd = fopen(file,"rb");
  if (!fd) {
    std::cout << "can't find file!\n";
    exit(0);
  }

  // Open the realtime output device
  buffer_size = 512;
  try {
    audio = new RtAudio(device, chans, 0, 0,
                        FORMAT, fs, &buffer_size, 2);
  }
  catch (RtError &error) {
    fclose(fd);
    error.printMessage();
    exit(EXIT_FAILURE);
  }

  try {
    buffer = (MY_TYPE *) audio->getStreamBuffer();
    audio->startStream();
  }
  catch (RtError &error) {
    error.printMessage();
    goto cleanup;
  }

  while (1) {
    count = fread(buffer, chans * sizeof(MY_TYPE), buffer_size, fd);

    if (count == buffer_size) {
      try {
        audio->tickStream();
      }
      catch (RtError &error) {
        error.printMessage();
        goto cleanup;
      }
    }
    else
      break;
        
    counter += buffer_size;
  }

  try {
    audio->stopStream();
  }
  catch (RtError &error) {
    error.printMessage();
  }

 cleanup:
  audio->closeStream();
  delete audio;
  fclose(fd);

  return 0;
}
Example #28
int main(int argc, char ** argv)
{
    if ( argc==1 )
    {
        std::cerr << "f " << freq << "\t frequency" << std::endl; 
        std::cerr << "s " << scale << "\t scale" << std::endl; 
        std::cerr << "t " << steps << "\t steps" << std::endl; 
        std::cerr << "r " << rthresh << "\t right thresh" << std::endl; 
        std::cerr << "f " << lthresh << "\t left  thresh" << std::endl; 
        std::cerr << "i " << inp_audio << "\t inp_audio device id" << std::endl; 
        std::cerr << "o " << out_audio << "\t out_audio device id" << std::endl; 
    }
    for ( int i = 1; i<argc-1; i++ )
    {
        if ( !strcmp(argv[i],"f") ) { freq=atoi(argv[++i]); continue; }
        if ( !strcmp(argv[i],"l") ) { lthresh=atoi(argv[++i]); continue; }
        if ( !strcmp(argv[i],"r") ) { rthresh=atoi(argv[++i]); continue; }
        if ( !strcmp(argv[i],"s") ) { scale=atoi(argv[++i]); continue; }
        if ( !strcmp(argv[i],"t") ) { steps=atoi(argv[++i]); continue; }
        if ( !strcmp(argv[i],"i") ) { inp_audio=atoi(argv[++i]); continue; }
        if ( !strcmp(argv[i],"o") ) { out_audio=atoi(argv[++i]); continue; }
    }
    unsigned int bufferFrames = NUM_FREQ*2;

    Data data;
	data.fft.Init(bufferFrames, NUM_FREQ, 1, 2.5f);

    RtAudio adac;
    if ( adac.getDeviceCount() < 1 ) 
    {
        std::cout << "\nNo audio devices found!\n";
        exit( 0 );
    }

    RtAudio::StreamParameters iParams, oParams;
    iParams.deviceId  = inp_audio; // <----------- put them on 
    iParams.nChannels = 1; //              different devices
    oParams.deviceId  = out_audio; // <----------- for duplex mode
    oParams.nChannels = 1; // 

    try 
    {
        adac.openStream( &oParams, &iParams, RTAUDIO_FLOAT32, 44100, &bufferFrames, &inout, &data );
    }
    catch ( RtError& e ) 
    {
        e.printMessage();
        exit( 0 );
    }
    try 
    {
        adac.startStream();
        main_init();

        int k = 0;
        int kcen = 658;
        int lmean=0,rmean=0;
        while(true)
        {
            // find the global max:
            float m = 0;
            int mi=-1;
            for ( int i=64; i<NUM_FREQ; i++ ) // skip low freq
            {
                if ( data.freqs[i] > m ) 
                {
                    m = data.freqs[i];
                    mi = i;
                }
            }
            kcen = ipol(kcen,mi,4);
            // get the mean of the lower and the higher neighbours
            int lsum=0,rsum=0;
            for( int i=-steps; i<-2; i++ )
            {
                lsum += data.value(kcen+i,scale);
            }
            for( int i=2; i<steps; i++ )
            {
                rsum += data.value(kcen+i,scale);
            }
            rsum /= (steps-2);
            lsum /= (steps-2);
            int rd = rsum-rmean;
            int ld = lsum-lmean;
            lmean=ipol(lmean,lsum,256);
            rmean=ipol(rmean,rsum,256);
            int lc=' ',rc=' ';
            if ( rd>rthresh )
                rc='r';
            if ( ld>lthresh )
                lc='l';

            //if ( ld>lthresh || ld>lthresh )
                std::cerr << char(lc) << " " << char(rc) << std::endl;

            main_idle(data,kcen);
        }
        // Stop the stream.
        adac.stopStream();
    }
    catch ( RtError& e ) 
    {
        e.printMessage();
        goto cleanup;
    }

cleanup:
   if ( adac.isStreamOpen() ) adac.closeStream();

  return 0;
}
Example #29
int main(int argc, char *argv[])
{
  int chans, fs, buffer_size, device = 0;
  long frames, counter = 0;
  MY_TYPE *buffer;
  RtAudio *audio;

  // minimal command-line checking
  if (argc != 3 && argc != 4 ) usage();

  chans = (int) atoi(argv[1]);
  fs = (int) atoi(argv[2]);
  if ( argc == 4 )
    device = (int) atoi(argv[3]);

  // Open the realtime output device
  buffer_size = 512;
  try {
    audio = new RtAudio(device, chans, device, chans,
                        FORMAT, fs, &buffer_size, 8);
  }
  catch (RtError &error) {
    error.printMessage();
    exit(EXIT_FAILURE);
  }

  frames = (long) (fs * TIME);

  try {
    buffer = (MY_TYPE *) audio->getStreamBuffer();
    audio->startStream();
  }
  catch (RtError &error) {
    error.printMessage();
    goto cleanup;
  }

  std::cout << "\nRunning for " << TIME << " seconds ... fragment_size = " << buffer_size << std::endl;
  while (counter < frames) {

    try {
      audio->tickStream();
    }
    catch (RtError &error) {
      error.printMessage();
      goto cleanup;
    }
    counter += buffer_size;
  }

  try {
    audio->stopStream();
  }
  catch (RtError &error) {
    error.printMessage();
  }

 cleanup:
  audio->closeStream();
  delete audio;

  return 0;
}