void Audio::closeInputDevice()
{
  if ( !adc.isStreamOpen() ) return;
  adc.stopStream();
  while ( adc.isStreamRunning() );
  adc.closeStream();
}
int main()
{
  // Set the global sample rate before creating class instances.
  Stk::setSampleRate( 44100.0 );

  SineWave sine;
  RtAudio dac;

  // Figure out how many bytes in an StkFloat and set up the RtAudio stream.
  RtAudio::StreamParameters parameters;
  parameters.deviceId = dac.getDefaultOutputDevice();
  // parameters.deviceId = 3;  // or select a specific output device by index
  parameters.nChannels = 1;
  RtAudioFormat format = ( sizeof(StkFloat) == 8 ) ? RTAUDIO_FLOAT64 : RTAUDIO_FLOAT32;
  unsigned int bufferFrames = RT_BUFFER_SIZE;
  try {
    dac.openStream( &parameters, NULL, format, (unsigned int)Stk::sampleRate(), &bufferFrames, &tick, (void *)&sine );
  } catch ( RtAudioError &error ) {
    error.printMessage();
    goto cleanup;
  }

  // Configure the oscillator.
  sine.setFrequency(440.0);

  // Start the main real-time loop.
  try {
    dac.startStream();
  } catch ( RtAudioError &error ) {
    error.printMessage();
    goto cleanup;
  }

  // USER interface: block here until <enter> is pressed.
  char keyhit;
  std::cout << "\nPlaying ... press <enter> to quit.\n";
  std::cin.get( keyhit );

  // SYSTEM shutdown: close the output stream.
  try {
    dac.closeStream();
  } catch ( RtAudioError &error ) {
    error.printMessage();
  }

 cleanup:
  return 0;
}
void av_audio_start()
{
  av_audio_get();

  if (rta.isStreamRunning()) {
    rta.stopStream();
  }
  if (rta.isStreamOpen()) {
    // close it:
    rta.closeStream();
  }

  unsigned int devices = rta.getDeviceCount();
  if (devices < 1) {
    printf("No audio devices found\n");
    return;
  }

  RtAudio::DeviceInfo info;
  RtAudio::StreamParameters iParams, oParams;

  printf("Available audio devices (%d):\n", devices);
  for (unsigned int i=0; i<devices; i++) {
    info = rta.getDeviceInfo(i);
    printf("Device %d: %dx%d (%d) %s\n", i, info.inputChannels, info.outputChannels, info.duplexChannels, info.name.c_str());
  }

  printf("device %d\n", audio.indevice);
  info = rta.getDeviceInfo(audio.indevice);
  printf("Using audio input %d: %dx%d (%d) %s\n", audio.indevice, info.inputChannels, info.outputChannels, info.duplexChannels, info.name.c_str());
  audio.inchannels = info.inputChannels;
  iParams.deviceId = audio.indevice;
  iParams.nChannels = audio.inchannels;
  iParams.firstChannel = 0;

  info = rta.getDeviceInfo(audio.outdevice);
  printf("Using audio output %d: %dx%d (%d) %s\n", audio.outdevice, info.inputChannels, info.outputChannels, info.duplexChannels, info.name.c_str());
  audio.outchannels = info.outputChannels;
  oParams.deviceId = audio.outdevice;
  oParams.nChannels = audio.outchannels;
  oParams.firstChannel = 0;

  RtAudio::StreamOptions options;
  //options.flags |= RTAUDIO_NONINTERLEAVED;
  options.streamName = "av";

  try {
    rta.openStream( &oParams, &iParams, RTAUDIO_FLOAT32, audio.samplerate, &audio.blocksize, &av_rtaudio_callback, NULL, &options );
    rta.startStream();
    printf("Audio started\n");
  } catch ( RtError& e ) {
    fprintf(stderr, "%s\n", e.getMessage().c_str());
  }
}
int playsin(void)
{
  RtAudio *audio;
  unsigned int bufsize = 4096;
  CallbackData data;

  try {
    audio = new RtAudio(RtAudio::WINDOWS_WASAPI);
  } catch (...) {
    return 1;
  }
  if (!audio) {
    fprintf(stderr, "fail to allocate RtAudio\n");
    return 1;
  }

  /* probe audio devices */
  unsigned int devId = audio->getDefaultOutputDevice();

  /* Setup output stream parameters */
  RtAudio::StreamParameters *outParam = new RtAudio::StreamParameters();
  outParam->deviceId = devId;
  outParam->nChannels = 2;

  audio->openStream(outParam, NULL, RTAUDIO_FLOAT32, 44100, &bufsize, rtaudio_callback, &data);

  /* Create Wave Form Table */
  data.nRate = 44100;
  /* Frame Number is based on Freq(440Hz) and Sampling Rate(44100) */
  /* hmm... nFrame = 44100 is enough approximation, maybe... */
  data.nFrame = 44100;
  data.nChannel = outParam->nChannels;
  data.cur = 0;
  data.wftable = (float *)calloc(data.nChannel * data.nFrame, sizeof(float));
  if (!data.wftable) {
    delete audio;
    fprintf(stderr, "fail to allocate memory\n");
    return 1;
  }
  for (unsigned int i = 0; i < data.nFrame; i++) {
    float v = sin(i * 3.1416 * 2 * 440 / data.nRate);
    for (unsigned int j = 0; j < data.nChannel; j++) {
      data.wftable[i*data.nChannel + j] = v;
    }
  }

  audio->startStream();
  // sleep(10);  // without a pause here the stream is stopped almost immediately
  audio->stopStream();
  audio->closeStream();

  free(data.wftable);
  delete outParam;
  delete audio;
  return 0;
}
void Audio::closeOutputDevice()
{
  if ( !dac.isStreamOpen() ) return;
  dac.stopStream();
  while ( dac.isStreamRunning() );
  dac.closeStream();
  outIsOpened = false;
}
int main( int argc, char *argv[] )
{
  unsigned int channels, fs, bufferFrames, device = 0, offset = 0;
  char *file;

  // minimal command-line checking
  if ( argc < 4 || argc > 6 ) usage();

  RtAudio dac;
  if ( dac.getDeviceCount() < 1 ) {
    std::cout << "\nNo audio devices found!\n";
    exit( 0 );
  }

  channels = (unsigned int) atoi( argv[1] );
  fs = (unsigned int) atoi( argv[2] );
  file = argv[3];
  if ( argc > 4 ) device = (unsigned int) atoi( argv[4] );
  if ( argc > 5 ) offset = (unsigned int) atoi( argv[5] );

  OutputData data;
  data.fd = fopen( file, "rb" );
  if ( !data.fd ) {
    std::cout << "Unable to find or open file!\n";
    exit( 1 );
  }

  // Set our stream parameters for output only.
  bufferFrames = 512;
  RtAudio::StreamParameters oParams;
  oParams.deviceId = device;
  oParams.nChannels = channels;
  oParams.firstChannel = offset;

  data.channels = channels;
  try {
    dac.openStream( &oParams, NULL, FORMAT, fs, &bufferFrames, &output, (void *)&data );
    dac.startStream();
  } catch ( RtError& e ) {
    std::cout << '\n' << e.getMessage() << '\n' << std::endl;
    goto cleanup;
  }

  std::cout << "\nPlaying raw file " << file << " (buffer frames = " << bufferFrames << ")." << std::endl;
  while ( 1 ) {
    SLEEP( 100 ); // wake every 100 ms to check if we're done
    if ( dac.isStreamRunning() == false ) break;
  }

 cleanup:
  fclose( data.fd );
  dac.closeStream();
  return 0;
}
void stop()
{
  try {
    // Stop the stream
    dac.stopStream();
  } catch (RtAudioError &e) {
    e.printMessage();
  }
  if (dac.isStreamOpen()) dac.closeStream();
}
int main(int argc, char *argv[])
{
  int buffer_size, fs, device = 0;
  RtAudio *audio;
  double *data;
  char input;

  // minimal command-line checking
  if (argc != 3 && argc != 4 ) usage();

  chans = (int) atoi(argv[1]);
  fs = (int) atoi(argv[2]);
  if ( argc == 4 ) device = (int) atoi(argv[3]);

  // Open the realtime output device
  buffer_size = 1024;
  try {
    audio = new RtAudio(device, chans, 0, 0, FORMAT, fs, &buffer_size, 4);
  } catch (RtError &error) {
    error.printMessage();
    exit(EXIT_FAILURE);
  }

  data = (double *) calloc(chans, sizeof(double));

  try {
    audio->setStreamCallback(&saw, (void *)data);
    audio->startStream();
  } catch (RtError &error) {
    error.printMessage();
    goto cleanup;
  }

  std::cout << "\nPlaying ... press <enter> to quit (buffer size = " << buffer_size << ").\n";
  std::cin.get(input);

  // Stop the stream.
  try {
    audio->stopStream();
  } catch (RtError &error) {
    error.printMessage();
  }

 cleanup:
  audio->closeStream();
  delete audio;
  if (data) free(data);

  return 0;
}
void stop_audio(void)
{
  try {
    audio.stopStream();
  } catch(RtError& e) {
    e.printMessage();
  }
  if(audio.isStreamOpen()) audio.closeStream();
}
int main( int argc, char *argv[] )
{
  if ( argc != 2 ) usage();

  // Set the global sample rate and rawwave path before creating class instances.
  Stk::setSampleRate( 44100.0 );
  Stk::setRawwavePath( "rawwaves/" );

  TickData data;
  RtAudio dac;

  // Figure out how many bytes in an StkFloat and set up the RtAudio stream.
  RtAudio::StreamParameters parameters;
  parameters.deviceId = dac.getDefaultOutputDevice();
  parameters.nChannels = 1;
  RtAudioFormat format = ( sizeof(StkFloat) == 8 ) ? RTAUDIO_FLOAT64 : RTAUDIO_FLOAT32;
  unsigned int bufferFrames = RT_BUFFER_SIZE;
  try {
    dac.openStream( &parameters, NULL, format, (unsigned int)Stk::sampleRate(), &bufferFrames, &tick, (void *)&data );
  } catch ( RtAudioError &error ) {
    error.printMessage();
    goto cleanup;
  }

  try {
    // Define and load the BeeThree instrument
    data.instrument = new BeeThree();
  } catch ( StkError & ) {
    goto cleanup;
  }

  if ( data.messager.setScoreFile( argv[1] ) == false )
    goto cleanup;

  try {
    dac.startStream();
  } catch ( RtAudioError &error ) {
    error.printMessage();
    goto cleanup;
  }

  // Block waiting until callback signals done.
  while ( !data.done )
    Stk::sleep( 100 );

  // Shut down the output stream.
  try {
    dac.closeStream();
  } catch ( RtAudioError &error ) {
    error.printMessage();
  }

 cleanup:
  delete data.instrument;
  return 0;
}
int main(int argc, char *argv[])
{
  int chans, fs, device = 0;
  RtAudio *audio;
  char input;

  // minimal command-line checking
  if (argc != 3 && argc != 4 ) usage();

  chans = (int) atoi(argv[1]);
  fs = (int) atoi(argv[2]);
  if ( argc == 4 ) device = (int) atoi(argv[3]);

  // Open the realtime output device
  int buffer_size = 512;
  try {
    audio = new RtAudio(device, chans, device, chans, FORMAT, fs, &buffer_size, 8);
  } catch (RtError &error) {
    error.printMessage();
    exit(EXIT_FAILURE);
  }

  try {
    audio->setStreamCallback(&inout, NULL);
    audio->startStream();
  } catch (RtError &error) {
    error.printMessage();
    goto cleanup;
  }

  std::cout << "\nRunning ... press <enter> to quit (buffer size = " << buffer_size << ").\n";
  std::cin.get(input);

  try {
    audio->stopStream();
  } catch (RtError &error) {
    error.printMessage();
  }

 cleanup:
  audio->closeStream();
  delete audio;

  return 0;
}
int main()
{
  RtAudio dac;
  //std::cout << dac.getDeviceCount() << std::endl; //2
  if (dac.getDeviceCount() < 1) {
    std::cout << "\nNo audio devices found!\n";
    exit(0);
  }

  RtAudio::StreamParameters parameters;
  //std::cout << dac.getDefaultOutputDevice() << std::endl;
  parameters.deviceId = dac.getDefaultOutputDevice(); //0
  parameters.nChannels = 2;
  parameters.firstChannel = 0;

  unsigned int sampleRate = 44100;
  unsigned int bufferFrames = 256; // 256 sample frames

  RtAudio::StreamParameters input;
  input.deviceId = dac.getDefaultInputDevice();
  input.nChannels = 2;
  input.firstChannel = 0;

  double data[2];
  try {
    dac.openStream(&parameters, &input, RTAUDIO_SINT16, sampleRate, &bufferFrames, &saw, (void *)&data);
    dac.startStream();
  } catch (RtAudioError& e) {
    e.printMessage();
    exit(0);
  }

  char input1;
  std::cout << "\nPlaying ... press <enter> to quit.\n";
  std::cin.get(input1);

  try {
    // Stop the stream
    dac.stopStream();
  } catch (RtAudioError& e) {
    e.printMessage();
  }

  if (dac.isStreamOpen()) dac.closeStream();

  system("pause");
  return 0;
}
static void close_output(va_list args)
{
  try {
    // Stop the stream
    vessl_out.stopStream();
    if ( vessl_out.isStreamOpen() ) {
      vessl_out.closeStream();
      printf("[vessl] output was closed.\n");
    }
  } catch (RtAudioError& e) {
    e.printMessage();
  }
}
void keyboard (unsigned char key, int x, int y)
{
  switch (key) {
  case 'a':
    autorotate = ! autorotate;
    break;
  case 't' :
    theString->Ktension *= 1.05946;
    theString->Ktension = fmin ( 1.0, theString->Ktension );
    break;
  case 'l' :
    theString->Ktension *= 0.943876;
    break;
  case 'P' :
    theString->pluck();
    break;
  case 'p' :
    theString->pluckvel();
    break;
  case 'r' :
    theString->reset();
    break;
  case 'd' :
    theString->print();
    break;
  case 'v' :
    theString->toggleVibrator();
    break;
  case 'f' :
    theString->vibratorFreq *= 0.943875;
    std::cout << "vib freq = " << theString->vibratorFreq << std::endl;
    break;
  case 'F' :
    theString->vibratorFreq /= 0.943875;
    std::cout << "vib freq = " << theString->vibratorFreq << std::endl;
    break;
  case 27: /* ESC */
    try {
      // Stop the stream
      dac.stopStream();
    } catch (RtError& e) {
      e.printMessage();
    }
    if (dac.isStreamOpen()) dac.closeStream();
    exit(0);
    break;
  default:
    break;
  }
}
int main( int argc, char *argv[] )
{
  unsigned int bufferFrames, fs, device = 0, offset = 0;

  // minimal command-line checking
  if (argc < 3 || argc > 6 ) usage();

  RtAudio dac;
  if ( dac.getDeviceCount() < 1 ) {
    std::cout << "\nNo audio devices found!\n";
    exit( 1 );
  }

  channels = (unsigned int) atoi( argv[1] );
  fs = (unsigned int) atoi( argv[2] );
  if ( argc > 3 ) device = (unsigned int) atoi( argv[3] );
  if ( argc > 4 ) offset = (unsigned int) atoi( argv[4] );
  if ( argc > 5 ) nFrames = (unsigned int) (fs * atof( argv[5] ));
  if ( nFrames > 0 ) checkCount = true;

  double *data = (double *) calloc( channels, sizeof( double ) );

  // Let RtAudio print messages to stderr.
  dac.showWarnings( true );

  // Set our stream parameters for output only.
  bufferFrames = 256;
  RtAudio::StreamParameters oParams;
  oParams.deviceId = device;
  oParams.nChannels = channels;
  oParams.firstChannel = offset;

  RtAudio::StreamOptions options;
  options.flags |= RTAUDIO_HOG_DEVICE;
  options.flags |= RTAUDIO_SCHEDULE_REALTIME;
#if !defined( USE_INTERLEAVED )
  options.flags |= RTAUDIO_NONINTERLEAVED;
#endif

  try {
    dac.openStream( &oParams, NULL, FORMAT, fs, &bufferFrames, &saw, (void *)data, &options );
    dac.startStream();
  } catch ( RtError& e ) {
    e.printMessage();
    goto cleanup;
  }

  if ( checkCount ) {
    while ( dac.isStreamRunning() == true ) SLEEP( 100 );
  }
  else {
    char input;
    //std::cout << "Stream latency = " << dac.getStreamLatency() << "\n" << std::endl;
    std::cout << "\nPlaying ... press <enter> to quit (buffer size = " << bufferFrames << ").\n";
    std::cin.get( input );

    try {
      // Stop the stream
      dac.stopStream();
    } catch ( RtError& e ) {
      e.printMessage();
    }
  }

 cleanup:
  if ( dac.isStreamOpen() ) dac.closeStream();
  free( data );

  return 0;
}
// entry point
int main( int argc, char ** argv )
{
  // initialize GLUT
  glutInit( &argc, argv );
  // double buffer, use rgb color, enable depth buffer
  glutInitDisplayMode( GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH );
  // initialize the window size
  glutInitWindowSize( g_width, g_height );
  // set the window position
  glutInitWindowPosition( 100, 100 );
  // create the window
  glutCreateWindow( "VisualSine" );

  // set the idle function - called when idle
  glutIdleFunc( idleFunc );
  // set the display function - called when redrawing
  glutDisplayFunc( displayFunc );
  // set the reshape function - called when client area changes
  glutReshapeFunc( reshapeFunc );
  // set the keyboard function - called on keyboard events
  glutKeyboardFunc( keyboardFunc );
  // set the mouse function - called on mouse stuff
  glutMouseFunc( mouseFunc );

  // RtAudio pointer
  RtAudio * audio = NULL;
  // buffer size
  int buffer_size = 512;

  // create the RtAudio
  try {
    audio = new RtAudio(
        0,               // device number of output
        1,               // number of output channels
        1,               // device number for input
        1,               // number of input channels
        RTAUDIO_FLOAT64, // format
        MY_SRATE,        // sample rate
        &buffer_size,    // buffer size
        8                // number of buffers
    );
  } catch( RtError & err ) {
    err.printMessage();
    exit(1);
  }

  // allocate global buffer
  g_buffer = new SAMPLE[buffer_size];
  g_bufferSize = buffer_size;

  // set the callback
  try {
    audio->setStreamCallback( &callme, NULL );
    audio->startStream();
  } catch( RtError & err ) {
    // do stuff
    err.printMessage();
    goto cleanup;
  }

  // let GLUT handle the current thread from here
  glutMainLoop();

  // if we get here, then stop!
  try {
    audio->stopStream();
  } catch( RtError & err ) {
    // do stuff
    err.printMessage();
  }

 cleanup:
  audio->closeStream();
  delete audio;

  return 0;
}
int main ()
{
  // For this program to work, an audio file readable by libsndfile
  // (here "test.flac") must be in the same directory!
  const char * fname = "test.flac" ;

  // Sound file handle from the libsndfile library
  SndfileHandle file = SndfileHandle (fname) ;

  // Print all kinds of information about the audio file!
  std::cout << "Reading file: " << fname << std::endl;
  std::cout << "File format: " << file.format() << std::endl;
  std::cout << "PCM 16 BIT: " << (SF_FORMAT_WAV | SF_FORMAT_PCM_16) << std::endl;
  std::cout << "Samples in file: " << file.frames() << std::endl;
  std::cout << "Samplerate " << file.samplerate() << std::endl;
  std::cout << "Channels: " << file.channels() << std::endl;

  // The RtAudio class is dac and adc alike, but is only used as a dac here!
  RtAudio dac;
  if ( dac.getDeviceCount() < 1 ) {
    std::cout << "\nNo audio devices found!\n";
    return 0;
  }

  // Output params ...
  RtAudio::StreamParameters parameters;
  parameters.deviceId = dac.getDefaultOutputDevice();
  parameters.nChannels = 2;
  parameters.firstChannel = 0;
  unsigned int sampleRate = 44100;

  // NOTE: frames != samples
  // one frame = one sample for every channel,
  // i.e. |samples| = channels * frames!
  unsigned int bufferFrames = 1024;

  // Since we read 16-bit PCM data, RTAUDIO_SINT16 should be used as the data format.
  // The sound file handle is passed to the callback as user data here.
  // A "serious" solution should do this differently!
  // Incompatible formats can produce "interesting" effects, by the way!
  try {
    dac.openStream( &parameters, NULL, RTAUDIO_SINT16, sampleRate, &bufferFrames, &fplay, (void *)&file);
    dac.startStream();
  } catch ( RtAudioError& e ) {
    e.printMessage();
    return 0;
  }

  char input;
  std::cout << "\nPlaying ... press <enter> to quit.\n";
  std::cin.get( input );

  try {
    // Stop the stream
    dac.stopStream();
  } catch (RtAudioError& e) {
    e.printMessage();
  }

  if ( dac.isStreamOpen() ) dac.closeStream();

  return 0 ;
}
//-----------------------------------------------------------------------------
// name: main()
// desc: entry point
//-----------------------------------------------------------------------------
int main( int argc, char ** argv )
{
  callbackData data;
  // global for frequency
  data.g_freq = 440;
  // global sample number variable
  data.g_t = 0;
  // global for width
  data.g_width = 0;
  // global for input
  data.g_input = 0;

  // check parameters and parse input
  if (!parse(argc,argv,data)) {
    exit(0);
  }

  // instantiate RtAudio object
  RtAudio adac;
  // variables
  unsigned int bufferBytes = 0;
  // frame size
  unsigned int bufferFrames = 512;

  // check for audio devices
  if( adac.getDeviceCount() < 1 ) {
    // nopes
    cout << "no audio devices found!" << endl;
    exit( 1 );
  }

  // let RtAudio print messages to stderr.
  adac.showWarnings( true );

  // set input and output parameters
  RtAudio::StreamParameters iParams, oParams;
  iParams.deviceId = adac.getDefaultInputDevice();
  iParams.nChannels = MY_CHANNELS;
  iParams.firstChannel = 0;
  oParams.deviceId = adac.getDefaultOutputDevice();
  oParams.nChannels = MY_CHANNELS;
  oParams.firstChannel = 0;

  // create stream options
  RtAudio::StreamOptions options;

  // go for it
  try {
    // open a stream
    adac.openStream( &oParams, &iParams, MY_FORMAT, MY_SRATE, &bufferFrames, &callme, (void *)&data, &options );
  } catch( RtError& e ) {
    // error!
    cout << e.getMessage() << endl;
    exit( 1 );
  }

  // compute
  bufferBytes = bufferFrames * MY_CHANNELS * sizeof(SAMPLE);

  // test RtAudio functionality for reporting latency.
  cout << "stream latency: " << adac.getStreamLatency() << " frames" << endl;

  // go for it
  try {
    // start stream
    adac.startStream();

    // get input
    char input;
    std::cout << "running... press <enter> to quit (buffer frames: " << bufferFrames << ")" << endl;
    std::cin.get(input);

    // stop the stream.
    adac.stopStream();
  } catch( RtError& e ) {
    // print error message
    cout << e.getMessage() << endl;
    goto cleanup;
  }

 cleanup:
  // close if open
  if( adac.isStreamOpen() )
    adac.closeStream();

  // done
  outfile << "];\nplot(x)";
  return 0;
}
int main( int argc, char *argv[] )
{
  // Dekrispator init
  randomGen_init();
  Synth_Init();
  // end Dekrispator init

  // FILE* f = fopen("bla.txt","wb");
  // fclose(f);

  TickData data;
  RtAudio dac;
  int i;

  //if ( argc < 2 || argc > 6 ) usage();

  // If you want to change the default sample rate (set in Stk.h), do
  // it before instantiating any objects!  If the sample rate is
  // specified in the command line, it will override this setting.
  Stk::setSampleRate( 44100.0 );

  {
    RtMidiIn *midiin = 0;
    midiin = new RtMidiIn();
    unsigned int i = 0, nPorts = midiin->getPortCount();
    if ( nPorts == 0 ) {
      std::cout << "No input Midi ports available, just running demo mode." << std::endl;
      delete midiin;
      midiin = 0;
    }
    else {
      for ( i=0; i<nPorts; i++ ) {
        std::string portName = midiin->getPortName(i);
        std::cout << " Input port #" << i << ": " << portName << '\n';
      }
      delete midiin;
      midiin = 0;
      for ( i=0; i<nPorts && i<MAX_MIDI_DEVICES; i++ ) {
        data.messagers[data.numMessagers++].startMidiInput(i);
      }
    }
  }

  // Parse the command-line arguments.
  unsigned int port = 2001;
  for ( i=1; i<argc; i++ ) {
    if ( !strcmp( argv[i], "-is" ) ) {
      if ( i+1 < argc && argv[i+1][0] != '-' ) port = atoi(argv[++i]);
      if (data.numMessagers<MAX_MIDI_DEVICES) {
        data.messagers[data.numMessagers++].startSocketInput( port );
      }
    }
    else if (!strcmp( argv[i], "-ip" ) ) {
      if (data.numMessagers<MAX_MIDI_DEVICES) {
        data.messagers[data.numMessagers++].startStdInput();
      }
    }
    else if ( !strcmp( argv[i], "-s" ) && ( i+1 < argc ) && argv[i+1][0] != '-')
      Stk::setSampleRate( atoi(argv[++i]) );
    else
      usage();
  }

  // Allocate the dac here.
  RtAudioFormat format = ( sizeof(StkFloat) == 8 ) ? RTAUDIO_FLOAT64 : RTAUDIO_FLOAT32;
  RtAudio::StreamParameters parameters;
  parameters.deviceId = dac.getDefaultOutputDevice();
  parameters.nChannels = 2;
  unsigned int bufferFrames = RT_BUFFER_SIZE;
  try {
    dac.openStream( &parameters, NULL, format, (unsigned int)Stk::sampleRate(), &bufferFrames, &tick, (void *)&data );
  } catch ( RtAudioError& error ) {
    error.printMessage();
    goto cleanup;
  }

  data.reverbs[0].setT60( data.t60 );
  data.reverbs[0].setEffectMix( 0.5 );
  data.reverbs[1].setT60( 2.0 );
  data.reverbs[1].setEffectMix( 0.2 );
  data.rateScaler = 22050.0 / Stk::sampleRate();

  // Install an interrupt handler function.
  (void) signal( SIGINT, finish );

  // If realtime output, set our callback function and start the dac.
  try {
    dac.startStream();
  } catch ( RtAudioError &error ) {
    error.printMessage();
    goto cleanup;
  }

  // Setup finished.
  while ( !done ) {
    // Periodically check "done" status.
    Stk::sleep( 50 );
  }

  // Shut down the output stream.
  try {
    dac.closeStream();
  } catch ( RtAudioError& error ) {
    error.printMessage();
  }

 cleanup:
  return 0;
}
int main()
{
  // Set the global sample rate before creating class instances.
  Stk::setSampleRate( 44100.0 );

  SineWave sine;
  RtAudio dac;

  // Declared up front so the error gotos below do not jump over their initialization.
  double f = 440.0;
  double twelveRoot2 = 1.0594630943592952645618252949463;
  int keyhit = 0;

  // Figure out how many bytes in an StkFloat and set up the RtAudio stream.
  RtAudio::StreamParameters parameters;
  parameters.deviceId = dac.getDefaultOutputDevice();
  parameters.nChannels = 1;
  RtAudioFormat format = ( sizeof(StkFloat) == 8 ) ? RTAUDIO_FLOAT64 : RTAUDIO_FLOAT32;
  unsigned int bufferFrames = RT_BUFFER_SIZE;
  try {
    dac.openStream( &parameters, NULL, format, (unsigned int)Stk::sampleRate(), &bufferFrames, &tick, (void *)&sine );
  } catch ( RtError &error ) {
    error.printMessage();
    goto cleanup;
  }

  sine.setFrequency(f);

  try {
    dac.startStream();
  } catch ( RtError &error ) {
    error.printMessage();
    goto cleanup;
  }

  // Block waiting here.
  std::cout << "\nPlaying ... press <esc> to quit.\n";
  while (keyhit != 32 && keyhit != 27) {
    keyhit = _getch();
    if (tolower(keyhit) == 'a') {
      f = 220.0;
      sine.setFrequency(f);
    }
    else if (tolower(keyhit) == 'g') {
      f /= twelveRoot2;
      sine.setFrequency(f);
    }
    else if (tolower(keyhit) == 'h') {
      f *= twelveRoot2;
      sine.setFrequency(f);
    }
    else if (tolower(keyhit) == 'f') {
      for (int i = 0; i < 2; ++i) f /= twelveRoot2;
      sine.setFrequency(f);
    }
    else if (tolower(keyhit) == 'j') {
      for (int i = 0; i < 2; ++i) f *= twelveRoot2;
      sine.setFrequency(f);
    }
    else if (tolower(keyhit) == 'd') {
      for (int i = 0; i < 3; ++i) f /= twelveRoot2;
      sine.setFrequency(f);
    }
    else if (tolower(keyhit) == 'k') {
      for (int i = 0; i < 3; ++i) f *= twelveRoot2;
      sine.setFrequency(f);
    }
    else if (tolower(keyhit) == 's') {
      for (int i = 0; i < 4; ++i) f /= twelveRoot2;
      sine.setFrequency(f);
    }
    else if (tolower(keyhit) == 'l') {
      for (int i = 0; i < 4; ++i) f *= twelveRoot2;
      sine.setFrequency(f);
    }
    else {
      std::cout << "Freq: " << f << std::endl;
    }
  }

  // Shut down the output stream.
  try {
    dac.closeStream();
  } catch ( RtError &error ) {
    error.printMessage();
  }

 cleanup:
  return 0;
}
int main(const int argc, const char *argv[])
{
  RtAudio adc;
  unsigned int deviceCount = adc.getDeviceCount();
  if (deviceCount < 1) {
    cout << endl << "No audio devices found!" << endl;
    exit(0);
  }

  unsigned int inputDevice = adc.getDefaultInputDevice();
  unsigned int outputDevice = adc.getDefaultOutputDevice();

  for (int i=0; i<argc; i++) {
    if (strcmp(argv[i], "-devices") == 0) {
      // Scan through devices for various capabilities
      showDevices(deviceCount, adc);
      exit(0);
    }
    if (strcmp(argv[i], "-input") == 0) {
      if (i == argc-1) {
        usage();
        exit(0);
      }
      inputDevice = atoi(argv[++i]);
      validateDevice(inputDevice, deviceCount, adc, true);
    }
    if (strcmp(argv[i], "-output") == 0) {
      if (i == argc-1) {
        usage();
        exit(0);
      }
      outputDevice = atoi(argv[++i]);
      validateDevice(outputDevice, deviceCount, adc, false);
    }
  }

  // Initialise DSP thread
  // Initialise GUI

  unsigned int sampleRate = 44100;
  unsigned int bufferFrames = 512;
  unsigned int bufferBytes = 0;

  RtAudio::StreamParameters inputParameters;
  inputParameters.deviceId = inputDevice;
  inputParameters.nChannels = 2;
  inputParameters.firstChannel = 0;

  RtAudio::StreamParameters outputParameters;
  outputParameters.deviceId = outputDevice;
  outputParameters.nChannels = 2;
  outputParameters.firstChannel = 0;

  try {
    adc.openStream(&outputParameters, &inputParameters, RTAUDIO_SINT16, sampleRate, &bufferFrames, &inout, &bufferBytes);
    adc.startStream();
  } catch (RtAudioError& e) {
    e.printMessage();
    exit(0);
  }

  // adc.openStream could have adjusted the bufferFrames.
  // Set the user data buffer to the sample buffer size in bytes, so that the
  // inout callback function knows how much data to copy. The example code
  // uses this - 2 is Stereo, 4 is signed int (4 bytes on OSX)
  bufferBytes = bufferFrames * 2 * 4;

  // Can now initialise buffer management. inout could have been asking for
  // buffers but buffer management won't give them until it has been
  // initialised.
  cout << "buffer size in bytes is " << bufferBytes << endl;
  // TODO protect with mutex
  bufferManager = new BufferManager(bufferBytes, maxBuffers);

  char input;
  cout << endl << "Recording ... press <enter> to quit." << endl;
  cin.get(input);

  cout << "Terminating" << endl;
  try {
    // Stop the stream
    adc.stopStream();
  } catch (RtAudioError& e) {
    e.printMessage();
  }

  if (adc.isStreamOpen()) adc.closeStream();

  // TODO shut down DSP chain, release all buffers
  // TODO shut down Display chain, release all buffers
  delete bufferManager;

  cout << "Terminated" << endl;
  return 0;
}
int main( int argc, char *argv[] )
{
  TickData data;
  int i;

#if defined(__STK_REALTIME__)
  RtAudio dac;
#endif

  // If you want to change the default sample rate (set in Stk.h), do
  // it before instantiating any objects!  If the sample rate is
  // specified in the command line, it will override this setting.
  Stk::setSampleRate( 44100.0 );

  // By default, warning messages are not printed.  If we want to see
  // them, we need to specify that here.
  Stk::showWarnings( true );

  // Check the command-line arguments for errors and to determine
  // the number of WvOut objects to be instantiated (in utilities.cpp).
  data.nWvOuts = checkArgs( argc, argv );
  data.wvout = (WvOut **) calloc( data.nWvOuts, sizeof(WvOut *) );

  // Parse the command-line flags, instantiate WvOut objects, and
  // instantiate the input message controller (in utilities.cpp).
  try {
    data.realtime = parseArgs( argc, argv, data.wvout, data.messager );
  } catch (StkError &) {
    goto cleanup;
  }

  // If realtime output, allocate the dac here.
#if defined(__STK_REALTIME__)
  if ( data.realtime ) {
    RtAudioFormat format = ( sizeof(StkFloat) == 8 ) ? RTAUDIO_FLOAT64 : RTAUDIO_FLOAT32;
    RtAudio::StreamParameters parameters;
    parameters.deviceId = dac.getDefaultOutputDevice();
    parameters.nChannels = data.channels;
    unsigned int bufferFrames = RT_BUFFER_SIZE;
    try {
      dac.openStream( &parameters, NULL, format, (unsigned int)Stk::sampleRate(), &bufferFrames, &tick, (void *)&data );
    } catch ( RtAudioError& error ) {
      error.printMessage();
      goto cleanup;
    }
  }
#endif

  // Set the reverb parameters.
  data.reverb.setT60( data.t60 );
  data.reverb.setEffectMix( 0.2 );

  // Allocate guitar
  data.guitar = new Guitar( nStrings );

  // Configure distortion and feedback.
  data.distortion.setThreshold( 2.0 / 3.0 );
  data.distortion.setA1( 1.0 );
  data.distortion.setA2( 0.0 );
  data.distortion.setA3( -1.0 / 3.0 );
  data.distortionMix = 0.9;
  data.distortionGain = 1.0;
  data.feedbackDelay.setMaximumDelay( (unsigned long int)( 1.1 * Stk::sampleRate() ) );
  data.feedbackDelay.setDelay( 20000 );
  data.feedbackGain = 0.001;
  data.oldFeedbackGain = 0.001;

  // Install an interrupt handler function.
  (void) signal(SIGINT, finish);

  // If realtime output, set our callback function and start the dac.
#if defined(__STK_REALTIME__)
  if ( data.realtime ) {
    try {
      dac.startStream();
    } catch ( RtAudioError &error ) {
      error.printMessage();
      goto cleanup;
    }
  }
#endif

  // Setup finished.
  while ( !done ) {
#if defined(__STK_REALTIME__)
    if ( data.realtime )
      // Periodically check "done" status.
      Stk::sleep( 200 );
    else
#endif
      // Call the "tick" function to process data.
      tick( NULL, NULL, 256, 0, 0, (void *)&data );
  }

  // Shut down the output stream.
#if defined(__STK_REALTIME__)
  if ( data.realtime ) {
    try {
      dac.closeStream();
    } catch ( RtAudioError& error ) {
      error.printMessage();
    }
  }
#endif

 cleanup:
  for ( i=0; i<(int)data.nWvOuts; i++ ) delete data.wvout[i];
  free( data.wvout );

  delete data.guitar;

  std::cout << "\nStk eguitar finished ... goodbye.\n\n";
  return 0;
}
int main( int argc, char *argv[])
{
  // COMMAND LINE ARG HANDLING
  map<string, ugen> ugens;
  ugens["--sine"] = &sine;
  ugens["--saw"] = &saw;
  ugens["--pulse"] = &pulse;
  ugens["--noise"] = &noise;
  ugens["--impulse"] = &impulse;

  if (argc < 4 || argc > 10 ) usage();

  string type_arg = argv[1];
  g_active_ugen = ugens[type_arg];
  if (g_active_ugen == NULL) usage();

  double freq_arg = atof(argv[2]);
  if (freq_arg <= 0) usage();
  g_frequency = freq_arg;

  double width_arg = atof(argv[3]);
  if (width_arg < 0 || width_arg > 1) usage();
  g_width = width_arg;

  if (argc > 4) {
    // modulation parameters present
    for (int i = 4; i < argc;) {
      if (string(argv[i]).compare("--input") == 0) {
        g_modulate_input = true;
        i++;
      }
      else if (string(argv[i]).compare("--fm") == 0) {
        g_fm_on = true;
        string fm_type_arg = argv[++i];
        g_active_fm_ugen = ugens[fm_type_arg];
        if (g_active_fm_ugen == NULL) usage();
        double fm_freq_arg = atof(argv[++i]);
        if (fm_freq_arg <= 0) usage();
        g_fm_frequency = fm_freq_arg;
        double fm_width_arg = atof(argv[++i]);
        if (fm_width_arg < 0 || fm_width_arg > 1) usage();
        g_fm_width = fm_width_arg;
        double fm_index_arg = atoi(argv[++i]);
        g_fm_index = fm_index_arg;
        i++;
      }
      else usage();
    }
  }

  // AUDIO SETUP
  RtAudio audio;
  audio.showWarnings( true );

  RtAudio::StreamParameters output_params;
  RtAudio::StreamParameters input_params;

  // Choose an audio device and a sample rate
  unsigned int sample_rate;
  unsigned int devices = audio.getDeviceCount();
  if ( devices < 1 ) {
    cerr << "No audio device found!" << endl;
    exit(1);
  }

  RtAudio::DeviceInfo info;
  for (unsigned int i = 0; i < devices; i++ ) {
    info = audio.getDeviceInfo(i);
    if ( info.isDefaultOutput ) {
      output_params.deviceId = i;
      output_params.nChannels = 2;
      if (info.sampleRates.size() < 1) {
        cerr << "No supported sample rates found!" << endl;
        exit(1);
      }
      for (size_t j = 0; j < info.sampleRates.size(); j++) {
        sample_rate = info.sampleRates[j];
        if (sample_rate == 44100 || sample_rate == 48000) {
          // Found a nice sample rate, stop looking
          break;
        }
      }
      cout << "Using sample rate: " << sample_rate << endl;
    }
    if ( info.isDefaultInput ) {
      input_params.deviceId = i;
      input_params.nChannels = 1;
    }
  }

  cout << "Using output device ID " << output_params.deviceId << " which has "
       << output_params.nChannels << " output channels." << endl;
  cout << "Using input device ID " << input_params.deviceId << " which has "
       << input_params.nChannels << " input channels." << endl;

  RtAudio::StreamOptions options;
  options.flags |= RTAUDIO_HOG_DEVICE;
  options.flags |= RTAUDIO_SCHEDULE_REALTIME;

  unsigned int buffer_frames = 256;
  try {
    audio.openStream( &output_params,   // output params
                      &input_params,    // input params
                      RTAUDIO_FLOAT64,  // audio format
                      sample_rate,      // sample rate
                      &buffer_frames,   // num frames per buffer (mutable by rtaudio)
                      &callback,        // audio callback
                      &audio,           // user data pointer HACK HACK :D
                      &options);        // stream options
    audio.startStream();
  } catch ( RtError &e ) {
    e.printMessage();
    goto cleanup;
  }

  char input;
  cout << "Playing, press enter to quit (buffer frames = " << buffer_frames << ")." << endl;
  cin.get( input );

  try {
    audio.stopStream();
  } catch ( RtError &e ) {
    e.printMessage();
  }

 cleanup:
  if ( audio.isStreamOpen() ) {
    audio.closeStream();
  }

  return 0;
}
int main(int argc, char *argv[])
{
  int chans, fs, buffer_size, count, device = 0;
  long counter = 0;
  MY_TYPE *buffer;
  char *file;
  FILE *fd;
  RtAudio *audio;

  // minimal command-line checking
  if (argc != 4 && argc != 5 ) usage();

  chans = (int) atoi(argv[1]);
  fs = (int) atoi(argv[2]);
  file = argv[3];
  if ( argc == 5 ) device = (int) atoi(argv[4]);

  fd = fopen(file,"rb");
  if (!fd) {
    std::cout << "can't find file!\n";
    exit(0);
  }

  // Open the realtime output device
  buffer_size = 512;
  try {
    audio = new RtAudio(device, chans, 0, 0, FORMAT, fs, &buffer_size, 2);
  } catch (RtError &error) {
    fclose(fd);
    error.printMessage();
    exit(EXIT_FAILURE);
  }

  try {
    buffer = (MY_TYPE *) audio->getStreamBuffer();
    audio->startStream();
  } catch (RtError &error) {
    error.printMessage();
    goto cleanup;
  }

  while (1) {
    count = fread(buffer, chans * sizeof(MY_TYPE), buffer_size, fd);
    if (count == buffer_size) {
      try {
        audio->tickStream();
      } catch (RtError &error) {
        error.printMessage();
        goto cleanup;
      }
    }
    else
      break;

    counter += buffer_size;
  }

  try {
    audio->stopStream();
  } catch (RtError &error) {
    error.printMessage();
  }

 cleanup:
  audio->closeStream();
  delete audio;
  fclose(fd);

  return 0;
}
// ========
// = Main =
// ========
// Entry point
int main (int argc, char *argv[])
{
  cout << argc << " " << argv[0];
  if (argc > 3) {
    cerr << "\nERROR - wrong number of arguments\n";
    exit(1);
  }
  if (argc == 3) g_audio_history = atoi(argv[2]);
  else g_audio_history = 30;
  if (argc > 1) g_fft_history = atoi(argv[1]);
  else g_fft_history = 100;

  help();

  // RtAudio config + init

  // pointer to RtAudio object
  RtAudio * audio = NULL;

  // create the object
  try {
    audio = new RtAudio();
  } catch( RtError & err ) {
    err.printMessage();
    exit(1);
  }

  if( audio->getDeviceCount() < 1 ) {
    // nopes
    cout << "no audio devices found!" << endl;
    exit( 1 );
  }

  // let RtAudio print messages to stderr.
  audio->showWarnings( true );

  // set input and output parameters
  RtAudio::StreamParameters iParams, oParams;
  iParams.deviceId = audio->getDefaultInputDevice();
  iParams.nChannels = 1;
  iParams.firstChannel = 0;
  oParams.deviceId = audio->getDefaultOutputDevice();
  oParams.nChannels = 1;
  oParams.firstChannel = 0;

  // create stream options
  RtAudio::StreamOptions options;

  // set the callback and start stream
  try {
    audio->openStream( &oParams, &iParams, RTAUDIO_FLOAT64, MY_SRATE, &g_buffSize, &audioCallback, NULL, &options);
    cerr << "Buffer size defined by RtAudio: " << g_buffSize << endl;

    // allocate the buffer for the fft
    g_fftBuff = new float[g_buffSize * ZPF];
    g_audioBuff = new float[g_buffSize * ZPF];
    if ( g_fftBuff == NULL ) {
      cerr << "Something went wrong when creating the fft and audio buffers" << endl;
      exit (1);
    }

    // allocate the buffer for the time domain window
    g_window = new float[g_buffSize];
    if ( g_window == NULL ) {
      cerr << "Something went wrong when creating the window" << endl;
      exit (1);
    }

    // create a hanning window
    make_window( g_window, g_buffSize );

    // start the audio stream
    audio->startStream();

    // test RtAudio functionality for reporting latency.
    cout << "stream latency: " << audio->getStreamLatency() << " frames" << endl;
  } catch( RtError & err ) {
    err.printMessage();
    goto cleanup;
  }

  // ============
  // = GL stuff =
  // ============

  // initialize GLUT
  glutInit( &argc, argv );
  // double buffer, use rgb color, enable depth buffer
  glutInitDisplayMode( GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH );
  // initialize the window size
  glutInitWindowSize( g_width, g_height );
  // set the window position
  glutInitWindowPosition( 100, 100 );
  // create the window
  glutCreateWindow( "Hello GL" );
  //glutEnterGameMode();

  // set the idle function - called when idle
  glutIdleFunc( idleFunc );
  // set the display function - called when redrawing
  glutDisplayFunc( displayFunc );
  // set the reshape function - called when client area changes
  glutReshapeFunc( reshapeFunc );
  // set the keyboard function - called on keyboard events
  glutKeyboardFunc( keyboardFunc );
  // set the mouse function - called on mouse stuff
  glutMouseFunc( mouseFunc );
  // set the special function - called on special keys events (fn, arrows, pgDown, etc)
  glutSpecialFunc( specialFunc );

  // do our own initialization
  initialize();

  // let GLUT handle the current thread from here
  glutMainLoop();

  // if we get here, stop!
  try {
    audio->stopStream();
  } catch( RtError & err ) {
    err.printMessage();
  }

  // Clean up
 cleanup:
  if(audio) {
    audio->closeStream();
    delete audio;
  }

  return 0;
}
//-----------------------------------------------------------------------------
// Name: main( )
// Desc: starting point
//-----------------------------------------------------------------------------
int main( int argc, char ** argv )
{
  // Get RtAudio Instance with default API
  RtAudio *audio = new RtAudio();
  // buffer size
  unsigned int buffer_size = 512;

  // Output Stream Parameters
  RtAudio::StreamParameters outputStreamParams;
  outputStreamParams.deviceId = audio->getDefaultOutputDevice();
  outputStreamParams.nChannels = 1;

  // Input Stream Parameters
  RtAudio::StreamParameters inputStreamParams;
  inputStreamParams.deviceId = audio->getDefaultInputDevice();
  inputStreamParams.nChannels = 1;

  // Get RtAudio Stream
  try {
    audio->openStream( NULL, &inputStreamParams, RTAUDIO_FLOAT32, MY_FREQ, &buffer_size, callback_func, NULL );
  } catch(RtError &err) {
    err.printMessage();
    exit(1);
  }

  g_bufferSize = buffer_size;

  // Samples for Feature Extraction in a Buffer
  g_samples = (SAMPLE *)malloc(sizeof(SAMPLE)*g_bufferSize*g_numMaxBuffersToUse);
  g_audio_buffer = (SAMPLE *)malloc(sizeof(SAMPLE)*g_bufferSize*g_numMaxBuffersToUse);
  g_another_buffer = (SAMPLE *)malloc(sizeof(SAMPLE)*g_bufferSize*g_numMaxBuffersToUse);
  g_buffest = (SAMPLE *)malloc(sizeof(SAMPLE)*g_bufferSize*g_numMaxBuffersToUse);
  g_residue = (SAMPLE *)malloc(sizeof(SAMPLE)*g_bufferSize*g_numMaxBuffersToUse);
  g_coeff = (SAMPLE *)malloc(sizeof(SAMPLE)*g_order);
  g_dwt = (SAMPLE *)malloc(sizeof(SAMPLE)*g_bufferSize*g_numMaxBuffersToUse);

  // initialize GLUT
  glutInit( &argc, argv );
  // double buffer, use rgb color, enable depth buffer
  glutInitDisplayMode( GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH );
  // initialize the window size
  glutInitWindowSize( g_width, g_height );
  // set the window position
  glutInitWindowPosition( 100, 100 );
  // create the window
  glutCreateWindow( "The New File" );

  // set the idle function - called when idle
  glutIdleFunc( idleFunc );
  // set the display function - called when redrawing
  glutDisplayFunc( displayFunc );
  // set the reshape function - called when client area changes
  glutReshapeFunc( reshapeFunc );
  // set the keyboard function - called on keyboard events
  glutKeyboardFunc( keyboardFunc );
  // set the mouse function - called on mouse stuff
  glutMouseFunc( mouseFunc );

  // do our own initialization
  initialize();
  // initialize mfcc
  initMFCC();
  // init lpc
  initialize_lpc();

  // initialize osc
  // Initialize a socket to get a port
  g_transmitSocket = new UdpTransmitSocket( IpEndpointName( g_ADDRESS.c_str(), SERVERPORT ) );

  // // Set the global sample rate before creating class instances.
  // Stk::setSampleRate( 44100.0 );
  // // Read In File
  // try
  // {
  //   // read the file
  //   g_fin.openFile( "TomVega.wav" );
  //   // change the rate
  //   g_fin.setRate( 1 );
  //   // normalize the peak
  //   g_fin.normalize();
  // } catch( stk::StkError & e )
  // {
  //   cerr << "baaaaaaaaad..." << endl;
  //   return 1;
  // }

  // Start Stream
  try {
    audio->startStream();
  } catch( RtError & err ) {
    // do stuff
    err.printMessage();
    goto cleanup;
  }

  // let GLUT handle the current thread from here
  glutMainLoop();

  // if we get here, then stop!
  try {
    audio->stopStream();
  } catch( RtError & err ) {
    // do stuff
    err.printMessage();
  }

 cleanup:
  audio->closeStream();
  delete audio;

  return 0;
}
int main( int argc, char *argv[] )
{
  unsigned int bufferFrames, fs, oDevice = 0, iDevice = 0, iOffset = 0, oOffset = 0;
  char input;

  // minimal command-line checking
  if (argc < 3 || argc > 7 ) usage();

  RtAudio dac;
  if ( dac.getDeviceCount() < 1 ) {
    std::cout << "\nNo audio devices found!\n";
    exit( 1 );
  }

  channels = (unsigned int) atoi( argv[1] );
  fs = (unsigned int) atoi( argv[2] );
  if ( argc > 3 ) iDevice = (unsigned int) atoi( argv[3] );
  if ( argc > 4 ) oDevice = (unsigned int) atoi(argv[4]);
  if ( argc > 5 ) iOffset = (unsigned int) atoi(argv[5]);
  if ( argc > 6 ) oOffset = (unsigned int) atoi(argv[6]);

  double *data = (double *) calloc( channels, sizeof( double ) );

  // Let RtAudio print messages to stderr.
  dac.showWarnings( true );

  // Set our stream parameters for output only.
  bufferFrames = 256;
  RtAudio::StreamParameters oParams, iParams;
  oParams.deviceId = oDevice;
  oParams.nChannels = channels;
  oParams.firstChannel = oOffset;

  RtAudio::StreamOptions options;
  options.flags = RTAUDIO_HOG_DEVICE;

  try {
    dac.openStream( &oParams, NULL, RTAUDIO_FLOAT64, fs, &bufferFrames, &sawi, (void *)data, &options );
    std::cout << "\nStream latency = " << dac.getStreamLatency() << std::endl;

    // Start the stream
    dac.startStream();
    std::cout << "\nPlaying ... press <enter> to stop.\n";
    std::cin.get( input );

    // Stop the stream
    dac.stopStream();

    // Restart again
    std::cout << "Press <enter> to restart.\n";
    std::cin.get( input );
    dac.startStream();

    // Test abort function
    std::cout << "Playing again ... press <enter> to abort.\n";
    std::cin.get( input );
    dac.abortStream();

    // Restart another time
    std::cout << "Press <enter> to restart again.\n";
    std::cin.get( input );
    dac.startStream();

    std::cout << "Playing again ... press <enter> to close the stream.\n";
    std::cin.get( input );
  } catch ( RtError& e ) {
    e.printMessage();
    goto cleanup;
  }

  if ( dac.isStreamOpen() ) dac.closeStream();

  // Test non-interleaved functionality
  options.flags = RTAUDIO_NONINTERLEAVED;
  try {
    dac.openStream( &oParams, NULL, RTAUDIO_FLOAT64, fs, &bufferFrames, &sawni, (void *)data, &options );

    std::cout << "Press <enter> to start non-interleaved playback.\n";
    std::cin.get( input );

    // Start the stream
    dac.startStream();
    std::cout << "\nPlaying ... press <enter> to stop.\n";
    std::cin.get( input );
  } catch ( RtError& e ) {
    e.printMessage();
    goto cleanup;
  }

  if ( dac.isStreamOpen() ) dac.closeStream();

  // Now open a duplex stream.
  unsigned int bufferBytes;
  iParams.deviceId = iDevice;
  iParams.nChannels = channels;
  iParams.firstChannel = iOffset;
  options.flags = RTAUDIO_NONINTERLEAVED;
  try {
    dac.openStream( &oParams, &iParams, RTAUDIO_SINT32, fs, &bufferFrames, &inout, (void *)&bufferBytes, &options );
    bufferBytes = bufferFrames * channels * 4;

    std::cout << "Press <enter> to start duplex operation.\n";
    std::cin.get( input );

    // Start the stream
    dac.startStream();
    std::cout << "\nRunning ... press <enter> to stop.\n";
    std::cin.get( input );

    // Stop the stream
    dac.stopStream();
    std::cout << "\nStopped ... press <enter> to restart.\n";
    std::cin.get( input );

    // Restart the stream
    dac.startStream();
    std::cout << "\nRunning ... press <enter> to stop.\n";
    std::cin.get( input );
  } catch ( RtError& e ) {
    e.printMessage();
  }

 cleanup:
  if ( dac.isStreamOpen() ) dac.closeStream();
  free( data );

  return 0;
}
//-----------------------------------------------------------------------------
// name: main()
// desc: entry point
//-----------------------------------------------------------------------------
int main( int argc, char ** argv )
{
  RtMidiIn *midiin = new RtMidiIn();

  // Check available ports.
  unsigned int nPorts = midiin->getPortCount();
  if ( nPorts == 0 ) {
    std::cout << "No ports available!\n";
    //goto cleanup;
  }

  midiin->openPort( 0 );

  // Set our callback function.  This should be done immediately after
  // opening the port to avoid having incoming messages written to the
  // queue.
  midiin->setCallback( &mycallback );

  // Don't ignore sysex, timing, or active sensing messages.
  midiin->ignoreTypes( false, false, false );

  std::cout << "\nReading MIDI input ... press <enter> to quit.\n";
  char input;
  std::cin.get(input);

  // instantiate RtAudio object
  RtAudio audio;
  // variables
  unsigned int bufferBytes = 0;
  // frame size
  unsigned int numFrames = 512;

  // check for audio devices
  if( audio.getDeviceCount() < 1 ) {
    // nopes
    cout << "no audio devices found!" << endl;
    exit( 1 );
  }

  // let RtAudio print messages to stderr.
  audio.showWarnings( true );

  // set input and output parameters
  RtAudio::StreamParameters iParams, oParams;
  iParams.deviceId = audio.getDefaultInputDevice();
  iParams.nChannels = MY_CHANNELS;
  iParams.firstChannel = 0;
  oParams.deviceId = audio.getDefaultOutputDevice();
  oParams.nChannels = MY_CHANNELS;
  oParams.firstChannel = 0;

  // create stream options
  RtAudio::StreamOptions options;

  // go for it
  try {
    // open a stream
    audio.openStream( &oParams, &iParams, MY_FORMAT, MY_SRATE, &numFrames, &callme, NULL, &options );
  } catch( RtError& e ) {
    // error!
    cout << e.getMessage() << endl;
    exit( 1 );
  }

  // compute
  bufferBytes = numFrames * MY_CHANNELS * sizeof(SAMPLE);

  // test RtAudio functionality for reporting latency.
  cout << "stream latency: " << audio.getStreamLatency() << " frames" << endl;

  for( int i = 0; i < MY_NUMSTRINGS; i++ ) {
    // initialize
    g_ks[i].init( MY_SRATE*2, 440, MY_SRATE );
  }

  // go for it
  try {
    // start stream
    audio.startStream();

    char input;
    std::cout << "Press any key to quit ";
    std::cin.get(input);

    // stop the stream.
    audio.stopStream();
  } catch( RtError& e ) {
    // print error message
    cout << e.getMessage() << endl;
    goto cleanup;
  }

 cleanup:
  // close if open
  if( audio.isStreamOpen() )
    audio.closeStream();

  delete midiin;

  // done
  return 0;
}
int main(int argc, char ** argv)
{
  if ( argc==1 )
  {
    std::cerr << "f " << freq << "\t frequency" << std::endl;
    std::cerr << "s " << scale << "\t scale" << std::endl;
    std::cerr << "t " << steps << "\t steps" << std::endl;
    std::cerr << "r " << rthresh << "\t right thresh" << std::endl;
    std::cerr << "f " << lthresh << "\t left thresh" << std::endl;
    std::cerr << "i " << inp_audio << "\t inp_audio device id" << std::endl;
    std::cerr << "o " << out_audio << "\t out_audio device id" << std::endl;
  }

  for ( int i = 1; i<argc-1; i++ )
  {
    if ( !strcmp(argv[i],"f") ) { freq=atoi(argv[++i]); continue; }
    if ( !strcmp(argv[i],"l") ) { lthresh=atoi(argv[++i]); continue; }
    if ( !strcmp(argv[i],"r") ) { rthresh=atoi(argv[++i]); continue; }
    if ( !strcmp(argv[i],"s") ) { scale=atoi(argv[++i]); continue; }
    if ( !strcmp(argv[i],"t") ) { steps=atoi(argv[++i]); continue; }
    if ( !strcmp(argv[i],"i") ) { inp_audio=atoi(argv[++i]); continue; }
    if ( !strcmp(argv[i],"o") ) { out_audio=atoi(argv[++i]); continue; }
  }

  unsigned int bufferFrames = NUM_FREQ*2;

  Data data;
  data.fft.Init(bufferFrames, NUM_FREQ, 1, 2.5f);

  RtAudio adac;
  if ( adac.getDeviceCount() < 1 )
  {
    std::cout << "\nNo audio devices found!\n";
    exit( 0 );
  }

  RtAudio::StreamParameters iParams, oParams;
  iParams.deviceId = inp_audio;  // <----------- put them on
  iParams.nChannels = 1;         //   different devices
  oParams.deviceId = out_audio;  // <----------- for duplex mode
  oParams.nChannels = 1;         //

  try {
    adac.openStream( &oParams, &iParams, RTAUDIO_FLOAT32, 44100, &bufferFrames, &inout, &data );
  }
  catch ( RtError& e )
  {
    e.printMessage();
    exit( 0 );
  }

  try {
    adac.startStream();

    main_init();

    int k = 0;
    int kcen = 658;
    int lmean=0, rmean=0;

    while(true)
    {
      // find the global max:
      float m = 0;
      int mi = -1;
      for ( int i=64; i<NUM_FREQ; i++ ) // skip low freq
      {
        if ( data.freqs[i] > m )
        {
          m = data.freqs[i];
          mi = i;
        }
      }
      kcen = ipol(kcen,mi,4);

      // get the mean of the lower and the higher neighbours
      int lsum=0, rsum=0;
      for( int i=-steps; i<-2; i++ ) { lsum += data.value(kcen+i,scale); }
      for( int i=2; i<steps; i++ )   { rsum += data.value(kcen+i,scale); }
      rsum /= (steps-2);
      lsum /= (steps-2);

      int rd = rsum-rmean;
      int ld = lsum-lmean;
      lmean = ipol(lmean,lsum,256);
      rmean = ipol(rmean,rsum,256);

      int lc=' ', rc=' ';
      if ( rd>rthresh ) rc='r';
      if ( ld>lthresh ) lc='l';
      //if ( ld>lthresh || ld>lthresh )
      std::cerr << char(lc) << " " << char(rc) << std::endl;

      main_idle(data,kcen);
    }

    // Stop the stream.
    adac.stopStream();
  }
  catch ( RtError& e )
  {
    e.printMessage();
    goto cleanup;
  }

 cleanup:
  if ( adac.isStreamOpen() ) adac.closeStream();

  return 0;
}
int main(int argc, char *argv[])
{
  // Minimal command-line checking.
  if ( argc < 3 || argc > 4 ) usage();

  // Set the global sample rate before creating class instances.
  Stk::setSampleRate( (StkFloat) atof( argv[2] ) );

  // Initialize our WvIn and RtAudio pointers.
  RtAudio dac;
  FileWvIn input;
  FileLoop inputLoop;

  // Try to load the soundfile.
  try {
    input.openFile( argv[1] );
    inputLoop.openFile( argv[1] );
  } catch ( StkError & ) {
    exit( 1 );
  }

  // Set input read rate based on the default STK sample rate.
  double rate = 1.0;
  rate = input.getFileRate() / Stk::sampleRate();
  rate = inputLoop.getFileRate() / Stk::sampleRate();
  if ( argc == 4 ) rate *= atof( argv[3] );
  input.setRate( rate );
  input.ignoreSampleRateChange();

  // Find out how many channels we have.
  int channels = input.channelsOut();

  // Figure out how many bytes in an StkFloat and set up the RtAudio stream.
  RtAudio::StreamParameters parameters;
  parameters.deviceId = dac.getDefaultOutputDevice();
  parameters.nChannels = channels;
  RtAudioFormat format = ( sizeof(StkFloat) == 8 ) ? RTAUDIO_FLOAT64 : RTAUDIO_FLOAT32;
  unsigned int bufferFrames = RT_BUFFER_SIZE;
  try {
    dac.openStream( &parameters, NULL, format, (unsigned int)Stk::sampleRate(), &bufferFrames, &tick, (void *)&inputLoop );
  } catch ( RtAudioError &error ) {
    error.printMessage();
    goto cleanup;
  }

  // Install an interrupt handler function.
  (void) signal(SIGINT, finish);

  // Resize the StkFrames object appropriately.
  frames.resize( bufferFrames, channels );

  try {
    dac.startStream();
  } catch ( RtAudioError &error ) {
    error.printMessage();
    goto cleanup;
  }

  // Block waiting until callback signals done.
  while ( !done )
    Stk::sleep( 100 );

  // By returning a non-zero value in the callback above, the stream
  // is automatically stopped.  But we should still close it.
  try {
    dac.closeStream();
  } catch ( RtAudioError &error ) {
    error.printMessage();
  }

 cleanup:
  return 0;
}