int main()
{
  RtAudio dac;
  if ( dac.getDeviceCount() == 0 ) exit( 0 );

  RtAudio::StreamParameters parameters;
  parameters.deviceId = dac.getDefaultOutputDevice();
  parameters.nChannels = 2;
  unsigned int sampleRate = 44100;
  unsigned int bufferFrames = 256; // 256 sample frames

  RtAudio::StreamOptions options;
  options.flags = RTAUDIO_NONINTERLEAVED;

  try {
    dac.openStream( &parameters, NULL, RTAUDIO_FLOAT32, sampleRate,
                    &bufferFrames, &myCallback, NULL, &options );
  }
  catch ( RtError& e ) {
    std::cout << '\n' << e.getMessage() << '\n' << std::endl;
    exit( 0 );
  }

  return 0;
}

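// --- Hedged sketch (not from the original sources): the example above
// registers &myCallback without showing it.  Under that example's settings
// (RTAUDIO_FLOAT32, 2 channels, RTAUDIO_NONINTERLEAVED) a minimal output
// callback could look like the following; the 440 Hz sine and the fixed
// 44100 Hz rate are assumptions made only for illustration.
// Requires "RtAudio.h", <cmath> and <iostream>.
int myCallback( void *outputBuffer, void * /*inputBuffer*/, unsigned int nBufferFrames,
                double /*streamTime*/, RtAudioStreamStatus status, void * /*userData*/ )
{
  static double phase = 0.0;
  const double twoPi = 6.283185307179586;
  float *out = (float *) outputBuffer;

  if ( status ) std::cout << "Stream underflow detected!" << std::endl;

  for ( unsigned int i = 0; i < nBufferFrames; i++ ) {
    float sample = (float) ( 0.2 * sin( phase ) );
    // Non-interleaved layout: all frames of channel 0, then all of channel 1.
    out[i] = sample;
    out[nBufferFrames + i] = sample;
    phase += twoPi * 440.0 / 44100.0;
    if ( phase >= twoPi ) phase -= twoPi;
  }
  return 0; // 0 = keep streaming, 1 = stop and drain, 2 = abort
}
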
int main()
{
  // Set the global sample rate before creating class instances.
  Stk::setSampleRate( 44100.0 );

  SineWave sine;
  RtAudio dac;

  // Figure out how many bytes in an StkFloat and set up the RtAudio stream.
  RtAudio::StreamParameters parameters;
  parameters.deviceId = dac.getDefaultOutputDevice();
  parameters.deviceId = 3; // hard-coded override of the default output device
  parameters.nChannels = 1;
  RtAudioFormat format = ( sizeof(StkFloat) == 8 ) ? RTAUDIO_FLOAT64 : RTAUDIO_FLOAT32;
  unsigned int bufferFrames = RT_BUFFER_SIZE;
  try {
    dac.openStream( &parameters, NULL, format, (unsigned int)Stk::sampleRate(),
                    &bufferFrames, &tick, (void *)&sine );
  }
  catch ( RtAudioError &error ) {
    error.printMessage();
    goto cleanup;
  }

  // Configure the oscillator.
  sine.setFrequency(440.0);

  // Start the main real-time loop.
  try {
    dac.startStream();
  }
  catch ( RtAudioError &error ) {
    error.printMessage();
    goto cleanup;
  }

  // USER interface
  // Block waiting here.
  char keyhit;
  std::cout << "\nPlaying ... press <enter> to quit.\n";
  std::cin.get( keyhit );

  // SYSTEM shutdown
  // Shut down the output stream.
  try {
    dac.closeStream();
  }
  catch ( RtAudioError &error ) {
    error.printMessage();
  }

 cleanup:
  return 0;
}

/* returns 0 on failure */
int start_audio(AudioCallback _callback, int sample_rate, void *data)
{
  if(audio.getDeviceCount() < 1) {
    std::cout << "No audio devices found!\n";
    return 0;
  }

  RtAudio::StreamParameters iparams, oparams;

  /* configure input (microphone) */
  iparams.deviceId = audio.getDefaultInputDevice();
  iparams.nChannels = 1;
  iparams.firstChannel = 0;

  /* configure output */
  oparams.deviceId = audio.getDefaultOutputDevice();
  oparams.nChannels = 2;
  oparams.firstChannel = 0;

  unsigned int bufferFrames = 256;
  callback = _callback;

  try {
    audio.openStream(&oparams, &iparams, RTAUDIO_FLOAT64 /* double */,
                     sample_rate, &bufferFrames, &render, data);
    audio.startStream();
  }
  catch(RtError& e) {
    e.printMessage();
    return 0;
  }
  return 1;
}

int main(int argc, const char * argv[])
{
  RtAudio dac;
  RtAudio::StreamParameters rtParams;
  rtParams.deviceId = dac.getDefaultOutputDevice();
  rtParams.nChannels = nChannels;

#if RASPI
  unsigned int sampleRate = 22000;
#else
  unsigned int sampleRate = 44100;
#endif

  unsigned int bufferFrames = 512; // 512 sample frames

  Tonic::setSampleRate(sampleRate);

  std::vector<Synth> synths;
  synths.push_back(*new BassDrum());
  synths.push_back(*new Snare());
  synths.push_back(*new HiHat());
  synths.push_back(*new Funky());

  // Test write pattern
  DrumMachine *drumMachine = new DrumMachine(synths);
  drumMachine->loadPattern(0);

  ControlMetro metro = ControlMetro().bpm(480);
  ControlCallback drumMachineTick = ControlCallback(&mixer, [&](ControlGeneratorOutput output){
    drumMachine->tick();
  }).input(metro);

  Generator mixedSignal;
  for(int i = 0; i < NUM_TRACKS; i++) {
    mixedSignal = mixedSignal + synths[i];
  }
  mixer.setOutputGen(mixedSignal);

  try {
    dac.openStream( &rtParams, NULL, RTAUDIO_FLOAT32, sampleRate, &bufferFrames,
                    &renderCallback, NULL, NULL );
    dac.startStream();

    // Send a pointer to our global drumMachine instance
    // to the serial communications layer.
    listenForMessages( drumMachine );

    dac.stopStream();
  }
  catch ( RtError& e ) {
    std::cout << '\n' << e.getMessage() << '\n' << std::endl;
    exit( 0 );
  }

  return 0;
}

int playsin(void)
{
  RtAudio *audio;
  unsigned int bufsize = 4096;
  CallbackData data;

  try {
    audio = new RtAudio(RtAudio::WINDOWS_WASAPI);
  } catch (...) {
    return 1;
  }
  if (!audio) {
    fprintf(stderr, "fail to allocate RtAudio\n");
    return 1;
  }

  /* probe audio devices */
  unsigned int devId = audio->getDefaultOutputDevice();

  /* Setup output stream parameters */
  RtAudio::StreamParameters *outParam = new RtAudio::StreamParameters();
  outParam->deviceId = devId;
  outParam->nChannels = 2;

  audio->openStream(outParam, NULL, RTAUDIO_FLOAT32, 44100, &bufsize,
                    rtaudio_callback, &data);

  /* Create Wave Form Table */
  data.nRate = 44100;
  /* Frame Number is based on Freq(440Hz) and Sampling Rate(44100) */
  /* hmm... nFrame = 44100 is enough approximation, maybe... */
  data.nFrame = 44100;
  data.nChannel = outParam->nChannels;
  data.cur = 0;
  data.wftable = (float *)calloc(data.nChannel * data.nFrame, sizeof(float));
  if (!data.wftable) {
    delete audio;
    fprintf(stderr, "fail to allocate memory\n");
    return 1;
  }
  for (unsigned int i = 0; i < data.nFrame; i++) {
    float v = sin(i * 3.1416 * 2 * 440 / data.nRate);
    for (unsigned int j = 0; j < data.nChannel; j++) {
      data.wftable[i*data.nChannel + j] = v;
    }
  }

  audio->startStream();
  // sleep(10);
  audio->stopStream();
  audio->closeStream();
  delete audio;

  return 0;
}

int main(int argc, char** argv)
{
  if (argc != 2) {
    printf("Usage: synth file.sf2\n");
    exit(0);
  }

  LightFluidSynth *usynth;
  usynth = new LightFluidSynth();

  usynth->loadSF2(argv[1]);
  // usynth->loadSF2("tim.sf2");

  RtMidiIn *midiIn = new RtMidiIn();
  if (midiIn->getPortCount() == 0) {
    std::cout << "No MIDI ports available!\n";
  }
  midiIn->openPort(0);
  midiIn->setCallback( &midiCallback, (void *)usynth );
  midiIn->ignoreTypes( false, false, false );

  // RtAudio dac(RtAudio::LINUX_PULSE);
  RtAudio dac;
  RtAudio::StreamParameters rtParams;

  // Determine the number of devices available
  unsigned int devices = dac.getDeviceCount();
  // Scan through devices for various capabilities
  RtAudio::DeviceInfo info;
  for ( unsigned int i = 0; i < devices; i++ ) {
    info = dac.getDeviceInfo( i );
    if ( info.probed == true ) {
      std::cout << "device " << i << " = " << info.name;
      std::cout << ": maximum output channels = " << info.outputChannels << "\n";
    }
  }

  // rtParams.deviceId = 3;
  rtParams.deviceId = dac.getDefaultOutputDevice();
  rtParams.nChannels = 2;
  unsigned int bufferFrames = FRAME_SIZE;

  RtAudio::StreamOptions options;
  options.flags = RTAUDIO_SCHEDULE_REALTIME;

  dac.openStream( &rtParams, NULL, AUDIO_FORMAT, SAMPLE_RATE, &bufferFrames,
                  &audioCallback, (void *)usynth, &options );
  dac.startStream();

  printf("\n\nPress Enter to stop\n\n");
  cin.get();
  dac.stopStream();

  delete(usynth);
  return 0;
}

int main( int argc, char *argv[] )
{
  if ( argc != 2 ) usage();

  // Set the global sample rate and rawwave path before creating class instances.
  Stk::setSampleRate( 44100.0 );
  Stk::setRawwavePath( "rawwaves/" );

  TickData data;
  RtAudio dac;

  // Figure out how many bytes in an StkFloat and setup the RtAudio stream.
  RtAudio::StreamParameters parameters;
  parameters.deviceId = dac.getDefaultOutputDevice();
  parameters.nChannels = 1;
  RtAudioFormat format = ( sizeof(StkFloat) == 8 ) ? RTAUDIO_FLOAT64 : RTAUDIO_FLOAT32;
  unsigned int bufferFrames = RT_BUFFER_SIZE;
  try {
    dac.openStream( &parameters, NULL, format, (unsigned int)Stk::sampleRate(),
                    &bufferFrames, &tick, (void *)&data );
  }
  catch ( RtAudioError &error ) {
    error.printMessage();
    goto cleanup;
  }

  try {
    // Define and load the BeeThree instrument
    data.instrument = new BeeThree();
  }
  catch ( StkError & ) {
    goto cleanup;
  }

  if ( data.messager.setScoreFile( argv[1] ) == false )
    goto cleanup;

  try {
    dac.startStream();
  }
  catch ( RtAudioError &error ) {
    error.printMessage();
    goto cleanup;
  }

  // Block waiting until callback signals done.
  while ( !data.done )
    Stk::sleep( 100 );

  // Shut down the output stream.
  try {
    dac.closeStream();
  }
  catch ( RtAudioError &error ) {
    error.printMessage();
  }

 cleanup:
  delete data.instrument;
  return 0;
}

int main()
{
  RtAudio dac;
  //std::cout << dac.getDeviceCount() << std::endl; //2
  if (dac.getDeviceCount() < 1) {
    std::cout << "\nNo audio devices found!\n";
    exit(0);
  }

  RtAudio::StreamParameters parameters;
  //std::cout << dac.getDefaultOutputDevice() << std::endl;
  parameters.deviceId = dac.getDefaultOutputDevice(); //0
  parameters.nChannels = 2;
  parameters.firstChannel = 0;
  unsigned int sampleRate = 44100;
  unsigned int bufferFrames = 256; // 256 sample frames

  RtAudio::StreamParameters input;
  input.deviceId = dac.getDefaultInputDevice();
  input.nChannels = 2;
  input.firstChannel = 0;

  double data[2];
  try {
    dac.openStream(&parameters, &input, RTAUDIO_SINT16, sampleRate,
                   &bufferFrames, &saw, (void *)&data);
    dac.startStream();
  }
  catch (RtAudioError& e) {
    e.printMessage();
    exit(0);
  }

  char input1;
  std::cout << "\nPlaying ... press <enter> to quit.\n";
  std::cin.get(input1);

  try {
    // Stop the stream
    dac.stopStream();
  }
  catch (RtAudioError& e) {
    e.printMessage();
  }

  if (dac.isStreamOpen()) dac.closeStream();

  system("pause");
  return 0;
}

static void open_output(va_list args)
{
  RtAudio::StreamParameters parameters;
  parameters.deviceId = vessl_out.getDefaultOutputDevice();
  parameters.nChannels = 2;
  parameters.firstChannel = 0;

  RtAudio::StreamOptions options;
  if ( vessl_out_bufferFrames == 0 ) {
    options.flags |= RTAUDIO_MINIMIZE_LATENCY;
  }
  options.numberOfBuffers = 2;

  try {
    vessl_out.openStream( &parameters, NULL, RTAUDIO_FLOAT32, vessl_out_sampleRate,
                          &vessl_out_bufferFrames, &output_render_callback, (void*)0, &options );
  }
  catch( RtAudioError& e ) {
    printf("[vessl] FAILED TO OPEN OUTPUT: %s\n", e.getMessage().c_str());
  }

  if ( vessl_out.isStreamOpen() ) {
    try {
      vessl_out.startStream();
    }
    catch( RtAudioError& e ) {
      printf("[vessl] FAILED TO START OUTPUT: %s\n", e.getMessage().c_str());
    }
  }
}

void init(){
  unsigned int sampleRate = 44100;
  unsigned int bufferFrames = 128;

  // init pd
  if(!lpd.init(0, 2, sampleRate)) {
    std::cerr << "Could not init pd" << std::endl;
    exit(1);
  }

  // receive messages from pd
  lpd.setReceiver(&pdObject);
  lpd.subscribe("cursor");

  // send DSP 1 message to pd
  lpd.computeAudio(true);

  // load the patch
  pd::Patch patch = lpd.openPatch("test.pd", "./pd");
  std::cout << patch << std::endl;

  // use the RtAudio API to connect to the default audio device
  if(audio.getDeviceCount()==0){
    std::cout << "There are no available sound devices." << std::endl;
    exit(1);
  }

  RtAudio::StreamParameters parameters;
  parameters.deviceId = audio.getDefaultOutputDevice();
  parameters.nChannels = 2;

  RtAudio::StreamOptions options;
  options.streamName = "libpd rtaudio test";
  options.flags = RTAUDIO_SCHEDULE_REALTIME;
  if(audio.getCurrentApi() != RtAudio::MACOSX_CORE) {
    options.flags |= RTAUDIO_MINIMIZE_LATENCY; // CoreAudio doesn't seem to like this
  }
  try {
    audio.openStream( &parameters, NULL, RTAUDIO_FLOAT32, sampleRate,
                      &bufferFrames, &audioCallback, NULL, &options );
    audio.startStream();
  }
  catch(RtAudioError& e) {
    std::cerr << e.getMessage() << std::endl;
    exit(1);
  }
}

av_Audio * av_audio_get() {
  static bool initialized = false;
  if (!initialized) {
    initialized = true;

    rta.showWarnings( true );

    // defaults:
    audio.samplerate = 44100;
    audio.blocksize = 256;
    audio.inchannels = 2;
    audio.outchannels = 2;
    audio.time = 0;
    audio.lag = 0.04;
    audio.indevice = rta.getDefaultInputDevice();
    audio.outdevice = rta.getDefaultOutputDevice();
    /*
    audio.msgbuffer.size = AV_AUDIO_MSGBUFFER_SIZE_DEFAULT;
    audio.msgbuffer.read = 0;
    audio.msgbuffer.write = 0;
    audio.msgbuffer.data = (unsigned char *)malloc(audio.msgbuffer.size);
    */
    audio.onframes = 0;

    // one second of ringbuffer:
    int blockspersecond = audio.samplerate / audio.blocksize;
    audio.blocks = blockspersecond + 1;
    audio.blockstep = audio.blocksize * audio.outchannels;
    int len = audio.blockstep * audio.blocks;
    audio.buffer = (float *)calloc(len, sizeof(float));
    audio.blockread = 0;
    audio.blockwrite = 0;

    printf("audio initialized\n");

    //AL = lua_open();
    //av_init_lua();

    // unique to audio thread:
    //if (luaL_dostring(AL, "require 'audioprocess'")) {
    //  printf("error: %s\n", lua_tostring(AL, -1));
    //  initialized = false;
    //}
  }
  return &audio;
}

int startAudio() {
  // Determine the number of devices available
  unsigned int devices = audio.getDeviceCount();
  if(devices==0) {
    printf("please run 'sudo modprobe snd_bcm2835' to enable the alsa driver\n");
    return 1;
  }

  // Scan through devices for various capabilities
  RtAudio::DeviceInfo info;
  for ( unsigned int i=0; i<devices; i++ ) {
    info = audio.getDeviceInfo( i );
    if ( info.probed == true ) {
      // Print, for example, the maximum number of output channels for each device
      std::cout << "device = " << i;
      std::cout << ": maximum output channels = " << info.outputChannels << "\n";
    }
  }

  RtAudio::StreamParameters parameters;
  parameters.deviceId = audio.getDefaultOutputDevice();
  parameters.nChannels = 2;
  parameters.firstChannel = 0;
  unsigned int sampleRate = SAMPLERATE;
  unsigned int bufferFrames = BUFFERSIZE;
  double data[2];

  try {
    audio.openStream( &parameters, NULL, RTAUDIO_FLOAT32, sampleRate,
                      &bufferFrames, &audioCallback, (void *)&data );
    audio.startStream();
  }
  catch ( RtError& e ) {
    e.printMessage();
    return 1;
  }

  return 0;
}

//-----------------------------------------------------------------------------
// Name: main( )
// Desc: starting point
//-----------------------------------------------------------------------------
int main( int argc, char ** argv )
{
  // Get RtAudio Instance with default API
  RtAudio *audio = new RtAudio();
  // buffer size
  unsigned int buffer_size = 512;

  // Output Stream Parameters
  RtAudio::StreamParameters outputStreamParams;
  outputStreamParams.deviceId = audio->getDefaultOutputDevice();
  outputStreamParams.nChannels = 1;

  // Input Stream Parameters
  RtAudio::StreamParameters inputStreamParams;
  inputStreamParams.deviceId = audio->getDefaultInputDevice();
  inputStreamParams.nChannels = 1;

  // Get RtAudio Stream
  try {
    audio->openStream( NULL, &inputStreamParams, RTAUDIO_FLOAT32, MY_FREQ,
                       &buffer_size, callback_func, NULL );
  }
  catch(RtError &err) {
    err.printMessage();
    exit(1);
  }

  g_bufferSize = buffer_size;

  // Samples for Feature Extraction in a Buffer
  g_samples = (SAMPLE *)malloc(sizeof(SAMPLE)*g_bufferSize*g_numMaxBuffersToUse);
  g_audio_buffer = (SAMPLE *)malloc(sizeof(SAMPLE)*g_bufferSize*g_numMaxBuffersToUse);
  g_another_buffer = (SAMPLE *)malloc(sizeof(SAMPLE)*g_bufferSize*g_numMaxBuffersToUse);
  g_buffest = (SAMPLE *)malloc(sizeof(SAMPLE)*g_bufferSize*g_numMaxBuffersToUse);
  g_residue = (SAMPLE *)malloc(sizeof(SAMPLE)*g_bufferSize*g_numMaxBuffersToUse);
  g_coeff = (SAMPLE *)malloc(sizeof(SAMPLE)*g_order);
  g_dwt = (SAMPLE *)malloc(sizeof(SAMPLE)*g_bufferSize*g_numMaxBuffersToUse);

  // initialize GLUT
  glutInit( &argc, argv );
  // double buffer, use rgb color, enable depth buffer
  glutInitDisplayMode( GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH );
  // initialize the window size
  glutInitWindowSize( g_width, g_height );
  // set the window position
  glutInitWindowPosition( 100, 100 );
  // create the window
  glutCreateWindow( "The New File" );
  // set the idle function - called when idle
  glutIdleFunc( idleFunc );
  // set the display function - called when redrawing
  glutDisplayFunc( displayFunc );
  // set the reshape function - called when client area changes
  glutReshapeFunc( reshapeFunc );
  // set the keyboard function - called on keyboard events
  glutKeyboardFunc( keyboardFunc );
  // set the mouse function - called on mouse stuff
  glutMouseFunc( mouseFunc );

  // do our own initialization
  initialize();
  // initialize mfcc
  initMFCC();
  // init lpc
  initialize_lpc();

  // initialize osc
  // Initialize a socket to get a port
  g_transmitSocket = new UdpTransmitSocket( IpEndpointName( g_ADDRESS.c_str(), SERVERPORT ) );

  // // Set the global sample rate before creating class instances.
  // Stk::setSampleRate( 44100.0 );
  // // Read In File
  // try
  // {
  //   // read the file
  //   g_fin.openFile( "TomVega.wav" );
  //   // change the rate
  //   g_fin.setRate( 1 );
  //   // normalize the peak
  //   g_fin.normalize();
  // } catch( stk::StkError & e )
  // {
  //   cerr << "baaaaaaaaad..." << endl;
  //   return 1;
  // }

  // Start Stream
  try {
    audio->startStream();
  }
  catch( RtError & err ) {
    // do stuff
    err.printMessage();
    goto cleanup;
  }

  // let GLUT handle the current thread from here
  glutMainLoop();

  // if we get here, then stop!
  try {
    audio->stopStream();
  }
  catch( RtError & err ) {
    // do stuff
    err.printMessage();
  }

cleanup:
  audio->closeStream();
  delete audio;

  return 0;
}

int main()
{
  // Set the global sample rate before creating class instances.
  Stk::setSampleRate( 44100.0 );

  SineWave sine;
  RtAudio dac;

  // Figure out how many bytes in an StkFloat and setup the RtAudio stream.
  RtAudio::StreamParameters parameters;
  parameters.deviceId = dac.getDefaultOutputDevice();
  parameters.nChannels = 1;
  RtAudioFormat format = ( sizeof(StkFloat) == 8 ) ? RTAUDIO_FLOAT64 : RTAUDIO_FLOAT32;
  unsigned int bufferFrames = RT_BUFFER_SIZE;
  try {
    dac.openStream( &parameters, NULL, format, (unsigned int)Stk::sampleRate(),
                    &bufferFrames, &tick, (void *)&sine );
  }
  catch ( RtError &error ) {
    error.printMessage();
    goto cleanup;
  }

  double f = 440.0;
  double twelveRoot2 = 1.0594630943592952645618252949463;
  sine.setFrequency(f);

  try {
    dac.startStream();
  }
  catch ( RtError &error ) {
    error.printMessage();
    goto cleanup;
  }

  // Block waiting here.
  int keyhit = 0;
  std::cout << "\nPlaying ... press <esc> to quit.\n";
  while (keyhit != 32 && keyhit != 27) {
    keyhit = _getch();
    if (tolower(keyhit) == 'a') {
      f = 220.0;
      sine.setFrequency(f);
    }
    else if (tolower(keyhit) == 'g') {
      f /= twelveRoot2;
      sine.setFrequency(f);
    }
    else if (tolower(keyhit) == 'h') {
      f *= twelveRoot2;
      sine.setFrequency(f);
    }
    else if (tolower(keyhit) == 'f') {
      for (int i = 0; i < 2; ++i) f /= twelveRoot2;
      sine.setFrequency(f);
    }
    else if (tolower(keyhit) == 'j') {
      for (int i = 0; i < 2; ++i) f *= twelveRoot2;
      sine.setFrequency(f);
    }
    else if (tolower(keyhit) == 'd') {
      for (int i = 0; i < 3; ++i) f /= twelveRoot2;
      sine.setFrequency(f);
    }
    else if (tolower(keyhit) == 'k') {
      for (int i = 0; i < 3; ++i) f *= twelveRoot2;
      sine.setFrequency(f);
    }
    else if (tolower(keyhit) == 's') {
      for (int i = 0; i < 4; ++i) f /= twelveRoot2;
      sine.setFrequency(f);
    }
    else if (tolower(keyhit) == 'l') {
      for (int i = 0; i < 4; ++i) f *= twelveRoot2;
      sine.setFrequency(f);
    }
    else {
      std::cout << "Freq: " << f << std::endl;
    }
  }

  // Shut down the output stream.
  try {
    dac.closeStream();
  }
  catch ( RtError &error ) {
    error.printMessage();
  }

 cleanup:
  return 0;
}

int main( int argc, char *argv[] )
{
  TickData data;
  int i;

#if defined(__STK_REALTIME__)
  RtAudio dac;
#endif

  // If you want to change the default sample rate (set in Stk.h), do
  // it before instantiating any objects! If the sample rate is
  // specified in the command line, it will override this setting.
  Stk::setSampleRate( 44100.0 );

  // By default, warning messages are not printed. If we want to see
  // them, we need to specify that here.
  Stk::showWarnings( true );

  // Check the command-line arguments for errors and to determine
  // the number of WvOut objects to be instantiated (in utilities.cpp).
  data.nWvOuts = checkArgs( argc, argv );
  data.wvout = (WvOut **) calloc( data.nWvOuts, sizeof(WvOut *) );

  // Parse the command-line flags, instantiate WvOut objects, and
  // instantiate the input message controller (in utilities.cpp).
  try {
    data.realtime = parseArgs( argc, argv, data.wvout, data.messager );
  }
  catch (StkError &) {
    goto cleanup;
  }

  // If realtime output, allocate the dac here.
#if defined(__STK_REALTIME__)
  if ( data.realtime ) {
    RtAudioFormat format = ( sizeof(StkFloat) == 8 ) ? RTAUDIO_FLOAT64 : RTAUDIO_FLOAT32;
    RtAudio::StreamParameters parameters;
    parameters.deviceId = dac.getDefaultOutputDevice();
    parameters.nChannels = data.channels;
    unsigned int bufferFrames = RT_BUFFER_SIZE;
    try {
      dac.openStream( &parameters, NULL, format, (unsigned int)Stk::sampleRate(),
                      &bufferFrames, &tick, (void *)&data );
    }
    catch ( RtAudioError& error ) {
      error.printMessage();
      goto cleanup;
    }
  }
#endif

  // Set the reverb parameters.
  data.reverb.setT60( data.t60 );
  data.reverb.setEffectMix( 0.2 );

  // Allocate guitar
  data.guitar = new Guitar( nStrings );

  // Configure distortion and feedback.
  data.distortion.setThreshold( 2.0 / 3.0 );
  data.distortion.setA1( 1.0 );
  data.distortion.setA2( 0.0 );
  data.distortion.setA3( -1.0 / 3.0 );
  data.distortionMix = 0.9;
  data.distortionGain = 1.0;
  data.feedbackDelay.setMaximumDelay( (unsigned long int)( 1.1 * Stk::sampleRate() ) );
  data.feedbackDelay.setDelay( 20000 );
  data.feedbackGain = 0.001;
  data.oldFeedbackGain = 0.001;

  // Install an interrupt handler function.
  (void) signal(SIGINT, finish);

  // If realtime output, set our callback function and start the dac.
#if defined(__STK_REALTIME__)
  if ( data.realtime ) {
    try {
      dac.startStream();
    }
    catch ( RtAudioError &error ) {
      error.printMessage();
      goto cleanup;
    }
  }
#endif

  // Setup finished.
  while ( !done ) {
#if defined(__STK_REALTIME__)
    if ( data.realtime )
      // Periodically check "done" status.
      Stk::sleep( 200 );
    else
#endif
      // Call the "tick" function to process data.
      tick( NULL, NULL, 256, 0, 0, (void *)&data );
  }

  // Shut down the output stream.
#if defined(__STK_REALTIME__)
  if ( data.realtime ) {
    try {
      dac.closeStream();
    }
    catch ( RtAudioError& error ) {
      error.printMessage();
    }
  }
#endif

 cleanup:
  for ( i=0; i<(int)data.nWvOuts; i++ ) delete data.wvout[i];
  free( data.wvout );

  delete data.guitar;

  std::cout << "\nStk eguitar finished ... goodbye.\n\n";
  return 0;
}

int main(int argc, char *argv[])
{
  // Minimal command-line checking.
  if ( argc < 3 || argc > 4 ) usage();

  // Set the global sample rate before creating class instances.
  Stk::setSampleRate( (StkFloat) atof( argv[2] ) );

  // Initialize our WvIn and RtAudio pointers.
  RtAudio dac;
  FileWvIn input;
  FileLoop inputLoop;

  // Try to load the soundfile.
  try {
    input.openFile( argv[1] );
    inputLoop.openFile( argv[1] );
  }
  catch ( StkError & ) {
    exit( 1 );
  }

  // Set input read rate based on the default STK sample rate.
  double rate = 1.0;
  rate = input.getFileRate() / Stk::sampleRate();
  rate = inputLoop.getFileRate() / Stk::sampleRate();
  if ( argc == 4 ) rate *= atof( argv[3] );
  input.setRate( rate );

  input.ignoreSampleRateChange();

  // Find out how many channels we have.
  int channels = input.channelsOut();

  // Figure out how many bytes in an StkFloat and setup the RtAudio stream.
  RtAudio::StreamParameters parameters;
  parameters.deviceId = dac.getDefaultOutputDevice();
  parameters.nChannels = channels;
  RtAudioFormat format = ( sizeof(StkFloat) == 8 ) ? RTAUDIO_FLOAT64 : RTAUDIO_FLOAT32;
  unsigned int bufferFrames = RT_BUFFER_SIZE;
  try {
    dac.openStream( &parameters, NULL, format, (unsigned int)Stk::sampleRate(),
                    &bufferFrames, &tick, (void *)&inputLoop );
  }
  catch ( RtAudioError &error ) {
    error.printMessage();
    goto cleanup;
  }

  // Install an interrupt handler function.
  (void) signal(SIGINT, finish);

  // Resize the StkFrames object appropriately.
  frames.resize( bufferFrames, channels );

  try {
    dac.startStream();
  }
  catch ( RtAudioError &error ) {
    error.printMessage();
    goto cleanup;
  }

  // Block waiting until callback signals done.
  while ( !done )
    Stk::sleep( 100 );

  // By returning a non-zero value in the callback above, the stream
  // is automatically stopped. But we should still close it.
  try {
    dac.closeStream();
  }
  catch ( RtAudioError &error ) {
    error.printMessage();
  }

 cleanup:
  return 0;
}

//-----------------------------------------------------------------------------
// name: main()
// desc: entry point
//-----------------------------------------------------------------------------
int main( int argc, char ** argv )
{
  // instantiate RtAudio object
  RtAudio audio;
  // variables
  unsigned int bufferBytes = 0;
  // frame size
  unsigned int bufferFrames = 512;

  // check for audio devices
  if( audio.getDeviceCount() < 1 )
  {
    // nopes
    cout << "no audio devices found!" << endl;
    exit( 1 );
  }

  // initialize GLUT
  glutInit( &argc, argv );
  // init gfx
  initGfx();

  // let RtAudio print messages to stderr.
  audio.showWarnings( true );

  // set input and output parameters
  RtAudio::StreamParameters iParams, oParams;
  iParams.deviceId = audio.getDefaultInputDevice();
  iParams.nChannels = MY_CHANNELS;
  iParams.firstChannel = 0;
  oParams.deviceId = audio.getDefaultOutputDevice();
  oParams.nChannels = MY_CHANNELS;
  oParams.firstChannel = 0;

  // create stream options
  RtAudio::StreamOptions options;

  // go for it
  try {
    // open a stream
    audio.openStream( &oParams, &iParams, MY_FORMAT, MY_SRATE,
                      &bufferFrames, &callme, (void *)&bufferBytes, &options );
  }
  catch( RtError& e )
  {
    // error!
    cout << e.getMessage() << endl;
    exit( 1 );
  }

  // compute
  bufferBytes = bufferFrames * MY_CHANNELS * sizeof(SAMPLE);
  // allocate global buffer
  g_bufferSize = bufferFrames;
  g_buffer = new SAMPLE[g_bufferSize];
  memset( g_buffer, 0, sizeof(SAMPLE)*g_bufferSize );

  // go for it
  try {
    // start stream
    audio.startStream();

    // let GLUT handle the current thread from here
    glutMainLoop();

    // stop the stream.
    audio.stopStream();
  }
  catch( RtError& e )
  {
    // print error message
    cout << e.getMessage() << endl;
    goto cleanup;
  }

cleanup:
  // close if open
  if( audio.isStreamOpen() )
    audio.closeStream();

  // done
  return 0;
}

//-----------------------------------------------------------------------------
// name: main()
// desc: entry point
//-----------------------------------------------------------------------------
int main( int argc, char ** argv )
{
  callbackData data;
  // global for frequency
  data.g_freq = 440;
  // global sample number variable
  data.g_t = 0;
  // global for width;
  data.g_width = 0;
  // global for input
  data.g_input = 0;

  // check parameters and parse input
  if (!parse(argc,argv,data))
  {
    exit(0);
  }

  // instantiate RtAudio object
  RtAudio adac;
  // variables
  unsigned int bufferBytes = 0;
  // frame size
  unsigned int bufferFrames = 512;

  // check for audio devices
  if( adac.getDeviceCount() < 1 )
  {
    // nopes
    cout << "no audio devices found!" << endl;
    exit( 1 );
  }

  // let RtAudio print messages to stderr.
  adac.showWarnings( true );

  // set input and output parameters
  RtAudio::StreamParameters iParams, oParams;
  iParams.deviceId = adac.getDefaultInputDevice();
  iParams.nChannels = MY_CHANNELS;
  iParams.firstChannel = 0;
  oParams.deviceId = adac.getDefaultOutputDevice();
  oParams.nChannels = MY_CHANNELS;
  oParams.firstChannel = 0;

  // create stream options
  RtAudio::StreamOptions options;

  // go for it
  try {
    // open a stream
    adac.openStream( &oParams, &iParams, MY_FORMAT, MY_SRATE,
                     &bufferFrames, &callme, (void *)&data, &options );
  }
  catch( RtError& e )
  {
    // error!
    cout << e.getMessage() << endl;
    exit( 1 );
  }

  // compute
  bufferBytes = bufferFrames * MY_CHANNELS * sizeof(SAMPLE);

  // test RtAudio functionality for reporting latency.
  cout << "stream latency: " << adac.getStreamLatency() << " frames" << endl;

  // go for it
  try {
    // start stream
    adac.startStream();

    // get input
    char input;
    std::cout << "running... press <enter> to quit (buffer frames: " << bufferFrames << ")" << endl;
    std::cin.get(input);

    // stop the stream.
    adac.stopStream();
  }
  catch( RtError& e )
  {
    // print error message
    cout << e.getMessage() << endl;
    goto cleanup;
  }

cleanup:
  // close if open
  if( adac.isStreamOpen() )
    adac.closeStream();

  // done
  outfile << "];\nplot(x)";
  return 0;
}

int main(const int argc, const char *argv[])
{
  RtAudio adc;
  unsigned int deviceCount = adc.getDeviceCount();
  if (deviceCount < 1) {
    cout << endl << "No audio devices found!" << endl;
    exit(0);
  }

  unsigned int inputDevice = adc.getDefaultInputDevice();
  unsigned int outputDevice = adc.getDefaultOutputDevice();

  for (int i=0; i<argc; i++) {
    if (strcmp(argv[i], "-devices") == 0) {
      // Scan through devices for various capabilities
      showDevices(deviceCount, adc);
      exit(0);
    }
    if (strcmp(argv[i], "-input") == 0) {
      if (i == argc-1) {
        usage();
        exit(0);
      }
      inputDevice = atoi(argv[++i]);
      validateDevice(inputDevice, deviceCount, adc, true);
    }
    if (strcmp(argv[i], "-output") == 0) {
      if (i == argc-1) {
        usage();
        exit(0);
      }
      outputDevice = atoi(argv[++i]);
      validateDevice(outputDevice, deviceCount, adc, false);
    }
  }

  // Initialise DSP thread
  // Initialise GUI

  unsigned int sampleRate = 44100;
  unsigned int bufferFrames = 512;
  unsigned int bufferBytes = 0;

  RtAudio::StreamParameters inputParameters;
  inputParameters.deviceId = inputDevice;
  inputParameters.nChannels = 2;
  inputParameters.firstChannel = 0;

  RtAudio::StreamParameters outputParameters;
  outputParameters.deviceId = outputDevice;
  outputParameters.nChannels = 2;
  outputParameters.firstChannel = 0;

  try {
    adc.openStream(&outputParameters, &inputParameters, RTAUDIO_SINT16,
                   sampleRate, &bufferFrames, &inout, &bufferBytes);
    adc.startStream();
  }
  catch (RtAudioError& e) {
    e.printMessage();
    exit(0);
  }

  // adc.openStream could have adjusted the bufferFrames.
  // Set the user data buffer to the sample buffer size in bytes, so that the
  // inout callback function knows how much data to copy. The example code
  // uses this - 2 is Stereo, 4 is signed int (4 bytes on OSX)
  bufferBytes = bufferFrames * 2 * 4;

  // Can now initialise buffer management. inout could have been asking for
  // buffers but buffer management won't give them until it has been
  // initialised.
  cout << "buffer size in bytes is " << bufferBytes << endl;
  // TODO protect with mutex
  bufferManager = new BufferManager(bufferBytes, maxBuffers);

  char input;
  cout << endl << "Recording ... press <enter> to quit." << endl;
  cin.get(input);
  cout << "Terminating" << endl;

  try {
    // Stop the stream
    adc.stopStream();
  }
  catch (RtAudioError& e) {
    e.printMessage();
  }
  if (adc.isStreamOpen()) adc.closeStream();

  // TODO shut down DSP chain, release all buffers
  // TODO shut down Display chain, release all buffers
  delete bufferManager;

  cout << "Terminated" << endl;
  return 0;
}

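// --- Hedged sketch (not from the original sources): the duplex example above
// passes &inout and &bufferBytes to openStream but does not show the callback.
// A minimal pass-through version, assuming the interleaved RTAUDIO_SINT16
// stereo stream opened above and userData pointing at the byte count, could
// simply copy the capture buffer to the playback buffer.
// Requires "RtAudio.h", <cstring> and <iostream>.
int inout( void *outputBuffer, void *inputBuffer, unsigned int /*nBufferFrames*/,
           double /*streamTime*/, RtAudioStreamStatus status, void *userData )
{
  if ( status ) std::cout << "Stream over/underflow detected." << std::endl;
  unsigned int *bytes = (unsigned int *) userData;
  memcpy( outputBuffer, inputBuffer, *bytes );
  return 0;
}
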
int main (int argc, char ** argv)
{
  // parse tempo
  if (argc>2) {
    cerr<<"Error in arguments\n";
    printHelp();
    exit(1);
  }
  else if (argc==2) {
    g_tempo = atoi(argv[1]);
    if (g_tempo<40 || g_tempo>200) {
      cerr<<"Tempo out of bounds!\n";
      printHelp();
      exit(1);
    }
    tempoChange();
  }

  // set up fluid synth stuff
  // TODO: error checking!!!!
  g_settings = new_fluid_settings();
  g_synth = new_fluid_synth( g_settings );
  g_metronome = new_fluid_synth( g_settings );

  //fluid_player_t* player;
  //player = new_fluid_player(g_synth);
  //fluid_player_add(player, "backing.mid");
  //fluid_player_play(player);

  if (fluid_synth_sfload(g_synth, "piano.sf2", 1) == -1) {
    cerr << "Error loading sound font" << endl;
    exit(1);
  }
  if (fluid_synth_sfload(g_metronome, "drum.sf2", 1) == -1) {
    cerr << "Error loading sound font" << endl;
    exit(1);
  }

  // RtAudio config + init

  // pointer to RtAudio object
  RtMidiIn * midiin = NULL;
  RtAudio * audio = NULL;

  unsigned int bufferSize = 512; //g_sixteenth/100;

  // MIDI config + init
  try {
    midiin = new RtMidiIn();
  }
  catch( RtError & err ) {
    err.printMessage();
    // goto cleanup;
  }

  // Check available ports.
  if ( midiin->getPortCount() == 0 ) {
    std::cout << "No ports available!\n";
    // goto cleanup;
  }
  // use the first available port
  if ( midiin->getPortCount() > 2)
    midiin->openPort( 1 );
  else
    midiin->openPort( 0 );

  // set midi callback
  midiin->setCallback( &midi_callback );

  // Don't ignore sysex, timing, or active sensing messages.
  midiin->ignoreTypes( false, false, false );

  // create the object
  try {
    audio = new RtAudio();
    cerr << "buffer size: " << bufferSize << endl;
  }
  catch( RtError & err ) {
    err.printMessage();
    exit(1);
  }

  if( audio->getDeviceCount() < 1 ) {
    // nopes
    cout << "no audio devices found!" << endl;
    exit( 1 );
  }

  // let RtAudio print messages to stderr.
  audio->showWarnings( true );

  // set input and output parameters
  RtAudio::StreamParameters iParams, oParams;
  iParams.deviceId = audio->getDefaultInputDevice();
  iParams.nChannels = 1;
  iParams.firstChannel = 0;
  oParams.deviceId = audio->getDefaultOutputDevice();
  oParams.nChannels = 2;
  oParams.firstChannel = 0;

  // create stream options
  RtAudio::StreamOptions options;

  // set the callback and start stream
  try {
    audio->openStream( &oParams, &iParams, RTAUDIO_FLOAT32, MY_SRATE,
                       &bufferSize, &audioCallback, NULL, &options);
    audio->startStream();

    // test RtAudio functionality for reporting latency.
    cout << "stream latency: " << audio->getStreamLatency() << " frames" << endl;
  }
  catch( RtError & err ) {
    err.printMessage();
    goto cleanup;
  }

  // wait for user input
  cout << "Type CTRL+C to quit:";

  // initialize graphics
  gfxInit(&argc,argv);

  // if we get here, stop!
  try {
    audio->stopStream();
  }
  catch( RtError & err ) {
    err.printMessage();
  }

  // Clean up
cleanup:
  if(audio) {
    audio->closeStream();
    delete audio;
  }

  return 0;
}

int main( int argc, char *argv[] )
{
  TickData data;
  int i;

#if defined(__STK_REALTIME__)
  RtAudio dac;
#endif

  // If you want to change the default sample rate (set in Stk.h), do
  // it before instantiating any objects! If the sample rate is
  // specified in the command line, it will override this setting.
  Stk::setSampleRate( 44100.0 );

  // Depending on how you compile STK, you may need to explicitly set
  // the path to the rawwave directory.
  Stk::setRawwavePath( "../../rawwaves/" );

  // By default, warning messages are not printed. If we want to see
  // them, we need to specify that here.
  Stk::showWarnings( true );

  // Check the command-line arguments for errors and to determine
  // the number of WvOut objects to be instantiated (in utilities.cpp).
  data.nWvOuts = checkArgs( argc, argv );
  data.wvout = (WvOut **) calloc( data.nWvOuts, sizeof(WvOut *) );

  // Instantiate the instrument(s) type from the command-line argument
  // (in utilities.cpp).
  data.nVoices = countVoices( argc, argv );
  data.instrument = (Instrmnt **) calloc( data.nVoices, sizeof(Instrmnt *) );
  data.currentVoice = voiceByName( argv[1], &data.instrument[0] );
  if ( data.currentVoice < 0 ) {
    free( data.wvout );
    free( data.instrument );
    usage(argv[0]);
  }
  // If there was no error allocating the first voice, we should be fine for more.
  for ( i=1; i<data.nVoices; i++ )
    voiceByName( argv[1], &data.instrument[i] );

  data.voicer = (Voicer *) new Voicer( 0.0 );
  for ( i=0; i<data.nVoices; i++ )
    data.voicer->addInstrument( data.instrument[i] );

  // Parse the command-line flags, instantiate WvOut objects, and
  // instantiate the input message controller (in utilities.cpp).
  try {
    data.realtime = parseArgs( argc, argv, data.wvout, data.messager );
  }
  catch (StkError &) {
    goto cleanup;
  }

  // If realtime output, allocate the dac here.
#if defined(__STK_REALTIME__)
  if ( data.realtime ) {
    RtAudioFormat format = ( sizeof(StkFloat) == 8 ) ? RTAUDIO_FLOAT64 : RTAUDIO_FLOAT32;
    RtAudio::StreamParameters parameters;
    parameters.deviceId = dac.getDefaultOutputDevice();
    parameters.nChannels = data.channels;
    unsigned int bufferFrames = RT_BUFFER_SIZE;
    try {
      dac.openStream( &parameters, NULL, format, (unsigned int)Stk::sampleRate(),
                      &bufferFrames, &tick, (void *)&data );
    }
    catch ( RtAudioError& error ) {
      error.printMessage();
      goto cleanup;
    }
  }
#endif

  // Set the reverb parameters.
  data.reverb.setT60( data.t60 );
  data.reverb.setEffectMix(0.2);

  // Install an interrupt handler function.
  (void) signal(SIGINT, finish);

  // If realtime output, set our callback function and start the dac.
#if defined(__STK_REALTIME__)
  if ( data.realtime ) {
    try {
      dac.startStream();
    }
    catch ( RtAudioError &error ) {
      error.printMessage();
      goto cleanup;
    }
  }
#endif

  // Setup finished.
  while ( !done ) {
#if defined(__STK_REALTIME__)
    if ( data.realtime )
      // Periodically check "done" status.
      Stk::sleep( 200 );
    else
#endif
      // Call the "tick" function to process data.
      tick( NULL, NULL, 256, 0, 0, (void *)&data );
  }

  // Shut down the output stream.
#if defined(__STK_REALTIME__)
  if ( data.realtime ) {
    try {
      dac.closeStream();
    }
    catch ( RtAudioError& error ) {
      error.printMessage();
    }
  }
#endif

 cleanup:
  for ( i=0; i<(int)data.nWvOuts; i++ ) delete data.wvout[i];
  free( data.wvout );

  delete data.voicer;

  for ( i=0; i<data.nVoices; i++ ) delete data.instrument[i];
  free( data.instrument );

  std::cout << "\nStk demo finished ... goodbye.\n\n";
  return 0;
}

int main( int argc, char *argv[] )
{
  unsigned int channels, fs, bufferFrames, device = 0, offset = 0;
  char *file;

  // minimal command-line checking
  if ( argc < 4 || argc > 6 ) usage();

  RtAudio dac;
  if ( dac.getDeviceCount() < 1 ) {
    std::cout << "\nNo audio devices found!\n";
    exit( 0 );
  }

  channels = (unsigned int) atoi( argv[1] );
  fs = (unsigned int) atoi( argv[2] );
  file = argv[3];
  if ( argc > 4 )
    device = (unsigned int) atoi( argv[4] );
  if ( argc > 5 )
    offset = (unsigned int) atoi( argv[5] );

  OutputData data;
  data.fd = fopen( file, "rb" );
  if ( !data.fd ) {
    std::cout << "Unable to find or open file!\n";
    exit( 1 );
  }

  // Set our stream parameters for output only.
  bufferFrames = 512;
  RtAudio::StreamParameters oParams;
  oParams.deviceId = device;
  oParams.nChannels = channels;
  oParams.firstChannel = offset;

  if ( device == 0 )
    oParams.deviceId = dac.getDefaultOutputDevice();

  data.channels = channels;
  try {
    dac.openStream( &oParams, NULL, FORMAT, fs, &bufferFrames, &output, (void *)&data );
    dac.startStream();
  }
  catch ( RtAudioError& e ) {
    std::cout << '\n' << e.getMessage() << '\n' << std::endl;
    goto cleanup;
  }

  std::cout << "\nPlaying raw file " << file << " (buffer frames = " << bufferFrames << ")." << std::endl;
  while ( 1 ) {
    SLEEP( 100 ); // wake every 100 ms to check if we're done
    if ( dac.isStreamRunning() == false ) break;
  }

 cleanup:
  fclose( data.fd );
  dac.closeStream();

  return 0;
}

int main( int argc, char *argv[] )
{
  // Dekrispator init
  randomGen_init();
  Synth_Init();
  // end Dekrispator init

  // FILE* f = fopen("bla.txt","wb");
  // fclose(f);

  TickData data;
  RtAudio dac;
  int i;

  //if ( argc < 2 || argc > 6 ) usage();

  // If you want to change the default sample rate (set in Stk.h), do
  // it before instantiating any objects! If the sample rate is
  // specified in the command line, it will override this setting.
  Stk::setSampleRate( 44100.0 );

  {
    RtMidiIn *midiin = 0;
    midiin = new RtMidiIn();
    unsigned int i = 0, nPorts = midiin->getPortCount();
    if ( nPorts == 0 ) {
      std::cout << "No input Midi ports available, just running demo mode." << std::endl;
      delete midiin;
      midiin = 0;
    }
    else {
      for ( i=0; i<nPorts; i++ ) {
        std::string portName = midiin->getPortName(i);
        std::cout << " Input port #" << i << ": " << portName << '\n';
      }
      delete midiin;
      midiin = 0;
      for ( i=0; i<nPorts && i<MAX_MIDI_DEVICES; i++ ) {
        data.messagers[data.numMessagers++].startMidiInput(i);
      }
    }
  }

  // Parse the command-line arguments.
  unsigned int port = 2001;
  for ( i=1; i<argc; i++ ) {
    if ( !strcmp( argv[i], "-is" ) ) {
      if ( i+1 < argc && argv[i+1][0] != '-' ) port = atoi(argv[++i]);
      if (data.numMessagers<MAX_MIDI_DEVICES) {
        data.messagers[data.numMessagers++].startSocketInput( port );
      }
    }
    else if (!strcmp( argv[i], "-ip" ) ) {
      if (data.numMessagers<MAX_MIDI_DEVICES) {
        data.messagers[data.numMessagers++].startStdInput();
      }
    }
    else if ( !strcmp( argv[i], "-s" ) && ( i+1 < argc ) && argv[i+1][0] != '-')
      Stk::setSampleRate( atoi(argv[++i]) );
    else
      usage();
  }

  // Allocate the dac here.
  RtAudioFormat format = ( sizeof(StkFloat) == 8 ) ? RTAUDIO_FLOAT64 : RTAUDIO_FLOAT32;
  RtAudio::StreamParameters parameters;
  parameters.deviceId = dac.getDefaultOutputDevice();
  parameters.nChannels = 2;
  unsigned int bufferFrames = RT_BUFFER_SIZE;
  try {
    dac.openStream( &parameters, NULL, format, (unsigned int)Stk::sampleRate(),
                    &bufferFrames, &tick, (void *)&data );
  }
  catch ( RtAudioError& error ) {
    error.printMessage();
    goto cleanup;
  }

  data.reverbs[0].setT60( data.t60 );
  data.reverbs[0].setEffectMix( 0.5 );
  data.reverbs[1].setT60( 2.0 );
  data.reverbs[1].setEffectMix( 0.2 );

  data.rateScaler = 22050.0 / Stk::sampleRate();

  // Install an interrupt handler function.
  (void) signal( SIGINT, finish );

  // If realtime output, set our callback function and start the dac.
  try {
    dac.startStream();
  }
  catch ( RtAudioError &error ) {
    error.printMessage();
    goto cleanup;
  }

  // Setup finished.
  while ( !done ) {
    // Periodically check "done" status.
    Stk::sleep( 50 );
  }

  // Shut down the output stream.
  try {
    dac.closeStream();
  }
  catch ( RtAudioError& error ) {
    error.printMessage();
  }

 cleanup:
  return 0;
}

int main( int argc, char *argv[] )
{
  unsigned int bufferFrames, fs, device = 0, offset = 0;

  // minimal command-line checking
  if (argc < 3 || argc > 6 ) usage();

  RtAudio dac;
  if ( dac.getDeviceCount() < 1 ) {
    std::cout << "\nNo audio devices found!\n";
    exit( 1 );
  }

  channels = (unsigned int) atoi( argv[1] );
  fs = (unsigned int) atoi( argv[2] );
  if ( argc > 3 )
    device = (unsigned int) atoi( argv[3] );
  if ( argc > 4 )
    offset = (unsigned int) atoi( argv[4] );
  if ( argc > 5 )
    nFrames = (unsigned int) (fs * atof( argv[5] ));
  if ( nFrames > 0 ) checkCount = true;

  double *data = (double *) calloc( channels, sizeof( double ) );

  // Let RtAudio print messages to stderr.
  dac.showWarnings( true );

  // Set our stream parameters for output only.
  bufferFrames = 512;
  RtAudio::StreamParameters oParams;
  oParams.deviceId = device;
  oParams.nChannels = channels;
  oParams.firstChannel = offset;

  if ( device == 0 )
    oParams.deviceId = dac.getDefaultOutputDevice();

  options.flags = RTAUDIO_HOG_DEVICE;
  options.flags |= RTAUDIO_SCHEDULE_REALTIME;
#if !defined( USE_INTERLEAVED )
  options.flags |= RTAUDIO_NONINTERLEAVED;
#endif
  try {
    dac.openStream( &oParams, NULL, FORMAT, fs, &bufferFrames, &saw,
                    (void *)data, &options, &errorCallback );
    dac.startStream();
  }
  catch ( RtAudioError& e ) {
    e.printMessage();
    goto cleanup;
  }

  if ( checkCount ) {
    while ( dac.isStreamRunning() == true ) SLEEP( 100 );
  }
  else {
    char input;
    //std::cout << "Stream latency = " << dac.getStreamLatency() << "\n" << std::endl;
    std::cout << "\nPlaying ... press <enter> to quit (buffer size = " << bufferFrames << ").\n";
    std::cin.get( input );

    try {
      // Stop the stream
      dac.stopStream();
    }
    catch ( RtAudioError& e ) {
      e.printMessage();
    }
  }

 cleanup:
  if ( dac.isStreamOpen() ) dac.closeStream();
  free( data );

  return 0;
}

unsigned int Audio::getDefaultOutputDevice() { return dac.getDefaultOutputDevice(); }
int main ()
{
  // For the program to work, a 16-bit PCM wave file must be
  // in the same folder!
  const char * fname = "test.flac" ;

  // Soundfile handle from the libsndfile library
  SndfileHandle file = SndfileHandle (fname) ;

  // Print all available info about the audio file!
  std::cout << "Reading file: " << fname << std::endl;
  std::cout << "File format: " << file.format() << std::endl;
  std::cout << "PCM 16 BIT: " << (SF_FORMAT_WAV | SF_FORMAT_PCM_16) << std::endl;
  std::cout << "Samples in file: " << file.frames() << std::endl;
  std::cout << "Samplerate " << file.samplerate() << std::endl;
  std::cout << "Channels: " << file.channels() << std::endl;

  // The RtAudio class is both dac and adc, but is used here only as a dac!
  RtAudio dac;
  if ( dac.getDeviceCount() < 1 ) {
    std::cout << "\nNo audio devices found!\n";
    return 0;
  }

  // Output params ...
  RtAudio::StreamParameters parameters;
  parameters.deviceId = dac.getDefaultOutputDevice();
  parameters.nChannels = 2;
  parameters.firstChannel = 0;
  unsigned int sampleRate = 44100;

  // NOTE! Frames != samples:
  // one frame = the samples for all channels,
  // i.e. |samples| = channels * frames!
  unsigned int bufferFrames = 1024;

  // Since we read 16-bit PCM data, RTAUDIO_SINT16 should be used as the data format.
  // The soundfile handle is passed to the callback as its user data here.
  // A "serious" solution should do this differently!
  // Incompatible formats can, by the way, produce "interesting" effects!
  try {
    dac.openStream( &parameters, NULL, RTAUDIO_SINT16, sampleRate,
                    &bufferFrames, &fplay, (void *)&file);
    dac.startStream();
  }
  catch ( RtAudioError& e ) {
    e.printMessage();
    return 0;
  }

  char input;
  std::cout << "\nPlaying ... press <enter> to quit.\n";
  std::cin.get( input );

  try {
    // Stop the stream
    dac.stopStream();
  }
  catch (RtAudioError& e) {
    e.printMessage();
  }

  if ( dac.isStreamOpen() ) dac.closeStream();

  return 0 ;
}

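// --- Hedged sketch (not from the original sources): the libsndfile example
// above hands the SndfileHandle to openStream as user data but does not show
// `fplay`.  Assuming an interleaved RTAUDIO_SINT16 stream whose channel count
// matches the file, a minimal callback could read frames straight from the
// file into the output buffer and stop the stream at end of file.
// Requires "RtAudio.h", <sndfile.hh> and <cstring>.
int fplay( void *outputBuffer, void * /*inputBuffer*/, unsigned int nBufferFrames,
           double /*streamTime*/, RtAudioStreamStatus /*status*/, void *userData )
{
  SndfileHandle *file = (SndfileHandle *) userData;
  short *out = (short *) outputBuffer;

  // readf() reads whole frames (one sample per channel).
  sf_count_t framesRead = file->readf( out, nBufferFrames );
  if ( framesRead < (sf_count_t) nBufferFrames ) {
    // Zero the unfilled tail and tell RtAudio to stop and drain the stream.
    memset( out + framesRead * file->channels(), 0,
            (nBufferFrames - framesRead) * file->channels() * sizeof(short) );
    return 1;
  }
  return 0;
}
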
//-----------------------------------------------------------------------------
// name: main()
// desc: entry point
//-----------------------------------------------------------------------------
int main( int argc, char ** argv )
{
  RtMidiIn *midiin = new RtMidiIn();

  // Check available ports.
  unsigned int nPorts = midiin->getPortCount();
  if ( nPorts == 0 ) {
    std::cout << "No ports available!\n";
    //goto cleanup;
  }
  midiin->openPort( 0 );

  // Set our callback function. This should be done immediately after
  // opening the port to avoid having incoming messages written to the
  // queue.
  midiin->setCallback( &mycallback );

  // Don't ignore sysex, timing, or active sensing messages.
  midiin->ignoreTypes( false, false, false );

  std::cout << "\nReading MIDI input ... press <enter> to quit.\n";
  char input;
  std::cin.get(input);

  // instantiate RtAudio object
  RtAudio audio;
  // variables
  unsigned int bufferBytes = 0;
  // frame size
  unsigned int numFrames = 512;

  // check for audio devices
  if( audio.getDeviceCount() < 1 )
  {
    // nopes
    cout << "no audio devices found!" << endl;
    exit( 1 );
  }

  // let RtAudio print messages to stderr.
  audio.showWarnings( true );

  // set input and output parameters
  RtAudio::StreamParameters iParams, oParams;
  iParams.deviceId = audio.getDefaultInputDevice();
  iParams.nChannels = MY_CHANNELS;
  iParams.firstChannel = 0;
  oParams.deviceId = audio.getDefaultOutputDevice();
  oParams.nChannels = MY_CHANNELS;
  oParams.firstChannel = 0;

  // create stream options
  RtAudio::StreamOptions options;

  // go for it
  try {
    // open a stream
    audio.openStream( &oParams, &iParams, MY_FORMAT, MY_SRATE,
                      &numFrames, &callme, NULL, &options );
  }
  catch( RtError& e )
  {
    // error!
    cout << e.getMessage() << endl;
    exit( 1 );
  }

  // compute
  bufferBytes = numFrames * MY_CHANNELS * sizeof(SAMPLE);

  // test RtAudio functionality for reporting latency.
  cout << "stream latency: " << audio.getStreamLatency() << " frames" << endl;

  for( int i = 0; i < MY_NUMSTRINGS; i++ )
  {
    // initialize
    g_ks[i].init( MY_SRATE*2, 440, MY_SRATE );
  }

  // go for it
  try {
    // start stream
    audio.startStream();

    char input;
    std::cout << "Press any key to quit ";
    std::cin.get(input);

    // stop the stream.
    audio.stopStream();
  }
  catch( RtError& e )
  {
    // print error message
    cout << e.getMessage() << endl;
    goto cleanup;
  }

cleanup:
  // close if open
  if( audio.isStreamOpen() )
    audio.closeStream();

  delete midiin;

  // done
  return 0;
}

// ========
// = Main =
// ========
// Entry point
int main (int argc, char *argv[])
{
  cout<<argc<<" "<<argv[0];
  if (argc>3) {cerr<<"\nERROR - wrong number of arguments\n";exit(1);}
  if (argc==3) g_audio_history = atoi(argv[2]);
  else g_audio_history = 30;
  if (argc>1) g_fft_history = atoi(argv[1]);
  else g_fft_history = 100;

  help();

  // RtAudio config + init

  // pointer to RtAudio object
  RtAudio * audio = NULL;

  // create the object
  try {
    audio = new RtAudio();
  }
  catch( RtError & err ) {
    err.printMessage();
    exit(1);
  }

  if( audio->getDeviceCount() < 1 ) {
    // nopes
    cout << "no audio devices found!" << endl;
    exit( 1 );
  }

  // let RtAudio print messages to stderr.
  audio->showWarnings( true );

  // set input and output parameters
  RtAudio::StreamParameters iParams, oParams;
  iParams.deviceId = audio->getDefaultInputDevice();
  iParams.nChannels = 1;
  iParams.firstChannel = 0;
  oParams.deviceId = audio->getDefaultOutputDevice();
  oParams.nChannels = 1;
  oParams.firstChannel = 0;

  // create stream options
  RtAudio::StreamOptions options;

  // set the callback and start stream
  try {
    audio->openStream( &oParams, &iParams, RTAUDIO_FLOAT64, MY_SRATE,
                       &g_buffSize, &audioCallback, NULL, &options);
    cerr << "Buffer size defined by RtAudio: " << g_buffSize << endl;

    // allocate the buffer for the fft
    g_fftBuff = new float[g_buffSize * ZPF];
    g_audioBuff = new float[g_buffSize * ZPF];
    if ( g_fftBuff == NULL ) {
      cerr << "Something went wrong when creating the fft and audio buffers" << endl;
      exit (1);
    }

    // allocate the buffer for the time domain window
    g_window = new float[g_buffSize];
    if ( g_window == NULL ) {
      cerr << "Something went wrong when creating the window" << endl;
      exit (1);
    }

    // create a hanning window
    make_window( g_window, g_buffSize );

    // start the audio stream
    audio->startStream();

    // test RtAudio functionality for reporting latency.
    cout << "stream latency: " << audio->getStreamLatency() << " frames" << endl;
  }
  catch( RtError & err ) {
    err.printMessage();
    goto cleanup;
  }

  // ============
  // = GL stuff =
  // ============

  // initialize GLUT
  glutInit( &argc, argv );
  // double buffer, use rgb color, enable depth buffer
  glutInitDisplayMode( GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH );
  // initialize the window size
  glutInitWindowSize( g_width, g_height );
  // set the window position
  glutInitWindowPosition( 100, 100 );
  // create the window
  glutCreateWindow( "Hello GL" );
  //glutEnterGameMode();

  // set the idle function - called when idle
  glutIdleFunc( idleFunc );
  // set the display function - called when redrawing
  glutDisplayFunc( displayFunc );
  // set the reshape function - called when client area changes
  glutReshapeFunc( reshapeFunc );
  // set the keyboard function - called on keyboard events
  glutKeyboardFunc( keyboardFunc );
  // set the mouse function - called on mouse stuff
  glutMouseFunc( mouseFunc );
  // set the special function - called on special keys events (fn, arrows, pgDown, etc)
  glutSpecialFunc( specialFunc );

  // do our own initialization
  initialize();

  // let GLUT handle the current thread from here
  glutMainLoop();

  // if we get here, stop!
  try {
    audio->stopStream();
  }
  catch( RtError & err ) {
    err.printMessage();
  }

  // Clean up
cleanup:
  if(audio) {
    audio->closeStream();
    delete audio;
  }

  return 0;
}

void init(int argc, char **argv)
{
  /////
  theString = new StringModel ( 1000, 0.5, 0.99999, 8 );

  // *** test the rtaudio callback
  if ( dac.getDeviceCount() < 1 ) {
    std::cout << "\nNo audio devices found!\n";
    exit( 0 );
  }

  parameters.deviceId = dac.getDefaultOutputDevice();
  parameters.nChannels = 2;
  parameters.firstChannel = 0;
  sampleRate = 44100;
  bufferFrames = 256; // 256 sample frames

  try {
    dac.openStream ( &parameters, NULL, RTAUDIO_FLOAT32, sampleRate,
                     &bufferFrames, StringModel::audioCallback, (void *)theString );
    dac.startStream();
  }
  catch ( RtError& e ) {
    std::cout << "\nexception on dac:\n";
    e.printMessage();
    exit(0);
  }

  //////

  GLfloat pos[] = {5.0, 5.0, 10.0, 0.0};
  glLightfv(GL_LIGHT0, GL_POSITION, pos);
  glEnable(GL_CULL_FACE);
  glEnable(GL_LIGHTING);
  glEnable(GL_LIGHT0);
  glEnable(GL_DEPTH_TEST);
  glEnable(GL_NORMALIZE);
  glEnable(GL_COLOR_MATERIAL);
  glEnable(GL_BLEND);
  glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);

  glEnable(GL_FOG);
  float FogCol[3]={0.0,0.0,0.0};
  glFogfv(GL_FOG_COLOR,FogCol);
  glFogi(GL_FOG_MODE, GL_LINEAR);
  glFogf(GL_FOG_START, 10.0f);
  glFogf(GL_FOG_END, 40.f);

  glClearColor (0.0, 0.0, 0.0, 0.0);

  set_to_ident(g_trackball_transform);

  std::cout << "\nPlaying ... press \n";
  std::cout << "t to tighten\n";
  std::cout << "l to loosen\n";
  std::cout << "p to pluck\n";
  std::cout << "r to reset\n";
  std::cout << "d to dump velocities\n";
  std::cout << "f/F to change vibrator freq\n";
  std::cout << "ESC to quit.\n";
}