int main() {
  // Set the global sample rate before creating class instances.
  Stk::setSampleRate( 44100.0 );

  SineWave sine;
  RtAudio dac;

  // Figure out how many bytes in an StkFloat and set up the RtAudio stream.
  RtAudio::StreamParameters parameters;
  parameters.deviceId = dac.getDefaultOutputDevice();
  //parameters.deviceId = 3;
  parameters.nChannels = 1;
  RtAudioFormat format = ( sizeof(StkFloat) == 8 ) ? RTAUDIO_FLOAT64 : RTAUDIO_FLOAT32;
  unsigned int bufferFrames = RT_BUFFER_SIZE;
  try {
    dac.openStream( &parameters, NULL, format, (unsigned int)Stk::sampleRate(), &bufferFrames, &tick, (void *)&sine );
  }
  catch ( RtAudioError &error ) {
    error.printMessage();
    goto cleanup;
  }

  // Configure the oscillator.
  sine.setFrequency( 440.0 );

  // Start the real-time audio stream.
  try {
    dac.startStream();
  }
  catch ( RtAudioError &error ) {
    error.printMessage();
    goto cleanup;
  }

  // User interface: block here until <enter> is pressed.
  char keyhit;
  std::cout << "\nPlaying ... press <enter> to quit.\n";
  std::cin.get( keyhit );

  // Shut down the output stream.
  try {
    dac.closeStream();
  }
  catch ( RtAudioError &error ) {
    error.printMessage();
  }

 cleanup:
  return 0;
}
void start(unsigned int bufferFrames = 512, unsigned int sampleRate = 44100) {
    this->bufferFrames = bufferFrames;
    this->sampleRate = sampleRate;

    if (dac.getDeviceCount() < 1) {
        std::cout << "\nNo audio devices found!\n";
        exit(0);
    }

    parameters.deviceId = (id == 1) ? 0 : 1;
    RtAudio::DeviceInfo info;
    info = dac.getDeviceInfo(parameters.deviceId);
    std::cout << "device = " << info.name << std::endl;
    //parameters.deviceId = dac.getDefaultOutputDevice();
    parameters.nChannels = 2;
    parameters.firstChannel = 0;

    //RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (default), No such file or directory.
    try {
        unsigned int got = bufferFrames;
        dac.openStream(&parameters, NULL, RTAUDIO_SINT16, sampleRate, &got, &process, (void *)&data);
        //dac.openStream(&parameters, NULL, RTAUDIO_FLOAT32, sampleRate, &got, &process, (void *)&data);
        dac.startStream();
        std::cout << "requested " << bufferFrames << " but got " << got << std::endl;
    } catch (RtAudioError &e) {
        e.printMessage();
        exit(0);
    }
}
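// The start() routine above registers a `process` callback and a `data` object
// that are not shown. A minimal, hypothetical sketch of what such a callback
// could look like for the RTAUDIO_SINT16 stereo stream opened there (assuming
// <cmath>, <cstdint> and <iostream> are included; the 44100 Hz rate and 440 Hz
// tone are placeholder values, not taken from the original code):
int process(void *outputBuffer, void * /*inputBuffer*/, unsigned int nBufferFrames,
            double /*streamTime*/, RtAudioStreamStatus status, void * /*data*/) {
    static double phase = 0.0;
    int16_t *out = (int16_t *)outputBuffer;
    if (status) std::cerr << "Stream underflow detected!" << std::endl;
    for (unsigned int i = 0; i < nBufferFrames; i++) {
        int16_t sample = (int16_t)(std::sin(phase) * 0.2 * 32767.0);
        *out++ = sample; // left
        *out++ = sample; // right
        phase += 2.0 * M_PI * 440.0 / 44100.0;
        if (phase >= 2.0 * M_PI) phase -= 2.0 * M_PI;
    }
    return 0;
}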
int callback( void *output_buffer, void *input_buffer, unsigned int n_buffer_frames,
              double stream_time, RtAudioStreamStatus status, void *data )
{
    RtAudio *audio = (RtAudio *)data;
    unsigned int sample_rate = audio->getStreamSampleRate();

    for (unsigned int i = 0; i < n_buffer_frames * 2;) {
        double fm_offset = 0;
        if (g_fm_on) {
            double fm_increment = (2.0 * M_PI) / sample_rate * g_fm_frequency;
            g_fm_phase += fm_increment;
            if (g_fm_phase > 2 * M_PI) {
                g_fm_phase -= 2 * M_PI;
            }
            fm_offset = g_active_fm_ugen(g_fm_phase, g_fm_width) * g_fm_index;
        }

        double increment = (2.0 * M_PI) / sample_rate * (g_frequency + fm_offset);
        g_phase += increment;
        if (g_phase > 2 * M_PI) {
            g_phase -= 2 * M_PI;
        }

        double samp = g_active_ugen(g_phase, g_width);
        if (g_modulate_input) {
            samp = ((double *)input_buffer)[i / 2] * samp;
        }
        ((double *)output_buffer)[i++] = samp;
        ((double *)output_buffer)[i++] = samp;
    }
    return 0;
}
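// The callback above expects a FLOAT64 duplex stream (mono input, interleaved
// stereo output) and receives the RtAudio instance itself as the user-data
// pointer so it can query the stream sample rate. A minimal registration
// sketch under those assumptions (device choices, the 44100 Hz rate and the
// 512-frame buffer are illustrative, and error handling is omitted):
int main() {
    RtAudio audio;
    RtAudio::StreamParameters out_params, in_params;
    out_params.deviceId = audio.getDefaultOutputDevice();
    out_params.nChannels = 2;                        // interleaved stereo output
    in_params.deviceId = audio.getDefaultInputDevice();
    in_params.nChannels = 1;                         // mono input, read as input_buffer[i / 2]
    unsigned int buffer_frames = 512;
    audio.openStream(&out_params, &in_params, RTAUDIO_FLOAT64, 44100,
                     &buffer_frames, &callback, (void *)&audio);
    audio.startStream();
    std::cout << "Playing ... press <enter> to quit.\n";
    std::cin.get();
    audio.closeStream();
    return 0;
}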
int main() {
  RtAudio dac;
  if ( dac.getDeviceCount() == 0 ) exit( 0 );

  RtAudio::StreamParameters parameters;
  parameters.deviceId = dac.getDefaultOutputDevice();
  parameters.nChannels = 2;
  unsigned int sampleRate = 44100;
  unsigned int bufferFrames = 256; // 256 sample frames

  RtAudio::StreamOptions options;
  options.flags = RTAUDIO_NONINTERLEAVED;

  try {
    dac.openStream( &parameters, NULL, RTAUDIO_FLOAT32, sampleRate,
                    &bufferFrames, &myCallback, NULL, &options );
  }
  catch ( RtError& e ) {
    std::cout << '\n' << e.getMessage() << '\n' << std::endl;
    exit( 0 );
  }

  return 0;
}
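// Because the stream above sets RTAUDIO_NONINTERLEAVED, the output buffer is
// organized channel by channel (nBufferFrames samples of channel 0 followed by
// nBufferFrames samples of channel 1) instead of frame by frame. A hypothetical
// sketch of a callback addressing that layout (not the original myCallback):
int myCallback(void *outputBuffer, void * /*inputBuffer*/, unsigned int nBufferFrames,
               double /*streamTime*/, RtAudioStreamStatus /*status*/, void * /*userData*/) {
    float *out = (float *)outputBuffer;
    float *left  = out;                  // channel 0 block
    float *right = out + nBufferFrames;  // channel 1 block follows channel 0
    for (unsigned int i = 0; i < nBufferFrames; i++) {
        left[i]  = 0.0f; // silence; a real callback would render audio here
        right[i] = 0.0f;
    }
    return 0;
}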
GOrgueSoundPort* GOrgueSoundRtPort::create(GOrgueSound* sound, wxString name)
{
    try
    {
        std::vector<RtAudio::Api> rtaudio_apis;
        RtAudio::getCompiledApi(rtaudio_apis);

        for (unsigned k = 0; k < rtaudio_apis.size(); k++)
        {
            RtAudio* audioDevice = 0;
            try
            {
                audioDevice = new RtAudio(rtaudio_apis[k]);
                for (unsigned i = 0; i < audioDevice->getDeviceCount(); i++)
                    if (getName(rtaudio_apis[k], audioDevice, i) == name)
                        return new GOrgueSoundRtPort(sound, name, rtaudio_apis[k]);
            }
            catch (RtAudioError &e)
            {
                wxString error = wxString::FromAscii(e.getMessage().c_str());
                wxLogError(_("RtAudio error: %s"), error.c_str());
            }
            if (audioDevice)
                delete audioDevice;
        }
    }
    catch (RtAudioError &e)
    {
        wxString error = wxString::FromAscii(e.getMessage().c_str());
        wxLogError(_("RtAudio error: %s"), error.c_str());
    }
    return NULL;
}
void slgAudio::info(){
    RtAudio *audioTemp = NULL;
    audioTemp = new RtAudio();

    unsigned int devices = audioTemp->getDeviceCount();
    RtAudio::DeviceInfo info;
    for (unsigned int i = 0; i < devices; i++){
        info = audioTemp->getDeviceInfo(i);
        // std::cout<<"default input: "<<m_audio->getDefaultInputDevice()<<std::endl;
        // std::cout<<"default output: "<<m_audio->getDefaultOutputDevice()<<std::endl;
        if (info.probed == true){
            std::cout<<"----------------------------- Device "<<i<<" ---------------------------"<<std::endl;
            if (info.isDefaultInput)
                std::cout << "--Default Input"<<std::endl;
            if (info.isDefaultOutput)
                std::cout << "--Default Output"<<std::endl;
            std::cout << "Name = " << info.name << '\n';
            std::cout << "Max Input Channels = " << info.inputChannels << '\n';
            std::cout << "Max Output Channels = " << info.outputChannels << '\n';
            std::cout << "Max Duplex Channels = " << info.duplexChannels << '\n';
        }
    }
    delete audioTemp;
    audioTemp = NULL;
}
/* returns 0 on failure */
int start_audio(AudioCallback _callback, int sample_rate, void *data)
{
    if(audio.getDeviceCount() < 1) {
        std::cout << "No audio devices found!\n";
        return 0;
    }

    RtAudio::StreamParameters iparams, oparams;

    /* configure input (microphone) */
    iparams.deviceId = audio.getDefaultInputDevice();
    iparams.nChannels = 1;
    iparams.firstChannel = 0;

    /* configure output */
    oparams.deviceId = audio.getDefaultOutputDevice();
    oparams.nChannels = 2;
    oparams.firstChannel = 0;

    unsigned int bufferFrames = 256;
    callback = _callback;

    try {
        audio.openStream(&oparams, &iparams, RTAUDIO_FLOAT64 /* double */,
                         sample_rate, &bufferFrames, &render, data);
        audio.startStream();
    }
    catch(RtError& e) {
        e.printMessage();
        return 0;
    }
    return 1;
}
void Audio::closeInputDevice()
{
    if (!adc.isStreamOpen())
        return;
    adc.stopStream();
    while (adc.isStreamRunning());
    adc.closeStream();
}
bool Audio::openAudioInputDevice(unsigned int device)
{
    RtAudio::StreamParameters p;
    // static so the pointer handed to the callback remains valid after this function returns
    static double data[2];
    unsigned int num = 0;

    if (inIsOpened)
        return false;

    for (unsigned int i = 0; i < adc.getDeviceCount(); i++) {
        if (adc.getDeviceInfo(i).inputChannels) {
            if (device == num) {
                p.deviceId = i;
                break;
            }
            num++;
        }
    }
    p.firstChannel = 0;
    p.nChannels = 2;

    try {
        adc.openStream(NULL, &p, RTAUDIO_FLOAT32, sampleRate, &bufferFrames, &audioInputCallback, (void*)&data);
        adc.startStream();
    } catch (RtAudioError& e) {
        inError = e.getMessage();
        return false;
    }

    inBuffer = (float*)calloc(bufferFrames, sizeof(float));
    inIsOpened = true;
    return true;
}
//---------------------------------------------------------
void ofSoundStreamListDevices(){
    RtAudio *audioTemp = 0;
    try {
        audioTemp = new RtAudio();
    } catch (RtError &error) {
        error.printMessage();
    }
    int devices = audioTemp->getDeviceCount();
    RtAudio::DeviceInfo info;
    for (int i = 0; i < devices; i++) {
        try {
            info = audioTemp->getDeviceInfo(i);
        } catch (RtError &error) {
            error.printMessage();
            break;
        }
        std::cout << "device = " << i << " (" << info.name << ")\n";
        if (info.isDefaultInput) std::cout << "----* default ----* \n";
        std::cout << "maximum output channels = " << info.outputChannels << "\n";
        std::cout << "maximum input channels = " << info.inputChannels << "\n";
        std::cout << "-----------------------------------------\n";
    }
    delete audioTemp;
}
void av_audio_start() {
    av_audio_get();

    if (rta.isStreamRunning()) {
        rta.stopStream();
    }
    if (rta.isStreamOpen()) {
        // close it:
        rta.closeStream();
    }

    unsigned int devices = rta.getDeviceCount();
    if (devices < 1) {
        printf("No audio devices found\n");
        return;
    }

    RtAudio::DeviceInfo info;
    RtAudio::StreamParameters iParams, oParams;

    printf("Available audio devices (%d):\n", devices);
    for (unsigned int i = 0; i < devices; i++) {
        info = rta.getDeviceInfo(i);
        printf("Device %d: %dx%d (%d) %s\n", i, info.inputChannels, info.outputChannels, info.duplexChannels, info.name.c_str());
    }

    printf("device %d\n", audio.indevice);
    info = rta.getDeviceInfo(audio.indevice);
    printf("Using audio input %d: %dx%d (%d) %s\n", audio.indevice, info.inputChannels, info.outputChannels, info.duplexChannels, info.name.c_str());
    audio.inchannels = info.inputChannels;
    iParams.deviceId = audio.indevice;
    iParams.nChannels = audio.inchannels;
    iParams.firstChannel = 0;

    info = rta.getDeviceInfo(audio.outdevice);
    printf("Using audio output %d: %dx%d (%d) %s\n", audio.outdevice, info.inputChannels, info.outputChannels, info.duplexChannels, info.name.c_str());
    audio.outchannels = info.outputChannels;
    oParams.deviceId = audio.outdevice;
    oParams.nChannels = audio.outchannels;
    oParams.firstChannel = 0;

    RtAudio::StreamOptions options;
    //options.flags |= RTAUDIO_NONINTERLEAVED;
    options.streamName = "av";

    try {
        rta.openStream( &oParams, &iParams, RTAUDIO_FLOAT32, audio.samplerate, &audio.blocksize, &av_rtaudio_callback, NULL, &options );
        rta.startStream();
        printf("Audio started\n");
    }
    catch ( RtError& e ) {
        fprintf(stderr, "%s\n", e.getMessage().c_str());
    }
}
bool DeviceManager::getAudioDevices(bool input, std::vector<Device>& devs)
{
    devs.clear();

#if defined(ANDROID)
    // Under Android, we don't access the device file directly.
    // Arbitrarily use 0 for the mic and 1 for the output.
    // These ids are used in MediaEngine::SetSoundDevices(in, out);
    // The strings are for human consumption.
    if (input) {
        devs.push_back(Device("audioin", "audiorecord", 0));
    } else {
        devs.push_back(Device("audioout", "audiotrack", 1));
    }
    return true;
#elif defined(HAVE_RTAUDIO)
    // Since we are using RtAudio for audio capture it's best to
    // use RtAudio to enumerate devices to ensure indexes match.
    RtAudio audio;

    // Determine the number of devices available
    auto ndevices = audio.getDeviceCount();
    TraceS(this) << "Get audio devices: " << ndevices << endl;

    // Scan through devices for various capabilities
    RtAudio::DeviceInfo info;
    for (unsigned i = 0; i < ndevices; i++) {
        try {
            info = audio.getDeviceInfo(i); // may throw RtAudioError

            TraceS(this) << "Device:"
                << "\n\tName: " << info.name
                << "\n\tOutput Channels: " << info.outputChannels
                << "\n\tInput Channels: " << info.inputChannels
                << "\n\tDuplex Channels: " << info.duplexChannels
                << "\n\tDefault Output: " << info.isDefaultOutput
                << "\n\tDefault Input: " << info.isDefaultInput
                << "\n\tProbed: " << info.probed << endl;

            if (info.probed == true && (
                (input && info.inputChannels > 0) ||
                (!input && info.outputChannels > 0))) {
                TraceS(this) << "Adding device: " << info.name << endl;
                Device dev((input ? "audioin" : "audioout"), i, info.name, "",
                           (input ? info.isDefaultInput : info.isDefaultOutput));
                devs.push_back(dev);
            }
        }
        catch (RtAudioError& e) {
            ErrorS(this) << "Cannot probe audio device: " << e.getMessage() << endl;
        }
    }

    return filterDevices(devs, kFilteredAudioDevicesName);
#endif
}
int main(int argc, const char * argv[])
{
    RtAudio dac;
    RtAudio::StreamParameters rtParams;
    rtParams.deviceId = dac.getDefaultOutputDevice();
    rtParams.nChannels = nChannels;

#if RASPI
    unsigned int sampleRate = 22000;
#else
    unsigned int sampleRate = 44100;
#endif

    unsigned int bufferFrames = 512; // 512 sample frames

    Tonic::setSampleRate(sampleRate);

    std::vector<Synth> synths;
    synths.push_back(*new BassDrum());
    synths.push_back(*new Snare());
    synths.push_back(*new HiHat());
    synths.push_back(*new Funky());

    // Test write pattern
    DrumMachine *drumMachine = new DrumMachine(synths);
    drumMachine->loadPattern(0);

    ControlMetro metro = ControlMetro().bpm(480);
    ControlCallback drumMachineTick = ControlCallback(&mixer, [&](ControlGeneratorOutput output){
        drumMachine->tick();
    }).input(metro);

    Generator mixedSignal;
    for(int i = 0; i < NUM_TRACKS; i++) {
        mixedSignal = mixedSignal + synths[i];
    }
    mixer.setOutputGen(mixedSignal);

    try {
        dac.openStream( &rtParams, NULL, RTAUDIO_FLOAT32, sampleRate, &bufferFrames, &renderCallback, NULL, NULL );
        dac.startStream();

        // Send a pointer to our global drumMachine instance
        // to the serial communications layer.
        listenForMessages( drumMachine );

        dac.stopStream();
    }
    catch ( RtError& e ) {
        std::cout << '\n' << e.getMessage() << '\n' << std::endl;
        exit( 0 );
    }

    return 0;
}
const char* getAudioDeviceName(unsigned int deviceId)
{
    RtAudio audio;
    std::string name = audio.getDeviceInfo(deviceId).name;
    unsigned long len = name.length();
    char* c = new char[len + 1];
    memcpy(c, name.c_str(), len + 1);
    return c;
}
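// The name returned above is copied into a buffer allocated with new[], so the
// caller owns it and must release it. A hypothetical usage fragment:
const char *name = getAudioDeviceName(0);
std::cout << "device 0: " << name << std::endl;
delete[] name; // matches the new[] inside getAudioDeviceName()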
void Audio::closeOutputDevice()
{
    if (!dac.isStreamOpen())
        return;
    dac.stopStream();
    while (dac.isStreamRunning());
    dac.closeStream();
    outIsOpened = false;
}
void stop() {
    try {
        // Stop the stream
        dac.stopStream();
    }
    catch (RtAudioError &e) {
        e.printMessage();
    }
    if (dac.isStreamOpen())
        dac.closeStream();
}
// Display the list of supported devices
void listDevices() {
    RtAudio audio;
    unsigned int devices = audio.getDeviceCount();
    RtAudio::DeviceInfo info;

    for(unsigned int i = 0; i < devices; i++) {
        info = audio.getDeviceInfo(i);

        std::cout << "============================" << std::endl;
        std::cout << "\nDevice ID:" << i << std::endl;
        std::cout << "Name:" << info.name << std::endl;

        if ( info.probed == false )
            std::cout << "Probe Status = Unsuccessful\n";
        else {
            std::cout << "Probe Status = Successful\n";
            std::cout << "Output Channels = " << info.outputChannels << '\n';
            std::cout << "Input Channels = " << info.inputChannels << '\n';
            std::cout << "Duplex Channels = " << info.duplexChannels << '\n';

            if ( info.isDefaultOutput ) {
                std::cout << "This is the default output device.\n";
            } else {
                std::cout << "This is NOT the default output device.\n";
            }
            if ( info.isDefaultInput ) {
                std::cout << "This is the default input device.\n";
            } else {
                std::cout << "This is NOT the default input device.\n";
            }

            if ( info.nativeFormats == 0 ) {
                std::cout << "No natively supported data formats(?)!";
            } else {
                std::cout << "Natively supported data formats:\n";
                if ( info.nativeFormats & RTAUDIO_SINT8 )   std::cout << "  8-bit int\n";
                if ( info.nativeFormats & RTAUDIO_SINT16 )  std::cout << "  16-bit int\n";
                if ( info.nativeFormats & RTAUDIO_SINT24 )  std::cout << "  24-bit int\n";
                if ( info.nativeFormats & RTAUDIO_SINT32 )  std::cout << "  32-bit int\n";
                if ( info.nativeFormats & RTAUDIO_FLOAT32 ) std::cout << "  32-bit float\n";
                if ( info.nativeFormats & RTAUDIO_FLOAT64 ) std::cout << "  64-bit float\n";
            }

            if ( info.sampleRates.size() < 1 ) {
                std::cout << "No supported sample rates found!";
            } else {
                std::cout << "Supported sample rates = ";
                for (unsigned int j = 0; j < info.sampleRates.size(); j++)
                    std::cout << info.sampleRates[j] << " ";
            }
            std::cout << std::endl;
        }
    }
}
unsigned int Audio::getAudioInputCount()
{
    unsigned int num = 0;
    unsigned int deviceCount = adc.getDeviceCount();
    for (unsigned int i = 0; i < deviceCount; i++) {
        if (adc.getDeviceInfo(i).inputChannels)
            num++;
    }
    return num;
}
int main(int argc, char *argv[])
{
    int buffer_size, fs, device = 0;
    RtAudio *audio;
    double *data;
    char input;

    // minimal command-line checking
    if (argc != 3 && argc != 4) usage();

    chans = (int) atoi(argv[1]);
    fs = (int) atoi(argv[2]);
    if ( argc == 4 )
        device = (int) atoi(argv[3]);

    // Open the realtime output device
    buffer_size = 1024;
    try {
        audio = new RtAudio(device, chans, 0, 0, FORMAT, fs, &buffer_size, 4);
    }
    catch (RtError &error) {
        error.printMessage();
        exit(EXIT_FAILURE);
    }

    data = (double *) calloc(chans, sizeof(double));

    try {
        audio->setStreamCallback(&saw, (void *)data);
        audio->startStream();
    }
    catch (RtError &error) {
        error.printMessage();
        goto cleanup;
    }

    std::cout << "\nPlaying ... press <enter> to quit (buffer size = " << buffer_size << ").\n";
    std::cin.get(input);

    // Stop the stream.
    try {
        audio->stopStream();
    }
    catch (RtError &error) {
        error.printMessage();
    }

 cleanup:
    audio->closeStream();
    delete audio;
    if (data) free(data);

    return 0;
}
void stop_audio(void)
{
    try {
        audio.stopStream();
    }
    catch(RtError& e) {
        e.printMessage();
    }
    if(audio.isStreamOpen())
        audio.closeStream();
}
int main( int argc, char *argv[] )
{
  if ( argc != 2 ) usage();

  // Set the global sample rate and rawwave path before creating class instances.
  Stk::setSampleRate( 44100.0 );
  Stk::setRawwavePath( "rawwaves/" );

  TickData data;
  RtAudio dac;

  // Figure out how many bytes in an StkFloat and set up the RtAudio stream.
  RtAudio::StreamParameters parameters;
  parameters.deviceId = dac.getDefaultOutputDevice();
  parameters.nChannels = 1;
  RtAudioFormat format = ( sizeof(StkFloat) == 8 ) ? RTAUDIO_FLOAT64 : RTAUDIO_FLOAT32;
  unsigned int bufferFrames = RT_BUFFER_SIZE;
  try {
    dac.openStream( &parameters, NULL, format, (unsigned int)Stk::sampleRate(), &bufferFrames, &tick, (void *)&data );
  }
  catch ( RtAudioError &error ) {
    error.printMessage();
    goto cleanup;
  }

  try {
    // Define and load the BeeThree instrument
    data.instrument = new BeeThree();
  }
  catch ( StkError & ) {
    goto cleanup;
  }

  if ( data.messager.setScoreFile( argv[1] ) == false )
    goto cleanup;

  try {
    dac.startStream();
  }
  catch ( RtAudioError &error ) {
    error.printMessage();
    goto cleanup;
  }

  // Block waiting until callback signals done.
  while ( !data.done )
    Stk::sleep( 100 );

  // Shut down the output stream.
  try {
    dac.closeStream();
  }
  catch ( RtAudioError &error ) {
    error.printMessage();
  }

 cleanup:
  delete data.instrument;
  return 0;
}
QString Audio::getInDeviceName(unsigned int device)
{
    unsigned int num = 0;
    for (unsigned int i = 0; i < adc.getDeviceCount(); i++) {
        if (adc.getDeviceInfo(i).inputChannels) {
            if (device == num)
                return QString::fromStdString(adc.getDeviceInfo(i).name);
            num++;
        }
    }
    return QObject::tr("Unknown audio input device");
}
void AudioThread::enumerateDevices(std::vector<RtAudio::DeviceInfo> &devs) {
    RtAudio endac;

    int numDevices = endac.getDeviceCount();

    for (int i = 0; i < numDevices; i++) {
        RtAudio::DeviceInfo info = endac.getDeviceInfo(i);

        devs.push_back(info);

        std::cout << std::endl;
        std::cout << "Audio Device #" << i << " " << info.name << std::endl;
        std::cout << "\tDefault Output? " << (info.isDefaultOutput ? "Yes" : "No") << std::endl;
        std::cout << "\tDefault Input? " << (info.isDefaultInput ? "Yes" : "No") << std::endl;
        std::cout << "\tInput channels: " << info.inputChannels << std::endl;
        std::cout << "\tOutput channels: " << info.outputChannels << std::endl;
        std::cout << "\tDuplex channels: " << info.duplexChannels << std::endl;

        std::cout << "\t" << "Native formats:" << std::endl;
        RtAudioFormat nFormats = info.nativeFormats;
        if (nFormats & RTAUDIO_SINT8) {
            std::cout << "\t\t8-bit signed integer." << std::endl;
        }
        if (nFormats & RTAUDIO_SINT16) {
            std::cout << "\t\t16-bit signed integer." << std::endl;
        }
        if (nFormats & RTAUDIO_SINT24) {
            std::cout << "\t\t24-bit signed integer." << std::endl;
        }
        if (nFormats & RTAUDIO_SINT32) {
            std::cout << "\t\t32-bit signed integer." << std::endl;
        }
        if (nFormats & RTAUDIO_FLOAT32) {
            std::cout << "\t\t32-bit float normalized between plus/minus 1.0." << std::endl;
        }
        if (nFormats & RTAUDIO_FLOAT64) {
            std::cout << "\t\t64-bit float normalized between plus/minus 1.0." << std::endl;
        }

        std::vector<unsigned int>::iterator srate;
        std::cout << "\t" << "Supported sample rates:" << std::endl;
        for (srate = info.sampleRates.begin(); srate != info.sampleRates.end(); srate++) {
            std::cout << "\t\t" << (*srate) << " Hz" << std::endl;
        }
        std::cout << std::endl;
    }
}
int main(int argc, char *argv[])
{
    int chans, fs, device = 0;
    RtAudio *audio;
    char input;

    // minimal command-line checking
    if (argc != 3 && argc != 4) usage();

    chans = (int) atoi(argv[1]);
    fs = (int) atoi(argv[2]);
    if ( argc == 4 )
        device = (int) atoi(argv[3]);

    // Open the realtime duplex device
    int buffer_size = 512;
    try {
        audio = new RtAudio(device, chans, device, chans, FORMAT, fs, &buffer_size, 8);
    }
    catch (RtError &error) {
        error.printMessage();
        exit(EXIT_FAILURE);
    }

    try {
        audio->setStreamCallback(&inout, NULL);
        audio->startStream();
    }
    catch (RtError &error) {
        error.printMessage();
        goto cleanup;
    }

    std::cout << "\nRunning ... press <enter> to quit (buffer size = " << buffer_size << ").\n";
    std::cin.get(input);

    try {
        audio->stopStream();
    }
    catch (RtError &error) {
        error.printMessage();
    }

 cleanup:
    audio->closeStream();
    delete audio;

    return 0;
}
/*
 * Class:     jass_render_RTAudioIn
 * Method:    readNativeSound
 * Signature: (J[SI)V
 */
JNIEXPORT void JNICALL Java_jass_render_RTAudioIn_readNativeSound(JNIEnv *env, jobject obj, jlong nativePointer, jshortArray buf, jint bufsz) {
    RtAudio *pRtAudioObject = (RtAudio *)nativePointer;
    static const int MAXBUF = 44100;
    static short localBuf[MAXBUF];
    if(bufsz > MAXBUF) {
        bufsz = MAXBUF; // better safe than sorry
    }
    pRtAudioObject->recordBuffer(localBuf, (int)bufsz);
    jsize len = env->GetArrayLength(buf);
    jshort *body = env->GetShortArrayElements(buf, 0);
    for (int i = 0; i < len; i++) {
        body[i] = localBuf[i];
    }
    env->ReleaseShortArrayElements(buf, body, 0);
}
void validateDevice(unsigned int deviceId, unsigned int deviceCount, RtAudio& adc, bool input) {
    if (deviceId >= deviceCount) {
        cout << "Device ID " << deviceId << " must be less than " << deviceCount << endl;
        exit(0);
    }

    RtAudio::DeviceInfo info = adc.getDeviceInfo(deviceId);
    if (!info.probed) {
        cout << "Could not probe device ID " << deviceId << endl;
        exit(0);
    }
    if (!isCdQuality(info)) {
        cout << "Device ID " << deviceId << " is not CD quality" << endl;
        exit(0);
    }

    if (input) {
        if (info.inputChannels == 0) {
            cout << "Device ID " << deviceId << " has no input channels" << endl;
            exit(0);
        }
    } else {
        if (info.outputChannels == 0) {
            cout << "Device ID " << deviceId << " has no output channels" << endl;
            exit(0);
        }
    }
}
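// validateDevice() depends on an isCdQuality() helper that is not shown here.
// A minimal sketch of what such a check might do, assuming "CD quality" means
// the device reports 44100 Hz among its supported rates and a native 16-bit
// signed integer format (the actual helper may use different criteria):
bool isCdQuality(const RtAudio::DeviceInfo &info) {
    bool has44100 = false;
    for (unsigned int j = 0; j < info.sampleRates.size(); j++)
        if (info.sampleRates[j] == 44100) has44100 = true;
    return has44100 && (info.nativeFormats & RTAUDIO_SINT16);
}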
//-----------------------------------------------------------------------------
// name: probe()
// desc: ...
//-----------------------------------------------------------------------------
void Digitalio::probe()
{
#ifndef __DISABLE_RTAUDIO__
    RtAudio * rta = NULL;
    RtAudio::DeviceInfo info;

    // allocate RtAudio
    try { rta = new RtAudio( ); }
    catch( RtError err )
    {
        // problem finding audio devices, most likely
        EM_error2b( 0, "%s", err.getMessage().c_str() );
        return;
    }

    // get count
    int devices = rta->getDeviceCount();
    EM_error2b( 0, "found %d device(s) ...", devices );
    // EM_error2( 0, "--------------------------" );
    EM_reset_msg();

    // loop
    for( int i = 0; i < devices; i++ )
    {
        try { info = rta->getDeviceInfo(i); }
        catch( RtError & error )
        {
            error.printMessage();
            break;
        }

        // print
        EM_error2b( 0, "------( audio device: %d )---------------", i+1 );
        print( info );
        // skip
        if( i < devices ) EM_error2( 0, "" );
        EM_reset_msg();
    }

    delete rta;
#endif // __DISABLE_RTAUDIO__

    return;
}
void init(){
    unsigned int sampleRate = 44100;
    unsigned int bufferFrames = 128;

    // init pd
    if(!lpd.init(0, 2, sampleRate)) {
        std::cerr << "Could not init pd" << std::endl;
        exit(1);
    }

    // receive messages from pd
    lpd.setReceiver(&pdObject);
    lpd.subscribe("cursor");

    // send DSP 1 message to pd
    lpd.computeAudio(true);

    // load the patch
    pd::Patch patch = lpd.openPatch("test.pd", "./pd");
    std::cout << patch << std::endl;

    // use the RtAudio API to connect to the default audio device
    if(audio.getDeviceCount() == 0){
        std::cout << "There are no available sound devices." << std::endl;
        exit(1);
    }

    RtAudio::StreamParameters parameters;
    parameters.deviceId = audio.getDefaultOutputDevice();
    parameters.nChannels = 2;

    RtAudio::StreamOptions options;
    options.streamName = "libpd rtaudio test";
    options.flags = RTAUDIO_SCHEDULE_REALTIME;
    if(audio.getCurrentApi() != RtAudio::MACOSX_CORE) {
        options.flags |= RTAUDIO_MINIMIZE_LATENCY; // CoreAudio doesn't seem to like this
    }

    try {
        audio.openStream( &parameters, NULL, RTAUDIO_FLOAT32, sampleRate, &bufferFrames, &audioCallback, NULL, &options );
        audio.startStream();
    }
    catch(RtAudioError& e) {
        std::cerr << e.getMessage() << std::endl;
        exit(1);
    }
}
QHash<int, QString> BleAudioCapture::availableDevices()
{
    RtAudio rtAudio;
    int deviceCount = rtAudio.getDeviceCount();
    RtAudio::DeviceInfo info;

    QHash<int, QString> devices;
    for (int i = 0; i < deviceCount; ++i) {
        info = rtAudio.getDeviceInfo(i);
        if (info.inputChannels > 0) {
            devices.insert(i, QString::fromStdString(info.name));
        }
    }

    return devices;
}
av_Audio * av_audio_get() {
    static bool initialized = false;
    if (!initialized) {
        initialized = true;

        rta.showWarnings( true );

        // defaults:
        audio.samplerate = 44100;
        audio.blocksize = 256;
        audio.inchannels = 2;
        audio.outchannels = 2;
        audio.time = 0;
        audio.lag = 0.04;
        audio.indevice = rta.getDefaultInputDevice();
        audio.outdevice = rta.getDefaultOutputDevice();

        /*
        audio.msgbuffer.size = AV_AUDIO_MSGBUFFER_SIZE_DEFAULT;
        audio.msgbuffer.read = 0;
        audio.msgbuffer.write = 0;
        audio.msgbuffer.data = (unsigned char *)malloc(audio.msgbuffer.size);
        */

        audio.onframes = 0;

        // one second of ringbuffer:
        int blockspersecond = audio.samplerate / audio.blocksize;
        audio.blocks = blockspersecond + 1;
        audio.blockstep = audio.blocksize * audio.outchannels;
        int len = audio.blockstep * audio.blocks;
        audio.buffer = (float *)calloc(len, sizeof(float));
        audio.blockread = 0;
        audio.blockwrite = 0;

        printf("audio initialized\n");

        //AL = lua_open();
        //av_init_lua();
        // unique to audio thread:
        //if (luaL_dostring(AL, "require 'audioprocess'")) {
        //    printf("error: %s\n", lua_tostring(AL, -1));
        //    initialized = false;
        //}
    }
    return &audio;
}