Example #1
File: search.cpp Project: rbtsx/latency
  void start(unsigned int bufferFrames = 512, unsigned int sampleRate = 44100) {
    this->bufferFrames = bufferFrames;
    this->sampleRate = sampleRate;

    if (dac.getDeviceCount() < 1) {
      std::cout << "\nNo audio devices found!\n";
      exit(0);
    }

    parameters.deviceId = (id == 1) ? 0 : 1;
    RtAudio::DeviceInfo info;
    info = dac.getDeviceInfo(parameters.deviceId);
    std::cout << "device = " << info.name << std::endl;
    //parameters.deviceId = dac.getDefaultOutputDevice();
    parameters.nChannels = 2;
    parameters.firstChannel = 0;

//RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (default), No such file or directory.

    try {
      unsigned int got = bufferFrames;
      dac.openStream(&parameters, NULL, RTAUDIO_SINT16, sampleRate, &got, &process, (void *)&data);
      //dac.openStream(&parameters, NULL, RTAUDIO_FLOAT32, sampleRate, &got, &process, (void *)&data);
      dac.startStream();
      std::cout << "requested " << bufferFrames << " but got " << got << std::endl;
    } catch (RtAudioError &e) {
      e.printMessage();
      exit(0);
    }
  }
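The process() callback and data object passed to openStream() above are defined elsewhere in that project. As a reference only, a minimal sketch of the callback signature RtAudio expects for the &process argument (here it just fills the interleaved 16-bit output buffer with silence; the real callback is not shown in the source):

int process(void *outputBuffer, void *inputBuffer, unsigned int nBufferFrames,
            double streamTime, RtAudioStreamStatus status, void *userData) {
  // The stream was opened with RTAUDIO_SINT16 and 2 channels, so the output
  // buffer holds nBufferFrames * 2 interleaved 16-bit samples.
  short *out = (short *) outputBuffer;
  for (unsigned int i = 0; i < nBufferFrames * 2; i++)
    out[i] = 0;
  return 0; // return 0 to keep the stream running
}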
Example #2
void slgAudio::info(){
    RtAudio *audioTemp = NULL;
    audioTemp = new RtAudio();
    unsigned int devices = audioTemp->getDeviceCount();
    RtAudio::DeviceInfo info;
    
    for (unsigned int i = 0; i < devices; i++){
        info = audioTemp->getDeviceInfo(i);
        // std::cout<<"default input: "<<m_audio->getDefaultInputDevice()<<std::endl;
        // std::cout<<"default output: "<<m_audio->getDefaultOutputDevice()<<std::endl;
        if (info.probed ==true){
            std::cout<<"----------------------------- Device "<<i<<" ---------------------------"<<std::endl;
            if (info.isDefaultInput)
                std::cout << "--Default Input"<<std::endl;
            if (info.isDefaultOutput)
                std::cout << "--Default Output"<<std::endl;      
            std::cout << "Name = " << info.name << '\n';
            std::cout << "Max Input Channels = " << info.inputChannels << '\n';
            std::cout << "Max Output Channels = " << info.outputChannels << '\n';
            std::cout << "Max Duplex Channels = " << info.duplexChannels << '\n';
        }
    }
    delete audioTemp;
    audioTemp = NULL;
}
Example #3
GOrgueSoundPort* GOrgueSoundRtPort::create(GOrgueSound* sound, wxString name)
{
	try
	{
		std::vector<RtAudio::Api> rtaudio_apis;
		RtAudio::getCompiledApi(rtaudio_apis);

		for (unsigned k = 0; k < rtaudio_apis.size(); k++)
		{
			RtAudio* audioDevice = 0;

			try
			{
				audioDevice = new RtAudio(rtaudio_apis[k]);
				for (unsigned i = 0; i < audioDevice->getDeviceCount(); i++)
					if (getName(rtaudio_apis[k], audioDevice, i) == name)
						return new GOrgueSoundRtPort(sound, name, rtaudio_apis[k]);

			}
			catch (RtAudioError &e)
			{
				wxString error = wxString::FromAscii(e.getMessage().c_str());
				wxLogError(_("RtAudio error: %s"), error.c_str());
			}
			if (audioDevice)
				delete audioDevice;
		}
	}
	catch (RtAudioError &e)
	{
		wxString error = wxString::FromAscii(e.getMessage().c_str());
		wxLogError(_("RtAudio error: %s"), error.c_str());
	}
	return NULL;
}
int main()
{
    RtAudio dac;
    if ( dac.getDeviceCount() == 0 ) exit( 0 );

    RtAudio::StreamParameters parameters;
    parameters.deviceId = dac.getDefaultOutputDevice();
    parameters.nChannels = 2;
    unsigned int sampleRate = 44100;
    unsigned int bufferFrames = 256; // 256 sample frames

    RtAudio::StreamOptions options;
    options.flags = RTAUDIO_NONINTERLEAVED;

    try {
        dac.openStream( &parameters, NULL, RTAUDIO_FLOAT32,
                        sampleRate, &bufferFrames, &myCallback, NULL, &options );
    }
    catch ( RtError& e ) {
        std::cout << '\n' << e.getMessage() << '\n' << std::endl;
        exit( 0 );
    }

    return 0;
}
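myCallback() is not shown above. Because the stream is opened with RTAUDIO_NONINTERLEAVED, each channel occupies its own contiguous block of nBufferFrames samples rather than alternating sample by sample; a sketch (an assumption, not the original callback) of a compatible callback:

int myCallback(void *outputBuffer, void *inputBuffer, unsigned int nBufferFrames,
               double streamTime, RtAudioStreamStatus status, void *userData)
{
    float *out   = (float *) outputBuffer;   // RTAUDIO_FLOAT32
    float *left  = out;                      // channel 0 block
    float *right = out + nBufferFrames;      // channel 1 block
    for (unsigned int i = 0; i < nBufferFrames; i++) {
        left[i]  = 0.0f;                     // write per-channel blocks,
        right[i] = 0.0f;                     // not interleaved frames
    }
    return 0;
}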
Example #5
/* returns 0 on failure */
int
start_audio(AudioCallback _callback, int sample_rate, void *data)
{
	if(audio.getDeviceCount() < 1) {
		std::cout << "No audio devices found!\n";
		return 0;
	}
	
	RtAudio::StreamParameters iparams, oparams;
	
	/* configure input (microphone) */
	iparams.deviceId = audio.getDefaultInputDevice();
	iparams.nChannels = 1;
	iparams.firstChannel = 0;
	
	/* configure output */
	oparams.deviceId = audio.getDefaultOutputDevice();
	oparams.nChannels = 2;
	oparams.firstChannel = 0;
	unsigned int bufferFrames = 256;
	
	callback = _callback;
	
	try {
		audio.openStream(&oparams, &iparams, RTAUDIO_FLOAT64 /* double */, sample_rate, &bufferFrames, &render, data);
		audio.startStream();
	} catch(RtError& e) {
		e.printMessage();
		return 0;
	}
	
	return 1;
}
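The render() function registered above is not part of this listing; presumably it forwards the buffers to the stored AudioCallback. A minimal pass-through sketch, assuming the mono input is simply copied to both output channels (interleaved RTAUDIO_FLOAT64, 1 input channel, 2 output channels):

int render(void *outputBuffer, void *inputBuffer, unsigned int nFrames,
           double streamTime, RtAudioStreamStatus status, void *userData)
{
	double *out = (double *) outputBuffer;  /* stereo, interleaved */
	double *in  = (double *) inputBuffer;   /* mono */
	for (unsigned int i = 0; i < nFrames; i++) {
		out[2 * i]     = in[i];
		out[2 * i + 1] = in[i];
	}
	return 0;
}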
Example #6
File: audio.cpp Project: hzoli17/ZSFEdit
bool Audio::openAudioInputDevice(unsigned int device)
{
    RtAudio::StreamParameters p;
    double data[2];
    unsigned int num = 0;
    if (inIsOpened) return false;
    for (unsigned int i=0; i<adc.getDeviceCount(); i++)
    {
        if (adc.getDeviceInfo(i).inputChannels)
        {
            if (device == num)
            {
                p.deviceId = i;
                break;
            }
            num++;
        }
    }
    p.firstChannel = 0;
    p.nChannels = 2;
    try
    {
        adc.openStream(NULL, &p, RTAUDIO_FLOAT32, sampleRate, &bufferFrames, &audioInputCallback, (void*)&data);
        adc.startStream();
    }
    catch (RtAudioError& e)
    {
        inError = e.getMessage();
        return false;
    }
    inBuffer=(float*)calloc(bufferFrames, sizeof(float));
    inIsOpened=true;
    return true;
}
//---------------------------------------------------------
void ofSoundStreamListDevices(){
	RtAudio *audioTemp = 0;
	try {
		audioTemp = new RtAudio();
	} catch (RtError &error) {
		error.printMessage();
		return;
	}
	int devices = audioTemp->getDeviceCount();
	RtAudio::DeviceInfo info;
	for (int i=0; i< devices; i++) {
		try {
			info = audioTemp->getDeviceInfo(i);
		} catch (RtError &error) {
			error.printMessage();
			break;
		}
		std::cout << "device = " << i << " (" << info.name << ")\n";
		if (info.isDefaultInput) std::cout << "----* default ----* \n";
		std::cout << "maximum output channels = " << info.outputChannels << "\n";
		std::cout << "maximum input channels = " << info.inputChannels << "\n";
		std::cout << "-----------------------------------------\n";

	}
	delete audioTemp;
}
bool DeviceManager::getAudioDevices(bool input, std::vector<Device>& devs)
{
    devs.clear();

#if defined(ANDROID)
    // Under Android, we don't access the device file directly.
    // Arbitrary use 0 for the mic and 1 for the output.
    // These ids are used in MediaEngine::SetSoundDevices(in, out);
    // The strings are for human consumption.
    if (input) {
        devs.push_back(Device("audioin", "audiorecord", 0));
    } else {
        devs.push_back(Device("audioout", "audiotrack", 1));
    }
    return true;
#elif defined(HAVE_RTAUDIO)

    // Since we are using RtAudio for audio capture it's best to
    // use RtAudio to enumerate devices to ensure indexes match.
    RtAudio audio;

    // Determine the number of devices available
    auto ndevices = audio.getDeviceCount();
    TraceS(this) << "Get audio devices: " << ndevices << endl;

    // Scan through devices for various capabilities
    RtAudio::DeviceInfo info;
    for (unsigned i = 0; i < ndevices; i++) {
        try {
            info = audio.getDeviceInfo(i);    // may throw RtAudioError

            TraceS(this) << "Device:"
                << "\n\tName: " << info.name
                << "\n\tOutput Channels: " << info.outputChannels
                << "\n\tInput Channels: " << info.inputChannels
                << "\n\tDuplex Channels: " << info.duplexChannels
                << "\n\tDefault Output: " << info.isDefaultOutput
                << "\n\tDefault Input: " << info.isDefaultInput
                << "\n\tProbed: " << info.probed
                << endl;

            if (info.probed == true && (
                (input && info.inputChannels > 0) ||
                (!input && info.outputChannels > 0))) {

                TraceS(this) << "Adding device: " << info.name << endl;
                Device dev((input ? "audioin" : "audioout"), i, info.name, "",
                    (input ? info.isDefaultInput : info.isDefaultOutput));
                devs.push_back(dev);
            }
        }
        catch (RtAudioError& e) {
            ErrorS(this) << "Cannot probe audio device: " << e.getMessage() << endl;
        }
    }

    return filterDevices(devs, kFilteredAudioDevicesName);
#endif
}
Example #9
void av_audio_start() {
	av_audio_get();
	
	if (rta.isStreamRunning()) {
		rta.stopStream();
	}
	if (rta.isStreamOpen()) {
		// close it:
		rta.closeStream();
	}	
	
	unsigned int devices = rta.getDeviceCount();
	if (devices < 1) {
		printf("No audio devices found\n");
		return;
	}
	
	RtAudio::DeviceInfo info;
	RtAudio::StreamParameters iParams, oParams;
	
	printf("Available audio devices (%d):\n", devices);
	for (unsigned int i=0; i<devices; i++) {
		info = rta.getDeviceInfo(i);
		printf("Device %d: %dx%d (%d) %s\n", i, info.inputChannels, info.outputChannels, info.duplexChannels, info.name.c_str());
	}
	
	printf("device %d\n", audio.indevice);
	
	info = rta.getDeviceInfo(audio.indevice);
	printf("Using audio input %d: %dx%d (%d) %s\n", audio.indevice, info.inputChannels, info.outputChannels, info.duplexChannels, info.name.c_str());
	
	audio.inchannels = info.inputChannels;
	
	iParams.deviceId = audio.indevice;
	iParams.nChannels = audio.inchannels;
	iParams.firstChannel = 0;
	
	info = rta.getDeviceInfo(audio.outdevice);
	printf("Using audio output %d: %dx%d (%d) %s\n", audio.outdevice, info.inputChannels, info.outputChannels, info.duplexChannels, info.name.c_str());
	
	audio.outchannels = info.outputChannels;
	
	oParams.deviceId = audio.outdevice;
	oParams.nChannels = audio.outchannels;
	oParams.firstChannel = 0;

	RtAudio::StreamOptions options;
	//options.flags |= RTAUDIO_NONINTERLEAVED;
	options.streamName = "av";
	
	try {
		rta.openStream( &oParams, &iParams, RTAUDIO_FLOAT32, audio.samplerate, &audio.blocksize, &av_rtaudio_callback, NULL, &options );
		rta.startStream();
		printf("Audio started\n");
	}
	catch ( RtError& e ) {
		fprintf(stderr, "%s\n", e.getMessage().c_str());
	}
}
Example #10
int main( int argc, char *argv[] )
{
  unsigned int channels, fs, bufferFrames, device = 0, offset = 0;
  char *file;

  // minimal command-line checking
  if ( argc < 4 || argc > 6 ) usage();

  RtAudio dac;
  if ( dac.getDeviceCount() < 1 ) {
    std::cout << "\nNo audio devices found!\n";
    exit( 0 );
  }

  channels = (unsigned int) atoi( argv[1]) ;
  fs = (unsigned int) atoi( argv[2] );
  file = argv[3];
  if ( argc > 4 )
    device = (unsigned int) atoi( argv[4] );
  if ( argc > 5 )
    offset = (unsigned int) atoi( argv[5] );

  OutputData data;
  data.fd = fopen( file, "rb" );
  if ( !data.fd ) {
    std::cout << "Unable to find or open file!\n";
    exit( 1 );
  }

  // Set our stream parameters for output only.
  bufferFrames = 512;
  RtAudio::StreamParameters oParams;
  oParams.deviceId = device;
  oParams.nChannels = channels;
  oParams.firstChannel = offset;

  data.channels = channels;
  try {
    dac.openStream( &oParams, NULL, FORMAT, fs, &bufferFrames, &output, (void *)&data );
    dac.startStream();
  }
  catch ( RtError& e ) {
    std::cout << '\n' << e.getMessage() << '\n' << std::endl;
    goto cleanup;
  }

  std::cout << "\nPlaying raw file " << file << " (buffer frames = " << bufferFrames << ")." << std::endl;
  while ( 1 ) {
    SLEEP( 100 ); // wake every 100 ms to check if we're done
    if ( dac.isStreamRunning() == false ) break;
  }

 cleanup:
  fclose( data.fd );
  dac.closeStream();

  return 0;
}
// Display the list of supported devices
void listDevices() {
    RtAudio audio;
    
    unsigned int devices = audio.getDeviceCount();
    RtAudio::DeviceInfo info;
    
    for(unsigned int i=0; i<devices; i++) {
        info = audio.getDeviceInfo(i);
        
        std::cout << "============================" << std::endl;
        std::cout << "\nDevide ID:" << i << std::endl;
        std::cout << "Name:" << info.name << std::endl;
        if ( info.probed == false )
            std::cout << "Probe Status = UNsuccessful\n";
        else {
            std::cout << "Probe Status = Successful\n";
            std::cout << "Output Channels = " << info.outputChannels << '\n';
            std::cout << "Input Channels = " << info.inputChannels << '\n';
            std::cout << "Duplex Channels = " << info.duplexChannels << '\n';
            if ( info.isDefaultOutput ) {
                std::cout << "This is the default output device.\n";
            } else {
                std::cout << "This is NOT the default output device.\n";
            }
            if ( info.isDefaultInput ) { std::cout << "This is the default input device.\n";
            } else {
                std::cout << "This is NOT the default input device.\n";
            }
            if ( info.nativeFormats == 0 ) {
                std::cout << "No natively supported data formats(?)!";
            } else {
                std::cout << "Natively supported data formats:\n";
                if ( info.nativeFormats & RTAUDIO_SINT8 )
                    std::cout << "  8-bit int\n";
                if ( info.nativeFormats & RTAUDIO_SINT16 )
                    std::cout << "  16-bit int\n";
                if ( info.nativeFormats & RTAUDIO_SINT24 )
                    std::cout << "  24-bit int\n";
                if ( info.nativeFormats & RTAUDIO_SINT32 )
                    std::cout << "  32-bit int\n";
                if ( info.nativeFormats & RTAUDIO_FLOAT32 )
                    std::cout << "  32-bit float\n";
                if ( info.nativeFormats & RTAUDIO_FLOAT64 )
                    std::cout << "  64-bit float\n";
            }
            if ( info.sampleRates.size() < 1 ) {
                std::cout << "No supported sample rates found!";
            } else {
                std::cout << "Supported sample rates = ";
                for (unsigned int j=0; j<info.sampleRates.size(); j++)
                    std::cout << info.sampleRates[j] << " ";
            }
            std::cout << std::endl;
        }
    }
}
Example #12
int main(int argc, char** argv)
{

  if (argc != 2) {
    printf("Usage: synth file.sf2\n");
    exit(0);
  }

  LightFluidSynth *usynth;

  usynth = new LightFluidSynth();

  usynth->loadSF2(argv[1]);
//  usynth->loadSF2("tim.sf2");

  RtMidiIn *midiIn = new RtMidiIn();
  if (midiIn->getPortCount() == 0) {
    std::cout << "No MIDI ports available!\n";
  }
  midiIn->openPort(0);
  midiIn->setCallback( &midiCallback, (void *)usynth );
  midiIn->ignoreTypes( false, false, false );

//   RtAudio dac(RtAudio::LINUX_PULSE);
  RtAudio dac;
  RtAudio::StreamParameters rtParams;

  // Determine the number of devices available
  unsigned int devices = dac.getDeviceCount();
  // Scan through devices for various capabilities
  RtAudio::DeviceInfo info;
  for ( unsigned int i = 0; i < devices; i++ ) {
    info = dac.getDeviceInfo( i );
    if ( info.probed == true ) {
      std::cout << "device " << " = " << info.name;
      std::cout << ": maximum output channels = " << info.outputChannels << "\n";
    }
  }
//  rtParams.deviceId = 3;
  rtParams.deviceId = dac.getDefaultOutputDevice();
  rtParams.nChannels = 2;
  unsigned int bufferFrames = FRAME_SIZE;

  RtAudio::StreamOptions options;
  options.flags = RTAUDIO_SCHEDULE_REALTIME;

  dac.openStream( &rtParams, NULL, AUDIO_FORMAT, SAMPLE_RATE, &bufferFrames, &audioCallback, (void *)usynth, &options );
  dac.startStream();

  printf("\n\nPress Enter to stop\n\n");
  cin.get();
  dac.stopStream();

  delete(usynth);
  return 0;
}
Example #13
File: audio.cpp Project: hzoli17/ZSFEdit
unsigned int Audio::getAudioInputCount()
{
    unsigned int num = 0;
    unsigned int deviceCount=adc.getDeviceCount();
    for (unsigned int i=0; i<deviceCount; i++)
    {
        if(adc.getDeviceInfo(i).inputChannels) num++;
    }
    return num;
}
Example #14
File: audio.cpp Project: hzoli17/ZSFEdit
QString Audio::getInDeviceName(unsigned int device)
{
    unsigned int num = 0;
    for (unsigned int i=0; i<adc.getDeviceCount(); i++)
    {
        if (adc.getDeviceInfo(i).inputChannels)
        {
            if (device == num) return QString::fromStdString(adc.getDeviceInfo(i).name);
            num++;
        }
    }
    return QObject::tr("Unknown audio input device");
}
Example #15
void AudioThread::enumerateDevices(std::vector<RtAudio::DeviceInfo> &devs) {
    RtAudio endac;

    int numDevices = endac.getDeviceCount();

    for (int i = 0; i < numDevices; i++) {
        RtAudio::DeviceInfo info = endac.getDeviceInfo(i);

        devs.push_back(info);

        std::cout << std::endl;

        std::cout << "Audio Device #" << i << " " << info.name << std::endl;
        std::cout << "\tDefault Output? " << (info.isDefaultOutput ? "Yes" : "No") << std::endl;
        std::cout << "\tDefault Input? " << (info.isDefaultOutput ? "Yes" : "No") << std::endl;
        std::cout << "\tInput channels: " << info.inputChannels << std::endl;
        std::cout << "\tOutput channels: " << info.outputChannels << std::endl;
        std::cout << "\tDuplex channels: " << info.duplexChannels << std::endl;

        std::cout << "\t" << "Native formats:" << std::endl;
        RtAudioFormat nFormats = info.nativeFormats;
        if (nFormats & RTAUDIO_SINT8) {
            std::cout << "\t\t8-bit signed integer." << std::endl;
        }
        if (nFormats & RTAUDIO_SINT16) {
            std::cout << "\t\t16-bit signed integer." << std::endl;
        }
        if (nFormats & RTAUDIO_SINT24) {
            std::cout << "\t\t24-bit signed integer." << std::endl;
        }
        if (nFormats & RTAUDIO_SINT32) {
            std::cout << "\t\t32-bit signed integer." << std::endl;
        }
        if (nFormats & RTAUDIO_FLOAT32) {
            std::cout << "\t\t32-bit float normalized between plus/minus 1.0." << std::endl;
        }
        if (nFormats & RTAUDIO_FLOAT64) {
            std::cout << "\t\t32-bit float normalized between plus/minus 1.0." << std::endl;
        }

        std::vector<unsigned int>::iterator srate;

        std::cout << "\t" << "Supported sample rates:" << std::endl;

        for (srate = info.sampleRates.begin(); srate != info.sampleRates.end(); srate++) {
            std::cout << "\t\t" << (*srate) << "hz" << std::endl;
        }

        std::cout << std::endl;
    }
}
Example #16
int main()
{
	RtAudio dac;

	//std::cout << dac.getDeviceCount() << std::endl;   //2
	if (dac.getDeviceCount() < 1) {
		std::cout << "\nNo audio devices found!\n";
		exit(0);
	}
	RtAudio::StreamParameters parameters;
	//std::cout << dac.getDefaultOutputDevice() << std::endl;
	parameters.deviceId = dac.getDefaultOutputDevice();    //0
	parameters.nChannels = 2;
	parameters.firstChannel = 0;
	unsigned int sampleRate = 44100;
	unsigned int bufferFrames = 256; // 256 sample frames

	RtAudio::StreamParameters input;
	input.deviceId = dac.getDefaultInputDevice();
	input.nChannels = 2;
	input.firstChannel = 0;

	double data[2];
	try {
		dac.openStream(&parameters, &input, RTAUDIO_SINT16,
			sampleRate, &bufferFrames, &saw, (void *)&data);
		dac.startStream();
	}
	catch (RtAudioError& e) {
		e.printMessage();
		exit(0);
	}

	char input1;
	std::cout << "\nPlaying ... press <enter> to quit.\n";
	std::cin.get(input1);
	
	try {
		// Stop the stream
		dac.stopStream();
	}
	catch (RtAudioError& e) {
		e.printMessage();
	}
	if (dac.isStreamOpen()) dac.closeStream();
	system("pause");
	
	return 0;
}
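The saw() callback is not shown; the two doubles passed as user data suggest the usual RtAudio tutorial pattern of keeping one running phase per channel. A sketch adapted to the RTAUDIO_SINT16 format requested above (an illustration only, not the project's actual callback):

int saw(void *outputBuffer, void *inputBuffer, unsigned int nBufferFrames,
        double streamTime, RtAudioStreamStatus status, void *userData)
{
	short *buffer = (short *) outputBuffer;
	double *lastValues = (double *) userData;   // one phase value per channel
	for (unsigned int i = 0; i < nBufferFrames; i++) {
		for (unsigned int j = 0; j < 2; j++) {
			*buffer++ = (short) (lastValues[j] * 32767.0);
			lastValues[j] += 0.005 * (j + 1 + (j * 0.1));
			if (lastValues[j] >= 1.0) lastValues[j] -= 2.0;  // wrap the ramp
		}
	}
	return 0;
}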
Example #17
//-----------------------------------------------------------------------------
// name: probe()
// desc: ...
//-----------------------------------------------------------------------------
void Digitalio::probe()
{
#ifndef __DISABLE_RTAUDIO__
    RtAudio * rta = NULL;
    RtAudio::DeviceInfo info;
    
    // allocate RtAudio
    try { rta = new RtAudio( ); }
    catch( RtError err )
    {
        // problem finding audio devices, most likely
        EM_error2b( 0, "%s", err.getMessage().c_str() );
        return;
    }

    // get count    
    int devices = rta->getDeviceCount();
    EM_error2b( 0, "found %d device(s) ...", devices );
    // EM_error2( 0, "--------------------------" );
    
    EM_reset_msg();
    
    // loop
    for( int i = 0; i < devices; i++ )
    {
        try { info = rta->getDeviceInfo(i); }
        catch( RtError & error )
        {
            error.printMessage();
            break;
        }
        
        // print
        EM_error2b( 0, "------( audio device: %d )---------------", i+1 );
        print( info );
        // skip
        if( i < devices ) EM_error2( 0, "" );
        
        EM_reset_msg();
    }

    delete rta;
#endif // __DISABLE_RTAUDIO__

    return;
}
QHash<int, QString> BleAudioCapture::availableDevices()
{
    RtAudio rtAudio;
    int deviceCount = rtAudio.getDeviceCount();
    RtAudio::DeviceInfo info;
    QHash<int, QString> devices;

    for (int i = 0; i < deviceCount; ++i) {
        info = rtAudio.getDeviceInfo(i);

        if (info.inputChannels > 0) {
            devices.insert(i, QString::fromStdString(info.name));
        }
    }

    return devices;
}
Example #19
void init(){
   unsigned int sampleRate = 44100;
   unsigned int bufferFrames = 128;

   // init pd
   if(!lpd.init(0, 2, sampleRate)) {
      std::cerr << "Could not init pd" << std::endl;
      exit(1);
   }

   // receive messages from pd
   lpd.setReceiver(&pdObject);
   lpd.subscribe("cursor");

   // send DSP 1 message to pd
   lpd.computeAudio(true);

   // load the patch
   pd::Patch patch = lpd.openPatch("test.pd", "./pd");
   std::cout << patch << std::endl;

   // use the RtAudio API to connect to the default audio device
   if(audio.getDeviceCount()==0){
      std::cout << "There are no available sound devices." << std::endl;
      exit(1);
   }

   RtAudio::StreamParameters parameters;
   parameters.deviceId = audio.getDefaultOutputDevice();
   parameters.nChannels = 2;

   RtAudio::StreamOptions options;
   options.streamName = "libpd rtaudio test";
   options.flags = RTAUDIO_SCHEDULE_REALTIME;
   if(audio.getCurrentApi() != RtAudio::MACOSX_CORE) {
      options.flags |= RTAUDIO_MINIMIZE_LATENCY; // CoreAudio doesn't seem to like this
   }
   try {
      audio.openStream( &parameters, NULL, RTAUDIO_FLOAT32, sampleRate, &bufferFrames, &audioCallback, NULL, &options );
      audio.startStream();
   }
   catch(RtAudioError& e) {
      std::cerr << e.getMessage() << std::endl;
      exit(1);
   }
}
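audioCallback() is defined elsewhere; with libpd's C++ wrapper the bridge is typically a thin call into PdBase::processFloat(). A sketch under that assumption (pd processes audio in 64-frame ticks, and the stream above is interleaved RTAUDIO_FLOAT32 with 0 inputs and 2 outputs, matching lpd.init(0, 2, sampleRate)):

int audioCallback(void *outputBuffer, void *inputBuffer, unsigned int nBufferFrames,
                  double streamTime, RtAudioStreamStatus status, void *userData)
{
   // one pd tick = 64 sample frames; bufferFrames = 128 above gives 2 ticks
   int ticks = nBufferFrames / 64;
   lpd.processFloat(ticks, (float *) inputBuffer, (float *) outputBuffer);
   return 0;
}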
Example #20
File: main.cpp Project: eriser/noisebox
int startAudio() {

	// Determine the number of devices available
	unsigned int devices = audio.getDeviceCount();

	
	if(devices==0) {
		printf("please run 'sudo modprobe snd_bcm2835' to enable the alsa driver\n");
		return 1;
	}
	// Scan through devices for various capabilities
	RtAudio::DeviceInfo info;
	for ( unsigned int i=0; i<devices; i++ ) {

		info = audio.getDeviceInfo( i );

		if ( info.probed == true ) {
			// Print, for example, the maximum number of output channels for each device
			std::cout << "device = " << i;
			std::cout << ": maximum output channels = " << info.outputChannels << "\n";
		}
	}
	
	

	
	RtAudio::StreamParameters parameters;
	parameters.deviceId = audio.getDefaultOutputDevice();
	parameters.nChannels = 2;
	parameters.firstChannel = 0;
	unsigned int sampleRate = SAMPLERATE;
	unsigned int bufferFrames = BUFFERSIZE;
	double data[2];

	try {
		audio.openStream( &parameters, NULL, RTAUDIO_FLOAT32,
                    sampleRate, &bufferFrames, &audioCallback, (void *)&data );
		audio.startStream();
	} catch ( RtError& e ) {
		e.printMessage();
		return 1;
	}
	
	return 0;
}
Example #21
    void initaudio() {

        using namespace std;

        /*
        vector<RtAudio::Api> apis;
        RtAudio::getCompiledApi(apis);
        cout << apis.size() << endl;
        for (size_t i = 0; i < apis.size(); ++i) {
            cout << apis[i] << endl;
        }
    */
        int ndevices = adc.getDeviceCount();
        if ( ndevices < 1 ) {
            std::cout << "\nNo audio devices found!\n";
            return;
        }


        cout << endl;
        for (int i = 0; i < ndevices; ++i) {
            RtAudio::DeviceInfo devinfo;
            devinfo = adc.getDeviceInfo(i);
            cout << i << " " << devinfo.name << " " << devinfo.inputChannels << endl;
        }


        RtAudio::StreamParameters parameters;
        parameters.deviceId = 2;
        parameters.nChannels = 1;
        parameters.firstChannel = 0;
        unsigned int sampleRate = 1200;
        unsigned int bufferFrames = 8; // frames per buffer
        try {
            adc.openStream( NULL, &parameters, RTAUDIO_SINT16,
                            sampleRate, &bufferFrames, &record );
            adc.startStream();
        }
        catch ( exception& e ) {
            std::cout << "EXCEPTION" << endl << e.what();
            return;
        }

    }
Example #22
void GOrgueSoundRtPort::addDevices(std::vector<GOrgueSoundDevInfo>& result)
{
	try
	{
		std::vector<RtAudio::Api> rtaudio_apis;
		RtAudio::getCompiledApi(rtaudio_apis);

		for (unsigned k = 0; k < rtaudio_apis.size(); k++)
		{
			RtAudio* audioDevice = 0;

			try
			{
				audioDevice = new RtAudio(rtaudio_apis[k]);
				for (unsigned i = 0; i < audioDevice->getDeviceCount(); i++)
				{
					RtAudio::DeviceInfo dev_info = audioDevice->getDeviceInfo(i);
					if (!dev_info.probed)
						continue;
					if (dev_info.outputChannels < 1)
						continue;
					GOrgueSoundDevInfo info;
					info.channels = dev_info.outputChannels;
					info.isDefault = dev_info.isDefaultOutput;
					info.name = getName(rtaudio_apis[k], audioDevice, i);
					result.push_back(info);
				}
			}
			catch (RtAudioError &e)
			{
				wxString error = wxString::FromAscii(e.getMessage().c_str());
				wxLogError(_("RtAudio error: %s"), error.c_str());
			}
			if (audioDevice)
				delete audioDevice;
		}
	}
	catch (RtAudioError &e)
	{
		wxString error = wxString::FromAscii(e.getMessage().c_str());
		wxLogError(_("RtAudio error: %s"), error.c_str());
	}
}
Example #23
int main(int argc, char ** argv)
{
    if ( argc==1 )
    {
        std::cerr << "f " << freq << "\t frequency" << std::endl; 
        std::cerr << "s " << scale << "\t scale" << std::endl; 
        std::cerr << "t " << steps << "\t steps" << std::endl; 
        std::cerr << "r " << rthresh << "\t right thresh" << std::endl; 
        std::cerr << "f " << lthresh << "\t left  thresh" << std::endl; 
        std::cerr << "i " << inp_audio << "\t inp_audio device id" << std::endl; 
        std::cerr << "o " << out_audio << "\t out_audio device id" << std::endl; 
    }
    for ( int i = 1; i<argc-1; i++ )
    {
        if ( !strcmp(argv[i],"f") ) { freq=atoi(argv[++i]); continue; }
        if ( !strcmp(argv[i],"l") ) { lthresh=atoi(argv[++i]); continue; }
        if ( !strcmp(argv[i],"r") ) { rthresh=atoi(argv[++i]); continue; }
        if ( !strcmp(argv[i],"s") ) { scale=atoi(argv[++i]); continue; }
        if ( !strcmp(argv[i],"t") ) { steps=atoi(argv[++i]); continue; }
        if ( !strcmp(argv[i],"i") ) { inp_audio=atoi(argv[++i]); continue; }
        if ( !strcmp(argv[i],"o") ) { out_audio=atoi(argv[++i]); continue; }
    }
    unsigned int bufferFrames = NUM_FREQ*2;

    Data data;
	data.fft.Init(bufferFrames, NUM_FREQ, 1, 2.5f);

    RtAudio adac;
    if ( adac.getDeviceCount() < 1 ) 
    {
        std::cout << "\nNo audio devices found!\n";
        exit( 0 );
    }

    RtAudio::StreamParameters iParams, oParams;
    iParams.deviceId  = inp_audio; // <----------- put them on 
    iParams.nChannels = 1; //              different devices
    oParams.deviceId  = out_audio; // <----------- for duplex mode
    oParams.nChannels = 1; // 

    try 
    {
        adac.openStream( &oParams, &iParams, RTAUDIO_FLOAT32, 44100, &bufferFrames, &inout, &data );
    }
    catch ( RtError& e ) 
    {
        e.printMessage();
        exit( 0 );
    }
    try 
    {
        adac.startStream();
        main_init();

        int k = 0;
        int kcen = 658;
        int lmean=0,rmean=0;
        while(true)
        {
            // find the global max:
            float m = 0;
            int mi=-1;
            for ( int i=64; i<NUM_FREQ; i++ ) // skip low freq
            {
                if ( data.freqs[i] > m ) 
                {
                    m = data.freqs[i];
                    mi = i;
                }
            }
            kcen = ipol(kcen,mi,4);
            // get the mean of the lower and the higher neighbours
            int lsum=0,rsum=0;
            for( int i=-steps; i<-2; i++ )
            {
                lsum += data.value(kcen+i,scale);
            }
            for( int i=2; i<steps; i++ )
            {
                rsum += data.value(kcen+i,scale);
            }
            rsum /= (steps-2);
            lsum /= (steps-2);
            int rd = rsum-rmean;
            int ld = lsum-lmean;
            lmean=ipol(lmean,lsum,256);
            rmean=ipol(rmean,rsum,256);
            int lc=' ',rc=' ';
            if ( rd>rthresh )
                rc='r';
            if ( ld>lthresh )
                lc='l';

            //if ( ld>lthresh || ld>lthresh )
                std::cerr << char(lc) << " " << char(rc) << std::endl;

            main_idle(data,kcen);
        }
        // Stop the stream.
        adac.stopStream();
    }
    catch ( RtError& e ) 
    {
        e.printMessage();
        goto cleanup;
    }

cleanup:
   if ( adac.isStreamOpen() ) adac.closeStream();

  return 0;
}
Example #24
int main( int argc, char *argv[])
{
  // COMMAND LINE ARG HANDLING
  map<string, ugen> ugens;
  ugens["--sine"] = &sine;
  ugens["--saw"] = &saw;
  ugens["--pulse"] = &pulse;
  ugens["--noise"] = &noise;
  ugens["--impulse"] = &impulse;

  if (argc < 4 || argc > 10 ) usage();

  string type_arg = argv[1];
  g_active_ugen = ugens[type_arg];
  if (g_active_ugen == NULL)
    usage();

  double freq_arg = atof(argv[2]);
  if (freq_arg <= 0)
    usage();
  g_frequency = freq_arg;

  double width_arg = atof(argv[3]);
  if (width_arg < 0 || width_arg > 1)
    usage();
  g_width = width_arg;

  if (argc > 4) { // modulation parameters present
    for (int i = 4; i < argc;) {
      if (string(argv[i]).compare("--input") == 0) {
	g_modulate_input = true;
	i++;
      } 
      else if (string(argv[i]).compare("--fm") == 0) {
	g_fm_on = true;

	string fm_type_arg = argv[++i];
	g_active_fm_ugen = ugens[fm_type_arg];
	if (g_active_fm_ugen == NULL)
	  usage();
	
	double fm_freq_arg = atof(argv[++i]);
	if (fm_freq_arg <= 0)
	  usage();
	g_fm_frequency = fm_freq_arg;
	
	double fm_width_arg = atof(argv[++i]);
	if (fm_width_arg < 0 || fm_width_arg > 1)
	  usage();
	g_fm_width = fm_width_arg;
	
	double fm_index_arg = atoi(argv[++i]);
	g_fm_index = fm_index_arg;

	i++;
      }
      else
	usage();
    }
  }

  // AUDIO SETUP
  RtAudio audio;
  audio.showWarnings( true );

  RtAudio::StreamParameters output_params;
  RtAudio::StreamParameters input_params;

  // Choose an audio device and a sample rate
  unsigned int sample_rate;
  unsigned int devices = audio.getDeviceCount();
  if ( devices < 1 ) {
    cerr << "No audio device found!" << endl;
    exit(1);
  }
  RtAudio::DeviceInfo info;
  for (unsigned int i = 0; i < devices; i++ ) {
    info = audio.getDeviceInfo(i);
    if ( info.isDefaultOutput ) {
      output_params.deviceId = i;
      output_params.nChannels = 2;

      if (info.sampleRates.size() < 1) {
	cerr << "No supported sample rates found!" << endl;
	exit(1);
      }
      for (unsigned int j = 0; j < info.sampleRates.size(); j++) {
	sample_rate = info.sampleRates[j];
	if (sample_rate == 44100 || sample_rate == 48000) {
	  // Found a nice sample rate, stop looking
	  break;
	}
      }
      cout << "Using sample rate: " << sample_rate << endl;

    }
    if ( info.isDefaultInput ) {
      input_params.deviceId = i;
      input_params.nChannels = 1;
    }
  }

  cout << "Using output device ID " << output_params.deviceId << " which has " << 
    output_params.nChannels << " output channels." << endl;
  cout << "Using input device ID " << input_params.deviceId << " which has " << 
    input_params.nChannels << " input channels." << endl;

  RtAudio::StreamOptions options;
  options.flags |= RTAUDIO_HOG_DEVICE;
  options.flags |= RTAUDIO_SCHEDULE_REALTIME;

  unsigned int buffer_frames = 256;

  try {
    audio.openStream( &output_params,     // output params
		      &input_params,      // input params
		      RTAUDIO_FLOAT64,    // audio format 
		      sample_rate,        // sample rate
		      &buffer_frames,     // num frames per buffer (mutable by rtaudio)
		      &callback,          // audio callback
		      &audio,             // user data pointer HACK HACK :D
		      &options);          // stream options
    audio.startStream();
  } catch ( RtError &e ) {
    e.printMessage();
    goto cleanup;
  }

  char input;
  cout << "Playing, press enter to quit (buffer frames = " << buffer_frames << ")." << endl;
  cin.get( input );

  try {
    audio.stopStream();
  }
  catch ( RtError &e ) {
    e.printMessage();
  }

 cleanup:
  if ( audio.isStreamOpen() ) {
    audio.closeStream();
  }

  return 0;
}
Example #25
int main( int argc, char *argv[] )
{
  unsigned int bufferFrames, fs, oDevice = 0, iDevice = 0, iOffset = 0, oOffset = 0;
  char input;

  // minimal command-line checking
  if (argc < 3 || argc > 7 ) usage();

  RtAudio dac;
  if ( dac.getDeviceCount() < 1 ) {
    std::cout << "\nNo audio devices found!\n";
    exit( 1 );
  }

  channels = (unsigned int) atoi( argv[1] );
  fs = (unsigned int) atoi( argv[2] );
  if ( argc > 3 )
    iDevice = (unsigned int) atoi( argv[3] );
  if ( argc > 4 )
    oDevice = (unsigned int) atoi(argv[4]);
  if ( argc > 5 )
    iOffset = (unsigned int) atoi(argv[5]);
  if ( argc > 6 )
    oOffset = (unsigned int) atoi(argv[6]);

  double *data = (double *) calloc( channels, sizeof( double ) );

  // Let RtAudio print messages to stderr.
  dac.showWarnings( true );

  // Set our stream parameters for output only.
  bufferFrames = 256;
  RtAudio::StreamParameters oParams, iParams;
  oParams.deviceId = oDevice;
  oParams.nChannels = channels;
  oParams.firstChannel = oOffset;

  RtAudio::StreamOptions options;
  options.flags = RTAUDIO_HOG_DEVICE;
  try {
    dac.openStream( &oParams, NULL, RTAUDIO_FLOAT64, fs, &bufferFrames, &sawi, (void *)data, &options );
    std::cout << "\nStream latency = " << dac.getStreamLatency() << std::endl;

    // Start the stream
    dac.startStream();
    std::cout << "\nPlaying ... press <enter> to stop.\n";
    std::cin.get( input );

    // Stop the stream
    dac.stopStream();

    // Restart again
    std::cout << "Press <enter> to restart.\n";
    std::cin.get( input );
    dac.startStream();

    // Test abort function
    std::cout << "Playing again ... press <enter> to abort.\n";
    std::cin.get( input );
    dac.abortStream();

    // Restart another time
    std::cout << "Press <enter> to restart again.\n";
    std::cin.get( input );
    dac.startStream();

    std::cout << "Playing again ... press <enter> to close the stream.\n";
    std::cin.get( input );
  }
  catch ( RtError& e ) {
    e.printMessage();
    goto cleanup;
  }

  if ( dac.isStreamOpen() ) dac.closeStream();

  // Test non-interleaved functionality
  options.flags = RTAUDIO_NONINTERLEAVED;
  try {
    dac.openStream( &oParams, NULL, RTAUDIO_FLOAT64, fs, &bufferFrames, &sawni, (void *)data, &options );

    std::cout << "Press <enter> to start non-interleaved playback.\n";
    std::cin.get( input );

    // Start the stream
    dac.startStream();
    std::cout << "\nPlaying ... press <enter> to stop.\n";
    std::cin.get( input );
  }
  catch ( RtError& e ) {
    e.printMessage();
    goto cleanup;
  }

  if ( dac.isStreamOpen() ) dac.closeStream();

  // Now open a duplex stream.
  unsigned int bufferBytes;
  iParams.deviceId = iDevice;
  iParams.nChannels = channels;
  iParams.firstChannel = iOffset;
  options.flags = RTAUDIO_NONINTERLEAVED;
  try {
    dac.openStream( &oParams, &iParams, RTAUDIO_SINT32, fs, &bufferFrames, &inout, (void *)&bufferBytes, &options );

    bufferBytes = bufferFrames * channels * 4;

    std::cout << "Press <enter> to start duplex operation.\n";
    std::cin.get( input );

    // Start the stream
    dac.startStream();
    std::cout << "\nRunning ... press <enter> to stop.\n";
    std::cin.get( input );

    // Stop the stream
    dac.stopStream();
    std::cout << "\nStopped ... press <enter> to restart.\n";
    std::cin.get( input );

    // Restart the stream
    dac.startStream();
    std::cout << "\nRunning ... press <enter> to stop.\n";
    std::cin.get( input );
  }
  catch ( RtError& e ) {
    e.printMessage();
  }

 cleanup:
  if ( dac.isStreamOpen() ) dac.closeStream();
  free( data );

  return 0;
}
Example #26
// ========
// = Main =
// ========
// Entry point
int main (int argc, char *argv[])
{
	cout << argc << "  " << argv[0];
	if (argc > 3) { cerr << "\nERROR - wrong number of arguments\n"; exit(1); }
	if (argc==3) 
		g_audio_history = atoi(argv[2]); 
	else
		g_audio_history = 30;
	if (argc>1) 
		g_fft_history = atoi(argv[1]);
	else
		g_fft_history = 100;
	help();
    // RtAudio config + init

    // pointer to RtAudio object
    RtAudio *  audio = NULL;

    // create the object
    try
    {
        audio = new RtAudio();
    }
    catch( RtError & err ) {
        err.printMessage();
        exit(1);
    }

    if( audio->getDeviceCount() < 1 )
    {
        // nopes
        cout << "no audio devices found!" << endl;
        exit( 1 );
    }
        
    // let RtAudio print messages to stderr.
    audio->showWarnings( true );

    // set input and output parameters
    RtAudio::StreamParameters iParams, oParams;
    iParams.deviceId = audio->getDefaultInputDevice();
    iParams.nChannels = 1;
    iParams.firstChannel = 0;
    oParams.deviceId = audio->getDefaultOutputDevice();
    oParams.nChannels = 1;
    oParams.firstChannel = 0;
        
    // create stream options
    RtAudio::StreamOptions options;

    // set the callback and start stream
    try
    {
        audio->openStream( &oParams, &iParams, RTAUDIO_FLOAT64, MY_SRATE, &g_buffSize, &audioCallback, NULL, &options);
        
        cerr << "Buffer size defined by RtAudio: " << g_buffSize << endl;
        
        // allocate the buffer for the fft
        g_fftBuff = new float[g_buffSize * ZPF];
		g_audioBuff = new float[g_buffSize * ZPF];
        if ( g_fftBuff == NULL ) {
            cerr << "Something went wrong when creating the fft and audio buffers" << endl;
            exit (1);
        }
        
        // allocate the buffer for the time domain window
        g_window = new float[g_buffSize];
        if ( g_window == NULL ) {
            cerr << "Something went wrong when creating the window" << endl;
            exit (1);
        }

        // create a hanning window
        make_window( g_window, g_buffSize );
        
        // start the audio stream
        audio->startStream();
        
        // test RtAudio functionality for reporting latency.
        cout << "stream latency: " << audio->getStreamLatency() << " frames" << endl;
    }
    catch( RtError & err )
    {
        err.printMessage();
        goto cleanup;
    }


    // ============
    // = GL stuff =
    // ============

    // initialize GLUT
    glutInit( &argc, argv );
    // double buffer, use rgb color, enable depth buffer
    glutInitDisplayMode( GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH );
    // initialize the window size
    glutInitWindowSize( g_width, g_height );
    // set the window postion
    glutInitWindowPosition( 100, 100 );
    // create the window
    glutCreateWindow( "Hello GL" );
    //glutEnterGameMode();

    // set the idle function - called when idle
    glutIdleFunc( idleFunc );
    // set the display function - called when redrawing
    glutDisplayFunc( displayFunc );
    // set the reshape function - called when client area changes
    glutReshapeFunc( reshapeFunc );
    // set the keyboard function - called on keyboard events
    glutKeyboardFunc( keyboardFunc );
    // set the mouse function - called on mouse stuff
    glutMouseFunc( mouseFunc );
    // set the special function - called on special keys events (fn, arrows, pgDown, etc)
    glutSpecialFunc( specialFunc );

    // do our own initialization
    initialize();

    // let GLUT handle the current thread from here
    glutMainLoop();

        
    // if we get here, stop!
    try
    {
        audio->stopStream();
    }
    catch( RtError & err )
    {
        err.printMessage();
    }

    // Clean up
    cleanup:
    if(audio)
    {
        audio->closeStream();
        delete audio;
    }

    
    return 0;
}
Example #27
File: playsaw.cpp Project: wthibault/MoM
int main( int argc, char *argv[] )
{
  unsigned int bufferFrames, fs, device = 0, offset = 0;

  // minimal command-line checking
  if (argc < 3 || argc > 6 ) usage();

  RtAudio dac;
  if ( dac.getDeviceCount() < 1 ) {
    std::cout << "\nNo audio devices found!\n";
    exit( 1 );
  }

  channels = (unsigned int) atoi( argv[1] );
  fs = (unsigned int) atoi( argv[2] );
  if ( argc > 3 )
    device = (unsigned int) atoi( argv[3] );
  if ( argc > 4 )
    offset = (unsigned int) atoi( argv[4] );
  if ( argc > 5 )
    nFrames = (unsigned int) (fs * atof( argv[5] ));
  if ( nFrames > 0 ) checkCount = true;

  double *data = (double *) calloc( channels, sizeof( double ) );

  // Let RtAudio print messages to stderr.
  dac.showWarnings( true );

  // Set our stream parameters for output only.
  bufferFrames = 256;
  RtAudio::StreamParameters oParams;
  oParams.deviceId = device;
  oParams.nChannels = channels;
  oParams.firstChannel = offset;

  options.flags |= RTAUDIO_HOG_DEVICE;
  options.flags |= RTAUDIO_SCHEDULE_REALTIME;
#if !defined( USE_INTERLEAVED )
  options.flags |= RTAUDIO_NONINTERLEAVED;
#endif
  try {
    dac.openStream( &oParams, NULL, FORMAT, fs, &bufferFrames, &saw, (void *)data, &options );
    dac.startStream();
  }
  catch ( RtError& e ) {
    e.printMessage();
    goto cleanup;
  }

  if ( checkCount ) {
    while ( dac.isStreamRunning() == true ) SLEEP( 100 );
  }
  else {
    char input;
    //std::cout << "Stream latency = " << dac.getStreamLatency() << "\n" << std::endl;
    std::cout << "\nPlaying ... press <enter> to quit (buffer size = " << bufferFrames << ").\n";
    std::cin.get( input );

    try {
      // Stop the stream
      dac.stopStream();
    }
    catch ( RtError& e ) {
      e.printMessage();
    }
  }

 cleanup:
  if ( dac.isStreamOpen() ) dac.closeStream();
  free( data );

  return 0;
}
int main () {
  // For the program to work, a 16-bit PCM wave file must be present in the
  // same folder!
	const char * fname = "test.flac" ;

  // Sound file handle from the libsndfile library
	SndfileHandle file = SndfileHandle (fname) ;

  // Print all available information about the audio file!
  std::cout << "Reading file: " << fname << std::endl;
  std::cout << "File format: " << file.format() << std::endl;
  std::cout << "PCM 16 BIT: " << (SF_FORMAT_WAV | SF_FORMAT_PCM_16) << std::endl;
  std::cout << "Samples in file: " << file.frames() << std::endl;
  std::cout << "Samplerate " << file.samplerate() << std::endl;
  std::cout << "Channels: " << file.channels() << std::endl;

  // The RtAudio class is both a DAC and an ADC, but here it is used only as a DAC!
	RtAudio dac;
  if ( dac.getDeviceCount() < 1 ) {
    std::cout << "\nNo audio devices found!\n";
    return 0;
  }

  // Output params ...
  RtAudio::StreamParameters parameters;
  parameters.deviceId = dac.getDefaultOutputDevice();
  parameters.nChannels = 2;
  parameters.firstChannel = 0;
  unsigned int sampleRate = 44100;

  // NOTE! Frames != samples:
  // one frame = one sample for each channel,
  // i.e. |samples| = channels * frames!
  unsigned int bufferFrames = 1024;

  // Since we read 16-bit PCM data, RTAUDIO_SINT16 should be used as the data
  // format.
  // Here the sound file handle is passed to the callback as its user data.
  // A "serious" solution should do this differently!
  // Incidentally, incompatible formats can produce "interesting" effects!
  try {
    dac.openStream( &parameters, NULL, RTAUDIO_SINT16,
                    sampleRate, &bufferFrames, &fplay, (void *)&file);
    dac.startStream();
  }
  catch ( RtAudioError& e ) {
    e.printMessage();
    return 0;
  }

  char input;
  std::cout << "\nPlaying ... press <enter> to quit.\n";
  std::cin.get( input );
  try {
    // Stop the stream
    dac.stopStream();
  }
  catch (RtAudioError& e) {
    e.printMessage();
  }
  if ( dac.isStreamOpen() ) dac.closeStream();

  return 0 ;

}
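The fplay() callback is not included in the listing. Since the SndfileHandle is handed over as user data and the stream format is RTAUDIO_SINT16 with two channels, a plausible sketch (assuming the file itself is stereo) would read one buffer of frames per callback and tell RtAudio to stop once the file runs out:

int fplay( void *outputBuffer, void *inputBuffer, unsigned int nBufferFrames,
           double streamTime, RtAudioStreamStatus status, void *userData )
{
  SndfileHandle *file = (SndfileHandle *) userData;
  short *out = (short *) outputBuffer;

  // readf() reads whole frames, i.e. nBufferFrames * channels() shorts
  sf_count_t read = file->readf( out, nBufferFrames );
  if ( read < (sf_count_t) nBufferFrames ) {
    // zero the unfilled remainder and ask RtAudio to drain and stop
    memset( out + read * file->channels(), 0,
            ( nBufferFrames - read ) * file->channels() * sizeof(short) );
    return 1;
  }
  return 0;
}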
unsigned int getAudioDeviceCount() {
    RtAudio audio;
    return audio.getDeviceCount();
}
//-----------------------------------------------------------------------------
// name: main()
// desc: entry point
//-----------------------------------------------------------------------------
int main( int argc, char ** argv )
{

	
	
	callbackData data;
		// global for frequency
		data.g_freq=440;
		// global sample number variable
		data.g_t = 0;
		// global for width;
		data.g_width = 0;
		//global for input
		data.g_input=0;
		
	
	//check parameters and parse input
	if (!parse(argc,argv,data))
	{
		exit(0);
	}

    // instantiate RtAudio object
    RtAudio adac;
    // variables
    unsigned int bufferBytes = 0;
    // frame size
    unsigned int bufferFrames = 512;
    
    // check for audio devices
    if( adac.getDeviceCount() < 1 )
    {
        // nopes
        cout << "no audio devices found!" << endl;
        exit( 1 );
    }

    // let RtAudio print messages to stderr.
    adac.showWarnings( true );

    // set input and output parameters
    RtAudio::StreamParameters iParams, oParams;
    iParams.deviceId = adac.getDefaultInputDevice();
    iParams.nChannels = MY_CHANNELS;
    iParams.firstChannel = 0;
    oParams.deviceId = adac.getDefaultOutputDevice();
    oParams.nChannels = MY_CHANNELS;
    oParams.firstChannel = 0;
    
    // create stream options
    RtAudio::StreamOptions options;

    // go for it
    try {
        // open a stream
        adac.openStream( &oParams, &iParams, MY_FORMAT, MY_SRATE, &bufferFrames, &callme, (void *)&data, &options );
    }
    catch( RtError& e )
    {
        // error!
        cout << e.getMessage() << endl;
        exit( 1 );
    }

    // compute
    bufferBytes = bufferFrames * MY_CHANNELS * sizeof(SAMPLE);
    
    // test RtAudio functionality for reporting latency.
    cout << "stream latency: " << adac.getStreamLatency() << " frames" << endl;

    // go for it
    try {
        // start stream
        adac.startStream();

        // get input
        char input;
        std::cout << "running... press <enter> to quit (buffer frames: " << bufferFrames << ")" << endl;
        std::cin.get(input);
        
        // stop the stream.
        adac.stopStream();
    }
    catch( RtError& e )
    {
        // print error message
        cout << e.getMessage() << endl;
        goto cleanup;
    }
    
cleanup:
    // close if open
    if( adac.isStreamOpen() )
        adac.closeStream();
    
    // done
    outfile<<"];\nplot(x)";
    return 0;
}