Example #1
/* The load_voice method loads a sample into the driver's memory. The voice's
   'streaming' field will be set to false for these voices, and its
   'buffer_size' field will be the total length in bytes of the sample data.
   The voice's attached sample's looping mode should be honored, and loading
   must fail if it cannot be. */
static int _openal_load_voice(ALLEGRO_VOICE *voice, const void *data)
{
   ALLEGRO_AL_DATA *ex_data = voice->extra;
   ALenum openal_err;

   if (voice->attached_stream->loop != ALLEGRO_PLAYMODE_ONCE &&
           voice->attached_stream->loop != ALLEGRO_PLAYMODE_LOOP) {
      return 1;
   }

   ex_data->buffer_size = voice->buffer_size;
   if (!ex_data->buffer_size) {
      ALLEGRO_ERROR("Voice buffer and data buffer size mismatch\n");
      return 1;
   }
   ex_data->num_buffers = 1;

   alGenSources(1, &ex_data->source);
   if ((openal_err = alGetError()) != AL_NO_ERROR) {
      ALLEGRO_ERROR("Could not generate (voice) source: %s\n",
         openal_get_err_str(openal_err));
      return 1;
   }

   ex_data->buffers = al_malloc(sizeof(ALuint) * ex_data->num_buffers);
   if (!ex_data->buffers) {
      alSourcei(ex_data->source, AL_BUFFER, 0);
      alDeleteSources(1, &ex_data->source);
      ALLEGRO_ERROR("Could not allocate voice buffer memory\n");
      return 1;
   }

   alGenBuffers(ex_data->num_buffers, ex_data->buffers);
   if ((openal_err = alGetError()) != AL_NO_ERROR) {
      alSourcei(ex_data->source, AL_BUFFER, 0);
      alDeleteSources(1, &ex_data->source);
      al_free(ex_data->buffers);
      ex_data->buffers = NULL;
      ALLEGRO_ERROR("Could not generate (voice) buffer: %s\n",
         openal_get_err_str(openal_err));
      return 1;
   }

   /* copies data into a buffer */
   alBufferData(ex_data->buffers[0], ex_data->format,
                data, ex_data->buffer_size, voice->frequency);

   /* sets the buffer */
   alSourcei(ex_data->source, AL_BUFFER, ex_data->buffers[0]);

   /* Loop / no loop? */
   alSourcei(ex_data->source, AL_LOOPING,
      (voice->attached_stream->loop != ALLEGRO_PLAYMODE_ONCE));

   /* make sure the volume is on */
   alSourcef(ex_data->source, AL_GAIN, 1.0f);

   if ((openal_err = alGetError()) != AL_NO_ERROR) {
      alSourcei(ex_data->source, AL_BUFFER, 0);
      alDeleteSources(1, &ex_data->source);
      alDeleteBuffers(ex_data->num_buffers, ex_data->buffers);
      al_free(ex_data->buffers);
      ex_data->buffers = NULL;
      ALLEGRO_ERROR("Could not attach voice source: %s\n",
         openal_get_err_str(openal_err));
      return 1;
   }

   return 0;
}
Example #2
SoundSource::~SoundSource()
{
    alCheck(alSourcei(m_source, AL_BUFFER, 0));
    alCheck(alDeleteSources(1, &m_source));
}
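A note on the pattern above: the snippets in this collection tear a source down in the same order — stop it, detach any buffer with (AL_BUFFER, 0), delete it, and optionally clear the error state. A minimal standalone sketch of that order, assuming a current OpenAL context and a valid source handle (not taken from any of the projects listed here):

/* Cleanup sketch (assumes a current context; illustrative only). */
static void destroy_source(ALuint source)
{
    alSourceStop(source);             /* stop playback first                      */
    alSourcei(source, AL_BUFFER, 0);  /* detach the bound (or queued) buffers     */
    alDeleteSources(1, &source);      /* release the source handle                */
    alGetError();                     /* discard any error raised during teardown */
}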
Example #3
SoundChannel::~SoundChannel()
{
#ifdef __DAVASOUND_AL__
	AL_VERIFY(alDeleteSources(1, &source));
#endif //#ifdef __DAVASOUND_AL__
}
Example #4
 ~OpenAlChannel() {
     alDeleteSources(1, &_source);
     alGetError();  // discard.
 }
Example #5
File: av.cpp  Project: Nokius/cyanide
void Cyanide::audio_thread()
{
    return; // NOTE: this early return leaves the rest of the audio thread disabled
    const char *device_list, *output_device = NULL;
    //void *audio_device = NULL;

    bool call[MAX_CALLS] = {0}, preview = 0;
    bool audio_filtering_enabled = false; // initialise: it is read below before ever being set
    // bool groups_audio[MAX_NUM_GROUPS] = {0};

    int perframe = (av_DefaultSettings.audio_frame_duration * av_DefaultSettings.audio_sample_rate) / 1000;
    uint8_t buf[perframe * 2 * av_DefaultSettings.audio_channels], dest[perframe * 2 * av_DefaultSettings.audio_channels];
    memset(buf, 0, sizeof(buf));

    uint8_t audio_count = 0;
    bool record_on = 0;
    #ifdef AUDIO_FILTERING
    qDebug() << "Audio Filtering enabled";
    #ifdef ALC_LOOPBACK_CAPTURE_SAMPLES
    qDebug() << "Echo cancellation enabled";
    #endif
    #endif

    qDebug() << "frame size:" << perframe;

    device_list = alcGetString(NULL, ALC_ALL_DEVICES_SPECIFIER);
    if(device_list) {
        output_device = device_list;
        qDebug() << "Output Device List:";
        while(*device_list) {
            qDebug() << device_list;
            //postmessage(NEW_AUDIO_OUT_DEVICE, 0, 0, (void*)device_list);
            device_list += strlen(device_list) + 1;
        }
    }

    device_out = alcOpenDevice(output_device);
    if(!device_out) {
        qDebug() << "alcOpenDevice() failed";
        return;
    }

    int attrlist[] = {  ALC_FREQUENCY, av_DefaultSettings.audio_sample_rate,
                        ALC_INVALID };

    context = alcCreateContext(device_out, attrlist);
    if(!alcMakeContextCurrent(context)) {
        qDebug() << "alcMakeContextCurrent() failed";
        alcCloseDevice(device_out);
        return;
    }

    alGenSources(countof(source), source);

    static ALuint ringSrc[MAX_CALLS];
    alGenSources(MAX_CALLS, ringSrc);

    /* Create buffer to store samples */
    ALuint RingBuffer;
    alGenBuffers(1, &RingBuffer);

    {
        float frequency1 = 441.f;
        float frequency2 = 882.f;
        int seconds = 4;
        unsigned sample_rate = 22050;
        size_t buf_size = seconds * sample_rate * 2; //16 bit (2 bytes per sample)
        int16_t *samples = (int16_t*)malloc(buf_size * sizeof(int16_t));
        if (!samples)
            return;

        /*Generate an electronic ringer sound that quickly alternates between two frequencies*/
        int index = 0;
        for(index = 0; index < buf_size; ++index) {
            if ((index / sample_rate) % 4 < 2) { //4-second ring cycle: first 2 seconds ring, the remaining 2 seconds are silence
                if((index / 1000) % 2 == 1) {
                    samples[index] = 5000 * sin((2.0 * 3.1415926 * frequency1) / sample_rate * index); //5000 = amplitude (volume level); it can range from zero to 32700
                } else {
                    samples[index] = 5000 * sin((2.0 * 3.1415926 * frequency2) / sample_rate * index);
                }
            } else {
                samples[index] = 0;
            }
        }

        alBufferData(RingBuffer, AL_FORMAT_MONO16, samples, buf_size, sample_rate);
        free(samples);
    }

    {
        unsigned int i;
        for (i = 0; i < MAX_CALLS; ++i) {
            alSourcei(ringSrc[i], AL_LOOPING, AL_TRUE);
            alSourcei(ringSrc[i], AL_BUFFER, RingBuffer);
        }
    }
    #ifdef AUDIO_FILTERING
    Filter_Audio *f_a = NULL;
    #endif

    while(loop == LOOP_RUN || loop == LOOP_SUSPEND) {
        #ifdef AUDIO_FILTERING
        if (!f_a && audio_filtering_enabled) {
            f_a = new_filter_audio(av_DefaultSettings.audio_sample_rate);
            if (!f_a) {
                audio_filtering_enabled = 0;
                qDebug() << "filter audio failed";
            } else {
                qDebug() << "filter audio on";
            }
        } else if (f_a && !audio_filtering_enabled) {
            kill_filter_audio(f_a);
            f_a = NULL;
            qDebug() << "filter audio off";
        }
        #else
        if (audio_filtering_enabled)
            audio_filtering_enabled = 0;
        #endif

        bool sleep = 1;

        if(record_on) {
            ALint samples;
            alcGetIntegerv(device_in, ALC_CAPTURE_SAMPLES, sizeof(samples), &samples);
            if(samples >= perframe) {
                alcCaptureSamples(device_in, buf, perframe);
                if (samples >= perframe * 2) {
                    sleep = 0;
                }
            }
        }

        #ifdef AUDIO_FILTERING
        #ifdef ALC_LOOPBACK_CAPTURE_SAMPLES
        if (f_a && audio_filtering_enabled) {
            ALint samples;
            alcGetIntegerv(device_out, ALC_LOOPBACK_CAPTURE_SAMPLES, sizeof(samples), &samples);
            if(samples >= perframe) {
                int16_t buffer[perframe];
                alcCaptureSamplesLoopback(device_out, buffer, perframe);
                pass_audio_output(f_a, buffer, perframe);
                set_echo_delay_ms(f_a, 5);
                if (samples >= perframe * 2) {
                    sleep = 0;
                }
            }
        }
        #endif
        #endif

        #ifdef AUDIO_FILTERING
        if (f_a && filter_audio(f_a, (int16_t*)buf, perframe) == -1) {
            qDebug() << "filter audio error";
        }
        #endif
        if(preview) {
            audio_play(0, (int16_t*)buf, perframe, av_DefaultSettings.audio_channels, av_DefaultSettings.audio_sample_rate);
        }

        int i;
        for(i = 0; i < MAX_CALLS; i++) {
            if(call[i]) {
                int r;
                if((r = toxav_prepare_audio_frame(toxav, i, dest, sizeof(dest), (const int16_t*)buf, perframe)) < 0) {
                    qDebug() << "toxav_prepare_audio_frame error" << r;
                    continue;
                }

                if((r = toxav_send_audio(toxav, i, dest, r)) < 0) {
                    qDebug() << "toxav_send_audio error" << r;
                }
            }
        }

        if (sleep) {
            usleep(5000);
        }
    }

    #ifdef AUDIO_FILTERING
    kill_filter_audio(f_a);
    #endif

    //missing some cleanup ?
    alDeleteSources(MAX_CALLS, ringSrc);
    alDeleteSources(countof(source), source);
    alDeleteBuffers(1, &RingBuffer);

    if(device_in) {
        if(record_on) {
            alcCaptureStop(device_in);
        }
        alcCaptureCloseDevice(device_in);
    }

    alcMakeContextCurrent(NULL);
    alcDestroyContext(context);
    alcCloseDevice(device_out);
}
Example #6
File: aq.c  Project: kuniyoshi/http-pulse
int
main() {
    char input[INPUT_BUFFER];
    char *words[MAX_WORDS];
    int count_of_words, signal_index, sleep_usec, count_of_processed, count_of_queued;
    int i;
    ALint state;
    ALshort signals[BUFFER];
    ALCdevice *device;
    ALCcontext *context;
    ALuint buffer, source;

    sleep_usec = (int)(((1.0 / SAMPLING_FREQUENCY) * MAX_WORDS / 2 ) * 1000 * 1000);
    device     = alcOpenDevice(NULL);
    context    = alcCreateContext(device, NULL);
    alcMakeContextCurrent(context);
    alGenSources(1, &source);
    alSourcei(source, AL_SOURCE_TYPE, AL_STREAMING);

    while (fgets(input, INPUT_BUFFER, stdin) != NULL) {
puts("### got input");
        count_of_words = split_string(input, words);
        signal_index   = 0;

        for (i = 0; i < count_of_words; i++) {
            signals[signal_index++] = (ALshort)atoi(words[i]);
        }

        if (alGetSourcei(source, AL_BUFFERS_QUEUED, &count_of_queued), count_of_queued < COUNT_OF_BUFFERS) {
            alGenBuffers(1, &buffer);
        }
        else {
            while (alGetSourcei(source, AL_BUFFERS_PROCESSED, &count_of_processed), count_of_processed == 0) {
                usleep(sleep_usec);
            }

            for (i = 0; i < count_of_processed; i++) {
                alSourceUnqueueBuffers(source, 1, &buffer);
            }
        }

        alBufferData(buffer, AL_FORMAT_MONO16, signals, sizeof(ALshort) * signal_index, SAMPLING_FREQUENCY);
        alSourceQueueBuffers(source, 1, &buffer);

        if (alGetSourcei(source, AL_SOURCE_STATE, &state), state != AL_PLAYING) {
puts("--- start playing");
            alGetSourcei(source, AL_BUFFERS_PROCESSED, &count_of_processed);
            
            for (i = 0; i < count_of_processed - 1; i++) {
printf("--- unqueue: %d\n", i);
                alSourceUnqueueBuffers(source, 1, &buffer);
            }

            alSourcePlay(source);
        }
    }

    alSourceStop(source);
    alDeleteSources(1, &source);
    alDeleteBuffers(1, &buffer);
    alcMakeContextCurrent(NULL);
    alcDestroyContext(context);
    alcCloseDevice(device);

    return 0;
}
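Example #6 above implements the standard OpenAL streaming cycle by hand: generate a new buffer while the queue is short, otherwise wait for a processed buffer, unqueue it, refill it, re-queue it, and restart the source if it has run dry. The core of that cycle, reduced to a sketch (the function name and the single-refill simplification are illustrative, not part of the example):

/* Streaming refill sketch: reclaim processed buffers, refill them, keep the source playing. */
static void stream_refill(ALuint source, const ALshort *pcm, ALsizei bytes, ALsizei freq)
{
    ALint processed = 0;
    alGetSourcei(source, AL_BUFFERS_PROCESSED, &processed);

    while (processed-- > 0) {
        ALuint buffer;
        alSourceUnqueueBuffers(source, 1, &buffer);            /* reclaim a finished buffer */
        alBufferData(buffer, AL_FORMAT_MONO16, pcm, bytes, freq);
        alSourceQueueBuffers(source, 1, &buffer);              /* put it back in the queue  */
    }

    ALint state = 0;
    alGetSourcei(source, AL_SOURCE_STATE, &state);
    if (state != AL_PLAYING)                                   /* restart if the queue ran dry */
        alSourcePlay(source);
}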
Example #7
int main(void) 
{ 
/* Two functions are needed from this example:
   1) getting the data captured by the microphone while it was not being processed, as an array (string)
   2) playing back a string equal to that value
*/
const ALCchar *   devices; 
const ALCchar *         ptr; 
ALCdevice *       mainDev; 
ALCcontext *      mainContext; 
ALCdevice *       captureDev; 
ALubyte           captureBuffer[1048576]; 
ALubyte           *captureBufPtr; 
ALint             samplesAvailable; 
ALint             samplesCaptured; 
time_t            currentTime; 
time_t            lastTime; 
ALuint            buffer; 
ALuint            source; 
ALint             playState; 
int               i; 

// Print the list of playback devices 
printf("Available playback devices:\n");

devices = alcGetString(NULL, ALC_DEVICE_SPECIFIER); 
ptr = devices; 
//while (ptr[0] != NULL)
while (*ptr)
{ 
   printf("   %s\n", ptr); 
   ptr += strlen(ptr) + 1; 
} 

// Open a playback device and create a context first 
printf("Opening playback device:\n"); 
mainDev = alcOpenDevice(NULL); 
if (mainDev == NULL) 
{ 
  printf("Unable to open playback device!\n"); 
  exit(1); 
} 
devices = alcGetString(mainDev, ALC_DEVICE_SPECIFIER); 
printf("   opened device '%s'\n", devices); 
mainContext = alcCreateContext(mainDev, NULL); 
if (mainContext == NULL) 
{ 
  printf("Unable to create playback context!\n"); 
  exit(1); 
} 
printf("   created playback context\n"); 

// Make the playback context current 
alcMakeContextCurrent(mainContext); 
alcProcessContext(mainContext); 

// Print the list of capture devices 

printf("Available capture devices:\n"); 
devices = alcGetString(NULL, ALC_CAPTURE_DEVICE_SPECIFIER); 
ptr = devices; 

//while (ptr[0] != NULL)
while (*ptr)
{ 
   printf("   %s\n", ptr); 
   ptr += strlen(ptr) + 1; 
}

// Open the default device 
printf("Opening capture device:\n"); 
captureDev = alcCaptureOpenDevice(NULL, 8000, AL_FORMAT_MONO16, 800); 
if (captureDev == NULL) 
{  
  printf("   Unable to open device!\n"); 
  exit(1); 
} 
devices = alcGetString(captureDev, ALC_CAPTURE_DEVICE_SPECIFIER); 
printf("   opened device %s\n", devices); 

// Wait for three seconds to prompt the user 
for (i = 3; i > 0; i--) 
{ 
  printf("Starting capture in %d...\r", i); 
  fflush(stdout); 
  lastTime = time(NULL); 
  currentTime = lastTime; 
  while (currentTime == lastTime) 
  { 
     currentTime = time(NULL); 
     usleep(100000); 
  } 
} 

printf("Starting capture NOW!\n"); 
fflush(stdout); 
lastTime = currentTime; 

// Capture (roughly) five seconds of audio 
alcCaptureStart(captureDev); 
samplesCaptured = 0; 
captureBufPtr = captureBuffer; 
while (currentTime < (lastTime + 5)) 
{ 
  // Get the number of samples available 
  alcGetIntegerv(captureDev, ALC_CAPTURE_SAMPLES, 1, &samplesAvailable); 

  // Copy the samples to our capture buffer 
  if (samplesAvailable > 0) 
  { 
     alcCaptureSamples(captureDev, captureBufPtr, samplesAvailable); 
     samplesCaptured += samplesAvailable; 
     printf("Captured %d samples (adding %d)\r", samplesCaptured, 
        samplesAvailable); 
     fflush(stdout); 

     // Advance the buffer (two bytes per sample * number of samples) 
     captureBufPtr += samplesAvailable * 2; 
  } 

  // Wait for a bit 
  usleep(10000); 

  // Update the clock 
  currentTime = time(NULL); 
} 
printf("\nPausing capture.\n"); 
alcCaptureStop(captureDev); 

// Wait for three seconds to prompt the user 
for (i = 3; i > 0; i--) 
{ 
  printf("Resuming capture in %d...\r", i); 
  fflush(stdout); 
  lastTime = time(NULL); 
  currentTime = lastTime; 
  while (currentTime == lastTime) 
  { 
     currentTime = time(NULL); 
     usleep(100000); 
  } 
} 

printf("Resuming capture NOW!\n"); 
fflush(stdout); 
lastTime = currentTime; 

// Capture (roughly) five seconds of audio 
alcCaptureStart(captureDev); 
while (currentTime < (lastTime + 5)) 
{ 
  // Get the number of samples available 
  alcGetIntegerv(captureDev, ALC_CAPTURE_SAMPLES, 1, &samplesAvailable); 

  // Copy the samples to our capture buffer 
  if (samplesAvailable > 0) 
  { 
     alcCaptureSamples(captureDev, captureBufPtr, samplesAvailable); 
     samplesCaptured += samplesAvailable; 
     printf("Captured %d samples (adding %d)\r", samplesCaptured, 
        samplesAvailable); 
     fflush(stdout); 

     // Advance the buffer (two bytes per sample * number of samples) 
     captureBufPtr += samplesAvailable * 2; 
  } 

  // Wait for a bit 
  usleep(10000); 

  // Update the clock 
  currentTime = time(NULL); 
} 

printf("\nDone capturing.\n"); 
alcCaptureStop(captureDev); 

// Play back the captured data 
printf("Starting playback...\n"); 
fflush(stdout); 

// Generate an OpenAL buffer for the captured data 
alGenBuffers(1, &buffer); 
alGenSources(1, &source); 
alBufferData(buffer, AL_FORMAT_MONO16, captureBuffer,samplesCaptured*2, 8000); 
alSourcei(source, AL_BUFFER, buffer); 
alSourcePlay(source); 

// Wait for the source to stop playing 
playState = AL_PLAYING; 
while (playState == AL_PLAYING) 
{ 
  printf("  source %d is playing...\r", source); 
  fflush(stdout); 
  alGetSourcei(source, AL_SOURCE_STATE, &playState); 
  usleep(100000); 
} 
printf("\nDone with playback.\n"); 
fflush(stdout); 

// Shut down OpenAL 
alDeleteSources(1, &source); 
alDeleteBuffers(1, &buffer); 
alcMakeContextCurrent(NULL); 
alcDestroyContext(mainContext);   // the context must be destroyed before its device can be closed 
alcCloseDevice(mainDev); 
alcCaptureCloseDevice(captureDev); 
}
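The comment at the top of Example #7 asks for two standalone helpers: one that returns whatever the microphone has captured so far as an array, and one that plays such an array back. A minimal sketch of both, distilled from the code above (names are illustrative; 16-bit mono at the capture rate is assumed):

/* 1) Drain the samples accumulated on the capture device into 'out'; returns samples read.
      'out' must have room for the device's full capture buffer. */
ALint capture_pending(ALCdevice *captureDev, ALshort *out)
{
    ALint available = 0;
    alcGetIntegerv(captureDev, ALC_CAPTURE_SAMPLES, 1, &available);
    if (available > 0)
        alcCaptureSamples(captureDev, out, available);
    return available;
}

/* 2) Play 'count' mono 16-bit samples at 'rate' Hz on a throwaway buffer/source pair. */
void play_samples(const ALshort *samples, ALsizei count, ALsizei rate)
{
    ALuint buffer, source;
    alGenBuffers(1, &buffer);
    alGenSources(1, &source);
    alBufferData(buffer, AL_FORMAT_MONO16, samples, count * 2, rate);
    alSourcei(source, AL_BUFFER, buffer);
    alSourcePlay(source);
    /* The caller should poll AL_SOURCE_STATE until it leaves AL_PLAYING, then delete
       the source and buffer, as the playback section of Example #7 does. */
}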
Example #8
	~source() {
		SET_CONTEXT(ctx_);
		alSourceStop(src_); // stop playback before deleting the source
		alDeleteSources(1, &src_);
	}
Example #9
void Audio::deleteSoundSource(const SoundSourceID source){
	if (soundSources[source].sound != SOUND_NONE){
		alDeleteSources(1, &soundSources[source].source);
		soundSources[source].sound = SOUND_NONE;
	}
}
Example #10
void ALDevice::Stop(ALuint Source)
{
	alSourceStop(Source);
	alDeleteSources(1, &Source);
}
Example #11
Sound::~Sound() {
	alDeleteSources(1, &source);
}
Example #12
	AudioSource::~AudioSource()
	{
		alDeleteSources(1, &m_ID);
	}
Example #13
int main(int argc, char *argv[])
{
    enum WaveType wavetype = WT_Sine;
    ALuint source, buffer;
    ALint last_pos, num_loops;
    ALint max_loops = 4;
    ALint srate = -1;
    ALint tone_freq = 1000;
    ALCint dev_rate;
    ALenum state;
    int i;

    for(i = 1;i < argc;i++)
    {
        if(strcmp(argv[i], "-h") == 0 || strcmp(argv[i], "--help") == 0)
        {
            fprintf(stderr, "OpenAL Tone Generator\n"
"\n"
"Usage: %s <options>\n"
"\n"
"Available options:\n"
"  --help/-h                 This help text\n"
"  -t <seconds>              Time to play a tone (default 5 seconds)\n"
"  --waveform/-w <type>      Waveform type: sine (default), square, sawtooth,\n"
"                                triangle, impulse\n"
"  --freq/-f <hz>            Tone frequency (default 1000 hz)\n"
"  --srate/-s <sample rate>  Sampling rate (default output rate)\n",
                argv[0]
            );
            return 1;
        }
        else if(i+1 < argc && strcmp(argv[i], "-t") == 0)
        {
            i++;
            max_loops = atoi(argv[i]) - 1;
        }
        else if(i+1 < argc && (strcmp(argv[i], "--waveform") == 0 || strcmp(argv[i], "-w") == 0))
        {
            i++;
            if(strcmp(argv[i], "sine") == 0)
                wavetype = WT_Sine;
            else if(strcmp(argv[i], "square") == 0)
                wavetype = WT_Square;
            else if(strcmp(argv[i], "sawtooth") == 0)
                wavetype = WT_Sawtooth;
            else if(strcmp(argv[i], "triangle") == 0)
                wavetype = WT_Triangle;
            else if(strcmp(argv[i], "impulse") == 0)
                wavetype = WT_Impulse;
            else
                fprintf(stderr, "Unhandled waveform: %s\n", argv[i]);
        }
        else if(i+1 < argc && (strcmp(argv[i], "--freq") == 0 || strcmp(argv[i], "-f") == 0))
        {
            i++;
            tone_freq = atoi(argv[i]);
            if(tone_freq < 1)
            {
                fprintf(stderr, "Invalid tone frequency: %s (min: 1hz)\n", argv[i]);
                tone_freq = 1;
            }
        }
        else if(i+1 < argc && (strcmp(argv[i], "--srate") == 0 || strcmp(argv[i], "-s") == 0))
        {
            i++;
            srate = atoi(argv[i]);
            if(srate < 40)
            {
                fprintf(stderr, "Invalid sample rate: %s (min: 40hz)\n", argv[i]);
                srate = 40;
            }
        }
    }

    InitAL();

    if(!alIsExtensionPresent("AL_EXT_FLOAT32"))
    {
        fprintf(stderr, "Required AL_EXT_FLOAT32 extension not supported on this device!\n");
        CloseAL();
        return 1;
    }

    {
        ALCdevice *device = alcGetContextsDevice(alcGetCurrentContext());
        alcGetIntegerv(device, ALC_FREQUENCY, 1, &dev_rate);
        assert(alcGetError(device)==ALC_NO_ERROR && "Failed to get device sample rate");
    }
    if(srate < 0)
        srate = dev_rate;

    /* Load the sound into a buffer. */
    buffer = CreateWave(wavetype, tone_freq, srate);
    if(!buffer)
    {
        CloseAL();
        return 1;
    }

    printf("Playing %dhz %s-wave tone with %dhz sample rate and %dhz output, for %d second%s...\n",
           tone_freq, GetWaveTypeName(wavetype), srate, dev_rate, max_loops+1, max_loops?"s":"");
    fflush(stdout);

    /* Create the source to play the sound with. */
    source = 0;
    alGenSources(1, &source);
    alSourcei(source, AL_BUFFER, buffer);
    assert(alGetError()==AL_NO_ERROR && "Failed to setup sound source");

    /* Play the sound for a while. */
    num_loops = 0;
    last_pos = 0;
    alSourcei(source, AL_LOOPING, (max_loops > 0) ? AL_TRUE : AL_FALSE);
    alSourcePlay(source);
    do {
        ALint pos;
        al_nssleep(10000000);
        alGetSourcei(source, AL_SAMPLE_OFFSET, &pos);
        alGetSourcei(source, AL_SOURCE_STATE, &state);
        if(pos < last_pos && state == AL_PLAYING)
        {
            ++num_loops;
            if(num_loops >= max_loops)
                alSourcei(source, AL_LOOPING, AL_FALSE);
            printf("%d...\n", max_loops - num_loops + 1);
            fflush(stdout);
        }
        last_pos = pos;
    } while(alGetError() == AL_NO_ERROR && state == AL_PLAYING);

    /* All done. Delete resources, and close OpenAL. */
    alDeleteSources(1, &source);
    alDeleteBuffers(1, &buffer);

    /* Close up OpenAL. */
    CloseAL();

    return 0;
}
Example #14
/* The start_voice should, surprise, start the voice. For streaming voices, it
   should start polling the device and call _al_voice_update for audio data.
   For non-streaming voices, it should resume playing from the last set
   position */
static int _openal_start_voice(ALLEGRO_VOICE *voice)
{
   ALLEGRO_AL_DATA *ex_data = voice->extra;
   ALenum openal_err;

   /* playing a sample instead of a stream */
   if (!voice->is_streaming) {
      alSourcePlay(ex_data->source);
      if ((openal_err = alGetError()) != AL_NO_ERROR) {
         ALLEGRO_ERROR("Could not start voice: %s\n",
            openal_get_err_str(openal_err));
         return 1;
      }

      ALLEGRO_INFO("Starting voice\n");
      return 0;
   }

   {
      ex_data->buffer_size = voice->buffer_size;
      if (!ex_data->buffer_size) {
         switch (ex_data->format) {
            case AL_FORMAT_STEREO16:
               ex_data->buffer_size = preferred_frag_size * 4;
               break;
            case AL_FORMAT_STEREO8:
            case AL_FORMAT_MONO16:
               ex_data->buffer_size = preferred_frag_size * 2;
               break;
            default:
               ex_data->buffer_size = preferred_frag_size;
               break;
         }
      }

      ex_data->num_buffers = voice->num_buffers;
      if (!ex_data->num_buffers)
         ex_data->num_buffers = preferred_buf_count;

      alGenSources(1, &ex_data->source);
      if (alGetError() != AL_NO_ERROR)
         return 1;

      ex_data->buffers = al_malloc(sizeof(ALuint) * ex_data->num_buffers);
      if (!ex_data->buffers) {
         alSourcei(ex_data->source, AL_BUFFER, 0);
         alDeleteSources(1, &ex_data->source);
         return 1;
      }

      alGenBuffers(ex_data->num_buffers, ex_data->buffers);
      if (alGetError() != AL_NO_ERROR) {
         alSourcei(ex_data->source, AL_BUFFER, 0);
         alDeleteSources(1, &ex_data->source);
         al_free(ex_data->buffers);
         ex_data->buffers = NULL;
         return 1;
      }

      alSourcef(ex_data->source, AL_GAIN, 1.0f);
      if (alGetError() != AL_NO_ERROR) {
         alSourcei(ex_data->source, AL_BUFFER, 0);
         alDeleteSources(1, &ex_data->source);
         alDeleteBuffers(ex_data->num_buffers, ex_data->buffers);
         al_free(ex_data->buffers);
         ex_data->buffers = NULL;
         return 1;
      }

      ex_data->stopped = false;
      ex_data->thread = al_create_thread(_openal_update, (void *)voice);
      al_start_thread(ex_data->thread);
   }

   ALLEGRO_INFO("Starting voice\n");
   return 0;
}
Example #15
File: Sound.hpp  Project: fulezi/soleil
 virtual ~Sound(void)
 {
   stop();
   alDeleteSources(1, &_source);
   alDeleteBuffers(1, &_buffer);
 }
Example #16
AudioPlayer::~AudioPlayer() {
	pause();
	if(source)alDeleteSources(1, &source);
}
Example #17
SoundSource::~SoundSource()
{
	alDeleteSources(1, &mALSource);
	SoundGeneral::checkAlError("Deleting sound source.");
}
Example #18
static void _cleanup(music_context_t* context) {
    alSourceStop(context->source);
    alDeleteSources(1, &context->source);
    alDeleteBuffers(NUM_BUFFERS, context->buffers);
    ov_clear(&context->oggStream);
}
Example #19
	sound::~sound()
	{
		// Clean up buffers and sources
		alDeleteSources(1, &m_uiSource);
		alDeleteBuffers(1, &m_uiBuffer);
	}
Example #20
void Sound3D::KillALData()
{
	//alDeleteBuffers(1, &Buffer);
	alDeleteSources(1, &Source1);

}
Example #21
LLAudioChannelOpenAL::~LLAudioChannelOpenAL()
{
	cleanup();
	alDeleteSources(1, &mALSource);
}
Example #22
	ActiveSound::~ActiveSound()
	{
		delete mOgg;
		clear();
		alDeleteSources(1,&mSource);
	}
Example #23
File: testctx1.c  Project: Aye1/RVProject
int main(int argc, char *argv[])
{
	ALuint freq;
	ALenum format;
	ALvoid *data;
	ALsizei i, size;
	thread_id thread1, thread2;
	status_t status;
	
	/* listener parameters */
	ALfloat listenerOrientation[] = { 0.0f, 0.0f, 1.0f,  0.0f, 1.0f, 0.0f };
	ALfloat listenerPosition[] = { 0.0f, 0.0f, 0.0f };
	ALfloat listenerVelocity[] = { 0.0f, 0.0f, 0.0f };

	/* source parameters */
	ALfloat sourcePosition[] = { 0.0f, 0.0f, 1.0f };
	ALfloat sourceVelocity[] = { 0.0f, 0.0f, 0.0f };
	ALfloat sourcePitch = 1.0f;
	ALfloat sourceGain = 1.0f;



	/* initialize */
	print("Main: initialize");
	alInit((ALint *) &argc, (ALubyte **) argv);

	/* create context */
	print("Main: create context");
	context = alcCreateContext(22050, AL_FORMAT_STEREO16, 2048);

	/* lock the context */
	print("Main: make current");
	alcMakeCurrent(context);


	/* create buffers and sources */
	if (alGenBuffers(kNumBuffers, buffer) != kNumBuffers)
		quit("Can't create buffers");

	if (alGenSources(kNumSources, source) != kNumSources)
		quit("Can't create sources");

	/* load buffers with data */
	alutLoadWAV(kWaveFileName, &format, &data, &size, &freq);
	for (i = 0; i < kNumBuffers; i++) {
		alBufferData(buffer[i], format, data, size, freq);
	}
	free(data);


	/* initialize listener */
	alListenerfv(AL_POSITION, listenerPosition);
	alListenerfv(AL_VELOCITY, listenerVelocity);
	alListenerfv(AL_ORIENTATION, listenerOrientation);

	/* initialize sources */
	for (i = 0; i < kNumSources; i++) {
		alSourcefv(source[i], AL_POSITION, sourcePosition);
		alSourcefv(source[i], AL_VELOCITY, sourceVelocity);

		alSourcef(source[i], AL_PITCH, sourcePitch);
		alSourcef(source[i], AL_GAIN, sourceGain);

		alSourcei(source[i], AL_BUFFER, buffer[i % kNumBuffers]);
		alSourcei(source[i], AL_LOOPING, AL_TRUE);
	}

	/* start the sources */
	print("Main: play");
	for (i = 0; i < kNumSources; i++)
		alSourcePlay(source[i]);
	
	/* release the context */
	print("Main: release current");
	alcMakeCurrent(NULL);
	

	/* spawn two threads */
	print("Main: spawn thread 1");
	thread1 = spawn_thread(threadFunc1, "thread 1", B_NORMAL_PRIORITY, NULL);
	print("Main: spawn thread 2");
	thread2 = spawn_thread(threadFunc2, "thread 2", B_NORMAL_PRIORITY, NULL);

	/* resume the threads */	
	print("Main: resume thread 1");
	resume_thread(thread1);
	print("Main: resume thread 2");
	resume_thread(thread2);

	/* acquire context, snooze and release context */
	print("Main: make current");
	alcMakeCurrent(context);
	
	print("Main: snooze...");
	snooze(500000);

	print("Main: release current");
	alcMakeCurrent(NULL);
	

	/* wait until the threads end */
	print("Main: wait thread 1");
	wait_for_thread(thread1, &status);
	if (status != 0)
		print("Main: thread 1 failed?");
	print("Main: wait thread 2");
	wait_for_thread(thread2, &status);
	if (status != 0)
		print("Main: thread 2 failed?");


	/* acquire the context */
	print("Main: make current");
	alcMakeCurrent(context);

	/* delete buffers and sources */
	print("Main: delete sources");
	alDeleteSources(kNumSources, source);
	print("Main: delete buffers");
	alDeleteBuffers(kNumBuffers, buffer);

	/* release the context */
	print("Main: release current");
	alcMakeCurrent(NULL);
	
	
	/* shutdown */
	print("Main: delete context");
	alcDeleteContext(context);

	/* bye */
	print("Main: bye");
	alExit();
	
	return 0;
}
Example #24
/** Plays the audio data from the given file
 *  \param fileHandle,volume,onFinished,user_data see sound_PlayStream()
 *  \param streamBufferSize the size to use for the decoded audio buffers
 *  \param buffer_count the number of audio buffers to use
 *  \see sound_PlayStream() for details about the rest of the function
 *       parameters and other details.
 */
AUDIO_STREAM* sound_PlayStreamWithBuf(PHYSFS_file* fileHandle, float volume, void (*onFinished)(void*), void* user_data, size_t streamBufferSize, unsigned int buffer_count)
{
#if !defined(WZ_NOSOUND)
	AUDIO_STREAM* stream;
	ALuint*       buffers = alloca(sizeof(ALuint) * buffer_count);
	ALint error;
	unsigned int i;

	if ( !openal_initialized )
	{
		debug(LOG_WARNING, "OpenAL isn't initialized, not creating an audio stream");
		return NULL;
	}

	stream = malloc(sizeof(AUDIO_STREAM));
	if (stream == NULL)
	{
		debug(LOG_FATAL, "sound_PlayStream: Out of memory");
		abort();
		return NULL;
	}

	// Clear error codes
	alGetError();

	// Retrieve an OpenAL sound source
	alGenSources(1, &(stream->source));

	error = sound_GetError();
	if (error != AL_NO_ERROR)
	{
		// Failed to create OpenAL sound source, so bail out...
		debug(LOG_SOUND, "alGenSources failed, most likely out of sound sources");
		free(stream);
		return NULL;
	}

	stream->fileHandle = fileHandle;

	stream->decoder = sound_CreateOggVorbisDecoder(stream->fileHandle, false);
	if (stream->decoder == NULL)
	{
		debug(LOG_ERROR, "sound_PlayStream: Failed to open audio file for decoding");
		free(stream);
		return NULL;
	}

	stream->volume = volume;
	stream->bufferSize = streamBufferSize;

	alSourcef(stream->source, AL_GAIN, stream->volume);

	// HACK: this is a workaround for a bug in the 64bit implementation of OpenAL on GNU/Linux
	// The AL_PITCH value really should be 1.0.
	alSourcef(stream->source, AL_PITCH, 1.001f);

	// Create some OpenAL buffers to store the decoded data in
	alGenBuffers(buffer_count, buffers);
	sound_GetError();

	// Fill some buffers with audio data
	for (i = 0; i < buffer_count; ++i)
	{
		// Decode some audio data
		soundDataBuffer* soundBuffer = sound_DecodeOggVorbis(stream->decoder, stream->bufferSize);

		// If we actually decoded some data
		if (soundBuffer && soundBuffer->size > 0)
		{
			// Determine PCM data format
			ALenum format = (soundBuffer->channelCount == 1) ? AL_FORMAT_MONO16 : AL_FORMAT_STEREO16;

			// Copy the audio data into one of OpenAL's own buffers
			alBufferData(buffers[i], format, soundBuffer->data, soundBuffer->size, soundBuffer->frequency);
			sound_GetError();

			// Clean up our memory
			free(soundBuffer);
		}
		else
		{
			// If no data has been decoded we're probably at the end of our
			// stream. So cleanup the excess stuff here.

			// First remove the data buffer itself
			free(soundBuffer);

			// Then remove OpenAL's buffers
			alDeleteBuffers(buffer_count - i, &buffers[i]);
			sound_GetError();

			break;
		}
	}

	// Bail out if we didn't fill any buffers
	if (i == 0)
	{
		debug(LOG_ERROR, "Failed to fill buffers with decoded audio data!");

		// Destroy the decoder
		sound_DestroyOggVorbisDecoder(stream->decoder);

		// Destroy the OpenAL source
		alDeleteSources(1, &stream->source);

		// Free allocated memory
		free(stream);

		return NULL;
	}

	// Attach the OpenAL buffers to our OpenAL source
	// (i = the amount of buffers we worked on in the above for-loop)
	alSourceQueueBuffers(stream->source, i, buffers);
	sound_GetError();

	// Start playing the source
	alSourcePlay(stream->source);

	sound_GetError();

	// Set callback info
	stream->onFinished = onFinished;
	stream->user_data = user_data;

	// Prepend this stream to the linked list
	stream->next = active_streams;
	active_streams = stream;

	return stream;
#else
	return NULL;
#endif
}
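For context, a hypothetical call site for sound_PlayStreamWithBuf might look like the sketch below; the callback, the 16 KiB buffer size and the 4-buffer count are illustrative values, not taken from the project:

/* Illustrative usage only -- names and sizes are not from the original source. */
static void onStreamFinished(void *user_data)
{
	(void)user_data;  /* e.g. queue up the next track here */
}

static AUDIO_STREAM *playBackgroundTrack(PHYSFS_file *fileHandle)
{
	/* full volume, 16 KiB per decoded buffer, 4 buffers queued ahead */
	AUDIO_STREAM *stream = sound_PlayStreamWithBuf(fileHandle, 1.0f,
	                                               onStreamFinished, NULL,
	                                               16 * 1024, 4);
	if (stream == NULL)
	{
		/* stream could not be created; handle the error (the function already logged the cause) */
	}
	return stream;
}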
Example #25
void KillALData()
{
    alDeleteBuffers(1, &Buffer_dungeonWAV);
    alDeleteSources(1, &Source_dungeonWAV);
    alutExit();
}
Example #26
Source::~Source()
{
#ifdef ENABLE_OPENAL
	alDeleteSources(1, &source);
#endif
};
Example #27
File: source.c  Project: Murii/CLove
void audio_SourceCommon_free(audio_SourceCommon *source) {
    audio_SourceCommon_stop(source);
    alDeleteSources(1, &source->source);
}
Example #28
void sound_destroy_source(sound_source_t source) {
    sound_stop(source);
    alDeleteSources(1, &source);
}
Example #29
int main (int argc, const char * argv[]) {
	MyLoopPlayer player;
	
	// convert to an OpenAL-friendly format and read into memory
	CheckError(loadLoopIntoBuffer(&player),
			   "Couldn't load loop into buffer") ;
	
	// set up OpenAL buffer
	ALCdevice* alDevice = alcOpenDevice(NULL);
	CheckALError ("Couldn't open AL device"); // default device
	ALCcontext* alContext = alcCreateContext(alDevice, 0);
	CheckALError ("Couldn't open AL context");
	alcMakeContextCurrent (alContext);
	CheckALError ("Couldn't make AL context current");
	ALuint buffers[1];
	alGenBuffers(1, buffers);
	CheckALError ("Couldn't generate buffers");
	alBufferData(*buffers,
				 AL_FORMAT_MONO16,
				 player.sampleBuffer,
				 player.bufferSizeBytes,
				 player.dataFormat.mSampleRate);
	
	// AL copies the samples, so we can free them now
	free(player.sampleBuffer);
	
	// set up OpenAL source
	alGenSources(1, player.sources);
	CheckALError ("Couldn't generate sources");
	alSourcei(player.sources[0], AL_LOOPING, AL_TRUE);
	CheckALError ("Couldn't set source looping property");
	alSourcef(player.sources[0], AL_GAIN, AL_MAX_GAIN);
	CheckALError("Couldn't set source gain");
	updateSourceLocation(player);
	CheckALError ("Couldn't set initial source position");
	
	// connect buffer to source
	alSourcei(player.sources[0], AL_BUFFER, buffers[0]);
	CheckALError ("Couldn't connect buffer to source");
	
	// set up listener
	alListener3f (AL_POSITION, 0.0, 0.0, 0.0);
	CheckALError("Couldn't set listner position");
	
	//	ALfloat listenerOrientation[6]; // 3 vectors: forward x,y,z components, then up x,y,z
	//	listenerOrientation[2] = -1.0;
	//	listenerOrientation[0] = listenerOrientation [1] = 0.0;
	//	listenerOrientation[3] = listenerOrientation [4] =  listenerOrientation[5] = 0.0;
	//	alListenerfv (AL_ORIENTATION, listenerOrientation);
	
	// start playing
	// alSourcePlayv (1, player.sources);
	alSourcePlay(player.sources[0]);
	CheckALError ("Couldn't play");
	
	// and wait
	printf("Playing...\n");
	time_t startTime = time(NULL);
	do
	{
		// get next theta
		updateSourceLocation(player);
		CheckALError ("Couldn't set looping source position");
		CFRunLoopRunInMode(kCFRunLoopDefaultMode, 0.1, false);
	} while (difftime(time(NULL), startTime) < RUN_TIME);
	
	// cleanup:
	alSourceStop(player.sources[0]);
	alDeleteSources(1, player.sources);
	alDeleteBuffers(1, buffers);
	alcDestroyContext(alContext);
	alcCloseDevice(alDevice);
	printf ("Bottom of main\n");
}
Example #30
void utox_audio_thread(void *args) {
    ToxAV *av = args;
    const char *device_list, *output_device = NULL;
    void *audio_device = NULL;

    _Bool call[MAX_CALLS] = {0}, preview = 0;
    _Bool groups_audio[MAX_NUM_GROUPS] = {0};

    int perframe = (UTOX_DEFAULT_FRAME_A * UTOX_DEFAULT_SAMPLE_RATE_A) / 1000;
    uint8_t buf[perframe * 2 * UTOX_DEFAULT_AUDIO_CHANNELS]; //, dest[perframe * 2 * UTOX_DEFAULT_AUDIO_CHANNELS];
    memset(buf, 0, sizeof(buf));

    uint8_t audio_count = 0;
    _Bool record_on = 0;
#ifdef AUDIO_FILTERING
    debug("Audio Filtering");
#ifdef ALC_LOOPBACK_CAPTURE_SAMPLES
    debug(" and Echo cancellation");
#endif
    debug(" enabled in this build\n");
#endif

    debug("frame size: %u\n", perframe);

    device_list = alcGetString(NULL, ALC_CAPTURE_DEVICE_SPECIFIER);
    if (device_list) {
        audio_device = (void*)device_list;
        debug("uTox audio input device list:\n");
        while(*device_list) {
            debug("\t%s\n", device_list);
            postmessage(AUDIO_IN_DEVICE, UI_STRING_ID_INVALID, 0, (void*)device_list);
            device_list += strlen(device_list) + 1;
        }
    }

    postmessage(AUDIO_IN_DEVICE, STR_AUDIO_IN_NONE, 0, NULL);
    audio_detect();

    if (alcIsExtensionPresent(NULL, "ALC_ENUMERATE_ALL_EXT")) {
        device_list = alcGetString(NULL, ALC_ALL_DEVICES_SPECIFIER);
    } else {
        device_list = alcGetString(NULL, ALC_DEVICE_SPECIFIER);
    }

    if(device_list) {
        output_device = device_list;
        debug("uTox audio output device list:\n");
        while(*device_list) {
            debug("\t%s\n", device_list);
            postmessage(AUDIO_OUT_DEVICE, 0, 0, (void*)device_list);
            device_list += strlen(device_list) + 1;
        }
    }

    device_out = alcOpenDevice(output_device);
    if(!device_out) {
        debug("alcOpenDevice() failed\n");
        return;
    }

    int attrlist[] = {  ALC_FREQUENCY, UTOX_DEFAULT_SAMPLE_RATE_A,
                        ALC_INVALID
                     };

    context = alcCreateContext(device_out, attrlist);
    if(!alcMakeContextCurrent(context)) {
        debug("alcMakeContextCurrent() failed\n");
        alcCloseDevice(device_out);
        return;
    }

    alGenSources(countof(source), source);
    /* TODO hacky fix. This source list should be a VLA with a way to link sources to friends.
     * NO SRSLY don't leave this like this! */
    static ALuint ringSrc[UTOX_MAX_NUM_FRIENDS];
    alGenSources(UTOX_MAX_NUM_FRIENDS, ringSrc);

    /* Create buffer to store samples */
    ALuint RingBuffer;
    alGenBuffers(1, &RingBuffer);

    {   /* wrapped to keep this data on the stack... I think... */
        float frequency1 = 441.f;
        float frequency2 = 882.f;
        int seconds = 4;
        unsigned sample_rate = 22050;
        size_t buf_size = seconds * sample_rate * 2; //16 bit (2 bytes per sample)
        int16_t *samples = malloc(buf_size * sizeof(int16_t));
        if (!samples)
            return;

        /*Generate an electronic ringer sound that quickly alternates between two frequencies*/
        int index = 0;
        for(index = 0; index < buf_size; ++index) {
            if ((index / sample_rate) % 4 < 2) { //4-second ring cycle: first 2 seconds ring, the remaining 2 seconds are silence
                if((index / 1000) % 2 == 1) {
                    samples[index] = 5000 * sin((2.0 * 3.1415926 * frequency1) / sample_rate * index); //5000 = amplitude (volume level); it can range from zero to 32700
                } else {
                    samples[index] = 5000 * sin((2.0 * 3.1415926 * frequency2) / sample_rate * index);
                }
            } else {
                samples[index] = 0;
            }
        }

        alBufferData(RingBuffer, AL_FORMAT_MONO16, samples, buf_size, sample_rate);
        free(samples);
    }

    {
        unsigned int i;
        for (i = 0; i < UTOX_MAX_NUM_FRIENDS; ++i) {
            alSourcei(ringSrc[i], AL_LOOPING, AL_TRUE);
            alSourcei(ringSrc[i], AL_BUFFER, RingBuffer);
        }
    }
    Filter_Audio *f_a = NULL;

    audio_thread_init = 1;

    int16_t *preview_buffer = NULL;
    unsigned int preview_buffer_index = 0;
#define PREVIEW_BUFFER_SIZE (UTOX_DEFAULT_SAMPLE_RATE_A / 2)

    while(1) {
        if(audio_thread_msg) {
            TOX_MSG *m = &audio_msg;
            if(!m->msg) {
                break;
            }

            switch(m->msg) {
            case AUDIO_SET_INPUT: {
                audio_device = m->data;

                if(record_on) {
                    alccapturestop(device_in);
                    alccaptureclose(device_in);
                }

                if(audio_count) {
                    device_in = alcopencapture(audio_device);
                    if(!device_in) {
                        record_on = 0;
                    } else {
                        alccapturestart(device_in);
                        record_on = 1;
                    }
                }

                debug("set audio in\n");
                break;
            }

            case AUDIO_SET_OUTPUT: {
                output_device = m->data;

                ALCdevice *device = alcOpenDevice(output_device);
                if(!device) {
                    debug("alcOpenDevice() failed\n");
                    break;
                }

                ALCcontext *con = alcCreateContext(device, NULL);
                if(!alcMakeContextCurrent(con)) {
                    debug("alcMakeContextCurrent() failed\n");
                    alcCloseDevice(device);
                    break;
                }

                alcDestroyContext(context);
                alcCloseDevice(device_out);
                context = con;
                device_out = device;

                alGenSources(countof(source), source);
                alGenSources(MAX_CALLS, ringSrc);

                Tox *tox = toxav_get_tox(av);
                uint32_t num_chats = tox_count_chatlist(tox);

                if (num_chats != 0) {
                    int32_t chats[num_chats];
                    uint32_t max = tox_get_chatlist(tox, chats, num_chats);

                    unsigned int i;
                    for (i = 0; i < max; ++i) {
                        if (tox_group_get_type(tox, chats[i]) == TOX_GROUPCHAT_TYPE_AV) {
                            GROUPCHAT *g = &group[chats[i]];
                            alGenSources(g->peers, g->source);
                        }
                    }
                }

                debug("set audio out\n");
                break;
            }

            case AUDIO_PREVIEW_START: {
                preview = 1;
                audio_count++;
                preview_buffer = calloc(PREVIEW_BUFFER_SIZE, 2);
                preview_buffer_index = 0;
                if(!record_on) {
                    device_in = alcopencapture(audio_device);
                    if(device_in) {
                        alccapturestart(device_in);
                        record_on = 1;
                        debug("Starting Audio Preview\n");
                    }
                }
                break;
            }

            case AUDIO_START: {
                audio_count++;
                if(!record_on) {
                    device_in = alcopencapture(audio_device);
                    if(device_in) {
                        alccapturestart(device_in);
                        record_on = 1;
                        debug("Listening to audio\n");
                        yieldcpu(20);
                    }
                }
                break;
            }

            case GROUP_AUDIO_CALL_START: {
                break; // TODO, new groups API
                audio_count++;
                groups_audio[m->param1] = 1;
                if(!record_on) {
                    device_in = alcopencapture(audio_device);
                    if(device_in) {
                        alccapturestart(device_in);
                        record_on = 1;
                        debug("Starting Audio GroupCall\n");
                    }
                }
                break;
            }

            case AUDIO_PREVIEW_END: {
                preview = 0;
                audio_count--;
                free(preview_buffer);
                preview_buffer = NULL;
                if(!audio_count && record_on) {
                    alccapturestop(device_in);
                    alccaptureclose(device_in);
                    record_on = 0;
                    debug("Audio Preview Stopped\n");
                }
                break;
            }

            case AUDIO_END: {
                if(!call[m->param1]) {
                    break;
                }
                call[m->param1] = 0;
                audio_count--;
                if(!audio_count && record_on) {
                    alccapturestop(device_in);
                    alccaptureclose(device_in);
                    record_on = 0;
                    debug("stop\n");
                }
                break;
            }

            case GROUP_AUDIO_CALL_END: {
                break; // TODO, new groups API
                if(!groups_audio[m->param1]) {
                    break;
                }
                audio_count--;
                groups_audio[m->param1] = 0;
                if(!audio_count && record_on) {
                    alccapturestop(device_in);
                    alccaptureclose(device_in);
                    record_on = 0;
                    debug("stop\n");
                }
                break;
            }

            case AUDIO_PLAY_RINGTONE: {
                if(!audible_notifications_enabled) {
                    break;
                }
                alSourcePlay(ringSrc[m->param1]);
                break;
            }

            case AUDIO_STOP_RINGTONE: {
                ALint state;
                alGetSourcei(ringSrc[m->param1], AL_SOURCE_STATE, &state);
                if(state == AL_PLAYING) {
                    alSourceStop(ringSrc[m->param1]);
                }
                break;
            }
            }

            audio_thread_msg = 0;
        }

        // TODO move this code to filter_audio.c
#ifdef AUDIO_FILTERING
        if (!f_a && audio_filtering_enabled) {
            f_a = new_filter_audio(UTOX_DEFAULT_SAMPLE_RATE_A);
            if (!f_a) {
                audio_filtering_enabled = 0;
                debug("filter audio failed\n");
            } else {
                debug("filter audio on\n");
            }
        } else if (f_a && !audio_filtering_enabled) {
            kill_filter_audio(f_a);
            f_a = NULL;
            debug("filter audio off\n");
        }
#else
        if (audio_filtering_enabled) {
            audio_filtering_enabled = 0;
        }
#endif

        _Bool sleep = 1;

        if(record_on) {
            ALint samples;
            _Bool frame = 0;
            /* If we have a device_in we're on linux so we can just call OpenAL, otherwise we're on something else so
             * we'll need to call audio_frame() to add to the buffer for us. */
            if (device_in == (void*)1) {
                frame = audio_frame((void*)buf);
                if (frame) {
                    /* We have an audio frame to use, continue without sleeping. */
                    sleep = 0;
                }
            } else {
                alcGetIntegerv(device_in, ALC_CAPTURE_SAMPLES, sizeof(samples), &samples);
                if(samples >= perframe) {
                    alcCaptureSamples(device_in, buf, perframe);
                    frame = 1;
                    if (samples >= perframe * 2) {
                        sleep = 0;
                    }
                }
            }

#ifdef AUDIO_FILTERING
#ifdef ALC_LOOPBACK_CAPTURE_SAMPLES
            if (f_a && audio_filtering_enabled) {
                alcGetIntegerv(device_out, ALC_LOOPBACK_CAPTURE_SAMPLES, sizeof(samples), &samples);
                if(samples >= perframe) {
                    int16_t buffer[perframe];
                    alcCaptureSamplesLoopback(device_out, buffer, perframe);
                    pass_audio_output(f_a, buffer, perframe);
                    set_echo_delay_ms(f_a, UTOX_DEFAULT_FRAME_A);
                    if (samples >= perframe * 2) {
                        sleep = 0;
                    }
                }
            }
#endif
#endif

            if (frame) {
                _Bool voice = 1;
#ifdef AUDIO_FILTERING
                if (f_a) {
                    int ret = filter_audio(f_a, (int16_t*)buf, perframe);

                    if (ret == -1) {
                        debug("filter audio error\n");
                    }

                    if (ret == 0) {
                        voice = 0;
                    }
                }
#endif

                /* If push to talk, we don't have to do anything */
                if (!check_ptt_key()) {
                    voice = 0; //PTT is up, send nothing.
                }

                if (preview) {
                    if (preview_buffer_index + perframe > PREVIEW_BUFFER_SIZE) {
                        preview_buffer_index = 0;
                    }

                    sourceplaybuffer(0, preview_buffer + preview_buffer_index, perframe, UTOX_DEFAULT_AUDIO_CHANNELS, UTOX_DEFAULT_SAMPLE_RATE_A);
                    if (voice) {
                        memcpy(preview_buffer + preview_buffer_index, buf, perframe * sizeof(int16_t));
                    } else {
                        memset(preview_buffer + preview_buffer_index, 0, perframe * sizeof(int16_t));
                    }
                    preview_buffer_index += perframe;
                }

                if (voice) {
                    int i, active_call_count = 0;
                    for(i = 0; i < UTOX_MAX_NUM_FRIENDS; i++) {
                        if( UTOX_SEND_AUDIO(i) ) {
                            active_call_count++;
                            TOXAV_ERR_SEND_FRAME error = 0;
                            toxav_audio_send_frame(av, friend[i].number, (const int16_t *)buf, perframe, UTOX_DEFAULT_AUDIO_CHANNELS, UTOX_DEFAULT_SAMPLE_RATE_A, &error);
                            if (error) {
                                debug("toxav_send_audio error friend == %i, error ==  %i\n", i, error);
                            } else {
                                // debug("Send a frame to friend %i\n",i);
                                if (active_call_count >= UTOX_MAX_CALLS) {
                                    debug("We're calling more peers than allowed by UTOX_MAX_CALLS, This is a bug\n");
                                    break;
                                }
                            }
                        }
                    }

                    // TODO REMOVED until new groups api can be implemented.
                    /*Tox *tox = toxav_get_tox(av);
                    uint32_t num_chats = tox_count_chatlist(tox);

                    if (num_chats != 0) {
                        int32_t chats[num_chats];
                        uint32_t max = tox_get_chatlist(tox, chats, num_chats);
                        for (i = 0; i < max; ++i) {
                            if (groups_audio[chats[i]]) {
                                toxav_group_send_audio(tox, chats[i], (int16_t *)buf, perframe, UTOX_DEFAULT_AUDIO_CHANNELS, UTOX_DEFAULT_SAMPLE_RATE_A);
                            }
                        }
                    }*/
                }
            }
        }

        if (sleep) {
            yieldcpu(5);
        }
    }

    utox_filter_audio_kill(f_a);

    //missing some cleanup ?
    alDeleteSources(MAX_CALLS, ringSrc);
    alDeleteSources(countof(source), source);
    alDeleteBuffers(1, &RingBuffer);

    if(device_in) {
        if(record_on) {
            alcCaptureStop(device_in);
        }
        alcCaptureCloseDevice(device_in);
    }

    alcMakeContextCurrent(NULL);
    alcDestroyContext(context);
    alcCloseDevice(device_out);

    audio_thread_msg = 0;
    audio_thread_init = 0;
    debug("UTOX AUDIO:\tClean thread exit!\n");
}