extern "C" void writeSound(void) { int tmp; int ret = soundBufferLen; ao_play(snd_ao, (char*)soundFinalWave, ret); decode_pos_ms += (ret/(2*sndNumChannels) * 1000)/(float)sndSamplesPerSec; }
void au_playWait(int snd)
{
    if (!mute || effects) {
        // need to repeat this code because au_deinitEach() calls exit();
        default_driver = ao_default_driver_id();
        device = ao_open_live(default_driver, &format, NULL);
        ao_play(device, sounds[snd], sz[snd]);
        ao_close(device);
    }
}
int cAudio::WriteClip(unsigned char *buffer, int size)
{
    lt_debug("cAudio::%s buf 0x%p size %d\n", __func__, buffer, size);
    if (!adevice) {
        lt_info("%s: adevice not opened?\n", __func__);
        return 0;
    }
    ao_play(adevice, (char *)buffer, size);
    return size;
}
/**
 * For whatever reason, libao wants a non-const pointer. Let's hope
 * it does not write to the buffer, and use the union deconst hack to
 * work around this API misdesign.
 */
static int ao_play_deconst(ao_device *device, const void *output_samples,
                           uint_32 num_bytes)
{
    union {
        const void *in;
        void *out;
    } u;

    u.in = output_samples;
    return ao_play(device, u.out, num_bytes);
}
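A minimal call-site sketch for the wrapper above, assuming it lives in the same file and that an ao_device has already been opened elsewhere; the silence buffer and the flush_silence() helper are purely illustrative:

/* Hypothetical caller: a const buffer can now be handed to libao without a
 * cast at every call site. ao_play() returns 0 on failure and non-zero on
 * success, so the result is simply propagated. */
static const char silence[4096];    /* zero-initialized, illustrative only */

static int flush_silence(ao_device *dev)
{
    return ao_play_deconst(dev, silence, sizeof(silence));
}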
int audio_devices_write(audio_device_t *d, void *ptr, int nbytes)
{
    while (d != NULL) {
        if (ao_play(d->device, ptr, nbytes) == 0)
            return 0; /* error occurred */
        d = d->next_device;
    }
    return 1;
}
void sal_SubmitSamples(void *buff, int len)
{
    ao_play(ao_dev, buff, len);

    if (mSb+1 >= SOUND_BUFFER_COUNT) {
        mSb=0;
    } else {
        mSb++;
    }
}
void play(char *audio_buffer, int buff, bool pauseD)
{
    if ( pauseD )
        return;
#ifdef Q_WS_WIN
    if ( fmt.bits == 8 )
        convert_Signed_Unsigned_PCM(audio_buffer, buff);
#endif
    if ( !ao_play(AO_Device,audio_buffer,buff) )
        PlErr = true;
}
void beep(double freq, double duration)
{
    beep_init();

    /* If different from last call, re-create the sine wave */
    beep_buffer_generate_sine_data(&(g_beep.buffer), freq, duration);

    if (!ao_play(g_beep.device, g_beep.buffer.data, g_beep.buffer.len)) {
        fputs("error rendering sound sample, restarting audio system\n", stderr);
        beep_shutdown();
        beep_init();
    }
}
/* SNDDMA_Submit: send sound to device if buffer isn't really the dma buffer */
void SNDDMA_Submit(void)
{
    if (!snd_inited)
        return;

    /* ao_play returns success, not number of samples successfully output
     * unlike alsa or arts, so we can only assume that the whole buffer
     * made it out... though this makes updating si->dma->samplepos easy */
    if (ao_play(device, si->dma->buffer, si->dma->samples * samplesize) == 0) {
        Com_Printf("W: error occurred while playing buffer\n");
        ao_close(device);
        ao_shutdown();
        snd_inited = 0;
    }

    si->dma->samplepos += si->dma->samples;
}
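Because ao_play() reports only overall success or failure rather than a byte count, callers that need a write()-style interface have to treat each call as all-or-nothing, as the comment above notes. A minimal sketch under that assumption (the ao_write_all() name is illustrative, not part of libao):

/* Hands the whole buffer to libao and reports either the full length or -1;
 * ao_play() gives no partial-write information to be more precise about. */
static int ao_write_all(ao_device *dev, char *buf, uint_32 num_bytes)
{
    if (ao_play(dev, buf, num_bytes) == 0)
        return -1;              /* 0 means the write failed */
    return (int)num_bytes;      /* success: assume everything was played */
}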
void play_sampled (char *data, size_t len)
{
    ao_device *device = ao_open_live(DEFAULT, &format, NULL);
    if (!device) {
        LOGE("%p (could not open audio device)", (void *) (device));
        return;
    }
    if (!ao_play(device, data, len)) {
        LOGE("%p (device could not play sample)", (void *) (device));
    }
    if (!ao_close(device)) {
        LOGW("%p (could not close audio device)", (void *) (device));
    }
}
void *poly_gen_kernel(void *ptr)
{
    ((void)ptr);

    // We will modify this sample and present it as one frame
    int16_t *sample = calloc(poly_format->channels, sizeof(*sample));

    while(poly_playback == 1) {
        poly_next_frame(sample);

        // Send to sound device, block until frame requested
        uint32_t frame_size = (sizeof(int16_t) * poly_format->channels);
        ao_play(poly_card, (char *)sample, frame_size);
    }

    free(sample);
    return NULL;
}
static void* play_thread(void *data)
{
    int err;
    off_t off;
    unsigned char* audio;
    size_t size;
    fm_player_t *pl = (fm_player_t*) data;

    while (pl->status != FM_PLAYER_STOP) {
        pthread_mutex_lock(&pl->mutex_status);
        while (pl->status == FM_PLAYER_PAUSE) {
            pthread_cond_wait(&pl->cond_play, &pl->mutex_status);
        }
        pthread_mutex_unlock(&pl->mutex_status);

        if (pl->status == FM_PLAYER_STOP) {
            break;
        }

        err = mpg123_decode_frame(pl->mh, &off, &audio, &size);
        switch (err) {
            case MPG123_OK:
                ao_play(pl->dev, (char*) audio, size);
                break;
            case MPG123_NEED_MORE:
                if (pthread_kill(pl->tid_dl, 0) == 0) {
                    pthread_mutex_lock(&pl->mutex_status);
                    pthread_cond_wait(&pl->cond_play, &pl->mutex_status);
                    pthread_mutex_unlock(&pl->mutex_status);
                } else {
                    if (pl->tid_ack > 0) {
                        pthread_kill(pl->tid_ack, pl->sig_ack);
                    }
                    return pl;
                }
                break;
            case MPG123_NEW_FORMAT:
                break;
            default:
                fprintf(stderr, "mpg123 decode return: %d\n", err);
                break;
        }
    }
    return pl;
}
void play_note(ao_device *device, double freq, double duration)
{
    unsigned long i;
    size_t length = SAMPLE_RATE * duration;
    int16_t *audio = malloc(length*2);

    for (i=0; i<length; i++) {
        double value = sin(i * 2 * M_PI * (freq / (double)SAMPLE_RATE));
        audio[i] = (int16_t)(value * 0x7FFF);
    }

    ao_play(device, (char*)audio, length*2);
    free(audio);
}
int main(int argc, char** argv)
{
    int d,x;
    char *samps;

    //initialise AO
    ao_device *ao;
    ao_sample_format form;
    ao_initialize();
    d=ao_default_driver_id();
    form.bits=8;
    form.rate=DDS_FREQ;
    form.channels=1;
    form.byte_format=AO_FMT_NATIVE;
#ifdef TO_FILE
    ao=ao_open_file(ao_driver_id("wav"),"music.wav",1,&form,NULL);
#else
    ao=ao_open_live(d,&form,NULL);
    if (ao==NULL) {
        printf("Error opening device %d\n", d);
        exit(0);
    }
#endif
    //Libao is opened and initialised now.

    //allocate sample buffer
    samps=malloc(AO_BUFF);
    d=0;
    //reset player
    player_reset();
    while(1) {
        //generate AO_BUFF samples, advance the player 60 times per second
        for (x=0; x<AO_BUFF; x++) {
            d++;
            if (d>(DDS_FREQ/PLAYER_FREQ)) {
                d=0;
                player_tick();
            }
            samps[x]=dds_get_next_sample()<<5;
        }
        //let libao play the samples
        ao_play(ao,samps,AO_BUFF);
    }
    //never reached because of the while() above
    ao_close(ao);
    ao_shutdown();
}
void mp3_play()
{
    ao_device *device;
    mpg123_handle *mh;
    size_t done;

    if (mp3_index == 0) {
        device = mp3_device_0;
        mh = mh_0;
    } else {
        device = mp3_device_1;
        mh = mh_1;
    }

    if (mpg123_read(mh, (unsigned char *) mp3_buffer, mp3_buffer_size, &done) == MPG123_OK) {
        ao_play(device, mp3_buffer, done);
    }
}
int mpeg_play(int fd)
{
    int rv = -1;
    size_t bytes;

    if((handle != NULL) && (fd != -1)) {
        if(mpg123_read(handle->mh, handle->buf, handle->bufsize, &bytes) == MPG123_OK) {
            if(ao_play(handle->device, (char *)handle->buf, bytes) != 0) {
                rv = 0;
            }
        }
    }
    return rv;
}
int play_aplayer(struct aplayer_t *player)
{
    // unpacks the various attributes from the player
    // structure that are going to be used
    ao_device *device = player->device;
    int stream_id = player->stream_id;
    AVFrame *frame = player->frame;
    AVFormatContext *container = player->container;
    AVCodecContext *codec_ctx = player->codec_ctx;

    // sets the running flag so that the player starts
    // in that state
    player->running = 1;

    // initializes the flag indicating if the frame processing
    // has been finished and then iterates over the various packets
    // to try to decode the various frames
    int frame_finished = 0;
    for(int i = 0; i < 300; i++) { //while(player->running) {
        // reads a frame from the container file and checks
        // if a valid one was returned, in case not breaks
        // the loop (end of the file)
        int result = av_read_frame(container, &player->packet);
        if(result < 0) { break; }

        // checks if the stream index of the current packet
        // is the same as the just detected audio stream
        if(player->packet.stream_index != stream_id) { continue; }

        // decodes the current packet as an audio packet with
        // the current codec context and in case the frame is
        // not finished continues the loop, otherwise plays the
        // frame using the ao library
        avcodec_decode_audio4(codec_ctx, frame, &frame_finished, &player->packet);
        if(!frame_finished) { continue; }
        ao_play(device, (char *) frame->extended_data[0], frame->linesize[0]);
        av_free_packet(&player->packet);
    }

    return 0;
}
static PyObject *
py_ao_play(PyObject *self, PyObject *args)
{
    char *output_samples;
    uint_32 num_bytes = 0;
    int len;
    ao_Object *ao_self = (ao_Object *) self;

    if (!(PyArg_ParseTuple(args, "s#|l", &output_samples, &len, &num_bytes)))
        return NULL;
    if (num_bytes == 0)
        num_bytes = len;

    Py_BEGIN_ALLOW_THREADS
    ao_play(ao_self->dev, output_samples, num_bytes);
    Py_END_ALLOW_THREADS

    Py_INCREF(Py_None);
    return Py_None;
}
static void start_sound(gpointer ok)
{
    size_t done;

    G_LOCK(sound);
    if (mh == NULL || dev == NULL || sound_file == NULL) {
        G_UNLOCK(sound);
        return;
    }
    mpg123_seek(mh, 0, SEEK_SET);
    while (play && (*((gboolean *)ok)) &&
           mpg123_read(mh, (unsigned char *)buffer, buffer_size, &done) == MPG123_OK)
        ao_play(dev, buffer, done);
    G_UNLOCK(sound);
}
void audio_play()
{
    if (paused) {
        updateok = true;
        return;
    }

    bufsize = 2 * channels * (conf.audio_sample_rate / framerate);

    if (conf.audio_api == 0) { // SDL
#if SDL_VERSION_ATLEAST(2,0,4)
        SDL_QueueAudio(dev, (const void*)audiobuf, bufsize);
        // Clear the audio queue arbitrarily to avoid it backing up too far
        if (SDL_GetQueuedAudioSize(dev) > (Uint32)(bufsize * 3)) {
            SDL_ClearQueuedAudio(dev);
        }
#endif
    }
#ifndef _MINGW
    else if (conf.audio_api == 1) { // libao
        ao_play(aodevice, (char*)audiobuf, bufsize);
    }
#endif
    updateok = true;
}
void play( void *output_samples, uint32_t num_bytes )
{
    ao_play( dev, output_samples, num_bytes );
    return;

    pthread_mutex_lock( &bufmutex );
    memcpy( output_buffer, output_samples, num_bytes );
    bytes_in_buffer = num_bytes;
    buffer_ready = 1;

    // Send signal
    debugp( DEBUGP_DEFAULT, 7, "signaling\n" );
    pthread_cond_signal( &buftrig );
    pthread_mutex_unlock( &bufmutex );

    pthread_mutex_lock( &bufmutex );
    pthread_cond_wait( &buftrig, &bufmutex );
    pthread_mutex_unlock( &bufmutex );
    return;
}
static int write_to_ao(struct MPContext *mpctx, void *data, int len, int flags,
                       double pts)
{
    if (mpctx->paused)
        return 0;
    struct ao *ao = mpctx->ao;
    double bps = ao->bps / mpctx->opts->playback_speed;
    int unitsize = ao->channels.num * af_fmt2bits(ao->format) / 8;
    ao->pts = pts;
    int played = ao_play(mpctx->ao, data, len, flags);
    assert(played <= len);
    assert(played % unitsize == 0);
    if (played > 0) {
        mpctx->shown_aframes += played / unitsize;
        mpctx->delay += played / bps;
        // Keep correct pts for remaining data - could be used to flush
        // remaining buffer when closing ao.
        ao->pts += played / bps;
        return played;
    }
    return 0;
}
static int
audio_output(shairplay_session_t *session, const void *buffer, int buflen)
{
    short *shortbuf;
    char tmpbuf[4096];
    int tmpbuflen, i;

    tmpbuflen = (buflen > sizeof(tmpbuf)) ? sizeof(tmpbuf) : buflen;
    memcpy(tmpbuf, buffer, tmpbuflen);
    if (ao_is_big_endian()) {
        for (i=0; i<tmpbuflen/2; i++) {
            char tmpch = tmpbuf[i*2];
            tmpbuf[i*2] = tmpbuf[i*2+1];
            tmpbuf[i*2+1] = tmpch;
        }
    }
    shortbuf = (short *)tmpbuf;
    for (i=0; i<tmpbuflen/2; i++) {
        shortbuf[i] = shortbuf[i] * session->volume;
    }
    ao_play(session->device, tmpbuf, tmpbuflen);
    return tmpbuflen;
}
// UNUSED FOR NOW
void play_musique(char recv_data[], size_t recv_len)
{
    ao_device *device;
    ao_sample_format format;
    int default_driver;

    /* -- Initialize -- */
    ao_initialize();

    /* -- Setup for default driver -- */
    default_driver = ao_default_driver_id();
    memset(&format, 0, sizeof(format));
    format.bits = 16;
    format.channels = 2;
    format.rate = 44100;
    format.byte_format = AO_FMT_LITTLE;

    /* -- Open driver -- */
    device = ao_open_live(default_driver, &format, NULL /* no options */);
    if (device == NULL) {
        fprintf(stderr, "Error opening device.\n");
        return;
    }

    /* sizeof(recv_data) on an array parameter only yields the size of a
       pointer, so the buffer length is passed in explicitly instead */
    ao_play(device, recv_data, recv_len);

    /* -- Close and shutdown -- */
    ao_close(device);
    ao_shutdown();
}
int audio_init(int frequency)
{
    int driver;
    ao_sample_format format = {.bits = UADE_BYTES_PER_SAMPLE * 8,
                               .channels = UADE_CHANNELS,
                               .rate = frequency,
                               .byte_format = AO_FMT_NATIVE,
                              };

    ao_initialize();
    driver = ao_default_driver_id();
    libao_device = ao_open_live(driver, &format, NULL);
    if (libao_device == NULL)
        fprintf(stderr, "Error opening device: errno %d\n", errno);

    return libao_device != NULL;
}

int audio_play(char *samples, int bytes)
{
    /* ao_play returns 0 on failure */
    return ao_play(libao_device, samples, bytes);
}
size_t write_callback(char *delivered_data, size_t size, size_t nmemb, void *user_data)
{
    if (!playback_pause) {
        int err;
        off_t frame_offset;
        unsigned char *audio;
        size_t done = 1;
        ao_sample_format format;
        int channels, encoding;
        long rate;

        mpg123_feed(mh, delivered_data, size * nmemb);
        while (done > 0) {
            err = mpg123_decode_frame(mh, &frame_offset, &audio, &done);
            switch (err) {
            case MPG123_NEW_FORMAT:
                mpg123_getformat(mh, &rate, &channels, &encoding);
                format.bits = mpg123_encsize(encoding) * 8;
                format.rate = rate;
                format.channels = channels;
                format.byte_format = AO_FMT_NATIVE;
                format.matrix = 0;
                device = ao_open_live(ao_default_driver_id(), &format, NULL);
                break;
            case MPG123_OK:
                ao_play(device, audio, done);
                break;
            default:
                break;
            }
        }
        return size * nmemb;
    }
    return CURL_WRITEFUNC_PAUSE;
}
size_t play_stream(void *buffer, size_t size, size_t nmemb, void *userp)
{
    int err;
    off_t frame_offset;
    unsigned char *audio;
    size_t done;
    ao_sample_format format;
    int channels, encoding;
    long rate;

    mpg123_feed(mh, (const unsigned char*) buffer, size * nmemb);
    do {
        err = mpg123_decode_frame(mh, &frame_offset, &audio, &done);
        switch(err) {
        case MPG123_NEW_FORMAT:
            mpg123_getformat(mh, &rate, &channels, &encoding);
            format.bits = mpg123_encsize(encoding) * BITS;
            format.rate = rate;
            format.channels = channels;
            format.byte_format = AO_FMT_NATIVE;
            format.matrix = 0;
            dev = ao_open_live(ao_default_driver_id(), &format, NULL);
            break;
        case MPG123_OK:
            ao_play(dev, (char*)audio, done);
            break;
        case MPG123_NEED_MORE:
            break;
        default:
            break;
        }
    } while(done > 0);

    return size * nmemb;
}
enum mad_flow output(void *data, struct mad_header const *header, struct mad_pcm *pcm)
{
    register int nsamples = pcm->length;
    mad_fixed_t const *left_ch = pcm->samples[0], *right_ch = pcm->samples[1];

    static unsigned char stream[1152*4]; /* 1152 because that's what mad has as a max;
                                            *4 because there are 4 distinct bytes per
                                            sample (in 2 channel case) */
    static unsigned int rate = 0;
    static int channels = 0;
    static struct audio_dither dither;

    register char * ptr = stream;
    register signed int sample;
    register mad_fixed_t tempsample;

    /* Semaphore operations */
    static struct sembuf read_sops = {0, -1, 0};
    static struct sembuf write_sops = {1, 1, 0};

    if(options.opt & MPG321_ENABLE_BUFFER)
    {
        semop(semarray,&read_sops,1);
        ptr = (Output_Queue+mad_decoder_position)->data;
        memcpy(&((Output_Queue+mad_decoder_position)->header),header,sizeof(struct mad_header));
    }else{
        /* We need to know information about the file before we can open the
           playdevice in some cases. So, we do it here. */
        if (!playdevice)
        {
            channels = MAD_NCHANNELS(header);
            rate = header->samplerate;
            open_ao_playdevice(header);
        }
        else if ((channels != MAD_NCHANNELS(header) || rate != header->samplerate)
                 && playdevice_is_live())
        {
            ao_close(playdevice);
            channels = MAD_NCHANNELS(header);
            rate = header->samplerate;
            open_ao_playdevice(header);
        }
    }

    static int peak_rate = 0;
    static unsigned short int peak = 0;

    if (pcm->channels == 2)
    {
        while (nsamples--)
        {
            tempsample = mad_f_mul(*left_ch++, options.volume);
            sample = (signed int) audio_linear_dither(16, tempsample, &dither);
            peak = abs(sample) > peak ? abs(sample) : peak;
#ifndef WORDS_BIGENDIAN
            *ptr++ = (unsigned char) (sample >> 0);
            *ptr++ = (unsigned char) (sample >> 8);
#else
            *ptr++ = (unsigned char) (sample >> 8);
            *ptr++ = (unsigned char) (sample >> 0);
#endif

            tempsample = mad_f_mul(*right_ch++, options.volume);
            sample = (signed int) audio_linear_dither(16, tempsample, &dither);
            peak = abs(sample) > peak ? abs(sample) : peak;
#ifndef WORDS_BIGENDIAN
            *ptr++ = (unsigned char) (sample >> 0);
            *ptr++ = (unsigned char) (sample >> 8);
#else
            *ptr++ = (unsigned char) (sample >> 8);
            *ptr++ = (unsigned char) (sample >> 0);
#endif
        }

        process_fft(stream, pcm->length * 4);

        if(options.opt & MPG321_ENABLE_BUFFER)
        {
            (Output_Queue+mad_decoder_position)->length = pcm->length * 4;
        }else
        {
            ao_play(playdevice, stream, pcm->length * 4);
        }
    }

    else if (options.opt & MPG321_FORCE_STEREO)
static gpointer
audio_output_task(gpointer arg)
{
    struct audio_output *ao = arg;

    g_mutex_lock(ao->mutex);

    while (1) {
        switch (ao->command) {
        case AO_COMMAND_NONE:
            break;

        case AO_COMMAND_ENABLE:
            ao_enable(ao);
            ao_command_finished(ao);
            break;

        case AO_COMMAND_DISABLE:
            ao_disable(ao);
            ao_command_finished(ao);
            break;

        case AO_COMMAND_OPEN:
            ao_open(ao);
            ao_command_finished(ao);
            break;

        case AO_COMMAND_REOPEN:
            ao_reopen(ao);
            ao_command_finished(ao);
            break;

        case AO_COMMAND_CLOSE:
            assert(ao->open);
            assert(ao->pipe != NULL);
            ao_close(ao, false);
            ao_command_finished(ao);
            break;

        case AO_COMMAND_PAUSE:
            if (!ao->open) {
                /* the output has failed after audio_output_all_pause()
                   has submitted the PAUSE command; bail out */
                ao_command_finished(ao);
                break;
            }

            ao_pause(ao);
            /* don't "break" here: this might cause ao_play() to be
               called when command==CLOSE ends the paused state -
               "continue" checks the new command first */
            continue;

        case AO_COMMAND_DRAIN:
            if (ao->open) {
                assert(ao->chunk == NULL);
                assert(music_pipe_peek(ao->pipe) == NULL);

                g_mutex_unlock(ao->mutex);
                ao_plugin_drain(ao->plugin, ao->data);
                g_mutex_lock(ao->mutex);
            }

            ao_command_finished(ao);
            continue;

        case AO_COMMAND_CANCEL:
            ao->chunk = NULL;

            if (ao->open) {
                g_mutex_unlock(ao->mutex);
                ao_plugin_cancel(ao->plugin, ao->data);
                g_mutex_lock(ao->mutex);
            }

            ao_command_finished(ao);
            continue;

        case AO_COMMAND_KILL:
            ao->chunk = NULL;
            ao_command_finished(ao);
            g_mutex_unlock(ao->mutex);
            return NULL;
        }

        if (ao->open && ao->allow_play && ao_play(ao))
            /* don't wait for an event if there are more chunks in the pipe */
            continue;

        if (ao->command == AO_COMMAND_NONE)
            g_cond_wait(ao->cond, ao->mutex);
    }
}
void
wave_out_play(void)
{
    struct audio_packet *packet;
    STREAM out;
    char outbuf[WAVEOUTBUF];
    int offset, len, i;
    static long prev_s, prev_us;
    unsigned int duration;
    struct timeval tv;
    int next_tick;

    if (g_reopened)
    {
        g_reopened = False;
        gettimeofday(&tv, NULL);
        prev_s = tv.tv_sec;
        prev_us = tv.tv_usec;
    }

    if (queue_lo == queue_hi)
    {
        g_dsp_busy = 0;
        return;
    }

    packet = &packet_queue[queue_lo];
    out = &packet->s;

    if (((queue_lo + 1) % MAX_QUEUE) != queue_hi)
    {
        next_tick = packet_queue[(queue_lo + 1) % MAX_QUEUE].tick;
    }
    else
    {
        next_tick = (packet->tick + 65535) % 65536;
    }

    len = 0;

    if (g_samplerate == 22050)
    {
        /* Resample to 44100 */
        for (i = 0; (i < ((WAVEOUTBUF / 4) * (3 - g_samplewidth))) && (out->p < out->end); i++)
        {
            /* On a stereo-channel we must make sure that left and right
               do not get mixed up, so we need to expand the sample-data
               with channels in mind: 1234 -> 12123434
               If we have a mono-channel, we can expand the data by
               simply doubling the sample-data: 1234 -> 11223344 */
            if (g_channels == 2)
                offset = ((i * 2) - (i & 1)) * g_samplewidth;
            else
                offset = (i * 2) * g_samplewidth;

            memcpy(&outbuf[offset], out->p, g_samplewidth);
            memcpy(&outbuf[g_channels * g_samplewidth + offset], out->p, g_samplewidth);

            out->p += g_samplewidth;
            len += 2 * g_samplewidth;
        }
    }
    else
    {
        len = (WAVEOUTBUF > (out->end - out->p)) ? (out->end - out->p) : WAVEOUTBUF;
        memcpy(outbuf, out->p, len);
        out->p += len;
    }

    ao_play(o_device, outbuf, len);

    gettimeofday(&tv, NULL);
    duration = ((tv.tv_sec - prev_s) * 1000000 + (tv.tv_usec - prev_us)) / 1000;

    if (packet->tick > next_tick)
        next_tick += 65536;

    if ((out->p == out->end) || duration > next_tick - packet->tick + 500)
    {
        prev_s = tv.tv_sec;
        prev_us = tv.tv_usec;

        if (abs((next_tick - packet->tick) - duration) > 20)
        {
            DEBUG(("duration: %d, calc: %d, ", duration, next_tick - packet->tick));
            DEBUG(("last: %d, is: %d, should: %d\n", packet->tick,
                   (packet->tick + duration) % 65536, next_tick % 65536));
        }

        /* Until all drivers are using the windows sound-ticks, we need to
           subtract the 50 ticks added later by rdpsnd.c */
        rdpsnd_send_completion(((packet->tick + duration) % 65536) - 50, packet->index);

        free(out->data);
        queue_lo = (queue_lo + 1) % MAX_QUEUE;
    }

    g_dsp_busy = 1;
    return;
}