/* Start recording or playback for an audio-unit instance.
 * usr is an au_instance_t*; returns 1 on success, otherwise raises an
 * R error via Rf_error (which does not return). */
static int audiounits_start(void *usr) {
    au_instance_t *ap = (au_instance_t*) usr;
    OSStatus err;
    if (ap->kind == AI_RECORDER) {
#if defined(MAC_OS_X_VERSION_10_5) && (MAC_OS_X_VERSION_MIN_REQUIRED>=MAC_OS_X_VERSION_10_5)
        /* 10.5+: start the HAL device via the IOProc ID stored at setup time */
        err = AudioDeviceStart(ap->inDev, ap->inIOProcID);
#else
        /* pre-10.5: start via the registered IOProc function pointer */
        err = AudioDeviceStart(ap->inDev, inputRenderProc);
#endif
        if (err) Rf_error("unable to start recording (%08x)", err);
    } else {
        AURenderCallbackStruct renderCallback = { outputRenderProc, usr };
        ap->done = NO;
        /* set format */
        ap->fmtOut.mSampleRate = ap->sample_rate;
        err = AudioUnitSetProperty(ap->outUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &ap->fmtOut, sizeof(ap->fmtOut));
        if (err) Rf_error("unable to set output audio format (%08x)", err);
        /* set callback */
        err = AudioUnitSetProperty(ap->outUnit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, 0, &renderCallback, sizeof(renderCallback));
        if (err) Rf_error("unable to register audio callback (%08x)", err);
        /* start audio */
        err = AudioOutputUnitStart(ap->outUnit);
        if (err) Rf_error("unable to start playback (%08x)", err);
    }
    return 1;
}
/* Begin playback on the device: register the playback IOProc, then start
 * the device.  Returns GVTrue on success.  On any failure all partial
 * state is rolled back (m_playing cleared, IOProc removed), so the device
 * is left exactly as it was found and a later retry cannot register the
 * proc twice. */
static GVBool gviHardwareStartPlayback(GVDevice device)
{
	GVIHardwareData * data = (GVIHardwareData *)device->m_data;
	OSStatus result;

	// now playing
	data->m_playing = GVTrue;

	// reset the playback clock
	data->m_playbackClock = 0;

	// add the IO proc
	result = AudioDeviceAddIOProc((AudioDeviceID)device->m_deviceID, gviHardwarePlaybackIOProc, device);
	if(result != noErr)
	{
		data->m_playing = GVFalse;
		return GVFalse;
	}

	// start it
	result = AudioDeviceStart((AudioDeviceID)device->m_deviceID, gviHardwarePlaybackIOProc);
	if(result != noErr)
	{
		// undo the AddIOProc above so the proc is not left registered
		AudioDeviceRemoveIOProc((AudioDeviceID)device->m_deviceID, gviHardwarePlaybackIOProc);
		data->m_playing = GVFalse;
		return GVFalse;
	}

	return GVTrue;
}
/* Install the supplied data callback on the output stream, start the
** device, and block until the callback signals that playback is done.
*/
static void macosx_play (get_audio_callback_t callback, AUDIO_OUT *audio_out, void *callback_data)
{	MACOSX_AUDIO_OUT *macosx_out = (MACOSX_AUDIO_OUT *) audio_out ;
	OSStatus err ;

	if (macosx_out == NULL)
	{	printf ("macosx_play : AUDIO_OUT is NULL.\n") ;
		return ;
		} ;

	if (macosx_out->magic != MACOSX_MAGIC)
	{	printf ("macosx_play : Bad magic number.\n") ;
		return ;
		} ;

	/* Wire up the data supplier before the device starts pulling. */
	macosx_out->callback = callback ;
	macosx_out->callback_data = callback_data ;

	err = AudioDeviceStart (macosx_out->device, macosx_audio_out_callback) ;
	if (err != noErr)
		printf ("AudioDeviceStart failed.\n") ;

	/* Poll every 10 ms until the callback marks playback complete. */
	while (macosx_out->done_playing == SF_FALSE)
		usleep (10 * 1000) ;

	return ;
} /* macosx_play */
/* Convert nbytes of 16-bit PCM in buf to floats, push them through the ring
 * of device buffers, and (re)start the CoreAudio device.
 * Returns the last OSStatus (0 on success); exits on unsupported 8-bit input. */
static int output_data(char *buf, int32 nbytes)
{
    OSStatus err = 0;
    int next_nextbuf, outbytes=nbytes;
    int inBytesPerQuant, max_quant, max_inbytes, out_quant, i;

    if (dpm.encoding & PE_16BIT){
        inBytesPerQuant = 2;
    }else{
        ctl->cmsg(CMSG_ERROR, VERB_NORMAL, "Sorry, not support 8bit sound.");
        exit(1);
    }
    /* NOTE(review): mBytesPerPacket/2 as the per-sample divisor presumably
       assumes the device packs two output samples per input sample — confirm
       against the device format setup. */
    max_quant = globals.deviceBufferSize / (globals.deviceFormat.mBytesPerPacket/2);
    max_inbytes = max_quant * inBytesPerQuant;

redo:
    /* Clamp this pass to one device buffer's worth of input bytes. */
    outbytes=nbytes;
    if( outbytes > max_inbytes ){
        outbytes = max_inbytes;
    }
    out_quant = outbytes/inBytesPerQuant;

    /* Advance the write index; spin while the ring is full. */
    next_nextbuf = globals.nextBuf+1;
    next_nextbuf %= BUFNUM;
    while( globals.currBuf==next_nextbuf ){ //queue full
        usleep(10000); //0.01sec
    }

    /* Convert S16 samples to normalized floats into the next ring slot. */
    for( i=0; i<out_quant; i++){
        ((float*)(globals.buffer[globals.nextBuf]))[i] = ((short*)buf)[i]/32768.0;
    }
    globals.buffer_len[globals.nextBuf] = outbytes;

    err = AudioDeviceStart(globals.device, appIOProc); // start playing sound through the device
    if (err != 0) goto Bail;

    /* Publish the filled slot only after the device is running. */
    globals.nextBuf = next_nextbuf;
    globals.soundPlaying = 1;  // set the playing status global to true

    nbytes -= outbytes;
    buf += outbytes;
    mac_buf_using_num++;
    globals.get_samples += outbytes/globals.deviceFormat.mBytesPerPacket;
    if( nbytes ){
        goto redo;  /* more input remains: fill another slot */
    }

Bail:
    return (err);
}
/* Tear down the output device: late-start playback if it never began,
 * wait for every queued buffer to drain, stop the device, and free all
 * per-buffer resources and the buffer arrays. */
void audev_close_device()
{
    OSStatus status;
    int bx;

    if (audevice == kAudioDeviceUnknown) {
        fprintf(stderr, "Unable to close sound device which was never opened.\n");
        return;
    }

    /* Signal the IO proc that shutdown is in progress. */
    bailing = TRUE;

    if (!started) {
        /* We never got to the point of starting playback. Do it now. */
        started = TRUE;
        status = AudioDeviceStart(audevice, PlaybackIOProc);
        if (status) {
            fprintf(stderr, "Could not late-start audio device.\n");
            return;
        }
    }

    /* Wait on each buffer to make sure they're all drained. */
    for (bx=0; bx<bufcount; bx++) {
        buffer_t *buffer = &rawbuffer[bx];
        pthread_mutex_lock(&buffer->mutex);
        while (buffer->full)
            pthread_cond_wait(&buffer->cond, &buffer->mutex);
        pthread_mutex_unlock(&buffer->mutex);
    }

    status = AudioDeviceStop(audevice, PlaybackIOProc);
    if (status) {
        fprintf(stderr, "Could not stop audio device; continuing.\n");
    }
    audevice = kAudioDeviceUnknown;

    /* Release each buffer's memory and synchronization primitives. */
    for (bx=0; bx<bufcount; bx++) {
        buffer_t *buffer = &rawbuffer[bx];
        if (buffer->buf) {
            free(buffer->buf);
            buffer->buf = NULL;
        }
        pthread_mutex_destroy(&buffer->mutex);
        pthread_cond_destroy(&buffer->cond);
    }
    free(rawbuffer);
    if (valbuffer) {
        free(valbuffer);
        valbuffer = NULL;
    }
}
/* Register the SPDIF IOProc on the device (only if not already active)
 * and start the device.  Returns TRUE on success, FALSE if either HAL
 * call fails. */
static inline gboolean
_io_proc_spdif_start (GstCoreAudio * core_audio)
{
  OSErr status;

  GST_DEBUG_OBJECT (core_audio,
      "osx ring buffer start ioproc ID: %p device_id %lu",
      core_audio->procID, (gulong) core_audio->device_id);

  if (!core_audio->io_proc_active) {
    /* Add IOProc callback */
    status = AudioDeviceCreateIOProcID (core_audio->device_id,
        (AudioDeviceIOProc) _io_proc_spdif, (void *) core_audio,
        &core_audio->procID);
    if (status != noErr) {
      GST_ERROR_OBJECT (core_audio->osxbuf,
          ":AudioDeviceCreateIOProcID failed: %d", (int) status);
      return FALSE;
    }
    core_audio->io_proc_active = TRUE;
  }

  core_audio->io_proc_needs_deactivation = FALSE;

  /* Start device */
  /* NOTE(review): if this start fails, io_proc_active remains TRUE even
     though the device is not running — confirm the caller handles that. */
  status = AudioDeviceStart (core_audio->device_id, core_audio->procID);
  if (status != noErr) {
    GST_ERROR_OBJECT (core_audio->osxbuf,
        "AudioDeviceStart failed: %d", (int) status);
    return FALSE;
  }
  return TRUE;
}
/* * QuartzCoreAudioBell * Play a tone using the CoreAudio API */ static void QuartzCoreAudioBell( int volume, // volume is % of max int pitch, // pitch is Hz int duration ) // duration is milliseconds { if (quartzAudioDevice == kAudioDeviceUnknown) return; pthread_mutex_lock(&data.lock); // fade previous sound, if any data.prevFrequency = data.frequency; data.prevAmplitude = data.amplitude; data.prevFrame = data.curFrame; // set new sound data.frequency = pitch; data.amplitude = volume / 100.0; data.curFrame = 0; data.totalFrames = (int)(data.sampleRate * duration / 1000.0); data.remainingFrames = data.totalFrames; if (! data.playing) { OSStatus status; status = AudioDeviceStart(quartzAudioDevice, QuartzAudioIOProc); if (status) { ErrorF("QuartzAudioBell: AudioDeviceStart returned %d\n", status); } else { data.playing = TRUE; } } pthread_mutex_unlock(&data.lock); }
/* Voice control: VOICE_ENABLE starts the output device's IOProc if it is
 * not running; VOICE_DISABLE stops it, unless the process is exiting.
 * Always returns 0. */
static int coreaudio_ctl_out (HWVoiceOut *hw, int cmd, ...)
{
    coreaudioVoiceOut *core = (coreaudioVoiceOut *) hw;
    OSStatus status;

    if (cmd == VOICE_ENABLE) {
        /* start playback */
        if (!isPlaying(core->outputDeviceID)) {
            status = AudioDeviceStart(core->outputDeviceID, audioDeviceIOProc);
            if (status != kAudioHardwareNoError) {
                coreaudio_logerr (status, "Could not resume playback\n");
            }
        }
    }
    else if (cmd == VOICE_DISABLE) {
        /* stop playback */
        if (!isAtexit && isPlaying(core->outputDeviceID)) {
            status = AudioDeviceStop(core->outputDeviceID, audioDeviceIOProc);
            if (status != kAudioHardwareNoError) {
                coreaudio_logerr (status, "Could not pause playback\n");
            }
        }
    }
    return 0;
}
// Register the HAL IO proc and start the device.
// Returns true only when both steps succeed.  Previously the device was
// started even after AudioDeviceAddIOProc failed, and a failed start left
// the IO proc registered; both cases are now handled.
tbool CDeviceCoreAudio::Start()
{
	OSErr err1 = AudioDeviceAddIOProc(mAudioDeviceID, HALIOProc, (void*)this);
	if (err1 != 0)
		return false;		// nothing registered, nothing to undo

	OSErr err2 = AudioDeviceStart(mAudioDeviceID, HALIOProc);
	if (err2 != 0) {
		// roll back the registration so a retry does not add a second proc
		AudioDeviceRemoveIOProc(mAudioDeviceID, HALIOProc);
		return false;
	}

	return true;
} // Start
/* Voice control for an input or output voice: VOICE_ENABLE starts the
 * device's registered IOProc (if not already running), VOICE_DISABLE stops
 * it (skipped when exiting).  Always returns 0. */
static int coreaudio_voice_ctl (coreaudioVoice* core, int cmd)
{
    OSStatus status;

    switch (cmd) {
    case VOICE_ENABLE:
        /* start playback */
        D("%s: %s started\n", __FUNCTION__, core->isInput ? "input" : "output");
        if (!coreaudio_voice_isPlaying(core)) {
            status = AudioDeviceStart(core->deviceID, core->ioproc);
            if (status != kAudioHardwareNoError) {
                coreaudio_logerr (status, "Could not resume playback\n");
            }
        }
        break;

    case VOICE_DISABLE:
        /* stop playback */
        D("%s: %s stopped\n", __FUNCTION__, core->isInput ? "input" : "output");
        /* NOTE(review): the stop is skipped during atexit — presumably the
           HAL is already tearing down at that point; confirm. */
        if (!conf.isAtexit) {
            if (coreaudio_voice_isPlaying(core)) {
                status = AudioDeviceStop(core->deviceID, core->ioproc);
                if (status != kAudioHardwareNoError) {
                    coreaudio_logerr (status, "Could not pause playback\n");
                }
            }
        }
        break;
    }
    return 0;
}
/* Begin capture on the device: register the capture IOProc, then start the
 * device.  Returns GVTrue on success.  On failure the IOProc registration
 * is rolled back and m_capturing is cleared, so the device is left exactly
 * as it was found. */
static GVBool gviHardwareStartCapture(GVDevice device)
{
	GVIHardwareData * data = (GVIHardwareData *)device->m_data;
	OSStatus result;

	// now capturing
	data->m_capturing = GVTrue;

	// add the IO proc
	result = AudioDeviceAddIOProc((AudioDeviceID)device->m_deviceID, gviHardwareCaptureIOProc, device);
	if(result != noErr)
	{
		data->m_capturing = GVFalse;
		return GVFalse;
	}

	// start it
	result = AudioDeviceStart((AudioDeviceID)device->m_deviceID, gviHardwareCaptureIOProc);
	if(result != noErr)
	{
		// undo the AddIOProc above so the proc is not left registered
		AudioDeviceRemoveIOProc((AudioDeviceID)device->m_deviceID, gviHardwareCaptureIOProc);
		data->m_capturing = GVFalse;
		return GVFalse;
	}

	return GVTrue;
}
int audio_write(struct mad_pcm *pcm, error_t *error) { if (!audio_initialized) { audio.channels = pcm->channels; audio.samplerate = pcm->samplerate; if (!audio_init(error)) { error_prepend(error, "Could not initialize audio"); return 0; } } if ((audio.channels != pcm->channels) || (audio.samplerate != pcm->samplerate)) { /* XXX */ error_set(error, "Changing the audio parameters is not supported"); return 0; } if (pcm->length != 1152) { error_printf(error, "Unknown number of samples in the mad buffer: %d", pcm->length); return 0; } if (pcm->channels != 2) { error_set(error, "Only stereo PCM data supported"); return 0; } int ret; float buf[1152 * pcm->channels]; float *ptr = buf; int i; mad_fixed_t const *left_ch, *right_ch; left_ch = pcm->samples[0]; right_ch = pcm->samples[1]; for (i = 0; i < pcm->length; i++) { signed int sample; *ptr++ = mad_scale(*left_ch++) / 32768.0; *ptr++ = mad_scale(*right_ch++) / 32768.0; } ret = rb_enqueue(&audio.rb, buf, 1152 * pcm->channels); if (ret == 0) { error_set(error, "Could not enqueue the PCM samples"); return 0; } if (!audio_started) { ret = AudioDeviceStart(audio.device, audio_play_proc); if (ret) { error_set(error, "Could not start the audio playback"); return 0; } audio_started = 1; } return 1; }
/* Resume output after a pause by restarting the device with the same
 * callback.  Returns 0 on success, -1 on failure. */
static int ca_unpause (void)
{
	if (AudioDeviceStart (device_id, ca_buffer_callback) != 0)
		return -1;

	state = OUTPUT_STATE_PLAYING;
	return 0;
}
void CCoreAudioDevice::Start() { if (!m_DeviceId || m_Started) return; OSStatus ret = AudioDeviceStart(m_DeviceId, m_IoProc); if (ret) CLog::Log(LOGERROR, "CCoreAudioDevice::Start: Unable to start device. Error = 0x%08x (%4.4s).", ret, CONVERT_OSSTATUS(ret)); else m_Started = true; }
void CCoreAudioDevice::Start() { if (!m_DeviceId || m_Started) return; OSStatus ret = AudioDeviceStart(m_DeviceId, m_IoProc); if (ret) CLog::Log(LOGERROR, "CCoreAudioDevice::Start: " "Unable to start device. Error = %s", GetError(ret).c_str()); else m_Started = true; }
/* Start the CoreAudio device(s) backing the stream:
 * - input only, or input and output on the same device: one combined proc;
 * - distinct input and output devices: start each with its own proc;
 * - output only: the combined proc on the output device.
 * Fix: in the two-device case, a failure starting the input device was
 * silently overwritten by the output device's (possibly successful)
 * result; the first error is now preserved. */
static PaError StartStream( PaStream *s )
{
    PaError err = paNoError;
    PaMacCoreStream *stream = (PaMacCoreStream*)s;

    if (stream->inputDevice != kAudioDeviceUnknown) {
        if (stream->outputDevice == kAudioDeviceUnknown ||
            stream->outputDevice == stream->inputDevice)
        {
            err = conv_err(AudioDeviceStart(stream->inputDevice, AudioIOProc));
        }
        else {
            err = conv_err(AudioDeviceStart(stream->inputDevice, AudioInputProc));
            PaError outErr = conv_err(AudioDeviceStart(stream->outputDevice, AudioOutputProc));
            /* do not let a successful output start mask an input failure */
            if (err == paNoError)
                err = outErr;
        }
    }
    else {
        err = conv_err(AudioDeviceStart(stream->outputDevice, AudioIOProc));
    }

    stream->isActive = 1;
    stream->isStopped = 0;
    return err;
}
// Begin routing audio from the input device to the output device.
// Allocates the per-cycle work buffer and the history ring buffer, then
// registers and starts one IOProc per device.  Requires both devices to
// exist and to share a sample rate; returns silently otherwise.
void AudioTee::start() {
  if (mInputDevice.mID == kAudioDeviceUnknown || mOutputDevice.mID == kAudioDeviceUnknown)
    return;
  if (mInputDevice.mFormat.mSampleRate != mOutputDevice.mFormat.mSampleRate) {
    printf("Error in AudioTee::Start() - sample rate mismatch: %f / %f\n", mInputDevice.mFormat.mSampleRate, mOutputDevice.mFormat.mSampleRate);
    return;
  }
  // Scratch buffer: one input-device buffer's worth of bytes, zero-filled.
  mWorkBuf = new Byte[mInputDevice.mBufferSizeFrames * mInputDevice.mFormat.mBytesPerFrame];
  memset(mWorkBuf, 0, mInputDevice.mBufferSizeFrames * mInputDevice.mFormat.mBytesPerFrame);
  // History ring sized to the next power of two covering mSecondsInHistoryBuffer.
  UInt32 framesInHistoryBuffer = NextPowerOfTwo(mInputDevice.mFormat.mSampleRate * mSecondsInHistoryBuffer);
  mHistoryBufferMaxByteSize = mInputDevice.mFormat.mBytesPerFrame * framesInHistoryBuffer;
  mHistBuf = new CARingBuffer();
  mHistBuf->Allocate(2, mInputDevice.mFormat.mBytesPerFrame, framesInHistoryBuffer);
  // NOTE(review): the (4 * 2) divisor presumably assumes 4-byte samples and
  // 2 channels — confirm against mInputDevice.mFormat before trusting the
  // reported seconds figure.
  printf("Initializing history buffer with byte capacity %u — %f seconds at %f kHz", mHistoryBufferMaxByteSize, (mHistoryBufferMaxByteSize / mInputDevice.mFormat.mSampleRate / (4 * 2)), mInputDevice.mFormat.mSampleRate);
  printf("Initializing work buffer with mBufferSizeFrames:%u and mBytesPerFrame %u\n", mInputDevice.mBufferSizeFrames, mInputDevice.mFormat.mBytesPerFrame);
  // Register and start the input-side tap.
  mInputIOProcID = NULL;
  AudioDeviceCreateIOProcID(mInputDevice.mID, InputIOProc, this, &mInputIOProcID);
  AudioDeviceStart(mInputDevice.mID, mInputIOProcID);
  // Register and start the output-side feeder.
  mOutputIOProc = OutputIOProc;
  mOutputIOProcID = NULL;
  AudioDeviceCreateIOProcID(mOutputDevice.mID, mOutputIOProc, this, &mOutputIOProcID);
  AudioDeviceStart(mOutputDevice.mID, mOutputIOProcID);
}
// Start the stream: verify it, then start the HAL callback on the output
// device and/or the input device (both halves for duplex).  Transitions
// the state to running on success.
// Returns error_none on success, error_warning if already running,
// error_fail on a bad stream, error_systemError on a HAL failure.
enum audio::orchestra::error audio::orchestra::api::Core::startStream() {
	// TODO : Check return ...
	audio::orchestra::Api::startStream();
	if (verifyStream() != audio::orchestra::error_none) {
		return audio::orchestra::error_fail;
	}
	if (m_state == audio::orchestra::state::running) {
		ATA_ERROR("the stream is already running!");
		return audio::orchestra::error_warning;
	}
	OSStatus result = noErr;
	if (    m_mode == audio::orchestra::mode_output
	     || m_mode == audio::orchestra::mode_duplex) {
		result = AudioDeviceStart(m_private->id[0], &audio::orchestra::api::Core::callbackEvent);
		if (result != noErr) {
			ATA_ERROR("system error (" << getErrorCode(result) << ") starting callback procedure on device (" << m_device[0] << ").");
			goto unlock;
		}
	}
	// The input device needs its own start only when it differs from the
	// output device (or in pure input mode).
	if (    m_mode == audio::orchestra::mode_input
	     || (    m_mode == audio::orchestra::mode_duplex
	          && m_device[0] != m_device[1])) {
		result = AudioDeviceStart(m_private->id[1], &audio::orchestra::api::Core::callbackEvent);
		if (result != noErr) {
			ATA_ERROR("system error starting input callback procedure on device (" << m_device[1] << ").");
			goto unlock;
		}
	}
	m_private->drainCounter = 0;
	m_private->internalDrain = false;
	m_state = audio::orchestra::state::running;
	ATA_VERBOSE("Set state as running");
unlock:
	if (result == noErr) {
		return audio::orchestra::error_none;
	}
	return audio::orchestra::error_systemError;
}
/* Start playback, lazily initialising the device on first use.
 * Returns 0 on success, -1 on failure. */
static int ca_play (void)
{
	if (!device_id && ca_init ())
		return -1;

	if (AudioDeviceStart (device_id, ca_buffer_callback))
		return -1;

	state = OUTPUT_STATE_PLAYING;
	return 0;
}
void start_audio() { if (m_outputProcState != kOff) return; m_logger(2, "Starting audio..."); m_outputProcState = Impl::kStarting; //AudioDeviceAddIOProc(m_ID, Impl::OutputIOProc, this); AudioDeviceStart(m_ID, Impl::OutputIOProc); m_logger(2, "Done!"); }
/* Start both halves of the audio stream: the output AudioUnit and the
 * input HAL device.  Errors are reported via verify_noerr only. */
void ca_start(phastream_t *as)
{
    OSStatus err;
    ca_dev *cadev = (ca_dev *) as->drvinfo;

    DBG_DYNA_AUDIO_DRV("** Starting audio stream\n");
    printf("** Starting audio stream\n");

    // power pc hack 1/2
    /* err is reset before each call so a stale status can never be
       reported by verify_noerr. */
    err = noErr;
    verify_noerr(err = AudioOutputUnitStart (cadev->outputAU));
    err = noErr;
    verify_noerr(err = AudioDeviceStart(get_audiodeviceid(cadev->inputID), input_proc));
}
/* Feed the next chunk of an audio file to the output device.
 * Reads into the spare buffer, re-applies the device stream format when the
 * file's sample rate or channel count changed, waits for the previous chunk
 * to drain, swaps buffers, publishes the new length, and (re)starts the
 * device.  Returns 0 on success, -1 on EOF or unsupported format. */
int audio_output_play(struct audio_file *fd)
{
	UInt32 len, size;
	int16_t *tmp;

	/* Read data in our temporary buffer */
	if ((len = audio_file_read(fd, abufnew, abuflen)) == 0)
		return (-1);

	if (fd->srate != afmt.mSampleRate || fd->channels != afmt.mChannelsPerFrame) {
		/* Sample rate or the amount of channels has changed */
		afmt.mSampleRate = fd->srate;
		afmt.mChannelsPerFrame = fd->channels;

		if (AudioDeviceSetProperty(adid, 0, 0, 0, kAudioDevicePropertyStreamFormat, sizeof afmt, &afmt) != 0) {
			/* Get current settings back */
			size = sizeof afmt;
			AudioDeviceGetProperty(adid, 0, false, kAudioDevicePropertyStreamFormat, &size, &afmt);
			gui_msgbar_warn(_("Sample rate or amount of channels not supported."));
			return (-1);
		}
	}

	/* XXX: Mutex not actually needed - only for the condvar */
	/* Wait until the IO proc has consumed the previous chunk
	   (it zeroes abufulen and signals abufdrained). */
	g_mutex_lock(abuflock);
	while (g_atomic_int_get(&abufulen) != 0)
		g_cond_wait(abufdrained, abuflock);
	g_mutex_unlock(abuflock);

	/* Toggle the buffers */
	tmp = abufcur;
	abufcur = abufnew;
	abufnew = tmp;

	/* Atomically set the usage length */
	g_atomic_int_set(&abufulen, len);

	/* Start processing of the data */
	AudioDeviceStart(adid, aprocid);

	return (0);
}
// ---------------------------------------------------------------------------- bool AudioCoreDriver::startPreRenderedBufferPlayback() // ---------------------------------------------------------------------------- { if (mInstanceId != 0) return false; if (!mIsInitialized) return false; stopPlayback(); mIsPlayingPreRenderedBuffer = true; memset(mSampleBuffer, 0, sizeof(short) * mNumSamplesInBuffer); AudioDeviceStart(mDeviceID, mPreRenderedBufferPlaybackProcID); return true; }
// start the device attached to the stream. // static int Stream_startSema(Stream *s, int semaIndex) { AudioDeviceIOProc ioProc= s->direction ? ioProcInput : ioProcOutput; debugf("stream %p[%d] startSema: %d\n", s, s->direction, semaIndex); s->semaphore= semaIndex; // can be zero if (checkError(AudioDeviceAddIOProc(s->id, ioProc, (void *)s), "Add", "ioProcOut")) return 0; if (checkError(AudioDeviceStart(s->id, ioProc), "DeviceStart", "ioProcOut")) { AudioDeviceRemoveIOProc(s->id, ioProc); return 0; } debugf("stream %p[%d] running\n", s, s->direction); return 1; }
/* resume playing, after audio_pause() */
static void audio_resume(void)
{
    OSErr err = noErr;

    if (!ao->paused)
        return;

    /* Start callback. */
    if (ao->b_digital)
    {
        /* digital passthrough: restart the raw HAL device */
        err = AudioDeviceStart(ao->i_selected_dev, ao->renderCallback);
        if (err != noErr)
            ao_msg(MSGT_AO, MSGL_WARN, "AudioDeviceStart failed: [%4.4s]\n", (char *)&err);
    }
    else
    {
        /* normal path: restart the output AudioUnit */
        err = AudioOutputUnitStart(ao->theOutputUnit);
        if (err != noErr)
            ao_msg(MSGT_AO,MSGL_WARN, "AudioOutputUnitStart returned [%4.4s]\n", (char *)&err);
    }

    ao->paused = 0;
}
// Register the audio IOProc (pre- and post-10.5 HAL APIs differ) and start
// the device.  The client callback is installed only after the device is
// running, under callbackLock, so the IOProc never observes a half-set-up
// callback.  Also starts the paired input device, if one exists.
// Returns true when this device (and the input device, if any) started.
bool start (AudioIODeviceCallback* cb)
{
    if (! started)
    {
        callback = nullptr;

        if (deviceID != 0)
        {
           #if MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_5
            if (OK (AudioDeviceAddIOProc (deviceID, audioIOProc, this)))
           #else
            if (OK (AudioDeviceCreateIOProcID (deviceID, audioIOProc, this, &audioProcID)))
           #endif
            {
                // NOTE(review): on the 10.5+ path the device is started with
                // the raw proc pointer rather than audioProcID — confirm
                // this is intended.
                if (OK (AudioDeviceStart (deviceID, audioIOProc)))
                {
                    started = true;
                }
                else
                {
                    // undo the registration when the device failed to start
                   #if MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_5
                    OK (AudioDeviceRemoveIOProc (deviceID, audioIOProc));
                   #else
                    OK (AudioDeviceDestroyIOProcID (deviceID, audioProcID));
                    audioProcID = 0;
                   #endif
                }
            }
        }
    }

    if (started)
    {
        const ScopedLock sl (callbackLock);
        callback = cb;
    }

    return started && (inputDevice == nullptr || inputDevice->start (cb));
}
/* Open the CoreAudio input device and the default output unit for audio
 * descriptor ad: configures the input stream format and sample rate,
 * registers the input IOProc, wires the output render callback, describes
 * the Mash PCM format, allocates the read/write ring buffers, and starts
 * both the input device and the output unit.
 * Returns 1 on success, 0 on any failure. */
int macosx_audio_open(audio_desc_t ad, audio_format* ifmt, audio_format *ofmt)
{
	OSStatus err = noErr;
	UInt32 propertySize;
	Boolean writable;

	obtained_ = false;
	add = ad;
	//dev[0] = devices[ad];
	UNUSED(ofmt);

	// Get the default input device ID.
	err = AudioHardwareGetPropertyInfo(kAudioHardwarePropertyDefaultInputDevice, &propertySize, &writable);
	if (err != noErr) {
		return 0;
	}
	err = AudioHardwareGetProperty(kAudioHardwarePropertyDefaultInputDevice, &propertySize, &(devices[ad].inputDeviceID_));
	if (err != noErr) {
		debug_msg("error kAudioHardwarePropertyDefaultInputDevice");
		return 0;
	}
	if (devices[ad].inputDeviceID_ == kAudioDeviceUnknown) {
		debug_msg("error kAudioDeviceUnknown");
		return 0;
	}

	// Get the input stream description.
	err = AudioDeviceGetPropertyInfo(devices[ad].inputDeviceID_, 0, true, kAudioDevicePropertyStreamFormat, &propertySize, &writable);
	if (err != noErr) {
		debug_msg("error AudioDeviceGetPropertyInfo");
		return 0;
	}
	err = AudioDeviceGetProperty(devices[ad].inputDeviceID_, 0, true, kAudioDevicePropertyStreamFormat, &propertySize, &(devices[ad].inputStreamBasicDescription_));
	//printf("inputStreamBasicDescription_.mBytesPerFrame %d\n", devices[add].inputStreamBasicDescription_);
	if (err != noErr) {
		debug_msg("error AudioDeviceGetProperty");
		return 0;
	}

	// Select little-endian samples.
	// NOTE(review): (flag & 0) is 0, so this clears ALL format flags, not
	// just the big-endian bit — confirm that is the intent.
	devices[ad].inputStreamBasicDescription_.mFormatFlags &= (kAudioFormatFlagIsBigEndian & 0);
	if (writable) {
		err = AudioDeviceSetProperty(devices[ad].inputDeviceID_, NULL, 0, true, kAudioDevicePropertyStreamFormat, sizeof(AudioStreamBasicDescription), &(devices[ad].inputStreamBasicDescription_));
		if (err != noErr) printf("err: AudioDeviceSetProperty: kAudioDevicePropertyStreamFormat\n");
	}

	/* set the buffer size of the device */
	/*
	int bufferByteSize = 8192;
	propertySize = sizeof(bufferByteSize);
	err = AudioDeviceSetProperty(devices[ad].inputDeviceID_, NULL, 0, true, kAudioDevicePropertyBufferSize, propertySize, &bufferByteSize);
	if (err != noErr) debug_msg("err: Set kAudioDevicePropertyBufferSize to %d\n", bufferByteSize);
	else debug_msg("sucessfully set kAudioDevicePropertyBufferSize to %d\n", bufferByteSize);
	*/

	// Set the device sample rate -- a temporary fix for the G5's
	// built-in audio and possibly other audio devices.
	Boolean IsInput = 0;
	int inChannel = 0;
	Float64 theAnswer = 44100;
	UInt32 theSize = sizeof(theAnswer);
	err = AudioDeviceSetProperty(devices[ad].inputDeviceID_, NULL, inChannel, IsInput, kAudioDevicePropertyNominalSampleRate, theSize, &theAnswer);
	if (err != noErr) {
		debug_msg("error AudioDeviceSetProperty\n");
		return 0;
	}
	debug_msg("Sample rate, %f\n", theAnswer);

#if defined(MAC_OS_X_VERSION_10_5) && (MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5)
	/* 10.5+: register the input IOProc by ID and open the default output unit. */
	err = AudioDeviceCreateIOProcID(devices[ad].inputDeviceID_, audioIOProc, (void*)NULL, &devices[ad].inputDeviceProcID_);
	if (err != noErr) {
		debug_msg("error AudioDeviceCreateIOProcID, %s\n", GetMacOSStatusCommentString(err));
		return 0;
	}
	err = OpenADefaultComponent(kAudioUnitType_Output, kAudioUnitSubType_DefaultOutput, &(devices[ad].outputUnit_));
	// The HAL AU maybe a better way to in the future...
	//err = OpenADefaultComponent(kAudioUnitType_Output, kAudioUnitSubType_HALOutput, &(devices[ad].outputUnit_));
	if (err != noErr) {
		debug_msg("error OpenADefaultComponent\n");
		return 0;
	}
#else
	// Register the AudioDeviceIOProc.
	err = AudioDeviceAddIOProc(devices[ad].inputDeviceID_, audioIOProc, NULL);
	if (err != noErr) {
		debug_msg("error AudioDeviceAddIOProc\n");
		return 0;
	}
	err = OpenDefaultAudioOutput(&(devices[ad].outputUnit_));
	if (err != noErr) {
		debug_msg("error OpenDefaultAudioOutput\n");
		return 0;
	}
#endif

	// Register a callback function to provide output data to the unit.
	devices[ad].input.inputProc = outputRenderer;
	devices[ad].input.inputProcRefCon = 0;
	/* These would be needed if HAL used
	 * UInt32 enableIO =1;
	err = AudioUnitSetProperty(devices[ad].outputUnit_, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, 0, (const void*)&enableIO, sizeof(UInt32));
	enableIO=0;
	err = AudioUnitSetProperty(devices[ad].outputUnit_, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, 1, (const void*)&enableIO, sizeof(UInt32));
	if (err != noErr) {
		debug_msg("error AudioUnitSetProperty EnableIO with error %ld: %s\n", err, GetMacOSStatusErrorString(err));
		return 0;
	}*/
#if defined(MAC_OS_X_VERSION_10_5) && (MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5)
	err = AudioUnitSetProperty(devices[ad].outputUnit_, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Global, 0, &(devices[ad].input), sizeof(AURenderCallbackStruct));
#else
	err = AudioUnitSetProperty(devices[ad].outputUnit_, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, 0, &(devices[ad].input), sizeof(AURenderCallbackStruct));
#endif
	if (err != noErr) {
		debug_msg("error AudioUnitSetProperty1 with error %ld: %s\n", err, GetMacOSStatusErrorString(err));
		return 0;
	}

	// Define the Mash stream description. Mash puts 20ms of data into each read
	// and write call. 20ms at 8000Hz equals 160 samples. Each sample is a u_char,
	// so that's 160 bytes. Mash uses 8-bit mu-law internally, so we need to convert
	// to 16-bit linear before using the audio data.
	devices[ad].mashStreamBasicDescription_.mSampleRate = 8000.0;
	//devices[ad].mashStreamBasicDescription_.mSampleRate = ifmt->sample_rate;
	devices[ad].mashStreamBasicDescription_.mFormatID = kAudioFormatLinearPCM;
#ifdef WORDS_BIGENDIAN
	devices[ad].mashStreamBasicDescription_.mFormatFlags =kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsBigEndian |kLinearPCMFormatFlagIsPacked;
#else
	devices[ad].mashStreamBasicDescription_.mFormatFlags =kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
#endif
	devices[ad].mashStreamBasicDescription_.mBytesPerPacket = 2;
	devices[ad].mashStreamBasicDescription_.mFramesPerPacket = 1;
	devices[ad].mashStreamBasicDescription_.mBytesPerFrame = 2;
	devices[ad].mashStreamBasicDescription_.mChannelsPerFrame = 1;
	devices[ad].mashStreamBasicDescription_.mBitsPerChannel = 16;

	// Inform the default output unit of our source format.
	err = AudioUnitSetProperty(devices[ad].outputUnit_, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &(devices[ad].mashStreamBasicDescription_), sizeof(AudioStreamBasicDescription));
	if (err != noErr) {
		debug_msg("error AudioUnitSetProperty2");
		printf("error setting output unit source format\n");
		return 0;
	}

	// check the stream format
	err = AudioUnitGetPropertyInfo(devices[ad].outputUnit_, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &propertySize, &writable);
	if (err != noErr) debug_msg("err getting propert info for kAudioUnitProperty_StreamFormat\n");
	err = AudioUnitGetProperty(devices[ad].outputUnit_, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &streamdesc_, &propertySize);
	if (err != noErr) debug_msg("err getting values for kAudioUnitProperty_StreamFormat\n");

	char name[128];
	audio_format_name(ifmt, name, 128);
	debug_msg("Requested ifmt %s\n",name);
	debug_msg("ifmt bytes pre block: %d\n",ifmt->bytes_per_block);

	// handle the requested format: the device path works in 16-bit linear
	if (ifmt->encoding != DEV_S16) {
		audio_format_change_encoding(ifmt, DEV_S16);
		debug_msg("Requested ifmt changed to %s\n",name);
		debug_msg("ifmt bytes pre block: %d\n",ifmt->bytes_per_block);
	}
	audio_format_name(ofmt, name, 128);
	debug_msg("Requested ofmt %s\n",name);
	debug_msg("ofmt bytes pre block: %d\n",ofmt->bytes_per_block);

	// Allocate the read buffer and Z delay line.
	//readBufferSize_ = 8192;
	readBufferSize_ = ifmt->bytes_per_block * ringBufferFactor_;
	//readBufferSize_ = 320;
	//printf("readBufferSize_ %d\n", readBufferSize_);
	readBuffer_ = malloc(sizeof(u_char)*readBufferSize_);
	bzero(readBuffer_, readBufferSize_ * sizeof(u_char));
	//memset(readBuffer_, PCMU_AUDIO_ZERO, readBufferSize_);
	//inputReadIndex_ = -1;
	inputReadIndex_ = 0;
	inputWriteIndex_ = 0;
	zLine_ = malloc(sizeof(double)*DECIM441_LENGTH / 80);
	availableInput_ = 0;

	// Allocate the write buffer.
	//writeBufferSize_ = 8000;
	writeBufferSize_ = ofmt->bytes_per_block * ringBufferFactor_;
	writeBuffer_ = malloc(sizeof(SInt16)*writeBufferSize_);
	bzero(writeBuffer_, writeBufferSize_ * sizeof(SInt16));
	outputReadIndex_ = 0;
	outputWriteIndex_ = 0;
	//outputWriteIndex_ = -1;

	// Start audio processing.
	err = AudioUnitInitialize(devices[ad].outputUnit_);
	if (err != noErr) {
		debug_msg("error AudioUnitInitialize\n");
		return 0;
	}
	err = AudioDeviceStart(devices[ad].inputDeviceID_, audioIOProc);
	if (err != noErr) {
		fprintf(stderr, "Input device error: AudioDeviceStart\n");
		return 0;
	}
	err = AudioOutputUnitStart(devices[ad].outputUnit_);
	if (err != noErr) {
		fprintf(stderr, "Output device error: AudioOutputUnitStart\n");
		return 0;
	}

	// Inform the default output unit of our source format.
	/*
	err = AudioUnitSetProperty(devices[ad].outputUnit_, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &(devices[ad].mashStreamBasicDescription_), sizeof(AudioStreamBasicDescription));
	if (err != noErr) {
		debug_msg("error AudioUnitSetProperty3");
		return 0;
	}
	*/
	return 1;
};
/* Initialize a CoreAudio voice (input or output): open the default device,
 * negotiate the buffer frame size against the device's supported range,
 * set the sample rate, register the IOProc and, for output voices, start
 * playback immediately.  Returns 0 on success, -1 on failure (deviceID is
 * reset to kAudioDeviceUnknown on the later failure paths). */
static int coreaudio_voice_init (coreaudioVoice* core, struct audsettings* as, int frameSize, AudioDeviceIOProc ioproc, void* hw, int input)
{
    OSStatus status;
    UInt32 propertySize;
    int err;
    int bits = 8;
    AudioValueRange frameRange;
    const char* typ = input ? "input" : "playback";

    core->isInput = input ? true : false;

    /* create mutex */
    err = pthread_mutex_init(&core->mutex, NULL);
    if (err) {
        dolog("Could not create mutex\nReason: %s\n", strerror (err));
        return -1;
    }

    /* NOTE(review): 'bits' is computed but not used below in this function. */
    if (as->fmt == AUD_FMT_S16 || as->fmt == AUD_FMT_U16) {
        bits = 16;
    }

    // TODO: audio_pcm_init_info (&hw->info, as);

    /* open default output device */
    /* note: we use DefaultSystemOutputDevice because DefaultOutputDevice seems to
     * always link to the internal speakers, and not the ones selected through system properties
     * go figure... */
    propertySize = sizeof(core->deviceID);
    status = AudioHardwareGetProperty(
        input ? kAudioHardwarePropertyDefaultInputDevice : kAudioHardwarePropertyDefaultSystemOutputDevice,
        &propertySize,
        &core->deviceID);
    if (status != kAudioHardwareNoError) {
        coreaudio_logerr2 (status, typ, "Could not get default %s device\n", typ);
        return -1;
    }
    if (core->deviceID == kAudioDeviceUnknown) {
        dolog ("Could not initialize %s - Unknown Audiodevice\n", typ);
        return -1;
    }

    /* get minimum and maximum buffer frame sizes */
    propertySize = sizeof(frameRange);
    status = AudioDeviceGetProperty(
        core->deviceID, 0, core->isInput,
        kAudioDevicePropertyBufferFrameSizeRange,
        &propertySize, &frameRange);
    if (status != kAudioHardwareNoError) {
        coreaudio_logerr2 (status, typ, "Could not get device buffer frame range\n");
        return -1;
    }

    /* clamp the requested frame size into the device's supported range */
    if (frameRange.mMinimum > frameSize) {
        core->bufferFrameSize = (UInt32) frameRange.mMinimum;
        dolog ("warning: Upsizing Output Buffer Frames to %f\n", frameRange.mMinimum);
    }
    else if (frameRange.mMaximum < frameSize) {
        core->bufferFrameSize = (UInt32) frameRange.mMaximum;
        dolog ("warning: Downsizing Output Buffer Frames to %f\n", frameRange.mMaximum);
    }
    else {
        core->bufferFrameSize = frameSize;
    }

    /* set Buffer Frame Size */
    propertySize = sizeof(core->bufferFrameSize);
    status = AudioDeviceSetProperty(
        core->deviceID, NULL, 0, core->isInput,
        kAudioDevicePropertyBufferFrameSize,
        propertySize, &core->bufferFrameSize);
    if (status != kAudioHardwareNoError) {
        coreaudio_logerr2 (status, typ, "Could not set device buffer frame size %ld\n", core->bufferFrameSize);
        return -1;
    }

    /* get Buffer Frame Size (the device may have adjusted it) */
    propertySize = sizeof(core->bufferFrameSize);
    status = AudioDeviceGetProperty(
        core->deviceID, 0, core->isInput,
        kAudioDevicePropertyBufferFrameSize,
        &propertySize, &core->bufferFrameSize);
    if (status != kAudioHardwareNoError) {
        coreaudio_logerr2 (status, typ, "Could not get device buffer frame size\n");
        return -1;
    }
    // TODO: hw->samples = *pNBuffers * core->bufferFrameSize;

    /* get StreamFormat */
    propertySize = sizeof(core->streamBasicDescription);
    status = AudioDeviceGetProperty(
        core->deviceID, 0, core->isInput,
        kAudioDevicePropertyStreamFormat,
        &propertySize, &core->streamBasicDescription);
    if (status != kAudioHardwareNoError) {
        coreaudio_logerr2 (status, typ, "Could not get Device Stream properties\n");
        core->deviceID = kAudioDeviceUnknown;
        return -1;
    }

    /* set Samplerate */
    core->streamBasicDescription.mSampleRate = (Float64) as->freq;
    propertySize = sizeof(core->streamBasicDescription);
    status = AudioDeviceSetProperty(
        core->deviceID, 0, 0, core->isInput,
        kAudioDevicePropertyStreamFormat,
        propertySize, &core->streamBasicDescription);
    if (status != kAudioHardwareNoError) {
        coreaudio_logerr2 (status, typ, "Could not set samplerate %d\n", as->freq);
        core->deviceID = kAudioDeviceUnknown;
        return -1;
    }

    /* set Callback */
    core->ioproc = ioproc;
    status = AudioDeviceAddIOProc(core->deviceID, ioproc, hw);
    if (status != kAudioHardwareNoError) {
        coreaudio_logerr2 (status, typ, "Could not set IOProc\n");
        core->deviceID = kAudioDeviceUnknown;
        return -1;
    }

    /* start Playback (output voices only; the IOProc is rolled back on failure) */
    if (!input && !coreaudio_voice_isPlaying(core)) {
        status = AudioDeviceStart(core->deviceID, core->ioproc);
        if (status != kAudioHardwareNoError) {
            coreaudio_logerr2 (status, typ, "Could not start playback\n");
            AudioDeviceRemoveIOProc(core->deviceID, core->ioproc);
            core->deviceID = kAudioDeviceUnknown;
            return -1;
        }
    }

    return 0;
}
/*
 * Initialize a playback voice on the default CoreAudio output device.
 *
 * Opens the default output device, negotiates a buffer frame size within
 * the device's supported range, forces the device sample rate to as->freq,
 * installs audioDeviceIOProc as the render callback and starts the device
 * if it is not already running.
 *
 * Returns 0 on success, -1 on failure.  On failure the mutex created at
 * the top is destroyed again (the original code leaked it on every
 * CoreAudio error path), and outputDeviceID is reset to
 * kAudioDeviceUnknown on the paths that had already claimed the device.
 */
static int coreaudio_init_out(HWVoiceOut *hw, struct audsettings *as,
                              void *drv_opaque)
{
    OSStatus status;
    coreaudioVoiceOut *core = (coreaudioVoiceOut *) hw;
    UInt32 propertySize;
    int err;
    const char *typ = "playback";
    AudioValueRange frameRange;
    CoreaudioConf *conf = drv_opaque;

    /* create mutex */
    err = pthread_mutex_init(&core->mutex, NULL);
    if (err) {
        dolog("Could not create mutex\nReason: %s\n", strerror (err));
        return -1;
    }

    audio_pcm_init_info (&hw->info, as);

    /* open default output device */
    propertySize = sizeof(core->outputDeviceID);
    status = AudioHardwareGetProperty(
        kAudioHardwarePropertyDefaultOutputDevice,
        &propertySize,
        &core->outputDeviceID);
    if (status != kAudioHardwareNoError) {
        coreaudio_logerr2 (status, typ,
                           "Could not get default output Device\n");
        goto err_mutex;
    }
    if (core->outputDeviceID == kAudioDeviceUnknown) {
        dolog ("Could not initialize %s - Unknown Audiodevice\n", typ);
        goto err_mutex;
    }

    /* get minimum and maximum buffer frame sizes */
    propertySize = sizeof(frameRange);
    status = AudioDeviceGetProperty(
        core->outputDeviceID,
        0,
        0,
        kAudioDevicePropertyBufferFrameSizeRange,
        &propertySize,
        &frameRange);
    if (status != kAudioHardwareNoError) {
        coreaudio_logerr2 (status, typ,
                           "Could not get device buffer frame range\n");
        goto err_mutex;
    }

    /* clamp the requested frame count into the device-supported range */
    if (frameRange.mMinimum > conf->buffer_frames) {
        core->audioDevicePropertyBufferFrameSize = (UInt32) frameRange.mMinimum;
        dolog ("warning: Upsizing Buffer Frames to %f\n", frameRange.mMinimum);
    } else if (frameRange.mMaximum < conf->buffer_frames) {
        core->audioDevicePropertyBufferFrameSize = (UInt32) frameRange.mMaximum;
        dolog ("warning: Downsizing Buffer Frames to %f\n", frameRange.mMaximum);
    } else {
        core->audioDevicePropertyBufferFrameSize = conf->buffer_frames;
    }

    /* set Buffer Frame Size */
    propertySize = sizeof(core->audioDevicePropertyBufferFrameSize);
    status = AudioDeviceSetProperty(
        core->outputDeviceID,
        NULL,
        0,
        false,
        kAudioDevicePropertyBufferFrameSize,
        propertySize,
        &core->audioDevicePropertyBufferFrameSize);
    if (status != kAudioHardwareNoError) {
        coreaudio_logerr2 (status, typ,
                           "Could not set device buffer frame size %" PRIu32 "\n",
                           (uint32_t)core->audioDevicePropertyBufferFrameSize);
        goto err_mutex;
    }

    /* get Buffer Frame Size — the device may not honour the requested value */
    propertySize = sizeof(core->audioDevicePropertyBufferFrameSize);
    status = AudioDeviceGetProperty(
        core->outputDeviceID,
        0,
        false,
        kAudioDevicePropertyBufferFrameSize,
        &propertySize,
        &core->audioDevicePropertyBufferFrameSize);
    if (status != kAudioHardwareNoError) {
        coreaudio_logerr2 (status, typ,
                           "Could not get device buffer frame size\n");
        goto err_mutex;
    }
    hw->samples = conf->nbuffers * core->audioDevicePropertyBufferFrameSize;

    /* get StreamFormat */
    propertySize = sizeof(core->outputStreamBasicDescription);
    status = AudioDeviceGetProperty(
        core->outputDeviceID,
        0,
        false,
        kAudioDevicePropertyStreamFormat,
        &propertySize,
        &core->outputStreamBasicDescription);
    if (status != kAudioHardwareNoError) {
        coreaudio_logerr2 (status, typ,
                           "Could not get Device Stream properties\n");
        goto err_device;
    }

    /* set Samplerate */
    core->outputStreamBasicDescription.mSampleRate = (Float64) as->freq;
    propertySize = sizeof(core->outputStreamBasicDescription);
    status = AudioDeviceSetProperty(
        core->outputDeviceID,
        NULL,
        0,
        false,
        kAudioDevicePropertyStreamFormat,
        propertySize,
        &core->outputStreamBasicDescription);
    if (status != kAudioHardwareNoError) {
        coreaudio_logerr2 (status, typ, "Could not set samplerate %d\n",
                           as->freq);
        goto err_device;
    }

    /* set Callback */
    status = AudioDeviceAddIOProc(core->outputDeviceID, audioDeviceIOProc, hw);
    if (status != kAudioHardwareNoError) {
        coreaudio_logerr2 (status, typ, "Could not set IOProc\n");
        goto err_device;
    }

    /* start Playback */
    if (!isPlaying(core->outputDeviceID)) {
        status = AudioDeviceStart(core->outputDeviceID, audioDeviceIOProc);
        if (status != kAudioHardwareNoError) {
            coreaudio_logerr2 (status, typ, "Could not start playback\n");
            AudioDeviceRemoveIOProc(core->outputDeviceID, audioDeviceIOProc);
            goto err_device;
        }
    }

    return 0;

err_device:
    /* these paths had already claimed the device; forget it again */
    core->outputDeviceID = kAudioDeviceUnknown;
err_mutex:
    pthread_mutex_destroy(&core->mutex);
    return -1;
}
/*
 * Main producer loop: repeatedly mix one buffer's worth of samples,
 * convert the 32-bit accumulator values to clamped floats, and hand the
 * result to the audio callback via the ring of shared buffers.  Once the
 * whole ring has been filled for the first time, playback is started.
 *
 * Returns TRUE if the mix function requested a clean stop, FALSE on any
 * error (device not open, device start failure, or an external bail-out).
 */
int audev_loop(mix_func_t mixfunc, generate_func_t genfunc, void *rock)
{
    int i, mixres;

    /* guard: nothing to do without an open device */
    if (audevice == kAudioDeviceUnknown) {
        fprintf(stderr, "Sound device is not open.\n");
        return FALSE;
    }

    for (;;) {
        buffer_t *buf;
        long *in;
        float *out;

        if (bailing) {
            return FALSE;
        }

        /* produce the next block of raw samples into valbuffer */
        mixres = mixfunc(valbuffer, genfunc, rock);
        if (mixres) {
            /* mixer asked us to stop; tell everyone else too */
            bailing = TRUE;
            return TRUE;
        }

        /* wait for the current ring slot to drain, then refill it */
        buf = &rawbuffer[filling];
        pthread_mutex_lock(&buf->mutex);
        while (buf->full) {
            pthread_cond_wait(&buf->cond, &buf->mutex);
        }

        /* clamp each sample to 16-bit range and scale into [-1, 1) */
        in = valbuffer;
        out = buf->buf;
        for (i = 0; i < samplesperbuf; i++, in++, out++) {
            long samp = *in;
            if (samp > 0x7FFF) {
                samp = 0x7FFF;
            } else if (samp < -0x7FFF) {
                samp = -0x7FFF;
            }
            /* multiply by 1/32768 */
            *out = ((float)samp) * (float)0.00003051757;
        }

        buf->full = TRUE;
        filling += 1;
        if (filling >= bufcount) {
            filling = 0;
        }
        pthread_mutex_unlock(&buf->mutex);

        /* When all the buffers are filled for the first time, we can
           start the device playback. */
        if (!started && filling == 0) {
            OSStatus status;
            started = TRUE;
            status = AudioDeviceStart(audevice, PlaybackIOProc);
            if (status) {
                fprintf(stderr, "Could not start audio device.\n");
                return FALSE;
            }
        }
    }
}