/* Open the CoreAudio unit backing this element and attach property
 * listeners.  The caller must already hold the osxbuf lock (see comment
 * below).  Returns FALSE if the underlying open or AudioUnitInitialize
 * fails; listener-registration failures are logged but non-fatal. */
gboolean
gst_core_audio_open (GstCoreAudio * core_audio)
{
  OSStatus status;

  /* core_audio->osxbuf is already locked at this point */
  /* Drop any previously cached caps; they will be re-probed. */
  core_audio->cached_caps_valid = FALSE;
  gst_caps_replace (&core_audio->cached_caps, NULL);

  if (!gst_core_audio_open_impl (core_audio))
    return FALSE;

  /* Add property listener so we are notified when the device's channel
   * layout or stream format changes behind our back. */
  status = AudioUnitAddPropertyListener (core_audio->audiounit,
      kAudioUnitProperty_AudioChannelLayout, _audio_unit_property_listener,
      core_audio);
  if (status != noErr) {
    /* Non-fatal: we can still run, we just won't see layout changes. */
    GST_ERROR_OBJECT (core_audio, "Failed to add audio channel layout property "
        "listener for AudioUnit: %d", (int) status);
  }
  status = AudioUnitAddPropertyListener (core_audio->audiounit,
      kAudioUnitProperty_StreamFormat, _audio_unit_property_listener,
      core_audio);
  if (status != noErr) {
    /* Non-fatal, same as above. */
    GST_ERROR_OBJECT (core_audio, "Failed to add stream format property "
        "listener for AudioUnit: %d", (int) status);
  }

  /* Initialize the AudioUnit. We keep the audio unit initialized early so that
   * we can probe the underlying device. */
  status = AudioUnitInitialize (core_audio->audiounit);
  if (status) {
    GST_ERROR_OBJECT (core_audio, "Failed to initialize AudioUnit: %d",
        (int) status);
    return FALSE;
  }

  return TRUE;
}
/* Configure the (already open) CoreAudio unit with the negotiated stream
 * format and bring it to the initialized state.  For source elements a
 * capture AudioBufferList is allocated as well.  On any failure that
 * buffer list (if allocated) is released again and FALSE is returned. */
gboolean
gst_core_audio_initialize (GstCoreAudio * core_audio,
    AudioStreamBasicDescription format, GstCaps * caps, gboolean is_passthrough)
{
  OSStatus os_err;
  guint32 frames;

  GST_DEBUG_OBJECT (core_audio,
      "Initializing: passthrough:%d caps:%" GST_PTR_FORMAT, is_passthrough,
      caps);

  if (!gst_core_audio_initialize_impl (core_audio, format, caps,
          is_passthrough, &frames))
    goto failed;

  /* Capture (src) needs a pre-allocated AudioBufferList to record into. */
  if (core_audio->is_src)
    core_audio->recBufferList =
        buffer_list_alloc (format.mChannelsPerFrame,
        frames * format.mBytesPerFrame);

  os_err = AudioUnitInitialize (core_audio->audiounit);
  if (os_err == noErr)
    return TRUE;

  GST_ERROR_OBJECT (core_audio, "Failed to initialise AudioUnit: %d",
      (int) os_err);

failed:
  /* Undo the capture buffer allocation, if any was made. */
  if (core_audio->is_src && core_audio->recBufferList) {
    buffer_list_free (core_audio->recBufferList);
    core_audio->recBufferList = NULL;
  }
  return FALSE;
}
bool auLoader::initialize() { /** Algorithm: **/ /** Call the AU's Initialize method **/ OSStatus err = AudioUnitInitialize(m_plugin); if(err != noErr) { debug(LOG_ERROR, "Could not initialize plugin"); return false; } /** Set up output buffers **/ m_buffer_list = (AudioBufferList *)malloc(offsetof(AudioBufferList, mBuffers[MAX_CHANNELS])); m_buffer_list->mNumberBuffers = MAX_CHANNELS; /** Connect input properties **/ AURenderCallbackStruct callback; callback.inputProc = this->inputCallback; callback.inputProcRefCon = this; /** Set up render notifications **/ err = AudioUnitSetProperty(m_plugin, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, 0, &callback, sizeof(callback)); if(err != noErr) { debug(LOG_WARN, "Could not configure inputs"); } err = AudioUnitAddRenderNotify(m_plugin, this->renderNotify, NULL); if(err != noErr) { debug(LOG_ERROR, "Could not set up render notification"); } debug(LOG_INFO, "AU initialized"); return true; }
void iOSCoreAudioInit() { if (!audioInstance) { OSErr err; // first, grab the default output AudioComponentDescription defaultOutputDescription; defaultOutputDescription.componentType = kAudioUnitType_Output; defaultOutputDescription.componentSubType = kAudioUnitSubType_RemoteIO; defaultOutputDescription.componentManufacturer = kAudioUnitManufacturer_Apple; defaultOutputDescription.componentFlags = 0; defaultOutputDescription.componentFlagsMask = 0; AudioComponent defaultOutput = AudioComponentFindNext(NULL, &defaultOutputDescription); // create our instance err = AudioComponentInstanceNew(defaultOutput, &audioInstance); if (err != noErr) { audioInstance = nil; return; } // create our callback so we can give it the audio data AURenderCallbackStruct input; input.inputProc = iOSCoreAudioCallback; input.inputProcRefCon = NULL; err = AudioUnitSetProperty(audioInstance, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, 0, &input, sizeof(input)); if (err != noErr) { AudioComponentInstanceDispose(audioInstance); audioInstance = nil; return; } // setup the audio format we'll be using (stereo pcm) AudioStreamBasicDescription streamFormat; memset(&streamFormat, 0, sizeof(streamFormat)); streamFormat.mSampleRate = SAMPLE_RATE; streamFormat.mFormatID = kAudioFormatLinearPCM; streamFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked; streamFormat.mBitsPerChannel = sizeof(AudioSampleType) * 8; streamFormat.mChannelsPerFrame = 2; streamFormat.mFramesPerPacket = 1; streamFormat.mBytesPerFrame = (streamFormat.mBitsPerChannel / 8) * streamFormat.mChannelsPerFrame; streamFormat.mBytesPerPacket = streamFormat.mBytesPerFrame * streamFormat.mFramesPerPacket; err = AudioUnitSetProperty(audioInstance, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &streamFormat, sizeof(AudioStreamBasicDescription)); if (err != noErr) { AudioComponentInstanceDispose(audioInstance); audioInstance = nil; return; } // k, all setup, so init err = 
AudioUnitInitialize(audioInstance); if (err != noErr) { AudioComponentInstanceDispose(audioInstance); audioInstance = nil; return; } // finally start playback err = AudioOutputUnitStart(audioInstance); if (err != noErr) { AudioUnitUninitialize(audioInstance); AudioComponentInstanceDispose(audioInstance); audioInstance = nil; return; } // we're good to go } }
/* Open a CoreAudio AUHAL input or output unit for Csound.
 *
 * Enumerates all system audio devices, prints them, optionally switches the
 * system default device to the one requested in parm, then creates and
 * configures an AUHAL unit (device selection, buffer size, stream format,
 * render/input callback) and starts it.  Returns 0.
 *
 * Fixes over the previous version:
 *  - CFStringGetCStringPtr() may return NULL (it is allowed to fail for any
 *    encoding); the result is now checked before strcpy()/printing.
 *  - The "out of range" warning on the output path passed an extra argument
 *    that read devinfo[-1] (CoreAudioDev is -1 on that branch) — an
 *    out-of-bounds access; the spurious argument is removed to match the
 *    input path.
 *  - psize is set to sizeof(double) before the nominal-sample-rate read
 *    instead of relying on sizeof(CFStringRef) happening to equal 8. */
int AuHAL_open(CSOUND *csound, const csRtAudioParams * parm,
               csdata *cdata, int isInput)
{
    UInt32 psize, devnum, devnos;
    AudioDeviceID dev;
    AudioDeviceID *sysdevs;
    AudioStreamBasicDescription format;
    int i;
    Device_Info *devinfo;
    UInt32 bufframes, nchnls;
    int devouts = 0, devins = 0;
    double srate;
    UInt32 enableIO, maxFPS;
    AudioComponent HALOutput;
    AudioComponentInstance *aunit;
    AudioComponentDescription cd = {kAudioUnitType_Output,
                                    kAudioUnitSubType_HALOutput,
                                    kAudioUnitManufacturer_Apple, 0, 0};
    AudioObjectPropertyAddress prop = {
      kAudioObjectPropertyName,
      kAudioObjectPropertyScopeGlobal,
      kAudioObjectPropertyElementMaster
    };
    CFStringRef devName;
    CFStringEncoding defaultEncoding = CFStringGetSystemEncoding();

    /* Remember the current system default device for this direction. */
    prop.mSelector = (isInput ?
                      kAudioHardwarePropertyDefaultInputDevice :
                      kAudioHardwarePropertyDefaultOutputDevice);
    psize = sizeof(AudioDeviceID);
    AudioObjectGetPropertyData(kAudioObjectSystemObject,
                               &prop, 0, NULL, &psize, &dev);
    if (isInput) cdata->defdevin = dev;
    else cdata->defdevout = dev;

    /* Enumerate every audio device in the system. */
    prop.mSelector = kAudioHardwarePropertyDevices;
    AudioObjectGetPropertyDataSize(kAudioObjectSystemObject,
                                   &prop, 0, NULL, &psize);
    devnos = psize / sizeof(AudioDeviceID);
    sysdevs = (AudioDeviceID *) malloc(psize);
    devinfo = (Device_Info *) malloc(devnos * sizeof(Device_Info));
    AudioObjectGetPropertyData(kAudioObjectSystemObject,
                               &prop, 0, NULL, &psize, sysdevs);
    cdata->devnos = devnos;

    /* Collect name and in/out channel counts for each device. */
    for (i = 0; (unsigned int) i < devnos; i++) {
      AudioBufferList *b;
      int devchannels, k, n;
      int numlists;
      const char *nameptr;

      psize = sizeof(CFStringRef);
      prop.mScope = kAudioObjectPropertyScopeGlobal;
      prop.mSelector = kAudioObjectPropertyName;
      AudioObjectGetPropertyData(sysdevs[i],
                                 &prop, 0, NULL, &psize, &devName);
      /* CFStringGetCStringPtr may legitimately return NULL; guard it. */
      nameptr = CFStringGetCStringPtr(devName, defaultEncoding);
      if (nameptr != NULL)
        strcpy(devinfo[i].name, nameptr);
      else
        devinfo[i].name[0] = '\0';
      CFRelease(devName);

      /* Input channel count: sum channels over the stream configuration. */
      devchannels = 0;
      prop.mScope = kAudioDevicePropertyScopeInput;
      prop.mSelector = kAudioDevicePropertyStreamConfiguration;
      AudioObjectGetPropertyDataSize(sysdevs[i], &prop, 0, NULL, &psize);
      b = (AudioBufferList *) malloc(psize);
      numlists = psize / sizeof(AudioBufferList);
      AudioObjectGetPropertyData(sysdevs[i], &prop, 0, NULL, &psize, b);
      for (n = 0; n < numlists; n++) {
        for (k = 0; (unsigned int) k < b[n].mNumberBuffers; k++)
          devchannels += b[n].mBuffers[k].mNumberChannels;
      }
      devinfo[i].inchannels = devchannels;
      if (devchannels) {
        devins++;
        devinfo[i].indevnum = devins;   /* 1-based input device index */
      } else devinfo[i].indevnum = -1;
      free(b);

      /* Output channel count, same procedure on the output scope. */
      devchannels = 0;
      prop.mScope = kAudioDevicePropertyScopeOutput;
      AudioObjectGetPropertyDataSize(sysdevs[i], &prop, 0, NULL, &psize);
      b = (AudioBufferList *) malloc(psize);
      numlists = psize / sizeof(AudioBufferList);
      AudioObjectGetPropertyData(sysdevs[i], &prop, 0, NULL, &psize, b);
      for (n = 0; n < numlists; n++) {
        for (k = 0; (unsigned int) k < b[n].mNumberBuffers; k++)
          devchannels += b[n].mBuffers[k].mNumberChannels;
      }
      devinfo[i].outchannels = devchannels;
      if (devchannels) {
        devouts++;
        devinfo[i].outdevnum = devouts; /* 1-based output device index */
      } else devinfo[i].outdevnum = -1;
      free(b);
    }

    if (cdata->disp)
      csound->Message(csound,
              "==========================================================\n");
    if (isInput)
      csound->Message(csound,
                      Str("AuHAL Module: found %d input device(s):\n"),
                      devins);
    else
      csound->Message(csound,
                      Str("AuHAL Module: found %d output device(s):\n"),
                      devouts);

    for (i = 0; (unsigned int) i < devnos; i++) {
      if (isInput) {
        if (devinfo[i].inchannels) {
          csound->Message(csound, Str("%d: %s (%d channels) \n"),
                          devinfo[i].indevnum, devinfo[i].name,
                          devinfo[i].inchannels);
        }
      } else {
        if (devinfo[i].outchannels)
          csound->Message(csound, Str("%d: %s (%d channels) \n"),
                          devinfo[i].outdevnum, devinfo[i].name,
                          devinfo[i].outchannels);
      }
    }

    if (parm->devName != NULL) devnum = atoi(parm->devName);
    else devnum = parm->devNum;

    /* If a specific device was requested, make it the system default. */
    if (devnum > 0 && devnum < 1024) {
      int CoreAudioDev = -1;
      prop.mSelector = kAudioHardwarePropertyDevices;
      if (isInput) {
        for (i = 0; (unsigned int) i < devnos; i++) {
          if ((unsigned int) devinfo[i].indevnum == devnum) CoreAudioDev = i;
        }
        if (CoreAudioDev >= 0) {
          prop.mSelector = kAudioHardwarePropertyDefaultInputDevice;
          dev = sysdevs[CoreAudioDev];
          AudioObjectSetPropertyData(kAudioObjectSystemObject, &prop, 0, NULL,
                                     sizeof(AudioDeviceID), &dev);
        }
        else csound->Warning(csound, Str("requested device %d out of range"),
                             devnum);
      } else {
        prop.mSelector = kAudioHardwarePropertyDefaultOutputDevice;
        for (i = 0; (unsigned int) i < devnos; i++) {
          if ((unsigned int) devinfo[i].outdevnum == devnum) CoreAudioDev = i;
        }
        if (CoreAudioDev >= 0) {
          dev = sysdevs[CoreAudioDev];
          AudioObjectSetPropertyData(kAudioObjectSystemObject, &prop, 0, NULL,
                                     sizeof(AudioDeviceID), &dev);
        }
        /* FIX: previously passed devinfo[CoreAudioDev].name here with
           CoreAudioDev == -1 (out-of-bounds read). */
        else csound->Warning(csound, Str("requested device %d out of range"),
                             devnum);
      }
    }
    free(sysdevs);
    free(devinfo);

    /* Report which device we ended up with. */
    psize = sizeof(CFStringRef);
    prop.mSelector = kAudioObjectPropertyName;
    AudioObjectGetPropertyData(dev, &prop, 0, NULL, &psize, &devName);
    {
      const char *selname = CFStringGetCStringPtr(devName, defaultEncoding);
      if (selname == NULL) selname = "(unknown)";   /* guard NULL for %s */
      if (isInput)
        csound->Message(csound, Str("selected input device: %s \n"), selname);
      else
        csound->Message(csound, Str("selected output device: %s \n"), selname);
    }
    CFRelease(devName);

    srate = csound->GetSr(csound);
    if (!isInput) {
      nchnls = cdata->onchnls = parm->nChannels;
      bufframes = csound->GetOutputBufferSize(csound) / nchnls;
    } else {
      nchnls = cdata->inchnls = parm->nChannels;
      bufframes = csound->GetInputBufferSize(csound) / nchnls;
    }

    /* although the SR is set in the stream properties,
       we also need to set the device to match */
    double sr;
    prop.mSelector = kAudioDevicePropertyNominalSampleRate;
    psize = sizeof(double);   /* FIX: was left at sizeof(CFStringRef) */
    if (!isInput) {
      AudioObjectGetPropertyData(dev, &prop, 0, NULL, &psize, &sr);
      csound->system_sr(csound, sr);
    }
    AudioObjectSetPropertyData(dev, &prop, 0, NULL, psize, &srate);
    AudioObjectGetPropertyData(dev, &prop, 0, NULL, &psize, &sr);
    if (sr != srate) {
      csound->Warning(csound,
                      Str("Attempted to set device SR, tried %.1f, got %.1f \n"),
                      srate, sr);
    }

    HALOutput = AudioComponentFindNext(NULL, &cd);
    if (isInput) {
      AudioComponentInstanceNew(HALOutput, &(cdata->inunit));
      enableIO = 1;
      AudioUnitSetProperty(cdata->inunit, kAudioOutputUnitProperty_EnableIO,
                           kAudioUnitScope_Input, 1, &enableIO,
                           sizeof(enableIO));
      enableIO = 0;
      AudioUnitSetProperty(cdata->inunit, kAudioOutputUnitProperty_EnableIO,
                           kAudioUnitScope_Output, 0, &enableIO,
                           sizeof(enableIO));
      psize = sizeof(AudioDeviceID);
      /* for input, select device AFTER enabling IO */
      AudioUnitSetProperty(cdata->inunit,
                           kAudioOutputUnitProperty_CurrentDevice,
                           kAudioUnitScope_Global, isInput, &dev, psize);
      aunit = &(cdata->inunit);
    } else {
      AudioComponentInstanceNew(HALOutput, &(cdata->outunit));
      psize = sizeof(AudioDeviceID);
      /* for output, select device BEFORE enabling IO */
      AudioUnitSetProperty(cdata->outunit,
                           kAudioOutputUnitProperty_CurrentDevice,
                           kAudioUnitScope_Global, isInput, &dev, psize);
      enableIO = 1;
      AudioUnitSetProperty(cdata->outunit, kAudioOutputUnitProperty_EnableIO,
                           kAudioUnitScope_Output, 0, &enableIO,
                           sizeof(enableIO));
      enableIO = 0;
      AudioUnitSetProperty(cdata->outunit, kAudioOutputUnitProperty_EnableIO,
                           kAudioUnitScope_Input, 1, &enableIO,
                           sizeof(enableIO));
      aunit = &(cdata->outunit);
    }

    /* now set the buffer size */
    psize = sizeof(AudioDeviceID);
    AudioUnitGetProperty(*aunit, kAudioOutputUnitProperty_CurrentDevice,
                         kAudioUnitScope_Global, isInput, &dev, &psize);
    prop.mSelector = kAudioDevicePropertyBufferFrameSize;
    psize = 4;
    AudioObjectSetPropertyData(dev, &prop, 0, NULL, psize, &bufframes);
    psize = sizeof(maxFPS);
    AudioUnitGetProperty(*aunit, kAudioUnitProperty_MaximumFramesPerSlice,
                         kAudioUnitScope_Global, isInput, &maxFPS, &psize);
    AudioUnitSetProperty(*aunit, kAudioUnitProperty_MaximumFramesPerSlice,
                         kAudioUnitScope_Global, isInput, &bufframes,
                         sizeof(UInt32));

    /* set the stream properties */
    psize = sizeof(AudioStreamBasicDescription);
    AudioUnitGetProperty(*aunit, kAudioUnitProperty_StreamFormat,
                         (isInput ? kAudioUnitScope_Output :
                                    kAudioUnitScope_Input),
                         isInput, &format, &psize);
    format.mSampleRate = srate;
    format.mFormatID = kAudioFormatLinearPCM;
    format.mFormatFlags = kAudioFormatFlagsCanonical |
                          kLinearPCMFormatFlagIsNonInterleaved;
    format.mBytesPerPacket = sizeof(AudioUnitSampleType);
    format.mFramesPerPacket = 1;
    format.mBytesPerFrame = sizeof(AudioUnitSampleType);
    format.mChannelsPerFrame = nchnls;
    format.mBitsPerChannel = sizeof(AudioUnitSampleType) * 8;
    AudioUnitSetProperty(*aunit, kAudioUnitProperty_StreamFormat,
                         (isInput ? kAudioUnitScope_Output :
                                    kAudioUnitScope_Input),
                         isInput, &format,
                         sizeof(AudioStreamBasicDescription));

    /* set the callbacks and open the device */
    if (!isInput) {
      AURenderCallbackStruct output;
      output.inputProc = Csound_Render;
      output.inputProcRefCon = cdata;
      AudioUnitSetProperty(*aunit, kAudioUnitProperty_SetRenderCallback,
                           kAudioUnitScope_Input, isInput, &output,
                           sizeof(output));
      AudioUnitInitialize(*aunit);
      AudioOutputUnitStart(*aunit);
      csound->Message(csound,
                      Str("***** AuHAL module: output device open with %d "
                          "buffer frames\n"), bufframes);
      cdata->disp = 0;
    } else {
      AURenderCallbackStruct input;
      /* One mono AudioBuffer per input channel; mBuffers[] is the struct's
         trailing array, so this sizing gives room for inchnls entries. */
      AudioBufferList *CAInputData =
        (AudioBufferList *) malloc(sizeof(UInt32) +
                                   cdata->inchnls * sizeof(AudioBuffer));
      CAInputData->mNumberBuffers = cdata->inchnls;
      for (i = 0; i < cdata->inchnls; i++) {
        CAInputData->mBuffers[i].mNumberChannels = 1;
        CAInputData->mBuffers[i].mDataByteSize =
          bufframes * sizeof(AudioUnitSampleType);
        CAInputData->mBuffers[i].mData =
          calloc(bufframes, sizeof(AudioUnitSampleType));
      }
      cdata->inputdata = CAInputData;
      input.inputProc = Csound_Input;
      input.inputProcRefCon = cdata;
      AudioUnitSetProperty(*aunit, kAudioOutputUnitProperty_SetInputCallback,
                           kAudioUnitScope_Input, isInput, &input,
                           sizeof(input));
      AudioUnitInitialize(*aunit);
      AudioOutputUnitStart(*aunit);
      csound->Message(csound,
            "***** AuHAL module: input device open with %d buffer frames\n",
                      bufframes);
    }
    if (!cdata->disp)
      csound->Message(csound,
              "==========================================================\n");
    cdata->disp = 0;
    return 0;
}
int CoreAudioDriver::init( unsigned bufferSize ) { OSStatus err = noErr; m_pOut_L = new float[ m_nBufferSize ]; m_pOut_R = new float[ m_nBufferSize ]; memset ( m_pOut_L, 0, m_nBufferSize * sizeof( float ) ); memset ( m_pOut_R, 0, m_nBufferSize * sizeof( float ) ); // Get Component AudioComponent compOutput; AudioComponentDescription descAUHAL; descAUHAL.componentType = kAudioUnitType_Output; descAUHAL.componentSubType = kAudioUnitSubType_HALOutput; descAUHAL.componentManufacturer = kAudioUnitManufacturer_Apple; descAUHAL.componentFlags = 0; descAUHAL.componentFlagsMask = 0; compOutput = AudioComponentFindNext( NULL, &descAUHAL ); if ( compOutput == NULL ) { ERRORLOG( "Error in FindNextComponent" ); //exit (-1); } err = AudioComponentInstanceNew( compOutput, &m_outputUnit ); if ( err != noErr ) { ERRORLOG( "Error Opening Component" ); } // Get Current Output Device retrieveDefaultDevice(); // Set AUHAL to Current Device err = AudioUnitSetProperty( m_outputUnit, kAudioOutputUnitProperty_CurrentDevice, kAudioUnitScope_Global, 0, &m_outputDevice, sizeof( m_outputDevice ) ); if ( err != noErr ) { ERRORLOG( "Could not set Current Device" ); } AudioStreamBasicDescription asbdesc; asbdesc.mSampleRate = ( Float64 )m_nSampleRate; asbdesc.mFormatID = kAudioFormatLinearPCM; asbdesc.mFormatFlags = kAudioFormatFlagsNativeFloatPacked | kAudioFormatFlagIsNonInterleaved; asbdesc.mBytesPerPacket = sizeof( Float32 ); asbdesc.mFramesPerPacket = 1; asbdesc.mBytesPerFrame = sizeof( Float32 ); asbdesc.mChannelsPerFrame = 2; // comix: was set to 1 asbdesc.mBitsPerChannel = 32; err = AudioUnitSetProperty( m_outputUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &asbdesc, sizeof( AudioStreamBasicDescription ) ); // Set Render Callback AURenderCallbackStruct out; out.inputProc = renderProc; out.inputProcRefCon = ( void * )this; err = AudioUnitSetProperty( m_outputUnit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Global, 0, &out, sizeof( out ) ); if ( err != noErr ) 
{ ERRORLOG( "Could not Set Render Callback" ); } //Initialize AUHAL err = AudioUnitInitialize( m_outputUnit ); if ( err != noErr ) { ERRORLOG( "Could not Initialize AudioUnit" ); } return 0; }
/* Construct a CoreAudio output: allocate the AU spinlock and the ring
 * buffer, create a DefaultOutput audio unit (via the AudioComponent API on
 * 10.6+, the legacy Component Manager otherwise), install the render
 * callback, set a signed-integer PCM stream format, and initialize the
 * unit.  On any CoreAudio failure the constructor returns early, leaving
 * the object partially configured (no error is reported to the caller). */
CoreAudioOutput::CoreAudioOutput(size_t bufferSamples, size_t sampleSize)
{
	OSStatus error = noErr;

	// NOTE(review): malloc() result is dereferenced without a NULL check —
	// an OOM here would crash; confirm whether that is acceptable policy.
	_spinlockAU = (OSSpinLock *)malloc(sizeof(OSSpinLock));
	*_spinlockAU = OS_SPINLOCK_INIT;

	_buffer = new RingBuffer(bufferSamples, sampleSize);
	_volume = 1.0f;

	// Create a new audio unit.  Three paths: built against a 10.6+ SDK and
	// running on 10.6+ (AudioComponent API), built against 10.6+ but
	// running on an older OS (Component Manager), or built on a pre-10.6
	// SDK (Component Manager only).
#if defined(MAC_OS_X_VERSION_10_6) && MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6
	if (IsOSXVersionSupported(10, 6, 0))
	{
		AudioComponentDescription audioDesc;
		audioDesc.componentType = kAudioUnitType_Output;
		audioDesc.componentSubType = kAudioUnitSubType_DefaultOutput;
		audioDesc.componentManufacturer = kAudioUnitManufacturer_Apple;
		audioDesc.componentFlags = 0;
		audioDesc.componentFlagsMask = 0;

		AudioComponent audioComponent = AudioComponentFindNext(NULL, &audioDesc);
		if (audioComponent == NULL)
		{
			return;
		}

		error = AudioComponentInstanceNew(audioComponent, &_au);
		if (error != noErr)
		{
			return;
		}
	}
	else
	{
		// Runtime fallback for pre-10.6 systems.
		ComponentDescription audioDesc;
		audioDesc.componentType = kAudioUnitType_Output;
		audioDesc.componentSubType = kAudioUnitSubType_DefaultOutput;
		audioDesc.componentManufacturer = kAudioUnitManufacturer_Apple;
		audioDesc.componentFlags = 0;
		audioDesc.componentFlagsMask = 0;

		Component audioComponent = FindNextComponent(NULL, &audioDesc);
		if (audioComponent == NULL)
		{
			return;
		}

		error = OpenAComponent(audioComponent, &_au);
		if (error != noErr)
		{
			return;
		}
	}
#else
	// Pre-10.6 SDK: only the Component Manager API exists.
	ComponentDescription audioDesc;
	audioDesc.componentType = kAudioUnitType_Output;
	audioDesc.componentSubType = kAudioUnitSubType_DefaultOutput;
	audioDesc.componentManufacturer = kAudioUnitManufacturer_Apple;
	audioDesc.componentFlags = 0;
	audioDesc.componentFlagsMask = 0;

	Component audioComponent = FindNextComponent(NULL, &audioDesc);
	if (audioComponent == NULL)
	{
		return;
	}

	error = OpenAComponent(audioComponent, &_au);
	if (error != noErr)
	{
		return;
	}
#endif

	// Set the render callback; the ring buffer itself is the refCon so the
	// callback never touches this object directly.
	AURenderCallbackStruct callback;
	callback.inputProc = &CoreAudioOutputRenderCallback;
	callback.inputProcRefCon = _buffer;

	error = AudioUnitSetProperty(_au,
	                             kAudioUnitProperty_SetRenderCallback,
	                             kAudioUnitScope_Input,
	                             0,
	                             &callback,
	                             sizeof(callback) );

	if(error != noErr)
	{
		return;
	}

	// Set up the audio unit for audio streaming
	AudioStreamBasicDescription outputFormat;
	outputFormat.mSampleRate = SPU_SAMPLE_RATE;
	outputFormat.mFormatID = kAudioFormatLinearPCM;
	outputFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagsNativeEndian | kLinearPCMFormatFlagIsPacked;
	outputFormat.mBytesPerPacket = SPU_SAMPLE_SIZE;
	outputFormat.mFramesPerPacket = 1;
	outputFormat.mBytesPerFrame = SPU_SAMPLE_SIZE;
	outputFormat.mChannelsPerFrame = SPU_NUMBER_CHANNELS;
	outputFormat.mBitsPerChannel = SPU_SAMPLE_RESOLUTION;

	error = AudioUnitSetProperty(_au,
	                             kAudioUnitProperty_StreamFormat,
	                             kAudioUnitScope_Input,
	                             0,
	                             &outputFormat,
	                             sizeof(outputFormat) );

	if(error != noErr)
	{
		return;
	}

	// Initialize our new audio unit
	error = AudioUnitInitialize(_au);
	if(error != noErr)
	{
		return;
	}
}
/*
 * Open the default CoreAudio output unit for this stream: install the
 * pull-model render callback, describe the PCM data we will feed it, and
 * initialize the unit.  The stream must have been created but not opened.
 */
int
sa_stream_open(sa_stream_t *s) {
  ComponentDescription          unit_desc;
  Component                     output_comp;
  AURenderCallbackStruct        render_cb;
  AudioStreamBasicDescription   stream_fmt;

  if (s == NULL) {
    return SA_ERROR_NO_INIT;
  }
  if (s->output_unit != NULL) {
    return SA_ERROR_INVALID;
  }

  /*
   * Locate and open the system's default output unit.
   */
  unit_desc.componentType         = kAudioUnitType_Output;
  unit_desc.componentSubType      = kAudioUnitSubType_DefaultOutput;
  unit_desc.componentManufacturer = kAudioUnitManufacturer_Apple;
  unit_desc.componentFlags        = 0;
  unit_desc.componentFlagsMask    = 0;

  output_comp = FindNextComponent(NULL, &unit_desc);
  if (output_comp == NULL) {
    return SA_ERROR_NO_DEVICE;
  }
  if (OpenAComponent(output_comp, &s->output_unit) != noErr) {
    return SA_ERROR_NO_DEVICE;
  }

  /*
   * Install the render callback the unit will use to pull audio from us.
   */
  render_cb.inputProc       = audio_callback;
  render_cb.inputProcRefCon = s;
  if (AudioUnitSetProperty(s->output_unit,
                           kAudioUnitProperty_SetRenderCallback,
                           kAudioUnitScope_Input, 0,
                           &render_cb, sizeof(render_cb)) != 0) {
    return SA_ERROR_SYSTEM;
  }

  /*
   * Describe our packed signed-integer PCM data.  CoreAudio terminology:
   * a "sample" is one value for one channel, a "frame" is one sample per
   * channel, a "packet" is the smallest indivisible chunk — for
   * uncompressed audio that is exactly one frame — and "rate" is frames
   * per second.  (Note Apple's "frame" differs from common usage; see
   * the AudioStreamBasicDescription reference.)
   */
  stream_fmt.mFormatID         = kAudioFormatLinearPCM;
  stream_fmt.mFormatFlags      = kLinearPCMFormatFlagIsSignedInteger |
#ifdef __BIG_ENDIAN__
                                 kLinearPCMFormatFlagIsBigEndian |
#endif
                                 kLinearPCMFormatFlagIsPacked;
  stream_fmt.mSampleRate       = s->rate;
  stream_fmt.mChannelsPerFrame = s->n_channels;
  stream_fmt.mBitsPerChannel   = s->bytes_per_ch * 8;
  stream_fmt.mFramesPerPacket  = 1;              /* uncompressed audio */
  stream_fmt.mBytesPerFrame    = stream_fmt.mChannelsPerFrame *
                                 stream_fmt.mBitsPerChannel / 8;
  stream_fmt.mBytesPerPacket   = stream_fmt.mBytesPerFrame *
                                 stream_fmt.mFramesPerPacket;

  /*
   * We feed data into the unit's output bus (element 0), so the format is
   * applied on the *input* scope of that bus; see Apple TN2091.
   */
  if (AudioUnitSetProperty(s->output_unit,
                           kAudioUnitProperty_StreamFormat,
                           kAudioUnitScope_Input, 0,
                           &stream_fmt,
                           sizeof(AudioStreamBasicDescription)) != 0) {
    return SA_ERROR_NOT_SUPPORTED;
  }

  if (AudioUnitInitialize(s->output_unit) != 0) {
    return SA_ERROR_SYSTEM;
  }

  return SA_SUCCESS;
}
/* Create a cubeb output stream backed by the system default output
 * AudioUnit.  Translates the cubeb sample format into an
 * AudioStreamBasicDescription, opens the unit (legacy Component Manager
 * API when targeting < 10.6), wires the output render callback, and
 * initializes the unit.  On success *stream receives the new stream; on
 * any CoreAudio failure the partially-built stream is destroyed and
 * CUBEB_ERROR is returned. */
static int
audiounit_stream_init(cubeb * context, cubeb_stream ** stream, char const * stream_name,
                      cubeb_stream_params stream_params, unsigned int latency,
                      cubeb_data_callback data_callback, cubeb_state_callback state_callback,
                      void * user_ptr)
{
  AudioStreamBasicDescription ss;
#if MAC_OS_X_VERSION_MIN_REQUIRED < 1060
  ComponentDescription desc;
  Component comp;
#else
  AudioComponentDescription desc;
  AudioComponent comp;
#endif
  cubeb_stream * stm;
  AURenderCallbackStruct input;
  unsigned int buffer_size;
  OSStatus r;

  assert(context);
  *stream = NULL;

  /* Map the cubeb sample format onto LinearPCM flags + bit depth. */
  memset(&ss, 0, sizeof(ss));
  ss.mFormatFlags = 0;

  switch (stream_params.format) {
  case CUBEB_SAMPLE_S16LE:
    ss.mBitsPerChannel = 16;
    ss.mFormatFlags |= kAudioFormatFlagIsSignedInteger;
    break;
  case CUBEB_SAMPLE_S16BE:
    ss.mBitsPerChannel = 16;
    ss.mFormatFlags |= kAudioFormatFlagIsSignedInteger |
      kAudioFormatFlagIsBigEndian;
    break;
  case CUBEB_SAMPLE_FLOAT32LE:
    ss.mBitsPerChannel = 32;
    ss.mFormatFlags |= kAudioFormatFlagIsFloat;
    break;
  case CUBEB_SAMPLE_FLOAT32BE:
    ss.mBitsPerChannel = 32;
    ss.mFormatFlags |= kAudioFormatFlagIsFloat |
      kAudioFormatFlagIsBigEndian;
    break;
  default:
    return CUBEB_ERROR_INVALID_FORMAT;
  }

  /* Packed interleaved PCM: one frame per packet. */
  ss.mFormatID = kAudioFormatLinearPCM;
  ss.mFormatFlags |= kLinearPCMFormatFlagIsPacked;
  ss.mSampleRate = stream_params.rate;
  ss.mChannelsPerFrame = stream_params.channels;

  ss.mBytesPerFrame = (ss.mBitsPerChannel / 8) * ss.mChannelsPerFrame;
  ss.mFramesPerPacket = 1;
  ss.mBytesPerPacket = ss.mBytesPerFrame * ss.mFramesPerPacket;

  desc.componentType = kAudioUnitType_Output;
  desc.componentSubType = kAudioUnitSubType_DefaultOutput;
  desc.componentManufacturer = kAudioUnitManufacturer_Apple;
  desc.componentFlags = 0;
  desc.componentFlagsMask = 0;
#if MAC_OS_X_VERSION_MIN_REQUIRED < 1060
  comp = FindNextComponent(NULL, &desc);
#else
  comp = AudioComponentFindNext(NULL, &desc);
#endif
  assert(comp);

  stm = calloc(1, sizeof(*stm));
  assert(stm);

  stm->context = context;
  stm->data_callback = data_callback;
  stm->state_callback = state_callback;
  stm->user_ptr = user_ptr;

  stm->sample_spec = ss;

  r = pthread_mutex_init(&stm->mutex, NULL);
  assert(r == 0);

  stm->frames_played = 0;
  stm->frames_queued = 0;

#if MAC_OS_X_VERSION_MIN_REQUIRED < 1060
  r = OpenAComponent(comp, &stm->unit);
#else
  r = AudioComponentInstanceNew(comp, &stm->unit);
#endif
  if (r != 0) {
    audiounit_stream_destroy(stm);
    return CUBEB_ERROR;
  }

  input.inputProc = audiounit_output_callback;
  input.inputProcRefCon = stm;
  r = AudioUnitSetProperty(stm->unit, kAudioUnitProperty_SetRenderCallback,
                           kAudioUnitScope_Global, 0, &input, sizeof(input));
  if (r != 0) {
    audiounit_stream_destroy(stm);
    return CUBEB_ERROR;
  }

  r = AudioUnitSetProperty(stm->unit, kAudioUnitProperty_StreamFormat,
                           kAudioUnitScope_Input, 0, &ss, sizeof(ss));
  if (r != 0) {
    audiounit_stream_destroy(stm);
    return CUBEB_ERROR;
  }

  /* Round the per-buffer byte count for the requested latency up to a
   * whole number of frames.
   * NOTE(review): buffer_size is computed and validated here but never
   * handed to the AudioUnit — looks vestigial; confirm before removing. */
  buffer_size = ss.mSampleRate / 1000.0 * latency * ss.mBytesPerFrame / NBUFS;
  if (buffer_size % ss.mBytesPerFrame != 0) {
    buffer_size += ss.mBytesPerFrame - (buffer_size % ss.mBytesPerFrame);
  }
  assert(buffer_size % ss.mBytesPerFrame == 0);

  r = AudioUnitInitialize(stm->unit);
  if (r != 0) {
    audiounit_stream_destroy(stm);
    return CUBEB_ERROR;
  }

  *stream = stm;

  return CUBEB_OK;
}
/* Initialize the CoreAudio driver: create the output AudioUnit (HAL output
 * on macOS, RemoteIO otherwise), register default-device-change listeners
 * (macOS only), probe the hardware channel counts, configure 16-bit packed
 * PCM formats for playback and capture, size the internal buffers from the
 * configured latency, and install the output/input callbacks.  Fails fast
 * with FAILED via ERR_FAIL_COND_V on any CoreAudio error. */
Error AudioDriverCoreAudio::init() {
	mutex = Mutex::create();

	AudioComponentDescription desc;
	zeromem(&desc, sizeof(desc));
	desc.componentType = kAudioUnitType_Output;
#ifdef OSX_ENABLED
	desc.componentSubType = kAudioUnitSubType_HALOutput;
#else
	desc.componentSubType = kAudioUnitSubType_RemoteIO;
#endif
	desc.componentManufacturer = kAudioUnitManufacturer_Apple;

	AudioComponent comp = AudioComponentFindNext(NULL, &desc);
	ERR_FAIL_COND_V(comp == NULL, FAILED);

	OSStatus result = AudioComponentInstanceNew(comp, &audio_unit);
	ERR_FAIL_COND_V(result != noErr, FAILED);

#ifdef OSX_ENABLED
	// Follow the system default output/input devices when they change.
	AudioObjectPropertyAddress prop;
	prop.mSelector = kAudioHardwarePropertyDefaultOutputDevice;
	prop.mScope = kAudioObjectPropertyScopeGlobal;
	prop.mElement = kAudioObjectPropertyElementMaster;

	result = AudioObjectAddPropertyListener(kAudioObjectSystemObject, &prop, &output_device_address_cb, this);
	ERR_FAIL_COND_V(result != noErr, FAILED);

	prop.mSelector = kAudioHardwarePropertyDefaultInputDevice;
	result = AudioObjectAddPropertyListener(kAudioObjectSystemObject, &prop, &input_device_address_cb, this);
	ERR_FAIL_COND_V(result != noErr, FAILED);
#endif

	// Probe the hardware output format to discover the channel count.
	AudioStreamBasicDescription strdesc;
	zeromem(&strdesc, sizeof(strdesc));
	UInt32 size = sizeof(strdesc);
	result = AudioUnitGetProperty(audio_unit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, kOutputBus, &strdesc, &size);
	ERR_FAIL_COND_V(result != noErr, FAILED);

	switch (strdesc.mChannelsPerFrame) {
		case 2: // Stereo
		case 4: // Surround 3.1
		case 6: // Surround 5.1
		case 8: // Surround 7.1
			channels = strdesc.mChannelsPerFrame;
			break;

		default:
			// Unknown number of channels, default to stereo
			channels = 2;
			break;
	}

	// Probe the capture side the same way.
	zeromem(&strdesc, sizeof(strdesc));
	size = sizeof(strdesc);
	result = AudioUnitGetProperty(audio_unit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, kInputBus, &strdesc, &size);
	ERR_FAIL_COND_V(result != noErr, FAILED);

	switch (strdesc.mChannelsPerFrame) {
		case 1: // Mono
			capture_channels = 1;
			break;

		case 2: // Stereo
			capture_channels = 2;
			break;

		default:
			// Unknown number of channels, default to stereo
			capture_channels = 2;
			break;
	}

	mix_rate = GLOBAL_DEF_RST("audio/mix_rate", DEFAULT_MIX_RATE);

	// 16-bit packed signed-integer PCM at the project mix rate.
	zeromem(&strdesc, sizeof(strdesc));
	strdesc.mFormatID = kAudioFormatLinearPCM;
	strdesc.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
	strdesc.mChannelsPerFrame = channels;
	strdesc.mSampleRate = mix_rate;
	strdesc.mFramesPerPacket = 1;
	strdesc.mBitsPerChannel = 16;
	strdesc.mBytesPerFrame = strdesc.mBitsPerChannel * strdesc.mChannelsPerFrame / 8;
	strdesc.mBytesPerPacket = strdesc.mBytesPerFrame * strdesc.mFramesPerPacket;

	result = AudioUnitSetProperty(audio_unit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, kOutputBus, &strdesc, sizeof(strdesc));
	ERR_FAIL_COND_V(result != noErr, FAILED);

	// Same format for capture, just with the capture channel count.
	strdesc.mChannelsPerFrame = capture_channels;

	result = AudioUnitSetProperty(audio_unit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, kInputBus, &strdesc, sizeof(strdesc));
	ERR_FAIL_COND_V(result != noErr, FAILED);

	int latency = GLOBAL_DEF_RST("audio/output_latency", DEFAULT_OUTPUT_LATENCY);
	// Sample rate is independent of channels (ref: https://stackoverflow.com/questions/11048825/audio-sample-frequency-rely-on-channels)
	buffer_frames = closest_power_of_2(latency * mix_rate / 1000);

#ifdef OSX_ENABLED
	result = AudioUnitSetProperty(audio_unit, kAudioDevicePropertyBufferFrameSize, kAudioUnitScope_Global, kOutputBus, &buffer_frames, sizeof(UInt32));
	ERR_FAIL_COND_V(result != noErr, FAILED);
#endif

	unsigned int buffer_size = buffer_frames * channels;
	samples_in.resize(buffer_size);
	input_buf.resize(buffer_size);
	// Capture ring buffer is deliberately oversized (8x).
	input_buffer.resize(buffer_size * 8);
	input_position = 0;
	input_size = 0;

	print_verbose("CoreAudio: detected " + itos(channels) + " channels");
	print_verbose("CoreAudio: audio buffer frames: " + itos(buffer_frames) + " calculated latency: " + itos(buffer_frames * 1000 / mix_rate) + "ms");

	// Playback render callback (pull model, input scope of the output bus).
	AURenderCallbackStruct callback;
	zeromem(&callback, sizeof(AURenderCallbackStruct));
	callback.inputProc = &AudioDriverCoreAudio::output_callback;
	callback.inputProcRefCon = this;
	result = AudioUnitSetProperty(audio_unit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, kOutputBus, &callback, sizeof(callback));
	ERR_FAIL_COND_V(result != noErr, FAILED);

	// Capture input callback.
	zeromem(&callback, sizeof(AURenderCallbackStruct));
	callback.inputProc = &AudioDriverCoreAudio::input_callback;
	callback.inputProcRefCon = this;
	result = AudioUnitSetProperty(audio_unit, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, 0, &callback, sizeof(callback));
	ERR_FAIL_COND_V(result != noErr, FAILED);

	result = AudioUnitInitialize(audio_unit);
	ERR_FAIL_COND_V(result != noErr, FAILED);

	return OK;
}
void *runPluginLoop(void *plug) { AudioUnit outputUnit; OSStatus err = noErr; // Open the default output unit ComponentDescription desc; desc.componentType = kAudioUnitType_Output; desc.componentSubType = kAudioUnitSubType_DefaultOutput; desc.componentManufacturer = kAudioUnitManufacturer_Apple; desc.componentFlags = 0; desc.componentFlagsMask = 0; Component comp = FindNextComponent(NULL, &desc); if(comp == NULL) { debug(LOG_ERROR, "FindNextComponent failed"); return NULL; } err = OpenAComponent(comp, &outputUnit); if(comp == NULL) { debug(LOG_ERROR, "OpenAComponent failed with error code %ld\n", err); return NULL; } // Set up a callback function to generate output to the output unit AURenderCallbackStruct input; input.inputProc = processData; input.inputProcRefCon = plug; err = AudioUnitSetProperty(outputUnit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, 0, &input, sizeof(input)); AudioStreamBasicDescription streamFormat; streamFormat.mSampleRate = DEF_SAMPLE_RATE; streamFormat.mFormatID = kAudioFormatLinearPCM; streamFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsBigEndian | kLinearPCMFormatFlagIsPacked | kAudioFormatFlagIsNonInterleaved; streamFormat.mBytesPerPacket = 2; streamFormat.mFramesPerPacket = 1; streamFormat.mBytesPerFrame = 2; streamFormat.mChannelsPerFrame = 2; streamFormat.mBitsPerChannel = 16; err = AudioUnitSetProperty(outputUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &streamFormat, sizeof(AudioStreamBasicDescription)); if(err) { debug(LOG_ERROR, "AudioUnitSetProperty-SF failed with code %4.4s, %ld\n", (char*)&err, err); return NULL; } // Initialize unit err = AudioUnitInitialize(outputUnit); if(err) { debug(LOG_ERROR, "AudioUnitInitialize failed with code %ld\n", err); return NULL; } Float64 outSampleRate; UInt32 size = sizeof(Float64); err = AudioUnitGetProperty(outputUnit, kAudioUnitProperty_SampleRate, kAudioUnitScope_Output, 0, &outSampleRate, &size); if(err) { 
debug(LOG_ERROR, "AudioUnitSetProperty-GF failed with code %4.4s, %ld\n", (char*)&err, err); return NULL; } // Start the rendering // The DefaultOutputUnit will do any format conversions to the format of the default device err = AudioOutputUnitStart(outputUnit); if(err) { debug(LOG_ERROR, "AudioOutputUnitStart failed with code %ld\n", err); return NULL; } // Loop until this thread is killed CFRunLoopRun(); // REALLY after you're finished playing STOP THE AUDIO OUTPUT UNIT!!!!!! // but we never get here because we're running until the process is nuked... AudioOutputUnitStop(outputUnit); err = AudioUnitUninitialize(outputUnit); if(err) { debug(LOG_ERROR, "AudioUnitUninitialize failed with code %ld\n", err); return NULL; } return NULL; }
void InputImplAudioUnit::setup() { if( mIsSetup ) return; OSStatus err = noErr; //get default input device if( ! mDevice ) { mDevice = InputImplAudioUnit::getDefaultDevice(); } //create AudioOutputUnit AudioComponent component; AudioComponentDescription description; description.componentType = kAudioUnitType_Output; #if defined( CINDER_MAC ) description.componentSubType = kAudioUnitSubType_HALOutput; #elif defined( CINDER_COCOA_TOUCH ) description.componentSubType = kAudioUnitSubType_RemoteIO; #endif description.componentManufacturer = kAudioUnitManufacturer_Apple; description.componentFlags = 0; description.componentFlagsMask = 0; component = AudioComponentFindNext( NULL, &description ); if( ! component ) { std::cout << "Error finding next component" << std::endl; throw; } err = AudioComponentInstanceNew( component, &mInputUnit ); if( err != noErr ) { mInputUnit = NULL; std::cout << "Error getting output unit" << std::endl; throw; } // Initialize the AU /*err = AudioUnitInitialize( mInputUnit ); if(err != noErr) { std::cout << "failed to initialize HAL Output AU" << std::endl; throw; }*/ UInt32 param; //enable IO on AudioUnit's input scope param = 1; err = AudioUnitSetProperty( mInputUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, 1, ¶m, sizeof( UInt32 ) ); if( err != noErr ) { std::cout << "Error enable IO on Output unit input" << std::endl; throw; } //disable IO on AudioUnit's output scope param = 0; err = AudioUnitSetProperty( mInputUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, 0, ¶m, sizeof( UInt32 ) ); if( err != noErr ) { std::cout << "Error disabling IO on Output unit output" << std::endl; throw; } #if defined( CINDER_MAC ) AudioDeviceID nativeDeviceId = static_cast<AudioDeviceID>( mDevice->getDeviceId() ); // Set the current device to the default input unit. 
err = AudioUnitSetProperty( mInputUnit, kAudioOutputUnitProperty_CurrentDevice, kAudioUnitScope_Global, 0, &nativeDeviceId, sizeof(AudioDeviceID) ); if( err != noErr ) { std::cout << "failed to set AU input device" << std::endl; throw; } #endif AURenderCallbackStruct callback; callback.inputProc = InputImplAudioUnit::inputCallback; callback.inputProcRefCon = this; err = AudioUnitSetProperty( mInputUnit, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, 0, &callback, sizeof(AURenderCallbackStruct) ); //Don't setup buffers until you know what the //input and output device audio streams look like. // Initialize the AudioUnit err = AudioUnitInitialize( mInputUnit ); if(err != noErr) { std::cout << "failed to initialize HAL Output AU" << std::endl; throw; } //Get Size of IO Buffers uint32_t sampleCount; param = sizeof(UInt32); #if defined( CINDER_MAC ) err = AudioUnitGetProperty( mInputUnit, kAudioDevicePropertyBufferFrameSize, kAudioUnitScope_Global, 0, &sampleCount, ¶m ); if( err != noErr ) { std::cout << "Error getting buffer frame size" << std::endl; throw; } #elif defined( CINDER_COCOA_TOUCH ) AudioUnitGetProperty( mInputUnit, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, 0, &sampleCount, ¶m ); #endif AudioStreamBasicDescription deviceInFormat; AudioStreamBasicDescription desiredOutFormat; //StreamFormat setting: //get and the AudioUnit's default input and output scope stream formats //the AudioUnit has a built in AudioConverter than can do basic PCM format conversions //and channel mapping if the desired channel count is different from the device's channel count //Stream Format - Output Client Side param = sizeof( AudioStreamBasicDescription ); err = AudioUnitGetProperty( mInputUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 1, &deviceInFormat, ¶m ); if( err != noErr ) { std::cout << "failed to get input in device ASBD" << std::endl; throw; } //Stream format client side param = sizeof( AudioStreamBasicDescription 
); err = AudioUnitGetProperty( mInputUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 1, &desiredOutFormat, ¶m ); if( err != noErr ) { std::cout << "failed to get input output device ASBD" << std::endl; throw; } //get the device's sample rate - this has to be the same as the AudioUnit's output format //this is actually already set on the AudioUnit's input default stream format //Float64 rate = 0; //param = sizeof(Float64); //AudioDeviceGetProperty( nativeDeviceId, 0, 1, kAudioDevicePropertyNominalSampleRate, ¶m, &rate ); //desiredOutFormat.mSampleRate = rate; //the output sample rate must be the same as the input device's sample rate desiredOutFormat.mSampleRate = deviceInFormat.mSampleRate; //output the same number of channels that are input desiredOutFormat.mChannelsPerFrame = deviceInFormat.mChannelsPerFrame; //one of the two above options is necessary, either getting the kAudioDevicePropertyNominalSampleRate //or just setting desiredOutFormat.mSampleRate = deviceInFormat.mSampleRate; //set the AudioUnit's output format to be float 32 linear non-interleaved PCM data desiredOutFormat.mFormatID = kAudioFormatLinearPCM; desiredOutFormat.mFormatFlags |= ( kAudioFormatFlagIsNonInterleaved | kAudioFormatFlagIsFloat | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked ); uint32_t sampleSize = sizeof(float); desiredOutFormat.mFramesPerPacket = 1; desiredOutFormat.mBytesPerPacket = sampleSize; desiredOutFormat.mBytesPerFrame = sampleSize; desiredOutFormat.mBitsPerChannel = 8 * sampleSize; param = sizeof( AudioStreamBasicDescription ); err = AudioUnitSetProperty( mInputUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 1, &desiredOutFormat, param ); if( err ) { throw; } mSampleRate = desiredOutFormat.mSampleRate; mChannelCount = desiredOutFormat.mChannelsPerFrame; //Buffer Setup - create the buffers necessary for holding input data //param = sizeof( AudioBufferList ); //AudioBufferList aBufferList; //AudioDeviceGetProperty( nativeDeviceId, 
0, true, kAudioDevicePropertyStreamConfiguration, ¶m, &aBufferList); //setup buffer for recieving data in the callback mInputBufferData = (float *)malloc( sampleCount * desiredOutFormat.mBytesPerFrame ); float * inputBufferChannels[desiredOutFormat.mChannelsPerFrame]; for( int h = 0; h < desiredOutFormat.mChannelsPerFrame; h++ ) { inputBufferChannels[h] = &mInputBufferData[h * sampleCount]; } mInputBuffer = (AudioBufferList *)malloc( offsetof(AudioBufferList, mBuffers[0]) + ( desiredOutFormat.mChannelsPerFrame * sizeof(AudioBuffer) ) ); mInputBuffer->mNumberBuffers = desiredOutFormat.mChannelsPerFrame; //mBuffers.resize( mInputBuffer->mNumberBuffers ); mCircularBuffers.resize( mInputBuffer->mNumberBuffers ); for( int i = 0; i < mInputBuffer->mNumberBuffers; i++ ) { mInputBuffer->mBuffers[i].mNumberChannels = 1; mInputBuffer->mBuffers[i].mDataByteSize = sampleCount * desiredOutFormat.mBytesPerFrame; mInputBuffer->mBuffers[i].mData = inputBufferChannels[i]; //create a circular buffer for each channel //mBuffers[i] = new circular_buffer<float>( sampleCount * 4 ); mCircularBuffers[i] = new CircularBuffer<float>( sampleCount * 4 ); } mIsSetup = true; }
int input_init(void) { AudioBuffer *buf; theBufferList = malloc(sizeof(AudioBufferList)); theBufferList->mNumberBuffers = 1; int i; buf = theBufferList->mBuffers; buf->mNumberChannels = 3; buf->mDataByteSize = 3*1000; buf->mData = 0; // tell the audiounit to show us its 'buffers' AudioComponent comp; AudioComponentDescription desc; //There are several different types of Audio Units. //Some audio units serve as Outputs, Mixers, or DSP //units. See AUComponent.h for listing desc.componentType = kAudioUnitType_Output; //Every Component has a subType, which will give a clearer picture //of what this components function will be. desc.componentSubType = kAudioUnitSubType_HALOutput; //all Audio Units in AUComponent.h must use //"kAudioUnitManufacturer_Apple" as the Manufacturer desc.componentManufacturer = kAudioUnitManufacturer_Apple; desc.componentFlags = 0; desc.componentFlagsMask = 0; //Finds a component that meets the desc spec's comp = AudioComponentFindNext(NULL, &desc); if (comp == NULL) exit (-1); //gains access to the services provided by the component AudioComponentInstanceNew(comp, &auHAL); UInt32 enableIO; UInt32 size=0; //When using AudioUnitSetProperty the 4th parameter in the method //refer to an AudioUnitElement. When using an AudioOutputUnit //the input element will be '1' and the output element will be '0'. 
enableIO = 1; AudioUnitSetProperty(auHAL, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, 1, // input element &enableIO, sizeof(enableIO)); enableIO = 0; AudioUnitSetProperty(auHAL, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, 0, //output element &enableIO, sizeof(enableIO)); OSStatus err =noErr; size = sizeof(AudioDeviceID); AudioDeviceID inputDevice; err = AudioHardwareGetProperty(kAudioHardwarePropertyDefaultInputDevice, &size, &inputDevice); if (err) exit(err); err =AudioUnitSetProperty(auHAL, kAudioOutputUnitProperty_CurrentDevice, kAudioUnitScope_Global, 0, &inputDevice, sizeof(inputDevice)); if (err) exit(err); AudioStreamBasicDescription DeviceFormat; AudioStreamBasicDescription DesiredFormat; //Use CAStreamBasicDescriptions instead of 'naked' //AudioStreamBasicDescriptions to minimize errors. //CAStreamBasicDescription.h can be found in the CoreAudio SDK. size = sizeof(AudioStreamBasicDescription); //Get the input device format AudioUnitGetProperty (auHAL, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 1, &DeviceFormat, &size); //set the desired format to the device's sample rate memcpy(&DesiredFormat, &DeviceFormat, sizeof(AudioStreamBasicDescription)); sampling_rate = DeviceFormat.mSampleRate; // for laser-emulating filters DesiredFormat.mSampleRate = DeviceFormat.mSampleRate; DesiredFormat.mChannelsPerFrame = 4; DesiredFormat.mBitsPerChannel = 16; DesiredFormat.mBytesPerPacket = DesiredFormat.mBytesPerFrame = DesiredFormat.mChannelsPerFrame * 2; DesiredFormat.mFramesPerPacket = 1; DesiredFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked; //set format to output scope err = AudioUnitSetProperty( auHAL, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 1, &DesiredFormat, sizeof(AudioStreamBasicDescription)); if (err) exit(err); SInt32 *channelMap =NULL; UInt32 numOfChannels = DesiredFormat.mChannelsPerFrame; //2 channels UInt32 mapSize = numOfChannels *sizeof(SInt32); channelMap 
= (SInt32 *)malloc(mapSize); //for each channel of desired input, map the channel from //the device's output channel. for(i=0;i<numOfChannels;i++) { channelMap[i]=i; } err = AudioUnitSetProperty(auHAL, kAudioOutputUnitProperty_ChannelMap, kAudioUnitScope_Output, 1, channelMap, size); if (err) exit(err); free(channelMap); AURenderCallbackStruct input; input.inputProc = callback; input.inputProcRefCon = 0; err = AudioUnitSetProperty( auHAL, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, 0, &input, sizeof(input)); if (err) exit(err); err = AudioUnitInitialize(auHAL); if (err) exit(err); err = AudioOutputUnitStart(auHAL); if (err) exit(err); }
/*
 * Creates an output-only cubeb stream on the system default output unit
 * (legacy Component Manager API).  Validates the requested rate/channels/
 * latency, translates the cubeb sample format into an
 * AudioStreamBasicDescription, opens the DefaultOutput component, installs
 * audio_unit_output_callback as the render callback, and initializes the
 * unit.  The stream is NOT started here.
 *
 * Failures are handled with assert() — this code aborts rather than
 * returning an error once argument validation has passed.
 *
 * Returns CUBEB_OK on success, CUBEB_ERROR_INVALID_FORMAT for bad params.
 */
int
cubeb_stream_init(cubeb * context, cubeb_stream ** stream, char const * stream_name,
                  cubeb_stream_params stream_params, unsigned int latency,
                  cubeb_data_callback data_callback, cubeb_state_callback state_callback,
                  void * user_ptr)
{
  AudioStreamBasicDescription ss;
  ComponentDescription desc;
  cubeb_stream * stm;
  Component comp;
  AURenderCallbackStruct input;
  unsigned int buffer_size;
  OSStatus r;

  /* This backend has no real context object; cubeb_init hands out a
     sentinel pointer which is verified here. */
  assert(context == (void *) 0xdeadbeef);
  *stream = NULL;

  /* Reject out-of-range parameters up front. */
  if (stream_params.rate < 1 || stream_params.rate > 192000 ||
      stream_params.channels < 1 || stream_params.channels > 32 ||
      latency < 1 || latency > 2000) {
    return CUBEB_ERROR_INVALID_FORMAT;
  }

  /* Map the cubeb sample format onto Core Audio format flags. */
  memset(&ss, 0, sizeof(ss));
  ss.mFormatFlags = 0;
  switch (stream_params.format) {
  case CUBEB_SAMPLE_S16LE:
    ss.mBitsPerChannel = 16;
    ss.mFormatFlags |= kAudioFormatFlagIsSignedInteger;
    break;
  case CUBEB_SAMPLE_S16BE:
    ss.mBitsPerChannel = 16;
    ss.mFormatFlags |= kAudioFormatFlagIsSignedInteger |
      kAudioFormatFlagIsBigEndian;
    break;
  case CUBEB_SAMPLE_FLOAT32LE:
    ss.mBitsPerChannel = 32;
    ss.mFormatFlags |= kAudioFormatFlagIsFloat;
    break;
  case CUBEB_SAMPLE_FLOAT32BE:
    ss.mBitsPerChannel = 32;
    ss.mFormatFlags |= kAudioFormatFlagIsFloat |
      kAudioFormatFlagIsBigEndian;
    break;
  default:
    return CUBEB_ERROR_INVALID_FORMAT;
  }

  /* Interleaved packed linear PCM at the requested rate/channel count. */
  ss.mFormatID = kAudioFormatLinearPCM;
  ss.mFormatFlags |= kLinearPCMFormatFlagIsPacked;
  ss.mSampleRate = stream_params.rate;
  ss.mChannelsPerFrame = stream_params.channels;

  ss.mBytesPerFrame = (ss.mBitsPerChannel / 8) * ss.mChannelsPerFrame;
  ss.mFramesPerPacket = 1;
  ss.mBytesPerPacket = ss.mBytesPerFrame * ss.mFramesPerPacket;

  /* Locate the system default output unit. */
  desc.componentType = kAudioUnitType_Output;
  desc.componentSubType = kAudioUnitSubType_DefaultOutput;
  desc.componentManufacturer = kAudioUnitManufacturer_Apple;
  desc.componentFlags = 0;
  desc.componentFlagsMask = 0;
  comp = FindNextComponent(NULL, &desc);
  assert(comp);

  stm = calloc(1, sizeof(*stm));
  assert(stm);

  stm->data_callback = data_callback;
  stm->state_callback = state_callback;
  stm->user_ptr = user_ptr;

  stm->sample_spec = ss;

  r = pthread_mutex_init(&stm->mutex, NULL);
  assert(r == 0);

  stm->frames_played = 0;
  stm->frames_queued = 0;

  r = OpenAComponent(comp, &stm->unit);
  if (r != 0) {
    fprintf(stderr, "cubeb_audiounit: FATAL: OpenAComponent returned %ld\n", (long) r);
  }
  assert(r == 0);

  /* Pull-model rendering: the unit calls audio_unit_output_callback to
     fetch audio, which in turn drains data_callback. */
  input.inputProc = audio_unit_output_callback;
  input.inputProcRefCon = stm;
  r = AudioUnitSetProperty(stm->unit, kAudioUnitProperty_SetRenderCallback,
                           kAudioUnitScope_Global, 0, &input, sizeof(input));
  if (r != 0) {
    fprintf(stderr, "cubeb_audiounit: FATAL: AudioUnitSetProperty(SetRenderCallback) returned %ld\n", (long) r);
  }
  assert(r == 0);

  r = AudioUnitSetProperty(stm->unit, kAudioUnitProperty_StreamFormat,
                           kAudioUnitScope_Input, 0, &ss, sizeof(ss));
  if (r != 0) {
    fprintf(stderr, "cubeb_audiounit: FATAL: AudioUnitSetProperty(StreamFormat) returned %ld\n", (long) r);
  }
  assert(r == 0);

  /* Per-buffer byte count derived from the requested latency, rounded up to
     a whole frame.  NOTE(review): computed and checked but not applied to
     any property here — appears to be vestigial. */
  buffer_size = ss.mSampleRate / 1000.0 * latency * ss.mBytesPerFrame / NBUFS;
  if (buffer_size % ss.mBytesPerFrame != 0) {
    buffer_size += ss.mBytesPerFrame - (buffer_size % ss.mBytesPerFrame);
  }
  assert(buffer_size % ss.mBytesPerFrame == 0);

  r = AudioUnitInitialize(stm->unit);
  if (r != 0) {
    fprintf(stderr, "cubeb_audiounit: FATAL: AudioUnitInitialize returned %ld\n", (long) r);
  }
  assert(r == 0);

  *stream = stm;

  return CUBEB_OK;
}
/*
 * Creates an output-only cubeb stream on the default output unit, using the
 * pre-10.6 Component Manager API or the modern AudioComponent API depending
 * on the deployment target.  Configures the render callback and client
 * stream format, probes (but currently does not apply) a latency-derived
 * buffer size, and initializes the unit.  The stream is NOT started here.
 *
 * On any Core Audio failure the partially-built stream is torn down with
 * audiounit_stream_destroy and CUBEB_ERROR is returned.
 *
 * Returns CUBEB_OK on success, CUBEB_ERROR_INVALID_FORMAT for an unknown
 * sample format, CUBEB_ERROR otherwise.
 */
static int
audiounit_stream_init(cubeb * context, cubeb_stream ** stream, char const * stream_name,
                      cubeb_stream_params stream_params, unsigned int latency,
                      cubeb_data_callback data_callback, cubeb_state_callback state_callback,
                      void * user_ptr)
{
  AudioStreamBasicDescription ss;
#if MAC_OS_X_VERSION_MIN_REQUIRED < 1060
  ComponentDescription desc;
  Component comp;
#else
  AudioComponentDescription desc;
  AudioComponent comp;
#endif
  cubeb_stream * stm;
  AURenderCallbackStruct input;
  unsigned int buffer_size, default_buffer_size;
  OSStatus r;
  UInt32 size;
  AudioDeviceID output_device_id;   /* NOTE(review): declared but unused here */
  AudioValueRange latency_range;

  assert(context);
  *stream = NULL;

  /* Map the cubeb sample format onto Core Audio format flags. */
  memset(&ss, 0, sizeof(ss));
  ss.mFormatFlags = 0;
  switch (stream_params.format) {
  case CUBEB_SAMPLE_S16LE:
    ss.mBitsPerChannel = 16;
    ss.mFormatFlags |= kAudioFormatFlagIsSignedInteger;
    break;
  case CUBEB_SAMPLE_S16BE:
    ss.mBitsPerChannel = 16;
    ss.mFormatFlags |= kAudioFormatFlagIsSignedInteger |
      kAudioFormatFlagIsBigEndian;
    break;
  case CUBEB_SAMPLE_FLOAT32LE:
    ss.mBitsPerChannel = 32;
    ss.mFormatFlags |= kAudioFormatFlagIsFloat;
    break;
  case CUBEB_SAMPLE_FLOAT32BE:
    ss.mBitsPerChannel = 32;
    ss.mFormatFlags |= kAudioFormatFlagIsFloat |
      kAudioFormatFlagIsBigEndian;
    break;
  default:
    return CUBEB_ERROR_INVALID_FORMAT;
  }

  /* Interleaved packed linear PCM at the requested rate/channel count. */
  ss.mFormatID = kAudioFormatLinearPCM;
  ss.mFormatFlags |= kLinearPCMFormatFlagIsPacked;
  ss.mSampleRate = stream_params.rate;
  ss.mChannelsPerFrame = stream_params.channels;

  ss.mBytesPerFrame = (ss.mBitsPerChannel / 8) * ss.mChannelsPerFrame;
  ss.mFramesPerPacket = 1;
  ss.mBytesPerPacket = ss.mBytesPerFrame * ss.mFramesPerPacket;

  /* Locate the system default output unit. */
  desc.componentType = kAudioUnitType_Output;
  desc.componentSubType = kAudioUnitSubType_DefaultOutput;
  desc.componentManufacturer = kAudioUnitManufacturer_Apple;
  desc.componentFlags = 0;
  desc.componentFlagsMask = 0;
#if MAC_OS_X_VERSION_MIN_REQUIRED < 1060
  comp = FindNextComponent(NULL, &desc);
#else
  comp = AudioComponentFindNext(NULL, &desc);
#endif
  assert(comp);

  stm = calloc(1, sizeof(*stm));
  assert(stm);

  stm->context = context;
  stm->data_callback = data_callback;
  stm->state_callback = state_callback;
  stm->user_ptr = user_ptr;

  stm->sample_spec = ss;

  r = pthread_mutex_init(&stm->mutex, NULL);
  assert(r == 0);

  stm->frames_played = 0;
  stm->frames_queued = 0;
  stm->current_latency_frames = 0;
  stm->hw_latency_frames = UINT64_MAX;   /* sentinel: not yet queried */

#if MAC_OS_X_VERSION_MIN_REQUIRED < 1060
  r = OpenAComponent(comp, &stm->unit);
#else
  r = AudioComponentInstanceNew(comp, &stm->unit);
#endif
  if (r != 0) {
    audiounit_stream_destroy(stm);
    return CUBEB_ERROR;
  }

  /* Pull-model rendering: the unit calls audiounit_output_callback, which
     in turn drains data_callback. */
  input.inputProc = audiounit_output_callback;
  input.inputProcRefCon = stm;
  r = AudioUnitSetProperty(stm->unit, kAudioUnitProperty_SetRenderCallback,
                           kAudioUnitScope_Global, 0, &input, sizeof(input));
  if (r != 0) {
    audiounit_stream_destroy(stm);
    return CUBEB_ERROR;
  }

  /* Requested latency (ms) converted to frames. */
  buffer_size = latency / 1000.0 * ss.mSampleRate;

  /* Get the range of latency this particular device can work with, and clamp
   * the requested latency to this acceptable range. */
  if (audiounit_get_acceptable_latency_range(&latency_range) != CUBEB_OK) {
    audiounit_stream_destroy(stm);
    return CUBEB_ERROR;
  }

  if (buffer_size < (unsigned int) latency_range.mMinimum) {
    buffer_size = (unsigned int) latency_range.mMinimum;
  } else if (buffer_size > (unsigned int) latency_range.mMaximum) {
    buffer_size = (unsigned int) latency_range.mMaximum;
  }

  /**
   * Get the default buffer size. If our latency request is below the default,
   * set it. Otherwise, use the default latency.
   **/
  size = sizeof(default_buffer_size);
  r = AudioUnitGetProperty(stm->unit, kAudioDevicePropertyBufferFrameSize,
                           kAudioUnitScope_Output, 0, &default_buffer_size, &size);

  if (r != 0) {
    audiounit_stream_destroy(stm);
    return CUBEB_ERROR;
  }

  // Setting the latency doesn't work well for USB headsets (eg. plantronics).
  // Keep the default latency for now.
#if 0
  if (buffer_size < default_buffer_size) {
    /* Set the maximum number of frame that the render callback will ask for,
     * effectively setting the latency of the stream. This is process-wide. */
    r = AudioUnitSetProperty(stm->unit, kAudioDevicePropertyBufferFrameSize,
                             kAudioUnitScope_Output, 0, &buffer_size, sizeof(buffer_size));
    if (r != 0) {
      audiounit_stream_destroy(stm);
      return CUBEB_ERROR;
    }
  }
#endif

  r = AudioUnitSetProperty(stm->unit, kAudioUnitProperty_StreamFormat,
                           kAudioUnitScope_Input, 0, &ss, sizeof(ss));
  if (r != 0) {
    audiounit_stream_destroy(stm);
    return CUBEB_ERROR;
  }

  r = AudioUnitInitialize(stm->unit);
  if (r != 0) {
    audiounit_stream_destroy(stm);
    return CUBEB_ERROR;
  }

  *stream = stm;

  return CUBEB_OK;
}
/*
 * SDL CoreAudio backend: opens the Default/RemoteIO output component for the
 * requested device, sets the stream format and render callback on the
 * capture- or playback-side element, allocates the mixing buffer, then
 * initializes and starts the unit.
 *
 * CHECK_RESULT is presumed to bail out of this function on a non-noErr
 * `result` (TODO confirm against the macro's definition elsewhere in the
 * file).
 *
 * Returns 1 on success, 0 on failure (with SDL_SetError set).
 */
static int
prepare_audiounit(_THIS, const char *devname, int iscapture,
                  const AudioStreamBasicDescription * strdesc)
{
    OSStatus result = noErr;
    AURenderCallbackStruct callback;
#if MACOSX_COREAUDIO
    ComponentDescription desc;
    Component comp = NULL;
#else
    AudioComponentDescription desc;
    AudioComponent comp = NULL;
#endif
    /* AUHAL element convention: 0 = output, 1 = input. */
    const AudioUnitElement output_bus = 0;
    const AudioUnitElement input_bus = 1;
    const AudioUnitElement bus = ((iscapture) ? input_bus : output_bus);
    /* Client-facing scope: device data leaves on Output scope for capture,
       enters on Input scope for playback. */
    const AudioUnitScope scope = ((iscapture) ? kAudioUnitScope_Output :
                                  kAudioUnitScope_Input);

#if MACOSX_COREAUDIO
    if (!find_device_by_name(this, devname, iscapture)) {
        SDL_SetError("Couldn't find requested CoreAudio device");
        return 0;
    }
#endif

    SDL_zero(desc);
    desc.componentType = kAudioUnitType_Output;
    desc.componentManufacturer = kAudioUnitManufacturer_Apple;

#if MACOSX_COREAUDIO
    desc.componentSubType = kAudioUnitSubType_DefaultOutput;
    comp = FindNextComponent(NULL, &desc);
#else
    desc.componentSubType = kAudioUnitSubType_RemoteIO;  /* !!! FIXME: ? */
    comp = AudioComponentFindNext(NULL, &desc);
#endif

    if (comp == NULL) {
        SDL_SetError("Couldn't find requested CoreAudio component");
        return 0;
    }

    /* Open & initialize the audio unit */
#if MACOSX_COREAUDIO
    result = OpenAComponent(comp, &this->hidden->audioUnit);
    CHECK_RESULT("OpenAComponent");
#else
    /*
       AudioComponentInstanceNew only available on iPhone OS 2.0 and Mac OS X 10.6
       We can't use OpenAComponent on iPhone because it is not present
     */
    result = AudioComponentInstanceNew(comp, &this->hidden->audioUnit);
    CHECK_RESULT("AudioComponentInstanceNew");
#endif

    this->hidden->audioUnitOpened = 1;

#if MACOSX_COREAUDIO
    result = AudioUnitSetProperty(this->hidden->audioUnit,
                                  kAudioOutputUnitProperty_CurrentDevice,
                                  kAudioUnitScope_Global, 0,
                                  &this->hidden->deviceID,
                                  sizeof(AudioDeviceID));
    CHECK_RESULT
        ("AudioUnitSetProperty (kAudioOutputUnitProperty_CurrentDevice)");
#endif

    /* Set the data format of the audio unit. */
    result = AudioUnitSetProperty(this->hidden->audioUnit,
                                  kAudioUnitProperty_StreamFormat,
                                  scope, bus, strdesc, sizeof(*strdesc));
    CHECK_RESULT("AudioUnitSetProperty (kAudioUnitProperty_StreamFormat)");

    /* Set the audio callback */
    /* NOTE(review): for capture this installs SetRenderCallback rather than
       kAudioOutputUnitProperty_SetInputCallback — verify capture actually
       works through this path. */
    SDL_memset(&callback, 0, sizeof(AURenderCallbackStruct));
    callback.inputProc = ((iscapture) ? inputCallback : outputCallback);
    callback.inputProcRefCon = this;
    result = AudioUnitSetProperty(this->hidden->audioUnit,
                                  kAudioUnitProperty_SetRenderCallback,
                                  scope, bus, &callback, sizeof(callback));
    CHECK_RESULT
        ("AudioUnitSetProperty (kAudioUnitProperty_SetRenderCallback)");

    /* Calculate the final parameters for this audio specification */
    SDL_CalculateAudioSpec(&this->spec);

    /* Allocate a sample buffer */
    this->hidden->bufferOffset = this->hidden->bufferSize = this->spec.size;
    this->hidden->buffer = SDL_malloc(this->hidden->bufferSize);

    result = AudioUnitInitialize(this->hidden->audioUnit);
    CHECK_RESULT("AudioUnitInitialize");

    /* Finally, start processing of the audio unit */
    result = AudioOutputUnitStart(this->hidden->audioUnit);
    CHECK_RESULT("AudioOutputUnitStart");

    /* We're running! */
    return 1;
}
/*
 * SDL 1.2 CoreAudio backend: opens the system default output unit, pushes the
 * requested SDL_AudioSpec onto its input scope as a packed linear-PCM format,
 * installs audioCallback as the render source, allocates the mix buffer, and
 * starts playback immediately.
 *
 * CHECK_RESULT is presumed to return from this function with an error when
 * `result` != noErr (TODO confirm against the macro's definition).
 *
 * Returns 1 on success, -1 if the output component cannot be found.
 */
int Core_OpenAudio(_THIS, SDL_AudioSpec *spec)
{
    OSStatus result = noErr;
    Component comp;
    ComponentDescription desc;
    struct AURenderCallbackStruct callback;
    AudioStreamBasicDescription requestedDesc;

    /* Setup a AudioStreamBasicDescription with the requested format */
    requestedDesc.mFormatID = kAudioFormatLinearPCM;
    requestedDesc.mFormatFlags = kLinearPCMFormatFlagIsPacked;
    requestedDesc.mChannelsPerFrame = spec->channels;
    requestedDesc.mSampleRate = spec->freq;

    /* SDL packs sample width in the low byte of the format code, and the
       signed/big-endian flags in bits 15 and 12 respectively. */
    requestedDesc.mBitsPerChannel = spec->format & 0xFF;
    if (spec->format & 0x8000)
        requestedDesc.mFormatFlags |= kLinearPCMFormatFlagIsSignedInteger;
    if (spec->format & 0x1000)
        requestedDesc.mFormatFlags |= kLinearPCMFormatFlagIsBigEndian;

    requestedDesc.mFramesPerPacket = 1;
    requestedDesc.mBytesPerFrame = requestedDesc.mBitsPerChannel * requestedDesc.mChannelsPerFrame / 8;
    requestedDesc.mBytesPerPacket = requestedDesc.mBytesPerFrame * requestedDesc.mFramesPerPacket;

    /* Locate the default output audio unit */
    desc.componentType = kAudioUnitType_Output;
    desc.componentSubType = kAudioUnitSubType_DefaultOutput;
    desc.componentManufacturer = kAudioUnitManufacturer_Apple;
    desc.componentFlags = 0;
    desc.componentFlagsMask = 0;

    comp = FindNextComponent (NULL, &desc);
    if (comp == NULL) {
        SDL_SetError ("Failed to start CoreAudio: FindNextComponent returned NULL");
        return -1;
    }

    /* Open & initialize the default output audio unit */
    /* NOTE(review): the unit is initialized BEFORE the stream format and
       callback are set — mirrors the "initialize early" pattern used
       elsewhere in this file. */
    result = OpenAComponent (comp, &outputAudioUnit);
    CHECK_RESULT("OpenAComponent")

    result = AudioUnitInitialize (outputAudioUnit);
    CHECK_RESULT("AudioUnitInitialize")

    /* Set the input format of the audio unit. */
    result = AudioUnitSetProperty (outputAudioUnit,
                                   kAudioUnitProperty_StreamFormat,
                                   kAudioUnitScope_Input,
                                   0,
                                   &requestedDesc,
                                   sizeof (requestedDesc));
    CHECK_RESULT("AudioUnitSetProperty (kAudioUnitProperty_StreamFormat)")

    /* Set the audio callback */
    callback.inputProc = audioCallback;
    callback.inputProcRefCon = this;
    result = AudioUnitSetProperty (outputAudioUnit,
                                   kAudioUnitProperty_SetRenderCallback,
                                   kAudioUnitScope_Input,
                                   0,
                                   &callback,
                                   sizeof(callback));
    CHECK_RESULT("AudioUnitSetProperty (kAudioUnitProperty_SetInputCallback)")

    /* Calculate the final parameters for this audio specification */
    SDL_CalculateAudioSpec(spec);

    /* Allocate a sample buffer */
    bufferOffset = bufferSize = this->spec.size;
    buffer = SDL_malloc(bufferSize);

    /* Finally, start processing of the audio unit */
    result = AudioOutputUnitStart (outputAudioUnit);
    CHECK_RESULT("AudioOutputUnitStart")

    /* We're running! */
    return(1);
}
/*
 * Opens and configures an AUHAL (explicit device) or DefaultOutput audio
 * unit for either capture (is_read) or playback: enables/disables the
 * relevant IO elements, disables unit-side buffer allocation, negotiates a
 * 16-bit signed interleaved PCM format at d->rate/d->nchannels on the
 * client-facing scope, installs the read/write render callback, then
 * initializes and starts the unit.
 *
 * FIX vs. the original: the source contained mojibake "¶m" where "&param"
 * belongs (four call sites) — restored; OSStatus arguments are now cast to
 * match their printf-style format specifiers.
 *
 * Returns 0 on success, -1 on failure (CHECK_AURESULT presumably bails out
 * on error — confirm against the macro definition).
 */
static int audio_unit_open(AUCommon *d, bool_t is_read)
{
	OSStatus result;
	UInt32 param;
	ComponentDescription desc;
	Component comp;
	AudioStreamBasicDescription asbd;
	const int input_bus=1;   /* AUHAL element 1 = input */
	const int output_bus=0;  /* AUHAL element 0 = output */

	// Get Default Input audio unit
	desc.componentType = kAudioUnitType_Output;
	desc.componentSubType = d->dev!=-1?kAudioUnitSubType_HALOutput:kAudioUnitSubType_DefaultOutput;
	desc.componentManufacturer = kAudioUnitManufacturer_Apple;
	desc.componentFlags = 0;
	desc.componentFlagsMask = 0;

	comp = FindNextComponent(NULL, &desc);
	if (comp == NULL) {
		ms_message("Cannot find audio component");
		return -1;
	}

	result = OpenAComponent(comp, &d->au);
	if(result != noErr) {
		ms_message("Cannot open audio component %x", (unsigned int)result);
		return -1;
	}

	/* HAL unit with an explicit device: enable only the direction we need
	   and bind the device. */
	param = is_read;
	if (d->dev!=-1) {
		CHECK_AURESULT(AudioUnitSetProperty(d->au,
					kAudioOutputUnitProperty_EnableIO,
					kAudioUnitScope_Input,
					input_bus,
					&param,
					sizeof(UInt32)));

		param = !is_read;
		CHECK_AURESULT(AudioUnitSetProperty(d->au,
					kAudioOutputUnitProperty_EnableIO,
					kAudioUnitScope_Output,
					output_bus,
					&param,
					sizeof(UInt32)));

		// Set the current device
		CHECK_AURESULT(AudioUnitSetProperty(d->au,
					kAudioOutputUnitProperty_CurrentDevice,
					kAudioUnitScope_Global,
					output_bus,
					&d->dev,
					sizeof(AudioDeviceID)));
	}

	/* We supply our own buffers from the callbacks. */
	param=0;
	CHECK_AURESULT(AudioUnitSetProperty(d->au,
				kAudioUnitProperty_ShouldAllocateBuffer,
				is_read ? kAudioUnitScope_Input : kAudioUnitScope_Output ,
				is_read ? input_bus : output_bus ,
				&param,
				sizeof(param)));

	/* Read the device-side format first (for logging), then overwrite the
	   client-facing scope with our 16-bit interleaved PCM format. */
	UInt32 asbdsize = sizeof(AudioStreamBasicDescription);
	memset((char *)&asbd, 0, asbdsize);

	CHECK_AURESULT(AudioUnitGetProperty(d->au,
				kAudioUnitProperty_StreamFormat,
				is_read ? kAudioUnitScope_Input : kAudioUnitScope_Output,
				is_read ? input_bus : output_bus,
				&asbd,
				&asbdsize));

	show_format(is_read ? "Input audio unit" : "Output audio unit",&asbd);

	asbd.mSampleRate=d->rate;
	asbd.mBytesPerPacket=asbd.mBytesPerFrame = 2*d->nchannels; /* 16-bit interleaved */
	asbd.mChannelsPerFrame = d->nchannels;
	asbd.mBitsPerChannel=16;
	asbd.mFormatID=kAudioFormatLinearPCM;
	asbd.mFormatFlags=kAudioFormatFlagIsPacked|kAudioFormatFlagIsSignedInteger;

	/* Client side is the opposite scope of the device side. */
	CHECK_AURESULT(AudioUnitSetProperty(d->au,
				kAudioUnitProperty_StreamFormat,
				is_read ? kAudioUnitScope_Output : kAudioUnitScope_Input,
				is_read ? input_bus : output_bus ,
				&asbd,
				sizeof(AudioStreamBasicDescription)));

	CHECK_AURESULT(AudioUnitGetProperty(d->au,
				kAudioUnitProperty_StreamFormat,
				is_read ? kAudioUnitScope_Output : kAudioUnitScope_Input,
				is_read ? input_bus : output_bus ,
				&asbd,
				&asbdsize));

	show_format(is_read ? "Input audio unit after configuration" : "Output audio unit after configuration",&asbd);

	// Get the number of frames in the IO buffer(s)
	param = sizeof(UInt32);
	UInt32 numFrames;
	CHECK_AURESULT(AudioUnitGetProperty(d->au,
				kAudioDevicePropertyBufferFrameSize,
				kAudioUnitScope_Input,
				input_bus,
				&numFrames,
				&param));

	ms_message("Number of frames per buffer = %i", (int)numFrames);

	AURenderCallbackStruct cbs;
	cbs.inputProcRefCon = d;
	if (is_read) {
		cbs.inputProc = readRenderProc;
		CHECK_AURESULT(AudioUnitSetProperty(d->au,
					kAudioOutputUnitProperty_SetInputCallback,
					kAudioUnitScope_Global,
					input_bus,
					&cbs,
					sizeof(AURenderCallbackStruct)));
	} else {
		cbs.inputProc = writeRenderProc;
		CHECK_AURESULT(AudioUnitSetProperty (d->au,
					kAudioUnitProperty_SetRenderCallback,
					kAudioUnitScope_Global,
					output_bus,
					&cbs,
					sizeof(AURenderCallbackStruct)));
	}

	result = AudioUnitInitialize(d->au);
	if(result != noErr) {
		ms_error("failed to AudioUnitInitialize %i , is_read=%i", (int)result, (int)is_read);
		return -1;
	}

	CHECK_AURESULT(AudioOutputUnitStart(d->au));
	return 0;
}
//_______________________________________________
//
// coreAudioDevice::init
//
// Open the default output AudioUnit via the (legacy) Component Manager,
// install a render callback, set a big-endian 16-bit PCM stream format,
// and size the device's IO buffer. Returns 1 on success, 0 on failure.
//_______________________________________________
uint8_t coreAudioDevice::init(uint8_t channels, uint32_t fq)
{
    _channels = channels;
    OSStatus err;
    ComponentDescription desc;
    AudioUnitInputCallback input;        // legacy (pre-AURenderCallbackStruct) callback type
    AudioStreamBasicDescription streamFormat;
    AudioDeviceID theDevice;
    UInt32 sz = 0;
    UInt32 kFramesPerSlice = 512;        // requested IO buffer size, in frames

    // Locate the default output unit. 'aunt' is the pre-OSX-10.2 audio unit
    // component type. NOTE(review): componentManufacturer is set to
    // kAudioUnitID_DefaultOutput, not a manufacturer code -- looks wrong but
    // may be what the legacy API matched on; verify before touching.
    desc.componentType = 'aunt';
    desc.componentSubType = kAudioUnitSubType_Output;
    desc.componentManufacturer = kAudioUnitID_DefaultOutput;
    desc.componentFlags = 0;
    desc.componentFlagsMask = 0;
    comp = FindNextComponent(NULL, &desc);
    if (comp == NULL)
    {
        printf("coreAudio: Cannot find component\n");
        return 0;
    }
    err = OpenAComponent(comp, &theOutputUnit);
    if (err)
    {
        printf("coreAudio: Cannot open component\n");
        return 0;
    }
    // Initialize it
    verify_noerr(AudioUnitInitialize(theOutputUnit));

    // Set up a callback function to generate output to the output unit
#if 1
    input.inputProc = MyRenderer;
    input.inputProcRefCon = NULL;
    verify_noerr(AudioUnitSetProperty(theOutputUnit, kAudioUnitProperty_SetInputCallback, kAudioUnitScope_Global, 0, &input, sizeof(input)));
#endif

    // Describe what we will feed the unit: interleaved big-endian signed
    // 16-bit PCM at the caller's sample rate / channel count.
    streamFormat.mSampleRate = fq;
    streamFormat.mFormatID = kAudioFormatLinearPCM;
    streamFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsBigEndian | kLinearPCMFormatFlagIsPacked;
    streamFormat.mBytesPerPacket = channels * sizeof (UInt16);
    streamFormat.mFramesPerPacket = 1;
    streamFormat.mBytesPerFrame = channels * sizeof (UInt16);
    streamFormat.mChannelsPerFrame = channels;
    streamFormat.mBitsPerChannel = sizeof (UInt16) * 8;
    verify_noerr(AudioUnitSetProperty( theOutputUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &streamFormat, sizeof(AudioStreamBasicDescription)));

    printf("Rendering source:\n\t");
    printf ("SampleRate=%f,", streamFormat.mSampleRate);
    printf ("BytesPerPacket=%ld,", streamFormat.mBytesPerPacket);
    printf ("FramesPerPacket=%ld,", streamFormat.mFramesPerPacket);
    printf ("BytesPerFrame=%ld,", streamFormat.mBytesPerFrame);
    printf ("BitsPerChannel=%ld,", streamFormat.mBitsPerChannel);
    printf ("ChannelsPerFrame=%ld\n", streamFormat.mChannelsPerFrame);

    // Find which device the output unit is bound to, then set (and read
    // back) its IO buffer size in frames, and watch for overloads.
    sz = sizeof (theDevice);
    verify_noerr(AudioUnitGetProperty (theOutputUnit, kAudioOutputUnitProperty_CurrentDevice, 0, 0, &theDevice, &sz));
    sz = sizeof (kFramesPerSlice);
    verify_noerr(AudioDeviceSetProperty(theDevice, 0, 0, false, kAudioDevicePropertyBufferFrameSize, sz, &kFramesPerSlice));
    sz = sizeof (kFramesPerSlice);
    verify_noerr(AudioDeviceGetProperty(theDevice, 0, false, kAudioDevicePropertyBufferFrameSize, &sz, &kFramesPerSlice));
    verify_noerr (AudioDeviceAddPropertyListener(theDevice, 0, false, kAudioDeviceProcessorOverload, OverloadListenerProc, 0));
    printf ("size of the device's buffer = %ld frames\n", kFramesPerSlice);

    frameCount = 0;
    audioBuffer = new int16_t[BUFFER_SIZE];   // between half a sec and a sec should be enough :)
    return 1;
}
/*
 * macosx_audio_open: open the default input device (HAL IOProc) for capture
 * and the default output unit for playback, configure both to the formats
 * this code expects, allocate the ring buffers, and start audio processing.
 *
 * Returns 1 on success, 0 on failure (callers treat 0 as error).
 *
 * FIX(review): the endianness mask used to be
 *     mFormatFlags &= (kAudioFormatFlagIsBigEndian & 0);
 * which ANDs with zero and therefore cleared *every* format flag (signed,
 * packed, float, ...), not just the big-endian bit. Intent per the original
 * comment ("set little endian") is to clear only that bit.
 */
int macosx_audio_open(audio_desc_t ad, audio_format* ifmt, audio_format *ofmt)
{
	OSStatus err = noErr;
	UInt32 propertySize;
	Boolean writable;

	obtained_ = false;
	add = ad;
	//dev[0] = devices[ad];
	UNUSED(ofmt);

	// Get the default input device ID.
	err = AudioHardwareGetPropertyInfo(kAudioHardwarePropertyDefaultInputDevice, &propertySize, &writable);
	if (err != noErr) {
		return 0;
	}
	err = AudioHardwareGetProperty(kAudioHardwarePropertyDefaultInputDevice, &propertySize, &(devices[ad].inputDeviceID_));
	if (err != noErr) {
		debug_msg("error kAudioHardwarePropertyDefaultInputDevice");
		return 0;
	}
	if (devices[ad].inputDeviceID_ == kAudioDeviceUnknown) {
		debug_msg("error kAudioDeviceUnknown");
		return 0;
	}

	// Get the input stream description.
	err = AudioDeviceGetPropertyInfo(devices[ad].inputDeviceID_, 0, true, kAudioDevicePropertyStreamFormat, &propertySize, &writable);
	if (err != noErr) {
		debug_msg("error AudioDeviceGetPropertyInfo");
		return 0;
	}
	err = AudioDeviceGetProperty(devices[ad].inputDeviceID_, 0, true, kAudioDevicePropertyStreamFormat, &propertySize, &(devices[ad].inputStreamBasicDescription_));
	//printf("inputStreamBasicDescription_.mBytesPerFrame %d\n", devices[add].inputStreamBasicDescription_);
	if (err != noErr) {
		debug_msg("error AudioDeviceGetProperty");
		return 0;
	}

	// Force little-endian samples: clear only the big-endian flag.
	// (Previously "&= (kAudioFormatFlagIsBigEndian & 0)" wiped all flags.)
	devices[ad].inputStreamBasicDescription_.mFormatFlags &= ~kAudioFormatFlagIsBigEndian;
	if (writable) {
		err = AudioDeviceSetProperty(devices[ad].inputDeviceID_, NULL, 0, true, kAudioDevicePropertyStreamFormat, sizeof(AudioStreamBasicDescription), &(devices[ad].inputStreamBasicDescription_));
		if (err != noErr)
			printf("err: AudioDeviceSetProperty: kAudioDevicePropertyStreamFormat\n");
	}

	/* set the buffer size of the device */
	/*
	int bufferByteSize = 8192;
	propertySize = sizeof(bufferByteSize);
	err = AudioDeviceSetProperty(devices[ad].inputDeviceID_, NULL, 0, true, kAudioDevicePropertyBufferSize, propertySize, &bufferByteSize);
	if (err != noErr)
		debug_msg("err: Set kAudioDevicePropertyBufferSize to %d\n", bufferByteSize);
	else
		debug_msg("sucessfully set kAudioDevicePropertyBufferSize to %d\n", bufferByteSize);
	*/

	// Set the device sample rate -- a temporary fix for the G5's
	// built-in audio and possibly other audio devices.
	Boolean IsInput = 0;
	int inChannel = 0;
	Float64 theAnswer = 44100;
	UInt32 theSize = sizeof(theAnswer);
	err = AudioDeviceSetProperty(devices[ad].inputDeviceID_, NULL, inChannel, IsInput, kAudioDevicePropertyNominalSampleRate, theSize, &theAnswer);
	if (err != noErr) {
		debug_msg("error AudioDeviceSetProperty\n");
		return 0;
	}
	debug_msg("Sample rate, %f\n", theAnswer);

#if defined(MAC_OS_X_VERSION_10_5) && (MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5)
	// 10.5+: IOProc IDs replace the bare IOProc registration.
	err = AudioDeviceCreateIOProcID(devices[ad].inputDeviceID_, audioIOProc, (void*)NULL, &devices[ad].inputDeviceProcID_);
	if (err != noErr) {
		debug_msg("error AudioDeviceCreateIOProcID, %s\n", GetMacOSStatusCommentString(err));
		return 0;
	}
	err = OpenADefaultComponent(kAudioUnitType_Output, kAudioUnitSubType_DefaultOutput, &(devices[ad].outputUnit_));
	// The HAL AU maybe a better way to in the future...
	//err = OpenADefaultComponent(kAudioUnitType_Output, kAudioUnitSubType_HALOutput, &(devices[ad].outputUnit_));
	if (err != noErr) {
		debug_msg("error OpenADefaultComponent\n");
		return 0;
	}
#else
	// Register the AudioDeviceIOProc.
	err = AudioDeviceAddIOProc(devices[ad].inputDeviceID_, audioIOProc, NULL);
	if (err != noErr) {
		debug_msg("error AudioDeviceAddIOProc\n");
		return 0;
	}
	err = OpenDefaultAudioOutput(&(devices[ad].outputUnit_));
	if (err != noErr) {
		debug_msg("error OpenDefaultAudioOutput\n");
		return 0;
	}
#endif

	// Register a callback function to provide output data to the unit.
	devices[ad].input.inputProc = outputRenderer;
	devices[ad].input.inputProcRefCon = 0;
	/* These would be needed if HAL used *
	UInt32 enableIO =1;
	err = AudioUnitSetProperty(devices[ad].outputUnit_, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, 0, (const void*)&enableIO, sizeof(UInt32));
	enableIO=0;
	err = AudioUnitSetProperty(devices[ad].outputUnit_, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, 1, (const void*)&enableIO, sizeof(UInt32));
	if (err != noErr) {
		debug_msg("error AudioUnitSetProperty EnableIO with error %ld: %s\n", err, GetMacOSStatusErrorString(err));
		return 0;
	}*/
#if defined(MAC_OS_X_VERSION_10_5) && (MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5)
	err = AudioUnitSetProperty(devices[ad].outputUnit_, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Global, 0, &(devices[ad].input), sizeof(AURenderCallbackStruct));
#else
	err = AudioUnitSetProperty(devices[ad].outputUnit_, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, 0, &(devices[ad].input), sizeof(AURenderCallbackStruct));
#endif
	if (err != noErr) {
		debug_msg("error AudioUnitSetProperty1 with error %ld: %s\n", err, GetMacOSStatusErrorString(err));
		return 0;
	}

	// Define the Mash stream description. Mash puts 20ms of data into each read
	// and write call. 20ms at 8000Hz equals 160 samples. Each sample is a u_char,
	// so that's 160 bytes. Mash uses 8-bit mu-law internally, so we need to convert
	// to 16-bit linear before using the audio data.
	devices[ad].mashStreamBasicDescription_.mSampleRate = 8000.0;
	//devices[ad].mashStreamBasicDescription_.mSampleRate = ifmt->sample_rate;
	devices[ad].mashStreamBasicDescription_.mFormatID = kAudioFormatLinearPCM;
#ifdef WORDS_BIGENDIAN
	devices[ad].mashStreamBasicDescription_.mFormatFlags =kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsBigEndian |kLinearPCMFormatFlagIsPacked;
#else
	devices[ad].mashStreamBasicDescription_.mFormatFlags =kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
#endif
	devices[ad].mashStreamBasicDescription_.mBytesPerPacket = 2;
	devices[ad].mashStreamBasicDescription_.mFramesPerPacket = 1;
	devices[ad].mashStreamBasicDescription_.mBytesPerFrame = 2;
	devices[ad].mashStreamBasicDescription_.mChannelsPerFrame = 1;
	devices[ad].mashStreamBasicDescription_.mBitsPerChannel = 16;

	// Inform the default output unit of our source format.
	err = AudioUnitSetProperty(devices[ad].outputUnit_, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &(devices[ad].mashStreamBasicDescription_), sizeof(AudioStreamBasicDescription));
	if (err != noErr) {
		debug_msg("error AudioUnitSetProperty2");
		printf("error setting output unit source format\n");
		return 0;
	}

	// check the stream format
	err = AudioUnitGetPropertyInfo(devices[ad].outputUnit_, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &propertySize, &writable);
	if (err != noErr)
		debug_msg("err getting propert info for kAudioUnitProperty_StreamFormat\n");
	err = AudioUnitGetProperty(devices[ad].outputUnit_, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &streamdesc_, &propertySize);
	if (err != noErr)
		debug_msg("err getting values for kAudioUnitProperty_StreamFormat\n");

	char name[128];
	audio_format_name(ifmt, name, 128);
	debug_msg("Requested ifmt %s\n",name);
	debug_msg("ifmt bytes pre block: %d\n",ifmt->bytes_per_block);
	// handle the requested format
	if (ifmt->encoding != DEV_S16) {
		audio_format_change_encoding(ifmt, DEV_S16);
		debug_msg("Requested ifmt changed to %s\n",name);
		debug_msg("ifmt bytes pre block: %d\n",ifmt->bytes_per_block);
	}
	audio_format_name(ofmt, name, 128);
	debug_msg("Requested ofmt %s\n",name);
	debug_msg("ofmt bytes pre block: %d\n",ofmt->bytes_per_block);

	// Allocate the read buffer and Z delay line.
	//readBufferSize_ = 8192;
	readBufferSize_ = ifmt->bytes_per_block * ringBufferFactor_;
	//readBufferSize_ = 320;
	//printf("readBufferSize_ %d\n", readBufferSize_);
	readBuffer_ = malloc(sizeof(u_char)*readBufferSize_);
	bzero(readBuffer_, readBufferSize_ * sizeof(u_char));
	//memset(readBuffer_, PCMU_AUDIO_ZERO, readBufferSize_);
	//inputReadIndex_ = -1;
	inputReadIndex_ = 0; inputWriteIndex_ = 0;
	zLine_ = malloc(sizeof(double)*DECIM441_LENGTH / 80);
	availableInput_ = 0;

	// Allocate the write buffer.
	//writeBufferSize_ = 8000;
	writeBufferSize_ = ofmt->bytes_per_block * ringBufferFactor_;
	writeBuffer_ = malloc(sizeof(SInt16)*writeBufferSize_);
	bzero(writeBuffer_, writeBufferSize_ * sizeof(SInt16));
	outputReadIndex_ = 0; outputWriteIndex_ = 0;
	//outputWriteIndex_ = -1;

	// Start audio processing.
	err = AudioUnitInitialize(devices[ad].outputUnit_);
	if (err != noErr) {
		debug_msg("error AudioUnitInitialize\n");
		return 0;
	}
	err = AudioDeviceStart(devices[ad].inputDeviceID_, audioIOProc);
	if (err != noErr) {
		fprintf(stderr, "Input device error: AudioDeviceStart\n");
		return 0;
	}
	err = AudioOutputUnitStart(devices[ad].outputUnit_);
	if (err != noErr) {
		fprintf(stderr, "Output device error: AudioOutputUnitStart\n");
		return 0;
	}

	// Inform the default output unit of our source format.
	/*
	err = AudioUnitSetProperty(devices[ad].outputUnit_, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &(devices[ad].mashStreamBasicDescription_), sizeof(AudioStreamBasicDescription));
	if (err != noErr) {
		debug_msg("error AudioUnitSetProperty3");
		return 0;
	}
	*/
	return 1;
}
void configure(const AudioStreamBasicDescription& outDesc, UInt32 bufferSize) { // enable IO on input UInt32 param = 1; OSErr result = AudioUnitSetProperty(m_inputUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, 1, ¶m, sizeof(UInt32)); ASSERT(!result); // disable IO on output param = 0; result = AudioUnitSetProperty(m_inputUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, 0, ¶m, sizeof(UInt32)); ASSERT(!result); #if !TARGET_OS_IPHONE // set to use default device AudioDeviceID deviceId = kAudioObjectUnknown; param = sizeof(AudioDeviceID); AudioObjectPropertyAddress property_address = { kAudioHardwarePropertyDefaultInputDevice, // mSelector kAudioObjectPropertyScopeGlobal, // mScope kAudioObjectPropertyElementMaster // mElement }; UInt32 deviceIdSize = sizeof(deviceId); result = AudioObjectGetPropertyData(kAudioObjectSystemObject, &property_address, 0, // inQualifierDataSize NULL, // inQualifierData &deviceIdSize, &deviceId); ASSERT(!result); result = AudioUnitSetProperty(m_inputUnit, kAudioOutputUnitProperty_CurrentDevice, kAudioUnitScope_Global, 0, &deviceId, sizeof(AudioDeviceID)); ASSERT(!result); #endif // configure the callback AURenderCallbackStruct callback; callback.inputProc = inputCallback; callback.inputProcRefCon = this; result = AudioUnitSetProperty(m_inputUnit, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, 0, &callback, sizeof(AURenderCallbackStruct)); ASSERT(!result); // make the input buffer size match the output buffer size UInt32 bufferSizeVal = bufferSize; #if TARGET_OS_IPHONE result = AudioUnitSetProperty(m_inputUnit, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, 0, &bufferSizeVal, sizeof(bufferSizeVal)); #else result = AudioUnitSetProperty(m_inputUnit, kAudioDevicePropertyBufferFrameSize, kAudioUnitScope_Global, 0, &bufferSizeVal, sizeof(bufferSizeVal)); #endif ASSERT(!result); // Initialize the AudioUnit result = AudioUnitInitialize(m_inputUnit); ASSERT(!result); // get Size 
of IO Buffers UInt32 sampleCount; param = sizeof(UInt32); #if TARGET_OS_IPHONE result = AudioUnitGetProperty(m_inputUnit, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, 0, &sampleCount, ¶m); #else result = AudioUnitGetProperty(m_inputUnit, kAudioDevicePropertyBufferFrameSize, kAudioUnitScope_Global, 0, &sampleCount, ¶m); #endif ASSERT(!result); // The AudioUnit can do format conversions, so match the input configuration to the output. //// if this doesn't work try it the other way around - set up the input desc and force the output to match param = sizeof(AudioStreamBasicDescription); result = AudioUnitSetProperty(m_inputUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 1, &outDesc, param); ASSERT(!result); m_audioBus = new AudioBus(2, bufferSize, true); m_buffers = (AudioBufferList*) malloc(offsetof(AudioBufferList, mBuffers[0]) + sizeof(AudioBuffer) * outDesc.mChannelsPerFrame); m_buffers->mNumberBuffers = outDesc.mChannelsPerFrame; for (uint32_t i = 0; i < m_buffers->mNumberBuffers; ++i) { m_buffers->mBuffers[i].mNumberChannels = 1; m_buffers->mBuffers[i].mDataByteSize = bufferSize * outDesc.mBytesPerFrame; m_buffers->mBuffers[i].mData = m_audioBus->channel(i)->mutableData(); } }
/* Open a CoreAudio capture device: set up an AUHAL unit in input-only mode,
 * bind it to the default input device, install the capture callback, and
 * build a converter + ring buffer so the hardware format/rate can be
 * resampled to the application's requested format.
 * Returns ALC_NO_ERROR on success, ALC_INVALID_VALUE on any failure (all
 * partially acquired resources are released via the error path). */
static ALCenum ca_open_capture(ALCdevice *device, const ALCchar *deviceName)
{
    AudioStreamBasicDescription requestedFormat;  // The application requested format
    AudioStreamBasicDescription hardwareFormat;   // The hardware format
    AudioStreamBasicDescription outputFormat;     // The AudioUnit output format
    AURenderCallbackStruct input;
    ComponentDescription desc;
    AudioDeviceID inputDevice;
    UInt32 outputFrameCount;
    UInt32 propertySize;
    UInt32 enableIO;
    Component comp;
    ca_data *data;
    OSStatus err;

    desc.componentType = kAudioUnitType_Output;
    desc.componentSubType = kAudioUnitSubType_HALOutput;
    desc.componentManufacturer = kAudioUnitManufacturer_Apple;
    desc.componentFlags = 0;
    desc.componentFlagsMask = 0;

    // Search for component with given description
    comp = FindNextComponent(NULL, &desc);
    if(comp == NULL)
    {
        ERR("FindNextComponent failed\n");
        return ALC_INVALID_VALUE;
    }

    data = calloc(1, sizeof(*data));
    device->ExtraData = data;

    // Open the component
    err = OpenAComponent(comp, &data->audioUnit);
    if(err != noErr)
    {
        ERR("OpenAComponent failed\n");
        goto error;
    }

    // Turn off AudioUnit output (element 0 = output side of the AUHAL)
    enableIO = 0;
    err = AudioUnitSetProperty(data->audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, 0, &enableIO, sizeof(ALuint));
    if(err != noErr)
    {
        ERR("AudioUnitSetProperty failed\n");
        goto error;
    }

    // Turn on AudioUnit input (element 1 = input side of the AUHAL)
    enableIO = 1;
    err = AudioUnitSetProperty(data->audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, 1, &enableIO, sizeof(ALuint));
    if(err != noErr)
    {
        ERR("AudioUnitSetProperty failed\n");
        goto error;
    }

    // Get the default input device
    propertySize = sizeof(AudioDeviceID);
    err = AudioHardwareGetProperty(kAudioHardwarePropertyDefaultInputDevice, &propertySize, &inputDevice);
    if(err != noErr)
    {
        ERR("AudioHardwareGetProperty failed\n");
        goto error;
    }
    if(inputDevice == kAudioDeviceUnknown)
    {
        ERR("No input device found\n");
        goto error;
    }

    // Track the input device
    err = AudioUnitSetProperty(data->audioUnit, kAudioOutputUnitProperty_CurrentDevice, kAudioUnitScope_Global, 0, &inputDevice, sizeof(AudioDeviceID));
    if(err != noErr)
    {
        ERR("AudioUnitSetProperty failed\n");
        goto error;
    }

    // set capture callback
    input.inputProc = ca_capture_callback;
    input.inputProcRefCon = device;
    err = AudioUnitSetProperty(data->audioUnit, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, 0, &input, sizeof(AURenderCallbackStruct));
    if(err != noErr)
    {
        ERR("AudioUnitSetProperty failed\n");
        goto error;
    }

    // Initialize the device
    err = AudioUnitInitialize(data->audioUnit);
    if(err != noErr)
    {
        ERR("AudioUnitInitialize failed\n");
        goto error;
    }

    // Get the hardware format (input scope of the input element)
    propertySize = sizeof(AudioStreamBasicDescription);
    err = AudioUnitGetProperty(data->audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 1, &hardwareFormat, &propertySize);
    if(err != noErr || propertySize != sizeof(AudioStreamBasicDescription))
    {
        ERR("AudioUnitGetProperty failed\n");
        goto error;
    }

    // Set up the requested format description from the ALC device settings.
    // Unsupported sample types / channel layouts bail out to error.
    switch(device->FmtType)
    {
        case DevFmtUByte:
            requestedFormat.mBitsPerChannel = 8;
            requestedFormat.mFormatFlags = kAudioFormatFlagIsPacked;
            break;
        case DevFmtShort:
            requestedFormat.mBitsPerChannel = 16;
            requestedFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked;
            break;
        case DevFmtInt:
            requestedFormat.mBitsPerChannel = 32;
            requestedFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked;
            break;
        case DevFmtFloat:
            requestedFormat.mBitsPerChannel = 32;
            requestedFormat.mFormatFlags = kAudioFormatFlagIsPacked;
            break;
        case DevFmtByte:
        case DevFmtUShort:
        case DevFmtUInt:
            ERR("%s samples not supported\n", DevFmtTypeString(device->FmtType));
            goto error;
    }

    switch(device->FmtChans)
    {
        case DevFmtMono:
            requestedFormat.mChannelsPerFrame = 1;
            break;
        case DevFmtStereo:
            requestedFormat.mChannelsPerFrame = 2;
            break;
        case DevFmtQuad:
        case DevFmtX51:
        case DevFmtX51Side:
        case DevFmtX61:
        case DevFmtX71:
            ERR("%s not supported\n", DevFmtChannelsString(device->FmtChans));
            goto error;
    }

    requestedFormat.mBytesPerFrame = requestedFormat.mChannelsPerFrame * requestedFormat.mBitsPerChannel / 8;
    requestedFormat.mBytesPerPacket = requestedFormat.mBytesPerFrame;
    requestedFormat.mSampleRate = device->Frequency;
    requestedFormat.mFormatID = kAudioFormatLinearPCM;
    requestedFormat.mReserved = 0;
    requestedFormat.mFramesPerPacket = 1;

    // save requested format description for later use
    data->format = requestedFormat;
    data->frameSize = FrameSizeFromDevFmt(device->FmtChans, device->FmtType);

    // Use intermediate format for sample rate conversion (outputFormat)
    // Set sample rate to the same as hardware for resampling later
    outputFormat = requestedFormat;
    outputFormat.mSampleRate = hardwareFormat.mSampleRate;

    // Determine sample rate ratio for resampling
    data->sampleRateRatio = outputFormat.mSampleRate / device->Frequency;

    // The output format should be the requested format, but using the hardware sample rate
    // This is because the AudioUnit will automatically scale other properties, except for sample rate
    err = AudioUnitSetProperty(data->audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 1, (void *)&outputFormat, sizeof(outputFormat));
    if(err != noErr)
    {
        ERR("AudioUnitSetProperty failed\n");
        goto error;
    }

    // Set the AudioUnit output format frame count
    outputFrameCount = device->UpdateSize * data->sampleRateRatio;
    err = AudioUnitSetProperty(data->audioUnit, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Output, 0, &outputFrameCount, sizeof(outputFrameCount));
    if(err != noErr)
    {
        ERR("AudioUnitSetProperty failed: %d\n", err);
        goto error;
    }

    // Set up sample converter (hardware rate -> requested rate)
    err = AudioConverterNew(&outputFormat, &requestedFormat, &data->audioConverter);
    if(err != noErr)
    {
        ERR("AudioConverterNew failed: %d\n", err);
        goto error;
    }

    // Create a buffer for use in the resample callback
    data->resampleBuffer = malloc(device->UpdateSize * data->frameSize * data->sampleRateRatio);

    // Allocate buffer for the AudioUnit output
    data->bufferList = allocate_buffer_list(outputFormat.mChannelsPerFrame, device->UpdateSize * data->frameSize * data->sampleRateRatio);
    if(data->bufferList == NULL)
        goto error;

    data->ring = CreateRingBuffer(data->frameSize, (device->UpdateSize * data->sampleRateRatio) * device->NumUpdates);
    if(data->ring == NULL)
        goto error;

    al_string_copy_cstr(&device->DeviceName, deviceName);

    return ALC_NO_ERROR;

error:
    // Release everything acquired so far; helpers tolerate NULL/zeroed fields.
    DestroyRingBuffer(data->ring);
    free(data->resampleBuffer);
    destroy_buffer_list(data->bufferList);

    if(data->audioConverter)
        AudioConverterDispose(data->audioConverter);
    if(data->audioUnit)
        CloseComponent(data->audioUnit);

    free(data);
    device->ExtraData = NULL;
    return ALC_INVALID_VALUE;
}
// ---------------------------------------------------------- bool ofxAudioUnitInput::configureInputDevice() // ---------------------------------------------------------- { UInt32 on = 1; OFXAU_RET_FALSE(AudioUnitSetProperty(*_unit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, 1, &on, sizeof(on)), "enabling input on HAL unit"); UInt32 off = 0; OFXAU_RET_FALSE(AudioUnitSetProperty(*_unit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, 0, &off, sizeof(off)), "disabling output on HAL unit"); UInt32 deviceIDSize = sizeof(AudioDeviceID); OFXAU_RET_FALSE(AudioUnitSetProperty(*_unit, kAudioOutputUnitProperty_CurrentDevice, kAudioUnitScope_Global, 0, &_impl->inputDeviceID, deviceIDSize), "setting HAL unit's device ID"); AudioStreamBasicDescription deviceASBD = {0}; UInt32 ASBDSize = sizeof(deviceASBD); OFXAU_RET_FALSE(AudioUnitGetProperty(*_unit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 1, &deviceASBD, &ASBDSize), "getting hardware stream format"); deviceASBD.mSampleRate = 44100; OFXAU_RET_FALSE(AudioUnitSetProperty(*_unit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 1, &deviceASBD, sizeof(deviceASBD)), "setting input sample rate to 44100"); AURenderCallbackStruct inputCallback = {RenderCallback, &_impl->ctx}; OFXAU_RET_FALSE(AudioUnitSetProperty(*_unit, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, 0, &inputCallback, sizeof(inputCallback)), "setting hardware input callback"); OFXAU_RET_BOOL(AudioUnitInitialize(*_unit), "initializing hardware input unit after setting it to input mode"); }
bool CoreAudioSound::Start() { OSStatus err; AURenderCallbackStruct callback_struct; AudioStreamBasicDescription format; ComponentDescription desc; Component component; desc.componentType = kAudioUnitType_Output; desc.componentSubType = kAudioUnitSubType_DefaultOutput; desc.componentFlags = 0; desc.componentFlagsMask = 0; desc.componentManufacturer = kAudioUnitManufacturer_Apple; component = FindNextComponent(nullptr, &desc); if (component == nullptr) { ERROR_LOG(AUDIO, "error finding audio component"); return false; } err = OpenAComponent(component, &audioUnit); if (err != noErr) { ERROR_LOG(AUDIO, "error opening audio component"); return false; } FillOutASBDForLPCM(format, m_mixer->GetSampleRate(), 2, 16, 16, false, false, false); err = AudioUnitSetProperty(audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &format, sizeof(AudioStreamBasicDescription)); if (err != noErr) { ERROR_LOG(AUDIO, "error setting audio format"); return false; } callback_struct.inputProc = callback; callback_struct.inputProcRefCon = this; err = AudioUnitSetProperty(audioUnit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, 0, &callback_struct, sizeof callback_struct); if (err != noErr) { ERROR_LOG(AUDIO, "error setting audio callback"); return false; } err = AudioUnitSetParameter(audioUnit, kHALOutputParam_Volume, kAudioUnitParameterFlag_Output, 0, m_volume / 100., 0); if (err != noErr) ERROR_LOG(AUDIO, "error setting volume"); err = AudioUnitInitialize(audioUnit); if (err != noErr) { ERROR_LOG(AUDIO, "error initializing audiounit"); return false; } err = AudioOutputUnitStart(audioUnit); if (err != noErr) { ERROR_LOG(AUDIO, "error starting audiounit"); return false; } return true; }
/* Reconfigure the default output unit to match the ALC device settings:
 * read the unit's hardware-side format, adopt its channel count and sample
 * rate, build a matching client-side PCM format from device->FmtType, and
 * reinstall the render callback.  Mutates device->Frequency, UpdateSize,
 * FmtChans and FmtType to what was actually negotiated.
 * Returns ALC_TRUE on success, ALC_FALSE on any CoreAudio failure. */
static ALCboolean ca_reset_playback(ALCdevice *device)
{
    ca_data *data = (ca_data*)device->ExtraData;
    AudioStreamBasicDescription streamFormat;
    AURenderCallbackStruct input;
    OSStatus err;
    UInt32 size;

    /* Must uninitialize before changing the stream format. */
    err = AudioUnitUninitialize(data->audioUnit);
    if(err != noErr)
        ERR("-- AudioUnitUninitialize failed.\n");

    /* retrieve default output unit's properties (output side) */
    size = sizeof(AudioStreamBasicDescription);
    err = AudioUnitGetProperty(data->audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, &streamFormat, &size);
    if(err != noErr || size != sizeof(AudioStreamBasicDescription))
    {
        ERR("AudioUnitGetProperty failed\n");
        return ALC_FALSE;
    }

#if 0
    TRACE("Output streamFormat of default output unit -\n");
    TRACE("  streamFormat.mFramesPerPacket = %d\n", streamFormat.mFramesPerPacket);
    TRACE("  streamFormat.mChannelsPerFrame = %d\n", streamFormat.mChannelsPerFrame);
    TRACE("  streamFormat.mBitsPerChannel = %d\n", streamFormat.mBitsPerChannel);
    TRACE("  streamFormat.mBytesPerPacket = %d\n", streamFormat.mBytesPerPacket);
    TRACE("  streamFormat.mBytesPerFrame = %d\n", streamFormat.mBytesPerFrame);
    TRACE("  streamFormat.mSampleRate = %5.0f\n", streamFormat.mSampleRate);
#endif

    /* set default output unit's input side to match output side */
    err = AudioUnitSetProperty(data->audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &streamFormat, size);
    if(err != noErr)
    {
        ERR("AudioUnitSetProperty failed\n");
        return ALC_FALSE;
    }

    /* Rescale the update size so it covers the same duration at the
     * hardware's rate, then adopt that rate. */
    if(device->Frequency != streamFormat.mSampleRate)
    {
        device->UpdateSize = (ALuint)((ALuint64)device->UpdateSize *
                                      streamFormat.mSampleRate /
                                      device->Frequency);
        device->Frequency = streamFormat.mSampleRate;
    }

    /* FIXME: How to tell what channels are what in the output device, and
     * how to specify what we're giving?  eg, 6.0 vs 5.1 */
    switch(streamFormat.mChannelsPerFrame)
    {
        case 1:
            device->FmtChans = DevFmtMono;
            break;
        case 2:
            device->FmtChans = DevFmtStereo;
            break;
        case 4:
            device->FmtChans = DevFmtQuad;
            break;
        case 6:
            device->FmtChans = DevFmtX51;
            break;
        case 7:
            device->FmtChans = DevFmtX61;
            break;
        case 8:
            device->FmtChans = DevFmtX71;
            break;
        default:
            ERR("Unhandled channel count (%d), using Stereo\n", streamFormat.mChannelsPerFrame);
            device->FmtChans = DevFmtStereo;
            streamFormat.mChannelsPerFrame = 2;
            break;
    }
    SetDefaultWFXChannelOrder(device);

    /* use channel count and sample rate from the default output unit's current
     * parameters, but reset everything else */
    streamFormat.mFramesPerPacket = 1;

    /* Unsigned integer types are promoted to their signed counterparts;
     * each case below intentionally falls through to set the flags/bits. */
    streamFormat.mFormatFlags = 0;
    switch(device->FmtType)
    {
        case DevFmtUByte:
            device->FmtType = DevFmtByte;
            /* fall-through */
        case DevFmtByte:
            streamFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger;
            streamFormat.mBitsPerChannel = 8;
            break;
        case DevFmtUShort:
            device->FmtType = DevFmtShort;
            /* fall-through */
        case DevFmtShort:
            streamFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger;
            streamFormat.mBitsPerChannel = 16;
            break;
        case DevFmtUInt:
            device->FmtType = DevFmtInt;
            /* fall-through */
        case DevFmtInt:
            streamFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger;
            streamFormat.mBitsPerChannel = 32;
            break;
        case DevFmtFloat:
            streamFormat.mFormatFlags = kLinearPCMFormatFlagIsFloat;
            streamFormat.mBitsPerChannel = 32;
            break;
    }
    streamFormat.mBytesPerFrame = streamFormat.mChannelsPerFrame * streamFormat.mBitsPerChannel / 8;
    streamFormat.mBytesPerPacket = streamFormat.mBytesPerFrame;
    streamFormat.mFormatID = kAudioFormatLinearPCM;
    streamFormat.mFormatFlags |= kAudioFormatFlagsNativeEndian | kLinearPCMFormatFlagIsPacked;

    err = AudioUnitSetProperty(data->audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &streamFormat, sizeof(AudioStreamBasicDescription));
    if(err != noErr)
    {
        ERR("AudioUnitSetProperty failed\n");
        return ALC_FALSE;
    }

    /* setup callback */
    data->frameSize = FrameSizeFromDevFmt(device->FmtChans, device->FmtType);
    input.inputProc = ca_callback;
    input.inputProcRefCon = device;

    err = AudioUnitSetProperty(data->audioUnit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, 0, &input, sizeof(AURenderCallbackStruct));
    if(err != noErr)
    {
        ERR("AudioUnitSetProperty failed\n");
        return ALC_FALSE;
    }

    /* init the default audio unit... */
    err = AudioUnitInitialize(data->audioUnit);
    if(err != noErr)
    {
        ERR("AudioUnitInitialize failed\n");
        return ALC_FALSE;
    }

    return ALC_TRUE;
}
/*
 * new_fluid_core_audio_driver2
 *
 * Create a CoreAudio driver that renders via `func` (the synth callback),
 * using the HAL output unit.  Honors the "audio.coreaudio.device" setting
 * by scanning all devices for a matching name; otherwise the unit's default
 * device is used.  Returns the driver, or NULL on failure (everything
 * allocated so far is released via delete_fluid_core_audio_driver).
 */
fluid_audio_driver_t*
new_fluid_core_audio_driver2(fluid_settings_t* settings, fluid_audio_func_t func, void* data)
{
  char* devname = NULL;
  fluid_core_audio_driver_t* dev = NULL;
  int period_size, periods;
  double sample_rate;
  OSStatus status;
  UInt32 size;
  int i;

  dev = FLUID_NEW(fluid_core_audio_driver_t);
  if (dev == NULL) {
    FLUID_LOG(FLUID_ERR, "Out of memory");
    return NULL;
  }
  FLUID_MEMSET(dev, 0, sizeof(fluid_core_audio_driver_t));

  dev->callback = func;
  dev->data = data;

  // Open the default output unit
  ComponentDescription desc;
  desc.componentType = kAudioUnitType_Output;
  desc.componentSubType = kAudioUnitSubType_HALOutput; //kAudioUnitSubType_DefaultOutput;
  desc.componentManufacturer = kAudioUnitManufacturer_Apple;
  desc.componentFlags = 0;
  desc.componentFlagsMask = 0;

  Component comp = FindNextComponent(NULL, &desc);
  if (comp == NULL) {
    FLUID_LOG(FLUID_ERR, "Failed to get the default audio device");
    goto error_recovery;
  }

  status = OpenAComponent(comp, &dev->outputUnit);
  if (status != noErr) {
    FLUID_LOG(FLUID_ERR, "Failed to open the default audio device. Status=%ld\n", (long int)status);
    goto error_recovery;
  }

  // Set up a callback function to generate output
  AURenderCallbackStruct render;
  render.inputProc = fluid_core_audio_callback;
  render.inputProcRefCon = (void *) dev;
  status = AudioUnitSetProperty (dev->outputUnit,
                                 kAudioUnitProperty_SetRenderCallback,
                                 kAudioUnitScope_Input,
                                 0,
                                 &render,
                                 sizeof(render));
  if (status != noErr) {
    FLUID_LOG (FLUID_ERR, "Error setting the audio callback. Status=%ld\n", (long int)status);
    goto error_recovery;
  }

  fluid_settings_getnum(settings, "synth.sample-rate", &sample_rate);
  fluid_settings_getint(settings, "audio.periods", &periods);
  fluid_settings_getint(settings, "audio.period-size", &period_size);

  /* get the selected device name. if none is specified, use NULL for the default device. */
  if (fluid_settings_dupstr(settings, "audio.coreaudio.device", &devname) /* alloc device name */
      && devname && strlen (devname) > 0) {
    // Enumerate every audio device and match by name (case-insensitive);
    // only devices that actually have outputs are considered.
    AudioObjectPropertyAddress pa;
    pa.mSelector = kAudioHardwarePropertyDevices;
    pa.mScope = kAudioObjectPropertyScopeWildcard;
    pa.mElement = kAudioObjectPropertyElementMaster;
    if (OK (AudioObjectGetPropertyDataSize (kAudioObjectSystemObject, &pa, 0, 0, &size))) {
      int num = size / (int) sizeof (AudioDeviceID);
      AudioDeviceID devs [num];  // VLA sized by the device count
      if (OK (AudioObjectGetPropertyData (kAudioObjectSystemObject, &pa, 0, 0, &size, devs))) {
        for (i = 0; i < num; ++i) {
          char name [1024];
          size = sizeof (name);
          pa.mSelector = kAudioDevicePropertyDeviceName;
          if (OK (AudioObjectGetPropertyData (devs[i], &pa, 0, 0, &size, name))) {
            if (get_num_outputs (devs[i]) > 0 && strcasecmp(devname, name) == 0) {
              AudioDeviceID selectedID = devs[i];
              status = AudioUnitSetProperty (dev->outputUnit,
                                             kAudioOutputUnitProperty_CurrentDevice,
                                             kAudioUnitScope_Global,
                                             0,
                                             &selectedID,
                                             sizeof(AudioDeviceID));
              if (status != noErr) {
                FLUID_LOG (FLUID_ERR, "Error setting the selected output device. Status=%ld\n", (long int)status);
                goto error_recovery;
              }
            }
          }
        }
      }
    }
  }

  if (devname)
    FLUID_FREE (devname); /* free device name */

  dev->buffer_size = period_size * periods;

  // The DefaultOutputUnit should do any format conversions
  // necessary from our format to the device's format.
  // Source format: stereo interleaved 32-bit float.
  dev->format.mSampleRate = sample_rate; // sample rate of the audio stream
  dev->format.mFormatID = kAudioFormatLinearPCM; // encoding type of the audio stream
  dev->format.mFormatFlags = kLinearPCMFormatFlagIsFloat;
  dev->format.mBytesPerPacket = 2*sizeof(float);
  dev->format.mFramesPerPacket = 1;
  dev->format.mBytesPerFrame = 2*sizeof(float);
  dev->format.mChannelsPerFrame = 2;
  dev->format.mBitsPerChannel = 8*sizeof(float);

  FLUID_LOG (FLUID_DBG, "mSampleRate %g", dev->format.mSampleRate);
  FLUID_LOG (FLUID_DBG, "mFormatFlags %08X", dev->format.mFormatFlags);
  FLUID_LOG (FLUID_DBG, "mBytesPerPacket %d", dev->format.mBytesPerPacket);
  FLUID_LOG (FLUID_DBG, "mFramesPerPacket %d", dev->format.mFramesPerPacket);
  FLUID_LOG (FLUID_DBG, "mChannelsPerFrame %d", dev->format.mChannelsPerFrame);
  FLUID_LOG (FLUID_DBG, "mBytesPerFrame %d", dev->format.mBytesPerFrame);
  FLUID_LOG (FLUID_DBG, "mBitsPerChannel %d", dev->format.mBitsPerChannel);

  status = AudioUnitSetProperty (dev->outputUnit,
                                 kAudioUnitProperty_StreamFormat,
                                 kAudioUnitScope_Input,
                                 0,
                                 &dev->format,
                                 sizeof(AudioStreamBasicDescription));
  if (status != noErr) {
    FLUID_LOG (FLUID_ERR, "Error setting the audio format. Status=%ld\n", (long int)status);
    goto error_recovery;
  }

  status = AudioUnitSetProperty (dev->outputUnit,
                                 kAudioUnitProperty_MaximumFramesPerSlice,
                                 kAudioUnitScope_Input,
                                 0,
                                 &dev->buffer_size,
                                 sizeof(unsigned int));
  if (status != noErr) {
    FLUID_LOG (FLUID_ERR, "Failed to set the MaximumFramesPerSlice. Status=%ld\n", (long int)status);
    goto error_recovery;
  }
  FLUID_LOG (FLUID_DBG, "MaximumFramesPerSlice = %d", dev->buffer_size);

  // Non-interleaved render buffers, one per channel.
  dev->buffers[0] = FLUID_ARRAY(float, dev->buffer_size);
  dev->buffers[1] = FLUID_ARRAY(float, dev->buffer_size);

  // Initialize the audio unit
  status = AudioUnitInitialize(dev->outputUnit);
  if (status != noErr) {
    FLUID_LOG (FLUID_ERR, "Error calling AudioUnitInitialize(). Status=%ld\n", (long int)status);
    goto error_recovery;
  }

  // Start the rendering
  status = AudioOutputUnitStart (dev->outputUnit);
  if (status != noErr) {
    FLUID_LOG (FLUID_ERR, "Error calling AudioOutputUnitStart(). Status=%ld\n", (long int)status);
    goto error_recovery;
  }

  return (fluid_audio_driver_t*) dev;

error_recovery:
  delete_fluid_core_audio_driver((fluid_audio_driver_t*) dev);
  return NULL;
}
// ---------------------------------------------------------- bool ofxAudioUnitOutput::configureOutputDevice(int deviceID) // ---------------------------------------------------------- { OSStatus err = noErr; UInt32 outSize; Boolean outWritable; err = AudioHardwareGetPropertyInfo(kAudioHardwarePropertyDevices, &outSize, &outWritable); if ( err != noErr ) cout<<"err 1"<<endl; UInt16 devicesAvailable = outSize / sizeof(AudioDeviceID); if ( devicesAvailable < 1 ) { fprintf(stderr, "No devices\n" ); //return err; }else{ cout<<"*devicesAvailable "<<devicesAvailable<<endl; } UInt32 on = 1; UInt32 off = 0; OFXAU_RET_FALSE(AudioUnitSetProperty(*_unit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, 1, &off, sizeof(off)), "disabling input on HAL unit"); OFXAU_RET_FALSE(AudioUnitSetProperty(*_unit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, 0, &on, sizeof(on)), "enabling output on HAL unit"); AudioDeviceID outputDeviceID = kAudioObjectUnknown; UInt32 deviceIDSize = sizeof( AudioDeviceID ); AudioObjectPropertyAddress prop_addr = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster }; OFXAU_RET_FALSE(AudioObjectGetPropertyData(kAudioObjectSystemObject, &prop_addr, 0, NULL, &deviceIDSize, &outputDeviceID), "getting device ID for default input"); if(deviceID != -1) outputDeviceID = deviceID; OFXAU_RET_FALSE(AudioUnitSetProperty(*_unit, kAudioOutputUnitProperty_CurrentDevice, kAudioUnitScope_Global, 0, &outputDeviceID, deviceIDSize), "setting HAL unit's device ID"); cout<<"outputDeviceID "<<outputDeviceID<<endl; OFXAU_RET_BOOL(AudioUnitInitialize(*_unit), "initializing hardware input unit after setting it to input mode"); }
/*
 * JNI bridge for com.apple.audio.units.AudioUnit.AudioUnitInitialize().
 *
 * The Java side passes the native AudioUnit as a raw jint handle; we cast it
 * back to an AudioUnit reference, initialize it, and hand the resulting
 * OSStatus back to Java as a jint (noErr/0 on success).
 */
JNIEXPORT jint JNICALL Java_com_apple_audio_units_AudioUnit_AudioUnitInitialize
  (JNIEnv *env, jclass clazz, jint ci)
{
	AudioUnit unit = (AudioUnit) ci;
	OSStatus result = AudioUnitInitialize(unit);
	return (jint) result;
}
/* Set up the iOS audio session and the RemoteIO audio unit for full-duplex
 * 16-bit interleaved stereo I/O at o->samplerate.
 *
 * o : audio unit wrapper; reads o->latency, o->samplerate, o->blocksize,
 *     o->input_enabled, o->verbose; writes o->au_ios_cb_struct and
 *     o->audio_unit.
 * Returns 0 on success, the failing OSStatus otherwise. */
sint_t aubio_audio_unit_init (aubio_audio_unit_t *o)
{
  OSStatus err = noErr;
  Float32 latency = o->latency;
  Float64 samplerate = (Float64)o->samplerate;

  /* render callback invoked by the RemoteIO unit to pull/push samples */
  o->au_ios_cb_struct.inputProc = aubio_audio_unit_process;
  o->au_ios_cb_struct.inputProcRefCon = o;

  /* setting up audio session with interruption listener */
  err = AudioSessionInitialize(NULL, NULL, audio_unit_interruption_listener, o);
  if (err) {
    AUBIO_ERR("audio_unit: could not initialize audio session (%d)\n", (int)err);
    goto fail;
  }

  audio_unit_set_audio_session_category(o->input_enabled, o->verbose);
  audio_unit_check_audio_route(o);

  /* add route change listener */
  err = AudioSessionAddPropertyListener(kAudioSessionProperty_AudioRouteChange,
      audio_unit_route_change_listener, o);
  if (err) {
    AUBIO_ERR("audio_unit: could not set route change listener (%d)\n", (int)err);
    goto fail;
  }

  /* set latency (preferred hardware I/O buffer duration, in seconds) */
  err = AudioSessionSetProperty(kAudioSessionProperty_PreferredHardwareIOBufferDuration,
      sizeof(latency), &latency);
  if (err) {
    AUBIO_ERR("audio_unit: could not set preferred latency (%d)\n", (int)err);
    goto fail;
  }

#if 0 // only for iphone OS >= 3.1
  UInt32 val = 1; // set to 0 (default) to use ear speaker in voice application
  err = AudioSessionSetProperty(kAudioSessionProperty_OverrideCategoryDefaultToSpeaker,
      sizeof(UInt32), &val);
  if (err) {
    AUBIO_ERR("audio_unit: could not set session property to default to speaker\n");
  }
#endif

  /* setting up audio unit: Apple's RemoteIO output unit */
  AudioComponentDescription desc;
  desc.componentManufacturer = kAudioUnitManufacturer_Apple;
  desc.componentSubType = kAudioUnitSubType_RemoteIO;
  desc.componentType = kAudioUnitType_Output;
  desc.componentFlags = 0;
  desc.componentFlagsMask = 0;

  AudioStreamBasicDescription audioFormat;

  /* look for a component that match the description */
  AudioComponent comp = AudioComponentFindNext(NULL, &desc);

  /* create the audio component */
  AudioUnit *audio_unit = &(o->audio_unit);

  err = AudioComponentInstanceNew(comp, &(o->audio_unit));
  if (err) {
    AUBIO_ERR("audio_unit: failed creating the audio unit\n");
    goto fail;
  }

  /* enable IO on the input element (bus 1 = microphone side) */
  UInt32 enabled = 1;
  err = AudioUnitSetProperty (*audio_unit, kAudioOutputUnitProperty_EnableIO,
      kAudioUnitScope_Input, 1, &enabled, sizeof(enabled));
  if (err) {
    AUBIO_ERR("audio_unit: failed enabling input of audio unit\n");
    goto fail;
  }

  /* set max fps (frames per render slice) */
  UInt32 max_fps = MIN(o->blocksize, MAX_FPS);
  err = AudioUnitSetProperty (*audio_unit, kAudioUnitProperty_MaximumFramesPerSlice,
      kAudioUnitScope_Global, 0, &max_fps, sizeof(max_fps));
  if (err) {
    AUBIO_ERR("audio_unit: could not set maximum frames per slice property (%d)\n",
        (int)err);
    goto fail;
  }

  /* BUGFIX: this call's return value was previously discarded (no `err =`),
   * so the following check re-tested the *previous* call's status and a
   * failure to install the render callback went undetected. */
  err = AudioUnitSetProperty (*audio_unit, kAudioUnitProperty_SetRenderCallback,
      kAudioUnitScope_Input, 0, &(o->au_ios_cb_struct),
      sizeof(o->au_ios_cb_struct));
  if (err) {
    AUBIO_ERR("audio_unit: failed setting audio unit render callback\n");
    goto fail;
  }

#if 0
  err = AudioUnitSetProperty (*audio_unit, kAudioUnitProperty_SampleRate,
      kAudioUnitScope_Input, 0, &samplerate, sizeof(Float64));
  if (err) {
    AUBIO_ERR("audio_unit: could not set audio input sample rate\n");
    goto fail;
  }
  err = AudioUnitSetProperty (*audio_unit, kAudioUnitProperty_SampleRate,
      kAudioUnitScope_Output, 1, &samplerate, sizeof(Float64));
  if (err) {
    AUBIO_ERR("audio_unit: could not set audio input sample rate\n");
    goto fail;
  }
#endif

  /* interleaved signed 16-bit stereo PCM at the requested sample rate */
  audioFormat.mSampleRate = (Float64)samplerate;
  audioFormat.mChannelsPerFrame = 2;
  audioFormat.mFormatID = kAudioFormatLinearPCM;
  audioFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger
      | kAudioFormatFlagsNativeEndian
      | kAudioFormatFlagIsPacked;
  audioFormat.mFramesPerPacket = 1;
  audioFormat.mBitsPerChannel = 8 * sizeof(SInt16);
#if 1 // interleaving
  audioFormat.mBytesPerFrame = 2 * sizeof(SInt16);
  audioFormat.mBytesPerPacket = 2 * sizeof(SInt16);
#else
  audioFormat.mBytesPerPacket = audioFormat.mBytesPerFrame = sizeof(SInt32);
  audioFormat.mFormatFlags |= kAudioFormatFlagIsNonInterleaved;
#endif

  /* format we render INTO the speaker side (input scope of output bus 0) */
  err = AudioUnitSetProperty (*audio_unit, kAudioUnitProperty_StreamFormat,
      kAudioUnitScope_Input, 0, &audioFormat, sizeof(audioFormat));
  if (err) {
    AUBIO_ERR("audio_unit: could not set audio output format\n");
    goto fail;
  }

  /* format we read FROM the microphone side (output scope of input bus 1) */
  err = AudioUnitSetProperty (*audio_unit, kAudioUnitProperty_StreamFormat,
      kAudioUnitScope_Output, 1, &audioFormat, sizeof(audioFormat));
  if (err) {
    AUBIO_ERR("audio_unit: could not set audio input format\n");
    goto fail;
  }

#if 0
  AudioStreamBasicDescription thruFormat;
  thissize = sizeof(thruFormat);
  err = AudioUnitGetProperty (*audio_unit, kAudioUnitProperty_StreamFormat,
      kAudioUnitScope_Input, 0, &thruFormat, &thissize);
  if (err) {
    AUBIO_ERR("audio_unit: could not get speaker output format, err: %d\n",
        (int)err);
    goto fail;
  }
  err = AudioUnitSetProperty (*audio_unit, kAudioUnitProperty_StreamFormat,
      kAudioUnitScope_Output, 1, &thruFormat, sizeof(thruFormat));
  if (err) {
    AUBIO_ERR("audio_unit: could not set input audio format, err: %d\n",
        (int)err);
    goto fail;
  }
#endif

  /* time to initialize the unit */
  err = AudioUnitInitialize(*audio_unit);
  if (err) {
    AUBIO_ERR("audio_unit: failed initializing audio, err: %d\n", (int)err);
    goto fail;
  }

  return 0;

fail:
  return err;
}
/* Create and configure a RemoteIO audio unit for playback or capture.
 *
 * devname   : unused here (device selection code is commented out below)
 * iscapture : non-zero to configure the input (microphone) side, zero for
 *             the output (speaker) side
 * strdesc   : stream format to apply to the unit
 * Returns 1 on success, 0 on any failure (via CHECK_RESULT or the explicit
 * checks below). On success the unit is initialized AND started. */
static int
prepare_audiounit(_THIS, const char *devname, int iscapture,
                  const AudioStreamBasicDescription * strdesc)
{
    OSStatus result = noErr;
    AURenderCallbackStruct callback;
    AudioComponentDescription desc;
    AudioComponent comp = NULL;
    UInt32 enableIO = 0;

    /* RemoteIO bus layout: output on element 0, input on element 1.
     * The app-visible side of the bus is the Input scope for playback
     * (we feed data in) and the Output scope for capture (we pull data out). */
    const AudioUnitElement output_bus = 0;
    const AudioUnitElement input_bus = 1;
    const AudioUnitElement bus = ((iscapture) ? input_bus : output_bus);
    const AudioUnitScope scope = ((iscapture) ? kAudioUnitScope_Output :
                                  kAudioUnitScope_Input);

    SDL_memset(&desc, '\0', sizeof(AudioComponentDescription));
    desc.componentType = kAudioUnitType_Output;
    desc.componentSubType = kAudioUnitSubType_RemoteIO;
    desc.componentManufacturer = kAudioUnitManufacturer_Apple;

    comp = AudioComponentFindNext(NULL, &desc);
    if (comp == NULL) {
        fprintf(stderr, "Couldn't find requested CoreAudio component");
        return 0;
    }

    /* Open & initialize the audio unit */
    /*
       AudioComponentInstanceNew only available on iPhone OS 2.0 and Mac OS X 10.6
       We can't use OpenAComponent on iPhone because it is not present
     */
    result = AudioComponentInstanceNew(comp, &this->hidden->audioUnit);
    CHECK_RESULT("AudioComponentInstanceNew");

    this->hidden->audioUnitOpened = 1;

    /* Enable IO only on the side we actually use. */
    // !!! FIXME: this is wrong?
    enableIO = ((iscapture) ? 1 : 0);
    result = AudioUnitSetProperty(this->hidden->audioUnit,
                                  kAudioOutputUnitProperty_EnableIO,
                                  kAudioUnitScope_Input, input_bus,
                                  &enableIO, sizeof(enableIO));
    CHECK_RESULT("AudioUnitSetProperty (kAudioUnitProperty_EnableIO input)");

    // !!! FIXME: this is wrong?
    enableIO = ((iscapture) ? 0 : 1);
    result = AudioUnitSetProperty(this->hidden->audioUnit,
                                  kAudioOutputUnitProperty_EnableIO,
                                  kAudioUnitScope_Output, output_bus,
                                  &enableIO, sizeof(enableIO));
    CHECK_RESULT("AudioUnitSetProperty (kAudioUnitProperty_EnableIO output)");

    /*result = AudioUnitSetProperty(this->hidden->audioUnit,
                                  kAudioOutputUnitProperty_CurrentDevice,
                                  kAudioUnitScope_Global, 0,
                                  &this->hidden->deviceID,
                                  sizeof(AudioDeviceID));

    CHECK_RESULT("AudioUnitSetProperty (kAudioOutputUnitProperty_CurrentDevice)");
     */

    /* Set the data format of the audio unit. */
    result = AudioUnitSetProperty(this->hidden->audioUnit,
                                  kAudioUnitProperty_StreamFormat,
                                  scope, bus, strdesc, sizeof(*strdesc));
    CHECK_RESULT("AudioUnitSetProperty (kAudioUnitProperty_StreamFormat)");

    /* Set the audio callback */
    SDL_memset(&callback, '\0', sizeof(AURenderCallbackStruct));
    callback.inputProc = ((iscapture) ? inputCallback : outputCallback);
    callback.inputProcRefCon = this;
    result = AudioUnitSetProperty(this->hidden->audioUnit,
                                  kAudioUnitProperty_SetRenderCallback,
                                  scope, bus, &callback, sizeof(callback));
    CHECK_RESULT("AudioUnitSetProperty (kAudioUnitProperty_SetInputCallback)");

    /* Calculate the final parameters for this audio specification */
    SDL_CalculateAudioSpec(&this->spec);

    /* Allocate a sample buffer */
    this->hidden->bufferOffset = this->hidden->bufferSize = this->spec.size;
    this->hidden->buffer = SDL_malloc(this->hidden->bufferSize);
    /* BUGFIX: the allocation was previously used without being checked;
     * a failed SDL_malloc would have been dereferenced in the callbacks. */
    if (this->hidden->buffer == NULL) {
        return 0;
    }

    result = AudioUnitInitialize(this->hidden->audioUnit);
    CHECK_RESULT("AudioUnitInitialize");

    /* Finally, start processing of the audio unit */
    result = AudioOutputUnitStart(this->hidden->audioUnit);
    CHECK_RESULT("AudioOutputUnitStart");

    /* We're running! */
    return 1;                   /* good to go. */
}