AudioDestinationMac::AudioDestinationMac(AudioIOCallback& callback, float sampleRate)
    : m_outputUnit(0)
    , m_callback(callback)
    , m_renderBus(2, kBufferSize, false)
    , m_sampleRate(sampleRate)
    , m_isPlaying(false)
    , m_input(new Input()) // LabSound
{
    // Describe the system default output unit so we can locate it.
    AudioComponentDescription desc;
    desc.componentType = kAudioUnitType_Output;
    desc.componentSubType = kAudioUnitSubType_DefaultOutput;
    desc.componentManufacturer = kAudioUnitManufacturer_Apple;
    desc.componentFlags = 0;
    desc.componentFlagsMask = 0;

    AudioComponent comp = AudioComponentFindNext(0, &desc);
    ASSERT(comp);

    // Instantiate and initialize the unit, then apply our stream
    // configuration via configure().
    OSStatus result = AudioComponentInstanceNew(comp, &m_outputUnit);
    ASSERT(!result);

    result = AudioUnitInitialize(m_outputUnit);
    ASSERT(!result);

    configure();
}
Input() : m_inputUnit(0) , m_buffers(0) , m_audioBus(0) { AudioComponent comp; AudioComponentDescription desc; desc.componentType = kAudioUnitType_Output; #if TARGET_OS_IPHONE desc.componentSubType = kAudioUnitSubType_RemoteIO; #else desc.componentSubType = kAudioUnitSubType_HALOutput; #endif desc.componentManufacturer = kAudioUnitManufacturer_Apple; desc.componentFlags = 0; desc.componentFlagsMask = 0; comp = AudioComponentFindNext(0, &desc); ASSERT(comp); OSStatus result = AudioComponentInstanceNew(comp, &m_inputUnit); ASSERT(!result); result = AudioUnitInitialize(m_inputUnit); ASSERT(!result); }
Error AudioDriverIphone::init() { active = false; channels = 2; AudioStreamBasicDescription strdesc; strdesc.mFormatID = kAudioFormatLinearPCM; strdesc.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked; strdesc.mChannelsPerFrame = channels; strdesc.mSampleRate = 44100; strdesc.mFramesPerPacket = 1; strdesc.mBitsPerChannel = 16; strdesc.mBytesPerFrame = strdesc.mBitsPerChannel * strdesc.mChannelsPerFrame / 8; strdesc.mBytesPerPacket = strdesc.mBytesPerFrame * strdesc.mFramesPerPacket; OSStatus result = noErr; AURenderCallbackStruct callback; AudioComponentDescription desc; AudioComponent comp = NULL; const AudioUnitElement output_bus = 0; const AudioUnitElement bus = output_bus; const AudioUnitScope scope = kAudioUnitScope_Input; zeromem(&desc, sizeof(desc)); desc.componentType = kAudioUnitType_Output; desc.componentSubType = kAudioUnitSubType_RemoteIO; /* !!! FIXME: ? */ comp = AudioComponentFindNext(NULL, &desc); desc.componentManufacturer = kAudioUnitManufacturer_Apple; result = AudioComponentInstanceNew(comp, &audio_unit); ERR_FAIL_COND_V(result != noErr, FAILED); ERR_FAIL_COND_V(comp == NULL, FAILED); result = AudioUnitSetProperty(audio_unit, kAudioUnitProperty_StreamFormat, scope, bus, &strdesc, sizeof(strdesc)); ERR_FAIL_COND_V(result != noErr, FAILED); zeromem(&callback, sizeof(AURenderCallbackStruct)); callback.inputProc = &AudioDriverIphone::output_callback; callback.inputProcRefCon = this; result = AudioUnitSetProperty(audio_unit, kAudioUnitProperty_SetRenderCallback, scope, bus, &callback, sizeof(callback)); ERR_FAIL_COND_V(result != noErr, FAILED); result = AudioUnitInitialize(audio_unit); ERR_FAIL_COND_V(result != noErr, FAILED); result = AudioOutputUnitStart(audio_unit); ERR_FAIL_COND_V(result != noErr, FAILED); const int samples = 1024; samples_in = memnew_arr(int32_t, samples); // whatever buffer_frames = samples / channels; return FAILED; };
int initoutput(){ AudioComponentDescription desc; AudioComponent comp; OSStatus err; UInt32 size; Boolean canwrite; AudioStreamBasicDescription inputdesc,outputdesc; desc.componentType=kAudioUnitType_Output; desc.componentSubType=kAudioUnitSubType_DefaultOutput; desc.componentManufacturer=kAudioUnitManufacturer_Apple; desc.componentFlags=0; desc.componentFlagsMask=0; comp=AudioComponentFindNext(NULL,&desc); if (comp==NULL) return -1; err= AudioComponentInstanceNew(comp,&out); if (err) return err; err=AudioUnitInitialize(out);if (err) return err; err=AudioUnitGetPropertyInfo(out,kAudioUnitProperty_StreamFormat,kAudioUnitScope_Output,0,&size,&canwrite); if (err) return err; err=AudioUnitGetProperty(out,kAudioUnitProperty_StreamFormat,kAudioUnitScope_Input,0,&outputdesc,&size); if (err) return err; // dumpdesc(&outputdesc); inputdesc.mSampleRate=44100.0; inputdesc.mFormatID='lpcm'; #if __BIG_ENDIAN__ inputdesc.mFormatFlags=0x0e; #else inputdesc.mFormatFlags=0x0c; #endif inputdesc.mBytesPerPacket=4; inputdesc.mFramesPerPacket=1; inputdesc.mBytesPerFrame=4; inputdesc.mChannelsPerFrame=2; inputdesc.mBitsPerChannel=16; inputdesc.mReserved=0; // dumpdesc(&inputdesc); err=AudioConverterNew(&inputdesc,&outputdesc,&conv); if (err) { // printf("AudioConvertNew failed %.*s\n",4,(char*)&err); return err; } return err; }
int SetupRemoteIO (AudioUnit& inRemoteIOUnit, Float64 sampleRate, AURenderCallbackStruct inRenderProc, CAStreamBasicDescription& outFormat) { try { // Open the output unit AudioComponentDescription desc; desc.componentType = kAudioUnitType_Output; desc.componentSubType = kAudioUnitSubType_RemoteIO; desc.componentManufacturer = kAudioUnitManufacturer_Apple; desc.componentFlags = 0; desc.componentFlagsMask = 0; AudioComponent comp = AudioComponentFindNext(NULL, &desc); XThrowIfError(AudioComponentInstanceNew(comp, &inRemoteIOUnit), "couldn't open the remote I/O unit"); UInt32 zero = 0; UInt32 one = 1; //enable input XThrowIfError(AudioUnitSetProperty(inRemoteIOUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, 1, &one, sizeof(one)), "couldn't enable input on the remote I/O unit"); //disable output XThrowIfError(AudioUnitSetProperty(inRemoteIOUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, 0, &zero, sizeof(zero)), "couldn't disable output "); //set input callback XThrowIfError(AudioUnitSetProperty(inRemoteIOUnit, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, 1, &inRenderProc, sizeof(inRenderProc)), "couldn't set remote i/o input callback"); // set our required format - LPCM non-interleaved 32 bit floating point AudioStreamBasicDescription outFormat; outFormat.mSampleRate = sampleRate; outFormat.mFormatID = kAudioFormatLinearPCM; outFormat.mFormatFlags = kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked | kAudioFormatFlagIsFloat | kAudioFormatFlagIsNonInterleaved; outFormat.mFramesPerPacket = 1; outFormat.mBytesPerPacket= 4; outFormat.mChannelsPerFrame = 1; outFormat.mBitsPerChannel = 32; outFormat.mBytesPerFrame = 4; XThrowIfError(AudioUnitSetProperty(inRemoteIOUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 1, &outFormat, sizeof(outFormat)), "couldn't set the remote I/O unit's output client format"); XThrowIfError(AudioUnitInitialize(inRemoteIOUnit), "couldn't initialize the remote I/O unit"); } 
catch (CAXException &e) { char buf[256]; fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf)); return 1; } catch (...) { fprintf(stderr, "An unknown error occurred\n"); return 1; } return 0; }
int CoreAudioDrv_CD_Play(int track, int loop) { char filename[40]; FILE *f; OSStatus result; AudioStreamBasicDescription requestedDesc; sprintf(filename, "classic/MUSIC/Track%0.2d.ogg", track); f = fopen(filename, "rb"); if (f == NULL) { return -1; } ov_open(f, &vf, NULL, 0); vorbis_info *vi = ov_info(&vf, -1); requestedDesc.mFormatID = kAudioFormatLinearPCM; requestedDesc.mFormatFlags = kLinearPCMFormatFlagIsPacked | kLinearPCMFormatFlagIsSignedInteger; requestedDesc.mChannelsPerFrame = vi->channels; requestedDesc.mSampleRate = vi->rate; requestedDesc.mBitsPerChannel = 16; requestedDesc.mFramesPerPacket = 1; requestedDesc.mBytesPerFrame = requestedDesc.mBitsPerChannel * requestedDesc.mChannelsPerFrame / 8; requestedDesc.mBytesPerPacket = requestedDesc.mBytesPerFrame * requestedDesc.mFramesPerPacket; result = AudioComponentInstanceNew(comp, &output_audio_unit); if (result != noErr) { return CAErr_Error; } result = AudioUnitInitialize(output_audio_unit); if (result != noErr) { return CAErr_Error; } // Set the input format of the audio unit. result = AudioUnitSetProperty(output_audio_unit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &requestedDesc, sizeof(requestedDesc)); if (result != noErr) { return CAErr_Error; } // // Set the audio callback // setMixerCallback(); return 0; }
// Creates the AUHAL (hardware I/O) unit used for capture: finds the HAL
// output component, instantiates it, then enables IO, selects the input
// device and installs the capture callback via the class helpers.
// Returns the OSStatus of the final AudioUnitInitialize call.
OSStatus CAPlayThrough::SetupAUHAL(AudioDeviceID in)
{
	OSStatus err = noErr;
	AudioComponent comp;
	AudioComponentDescription desc;

	//There are several different types of Audio Units.
	//Some audio units serve as Outputs, Mixers, or DSP
	//units. See AUComponent.h for listing
	desc.componentType = kAudioUnitType_Output;

	//Every Component has a subType, which will give a clearer picture
	//of what this components function will be.
	desc.componentSubType = kAudioUnitSubType_HALOutput;

	//all Audio Units in AUComponent.h must use
	//"kAudioUnitManufacturer_Apple" as the Manufacturer
	desc.componentManufacturer = kAudioUnitManufacturer_Apple;
	desc.componentFlags = 0;
	desc.componentFlagsMask = 0;

	//Finds a component that meets the desc spec's
	comp = AudioComponentFindNext(NULL, &desc);
	if (comp == NULL) exit (-1);

	//gains access to the services provided by the component
	err = AudioComponentInstanceNew(comp, &mInputUnit);
	checkErr(err);

	//AUHAL needs to be initialized before anything is done to it
	err = AudioUnitInitialize(mInputUnit);
	checkErr(err);

	err = EnableIO();
	checkErr(err);

	err= SetInputDeviceAsCurrent(in);
	checkErr(err);

	err = CallbackSetup();
	checkErr(err);

	//Don't setup buffers until you know what the
	//input and output device audio streams look like.

	// NOTE(review): AudioUnitInitialize is invoked a second time here, after
	// the IO/device/callback configuration — this mirrors Apple's
	// CAPlayThrough sample, but confirm it is intentional and not a leftover.
	err = AudioUnitInitialize(mInputUnit);

	return err;
}
int SetupRemoteIO (AudioUnit& inRemoteIOUnit, AURenderCallbackStruct inRenderProc, CAStreamBasicDescription& outFormat) { try { // Open the output unit AudioComponentDescription desc; desc.componentType = kAudioUnitType_Output; desc.componentSubType = kAudioUnitSubType_RemoteIO; desc.componentManufacturer = kAudioUnitManufacturer_Apple; desc.componentFlags = 0; desc.componentFlagsMask = 0; AudioComponent comp = AudioComponentFindNext(NULL, &desc); XThrowIfError(AudioComponentInstanceNew(comp, &inRemoteIOUnit), "couldn't open the remote I/O unit"); UInt32 one = 1; XThrowIfError(AudioUnitSetProperty(inRemoteIOUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, 1, &one, sizeof(one)), "couldn't enable input on the remote I/O unit"); XThrowIfError(AudioUnitSetProperty(inRemoteIOUnit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, 0, &inRenderProc, sizeof(inRenderProc)), "couldn't set remote i/o render callback"); // NEWL: Establecer el formato canónico no de los AudioUnits sino del sistema de entrada/salida: // LPCM, no entrelazado, datos enteros con signo de 16 bits. // outFormat.SetCanonical(2, false); // OLDL: set our required format - Canonical AU format: LPCM non-interleaved 8.24 fixed point outFormat.SetAUCanonical(2, false); outFormat.mSampleRate = SAMPRATE; XThrowIfError(AudioUnitSetProperty(inRemoteIOUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &outFormat, sizeof(outFormat)), "couldn't set the remote I/O unit's output client format"); XThrowIfError(AudioUnitSetProperty(inRemoteIOUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 1, &outFormat, sizeof(outFormat)), "couldn't set the remote I/O unit's input client format"); XThrowIfError(AudioUnitInitialize(inRemoteIOUnit), "couldn't initialize the remote I/O unit"); } catch (CAXException &e) { char buf[256]; fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf)); return 1; } catch (...) 
{ fprintf(stderr, "An unknown error occurred\n"); return 1; } return 0; }
AudioDestinationIOS::AudioDestinationIOS(AudioIOCallback& callback, double sampleRate)
    : m_outputUnit(0)
    , m_callback(callback)
    , m_renderBus(AudioBus::create(2, kRenderBufferSize, false))
    , m_mediaSession(MediaSession::create(*this))
    , m_sampleRate(sampleRate)
    , m_isPlaying(false)
{
    // Track this destination in the global set.
    audioDestinations().add(this);

    // Locate the RemoteIO output unit.
    AudioComponentDescription desc;
    desc.componentType = kAudioUnitType_Output;
    desc.componentSubType = kAudioUnitSubType_RemoteIO;
    desc.componentManufacturer = kAudioUnitManufacturer_Apple;
    desc.componentFlags = 0;
    desc.componentFlagsMask = 0;

    AudioComponent comp = AudioComponentFindNext(0, &desc);
    ASSERT(comp);

    OSStatus result = AudioComponentInstanceNew(comp, &m_outputUnit);
    ASSERT(!result);

    // Enable output on the output element (bus 0).
    UInt32 flag = 1;
    result = AudioUnitSetProperty(m_outputUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, 0, &flag, sizeof(flag));
    ASSERT(!result);

    // Watch for slice-size changes so render buffers can follow.
    result = AudioUnitAddPropertyListener(m_outputUnit, kAudioUnitProperty_MaximumFramesPerSlice, frameSizeChangedProc, this);
    ASSERT(!result);

    result = AudioUnitInitialize(m_outputUnit);
    ASSERT(!result);

    configure();
}
/* Enumerates CoreAudio devices, selects (and optionally switches) the
   default device for the requested direction, matches the device sample
   rate, then creates and starts an AUHAL unit with the Csound render or
   input callback installed. Returns 0. */
int AuHAL_open(CSOUND *csound, const csRtAudioParams * parm,
               csdata *cdata, int isInput)
{
    UInt32 psize, devnum, devnos;
    AudioDeviceID dev;
    AudioDeviceID *sysdevs;
    AudioStreamBasicDescription format;
    int i;
    Device_Info *devinfo;
    UInt32 bufframes, nchnls;
    int devouts = 0, devins = 0;
    double srate;
    UInt32 enableIO, maxFPS;
    AudioComponent HALOutput;
    AudioComponentInstance *aunit;
    AudioComponentDescription cd = {kAudioUnitType_Output,
                                    kAudioUnitSubType_HALOutput,
                                    kAudioUnitManufacturer_Apple, 0, 0};
    AudioObjectPropertyAddress prop = {
        kAudioObjectPropertyName,
        kAudioObjectPropertyScopeGlobal,
        kAudioObjectPropertyElementMaster
    };
    CFStringRef devName;
    CFStringEncoding defaultEncoding = CFStringGetSystemEncoding();
    const char *cname; /* guarded C-string view of devName */

    /* remember the system default device for this direction */
    prop.mSelector = (isInput ?
                      kAudioHardwarePropertyDefaultInputDevice :
                      kAudioHardwarePropertyDefaultOutputDevice);
    psize = sizeof(AudioDeviceID);
    AudioObjectGetPropertyData(kAudioObjectSystemObject,
                               &prop, 0, NULL, &psize, &dev);
    if (isInput)
      cdata->defdevin = dev;
    else
      cdata->defdevout = dev;

    /* enumerate every audio device in the system */
    prop.mSelector = kAudioHardwarePropertyDevices;
    AudioObjectGetPropertyDataSize(kAudioObjectSystemObject,
                                   &prop, 0, NULL, &psize);
    devnos = psize / sizeof(AudioDeviceID);
    sysdevs = (AudioDeviceID *) malloc(psize);
    devinfo = (Device_Info *) malloc(devnos*sizeof(Device_Info));
    AudioObjectGetPropertyData(kAudioObjectSystemObject,
                               &prop, 0, NULL, &psize, sysdevs);
    cdata->devnos = devnos;

    /* collect name plus input/output channel counts for every device */
    for (i = 0; (unsigned int) i < devnos; i++) {
      AudioBufferList *b;
      int devchannels, k, n;
      int numlists;
      psize = sizeof(CFStringRef);
      prop.mScope = kAudioObjectPropertyScopeGlobal;
      prop.mSelector = kAudioObjectPropertyName;
      AudioObjectGetPropertyData(sysdevs[i], &prop, 0, NULL, &psize, &devName);
      /* BUG FIX: CFStringGetCStringPtr() is only a fast path and may return
         NULL; previously that NULL was passed straight into strcpy(). */
      cname = CFStringGetCStringPtr(devName, defaultEncoding);
      strcpy(devinfo[i].name, cname != NULL ? cname : "(unknown)");
      CFRelease(devName);
      devchannels = 0;
      prop.mScope = kAudioDevicePropertyScopeInput;
      prop.mSelector = kAudioDevicePropertyStreamConfiguration;
      AudioObjectGetPropertyDataSize(sysdevs[i], &prop, 0, NULL, &psize);
      b = (AudioBufferList *) malloc(psize);
      numlists = psize / sizeof(AudioBufferList);
      AudioObjectGetPropertyData(sysdevs[i], &prop, 0, NULL, &psize, b);
      for (n = 0; n < numlists; n++) {
        for (k = 0; (unsigned int) k < b[n].mNumberBuffers; k++)
          devchannels += b[n].mBuffers[k].mNumberChannels;
      }
      devinfo[i].inchannels = devchannels;
      if (devchannels) {
        devins++;
        devinfo[i].indevnum = devins;
      }
      else devinfo[i].indevnum = -1;
      free(b);
      devchannels = 0;
      prop.mScope = kAudioDevicePropertyScopeOutput;
      AudioObjectGetPropertyDataSize(sysdevs[i], &prop, 0, NULL, &psize);
      b = (AudioBufferList *) malloc(psize);
      numlists = psize / sizeof(AudioBufferList);
      AudioObjectGetPropertyData(sysdevs[i], &prop, 0, NULL, &psize, b);
      for (n = 0; n < numlists; n++) {
        for (k = 0; (unsigned int) k < b[n].mNumberBuffers; k++)
          devchannels += b[n].mBuffers[k].mNumberChannels;
      }
      devinfo[i].outchannels = devchannels;
      if (devchannels) {
        devouts++;
        devinfo[i].outdevnum = devouts;
      }
      else devinfo[i].outdevnum = -1;
      free(b);
    }

    if (cdata->disp)
      csound->Message(csound,
           "==========================================================\n");
    if (isInput)
      csound->Message(csound,
                      Str("AuHAL Module: found %d input device(s):\n"), devins);
    else
      csound->Message(csound,
                      Str("AuHAL Module: found %d output device(s):\n"), devouts);
    for (i = 0; (unsigned int) i < devnos; i++) {
      if (isInput) {
        if (devinfo[i].inchannels) {
          csound->Message(csound, Str("%d: %s (%d channels) \n"),
                          devinfo[i].indevnum, devinfo[i].name,
                          devinfo[i].inchannels);
        }
      }
      else {
        if (devinfo[i].outchannels)
          csound->Message(csound, Str("%d: %s (%d channels) \n"),
                          devinfo[i].outdevnum, devinfo[i].name,
                          devinfo[i].outchannels);
      }
    }

    /* resolve the requested device number and make it the system default */
    if (parm->devName != NULL) devnum = atoi(parm->devName);
    else devnum = parm->devNum;
    if (devnum > 0 && devnum < 1024) {
      int CoreAudioDev = -1;
      prop.mSelector = kAudioHardwarePropertyDevices;
      if (isInput) {
        for (i = 0; (unsigned int) i < devnos; i++) {
          if ((unsigned int) devinfo[i].indevnum == devnum) CoreAudioDev = i;
        }
        if (CoreAudioDev >= 0) {
          prop.mSelector = kAudioHardwarePropertyDefaultInputDevice;
          dev = sysdevs[CoreAudioDev];
          AudioObjectSetPropertyData(kAudioObjectSystemObject, &prop, 0, NULL,
                                     sizeof(AudioDeviceID), &dev);
        }
        else csound->Warning(csound, Str("requested device %d out of range"),
                             devnum);
      }
      else {
        prop.mSelector = kAudioHardwarePropertyDefaultOutputDevice;
        for (i = 0; (unsigned int) i < devnos; i++) {
          if ((unsigned int) devinfo[i].outdevnum == devnum) CoreAudioDev = i;
        }
        if (CoreAudioDev >= 0) {
          dev = sysdevs[CoreAudioDev];
          AudioObjectSetPropertyData(kAudioObjectSystemObject, &prop, 0, NULL,
                                     sizeof(AudioDeviceID), &dev);
        }
        /* BUG FIX: this Warning previously passed devinfo[CoreAudioDev].name
           as an extra vararg; CoreAudioDev is -1 on this path, so that read
           devinfo[-1] (undefined behaviour) for a format with no %s anyway. */
        else csound->Warning(csound, Str("requested device %d out of range"),
                             devnum);
      }
    }
    free(sysdevs);
    free(devinfo);

    /* report the device actually selected */
    psize = sizeof(CFStringRef);
    prop.mSelector = kAudioObjectPropertyName;
    /* NOTE(review): prop.mScope is still kAudioDevicePropertyScopeOutput here
       (left over from the scan loop) — appears to work, but confirm. */
    AudioObjectGetPropertyData(dev, &prop, 0, NULL, &psize, &devName);
    /* BUG FIX: guard the CFStringGetCStringPtr NULL fast path before %s */
    cname = CFStringGetCStringPtr(devName, defaultEncoding);
    if (cname == NULL) cname = "(unknown)";
    if (isInput)
      csound->Message(csound, Str("selected input device: %s \n"), cname);
    else
      csound->Message(csound, Str("selected output device: %s \n"), cname);
    CFRelease(devName);

    srate = csound->GetSr(csound);
    if (!isInput) {
      nchnls = cdata->onchnls = parm->nChannels;
      bufframes = csound->GetOutputBufferSize(csound)/nchnls;
    }
    else {
      nchnls = cdata->inchnls = parm->nChannels;
      bufframes = csound->GetInputBufferSize(csound)/nchnls;
    }

    /* although the SR is set in the stream properties,
       we also need to set the device to match */
    double sr;
    prop.mSelector = kAudioDevicePropertyNominalSampleRate;
    if (!isInput) {
      AudioObjectGetPropertyData(dev, &prop, 0, NULL, &psize, &sr);
      csound->system_sr(csound, sr);
    }
    psize = sizeof(double);
    AudioObjectSetPropertyData(dev, &prop, 0, NULL, psize, &srate);
    AudioObjectGetPropertyData(dev, &prop, 0, NULL, &psize, &sr);
    if (sr != srate) {
      csound->Warning(csound,
                      Str("Attempted to set device SR, tried %.1f, got %.1f \n"),
                      srate, sr);
    }

    HALOutput = AudioComponentFindNext(NULL, &cd);
    if (isInput) {
      AudioComponentInstanceNew(HALOutput, &(cdata->inunit));
      enableIO = 1;
      AudioUnitSetProperty(cdata->inunit, kAudioOutputUnitProperty_EnableIO,
                           kAudioUnitScope_Input, 1, &enableIO,
                           sizeof(enableIO));
      enableIO = 0;
      AudioUnitSetProperty(cdata->inunit, kAudioOutputUnitProperty_EnableIO,
                           kAudioUnitScope_Output, 0, &enableIO,
                           sizeof(enableIO));
      psize = sizeof(AudioDeviceID);
      /* for input, select device AFTER enabling IO */
      AudioUnitSetProperty(cdata->inunit, kAudioOutputUnitProperty_CurrentDevice,
                           kAudioUnitScope_Global, isInput, &dev, psize);
      aunit = &(cdata->inunit);
    }
    else {
      AudioComponentInstanceNew(HALOutput, &(cdata->outunit));
      psize = sizeof(AudioDeviceID);
      /* for output, select device BEFORE enabling IO */
      AudioUnitSetProperty(cdata->outunit, kAudioOutputUnitProperty_CurrentDevice,
                           kAudioUnitScope_Global, isInput, &dev, psize);
      enableIO = 1;
      AudioUnitSetProperty(cdata->outunit, kAudioOutputUnitProperty_EnableIO,
                           kAudioUnitScope_Output, 0, &enableIO,
                           sizeof(enableIO));
      enableIO = 0;
      AudioUnitSetProperty(cdata->outunit, kAudioOutputUnitProperty_EnableIO,
                           kAudioUnitScope_Input, 1, &enableIO,
                           sizeof(enableIO));
      aunit = &(cdata->outunit);
    }

    /* now set the buffer size */
    psize = sizeof(AudioDeviceID);
    AudioUnitGetProperty(*aunit, kAudioOutputUnitProperty_CurrentDevice,
                         kAudioUnitScope_Global, isInput, &dev, &psize);
    prop.mSelector = kAudioDevicePropertyBufferFrameSize;
    psize = 4;
    AudioObjectSetPropertyData(dev, &prop, 0, NULL, psize, &bufframes);
    psize = sizeof(maxFPS);
    AudioUnitGetProperty(*aunit, kAudioUnitProperty_MaximumFramesPerSlice,
                         kAudioUnitScope_Global, isInput, &maxFPS, &psize);
    AudioUnitSetProperty(*aunit, kAudioUnitProperty_MaximumFramesPerSlice,
                         kAudioUnitScope_Global, isInput, &bufframes,
                         sizeof(UInt32));

    /* set the stream properties */
    psize = sizeof(AudioStreamBasicDescription);
    AudioUnitGetProperty(*aunit, kAudioUnitProperty_StreamFormat,
                         (isInput ? kAudioUnitScope_Output : kAudioUnitScope_Input),
                         isInput, &format, &psize);
    format.mSampleRate = srate;
    format.mFormatID = kAudioFormatLinearPCM;
    format.mFormatFlags = kAudioFormatFlagsCanonical |
                          kLinearPCMFormatFlagIsNonInterleaved;
    format.mBytesPerPacket = sizeof(AudioUnitSampleType);
    format.mFramesPerPacket = 1;
    format.mBytesPerFrame = sizeof(AudioUnitSampleType);
    format.mChannelsPerFrame = nchnls;
    format.mBitsPerChannel = sizeof(AudioUnitSampleType)*8;
    AudioUnitSetProperty(*aunit, kAudioUnitProperty_StreamFormat,
                         (isInput ? kAudioUnitScope_Output : kAudioUnitScope_Input),
                         isInput, &format, sizeof(AudioStreamBasicDescription));

    /* set the callbacks and open the device */
    if (!isInput) {
      AURenderCallbackStruct output;
      output.inputProc = Csound_Render;
      output.inputProcRefCon = cdata;
      AudioUnitSetProperty(*aunit, kAudioUnitProperty_SetRenderCallback,
                           kAudioUnitScope_Input, isInput, &output,
                           sizeof(output));
      AudioUnitInitialize(*aunit);
      AudioOutputUnitStart(*aunit);
      csound->Message(csound,
                      Str("***** AuHAL module: output device open with %d "
                          "buffer frames\n"), bufframes);
      cdata->disp = 0;
    }
    else {
      AURenderCallbackStruct input;
      /* one non-interleaved buffer per input channel */
      AudioBufferList *CAInputData =
        (AudioBufferList*)malloc(sizeof(UInt32)
                                 + cdata->inchnls * sizeof(AudioBuffer));
      CAInputData->mNumberBuffers = cdata->inchnls;
      for (i = 0; i < cdata->inchnls; i++) {
        CAInputData->mBuffers[i].mNumberChannels = 1;
        CAInputData->mBuffers[i].mDataByteSize =
          bufframes * sizeof(AudioUnitSampleType);
        CAInputData->mBuffers[i].mData =
          calloc(bufframes, sizeof(AudioUnitSampleType));
      }
      cdata->inputdata = CAInputData;
      input.inputProc = Csound_Input;
      input.inputProcRefCon = cdata;
      AudioUnitSetProperty(*aunit, kAudioOutputUnitProperty_SetInputCallback,
                           kAudioUnitScope_Input, isInput, &input,
                           sizeof(input));
      AudioUnitInitialize(*aunit);
      AudioOutputUnitStart(*aunit);
      csound->Message(csound,
                      "***** AuHAL module: input device open with %d buffer frames\n",
                      bufframes);
    }
    if (!cdata->disp)
      csound->Message(csound,
           "==========================================================\n");
    cdata->disp = 0;
    return 0;
}
void iOSCoreAudioInit() { if (!audioInstance) { OSErr err; // first, grab the default output AudioComponentDescription defaultOutputDescription; defaultOutputDescription.componentType = kAudioUnitType_Output; defaultOutputDescription.componentSubType = kAudioUnitSubType_RemoteIO; defaultOutputDescription.componentManufacturer = kAudioUnitManufacturer_Apple; defaultOutputDescription.componentFlags = 0; defaultOutputDescription.componentFlagsMask = 0; AudioComponent defaultOutput = AudioComponentFindNext(NULL, &defaultOutputDescription); // create our instance err = AudioComponentInstanceNew(defaultOutput, &audioInstance); if (err != noErr) { audioInstance = nil; return; } // create our callback so we can give it the audio data AURenderCallbackStruct input; input.inputProc = iOSCoreAudioCallback; input.inputProcRefCon = NULL; err = AudioUnitSetProperty(audioInstance, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, 0, &input, sizeof(input)); if (err != noErr) { AudioComponentInstanceDispose(audioInstance); audioInstance = nil; return; } // setup the audio format we'll be using (stereo pcm) AudioStreamBasicDescription streamFormat; memset(&streamFormat, 0, sizeof(streamFormat)); streamFormat.mSampleRate = SAMPLE_RATE; streamFormat.mFormatID = kAudioFormatLinearPCM; streamFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked; streamFormat.mBitsPerChannel = sizeof(AudioSampleType) * 8; streamFormat.mChannelsPerFrame = 2; streamFormat.mFramesPerPacket = 1; streamFormat.mBytesPerFrame = (streamFormat.mBitsPerChannel / 8) * streamFormat.mChannelsPerFrame; streamFormat.mBytesPerPacket = streamFormat.mBytesPerFrame * streamFormat.mFramesPerPacket; err = AudioUnitSetProperty(audioInstance, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &streamFormat, sizeof(AudioStreamBasicDescription)); if (err != noErr) { AudioComponentInstanceDispose(audioInstance); audioInstance = nil; return; } // k, all setup, so init err = 
AudioUnitInitialize(audioInstance); if (err != noErr) { AudioComponentInstanceDispose(audioInstance); audioInstance = nil; return; } // finally start playback err = AudioOutputUnitStart(audioInstance); if (err != noErr) { AudioUnitUninitialize(audioInstance); AudioComponentInstanceDispose(audioInstance); audioInstance = nil; return; } // we're good to go } }
void InputImplAudioUnit::setup() { if( mIsSetup ) return; OSStatus err = noErr; //get default input device if( ! mDevice ) { mDevice = InputImplAudioUnit::getDefaultDevice(); } //create AudioOutputUnit AudioComponent component; AudioComponentDescription description; description.componentType = kAudioUnitType_Output; #if defined( CINDER_MAC ) description.componentSubType = kAudioUnitSubType_HALOutput; #elif defined( CINDER_COCOA_TOUCH ) description.componentSubType = kAudioUnitSubType_RemoteIO; #endif description.componentManufacturer = kAudioUnitManufacturer_Apple; description.componentFlags = 0; description.componentFlagsMask = 0; component = AudioComponentFindNext( NULL, &description ); if( ! component ) { std::cout << "Error finding next component" << std::endl; throw; } err = AudioComponentInstanceNew( component, &mInputUnit ); if( err != noErr ) { mInputUnit = NULL; std::cout << "Error getting output unit" << std::endl; throw; } // Initialize the AU /*err = AudioUnitInitialize( mInputUnit ); if(err != noErr) { std::cout << "failed to initialize HAL Output AU" << std::endl; throw; }*/ UInt32 param; //enable IO on AudioUnit's input scope param = 1; err = AudioUnitSetProperty( mInputUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, 1, ¶m, sizeof( UInt32 ) ); if( err != noErr ) { std::cout << "Error enable IO on Output unit input" << std::endl; throw; } //disable IO on AudioUnit's output scope param = 0; err = AudioUnitSetProperty( mInputUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, 0, ¶m, sizeof( UInt32 ) ); if( err != noErr ) { std::cout << "Error disabling IO on Output unit output" << std::endl; throw; } #if defined( CINDER_MAC ) AudioDeviceID nativeDeviceId = static_cast<AudioDeviceID>( mDevice->getDeviceId() ); // Set the current device to the default input unit. 
err = AudioUnitSetProperty( mInputUnit, kAudioOutputUnitProperty_CurrentDevice, kAudioUnitScope_Global, 0, &nativeDeviceId, sizeof(AudioDeviceID) ); if( err != noErr ) { std::cout << "failed to set AU input device" << std::endl; throw; } #endif AURenderCallbackStruct callback; callback.inputProc = InputImplAudioUnit::inputCallback; callback.inputProcRefCon = this; err = AudioUnitSetProperty( mInputUnit, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, 0, &callback, sizeof(AURenderCallbackStruct) ); //Don't setup buffers until you know what the //input and output device audio streams look like. // Initialize the AudioUnit err = AudioUnitInitialize( mInputUnit ); if(err != noErr) { std::cout << "failed to initialize HAL Output AU" << std::endl; throw; } //Get Size of IO Buffers uint32_t sampleCount; param = sizeof(UInt32); #if defined( CINDER_MAC ) err = AudioUnitGetProperty( mInputUnit, kAudioDevicePropertyBufferFrameSize, kAudioUnitScope_Global, 0, &sampleCount, ¶m ); if( err != noErr ) { std::cout << "Error getting buffer frame size" << std::endl; throw; } #elif defined( CINDER_COCOA_TOUCH ) AudioUnitGetProperty( mInputUnit, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, 0, &sampleCount, ¶m ); #endif AudioStreamBasicDescription deviceInFormat; AudioStreamBasicDescription desiredOutFormat; //StreamFormat setting: //get and the AudioUnit's default input and output scope stream formats //the AudioUnit has a built in AudioConverter than can do basic PCM format conversions //and channel mapping if the desired channel count is different from the device's channel count //Stream Format - Output Client Side param = sizeof( AudioStreamBasicDescription ); err = AudioUnitGetProperty( mInputUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 1, &deviceInFormat, ¶m ); if( err != noErr ) { std::cout << "failed to get input in device ASBD" << std::endl; throw; } //Stream format client side param = sizeof( AudioStreamBasicDescription 
); err = AudioUnitGetProperty( mInputUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 1, &desiredOutFormat, ¶m ); if( err != noErr ) { std::cout << "failed to get input output device ASBD" << std::endl; throw; } //get the device's sample rate - this has to be the same as the AudioUnit's output format //this is actually already set on the AudioUnit's input default stream format //Float64 rate = 0; //param = sizeof(Float64); //AudioDeviceGetProperty( nativeDeviceId, 0, 1, kAudioDevicePropertyNominalSampleRate, ¶m, &rate ); //desiredOutFormat.mSampleRate = rate; //the output sample rate must be the same as the input device's sample rate desiredOutFormat.mSampleRate = deviceInFormat.mSampleRate; //output the same number of channels that are input desiredOutFormat.mChannelsPerFrame = deviceInFormat.mChannelsPerFrame; //one of the two above options is necessary, either getting the kAudioDevicePropertyNominalSampleRate //or just setting desiredOutFormat.mSampleRate = deviceInFormat.mSampleRate; //set the AudioUnit's output format to be float 32 linear non-interleaved PCM data desiredOutFormat.mFormatID = kAudioFormatLinearPCM; desiredOutFormat.mFormatFlags |= ( kAudioFormatFlagIsNonInterleaved | kAudioFormatFlagIsFloat | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked ); uint32_t sampleSize = sizeof(float); desiredOutFormat.mFramesPerPacket = 1; desiredOutFormat.mBytesPerPacket = sampleSize; desiredOutFormat.mBytesPerFrame = sampleSize; desiredOutFormat.mBitsPerChannel = 8 * sampleSize; param = sizeof( AudioStreamBasicDescription ); err = AudioUnitSetProperty( mInputUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 1, &desiredOutFormat, param ); if( err ) { throw; } mSampleRate = desiredOutFormat.mSampleRate; mChannelCount = desiredOutFormat.mChannelsPerFrame; //Buffer Setup - create the buffers necessary for holding input data //param = sizeof( AudioBufferList ); //AudioBufferList aBufferList; //AudioDeviceGetProperty( nativeDeviceId, 
0, true, kAudioDevicePropertyStreamConfiguration, ¶m, &aBufferList); //setup buffer for recieving data in the callback mInputBufferData = (float *)malloc( sampleCount * desiredOutFormat.mBytesPerFrame ); float * inputBufferChannels[desiredOutFormat.mChannelsPerFrame]; for( int h = 0; h < desiredOutFormat.mChannelsPerFrame; h++ ) { inputBufferChannels[h] = &mInputBufferData[h * sampleCount]; } mInputBuffer = (AudioBufferList *)malloc( offsetof(AudioBufferList, mBuffers[0]) + ( desiredOutFormat.mChannelsPerFrame * sizeof(AudioBuffer) ) ); mInputBuffer->mNumberBuffers = desiredOutFormat.mChannelsPerFrame; //mBuffers.resize( mInputBuffer->mNumberBuffers ); mCircularBuffers.resize( mInputBuffer->mNumberBuffers ); for( int i = 0; i < mInputBuffer->mNumberBuffers; i++ ) { mInputBuffer->mBuffers[i].mNumberChannels = 1; mInputBuffer->mBuffers[i].mDataByteSize = sampleCount * desiredOutFormat.mBytesPerFrame; mInputBuffer->mBuffers[i].mData = inputBufferChannels[i]; //create a circular buffer for each channel //mBuffers[i] = new circular_buffer<float>( sampleCount * 4 ); mCircularBuffers[i] = new CircularBuffer<float>( sampleCount * 4 ); } mIsSetup = true; }
/*
 * Create, configure and start the RemoteIO audio unit for this SDL device,
 * in either playback or capture mode.
 *
 * this      - SDL audio device (_THIS); uses this->hidden and this->spec.
 * devname   - requested device name (unused on iOS; kept for the driver API).
 * iscapture - nonzero to configure the capture (input) side of the unit.
 * strdesc   - client stream format to apply on the chosen scope/bus.
 *
 * Returns 1 on success, 0 on failure (CHECK_RESULT bails out with 0).
 */
static int
prepare_audiounit(_THIS, const char *devname, int iscapture,
                  const AudioStreamBasicDescription * strdesc)
{
    OSStatus result = noErr;
    AURenderCallbackStruct callback;
    AudioComponentDescription desc;
    AudioComponent comp = NULL;
    UInt32 enableIO = 0;

    /* On an output unit, element 1 is the input (mic) side and element 0 the
       output (speaker) side; the client talks to the opposite scope of the
       bus it uses. */
    const AudioUnitElement output_bus = 0;
    const AudioUnitElement input_bus = 1;
    const AudioUnitElement bus = ((iscapture) ? input_bus : output_bus);
    const AudioUnitScope scope = ((iscapture) ? kAudioUnitScope_Output :
                                  kAudioUnitScope_Input);

    SDL_memset(&desc, '\0', sizeof(AudioComponentDescription));
    desc.componentType = kAudioUnitType_Output;
    desc.componentSubType = kAudioUnitSubType_RemoteIO;
    desc.componentManufacturer = kAudioUnitManufacturer_Apple;

    comp = AudioComponentFindNext(NULL, &desc);
    if (comp == NULL) {
        /* FIX: message previously lacked a trailing newline. */
        fprintf(stderr, "Couldn't find requested CoreAudio component\n");
        return 0;
    }

    /* Open & initialize the audio unit */
    /*
       AudioComponentInstanceNew only available on iPhone OS 2.0 and Mac OS X 10.6
       We can't use OpenAComponent on iPhone because it is not present
     */
    result = AudioComponentInstanceNew(comp, &this->hidden->audioUnit);
    CHECK_RESULT("AudioComponentInstanceNew");

    this->hidden->audioUnitOpened = 1;

    /* Enable I/O on the input element only when capturing... */
    // !!! FIXME: this is wrong?
    enableIO = ((iscapture) ? 1 : 0);
    result = AudioUnitSetProperty(this->hidden->audioUnit,
                                  kAudioOutputUnitProperty_EnableIO,
                                  kAudioUnitScope_Input, input_bus,
                                  &enableIO, sizeof(enableIO));
    CHECK_RESULT("AudioUnitSetProperty (kAudioUnitProperty_EnableIO input)");

    /* ...and on the output element only when playing. */
    // !!! FIXME: this is wrong?
    enableIO = ((iscapture) ? 0 : 1);
    result = AudioUnitSetProperty(this->hidden->audioUnit,
                                  kAudioOutputUnitProperty_EnableIO,
                                  kAudioUnitScope_Output, output_bus,
                                  &enableIO, sizeof(enableIO));
    CHECK_RESULT("AudioUnitSetProperty (kAudioUnitProperty_EnableIO output)");

    /*result = AudioUnitSetProperty(this->hidden->audioUnit,
                                  kAudioOutputUnitProperty_CurrentDevice,
                                  kAudioUnitScope_Global, 0,
                                  &this->hidden->deviceID,
                                  sizeof(AudioDeviceID));

       CHECK_RESULT("AudioUnitSetProperty (kAudioOutputUnitProperty_CurrentDevice)");
     */

    /* Set the data format of the audio unit. */
    result = AudioUnitSetProperty(this->hidden->audioUnit,
                                  kAudioUnitProperty_StreamFormat,
                                  scope, bus, strdesc, sizeof(*strdesc));
    CHECK_RESULT("AudioUnitSetProperty (kAudioUnitProperty_StreamFormat)");

    /* Set the audio callback */
    SDL_memset(&callback, '\0', sizeof(AURenderCallbackStruct));
    callback.inputProc = ((iscapture) ? inputCallback : outputCallback);
    callback.inputProcRefCon = this;
    result = AudioUnitSetProperty(this->hidden->audioUnit,
                                  kAudioUnitProperty_SetRenderCallback,
                                  scope, bus, &callback, sizeof(callback));
    /* FIX: the failure label used to name the wrong property
       ("kAudioUnitProperty_SetInputCallback"). */
    CHECK_RESULT("AudioUnitSetProperty (kAudioUnitProperty_SetRenderCallback)");

    /* Calculate the final parameters for this audio specification */
    SDL_CalculateAudioSpec(&this->spec);

    /* Allocate a sample buffer */
    this->hidden->bufferOffset = this->hidden->bufferSize = this->spec.size;
    this->hidden->buffer = SDL_malloc(this->hidden->bufferSize);
    /* FIX: the allocation result was previously unchecked; a NULL buffer
       would be handed to the render callback. */
    if (this->hidden->buffer == NULL) {
        return 0;
    }

    result = AudioUnitInitialize(this->hidden->audioUnit);
    CHECK_RESULT("AudioUnitInitialize");

    /* Finally, start processing of the audio unit */
    result = AudioOutputUnitStart(this->hidden->audioUnit);
    CHECK_RESULT("AudioOutputUnitStart");

    /* We're running! */
    return 1;
}
bool AudioLoopImplCocoa::init() { if (!initialized) { AudioComponentDescription defaultOutputDescription; defaultOutputDescription.componentType = kAudioUnitType_Output; defaultOutputDescription.componentSubType = AUDIO_UNIT_COMPONENT_SUB_TYPE; defaultOutputDescription.componentManufacturer = kAudioUnitManufacturer_Apple; defaultOutputDescription.componentFlags = 0; defaultOutputDescription.componentFlagsMask = 0; AudioComponent defaultOutput = AudioComponentFindNext(NULL, &defaultOutputDescription); if (!defaultOutput) { printf("Can't find default output\n"); return false; } OSStatus err = AudioComponentInstanceNew(defaultOutput, &audioUnit); if (err) { printf("AudioComponentInstanceNew ERROR: %d\n", (int)err); return false; } AURenderCallbackStruct input; input.inputProc = staticRenderCallback; input.inputProcRefCon = this; err = AudioUnitSetProperty(audioUnit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, 0, &input, sizeof(input)); if (err) { printf("AudioUnitSetProperty/kAudioUnitProperty_SetRenderCallback ERROR: %d\n", (int)err); return false; } AudioStreamBasicDescription streamFormat; streamFormat.mSampleRate = 44100; streamFormat.mFormatID = kAudioFormatLinearPCM; streamFormat.mFormatFlags = kAudioFormatFlagsNativeFloatPacked | kAudioFormatFlagIsNonInterleaved; streamFormat.mBytesPerPacket = 4; streamFormat.mFramesPerPacket = 1; streamFormat.mBytesPerFrame = 4; streamFormat.mChannelsPerFrame = 1; streamFormat.mBitsPerChannel = 32; err = AudioUnitSetProperty(audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &streamFormat, sizeof(AudioStreamBasicDescription)); if (err) { printf("AudioUnitSetProperty/kAudioUnitProperty_StreamFormat ERROR: %d\n", (int)err); return false; } err = AudioUnitInitialize(audioUnit); if (err) { printf("AudioUnitInitialize ERROR: %d\n", (int)err); return false; } initialized = true; } return true; }
int input_init(void) { AudioBuffer *buf; theBufferList = malloc(sizeof(AudioBufferList)); theBufferList->mNumberBuffers = 1; int i; buf = theBufferList->mBuffers; buf->mNumberChannels = 3; buf->mDataByteSize = 3*1000; buf->mData = 0; // tell the audiounit to show us its 'buffers' AudioComponent comp; AudioComponentDescription desc; //There are several different types of Audio Units. //Some audio units serve as Outputs, Mixers, or DSP //units. See AUComponent.h for listing desc.componentType = kAudioUnitType_Output; //Every Component has a subType, which will give a clearer picture //of what this components function will be. desc.componentSubType = kAudioUnitSubType_HALOutput; //all Audio Units in AUComponent.h must use //"kAudioUnitManufacturer_Apple" as the Manufacturer desc.componentManufacturer = kAudioUnitManufacturer_Apple; desc.componentFlags = 0; desc.componentFlagsMask = 0; //Finds a component that meets the desc spec's comp = AudioComponentFindNext(NULL, &desc); if (comp == NULL) exit (-1); //gains access to the services provided by the component AudioComponentInstanceNew(comp, &auHAL); UInt32 enableIO; UInt32 size=0; //When using AudioUnitSetProperty the 4th parameter in the method //refer to an AudioUnitElement. When using an AudioOutputUnit //the input element will be '1' and the output element will be '0'. 
enableIO = 1; AudioUnitSetProperty(auHAL, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, 1, // input element &enableIO, sizeof(enableIO)); enableIO = 0; AudioUnitSetProperty(auHAL, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, 0, //output element &enableIO, sizeof(enableIO)); OSStatus err =noErr; size = sizeof(AudioDeviceID); AudioDeviceID inputDevice; err = AudioHardwareGetProperty(kAudioHardwarePropertyDefaultInputDevice, &size, &inputDevice); if (err) exit(err); err =AudioUnitSetProperty(auHAL, kAudioOutputUnitProperty_CurrentDevice, kAudioUnitScope_Global, 0, &inputDevice, sizeof(inputDevice)); if (err) exit(err); AudioStreamBasicDescription DeviceFormat; AudioStreamBasicDescription DesiredFormat; //Use CAStreamBasicDescriptions instead of 'naked' //AudioStreamBasicDescriptions to minimize errors. //CAStreamBasicDescription.h can be found in the CoreAudio SDK. size = sizeof(AudioStreamBasicDescription); //Get the input device format AudioUnitGetProperty (auHAL, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 1, &DeviceFormat, &size); //set the desired format to the device's sample rate memcpy(&DesiredFormat, &DeviceFormat, sizeof(AudioStreamBasicDescription)); sampling_rate = DeviceFormat.mSampleRate; // for laser-emulating filters DesiredFormat.mSampleRate = DeviceFormat.mSampleRate; DesiredFormat.mChannelsPerFrame = 4; DesiredFormat.mBitsPerChannel = 16; DesiredFormat.mBytesPerPacket = DesiredFormat.mBytesPerFrame = DesiredFormat.mChannelsPerFrame * 2; DesiredFormat.mFramesPerPacket = 1; DesiredFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked; //set format to output scope err = AudioUnitSetProperty( auHAL, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 1, &DesiredFormat, sizeof(AudioStreamBasicDescription)); if (err) exit(err); SInt32 *channelMap =NULL; UInt32 numOfChannels = DesiredFormat.mChannelsPerFrame; //2 channels UInt32 mapSize = numOfChannels *sizeof(SInt32); channelMap 
= (SInt32 *)malloc(mapSize); //for each channel of desired input, map the channel from //the device's output channel. for(i=0;i<numOfChannels;i++) { channelMap[i]=i; } err = AudioUnitSetProperty(auHAL, kAudioOutputUnitProperty_ChannelMap, kAudioUnitScope_Output, 1, channelMap, size); if (err) exit(err); free(channelMap); AURenderCallbackStruct input; input.inputProc = callback; input.inputProcRefCon = 0; err = AudioUnitSetProperty( auHAL, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, 0, &input, sizeof(input)); if (err) exit(err); err = AudioUnitInitialize(auHAL); if (err) exit(err); err = AudioOutputUnitStart(auHAL); if (err) exit(err); }
int CoreAudioDrv_PCM_Init(int * mixrate, int * numchannels, int * samplebits, void * initdata) { OSStatus result = noErr; AudioComponentDescription desc; AudioStreamBasicDescription requestedDesc; if (Initialised) { CoreAudioDrv_PCM_Shutdown(); } if (pthread_mutex_init(&mutex, 0) < 0) { ErrorCode = CAErr_Mutex; return CAErr_Error; } // Setup a AudioStreamBasicDescription with the requested format requestedDesc.mFormatID = kAudioFormatLinearPCM; requestedDesc.mFormatFlags = kLinearPCMFormatFlagIsPacked; requestedDesc.mChannelsPerFrame = *numchannels; requestedDesc.mSampleRate = *mixrate; requestedDesc.mBitsPerChannel = *samplebits; if (*samplebits == 16) { requestedDesc.mFormatFlags |= kLinearPCMFormatFlagIsSignedInteger; } requestedDesc.mFramesPerPacket = 1; requestedDesc.mBytesPerFrame = requestedDesc.mBitsPerChannel * requestedDesc.mChannelsPerFrame / 8; requestedDesc.mBytesPerPacket = requestedDesc.mBytesPerFrame * requestedDesc.mFramesPerPacket; // Locate the default output audio unit desc.componentType = kAudioUnitType_Output; desc.componentSubType = kAudioUnitSubType_DefaultOutput; desc.componentManufacturer = kAudioUnitManufacturer_Apple; desc.componentFlags = 0; desc.componentFlagsMask = 0; comp = AudioComponentFindNext(NULL, &desc); if (comp == NULL) { ErrorCode = CAErr_FindNextComponent; pthread_mutex_destroy(&mutex); return CAErr_Error; } // Open & initialize the default output audio unit result = AudioComponentInstanceNew(comp, &output_audio_unit); if (result != noErr) { ErrorCode = CAErr_OpenAComponent; //CloseComponent(output_audio_unit); pthread_mutex_destroy(&mutex); return CAErr_Error; } result = AudioUnitInitialize(output_audio_unit); if (result != noErr) { ErrorCode = CAErr_AudioUnitInitialize; //CloseComponent(output_audio_unit); pthread_mutex_destroy(&mutex); return CAErr_Error; } // Set the input format of the audio unit. 
result = AudioUnitSetProperty(output_audio_unit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &requestedDesc, sizeof(requestedDesc)); if (result != noErr) { ErrorCode = CAErr_AudioUnitSetProperty; //CloseComponent(output_audio_unit); pthread_mutex_destroy(&mutex); return CAErr_Error; } // Set the audio callback setMixerCallback(); Initialised = 1; return CAErr_Ok; }
// Construct a CoreAudio output: allocate the AU spinlock and the ring buffer,
// open the platform's default output AudioUnit, install the render callback,
// set the stream format and initialize the unit.
//
// On any CoreAudio failure the constructor returns early, leaving the object
// only partially set up (_au may be unopened/uninitialized) -- callers
// presumably detect this elsewhere; TODO confirm.
//
// bufferSamples - capacity of the ring buffer, in samples.
// sampleSize    - size of one sample, in bytes.
CoreAudioOutput::CoreAudioOutput(size_t bufferSamples, size_t sampleSize)
{
	OSStatus error = noErr;

	// NOTE(review): malloc result is not checked; an allocation failure
	// would crash on the dereference below.
	_spinlockAU = (OSSpinLock *)malloc(sizeof(OSSpinLock));
	*_spinlockAU = OS_SPINLOCK_INIT;

	_buffer = new RingBuffer(bufferSamples, sampleSize);
	_volume = 1.0f;

	// Create a new audio unit.
	// Built against a 10.6+ SDK: choose at runtime between the modern
	// AudioComponent API and the legacy Component Manager API; built against
	// older SDKs only the legacy path exists.
#if defined(MAC_OS_X_VERSION_10_6) && MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6
	if (IsOSXVersionSupported(10, 6, 0))
	{
		AudioComponentDescription audioDesc;
		audioDesc.componentType = kAudioUnitType_Output;
		audioDesc.componentSubType = kAudioUnitSubType_DefaultOutput;
		audioDesc.componentManufacturer = kAudioUnitManufacturer_Apple;
		audioDesc.componentFlags = 0;
		audioDesc.componentFlagsMask = 0;

		AudioComponent audioComponent = AudioComponentFindNext(NULL, &audioDesc);
		if (audioComponent == NULL)
		{
			return;
		}

		error = AudioComponentInstanceNew(audioComponent, &_au);
		if (error != noErr)
		{
			return;
		}
	}
	else
	{
		ComponentDescription audioDesc;
		audioDesc.componentType = kAudioUnitType_Output;
		audioDesc.componentSubType = kAudioUnitSubType_DefaultOutput;
		audioDesc.componentManufacturer = kAudioUnitManufacturer_Apple;
		audioDesc.componentFlags = 0;
		audioDesc.componentFlagsMask = 0;

		Component audioComponent = FindNextComponent(NULL, &audioDesc);
		if (audioComponent == NULL)
		{
			return;
		}

		error = OpenAComponent(audioComponent, &_au);
		if (error != noErr)
		{
			return;
		}
	}
#else
	ComponentDescription audioDesc;
	audioDesc.componentType = kAudioUnitType_Output;
	audioDesc.componentSubType = kAudioUnitSubType_DefaultOutput;
	audioDesc.componentManufacturer = kAudioUnitManufacturer_Apple;
	audioDesc.componentFlags = 0;
	audioDesc.componentFlagsMask = 0;

	Component audioComponent = FindNextComponent(NULL, &audioDesc);
	if (audioComponent == NULL)
	{
		return;
	}

	error = OpenAComponent(audioComponent, &_au);
	if (error != noErr)
	{
		return;
	}
#endif

	// Set the render callback; the callback pulls samples straight from the
	// ring buffer (it gets _buffer, not `this`, as its refcon).
	AURenderCallbackStruct callback;
	callback.inputProc = &CoreAudioOutputRenderCallback;
	callback.inputProcRefCon = _buffer;

	error = AudioUnitSetProperty(_au,
	                             kAudioUnitProperty_SetRenderCallback,
	                             kAudioUnitScope_Input,
	                             0,
	                             &callback,
	                             sizeof(callback) );

	if(error != noErr)
	{
		return;
	}

	// Set up the audio unit for audio streaming
	AudioStreamBasicDescription outputFormat;
	outputFormat.mSampleRate = SPU_SAMPLE_RATE;
	outputFormat.mFormatID = kAudioFormatLinearPCM;
	outputFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagsNativeEndian | kLinearPCMFormatFlagIsPacked;
	outputFormat.mBytesPerPacket = SPU_SAMPLE_SIZE;
	outputFormat.mFramesPerPacket = 1;
	outputFormat.mBytesPerFrame = SPU_SAMPLE_SIZE;
	outputFormat.mChannelsPerFrame = SPU_NUMBER_CHANNELS;
	outputFormat.mBitsPerChannel = SPU_SAMPLE_RESOLUTION;

	error = AudioUnitSetProperty(_au,
	                             kAudioUnitProperty_StreamFormat,
	                             kAudioUnitScope_Input,
	                             0,
	                             &outputFormat,
	                             sizeof(outputFormat) );

	if(error != noErr)
	{
		return;
	}

	// Initialize our new audio unit
	error = AudioUnitInitialize(_au);
	if(error != noErr)
	{
		return;
	}
}
int CoreAudioDriver::init( unsigned bufferSize ) { OSStatus err = noErr; m_pOut_L = new float[ m_nBufferSize ]; m_pOut_R = new float[ m_nBufferSize ]; memset ( m_pOut_L, 0, m_nBufferSize * sizeof( float ) ); memset ( m_pOut_R, 0, m_nBufferSize * sizeof( float ) ); // Get Component AudioComponent compOutput; AudioComponentDescription descAUHAL; descAUHAL.componentType = kAudioUnitType_Output; descAUHAL.componentSubType = kAudioUnitSubType_HALOutput; descAUHAL.componentManufacturer = kAudioUnitManufacturer_Apple; descAUHAL.componentFlags = 0; descAUHAL.componentFlagsMask = 0; compOutput = AudioComponentFindNext( NULL, &descAUHAL ); if ( compOutput == NULL ) { ERRORLOG( "Error in FindNextComponent" ); //exit (-1); } err = AudioComponentInstanceNew( compOutput, &m_outputUnit ); if ( err != noErr ) { ERRORLOG( "Error Opening Component" ); } // Get Current Output Device retrieveDefaultDevice(); // Set AUHAL to Current Device err = AudioUnitSetProperty( m_outputUnit, kAudioOutputUnitProperty_CurrentDevice, kAudioUnitScope_Global, 0, &m_outputDevice, sizeof( m_outputDevice ) ); if ( err != noErr ) { ERRORLOG( "Could not set Current Device" ); } AudioStreamBasicDescription asbdesc; asbdesc.mSampleRate = ( Float64 )m_nSampleRate; asbdesc.mFormatID = kAudioFormatLinearPCM; asbdesc.mFormatFlags = kAudioFormatFlagsNativeFloatPacked | kAudioFormatFlagIsNonInterleaved; asbdesc.mBytesPerPacket = sizeof( Float32 ); asbdesc.mFramesPerPacket = 1; asbdesc.mBytesPerFrame = sizeof( Float32 ); asbdesc.mChannelsPerFrame = 2; // comix: was set to 1 asbdesc.mBitsPerChannel = 32; err = AudioUnitSetProperty( m_outputUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &asbdesc, sizeof( AudioStreamBasicDescription ) ); // Set Render Callback AURenderCallbackStruct out; out.inputProc = renderProc; out.inputProcRefCon = ( void * )this; err = AudioUnitSetProperty( m_outputUnit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Global, 0, &out, sizeof( out ) ); if ( err != noErr ) 
{ ERRORLOG( "Could not Set Render Callback" ); } //Initialize AUHAL err = AudioUnitInitialize( m_outputUnit ); if ( err != noErr ) { ERRORLOG( "Could not Initialize AudioUnit" ); } return 0; }
// Initialize the CoreAudio driver: create the output unit (HAL output on
// macOS, RemoteIO elsewhere), register default-device-change listeners on
// macOS, probe the hardware's playback and capture channel counts, push
// 16-bit signed interleaved client formats onto both sides of the unit,
// size the mix/capture buffers from the configured latency, and install
// both the render and input callbacks.
// Returns OK, or FAILED on the first CoreAudio error (via ERR_FAIL_COND_V).
Error AudioDriverCoreAudio::init() {
	mutex = Mutex::create();

	AudioComponentDescription desc;
	zeromem(&desc, sizeof(desc));
	desc.componentType = kAudioUnitType_Output;
#ifdef OSX_ENABLED
	desc.componentSubType = kAudioUnitSubType_HALOutput;
#else
	desc.componentSubType = kAudioUnitSubType_RemoteIO;
#endif
	desc.componentManufacturer = kAudioUnitManufacturer_Apple;

	AudioComponent comp = AudioComponentFindNext(NULL, &desc);
	ERR_FAIL_COND_V(comp == NULL, FAILED);

	OSStatus result = AudioComponentInstanceNew(comp, &audio_unit);
	ERR_FAIL_COND_V(result != noErr, FAILED);

#ifdef OSX_ENABLED
	// Follow the system default output/input devices as they change.
	AudioObjectPropertyAddress prop;
	prop.mSelector = kAudioHardwarePropertyDefaultOutputDevice;
	prop.mScope = kAudioObjectPropertyScopeGlobal;
	prop.mElement = kAudioObjectPropertyElementMaster;

	result = AudioObjectAddPropertyListener(kAudioObjectSystemObject, &prop, &output_device_address_cb, this);
	ERR_FAIL_COND_V(result != noErr, FAILED);

	prop.mSelector = kAudioHardwarePropertyDefaultInputDevice;
	result = AudioObjectAddPropertyListener(kAudioObjectSystemObject, &prop, &input_device_address_cb, this);
	ERR_FAIL_COND_V(result != noErr, FAILED);
#endif

	AudioStreamBasicDescription strdesc;

	// Ask the hardware side of the output bus how many channels it offers.
	zeromem(&strdesc, sizeof(strdesc));
	UInt32 size = sizeof(strdesc);
	result = AudioUnitGetProperty(audio_unit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, kOutputBus, &strdesc, &size);
	ERR_FAIL_COND_V(result != noErr, FAILED);

	switch (strdesc.mChannelsPerFrame) {
		case 2: // Stereo
		case 4: // Surround 3.1
		case 6: // Surround 5.1
		case 8: // Surround 7.1
			channels = strdesc.mChannelsPerFrame;
			break;

		default:
			// Unknown number of channels, default to stereo
			channels = 2;
			break;
	}

	// Same probe for the capture (input) bus.
	zeromem(&strdesc, sizeof(strdesc));
	size = sizeof(strdesc);
	result = AudioUnitGetProperty(audio_unit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, kInputBus, &strdesc, &size);
	ERR_FAIL_COND_V(result != noErr, FAILED);

	switch (strdesc.mChannelsPerFrame) {
		case 1: // Mono
			capture_channels = 1;
			break;

		case 2: // Stereo
			capture_channels = 2;
			break;

		default:
			// Unknown number of channels, default to stereo
			capture_channels = 2;
			break;
	}

	mix_rate = GLOBAL_DEF_RST("audio/mix_rate", DEFAULT_MIX_RATE);

	// Client format shared by playback and capture: 16-bit signed
	// interleaved PCM at the configured mix rate.
	zeromem(&strdesc, sizeof(strdesc));
	strdesc.mFormatID = kAudioFormatLinearPCM;
	strdesc.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
	strdesc.mChannelsPerFrame = channels;
	strdesc.mSampleRate = mix_rate;
	strdesc.mFramesPerPacket = 1;
	strdesc.mBitsPerChannel = 16;
	strdesc.mBytesPerFrame = strdesc.mBitsPerChannel * strdesc.mChannelsPerFrame / 8;
	strdesc.mBytesPerPacket = strdesc.mBytesPerFrame * strdesc.mFramesPerPacket;

	result = AudioUnitSetProperty(audio_unit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, kOutputBus, &strdesc, sizeof(strdesc));
	ERR_FAIL_COND_V(result != noErr, FAILED);

	// Reuse the same description for capture, adjusting only the channels.
	strdesc.mChannelsPerFrame = capture_channels;

	result = AudioUnitSetProperty(audio_unit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, kInputBus, &strdesc, sizeof(strdesc));
	ERR_FAIL_COND_V(result != noErr, FAILED);

	int latency = GLOBAL_DEF_RST("audio/output_latency", DEFAULT_OUTPUT_LATENCY);
	// Sample rate is independent of channels (ref: https://stackoverflow.com/questions/11048825/audio-sample-frequency-rely-on-channels)
	buffer_frames = closest_power_of_2(latency * mix_rate / 1000);

#ifdef OSX_ENABLED
	result = AudioUnitSetProperty(audio_unit, kAudioDevicePropertyBufferFrameSize, kAudioUnitScope_Global, kOutputBus, &buffer_frames, sizeof(UInt32));
	ERR_FAIL_COND_V(result != noErr, FAILED);
#endif

	unsigned int buffer_size = buffer_frames * channels;
	samples_in.resize(buffer_size);
	input_buf.resize(buffer_size);
	// Capture ring buffer holds several callbacks' worth of data.
	input_buffer.resize(buffer_size * 8);
	input_position = 0;
	input_size = 0;

	print_verbose("CoreAudio: detected " + itos(channels) + " channels");
	print_verbose("CoreAudio: audio buffer frames: " + itos(buffer_frames) + " calculated latency: " + itos(buffer_frames * 1000 / mix_rate) + "ms");

	// Playback render callback on the output bus...
	AURenderCallbackStruct callback;
	zeromem(&callback, sizeof(AURenderCallbackStruct));
	callback.inputProc = &AudioDriverCoreAudio::output_callback;
	callback.inputProcRefCon = this;
	result = AudioUnitSetProperty(audio_unit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, kOutputBus, &callback, sizeof(callback));
	ERR_FAIL_COND_V(result != noErr, FAILED);

	// ...and the capture callback on the unit's global scope.
	zeromem(&callback, sizeof(AURenderCallbackStruct));
	callback.inputProc = &AudioDriverCoreAudio::input_callback;
	callback.inputProcRefCon = this;
	result = AudioUnitSetProperty(audio_unit, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, 0, &callback, sizeof(callback));
	ERR_FAIL_COND_V(result != noErr, FAILED);

	result = AudioUnitInitialize(audio_unit);
	ERR_FAIL_COND_V(result != noErr, FAILED);

	return OK;
}
/*
 * Build, configure and start the audio unit for SDL device `this`
 * (DefaultOutput on macOS, RemoteIO on iOS) in either playback or capture
 * mode.
 *
 * this      - SDL audio device (_THIS); uses this->hidden and this->spec.
 * handle    - device handle forwarded to prepare_device() on macOS.
 * iscapture - nonzero to configure the capture (input) side of the unit.
 * strdesc   - client stream format to apply on the chosen scope/bus.
 *
 * Returns 1 on success, 0 on failure (CHECK_RESULT bails out with 0).
 */
static int
prepare_audiounit(_THIS, void *handle, int iscapture,
                  const AudioStreamBasicDescription * strdesc)
{
    OSStatus result = noErr;
    AURenderCallbackStruct callback;
    AudioComponentDescription desc;
    AudioComponent comp = NULL;

    /* Element 1 of an output unit is its input (capture) bus, element 0 the
       output bus; the client talks to the opposite scope of the bus it uses. */
    const AudioUnitElement output_bus = 0;
    const AudioUnitElement input_bus = 1;
    const AudioUnitElement bus = ((iscapture) ? input_bus : output_bus);
    const AudioUnitScope scope = ((iscapture) ? kAudioUnitScope_Output :
                                  kAudioUnitScope_Input);

#if MACOSX_COREAUDIO
    if (!prepare_device(this, handle, iscapture)) {
        return 0;
    }
#endif

    SDL_zero(desc);
    desc.componentType = kAudioUnitType_Output;
    desc.componentManufacturer = kAudioUnitManufacturer_Apple;

#if MACOSX_COREAUDIO
    desc.componentSubType = kAudioUnitSubType_DefaultOutput;
#else
    desc.componentSubType = kAudioUnitSubType_RemoteIO;
#endif

    comp = AudioComponentFindNext(NULL, &desc);
    if (comp == NULL) {
        SDL_SetError("Couldn't find requested CoreAudio component");
        return 0;
    }

    /* Open & initialize the audio unit */
    result = AudioComponentInstanceNew(comp, &this->hidden->audioUnit);
    CHECK_RESULT("AudioComponentInstanceNew");

    this->hidden->audioUnitOpened = 1;

#if MACOSX_COREAUDIO
    /* Bind the unit to the device chosen by prepare_device(). */
    result = AudioUnitSetProperty(this->hidden->audioUnit,
                                  kAudioOutputUnitProperty_CurrentDevice,
                                  kAudioUnitScope_Global, 0,
                                  &this->hidden->deviceID,
                                  sizeof(AudioDeviceID));
    CHECK_RESULT
        ("AudioUnitSetProperty (kAudioOutputUnitProperty_CurrentDevice)");
#endif

    /* Set the data format of the audio unit. */
    result = AudioUnitSetProperty(this->hidden->audioUnit,
                                  kAudioUnitProperty_StreamFormat,
                                  scope, bus, strdesc, sizeof(*strdesc));
    CHECK_RESULT("AudioUnitSetProperty (kAudioUnitProperty_StreamFormat)");

    /* Set the audio callback */
    SDL_memset(&callback, 0, sizeof(AURenderCallbackStruct));
    callback.inputProc = ((iscapture) ? inputCallback : outputCallback);
    callback.inputProcRefCon = this;
    result = AudioUnitSetProperty(this->hidden->audioUnit,
                                  kAudioUnitProperty_SetRenderCallback,
                                  scope, bus, &callback, sizeof(callback));
    CHECK_RESULT
        ("AudioUnitSetProperty (kAudioUnitProperty_SetRenderCallback)");

    /* Calculate the final parameters for this audio specification */
    SDL_CalculateAudioSpec(&this->spec);

    /* Allocate a sample buffer */
    /* NOTE(review): the SDL_malloc result is not checked -- confirm callers
       tolerate a NULL buffer, or add an out-of-memory bail-out. */
    this->hidden->bufferOffset = this->hidden->bufferSize = this->spec.size;
    this->hidden->buffer = SDL_malloc(this->hidden->bufferSize);

    result = AudioUnitInitialize(this->hidden->audioUnit);
    CHECK_RESULT("AudioUnitInitialize");

    /* Finally, start processing of the audio unit */
    result = AudioOutputUnitStart(this->hidden->audioUnit);
    CHECK_RESULT("AudioOutputUnitStart");

#if MACOSX_COREAUDIO
    /* Fire a callback if the device stops being "alive" (disconnected, etc). */
    AudioObjectAddPropertyListener(this->hidden->deviceID, &alive_address,
                                   device_unplugged, this);
#endif

    /* We're running! */
    return 1;
}
/*
 * Open, configure and start an AudioUnit for capture (is_read true) or
 * playback (is_read false).
 *
 * When d->dev != -1 a HAL output unit bound to that specific device is used
 * (with the unused direction's I/O disabled); otherwise the default output
 * unit is used.  The client format is forced to 16-bit signed packed PCM at
 * d->rate / d->nchannels on the client-facing scope of the relevant bus.
 *
 * Returns 0 on success, -1 on failure (CHECK_AURESULT logs and returns).
 *
 * FIX: restores `&param` where text extraction had mangled it into the
 * mojibake sequence "¶m" (four occurrences); the code would not compile
 * otherwise.  No other code change.
 */
static int audio_unit_open(AUCommon *d, bool_t is_read) {
	OSStatus result;
	UInt32 param;
#if MAC_OS_X_VERSION_MIN_REQUIRED > MAC_OS_X_VERSION_10_5
	AudioComponentDescription desc;
	AudioComponent comp;
#else
	ComponentDescription desc;
	Component comp;
#endif
	AudioStreamBasicDescription asbd;
	const int input_bus = 1;
	const int output_bus = 0;

	// Get Default Input audio unit
	desc.componentType = kAudioUnitType_Output;
	desc.componentSubType = d->dev != -1 ? kAudioUnitSubType_HALOutput : kAudioUnitSubType_DefaultOutput;
	desc.componentManufacturer = kAudioUnitManufacturer_Apple;
	desc.componentFlags = 0;
	desc.componentFlagsMask = 0;

#if MAC_OS_X_VERSION_MIN_REQUIRED > MAC_OS_X_VERSION_10_5
	comp = AudioComponentFindNext(NULL, &desc);
#else
	comp = FindNextComponent(NULL, &desc);
#endif
	if (comp == NULL) {
		ms_message("Cannot find audio component");
		return -1;
	}

#if MAC_OS_X_VERSION_MIN_REQUIRED > MAC_OS_X_VERSION_10_5
	result = AudioComponentInstanceNew(comp, &d->au);
#else
	result = OpenAComponent(comp, &d->au);
#endif
	if (result != noErr) {
		ms_message("Cannot open audio component %"UINT32_X_PRINTF, result);
		return -1;
	}

	// For a specific device: enable only the direction we need.
	param = is_read;
	if (d->dev != -1) {
		CHECK_AURESULT(AudioUnitSetProperty(d->au,
		                                    kAudioOutputUnitProperty_EnableIO,
		                                    kAudioUnitScope_Input,
		                                    input_bus,
		                                    &param,
		                                    sizeof(UInt32)));
		param = !is_read;
		CHECK_AURESULT(AudioUnitSetProperty(d->au,
		                                    kAudioOutputUnitProperty_EnableIO,
		                                    kAudioUnitScope_Output,
		                                    output_bus,
		                                    &param,
		                                    sizeof(UInt32)));
		// Set the current device
		CHECK_AURESULT(AudioUnitSetProperty(d->au,
		                                    kAudioOutputUnitProperty_CurrentDevice,
		                                    kAudioUnitScope_Global,
		                                    output_bus,
		                                    &d->dev,
		                                    sizeof(AudioDeviceID)));
	}

	// We supply our own buffers.
	param = 0;
	CHECK_AURESULT(AudioUnitSetProperty(d->au,
	                                    kAudioUnitProperty_ShouldAllocateBuffer,
	                                    is_read ? kAudioUnitScope_Input : kAudioUnitScope_Output,
	                                    is_read ? input_bus : output_bus,
	                                    &param,
	                                    sizeof(param)));

	UInt32 asbdsize = sizeof(AudioStreamBasicDescription);
	memset((char *)&asbd, 0, asbdsize);

	CHECK_AURESULT(AudioUnitGetProperty(d->au,
	                                    kAudioUnitProperty_StreamFormat,
	                                    is_read ? kAudioUnitScope_Input : kAudioUnitScope_Output,
	                                    is_read ? input_bus : output_bus,
	                                    &asbd,
	                                    &asbdsize));
	show_format(is_read ? "Input audio unit" : "Output audio unit", &asbd);

	// Force 16-bit signed packed PCM at our rate/channel count.
	asbd.mSampleRate = d->rate;
	asbd.mBytesPerPacket = asbd.mBytesPerFrame = 2 * d->nchannels;
	asbd.mChannelsPerFrame = d->nchannels;
	asbd.mBitsPerChannel = 16;
	asbd.mFormatID = kAudioFormatLinearPCM;
	asbd.mFormatFlags = kAudioFormatFlagIsPacked | kAudioFormatFlagIsSignedInteger;

	CHECK_AURESULT(AudioUnitSetProperty(d->au,
	                                    kAudioUnitProperty_StreamFormat,
	                                    is_read ? kAudioUnitScope_Output : kAudioUnitScope_Input,
	                                    is_read ? input_bus : output_bus,
	                                    &asbd,
	                                    sizeof(AudioStreamBasicDescription)));

	CHECK_AURESULT(AudioUnitGetProperty(d->au,
	                                    kAudioUnitProperty_StreamFormat,
	                                    is_read ? kAudioUnitScope_Output : kAudioUnitScope_Input,
	                                    is_read ? input_bus : output_bus,
	                                    &asbd,
	                                    &asbdsize));
	show_format(is_read ? "Input audio unit after configuration" : "Output audio unit after configuration", &asbd);

	// Get the number of frames in the IO buffer(s)
	param = sizeof(UInt32);
	UInt32 numFrames;
	CHECK_AURESULT(AudioUnitGetProperty(d->au,
	                                    kAudioDevicePropertyBufferFrameSize,
	                                    kAudioUnitScope_Input,
	                                    input_bus,
	                                    &numFrames,
	                                    &param));
	ms_message("Number of frames per buffer = %"UINT32_PRINTF, numFrames);

	AURenderCallbackStruct cbs;
	cbs.inputProcRefCon = d;
	if (is_read) {
		cbs.inputProc = readRenderProc;
		CHECK_AURESULT(AudioUnitSetProperty(d->au,
		                                    kAudioOutputUnitProperty_SetInputCallback,
		                                    kAudioUnitScope_Global,
		                                    input_bus,
		                                    &cbs,
		                                    sizeof(AURenderCallbackStruct)));
	} else {
		cbs.inputProc = writeRenderProc;
		CHECK_AURESULT(AudioUnitSetProperty(d->au,
		                                    kAudioUnitProperty_SetRenderCallback,
		                                    kAudioUnitScope_Global,
		                                    output_bus,
		                                    &cbs,
		                                    sizeof(AURenderCallbackStruct)));
	}

	result = AudioUnitInitialize(d->au);
	if (result != noErr) {
		ms_error("failed to AudioUnitInitialize %"UINT32_PRINTF" , is_read=%i", result, (int)is_read);
		return -1;
	}

	CHECK_AURESULT(AudioOutputUnitStart(d->au));
	return 0;
}
static void *coreaudio_init(const char *device, unsigned rate, unsigned latency) { size_t fifo_size; UInt32 i_size; AudioStreamBasicDescription real_desc; #ifdef OSX_PPC Component comp; #else AudioComponent comp; #endif #ifndef TARGET_OS_IPHONE AudioChannelLayout layout = {0}; #endif AURenderCallbackStruct cb = {0}; AudioStreamBasicDescription stream_desc = {0}; bool component_unavailable = false; static bool session_initialized = false; coreaudio_t *dev = NULL; #ifdef OSX_PPC ComponentDescription desc = {0}; #else AudioComponentDescription desc = {0}; #endif settings_t *settings = config_get_ptr(); (void)session_initialized; (void)device; dev = (coreaudio_t*)calloc(1, sizeof(*dev)); if (!dev) return NULL; dev->lock = slock_new(); dev->cond = scond_new(); #if TARGET_OS_IPHONE if (!session_initialized) { session_initialized = true; AudioSessionInitialize(0, 0, coreaudio_interrupt_listener, 0); AudioSessionSetActive(true); } #endif /* Create AudioComponent */ desc.componentType = kAudioUnitType_Output; #if TARGET_OS_IPHONE desc.componentSubType = kAudioUnitSubType_RemoteIO; #else desc.componentSubType = kAudioUnitSubType_HALOutput; #endif desc.componentManufacturer = kAudioUnitManufacturer_Apple; #ifdef OSX_PPC comp = FindNextComponent(NULL, &desc); #else comp = AudioComponentFindNext(NULL, &desc); #endif if (comp == NULL) goto error; #ifdef OSX_PPC component_unavailable = (OpenAComponent(comp, &dev->dev) != noErr); #else component_unavailable = (AudioComponentInstanceNew(comp, &dev->dev) != noErr); #endif if (component_unavailable) goto error; #if !TARGET_OS_IPHONE if (device) choose_output_device(dev, device); #endif dev->dev_alive = true; /* Set audio format */ stream_desc.mSampleRate = rate; stream_desc.mBitsPerChannel = sizeof(float) * CHAR_BIT; stream_desc.mChannelsPerFrame = 2; stream_desc.mBytesPerPacket = 2 * sizeof(float); stream_desc.mBytesPerFrame = 2 * sizeof(float); stream_desc.mFramesPerPacket = 1; stream_desc.mFormatID = kAudioFormatLinearPCM; 
stream_desc.mFormatFlags = kAudioFormatFlagIsFloat | kAudioFormatFlagIsPacked | (is_little_endian() ? 0 : kAudioFormatFlagIsBigEndian); if (AudioUnitSetProperty(dev->dev, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &stream_desc, sizeof(stream_desc)) != noErr) goto error; /* Check returned audio format. */ i_size = sizeof(real_desc); if (AudioUnitGetProperty(dev->dev, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &real_desc, &i_size) != noErr) goto error; if (real_desc.mChannelsPerFrame != stream_desc.mChannelsPerFrame) goto error; if (real_desc.mBitsPerChannel != stream_desc.mBitsPerChannel) goto error; if (real_desc.mFormatFlags != stream_desc.mFormatFlags) goto error; if (real_desc.mFormatID != stream_desc.mFormatID) goto error; RARCH_LOG("[CoreAudio]: Using output sample rate of %.1f Hz\n", (float)real_desc.mSampleRate); settings->audio.out_rate = real_desc.mSampleRate; /* Set channel layout (fails on iOS). */ #ifndef TARGET_OS_IPHONE layout.mChannelLayoutTag = kAudioChannelLayoutTag_Stereo; if (AudioUnitSetProperty(dev->dev, kAudioUnitProperty_AudioChannelLayout, kAudioUnitScope_Input, 0, &layout, sizeof(layout)) != noErr) goto error; #endif /* Set callbacks and finish up. */ cb.inputProc = audio_write_cb; cb.inputProcRefCon = dev; if (AudioUnitSetProperty(dev->dev, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, 0, &cb, sizeof(cb)) != noErr) goto error; if (AudioUnitInitialize(dev->dev) != noErr) goto error; fifo_size = (latency * settings->audio.out_rate) / 1000; fifo_size *= 2 * sizeof(float); dev->buffer_size = fifo_size; dev->buffer = fifo_new(fifo_size); if (!dev->buffer) goto error; RARCH_LOG("[CoreAudio]: Using buffer size of %u bytes: (latency = %u ms)\n", (unsigned)fifo_size, latency); if (AudioOutputUnitStart(dev->dev) != noErr) goto error; return dev; error: RARCH_ERR("[CoreAudio]: Failed to initialize driver ...\n"); coreaudio_free(dev); return NULL; }
/* Create and configure the output AudioUnit for LPCM playback.
 * Picks AUHAL when a specific device was requested (ao->device set),
 * otherwise the system DefaultOutput unit; then sets the stream format,
 * binds the unit to p->device and installs the render callback.
 * Returns true on success; on failure unwinds whatever was created. */
static bool init_audiounit(struct ao *ao, AudioStreamBasicDescription asbd)
{
    OSStatus err;
    uint32_t size;
    struct priv *p = ao->priv;

    AudioComponentDescription desc = (AudioComponentDescription) {
        .componentType         = kAudioUnitType_Output,
        .componentSubType      = (ao->device) ?
                                     kAudioUnitSubType_HALOutput :
                                     kAudioUnitSubType_DefaultOutput,
        .componentManufacturer = kAudioUnitManufacturer_Apple,
        .componentFlags        = 0,
        .componentFlagsMask    = 0,
    };

    AudioComponent comp = AudioComponentFindNext(NULL, &desc);
    if (comp == NULL) {
        MP_ERR(ao, "unable to find audio component\n");
        goto coreaudio_error;
    }

    err = AudioComponentInstanceNew(comp, &(p->audio_unit));
    CHECK_CA_ERROR("unable to open audio component");

    /* NOTE(review): the unit is initialized before the stream-format and
     * device properties are applied below; the error labels rely on this
     * call order for their unwind steps. */
    err = AudioUnitInitialize(p->audio_unit);
    CHECK_CA_ERROR_L(coreaudio_error_component,
                     "unable to initialize audio unit");

    size = sizeof(AudioStreamBasicDescription);
    err = AudioUnitSetProperty(p->audio_unit,
                               kAudioUnitProperty_StreamFormat,
                               kAudioUnitScope_Input, 0, &asbd, size);
    CHECK_CA_ERROR_L(coreaudio_error_audiounit,
                     "unable to set the input format on the audio unit");

    /* Route the unit to the previously selected output device. */
    err = AudioUnitSetProperty(p->audio_unit,
                               kAudioOutputUnitProperty_CurrentDevice,
                               kAudioUnitScope_Global, 0, &p->device,
                               sizeof(p->device));
    CHECK_CA_ERROR_L(coreaudio_error_audiounit,
                     "can't link audio unit to selected device");

    p->hw_latency_us = ca_get_hardware_latency(ao);

    AURenderCallbackStruct render_cb = (AURenderCallbackStruct) {
        .inputProc       = render_cb_lpcm,
        .inputProcRefCon = ao,
    };

    err = AudioUnitSetProperty(p->audio_unit,
                               kAudioUnitProperty_SetRenderCallback,
                               kAudioUnitScope_Input, 0, &render_cb,
                               sizeof(AURenderCallbackStruct));
    CHECK_CA_ERROR_L(coreaudio_error_audiounit,
                     "unable to set render callback on audio unit");

    return true;

/* Error unwind, in reverse order of construction. */
coreaudio_error_audiounit:
    AudioUnitUninitialize(p->audio_unit);
coreaudio_error_component:
    AudioComponentInstanceDispose(p->audio_unit);
coreaudio_error:
    return false;
}

/* Stop rendering; a failure only logs a warning. */
static void stop(struct ao *ao)
{
    struct priv *p = ao->priv;
    OSStatus err = AudioOutputUnitStop(p->audio_unit);
    CHECK_CA_WARN("can't stop audio unit");
}

/* Begin rendering via the callback installed in init_audiounit(). */
static void start(struct ao *ao)
{
    struct priv *p = ao->priv;
    OSStatus err = AudioOutputUnitStart(p->audio_unit);
    CHECK_CA_WARN("can't start audio unit");
}

/* Tear down the audio unit; if an original physical stream format was
 * recorded (mFormatID non-zero), restore it on the device's stream. */
static void uninit(struct ao *ao)
{
    struct priv *p = ao->priv;
    AudioOutputUnitStop(p->audio_unit);
    AudioUnitUninitialize(p->audio_unit);
    AudioComponentInstanceDispose(p->audio_unit);

    if (p->original_asbd.mFormatID) {
        OSStatus err = CA_SET(p->original_asbd_stream,
                              kAudioStreamPropertyPhysicalFormat,
                              &p->original_asbd);
        CHECK_CA_WARN("could not restore physical stream format");
    }
}

/* Device hotplug notification: re-resolve the device, then signal the
 * core that the device list changed. */
static OSStatus hotplug_cb(AudioObjectID id, UInt32 naddr,
                           const AudioObjectPropertyAddress addr[],
                           void *ctx)
{
    reinit_device(ctx);
    ao_hotplug_event(ctx);
    return noErr;
}
// Build and configure an AUHAL unit attached to the default input
// device: enable its input element, disable its output element, match
// the graph's stream format to the device sample rate, allocate the
// capture AudioBufferList and a ring buffer, and install the input
// callback. Exits the process if no matching component is found.
void CreateInputUnit (MyAUGraphPlayer *player)
{
	// generate description that will match audio HAL
	AudioComponentDescription inputcd = {0};
	inputcd.componentType = kAudioUnitType_Output;
	inputcd.componentSubType = kAudioUnitSubType_HALOutput;
	inputcd.componentManufacturer = kAudioUnitManufacturer_Apple;

	AudioComponent comp = AudioComponentFindNext(NULL, &inputcd);
	if (comp == NULL) {
		printf ("can't get output unit");
		exit (-1);
	}
	CheckError(AudioComponentInstanceNew(comp, &player->inputUnit),
			   "Couldn't open component for inputUnit");

	// AUHAL convention: element 1 is the input side, element 0 the
	// output side. Enable input and disable output so this unit only
	// captures (output must be off before setting the current device).
	UInt32 disableFlag = 0;
	UInt32 enableFlag = 1;
	AudioUnitScope outputBus = 0;
	AudioUnitScope inputBus = 1;
	CheckError (AudioUnitSetProperty(player->inputUnit,
									 kAudioOutputUnitProperty_EnableIO,
									 kAudioUnitScope_Input,
									 inputBus,
									 &enableFlag,
									 sizeof(enableFlag)),
				"Couldn't enable input on I/O unit");
	CheckError (AudioUnitSetProperty(player->inputUnit,
									 kAudioOutputUnitProperty_EnableIO,
									 kAudioUnitScope_Output,
									 outputBus,
									 &disableFlag,
									 sizeof(disableFlag)), // was sizeof(enableFlag); same size, but match the value actually passed
				"Couldn't disable output on I/O unit");

	// Look up the default input device via the AudioObject property API
	// (AudioHardwareGetProperty() has been deprecated since 10.6).
	AudioDeviceID defaultDevice = kAudioObjectUnknown;
	UInt32 propertySize = sizeof (defaultDevice);
	AudioObjectPropertyAddress defaultDeviceProperty;
	defaultDeviceProperty.mSelector = kAudioHardwarePropertyDefaultInputDevice;
	defaultDeviceProperty.mScope = kAudioObjectPropertyScopeGlobal;
	defaultDeviceProperty.mElement = kAudioObjectPropertyElementMaster;

	CheckError (AudioObjectGetPropertyData(kAudioObjectSystemObject,
										   &defaultDeviceProperty,
										   0,
										   NULL,
										   &propertySize,
										   &defaultDevice),
				"Couldn't get default input device");

	// set this defaultDevice as the input's property
	// (kAudioUnitErr_InvalidPropertyValue if output is enabled on inputUnit)
	CheckError(AudioUnitSetProperty(player->inputUnit,
									kAudioOutputUnitProperty_CurrentDevice,
									kAudioUnitScope_Global,
									outputBus,
									&defaultDevice,
									sizeof(defaultDevice)),
			   "Couldn't set default device on I/O unit");

	// use the stream format coming out of the AUHAL (should be de-interleaved)
	propertySize = sizeof (AudioStreamBasicDescription);
	CheckError(AudioUnitGetProperty(player->inputUnit,
									kAudioUnitProperty_StreamFormat,
									kAudioUnitScope_Output,
									inputBus,
									&player->streamFormat,
									&propertySize),
			   "Couldn't get ASBD from input unit");

	// Also fetch the hardware side's format so the graph can run at the
	// device's native sample rate (rate conversion is not done here).
	AudioStreamBasicDescription deviceFormat;
	CheckError(AudioUnitGetProperty(player->inputUnit,
									kAudioUnitProperty_StreamFormat,
									kAudioUnitScope_Input,
									inputBus,
									&deviceFormat,
									&propertySize),
			   "Couldn't get ASBD from input unit");

	printf ("Device rate %f, graph rate %f\n",
			deviceFormat.mSampleRate,
			player->streamFormat.mSampleRate);
	player->streamFormat.mSampleRate = deviceFormat.mSampleRate;

	propertySize = sizeof (AudioStreamBasicDescription);
	CheckError(AudioUnitSetProperty(player->inputUnit,
									kAudioUnitProperty_StreamFormat,
									kAudioUnitScope_Output,
									inputBus,
									&player->streamFormat,
									propertySize),
			   "Couldn't set ASBD on input unit");

	// Get the size of the device's IO buffer in frames so our capture
	// buffers can hold exactly one render cycle.
	UInt32 bufferSizeFrames = 0;
	propertySize = sizeof(UInt32);
	CheckError (AudioUnitGetProperty(player->inputUnit,
									 kAudioDevicePropertyBufferFrameSize,
									 kAudioUnitScope_Global,
									 0,
									 &bufferSizeFrames,
									 &propertySize),
				"Couldn't get buffer frame size from input unit");

	if (player->streamFormat.mFormatFlags & kAudioFormatFlagIsNonInterleaved) {
		printf ("format is non-interleaved\n");
		// One mono buffer per channel; each holds bufferSizeFrames
		// float samples, matching the 4-byte-per-frame mono streams.
		UInt32 bufferSizeBytes = bufferSizeFrames * sizeof(Float32);
		// allocate an AudioBufferList plus enough space for array of AudioBuffers
		UInt32 propsize = offsetof(AudioBufferList, mBuffers[0]) +
			(sizeof(AudioBuffer) * player->streamFormat.mChannelsPerFrame);

		player->inputBuffer = (AudioBufferList *)malloc(propsize);
		player->inputBuffer->mNumberBuffers = player->streamFormat.mChannelsPerFrame;

		for(UInt32 i =0; i< player->inputBuffer->mNumberBuffers ; i++) {
			player->inputBuffer->mBuffers[i].mNumberChannels = 1;
			player->inputBuffer->mBuffers[i].mDataByteSize = bufferSizeBytes;
			player->inputBuffer->mBuffers[i].mData = malloc(bufferSizeBytes);
		}
	} else {
		printf ("format is interleaved\n");
		// BUGFIX: an interleaved frame is mBytesPerFrame wide (all
		// channels), not sizeof(Float32); the old computation
		// under-allocated the buffer for multi-channel formats.
		UInt32 bufferSizeBytes = bufferSizeFrames * player->streamFormat.mBytesPerFrame;
		// allocate an AudioBufferList plus enough space for one AudioBuffer
		UInt32 propsize = offsetof(AudioBufferList, mBuffers[0]) +
			(sizeof(AudioBuffer) * 1);

		player->inputBuffer = (AudioBufferList *)malloc(propsize);
		player->inputBuffer->mNumberBuffers = 1;
		player->inputBuffer->mBuffers[0].mNumberChannels = player->streamFormat.mChannelsPerFrame;
		player->inputBuffer->mBuffers[0].mDataByteSize = bufferSizeBytes;
		player->inputBuffer->mBuffers[0].mData = malloc(bufferSizeBytes);
	}

	// Ring buffer that carries samples from the input callback to the
	// output side; sized for three render cycles of slack.
	player->ringBuffer = new CARingBuffer();
	player->ringBuffer->Allocate(player->streamFormat.mChannelsPerFrame,
								 player->streamFormat.mBytesPerFrame,
								 bufferSizeFrames * 3);

	// set render proc to supply samples from input unit
	AURenderCallbackStruct callbackStruct;
	callbackStruct.inputProc = InputRenderProc;
	callbackStruct.inputProcRefCon = player;
	CheckError(AudioUnitSetProperty(player->inputUnit,
									kAudioOutputUnitProperty_SetInputCallback,
									kAudioUnitScope_Global,
									0,
									&callbackStruct,
									sizeof(callbackStruct)),
			   "Couldn't set input callback");

	CheckError(AudioUnitInitialize(player->inputUnit),
			   "Couldn't initialize input unit");

	// Sentinels consumed by the render procs to align input/output time.
	player->firstInputSampleTime = -1;
	player->inToOutSampleTimeOffset = -1;

	printf ("Bottom of CreateInputUnit()\n");
}
int sound_open_AudioUnit(int rate, int bits, int stereo){ Float64 sampleRate = 48000.0; if( soundInit == 1 ) { sound_close_AudioUnit(); } if(rate==44100) sampleRate = 44100.0; if(rate==32000) sampleRate = 32000.0; else if(rate==22050) sampleRate = 22050.0; else if(rate==11025) sampleRate = 11025.0; //audioBufferSize = (rate / 60) * 2 * (stereo==1 ? 2 : 1) ; OSStatus status; // Describe audio component AudioComponentDescription desc; desc.componentType = kAudioUnitType_Output; desc.componentSubType = kAudioUnitSubType_RemoteIO; desc.componentFlags = 0; desc.componentFlagsMask = 0; desc.componentManufacturer = kAudioUnitManufacturer_Apple; // Get component AudioComponent inputComponent = AudioComponentFindNext(NULL, &desc); // Get audio units status = AudioComponentInstanceNew(inputComponent, &audioUnit); checkStatus(status); UInt32 flag = 1; // Enable IO for playback status = AudioUnitSetProperty(audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, kOutputBus, &flag, sizeof(flag)); checkStatus(status); AudioStreamBasicDescription audioFormat; memset (&audioFormat, 0, sizeof (audioFormat)); audioFormat.mSampleRate = sampleRate; audioFormat.mFormatID = kAudioFormatLinearPCM; audioFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked; audioFormat.mBytesPerPacket = (stereo == 1 ? 4 : 2 ); audioFormat.mFramesPerPacket = 1; audioFormat.mBytesPerFrame = (stereo == 1? 4 : 2); audioFormat.mChannelsPerFrame = (stereo == 1 ? 
2 : 1); audioFormat.mBitsPerChannel = 16; status = AudioUnitSetProperty(audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, kOutputBus, &audioFormat, sizeof(audioFormat)); checkStatus(status); struct AURenderCallbackStruct callbackStruct; // Set output callback callbackStruct.inputProc = playbackCallback; callbackStruct.inputProcRefCon = NULL; status = AudioUnitSetProperty(audioUnit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Global, kOutputBus, &callbackStruct, sizeof(callbackStruct)); checkStatus(status); status = AudioUnitInitialize(audioUnit); checkStatus(status); //ARRANCAR soundInit = 1; status = AudioOutputUnitStart(audioUnit); checkStatus(status); return 1; }
/* Open and configure the CoreAudio unit for this SDL device.
 * 'iscapture' selects the AUHAL element/scope pair, 'strdesc' is the
 * stream format to apply. On OSX (MACOSX_COREAUDIO) the legacy
 * Component API is used and a specific device may be selected; on iOS
 * the RemoteIO unit is used. Returns 1 on success, 0 on error (with
 * the SDL error string set; CHECK_RESULT handles API failures). */
static int
prepare_audiounit(_THIS, const char *devname, int iscapture,
                  const AudioStreamBasicDescription * strdesc)
{
    OSStatus result = noErr;
    AURenderCallbackStruct callback;
#if MACOSX_COREAUDIO
    ComponentDescription desc;
    Component comp = NULL;
#else
    AudioComponentDescription desc;
    AudioComponent comp = NULL;
#endif
    /* AUHAL convention: element 0 = output, element 1 = input; the
       scope is the side of the unit our callback exchanges data on. */
    const AudioUnitElement output_bus = 0;
    const AudioUnitElement input_bus = 1;
    const AudioUnitElement bus = ((iscapture) ? input_bus : output_bus);
    const AudioUnitScope scope = ((iscapture) ? kAudioUnitScope_Output :
                                  kAudioUnitScope_Input);

#if MACOSX_COREAUDIO
    if (!find_device_by_name(this, devname, iscapture)) {
        SDL_SetError("Couldn't find requested CoreAudio device");
        return 0;
    }
#endif

    SDL_zero(desc);
    desc.componentType = kAudioUnitType_Output;
    desc.componentManufacturer = kAudioUnitManufacturer_Apple;

#if MACOSX_COREAUDIO
    desc.componentSubType = kAudioUnitSubType_DefaultOutput;
    comp = FindNextComponent(NULL, &desc);
#else
    desc.componentSubType = kAudioUnitSubType_RemoteIO;  /* !!! FIXME: ? */
    comp = AudioComponentFindNext(NULL, &desc);
#endif

    if (comp == NULL) {
        SDL_SetError("Couldn't find requested CoreAudio component");
        return 0;
    }

    /* Open & initialize the audio unit */
#if MACOSX_COREAUDIO
    result = OpenAComponent(comp, &this->hidden->audioUnit);
    CHECK_RESULT("OpenAComponent");
#else
    /*
       AudioComponentInstanceNew only available on iPhone OS 2.0 and Mac OS X 10.6
       We can't use OpenAComponent on iPhone because it is not present
     */
    result = AudioComponentInstanceNew(comp, &this->hidden->audioUnit);
    CHECK_RESULT("AudioComponentInstanceNew");
#endif

    /* Remember we opened it so close paths can dispose of it. */
    this->hidden->audioUnitOpened = 1;

#if MACOSX_COREAUDIO
    /* Bind the unit to the device found by find_device_by_name(). */
    result = AudioUnitSetProperty(this->hidden->audioUnit,
                                  kAudioOutputUnitProperty_CurrentDevice,
                                  kAudioUnitScope_Global, 0,
                                  &this->hidden->deviceID,
                                  sizeof(AudioDeviceID));
    CHECK_RESULT
        ("AudioUnitSetProperty (kAudioOutputUnitProperty_CurrentDevice)");
#endif

    /* Set the data format of the audio unit. */
    result = AudioUnitSetProperty(this->hidden->audioUnit,
                                  kAudioUnitProperty_StreamFormat,
                                  scope, bus, strdesc, sizeof(*strdesc));
    CHECK_RESULT("AudioUnitSetProperty (kAudioUnitProperty_StreamFormat)");

    /* Set the audio callback */
    SDL_memset(&callback, 0, sizeof(AURenderCallbackStruct));
    callback.inputProc = ((iscapture) ? inputCallback : outputCallback);
    callback.inputProcRefCon = this;
    result = AudioUnitSetProperty(this->hidden->audioUnit,
                                  kAudioUnitProperty_SetRenderCallback,
                                  scope, bus, &callback, sizeof(callback));
    CHECK_RESULT
        ("AudioUnitSetProperty (kAudioUnitProperty_SetRenderCallback)");

    /* Calculate the final parameters for this audio specification */
    SDL_CalculateAudioSpec(&this->spec);

    /* Allocate a sample buffer.
       NOTE(review): the SDL_malloc result is not checked here; a failed
       allocation would only surface when the buffer is first used. */
    this->hidden->bufferOffset = this->hidden->bufferSize = this->spec.size;
    this->hidden->buffer = SDL_malloc(this->hidden->bufferSize);

    result = AudioUnitInitialize(this->hidden->audioUnit);
    CHECK_RESULT("AudioUnitInitialize");

    /* Finally, start processing of the audio unit */
    result = AudioOutputUnitStart(this->hidden->audioUnit);
    CHECK_RESULT("AudioOutputUnitStart");

    /* We're running! */
    return 1;
}
/* Create a cubeb output stream on the default output unit: translate
 * the cubeb sample format into an AudioStreamBasicDescription, open
 * the DefaultOutput component, install the render callback and
 * initialize the unit. The stream starts paused; *stream receives the
 * new handle on CUBEB_OK. */
static int
audiounit_stream_init(cubeb * context, cubeb_stream ** stream,
                      char const * stream_name,
                      cubeb_stream_params stream_params,
                      unsigned int latency,
                      cubeb_data_callback data_callback,
                      cubeb_state_callback state_callback,
                      void * user_ptr)
{
  AudioStreamBasicDescription ss;
#if MAC_OS_X_VERSION_MIN_REQUIRED < 1060
  /* Pre-10.6 SDKs only have the legacy Component Manager API. */
  ComponentDescription desc;
  Component comp;
#else
  AudioComponentDescription desc;
  AudioComponent comp;
#endif
  cubeb_stream * stm;
  AURenderCallbackStruct input;
  unsigned int buffer_size;
  OSStatus r;

  assert(context);
  *stream = NULL;

  /* Build the LPCM description from the requested cubeb format. */
  memset(&ss, 0, sizeof(ss));
  ss.mFormatFlags = 0;

  switch (stream_params.format) {
  case CUBEB_SAMPLE_S16LE:
    ss.mBitsPerChannel = 16;
    ss.mFormatFlags |= kAudioFormatFlagIsSignedInteger;
    break;
  case CUBEB_SAMPLE_S16BE:
    ss.mBitsPerChannel = 16;
    ss.mFormatFlags |= kAudioFormatFlagIsSignedInteger |
      kAudioFormatFlagIsBigEndian;
    break;
  case CUBEB_SAMPLE_FLOAT32LE:
    ss.mBitsPerChannel = 32;
    ss.mFormatFlags |= kAudioFormatFlagIsFloat;
    break;
  case CUBEB_SAMPLE_FLOAT32BE:
    ss.mBitsPerChannel = 32;
    ss.mFormatFlags |= kAudioFormatFlagIsFloat |
      kAudioFormatFlagIsBigEndian;
    break;
  default:
    return CUBEB_ERROR_INVALID_FORMAT;
  }

  ss.mFormatID = kAudioFormatLinearPCM;
  ss.mFormatFlags |= kLinearPCMFormatFlagIsPacked;
  ss.mSampleRate = stream_params.rate;
  ss.mChannelsPerFrame = stream_params.channels;

  ss.mBytesPerFrame = (ss.mBitsPerChannel / 8) * ss.mChannelsPerFrame;
  ss.mFramesPerPacket = 1;
  ss.mBytesPerPacket = ss.mBytesPerFrame * ss.mFramesPerPacket;

  desc.componentType = kAudioUnitType_Output;
  desc.componentSubType = kAudioUnitSubType_DefaultOutput;
  desc.componentManufacturer = kAudioUnitManufacturer_Apple;
  desc.componentFlags = 0;
  desc.componentFlagsMask = 0;
#if MAC_OS_X_VERSION_MIN_REQUIRED < 1060
  comp = FindNextComponent(NULL, &desc);
#else
  comp = AudioComponentFindNext(NULL, &desc);
#endif
  assert(comp);

  stm = calloc(1, sizeof(*stm));
  assert(stm);

  stm->context = context;
  stm->data_callback = data_callback;
  stm->state_callback = state_callback;
  stm->user_ptr = user_ptr;

  stm->sample_spec = ss;

  r = pthread_mutex_init(&stm->mutex, NULL);
  assert(r == 0);

  stm->frames_played = 0;
  stm->frames_queued = 0;

#if MAC_OS_X_VERSION_MIN_REQUIRED < 1060
  r = OpenAComponent(comp, &stm->unit);
#else
  r = AudioComponentInstanceNew(comp, &stm->unit);
#endif
  if (r != 0) {
    audiounit_stream_destroy(stm);
    return CUBEB_ERROR;
  }

  input.inputProc = audiounit_output_callback;
  input.inputProcRefCon = stm;
  r = AudioUnitSetProperty(stm->unit, kAudioUnitProperty_SetRenderCallback,
                           kAudioUnitScope_Global, 0, &input, sizeof(input));
  if (r != 0) {
    audiounit_stream_destroy(stm);
    return CUBEB_ERROR;
  }

  r = AudioUnitSetProperty(stm->unit, kAudioUnitProperty_StreamFormat,
                           kAudioUnitScope_Input, 0, &ss, sizeof(ss));
  if (r != 0) {
    audiounit_stream_destroy(stm);
    return CUBEB_ERROR;
  }

  /* Derive a per-buffer byte size from the requested latency, rounded
   * up to a whole frame.
   * NOTE(review): buffer_size is computed and asserted but not used
   * anywhere else in this function — apparently vestigial. */
  buffer_size = ss.mSampleRate / 1000.0 * latency * ss.mBytesPerFrame / NBUFS;
  if (buffer_size % ss.mBytesPerFrame != 0) {
    buffer_size += ss.mBytesPerFrame - (buffer_size % ss.mBytesPerFrame);
  }
  assert(buffer_size % ss.mBytesPerFrame == 0);

  r = AudioUnitInitialize(stm->unit);
  if (r != 0) {
    audiounit_stream_destroy(stm);
    return CUBEB_ERROR;
  }

  *stream = stm;

  return CUBEB_OK;
}
static int audiounit_stream_init(cubeb * context, cubeb_stream ** stream, char const * stream_name, cubeb_stream_params stream_params, unsigned int latency, cubeb_data_callback data_callback, cubeb_state_callback state_callback, void * user_ptr) { AudioStreamBasicDescription ss; #if MAC_OS_X_VERSION_MIN_REQUIRED < 1060 ComponentDescription desc; Component comp; #else AudioComponentDescription desc; AudioComponent comp; #endif cubeb_stream * stm; AURenderCallbackStruct input; unsigned int buffer_size, default_buffer_size; OSStatus r; UInt32 size; AudioDeviceID output_device_id; AudioValueRange latency_range; assert(context); *stream = NULL; memset(&ss, 0, sizeof(ss)); ss.mFormatFlags = 0; switch (stream_params.format) { case CUBEB_SAMPLE_S16LE: ss.mBitsPerChannel = 16; ss.mFormatFlags |= kAudioFormatFlagIsSignedInteger; break; case CUBEB_SAMPLE_S16BE: ss.mBitsPerChannel = 16; ss.mFormatFlags |= kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsBigEndian; break; case CUBEB_SAMPLE_FLOAT32LE: ss.mBitsPerChannel = 32; ss.mFormatFlags |= kAudioFormatFlagIsFloat; break; case CUBEB_SAMPLE_FLOAT32BE: ss.mBitsPerChannel = 32; ss.mFormatFlags |= kAudioFormatFlagIsFloat | kAudioFormatFlagIsBigEndian; break; default: return CUBEB_ERROR_INVALID_FORMAT; } ss.mFormatID = kAudioFormatLinearPCM; ss.mFormatFlags |= kLinearPCMFormatFlagIsPacked; ss.mSampleRate = stream_params.rate; ss.mChannelsPerFrame = stream_params.channels; ss.mBytesPerFrame = (ss.mBitsPerChannel / 8) * ss.mChannelsPerFrame; ss.mFramesPerPacket = 1; ss.mBytesPerPacket = ss.mBytesPerFrame * ss.mFramesPerPacket; desc.componentType = kAudioUnitType_Output; desc.componentSubType = kAudioUnitSubType_DefaultOutput; desc.componentManufacturer = kAudioUnitManufacturer_Apple; desc.componentFlags = 0; desc.componentFlagsMask = 0; #if MAC_OS_X_VERSION_MIN_REQUIRED < 1060 comp = FindNextComponent(NULL, &desc); #else comp = AudioComponentFindNext(NULL, &desc); #endif assert(comp); stm = calloc(1, sizeof(*stm)); assert(stm); 
stm->context = context; stm->data_callback = data_callback; stm->state_callback = state_callback; stm->user_ptr = user_ptr; stm->sample_spec = ss; r = pthread_mutex_init(&stm->mutex, NULL); assert(r == 0); stm->frames_played = 0; stm->frames_queued = 0; stm->current_latency_frames = 0; stm->hw_latency_frames = UINT64_MAX; #if MAC_OS_X_VERSION_MIN_REQUIRED < 1060 r = OpenAComponent(comp, &stm->unit); #else r = AudioComponentInstanceNew(comp, &stm->unit); #endif if (r != 0) { audiounit_stream_destroy(stm); return CUBEB_ERROR; } input.inputProc = audiounit_output_callback; input.inputProcRefCon = stm; r = AudioUnitSetProperty(stm->unit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Global, 0, &input, sizeof(input)); if (r != 0) { audiounit_stream_destroy(stm); return CUBEB_ERROR; } buffer_size = latency / 1000.0 * ss.mSampleRate; /* Get the range of latency this particular device can work with, and clamp * the requested latency to this acceptable range. */ if (audiounit_get_acceptable_latency_range(&latency_range) != CUBEB_OK) { audiounit_stream_destroy(stm); return CUBEB_ERROR; } if (buffer_size < (unsigned int) latency_range.mMinimum) { buffer_size = (unsigned int) latency_range.mMinimum; } else if (buffer_size > (unsigned int) latency_range.mMaximum) { buffer_size = (unsigned int) latency_range.mMaximum; } /** * Get the default buffer size. If our latency request is below the default, * set it. Otherwise, use the default latency. **/ size = sizeof(default_buffer_size); r = AudioUnitGetProperty(stm->unit, kAudioDevicePropertyBufferFrameSize, kAudioUnitScope_Output, 0, &default_buffer_size, &size); if (r != 0) { audiounit_stream_destroy(stm); return CUBEB_ERROR; } // Setting the latency doesn't work well for USB headsets (eg. plantronics). // Keep the default latency for now. #if 0 if (buffer_size < default_buffer_size) { /* Set the maximum number of frame that the render callback will ask for, * effectively setting the latency of the stream. 
This is process-wide. */ r = AudioUnitSetProperty(stm->unit, kAudioDevicePropertyBufferFrameSize, kAudioUnitScope_Output, 0, &buffer_size, sizeof(buffer_size)); if (r != 0) { audiounit_stream_destroy(stm); return CUBEB_ERROR; } } #endif r = AudioUnitSetProperty(stm->unit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &ss, sizeof(ss)); if (r != 0) { audiounit_stream_destroy(stm); return CUBEB_ERROR; } r = AudioUnitInitialize(stm->unit); if (r != 0) { audiounit_stream_destroy(stm); return CUBEB_ERROR; } *stream = stm; return CUBEB_OK; }
/* Initialize the RemoteIO audio unit for interleaved 16-bit stereo I/O:
 * set up the audio session (category, route listeners, preferred
 * latency), create the RemoteIO instance, enable input, cap the frames
 * per slice, install the render callback and apply the stream format on
 * both the output (scope Input, element 0) and the input (scope Output,
 * element 1) sides. Returns 0 on success, the failing OSStatus
 * otherwise. */
sint_t aubio_audio_unit_init (aubio_audio_unit_t *o)
{
  OSStatus err = noErr;
  Float32 latency = o->latency;
  Float64 samplerate = (Float64)o->samplerate;

  o->au_ios_cb_struct.inputProc = aubio_audio_unit_process;
  o->au_ios_cb_struct.inputProcRefCon = o;

  /* setting up audio session with interruption listener */
  err = AudioSessionInitialize(NULL, NULL, audio_unit_interruption_listener, o);
  if (err) { AUBIO_ERR("audio_unit: could not initialize audio session (%d)\n", (int)err); goto fail; }

  audio_unit_set_audio_session_category(o->input_enabled, o->verbose);
  audio_unit_check_audio_route(o);

  /* add route change listener */
  err = AudioSessionAddPropertyListener(kAudioSessionProperty_AudioRouteChange,
      audio_unit_route_change_listener, o);
  if (err) { AUBIO_ERR("audio_unit: could not set route change listener (%d)\n", (int)err); goto fail; }

  /* set latency */
  err = AudioSessionSetProperty(kAudioSessionProperty_PreferredHardwareIOBufferDuration,
      sizeof(latency), &latency);
  if (err) { AUBIO_ERR("audio_unit: could not set preferred latency (%d)\n", (int)err); goto fail; }

#if 0 // only for iphone OS >= 3.1
  UInt32 val = 1; // set to 0 (default) to use ear speaker in voice application
  err = AudioSessionSetProperty(kAudioSessionProperty_OverrideCategoryDefaultToSpeaker,
      sizeof(UInt32), &val);
  if (err) { AUBIO_ERR("audio_unit: could not set session property to default to speaker\n"); }
#endif

  /* setting up audio unit */
  AudioComponentDescription desc;
  desc.componentManufacturer = kAudioUnitManufacturer_Apple;
  desc.componentSubType = kAudioUnitSubType_RemoteIO;
  desc.componentType = kAudioUnitType_Output;
  desc.componentFlags = 0;
  desc.componentFlagsMask = 0;

  AudioStreamBasicDescription audioFormat;

  /* look for a component that match the description */
  AudioComponent comp = AudioComponentFindNext(NULL, &desc);

  /* create the audio component */
  AudioUnit *audio_unit = &(o->audio_unit);

  err = AudioComponentInstanceNew(comp, &(o->audio_unit));
  if (err) { AUBIO_ERR("audio_unit: failed creating the audio unit\n"); goto fail; }

  /* enable IO on the input element (1) */
  UInt32 enabled = 1;
  err = AudioUnitSetProperty (*audio_unit, kAudioOutputUnitProperty_EnableIO,
      kAudioUnitScope_Input, 1, &enabled, sizeof(enabled));
  if (err) { AUBIO_ERR("audio_unit: failed enabling input of audio unit\n"); goto fail; }

  /* set max fps */
  UInt32 max_fps = MIN(o->blocksize, MAX_FPS);
  err = AudioUnitSetProperty (*audio_unit, kAudioUnitProperty_MaximumFramesPerSlice,
      kAudioUnitScope_Global, 0, &max_fps, sizeof(max_fps));
  if (err) { AUBIO_ERR("audio_unit: could not set maximum frames per slice property (%d)\n", (int)err); goto fail; }

  /* BUGFIX: the result of this call was previously discarded (no
   * `err =`), so the following check tested the stale status of the
   * preceding call and a callback-install failure went unnoticed. */
  err = AudioUnitSetProperty (*audio_unit, kAudioUnitProperty_SetRenderCallback,
      kAudioUnitScope_Input, 0, &(o->au_ios_cb_struct), sizeof(o->au_ios_cb_struct));
  if (err) { AUBIO_ERR("audio_unit: failed setting audio unit render callback\n"); goto fail; }

#if 0
  err = AudioUnitSetProperty (*audio_unit, kAudioUnitProperty_SampleRate,
      kAudioUnitScope_Input, 0, &samplerate, sizeof(Float64));
  if (err) { AUBIO_ERR("audio_unit: could not set audio input sample rate\n"); goto fail; }
  err = AudioUnitSetProperty (*audio_unit, kAudioUnitProperty_SampleRate,
      kAudioUnitScope_Output, 1, &samplerate, sizeof(Float64));
  if (err) { AUBIO_ERR("audio_unit: could not set audio input sample rate\n"); goto fail; }
#endif

  /* interleaved native-endian signed 16-bit stereo PCM */
  audioFormat.mSampleRate = (Float64)samplerate;
  audioFormat.mChannelsPerFrame = 2;
  audioFormat.mFormatID = kAudioFormatLinearPCM;
  audioFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger
    | kAudioFormatFlagsNativeEndian
    | kAudioFormatFlagIsPacked;
  audioFormat.mFramesPerPacket = 1;
  audioFormat.mBitsPerChannel = 8 * sizeof(SInt16);
#if 1  // interleaving
  audioFormat.mBytesPerFrame = 2 * sizeof(SInt16);
  audioFormat.mBytesPerPacket = 2 * sizeof(SInt16);
#else
  audioFormat.mBytesPerPacket = audioFormat.mBytesPerFrame = sizeof(SInt32);
  audioFormat.mFormatFlags |= kAudioFormatFlagIsNonInterleaved;
#endif

  err = AudioUnitSetProperty (*audio_unit, kAudioUnitProperty_StreamFormat,
      kAudioUnitScope_Input, 0, &audioFormat, sizeof(audioFormat));
  if (err) { AUBIO_ERR("audio_unit: could not set audio output format\n"); goto fail; }
  err = AudioUnitSetProperty (*audio_unit, kAudioUnitProperty_StreamFormat,
      kAudioUnitScope_Output, 1, &audioFormat, sizeof(audioFormat));
  if (err) { AUBIO_ERR("audio_unit: could not set audio input format\n"); goto fail; }

#if 0
  AudioStreamBasicDescription thruFormat;
  thissize = sizeof(thruFormat);
  err = AudioUnitGetProperty (*audio_unit, kAudioUnitProperty_StreamFormat,
      kAudioUnitScope_Input, 0, &thruFormat, &thissize);
  if (err) { AUBIO_ERR("audio_unit: could not get speaker output format, err: %d\n", (int)err); goto fail; }
  err = AudioUnitSetProperty (*audio_unit, kAudioUnitProperty_StreamFormat,
      kAudioUnitScope_Output, 1, &thruFormat, sizeof(thruFormat));
  if (err) { AUBIO_ERR("audio_unit: could not set input audio format, err: %d\n", (int)err); goto fail; }
#endif

  /* time to initialize the unit */
  err = AudioUnitInitialize(*audio_unit);
  if (err) { AUBIO_ERR("audio_unit: failed initializing audio, err: %d\n", (int)err); goto fail; }

  return 0;

fail:
  return err;
}
/* Locate and instantiate the requested output AudioUnit sub-type.
 * For source elements the unit is switched into capture mode (input
 * element enabled, output element disabled) before being stored on
 * core_audio. Returns TRUE on success, FALSE otherwise (the unit is
 * disposed of on any failure after creation). */
gboolean
gst_core_audio_open_device (GstCoreAudio * core_audio, OSType sub_type,
    const gchar * adesc)
{
  AudioComponentDescription lookup;
  AudioComponent component;
  AudioUnit new_unit;
  OSStatus rc;
  UInt32 io_flag;

  lookup.componentType = kAudioUnitType_Output;
  lookup.componentSubType = sub_type;
  lookup.componentManufacturer = kAudioUnitManufacturer_Apple;
  lookup.componentFlags = 0;
  lookup.componentFlagsMask = 0;

  component = AudioComponentFindNext (NULL, &lookup);
  if (!component) {
    GST_WARNING_OBJECT (core_audio->osxbuf, "Couldn't find %s component",
        adesc);
    return FALSE;
  }

  rc = AudioComponentInstanceNew (component, &new_unit);
  if (rc) {
    GST_ERROR_OBJECT (core_audio->osxbuf, "Couldn't open %s component %"
        GST_FOURCC_FORMAT, adesc, GST_FOURCC_ARGS (rc));
    return FALSE;
  }

  if (core_audio->is_src) {
    /* Capture mode: turn on the input element (1)... */
    io_flag = 1;
    rc = AudioUnitSetProperty (new_unit, kAudioOutputUnitProperty_EnableIO,
        kAudioUnitScope_Input, 1, &io_flag, sizeof (io_flag));
    if (rc) {
      AudioComponentInstanceDispose (new_unit);
      GST_WARNING_OBJECT (core_audio->osxbuf, "Failed to enable input: %"
          GST_FOURCC_FORMAT, GST_FOURCC_ARGS (rc));
      return FALSE;
    }

    /* ...and turn off the output element (0). */
    io_flag = 0;
    rc = AudioUnitSetProperty (new_unit, kAudioOutputUnitProperty_EnableIO,
        kAudioUnitScope_Output, 0, &io_flag, sizeof (io_flag));
    if (rc) {
      AudioComponentInstanceDispose (new_unit);
      GST_WARNING_OBJECT (core_audio->osxbuf, "Failed to disable output: %"
          GST_FOURCC_FORMAT, GST_FOURCC_ARGS (rc));
      return FALSE;
    }
  }

  GST_DEBUG_OBJECT (core_audio->osxbuf, "Created %s AudioUnit: %p", adesc,
      new_unit);

  core_audio->audiounit = new_unit;
  return TRUE;
}