// Component Open Request - Required pascal ComponentResult DelegateOnly_ImageCodecOpen(DelegateOnly_Globals glob, ComponentInstance self) { ComponentDescription cd = { decompressorComponentType, k422YpCbCr8CodecType, FOUR_CHAR_CODE('app3'), 0, 0 }; Component c = 0; ComponentResult rc; // Allocate memory for our globals, set them up and inform the component manager that we've done so glob = (DelegateOnly_Globals)NewPtrClear(sizeof(DelegateOnly_GlobalsRecord)); if (rc = MemError()) goto bail; SetComponentInstanceStorage(self, (Handle)glob); glob->self = self; glob->target = self; if (c = FindNextComponent(c, &cd)) { rc = OpenAComponent(c, &glob->delegateComponent); if (rc) goto bail; ComponentSetTarget(glob->delegateComponent, self); } bail: return rc; }
/*
 * openOutput: locate, open and initialize the system default output
 * AudioUnit, returning it through *outputUnit.
 *
 * Any failure prints a diagnostic to stderr and terminates the process,
 * so a successful return always carries noErr and a usable unit.
 */
OSStatus openOutput (AudioUnit *outputUnit)
{
    OSStatus status = noErr;

    /* Describe Apple's default-output audio unit. */
    ComponentDescription description;
    description.componentType         = kAudioUnitType_Output;
    description.componentSubType      = kAudioUnitSubType_DefaultOutput;
    description.componentManufacturer = kAudioUnitManufacturer_Apple;
    description.componentFlags        = 0;
    description.componentFlagsMask    = 0;

    Component component = FindNextComponent(NULL, &description);
    if (component == NULL) {
        fprintf(stderr, "Could not find audio output device.\n");
        exit(EXIT_FAILURE);
    }

    status = OpenAComponent(component, outputUnit);
    if (status != noErr) {
        fprintf(stderr, "Could not open audio output device.\n");
        exit(EXIT_FAILURE);
    }

    status = AudioUnitInitialize(*outputUnit);
    if (status != noErr) {
        fprintf(stderr, "Could not initialize audio output device.\n");
        exit(EXIT_FAILURE);
    }

    return status;
}
// Component Manager "open" entry point for the volume-catcher component.
// Allocates per-instance storage, opens the real default-output unit
// (impl->mOriginalDefaultOutput) as a delegate, and registers the new
// instance with the global VolumeCatcherImpl so volumes can be managed
// centrally.
//
// NOTE(review): the incoming 'storage' argument is immediately overwritten —
// presumably the dispatcher passes NULL here; confirm against the caller.
static ComponentResult volume_catcher_component_open(VolumeCatcherStorage *storage, ComponentInstance self)
{
	ComponentResult result = noErr;
	VolumeCatcherImpl *impl = VolumeCatcherImpl::getInstance();

	storage = new VolumeCatcherStorage;
	storage->self = self;
	storage->delegate = NULL;

	// Open the wrapped (original) default output component as our delegate.
	result = OpenAComponent(impl->mOriginalDefaultOutput, &(storage->delegate));

	if(result != noErr)
	{
		// std::cerr << "OpenAComponent result = " << result << ", component ref = " << storage->delegate << std::endl;

		// If we failed to open the delegate component, our open is going to fail.  Clean things up.
		delete storage;
	}
	else
	{
		// Success -- set up this component's storage
		SetComponentInstanceStorage(self, (Handle)storage);

		// add this instance to the global list
		impl->mComponentInstances.push_back(storage);

		// and set up the initial volume
		impl->setInstanceVolume(storage);
	}

	return result;
}
// Replace the currently-displayed AudioUnit with 'editUnit': tear down the
// old view, open the AU-view component described by inDesc, embed its Carbon
// view in the window's root control, and resize the window to fit.
void AUEditWindow::SetUnitToDisplay (AudioUnit editUnit, ComponentDescription& inDesc)
{
	CloseView();	// tear down any previously-embedded AU view first
	mEditUnit = editUnit;

	// NOTE(review): FindNextComponent may return NULL; OpenAComponent would
	// then fail and mEditView would be left unset — confirm inDesc always
	// matches an installed view component.
	Component editComp = FindNextComponent(NULL, &inDesc);
	verify_noerr(OpenAComponent(editComp, &mEditView));

	ControlRef rootControl;
	verify_noerr(GetRootControl(mWindow, &rootControl));

	Rect r;
	ControlRef viewPane;
	GetControlBounds(rootControl, &r);
	// Place the view at the configured offset, initially sized to the root control.
	Float32Point location = { kOffsetForAUView_X, kOffsetForAUView_Y };
	Float32Point size = { Float32(r.right), Float32(r.bottom) };
	verify_noerr(AudioUnitCarbonViewCreate(mEditView, mEditUnit, mWindow, rootControl, &location, &size, &viewPane));
	AudioUnitCarbonViewSetEventListener(mEditView, EventListener, this);

	// Resize the window to the bounds the view actually took, keeping the offset.
	GetControlBounds(viewPane, &r);
	size.x = r.right-r.left + kOffsetForAUView_X;
	size.y = r.bottom-r.top + kOffsetForAUView_Y;

	Rect r2;
	GetControlBounds (mResizeableControl->MacControl(), &r2);
	// Keep the resize control visible when the AU view is shorter than it.
	if ((r.bottom - r.top) < (r2.bottom - r2.top + 20))
		size.y = r2.bottom + 20;
	SetSize(size);
}
bool auLoader::loadPlugin() { Component comp = FindNextComponent(NULL, &m_desc); if(comp == NULL) { debug(LOG_ERROR, "AU '%s' '%s' '%s' was not found", m_type, m_subtype, m_manuf); return false; } else { debug(LOG_INFO, "AU '%s' '%s' '%s' found", m_type, m_subtype, m_manuf); } OSErr result = OpenAComponent(comp, &m_plugin); if(result) { debug(LOG_ERROR, "Could not open AU"); return false; } else { debug(LOG_INFO, "AU opened"); } return true; }
// Construct an editor window for 'editUnit' using the AU-view component
// described by inCompDesc.  If the nib supplies a custom pane control
// (signature 'cust', id 1000) the view is embedded there at the pane's
// bounds; otherwise it is embedded in the root control and the window is
// resized to the view's natural size.
AUEditWindow::AUEditWindow(XController *owner, IBNibRef nibRef, CFStringRef name, AudioUnit editUnit, ComponentDescription inCompDesc ) :
	XWindow(owner, nibRef, name), mEditUnit(editUnit)
{
	Component editComp = FindNextComponent(NULL, &inCompDesc);
	verify_noerr(OpenAComponent(editComp, &mEditView));

	ControlRef rootControl;
	verify_noerr(GetRootControl(mWindow, &rootControl));

	// Look for an optional custom host pane in the nib.
	ControlRef customControl = 0;
	ControlID controlID;
	controlID.signature = 'cust';
	controlID.id = 1000;
	GetControlByID( mWindow, &controlID, &customControl );

	ControlRef ourControl = customControl ? customControl : rootControl;

	Rect r = {0,0,400,400};	// default bounds used when there is no custom pane
	ControlRef viewPane;
	if(customControl)
		GetControlBounds(ourControl, &r);
	Float32Point location = { r.left, r.top };
	Float32Point size = { Float32(r.right - r.left ), Float32(r.bottom - r.top ) };
	verify_noerr(AudioUnitCarbonViewCreate(mEditView, mEditUnit, mWindow, ourControl, &location, &size, &viewPane));

	// Read back the bounds the view actually created.
	GetControlBounds(viewPane, &r);
	size.x = r.right-r.left;
	size.y = r.bottom-r.top;
	// Only resize the window when the view went into the root control;
	// a custom pane keeps the nib's layout.
	if(!customControl)
		SetSize(size);
	Show();
}
// Construct the audio destination: find and open the system default output
// AudioUnit, initialize it, then apply stream configuration via configure().
AudioDestinationMac::AudioDestinationMac(AudioSourceProvider& provider, float sampleRate)
    : m_outputUnit(0)
    , m_provider(provider)
    , m_renderBus(2, kBufferSize, false)	// 2-channel render bus
    , m_sampleRate(sampleRate)
    , m_isPlaying(false)
{
    // Open and initialize DefaultOutputUnit
    Component comp;
    ComponentDescription desc;

    desc.componentType = kAudioUnitType_Output;
    desc.componentSubType = kAudioUnitSubType_DefaultOutput;
    desc.componentManufacturer = kAudioUnitManufacturer_Apple;
    desc.componentFlags = 0;
    desc.componentFlagsMask = 0;
    comp = FindNextComponent(0, &desc);
    ASSERT(comp);

    OSStatus result = OpenAComponent(comp, &m_outputUnit);
    ASSERT(!result);

    result = AudioUnitInitialize(m_outputUnit);
    ASSERT(!result);

    configure();
}
// Construct an editor window for 'editUnit'.  Unless forceGeneric is set,
// the AU is asked for its own UI component list and the first entry is
// used; otherwise (or when the AU offers none) Apple's generic AU view
// ('gnrc') is used.  The view is embedded in the window's root control and
// the window is resized to the view's bounds.
AUEditWindow::AUEditWindow(XController *owner, IBNibRef nibRef, CFStringRef name, AudioUnit editUnit, bool forceGeneric) :
	XWindow(owner, nibRef, name), mEditUnit(editUnit)
{
	OSStatus err;
	ComponentDescription editorComponentDesc;

	// set up to use generic UI component
	editorComponentDesc.componentType = kAudioUnitCarbonViewComponentType;
	editorComponentDesc.componentSubType = 'gnrc';
	editorComponentDesc.componentManufacturer = 'appl';
	editorComponentDesc.componentFlags = 0;
	editorComponentDesc.componentFlagsMask = 0;

	if (!forceGeneric) {
		// ask the AU for its first editor component
		UInt32 propertySize;
		err = AudioUnitGetPropertyInfo(editUnit, kAudioUnitProperty_GetUIComponentList, kAudioUnitScope_Global, 0, &propertySize, NULL);
		if (!err) {
			int nEditors = propertySize / sizeof(ComponentDescription);
			// FIX: guard against an empty component list — previously
			// editors[0] was read even when nEditors == 0, an
			// out-of-bounds access.  Fall back to the generic view.
			if (nEditors > 0) {
				ComponentDescription *editors = new ComponentDescription[nEditors];
				err = AudioUnitGetProperty(editUnit, kAudioUnitProperty_GetUIComponentList, kAudioUnitScope_Global, 0, editors, &propertySize);
				if (!err)
					// just pick the first one for now
					editorComponentDesc = editors[0];
				delete[] editors;
			}
		}
	}

	Component editComp = FindNextComponent(NULL, &editorComponentDesc);
	verify_noerr(OpenAComponent(editComp, &mEditView));

	ControlRef rootControl;
	verify_noerr(GetRootControl(mWindow, &rootControl));

	Rect r;
	ControlRef viewPane;
	GetControlBounds(rootControl, &r);
	Float32Point location = { 0., 0. };
	Float32Point size = { Float32(r.right), Float32(r.bottom) };
	verify_noerr(AudioUnitCarbonViewCreate(mEditView, mEditUnit, mWindow, rootControl, &location, &size, &viewPane));
	AudioUnitCarbonViewSetEventListener(mEditView, EventListener, this);

	// Resize the window to the view's actual bounds.
	GetControlBounds(viewPane, &r);
	size.x = r.right-r.left;
	size.y = r.bottom-r.top;
	SetSize(size);
	Show();

/*	EventLoopTimerRef timer;
	RequireNoErr(
		InstallEventLoopTimer(
			GetMainEventLoop(), 5., 0., TimerProc, this, &timer));*/
}
// ALC playback-open backend: accepts only the built-in device name
// (ca_device), finds the default output audio unit (RemoteIO on iOS),
// opens and initializes it.  On success the per-device ca_data is stored
// in device->ExtraData; on failure everything opened so far is released.
static ALCenum ca_open_playback(ALCdevice *device, const ALCchar *deviceName)
{
    ComponentDescription desc;
    Component comp;
    ca_data *data;
    OSStatus err;

    if(!deviceName)
        deviceName = ca_device;
    else if(strcmp(deviceName, ca_device) != 0)
        return ALC_INVALID_VALUE;

    /* open the default output unit */
    desc.componentType = kAudioUnitType_Output;
#if TARGET_OS_IPHONE
    desc.componentSubType = kAudioUnitSubType_RemoteIO;
#else
    desc.componentSubType = kAudioUnitSubType_DefaultOutput;
#endif // TARGET_OS_IPHONE
    desc.componentManufacturer = kAudioUnitManufacturer_Apple;
    desc.componentFlags = 0;
    desc.componentFlagsMask = 0;

    comp = FindNextComponent(NULL, &desc);
    if(comp == NULL)
    {
        ERR("FindNextComponent failed\n");
        return ALC_INVALID_VALUE;
    }

    // NOTE(review): calloc is unchecked; on allocation failure the
    // OpenAComponent call below dereferences NULL — confirm policy.
    data = calloc(1, sizeof(*data));

    err = OpenAComponent(comp, &data->audioUnit);
    if(err != noErr)
    {
        ERR("OpenAComponent failed\n");
        free(data);
        return ALC_INVALID_VALUE;
    }

    /* init and start the default audio unit... */
    err = AudioUnitInitialize(data->audioUnit);
    if(err != noErr)
    {
        ERR("AudioUnitInitialize failed\n");
        CloseComponent(data->audioUnit);
        free(data);
        return ALC_INVALID_VALUE;
    }

    device->DeviceName = strdup(deviceName);
    device->ExtraData = data;
    return ALC_NO_ERROR;
}
// Create and initialize the default output AudioUnit (global 'out') and an
// AudioConverter (global 'conv') from 44.1 kHz 16-bit interleaved stereo
// PCM to the unit's stream format.  Returns 0 on success, -1 when the
// default output component is missing, otherwise the OSStatus error.
int initoutput(){
	ComponentDescription desc;
	Component comp;
	OSStatus err;
	UInt32 size;
	Boolean canwrite;
	AudioStreamBasicDescription inputdesc,outputdesc;

	desc.componentType=kAudioUnitType_Output;
	desc.componentSubType=kAudioUnitSubType_DefaultOutput;
	desc.componentManufacturer=kAudioUnitManufacturer_Apple;
	desc.componentFlags=0;
	desc.componentFlagsMask=0;

	comp=FindNextComponent(NULL,&desc);
	if (comp==NULL) return -1;
	err=OpenAComponent(comp,&out);
	if (err) return err;
	err=AudioUnitInitialize(out);
	if (err) return err;

	// NOTE(review): size/writability is queried on the Output scope but the
	// format is then read from the Input scope into 'outputdesc' — confirm
	// this mismatch is intentional (the converter below targets the unit's
	// *input* format, so the name 'outputdesc' is misleading).
	err=AudioUnitGetPropertyInfo(out,kAudioUnitProperty_StreamFormat,kAudioUnitScope_Output,0,&size,&canwrite);
	if (err) return err;
	err=AudioUnitGetProperty(out,kAudioUnitProperty_StreamFormat,kAudioUnitScope_Input,0,&outputdesc,&size);
	if (err) return err;
	// dumpdesc(&outputdesc);

	// Source format: 44.1 kHz, 16-bit signed, packed, interleaved stereo.
	inputdesc.mSampleRate=44100.0;
	inputdesc.mFormatID='lpcm';
#if __BIG_ENDIAN__
	inputdesc.mFormatFlags=0x0e;	// signed int | packed | big-endian
#else
	inputdesc.mFormatFlags=0x0c;	// signed int | packed
#endif
	inputdesc.mBytesPerPacket=4;
	inputdesc.mFramesPerPacket=1;
	inputdesc.mBytesPerFrame=4;
	inputdesc.mChannelsPerFrame=2;
	inputdesc.mBitsPerChannel=16;
	inputdesc.mReserved=0;
	// dumpdesc(&inputdesc);

	err=AudioConverterNew(&inputdesc,&outputdesc,&conv);
	if (err) {
		// printf("AudioConvertNew failed %.*s\n",4,(char*)&err);
		return err;
	}
	return err;
}
/**
 * Enable the OS X output: locate the requested output component, open it,
 * bind it to the configured device and install the render callback.
 *
 * Returns true on success; on failure *error_r is populated and any
 * already-opened component is closed again.
 */
static bool osx_output_enable(struct audio_output *ao, GError **error_r)
{
	struct osx_output *oo = (struct osx_output *)ao;

	/* Describe the output unit this plugin was configured for. */
	ComponentDescription desc;
	desc.componentType = kAudioUnitType_Output;
	desc.componentSubType = oo->component_subtype;
	desc.componentManufacturer = kAudioUnitManufacturer_Apple;
	desc.componentFlags = 0;
	desc.componentFlagsMask = 0;

	Component comp = FindNextComponent(NULL, &desc);
	if (comp == 0) {
		g_set_error(error_r, osx_output_quark(), 0,
			    "Error finding OS X component");
		return false;
	}

	OSStatus status = OpenAComponent(comp, &oo->au);
	if (status != noErr) {
		g_set_error(error_r, osx_output_quark(), status,
			    "Unable to open OS X component: %s",
			    GetMacOSStatusCommentString(status));
		return false;
	}

	if (!osx_output_set_device(oo, error_r)) {
		CloseComponent(oo->au);
		return false;
	}

	/* Feed the unit from osx_render(). */
	AURenderCallbackStruct callback;
	callback.inputProc = osx_render;
	callback.inputProcRefCon = oo;

	ComponentResult result =
		AudioUnitSetProperty(oo->au,
				     kAudioUnitProperty_SetRenderCallback,
				     kAudioUnitScope_Input, 0,
				     &callback, sizeof(callback));
	if (result != noErr) {
		CloseComponent(oo->au);
		g_set_error(error_r, osx_output_quark(), result,
			    "unable to set callback for OS X audio unit");
		return false;
	}

	return true;
}
// Create and configure the AUHAL unit used for input from device 'in'.
// Finds and opens the HAL output component, enables input / disables
// output, selects the device, installs the input callback, and only then
// initializes the unit.
OSStatus CAPlayThrough::SetupAUHAL(AudioDeviceID in)
{
	OSStatus err = noErr;
	Component comp;
	ComponentDescription desc;

	//There are several different types of Audio Units.
	//Some audio units serve as Outputs, Mixers, or DSP
	//units. See AUComponent.h for listing
	desc.componentType = kAudioUnitType_Output;

	//Every Component has a subType, which will give a clearer picture
	//of what this components function will be.
	desc.componentSubType = kAudioUnitSubType_HALOutput;

	//all Audio Units in AUComponent.h must use
	//"kAudioUnitManufacturer_Apple" as the Manufacturer
	desc.componentManufacturer = kAudioUnitManufacturer_Apple;
	desc.componentFlags = 0;
	desc.componentFlagsMask = 0;

	//Finds a component that meets the desc spec's
	comp = FindNextComponent(NULL, &desc);
	if (comp == NULL) exit (-1);

	//gains access to the services provided by the component
	// FIX: the result of OpenAComponent was previously ignored; a failed
	// open left mInputUnit invalid for every call below.
	err = OpenAComponent(comp, &mInputUnit);
	checkErr(err);

	// FIX: the unit was previously initialized here, *before* the
	// EnableIO/device/callback configuration (and then initialized a second
	// time at the end).  kAudioOutputUnitProperty_EnableIO may only be
	// changed on an uninitialized AUHAL, so configure first and initialize
	// exactly once below.
	err = EnableIO();
	checkErr(err);

	err = SetInputDeviceAsCurrent(in);
	checkErr(err);

	err = CallbackSetup();
	checkErr(err);

	//Don't setup buffers until you know what the
	//input and output device audio streams look like.
	err = AudioUnitInitialize(mInputUnit);

	return err;
}
static int coreaudio_init (void) { trace ("coreaudio_init\n"); //selecting the default output unit ComponentDescription desc; desc.componentType = kAudioUnitType_Output; desc.componentSubType = kAudioUnitSubType_DefaultOutput; desc.componentManufacturer = kAudioUnitManufacturer_Apple; desc.componentFlags = 0; desc.componentFlagsMask = 0; OSStatus err = noErr; Component comp = FindNextComponent(NULL, &desc); if (comp == NULL) { trace ("FindNextComponent= failed to find the default output component.\n"); return -1; } err = OpenAComponent(comp, &output_unit); if (comp == NULL) { trace ("OpenAComponent= %s\n", GetMacOSStatusErrorString(err)); return -1; } // filling out the description for linear PCM data (can only be called after opening audio component) if (coreaudio_set_data_format(&plugin.fmt) < 0) return -1; // callback AURenderCallbackStruct input_cb; input_cb.inputProc = coreaudio_callback; input_cb.inputProcRefCon = NULL; err = AudioUnitSetProperty(output_unit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, 0, &input_cb, sizeof(input_cb)); if (err) { trace ("AudioUnitSetProperty-CB= %s\n", GetMacOSStatusErrorString(err)); return -1; } // Initialize unit err = AudioUnitInitialize(output_unit); if (err) { trace ("AudioUnitInitialize= %s\n", GetMacOSStatusErrorString(err)); return -1; } au_state = 1; // audio unit initialised state = OUTPUT_STATE_STOPPED; return 0; }
// Find and open the HAL output audio unit into *au.  Failures are logged
// via DBG_DYNA_AUDIO_DRV; on a missing component the function returns
// early instead of opening a NULL component.
static void init_audio_unit(AudioUnit *au)
{
    OSStatus err = noErr;
    ComponentDescription desc;

    desc.componentType = kAudioUnitType_Output;
    desc.componentSubType = kAudioUnitSubType_HALOutput;
    desc.componentManufacturer = kAudioUnitManufacturer_Apple;
    desc.componentFlags = 0;
    desc.componentFlagsMask = 0;

    Component comp = FindNextComponent(NULL, &desc);
    if (comp == NULL) {
        DBG_DYNA_AUDIO_DRV("!!CoreAudio: can't find audio component\n");
        // FIX: previously execution fell through and called
        // OpenAComponent(NULL, au); bail out instead.
        return;
    }

    err = OpenAComponent(comp, au);
    if (err != noErr) {
        DBG_DYNA_AUDIO_DRV("!!CoreAudio: can't open audio component\n");
    }
}
/* Create an AudioUnit-based player for 'source' (16-bit PCM derived from an
 * R vector; a 2-row matrix is treated as stereo).  Returns a ready instance;
 * raises an R error (longjmp) on any failure. */
static au_instance_t *audiounits_create_player(SEXP source, float rate, int flags) {
	ComponentDescription desc = { kAudioUnitType_Output, kAudioUnitSubType_DefaultOutput, kAudioUnitManufacturer_Apple, 0, 0 };
	Component comp;
	OSStatus err;
	au_instance_t *ap = (au_instance_t*) calloc(1, sizeof(au_instance_t));
	/* FIX: the allocation was previously unchecked; a failed calloc was
	 * dereferenced immediately below. */
	if (!ap)
		Rf_error("unable to allocate audio player");
	ap->source = source;
	ap->sample_rate = rate;
	ap->done = NO;
	ap->position = 0;
	ap->length = LENGTH(source);
	ap->stereo = NO;
	{ /* if the source is a matrix with 2 rows then we'll use stereo */
		SEXP dim = Rf_getAttrib(source, R_DimSymbol);
		if (TYPEOF(dim) == INTSXP && LENGTH(dim) > 0 && INTEGER(dim)[0] == 2)
			ap->stereo = YES;
	}
	ap->loop = (flags & APFLAG_LOOP) ? YES : NO;

	/* Output format: 16-bit signed integer PCM, packed, native endianness. */
	memset(&ap->fmtOut, 0, sizeof(ap->fmtOut));
	ap->fmtOut.mSampleRate = ap->sample_rate;
	ap->fmtOut.mFormatID = kAudioFormatLinearPCM;
	ap->fmtOut.mChannelsPerFrame = ap->stereo ? 2 : 1;
	ap->fmtOut.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
#if __ppc__ || __ppc64__ || __BIG_ENDIAN__
	ap->fmtOut.mFormatFlags |= kAudioFormatFlagIsBigEndian;
#endif
	ap->fmtOut.mFramesPerPacket = 1;
	ap->fmtOut.mBytesPerPacket = ap->fmtOut.mBytesPerFrame = ap->fmtOut.mFramesPerPacket * ap->fmtOut.mChannelsPerFrame * 2;
	ap->fmtOut.mBitsPerChannel = 16;
	if (ap->stereo) ap->length /= 2;	/* length counts frames, not samples */

	comp = FindNextComponent(NULL, &desc);
	if (!comp)
		Rf_error("unable to find default audio output");
	err = OpenAComponent(comp, &ap->outUnit);
	if (err)
		Rf_error("unable to open default audio (%08x)", err);
	err = AudioUnitInitialize(ap->outUnit);
	if (err) {
		CloseComponent(ap->outUnit);
		Rf_error("unable to initialize default audio (%08x)", err);
	}
	/* keep the R source vector alive for the lifetime of the player */
	R_PreserveObject(ap->source);
	return ap;
}
/* One-time lazy initialization of playback: create the callback semaphore
 * and feeder thread, then find, open and initialize the default output
 * unit.  Returns 0 on success (or when already initialized), -1 on
 * failure. */
static OSStatus CheckInit ()
{
    if (playBackWasInit)
        return 0;

    OSStatus result = noErr;

    /* Semaphore and thread used to run the user callback outside the
       CoreAudio render thread. */
    callbackSem = SDL_CreateSemaphore(0);
    SDL_CreateThread(RunCallBackThread, NULL);

    {
        ComponentDescription desc;
        desc.componentType = kAudioUnitType_Output;
        desc.componentSubType = kAudioUnitSubType_DefaultOutput;
        desc.componentManufacturer = kAudioUnitManufacturer_Apple;
        desc.componentFlags = 0;
        desc.componentFlagsMask = 0;

        Component comp = FindNextComponent (NULL, &desc);
        if (comp == NULL) {
            SDL_SetError ("CheckInit: FindNextComponent returned NULL");
            /* FIX: this previously returned only 'if (result)', but result
               is still noErr here, so execution fell through and called
               OpenAComponent with a NULL component.  Fail unconditionally. */
            return -1;
        }

        result = OpenAComponent (comp, &theUnit);
        if (result)
            return -1;

        result = AudioUnitInitialize (theUnit);
        if (result)
            return -1;

        playBackWasInit = true;
    }
    return 0;
}
// ALC playback-open backend (default output only): accepts just the
// built-in device name, finds the default output unit and opens it.
// The per-device ca_data is stored in device->ExtraData *before* the open
// and reset to NULL on failure.  No AudioUnitInitialize here — presumably
// deferred to a later reset/start call; not visible in this chunk.
static ALCenum ca_open_playback(ALCdevice *device, const ALCchar *deviceName)
{
    ComponentDescription desc;
    Component comp;
    ca_data *data;
    OSStatus err;

    if(!deviceName)
        deviceName = ca_device;
    else if(strcmp(deviceName, ca_device) != 0)
        return ALC_INVALID_VALUE;

    /* open the default output unit */
    desc.componentType = kAudioUnitType_Output;
    desc.componentSubType = kAudioUnitSubType_DefaultOutput;
    desc.componentManufacturer = kAudioUnitManufacturer_Apple;
    desc.componentFlags = 0;
    desc.componentFlagsMask = 0;

    comp = FindNextComponent(NULL, &desc);
    if(comp == NULL)
    {
        ERR("FindNextComponent failed\n");
        return ALC_INVALID_VALUE;
    }

    // NOTE(review): calloc is unchecked before the dereference below —
    // confirm allocation-failure policy.
    data = calloc(1, sizeof(*data));
    device->ExtraData = data;

    err = OpenAComponent(comp, &data->audioUnit);
    if(err != noErr)
    {
        ERR("OpenAComponent failed\n");
        free(data);
        device->ExtraData = NULL;
        return ALC_INVALID_VALUE;
    }

    return ALC_NO_ERROR;
}
// Open an instance of the AudioUnit component described by 'desc'.
// Any previously held component instance is closed first.  Returns true
// on success, false when the component cannot be located or opened (the
// failure is logged).
bool CCoreAudioUnit::Open(ComponentDescription desc)
{
    if (m_Component)
        Close();

    // Locate a component matching the description.
    Component component = FindNextComponent(NULL, &desc);
    if (component == NULL)
    {
        CLog::Log(LOGERROR, "CCoreAudioUnit::Open: Unable to locate AudioUnit Component.");
        return false;
    }

    // Instantiate it.
    OSStatus ret = OpenAComponent(component, &m_Component);
    if (ret)
    {
        CLog::Log(LOGERROR, "CCoreAudioUnit::Open: Unable to open AudioUnit Component. Error = 0x%08x (%4.4s)", ret, CONVERT_OSSTATUS(ret));
        return false;
    }

    return true;
}
// ALC capture-open backend: opens an AUHAL unit in input-only mode on the
// default input device, installs the input callback, and sets up an
// AudioConverter plus ring buffer so the hardware rate can be resampled to
// the requested device->Frequency.  On failure everything allocated so far
// is released and ALC_INVALID_VALUE is returned.
static ALCenum ca_open_capture(ALCdevice *device, const ALCchar *deviceName)
{
    AudioStreamBasicDescription requestedFormat;  // The application requested format
    AudioStreamBasicDescription hardwareFormat;   // The hardware format
    AudioStreamBasicDescription outputFormat;     // The AudioUnit output format
    AURenderCallbackStruct input;
    ComponentDescription desc;
    AudioDeviceID inputDevice;
    UInt32 outputFrameCount;
    UInt32 propertySize;
    UInt32 enableIO;
    Component comp;
    ca_data *data;
    OSStatus err;

    desc.componentType = kAudioUnitType_Output;
    desc.componentSubType = kAudioUnitSubType_HALOutput;
    desc.componentManufacturer = kAudioUnitManufacturer_Apple;
    desc.componentFlags = 0;
    desc.componentFlagsMask = 0;

    // Search for component with given description
    comp = FindNextComponent(NULL, &desc);
    if(comp == NULL)
    {
        ERR("FindNextComponent failed\n");
        return ALC_INVALID_VALUE;
    }

    // FIX: check the allocation; the error path below dereferences 'data'.
    data = calloc(1, sizeof(*data));
    if(data == NULL)
    {
        ERR("Failed to allocate device data\n");
        return ALC_INVALID_VALUE;
    }
    device->ExtraData = data;

    // Open the component
    err = OpenAComponent(comp, &data->audioUnit);
    if(err != noErr)
    {
        ERR("OpenAComponent failed\n");
        goto error;
    }

    // Turn off AudioUnit output
    enableIO = 0;
    err = AudioUnitSetProperty(data->audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, 0, &enableIO, sizeof(ALuint));
    if(err != noErr)
    {
        ERR("AudioUnitSetProperty failed\n");
        goto error;
    }

    // Turn on AudioUnit input
    enableIO = 1;
    err = AudioUnitSetProperty(data->audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, 1, &enableIO, sizeof(ALuint));
    if(err != noErr)
    {
        ERR("AudioUnitSetProperty failed\n");
        goto error;
    }

    // Get the default input device
    propertySize = sizeof(AudioDeviceID);
    err = AudioHardwareGetProperty(kAudioHardwarePropertyDefaultInputDevice, &propertySize, &inputDevice);
    if(err != noErr)
    {
        ERR("AudioHardwareGetProperty failed\n");
        goto error;
    }
    if(inputDevice == kAudioDeviceUnknown)
    {
        ERR("No input device found\n");
        goto error;
    }

    // Track the input device
    err = AudioUnitSetProperty(data->audioUnit, kAudioOutputUnitProperty_CurrentDevice, kAudioUnitScope_Global, 0, &inputDevice, sizeof(AudioDeviceID));
    if(err != noErr)
    {
        ERR("AudioUnitSetProperty failed\n");
        goto error;
    }

    // set capture callback
    input.inputProc = ca_capture_callback;
    input.inputProcRefCon = device;
    err = AudioUnitSetProperty(data->audioUnit, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, 0, &input, sizeof(AURenderCallbackStruct));
    if(err != noErr)
    {
        ERR("AudioUnitSetProperty failed\n");
        goto error;
    }

    // Initialize the device
    err = AudioUnitInitialize(data->audioUnit);
    if(err != noErr)
    {
        ERR("AudioUnitInitialize failed\n");
        goto error;
    }

    // Get the hardware format
    propertySize = sizeof(AudioStreamBasicDescription);
    err = AudioUnitGetProperty(data->audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 1, &hardwareFormat, &propertySize);
    if(err != noErr || propertySize != sizeof(AudioStreamBasicDescription))
    {
        ERR("AudioUnitGetProperty failed\n");
        goto error;
    }

    // Set up the requested format description
    switch(device->FmtType)
    {
        case DevFmtUByte:
            requestedFormat.mBitsPerChannel = 8;
            requestedFormat.mFormatFlags = kAudioFormatFlagIsPacked;
            break;
        case DevFmtShort:
            requestedFormat.mBitsPerChannel = 16;
            requestedFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked;
            break;
        case DevFmtInt:
            requestedFormat.mBitsPerChannel = 32;
            requestedFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked;
            break;
        case DevFmtFloat:
            requestedFormat.mBitsPerChannel = 32;
            // FIX: float samples must carry kAudioFormatFlagIsFloat; with
            // only kAudioFormatFlagIsPacked the description would claim
            // 32-bit *integer* PCM and the converter would misinterpret
            // the data.
            requestedFormat.mFormatFlags = kAudioFormatFlagIsFloat | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked;
            break;
        case DevFmtByte:
        case DevFmtUShort:
        case DevFmtUInt:
            ERR("%s samples not supported\n", DevFmtTypeString(device->FmtType));
            goto error;
    }

    switch(device->FmtChans)
    {
        case DevFmtMono:
            requestedFormat.mChannelsPerFrame = 1;
            break;
        case DevFmtStereo:
            requestedFormat.mChannelsPerFrame = 2;
            break;
        case DevFmtQuad:
        case DevFmtX51:
        case DevFmtX51Side:
        case DevFmtX61:
        case DevFmtX71:
            ERR("%s not supported\n", DevFmtChannelsString(device->FmtChans));
            goto error;
    }

    requestedFormat.mBytesPerFrame = requestedFormat.mChannelsPerFrame * requestedFormat.mBitsPerChannel / 8;
    requestedFormat.mBytesPerPacket = requestedFormat.mBytesPerFrame;
    requestedFormat.mSampleRate = device->Frequency;
    requestedFormat.mFormatID = kAudioFormatLinearPCM;
    requestedFormat.mReserved = 0;
    requestedFormat.mFramesPerPacket = 1;

    // save requested format description for later use
    data->format = requestedFormat;
    data->frameSize = FrameSizeFromDevFmt(device->FmtChans, device->FmtType);

    // Use intermediate format for sample rate conversion (outputFormat)
    // Set sample rate to the same as hardware for resampling later
    outputFormat = requestedFormat;
    outputFormat.mSampleRate = hardwareFormat.mSampleRate;

    // Determine sample rate ratio for resampling
    data->sampleRateRatio = outputFormat.mSampleRate / device->Frequency;

    // The output format should be the requested format, but using the hardware sample rate
    // This is because the AudioUnit will automatically scale other properties, except for sample rate
    err = AudioUnitSetProperty(data->audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 1, (void *)&outputFormat, sizeof(outputFormat));
    if(err != noErr)
    {
        ERR("AudioUnitSetProperty failed\n");
        goto error;
    }

    // Set the AudioUnit output format frame count
    outputFrameCount = device->UpdateSize * data->sampleRateRatio;
    err = AudioUnitSetProperty(data->audioUnit, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Output, 0, &outputFrameCount, sizeof(outputFrameCount));
    if(err != noErr)
    {
        ERR("AudioUnitSetProperty failed: %d\n", err);
        goto error;
    }

    // Set up sample converter (hardware-rate intermediate -> requested rate)
    err = AudioConverterNew(&outputFormat, &requestedFormat, &data->audioConverter);
    if(err != noErr)
    {
        ERR("AudioConverterNew failed: %d\n", err);
        goto error;
    }

    // Create a buffer for use in the resample callback
    // FIX: check the allocation before use.
    data->resampleBuffer = malloc(device->UpdateSize * data->frameSize * data->sampleRateRatio);
    if(data->resampleBuffer == NULL)
        goto error;

    // Allocate buffer for the AudioUnit output
    data->bufferList = allocate_buffer_list(outputFormat.mChannelsPerFrame, device->UpdateSize * data->frameSize * data->sampleRateRatio);
    if(data->bufferList == NULL)
        goto error;

    data->ring = CreateRingBuffer(data->frameSize, (device->UpdateSize * data->sampleRateRatio) * device->NumUpdates);
    if(data->ring == NULL)
        goto error;

    al_string_copy_cstr(&device->DeviceName, deviceName);

    return ALC_NO_ERROR;

error:
    DestroyRingBuffer(data->ring);
    free(data->resampleBuffer);
    destroy_buffer_list(data->bufferList);

    if(data->audioConverter)
        AudioConverterDispose(data->audioConverter);
    if(data->audioUnit)
        CloseComponent(data->audioUnit);

    free(data);
    device->ExtraData = NULL;

    return ALC_INVALID_VALUE;
}
// Create a QuickTime ICM compression session for the codec described in
// 'encoderArgs' (a serialized QVideoArgs).  On success the new QEncoder is
// returned through *eptr and 0 is returned; otherwise a negative error
// code (-2 allocation failure, -5 any QuickTime failure) after releasing
// everything acquired so far.
int qCreateEncoderAPI(QEncoder **eptr, char* encoderArgs, int semaIndex, int width, int height)
{
	QEncoder* encoder;
	OSStatus err;
	QVideoArgs* args = (QVideoArgs*)encoderArgs;
	//XXXX: we should be able to configure this
	SInt32 averageDataRate = 100000;

	ICMEncodedFrameOutputRecord encodedFrameOutputRecord = {0};
	ICMCompressionSessionOptionsRef sessionOptions = NULL;
	CFMutableDictionaryRef pixelBufferAttributes = NULL;
	CFNumberRef number = NULL;
	OSType pixelFormat = Q_PIXEL_FORMAT;

	fprintf(QSTDERR, "\nqCreateEncoderQT(): ABOUT TO TRY TO CREATE ENCODER");
	fprintf(QSTDERR, "\n\t time-scale: %d", args->timeScale);
	fprintf(QSTDERR, "\n\t big-endian: %d", (args->isBigEndian)[0]);
	// codec type is logged byte-reversed (little-endian four-char code).
	fprintf(QSTDERR, "\n\t codec-type: %c%c%c%c", ((unsigned char*)&(args->codecType))[3], ((unsigned char*)&(args->codecType))[2], ((unsigned char*)&(args->codecType))[1], ((unsigned char*)&(args->codecType))[0]);

	encoder = (QEncoder*)malloc(sizeof(QEncoder));
	if (encoder == NULL) {
		fprintf(QSTDERR, "\nqCreateDecoderQT: failed to malloc encoder struct");
		return -2;
	}
	encoder->semaIndex = semaIndex;
	encoder->timeScale = args->timeScale;
	// NOTE(review): codecType is dereferenced as a *pointer* here, but its
	// address is taken for the log above and the raw value is passed to
	// ICMCompressionSessionCreate below — confirm how QVideoArgs declares
	// this field.
	encoder->codecType = *((CodecType*)(args->codecType));
	encoder->width = width;
	encoder->height = height;

	err = ICMCompressionSessionOptionsCreate( NULL, &sessionOptions );
	if(err != noErr) {
		fprintf(QSTDERR, "\nqCreateEncoderQT(): could not create session options");
		fprintf(QSTDERR, "\n\tQUICKTIME ERROR CODE: %d", err);
		free(encoder);
		return -5;
	}

	// Create and configure the compressor component.
	OSType codecManufacturer = 'appl';
	ComponentDescription componentDescription;
	componentDescription.componentType = FOUR_CHAR_CODE('imco');
	componentDescription.componentSubType = encoder->codecType;
	componentDescription.componentManufacturer = codecManufacturer;
	componentDescription.componentFlags = 0;
	componentDescription.componentFlagsMask = 0;

	Component compressorComponent = FindNextComponent(0, &componentDescription);
	if(compressorComponent == NULL) {
		fprintf(QSTDERR, "\nqCreateEncoderQT(): could not find a matching compressor");
		ICMCompressionSessionOptionsRelease(sessionOptions);
		free(encoder);
		return -5;
	}
	err = OpenAComponent(compressorComponent, &(encoder->compressor));
	if(err != noErr) {
		fprintf(QSTDERR, "\nqCreateEncoderQT(): failed to open compressor component");
		fprintf(QSTDERR, "\n\tQUICKTIME ERROR CODE: %d", err);
		ICMCompressionSessionOptionsRelease(sessionOptions);
		free(encoder);
		return -5;
	}

	// If we want to use H.264, we need to muck around a bit.
	// XXXX: "clean up" this code.
	if(encoder->codecType == FOUR_CHAR_CODE('avc1'))
	{
		// Profile is currently fixed to Baseline
		// The level is adjusted by the use of the
		// bitrate, but the SPS returned reveals
		// level 1.1 in case of QCIF and level 1.3
		// in case of CIF
		Handle h264Settings = NewHandleClear(0);

		err = ImageCodecGetSettings(encoder->compressor, h264Settings);
		if(err != noErr) {
			fprintf(QSTDERR, "\nqCreateEncoderQT(): failed to get codec settings");
			fprintf(QSTDERR, "\n\tQUICKTIME ERROR CODE: %d", err);
			ICMCompressionSessionOptionsRelease(sessionOptions);
			CloseComponent(encoder->compressor);
			free(encoder);
			return -5;
		}

		// For some reason, the QTAtomContainer functions will crash if used on the atom
		// container returned by ImageCodecGetSettings.
		// Therefore, we have to parse the atoms self to set the correct settings.
		unsigned i;
		unsigned settingsSize = GetHandleSize(h264Settings) / 4;	// handle size in 32-bit words
		UInt32 *data = (UInt32 *)*h264Settings;
		for(i = 0; i < settingsSize; i++)
		{
			// Forcing Baseline profile
			// (four-char codes and values appear byte-swapped on
			// little-endian hosts, hence the mirrored constants below)
#if defined(__BIG_ENDIAN__)
			if(data[i] == FOUR_CHAR_CODE('sprf')) {
				i+=4;	// value lives 4 words past the tag
				data[i] = 1;
			}
#else
			if(data[i] == FOUR_CHAR_CODE('frps')) {
				i+=4;
				// data[i] = CFSwapInt32(1);
				data[i] = 16777216; // avoid CFSwapInt32;
			}
#endif
			// if video sent is CIF size, we set this flag to one to have the picture
			// encoded in 5 slices instead of two.
			// If QCIF is sent, this flag remains zero to send two slices instead of
			// one.
#if defined(__BIG_ENDIAN__)
			else if(/*videoSize == XMVideoSize_CIF &&*/ data[i] == FOUR_CHAR_CODE('susg')) {
				i+=4;
				data[i] = 1;
			}
#else
			else if(/*videoSize == XMVideoSize_CIF &&*/ data[i] == FOUR_CHAR_CODE('gsus')) {
				i+=4;
				// data[i] = CFSwapInt32(1);
				data[i] = 16777216; // avoid CFSwapInt32;
			}
#endif
		}

		err = ImageCodecSetSettings(encoder->compressor, h264Settings);
		if(err != noErr) {
			fprintf(QSTDERR, "\nqCreateEncoderQT(): failed to set codec settings");
			fprintf(QSTDERR, "\n\tQUICKTIME ERROR CODE: %d", err);
			ICMCompressionSessionOptionsRelease(sessionOptions);
			CloseComponent(encoder->compressor);
			free(encoder);
			return -5;
		}
	}

	// Tell the session to use the compressor we just configured.
	err = ICMCompressionSessionOptionsSetProperty(sessionOptions,
					kQTPropertyClass_ICMCompressionSessionOptions,
					kICMCompressionSessionOptionsPropertyID_CompressorComponent,
					sizeof(encoder->compressor),
					&(encoder->compressor));

	// XXXX: allow (some of) the following options to be set from the 'encoderArgs'

	// We must set this flag to enable P or B frames.
	err = ICMCompressionSessionOptionsSetAllowTemporalCompression( sessionOptions, true );
	if(err != noErr) {
		fprintf(QSTDERR, "\nqCreateEncoderQT(): could not enable temporal compression");
		fprintf(QSTDERR, "\n\tQUICKTIME ERROR CODE: %d", err);
		ICMCompressionSessionOptionsRelease(sessionOptions);
		CloseComponent(encoder->compressor);
		free(encoder);
		return -5;
	}

	// We must set this flag to enable B frames.
	// XXXX: err = ICMCompressionSessionOptionsSetAllowFrameReordering( sessionOptions, true );
	err = ICMCompressionSessionOptionsSetAllowFrameReordering( sessionOptions, false );
	if(err != noErr) {
		fprintf(QSTDERR, "\nqCreateEncoderQT(): could not enable frame reordering");
		fprintf(QSTDERR, "\n\tQUICKTIME ERROR CODE: %d", err);
		ICMCompressionSessionOptionsRelease(sessionOptions);
		CloseComponent(encoder->compressor);
		free(encoder);
		return -5;
	}

	// Set the maximum key frame interval, also known as the key frame rate.
	// XXXX: even 5 frames might be a bit long for videoconferencing
	err = ICMCompressionSessionOptionsSetMaxKeyFrameInterval( sessionOptions, 3 );
	if(err != noErr) {
		fprintf(QSTDERR, "\nqCreateEncoderQT(): could not set maximum keyframe interval");
		fprintf(QSTDERR, "\n\tQUICKTIME ERROR CODE: %d", err);
		ICMCompressionSessionOptionsRelease(sessionOptions);
		CloseComponent(encoder->compressor);
		free(encoder);
		return -5;
	}

	// This allows the compressor more flexibility (ie, dropping and coalescing frames).
	// XXXX: does this mean that playback has to be more careful about when to actually display decoded frames?
	// XXXX: err = ICMCompressionSessionOptionsSetAllowFrameTimeChanges( sessionOptions, true );
	err = ICMCompressionSessionOptionsSetAllowFrameTimeChanges( sessionOptions, false );
	if(err != noErr) {
		fprintf(QSTDERR, "\nqCreateEncoderQT(): could not set enable frame time changes");
		fprintf(QSTDERR, "\n\tQUICKTIME ERROR CODE: %d", err);
		ICMCompressionSessionOptionsRelease(sessionOptions);
		CloseComponent(encoder->compressor);
		free(encoder);
		return -5;
	}

	// XXXX: CaptureAndCompressIPBMovie set this to true
	err = ICMCompressionSessionOptionsSetDurationsNeeded( sessionOptions, false );
	if(err != noErr) {
		fprintf(QSTDERR, "\nqCreateEncoderQT(): could not set whether frame durations are needed");
		fprintf(QSTDERR, "\n\tQUICKTIME ERROR CODE: %d", err);
		ICMCompressionSessionOptionsRelease(sessionOptions);
		CloseComponent(encoder->compressor);
		free(encoder);
		return -5;
	}

	// Set the average data rate.
	// XXXX: another good one to parameterize
	// XXXX: can we change this one at runtime?
	err = ICMCompressionSessionOptionsSetProperty( sessionOptions, kQTPropertyClass_ICMCompressionSessionOptions, kICMCompressionSessionOptionsPropertyID_AverageDataRate, sizeof( averageDataRate ), &averageDataRate );
	if(err != noErr) {
		fprintf(QSTDERR, "\nqCreateEncoderQT(): could not set average data rate");
		fprintf(QSTDERR, "\n\tQUICKTIME ERROR CODE: %d", err);
		ICMCompressionSessionOptionsRelease(sessionOptions);
		CloseComponent(encoder->compressor);
		free(encoder);
		return -5;
	}

	// Route encoded frames back to us via the callback.
	encodedFrameOutputRecord.encodedFrameOutputCallback = (void*)qQuickTimeEncoderCallback;
	encodedFrameOutputRecord.encodedFrameOutputRefCon = encoder;
	encodedFrameOutputRecord.frameDataAllocator = NULL;

	// Specify attributes for the compression-session's pixel-buffer pool.
	pixelBufferAttributes = CFDictionaryCreateMutable( NULL, 0, &kCFTypeDictionaryKeyCallBacks, &kCFTypeDictionaryValueCallBacks );
	number = CFNumberCreate( NULL, kCFNumberIntType, &width );
	CFDictionaryAddValue( pixelBufferAttributes, kCVPixelBufferWidthKey, number );
	CFRelease( number );
	number = CFNumberCreate( NULL, kCFNumberIntType, &height );
	CFDictionaryAddValue( pixelBufferAttributes, kCVPixelBufferHeightKey, number );
	CFRelease( number );
	number = CFNumberCreate( NULL, kCFNumberSInt32Type, &pixelFormat );
	CFDictionaryAddValue( pixelBufferAttributes, kCVPixelBufferPixelFormatTypeKey, number );
	CFRelease( number );

	err = ICMCompressionSessionCreate( NULL, width, height, args->codecType, args->timeScale, sessionOptions, pixelBufferAttributes, &encodedFrameOutputRecord, &(encoder->session));
	CFRelease(pixelBufferAttributes);
	if(err != noErr) {
		fprintf(QSTDERR, "\nqCreateEncoderQT(): could not create compression session");
		fprintf(QSTDERR, "\n\tQUICKTIME ERROR CODE: %d", err);
		ICMCompressionSessionOptionsRelease(sessionOptions);
		CloseComponent(encoder->compressor);
		free(encoder);
		return -5;
	}

	ICMCompressionSessionOptionsRelease(sessionOptions);

	*eptr = encoder;
	return 0;
}
/* audiounit_stream_init
 *
 * Create a cubeb stream that renders through the default CoreAudio
 * output AudioUnit.  On success *stream receives the new stream and
 * CUBEB_OK is returned; on any AudioUnit failure the partially-built
 * stream is torn down and CUBEB_ERROR is returned
 * (CUBEB_ERROR_INVALID_FORMAT for an unknown sample format).
 * stream_name is accepted but unused by this backend. */
static int
audiounit_stream_init(cubeb * context, cubeb_stream ** stream,
                      char const * stream_name,
                      cubeb_stream_params stream_params,
                      unsigned int latency,
                      cubeb_data_callback data_callback,
                      cubeb_state_callback state_callback,
                      void * user_ptr)
{
  AudioStreamBasicDescription ss;
/* The Component Manager API was deprecated in OS X 10.6; use the
 * AudioComponent replacements when targeting 10.6 or later. */
#if MAC_OS_X_VERSION_MIN_REQUIRED < 1060
  ComponentDescription desc;
  Component comp;
#else
  AudioComponentDescription desc;
  AudioComponent comp;
#endif
  cubeb_stream * stm;
  AURenderCallbackStruct input;
  unsigned int buffer_size;
  OSStatus r;

  assert(context);
  *stream = NULL;

  /* Translate the cubeb sample format into a linear-PCM
   * AudioStreamBasicDescription. */
  memset(&ss, 0, sizeof(ss));
  ss.mFormatFlags = 0;

  switch (stream_params.format) {
  case CUBEB_SAMPLE_S16LE:
    ss.mBitsPerChannel = 16;
    ss.mFormatFlags |= kAudioFormatFlagIsSignedInteger;
    break;
  case CUBEB_SAMPLE_S16BE:
    ss.mBitsPerChannel = 16;
    ss.mFormatFlags |= kAudioFormatFlagIsSignedInteger |
                       kAudioFormatFlagIsBigEndian;
    break;
  case CUBEB_SAMPLE_FLOAT32LE:
    ss.mBitsPerChannel = 32;
    ss.mFormatFlags |= kAudioFormatFlagIsFloat;
    break;
  case CUBEB_SAMPLE_FLOAT32BE:
    ss.mBitsPerChannel = 32;
    ss.mFormatFlags |= kAudioFormatFlagIsFloat |
                       kAudioFormatFlagIsBigEndian;
    break;
  default:
    return CUBEB_ERROR_INVALID_FORMAT;
  }

  ss.mFormatID = kAudioFormatLinearPCM;
  ss.mFormatFlags |= kLinearPCMFormatFlagIsPacked;
  ss.mSampleRate = stream_params.rate;
  ss.mChannelsPerFrame = stream_params.channels;

  ss.mBytesPerFrame = (ss.mBitsPerChannel / 8) * ss.mChannelsPerFrame;
  ss.mFramesPerPacket = 1;
  ss.mBytesPerPacket = ss.mBytesPerFrame * ss.mFramesPerPacket;

  /* Locate the system default output unit. */
  desc.componentType = kAudioUnitType_Output;
  desc.componentSubType = kAudioUnitSubType_DefaultOutput;
  desc.componentManufacturer = kAudioUnitManufacturer_Apple;
  desc.componentFlags = 0;
  desc.componentFlagsMask = 0;
#if MAC_OS_X_VERSION_MIN_REQUIRED < 1060
  comp = FindNextComponent(NULL, &desc);
#else
  comp = AudioComponentFindNext(NULL, &desc);
#endif
  assert(comp);

  stm = calloc(1, sizeof(*stm));
  assert(stm);

  stm->context = context;
  stm->data_callback = data_callback;
  stm->state_callback = state_callback;
  stm->user_ptr = user_ptr;

  stm->sample_spec = ss;

  r = pthread_mutex_init(&stm->mutex, NULL);
  assert(r == 0);

  stm->frames_played = 0;
  stm->frames_queued = 0;

  /* Instantiate the output unit; from here on audiounit_stream_destroy
   * is responsible for tearing down everything built so far. */
#if MAC_OS_X_VERSION_MIN_REQUIRED < 1060
  r = OpenAComponent(comp, &stm->unit);
#else
  r = AudioComponentInstanceNew(comp, &stm->unit);
#endif
  if (r != 0) {
    audiounit_stream_destroy(stm);
    return CUBEB_ERROR;
  }

  /* Install the render callback: the unit pulls audio from us. */
  input.inputProc = audiounit_output_callback;
  input.inputProcRefCon = stm;
  r = AudioUnitSetProperty(stm->unit, kAudioUnitProperty_SetRenderCallback,
                           kAudioUnitScope_Global, 0, &input, sizeof(input));
  if (r != 0) {
    audiounit_stream_destroy(stm);
    return CUBEB_ERROR;
  }

  r = AudioUnitSetProperty(stm->unit, kAudioUnitProperty_StreamFormat,
                           kAudioUnitScope_Input, 0, &ss, sizeof(ss));
  if (r != 0) {
    audiounit_stream_destroy(stm);
    return CUBEB_ERROR;
  }

  /* Convert the requested latency (ms) to bytes, rounded up to a whole
   * number of frames.
   * NOTE(review): buffer_size is computed and asserted here but never
   * applied to the unit in this function — looks vestigial; confirm. */
  buffer_size = ss.mSampleRate / 1000.0 * latency * ss.mBytesPerFrame / NBUFS;
  if (buffer_size % ss.mBytesPerFrame != 0) {
    buffer_size += ss.mBytesPerFrame - (buffer_size % ss.mBytesPerFrame);
  }
  assert(buffer_size % ss.mBytesPerFrame == 0);

  r = AudioUnitInitialize(stm->unit);
  if (r != 0) {
    audiounit_stream_destroy(stm);
    return CUBEB_ERROR;
  }

  *stream = stm;

  return CUBEB_OK;
}
//_______________________________________________
//
// Open the default CoreAudio output unit (pre-10.2 'aunt' Component
// Manager API), install the render callback, set the PCM stream
// format, size the device buffer and allocate the mixing buffer.
// Returns 1 on success, 0 on failure.
//_______________________________________________
uint8_t coreAudioDevice::init(uint8_t channels, uint32_t fq)
{
    _channels = channels;
    OSStatus err;
    ComponentDescription desc;
    AudioUnitInputCallback input;
    AudioStreamBasicDescription streamFormat;
    AudioDeviceID theDevice;
    UInt32 sz=0;
    UInt32 kFramesPerSlice=512;   // requested device buffer size, in frames

    // Legacy component description: 'aunt' component type with the
    // default-output unit id placed in the manufacturer field.
    desc.componentType = 'aunt';
    desc.componentSubType = kAudioUnitSubType_Output;
    desc.componentManufacturer = kAudioUnitID_DefaultOutput;
    desc.componentFlags = 0;
    desc.componentFlagsMask = 0;

    // NOTE(review): comp and theOutputUnit are not declared locally —
    // presumably class members; confirm against the class definition.
    comp= FindNextComponent(NULL, &desc);
    if (comp == NULL)
    {
        printf("coreAudio: Cannot find component\n");
        return 0;
    }
    err = OpenAComponent(comp, &theOutputUnit);
    if(err)
    {
        printf("coreAudio: Cannot open component\n");
        return 0;
    }
    // Initialize it
    verify_noerr(AudioUnitInitialize(theOutputUnit));

    // Set up a callback function to generate output to the output unit
#if 1
    input.inputProc = MyRenderer;
    input.inputProcRefCon = NULL;

    verify_noerr(AudioUnitSetProperty(theOutputUnit,
                                      kAudioUnitProperty_SetInputCallback,
                                      kAudioUnitScope_Global,
                                      0,
                                      &input,
                                      sizeof(input)));
#endif
    // Interleaved signed 16-bit big-endian PCM at the requested rate.
    streamFormat.mSampleRate = fq;
    streamFormat.mFormatID = kAudioFormatLinearPCM;
    streamFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger
                                | kLinearPCMFormatFlagIsBigEndian
                                | kLinearPCMFormatFlagIsPacked;
    streamFormat.mBytesPerPacket = channels * sizeof (UInt16);
    streamFormat.mFramesPerPacket = 1;
    streamFormat.mBytesPerFrame = channels * sizeof (UInt16);
    streamFormat.mChannelsPerFrame = channels;
    streamFormat.mBitsPerChannel = sizeof (UInt16) * 8;

    verify_noerr(AudioUnitSetProperty( theOutputUnit,
                                       kAudioUnitProperty_StreamFormat,
                                       kAudioUnitScope_Input,
                                       0,
                                       &streamFormat,
                                       sizeof(AudioStreamBasicDescription)));

    printf("Rendering source:\n\t");
    printf ("SampleRate=%f,", streamFormat.mSampleRate);
    printf ("BytesPerPacket=%ld,", streamFormat.mBytesPerPacket);
    printf ("FramesPerPacket=%ld,", streamFormat.mFramesPerPacket);
    printf ("BytesPerFrame=%ld,", streamFormat.mBytesPerFrame);
    printf ("BitsPerChannel=%ld,", streamFormat.mBitsPerChannel);
    printf ("ChannelsPerFrame=%ld\n", streamFormat.mChannelsPerFrame);

    // Ask the unit which hardware device it is driving...
    sz=sizeof (theDevice);
    verify_noerr(AudioUnitGetProperty (theOutputUnit,
                                       kAudioOutputUnitProperty_CurrentDevice,
                                       0,
                                       0,
                                       &theDevice,
                                       &sz));
    // ...then request our buffer size and read back what was granted.
    sz = sizeof (kFramesPerSlice);
    verify_noerr(AudioDeviceSetProperty(theDevice,
                                        0,
                                        0,
                                        false,
                                        kAudioDevicePropertyBufferFrameSize,
                                        sz,
                                        &kFramesPerSlice));
    sz = sizeof (kFramesPerSlice);
    verify_noerr(AudioDeviceGetProperty(theDevice,
                                        0,
                                        false,
                                        kAudioDevicePropertyBufferFrameSize,
                                        &sz,
                                        &kFramesPerSlice));
    // Get notified if the device overloads (audio dropouts).
    verify_noerr (AudioDeviceAddPropertyListener(theDevice,
                                                 0,
                                                 false,
                                                 kAudioDeviceProcessorOverload,
                                                 OverloadListenerProc,
                                                 0));
    printf ("size of the device's buffer = %ld frames\n", kFramesPerSlice);

    frameCount=0;
    audioBuffer=new int16_t[BUFFER_SIZE]; // between half a sec and a sec should be enough :)
    return 1;
}
/* Build, configure, initialize and start the CoreAudio AudioUnit for
 * this SDL audio device.  `iscapture` selects the input bus and output
 * scope; otherwise the output bus and input scope are used.  Returns 1
 * on success; on failure CHECK_RESULT sets the SDL error and returns 0
 * from this function. */
static int
prepare_audiounit(_THIS, const char *devname, int iscapture,
                  const AudioStreamBasicDescription * strdesc)
{
    OSStatus result = noErr;
    AURenderCallbackStruct callback;
/* Desktop OS X (MACOSX_COREAUDIO) still uses the Component Manager
 * API here; iOS uses the AudioComponent API. */
#if MACOSX_COREAUDIO
    ComponentDescription desc;
    Component comp = NULL;
#else
    AudioComponentDescription desc;
    AudioComponent comp = NULL;
#endif
    const AudioUnitElement output_bus = 0;
    const AudioUnitElement input_bus = 1;
    const AudioUnitElement bus = ((iscapture) ? input_bus : output_bus);
    const AudioUnitScope scope = ((iscapture) ? kAudioUnitScope_Output :
                                  kAudioUnitScope_Input);

#if MACOSX_COREAUDIO
    /* Resolve devname into this->hidden->deviceID. */
    if (!find_device_by_name(this, devname, iscapture)) {
        SDL_SetError("Couldn't find requested CoreAudio device");
        return 0;
    }
#endif

    SDL_zero(desc);
    desc.componentType = kAudioUnitType_Output;
    desc.componentManufacturer = kAudioUnitManufacturer_Apple;

#if MACOSX_COREAUDIO
    desc.componentSubType = kAudioUnitSubType_DefaultOutput;
    comp = FindNextComponent(NULL, &desc);
#else
    desc.componentSubType = kAudioUnitSubType_RemoteIO;  /* !!! FIXME: ? */
    comp = AudioComponentFindNext(NULL, &desc);
#endif

    if (comp == NULL) {
        SDL_SetError("Couldn't find requested CoreAudio component");
        return 0;
    }

    /* Open & initialize the audio unit */
#if MACOSX_COREAUDIO
    result = OpenAComponent(comp, &this->hidden->audioUnit);
    CHECK_RESULT("OpenAComponent");
#else
    /*
       AudioComponentInstanceNew only available on iPhone OS 2.0 and Mac OS X 10.6
       We can't use OpenAComponent on iPhone because it is not present
     */
    result = AudioComponentInstanceNew(comp, &this->hidden->audioUnit);
    CHECK_RESULT("AudioComponentInstanceNew");
#endif

    /* Remember that the unit needs closing during teardown. */
    this->hidden->audioUnitOpened = 1;

#if MACOSX_COREAUDIO
    /* Bind the unit to the device resolved above. */
    result = AudioUnitSetProperty(this->hidden->audioUnit,
                                  kAudioOutputUnitProperty_CurrentDevice,
                                  kAudioUnitScope_Global, 0,
                                  &this->hidden->deviceID,
                                  sizeof(AudioDeviceID));
    CHECK_RESULT
        ("AudioUnitSetProperty (kAudioOutputUnitProperty_CurrentDevice)");
#endif

    /* Set the data format of the audio unit. */
    result = AudioUnitSetProperty(this->hidden->audioUnit,
                                  kAudioUnitProperty_StreamFormat,
                                  scope, bus, strdesc, sizeof(*strdesc));
    CHECK_RESULT("AudioUnitSetProperty (kAudioUnitProperty_StreamFormat)");

    /* Set the audio callback */
    SDL_memset(&callback, 0, sizeof(AURenderCallbackStruct));
    callback.inputProc = ((iscapture) ? inputCallback : outputCallback);
    callback.inputProcRefCon = this;
    result = AudioUnitSetProperty(this->hidden->audioUnit,
                                  kAudioUnitProperty_SetRenderCallback,
                                  scope, bus, &callback, sizeof(callback));
    CHECK_RESULT
        ("AudioUnitSetProperty (kAudioUnitProperty_SetRenderCallback)");

    /* Calculate the final parameters for this audio specification */
    SDL_CalculateAudioSpec(&this->spec);

    /* Allocate a sample buffer.
     * NOTE(review): the SDL_malloc result is not checked for NULL. */
    this->hidden->bufferOffset = this->hidden->bufferSize = this->spec.size;
    this->hidden->buffer = SDL_malloc(this->hidden->bufferSize);

    result = AudioUnitInitialize(this->hidden->audioUnit);
    CHECK_RESULT("AudioUnitInitialize");

    /* Finally, start processing of the audio unit */
    result = AudioOutputUnitStart(this->hidden->audioUnit);
    CHECK_RESULT("AudioOutputUnitStart");

    /* We're running! */
    return 1;
}
/*
 * new_fluid_core_audio_driver2
 *
 * Create a CoreAudio driver that renders through the HAL output unit,
 * calling `func` (with `data`) from the render callback.  Reads sample
 * rate, period count/size and an optional output device name from
 * `settings`.  Returns the new driver, or NULL after cleaning up on
 * any failure.
 */
fluid_audio_driver_t*
new_fluid_core_audio_driver2(fluid_settings_t* settings, fluid_audio_func_t func, void* data)
{
  char* devname = NULL;
  fluid_core_audio_driver_t* dev = NULL;
  int period_size, periods;
  double sample_rate;
  OSStatus status;
  UInt32 size;
  int i;

  dev = FLUID_NEW(fluid_core_audio_driver_t);
  if (dev == NULL) {
    FLUID_LOG(FLUID_ERR, "Out of memory");
    return NULL;
  }
  FLUID_MEMSET(dev, 0, sizeof(fluid_core_audio_driver_t));

  dev->callback = func;
  dev->data = data;

  // Open the default output unit
  ComponentDescription desc;
  desc.componentType = kAudioUnitType_Output;
  desc.componentSubType = kAudioUnitSubType_HALOutput; //kAudioUnitSubType_DefaultOutput;
  desc.componentManufacturer = kAudioUnitManufacturer_Apple;
  desc.componentFlags = 0;
  desc.componentFlagsMask = 0;

  Component comp = FindNextComponent(NULL, &desc);
  if (comp == NULL) {
    FLUID_LOG(FLUID_ERR, "Failed to get the default audio device");
    goto error_recovery;
  }

  status = OpenAComponent(comp, &dev->outputUnit);
  if (status != noErr) {
    FLUID_LOG(FLUID_ERR, "Failed to open the default audio device. Status=%ld\n", (long int)status);
    goto error_recovery;
  }

  // Set up a callback function to generate output
  AURenderCallbackStruct render;
  render.inputProc = fluid_core_audio_callback;
  render.inputProcRefCon = (void *) dev;
  status = AudioUnitSetProperty (dev->outputUnit,
                                 kAudioUnitProperty_SetRenderCallback,
                                 kAudioUnitScope_Input,
                                 0,
                                 &render,
                                 sizeof(render));
  if (status != noErr) {
    FLUID_LOG (FLUID_ERR, "Error setting the audio callback. Status=%ld\n", (long int)status);
    goto error_recovery;
  }

  fluid_settings_getnum(settings, "synth.sample-rate", &sample_rate);
  fluid_settings_getint(settings, "audio.periods", &periods);
  fluid_settings_getint(settings, "audio.period-size", &period_size);

  /* get the selected device name. if none is specified, use NULL for the
   * default device. */
  if (fluid_settings_dupstr(settings, "audio.coreaudio.device", &devname) /* alloc device name */
      && devname && strlen (devname) > 0)
  {
    /* Enumerate every audio device and match the requested name against
     * devices that actually have output channels. */
    AudioObjectPropertyAddress pa;
    pa.mSelector = kAudioHardwarePropertyDevices;
    pa.mScope = kAudioObjectPropertyScopeWildcard;
    pa.mElement = kAudioObjectPropertyElementMaster;
    if (OK (AudioObjectGetPropertyDataSize (kAudioObjectSystemObject, &pa, 0, 0, &size)))
    {
      int num = size / (int) sizeof (AudioDeviceID);
      AudioDeviceID devs [num];   /* VLA sized by the device count */
      if (OK (AudioObjectGetPropertyData (kAudioObjectSystemObject, &pa, 0, 0, &size, devs)))
      {
        for (i = 0; i < num; ++i)
        {
          char name [1024];
          size = sizeof (name);
          pa.mSelector = kAudioDevicePropertyDeviceName;
          if (OK (AudioObjectGetPropertyData (devs[i], &pa, 0, 0, &size, name)))
          {
            if (get_num_outputs (devs[i]) > 0 && strcasecmp(devname, name) == 0)
            {
              AudioDeviceID selectedID = devs[i];
              status = AudioUnitSetProperty (dev->outputUnit,
                                             kAudioOutputUnitProperty_CurrentDevice,
                                             kAudioUnitScope_Global,
                                             0,
                                             &selectedID,
                                             sizeof(AudioDeviceID));
              if (status != noErr) {
                FLUID_LOG (FLUID_ERR, "Error setting the selected output device. Status=%ld\n", (long int)status);
                goto error_recovery;
              }
            }
          }
        }
      }
    }
  }

  if (devname)
    FLUID_FREE (devname); /* free device name */

  dev->buffer_size = period_size * periods;

  // The DefaultOutputUnit should do any format conversions
  // necessary from our format to the device's format.
  dev->format.mSampleRate = sample_rate; // sample rate of the audio stream
  dev->format.mFormatID = kAudioFormatLinearPCM; // encoding type of the audio stream
  dev->format.mFormatFlags = kLinearPCMFormatFlagIsFloat;
  dev->format.mBytesPerPacket = 2*sizeof(float);  // stereo float
  dev->format.mFramesPerPacket = 1;
  dev->format.mBytesPerFrame = 2*sizeof(float);
  dev->format.mChannelsPerFrame = 2;
  dev->format.mBitsPerChannel = 8*sizeof(float);

  FLUID_LOG (FLUID_DBG, "mSampleRate %g", dev->format.mSampleRate);
  FLUID_LOG (FLUID_DBG, "mFormatFlags %08X", dev->format.mFormatFlags);
  FLUID_LOG (FLUID_DBG, "mBytesPerPacket %d", dev->format.mBytesPerPacket);
  FLUID_LOG (FLUID_DBG, "mFramesPerPacket %d", dev->format.mFramesPerPacket);
  FLUID_LOG (FLUID_DBG, "mChannelsPerFrame %d", dev->format.mChannelsPerFrame);
  FLUID_LOG (FLUID_DBG, "mBytesPerFrame %d", dev->format.mBytesPerFrame);
  FLUID_LOG (FLUID_DBG, "mBitsPerChannel %d", dev->format.mBitsPerChannel);

  status = AudioUnitSetProperty (dev->outputUnit,
                                 kAudioUnitProperty_StreamFormat,
                                 kAudioUnitScope_Input,
                                 0,
                                 &dev->format,
                                 sizeof(AudioStreamBasicDescription));
  if (status != noErr) {
    FLUID_LOG (FLUID_ERR, "Error setting the audio format. Status=%ld\n", (long int)status);
    goto error_recovery;
  }

  status = AudioUnitSetProperty (dev->outputUnit,
                                 kAudioUnitProperty_MaximumFramesPerSlice,
                                 kAudioUnitScope_Input,
                                 0,
                                 &dev->buffer_size,
                                 sizeof(unsigned int));
  if (status != noErr) {
    FLUID_LOG (FLUID_ERR, "Failed to set the MaximumFramesPerSlice. Status=%ld\n", (long int)status);
    goto error_recovery;
  }
  FLUID_LOG (FLUID_DBG, "MaximumFramesPerSlice = %d", dev->buffer_size);

  /* One render buffer per channel (stereo). */
  dev->buffers[0] = FLUID_ARRAY(float, dev->buffer_size);
  dev->buffers[1] = FLUID_ARRAY(float, dev->buffer_size);

  // Initialize the audio unit
  status = AudioUnitInitialize(dev->outputUnit);
  if (status != noErr) {
    FLUID_LOG (FLUID_ERR, "Error calling AudioUnitInitialize(). Status=%ld\n", (long int)status);
    goto error_recovery;
  }

  // Start the rendering
  status = AudioOutputUnitStart (dev->outputUnit);
  if (status != noErr) {
    FLUID_LOG (FLUID_ERR, "Error calling AudioOutputUnitStart(). Status=%ld\n", (long int)status);
    goto error_recovery;
  }

  return (fluid_audio_driver_t*) dev;

 error_recovery:
  delete_fluid_core_audio_driver((fluid_audio_driver_t*) dev);
  return NULL;
}
/* Command-line AudioUnit file processor.
 *
 * Usage: -u TYPE SUBTYPE MANUF (the AU to run, as three OSType codes),
 *        -f PATH (input audio file), -o (overwrite output).
 * Opens the requested unit, validates the input file is linear PCM,
 * creates the output file and hands everything to Process().
 * Returns 0 on success, -1 on usage errors, or the OSStatus of the
 * first failing file operation. */
int main (int argc, const char * argv[])
{
	char* filePath = NULL;
	bool overwrite = false;
	ComponentDescription compDesc = {0, 0, 0, 0, 0};
	AudioFileID inputFileID = 0;
	AudioFileID outputFileID = 0;
	CAStreamBasicDescription desc;
	AudioUnit theUnit = 0;

	setbuf (stdout, NULL);   // unbuffered stdout for progress output

	for (int i = 1; i < argc; ++i)
	{
		if (strcmp (argv[i], "-u") == 0) {
			if ( (i + 3) < argc ) {
				// Three OSType codes select the component.
				compDesc.componentType = str2OSType (argv[i + 1]);
				compDesc.componentSubType = str2OSType (argv[i + 2]);
				compDesc.componentManufacturer = str2OSType (argv[i + 3]);
				Component comp = FindNextComponent (NULL, &compDesc);
				if (comp == NULL)
					break;   // theUnit stays 0 and is reported below
				OpenAComponent (comp, &theUnit);
				i += 3;
			} else {
				printf ("Which Component:\n%s", usageStr);
				return -1;
			}
		}
		else if (strcmp (argv[i], "-f") == 0) {
			filePath = const_cast<char*>(argv[++i]);
			printf ("Input File:%s\n", filePath);
		}
		else if (strcmp (argv[i], "-o") == 0) {
			overwrite = true;
		}
		else {
			printf ("%s\n", usageStr);
			return -1;
		}
	}

	if (compDesc.componentType == 0) {
		printf ("Must specify AU:\n%s\n", usageStr);
		return -1;
	}
	if (theUnit == 0) {
		printf ("Can't find specified unit\n");
		return -1;
	}
	if (filePath == NULL) {
		printf ("Must specify file to process:\n%s\n", usageStr);
		return -1;
	}

	OSStatus result = 0;
	// Intentional assignment-in-condition: non-zero status is an error.
	if (result = InputFile (filePath, inputFileID)) {
		printf ("Result = %ld, parsing input file, exit...\n", result);
		return result;
	}

	UInt32 fileType;
	UInt32 size = sizeof (fileType);
	result = AudioFileGetProperty (inputFileID, kAudioFilePropertyFileFormat, &size, &fileType);
	if (result) {
		printf ("Error getting File Type of input file:%ld, exit...\n", result);
		return result;
	}
	size = sizeof (desc);
	result = AudioFileGetProperty (inputFileID, kAudioFilePropertyDataFormat, &size, &desc);
	if (result) {
		printf ("Error getting File Format of input file:%ld, exit...\n", result);
		return result;
	}
	// Only uncompressed input is supported.
	if (desc.IsPCM() == false) {
		printf ("Only processing linear PCM file types and data:\n");
		desc.Print();
		return -1;
	}

	result = OutputFile (filePath, fileType, compDesc.componentSubType, overwrite, desc, outputFileID);
	if (result) {
		printf ("Error creating output file:%ld, exit...\n", result);
		return result;
	}
	// at this point we're ready to process
	return Process (theUnit, compDesc, inputFileID, desc, outputFileID);
}
void *runPluginLoop(void *plug) { AudioUnit outputUnit; OSStatus err = noErr; // Open the default output unit ComponentDescription desc; desc.componentType = kAudioUnitType_Output; desc.componentSubType = kAudioUnitSubType_DefaultOutput; desc.componentManufacturer = kAudioUnitManufacturer_Apple; desc.componentFlags = 0; desc.componentFlagsMask = 0; Component comp = FindNextComponent(NULL, &desc); if(comp == NULL) { debug(LOG_ERROR, "FindNextComponent failed"); return NULL; } err = OpenAComponent(comp, &outputUnit); if(comp == NULL) { debug(LOG_ERROR, "OpenAComponent failed with error code %ld\n", err); return NULL; } // Set up a callback function to generate output to the output unit AURenderCallbackStruct input; input.inputProc = processData; input.inputProcRefCon = plug; err = AudioUnitSetProperty(outputUnit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, 0, &input, sizeof(input)); AudioStreamBasicDescription streamFormat; streamFormat.mSampleRate = DEF_SAMPLE_RATE; streamFormat.mFormatID = kAudioFormatLinearPCM; streamFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsBigEndian | kLinearPCMFormatFlagIsPacked | kAudioFormatFlagIsNonInterleaved; streamFormat.mBytesPerPacket = 2; streamFormat.mFramesPerPacket = 1; streamFormat.mBytesPerFrame = 2; streamFormat.mChannelsPerFrame = 2; streamFormat.mBitsPerChannel = 16; err = AudioUnitSetProperty(outputUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &streamFormat, sizeof(AudioStreamBasicDescription)); if(err) { debug(LOG_ERROR, "AudioUnitSetProperty-SF failed with code %4.4s, %ld\n", (char*)&err, err); return NULL; } // Initialize unit err = AudioUnitInitialize(outputUnit); if(err) { debug(LOG_ERROR, "AudioUnitInitialize failed with code %ld\n", err); return NULL; } Float64 outSampleRate; UInt32 size = sizeof(Float64); err = AudioUnitGetProperty(outputUnit, kAudioUnitProperty_SampleRate, kAudioUnitScope_Output, 0, &outSampleRate, &size); if(err) { 
debug(LOG_ERROR, "AudioUnitSetProperty-GF failed with code %4.4s, %ld\n", (char*)&err, err); return NULL; } // Start the rendering // The DefaultOutputUnit will do any format conversions to the format of the default device err = AudioOutputUnitStart(outputUnit); if(err) { debug(LOG_ERROR, "AudioOutputUnitStart failed with code %ld\n", err); return NULL; } // Loop until this thread is killed CFRunLoopRun(); // REALLY after you're finished playing STOP THE AUDIO OUTPUT UNIT!!!!!! // but we never get here because we're running until the process is nuked... AudioOutputUnitStop(outputUnit); err = AudioUnitUninitialize(outputUnit); if(err) { debug(LOG_ERROR, "AudioUnitUninitialize failed with code %ld\n", err); return NULL; } return NULL; }
/* audiounit_stream_init
 *
 * Create a cubeb stream on the default CoreAudio output unit, clamping
 * the requested latency (ms) to the range the device reports as
 * acceptable.  Returns CUBEB_OK on success, CUBEB_ERROR on any
 * AudioUnit failure (after tearing down the partial stream), or
 * CUBEB_ERROR_INVALID_FORMAT for an unknown sample format.
 * stream_name is accepted but unused. */
static int
audiounit_stream_init(cubeb * context, cubeb_stream ** stream,
                      char const * stream_name,
                      cubeb_stream_params stream_params,
                      unsigned int latency,
                      cubeb_data_callback data_callback,
                      cubeb_state_callback state_callback,
                      void * user_ptr)
{
  AudioStreamBasicDescription ss;
/* Component Manager was deprecated in 10.6 in favour of AudioComponent. */
#if MAC_OS_X_VERSION_MIN_REQUIRED < 1060
  ComponentDescription desc;
  Component comp;
#else
  AudioComponentDescription desc;
  AudioComponent comp;
#endif
  cubeb_stream * stm;
  AURenderCallbackStruct input;
  unsigned int buffer_size, default_buffer_size;
  OSStatus r;
  UInt32 size;
  AudioDeviceID output_device_id;  /* NOTE(review): never used below — confirm */
  AudioValueRange latency_range;

  assert(context);
  *stream = NULL;

  /* Translate the cubeb sample format into a linear-PCM ASBD. */
  memset(&ss, 0, sizeof(ss));
  ss.mFormatFlags = 0;

  switch (stream_params.format) {
  case CUBEB_SAMPLE_S16LE:
    ss.mBitsPerChannel = 16;
    ss.mFormatFlags |= kAudioFormatFlagIsSignedInteger;
    break;
  case CUBEB_SAMPLE_S16BE:
    ss.mBitsPerChannel = 16;
    ss.mFormatFlags |= kAudioFormatFlagIsSignedInteger |
                       kAudioFormatFlagIsBigEndian;
    break;
  case CUBEB_SAMPLE_FLOAT32LE:
    ss.mBitsPerChannel = 32;
    ss.mFormatFlags |= kAudioFormatFlagIsFloat;
    break;
  case CUBEB_SAMPLE_FLOAT32BE:
    ss.mBitsPerChannel = 32;
    ss.mFormatFlags |= kAudioFormatFlagIsFloat |
                       kAudioFormatFlagIsBigEndian;
    break;
  default:
    return CUBEB_ERROR_INVALID_FORMAT;
  }

  ss.mFormatID = kAudioFormatLinearPCM;
  ss.mFormatFlags |= kLinearPCMFormatFlagIsPacked;
  ss.mSampleRate = stream_params.rate;
  ss.mChannelsPerFrame = stream_params.channels;

  ss.mBytesPerFrame = (ss.mBitsPerChannel / 8) * ss.mChannelsPerFrame;
  ss.mFramesPerPacket = 1;
  ss.mBytesPerPacket = ss.mBytesPerFrame * ss.mFramesPerPacket;

  /* Locate the system default output unit. */
  desc.componentType = kAudioUnitType_Output;
  desc.componentSubType = kAudioUnitSubType_DefaultOutput;
  desc.componentManufacturer = kAudioUnitManufacturer_Apple;
  desc.componentFlags = 0;
  desc.componentFlagsMask = 0;
#if MAC_OS_X_VERSION_MIN_REQUIRED < 1060
  comp = FindNextComponent(NULL, &desc);
#else
  comp = AudioComponentFindNext(NULL, &desc);
#endif
  assert(comp);

  stm = calloc(1, sizeof(*stm));
  assert(stm);

  stm->context = context;
  stm->data_callback = data_callback;
  stm->state_callback = state_callback;
  stm->user_ptr = user_ptr;

  stm->sample_spec = ss;

  r = pthread_mutex_init(&stm->mutex, NULL);
  assert(r == 0);

  stm->frames_played = 0;
  stm->frames_queued = 0;
  stm->current_latency_frames = 0;
  /* Presumably a "not yet measured" sentinel — confirm against the
   * latency-reporting code elsewhere in the file. */
  stm->hw_latency_frames = UINT64_MAX;

  /* Instantiate the output unit; from here on audiounit_stream_destroy
   * handles cleanup of everything built so far. */
#if MAC_OS_X_VERSION_MIN_REQUIRED < 1060
  r = OpenAComponent(comp, &stm->unit);
#else
  r = AudioComponentInstanceNew(comp, &stm->unit);
#endif
  if (r != 0) {
    audiounit_stream_destroy(stm);
    return CUBEB_ERROR;
  }

  /* Install the render callback: the unit pulls audio from us. */
  input.inputProc = audiounit_output_callback;
  input.inputProcRefCon = stm;
  r = AudioUnitSetProperty(stm->unit, kAudioUnitProperty_SetRenderCallback,
                           kAudioUnitScope_Global, 0, &input, sizeof(input));
  if (r != 0) {
    audiounit_stream_destroy(stm);
    return CUBEB_ERROR;
  }

  /* Requested latency in frames. */
  buffer_size = latency / 1000.0 * ss.mSampleRate;

  /* Get the range of latency this particular device can work with, and clamp
   * the requested latency to this acceptable range. */
  if (audiounit_get_acceptable_latency_range(&latency_range) != CUBEB_OK) {
    audiounit_stream_destroy(stm);
    return CUBEB_ERROR;
  }

  if (buffer_size < (unsigned int) latency_range.mMinimum) {
    buffer_size = (unsigned int) latency_range.mMinimum;
  } else if (buffer_size > (unsigned int) latency_range.mMaximum) {
    buffer_size = (unsigned int) latency_range.mMaximum;
  }

  /**
   * Get the default buffer size. If our latency request is below the default,
   * set it. Otherwise, use the default latency.
   **/
  size = sizeof(default_buffer_size);
  r = AudioUnitGetProperty(stm->unit, kAudioDevicePropertyBufferFrameSize,
                           kAudioUnitScope_Output, 0, &default_buffer_size,
                           &size);
  if (r != 0) {
    audiounit_stream_destroy(stm);
    return CUBEB_ERROR;
  }

  // Setting the latency doesn't work well for USB headsets (eg. plantronics).
  // Keep the default latency for now.
#if 0
  if (buffer_size < default_buffer_size) {
    /* Set the maximum number of frame that the render callback will ask for,
     * effectively setting the latency of the stream. This is process-wide. */
    r = AudioUnitSetProperty(stm->unit, kAudioDevicePropertyBufferFrameSize,
                             kAudioUnitScope_Output, 0, &buffer_size,
                             sizeof(buffer_size));
    if (r != 0) {
      audiounit_stream_destroy(stm);
      return CUBEB_ERROR;
    }
  }
#endif

  r = AudioUnitSetProperty(stm->unit, kAudioUnitProperty_StreamFormat,
                           kAudioUnitScope_Input, 0, &ss, sizeof(ss));
  if (r != 0) {
    audiounit_stream_destroy(stm);
    return CUBEB_ERROR;
  }

  r = AudioUnitInitialize(stm->unit);
  if (r != 0) {
    audiounit_stream_destroy(stm);
    return CUBEB_ERROR;
  }

  *stream = stm;

  return CUBEB_OK;
}
/// Construct a CoreAudio output backed by a ring buffer.
///
/// Allocates the AU spinlock and ring buffer, instantiates the default
/// output AudioUnit (AudioComponent API when built for and running on
/// 10.6+, Component Manager otherwise), installs the render callback
/// and the SPU stream format, then initializes the unit.  On any
/// CoreAudio failure the constructor returns early, leaving the object
/// only partially set up.
CoreAudioOutput::CoreAudioOutput(size_t bufferSamples, size_t sampleSize)
{
	OSStatus error = noErr;

	// NOTE(review): malloc result is dereferenced without a NULL check.
	_spinlockAU = (OSSpinLock *)malloc(sizeof(OSSpinLock));
	*_spinlockAU = OS_SPINLOCK_INIT;

	_buffer = new RingBuffer(bufferSamples, sampleSize);
	_volume = 1.0f;

	// Create a new audio unit
#if defined(MAC_OS_X_VERSION_10_6) && MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6
	// Built against a 10.6+ SDK: choose the API at runtime so the same
	// binary still works on older systems.
	if (IsOSXVersionSupported(10, 6, 0))
	{
		AudioComponentDescription audioDesc;
		audioDesc.componentType = kAudioUnitType_Output;
		audioDesc.componentSubType = kAudioUnitSubType_DefaultOutput;
		audioDesc.componentManufacturer = kAudioUnitManufacturer_Apple;
		audioDesc.componentFlags = 0;
		audioDesc.componentFlagsMask = 0;

		AudioComponent audioComponent = AudioComponentFindNext(NULL, &audioDesc);
		if (audioComponent == NULL)
		{
			return;
		}

		error = AudioComponentInstanceNew(audioComponent, &_au);
		if (error != noErr)
		{
			return;
		}
	}
	else
	{
		// Runtime fallback for pre-10.6 systems: Component Manager.
		ComponentDescription audioDesc;
		audioDesc.componentType = kAudioUnitType_Output;
		audioDesc.componentSubType = kAudioUnitSubType_DefaultOutput;
		audioDesc.componentManufacturer = kAudioUnitManufacturer_Apple;
		audioDesc.componentFlags = 0;
		audioDesc.componentFlagsMask = 0;

		Component audioComponent = FindNextComponent(NULL, &audioDesc);
		if (audioComponent == NULL)
		{
			return;
		}

		error = OpenAComponent(audioComponent, &_au);
		if (error != noErr)
		{
			return;
		}
	}
#else
	// Built against a pre-10.6 SDK: only the Component Manager exists.
	ComponentDescription audioDesc;
	audioDesc.componentType = kAudioUnitType_Output;
	audioDesc.componentSubType = kAudioUnitSubType_DefaultOutput;
	audioDesc.componentManufacturer = kAudioUnitManufacturer_Apple;
	audioDesc.componentFlags = 0;
	audioDesc.componentFlagsMask = 0;

	Component audioComponent = FindNextComponent(NULL, &audioDesc);
	if (audioComponent == NULL)
	{
		return;
	}

	error = OpenAComponent(audioComponent, &_au);
	if (error != noErr)
	{
		return;
	}
#endif

	// Set the render callback; the ring buffer is the refcon the
	// callback pulls samples from.
	AURenderCallbackStruct callback;
	callback.inputProc = &CoreAudioOutputRenderCallback;
	callback.inputProcRefCon = _buffer;

	error = AudioUnitSetProperty(_au,
	                             kAudioUnitProperty_SetRenderCallback,
	                             kAudioUnitScope_Input,
	                             0,
	                             &callback,
	                             sizeof(callback) );

	if(error != noErr)
	{
		return;
	}

	// Set up the audio unit for audio streaming
	AudioStreamBasicDescription outputFormat;
	outputFormat.mSampleRate = SPU_SAMPLE_RATE;
	outputFormat.mFormatID = kAudioFormatLinearPCM;
	outputFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger
	                            | kAudioFormatFlagsNativeEndian
	                            | kLinearPCMFormatFlagIsPacked;
	outputFormat.mBytesPerPacket = SPU_SAMPLE_SIZE;
	outputFormat.mFramesPerPacket = 1;
	outputFormat.mBytesPerFrame = SPU_SAMPLE_SIZE;
	outputFormat.mChannelsPerFrame = SPU_NUMBER_CHANNELS;
	outputFormat.mBitsPerChannel = SPU_SAMPLE_RESOLUTION;

	error = AudioUnitSetProperty(_au,
	                             kAudioUnitProperty_StreamFormat,
	                             kAudioUnitScope_Input,
	                             0,
	                             &outputFormat,
	                             sizeof(outputFormat) );

	if(error != noErr)
	{
		return;
	}

	// Initialize our new audio unit
	error = AudioUnitInitialize(_au);
	if(error != noErr)
	{
		return;
	}
}
int cubeb_stream_init(cubeb * context, cubeb_stream ** stream, char const * stream_name, cubeb_stream_params stream_params, unsigned int latency, cubeb_data_callback data_callback, cubeb_state_callback state_callback, void * user_ptr) { AudioStreamBasicDescription ss; ComponentDescription desc; cubeb_stream * stm; Component comp; AURenderCallbackStruct input; unsigned int buffer_size; OSStatus r; assert(context == (void *) 0xdeadbeef); *stream = NULL; if (stream_params.rate < 1 || stream_params.rate > 192000 || stream_params.channels < 1 || stream_params.channels > 32 || latency < 1 || latency > 2000) { return CUBEB_ERROR_INVALID_FORMAT; } memset(&ss, 0, sizeof(ss)); ss.mFormatFlags = 0; switch (stream_params.format) { case CUBEB_SAMPLE_S16LE: ss.mBitsPerChannel = 16; ss.mFormatFlags |= kAudioFormatFlagIsSignedInteger; break; case CUBEB_SAMPLE_S16BE: ss.mBitsPerChannel = 16; ss.mFormatFlags |= kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsBigEndian; break; case CUBEB_SAMPLE_FLOAT32LE: ss.mBitsPerChannel = 32; ss.mFormatFlags |= kAudioFormatFlagIsFloat; break; case CUBEB_SAMPLE_FLOAT32BE: ss.mBitsPerChannel = 32; ss.mFormatFlags |= kAudioFormatFlagIsFloat | kAudioFormatFlagIsBigEndian; break; default: return CUBEB_ERROR_INVALID_FORMAT; } ss.mFormatID = kAudioFormatLinearPCM; ss.mFormatFlags |= kLinearPCMFormatFlagIsPacked; ss.mSampleRate = stream_params.rate; ss.mChannelsPerFrame = stream_params.channels; ss.mBytesPerFrame = (ss.mBitsPerChannel / 8) * ss.mChannelsPerFrame; ss.mFramesPerPacket = 1; ss.mBytesPerPacket = ss.mBytesPerFrame * ss.mFramesPerPacket; desc.componentType = kAudioUnitType_Output; desc.componentSubType = kAudioUnitSubType_DefaultOutput; desc.componentManufacturer = kAudioUnitManufacturer_Apple; desc.componentFlags = 0; desc.componentFlagsMask = 0; comp = FindNextComponent(NULL, &desc); assert(comp); stm = calloc(1, sizeof(*stm)); assert(stm); stm->data_callback = data_callback; stm->state_callback = state_callback; stm->user_ptr = 
user_ptr; stm->sample_spec = ss; r = pthread_mutex_init(&stm->mutex, NULL); assert(r == 0); stm->frames_played = 0; stm->frames_queued = 0; r = OpenAComponent(comp, &stm->unit); if (r != 0) { fprintf(stderr, "cubeb_audiounit: FATAL: OpenAComponent returned %ld\n", (long) r); } assert(r == 0); input.inputProc = audio_unit_output_callback; input.inputProcRefCon = stm; r = AudioUnitSetProperty(stm->unit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Global, 0, &input, sizeof(input)); if (r != 0) { fprintf(stderr, "cubeb_audiounit: FATAL: AudioUnitSetProperty(SetRenderCallback) returned %ld\n", (long) r); } assert(r == 0); r = AudioUnitSetProperty(stm->unit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &ss, sizeof(ss)); if (r != 0) { fprintf(stderr, "cubeb_audiounit: FATAL: AudioUnitSetProperty(StreamFormat) returned %ld\n", (long) r); } assert(r == 0); buffer_size = ss.mSampleRate / 1000.0 * latency * ss.mBytesPerFrame / NBUFS; if (buffer_size % ss.mBytesPerFrame != 0) { buffer_size += ss.mBytesPerFrame - (buffer_size % ss.mBytesPerFrame); } assert(buffer_size % ss.mBytesPerFrame == 0); r = AudioUnitInitialize(stm->unit); if (r != 0) { fprintf(stderr, "cubeb_audiounit: FATAL: AudioUnitInitialize returned %ld\n", (long) r); } assert(r == 0); *stream = stm; return CUBEB_OK; }
/*
 * Open, configure, initialize and start an AudioUnit for capture (is_read)
 * or playback (!is_read), storing the instance in d->au.
 *
 * Uses the HAL output unit when an explicit device (d->dev != -1) was
 * selected, otherwise the default output unit.  Returns 0 on success,
 * -1 on failure (CHECK_AURESULT is assumed to bail out on OSStatus errors).
 *
 * Fix: the source text was corrupted -- every "&param" argument had been
 * mangled into "¶m" (an HTML-entity decoding artifact, &para; + "m"),
 * which does not compile.  All five occurrences are restored below.
 */
static int audio_unit_open(AUCommon *d, bool_t is_read)
{
	OSStatus result;
	UInt32 param;
	ComponentDescription desc;
	Component comp;
	AudioStreamBasicDescription asbd;
	const int input_bus = 1;  /* AUHAL element 1 = input (capture) side */
	const int output_bus = 0; /* AUHAL element 0 = output (render) side */

	/* Get Default Input audio unit */
	desc.componentType = kAudioUnitType_Output;
	desc.componentSubType = d->dev != -1 ? kAudioUnitSubType_HALOutput : kAudioUnitSubType_DefaultOutput;
	desc.componentManufacturer = kAudioUnitManufacturer_Apple;
	desc.componentFlags = 0;
	desc.componentFlagsMask = 0;

	comp = FindNextComponent(NULL, &desc);
	if (comp == NULL) {
		ms_message("Cannot find audio component");
		return -1;
	}

	result = OpenAComponent(comp, &d->au);
	if (result != noErr) {
		ms_message("Cannot open audio component %x", result);
		return -1;
	}

	param = is_read;
	if (d->dev != -1) {
		/* Enable IO on the input element iff capturing, and on the
		   output element iff playing. */
		CHECK_AURESULT(AudioUnitSetProperty(d->au,
					kAudioOutputUnitProperty_EnableIO,
					kAudioUnitScope_Input,
					input_bus,
					&param,
					sizeof(UInt32)));

		param = !is_read;
		CHECK_AURESULT(AudioUnitSetProperty(d->au,
					kAudioOutputUnitProperty_EnableIO,
					kAudioUnitScope_Output,
					output_bus,
					&param,
					sizeof(UInt32)));

		/* Set the current device */
		CHECK_AURESULT(AudioUnitSetProperty(d->au,
					kAudioOutputUnitProperty_CurrentDevice,
					kAudioUnitScope_Global,
					output_bus,
					&d->dev,
					sizeof(AudioDeviceID)));
	}

	/* We manage our own buffers in the render callbacks. */
	param = 0;
	CHECK_AURESULT(AudioUnitSetProperty(d->au,
				kAudioUnitProperty_ShouldAllocateBuffer,
				is_read ? kAudioUnitScope_Input : kAudioUnitScope_Output,
				is_read ? input_bus : output_bus,
				&param,
				sizeof(param)));

	UInt32 asbdsize = sizeof(AudioStreamBasicDescription);
	memset((char *)&asbd, 0, asbdsize);

	/* Read the current hardware-side format before overwriting it below. */
	CHECK_AURESULT(AudioUnitGetProperty(d->au,
				kAudioUnitProperty_StreamFormat,
				is_read ? kAudioUnitScope_Input : kAudioUnitScope_Output,
				is_read ? input_bus : output_bus,
				&asbd,
				&asbdsize));
	show_format(is_read ? "Input audio unit" : "Output audio unit", &asbd);

	/* 16-bit signed packed PCM at the configured rate/channel count. */
	asbd.mSampleRate = d->rate;
	asbd.mBytesPerPacket = asbd.mBytesPerFrame = 2 * d->nchannels;
	asbd.mChannelsPerFrame = d->nchannels;
	asbd.mBitsPerChannel = 16;
	asbd.mFormatID = kAudioFormatLinearPCM;
	asbd.mFormatFlags = kAudioFormatFlagIsPacked | kAudioFormatFlagIsSignedInteger;

	/* The client-facing scope is Output for capture, Input for playback. */
	CHECK_AURESULT(AudioUnitSetProperty(d->au,
				kAudioUnitProperty_StreamFormat,
				is_read ? kAudioUnitScope_Output : kAudioUnitScope_Input,
				is_read ? input_bus : output_bus,
				&asbd,
				sizeof(AudioStreamBasicDescription)));

	CHECK_AURESULT(AudioUnitGetProperty(d->au,
				kAudioUnitProperty_StreamFormat,
				is_read ? kAudioUnitScope_Output : kAudioUnitScope_Input,
				is_read ? input_bus : output_bus,
				&asbd,
				&asbdsize));
	show_format(is_read ? "Input audio unit after configuration" : "Output audio unit after configuration", &asbd);

	/* Get the number of frames in the IO buffer(s) */
	param = sizeof(UInt32);
	UInt32 numFrames;
	CHECK_AURESULT(AudioUnitGetProperty(d->au,
				kAudioDevicePropertyBufferFrameSize,
				kAudioUnitScope_Input,
				input_bus,
				&numFrames,
				&param));
	ms_message("Number of frames per buffer = %i", numFrames);

	/* Install the appropriate callback for the chosen direction. */
	AURenderCallbackStruct cbs;
	cbs.inputProcRefCon = d;
	if (is_read) {
		cbs.inputProc = readRenderProc;
		CHECK_AURESULT(AudioUnitSetProperty(d->au,
					kAudioOutputUnitProperty_SetInputCallback,
					kAudioUnitScope_Global,
					input_bus,
					&cbs,
					sizeof(AURenderCallbackStruct)));
	} else {
		cbs.inputProc = writeRenderProc;
		CHECK_AURESULT(AudioUnitSetProperty(d->au,
					kAudioUnitProperty_SetRenderCallback,
					kAudioUnitScope_Global,
					output_bus,
					&cbs,
					sizeof(AURenderCallbackStruct)));
	}

	result = AudioUnitInitialize(d->au);
	if (result != noErr) {
		ms_error("failed to AudioUnitInitialize %i , is_read=%i", result, (int)is_read);
		return -1;
	}

	CHECK_AURESULT(AudioOutputUnitStart(d->au));
	return 0;
}