int QBSound_SetCategory(const char* category) { int type = 0; if (category == NULL) { } else if (strcmp(category,"ambient") == 0) { type = 1; } switch (type) { case 0: { AudioSessionInitialize(NULL, NULL, InterruptionListener, NULL); UInt32 category = kAudioSessionCategory_SoloAmbientSound; AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(category), &category); AudioSessionSetActive(true); } break; case 1: { AudioSessionInitialize(NULL, NULL, InterruptionListener, NULL); UInt32 category = kAudioSessionCategory_AmbientSound; AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(category), &category); AudioSessionSetActive(true); } break; } return 0; }
/* Backend bootstrap: fills in the driver's function pointers and, on iOS,
   configures the shared audio session. Returns 1 (target is available). */
static int COREAUDIO_Init(SDL_AudioDriverImpl * impl)
{
    /* Set the function pointers */
    impl->OpenDevice = COREAUDIO_OpenDevice;
    impl->CloseDevice = COREAUDIO_CloseDevice;
#if MACOSX_COREAUDIO
    /* Mac OS X: this backend can enumerate output devices. */
    impl->DetectDevices = COREAUDIO_DetectDevices;
#else
    /* iOS: only the default output device exists. */
    impl->OnlyHasDefaultOutputDevice = 1;

    /* Set category to ambient sound so that other music continues playing.
       You can change this at runtime in your own code if you need different
       behavior. If this is common, we can add an SDL hint for this. */
    /* NOTE(review): no interruption listener is installed (3rd arg NULL),
       and the return statuses are ignored — presumably interruptions are
       handled elsewhere in this backend; confirm. */
    AudioSessionInitialize(NULL, NULL, NULL, nil);
    UInt32 category = kAudioSessionCategory_AmbientSound;
    AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(UInt32), &category);
#endif

    /* The audio queue / render callback runs on a CoreAudio-owned thread. */
    impl->ProvidesOwnCallbackThread = 1;

    return 1;   /* this audio target is available. */
}
int audio_session_enable(void) { OSStatus res; UInt32 category; res = AudioSessionInitialize(NULL, NULL, interruptionListener, 0); if (res && res != 1768843636) return ENODEV; category = kAudioSessionCategory_PlayAndRecord; res = AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(category), &category); if (res) { warning("coreaudio: Audio Category: %d\n", res); return ENODEV; } res = AudioSessionSetActive(true); if (res) { warning("coreaudio: AudioSessionSetActive: %d\n", res); return ENODEV; } return 0; }
void KLAL_InitSystem() { OSStatus err = AudioSessionInitialize(NULL, NULL, KLAL_Interruption, NULL); if(err) { KLLog("[ KLAL ] ERR: AudioSessionInitialize:%x\n", (int)err); } err = AudioSessionSetActive(TRUE); if(err) { KLLog("[ KLAL ] ERR: AudioSessionSetActive:%x\n", (int)err); } UInt32 sessionCategory = kAudioSessionCategory_AmbientSound; AudioSessionSetProperty( kAudioSessionProperty_AudioCategory, sizeof (sessionCategory), &sessionCategory ); klal_pDevice = alcOpenDevice(NULL); klal_pContext= alcCreateContext(klal_pDevice, NULL); #if KLAL_ERRCHECK ALenum aen = alGetError(); if( aen!=AL_NO_ERROR ) { KLLog("[ KLAL ] %s %u\n", __func__, aen); } #endif alcMakeContextCurrent(klal_pContext); klal_is_loaded = TRUE; }
/* Create a sound-card object for the Core Audio backend and register
   the interruption listener for it with the iOS audio session. */
static MSSndCard *au_card_new(const char* name)
{
	MSSndCard *new_card;
	OSStatus status;

	new_card = ms_snd_card_new_with_name(&au_card_desc, name);
	status = AudioSessionInitialize(NULL, NULL, au_interuption_listener, new_card);
	/* A repeated initialization only reports "already initialized",
	   which is expected; anything else goes through the checker. */
	if (status == kAudioSessionAlreadyInitialized)
		return new_card;
	check_auresult(status, "AudioSessionInitialize");
	return new_card;
}
//============================================================================== IPhoneAudioIODevice (const String& deviceName) : AudioIODevice (deviceName, "Audio"), actualBufferSize (0), isRunning (false), audioUnit (0), callback (nullptr), floatData (1, 2) { numInputChannels = 2; numOutputChannels = 2; preferredBufferSize = 0; AudioSessionInitialize (0, 0, interruptionListenerStatic, this); updateDeviceInfo(); }
bool IPhoneSoundDevice::Init() { // Initialize the default audio session object to tell it // to allow background music, and to tell us when audio // gets resumed (like if a phone call comes in, iphone takes // over audio. If the user then ignores the phone call, the // audio needs to be turned on again. AudioSessionInitialize(NULL, NULL, wi::InterruptionListener, this); UInt32 category = kAudioSessionCategory_UserInterfaceSoundEffects; AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(category), &category); AudioSessionSetActive(true); // Set up streaming AudioStreamBasicDescription desc; desc.mSampleRate = 8000; desc.mFormatID = kAudioFormatLinearPCM; desc.mFormatFlags = kAudioFormatFlagIsPacked; desc.mBytesPerPacket = 1; desc.mFramesPerPacket = 1; desc.mBytesPerFrame = 1; desc.mChannelsPerFrame = 1; desc.mBitsPerChannel = 8; OSStatus err = AudioQueueNewOutput(&desc, AudioCallback, this, NULL, kCFRunLoopCommonModes, 0, &m_haq); if (err != 0) { return false; } for (int i = 0; i < kcBuffers; i++) { err = AudioQueueAllocateBuffer(m_haq, kcbBuffer, &m_apaqb[i]); if (err != 0) { return false; } } return true; }
/* *Audio Session Configuration. * Requests an audio session from core audio and configures it for effects processing by default (one input, one output). * <Sam> All major configurations are set for the AudioSession Instance here */ int MUEAudioIO::configureAudioSession() { try { // Initialize and configure the audio session AudioSessionInitialize(NULL, NULL, rioInterruptionListener, this); AudioSessionSetActive(true); //audio should not mix with iPod audio, and we want input and output. UInt32 audioCategory = kAudioSessionCategory_PlayAndRecord; //audio will mix with iPod audio, but we get output only (no input) with this type of session //UInt32 audioCategory = kAudioSessionCategory_AmbientSound; AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(audioCategory), &audioCategory); // The entire purpose of the propListener is to detect a change in signal flow (headphones w/ mic or even third party device) AudioSessionAddPropertyListener(kAudioSessionProperty_AudioRouteChange, propListener, this); //(TODO) make get/set preferred buffer size // This value is in seconds! We want really low latency... preferredBufferSize = .01; // .005 for buffer of 256, .01 for buffer of 512 AudioSessionSetProperty(kAudioSessionProperty_PreferredHardwareIOBufferDuration, sizeof(preferredBufferSize), &preferredBufferSize); // Related to our propListener. When the signal flow changes, sometimes the hardware sample rate can change. You'll notice in the propListener it checks for a new one. UInt32 size = sizeof(hwSampleRate); AudioSessionGetProperty(kAudioSessionProperty_CurrentHardwareSampleRate, &size, &hwSampleRate); } catch (...) { printf("An unknown error occurred in audio session configuration!\n"); //if (url) CFRelease(url); } return 0; }
/*
 * Set up the audio session and the RemoteIO audio unit for `o`.
 *
 * Configures the session (interruption and route-change listeners,
 * category, preferred IO buffer duration from o->latency), then creates
 * a RemoteIO unit, enables its input bus, caps frames-per-slice at
 * MIN(o->blocksize, MAX_FPS), installs the render callback, and applies
 * a 16-bit interleaved stereo PCM stream format on both scopes before
 * initializing the unit.
 *
 * Returns 0 on success, the failing OSStatus otherwise.
 *
 * Fix: the kAudioUnitProperty_SetRenderCallback call's return value was
 * previously discarded, so the `if (err)` that followed re-tested the
 * status of the preceding call; `err =` is now assigned.
 */
sint_t aubio_audio_unit_init (aubio_audio_unit_t *o)
{
  OSStatus err = noErr;
  Float32 latency = o->latency;
  Float64 samplerate = (Float64)o->samplerate;

  o->au_ios_cb_struct.inputProc = aubio_audio_unit_process;
  o->au_ios_cb_struct.inputProcRefCon = o;

  /* setting up audio session with interruption listener */
  err = AudioSessionInitialize(NULL, NULL, audio_unit_interruption_listener, o);
  if (err) { AUBIO_ERR("audio_unit: could not initialize audio session (%d)\n", (int)err); goto fail; }

  audio_unit_set_audio_session_category(o->input_enabled, o->verbose);
  audio_unit_check_audio_route(o);

  /* add route change listener */
  err = AudioSessionAddPropertyListener(kAudioSessionProperty_AudioRouteChange,
      audio_unit_route_change_listener, o);
  if (err) { AUBIO_ERR("audio_unit: could not set route change listener (%d)\n", (int)err); goto fail; }

  /* set latency */
  err = AudioSessionSetProperty(kAudioSessionProperty_PreferredHardwareIOBufferDuration,
      sizeof(latency), &latency);
  if (err) { AUBIO_ERR("audio_unit: could not set preferred latency (%d)\n", (int)err); goto fail; }

#if 0 // only for iphone OS >= 3.1
  UInt32 val = 1; // set to 0 (default) to use ear speaker in voice application
  err = AudioSessionSetProperty(kAudioSessionProperty_OverrideCategoryDefaultToSpeaker,
      sizeof(UInt32), &val);
  if (err) { AUBIO_ERR("audio_unit: could not set session property to default to speaker\n"); }
#endif

  /* setting up audio unit */
  AudioComponentDescription desc;
  desc.componentManufacturer = kAudioUnitManufacturer_Apple;
  desc.componentSubType = kAudioUnitSubType_RemoteIO;
  desc.componentType = kAudioUnitType_Output;
  desc.componentFlags = 0;
  desc.componentFlagsMask = 0;

  AudioStreamBasicDescription audioFormat;

  /* look for a component that match the description */
  AudioComponent comp = AudioComponentFindNext(NULL, &desc);

  /* create the audio component */
  AudioUnit *audio_unit = &(o->audio_unit);

  err = AudioComponentInstanceNew(comp, &(o->audio_unit));
  if (err) { AUBIO_ERR("audio_unit: failed creating the audio unit\n"); goto fail; }

  /* enable IO (input scope, bus 1 = microphone side of RemoteIO) */
  UInt32 enabled = 1;
  err = AudioUnitSetProperty (*audio_unit, kAudioOutputUnitProperty_EnableIO,
      kAudioUnitScope_Input, 1, &enabled, sizeof(enabled));
  if (err) { AUBIO_ERR("audio_unit: failed enabling input of audio unit\n"); goto fail; }

  /* set max fps */
  UInt32 max_fps = MIN(o->blocksize, MAX_FPS);
  err = AudioUnitSetProperty (*audio_unit, kAudioUnitProperty_MaximumFramesPerSlice,
      kAudioUnitScope_Global, 0, &max_fps, sizeof(max_fps));
  if (err) { AUBIO_ERR("audio_unit: could not set maximum frames per slice property (%d)\n", (int)err); goto fail; }

  /* install the render callback (err was previously not assigned here) */
  err = AudioUnitSetProperty (*audio_unit, kAudioUnitProperty_SetRenderCallback,
      kAudioUnitScope_Input, 0, &(o->au_ios_cb_struct), sizeof(o->au_ios_cb_struct));
  if (err) { AUBIO_ERR("audio_unit: failed setting audio unit render callback\n"); goto fail; }

#if 0
  err = AudioUnitSetProperty (*audio_unit, kAudioUnitProperty_SampleRate,
      kAudioUnitScope_Input, 0, &samplerate, sizeof(Float64));
  if (err) { AUBIO_ERR("audio_unit: could not set audio input sample rate\n"); goto fail; }
  err = AudioUnitSetProperty (*audio_unit, kAudioUnitProperty_SampleRate,
      kAudioUnitScope_Output, 1, &samplerate, sizeof(Float64));
  if (err) { AUBIO_ERR("audio_unit: could not set audio input sample rate\n"); goto fail; }
#endif

  /* 16-bit signed, native-endian, packed, stereo, interleaved PCM */
  audioFormat.mSampleRate = (Float64)samplerate;
  audioFormat.mChannelsPerFrame = 2;
  audioFormat.mFormatID = kAudioFormatLinearPCM;
  audioFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger
    | kAudioFormatFlagsNativeEndian
    | kAudioFormatFlagIsPacked;
  audioFormat.mFramesPerPacket = 1;
  audioFormat.mBitsPerChannel = 8 * sizeof(SInt16);
#if 1 // interleaving
  audioFormat.mBytesPerFrame = 2 * sizeof(SInt16);
  audioFormat.mBytesPerPacket = 2 * sizeof(SInt16);
#else
  audioFormat.mBytesPerPacket = audioFormat.mBytesPerFrame = sizeof(SInt32);
  audioFormat.mFormatFlags |= kAudioFormatFlagIsNonInterleaved;
#endif

  err = AudioUnitSetProperty (*audio_unit, kAudioUnitProperty_StreamFormat,
      kAudioUnitScope_Input, 0, &audioFormat, sizeof(audioFormat));
  if (err) { AUBIO_ERR("audio_unit: could not set audio output format\n"); goto fail; }
  err = AudioUnitSetProperty (*audio_unit, kAudioUnitProperty_StreamFormat,
      kAudioUnitScope_Output, 1, &audioFormat, sizeof(audioFormat));
  if (err) { AUBIO_ERR("audio_unit: could not set audio input format\n"); goto fail; }

#if 0
  AudioStreamBasicDescription thruFormat;
  thissize = sizeof(thruFormat);
  err = AudioUnitGetProperty (*audio_unit, kAudioUnitProperty_StreamFormat,
      kAudioUnitScope_Input, 0, &thruFormat, &thissize);
  if (err) { AUBIO_ERR("audio_unit: could not get speaker output format, err: %d\n", (int)err); goto fail; }
  err = AudioUnitSetProperty (*audio_unit, kAudioUnitProperty_StreamFormat,
      kAudioUnitScope_Output, 1, &thruFormat, sizeof(thruFormat));
  if (err) { AUBIO_ERR("audio_unit: could not set input audio format, err: %d\n", (int)err); goto fail; }
#endif

  /* time to initialize the unit */
  err = AudioUnitInitialize(*audio_unit);
  if (err) { AUBIO_ERR("audio_unit: failed initializing audio, err: %d\n", (int)err); goto fail; }

  return 0;

fail:
  return err;
}
/* Create, configure and start a CoreAudio output driver instance.
 *
 * device  - output device name (ignored on iOS; may be NULL for default)
 * rate    - requested sample rate in Hz
 * latency - desired latency in milliseconds (sizes the internal FIFO)
 *
 * Returns an opaque coreaudio_t* handle, or NULL on failure (everything
 * allocated so far is released via coreaudio_free on the error path).
 */
static void *coreaudio_init(const char *device, unsigned rate, unsigned latency)
{
   size_t fifo_size;
   UInt32 i_size;
   AudioStreamBasicDescription real_desc;
#ifdef OSX_PPC
   Component comp;
#else
   AudioComponent comp;
#endif
#ifndef TARGET_OS_IPHONE
   /* NOTE(review): this declaration uses #ifndef TARGET_OS_IPHONE while
      the session block below uses #if TARGET_OS_IPHONE — these disagree
      when the macro is defined as 0 (as Apple SDKs do); verify intent. */
   AudioChannelLayout layout = {0};
#endif
   AURenderCallbackStruct cb = {0};
   AudioStreamBasicDescription stream_desc = {0};
   bool component_unavailable = false;
   static bool session_initialized = false;
   coreaudio_t *dev = NULL;
#ifdef OSX_PPC
   ComponentDescription desc = {0};
#else
   AudioComponentDescription desc = {0};
#endif
   settings_t *settings = config_get_ptr();

   (void)session_initialized;
   (void)device;

   dev = (coreaudio_t*)calloc(1, sizeof(*dev));
   if (!dev)
      return NULL;

   dev->lock = slock_new();
   dev->cond = scond_new();

#if TARGET_OS_IPHONE
   /* The audio session is process-global: initialize and activate it
      only once, no matter how many driver instances are created. */
   if (!session_initialized)
   {
      session_initialized = true;
      AudioSessionInitialize(0, 0, coreaudio_interrupt_listener, 0);
      AudioSessionSetActive(true);
   }
#endif

   /* Create AudioComponent */
   desc.componentType = kAudioUnitType_Output;
#if TARGET_OS_IPHONE
   desc.componentSubType = kAudioUnitSubType_RemoteIO;
#else
   desc.componentSubType = kAudioUnitSubType_HALOutput;
#endif
   desc.componentManufacturer = kAudioUnitManufacturer_Apple;

#ifdef OSX_PPC
   comp = FindNextComponent(NULL, &desc);
#else
   comp = AudioComponentFindNext(NULL, &desc);
#endif
   if (comp == NULL)
      goto error;

#ifdef OSX_PPC
   component_unavailable = (OpenAComponent(comp, &dev->dev) != noErr);
#else
   component_unavailable = (AudioComponentInstanceNew(comp, &dev->dev) != noErr);
#endif

   if (component_unavailable)
      goto error;

#if !TARGET_OS_IPHONE
   if (device)
      choose_output_device(dev, device);
#endif

   dev->dev_alive = true;

   /* Set audio format: 32-bit float, packed, stereo, host endianness. */
   stream_desc.mSampleRate = rate;
   stream_desc.mBitsPerChannel = sizeof(float) * CHAR_BIT;
   stream_desc.mChannelsPerFrame = 2;
   stream_desc.mBytesPerPacket = 2 * sizeof(float);
   stream_desc.mBytesPerFrame = 2 * sizeof(float);
   stream_desc.mFramesPerPacket = 1;
   stream_desc.mFormatID = kAudioFormatLinearPCM;
   stream_desc.mFormatFlags = kAudioFormatFlagIsFloat
      | kAudioFormatFlagIsPacked
      | (is_little_endian() ? 0 : kAudioFormatFlagIsBigEndian);

   if (AudioUnitSetProperty(dev->dev, kAudioUnitProperty_StreamFormat,
            kAudioUnitScope_Input, 0, &stream_desc, sizeof(stream_desc)) != noErr)
      goto error;

   /* Check returned audio format: the unit may not honor every field;
      bail out if anything but the sample rate differs from the request. */
   i_size = sizeof(real_desc);
   if (AudioUnitGetProperty(dev->dev, kAudioUnitProperty_StreamFormat,
            kAudioUnitScope_Input, 0, &real_desc, &i_size) != noErr)
      goto error;

   if (real_desc.mChannelsPerFrame != stream_desc.mChannelsPerFrame)
      goto error;
   if (real_desc.mBitsPerChannel != stream_desc.mBitsPerChannel)
      goto error;
   if (real_desc.mFormatFlags != stream_desc.mFormatFlags)
      goto error;
   if (real_desc.mFormatID != stream_desc.mFormatID)
      goto error;

   RARCH_LOG("[CoreAudio]: Using output sample rate of %.1f Hz\n",
         (float)real_desc.mSampleRate);
   /* Propagate the rate the hardware actually granted. */
   settings->audio.out_rate = real_desc.mSampleRate;

   /* Set channel layout (fails on iOS). */
#ifndef TARGET_OS_IPHONE
   layout.mChannelLayoutTag = kAudioChannelLayoutTag_Stereo;
   if (AudioUnitSetProperty(dev->dev, kAudioUnitProperty_AudioChannelLayout,
            kAudioUnitScope_Input, 0, &layout, sizeof(layout)) != noErr)
      goto error;
#endif

   /* Set callbacks and finish up. */
   cb.inputProc = audio_write_cb;
   cb.inputProcRefCon = dev;

   if (AudioUnitSetProperty(dev->dev, kAudioUnitProperty_SetRenderCallback,
            kAudioUnitScope_Input, 0, &cb, sizeof(cb)) != noErr)
      goto error;

   if (AudioUnitInitialize(dev->dev) != noErr)
      goto error;

   /* FIFO sized from the requested latency: ms -> frames -> bytes
      (stereo float frames). */
   fifo_size = (latency * settings->audio.out_rate) / 1000;
   fifo_size *= 2 * sizeof(float);
   dev->buffer_size = fifo_size;

   dev->buffer = fifo_new(fifo_size);
   if (!dev->buffer)
      goto error;

   RARCH_LOG("[CoreAudio]: Using buffer size of %u bytes: (latency = %u ms)\n",
         (unsigned)fifo_size, latency);

   if (AudioOutputUnitStart(dev->dev) != noErr)
      goto error;

   return dev;

error:
   RARCH_ERR("[CoreAudio]: Failed to initialize driver ...\n");
   coreaudio_free(dev);
   return NULL;
}
// Manager constructor: sets up platform audio (iOS audio session with an
// interruption handler), the CRI ADX2 error callback and memory allocator,
// platform-specific CRI Atom initialization, the D-BAS instance, a standard
// voice pool, and finally the player.
Manager::Manager(CriAtomExPlayerConfig playerConfig, CriAtomExStandardVoicePoolConfig voicePoolConfig)
{
#if (CC_TARGET_PLATFORM == CC_PLATFORM_IOS)
    // Stop/restart CRI sound output around audio-session interruptions
    // (phone calls etc.); reactivate the session when the interruption ends.
    auto interruptionListener = [](void *userData, UInt32 interruptionState) {
        switch (interruptionState) {
            case kAudioSessionBeginInterruption:
                criAtomEx_StopSound_IOS();
                break;
            case kAudioSessionEndInterruption:
                AudioSessionSetActive(true);
                criAtomEx_StartSound_IOS();
                break;
        }
    };
    AudioSessionInitialize(NULL, NULL, interruptionListener, NULL);
    // Ambient category: mix with other (e.g. iPod) audio.
    UInt32 category = kAudioSessionCategory_AmbientSound;
    AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(category), &category);
    AudioSessionSetActive(true);
#endif

    // Route CRI error messages into the cocos2d log.
    auto errorCallback = [](const char *errid, uint32_t p1, uint32_t p2, uint32_t *parray) {
        const CriChar8 *errmsg;
        errmsg = criErr_ConvertIdToMessage(errid, p1, p2);
        cocos2d::log("%s", errmsg);
    };
    criErr_SetCallback(errorCallback);

    // Let CRI allocate through plain malloc/free.
    auto userAlloc = [](void *obj, uint32_t size) { return malloc(size); };
    auto userFree = [] (void *obj, void *ptr) { free(ptr); };
    criAtomEx_SetUserAllocator(userAlloc, userFree, NULL);

#if (CC_TARGET_PLATFORM == CC_PLATFORM_IOS)
    criAtomEx_Initialize_IOS(NULL, NULL, 0);
#elif (CC_TARGET_PLATFORM == CC_PLATFORM_ANDROID)
    criAtomEx_Initialize_ANDROID(NULL, NULL, 0);
    /* Extra step required on Android: enable access to the assets folder. */
    /* First obtain the Activity's Context via JniHelper. */
    cocos2d::JniMethodInfo methodInfo;
    cocos2d::JniHelper::getStaticMethodInfo(methodInfo, "org/cocos2dx/lib/Cocos2dxActivity", "getContext", "()Landroid/content/Context;");
    auto android_context_object = (jobject)methodInfo.env->CallStaticObjectMethod( methodInfo.classID, methodInfo.methodID );
    /* Enable it. The assets folder is the Cocos project's Resources
       equivalent, so this step is practically mandatory. */
    criFs_EnableAssetsAccess_ANDROID(cocos2d::JniHelper::getJavaVM(), android_context_object);
#elif (CC_TARGET_PLATFORM == CC_PLATFORM_WIN32)
    criAtomEx_Initialize_PC(NULL, NULL, 0);
#endif

    // D-BAS instance (streaming buffer management).
    _dbasID = criAtomDbas_Create(NULL, NULL, 0);

    /* Create the voice pool, passing the config object built above. */
    _voicePool = criAtomExVoicePool_AllocateStandardVoicePool(&voicePoolConfig, NULL, 0);

    /* Player creation also needs a config. */
    criAtomExPlayer_SetDefaultConfig(&playerConfig);
    _player = criAtomExPlayer_Create(&playerConfig, NULL, 0);
}