int QBSound_SetCategory(const char* category)
{
	int type = 0;
	if (category != NULL && strcmp(category, "ambient") == 0) {
		type = 1;
	}
	switch (type) {
	case 0:
		{
			AudioSessionInitialize(NULL, NULL, InterruptionListener, NULL);
			
			UInt32 sessionCategory = kAudioSessionCategory_SoloAmbientSound;
			AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(sessionCategory), &sessionCategory);
			
			AudioSessionSetActive(true);
		}
		break;
	case 1:
		{
			AudioSessionInitialize(NULL, NULL, InterruptionListener, NULL);
			
			UInt32 sessionCategory = kAudioSessionCategory_AmbientSound;
			AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(sessionCategory), &sessionCategory);
			
			AudioSessionSetActive(true);
		}
		break;
	}
	return 0;
}
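A minimal call-site sketch; only QBSound_SetCategory itself comes from the example above, the header name and wrapper function are assumptions:
/* Hypothetical call site. */
#include "QBSound.h"

void setup_game_audio(int mix_with_ipod_music)
{
	/* "ambient" selects kAudioSessionCategory_AmbientSound (mixes with iPod audio);
	   any other value, including NULL, falls back to SoloAmbientSound. */
	QBSound_SetCategory(mix_with_ipod_music ? "ambient" : NULL);
}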
Example #2
/* interruption listeners */
void audio_unit_interruption_listener(void *closure, UInt32 inInterruptionState)
{
  OSStatus err = 0;
  aubio_audio_unit_t *o = (aubio_audio_unit_t *) closure;
  AudioUnit this_unit = o->audio_unit;

  if (inInterruptionState == kAudioSessionEndInterruption) {
    AUBIO_WRN("audio_unit: session interruption ended\n");
    err = AudioSessionSetActive(true);
    if (err) {
      AUBIO_ERR("audio_unit: could not make session active after interruption (%d)\n", (int)err);
      goto fail;
    }
    err = AudioOutputUnitStart(this_unit);
    if (err) {
      AUBIO_ERR("audio_unit: failed starting unit (%d)\n", (int)err);
      goto fail;
    }
  }
  if (inInterruptionState == kAudioSessionBeginInterruption) {
    AUBIO_WRN("audio_unit: session interruption started\n");
    err = AudioOutputUnitStop(this_unit);
    if (err) {
      AUBIO_ERR("audio_unit: could not stop unit at interruption (%d)\n", (int)err);
      goto fail;
    }
    err = AudioSessionSetActive(false);
    if (err) {
      AUBIO_ERR("audio_unit: could not make session inactive after interruption (%d)\n", (int)err);
      goto fail;
    }
  }
fail:
  return;
}
Example #3
void IPhoneSoundDevice::InterruptionListener(UInt32 interruptionState) {
    switch (interruptionState) {
    case kAudioSessionBeginInterruption:
        AudioSessionSetActive(false);
        break;

    case kAudioSessionEndInterruption:
        AudioSessionSetActive(true);
        break;
    }
}
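A sketch (not from the source) of the C-style trampoline that AudioSessionInitialize expects for this member function; the namespace and the forwarding body are assumptions based on the AudioSessionInitialize(NULL, NULL, wi::InterruptionListener, this) call in the Init() example further down:
namespace wi {

void InterruptionListener(void *userData, UInt32 interruptionState)
{
    // userData is the IPhoneSoundDevice* passed as the client-data argument at init time.
    static_cast<IPhoneSoundDevice *>(userData)->InterruptionListener(interruptionState);
}

} // namespace wi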
Example #4
int audio_session_enable(void)
{
	OSStatus res;
	UInt32 category;

	res = AudioSessionInitialize(NULL, NULL, interruptionListener, 0);
	/* 1768843636 is 'init' (kAudioSessionAlreadyInitialized); a session that was
	   already initialized is not a failure here. */
	if (res && res != 1768843636)
		return ENODEV;

	category = kAudioSessionCategory_PlayAndRecord;
	res = AudioSessionSetProperty(kAudioSessionProperty_AudioCategory,
				      sizeof(category), &category);
	if (res) {
		warning("coreaudio: Audio Category: %d\n", res);
		return ENODEV;
	}

	res = AudioSessionSetActive(true);
	if (res) {
		warning("coreaudio: AudioSessionSetActive: %d\n", res);
		return ENODEV;
	}

	return 0;
}
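A hedged call-site sketch pairing audio_session_enable() with the matching audio_session_disable() shown in a later example; the wrapper functions are hypothetical:
/* Activate the session before opening the device, deactivate it on teardown. */
static int start_audio(void)
{
	int err = audio_session_enable();
	if (err)
		return err;	/* ENODEV: the session could not be configured */

	/* ... open the input/output units here ... */
	return 0;
}

static void stop_audio(void)
{
	audio_session_disable();	/* counterpart defined in a later example */
}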
Example #5
void KLAL_UnloadSystem()
{	
	
	KLAL* p = &klal;

	OSStatus err = AudioSessionSetActive (false);
	if(err)
	{
		KLLog("[ KLAL ] ERR: AudioSetActiveFalse for unload:%x\n", (int) err);
	}
	
	if( p->src )
	{
		alSourceStop( p->src );
		alDeleteSources (1, &p->src);
	}
	
	if( p->bufid )
		alDeleteBuffers (1, &p->bufid);
	
	alcMakeContextCurrent(NULL);
	
	if ( klal_pContext )
	{
		alcDestroyContext(klal_pContext);
		klal_pContext= NULL;
	}
	
	alcCloseDevice(klal_pDevice);
	klal_pDevice	= NULL;
	klal_is_loaded = FALSE;
	
	KLLog("[ KLAL ] Unloaded.\n");
	
}
Example #6
void KLAL_InitSystem()
{
	
	OSStatus err = AudioSessionInitialize(NULL, NULL, KLAL_Interruption, NULL);
	if(err)
	{
		KLLog("[ KLAL ] ERR: AudioSessionInitialize:%x\n", (int)err);
	}
	
	err = AudioSessionSetActive(TRUE);
	if(err)
	{
		KLLog("[ KLAL ] ERR: AudioSessionSetActive:%x\n", (int)err);
	}
	
	UInt32 sessionCategory = kAudioSessionCategory_AmbientSound;
	
	AudioSessionSetProperty( kAudioSessionProperty_AudioCategory,
							 sizeof (sessionCategory),
							 &sessionCategory );
	
	klal_pDevice	= alcOpenDevice(NULL);
	klal_pContext= alcCreateContext(klal_pDevice, NULL);
#if KLAL_ERRCHECK
	ALenum aen = alGetError();
	if( aen!=AL_NO_ERROR )
	{
		KLLog("[ KLAL ] %s %u\n", __func__, aen);
	}
#endif
	alcMakeContextCurrent(klal_pContext);
	klal_is_loaded = TRUE;
}
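A short usage sketch; the surrounding function is hypothetical, only the two KLAL calls come from these examples:
static void app_audio_lifecycle(void)
{
	KLAL_InitSystem();	/* bring the audio session and OpenAL up at launch */
	/* ... create and play sounds through the klal source/buffer ... */
	KLAL_UnloadSystem();	/* deactivate the session and tear OpenAL down on exit */
}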
Example #7
sint_t aubio_audio_unit_stop(aubio_audio_unit_t *o)
{
  if (o->audio_unit == NULL) return -1;
  OSStatus err = AudioOutputUnitStop (o->audio_unit);
  if (err) { AUBIO_WRN("audio_unit: failed stopping audio unit (%d)\n", (int)err); }
  err = AudioUnitUninitialize (o->audio_unit);
  if (err) { AUBIO_WRN("audio_unit: failed unitializing audio unit (%d)\n", (int)err); }
  err = AudioSessionSetActive(false);
  if (err) { AUBIO_WRN("audio_unit: failed stopping audio session (%d)\n", (int)err); }
  return err;
}
Example #8
    String open (const BigInteger& inputChannels,
                 const BigInteger& outputChannels,
                 double sampleRate,
                 int bufferSize)
    {
        close();

        lastError = String::empty;
        preferredBufferSize = (bufferSize <= 0) ? getDefaultBufferSize() : bufferSize;

        //  xxx set up channel mapping

        activeOutputChans = outputChannels;
        activeOutputChans.setRange (2, activeOutputChans.getHighestBit(), false);
        numOutputChannels = activeOutputChans.countNumberOfSetBits();
        monoOutputChannelNumber = activeOutputChans.findNextSetBit (0);

        activeInputChans = inputChannels;
        activeInputChans.setRange (2, activeInputChans.getHighestBit(), false);
        numInputChannels = activeInputChans.countNumberOfSetBits();
        monoInputChannelNumber = activeInputChans.findNextSetBit (0);

        AudioSessionSetActive (true);

        UInt32 audioCategory = (numInputChannels > 0 && audioInputIsAvailable) ? kAudioSessionCategory_PlayAndRecord
                               : kAudioSessionCategory_MediaPlayback;

        AudioSessionSetProperty (kAudioSessionProperty_AudioCategory, sizeof (audioCategory), &audioCategory);

        if (audioCategory == kAudioSessionCategory_PlayAndRecord)
        {
            // (note: mustn't set this until after the audio category property has been set)
            UInt32 allowBluetoothInput = 1;
            AudioSessionSetProperty (kAudioSessionProperty_OverrideCategoryEnableBluetoothInput,
                                     sizeof (allowBluetoothInput), &allowBluetoothInput);
        }

        AudioSessionAddPropertyListener (kAudioSessionProperty_AudioRouteChange, routingChangedStatic, this);

        fixAudioRouteIfSetToReceiver();
        updateDeviceInfo();

        Float32 bufferDuration = preferredBufferSize / sampleRate;
        AudioSessionSetProperty (kAudioSessionProperty_PreferredHardwareIOBufferDuration, sizeof (bufferDuration), &bufferDuration);
        actualBufferSize = preferredBufferSize;

        prepareFloatBuffers (actualBufferSize);

        isRunning = true;
        routingChanged (nullptr);  // creates and starts the AU

        lastError = audioUnit != 0 ? "" : "Couldn't open the device";
        return lastError;
    }
Example #9
    String open (const BigInteger& inputChannelsWanted,
                 const BigInteger& outputChannelsWanted,
                 double targetSampleRate, int bufferSize) override
    {
        close();

        lastError.clear();
        preferredBufferSize = (bufferSize <= 0) ? getDefaultBufferSize() : bufferSize;

        //  xxx set up channel mapping

        activeOutputChans = outputChannelsWanted;
        activeOutputChans.setRange (2, activeOutputChans.getHighestBit(), false);
        numOutputChannels = activeOutputChans.countNumberOfSetBits();
        monoOutputChannelNumber = activeOutputChans.findNextSetBit (0);

        activeInputChans = inputChannelsWanted;
        activeInputChans.setRange (2, activeInputChans.getHighestBit(), false);
        numInputChannels = activeInputChans.countNumberOfSetBits();
        monoInputChannelNumber = activeInputChans.findNextSetBit (0);

        AudioSessionSetActive (true);

        if (numInputChannels > 0 && audioInputIsAvailable)
        {
            setSessionUInt32Property (kAudioSessionProperty_AudioCategory, kAudioSessionCategory_PlayAndRecord);
            setSessionUInt32Property (kAudioSessionProperty_OverrideCategoryEnableBluetoothInput, 1);
        }
        else
        {
            setSessionUInt32Property (kAudioSessionProperty_AudioCategory, kAudioSessionCategory_MediaPlayback);
        }

        AudioSessionAddPropertyListener (kAudioSessionProperty_AudioRouteChange, routingChangedStatic, this);

        fixAudioRouteIfSetToReceiver();
        updateDeviceInfo();

        setSessionFloat64Property (kAudioSessionProperty_PreferredHardwareSampleRate, targetSampleRate);
        updateSampleRates();

        setSessionFloat64Property (kAudioSessionProperty_PreferredHardwareIOBufferDuration, preferredBufferSize / sampleRate);
        updateCurrentBufferSize();

        prepareFloatBuffers (actualBufferSize);

        isRunning = true;
        routingChanged (nullptr);  // creates and starts the AU

        lastError = audioUnit != 0 ? "" : "Couldn't open the device";
        return lastError;
    }
Example #10
static void destructor(void *arg)
{
	struct audiosess *as = arg;
#if TARGET_OS_IPHONE
	AudioSessionPropertyID id = kAudioSessionProperty_AudioRouteChange;

	AudioSessionRemovePropertyListenerWithUserData(id, propListener, as);
	AudioSessionSetActive(false);
#endif

	list_flush(&as->sessl);

	gas = NULL;
}
Example #11
    void close()
    {
        if (isRunning)
        {
            isRunning = false;
            AudioSessionSetActive (false);

            if (audioUnit != 0)
            {
                AudioComponentInstanceDispose (audioUnit);
                audioUnit = 0;
            }
        }
    }
Example #12
    void close()
    {
        if (isRunning)
        {
            isRunning = false;
            AudioSessionRemovePropertyListenerWithUserData (kAudioSessionProperty_AudioRouteChange, routingChangedStatic, this);
            AudioSessionSetActive (false);

            if (audioUnit != 0)
            {
                AudioComponentInstanceDispose (audioUnit);
                audioUnit = 0;
            }
        }
    }
Example #13
/*
 *This function is called when Core Audio interrupts your audio session, and again when the
 *interruption ends; it handles reactivation of the audio session once the interruption is over.
 *https://developer.apple.com/iphone/library/documentation/Audio/Conceptual/AudioSessionProgrammingGuide/HowAudioSessionsWork/chapter_3_section_4.html
 */
void rioInterruptionListener(void *inUserData, UInt32 inInterruption)
{
	printf("Session interrupted! --- %s ---", inInterruption == kAudioSessionBeginInterruption ? "Begin Interruption" : "End Interruption");
	
	AudioUnit *remoteIO = (AudioUnit*)inUserData;
	
	if (inInterruption == kAudioSessionEndInterruption)
	{
		// make sure we are again the active session
		AudioSessionSetActive(true);
		AudioOutputUnitStart(*remoteIO);
	}
	
	if (inInterruption == kAudioSessionBeginInterruption)
		AudioOutputUnitStop(*remoteIO);		
}
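The listener above expects a pointer to the RemoteIO audio unit as its user data; a minimal registration sketch consistent with that (the variable and function names are assumptions, not from the source):
static AudioUnit remoteIOUnit;	/* hypothetical; the RemoteIO instance created elsewhere */

static void register_rio_interruption_listener(void)
{
	/* Hand the unit's address to the session so the listener can stop/restart it. */
	AudioSessionInitialize(NULL, NULL, rioInterruptionListener, &remoteIOUnit);
}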
Example #14
    void close() override
    {
        if (isRunning)
        {
            isRunning = false;

            setSessionUInt32Property (kAudioSessionProperty_AudioCategory, kAudioSessionCategory_MediaPlayback);

            AudioSessionRemovePropertyListenerWithUserData (kAudioSessionProperty_AudioRouteChange, routingChangedStatic, this);
            AudioSessionSetActive (false);

            if (audioUnit != 0)
            {
                AudioComponentInstanceDispose (audioUnit);
                audioUnit = 0;
            }
        }
    }
Example #15
bool IPhoneSoundDevice::Init()
{
    // Initialize the default audio session object to tell it
    // to allow background music, and to tell us when audio
    // gets resumed (for example, if a phone call comes in, the iPhone takes
    // over audio; if the user then ignores the phone call, the
    // audio needs to be turned on again).

    AudioSessionInitialize(NULL, NULL, wi::InterruptionListener, this);
    UInt32 category = kAudioSessionCategory_UserInterfaceSoundEffects;
    AudioSessionSetProperty(kAudioSessionProperty_AudioCategory,
            sizeof(category), &category);
    AudioSessionSetActive(true);

    // Set up streaming

    AudioStreamBasicDescription desc;
    desc.mSampleRate = 8000;
    desc.mFormatID = kAudioFormatLinearPCM;
    desc.mFormatFlags = kAudioFormatFlagIsPacked;
    desc.mBytesPerPacket = 1;
    desc.mFramesPerPacket = 1;
    desc.mBytesPerFrame = 1;
    desc.mChannelsPerFrame = 1;
    desc.mBitsPerChannel = 8;

    OSStatus err = AudioQueueNewOutput(&desc, AudioCallback, this,
            NULL,
            kCFRunLoopCommonModes,
            0, &m_haq);
    if (err != 0) {
        return false;
    }

    for (int i = 0; i < kcBuffers; i++) {
        err = AudioQueueAllocateBuffer(m_haq, kcbBuffer, &m_apaqb[i]);
        if (err != 0) {
           return false;
        }
    }

    return true;
}
Example #16
static void aq_stop_w(MSFilter * f)
{
    AQData *d = (AQData *) f->data;
    if (d->write_started == TRUE) {
        ms_mutex_lock(&d->mutex);
        d->write_started = FALSE;	/* avoid a deadlock related to buffer conversion in callback */
        ms_mutex_unlock(&d->mutex);
#if 0
        AudioConverterDispose(d->writeAudioConverter);
#endif
        AudioQueueStop(d->writeQueue, true);

        AudioQueueDispose(d->writeQueue, true);

#if TARGET_OS_IPHONE
        check_aqresult(AudioSessionSetActive(false),"AudioSessionSetActive(false)");
#endif
    }
}
Example #17
/*
 *Audio Session Configuration.
 * Requests an audio session from core audio and configures it for effects processing by default (one input, one output).
 * <Sam> All major configurations are set for the AudioSession Instance here
 */
int MUEAudioIO::configureAudioSession()
{
    try {
		// Initialize and configure the audio session
		AudioSessionInitialize(NULL, NULL, rioInterruptionListener, this);
		AudioSessionSetActive(true);
		

		//audio should not mix with iPod audio, and we want input and output.
		UInt32 audioCategory = kAudioSessionCategory_PlayAndRecord;
        //audio will mix with iPod audio, but we get output only (no input) with this type of session
        //UInt32 audioCategory = kAudioSessionCategory_AmbientSound;
		AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(audioCategory), &audioCategory);
		
        
		// The entire purpose of the propListener is to detect a change in signal flow (headphones w/ mic or even third party device)
		AudioSessionAddPropertyListener(kAudioSessionProperty_AudioRouteChange, propListener, this);
		
        
        //(TODO) make get/set preferred buffer size
		// This value is in seconds! We want really low latency...
		preferredBufferSize = .01;	// .005 for buffer of 256, .01 for buffer of 512
		AudioSessionSetProperty(kAudioSessionProperty_PreferredHardwareIOBufferDuration, 
								sizeof(preferredBufferSize), &preferredBufferSize);
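		// With a 44.1 kHz hardware sample rate (an assumption, not stated in this code),
		// the duration maps to a power-of-two frame count:
		// 0.005 s * 44100 ~= 220 frames -> 256, and 0.01 s * 44100 ~= 441 frames -> 512,
		// matching the comment above.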
		
		
		// Related to our propListener. When the signal flow changes, sometimes the hardware sample rate can change. You'll notice in the propListener it checks for a new one.
		UInt32 size = sizeof(hwSampleRate);
		AudioSessionGetProperty(kAudioSessionProperty_CurrentHardwareSampleRate, &size, &hwSampleRate);
	}
	catch (...) {
		printf("An unknown error occurred in audio session configuration!\n");
		//if (url) CFRelease(url);
	}
    
    return 0;
}
Example #18
static void au_interuption_listener(void* inClientData, UInt32 inInterruptionState) {
	if (((MSSndCard*)inClientData)->data == NULL) return;
	
	AUData *d=(AUData*)(((MSSndCard*)inClientData)->data);
	if (d->started == FALSE) {
		//nothing to do
		return;
	}
	switch (inInterruptionState) {
		case kAudioSessionBeginInterruption:
			ms_message ("IO unit interruption begin");
			AudioOutputUnitStop(d->io_unit);
			break;
		case kAudioSessionEndInterruption:
			// make sure we are again the active session
			ms_message ("IO unit interruption end");
			OSStatus auresult = AudioSessionSetActive(true);
			check_auresult(auresult,"AudioSessionSetActive");
			d->io_unit_must_be_started=TRUE;
			break;
		default:
			ms_warning ("unexpected interuption %i",inInterruptionState);
	}
}
Example #19
static void au_configure(AUData *d) {
	AudioStreamBasicDescription audioFormat;
	AudioComponentDescription au_description;
	AudioComponent foundComponent;
	OSStatus auresult;
	UInt32 doSetProperty      = 1;
	UInt32 doNotSetProperty    = 0;
	
	
	
	auresult = AudioSessionSetActive(true);
	check_auresult(auresult,"AudioSessionSetActive");
	
	
	UInt32 audioCategory =kAudioSessionCategory_PlayAndRecord;
	auresult =AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(audioCategory), &audioCategory);
	check_auresult(auresult,"Configuring audio session ");
	

	if (d->is_ringer) {
		auresult=AudioSessionSetProperty (kAudioSessionProperty_OverrideCategoryDefaultToSpeaker,sizeof (doSetProperty),&doSetProperty);
		check_auresult(auresult,"kAudioSessionProperty_OverrideAudioRoute");
		ms_message("Configuring audio session default route to speaker");
	} else {
		ms_message("Configuring audio session default route to receiver");
	}
	if (d->started == TRUE) {
		//nothing else to do
		return;
	}
	
	au_description.componentType          = kAudioUnitType_Output;
	au_description.componentSubType       = kAudioUnitSubType_VoiceProcessingIO;
	au_description.componentManufacturer  = kAudioUnitManufacturer_Apple;
	au_description.componentFlags         = 0;
	au_description.componentFlagsMask     = 0;
	
	foundComponent = AudioComponentFindNext (NULL,&au_description);
	
	auresult=AudioComponentInstanceNew (foundComponent, &d->io_unit);
	
	check_auresult(auresult,"AudioComponentInstanceNew");

	audioFormat.mSampleRate			= d->rate;
	audioFormat.mFormatID			= kAudioFormatLinearPCM;
	audioFormat.mFormatFlags		= kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
	audioFormat.mFramesPerPacket	= 1;
	audioFormat.mChannelsPerFrame	= d->nchannels;
	audioFormat.mBitsPerChannel		= d->bits;
	audioFormat.mBytesPerPacket		= d->bits / 8;
	audioFormat.mBytesPerFrame		= d->nchannels * d->bits / 8;
	AudioUnitElement outputBus = 0;	/* element 0 of the IO unit: output (speaker) side */
	AudioUnitElement inputBus = 1;	/* element 1 of the IO unit: input (microphone) side */
	auresult=AudioUnitUninitialize (d->io_unit);
	
	check_auresult(auresult,"AudioUnitUninitialize");
	
	//read
	auresult=AudioUnitSetProperty (
								   d->io_unit,
								   kAudioOutputUnitProperty_EnableIO,
								   kAudioUnitScope_Input ,
								   inputBus,
								   &doSetProperty,
								   sizeof (doSetProperty)
								   );
	check_auresult(auresult,"kAudioOutputUnitProperty_EnableIO,kAudioUnitScope_Input");
	//setup stream format
	auresult=AudioUnitSetProperty (
								   d->io_unit,
								   kAudioUnitProperty_StreamFormat,
								   kAudioUnitScope_Input,
								   outputBus,
								   &audioFormat,
								   sizeof (audioFormat)
								   );
	check_auresult(auresult,"kAudioUnitProperty_StreamFormat,kAudioUnitScope_Input");
	
	//write	
	//enable output bus
	auresult =AudioUnitSetProperty (
									d->io_unit,
									kAudioOutputUnitProperty_EnableIO,
									kAudioUnitScope_Output ,
									outputBus,
									&doSetProperty,
									sizeof (doSetProperty)
									);
	check_auresult(auresult,"kAudioOutputUnitProperty_EnableIO,kAudioUnitScope_Output");
	
	//setup stream format
	auresult=AudioUnitSetProperty (
								   d->io_unit,
								   kAudioUnitProperty_StreamFormat,
								   kAudioUnitScope_Output,
								   inputBus,
								   &audioFormat,
								   sizeof (audioFormat)
								   );
	check_auresult(auresult,"kAudioUnitProperty_StreamFormat,kAudioUnitScope_Output");
	
	check_auresult(auresult,"kAudioUnitProperty_StreamFormat,kAudioUnitScope_Input");
	
	//disable unit buffer allocation
	auresult=AudioUnitSetProperty (
								   d->io_unit,
								   kAudioUnitProperty_ShouldAllocateBuffer,
								   kAudioUnitScope_Output,
								   outputBus,
								   &doNotSetProperty,
								   sizeof (doNotSetProperty)
								   );
	check_auresult(auresult,"kAudioUnitProperty_ShouldAllocateBuffer,kAudioUnitScope_Output");
	
	
	AURenderCallbackStruct renderCallbackStruct;            
	renderCallbackStruct.inputProc       = au_render_cb;  
	renderCallbackStruct.inputProcRefCon = d;          
	
	auresult=AudioUnitSetProperty (
								   d->io_unit,                                  
								   kAudioUnitProperty_SetRenderCallback,        
								   kAudioUnitScope_Input,                       
								   outputBus,                                   
								   &renderCallbackStruct,                              
								   sizeof (renderCallbackStruct)                       
								   );
	check_auresult(auresult,"kAudioUnitProperty_SetRenderCallback,kAudioUnitScope_Input");
	
	const Float64 preferredSampleRate = d->rate;//PREFERRED_HW_SAMPLE_RATE; /*optimum to minimize delay, must put a software resampler to deal with 8khz*/
	
	auresult=AudioSessionSetProperty(kAudioSessionProperty_PreferredHardwareSampleRate
									 ,sizeof(preferredSampleRate)
									 , &preferredSampleRate);
	check_auresult(auresult,"kAudioSessionProperty_PreferredHardwareSampleRate");
	
	
	Float32 preferredBufferSize;
	switch (d->rate) {
		case 11025:
		case 22050: 
			preferredBufferSize= .020;
			break;
		default:
			preferredBufferSize= .015;
	}
			
	auresult=AudioSessionSetProperty(kAudioSessionProperty_PreferredHardwareIOBufferDuration
									 ,sizeof(preferredBufferSize)
									 , &preferredBufferSize);
	
	
	if (auresult != 0) ms_message("kAudioSessionProperty_PreferredHardwareIOBufferDuration returns %i ",auresult);
	
	Float64 delay;
	UInt32 delaySize = sizeof(delay);
	auresult=AudioUnitGetProperty(d->io_unit
								  ,kAudioUnitProperty_Latency
								  , kAudioUnitScope_Global
								  , 0
								  , &delay
								  , &delaySize);

	UInt32 quality;
	UInt32 qualitySize = sizeof(quality);
	auresult=AudioUnitGetProperty(d->io_unit
								  ,kAudioUnitProperty_RenderQuality
								  , kAudioUnitScope_Global
								  , 0
								  , &quality
								  , &qualitySize);
	
	
	
	ms_message("I/O unit latency [%f], quality [%i]",delay,quality);
	Float32 hwoutputlatency;
	UInt32 hwoutputlatencySize=sizeof(hwoutputlatency);
	auresult=AudioSessionGetProperty(kAudioSessionProperty_CurrentHardwareOutputLatency
									 ,&hwoutputlatencySize
									 , &hwoutputlatency);
	Float32 hwinputlatency;
	UInt32 hwinputlatencySize=sizeof(hwinputlatency);
	auresult=AudioSessionGetProperty(kAudioSessionProperty_CurrentHardwareInputLatency
									 ,&hwinputlatencySize
									 , &hwinputlatency);
	
	Float32 hwiobuf;
	UInt32 hwiobufSize=sizeof(hwiobuf);
	auresult=AudioSessionGetProperty(kAudioSessionProperty_CurrentHardwareIOBufferDuration
									 ,&hwiobufSize
									 , &hwiobuf);
	
	Float64 hwsamplerate;
	UInt32 hwsamplerateSize=sizeof(hwsamplerate);
	auresult=AudioSessionGetProperty(kAudioSessionProperty_CurrentHardwareSampleRate
									 ,&hwsamplerateSize
									 ,&hwsamplerate);

	ms_message("current hw output latency [%f] input [%f] iobuf[%f] sample rate [%f]",hwoutputlatency,hwinputlatency,hwiobuf,hwsamplerate);
	auresult=AudioOutputUnitStart(d->io_unit);
	check_auresult(auresult,"AudioOutputUnitStart");
	d->started=TRUE;
	return;
}	
Example #20
    Manager::Manager(CriAtomExPlayerConfig playerConfig,
                             CriAtomExStandardVoicePoolConfig voicePoolConfig)
    {
#if (CC_TARGET_PLATFORM == CC_PLATFORM_IOS)
        auto interruptionListener = [](void *userData, UInt32 interruptionState) {
            switch (interruptionState) {
                case kAudioSessionBeginInterruption:
                    criAtomEx_StopSound_IOS();
                    break;
                case kAudioSessionEndInterruption:
                    AudioSessionSetActive(true);
                    criAtomEx_StartSound_IOS();
                    break;
            }
        };
        AudioSessionInitialize(NULL, NULL, interruptionListener, NULL);
        UInt32 category = kAudioSessionCategory_AmbientSound;
        AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(category), &category);
        AudioSessionSetActive(true);
#endif
        auto errorCallback = [](const char *errid, uint32_t p1, uint32_t p2, uint32_t *parray) {
            const CriChar8 *errmsg;
            errmsg = criErr_ConvertIdToMessage(errid, p1, p2);
            cocos2d::log("%s", errmsg);
        };
        criErr_SetCallback(errorCallback);
        
        auto userAlloc = [](void *obj, uint32_t size) {
            return malloc(size);
        };
        
        auto userFree = [] (void *obj, void *ptr) {
            free(ptr);
        };
        
        criAtomEx_SetUserAllocator(userAlloc, userFree, NULL);
        
#if (CC_TARGET_PLATFORM == CC_PLATFORM_IOS)
        criAtomEx_Initialize_IOS(NULL, NULL, 0);
#elif (CC_TARGET_PLATFORM == CC_PLATFORM_ANDROID)
        criAtomEx_Initialize_ANDROID(NULL, NULL, 0);
        
        /* Extra step required on Android: enable access to the assets folder */
        /* First, obtain the Activity's Context via JniHelper */
        cocos2d::JniMethodInfo methodInfo;
        cocos2d::JniHelper::getStaticMethodInfo(methodInfo,
                                                "org/cocos2dx/lib/Cocos2dxActivity",
                                                "getContext",
                                                "()Landroid/content/Context;");
        auto android_context_object = (jobject)methodInfo.env->CallStaticObjectMethod( methodInfo.classID, methodInfo.methodID );
        /* Enable it. The assets folder is the equivalent of a Cocos project's Resources, so this step is practically mandatory */
        criFs_EnableAssetsAccess_ANDROID(cocos2d::JniHelper::getJavaVM(), android_context_object);
#elif (CC_TARGET_PLATFORM == CC_PLATFORM_WIN32)
        criAtomEx_Initialize_PC(NULL, NULL, 0);
#endif
        
        _dbasID = criAtomDbas_Create(NULL, NULL, 0);
        
        /* Create the voice pool, passing the config object created above */
        _voicePool = criAtomExVoicePool_AllocateStandardVoicePool(&voicePoolConfig, NULL, 0);
        
        /* A config is also needed to create the player */
        criAtomExPlayer_SetDefaultConfig(&playerConfig);
        _player = criAtomExPlayer_Create(&playerConfig, NULL, 0);
    }
Example #21
static void aq_start_w(MSFilter * f)
{
    AQData *d = (AQData *) f->data;
    if (d->write_started == FALSE) {
        OSStatus aqresult;
#if TARGET_OS_IPHONE
        aqresult = AudioSessionSetActive(true);
        check_aqresult(aqresult,"AudioSessionSetActive");

        UInt32 audioCategory;

        audioCategory= kAudioSessionCategory_AmbientSound;
        ms_message("Configuring audio session for play back");
        aqresult =AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(audioCategory), &audioCategory);
        check_aqresult(aqresult,"Configuring audio session ");
#endif
        d->writeAudioFormat.mSampleRate = d->rate;
        d->writeAudioFormat.mFormatID = kAudioFormatLinearPCM;
        d->writeAudioFormat.mFormatFlags =
            kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
        d->writeAudioFormat.mFramesPerPacket = 1;
        d->writeAudioFormat.mChannelsPerFrame = 1;
        d->writeAudioFormat.mBitsPerChannel = d->bits;
        d->writeAudioFormat.mBytesPerPacket = d->bits / 8;
        d->writeAudioFormat.mBytesPerFrame = d->bits / 8;

        //show_format("data provided to output filter",	&d->writeAudioFormat);
        //show_format("output device", &d->devicewriteFormat);

        memcpy(&d->devicewriteFormat, &d->writeAudioFormat,
               sizeof(d->writeAudioFormat));
        d->writeBufferByteSize =
            kSecondsPerBuffer * d->devicewriteFormat.mSampleRate *
            (d->devicewriteFormat.mBitsPerChannel / 8) *
            d->devicewriteFormat.mChannelsPerFrame;

#if 0
        aqresult = AudioConverterNew(&d->writeAudioFormat,
                                     &d->devicewriteFormat,
                                     &d->writeAudioConverter);
        if (aqresult != noErr) {
            ms_error("d->writeAudioConverter = %d", aqresult);
            d->writeAudioConverter = NULL;
        }
#endif

        // create the playback audio queue object
        aqresult = AudioQueueNewOutput(&d->devicewriteFormat, writeCallback, d, NULL,	/*CFRunLoopGetCurrent () */
                                       NULL,	/*kCFRunLoopCommonModes */
                                       0,	// run loop flags
                                       &d->writeQueue);
        if (aqresult != noErr) {
            ms_error("AudioQueueNewOutput = %ld", aqresult);
        }

        AudioQueueSetParameter (d->writeQueue,
                                kAudioQueueParam_Volume,
                                gain_volume_out);

        if (d->uidname!=NULL) {
            char uidname[256];
            CFStringGetCString(d->uidname, uidname, 256,
                               CFStringGetSystemEncoding());
            ms_message("AQ: using uidname:%s", uidname);
            aqresult =
                AudioQueueSetProperty(d->writeQueue,
                                      kAudioQueueProperty_CurrentDevice,
                                      &d->uidname, sizeof(CFStringRef));
            if (aqresult != noErr) {
                ms_error
                ("AudioQueueSetProperty on kAudioQueueProperty_CurrentDevice %ld",
                 aqresult);
            }
        }

        setupWrite(f);
        d->curWriteBuffer = 0;
    }
}
Example #22
void audio_session_disable(void)
{
	AudioSessionSetActive(false);
}
Example #23
int audiosess_alloc(struct audiosess_st **stp,
		    audiosess_int_h *inth, void *arg)
{
	struct audiosess_st *st = NULL;
	struct audiosess *as = NULL;
	int err = 0;
	bool created = false;
#if TARGET_OS_IPHONE
	AudioSessionPropertyID id = kAudioSessionProperty_AudioRouteChange;
	UInt32 category;
	OSStatus ret;
#endif

	if (!stp)
		return EINVAL;

#if TARGET_OS_IPHONE
	/* Must be done for all modules */
	category = kAudioSessionCategory_PlayAndRecord;
	ret = AudioSessionSetProperty(kAudioSessionProperty_AudioCategory,
				      sizeof(category), &category);
	if (ret) {
		re_fprintf(stderr, "Audio Category: %d\n", ret);
		return EINVAL;
	}
#endif

	if (gas)
		goto makesess;

	as = mem_zalloc(sizeof(*as), destructor);
	if (!as)
		return ENOMEM;

#if TARGET_OS_IPHONE
	ret = AudioSessionSetActive(true);
	if (ret) {
		re_fprintf(stderr, "AudioSessionSetActive: %d\n", ret);
		err = ENOSYS;
		goto out;
	}

	ret = AudioSessionAddPropertyListener(id, propListener, as);
	if (ret) {
		re_fprintf(stderr, "AudioSessionAddPropertyListener: %d\n",
			   ret);
		err = EINVAL;
		goto out;
	}
#endif

	gas = as;
	created = true;

 makesess:
	st = mem_zalloc(sizeof(*st), sess_destructor);
	if (!st) {
		err = ENOMEM;
		goto out;
	}
	st->inth = inth;
	st->arg = arg;
	st->as = created ? gas : mem_ref(gas);

	list_append(&gas->sessl, &st->le, st);

 out:
	if (err) {
		mem_deref(as);
		mem_deref(st);
	}
	else {
		*stp = st;
	}

	return err;
}
Example #24
static void *coreaudio_init(const char *device,
      unsigned rate, unsigned latency)
{
   size_t fifo_size;
   UInt32 i_size;
   AudioStreamBasicDescription real_desc;
#ifdef OSX_PPC
   Component comp;
#else
   AudioComponent comp;
#endif
#ifndef TARGET_OS_IPHONE
   AudioChannelLayout layout               = {0};
#endif
   AURenderCallbackStruct cb               = {0};
   AudioStreamBasicDescription stream_desc = {0};
   bool component_unavailable              = false;
   static bool session_initialized         = false;
   coreaudio_t *dev                        = NULL;
#ifdef OSX_PPC
   ComponentDescription desc               = {0};
#else
   AudioComponentDescription desc          = {0};
#endif
   settings_t *settings                    = config_get_ptr();

   (void)session_initialized;
   (void)device;

   dev = (coreaudio_t*)calloc(1, sizeof(*dev));
   if (!dev)
      return NULL;

   dev->lock = slock_new();
   dev->cond = scond_new();

#if TARGET_OS_IPHONE
   if (!session_initialized)
   {
      session_initialized = true;
      AudioSessionInitialize(0, 0, coreaudio_interrupt_listener, 0);
      AudioSessionSetActive(true);
   }
#endif

   /* Create AudioComponent */
   desc.componentType = kAudioUnitType_Output;
#if TARGET_OS_IPHONE
   desc.componentSubType = kAudioUnitSubType_RemoteIO;
#else
   desc.componentSubType = kAudioUnitSubType_HALOutput;
#endif
   desc.componentManufacturer = kAudioUnitManufacturer_Apple;

#ifdef OSX_PPC
   comp = FindNextComponent(NULL, &desc);
#else
   comp = AudioComponentFindNext(NULL, &desc);
#endif
   if (comp == NULL)
      goto error;
   
#ifdef OSX_PPC
   component_unavailable = (OpenAComponent(comp, &dev->dev) != noErr);
#else
   component_unavailable = (AudioComponentInstanceNew(comp, &dev->dev) != noErr);
#endif

   if (component_unavailable)
      goto error;

#if !TARGET_OS_IPHONE
   if (device)
      choose_output_device(dev, device);
#endif

   dev->dev_alive = true;

   /* Set audio format */
   stream_desc.mSampleRate       = rate;
   stream_desc.mBitsPerChannel   = sizeof(float) * CHAR_BIT;
   stream_desc.mChannelsPerFrame = 2;
   stream_desc.mBytesPerPacket   = 2 * sizeof(float);
   stream_desc.mBytesPerFrame    = 2 * sizeof(float);
   stream_desc.mFramesPerPacket  = 1;
   stream_desc.mFormatID         = kAudioFormatLinearPCM;
   stream_desc.mFormatFlags      = kAudioFormatFlagIsFloat | 
      kAudioFormatFlagIsPacked | (is_little_endian() ? 
            0 : kAudioFormatFlagIsBigEndian);
   
   if (AudioUnitSetProperty(dev->dev, kAudioUnitProperty_StreamFormat,
         kAudioUnitScope_Input, 0, &stream_desc, sizeof(stream_desc)) != noErr)
      goto error;
   
   /* Check returned audio format. */
   i_size = sizeof(real_desc);
   if (AudioUnitGetProperty(dev->dev, kAudioUnitProperty_StreamFormat, 
            kAudioUnitScope_Input, 0, &real_desc, &i_size) != noErr)
      goto error;

   if (real_desc.mChannelsPerFrame != stream_desc.mChannelsPerFrame)
      goto error;
   if (real_desc.mBitsPerChannel != stream_desc.mBitsPerChannel)
      goto error;
   if (real_desc.mFormatFlags != stream_desc.mFormatFlags)
      goto error;
   if (real_desc.mFormatID != stream_desc.mFormatID)
      goto error;

   RARCH_LOG("[CoreAudio]: Using output sample rate of %.1f Hz\n",
         (float)real_desc.mSampleRate);
   settings->audio.out_rate = real_desc.mSampleRate;

   /* Set channel layout (fails on iOS). */
#ifndef TARGET_OS_IPHONE
   layout.mChannelLayoutTag = kAudioChannelLayoutTag_Stereo;
   if (AudioUnitSetProperty(dev->dev, kAudioUnitProperty_AudioChannelLayout,
         kAudioUnitScope_Input, 0, &layout, sizeof(layout)) != noErr)
      goto error;
#endif

   /* Set callbacks and finish up. */
   cb.inputProc = audio_write_cb;
   cb.inputProcRefCon = dev;

   if (AudioUnitSetProperty(dev->dev, kAudioUnitProperty_SetRenderCallback,
         kAudioUnitScope_Input, 0, &cb, sizeof(cb)) != noErr)
      goto error;

   if (AudioUnitInitialize(dev->dev) != noErr)
      goto error;

   fifo_size = (latency * settings->audio.out_rate) / 1000;
   fifo_size *= 2 * sizeof(float);
   dev->buffer_size = fifo_size;

   dev->buffer = fifo_new(fifo_size);
   if (!dev->buffer)
      goto error;

   RARCH_LOG("[CoreAudio]: Using buffer size of %u bytes: (latency = %u ms)\n",
         (unsigned)fifo_size, latency);

   if (AudioOutputUnitStart(dev->dev) != noErr)
      goto error;

   return dev;

error:
   RARCH_ERR("[CoreAudio]: Failed to initialize driver ...\n");
   coreaudio_free(dev);
   return NULL;
}