Example 1
OSStatus audio_unit_set_audio_session_category(bool has_input, bool verbose)
{
  //if we have input, set the session category accordingly
  OSStatus err = 0;
  UInt32 category;
  if (has_input) {
    category = kAudioSessionCategory_PlayAndRecord;
    if (verbose) AUBIO_MSG("audio_unit: setting category to PlayAndRecord\n");
  } else {
    category = kAudioSessionCategory_MediaPlayback;
    if (verbose) AUBIO_MSG("audio_unit: setting category to MediaPlayback\n");
  }
  err = AudioSessionSetProperty(kAudioSessionProperty_AudioCategory,
      sizeof(category), &category);
  if (err) {
    AUBIO_ERR("audio_unit: could not set audio category\n");
  }

  // Audiob.us style
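  // allow this session's audio to mix with other apps' audio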
  UInt32 allowMixing = 1;
  err = AudioSessionSetProperty(kAudioSessionProperty_OverrideCategoryMixWithOthers,
      sizeof (allowMixing), &allowMixing);
  if (err) {
    AUBIO_ERR("audio_unit: could not set audio session to mix with others\n");
  }

  return err;
}
Example 2
int QBSound_SetCategory(const char* category)
{
	int type = 0;
	if (category != NULL && strcmp(category, "ambient") == 0) {
		type = 1;
	}
	switch (type) {
	case 0:
		{
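			// SoloAmbient: silences other audio (e.g. the iPod app) and honors the ring/silent switch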
			AudioSessionInitialize(NULL, NULL, InterruptionListener, NULL);
			
			UInt32 category = kAudioSessionCategory_SoloAmbientSound;
			AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(category), &category);
			
			AudioSessionSetActive(true);
		}
		break;
	case 1:
		{
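			// Ambient: mixes with other audio and honors the ring/silent switch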
			AudioSessionInitialize(NULL, NULL, InterruptionListener, NULL);
			
			UInt32 category = kAudioSessionCategory_AmbientSound;
			AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(category), &category);
			
			AudioSessionSetActive(true);
		}
		break;
	}
	return 0;
}
Example 3
    String open (const BigInteger& inputChannels,
                 const BigInteger& outputChannels,
                 double sampleRate,
                 int bufferSize)
    {
        close();

        lastError = String::empty;
        preferredBufferSize = (bufferSize <= 0) ? getDefaultBufferSize() : bufferSize;

        //  xxx set up channel mapping

        activeOutputChans = outputChannels;
        activeOutputChans.setRange (2, activeOutputChans.getHighestBit(), false);
        numOutputChannels = activeOutputChans.countNumberOfSetBits();
        monoOutputChannelNumber = activeOutputChans.findNextSetBit (0);

        activeInputChans = inputChannels;
        activeInputChans.setRange (2, activeInputChans.getHighestBit(), false);
        numInputChannels = activeInputChans.countNumberOfSetBits();
        monoInputChannelNumber = activeInputChans.findNextSetBit (0);

        AudioSessionSetActive (true);

        UInt32 audioCategory = (numInputChannels > 0 && audioInputIsAvailable) ? kAudioSessionCategory_PlayAndRecord
                               : kAudioSessionCategory_MediaPlayback;

        AudioSessionSetProperty (kAudioSessionProperty_AudioCategory, sizeof (audioCategory), &audioCategory);

        if (audioCategory == kAudioSessionCategory_PlayAndRecord)
        {
            // (note: mustn't set this until after the audio category property has been set)
            UInt32 allowBluetoothInput = 1;
            AudioSessionSetProperty (kAudioSessionProperty_OverrideCategoryEnableBluetoothInput,
                                     sizeof (allowBluetoothInput), &allowBluetoothInput);
        }

        AudioSessionAddPropertyListener (kAudioSessionProperty_AudioRouteChange, routingChangedStatic, this);

        fixAudioRouteIfSetToReceiver();
        updateDeviceInfo();

        Float32 bufferDuration = preferredBufferSize / sampleRate;
        AudioSessionSetProperty (kAudioSessionProperty_PreferredHardwareIOBufferDuration, sizeof (bufferDuration), &bufferDuration);
        actualBufferSize = preferredBufferSize;

        prepareFloatBuffers (actualBufferSize);

        isRunning = true;
        routingChanged (nullptr);  // creates and starts the AU

        lastError = audioUnit != 0 ? "" : "Couldn't open the device";
        return lastError;
    }
Example 4
int audio_session_enable(void)
{
	OSStatus res;
	UInt32 category;

	res = AudioSessionInitialize(NULL, NULL, interruptionListener, 0);
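	/* 1768843636 is 'init' (kAudioSessionAlreadyInitialized): the session was
	   already initialized, which is not an error here */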
	if (res && res != 1768843636)
		return ENODEV;

	category = kAudioSessionCategory_PlayAndRecord;
	res = AudioSessionSetProperty(kAudioSessionProperty_AudioCategory,
				      sizeof(category), &category);
	if (res) {
		warning("coreaudio: Audio Category: %d\n", res);
		return ENODEV;
	}

	res = AudioSessionSetActive(true);
	if (res) {
		warning("coreaudio: AudioSessionSetActive: %d\n", res);
		return ENODEV;
	}

	return 0;
}
Example 5
static int
COREAUDIO_Init(SDL_AudioDriverImpl * impl)
{
    /* Set the function pointers */
    impl->OpenDevice = COREAUDIO_OpenDevice;
    impl->CloseDevice = COREAUDIO_CloseDevice;

#if MACOSX_COREAUDIO
    impl->DetectDevices = COREAUDIO_DetectDevices;
#else
    impl->OnlyHasDefaultOutputDevice = 1;

    /* Set category to ambient sound so that other music continues playing.
       You can change this at runtime in your own code if you need different
       behavior.  If this is common, we can add an SDL hint for this.
    */
    AudioSessionInitialize(NULL, NULL, NULL, nil);
    UInt32 category = kAudioSessionCategory_AmbientSound;
    AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(UInt32), &category);
#endif

    impl->ProvidesOwnCallbackThread = 1;

    return 1;   /* this audio target is available. */
}
Example 6
File: KLAL.c Project: klib/example
void KLAL_InitSystem()
{
	
	OSStatus err = AudioSessionInitialize(NULL, NULL, KLAL_Interruption, NULL);
	if(err)
	{
		KLLog("[ KLAL ] ERR: AudioSessionInitialize:%x\n", (int)err);
	}
	
	err = AudioSessionSetActive(TRUE);
	if(err)
	{
		KLLog("[ KLAL ] ERR: AudioSessionSetActive:%x\n", (int)err);
	}
	
	UInt32 sessionCategory = kAudioSessionCategory_AmbientSound;
	
	AudioSessionSetProperty( kAudioSessionProperty_AudioCategory,
							 sizeof (sessionCategory),
							 &sessionCategory );
	
	klal_pDevice	= alcOpenDevice(NULL);
	klal_pContext= alcCreateContext(klal_pDevice, NULL);
#if KLAL_ERRCHECK
	ALenum aen = alGetError();
	if( aen!=AL_NO_ERROR )
	{
		KLLog("[ KLAL ] %s %u\n", __func__, aen);
	}
#endif
	alcMakeContextCurrent(klal_pContext);
	klal_is_loaded = TRUE;
}
Example 7
void audio_unit_check_audio_route(aubio_audio_unit_t *o) {
  CFStringRef currentRoute = NULL;
  UInt32 val, thissize = sizeof(currentRoute);
  OSStatus err = AudioSessionGetProperty(kAudioSessionProperty_AudioRoute, &thissize, &currentRoute);
  if (err) { AUBIO_ERR("audio_unit: could not get current route\n"); goto fail; }
  else {
    char buffer[64];
    const char *route = CFStringGetCStringPtr (currentRoute, kCFStringEncodingUTF8);
    if (route == NULL) {
      // CFStringGetCStringPtr may return NULL; fall back to copying into a local buffer
      CFStringGetCString (currentRoute, buffer, sizeof(buffer), kCFStringEncodingUTF8);
      route = buffer;
    }
    if (o->verbose) {
      AUBIO_MSG ("audio_unit: current route is %s\n", route);
    }
  }
  if( currentRoute ) {
    if( CFStringCompare( currentRoute, CFSTR("Headset"), 0 ) == kCFCompareEqualTo ) {
      val = kAudioSessionOverrideAudioRoute_None;
    } else if( CFStringCompare( currentRoute, CFSTR("Receiver" ), 0 ) == kCFCompareEqualTo ) {
      val = kAudioSessionOverrideAudioRoute_Speaker;
    } else if( CFStringCompare( currentRoute, CFSTR("ReceiverAndMicrophone" ), 0 ) == kCFCompareEqualTo ) {
      val = kAudioSessionOverrideAudioRoute_Speaker;
    } else if( CFStringCompare( currentRoute, CFSTR("SpeakerAndMicrophone" ), 0 ) == kCFCompareEqualTo ) {
      val = kAudioSessionOverrideAudioRoute_Speaker;
    } else if( CFStringCompare( currentRoute, CFSTR("HeadphonesAndMicrophone" ), 0 ) == kCFCompareEqualTo ) {
      val = kAudioSessionOverrideAudioRoute_None;
    } else if( CFStringCompare( currentRoute, CFSTR("HeadsetInOut" ), 0 ) == kCFCompareEqualTo ) {
      val = kAudioSessionOverrideAudioRoute_None;
    } else {
      val = kAudioSessionOverrideAudioRoute_None;
    }

    o->input_enabled = true;
    if (val == kAudioSessionOverrideAudioRoute_Speaker) {
      if (o->prevent_feedback) {
        o->input_enabled = false;
        if (o->verbose) {
          AUBIO_MSG ("audio_unit: disabling input to avoid feedback\n");
        }
      } else {
        AUBIO_WRN ("audio_unit: input not disabled as prevent_feedback set to 0, risking feedback\n");
      }
    }

    err = AudioSessionSetProperty(kAudioSessionProperty_OverrideAudioRoute,
        sizeof(UInt32), &val);
    if (err) { AUBIO_ERR("audio_unit: could not set session OverrideAudioRoute to Speaker\n"); }

  }

fail:
  if ( currentRoute ) CFRelease(currentRoute);
  return;

}
Example 8
/*
 * Audio Session Configuration.
 * Requests an audio session from Core Audio and configures it for effects processing by default (one input, one output).
 * <Sam> All major configuration for the AudioSession instance is done here.
 */
int MUEAudioIO::configureAudioSession()
{
    try {
		// Initialize and configure the audio session
		AudioSessionInitialize(NULL, NULL, rioInterruptionListener, this);
		AudioSessionSetActive(true);
		

		//audio should not mix with iPod audio, and we want input and output.
		UInt32 audioCategory = kAudioSessionCategory_PlayAndRecord;
        //audio will mix with iPod audio, but we get output only (no input) with this type of session
        //UInt32 audioCategory = kAudioSessionCategory_AmbientSound;
		AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(audioCategory), &audioCategory);
		
        
		// The entire purpose of the propListener is to detect a change in signal flow (headphones w/ mic or even third party device)
		AudioSessionAddPropertyListener(kAudioSessionProperty_AudioRouteChange, propListener, this);
		
        
        //(TODO) make get/set preferred buffer size
		// This value is in seconds! We want really low latency...
		preferredBufferSize = .01;	// .005 for buffer of 256, .01 for buffer of 512
		AudioSessionSetProperty(kAudioSessionProperty_PreferredHardwareIOBufferDuration, 
								sizeof(preferredBufferSize), &preferredBufferSize);
		
		
		// Related to our propListener. When the signal flow changes, sometimes the hardware sample rate can change. You'll notice in the propListener it checks for a new one.
		UInt32 size = sizeof(hwSampleRate);
		AudioSessionGetProperty(kAudioSessionProperty_CurrentHardwareSampleRate, &size, &hwSampleRate);
	}
	catch (...) {
		printf("An unknown error occurred in audio session configuration!\n");
		//if (url) CFRelease(url);
	}
    
    return 0;
}
Example 9
bool IPhoneSoundDevice::Init()
{
    // Initialize the default audio session object to tell it
    // to allow background music, and to tell us when audio
    // gets resumed (like if a phone call comes in, iphone takes
    // over audio. If the user then ignores the phone call, the
    // audio needs to be turned on again.

    AudioSessionInitialize(NULL, NULL, wi::InterruptionListener, this);
    UInt32 category = kAudioSessionCategory_UserInterfaceSoundEffects;
    AudioSessionSetProperty(kAudioSessionProperty_AudioCategory,
            sizeof(category), &category);
    AudioSessionSetActive(true);

    // Set up streaming

    AudioStreamBasicDescription desc;
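    // 8 kHz, 8-bit, mono, packed linear PCM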
    desc.mSampleRate = 8000;
    desc.mFormatID = kAudioFormatLinearPCM;
    desc.mFormatFlags = kAudioFormatFlagIsPacked;
    desc.mBytesPerPacket = 1;
    desc.mFramesPerPacket = 1;
    desc.mBytesPerFrame = 1;
    desc.mChannelsPerFrame = 1;
    desc.mBitsPerChannel = 8;

    OSStatus err = AudioQueueNewOutput(&desc, AudioCallback, this,
            NULL,
            kCFRunLoopCommonModes,
            0, &m_haq);
    if (err != 0) {
        return false;
    }

    for (int i = 0; i < kcBuffers; i++) {
        err = AudioQueueAllocateBuffer(m_haq, kcbBuffer, &m_apaqb[i]);
        if (err != 0) {
           return false;
        }
    }

    return true;
}
Example 10
int tdav_audiounit_handle_configure(tdav_audiounit_handle_t* self, tsk_bool_t consumer, uint32_t ptime, AudioStreamBasicDescription* audioFormat)
{
	OSStatus status = noErr;
	tdav_audiounit_instance_t* inst = (tdav_audiounit_instance_t*)self;
	
	if(!inst || !audioFormat){
		TSK_DEBUG_ERROR("Invalid parameter");
		return -1;
	}

#if TARGET_OS_IPHONE
	// set preferred buffer size
	Float32 preferredBufferSize = ((Float32)ptime / 1000.f); // in seconds
	UInt32 size = sizeof(preferredBufferSize);
	status = AudioSessionSetProperty(kAudioSessionProperty_PreferredHardwareIOBufferDuration, sizeof(preferredBufferSize), &preferredBufferSize);
	if(status != noErr){
		TSK_DEBUG_ERROR("AudioSessionSetProperty(kAudioSessionProperty_PreferredHardwareIOBufferDuration) failed with status=%ld", status);
		TSK_OBJECT_SAFE_FREE(inst);
		goto done;
	}
	status = AudioSessionGetProperty(kAudioSessionProperty_CurrentHardwareIOBufferDuration, &size, &preferredBufferSize);
	if(status == noErr){
		inst->frame_duration = (preferredBufferSize * 1000);
		TSK_DEBUG_INFO("Frame duration=%d", inst->frame_duration);
	}
	else {
		TSK_DEBUG_ERROR("AudioSessionGetProperty(kAudioSessionProperty_CurrentHardwareIOBufferDuration, %f) failed", preferredBufferSize);
	}
	
	
	UInt32 audioCategory = kAudioSessionCategory_PlayAndRecord;
	status = AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(audioCategory), &audioCategory);
	if(status != noErr){
		TSK_DEBUG_ERROR("AudioSessionSetProperty(kAudioSessionProperty_AudioCategory) failed with status code=%ld", status);
		goto done;
	}
	
#elif TARGET_OS_MAC
#if 1
	// set preferred buffer size
	UInt32 preferredBufferSize = ((ptime * audioFormat->mSampleRate)/1000); // in frames
	UInt32 size = sizeof(preferredBufferSize);
	status = AudioUnitSetProperty(inst->audioUnit, kAudioDevicePropertyBufferFrameSize, kAudioUnitScope_Global, 0, &preferredBufferSize, size);
	if(status != noErr){
		TSK_DEBUG_ERROR("AudioUnitSetProperty(kAudioOutputUnitProperty_SetInputCallback) failed with status=%ld", (signed long)status);
	}
	status = AudioUnitGetProperty(inst->audioUnit, kAudioDevicePropertyBufferFrameSize, kAudioUnitScope_Global, 0, &preferredBufferSize, &size);
	if(status == noErr){
		inst->frame_duration = ((preferredBufferSize * 1000)/audioFormat->mSampleRate);
		TSK_DEBUG_INFO("Frame duration=%d", inst->frame_duration);
	}
	else {
		TSK_DEBUG_ERROR("AudioUnitGetProperty(kAudioDevicePropertyBufferFrameSize, %lu) failed", (unsigned long)preferredBufferSize);
	}
#endif
	
#endif
	
done:
	return (status == noErr) ? 0 : -2;
}
Example 11
static void au_configure(AUData *d) {
	AudioStreamBasicDescription audioFormat;
	AudioComponentDescription au_description;
	AudioComponent foundComponent;
	OSStatus auresult;
	UInt32 doSetProperty      = 1;
	UInt32 doNotSetProperty    = 0;
	
	
	
	auresult = AudioSessionSetActive(true);
	check_auresult(auresult,"AudioSessionSetActive");
	
	
	UInt32 audioCategory = kAudioSessionCategory_PlayAndRecord;
	auresult = AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(audioCategory), &audioCategory);
	check_auresult(auresult,"Configuring audio session ");
	

	if (d->is_ringer) {
		auresult=AudioSessionSetProperty (kAudioSessionProperty_OverrideCategoryDefaultToSpeaker,sizeof (doSetProperty),&doSetProperty);
		check_auresult(auresult,"kAudioSessionProperty_OverrideAudioRoute");
		ms_message("Configuring audio session default route to speaker");
	} else {
		ms_message("Configuring audio session default route to receiver");
	}
	if (d->started == TRUE) {
		//nothing else to do
		return;
	}
	
	au_description.componentType          = kAudioUnitType_Output;
	au_description.componentSubType       = kAudioUnitSubType_VoiceProcessingIO;
	au_description.componentManufacturer  = kAudioUnitManufacturer_Apple;
	au_description.componentFlags         = 0;
	au_description.componentFlagsMask     = 0;
	
	foundComponent = AudioComponentFindNext (NULL,&au_description);
	
	auresult=AudioComponentInstanceNew (foundComponent, &d->io_unit);
	
	check_auresult(auresult,"AudioComponentInstanceNew");

	audioFormat.mSampleRate			= d->rate;
	audioFormat.mFormatID			= kAudioFormatLinearPCM;
	audioFormat.mFormatFlags		= kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
	audioFormat.mFramesPerPacket	= 1;
	audioFormat.mChannelsPerFrame	= d->nchannels;
	audioFormat.mBitsPerChannel		= d->bits;
	audioFormat.mBytesPerPacket		= d->bits / 8;
	audioFormat.mBytesPerFrame		= d->nchannels * d->bits / 8;
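	// On the I/O unit, element (bus) 0 is the output/speaker side and element 1 is the input/microphone side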
	AudioUnitElement outputBus = 0;
	AudioUnitElement inputBus = 1;
	auresult=AudioUnitUninitialize (d->io_unit);
	
	check_auresult(auresult,"AudioUnitUninitialize");
	
	//read
	auresult=AudioUnitSetProperty (
								   d->io_unit,
								   kAudioOutputUnitProperty_EnableIO,
								   kAudioUnitScope_Input ,
								   inputBus,
								   &doSetProperty,
								   sizeof (doSetProperty)
								   );
	check_auresult(auresult,"kAudioOutputUnitProperty_EnableIO,kAudioUnitScope_Input");
	//setup stream format
	auresult=AudioUnitSetProperty (
								   d->io_unit,
								   kAudioUnitProperty_StreamFormat,
								   kAudioUnitScope_Input,
								   outputBus,
								   &audioFormat,
								   sizeof (audioFormat)
								   );
	check_auresult(auresult,"kAudioUnitProperty_StreamFormat,kAudioUnitScope_Input");

	//write	
	//enable output bus
	auresult =AudioUnitSetProperty (
									d->io_unit,
									kAudioOutputUnitProperty_EnableIO,
									kAudioUnitScope_Output ,
									outputBus,
									&doSetProperty,
									sizeof (doSetProperty)
									);
	check_auresult(auresult,"kAudioOutputUnitProperty_EnableIO,kAudioUnitScope_Output");
	
	//setup stream format
	auresult=AudioUnitSetProperty (
								   d->io_unit,
								   kAudioUnitProperty_StreamFormat,
								   kAudioUnitScope_Output,
								   inputBus,
								   &audioFormat,
								   sizeof (audioFormat)
								   );
	check_auresult(auresult,"kAudioUnitProperty_StreamFormat,kAudioUnitScope_Output");
	
	check_auresult(auresult,"kAudioUnitProperty_StreamFormat,kAudioUnitScope_Input");
	
	//disable unit buffer allocation
	auresult=AudioUnitSetProperty (
								   d->io_unit,
								   kAudioUnitProperty_ShouldAllocateBuffer,
								   kAudioUnitScope_Output,
								   outputBus,
								   &doNotSetProperty,
								   sizeof (doNotSetProperty)
								   );
	check_auresult(auresult,"kAudioUnitProperty_ShouldAllocateBuffer,kAudioUnitScope_Output");
	
	
	AURenderCallbackStruct renderCallbackStruct;            
	renderCallbackStruct.inputProc       = au_render_cb;  
	renderCallbackStruct.inputProcRefCon = d;          
	
	auresult=AudioUnitSetProperty (
								   d->io_unit,                                  
								   kAudioUnitProperty_SetRenderCallback,        
								   kAudioUnitScope_Input,                       
								   outputBus,                                   
								   &renderCallbackStruct,                              
								   sizeof (renderCallbackStruct)                       
								   );
	check_auresult(auresult,"kAudioUnitProperty_SetRenderCallback,kAudioUnitScope_Input");
	
	const Float64 preferredSampleRate = d->rate;//PREFERRED_HW_SAMPLE_RATE; /*optimum to minimize delay, must put a software resampler to deal with 8khz*/
	
	auresult=AudioSessionSetProperty(kAudioSessionProperty_PreferredHardwareSampleRate
									 ,sizeof(preferredSampleRate)
									 , &preferredSampleRate);
	check_auresult(auresult,"kAudioSessionProperty_PreferredHardwareSampleRate");
	
	
	Float32 preferredBufferSize;
	switch (d->rate) {
		case 11025:
		case 22050: 
			preferredBufferSize= .020;
			break;
		default:
			preferredBufferSize= .015;
	}
			
	auresult=AudioSessionSetProperty(kAudioSessionProperty_PreferredHardwareIOBufferDuration
									 ,sizeof(preferredBufferSize)
									 , &preferredBufferSize);
	
	
	if (auresult != 0) ms_message("kAudioSessionProperty_PreferredHardwareIOBufferDuration returns %i ",auresult);
	
	Float64 delay;
	UInt32 delaySize = sizeof(delay);
	auresult=AudioUnitGetProperty(d->io_unit
								  ,kAudioUnitProperty_Latency
								  , kAudioUnitScope_Global
								  , 0
								  , &delay
								  , &delaySize);

	UInt32 quality;
	UInt32 qualitySize = sizeof(quality);
	auresult=AudioUnitGetProperty(d->io_unit
								  ,kAudioUnitProperty_RenderQuality
								  , kAudioUnitScope_Global
								  , 0
								  , &quality
								  , &qualitySize);
	
	
	
	ms_message("I/O unit latency [%f], quality [%i]",delay,quality);
	Float32 hwoutputlatency;
	UInt32 hwoutputlatencySize=sizeof(hwoutputlatency);
	auresult=AudioSessionGetProperty(kAudioSessionProperty_CurrentHardwareOutputLatency
									 ,&hwoutputlatencySize
									 , &hwoutputlatency);
	Float32 hwinputlatency;
	UInt32 hwinputlatencySize=sizeof(hwinputlatency);
	auresult=AudioSessionGetProperty(kAudioSessionProperty_CurrentHardwareInputLatency
									 ,&hwinputlatencySize
									 , &hwinputlatency);
	
	Float32 hwiobuf;
	UInt32 hwiobufSize=sizeof(hwiobuf);
	auresult=AudioSessionGetProperty(kAudioSessionProperty_CurrentHardwareIOBufferDuration
									 ,&hwiobufSize
									 , &hwiobuf);
	
	Float64 hwsamplerate;
	UInt32 hwsamplerateSize=sizeof(hwsamplerate);
	auresult=AudioSessionGetProperty(kAudioSessionProperty_CurrentHardwareSampleRate
									 ,&hwsamplerateSize
									 ,&hwsamplerate);

	ms_message("current hw output latency [%f] input [%f] iobuf[%f] sample rate [%f]",hwoutputlatency,hwinputlatency,hwiobuf,hwsamplerate);
	auresult=AudioOutputUnitStart(d->io_unit);
	check_auresult(auresult,"AudioOutputUnitStart");
	d->started=TRUE;
	return;
}	
Example 12
sint_t aubio_audio_unit_init (aubio_audio_unit_t *o)
{
  OSStatus err = noErr;
  Float32 latency = o->latency;
  Float64 samplerate = (Float64)o->samplerate;

  o->au_ios_cb_struct.inputProc = aubio_audio_unit_process;
  o->au_ios_cb_struct.inputProcRefCon = o;

  /* setting up audio session with interruption listener */
  err = AudioSessionInitialize(NULL, NULL, audio_unit_interruption_listener, o);
  if (err) { AUBIO_ERR("audio_unit: could not initialize audio session (%d)\n", (int)err); goto fail; }

  audio_unit_set_audio_session_category(o->input_enabled, o->verbose);
  audio_unit_check_audio_route(o);

  /* add route change listener */
  err = AudioSessionAddPropertyListener(kAudioSessionProperty_AudioRouteChange,
      audio_unit_route_change_listener, o);
  if (err) { AUBIO_ERR("audio_unit: could not set route change listener (%d)\n", (int)err); goto fail; }

  /* set latency */
  err = AudioSessionSetProperty(kAudioSessionProperty_PreferredHardwareIOBufferDuration,
      sizeof(latency), &latency);
  if (err) { AUBIO_ERR("audio_unit: could not set preferred latency (%d)\n", (int)err); goto fail; }

#if 0 // only for iphone OS >= 3.1
  UInt32 val = 1; // set to 0 (default) to use ear speaker in voice application
  err = AudioSessionSetProperty(kAudioSessionProperty_OverrideCategoryDefaultToSpeaker,
      sizeof(UInt32), &val);
  if (err) { AUBIO_ERR("audio_unit: could not set session property to default to speaker\n"); }
#endif

  /* setting up audio unit */
  AudioComponentDescription desc;
  desc.componentManufacturer = kAudioUnitManufacturer_Apple;
  desc.componentSubType = kAudioUnitSubType_RemoteIO;
  desc.componentType = kAudioUnitType_Output;
  desc.componentFlags = 0;
  desc.componentFlagsMask = 0;

  AudioStreamBasicDescription audioFormat;

  /* look for a component that match the description */
  AudioComponent comp = AudioComponentFindNext(NULL, &desc);

  /* create the audio component */
  AudioUnit *audio_unit = &(o->audio_unit);

  err = AudioComponentInstanceNew(comp, &(o->audio_unit));
  if (err) { AUBIO_ERR("audio_unit: failed creating the audio unit\n"); goto fail; }

  /* enable IO */
  UInt32 enabled = 1;
  err = AudioUnitSetProperty (*audio_unit, kAudioOutputUnitProperty_EnableIO,
      kAudioUnitScope_Input, 1, &enabled, sizeof(enabled));
  if (err) {
    AUBIO_ERR("audio_unit: failed enabling input of audio unit\n");
    goto fail;
  }

  /* set max fps */
  UInt32 max_fps = MIN(o->blocksize, MAX_FPS);
  err = AudioUnitSetProperty (*audio_unit, kAudioUnitProperty_MaximumFramesPerSlice,
      kAudioUnitScope_Global, 0, &max_fps, sizeof(max_fps));
  if (err) {
    AUBIO_ERR("audio_unit: could not set maximum frames per slice property (%d)\n", (int)err);
    goto fail;
  }

  err = AudioUnitSetProperty (*audio_unit, kAudioUnitProperty_SetRenderCallback,
      kAudioUnitScope_Input, 0, &(o->au_ios_cb_struct), sizeof(o->au_ios_cb_struct));
  if (err) { AUBIO_ERR("audio_unit: failed setting audio unit render callback\n"); goto fail; }

#if 0
  err = AudioUnitSetProperty (*audio_unit, kAudioUnitProperty_SampleRate,
      kAudioUnitScope_Input, 0, &samplerate, sizeof(Float64));
  if (err) { AUBIO_ERR("audio_unit: could not set audio input sample rate\n"); goto fail; }
  err = AudioUnitSetProperty (*audio_unit, kAudioUnitProperty_SampleRate,
      kAudioUnitScope_Output, 1, &samplerate, sizeof(Float64));
  if (err) { AUBIO_ERR("audio_unit: could not set audio input sample rate\n"); goto fail; }
#endif

  audioFormat.mSampleRate = (Float64)samplerate;
  audioFormat.mChannelsPerFrame = 2;
  audioFormat.mFormatID = kAudioFormatLinearPCM;
  audioFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked;
  audioFormat.mFramesPerPacket = 1;
  audioFormat.mBitsPerChannel = 8 * sizeof(SInt16);
#if 1  // interleaving
  audioFormat.mBytesPerFrame = 2 * sizeof(SInt16);
  audioFormat.mBytesPerPacket = 2 * sizeof(SInt16);
#else
  audioFormat.mBytesPerPacket = audioFormat.mBytesPerFrame = sizeof(SInt32);
  audioFormat.mFormatFlags |= kAudioFormatFlagIsNonInterleaved;
#endif

  err = AudioUnitSetProperty (*audio_unit, kAudioUnitProperty_StreamFormat,
      kAudioUnitScope_Input, 0, &audioFormat, sizeof(audioFormat));
  if (err) { AUBIO_ERR("audio_unit: could not set audio output format\n"); goto fail; }
  err = AudioUnitSetProperty (*audio_unit, kAudioUnitProperty_StreamFormat,
      kAudioUnitScope_Output, 1, &audioFormat, sizeof(audioFormat));
  if (err) { AUBIO_ERR("audio_unit: could not set audio input format\n"); goto fail; }

#if 0
  AudioStreamBasicDescription thruFormat;
  thissize = sizeof(thruFormat);
  err = AudioUnitGetProperty (*audio_unit, kAudioUnitProperty_StreamFormat,
      kAudioUnitScope_Input, 0, &thruFormat, &thissize);
  if (err) { AUBIO_ERR("audio_unit: could not get speaker output format, err: %d\n", (int)err); goto fail; }
  err = AudioUnitSetProperty (*audio_unit, kAudioUnitProperty_StreamFormat,
      kAudioUnitScope_Output, 1, &thruFormat, sizeof(thruFormat));
  if (err) { AUBIO_ERR("audio_unit: could not set input audio format, err: %d\n", (int)err); goto fail; }
#endif

  /* time to initialize the unit */
  err = AudioUnitInitialize(*audio_unit);
  if (err) { AUBIO_ERR("audio_unit: failed initializing audio, err: %d\n", (int)err); goto fail; }

  return 0;

fail:
  return err;
}
Example 13
int tdav_consumer_audioqueue_prepare(tmedia_consumer_t* self, const tmedia_codec_t* codec)
{
    OSStatus ret;
    tsk_size_t i;
    tdav_consumer_audioqueue_t* consumer = (tdav_consumer_audioqueue_t*)self;

    if(!consumer || !codec || !codec->plugin) {
        TSK_DEBUG_ERROR("Invalid parameter");
        return -1;
    }

    TMEDIA_CONSUMER(consumer)->audio.ptime = codec->plugin->audio.ptime;
    TMEDIA_CONSUMER(consumer)->audio.in.channels = codec->plugin->audio.channels;
    TMEDIA_CONSUMER(consumer)->audio.in.rate = codec->plugin->rate;
    /* codec should have ptime */

    // Set audio category
#if TARGET_OS_IPHONE
    UInt32 category = kAudioSessionCategory_PlayAndRecord;
    AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(category), &category);
#endif

    // Create the audio stream description
    AudioStreamBasicDescription *description = &(consumer->description);
    description->mSampleRate = TMEDIA_CONSUMER(consumer)->audio.out.rate ? TMEDIA_CONSUMER(consumer)->audio.out.rate : TMEDIA_CONSUMER(consumer)->audio.in.rate;
    description->mFormatID = kAudioFormatLinearPCM;
    description->mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
    description->mChannelsPerFrame = TMEDIA_CONSUMER(consumer)->audio.in.channels;
    description->mFramesPerPacket = 1;
    description->mBitsPerChannel = TMEDIA_CONSUMER(consumer)->audio.bits_per_sample;
    description->mBytesPerPacket = description->mBitsPerChannel / 8 * description->mChannelsPerFrame;
    description->mBytesPerFrame = description->mBytesPerPacket;
    description->mReserved = 0;

    int packetperbuffer = 1000 / TMEDIA_CONSUMER(consumer)->audio.ptime;
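    // one buffer holds ptime milliseconds of audio: bytes per second divided by buffers per second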
    consumer->buffer_size = description->mSampleRate * description->mBytesPerFrame / packetperbuffer;

    // Create the playback audio queue
    ret = AudioQueueNewOutput(&(consumer->description),
                              __handle_output_buffer,
                              consumer,
                              NULL,
                              NULL,
                              0,
                              &(consumer->queue));

    for(i = 0; i < CoreAudioPlayBuffers; i++) {
        // Create the buffer for the queue
        ret = AudioQueueAllocateBuffer(consumer->queue, consumer->buffer_size, &(consumer->buffers[i]));
        if (ret) {
            break;
        }

        // Clear the data
        memset(consumer->buffers[i]->mAudioData, 0, consumer->buffer_size);
        consumer->buffers[i]->mAudioDataByteSize = consumer->buffer_size;

        // Enqueue the buffer
        ret = AudioQueueEnqueueBuffer(consumer->queue, consumer->buffers[i], 0, NULL);
        if (ret) {
            break;
        }
    }

    return ret;
}
Example 14
static int tdav_producer_audioqueue_prepare(tmedia_producer_t* self, const tmedia_codec_t* codec)
{
    OSStatus ret;
	tsk_size_t i;
	tdav_producer_audioqueue_t* producer = (tdav_producer_audioqueue_t*)self;
	
	if(!producer || !codec || !codec->plugin){
		TSK_DEBUG_ERROR("Invalid parameter");
		return -1;
	}

	TMEDIA_PRODUCER(producer)->audio.channels = TMEDIA_CODEC_CHANNELS_AUDIO_ENCODING(codec);
	TMEDIA_PRODUCER(producer)->audio.rate = TMEDIA_CODEC_RATE_ENCODING(codec);
	TMEDIA_PRODUCER(producer)->audio.ptime = TMEDIA_CODEC_PTIME_AUDIO_ENCODING(codec);
	/* codec should have ptime */
	
	
	// Set audio category
#if TARGET_OS_IPHONE
	UInt32 category = kAudioSessionCategory_PlayAndRecord;
	AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(category), &category);
#endif
    // Create the audio stream description
    AudioStreamBasicDescription *description = &(producer->description);
    description->mSampleRate = TMEDIA_PRODUCER(producer)->audio.rate;
    description->mFormatID = kAudioFormatLinearPCM;
    description->mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
    description->mChannelsPerFrame = TMEDIA_PRODUCER(producer)->audio.channels;
    description->mFramesPerPacket = 1;
    description->mBitsPerChannel = TMEDIA_PRODUCER(producer)->audio.bits_per_sample;
    description->mBytesPerPacket = description->mBitsPerChannel / 8 * description->mChannelsPerFrame;
    description->mBytesPerFrame = description->mBytesPerPacket;
    description->mReserved = 0;
    
    int packetperbuffer = 1000 / TMEDIA_PRODUCER(producer)->audio.ptime;
    producer->buffer_size = description->mSampleRate * description->mBytesPerFrame / packetperbuffer;
    
    // Create the record audio queue
    ret = AudioQueueNewInput(&(producer->description),
							 __handle_input_buffer,
							 producer,
							 NULL, 
							 kCFRunLoopCommonModes,
							 0,
							 &(producer->queue));
    
    for(i = 0; i < CoreAudioRecordBuffers; i++) {
        // Create the buffer for the queue
        ret = AudioQueueAllocateBuffer(producer->queue, producer->buffer_size, &(producer->buffers[i]));
        if (ret) {
            break;
        }
        
        // Clear the data
        memset(producer->buffers[i]->mAudioData, 0, producer->buffer_size);
        producer->buffers[i]->mAudioDataByteSize = producer->buffer_size;
        
        // Enqueue the buffer
        ret = AudioQueueEnqueueBuffer(producer->queue, producer->buffers[i], 0, NULL);
        if (ret) {
            break;
        }
    }
	
	return ret;
}
Example 15
    Manager::Manager(CriAtomExPlayerConfig playerConfig,
                             CriAtomExStandardVoicePoolConfig voicePoolConfig)
    {
#if (CC_TARGET_PLATFORM == CC_PLATFORM_IOS)
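        // Stop CRI Atom playback when the audio session is interrupted, and restart it when the interruption ends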
        auto interruptionListener = [](void *userData, UInt32 interruptionState) {
            switch (interruptionState) {
                case kAudioSessionBeginInterruption:
                    criAtomEx_StopSound_IOS();
                    break;
                case kAudioSessionEndInterruption:
                    AudioSessionSetActive(true);
                    criAtomEx_StartSound_IOS();
                    break;
            }
        };
        AudioSessionInitialize(NULL, NULL, interruptionListener, NULL);
        UInt32 category = kAudioSessionCategory_AmbientSound;
        AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(category), &category);
        AudioSessionSetActive(true);
#endif
        auto errorCallback = [](const char *errid, uint32_t p1, uint32_t p2, uint32_t *parray) {
            const CriChar8 *errmsg;
            errmsg = criErr_ConvertIdToMessage(errid, p1, p2);
            cocos2d::log("%s", errmsg);
        };
        criErr_SetCallback(errorCallback);
        
        auto userAlloc = [](void *obj, uint32_t size) {
            return malloc(size);
        };
        
        auto userFree = [] (void *obj, void *ptr) {
            free(ptr);
        };
        
        criAtomEx_SetUserAllocator(userAlloc, userFree, NULL);
        
#if (CC_TARGET_PLATFORM == CC_PLATFORM_IOS)
        criAtomEx_Initialize_IOS(NULL, NULL, 0);
#elif (CC_TARGET_PLATFORM == CC_PLATFORM_ANDROID)
        criAtomEx_Initialize_ANDROID(NULL, NULL, 0);
        
        /* Extra step needed on Android: enable access to the assets folder */
        /* First, get the Activity's Context via JniHelper */
        cocos2d::JniMethodInfo methodInfo;
        cocos2d::JniHelper::getStaticMethodInfo(methodInfo,
                                                "org/cocos2dx/lib/Cocos2dxActivity",
                                                "getContext",
                                                "()Landroid/content/Context;");
        auto android_context_object = (jobject)methodInfo.env->CallStaticObjectMethod( methodInfo.classID, methodInfo.methodID );
        /* Enable it. The assets folder is the equivalent of a Cocos project's Resources, so this step is practically mandatory */
        criFs_EnableAssetsAccess_ANDROID(cocos2d::JniHelper::getJavaVM(), android_context_object);
#elif (CC_TARGET_PLATFORM == CC_PLATFORM_WIN32)
        criAtomEx_Initialize_PC(NULL, NULL, 0);
#endif
        
        _dbasID = criAtomDbas_Create(NULL, NULL, 0);
        
        /* Create the voice pool, passing in the config object built above */
        _voicePool = criAtomExVoicePool_AllocateStandardVoicePool(&voicePoolConfig, NULL, 0);
        
        /* A config is also needed to create the player */
        criAtomExPlayer_SetDefaultConfig(&playerConfig);
        _player = criAtomExPlayer_Create(&playerConfig, NULL, 0);
    }
Example 16
int audiosess_alloc(struct audiosess_st **stp,
		    audiosess_int_h *inth, void *arg)
{
	struct audiosess_st *st = NULL;
	struct audiosess *as = NULL;
	int err = 0;
	bool created = false;
#if TARGET_OS_IPHONE
	AudioSessionPropertyID id = kAudioSessionProperty_AudioRouteChange;
	UInt32 category;
	OSStatus ret;
#endif

	if (!stp)
		return EINVAL;

#if TARGET_OS_IPHONE
	/* Must be done for all modules */
	category = kAudioSessionCategory_PlayAndRecord;
	ret = AudioSessionSetProperty(kAudioSessionProperty_AudioCategory,
				      sizeof(category), &category);
	if (ret) {
		re_fprintf(stderr, "Audio Category: %d\n", ret);
		return EINVAL;
	}
#endif

	if (gas)
		goto makesess;

	as = mem_zalloc(sizeof(*as), destructor);
	if (!as)
		return ENOMEM;

#if TARGET_OS_IPHONE
	ret = AudioSessionSetActive(true);
	if (ret) {
		re_fprintf(stderr, "AudioSessionSetActive: %d\n", ret);
		err = ENOSYS;
		goto out;
	}

	ret = AudioSessionAddPropertyListener(id, propListener, as);
	if (ret) {
		re_fprintf(stderr, "AudioSessionAddPropertyListener: %d\n",
			   ret);
		err = EINVAL;
		goto out;
	}
#endif

	gas = as;
	created = true;

 makesess:
	st = mem_zalloc(sizeof(*st), sess_destructor);
	if (!st) {
		err = ENOMEM;
		goto out;
	}
	st->inth = inth;
	st->arg = arg;
	st->as = created ? gas : mem_ref(gas);

	list_append(&gas->sessl, &st->le, st);

 out:
	if (err) {
		mem_deref(as);
		mem_deref(st);
	}
	else {
		*stp = st;
	}

	return err;
}
Example 17
static void aq_start_w(MSFilter * f)
{
    AQData *d = (AQData *) f->data;
    if (d->write_started == FALSE) {
        OSStatus aqresult;
#if TARGET_OS_IPHONE
        aqresult = AudioSessionSetActive(true);
        check_aqresult(aqresult,"AudioSessionSetActive");

        UInt32 audioCategory;

        audioCategory= kAudioSessionCategory_AmbientSound;
        ms_message("Configuring audio session for play back");
        aqresult =AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(audioCategory), &audioCategory);
        check_aqresult(aqresult,"Configuring audio session ");
#endif
        d->writeAudioFormat.mSampleRate = d->rate;
        d->writeAudioFormat.mFormatID = kAudioFormatLinearPCM;
        d->writeAudioFormat.mFormatFlags =
            kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
        d->writeAudioFormat.mFramesPerPacket = 1;
        d->writeAudioFormat.mChannelsPerFrame = 1;
        d->writeAudioFormat.mBitsPerChannel = d->bits;
        d->writeAudioFormat.mBytesPerPacket = d->bits / 8;
        d->writeAudioFormat.mBytesPerFrame = d->bits / 8;

        //show_format("data provided to output filter",	&d->writeAudioFormat);
        //show_format("output device", &d->devicewriteFormat);

        memcpy(&d->devicewriteFormat, &d->writeAudioFormat,
               sizeof(d->writeAudioFormat));
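        // enough bytes for kSecondsPerBuffer seconds of audio in the device write format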
        d->writeBufferByteSize =
            kSecondsPerBuffer * d->devicewriteFormat.mSampleRate *
            (d->devicewriteFormat.mBitsPerChannel / 8) *
            d->devicewriteFormat.mChannelsPerFrame;

#if 0
        aqresult = AudioConverterNew(&d->writeAudioFormat,
                                     &d->devicewriteFormat,
                                     &d->writeAudioConverter);
        if (aqresult != noErr) {
            ms_error("d->writeAudioConverter = %d", aqresult);
            d->writeAudioConverter = NULL;
        }
#endif

        // create the playback audio queue object
        aqresult = AudioQueueNewOutput(&d->devicewriteFormat, writeCallback, d, NULL,	/*CFRunLoopGetCurrent () */
                                       NULL,	/*kCFRunLoopCommonModes */
                                       0,	// run loop flags
                                       &d->writeQueue);
        if (aqresult != noErr) {
            ms_error("AudioQueueNewOutput = %ld", aqresult);
        }

        AudioQueueSetParameter (d->writeQueue,
                                kAudioQueueParam_Volume,
                                gain_volume_out);

        if (d->uidname!=NULL) {
            char uidname[256];
            CFStringGetCString(d->uidname, uidname, 256,
                               CFStringGetSystemEncoding());
            ms_message("AQ: using uidname:%s", uidname);
            aqresult =
                AudioQueueSetProperty(d->writeQueue,
                                      kAudioQueueProperty_CurrentDevice,
                                      &d->uidname, sizeof(CFStringRef));
            if (aqresult != noErr) {
                ms_error
                ("AudioQueueSetProperty on kAudioQueueProperty_CurrentDevice %ld",
                 aqresult);
            }
        }

        setupWrite(f);
        d->curWriteBuffer = 0;
    }
}