Example 1
static GVBool gviHardwareInitCapture(GVIDevice * device)
{
	GVIHardwareData * data = (GVIHardwareData *)device->m_data;
	UInt32 size;
	OSStatus result;
	GVICapturedFrame * frame;
	int numCaptureBufferBytes;
	int numCaptureBufferFrames;
	int i;

	// get the capture format
	size = sizeof(AudioStreamBasicDescription);
	result = AudioDeviceGetProperty(device->m_deviceID, 0, true, kAudioDevicePropertyStreamFormat, &size, &data->m_captureStreamDescriptor);
	if(result != noErr)
		return GVFalse;

	// create a converter from the capture format to the GV format
	result = AudioConverterNew(&data->m_captureStreamDescriptor, &GVIVoiceFormat, &data->m_captureConverter);
	if(result != noErr)
		return GVFalse;

	// allocate a capture buffer
	data->m_captureBuffer = (GVSample *)gsimalloc(GVIBytesPerFrame);
	if(!data->m_captureBuffer)
	{
		AudioConverterDispose(data->m_captureConverter);
		return GVFalse;		
	}

	// allocate space for holding captured frames
	numCaptureBufferBytes = gviMultiplyByBytesPerMillisecond(GVI_CAPTURE_BUFFER_MILLISECONDS);
	numCaptureBufferBytes = gviRoundUpToNearestMultiple(numCaptureBufferBytes, GVIBytesPerFrame);
	numCaptureBufferFrames = (numCaptureBufferBytes / GVIBytesPerFrame);
	for(i = 0 ; i < numCaptureBufferFrames ; i++)
	{
		frame = (GVICapturedFrame *)gsimalloc(sizeof(GVICapturedFrame) + GVIBytesPerFrame - sizeof(GVSample));
		if(!frame)
		{
			gviFreeCapturedFrames(&data->m_captureAvailableFrames);
			gsifree(data->m_captureBuffer);
			AudioConverterDispose(data->m_captureConverter);
			return GVFalse;
		}
		gviPushFirstFrame(&data->m_captureAvailableFrames, frame);
	}

	// init the last crossed time
	data->m_captureLastCrossedThresholdTime = (data->m_captureClock - GVI_HOLD_THRESHOLD_FRAMES - 1);

	// add property listener
	AudioDeviceAddPropertyListener(device->m_deviceID, 0, true, kAudioDevicePropertyDeviceIsAlive, gviPropertyListener, device);

#if GVI_VOLUME_IN_SOFTWARE
	// init volume
	data->m_captureVolume = (GVScalar)1.0;
#endif

	return GVTrue;
}
Example 2
void	CoreAudioDevice::AddPropertyListener(UInt32 inChannel, CoreAudioDeviceSection inSection, AudioHardwarePropertyID inPropertyID, AudioDevicePropertyListenerProc inListenerProc, void* inClientData)
{
    if (AudioDeviceAddPropertyListener(deviceID, inChannel, inSection, inPropertyID, inListenerProc, inClientData) != 0)
    {
        fprintf(stderr,"Error while Installing device notifications listener. Exiting.");
        exit(0);
    }
}
Example 3
static int
ca_init (void) {
    UInt32 sz;
    char device_name[128];

    sz = sizeof(device_id);
    if (AudioHardwareGetProperty (kAudioHardwarePropertyDefaultOutputDevice, &sz, &device_id)) {
        return -1;
    }

    sz = sizeof (device_name);
    if (AudioDeviceGetProperty (device_id, 1, 0, kAudioDevicePropertyDeviceName, &sz, device_name)) {
           return -1;
    }
    
    sz = sizeof (default_format);
    if (AudioDeviceGetProperty (device_id, 0, 0, kAudioDevicePropertyStreamFormat, &sz, &default_format)) {
        return -1;
    }

    UInt32 bufsize = 4096;
    sz = sizeof (bufsize);
    if (AudioDeviceSetProperty(device_id, NULL, 0, 0, kAudioDevicePropertyBufferFrameSize, sz, &bufsize)) {
        fprintf (stderr, "Failed to set buffer size\n");
    }

    if (ca_apply_format ()) {
        return -1;
    }

    if (AudioDeviceAddIOProc (device_id, ca_buffer_callback, NULL)) {
        return -1;
    }
    
    if (AudioDeviceAddPropertyListener (device_id, 0, 0, kAudioDevicePropertyStreamFormat, ca_fmtchanged, NULL)) {
        return -1;
    }
    
    ca_fmtchanged(0, 0, 0, kAudioDevicePropertyStreamFormat, NULL);

    state = OUTPUT_STATE_STOPPED;

    return 0;
}
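The ca_fmtchanged listener registered above is not part of this excerpt. A minimal sketch of what such a callback could look like under the legacy AudioDevicePropertyListenerProc signature; the re-read-the-format behavior is an assumption on my part, and it relies on the device_id and default_format globals used in ca_init:

// Hypothetical sketch: re-read the device stream format whenever CoreAudio
// reports a change. Assumes the device_id and default_format globals above.
// ca_init also invokes this directly with inDevice == 0, so the global id is used.
static OSStatus
ca_fmtchanged (AudioDeviceID inDevice, UInt32 inChannel, Boolean isInput,
               AudioDevicePropertyID inPropertyID, void *inClientData) {
    UInt32 sz = sizeof (default_format);
    if (AudioDeviceGetProperty (device_id, 0, 0, kAudioDevicePropertyStreamFormat, &sz, &default_format)) {
        fprintf (stderr, "ca_fmtchanged: failed to re-read stream format\n");
    }
    return noErr;
}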
Example 4
//_______________________________________________
//
//
//_______________________________________________
uint8_t coreAudioDevice::init(uint8_t channels, uint32_t fq) 
{
_channels = channels;
OSStatus 		err;
ComponentDescription 	desc;
AudioUnitInputCallback 	input;
AudioStreamBasicDescription streamFormat;
AudioDeviceID 		theDevice;
UInt32			sz=0;
UInt32			kFramesPerSlice=512; 

	desc.componentType = 'aunt';
	desc.componentSubType = kAudioUnitSubType_Output;
	desc.componentManufacturer = kAudioUnitID_DefaultOutput;
	desc.componentFlags = 0;
	desc.componentFlagsMask = 0;
		
	comp= FindNextComponent(NULL, &desc);
	if (comp == NULL)
	{
		printf("coreAudio: Cannot find component\n");
		return 0;
	}
		
	err = OpenAComponent(comp, &theOutputUnit);
	if(err)
	{
		printf("coreAudio: Cannot open component\n");
		return 0;
	}
	// Initialize it
	verify_noerr(AudioUnitInitialize(theOutputUnit));
	
	// Set up a callback function to generate output to the output unit
#if 1
	input.inputProc = MyRenderer;
	input.inputProcRefCon = NULL;
	
	verify_noerr(AudioUnitSetProperty(theOutputUnit, 
					kAudioUnitProperty_SetInputCallback, 
					kAudioUnitScope_Global,
					0,
					&input, 
					sizeof(input)));
#endif
	streamFormat.mSampleRate = fq;		
	streamFormat.mFormatID = kAudioFormatLinearPCM;	
	streamFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger 
								| kLinearPCMFormatFlagIsBigEndian
								| kLinearPCMFormatFlagIsPacked;

	streamFormat.mBytesPerPacket = channels * sizeof (UInt16);	
	streamFormat.mFramesPerPacket = 1;	
	streamFormat.mBytesPerFrame = channels * sizeof (UInt16);		
	streamFormat.mChannelsPerFrame = channels;	
	streamFormat.mBitsPerChannel = sizeof (UInt16) * 8;	
	
	verify_noerr(AudioUnitSetProperty(
		theOutputUnit,
		kAudioUnitProperty_StreamFormat,
		kAudioUnitScope_Input,
		0,
		&streamFormat,
		sizeof(AudioStreamBasicDescription)));
	
	printf("Rendering source:\n\t");
	printf ("SampleRate=%f,", streamFormat.mSampleRate);
	printf ("BytesPerPacket=%ld,", streamFormat.mBytesPerPacket);
	printf ("FramesPerPacket=%ld,", streamFormat.mFramesPerPacket);
	printf ("BytesPerFrame=%ld,", streamFormat.mBytesPerFrame);
	printf ("BitsPerChannel=%ld,", streamFormat.mBitsPerChannel);
	printf ("ChannelsPerFrame=%ld\n", streamFormat.mChannelsPerFrame);

	sz=sizeof (theDevice);
	verify_noerr(AudioUnitGetProperty 
		(theOutputUnit, kAudioOutputUnitProperty_CurrentDevice, 0, 0, &theDevice, &sz));
	sz = sizeof (kFramesPerSlice);
	verify_noerr(AudioDeviceSetProperty(theDevice, 0, 0, false,
		kAudioDevicePropertyBufferFrameSize, sz, &kFramesPerSlice));

	sz = sizeof (kFramesPerSlice);
	verify_noerr(AudioDeviceGetProperty(theDevice, 0, false, 
		kAudioDevicePropertyBufferFrameSize, &sz, &kFramesPerSlice));

	verify_noerr (AudioDeviceAddPropertyListener(theDevice, 0, false,
		kAudioDeviceProcessorOverload, OverloadListenerProc, 0));

	printf ("size of the device's buffer = %ld frames\n", kFramesPerSlice);
	
	frameCount=0;
	
	audioBuffer=new int16_t[BUFFER_SIZE]; // between half a second and a second should be enough :)
	
    return 1;
}
Example 5
static GVBool gviHardwareInitPlayback(GVIDevice * device)
{
	GVIHardwareData * data = (GVIHardwareData *)device->m_data;
	UInt32 size;
	OSStatus result;
	UInt32 primeMethod;
	SInt32 channelMap[100];
	int i;

	// create the array of sources
	data->m_playbackSources = gviNewSourceList();
	if(!data->m_playbackSources)
		return GVFalse;

	// get the playback format
	size = sizeof(AudioStreamBasicDescription);
	result = AudioDeviceGetProperty(device->m_deviceID, 0, false, kAudioDevicePropertyStreamFormat, &size, &data->m_playbackStreamDescriptor);
	if(result != noErr)
	{
		gviFreeSourceList(data->m_playbackSources);
		return GVFalse;
	}

	// create a converter from the GV format to the playback format
	result = AudioConverterNew(&GVIVoiceFormat, &data->m_playbackStreamDescriptor, &data->m_playbackConverter);
	if(result != noErr)
	{
		gviFreeSourceList(data->m_playbackSources);
		return GVFalse;
	}

	// set it to do no priming
	primeMethod = kConverterPrimeMethod_None;
	result = AudioConverterSetProperty(data->m_playbackConverter, kAudioConverterPrimeMethod, sizeof(UInt32), &primeMethod);
	if(result != noErr)
	{
		AudioConverterDispose(data->m_playbackConverter);
		gviFreeSourceList(data->m_playbackSources);
		return GVFalse;
	}

	// setup the converter to map the input channel to all output channels
	result = AudioConverterGetPropertyInfo(data->m_playbackConverter, kAudioConverterChannelMap, &size, NULL);
	if(result == noErr)
	{
		result = AudioConverterGetProperty(data->m_playbackConverter, kAudioConverterChannelMap, &size, channelMap);
		if(result == noErr)
		{
			for(i = 0 ; i < (size / sizeof(SInt32)) ; i++)
				channelMap[i] = 0;

			AudioConverterSetProperty(data->m_playbackConverter, kAudioConverterChannelMap, size, channelMap);
		}
	}

	// allocate the playback buffer
	data->m_playbackBuffer = (GVSample *)gsimalloc(GVIBytesPerFrame);
	if(!data->m_playbackBuffer)
	{
		AudioConverterDispose(data->m_playbackConverter);
		gviFreeSourceList(data->m_playbackSources);
		return GVFalse;
	}

	// add property listener
	AudioDeviceAddPropertyListener(device->m_deviceID, 0, false, kAudioDevicePropertyDeviceIsAlive, gviPropertyListener, device);

#if GVI_VOLUME_IN_SOFTWARE
	// init volume
	data->m_playbackVolume = (GVScalar)1.0;
#endif

	return GVTrue;
}
Example 6
void CCoreAudioSoundManager::Run()
{
  AudioDeviceAddPropertyListener(m_OutputDevice.GetId(), 0, false, kAudioDevicePropertyHogMode, PropertyChangeCallback, this); // If this fails, there is not a whole lot to be done
  m_OutputUnit.Start();
  CLog::Log(LOGDEBUG, "CCoreAudioSoundManager::Run: SoundManager is now running.");
}
Example 7
// ----------------------------------------------------------------------------
void AudioCoreDriver::initialize(PlayerLibSidplay* player, int sampleRate, int bitsPerSample)
// ----------------------------------------------------------------------------
{
	if (mInstanceId != 0)
		return;

	mPlayer = player;
	mNumSamplesInBuffer = 512;
	mIsPlaying = false;
	mIsPlayingPreRenderedBuffer = false;
	mBufferUnderrunDetected = false;
    mBufferUnderrunCount = 0;
    mSpectrumTemporalSmoothing = 0.5f;
	
	mPreRenderedBuffer = NULL;
	mPreRenderedBufferSampleCount = 0;
	mPreRenderedBufferPlaybackPosition = 0;
	
	if (!mIsInitialized)
	{
        OSStatus err;

        //get default output device
		UInt32 propertySize = sizeof(mDeviceID);
        AudioObjectPropertyAddress prop1 = {kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster};
        err = AudioObjectGetPropertyData(kAudioObjectSystemObject, &prop1, 0, NULL, &propertySize, &mDeviceID);
        if (err != kAudioHardwareNoError) {
            printf("AudioObjectGetPropertyData(kAudioHardwarePropertyDefaultOutputDevice) failed\n");
            return;
        }

		if (mDeviceID == kAudioDeviceUnknown)
			return;

        err = queryStreamFormat(mDeviceID, mStreamFormat);
        if (err != kAudioHardwareNoError) {
            printf("queryStreamFormat failed\n");
            return;
        }

        //add property listeners
#if USE_NEW_API
        AudioObjectPropertyAddress prop5 = {
            kAudioDevicePropertyStreamFormat,       //TODO this is deprecated, how to get this notification?
            kAudioObjectPropertyScopeGlobal,
            kAudioObjectPropertyElementMaster 
        };
        err = AudioObjectAddPropertyListener(mDeviceID, &prop5, streamFormatChanged, (void*)this);
        if (err != kAudioHardwareNoError) {
            printf("AudioObjectAddPropertyListener(streamFormatChanged) failed\n");
            return;
        }

        AudioObjectPropertyAddress prop6 = {
            kAudioDeviceProcessorOverload,
            kAudioObjectPropertyScopeGlobal,
            kAudioObjectPropertyElementMaster 
        };
        err = AudioObjectAddPropertyListener(mDeviceID, &prop6, overloadDetected, (void*)this);
        if (err != kAudioHardwareNoError) {
            printf("AudioObjectAddPropertyListener(overloadDetected) failed\n");
            return;
        }

        AudioObjectPropertyAddress prop7 = {
            kAudioHardwarePropertyDefaultOutputDevice,
            kAudioObjectPropertyScopeGlobal,
            kAudioObjectPropertyElementMaster 
        };
        err = AudioObjectAddPropertyListener(mDeviceID, &prop7, deviceChanged, (void*)this);
        if (err != kAudioHardwareNoError) {
            printf("AudioObjectAddPropertyListener(deviceChanged) failed\n");
            return;
        }
#else
		if (AudioDeviceAddPropertyListener(mDeviceID, 0, false, kAudioDevicePropertyStreamFormat, streamFormatChanged, (void*) this) != kAudioHardwareNoError)
			return;
		if (AudioDeviceAddPropertyListener(mDeviceID, 0, false, kAudioDeviceProcessorOverload, overloadDetected, (void*) this) != kAudioHardwareNoError)
			return;
		if (AudioHardwareAddPropertyListener(kAudioHardwarePropertyDefaultOutputDevice, deviceChanged, (void*) this)  != kAudioHardwareNoError)
			return;
#endif
		
		mSampleBuffer1 = new short[mNumSamplesInBuffer];
		memset(mSampleBuffer1, 0, sizeof(short) * mNumSamplesInBuffer);
		mSampleBuffer2 = new short[mNumSamplesInBuffer];
		memset(mSampleBuffer2, 0, sizeof(short) * mNumSamplesInBuffer);
        mSpectrumBuffer = new float[mNumSamplesInBuffer/2];
		memset(mSpectrumBuffer, 0, sizeof(float) * (mNumSamplesInBuffer/2));

        mSampleBuffer = mSampleBuffer1;
        mRetSampleBuffer = mSampleBuffer2;

		int bufferByteSize = mNumSamplesInBuffer * mStreamFormat.mChannelsPerFrame * sizeof(float);
		propertySize = sizeof(bufferByteSize);
#if USE_NEW_API
        AudioObjectPropertyAddress prop8 = {
            kAudioDevicePropertyBufferSize,
            kAudioObjectPropertyScopeGlobal,
            kAudioObjectPropertyElementMaster 
        };
        err = AudioObjectSetPropertyData(mDeviceID, &prop8, 0, NULL, propertySize, &bufferByteSize);
        if (err != kAudioHardwareNoError) {
            printf("AudioObjectSetPropertyData(kAudioDevicePropertyBufferSize) failed\n");
            return;
        }
#else
		if (AudioDeviceSetProperty(mDeviceID, NULL, 0, false, kAudioDevicePropertyBufferSize, propertySize, &bufferByteSize) != kAudioHardwareNoError)
			return;
#endif
		mScaleFactor = sBitScaleFactor;
		mPreRenderedBufferScaleFactor = sBitScaleFactor;
		
		if (AudioDeviceCreateIOProcID(mDeviceID, emulationPlaybackProc, (void*) this, &mEmulationPlaybackProcID) != kAudioHardwareNoError)
		{
			delete[] mSampleBuffer1;
			mSampleBuffer1 = NULL;
			delete[] mSampleBuffer2;
			mSampleBuffer2 = NULL;
            mSampleBuffer = NULL;
            mRetSampleBuffer = NULL;
			delete[] mSpectrumBuffer;
			mSpectrumBuffer = NULL;
			return;
		}

		if (AudioDeviceCreateIOProcID(mDeviceID, preRenderedBufferPlaybackProc, (void*) this, &mPreRenderedBufferPlaybackProcID) != kAudioHardwareNoError)
		{
			delete[] mSampleBuffer1;
			mSampleBuffer1 = NULL;
			delete[] mSampleBuffer2;
			mSampleBuffer2 = NULL;
            mSampleBuffer = NULL;
            mRetSampleBuffer = NULL;
			delete[] mSpectrumBuffer;
			mSpectrumBuffer = NULL;
			return;
		}
	}
	
	mVolume = 1.0f;
	mIsInitialized = true;
}
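Regarding the TODO above about the deprecated kAudioDevicePropertyStreamFormat notification: one possible replacement (an assumption on my part, not something the original code does) is to listen for kAudioStreamPropertyVirtualFormat on the device's individual output streams. A sketch under that assumption, reusing the streamFormatChanged callback from the listing; the helper name addStreamFormatListeners is hypothetical:

// Sketch (assumption): register format-change listeners on the device's
// output streams instead of the deprecated device-level property.
// Requires <CoreAudio/CoreAudio.h> and <stdlib.h>.
static OSStatus addStreamFormatListeners(AudioDeviceID deviceID, void *clientData)
{
    AudioObjectPropertyAddress streamsAddr = {
        kAudioDevicePropertyStreams,
        kAudioObjectPropertyScopeOutput,
        kAudioObjectPropertyElementMaster
    };

    // Find out how many output streams the device publishes.
    UInt32 size = 0;
    OSStatus err = AudioObjectGetPropertyDataSize(deviceID, &streamsAddr, 0, NULL, &size);
    if (err != kAudioHardwareNoError)
        return err;

    UInt32 numStreams = size / sizeof(AudioStreamID);
    AudioStreamID *streams = (AudioStreamID *)malloc(size);
    if (!streams)
        return kAudioHardwareUnspecifiedError;

    err = AudioObjectGetPropertyData(deviceID, &streamsAddr, 0, NULL, &size, streams);
    if (err == kAudioHardwareNoError) {
        AudioObjectPropertyAddress fmtAddr = {
            kAudioStreamPropertyVirtualFormat,
            kAudioObjectPropertyScopeGlobal,
            kAudioObjectPropertyElementMaster
        };
        // One listener per stream; the callback signature matches streamFormatChanged above.
        for (UInt32 i = 0; i < numStreams; i++)
            AudioObjectAddPropertyListener(streams[i], &fmtAddr, streamFormatChanged, clientData);
    }

    free(streams);
    return err;
}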
Example 8
/** create a new driver instance
*/
static jack_driver_t *coreaudio_driver_new(char* name,
										   jack_client_t* client,
										   jack_nframes_t nframes,
										   jack_nframes_t samplerate,
										   int capturing,
										   int playing,
										   int inchannels,
										   int outchannels,
										   char* capture_driver_uid,
										   char* playback_driver_uid,
										   jack_nframes_t capture_latency, 
										   jack_nframes_t playback_latency)
{
    coreaudio_driver_t *driver;
	OSStatus err = noErr;
	ComponentResult err1;
    UInt32 outSize;
	UInt32 enableIO;
	AudioStreamBasicDescription srcFormat, dstFormat;
	Float64 sampleRate;
	int in_nChannels = 0;
	int out_nChannels = 0;
	int i;
	
    driver = (coreaudio_driver_t *) calloc(1, sizeof(coreaudio_driver_t));
    jack_driver_init((jack_driver_t *) driver);

    if (!jack_power_of_two(nframes)) {
		jack_error("CA: -p must be a power of two.");
		goto error;
    }

	driver->state = 0;
    driver->frames_per_cycle = nframes;
    driver->frame_rate = samplerate;
    driver->capturing = capturing;
    driver->playing = playing;
	driver->xrun_detected = 0;
	driver->null_cycle = 0;

    driver->attach = (JackDriverAttachFunction) coreaudio_driver_attach;
    driver->detach = (JackDriverDetachFunction) coreaudio_driver_detach;
    driver->read = (JackDriverReadFunction) coreaudio_driver_read;
    driver->write = (JackDriverReadFunction) coreaudio_driver_write;
    driver->null_cycle =
	(JackDriverNullCycleFunction) coreaudio_driver_null_cycle;
    driver->bufsize = (JackDriverBufSizeFunction) coreaudio_driver_bufsize;
    driver->start = (JackDriverStartFunction) coreaudio_driver_audio_start;
    driver->stop = (JackDriverStopFunction) coreaudio_driver_audio_stop;
	driver->capture_frame_latency = capture_latency;
	driver->playback_frame_latency = playback_latency;
	
	// Duplex
    if (strcmp(capture_driver_uid, "") != 0 && strcmp(playback_driver_uid, "") != 0) {
		JCALog("Open duplex \n");
        if (get_device_id_from_uid(playback_driver_uid, &driver->device_id) != noErr) {
            if (get_default_device(&driver->device_id) != noErr) {
				jack_error("Cannot open default device");
				goto error;
			}
		}
		if (get_device_name_from_id(driver->device_id, driver->capture_driver_name) != noErr || get_device_name_from_id(driver->device_id, driver->playback_driver_name) != noErr) {
			jack_error("Cannot get device name from device ID");
			goto error;
		}
		
	// Capture only
	} else if (strcmp(capture_driver_uid, "") != 0) {
		JCALog("Open capture only \n");
		if (get_device_id_from_uid(capture_driver_uid, &driver->device_id) != noErr) {
            if (get_default_input_device(&driver->device_id) != noErr) {
				jack_error("Cannot open default device");
                goto error;
			}
		}
		if (get_device_name_from_id(driver->device_id, driver->capture_driver_name) != noErr) {
			jack_error("Cannot get device name from device ID");
			goto error;
		}
		
  	// Playback only
	} else if (playback_driver_uid != NULL) {
		JCALog("Open playback only \n");
		if (get_device_id_from_uid(playback_driver_uid, &driver->device_id) != noErr) {
            if (get_default_output_device(&driver->device_id) != noErr) {
				jack_error("Cannot open default device");
                goto error;
			}
        }
		if (get_device_name_from_id(driver->device_id, driver->playback_driver_name) != noErr) {
			jack_error("Cannot get device name from device ID");
			goto error;
		}
		
	// Use default driver in duplex mode
	} else {
		JCALog("Open default driver \n");
		if (get_default_device(&driver->device_id) != noErr) {
			jack_error("Cannot open default device");
			goto error;
		}
		if (get_device_name_from_id(driver->device_id, driver->capture_driver_name) != noErr || get_device_name_from_id(driver->device_id, driver->playback_driver_name) != noErr) {
			jack_error("Cannot get device name from device ID");
			goto error;
		}
	}
	
	driver->client = client;
    driver->period_usecs =
		(((float) driver->frames_per_cycle) / driver->frame_rate) *
		1000000.0f;
	
	if (capturing) {
		err = get_total_channels(driver->device_id, &in_nChannels, true);
		if (err != noErr) { 
			jack_error("Cannot get input channel number");
			printError(err);
			goto error;
		} 
	}
	
	if (playing) {
		err = get_total_channels(driver->device_id, &out_nChannels, false);
		if (err != noErr) { 
			jack_error("Cannot get output channel number");
			printError(err);
			goto error;
		} 
	}
	
	if (inchannels > in_nChannels) {
        jack_error("This device hasn't required input channels inchannels = %ld in_nChannels = %ld", inchannels, in_nChannels);
		goto error;
    }
	
	if (outchannels > out_nChannels) {
        jack_error("This device hasn't required output channels outchannels = %ld out_nChannels = %ld", outchannels, out_nChannels);
		goto error;
    }

	if (inchannels == 0) {
		JCALog("Setup max in channels = %ld\n", in_nChannels);
		inchannels = in_nChannels; 
	}
		
	if (outchannels == 0) {
		JCALog("Setup max out channels = %ld\n", out_nChannels);
		outchannels = out_nChannels; 
	}

    // Setting buffer size
    outSize = sizeof(UInt32);
    err = AudioDeviceSetProperty(driver->device_id, NULL, 0, false, kAudioDevicePropertyBufferFrameSize, outSize, &nframes);
    if (err != noErr) {
        jack_error("Cannot set buffer size %ld", nframes);
        printError(err);
		goto error;
    }

	// Set sample rate
	outSize =  sizeof(Float64);
	err = AudioDeviceGetProperty(driver->device_id, 0, kAudioDeviceSectionGlobal, kAudioDevicePropertyNominalSampleRate, &outSize, &sampleRate);
	if (err != noErr) {
		jack_error("Cannot get current sample rate");
		printError(err);
		goto error;
	}

	if (samplerate != (jack_nframes_t)sampleRate) {
		sampleRate = (Float64)samplerate;
		
		// To get SR change notification
		err = AudioDeviceAddPropertyListener(driver->device_id, 0, true, kAudioDevicePropertyNominalSampleRate, sr_notification, driver);
		if (err != noErr) {
			jack_error("Error calling AudioDeviceAddPropertyListener with kAudioDevicePropertyNominalSampleRate");
			printError(err);
			goto error;
		}
		err = AudioDeviceSetProperty(driver->device_id, NULL, 0, kAudioDeviceSectionGlobal, kAudioDevicePropertyNominalSampleRate, outSize, &sampleRate);
		if (err != noErr) {
			jack_error("Cannot set sample rate = %ld", samplerate);
			printError(err);
			goto error;
		}
		
		// Waiting for SR change notification
		int count = 0;
		while (!driver->state && count++ < 100) {
			usleep(100000);
			JCALog("Wait count = %ld\n", count);
		}
		
		// Remove SR change notification
		AudioDeviceRemovePropertyListener(driver->device_id, 0, true, kAudioDevicePropertyNominalSampleRate, sr_notification);
	}

    // AUHAL
    ComponentDescription cd = {kAudioUnitType_Output, kAudioUnitSubType_HALOutput, kAudioUnitManufacturer_Apple, 0, 0};
    Component HALOutput = FindNextComponent(NULL, &cd);

    err1 = OpenAComponent(HALOutput, &driver->au_hal);
    if (err1 != noErr) {
		jack_error("Error calling OpenAComponent");
        printError(err1);
        goto error;
	}

    err1 = AudioUnitInitialize(driver->au_hal);
    if (err1 != noErr) {
		jack_error("Cannot initialize AUHAL unit");
		printError(err1);
        goto error;
	}

 	// Start I/O
	enableIO = 1;
	if (capturing && inchannels > 0) {
		JCALog("Setup AUHAL input\n");
        err1 = AudioUnitSetProperty(driver->au_hal, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, 1, &enableIO, sizeof(enableIO));
        if (err1 != noErr) {
            jack_error("Error calling AudioUnitSetProperty - kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input");
            printError(err1);
            goto error;
        }
    }
	
	if (playing && outchannels > 0) {
		JCALog("Setup AUHAL output\n");
		err1 = AudioUnitSetProperty(driver->au_hal, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, 0, &enableIO, sizeof(enableIO));
		if (err1 != noErr) {
			jack_error("Error calling AudioUnitSetProperty - kAudioOutputUnitProperty_EnableIO,kAudioUnitScope_Output");
			printError(err1);
			goto error;
		}
	}
	
	// Set up the chosen device, in both the input and output cases
	err1 = AudioUnitSetProperty(driver->au_hal, kAudioOutputUnitProperty_CurrentDevice, kAudioUnitScope_Global, 0, &driver->device_id, sizeof(AudioDeviceID));
	if (err1 != noErr) {
		jack_error("Error calling AudioUnitSetProperty - kAudioOutputUnitProperty_CurrentDevice");
		printError(err1);
		goto error;
	}

	// Set buffer size
	if (capturing && inchannels > 0) {
		err1 = AudioUnitSetProperty(driver->au_hal, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, 1, (UInt32*)&nframes, sizeof(UInt32));
		if (err1 != noErr) {
			jack_error("Error calling AudioUnitSetProperty - kAudioUnitProperty_MaximumFramesPerSlice");
			printError(err1);
			goto error;
		}
	}
	
	if (playing && outchannels > 0) {
		err1 = AudioUnitSetProperty(driver->au_hal, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, 0, (UInt32*)&nframes, sizeof(UInt32));
		if (err1 != noErr) {
			jack_error("Error calling AudioUnitSetProperty - kAudioUnitProperty_MaximumFramesPerSlice");
			printError(err1);
			goto error;
		}
	}

	// Setup channel map
	if (capturing && inchannels > 0 && inchannels < in_nChannels) {
        SInt32 chanArr[in_nChannels];
        for (i = 0; i < in_nChannels; i++) {
            chanArr[i] = -1;
        }
        for (i = 0; i < inchannels; i++) {
            chanArr[i] = i;
        }
        AudioUnitSetProperty(driver->au_hal, kAudioOutputUnitProperty_ChannelMap , kAudioUnitScope_Input, 1, chanArr, sizeof(SInt32) * in_nChannels);
        if (err1 != noErr) {
            jack_error("Error calling AudioUnitSetProperty - kAudioOutputUnitProperty_ChannelMap 1");
            printError(err1);
        }
    }

    if (playing && outchannels > 0 && outchannels < out_nChannels) {
        SInt32 chanArr[out_nChannels];
        for (i = 0;	i < out_nChannels; i++) {
            chanArr[i] = -1;
        }
        for (i = 0; i < outchannels; i++) {
            chanArr[i] = i;
        }
        err1 = AudioUnitSetProperty(driver->au_hal, kAudioOutputUnitProperty_ChannelMap, kAudioUnitScope_Output, 0, chanArr, sizeof(SInt32) * out_nChannels);
        if (err1 != noErr) {
            jack_error("Error calling AudioUnitSetProperty - kAudioOutputUnitProperty_ChannelMap 0");
            printError(err1);
        }
    }

	// Setup stream converters
  	srcFormat.mSampleRate = samplerate;
	srcFormat.mFormatID = kAudioFormatLinearPCM;
	srcFormat.mFormatFlags = kAudioFormatFlagsNativeFloatPacked | kLinearPCMFormatFlagIsNonInterleaved;
	srcFormat.mBytesPerPacket = sizeof(float);
	srcFormat.mFramesPerPacket = 1;
	srcFormat.mBytesPerFrame = sizeof(float);
	srcFormat.mChannelsPerFrame = outchannels;
	srcFormat.mBitsPerChannel = 32;

	err1 = AudioUnitSetProperty(driver->au_hal, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &srcFormat, sizeof(AudioStreamBasicDescription));
	if (err1 != noErr) {
		jack_error("Error calling AudioUnitSetProperty - kAudioUnitProperty_StreamFormat kAudioUnitScope_Input");
		printError(err1);
	}

	dstFormat.mSampleRate = samplerate;
	dstFormat.mFormatID = kAudioFormatLinearPCM;
	dstFormat.mFormatFlags = kAudioFormatFlagsNativeFloatPacked | kLinearPCMFormatFlagIsNonInterleaved;
	dstFormat.mBytesPerPacket = sizeof(float);
	dstFormat.mFramesPerPacket = 1;
	dstFormat.mBytesPerFrame = sizeof(float);
	dstFormat.mChannelsPerFrame = inchannels;
	dstFormat.mBitsPerChannel = 32;

	err1 = AudioUnitSetProperty(driver->au_hal, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 1, &dstFormat, sizeof(AudioStreamBasicDescription));
	if (err1 != noErr) {
		jack_error("Error calling AudioUnitSetProperty - kAudioUnitProperty_StreamFormat kAudioUnitScope_Output");
		printError(err1);
	}

	// Setup callbacks
    if (inchannels > 0 && outchannels == 0) {
        AURenderCallbackStruct output;
        output.inputProc = render_input;
        output.inputProcRefCon = driver;
    	err1 = AudioUnitSetProperty(driver->au_hal, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, 0, &output, sizeof(output));
        if (err1 != noErr) {
            jack_error("Error calling  AudioUnitSetProperty - kAudioUnitProperty_SetRenderCallback 1");
            printError(err1);
            goto error;
        }
    } else {
        AURenderCallbackStruct output;
        output.inputProc = render;
        output.inputProcRefCon = driver;
        err1 = AudioUnitSetProperty(driver->au_hal, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, 0, &output, sizeof(output));
        if (err1 != noErr) {
            jack_error("Error calling AudioUnitSetProperty - kAudioUnitProperty_SetRenderCallback 0");
            printError(err1);
            goto error;
        }
    }

	if (capturing && inchannels > 0) {
		driver->input_list = (AudioBufferList*)malloc(sizeof(UInt32) + inchannels * sizeof(AudioBuffer));
		if (driver->input_list == 0)
			goto error;
		driver->input_list->mNumberBuffers = inchannels;
		
		// Prepare buffers
		for (i = 0; i < driver->capture_nchannels; i++) {
			driver->input_list->mBuffers[i].mNumberChannels = 1;
			driver->input_list->mBuffers[i].mDataByteSize = nframes * sizeof(float);
		}
	}

	err = AudioDeviceAddPropertyListener(driver->device_id, 0, true, kAudioDeviceProcessorOverload, notification, driver);
    if (err != noErr) {
		jack_error("Error calling AudioDeviceAddPropertyListener with kAudioDeviceProcessorOverload");
        goto error;
	}
		
	err = AudioDeviceAddPropertyListener(driver->device_id, 0, true, kAudioDevicePropertyNominalSampleRate, notification, driver);
    if (err != noErr) {
        jack_error("Error calling AudioDeviceAddPropertyListener with kAudioDevicePropertyNominalSampleRate");
        goto error;
    }
 
	driver->playback_nchannels = outchannels;
    driver->capture_nchannels = inchannels;
	return ((jack_driver_t *) driver);

  error:
	AudioUnitUninitialize(driver->au_hal);
	CloseComponent(driver->au_hal);
    jack_error("Cannot open the coreaudio driver");
    free(driver);
    return NULL;
}
Example 9
/* sets the value of the given property and waits for the change to
   be acknowledged, then returns the final value, which is not guaranteed
   by this function to be the same as the desired value. Obviously, this
   function can only be used for data whose input and output are the
   same size and format, and whose size and format are known in advance.*/
PaError AudioDeviceSetPropertyNowAndWaitForChange(
    AudioDeviceID inDevice,
    UInt32 inChannel, 
    Boolean isInput, 
    AudioDevicePropertyID inPropertyID,
    UInt32 inPropertyDataSize, 
    const void *inPropertyData,
    void *outPropertyData )
{
   OSStatus macErr;
   int unixErr;
   MutexAndBool mab;
   UInt32 outPropertyDataSize = inPropertyDataSize;

   /* First, see if it already has that value. If so, return. */
   macErr = AudioDeviceGetProperty( inDevice, inChannel,
                                 isInput, inPropertyID, 
                                 &outPropertyDataSize, outPropertyData );
   if( macErr )
      goto failMac2;
   if( inPropertyDataSize!=outPropertyDataSize )
      return paInternalError;
   if( 0==memcmp( outPropertyData, inPropertyData, outPropertyDataSize ) )
      return paNoError;

   /* setup and lock mutex */
   mab.once = FALSE;
   unixErr = pthread_mutex_init( &mab.mutex, NULL );
   if( unixErr )
      goto failUnix2;
   unixErr = pthread_mutex_lock( &mab.mutex );
   if( unixErr )
      goto failUnix;

   /* add property listener */
   macErr = AudioDeviceAddPropertyListener( inDevice, inChannel, isInput,
                                   inPropertyID, propertyProc,
                                   &mab ); 
   if( macErr )
      goto failMac;
   /* set property */
   macErr  = AudioDeviceSetProperty( inDevice, NULL, inChannel,
                                 isInput, inPropertyID,
                                 inPropertyDataSize, inPropertyData );
   if( macErr ) {
      /* we couldn't set the property, so we'll just unlock the mutex
         and move on. */
      pthread_mutex_unlock( &mab.mutex );
   }

   /* wait for property to change */                      
   unixErr = pthread_mutex_lock( &mab.mutex );
   if( unixErr )
      goto failUnix;

   /* now read the property back out */
   macErr = AudioDeviceGetProperty( inDevice, inChannel,
                                 isInput, inPropertyID, 
                                 &outPropertyDataSize, outPropertyData );
   if( macErr )
      goto failMac;
   /* cleanup */
   AudioDeviceRemovePropertyListener( inDevice, inChannel, isInput,
                                      inPropertyID, propertyProc );
   unixErr = pthread_mutex_unlock( &mab.mutex );
   if( unixErr )
      goto failUnix2;
   unixErr = pthread_mutex_destroy( &mab.mutex );
   if( unixErr )
      goto failUnix2;

   return paNoError;

 failUnix:
   pthread_mutex_destroy( &mab.mutex );
   AudioDeviceRemovePropertyListener( inDevice, inChannel, isInput,
                                      inPropertyID, propertyProc );

 failUnix2:
   DBUG( ("Error #%d while setting a device property: %s\n", unixErr, strerror( unixErr ) ) );
   return paUnanticipatedHostError;

 failMac:
   pthread_mutex_destroy( &mab.mutex );
   AudioDeviceRemovePropertyListener( inDevice, inChannel, isInput,
                                      inPropertyID, propertyProc );
 failMac2:
   return ERR( macErr );
}
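The propertyProc listener used above is not included in this excerpt. A minimal sketch of what it could look like, assuming MutexAndBool carries the mutex and once members initialized above; the one-shot-unlock behavior is an assumption:

/* Hypothetical sketch: listener that releases the mutex the caller is
   blocked on, so AudioDeviceSetPropertyNowAndWaitForChange can proceed.
   Unlocks at most once so repeated notifications for the same property
   do not unlock an already-unlocked mutex. */
static OSStatus propertyProc( AudioDeviceID inDevice, UInt32 inChannel,
                              Boolean isInput, AudioDevicePropertyID inPropertyID,
                              void *inClientData )
{
   MutexAndBool *mab = (MutexAndBool *) inClientData;
   if( !mab->once ) {
      mab->once = TRUE;
      pthread_mutex_unlock( &mab->mutex );
   }
   return noErr;
}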