Example #1
bool audio::orchestra::api::Core::open(uint32_t _device,
                                       audio::orchestra::mode _mode,
                                       uint32_t _channels,
                                       uint32_t _firstChannel,
                                       uint32_t _sampleRate,
                                       audio::format _format,
                                       uint32_t *_bufferSize,
                                       const audio::orchestra::StreamOptions& _options) {
	// Get device ID
	uint32_t nDevices = getDeviceCount();
	if (nDevices == 0) {
		// This should not happen because a check is made before this function is called.
		ATA_ERROR("no devices found!");
		return false;
	}
	if (_device >= nDevices) {
		// This should not happen because a check is made before this function is called.
		ATA_ERROR("device ID is invalid!");
		return false;
	}
	AudioDeviceID deviceList[ nDevices ];
	uint32_t dataSize = sizeof(AudioDeviceID) * nDevices;
	AudioObjectPropertyAddress property = {
		kAudioHardwarePropertyDevices,
		kAudioObjectPropertyScopeGlobal,
		kAudioObjectPropertyElementMaster
	};
	OSStatus result = AudioObjectGetPropertyData(kAudioObjectSystemObject,
	                                             &property,
	                                             0,
	                                             nullptr,
	                                             &dataSize,
	                                             (void *) &deviceList);
	if (result != noErr) {
		ATA_ERROR("OS-X system error getting device IDs.");
		return false;
	}
	AudioDeviceID id = deviceList[ _device ];
	// Setup for stream mode.
	bool isInput = false;
	if (_mode == audio::orchestra::mode_input) {
		isInput = true;
		property.mScope = kAudioDevicePropertyScopeInput;
	} else {
		property.mScope = kAudioDevicePropertyScopeOutput;
	}
	// Get the stream "configuration".
	AudioBufferList	*bufferList = nil;
	dataSize = 0;
	property.mSelector = kAudioDevicePropertyStreamConfiguration;
	result = AudioObjectGetPropertyDataSize(id, &property, 0, nullptr, &dataSize);
	if (    result != noErr
	     || dataSize == 0) {
		ATA_ERROR("system error (" << getErrorCode(result) << ") getting stream configuration info for device (" << _device << ").");
		return false;
	}
	// Allocate the AudioBufferList.
	bufferList = (AudioBufferList *) malloc(dataSize);
	if (bufferList == nullptr) {
		ATA_ERROR("memory error allocating AudioBufferList.");
		return false;
	}
	result = AudioObjectGetPropertyData(id, &property, 0, nullptr, &dataSize, bufferList);
	if (    result != noErr
	     || dataSize == 0) {
		ATA_ERROR("system error (" << getErrorCode(result) << ") getting stream configuration for device (" << _device << ").");
		return false;
	}
	// Search for one or more streams that contain the desired number of
	// channels. CoreAudio devices can have an arbitrary number of
	// streams and each stream can have an arbitrary number of channels.
	// For each stream, a single buffer of interleaved samples is
	// provided. orchestra prefers the use of one stream of interleaved
	// data or multiple consecutive single-channel streams.	However, we
	// now support multiple consecutive multi-channel streams of
	// interleaved data as well.
	uint32_t iStream, offsetCounter = _firstChannel;
	uint32_t nStreams = bufferList->mNumberBuffers;
	bool monoMode = false;
	bool foundStream = false;
	// First check that the device supports the requested number of
	// channels.
	uint32_t deviceChannels = 0;
	for (iStream=0; iStream<nStreams; iStream++) {
		deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
	}
	if (deviceChannels < (_channels + _firstChannel)) {
		free(bufferList);
		ATA_ERROR("the device (" << _device << ") does not support the requested channel count.");
		return false;
	}
	// Look for a single stream meeting our needs.
	uint32_t firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
	for (iStream=0; iStream<nStreams; iStream++) {
		streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
		if (streamChannels >= _channels + offsetCounter) {
			firstStream = iStream;
			channelOffset = offsetCounter;
			foundStream = true;
			break;
		}
		if (streamChannels > offsetCounter) {
			break;
		}
		offsetCounter -= streamChannels;
	}
	// If we didn't find a single stream above, then we should be able
	// to meet the channel specification with multiple streams.
	if (foundStream == false) {
		monoMode = true;
		offsetCounter = _firstChannel;
		for (iStream=0; iStream<nStreams; iStream++) {
			streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
			if (streamChannels > offsetCounter) {
				break;
			}
			offsetCounter -= streamChannels;
		}
		firstStream = iStream;
		channelOffset = offsetCounter;
		int32_t channelCounter = _channels + offsetCounter - streamChannels;
		if (streamChannels > 1) {
			monoMode = false;
		}
		while (channelCounter > 0) {
			streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
			if (streamChannels > 1) {
				monoMode = false;
			}
			channelCounter -= streamChannels;
			streamCount++;
		}
	}
	free(bufferList);
	// Determine the buffer size.
	AudioValueRange	bufferRange;
	dataSize = sizeof(AudioValueRange);
	property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
	result = AudioObjectGetPropertyData(id, &property, 0, nullptr, &dataSize, &bufferRange);
	if (result != noErr) {
		ATA_ERROR("system error (" << getErrorCode(result) << ") getting buffer size range for device (" << _device << ").");
		return false;
	}
	if (bufferRange.mMinimum > *_bufferSize) {
		*_bufferSize = (uint64_t) bufferRange.mMinimum;
	} else if (bufferRange.mMaximum < *_bufferSize) {
		*_bufferSize = (uint64_t) bufferRange.mMaximum;
	}
	if (_options.flags.m_minimizeLatency == true) {
		*_bufferSize = (uint64_t) bufferRange.mMinimum;
	}
	// Set the buffer size.	For multiple streams, I'm assuming we only
	// need to make this setting for the master channel.
	uint32_t theSize = (uint32_t) *_bufferSize;
	dataSize = sizeof(uint32_t);
	property.mSelector = kAudioDevicePropertyBufferFrameSize;
	result = AudioObjectSetPropertyData(id, &property, 0, nullptr, dataSize, &theSize);
	if (result != noErr) {
		ATA_ERROR("system error (" << getErrorCode(result) << ") setting the buffer size for device (" << _device << ").");
		return false;
	}
	// If attempting to setup a duplex stream, the bufferSize parameter
	// MUST be the same in both directions!
	*_bufferSize = theSize;
	if (    m_mode == audio::orchestra::mode_output
	     && _mode == audio::orchestra::mode_input
	     && *_bufferSize != m_bufferSize) {
		ATA_ERROR("system error setting buffer size for duplex stream on device (" << _device << ").");
		return false;
	}
	m_bufferSize = *_bufferSize;
	m_nBuffers = 1;
	// Check and if necessary, change the sample rate for the device.
	double nominalRate;
	dataSize = sizeof(double);
	property.mSelector = kAudioDevicePropertyNominalSampleRate;
	result = AudioObjectGetPropertyData(id, &property, 0, nullptr, &dataSize, &nominalRate);
	if (result != noErr) {
		ATA_ERROR("system error (" << getErrorCode(result) << ") getting current sample rate.");
		return false;
	}
	// Only change the sample rate if off by more than 1 Hz.
	if (fabs(nominalRate - (double)_sampleRate) > 1.0) {
		// Set a property listener for the sample rate change
		double reportedRate = 0.0;
		AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
		result = AudioObjectAddPropertyListener(id, &tmp, &rateListener, (void *) &reportedRate);
		if (result != noErr) {
			ATA_ERROR("system error (" << getErrorCode(result) << ") setting sample rate property listener for device (" << _device << ").");
			return false;
		}
		nominalRate = (double) _sampleRate;
		result = AudioObjectSetPropertyData(id, &property, 0, nullptr, dataSize, &nominalRate);
		if (result != noErr) {
			ATA_ERROR("system error (" << getErrorCode(result) << ") setting sample rate for device (" << _device << ").");
			return false;
		}
		// Now wait until the reported nominal rate is what we just set.
		uint32_t microCounter = 0;
		while (reportedRate != nominalRate) {
			microCounter += 5000;
			if (microCounter > 5000000) {
				break;
			}
			std::this_thread::sleep_for(std::chrono::milliseconds(5));
		}
		// Remove the property listener.
		AudioObjectRemovePropertyListener(id, &tmp, &rateListener, (void *) &reportedRate);
		if (microCounter > 5000000) {
			ATA_ERROR("timeout waiting for sample rate update for device (" << _device << ").");
			return false;
		}
	}
	// Now set the stream format for all streams.	Also, check the
	// physical format of the device and change that if necessary.
	AudioStreamBasicDescription	description;
	dataSize = sizeof(AudioStreamBasicDescription);
	property.mSelector = kAudioStreamPropertyVirtualFormat;
	result = AudioObjectGetPropertyData(id, &property, 0, nullptr, &dataSize, &description);
	if (result != noErr) {
		ATA_ERROR("system error (" << getErrorCode(result) << ") getting stream format for device (" << _device << ").");
		return false;
	}
	// Set the sample rate and data format id.	However, only make the
	// change if the sample rate is not within 1.0 of the desired
	// rate and the format is not linear pcm.
	bool updateFormat = false;
	if (fabs(description.mSampleRate - (double)_sampleRate) > 1.0) {
		description.mSampleRate = (double) _sampleRate;
		updateFormat = true;
	}
	if (description.mFormatID != kAudioFormatLinearPCM) {
		description.mFormatID = kAudioFormatLinearPCM;
		updateFormat = true;
	}
	if (updateFormat) {
		result = AudioObjectSetPropertyData(id, &property, 0, nullptr, dataSize, &description);
		if (result != noErr) {
			ATA_ERROR("system error (" << getErrorCode(result) << ") setting sample rate or data format for device (" << _device << ").");
			return false;
		}
	}
	// Now check the physical format.
	property.mSelector = kAudioStreamPropertyPhysicalFormat;
	result = AudioObjectGetPropertyData(id, &property, 0, nullptr,	&dataSize, &description);
	if (result != noErr) {
		ATA_ERROR("system error (" << getErrorCode(result) << ") getting stream physical format for device (" << _device << ").");
		return false;
	}
	//std::cout << "Current physical stream format:" << std::endl;
	//std::cout << "	 mBitsPerChan = " << description.mBitsPerChannel << std::endl;
	//std::cout << "	 aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
	//std::cout << "	 bytesPerFrame = " << description.mBytesPerFrame << std::endl;
	//std::cout << "	 sample rate = " << description.mSampleRate << std::endl;
	if (    description.mFormatID != kAudioFormatLinearPCM
	     || description.mBitsPerChannel < 16) {
		description.mFormatID = kAudioFormatLinearPCM;
		//description.mSampleRate = (double) sampleRate;
		AudioStreamBasicDescription	testDescription = description;
		uint32_t formatFlags;
		// We'll try higher bit rates first and then work our way down.
		std::vector< std::pair<float, uint32_t> > physicalFormats;
		formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
		physicalFormats.push_back(std::pair<float, uint32_t>(32, formatFlags));
		formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
		physicalFormats.push_back(std::pair<float, uint32_t>(32, formatFlags));
		physicalFormats.push_back(std::pair<float, uint32_t>(24, formatFlags));	 // 24-bit packed
		formatFlags &= ~(kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh);
		physicalFormats.push_back(std::pair<float, uint32_t>(24.2, formatFlags)); // 24-bit in 4 bytes, aligned low
		formatFlags |= kAudioFormatFlagIsAlignedHigh;
		physicalFormats.push_back(std::pair<float, uint32_t>(24.4, formatFlags)); // 24-bit in 4 bytes, aligned high
		formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
		physicalFormats.push_back(std::pair<float, uint32_t>(16, formatFlags));
		physicalFormats.push_back(std::pair<float, uint32_t>(8, formatFlags));
		bool setPhysicalFormat = false;
		for(uint32_t i=0; i<physicalFormats.size(); i++) {
			testDescription = description;
			testDescription.mBitsPerChannel = (uint32_t) physicalFormats[i].first;
			testDescription.mFormatFlags = physicalFormats[i].second;
			if (    (24 == (uint32_t)physicalFormats[i].first)
			     && !(physicalFormats[i].second & kAudioFormatFlagIsPacked)) {
				testDescription.mBytesPerFrame =	4 * testDescription.mChannelsPerFrame;
			} else {
				testDescription.mBytesPerFrame =	testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
			}
			testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
			result = AudioObjectSetPropertyData(id, &property, 0, nullptr, dataSize, &testDescription);
			if (result == noErr) {
				setPhysicalFormat = true;
				//std::cout << "Updated physical stream format:" << std::endl;
				//std::cout << "	 mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
				//std::cout << "	 aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
				//std::cout << "	 bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
				//std::cout << "	 sample rate = " << testDescription.mSampleRate << std::endl;
				break;
			}
		}
		if (!setPhysicalFormat) {
			ATA_ERROR("system error (" << getErrorCode(result) << ") setting physical data format for device (" << _device << ").");
			return false;
		}
	} // done setting virtual/physical formats.
	// Get the stream / device latency.
	uint32_t latency;
	dataSize = sizeof(uint32_t);
	property.mSelector = kAudioDevicePropertyLatency;
	if (AudioObjectHasProperty(id, &property) == true) {
		result = AudioObjectGetPropertyData(id, &property, 0, nullptr, &dataSize, &latency);
		if (result == kAudioHardwareNoError) {
			m_latency[ _mode ] = latency;
		} else {
			ATA_ERROR("system error (" << getErrorCode(result) << ") getting device latency for device (" << _device << ").");
			return false;
		}
	}
	// Byte-swapping: According to AudioHardware.h, the stream data will
	// always be presented in native-endian format, so we should never
	// need to byte swap.
	m_doByteSwap[modeToIdTable(_mode)] = false;
	// From the CoreAudio documentation, PCM data must be supplied as
	// 32-bit floats.
	m_userFormat = _format;
	m_deviceFormat[modeToIdTable(_mode)] = audio::format_float;
	if (streamCount == 1) {
		m_nDeviceChannels[modeToIdTable(_mode)] = description.mChannelsPerFrame;
	} else {
		// multiple streams
		m_nDeviceChannels[modeToIdTable(_mode)] = _channels;
	}
	m_nUserChannels[modeToIdTable(_mode)] = _channels;
	m_channelOffset[modeToIdTable(_mode)] = channelOffset;	// offset within a CoreAudio stream
	m_deviceInterleaved[modeToIdTable(_mode)] = true;
	if (monoMode == true) {
		m_deviceInterleaved[modeToIdTable(_mode)] = false;
	}
	// Set flags for buffer conversion.
	m_doConvertBuffer[modeToIdTable(_mode)] = false;
	if (m_userFormat != m_deviceFormat[modeToIdTable(_mode)]) {
		m_doConvertBuffer[modeToIdTable(_mode)] = true;
	}
	if (m_nUserChannels[modeToIdTable(_mode)] < m_nDeviceChannels[modeToIdTable(_mode)]) {
		m_doConvertBuffer[modeToIdTable(_mode)] = true;
	}
	if (streamCount == 1) {
		if (    m_nUserChannels[modeToIdTable(_mode)] > 1
		     && m_deviceInterleaved[modeToIdTable(_mode)] == false) {
			m_doConvertBuffer[modeToIdTable(_mode)] = true;
		}
	} else if (monoMode) {
		m_doConvertBuffer[modeToIdTable(_mode)] = true;
	}
	m_private->iStream[modeToIdTable(_mode)] = firstStream;
	m_private->nStreams[modeToIdTable(_mode)] = streamCount;
	m_private->id[modeToIdTable(_mode)] = id;
	// Allocate necessary internal buffers.
	uint64_t bufferBytes;
	bufferBytes = m_nUserChannels[modeToIdTable(_mode)] * *_bufferSize * audio::getFormatBytes(m_userFormat);
	//	m_userBuffer[modeToIdTable(_mode)] = (char *) calloc(bufferBytes, 1);
	m_userBuffer[modeToIdTable(_mode)].resize(bufferBytes, 0);
	if (m_userBuffer[modeToIdTable(_mode)].size() == 0) {
		ATA_ERROR("error allocating user buffer memory.");
		goto error;
	}
	// If possible, we will make use of the CoreAudio stream buffers as
	// "device buffers".	However, we can't do this if using multiple
	// streams.
	if (    m_doConvertBuffer[modeToIdTable(_mode)]
	     && m_private->nStreams[modeToIdTable(_mode)] > 1) {
		bool makeBuffer = true;
		bufferBytes = m_nDeviceChannels[modeToIdTable(_mode)] * audio::getFormatBytes(m_deviceFormat[modeToIdTable(_mode)]);
		if (_mode == audio::orchestra::mode_input) {
			if (    m_mode == audio::orchestra::mode_output
			     && m_deviceBuffer) {
				uint64_t bytesOut = m_nDeviceChannels[0] * audio::getFormatBytes(m_deviceFormat[0]);
				if (bufferBytes <= bytesOut) {
					makeBuffer = false;
				}
			}
		}
		if (makeBuffer) {
			bufferBytes *= *_bufferSize;
			if (m_deviceBuffer) {
				free(m_deviceBuffer);
				m_deviceBuffer = nullptr;
			}
			m_deviceBuffer = (char *) calloc(bufferBytes, 1);
			if (m_deviceBuffer == nullptr) {
				ATA_ERROR("error allocating device buffer memory.");
				goto error;
			}
		}
	}
	m_sampleRate = _sampleRate;
	m_device[modeToIdTable(_mode)] = _device;
	m_state = audio::orchestra::state::stopped;
	ATA_VERBOSE("Set state as stopped");
	// Setup the buffer conversion information structure.
	if (m_doConvertBuffer[modeToIdTable(_mode)]) {
		if (streamCount > 1) {
			setConvertInfo(_mode, 0);
		} else {
			setConvertInfo(_mode, channelOffset);
		}
	}
	if (    _mode == audio::orchestra::mode_input
	     && m_mode == audio::orchestra::mode_output
	     && m_device[0] == _device) {
		// Only one callback procedure per device.
		m_mode = audio::orchestra::mode_duplex;
	} else {
#if defined(MAC_OS_X_VERSION_10_5) && (MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5)
		result = AudioDeviceCreateIOProcID(id, &audio::orchestra::api::Core::callbackEvent, this, &m_private->procId[modeToIdTable(_mode)]);
#else
		// deprecated in favor of AudioDeviceCreateIOProcID()
		result = AudioDeviceAddIOProc(id, &audio::orchestra::api::Core::callbackEvent, this);
#endif
		if (result != noErr) {
			ATA_ERROR("system error setting callback for device (" << _device << ").");
			goto error;
		}
		if (    m_mode == audio::orchestra::mode_output
		     && _mode == audio::orchestra::mode_input) {
			m_mode = audio::orchestra::mode_duplex;
		} else {
			m_mode = _mode;
		}
	}
	// Setup the device property listener for over/underload.
	property.mSelector = kAudioDeviceProcessorOverload;
	result = AudioObjectAddPropertyListener(id, &property, &audio::orchestra::api::Core::xrunListener, this);
	return true;
error:
	m_userBuffer[0].clear();
	m_userBuffer[1].clear();
	if (m_deviceBuffer) {
		free(m_deviceBuffer);
		m_deviceBuffer = nullptr;
	}
	m_state = audio::orchestra::state::closed;
	ATA_VERBOSE("Set state as closed");
	return false;
}
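The rateListener callback registered while waiting for the sample-rate change (and the xrunListener hooked up at the end) are not part of this listing. A minimal sketch of rateListener, assumed from the way it is registered with &reportedRate as client data rather than taken from the project sources: it simply re-reads the device's nominal rate into that double so the polling loop above can observe the change.

// Sketch only (assumed, not from the project): refresh the double that open()
// polls while waiting for kAudioDevicePropertyNominalSampleRate to settle.
static OSStatus rateListener(AudioObjectID _inDevice,
                             UInt32 _nAddresses,
                             const AudioObjectPropertyAddress _properties[],
                             void* _ratePointer) {
	(void)_nAddresses;
	(void)_properties;
	double* rate = (double*)_ratePointer;
	UInt32 dataSize = sizeof(double);
	AudioObjectPropertyAddress property = {
		kAudioDevicePropertyNominalSampleRate,
		kAudioObjectPropertyScopeGlobal,
		kAudioObjectPropertyElementMaster
	};
	AudioObjectGetPropertyData(_inDevice, &property, 0, nullptr, &dataSize, rate);
	return kAudioHardwareNoError;
}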
Example #2
    //==============================================================================
    CoreAudioInternal (AudioDeviceID id)
       : inputLatency (0),
         outputLatency (0),
         callback (nullptr),
        #if MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5
         audioProcID (0),
        #endif
         isSlaveDevice (false),
         deviceID (id),
         started (false),
         sampleRate (0),
         bufferSize (512),
         numInputChans (0),
         numOutputChans (0),
         callbacksAllowed (true),
         numInputChannelInfos (0),
         numOutputChannelInfos (0)
    {
        jassert (deviceID != 0);

        updateDetailsFromDevice();

        AudioObjectPropertyAddress pa;
        pa.mSelector = kAudioObjectPropertySelectorWildcard;
        pa.mScope = kAudioObjectPropertyScopeWildcard;
        pa.mElement = kAudioObjectPropertyElementWildcard;

        AudioObjectAddPropertyListener (deviceID, &pa, deviceListenerProc, this);
    }
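The constructor above subscribes deviceListenerProc to every property of the device via wildcard selectors; the matching teardown is not shown in this excerpt. A sketch of how a destructor could balance that registration, assuming the same wildcard address is rebuilt (the real destructor would also have to stop the device and release its per-channel state):

    ~CoreAudioInternal()
    {
        AudioObjectPropertyAddress pa;
        pa.mSelector = kAudioObjectPropertySelectorWildcard;
        pa.mScope = kAudioObjectPropertyScopeWildcard;
        pa.mElement = kAudioObjectPropertyElementWildcard;

        // Balance the AudioObjectAddPropertyListener() call made in the constructor.
        AudioObjectRemovePropertyListener (deviceID, &pa, deviceListenerProc, this);
    }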
Example #3
void CCoreAudioDevice::RegisterDefaultOutputDeviceChangedCB(bool bRegister, AudioObjectPropertyListenerProc callback, void *ref)
{
    OSStatus ret = noErr;
    static int registered = -1;
    
    //only allow registration once
    if (bRegister == (registered == 1))
        return;
    
    AudioObjectPropertyAddress inAdr =
    {
        kAudioHardwarePropertyDefaultOutputDevice,
        kAudioObjectPropertyScopeGlobal,
        kAudioObjectPropertyElementMaster
    };
    
    if (bRegister)
    {
        ret = AudioObjectAddPropertyListener(kAudioObjectSystemObject, &inAdr, defaultOutputDeviceChanged, ref);
        m_defaultOutputDeviceChangedCB = callback;
    }
    else
    {
        ret = AudioObjectRemovePropertyListener(kAudioObjectSystemObject, &inAdr, defaultOutputDeviceChanged, ref);
        m_defaultOutputDeviceChangedCB = NULL;
    }
    
    if (ret != noErr)
        CLog::Log(LOGERROR, "CCoreAudioDevice::RegisterDefaultOutputDeviceChangedCB - error %s a listener callback for default output device changes!", bRegister?"attaching":"removing");
    else
        registered = bRegister ? 1 : 0;
}
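The static defaultOutputDeviceChanged trampoline registered above is not included in the excerpt. A hypothetical sketch, assuming both the trampoline and m_defaultOutputDeviceChangedCB are static members of CCoreAudioDevice, would simply forward the notification to whatever callback RegisterDefaultOutputDeviceChangedCB() stored:

// Hypothetical sketch (not from the project sources): forward the CoreAudio
// notification to the callback stored by RegisterDefaultOutputDeviceChangedCB().
OSStatus CCoreAudioDevice::defaultOutputDeviceChanged(AudioObjectID inObjectID,
                                                      UInt32 inNumberAddresses,
                                                      const AudioObjectPropertyAddress *inAddresses,
                                                      void *inClientData)
{
    if (m_defaultOutputDeviceChangedCB)
        return m_defaultOutputDeviceChangedCB(inObjectID, inNumberAddresses, inAddresses, inClientData);
    return noErr;
}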
Example #4
static int hotplug_init(struct ao *ao)
{
    if (!reinit_device(ao))
        goto coreaudio_error;

    OSStatus err = noErr;
    for (int i = 0; i < MP_ARRAY_SIZE(hotplug_properties); i++) {
        AudioObjectPropertyAddress addr = {
            hotplug_properties[i],
            kAudioObjectPropertyScopeGlobal,
            kAudioObjectPropertyElementMaster
        };
        err = AudioObjectAddPropertyListener(
            kAudioObjectSystemObject, &addr, hotplug_cb, (void *)ao);
        if (err != noErr) {
            char *c1 = fourcc_repr(hotplug_properties[i]);
            char *c2 = fourcc_repr(err);
            MP_ERR(ao, "failed to set device listener %s (%s)", c1, c2);
            goto coreaudio_error;
        }
    }

    return 0;

coreaudio_error:
    return -1;
}
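hotplug_properties is iterated above but not defined in this excerpt. A plausible definition, assumed here purely for illustration, would list the system-object selectors whose changes matter for hotplug handling:

/* Assumed for illustration: the selectors hotplug_init() watches. A change in the
 * device list or in either default device then fires hotplug_cb. */
static const AudioObjectPropertySelector hotplug_properties[] = {
    kAudioHardwarePropertyDevices,
    kAudioHardwarePropertyDefaultOutputDevice,
    kAudioHardwarePropertyDefaultInputDevice,
};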
Example #5
void add_audio_status_listener(audio_status_callback_t function, ptr_t data)
{
    OSStatus result;
    CFRunLoopRef theRunLoop;
    closure_t closure;

    AudioObjectPropertyAddress runLoop = {
        .mSelector = kAudioHardwarePropertyRunLoop,
        .mScope    = kAudioObjectPropertyScopeGlobal,
        .mElement  = kAudioObjectPropertyElementMaster
    };

    AudioObjectPropertyAddress devices = {
        .mSelector = kAudioHardwarePropertyDevices,
        .mScope    = kAudioObjectPropertyScopeGlobal,
        .mElement  = kAudioObjectPropertyElementMaster
    };

    theRunLoop = NULL; // necessary?
    result = AudioObjectSetPropertyData(kAudioObjectSystemObject, &runLoop, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
    assert(result == noErr);

    closure = new_closure(function, data);
    result = AudioObjectAddPropertyListener(kAudioObjectSystemObject, &devices, audio_listener, closure);
    assert(result == noErr);
}
Example #6
static int
COREAUDIO_Init(SDL_AudioDriverImpl * impl)
{
    /* Set the function pointers */
    impl->OpenDevice = COREAUDIO_OpenDevice;
    impl->CloseDevice = COREAUDIO_CloseDevice;
    impl->Deinitialize = COREAUDIO_Deinitialize;

#if MACOSX_COREAUDIO
    impl->DetectDevices = COREAUDIO_DetectDevices;
    AudioObjectAddPropertyListener(kAudioObjectSystemObject, &devlist_address, device_list_changed, NULL);
#else
    impl->OnlyHasDefaultOutputDevice = 1;

    /* Set category to ambient sound so that other music continues playing.
       You can change this at runtime in your own code if you need different
       behavior.  If this is common, we can add an SDL hint for this.
    */
    AudioSessionInitialize(NULL, NULL, NULL, nil);
    UInt32 category = kAudioSessionCategory_AmbientSound;
    AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(UInt32), &category);
#endif

    impl->ProvidesOwnCallbackThread = 1;

    return 1;   /* this audio target is available. */
}
Example #7
void AddChangeListeners(PortMixer *mixer) {
    if (!mixer->listenersInstalled) {
        for (size_t i=0; i<sizeof(changeListenersAddresses)/sizeof(changeListenersAddresses[0]); i++) {
            AudioObjectAddPropertyListener(mixer->deviceID, &changeListenersAddresses[i], ChangeListenerProc, mixer);
        }
        mixer->listenersInstalled = true;
    }
}
Example #8
    //==============================================================================
    CoreAudioIODeviceType()
        : AudioIODeviceType ("CoreAudio"),
          hasScanned (false)
    {
        AudioObjectPropertyAddress pa;
        pa.mSelector = kAudioHardwarePropertyDevices;
        pa.mScope = kAudioObjectPropertyScopeWildcard;
        pa.mElement = kAudioObjectPropertyElementWildcard;

        AudioObjectAddPropertyListener (kAudioObjectSystemObject, &pa, hardwareListenerProc, this);
    }
Example #9
static OSStatus add_listener(struct coreaudio_data *ca, UInt32 property)
{
	AudioObjectPropertyAddress addr = {
		property,
		kAudioObjectPropertyScopeGlobal,
		kAudioObjectPropertyElementMaster
	};

	return AudioObjectAddPropertyListener(ca->device_id, &addr,
			notification_callback, ca);
}
Example #10
bool CCoreAudioStream::Open(AudioStreamID streamId)
{
  m_StreamId = streamId;
  CLog::Log(LOGDEBUG, "CCoreAudioStream::Open: Opened stream 0x%04x.", (uint)m_StreamId);

  // watch for physical property changes.
  AudioObjectPropertyAddress propertyAOPA;
  propertyAOPA.mScope    = kAudioObjectPropertyScopeGlobal;
  propertyAOPA.mElement  = kAudioObjectPropertyElementMaster;  
  propertyAOPA.mSelector = kAudioStreamPropertyPhysicalFormat;
  if (AudioObjectAddPropertyListener(m_StreamId, &propertyAOPA, HardwareStreamListener, this) != noErr)
    CLog::Log(LOGERROR, "CCoreAudioStream::Open: couldn't set up a physical property listener.");

  // watch for virtual property changes.
  propertyAOPA.mScope    = kAudioObjectPropertyScopeGlobal;
  propertyAOPA.mElement  = kAudioObjectPropertyElementMaster;  
  propertyAOPA.mSelector = kAudioStreamPropertyVirtualFormat;
  if (AudioObjectAddPropertyListener(m_StreamId, &propertyAOPA, HardwareStreamListener, this) != noErr)
    CLog::Log(LOGERROR, "CCoreAudioStream::Open: couldn't set up a virtual property listener.");

  return true;
}
Example #11
void	HP_SystemInfo::Initialize()
{
	if(!sIsInitialized)
	{
		//	set up the user session status including initializing it
		CAPropertyAddress theAddress('user');	//	kAudioHardwarePropertyUserSessionIsActiveOrHeadless
		AudioObjectAddPropertyListener(kAudioObjectSystemObject, &theAddress, SystemListener, NULL);
		SystemListener(kAudioObjectSystemObject, 1, &theAddress, NULL);
		
		//	set up the audibility status
		theAddress.mSelector = 'pmut';	//	kAudioHardwarePropertyProcessIsAudible
		AudioObjectAddPropertyListener(kAudioObjectSystemObject, &theAddress, SystemListener, NULL);
		SystemListener(kAudioObjectSystemObject, 1, &theAddress, NULL);
		
		//	set up the mixing mono to stereo status
		theAddress.mSelector = 'stmo';	//	kAudioHardwarePropertyMixStereoToMono
		AudioObjectAddPropertyListener(kAudioObjectSystemObject, &theAddress, SystemListener, NULL);
		SystemListener(kAudioObjectSystemObject, 1, &theAddress, NULL);
		
		sIsInitialized = true;
	}
}
Example #12
   // IUnknown implementation
   bool SetHandler(wxEvtHandler *handler)
   {
      mHandler = handler;

      AudioObjectPropertyAddress property_address;

      property_address.mSelector = kAudioHardwarePropertyDevices;
      property_address.mScope = kAudioObjectPropertyScopeGlobal;
      property_address.mElement = kAudioObjectPropertyElementMaster;

      mListening = AudioObjectAddPropertyListener(kAudioObjectSystemObject,
                                                  &property_address,
                                                  DeviceChangeListener::Listener,
                                                  this) == 0;
   }
Example #13
void CCoreAudioDevice::RegisterDeviceChangedCB(bool bRegister, AudioObjectPropertyListenerProc callback, void *ref)
{
    OSStatus ret = noErr;
    AudioObjectPropertyAddress inAdr =
    {
        kAudioHardwarePropertyDevices,
        kAudioObjectPropertyScopeGlobal,
        kAudioObjectPropertyElementMaster
    };
    
    if (bRegister)
        ret = AudioObjectAddPropertyListener(kAudioObjectSystemObject, &inAdr, callback, ref);
    else
        ret = AudioObjectRemovePropertyListener(kAudioObjectSystemObject, &inAdr, callback, ref);
    
    if (ret != noErr)
        CLog::Log(LOGERROR, "CCoreAudioDevice::RegisterDeviceChangedCB - error %s a listener callback for device changes!", bRegister?"attaching":"removing");
}
Example #14
    CoreAudioIODevice (const String& deviceName,
                       AudioDeviceID inputDeviceId,
                       const int inputIndex_,
                       AudioDeviceID outputDeviceId,
                       const int outputIndex_)
        : AudioIODevice (deviceName, "CoreAudio"),
          inputIndex (inputIndex_),
          outputIndex (outputIndex_),
          isOpen_ (false),
          isStarted (false)
    {
        CoreAudioInternal* device = nullptr;

        if (outputDeviceId == 0 || outputDeviceId == inputDeviceId)
        {
            jassert (inputDeviceId != 0);

            device = new CoreAudioInternal (inputDeviceId);
        }
        else
        {
            device = new CoreAudioInternal (outputDeviceId);

            if (inputDeviceId != 0)
            {
                CoreAudioInternal* secondDevice = new CoreAudioInternal (inputDeviceId);

                device->inputDevice = secondDevice;
                secondDevice->isSlaveDevice = true;
            }
        }

        internal = device;
        jassert (device != nullptr);

        AudioObjectPropertyAddress pa;
        pa.mSelector = kAudioObjectPropertySelectorWildcard;
        pa.mScope = kAudioObjectPropertyScopeWildcard;
        pa.mElement = kAudioObjectPropertyElementWildcard;

        AudioObjectAddPropertyListener (kAudioObjectSystemObject, &pa, hardwareListenerProc, internal);
    }
Example #15
static bool coreaudio_init_hooks(struct coreaudio_data *ca)
{
	OSStatus stat;
	AURenderCallbackStruct callback_info = {
		.inputProc       = input_callback,
		.inputProcRefCon = ca
	};

	stat = add_listener(ca, kAudioDevicePropertyDeviceIsAlive);
	if (!ca_success(stat, ca, "coreaudio_init_hooks",
				"set disconnect callback"))
		return false;

	stat = add_listener(ca, PROPERTY_FORMATS);
	if (!ca_success(stat, ca, "coreaudio_init_hooks",
				"set format change callback"))
		return false;

	if (ca->default_device) {
		AudioObjectPropertyAddress addr = {
			PROPERTY_DEFAULT_DEVICE,
			kAudioObjectPropertyScopeGlobal,
			kAudioObjectPropertyElementMaster
		};

		stat = AudioObjectAddPropertyListener(kAudioObjectSystemObject,
				&addr, notification_callback, ca);
		if (!ca_success(stat, ca, "coreaudio_init_hooks",
					"set device change callback"))
			return false;
	}

	stat = set_property(ca->unit, kAudioOutputUnitProperty_SetInputCallback,
			SCOPE_GLOBAL, 0, &callback_info, sizeof(callback_info));
	if (!ca_success(stat, ca, "coreaudio_init_hooks", "set input callback"))
		return false;

	return true;
}
Example #16
bool CCoreAudioDevice::SetObjectListenerProc(AudioObjectPropertyListenerProc callback, void* pClientData)
{
  // Allow only one ObjectListener at a time
  if (!m_DeviceId || m_ObjectListenerProc)
    return false;

  AudioObjectPropertyAddress audioProperty;
  audioProperty.mSelector = kAudioObjectPropertySelectorWildcard;
  audioProperty.mScope = kAudioObjectPropertyScopeWildcard;
  audioProperty.mElement = kAudioObjectPropertyElementWildcard;

  OSStatus ret = AudioObjectAddPropertyListener(m_DeviceId, &audioProperty, callback, pClientData);

  if (ret)
  {
    CLog::Log(LOGERROR, "CCoreAudioDevice::SetObjectListenerProc: "
      "Unable to set ObjectListener callback. Error = %s", GetError(ret).c_str());
    return false;
  }

  m_ObjectListenerProc = callback;
  return true;
}
Example #17
static inline gboolean
_monitorize_spdif (GstCoreAudio * core_audio)
{
  OSStatus status = noErr;
  gboolean ret = TRUE;

  AudioObjectPropertyAddress propAddress = {
    kAudioDevicePropertyDeviceHasChanged,
    kAudioObjectPropertyScopeGlobal,
    kAudioObjectPropertyElementMaster
  };

  /* Install the property listener */
  status = AudioObjectAddPropertyListener (core_audio->device_id,
      &propAddress, _audio_stream_hardware_changed_listener,
      (void *) core_audio);
  if (status != noErr) {
    GST_ERROR_OBJECT (core_audio->osxbuf,
        "AudioObjectAddPropertyListener failed: %d", (int) status);
    ret = FALSE;
  }

  return ret;
}
Example #18
/*****************************************************************************
 * AudioStreamChangeFormat: Change i_stream_id to change_format
 *****************************************************************************/
static int AudioStreamChangeFormat( AudioStreamID i_stream_id, AudioStreamBasicDescription change_format )
{
    OSStatus err = noErr;
    int i;
    AudioObjectPropertyAddress property_address;

    static volatile int stream_format_changed;
    stream_format_changed = 0;

    print_format(MSGL_V, "setting stream format:", &change_format);

    /* Install the callback. */
    property_address.mSelector = kAudioStreamPropertyPhysicalFormat;
    property_address.mScope    = kAudioObjectPropertyScopeGlobal;
    property_address.mElement  = kAudioObjectPropertyElementMaster;

    err = AudioObjectAddPropertyListener(i_stream_id,
                                         &property_address,
                                         StreamListener,
                                         (void *)&stream_format_changed);
    if (err != noErr)
    {
        ao_msg(MSGT_AO, MSGL_WARN, "AudioObjectAddPropertyListener failed: [%4.4s]\n", (char *)&err);
        return CONTROL_FALSE;
    }

    /* Change the format. */
    err = SetAudioProperty(i_stream_id,
                           kAudioStreamPropertyPhysicalFormat,
                           sizeof(AudioStreamBasicDescription), &change_format);
    if (err != noErr)
    {
        ao_msg(MSGT_AO, MSGL_WARN, "could not set the stream format: [%4.4s]\n", (char *)&err);
        return CONTROL_FALSE;
    }

    /* The AudioStreamSetProperty is not only asynchronous,
     * it is also not atomic in its behaviour.
     * Therefore we check 5 times before we really give up.
     * FIXME: failing isn't actually implemented yet. */
    for (i = 0; i < 5; ++i)
    {
        AudioStreamBasicDescription actual_format;
        int j;
        for (j = 0; !stream_format_changed && j < 50; ++j)
            usec_sleep(10000);
        if (stream_format_changed)
            stream_format_changed = 0;
        else
            ao_msg(MSGT_AO, MSGL_V, "reached timeout\n" );

        err = GetAudioProperty(i_stream_id,
                               kAudioStreamPropertyPhysicalFormat,
                               sizeof(AudioStreamBasicDescription), &actual_format);

        print_format(MSGL_V, "actual format in use:", &actual_format);
        if (actual_format.mSampleRate == change_format.mSampleRate &&
            actual_format.mFormatID == change_format.mFormatID &&
            actual_format.mFramesPerPacket == change_format.mFramesPerPacket)
        {
            /* The right format is now active. */
            break;
        }
        /* We need to check again. */
    }

    /* Removing the property listener. */
    err = AudioObjectRemovePropertyListener(i_stream_id,
                                            &property_address,
                                            StreamListener,
                                            (void *)&stream_format_changed);
    if (err != noErr)
    {
        ao_msg(MSGT_AO, MSGL_WARN, "AudioObjectRemovePropertyListener failed: [%4.4s]\n", (char *)&err);
        return CONTROL_FALSE;
    }

    return CONTROL_TRUE;
}
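StreamListener, registered above with a pointer to the stream_format_changed flag, is not shown in the excerpt. A minimal sketch consistent with how the polling loop uses it (assumed, not copied from the source): it only has to flip the flag once the physical format property reports a change.

/* Sketch only: signal AudioStreamChangeFormat() that the physical format changed. */
static OSStatus StreamListener(AudioObjectID inObjectID,
                               UInt32 inNumberAddresses,
                               const AudioObjectPropertyAddress inAddresses[],
                               void *inClientData)
{
    volatile int *stream_format_changed = (volatile int *)inClientData;
    UInt32 i;
    (void)inObjectID;
    for (i = 0; i < inNumberAddresses; ++i) {
        if (inAddresses[i].mSelector == kAudioStreamPropertyPhysicalFormat) {
            *stream_format_changed = 1;
            break;
        }
    }
    return noErr;
}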
Example #19
static int
prepare_audiounit(_THIS, void *handle, int iscapture,
                  const AudioStreamBasicDescription * strdesc)
{
    OSStatus result = noErr;
    AURenderCallbackStruct callback;
    AudioComponentDescription desc;
    AudioComponent comp = NULL;
    const AudioUnitElement output_bus = 0;
    const AudioUnitElement input_bus = 1;
    const AudioUnitElement bus = ((iscapture) ? input_bus : output_bus);
    const AudioUnitScope scope = ((iscapture) ? kAudioUnitScope_Output :
                                  kAudioUnitScope_Input);

#if MACOSX_COREAUDIO
    if (!prepare_device(this, handle, iscapture)) {
        return 0;
    }
#endif

    SDL_zero(desc);
    desc.componentType = kAudioUnitType_Output;
    desc.componentManufacturer = kAudioUnitManufacturer_Apple;

#if MACOSX_COREAUDIO
    desc.componentSubType = kAudioUnitSubType_DefaultOutput;
#else
    desc.componentSubType = kAudioUnitSubType_RemoteIO;
#endif
    comp = AudioComponentFindNext(NULL, &desc);

    if (comp == NULL) {
        SDL_SetError("Couldn't find requested CoreAudio component");
        return 0;
    }

    /* Open & initialize the audio unit */
    result = AudioComponentInstanceNew(comp, &this->hidden->audioUnit);
    CHECK_RESULT("AudioComponentInstanceNew");

    this->hidden->audioUnitOpened = 1;

#if MACOSX_COREAUDIO
    result = AudioUnitSetProperty(this->hidden->audioUnit,
                                  kAudioOutputUnitProperty_CurrentDevice,
                                  kAudioUnitScope_Global, 0,
                                  &this->hidden->deviceID,
                                  sizeof(AudioDeviceID));
    CHECK_RESULT
        ("AudioUnitSetProperty (kAudioOutputUnitProperty_CurrentDevice)");
#endif

    /* Set the data format of the audio unit. */
    result = AudioUnitSetProperty(this->hidden->audioUnit,
                                  kAudioUnitProperty_StreamFormat,
                                  scope, bus, strdesc, sizeof(*strdesc));
    CHECK_RESULT("AudioUnitSetProperty (kAudioUnitProperty_StreamFormat)");

    /* Set the audio callback */
    SDL_memset(&callback, 0, sizeof(AURenderCallbackStruct));
    callback.inputProc = ((iscapture) ? inputCallback : outputCallback);
    callback.inputProcRefCon = this;
    result = AudioUnitSetProperty(this->hidden->audioUnit,
                                  kAudioUnitProperty_SetRenderCallback,
                                  scope, bus, &callback, sizeof(callback));
    CHECK_RESULT
        ("AudioUnitSetProperty (kAudioUnitProperty_SetRenderCallback)");

    /* Calculate the final parameters for this audio specification */
    SDL_CalculateAudioSpec(&this->spec);

    /* Allocate a sample buffer */
    this->hidden->bufferOffset = this->hidden->bufferSize = this->spec.size;
    this->hidden->buffer = SDL_malloc(this->hidden->bufferSize);

    result = AudioUnitInitialize(this->hidden->audioUnit);
    CHECK_RESULT("AudioUnitInitialize");

    /* Finally, start processing of the audio unit */
    result = AudioOutputUnitStart(this->hidden->audioUnit);
    CHECK_RESULT("AudioOutputUnitStart");

#if MACOSX_COREAUDIO
    /* Fire a callback if the device stops being "alive" (disconnected, etc). */
    AudioObjectAddPropertyListener(this->hidden->deviceID, &alive_address, device_unplugged, this);
#endif

    /* We're running! */
    return 1;
}
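alive_address is referenced above but not defined in this excerpt. A definition consistent with the comment next to the registration (assumed here for illustration) watches kAudioDevicePropertyDeviceIsAlive on the opened device:

/* Assumed for illustration: fires device_unplugged when the device stops being
 * "alive", e.g. when an external interface is disconnected. */
static const AudioObjectPropertyAddress alive_address = {
    kAudioDevicePropertyDeviceIsAlive,
    kAudioObjectPropertyScopeGlobal,
    kAudioObjectPropertyElementMaster
};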
Example #20
// ----------------------------------------------------------------------------
void AudioCoreDriver::initialize(PlayerLibSidplay* player, int sampleRate, int bitsPerSample)
// ----------------------------------------------------------------------------
{
	if (mInstanceId != 0)
		return;

	mPlayer = player;
	mNumSamplesInBuffer = 512;
	mIsPlaying = false;
	mIsPlayingPreRenderedBuffer = false;
	mBufferUnderrunDetected = false;
    mBufferUnderrunCount = 0;
    mSpectrumTemporalSmoothing = 0.5f;
	
	mPreRenderedBuffer = NULL;
	mPreRenderedBufferSampleCount = 0;
	mPreRenderedBufferPlaybackPosition = 0;
	
	if (!mIsInitialized)
	{
        OSStatus err;

        //get default output device
		UInt32 propertySize = sizeof(mDeviceID);
        AudioObjectPropertyAddress prop1 = {kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster};
        err = AudioObjectGetPropertyData(kAudioObjectSystemObject, &prop1, 0, NULL, &propertySize, &mDeviceID);
        if (err != kAudioHardwareNoError) {
            printf("AudioObjectGetPropertyData(kAudioHardwarePropertyDefaultOutputDevice) failed\n");
            return;
        }

		if (mDeviceID == kAudioDeviceUnknown)
			return;

        err = queryStreamFormat(mDeviceID, mStreamFormat);
        if (err != kAudioHardwareNoError) {
            printf("queryStreamFormat failed\n");
            return;
        }

        //add property listeners
#if USE_NEW_API
        AudioObjectPropertyAddress prop5 = {
            kAudioDevicePropertyStreamFormat,       //TODO this is deprecated, how to get this notification?
            kAudioObjectPropertyScopeGlobal,
            kAudioObjectPropertyElementMaster 
        };
        err = AudioObjectAddPropertyListener(mDeviceID, &prop5, streamFormatChanged, (void*)this);
        if (err != kAudioHardwareNoError) {
            printf("AudioObjectAddPropertyListener(streamFormatChanged) failed\n");
            return;
        }

        AudioObjectPropertyAddress prop6 = {
            kAudioDeviceProcessorOverload,
            kAudioObjectPropertyScopeGlobal,
            kAudioObjectPropertyElementMaster 
        };
        err = AudioObjectAddPropertyListener(mDeviceID, &prop6, overloadDetected, (void*)this);
        if (err != kAudioHardwareNoError) {
            printf("AudioObjectAddPropertyListener(overloadDetected) failed\n");
            return;
        }

        AudioObjectPropertyAddress prop7 = {
            kAudioHardwarePropertyDefaultOutputDevice,
            kAudioObjectPropertyScopeGlobal,
            kAudioObjectPropertyElementMaster 
        };
        err = AudioObjectAddPropertyListener(mDeviceID, &prop7, deviceChanged, (void*)this);
        if (err != kAudioHardwareNoError) {
            printf("AudioObjectAddPropertyListener(deviceChanged) failed\n");
            return;
        }
#else
		if (AudioDeviceAddPropertyListener(mDeviceID, 0, false, kAudioDevicePropertyStreamFormat, streamFormatChanged, (void*) this) != kAudioHardwareNoError)
			return;
		if (AudioDeviceAddPropertyListener(mDeviceID, 0, false, kAudioDeviceProcessorOverload, overloadDetected, (void*) this) != kAudioHardwareNoError)
			return;
		if (AudioHardwareAddPropertyListener(kAudioHardwarePropertyDefaultOutputDevice, deviceChanged, (void*) this)  != kAudioHardwareNoError)
			return;
#endif
		
		mSampleBuffer1 = new short[mNumSamplesInBuffer];
		memset(mSampleBuffer1, 0, sizeof(short) * mNumSamplesInBuffer);
		mSampleBuffer2 = new short[mNumSamplesInBuffer];
		memset(mSampleBuffer2, 0, sizeof(short) * mNumSamplesInBuffer);
        mSpectrumBuffer = new float[mNumSamplesInBuffer/2];
		memset(mSpectrumBuffer, 0, sizeof(float) * (mNumSamplesInBuffer/2));

        mSampleBuffer = mSampleBuffer1;
        mRetSampleBuffer = mSampleBuffer2;

		int bufferByteSize = mNumSamplesInBuffer * mStreamFormat.mChannelsPerFrame * sizeof(float);
		propertySize = sizeof(bufferByteSize);
#if USE_NEW_API
        AudioObjectPropertyAddress prop8 = {
            kAudioDevicePropertyBufferSize,
            kAudioObjectPropertyScopeGlobal,
            kAudioObjectPropertyElementMaster 
        };
        err = AudioObjectSetPropertyData(mDeviceID, &prop8, 0, NULL, propertySize, &bufferByteSize);
        if (err != kAudioHardwareNoError) {
            printf("AudioObjectSetPropertyData(kAudioDevicePropertyBufferSize) failed\n");
            return;
        }
#else
		if (AudioDeviceSetProperty(mDeviceID, NULL, 0, false, kAudioDevicePropertyBufferSize, propertySize, &bufferByteSize) != kAudioHardwareNoError)
			return;
#endif
		mScaleFactor = sBitScaleFactor;
		mPreRenderedBufferScaleFactor = sBitScaleFactor;
		
		if (AudioDeviceCreateIOProcID(mDeviceID, emulationPlaybackProc, (void*) this, &mEmulationPlaybackProcID) != kAudioHardwareNoError)
		{
			delete[] mSampleBuffer1;
			mSampleBuffer1 = NULL;
			delete[] mSampleBuffer2;
			mSampleBuffer2 = NULL;
            mSampleBuffer = NULL;
            mRetSampleBuffer = NULL;
			delete[] mSpectrumBuffer;
			mSpectrumBuffer = NULL;
			return;
		}

		if (AudioDeviceCreateIOProcID(mDeviceID, preRenderedBufferPlaybackProc, (void*) this, &mPreRenderedBufferPlaybackProcID) != kAudioHardwareNoError)
		{
			delete[] mSampleBuffer1;
			mSampleBuffer1 = NULL;
			delete[] mSampleBuffer2;
			mSampleBuffer2 = NULL;
            mSampleBuffer = NULL;
            mRetSampleBuffer = NULL;
			delete[] mSpectrumBuffer;
			mSpectrumBuffer = NULL;
			return;
		}
	}
	
	mVolume = 1.0f;
	mIsInitialized = true;
}
Example #21
Error AudioDriverCoreAudio::init() {
	mutex = Mutex::create();

	AudioComponentDescription desc;
	zeromem(&desc, sizeof(desc));
	desc.componentType = kAudioUnitType_Output;
#ifdef OSX_ENABLED
	desc.componentSubType = kAudioUnitSubType_HALOutput;
#else
	desc.componentSubType = kAudioUnitSubType_RemoteIO;
#endif
	desc.componentManufacturer = kAudioUnitManufacturer_Apple;

	AudioComponent comp = AudioComponentFindNext(NULL, &desc);
	ERR_FAIL_COND_V(comp == NULL, FAILED);

	OSStatus result = AudioComponentInstanceNew(comp, &audio_unit);
	ERR_FAIL_COND_V(result != noErr, FAILED);

#ifdef OSX_ENABLED
	AudioObjectPropertyAddress prop;
	prop.mSelector = kAudioHardwarePropertyDefaultOutputDevice;
	prop.mScope = kAudioObjectPropertyScopeGlobal;
	prop.mElement = kAudioObjectPropertyElementMaster;

	result = AudioObjectAddPropertyListener(kAudioObjectSystemObject, &prop, &output_device_address_cb, this);
	ERR_FAIL_COND_V(result != noErr, FAILED);

	prop.mSelector = kAudioHardwarePropertyDefaultInputDevice;

	result = AudioObjectAddPropertyListener(kAudioObjectSystemObject, &prop, &input_device_address_cb, this);
	ERR_FAIL_COND_V(result != noErr, FAILED);
#endif

	AudioStreamBasicDescription strdesc;

	zeromem(&strdesc, sizeof(strdesc));
	UInt32 size = sizeof(strdesc);
	result = AudioUnitGetProperty(audio_unit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, kOutputBus, &strdesc, &size);
	ERR_FAIL_COND_V(result != noErr, FAILED);

	switch (strdesc.mChannelsPerFrame) {
		case 2: // Stereo
		case 4: // Surround 3.1
		case 6: // Surround 5.1
		case 8: // Surround 7.1
			channels = strdesc.mChannelsPerFrame;
			break;

		default:
			// Unknown number of channels, default to stereo
			channels = 2;
			break;
	}

	zeromem(&strdesc, sizeof(strdesc));
	size = sizeof(strdesc);
	result = AudioUnitGetProperty(audio_unit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, kInputBus, &strdesc, &size);
	ERR_FAIL_COND_V(result != noErr, FAILED);

	switch (strdesc.mChannelsPerFrame) {
		case 1: // Mono
			capture_channels = 1;
			break;

		case 2: // Stereo
			capture_channels = 2;
			break;

		default:
			// Unknown number of channels, default to stereo
			capture_channels = 2;
			break;
	}

	mix_rate = GLOBAL_DEF_RST("audio/mix_rate", DEFAULT_MIX_RATE);

	zeromem(&strdesc, sizeof(strdesc));
	strdesc.mFormatID = kAudioFormatLinearPCM;
	strdesc.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
	strdesc.mChannelsPerFrame = channels;
	strdesc.mSampleRate = mix_rate;
	strdesc.mFramesPerPacket = 1;
	strdesc.mBitsPerChannel = 16;
	strdesc.mBytesPerFrame = strdesc.mBitsPerChannel * strdesc.mChannelsPerFrame / 8;
	strdesc.mBytesPerPacket = strdesc.mBytesPerFrame * strdesc.mFramesPerPacket;

	result = AudioUnitSetProperty(audio_unit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, kOutputBus, &strdesc, sizeof(strdesc));
	ERR_FAIL_COND_V(result != noErr, FAILED);

	strdesc.mChannelsPerFrame = capture_channels;

	result = AudioUnitSetProperty(audio_unit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, kInputBus, &strdesc, sizeof(strdesc));
	ERR_FAIL_COND_V(result != noErr, FAILED);

	int latency = GLOBAL_DEF_RST("audio/output_latency", DEFAULT_OUTPUT_LATENCY);
	// Sample rate is independent of channels (ref: https://stackoverflow.com/questions/11048825/audio-sample-frequency-rely-on-channels)
	buffer_frames = closest_power_of_2(latency * mix_rate / 1000);

#ifdef OSX_ENABLED
	result = AudioUnitSetProperty(audio_unit, kAudioDevicePropertyBufferFrameSize, kAudioUnitScope_Global, kOutputBus, &buffer_frames, sizeof(UInt32));
	ERR_FAIL_COND_V(result != noErr, FAILED);
#endif

	unsigned int buffer_size = buffer_frames * channels;
	samples_in.resize(buffer_size);
	input_buf.resize(buffer_size);
	input_buffer.resize(buffer_size * 8);
	input_position = 0;
	input_size = 0;

	print_verbose("CoreAudio: detected " + itos(channels) + " channels");
	print_verbose("CoreAudio: audio buffer frames: " + itos(buffer_frames) + " calculated latency: " + itos(buffer_frames * 1000 / mix_rate) + "ms");

	AURenderCallbackStruct callback;
	zeromem(&callback, sizeof(AURenderCallbackStruct));
	callback.inputProc = &AudioDriverCoreAudio::output_callback;
	callback.inputProcRefCon = this;
	result = AudioUnitSetProperty(audio_unit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, kOutputBus, &callback, sizeof(callback));
	ERR_FAIL_COND_V(result != noErr, FAILED);

	zeromem(&callback, sizeof(AURenderCallbackStruct));
	callback.inputProc = &AudioDriverCoreAudio::input_callback;
	callback.inputProcRefCon = this;
	result = AudioUnitSetProperty(audio_unit, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, 0, &callback, sizeof(callback));
	ERR_FAIL_COND_V(result != noErr, FAILED);

	result = AudioUnitInitialize(audio_unit);
	ERR_FAIL_COND_V(result != noErr, FAILED);

	return OK;
}
Example #22
static gboolean
_audio_stream_change_format (AudioStreamID stream_id,
    AudioStreamBasicDescription format)
{
  OSStatus status = noErr;
  gint i;
  gboolean ret = FALSE;
  AudioStreamBasicDescription cformat;
  PropertyMutex prop_mutex;

  AudioObjectPropertyAddress formatAddress = {
    kAudioStreamPropertyPhysicalFormat,
    kAudioObjectPropertyScopeGlobal,
    kAudioObjectPropertyElementMaster
  };

  GST_DEBUG ("setting stream format: " CORE_AUDIO_FORMAT,
      CORE_AUDIO_FORMAT_ARGS (format));

  /* Condition because SetProperty is asynchronous */
  g_mutex_init (&prop_mutex.lock);
  g_cond_init (&prop_mutex.cond);

  g_mutex_lock (&prop_mutex.lock);

  /* Install the property listener to serialize the operations */
  status = AudioObjectAddPropertyListener (stream_id, &formatAddress,
      _audio_stream_format_listener, (void *) &prop_mutex);
  if (status != noErr) {
    GST_ERROR ("AudioObjectAddPropertyListener failed: %d", (int) status);
    goto done;
  }

  /* Change the format */
  if (!_audio_stream_set_current_format (stream_id, format)) {
    goto done;
  }

  /* The AudioObjectSetProperty is not only asynchronous
   * it is also not atomic in its behaviour.
   * Therefore we check 4 times before we really give up. */
  for (i = 0; i < 4; i++) {
    gint64 end_time;

    /* g_cond_wait_until() expects an absolute monotonic time in microseconds. */
    end_time = g_get_monotonic_time () + 250 * G_TIME_SPAN_MILLISECOND;

    if (!g_cond_wait_until (&prop_mutex.cond, &prop_mutex.lock, end_time)) {
      GST_LOG ("timeout...");
    }

    if (_audio_stream_get_current_format (stream_id, &cformat)) {
      GST_DEBUG ("current stream format: " CORE_AUDIO_FORMAT,
          CORE_AUDIO_FORMAT_ARGS (cformat));

      if (cformat.mSampleRate == format.mSampleRate &&
          cformat.mFormatID == format.mFormatID &&
          cformat.mFramesPerPacket == format.mFramesPerPacket) {
        /* The right format is now active */
        break;
      }
    }
  }

  if (cformat.mSampleRate != format.mSampleRate ||
      cformat.mFormatID != format.mFormatID ||
      cformat.mFramesPerPacket != format.mFramesPerPacket) {
    goto done;
  }

  ret = TRUE;

done:
  /* Removing the property listener */
  status = AudioObjectRemovePropertyListener (stream_id,
      &formatAddress, _audio_stream_format_listener, (void *) &prop_mutex);
  if (status != noErr) {
    GST_ERROR ("AudioObjectRemovePropertyListener failed: %d", (int) status);
  }
  /* Destroy the lock and condition */
  g_mutex_unlock (&prop_mutex.lock);
  g_mutex_clear (&prop_mutex.lock);
  g_cond_clear (&prop_mutex.cond);

  return ret;
}
Example #23
/*****************************************************************************
 * Setup a encoded digital stream (SPDIF)
 *****************************************************************************/
static int OpenSPDIF(void)
{
    OSStatus                    err = noErr;
    UInt32                      i_param_size, b_mix = 0;
    Boolean                     b_writeable = 0;
    AudioStreamID               *p_streams = NULL;
    int                         i, i_streams = 0;
    AudioObjectPropertyAddress  property_address;

    /* Start doing the SPDIF setup process. */
    ao->b_digital = 1;

    /* Hog the device. */
    ao->i_hog_pid = getpid() ;

    err = SetAudioProperty(ao->i_selected_dev,
                           kAudioDevicePropertyHogMode,
                           sizeof(ao->i_hog_pid), &ao->i_hog_pid);
    if (err != noErr)
    {
        ao_msg(MSGT_AO, MSGL_WARN, "failed to set hogmode: [%4.4s]\n", (char *)&err);
        ao->i_hog_pid = -1;
        goto err_out;
    }

    /* Set mixable to false if we are allowed to. */
    err = IsAudioPropertySettable(ao->i_selected_dev,
                                  kAudioDevicePropertySupportsMixing,
                                  &b_writeable);
    err = GetAudioProperty(ao->i_selected_dev,
                           kAudioDevicePropertySupportsMixing,
                           sizeof(UInt32), &b_mix);
    if (err != noErr && b_writeable)
    {
        b_mix = 0;
        err = SetAudioProperty(ao->i_selected_dev,
                               kAudioDevicePropertySupportsMixing,
                               sizeof(UInt32), &b_mix);
        ao->b_changed_mixing = 1;
    }
    if (err != noErr)
    {
        ao_msg(MSGT_AO, MSGL_WARN, "failed to set mixmode: [%4.4s]\n", (char *)&err);
        goto err_out;
    }

    /* Get a list of all the streams on this device. */
    i_param_size = GetAudioPropertyArray(ao->i_selected_dev,
                                         kAudioDevicePropertyStreams,
                                         kAudioDevicePropertyScopeOutput,
                                         (void **)&p_streams);

    if (!i_param_size) {
        ao_msg(MSGT_AO, MSGL_WARN, "could not get number of streams.\n");
        goto err_out;
    }

    i_streams = i_param_size / sizeof(AudioStreamID);

    ao_msg(MSGT_AO, MSGL_V, "current device stream number: %d\n", i_streams);

    for (i = 0; i < i_streams && ao->i_stream_index < 0; ++i)
    {
        /* Find a stream with a cac3 stream. */
        AudioStreamBasicDescription *p_format_list = NULL;
        int i_formats = 0, j = 0, b_digital = 0;

        i_param_size = GetGlobalAudioPropertyArray(p_streams[i],
                                                   kAudioStreamPropertyPhysicalFormats,
                                                   (void **)&p_format_list);

        if (!i_param_size) {
            ao_msg(MSGT_AO, MSGL_WARN,
                   "Could not get number of stream formats.\n");
            continue;
        }

        i_formats = i_param_size / sizeof(AudioStreamBasicDescription);

        /* Check if one of the supported formats is a digital format. */
        for (j = 0; j < i_formats; ++j)
        {
            if (p_format_list[j].mFormatID == 'IAC3' ||
                  p_format_list[j].mFormatID == kAudioFormat60958AC3)
            {
                b_digital = 1;
                break;
            }
        }

        if (b_digital)
        {
            /* If this stream supports a digital (cac3) format, then set it. */
            int i_requested_rate_format = -1;
            int i_current_rate_format = -1;
            int i_backup_rate_format = -1;

            ao->i_stream_id = p_streams[i];
            ao->i_stream_index = i;

            if (ao->b_revert == 0)
            {
                /* Retrieve the original format of this stream first if not done so already. */
                err = GetAudioProperty(ao->i_stream_id,
                                       kAudioStreamPropertyPhysicalFormat,
                                       sizeof(ao->sfmt_revert), &ao->sfmt_revert);
                if (err != noErr)
                {
                    ao_msg(MSGT_AO, MSGL_WARN,
                           "Could not retrieve the original stream format: [%4.4s]\n",
                           (char *)&err);
                    if (p_format_list) free(p_format_list);
                    continue;
                }
                ao->b_revert = 1;
            }

            for (j = 0; j < i_formats; ++j)
                if (p_format_list[j].mFormatID == 'IAC3' ||
                      p_format_list[j].mFormatID == kAudioFormat60958AC3)
                {
                    if (p_format_list[j].mSampleRate == ao->stream_format.mSampleRate)
                    {
                        i_requested_rate_format = j;
                        break;
                    }
                    if (p_format_list[j].mSampleRate == ao->sfmt_revert.mSampleRate)
                        i_current_rate_format = j;
                    else if (i_backup_rate_format < 0 || p_format_list[j].mSampleRate > p_format_list[i_backup_rate_format].mSampleRate)
                        i_backup_rate_format = j;
                }

            if (i_requested_rate_format >= 0) /* We prefer to output at the samplerate of the original audio. */
                ao->stream_format = p_format_list[i_requested_rate_format];
            else if (i_current_rate_format >= 0) /* If not possible, we will try to use the current samplerate of the device. */
                ao->stream_format = p_format_list[i_current_rate_format];
            else ao->stream_format = p_format_list[i_backup_rate_format]; /* And if we have to, any digital format will be just fine (highest rate possible). */
        }
        if (p_format_list) free(p_format_list);
    }
    if (p_streams) free(p_streams);

    if (ao->i_stream_index < 0)
    {
        ao_msg(MSGT_AO, MSGL_WARN,
               "Cannot find any digital output stream format when OpenSPDIF().\n");
        goto err_out;
    }

    print_format(MSGL_V, "original stream format:", &ao->sfmt_revert);

    if (!AudioStreamChangeFormat(ao->i_stream_id, ao->stream_format))
        goto err_out;

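    /* Listen for "device has changed" notifications so the driver can react if
       the device's configuration changes behind our back. */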
    property_address.mSelector = kAudioDevicePropertyDeviceHasChanged;
    property_address.mScope    = kAudioObjectPropertyScopeGlobal;
    property_address.mElement  = kAudioObjectPropertyElementMaster;

    err = AudioObjectAddPropertyListener(ao->i_selected_dev,
                                         &property_address,
                                         DeviceListener,
                                         NULL);
    if (err != noErr)
        ao_msg(MSGT_AO, MSGL_WARN, "AudioDeviceAddPropertyListener for kAudioDevicePropertyDeviceHasChanged failed: [%4.4s]\n", (char *)&err);


    /* FIXME: If the output stream is not in native byte order, the data would */
    /*        need to be byte-swapped somewhere, but no such case has been reported. */
#if HAVE_BIGENDIAN
    if (!(ao->stream_format.mFormatFlags & kAudioFormatFlagIsBigEndian))
#else
    if (ao->stream_format.mFormatFlags & kAudioFormatFlagIsBigEndian)
#endif
        ao_msg(MSGT_AO, MSGL_WARN,
               "Output stream has non-native byte order, digital output may fail.\n");

    /* For AC-3/DTS, use the stream's packet size (6144 bytes) as the chunk size. */
    ao->chunk_size = ao->stream_format.mBytesPerPacket;

    ao_data.samplerate = ao->stream_format.mSampleRate;
    ao_data.channels = ao->stream_format.mChannelsPerFrame;
    ao_data.bps = ao_data.samplerate * (ao->stream_format.mBytesPerPacket/ao->stream_format.mFramesPerPacket);
    ao_data.outburst = ao->chunk_size;
    ao_data.buffersize = ao_data.bps;

    ao->num_chunks = (ao_data.bps+ao->chunk_size-1)/ao->chunk_size;
    ao->buffer_len = ao->num_chunks * ao->chunk_size;
    ao->buffer = av_fifo_alloc(ao->buffer_len);

    ao_msg(MSGT_AO,MSGL_V, "using %5d chunks of %d bytes (buffer len %d bytes)\n", (int)ao->num_chunks, (int)ao->chunk_size, (int)ao->buffer_len);


    /* Create IOProc callback. */
    err = AudioDeviceCreateIOProcID(ao->i_selected_dev,
                                    (AudioDeviceIOProc)RenderCallbackSPDIF,
                                    (void *)ao,
                                    &ao->renderCallback);

    if (err != noErr || ao->renderCallback == NULL)
    {
        ao_msg(MSGT_AO, MSGL_WARN, "AudioDeviceCreateIOProcID failed: [%4.4s]\n", (char *)&err);
        goto err_out1;
    }

    reset();

    return CONTROL_TRUE;

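/* Error paths: undo whatever device state we already changed before giving up. */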
err_out1:
    if (ao->b_revert)
        AudioStreamChangeFormat(ao->i_stream_id, ao->sfmt_revert);
err_out:
    if (ao->b_changed_mixing && ao->sfmt_revert.mFormatID != kAudioFormat60958AC3)
    {
        int b_mix = 1;
        err = SetAudioProperty(ao->i_selected_dev,
                               kAudioDevicePropertySupportsMixing,
                               sizeof(int), &b_mix);
        if (err != noErr)
            ao_msg(MSGT_AO, MSGL_WARN, "failed to set mixmode: [%4.4s]\n",
                   (char *)&err);
    }
    if (ao->i_hog_pid == getpid())
    {
        ao->i_hog_pid = -1;
        err = SetAudioProperty(ao->i_selected_dev,
                               kAudioDevicePropertyHogMode,
                               sizeof(ao->i_hog_pid), &ao->i_hog_pid);
        if (err != noErr)
            ao_msg(MSGT_AO, MSGL_WARN, "Could not release hogmode: [%4.4s]\n",
                   (char *)&err);
    }
    av_fifo_free(ao->buffer);
    free(ao);
    ao = NULL;
    return CONTROL_FALSE;
}
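
/*
 * Hedged sketch, not the original source: a minimal AudioDeviceIOProc of the
 * kind OpenSPDIF() registers above as RenderCallbackSPDIF. It only shows the
 * callback shape -- copy one chunk from the FIFO into the first output buffer,
 * or output silence on underrun. The struct/field names (ao_coreaudio_t,
 * buffer, chunk_size) and the av_fifo_size()/av_fifo_generic_read() helpers
 * are assumptions taken from the surrounding code, and the file's existing
 * includes (string.h, libavutil/fifo.h) are taken for granted.
 */
static OSStatus RenderCallbackSPDIF_sketch(AudioDeviceID inDevice,
                                           const AudioTimeStamp *inNow,
                                           const AudioBufferList *inInputData,
                                           const AudioTimeStamp *inInputTime,
                                           AudioBufferList *outOutputData,
                                           const AudioTimeStamp *inOutputTime,
                                           void *inClientData)
{
    ao_coreaudio_t *ao = inClientData;
    AudioBuffer *buf = &outOutputData->mBuffers[0];
    int req = (int)buf->mDataByteSize;

    if (req > ao->chunk_size)
        req = ao->chunk_size;

    if (av_fifo_size(ao->buffer) >= req)
        av_fifo_generic_read(ao->buffer, buf->mData, req, NULL); /* one AC-3 burst */
    else
        memset(buf->mData, 0, buf->mDataByteSize);               /* underrun: silence */

    return noErr;
}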
void	CAHALAudioObject::AddPropertyListener(AudioObjectPropertyAddress& inAddress, AudioObjectPropertyListenerProc inListenerProc, void* inClientData)
{
	OSStatus theError = AudioObjectAddPropertyListener(mObjectID, &inAddress, inListenerProc, inClientData);
	ThrowIfError(theError, CAException(theError), "CAHALAudioObject::AddPropertyListener: got an error adding a property listener");
}
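
/*
 * Hedged usage sketch, not part of the original sources: how a caller might
 * register a listener through CAHALAudioObject::AddPropertyListener. The
 * listener proc follows the standard AudioObjectPropertyListenerProc
 * signature; "FormatListener" and "RegisterFormatListener" are hypothetical
 * names.
 */
static OSStatus FormatListener(AudioObjectID inObjectID,
                               UInt32 inNumberAddresses,
                               const AudioObjectPropertyAddress inAddresses[],
                               void *inClientData)
{
	// React to the change here, e.g. re-query the stream's physical format.
	return noErr;
}

static void RegisterFormatListener(CAHALAudioObject& theStream)
{
	AudioObjectPropertyAddress theAddress = {
		kAudioStreamPropertyPhysicalFormat,
		kAudioObjectPropertyScopeGlobal,
		kAudioObjectPropertyElementMaster
	};
	// AddPropertyListener throws a CAException if the underlying HAL call fails.
	theStream.AddPropertyListener(theAddress, FormatListener, NULL);
}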
Example #25
0
Error AudioDriverCoreAudio::capture_init() {
	AudioComponentDescription desc;
	zeromem(&desc, sizeof(desc));
	desc.componentType = kAudioUnitType_Output;
#ifdef OSX_ENABLED
	desc.componentSubType = kAudioUnitSubType_HALOutput;
#else
	desc.componentSubType = kAudioUnitSubType_RemoteIO;
#endif
	desc.componentManufacturer = kAudioUnitManufacturer_Apple;

	AudioComponent comp = AudioComponentFindNext(NULL, &desc);
	ERR_FAIL_COND_V(comp == NULL, FAILED);

	OSStatus result = AudioComponentInstanceNew(comp, &input_unit);
	ERR_FAIL_COND_V(result != noErr, FAILED);

#ifdef OSX_ENABLED
	AudioObjectPropertyAddress prop;
	prop.mSelector = kAudioHardwarePropertyDefaultInputDevice;
	prop.mScope = kAudioObjectPropertyScopeGlobal;
	prop.mElement = kAudioObjectPropertyElementMaster;

	result = AudioObjectAddPropertyListener(kAudioObjectSystemObject, &prop, &input_device_address_cb, this);
	ERR_FAIL_COND_V(result != noErr, FAILED);
#endif

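	// Enable IO on the input element (kInputBus) and disable it on the output element (kOutputBus).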
	UInt32 flag = 1;
	result = AudioUnitSetProperty(input_unit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, kInputBus, &flag, sizeof(flag));
	ERR_FAIL_COND_V(result != noErr, FAILED);
	flag = 0;
	result = AudioUnitSetProperty(input_unit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, kOutputBus, &flag, sizeof(flag));
	ERR_FAIL_COND_V(result != noErr, FAILED);

	UInt32 size;
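	// On macOS, attach the system default input device to the AUHAL unit.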
#ifdef OSX_ENABLED
	AudioDeviceID deviceId;
	size = sizeof(AudioDeviceID);
	AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };

	result = AudioObjectGetPropertyData(kAudioObjectSystemObject, &property, 0, NULL, &size, &deviceId);
	ERR_FAIL_COND_V(result != noErr, FAILED);

	result = AudioUnitSetProperty(input_unit, kAudioOutputUnitProperty_CurrentDevice, kAudioUnitScope_Global, 0, &deviceId, sizeof(AudioDeviceID));
	ERR_FAIL_COND_V(result != noErr, FAILED);
#endif

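	// Query the input unit's current stream format to learn how many capture channels are available.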
	AudioStreamBasicDescription strdesc;
	zeromem(&strdesc, sizeof(strdesc));
	size = sizeof(strdesc);
	result = AudioUnitGetProperty(input_unit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, kInputBus, &strdesc, &size);
	ERR_FAIL_COND_V(result != noErr, FAILED);

	switch (strdesc.mChannelsPerFrame) {
		case 1: // Mono
			capture_channels = 1;
			break;

		case 2: // Stereo
			capture_channels = 2;
			break;

		default:
			// Unknown number of channels, default to stereo
			capture_channels = 2;
			break;
	}

	mix_rate = GLOBAL_DEF_RST("audio/mix_rate", DEFAULT_MIX_RATE);

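	// Describe the client-side format we want to capture: interleaved signed 16-bit PCM at the engine mix rate.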
	zeromem(&strdesc, sizeof(strdesc));
	strdesc.mFormatID = kAudioFormatLinearPCM;
	strdesc.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
	strdesc.mChannelsPerFrame = capture_channels;
	strdesc.mSampleRate = mix_rate;
	strdesc.mFramesPerPacket = 1;
	strdesc.mBitsPerChannel = 16;
	strdesc.mBytesPerFrame = strdesc.mBitsPerChannel * strdesc.mChannelsPerFrame / 8;
	strdesc.mBytesPerPacket = strdesc.mBytesPerFrame * strdesc.mFramesPerPacket;

	result = AudioUnitSetProperty(input_unit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, kInputBus, &strdesc, sizeof(strdesc));
	ERR_FAIL_COND_V(result != noErr, FAILED);

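	// Register the input callback that is invoked whenever the device has captured new frames.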
	AURenderCallbackStruct callback;
	zeromem(&callback, sizeof(AURenderCallbackStruct));
	callback.inputProc = &AudioDriverCoreAudio::input_callback;
	callback.inputProcRefCon = this;
	result = AudioUnitSetProperty(input_unit, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, kInputBus, &callback, sizeof(callback));
	ERR_FAIL_COND_V(result != noErr, FAILED);

	result = AudioUnitInitialize(input_unit);
	ERR_FAIL_COND_V(result != noErr, FAILED);

	return OK;
}
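
/*
 * Hedged sketch, not the driver's actual implementation: the general shape of
 * an AURenderCallback like the input_callback registered in capture_init()
 * above. It pulls captured frames out of the AUHAL unit with AudioUnitRender()
 * into a stack-backed AudioBufferList matching the 16-bit client format; what
 * a real driver does with the samples afterwards is omitted. "CaptureState"
 * and "capture_input_sketch" are hypothetical names, and the surrounding
 * AudioUnit/CoreAudio includes are assumed.
 */
struct CaptureState {
	AudioUnit input_unit;
	UInt32 capture_channels;
};

static OSStatus capture_input_sketch(void *inRefCon,
		AudioUnitRenderActionFlags *ioActionFlags,
		const AudioTimeStamp *inTimeStamp,
		UInt32 inBusNumber,
		UInt32 inNumberFrames,
		AudioBufferList *ioData) {
	CaptureState *state = (CaptureState *)inRefCon;

	// One interleaved 16-bit scratch buffer; bail out if the request would overflow it.
	int16_t samples[4096];
	if (inNumberFrames * state->capture_channels > 4096) {
		return kAudio_ParamError;
	}

	AudioBufferList bufferList;
	bufferList.mNumberBuffers = 1;
	bufferList.mBuffers[0].mNumberChannels = state->capture_channels;
	bufferList.mBuffers[0].mDataByteSize = inNumberFrames * state->capture_channels * sizeof(int16_t);
	bufferList.mBuffers[0].mData = samples;

	// Pull the captured frames out of the AUHAL unit in the client format set above.
	OSStatus result = AudioUnitRender(state->input_unit, ioActionFlags, inTimeStamp,
			inBusNumber, inNumberFrames, &bufferList);
	if (result == noErr) {
		// Forward bufferList.mBuffers[0] to the engine's capture buffer here.
	}
	return result;
}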