Example #1
bool audio::orchestra::api::Core::open(uint32_t _device,
                                       audio::orchestra::mode _mode,
                                       uint32_t _channels,
                                       uint32_t _firstChannel,
                                       uint32_t _sampleRate,
                                       audio::format _format,
                                       uint32_t *_bufferSize,
                                       const audio::orchestra::StreamOptions& _options) {
	// Get device ID
	uint32_t nDevices = getDeviceCount();
	if (nDevices == 0) {
		// This should not happen because a check is made before this function is called.
		ATA_ERROR("no devices found!");
		return false;
	}
	if (_device >= nDevices) {
		// This should not happen because a check is made before this function is called.
		ATA_ERROR("device ID is invalid!");
		return false;
	}
	AudioDeviceID deviceList[ nDevices ];
	uint32_t dataSize = sizeof(AudioDeviceID) * nDevices;
	AudioObjectPropertyAddress property = {
		kAudioHardwarePropertyDevices,
		kAudioObjectPropertyScopeGlobal,
		kAudioObjectPropertyElementMaster
	};
	OSStatus result = AudioObjectGetPropertyData(kAudioObjectSystemObject,
	                                             &property,
	                                             0,
	                                             nullptr,
	                                             &dataSize,
	                                             (void *) &deviceList);
	if (result != noErr) {
		ATA_ERROR("OS-X system error getting device IDs.");
		return false;
	}
	AudioDeviceID id = deviceList[ _device ];
	// Setup for stream mode.
	bool isInput = false;
	if (_mode == audio::orchestra::mode_input) {
		isInput = true;
		property.mScope = kAudioDevicePropertyScopeInput;
	} else {
		property.mScope = kAudioDevicePropertyScopeOutput;
	}
	// Get the stream "configuration".
	AudioBufferList	*bufferList = nil;
	dataSize = 0;
	property.mSelector = kAudioDevicePropertyStreamConfiguration;
	result = AudioObjectGetPropertyDataSize(id, &property, 0, nullptr, &dataSize);
	if (    result != noErr
	     || dataSize == 0) {
		ATA_ERROR("system error (" << getErrorCode(result) << ") getting stream configuration info for device (" << _device << ").");
		return false;
	}
	// Allocate the AudioBufferList.
	bufferList = (AudioBufferList *) malloc(dataSize);
	if (bufferList == nullptr) {
		ATA_ERROR("memory error allocating AudioBufferList.");
		return false;
	}
	result = AudioObjectGetPropertyData(id, &property, 0, nullptr, &dataSize, bufferList);
	if (    result != noErr
	     || dataSize == 0) {
		ATA_ERROR("system error (" << getErrorCode(result) << ") getting stream configuration for device (" << _device << ").");
		return false;
	}
	// Search for one or more streams that contain the desired number of
	// channels. CoreAudio devices can have an arbitrary number of
	// streams and each stream can have an arbitrary number of channels.
	// For each stream, a single buffer of interleaved samples is
	// provided. orchestra prefers the use of one stream of interleaved
	// data or multiple consecutive single-channel streams.	However, we
	// now support multiple consecutive multi-channel streams of
	// interleaved data as well.
	uint32_t iStream, offsetCounter = _firstChannel;
	uint32_t nStreams = bufferList->mNumberBuffers;
	bool monoMode = false;
	bool foundStream = false;
	// First check that the device supports the requested number of
	// channels.
	uint32_t deviceChannels = 0;
	for (iStream=0; iStream<nStreams; iStream++) {
		deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
	}
	if (deviceChannels < (_channels + _firstChannel)) {
		free(bufferList);
		ATA_ERROR("the device (" << _device << ") does not support the requested channel count.");
		return false;
	}
	// Look for a single stream meeting our needs.
	uint32_t firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
	for (iStream=0; iStream<nStreams; iStream++) {
		streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
		if (streamChannels >= _channels + offsetCounter) {
			firstStream = iStream;
			channelOffset = offsetCounter;
			foundStream = true;
			break;
		}
		if (streamChannels > offsetCounter) {
			break;
		}
		offsetCounter -= streamChannels;
	}
	// If we didn't find a single stream above, then we should be able
	// to meet the channel specification with multiple streams.
	if (foundStream == false) {
		monoMode = true;
		offsetCounter = _firstChannel;
		for (iStream=0; iStream<nStreams; iStream++) {
			streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
			if (streamChannels > offsetCounter) {
				break;
			}
			offsetCounter -= streamChannels;
		}
		firstStream = iStream;
		channelOffset = offsetCounter;
		int32_t channelCounter = _channels + offsetCounter - streamChannels;
		if (streamChannels > 1) {
			monoMode = false;
		}
		while (channelCounter > 0) {
			streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
			if (streamChannels > 1) {
				monoMode = false;
			}
			channelCounter -= streamChannels;
			streamCount++;
		}
	}
	free(bufferList);
	// Determine the buffer size.
	AudioValueRange	bufferRange;
	dataSize = sizeof(AudioValueRange);
	property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
	result = AudioObjectGetPropertyData(id, &property, 0, nullptr, &dataSize, &bufferRange);
	if (result != noErr) {
		ATA_ERROR("system error (" << getErrorCode(result) << ") getting buffer size range for device (" << _device << ").");
		return false;
	}
	if (bufferRange.mMinimum > *_bufferSize) {
		*_bufferSize = (uint64_t) bufferRange.mMinimum;
	} else if (bufferRange.mMaximum < *_bufferSize) {
		*_bufferSize = (uint64_t) bufferRange.mMaximum;
	}
	if (_options.flags.m_minimizeLatency == true) {
		*_bufferSize = (uint64_t) bufferRange.mMinimum;
	}
	// Set the buffer size.	For multiple streams, I'm assuming we only
	// need to make this setting for the master channel.
	uint32_t theSize = (uint32_t) *_bufferSize;
	dataSize = sizeof(uint32_t);
	property.mSelector = kAudioDevicePropertyBufferFrameSize;
	result = AudioObjectSetPropertyData(id, &property, 0, nullptr, dataSize, &theSize);
	if (result != noErr) {
		ATA_ERROR("system error (" << getErrorCode(result) << ") setting the buffer size for device (" << _device << ").");
		return false;
	}
	// If attempting to setup a duplex stream, the bufferSize parameter
	// MUST be the same in both directions!
	*_bufferSize = theSize;
	if (    m_mode == audio::orchestra::mode_output
	     && _mode == audio::orchestra::mode_input
	     && *_bufferSize != m_bufferSize) {
		ATA_ERROR("system error setting buffer size for duplex stream on device (" << _device << ").");
		return false;
	}
	m_bufferSize = *_bufferSize;
	m_nBuffers = 1;
	// Check and if necessary, change the sample rate for the device.
	double nominalRate;
	dataSize = sizeof(double);
	property.mSelector = kAudioDevicePropertyNominalSampleRate;
	result = AudioObjectGetPropertyData(id, &property, 0, nullptr, &dataSize, &nominalRate);
	if (result != noErr) {
		ATA_ERROR("system error (" << getErrorCode(result) << ") getting current sample rate.");
		return false;
	}
	// Only change the sample rate if off by more than 1 Hz.
	if (fabs(nominalRate - (double)_sampleRate) > 1.0) {
		// Set a property listener for the sample rate change
		double reportedRate = 0.0;
		AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
		result = AudioObjectAddPropertyListener(id, &tmp, &rateListener, (void *) &reportedRate);
		if (result != noErr) {
			ATA_ERROR("system error (" << getErrorCode(result) << ") setting sample rate property listener for device (" << _device << ").");
			return false;
		}
		nominalRate = (double) _sampleRate;
		result = AudioObjectSetPropertyData(id, &property, 0, nullptr, dataSize, &nominalRate);
		if (result != noErr) {
			ATA_ERROR("system error (" << getErrorCode(result) << ") setting sample rate for device (" << _device << ").");
			return false;
		}
		// Now wait until the reported nominal rate is what we just set.
		uint32_t microCounter = 0;
		while (reportedRate != nominalRate) {
			microCounter += 5000;
			if (microCounter > 5000000) {
				break;
			}
			std::this_thread::sleep_for(std::chrono::milliseconds(5));
		}
		// Remove the property listener.
		AudioObjectRemovePropertyListener(id, &tmp, &rateListener, (void *) &reportedRate);
		if (microCounter > 5000000) {
			ATA_ERROR("timeout waiting for sample rate update for device (" << _device << ").");
			return false;
		}
	}
	// Now set the stream format for all streams.	Also, check the
	// physical format of the device and change that if necessary.
	AudioStreamBasicDescription	description;
	dataSize = sizeof(AudioStreamBasicDescription);
	property.mSelector = kAudioStreamPropertyVirtualFormat;
	result = AudioObjectGetPropertyData(id, &property, 0, nullptr, &dataSize, &description);
	if (result != noErr) {
		ATA_ERROR("system error (" << getErrorCode(result) << ") getting stream format for device (" << _device << ").");
		return false;
	}
	// Set the sample rate and data format id.	However, only make the
	// change if the sample rate is not within 1.0 of the desired
	// rate and the format is not linear pcm.
	bool updateFormat = false;
	if (fabs(description.mSampleRate - (double)_sampleRate) > 1.0) {
		description.mSampleRate = (double) _sampleRate;
		updateFormat = true;
	}
	if (description.mFormatID != kAudioFormatLinearPCM) {
		description.mFormatID = kAudioFormatLinearPCM;
		updateFormat = true;
	}
	if (updateFormat) {
		result = AudioObjectSetPropertyData(id, &property, 0, nullptr, dataSize, &description);
		if (result != noErr) {
			ATA_ERROR("system error (" << getErrorCode(result) << ") setting sample rate or data format for device (" << _device << ").");
			return false;
		}
	}
	// Now check the physical format.
	property.mSelector = kAudioStreamPropertyPhysicalFormat;
	result = AudioObjectGetPropertyData(id, &property, 0, nullptr,	&dataSize, &description);
	if (result != noErr) {
		ATA_ERROR("system error (" << getErrorCode(result) << ") getting stream physical format for device (" << _device << ").");
		return false;
	}
	//std::cout << "Current physical stream format:" << std::endl;
	//std::cout << "	 mBitsPerChan = " << description.mBitsPerChannel << std::endl;
	//std::cout << "	 aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
	//std::cout << "	 bytesPerFrame = " << description.mBytesPerFrame << std::endl;
	//std::cout << "	 sample rate = " << description.mSampleRate << std::endl;
	if (    description.mFormatID != kAudioFormatLinearPCM
	     || description.mBitsPerChannel < 16) {
		description.mFormatID = kAudioFormatLinearPCM;
		//description.mSampleRate = (double) sampleRate;
		AudioStreamBasicDescription	testDescription = description;
		uint32_t formatFlags;
		// We'll try higher bit rates first and then work our way down.
		std::vector< std::pair<float, uint32_t> > physicalFormats;
		formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
		physicalFormats.push_back(std::pair<float, uint32_t>(32, formatFlags));
		formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
		physicalFormats.push_back(std::pair<float, uint32_t>(32, formatFlags));
		physicalFormats.push_back(std::pair<float, uint32_t>(24, formatFlags));	 // 24-bit packed
		formatFlags &= ~(kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh);
		physicalFormats.push_back(std::pair<float, uint32_t>(24.2, formatFlags)); // 24-bit in 4 bytes, aligned low
		formatFlags |= kAudioFormatFlagIsAlignedHigh;
		physicalFormats.push_back(std::pair<float, uint32_t>(24.4, formatFlags)); // 24-bit in 4 bytes, aligned high
		formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
		physicalFormats.push_back(std::pair<float, uint32_t>(16, formatFlags));
		physicalFormats.push_back(std::pair<float, uint32_t>(8, formatFlags));
		bool setPhysicalFormat = false;
		for(uint32_t i=0; i<physicalFormats.size(); i++) {
			testDescription = description;
			testDescription.mBitsPerChannel = (uint32_t) physicalFormats[i].first;
			testDescription.mFormatFlags = physicalFormats[i].second;
			if (    (24 == (uint32_t)physicalFormats[i].first)
			     && (physicalFormats[i].second & kAudioFormatFlagIsPacked) == 0) {
				testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
			} else {
				testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
			}
			testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
			result = AudioObjectSetPropertyData(id, &property, 0, nullptr, dataSize, &testDescription);
			if (result == noErr) {
				setPhysicalFormat = true;
				//std::cout << "Updated physical stream format:" << std::endl;
				//std::cout << "	 mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
				//std::cout << "	 aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
				//std::cout << "	 bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
				//std::cout << "	 sample rate = " << testDescription.mSampleRate << std::endl;
				break;
			}
		}
		if (!setPhysicalFormat) {
			ATA_ERROR("system error (" << getErrorCode(result) << ") setting physical data format for device (" << _device << ").");
			return false;
		}
	} // done setting virtual/physical formats.
	// Get the stream / device latency.
	uint32_t latency;
	dataSize = sizeof(uint32_t);
	property.mSelector = kAudioDevicePropertyLatency;
	if (AudioObjectHasProperty(id, &property) == true) {
		result = AudioObjectGetPropertyData(id, &property, 0, nullptr, &dataSize, &latency);
		if (result == kAudioHardwareNoError) {
			m_latency[ _mode ] = latency;
		} else {
			ATA_ERROR("system error (" << getErrorCode(result) << ") getting device latency for device (" << _device << ").");
			return false;
		}
	}
	// Byte-swapping: According to AudioHardware.h, the stream data will
	// always be presented in native-endian format, so we should never
	// need to byte swap.
	m_doByteSwap[modeToIdTable(_mode)] = false;
	// From the CoreAudio documentation, PCM data must be supplied as
	// 32-bit floats.
	m_userFormat = _format;
	m_deviceFormat[modeToIdTable(_mode)] = audio::format_float;
	if (streamCount == 1) {
		m_nDeviceChannels[modeToIdTable(_mode)] = description.mChannelsPerFrame;
	} else {
		// multiple streams
		m_nDeviceChannels[modeToIdTable(_mode)] = _channels;
	}
	m_nUserChannels[modeToIdTable(_mode)] = _channels;
	m_channelOffset[modeToIdTable(_mode)] = channelOffset;	// offset within a CoreAudio stream
	m_deviceInterleaved[modeToIdTable(_mode)] = true;
	if (monoMode == true) {
		m_deviceInterleaved[modeToIdTable(_mode)] = false;
	}
	// Set flags for buffer conversion.
	m_doConvertBuffer[modeToIdTable(_mode)] = false;
	if (m_userFormat != m_deviceFormat[modeToIdTable(_mode)]) {
		m_doConvertBuffer[modeToIdTable(_mode)] = true;
	}
	if (m_nUserChannels[modeToIdTable(_mode)] < m_nDeviceChannels[modeToIdTable(_mode)]) {
		m_doConvertBuffer[modeToIdTable(_mode)] = true;
	}
	if (streamCount == 1) {
		if (    m_nUserChannels[modeToIdTable(_mode)] > 1
		     && m_deviceInterleaved[modeToIdTable(_mode)] == false) {
			m_doConvertBuffer[modeToIdTable(_mode)] = true;
		}
	} else if (monoMode) {
		m_doConvertBuffer[modeToIdTable(_mode)] = true;
	}
	m_private->iStream[modeToIdTable(_mode)] = firstStream;
	m_private->nStreams[modeToIdTable(_mode)] = streamCount;
	m_private->id[modeToIdTable(_mode)] = id;
	// Allocate necessary internal buffers.
	uint64_t bufferBytes;
	bufferBytes = m_nUserChannels[modeToIdTable(_mode)] * *_bufferSize * audio::getFormatBytes(m_userFormat);
	//	m_userBuffer[modeToIdTable(_mode)] = (char *) calloc(bufferBytes, 1);
	m_userBuffer[modeToIdTable(_mode)].resize(bufferBytes, 0);
	if (m_userBuffer[modeToIdTable(_mode)].size() == 0) {
		ATA_ERROR("error allocating user buffer memory.");
		goto error;
	}
	// If possible, we will make use of the CoreAudio stream buffers as
	// "device buffers".	However, we can't do this if using multiple
	// streams.
	if (    m_doConvertBuffer[modeToIdTable(_mode)]
	     && m_private->nStreams[modeToIdTable(_mode)] > 1) {
		bool makeBuffer = true;
		bufferBytes = m_nDeviceChannels[modeToIdTable(_mode)] * audio::getFormatBytes(m_deviceFormat[modeToIdTable(_mode)]);
		if (_mode == audio::orchestra::mode_input) {
			if (    m_mode == audio::orchestra::mode_output
			     && m_deviceBuffer) {
				uint64_t bytesOut = m_nDeviceChannels[0] * audio::getFormatBytes(m_deviceFormat[0]);
				if (bufferBytes <= bytesOut) {
					makeBuffer = false;
				}
			}
		}
		if (makeBuffer) {
			bufferBytes *= *_bufferSize;
			if (m_deviceBuffer) {
				free(m_deviceBuffer);
				m_deviceBuffer = nullptr;
			}
			m_deviceBuffer = (char *) calloc(bufferBytes, 1);
			if (m_deviceBuffer == nullptr) {
				ATA_ERROR("error allocating device buffer memory.");
				goto error;
			}
		}
	}
	m_sampleRate = _sampleRate;
	m_device[modeToIdTable(_mode)] = _device;
	m_state = audio::orchestra::state::stopped;
	ATA_VERBOSE("Set state as stopped");
	// Setup the buffer conversion information structure.
	if (m_doConvertBuffer[modeToIdTable(_mode)]) {
		if (streamCount > 1) {
			setConvertInfo(_mode, 0);
		} else {
			setConvertInfo(_mode, channelOffset);
		}
	}
	if (    _mode == audio::orchestra::mode_input
	     && m_mode == audio::orchestra::mode_output
	     && m_device[0] == _device) {
		// Only one callback procedure per device.
		m_mode = audio::orchestra::mode_duplex;
	} else {
#if defined(MAC_OS_X_VERSION_10_5) && (MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5)
		result = AudioDeviceCreateIOProcID(id, &audio::orchestra::api::Core::callbackEvent, this, &m_private->procId[modeToIdTable(_mode)]);
#else
		// deprecated in favor of AudioDeviceCreateIOProcID()
		result = AudioDeviceAddIOProc(id, &audio::orchestra::api::Core::callbackEvent, this);
#endif
		if (result != noErr) {
			ATA_ERROR("system error setting callback for device (" << _device << ").");
			goto error;
		}
		if (    m_mode == audio::orchestra::mode_output
		     && _mode == audio::orchestra::mode_input) {
			m_mode = audio::orchestra::mode_duplex;
		} else {
			m_mode = _mode;
		}
	}
	// Setup the device property listener for over/underload.
	property.mSelector = kAudioDeviceProcessorOverload;
	result = AudioObjectAddPropertyListener(id, &property, &audio::orchestra::api::Core::xrunListener, this);
	return true;
error:
	m_userBuffer[0].clear();
	m_userBuffer[1].clear();
	if (m_deviceBuffer) {
		free(m_deviceBuffer);
		m_deviceBuffer = 0;
	}
	m_state = audio::orchestra::state::closed;
	ATA_VERBOSE("Set state as closed");
	return false;
}
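The open() implementation above registers &rateListener as an AudioObjectPropertyListenerProc while it waits for the nominal sample rate to settle, but the callback itself is not part of this excerpt. Below is a minimal sketch consistent with that call site; it assumes the client-data pointer is the &reportedRate double from open(), which is what the registration implies, and the library's real listener may do more.

// Hedged sketch (not the library's verbatim code): a listener compatible with
// the AudioObjectAddPropertyListener() call in open(). It re-reads the nominal
// sample rate and stores it through the client-data pointer, assumed to be the
// &reportedRate double that open() passes in.
// Assumes <CoreAudio/AudioHardware.h> is included, as in the file above.
static OSStatus rateListener(AudioObjectID _inDevice,
                             UInt32 _nAddresses,
                             const AudioObjectPropertyAddress _properties[],
                             void* _ratePointer) {
	(void)_nAddresses;
	(void)_properties;
	double* rate = reinterpret_cast<double*>(_ratePointer);
	UInt32 dataSize = sizeof(double);
	AudioObjectPropertyAddress property = {
		kAudioDevicePropertyNominalSampleRate,
		kAudioObjectPropertyScopeGlobal,
		kAudioObjectPropertyElementMaster
	};
	// Report the rate the device now advertises so the waiting loop can exit.
	AudioObjectGetPropertyData(_inDevice, &property, 0, nullptr, &dataSize, rate);
	return kAudioHardwareNoError;
}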
// ----------------------------------------------------------------------------
void AudioCoreDriver::deinitialize()
// ----------------------------------------------------------------------------
{
	if (!mIsInitialized)
		return;
	
	stopPlayback();
	
	AudioDeviceDestroyIOProcID(mDeviceID, mEmulationPlaybackProcID);
	AudioDeviceDestroyIOProcID(mDeviceID, mPreRenderedBufferPlaybackProcID);
#if USE_NEW_API
    OSStatus err;
    AudioObjectPropertyAddress prop5 = {
        kAudioDevicePropertyStreamFormat,
        kAudioDevicePropertyScopeOutput,
        kAudioObjectPropertyElementMaster
    };
    err = AudioObjectRemovePropertyListener(mDeviceID, &prop5, streamFormatChanged, (void*)this);
    if (err != kAudioHardwareNoError)
            printf("AudioObjectRemovePropertyListener(streamFormatChanged) failed\n");

    AudioObjectPropertyAddress prop6 = {
        kAudioDeviceProcessorOverload,
        kAudioDevicePropertyScopeOutput,
        kAudioObjectPropertyElementMaster
    };
    err = AudioObjectRemovePropertyListener(mDeviceID, &prop6, overloadDetected, (void*)this);
    if (err != kAudioHardwareNoError)
            printf("AudioObjectRemovePropertyListener(overloadDetected) failed\n");

    AudioObjectPropertyAddress prop7 = {
        kAudioHardwarePropertyDefaultOutputDevice,
        kAudioObjectPropertyScopeGlobal,
        kAudioObjectPropertyElementMaster
    };
    // The default-output-device property belongs to the system object, not to
    // the device, matching the AudioHardwareRemovePropertyListener call in the
    // legacy branch below.
    err = AudioObjectRemovePropertyListener(kAudioObjectSystemObject, &prop7, deviceChanged, (void*)this);
    if (err != kAudioHardwareNoError)
            printf("AudioObjectRemovePropertyListener(deviceChanged) failed\n");
#else
	AudioDeviceRemovePropertyListener(mDeviceID, 0, false, kAudioDevicePropertyStreamFormat, streamFormatChanged);
	AudioDeviceRemovePropertyListener(mDeviceID, 0, false, kAudioDeviceProcessorOverload, overloadDetected);
	AudioHardwareRemovePropertyListener(kAudioHardwarePropertyDefaultOutputDevice, deviceChanged);
#endif

	delete[] mSampleBuffer1;
	mSampleBuffer1 = NULL;
	delete[] mSampleBuffer2;
	mSampleBuffer2 = NULL;
	mSampleBuffer = NULL;
	mRetSampleBuffer = NULL;
	delete[] mSpectrumBuffer;
	mSpectrumBuffer = NULL;
	mIsInitialized = false;
}
void	HP_SystemInfo::Teardown()
{
	if(sIsInitialized)
	{
		CAPropertyAddress theAddress('user');	//	kAudioHardwarePropertyUserSessionIsActiveOrHeadless
		AudioObjectRemovePropertyListener(kAudioObjectSystemObject, &theAddress, SystemListener, NULL);
		
		theAddress.mSelector = 'pmut';	//	kAudioHardwarePropertyProcessIsAudible
		AudioObjectRemovePropertyListener(kAudioObjectSystemObject, &theAddress, SystemListener, NULL);
		
		sIsInitialized = false;
	}
}
Example #4
void CCoreAudioDevice::RegisterDefaultOutputDeviceChangedCB(bool bRegister, AudioObjectPropertyListenerProc callback, void *ref)
{
    OSStatus ret = noErr;
    static int registered = -1;
    
    //only allow registration once
    if (bRegister == (registered == 1))
        return;
    
    AudioObjectPropertyAddress inAdr =
    {
        kAudioHardwarePropertyDefaultOutputDevice,
        kAudioObjectPropertyScopeGlobal,
        kAudioObjectPropertyElementMaster
    };
    
    if (bRegister)
    {
        ret = AudioObjectAddPropertyListener(kAudioObjectSystemObject, &inAdr, defaultOutputDeviceChanged, ref);
        m_defaultOutputDeviceChangedCB = callback;
    }
    else
    {
        ret = AudioObjectRemovePropertyListener(kAudioObjectSystemObject, &inAdr, defaultOutputDeviceChanged, ref);
        m_defaultOutputDeviceChangedCB = NULL;
    }
    
    if (ret != noErr)
        CLog::Log(LOGERROR, "CCoreAudioAE::Deinitialize - error %s a listener callback for default output device changes!", bRegister?"attaching":"removing");
    else
        registered = bRegister ? 1 : 0;
}
void RemoveChangeListeners(PortMixer *mixer) {
    if (mixer->listenersInstalled) {
        for (size_t i=0; i<sizeof(changeListenersAddresses)/sizeof(changeListenersAddresses[0]); i++) {
            AudioObjectRemovePropertyListener(mixer->deviceID, &changeListenersAddresses[i], ChangeListenerProc, mixer);
        }
        mixer->listenersInstalled = false;
    }
}
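RemoveChangeListeners() walks a changeListenersAddresses array that is defined elsewhere in the mixer code. The sketch below is a hypothetical definition, assuming the mixer watches master mute and volume on the output scope; the selectors are real CoreAudio constants, but the actual set the project registers may differ.

// Hypothetical changeListenersAddresses (the real array is not in this excerpt):
// property addresses a mixer would plausibly watch on its device.
static const AudioObjectPropertyAddress changeListenersAddresses[] = {
    { kAudioDevicePropertyMute,         kAudioDevicePropertyScopeOutput, kAudioObjectPropertyElementMaster },
    { kAudioDevicePropertyVolumeScalar, kAudioDevicePropertyScopeOutput, kAudioObjectPropertyElementMaster },
};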
Example #6
// TODO: Should it even be possible to change both the 
// physical and virtual formats, since the devices do it themselves?
void CCoreAudioStream::Close(bool restore)
{
  if (!m_StreamId)
    return;

  std::string formatString;

  // remove the physical/virtual property listeners before we make changes
  // that will trigger callbacks that we do not care about.
  AudioObjectPropertyAddress propertyAOPA;
  propertyAOPA.mScope    = kAudioObjectPropertyScopeGlobal;
  propertyAOPA.mElement  = kAudioObjectPropertyElementMaster;  
  propertyAOPA.mSelector = kAudioStreamPropertyPhysicalFormat;
  if (AudioObjectRemovePropertyListener(m_StreamId, &propertyAOPA, HardwareStreamListener, this) != noErr)
    CLog::Log(LOGDEBUG, "CCoreAudioStream::Close: Couldn't remove property listener.");

  propertyAOPA.mScope    = kAudioObjectPropertyScopeGlobal;
  propertyAOPA.mElement  = kAudioObjectPropertyElementMaster;  
  propertyAOPA.mSelector = kAudioStreamPropertyVirtualFormat;
  if (AudioObjectRemovePropertyListener(m_StreamId, &propertyAOPA, HardwareStreamListener, this) != noErr)
    CLog::Log(LOGDEBUG, "CCoreAudioStream::Close: Couldn't remove property listener.");

  // Revert any format changes we made
  if (restore && m_OriginalVirtualFormat.mFormatID && m_StreamId)
  {
    CLog::Log(LOGDEBUG, "CCoreAudioStream::Close: "
      "Restoring original virtual format for stream 0x%04x. (%s)",
      (uint)m_StreamId, StreamDescriptionToString(m_OriginalVirtualFormat, formatString));
    AudioStreamBasicDescription setFormat = m_OriginalVirtualFormat;
    SetVirtualFormat(&setFormat);
  }
  if (restore && m_OriginalPhysicalFormat.mFormatID && m_StreamId)
  {
    CLog::Log(LOGDEBUG, "CCoreAudioStream::Close: "
      "Restoring original physical format for stream 0x%04x. (%s)",
      (uint)m_StreamId, StreamDescriptionToString(m_OriginalPhysicalFormat, formatString));
    AudioStreamBasicDescription setFormat = m_OriginalPhysicalFormat;
    SetPhysicalFormat(&setFormat);
  }

  m_OriginalVirtualFormat.mFormatID  = 0;
  m_OriginalPhysicalFormat.mFormatID = 0;
  CLog::Log(LOGDEBUG, "CCoreAudioStream::Close: Closed stream 0x%04x.", (uint)m_StreamId);
  m_StreamId = 0;
}
    ~CoreAudioIODeviceType()
    {
        AudioObjectPropertyAddress pa;
        pa.mSelector = kAudioHardwarePropertyDevices;
        pa.mScope = kAudioObjectPropertyScopeWildcard;
        pa.mElement = kAudioObjectPropertyElementWildcard;

        AudioObjectRemovePropertyListener (kAudioObjectSystemObject, &pa, hardwareListenerProc, this);
    }
static void
COREAUDIO_Deinitialize(void)
{
#if MACOSX_COREAUDIO
    AudioObjectRemovePropertyListener(kAudioObjectSystemObject, &devlist_address, device_list_changed, NULL);
    free_audio_device_list(&capture_devs);
    free_audio_device_list(&output_devs);
#endif
}
void AudioDriverCoreAudio::finish() {
	capture_finish();

	if (audio_unit) {
		OSStatus result;

		lock();

		AURenderCallbackStruct callback;
		zeromem(&callback, sizeof(AURenderCallbackStruct));
		result = AudioUnitSetProperty(audio_unit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, kOutputBus, &callback, sizeof(callback));
		if (result != noErr) {
			ERR_PRINT("AudioUnitSetProperty failed");
		}

		if (active) {
			result = AudioOutputUnitStop(audio_unit);
			if (result != noErr) {
				ERR_PRINT("AudioOutputUnitStop failed");
			}

			active = false;
		}

		result = AudioUnitUninitialize(audio_unit);
		if (result != noErr) {
			ERR_PRINT("AudioUnitUninitialize failed");
		}

#ifdef OSX_ENABLED
		AudioObjectPropertyAddress prop;
		prop.mSelector = kAudioHardwarePropertyDefaultOutputDevice;
		prop.mScope = kAudioObjectPropertyScopeGlobal;
		prop.mElement = kAudioObjectPropertyElementMaster;

		result = AudioObjectRemovePropertyListener(kAudioObjectSystemObject, &prop, &output_device_address_cb, this);
		if (result != noErr) {
			ERR_PRINT("AudioObjectRemovePropertyListener failed");
		}
#endif

		result = AudioComponentInstanceDispose(audio_unit);
		if (result != noErr) {
			ERR_PRINT("AudioComponentInstanceDispose failed");
		}

		audio_unit = NULL;
		unlock();
	}

	if (mutex) {
		memdelete(mutex);
		mutex = NULL;
	}
}
    ~CoreAudioInternal()
    {
        AudioObjectPropertyAddress pa;
        pa.mSelector = kAudioObjectPropertySelectorWildcard;
        pa.mScope = kAudioObjectPropertyScopeWildcard;
        pa.mElement = kAudioObjectPropertyElementWildcard;

        AudioObjectRemovePropertyListener (deviceID, &pa, deviceListenerProc, this);

        stop (false);
    }
    ~CoreAudioIODevice()
    {
        close();

        AudioObjectPropertyAddress pa;
        pa.mSelector = kAudioObjectPropertySelectorWildcard;
        pa.mScope = kAudioObjectPropertyScopeWildcard;
        pa.mElement = kAudioObjectPropertyElementWildcard;

        AudioObjectRemovePropertyListener (kAudioObjectSystemObject, &pa, hardwareListenerProc, internal);
    }
Example #12
 virtual ~DeviceChangeListener()
 {
    if (mListening)
    {
       AudioObjectPropertyAddress property_address;
 
       property_address.mSelector = kAudioHardwarePropertyDevices;
       property_address.mScope = kAudioObjectPropertyScopeGlobal;
       property_address.mElement = kAudioObjectPropertyElementMaster;
 
       AudioObjectRemovePropertyListener(kAudioObjectSystemObject,
                                         &property_address,
                                         DeviceChangeListener::Listener,
                                         this);
       mListening = false;
    }
 }
Example #13
void CCoreAudioDevice::RegisterDeviceChangedCB(bool bRegister, AudioObjectPropertyListenerProc callback, void *ref)
{
    OSStatus ret = noErr;
    AudioObjectPropertyAddress inAdr =
    {
        kAudioHardwarePropertyDevices,
        kAudioObjectPropertyScopeGlobal,
        kAudioObjectPropertyElementMaster
    };
    
    if (bRegister)
        ret = AudioObjectAddPropertyListener(kAudioObjectSystemObject, &inAdr, callback, ref);
    else
        ret = AudioObjectRemovePropertyListener(kAudioObjectSystemObject, &inAdr, callback, ref);
    
    if (ret != noErr)
        CLog::Log(LOGERROR, "CCoreAudioAE::Deinitialize - error %s a listener callback for device changes!", bRegister?"attaching":"removing");
}
Example #14
void CCoreAudioDevice::RemoveObjectListenerProc(AudioObjectPropertyListenerProc callback, void* pClientData)
{
  if (!m_DeviceId)
    return;

  AudioObjectPropertyAddress audioProperty;
  audioProperty.mSelector = kAudioObjectPropertySelectorWildcard;
  audioProperty.mScope = kAudioObjectPropertyScopeWildcard;
  audioProperty.mElement = kAudioObjectPropertyElementWildcard;

  OSStatus ret = AudioObjectRemovePropertyListener(m_DeviceId, &audioProperty, callback, pClientData);
  if (ret)
  {
    CLog::Log(LOGERROR, "CCoreAudioDevice::RemoveObjectListenerProc: "
      "Unable to set ObjectListener callback. Error = %s", GetError(ret).c_str());
  }
  m_ObjectListenerProc = NULL;
}
Example #15
static void hotplug_uninit(struct ao *ao)
{
    OSStatus err = noErr;
    for (int i = 0; i < MP_ARRAY_SIZE(hotplug_properties); i++) {
        AudioObjectPropertyAddress addr = {
            hotplug_properties[i],
            kAudioObjectPropertyScopeGlobal,
            kAudioObjectPropertyElementMaster
        };
        err = AudioObjectRemovePropertyListener(
            kAudioObjectSystemObject, &addr, hotplug_cb, (void *)ao);
        if (err != noErr) {
            char *c1 = fourcc_repr(hotplug_properties[i]);
            char *c2 = fourcc_repr(err);
            MP_ERR(ao, "failed to set device listener %s (%s)", c1, c2);
        }
    }
}
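hotplug_uninit() above iterates over a hotplug_properties array that is not shown. A plausible declaration, assuming the hotplug code watches the device list and the default output device (the constants are real CoreAudio selectors, the exact set is a guess), is:

// Hypothetical hotplug_properties: system-object selectors a hotplug watcher
// would typically observe. The actual array in the driver may differ.
static const AudioObjectPropertySelector hotplug_properties[] = {
    kAudioHardwarePropertyDevices,              // a device was added or removed
    kAudioHardwarePropertyDefaultOutputDevice,  // the default output changed
};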
void AudioDriverCoreAudio::capture_finish() {
	if (input_unit) {
		lock();

		AURenderCallbackStruct callback;
		zeromem(&callback, sizeof(AURenderCallbackStruct));
		OSStatus result = AudioUnitSetProperty(input_unit, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, 0, &callback, sizeof(callback));
		if (result != noErr) {
			ERR_PRINT("AudioUnitSetProperty failed");
		}

		result = AudioUnitUninitialize(input_unit);
		if (result != noErr) {
			ERR_PRINT("AudioUnitUninitialize failed");
		}

#ifdef OSX_ENABLED
		AudioObjectPropertyAddress prop;
		prop.mSelector = kAudioHardwarePropertyDefaultInputDevice;
		prop.mScope = kAudioObjectPropertyScopeGlobal;
		prop.mElement = kAudioObjectPropertyElementMaster;

		result = AudioObjectRemovePropertyListener(kAudioObjectSystemObject, &prop, &input_device_address_cb, this);
		if (result != noErr) {
			ERR_PRINT("AudioObjectRemovePropertyListener failed");
		}
#endif

		result = AudioComponentInstanceDispose(input_unit);
		if (result != noErr) {
			ERR_PRINT("AudioComponentInstanceDispose failed");
		}

		input_unit = NULL;
		unlock();
	}
}
static inline gboolean
_unmonitorize_spdif (GstCoreAudio * core_audio)
{
  OSStatus status = noErr;
  gboolean ret = TRUE;

  AudioObjectPropertyAddress propAddress = {
    kAudioDevicePropertyDeviceHasChanged,
    kAudioObjectPropertyScopeGlobal,
    kAudioObjectPropertyElementMaster
  };

  /* Remove the property listener */
  status = AudioObjectRemovePropertyListener (core_audio->device_id,
      &propAddress, _audio_stream_hardware_changed_listener,
      (void *) core_audio);
  if (status != noErr) {
    GST_ERROR_OBJECT (core_audio->osxbuf,
        "AudioObjectRemovePropertyListener failed: %d", (int) status);
    ret = FALSE;
  }

  return ret;
}
static void
COREAUDIO_CloseDevice(_THIS)
{
    if (this->hidden != NULL) {
        if (this->hidden->audioUnitOpened) {
            #if MACOSX_COREAUDIO
            /* Unregister our disconnect callback. */
            AudioObjectRemovePropertyListener(this->hidden->deviceID, &alive_address, device_unplugged, this);
            #endif

            AURenderCallbackStruct callback;
            const AudioUnitElement output_bus = 0;
            const AudioUnitElement input_bus = 1;
            const int iscapture = this->iscapture;
            const AudioUnitElement bus =
                ((iscapture) ? input_bus : output_bus);
            const AudioUnitScope scope =
                ((iscapture) ? kAudioUnitScope_Output :
                 kAudioUnitScope_Input);

            /* stop processing the audio unit */
            AudioOutputUnitStop(this->hidden->audioUnit);

            /* Remove the input callback */
            SDL_memset(&callback, 0, sizeof(AURenderCallbackStruct));
            AudioUnitSetProperty(this->hidden->audioUnit,
                                 kAudioUnitProperty_SetRenderCallback,
                                 scope, bus, &callback, sizeof(callback));
            AudioComponentInstanceDispose(this->hidden->audioUnit);
            this->hidden->audioUnitOpened = 0;
        }
        SDL_free(this->hidden->buffer);
        SDL_free(this->hidden);
        this->hidden = NULL;
    }
}
static gboolean
_audio_stream_change_format (AudioStreamID stream_id,
    AudioStreamBasicDescription format)
{
  OSStatus status = noErr;
  gint i;
  gboolean ret = FALSE;
  AudioStreamBasicDescription cformat;
  PropertyMutex prop_mutex;

  AudioObjectPropertyAddress formatAddress = {
    kAudioStreamPropertyPhysicalFormat,
    kAudioObjectPropertyScopeGlobal,
    kAudioObjectPropertyElementMaster
  };

  GST_DEBUG ("setting stream format: " CORE_AUDIO_FORMAT,
      CORE_AUDIO_FORMAT_ARGS (format));

  /* Condition because SetProperty is asynchronous */
  g_mutex_init (&prop_mutex.lock);
  g_cond_init (&prop_mutex.cond);

  g_mutex_lock (&prop_mutex.lock);

  /* Install the property listener to serialize the operations */
  status = AudioObjectAddPropertyListener (stream_id, &formatAddress,
      _audio_stream_format_listener, (void *) &prop_mutex);
  if (status != noErr) {
    GST_ERROR ("AudioObjectAddPropertyListener failed: %d", (int) status);
    goto done;
  }

  /* Change the format */
  if (!_audio_stream_set_current_format (stream_id, format)) {
    goto done;
  }

  /* The AudioObjectSetProperty is not only asynchronous
   * it is also not atomic in its behaviour.
   * Therefore we check 4 times before we really give up. */
  for (i = 0; i < 4; i++) {
    /* Wait up to 250 ms for the listener to fire; g_cond_wait_until() takes an
     * absolute monotonic end time in microseconds. */
    gint64 end_time = g_get_monotonic_time () + 250 * G_TIME_SPAN_MILLISECOND;

    if (!g_cond_wait_until (&prop_mutex.cond, &prop_mutex.lock, end_time)) {
      GST_LOG ("timeout...");
    }

    if (_audio_stream_get_current_format (stream_id, &cformat)) {
      GST_DEBUG ("current stream format: " CORE_AUDIO_FORMAT,
          CORE_AUDIO_FORMAT_ARGS (cformat));

      if (cformat.mSampleRate == format.mSampleRate &&
          cformat.mFormatID == format.mFormatID &&
          cformat.mFramesPerPacket == format.mFramesPerPacket) {
        /* The right format is now active */
        break;
      }
    }
  }

  if (cformat.mSampleRate != format.mSampleRate ||
      cformat.mFormatID != format.mFormatID ||
      cformat.mFramesPerPacket != format.mFramesPerPacket) {
    goto done;
  }

  ret = TRUE;

done:
  /* Removing the property listener */
  status = AudioObjectRemovePropertyListener (stream_id,
      &formatAddress, _audio_stream_format_listener, (void *) &prop_mutex);
  if (status != noErr) {
    GST_ERROR ("AudioObjectRemovePropertyListener failed: %d", (int) status);
  }
  /* Destroy the lock and condition */
  g_mutex_unlock (&prop_mutex.lock);
  g_mutex_clear (&prop_mutex.lock);
  g_cond_clear (&prop_mutex.cond);

  return ret;
}
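_audio_stream_change_format() blocks on prop_mutex.cond until _audio_stream_format_listener fires, but the listener is not included in this excerpt. A minimal sketch that matches the registration above would simply signal the condition; the PropertyMutex layout and the listener body are assumptions, not the project's verbatim code.

/* Assumed layout of the PropertyMutex used above (declared elsewhere in the
 * real file). */
typedef struct {
  GMutex lock;
  GCond cond;
} PropertyMutex;

/* Hedged sketch of _audio_stream_format_listener: wake up the
 * g_cond_wait_until() loop in _audio_stream_change_format(). */
static OSStatus
_audio_stream_format_listener (AudioObjectID inObjectID,
    UInt32 inNumberAddresses,
    const AudioObjectPropertyAddress inAddresses[],
    void *inClientData)
{
  PropertyMutex *prop_mutex = (PropertyMutex *) inClientData;

  g_mutex_lock (&prop_mutex->lock);
  g_cond_signal (&prop_mutex->cond);
  g_mutex_unlock (&prop_mutex->lock);

  return noErr;
}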
Example #20
/*****************************************************************************
 * AudioStreamChangeFormat: Change i_stream_id to change_format
 *****************************************************************************/
static int AudioStreamChangeFormat( AudioStreamID i_stream_id, AudioStreamBasicDescription change_format )
{
    OSStatus err = noErr;
    int i;
    AudioObjectPropertyAddress property_address;

    static volatile int stream_format_changed;
    stream_format_changed = 0;

    print_format(MSGL_V, "setting stream format:", &change_format);

    /* Install the callback. */
    property_address.mSelector = kAudioStreamPropertyPhysicalFormat;
    property_address.mScope    = kAudioObjectPropertyScopeGlobal;
    property_address.mElement  = kAudioObjectPropertyElementMaster;

    err = AudioObjectAddPropertyListener(i_stream_id,
                                         &property_address,
                                         StreamListener,
                                         (void *)&stream_format_changed);
    if (err != noErr)
    {
        ao_msg(MSGT_AO, MSGL_WARN, "AudioStreamAddPropertyListener failed: [%4.4s]\n", (char *)&err);
        return CONTROL_FALSE;
    }

    /* Change the format. */
    err = SetAudioProperty(i_stream_id,
                           kAudioStreamPropertyPhysicalFormat,
                           sizeof(AudioStreamBasicDescription), &change_format);
    if (err != noErr)
    {
        ao_msg(MSGT_AO, MSGL_WARN, "could not set the stream format: [%4.4s]\n", (char *)&err);
        return CONTROL_FALSE;
    }

    /* The AudioStreamSetProperty is not only asynchronous,
     * it is also not atomic in its behaviour.
     * Therefore we check 5 times before we really give up.
     * FIXME: failing isn't actually implemented yet. */
    for (i = 0; i < 5; ++i)
    {
        AudioStreamBasicDescription actual_format;
        int j;
        for (j = 0; !stream_format_changed && j < 50; ++j)
            usec_sleep(10000);
        if (stream_format_changed)
            stream_format_changed = 0;
        else
            ao_msg(MSGT_AO, MSGL_V, "reached timeout\n" );

        err = GetAudioProperty(i_stream_id,
                               kAudioStreamPropertyPhysicalFormat,
                               sizeof(AudioStreamBasicDescription), &actual_format);

        print_format(MSGL_V, "actual format in use:", &actual_format);
        if (actual_format.mSampleRate == change_format.mSampleRate &&
            actual_format.mFormatID == change_format.mFormatID &&
            actual_format.mFramesPerPacket == change_format.mFramesPerPacket)
        {
            /* The right format is now active. */
            break;
        }
        /* We need to check again. */
    }

    /* Removing the property listener. */
    err = AudioObjectRemovePropertyListener(i_stream_id,
                                            &property_address,
                                            StreamListener,
                                            (void *)&stream_format_changed);
    if (err != noErr)
    {
        ao_msg(MSGT_AO, MSGL_WARN, "AudioStreamRemovePropertyListener failed: [%4.4s]\n", (char *)&err);
        return CONTROL_FALSE;
    }

    return CONTROL_TRUE;
}
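Similarly, AudioStreamChangeFormat() registers a StreamListener that is expected to flip the stream_format_changed flag passed as client data; the callback is not part of this excerpt. A sketch consistent with that contract (the body is an assumption) could be:

/* Hedged sketch of StreamListener: mark that the physical-format property
 * fired so the polling loop in AudioStreamChangeFormat() can stop waiting. */
static OSStatus StreamListener(AudioObjectID inObjectID,
                               UInt32 inNumberAddresses,
                               const AudioObjectPropertyAddress inAddresses[],
                               void *inClientData)
{
    volatile int *stream_format_changed = (volatile int *)inClientData;
    (void)inObjectID;
    for (UInt32 i = 0; i < inNumberAddresses; i++) {
        if (inAddresses[i].mSelector == kAudioStreamPropertyPhysicalFormat) {
            *stream_format_changed = 1;
            break;
        }
    }
    return noErr;
}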
void	CAHALAudioObject::RemovePropertyListener(AudioObjectPropertyAddress& inAddress, AudioObjectPropertyListenerProc inListenerProc, void* inClientData)
{
	OSStatus theError = AudioObjectRemovePropertyListener(mObjectID, &inAddress, inListenerProc, inClientData);
	ThrowIfError(theError, CAException(theError), "CAHALAudioObject::RemovePropertyListener: got an error removing a property listener");
}