CAUOutputDevice *CCoreAudioGraph::CreateUnit(AEAudioFormat &format)
{
  if (!m_audioUnit || !m_mixerUnit)
    return NULL;

  std::string formatString;
  AudioStreamBasicDescription inputFormat;
  AudioStreamBasicDescription outputFormat;
  OSStatus ret;

  int busNumber = GetFreeBus();
  if (busNumber == INVALID_BUS)
    return NULL;

  // create output unit
  CAUOutputDevice *outputUnit = new CAUOutputDevice();
  if (!outputUnit->Open(m_audioGraph,
    kAudioUnitType_FormatConverter, kAudioUnitSubType_AUConverter, kAudioUnitManufacturer_Apple))
    goto error;

  m_audioUnit->GetFormatDesc(format, &inputFormat);

  // get the format from the mixer
  if (!m_mixerUnit->GetFormat(&outputFormat, kAudioUnitScope_Input, kOutputBus))
    goto error;

  if (!outputUnit->SetFormat(&outputFormat, kAudioUnitScope_Output, kOutputBus))
    goto error;

  if (!outputUnit->SetFormat(&inputFormat, kAudioUnitScope_Input, kOutputBus))
    goto error;

  ret = AUGraphConnectNodeInput(m_audioGraph, outputUnit->GetNode(), 0, m_mixerUnit->GetNode(), busNumber);
  if (ret)
  {
    CLog::Log(LOGERROR, "CCoreAudioGraph::CreateUnit: "
      "Error connecting outputUnit. Error = %s", GetError(ret).c_str());
    goto error;
  }

  // TODO: setup mixmap, get free bus number for connection
  outputUnit->SetBus(busNumber);

  AUGraphUpdate(m_audioGraph, NULL);

  printf("Add unit\n\n");
  ShowGraph();
  printf("\n");

  CLog::Log(LOGINFO, "CCoreAudioGraph::CreateUnit: Input Stream Format %s",
    StreamDescriptionToString(inputFormat, formatString));
  CLog::Log(LOGINFO, "CCoreAudioGraph::CreateUnit: Output Stream Format %s",
    StreamDescriptionToString(outputFormat, formatString));

  m_auUnitList.push_back(outputUnit);

  return outputUnit;

error:
  delete outputUnit;
  return NULL;
}
// TODO: Should it even be possible to change both the
// physical and virtual formats, since the devices do it themselves?
void CCoreAudioStream::Close(bool restore)
{
  if (!m_StreamId)
    return;

  std::string formatString;

  // remove the physical/virtual property listeners before we make changes
  // that will trigger callbacks that we do not care about.
  AudioObjectPropertyAddress propertyAOPA;
  propertyAOPA.mScope    = kAudioObjectPropertyScopeGlobal;
  propertyAOPA.mElement  = kAudioObjectPropertyElementMaster;
  propertyAOPA.mSelector = kAudioStreamPropertyPhysicalFormat;
  if (AudioObjectRemovePropertyListener(m_StreamId, &propertyAOPA, HardwareStreamListener, this) != noErr)
    CLog::Log(LOGDEBUG, "CCoreAudioStream::Close: Couldn't remove property listener.");

  propertyAOPA.mScope    = kAudioObjectPropertyScopeGlobal;
  propertyAOPA.mElement  = kAudioObjectPropertyElementMaster;
  propertyAOPA.mSelector = kAudioStreamPropertyVirtualFormat;
  if (AudioObjectRemovePropertyListener(m_StreamId, &propertyAOPA, HardwareStreamListener, this) != noErr)
    CLog::Log(LOGDEBUG, "CCoreAudioStream::Close: Couldn't remove property listener.");

  // Revert any format changes we made
  if (restore && m_OriginalVirtualFormat.mFormatID && m_StreamId)
  {
    CLog::Log(LOGDEBUG, "CCoreAudioStream::Close: "
      "Restoring original virtual format for stream 0x%04x. (%s)",
      (uint)m_StreamId, StreamDescriptionToString(m_OriginalVirtualFormat, formatString));
    AudioStreamBasicDescription setFormat = m_OriginalVirtualFormat;
    SetVirtualFormat(&setFormat);
  }
  if (restore && m_OriginalPhysicalFormat.mFormatID && m_StreamId)
  {
    CLog::Log(LOGDEBUG, "CCoreAudioStream::Close: "
      "Restoring original physical format for stream 0x%04x. (%s)",
      (uint)m_StreamId, StreamDescriptionToString(m_OriginalPhysicalFormat, formatString));
    AudioStreamBasicDescription setFormat = m_OriginalPhysicalFormat;
    SetPhysicalFormat(&setFormat);
  }

  m_OriginalVirtualFormat.mFormatID  = 0;
  m_OriginalPhysicalFormat.mFormatID = 0;
  CLog::Log(LOGDEBUG, "CCoreAudioStream::Close: Closed stream 0x%04x.", (uint)m_StreamId);
  m_StreamId = 0;
}
bool CCoreAudioStream::SetPhysicalFormat(AudioStreamBasicDescription* pDesc)
{
  if (!pDesc || !m_StreamId)
    return false;

  std::string formatString;

  if (!m_OriginalPhysicalFormat.mFormatID)
  {
    // Store the original format (as we found it) so that it can be restored later
    if (!GetPhysicalFormat(&m_OriginalPhysicalFormat))
    {
      CLog::Log(LOGERROR, "CCoreAudioStream::SetPhysicalFormat: "
        "Unable to retrieve current physical format for stream 0x%04x.", (uint)m_StreamId);
      return false;
    }
  }
  m_physical_format_event.Reset();
  OSStatus ret = AudioStreamSetProperty(m_StreamId, NULL, 0,
    kAudioStreamPropertyPhysicalFormat, sizeof(AudioStreamBasicDescription), pDesc);
  if (ret)
  {
    CLog::Log(LOGERROR, "CCoreAudioStream::SetPhysicalFormat: "
      "Unable to set physical format for stream 0x%04x. Error = %s",
      (uint)m_StreamId, GetError(ret).c_str());
    return false;
  }

  // AudioStreamSetProperty is not only asynchronous,
  // it is also not atomic in its behaviour.
  // Therefore we check up to ten times before we really give up.
  // FIXME: failing isn't actually implemented yet.
  for (int i = 0; i < 10; ++i)
  {
    AudioStreamBasicDescription checkPhysicalFormat;
    if (!GetPhysicalFormat(&checkPhysicalFormat))
    {
      CLog::Log(LOGERROR, "CCoreAudioStream::SetPhysicalFormat: "
        "Unable to retrieve current physical format for stream 0x%04x.", (uint)m_StreamId);
      return false;
    }
    if (checkPhysicalFormat.mSampleRate      == pDesc->mSampleRate &&
        checkPhysicalFormat.mFormatID        == pDesc->mFormatID &&
        checkPhysicalFormat.mFramesPerPacket == pDesc->mFramesPerPacket)
    {
      // The right format is now active.
      CLog::Log(LOGDEBUG, "CCoreAudioStream::SetPhysicalFormat: "
        "Physical format for stream 0x%04x now active (%s)",
        (uint)m_StreamId, StreamDescriptionToString(checkPhysicalFormat, formatString));
      break;
    }
    m_physical_format_event.WaitMSec(100);
  }
  return true;
}
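// SetVirtualFormat, called from Close() above, is not included in this excerpt. The following is
// a minimal sketch, assuming it mirrors the SetPhysicalFormat pattern just shown: remember the
// original format once, set the kAudioStreamPropertyVirtualFormat property, then poll until the
// hardware reports the new format or the retries run out. Everything beyond that assumption is
// illustrative only, not the project's actual implementation.
bool CCoreAudioStream::SetVirtualFormat(AudioStreamBasicDescription* pDesc)
{
  if (!pDesc || !m_StreamId)
    return false;

  std::string formatString;

  if (!m_OriginalVirtualFormat.mFormatID)
  {
    // Store the original format (as we found it) so that it can be restored later
    if (!GetVirtualFormat(&m_OriginalVirtualFormat))
    {
      CLog::Log(LOGERROR, "CCoreAudioStream::SetVirtualFormat: "
        "Unable to retrieve current virtual format for stream 0x%04x.", (uint)m_StreamId);
      return false;
    }
  }
  m_virtual_format_event.Reset();

  AudioObjectPropertyAddress propertyAddress;
  propertyAddress.mScope    = kAudioObjectPropertyScopeGlobal;
  propertyAddress.mElement  = kAudioObjectPropertyElementMaster;
  propertyAddress.mSelector = kAudioStreamPropertyVirtualFormat;

  OSStatus ret = AudioObjectSetPropertyData(m_StreamId, &propertyAddress,
    0, NULL, sizeof(AudioStreamBasicDescription), pDesc);
  if (ret)
  {
    CLog::Log(LOGERROR, "CCoreAudioStream::SetVirtualFormat: "
      "Unable to set virtual format for stream 0x%04x. Error = %s",
      (uint)m_StreamId, GetError(ret).c_str());
    return false;
  }

  // As with the physical format, the change is asynchronous: poll until it takes effect.
  for (int i = 0; i < 10; ++i)
  {
    AudioStreamBasicDescription checkVirtualFormat;
    if (!GetVirtualFormat(&checkVirtualFormat))
      return false;
    if (checkVirtualFormat.mSampleRate == pDesc->mSampleRate &&
        checkVirtualFormat.mFormatID   == pDesc->mFormatID)
    {
      CLog::Log(LOGDEBUG, "CCoreAudioStream::SetVirtualFormat: "
        "Virtual format for stream 0x%04x now active (%s)",
        (uint)m_StreamId, StreamDescriptionToString(checkVirtualFormat, formatString));
      break;
    }
    m_virtual_format_event.WaitMSec(100);
  }
  return true;
}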
OSStatus CCoreAudioStream::HardwareStreamListener(AudioObjectID inObjectID,
  UInt32 inNumberAddresses, const AudioObjectPropertyAddress inAddresses[], void *inClientData)
{
  CCoreAudioStream *ca_stream = (CCoreAudioStream*)inClientData;

  for (UInt32 i = 0; i < inNumberAddresses; i++)
  {
    if (inAddresses[i].mSelector == kAudioStreamPropertyPhysicalFormat)
    {
      AudioStreamBasicDescription actualFormat;
      UInt32 propertySize = sizeof(AudioStreamBasicDescription);
      // hardware physical format has changed.
      if (AudioObjectGetPropertyData(ca_stream->m_StreamId, &inAddresses[i],
        0, NULL, &propertySize, &actualFormat) == noErr)
      {
        std::string formatString;
        CLog::Log(LOGINFO, "CCoreAudioStream::HardwareStreamListener: "
          "Hardware physical format changed to %s",
          StreamDescriptionToString(actualFormat, formatString));
        ca_stream->m_physical_format_event.Set();
      }
    }
    else if (inAddresses[i].mSelector == kAudioStreamPropertyVirtualFormat)
    {
      // hardware virtual format has changed.
      AudioStreamBasicDescription actualFormat;
      UInt32 propertySize = sizeof(AudioStreamBasicDescription);
      if (AudioObjectGetPropertyData(ca_stream->m_StreamId, &inAddresses[i],
        0, NULL, &propertySize, &actualFormat) == noErr)
      {
        std::string formatString;
        CLog::Log(LOGINFO, "CCoreAudioStream::HardwareStreamListener: "
          "Hardware virtual format changed to %s",
          StreamDescriptionToString(actualFormat, formatString));
        ca_stream->m_virtual_format_event.Set();
      }
    }
  }
  return noErr;
}
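// Illustrative sketch (not from the source): the listener above only fires once it has been
// registered for the stream's physical and virtual format properties, mirroring the removal done
// in Close(). The source excerpt shows CCoreAudioStream::Open(AudioStreamID) being called but not
// its body, so the placement and surrounding details here are assumptions; only the
// AudioObjectAddPropertyListener call and the property selectors are standard Core Audio.
bool CCoreAudioStream::Open(AudioStreamID streamId)
{
  m_StreamId = streamId;

  AudioObjectPropertyAddress propertyAOPA;
  propertyAOPA.mScope   = kAudioObjectPropertyScopeGlobal;
  propertyAOPA.mElement = kAudioObjectPropertyElementMaster;

  // listen for physical format changes so SetPhysicalFormat can wait on m_physical_format_event
  propertyAOPA.mSelector = kAudioStreamPropertyPhysicalFormat;
  if (AudioObjectAddPropertyListener(m_StreamId, &propertyAOPA, HardwareStreamListener, this) != noErr)
    CLog::Log(LOGDEBUG, "CCoreAudioStream::Open: Couldn't register physical format listener.");

  // listen for virtual format changes so SetVirtualFormat can wait on m_virtual_format_event
  propertyAOPA.mSelector = kAudioStreamPropertyVirtualFormat;
  if (AudioObjectAddPropertyListener(m_StreamId, &propertyAOPA, HardwareStreamListener, this) != noErr)
    CLog::Log(LOGDEBUG, "CCoreAudioStream::Open: Couldn't register virtual format listener.");

  // anything else the real Open() does (format caching, logging, ...) is omitted here
  return true;
}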
bool CCoreAudioStream::SetPhysicalFormat(AudioStreamBasicDescription* pDesc)
{
  if (!pDesc || !m_StreamId)
    return false;

  std::string formatString;

  // suppress callbacks for the default output device change
  // for the next 2 seconds because setting the format
  // might trigger a change (when setting/unsetting an encoded
  // passthrough format)
  CCoreAudioDevice::SuppressDefaultOutputDeviceCB(2000);

  if (!m_OriginalPhysicalFormat.mFormatID)
  {
    // Store the original format (as we found it) so that it can be restored later
    if (!GetPhysicalFormat(&m_OriginalPhysicalFormat))
    {
      CLog::Log(LOGERROR, "CCoreAudioStream::SetPhysicalFormat: "
        "Unable to retrieve current physical format for stream 0x%04x.", (uint)m_StreamId);
      return false;
    }
  }
  m_physical_format_event.Reset();

  AudioObjectPropertyAddress propertyAddress;
  propertyAddress.mScope    = kAudioObjectPropertyScopeGlobal;
  propertyAddress.mElement  = kAudioObjectPropertyElementMaster;
  propertyAddress.mSelector = kAudioStreamPropertyPhysicalFormat;

  UInt32 propertySize = sizeof(AudioStreamBasicDescription);
  OSStatus ret = AudioObjectSetPropertyData(m_StreamId, &propertyAddress, 0, NULL, propertySize, pDesc);
  if (ret)
  {
    CLog::Log(LOGERROR, "CCoreAudioStream::SetPhysicalFormat: "
      "Unable to set physical format for stream 0x%04x. Error = %s",
      (uint)m_StreamId, GetError(ret).c_str());
    return false;
  }

  // Setting the physical format is not only asynchronous,
  // it is also not atomic in its behaviour.
  // Therefore we check up to ten times before we really give up.
  // FIXME: failing isn't actually implemented yet.
  for (int i = 0; i < 10; ++i)
  {
    AudioStreamBasicDescription checkPhysicalFormat;
    if (!GetPhysicalFormat(&checkPhysicalFormat))
    {
      CLog::Log(LOGERROR, "CCoreAudioStream::SetPhysicalFormat: "
        "Unable to retrieve current physical format for stream 0x%04x.", (uint)m_StreamId);
      return false;
    }
    if (checkPhysicalFormat.mSampleRate       == pDesc->mSampleRate &&
        checkPhysicalFormat.mFormatID         == pDesc->mFormatID &&
        checkPhysicalFormat.mFramesPerPacket  == pDesc->mFramesPerPacket &&
        checkPhysicalFormat.mChannelsPerFrame == pDesc->mChannelsPerFrame)
    {
      // The right format is now active.
      CLog::Log(LOGDEBUG, "CCoreAudioStream::SetPhysicalFormat: "
        "Physical format for stream 0x%04x now active (%s)",
        (uint)m_StreamId, StreamDescriptionToString(checkPhysicalFormat, formatString));
      break;
    }
    m_physical_format_event.WaitMSec(100);
  }
  return true;
}
bool CCoreAudioGraph::Open(ICoreAudioSource *pSource, AEAudioFormat &format, bool allowMixing)
{
  OSStatus ret;
  AudioStreamBasicDescription inputFormat;
  AudioStreamBasicDescription outputFormat;

  m_allowMixing = allowMixing;

  ret = NewAUGraph(&m_audioGraph);
  if (ret)
  {
    CLog::Log(LOGERROR, "CCoreAudioGraph::Open: Error creating audio graph. Error = %s", GetError(ret).c_str());
    return false;
  }
  ret = AUGraphOpen(m_audioGraph);
  if (ret)
  {
    CLog::Log(LOGERROR, "CCoreAudioGraph::Open: Error opening audio graph. Error = %s", GetError(ret).c_str());
    return false;
  }

  // get output unit
  if (m_audioUnit)
  {
    CLog::Log(LOGERROR, "CCoreAudioGraph::Open: Error audio unit already open. double call?");
    return false;
  }
  m_audioUnit = new CAUOutputDevice();
  if (!m_audioUnit->Open(m_audioGraph,
    kAudioUnitType_Output, kAudioUnitSubType_RemoteIO, kAudioUnitManufacturer_Apple))
    return false;
  if (!m_audioUnit->EnableInputOuput())
    return false;

  m_audioUnit->GetFormatDesc(format, &inputFormat);

  //if (!allowMixing)
  //{
    if (!m_audioUnit->SetFormat(&inputFormat, kAudioUnitScope_Input, kOutputBus))
      return false;
    if (!m_audioUnit->SetFormat(&inputFormat, kAudioUnitScope_Output, kInputBus))
      return false;
  //}

  if (allowMixing)
  {
    // get mixer unit
    if (m_mixerUnit)
    {
      CLog::Log(LOGERROR, "CCoreAudioGraph::Open: Error mixer unit already open. double call?");
      return false;
    }
    m_mixerUnit = new CAUMultiChannelMixer();
    if (!m_mixerUnit->Open(m_audioGraph,
      kAudioUnitType_Mixer, kAudioUnitSubType_MultiChannelMixer, kAudioUnitManufacturer_Apple))
      return false;

    // set number of input buses
    if (!m_mixerUnit->SetInputBusCount(MAX_CONNECTION_LIMIT))
      return false;

    //if (!m_mixerUnit->SetFormat(&fmt, kAudioUnitScope_Output, kOutputBus))
    //  return false;

    m_mixerUnit->SetBus(0);

    if (!m_audioUnit->GetFormat(&outputFormat, kAudioUnitScope_Input, kOutputBus))
      return false;

    /*
    if (!m_mixerUnit->SetInputBusFormat(MAX_CONNECTION_LIMIT, &outputFormat))
      return false;
    */

    ret = AUGraphConnectNodeInput(m_audioGraph, m_mixerUnit->GetNode(), 0, m_audioUnit->GetNode(), 0);
    if (ret)
    {
      CLog::Log(LOGERROR, "CCoreAudioGraph::Open: Error connecting m_mixerNode. Error = %s", GetError(ret).c_str());
      return false;
    }

    // get input converter unit
    if (m_inputUnit)
    {
      CLog::Log(LOGERROR, "CCoreAudioGraph::Open: Error input unit already open. double call?");
      return false;
    }
    m_inputUnit = new CAUOutputDevice();
    if (!m_inputUnit->Open(m_audioGraph,
      kAudioUnitType_FormatConverter, kAudioUnitSubType_AUConverter, kAudioUnitManufacturer_Apple))
      return false;

    if (!m_inputUnit->SetFormat(&inputFormat, kAudioUnitScope_Input, kOutputBus))
      return false;

    /*
    if (!m_inputUnit->SetFormat(&outputFormat, kAudioUnitScope_Output, kOutputBus))
      return false;
    */

    // configure output unit
    int busNumber = GetFreeBus();

    ret = AUGraphConnectNodeInput(m_audioGraph, m_inputUnit->GetNode(), 0, m_mixerUnit->GetNode(), busNumber);
    if (ret)
    {
      CLog::Log(LOGERROR, "CCoreAudioGraph::Open: Error connecting m_converterNode. Error = %s", GetError(ret).c_str());
      return false;
    }

    m_inputUnit->SetBus(busNumber);

    ret = AUGraphUpdate(m_audioGraph, NULL);
    if (ret)
    {
      CLog::Log(LOGERROR, "CCoreAudioGraph::Open: Error updating graph. Error = %s", GetError(ret).c_str());
      return false;
    }
    ret = AUGraphInitialize(m_audioGraph);
    if (ret)
    {
      CLog::Log(LOGERROR, "CCoreAudioGraph::Open: Error initializing graph. Error = %s", GetError(ret).c_str());
      return false;
    }

    // Regenerate audio format and copy format for the Output AU
  }

  ret = AUGraphUpdate(m_audioGraph, NULL);
  if (ret)
  {
    CLog::Log(LOGERROR, "CCoreAudioGraph::Open: Error updating graph. Error = %s", GetError(ret).c_str());
    return false;
  }

  std::string formatString;
  AudioStreamBasicDescription inputDesc_end, outputDesc_end;
  m_audioUnit->GetFormat(&inputDesc_end, kAudioUnitScope_Input, kOutputBus);
  m_audioUnit->GetFormat(&outputDesc_end, kAudioUnitScope_Output, kInputBus);
  CLog::Log(LOGINFO, "CCoreAudioGraph::Open: Input Stream Format %s",
    StreamDescriptionToString(inputDesc_end, formatString));
  CLog::Log(LOGINFO, "CCoreAudioGraph::Open: Output Stream Format %s",
    StreamDescriptionToString(outputDesc_end, formatString));

  if (m_mixerUnit)
  {
    m_mixerUnit->GetFormat(&inputDesc_end, kAudioUnitScope_Input, kOutputBus);
    m_mixerUnit->GetFormat(&outputDesc_end, kAudioUnitScope_Output, kOutputBus);
    CLog::Log(LOGINFO, "CCoreAudioGraph::Open: Input Stream Format %s",
      StreamDescriptionToString(inputDesc_end, formatString));
    CLog::Log(LOGINFO, "CCoreAudioGraph::Open: Output Stream Format %s",
      StreamDescriptionToString(outputDesc_end, formatString));
  }

  if (m_inputUnit)
  {
    m_inputUnit->GetFormat(&inputDesc_end, kAudioUnitScope_Input, kOutputBus);
    m_inputUnit->GetFormat(&outputDesc_end, kAudioUnitScope_Output, kOutputBus);
    CLog::Log(LOGINFO, "CCoreAudioGraph::Open: Input Stream Format %s",
      StreamDescriptionToString(inputDesc_end, formatString));
    CLog::Log(LOGINFO, "CCoreAudioGraph::Open: Output Stream Format %s",
      StreamDescriptionToString(outputDesc_end, formatString));
  }

  ret = AUGraphInitialize(m_audioGraph);
  if (ret)
  {
    CLog::Log(LOGERROR, "CCoreAudioGraph::Open: Error initializing graph. Error = %s", GetError(ret).c_str());
    return false;
  }

  UInt32 bufferFrames = m_audioUnit->GetBufferFrameSize();
  m_audioUnit->SetMaxFramesPerSlice(bufferFrames);
  if (m_inputUnit)
    m_inputUnit->SetMaxFramesPerSlice(bufferFrames);

  SetInputSource(pSource);

  ShowGraph();

  return Start();
}
bool CCoreAudioAEHALOSX::InitializeEncoded(AudioDeviceID outputDevice, AEAudioFormat &format)
{
  std::string formatString;
  AudioStreamID outputStream = 0;
  AudioStreamBasicDescription outputFormat = {0};

  // Fetch a list of the streams defined by the output device
  UInt32 streamIndex = 0;
  AudioStreamIdList streams;
  m_AudioDevice->GetStreams(&streams);

  m_OutputBufferIndex = 0;

  while (!streams.empty())
  {
    // Get the next stream
    CCoreAudioStream stream;
    stream.Open(streams.front());
    streams.pop_front(); // We copied it, now we are done with it

    // Probe physical formats
    StreamFormatList physicalFormats;
    stream.GetAvailablePhysicalFormats(&physicalFormats);
    while (!physicalFormats.empty())
    {
      AudioStreamRangedDescription& desc = physicalFormats.front();
      CLog::Log(LOGDEBUG, "CCoreAudioAEHALOSX::InitializeEncoded: "
        "Considering Physical Format: %s", StreamDescriptionToString(desc.mFormat, formatString));

      if (m_rawDataFormat == AE_FMT_LPCM   || m_rawDataFormat == AE_FMT_DTSHD ||
          m_rawDataFormat == AE_FMT_TRUEHD || m_rawDataFormat == AE_FMT_EAC3)
      {
        // check pcm output formats
        unsigned int bps = CAEUtil::DataFormatToBits(AE_FMT_S16NE);
        if (desc.mFormat.mChannelsPerFrame == m_initformat.m_channelLayout.Count() &&
            desc.mFormat.mBitsPerChannel == bps &&
            desc.mFormat.mSampleRate == m_initformat.m_sampleRate)
        {
          outputFormat = desc.mFormat; // Select this format
          m_OutputBufferIndex = streamIndex;
          outputStream = stream.GetId();
          break;
        }
      }
      else
      {
        // check encoded formats
        if (desc.mFormat.mFormatID == kAudioFormat60958AC3 || desc.mFormat.mFormatID == 'IAC3')
        {
          if (desc.mFormat.mChannelsPerFrame == m_initformat.m_channelLayout.Count() &&
              desc.mFormat.mSampleRate == m_initformat.m_sampleRate)
          {
            outputFormat = desc.mFormat; // Select this format
            m_OutputBufferIndex = streamIndex;
            outputStream = stream.GetId();
            break;
          }
        }
      }
      physicalFormats.pop_front();
    }

    // TODO: How do we determine if this is the right stream (not just the right format) to use?
    if (outputFormat.mFormatID)
      break; // We found a suitable format. No need to continue.
    streamIndex++;
  }

  if (!outputFormat.mFormatID) // No match found
  {
    CLog::Log(LOGDEBUG, "CCoreAudioAEHALOSX::InitializeEncoded: "
      "Unable to identify suitable output format.");
    return false;
  }

  CLog::Log(LOGDEBUG, "CCoreAudioAEHALOSX::InitializeEncoded: "
    "Selected stream[%u] - id: 0x%04X, Physical Format: %s",
    m_OutputBufferIndex, (uint)outputStream, StreamDescriptionToString(outputFormat, formatString));

  // TODO: Auto hogging sets this for us. Figure out how/when to turn it off or use it.
  // It appears that leaving this set will also restore the previous stream format when the
  // application exits. If auto hogging is set and we try to set hog mode, we will deadlock.
  // From the SDK docs: "If the AudioDevice is in a non-mixable mode, the HAL will automatically
  // take hog mode on behalf of the first process to start an IOProc."

  // Lock down the device. This MUST be done PRIOR to switching to a non-mixable format, if it is
  // done at all. If it is attempted after the format change, there is a high likelihood of a
  // deadlock. We may need to do this sooner to enable mix-disable (i.e. before setting the stream format).

  // Auto-Hog does not always un-hog the device when changing back to a mixable mode.
  // Handle this on our own until it is fixed.
  CCoreAudioHardware::SetAutoHogMode(false);
  bool autoHog = CCoreAudioHardware::GetAutoHogMode();
  CLog::Log(LOGDEBUG, "CCoreAudioAEHALOSX::InitializeEncoded: "
    "Auto 'hog' mode is set to '%s'.", autoHog ? "On" : "Off");
  if (!autoHog) // Try to handle this ourselves
  {
    // Hog the device if it is not set to be done automatically
    m_AudioDevice->SetHogStatus(true);
    // Try to disable mixing. If we cannot, it may not be a problem
    m_AudioDevice->SetMixingSupport(false);
  }

  m_NumLatencyFrames = m_AudioDevice->GetNumLatencyFrames();

  // Configure the output stream object, this is the one we will keep
  m_OutputStream->Open(outputStream);

  AudioStreamBasicDescription virtualFormat;
  m_OutputStream->GetVirtualFormat(&virtualFormat);
  CLog::Log(LOGDEBUG, "CCoreAudioAEHALOSX::InitializeEncoded: "
    "Previous Virtual Format: %s", StreamDescriptionToString(virtualFormat, formatString));

  AudioStreamBasicDescription previousPhysicalFormat;
  m_OutputStream->GetPhysicalFormat(&previousPhysicalFormat);
  CLog::Log(LOGDEBUG, "CCoreAudioAEHALOSX::InitializeEncoded: "
    "Previous Physical Format: %s", StreamDescriptionToString(previousPhysicalFormat, formatString));

  // Set the active format (the old one will be reverted when we close)
  m_OutputStream->SetPhysicalFormat(&outputFormat);
  m_NumLatencyFrames += m_OutputStream->GetNumLatencyFrames();

  m_OutputStream->GetVirtualFormat(&virtualFormat);
  CLog::Log(LOGDEBUG, "CCoreAudioAEHALOSX::InitializeEncoded: "
    "New Virtual Format: %s", StreamDescriptionToString(virtualFormat, formatString));
  CLog::Log(LOGDEBUG, "CCoreAudioAEHALOSX::InitializeEncoded: "
    "New Physical Format: %s", StreamDescriptionToString(outputFormat, formatString));

  m_allowMixing = false;

  return true;
}
bool CCoreAudioGraph::Open(ICoreAudioSource *pSource, AEAudioFormat &format,
  AudioDeviceID deviceId, bool allowMixing, AudioChannelLayoutTag layoutTag)
{
  AudioStreamBasicDescription fmt = {0};
  AudioStreamBasicDescription inputFormat = {0};
  AudioStreamBasicDescription outputFormat = {0};

  m_deviceId = deviceId;
  m_allowMixing = allowMixing;

  OSStatus ret = NewAUGraph(&m_audioGraph);
  if (ret)
  {
    CLog::Log(LOGERROR, "CCoreAudioGraph::Open: "
      "Error creating audio graph. Error = %s", GetError(ret).c_str());
    return false;
  }
  ret = AUGraphOpen(m_audioGraph);
  if (ret)
  {
    CLog::Log(LOGERROR, "CCoreAudioGraph::Open: "
      "Error opening audio graph. Error = %s", GetError(ret).c_str());
    return false;
  }

  // get output unit
  if (m_audioUnit)
  {
    CLog::Log(LOGERROR, "CCoreAudioGraph::Open: "
      "Error audio unit already open. double call?");
    return false;
  }
  m_audioUnit = new CAUOutputDevice();
  if (!m_audioUnit->Open(m_audioGraph,
    kAudioUnitType_Output, kAudioUnitSubType_HALOutput, kAudioUnitManufacturer_Apple))
    return false;
  m_audioUnit->SetBus(GetFreeBus());

  m_audioUnit->GetFormatDesc(format, &inputFormat, &fmt);

  if (!m_audioUnit->EnableInputOuput())
    return false;

  if (!m_audioUnit->SetCurrentDevice(deviceId))
    return false;

  if (allowMixing)
  {
    delete m_mixMap;
    m_mixMap = CCoreAudioMixMap::CreateMixMap(m_audioUnit, format, layoutTag);

    if (m_mixMap && m_mixMap->IsValid())
    {
      // maximum input channels per input bus
      //fmt.mChannelsPerFrame = MAXIMUM_MIXER_CHANNELS;

      // get input converter unit
      if (m_inputUnit)
      {
        CLog::Log(LOGERROR, "CCoreAudioGraph::Open: Error input unit already open. double call?");
        return false;
      }
      m_inputUnit = new CAUOutputDevice();

      if (!m_inputUnit->Open(m_audioGraph,
        kAudioUnitType_FormatConverter, kAudioUnitSubType_AUConverter, kAudioUnitManufacturer_Apple))
        return false;

      if (!m_inputUnit->SetFormat(&inputFormat, kAudioUnitScope_Input, kOutputBus))
        return false;

      if (!m_inputUnit->SetFormat(&fmt, kAudioUnitScope_Output, kOutputBus))
        return false;

      // get mixer unit
      if (m_mixerUnit)
      {
        CLog::Log(LOGERROR, "CCoreAudioGraph::Open: Error mixer unit already open. double call?");
        return false;
      }
      m_mixerUnit = new CAUMatrixMixer();

      if (!m_mixerUnit->Open(m_audioGraph,
        kAudioUnitType_Mixer, kAudioUnitSubType_MatrixMixer, kAudioUnitManufacturer_Apple))
        return false;

      // set number of input buses
      if (!m_mixerUnit->SetInputBusCount(MAX_CONNECTION_LIMIT))
        return false;
      // set number of output buses
      if (!m_mixerUnit->SetOutputBusCount(1))
        return false;

      if (!m_mixerUnit->SetInputBusFormat(MAX_CONNECTION_LIMIT, &fmt))
        return false;

      if (!m_mixerUnit->SetFormat(&fmt, kAudioUnitScope_Output, kOutputBus))
        return false;

      ret = AUGraphConnectNodeInput(m_audioGraph, m_mixerUnit->GetNode(), 0, m_audioUnit->GetNode(), 0);
      if (ret)
      {
        CLog::Log(LOGERROR, "CCoreAudioGraph::Open: "
          "Error connecting m_mixerNode. Error = %s", GetError(ret).c_str());
        return false;
      }
      m_mixerUnit->SetBus(0);

      // configure output unit
      int busNumber = GetFreeBus();

      ret = AUGraphConnectNodeInput(m_audioGraph, m_inputUnit->GetNode(), 0, m_mixerUnit->GetNode(), busNumber);
      if (ret)
      {
        CLog::Log(LOGERROR, "CCoreAudioGraph::Open: "
          "Error connecting m_converterNode. Error = %s", GetError(ret).c_str());
        return false;
      }

      m_inputUnit->SetBus(busNumber);

      ret = AUGraphUpdate(m_audioGraph, NULL);
      if (ret)
      {
        CLog::Log(LOGERROR, "CCoreAudioGraph::Open: "
          "Error updating graph. Error = %s", GetError(ret).c_str());
        return false;
      }
      ret = AUGraphInitialize(m_audioGraph);
      if (ret)
      {
        CLog::Log(LOGERROR, "CCoreAudioGraph::Open: "
          "Error initializing graph. Error = %s", GetError(ret).c_str());
        return false;
      }

      // Update format structure to reflect the desired format from the mixer.
      // The output format of the mixer is identical to the input format, except for the channel count.
      fmt.mChannelsPerFrame = m_mixMap->GetOutputChannels();

      UInt32 inputNumber = m_inputUnit->GetBus();
      int channelOffset = GetMixerChannelOffset(inputNumber);
      if (!CCoreAudioMixMap::SetMixingMatrix(m_mixerUnit, m_mixMap, &inputFormat, &fmt, channelOffset))
        return false;

      // Regenerate audio format and copy format for the Output AU
      outputFormat = fmt;
    }
    else
    {
      outputFormat = inputFormat;
    }
  }
  else
  {
    outputFormat = inputFormat;
  }

  if (!m_audioUnit->SetFormat(&outputFormat, kAudioUnitScope_Input, kOutputBus))
  {
    CLog::Log(LOGERROR, "CCoreAudioGraph::Open: "
      "Error setting input format on audio device. Channel count %d, set it to %d",
      (int)outputFormat.mChannelsPerFrame, format.m_channelLayout.Count());
    outputFormat.mChannelsPerFrame = format.m_channelLayout.Count();
    if (!m_audioUnit->SetFormat(&outputFormat, kAudioUnitScope_Input, kOutputBus))
      return false;
  }

  std::string formatString;
  // assume we are in dd-wave mode
  if (!m_inputUnit)
  {
    if (!m_audioUnit->SetFormat(&inputFormat, kAudioUnitScope_Output, kInputBus))
    {
      CLog::Log(LOGERROR, "CCoreAudioGraph::Open: "
        "Error setting Device Output Stream Format %s",
        StreamDescriptionToString(inputFormat, formatString));
    }
  }

  ret = AUGraphUpdate(m_audioGraph, NULL);
  if (ret)
  {
    CLog::Log(LOGERROR, "CCoreAudioGraph::Open: "
      "Error updating graph. Error = %s", GetError(ret).c_str());
    return false;
  }

  AudioStreamBasicDescription inputDesc_end, outputDesc_end;
  m_audioUnit->GetFormat(&inputDesc_end, kAudioUnitScope_Input, kOutputBus);
  m_audioUnit->GetFormat(&outputDesc_end, kAudioUnitScope_Output, kInputBus);
  CLog::Log(LOGINFO, "CCoreAudioGraph::Open: Input Stream Format %s",
    StreamDescriptionToString(inputDesc_end, formatString));
  CLog::Log(LOGINFO, "CCoreAudioGraph::Open: Output Stream Format %s",
    StreamDescriptionToString(outputDesc_end, formatString));

  if (m_mixerUnit)
  {
    m_mixerUnit->GetFormat(&inputDesc_end, kAudioUnitScope_Input, kOutputBus);
    m_mixerUnit->GetFormat(&outputDesc_end, kAudioUnitScope_Output, kOutputBus);
    CLog::Log(LOGINFO, "CCoreAudioGraph::Open: Input Stream Format %s",
      StreamDescriptionToString(inputDesc_end, formatString));
    CLog::Log(LOGINFO, "CCoreAudioGraph::Open: Output Stream Format %s",
      StreamDescriptionToString(outputDesc_end, formatString));
  }

  if (m_inputUnit)
  {
    m_inputUnit->GetFormat(&inputDesc_end, kAudioUnitScope_Input, kOutputBus);
    m_inputUnit->GetFormat(&outputDesc_end, kAudioUnitScope_Output, kOutputBus);
    CLog::Log(LOGINFO, "CCoreAudioGraph::Open: Input Stream Format %s",
      StreamDescriptionToString(inputDesc_end, formatString));
    CLog::Log(LOGINFO, "CCoreAudioGraph::Open: Output Stream Format %s",
      StreamDescriptionToString(outputDesc_end, formatString));
  }

  ret = AUGraphInitialize(m_audioGraph);
  if (ret)
  {
    CLog::Log(LOGERROR, "CCoreAudioGraph::Open: "
      "Error initializing graph. Error = %s", GetError(ret).c_str());
    return false;
  }

  UInt32 bufferFrames = m_audioUnit->GetBufferFrameSize();
  if (!m_audioUnit->SetMaxFramesPerSlice(bufferFrames))
    return false;

  SetInputSource(pSource);

  return Start();
}
bool AEDeviceEnumerationOSX::FindSuitableFormatForStream(UInt32 &streamIdx, const AEAudioFormat &format,
  AudioStreamBasicDescription &outputFormat, EPassthroughMode &passthrough, AudioStreamID &outputStream) const
{
  CLog::Log(LOGDEBUG, "%s: Finding stream for format %s",
    __FUNCTION__, CAEUtil::DataFormatToStr(format.m_dataFormat));

  bool formatFound = false;
  float outputScore = 0;
  UInt32 streamIdxStart = streamIdx;
  UInt32 streamIdxEnd = streamIdx + 1;
  UInt32 streamIdxCurrent = streamIdx;
  passthrough = PassthroughModeNone;

  if (streamIdx == INT_MAX)
  {
    streamIdxStart = 0;
    streamIdxEnd = m_caStreamInfos.size();
    streamIdxCurrent = 0;
  }

  if (streamIdxCurrent >= m_caStreamInfos.size())
    return false;

  // loop over all streams or over the given stream (depends on the initial value of param streamIdx)
  for (streamIdxCurrent = streamIdxStart; streamIdxCurrent < streamIdxEnd; streamIdxCurrent++)
  {
    // Probe physical formats
    const StreamFormatList &formats = m_caStreamInfos[streamIdxCurrent].formatList;
    for (StreamFormatList::const_iterator j = formats.begin(); j != formats.end(); ++j)
    {
      AudioStreamBasicDescription formatDesc = j->mFormat;

      // for devices with kAudioStreamAnyRate
      // assume that the user uses a fixed config
      // and knows what he is doing - so we use
      // the requested samplerate here
      if (formatDesc.mSampleRate == kAudioStreamAnyRate)
        formatDesc.mSampleRate = format.m_sampleRate;

      float score = ScoreFormat(formatDesc, format);

      std::string formatString;
      CLog::Log(LOGDEBUG, "%s: Physical Format: %s rated %f",
        __FUNCTION__, StreamDescriptionToString(formatDesc, formatString), score);

      if (score > outputScore)
      {
        if (score > 10000)
        {
          if (score > FLT_MAX/2)
            passthrough = PassthroughModeNative;
          else
            passthrough = PassthroughModeBitstream;
        }
        outputScore = score;
        outputFormat = formatDesc;
        outputStream = m_caStreamInfos[streamIdxCurrent].streamID;
        streamIdx = streamIdxCurrent; // return the streamIdx for the caller
        formatFound = true;
      }
    }
  }

  if (m_isPlanar)
    outputFormat.mChannelsPerFrame = std::min((size_t)format.m_channelLayout.Count(), m_caStreamInfos.size());

  return formatFound;
}
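// Illustrative usage sketch (not from the source): how a caller might drive
// FindSuitableFormatForStream. Only the function signature above is taken from the code; the
// surrounding wiring, the "devEnum" instance, and the helper function name are assumptions.
void ExampleSelectOutputFormat(const AEDeviceEnumerationOSX &devEnum, const AEAudioFormat &format)
{
  UInt32 streamIdx = INT_MAX;                      // INT_MAX means: search across all streams
  AudioStreamBasicDescription outputFormat = {0};
  EPassthroughMode passthrough = PassthroughModeNone;
  AudioStreamID outputStream = 0;

  if (devEnum.FindSuitableFormatForStream(streamIdx, format, outputFormat, passthrough, outputStream))
  {
    // streamIdx now holds the index of the winning stream, outputFormat the best-scoring
    // physical format, outputStream its AudioStreamID, and passthrough tells the caller
    // whether native or bitstream passthrough applies.
  }
}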