bool audio::orchestra::api::Core::open(uint32_t _device, audio::orchestra::mode _mode, uint32_t _channels, uint32_t _firstChannel, uint32_t _sampleRate, audio::format _format, uint32_t *_bufferSize, const audio::orchestra::StreamOptions& _options) { // Get device ID uint32_t nDevices = getDeviceCount(); if (nDevices == 0) { // This should not happen because a check is made before this function is called. ATA_ERROR("no devices found!"); return false; } if (_device >= nDevices) { // This should not happen because a check is made before this function is called. ATA_ERROR("device ID is invalid!"); return false; } AudioDeviceID deviceList[ nDevices/2 ]; uint32_t dataSize = sizeof(AudioDeviceID) * nDevices/2; AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster }; OSStatus result = AudioObjectGetPropertyData(kAudioObjectSystemObject, &property, 0, nullptr, &dataSize, (void *) &deviceList); if (result != noErr) { ATA_ERROR("OS-X system error getting device IDs."); return false; } AudioDeviceID id = deviceList[ _device/2 ]; // Setup for stream mode. bool isInput = false; if (_mode == audio::orchestra::mode_input) { isInput = true; property.mScope = kAudioDevicePropertyScopeInput; } else { property.mScope = kAudioDevicePropertyScopeOutput; } // Get the stream "configuration". AudioBufferList *bufferList = nil; dataSize = 0; property.mSelector = kAudioDevicePropertyStreamConfiguration; result = AudioObjectGetPropertyDataSize(id, &property, 0, nullptr, &dataSize); if ( result != noErr || dataSize == 0) { ATA_ERROR("system error (" << getErrorCode(result) << ") getting stream configuration info for device (" << _device << ")."); return false; } // Allocate the AudioBufferList. 
bufferList = (AudioBufferList *) malloc(dataSize); if (bufferList == nullptr) { ATA_ERROR("memory error allocating AudioBufferList."); return false; } result = AudioObjectGetPropertyData(id, &property, 0, nullptr, &dataSize, bufferList); if ( result != noErr || dataSize == 0) { ATA_ERROR("system error (" << getErrorCode(result) << ") getting stream configuration for device (" << _device << ")."); return false; } // Search for one or more streams that contain the desired number of // channels. CoreAudio devices can have an arbitrary number of // streams and each stream can have an arbitrary number of channels. // For each stream, a single buffer of interleaved samples is // provided. orchestra prefers the use of one stream of interleaved // data or multiple consecutive single-channel streams. However, we // now support multiple consecutive multi-channel streams of // interleaved data as well. uint32_t iStream, offsetCounter = _firstChannel; uint32_t nStreams = bufferList->mNumberBuffers; bool monoMode = false; bool foundStream = false; // First check that the device supports the requested number of // channels. uint32_t deviceChannels = 0; for (iStream=0; iStream<nStreams; iStream++) { deviceChannels += bufferList->mBuffers[iStream].mNumberChannels; } if (deviceChannels < (_channels + _firstChannel)) { free(bufferList); ATA_ERROR("the device (" << _device << ") does not support the requested channel count."); return false; } // Look for a single stream meeting our needs. 
uint32_t firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0; for (iStream=0; iStream<nStreams; iStream++) { streamChannels = bufferList->mBuffers[iStream].mNumberChannels; if (streamChannels >= _channels + offsetCounter) { firstStream = iStream; channelOffset = offsetCounter; foundStream = true; break; } if (streamChannels > offsetCounter) { break; } offsetCounter -= streamChannels; } // If we didn't find a single stream above, then we should be able // to meet the channel specification with multiple streams. if (foundStream == false) { monoMode = true; offsetCounter = _firstChannel; for (iStream=0; iStream<nStreams; iStream++) { streamChannels = bufferList->mBuffers[iStream].mNumberChannels; if (streamChannels > offsetCounter) { break; } offsetCounter -= streamChannels; } firstStream = iStream; channelOffset = offsetCounter; int32_t channelCounter = _channels + offsetCounter - streamChannels; if (streamChannels > 1) { monoMode = false; } while (channelCounter > 0) { streamChannels = bufferList->mBuffers[++iStream].mNumberChannels; if (streamChannels > 1) { monoMode = false; } channelCounter -= streamChannels; streamCount++; } } free(bufferList); // Determine the buffer size. AudioValueRange bufferRange; dataSize = sizeof(AudioValueRange); property.mSelector = kAudioDevicePropertyBufferFrameSizeRange; result = AudioObjectGetPropertyData(id, &property, 0, nullptr, &dataSize, &bufferRange); if (result != noErr) { ATA_ERROR("system error (" << getErrorCode(result) << ") getting buffer size range for device (" << _device << ")."); return false; } if (bufferRange.mMinimum > *_bufferSize) { *_bufferSize = (uint64_t) bufferRange.mMinimum; } else if (bufferRange.mMaximum < *_bufferSize) { *_bufferSize = (uint64_t) bufferRange.mMaximum; } if (_options.flags.m_minimizeLatency == true) { *_bufferSize = (uint64_t) bufferRange.mMinimum; } // Set the buffer size. For multiple streams, I'm assuming we only // need to make this setting for the master channel. 
uint32_t theSize = (uint32_t) *_bufferSize; dataSize = sizeof(uint32_t); property.mSelector = kAudioDevicePropertyBufferFrameSize; result = AudioObjectSetPropertyData(id, &property, 0, nullptr, dataSize, &theSize); if (result != noErr) { ATA_ERROR("system error (" << getErrorCode(result) << ") setting the buffer size for device (" << _device << ")."); return false; } // If attempting to setup a duplex stream, the bufferSize parameter // MUST be the same in both directions! *_bufferSize = theSize; if ( m_mode == audio::orchestra::mode_output && _mode == audio::orchestra::mode_input && *_bufferSize != m_bufferSize) { ATA_ERROR("system error setting buffer size for duplex stream on device (" << _device << ")."); return false; } m_bufferSize = *_bufferSize; m_nBuffers = 1; // Check and if necessary, change the sample rate for the device. double nominalRate; dataSize = sizeof(double); property.mSelector = kAudioDevicePropertyNominalSampleRate; result = AudioObjectGetPropertyData(id, &property, 0, nullptr, &dataSize, &nominalRate); if (result != noErr) { ATA_ERROR("system error (" << getErrorCode(result) << ") getting current sample rate."); return false; } // Only change the sample rate if off by more than 1 Hz. 
if (fabs(nominalRate - (double)_sampleRate) > 1.0) { // Set a property listener for the sample rate change double reportedRate = 0.0; AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster }; result = AudioObjectAddPropertyListener(id, &tmp, &rateListener, (void *) &reportedRate); if (result != noErr) { ATA_ERROR("system error (" << getErrorCode(result) << ") setting sample rate property listener for device (" << _device << ")."); return false; } nominalRate = (double) _sampleRate; result = AudioObjectSetPropertyData(id, &property, 0, nullptr, dataSize, &nominalRate); if (result != noErr) { ATA_ERROR("system error (" << getErrorCode(result) << ") setting sample rate for device (" << _device << ")."); return false; } // Now wait until the reported nominal rate is what we just set. uint32_t microCounter = 0; while (reportedRate != nominalRate) { microCounter += 5000; if (microCounter > 5000000) { break; } std::this_thread::sleep_for(std::chrono::milliseconds(5)); } // Remove the property listener. AudioObjectRemovePropertyListener(id, &tmp, &rateListener, (void *) &reportedRate); if (microCounter > 5000000) { ATA_ERROR("timeout waiting for sample rate update for device (" << _device << ")."); return false; } } // Now set the stream format for all streams. Also, check the // physical format of the device and change that if necessary. AudioStreamBasicDescription description; dataSize = sizeof(AudioStreamBasicDescription); property.mSelector = kAudioStreamPropertyVirtualFormat; result = AudioObjectGetPropertyData(id, &property, 0, nullptr, &dataSize, &description); if (result != noErr) { ATA_ERROR("system error (" << getErrorCode(result) << ") getting stream format for device (" << _device << ")."); return false; } // Set the sample rate and data format id. However, only make the // change if the sample rate is not within 1.0 of the desired // rate and the format is not linear pcm. 
bool updateFormat = false; if (fabs(description.mSampleRate - (double)_sampleRate) > 1.0) { description.mSampleRate = (double) _sampleRate; updateFormat = true; } if (description.mFormatID != kAudioFormatLinearPCM) { description.mFormatID = kAudioFormatLinearPCM; updateFormat = true; } if (updateFormat) { result = AudioObjectSetPropertyData(id, &property, 0, nullptr, dataSize, &description); if (result != noErr) { ATA_ERROR("system error (" << getErrorCode(result) << ") setting sample rate or data format for device (" << _device << ")."); return false; } } // Now check the physical format. property.mSelector = kAudioStreamPropertyPhysicalFormat; result = AudioObjectGetPropertyData(id, &property, 0, nullptr, &dataSize, &description); if (result != noErr) { ATA_ERROR("system error (" << getErrorCode(result) << ") getting stream physical format for device (" << _device << ")."); return false; } //std::cout << "Current physical stream format:" << std::endl; //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl; //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl; //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl; //std::cout << " sample rate = " << description.mSampleRate << std::endl; if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16) { description.mFormatID = kAudioFormatLinearPCM; //description.mSampleRate = (double) sampleRate; AudioStreamBasicDescription testDescription = description; uint32_t formatFlags; // We'll try higher bit rates first and then work our way down. 
std::vector< std::pair<uint32_t, uint32_t> > physicalFormats; formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger; physicalFormats.push_back(std::pair<float, uint32_t>(32, formatFlags)); formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat; physicalFormats.push_back(std::pair<float, uint32_t>(32, formatFlags)); physicalFormats.push_back(std::pair<float, uint32_t>(24, formatFlags)); // 24-bit packed formatFlags &= ~(kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh); physicalFormats.push_back(std::pair<float, uint32_t>(24.2, formatFlags)); // 24-bit in 4 bytes, aligned low formatFlags |= kAudioFormatFlagIsAlignedHigh; physicalFormats.push_back(std::pair<float, uint32_t>(24.4, formatFlags)); // 24-bit in 4 bytes, aligned high formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat; physicalFormats.push_back(std::pair<float, uint32_t>(16, formatFlags)); physicalFormats.push_back(std::pair<float, uint32_t>(8, formatFlags)); bool setPhysicalFormat = false; for(uint32_t i=0; i<physicalFormats.size(); i++) { testDescription = description; testDescription.mBitsPerChannel = (uint32_t) physicalFormats[i].first; testDescription.mFormatFlags = physicalFormats[i].second; if ( (24 == (uint32_t)physicalFormats[i].first) && ~(physicalFormats[i].second & kAudioFormatFlagIsPacked)) { testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame; } else { testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame; } testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket; result = AudioObjectSetPropertyData(id, &property, 0, nullptr, dataSize, &testDescription); if (result == noErr) { setPhysicalFormat = true; //std::cout << "Updated physical stream format:" << 
std::endl; //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl; //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl; //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl; //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl; break; } } if (!setPhysicalFormat) { ATA_ERROR("system error (" << getErrorCode(result) << ") setting physical data format for device (" << _device << ")."); return false; } } // done setting virtual/physical formats. // Get the stream / device latency. uint32_t latency; dataSize = sizeof(uint32_t); property.mSelector = kAudioDevicePropertyLatency; if (AudioObjectHasProperty(id, &property) == true) { result = AudioObjectGetPropertyData(id, &property, 0, nullptr, &dataSize, &latency); if (result == kAudioHardwareNoError) { m_latency[ _mode ] = latency; } else { ATA_ERROR("system error (" << getErrorCode(result) << ") getting device latency for device (" << _device << ")."); return false; } } // Byte-swapping: According to AudioHardware.h, the stream data will // always be presented in native-endian format, so we should never // need to byte swap. m_doByteSwap[modeToIdTable(_mode)] = false; // From the CoreAudio documentation, PCM data must be supplied as // 32-bit floats. 
m_userFormat = _format; m_deviceFormat[modeToIdTable(_mode)] = audio::format_float; if (streamCount == 1) { m_nDeviceChannels[modeToIdTable(_mode)] = description.mChannelsPerFrame; } else { // multiple streams m_nDeviceChannels[modeToIdTable(_mode)] = _channels; } m_nUserChannels[modeToIdTable(_mode)] = _channels; m_channelOffset[modeToIdTable(_mode)] = channelOffset; // offset within a CoreAudio stream m_deviceInterleaved[modeToIdTable(_mode)] = true; if (monoMode == true) { m_deviceInterleaved[modeToIdTable(_mode)] = false; } // Set flags for buffer conversion. m_doConvertBuffer[modeToIdTable(_mode)] = false; if (m_userFormat != m_deviceFormat[modeToIdTable(_mode)]) { m_doConvertBuffer[modeToIdTable(_mode)] = true; } if (m_nUserChannels[modeToIdTable(_mode)] < m_nDeviceChannels[modeToIdTable(_mode)]) { m_doConvertBuffer[modeToIdTable(_mode)] = true; } if (streamCount == 1) { if ( m_nUserChannels[modeToIdTable(_mode)] > 1 && m_deviceInterleaved[modeToIdTable(_mode)] == false) { m_doConvertBuffer[modeToIdTable(_mode)] = true; } } else if (monoMode) { m_doConvertBuffer[modeToIdTable(_mode)] = true; } m_private->iStream[modeToIdTable(_mode)] = firstStream; m_private->nStreams[modeToIdTable(_mode)] = streamCount; m_private->id[modeToIdTable(_mode)] = id; // Allocate necessary internal buffers. uint64_t bufferBytes; bufferBytes = m_nUserChannels[modeToIdTable(_mode)] * *_bufferSize * audio::getFormatBytes(m_userFormat); // m_userBuffer[modeToIdTable(_mode)] = (char *) calloc(bufferBytes, 1); m_userBuffer[modeToIdTable(_mode)].resize(bufferBytes, 0); if (m_userBuffer[modeToIdTable(_mode)].size() == 0) { ATA_ERROR("error allocating user buffer memory."); goto error; } // If possible, we will make use of the CoreAudio stream buffers as // "device buffers". However, we can't do this if using multiple // streams. 
if ( m_doConvertBuffer[modeToIdTable(_mode)] && m_private->nStreams[modeToIdTable(_mode)] > 1) { bool makeBuffer = true; bufferBytes = m_nDeviceChannels[modeToIdTable(_mode)] * audio::getFormatBytes(m_deviceFormat[modeToIdTable(_mode)]); if (_mode == audio::orchestra::mode_input) { if ( m_mode == audio::orchestra::mode_output && m_deviceBuffer) { uint64_t bytesOut = m_nDeviceChannels[0] * audio::getFormatBytes(m_deviceFormat[0]); if (bufferBytes <= bytesOut) { makeBuffer = false; } } } if (makeBuffer) { bufferBytes *= *_bufferSize; if (m_deviceBuffer) { free(m_deviceBuffer); m_deviceBuffer = nullptr; } m_deviceBuffer = (char *) calloc(bufferBytes, 1); if (m_deviceBuffer == nullptr) { ATA_ERROR("error allocating device buffer memory."); goto error; } } } m_sampleRate = _sampleRate; m_device[modeToIdTable(_mode)] = _device; m_state = audio::orchestra::state::stopped; ATA_VERBOSE("Set state as stopped"); // Setup the buffer conversion information structure. if (m_doConvertBuffer[modeToIdTable(_mode)]) { if (streamCount > 1) { setConvertInfo(_mode, 0); } else { setConvertInfo(_mode, channelOffset); } } if ( _mode == audio::orchestra::mode_input && m_mode == audio::orchestra::mode_output && m_device[0] == _device) { // Only one callback procedure per device. 
m_mode = audio::orchestra::mode_duplex; } else { #if defined(MAC_OS_X_VERSION_10_5) && (MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5) result = AudioDeviceCreateIOProcID(id, &audio::orchestra::api::Core::callbackEvent, this, &m_private->procId[modeToIdTable(_mode)]); #else // deprecated in favor of AudioDeviceCreateIOProcID() result = AudioDeviceAddIOProc(id, &audio::orchestra::api::Core::callbackEvent, this); #endif if (result != noErr) { ATA_ERROR("system error setting callback for device (" << _device << ")."); goto error; } if ( m_mode == audio::orchestra::mode_output && _mode == audio::orchestra::mode_input) { m_mode = audio::orchestra::mode_duplex; } else { m_mode = _mode; } } // Setup the device property listener for over/underload. property.mSelector = kAudioDeviceProcessorOverload; result = AudioObjectAddPropertyListener(id, &property, &audio::orchestra::api::Core::xrunListener, this); return true; error: m_userBuffer[0].clear(); m_userBuffer[1].clear(); if (m_deviceBuffer) { free(m_deviceBuffer); m_deviceBuffer = 0; } m_state = audio::orchestra::state::closed; ATA_VERBOSE("Set state as closed"); return false; }
/* Attach (if needed) and start the SPDIF IOProc on the ring buffer's device.
 * Returns TRUE only when the device was successfully started. */
static inline gboolean
_io_proc_spdif_start (GstCoreAudio * core_audio)
{
  OSErr osstat;

  GST_DEBUG_OBJECT (core_audio,
      "osx ring buffer start ioproc ID: %p device_id %lu",
      core_audio->procID, (gulong) core_audio->device_id);

  if (!core_audio->io_proc_active) {
    /* Add IOProc callback */
    osstat = AudioDeviceCreateIOProcID (core_audio->device_id,
        (AudioDeviceIOProc) _io_proc_spdif, (void *) core_audio,
        &core_audio->procID);
    if (osstat != noErr) {
      GST_ERROR_OBJECT (core_audio->osxbuf,
          ":AudioDeviceCreateIOProcID failed: %d", (int) osstat);
      return FALSE;
    }
    core_audio->io_proc_active = TRUE;
  }

  core_audio->io_proc_needs_deactivation = FALSE;

  /* Start device */
  osstat = AudioDeviceStart (core_audio->device_id, core_audio->procID);
  if (osstat != noErr) {
    GST_ERROR_OBJECT (core_audio->osxbuf,
        "AudioDeviceStart failed: %d", (int) osstat);
    return FALSE;
  }

  return TRUE;
}
// Register an IOProc with the HAL for this device and return the new proc ID.
// Throws a CAException if the HAL reports an error.
AudioDeviceIOProcID CAHALAudioDevice::CreateIOProcID(AudioDeviceIOProc inIOProc, void* inClientData)
{
	AudioDeviceIOProcID theIOProcID = NULL;
	const OSStatus theStatus = AudioDeviceCreateIOProcID(mObjectID, inIOProc, inClientData, &theIOProcID);
	ThrowIfError(theStatus, CAException(theStatus), "CAHALAudioDevice::CreateIOProcID: got an error creating the IOProc ID");
	return theIOProcID;
}
/* Create a recorder instance bound to the default CoreAudio input device.
 * source: R vector that will receive the samples; rate: requested sample rate;
 * chs: 1 or 2 channels; flags: currently unused.
 * Returns the new instance, or raises an R error (never returns) on failure. */
static au_instance_t *audiounits_create_recorder(SEXP source, float rate, int chs, int flags)
{
	UInt32 propsize = 0;
	OSStatus err;
	/* BUGFIX: the calloc result was used without a NULL check (crash on OOM);
	   also pass (count, size) in the conventional argument order. */
	au_instance_t *ap = (au_instance_t*) calloc(1, sizeof(au_instance_t));
	if (ap == NULL)
		Rf_error("unable to allocate audio instance");
	ap->source = source;
	ap->sample_rate = rate;
	ap->done = NO;
	ap->position = 0;
	ap->length = LENGTH(source);
	ap->stereo = (chs == 2) ? YES : NO;
	propsize = sizeof(ap->inDev);
	err = AudioHardwareGetProperty(kAudioHardwarePropertyDefaultInputDevice, &propsize, &ap->inDev);
	if (err) {
		free(ap);
		Rf_error("unable to find default audio input (%08x)", err);
	}
	propsize = sizeof(ap->fmtIn);
	err = AudioDeviceGetProperty(ap->inDev, 0, YES, kAudioDevicePropertyStreamFormat, &propsize, &ap->fmtIn);
	if (err) {
		free(ap);
		Rf_error("unable to retrieve audio input format (%08x)", err);
	}
	/* Rprintf(" recording format: %f, chs: %d, fpp: %d, bpp: %d, bpf: %d, flags: %x\n",
	   ap->fmtIn.mSampleRate, ap->fmtIn.mChannelsPerFrame, ap->fmtIn.mFramesPerPacket,
	   ap->fmtIn.mBytesPerPacket, ap->fmtIn.mBytesPerFrame, ap->fmtIn.mFormatFlags); */
	/* Resampling factor between the device's native rate and the requested rate. */
	ap->srFrac = 1.0;
	if (ap->fmtIn.mSampleRate != ap->sample_rate)
		ap->srFrac = ap->sample_rate / ap->fmtIn.mSampleRate;
	ap->srRun = 0.0;
#if defined(MAC_OS_X_VERSION_10_5) && (MAC_OS_X_VERSION_MIN_REQUIRED>=MAC_OS_X_VERSION_10_5)
	err = AudioDeviceCreateIOProcID(ap->inDev, inputRenderProc, ap, &ap->inIOProcID );
#else
	err = AudioDeviceAddIOProc(ap->inDev, inputRenderProc, ap);
#endif
	if (err) {
		free(ap);
		Rf_error("unable to register recording callback (%08x)", err);
	}
	/* Protect the R buffer from GC for the lifetime of the recorder. */
	R_PreserveObject(ap->source);
	Rf_setAttrib(ap->source, Rf_install("rate"), Rf_ScalarInteger(rate)); /* we adjust the rate */
	Rf_setAttrib(ap->source, Rf_install("bits"), Rf_ScalarInteger(16)); /* we say it's 16 because we don't know - float is always 32-bit */
	Rf_setAttrib(ap->source, Rf_install("class"), Rf_mkString("audioSample"));
	if (ap->stereo) {
		SEXP dim = Rf_allocVector(INTSXP, 2);
		INTEGER(dim)[0] = 2;
		INTEGER(dim)[1] = LENGTH(ap->source) / 2;
		Rf_setAttrib(ap->source, R_DimSymbol, dim);
	}
	return ap;
}
void AudioTee::start() { if (mInputDevice.mID == kAudioDeviceUnknown || mOutputDevice.mID == kAudioDeviceUnknown) return; if (mInputDevice.mFormat.mSampleRate != mOutputDevice.mFormat.mSampleRate) { printf("Error in AudioTee::Start() - sample rate mismatch: %f / %f\n", mInputDevice.mFormat.mSampleRate, mOutputDevice.mFormat.mSampleRate); return; } mWorkBuf = new Byte[mInputDevice.mBufferSizeFrames * mInputDevice.mFormat.mBytesPerFrame]; memset(mWorkBuf, 0, mInputDevice.mBufferSizeFrames * mInputDevice.mFormat.mBytesPerFrame); UInt32 framesInHistoryBuffer = NextPowerOfTwo(mInputDevice.mFormat.mSampleRate * mSecondsInHistoryBuffer); mHistoryBufferMaxByteSize = mInputDevice.mFormat.mBytesPerFrame * framesInHistoryBuffer; mHistBuf = new CARingBuffer(); mHistBuf->Allocate(2, mInputDevice.mFormat.mBytesPerFrame, framesInHistoryBuffer); printf("Initializing history buffer with byte capacity %u — %f seconds at %f kHz", mHistoryBufferMaxByteSize, (mHistoryBufferMaxByteSize / mInputDevice.mFormat.mSampleRate / (4 * 2)), mInputDevice.mFormat.mSampleRate); printf("Initializing work buffer with mBufferSizeFrames:%u and mBytesPerFrame %u\n", mInputDevice.mBufferSizeFrames, mInputDevice.mFormat.mBytesPerFrame); mInputIOProcID = NULL; AudioDeviceCreateIOProcID(mInputDevice.mID, InputIOProc, this, &mInputIOProcID); AudioDeviceStart(mInputDevice.mID, mInputIOProcID); mOutputIOProc = OutputIOProc; mOutputIOProcID = NULL; AudioDeviceCreateIOProcID(mOutputDevice.mID, mOutputIOProc, this, &mOutputIOProcID); AudioDeviceStart(mOutputDevice.mID, mOutputIOProcID); }
// Attach the given IOProc to this device (at most one at a time) and start it.
// Returns false if the device is not open, an IOProc is already attached, or
// the HAL rejects the registration.
bool CCoreAudioDevice::AddIOProc(AudioDeviceIOProc ioProc, void* pCallbackData)
{
  // Allow only one IOProc at a time
  if (!m_DeviceId || m_IoProc)
    return false;

  OSStatus status = AudioDeviceCreateIOProcID(m_DeviceId, ioProc, pCallbackData, &m_IoProc);
  if (status != noErr)
  {
    CLog::Log(LOGERROR, "CCoreAudioDevice::AddIOProc: Unable to add IOProc. Error = %s", GetError(status).c_str());
    m_IoProc = NULL;
    return false;
  }

  Start();
  return true;
}
bool CCoreAudioDevice::AddIOProc() { // Allow only one IOProc at a time if (!m_DeviceId || m_IoProc) return false; OSStatus ret = AudioDeviceCreateIOProcID(m_DeviceId, DirectRenderCallback, this, &m_IoProc); if (ret) { CLog::Log(LOGERROR, "CCoreAudioDevice::AddIOProc: " "Unable to add IOProc. Error = %s", GetError(ret).c_str()); m_IoProc = NULL; return false; } Start(); CLog::Log(LOGDEBUG, "CCoreAudioDevice::AddIOProc: " "IOProc %p set for device 0x%04x", m_IoProc, (uint)m_DeviceId); return true; }
// Register and start the device IOProc, then install the client callback.
// Returns true when this device (and its linked input device, if any) started.
// The callback is only published (under callbackLock) after a successful start,
// so the IOProc never sees a half-initialized callback.
bool start (AudioIODeviceCallback* cb)
{
    if (! started)
    {
        callback = nullptr;

        if (deviceID != 0)
        {
           // Pre-10.5 uses the deprecated AudioDeviceAddIOProc; 10.5+ creates
           // a per-client IOProc ID instead.
           #if MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_5
            if (OK (AudioDeviceAddIOProc (deviceID, audioIOProc, this)))
           #else
            if (OK (AudioDeviceCreateIOProcID (deviceID, audioIOProc, this, &audioProcID)))
           #endif
            {
                // NOTE(review): on the 10.5+ path this passes audioIOProc rather
                // than audioProcID to AudioDeviceStart — confirm this is intended
                // (the HAL accepts the proc address here, but the ID would be
                // the matching handle).
                if (OK (AudioDeviceStart (deviceID, audioIOProc)))
                {
                    started = true;
                }
                else
                {
                    // Start failed: undo the registration so a later start can retry.
                   #if MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_5
                    OK (AudioDeviceRemoveIOProc (deviceID, audioIOProc));
                   #else
                    OK (AudioDeviceDestroyIOProcID (deviceID, audioProcID));
                    audioProcID = 0;
                   #endif
                }
            }
        }
    }

    if (started)
    {
        // Publish the callback only once running, under the callback lock.
        const ScopedLock sl (callbackLock);
        callback = cb;
    }

    // Chain the start to the paired input device, if one exists.
    return started && (inputDevice == nullptr || inputDevice->start (cb));
}
/* Open the default CoreAudio input device and the default output AudioUnit
 * for the given audio descriptor. ifmt/ofmt describe the requested input and
 * output formats (ifmt is forced to 16-bit signed linear if necessary).
 * Returns 1 on success, 0 on any failure. */
int macosx_audio_open(audio_desc_t ad, audio_format* ifmt, audio_format *ofmt)
{
	OSStatus err = noErr;
	UInt32 propertySize;
	Boolean writable;
	obtained_ = false;
	add = ad;
	//dev[0] = devices[ad];
	UNUSED(ofmt);
	// Get the default input device ID.
	err = AudioHardwareGetPropertyInfo(kAudioHardwarePropertyDefaultInputDevice, &propertySize, &writable);
	if (err != noErr) {
		return 0;
	}
	err = AudioHardwareGetProperty(kAudioHardwarePropertyDefaultInputDevice, &propertySize, &(devices[ad].inputDeviceID_));
	if (err != noErr) {
		debug_msg("error kAudioHardwarePropertyDefaultInputDevice");
		return 0;
	}
	if (devices[ad].inputDeviceID_ == kAudioDeviceUnknown) {
		debug_msg("error kAudioDeviceUnknown");
		return 0;
	}
	// Get the input stream description.
	err = AudioDeviceGetPropertyInfo(devices[ad].inputDeviceID_, 0, true, kAudioDevicePropertyStreamFormat, &propertySize, &writable);
	if (err != noErr) {
		debug_msg("error AudioDeviceGetPropertyInfo");
		return 0;
	}
	err = AudioDeviceGetProperty(devices[ad].inputDeviceID_, 0, true, kAudioDevicePropertyStreamFormat, &propertySize, &(devices[ad].inputStreamBasicDescription_));
	//printf("inputStreamBasicDescription_.mBytesPerFrame %d\n", devices[add].inputStreamBasicDescription_);
	if (err != noErr) {
		debug_msg("error AudioDeviceGetProperty");
		return 0;
	}
	// Select little-endian samples (comment translated from Czech:
	// "nastavime maly endian" = "we set little endian").
	// BUGFIX: was 'mFormatFlags &= (kAudioFormatFlagIsBigEndian & 0)', which is
	// '&= 0' and wiped out every format flag (float/signed/packed included);
	// the intent is to clear only the big-endian bit.
	devices[ad].inputStreamBasicDescription_.mFormatFlags &= ~kAudioFormatFlagIsBigEndian;
	if (writable) {
		err = AudioDeviceSetProperty(devices[ad].inputDeviceID_, NULL, 0, true, kAudioDevicePropertyStreamFormat, sizeof(AudioStreamBasicDescription), &(devices[ad].inputStreamBasicDescription_));
		if (err != noErr) printf("err: AudioDeviceSetProperty: kAudioDevicePropertyStreamFormat\n");
	}
	/* set the buffer size of the device */
	/*
	int bufferByteSize = 8192;
	propertySize = sizeof(bufferByteSize);
	err = AudioDeviceSetProperty(devices[ad].inputDeviceID_, NULL, 0, true, kAudioDevicePropertyBufferSize, propertySize, &bufferByteSize);
	if (err != noErr) debug_msg("err: Set kAudioDevicePropertyBufferSize to %d\n", bufferByteSize);
	else debug_msg("sucessfully set kAudioDevicePropertyBufferSize to %d\n", bufferByteSize);
	*/
	// Set the device sample rate -- a temporary fix for the G5's
	// built-in audio and possibly other audio devices.
	Boolean IsInput = 0;
	int inChannel = 0;
	Float64 theAnswer = 44100;
	UInt32 theSize = sizeof(theAnswer);
	err = AudioDeviceSetProperty(devices[ad].inputDeviceID_, NULL, inChannel, IsInput, kAudioDevicePropertyNominalSampleRate, theSize, &theAnswer);
	if (err != noErr) {
		debug_msg("error AudioDeviceSetProperty\n");
		return 0;
	}
	debug_msg("Sample rate, %f\n", theAnswer);
#if defined(MAC_OS_X_VERSION_10_5) && (MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5)
	err = AudioDeviceCreateIOProcID(devices[ad].inputDeviceID_, audioIOProc, (void*)NULL, &devices[ad].inputDeviceProcID_);
	if (err != noErr) {
		debug_msg("error AudioDeviceCreateIOProcID, %s\n", GetMacOSStatusCommentString(err));
		return 0;
	}
	err = OpenADefaultComponent(kAudioUnitType_Output, kAudioUnitSubType_DefaultOutput, &(devices[ad].outputUnit_));
	// The HAL AU maybe a better way to in the future...
	//err = OpenADefaultComponent(kAudioUnitType_Output, kAudioUnitSubType_HALOutput, &(devices[ad].outputUnit_));
	if (err != noErr) {
		debug_msg("error OpenADefaultComponent\n");
		return 0;
	}
#else
	// Register the AudioDeviceIOProc.
	err = AudioDeviceAddIOProc(devices[ad].inputDeviceID_, audioIOProc, NULL);
	if (err != noErr) {
		debug_msg("error AudioDeviceAddIOProc\n");
		return 0;
	}
	err = OpenDefaultAudioOutput(&(devices[ad].outputUnit_));
	if (err != noErr) {
		debug_msg("error OpenDefaultAudioOutput\n");
		return 0;
	}
#endif
	// Register a callback function to provide output data to the unit.
	devices[ad].input.inputProc = outputRenderer;
	devices[ad].input.inputProcRefCon = 0;
	/* These would be needed if HAL used
	 * UInt32 enableIO = 1;
	err = AudioUnitSetProperty(devices[ad].outputUnit_, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, 0, (const void*)&enableIO, sizeof(UInt32));
	enableIO = 0;
	err = AudioUnitSetProperty(devices[ad].outputUnit_, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, 1, (const void*)&enableIO, sizeof(UInt32));
	if (err != noErr) {
		debug_msg("error AudioUnitSetProperty EnableIO with error %ld: %s\n", err, GetMacOSStatusErrorString(err));
		return 0;
	}*/
#if defined(MAC_OS_X_VERSION_10_5) && (MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5)
	err = AudioUnitSetProperty(devices[ad].outputUnit_, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Global, 0, &(devices[ad].input), sizeof(AURenderCallbackStruct));
#else
	err = AudioUnitSetProperty(devices[ad].outputUnit_, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, 0, &(devices[ad].input), sizeof(AURenderCallbackStruct));
#endif
	if (err != noErr) {
		debug_msg("error AudioUnitSetProperty1 with error %ld: %s\n", err, GetMacOSStatusErrorString(err));
		return 0;
	}
	// Define the Mash stream description. Mash puts 20ms of data into each read
	// and write call. 20ms at 8000Hz equals 160 samples. Each sample is a u_char,
	// so that's 160 bytes. Mash uses 8-bit mu-law internally, so we need to convert
	// to 16-bit linear before using the audio data.
	devices[ad].mashStreamBasicDescription_.mSampleRate = 8000.0;
	//devices[ad].mashStreamBasicDescription_.mSampleRate = ifmt->sample_rate;
	devices[ad].mashStreamBasicDescription_.mFormatID = kAudioFormatLinearPCM;
#ifdef WORDS_BIGENDIAN
	devices[ad].mashStreamBasicDescription_.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsBigEndian | kLinearPCMFormatFlagIsPacked;
#else
	devices[ad].mashStreamBasicDescription_.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
#endif
	devices[ad].mashStreamBasicDescription_.mBytesPerPacket = 2;
	devices[ad].mashStreamBasicDescription_.mFramesPerPacket = 1;
	devices[ad].mashStreamBasicDescription_.mBytesPerFrame = 2;
	devices[ad].mashStreamBasicDescription_.mChannelsPerFrame = 1;
	devices[ad].mashStreamBasicDescription_.mBitsPerChannel = 16;
	// Inform the default output unit of our source format.
	err = AudioUnitSetProperty(devices[ad].outputUnit_, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &(devices[ad].mashStreamBasicDescription_), sizeof(AudioStreamBasicDescription));
	if (err != noErr) {
		debug_msg("error AudioUnitSetProperty2");
		printf("error setting output unit source format\n");
		return 0;
	}
	// check the stream format
	err = AudioUnitGetPropertyInfo(devices[ad].outputUnit_, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &propertySize, &writable);
	if (err != noErr) debug_msg("err getting propert info for kAudioUnitProperty_StreamFormat\n");
	err = AudioUnitGetProperty(devices[ad].outputUnit_, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &streamdesc_, &propertySize);
	if (err != noErr) debug_msg("err getting values for kAudioUnitProperty_StreamFormat\n");
	char name[128];
	audio_format_name(ifmt, name, 128);
	debug_msg("Requested ifmt %s\n", name);
	debug_msg("ifmt bytes pre block: %d\n", ifmt->bytes_per_block);
	// handle the requested format
	if (ifmt->encoding != DEV_S16) {
		audio_format_change_encoding(ifmt, DEV_S16);
		// BUGFIX: regenerate the printable name; it previously still held the
		// pre-change format, so the debug output was misleading.
		audio_format_name(ifmt, name, 128);
		debug_msg("Requested ifmt changed to %s\n", name);
		debug_msg("ifmt bytes pre block: %d\n", ifmt->bytes_per_block);
	}
	audio_format_name(ofmt, name, 128);
	debug_msg("Requested ofmt %s\n", name);
	debug_msg("ofmt bytes pre block: %d\n", ofmt->bytes_per_block);
	// Allocate the read buffer and Z delay line.
	//readBufferSize_ = 8192;
	readBufferSize_ = ifmt->bytes_per_block * ringBufferFactor_;
	//readBufferSize_ = 320;
	//printf("readBufferSize_ %d\n", readBufferSize_);
	readBuffer_ = malloc(sizeof(u_char) * readBufferSize_);
	bzero(readBuffer_, readBufferSize_ * sizeof(u_char));
	//memset(readBuffer_, PCMU_AUDIO_ZERO, readBufferSize_);
	//inputReadIndex_ = -1;
	inputReadIndex_ = 0;
	inputWriteIndex_ = 0;
	zLine_ = malloc(sizeof(double) * DECIM441_LENGTH / 80);
	availableInput_ = 0;
	// Allocate the write buffer.
	//writeBufferSize_ = 8000;
	writeBufferSize_ = ofmt->bytes_per_block * ringBufferFactor_;
	writeBuffer_ = malloc(sizeof(SInt16) * writeBufferSize_);
	bzero(writeBuffer_, writeBufferSize_ * sizeof(SInt16));
	outputReadIndex_ = 0;
	outputWriteIndex_ = 0;
	//outputWriteIndex_ = -1;
	// Start audio processing.
	err = AudioUnitInitialize(devices[ad].outputUnit_);
	if (err != noErr) {
		debug_msg("error AudioUnitInitialize\n");
		return 0;
	}
	// NOTE(review): on the 10.5+ path the device is started with the IOProc
	// address rather than the created IOProcID — confirm intended.
	err = AudioDeviceStart(devices[ad].inputDeviceID_, audioIOProc);
	if (err != noErr) {
		fprintf(stderr, "Input device error: AudioDeviceStart\n");
		return 0;
	}
	err = AudioOutputUnitStart(devices[ad].outputUnit_);
	if (err != noErr) {
		fprintf(stderr, "Output device error: AudioOutputUnitStart\n");
		return 0;
	}
	return 1;
}
// ----------------------------------------------------------------------------
// Bring up the CoreAudio output path for the SID player: resolve the default
// output device, register device property listeners, allocate the double
// sample buffers plus the spectrum buffer, and create the two playback
// IOProcs.  On any failure the function returns early, leaving
// mIsInitialized false.
//   player        - emulation engine that fills the sample buffers
//   sampleRate    - requested rate; not referenced in this body (the device's
//                   own stream format is obtained via queryStreamFormat)
//   bitsPerSample - requested depth; likewise not referenced here
void AudioCoreDriver::initialize(PlayerLibSidplay* player, int sampleRate, int bitsPerSample)
// ----------------------------------------------------------------------------
{
	// Only instance 0 may bind to the hardware device.
	if (mInstanceId != 0)
		return;

	// Reset playback state unconditionally, even if already initialized.
	mPlayer = player;
	mNumSamplesInBuffer = 512;
	mIsPlaying = false;
	mIsPlayingPreRenderedBuffer = false;
	mBufferUnderrunDetected = false;
	mBufferUnderrunCount = 0;
	mSpectrumTemporalSmoothing = 0.5f;
	mPreRenderedBuffer = NULL;
	mPreRenderedBufferSampleCount = 0;
	mPreRenderedBufferPlaybackPosition = 0;

	if (!mIsInitialized)
	{
		OSStatus err;

		//get default output device
		UInt32 propertySize = sizeof(mDeviceID);
		AudioObjectPropertyAddress prop1 = {kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster};
		err = AudioObjectGetPropertyData(kAudioObjectSystemObject, &prop1, 0, NULL, &propertySize, &mDeviceID);
		if (err != kAudioHardwareNoError)
		{
			printf("AudioObjectGetPropertyData(kAudioHardwarePropertyDefaultOutputDevice) failed\n");
			return;
		}

		if (mDeviceID == kAudioDeviceUnknown)
			return;

		// Cache the device's current stream format in mStreamFormat.
		err = queryStreamFormat(mDeviceID, mStreamFormat);
		if (err != kAudioHardwareNoError)
		{
			printf("queryStreamFormat failed\n");
			return;
		}

		//add property listeners
#if USE_NEW_API
		AudioObjectPropertyAddress prop5 = { kAudioDevicePropertyStreamFormat, //TODO this is deprecated, how to get this notification?
			kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
		err = AudioObjectAddPropertyListener(mDeviceID, &prop5, streamFormatChanged, (void*)this);
		if (err != kAudioHardwareNoError)
		{
			printf("AudioObjectAddPropertyListener(streamFormatChanged) failed\n");
			return;
		}

		AudioObjectPropertyAddress prop6 = { kAudioDeviceProcessorOverload, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
		err = AudioObjectAddPropertyListener(mDeviceID, &prop6, overloadDetected, (void*)this);
		if (err != kAudioHardwareNoError)
		{
			printf("AudioObjectAddPropertyListener(overloadDetected) failed\n");
			return;
		}

		// Watch for the system default output device changing underneath us.
		AudioObjectPropertyAddress prop7 = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
		err = AudioObjectAddPropertyListener(mDeviceID, &prop7, deviceChanged, (void*)this);
		if (err != kAudioHardwareNoError)
		{
			printf("AudioObjectAddPropertyListener(deviceChanged) failed\n");
			return;
		}
#else
		// Legacy (pre-AudioObject) listener API.
		if (AudioDeviceAddPropertyListener(mDeviceID, 0, false, kAudioDevicePropertyStreamFormat, streamFormatChanged, (void*) this) != kAudioHardwareNoError)
			return;
		if (AudioDeviceAddPropertyListener(mDeviceID, 0, false, kAudioDeviceProcessorOverload, overloadDetected, (void*) this) != kAudioHardwareNoError)
			return;
		if (AudioHardwareAddPropertyListener(kAudioHardwarePropertyDefaultOutputDevice, deviceChanged, (void*) this) != kAudioHardwareNoError)
			return;
#endif
		// Two 16-bit sample buffers (double buffering) plus a half-length
		// spectrum buffer, all zero-filled.
		mSampleBuffer1 = new short[mNumSamplesInBuffer];
		memset(mSampleBuffer1, 0, sizeof(short) * mNumSamplesInBuffer);
		mSampleBuffer2 = new short[mNumSamplesInBuffer];
		memset(mSampleBuffer2, 0, sizeof(short) * mNumSamplesInBuffer);
		mSpectrumBuffer = new float[mNumSamplesInBuffer/2];
		memset(mSpectrumBuffer, 0, sizeof(float) * (mNumSamplesInBuffer/2));
		mSampleBuffer = mSampleBuffer1;
		mRetSampleBuffer = mSampleBuffer2;

		// Request a device buffer sized to our sample count; the byte size
		// assumes float-sized device samples per channel.
		int bufferByteSize = mNumSamplesInBuffer * mStreamFormat.mChannelsPerFrame * sizeof(float);
		propertySize = sizeof(bufferByteSize);
#if USE_NEW_API
		AudioObjectPropertyAddress prop8 = { kAudioDevicePropertyBufferSize, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
		err = AudioObjectSetPropertyData(mDeviceID, &prop8, 0, NULL, propertySize, &bufferByteSize);
		if (err != kAudioHardwareNoError)
		{
			printf("AudioObjectSetPropertyData(kAudioDevicePropertyBufferSize) failed\n");
			return;
		}
#else
		if (AudioDeviceSetProperty(mDeviceID, NULL, 0, false, kAudioDevicePropertyBufferSize, propertySize, &bufferByteSize) != kAudioHardwareNoError)
			return;
#endif
		mScaleFactor = sBitScaleFactor;
		mPreRenderedBufferScaleFactor = sBitScaleFactor;

		// IOProc for live emulation playback; on failure release the buffers
		// allocated above so nothing is leaked.
		if (AudioDeviceCreateIOProcID(mDeviceID, emulationPlaybackProc, (void*) this, &mEmulationPlaybackProcID) != kAudioHardwareNoError)
		{
			delete[] mSampleBuffer1;
			mSampleBuffer1 = NULL;
			delete[] mSampleBuffer2;
			mSampleBuffer2 = NULL;
			mSampleBuffer = NULL;
			mRetSampleBuffer = NULL;
			delete[] mSpectrumBuffer;
			mSpectrumBuffer = NULL;
			return;
		}

		// Second IOProc for pre-rendered buffer playback; same cleanup on
		// failure.  NOTE(review): the first IOProc is not destroyed on this
		// path -- confirm whether that is intentional.
		if (AudioDeviceCreateIOProcID(mDeviceID, preRenderedBufferPlaybackProc, (void*) this, &mPreRenderedBufferPlaybackProcID) != kAudioHardwareNoError)
		{
			delete[] mSampleBuffer1;
			mSampleBuffer1 = NULL;
			delete[] mSampleBuffer2;
			mSampleBuffer2 = NULL;
			mSampleBuffer = NULL;
			mRetSampleBuffer = NULL;
			delete[] mSpectrumBuffer;
			mSpectrumBuffer = NULL;
			return;
		}
	}

	mVolume = 1.0f;
	mIsInitialized = true;
}
// Initialize the BlockSound class BlockSound::BlockSound() { sample_size = 0; #ifdef __APPLE__ remaining = 0; UInt32 size = sizeof(device); if (AudioHardwareGetProperty(kAudioHardwarePropertyDefaultOutputDevice, &size, (void *)&device) != noErr) return; size = sizeof(format); if (AudioDeviceGetProperty(device, 0, false, kAudioDevicePropertyStreamFormat, &size, &format) != noErr) return; // Set up a format we like... format.mSampleRate = 44100.0; // 44.1kHz format.mChannelsPerFrame = 2; // stereo if (AudioDeviceSetProperty(device, NULL, 0, false, kAudioDevicePropertyStreamFormat, sizeof(format), &format) != noErr) return; // Check we got linear pcm - what to do if we did not ??? if (format.mFormatID != kAudioFormatLinearPCM) return; // Attach the callback and start the device # if MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_5 if (AudioDeviceCreateIOProcID(device, audio_cb, (void *)this, &audio_proc_id) != noErr) return; AudioDeviceStart(device, audio_proc_id); # else if (AudioDeviceAddIOProc(device, audio_cb, (void *)this) != noErr) return; AudioDeviceStart(device, audio_cb); # endif sample_size = (int)format.mSampleRate; #elif defined(WIN32) WAVEFORMATEX format; memset(&format, 0, sizeof(format)); format.cbSize = sizeof(format); format.wFormatTag = WAVE_FORMAT_PCM; format.nChannels = 2; format.nSamplesPerSec = 44100; format.nAvgBytesPerSec = 44100 * 4; format.nBlockAlign = 4; format.wBitsPerSample = 16; data_handle = GlobalAlloc(GMEM_MOVEABLE | GMEM_SHARE, format.nSamplesPerSec * 4); if (!data_handle) return; data_ptr = (LPSTR)GlobalLock(data_handle); header_handle = GlobalAlloc(GMEM_MOVEABLE | GMEM_SHARE, sizeof(WAVEHDR)); if (!header_handle) return; header_ptr = (WAVEHDR *)GlobalLock(header_handle); header_ptr->lpData = data_ptr; header_ptr->dwFlags = 0; header_ptr->dwLoops = 0; if (waveOutOpen(&device, WAVE_MAPPER, &format, 0, 0, WAVE_ALLOWSYNC) != MMSYSERR_NOERROR) return; sample_size = format.nSamplesPerSec; #else # ifdef HAVE_ALSA_ASOUNDLIB_H handle 
= NULL; if (snd_pcm_open(&handle, "default", SND_PCM_STREAM_PLAYBACK, 0) >= 0) { // Initialize PCM sound stuff... snd_pcm_hw_params_t *params; snd_pcm_hw_params_alloca(¶ms); snd_pcm_hw_params_any(handle, params); snd_pcm_hw_params_set_access(handle, params, SND_PCM_ACCESS_RW_INTERLEAVED); snd_pcm_hw_params_set_format(handle, params, SND_PCM_FORMAT_S16); snd_pcm_hw_params_set_channels(handle, params, 2); unsigned rate = 44100; int dir; snd_pcm_hw_params_set_rate_near(handle, params, &rate, &dir); snd_pcm_uframes_t period = (int)rate; snd_pcm_hw_params_set_period_size_near(handle, params, &period, &dir); sample_size = rate; if (snd_pcm_hw_params(handle, params) < 0) { sample_size = 0; snd_pcm_close(handle); handle = NULL; } } # endif // HAVE_ALSA_ASOUNDLIB_H #endif // __APPLE__ if (sample_size) { // Make an explosion sound by passing white noise through a low pass // filter with a decreasing frequency... sample_data = new short[2 * sample_size]; short *sample_ptr = sample_data; int max_sample = 2 * sample_size - 2; *sample_ptr++ = 0; *sample_ptr++ = 0; for (int j = max_sample; j > 0; j --, sample_ptr ++) { float freq = (float)j / (float)max_sample; float volume = 32767.0 * (0.5 * sqrt(freq) + 0.5); float sample = 0.0001 * ((rand() % 20001) - 10000); *sample_ptr = (int)(volume * freq * sample + (1.0 - freq) * sample_ptr[-2]); } } }
/*
 * Open and start one CoreAudio playback voice.
 *
 * Creates the per-voice mutex, resolves the default output device, clamps
 * the configured buffer frame count to the device's supported range,
 * pushes the requested frequency into the device stream format, registers
 * audioDeviceIOProc and starts playback if the device is not already
 * running.  Returns 0 on success, -1 on any failure.
 *
 * NOTE(review): error paths taken after pthread_mutex_init() succeed
 * return without destroying core->mutex -- presumably released by the
 * caller's fini path; confirm.
 */
static int coreaudio_init_out(HWVoiceOut *hw, struct audsettings *as, void *drv_opaque)
{
    OSStatus status;
    coreaudioVoiceOut *core = (coreaudioVoiceOut *) hw;
    int err;
    const char *typ = "playback";
    AudioValueRange frameRange;
    Audiodev *dev = drv_opaque;
    AudiodevCoreaudioPerDirectionOptions *cpdo = dev->u.coreaudio.out;
    int frames;

    /* create mutex */
    err = pthread_mutex_init(&core->mutex, NULL);
    if (err) {
        dolog("Could not create mutex\nReason: %s\n", strerror (err));
        return -1;
    }

    audio_pcm_init_info (&hw->info, as);

    status = coreaudio_get_voice(&core->outputDeviceID);
    if (status != kAudioHardwareNoError) {
        coreaudio_logerr2 (status, typ, "Could not get default output Device\n");
        return -1;
    }
    if (core->outputDeviceID == kAudioDeviceUnknown) {
        dolog ("Could not initialize %s - Unknown Audiodevice\n", typ);
        return -1;
    }

    /* get minimum and maximum buffer frame sizes */
    status = coreaudio_get_framesizerange(core->outputDeviceID, &frameRange);
    if (status != kAudioHardwareNoError) {
        coreaudio_logerr2 (status, typ, "Could not get device buffer frame range\n");
        return -1;
    }

    /* clamp the configured frame count into the device-supported range */
    frames = audio_buffer_frames( qapi_AudiodevCoreaudioPerDirectionOptions_base(cpdo), as, 11610);
    if (frameRange.mMinimum > frames) {
        core->audioDevicePropertyBufferFrameSize = (UInt32) frameRange.mMinimum;
        dolog ("warning: Upsizing Buffer Frames to %f\n", frameRange.mMinimum);
    } else if (frameRange.mMaximum < frames) {
        core->audioDevicePropertyBufferFrameSize = (UInt32) frameRange.mMaximum;
        dolog ("warning: Downsizing Buffer Frames to %f\n", frameRange.mMaximum);
    } else {
        core->audioDevicePropertyBufferFrameSize = frames;
    }

    /* set Buffer Frame Size */
    status = coreaudio_set_framesize(core->outputDeviceID, &core->audioDevicePropertyBufferFrameSize);
    if (status != kAudioHardwareNoError) {
        coreaudio_logerr2 (status, typ, "Could not set device buffer frame size %" PRIu32 "\n", (uint32_t)core->audioDevicePropertyBufferFrameSize);
        return -1;
    }

    /* get Buffer Frame Size (re-read: the device may have adjusted it) */
    status = coreaudio_get_framesize(core->outputDeviceID, &core->audioDevicePropertyBufferFrameSize);
    if (status != kAudioHardwareNoError) {
        coreaudio_logerr2 (status, typ, "Could not get device buffer frame size\n");
        return -1;
    }
    /* total ring size = buffer_count (default 4) device buffers */
    hw->samples = (cpdo->has_buffer_count ? cpdo->buffer_count : 4) * core->audioDevicePropertyBufferFrameSize;

    /* get StreamFormat */
    status = coreaudio_get_streamformat(core->outputDeviceID, &core->outputStreamBasicDescription);
    if (status != kAudioHardwareNoError) {
        coreaudio_logerr2 (status, typ, "Could not get Device Stream properties\n");
        core->outputDeviceID = kAudioDeviceUnknown;
        return -1;
    }

    /* set Samplerate */
    core->outputStreamBasicDescription.mSampleRate = (Float64) as->freq;
    status = coreaudio_set_streamformat(core->outputDeviceID, &core->outputStreamBasicDescription);
    if (status != kAudioHardwareNoError) {
        coreaudio_logerr2 (status, typ, "Could not set samplerate %d\n", as->freq);
        core->outputDeviceID = kAudioDeviceUnknown;
        return -1;
    }

    /* set Callback */
    core->ioprocid = NULL;
    status = AudioDeviceCreateIOProcID(core->outputDeviceID, audioDeviceIOProc, hw, &core->ioprocid);
    if (status != kAudioHardwareNoError || core->ioprocid == NULL) {
        coreaudio_logerr2 (status, typ, "Could not set IOProc\n");
        core->outputDeviceID = kAudioDeviceUnknown;
        return -1;
    }

    /* start Playback */
    if (!isPlaying(core->outputDeviceID)) {
        status = AudioDeviceStart(core->outputDeviceID, core->ioprocid);
        if (status != kAudioHardwareNoError) {
            coreaudio_logerr2 (status, typ, "Could not start playback\n");
            AudioDeviceDestroyIOProcID(core->outputDeviceID, core->ioprocid);
            core->outputDeviceID = kAudioDeviceUnknown;
            return -1;
        }
    }

    return 0;
}
/*****************************************************************************
 * Setup a encoded digital stream (SPDIF)
 *
 * Hogs the selected device, disables mixing if permitted, scans the
 * device's output streams for one that advertises an AC3 physical format
 * ('IAC3' / kAudioFormat60958AC3), picks the best sample rate (requested,
 * else the device's current, else the highest available), switches the
 * stream to it, allocates the output FIFO and installs the SPDIF render
 * callback.  Returns CONTROL_TRUE on success; on failure undoes hog mode /
 * mixing / stream format via the err_out labels and returns CONTROL_FALSE.
 *****************************************************************************/
static int OpenSPDIF(void)
{
    OSStatus err = noErr;
    UInt32 i_param_size, b_mix = 0;
    Boolean b_writeable = 0;
    AudioStreamID *p_streams = NULL;
    int i, i_streams = 0;
    AudioObjectPropertyAddress property_address;

    /* Start doing the SPDIF setup process. */
    ao->b_digital = 1;

    /* Hog the device. */
    ao->i_hog_pid = getpid() ;

    err = SetAudioProperty(ao->i_selected_dev, kAudioDevicePropertyHogMode, sizeof(ao->i_hog_pid), &ao->i_hog_pid);
    if (err != noErr) {
        ao_msg(MSGT_AO, MSGL_WARN, "failed to set hogmode: [%4.4s]\n", (char *)&err);
        ao->i_hog_pid = -1;
        goto err_out;
    }

    /* Set mixable to false if we are allowed to. */
    err = IsAudioPropertySettable(ao->i_selected_dev, kAudioDevicePropertySupportsMixing, &b_writeable);
    err = GetAudioProperty(ao->i_selected_dev, kAudioDevicePropertySupportsMixing, sizeof(UInt32), &b_mix);
    if (err != noErr && b_writeable) {
        b_mix = 0;
        err = SetAudioProperty(ao->i_selected_dev, kAudioDevicePropertySupportsMixing, sizeof(UInt32), &b_mix);
        ao->b_changed_mixing = 1;
    }
    if (err != noErr) {
        ao_msg(MSGT_AO, MSGL_WARN, "failed to set mixmode: [%4.4s]\n", (char *)&err);
        goto err_out;
    }

    /* Get a list of all the streams on this device. */
    i_param_size = GetAudioPropertyArray(ao->i_selected_dev, kAudioDevicePropertyStreams, kAudioDevicePropertyScopeOutput, (void **)&p_streams);
    if (!i_param_size) {
        ao_msg(MSGT_AO, MSGL_WARN, "could not get number of streams.\n");
        goto err_out;
    }

    i_streams = i_param_size / sizeof(AudioStreamID);
    ao_msg(MSGT_AO, MSGL_V, "current device stream number: %d\n", i_streams);

    /* Stop scanning as soon as a digital-capable stream has been chosen
     * (i_stream_index becomes >= 0). */
    for (i = 0; i < i_streams && ao->i_stream_index < 0; ++i) {
        /* Find a stream with a cac3 stream. */
        AudioStreamBasicDescription *p_format_list = NULL;
        int i_formats = 0, j = 0, b_digital = 0;

        i_param_size = GetGlobalAudioPropertyArray(p_streams[i], kAudioStreamPropertyPhysicalFormats, (void **)&p_format_list);
        if (!i_param_size) {
            ao_msg(MSGT_AO, MSGL_WARN, "Could not get number of stream formats.\n");
            continue;
        }

        i_formats = i_param_size / sizeof(AudioStreamBasicDescription);

        /* Check if one of the supported formats is a digital format. */
        for (j = 0; j < i_formats; ++j) {
            if (p_format_list[j].mFormatID == 'IAC3' || p_format_list[j].mFormatID == kAudioFormat60958AC3) {
                b_digital = 1;
                break;
            }
        }

        if (b_digital) {
            /* If this stream supports a digital (cac3) format, then set it. */
            int i_requested_rate_format = -1;
            int i_current_rate_format = -1;
            int i_backup_rate_format = -1;

            ao->i_stream_id = p_streams[i];
            ao->i_stream_index = i;

            if (ao->b_revert == 0) {
                /* Retrieve the original format of this stream first if not done so already. */
                err = GetAudioProperty(ao->i_stream_id, kAudioStreamPropertyPhysicalFormat, sizeof(ao->sfmt_revert), &ao->sfmt_revert);
                if (err != noErr) {
                    ao_msg(MSGT_AO, MSGL_WARN, "Could not retrieve the original stream format: [%4.4s]\n", (char *)&err);
                    if (p_format_list) free(p_format_list);
                    continue;
                }
                ao->b_revert = 1;
            }

            /* Rank the digital formats: exact requested rate wins outright;
             * otherwise remember the device's current rate and the highest
             * rate seen as fallbacks. */
            for (j = 0; j < i_formats; ++j)
                if (p_format_list[j].mFormatID == 'IAC3' || p_format_list[j].mFormatID == kAudioFormat60958AC3) {
                    if (p_format_list[j].mSampleRate == ao->stream_format.mSampleRate) {
                        i_requested_rate_format = j;
                        break;
                    }
                    if (p_format_list[j].mSampleRate == ao->sfmt_revert.mSampleRate)
                        i_current_rate_format = j;
                    else if (i_backup_rate_format < 0 || p_format_list[j].mSampleRate > p_format_list[i_backup_rate_format].mSampleRate)
                        i_backup_rate_format = j;
                }

            if (i_requested_rate_format >= 0) /* We prefer to output at the samplerate of the original audio. */
                ao->stream_format = p_format_list[i_requested_rate_format];
            else if (i_current_rate_format >= 0) /* If not possible, we will try to use the current samplerate of the device. */
                ao->stream_format = p_format_list[i_current_rate_format];
            else
                ao->stream_format = p_format_list[i_backup_rate_format];
            /* And if we have to, any digital format will be just fine (highest rate possible). */
        }
        if (p_format_list) free(p_format_list);
    }

    if (p_streams) free(p_streams);

    if (ao->i_stream_index < 0) {
        ao_msg(MSGT_AO, MSGL_WARN, "Cannot find any digital output stream format when OpenSPDIF().\n");
        goto err_out;
    }

    print_format(MSGL_V, "original stream format:", &ao->sfmt_revert);

    if (!AudioStreamChangeFormat(ao->i_stream_id, ao->stream_format))
        goto err_out;

    /* Be notified if the device configuration changes while hogged. */
    property_address.mSelector = kAudioDevicePropertyDeviceHasChanged;
    property_address.mScope = kAudioObjectPropertyScopeGlobal;
    property_address.mElement = kAudioObjectPropertyElementMaster;
    err = AudioObjectAddPropertyListener(ao->i_selected_dev, &property_address, DeviceListener, NULL);
    if (err != noErr)
        ao_msg(MSGT_AO, MSGL_WARN, "AudioDeviceAddPropertyListener for kAudioDevicePropertyDeviceHasChanged failed: [%4.4s]\n", (char *)&err);

    /* FIXME: If output stream is not native byte-order, we need change endian somewhere. */
    /*        Although there's no such case reported.                                     */
#if HAVE_BIGENDIAN
    if (!(ao->stream_format.mFormatFlags & kAudioFormatFlagIsBigEndian))
#else
    if (ao->stream_format.mFormatFlags & kAudioFormatFlagIsBigEndian)
#endif
        ao_msg(MSGT_AO, MSGL_WARN, "Output stream has non-native byte order, digital output may fail.\n");

    /* For ac3/dts, just use packet size 6144 bytes as chunk size. */
    ao->chunk_size = ao->stream_format.mBytesPerPacket;

    ao_data.samplerate = ao->stream_format.mSampleRate;
    ao_data.channels = ao->stream_format.mChannelsPerFrame;
    ao_data.bps = ao_data.samplerate * (ao->stream_format.mBytesPerPacket/ao->stream_format.mFramesPerPacket);
    ao_data.outburst = ao->chunk_size;
    ao_data.buffersize = ao_data.bps;

    /* FIFO holds roughly one second (bps bytes), rounded up to whole chunks. */
    ao->num_chunks = (ao_data.bps+ao->chunk_size-1)/ao->chunk_size;
    ao->buffer_len = ao->num_chunks * ao->chunk_size;
    ao->buffer = av_fifo_alloc(ao->buffer_len);

    ao_msg(MSGT_AO,MSGL_V, "using %5d chunks of %d bytes (buffer len %d bytes)\n", (int)ao->num_chunks, (int)ao->chunk_size, (int)ao->buffer_len);

    /* Create IOProc callback. */
    err = AudioDeviceCreateIOProcID(ao->i_selected_dev, (AudioDeviceIOProc)RenderCallbackSPDIF, (void *)ao, &ao->renderCallback);
    if (err != noErr || ao->renderCallback == NULL) {
        ao_msg(MSGT_AO, MSGL_WARN, "AudioDeviceAddIOProc failed: [%4.4s]\n", (char *)&err);
        goto err_out1;
    }

    reset();

    return CONTROL_TRUE;

err_out1:
    /* Restore the stream's original physical format if we changed it. */
    if (ao->b_revert)
        AudioStreamChangeFormat(ao->i_stream_id, ao->sfmt_revert);
err_out:
    /* Re-enable mixing and release hog mode before failing. */
    if (ao->b_changed_mixing && ao->sfmt_revert.mFormatID != kAudioFormat60958AC3) {
        int b_mix = 1;
        err = SetAudioProperty(ao->i_selected_dev, kAudioDevicePropertySupportsMixing, sizeof(int), &b_mix);
        if (err != noErr)
            ao_msg(MSGT_AO, MSGL_WARN, "failed to set mixmode: [%4.4s]\n", (char *)&err);
    }
    if (ao->i_hog_pid == getpid()) {
        ao->i_hog_pid = -1;
        err = SetAudioProperty(ao->i_selected_dev, kAudioDevicePropertyHogMode, sizeof(ao->i_hog_pid), &ao->i_hog_pid);
        if (err != noErr)
            ao_msg(MSGT_AO, MSGL_WARN, "Could not release hogmode: [%4.4s]\n", (char *)&err);
    }
    av_fifo_free(ao->buffer);
    free(ao);
    ao = NULL;
    return CONTROL_FALSE;
}
/* * Initialize CoreAudio and return an opaque pointer * for keeping track of our audio device * */ void *audio_init(void) { OSStatus s; AudioDeviceID adev_id; AudioValueRange avr; UInt32 framesize = 512; UInt32 sz; AudioStreamBasicDescription fmt; CoreAudioDevice *device; DSFYDEBUG("Initializing CoreAudio\n"); sz = sizeof (adev_id); s = AudioHardwareGetProperty(kAudioHardwarePropertyDefaultOutputDevice, &sz, &adev_id); if(s != 0) { fprintf(stderr, "AudioHardwareGetProperty() failed with error %d\n", s); return NULL; } else if (adev_id == kAudioDeviceUnknown) { fprintf(stderr, "AudioHardwareGetProperty() returned device kAudioDeviceUnknown\n"); return NULL; } /* Find out the bounds of buffer frame size */ sz = sizeof(avr); if ((s = AudioDeviceGetProperty(adev_id, 0, false, kAudioDevicePropertyBufferFrameSizeRange, &sz, &avr))) { printf("AudioDeviceGetProperty() failed with error %d\n", s); return NULL; } /* Keep the requested number of frames within bounds */ if (framesize < avr.mMinimum) framesize = avr.mMinimum; else if (framesize > avr.mMaximum) framesize = avr.mMaximum; /* Set buffer frame size for device */ sz = sizeof (framesize); s = AudioDeviceSetProperty (adev_id, 0, 0, false, kAudioDevicePropertyBufferFrameSize, sz, &framesize); if (s != kAudioHardwareNoError) { fprintf(stderr, "AudioDeviceSetProperty() failed with error %d\n", s); return NULL; } /* Get current audio format */ sz = sizeof (fmt); if (AudioDeviceGetProperty (adev_id, 0, false, kAudioDevicePropertyStreamFormat, &sz, &fmt)) { DSFYDEBUG ("AudioDeviceGetProperty() failed\n"); return NULL; } /* Setup audio format */ fmt.mFormatID = kAudioFormatLinearPCM; fmt.mFormatFlags = kAudioFormatFlagIsFloat | kAudioFormatFlagIsPacked; fmt.mSampleRate = 44100; fmt.mChannelsPerFrame = 2; fmt.mBytesPerFrame = sizeof(Float32) * fmt.mChannelsPerFrame; fmt.mFramesPerPacket = 1; fmt.mBytesPerPacket = fmt.mFramesPerPacket * fmt.mBytesPerFrame; fmt.mReserved = 0; /* Update audio format */ sz = sizeof (fmt); if 
(AudioDeviceSetProperty (adev_id, NULL, 0, false, kAudioDevicePropertyStreamFormat, sz, &fmt)) { DSFYDEBUG ("AudioDeviceSetProperty() failed\n"); return NULL; } DSFYDEBUG ("kAudioDevicePropertyStreamFormat: mSampleRate %f\n", fmt.mSampleRate); DSFYDEBUG ("kAudioDevicePropertyStreamFormat: mFormatFlags 0x%08x (IsSignedInteger:%s, isFloat:%s, isBigEndian:%s, kLinearPCMFormatFlagIsNonInterleaved:%s, kAudioFormatFlagIsPacked:%s)\n", fmt.mFormatFlags, fmt.mFormatFlags & kLinearPCMFormatFlagIsSignedInteger ? "yes" : "no", fmt.mFormatFlags & kLinearPCMFormatFlagIsFloat ? "yes" : "no", fmt.mFormatFlags & kLinearPCMFormatFlagIsBigEndian ? "yes" : "no", fmt.mFormatFlags & kLinearPCMFormatFlagIsNonInterleaved ? "yes" : "no", fmt.mFormatFlags & kAudioFormatFlagIsPacked ? "yes" : "no"); DSFYDEBUG ("kAudioDevicePropertyStreamFormat: mBitsPerChannel %u\n", fmt.mBitsPerChannel); DSFYDEBUG ("kAudioDevicePropertyStreamFormat: mChannelsPerFrame %u\n", fmt.mChannelsPerFrame); DSFYDEBUG ("kAudioDevicePropertyStreamFormat: mFramesPerPacket %u\n", fmt.mFramesPerPacket); DSFYDEBUG ("kAudioDevicePropertyStreamFormat: mBytesPerFrame %u\n", fmt.mBytesPerFrame); DSFYDEBUG ("kAudioDevicePropertyStreamFormat: mBytesPerPacket %u\n", fmt.mBytesPerPacket); device = (CoreAudioDevice *)malloc(sizeof(CoreAudioDevice)); if(!device) return NULL; device->adev_id = adev_id; device->playing = 0; device->buflen = 0; device->bufsize = sizeof(short) * 32768; if (AudioDeviceCreateIOProcID (device->adev_id, audio_callback, device, &device->proc_id)) { DSFYDEBUG ("AudioDeviceCreateIOProcID() returned FAIL!\n"); free(device); return NULL; } device->buf = (short *)malloc(device->bufsize); pthread_mutex_init(&device->mutex, NULL); pthread_cond_init(&device->event, NULL); pthread_cond_init(&device->hold, NULL); DSFYDEBUG("Done initializing CoreAudio\n"); return device; }
/*
 * Open the default CoreAudio output device for playback.
 *
 * Verifies the device delivers linear PCM, zeroes the format fields that
 * are (re)negotiated on the first render pass, negotiates a buffer size
 * around 32768 bytes, allocates the int16 staging buffers and registers
 * the I/O callback.  Returns 0 on success, -1 on failure (with a message
 * on stderr).  Uses the module-level adid/afmt/abuflen/... globals.
 */
int audio_output_open(void)
{
	UInt32 size;

	/* Obtain the audio device ID */
	size = sizeof adid;
	if (AudioHardwareGetProperty( kAudioHardwarePropertyDefaultOutputDevice, &size, &adid) != 0 || adid == kAudioDeviceUnknown)
		goto error;

	/* Adjust the stream format */
	size = sizeof afmt;
	if (AudioDeviceGetProperty(adid, 0, false, kAudioDevicePropertyStreamFormat, &size, &afmt) != 0 || afmt.mFormatID != kAudioFormatLinearPCM)
		goto error;

	/* To be set on the first run (zero rate/channels are deliberate;
	 * mBytesPerFrame/mBytesPerPacket therefore start at 0 as well) */
	afmt.mSampleRate = 0;
	afmt.mChannelsPerFrame = 0;
	afmt.mBytesPerFrame = afmt.mChannelsPerFrame * sizeof (float);
	afmt.mBytesPerPacket = afmt.mBytesPerFrame * afmt.mFramesPerPacket;

	/* Decide what buffer size to use (the set may be adjusted or refused
	 * by the device, so read back the value it actually accepted) */
	size = sizeof abuflen;
	abuflen = 32768;
	AudioDeviceSetProperty(adid, 0, 0, false, kAudioDevicePropertyBufferSize, size, &abuflen);
	if (AudioDeviceGetProperty(adid, 0, false, kAudioDevicePropertyBufferSize, &size, &abuflen) != 0)
		goto error;

#ifdef BUILD_VOLUME
	/* Store the audio channels */
	size = sizeof achans;
	AudioDeviceGetProperty(adid, 0, false, kAudioDevicePropertyPreferredChannelsForStereo, &size, &achans);
#endif /* BUILD_VOLUME */

	/* The buffer size reported is in bytes; convert it to a float
	 * sample count for the int16 staging buffers below */
	abuflen /= sizeof(float);
	abufnew = g_malloc(abuflen * sizeof(int16_t));
	abufcur = g_malloc(abuflen * sizeof(int16_t));

	/* Locking down the buffer length */
	abuflock = g_mutex_new();
	abufdrained = g_cond_new();

	/* Add our own I/O handling routine */
#ifdef MAC_OS_X_VERSION_10_5
	if (AudioDeviceCreateIOProcID(adid, audio_output_ioproc, NULL, &aprocid) != 0)
#else /* !MAC_OS_X_VERSION_10_5 */
	if (AudioDeviceAddIOProc(adid, audio_output_ioproc, NULL) != 0)
#endif /* MAC_OS_X_VERSION_10_5 */
		goto error;

	return (0);
error:
	g_printerr("%s\n", _("Cannot open the audio device."));
	return (-1);
}
/*****************************************************************************
 * Setup a encoded digital stream (SPDIF)
 *
 * Hogs the device, disables mixing when allowed, finds an output stream
 * advertising an AC3 physical format ('IAC3' / kAudioFormat60958AC3),
 * switches it to the best-matching sample rate (requested, else current,
 * else highest available), accumulates the device's hardware latency,
 * allocates the ring buffer used by the render callback, and finally
 * installs and starts RenderCallbackSPDIF.  Returns true on success.
 *
 * NOTE(review): failure paths after hog mode / mixing have been changed
 * return false without restoring them -- presumably handled by the
 * caller's close path; confirm.
 *****************************************************************************/
int CoreAudioAUHAL::OpenSPDIF(struct CoreAudioDeviceParameters *deviceParameters, const CStdString& strName, int channels, float sampleRate, int bitsPerSample, int packetSize)
{
	OSStatus err = noErr;
	UInt32 i_param_size = 0, b_mix = 0;
	Boolean b_writeable = false;
	AudioStreamID *p_streams = NULL;
	int i = 0, i_streams = 0;

	// We're digital.
	s_lastPlayWasSpdif = true;

	/* Start doing the SPDIF setup proces */
	//deviceParameters->b_digital = true;
	deviceParameters->b_changed_mixing = false;

	/* Hog the device */
	i_param_size = sizeof(deviceParameters->i_hog_pid);
	deviceParameters->i_hog_pid = getpid();

	err = AudioDeviceSetProperty(deviceParameters->device_id, 0, 0, FALSE, kAudioDevicePropertyHogMode, i_param_size, &deviceParameters->i_hog_pid);
	if( err != noErr )
	{
		CLog::Log(LOGERROR, "Failed to set hogmode: [%4.4s]", (char *)&err );
		return false;
	}

	/* Set mixable to false if we are allowed to */
	err = AudioDeviceGetPropertyInfo(deviceParameters->device_id, 0, FALSE, kAudioDevicePropertySupportsMixing, &i_param_size, &b_writeable );
	err = AudioDeviceGetProperty(deviceParameters->device_id, 0, FALSE, kAudioDevicePropertySupportsMixing, &i_param_size, &b_mix );
	if( !err && b_writeable )
	{
		b_mix = 0;
		err = AudioDeviceSetProperty( deviceParameters->device_id, 0, 0, FALSE, kAudioDevicePropertySupportsMixing, i_param_size, &b_mix );
		deviceParameters->b_changed_mixing = true;
	}
	if( err != noErr )
	{
		CLog::Log(LOGERROR, "Failed to set mixmode: [%4.4s]", (char *)&err );
		return false;
	}

	/* Get a list of all the streams on this device */
	err = AudioDeviceGetPropertyInfo(deviceParameters->device_id, 0, FALSE, kAudioDevicePropertyStreams, &i_param_size, NULL );
	if( err != noErr )
	{
		CLog::Log(LOGERROR, "Could not get number of streams: [%4.4s]", (char *)&err );
		return false;
	}

	i_streams = i_param_size / sizeof( AudioStreamID );
	p_streams = (AudioStreamID *)malloc( i_param_size );
	if( p_streams == NULL )
		return false;

	err = AudioDeviceGetProperty(deviceParameters->device_id, 0, FALSE, kAudioDevicePropertyStreams, &i_param_size, p_streams );
	if( err != noErr )
	{
		CLog::Log(LOGERROR, "Could not get number of streams: [%4.4s]", (char *)&err );
		free( p_streams );
		return false;
	}

	/* Scan until a digital-capable stream has been chosen
	 * (i_stream_index becomes >= 0). */
	for( i = 0; i < i_streams && deviceParameters->i_stream_index < 0 ; i++ )
	{
		/* Find a stream with a cac3 stream */
		AudioStreamBasicDescription *p_format_list = NULL;
		int i_formats = 0, j = 0;
		bool b_digital = false;

		/* Retrieve all the stream formats supported by each output stream */
		err = AudioStreamGetPropertyInfo( p_streams[i], 0, kAudioStreamPropertyPhysicalFormats, &i_param_size, NULL );
		if( err != noErr )
		{
			CLog::Log(LOGERROR, "Could not get number of streamformats: [%4.4s]", (char *)&err );
			continue;
		}

		i_formats = i_param_size / sizeof( AudioStreamBasicDescription );
		p_format_list = (AudioStreamBasicDescription *)malloc( i_param_size );
		if( p_format_list == NULL )
			continue;

		err = AudioStreamGetProperty( p_streams[i], 0, kAudioStreamPropertyPhysicalFormats, &i_param_size, p_format_list );
		if( err != noErr )
		{
			CLog::Log(LOGERROR, "Could not get the list of streamformats: [%4.4s]", (char *)&err );
			free( p_format_list );
			continue;
		}

		/* Check if one of the supported formats is a digital format */
		for( j = 0; j < i_formats; j++ )
		{
			if( p_format_list[j].mFormatID == 'IAC3' || p_format_list[j].mFormatID == kAudioFormat60958AC3 )
			{
				b_digital = true;
				break;
			}
		}

		if( b_digital )
		{
			/* if this stream supports a digital (cac3) format, then go set it. */
			int i_requested_rate_format = -1;
			int i_current_rate_format = -1;
			int i_backup_rate_format = -1;

			deviceParameters->i_stream_id = p_streams[i];
			deviceParameters->i_stream_index = i;

			if(deviceParameters->b_revert == false )
			{
				/* Retrieve the original format of this stream first if not done so already */
				i_param_size = sizeof(deviceParameters->sfmt_revert);
				err = AudioStreamGetProperty(deviceParameters->i_stream_id, 0, kAudioStreamPropertyPhysicalFormat, &i_param_size, &deviceParameters->sfmt_revert );
				if( err != noErr )
				{
					CLog::Log(LOGERROR, "Could not retrieve the original streamformat: [%4.4s]", (char *)&err );
					//continue;
				}
				else deviceParameters->b_revert = true;
			}

			/* Rank the digital formats: exact requested rate wins outright;
			 * otherwise remember the device's current rate and the highest
			 * rate seen as fallbacks. */
			for( j = 0; j < i_formats; j++ )
			{
				if( p_format_list[j].mFormatID == 'IAC3' || p_format_list[j].mFormatID == kAudioFormat60958AC3 )
				{
					if( p_format_list[j].mSampleRate == sampleRate)
					{
						i_requested_rate_format = j;
						break;
					}
					else if( p_format_list[j].mSampleRate == deviceParameters->sfmt_revert.mSampleRate )
					{
						i_current_rate_format = j;
					}
					else
					{
						if( i_backup_rate_format < 0 || p_format_list[j].mSampleRate > p_format_list[i_backup_rate_format].mSampleRate )
							i_backup_rate_format = j;
					}
				}
			}

			if( i_requested_rate_format >= 0 ) /* We prefer to output at the samplerate of the original audio */
				deviceParameters->stream_format = p_format_list[i_requested_rate_format];
			else if( i_current_rate_format >= 0 ) /* If not possible, we will try to use the current samplerate of the device */
				deviceParameters->stream_format = p_format_list[i_current_rate_format];
			else deviceParameters->stream_format = p_format_list[i_backup_rate_format]; /* And if we have to, any digital format will be just fine (highest rate possible) */
		}
		free( p_format_list );
	}

	free( p_streams );

	CLog::Log(LOGINFO, STREAM_FORMAT_MSG("original stream format: ", deviceParameters->sfmt_revert ) );

	if( !AudioStreamChangeFormat(deviceParameters, deviceParameters->i_stream_id, deviceParameters->stream_format))
		return false;

	// Get device hardware buffer size
	// Sum the reported latencies into a single frame count; components
	// whose query fails are simply skipped.
	uint32_t audioDeviceLatency, audioStreamLatency, audioDeviceBufferFrameSize, audioDeviceSafetyOffset;
	deviceParameters->hardwareFrameLatency = 0;
	i_param_size = sizeof(uint32_t);

	err = AudioDeviceGetProperty(deviceParameters->device_id, 0, false, kAudioDevicePropertyLatency, &i_param_size, &audioDeviceLatency);
	if (err == noErr) deviceParameters->hardwareFrameLatency += audioDeviceLatency;

	err = AudioDeviceGetProperty(deviceParameters->device_id, 0, false, kAudioDevicePropertyBufferFrameSize, &i_param_size, &audioDeviceBufferFrameSize);
	if (err == noErr) deviceParameters->hardwareFrameLatency += audioDeviceBufferFrameSize;

	err = AudioDeviceGetProperty(deviceParameters->device_id, 0, false, kAudioDevicePropertySafetyOffset, &i_param_size, &audioDeviceSafetyOffset);
	if (err == noErr) deviceParameters->hardwareFrameLatency += audioDeviceSafetyOffset;

	err = AudioStreamGetProperty(deviceParameters->i_stream_id, 0, kAudioStreamPropertyLatency, &i_param_size, &audioStreamLatency);
	if (err == noErr) deviceParameters->hardwareFrameLatency += audioStreamLatency;

	CLog::Log(LOGINFO, "Hardware latency: %i frames (%.2f msec @ %.0fHz)", deviceParameters->hardwareFrameLatency, (float)deviceParameters->hardwareFrameLatency / deviceParameters->stream_format.mSampleRate * 1000, deviceParameters->stream_format.mSampleRate);

	// initialise the CoreAudio sink buffer
	// Smallest power of two strictly greater than the sample rate.
	uint32_t framecount = 1;
	while(framecount <= deviceParameters->stream_format.mSampleRate) // ensure power of 2
	{
		framecount <<= 1;
	}

#warning free
	// NOTE(review): neither outputBuffer nor outputBufferData is freed
	// anywhere in this function's error paths (see the #warning above).
	deviceParameters->outputBuffer = (PaUtilRingBuffer *)malloc(sizeof(PaUtilRingBuffer));
	deviceParameters->outputBufferData = calloc(1, framecount * channels * bitsPerSample/8); // use uncompressed size if encoding ac3

	PaUtil_InitializeRingBuffer(deviceParameters->outputBuffer, channels * bitsPerSample/8, framecount, deviceParameters->outputBufferData);

	/* Add IOProc callback */
	err = AudioDeviceCreateIOProcID(deviceParameters->device_id, (AudioDeviceIOProc)RenderCallbackSPDIF, deviceParameters, &deviceParameters->sInputIOProcID);
	if( err != noErr )
	{
		CLog::Log(LOGERROR, "AudioDeviceAddIOProcID failed: [%4.4s]", (char *)&err );
		return false;
	}

	/* Start device */
	err = AudioDeviceStart(deviceParameters->device_id, (AudioDeviceIOProc)RenderCallbackSPDIF );
	if( err != noErr )
	{
		CLog::Log(LOGERROR, "AudioDeviceStart failed: [%4.4s]", (char *)&err );

		// Unregister the IOProc we just created before failing.
		err = AudioDeviceDestroyIOProcID(deviceParameters->device_id, (AudioDeviceIOProc)RenderCallbackSPDIF);
		if( err != noErr )
		{
			CLog::Log(LOGERROR, "AudioDeviceRemoveIOProc failed: [%4.4s]", (char *)&err );
		}
		return false;
	}

	return true;
}